diff --git a/accounts/ethkey/cli/src/main.rs b/accounts/ethkey/cli/src/main.rs index 759f5f484..9d5ceb5fb 100644 --- a/accounts/ethkey/cli/src/main.rs +++ b/accounts/ethkey/cli/src/main.rs @@ -26,11 +26,13 @@ extern crate threadpool; #[macro_use] extern crate serde_derive; -use std::num::ParseIntError; -use std::{env, fmt, process, io, sync}; +use std::{env, fmt, io, num::ParseIntError, process, sync}; use docopt::Docopt; -use ethkey::{KeyPair, Random, Brain, BrainPrefix, Prefix, Error as EthkeyError, Generator, sign, verify_public, verify_address, brain_recover}; +use ethkey::{ + brain_recover, sign, verify_address, verify_public, Brain, BrainPrefix, Error as EthkeyError, + Generator, KeyPair, Prefix, Random, +}; use rustc_hex::{FromHex, FromHexError}; const USAGE: &'static str = r#" @@ -65,387 +67,428 @@ Commands: #[derive(Debug, Deserialize)] struct Args { - cmd_info: bool, - cmd_generate: bool, - cmd_random: bool, - cmd_prefix: bool, - cmd_sign: bool, - cmd_verify: bool, - cmd_public: bool, - cmd_address: bool, - cmd_recover: bool, - arg_prefix: String, - arg_secret: String, - arg_secret_or_phrase: String, - arg_known_phrase: String, - arg_message: String, - arg_public: String, - arg_address: String, - arg_signature: String, - flag_secret: bool, - flag_public: bool, - flag_address: bool, - flag_brain: bool, + cmd_info: bool, + cmd_generate: bool, + cmd_random: bool, + cmd_prefix: bool, + cmd_sign: bool, + cmd_verify: bool, + cmd_public: bool, + cmd_address: bool, + cmd_recover: bool, + arg_prefix: String, + arg_secret: String, + arg_secret_or_phrase: String, + arg_known_phrase: String, + arg_message: String, + arg_public: String, + arg_address: String, + arg_signature: String, + flag_secret: bool, + flag_public: bool, + flag_address: bool, + flag_brain: bool, } #[derive(Debug)] enum Error { - Ethkey(EthkeyError), - FromHex(FromHexError), - ParseInt(ParseIntError), - Docopt(docopt::Error), - Io(io::Error), + Ethkey(EthkeyError), + FromHex(FromHexError), + 
ParseInt(ParseIntError), + Docopt(docopt::Error), + Io(io::Error), } impl From for Error { - fn from(err: EthkeyError) -> Self { - Error::Ethkey(err) - } + fn from(err: EthkeyError) -> Self { + Error::Ethkey(err) + } } impl From for Error { - fn from(err: FromHexError) -> Self { - Error::FromHex(err) - } + fn from(err: FromHexError) -> Self { + Error::FromHex(err) + } } impl From for Error { - fn from(err: ParseIntError) -> Self { - Error::ParseInt(err) - } + fn from(err: ParseIntError) -> Self { + Error::ParseInt(err) + } } impl From for Error { - fn from(err: docopt::Error) -> Self { - Error::Docopt(err) - } + fn from(err: docopt::Error) -> Self { + Error::Docopt(err) + } } impl From for Error { - fn from(err: io::Error) -> Self { - Error::Io(err) - } + fn from(err: io::Error) -> Self { + Error::Io(err) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Error::Ethkey(ref e) => write!(f, "{}", e), - Error::FromHex(ref e) => write!(f, "{}", e), - Error::ParseInt(ref e) => write!(f, "{}", e), - Error::Docopt(ref e) => write!(f, "{}", e), - Error::Io(ref e) => write!(f, "{}", e), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::Ethkey(ref e) => write!(f, "{}", e), + Error::FromHex(ref e) => write!(f, "{}", e), + Error::ParseInt(ref e) => write!(f, "{}", e), + Error::Docopt(ref e) => write!(f, "{}", e), + Error::Io(ref e) => write!(f, "{}", e), + } + } } enum DisplayMode { - KeyPair, - Secret, - Public, - Address, + KeyPair, + Secret, + Public, + Address, } impl DisplayMode { - fn new(args: &Args) -> Self { - if args.flag_secret { - DisplayMode::Secret - } else if args.flag_public { - DisplayMode::Public - } else if args.flag_address { - DisplayMode::Address - } else { - DisplayMode::KeyPair - } - } + fn new(args: &Args) -> Self { + if args.flag_secret { + DisplayMode::Secret + } else if args.flag_public { + DisplayMode::Public + } else if 
args.flag_address { + DisplayMode::Address + } else { + DisplayMode::KeyPair + } + } } fn main() { - panic_hook::set_abort(); - env_logger::try_init().expect("Logger initialized only once."); + panic_hook::set_abort(); + env_logger::try_init().expect("Logger initialized only once."); - match execute(env::args()) { - Ok(ok) => println!("{}", ok), - Err(Error::Docopt(ref e)) => e.exit(), - Err(err) => { - eprintln!("{}", err); - process::exit(1); - } - } + match execute(env::args()) { + Ok(ok) => println!("{}", ok), + Err(Error::Docopt(ref e)) => e.exit(), + Err(err) => { + eprintln!("{}", err); + process::exit(1); + } + } } fn display(result: (KeyPair, Option), mode: DisplayMode) -> String { - let keypair = result.0; - match mode { - DisplayMode::KeyPair => match result.1 { - Some(extra_data) => format!("{}\n{}", extra_data, keypair), - None => format!("{}", keypair) - }, - DisplayMode::Secret => format!("{:x}", keypair.secret()), - DisplayMode::Public => format!("{:x}", keypair.public()), - DisplayMode::Address => format!("{:x}", keypair.address()), - } + let keypair = result.0; + match mode { + DisplayMode::KeyPair => match result.1 { + Some(extra_data) => format!("{}\n{}", extra_data, keypair), + None => format!("{}", keypair), + }, + DisplayMode::Secret => format!("{:x}", keypair.secret()), + DisplayMode::Public => format!("{:x}", keypair.public()), + DisplayMode::Address => format!("{:x}", keypair.address()), + } } -fn execute(command: I) -> Result where I: IntoIterator, S: AsRef { - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.argv(command).deserialize())?; +fn execute(command: I) -> Result +where + I: IntoIterator, + S: AsRef, +{ + let args: Args = Docopt::new(USAGE).and_then(|d| d.argv(command).deserialize())?; - return if args.cmd_info { - let display_mode = DisplayMode::new(&args); + return if args.cmd_info { + let display_mode = DisplayMode::new(&args); - let result = if args.flag_brain { - let phrase = args.arg_secret_or_phrase; - let 
phrase_info = validate_phrase(&phrase); - let keypair = Brain::new(phrase).generate().expect("Brain wallet generator is infallible; qed"); - (keypair, Some(phrase_info)) - } else { - let secret = args.arg_secret_or_phrase.parse().map_err(|_| EthkeyError::InvalidSecret)?; - (KeyPair::from_secret(secret)?, None) - }; - Ok(display(result, display_mode)) - } else if args.cmd_generate { - let display_mode = DisplayMode::new(&args); - let result = if args.cmd_random { - if args.flag_brain { - let mut brain = BrainPrefix::new(vec![0], usize::max_value(), BRAIN_WORDS); - let keypair = brain.generate()?; - let phrase = format!("recovery phrase: {}", brain.phrase()); - (keypair, Some(phrase)) - } else { - (Random.generate()?, None) - } - } else if args.cmd_prefix { - let prefix = args.arg_prefix.from_hex()?; - let brain = args.flag_brain; - in_threads(move || { - let iterations = 1024; - let prefix = prefix.clone(); - move || { - let prefix = prefix.clone(); - let res = if brain { - let mut brain = BrainPrefix::new(prefix, iterations, BRAIN_WORDS); - let result = brain.generate(); - let phrase = format!("recovery phrase: {}", brain.phrase()); - result.map(|keypair| (keypair, Some(phrase))) - } else { - let result = Prefix::new(prefix, iterations).generate(); - result.map(|res| (res, None)) - }; + let result = if args.flag_brain { + let phrase = args.arg_secret_or_phrase; + let phrase_info = validate_phrase(&phrase); + let keypair = Brain::new(phrase) + .generate() + .expect("Brain wallet generator is infallible; qed"); + (keypair, Some(phrase_info)) + } else { + let secret = args + .arg_secret_or_phrase + .parse() + .map_err(|_| EthkeyError::InvalidSecret)?; + (KeyPair::from_secret(secret)?, None) + }; + Ok(display(result, display_mode)) + } else if args.cmd_generate { + let display_mode = DisplayMode::new(&args); + let result = if args.cmd_random { + if args.flag_brain { + let mut brain = BrainPrefix::new(vec![0], usize::max_value(), BRAIN_WORDS); + let keypair = 
brain.generate()?; + let phrase = format!("recovery phrase: {}", brain.phrase()); + (keypair, Some(phrase)) + } else { + (Random.generate()?, None) + } + } else if args.cmd_prefix { + let prefix = args.arg_prefix.from_hex()?; + let brain = args.flag_brain; + in_threads(move || { + let iterations = 1024; + let prefix = prefix.clone(); + move || { + let prefix = prefix.clone(); + let res = if brain { + let mut brain = BrainPrefix::new(prefix, iterations, BRAIN_WORDS); + let result = brain.generate(); + let phrase = format!("recovery phrase: {}", brain.phrase()); + result.map(|keypair| (keypair, Some(phrase))) + } else { + let result = Prefix::new(prefix, iterations).generate(); + result.map(|res| (res, None)) + }; - Ok(res.map(Some).unwrap_or(None)) - } - })? - } else { - return Ok(format!("{}", USAGE)) - }; - Ok(display(result, display_mode)) - } else if args.cmd_sign { - let secret = args.arg_secret.parse().map_err(|_| EthkeyError::InvalidSecret)?; - let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?; - let signature = sign(&secret, &message)?; - Ok(format!("{}", signature)) - } else if args.cmd_verify { - let signature = args.arg_signature.parse().map_err(|_| EthkeyError::InvalidSignature)?; - let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?; - let ok = if args.cmd_public { - let public = args.arg_public.parse().map_err(|_| EthkeyError::InvalidPublic)?; - verify_public(&public, &signature, &message)? - } else if args.cmd_address { - let address = args.arg_address.parse().map_err(|_| EthkeyError::InvalidAddress)?; - verify_address(&address, &signature, &message)? 
- } else { - return Ok(format!("{}", USAGE)) - }; - Ok(format!("{}", ok)) - } else if args.cmd_recover { - let display_mode = DisplayMode::new(&args); - let known_phrase = args.arg_known_phrase; - let address = args.arg_address.parse().map_err(|_| EthkeyError::InvalidAddress)?; - let (phrase, keypair) = in_threads(move || { - let mut it = brain_recover::PhrasesIterator::from_known_phrase(&known_phrase, BRAIN_WORDS); - move || { - let mut i = 0; - while let Some(phrase) = it.next() { - i += 1; + Ok(res.map(Some).unwrap_or(None)) + } + })? + } else { + return Ok(format!("{}", USAGE)); + }; + Ok(display(result, display_mode)) + } else if args.cmd_sign { + let secret = args + .arg_secret + .parse() + .map_err(|_| EthkeyError::InvalidSecret)?; + let message = args + .arg_message + .parse() + .map_err(|_| EthkeyError::InvalidMessage)?; + let signature = sign(&secret, &message)?; + Ok(format!("{}", signature)) + } else if args.cmd_verify { + let signature = args + .arg_signature + .parse() + .map_err(|_| EthkeyError::InvalidSignature)?; + let message = args + .arg_message + .parse() + .map_err(|_| EthkeyError::InvalidMessage)?; + let ok = if args.cmd_public { + let public = args + .arg_public + .parse() + .map_err(|_| EthkeyError::InvalidPublic)?; + verify_public(&public, &signature, &message)? + } else if args.cmd_address { + let address = args + .arg_address + .parse() + .map_err(|_| EthkeyError::InvalidAddress)?; + verify_address(&address, &signature, &message)? 
+ } else { + return Ok(format!("{}", USAGE)); + }; + Ok(format!("{}", ok)) + } else if args.cmd_recover { + let display_mode = DisplayMode::new(&args); + let known_phrase = args.arg_known_phrase; + let address = args + .arg_address + .parse() + .map_err(|_| EthkeyError::InvalidAddress)?; + let (phrase, keypair) = in_threads(move || { + let mut it = + brain_recover::PhrasesIterator::from_known_phrase(&known_phrase, BRAIN_WORDS); + move || { + let mut i = 0; + while let Some(phrase) = it.next() { + i += 1; - let keypair = Brain::new(phrase.clone()).generate().unwrap(); - if keypair.address() == address { - return Ok(Some((phrase, keypair))) - } + let keypair = Brain::new(phrase.clone()).generate().unwrap(); + if keypair.address() == address { + return Ok(Some((phrase, keypair))); + } - if i >= 1024 { - return Ok(None) - } - } + if i >= 1024 { + return Ok(None); + } + } - Err(EthkeyError::Custom("Couldn't find any results.".into())) - } - })?; - Ok(display((keypair, Some(phrase)), display_mode)) - } else { - Ok(format!("{}", USAGE)) - } + Err(EthkeyError::Custom("Couldn't find any results.".into())) + } + })?; + Ok(display((keypair, Some(phrase)), display_mode)) + } else { + Ok(format!("{}", USAGE)) + }; } const BRAIN_WORDS: usize = 12; fn validate_phrase(phrase: &str) -> String { - match Brain::validate_phrase(phrase, BRAIN_WORDS) { - Ok(()) => format!("The recovery phrase looks correct.\n"), - Err(err) => format!("The recover phrase was not generated by Parity: {}", err) - } + match Brain::validate_phrase(phrase, BRAIN_WORDS) { + Ok(()) => format!("The recovery phrase looks correct.\n"), + Err(err) => format!("The recover phrase was not generated by Parity: {}", err), + } } -fn in_threads(prepare: F) -> Result where - O: Send + 'static, - X: Send + 'static, - F: Fn() -> X, - X: FnMut() -> Result, EthkeyError>, +fn in_threads(prepare: F) -> Result +where + O: Send + 'static, + X: Send + 'static, + F: Fn() -> X, + X: FnMut() -> Result, EthkeyError>, { - let pool = 
threadpool::Builder::new().build(); + let pool = threadpool::Builder::new().build(); - let (tx, rx) = sync::mpsc::sync_channel(1); - let is_done = sync::Arc::new(sync::atomic::AtomicBool::default()); + let (tx, rx) = sync::mpsc::sync_channel(1); + let is_done = sync::Arc::new(sync::atomic::AtomicBool::default()); - for _ in 0..pool.max_count() { - let is_done = is_done.clone(); - let tx = tx.clone(); - let mut task = prepare(); - pool.execute(move || { - loop { - if is_done.load(sync::atomic::Ordering::SeqCst) { - return; - } + for _ in 0..pool.max_count() { + let is_done = is_done.clone(); + let tx = tx.clone(); + let mut task = prepare(); + pool.execute(move || { + loop { + if is_done.load(sync::atomic::Ordering::SeqCst) { + return; + } - let res = match task() { - Ok(None) => continue, - Ok(Some(v)) => Ok(v), - Err(err) => Err(err), - }; + let res = match task() { + Ok(None) => continue, + Ok(Some(v)) => Ok(v), + Err(err) => Err(err), + }; - // We are interested only in the first response. - let _ = tx.send(res); - } - }); - } + // We are interested only in the first response. 
+ let _ = tx.send(res); + } + }); + } - if let Ok(solution) = rx.recv() { - is_done.store(true, sync::atomic::Ordering::SeqCst); - return solution; - } + if let Ok(solution) = rx.recv() { + is_done.store(true, sync::atomic::Ordering::SeqCst); + return solution; + } - Err(EthkeyError::Custom("No results found.".into())) + Err(EthkeyError::Custom("No results found.".into())) } #[cfg(test)] mod tests { - use super::execute; + use super::execute; - #[test] - fn info() { - let command = vec!["ethkey", "info", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55"] - .into_iter() - .map(Into::into) - .collect::>(); + #[test] + fn info() { + let command = vec![ + "ethkey", + "info", + "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55", + ] + .into_iter() + .map(Into::into) + .collect::>(); - let expected = + let expected = "secret: 17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55 public: 689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124 address: 26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); - assert_eq!(execute(command).unwrap(), expected); - } + assert_eq!(execute(command).unwrap(), expected); + } - #[test] - fn brain() { - let command = vec!["ethkey", "info", "--brain", "this is sparta"] - .into_iter() - .map(Into::into) - .collect::>(); + #[test] + fn brain() { + let command = vec!["ethkey", "info", "--brain", "this is sparta"] + .into_iter() + .map(Into::into) + .collect::>(); - let expected = + let expected = "The recover phrase was not generated by Parity: The word 'this' does not come from the dictionary. 
secret: aa22b54c0cb43ee30a014afe5ef3664b1cde299feabca46cd3167a85a57c39f2 public: c4c5398da6843632c123f543d714d2d2277716c11ff612b2a2f23c6bda4d6f0327c31cd58c55a9572c3cc141dade0c32747a13b7ef34c241b26c84adbb28fcf4 address: 006e27b6a72e1f34c626762f3c4761547aff1421".to_owned(); - assert_eq!(execute(command).unwrap(), expected); - } + assert_eq!(execute(command).unwrap(), expected); + } - #[test] - fn secret() { - let command = vec!["ethkey", "info", "--brain", "this is sparta", "--secret"] + #[test] + fn secret() { + let command = vec!["ethkey", "info", "--brain", "this is sparta", "--secret"] + .into_iter() + .map(Into::into) + .collect::>(); + + let expected = + "aa22b54c0cb43ee30a014afe5ef3664b1cde299feabca46cd3167a85a57c39f2".to_owned(); + assert_eq!(execute(command).unwrap(), expected); + } + + #[test] + fn public() { + let command = vec!["ethkey", "info", "--brain", "this is sparta", "--public"] + .into_iter() + .map(Into::into) + .collect::>(); + + let expected = "c4c5398da6843632c123f543d714d2d2277716c11ff612b2a2f23c6bda4d6f0327c31cd58c55a9572c3cc141dade0c32747a13b7ef34c241b26c84adbb28fcf4".to_owned(); + assert_eq!(execute(command).unwrap(), expected); + } + + #[test] + fn address() { + let command = vec!["ethkey", "info", "-b", "this is sparta", "--address"] + .into_iter() + .map(Into::into) + .collect::>(); + + let expected = "006e27b6a72e1f34c626762f3c4761547aff1421".to_owned(); + assert_eq!(execute(command).unwrap(), expected); + } + + #[test] + fn sign() { + let command = vec![ + "ethkey", + "sign", + "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55", + "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987", + ] + .into_iter() + .map(Into::into) + .collect::>(); + + let expected = "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200".to_owned(); + assert_eq!(execute(command).unwrap(), expected); + } + + #[test] + fn verify_valid_public() { + let 
command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::>(); - let expected = "aa22b54c0cb43ee30a014afe5ef3664b1cde299feabca46cd3167a85a57c39f2".to_owned(); - assert_eq!(execute(command).unwrap(), expected); - } + let expected = "true".to_owned(); + assert_eq!(execute(command).unwrap(), expected); + } - #[test] - fn public() { - let command = vec!["ethkey", "info", "--brain", "this is sparta", "--public"] + #[test] + fn verify_valid_address() { + let command = vec!["ethkey", "verify", "address", "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::>(); - let expected = "c4c5398da6843632c123f543d714d2d2277716c11ff612b2a2f23c6bda4d6f0327c31cd58c55a9572c3cc141dade0c32747a13b7ef34c241b26c84adbb28fcf4".to_owned(); - assert_eq!(execute(command).unwrap(), expected); - } + let expected = "true".to_owned(); + assert_eq!(execute(command).unwrap(), expected); + } - #[test] - fn address() { - let command = vec!["ethkey", "info", "-b", "this is sparta", "--address"] + #[test] + fn verify_invalid() { + let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec986"] .into_iter() 
.map(Into::into) .collect::>(); - let expected = "006e27b6a72e1f34c626762f3c4761547aff1421".to_owned(); - assert_eq!(execute(command).unwrap(), expected); - } - - #[test] - fn sign() { - let command = vec!["ethkey", "sign", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] - .into_iter() - .map(Into::into) - .collect::>(); - - let expected = "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200".to_owned(); - assert_eq!(execute(command).unwrap(), expected); - } - - #[test] - fn verify_valid_public() { - let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] - .into_iter() - .map(Into::into) - .collect::>(); - - let expected = "true".to_owned(); - assert_eq!(execute(command).unwrap(), expected); - } - - #[test] - fn verify_valid_address() { - let command = vec!["ethkey", "verify", "address", "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] - .into_iter() - .map(Into::into) - .collect::>(); - - let expected = "true".to_owned(); - assert_eq!(execute(command).unwrap(), expected); - } - - #[test] - fn verify_invalid() { - let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", 
"c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec986"] - .into_iter() - .map(Into::into) - .collect::>(); - - let expected = "false".to_owned(); - assert_eq!(execute(command).unwrap(), expected); - } + let expected = "false".to_owned(); + assert_eq!(execute(command).unwrap(), expected); + } } diff --git a/accounts/ethkey/src/brain.rs b/accounts/ethkey/src/brain.rs index 3a970e17c..4f4719f44 100644 --- a/accounts/ethkey/src/brain.rs +++ b/accounts/ethkey/src/brain.rs @@ -14,60 +14,61 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use super::{Generator, KeyPair, Secret}; use keccak::Keccak256; -use super::{KeyPair, Generator, Secret}; use parity_wordlist; /// Simple brainwallet. pub struct Brain(String); impl Brain { - pub fn new(s: String) -> Self { - Brain(s) - } + pub fn new(s: String) -> Self { + Brain(s) + } - pub fn validate_phrase(phrase: &str, expected_words: usize) -> Result<(), ::WordlistError> { - parity_wordlist::validate_phrase(phrase, expected_words) - } + pub fn validate_phrase(phrase: &str, expected_words: usize) -> Result<(), ::WordlistError> { + parity_wordlist::validate_phrase(phrase, expected_words) + } } impl Generator for Brain { type Error = ::Void; - fn generate(&mut self) -> Result { - let seed = self.0.clone(); - let mut secret = seed.into_bytes().keccak256(); + fn generate(&mut self) -> Result { + let seed = self.0.clone(); + let mut secret = seed.into_bytes().keccak256(); - let mut i = 0; - loop { - secret = secret.keccak256(); + let mut i = 0; + loop { + secret = secret.keccak256(); - match i > 16384 { - false => i += 1, - true => { - if let Ok(pair) = Secret::from_unsafe_slice(&secret) - .and_then(KeyPair::from_secret) - { - if pair.address()[0] == 0 { - trace!("Testing: {}, got: {:?}", self.0, pair.address()); - return 
Ok(pair) - } - } - }, - } - } - } + match i > 16384 { + false => i += 1, + true => { + if let Ok(pair) = + Secret::from_unsafe_slice(&secret).and_then(KeyPair::from_secret) + { + if pair.address()[0] == 0 { + trace!("Testing: {}, got: {:?}", self.0, pair.address()); + return Ok(pair); + } + } + } + } + } + } } #[cfg(test)] mod tests { - use {Brain, Generator}; + use Brain; + use Generator; - #[test] - fn test_brain() { - let words = "this is sparta!".to_owned(); - let first_keypair = Brain::new(words.clone()).generate().unwrap(); - let second_keypair = Brain::new(words.clone()).generate().unwrap(); - assert_eq!(first_keypair.secret(), second_keypair.secret()); - } + #[test] + fn test_brain() { + let words = "this is sparta!".to_owned(); + let first_keypair = Brain::new(words.clone()).generate().unwrap(); + let second_keypair = Brain::new(words.clone()).generate().unwrap(); + assert_eq!(first_keypair.secret(), second_keypair.secret()); + } } diff --git a/accounts/ethkey/src/brain_prefix.rs b/accounts/ethkey/src/brain_prefix.rs index ba0d81296..1ad566b25 100644 --- a/accounts/ethkey/src/brain_prefix.rs +++ b/accounts/ethkey/src/brain_prefix.rs @@ -14,57 +14,60 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use super::{Generator, KeyPair, Error, Brain}; +use super::{Brain, Error, Generator, KeyPair}; use parity_wordlist as wordlist; /// Tries to find brain-seed keypair with address starting with given prefix. 
pub struct BrainPrefix { - prefix: Vec, - iterations: usize, - no_of_words: usize, - last_phrase: String, + prefix: Vec, + iterations: usize, + no_of_words: usize, + last_phrase: String, } impl BrainPrefix { - pub fn new(prefix: Vec, iterations: usize, no_of_words: usize) -> Self { - BrainPrefix { - prefix, - iterations, - no_of_words, - last_phrase: String::new(), - } - } + pub fn new(prefix: Vec, iterations: usize, no_of_words: usize) -> Self { + BrainPrefix { + prefix, + iterations, + no_of_words, + last_phrase: String::new(), + } + } - pub fn phrase(&self) -> &str { - &self.last_phrase - } + pub fn phrase(&self) -> &str { + &self.last_phrase + } } impl Generator for BrainPrefix { - type Error = Error; + type Error = Error; - fn generate(&mut self) -> Result { - for _ in 0..self.iterations { - let phrase = wordlist::random_phrase(self.no_of_words); - let keypair = Brain::new(phrase.clone()).generate().unwrap(); - if keypair.address().starts_with(&self.prefix) { - self.last_phrase = phrase; - return Ok(keypair) - } - } + fn generate(&mut self) -> Result { + for _ in 0..self.iterations { + let phrase = wordlist::random_phrase(self.no_of_words); + let keypair = Brain::new(phrase.clone()).generate().unwrap(); + if keypair.address().starts_with(&self.prefix) { + self.last_phrase = phrase; + return Ok(keypair); + } + } - Err(Error::Custom("Could not find keypair".into())) - } + Err(Error::Custom("Could not find keypair".into())) + } } #[cfg(test)] mod tests { - use {Generator, BrainPrefix}; + use BrainPrefix; + use Generator; - #[test] - fn prefix_generator() { - let prefix = vec![0x00u8]; - let keypair = BrainPrefix::new(prefix.clone(), usize::max_value(), 12).generate().unwrap(); - assert!(keypair.address().starts_with(&prefix)); - } + #[test] + fn prefix_generator() { + let prefix = vec![0x00u8]; + let keypair = BrainPrefix::new(prefix.clone(), usize::max_value(), 12) + .generate() + .unwrap(); + assert!(keypair.address().starts_with(&prefix)); + } } diff --git 
a/accounts/ethkey/src/brain_recover.rs b/accounts/ethkey/src/brain_recover.rs index f9922fae9..c9bea1642 100644 --- a/accounts/ethkey/src/brain_recover.rs +++ b/accounts/ethkey/src/brain_recover.rs @@ -26,148 +26,153 @@ use super::{Address, Brain, Generator}; /// /// Returns `None` if phrase couldn't be found. pub fn brain_recover( - address: &Address, - known_phrase: &str, - expected_words: usize, + address: &Address, + known_phrase: &str, + expected_words: usize, ) -> Option { - let it = PhrasesIterator::from_known_phrase(known_phrase, expected_words); - for phrase in it { - let keypair = Brain::new(phrase.clone()).generate().expect("Brain wallets are infallible; qed"); - trace!("Testing: {}, got: {:?}", phrase, keypair.address()); - if &keypair.address() == address { - return Some(phrase); - } - } + let it = PhrasesIterator::from_known_phrase(known_phrase, expected_words); + for phrase in it { + let keypair = Brain::new(phrase.clone()) + .generate() + .expect("Brain wallets are infallible; qed"); + trace!("Testing: {}, got: {:?}", phrase, keypair.address()); + if &keypair.address() == address { + return Some(phrase); + } + } - None + None } fn generate_substitutions(word: &str) -> Vec<&'static str> { - let mut words = parity_wordlist::WORDS.iter().cloned() - .map(|w| (edit_distance(w, word), w)) - .collect::>(); - words.sort_by(|a, b| a.0.cmp(&b.0)); + let mut words = parity_wordlist::WORDS + .iter() + .cloned() + .map(|w| (edit_distance(w, word), w)) + .collect::>(); + words.sort_by(|a, b| a.0.cmp(&b.0)); - words.into_iter() - .map(|pair| pair.1) - .collect() + words.into_iter().map(|pair| pair.1).collect() } /// Iterator over possible pub struct PhrasesIterator { - words: Vec>, - combinations: u64, - indexes: Vec, - has_next: bool, + words: Vec>, + combinations: u64, + indexes: Vec, + has_next: bool, } impl PhrasesIterator { - pub fn from_known_phrase(known_phrase: &str, expected_words: usize) -> Self { - let known_words = 
parity_wordlist::WORDS.iter().cloned().collect::>(); - let mut words = known_phrase.split(' ') - .map(|word| match known_words.get(word) { - None => { - info!("Invalid word '{}', looking for potential substitutions.", word); - let substitutions = generate_substitutions(word); - info!("Closest words: {:?}", &substitutions[..10]); - substitutions - }, - Some(word) => vec![*word], - }) - .collect::>(); + pub fn from_known_phrase(known_phrase: &str, expected_words: usize) -> Self { + let known_words = parity_wordlist::WORDS + .iter() + .cloned() + .collect::>(); + let mut words = known_phrase + .split(' ') + .map(|word| match known_words.get(word) { + None => { + info!( + "Invalid word '{}', looking for potential substitutions.", + word + ); + let substitutions = generate_substitutions(word); + info!("Closest words: {:?}", &substitutions[..10]); + substitutions + } + Some(word) => vec![*word], + }) + .collect::>(); - // add missing words - if words.len() < expected_words { - let to_add = expected_words - words.len(); - info!("Number of words is insuficcient adding {} more.", to_add); - for _ in 0..to_add { - words.push(parity_wordlist::WORDS.iter().cloned().collect()); - } - } + // add missing words + if words.len() < expected_words { + let to_add = expected_words - words.len(); + info!("Number of words is insuficcient adding {} more.", to_add); + for _ in 0..to_add { + words.push(parity_wordlist::WORDS.iter().cloned().collect()); + } + } - // start searching - PhrasesIterator::new(words) - } + // start searching + PhrasesIterator::new(words) + } - pub fn new(words: Vec>) -> Self { - let combinations = words.iter().fold(1u64, |acc, x| acc * x.len() as u64); - let indexes = words.iter().map(|_| 0).collect(); - info!("Starting to test {} possible combinations.", combinations); + pub fn new(words: Vec>) -> Self { + let combinations = words.iter().fold(1u64, |acc, x| acc * x.len() as u64); + let indexes = words.iter().map(|_| 0).collect(); + info!("Starting to test {} 
possible combinations.", combinations); - PhrasesIterator { - words, - combinations, - indexes, - has_next: combinations > 0, - } - } + PhrasesIterator { + words, + combinations, + indexes, + has_next: combinations > 0, + } + } - pub fn combinations(&self) -> u64 { - self.combinations - } + pub fn combinations(&self) -> u64 { + self.combinations + } - fn current(&self) -> String { - let mut s = self.words[0][self.indexes[0]].to_owned(); - for i in 1..self.indexes.len() { - s.push(' '); - s.push_str(self.words[i][self.indexes[i]]); - } - s - } + fn current(&self) -> String { + let mut s = self.words[0][self.indexes[0]].to_owned(); + for i in 1..self.indexes.len() { + s.push(' '); + s.push_str(self.words[i][self.indexes[i]]); + } + s + } - fn next_index(&mut self) -> bool { - let mut pos = self.indexes.len(); - while pos > 0 { - pos -= 1; - self.indexes[pos] += 1; - if self.indexes[pos] >= self.words[pos].len() { - self.indexes[pos] = 0; - } else { - return true; - } - } + fn next_index(&mut self) -> bool { + let mut pos = self.indexes.len(); + while pos > 0 { + pos -= 1; + self.indexes[pos] += 1; + if self.indexes[pos] >= self.words[pos].len() { + self.indexes[pos] = 0; + } else { + return true; + } + } - false - } + false + } } impl Iterator for PhrasesIterator { - type Item = String; + type Item = String; - fn next(&mut self) -> Option { - if !self.has_next { - return None; - } + fn next(&mut self) -> Option { + if !self.has_next { + return None; + } - let phrase = self.current(); - self.has_next = self.next_index(); - Some(phrase) - } + let phrase = self.current(); + self.has_next = self.next_index(); + Some(phrase) + } } #[cfg(test)] mod tests { - use super::PhrasesIterator; + use super::PhrasesIterator; - #[test] - fn should_generate_possible_combinations() { - let mut it = PhrasesIterator::new(vec![ - vec!["1", "2", "3"], - vec!["test"], - vec!["a", "b", "c"], - ]); - - assert_eq!(it.combinations(), 9); - assert_eq!(it.next(), Some("1 test a".to_owned())); - 
assert_eq!(it.next(), Some("1 test b".to_owned())); - assert_eq!(it.next(), Some("1 test c".to_owned())); - assert_eq!(it.next(), Some("2 test a".to_owned())); - assert_eq!(it.next(), Some("2 test b".to_owned())); - assert_eq!(it.next(), Some("2 test c".to_owned())); - assert_eq!(it.next(), Some("3 test a".to_owned())); - assert_eq!(it.next(), Some("3 test b".to_owned())); - assert_eq!(it.next(), Some("3 test c".to_owned())); - assert_eq!(it.next(), None); - } + #[test] + fn should_generate_possible_combinations() { + let mut it = + PhrasesIterator::new(vec![vec!["1", "2", "3"], vec!["test"], vec!["a", "b", "c"]]); + assert_eq!(it.combinations(), 9); + assert_eq!(it.next(), Some("1 test a".to_owned())); + assert_eq!(it.next(), Some("1 test b".to_owned())); + assert_eq!(it.next(), Some("1 test c".to_owned())); + assert_eq!(it.next(), Some("2 test a".to_owned())); + assert_eq!(it.next(), Some("2 test b".to_owned())); + assert_eq!(it.next(), Some("2 test c".to_owned())); + assert_eq!(it.next(), Some("3 test a".to_owned())); + assert_eq!(it.next(), Some("3 test b".to_owned())); + assert_eq!(it.next(), Some("3 test c".to_owned())); + assert_eq!(it.next(), None); + } } diff --git a/accounts/ethkey/src/crypto.rs b/accounts/ethkey/src/crypto.rs index ec883dcb6..6fccba941 100644 --- a/accounts/ethkey/src/crypto.rs +++ b/accounts/ethkey/src/crypto.rs @@ -14,176 +14,187 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use parity_crypto::error::SymmError; use secp256k1; use std::io; -use parity_crypto::error::SymmError; quick_error! 
{ - #[derive(Debug)] - pub enum Error { - Secp(e: secp256k1::Error) { - display("secp256k1 error: {}", e) - cause(e) - from() - } - Io(e: io::Error) { - display("i/o error: {}", e) - cause(e) - from() - } - InvalidMessage { - display("invalid message") - } - Symm(e: SymmError) { - cause(e) - from() - } - } + #[derive(Debug)] + pub enum Error { + Secp(e: secp256k1::Error) { + display("secp256k1 error: {}", e) + cause(e) + from() + } + Io(e: io::Error) { + display("i/o error: {}", e) + cause(e) + from() + } + InvalidMessage { + display("invalid message") + } + Symm(e: SymmError) { + cause(e) + from() + } + } } /// ECDH functions pub mod ecdh { - use secp256k1::{self, ecdh, key}; - use super::Error; - use {Secret, Public, SECP256K1}; + use super::Error; + use secp256k1::{self, ecdh, key}; + use Public; + use Secret; + use SECP256K1; - /// Agree on a shared secret - pub fn agree(secret: &Secret, public: &Public) -> Result { - let context = &SECP256K1; - let pdata = { - let mut temp = [4u8; 65]; - (&mut temp[1..65]).copy_from_slice(&public[0..64]); - temp - }; + /// Agree on a shared secret + pub fn agree(secret: &Secret, public: &Public) -> Result { + let context = &SECP256K1; + let pdata = { + let mut temp = [4u8; 65]; + (&mut temp[1..65]).copy_from_slice(&public[0..64]); + temp + }; - let publ = key::PublicKey::from_slice(context, &pdata)?; - let sec = key::SecretKey::from_slice(context, &secret)?; - let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec); + let publ = key::PublicKey::from_slice(context, &pdata)?; + let sec = key::SecretKey::from_slice(context, &secret)?; + let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec); - Secret::from_unsafe_slice(&shared[0..32]) - .map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey)) - } + Secret::from_unsafe_slice(&shared[0..32]) + .map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey)) + } } /// ECIES function pub mod ecies { - use parity_crypto::{aes, digest, hmac, is_equal}; - use 
ethereum_types::H128; - use super::{ecdh, Error}; - use {Random, Generator, Public, Secret}; + use super::{ecdh, Error}; + use ethereum_types::H128; + use parity_crypto::{aes, digest, hmac, is_equal}; + use Generator; + use Public; + use Random; + use Secret; - /// Encrypt a message with a public key, writing an HMAC covering both - /// the plaintext and authenticated data. - /// - /// Authenticated data may be empty. - pub fn encrypt(public: &Public, auth_data: &[u8], plain: &[u8]) -> Result, Error> { - let r = Random.generate()?; - let z = ecdh::agree(r.secret(), public)?; - let mut key = [0u8; 32]; - kdf(&z, &[0u8; 0], &mut key); + /// Encrypt a message with a public key, writing an HMAC covering both + /// the plaintext and authenticated data. + /// + /// Authenticated data may be empty. + pub fn encrypt(public: &Public, auth_data: &[u8], plain: &[u8]) -> Result, Error> { + let r = Random.generate()?; + let z = ecdh::agree(r.secret(), public)?; + let mut key = [0u8; 32]; + kdf(&z, &[0u8; 0], &mut key); - let ekey = &key[0..16]; - let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32])); + let ekey = &key[0..16]; + let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32])); - let mut msg = vec![0u8; 1 + 64 + 16 + plain.len() + 32]; - msg[0] = 0x04u8; - { - let msgd = &mut msg[1..]; - msgd[0..64].copy_from_slice(r.public()); - let iv = H128::random(); - msgd[64..80].copy_from_slice(&iv); - { - let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())]; - aes::encrypt_128_ctr(ekey, &iv, plain, cipher)?; - } - let mut hmac = hmac::Signer::with(&mkey); - { - let cipher_iv = &msgd[64..(64 + 16 + plain.len())]; - hmac.update(cipher_iv); - } - hmac.update(auth_data); - let sig = hmac.sign(); - msgd[(64 + 16 + plain.len())..].copy_from_slice(&sig); - } - Ok(msg) - } + let mut msg = vec![0u8; 1 + 64 + 16 + plain.len() + 32]; + msg[0] = 0x04u8; + { + let msgd = &mut msg[1..]; + msgd[0..64].copy_from_slice(r.public()); + let iv = H128::random(); + 
msgd[64..80].copy_from_slice(&iv); + { + let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())]; + aes::encrypt_128_ctr(ekey, &iv, plain, cipher)?; + } + let mut hmac = hmac::Signer::with(&mkey); + { + let cipher_iv = &msgd[64..(64 + 16 + plain.len())]; + hmac.update(cipher_iv); + } + hmac.update(auth_data); + let sig = hmac.sign(); + msgd[(64 + 16 + plain.len())..].copy_from_slice(&sig); + } + Ok(msg) + } - /// Decrypt a message with a secret key, checking HMAC for ciphertext - /// and authenticated data validity. - pub fn decrypt(secret: &Secret, auth_data: &[u8], encrypted: &[u8]) -> Result, Error> { - let meta_len = 1 + 64 + 16 + 32; - if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 { - return Err(Error::InvalidMessage); //invalid message: publickey - } + /// Decrypt a message with a secret key, checking HMAC for ciphertext + /// and authenticated data validity. + pub fn decrypt(secret: &Secret, auth_data: &[u8], encrypted: &[u8]) -> Result, Error> { + let meta_len = 1 + 64 + 16 + 32; + if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 { + return Err(Error::InvalidMessage); //invalid message: publickey + } - let e = &encrypted[1..]; - let p = Public::from_slice(&e[0..64]); - let z = ecdh::agree(secret, &p)?; - let mut key = [0u8; 32]; - kdf(&z, &[0u8; 0], &mut key); + let e = &encrypted[1..]; + let p = Public::from_slice(&e[0..64]); + let z = ecdh::agree(secret, &p)?; + let mut key = [0u8; 32]; + kdf(&z, &[0u8; 0], &mut key); - let ekey = &key[0..16]; - let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32])); + let ekey = &key[0..16]; + let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32])); - let clen = encrypted.len() - meta_len; - let cipher_with_iv = &e[64..(64+16+clen)]; - let cipher_iv = &cipher_with_iv[0..16]; - let cipher_no_iv = &cipher_with_iv[16..]; - let msg_mac = &e[(64+16+clen)..]; + let clen = encrypted.len() - meta_len; + let cipher_with_iv = &e[64..(64 + 16 + clen)]; + let cipher_iv = 
&cipher_with_iv[0..16]; + let cipher_no_iv = &cipher_with_iv[16..]; + let msg_mac = &e[(64 + 16 + clen)..]; - // Verify tag - let mut hmac = hmac::Signer::with(&mkey); - hmac.update(cipher_with_iv); - hmac.update(auth_data); - let mac = hmac.sign(); + // Verify tag + let mut hmac = hmac::Signer::with(&mkey); + hmac.update(cipher_with_iv); + hmac.update(auth_data); + let mac = hmac.sign(); - if !is_equal(&mac.as_ref()[..], msg_mac) { - return Err(Error::InvalidMessage); - } + if !is_equal(&mac.as_ref()[..], msg_mac) { + return Err(Error::InvalidMessage); + } - let mut msg = vec![0u8; clen]; - aes::decrypt_128_ctr(ekey, cipher_iv, cipher_no_iv, &mut msg[..])?; - Ok(msg) - } + let mut msg = vec![0u8; clen]; + aes::decrypt_128_ctr(ekey, cipher_iv, cipher_no_iv, &mut msg[..])?; + Ok(msg) + } - fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) { - // SEC/ISO/Shoup specify counter size SHOULD be equivalent - // to size of hash output, however, it also notes that - // the 4 bytes is okay. NIST specifies 4 bytes. - let mut ctr = 1u32; - let mut written = 0usize; - while written < dest.len() { - let mut hasher = digest::Hasher::sha256(); - let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8]; - hasher.update(&ctrs); - hasher.update(secret); - hasher.update(s1); - let d = hasher.finish(); - &mut dest[written..(written + 32)].copy_from_slice(&d); - written += 32; - ctr += 1; - } - } + fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) { + // SEC/ISO/Shoup specify counter size SHOULD be equivalent + // to size of hash output, however, it also notes that + // the 4 bytes is okay. NIST specifies 4 bytes. 
+ let mut ctr = 1u32; + let mut written = 0usize; + while written < dest.len() { + let mut hasher = digest::Hasher::sha256(); + let ctrs = [ + (ctr >> 24) as u8, + (ctr >> 16) as u8, + (ctr >> 8) as u8, + ctr as u8, + ]; + hasher.update(&ctrs); + hasher.update(secret); + hasher.update(s1); + let d = hasher.finish(); + &mut dest[written..(written + 32)].copy_from_slice(&d); + written += 32; + ctr += 1; + } + } } #[cfg(test)] mod tests { - use super::ecies; - use {Random, Generator}; + use super::ecies; + use Generator; + use Random; - #[test] - fn ecies_shared() { - let kp = Random.generate().unwrap(); - let message = b"So many books, so little time"; + #[test] + fn ecies_shared() { + let kp = Random.generate().unwrap(); + let message = b"So many books, so little time"; - let shared = b"shared"; - let wrong_shared = b"incorrect"; - let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap(); - assert!(encrypted[..] != message[..]); - assert_eq!(encrypted[0], 0x04); + let shared = b"shared"; + let wrong_shared = b"incorrect"; + let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap(); + assert!(encrypted[..] != message[..]); + assert_eq!(encrypted[0], 0x04); - assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err()); - let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap(); - assert_eq!(decrypted[..message.len()], message[..]); - } + assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err()); + let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap(); + assert_eq!(decrypted[..message.len()], message[..]); + } } diff --git a/accounts/ethkey/src/error.rs b/accounts/ethkey/src/error.rs index ee1911574..f7683f610 100644 --- a/accounts/ethkey/src/error.rs +++ b/accounts/ethkey/src/error.rs @@ -14,68 +14,68 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::{fmt, error}; +use std::{error, fmt}; #[derive(Debug)] /// Crypto error pub enum Error { - /// Invalid secret key - InvalidSecret, - /// Invalid public key - InvalidPublic, - /// Invalid address - InvalidAddress, - /// Invalid EC signature - InvalidSignature, - /// Invalid AES message - InvalidMessage, - /// IO Error - Io(::std::io::Error), - /// Custom - Custom(String), + /// Invalid secret key + InvalidSecret, + /// Invalid public key + InvalidPublic, + /// Invalid address + InvalidAddress, + /// Invalid EC signature + InvalidSignature, + /// Invalid AES message + InvalidMessage, + /// IO Error + Io(::std::io::Error), + /// Custom + Custom(String), } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let msg = match *self { - Error::InvalidSecret => "Invalid secret".into(), - Error::InvalidPublic => "Invalid public".into(), - Error::InvalidAddress => "Invalid address".into(), - Error::InvalidSignature => "Invalid EC signature".into(), - Error::InvalidMessage => "Invalid AES message".into(), - Error::Io(ref err) => format!("I/O error: {}", err), - Error::Custom(ref s) => s.clone(), - }; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let msg = match *self { + Error::InvalidSecret => "Invalid secret".into(), + Error::InvalidPublic => "Invalid public".into(), + Error::InvalidAddress => "Invalid address".into(), + Error::InvalidSignature => "Invalid EC signature".into(), + Error::InvalidMessage => "Invalid AES message".into(), + Error::Io(ref err) => format!("I/O error: {}", err), + Error::Custom(ref s) => s.clone(), + }; - f.write_fmt(format_args!("Crypto error ({})", msg)) - } + f.write_fmt(format_args!("Crypto error ({})", msg)) + } } impl error::Error for Error { - fn description(&self) -> &str { - "Crypto error" - } + fn description(&self) -> &str { + "Crypto error" + } } impl Into for Error { - fn into(self) -> String { - format!("{}", self) - } + fn into(self) -> String { + format!("{}", self) + } } 
impl From<::secp256k1::Error> for Error { - fn from(e: ::secp256k1::Error) -> Error { - match e { - ::secp256k1::Error::InvalidMessage => Error::InvalidMessage, - ::secp256k1::Error::InvalidPublicKey => Error::InvalidPublic, - ::secp256k1::Error::InvalidSecretKey => Error::InvalidSecret, - _ => Error::InvalidSignature, - } - } + fn from(e: ::secp256k1::Error) -> Error { + match e { + ::secp256k1::Error::InvalidMessage => Error::InvalidMessage, + ::secp256k1::Error::InvalidPublicKey => Error::InvalidPublic, + ::secp256k1::Error::InvalidSecretKey => Error::InvalidSecret, + _ => Error::InvalidSignature, + } + } } impl From<::std::io::Error> for Error { - fn from(err: ::std::io::Error) -> Error { - Error::Io(err) - } + fn from(err: ::std::io::Error) -> Error { + Error::Io(err) + } } diff --git a/accounts/ethkey/src/extended.rs b/accounts/ethkey/src/extended.rs index 401d98f2f..bc78ec9a1 100644 --- a/accounts/ethkey/src/extended.rs +++ b/accounts/ethkey/src/extended.rs @@ -16,485 +16,574 @@ //! 
Extended keys +pub use self::derivation::Error as DerivationError; +use ethereum_types::H256; use secret::Secret; use Public; -use ethereum_types::H256; -pub use self::derivation::Error as DerivationError; /// Represents label that can be stored as a part of key derivation pub trait Label { - /// Length of the data that label occupies - fn len() -> usize; + /// Length of the data that label occupies + fn len() -> usize; - /// Store label data to the key derivation sequence - /// Must not use more than `len()` bytes from slice - fn store(&self, target: &mut [u8]); + /// Store label data to the key derivation sequence + /// Must not use more than `len()` bytes from slice + fn store(&self, target: &mut [u8]); } impl Label for u32 { - fn len() -> usize { 4 } + fn len() -> usize { + 4 + } - fn store(&self, target: &mut [u8]) { - let bytes = self.to_be_bytes(); - target[0..4].copy_from_slice(&bytes); - } + fn store(&self, target: &mut [u8]) { + let bytes = self.to_be_bytes(); + target[0..4].copy_from_slice(&bytes); + } } /// Key derivation over generic label `T` pub enum Derivation { - /// Soft key derivation (allow proof of parent) - Soft(T), - /// Hard key derivation (does not allow proof of parent) - Hard(T), + /// Soft key derivation (allow proof of parent) + Soft(T), + /// Hard key derivation (does not allow proof of parent) + Hard(T), } impl From for Derivation { - fn from(index: u32) -> Self { - if index < (2 << 30) { - Derivation::Soft(index) - } - else { - Derivation::Hard(index) - } - } + fn from(index: u32) -> Self { + if index < (2 << 30) { + Derivation::Soft(index) + } else { + Derivation::Hard(index) + } + } } impl Label for H256 { - fn len() -> usize { 32 } + fn len() -> usize { + 32 + } - fn store(&self, target: &mut [u8]) { - self.copy_to(&mut target[0..32]); - } + fn store(&self, target: &mut [u8]) { + self.copy_to(&mut target[0..32]); + } } /// Extended secret key, allows deterministic derivation of subsequent keys. 
pub struct ExtendedSecret { - secret: Secret, - chain_code: H256, + secret: Secret, + chain_code: H256, } impl ExtendedSecret { - /// New extended key from given secret and chain code. - pub fn with_code(secret: Secret, chain_code: H256) -> ExtendedSecret { - ExtendedSecret { - secret: secret, - chain_code: chain_code, - } - } + /// New extended key from given secret and chain code. + pub fn with_code(secret: Secret, chain_code: H256) -> ExtendedSecret { + ExtendedSecret { + secret: secret, + chain_code: chain_code, + } + } - /// New extended key from given secret with the random chain code. - pub fn new_random(secret: Secret) -> ExtendedSecret { - ExtendedSecret::with_code(secret, H256::random()) - } + /// New extended key from given secret with the random chain code. + pub fn new_random(secret: Secret) -> ExtendedSecret { + ExtendedSecret::with_code(secret, H256::random()) + } - /// New extended key from given secret. - /// Chain code will be derived from the secret itself (in a deterministic way). - pub fn new(secret: Secret) -> ExtendedSecret { - let chain_code = derivation::chain_code(*secret); - ExtendedSecret::with_code(secret, chain_code) - } + /// New extended key from given secret. + /// Chain code will be derived from the secret itself (in a deterministic way). 
+ pub fn new(secret: Secret) -> ExtendedSecret { + let chain_code = derivation::chain_code(*secret); + ExtendedSecret::with_code(secret, chain_code) + } - /// Derive new private key - pub fn derive(&self, index: Derivation) -> ExtendedSecret where T: Label { - let (derived_key, next_chain_code) = derivation::private(*self.secret, self.chain_code, index); + /// Derive new private key + pub fn derive(&self, index: Derivation) -> ExtendedSecret + where + T: Label, + { + let (derived_key, next_chain_code) = + derivation::private(*self.secret, self.chain_code, index); - let derived_secret = Secret::from(derived_key.0); + let derived_secret = Secret::from(derived_key.0); - ExtendedSecret::with_code(derived_secret, next_chain_code) - } + ExtendedSecret::with_code(derived_secret, next_chain_code) + } - /// Private key component of the extended key. - pub fn as_raw(&self) -> &Secret { - &self.secret - } + /// Private key component of the extended key. + pub fn as_raw(&self) -> &Secret { + &self.secret + } } /// Extended public key, allows deterministic derivation of subsequent keys. 
pub struct ExtendedPublic { - public: Public, - chain_code: H256, + public: Public, + chain_code: H256, } impl ExtendedPublic { - /// New extended public key from known parent and chain code - pub fn new(public: Public, chain_code: H256) -> Self { - ExtendedPublic { public: public, chain_code: chain_code } - } + /// New extended public key from known parent and chain code + pub fn new(public: Public, chain_code: H256) -> Self { + ExtendedPublic { + public: public, + chain_code: chain_code, + } + } - /// Create new extended public key from known secret - pub fn from_secret(secret: &ExtendedSecret) -> Result { - Ok( - ExtendedPublic::new( - derivation::point(**secret.as_raw())?, - secret.chain_code.clone(), - ) - ) - } + /// Create new extended public key from known secret + pub fn from_secret(secret: &ExtendedSecret) -> Result { + Ok(ExtendedPublic::new( + derivation::point(**secret.as_raw())?, + secret.chain_code.clone(), + )) + } - /// Derive new public key - /// Operation is defined only for index belongs [0..2^31) - pub fn derive(&self, index: Derivation) -> Result where T: Label { - let (derived_key, next_chain_code) = derivation::public(self.public, self.chain_code, index)?; - Ok(ExtendedPublic::new(derived_key, next_chain_code)) - } + /// Derive new public key + /// Operation is defined only for index belongs [0..2^31) + pub fn derive(&self, index: Derivation) -> Result + where + T: Label, + { + let (derived_key, next_chain_code) = + derivation::public(self.public, self.chain_code, index)?; + Ok(ExtendedPublic::new(derived_key, next_chain_code)) + } - pub fn public(&self) -> &Public { - &self.public - } + pub fn public(&self) -> &Public { + &self.public + } } pub struct ExtendedKeyPair { - secret: ExtendedSecret, - public: ExtendedPublic, + secret: ExtendedSecret, + public: ExtendedPublic, } impl ExtendedKeyPair { - pub fn new(secret: Secret) -> Self { - let extended_secret = ExtendedSecret::new(secret); - let extended_public = 
ExtendedPublic::from_secret(&extended_secret) - .expect("Valid `Secret` always produces valid public; qed"); - ExtendedKeyPair { - secret: extended_secret, - public: extended_public, - } - } + pub fn new(secret: Secret) -> Self { + let extended_secret = ExtendedSecret::new(secret); + let extended_public = ExtendedPublic::from_secret(&extended_secret) + .expect("Valid `Secret` always produces valid public; qed"); + ExtendedKeyPair { + secret: extended_secret, + public: extended_public, + } + } - pub fn with_code(secret: Secret, public: Public, chain_code: H256) -> Self { - ExtendedKeyPair { - secret: ExtendedSecret::with_code(secret, chain_code.clone()), - public: ExtendedPublic::new(public, chain_code), - } - } + pub fn with_code(secret: Secret, public: Public, chain_code: H256) -> Self { + ExtendedKeyPair { + secret: ExtendedSecret::with_code(secret, chain_code.clone()), + public: ExtendedPublic::new(public, chain_code), + } + } - pub fn with_secret(secret: Secret, chain_code: H256) -> Self { - let extended_secret = ExtendedSecret::with_code(secret, chain_code); - let extended_public = ExtendedPublic::from_secret(&extended_secret) - .expect("Valid `Secret` always produces valid public; qed"); - ExtendedKeyPair { - secret: extended_secret, - public: extended_public, - } - } + pub fn with_secret(secret: Secret, chain_code: H256) -> Self { + let extended_secret = ExtendedSecret::with_code(secret, chain_code); + let extended_public = ExtendedPublic::from_secret(&extended_secret) + .expect("Valid `Secret` always produces valid public; qed"); + ExtendedKeyPair { + secret: extended_secret, + public: extended_public, + } + } - pub fn with_seed(seed: &[u8]) -> Result { - let (master_key, chain_code) = derivation::seed_pair(seed); - Ok(ExtendedKeyPair::with_secret( - Secret::from_unsafe_slice(&*master_key).map_err(|_| DerivationError::InvalidSeed)?, - chain_code, - )) - } + pub fn with_seed(seed: &[u8]) -> Result { + let (master_key, chain_code) = 
derivation::seed_pair(seed); + Ok(ExtendedKeyPair::with_secret( + Secret::from_unsafe_slice(&*master_key).map_err(|_| DerivationError::InvalidSeed)?, + chain_code, + )) + } - pub fn secret(&self) -> &ExtendedSecret { - &self.secret - } + pub fn secret(&self) -> &ExtendedSecret { + &self.secret + } - pub fn public(&self) -> &ExtendedPublic { - &self.public - } + pub fn public(&self) -> &ExtendedPublic { + &self.public + } - pub fn derive(&self, index: Derivation) -> Result where T: Label { - let derived = self.secret.derive(index); + pub fn derive(&self, index: Derivation) -> Result + where + T: Label, + { + let derived = self.secret.derive(index); - Ok(ExtendedKeyPair { - public: ExtendedPublic::from_secret(&derived)?, - secret: derived, - }) - } + Ok(ExtendedKeyPair { + public: ExtendedPublic::from_secret(&derived)?, + secret: derived, + }) + } } // Derivation functions for private and public keys // Work is based on BIP0032 // https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki mod derivation { - use parity_crypto::hmac; - use ethereum_types::{U256, U512, H512, H256}; - use secp256k1::key::{SecretKey, PublicKey}; - use SECP256K1; - use keccak; - use math::curve_order; - use super::{Label, Derivation}; + use super::{Derivation, Label}; + use ethereum_types::{H256, H512, U256, U512}; + use keccak; + use math::curve_order; + use parity_crypto::hmac; + use secp256k1::key::{PublicKey, SecretKey}; + use SECP256K1; - #[derive(Debug)] - pub enum Error { - InvalidHardenedUse, - InvalidPoint, - MissingIndex, - InvalidSeed, - } + #[derive(Debug)] + pub enum Error { + InvalidHardenedUse, + InvalidPoint, + MissingIndex, + InvalidSeed, + } - // Deterministic derivation of the key using secp256k1 elliptic curve. - // Derivation can be either hardened or not. 
- // For hardened derivation, pass u32 index at least 2^31 or custom Derivation::Hard(T) enum - // - // Can panic if passed `private_key` is not a valid secp256k1 private key - // (outside of (0..curve_order()]) field - pub fn private(private_key: H256, chain_code: H256, index: Derivation) -> (H256, H256) where T: Label { - match index { - Derivation::Soft(index) => private_soft(private_key, chain_code, index), - Derivation::Hard(index) => private_hard(private_key, chain_code, index), - } - } + // Deterministic derivation of the key using secp256k1 elliptic curve. + // Derivation can be either hardened or not. + // For hardened derivation, pass u32 index at least 2^31 or custom Derivation::Hard(T) enum + // + // Can panic if passed `private_key` is not a valid secp256k1 private key + // (outside of (0..curve_order()]) field + pub fn private(private_key: H256, chain_code: H256, index: Derivation) -> (H256, H256) + where + T: Label, + { + match index { + Derivation::Soft(index) => private_soft(private_key, chain_code, index), + Derivation::Hard(index) => private_hard(private_key, chain_code, index), + } + } - fn hmac_pair(data: &[u8], private_key: H256, chain_code: H256) -> (H256, H256) { - let private: U256 = private_key.into(); + fn hmac_pair(data: &[u8], private_key: H256, chain_code: H256) -> (H256, H256) { + let private: U256 = private_key.into(); - // produces 512-bit derived hmac (I) - let skey = hmac::SigKey::sha512(&*chain_code); - let i_512 = hmac::sign(&skey, &data[..]); + // produces 512-bit derived hmac (I) + let skey = hmac::SigKey::sha512(&*chain_code); + let i_512 = hmac::sign(&skey, &data[..]); - // left most 256 bits are later added to original private key - let hmac_key: U256 = H256::from_slice(&i_512[0..32]).into(); - // right most 256 bits are new chain code for later derivations - let next_chain_code = H256::from(&i_512[32..64]); + // left most 256 bits are later added to original private key + let hmac_key: U256 = 
H256::from_slice(&i_512[0..32]).into(); + // right most 256 bits are new chain code for later derivations + let next_chain_code = H256::from(&i_512[32..64]); - let child_key = private_add(hmac_key, private).into(); - (child_key, next_chain_code) - } + let child_key = private_add(hmac_key, private).into(); + (child_key, next_chain_code) + } - // Can panic if passed `private_key` is not a valid secp256k1 private key - // (outside of (0..curve_order()]) field - fn private_soft(private_key: H256, chain_code: H256, index: T) -> (H256, H256) where T: Label { - let mut data = vec![0u8; 33 + T::len()]; + // Can panic if passed `private_key` is not a valid secp256k1 private key + // (outside of (0..curve_order()]) field + fn private_soft(private_key: H256, chain_code: H256, index: T) -> (H256, H256) + where + T: Label, + { + let mut data = vec![0u8; 33 + T::len()]; - let sec_private = SecretKey::from_slice(&SECP256K1, &*private_key) - .expect("Caller should provide valid private key"); - let sec_public = PublicKey::from_secret_key(&SECP256K1, &sec_private) - .expect("Caller should provide valid private key"); - let public_serialized = sec_public.serialize_vec(&SECP256K1, true); + let sec_private = SecretKey::from_slice(&SECP256K1, &*private_key) + .expect("Caller should provide valid private key"); + let sec_public = PublicKey::from_secret_key(&SECP256K1, &sec_private) + .expect("Caller should provide valid private key"); + let public_serialized = sec_public.serialize_vec(&SECP256K1, true); - // curve point (compressed public key) -- index - // 0.33 -- 33..end - data[0..33].copy_from_slice(&public_serialized); - index.store(&mut data[33..]); + // curve point (compressed public key) -- index + // 0.33 -- 33..end + data[0..33].copy_from_slice(&public_serialized); + index.store(&mut data[33..]); - hmac_pair(&data, private_key, chain_code) - } + hmac_pair(&data, private_key, chain_code) + } - // Deterministic derivation of the key using secp256k1 elliptic curve - // This is 
hardened derivation and does not allow to associate - // corresponding public keys of the original and derived private keys - fn private_hard(private_key: H256, chain_code: H256, index: T) -> (H256, H256) where T: Label { - let mut data: Vec = vec![0u8; 33 + T::len()]; - let private: U256 = private_key.into(); + // Deterministic derivation of the key using secp256k1 elliptic curve + // This is hardened derivation and does not allow to associate + // corresponding public keys of the original and derived private keys + fn private_hard(private_key: H256, chain_code: H256, index: T) -> (H256, H256) + where + T: Label, + { + let mut data: Vec = vec![0u8; 33 + T::len()]; + let private: U256 = private_key.into(); - // 0x00 (padding) -- private_key -- index - // 0 -- 1..33 -- 33..end - private.to_big_endian(&mut data[1..33]); - index.store(&mut data[33..(33 + T::len())]); + // 0x00 (padding) -- private_key -- index + // 0 -- 1..33 -- 33..end + private.to_big_endian(&mut data[1..33]); + index.store(&mut data[33..(33 + T::len())]); - hmac_pair(&data, private_key, chain_code) - } + hmac_pair(&data, private_key, chain_code) + } - fn private_add(k1: U256, k2: U256) -> U256 { - let sum = U512::from(k1) + U512::from(k2); - modulo(sum, curve_order()) - } + fn private_add(k1: U256, k2: U256) -> U256 { + let sum = U512::from(k1) + U512::from(k2); + modulo(sum, curve_order()) + } - // todo: surely can be optimized - fn modulo(u1: U512, u2: U256) -> U256 { - let dv = u1 / U512::from(u2); - let md = u1 - (dv * U512::from(u2)); - md.into() - } + // todo: surely can be optimized + fn modulo(u1: U512, u2: U256) -> U256 { + let dv = u1 / U512::from(u2); + let md = u1 - (dv * U512::from(u2)); + md.into() + } - pub fn public(public_key: H512, chain_code: H256, derivation: Derivation) -> Result<(H512, H256), Error> where T: Label { - let index = match derivation { - Derivation::Soft(index) => index, - Derivation::Hard(_) => { return Err(Error::InvalidHardenedUse); } - }; + pub fn public( + 
public_key: H512, + chain_code: H256, + derivation: Derivation, + ) -> Result<(H512, H256), Error> + where + T: Label, + { + let index = match derivation { + Derivation::Soft(index) => index, + Derivation::Hard(_) => { + return Err(Error::InvalidHardenedUse); + } + }; - let mut public_sec_raw = [0u8; 65]; - public_sec_raw[0] = 4; - public_sec_raw[1..65].copy_from_slice(&*public_key); - let public_sec = PublicKey::from_slice(&SECP256K1, &public_sec_raw).map_err(|_| Error::InvalidPoint)?; - let public_serialized = public_sec.serialize_vec(&SECP256K1, true); + let mut public_sec_raw = [0u8; 65]; + public_sec_raw[0] = 4; + public_sec_raw[1..65].copy_from_slice(&*public_key); + let public_sec = + PublicKey::from_slice(&SECP256K1, &public_sec_raw).map_err(|_| Error::InvalidPoint)?; + let public_serialized = public_sec.serialize_vec(&SECP256K1, true); - let mut data = vec![0u8; 33 + T::len()]; - // curve point (compressed public key) -- index - // 0.33 -- 33..end - data[0..33].copy_from_slice(&public_serialized); - index.store(&mut data[33..(33 + T::len())]); + let mut data = vec![0u8; 33 + T::len()]; + // curve point (compressed public key) -- index + // 0.33 -- 33..end + data[0..33].copy_from_slice(&public_serialized); + index.store(&mut data[33..(33 + T::len())]); - // HMAC512SHA produces [derived private(256); new chain code(256)] - let skey = hmac::SigKey::sha512(&*chain_code); - let i_512 = hmac::sign(&skey, &data[..]); + // HMAC512SHA produces [derived private(256); new chain code(256)] + let skey = hmac::SigKey::sha512(&*chain_code); + let i_512 = hmac::sign(&skey, &data[..]); - let new_private = H256::from(&i_512[0..32]); - let new_chain_code = H256::from(&i_512[32..64]); + let new_private = H256::from(&i_512[0..32]); + let new_chain_code = H256::from(&i_512[32..64]); - // Generated private key can (extremely rarely) be out of secp256k1 key field - if curve_order() <= new_private.clone().into() { return Err(Error::MissingIndex); } - let new_private_sec = 
SecretKey::from_slice(&SECP256K1, &*new_private) + // Generated private key can (extremely rarely) be out of secp256k1 key field + if curve_order() <= new_private.clone().into() { + return Err(Error::MissingIndex); + } + let new_private_sec = SecretKey::from_slice(&SECP256K1, &*new_private) .expect("Private key belongs to the field [0..CURVE_ORDER) (checked above); So initializing can never fail; qed"); - let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec) - .expect("Valid private key produces valid public key"); + let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec) + .expect("Valid private key produces valid public key"); - // Adding two points on the elliptic curves (combining two public keys) - new_public.add_assign(&SECP256K1, &public_sec) - .expect("Addition of two valid points produce valid point"); + // Adding two points on the elliptic curves (combining two public keys) + new_public + .add_assign(&SECP256K1, &public_sec) + .expect("Addition of two valid points produce valid point"); - let serialized = new_public.serialize_vec(&SECP256K1, false); + let serialized = new_public.serialize_vec(&SECP256K1, false); - Ok(( - H512::from(&serialized[1..65]), - new_chain_code, - )) - } + Ok((H512::from(&serialized[1..65]), new_chain_code)) + } - fn sha3(slc: &[u8]) -> H256 { - keccak::Keccak256::keccak256(slc).into() - } + fn sha3(slc: &[u8]) -> H256 { + keccak::Keccak256::keccak256(slc).into() + } - pub fn chain_code(secret: H256) -> H256 { - // 10,000 rounds of sha3 - let mut running_sha3 = sha3(&*secret); - for _ in 0..99999 { running_sha3 = sha3(&*running_sha3); } - running_sha3 - } + pub fn chain_code(secret: H256) -> H256 { + // 10,000 rounds of sha3 + let mut running_sha3 = sha3(&*secret); + for _ in 0..99999 { + running_sha3 = sha3(&*running_sha3); + } + running_sha3 + } - pub fn point(secret: H256) -> Result { - let sec = SecretKey::from_slice(&SECP256K1, &*secret) - .map_err(|_| Error::InvalidPoint)?; - let 
public_sec = PublicKey::from_secret_key(&SECP256K1, &sec) - .map_err(|_| Error::InvalidPoint)?; - let serialized = public_sec.serialize_vec(&SECP256K1, false); - Ok(H512::from(&serialized[1..65])) - } + pub fn point(secret: H256) -> Result { + let sec = SecretKey::from_slice(&SECP256K1, &*secret).map_err(|_| Error::InvalidPoint)?; + let public_sec = + PublicKey::from_secret_key(&SECP256K1, &sec).map_err(|_| Error::InvalidPoint)?; + let serialized = public_sec.serialize_vec(&SECP256K1, false); + Ok(H512::from(&serialized[1..65])) + } - pub fn seed_pair(seed: &[u8]) -> (H256, H256) { - let skey = hmac::SigKey::sha512(b"Bitcoin seed"); - let i_512 = hmac::sign(&skey, seed); + pub fn seed_pair(seed: &[u8]) -> (H256, H256) { + let skey = hmac::SigKey::sha512(b"Bitcoin seed"); + let i_512 = hmac::sign(&skey, seed); - let master_key = H256::from_slice(&i_512[0..32]); - let chain_code = H256::from_slice(&i_512[32..64]); + let master_key = H256::from_slice(&i_512[0..32]); + let chain_code = H256::from_slice(&i_512[32..64]); - (master_key, chain_code) - } + (master_key, chain_code) + } } #[cfg(test)] mod tests { - use super::{ExtendedSecret, ExtendedPublic, ExtendedKeyPair}; - use secret::Secret; - use std::str::FromStr; - use ethereum_types::{H128, H256}; - use super::{derivation, Derivation}; + use super::{derivation, Derivation, ExtendedKeyPair, ExtendedPublic, ExtendedSecret}; + use ethereum_types::{H128, H256}; + use secret::Secret; + use std::str::FromStr; - fn master_chain_basic() -> (H256, H256) { - let seed = H128::from_str("000102030405060708090a0b0c0d0e0f") - .expect("Seed should be valid H128") - .to_vec(); + fn master_chain_basic() -> (H256, H256) { + let seed = H128::from_str("000102030405060708090a0b0c0d0e0f") + .expect("Seed should be valid H128") + .to_vec(); - derivation::seed_pair(&*seed) - } + derivation::seed_pair(&*seed) + } - fn test_extended(f: F, test_private: H256) where F: Fn(ExtendedSecret) -> ExtendedSecret { - let (private_seed, chain_code) = 
master_chain_basic(); - let extended_secret = ExtendedSecret::with_code(Secret::from(private_seed.0), chain_code); - let derived = f(extended_secret); - assert_eq!(**derived.as_raw(), test_private); - } + fn test_extended(f: F, test_private: H256) + where + F: Fn(ExtendedSecret) -> ExtendedSecret, + { + let (private_seed, chain_code) = master_chain_basic(); + let extended_secret = ExtendedSecret::with_code(Secret::from(private_seed.0), chain_code); + let derived = f(extended_secret); + assert_eq!(**derived.as_raw(), test_private); + } - #[test] - fn smoky() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let extended_secret = ExtendedSecret::with_code(secret.clone(), 0u64.into()); + #[test] + fn smoky() { + let secret = + Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65") + .unwrap(); + let extended_secret = ExtendedSecret::with_code(secret.clone(), 0u64.into()); - // hardened - assert_eq!(&**extended_secret.as_raw(), &*secret); - assert_eq!(&**extended_secret.derive(2147483648.into()).as_raw(), &"0927453daed47839608e414a3738dfad10aed17c459bbd9ab53f89b026c834b6".into()); - assert_eq!(&**extended_secret.derive(2147483649.into()).as_raw(), &"44238b6a29c6dcbe9b401364141ba11e2198c289a5fed243a1c11af35c19dc0f".into()); + // hardened + assert_eq!(&**extended_secret.as_raw(), &*secret); + assert_eq!( + &**extended_secret.derive(2147483648.into()).as_raw(), + &"0927453daed47839608e414a3738dfad10aed17c459bbd9ab53f89b026c834b6".into() + ); + assert_eq!( + &**extended_secret.derive(2147483649.into()).as_raw(), + &"44238b6a29c6dcbe9b401364141ba11e2198c289a5fed243a1c11af35c19dc0f".into() + ); - // normal - assert_eq!(&**extended_secret.derive(0.into()).as_raw(), &"bf6a74e3f7b36fc4c96a1e12f31abc817f9f5904f5a8fc27713163d1f0b713f6".into()); - assert_eq!(&**extended_secret.derive(1.into()).as_raw(), &"bd4fca9eb1f9c201e9448c1eecd66e302d68d4d313ce895b8c134f512205c1bc".into()); - 
assert_eq!(&**extended_secret.derive(2.into()).as_raw(), &"86932b542d6cab4d9c65490c7ef502d89ecc0e2a5f4852157649e3251e2a3268".into()); + // normal + assert_eq!( + &**extended_secret.derive(0.into()).as_raw(), + &"bf6a74e3f7b36fc4c96a1e12f31abc817f9f5904f5a8fc27713163d1f0b713f6".into() + ); + assert_eq!( + &**extended_secret.derive(1.into()).as_raw(), + &"bd4fca9eb1f9c201e9448c1eecd66e302d68d4d313ce895b8c134f512205c1bc".into() + ); + assert_eq!( + &**extended_secret.derive(2.into()).as_raw(), + &"86932b542d6cab4d9c65490c7ef502d89ecc0e2a5f4852157649e3251e2a3268".into() + ); - let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); - let derived_public = extended_public.derive(0.into()).expect("First derivation of public should succeed"); - assert_eq!(&*derived_public.public(), &"f7b3244c96688f92372bfd4def26dc4151529747bab9f188a4ad34e141d47bd66522ff048bc6f19a0a4429b04318b1a8796c000265b4fa200dae5f6dda92dd94".into()); + let extended_public = ExtendedPublic::from_secret(&extended_secret) + .expect("Extended public should be created"); + let derived_public = extended_public + .derive(0.into()) + .expect("First derivation of public should succeed"); + assert_eq!(&*derived_public.public(), &"f7b3244c96688f92372bfd4def26dc4151529747bab9f188a4ad34e141d47bd66522ff048bc6f19a0a4429b04318b1a8796c000265b4fa200dae5f6dda92dd94".into()); - let keypair = ExtendedKeyPair::with_secret( - Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(), - 064.into(), - ); - assert_eq!(&**keypair.derive(2147483648u32.into()).expect("Derivation of keypair should succeed").secret().as_raw(), &"edef54414c03196557cf73774bc97a645c9a1df2164ed34f0c2a78d1375a930c".into()); - } + let keypair = ExtendedKeyPair::with_secret( + Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65") + .unwrap(), + 064.into(), + ); + assert_eq!( + &**keypair + .derive(2147483648u32.into()) + 
.expect("Derivation of keypair should succeed") + .secret() + .as_raw(), + &"edef54414c03196557cf73774bc97a645c9a1df2164ed34f0c2a78d1375a930c".into() + ); + } - #[test] - fn h256_soft_match() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let derivation_secret = H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); + #[test] + fn h256_soft_match() { + let secret = + Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65") + .unwrap(); + let derivation_secret = + H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015") + .unwrap(); - let extended_secret = ExtendedSecret::with_code(secret.clone(), 0u64.into()); - let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); + let extended_secret = ExtendedSecret::with_code(secret.clone(), 0u64.into()); + let extended_public = ExtendedPublic::from_secret(&extended_secret) + .expect("Extended public should be created"); - let derived_secret0 = extended_secret.derive(Derivation::Soft(derivation_secret)); - let derived_public0 = extended_public.derive(Derivation::Soft(derivation_secret)).expect("First derivation of public should succeed"); + let derived_secret0 = extended_secret.derive(Derivation::Soft(derivation_secret)); + let derived_public0 = extended_public + .derive(Derivation::Soft(derivation_secret)) + .expect("First derivation of public should succeed"); - let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created"); + let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0) + .expect("Extended public should be created"); - assert_eq!(public_from_secret0.public(), derived_public0.public()); - } + assert_eq!(public_from_secret0.public(), derived_public0.public()); + } - #[test] - fn h256_hard() { - let secret = 
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let derivation_secret = H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); - let extended_secret = ExtendedSecret::with_code(secret.clone(), 1u64.into()); + #[test] + fn h256_hard() { + let secret = + Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65") + .unwrap(); + let derivation_secret = + H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015") + .unwrap(); + let extended_secret = ExtendedSecret::with_code(secret.clone(), 1u64.into()); - assert_eq!(&**extended_secret.derive(Derivation::Hard(derivation_secret)).as_raw(), &"2bc2d696fb744d77ff813b4a1ef0ad64e1e5188b622c54ba917acc5ebc7c5486".into()); - } + assert_eq!( + &**extended_secret + .derive(Derivation::Hard(derivation_secret)) + .as_raw(), + &"2bc2d696fb744d77ff813b4a1ef0ad64e1e5188b622c54ba917acc5ebc7c5486".into() + ); + } - #[test] - fn match_() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let extended_secret = ExtendedSecret::with_code(secret.clone(), 1.into()); - let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); + #[test] + fn match_() { + let secret = + Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65") + .unwrap(); + let extended_secret = ExtendedSecret::with_code(secret.clone(), 1.into()); + let extended_public = ExtendedPublic::from_secret(&extended_secret) + .expect("Extended public should be created"); - let derived_secret0 = extended_secret.derive(0.into()); - let derived_public0 = extended_public.derive(0.into()).expect("First derivation of public should succeed"); + let derived_secret0 = extended_secret.derive(0.into()); + let derived_public0 = extended_public + .derive(0.into()) + .expect("First derivation of public should succeed"); - 
let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created"); + let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0) + .expect("Extended public should be created"); - assert_eq!(public_from_secret0.public(), derived_public0.public()); - } + assert_eq!(public_from_secret0.public(), derived_public0.public()); + } - #[test] - fn test_seeds() { - let seed = H128::from_str("000102030405060708090a0b0c0d0e0f") - .expect("Seed should be valid H128") - .to_vec(); + #[test] + fn test_seeds() { + let seed = H128::from_str("000102030405060708090a0b0c0d0e0f") + .expect("Seed should be valid H128") + .to_vec(); - // private key from bitcoin test vector - // xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs - let test_private = H256::from_str("e8f32e723decf4051aefac8e2c93c9c5b214313817cdb01a1494b917c8436b35") - .expect("Private should be decoded ok"); + // private key from bitcoin test vector + // xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs + let test_private = + H256::from_str("e8f32e723decf4051aefac8e2c93c9c5b214313817cdb01a1494b917c8436b35") + .expect("Private should be decoded ok"); - let (private_seed, _) = derivation::seed_pair(&*seed); + let (private_seed, _) = derivation::seed_pair(&*seed); - assert_eq!(private_seed, test_private); - } + assert_eq!(private_seed, test_private); + } - #[test] - fn test_vector_1() { - // xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7 - // H(0) - test_extended( - |secret| secret.derive(2147483648.into()), - H256::from_str("edb2e14f9ee77d26dd93b4ecede8d16ed408ce149b6cd80b0715a2d911a0afea") - .expect("Private should be decoded ok") - ); - } + #[test] + fn test_vector_1() { + // 
xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7 + // H(0) + test_extended( + |secret| secret.derive(2147483648.into()), + H256::from_str("edb2e14f9ee77d26dd93b4ecede8d16ed408ce149b6cd80b0715a2d911a0afea") + .expect("Private should be decoded ok"), + ); + } - #[test] - fn test_vector_2() { - // xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs - // H(0)/1 - test_extended( - |secret| secret.derive(2147483648.into()).derive(1.into()), - H256::from_str("3c6cb8d0f6a264c91ea8b5030fadaa8e538b020f0a387421a12de9319dc93368") - .expect("Private should be decoded ok") - ); - } + #[test] + fn test_vector_2() { + // xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs + // H(0)/1 + test_extended( + |secret| secret.derive(2147483648.into()).derive(1.into()), + H256::from_str("3c6cb8d0f6a264c91ea8b5030fadaa8e538b020f0a387421a12de9319dc93368") + .expect("Private should be decoded ok"), + ); + } } diff --git a/accounts/ethkey/src/keccak.rs b/accounts/ethkey/src/keccak.rs index 202c21193..5a1e1e0ad 100644 --- a/accounts/ethkey/src/keccak.rs +++ b/accounts/ethkey/src/keccak.rs @@ -17,15 +17,17 @@ use tiny_keccak::Keccak; pub trait Keccak256 { - fn keccak256(&self) -> T where T: Sized; + fn keccak256(&self) -> T + where + T: Sized; } impl Keccak256<[u8; 32]> for [u8] { - fn keccak256(&self) -> [u8; 32] { - let mut keccak = Keccak::new_keccak256(); - let mut result = [0u8; 32]; - keccak.update(self); - keccak.finalize(&mut result); - result - } + fn keccak256(&self) -> [u8; 32] { + let mut keccak = Keccak::new_keccak256(); + let mut result = [0u8; 32]; + keccak.update(self); + keccak.finalize(&mut result); + result + } } diff --git a/accounts/ethkey/src/keypair.rs b/accounts/ethkey/src/keypair.rs index 2919f0cfb..eca1ed862 100644 --- a/accounts/ethkey/src/keypair.rs +++ b/accounts/ethkey/src/keypair.rs @@ 
-14,102 +14,107 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt; -use secp256k1::key; -use rustc_hex::ToHex; +use super::{Address, Error, Public, Secret, SECP256K1}; use keccak::Keccak256; -use super::{Secret, Public, Address, SECP256K1, Error}; +use rustc_hex::ToHex; +use secp256k1::key; +use std::fmt; pub fn public_to_address(public: &Public) -> Address { - let hash = public.keccak256(); - let mut result = Address::default(); - result.copy_from_slice(&hash[12..]); - result + let hash = public.keccak256(); + let mut result = Address::default(); + result.copy_from_slice(&hash[12..]); + result } #[derive(Debug, Clone, PartialEq)] /// secp256k1 key pair pub struct KeyPair { - secret: Secret, - public: Public, + secret: Secret, + public: Public, } impl fmt::Display for KeyPair { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - writeln!(f, "secret: {}", self.secret.to_hex())?; - writeln!(f, "public: {}", self.public.to_hex())?; - write!(f, "address: {}", self.address().to_hex()) - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + writeln!(f, "secret: {}", self.secret.to_hex())?; + writeln!(f, "public: {}", self.public.to_hex())?; + write!(f, "address: {}", self.address().to_hex()) + } } impl KeyPair { - /// Create a pair from secret key - pub fn from_secret(secret: Secret) -> Result { - let context = &SECP256K1; - let s: key::SecretKey = key::SecretKey::from_slice(context, &secret[..])?; - let pub_key = key::PublicKey::from_secret_key(context, &s)?; - let serialized = pub_key.serialize_vec(context, false); + /// Create a pair from secret key + pub fn from_secret(secret: Secret) -> Result { + let context = &SECP256K1; + let s: key::SecretKey = key::SecretKey::from_slice(context, &secret[..])?; + let pub_key = key::PublicKey::from_secret_key(context, &s)?; + let serialized = pub_key.serialize_vec(context, false); - let mut public = 
Public::default(); - public.copy_from_slice(&serialized[1..65]); + let mut public = Public::default(); + public.copy_from_slice(&serialized[1..65]); - let keypair = KeyPair { - secret: secret, - public: public, - }; + let keypair = KeyPair { + secret: secret, + public: public, + }; - Ok(keypair) - } + Ok(keypair) + } - pub fn from_secret_slice(slice: &[u8]) -> Result { - Self::from_secret(Secret::from_unsafe_slice(slice)?) - } + pub fn from_secret_slice(slice: &[u8]) -> Result { + Self::from_secret(Secret::from_unsafe_slice(slice)?) + } - pub fn from_keypair(sec: key::SecretKey, publ: key::PublicKey) -> Self { - let context = &SECP256K1; - let serialized = publ.serialize_vec(context, false); - let secret = Secret::from(sec); - let mut public = Public::default(); - public.copy_from_slice(&serialized[1..65]); + pub fn from_keypair(sec: key::SecretKey, publ: key::PublicKey) -> Self { + let context = &SECP256K1; + let serialized = publ.serialize_vec(context, false); + let secret = Secret::from(sec); + let mut public = Public::default(); + public.copy_from_slice(&serialized[1..65]); - KeyPair { - secret: secret, - public: public, - } - } + KeyPair { + secret: secret, + public: public, + } + } - pub fn secret(&self) -> &Secret { - &self.secret - } + pub fn secret(&self) -> &Secret { + &self.secret + } - pub fn public(&self) -> &Public { - &self.public - } + pub fn public(&self) -> &Public { + &self.public + } - pub fn address(&self) -> Address { - public_to_address(&self.public) - } + pub fn address(&self) -> Address { + public_to_address(&self.public) + } } #[cfg(test)] mod tests { - use std::str::FromStr; - use {KeyPair, Secret}; + use std::str::FromStr; + use KeyPair; + use Secret; - #[test] - fn from_secret() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let _ = KeyPair::from_secret(secret).unwrap(); - } + #[test] + fn from_secret() { + let secret = + 
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65") + .unwrap(); + let _ = KeyPair::from_secret(secret).unwrap(); + } - #[test] - fn keypair_display() { - let expected = + #[test] + fn keypair_display() { + let expected = "secret: a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65 public: 8ce0db0b0359ffc5866ba61903cc2518c3675ef2cf380a7e54bde7ea20e6fa1ab45b7617346cd11b7610001ee6ae5b0155c41cad9527cbcdff44ec67848943a4 address: 5b073e9233944b5e729e46d618f0d8edf3d9c34a".to_owned(); - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let kp = KeyPair::from_secret(secret).unwrap(); - assert_eq!(format!("{}", kp), expected); - } + let secret = + Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65") + .unwrap(); + let kp = KeyPair::from_secret(secret).unwrap(); + assert_eq!(format!("{}", kp), expected); + } } diff --git a/accounts/ethkey/src/lib.rs b/accounts/ethkey/src/lib.rs index 2a1968bbe..081ba1ea5 100644 --- a/accounts/ethkey/src/lib.rs +++ b/accounts/ethkey/src/lib.rs @@ -17,9 +17,9 @@ // #![warn(missing_docs)] extern crate edit_distance; -extern crate parity_crypto; extern crate ethereum_types; extern crate memzero; +extern crate parity_crypto; extern crate parity_wordlist; #[macro_use] extern crate quick_error; @@ -39,31 +39,33 @@ extern crate serde_derive; mod brain; mod brain_prefix; mod error; -mod keypair; +mod extended; mod keccak; +mod keypair; mod password; mod prefix; mod random; -mod signature; mod secret; -mod extended; +mod signature; pub mod brain_recover; pub mod crypto; pub mod math; -pub use self::parity_wordlist::Error as WordlistError; -pub use self::brain::Brain; -pub use self::brain_prefix::BrainPrefix; -pub use self::error::Error; -pub use self::keypair::{KeyPair, public_to_address}; -pub use self::math::public_is_valid; -pub use self::password::Password; -pub use self::prefix::Prefix; -pub use 
self::random::Random; -pub use self::signature::{sign, verify_public, verify_address, recover, Signature}; -pub use self::secret::Secret; -pub use self::extended::{ExtendedPublic, ExtendedSecret, ExtendedKeyPair, DerivationError, Derivation}; +pub use self::{ + brain::Brain, + brain_prefix::BrainPrefix, + error::Error, + extended::{Derivation, DerivationError, ExtendedKeyPair, ExtendedPublic, ExtendedSecret}, + keypair::{public_to_address, KeyPair}, + math::public_is_valid, + parity_wordlist::Error as WordlistError, + password::Password, + prefix::Prefix, + random::Random, + secret::Secret, + signature::{recover, sign, verify_address, verify_public, Signature}, +}; use ethereum_types::H256; @@ -71,7 +73,7 @@ pub use ethereum_types::{Address, Public}; pub type Message = H256; lazy_static! { - pub static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new(); + pub static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new(); } /// Uninstantiatable error type for infallible generators. @@ -80,8 +82,8 @@ pub enum Void {} /// Generates new keypair. pub trait Generator { - type Error; + type Error; - /// Should be called to generate new keypair. - fn generate(&mut self) -> Result; + /// Should be called to generate new keypair. + fn generate(&mut self) -> Result; } diff --git a/accounts/ethkey/src/math.rs b/accounts/ethkey/src/math.rs index 8c3fe650d..9f226972d 100644 --- a/accounts/ethkey/src/math.rs +++ b/accounts/ethkey/src/math.rs @@ -14,116 +14,121 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use super::{SECP256K1, Public, Secret, Error}; -use secp256k1::key; -use secp256k1::constants::{GENERATOR_X, GENERATOR_Y, CURVE_ORDER}; -use ethereum_types::{U256, H256}; +use super::{Error, Public, Secret, SECP256K1}; +use ethereum_types::{H256, U256}; +use secp256k1::{ + constants::{CURVE_ORDER, GENERATOR_X, GENERATOR_Y}, + key, +}; /// Whether the public key is valid. 
pub fn public_is_valid(public: &Public) -> bool { - to_secp256k1_public(public).ok() - .map_or(false, |p| p.is_valid()) + to_secp256k1_public(public) + .ok() + .map_or(false, |p| p.is_valid()) } /// Inplace multiply public key by secret key (EC point * scalar) pub fn public_mul_secret(public: &mut Public, secret: &Secret) -> Result<(), Error> { - let key_secret = secret.to_secp256k1_secret()?; - let mut key_public = to_secp256k1_public(public)?; - key_public.mul_assign(&SECP256K1, &key_secret)?; - set_public(public, &key_public); - Ok(()) + let key_secret = secret.to_secp256k1_secret()?; + let mut key_public = to_secp256k1_public(public)?; + key_public.mul_assign(&SECP256K1, &key_secret)?; + set_public(public, &key_public); + Ok(()) } /// Inplace add one public key to another (EC point + EC point) pub fn public_add(public: &mut Public, other: &Public) -> Result<(), Error> { - let mut key_public = to_secp256k1_public(public)?; - let other_public = to_secp256k1_public(other)?; - key_public.add_assign(&SECP256K1, &other_public)?; - set_public(public, &key_public); - Ok(()) + let mut key_public = to_secp256k1_public(public)?; + let other_public = to_secp256k1_public(other)?; + key_public.add_assign(&SECP256K1, &other_public)?; + set_public(public, &key_public); + Ok(()) } /// Inplace sub one public key from another (EC point - EC point) pub fn public_sub(public: &mut Public, other: &Public) -> Result<(), Error> { - let mut key_neg_other = to_secp256k1_public(other)?; - key_neg_other.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + let mut key_neg_other = to_secp256k1_public(other)?; + key_neg_other.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; - let mut key_public = to_secp256k1_public(public)?; - key_public.add_assign(&SECP256K1, &key_neg_other)?; - set_public(public, &key_public); - Ok(()) + let mut key_public = to_secp256k1_public(public)?; + key_public.add_assign(&SECP256K1, &key_neg_other)?; + set_public(public, &key_public); + Ok(()) } /// Replace public key with 
its negation (EC point = - EC point) pub fn public_negate(public: &mut Public) -> Result<(), Error> { - let mut key_public = to_secp256k1_public(public)?; - key_public.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; - set_public(public, &key_public); - Ok(()) + let mut key_public = to_secp256k1_public(public)?; + key_public.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + set_public(public, &key_public); + Ok(()) } /// Return base point of secp256k1 pub fn generation_point() -> Public { - let mut public_sec_raw = [0u8; 65]; - public_sec_raw[0] = 4; - public_sec_raw[1..33].copy_from_slice(&GENERATOR_X); - public_sec_raw[33..65].copy_from_slice(&GENERATOR_Y); + let mut public_sec_raw = [0u8; 65]; + public_sec_raw[0] = 4; + public_sec_raw[1..33].copy_from_slice(&GENERATOR_X); + public_sec_raw[33..65].copy_from_slice(&GENERATOR_Y); - let public_key = key::PublicKey::from_slice(&SECP256K1, &public_sec_raw) - .expect("constructing using predefined constants; qed"); - let mut public = Public::default(); - set_public(&mut public, &public_key); - public + let public_key = key::PublicKey::from_slice(&SECP256K1, &public_sec_raw) + .expect("constructing using predefined constants; qed"); + let mut public = Public::default(); + set_public(&mut public, &public_key); + public } /// Return secp256k1 elliptic curve order pub fn curve_order() -> U256 { - H256::from_slice(&CURVE_ORDER).into() + H256::from_slice(&CURVE_ORDER).into() } fn to_secp256k1_public(public: &Public) -> Result { - let public_data = { - let mut temp = [4u8; 65]; - (&mut temp[1..65]).copy_from_slice(&public[0..64]); - temp - }; + let public_data = { + let mut temp = [4u8; 65]; + (&mut temp[1..65]).copy_from_slice(&public[0..64]); + temp + }; - Ok(key::PublicKey::from_slice(&SECP256K1, &public_data)?) + Ok(key::PublicKey::from_slice(&SECP256K1, &public_data)?) 
} fn set_public(public: &mut Public, key_public: &key::PublicKey) { - let key_public_serialized = key_public.serialize_vec(&SECP256K1, false); - public.copy_from_slice(&key_public_serialized[1..65]); + let key_public_serialized = key_public.serialize_vec(&SECP256K1, false); + public.copy_from_slice(&key_public_serialized[1..65]); } #[cfg(test)] mod tests { - use super::super::{Random, Generator}; - use super::{public_add, public_sub}; + use super::{ + super::{Generator, Random}, + public_add, public_sub, + }; - #[test] - fn public_addition_is_commutative() { - let public1 = Random.generate().unwrap().public().clone(); - let public2 = Random.generate().unwrap().public().clone(); + #[test] + fn public_addition_is_commutative() { + let public1 = Random.generate().unwrap().public().clone(); + let public2 = Random.generate().unwrap().public().clone(); - let mut left = public1.clone(); - public_add(&mut left, &public2).unwrap(); + let mut left = public1.clone(); + public_add(&mut left, &public2).unwrap(); - let mut right = public2.clone(); - public_add(&mut right, &public1).unwrap(); + let mut right = public2.clone(); + public_add(&mut right, &public1).unwrap(); - assert_eq!(left, right); - } + assert_eq!(left, right); + } - #[test] - fn public_addition_is_reversible_with_subtraction() { - let public1 = Random.generate().unwrap().public().clone(); - let public2 = Random.generate().unwrap().public().clone(); + #[test] + fn public_addition_is_reversible_with_subtraction() { + let public1 = Random.generate().unwrap().public().clone(); + let public2 = Random.generate().unwrap().public().clone(); - let mut sum = public1.clone(); - public_add(&mut sum, &public2).unwrap(); - public_sub(&mut sum, &public2).unwrap(); + let mut sum = public1.clone(); + public_add(&mut sum, &public2).unwrap(); + public_sub(&mut sum, &public2).unwrap(); - assert_eq!(sum, public1); - } + assert_eq!(sum, public1); + } } diff --git a/accounts/ethkey/src/password.rs b/accounts/ethkey/src/password.rs 
index 6ad665e39..48359cbec 100644 --- a/accounts/ethkey/src/password.rs +++ b/accounts/ethkey/src/password.rs @@ -20,40 +20,40 @@ use std::{fmt, ptr}; pub struct Password(String); impl fmt::Debug for Password { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Password(******)") } } impl Password { - pub fn as_bytes(&self) -> &[u8] { - self.0.as_bytes() - } + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } - pub fn as_str(&self) -> &str { - self.0.as_str() - } + pub fn as_str(&self) -> &str { + self.0.as_str() + } } // Custom drop impl to zero out memory. impl Drop for Password { - fn drop(&mut self) { - unsafe { - for byte_ref in self.0.as_mut_vec() { - ptr::write_volatile(byte_ref, 0) - } - } - } + fn drop(&mut self) { + unsafe { + for byte_ref in self.0.as_mut_vec() { + ptr::write_volatile(byte_ref, 0) + } + } + } } impl From for Password { - fn from(s: String) -> Password { - Password(s) - } + fn from(s: String) -> Password { + Password(s) + } } impl<'a> From<&'a str> for Password { - fn from(s: &'a str) -> Password { - Password::from(String::from(s)) - } + fn from(s: &'a str) -> Password { + Password::from(String::from(s)) + } } diff --git a/accounts/ethkey/src/prefix.rs b/accounts/ethkey/src/prefix.rs index 6695e93c5..0180d4613 100644 --- a/accounts/ethkey/src/prefix.rs +++ b/accounts/ethkey/src/prefix.rs @@ -14,46 +14,49 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use super::{Random, Generator, KeyPair, Error}; +use super::{Error, Generator, KeyPair, Random}; /// Tries to find keypair with address starting with given prefix. 
pub struct Prefix { - prefix: Vec, - iterations: usize, + prefix: Vec, + iterations: usize, } impl Prefix { - pub fn new(prefix: Vec, iterations: usize) -> Self { - Prefix { - prefix: prefix, - iterations: iterations, - } - } + pub fn new(prefix: Vec, iterations: usize) -> Self { + Prefix { + prefix: prefix, + iterations: iterations, + } + } } impl Generator for Prefix { - type Error = Error; + type Error = Error; - fn generate(&mut self) -> Result { - for _ in 0..self.iterations { - let keypair = Random.generate()?; - if keypair.address().starts_with(&self.prefix) { - return Ok(keypair) - } - } + fn generate(&mut self) -> Result { + for _ in 0..self.iterations { + let keypair = Random.generate()?; + if keypair.address().starts_with(&self.prefix) { + return Ok(keypair); + } + } - Err(Error::Custom("Could not find keypair".into())) - } + Err(Error::Custom("Could not find keypair".into())) + } } #[cfg(test)] mod tests { - use {Generator, Prefix}; + use Generator; + use Prefix; - #[test] - fn prefix_generator() { - let prefix = vec![0xffu8]; - let keypair = Prefix::new(prefix.clone(), usize::max_value()).generate().unwrap(); - assert!(keypair.address().starts_with(&prefix)); - } + #[test] + fn prefix_generator() { + let prefix = vec![0xffu8]; + let keypair = Prefix::new(prefix.clone(), usize::max_value()) + .generate() + .unwrap(); + assert!(keypair.address().starts_with(&prefix)); + } } diff --git a/accounts/ethkey/src/random.rs b/accounts/ethkey/src/random.rs index 1966cb361..fe1d3c3dd 100644 --- a/accounts/ethkey/src/random.rs +++ b/accounts/ethkey/src/random.rs @@ -14,31 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use rand::os::OsRng; use super::{Generator, KeyPair, SECP256K1}; +use rand::os::OsRng; /// Randomly generates new keypair, instantiating the RNG each time. 
pub struct Random; impl Generator for Random { - type Error = ::std::io::Error; + type Error = ::std::io::Error; - fn generate(&mut self) -> Result { - let mut rng = OsRng::new()?; - match rng.generate() { - Ok(pair) => Ok(pair), - Err(void) => match void {}, // LLVM unreachable - } - } + fn generate(&mut self) -> Result { + let mut rng = OsRng::new()?; + match rng.generate() { + Ok(pair) => Ok(pair), + Err(void) => match void {}, // LLVM unreachable + } + } } impl Generator for OsRng { - type Error = ::Void; + type Error = ::Void; - fn generate(&mut self) -> Result { - let (sec, publ) = SECP256K1.generate_keypair(self) - .expect("context always created with full capabilities; qed"); + fn generate(&mut self) -> Result { + let (sec, publ) = SECP256K1 + .generate_keypair(self) + .expect("context always created with full capabilities; qed"); - Ok(KeyPair::from_keypair(sec, publ)) - } + Ok(KeyPair::from_keypair(sec, publ)) + } } diff --git a/accounts/ethkey/src/secret.rs b/accounts/ethkey/src/secret.rs index 84e849cab..8fa9c7b21 100644 --- a/accounts/ethkey/src/secret.rs +++ b/accounts/ethkey/src/secret.rs @@ -14,285 +14,309 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::fmt; -use std::ops::Deref; -use std::str::FromStr; -use rustc_hex::ToHex; -use secp256k1::constants::{SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE}; -use secp256k1::key; use ethereum_types::H256; use memzero::Memzero; -use {Error, SECP256K1}; +use rustc_hex::ToHex; +use secp256k1::{constants::SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE, key}; +use std::{fmt, ops::Deref, str::FromStr}; +use Error; +use SECP256K1; #[derive(Clone, PartialEq, Eq)] pub struct Secret { - inner: Memzero, + inner: Memzero, } impl ToHex for Secret { - fn to_hex(&self) -> String { - format!("{:x}", *self.inner) - } + fn to_hex(&self) -> String { + format!("{:x}", *self.inner) + } } impl fmt::LowerHex for Secret { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(fmt) - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(fmt) + } } impl fmt::Debug for Secret { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(fmt) - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(fmt) + } } impl fmt::Display for Secret { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "Secret: 0x{:x}{:x}..{:x}{:x}", self.inner[0], self.inner[1], self.inner[30], self.inner[31]) - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!( + fmt, + "Secret: 0x{:x}{:x}..{:x}{:x}", + self.inner[0], self.inner[1], self.inner[30], self.inner[31] + ) + } } impl Secret { - /// Creates a `Secret` from the given slice, returning `None` if the slice length != 32. - pub fn from_slice(key: &[u8]) -> Option { - if key.len() != 32 { - return None - } - let mut h = H256::default(); - h.copy_from_slice(&key[0..32]); - Some(Secret { inner: Memzero::from(h) }) - } + /// Creates a `Secret` from the given slice, returning `None` if the slice length != 32. 
+ pub fn from_slice(key: &[u8]) -> Option { + if key.len() != 32 { + return None; + } + let mut h = H256::default(); + h.copy_from_slice(&key[0..32]); + Some(Secret { + inner: Memzero::from(h), + }) + } - /// Creates zero key, which is invalid for crypto operations, but valid for math operation. - pub fn zero() -> Self { - Secret { inner: Memzero::from(H256::default()) } - } + /// Creates zero key, which is invalid for crypto operations, but valid for math operation. + pub fn zero() -> Self { + Secret { + inner: Memzero::from(H256::default()), + } + } - /// Imports and validates the key. - pub fn from_unsafe_slice(key: &[u8]) -> Result { - let secret = key::SecretKey::from_slice(&super::SECP256K1, key)?; - Ok(secret.into()) - } + /// Imports and validates the key. + pub fn from_unsafe_slice(key: &[u8]) -> Result { + let secret = key::SecretKey::from_slice(&super::SECP256K1, key)?; + Ok(secret.into()) + } - /// Checks validity of this key. - pub fn check_validity(&self) -> Result<(), Error> { - self.to_secp256k1_secret().map(|_| ()) - } + /// Checks validity of this key. 
+ pub fn check_validity(&self) -> Result<(), Error> { + self.to_secp256k1_secret().map(|_| ()) + } - /// Inplace add one secret key to another (scalar + scalar) - pub fn add(&mut self, other: &Secret) -> Result<(), Error> { - match (self.is_zero(), other.is_zero()) { - (true, true) | (false, true) => Ok(()), - (true, false) => { - *self = other.clone(); - Ok(()) - }, - (false, false) => { - let mut key_secret = self.to_secp256k1_secret()?; - let other_secret = other.to_secp256k1_secret()?; - key_secret.add_assign(&SECP256K1, &other_secret)?; + /// Inplace add one secret key to another (scalar + scalar) + pub fn add(&mut self, other: &Secret) -> Result<(), Error> { + match (self.is_zero(), other.is_zero()) { + (true, true) | (false, true) => Ok(()), + (true, false) => { + *self = other.clone(); + Ok(()) + } + (false, false) => { + let mut key_secret = self.to_secp256k1_secret()?; + let other_secret = other.to_secp256k1_secret()?; + key_secret.add_assign(&SECP256K1, &other_secret)?; - *self = key_secret.into(); - Ok(()) - }, - } - } + *self = key_secret.into(); + Ok(()) + } + } + } - /// Inplace subtract one secret key from another (scalar - scalar) - pub fn sub(&mut self, other: &Secret) -> Result<(), Error> { - match (self.is_zero(), other.is_zero()) { - (true, true) | (false, true) => Ok(()), - (true, false) => { - *self = other.clone(); - self.neg() - }, - (false, false) => { - let mut key_secret = self.to_secp256k1_secret()?; - let mut other_secret = other.to_secp256k1_secret()?; - other_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; - key_secret.add_assign(&SECP256K1, &other_secret)?; + /// Inplace subtract one secret key from another (scalar - scalar) + pub fn sub(&mut self, other: &Secret) -> Result<(), Error> { + match (self.is_zero(), other.is_zero()) { + (true, true) | (false, true) => Ok(()), + (true, false) => { + *self = other.clone(); + self.neg() + } + (false, false) => { + let mut key_secret = self.to_secp256k1_secret()?; + let mut other_secret 
= other.to_secp256k1_secret()?; + other_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + key_secret.add_assign(&SECP256K1, &other_secret)?; - *self = key_secret.into(); - Ok(()) - }, - } - } + *self = key_secret.into(); + Ok(()) + } + } + } - /// Inplace decrease secret key (scalar - 1) - pub fn dec(&mut self) -> Result<(), Error> { - match self.is_zero() { - true => { - *self = key::MINUS_ONE_KEY.into(); - Ok(()) - }, - false => { - let mut key_secret = self.to_secp256k1_secret()?; - key_secret.add_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + /// Inplace decrease secret key (scalar - 1) + pub fn dec(&mut self) -> Result<(), Error> { + match self.is_zero() { + true => { + *self = key::MINUS_ONE_KEY.into(); + Ok(()) + } + false => { + let mut key_secret = self.to_secp256k1_secret()?; + key_secret.add_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; - *self = key_secret.into(); - Ok(()) - }, - } - } + *self = key_secret.into(); + Ok(()) + } + } + } - /// Inplace multiply one secret key to another (scalar * scalar) - pub fn mul(&mut self, other: &Secret) -> Result<(), Error> { - match (self.is_zero(), other.is_zero()) { - (true, true) | (true, false) => Ok(()), - (false, true) => { - *self = Self::zero(); - Ok(()) - }, - (false, false) => { - let mut key_secret = self.to_secp256k1_secret()?; - let other_secret = other.to_secp256k1_secret()?; - key_secret.mul_assign(&SECP256K1, &other_secret)?; + /// Inplace multiply one secret key to another (scalar * scalar) + pub fn mul(&mut self, other: &Secret) -> Result<(), Error> { + match (self.is_zero(), other.is_zero()) { + (true, true) | (true, false) => Ok(()), + (false, true) => { + *self = Self::zero(); + Ok(()) + } + (false, false) => { + let mut key_secret = self.to_secp256k1_secret()?; + let other_secret = other.to_secp256k1_secret()?; + key_secret.mul_assign(&SECP256K1, &other_secret)?; - *self = key_secret.into(); - Ok(()) - }, - } - } + *self = key_secret.into(); + Ok(()) + } + } + } - /// Inplace negate secret key 
(-scalar) - pub fn neg(&mut self) -> Result<(), Error> { - match self.is_zero() { - true => Ok(()), - false => { - let mut key_secret = self.to_secp256k1_secret()?; - key_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + /// Inplace negate secret key (-scalar) + pub fn neg(&mut self) -> Result<(), Error> { + match self.is_zero() { + true => Ok(()), + false => { + let mut key_secret = self.to_secp256k1_secret()?; + key_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; - *self = key_secret.into(); - Ok(()) - }, - } - } + *self = key_secret.into(); + Ok(()) + } + } + } - /// Inplace inverse secret key (1 / scalar) - pub fn inv(&mut self) -> Result<(), Error> { - let mut key_secret = self.to_secp256k1_secret()?; - key_secret.inv_assign(&SECP256K1)?; + /// Inplace inverse secret key (1 / scalar) + pub fn inv(&mut self) -> Result<(), Error> { + let mut key_secret = self.to_secp256k1_secret()?; + key_secret.inv_assign(&SECP256K1)?; - *self = key_secret.into(); - Ok(()) - } + *self = key_secret.into(); + Ok(()) + } - /// Compute power of secret key inplace (secret ^ pow). - /// This function is not intended to be used with large powers. - pub fn pow(&mut self, pow: usize) -> Result<(), Error> { - if self.is_zero() { - return Ok(()); - } + /// Compute power of secret key inplace (secret ^ pow). + /// This function is not intended to be used with large powers. + pub fn pow(&mut self, pow: usize) -> Result<(), Error> { + if self.is_zero() { + return Ok(()); + } - match pow { - 0 => *self = key::ONE_KEY.into(), - 1 => (), - _ => { - let c = self.clone(); - for _ in 1..pow { - self.mul(&c)?; - } - }, - } + match pow { + 0 => *self = key::ONE_KEY.into(), + 1 => (), + _ => { + let c = self.clone(); + for _ in 1..pow { + self.mul(&c)?; + } + } + } - Ok(()) - } + Ok(()) + } - /// Create `secp256k1::key::SecretKey` based on this secret - pub fn to_secp256k1_secret(&self) -> Result { - Ok(key::SecretKey::from_slice(&SECP256K1, &self[..])?) 
- } + /// Create `secp256k1::key::SecretKey` based on this secret + pub fn to_secp256k1_secret(&self) -> Result { + Ok(key::SecretKey::from_slice(&SECP256K1, &self[..])?) + } } impl FromStr for Secret { - type Err = Error; - fn from_str(s: &str) -> Result { - Ok(H256::from_str(s).map_err(|e| Error::Custom(format!("{:?}", e)))?.into()) - } + type Err = Error; + fn from_str(s: &str) -> Result { + Ok(H256::from_str(s) + .map_err(|e| Error::Custom(format!("{:?}", e)))? + .into()) + } } impl From<[u8; 32]> for Secret { - fn from(k: [u8; 32]) -> Self { - Secret { inner: Memzero::from(H256(k)) } - } + fn from(k: [u8; 32]) -> Self { + Secret { + inner: Memzero::from(H256(k)), + } + } } impl From for Secret { - fn from(s: H256) -> Self { - s.0.into() - } + fn from(s: H256) -> Self { + s.0.into() + } } impl From<&'static str> for Secret { - fn from(s: &'static str) -> Self { - s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s)) - } + fn from(s: &'static str) -> Self { + s.parse().expect(&format!( + "invalid string literal for {}: '{}'", + stringify!(Self), + s + )) + } } impl From for Secret { - fn from(key: key::SecretKey) -> Self { - let mut a = [0; SECP256K1_SECRET_KEY_SIZE]; - a.copy_from_slice(&key[0 .. 
SECP256K1_SECRET_KEY_SIZE]); - a.into() - } + fn from(key: key::SecretKey) -> Self { + let mut a = [0; SECP256K1_SECRET_KEY_SIZE]; + a.copy_from_slice(&key[0..SECP256K1_SECRET_KEY_SIZE]); + a.into() + } } impl Deref for Secret { - type Target = H256; + type Target = H256; - fn deref(&self) -> &Self::Target { - &self.inner - } + fn deref(&self) -> &Self::Target { + &self.inner + } } #[cfg(test)] mod tests { - use std::str::FromStr; - use super::super::{Random, Generator}; - use super::Secret; + use super::{ + super::{Generator, Random}, + Secret, + }; + use std::str::FromStr; - #[test] - fn multiplicating_secret_inversion_with_secret_gives_one() { - let secret = Random.generate().unwrap().secret().clone(); - let mut inversion = secret.clone(); - inversion.inv().unwrap(); - inversion.mul(&secret).unwrap(); - assert_eq!(inversion, Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap()); - } + #[test] + fn multiplicating_secret_inversion_with_secret_gives_one() { + let secret = Random.generate().unwrap().secret().clone(); + let mut inversion = secret.clone(); + inversion.inv().unwrap(); + inversion.mul(&secret).unwrap(); + assert_eq!( + inversion, + Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001") + .unwrap() + ); + } - #[test] - fn secret_inversion_is_reversible_with_inversion() { - let secret = Random.generate().unwrap().secret().clone(); - let mut inversion = secret.clone(); - inversion.inv().unwrap(); - inversion.inv().unwrap(); - assert_eq!(inversion, secret); - } + #[test] + fn secret_inversion_is_reversible_with_inversion() { + let secret = Random.generate().unwrap().secret().clone(); + let mut inversion = secret.clone(); + inversion.inv().unwrap(); + inversion.inv().unwrap(); + assert_eq!(inversion, secret); + } - #[test] - fn secret_pow() { - let secret = Random.generate().unwrap().secret().clone(); + #[test] + fn secret_pow() { + let secret = 
Random.generate().unwrap().secret().clone(); - let mut pow0 = secret.clone(); - pow0.pow(0).unwrap(); - assert_eq!(pow0, Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap()); + let mut pow0 = secret.clone(); + pow0.pow(0).unwrap(); + assert_eq!( + pow0, + Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001") + .unwrap() + ); - let mut pow1 = secret.clone(); - pow1.pow(1).unwrap(); - assert_eq!(pow1, secret); + let mut pow1 = secret.clone(); + pow1.pow(1).unwrap(); + assert_eq!(pow1, secret); - let mut pow2 = secret.clone(); - pow2.pow(2).unwrap(); - let mut pow2_expected = secret.clone(); - pow2_expected.mul(&secret).unwrap(); - assert_eq!(pow2, pow2_expected); + let mut pow2 = secret.clone(); + pow2.pow(2).unwrap(); + let mut pow2_expected = secret.clone(); + pow2_expected.mul(&secret).unwrap(); + assert_eq!(pow2, pow2_expected); - let mut pow3 = secret.clone(); - pow3.pow(3).unwrap(); - let mut pow3_expected = secret.clone(); - pow3_expected.mul(&secret).unwrap(); - pow3_expected.mul(&secret).unwrap(); - assert_eq!(pow3, pow3_expected); - } + let mut pow3 = secret.clone(); + pow3.pow(3).unwrap(); + let mut pow3_expected = secret.clone(); + pow3_expected.mul(&secret).unwrap(); + pow3_expected.mul(&secret).unwrap(); + assert_eq!(pow3, pow3_expected); + } } diff --git a/accounts/ethkey/src/signature.rs b/accounts/ethkey/src/signature.rs index cc712df69..efd192119 100644 --- a/accounts/ethkey/src/signature.rs +++ b/accounts/ethkey/src/signature.rs @@ -14,281 +14,312 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::ops::{Deref, DerefMut}; -use std::cmp::PartialEq; -use std::fmt; -use std::str::FromStr; -use std::hash::{Hash, Hasher}; -use secp256k1::{Message as SecpMessage, RecoverableSignature, RecoveryId, Error as SecpError}; -use secp256k1::key::{SecretKey, PublicKey}; -use rustc_hex::{ToHex, FromHex}; -use ethereum_types::{H520, H256}; -use {Secret, Public, SECP256K1, Error, Message, public_to_address, Address}; +use ethereum_types::{H256, H520}; +use public_to_address; +use rustc_hex::{FromHex, ToHex}; +use secp256k1::{ + key::{PublicKey, SecretKey}, + Error as SecpError, Message as SecpMessage, RecoverableSignature, RecoveryId, +}; +use std::{ + cmp::PartialEq, + fmt, + hash::{Hash, Hasher}, + ops::{Deref, DerefMut}, + str::FromStr, +}; +use Address; +use Error; +use Message; +use Public; +use Secret; +use SECP256K1; /// Signature encoded as RSV components #[repr(C)] pub struct Signature([u8; 65]); impl Signature { - /// Get a slice into the 'r' portion of the data. - pub fn r(&self) -> &[u8] { - &self.0[0..32] - } + /// Get a slice into the 'r' portion of the data. + pub fn r(&self) -> &[u8] { + &self.0[0..32] + } - /// Get a slice into the 's' portion of the data. - pub fn s(&self) -> &[u8] { - &self.0[32..64] - } + /// Get a slice into the 's' portion of the data. + pub fn s(&self) -> &[u8] { + &self.0[32..64] + } - /// Get the recovery byte. - pub fn v(&self) -> u8 { - self.0[64] - } + /// Get the recovery byte. + pub fn v(&self) -> u8 { + self.0[64] + } - /// Encode the signature into RSV array (V altered to be in "Electrum" notation). - pub fn into_electrum(mut self) -> [u8; 65] { - self.0[64] += 27; - self.0 - } + /// Encode the signature into RSV array (V altered to be in "Electrum" notation). + pub fn into_electrum(mut self) -> [u8; 65] { + self.0[64] += 27; + self.0 + } - /// Parse bytes as a signature encoded as RSV (V in "Electrum" notation). - /// May return empty (invalid) signature if given data has invalid length. 
- pub fn from_electrum(data: &[u8]) -> Self { - if data.len() != 65 || data[64] < 27 { - // fallback to empty (invalid) signature - return Signature::default(); - } + /// Parse bytes as a signature encoded as RSV (V in "Electrum" notation). + /// May return empty (invalid) signature if given data has invalid length. + pub fn from_electrum(data: &[u8]) -> Self { + if data.len() != 65 || data[64] < 27 { + // fallback to empty (invalid) signature + return Signature::default(); + } - let mut sig = [0u8; 65]; - sig.copy_from_slice(data); - sig[64] -= 27; - Signature(sig) - } + let mut sig = [0u8; 65]; + sig.copy_from_slice(data); + sig[64] -= 27; + Signature(sig) + } - /// Create a signature object from the sig. - pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Self { - let mut sig = [0u8; 65]; - sig[0..32].copy_from_slice(&r); - sig[32..64].copy_from_slice(&s); - sig[64] = v; - Signature(sig) - } + /// Create a signature object from the sig. + pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Self { + let mut sig = [0u8; 65]; + sig[0..32].copy_from_slice(&r); + sig[32..64].copy_from_slice(&s); + sig[64] = v; + Signature(sig) + } - /// Check if this is a "low" signature. - pub fn is_low_s(&self) -> bool { - H256::from_slice(self.s()) <= "7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0".into() - } + /// Check if this is a "low" signature. + pub fn is_low_s(&self) -> bool { + H256::from_slice(self.s()) + <= "7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0".into() + } - /// Check if each component of the signature is in range. - pub fn is_valid(&self) -> bool { - self.v() <= 1 && - H256::from_slice(self.r()) < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() && - H256::from_slice(self.r()) >= 1.into() && - H256::from_slice(self.s()) < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() && - H256::from_slice(self.s()) >= 1.into() - } + /// Check if each component of the signature is in range. 
+ pub fn is_valid(&self) -> bool { + self.v() <= 1 + && H256::from_slice(self.r()) + < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() + && H256::from_slice(self.r()) >= 1.into() + && H256::from_slice(self.s()) + < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() + && H256::from_slice(self.s()) >= 1.into() + } } // manual implementation large arrays don't have trait impls by default. // remove when integer generics exist impl PartialEq for Signature { - fn eq(&self, other: &Self) -> bool { - &self.0[..] == &other.0[..] - } + fn eq(&self, other: &Self) -> bool { + &self.0[..] == &other.0[..] + } } // manual implementation required in Rust 1.13+, see `std::cmp::AssertParamIsEq`. -impl Eq for Signature { } +impl Eq for Signature {} // also manual for the same reason, but the pretty printing might be useful. impl fmt::Debug for Signature { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("Signature") - .field("r", &self.0[0..32].to_hex()) - .field("s", &self.0[32..64].to_hex()) - .field("v", &self.0[64..65].to_hex()) - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("Signature") + .field("r", &self.0[0..32].to_hex()) + .field("s", &self.0[32..64].to_hex()) + .field("v", &self.0[64..65].to_hex()) + .finish() + } } impl fmt::Display for Signature { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, "{}", self.to_hex()) - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "{}", self.to_hex()) + } } impl FromStr for Signature { - type Err = Error; + type Err = Error; - fn from_str(s: &str) -> Result { - match s.from_hex() { - Ok(ref hex) if hex.len() == 65 => { - let mut data = [0; 65]; - data.copy_from_slice(&hex[0..65]); - Ok(Signature(data)) - }, - _ => Err(Error::InvalidSignature) - } - } + fn from_str(s: &str) -> Result { + match s.from_hex() { + Ok(ref hex) if hex.len() == 
65 => { + let mut data = [0; 65]; + data.copy_from_slice(&hex[0..65]); + Ok(Signature(data)) + } + _ => Err(Error::InvalidSignature), + } + } } impl Default for Signature { - fn default() -> Self { - Signature([0; 65]) - } + fn default() -> Self { + Signature([0; 65]) + } } impl Hash for Signature { fn hash(&self, state: &mut H) { - H520::from(self.0).hash(state); + H520::from(self.0).hash(state); } } impl Clone for Signature { fn clone(&self) -> Self { - Signature(self.0) + Signature(self.0) } } impl From<[u8; 65]> for Signature { - fn from(s: [u8; 65]) -> Self { - Signature(s) - } + fn from(s: [u8; 65]) -> Self { + Signature(s) + } } impl Into<[u8; 65]> for Signature { - fn into(self) -> [u8; 65] { - self.0 - } + fn into(self) -> [u8; 65] { + self.0 + } } impl From for H520 { - fn from(s: Signature) -> Self { - H520::from(s.0) - } + fn from(s: Signature) -> Self { + H520::from(s.0) + } } impl From for Signature { - fn from(bytes: H520) -> Self { - Signature(bytes.into()) - } + fn from(bytes: H520) -> Self { + Signature(bytes.into()) + } } impl Deref for Signature { - type Target = [u8; 65]; + type Target = [u8; 65]; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl DerefMut for Signature { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } pub fn sign(secret: &Secret, message: &Message) -> Result { - let context = &SECP256K1; - let sec = SecretKey::from_slice(context, &secret)?; - let s = context.sign_recoverable(&SecpMessage::from_slice(&message[..])?, &sec)?; - let (rec_id, data) = s.serialize_compact(context); - let mut data_arr = [0; 65]; + let context = &SECP256K1; + let sec = SecretKey::from_slice(context, &secret)?; + let s = context.sign_recoverable(&SecpMessage::from_slice(&message[..])?, &sec)?; + let (rec_id, data) = s.serialize_compact(context); + let mut data_arr = [0; 65]; - // no need to check if s is low, 
it always is - data_arr[0..64].copy_from_slice(&data[0..64]); - data_arr[64] = rec_id.to_i32() as u8; - Ok(Signature(data_arr)) + // no need to check if s is low, it always is + data_arr[0..64].copy_from_slice(&data[0..64]); + data_arr[64] = rec_id.to_i32() as u8; + Ok(Signature(data_arr)) } -pub fn verify_public(public: &Public, signature: &Signature, message: &Message) -> Result { - let context = &SECP256K1; - let rsig = RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; - let sig = rsig.to_standard(context); +pub fn verify_public( + public: &Public, + signature: &Signature, + message: &Message, +) -> Result { + let context = &SECP256K1; + let rsig = RecoverableSignature::from_compact( + context, + &signature[0..64], + RecoveryId::from_i32(signature[64] as i32)?, + )?; + let sig = rsig.to_standard(context); - let pdata: [u8; 65] = { - let mut temp = [4u8; 65]; - temp[1..65].copy_from_slice(&**public); - temp - }; + let pdata: [u8; 65] = { + let mut temp = [4u8; 65]; + temp[1..65].copy_from_slice(&**public); + temp + }; - let publ = PublicKey::from_slice(context, &pdata)?; - match context.verify(&SecpMessage::from_slice(&message[..])?, &sig, &publ) { - Ok(_) => Ok(true), - Err(SecpError::IncorrectSignature) => Ok(false), - Err(x) => Err(Error::from(x)) - } + let publ = PublicKey::from_slice(context, &pdata)?; + match context.verify(&SecpMessage::from_slice(&message[..])?, &sig, &publ) { + Ok(_) => Ok(true), + Err(SecpError::IncorrectSignature) => Ok(false), + Err(x) => Err(Error::from(x)), + } } -pub fn verify_address(address: &Address, signature: &Signature, message: &Message) -> Result { - let public = recover(signature, message)?; - let recovered_address = public_to_address(&public); - Ok(address == &recovered_address) +pub fn verify_address( + address: &Address, + signature: &Signature, + message: &Message, +) -> Result { + let public = recover(signature, message)?; + let recovered_address = 
public_to_address(&public); + Ok(address == &recovered_address) } pub fn recover(signature: &Signature, message: &Message) -> Result { - let context = &SECP256K1; - let rsig = RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; - let pubkey = context.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?; - let serialized = pubkey.serialize_vec(context, false); + let context = &SECP256K1; + let rsig = RecoverableSignature::from_compact( + context, + &signature[0..64], + RecoveryId::from_i32(signature[64] as i32)?, + )?; + let pubkey = context.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?; + let serialized = pubkey.serialize_vec(context, false); - let mut public = Public::default(); - public.copy_from_slice(&serialized[1..65]); - Ok(public) + let mut public = Public::default(); + public.copy_from_slice(&serialized[1..65]); + Ok(public) } #[cfg(test)] mod tests { - use std::str::FromStr; - use {Generator, Random, Message}; - use super::{sign, verify_public, verify_address, recover, Signature}; + use super::{recover, sign, verify_address, verify_public, Signature}; + use std::str::FromStr; + use Generator; + use Message; + use Random; - #[test] - fn vrs_conversion() { - // given - let keypair = Random.generate().unwrap(); - let message = Message::default(); - let signature = sign(keypair.secret(), &message).unwrap(); + #[test] + fn vrs_conversion() { + // given + let keypair = Random.generate().unwrap(); + let message = Message::default(); + let signature = sign(keypair.secret(), &message).unwrap(); - // when - let vrs = signature.clone().into_electrum(); - let from_vrs = Signature::from_electrum(&vrs); + // when + let vrs = signature.clone().into_electrum(); + let from_vrs = Signature::from_electrum(&vrs); - // then - assert_eq!(signature, from_vrs); - } + // then + assert_eq!(signature, from_vrs); + } - #[test] - fn signature_to_and_from_str() { - let keypair = Random.generate().unwrap(); - let 
message = Message::default(); - let signature = sign(keypair.secret(), &message).unwrap(); - let string = format!("{}", signature); - let deserialized = Signature::from_str(&string).unwrap(); - assert_eq!(signature, deserialized); - } + #[test] + fn signature_to_and_from_str() { + let keypair = Random.generate().unwrap(); + let message = Message::default(); + let signature = sign(keypair.secret(), &message).unwrap(); + let string = format!("{}", signature); + let deserialized = Signature::from_str(&string).unwrap(); + assert_eq!(signature, deserialized); + } - #[test] - fn sign_and_recover_public() { - let keypair = Random.generate().unwrap(); - let message = Message::default(); - let signature = sign(keypair.secret(), &message).unwrap(); - assert_eq!(keypair.public(), &recover(&signature, &message).unwrap()); - } + #[test] + fn sign_and_recover_public() { + let keypair = Random.generate().unwrap(); + let message = Message::default(); + let signature = sign(keypair.secret(), &message).unwrap(); + assert_eq!(keypair.public(), &recover(&signature, &message).unwrap()); + } - #[test] - fn sign_and_verify_public() { - let keypair = Random.generate().unwrap(); - let message = Message::default(); - let signature = sign(keypair.secret(), &message).unwrap(); - assert!(verify_public(keypair.public(), &signature, &message).unwrap()); - } + #[test] + fn sign_and_verify_public() { + let keypair = Random.generate().unwrap(); + let message = Message::default(); + let signature = sign(keypair.secret(), &message).unwrap(); + assert!(verify_public(keypair.public(), &signature, &message).unwrap()); + } - #[test] - fn sign_and_verify_address() { - let keypair = Random.generate().unwrap(); - let message = Message::default(); - let signature = sign(keypair.secret(), &message).unwrap(); - assert!(verify_address(&keypair.address(), &signature, &message).unwrap()); - } + #[test] + fn sign_and_verify_address() { + let keypair = Random.generate().unwrap(); + let message = Message::default(); 
+ let signature = sign(keypair.secret(), &message).unwrap(); + assert!(verify_address(&keypair.address(), &signature, &message).unwrap()); + } } diff --git a/accounts/ethstore/cli/src/crack.rs b/accounts/ethstore/cli/src/crack.rs index abe171c35..deb858d0d 100644 --- a/accounts/ethstore/cli/src/crack.rs +++ b/accounts/ethstore/cli/src/crack.rs @@ -14,53 +14,53 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::{cmp, thread}; -use std::sync::Arc; -use std::collections::VecDeque; use parking_lot::Mutex; +use std::{cmp, collections::VecDeque, sync::Arc, thread}; -use ethstore::{ethkey::Password, PresaleWallet, Error}; +use ethstore::{ethkey::Password, Error, PresaleWallet}; use num_cpus; pub fn run(passwords: VecDeque, wallet_path: &str) -> Result<(), Error> { - let passwords = Arc::new(Mutex::new(passwords)); + let passwords = Arc::new(Mutex::new(passwords)); - let mut handles = Vec::new(); + let mut handles = Vec::new(); - for _ in 0..num_cpus::get() { - let passwords = passwords.clone(); - let wallet = PresaleWallet::open(&wallet_path)?; - handles.push(thread::spawn(move || { - look_for_password(passwords, wallet); - })); - } + for _ in 0..num_cpus::get() { + let passwords = passwords.clone(); + let wallet = PresaleWallet::open(&wallet_path)?; + handles.push(thread::spawn(move || { + look_for_password(passwords, wallet); + })); + } - for handle in handles { - handle.join().map_err(|err| Error::Custom(format!("Error finishing thread: {:?}", err)))?; - } + for handle in handles { + handle + .join() + .map_err(|err| Error::Custom(format!("Error finishing thread: {:?}", err)))?; + } - Ok(()) + Ok(()) } fn look_for_password(passwords: Arc>>, wallet: PresaleWallet) { - let mut counter = 0; - while !passwords.lock().is_empty() { - let package = { - let mut passwords = passwords.lock(); - let len = passwords.len(); - passwords.split_off(cmp::min(len, 32)) - }; - for pass in package { - counter += 
1; - match wallet.decrypt(&pass) { - Ok(_) => { - println!("Found password: {}", pass.as_str()); - passwords.lock().clear(); - return; - }, - _ if counter % 100 == 0 => print!("."), - _ => {}, - } - } - } + let mut counter = 0; + while !passwords.lock().is_empty() { + let package = { + let mut passwords = passwords.lock(); + let len = passwords.len(); + passwords.split_off(cmp::min(len, 32)) + }; + for pass in package { + counter += 1; + match wallet.decrypt(&pass) { + Ok(_) => { + println!("Found password: {}", pass.as_str()); + passwords.lock().clear(); + return; + } + _ if counter % 100 == 0 => print!("."), + _ => {} + } + } + } } diff --git a/accounts/ethstore/cli/src/main.rs b/accounts/ethstore/cli/src/main.rs index 0f5644063..2056fb73b 100644 --- a/accounts/ethstore/cli/src/main.rs +++ b/accounts/ethstore/cli/src/main.rs @@ -28,14 +28,15 @@ extern crate env_logger; #[macro_use] extern crate serde_derive; -use std::collections::VecDeque; -use std::io::Read; -use std::{env, process, fs, fmt}; +use std::{collections::VecDeque, env, fmt, fs, io::Read, process}; use docopt::Docopt; -use ethstore::accounts_dir::{KeyDirectory, RootDiskDirectory}; -use ethstore::ethkey::{Address, Password}; -use ethstore::{EthStore, SimpleSecretStore, SecretStore, import_accounts, PresaleWallet, SecretVaultRef, StoreAccountRef}; +use ethstore::{ + accounts_dir::{KeyDirectory, RootDiskDirectory}, + ethkey::{Address, Password}, + import_accounts, EthStore, PresaleWallet, SecretStore, SecretVaultRef, SimpleSecretStore, + StoreAccountRef, +}; mod crack; @@ -92,226 +93,271 @@ Commands: #[derive(Debug, Deserialize)] struct Args { - cmd_insert: bool, - cmd_change_pwd: bool, - cmd_list: bool, - cmd_import: bool, - cmd_import_wallet: bool, - cmd_find_wallet_pass: bool, - cmd_remove: bool, - cmd_sign: bool, - cmd_public: bool, - cmd_list_vaults: bool, - cmd_create_vault: bool, - cmd_change_vault_pwd: bool, - cmd_move_to_vault: bool, - cmd_move_from_vault: bool, - arg_secret: String, - 
arg_password: String, - arg_old_pwd: String, - arg_new_pwd: String, - arg_address: String, - arg_message: String, - arg_path: String, - arg_vault: String, - flag_src: String, - flag_dir: String, - flag_vault: String, - flag_vault_pwd: String, + cmd_insert: bool, + cmd_change_pwd: bool, + cmd_list: bool, + cmd_import: bool, + cmd_import_wallet: bool, + cmd_find_wallet_pass: bool, + cmd_remove: bool, + cmd_sign: bool, + cmd_public: bool, + cmd_list_vaults: bool, + cmd_create_vault: bool, + cmd_change_vault_pwd: bool, + cmd_move_to_vault: bool, + cmd_move_from_vault: bool, + arg_secret: String, + arg_password: String, + arg_old_pwd: String, + arg_new_pwd: String, + arg_address: String, + arg_message: String, + arg_path: String, + arg_vault: String, + flag_src: String, + flag_dir: String, + flag_vault: String, + flag_vault_pwd: String, } enum Error { - Ethstore(ethstore::Error), - Docopt(docopt::Error), + Ethstore(ethstore::Error), + Docopt(docopt::Error), } impl From for Error { - fn from(err: ethstore::Error) -> Self { - Error::Ethstore(err) - } + fn from(err: ethstore::Error) -> Self { + Error::Ethstore(err) + } } impl From for Error { - fn from(err: docopt::Error) -> Self { - Error::Docopt(err) - } + fn from(err: docopt::Error) -> Self { + Error::Docopt(err) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Ethstore(ref err) => fmt::Display::fmt(err, f), - Error::Docopt(ref err) => fmt::Display::fmt(err, f), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Ethstore(ref err) => fmt::Display::fmt(err, f), + Error::Docopt(ref err) => fmt::Display::fmt(err, f), + } + } } fn main() { - panic_hook::set_abort(); - if env::var("RUST_LOG").is_err() { - env::set_var("RUST_LOG", "warn") - } - env_logger::try_init().expect("Logger initialized only once."); + panic_hook::set_abort(); + if env::var("RUST_LOG").is_err() { + env::set_var("RUST_LOG", "warn") + } + 
env_logger::try_init().expect("Logger initialized only once."); - match execute(env::args()) { - Ok(result) => println!("{}", result), - Err(Error::Docopt(ref e)) => e.exit(), - Err(err) => { - eprintln!("{}", err); - process::exit(1); - } - } + match execute(env::args()) { + Ok(result) => println!("{}", result), + Err(Error::Docopt(ref e)) => e.exit(), + Err(err) => { + eprintln!("{}", err); + process::exit(1); + } + } } fn key_dir(location: &str, password: Option) -> Result, Error> { - let dir: RootDiskDirectory = match location { - "geth" => RootDiskDirectory::create(dir::geth(false))?, - "geth-test" => RootDiskDirectory::create(dir::geth(true))?, - path if path.starts_with("parity") => { - let chain = path.split('-').nth(1).unwrap_or("ethereum"); - let path = dir::parity(chain); - RootDiskDirectory::create(path)? - }, - path => RootDiskDirectory::create(path)?, - }; + let dir: RootDiskDirectory = match location { + "geth" => RootDiskDirectory::create(dir::geth(false))?, + "geth-test" => RootDiskDirectory::create(dir::geth(true))?, + path if path.starts_with("parity") => { + let chain = path.split('-').nth(1).unwrap_or("ethereum"); + let path = dir::parity(chain); + RootDiskDirectory::create(path)? 
+ } + path => RootDiskDirectory::create(path)?, + }; - Ok(Box::new(dir.with_password(password))) + Ok(Box::new(dir.with_password(password))) } fn open_args_vault(store: &EthStore, args: &Args) -> Result { - if args.flag_vault.is_empty() { - return Ok(SecretVaultRef::Root); - } + if args.flag_vault.is_empty() { + return Ok(SecretVaultRef::Root); + } - let vault_pwd = load_password(&args.flag_vault_pwd)?; - store.open_vault(&args.flag_vault, &vault_pwd)?; - Ok(SecretVaultRef::Vault(args.flag_vault.clone())) + let vault_pwd = load_password(&args.flag_vault_pwd)?; + store.open_vault(&args.flag_vault, &vault_pwd)?; + Ok(SecretVaultRef::Vault(args.flag_vault.clone())) } -fn open_args_vault_account(store: &EthStore, address: Address, args: &Args) -> Result { - match open_args_vault(store, args)? { - SecretVaultRef::Root => Ok(StoreAccountRef::root(address)), - SecretVaultRef::Vault(name) => Ok(StoreAccountRef::vault(&name, address)), - } +fn open_args_vault_account( + store: &EthStore, + address: Address, + args: &Args, +) -> Result { + match open_args_vault(store, args)? 
{ + SecretVaultRef::Root => Ok(StoreAccountRef::root(address)), + SecretVaultRef::Vault(name) => Ok(StoreAccountRef::vault(&name, address)), + } } fn format_accounts(accounts: &[Address]) -> String { - accounts.iter() - .enumerate() - .map(|(i, a)| format!("{:2}: 0x{:x}", i, a)) - .collect::>() - .join("\n") + accounts + .iter() + .enumerate() + .map(|(i, a)| format!("{:2}: 0x{:x}", i, a)) + .collect::>() + .join("\n") } fn format_vaults(vaults: &[String]) -> String { - vaults.join("\n") + vaults.join("\n") } fn load_password(path: &str) -> Result { - let mut file = fs::File::open(path).map_err(|e| ethstore::Error::Custom(format!("Error opening password file '{}': {}", path, e)))?; - let mut password = String::new(); - file.read_to_string(&mut password).map_err(|e| ethstore::Error::Custom(format!("Error reading password file '{}': {}", path, e)))?; - // drop EOF - let _ = password.pop(); - Ok(password.into()) + let mut file = fs::File::open(path).map_err(|e| { + ethstore::Error::Custom(format!("Error opening password file '{}': {}", path, e)) + })?; + let mut password = String::new(); + file.read_to_string(&mut password).map_err(|e| { + ethstore::Error::Custom(format!("Error reading password file '{}': {}", path, e)) + })?; + // drop EOF + let _ = password.pop(); + Ok(password.into()) } -fn execute(command: I) -> Result where I: IntoIterator, S: AsRef { - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.argv(command).deserialize())?; +fn execute(command: I) -> Result +where + I: IntoIterator, + S: AsRef, +{ + let args: Args = Docopt::new(USAGE).and_then(|d| d.argv(command).deserialize())?; - let store = EthStore::open(key_dir(&args.flag_dir, None)?)?; + let store = EthStore::open(key_dir(&args.flag_dir, None)?)?; - return if args.cmd_insert { - let secret = args.arg_secret.parse().map_err(|_| ethstore::Error::InvalidSecret)?; - let password = load_password(&args.arg_password)?; - let vault_ref = open_args_vault(&store, &args)?; - let account_ref = 
store.insert_account(vault_ref, secret, &password)?; - Ok(format!("0x{:x}", account_ref.address)) - } else if args.cmd_change_pwd { - let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?; - let old_pwd = load_password(&args.arg_old_pwd)?; - let new_pwd = load_password(&args.arg_new_pwd)?; - let account_ref = open_args_vault_account(&store, address, &args)?; - let ok = store.change_password(&account_ref, &old_pwd, &new_pwd).is_ok(); - Ok(format!("{}", ok)) - } else if args.cmd_list { - let vault_ref = open_args_vault(&store, &args)?; - let accounts = store.accounts()?; - let accounts: Vec<_> = accounts - .into_iter() - .filter(|a| &a.vault == &vault_ref) - .map(|a| a.address) - .collect(); - Ok(format_accounts(&accounts)) - } else if args.cmd_import { - let password = match args.arg_password.as_ref() { - "" => None, - _ => Some(load_password(&args.arg_password)?) - }; - let src = key_dir(&args.flag_src, password)?; - let dst = key_dir(&args.flag_dir, None)?; + return if args.cmd_insert { + let secret = args + .arg_secret + .parse() + .map_err(|_| ethstore::Error::InvalidSecret)?; + let password = load_password(&args.arg_password)?; + let vault_ref = open_args_vault(&store, &args)?; + let account_ref = store.insert_account(vault_ref, secret, &password)?; + Ok(format!("0x{:x}", account_ref.address)) + } else if args.cmd_change_pwd { + let address = args + .arg_address + .parse() + .map_err(|_| ethstore::Error::InvalidAccount)?; + let old_pwd = load_password(&args.arg_old_pwd)?; + let new_pwd = load_password(&args.arg_new_pwd)?; + let account_ref = open_args_vault_account(&store, address, &args)?; + let ok = store + .change_password(&account_ref, &old_pwd, &new_pwd) + .is_ok(); + Ok(format!("{}", ok)) + } else if args.cmd_list { + let vault_ref = open_args_vault(&store, &args)?; + let accounts = store.accounts()?; + let accounts: Vec<_> = accounts + .into_iter() + .filter(|a| &a.vault == &vault_ref) + .map(|a| a.address) + .collect(); + 
Ok(format_accounts(&accounts)) + } else if args.cmd_import { + let password = match args.arg_password.as_ref() { + "" => None, + _ => Some(load_password(&args.arg_password)?), + }; + let src = key_dir(&args.flag_src, password)?; + let dst = key_dir(&args.flag_dir, None)?; - let accounts = import_accounts(&*src, &*dst)?; - Ok(format_accounts(&accounts)) - } else if args.cmd_import_wallet { - let wallet = PresaleWallet::open(&args.arg_path)?; - let password = load_password(&args.arg_password)?; - let kp = wallet.decrypt(&password)?; - let vault_ref = open_args_vault(&store, &args)?; - let account_ref = store.insert_account(vault_ref, kp.secret().clone(), &password)?; - Ok(format!("0x{:x}", account_ref.address)) - } else if args.cmd_find_wallet_pass { - let passwords = load_password(&args.arg_password)?; - let passwords = passwords.as_str().lines().map(|line| str::to_owned(line).into()).collect::>(); - crack::run(passwords, &args.arg_path)?; - Ok(format!("Password not found.")) - } else if args.cmd_remove { - let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?; - let password = load_password(&args.arg_password)?; - let account_ref = open_args_vault_account(&store, address, &args)?; - let ok = store.remove_account(&account_ref, &password).is_ok(); - Ok(format!("{}", ok)) - } else if args.cmd_sign { - let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?; - let message = args.arg_message.parse().map_err(|_| ethstore::Error::InvalidMessage)?; - let password = load_password(&args.arg_password)?; - let account_ref = open_args_vault_account(&store, address, &args)?; - let signature = store.sign(&account_ref, &password, &message)?; - Ok(format!("0x{}", signature)) - } else if args.cmd_public { - let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?; - let password = load_password(&args.arg_password)?; - let account_ref = open_args_vault_account(&store, address, &args)?; - let 
public = store.public(&account_ref, &password)?; - Ok(format!("0x{:x}", public)) - } else if args.cmd_list_vaults { - let vaults = store.list_vaults()?; - Ok(format_vaults(&vaults)) - } else if args.cmd_create_vault { - let password = load_password(&args.arg_password)?; - store.create_vault(&args.arg_vault, &password)?; - Ok("OK".to_owned()) - } else if args.cmd_change_vault_pwd { - let old_pwd = load_password(&args.arg_old_pwd)?; - let new_pwd = load_password(&args.arg_new_pwd)?; - store.open_vault(&args.arg_vault, &old_pwd)?; - store.change_vault_password(&args.arg_vault, &new_pwd)?; - Ok("OK".to_owned()) - } else if args.cmd_move_to_vault { - let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?; - let password = load_password(&args.arg_password)?; - let account_ref = open_args_vault_account(&store, address, &args)?; - store.open_vault(&args.arg_vault, &password)?; - store.change_account_vault(SecretVaultRef::Vault(args.arg_vault), account_ref)?; - Ok("OK".to_owned()) - } else if args.cmd_move_from_vault { - let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?; - let password = load_password(&args.arg_password)?; - store.open_vault(&args.arg_vault, &password)?; - store.change_account_vault(SecretVaultRef::Root, StoreAccountRef::vault(&args.arg_vault, address))?; - Ok("OK".to_owned()) - } else { - Ok(format!("{}", USAGE)) - } + let accounts = import_accounts(&*src, &*dst)?; + Ok(format_accounts(&accounts)) + } else if args.cmd_import_wallet { + let wallet = PresaleWallet::open(&args.arg_path)?; + let password = load_password(&args.arg_password)?; + let kp = wallet.decrypt(&password)?; + let vault_ref = open_args_vault(&store, &args)?; + let account_ref = store.insert_account(vault_ref, kp.secret().clone(), &password)?; + Ok(format!("0x{:x}", account_ref.address)) + } else if args.cmd_find_wallet_pass { + let passwords = load_password(&args.arg_password)?; + let passwords = passwords + .as_str() + 
.lines() + .map(|line| str::to_owned(line).into()) + .collect::>(); + crack::run(passwords, &args.arg_path)?; + Ok(format!("Password not found.")) + } else if args.cmd_remove { + let address = args + .arg_address + .parse() + .map_err(|_| ethstore::Error::InvalidAccount)?; + let password = load_password(&args.arg_password)?; + let account_ref = open_args_vault_account(&store, address, &args)?; + let ok = store.remove_account(&account_ref, &password).is_ok(); + Ok(format!("{}", ok)) + } else if args.cmd_sign { + let address = args + .arg_address + .parse() + .map_err(|_| ethstore::Error::InvalidAccount)?; + let message = args + .arg_message + .parse() + .map_err(|_| ethstore::Error::InvalidMessage)?; + let password = load_password(&args.arg_password)?; + let account_ref = open_args_vault_account(&store, address, &args)?; + let signature = store.sign(&account_ref, &password, &message)?; + Ok(format!("0x{}", signature)) + } else if args.cmd_public { + let address = args + .arg_address + .parse() + .map_err(|_| ethstore::Error::InvalidAccount)?; + let password = load_password(&args.arg_password)?; + let account_ref = open_args_vault_account(&store, address, &args)?; + let public = store.public(&account_ref, &password)?; + Ok(format!("0x{:x}", public)) + } else if args.cmd_list_vaults { + let vaults = store.list_vaults()?; + Ok(format_vaults(&vaults)) + } else if args.cmd_create_vault { + let password = load_password(&args.arg_password)?; + store.create_vault(&args.arg_vault, &password)?; + Ok("OK".to_owned()) + } else if args.cmd_change_vault_pwd { + let old_pwd = load_password(&args.arg_old_pwd)?; + let new_pwd = load_password(&args.arg_new_pwd)?; + store.open_vault(&args.arg_vault, &old_pwd)?; + store.change_vault_password(&args.arg_vault, &new_pwd)?; + Ok("OK".to_owned()) + } else if args.cmd_move_to_vault { + let address = args + .arg_address + .parse() + .map_err(|_| ethstore::Error::InvalidAccount)?; + let password = load_password(&args.arg_password)?; + let 
account_ref = open_args_vault_account(&store, address, &args)?; + store.open_vault(&args.arg_vault, &password)?; + store.change_account_vault(SecretVaultRef::Vault(args.arg_vault), account_ref)?; + Ok("OK".to_owned()) + } else if args.cmd_move_from_vault { + let address = args + .arg_address + .parse() + .map_err(|_| ethstore::Error::InvalidAccount)?; + let password = load_password(&args.arg_password)?; + store.open_vault(&args.arg_vault, &password)?; + store.change_account_vault( + SecretVaultRef::Root, + StoreAccountRef::vault(&args.arg_vault, address), + )?; + Ok("OK".to_owned()) + } else { + Ok(format!("{}", USAGE)) + }; } diff --git a/accounts/ethstore/cli/tests/cli.rs b/accounts/ethstore/cli/tests/cli.rs index 39e40864f..eb5122ff4 100644 --- a/accounts/ethstore/cli/tests/cli.rs +++ b/accounts/ethstore/cli/tests/cli.rs @@ -15,68 +15,93 @@ // along with Parity Ethereum. If not, see . extern crate tempdir; -use std::process::Command; +use std::{fs::File, io::Write, process::Command}; use tempdir::TempDir; -use std::fs::File; -use std::io::Write; fn run(args: &[&str]) -> String { - let output = Command::new("cargo") - .args(&["run", "--"]) - .args(args) - .output() - .unwrap(); - assert!(output.status.success()); - String::from_utf8(output.stdout).unwrap() + let output = Command::new("cargo") + .args(&["run", "--"]) + .args(args) + .output() + .unwrap(); + assert!(output.status.success()); + String::from_utf8(output.stdout).unwrap() } #[test] fn cli_cmd() { - Command::new("cargo") - .arg("build") - .output() - .unwrap(); + Command::new("cargo").arg("build").output().unwrap(); - let dir = TempDir::new("test-vault").unwrap(); + let dir = TempDir::new("test-vault").unwrap(); - let mut passwd = File::create(dir.path().join("test-password")).unwrap(); - writeln!(passwd, "password").unwrap(); + let mut passwd = File::create(dir.path().join("test-password")).unwrap(); + writeln!(passwd, "password").unwrap(); - let mut passwd2 = 
File::create(dir.path().join("test-vault-addr")).unwrap(); - writeln!(passwd2, "password2").unwrap(); + let mut passwd2 = File::create(dir.path().join("test-vault-addr")).unwrap(); + writeln!(passwd2, "password2").unwrap(); - let test_password_buf = dir.path().join("test-password"); - let test_password: &str = test_password_buf.to_str().unwrap(); - let dir_str: &str = dir.path().to_str().unwrap(); - let test_vault_addr_buf = dir.path().join("test-vault-addr"); - let test_vault_addr = test_vault_addr_buf.to_str().unwrap(); + let test_password_buf = dir.path().join("test-password"); + let test_password: &str = test_password_buf.to_str().unwrap(); + let dir_str: &str = dir.path().to_str().unwrap(); + let test_vault_addr_buf = dir.path().join("test-vault-addr"); + let test_vault_addr = test_vault_addr_buf.to_str().unwrap(); - run(&["create-vault", "test-vault", test_password, "--dir", dir_str]); + run(&[ + "create-vault", + "test-vault", + test_password, + "--dir", + dir_str, + ]); - let output = run(&["insert", "7d29fab185a33e2cd955812397354c472d2b84615b645aa135ff539f6b0d70d5", - test_vault_addr, - "--dir", dir_str, - "--vault", "test-vault", - "--vault-pwd", test_password]); - let address = output.trim(); + let output = run(&[ + "insert", + "7d29fab185a33e2cd955812397354c472d2b84615b645aa135ff539f6b0d70d5", + test_vault_addr, + "--dir", + dir_str, + "--vault", + "test-vault", + "--vault-pwd", + test_password, + ]); + let address = output.trim(); - let output = run(&["list", - "--dir", dir_str, - "--vault", "test-vault", - "--vault-pwd", test_password]); - assert_eq!(output, " 0: 0xa8fa5dd30a87bb9e3288d604eb74949c515ab66e\n"); + let output = run(&[ + "list", + "--dir", + dir_str, + "--vault", + "test-vault", + "--vault-pwd", + test_password, + ]); + assert_eq!(output, " 0: 0xa8fa5dd30a87bb9e3288d604eb74949c515ab66e\n"); - let output = run(&["sign", &address[2..], - test_vault_addr, - "7d29fab185a33e2cd955812397354c472d2b84615b645aa135ff539f6b0d70d5", - "--dir", 
dir_str, - "--vault", "test-vault", - "--vault-pwd", test_password]); - assert_eq!(output, "0x54ab6e5cf0c5cb40043fdca5d15d611a3a94285414a076dafecc8dc9c04183f413296a3defff61092c0bb478dc9887ec01070e1275234211208fb8f4be4a9b0101\n"); + let output = run(&[ + "sign", + &address[2..], + test_vault_addr, + "7d29fab185a33e2cd955812397354c472d2b84615b645aa135ff539f6b0d70d5", + "--dir", + dir_str, + "--vault", + "test-vault", + "--vault-pwd", + test_password, + ]); + assert_eq!(output, "0x54ab6e5cf0c5cb40043fdca5d15d611a3a94285414a076dafecc8dc9c04183f413296a3defff61092c0bb478dc9887ec01070e1275234211208fb8f4be4a9b0101\n"); - let output = run(&["public", &address[2..], test_vault_addr, - "--dir", dir_str, - "--vault", "test-vault", - "--vault-pwd", test_password]); - assert_eq!(output, "0x35f222d88b80151857a2877826d940104887376a94c1cbd2c8c7c192eb701df88a18a4ecb8b05b1466c5b3706042027b5e079fe3a3683e66d822b0e047aa3418\n"); + let output = run(&[ + "public", + &address[2..], + test_vault_addr, + "--dir", + dir_str, + "--vault", + "test-vault", + "--vault-pwd", + test_password, + ]); + assert_eq!(output, "0x35f222d88b80151857a2877826d940104887376a94c1cbd2c8c7c192eb701df88a18a4ecb8b05b1466c5b3706042027b5e079fe3a3683e66d822b0e047aa3418\n"); } diff --git a/accounts/ethstore/src/account/cipher.rs b/accounts/ethstore/src/account/cipher.rs index 1d97b69e8..246cf2069 100644 --- a/accounts/ethstore/src/account/cipher.rs +++ b/accounts/ethstore/src/account/cipher.rs @@ -18,42 +18,40 @@ use json; #[derive(Debug, PartialEq, Clone)] pub struct Aes128Ctr { - pub iv: [u8; 16], + pub iv: [u8; 16], } #[derive(Debug, PartialEq, Clone)] pub enum Cipher { - Aes128Ctr(Aes128Ctr), + Aes128Ctr(Aes128Ctr), } impl From for Aes128Ctr { - fn from(json: json::Aes128Ctr) -> Self { - Aes128Ctr { - iv: json.iv.into() - } - } + fn from(json: json::Aes128Ctr) -> Self { + Aes128Ctr { iv: json.iv.into() } + } } impl Into for Aes128Ctr { - fn into(self) -> json::Aes128Ctr { - json::Aes128Ctr { - iv: 
From::from(self.iv) - } - } + fn into(self) -> json::Aes128Ctr { + json::Aes128Ctr { + iv: From::from(self.iv), + } + } } impl From for Cipher { - fn from(json: json::Cipher) -> Self { - match json { - json::Cipher::Aes128Ctr(params) => Cipher::Aes128Ctr(From::from(params)), - } - } + fn from(json: json::Cipher) -> Self { + match json { + json::Cipher::Aes128Ctr(params) => Cipher::Aes128Ctr(From::from(params)), + } + } } impl Into for Cipher { - fn into(self) -> json::Cipher { - match self { - Cipher::Aes128Ctr(params) => json::Cipher::Aes128Ctr(params.into()), - } - } + fn into(self) -> json::Cipher { + match self { + Cipher::Aes128Ctr(params) => json::Cipher::Aes128Ctr(params.into()), + } + } } diff --git a/accounts/ethstore/src/account/crypto.rs b/accounts/ethstore/src/account/crypto.rs index a3f6f9e9a..8dacd73ca 100644 --- a/accounts/ethstore/src/account/crypto.rs +++ b/accounts/ethstore/src/account/crypto.rs @@ -14,198 +14,221 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::str; -use std::num::NonZeroU32; +use account::{Aes128Ctr, Cipher, Kdf, Pbkdf2, Prf}; +use crypto::{self, Keccak256}; use ethkey::{Password, Secret}; -use {json, Error, crypto}; -use crypto::Keccak256; +use json; use random::Random; use smallvec::SmallVec; -use account::{Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf}; +use std::{num::NonZeroU32, str}; +use Error; /// Encrypted data #[derive(Debug, PartialEq, Clone)] pub struct Crypto { - /// Encryption parameters - pub cipher: Cipher, - /// Encrypted data buffer - pub ciphertext: Vec, - /// Key derivation function parameters - pub kdf: Kdf, - /// Message authentication code - pub mac: [u8; 32], + /// Encryption parameters + pub cipher: Cipher, + /// Encrypted data buffer + pub ciphertext: Vec, + /// Key derivation function parameters + pub kdf: Kdf, + /// Message authentication code + pub mac: [u8; 32], } impl From for Crypto { - fn from(json: json::Crypto) -> Self { - Crypto { - cipher: json.cipher.into(), - ciphertext: json.ciphertext.into(), - kdf: json.kdf.into(), - mac: json.mac.into(), - } - } + fn from(json: json::Crypto) -> Self { + Crypto { + cipher: json.cipher.into(), + ciphertext: json.ciphertext.into(), + kdf: json.kdf.into(), + mac: json.mac.into(), + } + } } impl From for json::Crypto { - fn from(c: Crypto) -> Self { - json::Crypto { - cipher: c.cipher.into(), - ciphertext: c.ciphertext.into(), - kdf: c.kdf.into(), - mac: c.mac.into(), - } - } + fn from(c: Crypto) -> Self { + json::Crypto { + cipher: c.cipher.into(), + ciphertext: c.ciphertext.into(), + kdf: c.kdf.into(), + mac: c.mac.into(), + } + } } impl str::FromStr for Crypto { - type Err = ::Err; + type Err = ::Err; - fn from_str(s: &str) -> Result { - s.parse::().map(Into::into) - } + fn from_str(s: &str) -> Result { + s.parse::().map(Into::into) + } } impl From for String { - fn from(c: Crypto) -> Self { - json::Crypto::from(c).into() - } + fn from(c: Crypto) -> Self { + json::Crypto::from(c).into() + } } impl Crypto { - /// Encrypt account 
secret - pub fn with_secret(secret: &Secret, password: &Password, iterations: NonZeroU32) -> Result { - Crypto::with_plain(&*secret, password, iterations) - } + /// Encrypt account secret + pub fn with_secret( + secret: &Secret, + password: &Password, + iterations: NonZeroU32, + ) -> Result { + Crypto::with_plain(&*secret, password, iterations) + } - /// Encrypt custom plain data - pub fn with_plain(plain: &[u8], password: &Password, iterations: NonZeroU32) -> Result { - let salt: [u8; 32] = Random::random(); - let iv: [u8; 16] = Random::random(); + /// Encrypt custom plain data + pub fn with_plain( + plain: &[u8], + password: &Password, + iterations: NonZeroU32, + ) -> Result { + let salt: [u8; 32] = Random::random(); + let iv: [u8; 16] = Random::random(); - // two parts of derived key - // DK = [ DK[0..15] DK[16..31] ] = [derived_left_bits, derived_right_bits] - let (derived_left_bits, derived_right_bits) = - crypto::derive_key_iterations(password.as_bytes(), &salt, iterations); + // two parts of derived key + // DK = [ DK[0..15] DK[16..31] ] = [derived_left_bits, derived_right_bits] + let (derived_left_bits, derived_right_bits) = + crypto::derive_key_iterations(password.as_bytes(), &salt, iterations); - // preallocated (on-stack in case of `Secret`) buffer to hold cipher - // length = length(plain) as we are using CTR-approach - let plain_len = plain.len(); - let mut ciphertext: SmallVec<[u8; 32]> = SmallVec::from_vec(vec![0; plain_len]); + // preallocated (on-stack in case of `Secret`) buffer to hold cipher + // length = length(plain) as we are using CTR-approach + let plain_len = plain.len(); + let mut ciphertext: SmallVec<[u8; 32]> = SmallVec::from_vec(vec![0; plain_len]); - // aes-128-ctr with initial vector of iv - crypto::aes::encrypt_128_ctr(&derived_left_bits, &iv, plain, &mut *ciphertext)?; + // aes-128-ctr with initial vector of iv + crypto::aes::encrypt_128_ctr(&derived_left_bits, &iv, plain, &mut *ciphertext)?; - // KECCAK(DK[16..31] ++ ), where 
DK[16..31] - derived_right_bits - let mac = crypto::derive_mac(&derived_right_bits, &*ciphertext).keccak256(); + // KECCAK(DK[16..31] ++ ), where DK[16..31] - derived_right_bits + let mac = crypto::derive_mac(&derived_right_bits, &*ciphertext).keccak256(); - Ok(Crypto { - cipher: Cipher::Aes128Ctr(Aes128Ctr { - iv: iv, - }), - ciphertext: ciphertext.into_vec(), - kdf: Kdf::Pbkdf2(Pbkdf2 { - dklen: crypto::KEY_LENGTH as u32, - salt: salt.to_vec(), - c: iterations, - prf: Prf::HmacSha256, - }), - mac: mac, - }) - } + Ok(Crypto { + cipher: Cipher::Aes128Ctr(Aes128Ctr { iv: iv }), + ciphertext: ciphertext.into_vec(), + kdf: Kdf::Pbkdf2(Pbkdf2 { + dklen: crypto::KEY_LENGTH as u32, + salt: salt.to_vec(), + c: iterations, + prf: Prf::HmacSha256, + }), + mac: mac, + }) + } - /// Try to decrypt and convert result to account secret - pub fn secret(&self, password: &Password) -> Result { - if self.ciphertext.len() > 32 { - return Err(Error::InvalidSecret); - } + /// Try to decrypt and convert result to account secret + pub fn secret(&self, password: &Password) -> Result { + if self.ciphertext.len() > 32 { + return Err(Error::InvalidSecret); + } - let secret = self.do_decrypt(password, 32)?; - Ok(Secret::from_unsafe_slice(&secret)?) - } + let secret = self.do_decrypt(password, 32)?; + Ok(Secret::from_unsafe_slice(&secret)?) 
+ } - /// Try to decrypt and return result as is - pub fn decrypt(&self, password: &Password) -> Result, Error> { - let expected_len = self.ciphertext.len(); - self.do_decrypt(password, expected_len) - } + /// Try to decrypt and return result as is + pub fn decrypt(&self, password: &Password) -> Result, Error> { + let expected_len = self.ciphertext.len(); + self.do_decrypt(password, expected_len) + } - fn do_decrypt(&self, password: &Password, expected_len: usize) -> Result, Error> { - let (derived_left_bits, derived_right_bits) = match self.kdf { - Kdf::Pbkdf2(ref params) => crypto::derive_key_iterations(password.as_bytes(), ¶ms.salt, params.c), - Kdf::Scrypt(ref params) => crypto::scrypt::derive_key(password.as_bytes(), ¶ms.salt, params.n, params.p, params.r)?, - }; + fn do_decrypt(&self, password: &Password, expected_len: usize) -> Result, Error> { + let (derived_left_bits, derived_right_bits) = match self.kdf { + Kdf::Pbkdf2(ref params) => { + crypto::derive_key_iterations(password.as_bytes(), ¶ms.salt, params.c) + } + Kdf::Scrypt(ref params) => crypto::scrypt::derive_key( + password.as_bytes(), + ¶ms.salt, + params.n, + params.p, + params.r, + )?, + }; - let mac = crypto::derive_mac(&derived_right_bits, &self.ciphertext).keccak256(); + let mac = crypto::derive_mac(&derived_right_bits, &self.ciphertext).keccak256(); - if !crypto::is_equal(&mac, &self.mac) { - return Err(Error::InvalidPassword) - } + if !crypto::is_equal(&mac, &self.mac) { + return Err(Error::InvalidPassword); + } - let mut plain: SmallVec<[u8; 32]> = SmallVec::from_vec(vec![0; expected_len]); + let mut plain: SmallVec<[u8; 32]> = SmallVec::from_vec(vec![0; expected_len]); - match self.cipher { - Cipher::Aes128Ctr(ref params) => { - // checker by callers - debug_assert!(expected_len >= self.ciphertext.len()); + match self.cipher { + Cipher::Aes128Ctr(ref params) => { + // checker by callers + debug_assert!(expected_len >= self.ciphertext.len()); - let from = expected_len - self.ciphertext.len(); 
- crypto::aes::decrypt_128_ctr(&derived_left_bits, ¶ms.iv, &self.ciphertext, &mut plain[from..])?; - Ok(plain.into_iter().collect()) - }, - } - } + let from = expected_len - self.ciphertext.len(); + crypto::aes::decrypt_128_ctr( + &derived_left_bits, + ¶ms.iv, + &self.ciphertext, + &mut plain[from..], + )?; + Ok(plain.into_iter().collect()) + } + } + } } #[cfg(test)] mod tests { - use ethkey::{Generator, Random}; - use super::{Crypto, Error, NonZeroU32}; + use super::{Crypto, Error, NonZeroU32}; + use ethkey::{Generator, Random}; - lazy_static! { - static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed"); - } + lazy_static! { + static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed"); + } - #[test] - fn crypto_with_secret_create() { - let keypair = Random.generate().unwrap(); - let passwd = "this is sparta".into(); - let crypto = Crypto::with_secret(keypair.secret(), &passwd, *ITERATIONS).unwrap(); - let secret = crypto.secret(&passwd).unwrap(); - assert_eq!(keypair.secret(), &secret); - } + #[test] + fn crypto_with_secret_create() { + let keypair = Random.generate().unwrap(); + let passwd = "this is sparta".into(); + let crypto = Crypto::with_secret(keypair.secret(), &passwd, *ITERATIONS).unwrap(); + let secret = crypto.secret(&passwd).unwrap(); + assert_eq!(keypair.secret(), &secret); + } - #[test] - fn crypto_with_secret_invalid_password() { - let keypair = Random.generate().unwrap(); - let crypto = Crypto::with_secret(keypair.secret(), &"this is sparta".into(), *ITERATIONS).unwrap(); - assert_matches!(crypto.secret(&"this is sparta!".into()), Err(Error::InvalidPassword)) - } + #[test] + fn crypto_with_secret_invalid_password() { + let keypair = Random.generate().unwrap(); + let crypto = + Crypto::with_secret(keypair.secret(), &"this is sparta".into(), *ITERATIONS).unwrap(); + assert_matches!( + crypto.secret(&"this is sparta!".into()), + Err(Error::InvalidPassword) + ) + } - #[test] - fn 
crypto_with_null_plain_data() { - let original_data = b""; - let passwd = "this is sparta".into(); - let crypto = Crypto::with_plain(&original_data[..], &passwd, *ITERATIONS).unwrap(); - let decrypted_data = crypto.decrypt(&passwd).unwrap(); - assert_eq!(original_data[..], *decrypted_data); - } + #[test] + fn crypto_with_null_plain_data() { + let original_data = b""; + let passwd = "this is sparta".into(); + let crypto = Crypto::with_plain(&original_data[..], &passwd, *ITERATIONS).unwrap(); + let decrypted_data = crypto.decrypt(&passwd).unwrap(); + assert_eq!(original_data[..], *decrypted_data); + } - #[test] - fn crypto_with_tiny_plain_data() { - let original_data = b"{}"; - let passwd = "this is sparta".into(); - let crypto = Crypto::with_plain(&original_data[..], &passwd, *ITERATIONS).unwrap(); - let decrypted_data = crypto.decrypt(&passwd).unwrap(); - assert_eq!(original_data[..], *decrypted_data); - } + #[test] + fn crypto_with_tiny_plain_data() { + let original_data = b"{}"; + let passwd = "this is sparta".into(); + let crypto = Crypto::with_plain(&original_data[..], &passwd, *ITERATIONS).unwrap(); + let decrypted_data = crypto.decrypt(&passwd).unwrap(); + assert_eq!(original_data[..], *decrypted_data); + } - #[test] - fn crypto_with_huge_plain_data() { - let original_data: Vec<_> = (1..65536).map(|i| (i % 256) as u8).collect(); - let passwd = "this is sparta".into(); - let crypto = Crypto::with_plain(&original_data, &passwd, *ITERATIONS).unwrap(); - let decrypted_data = crypto.decrypt(&passwd).unwrap(); - assert_eq!(&original_data, &decrypted_data); - } + #[test] + fn crypto_with_huge_plain_data() { + let original_data: Vec<_> = (1..65536).map(|i| (i % 256) as u8).collect(); + let passwd = "this is sparta".into(); + let crypto = Crypto::with_plain(&original_data, &passwd, *ITERATIONS).unwrap(); + let decrypted_data = crypto.decrypt(&passwd).unwrap(); + assert_eq!(&original_data, &decrypted_data); + } } diff --git a/accounts/ethstore/src/account/kdf.rs 
b/accounts/ethstore/src/account/kdf.rs index 06b361cdc..e0d2ccb89 100644 --- a/accounts/ethstore/src/account/kdf.rs +++ b/accounts/ethstore/src/account/kdf.rs @@ -19,108 +19,108 @@ use std::num::NonZeroU32; #[derive(Debug, PartialEq, Clone)] pub enum Prf { - HmacSha256, + HmacSha256, } #[derive(Debug, PartialEq, Clone)] pub struct Pbkdf2 { - pub c: NonZeroU32, - pub dklen: u32, - pub prf: Prf, - pub salt: Vec, + pub c: NonZeroU32, + pub dklen: u32, + pub prf: Prf, + pub salt: Vec, } #[derive(Debug, PartialEq, Clone)] pub struct Scrypt { - pub dklen: u32, - pub p: u32, - pub n: u32, - pub r: u32, - pub salt: Vec, + pub dklen: u32, + pub p: u32, + pub n: u32, + pub r: u32, + pub salt: Vec, } #[derive(Debug, PartialEq, Clone)] pub enum Kdf { - Pbkdf2(Pbkdf2), - Scrypt(Scrypt), + Pbkdf2(Pbkdf2), + Scrypt(Scrypt), } impl From for Prf { - fn from(json: json::Prf) -> Self { - match json { - json::Prf::HmacSha256 => Prf::HmacSha256, - } - } + fn from(json: json::Prf) -> Self { + match json { + json::Prf::HmacSha256 => Prf::HmacSha256, + } + } } impl Into for Prf { - fn into(self) -> json::Prf { - match self { - Prf::HmacSha256 => json::Prf::HmacSha256, - } - } + fn into(self) -> json::Prf { + match self { + Prf::HmacSha256 => json::Prf::HmacSha256, + } + } } impl From for Pbkdf2 { - fn from(json: json::Pbkdf2) -> Self { - Pbkdf2 { - c: json.c, - dklen: json.dklen, - prf: From::from(json.prf), - salt: json.salt.into(), - } - } + fn from(json: json::Pbkdf2) -> Self { + Pbkdf2 { + c: json.c, + dklen: json.dklen, + prf: From::from(json.prf), + salt: json.salt.into(), + } + } } impl Into for Pbkdf2 { - fn into(self) -> json::Pbkdf2 { - json::Pbkdf2 { - c: self.c, - dklen: self.dklen, - prf: self.prf.into(), - salt: From::from(self.salt), - } - } + fn into(self) -> json::Pbkdf2 { + json::Pbkdf2 { + c: self.c, + dklen: self.dklen, + prf: self.prf.into(), + salt: From::from(self.salt), + } + } } impl From for Scrypt { - fn from(json: json::Scrypt) -> Self { - Scrypt { - dklen: 
json.dklen, - p: json.p, - n: json.n, - r: json.r, - salt: json.salt.into(), - } - } + fn from(json: json::Scrypt) -> Self { + Scrypt { + dklen: json.dklen, + p: json.p, + n: json.n, + r: json.r, + salt: json.salt.into(), + } + } } impl Into for Scrypt { - fn into(self) -> json::Scrypt { - json::Scrypt { - dklen: self.dklen, - p: self.p, - n: self.n, - r: self.r, - salt: From::from(self.salt), - } - } + fn into(self) -> json::Scrypt { + json::Scrypt { + dklen: self.dklen, + p: self.p, + n: self.n, + r: self.r, + salt: From::from(self.salt), + } + } } impl From for Kdf { - fn from(json: json::Kdf) -> Self { - match json { - json::Kdf::Pbkdf2(params) => Kdf::Pbkdf2(From::from(params)), - json::Kdf::Scrypt(params) => Kdf::Scrypt(From::from(params)), - } - } + fn from(json: json::Kdf) -> Self { + match json { + json::Kdf::Pbkdf2(params) => Kdf::Pbkdf2(From::from(params)), + json::Kdf::Scrypt(params) => Kdf::Scrypt(From::from(params)), + } + } } impl Into for Kdf { - fn into(self) -> json::Kdf { - match self { - Kdf::Pbkdf2(params) => json::Kdf::Pbkdf2(params.into()), - Kdf::Scrypt(params) => json::Kdf::Scrypt(params.into()), - } - } + fn into(self) -> json::Kdf { + match self { + Kdf::Pbkdf2(params) => json::Kdf::Pbkdf2(params.into()), + Kdf::Scrypt(params) => json::Kdf::Scrypt(params.into()), + } + } } diff --git a/accounts/ethstore/src/account/mod.rs b/accounts/ethstore/src/account/mod.rs index b979d34a5..e6225fd60 100644 --- a/accounts/ethstore/src/account/mod.rs +++ b/accounts/ethstore/src/account/mod.rs @@ -20,8 +20,10 @@ mod kdf; mod safe_account; mod version; -pub use self::cipher::{Cipher, Aes128Ctr}; -pub use self::crypto::Crypto; -pub use self::kdf::{Kdf, Pbkdf2, Scrypt, Prf}; -pub use self::safe_account::SafeAccount; -pub use self::version::Version; +pub use self::{ + cipher::{Aes128Ctr, Cipher}, + crypto::Crypto, + kdf::{Kdf, Pbkdf2, Prf, Scrypt}, + safe_account::SafeAccount, + version::Version, +}; diff --git a/accounts/ethstore/src/account/safe_account.rs 
b/accounts/ethstore/src/account/safe_account.rs index 63971ef6a..4e0750d3b 100644 --- a/accounts/ethstore/src/account/safe_account.rs +++ b/accounts/ethstore/src/account/safe_account.rs @@ -14,221 +14,273 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use ethkey::{self, KeyPair, sign, Address, Password, Signature, Message, Public, Secret}; -use ethkey::crypto::ecdh::agree; -use {json, Error}; +use super::crypto::Crypto; use account::Version; use crypto; -use super::crypto::Crypto; +use ethkey::{ + self, crypto::ecdh::agree, sign, Address, KeyPair, Message, Password, Public, Secret, Signature, +}; +use json; use std::num::NonZeroU32; +use Error; /// Account representation. #[derive(Debug, PartialEq, Clone)] pub struct SafeAccount { - /// Account ID - pub id: [u8; 16], - /// Account version - pub version: Version, - /// Account address - pub address: Address, - /// Account private key derivation definition. - pub crypto: Crypto, - /// Account filename - pub filename: Option, - /// Account name - pub name: String, - /// Account metadata - pub meta: String, + /// Account ID + pub id: [u8; 16], + /// Account version + pub version: Version, + /// Account address + pub address: Address, + /// Account private key derivation definition. 
+ pub crypto: Crypto, + /// Account filename + pub filename: Option, + /// Account name + pub name: String, + /// Account metadata + pub meta: String, } impl Into for SafeAccount { - fn into(self) -> json::KeyFile { - json::KeyFile { - id: From::from(self.id), - version: self.version.into(), - address: Some(self.address.into()), - crypto: self.crypto.into(), - name: Some(self.name.into()), - meta: Some(self.meta.into()), - } - } + fn into(self) -> json::KeyFile { + json::KeyFile { + id: From::from(self.id), + version: self.version.into(), + address: Some(self.address.into()), + crypto: self.crypto.into(), + name: Some(self.name.into()), + meta: Some(self.meta.into()), + } + } } impl SafeAccount { - /// Create a new account - pub fn create( - keypair: &KeyPair, - id: [u8; 16], - password: &Password, - iterations: NonZeroU32, - name: String, - meta: String - ) -> Result { - Ok(SafeAccount { - id: id, - version: Version::V3, - crypto: Crypto::with_secret(keypair.secret(), password, iterations)?, - address: keypair.address(), - filename: None, - name: name, - meta: meta, - }) - } + /// Create a new account + pub fn create( + keypair: &KeyPair, + id: [u8; 16], + password: &Password, + iterations: NonZeroU32, + name: String, + meta: String, + ) -> Result { + Ok(SafeAccount { + id: id, + version: Version::V3, + crypto: Crypto::with_secret(keypair.secret(), password, iterations)?, + address: keypair.address(), + filename: None, + name: name, + meta: meta, + }) + } - /// Create a new `SafeAccount` from the given `json`; if it was read from a - /// file, the `filename` should be `Some` name. If it is as yet anonymous, then it - /// can be left `None`. - /// In case `password` is provided, we will attempt to read the secret from the keyfile - /// and derive the address from it instead of reading it directly. - /// Providing password is required for `json::KeyFile`s with no address. 
- pub fn from_file(json: json::KeyFile, filename: Option, password: &Option) -> Result { - let crypto = Crypto::from(json.crypto); - let address = match (password, &json.address) { - (None, Some(json_address)) => json_address.into(), - (None, None) => Err(Error::Custom( - "This keystore does not contain address. You need to provide password to import it".into()))?, - (Some(password), json_address) => { - let derived_address = KeyPair::from_secret( - crypto.secret(&password).map_err(|_| Error::InvalidPassword)? - )?.address(); + /// Create a new `SafeAccount` from the given `json`; if it was read from a + /// file, the `filename` should be `Some` name. If it is as yet anonymous, then it + /// can be left `None`. + /// In case `password` is provided, we will attempt to read the secret from the keyfile + /// and derive the address from it instead of reading it directly. + /// Providing password is required for `json::KeyFile`s with no address. + pub fn from_file( + json: json::KeyFile, + filename: Option, + password: &Option, + ) -> Result { + let crypto = Crypto::from(json.crypto); + let address = match (password, &json.address) { + (None, Some(json_address)) => json_address.into(), + (None, None) => Err(Error::Custom( + "This keystore does not contain address. You need to provide password to import it" + .into(), + ))?, + (Some(password), json_address) => { + let derived_address = KeyPair::from_secret( + crypto + .secret(&password) + .map_err(|_| Error::InvalidPassword)?, + )? + .address(); - match json_address { - Some(json_address) => { - let json_address = json_address.into(); - if derived_address != json_address { - warn!("Detected address mismatch when opening an account. Derived: {:?}, in json got: {:?}", + match json_address { + Some(json_address) => { + let json_address = json_address.into(); + if derived_address != json_address { + warn!("Detected address mismatch when opening an account. 
Derived: {:?}, in json got: {:?}", derived_address, json_address); - } - }, - _ => {}, - } - derived_address - } - }; + } + } + _ => {} + } + derived_address + } + }; - Ok(SafeAccount { - id: json.id.into(), - version: json.version.into(), - address, - crypto, - filename, - name: json.name.unwrap_or(String::new()), - meta: json.meta.unwrap_or("{}".to_owned()), - }) - } + Ok(SafeAccount { + id: json.id.into(), + version: json.version.into(), + address, + crypto, + filename, + name: json.name.unwrap_or(String::new()), + meta: json.meta.unwrap_or("{}".to_owned()), + }) + } - /// Create a new `SafeAccount` from the given vault `json`; if it was read from a - /// file, the `filename` should be `Some` name. If it is as yet anonymous, then it - /// can be left `None`. - pub fn from_vault_file(password: &Password, json: json::VaultKeyFile, filename: Option) -> Result { - let meta_crypto: Crypto = json.metacrypto.into(); - let meta_plain = meta_crypto.decrypt(password)?; - let meta_plain = json::VaultKeyMeta::load(&meta_plain).map_err(|e| Error::Custom(format!("{:?}", e)))?; + /// Create a new `SafeAccount` from the given vault `json`; if it was read from a + /// file, the `filename` should be `Some` name. If it is as yet anonymous, then it + /// can be left `None`. 
+ pub fn from_vault_file( + password: &Password, + json: json::VaultKeyFile, + filename: Option, + ) -> Result { + let meta_crypto: Crypto = json.metacrypto.into(); + let meta_plain = meta_crypto.decrypt(password)?; + let meta_plain = + json::VaultKeyMeta::load(&meta_plain).map_err(|e| Error::Custom(format!("{:?}", e)))?; - SafeAccount::from_file(json::KeyFile { - id: json.id, - version: json.version, - crypto: json.crypto, - address: Some(meta_plain.address), - name: meta_plain.name, - meta: meta_plain.meta, - }, filename, &None) - } + SafeAccount::from_file( + json::KeyFile { + id: json.id, + version: json.version, + crypto: json.crypto, + address: Some(meta_plain.address), + name: meta_plain.name, + meta: meta_plain.meta, + }, + filename, + &None, + ) + } - /// Create a new `VaultKeyFile` from the given `self` - pub fn into_vault_file(self, iterations: NonZeroU32, password: &Password) -> Result { - let meta_plain = json::VaultKeyMeta { - address: self.address.into(), - name: Some(self.name), - meta: Some(self.meta), - }; - let meta_plain = meta_plain.write().map_err(|e| Error::Custom(format!("{:?}", e)))?; - let meta_crypto = Crypto::with_plain(&meta_plain, password, iterations)?; + /// Create a new `VaultKeyFile` from the given `self` + pub fn into_vault_file( + self, + iterations: NonZeroU32, + password: &Password, + ) -> Result { + let meta_plain = json::VaultKeyMeta { + address: self.address.into(), + name: Some(self.name), + meta: Some(self.meta), + }; + let meta_plain = meta_plain + .write() + .map_err(|e| Error::Custom(format!("{:?}", e)))?; + let meta_crypto = Crypto::with_plain(&meta_plain, password, iterations)?; - Ok(json::VaultKeyFile { - id: self.id.into(), - version: self.version.into(), - crypto: self.crypto.into(), - metacrypto: meta_crypto.into(), - }) - } + Ok(json::VaultKeyFile { + id: self.id.into(), + version: self.version.into(), + crypto: self.crypto.into(), + metacrypto: meta_crypto.into(), + }) + } - /// Sign a message. 
- pub fn sign(&self, password: &Password, message: &Message) -> Result { - let secret = self.crypto.secret(password)?; - sign(&secret, message).map_err(From::from) - } + /// Sign a message. + pub fn sign(&self, password: &Password, message: &Message) -> Result { + let secret = self.crypto.secret(password)?; + sign(&secret, message).map_err(From::from) + } - /// Decrypt a message. - pub fn decrypt(&self, password: &Password, shared_mac: &[u8], message: &[u8]) -> Result, Error> { - let secret = self.crypto.secret(password)?; - ethkey::crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from) - } + /// Decrypt a message. + pub fn decrypt( + &self, + password: &Password, + shared_mac: &[u8], + message: &[u8], + ) -> Result, Error> { + let secret = self.crypto.secret(password)?; + ethkey::crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from) + } - /// Agree on shared key. - pub fn agree(&self, password: &Password, other: &Public) -> Result { - let secret = self.crypto.secret(password)?; - agree(&secret, other).map_err(From::from) - } + /// Agree on shared key. + pub fn agree(&self, password: &Password, other: &Public) -> Result { + let secret = self.crypto.secret(password)?; + agree(&secret, other).map_err(From::from) + } - /// Derive public key. - pub fn public(&self, password: &Password) -> Result { - let secret = self.crypto.secret(password)?; - Ok(KeyPair::from_secret(secret)?.public().clone()) - } + /// Derive public key. + pub fn public(&self, password: &Password) -> Result { + let secret = self.crypto.secret(password)?; + Ok(KeyPair::from_secret(secret)?.public().clone()) + } - /// Change account's password. 
- pub fn change_password(&self, old_password: &Password, new_password: &Password, iterations: NonZeroU32) -> Result { - let secret = self.crypto.secret(old_password)?; - let result = SafeAccount { - id: self.id.clone(), - version: self.version.clone(), - crypto: Crypto::with_secret(&secret, new_password, iterations)?, - address: self.address.clone(), - filename: self.filename.clone(), - name: self.name.clone(), - meta: self.meta.clone(), - }; - Ok(result) - } + /// Change account's password. + pub fn change_password( + &self, + old_password: &Password, + new_password: &Password, + iterations: NonZeroU32, + ) -> Result { + let secret = self.crypto.secret(old_password)?; + let result = SafeAccount { + id: self.id.clone(), + version: self.version.clone(), + crypto: Crypto::with_secret(&secret, new_password, iterations)?, + address: self.address.clone(), + filename: self.filename.clone(), + name: self.name.clone(), + meta: self.meta.clone(), + }; + Ok(result) + } - /// Check if password matches the account. - pub fn check_password(&self, password: &Password) -> bool { - self.crypto.secret(password).is_ok() - } + /// Check if password matches the account. + pub fn check_password(&self, password: &Password) -> bool { + self.crypto.secret(password).is_ok() + } } #[cfg(test)] mod tests { - use ethkey::{Generator, Random, verify_public, Message}; - use super::{SafeAccount, NonZeroU32}; + use super::{NonZeroU32, SafeAccount}; + use ethkey::{verify_public, Generator, Message, Random}; - lazy_static! { - static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed"); - } + lazy_static! 
{ + static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed"); + } + #[test] + fn sign_and_verify_public() { + let keypair = Random.generate().unwrap(); + let password = "hello world".into(); + let message = Message::default(); + let account = SafeAccount::create( + &keypair, + [0u8; 16], + &password, + *ITERATIONS, + "Test".to_owned(), + "{}".to_owned(), + ); + let signature = account.unwrap().sign(&password, &message).unwrap(); + assert!(verify_public(keypair.public(), &signature, &message).unwrap()); + } - #[test] - fn sign_and_verify_public() { - let keypair = Random.generate().unwrap(); - let password = "hello world".into(); - let message = Message::default(); - let account = SafeAccount::create(&keypair, [0u8; 16], &password, *ITERATIONS, "Test".to_owned(), "{}".to_owned()); - let signature = account.unwrap().sign(&password, &message).unwrap(); - assert!(verify_public(keypair.public(), &signature, &message).unwrap()); - } - - #[test] - fn change_password() { - let keypair = Random.generate().unwrap(); - let first_password = "hello world".into(); - let sec_password = "this is sparta".into(); - let message = Message::default(); - let account = SafeAccount::create(&keypair, [0u8; 16], &first_password, *ITERATIONS, "Test".to_owned(), "{}".to_owned()).unwrap(); - let new_account = account.change_password(&first_password, &sec_password, *ITERATIONS).unwrap(); - assert!(account.sign(&first_password, &message).is_ok()); - assert!(account.sign(&sec_password, &message).is_err()); - assert!(new_account.sign(&first_password, &message).is_err()); - assert!(new_account.sign(&sec_password, &message).is_ok()); - } + #[test] + fn change_password() { + let keypair = Random.generate().unwrap(); + let first_password = "hello world".into(); + let sec_password = "this is sparta".into(); + let message = Message::default(); + let account = SafeAccount::create( + &keypair, + [0u8; 16], + &first_password, + *ITERATIONS, + "Test".to_owned(), + "{}".to_owned(), 
+ ) + .unwrap(); + let new_account = account + .change_password(&first_password, &sec_password, *ITERATIONS) + .unwrap(); + assert!(account.sign(&first_password, &message).is_ok()); + assert!(account.sign(&sec_password, &message).is_err()); + assert!(new_account.sign(&first_password, &message).is_err()); + assert!(new_account.sign(&sec_password, &message).is_ok()); + } } diff --git a/accounts/ethstore/src/account/version.rs b/accounts/ethstore/src/account/version.rs index 3048b95b0..14d5f3a67 100644 --- a/accounts/ethstore/src/account/version.rs +++ b/accounts/ethstore/src/account/version.rs @@ -18,21 +18,21 @@ use json; #[derive(Debug, PartialEq, Clone)] pub enum Version { - V3, + V3, } impl From for Version { - fn from(json: json::Version) -> Self { - match json { - json::Version::V3 => Version::V3, - } - } + fn from(json: json::Version) -> Self { + match json { + json::Version::V3 => Version::V3, + } + } } impl Into for Version { - fn into(self) -> json::Version { - match self { - Version::V3 => json::Version::V3, - } - } + fn into(self) -> json::Version { + match self { + Version::V3 => json::Version::V3, + } + } } diff --git a/accounts/ethstore/src/accounts_dir/disk.rs b/accounts/ethstore/src/accounts_dir/disk.rs index 00c59b254..3c08b860c 100644 --- a/accounts/ethstore/src/accounts_dir/disk.rs +++ b/accounts/ethstore/src/accounts_dir/disk.rs @@ -14,90 +14,101 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::{fs, io}; -use std::io::Write; -use std::path::{PathBuf, Path}; -use std::collections::HashMap; -use time; -use {json, SafeAccount, Error}; -use json::Uuid; -use super::{KeyDirectory, VaultKeyDirectory, VaultKeyDirectoryProvider, VaultKey}; -use super::vault::{VAULT_FILE_NAME, VaultDiskDirectory}; +use super::{ + vault::{VaultDiskDirectory, VAULT_FILE_NAME}, + KeyDirectory, VaultKey, VaultKeyDirectory, VaultKeyDirectoryProvider, +}; use ethkey::Password; +use json::{self, Uuid}; +use std::{ + collections::HashMap, + fs, io, + io::Write, + path::{Path, PathBuf}, +}; +use time; +use Error; +use SafeAccount; const IGNORED_FILES: &'static [&'static str] = &[ - "thumbs.db", - "address_book.json", - "dapps_policy.json", - "dapps_accounts.json", - "dapps_history.json", - "vault.json", + "thumbs.db", + "address_book.json", + "dapps_policy.json", + "dapps_accounts.json", + "dapps_history.json", + "vault.json", ]; /// Find a unique filename that does not exist using four-letter random suffix. 
-pub fn find_unique_filename_using_random_suffix(parent_path: &Path, original_filename: &str) -> io::Result { - let mut path = parent_path.join(original_filename); - let mut deduped_filename = original_filename.to_string(); +pub fn find_unique_filename_using_random_suffix( + parent_path: &Path, + original_filename: &str, +) -> io::Result { + let mut path = parent_path.join(original_filename); + let mut deduped_filename = original_filename.to_string(); - if path.exists() { - const MAX_RETRIES: usize = 500; - let mut retries = 0; + if path.exists() { + const MAX_RETRIES: usize = 500; + let mut retries = 0; - while path.exists() { - if retries >= MAX_RETRIES { - return Err(io::Error::new(io::ErrorKind::Other, "Exceeded maximum retries when deduplicating filename.")); - } + while path.exists() { + if retries >= MAX_RETRIES { + return Err(io::Error::new( + io::ErrorKind::Other, + "Exceeded maximum retries when deduplicating filename.", + )); + } - let suffix = ::random::random_string(4); - deduped_filename = format!("{}-{}", original_filename, suffix); - path.set_file_name(&deduped_filename); - retries += 1; - } - } + let suffix = ::random::random_string(4); + deduped_filename = format!("{}-{}", original_filename, suffix); + path.set_file_name(&deduped_filename); + retries += 1; + } + } - Ok(deduped_filename) + Ok(deduped_filename) } /// Create a new file and restrict permissions to owner only. It errors if the file already exists. #[cfg(unix)] pub fn create_new_file_with_permissions_to_owner(file_path: &Path) -> io::Result { - use libc; - use std::os::unix::fs::OpenOptionsExt; + use libc; + use std::os::unix::fs::OpenOptionsExt; - fs::OpenOptions::new() - .write(true) - .create_new(true) - .mode((libc::S_IWUSR | libc::S_IRUSR) as u32) - .open(file_path) + fs::OpenOptions::new() + .write(true) + .create_new(true) + .mode((libc::S_IWUSR | libc::S_IRUSR) as u32) + .open(file_path) } /// Create a new file and restrict permissions to owner only. 
It errors if the file already exists. #[cfg(not(unix))] pub fn create_new_file_with_permissions_to_owner(file_path: &Path) -> io::Result { - fs::OpenOptions::new() - .write(true) - .create_new(true) - .open(file_path) + fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(file_path) } /// Create a new file and restrict permissions to owner only. It replaces the existing file if it already exists. #[cfg(unix)] pub fn replace_file_with_permissions_to_owner(file_path: &Path) -> io::Result { - use libc; - use std::os::unix::fs::PermissionsExt; + use libc; + use std::os::unix::fs::PermissionsExt; - let file = fs::File::create(file_path)?; - let mut permissions = file.metadata()?.permissions(); - permissions.set_mode((libc::S_IWUSR | libc::S_IRUSR) as u32); - file.set_permissions(permissions)?; + let file = fs::File::create(file_path)?; + let mut permissions = file.metadata()?.permissions(); + permissions.set_mode((libc::S_IWUSR | libc::S_IRUSR) as u32); + file.set_permissions(permissions)?; - Ok(file) + Ok(file) } /// Create a new file and restrict permissions to owner only. It replaces the existing file if it already exists. 
#[cfg(not(unix))] pub fn replace_file_with_permissions_to_owner(file_path: &Path) -> io::Result { - fs::File::create(file_path) + fs::File::create(file_path) } /// Root keys directory implementation @@ -105,388 +116,495 @@ pub type RootDiskDirectory = DiskDirectory; /// Disk directory key file manager pub trait KeyFileManager: Send + Sync { - /// Read `SafeAccount` from given key file stream - fn read(&self, filename: Option, reader: T) -> Result where T: io::Read; + /// Read `SafeAccount` from given key file stream + fn read(&self, filename: Option, reader: T) -> Result + where + T: io::Read; - /// Write `SafeAccount` to given key file stream - fn write(&self, account: SafeAccount, writer: &mut T) -> Result<(), Error> where T: io::Write; + /// Write `SafeAccount` to given key file stream + fn write(&self, account: SafeAccount, writer: &mut T) -> Result<(), Error> + where + T: io::Write; } /// Disk-based keys directory implementation -pub struct DiskDirectory where T: KeyFileManager { - path: PathBuf, - key_manager: T, +pub struct DiskDirectory +where + T: KeyFileManager, +{ + path: PathBuf, + key_manager: T, } /// Keys file manager for root keys directory #[derive(Default)] pub struct DiskKeyFileManager { - password: Option, + password: Option, } impl RootDiskDirectory { - pub fn create

(path: P) -> Result where P: AsRef { - fs::create_dir_all(&path)?; - Ok(Self::at(path)) - } + pub fn create

(path: P) -> Result + where + P: AsRef, + { + fs::create_dir_all(&path)?; + Ok(Self::at(path)) + } - /// allows to read keyfiles with given password (needed for keyfiles w/o address) - pub fn with_password(&self, password: Option) -> Self { - DiskDirectory::new(&self.path, DiskKeyFileManager { password }) - } + /// allows to read keyfiles with given password (needed for keyfiles w/o address) + pub fn with_password(&self, password: Option) -> Self { + DiskDirectory::new(&self.path, DiskKeyFileManager { password }) + } - pub fn at

(path: P) -> Self where P: AsRef { - DiskDirectory::new(path, DiskKeyFileManager::default()) - } + pub fn at

(path: P) -> Self + where + P: AsRef, + { + DiskDirectory::new(path, DiskKeyFileManager::default()) + } } -impl DiskDirectory where T: KeyFileManager { - /// Create new disk directory instance - pub fn new

(path: P, key_manager: T) -> Self where P: AsRef { - DiskDirectory { - path: path.as_ref().to_path_buf(), - key_manager: key_manager, - } - } +impl DiskDirectory +where + T: KeyFileManager, +{ + /// Create new disk directory instance + pub fn new

(path: P, key_manager: T) -> Self + where + P: AsRef, + { + DiskDirectory { + path: path.as_ref().to_path_buf(), + key_manager: key_manager, + } + } - fn files(&self) -> Result, Error> { - Ok(fs::read_dir(&self.path)? - .flat_map(Result::ok) - .filter(|entry| { - let metadata = entry.metadata().ok(); - let file_name = entry.file_name(); - let name = file_name.to_string_lossy(); - // filter directories - metadata.map_or(false, |m| !m.is_dir()) && + fn files(&self) -> Result, Error> { + Ok(fs::read_dir(&self.path)? + .flat_map(Result::ok) + .filter(|entry| { + let metadata = entry.metadata().ok(); + let file_name = entry.file_name(); + let name = file_name.to_string_lossy(); + // filter directories + metadata.map_or(false, |m| !m.is_dir()) && // hidden files !name.starts_with(".") && // other ignored files !IGNORED_FILES.contains(&&*name) - }) - .map(|entry| entry.path()) - .collect::>() - ) - } + }) + .map(|entry| entry.path()) + .collect::>()) + } - pub fn files_hash(&self) -> Result { - use std::collections::hash_map::DefaultHasher; - use std::hash::Hasher; + pub fn files_hash(&self) -> Result { + use std::{collections::hash_map::DefaultHasher, hash::Hasher}; - let mut hasher = DefaultHasher::new(); - let files = self.files()?; - for file in files { - hasher.write(file.to_str().unwrap_or("").as_bytes()) - } + let mut hasher = DefaultHasher::new(); + let files = self.files()?; + for file in files { + hasher.write(file.to_str().unwrap_or("").as_bytes()) + } - Ok(hasher.finish()) - } + Ok(hasher.finish()) + } - fn last_modification_date(&self) -> Result { - use std::time::{Duration, UNIX_EPOCH}; - let duration = fs::metadata(&self.path)?.modified()?.duration_since(UNIX_EPOCH).unwrap_or(Duration::default()); - let timestamp = duration.as_secs() ^ (duration.subsec_nanos() as u64); - Ok(timestamp) - } + fn last_modification_date(&self) -> Result { + use std::time::{Duration, UNIX_EPOCH}; + let duration = fs::metadata(&self.path)? + .modified()? 
+ .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::default()); + let timestamp = duration.as_secs() ^ (duration.subsec_nanos() as u64); + Ok(timestamp) + } - /// all accounts found in keys directory - fn files_content(&self) -> Result, Error> { - // it's not done using one iterator cause - // there is an issue with rustc and it takes tooo much time to compile - let paths = self.files()?; - Ok(paths - .into_iter() - .filter_map(|path| { - let filename = Some(path.file_name().and_then(|n| n.to_str()).expect("Keys have valid UTF8 names only.").to_owned()); - fs::File::open(path.clone()) - .map_err(Into::into) - .and_then(|file| self.key_manager.read(filename, file)) - .map_err(|err| { - warn!("Invalid key file: {:?} ({})", path, err); - err - }) - .map(|account| (path, account)) - .ok() - }) - .collect() - ) - } + /// all accounts found in keys directory + fn files_content(&self) -> Result, Error> { + // it's not done using one iterator cause + // there is an issue with rustc and it takes tooo much time to compile + let paths = self.files()?; + Ok(paths + .into_iter() + .filter_map(|path| { + let filename = Some( + path.file_name() + .and_then(|n| n.to_str()) + .expect("Keys have valid UTF8 names only.") + .to_owned(), + ); + fs::File::open(path.clone()) + .map_err(Into::into) + .and_then(|file| self.key_manager.read(filename, file)) + .map_err(|err| { + warn!("Invalid key file: {:?} ({})", path, err); + err + }) + .map(|account| (path, account)) + .ok() + }) + .collect()) + } - /// insert account with given filename. if the filename is a duplicate of any stored account and dedup is set to - /// true, a random suffix is appended to the filename. - pub fn insert_with_filename(&self, account: SafeAccount, mut filename: String, dedup: bool) -> Result { - if dedup { - filename = find_unique_filename_using_random_suffix(&self.path, &filename)?; - } + /// insert account with given filename. 
if the filename is a duplicate of any stored account and dedup is set to + /// true, a random suffix is appended to the filename. + pub fn insert_with_filename( + &self, + account: SafeAccount, + mut filename: String, + dedup: bool, + ) -> Result { + if dedup { + filename = find_unique_filename_using_random_suffix(&self.path, &filename)?; + } - // path to keyfile - let keyfile_path = self.path.join(filename.as_str()); + // path to keyfile + let keyfile_path = self.path.join(filename.as_str()); - // update account filename - let original_account = account.clone(); - let mut account = account; - account.filename = Some(filename); + // update account filename + let original_account = account.clone(); + let mut account = account; + account.filename = Some(filename); - { - // save the file - let mut file = if dedup { - create_new_file_with_permissions_to_owner(&keyfile_path)? - } else { - replace_file_with_permissions_to_owner(&keyfile_path)? - }; + { + // save the file + let mut file = if dedup { + create_new_file_with_permissions_to_owner(&keyfile_path)? + } else { + replace_file_with_permissions_to_owner(&keyfile_path)? + }; - // write key content - self.key_manager.write(original_account, &mut file).map_err(|e| Error::Custom(format!("{:?}", e)))?; + // write key content + self.key_manager + .write(original_account, &mut file) + .map_err(|e| Error::Custom(format!("{:?}", e)))?; - file.flush()?; - file.sync_all()?; - } + file.flush()?; + file.sync_all()?; + } - Ok(account) - } + Ok(account) + } - /// Get key file manager referece - pub fn key_manager(&self) -> &T { - &self.key_manager - } + /// Get key file manager referece + pub fn key_manager(&self) -> &T { + &self.key_manager + } } -impl KeyDirectory for DiskDirectory where T: KeyFileManager { - fn load(&self) -> Result, Error> { - let accounts = self.files_content()? 
- .into_iter() - .map(|(_, account)| account) - .collect(); - Ok(accounts) - } +impl KeyDirectory for DiskDirectory +where + T: KeyFileManager, +{ + fn load(&self) -> Result, Error> { + let accounts = self + .files_content()? + .into_iter() + .map(|(_, account)| account) + .collect(); + Ok(accounts) + } - fn update(&self, account: SafeAccount) -> Result { - // Disk store handles updates correctly iff filename is the same - let filename = account_filename(&account); - self.insert_with_filename(account, filename, false) - } + fn update(&self, account: SafeAccount) -> Result { + // Disk store handles updates correctly iff filename is the same + let filename = account_filename(&account); + self.insert_with_filename(account, filename, false) + } - fn insert(&self, account: SafeAccount) -> Result { - let filename = account_filename(&account); - self.insert_with_filename(account, filename, true) - } + fn insert(&self, account: SafeAccount) -> Result { + let filename = account_filename(&account); + self.insert_with_filename(account, filename, true) + } - fn remove(&self, account: &SafeAccount) -> Result<(), Error> { - // enumerate all entries in keystore - // and find entry with given address - let to_remove = self.files_content()? - .into_iter() - .find(|&(_, ref acc)| acc.id == account.id && acc.address == account.address); + fn remove(&self, account: &SafeAccount) -> Result<(), Error> { + // enumerate all entries in keystore + // and find entry with given address + let to_remove = self + .files_content()? 
+ .into_iter() + .find(|&(_, ref acc)| acc.id == account.id && acc.address == account.address); - // remove it - match to_remove { - None => Err(Error::InvalidAccount), - Some((path, _)) => fs::remove_file(path).map_err(From::from) - } - } + // remove it + match to_remove { + None => Err(Error::InvalidAccount), + Some((path, _)) => fs::remove_file(path).map_err(From::from), + } + } - fn path(&self) -> Option<&PathBuf> { Some(&self.path) } + fn path(&self) -> Option<&PathBuf> { + Some(&self.path) + } - fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> { - Some(self) - } + fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> { + Some(self) + } - fn unique_repr(&self) -> Result { - self.last_modification_date() - } + fn unique_repr(&self) -> Result { + self.last_modification_date() + } } -impl VaultKeyDirectoryProvider for DiskDirectory where T: KeyFileManager { - fn create(&self, name: &str, key: VaultKey) -> Result, Error> { - let vault_dir = VaultDiskDirectory::create(&self.path, name, key)?; - Ok(Box::new(vault_dir)) - } +impl VaultKeyDirectoryProvider for DiskDirectory +where + T: KeyFileManager, +{ + fn create(&self, name: &str, key: VaultKey) -> Result, Error> { + let vault_dir = VaultDiskDirectory::create(&self.path, name, key)?; + Ok(Box::new(vault_dir)) + } - fn open(&self, name: &str, key: VaultKey) -> Result, Error> { - let vault_dir = VaultDiskDirectory::at(&self.path, name, key)?; - Ok(Box::new(vault_dir)) - } + fn open(&self, name: &str, key: VaultKey) -> Result, Error> { + let vault_dir = VaultDiskDirectory::at(&self.path, name, key)?; + Ok(Box::new(vault_dir)) + } - fn list_vaults(&self) -> Result, Error> { - Ok(fs::read_dir(&self.path)? 
- .filter_map(|e| e.ok().map(|e| e.path())) - .filter_map(|path| { - let mut vault_file_path = path.clone(); - vault_file_path.push(VAULT_FILE_NAME); - if vault_file_path.is_file() { - path.file_name().and_then(|f| f.to_str()).map(|f| f.to_owned()) - } else { - None - } - }) - .collect()) - } + fn list_vaults(&self) -> Result, Error> { + Ok(fs::read_dir(&self.path)? + .filter_map(|e| e.ok().map(|e| e.path())) + .filter_map(|path| { + let mut vault_file_path = path.clone(); + vault_file_path.push(VAULT_FILE_NAME); + if vault_file_path.is_file() { + path.file_name() + .and_then(|f| f.to_str()) + .map(|f| f.to_owned()) + } else { + None + } + }) + .collect()) + } - fn vault_meta(&self, name: &str) -> Result { - VaultDiskDirectory::meta_at(&self.path, name) - } + fn vault_meta(&self, name: &str) -> Result { + VaultDiskDirectory::meta_at(&self.path, name) + } } impl KeyFileManager for DiskKeyFileManager { - fn read(&self, filename: Option, reader: T) -> Result where T: io::Read { - let key_file = json::KeyFile::load(reader).map_err(|e| Error::Custom(format!("{:?}", e)))?; - SafeAccount::from_file(key_file, filename, &self.password) - } + fn read(&self, filename: Option, reader: T) -> Result + where + T: io::Read, + { + let key_file = + json::KeyFile::load(reader).map_err(|e| Error::Custom(format!("{:?}", e)))?; + SafeAccount::from_file(key_file, filename, &self.password) + } - fn write(&self, mut account: SafeAccount, writer: &mut T) -> Result<(), Error> where T: io::Write { - // when account is moved back to root directory from vault - // => remove vault field from meta - account.meta = json::remove_vault_name_from_json_meta(&account.meta) - .map_err(|err| Error::Custom(format!("{:?}", err)))?; + fn write(&self, mut account: SafeAccount, writer: &mut T) -> Result<(), Error> + where + T: io::Write, + { + // when account is moved back to root directory from vault + // => remove vault field from meta + account.meta = json::remove_vault_name_from_json_meta(&account.meta) + 
.map_err(|err| Error::Custom(format!("{:?}", err)))?; - let key_file: json::KeyFile = account.into(); - key_file.write(writer).map_err(|e| Error::Custom(format!("{:?}", e))) - } + let key_file: json::KeyFile = account.into(); + key_file + .write(writer) + .map_err(|e| Error::Custom(format!("{:?}", e))) + } } fn account_filename(account: &SafeAccount) -> String { - // build file path - account.filename.clone().unwrap_or_else(|| { - let timestamp = time::strftime("%Y-%m-%dT%H-%M-%S", &time::now_utc()).expect("Time-format string is valid."); - format!("UTC--{}Z--{}", timestamp, Uuid::from(account.id)) - }) + // build file path + account.filename.clone().unwrap_or_else(|| { + let timestamp = time::strftime("%Y-%m-%dT%H-%M-%S", &time::now_utc()) + .expect("Time-format string is valid."); + format!("UTC--{}Z--{}", timestamp, Uuid::from(account.id)) + }) } #[cfg(test)] mod test { - extern crate tempdir; + extern crate tempdir; - use std::{env, fs}; - use std::num::NonZeroU32; - use super::{KeyDirectory, RootDiskDirectory, VaultKey}; - use account::SafeAccount; - use ethkey::{Random, Generator}; - use self::tempdir::TempDir; + use self::tempdir::TempDir; + use super::{KeyDirectory, RootDiskDirectory, VaultKey}; + use account::SafeAccount; + use ethkey::{Generator, Random}; + use std::{env, fs, num::NonZeroU32}; - lazy_static! { - static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed"); - } + lazy_static! 
{ + static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed"); + } - #[test] - fn should_create_new_account() { - // given - let mut dir = env::temp_dir(); - dir.push("ethstore_should_create_new_account"); - let keypair = Random.generate().unwrap(); - let password = "hello world".into(); - let directory = RootDiskDirectory::create(dir.clone()).unwrap(); + #[test] + fn should_create_new_account() { + // given + let mut dir = env::temp_dir(); + dir.push("ethstore_should_create_new_account"); + let keypair = Random.generate().unwrap(); + let password = "hello world".into(); + let directory = RootDiskDirectory::create(dir.clone()).unwrap(); - // when - let account = SafeAccount::create(&keypair, [0u8; 16], &password, *ITERATIONS, "Test".to_owned(), "{}".to_owned()); - let res = directory.insert(account.unwrap()); + // when + let account = SafeAccount::create( + &keypair, + [0u8; 16], + &password, + *ITERATIONS, + "Test".to_owned(), + "{}".to_owned(), + ); + let res = directory.insert(account.unwrap()); - // then - assert!(res.is_ok(), "Should save account succesfuly."); - assert!(res.unwrap().filename.is_some(), "Filename has been assigned."); + // then + assert!(res.is_ok(), "Should save account succesfuly."); + assert!( + res.unwrap().filename.is_some(), + "Filename has been assigned." 
+ ); - // cleanup - let _ = fs::remove_dir_all(dir); - } + // cleanup + let _ = fs::remove_dir_all(dir); + } - #[test] - fn should_handle_duplicate_filenames() { - // given - let mut dir = env::temp_dir(); - dir.push("ethstore_should_handle_duplicate_filenames"); - let keypair = Random.generate().unwrap(); - let password = "hello world".into(); - let directory = RootDiskDirectory::create(dir.clone()).unwrap(); + #[test] + fn should_handle_duplicate_filenames() { + // given + let mut dir = env::temp_dir(); + dir.push("ethstore_should_handle_duplicate_filenames"); + let keypair = Random.generate().unwrap(); + let password = "hello world".into(); + let directory = RootDiskDirectory::create(dir.clone()).unwrap(); - // when - let account = SafeAccount::create(&keypair, [0u8; 16], &password, *ITERATIONS, "Test".to_owned(), "{}".to_owned()).unwrap(); - let filename = "test".to_string(); - let dedup = true; + // when + let account = SafeAccount::create( + &keypair, + [0u8; 16], + &password, + *ITERATIONS, + "Test".to_owned(), + "{}".to_owned(), + ) + .unwrap(); + let filename = "test".to_string(); + let dedup = true; - directory.insert_with_filename(account.clone(), "foo".to_string(), dedup).unwrap(); - let file1 = directory.insert_with_filename(account.clone(), filename.clone(), dedup).unwrap().filename.unwrap(); - let file2 = directory.insert_with_filename(account.clone(), filename.clone(), dedup).unwrap().filename.unwrap(); - let file3 = directory.insert_with_filename(account.clone(), filename.clone(), dedup).unwrap().filename.unwrap(); + directory + .insert_with_filename(account.clone(), "foo".to_string(), dedup) + .unwrap(); + let file1 = directory + .insert_with_filename(account.clone(), filename.clone(), dedup) + .unwrap() + .filename + .unwrap(); + let file2 = directory + .insert_with_filename(account.clone(), filename.clone(), dedup) + .unwrap() + .filename + .unwrap(); + let file3 = directory + .insert_with_filename(account.clone(), filename.clone(), dedup) + 
.unwrap() + .filename + .unwrap(); - // then - // the first file should have the original names - assert_eq!(file1, filename); + // then + // the first file should have the original names + assert_eq!(file1, filename); - // the following duplicate files should have a suffix appended - assert!(file2 != file3); - assert_eq!(file2.len(), filename.len() + 5); - assert_eq!(file3.len(), filename.len() + 5); + // the following duplicate files should have a suffix appended + assert!(file2 != file3); + assert_eq!(file2.len(), filename.len() + 5); + assert_eq!(file3.len(), filename.len() + 5); - // cleanup - let _ = fs::remove_dir_all(dir); - } + // cleanup + let _ = fs::remove_dir_all(dir); + } - #[test] - fn should_manage_vaults() { - // given - let mut dir = env::temp_dir(); - dir.push("should_create_new_vault"); - let directory = RootDiskDirectory::create(dir.clone()).unwrap(); - let vault_name = "vault"; - let password = "password".into(); + #[test] + fn should_manage_vaults() { + // given + let mut dir = env::temp_dir(); + dir.push("should_create_new_vault"); + let directory = RootDiskDirectory::create(dir.clone()).unwrap(); + let vault_name = "vault"; + let password = "password".into(); - // then - assert!(directory.as_vault_provider().is_some()); + // then + assert!(directory.as_vault_provider().is_some()); - // and when - let before_root_items_count = fs::read_dir(&dir).unwrap().count(); - let vault = directory.as_vault_provider().unwrap().create(vault_name, VaultKey::new(&password, *ITERATIONS)); + // and when + let before_root_items_count = fs::read_dir(&dir).unwrap().count(); + let vault = directory + .as_vault_provider() + .unwrap() + .create(vault_name, VaultKey::new(&password, *ITERATIONS)); - // then - assert!(vault.is_ok()); - let after_root_items_count = fs::read_dir(&dir).unwrap().count(); - assert!(after_root_items_count > before_root_items_count); + // then + assert!(vault.is_ok()); + let after_root_items_count = fs::read_dir(&dir).unwrap().count(); + 
assert!(after_root_items_count > before_root_items_count); - // and when - let vault = directory.as_vault_provider().unwrap().open(vault_name, VaultKey::new(&password, *ITERATIONS)); + // and when + let vault = directory + .as_vault_provider() + .unwrap() + .open(vault_name, VaultKey::new(&password, *ITERATIONS)); - // then - assert!(vault.is_ok()); - let after_root_items_count2 = fs::read_dir(&dir).unwrap().count(); - assert!(after_root_items_count == after_root_items_count2); + // then + assert!(vault.is_ok()); + let after_root_items_count2 = fs::read_dir(&dir).unwrap().count(); + assert!(after_root_items_count == after_root_items_count2); - // cleanup - let _ = fs::remove_dir_all(dir); - } + // cleanup + let _ = fs::remove_dir_all(dir); + } - #[test] - fn should_list_vaults() { - // given - let temp_path = TempDir::new("").unwrap(); - let directory = RootDiskDirectory::create(&temp_path).unwrap(); - let vault_provider = directory.as_vault_provider().unwrap(); - let iter = NonZeroU32::new(1).expect("1 > 0; qed"); - vault_provider.create("vault1", VaultKey::new(&"password1".into(), iter)).unwrap(); - vault_provider.create("vault2", VaultKey::new(&"password2".into(), iter)).unwrap(); + #[test] + fn should_list_vaults() { + // given + let temp_path = TempDir::new("").unwrap(); + let directory = RootDiskDirectory::create(&temp_path).unwrap(); + let vault_provider = directory.as_vault_provider().unwrap(); + let iter = NonZeroU32::new(1).expect("1 > 0; qed"); + vault_provider + .create("vault1", VaultKey::new(&"password1".into(), iter)) + .unwrap(); + vault_provider + .create("vault2", VaultKey::new(&"password2".into(), iter)) + .unwrap(); - // then - let vaults = vault_provider.list_vaults().unwrap(); - assert_eq!(vaults.len(), 2); - assert!(vaults.iter().any(|v| &*v == "vault1")); - assert!(vaults.iter().any(|v| &*v == "vault2")); - } + // then + let vaults = vault_provider.list_vaults().unwrap(); + assert_eq!(vaults.len(), 2); + assert!(vaults.iter().any(|v| &*v == 
"vault1")); + assert!(vaults.iter().any(|v| &*v == "vault2")); + } - #[test] - fn hash_of_files() { - let temp_path = TempDir::new("").unwrap(); - let directory = RootDiskDirectory::create(&temp_path).unwrap(); + #[test] + fn hash_of_files() { + let temp_path = TempDir::new("").unwrap(); + let directory = RootDiskDirectory::create(&temp_path).unwrap(); - let hash = directory.files_hash().expect("Files hash should be calculated ok"); - assert_eq!( - hash, - 15130871412783076140 - ); + let hash = directory + .files_hash() + .expect("Files hash should be calculated ok"); + assert_eq!(hash, 15130871412783076140); - let keypair = Random.generate().unwrap(); - let password = "test pass".into(); - let account = SafeAccount::create(&keypair, [0u8; 16], &password, *ITERATIONS, "Test".to_owned(), "{}".to_owned()); - directory.insert(account.unwrap()).expect("Account should be inserted ok"); + let keypair = Random.generate().unwrap(); + let password = "test pass".into(); + let account = SafeAccount::create( + &keypair, + [0u8; 16], + &password, + *ITERATIONS, + "Test".to_owned(), + "{}".to_owned(), + ); + directory + .insert(account.unwrap()) + .expect("Account should be inserted ok"); - let new_hash = directory.files_hash().expect("New files hash should be calculated ok"); + let new_hash = directory + .files_hash() + .expect("New files hash should be calculated ok"); - assert!(new_hash != hash, "hash of the file list should change once directory content changed"); - } + assert!( + new_hash != hash, + "hash of the file list should change once directory content changed" + ); + } } diff --git a/accounts/ethstore/src/accounts_dir/memory.rs b/accounts/ethstore/src/accounts_dir/memory.rs index 617e7bcb0..b0b0e395e 100644 --- a/accounts/ethstore/src/accounts_dir/memory.rs +++ b/accounts/ethstore/src/accounts_dir/memory.rs @@ -14,61 +14,64 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::HashMap; -use parking_lot::RwLock; -use itertools; use ethkey::Address; +use itertools; +use parking_lot::RwLock; +use std::collections::HashMap; -use {SafeAccount, Error}; use super::KeyDirectory; +use Error; +use SafeAccount; /// Accounts in-memory storage. #[derive(Default)] pub struct MemoryDirectory { - accounts: RwLock>>, + accounts: RwLock>>, } impl KeyDirectory for MemoryDirectory { - fn load(&self) -> Result, Error> { - Ok(itertools::Itertools::flatten(self.accounts.read().values().cloned()).collect()) - } + fn load(&self) -> Result, Error> { + Ok(itertools::Itertools::flatten(self.accounts.read().values().cloned()).collect()) + } - fn update(&self, account: SafeAccount) -> Result { - let mut lock = self.accounts.write(); - let accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new); - // If the filename is the same we just need to replace the entry - accounts.retain(|acc| acc.filename != account.filename); - accounts.push(account.clone()); - Ok(account) - } + fn update(&self, account: SafeAccount) -> Result { + let mut lock = self.accounts.write(); + let accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new); + // If the filename is the same we just need to replace the entry + accounts.retain(|acc| acc.filename != account.filename); + accounts.push(account.clone()); + Ok(account) + } - fn insert(&self, account: SafeAccount) -> Result { - let mut lock = self.accounts.write(); - let accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new); - accounts.push(account.clone()); - Ok(account) - } + fn insert(&self, account: SafeAccount) -> Result { + let mut lock = self.accounts.write(); + let accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new); + accounts.push(account.clone()); + Ok(account) + } - fn remove(&self, account: &SafeAccount) -> Result<(), Error> { - let mut accounts = self.accounts.write(); - let is_empty = if let Some(accounts) = 
accounts.get_mut(&account.address) { - if let Some(position) = accounts.iter().position(|acc| acc == account) { - accounts.remove(position); - } - accounts.is_empty() - } else { - false - }; - if is_empty { - accounts.remove(&account.address); - } - Ok(()) - } + fn remove(&self, account: &SafeAccount) -> Result<(), Error> { + let mut accounts = self.accounts.write(); + let is_empty = if let Some(accounts) = accounts.get_mut(&account.address) { + if let Some(position) = accounts.iter().position(|acc| acc == account) { + accounts.remove(position); + } + accounts.is_empty() + } else { + false + }; + if is_empty { + accounts.remove(&account.address); + } + Ok(()) + } - fn unique_repr(&self) -> Result { - let mut val = 0u64; - let accounts = self.accounts.read(); - for acc in accounts.keys() { val = val ^ acc.low_u64() } - Ok(val) - } + fn unique_repr(&self) -> Result { + let mut val = 0u64; + let accounts = self.accounts.read(); + for acc in accounts.keys() { + val = val ^ acc.low_u64() + } + Ok(val) + } } diff --git a/accounts/ethstore/src/accounts_dir/mod.rs b/accounts/ethstore/src/accounts_dir/mod.rs index 9b1328e11..da682c41e 100644 --- a/accounts/ethstore/src/accounts_dir/mod.rs +++ b/accounts/ethstore/src/accounts_dir/mod.rs @@ -17,9 +17,9 @@ //! 
Accounts Directory use ethkey::Password; -use std::num::NonZeroU32; -use std::path::{PathBuf}; -use {SafeAccount, Error}; +use std::{num::NonZeroU32, path::PathBuf}; +use Error; +use SafeAccount; mod disk; mod memory; @@ -28,79 +28,85 @@ mod vault; /// `VaultKeyDirectory::set_key` error #[derive(Debug)] pub enum SetKeyError { - /// Error is fatal and directory is probably in inconsistent state - Fatal(Error), - /// Error is non fatal, directory is reverted to pre-operation state - NonFatalOld(Error), - /// Error is non fatal, directory is consistent with new key - NonFatalNew(Error), + /// Error is fatal and directory is probably in inconsistent state + Fatal(Error), + /// Error is non fatal, directory is reverted to pre-operation state + NonFatalOld(Error), + /// Error is non fatal, directory is consistent with new key + NonFatalNew(Error), } /// Vault key #[derive(Clone, PartialEq, Eq)] pub struct VaultKey { - /// Vault password - pub password: Password, - /// Number of iterations to produce a derived key from password - pub iterations: NonZeroU32, + /// Vault password + pub password: Password, + /// Number of iterations to produce a derived key from password + pub iterations: NonZeroU32, } /// Keys directory pub trait KeyDirectory: Send + Sync { - /// Read keys from directory - fn load(&self) -> Result, Error>; - /// Insert new key to directory - fn insert(&self, account: SafeAccount) -> Result; - /// Update key in the directory - fn update(&self, account: SafeAccount) -> Result; - /// Remove key from directory - fn remove(&self, account: &SafeAccount) -> Result<(), Error>; - /// Get directory filesystem path, if available - fn path(&self) -> Option<&PathBuf> { None } - /// Return vault provider, if available - fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> { None } - /// Unique representation of directory account collection - fn unique_repr(&self) -> Result; + /// Read keys from directory + fn load(&self) -> Result, Error>; + /// Insert new 
key to directory + fn insert(&self, account: SafeAccount) -> Result; + /// Update key in the directory + fn update(&self, account: SafeAccount) -> Result; + /// Remove key from directory + fn remove(&self, account: &SafeAccount) -> Result<(), Error>; + /// Get directory filesystem path, if available + fn path(&self) -> Option<&PathBuf> { + None + } + /// Return vault provider, if available + fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> { + None + } + /// Unique representation of directory account collection + fn unique_repr(&self) -> Result; } /// Vaults provider pub trait VaultKeyDirectoryProvider { - /// Create new vault with given key - fn create(&self, name: &str, key: VaultKey) -> Result, Error>; - /// Open existing vault with given key - fn open(&self, name: &str, key: VaultKey) -> Result, Error>; - /// List all vaults - fn list_vaults(&self) -> Result, Error>; - /// Get vault meta - fn vault_meta(&self, name: &str) -> Result; + /// Create new vault with given key + fn create(&self, name: &str, key: VaultKey) -> Result, Error>; + /// Open existing vault with given key + fn open(&self, name: &str, key: VaultKey) -> Result, Error>; + /// List all vaults + fn list_vaults(&self) -> Result, Error>; + /// Get vault meta + fn vault_meta(&self, name: &str) -> Result; } /// Vault directory pub trait VaultKeyDirectory: KeyDirectory { - /// Cast to `KeyDirectory` - fn as_key_directory(&self) -> &KeyDirectory; - /// Vault name - fn name(&self) -> &str; - /// Get vault key - fn key(&self) -> VaultKey; - /// Set new key for vault - fn set_key(&self, key: VaultKey) -> Result<(), SetKeyError>; - /// Get vault meta - fn meta(&self) -> String; - /// Set vault meta - fn set_meta(&self, meta: &str) -> Result<(), Error>; + /// Cast to `KeyDirectory` + fn as_key_directory(&self) -> &KeyDirectory; + /// Vault name + fn name(&self) -> &str; + /// Get vault key + fn key(&self) -> VaultKey; + /// Set new key for vault + fn set_key(&self, key: VaultKey) -> 
Result<(), SetKeyError>; + /// Get vault meta + fn meta(&self) -> String; + /// Set vault meta + fn set_meta(&self, meta: &str) -> Result<(), Error>; } -pub use self::disk::{RootDiskDirectory, DiskKeyFileManager, KeyFileManager}; -pub use self::memory::MemoryDirectory; -pub use self::vault::VaultDiskDirectory; +pub use self::{ + disk::{DiskKeyFileManager, KeyFileManager, RootDiskDirectory}, + memory::MemoryDirectory, + vault::VaultDiskDirectory, +}; impl VaultKey { - /// Create new vault key - pub fn new(password: &Password, iterations: NonZeroU32) -> Self { - VaultKey { - password: password.clone(), - iterations: iterations, - } - } + /// Create new vault key + pub fn new(password: &Password, iterations: NonZeroU32) -> Self { + VaultKey { + password: password.clone(), + iterations: iterations, + } + } } diff --git a/accounts/ethstore/src/accounts_dir/vault.rs b/accounts/ethstore/src/accounts_dir/vault.rs index c54de7c12..5041a4439 100644 --- a/accounts/ethstore/src/accounts_dir/vault.rs +++ b/accounts/ethstore/src/accounts_dir/vault.rs @@ -14,14 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::{fs, io}; -use std::path::{PathBuf, Path}; -use parking_lot::Mutex; -use {json, SafeAccount, Error}; +use super::{ + super::account::Crypto, + disk::{self, DiskDirectory, KeyFileManager}, + KeyDirectory, SetKeyError, VaultKey, VaultKeyDirectory, +}; use crypto::Keccak256; -use super::super::account::Crypto; -use super::{KeyDirectory, VaultKeyDirectory, VaultKey, SetKeyError}; -use super::disk::{self, DiskDirectory, KeyFileManager}; +use json; +use parking_lot::Mutex; +use std::{ + fs, io, + path::{Path, PathBuf}, +}; +use Error; +use SafeAccount; /// Name of vault metadata file pub const VAULT_FILE_NAME: &'static str = "vault.json"; @@ -33,417 +39,489 @@ pub type VaultDiskDirectory = DiskDirectory; /// Vault key file manager pub struct VaultKeyFileManager { - name: String, - key: VaultKey, - meta: Mutex, + name: String, + key: VaultKey, + meta: Mutex, } impl VaultDiskDirectory { - /// Create new vault directory with given key - pub fn create

(root: P, name: &str, key: VaultKey) -> Result where P: AsRef { - // check that vault directory does not exists - let vault_dir_path = make_vault_dir_path(root, name, true)?; - if vault_dir_path.exists() { - return Err(Error::CreationFailed); - } + /// Create new vault directory with given key + pub fn create

(root: P, name: &str, key: VaultKey) -> Result + where + P: AsRef, + { + // check that vault directory does not exists + let vault_dir_path = make_vault_dir_path(root, name, true)?; + if vault_dir_path.exists() { + return Err(Error::CreationFailed); + } - // create vault && vault file - let vault_meta = "{}"; - fs::create_dir_all(&vault_dir_path)?; - if let Err(err) = create_vault_file(&vault_dir_path, &key, vault_meta) { - let _ = fs::remove_dir_all(&vault_dir_path); // can't do anything with this - return Err(err); - } + // create vault && vault file + let vault_meta = "{}"; + fs::create_dir_all(&vault_dir_path)?; + if let Err(err) = create_vault_file(&vault_dir_path, &key, vault_meta) { + let _ = fs::remove_dir_all(&vault_dir_path); // can't do anything with this + return Err(err); + } - Ok(DiskDirectory::new(vault_dir_path, VaultKeyFileManager::new(name, key, vault_meta))) - } + Ok(DiskDirectory::new( + vault_dir_path, + VaultKeyFileManager::new(name, key, vault_meta), + )) + } - /// Open existing vault directory with given key - pub fn at

(root: P, name: &str, key: VaultKey) -> Result where P: AsRef { - // check that vault directory exists - let vault_dir_path = make_vault_dir_path(root, name, true)?; - if !vault_dir_path.is_dir() { - return Err(Error::CreationFailed); - } + /// Open existing vault directory with given key + pub fn at

(root: P, name: &str, key: VaultKey) -> Result + where + P: AsRef, + { + // check that vault directory exists + let vault_dir_path = make_vault_dir_path(root, name, true)?; + if !vault_dir_path.is_dir() { + return Err(Error::CreationFailed); + } - // check that passed key matches vault file - let meta = read_vault_file(&vault_dir_path, Some(&key))?; + // check that passed key matches vault file + let meta = read_vault_file(&vault_dir_path, Some(&key))?; - Ok(DiskDirectory::new(vault_dir_path, VaultKeyFileManager::new(name, key, &meta))) - } + Ok(DiskDirectory::new( + vault_dir_path, + VaultKeyFileManager::new(name, key, &meta), + )) + } - /// Read vault meta without actually opening the vault - pub fn meta_at

(root: P, name: &str) -> Result where P: AsRef { - // check that vault directory exists - let vault_dir_path = make_vault_dir_path(root, name, true)?; - if !vault_dir_path.is_dir() { - return Err(Error::VaultNotFound); - } + /// Read vault meta without actually opening the vault + pub fn meta_at

(root: P, name: &str) -> Result + where + P: AsRef, + { + // check that vault directory exists + let vault_dir_path = make_vault_dir_path(root, name, true)?; + if !vault_dir_path.is_dir() { + return Err(Error::VaultNotFound); + } - // check that passed key matches vault file - read_vault_file(&vault_dir_path, None) - } + // check that passed key matches vault file + read_vault_file(&vault_dir_path, None) + } - fn create_temp_vault(&self, key: VaultKey) -> Result { - let original_path = self.path().expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed"); - let mut path: PathBuf = original_path.clone(); - let name = self.name(); + fn create_temp_vault(&self, key: VaultKey) -> Result { + let original_path = self + .path() + .expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed"); + let mut path: PathBuf = original_path.clone(); + let name = self.name(); - path.push(name); // to jump to the next level + path.push(name); // to jump to the next level - let mut index = 0; - loop { - let name = format!("{}_temp_{}", name, index); - path.set_file_name(&name); - if !path.exists() { - return VaultDiskDirectory::create(original_path, &name, key); - } + let mut index = 0; + loop { + let name = format!("{}_temp_{}", name, index); + path.set_file_name(&name); + if !path.exists() { + return VaultDiskDirectory::create(original_path, &name, key); + } - index += 1; - } - } + index += 1; + } + } - fn copy_to_vault(&self, vault: &VaultDiskDirectory) -> Result<(), Error> { - for account in self.load()? { - let filename = account.filename.clone().expect("self is instance of DiskDirectory; DiskDirectory fills filename in load; qed"); - vault.insert_with_filename(account, filename, true)?; - } + fn copy_to_vault(&self, vault: &VaultDiskDirectory) -> Result<(), Error> { + for account in self.load()? 
{ + let filename = account.filename.clone().expect( + "self is instance of DiskDirectory; DiskDirectory fills filename in load; qed", + ); + vault.insert_with_filename(account, filename, true)?; + } - Ok(()) - } + Ok(()) + } - fn delete(&self) -> Result<(), Error> { - let path = self.path().expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed"); - fs::remove_dir_all(path).map_err(Into::into) - } + fn delete(&self) -> Result<(), Error> { + let path = self + .path() + .expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed"); + fs::remove_dir_all(path).map_err(Into::into) + } } impl VaultKeyDirectory for VaultDiskDirectory { - fn as_key_directory(&self) -> &KeyDirectory { - self - } + fn as_key_directory(&self) -> &KeyDirectory { + self + } - fn name(&self) -> &str { - &self.key_manager().name - } + fn name(&self) -> &str { + &self.key_manager().name + } - fn key(&self) -> VaultKey { - self.key_manager().key.clone() - } + fn key(&self) -> VaultKey { + self.key_manager().key.clone() + } - fn set_key(&self, new_key: VaultKey) -> Result<(), SetKeyError> { - let temp_vault = VaultDiskDirectory::create_temp_vault(self, new_key.clone()).map_err(|err| SetKeyError::NonFatalOld(err))?; - let mut source_path = temp_vault.path().expect("temp_vault is instance of DiskDirectory; DiskDirectory always returns path; qed").clone(); - let mut target_path = self.path().expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed").clone(); + fn set_key(&self, new_key: VaultKey) -> Result<(), SetKeyError> { + let temp_vault = VaultDiskDirectory::create_temp_vault(self, new_key.clone()) + .map_err(|err| SetKeyError::NonFatalOld(err))?; + let mut source_path = temp_vault + .path() + .expect( + "temp_vault is instance of DiskDirectory; DiskDirectory always returns path; qed", + ) + .clone(); + let mut target_path = self + .path() + .expect("self is instance of DiskDirectory; DiskDirectory always returns path; 
qed") + .clone(); - // preserve meta - temp_vault.set_meta(&self.meta()).map_err(SetKeyError::NonFatalOld)?; + // preserve meta + temp_vault + .set_meta(&self.meta()) + .map_err(SetKeyError::NonFatalOld)?; - // jump to next fs level - source_path.push("next"); - target_path.push("next"); + // jump to next fs level + source_path.push("next"); + target_path.push("next"); - let temp_accounts = self.copy_to_vault(&temp_vault) - .and_then(|_| temp_vault.load()) - .map_err(|err| { - // ignore error, as we already processing error - let _ = temp_vault.delete(); - SetKeyError::NonFatalOld(err) - })?; + let temp_accounts = self + .copy_to_vault(&temp_vault) + .and_then(|_| temp_vault.load()) + .map_err(|err| { + // ignore error, as we already processing error + let _ = temp_vault.delete(); + SetKeyError::NonFatalOld(err) + })?; - // we can't just delete temp vault until all files moved, because - // original vault content has already been partially replaced - // => when error or crash happens here, we can't do anything - for temp_account in temp_accounts { - let filename = temp_account.filename.expect("self is instance of DiskDirectory; DiskDirectory fills filename in load; qed"); - source_path.set_file_name(&filename); - target_path.set_file_name(&filename); - fs::rename(&source_path, &target_path).map_err(|err| SetKeyError::Fatal(err.into()))?; - } - source_path.set_file_name(VAULT_FILE_NAME); - target_path.set_file_name(VAULT_FILE_NAME); - fs::rename(source_path, target_path).map_err(|err| SetKeyError::Fatal(err.into()))?; + // we can't just delete temp vault until all files moved, because + // original vault content has already been partially replaced + // => when error or crash happens here, we can't do anything + for temp_account in temp_accounts { + let filename = temp_account.filename.expect( + "self is instance of DiskDirectory; DiskDirectory fills filename in load; qed", + ); + source_path.set_file_name(&filename); + target_path.set_file_name(&filename); + 
fs::rename(&source_path, &target_path).map_err(|err| SetKeyError::Fatal(err.into()))?; + } + source_path.set_file_name(VAULT_FILE_NAME); + target_path.set_file_name(VAULT_FILE_NAME); + fs::rename(source_path, target_path).map_err(|err| SetKeyError::Fatal(err.into()))?; - temp_vault.delete().map_err(|err| SetKeyError::NonFatalNew(err)) - } + temp_vault + .delete() + .map_err(|err| SetKeyError::NonFatalNew(err)) + } - fn meta(&self) -> String { - self.key_manager().meta.lock().clone() - } + fn meta(&self) -> String { + self.key_manager().meta.lock().clone() + } - fn set_meta(&self, meta: &str) -> Result<(), Error> { - let key_manager = self.key_manager(); - let vault_path = self.path().expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed"); - create_vault_file(vault_path, &key_manager.key, meta)?; - *key_manager.meta.lock() = meta.to_owned(); - Ok(()) - } + fn set_meta(&self, meta: &str) -> Result<(), Error> { + let key_manager = self.key_manager(); + let vault_path = self + .path() + .expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed"); + create_vault_file(vault_path, &key_manager.key, meta)?; + *key_manager.meta.lock() = meta.to_owned(); + Ok(()) + } } impl VaultKeyFileManager { - pub fn new(name: &str, key: VaultKey, meta: &str) -> Self { - VaultKeyFileManager { - name: name.into(), - key: key, - meta: Mutex::new(meta.to_owned()), - } - } + pub fn new(name: &str, key: VaultKey, meta: &str) -> Self { + VaultKeyFileManager { + name: name.into(), + key: key, + meta: Mutex::new(meta.to_owned()), + } + } } impl KeyFileManager for VaultKeyFileManager { - fn read(&self, filename: Option, reader: T) -> Result where T: io::Read { - let vault_file = json::VaultKeyFile::load(reader).map_err(|e| Error::Custom(format!("{:?}", e)))?; - let mut safe_account = SafeAccount::from_vault_file(&self.key.password, vault_file, filename.clone())?; + fn read(&self, filename: Option, reader: T) -> Result + where + T: io::Read, + 
{ + let vault_file = + json::VaultKeyFile::load(reader).map_err(|e| Error::Custom(format!("{:?}", e)))?; + let mut safe_account = + SafeAccount::from_vault_file(&self.key.password, vault_file, filename.clone())?; - safe_account.meta = json::insert_vault_name_to_json_meta(&safe_account.meta, &self.name) - .map_err(|err| Error::Custom(format!("{:?}", err)))?; - Ok(safe_account) - } + safe_account.meta = json::insert_vault_name_to_json_meta(&safe_account.meta, &self.name) + .map_err(|err| Error::Custom(format!("{:?}", err)))?; + Ok(safe_account) + } - fn write(&self, mut account: SafeAccount, writer: &mut T) -> Result<(), Error> where T: io::Write { - account.meta = json::remove_vault_name_from_json_meta(&account.meta) - .map_err(|err| Error::Custom(format!("{:?}", err)))?; + fn write(&self, mut account: SafeAccount, writer: &mut T) -> Result<(), Error> + where + T: io::Write, + { + account.meta = json::remove_vault_name_from_json_meta(&account.meta) + .map_err(|err| Error::Custom(format!("{:?}", err)))?; - let vault_file: json::VaultKeyFile = account.into_vault_file(self.key.iterations, &self.key.password)?; - vault_file.write(writer).map_err(|e| Error::Custom(format!("{:?}", e))) - } + let vault_file: json::VaultKeyFile = + account.into_vault_file(self.key.iterations, &self.key.password)?; + vault_file + .write(writer) + .map_err(|e| Error::Custom(format!("{:?}", e))) + } } /// Makes path to vault directory, checking that vault name is appropriate -fn make_vault_dir_path

(root: P, name: &str, check_name: bool) -> Result where P: AsRef { - // check vault name - if check_name && !check_vault_name(name) { - return Err(Error::InvalidVaultName); - } +fn make_vault_dir_path

(root: P, name: &str, check_name: bool) -> Result +where + P: AsRef, +{ + // check vault name + if check_name && !check_vault_name(name) { + return Err(Error::InvalidVaultName); + } - let mut vault_dir_path: PathBuf = root.as_ref().into(); - vault_dir_path.push(name); - Ok(vault_dir_path) + let mut vault_dir_path: PathBuf = root.as_ref().into(); + vault_dir_path.push(name); + Ok(vault_dir_path) } /// Every vault must have unique name => we rely on filesystem to check this /// => vault name must not contain any fs-special characters to avoid directory traversal /// => we only allow alphanumeric + separator characters in vault name. fn check_vault_name(name: &str) -> bool { - !name.is_empty() - && name.chars() - .all(|c| c.is_alphanumeric() - || c.is_whitespace() - || c == '-' || c == '_') + !name.is_empty() + && name + .chars() + .all(|c| c.is_alphanumeric() || c.is_whitespace() || c == '-' || c == '_') } /// Vault can be empty, but still must be pluggable => we store vault password in separate file -fn create_vault_file

(vault_dir_path: P, key: &VaultKey, meta: &str) -> Result<(), Error> where P: AsRef { - let password_hash = key.password.as_bytes().keccak256(); - let crypto = Crypto::with_plain(&password_hash, &key.password, key.iterations)?; +fn create_vault_file

(vault_dir_path: P, key: &VaultKey, meta: &str) -> Result<(), Error> +where + P: AsRef, +{ + let password_hash = key.password.as_bytes().keccak256(); + let crypto = Crypto::with_plain(&password_hash, &key.password, key.iterations)?; - let vault_file_path = vault_dir_path.as_ref().join(VAULT_FILE_NAME); - let temp_vault_file_name = disk::find_unique_filename_using_random_suffix(vault_dir_path.as_ref(), &VAULT_TEMP_FILE_NAME)?; - let temp_vault_file_path = vault_dir_path.as_ref().join(&temp_vault_file_name); + let vault_file_path = vault_dir_path.as_ref().join(VAULT_FILE_NAME); + let temp_vault_file_name = disk::find_unique_filename_using_random_suffix( + vault_dir_path.as_ref(), + &VAULT_TEMP_FILE_NAME, + )?; + let temp_vault_file_path = vault_dir_path.as_ref().join(&temp_vault_file_name); - // this method is used to rewrite existing vault file - // => write to temporary file first, then rename temporary file to vault file - let mut vault_file = disk::create_new_file_with_permissions_to_owner(&temp_vault_file_path)?; - let vault_file_contents = json::VaultFile { - crypto: crypto.into(), - meta: Some(meta.to_owned()), - }; - vault_file_contents.write(&mut vault_file).map_err(|e| Error::Custom(format!("{:?}", e)))?; - drop(vault_file); - fs::rename(&temp_vault_file_path, &vault_file_path)?; + // this method is used to rewrite existing vault file + // => write to temporary file first, then rename temporary file to vault file + let mut vault_file = disk::create_new_file_with_permissions_to_owner(&temp_vault_file_path)?; + let vault_file_contents = json::VaultFile { + crypto: crypto.into(), + meta: Some(meta.to_owned()), + }; + vault_file_contents + .write(&mut vault_file) + .map_err(|e| Error::Custom(format!("{:?}", e)))?; + drop(vault_file); + fs::rename(&temp_vault_file_path, &vault_file_path)?; - Ok(()) + Ok(()) } /// When vault is opened => we must check that password matches && read metadata -fn read_vault_file

(vault_dir_path: P, key: Option<&VaultKey>) -> Result where P: AsRef { - let mut vault_file_path: PathBuf = vault_dir_path.as_ref().into(); - vault_file_path.push(VAULT_FILE_NAME); +fn read_vault_file

(vault_dir_path: P, key: Option<&VaultKey>) -> Result +where + P: AsRef, +{ + let mut vault_file_path: PathBuf = vault_dir_path.as_ref().into(); + vault_file_path.push(VAULT_FILE_NAME); - let vault_file = fs::File::open(vault_file_path)?; - let vault_file_contents = json::VaultFile::load(vault_file).map_err(|e| Error::Custom(format!("{:?}", e)))?; - let vault_file_meta = vault_file_contents.meta.unwrap_or("{}".to_owned()); - let vault_file_crypto: Crypto = vault_file_contents.crypto.into(); + let vault_file = fs::File::open(vault_file_path)?; + let vault_file_contents = + json::VaultFile::load(vault_file).map_err(|e| Error::Custom(format!("{:?}", e)))?; + let vault_file_meta = vault_file_contents.meta.unwrap_or("{}".to_owned()); + let vault_file_crypto: Crypto = vault_file_contents.crypto.into(); - if let Some(key) = key { - let password_bytes = vault_file_crypto.decrypt(&key.password)?; - let password_hash = key.password.as_bytes().keccak256(); - if password_hash != password_bytes.as_slice() { - return Err(Error::InvalidPassword); - } - } + if let Some(key) = key { + let password_bytes = vault_file_crypto.decrypt(&key.password)?; + let password_hash = key.password.as_bytes().keccak256(); + if password_hash != password_bytes.as_slice() { + return Err(Error::InvalidPassword); + } + } - Ok(vault_file_meta) + Ok(vault_file_meta) } #[cfg(test)] mod test { - extern crate tempdir; + extern crate tempdir; - use std::fs; - use std::io::Write; - use std::num::NonZeroU32; - use std::path::PathBuf; - use super::VaultKey; - use super::{VAULT_FILE_NAME, check_vault_name, make_vault_dir_path, create_vault_file, read_vault_file, VaultDiskDirectory}; - use self::tempdir::TempDir; + use self::tempdir::TempDir; + use super::{ + check_vault_name, create_vault_file, make_vault_dir_path, read_vault_file, + VaultDiskDirectory, VaultKey, VAULT_FILE_NAME, + }; + use std::{fs, io::Write, num::NonZeroU32, path::PathBuf}; - - lazy_static! 
{ - static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed"); - } + lazy_static! { + static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed"); + } - #[test] - fn check_vault_name_succeeds() { - assert!(check_vault_name("vault")); - assert!(check_vault_name("vault with spaces")); - assert!(check_vault_name("vault with tabs")); - assert!(check_vault_name("vault_with_underscores")); - assert!(check_vault_name("vault-with-dashes")); - assert!(check_vault_name("vault-with-digits-123")); - assert!(check_vault_name("vault中文名字")); - } + #[test] + fn check_vault_name_succeeds() { + assert!(check_vault_name("vault")); + assert!(check_vault_name("vault with spaces")); + assert!(check_vault_name("vault with tabs")); + assert!(check_vault_name("vault_with_underscores")); + assert!(check_vault_name("vault-with-dashes")); + assert!(check_vault_name("vault-with-digits-123")); + assert!(check_vault_name("vault中文名字")); + } - #[test] - fn check_vault_name_fails() { - assert!(!check_vault_name("")); - assert!(!check_vault_name(".")); - assert!(!check_vault_name("*")); - assert!(!check_vault_name("../.bash_history")); - assert!(!check_vault_name("/etc/passwd")); - assert!(!check_vault_name("c:\\windows")); - } + #[test] + fn check_vault_name_fails() { + assert!(!check_vault_name("")); + assert!(!check_vault_name(".")); + assert!(!check_vault_name("*")); + assert!(!check_vault_name("../.bash_history")); + assert!(!check_vault_name("/etc/passwd")); + assert!(!check_vault_name("c:\\windows")); + } - #[test] - fn make_vault_dir_path_succeeds() { - use std::path::Path; + #[test] + fn make_vault_dir_path_succeeds() { + use std::path::Path; - assert_eq!(&make_vault_dir_path("/home/user/parity", "vault", true).unwrap(), &Path::new("/home/user/parity/vault")); - assert_eq!(&make_vault_dir_path("/home/user/parity", "*bad-name*", false).unwrap(), &Path::new("/home/user/parity/*bad-name*")); - } + assert_eq!( + 
&make_vault_dir_path("/home/user/parity", "vault", true).unwrap(), + &Path::new("/home/user/parity/vault") + ); + assert_eq!( + &make_vault_dir_path("/home/user/parity", "*bad-name*", false).unwrap(), + &Path::new("/home/user/parity/*bad-name*") + ); + } - #[test] - fn make_vault_dir_path_fails() { - assert!(make_vault_dir_path("/home/user/parity", "*bad-name*", true).is_err()); - } + #[test] + fn make_vault_dir_path_fails() { + assert!(make_vault_dir_path("/home/user/parity", "*bad-name*", true).is_err()); + } - #[test] - fn create_vault_file_succeeds() { - // given - let temp_path = TempDir::new("").unwrap(); - let key = VaultKey::new(&"password".into(), *ITERATIONS); - let mut vault_dir: PathBuf = temp_path.path().into(); - vault_dir.push("vault"); - fs::create_dir_all(&vault_dir).unwrap(); + #[test] + fn create_vault_file_succeeds() { + // given + let temp_path = TempDir::new("").unwrap(); + let key = VaultKey::new(&"password".into(), *ITERATIONS); + let mut vault_dir: PathBuf = temp_path.path().into(); + vault_dir.push("vault"); + fs::create_dir_all(&vault_dir).unwrap(); - // when - let result = create_vault_file(&vault_dir, &key, "{}"); + // when + let result = create_vault_file(&vault_dir, &key, "{}"); - // then - assert!(result.is_ok()); - let mut vault_file_path = vault_dir.clone(); - vault_file_path.push(VAULT_FILE_NAME); - assert!(vault_file_path.exists() && vault_file_path.is_file()); - } + // then + assert!(result.is_ok()); + let mut vault_file_path = vault_dir.clone(); + vault_file_path.push(VAULT_FILE_NAME); + assert!(vault_file_path.exists() && vault_file_path.is_file()); + } - #[test] - fn read_vault_file_succeeds() { - // given - let temp_path = TempDir::new("").unwrap(); - let key = VaultKey::new(&"password".into(), *ITERATIONS); - let vault_file_contents = 
r#"{"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"758696c8dc6378ab9b25bb42790da2f5"},"ciphertext":"54eb50683717d41caaeb12ea969f2c159daada5907383f26f327606a37dc7168","kdf":"pbkdf2","kdfparams":{"c":1024,"dklen":32,"prf":"hmac-sha256","salt":"3c320fa566a1a7963ac8df68a19548d27c8f40bf92ef87c84594dcd5bbc402b6"},"mac":"9e5c2314c2a0781962db85611417c614bd6756666b6b1e93840f5b6ed895f003"}}"#; - let dir: PathBuf = temp_path.path().into(); - let mut vault_file_path: PathBuf = dir.clone(); - vault_file_path.push(VAULT_FILE_NAME); - { - let mut vault_file = fs::File::create(vault_file_path).unwrap(); - vault_file.write_all(vault_file_contents.as_bytes()).unwrap(); - } + #[test] + fn read_vault_file_succeeds() { + // given + let temp_path = TempDir::new("").unwrap(); + let key = VaultKey::new(&"password".into(), *ITERATIONS); + let vault_file_contents = r#"{"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"758696c8dc6378ab9b25bb42790da2f5"},"ciphertext":"54eb50683717d41caaeb12ea969f2c159daada5907383f26f327606a37dc7168","kdf":"pbkdf2","kdfparams":{"c":1024,"dklen":32,"prf":"hmac-sha256","salt":"3c320fa566a1a7963ac8df68a19548d27c8f40bf92ef87c84594dcd5bbc402b6"},"mac":"9e5c2314c2a0781962db85611417c614bd6756666b6b1e93840f5b6ed895f003"}}"#; + let dir: PathBuf = temp_path.path().into(); + let mut vault_file_path: PathBuf = dir.clone(); + vault_file_path.push(VAULT_FILE_NAME); + { + let mut vault_file = fs::File::create(vault_file_path).unwrap(); + vault_file + .write_all(vault_file_contents.as_bytes()) + .unwrap(); + } - // when - let result = read_vault_file(&dir, Some(&key)); + // when + let result = read_vault_file(&dir, Some(&key)); - // then - assert!(result.is_ok()); - } + // then + assert!(result.is_ok()); + } - #[test] - fn read_vault_file_fails() { - // given - let temp_path = TempDir::new("").unwrap(); - let key = VaultKey::new(&"password1".into(), *ITERATIONS); - let dir: PathBuf = temp_path.path().into(); - let mut vault_file_path: PathBuf = dir.clone(); - 
vault_file_path.push(VAULT_FILE_NAME); + #[test] + fn read_vault_file_fails() { + // given + let temp_path = TempDir::new("").unwrap(); + let key = VaultKey::new(&"password1".into(), *ITERATIONS); + let dir: PathBuf = temp_path.path().into(); + let mut vault_file_path: PathBuf = dir.clone(); + vault_file_path.push(VAULT_FILE_NAME); - // when - let result = read_vault_file(&dir, Some(&key)); + // when + let result = read_vault_file(&dir, Some(&key)); - // then - assert!(result.is_err()); + // then + assert!(result.is_err()); - // and when given - let vault_file_contents = r#"{"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"0155e3690be19fbfbecabcd440aa284b"},"ciphertext":"4d6938a1f49b7782","kdf":"pbkdf2","kdfparams":{"c":1024,"dklen":32,"prf":"hmac-sha256","salt":"b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5"},"mac":"16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262"}}"#; - { - let mut vault_file = fs::File::create(vault_file_path).unwrap(); - vault_file.write_all(vault_file_contents.as_bytes()).unwrap(); - } + // and when given + let vault_file_contents = r#"{"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"0155e3690be19fbfbecabcd440aa284b"},"ciphertext":"4d6938a1f49b7782","kdf":"pbkdf2","kdfparams":{"c":1024,"dklen":32,"prf":"hmac-sha256","salt":"b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5"},"mac":"16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262"}}"#; + { + let mut vault_file = fs::File::create(vault_file_path).unwrap(); + vault_file + .write_all(vault_file_contents.as_bytes()) + .unwrap(); + } - // when - let result = read_vault_file(&dir, Some(&key)); + // when + let result = read_vault_file(&dir, Some(&key)); - // then - assert!(result.is_err()); - } + // then + assert!(result.is_err()); + } - #[test] - fn vault_directory_can_be_created() { - // given - let temp_path = TempDir::new("").unwrap(); - let key = VaultKey::new(&"password".into(), *ITERATIONS); - let dir: PathBuf = 
temp_path.path().into(); + #[test] + fn vault_directory_can_be_created() { + // given + let temp_path = TempDir::new("").unwrap(); + let key = VaultKey::new(&"password".into(), *ITERATIONS); + let dir: PathBuf = temp_path.path().into(); - // when - let vault = VaultDiskDirectory::create(&dir, "vault", key.clone()); + // when + let vault = VaultDiskDirectory::create(&dir, "vault", key.clone()); - // then - assert!(vault.is_ok()); + // then + assert!(vault.is_ok()); - // and when - let vault = VaultDiskDirectory::at(&dir, "vault", key); + // and when + let vault = VaultDiskDirectory::at(&dir, "vault", key); - // then - assert!(vault.is_ok()); - } + // then + assert!(vault.is_ok()); + } - #[test] - fn vault_directory_cannot_be_created_if_already_exists() { - // given - let temp_path = TempDir::new("").unwrap(); - let key = VaultKey::new(&"password".into(), *ITERATIONS); - let dir: PathBuf = temp_path.path().into(); - let mut vault_dir = dir.clone(); - vault_dir.push("vault"); - fs::create_dir_all(&vault_dir).unwrap(); + #[test] + fn vault_directory_cannot_be_created_if_already_exists() { + // given + let temp_path = TempDir::new("").unwrap(); + let key = VaultKey::new(&"password".into(), *ITERATIONS); + let dir: PathBuf = temp_path.path().into(); + let mut vault_dir = dir.clone(); + vault_dir.push("vault"); + fs::create_dir_all(&vault_dir).unwrap(); - // when - let vault = VaultDiskDirectory::create(&dir, "vault", key); + // when + let vault = VaultDiskDirectory::create(&dir, "vault", key); - // then - assert!(vault.is_err()); - } + // then + assert!(vault.is_err()); + } - #[test] - fn vault_directory_cannot_be_opened_if_not_exists() { - // given - let temp_path = TempDir::new("").unwrap(); - let key = VaultKey::new(&"password".into(), *ITERATIONS); - let dir: PathBuf = temp_path.path().into(); + #[test] + fn vault_directory_cannot_be_opened_if_not_exists() { + // given + let temp_path = TempDir::new("").unwrap(); + let key = VaultKey::new(&"password".into(), 
*ITERATIONS); + let dir: PathBuf = temp_path.path().into(); - // when - let vault = VaultDiskDirectory::at(&dir, "vault", key); + // when + let vault = VaultDiskDirectory::at(&dir, "vault", key); - // then - assert!(vault.is_err()); - } + // then + assert!(vault.is_err()); + } } diff --git a/accounts/ethstore/src/error.rs b/accounts/ethstore/src/error.rs index fceaf1676..ec6622f16 100644 --- a/accounts/ethstore/src/error.rs +++ b/accounts/ethstore/src/error.rs @@ -14,115 +14,113 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt; -use std::io::Error as IoError; -use ethkey::{self, Error as EthKeyError}; use crypto::{self, Error as EthCryptoError}; -use ethkey::DerivationError; +use ethkey::{self, DerivationError, Error as EthKeyError}; +use std::{fmt, io::Error as IoError}; /// Account-related errors. #[derive(Debug)] pub enum Error { - /// IO error - Io(IoError), - /// Invalid Password - InvalidPassword, - /// Account's secret is invalid. - InvalidSecret, - /// Invalid Vault Crypto meta. - InvalidCryptoMeta, - /// Invalid Account. - InvalidAccount, - /// Invalid Message. - InvalidMessage, - /// Invalid Key File - InvalidKeyFile(String), - /// Vaults are not supported. - VaultsAreNotSupported, - /// Unsupported vault - UnsupportedVault, - /// Invalid vault name - InvalidVaultName, - /// Vault not found - VaultNotFound, - /// Account creation failed. - CreationFailed, - /// `EthKey` error - EthKey(EthKeyError), - /// `ethkey::crypto::Error` - EthKeyCrypto(ethkey::crypto::Error), - /// `EthCrypto` error - EthCrypto(EthCryptoError), - /// Derivation error - Derivation(DerivationError), - /// Custom error - Custom(String), + /// IO error + Io(IoError), + /// Invalid Password + InvalidPassword, + /// Account's secret is invalid. + InvalidSecret, + /// Invalid Vault Crypto meta. + InvalidCryptoMeta, + /// Invalid Account. + InvalidAccount, + /// Invalid Message. 
+ InvalidMessage, + /// Invalid Key File + InvalidKeyFile(String), + /// Vaults are not supported. + VaultsAreNotSupported, + /// Unsupported vault + UnsupportedVault, + /// Invalid vault name + InvalidVaultName, + /// Vault not found + VaultNotFound, + /// Account creation failed. + CreationFailed, + /// `EthKey` error + EthKey(EthKeyError), + /// `ethkey::crypto::Error` + EthKeyCrypto(ethkey::crypto::Error), + /// `EthCrypto` error + EthCrypto(EthCryptoError), + /// Derivation error + Derivation(DerivationError), + /// Custom error + Custom(String), } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - let s = match *self { - Error::Io(ref err) => err.to_string(), - Error::InvalidPassword => "Invalid password".into(), - Error::InvalidSecret => "Invalid secret".into(), - Error::InvalidCryptoMeta => "Invalid crypted metadata".into(), - Error::InvalidAccount => "Invalid account".into(), - Error::InvalidMessage => "Invalid message".into(), - Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason), - Error::VaultsAreNotSupported => "Vaults are not supported".into(), - Error::UnsupportedVault => "Vault is not supported for this operation".into(), - Error::InvalidVaultName => "Invalid vault name".into(), - Error::VaultNotFound => "Vault not found".into(), - Error::CreationFailed => "Account creation failed".into(), - Error::EthKey(ref err) => err.to_string(), - Error::EthKeyCrypto(ref err) => err.to_string(), - Error::EthCrypto(ref err) => err.to_string(), - Error::Derivation(ref err) => format!("Derivation error: {:?}", err), - Error::Custom(ref s) => s.clone(), - }; + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let s = match *self { + Error::Io(ref err) => err.to_string(), + Error::InvalidPassword => "Invalid password".into(), + Error::InvalidSecret => "Invalid secret".into(), + Error::InvalidCryptoMeta => "Invalid crypted metadata".into(), + Error::InvalidAccount => "Invalid 
account".into(), + Error::InvalidMessage => "Invalid message".into(), + Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason), + Error::VaultsAreNotSupported => "Vaults are not supported".into(), + Error::UnsupportedVault => "Vault is not supported for this operation".into(), + Error::InvalidVaultName => "Invalid vault name".into(), + Error::VaultNotFound => "Vault not found".into(), + Error::CreationFailed => "Account creation failed".into(), + Error::EthKey(ref err) => err.to_string(), + Error::EthKeyCrypto(ref err) => err.to_string(), + Error::EthCrypto(ref err) => err.to_string(), + Error::Derivation(ref err) => format!("Derivation error: {:?}", err), + Error::Custom(ref s) => s.clone(), + }; - write!(f, "{}", s) - } + write!(f, "{}", s) + } } impl From for Error { - fn from(err: IoError) -> Self { - Error::Io(err) - } + fn from(err: IoError) -> Self { + Error::Io(err) + } } impl From for Error { - fn from(err: EthKeyError) -> Self { - Error::EthKey(err) - } + fn from(err: EthKeyError) -> Self { + Error::EthKey(err) + } } impl From for Error { - fn from(err: ethkey::crypto::Error) -> Self { - Error::EthKeyCrypto(err) - } + fn from(err: ethkey::crypto::Error) -> Self { + Error::EthKeyCrypto(err) + } } impl From for Error { - fn from(err: EthCryptoError) -> Self { - Error::EthCrypto(err) - } + fn from(err: EthCryptoError) -> Self { + Error::EthCrypto(err) + } } impl From for Error { - fn from(err: crypto::error::ScryptError) -> Self { - Error::EthCrypto(err.into()) - } + fn from(err: crypto::error::ScryptError) -> Self { + Error::EthCrypto(err.into()) + } } impl From for Error { - fn from(err: crypto::error::SymmError) -> Self { - Error::EthCrypto(err.into()) - } + fn from(err: crypto::error::SymmError) -> Self { + Error::EthCrypto(err.into()) + } } impl From for Error { - fn from(err: DerivationError) -> Self { - Error::Derivation(err) - } + fn from(err: DerivationError) -> Self { + Error::Derivation(err) + } } diff --git 
a/accounts/ethstore/src/ethkey.rs b/accounts/ethstore/src/ethkey.rs index 8cd2c533a..2c45716e8 100644 --- a/accounts/ethstore/src/ethkey.rs +++ b/accounts/ethstore/src/ethkey.rs @@ -19,23 +19,23 @@ pub use _ethkey::*; use json; impl Into for Address { - fn into(self) -> json::H160 { - let a: [u8; 20] = self.into(); - From::from(a) - } + fn into(self) -> json::H160 { + let a: [u8; 20] = self.into(); + From::from(a) + } } impl From for Address { - fn from(json: json::H160) -> Self { - let a: [u8; 20] = json.into(); - From::from(a) - } + fn from(json: json::H160) -> Self { + let a: [u8; 20] = json.into(); + From::from(a) + } } impl<'a> From<&'a json::H160> for Address { - fn from(json: &'a json::H160) -> Self { - let mut a = [0u8; 20]; - a.copy_from_slice(json); - From::from(a) - } + fn from(json: &'a json::H160) -> Self { + let mut a = [0u8; 20]; + a.copy_from_slice(json); + From::from(a) + } } diff --git a/accounts/ethstore/src/ethstore.rs b/accounts/ethstore/src/ethstore.rs index 92eb94967..cd78c5342 100644 --- a/accounts/ethstore/src/ethstore.rs +++ b/accounts/ethstore/src/ethstore.rs @@ -14,1121 +14,1544 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::{BTreeMap, HashMap}; -use std::num::NonZeroU32; -use std::mem; -use std::path::PathBuf; use parking_lot::{Mutex, RwLock}; -use std::time::{Instant, Duration}; +use std::{ + collections::{BTreeMap, HashMap}, + mem, + num::NonZeroU32, + path::PathBuf, + time::{Duration, Instant}, +}; -use random::Random; -use ethkey::{self, Signature, Password, Address, Message, Secret, Public, KeyPair, ExtendedKeyPair}; -use accounts_dir::{KeyDirectory, VaultKeyDirectory, VaultKey, SetKeyError}; use account::SafeAccount; +use accounts_dir::{KeyDirectory, SetKeyError, VaultKey, VaultKeyDirectory}; +use ethkey::{ + self, Address, ExtendedKeyPair, KeyPair, Message, Password, Public, Secret, Signature, +}; +use import; +use json::{self, OpaqueKeyFile, Uuid}; use presale::PresaleWallet; -use json::{self, Uuid, OpaqueKeyFile}; -use {import, Error, SimpleSecretStore, SecretStore, SecretVaultRef, StoreAccountRef, Derivation, OpaqueSecret}; - +use random::Random; +use Derivation; +use Error; +use OpaqueSecret; +use SecretStore; +use SecretVaultRef; +use SimpleSecretStore; +use StoreAccountRef; lazy_static! { - static ref KEY_ITERATIONS: NonZeroU32 = - NonZeroU32::new(crypto::KEY_ITERATIONS as u32).expect("KEY_ITERATIONS > 0; qed"); + static ref KEY_ITERATIONS: NonZeroU32 = + NonZeroU32::new(crypto::KEY_ITERATIONS as u32).expect("KEY_ITERATIONS > 0; qed"); } /// Accounts store. pub struct EthStore { - store: EthMultiStore, + store: EthMultiStore, } impl EthStore { - /// Open a new accounts store with given key directory backend. - pub fn open(directory: Box) -> Result { - Self::open_with_iterations(directory, *KEY_ITERATIONS) - } + /// Open a new accounts store with given key directory backend. + pub fn open(directory: Box) -> Result { + Self::open_with_iterations(directory, *KEY_ITERATIONS) + } - /// Open a new account store with given key directory backend and custom number of iterations. 
- pub fn open_with_iterations(directory: Box, iterations: NonZeroU32) -> Result { - Ok(EthStore { - store: EthMultiStore::open_with_iterations(directory, iterations)?, - }) - } + /// Open a new account store with given key directory backend and custom number of iterations. + pub fn open_with_iterations( + directory: Box, + iterations: NonZeroU32, + ) -> Result { + Ok(EthStore { + store: EthMultiStore::open_with_iterations(directory, iterations)?, + }) + } - /// Modify account refresh timeout - how often they are re-read from `KeyDirectory`. - /// - /// Setting this to low values (or 0) will cause new accounts to be picked up quickly, - /// although it may induce heavy disk reads and is not recommended if you manage many keys (say over 10k). - /// - /// By default refreshing is disabled, so only accounts created using this instance of `EthStore` are taken into account. - pub fn set_refresh_time(&self, time: Duration) { - self.store.set_refresh_time(time) - } + /// Modify account refresh timeout - how often they are re-read from `KeyDirectory`. + /// + /// Setting this to low values (or 0) will cause new accounts to be picked up quickly, + /// although it may induce heavy disk reads and is not recommended if you manage many keys (say over 10k). + /// + /// By default refreshing is disabled, so only accounts created using this instance of `EthStore` are taken into account. 
+ pub fn set_refresh_time(&self, time: Duration) { + self.store.set_refresh_time(time) + } - fn get(&self, account: &StoreAccountRef) -> Result { - let mut accounts = self.store.get_accounts(account)?.into_iter(); - accounts.next().ok_or(Error::InvalidAccount) - } + fn get(&self, account: &StoreAccountRef) -> Result { + let mut accounts = self.store.get_accounts(account)?.into_iter(); + accounts.next().ok_or(Error::InvalidAccount) + } } impl SimpleSecretStore for EthStore { - fn insert_account(&self, vault: SecretVaultRef, secret: Secret, password: &Password) -> Result { - self.store.insert_account(vault, secret, password) - } + fn insert_account( + &self, + vault: SecretVaultRef, + secret: Secret, + password: &Password, + ) -> Result { + self.store.insert_account(vault, secret, password) + } - fn insert_derived(&self, vault: SecretVaultRef, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation) - -> Result - { - self.store.insert_derived(vault, account_ref, password, derivation) - } + fn insert_derived( + &self, + vault: SecretVaultRef, + account_ref: &StoreAccountRef, + password: &Password, + derivation: Derivation, + ) -> Result { + self.store + .insert_derived(vault, account_ref, password, derivation) + } - fn generate_derived(&self, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation) -> Result { - self.store.generate_derived(account_ref, password, derivation) - } + fn generate_derived( + &self, + account_ref: &StoreAccountRef, + password: &Password, + derivation: Derivation, + ) -> Result { + self.store + .generate_derived(account_ref, password, derivation) + } - fn account_ref(&self, address: &Address) -> Result { - self.store.account_ref(address) - } + fn account_ref(&self, address: &Address) -> Result { + self.store.account_ref(address) + } - fn accounts(&self) -> Result, Error> { - self.store.accounts() - } + fn accounts(&self) -> Result, Error> { + self.store.accounts() + } - fn change_password(&self, account: 
&StoreAccountRef, old_password: &Password, new_password: &Password) -> Result<(), Error> { - self.store.change_password(account, old_password, new_password) - } + fn change_password( + &self, + account: &StoreAccountRef, + old_password: &Password, + new_password: &Password, + ) -> Result<(), Error> { + self.store + .change_password(account, old_password, new_password) + } - fn export_account(&self, account: &StoreAccountRef, password: &Password) -> Result { - self.store.export_account(account, password) - } + fn export_account( + &self, + account: &StoreAccountRef, + password: &Password, + ) -> Result { + self.store.export_account(account, password) + } - fn remove_account(&self, account: &StoreAccountRef, password: &Password) -> Result<(), Error> { - self.store.remove_account(account, password) - } + fn remove_account(&self, account: &StoreAccountRef, password: &Password) -> Result<(), Error> { + self.store.remove_account(account, password) + } - fn sign(&self, account: &StoreAccountRef, password: &Password, message: &Message) -> Result { - self.get(account)?.sign(password, message) - } + fn sign( + &self, + account: &StoreAccountRef, + password: &Password, + message: &Message, + ) -> Result { + self.get(account)?.sign(password, message) + } - fn sign_derived(&self, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation, message: &Message) - -> Result - { - self.store.sign_derived(account_ref, password, derivation, message) - } + fn sign_derived( + &self, + account_ref: &StoreAccountRef, + password: &Password, + derivation: Derivation, + message: &Message, + ) -> Result { + self.store + .sign_derived(account_ref, password, derivation, message) + } - fn agree(&self, account: &StoreAccountRef, password: &Password, other: &Public) -> Result { - self.store.agree(account, password, other) - } + fn agree( + &self, + account: &StoreAccountRef, + password: &Password, + other: &Public, + ) -> Result { + self.store.agree(account, password, other) + } - 
fn decrypt(&self, account: &StoreAccountRef, password: &Password, shared_mac: &[u8], message: &[u8]) -> Result, Error> { - let account = self.get(account)?; - account.decrypt(password, shared_mac, message) - } + fn decrypt( + &self, + account: &StoreAccountRef, + password: &Password, + shared_mac: &[u8], + message: &[u8], + ) -> Result, Error> { + let account = self.get(account)?; + account.decrypt(password, shared_mac, message) + } - fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error> { - self.store.create_vault(name, password) - } + fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error> { + self.store.create_vault(name, password) + } - fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error> { - self.store.open_vault(name, password) - } + fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error> { + self.store.open_vault(name, password) + } - fn close_vault(&self, name: &str) -> Result<(), Error> { - self.store.close_vault(name) - } + fn close_vault(&self, name: &str) -> Result<(), Error> { + self.store.close_vault(name) + } - fn list_vaults(&self) -> Result, Error> { - self.store.list_vaults() - } + fn list_vaults(&self) -> Result, Error> { + self.store.list_vaults() + } - fn list_opened_vaults(&self) -> Result, Error> { - self.store.list_opened_vaults() - } + fn list_opened_vaults(&self) -> Result, Error> { + self.store.list_opened_vaults() + } - fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error> { - self.store.change_vault_password(name, new_password) - } + fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error> { + self.store.change_vault_password(name, new_password) + } - fn change_account_vault(&self, vault: SecretVaultRef, account: StoreAccountRef) -> Result { - self.store.change_account_vault(vault, account) - } + fn change_account_vault( + &self, + vault: SecretVaultRef, + account: StoreAccountRef, + ) 
-> Result { + self.store.change_account_vault(vault, account) + } - fn get_vault_meta(&self, name: &str) -> Result { - self.store.get_vault_meta(name) - } + fn get_vault_meta(&self, name: &str) -> Result { + self.store.get_vault_meta(name) + } - fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error> { - self.store.set_vault_meta(name, meta) - } + fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error> { + self.store.set_vault_meta(name, meta) + } } impl SecretStore for EthStore { - fn raw_secret(&self, account: &StoreAccountRef, password: &Password) -> Result { - Ok(OpaqueSecret(self.get(account)?.crypto.secret(password)?)) - } + fn raw_secret( + &self, + account: &StoreAccountRef, + password: &Password, + ) -> Result { + Ok(OpaqueSecret(self.get(account)?.crypto.secret(password)?)) + } - fn import_presale(&self, vault: SecretVaultRef, json: &[u8], password: &Password) -> Result { - let json_wallet = json::PresaleWallet::load(json).map_err(|_| Error::InvalidKeyFile("Invalid JSON format".to_owned()))?; - let wallet = PresaleWallet::from(json_wallet); - let keypair = wallet.decrypt(password).map_err(|_| Error::InvalidPassword)?; - self.insert_account(vault, keypair.secret().clone(), password) - } + fn import_presale( + &self, + vault: SecretVaultRef, + json: &[u8], + password: &Password, + ) -> Result { + let json_wallet = json::PresaleWallet::load(json) + .map_err(|_| Error::InvalidKeyFile("Invalid JSON format".to_owned()))?; + let wallet = PresaleWallet::from(json_wallet); + let keypair = wallet + .decrypt(password) + .map_err(|_| Error::InvalidPassword)?; + self.insert_account(vault, keypair.secret().clone(), password) + } - fn import_wallet(&self, vault: SecretVaultRef, json: &[u8], password: &Password, gen_id: bool) -> Result { - let json_keyfile = json::KeyFile::load(json).map_err(|_| Error::InvalidKeyFile("Invalid JSON format".to_owned()))?; - let mut safe_account = SafeAccount::from_file(json_keyfile, None, &None)?; + fn 
import_wallet( + &self, + vault: SecretVaultRef, + json: &[u8], + password: &Password, + gen_id: bool, + ) -> Result { + let json_keyfile = json::KeyFile::load(json) + .map_err(|_| Error::InvalidKeyFile("Invalid JSON format".to_owned()))?; + let mut safe_account = SafeAccount::from_file(json_keyfile, None, &None)?; - if gen_id { - safe_account.id = Random::random(); - } + if gen_id { + safe_account.id = Random::random(); + } - let secret = safe_account.crypto.secret(password).map_err(|_| Error::InvalidPassword)?; - safe_account.address = KeyPair::from_secret(secret)?.address(); - self.store.import(vault, safe_account) - } + let secret = safe_account + .crypto + .secret(password) + .map_err(|_| Error::InvalidPassword)?; + safe_account.address = KeyPair::from_secret(secret)?.address(); + self.store.import(vault, safe_account) + } - fn test_password(&self, account: &StoreAccountRef, password: &Password) -> Result { - let account = self.get(account)?; - Ok(account.check_password(password)) - } + fn test_password(&self, account: &StoreAccountRef, password: &Password) -> Result { + let account = self.get(account)?; + Ok(account.check_password(password)) + } - fn copy_account(&self, new_store: &SimpleSecretStore, new_vault: SecretVaultRef, account: &StoreAccountRef, password: &Password, new_password: &Password) -> Result<(), Error> { - let account = self.get(account)?; - let secret = account.crypto.secret(password)?; - new_store.insert_account(new_vault, secret, new_password)?; - Ok(()) - } + fn copy_account( + &self, + new_store: &SimpleSecretStore, + new_vault: SecretVaultRef, + account: &StoreAccountRef, + password: &Password, + new_password: &Password, + ) -> Result<(), Error> { + let account = self.get(account)?; + let secret = account.crypto.secret(password)?; + new_store.insert_account(new_vault, secret, new_password)?; + Ok(()) + } - fn public(&self, account: &StoreAccountRef, password: &Password) -> Result { - let account = self.get(account)?; - 
account.public(password) - } + fn public(&self, account: &StoreAccountRef, password: &Password) -> Result { + let account = self.get(account)?; + account.public(password) + } - fn uuid(&self, account: &StoreAccountRef) -> Result { - let account = self.get(account)?; - Ok(account.id.into()) - } + fn uuid(&self, account: &StoreAccountRef) -> Result { + let account = self.get(account)?; + Ok(account.id.into()) + } - fn name(&self, account: &StoreAccountRef) -> Result { - let account = self.get(account)?; - Ok(account.name.clone()) - } + fn name(&self, account: &StoreAccountRef) -> Result { + let account = self.get(account)?; + Ok(account.name.clone()) + } - fn meta(&self, account: &StoreAccountRef) -> Result { - let account = self.get(account)?; - Ok(account.meta.clone()) - } + fn meta(&self, account: &StoreAccountRef) -> Result { + let account = self.get(account)?; + Ok(account.meta.clone()) + } - fn set_name(&self, account_ref: &StoreAccountRef, name: String) -> Result<(), Error> { - let old = self.get(account_ref)?; - let mut safe_account = old.clone(); - safe_account.name = name; + fn set_name(&self, account_ref: &StoreAccountRef, name: String) -> Result<(), Error> { + let old = self.get(account_ref)?; + let mut safe_account = old.clone(); + safe_account.name = name; - // save to file - self.store.update(account_ref, old, safe_account) - } + // save to file + self.store.update(account_ref, old, safe_account) + } - fn set_meta(&self, account_ref: &StoreAccountRef, meta: String) -> Result<(), Error> { - let old = self.get(account_ref)?; - let mut safe_account = old.clone(); - safe_account.meta = meta; + fn set_meta(&self, account_ref: &StoreAccountRef, meta: String) -> Result<(), Error> { + let old = self.get(account_ref)?; + let mut safe_account = old.clone(); + safe_account.meta = meta; - // save to file - self.store.update(account_ref, old, safe_account) - } + // save to file + self.store.update(account_ref, old, safe_account) + } - fn local_path(&self) -> 
PathBuf { - self.store.dir.path().cloned().unwrap_or_else(PathBuf::new) - } + fn local_path(&self) -> PathBuf { + self.store.dir.path().cloned().unwrap_or_else(PathBuf::new) + } - fn list_geth_accounts(&self, testnet: bool) -> Vec

{ - import::read_geth_accounts(testnet) - } + fn list_geth_accounts(&self, testnet: bool) -> Vec
{ + import::read_geth_accounts(testnet) + } - fn import_geth_accounts(&self, vault: SecretVaultRef, desired: Vec
, testnet: bool) -> Result, Error> { - let imported_addresses = match vault { - SecretVaultRef::Root => import::import_geth_accounts(&*self.store.dir, desired.into_iter().collect(), testnet), - SecretVaultRef::Vault(vault_name) => { - if let Some(vault) = self.store.vaults.lock().get(&vault_name) { - import::import_geth_accounts(vault.as_key_directory(), desired.into_iter().collect(), testnet) - } else { - Err(Error::VaultNotFound) - } - }, - }; + fn import_geth_accounts( + &self, + vault: SecretVaultRef, + desired: Vec
, + testnet: bool, + ) -> Result, Error> { + let imported_addresses = match vault { + SecretVaultRef::Root => import::import_geth_accounts( + &*self.store.dir, + desired.into_iter().collect(), + testnet, + ), + SecretVaultRef::Vault(vault_name) => { + if let Some(vault) = self.store.vaults.lock().get(&vault_name) { + import::import_geth_accounts( + vault.as_key_directory(), + desired.into_iter().collect(), + testnet, + ) + } else { + Err(Error::VaultNotFound) + } + } + }; - imported_addresses - .map(|a| a.into_iter().map(|a| StoreAccountRef::root(a)).collect()) - } + imported_addresses.map(|a| a.into_iter().map(|a| StoreAccountRef::root(a)).collect()) + } } /// Similar to `EthStore` but may store many accounts (with different passwords) for the same `Address` pub struct EthMultiStore { - dir: Box, - iterations: NonZeroU32, - // order lock: cache, then vaults - cache: RwLock>>, - vaults: Mutex>>, - timestamp: Mutex, + dir: Box, + iterations: NonZeroU32, + // order lock: cache, then vaults + cache: RwLock>>, + vaults: Mutex>>, + timestamp: Mutex, } struct Timestamp { - dir_hash: Option, - last_checked: Instant, - refresh_time: Duration, + dir_hash: Option, + last_checked: Instant, + refresh_time: Duration, } impl EthMultiStore { - /// Open new multi-accounts store with given key directory backend. - pub fn open(directory: Box) -> Result { - Self::open_with_iterations(directory, *KEY_ITERATIONS) - } + /// Open new multi-accounts store with given key directory backend. + pub fn open(directory: Box) -> Result { + Self::open_with_iterations(directory, *KEY_ITERATIONS) + } - /// Open new multi-accounts store with given key directory backend and custom number of iterations for new keys. 
- pub fn open_with_iterations(directory: Box, iterations: NonZeroU32) -> Result { - let store = EthMultiStore { - dir: directory, - vaults: Mutex::new(HashMap::new()), - iterations: iterations, - cache: Default::default(), - timestamp: Mutex::new(Timestamp { - dir_hash: None, - last_checked: Instant::now(), - // by default we never refresh accounts - refresh_time: Duration::from_secs(u64::max_value()), - }), - }; - store.reload_accounts()?; - Ok(store) - } + /// Open new multi-accounts store with given key directory backend and custom number of iterations for new keys. + pub fn open_with_iterations( + directory: Box, + iterations: NonZeroU32, + ) -> Result { + let store = EthMultiStore { + dir: directory, + vaults: Mutex::new(HashMap::new()), + iterations: iterations, + cache: Default::default(), + timestamp: Mutex::new(Timestamp { + dir_hash: None, + last_checked: Instant::now(), + // by default we never refresh accounts + refresh_time: Duration::from_secs(u64::max_value()), + }), + }; + store.reload_accounts()?; + Ok(store) + } - /// Modify account refresh timeout - how often they are re-read from `KeyDirectory`. - /// - /// Setting this to low values (or 0) will cause new accounts to be picked up quickly, - /// although it may induce heavy disk reads and is not recommended if you manage many keys (say over 10k). - /// - /// By default refreshing is disabled, so only accounts created using this instance of `EthStore` are taken into account. - pub fn set_refresh_time(&self, time: Duration) { - self.timestamp.lock().refresh_time = time; - } + /// Modify account refresh timeout - how often they are re-read from `KeyDirectory`. + /// + /// Setting this to low values (or 0) will cause new accounts to be picked up quickly, + /// although it may induce heavy disk reads and is not recommended if you manage many keys (say over 10k). + /// + /// By default refreshing is disabled, so only accounts created using this instance of `EthStore` are taken into account. 
+ pub fn set_refresh_time(&self, time: Duration) { + self.timestamp.lock().refresh_time = time; + } - fn reload_if_changed(&self) -> Result<(), Error> { - let mut last_timestamp = self.timestamp.lock(); - let now = Instant::now(); - if now - last_timestamp.last_checked > last_timestamp.refresh_time { - let dir_hash = Some(self.dir.unique_repr()?); - last_timestamp.last_checked = now; - if last_timestamp.dir_hash == dir_hash { - return Ok(()) - } - self.reload_accounts()?; - last_timestamp.dir_hash = dir_hash; - } - Ok(()) - } + fn reload_if_changed(&self) -> Result<(), Error> { + let mut last_timestamp = self.timestamp.lock(); + let now = Instant::now(); + if now - last_timestamp.last_checked > last_timestamp.refresh_time { + let dir_hash = Some(self.dir.unique_repr()?); + last_timestamp.last_checked = now; + if last_timestamp.dir_hash == dir_hash { + return Ok(()); + } + self.reload_accounts()?; + last_timestamp.dir_hash = dir_hash; + } + Ok(()) + } - fn reload_accounts(&self) -> Result<(), Error> { - let mut cache = self.cache.write(); + fn reload_accounts(&self) -> Result<(), Error> { + let mut cache = self.cache.write(); - let mut new_accounts = BTreeMap::new(); - for account in self.dir.load()? { - let account_ref = StoreAccountRef::root(account.address); - new_accounts - .entry(account_ref) - .or_insert_with(Vec::new) - .push(account); - } - for (vault_name, vault) in &*self.vaults.lock() { - for account in vault.load()? { - let account_ref = StoreAccountRef::vault(vault_name, account.address); - new_accounts - .entry(account_ref) - .or_insert_with(Vec::new) - .push(account); - } - } + let mut new_accounts = BTreeMap::new(); + for account in self.dir.load()? { + let account_ref = StoreAccountRef::root(account.address); + new_accounts + .entry(account_ref) + .or_insert_with(Vec::new) + .push(account); + } + for (vault_name, vault) in &*self.vaults.lock() { + for account in vault.load()? 
{ + let account_ref = StoreAccountRef::vault(vault_name, account.address); + new_accounts + .entry(account_ref) + .or_insert_with(Vec::new) + .push(account); + } + } - mem::replace(&mut *cache, new_accounts); - Ok(()) - } + mem::replace(&mut *cache, new_accounts); + Ok(()) + } - fn get_accounts(&self, account: &StoreAccountRef) -> Result, Error> { - let from_cache = |account| { - let cache = self.cache.read(); - if let Some(accounts) = cache.get(account) { - if !accounts.is_empty() { - return Some(accounts.clone()) - } - } + fn get_accounts(&self, account: &StoreAccountRef) -> Result, Error> { + let from_cache = |account| { + let cache = self.cache.read(); + if let Some(accounts) = cache.get(account) { + if !accounts.is_empty() { + return Some(accounts.clone()); + } + } - None - }; + None + }; - match from_cache(account) { - Some(accounts) => Ok(accounts), - None => { - self.reload_if_changed()?; - from_cache(account).ok_or(Error::InvalidAccount) - } - } - } + match from_cache(account) { + Some(accounts) => Ok(accounts), + None => { + self.reload_if_changed()?; + from_cache(account).ok_or(Error::InvalidAccount) + } + } + } - fn get_matching(&self, account: &StoreAccountRef, password: &Password) -> Result, Error> { - let accounts = self.get_accounts(account)?; + fn get_matching( + &self, + account: &StoreAccountRef, + password: &Password, + ) -> Result, Error> { + let accounts = self.get_accounts(account)?; - Ok(accounts.into_iter() - .filter(|acc| acc.check_password(password)) - .collect() - ) - } + Ok(accounts + .into_iter() + .filter(|acc| acc.check_password(password)) + .collect()) + } - fn import(&self, vault: SecretVaultRef, account: SafeAccount) -> Result { - // save to file - let account = match vault { - SecretVaultRef::Root => self.dir.insert(account)?, - SecretVaultRef::Vault(ref vault_name) => self.vaults.lock().get_mut(vault_name).ok_or(Error::VaultNotFound)?.insert(account)?, - }; + fn import( + &self, + vault: SecretVaultRef, + account: SafeAccount, + 
) -> Result { + // save to file + let account = match vault { + SecretVaultRef::Root => self.dir.insert(account)?, + SecretVaultRef::Vault(ref vault_name) => self + .vaults + .lock() + .get_mut(vault_name) + .ok_or(Error::VaultNotFound)? + .insert(account)?, + }; - // update cache - let account_ref = StoreAccountRef::new(vault, account.address.clone()); - let mut cache = self.cache.write(); - cache.entry(account_ref.clone()) - .or_insert_with(Vec::new) - .push(account); + // update cache + let account_ref = StoreAccountRef::new(vault, account.address.clone()); + let mut cache = self.cache.write(); + cache + .entry(account_ref.clone()) + .or_insert_with(Vec::new) + .push(account); - Ok(account_ref) - } + Ok(account_ref) + } - fn update(&self, account_ref: &StoreAccountRef, old: SafeAccount, new: SafeAccount) -> Result<(), Error> { - // save to file - let account = match account_ref.vault { - SecretVaultRef::Root => self.dir.update(new)?, - SecretVaultRef::Vault(ref vault_name) => self.vaults.lock().get_mut(vault_name).ok_or(Error::VaultNotFound)?.update(new)?, - }; + fn update( + &self, + account_ref: &StoreAccountRef, + old: SafeAccount, + new: SafeAccount, + ) -> Result<(), Error> { + // save to file + let account = match account_ref.vault { + SecretVaultRef::Root => self.dir.update(new)?, + SecretVaultRef::Vault(ref vault_name) => self + .vaults + .lock() + .get_mut(vault_name) + .ok_or(Error::VaultNotFound)? 
+ .update(new)?, + }; - // update cache - let mut cache = self.cache.write(); - let accounts = cache.entry(account_ref.clone()).or_insert_with(Vec::new); - // Remove old account - accounts.retain(|acc| acc != &old); - // And push updated to the end - accounts.push(account); - Ok(()) + // update cache + let mut cache = self.cache.write(); + let accounts = cache.entry(account_ref.clone()).or_insert_with(Vec::new); + // Remove old account + accounts.retain(|acc| acc != &old); + // And push updated to the end + accounts.push(account); + Ok(()) + } - } + fn remove_safe_account( + &self, + account_ref: &StoreAccountRef, + account: &SafeAccount, + ) -> Result<(), Error> { + // Remove from dir + match account_ref.vault { + SecretVaultRef::Root => self.dir.remove(&account)?, + SecretVaultRef::Vault(ref vault_name) => self + .vaults + .lock() + .get(vault_name) + .ok_or(Error::VaultNotFound)? + .remove(&account)?, + }; - fn remove_safe_account(&self, account_ref: &StoreAccountRef, account: &SafeAccount) -> Result<(), Error> { - // Remove from dir - match account_ref.vault { - SecretVaultRef::Root => self.dir.remove(&account)?, - SecretVaultRef::Vault(ref vault_name) => self.vaults.lock().get(vault_name).ok_or(Error::VaultNotFound)?.remove(&account)?, - }; + // Remove from cache + let mut cache = self.cache.write(); + let is_empty = { + if let Some(accounts) = cache.get_mut(account_ref) { + if let Some(position) = accounts.iter().position(|acc| acc == account) { + accounts.remove(position); + } + accounts.is_empty() + } else { + false + } + }; - // Remove from cache - let mut cache = self.cache.write(); - let is_empty = { - if let Some(accounts) = cache.get_mut(account_ref) { - if let Some(position) = accounts.iter().position(|acc| acc == account) { - accounts.remove(position); - } - accounts.is_empty() - } else { - false - } - }; + if is_empty { + cache.remove(account_ref); + } - if is_empty { - cache.remove(account_ref); - } + return Ok(()); + } - return Ok(()); - } - - fn 
generate(&self, secret: Secret, derivation: Derivation) -> Result { - let mut extended = ExtendedKeyPair::new(secret); - match derivation { - Derivation::Hierarchical(path) => { - for path_item in path { - extended = extended.derive( - if path_item.soft { ethkey::Derivation::Soft(path_item.index) } - else { ethkey::Derivation::Hard(path_item.index) } - )?; - } - }, - Derivation::SoftHash(h256) => { extended = extended.derive(ethkey::Derivation::Soft(h256))?; } - Derivation::HardHash(h256) => { extended = extended.derive(ethkey::Derivation::Hard(h256))?; } - } - Ok(extended) - } + fn generate(&self, secret: Secret, derivation: Derivation) -> Result { + let mut extended = ExtendedKeyPair::new(secret); + match derivation { + Derivation::Hierarchical(path) => { + for path_item in path { + extended = extended.derive(if path_item.soft { + ethkey::Derivation::Soft(path_item.index) + } else { + ethkey::Derivation::Hard(path_item.index) + })?; + } + } + Derivation::SoftHash(h256) => { + extended = extended.derive(ethkey::Derivation::Soft(h256))?; + } + Derivation::HardHash(h256) => { + extended = extended.derive(ethkey::Derivation::Hard(h256))?; + } + } + Ok(extended) + } } impl SimpleSecretStore for EthMultiStore { - fn insert_account(&self, vault: SecretVaultRef, secret: Secret, password: &Password) -> Result { - let keypair = KeyPair::from_secret(secret).map_err(|_| Error::CreationFailed)?; - let id: [u8; 16] = Random::random(); - let account = SafeAccount::create(&keypair, id, password, self.iterations, "".to_owned(), "{}".to_owned())?; - self.import(vault, account) - } + fn insert_account( + &self, + vault: SecretVaultRef, + secret: Secret, + password: &Password, + ) -> Result { + let keypair = KeyPair::from_secret(secret).map_err(|_| Error::CreationFailed)?; + let id: [u8; 16] = Random::random(); + let account = SafeAccount::create( + &keypair, + id, + password, + self.iterations, + "".to_owned(), + "{}".to_owned(), + )?; + self.import(vault, account) + } - fn 
insert_derived(&self, vault: SecretVaultRef, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation) - -> Result - { - let accounts = self.get_matching(account_ref, password)?; - for account in accounts { - let extended = self.generate(account.crypto.secret(password)?, derivation)?; - return self.insert_account(vault, extended.secret().as_raw().clone(), password); - } - Err(Error::InvalidPassword) - } + fn insert_derived( + &self, + vault: SecretVaultRef, + account_ref: &StoreAccountRef, + password: &Password, + derivation: Derivation, + ) -> Result { + let accounts = self.get_matching(account_ref, password)?; + for account in accounts { + let extended = self.generate(account.crypto.secret(password)?, derivation)?; + return self.insert_account(vault, extended.secret().as_raw().clone(), password); + } + Err(Error::InvalidPassword) + } - fn generate_derived(&self, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation) - -> Result - { - let accounts = self.get_matching(&account_ref, password)?; - for account in accounts { - let extended = self.generate(account.crypto.secret(password)?, derivation)?; - return Ok(ethkey::public_to_address(extended.public().public())); - } - Err(Error::InvalidPassword) - } + fn generate_derived( + &self, + account_ref: &StoreAccountRef, + password: &Password, + derivation: Derivation, + ) -> Result { + let accounts = self.get_matching(&account_ref, password)?; + for account in accounts { + let extended = self.generate(account.crypto.secret(password)?, derivation)?; + return Ok(ethkey::public_to_address(extended.public().public())); + } + Err(Error::InvalidPassword) + } - fn sign_derived(&self, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation, message: &Message) - -> Result - { - let accounts = self.get_matching(&account_ref, password)?; - for account in accounts { - let extended = self.generate(account.crypto.secret(password)?, derivation)?; - let secret = 
extended.secret().as_raw(); - return Ok(ethkey::sign(&secret, message)?) - } - Err(Error::InvalidPassword) - } + fn sign_derived( + &self, + account_ref: &StoreAccountRef, + password: &Password, + derivation: Derivation, + message: &Message, + ) -> Result { + let accounts = self.get_matching(&account_ref, password)?; + for account in accounts { + let extended = self.generate(account.crypto.secret(password)?, derivation)?; + let secret = extended.secret().as_raw(); + return Ok(ethkey::sign(&secret, message)?); + } + Err(Error::InvalidPassword) + } - fn account_ref(&self, address: &Address) -> Result { - let read_from_cache = |address: &Address| { - use std::collections::Bound; - let cache = self.cache.read(); - let mut r = cache.range((Bound::Included(*address), Bound::Included(*address))); - r.next().map(|(k, _)| k.clone()) - }; + fn account_ref(&self, address: &Address) -> Result { + let read_from_cache = |address: &Address| { + use std::collections::Bound; + let cache = self.cache.read(); + let mut r = cache.range((Bound::Included(*address), Bound::Included(*address))); + r.next().map(|(k, _)| k.clone()) + }; - match read_from_cache(address) { - Some(account) => Ok(account), - None => { - self.reload_if_changed()?; - read_from_cache(address).ok_or(Error::InvalidAccount) - } - } - } + match read_from_cache(address) { + Some(account) => Ok(account), + None => { + self.reload_if_changed()?; + read_from_cache(address).ok_or(Error::InvalidAccount) + } + } + } - fn accounts(&self) -> Result, Error> { - self.reload_if_changed()?; - Ok(self.cache.read().keys().cloned().collect()) - } + fn accounts(&self) -> Result, Error> { + self.reload_if_changed()?; + Ok(self.cache.read().keys().cloned().collect()) + } - fn remove_account(&self, account_ref: &StoreAccountRef, password: &Password) -> Result<(), Error> { - let accounts = self.get_matching(account_ref, password)?; + fn remove_account( + &self, + account_ref: &StoreAccountRef, + password: &Password, + ) -> Result<(), 
Error> { + let accounts = self.get_matching(account_ref, password)?; - for account in accounts { - return self.remove_safe_account(account_ref, &account); - } + for account in accounts { + return self.remove_safe_account(account_ref, &account); + } - Err(Error::InvalidPassword) - } + Err(Error::InvalidPassword) + } - fn change_password(&self, account_ref: &StoreAccountRef, old_password: &Password, new_password: &Password) -> Result<(), Error> { - let accounts = self.get_matching(account_ref, old_password)?; + fn change_password( + &self, + account_ref: &StoreAccountRef, + old_password: &Password, + new_password: &Password, + ) -> Result<(), Error> { + let accounts = self.get_matching(account_ref, old_password)?; - if accounts.is_empty() { - return Err(Error::InvalidPassword); - } + if accounts.is_empty() { + return Err(Error::InvalidPassword); + } - for account in accounts { - // Change password - let new_account = account.change_password(old_password, new_password, self.iterations)?; - self.update(account_ref, account, new_account)?; - } + for account in accounts { + // Change password + let new_account = + account.change_password(old_password, new_password, self.iterations)?; + self.update(account_ref, account, new_account)?; + } - Ok(()) - } + Ok(()) + } - fn export_account(&self, account_ref: &StoreAccountRef, password: &Password) -> Result { - self.get_matching(account_ref, password)?.into_iter().nth(0).map(Into::into).ok_or(Error::InvalidPassword) - } + fn export_account( + &self, + account_ref: &StoreAccountRef, + password: &Password, + ) -> Result { + self.get_matching(account_ref, password)? 
+ .into_iter() + .nth(0) + .map(Into::into) + .ok_or(Error::InvalidPassword) + } - fn sign(&self, account: &StoreAccountRef, password: &Password, message: &Message) -> Result { - let accounts = self.get_matching(account, password)?; - match accounts.first() { - Some(ref account) => account.sign(password, message), - None => Err(Error::InvalidPassword), - } - } + fn sign( + &self, + account: &StoreAccountRef, + password: &Password, + message: &Message, + ) -> Result { + let accounts = self.get_matching(account, password)?; + match accounts.first() { + Some(ref account) => account.sign(password, message), + None => Err(Error::InvalidPassword), + } + } - fn decrypt(&self, account: &StoreAccountRef, password: &Password, shared_mac: &[u8], message: &[u8]) -> Result, Error> { - let accounts = self.get_matching(account, password)?; - match accounts.first() { - Some(ref account) => account.decrypt(password, shared_mac, message), - None => Err(Error::InvalidPassword), - } - } + fn decrypt( + &self, + account: &StoreAccountRef, + password: &Password, + shared_mac: &[u8], + message: &[u8], + ) -> Result, Error> { + let accounts = self.get_matching(account, password)?; + match accounts.first() { + Some(ref account) => account.decrypt(password, shared_mac, message), + None => Err(Error::InvalidPassword), + } + } - fn agree(&self, account: &StoreAccountRef, password: &Password, other: &Public) -> Result { - let accounts = self.get_matching(account, password)?; - match accounts.first() { - Some(ref account) => account.agree(password, other), - None => Err(Error::InvalidPassword), - } - } + fn agree( + &self, + account: &StoreAccountRef, + password: &Password, + other: &Public, + ) -> Result { + let accounts = self.get_matching(account, password)?; + match accounts.first() { + Some(ref account) => account.agree(password, other), + None => Err(Error::InvalidPassword), + } + } - fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error> { - let is_vault_created = { 
// lock border - let mut vaults = self.vaults.lock(); - if !vaults.contains_key(&name.to_owned()) { - let vault_provider = self.dir.as_vault_provider().ok_or(Error::VaultsAreNotSupported)?; - let vault = vault_provider.create(name, VaultKey::new(password, self.iterations))?; - vaults.insert(name.to_owned(), vault); - true - } else { - false - } - }; + fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error> { + let is_vault_created = { + // lock border + let mut vaults = self.vaults.lock(); + if !vaults.contains_key(&name.to_owned()) { + let vault_provider = self + .dir + .as_vault_provider() + .ok_or(Error::VaultsAreNotSupported)?; + let vault = + vault_provider.create(name, VaultKey::new(password, self.iterations))?; + vaults.insert(name.to_owned(), vault); + true + } else { + false + } + }; - if is_vault_created { - self.reload_accounts()?; - } + if is_vault_created { + self.reload_accounts()?; + } - Ok(()) - } + Ok(()) + } - fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error> { - let is_vault_opened = { // lock border - let mut vaults = self.vaults.lock(); - if !vaults.contains_key(&name.to_owned()) { - let vault_provider = self.dir.as_vault_provider().ok_or(Error::VaultsAreNotSupported)?; - let vault = vault_provider.open(name, VaultKey::new(password, self.iterations))?; - vaults.insert(name.to_owned(), vault); - true - } else { - false - } - }; + fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error> { + let is_vault_opened = { + // lock border + let mut vaults = self.vaults.lock(); + if !vaults.contains_key(&name.to_owned()) { + let vault_provider = self + .dir + .as_vault_provider() + .ok_or(Error::VaultsAreNotSupported)?; + let vault = vault_provider.open(name, VaultKey::new(password, self.iterations))?; + vaults.insert(name.to_owned(), vault); + true + } else { + false + } + }; - if is_vault_opened { - self.reload_accounts()?; - } + if is_vault_opened { + self.reload_accounts()?; + } - Ok(()) 
- } + Ok(()) + } - fn close_vault(&self, name: &str) -> Result<(), Error> { - let is_vault_removed = self.vaults.lock().remove(&name.to_owned()).is_some(); - if is_vault_removed { - self.reload_accounts()?; - } - Ok(()) - } + fn close_vault(&self, name: &str) -> Result<(), Error> { + let is_vault_removed = self.vaults.lock().remove(&name.to_owned()).is_some(); + if is_vault_removed { + self.reload_accounts()?; + } + Ok(()) + } - fn list_vaults(&self) -> Result, Error> { - let vault_provider = self.dir.as_vault_provider().ok_or(Error::VaultsAreNotSupported)?; - vault_provider.list_vaults() - } + fn list_vaults(&self) -> Result, Error> { + let vault_provider = self + .dir + .as_vault_provider() + .ok_or(Error::VaultsAreNotSupported)?; + vault_provider.list_vaults() + } - fn list_opened_vaults(&self) -> Result, Error> { - Ok(self.vaults.lock().keys().cloned().collect()) - } + fn list_opened_vaults(&self) -> Result, Error> { + Ok(self.vaults.lock().keys().cloned().collect()) + } - fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error> { - let old_key = self.vaults.lock().get(name).map(|v| v.key()).ok_or(Error::VaultNotFound)?; - let vault_provider = self.dir.as_vault_provider().ok_or(Error::VaultsAreNotSupported)?; - let vault = vault_provider.open(name, old_key)?; - match vault.set_key(VaultKey::new(new_password, self.iterations)) { - Ok(_) => { - self.close_vault(name) - .and_then(|_| self.open_vault(name, new_password)) - }, - Err(SetKeyError::Fatal(err)) => { - let _ = self.close_vault(name); - Err(err) - }, - Err(SetKeyError::NonFatalNew(err)) => { - let _ = self.close_vault(name) - .and_then(|_| self.open_vault(name, new_password)); - Err(err) - }, - Err(SetKeyError::NonFatalOld(err)) => Err(err), - } - } + fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error> { + let old_key = self + .vaults + .lock() + .get(name) + .map(|v| v.key()) + .ok_or(Error::VaultNotFound)?; + let vault_provider = 
self + .dir + .as_vault_provider() + .ok_or(Error::VaultsAreNotSupported)?; + let vault = vault_provider.open(name, old_key)?; + match vault.set_key(VaultKey::new(new_password, self.iterations)) { + Ok(_) => self + .close_vault(name) + .and_then(|_| self.open_vault(name, new_password)), + Err(SetKeyError::Fatal(err)) => { + let _ = self.close_vault(name); + Err(err) + } + Err(SetKeyError::NonFatalNew(err)) => { + let _ = self + .close_vault(name) + .and_then(|_| self.open_vault(name, new_password)); + Err(err) + } + Err(SetKeyError::NonFatalOld(err)) => Err(err), + } + } - fn change_account_vault(&self, vault: SecretVaultRef, account_ref: StoreAccountRef) -> Result { - if account_ref.vault == vault { - return Ok(account_ref); - } + fn change_account_vault( + &self, + vault: SecretVaultRef, + account_ref: StoreAccountRef, + ) -> Result { + if account_ref.vault == vault { + return Ok(account_ref); + } - let account = self.get_accounts(&account_ref)?.into_iter().nth(0).ok_or(Error::InvalidAccount)?; - let new_account_ref = self.import(vault, account.clone())?; - self.remove_safe_account(&account_ref, &account)?; - self.reload_accounts()?; - Ok(new_account_ref) - } + let account = self + .get_accounts(&account_ref)? 
+ .into_iter() + .nth(0) + .ok_or(Error::InvalidAccount)?; + let new_account_ref = self.import(vault, account.clone())?; + self.remove_safe_account(&account_ref, &account)?; + self.reload_accounts()?; + Ok(new_account_ref) + } - fn get_vault_meta(&self, name: &str) -> Result { - // vault meta contains password hint - // => allow reading meta even if vault is not yet opened - self.vaults.lock() - .get(name) - .and_then(|v| Some(v.meta())) - .ok_or(Error::VaultNotFound) - .or_else(|_| { - let vault_provider = self.dir.as_vault_provider().ok_or(Error::VaultsAreNotSupported)?; - vault_provider.vault_meta(name) - }) + fn get_vault_meta(&self, name: &str) -> Result { + // vault meta contains password hint + // => allow reading meta even if vault is not yet opened + self.vaults + .lock() + .get(name) + .and_then(|v| Some(v.meta())) + .ok_or(Error::VaultNotFound) + .or_else(|_| { + let vault_provider = self + .dir + .as_vault_provider() + .ok_or(Error::VaultsAreNotSupported)?; + vault_provider.vault_meta(name) + }) + } - } - - fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error> { - self.vaults.lock() - .get(name) - .ok_or(Error::VaultNotFound) - .and_then(|v| v.set_meta(meta)) - } + fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error> { + self.vaults + .lock() + .get(name) + .ok_or(Error::VaultNotFound) + .and_then(|v| v.set_meta(meta)) + } } #[cfg(test)] mod tests { - extern crate tempdir; - - use accounts_dir::{KeyDirectory, MemoryDirectory, RootDiskDirectory}; - use ethkey::{Random, Generator, KeyPair}; - use secret_store::{SimpleSecretStore, SecretStore, SecretVaultRef, StoreAccountRef, Derivation}; - use super::{EthStore, EthMultiStore}; - use self::tempdir::TempDir; - use ethereum_types::H256; - - fn keypair() -> KeyPair { - Random.generate().unwrap() - } - - fn store() -> EthStore { - EthStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory always load successfuly; qed") - } - - fn multi_store() -> EthMultiStore 
{ - EthMultiStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory always load successfuly; qed") - } - - struct RootDiskDirectoryGuard { - pub key_dir: Option>, - _path: TempDir, - } - - impl RootDiskDirectoryGuard { - pub fn new() -> Self { - let temp_path = TempDir::new("").unwrap(); - let disk_dir = Box::new(RootDiskDirectory::create(temp_path.path()).unwrap()); - - RootDiskDirectoryGuard { - key_dir: Some(disk_dir), - _path: temp_path, - } - } - } - - #[test] - fn should_insert_account_successfully() { - // given - let store = store(); - let keypair = keypair(); - - // when - let passwd = "test".into(); - let address = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd).unwrap(); - - // then - assert_eq!(address, StoreAccountRef::root(keypair.address())); - assert!(store.get(&address).is_ok(), "Should contain account."); - assert_eq!(store.accounts().unwrap().len(), 1, "Should have one account."); - } - - #[test] - fn should_update_meta_and_name() { - // given - let store = store(); - let keypair = keypair(); - let passwd = "test".into(); - let address = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd).unwrap(); - assert_eq!(&store.meta(&address).unwrap(), "{}"); - assert_eq!(&store.name(&address).unwrap(), ""); - - // when - store.set_meta(&address, "meta".into()).unwrap(); - store.set_name(&address, "name".into()).unwrap(); - - // then - assert_eq!(&store.meta(&address).unwrap(), "meta"); - assert_eq!(&store.name(&address).unwrap(), "name"); - assert_eq!(store.accounts().unwrap().len(), 1); - } - - #[test] - fn should_remove_account() { - // given - let store = store(); - let passwd = "test".into(); - let keypair = keypair(); - let address = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd).unwrap(); - - // when - store.remove_account(&address, &passwd).unwrap(); - - // then - assert_eq!(store.accounts().unwrap().len(), 0, "Should remove account."); - } - - 
#[test] - fn should_return_true_if_password_is_correct() { - // given - let store = store(); - let passwd = "test".into(); - let keypair = keypair(); - let address = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd).unwrap(); - - // when - let res1 = store.test_password(&address, &"x".into()).unwrap(); - let res2 = store.test_password(&address, &passwd).unwrap(); - - assert!(!res1, "First password should be invalid."); - assert!(res2, "Second password should be correct."); - } - - #[test] - fn multistore_should_be_able_to_have_the_same_account_twice() { - // given - let store = multi_store(); - let passwd1 = "test".into(); - let passwd2 = "xyz".into(); - let keypair = keypair(); - let address = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd1).unwrap(); - let address2 = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd2).unwrap(); - assert_eq!(address, address2); - - // when - assert!(store.remove_account(&address, &passwd1).is_ok(), "First password should work."); - assert_eq!(store.accounts().unwrap().len(), 1); - - assert!(store.remove_account(&address, &passwd2).is_ok(), "Second password should work too."); - assert_eq!(store.accounts().unwrap().len(), 0); - } - - #[test] - fn should_copy_account() { - // given - let store = store(); - let passwd1 = "test".into(); - let passwd2 = "xzy".into(); - let multi_store = multi_store(); - let keypair = keypair(); - let address = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd1).unwrap(); - assert_eq!(multi_store.accounts().unwrap().len(), 0); - - // when - store.copy_account(&multi_store, SecretVaultRef::Root, &address, &passwd1, &passwd2).unwrap(); - - // then - assert!(store.test_password(&address, &passwd1).unwrap(), "First password should work for store."); - assert!(multi_store.sign(&address, &passwd2, &Default::default()).is_ok(), "Second password should work for second store."); - 
assert_eq!(multi_store.accounts().unwrap().len(), 1); - } - - #[test] - fn should_create_and_open_vaults() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let name1 = "vault1"; let password1 = "password1".into(); - let name2 = "vault2"; let password2 = "password2".into(); - let keypair1 = keypair(); - let keypair2 = keypair(); - let keypair3 = keypair(); let password3 = "password3".into(); - - // when - store.create_vault(name1, &password1).unwrap(); - store.create_vault(name2, &password2).unwrap(); - - // then [can create vaults] ^^^ - - // and when - store.insert_account(SecretVaultRef::Vault(name1.to_owned()), keypair1.secret().clone(), &password1).unwrap(); - store.insert_account(SecretVaultRef::Vault(name2.to_owned()), keypair2.secret().clone(), &password2).unwrap(); - store.insert_account(SecretVaultRef::Root, keypair3.secret().clone(), &password3).unwrap(); - store.insert_account(SecretVaultRef::Vault("vault3".to_owned()), keypair1.secret().clone(), &password3).unwrap_err(); - let accounts = store.accounts().unwrap(); - - // then [can create accounts in vaults] - assert_eq!(accounts.len(), 3); - assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Root)); - assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Vault(name1.to_owned()))); - assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Vault(name2.to_owned()))); - - // and when - store.close_vault(name1).unwrap(); - store.close_vault(name2).unwrap(); - store.close_vault("vault3").unwrap(); - let accounts = store.accounts().unwrap(); - - // then [can close vaults + accounts from vaults disappear] - assert_eq!(accounts.len(), 1); - assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Root)); - - // and when - store.open_vault(name1, &password2).unwrap_err(); - store.open_vault(name2, &password1).unwrap_err(); - store.open_vault(name1, &password1).unwrap(); - store.open_vault(name2, 
&password2).unwrap(); - let accounts = store.accounts().unwrap(); - - // then [can check vaults on open + can reopen vaults + accounts from vaults appear] - assert_eq!(accounts.len(), 3); - assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Root)); - assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Vault(name1.to_owned()))); - assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Vault(name2.to_owned()))); - } - - #[test] - fn should_move_vault_acounts() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let name1 = "vault1"; let password1 = "password1".into(); - let name2 = "vault2"; let password2 = "password2".into(); - let password3 = "password3".into(); - let keypair1 = keypair(); - let keypair2 = keypair(); - let keypair3 = keypair(); - - // when - store.create_vault(name1, &password1).unwrap(); - store.create_vault(name2, &password2).unwrap(); - let account1 = store.insert_account(SecretVaultRef::Vault(name1.to_owned()), keypair1.secret().clone(), &password1).unwrap(); - let account2 = store.insert_account(SecretVaultRef::Vault(name1.to_owned()), keypair2.secret().clone(), &password1).unwrap(); - let account3 = store.insert_account(SecretVaultRef::Root, keypair3.secret().clone(), &password3).unwrap(); - - // then - let account1 = store.change_account_vault(SecretVaultRef::Root, account1.clone()).unwrap(); - let account2 = store.change_account_vault(SecretVaultRef::Vault(name2.to_owned()), account2.clone()).unwrap(); - let account3 = store.change_account_vault(SecretVaultRef::Vault(name2.to_owned()), account3).unwrap(); - let accounts = store.accounts().unwrap(); - assert_eq!(accounts.len(), 3); - assert!(accounts.iter().any(|a| a == &StoreAccountRef::root(account1.address.clone()))); - assert!(accounts.iter().any(|a| a == &StoreAccountRef::vault(name2, account2.address.clone()))); - assert!(accounts.iter().any(|a| a == &StoreAccountRef::vault(name2, 
account3.address.clone()))); - - // and then - assert_eq!(store.meta(&StoreAccountRef::root(account1.address)).unwrap(), r#"{}"#); - assert_eq!(store.meta(&StoreAccountRef::vault("vault2", account2.address)).unwrap(), r#"{"vault":"vault2"}"#); - assert_eq!(store.meta(&StoreAccountRef::vault("vault2", account3.address)).unwrap(), r#"{"vault":"vault2"}"#); - } - - #[test] - fn should_not_remove_account_when_moving_to_self() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let password1 = "password1".into(); - let keypair1 = keypair(); - - // when - let account1 = store.insert_account(SecretVaultRef::Root, keypair1.secret().clone(), &password1).unwrap(); - store.change_account_vault(SecretVaultRef::Root, account1).unwrap(); - - // then - let accounts = store.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - } - - #[test] - fn should_remove_account_from_vault() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let name1 = "vault1"; let password1 = "password1".into(); - let keypair1 = keypair(); - - // when - store.create_vault(name1, &password1).unwrap(); - let account1 = store.insert_account(SecretVaultRef::Vault(name1.to_owned()), keypair1.secret().clone(), &password1).unwrap(); - assert_eq!(store.accounts().unwrap().len(), 1); - - // then - store.remove_account(&account1, &password1).unwrap(); - assert_eq!(store.accounts().unwrap().len(), 0); - } - - #[test] - fn should_not_remove_account_from_vault_when_password_is_incorrect() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let name1 = "vault1"; let password1 = "password1".into(); - let password2 = "password2".into(); - let keypair1 = keypair(); - - // when - store.create_vault(name1, &password1).unwrap(); - let account1 = 
store.insert_account(SecretVaultRef::Vault(name1.to_owned()), keypair1.secret().clone(), &password1).unwrap(); - assert_eq!(store.accounts().unwrap().len(), 1); - - // then - store.remove_account(&account1, &password2).unwrap_err(); - assert_eq!(store.accounts().unwrap().len(), 1); - } - - #[test] - fn should_change_vault_password() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let name = "vault"; let password = "password".into(); - let keypair = keypair(); - - // when - store.create_vault(name, &password).unwrap(); - store.insert_account(SecretVaultRef::Vault(name.to_owned()), keypair.secret().clone(), &password).unwrap(); - - // then - assert_eq!(store.accounts().unwrap().len(), 1); - let new_password = "new_password".into(); - store.change_vault_password(name, &new_password).unwrap(); - assert_eq!(store.accounts().unwrap().len(), 1); - - // and when - store.close_vault(name).unwrap(); - - // then - store.open_vault(name, &new_password).unwrap(); - assert_eq!(store.accounts().unwrap().len(), 1); - } - - #[test] - fn should_have_different_passwords_for_vault_secret_and_meta() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let name = "vault"; let password = "password".into(); - let secret_password = "sec_password".into(); - let keypair = keypair(); - - // when - store.create_vault(name, &password).unwrap(); - let account_ref = store.insert_account(SecretVaultRef::Vault(name.to_owned()), keypair.secret().clone(), &secret_password).unwrap(); - - // then - assert_eq!(store.accounts().unwrap().len(), 1); - let new_secret_password = "new_sec_password".into(); - store.change_password(&account_ref, &secret_password, &new_secret_password).unwrap(); - assert_eq!(store.accounts().unwrap().len(), 1); - } - - #[test] - fn should_list_opened_vaults() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - 
let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let name1 = "vault1"; let password1 = "password1".into(); - let name2 = "vault2"; let password2 = "password2".into(); - let name3 = "vault3"; let password3 = "password3".into(); - - // when - store.create_vault(name1, &password1).unwrap(); - store.create_vault(name2, &password2).unwrap(); - store.create_vault(name3, &password3).unwrap(); - store.close_vault(name2).unwrap(); - - // then - let opened_vaults = store.list_opened_vaults().unwrap(); - assert_eq!(opened_vaults.len(), 2); - assert!(opened_vaults.iter().any(|v| &*v == name1)); - assert!(opened_vaults.iter().any(|v| &*v == name3)); - } - - #[test] - fn should_manage_vaults_meta() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let name1 = "vault1"; let password1 = "password1".into(); - - // when - store.create_vault(name1, &password1).unwrap(); - - // then - assert_eq!(store.get_vault_meta(name1).unwrap(), "{}".to_owned()); - assert!(store.set_vault_meta(name1, "Hello, world!!!").is_ok()); - assert_eq!(store.get_vault_meta(name1).unwrap(), "Hello, world!!!".to_owned()); - - // and when - store.close_vault(name1).unwrap(); - store.open_vault(name1, &password1).unwrap(); - - // then - assert_eq!(store.get_vault_meta(name1).unwrap(), "Hello, world!!!".to_owned()); - - // and when - store.close_vault(name1).unwrap(); - - // then - assert_eq!(store.get_vault_meta(name1).unwrap(), "Hello, world!!!".to_owned()); - assert!(store.get_vault_meta("vault2").is_err()); - } - - #[test] - fn should_store_derived_keys() { - // given we have one account in the store - let store = store(); - let keypair = keypair(); - let address = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), &"test".into()).unwrap(); - - // when we deriving from that account - let derived = store.insert_derived( - SecretVaultRef::Root, - &address, - &"test".into(), - 
Derivation::HardHash(H256::from(0)), - ).unwrap(); - - // there should be 2 accounts in the store - let accounts = store.accounts().unwrap(); - assert_eq!(accounts.len(), 2); - - // and we can sign with the derived contract - assert!(store.sign(&derived, &"test".into(), &Default::default()).is_ok(), "Second password should work for second store."); - } - - #[test] - fn should_save_meta_when_setting_before_password() { - // given - let mut dir = RootDiskDirectoryGuard::new(); - let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); - let name = "vault"; let password = "password1".into(); - let new_password = "password2".into(); - - // when - store.create_vault(name, &password).unwrap(); - store.set_vault_meta(name, "OldMeta").unwrap(); - store.change_vault_password(name, &new_password).unwrap(); - - // then - assert_eq!(store.get_vault_meta(name).unwrap(), "OldMeta".to_owned()); - } - - #[test] - fn should_export_account() { - // given - let store = store(); - let keypair = keypair(); - let address = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), &"test".into()).unwrap(); - - // when - let exported = store.export_account(&address, &"test".into()); - - // then - assert!(exported.is_ok(), "Should export single account: {:?}", exported); - } + extern crate tempdir; + + use self::tempdir::TempDir; + use super::{EthMultiStore, EthStore}; + use accounts_dir::{KeyDirectory, MemoryDirectory, RootDiskDirectory}; + use ethereum_types::H256; + use ethkey::{Generator, KeyPair, Random}; + use secret_store::{ + Derivation, SecretStore, SecretVaultRef, SimpleSecretStore, StoreAccountRef, + }; + + fn keypair() -> KeyPair { + Random.generate().unwrap() + } + + fn store() -> EthStore { + EthStore::open(Box::new(MemoryDirectory::default())) + .expect("MemoryDirectory always load successfuly; qed") + } + + fn multi_store() -> EthMultiStore { + EthMultiStore::open(Box::new(MemoryDirectory::default())) + .expect("MemoryDirectory always load successfuly; 
qed") + } + + struct RootDiskDirectoryGuard { + pub key_dir: Option>, + _path: TempDir, + } + + impl RootDiskDirectoryGuard { + pub fn new() -> Self { + let temp_path = TempDir::new("").unwrap(); + let disk_dir = Box::new(RootDiskDirectory::create(temp_path.path()).unwrap()); + + RootDiskDirectoryGuard { + key_dir: Some(disk_dir), + _path: temp_path, + } + } + } + + #[test] + fn should_insert_account_successfully() { + // given + let store = store(); + let keypair = keypair(); + + // when + let passwd = "test".into(); + let address = store + .insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd) + .unwrap(); + + // then + assert_eq!(address, StoreAccountRef::root(keypair.address())); + assert!(store.get(&address).is_ok(), "Should contain account."); + assert_eq!( + store.accounts().unwrap().len(), + 1, + "Should have one account." + ); + } + + #[test] + fn should_update_meta_and_name() { + // given + let store = store(); + let keypair = keypair(); + let passwd = "test".into(); + let address = store + .insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd) + .unwrap(); + assert_eq!(&store.meta(&address).unwrap(), "{}"); + assert_eq!(&store.name(&address).unwrap(), ""); + + // when + store.set_meta(&address, "meta".into()).unwrap(); + store.set_name(&address, "name".into()).unwrap(); + + // then + assert_eq!(&store.meta(&address).unwrap(), "meta"); + assert_eq!(&store.name(&address).unwrap(), "name"); + assert_eq!(store.accounts().unwrap().len(), 1); + } + + #[test] + fn should_remove_account() { + // given + let store = store(); + let passwd = "test".into(); + let keypair = keypair(); + let address = store + .insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd) + .unwrap(); + + // when + store.remove_account(&address, &passwd).unwrap(); + + // then + assert_eq!(store.accounts().unwrap().len(), 0, "Should remove account."); + } + + #[test] + fn should_return_true_if_password_is_correct() { + // given + let store = 
store(); + let passwd = "test".into(); + let keypair = keypair(); + let address = store + .insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd) + .unwrap(); + + // when + let res1 = store.test_password(&address, &"x".into()).unwrap(); + let res2 = store.test_password(&address, &passwd).unwrap(); + + assert!(!res1, "First password should be invalid."); + assert!(res2, "Second password should be correct."); + } + + #[test] + fn multistore_should_be_able_to_have_the_same_account_twice() { + // given + let store = multi_store(); + let passwd1 = "test".into(); + let passwd2 = "xyz".into(); + let keypair = keypair(); + let address = store + .insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd1) + .unwrap(); + let address2 = store + .insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd2) + .unwrap(); + assert_eq!(address, address2); + + // when + assert!( + store.remove_account(&address, &passwd1).is_ok(), + "First password should work." + ); + assert_eq!(store.accounts().unwrap().len(), 1); + + assert!( + store.remove_account(&address, &passwd2).is_ok(), + "Second password should work too." + ); + assert_eq!(store.accounts().unwrap().len(), 0); + } + + #[test] + fn should_copy_account() { + // given + let store = store(); + let passwd1 = "test".into(); + let passwd2 = "xzy".into(); + let multi_store = multi_store(); + let keypair = keypair(); + let address = store + .insert_account(SecretVaultRef::Root, keypair.secret().clone(), &passwd1) + .unwrap(); + assert_eq!(multi_store.accounts().unwrap().len(), 0); + + // when + store + .copy_account( + &multi_store, + SecretVaultRef::Root, + &address, + &passwd1, + &passwd2, + ) + .unwrap(); + + // then + assert!( + store.test_password(&address, &passwd1).unwrap(), + "First password should work for store." + ); + assert!( + multi_store + .sign(&address, &passwd2, &Default::default()) + .is_ok(), + "Second password should work for second store." 
+ ); + assert_eq!(multi_store.accounts().unwrap().len(), 1); + } + + #[test] + fn should_create_and_open_vaults() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let name1 = "vault1"; + let password1 = "password1".into(); + let name2 = "vault2"; + let password2 = "password2".into(); + let keypair1 = keypair(); + let keypair2 = keypair(); + let keypair3 = keypair(); + let password3 = "password3".into(); + + // when + store.create_vault(name1, &password1).unwrap(); + store.create_vault(name2, &password2).unwrap(); + + // then [can create vaults] ^^^ + + // and when + store + .insert_account( + SecretVaultRef::Vault(name1.to_owned()), + keypair1.secret().clone(), + &password1, + ) + .unwrap(); + store + .insert_account( + SecretVaultRef::Vault(name2.to_owned()), + keypair2.secret().clone(), + &password2, + ) + .unwrap(); + store + .insert_account(SecretVaultRef::Root, keypair3.secret().clone(), &password3) + .unwrap(); + store + .insert_account( + SecretVaultRef::Vault("vault3".to_owned()), + keypair1.secret().clone(), + &password3, + ) + .unwrap_err(); + let accounts = store.accounts().unwrap(); + + // then [can create accounts in vaults] + assert_eq!(accounts.len(), 3); + assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Root)); + assert!(accounts + .iter() + .any(|a| a.vault == SecretVaultRef::Vault(name1.to_owned()))); + assert!(accounts + .iter() + .any(|a| a.vault == SecretVaultRef::Vault(name2.to_owned()))); + + // and when + store.close_vault(name1).unwrap(); + store.close_vault(name2).unwrap(); + store.close_vault("vault3").unwrap(); + let accounts = store.accounts().unwrap(); + + // then [can close vaults + accounts from vaults disappear] + assert_eq!(accounts.len(), 1); + assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Root)); + + // and when + store.open_vault(name1, &password2).unwrap_err(); + store.open_vault(name2, &password1).unwrap_err(); + 
store.open_vault(name1, &password1).unwrap(); + store.open_vault(name2, &password2).unwrap(); + let accounts = store.accounts().unwrap(); + + // then [can check vaults on open + can reopen vaults + accounts from vaults appear] + assert_eq!(accounts.len(), 3); + assert!(accounts.iter().any(|a| a.vault == SecretVaultRef::Root)); + assert!(accounts + .iter() + .any(|a| a.vault == SecretVaultRef::Vault(name1.to_owned()))); + assert!(accounts + .iter() + .any(|a| a.vault == SecretVaultRef::Vault(name2.to_owned()))); + } + + #[test] + fn should_move_vault_acounts() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let name1 = "vault1"; + let password1 = "password1".into(); + let name2 = "vault2"; + let password2 = "password2".into(); + let password3 = "password3".into(); + let keypair1 = keypair(); + let keypair2 = keypair(); + let keypair3 = keypair(); + + // when + store.create_vault(name1, &password1).unwrap(); + store.create_vault(name2, &password2).unwrap(); + let account1 = store + .insert_account( + SecretVaultRef::Vault(name1.to_owned()), + keypair1.secret().clone(), + &password1, + ) + .unwrap(); + let account2 = store + .insert_account( + SecretVaultRef::Vault(name1.to_owned()), + keypair2.secret().clone(), + &password1, + ) + .unwrap(); + let account3 = store + .insert_account(SecretVaultRef::Root, keypair3.secret().clone(), &password3) + .unwrap(); + + // then + let account1 = store + .change_account_vault(SecretVaultRef::Root, account1.clone()) + .unwrap(); + let account2 = store + .change_account_vault(SecretVaultRef::Vault(name2.to_owned()), account2.clone()) + .unwrap(); + let account3 = store + .change_account_vault(SecretVaultRef::Vault(name2.to_owned()), account3) + .unwrap(); + let accounts = store.accounts().unwrap(); + assert_eq!(accounts.len(), 3); + assert!(accounts + .iter() + .any(|a| a == &StoreAccountRef::root(account1.address.clone()))); + assert!(accounts + 
.iter() + .any(|a| a == &StoreAccountRef::vault(name2, account2.address.clone()))); + assert!(accounts + .iter() + .any(|a| a == &StoreAccountRef::vault(name2, account3.address.clone()))); + + // and then + assert_eq!( + store + .meta(&StoreAccountRef::root(account1.address)) + .unwrap(), + r#"{}"# + ); + assert_eq!( + store + .meta(&StoreAccountRef::vault("vault2", account2.address)) + .unwrap(), + r#"{"vault":"vault2"}"# + ); + assert_eq!( + store + .meta(&StoreAccountRef::vault("vault2", account3.address)) + .unwrap(), + r#"{"vault":"vault2"}"# + ); + } + + #[test] + fn should_not_remove_account_when_moving_to_self() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let password1 = "password1".into(); + let keypair1 = keypair(); + + // when + let account1 = store + .insert_account(SecretVaultRef::Root, keypair1.secret().clone(), &password1) + .unwrap(); + store + .change_account_vault(SecretVaultRef::Root, account1) + .unwrap(); + + // then + let accounts = store.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + } + + #[test] + fn should_remove_account_from_vault() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let name1 = "vault1"; + let password1 = "password1".into(); + let keypair1 = keypair(); + + // when + store.create_vault(name1, &password1).unwrap(); + let account1 = store + .insert_account( + SecretVaultRef::Vault(name1.to_owned()), + keypair1.secret().clone(), + &password1, + ) + .unwrap(); + assert_eq!(store.accounts().unwrap().len(), 1); + + // then + store.remove_account(&account1, &password1).unwrap(); + assert_eq!(store.accounts().unwrap().len(), 0); + } + + #[test] + fn should_not_remove_account_from_vault_when_password_is_incorrect() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let name1 = 
"vault1"; + let password1 = "password1".into(); + let password2 = "password2".into(); + let keypair1 = keypair(); + + // when + store.create_vault(name1, &password1).unwrap(); + let account1 = store + .insert_account( + SecretVaultRef::Vault(name1.to_owned()), + keypair1.secret().clone(), + &password1, + ) + .unwrap(); + assert_eq!(store.accounts().unwrap().len(), 1); + + // then + store.remove_account(&account1, &password2).unwrap_err(); + assert_eq!(store.accounts().unwrap().len(), 1); + } + + #[test] + fn should_change_vault_password() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let name = "vault"; + let password = "password".into(); + let keypair = keypair(); + + // when + store.create_vault(name, &password).unwrap(); + store + .insert_account( + SecretVaultRef::Vault(name.to_owned()), + keypair.secret().clone(), + &password, + ) + .unwrap(); + + // then + assert_eq!(store.accounts().unwrap().len(), 1); + let new_password = "new_password".into(); + store.change_vault_password(name, &new_password).unwrap(); + assert_eq!(store.accounts().unwrap().len(), 1); + + // and when + store.close_vault(name).unwrap(); + + // then + store.open_vault(name, &new_password).unwrap(); + assert_eq!(store.accounts().unwrap().len(), 1); + } + + #[test] + fn should_have_different_passwords_for_vault_secret_and_meta() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let name = "vault"; + let password = "password".into(); + let secret_password = "sec_password".into(); + let keypair = keypair(); + + // when + store.create_vault(name, &password).unwrap(); + let account_ref = store + .insert_account( + SecretVaultRef::Vault(name.to_owned()), + keypair.secret().clone(), + &secret_password, + ) + .unwrap(); + + // then + assert_eq!(store.accounts().unwrap().len(), 1); + let new_secret_password = "new_sec_password".into(); + 
store + .change_password(&account_ref, &secret_password, &new_secret_password) + .unwrap(); + assert_eq!(store.accounts().unwrap().len(), 1); + } + + #[test] + fn should_list_opened_vaults() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let name1 = "vault1"; + let password1 = "password1".into(); + let name2 = "vault2"; + let password2 = "password2".into(); + let name3 = "vault3"; + let password3 = "password3".into(); + + // when + store.create_vault(name1, &password1).unwrap(); + store.create_vault(name2, &password2).unwrap(); + store.create_vault(name3, &password3).unwrap(); + store.close_vault(name2).unwrap(); + + // then + let opened_vaults = store.list_opened_vaults().unwrap(); + assert_eq!(opened_vaults.len(), 2); + assert!(opened_vaults.iter().any(|v| &*v == name1)); + assert!(opened_vaults.iter().any(|v| &*v == name3)); + } + + #[test] + fn should_manage_vaults_meta() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let name1 = "vault1"; + let password1 = "password1".into(); + + // when + store.create_vault(name1, &password1).unwrap(); + + // then + assert_eq!(store.get_vault_meta(name1).unwrap(), "{}".to_owned()); + assert!(store.set_vault_meta(name1, "Hello, world!!!").is_ok()); + assert_eq!( + store.get_vault_meta(name1).unwrap(), + "Hello, world!!!".to_owned() + ); + + // and when + store.close_vault(name1).unwrap(); + store.open_vault(name1, &password1).unwrap(); + + // then + assert_eq!( + store.get_vault_meta(name1).unwrap(), + "Hello, world!!!".to_owned() + ); + + // and when + store.close_vault(name1).unwrap(); + + // then + assert_eq!( + store.get_vault_meta(name1).unwrap(), + "Hello, world!!!".to_owned() + ); + assert!(store.get_vault_meta("vault2").is_err()); + } + + #[test] + fn should_store_derived_keys() { + // given we have one account in the store + let store = store(); + let 
keypair = keypair(); + let address = store + .insert_account( + SecretVaultRef::Root, + keypair.secret().clone(), + &"test".into(), + ) + .unwrap(); + + // when we deriving from that account + let derived = store + .insert_derived( + SecretVaultRef::Root, + &address, + &"test".into(), + Derivation::HardHash(H256::from(0)), + ) + .unwrap(); + + // there should be 2 accounts in the store + let accounts = store.accounts().unwrap(); + assert_eq!(accounts.len(), 2); + + // and we can sign with the derived contract + assert!( + store + .sign(&derived, &"test".into(), &Default::default()) + .is_ok(), + "Second password should work for second store." + ); + } + + #[test] + fn should_save_meta_when_setting_before_password() { + // given + let mut dir = RootDiskDirectoryGuard::new(); + let store = EthStore::open(dir.key_dir.take().unwrap()).unwrap(); + let name = "vault"; + let password = "password1".into(); + let new_password = "password2".into(); + + // when + store.create_vault(name, &password).unwrap(); + store.set_vault_meta(name, "OldMeta").unwrap(); + store.change_vault_password(name, &new_password).unwrap(); + + // then + assert_eq!(store.get_vault_meta(name).unwrap(), "OldMeta".to_owned()); + } + + #[test] + fn should_export_account() { + // given + let store = store(); + let keypair = keypair(); + let address = store + .insert_account( + SecretVaultRef::Root, + keypair.secret().clone(), + &"test".into(), + ) + .unwrap(); + + // when + let exported = store.export_account(&address, &"test".into()); + + // then + assert!( + exported.is_ok(), + "Should export single account: {:?}", + exported + ); + } } diff --git a/accounts/ethstore/src/import.rs b/accounts/ethstore/src/import.rs index 87e9783ea..d8c760069 100644 --- a/accounts/ethstore/src/import.rs +++ b/accounts/ethstore/src/import.rs @@ -14,67 +14,86 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::HashSet; -use std::path::Path; -use std::fs; +use std::{collections::HashSet, fs, path::Path}; -use ethkey::Address; -use accounts_dir::{KeyDirectory, RootDiskDirectory, DiskKeyFileManager, KeyFileManager}; +use accounts_dir::{DiskKeyFileManager, KeyDirectory, KeyFileManager, RootDiskDirectory}; use dir; +use ethkey::Address; use Error; /// Import an account from a file. pub fn import_account(path: &Path, dst: &KeyDirectory) -> Result { - let key_manager = DiskKeyFileManager::default(); - let existing_accounts = dst.load()?.into_iter().map(|a| a.address).collect::>(); - let filename = path.file_name().and_then(|n| n.to_str()).map(|f| f.to_owned()); - let account = fs::File::open(&path) - .map_err(Into::into) - .and_then(|file| key_manager.read(filename, file))?; + let key_manager = DiskKeyFileManager::default(); + let existing_accounts = dst + .load()? + .into_iter() + .map(|a| a.address) + .collect::>(); + let filename = path + .file_name() + .and_then(|n| n.to_str()) + .map(|f| f.to_owned()); + let account = fs::File::open(&path) + .map_err(Into::into) + .and_then(|file| key_manager.read(filename, file))?; - let address = account.address.clone(); - if !existing_accounts.contains(&address) { - dst.insert(account)?; - } - Ok(address) + let address = account.address.clone(); + if !existing_accounts.contains(&address) { + dst.insert(account)?; + } + Ok(address) } /// Import all accounts from one directory to the other. pub fn import_accounts(src: &KeyDirectory, dst: &KeyDirectory) -> Result, Error> { - let accounts = src.load()?; - let existing_accounts = dst.load()?.into_iter() - .map(|a| a.address) - .collect::>(); + let accounts = src.load()?; + let existing_accounts = dst + .load()? 
+ .into_iter() + .map(|a| a.address) + .collect::>(); - accounts.into_iter() - .filter(|a| !existing_accounts.contains(&a.address)) - .map(|a| { - let address = a.address.clone(); - dst.insert(a)?; - Ok(address) - }).collect() + accounts + .into_iter() + .filter(|a| !existing_accounts.contains(&a.address)) + .map(|a| { + let address = a.address.clone(); + dst.insert(a)?; + Ok(address) + }) + .collect() } /// Provide a `HashSet` of all accounts available for import from the Geth keystore. pub fn read_geth_accounts(testnet: bool) -> Vec
{ - RootDiskDirectory::at(dir::geth(testnet)) - .load() - .map(|d| d.into_iter().map(|a| a.address).collect()) - .unwrap_or_else(|_| Vec::new()) + RootDiskDirectory::at(dir::geth(testnet)) + .load() + .map(|d| d.into_iter().map(|a| a.address).collect()) + .unwrap_or_else(|_| Vec::new()) } /// Import specific `desired` accounts from the Geth keystore into `dst`. -pub fn import_geth_accounts(dst: &KeyDirectory, desired: HashSet
, testnet: bool) -> Result, Error> { - let src = RootDiskDirectory::at(dir::geth(testnet)); - let accounts = src.load()?; - let existing_accounts = dst.load()?.into_iter().map(|a| a.address).collect::>(); +pub fn import_geth_accounts( + dst: &KeyDirectory, + desired: HashSet
, + testnet: bool, +) -> Result, Error> { + let src = RootDiskDirectory::at(dir::geth(testnet)); + let accounts = src.load()?; + let existing_accounts = dst + .load()? + .into_iter() + .map(|a| a.address) + .collect::>(); - accounts.into_iter() - .filter(|a| !existing_accounts.contains(&a.address)) - .filter(|a| desired.contains(&a.address)) - .map(|a| { - let address = a.address.clone(); - dst.insert(a)?; - Ok(address) - }).collect() + accounts + .into_iter() + .filter(|a| !existing_accounts.contains(&a.address)) + .filter(|a| desired.contains(&a.address)) + .map(|a| { + let address = a.address.clone(); + dst.insert(a)?; + Ok(address) + }) + .collect() } diff --git a/accounts/ethstore/src/json/bytes.rs b/accounts/ethstore/src/json/bytes.rs index 71391d8d1..c4fa05092 100644 --- a/accounts/ethstore/src/json/bytes.rs +++ b/accounts/ethstore/src/json/bytes.rs @@ -14,61 +14,69 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use rustc_hex::{FromHex, FromHexError, ToHex}; +use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; use std::{ops, str}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde::de::Error; -use rustc_hex::{ToHex, FromHex, FromHexError}; #[derive(Debug, PartialEq)] pub struct Bytes(Vec); impl ops::Deref for Bytes { - type Target = [u8]; + type Target = [u8]; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl<'a> Deserialize<'a> for Bytes { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> - { - let s = String::deserialize(deserializer)?; - let data = s.from_hex().map_err(|e| Error::custom(format!("Invalid hex value {}", e)))?; - Ok(Bytes(data)) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + let s = String::deserialize(deserializer)?; + let data = s + .from_hex() + .map_err(|e| Error::custom(format!("Invalid hex value {}", e)))?; + Ok(Bytes(data)) + } } impl Serialize for Bytes { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - serializer.serialize_str(&self.0.to_hex()) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.0.to_hex()) + } } impl str::FromStr for Bytes { - type Err = FromHexError; + type Err = FromHexError; - fn from_str(s: &str) -> Result { - s.from_hex().map(Bytes) - } + fn from_str(s: &str) -> Result { + s.from_hex().map(Bytes) + } } impl From<&'static str> for Bytes { - fn from(s: &'static str) -> Self { - s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s)) - } + fn from(s: &'static str) -> Self { + s.parse().expect(&format!( + "invalid string literal for {}: '{}'", + stringify!(Self), + s + )) + } } impl From> for Bytes { - fn from(v: Vec) -> Self { - Bytes(v) - } + fn from(v: Vec) -> Self { + Bytes(v) + } } impl From for Vec { - fn from(b: Bytes) -> Self { - b.0 - } + fn 
from(b: Bytes) -> Self { + b.0 + } } diff --git a/accounts/ethstore/src/json/cipher.rs b/accounts/ethstore/src/json/cipher.rs index 38d897b64..b6fb7380b 100644 --- a/accounts/ethstore/src/json/cipher.rs +++ b/accounts/ethstore/src/json/cipher.rs @@ -14,83 +14,99 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt; -use serde::{Serialize, Serializer, Deserialize, Deserializer}; -use serde::de::{Visitor, Error as SerdeError}; use super::{Error, H128}; +use serde::{ + de::{Error as SerdeError, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::fmt; #[derive(Debug, PartialEq)] pub enum CipherSer { - Aes128Ctr, + Aes128Ctr, } impl Serialize for CipherSer { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - match *self { - CipherSer::Aes128Ctr => serializer.serialize_str("aes-128-ctr"), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + CipherSer::Aes128Ctr => serializer.serialize_str("aes-128-ctr"), + } + } } impl<'a> Deserialize<'a> for CipherSer { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(CipherSerVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(CipherSerVisitor) + } } struct CipherSerVisitor; impl<'a> Visitor<'a> for CipherSerVisitor { - type Value = CipherSer; + type Value = CipherSer; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a valid cipher identifier") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a valid cipher identifier") + } - fn visit_str(self, value: &str) -> Result where E: SerdeError { - match value { - "aes-128-ctr" => Ok(CipherSer::Aes128Ctr), - _ => Err(SerdeError::custom(Error::UnsupportedCipher)) - } - } + fn visit_str(self, value: &str) -> 
Result + where + E: SerdeError, + { + match value { + "aes-128-ctr" => Ok(CipherSer::Aes128Ctr), + _ => Err(SerdeError::custom(Error::UnsupportedCipher)), + } + } - fn visit_string(self, value: String) -> Result where E: SerdeError { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: SerdeError, + { + self.visit_str(value.as_ref()) + } } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Aes128Ctr { - pub iv: H128, + pub iv: H128, } #[derive(Debug, PartialEq)] pub enum CipherSerParams { - Aes128Ctr(Aes128Ctr), + Aes128Ctr(Aes128Ctr), } impl Serialize for CipherSerParams { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - match *self { - CipherSerParams::Aes128Ctr(ref params) => params.serialize(serializer), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + CipherSerParams::Aes128Ctr(ref params) => params.serialize(serializer), + } + } } impl<'a> Deserialize<'a> for CipherSerParams { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - Aes128Ctr::deserialize(deserializer) - .map(CipherSerParams::Aes128Ctr) - .map_err(|_| Error::InvalidCipherParams) - .map_err(SerdeError::custom) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + Aes128Ctr::deserialize(deserializer) + .map(CipherSerParams::Aes128Ctr) + .map_err(|_| Error::InvalidCipherParams) + .map_err(SerdeError::custom) + } } #[derive(Debug, PartialEq)] pub enum Cipher { - Aes128Ctr(Aes128Ctr), + Aes128Ctr(Aes128Ctr), } diff --git a/accounts/ethstore/src/json/crypto.rs b/accounts/ethstore/src/json/crypto.rs index 34664f98b..3690f6a7a 100644 --- a/accounts/ethstore/src/json/crypto.rs +++ b/accounts/ethstore/src/json/crypto.rs @@ -14,181 +14,205 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::{fmt, str}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde::ser::SerializeStruct; -use serde::de::{Visitor, MapAccess, Error}; +use super::{Bytes, Cipher, CipherSer, CipherSerParams, Kdf, KdfSer, KdfSerParams, H256}; +use serde::{ + de::{Error, MapAccess, Visitor}, + ser::SerializeStruct, + Deserialize, Deserializer, Serialize, Serializer, +}; use serde_json; -use super::{Cipher, CipherSer, CipherSerParams, Kdf, KdfSer, KdfSerParams, H256, Bytes}; +use std::{fmt, str}; pub type CipherText = Bytes; #[derive(Debug, PartialEq)] pub struct Crypto { - pub cipher: Cipher, - pub ciphertext: CipherText, - pub kdf: Kdf, - pub mac: H256, + pub cipher: Cipher, + pub ciphertext: CipherText, + pub kdf: Kdf, + pub mac: H256, } impl str::FromStr for Crypto { - type Err = serde_json::error::Error; + type Err = serde_json::error::Error; - fn from_str(s: &str) -> Result { - serde_json::from_str(s) - } + fn from_str(s: &str) -> Result { + serde_json::from_str(s) + } } impl From for String { - fn from(c: Crypto) -> Self { - serde_json::to_string(&c).expect("serialization cannot fail, cause all crypto keys are strings") - } + fn from(c: Crypto) -> Self { + serde_json::to_string(&c) + .expect("serialization cannot fail, cause all crypto keys are strings") + } } enum CryptoField { - Cipher, - CipherParams, - CipherText, - Kdf, - KdfParams, - Mac, - Version, + Cipher, + CipherParams, + CipherText, + Kdf, + KdfParams, + Mac, + Version, } impl<'a> Deserialize<'a> for CryptoField { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> - { - deserializer.deserialize_any(CryptoFieldVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(CryptoFieldVisitor) + } } struct CryptoFieldVisitor; impl<'a> Visitor<'a> for CryptoFieldVisitor { - type Value = CryptoField; + type Value = CryptoField; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - 
write!(formatter, "a valid crypto struct description") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a valid crypto struct description") + } - fn visit_str(self, value: &str) -> Result - where E: Error - { - match value { - "cipher" => Ok(CryptoField::Cipher), - "cipherparams" => Ok(CryptoField::CipherParams), - "ciphertext" => Ok(CryptoField::CipherText), - "kdf" => Ok(CryptoField::Kdf), - "kdfparams" => Ok(CryptoField::KdfParams), - "mac" => Ok(CryptoField::Mac), - "version" => Ok(CryptoField::Version), - _ => Err(Error::custom(format!("Unknown field: '{}'", value))), - } - } + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "cipher" => Ok(CryptoField::Cipher), + "cipherparams" => Ok(CryptoField::CipherParams), + "ciphertext" => Ok(CryptoField::CipherText), + "kdf" => Ok(CryptoField::Kdf), + "kdfparams" => Ok(CryptoField::KdfParams), + "mac" => Ok(CryptoField::Mac), + "version" => Ok(CryptoField::Version), + _ => Err(Error::custom(format!("Unknown field: '{}'", value))), + } + } } impl<'a> Deserialize<'a> for Crypto { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> - { - static FIELDS: &'static [&'static str] = &["id", "version", "crypto", "Crypto", "address"]; - deserializer.deserialize_struct("Crypto", FIELDS, CryptoVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + static FIELDS: &'static [&'static str] = &["id", "version", "crypto", "Crypto", "address"]; + deserializer.deserialize_struct("Crypto", FIELDS, CryptoVisitor) + } } struct CryptoVisitor; impl<'a> Visitor<'a> for CryptoVisitor { - type Value = Crypto; + type Value = Crypto; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a valid vault crypto object") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a valid vault crypto object") + } - fn visit_map(self, mut 
visitor: V) -> Result - where V: MapAccess<'a> - { - let mut cipher = None; - let mut cipherparams = None; - let mut ciphertext = None; - let mut kdf = None; - let mut kdfparams = None; - let mut mac = None; + fn visit_map(self, mut visitor: V) -> Result + where + V: MapAccess<'a>, + { + let mut cipher = None; + let mut cipherparams = None; + let mut ciphertext = None; + let mut kdf = None; + let mut kdfparams = None; + let mut mac = None; - loop { - match visitor.next_key()? { - Some(CryptoField::Cipher) => { cipher = Some(visitor.next_value()?); } - Some(CryptoField::CipherParams) => { cipherparams = Some(visitor.next_value()?); } - Some(CryptoField::CipherText) => { ciphertext = Some(visitor.next_value()?); } - Some(CryptoField::Kdf) => { kdf = Some(visitor.next_value()?); } - Some(CryptoField::KdfParams) => { kdfparams = Some(visitor.next_value()?); } - Some(CryptoField::Mac) => { mac = Some(visitor.next_value()?); } - // skip not required version field (it appears in pyethereum generated keystores) - Some(CryptoField::Version) => { visitor.next_value().unwrap_or(()) } - None => { break; } - } - } + loop { + match visitor.next_key()? 
{ + Some(CryptoField::Cipher) => { + cipher = Some(visitor.next_value()?); + } + Some(CryptoField::CipherParams) => { + cipherparams = Some(visitor.next_value()?); + } + Some(CryptoField::CipherText) => { + ciphertext = Some(visitor.next_value()?); + } + Some(CryptoField::Kdf) => { + kdf = Some(visitor.next_value()?); + } + Some(CryptoField::KdfParams) => { + kdfparams = Some(visitor.next_value()?); + } + Some(CryptoField::Mac) => { + mac = Some(visitor.next_value()?); + } + // skip not required version field (it appears in pyethereum generated keystores) + Some(CryptoField::Version) => visitor.next_value().unwrap_or(()), + None => { + break; + } + } + } - let cipher = match (cipher, cipherparams) { - (Some(CipherSer::Aes128Ctr), Some(CipherSerParams::Aes128Ctr(params))) => Cipher::Aes128Ctr(params), - (None, _) => return Err(V::Error::missing_field("cipher")), - (Some(_), None) => return Err(V::Error::missing_field("cipherparams")), - }; + let cipher = match (cipher, cipherparams) { + (Some(CipherSer::Aes128Ctr), Some(CipherSerParams::Aes128Ctr(params))) => { + Cipher::Aes128Ctr(params) + } + (None, _) => return Err(V::Error::missing_field("cipher")), + (Some(_), None) => return Err(V::Error::missing_field("cipherparams")), + }; - let ciphertext = match ciphertext { - Some(ciphertext) => ciphertext, - None => return Err(V::Error::missing_field("ciphertext")), - }; + let ciphertext = match ciphertext { + Some(ciphertext) => ciphertext, + None => return Err(V::Error::missing_field("ciphertext")), + }; - let kdf = match (kdf, kdfparams) { - (Some(KdfSer::Pbkdf2), Some(KdfSerParams::Pbkdf2(params))) => Kdf::Pbkdf2(params), - (Some(KdfSer::Scrypt), Some(KdfSerParams::Scrypt(params))) => Kdf::Scrypt(params), - (Some(_), Some(_)) => return Err(V::Error::custom("Invalid cipherparams")), - (None, _) => return Err(V::Error::missing_field("kdf")), - (Some(_), None) => return Err(V::Error::missing_field("kdfparams")), - }; + let kdf = match (kdf, kdfparams) { + 
(Some(KdfSer::Pbkdf2), Some(KdfSerParams::Pbkdf2(params))) => Kdf::Pbkdf2(params), + (Some(KdfSer::Scrypt), Some(KdfSerParams::Scrypt(params))) => Kdf::Scrypt(params), + (Some(_), Some(_)) => return Err(V::Error::custom("Invalid cipherparams")), + (None, _) => return Err(V::Error::missing_field("kdf")), + (Some(_), None) => return Err(V::Error::missing_field("kdfparams")), + }; - let mac = match mac { - Some(mac) => mac, - None => return Err(V::Error::missing_field("mac")), - }; + let mac = match mac { + Some(mac) => mac, + None => return Err(V::Error::missing_field("mac")), + }; - let result = Crypto { - cipher: cipher, - ciphertext: ciphertext, - kdf: kdf, - mac: mac, - }; + let result = Crypto { + cipher: cipher, + ciphertext: ciphertext, + kdf: kdf, + mac: mac, + }; - Ok(result) - } + Ok(result) + } } impl Serialize for Crypto { - fn serialize(&self, serializer: S) -> Result - where S: Serializer - { - let mut crypto = serializer.serialize_struct("Crypto", 6)?; - match self.cipher { - Cipher::Aes128Ctr(ref params) => { - crypto.serialize_field("cipher", &CipherSer::Aes128Ctr)?; - crypto.serialize_field("cipherparams", params)?; - }, - } - crypto.serialize_field("ciphertext", &self.ciphertext)?; - match self.kdf { - Kdf::Pbkdf2(ref params) => { - crypto.serialize_field("kdf", &KdfSer::Pbkdf2)?; - crypto.serialize_field("kdfparams", params)?; - }, - Kdf::Scrypt(ref params) => { - crypto.serialize_field("kdf", &KdfSer::Scrypt)?; - crypto.serialize_field("kdfparams", params)?; - }, - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut crypto = serializer.serialize_struct("Crypto", 6)?; + match self.cipher { + Cipher::Aes128Ctr(ref params) => { + crypto.serialize_field("cipher", &CipherSer::Aes128Ctr)?; + crypto.serialize_field("cipherparams", params)?; + } + } + crypto.serialize_field("ciphertext", &self.ciphertext)?; + match self.kdf { + Kdf::Pbkdf2(ref params) => { + crypto.serialize_field("kdf", &KdfSer::Pbkdf2)?; + 
crypto.serialize_field("kdfparams", params)?; + } + Kdf::Scrypt(ref params) => { + crypto.serialize_field("kdf", &KdfSer::Scrypt)?; + crypto.serialize_field("kdfparams", params)?; + } + } - crypto.serialize_field("mac", &self.mac)?; - crypto.end() - } + crypto.serialize_field("mac", &self.mac)?; + crypto.end() + } } diff --git a/accounts/ethstore/src/json/error.rs b/accounts/ethstore/src/json/error.rs index e02ecb963..465856c1d 100644 --- a/accounts/ethstore/src/json/error.rs +++ b/accounts/ethstore/src/json/error.rs @@ -18,33 +18,33 @@ use std::fmt; #[derive(Debug, PartialEq)] pub enum Error { - UnsupportedCipher, - InvalidCipherParams, - UnsupportedKdf, - InvalidUuid, - UnsupportedVersion, - InvalidCiphertext, - InvalidH256, - InvalidPrf, + UnsupportedCipher, + InvalidCipherParams, + UnsupportedKdf, + InvalidUuid, + UnsupportedVersion, + InvalidCiphertext, + InvalidH256, + InvalidPrf, } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Error::InvalidUuid => write!(f, "Invalid Uuid"), - Error::UnsupportedVersion => write!(f, "Unsupported version"), - Error::UnsupportedKdf => write!(f, "Unsupported kdf"), - Error::InvalidCiphertext => write!(f, "Invalid ciphertext"), - Error::UnsupportedCipher => write!(f, "Unsupported cipher"), - Error::InvalidCipherParams => write!(f, "Invalid cipher params"), - Error::InvalidH256 => write!(f, "Invalid hash"), - Error::InvalidPrf => write!(f, "Invalid prf"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::InvalidUuid => write!(f, "Invalid Uuid"), + Error::UnsupportedVersion => write!(f, "Unsupported version"), + Error::UnsupportedKdf => write!(f, "Unsupported kdf"), + Error::InvalidCiphertext => write!(f, "Invalid ciphertext"), + Error::UnsupportedCipher => write!(f, "Unsupported cipher"), + Error::InvalidCipherParams => write!(f, "Invalid cipher params"), + Error::InvalidH256 => write!(f, "Invalid hash"), + 
Error::InvalidPrf => write!(f, "Invalid prf"), + } + } } impl Into for Error { - fn into(self) -> String { - format!("{}", self) - } + fn into(self) -> String { + format!("{}", self) + } } diff --git a/accounts/ethstore/src/json/hash.rs b/accounts/ethstore/src/json/hash.rs index 6678abb73..93c4034b3 100644 --- a/accounts/ethstore/src/json/hash.rs +++ b/accounts/ethstore/src/json/hash.rs @@ -14,104 +14,120 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::{ops, fmt, str}; -use rustc_hex::{FromHex, ToHex}; -use serde::{Serialize, Serializer, Deserialize, Deserializer}; -use serde::de::{Visitor, Error as SerdeError}; use super::Error; +use rustc_hex::{FromHex, ToHex}; +use serde::{ + de::{Error as SerdeError, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::{fmt, ops, str}; macro_rules! impl_hash { - ($name: ident, $size: expr) => { - pub struct $name([u8; $size]); + ($name: ident, $size: expr) => { + pub struct $name([u8; $size]); - impl fmt::Debug for $name { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - let self_ref: &[u8] = &self.0; - write!(f, "{:?}", self_ref) - } - } + impl fmt::Debug for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let self_ref: &[u8] = &self.0; + write!(f, "{:?}", self_ref) + } + } - impl PartialEq for $name { - fn eq(&self, other: &Self) -> bool { - let self_ref: &[u8] = &self.0; - let other_ref: &[u8] = &other.0; - self_ref == other_ref - } - } + impl PartialEq for $name { + fn eq(&self, other: &Self) -> bool { + let self_ref: &[u8] = &self.0; + let other_ref: &[u8] = &other.0; + self_ref == other_ref + } + } - impl ops::Deref for $name { - type Target = [u8]; + impl ops::Deref for $name { + type Target = [u8]; - fn deref(&self) -> &Self::Target { - &self.0 - } - } + fn deref(&self) -> &Self::Target { + &self.0 + } + } - impl Serialize for $name { - fn serialize(&self, serializer: 
S) -> Result - where S: Serializer { - serializer.serialize_str(&self.0.to_hex()) - } - } + impl Serialize for $name { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.0.to_hex()) + } + } - impl<'a> Deserialize<'a> for $name { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - struct HashVisitor; + impl<'a> Deserialize<'a> for $name { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + struct HashVisitor; - impl<'b> Visitor<'b> for HashVisitor { - type Value = $name; + impl<'b> Visitor<'b> for HashVisitor { + type Value = $name; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a hex-encoded {}", stringify!($name)) - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded {}", stringify!($name)) + } - fn visit_str(self, value: &str) -> Result where E: SerdeError { - value.parse().map_err(SerdeError::custom) - } + fn visit_str(self, value: &str) -> Result + where + E: SerdeError, + { + value.parse().map_err(SerdeError::custom) + } - fn visit_string(self, value: String) -> Result where E: SerdeError { - self.visit_str(value.as_ref()) - } - } + fn visit_string(self, value: String) -> Result + where + E: SerdeError, + { + self.visit_str(value.as_ref()) + } + } - deserializer.deserialize_any(HashVisitor) - } - } + deserializer.deserialize_any(HashVisitor) + } + } - impl str::FromStr for $name { - type Err = Error; + impl str::FromStr for $name { + type Err = Error; - fn from_str(value: &str) -> Result { - match value.from_hex() { - Ok(ref hex) if hex.len() == $size => { - let mut hash = [0u8; $size]; - hash.clone_from_slice(hex); - Ok($name(hash)) - } - _ => Err(Error::InvalidH256), - } - } - } + fn from_str(value: &str) -> Result { + match value.from_hex() { + Ok(ref hex) if hex.len() == $size => { + let mut hash = [0u8; $size]; + hash.clone_from_slice(hex); + 
Ok($name(hash)) + } + _ => Err(Error::InvalidH256), + } + } + } - impl From<&'static str> for $name { - fn from(s: &'static str) -> Self { - s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!($name), s)) - } - } + impl From<&'static str> for $name { + fn from(s: &'static str) -> Self { + s.parse().expect(&format!( + "invalid string literal for {}: '{}'", + stringify!($name), + s + )) + } + } - impl From<[u8; $size]> for $name { - fn from(bytes: [u8; $size]) -> Self { - $name(bytes) - } - } + impl From<[u8; $size]> for $name { + fn from(bytes: [u8; $size]) -> Self { + $name(bytes) + } + } - impl Into<[u8; $size]> for $name { - fn into(self) -> [u8; $size] { - self.0 - } - } - } + impl Into<[u8; $size]> for $name { + fn into(self) -> [u8; $size] { + self.0 + } + } + }; } impl_hash!(H128, 16); diff --git a/accounts/ethstore/src/json/id.rs b/accounts/ethstore/src/json/id.rs index 27550428f..d5d542b4f 100644 --- a/accounts/ethstore/src/json/id.rs +++ b/accounts/ethstore/src/json/id.rs @@ -15,139 +15,165 @@ // along with Parity Ethereum. If not, see . //! Universaly unique identifier. -use std::{fmt, str}; -use rustc_hex::{ToHex, FromHex}; -use serde::{Deserialize, Serialize, Deserializer, Serializer}; -use serde::de::{Visitor, Error as SerdeError}; use super::Error; +use rustc_hex::{FromHex, ToHex}; +use serde::{ + de::{Error as SerdeError, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::{fmt, str}; /// Universaly unique identifier. 
#[derive(Debug, PartialEq)] pub struct Uuid([u8; 16]); impl From<[u8; 16]> for Uuid { - fn from(uuid: [u8; 16]) -> Self { - Uuid(uuid) - } + fn from(uuid: [u8; 16]) -> Self { + Uuid(uuid) + } } impl<'a> Into for &'a Uuid { - fn into(self) -> String { - let d1 = &self.0[0..4]; - let d2 = &self.0[4..6]; - let d3 = &self.0[6..8]; - let d4 = &self.0[8..10]; - let d5 = &self.0[10..16]; - [d1, d2, d3, d4, d5].into_iter().map(|d| d.to_hex()).collect::>().join("-") - } + fn into(self) -> String { + let d1 = &self.0[0..4]; + let d2 = &self.0[4..6]; + let d3 = &self.0[6..8]; + let d4 = &self.0[8..10]; + let d5 = &self.0[10..16]; + [d1, d2, d3, d4, d5] + .into_iter() + .map(|d| d.to_hex()) + .collect::>() + .join("-") + } } impl Into for Uuid { - fn into(self) -> String { - Into::into(&self) - } + fn into(self) -> String { + Into::into(&self) + } } impl Into<[u8; 16]> for Uuid { - fn into(self) -> [u8; 16] { - self.0 - } + fn into(self) -> [u8; 16] { + self.0 + } } impl fmt::Display for Uuid { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - let s: String = (self as &Uuid).into(); - write!(f, "{}", s) - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let s: String = (self as &Uuid).into(); + write!(f, "{}", s) + } } fn copy_into(from: &str, into: &mut [u8]) -> Result<(), Error> { - let from = from.from_hex().map_err(|_| Error::InvalidUuid)?; + let from = from.from_hex().map_err(|_| Error::InvalidUuid)?; - if from.len() != into.len() { - return Err(Error::InvalidUuid); - } + if from.len() != into.len() { + return Err(Error::InvalidUuid); + } - into.copy_from_slice(&from); - Ok(()) + into.copy_from_slice(&from); + Ok(()) } impl str::FromStr for Uuid { - type Err = Error; + type Err = Error; - fn from_str(s: &str) -> Result { - let parts: Vec<&str> = s.split("-").collect(); + fn from_str(s: &str) -> Result { + let parts: Vec<&str> = s.split("-").collect(); - if parts.len() != 5 { - return Err(Error::InvalidUuid); - } + if parts.len() 
!= 5 { + return Err(Error::InvalidUuid); + } - let mut uuid = [0u8; 16]; + let mut uuid = [0u8; 16]; - copy_into(parts[0], &mut uuid[0..4])?; - copy_into(parts[1], &mut uuid[4..6])?; - copy_into(parts[2], &mut uuid[6..8])?; - copy_into(parts[3], &mut uuid[8..10])?; - copy_into(parts[4], &mut uuid[10..16])?; + copy_into(parts[0], &mut uuid[0..4])?; + copy_into(parts[1], &mut uuid[4..6])?; + copy_into(parts[2], &mut uuid[6..8])?; + copy_into(parts[3], &mut uuid[8..10])?; + copy_into(parts[4], &mut uuid[10..16])?; - Ok(Uuid(uuid)) - } + Ok(Uuid(uuid)) + } } impl From<&'static str> for Uuid { - fn from(s: &'static str) -> Self { - s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s)) - } + fn from(s: &'static str) -> Self { + s.parse().expect(&format!( + "invalid string literal for {}: '{}'", + stringify!(Self), + s + )) + } } impl Serialize for Uuid { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - let s: String = self.into(); - serializer.serialize_str(&s) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let s: String = self.into(); + serializer.serialize_str(&s) + } } impl<'a> Deserialize<'a> for Uuid { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(UuidVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(UuidVisitor) + } } struct UuidVisitor; impl<'a> Visitor<'a> for UuidVisitor { - type Value = Uuid; + type Value = Uuid; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a valid hex-encoded UUID") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a valid hex-encoded UUID") + } - fn visit_str(self, value: &str) -> Result where E: SerdeError { - value.parse().map_err(SerdeError::custom) - } + fn visit_str(self, value: &str) -> Result + where + E: SerdeError, + { + 
value.parse().map_err(SerdeError::custom) + } - fn visit_string(self, value: String) -> Result where E: SerdeError { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: SerdeError, + { + self.visit_str(value.as_ref()) + } } #[cfg(test)] mod tests { - use super::Uuid; + use super::Uuid; - #[test] - fn uuid_from_str() { - let uuid: Uuid = "3198bc9c-6672-5ab3-d995-4942343ae5b6".into(); - assert_eq!(uuid, Uuid::from([0x31, 0x98, 0xbc, 0x9c, 0x66, 0x72, 0x5a, 0xb3, 0xd9, 0x95, 0x49, 0x42, 0x34, 0x3a, 0xe5, 0xb6])); - } + #[test] + fn uuid_from_str() { + let uuid: Uuid = "3198bc9c-6672-5ab3-d995-4942343ae5b6".into(); + assert_eq!( + uuid, + Uuid::from([ + 0x31, 0x98, 0xbc, 0x9c, 0x66, 0x72, 0x5a, 0xb3, 0xd9, 0x95, 0x49, 0x42, 0x34, 0x3a, + 0xe5, 0xb6 + ]) + ); + } - #[test] - fn uuid_from_and_to_str() { - let from = "3198bc9c-6672-5ab3-d995-4942343ae5b6"; - let uuid: Uuid = from.into(); - let to: String = uuid.into(); - assert_eq!(from, &to); - } + #[test] + fn uuid_from_and_to_str() { + let from = "3198bc9c-6672-5ab3-d995-4942343ae5b6"; + let uuid: Uuid = from.into(); + let to: String = uuid.into(); + assert_eq!(from, &to); + } } diff --git a/accounts/ethstore/src/json/kdf.rs b/accounts/ethstore/src/json/kdf.rs index a8bb8b261..2157e07c5 100644 --- a/accounts/ethstore/src/json/kdf.rs +++ b/accounts/ethstore/src/json/kdf.rs @@ -14,147 +14,173 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::fmt; -use std::num::NonZeroU32; -use serde::{Serialize, Serializer, Deserialize, Deserializer}; -use serde::de::{Visitor, Error as SerdeError}; -use super::{Error, Bytes}; +use super::{Bytes, Error}; +use serde::{ + de::{Error as SerdeError, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::{fmt, num::NonZeroU32}; #[derive(Debug, PartialEq)] pub enum KdfSer { - Pbkdf2, - Scrypt, + Pbkdf2, + Scrypt, } impl Serialize for KdfSer { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - match *self { - KdfSer::Pbkdf2 => serializer.serialize_str("pbkdf2"), - KdfSer::Scrypt => serializer.serialize_str("scrypt"), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + KdfSer::Pbkdf2 => serializer.serialize_str("pbkdf2"), + KdfSer::Scrypt => serializer.serialize_str("scrypt"), + } + } } impl<'a> Deserialize<'a> for KdfSer { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(KdfSerVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(KdfSerVisitor) + } } struct KdfSerVisitor; impl<'a> Visitor<'a> for KdfSerVisitor { - type Value = KdfSer; + type Value = KdfSer; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a kdf algorithm identifier") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a kdf algorithm identifier") + } - fn visit_str(self, value: &str) -> Result where E: SerdeError { - match value { - "pbkdf2" => Ok(KdfSer::Pbkdf2), - "scrypt" => Ok(KdfSer::Scrypt), - _ => Err(SerdeError::custom(Error::UnsupportedKdf)) - } - } + fn visit_str(self, value: &str) -> Result + where + E: SerdeError, + { + match value { + "pbkdf2" => Ok(KdfSer::Pbkdf2), + "scrypt" => Ok(KdfSer::Scrypt), + _ => Err(SerdeError::custom(Error::UnsupportedKdf)), + } + } - fn visit_string(self, 
value: String) -> Result where E: SerdeError { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: SerdeError, + { + self.visit_str(value.as_ref()) + } } #[derive(Debug, PartialEq)] pub enum Prf { - HmacSha256, + HmacSha256, } impl Serialize for Prf { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - match *self { - Prf::HmacSha256 => serializer.serialize_str("hmac-sha256"), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + Prf::HmacSha256 => serializer.serialize_str("hmac-sha256"), + } + } } impl<'a> Deserialize<'a> for Prf { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(PrfVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(PrfVisitor) + } } struct PrfVisitor; impl<'a> Visitor<'a> for PrfVisitor { - type Value = Prf; + type Value = Prf; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a prf algorithm identifier") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a prf algorithm identifier") + } - fn visit_str(self, value: &str) -> Result where E: SerdeError { - match value { - "hmac-sha256" => Ok(Prf::HmacSha256), - _ => Err(SerdeError::custom(Error::InvalidPrf)), - } - } + fn visit_str(self, value: &str) -> Result + where + E: SerdeError, + { + match value { + "hmac-sha256" => Ok(Prf::HmacSha256), + _ => Err(SerdeError::custom(Error::InvalidPrf)), + } + } - fn visit_string(self, value: String) -> Result where E: SerdeError { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: SerdeError, + { + self.visit_str(value.as_ref()) + } } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Pbkdf2 { - pub c: NonZeroU32, - pub dklen: u32, - pub prf: Prf, - pub salt: Bytes, + pub c: 
NonZeroU32, + pub dklen: u32, + pub prf: Prf, + pub salt: Bytes, } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Scrypt { - pub dklen: u32, - pub p: u32, - pub n: u32, - pub r: u32, - pub salt: Bytes, + pub dklen: u32, + pub p: u32, + pub n: u32, + pub r: u32, + pub salt: Bytes, } #[derive(Debug, PartialEq)] pub enum KdfSerParams { - Pbkdf2(Pbkdf2), - Scrypt(Scrypt), + Pbkdf2(Pbkdf2), + Scrypt(Scrypt), } impl Serialize for KdfSerParams { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - match *self { - KdfSerParams::Pbkdf2(ref params) => params.serialize(serializer), - KdfSerParams::Scrypt(ref params) => params.serialize(serializer), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + KdfSerParams::Pbkdf2(ref params) => params.serialize(serializer), + KdfSerParams::Scrypt(ref params) => params.serialize(serializer), + } + } } impl<'a> Deserialize<'a> for KdfSerParams { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - use serde_json::{Value, from_value}; + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + use serde_json::{from_value, Value}; - let v: Value = Deserialize::deserialize(deserializer)?; + let v: Value = Deserialize::deserialize(deserializer)?; - from_value(v.clone()).map(KdfSerParams::Pbkdf2) - .or_else(|_| from_value(v).map(KdfSerParams::Scrypt)) - .map_err(|_| D::Error::custom("Invalid KDF algorithm")) - } + from_value(v.clone()) + .map(KdfSerParams::Pbkdf2) + .or_else(|_| from_value(v).map(KdfSerParams::Scrypt)) + .map_err(|_| D::Error::custom("Invalid KDF algorithm")) + } } #[derive(Debug, PartialEq)] pub enum Kdf { - Pbkdf2(Pbkdf2), - Scrypt(Scrypt), + Pbkdf2(Pbkdf2), + Scrypt(Scrypt), } diff --git a/accounts/ethstore/src/json/key_file.rs b/accounts/ethstore/src/json/key_file.rs index 60c3ae859..17d7a9ca6 100644 --- a/accounts/ethstore/src/json/key_file.rs +++ b/accounts/ethstore/src/json/key_file.rs @@ 
-14,182 +14,214 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt; -use std::io::{Read, Write}; -use serde::{Serialize, Serializer, Deserialize, Deserializer}; -use serde::de::{Error, Visitor, MapAccess, DeserializeOwned}; +use super::{Crypto, Uuid, Version, H160}; +use serde::{ + de::{DeserializeOwned, Error, MapAccess, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; use serde_json; -use super::{Uuid, Version, Crypto, H160}; +use std::{ + fmt, + io::{Read, Write}, +}; /// Public opaque type representing serializable `KeyFile`. #[derive(Debug, PartialEq)] pub struct OpaqueKeyFile { - key_file: KeyFile + key_file: KeyFile, } impl Serialize for OpaqueKeyFile { - fn serialize(&self, serializer: S) -> Result where - S: Serializer, - { - self.key_file.serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.key_file.serialize(serializer) + } } -impl From for OpaqueKeyFile where T: Into { - fn from(val: T) -> Self { - OpaqueKeyFile { key_file: val.into() } - } +impl From for OpaqueKeyFile +where + T: Into, +{ + fn from(val: T) -> Self { + OpaqueKeyFile { + key_file: val.into(), + } + } } #[derive(Debug, PartialEq, Serialize)] pub struct KeyFile { - pub id: Uuid, - pub version: Version, - pub crypto: Crypto, - pub address: Option, - pub name: Option, - pub meta: Option, + pub id: Uuid, + pub version: Version, + pub crypto: Crypto, + pub address: Option, + pub name: Option, + pub meta: Option, } enum KeyFileField { - Id, - Version, - Crypto, - Address, - Name, - Meta, + Id, + Version, + Crypto, + Address, + Name, + Meta, } impl<'a> Deserialize<'a> for KeyFileField { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> - { - deserializer.deserialize_any(KeyFileFieldVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(KeyFileFieldVisitor) 
+ } } struct KeyFileFieldVisitor; impl<'a> Visitor<'a> for KeyFileFieldVisitor { - type Value = KeyFileField; + type Value = KeyFileField; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a valid key file field") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a valid key file field") + } - fn visit_str(self, value: &str) -> Result - where E: Error - { - match value { - "id" => Ok(KeyFileField::Id), - "version" => Ok(KeyFileField::Version), - "crypto" => Ok(KeyFileField::Crypto), - "Crypto" => Ok(KeyFileField::Crypto), - "address" => Ok(KeyFileField::Address), - "name" => Ok(KeyFileField::Name), - "meta" => Ok(KeyFileField::Meta), - _ => Err(Error::custom(format!("Unknown field: '{}'", value))), - } - } + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "id" => Ok(KeyFileField::Id), + "version" => Ok(KeyFileField::Version), + "crypto" => Ok(KeyFileField::Crypto), + "Crypto" => Ok(KeyFileField::Crypto), + "address" => Ok(KeyFileField::Address), + "name" => Ok(KeyFileField::Name), + "meta" => Ok(KeyFileField::Meta), + _ => Err(Error::custom(format!("Unknown field: '{}'", value))), + } + } } impl<'a> Deserialize<'a> for KeyFile { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> - { - static FIELDS: &'static [&'static str] = &["id", "version", "crypto", "Crypto", "address"]; - deserializer.deserialize_struct("KeyFile", FIELDS, KeyFileVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + static FIELDS: &'static [&'static str] = &["id", "version", "crypto", "Crypto", "address"]; + deserializer.deserialize_struct("KeyFile", FIELDS, KeyFileVisitor) + } } -fn none_if_empty<'a, T>(v: Option) -> Option where - T: DeserializeOwned +fn none_if_empty<'a, T>(v: Option) -> Option +where + T: DeserializeOwned, { - v.and_then(|v| if v.is_null() { - None - } else { - serde_json::from_value(v).ok() 
- }) - + v.and_then(|v| { + if v.is_null() { + None + } else { + serde_json::from_value(v).ok() + } + }) } struct KeyFileVisitor; impl<'a> Visitor<'a> for KeyFileVisitor { - type Value = KeyFile; + type Value = KeyFile; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a valid key object") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a valid key object") + } - fn visit_map(self, mut visitor: V) -> Result - where V: MapAccess<'a> - { - let mut id = None; - let mut version = None; - let mut crypto = None; - let mut address = None; - let mut name = None; - let mut meta = None; + fn visit_map(self, mut visitor: V) -> Result + where + V: MapAccess<'a>, + { + let mut id = None; + let mut version = None; + let mut crypto = None; + let mut address = None; + let mut name = None; + let mut meta = None; - loop { - match visitor.next_key()? { - Some(KeyFileField::Id) => { id = Some(visitor.next_value()?); } - Some(KeyFileField::Version) => { version = Some(visitor.next_value()?); } - Some(KeyFileField::Crypto) => { crypto = Some(visitor.next_value()?); } - Some(KeyFileField::Address) => { address = Some(visitor.next_value()?); } - Some(KeyFileField::Name) => { name = none_if_empty(visitor.next_value().ok()) } - Some(KeyFileField::Meta) => { meta = none_if_empty(visitor.next_value().ok()) } - None => { break; } - } - } + loop { + match visitor.next_key()? 
{ + Some(KeyFileField::Id) => { + id = Some(visitor.next_value()?); + } + Some(KeyFileField::Version) => { + version = Some(visitor.next_value()?); + } + Some(KeyFileField::Crypto) => { + crypto = Some(visitor.next_value()?); + } + Some(KeyFileField::Address) => { + address = Some(visitor.next_value()?); + } + Some(KeyFileField::Name) => name = none_if_empty(visitor.next_value().ok()), + Some(KeyFileField::Meta) => meta = none_if_empty(visitor.next_value().ok()), + None => { + break; + } + } + } - let id = match id { - Some(id) => id, - None => return Err(V::Error::missing_field("id")), - }; + let id = match id { + Some(id) => id, + None => return Err(V::Error::missing_field("id")), + }; - let version = match version { - Some(version) => version, - None => return Err(V::Error::missing_field("version")), - }; + let version = match version { + Some(version) => version, + None => return Err(V::Error::missing_field("version")), + }; - let crypto = match crypto { - Some(crypto) => crypto, - None => return Err(V::Error::missing_field("crypto")), - }; + let crypto = match crypto { + Some(crypto) => crypto, + None => return Err(V::Error::missing_field("crypto")), + }; - let result = KeyFile { - id: id, - version: version, - crypto: crypto, - address: address, - name: name, - meta: meta, - }; + let result = KeyFile { + id: id, + version: version, + crypto: crypto, + address: address, + name: name, + meta: meta, + }; - Ok(result) - } + Ok(result) + } } impl KeyFile { - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } - pub fn write(&self, writer: &mut W) -> Result<(), serde_json::Error> where W: Write { - serde_json::to_writer(writer, self) - } + pub fn write(&self, writer: &mut W) -> Result<(), serde_json::Error> + where + W: Write, + { + serde_json::to_writer(writer, self) + } } #[cfg(test)] mod tests { - use std::str::FromStr; - use 
serde_json; - use json::{KeyFile, Uuid, Version, Crypto, Cipher, Aes128Ctr, Kdf, Scrypt}; + use json::{Aes128Ctr, Cipher, Crypto, Kdf, KeyFile, Scrypt, Uuid, Version}; + use serde_json; + use std::str::FromStr; - #[test] - fn basic_keyfile() { - let json = r#" + #[test] + fn basic_keyfile() { + let json = r#" { "address": "6edddfc6349aff20bc6467ccf276c5b52487f7a8", "crypto": { @@ -214,35 +246,36 @@ mod tests { "meta": "{}" }"#; - let expected = KeyFile { - id: Uuid::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), - version: Version::V3, - address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()), - crypto: Crypto { - cipher: Cipher::Aes128Ctr(Aes128Ctr { - iv: "b5a7ec855ec9e2c405371356855fec83".into(), - }), - ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(), - kdf: Kdf::Scrypt(Scrypt { - n: 262144, - dklen: 32, - p: 1, - r: 8, - salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(), - }), - mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(), - }, - name: Some("Test".to_owned()), - meta: Some("{}".to_owned()), - }; + let expected = KeyFile { + id: Uuid::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), + version: Version::V3, + address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()), + crypto: Crypto { + cipher: Cipher::Aes128Ctr(Aes128Ctr { + iv: "b5a7ec855ec9e2c405371356855fec83".into(), + }), + ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc" + .into(), + kdf: Kdf::Scrypt(Scrypt { + n: 262144, + dklen: 32, + p: 1, + r: 8, + salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(), + }), + mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(), + }, + name: Some("Test".to_owned()), + meta: Some("{}".to_owned()), + }; - let keyfile: KeyFile = serde_json::from_str(json).unwrap(); - assert_eq!(keyfile, expected); - } + let keyfile: KeyFile = 
serde_json::from_str(json).unwrap(); + assert_eq!(keyfile, expected); + } - #[test] - fn capital_crypto_keyfile() { - let json = r#" + #[test] + fn capital_crypto_keyfile() { + let json = r#" { "address": "6edddfc6349aff20bc6467ccf276c5b52487f7a8", "Crypto": { @@ -265,60 +298,62 @@ mod tests { "version": 3 }"#; - let expected = KeyFile { - id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(), - version: Version::V3, - address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()), - crypto: Crypto { - cipher: Cipher::Aes128Ctr(Aes128Ctr { - iv: "b5a7ec855ec9e2c405371356855fec83".into(), - }), - ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(), - kdf: Kdf::Scrypt(Scrypt { - n: 262144, - dklen: 32, - p: 1, - r: 8, - salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(), - }), - mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(), - }, - name: None, - meta: None, - }; + let expected = KeyFile { + id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(), + version: Version::V3, + address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()), + crypto: Crypto { + cipher: Cipher::Aes128Ctr(Aes128Ctr { + iv: "b5a7ec855ec9e2c405371356855fec83".into(), + }), + ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc" + .into(), + kdf: Kdf::Scrypt(Scrypt { + n: 262144, + dklen: 32, + p: 1, + r: 8, + salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(), + }), + mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(), + }, + name: None, + meta: None, + }; - let keyfile: KeyFile = serde_json::from_str(json).unwrap(); - assert_eq!(keyfile, expected); - } + let keyfile: KeyFile = serde_json::from_str(json).unwrap(); + assert_eq!(keyfile, expected); + } - #[test] - fn to_and_from_json() { - let file = KeyFile { - id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(), - version: Version::V3, - address: 
Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()), - crypto: Crypto { - cipher: Cipher::Aes128Ctr(Aes128Ctr { - iv: "b5a7ec855ec9e2c405371356855fec83".into(), - }), - ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(), - kdf: Kdf::Scrypt(Scrypt { - n: 262144, - dklen: 32, - p: 1, - r: 8, - salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(), - }), - mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(), - }, - name: Some("Test".to_owned()), - meta: None, - }; + #[test] + fn to_and_from_json() { + let file = KeyFile { + id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(), + version: Version::V3, + address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()), + crypto: Crypto { + cipher: Cipher::Aes128Ctr(Aes128Ctr { + iv: "b5a7ec855ec9e2c405371356855fec83".into(), + }), + ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc" + .into(), + kdf: Kdf::Scrypt(Scrypt { + n: 262144, + dklen: 32, + p: 1, + r: 8, + salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(), + }), + mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(), + }, + name: Some("Test".to_owned()), + meta: None, + }; - let serialized = serde_json::to_string(&file).unwrap(); - println!("{}", serialized); - let deserialized = serde_json::from_str(&serialized).unwrap(); + let serialized = serde_json::to_string(&file).unwrap(); + println!("{}", serialized); + let deserialized = serde_json::from_str(&serialized).unwrap(); - assert_eq!(file, deserialized); - } + assert_eq!(file, deserialized); + } } diff --git a/accounts/ethstore/src/json/mod.rs b/accounts/ethstore/src/json/mod.rs index 2b6348aae..94e4af19f 100644 --- a/accounts/ethstore/src/json/mod.rs +++ b/accounts/ethstore/src/json/mod.rs @@ -29,15 +29,20 @@ mod vault_file; mod vault_key_file; mod version; -pub use self::bytes::Bytes; -pub use self::cipher::{Cipher, CipherSer, 
CipherSerParams, Aes128Ctr}; -pub use self::crypto::{Crypto, CipherText}; -pub use self::error::Error; -pub use self::hash::{H128, H160, H256}; -pub use self::id::Uuid; -pub use self::kdf::{Kdf, KdfSer, Prf, Pbkdf2, Scrypt, KdfSerParams}; -pub use self::key_file::{KeyFile, OpaqueKeyFile}; -pub use self::presale::{PresaleWallet, Encseed}; -pub use self::vault_file::VaultFile; -pub use self::vault_key_file::{VaultKeyFile, VaultKeyMeta, insert_vault_name_to_json_meta, remove_vault_name_from_json_meta}; -pub use self::version::Version; +pub use self::{ + bytes::Bytes, + cipher::{Aes128Ctr, Cipher, CipherSer, CipherSerParams}, + crypto::{CipherText, Crypto}, + error::Error, + hash::{H128, H160, H256}, + id::Uuid, + kdf::{Kdf, KdfSer, KdfSerParams, Pbkdf2, Prf, Scrypt}, + key_file::{KeyFile, OpaqueKeyFile}, + presale::{Encseed, PresaleWallet}, + vault_file::VaultFile, + vault_key_file::{ + insert_vault_name_to_json_meta, remove_vault_name_from_json_meta, VaultKeyFile, + VaultKeyMeta, + }, + version::Version, +}; diff --git a/accounts/ethstore/src/json/presale.rs b/accounts/ethstore/src/json/presale.rs index 70568d510..9dc91f27a 100644 --- a/accounts/ethstore/src/json/presale.rs +++ b/accounts/ethstore/src/json/presale.rs @@ -14,34 +14,37 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::io::Read; +use super::{Bytes, H160}; use serde_json; -use super::{H160, Bytes}; +use std::io::Read; pub type Encseed = Bytes; #[derive(Debug, PartialEq, Deserialize)] pub struct PresaleWallet { - pub encseed: Encseed, - #[serde(rename = "ethaddr")] - pub address: H160, + pub encseed: Encseed, + #[serde(rename = "ethaddr")] + pub address: H160, } impl PresaleWallet { - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } } #[cfg(test)] mod tests { - use std::str::FromStr; - use serde_json; - use json::{PresaleWallet, H160}; + use json::{PresaleWallet, H160}; + use serde_json; + use std::str::FromStr; - #[test] - fn presale_wallet() { - let json = r#" + #[test] + fn presale_wallet() { + let json = r#" { "encseed": "137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066", "ethaddr": "ede84640d1a1d3e06902048e67aa7db8d52c2ce1", @@ -49,18 +52,18 @@ mod tests { "btcaddr": "1JvqEc6WLhg6GnyrLBe2ztPAU28KRfuseH" } "#; - let expected = PresaleWallet { + let expected = PresaleWallet { encseed: "137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066".into(), address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(), }; - let wallet: PresaleWallet = serde_json::from_str(json).unwrap(); - assert_eq!(expected, wallet); - } + let wallet: PresaleWallet = serde_json::from_str(json).unwrap(); + assert_eq!(expected, wallet); + } - #[test] - fn long_presale_wallet() { - let json = r#" + #[test] + fn long_presale_wallet() { + let json = r#" { "encseed": 
"137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d", @@ -69,12 +72,12 @@ mod tests { "btcaddr": "1JvqEc6WLhg6GnyrLBe2ztPAU28KRfuseH" } "#; - let expected = PresaleWallet { + let expected = PresaleWallet { encseed: 
"137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d".into(), address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(), }; - let wallet: PresaleWallet = serde_json::from_str(json).unwrap(); - assert_eq!(expected, wallet); - } + let wallet: PresaleWallet = serde_json::from_str(json).unwrap(); + assert_eq!(expected, wallet); + } } diff --git a/accounts/ethstore/src/json/vault_file.rs b/accounts/ethstore/src/json/vault_file.rs index 0da870931..acb8f75a2 100644 --- a/accounts/ethstore/src/json/vault_file.rs +++ b/accounts/ethstore/src/json/vault_file.rs @@ -14,86 +14,92 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::io::{Read, Write}; -use serde_json; use super::Crypto; +use serde_json; +use std::io::{Read, Write}; /// Vault meta file #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct VaultFile { - /// Vault password, encrypted with vault password - pub crypto: Crypto, - /// Vault metadata string - pub meta: Option, + /// Vault password, encrypted with vault password + pub crypto: Crypto, + /// Vault metadata string + pub meta: Option, } impl VaultFile { - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } - pub fn write(&self, writer: &mut W) -> Result<(), serde_json::Error> where W: Write { - serde_json::to_writer(writer, self) - } + pub fn write(&self, writer: &mut W) -> Result<(), serde_json::Error> + where + W: Write, + { + serde_json::to_writer(writer, self) + } } #[cfg(test)] mod test { - use serde_json; - use json::{VaultFile, Crypto, Cipher, Aes128Ctr, Kdf, Pbkdf2, Prf}; - use std::num::NonZeroU32; + use json::{Aes128Ctr, Cipher, Crypto, Kdf, Pbkdf2, Prf, VaultFile}; + use serde_json; + use std::num::NonZeroU32; - lazy_static! { - static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed"); - } + lazy_static! 
{ + static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed"); + } - #[test] - fn to_and_from_json() { - let file = VaultFile { - crypto: Crypto { - cipher: Cipher::Aes128Ctr(Aes128Ctr { - iv: "0155e3690be19fbfbecabcd440aa284b".into(), - }), - ciphertext: "4d6938a1f49b7782".into(), - kdf: Kdf::Pbkdf2(Pbkdf2 { - c: *ITERATIONS, - dklen: 32, - prf: Prf::HmacSha256, - salt: "b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5".into(), - }), - mac: "16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262".into(), - }, - meta: Some("{}".into()), - }; + #[test] + fn to_and_from_json() { + let file = VaultFile { + crypto: Crypto { + cipher: Cipher::Aes128Ctr(Aes128Ctr { + iv: "0155e3690be19fbfbecabcd440aa284b".into(), + }), + ciphertext: "4d6938a1f49b7782".into(), + kdf: Kdf::Pbkdf2(Pbkdf2 { + c: *ITERATIONS, + dklen: 32, + prf: Prf::HmacSha256, + salt: "b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5".into(), + }), + mac: "16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262".into(), + }, + meta: Some("{}".into()), + }; - let serialized = serde_json::to_string(&file).unwrap(); - let deserialized = serde_json::from_str(&serialized).unwrap(); + let serialized = serde_json::to_string(&file).unwrap(); + let deserialized = serde_json::from_str(&serialized).unwrap(); - assert_eq!(file, deserialized); - } + assert_eq!(file, deserialized); + } - #[test] - fn to_and_from_json_no_meta() { - let file = VaultFile { - crypto: Crypto { - cipher: Cipher::Aes128Ctr(Aes128Ctr { - iv: "0155e3690be19fbfbecabcd440aa284b".into(), - }), - ciphertext: "4d6938a1f49b7782".into(), - kdf: Kdf::Pbkdf2(Pbkdf2 { - c: *ITERATIONS, - dklen: 32, - prf: Prf::HmacSha256, - salt: "b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5".into(), - }), - mac: "16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262".into(), - }, - meta: None, - }; + #[test] + fn to_and_from_json_no_meta() { + let file = VaultFile { + 
crypto: Crypto { + cipher: Cipher::Aes128Ctr(Aes128Ctr { + iv: "0155e3690be19fbfbecabcd440aa284b".into(), + }), + ciphertext: "4d6938a1f49b7782".into(), + kdf: Kdf::Pbkdf2(Pbkdf2 { + c: *ITERATIONS, + dklen: 32, + prf: Prf::HmacSha256, + salt: "b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5".into(), + }), + mac: "16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262".into(), + }, + meta: None, + }; - let serialized = serde_json::to_string(&file).unwrap(); - let deserialized = serde_json::from_str(&serialized).unwrap(); + let serialized = serde_json::to_string(&file).unwrap(); + let deserialized = serde_json::from_str(&serialized).unwrap(); - assert_eq!(file, deserialized); - } + assert_eq!(file, deserialized); + } } diff --git a/accounts/ethstore/src/json/vault_key_file.rs b/accounts/ethstore/src/json/vault_key_file.rs index dd4ba4979..ab8f2d756 100644 --- a/accounts/ethstore/src/json/vault_key_file.rs +++ b/accounts/ethstore/src/json/vault_key_file.rs @@ -14,12 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::io::{Read, Write}; +use super::{Crypto, Uuid, Version, H160}; use serde::de::Error; -use serde_json; -use serde_json::value::Value; -use serde_json::error; -use super::{Uuid, Version, Crypto, H160}; +use serde_json::{self, error, value::Value}; +use std::io::{Read, Write}; /// Meta key name for vault field const VAULT_NAME_META_KEY: &'static str = "vault"; @@ -27,94 +25,112 @@ const VAULT_NAME_META_KEY: &'static str = "vault"; /// Key file as stored in vaults #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct VaultKeyFile { - /// Key id - pub id: Uuid, - /// Key version - pub version: Version, - /// Secret, encrypted with account password - pub crypto: Crypto, - /// Serialized `VaultKeyMeta`, encrypted with vault password - pub metacrypto: Crypto, + /// Key id + pub id: Uuid, + /// Key version + pub version: Version, + /// Secret, encrypted with account password + pub crypto: Crypto, + /// Serialized `VaultKeyMeta`, encrypted with vault password + pub metacrypto: Crypto, } /// Data, stored in `VaultKeyFile::metacrypto` #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct VaultKeyMeta { - /// Key address - pub address: H160, - /// Key name - pub name: Option, - /// Key metadata - pub meta: Option, + /// Key address + pub address: H160, + /// Key name + pub name: Option, + /// Key metadata + pub meta: Option, } /// Insert vault name to the JSON meta field -pub fn insert_vault_name_to_json_meta(meta: &str, vault_name: &str) -> Result { - let mut meta = if meta.is_empty() { - Value::Object(serde_json::Map::new()) - } else { - serde_json::from_str(meta)? - }; +pub fn insert_vault_name_to_json_meta( + meta: &str, + vault_name: &str, +) -> Result { + let mut meta = if meta.is_empty() { + Value::Object(serde_json::Map::new()) + } else { + serde_json::from_str(meta)? 
+ }; - if let Some(meta_obj) = meta.as_object_mut() { - meta_obj.insert(VAULT_NAME_META_KEY.to_owned(), Value::String(vault_name.to_owned())); - serde_json::to_string(meta_obj) - } else { - Err(error::Error::custom("Meta is expected to be a serialized JSON object")) - } + if let Some(meta_obj) = meta.as_object_mut() { + meta_obj.insert( + VAULT_NAME_META_KEY.to_owned(), + Value::String(vault_name.to_owned()), + ); + serde_json::to_string(meta_obj) + } else { + Err(error::Error::custom( + "Meta is expected to be a serialized JSON object", + )) + } } /// Remove vault name from the JSON meta field pub fn remove_vault_name_from_json_meta(meta: &str) -> Result { - let mut meta = if meta.is_empty() { - Value::Object(serde_json::Map::new()) - } else { - serde_json::from_str(meta)? - }; + let mut meta = if meta.is_empty() { + Value::Object(serde_json::Map::new()) + } else { + serde_json::from_str(meta)? + }; - if let Some(meta_obj) = meta.as_object_mut() { - meta_obj.remove(VAULT_NAME_META_KEY); - serde_json::to_string(meta_obj) - } else { - Err(error::Error::custom("Meta is expected to be a serialized JSON object")) - } + if let Some(meta_obj) = meta.as_object_mut() { + meta_obj.remove(VAULT_NAME_META_KEY); + serde_json::to_string(meta_obj) + } else { + Err(error::Error::custom( + "Meta is expected to be a serialized JSON object", + )) + } } impl VaultKeyFile { - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } - pub fn write(&self, writer: &mut W) -> Result<(), serde_json::Error> where W: Write { - serde_json::to_writer(writer, self) - } + pub fn write(&self, writer: &mut W) -> Result<(), serde_json::Error> + where + W: Write, + { + serde_json::to_writer(writer, self) + } } impl VaultKeyMeta { - pub fn load(bytes: &[u8]) -> Result { - serde_json::from_slice(&bytes) - } + pub fn load(bytes: &[u8]) -> Result { + 
serde_json::from_slice(&bytes) + } - pub fn write(&self) -> Result, serde_json::Error> { - let s = serde_json::to_string(self)?; - Ok(s.as_bytes().into()) - } + pub fn write(&self) -> Result, serde_json::Error> { + let s = serde_json::to_string(self)?; + Ok(s.as_bytes().into()) + } } #[cfg(test)] mod test { - use serde_json; - use json::{VaultKeyFile, Version, Crypto, Cipher, Aes128Ctr, Kdf, Pbkdf2, Prf, - insert_vault_name_to_json_meta, remove_vault_name_from_json_meta}; - use std::num::NonZeroU32; + use json::{ + insert_vault_name_to_json_meta, remove_vault_name_from_json_meta, Aes128Ctr, Cipher, + Crypto, Kdf, Pbkdf2, Prf, VaultKeyFile, Version, + }; + use serde_json; + use std::num::NonZeroU32; - lazy_static! { - static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed"); - } + lazy_static! { + static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed"); + } - #[test] - fn to_and_from_json() { - let file = VaultKeyFile { + #[test] + fn to_and_from_json() { + let file = VaultKeyFile { id: "08d82c39-88e3-7a71-6abb-89c8f36c3ceb".into(), version: Version::V3, crypto: Crypto { @@ -145,33 +161,45 @@ mod test { } }; - let serialized = serde_json::to_string(&file).unwrap(); - let deserialized = serde_json::from_str(&serialized).unwrap(); + let serialized = serde_json::to_string(&file).unwrap(); + let deserialized = serde_json::from_str(&serialized).unwrap(); - assert_eq!(file, deserialized); - } + assert_eq!(file, deserialized); + } - #[test] - fn vault_name_inserted_to_json_meta() { - assert_eq!(insert_vault_name_to_json_meta(r#""#, "MyVault").unwrap(), r#"{"vault":"MyVault"}"#); - assert_eq!(insert_vault_name_to_json_meta(r#"{"tags":["kalabala"]}"#, "MyVault").unwrap(), r#"{"tags":["kalabala"],"vault":"MyVault"}"#); - } + #[test] + fn vault_name_inserted_to_json_meta() { + assert_eq!( + insert_vault_name_to_json_meta(r#""#, "MyVault").unwrap(), + r#"{"vault":"MyVault"}"# + ); + assert_eq!( + 
insert_vault_name_to_json_meta(r#"{"tags":["kalabala"]}"#, "MyVault").unwrap(), + r#"{"tags":["kalabala"],"vault":"MyVault"}"# + ); + } - #[test] - fn vault_name_not_inserted_to_json_meta() { - assert!(insert_vault_name_to_json_meta(r#"///3533"#, "MyVault").is_err()); - assert!(insert_vault_name_to_json_meta(r#""string""#, "MyVault").is_err()); - } + #[test] + fn vault_name_not_inserted_to_json_meta() { + assert!(insert_vault_name_to_json_meta(r#"///3533"#, "MyVault").is_err()); + assert!(insert_vault_name_to_json_meta(r#""string""#, "MyVault").is_err()); + } - #[test] - fn vault_name_removed_from_json_meta() { - assert_eq!(remove_vault_name_from_json_meta(r#"{"vault":"MyVault"}"#).unwrap(), r#"{}"#); - assert_eq!(remove_vault_name_from_json_meta(r#"{"tags":["kalabala"],"vault":"MyVault"}"#).unwrap(), r#"{"tags":["kalabala"]}"#); - } + #[test] + fn vault_name_removed_from_json_meta() { + assert_eq!( + remove_vault_name_from_json_meta(r#"{"vault":"MyVault"}"#).unwrap(), + r#"{}"# + ); + assert_eq!( + remove_vault_name_from_json_meta(r#"{"tags":["kalabala"],"vault":"MyVault"}"#).unwrap(), + r#"{"tags":["kalabala"]}"# + ); + } - #[test] - fn vault_name_not_removed_from_json_meta() { - assert!(remove_vault_name_from_json_meta(r#"///3533"#).is_err()); - assert!(remove_vault_name_from_json_meta(r#""string""#).is_err()); - } + #[test] + fn vault_name_not_removed_from_json_meta() { + assert!(remove_vault_name_from_json_meta(r#"///3533"#).is_err()); + assert!(remove_vault_name_from_json_meta(r#""string""#).is_err()); + } } diff --git a/accounts/ethstore/src/json/version.rs b/accounts/ethstore/src/json/version.rs index cd8439c59..75f6470a0 100644 --- a/accounts/ethstore/src/json/version.rs +++ b/accounts/ethstore/src/json/version.rs @@ -14,45 +14,54 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::fmt; -use serde::{Serialize, Serializer, Deserialize, Deserializer}; -use serde::de::{Error as SerdeError, Visitor}; use super::Error; +use serde::{ + de::{Error as SerdeError, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::fmt; #[derive(Debug, PartialEq)] pub enum Version { - V3, + V3, } impl Serialize for Version { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - match *self { - Version::V3 => serializer.serialize_u64(3) - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + Version::V3 => serializer.serialize_u64(3), + } + } } impl<'a> Deserialize<'a> for Version { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(VersionVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(VersionVisitor) + } } struct VersionVisitor; impl<'a> Visitor<'a> for VersionVisitor { - type Value = Version; + type Value = Version; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a valid key version identifier") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a valid key version identifier") + } - fn visit_u64(self, value: u64) -> Result where E: SerdeError { - match value { - 3 => Ok(Version::V3), - _ => Err(SerdeError::custom(Error::UnsupportedVersion)) - } - } + fn visit_u64(self, value: u64) -> Result + where + E: SerdeError, + { + match value { + 3 => Ok(Version::V3), + _ => Err(SerdeError::custom(Error::UnsupportedVersion)), + } + } } diff --git a/accounts/ethstore/src/lib.rs b/accounts/ethstore/src/lib.rs index c0955caeb..092090330 100644 --- a/accounts/ethstore/src/lib.rs +++ b/accounts/ethstore/src/lib.rs @@ -27,13 +27,13 @@ extern crate rustc_hex; extern crate serde; extern crate serde_json; extern crate smallvec; +extern crate tempdir; extern crate time; 
extern crate tiny_keccak; -extern crate tempdir; -extern crate parity_crypto as crypto; extern crate ethereum_types; extern crate ethkey as _ethkey; +extern crate parity_crypto as crypto; extern crate parity_wordlist; #[macro_use] @@ -60,18 +60,20 @@ mod presale; mod random; mod secret_store; -pub use self::account::{SafeAccount, Crypto}; -pub use self::error::Error; -pub use self::ethstore::{EthStore, EthMultiStore}; -pub use self::import::{import_account, import_accounts, read_geth_accounts}; -pub use self::json::OpaqueKeyFile as KeyFile; -pub use self::presale::PresaleWallet; -pub use self::secret_store::{ - SecretVaultRef, StoreAccountRef, SimpleSecretStore, SecretStore, - Derivation, IndexDerivation, +pub use self::{ + account::{Crypto, SafeAccount}, + error::Error, + ethstore::{EthMultiStore, EthStore}, + import::{import_account, import_accounts, read_geth_accounts}, + json::OpaqueKeyFile as KeyFile, + parity_wordlist::random_phrase, + presale::PresaleWallet, + random::random_string, + secret_store::{ + Derivation, IndexDerivation, SecretStore, SecretVaultRef, SimpleSecretStore, + StoreAccountRef, + }, }; -pub use self::random::random_string; -pub use self::parity_wordlist::random_phrase; /// An opaque wrapper for secret. pub struct OpaqueSecret(::ethkey::Secret); diff --git a/accounts/ethstore/src/presale.rs b/accounts/ethstore/src/presale.rs index 8ca5d0b98..3a1b71969 100644 --- a/accounts/ethstore/src/presale.rs +++ b/accounts/ethstore/src/presale.rs @@ -14,78 +14,80 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fs; -use std::num::NonZeroU32; -use std::path::Path; +use crypto::{self, pbkdf2, Keccak256}; +use ethkey::{Address, KeyPair, Password, Secret}; use json; -use ethkey::{Address, Secret, KeyPair, Password}; -use crypto::{Keccak256, pbkdf2}; -use {crypto, Error}; +use std::{fs, num::NonZeroU32, path::Path}; +use Error; /// Pre-sale wallet. 
pub struct PresaleWallet { - iv: [u8; 16], - ciphertext: Vec, - address: Address, + iv: [u8; 16], + ciphertext: Vec, + address: Address, } impl From for PresaleWallet { - fn from(wallet: json::PresaleWallet) -> Self { - let mut iv = [0u8; 16]; - iv.copy_from_slice(&wallet.encseed[..16]); + fn from(wallet: json::PresaleWallet) -> Self { + let mut iv = [0u8; 16]; + iv.copy_from_slice(&wallet.encseed[..16]); - let mut ciphertext = vec![]; - ciphertext.extend_from_slice(&wallet.encseed[16..]); + let mut ciphertext = vec![]; + ciphertext.extend_from_slice(&wallet.encseed[16..]); - PresaleWallet { - iv: iv, - ciphertext: ciphertext, - address: Address::from(wallet.address), - } - } + PresaleWallet { + iv: iv, + ciphertext: ciphertext, + address: Address::from(wallet.address), + } + } } impl PresaleWallet { - /// Open a pre-sale wallet. - pub fn open

(path: P) -> Result where P: AsRef { - let file = fs::File::open(path)?; - let presale = json::PresaleWallet::load(file) - .map_err(|e| Error::InvalidKeyFile(format!("{}", e)))?; - Ok(PresaleWallet::from(presale)) - } + /// Open a pre-sale wallet. + pub fn open

(path: P) -> Result + where + P: AsRef, + { + let file = fs::File::open(path)?; + let presale = + json::PresaleWallet::load(file).map_err(|e| Error::InvalidKeyFile(format!("{}", e)))?; + Ok(PresaleWallet::from(presale)) + } - /// Decrypt the wallet. - pub fn decrypt(&self, password: &Password) -> Result { - let mut derived_key = [0u8; 32]; - let salt = pbkdf2::Salt(password.as_bytes()); - let sec = pbkdf2::Secret(password.as_bytes()); - let iter = NonZeroU32::new(2000).expect("2000 > 0; qed"); - pbkdf2::sha256(iter, salt, sec, &mut derived_key); + /// Decrypt the wallet. + pub fn decrypt(&self, password: &Password) -> Result { + let mut derived_key = [0u8; 32]; + let salt = pbkdf2::Salt(password.as_bytes()); + let sec = pbkdf2::Secret(password.as_bytes()); + let iter = NonZeroU32::new(2000).expect("2000 > 0; qed"); + pbkdf2::sha256(iter, salt, sec, &mut derived_key); - let mut key = vec![0; self.ciphertext.len()]; - let len = crypto::aes::decrypt_128_cbc(&derived_key[0..16], &self.iv, &self.ciphertext, &mut key) - .map_err(|_| Error::InvalidPassword)?; - let unpadded = &key[..len]; + let mut key = vec![0; self.ciphertext.len()]; + let len = + crypto::aes::decrypt_128_cbc(&derived_key[0..16], &self.iv, &self.ciphertext, &mut key) + .map_err(|_| Error::InvalidPassword)?; + let unpadded = &key[..len]; - let secret = Secret::from_unsafe_slice(&unpadded.keccak256())?; - if let Ok(kp) = KeyPair::from_secret(secret) { - if kp.address() == self.address { - return Ok(kp) - } - } + let secret = Secret::from_unsafe_slice(&unpadded.keccak256())?; + if let Ok(kp) = KeyPair::from_secret(secret) { + if kp.address() == self.address { + return Ok(kp); + } + } - Err(Error::InvalidPassword) - } + Err(Error::InvalidPassword) + } } #[cfg(test)] mod tests { - use super::PresaleWallet; - use json; + use super::PresaleWallet; + use json; - #[test] - fn test() { - let json = r#" + #[test] + fn test() { + let json = r#" { "encseed": 
"137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066", "ethaddr": "ede84640d1a1d3e06902048e67aa7db8d52c2ce1", @@ -93,9 +95,9 @@ mod tests { "btcaddr": "1JvqEc6WLhg6GnyrLBe2ztPAU28KRfuseH" } "#; - let wallet = json::PresaleWallet::load(json.as_bytes()).unwrap(); - let wallet = PresaleWallet::from(wallet); - assert!(wallet.decrypt(&"123".into()).is_ok()); - assert!(wallet.decrypt(&"124".into()).is_err()); - } + let wallet = json::PresaleWallet::load(json.as_bytes()).unwrap(); + let wallet = PresaleWallet::from(wallet); + assert!(wallet.decrypt(&"123".into()).is_ok()); + assert!(wallet.decrypt(&"124".into()).is_err()); + } } diff --git a/accounts/ethstore/src/random.rs b/accounts/ethstore/src/random.rs index 969c8a366..38df9e05e 100644 --- a/accounts/ethstore/src/random.rs +++ b/accounts/ethstore/src/random.rs @@ -14,32 +14,34 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use rand::{Rng, OsRng}; +use rand::{OsRng, Rng}; pub trait Random { - fn random() -> Self where Self: Sized; + fn random() -> Self + where + Self: Sized; } impl Random for [u8; 16] { - fn random() -> Self { - let mut result = [0u8; 16]; - let mut rng = OsRng::new().unwrap(); - rng.fill_bytes(&mut result); - result - } + fn random() -> Self { + let mut result = [0u8; 16]; + let mut rng = OsRng::new().unwrap(); + rng.fill_bytes(&mut result); + result + } } impl Random for [u8; 32] { - fn random() -> Self { - let mut result = [0u8; 32]; - let mut rng = OsRng::new().unwrap(); - rng.fill_bytes(&mut result); - result - } + fn random() -> Self { + let mut result = [0u8; 32]; + let mut rng = OsRng::new().unwrap(); + rng.fill_bytes(&mut result); + result + } } /// Generate a random string of given length. 
pub fn random_string(length: usize) -> String { - let mut rng = OsRng::new().expect("Not able to operate without random source."); - rng.gen_ascii_chars().take(length).collect() + let mut rng = OsRng::new().expect("Not able to operate without random source."); + rng.gen_ascii_chars().take(length).collect() } diff --git a/accounts/ethstore/src/secret_store.rs b/accounts/ethstore/src/secret_store.rs index 5571f83c0..55c7148bf 100644 --- a/accounts/ethstore/src/secret_store.rs +++ b/accounts/ethstore/src/secret_store.rs @@ -14,177 +14,264 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::hash::{Hash, Hasher}; -use std::path::PathBuf; -use std::cmp::Ordering; -use ethkey::{Address, Message, Signature, Secret, Password, Public}; -use Error; -use json::{Uuid, OpaqueKeyFile}; use ethereum_types::H256; +use ethkey::{Address, Message, Password, Public, Secret, Signature}; +use json::{OpaqueKeyFile, Uuid}; +use std::{ + cmp::Ordering, + hash::{Hash, Hasher}, + path::PathBuf, +}; +use Error; use OpaqueSecret; /// Key directory reference #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] pub enum SecretVaultRef { - /// Reference to key in root directory - Root, - /// Referenc to key in specific vault - Vault(String), + /// Reference to key in root directory + Root, + /// Referenc to key in specific vault + Vault(String), } /// Stored account reference #[derive(Debug, Clone, PartialEq, Eq, Ord)] pub struct StoreAccountRef { - /// Account address - pub address: Address, - /// Vault reference - pub vault: SecretVaultRef, + /// Account address + pub address: Address, + /// Vault reference + pub vault: SecretVaultRef, } impl PartialOrd for StoreAccountRef { - fn partial_cmp(&self, other: &StoreAccountRef) -> Option { - Some(self.address.cmp(&other.address).then_with(|| self.vault.cmp(&other.vault))) - } + fn partial_cmp(&self, other: &StoreAccountRef) -> Option { + Some( + self.address + 
.cmp(&other.address) + .then_with(|| self.vault.cmp(&other.vault)), + ) + } } impl ::std::borrow::Borrow

for StoreAccountRef { - fn borrow(&self) -> &Address { - &self.address - } + fn borrow(&self) -> &Address { + &self.address + } } /// Simple Secret Store API pub trait SimpleSecretStore: Send + Sync { - /// Inserts new accounts to the store (or vault) with given password. - fn insert_account(&self, vault: SecretVaultRef, secret: Secret, password: &Password) -> Result; - /// Inserts new derived account to the store (or vault) with given password. - fn insert_derived(&self, vault: SecretVaultRef, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation) -> Result; - /// Changes accounts password. - fn change_password(&self, account: &StoreAccountRef, old_password: &Password, new_password: &Password) -> Result<(), Error>; - /// Exports key details for account. - fn export_account(&self, account: &StoreAccountRef, password: &Password) -> Result; - /// Entirely removes account from the store and underlying storage. - fn remove_account(&self, account: &StoreAccountRef, password: &Password) -> Result<(), Error>; - /// Generates new derived account. - fn generate_derived(&self, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation) -> Result; - /// Sign a message with given account. - fn sign(&self, account: &StoreAccountRef, password: &Password, message: &Message) -> Result; - /// Sign a message with derived account. - fn sign_derived(&self, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation, message: &Message) -> Result; - /// Decrypt a messages with given account. - fn decrypt(&self, account: &StoreAccountRef, password: &Password, shared_mac: &[u8], message: &[u8]) -> Result, Error>; - /// Agree on shared key. - fn agree(&self, account: &StoreAccountRef, password: &Password, other: &Public) -> Result; + /// Inserts new accounts to the store (or vault) with given password. 
+ fn insert_account( + &self, + vault: SecretVaultRef, + secret: Secret, + password: &Password, + ) -> Result; + /// Inserts new derived account to the store (or vault) with given password. + fn insert_derived( + &self, + vault: SecretVaultRef, + account_ref: &StoreAccountRef, + password: &Password, + derivation: Derivation, + ) -> Result; + /// Changes accounts password. + fn change_password( + &self, + account: &StoreAccountRef, + old_password: &Password, + new_password: &Password, + ) -> Result<(), Error>; + /// Exports key details for account. + fn export_account( + &self, + account: &StoreAccountRef, + password: &Password, + ) -> Result; + /// Entirely removes account from the store and underlying storage. + fn remove_account(&self, account: &StoreAccountRef, password: &Password) -> Result<(), Error>; + /// Generates new derived account. + fn generate_derived( + &self, + account_ref: &StoreAccountRef, + password: &Password, + derivation: Derivation, + ) -> Result; + /// Sign a message with given account. + fn sign( + &self, + account: &StoreAccountRef, + password: &Password, + message: &Message, + ) -> Result; + /// Sign a message with derived account. + fn sign_derived( + &self, + account_ref: &StoreAccountRef, + password: &Password, + derivation: Derivation, + message: &Message, + ) -> Result; + /// Decrypt a messages with given account. + fn decrypt( + &self, + account: &StoreAccountRef, + password: &Password, + shared_mac: &[u8], + message: &[u8], + ) -> Result, Error>; + /// Agree on shared key. + fn agree( + &self, + account: &StoreAccountRef, + password: &Password, + other: &Public, + ) -> Result; - /// Returns all accounts in this secret store. - fn accounts(&self) -> Result, Error>; - /// Get reference to some account with given address. - /// This method could be removed if we will guarantee that there is max(1) account for given address. - fn account_ref(&self, address: &Address) -> Result; + /// Returns all accounts in this secret store. 
+ fn accounts(&self) -> Result, Error>; + /// Get reference to some account with given address. + /// This method could be removed if we will guarantee that there is max(1) account for given address. + fn account_ref(&self, address: &Address) -> Result; - /// Create new vault with given password - fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error>; - /// Open vault with given password - fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error>; - /// Close vault - fn close_vault(&self, name: &str) -> Result<(), Error>; - /// List all vaults - fn list_vaults(&self) -> Result, Error>; - /// List all currently opened vaults - fn list_opened_vaults(&self) -> Result, Error>; - /// Change vault password - fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error>; - /// Cnage account' vault - fn change_account_vault(&self, vault: SecretVaultRef, account: StoreAccountRef) -> Result; - /// Get vault metadata string. - fn get_vault_meta(&self, name: &str) -> Result; - /// Set vault metadata string. - fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error>; + /// Create new vault with given password + fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error>; + /// Open vault with given password + fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error>; + /// Close vault + fn close_vault(&self, name: &str) -> Result<(), Error>; + /// List all vaults + fn list_vaults(&self) -> Result, Error>; + /// List all currently opened vaults + fn list_opened_vaults(&self) -> Result, Error>; + /// Change vault password + fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error>; + /// Cnage account' vault + fn change_account_vault( + &self, + vault: SecretVaultRef, + account: StoreAccountRef, + ) -> Result; + /// Get vault metadata string. + fn get_vault_meta(&self, name: &str) -> Result; + /// Set vault metadata string. 
+ fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error>; } /// Secret Store API pub trait SecretStore: SimpleSecretStore { + /// Returns a raw opaque Secret that can be later used to sign a message. + fn raw_secret( + &self, + account: &StoreAccountRef, + password: &Password, + ) -> Result; - /// Returns a raw opaque Secret that can be later used to sign a message. - fn raw_secret(&self, account: &StoreAccountRef, password: &Password) -> Result; + /// Signs a message with raw secret. + fn sign_with_secret( + &self, + secret: &OpaqueSecret, + message: &Message, + ) -> Result { + Ok(::ethkey::sign(&secret.0, message)?) + } - /// Signs a message with raw secret. - fn sign_with_secret(&self, secret: &OpaqueSecret, message: &Message) -> Result { - Ok(::ethkey::sign(&secret.0, message)?) - } + /// Imports presale wallet + fn import_presale( + &self, + vault: SecretVaultRef, + json: &[u8], + password: &Password, + ) -> Result; + /// Imports existing JSON wallet + fn import_wallet( + &self, + vault: SecretVaultRef, + json: &[u8], + password: &Password, + gen_id: bool, + ) -> Result; + /// Copies account between stores and vaults. + fn copy_account( + &self, + new_store: &SimpleSecretStore, + new_vault: SecretVaultRef, + account: &StoreAccountRef, + password: &Password, + new_password: &Password, + ) -> Result<(), Error>; + /// Checks if password matches given account. + fn test_password(&self, account: &StoreAccountRef, password: &Password) -> Result; - /// Imports presale wallet - fn import_presale(&self, vault: SecretVaultRef, json: &[u8], password: &Password) -> Result; - /// Imports existing JSON wallet - fn import_wallet(&self, vault: SecretVaultRef, json: &[u8], password: &Password, gen_id: bool) -> Result; - /// Copies account between stores and vaults. 
- fn copy_account(&self, new_store: &SimpleSecretStore, new_vault: SecretVaultRef, account: &StoreAccountRef, password: &Password, new_password: &Password) -> Result<(), Error>; - /// Checks if password matches given account. - fn test_password(&self, account: &StoreAccountRef, password: &Password) -> Result; + /// Returns a public key for given account. + fn public(&self, account: &StoreAccountRef, password: &Password) -> Result; - /// Returns a public key for given account. - fn public(&self, account: &StoreAccountRef, password: &Password) -> Result; + /// Returns uuid of an account. + fn uuid(&self, account: &StoreAccountRef) -> Result; + /// Returns account's name. + fn name(&self, account: &StoreAccountRef) -> Result; + /// Returns account's metadata. + fn meta(&self, account: &StoreAccountRef) -> Result; - /// Returns uuid of an account. - fn uuid(&self, account: &StoreAccountRef) -> Result; - /// Returns account's name. - fn name(&self, account: &StoreAccountRef) -> Result; - /// Returns account's metadata. - fn meta(&self, account: &StoreAccountRef) -> Result; + /// Modifies account metadata. + fn set_name(&self, account: &StoreAccountRef, name: String) -> Result<(), Error>; + /// Modifies account name. + fn set_meta(&self, account: &StoreAccountRef, meta: String) -> Result<(), Error>; - /// Modifies account metadata. - fn set_name(&self, account: &StoreAccountRef, name: String) -> Result<(), Error>; - /// Modifies account name. - fn set_meta(&self, account: &StoreAccountRef, meta: String) -> Result<(), Error>; - - /// Returns local path of the store. - fn local_path(&self) -> PathBuf; - /// Lists all found geth accounts. - fn list_geth_accounts(&self, testnet: bool) -> Vec
; - /// Imports geth accounts to the store/vault. - fn import_geth_accounts(&self, vault: SecretVaultRef, desired: Vec
, testnet: bool) -> Result, Error>; + /// Returns local path of the store. + fn local_path(&self) -> PathBuf; + /// Lists all found geth accounts. + fn list_geth_accounts(&self, testnet: bool) -> Vec
; + /// Imports geth accounts to the store/vault. + fn import_geth_accounts( + &self, + vault: SecretVaultRef, + desired: Vec
, + testnet: bool, + ) -> Result, Error>; } impl StoreAccountRef { - /// Create reference to root account with given address - pub fn root(address: Address) -> Self { - StoreAccountRef::new(SecretVaultRef::Root, address) - } + /// Create reference to root account with given address + pub fn root(address: Address) -> Self { + StoreAccountRef::new(SecretVaultRef::Root, address) + } - /// Create reference to vault account with given address - pub fn vault(vault_name: &str, address: Address) -> Self { - StoreAccountRef::new(SecretVaultRef::Vault(vault_name.to_owned()), address) - } + /// Create reference to vault account with given address + pub fn vault(vault_name: &str, address: Address) -> Self { + StoreAccountRef::new(SecretVaultRef::Vault(vault_name.to_owned()), address) + } - /// Create new account reference - pub fn new(vault_ref: SecretVaultRef, address: Address) -> Self { - StoreAccountRef { - vault: vault_ref, - address: address, - } - } + /// Create new account reference + pub fn new(vault_ref: SecretVaultRef, address: Address) -> Self { + StoreAccountRef { + vault: vault_ref, + address: address, + } + } } impl Hash for StoreAccountRef { - fn hash(&self, state: &mut H) { - self.address.hash(state); - } + fn hash(&self, state: &mut H) { + self.address.hash(state); + } } /// Node in hierarchical derivation. pub struct IndexDerivation { - /// Node is soft (allows proof of parent from parent node). - pub soft: bool, - /// Index sequence of the node. - pub index: u32, + /// Node is soft (allows proof of parent from parent node). + pub soft: bool, + /// Index sequence of the node. + pub index: u32, } /// Derivation scheme for keys pub enum Derivation { - /// Hierarchical derivation - Hierarchical(Vec), - /// Hash derivation, soft. - SoftHash(H256), - /// Hash derivation, hard. - HardHash(H256), + /// Hierarchical derivation + Hierarchical(Vec), + /// Hash derivation, soft. + SoftHash(H256), + /// Hash derivation, hard. 
+ HardHash(H256), } diff --git a/accounts/ethstore/tests/api.rs b/accounts/ethstore/tests/api.rs index c27473752..abf593deb 100644 --- a/accounts/ethstore/tests/api.rs +++ b/accounts/ethstore/tests/api.rs @@ -14,141 +14,184 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -extern crate rand; extern crate ethstore; +extern crate rand; mod util; -use ethstore::{EthStore, SimpleSecretStore, SecretVaultRef, StoreAccountRef}; -use ethstore::ethkey::{Random, Generator, Secret, KeyPair, verify_address}; -use ethstore::accounts_dir::RootDiskDirectory; +use ethstore::{ + accounts_dir::RootDiskDirectory, + ethkey::{verify_address, Generator, KeyPair, Random, Secret}, + EthStore, SecretVaultRef, SimpleSecretStore, StoreAccountRef, +}; use util::TransientDir; #[test] fn secret_store_create() { - let dir = TransientDir::create().unwrap(); - let _ = EthStore::open(Box::new(dir)).unwrap(); + let dir = TransientDir::create().unwrap(); + let _ = EthStore::open(Box::new(dir)).unwrap(); } #[test] #[should_panic] fn secret_store_open_not_existing() { - let dir = TransientDir::open(); - let _ = EthStore::open(Box::new(dir)).unwrap(); + let dir = TransientDir::open(); + let _ = EthStore::open(Box::new(dir)).unwrap(); } fn random_secret() -> Secret { - Random.generate().unwrap().secret().clone() + Random.generate().unwrap().secret().clone() } #[test] fn secret_store_create_account() { - let dir = TransientDir::create().unwrap(); - let store = EthStore::open(Box::new(dir)).unwrap(); - assert_eq!(store.accounts().unwrap().len(), 0); - assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok()); - assert_eq!(store.accounts().unwrap().len(), 1); - assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok()); - assert_eq!(store.accounts().unwrap().len(), 2); + let dir = TransientDir::create().unwrap(); + let store = EthStore::open(Box::new(dir)).unwrap(); + 
assert_eq!(store.accounts().unwrap().len(), 0); + assert!(store + .insert_account(SecretVaultRef::Root, random_secret(), &"".into()) + .is_ok()); + assert_eq!(store.accounts().unwrap().len(), 1); + assert!(store + .insert_account(SecretVaultRef::Root, random_secret(), &"".into()) + .is_ok()); + assert_eq!(store.accounts().unwrap().len(), 2); } #[test] fn secret_store_sign() { - let dir = TransientDir::create().unwrap(); - let store = EthStore::open(Box::new(dir)).unwrap(); - assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok()); - let accounts = store.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - assert!(store.sign(&accounts[0], &"".into(), &Default::default()).is_ok()); - assert!(store.sign(&accounts[0], &"1".into(), &Default::default()).is_err()); + let dir = TransientDir::create().unwrap(); + let store = EthStore::open(Box::new(dir)).unwrap(); + assert!(store + .insert_account(SecretVaultRef::Root, random_secret(), &"".into()) + .is_ok()); + let accounts = store.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + assert!(store + .sign(&accounts[0], &"".into(), &Default::default()) + .is_ok()); + assert!(store + .sign(&accounts[0], &"1".into(), &Default::default()) + .is_err()); } #[test] fn secret_store_change_password() { - let dir = TransientDir::create().unwrap(); - let store = EthStore::open(Box::new(dir)).unwrap(); - assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok()); - let accounts = store.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - assert!(store.sign(&accounts[0], &"".into(), &Default::default()).is_ok()); - assert!(store.change_password(&accounts[0], &"".into(), &"1".into()).is_ok()); - assert!(store.sign(&accounts[0], &"".into(), &Default::default()).is_err()); - assert!(store.sign(&accounts[0], &"1".into(), &Default::default()).is_ok()); + let dir = TransientDir::create().unwrap(); + let store = EthStore::open(Box::new(dir)).unwrap(); + assert!(store + 
.insert_account(SecretVaultRef::Root, random_secret(), &"".into()) + .is_ok()); + let accounts = store.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + assert!(store + .sign(&accounts[0], &"".into(), &Default::default()) + .is_ok()); + assert!(store + .change_password(&accounts[0], &"".into(), &"1".into()) + .is_ok()); + assert!(store + .sign(&accounts[0], &"".into(), &Default::default()) + .is_err()); + assert!(store + .sign(&accounts[0], &"1".into(), &Default::default()) + .is_ok()); } #[test] fn secret_store_remove_account() { - let dir = TransientDir::create().unwrap(); - let store = EthStore::open(Box::new(dir)).unwrap(); - assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok()); - let accounts = store.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - assert!(store.remove_account(&accounts[0], &"".into()).is_ok()); - assert_eq!(store.accounts().unwrap().len(), 0); - assert!(store.remove_account(&accounts[0], &"".into()).is_err()); + let dir = TransientDir::create().unwrap(); + let store = EthStore::open(Box::new(dir)).unwrap(); + assert!(store + .insert_account(SecretVaultRef::Root, random_secret(), &"".into()) + .is_ok()); + let accounts = store.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + assert!(store.remove_account(&accounts[0], &"".into()).is_ok()); + assert_eq!(store.accounts().unwrap().len(), 0); + assert!(store.remove_account(&accounts[0], &"".into()).is_err()); } fn test_path() -> &'static str { - match ::std::fs::metadata("ethstore") { - Ok(_) => "ethstore/tests/res/geth_keystore", - Err(_) => "tests/res/geth_keystore", - } + match ::std::fs::metadata("ethstore") { + Ok(_) => "ethstore/tests/res/geth_keystore", + Err(_) => "tests/res/geth_keystore", + } } fn pat_path() -> &'static str { - match ::std::fs::metadata("ethstore") { - Ok(_) => "ethstore/tests/res/pat", - Err(_) => "tests/res/pat", - } + match ::std::fs::metadata("ethstore") { + Ok(_) => "ethstore/tests/res/pat", + Err(_) => 
"tests/res/pat", + } } fn ciphertext_path() -> &'static str { - match ::std::fs::metadata("ethstore") { - Ok(_) => "ethstore/tests/res/ciphertext", - Err(_) => "tests/res/ciphertext", - } + match ::std::fs::metadata("ethstore") { + Ok(_) => "ethstore/tests/res/ciphertext", + Err(_) => "tests/res/ciphertext", + } } #[test] fn secret_store_laod_geth_files() { - let dir = RootDiskDirectory::at(test_path()); - let store = EthStore::open(Box::new(dir)).unwrap(); - assert_eq!(store.accounts().unwrap(), vec![ - StoreAccountRef::root("3f49624084b67849c7b4e805c5988c21a430f9d9".into()), - StoreAccountRef::root("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into()), - StoreAccountRef::root("63121b431a52f8043c16fcf0d1df9cb7b5f66649".into()), - ]); + let dir = RootDiskDirectory::at(test_path()); + let store = EthStore::open(Box::new(dir)).unwrap(); + assert_eq!( + store.accounts().unwrap(), + vec![ + StoreAccountRef::root("3f49624084b67849c7b4e805c5988c21a430f9d9".into()), + StoreAccountRef::root("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into()), + StoreAccountRef::root("63121b431a52f8043c16fcf0d1df9cb7b5f66649".into()), + ] + ); } #[test] fn secret_store_load_pat_files() { - let dir = RootDiskDirectory::at(pat_path()); - let store = EthStore::open(Box::new(dir)).unwrap(); - assert_eq!(store.accounts().unwrap(), vec![ - StoreAccountRef::root("3f49624084b67849c7b4e805c5988c21a430f9d9".into()), - StoreAccountRef::root("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into()), - ]); + let dir = RootDiskDirectory::at(pat_path()); + let store = EthStore::open(Box::new(dir)).unwrap(); + assert_eq!( + store.accounts().unwrap(), + vec![ + StoreAccountRef::root("3f49624084b67849c7b4e805c5988c21a430f9d9".into()), + StoreAccountRef::root("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into()), + ] + ); } #[test] fn test_decrypting_files_with_short_ciphertext() { - // 31e9d1e6d844bd3a536800ef8d8be6a9975db509, 30 - let kp1 = 
KeyPair::from_secret("000081c29e8142bb6a81bef5a92bda7a8328a5c85bb2f9542e76f9b0f94fc018".parse().unwrap()).unwrap(); - // d1e64e5480bfaf733ba7d48712decb8227797a4e , 31 - let kp2 = KeyPair::from_secret("00fa7b3db73dc7dfdf8c5fbdb796d741e4488628c41fc4febd9160a866ba0f35".parse().unwrap()).unwrap(); - let dir = RootDiskDirectory::at(ciphertext_path()); - let store = EthStore::open(Box::new(dir)).unwrap(); - let accounts = store.accounts().unwrap(); - assert_eq!(accounts, vec![ - StoreAccountRef::root("31e9d1e6d844bd3a536800ef8d8be6a9975db509".into()), - StoreAccountRef::root("d1e64e5480bfaf733ba7d48712decb8227797a4e".into()), - ]); + // 31e9d1e6d844bd3a536800ef8d8be6a9975db509, 30 + let kp1 = KeyPair::from_secret( + "000081c29e8142bb6a81bef5a92bda7a8328a5c85bb2f9542e76f9b0f94fc018" + .parse() + .unwrap(), + ) + .unwrap(); + // d1e64e5480bfaf733ba7d48712decb8227797a4e , 31 + let kp2 = KeyPair::from_secret( + "00fa7b3db73dc7dfdf8c5fbdb796d741e4488628c41fc4febd9160a866ba0f35" + .parse() + .unwrap(), + ) + .unwrap(); + let dir = RootDiskDirectory::at(ciphertext_path()); + let store = EthStore::open(Box::new(dir)).unwrap(); + let accounts = store.accounts().unwrap(); + assert_eq!( + accounts, + vec![ + StoreAccountRef::root("31e9d1e6d844bd3a536800ef8d8be6a9975db509".into()), + StoreAccountRef::root("d1e64e5480bfaf733ba7d48712decb8227797a4e".into()), + ] + ); - let message = Default::default(); + let message = Default::default(); - let s1 = store.sign(&accounts[0], &"foo".into(), &message).unwrap(); - let s2 = store.sign(&accounts[1], &"foo".into(), &message).unwrap(); - assert!(verify_address(&accounts[0].address, &s1, &message).unwrap()); - assert!(verify_address(&kp1.address(), &s1, &message).unwrap()); - assert!(verify_address(&kp2.address(), &s2, &message).unwrap()); + let s1 = store.sign(&accounts[0], &"foo".into(), &message).unwrap(); + let s2 = store.sign(&accounts[1], &"foo".into(), &message).unwrap(); + assert!(verify_address(&accounts[0].address, &s1, 
&message).unwrap()); + assert!(verify_address(&kp1.address(), &s1, &message).unwrap()); + assert!(verify_address(&kp2.address(), &s2, &message).unwrap()); } diff --git a/accounts/ethstore/tests/util/transient_dir.rs b/accounts/ethstore/tests/util/transient_dir.rs index 67511a9b9..6e4aa339d 100644 --- a/accounts/ethstore/tests/util/transient_dir.rs +++ b/accounts/ethstore/tests/util/transient_dir.rs @@ -14,68 +14,69 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::path::PathBuf; -use std::{env, fs}; -use rand::{Rng, OsRng}; -use ethstore::accounts_dir::{KeyDirectory, RootDiskDirectory}; -use ethstore::{Error, SafeAccount}; +use ethstore::{ + accounts_dir::{KeyDirectory, RootDiskDirectory}, + Error, SafeAccount, +}; +use rand::{OsRng, Rng}; +use std::{env, fs, path::PathBuf}; pub fn random_dir() -> PathBuf { - let mut rng = OsRng::new().unwrap(); - let mut dir = env::temp_dir(); - dir.push(format!("{:x}-{:x}", rng.next_u64(), rng.next_u64())); - dir + let mut rng = OsRng::new().unwrap(); + let mut dir = env::temp_dir(); + dir.push(format!("{:x}-{:x}", rng.next_u64(), rng.next_u64())); + dir } pub struct TransientDir { - dir: RootDiskDirectory, - path: PathBuf, + dir: RootDiskDirectory, + path: PathBuf, } impl TransientDir { - pub fn create() -> Result { - let path = random_dir(); - let result = TransientDir { - dir: RootDiskDirectory::create(&path)?, - path: path, - }; + pub fn create() -> Result { + let path = random_dir(); + let result = TransientDir { + dir: RootDiskDirectory::create(&path)?, + path: path, + }; - Ok(result) - } + Ok(result) + } - pub fn open() -> Self { - let path = random_dir(); - TransientDir { - dir: RootDiskDirectory::at(&path), - path: path, - } - } + pub fn open() -> Self { + let path = random_dir(); + TransientDir { + dir: RootDiskDirectory::at(&path), + path: path, + } + } } impl Drop for TransientDir { - fn drop(&mut self) { - 
fs::remove_dir_all(&self.path).expect("Expected to remove temp dir"); - } + fn drop(&mut self) { + fs::remove_dir_all(&self.path).expect("Expected to remove temp dir"); + } } impl KeyDirectory for TransientDir { - fn load(&self) -> Result, Error> { - self.dir.load() - } + fn load(&self) -> Result, Error> { + self.dir.load() + } - fn update(&self, account: SafeAccount) -> Result { - self.dir.update(account) - } + fn update(&self, account: SafeAccount) -> Result { + self.dir.update(account) + } - fn insert(&self, account: SafeAccount) -> Result { - self.dir.insert(account) - } + fn insert(&self, account: SafeAccount) -> Result { + self.dir.insert(account) + } - fn remove(&self, account: &SafeAccount) -> Result<(), Error> { - self.dir.remove(account) - } + fn remove(&self, account: &SafeAccount) -> Result<(), Error> { + self.dir.remove(account) + } - fn unique_repr(&self) -> Result { - self.dir.unique_repr() - } + fn unique_repr(&self) -> Result { + self.dir.unique_repr() + } } diff --git a/accounts/fake-hardware-wallet/src/lib.rs b/accounts/fake-hardware-wallet/src/lib.rs index d04590865..2f88d6e03 100644 --- a/accounts/fake-hardware-wallet/src/lib.rs +++ b/accounts/fake-hardware-wallet/src/lib.rs @@ -19,83 +19,89 @@ extern crate ethereum_types; extern crate ethkey; -use std::fmt; use ethereum_types::U256; use ethkey::{Address, Signature}; +use std::fmt; pub struct WalletInfo { - pub address: Address, - pub name: String, - pub manufacturer: String, + pub address: Address, + pub name: String, + pub manufacturer: String, } #[derive(Debug)] /// `ErrorType` for devices with no `hardware wallet` pub enum Error { - NoWallet, - KeyNotFound, + NoWallet, + KeyNotFound, } pub struct TransactionInfo { - /// Nonce - pub nonce: U256, - /// Gas price - pub gas_price: U256, - /// Gas limit - pub gas_limit: U256, - /// Receiver - pub to: Option
, - /// Value - pub value: U256, - /// Data - pub data: Vec, - /// Chain ID - pub chain_id: Option, + /// Nonce + pub nonce: U256, + /// Gas price + pub gas_price: U256, + /// Gas limit + pub gas_limit: U256, + /// Receiver + pub to: Option
, + /// Value + pub value: U256, + /// Data + pub data: Vec, + /// Chain ID + pub chain_id: Option, } pub enum KeyPath { - /// Ethereum. - Ethereum, - /// Ethereum classic. - EthereumClassic, + /// Ethereum. + Ethereum, + /// Ethereum classic. + EthereumClassic, } /// `HardwareWalletManager` for devices with no `hardware wallet` pub struct HardwareWalletManager; impl HardwareWalletManager { - pub fn new() -> Result { - Err(Error::NoWallet) - } + pub fn new() -> Result { + Err(Error::NoWallet) + } - pub fn set_key_path(&self, _key_path: KeyPath) {} + pub fn set_key_path(&self, _key_path: KeyPath) {} - pub fn wallet_info(&self, _: &Address) -> Option { - None - } + pub fn wallet_info(&self, _: &Address) -> Option { + None + } - pub fn list_wallets(&self) -> Vec { - Vec::with_capacity(0) - } + pub fn list_wallets(&self) -> Vec { + Vec::with_capacity(0) + } - pub fn list_locked_wallets(&self) -> Result, Error> { - Err(Error::NoWallet) - } + pub fn list_locked_wallets(&self) -> Result, Error> { + Err(Error::NoWallet) + } - pub fn pin_matrix_ack(&self, _: &str, _: &str) -> Result { - Err(Error::NoWallet) - } - - pub fn sign_transaction(&self, _address: &Address, _transaction: &TransactionInfo, _rlp_transaction: &[u8]) -> Result { - Err(Error::NoWallet) } - - pub fn sign_message(&self, _address: &Address, _msg: &[u8]) -> Result { - Err(Error::NoWallet) - } + pub fn pin_matrix_ack(&self, _: &str, _: &str) -> Result { + Err(Error::NoWallet) + } + + pub fn sign_transaction( + &self, + _address: &Address, + _transaction: &TransactionInfo, + _rlp_transaction: &[u8], + ) -> Result { + Err(Error::NoWallet) + } + + pub fn sign_message(&self, _address: &Address, _msg: &[u8]) -> Result { + Err(Error::NoWallet) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "No hardware wallet!!") - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "No hardware wallet!!") + } } diff --git a/accounts/hw/src/ledger.rs 
b/accounts/hw/src/ledger.rs index 9bad48ec5..e2c50dc52 100644 --- a/accounts/hw/src/ledger.rs +++ b/accounts/hw/src/ledger.rs @@ -17,26 +17,31 @@ //! Ledger hardware wallet module. Supports Ledger Blue and Nano S. //! See for protocol details. -use std::cmp::min; -use std::str::FromStr; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use std::fmt; +use std::{ + cmp::min, + fmt, + str::FromStr, + sync::Arc, + time::{Duration, Instant}, +}; -use ethereum_types::{H256, Address}; +use super::{is_valid_hid_device, Device, DeviceDirection, KeyPath, Wallet, WalletInfo}; +use ethereum_types::{Address, H256}; use ethkey::Signature; use hidapi; use libusb; use parking_lot::{Mutex, RwLock}; use semver::Version as FirmwareVersion; -use super::{WalletInfo, KeyPath, Device, DeviceDirection, Wallet, is_valid_hid_device}; const APDU_TAG: u8 = 0x05; const APDU_CLA: u8 = 0xe0; const APDU_PAYLOAD_HEADER_LEN: usize = 7; -const ETH_DERIVATION_PATH_BE: [u8; 17] = [4, 0x80, 0, 0, 44, 0x80, 0, 0, 60, 0x80, 0, 0, 0, 0, 0, 0, 0]; // 44'/60'/0'/0 -const ETC_DERIVATION_PATH_BE: [u8; 21] = [5, 0x80, 0, 0, 44, 0x80, 0, 0, 60, 0x80, 0x02, 0x73, 0xd0, 0x80, 0, 0, 0, 0, 0, 0, 0]; // 44'/60'/160720'/0'/0 +const ETH_DERIVATION_PATH_BE: [u8; 17] = + [4, 0x80, 0, 0, 44, 0x80, 0, 0, 60, 0x80, 0, 0, 0, 0, 0, 0, 0]; // 44'/60'/0'/0 +const ETC_DERIVATION_PATH_BE: [u8; 21] = [ + 5, 0x80, 0, 0, 44, 0x80, 0, 0, 60, 0x80, 0x02, 0x73, 0xd0, 0x80, 0, 0, 0, 0, 0, 0, 0, +]; // 44'/60'/160720'/0'/0 /// Ledger vendor ID const LEDGER_VID: u16 = 0x2c97; @@ -48,487 +53,579 @@ const MAX_CHUNK_SIZE: usize = 255; const HID_PACKET_SIZE: usize = 64 + HID_PREFIX_ZERO; -#[cfg(windows)] const HID_PREFIX_ZERO: usize = 1; -#[cfg(not(windows))] const HID_PREFIX_ZERO: usize = 0; +#[cfg(windows)] +const HID_PREFIX_ZERO: usize = 1; +#[cfg(not(windows))] +const HID_PREFIX_ZERO: usize = 0; mod commands { - pub const GET_APP_CONFIGURATION: u8 = 0x06; - pub const GET_ETH_PUBLIC_ADDRESS: u8 = 0x02; - pub const 
SIGN_ETH_TRANSACTION: u8 = 0x04; - pub const SIGN_ETH_PERSONAL_MESSAGE: u8 = 0x08; + pub const GET_APP_CONFIGURATION: u8 = 0x06; + pub const GET_ETH_PUBLIC_ADDRESS: u8 = 0x02; + pub const SIGN_ETH_TRANSACTION: u8 = 0x04; + pub const SIGN_ETH_PERSONAL_MESSAGE: u8 = 0x08; } /// Hardware wallet error. #[derive(Debug)] pub enum Error { - /// Ethereum wallet protocol error. - Protocol(&'static str), - /// Hidapi error. - Usb(hidapi::HidError), - /// Libusb error - LibUsb(libusb::Error), - /// Device with request key is not available. - KeyNotFound, - /// Signing has been cancelled by user. - UserCancel, - /// Impossible error - Impossible, - /// No device arrived - NoDeviceArrived, - /// No device left - NoDeviceLeft, - /// Invalid PID or VID - InvalidDevice, + /// Ethereum wallet protocol error. + Protocol(&'static str), + /// Hidapi error. + Usb(hidapi::HidError), + /// Libusb error + LibUsb(libusb::Error), + /// Device with request key is not available. + KeyNotFound, + /// Signing has been cancelled by user. 
+ UserCancel, + /// Impossible error + Impossible, + /// No device arrived + NoDeviceArrived, + /// No device left + NoDeviceLeft, + /// Invalid PID or VID + InvalidDevice, } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Error::Protocol(ref s) => write!(f, "Ledger protocol error: {}", s), - Error::Usb(ref e) => write!(f, "USB communication error: {}", e), - Error::LibUsb(ref e) => write!(f, "LibUSB communication error: {}", e), - Error::KeyNotFound => write!(f, "Key not found"), - Error::UserCancel => write!(f, "Operation has been cancelled"), - Error::Impossible => write!(f, "Placeholder error"), - Error::NoDeviceArrived => write!(f, "No device arrived"), - Error::NoDeviceLeft=> write!(f, "No device left"), - Error::InvalidDevice => write!(f, "Device with non-supported product ID or vendor ID was detected"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::Protocol(ref s) => write!(f, "Ledger protocol error: {}", s), + Error::Usb(ref e) => write!(f, "USB communication error: {}", e), + Error::LibUsb(ref e) => write!(f, "LibUSB communication error: {}", e), + Error::KeyNotFound => write!(f, "Key not found"), + Error::UserCancel => write!(f, "Operation has been cancelled"), + Error::Impossible => write!(f, "Placeholder error"), + Error::NoDeviceArrived => write!(f, "No device arrived"), + Error::NoDeviceLeft => write!(f, "No device left"), + Error::InvalidDevice => write!( + f, + "Device with non-supported product ID or vendor ID was detected" + ), + } + } } impl From for Error { - fn from(err: hidapi::HidError) -> Self { - Error::Usb(err) - } + fn from(err: hidapi::HidError) -> Self { + Error::Usb(err) + } } impl From for Error { - fn from(err: libusb::Error) -> Self { - Error::LibUsb(err) - } + fn from(err: libusb::Error) -> Self { + Error::LibUsb(err) + } } /// Ledger device manager. 
pub struct Manager { - usb: Arc>, - devices: RwLock>, - key_path: RwLock, + usb: Arc>, + devices: RwLock>, + key_path: RwLock, } impl Manager { - /// Create a new instance. - pub fn new(usb: Arc>) -> Arc { - Arc::new(Self { - usb, - devices: RwLock::new(Vec::new()), - key_path: RwLock::new(KeyPath::Ethereum), - }) - } + /// Create a new instance. + pub fn new(usb: Arc>) -> Arc { + Arc::new(Self { + usb, + devices: RwLock::new(Vec::new()), + key_path: RwLock::new(KeyPath::Ethereum), + }) + } - // Transport Protocol: - // * Communication Channel Id (2 bytes big endian ) - // * Command Tag (1 byte) - // * Packet Sequence ID (2 bytes big endian) - // * Payload (Optional) - // - // Payload - // * APDU Total Length (2 bytes big endian) - // * APDU_CLA (1 byte) - // * APDU_INS (1 byte) - // * APDU_P1 (1 byte) - // * APDU_P2 (1 byte) - // * APDU_LENGTH (1 byte) - // * APDU_Payload (Variable) - // - fn write(handle: &hidapi::HidDevice, command: u8, p1: u8, p2: u8, data: &[u8]) -> Result<(), Error> { - let data_len = data.len(); - let mut offset = 0; - let mut sequence_number = 0; - let mut hid_chunk = [0_u8; HID_PACKET_SIZE]; + // Transport Protocol: + // * Communication Channel Id (2 bytes big endian ) + // * Command Tag (1 byte) + // * Packet Sequence ID (2 bytes big endian) + // * Payload (Optional) + // + // Payload + // * APDU Total Length (2 bytes big endian) + // * APDU_CLA (1 byte) + // * APDU_INS (1 byte) + // * APDU_P1 (1 byte) + // * APDU_P2 (1 byte) + // * APDU_LENGTH (1 byte) + // * APDU_Payload (Variable) + // + fn write( + handle: &hidapi::HidDevice, + command: u8, + p1: u8, + p2: u8, + data: &[u8], + ) -> Result<(), Error> { + let data_len = data.len(); + let mut offset = 0; + let mut sequence_number = 0; + let mut hid_chunk = [0_u8; HID_PACKET_SIZE]; - while sequence_number == 0 || offset < data_len { - let header = if sequence_number == 0 { LEDGER_TRANSPORT_HEADER_LEN + APDU_PAYLOAD_HEADER_LEN } else { LEDGER_TRANSPORT_HEADER_LEN }; - let size = min(64 - 
header, data_len - offset); - { - let chunk = &mut hid_chunk[HID_PREFIX_ZERO..]; - chunk[0..5].copy_from_slice(&[0x01, 0x01, APDU_TAG, (sequence_number >> 8) as u8, (sequence_number & 0xff) as u8 ]); + while sequence_number == 0 || offset < data_len { + let header = if sequence_number == 0 { + LEDGER_TRANSPORT_HEADER_LEN + APDU_PAYLOAD_HEADER_LEN + } else { + LEDGER_TRANSPORT_HEADER_LEN + }; + let size = min(64 - header, data_len - offset); + { + let chunk = &mut hid_chunk[HID_PREFIX_ZERO..]; + chunk[0..5].copy_from_slice(&[ + 0x01, + 0x01, + APDU_TAG, + (sequence_number >> 8) as u8, + (sequence_number & 0xff) as u8, + ]); - if sequence_number == 0 { - let data_len = data.len() + 5; - chunk[5..12].copy_from_slice(&[(data_len >> 8) as u8, (data_len & 0xff) as u8, APDU_CLA, command, p1, p2, data.len() as u8]); - } + if sequence_number == 0 { + let data_len = data.len() + 5; + chunk[5..12].copy_from_slice(&[ + (data_len >> 8) as u8, + (data_len & 0xff) as u8, + APDU_CLA, + command, + p1, + p2, + data.len() as u8, + ]); + } - chunk[header..header + size].copy_from_slice(&data[offset..offset + size]); - } - trace!(target: "hw", "Ledger write {:?}", &hid_chunk[..]); - let n = handle.write(&hid_chunk[..])?; - if n < size + header { - return Err(Error::Protocol("Write data size mismatch")); - } - offset += size; - sequence_number += 1; - if sequence_number >= 0xffff { - return Err(Error::Protocol("Maximum sequence number reached")); - } - } - Ok(()) - } + chunk[header..header + size].copy_from_slice(&data[offset..offset + size]); + } + trace!(target: "hw", "Ledger write {:?}", &hid_chunk[..]); + let n = handle.write(&hid_chunk[..])?; + if n < size + header { + return Err(Error::Protocol("Write data size mismatch")); + } + offset += size; + sequence_number += 1; + if sequence_number >= 0xffff { + return Err(Error::Protocol("Maximum sequence number reached")); + } + } + Ok(()) + } - // Transport Protocol: - // * Communication Channel Id (2 bytes big endian ) - // * Command 
Tag (1 byte) - // * Packet Sequence ID (2 bytes big endian) - // * Payload (Optional) - // - // Payload - // * APDU Total Length (2 bytes big endian) - // * APDU_CLA (1 byte) - // * APDU_INS (1 byte) - // * APDU_P1 (1 byte) - // * APDU_P2 (1 byte) - // * APDU_LENGTH (1 byte) - // * APDU_Payload (Variable) - // - fn read(handle: &hidapi::HidDevice) -> Result, Error> { - let mut message_size = 0; - let mut message = Vec::new(); + // Transport Protocol: + // * Communication Channel Id (2 bytes big endian ) + // * Command Tag (1 byte) + // * Packet Sequence ID (2 bytes big endian) + // * Payload (Optional) + // + // Payload + // * APDU Total Length (2 bytes big endian) + // * APDU_CLA (1 byte) + // * APDU_INS (1 byte) + // * APDU_P1 (1 byte) + // * APDU_P2 (1 byte) + // * APDU_LENGTH (1 byte) + // * APDU_Payload (Variable) + // + fn read(handle: &hidapi::HidDevice) -> Result, Error> { + let mut message_size = 0; + let mut message = Vec::new(); - // terminate the loop if `sequence_number` reaches its max_value and report error - for chunk_index in 0..=0xffff { - let mut chunk: [u8; HID_PACKET_SIZE] = [0; HID_PACKET_SIZE]; - let chunk_size = handle.read(&mut chunk)?; - trace!(target: "hw", "Ledger read {:?}", &chunk[..]); - if chunk_size < LEDGER_TRANSPORT_HEADER_LEN || chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != APDU_TAG { - return Err(Error::Protocol("Unexpected chunk header")); - } - let seq = (chunk[3] as usize) << 8 | (chunk[4] as usize); - if seq != chunk_index { - return Err(Error::Protocol("Unexpected chunk header")); - } + // terminate the loop if `sequence_number` reaches its max_value and report error + for chunk_index in 0..=0xffff { + let mut chunk: [u8; HID_PACKET_SIZE] = [0; HID_PACKET_SIZE]; + let chunk_size = handle.read(&mut chunk)?; + trace!(target: "hw", "Ledger read {:?}", &chunk[..]); + if chunk_size < LEDGER_TRANSPORT_HEADER_LEN + || chunk[0] != 0x01 + || chunk[1] != 0x01 + || chunk[2] != APDU_TAG + { + return 
Err(Error::Protocol("Unexpected chunk header")); + } + let seq = (chunk[3] as usize) << 8 | (chunk[4] as usize); + if seq != chunk_index { + return Err(Error::Protocol("Unexpected chunk header")); + } - let mut offset = 5; - if seq == 0 { - // Read message size and status word. - if chunk_size < 7 { - return Err(Error::Protocol("Unexpected chunk header")); - } - message_size = (chunk[5] as usize) << 8 | (chunk[6] as usize); - offset += 2; - } - message.extend_from_slice(&chunk[offset..chunk_size]); - message.truncate(message_size); - if message.len() == message_size { - break; - } - } - if message.len() < 2 { - return Err(Error::Protocol("No status word")); - } - let status = (message[message.len() - 2] as usize) << 8 | (message[message.len() - 1] as usize); - debug!(target: "hw", "Read status {:x}", status); - match status { - 0x6700 => Err(Error::Protocol("Incorrect length")), - 0x6982 => Err(Error::Protocol("Security status not satisfied (Canceled by user)")), - 0x6a80 => Err(Error::Protocol("Invalid data")), - 0x6a82 => Err(Error::Protocol("File not found")), - 0x6a85 => Err(Error::UserCancel), - 0x6b00 => Err(Error::Protocol("Incorrect parameters")), - 0x6d00 => Err(Error::Protocol("Not implemented. Make sure the Ledger Ethereum Wallet app is running.")), - 0x6faa => Err(Error::Protocol("Your Ledger need to be unplugged")), - 0x6f00...0x6fff => Err(Error::Protocol("Internal error")), - 0x9000 => Ok(()), - _ => Err(Error::Protocol("Unknown error")), + let mut offset = 5; + if seq == 0 { + // Read message size and status word. 
+ if chunk_size < 7 { + return Err(Error::Protocol("Unexpected chunk header")); + } + message_size = (chunk[5] as usize) << 8 | (chunk[6] as usize); + offset += 2; + } + message.extend_from_slice(&chunk[offset..chunk_size]); + message.truncate(message_size); + if message.len() == message_size { + break; + } + } + if message.len() < 2 { + return Err(Error::Protocol("No status word")); + } + let status = + (message[message.len() - 2] as usize) << 8 | (message[message.len() - 1] as usize); + debug!(target: "hw", "Read status {:x}", status); + match status { + 0x6700 => Err(Error::Protocol("Incorrect length")), + 0x6982 => Err(Error::Protocol( + "Security status not satisfied (Canceled by user)", + )), + 0x6a80 => Err(Error::Protocol("Invalid data")), + 0x6a82 => Err(Error::Protocol("File not found")), + 0x6a85 => Err(Error::UserCancel), + 0x6b00 => Err(Error::Protocol("Incorrect parameters")), + 0x6d00 => Err(Error::Protocol( + "Not implemented. Make sure the Ledger Ethereum Wallet app is running.", + )), + 0x6faa => Err(Error::Protocol("Your Ledger need to be unplugged")), + 0x6f00...0x6fff => Err(Error::Protocol("Internal error")), + 0x9000 => Ok(()), + _ => Err(Error::Protocol("Unknown error")), + }?; + let new_len = message.len() - 2; + message.truncate(new_len); + Ok(message) + } - }?; - let new_len = message.len() - 2; - message.truncate(new_len); - Ok(message) - } + fn send_apdu( + handle: &hidapi::HidDevice, + command: u8, + p1: u8, + p2: u8, + data: &[u8], + ) -> Result, Error> { + Self::write(&handle, command, p1, p2, data)?; + Self::read(&handle) + } - fn send_apdu(handle: &hidapi::HidDevice, command: u8, p1: u8, p2: u8, data: &[u8]) -> Result, Error> { - Self::write(&handle, command, p1, p2, data)?; - Self::read(&handle) - } + fn get_firmware_version(handle: &hidapi::HidDevice) -> Result { + let ver = Self::send_apdu(&handle, commands::GET_APP_CONFIGURATION, 0, 0, &[])?; + if ver.len() != 4 { + return Err(Error::Protocol("Version packet size mismatch")); + 
} + Ok(FirmwareVersion::new( + ver[1].into(), + ver[2].into(), + ver[3].into(), + )) + } - fn get_firmware_version(handle: &hidapi::HidDevice) -> Result { - let ver = Self::send_apdu(&handle, commands::GET_APP_CONFIGURATION, 0, 0, &[])?; - if ver.len() != 4 { - return Err(Error::Protocol("Version packet size mismatch")); - } - Ok(FirmwareVersion::new(ver[1].into(), ver[2].into(), ver[3].into())) - } + fn get_derivation_path(&self) -> &[u8] { + match *self.key_path.read() { + KeyPath::Ethereum => Ð_DERIVATION_PATH_BE, + KeyPath::EthereumClassic => &ETC_DERIVATION_PATH_BE, + } + } - fn get_derivation_path(&self) -> &[u8] { - match *self.key_path.read() { - KeyPath::Ethereum => Ð_DERIVATION_PATH_BE, - KeyPath::EthereumClassic => &ETC_DERIVATION_PATH_BE, - } - } + fn signer_helper( + &self, + address: &Address, + data: &[u8], + command: u8, + ) -> Result { + let usb = self.usb.lock(); + let devices = self.devices.read(); + let device = devices + .iter() + .find(|d| &d.info.address == address) + .ok_or(Error::KeyNotFound)?; + let handle = self.open_path(|| usb.open_path(&device.path))?; - fn signer_helper(&self, address: &Address, data: &[u8], command: u8) -> Result { - let usb = self.usb.lock(); - let devices = self.devices.read(); - let device = devices.iter().find(|d| &d.info.address == address).ok_or(Error::KeyNotFound)?; - let handle = self.open_path(|| usb.open_path(&device.path))?; + // Signing personal messages are only support by Ledger firmware version 1.0.8 or newer + if command == commands::SIGN_ETH_PERSONAL_MESSAGE { + let version = Self::get_firmware_version(&handle)?; + if version < FirmwareVersion::new(1, 0, 8) { + return Err(Error::Protocol( + "Signing personal messages with Ledger requires version 1.0.8", + )); + } + } - // Signing personal messages are only support by Ledger firmware version 1.0.8 or newer - if command == commands::SIGN_ETH_PERSONAL_MESSAGE { - let version = Self::get_firmware_version(&handle)?; - if version < FirmwareVersion::new(1, 
0, 8) { - return Err(Error::Protocol("Signing personal messages with Ledger requires version 1.0.8")); - } - } + let mut chunk = [0_u8; MAX_CHUNK_SIZE]; + let derivation_path = self.get_derivation_path(); - let mut chunk= [0_u8; MAX_CHUNK_SIZE]; - let derivation_path = self.get_derivation_path(); + // Copy the address of the key (only done once) + chunk[0..derivation_path.len()].copy_from_slice(derivation_path); - // Copy the address of the key (only done once) - chunk[0..derivation_path.len()].copy_from_slice(derivation_path); + let key_length = derivation_path.len(); + let max_payload_size = MAX_CHUNK_SIZE - key_length; + let data_len = data.len(); - let key_length = derivation_path.len(); - let max_payload_size = MAX_CHUNK_SIZE - key_length; - let data_len = data.len(); + let mut result = Vec::new(); + let mut offset = 0; - let mut result = Vec::new(); - let mut offset = 0; + while offset < data_len { + let p1 = if offset == 0 { 0 } else { 0x80 }; + let take = min(max_payload_size, data_len - offset); - while offset < data_len { - let p1 = if offset == 0 { 0 } else { 0x80 }; - let take = min(max_payload_size, data_len - offset); + // Fetch piece of data and copy it! + { + let (_key, d) = &mut chunk.split_at_mut(key_length); + let (dst, _rem) = &mut d.split_at_mut(take); + dst.copy_from_slice(&data[offset..(offset + take)]); + } - // Fetch piece of data and copy it! 
- { - let (_key, d) = &mut chunk.split_at_mut(key_length); - let (dst, _rem) = &mut d.split_at_mut(take); - dst.copy_from_slice(&data[offset..(offset + take)]); - } + result = Self::send_apdu(&handle, command, p1, 0, &chunk[0..(key_length + take)])?; + offset += take; + } - result = Self::send_apdu(&handle, command, p1, 0, &chunk[0..(key_length + take)])?; - offset += take; - } + if result.len() != 65 { + return Err(Error::Protocol("Signature packet size mismatch")); + } + let v = (result[0] + 1) % 2; + let r = H256::from_slice(&result[1..33]); + let s = H256::from_slice(&result[33..65]); + Ok(Signature::from_rsv(&r, &s, v)) + } - if result.len() != 65 { - return Err(Error::Protocol("Signature packet size mismatch")); - } - let v = (result[0] + 1) % 2; - let r = H256::from_slice(&result[1..33]); - let s = H256::from_slice(&result[33..65]); - Ok(Signature::from_rsv(&r, &s, v)) - } - - pub fn sign_message(&self, address: &Address, msg: &[u8]) -> Result { - self.signer_helper(address, msg, commands::SIGN_ETH_PERSONAL_MESSAGE) - } + pub fn sign_message(&self, address: &Address, msg: &[u8]) -> Result { + self.signer_helper(address, msg, commands::SIGN_ETH_PERSONAL_MESSAGE) + } } impl<'a> Wallet<'a> for Manager { - type Error = Error; - type Transaction = &'a [u8]; + type Error = Error; + type Transaction = &'a [u8]; - fn sign_transaction(&self, address: &Address, transaction: Self::Transaction) -> Result { - self.signer_helper(address, transaction, commands::SIGN_ETH_TRANSACTION) - } + fn sign_transaction( + &self, + address: &Address, + transaction: Self::Transaction, + ) -> Result { + self.signer_helper(address, transaction, commands::SIGN_ETH_TRANSACTION) + } - fn set_key_path(&self, key_path: KeyPath) { - *self.key_path.write() = key_path; - } + fn set_key_path(&self, key_path: KeyPath) { + *self.key_path.write() = key_path; + } - fn update_devices(&self, device_direction: DeviceDirection) -> Result { - let mut usb = self.usb.lock(); - usb.refresh_devices(); - let 
devices = usb.devices(); - let num_prev_devices = self.devices.read().len(); + fn update_devices(&self, device_direction: DeviceDirection) -> Result { + let mut usb = self.usb.lock(); + usb.refresh_devices(); + let devices = usb.devices(); + let num_prev_devices = self.devices.read().len(); - // Sometimes when a ledger is connected at run-time with no other devices connected it will case a `disconnected` event. - // To work around this, ignore such spurious events and poll a couple of extra times in order to get the correct state. - if DeviceDirection::Left == device_direction && num_prev_devices == 0 { - return Err(Error::NoDeviceArrived); - } + // Sometimes when a ledger is connected at run-time with no other devices connected it will case a `disconnected` event. + // To work around this, ignore such spurious events and poll a couple of extra times in order to get the correct state. + if DeviceDirection::Left == device_direction && num_prev_devices == 0 { + return Err(Error::NoDeviceArrived); + } - let detected_devices = devices.iter() - .filter(|&d| is_valid_ledger(d.vendor_id, d.product_id) && - is_valid_hid_device(d.usage_page, d.interface_number) - ) - .fold(Vec::new(), |mut v, d| { - match self.read_device(&usb, &d) { - Ok(info) => { - trace!(target: "hw", "Found device: {:?}", info); - v.push(info); - } - Err(e) => trace!(target: "hw", "Error reading device info: {}", e), - }; - v - }); + let detected_devices = devices + .iter() + .filter(|&d| { + is_valid_ledger(d.vendor_id, d.product_id) + && is_valid_hid_device(d.usage_page, d.interface_number) + }) + .fold(Vec::new(), |mut v, d| { + match self.read_device(&usb, &d) { + Ok(info) => { + trace!(target: "hw", "Found device: {:?}", info); + v.push(info); + } + Err(e) => trace!(target: "hw", "Error reading device info: {}", e), + }; + v + }); - let num_curr_devices = detected_devices.len(); - *self.devices.write() = detected_devices; + let num_curr_devices = detected_devices.len(); + *self.devices.write() = 
detected_devices; - match device_direction { - DeviceDirection::Arrived => { - if num_curr_devices > num_prev_devices { - Ok(num_curr_devices - num_prev_devices) - } else { - Err(Error::NoDeviceArrived) - } - } - DeviceDirection::Left => { - if num_prev_devices > num_curr_devices { - Ok(num_prev_devices - num_curr_devices) - } else { - Err(Error::NoDeviceLeft) - } - } - } - } + match device_direction { + DeviceDirection::Arrived => { + if num_curr_devices > num_prev_devices { + Ok(num_curr_devices - num_prev_devices) + } else { + Err(Error::NoDeviceArrived) + } + } + DeviceDirection::Left => { + if num_prev_devices > num_curr_devices { + Ok(num_prev_devices - num_curr_devices) + } else { + Err(Error::NoDeviceLeft) + } + } + } + } - fn read_device(&self, usb: &hidapi::HidApi, dev_info: &hidapi::HidDeviceInfo) -> Result { - let handle = self.open_path(|| usb.open_path(&dev_info.path))?; - let manufacturer = dev_info.manufacturer_string.clone().unwrap_or_else(|| "Unknown".to_owned()); - let name = dev_info.product_string.clone().unwrap_or_else(|| "Unknown".to_owned()); - let serial = dev_info.serial_number.clone().unwrap_or_else(|| "Unknown".to_owned()); - match self.get_address(&handle) { - Ok(Some(addr)) => { - Ok(Device { - path: dev_info.path.clone(), - info: WalletInfo { - name, - manufacturer, - serial, - address: addr, - }, - }) - } - // This variant is not possible, but the trait forces this return type - Ok(None) => Err(Error::Impossible), - Err(e) => Err(e), - } - } + fn read_device( + &self, + usb: &hidapi::HidApi, + dev_info: &hidapi::HidDeviceInfo, + ) -> Result { + let handle = self.open_path(|| usb.open_path(&dev_info.path))?; + let manufacturer = dev_info + .manufacturer_string + .clone() + .unwrap_or_else(|| "Unknown".to_owned()); + let name = dev_info + .product_string + .clone() + .unwrap_or_else(|| "Unknown".to_owned()); + let serial = dev_info + .serial_number + .clone() + .unwrap_or_else(|| "Unknown".to_owned()); + match self.get_address(&handle) 
{ + Ok(Some(addr)) => Ok(Device { + path: dev_info.path.clone(), + info: WalletInfo { + name, + manufacturer, + serial, + address: addr, + }, + }), + // This variant is not possible, but the trait forces this return type + Ok(None) => Err(Error::Impossible), + Err(e) => Err(e), + } + } - fn list_devices(&self) -> Vec { - self.devices.read().iter().map(|d| d.info.clone()).collect() - } + fn list_devices(&self) -> Vec { + self.devices.read().iter().map(|d| d.info.clone()).collect() + } - // Not used because it is not supported by Ledger - fn list_locked_devices(&self) -> Vec { - vec![] - } + // Not used because it is not supported by Ledger + fn list_locked_devices(&self) -> Vec { + vec![] + } - fn get_wallet(&self, address: &Address) -> Option { - self.devices.read().iter().find(|d| &d.info.address == address).map(|d| d.info.clone()) - } + fn get_wallet(&self, address: &Address) -> Option { + self.devices + .read() + .iter() + .find(|d| &d.info.address == address) + .map(|d| d.info.clone()) + } - fn get_address(&self, device: &hidapi::HidDevice) -> Result, Self::Error> { - let ledger_version = Self::get_firmware_version(&device)?; - if ledger_version < FirmwareVersion::new(1, 0, 3) { - return Err(Error::Protocol("Ledger version 1.0.3 is required")); - } + fn get_address(&self, device: &hidapi::HidDevice) -> Result, Self::Error> { + let ledger_version = Self::get_firmware_version(&device)?; + if ledger_version < FirmwareVersion::new(1, 0, 3) { + return Err(Error::Protocol("Ledger version 1.0.3 is required")); + } - let derivation_path = self.get_derivation_path(); + let derivation_path = self.get_derivation_path(); - let key_and_address = Self::send_apdu(device, commands::GET_ETH_PUBLIC_ADDRESS, 0, 0, derivation_path)?; - if key_and_address.len() != 107 { // 1 + 65 PK + 1 + 40 Addr (ascii-hex) - return Err(Error::Protocol("Key packet size mismatch")); - } - let address_string = ::std::str::from_utf8(&key_and_address[67..107]) - .map_err(|_| Error::Protocol("Invalid 
address string"))?; + let key_and_address = Self::send_apdu( + device, + commands::GET_ETH_PUBLIC_ADDRESS, + 0, + 0, + derivation_path, + )?; + if key_and_address.len() != 107 { + // 1 + 65 PK + 1 + 40 Addr (ascii-hex) + return Err(Error::Protocol("Key packet size mismatch")); + } + let address_string = ::std::str::from_utf8(&key_and_address[67..107]) + .map_err(|_| Error::Protocol("Invalid address string"))?; - let address = Address::from_str(&address_string) - .map_err(|_| Error::Protocol("Invalid address string"))?; + let address = Address::from_str(&address_string) + .map_err(|_| Error::Protocol("Invalid address string"))?; - Ok(Some(address)) - } + Ok(Some(address)) + } - fn open_path(&self, f: F) -> Result - where F: Fn() -> Result - { - f().map_err(Into::into) - } + fn open_path(&self, f: F) -> Result + where + F: Fn() -> Result, + { + f().map_err(Into::into) + } } /// Check if the detected device is a valid `Ledger device` by checking both the product ID and the vendor ID pub fn is_valid_ledger(vendor_id: u16, product_id: u16) -> bool { - vendor_id == LEDGER_VID && LEDGER_PIDS.contains(&product_id) + vendor_id == LEDGER_VID && LEDGER_PIDS.contains(&product_id) } /// Poll the device in maximum `max_polling_duration` if it doesn't succeed -pub fn try_connect_polling(ledger: &Manager, max_polling_duration: &Duration, device_direction: DeviceDirection) -> bool { - let start_time = Instant::now(); - while start_time.elapsed() <= *max_polling_duration { - if let Ok(num_devices) = ledger.update_devices(device_direction) { - trace!(target: "hw", "{} number of Ledger(s) {}", num_devices, device_direction); - return true; - } - } - false +pub fn try_connect_polling( + ledger: &Manager, + max_polling_duration: &Duration, + device_direction: DeviceDirection, +) -> bool { + let start_time = Instant::now(); + while start_time.elapsed() <= *max_polling_duration { + if let Ok(num_devices) = ledger.update_devices(device_direction) { + trace!(target: "hw", "{} number of 
Ledger(s) {}", num_devices, device_direction); + return true; + } + } + false } #[cfg(test)] mod tests { - use rustc_hex::FromHex; - use super::*; - use ::HardwareWalletManager; + use super::*; + use rustc_hex::FromHex; + use HardwareWalletManager; - /// This test can't be run without an actual ledger device connected with the `Ledger Wallet Ethereum application` running - #[test] - #[ignore] - fn sign_personal_message() { - let manager = HardwareWalletManager::new().unwrap(); + /// This test can't be run without an actual ledger device connected with the `Ledger Wallet Ethereum application` running + #[test] + #[ignore] + fn sign_personal_message() { + let manager = HardwareWalletManager::new().unwrap(); - let ledger = &manager.ledger; + let ledger = &manager.ledger; - // Update device list - ledger.update_devices(DeviceDirection::Arrived).expect("No Ledger found, make sure you have a unlocked Ledger connected with the Ledger Wallet Ethereum running"); + // Update device list + ledger.update_devices(DeviceDirection::Arrived).expect("No Ledger found, make sure you have a unlocked Ledger connected with the Ledger Wallet Ethereum running"); - // Fetch the ethereum address of a connected ledger device - let address = ledger.list_devices() - .iter() - .filter(|d| d.manufacturer == "Ledger".to_string()) - .nth(0) - .map(|d| d.address.clone()) - .expect("No ledger device detected"); + // Fetch the ethereum address of a connected ledger device + let address = ledger + .list_devices() + .iter() + .filter(|d| d.manufacturer == "Ledger".to_string()) + .nth(0) + .map(|d| d.address.clone()) + .expect("No ledger device detected"); - // 44 bytes transaction - let tx = FromHex::from_hex("eb018504a817c80082520894a6ca2e6707f2cc189794a9dd459d5b05ed1bcd1c8703f26fcfb7a22480018080").unwrap(); - let signature = ledger.sign_transaction(&address, &tx); - assert!(signature.is_ok()); - } + // 44 bytes transaction + let tx = 
FromHex::from_hex("eb018504a817c80082520894a6ca2e6707f2cc189794a9dd459d5b05ed1bcd1c8703f26fcfb7a22480018080").unwrap(); + let signature = ledger.sign_transaction(&address, &tx); + assert!(signature.is_ok()); + } - /// This test can't be run without an actual ledger device connected with the `Ledger Wallet Ethereum application` running - #[test] - #[ignore] - fn smoke() { - let manager = HardwareWalletManager::new().unwrap(); - let ledger = &manager.ledger; + /// This test can't be run without an actual ledger device connected with the `Ledger Wallet Ethereum application` running + #[test] + #[ignore] + fn smoke() { + let manager = HardwareWalletManager::new().unwrap(); + let ledger = &manager.ledger; - // Update device list - ledger.update_devices(DeviceDirection::Arrived).expect("No Ledger found, make sure you have a unlocked Ledger connected with the Ledger Wallet Ethereum running"); + // Update device list + ledger.update_devices(DeviceDirection::Arrived).expect("No Ledger found, make sure you have a unlocked Ledger connected with the Ledger Wallet Ethereum running"); - // Fetch the ethereum address of a connected ledger device - let address = ledger.list_devices() - .iter() - .filter(|d| d.manufacturer == "Ledger".to_string()) - .nth(0) - .map(|d| d.address) - .expect("No ledger device detected"); + // Fetch the ethereum address of a connected ledger device + let address = ledger + .list_devices() + .iter() + .filter(|d| d.manufacturer == "Ledger".to_string()) + .nth(0) + .map(|d| d.address) + .expect("No ledger device detected"); - // 44 bytes transaction - let tx = FromHex::from_hex("eb018504a817c80082520894a6ca2e6707f2cc189794a9dd459d5b05ed1bcd1c8703f26fcfb7a22480018080").unwrap(); - let signature = ledger.sign_transaction(&address, &tx); - println!("Got {:?}", signature); - assert!(signature.is_ok()); + // 44 bytes transaction + let tx = FromHex::from_hex("eb018504a817c80082520894a6ca2e6707f2cc189794a9dd459d5b05ed1bcd1c8703f26fcfb7a22480018080").unwrap(); + 
let signature = ledger.sign_transaction(&address, &tx); + println!("Got {:?}", signature); + assert!(signature.is_ok()); - // 218 bytes transaction - let large_tx = FromHex::from_hex("f86b028511cfc15d00825208940975ca9f986eee35f5cbba2d672ad9bc8d2a08448766c92c5cf830008026a0d2b0d401b543872d2a6a50de92455decbb868440321bf63a13b310c069e2ba5ba03c6d51bcb2e1653be86546b87f8a12ddb45b6d4e568420299b96f64c19701040f86b028511cfc15d00825208940975ca9f986eee35f5cbba2d672ad9bc8d2a08448766c92c5cf830008026a0d2b0d401b543872d2a6a50de92455decbb868440321bf63a13b310c069e2ba5ba03c6d51bcb2e1653be86546b87f8a12ddb45b6d4e568420299b96f64c19701040").unwrap(); - let signature = ledger.sign_transaction(&address, &large_tx); - println!("Got {:?}", signature); - assert!(signature.is_ok()); + // 218 bytes transaction + let large_tx = FromHex::from_hex("f86b028511cfc15d00825208940975ca9f986eee35f5cbba2d672ad9bc8d2a08448766c92c5cf830008026a0d2b0d401b543872d2a6a50de92455decbb868440321bf63a13b310c069e2ba5ba03c6d51bcb2e1653be86546b87f8a12ddb45b6d4e568420299b96f64c19701040f86b028511cfc15d00825208940975ca9f986eee35f5cbba2d672ad9bc8d2a08448766c92c5cf830008026a0d2b0d401b543872d2a6a50de92455decbb868440321bf63a13b310c069e2ba5ba03c6d51bcb2e1653be86546b87f8a12ddb45b6d4e568420299b96f64c19701040").unwrap(); + let signature = ledger.sign_transaction(&address, &large_tx); + println!("Got {:?}", signature); + assert!(signature.is_ok()); - // 36206 bytes transaction (You need to confirm many transaction on your `Ledger` for this) - let huge_tx = 
FromHex::from_hex("f86b028511cfc15d00825208940975ca9f986eee35f5cbba2d672ad9bc8d2a08448766c92c5cf830008026a0d2b0d401b543872d2a6a50de92455decbb868440321bf63a13b310c069e2ba5ba03c6d51bcb2e1653be86546b87f8a12ddb45b6d4e568420299b96f64c19701040f86b028511cfc15d00825208940975ca9f986eee35f5cbba2d672ad9bc8d2a08448766c92c5cf830008026a0d2b0d401b543872d2a6a50de92455decbb868440321bf63a13b310c069e2ba5ba03c6d51bcb2e1653be86546b87f8a12ddb45b6d4e568420299b96f64c1970104000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7cd58ab9190c2792714ab06df5b67e66d9e3873eed251d7beb4fa252d6fed6a0ab1e5fabd284f40878d38f6e63d72eec55c6e1aa8d79c06adf714e3523a1f83da763f4bcc9d34424aba82981534066379c1cba244352042de13168556be761f8b1000807b6a6cd340b97a93cd850ee54335b1043bac153c1b0736a88919bb1a21d6befba34d9af51a9b3eb39164c64fe88efe62f136d0bc83cad1f963aec6344b9e406f7381ad2462dcf1434c90c426ee907e6a05abe39c2b36d1dfb966bcf5a4de5af9f07819256357489365c96b21d92a103a776b656fc10ad1083cf679d240bf09bf2eb7635d7bfa969ce7fbb4e0cd5835f79ca9f5583e3a9eca219fab2f773d9c7e838a7a9ef8755dc22e488036
7c2b5e40795fe526fc5d1461e50d5cb053e001206460fc6617a38499db525112a7edde38b9547853ad6e5ab359233611148f196501deafae414acde9df81efd7c4144b8fd27f63ac252ecede9609b3f9e634ae95c13058ad2b4529bbb07b5d7ac567c2da994084c3c73ef7c453fc139fcdb3939461da5bf0fa3f2a83517463d02b903af5d845929cf12c9a1479f6801f20085887a94d72814671dac994e14b2faa3251d465ce16d855f33259d94fcc9553b25b488d5c45fe74de60c303bc75bcdde9374ca268767f5767638d1aec5f6f95cab8e9e27b9a80ddf3dbbe24f790debd9e3baa30145d499dd1afb5662a11788b1bb3dedc1ebc5eff9641fa6918d958e4738bae3854e4cd43f9173cd4c9c821190ec287c18035a530c2dc63077d292b3a35b3756ba9e08295a02e37d332552f9f4fdbb945df004aa5b072f9f0e9fc2e4ed6fe455d95b003e5e593dcbfad0b3b47aa855b34008e0e9a2e1cc23b975a3e6808be59dcaa8a87145c1d5183c799d06100d500227e6a758757b4f7d042b3485aa0ce5e91b2b2e67d3cfdf1c226b7ab90e40f0a0d30cbbf425f495bd5a80202909ad419745a59210e2c42a1846e656f67a764ee307abbd76fbb0c99a702253b7a753c3b93e974881f3c97987856b57449e92ffa759da041a2acac59ea2d53836098196355ae0aa2a185dbb002a67c1a278a6032f156bc1e6d7f4ff6c674126af272fdfd1dcd6a810f42878164f1c7ae346b0dd91b678b363d0e33f4b81f2d7cc14da555dcbe4b9f80ac0fed6265a6ecce278888c9794373dcb0d20aa811a9fe9864fab25eaf12764bb2f1a68cd8756cd0b3583f6e5ec74ca5c327b3f6599fa9ec32ccd1831ae323689ef4a1b1a587cbbd2120e0bb8e59f9fc87d93e0365eb36557be6c45c30c1baeba33cdaa877a87e51fd70f2b5521078607d012d65f1fcca8051a01004a6d10f662dfa6445b2ac015cb3ce8fde56bbff93f5d620171e638c6e05504c2aeeeb74c7667aee1709846cb84d345a011c21c1b4e3fd09774ab4dcc63bda04bb0f4fc49d6145d202d807cc2d8eab29b3babe15e53a3656daf0b022ac37513f77660d43d60bdd3e882eef239bfe13dba2e12707733d56e49f638005e06019a7335d8184f1039ab18084de896a946c23045e5c164dc9d32f2f227c89f717a87d1243516b922e5f270c751f1bdb2b1d3a38a15b18a7b8b7e0818573f31320d496e14a348f979b7606c5124e007493f2f40c931f68e3483a46ab2b853a90bd38ae85e6252fece6fd36f7dad0d07b6763d8001a0d6abee62452904f979cc52fa15001b06eef08f17d6e16d493d227ce9277392337a1c71713603e03803d38d1c24184b52049bc029f4f00b22d2acdef91c776a74aa184cc84b0e764f463ed05c2e16a7a0dcb
6c27dd4aeca8aeac1545b48896775ba3fe9de4ea36e946d8f4ec16ca7ae58165e8ddc9189d5cc569888a59733529add4b213ea5c00ad3ed3709c0175b542513c90e18f2d4fa2301389102839d969e9f0d614943fe489750f27382f7ab273f51fcb995f449fa5fba108ad0955ed0819a0a62308021ac4ab0c97f04de9fb8533489b2685447ad71c7f9a9bc89975f9cdde87a3af89ae5bff37d1f192a31b7c5aad50486931bc07820d7dae398960965baba6cfc05c56df18b8ef0f5db488eb87be803fc94e3ad3bd6e4f358fe7ce15ca21c9a4752ddfa98337177a7c096d829886e8d71340a01644c64090c84e88235b11bd1fefe506d59733cdd82286fb466ee215914b06a138356e82c0ae6d5fd8e5fb310eb375540308d95b5d53832a5dae9652f91c1e8c14402991e38836813604dcaf272fc552e7682a6eaa7aacfd4ed1c7107b0232cdee00aef865c5577f2391937b76e34810f9d49fe31e54425b6f5e1d0e436e1366e9762d8295877e27ae495ace18fccfaafd850544c9be949d15d421cf6f4bb180225f7f86ca64480975c486df0eeb4fa80a4632cff28d36585cb5dc534553454ea810260983d02060caf6b1eb2b9443b1552ff73d243fecc9779635ed137a3bc8c04ef13f0329a7a5a54b2af0738218cc91be0ee63512f009435d8623ff4e8cdaf743818510b22e42b586a7e5e75525bb61dd2deb96adc95e07998a265d58fe4df4b9ead5b5f15b9daee510558fbdfae7a56931a6f4c729c18e0d29c467fed504810b7d9dfa0613d1657d9bfa5887e3f327cf46d7059a8a0fd654c60cb9c683c55439cd5186d1615f45f7108f261aff77791cf24c975120acf2b357dfbd2defafac0016525cff9400e0feeddff27910fbf2fa84c35fcaaec90863b605db5adbad0593601447605d68b943249861f8cd33c6419c7611403376a6bb438ee857ced2e6842f99ed1b4a9dc79f835813a4f8d07c14f1ef98773286e79cec1c9ce8c26e00418f1b27c7ef104fc96ea2b2ddefb46e2fec4feef2771a1d7e2643586b6fb97094a8d298de12a6f8f78d88e5d67442ed3310fb40aa6439b89c834e43ecd4a80c0a1d74ce6a90a67bcc996a7e93b6f397fe7ab2fa43711a72b84f8c94bd1e4ac62657b98a4b814d8ef2bb469165464a90d5353aa95d09b6ef4ffef081cab5e9dc12d743364f06d4118a585f7d455fd6e3b01434a728a768987c181409eb939e9396666560d394fb151fc67cb9cddea0a94d3e33382bd0617c95304da97994f110eafaaaff6eecb54421e01dc850dc73d77df18bbf68ecc8b37ee2fff7b6f88c139f7d88d763248deb8b4e16a8fab216c0ce88faea030f3a5c994c6e4ef6a9a68cbc9310787232198b020a7c014a1fa32c1736885603dd4921cd360bfb7dca7aafcbe8
1d7621dbeb4e5c094c2584c339ce70176d7fd2a6cfc4bbea6b433377eff7320d412947ac774688010369b197ec4d0471b9cc73cf9a3e71bd10901beefb10ca1c53428b89ea63427aae9ede5ba104d3fb54d0447458dd9780cd4e925f1edad33f6f0884cc47da562a3c6e2f5a958a8d8723919c4b88d067343a246c6722b6f9f82018d5213648792f38fa8ea1e635b3983dc1f941630fb3762ef1814ee3f41691b24583ddca585289568b4e64f82448b54797d382916e562b3f4795e2d726facea988249e2c3f72d44ec7197b6f783c6c7a133004d5e131b7b4d6a9557c56942ca4bd1f070a2b46c3a6b81bb9a4d570ac6afea75de65ecd331dff1e0252e0f9095f974f47b2d340d67704343b2e8832232210d2f79665bebccab528745c1dc3b28a78aafa3785c29ce2eb6a8403e4d8eded1cc2554ece0a542aa2febd711164f7d7e3a492a87b01d6b4206e593b3aa6d431e908282fcfee0d14dae4b99176a16fa32f730c2d336dcfe7eff84a7aaab1fc32ac8c2e9ab6ebb72c0306bc6998ec22d6cf20c2b6660cfbbeb064b3047c1cf650df12bd153cd7eec5dc181e46575f07c8e292cc191117cd28302d1f9c72d79b1f4062dd683ca95c3a744ac310764e56b2f02a0c2850a2f24c1b298e712374e9adfe68e5414386d7671bd52f6f472eebfdf51677ce379afe7b8085459fb1e6966f5cef45b256489b7ec8a8939cd931009c8a26642f1ff78cab06a5d25522a922cd5e4541dcdbde4848177a42476b141ce9ea035d28742cee0e5e85eb78ceb2b720e112aeb76cd0eb3fc34574c7476110b3b9dff5c19fceae816715b31fc289c0e7149e8488a59e075ac6683f237886a63a25ad23bf903480b9acf3f724d5ace0ca3a842939d4828910cc735e6513dfc4055624d68a048a626fab6b910eaf558c1b43daf1cf26338bca68b5e308b734b61624c97bf70a82430d586a6c3cf59e1bab2532fd9fa1f6fe4f757c7ede0cabea52f2cbf00cc88ca7db4ccc0ff92c0836e7405ebef2ad2e4b7d3b455d8e4d9ae575d884347bdadb67f5e24058a44ae1335280b671ec3bb9d8247e28fecedf5c151fe892bb0f6e67351752e4b1bf75dcd5af3e62ab4aedc5aa32a1606b4a0de3156b356b0fe74e898065d1e720b81663453fc97f935da3b5755a0629f38d6ae5f8e5e77eb64bbef5fc70d4081ebee7a9f7169df4f0e11796f7a79e9128ec996b6fbd8f6fa56e11f17db4925c27f4cd3ddbdee8a50e0b0d4d8f6e527302cbc4dbeef4b0338e6ac7515c1e796b39c8e83f457b50925c39d405f4cd3c1aaf3188c5ac62bf1dd362bc8c9d4e49d3d2b7c2dd2291fa4bb22d7cbe7963b654d92643b789366d1dce842f47919a1cf5073da8916701f907c4d2f8a710c58e85b59f590123d3f8e57cdc14df41
a1481a893b9f9505dc0637ba9b27657b0ceab87b0e4bc742924e6d8bf895b407c54df8622018417f9e543fe49f5b10a7a5fc66e5589304af33a20ea108ddf63facebcb20d22eac2fdf4a97285ae6d3f87865fae1331d00e631dfe5366345e0d78bb39a8077484a941176bc63f469f001cfd230347580b6226d6adff5ab112dcd53e7118925296b1a05978a703e383e6ffa5158fc36781f74501564992ab244d3475e1ee8e7146033da2dc116489b84c378e4a750947eb9ccb982a197f13976bb105c81624618c697f32a5b9e03f3675b2315fe773e4922c2e3da7f68ac225107405ece58dc6bbe2bd8947f3e4269ce245589497cd892c750f9ace0440f48057090c8a6cbd5046d3d982d634b4ad6ba41c7a38b7b8b0f91cb6898e769479fc3c7e7d2010b7fb38ef13c17db705a36455a34969803323806009a4e141a5c42da0f7a5e4760d07250d7e483ca6274e57cc2885e5728c24c8b5102845e8bb74b1c394fa7a206ec052c953967380d64c148ca480ab0edbc5da1a7a1e649c2ebfd19fefc52d81aeed7cd83f3c1d2128bd66feb99d5d8fbced01383d2abbf9be47f3390dd336c22b533a731d1c59c3bc5361d781ca15430d84f3c67d6981ab99100f53b6b5623df9d8eecc99d24e02d9301d636c2d5988e98a54339d5b516379a67d50dd9994a28fae5b806c56b353a84cb31729487a6d9851960b83ebc5178be689720a80c5c412e67f8ed55724534c92ab15c3bbc5bf13dfbff02d41ce4c9bc112746b62dea2b21d034e9a31e276eacfeeafc672b95e701ec0fc7ebd4b020a73fc37361b3f136246a0e3a8378442eb5e60abd7da2032dca9b5556aa22e5007c901f438c5e1baeb5d3ec6128a84d310363c6ec17d4ffece27f502b5c63d20cb1d11d0cfc316074faa820a03e6c577389e5e82ebe5f0976b6f5266618f5eb56986714d5cc75fe87176e92dcf01c58029d2b838022c0812c933db17dc4566d233720075065fda26f44b0ed3a46b6143fe180b7a1e6c1558f87b875aedf8c2fa968e2c925f0c08c7e0f23a9cf1b46f7955d9f1db300dab801f5672e2a7231bb2b622b0dc0dd9f2ec64a5f10c239e613247f8685369ed60b2d262c038fcc43924c5aca318385c12412b10d89753f9dfca43eff5f2be7d7d7b2788b877efa8b46ec5c9e99f922839bef71c613cd44cba597cf68de366eaa8874032c14d8012b41e72fd66422f7031d26be0dc4fef8f36a3c124e4ae767a665a94233812984c4466f5bd698b5fc22153c9c2f4110d9defb23c00e722692983b32ee0e84514169910bb21b14066d048960b29b3ff4c090dd5723ca4dcdebd207d4f88da831f0ee7de4aa302a06589a4aba3ca696e7d3c3e9a93af79db91f7a06b0ad825a8652f74bdb72f580e9afb31aae58807e24
067f08dd719abb4e6e458bc8aa272d7a5bbd00710c43a1fea220b9022a26b574997517d04573786a4c3e09d30f3ec32f328462e26d4f7ff015121758ce1a2fd51e7f419eb6d8ac04497ab812aa6ba2e981a312ca16c38ed887b2342b0a91348198797919671a23e2b0634b523f931e48ce0d8eb840c54045d9193afec069803901e5ec1108782503cabd0f43373a85acacfa8af44ef2b1d09e4589d2dd4fdcefbf435cb61254f189ad433fa6a4e190627732ae4ef2b0c85cfcbbbaa0137033034e70a3906112dc76ec101f3198e25fb38aad46261d6019690dbf059d66c44e7ada244589c55edfc2e7d18c0ddfcd2d3841bd54d8502763cd0f4696d44686ae3be29ba3063ff6e7aee14de126dc43302f7c0b57d59eb4fdfc4903ccbd3f7309225dd90b5f25c5ade49c14334c0e00fd18b1dc611b10fbbb98c560ad4908842e765c661b9bce005aeede6461254338b8dad3203ee1b58bac1062c7e02e2aa6d420283ed81525839f2c8ff54ac71cc105042c594fb7fd7b55c14cd1247347a197ea8f93c1bbeada1dbf3e59b798c9b15765ab23f856fcf4eeaa5892c3857646bcfd8ad2bf0a15607e0d6696a8548da32955f1f8476f8a20fe4f59b3e9bf4468730b8d46c824a370d37695d1bdcac521032804c5cc66505637701e653ccbddb052f4ecf185b3605d0ba3a4fd99161973e36a35bf79571841ef7506db822dd2a5c959f36418a8dd8acb5b3ecbf3e7918a73695501ef8f440aba43c6e4575880ba3bb83e0a839254fd8d8c6b979d79337a68d218565a5dcb1518c6c82aa73ce7f54a9434ceb5f5fd503137164d74a230e46ce298b98576fea88806bc51e393acdb2abac1da23219b4dbcfba366d834d40dd8e616d214c3478136050555539eba776bf506870c3d20c4a4645b9a7c4ffa976534068009840aadae71f578ef1a325717f64dff840b9dda81b123086a47a172e6793e68af6140b1492058fecd68c4c23db1cc13d2b57f52d0cba89cd4c26d1bd580dd2a054a1d934a80b9eda8ffb503b7e3e62d00a3d075235410149e976529d8029595e4daaae1aa685f3cbdac9b26916320e75b0846d2de8673600212bb648b26e3f1709df425136f33f46129afc90839d24de1e9fee51c685db8a280a5dd4c3ac1539664cc36ffd4537af480d4082146e7395cd6de1f8b652bca8853ec742366702afd6ed79a5920e4ad1317545266f6dbb796ace0fdc731997cd94e1bd8e6689c856adcf153909cfe882b9b02650f4f9eb8620983f0c6b95b3558682d8134a9ec8fa97e174173041115b2eae21fa0b72d0a3c7c2bf9b022fa141a8b495de8321c152b0a9a942c5baf290a234ade4e8b579238a627196fa5621b196ecbe31583517ec4ed82e8d3fb21a892dfd65ccfccd2d36c5d32afa4d4
bf201d684c4b1c8c1207db455dede5b908ac63d5fc0bd2b36e11df53bd52e5ce27a9af9444a8cc4391ccc82914b79ba2971ef4ea5d5c30372e7cdbe9bedfcea9ccc8140f8c3ad1bcda29d11fe51affc74f17c9832798e10222701e0d6e93fd109cc9a12df4ee5d38c531574d39a9f4357a60f8150ee509c68e469b4eb0e9be2e6ef9099f1bb949f738fa801d223316fbb1e179b74445228c8b3c40440306e4821077860c37d6b8c17230fcf7ea48d0bb0d98fd3f1f00655e11a8b2e0a7d5da8427784a8fc6d1a2d4d1d3adcc02030b50a700788ce4078c199fc733e2ad469dd9c775d7a8025b4db9b960619f0263b7f09d038cdf85045ac2a1cc5a18364048bf242af713ac4db889489d781ff16b1dcdf66acd89bd6c7651f25a17ce751b67697739dc4d1a125fdd5a8ecbb0cfaf31cd4179249e91171ef3e628dda697afed9d09b53260ae475d59ccb45a6ffd85a2c4241fd134462cf2ec21b51422439aac77954d1b2396761f16e1c6e3242b538f23f584b95cd4b811e35a526748050a7eaa02cebdf8887d94287c99500bf9c2afb7f36ff47e17906534097b02f10620958e889d2392d30660e513c22f580a505314eea4a865d97adb9136c495403e321f425348b56ce8f8e8e91ccd702ade0bbd1efdebef8344bb9defd471ef4b214976556f59f679e0fa39a2007bb9902f5a60ba044c4316c27f6b634241acdc3ce437c4fad599aabba291bfd71c05eca6d9df49abc33ae7709f6622e516c22418e7ab86144f6baf3697bfeeee65294175e5dc9ce5ec82da64537f5f5b83f5a938e41fa8f6f97f9102fda8bcbfb6a5c58f79648b97e948a074e459b9b75a1793cf7d9ca5d7ab27cf7035ece0612d348a23c0fed509c5e18d19b1e659af237c3b9aba4fa8477de805c5f8ccd0cbf3846b6ee1bc9ef76a190952115bd08a5108c8bba76d8d762184c122d081c6dc8b4c49a7f0e16ad4cbed86c6818d4f22c03a100c9afe3675a2f354bf1c2cde1f5e5a63b95761e10d27c9482539387e3aeeaadaeab59faaa20cf595d4d8c57509c751446282581ed28cc55736211e6fabb63d0f299e39ac1cd2af1431bfb03f86e5e59691dffad4e275d4611cb2d7d3be3defcb77907c94db86d989a2ca7e19729e3454eef23b0d58bff8203b08f41b40913f2d2dd2e8c98af09e5aaee76030d8201640d78e7bcfc6c1171e04cb39a6bd060ca41ebbfd090883d8b3569c39fc19cb5d87c15062c9f09138d4e3d3f3421227fb2ac48b224438b12702cb67e2db161a3c771d866c3cc55d15a094f72fe314092e846256e44a1dc513b02bbdd976321f470f81f36e719b9acf22179855d36ad0c50dab79da662e9ea7f9685ec0b44817271ffe2b7254ab7f3ddc389847e17edbd33fbf789bcd604ccca0c01c60de
ca286858b16dfa17c5875916e0159dfd4f0495c08bf6de51365e2175e47325d5ee71c96ea8ce24c4541886e0854bf7dd8a980aea1aba9add0316f3d052a2eea95c02c241523f3274ee62c883c4ac440d7626cdb4f0aba7a4ea686b2778cd7d7be220357de63cce55a3928aab4c200a2cd65b04d831ba0b54dc91cd6ea410359512130d2a0122f3c9752ba6210ea3b115caf891f0a0a7ef210d1988324a9af926cea8487640a473aefb2e3b4b9259ca4da66089d7f7800f87cb2bd068b8c268dfac897b9a2dd1ff4ac2b19a48b7e95a39ebc6afa2dceca7928ed8e43630d673e5c7ba1fb4afbbd40243ed411b6519420e738c24ab183f900872f10248190358636c789b842f156987d0593fa7cb813f5c688652f871aada7cb5a9c2e15ddedac147151b4d5a7bc4b33cecac961a3487984918868515ca73ebc647945fd9044f3c085b184b3f9d333a7b74927fbbe4a0d846744e0fd6bc36f9381f76422633946fe79e64c3fd63e30096ef400df8cd8c884bad1955b82c013c1a190db92699d39217e46d3db284f35b18b782e791d722d12b85c8a26ac98e9dea8356f9d3ca58833aef4ffd883953f24c96f5351438dccf33693230db5d72389905b49d7308cc30b805fa968532a976009a527bfce9ea921ff4ea9723be5b5972ace8553441a4dac7f0b2114edd3a25666d70c4f94131a63f4521dbd004309157bb32f9fc649058ffbe747bc3addc523f805f1b34787b0f446c9ed1d1966550c7d0c10e342316c6b34899064d0d2dcbb09087ac20572103ee01193a3eab06c06e3206cd60bdbe367af81dee5ab3e5dde9836c558e54c9bb6aa306a609225cf25a65b575fa97d9c962b72b798e9a7fd8192ba879964cedf623d544c8929af5c8dea56721d25578434e2b234289895c697c9c1bc4556e4f6df479a837d1e9132c011e47f9e23fd27b70e7601fdd24f28937efb9e46673b9f56914638c793f5c3b625664f2b221afb3fce5aee92a84d45bab5cda58c49777f82b2b1c8293d727fec90dd73581b087367add474dc7b4cad75cea1e43619ef3fa1b35175f5f0889c031c2083e764b0f4389fffeb307831b73763e73d2c3112adff579d4dcfa1c09d3f2c5927568a70027242e6bec83c5e2cf7e125d8b5e4ad2ec339fb79bb15b8b9a6db0ea9408fc6fb8ca6efe9ae0c8c25900d859b17fc44c4a262c7a5e06ae9e2083fc6dc36bd08d648e9a1a3d8fcbedf12777d690ff15dc7096e7c8b33e71b19005c9e1b20d2c2b6f5c7c1204edc691b389b6ad04f896ed297922bb92b9e6d10a2df2a83dc71c15d2010b595c72d5677017d6d7938ca3538d671e13b8496583b4f9fa59fd481f1f438f92b01a6c5f7169d44b93c0b6863c1a183e871e7f50e26e6d41243a1c509d423309dc886dbb9ac2
45263ae9d6024456e72b57e17cb08ef00f4fa4dd9fd27de0685c4c6c680ad654e3d81dbb450f0a5e7821412d442c2034093e3fb10234e6a51b98fd388eafd0eec66b42c275a3547f72c7f3d16ed81395e9a2664faacdf99bb22327280e518e4ff047451e6f7420b562c68877c96e129d0cbe18896aff48d49da028dc97aa0108da9b29c540c5238d676dccafdf463694aea34ad4f513b6c7a58d071c335ff1313d41b7cdd902904b8c9fbea2ed34878b407ebb8144f603683ce4ce61eb0690a00d492978aac3a0f3010b7479667811c3332c06553c14809c723316c84d084530e93a63bf0b7658f7bf367d29577236e23ab658a685f2612f0216a932a24aa4f70b8d0609aa9ca14e4d91b8ed9fb62864ded646012ef675ea359117c07f528d7dfb742aab9ac892851e97c94f72d5c34d4feebc7f67e09fdc6f633f050833192f15a7acf4f8c8beb3adf3860fb26fee39a416ec362e4b6d9ced09fa57b3d5b7fb7de018e4fd93eb65634c08f6d4f1e2f490c2a8b1be2794a27de0dbecc9949fd1d5eefa0fc6f0033a2bdecfcaa267280b445e92385d2edd4c2b31bdc5d54ddd6cb30b3c370a893c217945d346d1c5b8b98ac754a01afeba6f5526939ccfe9f2432461a99c7b9b44a3983eb65fb064c32f8c72e18b8f6e42e72a1bac21b3cf94526f81089b235794412d1aed20f48324d742d4079e9546f495248cf7f42839852d604598ca2079fe44b125ae9970973b57c156e83fabe6d64c9aaab5c243d1dc71520d45317b913205979fe5bc075b0068d8a5ceb7c8ff9149c763c22b08d35a09feb8156bf7d8eda212a102906e251efcef1ebed894556f18444a0938b4c050f2b873505bdce97cd4fe539a944b94e281292f38850dec9e9f108d3b2d5a83837d114bcb3d6e6511629f310d194328eb05a7b88e7a053e97dd92881c89a1169e7d23a4fa1ebf532eed2579fc4482b9c93da2b5e9619f289f346160996cc61a3f380ea71b25e777af37dce79039cf90a2bf16ddd46733fe9c1cddbe7a42fc5faa7869c96ec463e9817495bc24a23cd9968213927522ddb0d6ba5db92f5736a5723135305a6c083a9bb54da7e43da3ebb07066ad94e597706062118fef17e9e65363f71d8859d30527a495f06bb025c1d26c6fc80e9b140c7108c57ee5583063bd8d2a7efe6a3026a79f2294e09ce980be8ce1a017132ccf48a63eb32454b12506a6099d4e310f07612e77da46aa0caed8fb0446fd6091140db2cb1432bb93cbf681cefae9d849fee6b0d87898d52d31a209ca6f168b6305011e2c9a55fc5ad2237d7c2d06b98e0703ff2a89fc7af8471aecd2a6cc0a4745082db863bc8d46209d51135333a03b328345b86d6cfc23d6d7384fae5d8546f05725ab139e2c25b0dd9b2113b2774391aa05
8cf90915bc97a94e74ca0ff6785243122f12decdc48aaa8ff27200007f35e928e62269f7f07407802c9a10648a91180d559c5c37cf3f425c9949b9e38ce4c99b71810babe45344d929906776a66fab175e20bc5930f1dc4b5b888301028b6e0f92293e468d0c6b191f0840ed822c036e6257bbd4f0db8e931463826c0be855add67bff5fdc6d4de7347fa07e63d68f4b6876774a39dff1ae927614f8a879f128713e24b263850f1ab3176ed0e9ca9369af947bb8e862e927cf803ea7b53b68eb8c5f87f1cde2399122b7892ccd4071610f0873981ece2ed719bebb0d508037e46b95610d14e9a826549cfedecea1d32074aa439592929873b49d9434f35646adeabc8b52e323ec2dd6d0d6e27b530361fd8bf9e4e3a0a58e3079dc63156a684bd5cde53ba8c9c51da274bd61cdab187a3fc0a84d5005319f05fc7ddbda575f73f3178336413f8ba0b99cbfdd5c350a3a925260284d75fe06371716f951d76078df7cbe6f25beab46b8f4222c74f68822d6747314b688839540d3bb9bd0f45a028e780fe2b5c78e28dbce66680f1e57b68d6088101146aa9f976bad10933e4f5481444a46d40413ae5d00044a29dd3760c712c04771976280f793ac5bf8cc1187976096e4620d646358f207a9166b9d27030721fc00688a0df926e6f4944ba6e78dc862a8e55e3d1a20d2993d8c8410548e9bf1b6efa181daf8bc060bd1af3dbd8853d6d3f54bdd1f6270b20fcf7f90310109b98f6b366a4ebc6f717962e408bf865d0128fc9ed607f848d376ab1c50e66152f74916a28539a762c75387d144bdaf4a0b8b0e7baec532e8d531501674a8727547916fbcb2e45f9c7d41063bcfec3de1b0adee000e555397ab16fb0977a8c3ac1385dfc89eb7db5cceb9109077d36ca9ff5fcf9feed6b985693746a95ba34f7d2875f61ee8606302b6470f8ad17b781daab036e288e5ee083a3a36eb116a34f5ad97e1675181818289f514efe868feeec3b48b1a574b9405668aa536e572f0e2b46fdfccaea5b2f65285f6a9a05c020bf440f5db912c8ac289c67b9d724225eff88366992f08711f35112e66b765872d39b54cdb5c4c0719b2c17dfade7e2f19281e6ae7885708ee8a8f6f90ce79387e6e47b33f15f212c5b386a5aa5f93cb597698dae4b5999ccb4d652a08c41ed27c45d2ecbd112a679374ddd6606ca76ceca9ab08f7f648d248622ddd633dfc121f9470930ae058cfa9455ddbd25a38aaf48f242ab6e0dc895c5b2af0d9ab0c996df526f144cce6297af5f3ac5fa1d159f52e072b827dbd273afcc6e3b8fa1151acaaca5965a4b6cf5b0ea6275da3208159c6bd6d716eb61309eb4ddfe1bbc4ef8d013d477668cb3506ebb4724ccc72affdab79dcdfaaee55a5946b4a3f768dae9fedddedc6c571
2296f26c025ed2ee299cd15b1e692c616094f500fc53fcd9838401c0ea6b6ccb883c149a52d875501ec2e647b1d6720a8227e33cbc1f429ef60103f3334e3de2e40ed4a59d811b8cc51a695de25ebc66eca519222dafa22dbca634220097b1d3f9aeddc91d11019d7215629122b4dc6e3211ad842288b581c31e44fa79e1f7855d8fa77e7a224cf571aa3c16b5f4fe5feb16d7d1bdecc543b0e8ff01c677ec6801e87241ddaa02a5c83bbfd1d84c62e269f6ce8a708e693b86d8e5439f129431a4c1c0bc6ad47784c38e1cacf6c523da23f65a76c264b96aabb50aa9e299be6abd1c9d078ac3b2c5f2c3986b5707f143513b4ea91a2052731ef5b48780dd0cc6626a0f0c358454f6eb36df7caee6f8dfb3ea19a0ae79c0d1587140147be3efb2a0da1305d5fe056010c518e3471572d889304c4ce00acc78fed04a4b888d5e7e57d6cb5cf4e5cf1f8782e1b25ad948eb3e443db75af9233aaaf6659adbe0ef33d4b3ba5214b85e656719df2eba42235b2e268f80e3c5971d28957f8e93f5b04a3d5eaa607fd4bb838ae48661bd093342762cfd1ed60b21f04f5b95c3e5426ca6127b04810e2ee25bb56ae81d7840328d8d4f7d1bd341ed58b102d9860806f4a4d117c044f472c85ba422eab084faf8994cfe0a880bc46dc9c1a8c11995610756e2ac50c5fea8ebbcb53dcc76b1944ce364f8878f42310fe0f8cc211c62f627d12b20527dfd84b78c98b1122050cbcbdb70e08010f68294a6a805d3fab97e76cd695f918e73763ac2c3dfe4a8d75db87dc37e2399fd854f3284d29c7bae3d3e31c4375ad9e047f03a5204c2ba93b6025c112ea2c9fcd731e380a8aaa42860c859c2e2cfd333f0bee741e21f78776defea86e862711f0d0bbf64003ee848a8d1a12dd00c024cbee343d1093e653555c033c198401caeb951860392b5b1eed6200828aa310ed466e41d855dc4231464adc2b6b6fd66e03fd42736fb791387efec28b37d0686272a6bb181a621aae7be06866bdc1c4be69e94642c8d3782f5ab7cc8c890699008b52a11b149a517771b93bc2ae597dedaf0237ea8d9674e26fc75c3b468e04e2fc317d03484a75fb274f7ba1617bbb72ec16da1fd4109952d052e9de7c00761736dd17e70db0976692626ccf8bc9e88ad6c25ed88a2f7c2750add4ceb95744f690ee5f2fa423a2b62ae57c1105958bd8e81025c9412fa71f5d1e81bd6cffa01f489fab7e90ab8a3c8aaffc8e3d594beb254c460347196473117ec2a416dea464eaff95da6cec26b5535954901298f11932ebeca52aded139f2d5aa2c24174e2f6c701ce1f4564c60861ce3b9cdac1cfecf071295c5ec581f0f075096fa457373c124b6c8cae3aaf915e4701ad94ec9c01e5ca0552019bd7f107a7d5afab9e4a5e7cc7b4
c5416656ad064f4a0f89afbf7c5b884b69a12fbce8aa73a49b2e5c5728c67a7396bb8341afdf52213b2f7f8e84962cccbeaea63a3c7b24881ecdde39cc57b4f211cd57c6f982217758042f61b648496e62b612b7b8bbe1b9f15d237aeac42b54d15166b5c71eb27ccca1fc9e050adc62a267eb82ca2144ba323a73aa11e2fdaa87695c70316754faf7aec44a49b668362b0b35e884019227e7b9a35e8841e64e0009c713d7f3e4a74cc3feaecf4c99b8d0ecd85c8ff89771b63a38e3af990641f28fa7e4ea560577d600f43ccd467d6a347fef04d392d42f8e97659348c68b41299f94db4b713d61868adbd20a4db74f61bd0d1e7846bfc8b8f8bb50bf50c2fbfdaa87328933741aa2b1ca50cb759c1276f1a7930952ed656921f5ce5569ed16b31b2a1b6009c784199ae60ce2e35d573808a195974536f220cd14dd634bd06800435cf1219047f6246c2d9bdea5e489ab4862f0cb0f01439ad2ad1e2042b3f63b8611a87efbe842613c21761de4c79291a8491092c20134252b8e900e5d3cc70e75d32cc41452c5c33b66087213c34f67ae73fd56a183be858f1c3bcd73d814bb9e3f78cd18992b0ea401d8f25c3b60c055df8e6430b62899bc86167d0b5e2bbf16d75bf3f2b94c26542202bbfa0abe99be1a07c78140f42c12f51576007bb5439966a47cadf5c4ea624a75e7a4f01d8733aee57e3497c013de4a33cf54a94acad9b1aad837865a6881db9a725310eed49581d2223f2b0984757bf3fc5122c5dd572ecc781b48fc508122775779d2b2849e11684a585ce844d21352f8d35ea53f0f34d772bd9ca76cc4dc33aa3f2e72418c097614fa5260eaf3c2d724d3599dfa0991a9c0eec9c4d550886c85e1ab2541e9868a36afbe0d9c07c93e44c4c73c66f88e770e5d4e4ac331fafc6870c928fca85756c444c6e8f6cf75865859abf0cfecc8e89b8c806a2e6af7cb752215bec6201eeb41759b27d599931dc2ae75d605b3e387bf263ebfd09ce2154b81479675555ec74ad85150f8eb8c1b3c4f31f6409648f9c1b4678c82e8e2afa9c887f3210afffed160d1634ab0259e1bf5565d8598605a435bd289afbbc12034f67199b67bb0fddb4b9180908c483ae5a8eed16221687e1f524d010ce5db78d1b999069f225479fd6bf0681c7ee95d4665925bc96399989b85284087e67d5a070f2713feb78bcb91bc019f3f19bf3abb7cf36ebb98f09fd64b61e2bddc9ae6335da48ba85b62562726e142bb9d9e5c8f278dbaa0657dfe3e410f03211a072555624d98790aefe8e7b0281ff6af3de79dd5a414632f9d4913a480e9cd6990f94350304f853ba5679a4cb3a647b98bf1eee6cf70f77581a1ff82a9ffd7296e8fd172d37b1b0d1621692cbfeff8de18658f04af5d5be08bce66e5dfec
5989b674219f9ceb6a1037c80a8febdfac63d482debd34c3057a677420f0bdd66e2c2b25a9c1d34b76b4a998ad3ee21d1e49f812422c83016c12c201ac2b0f07ddc00638846f215bfa6c575cbfd577178eb0282ade2c459a13386f5dee8a7502321292a7de077f4fd12967b8c8055596e7a43287639843b6ebee58d463fa044562ec2da7f9c2a7f28cce685178eddd3b9fe7b10202997b6b170555a71555cfebd06cba6bb019f8cfac2ec5db3b1d1ca88acef9accf76b6a74600e590a0eba1c839d6a577d3877e7d6d010b04fc58e160ec9733bf200a9e0b24fe8ef32613cf2c7b1515008b8833e34d3967ccbc8bbe30fd1810f23bb153b814392eb37d8917e96260b3cb16895ef13b96d72c81a14b908224571680dd56d04a59a6583a232ec58e8cff16f6428b5e3dd19f362992608aba912b642aac9950777627ffa4eadfe9f31b73c3fbca11d2abb623b732f3d7c296806151257c9f2306dee1c84eb05d586e7a82a8750905716b3e51600250a1e3b4bf274130a1bfa47117cc8b6db3741ba04d977015b8ee250c3ffaf859fdf0372b88fec188830b5870f251889584333547f3436a548801fd3236da2ccb2b504f85ef1d259bc3e00f0ced934a4b297ecce0d668fb3ecb524d3ff4380a7856c7060006de31931d0b26ec1d084e0dce3b9a123741cdc326b441131d777799623c6340410c331c7e8a4a8175d7d250274cc4ffebd5d46d855bf90842888893c348f0a447998e3aaffc81c9b65e3a772eca5c2f0907ee13ab6a2babe99f388755fa3ac9dc79a2ba4ad7a869a876448ed1d4dd6a8c678065cfc90df8470b29c83719bfcbec7c5e3244a665a28593ad42ab84663bccf570a8e8b783565f909b5e6e8cd69ed6f79fc945ce5d845c998f25b9dc118c96dd2c0f592a73497dbd9e050632c8d82656a71460d0ae7f5f38636692a78083b2fffaa517dc2dfe18ae020e6a5562be54ed9046c7129b3a57dcbd1917efb0579fa9a3978690fded8e52e4860db75b2a93c77316a6e84df4965291a7531e2abc0fcc0d0016acc29680baa575cb7be1a03206236310eb5120ab4069e0f8f0cc3f6bd188ca91963eafc2bc66b1a42f8c49359cf3171a72eef94eddd8aab03f770cb2f489aece4e09a85fe6b9790ced5feced19e4cfe6bcafd1a5d99fe56b78f7a14fdea11fd5e331e23191a3f74b32d8ff2740409f346aedf469eb8aca16b43dcc44c400ae3e6d1c4717ae1f18a2f70830aa0c4d5734922374dad8c006ab97e02a4263999ecad0b1e9f24ed0b599467c962932ec610e63c0b3ac845f5d4d10979c92bd884669908696172609e0da039728baa1f0dca8885d5439ca420e87f5c449908b2a5f69b65b60adbf5d74b21eb1f4e0d79558c59b4499c245a9952de8d3a51021f2e77c44e0
6a489df3b72d28e5d03ddd358ced4f5a1fe057e58b86f9e717cb9001cec6d6665cc0f5b9cf89873e6e7d10355746e99494766c937683684312b630337d1c411f3f2eddc52a8267e19d38ee12c810cc4e33193e26790b13d1847c56282ac86697996daa386b06ec2ceaa97fac9c018baf644622c74546177267b053a82292c1a1cf194909beba3f2670acf1d095b0caed4b8da2fe48c9da3dc61969d938707a62ce9cf55b89ceaa04a9069d38f4e89db794a335933c5b45fe215976e76dc71b7719c2ef29d06d2dbcfce0470007331a221dbce6baa3f418f989d7dd927d343152ee310d084799300e8d3801f9d464d9bbd5687e3203cfb8e589fbab39ad4851b07bd13b29d7f4b767858d13c5937a482207470f673593aa9abe339b3d63b7ea4ad60e51e7f9080381eb07213ad1996ba7bd28f8b44b7ea037e0bf9716f56820f908fd4027249df11aea06df25b3860cb18b68a7df5ed0d14730035291346049e1e5cbdefb30719548fde4f986bd9871a71b5bc7f6e03ea4fcf1c6ddfecb06413832ac27b08d203070acdaf432bafdb288908dfd673caddbfe41af8255ff7106d39db8d003ec1abcc3000bd7fe1daec2624bbe8417f81150f20a8a48324100ef1570a6de7c0a21e16f6991b23016671bc96ee55e99a97a5a0120af8ecb816137d5f40b9e71d56cbecf61569dcd2f850ede77437be06fd85b54d7220b9bcd13e682a8227c7a05a4efc8d258b0331b0f47cf45ec370b491d6b2e4e601e50483480d9437fdf570b6be69b28b964972fac047f8aaecbe567c8ee3d583a46d5b58fa3c361dd3ad73c91727e4d0594f428acfa977206c20995612834497928d507eb62aca1752a8f3048c932b9f0f80f7c627a87f2b50d581961b8739bddfe2afabb1c757f366acd1e639de808409f598755dad254c60b5aefbbdcbad52f72c756e5e4b286a6866af769593f66256fadc939d3d23d1db9096038b40ed224ace023f2e3ea84fb4092c974cb44ffbe489f0ddbdd79e66281ef9c44e81781b849b0d3101c17e54ebf8bd69393b9220c75c7d3c564862ef35d7dfedc855e2ea15a6159c6c2bd01d2c4f3c316ddc43f937cc295fe35365a69ffe68a2a3bfa7eff90c2fe8563f6438117c31ab48cbd5a3ef1c7a03a03a048be4a9fe0de1d6a86feb144731f4e84f1b509db65d35b1b8ec3d0f462392da10694b207ef1d9fa2581b572f9c45012151f039ebed848b3fc211b2b4d6d48266e8bf800e68cb1165cfb17cb14af4fff107e57bc90b9e32006dd090ae12ff39b000c474f77da32549f51d07bb23d233485be9143c55849b5fa241337c050d48d88e4723f7f1032120cb609c584cb10cd777404556df84cd095c4a9668d392cb9a6197ce04e4234d48b47f8deaad83ee95292c9a9e9d42838c
12e34046483ebd821284ac349fddb3d89c0e9a85716ca5f2c60569686d3580c6c7bce0a0ec4183fea724ad02763f66f85992fedf49c67a54c8ecc5b47d6e00cfeaf23b2425b795be93d65d92fe0ac761cca8b2feb4fd7a4bd21bc98a7328f178a61aabc2edf843e23ee94c757a457d448f3588b4e39cb14d855c35372c2060966df0e3382afe2d18988ee7676511e43afae09d6e16b50bfd290c1202c5c82520bfadb7b9eff22c2e9d202e7606f23182c08f0d405cfda6e8bf4b222a14a96015602cd77b2e0af5027938348075115b146166990bdccdaefa94626e140f8ea6fe6b51fb38fbf7ec39b89e68174db08d243a5da08a573545993db451bcd7462ba2c308849e6f54fd68eac003dff1971d19a00ae1d326d9db706197ce15397066ca114645ee39bb1a950c068908be503b2cf3ee74048dd92808e07172ba1362b3ad4103953c990e19b4581c54b5a240d90ec56150fdd5d9d1e497090941b541a9fa202d09f2790bd29f53fcf2adeddd4b4ecbff252921feca36cbe51e5185234641c8df314dec556280e408ad6605cb82f9fa5cbec32b2d478e876b4c3bc5019c344ee2f0bc33d26ae3b69e349771a8069f38f879d82e1c68f84d44516db921ca606b6e310e9ef0729b9fc76eaff94d3e44f865a6943eecc5ea1dc097e69e91344f7b287223fdf25ed3512e1fa34b0879ade1a2786571435e71d3fab19a6ba93b5d83e20f05afba10ab48ddee2c6feee813635318ac35bece3a339fb5c2278df5b9a6b7859343ff5530a2dbeda669a47a5eb0efc46c148ab00165563023536cf71f189c6b855ca6aaa056233ba82edf29e82d96c6118a0e6bf37d2ab2945ed1904f1dfd19ede3dfcf257aea6d560e3776159ffc384b3540deb1cc38d1022e530c2d46557a21eeb744ed5c00843f7b6d5953f1ff4770d26dde34c4cfbd308074e0df53264afc5a3a7ab8a57dae296c39bd72b88ad988319ba9e13ea529783d5c926d2f48599720695fd174f8873d0f660f002d8d0ee134271450c12e9dddb641b240795c2c09b958778e16081bc9180442c45fa916de16c83f16c50092eef58a56191bbcd906eb475b97d37b7f5cb00a79a9ad66a636e1052f9dd1e75d02a5af4840dfda7eac68c749bb857675e67b450a484d3e7b13a77fdabff0e97dfb705e5f4f6cf1e95a5f6cc38e099634a020087f868580ce2ec0837525b8c58f08444d7fd4333a589c0356de22568b4fad8766ee3325cbd65843f2c713ecdb44c96411ea871c039915b546ed6fbafbd51805ac48d06c6924d3f7036e1814250f50f27342c8c4ded3e68b6b3f161d46379c1088a7a123f48f0e7cb5a348f472eb155956fe232fd301e64f341041683ce3b25bba7f290a10282a8dba3a2a3da24461a5be148c2241d627889adca5
acad981583fac81d0ee4ef77038c1f80db9dfe740720904512691a9c8545a9d173c08c2e8599010c972c2c34287d91ac7803a5700a0d6e29b7774f8f487b70cf8d0ec9474443e2c0c051116b16aef491c3945a65e6ddcd7931a7259e56902a2866b95d3c0bb7a3ea61b1f3b54ae56e6a7366ea895056ea0d1c251cd74f7b82b0d47464826f4aca77434df3d909271a825b57890cd830011981d95229cc0427cdc97758ddbc76d6cc77ba06c92d19daac8bbecbf55535e98bd4754ec06a6e632225c43bc46068baa688636eaba53926ca093a7addcd6a696a902ac35631aa43d9d66f77270cc7bf66140dac239034ba304e1aa0a265131e9fb2b7f079861b0e4cb9c911ce82ef0b685002476baf26401dc8cc444543129f82ac6b103881c596b19d9eba8ed6b230c17914d5c34a0040c18dc54d8c4b637ee683637fc5a82ac1cf12691bb28fc0bbb307fc032ec3d2b06eaec56ed769b5e892816c7350dce89551e87918f67a117c39f256a368586c78c2e9614e9658161511a8dad53afe8cb9eebe67c6596a90eeea1d3d2466a4d77a1129c0a4409b98d8ac0b925c4b2b3500665a3cf4ceb82cb0b6732eea8a796f9b79d2ea49be97066bc1f606d9f1f59f41d2acbb878a0783093fc4ab0ef866ff60a6a1a58d3cee90307f09247b5212f8709856251ff5d8fb77657110bbb3f3aeff07898f049c821a82c11e27b0c176a9feb12de5d08498018f7607156c5065cb56bf9d6867a4495f26a07e0f01312c2ee897b82d8eba0cbc473da402814dba727521cfec6afac2cc59cdd6a75e1f8f40585e5cda51a7434a81ccf4b7de33c663dc174ba973cebc5a56831005d231c719ea34ce42999c471fccfdbdbaf1acd2f9c16f258e32c70511c475ab264173246ebf31459a05ecb4df443066b61a243903e80ff907af17a96d7afd9763df8f8c4fc49775bc805e2dc165bd6f1c4e06688521557ed9ddb6860fbed1e32957bea1174b3a9aa809d7fa6301fbbb6b3774cd856095f14c6378cfe98f05d4f06fae91769165dd0adfc51bf8f57d701ef14a99d608db0a104ea78fe5b13794cb8529afa5352d1dbc8235d96148c8f9c2e29d6e2359a8dbeba56c9376b26f8384c66548979f4d982fe0652cd86bb60e6f2463ec63dcdd5f93d4bfaefe48f8012c63b32ad3c02ec9088896f6a0c8b1097c1ad911ada7a2d6f0d201a28b70752182885464dd688535bdfc045e8dafbe34b20eca00848e757b4a37de219be5a5fe7a4bc5cfaad29ed92e9eda2bed08407e0d0f53caf6b3590210067d8b9ef16f9a8f5612315dfa415f1efc8d7349394143a149480ce3ccd60ccaff0d9a8a797820f41b431ce3afc4adb2e07cde16015087e09e08bd13471dee960db35cbc3b53c187a5bca7ed50017e09b2ae2c837b1f
6557753c7f5b004332ffa2b52d8a2269e7cf9cc397c6079aa5add61d7a560a894e71510e104f52a93622e34037b1db70a05bcfc546ea2ec7153e69a8df18fa9eadaae2c1438710477a9a23e0f7092c310c5288e2d39d362a0a33f9e3d8d9792b51a71d9014abcff66ee509baa3dad341b1e4b6c601a2966f77172a4df0f32170f3386a6600b0b63699fe21e26eeb475507e99f666e0ac349b9e23463450f4fa4498356887d9e1c5f7d18ade51e526d27ccae799d6775336ca9ca8e54d707639ecb0618a3c675533494e2435c0b3780a66defddd217d2cc464014bef8a051d8f292abf9e5cafa78c600c21ed3d40ede937b1e162a1e14757d39d77d4fad8711b6b46ae707b82ced0739f9fb6bcd9b557982e89bb3af5f3fb5448ea960f454f4475ee78970acda37501a8825a04cecf3e544651eea8933379da3c3e7de0a875d689003c00d276470fda3b6ed6473cf8094ab91784d1c0f9468379e8e9729dc1032a5ca14378f8147409f13cd6994de961e2245b35c814596087625d3d3267fc0c1e5614a4af94993091ead40bc9e1d3093228b70c188855ae9e914b15aacfd4f83fde83072af92b2cc968c93cac74e15322eaff32a7bbbb982fb725aeb71f34bf16323d9c0a11dbaf3ab676a9cd1dcfc3f8a0c66d1f082f23806133002c50b59d4513dbe3419d5002263287ff47abdba0862341effe669f26b375337170c8e0742113e1063e8141c4aa9eb4970471f3187f581b71e6f7fe2f8043d065620da8a066d112fedeb33525eb1061c0d0fe9fb415bddae8ed2eb5c3ae6aa0549230e436afacaddc389b2c66499d7fdec2090e7e13560ca0a64803554c7cd9cfcc1cb48427cf9ccd954bb7446c887e2756db2882ff12eaa64efae3a24b35d1d0402922efe90319510495420301d3360f4486d3f87e3dc4f9337bf3fb4e3c6a82850a840153a1936e7cf74086757b72a8db19d33a62a29f3dd4fdef454d9222031aa0958af21851b66aebc09a5c08efd204f3ff18cb1055e8181d6630309fcc91c0d6daef19e618a3ee23e817a586d02364710cfab0b9f2cf18502a34e67d112f1730d44ccae54dc221d7f3877bb828e7109878109f8e95e2e1407df4e588801d25d9c2a1c501e74890631e9a92d823ebbe6b5635488f7d48788ef77658e3bbaf287536b37d3a7ab1ec1749656f2ebfe562765e71dd3e1b895d9b5c315fcf2b3a063c57e74ad1e7586b293ede4c77732f38d316c14210a121153fc50007f78ed64a8e207e9d04b312ae7f97a946c74d2a1181b67e845c3ac6e340b2428c8a5546679707fded3406fc221900b118a3279e13b74926c793e27fc4cc32ae478b4421d6eef75d3a273ff61d0e95b4981e8dd57e16bb00e09bfbbc2ce60cd844a9abb839b8b671fabddfd6e86a30c0a
24e73c3c17770f34641951e5dc73ca11d8f8419a7407d483e0f5f1714df0a1775574b5500e8a5a28c655dbc28d7a1ca4b83fd4ebcc7ef2e4994c97c87659681acebe7417328c8612e8570e7ade7ead7f4fc711c9c539362779e6be525bdf5ec037f670b5235c06a1acd89b4ffc21668a7269cc73bf6d1399852eebb8b1dde8ef072e8d80832ba32c8e9480da2c4f5c3209c557f31beef41c00d22ee7c7e2c1bf9952ba8a03c1afae9b4aa63135d2b131f2b2804afcdcc762e1bcec8c8151f471572888933ce97dd787121ced446aa9718bf3766bb6d8a752692c59489d5b565e1693aa0f67b352f915808e415cba13a9864bbd33ebc97dfdc0d357d6769f2f545cc6529c0f634da901ae63bfcbab0a3896bc43faed6a6c23bb4e92f3d669d2e0ff485287cce322b98d02866f026cc556ec8aba6608ac2b5dbc29e104ef2e28d7b51ce63110025bdbfc5d44e8aa7a04ecece07b9860618a162e7289e8d672bb9b15b6ffc87f738b0c7a2b733c5794afe58b1beee4b6780ed453bf2ef2b584dcf32bf732c98fe359abced05fc115e531b088c61b0d5d5058af10120581d7db192e13a5b7b17874f000343aecc8d5005b91b13720bc831de5f1de5e3ddce27ba05213cd126a7cda0afa9745f498200269a5736f63b0faec36bbd646a868100c17cb7f6639f2f14b6c52198fab04c1645bed8763799acf8fef62b82fda1825a3379c000255002788d686695b4c17be3931e69db8980d0216024e9b7b0588cdf8c8102d11f55f971b3163c392cfaa796e0b85dd0bbacd6ca50b3ab80c2e90fa0c18d3526e05b2a46c2eab823c0511b43c71122d533e27ee6d6e34706fc411c67a3b87440a3429df3009996743ed3e4dc244fac98a789f17818a926a0aae81ecde260982b80acc299f57a570a86ee28d0414edc91fb6d5f9a88aeb31bf22270bf3517aefe1140b05be97123cc43df6e8e8e4df96803fdd59715c87afcf0189fb5448663eb35d2c4e5b13dd0233a95f8d6187bf0d5d3ba35adba59e162e877d5a0397d9495ebfc771ae68283be15d883e91b81b1bb0cd8da6c300df7e2bc8a21094cadc974c8270d8ee37fc7e7501a57eaecbc244ed61cfc8d556e38c0611a5269c3b930ee5f37a9771f0c152a5e28df07a104360c973b9a83d3ec5c0aa012bff141842e9b68222647c7d022753dbaae024877f421ff36b3721c26a39b3009683c8c510ba0ba8b5dc1033f9b56e9a43b3141a92599378622a2ca8136f5f1f51cf7b7dce7d043f65f8562b33c4864adc30e7d4c808b10abbbd92f94272b68b063f7d7baf7fd6eb31cc76690042233bc8dee7253f89ce23de7a535af022dae95ac321694d6ce311744d9c152e4424a0a502d221b2e602ada71c60a2f15b7086d75867476b0633063297681fbb
0a3e154efe552cdbd9d3203f2e447b60b643b823ea12f504f33f6b6c3bd20e54cf38e3c45c5d472814db60741687894e6cc3c78196d5e722499d202334fb742f14dc2ccb7d114ae0c4cd61ce2ed0cc7fe25a395d6b73c1dfee9174e59d129e7f3c42f93a246d918028d4e2dc804438799 + // 36206 bytes transaction (You need to confirm many transaction on your `Ledger` for this) + let huge_tx = FromHex::from_hex("f86b028511cfc15d00825208940975ca9f986eee35f5cbba2d672ad9bc8d2a08448766c92c5cf830008026a0d2b0d401b543872d2a6a50de92455decbb868440321bf63a13b310c069e2ba5ba03c6d51bcb2e1653be86546b87f8a12ddb45b6d4e568420299b96f64c19701040f86b028511cfc15d00825208940975ca9f986eee35f5cbba2d672ad9bc8d2a08448766c92c5cf830008026a0d2b0d401b543872d2a6a50de92455decbb868440321bf63a13b310c069e2ba5ba03c6d51bcb2e1653be86546b87f8a12ddb45b6d4e568420299b96f64c1970104000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7cd58ab9190c2792714ab06df5b67e66d9e3873eed251d7beb4fa252d6fed6a0ab1e5fabd284f40878d38f6e63d72eec55c6e1aa8d79c06adf714e3523a1f83da763f4bcc9d34424aba82981534066379c1cba244352042de13168556be761f8b1000807b6a6cd3
40b97a93cd850ee54335b1043bac153c1b0736a88919bb1a21d6befba34d9af51a9b3eb39164c64fe88efe62f136d0bc83cad1f963aec6344b9e406f7381ad2462dcf1434c90c426ee907e6a05abe39c2b36d1dfb966bcf5a4de5af9f07819256357489365c96b21d92a103a776b656fc10ad1083cf679d240bf09bf2eb7635d7bfa969ce7fbb4e0cd5835f79ca9f5583e3a9eca219fab2f773d9c7e838a7a9ef8755dc22e4880367c2b5e40795fe526fc5d1461e50d5cb053e001206460fc6617a38499db525112a7edde38b9547853ad6e5ab359233611148f196501deafae414acde9df81efd7c4144b8fd27f63ac252ecede9609b3f9e634ae95c13058ad2b4529bbb07b5d7ac567c2da994084c3c73ef7c453fc139fcdb3939461da5bf0fa3f2a83517463d02b903af5d845929cf12c9a1479f6801f20085887a94d72814671dac994e14b2faa3251d465ce16d855f33259d94fcc9553b25b488d5c45fe74de60c303bc75bcdde9374ca268767f5767638d1aec5f6f95cab8e9e27b9a80ddf3dbbe24f790debd9e3baa30145d499dd1afb5662a11788b1bb3dedc1ebc5eff9641fa6918d958e4738bae3854e4cd43f9173cd4c9c821190ec287c18035a530c2dc63077d292b3a35b3756ba9e08295a02e37d332552f9f4fdbb945df004aa5b072f9f0e9fc2e4ed6fe455d95b003e5e593dcbfad0b3b47aa855b34008e0e9a2e1cc23b975a3e6808be59dcaa8a87145c1d5183c799d06100d500227e6a758757b4f7d042b3485aa0ce5e91b2b2e67d3cfdf1c226b7ab90e40f0a0d30cbbf425f495bd5a80202909ad419745a59210e2c42a1846e656f67a764ee307abbd76fbb0c99a702253b7a753c3b93e974881f3c97987856b57449e92ffa759da041a2acac59ea2d53836098196355ae0aa2a185dbb002a67c1a278a6032f156bc1e6d7f4ff6c674126af272fdfd1dcd6a810f42878164f1c7ae346b0dd91b678b363d0e33f4b81f2d7cc14da555dcbe4b9f80ac0fed6265a6ecce278888c9794373dcb0d20aa811a9fe9864fab25eaf12764bb2f1a68cd8756cd0b3583f6e5ec74ca5c327b3f6599fa9ec32ccd1831ae323689ef4a1b1a587cbbd2120e0bb8e59f9fc87d93e0365eb36557be6c45c30c1baeba33cdaa877a87e51fd70f2b5521078607d012d65f1fcca8051a01004a6d10f662dfa6445b2ac015cb3ce8fde56bbff93f5d620171e638c6e05504c2aeeeb74c7667aee1709846cb84d345a011c21c1b4e3fd09774ab4dcc63bda04bb0f4fc49d6145d202d807cc2d8eab29b3babe15e53a3656daf0b022ac37513f77660d43d60bdd3e882eef239bfe13dba2e12707733d56e49f638005e06019a7335d8184f1039ab18084de896a946c23045e5c164dc9d32f2f227c89
f717a87d1243516b922e5f270c751f1bdb2b1d3a38a15b18a7b8b7e0818573f31320d496e14a348f979b7606c5124e007493f2f40c931f68e3483a46ab2b853a90bd38ae85e6252fece6fd36f7dad0d07b6763d8001a0d6abee62452904f979cc52fa15001b06eef08f17d6e16d493d227ce9277392337a1c71713603e03803d38d1c24184b52049bc029f4f00b22d2acdef91c776a74aa184cc84b0e764f463ed05c2e16a7a0dcb6c27dd4aeca8aeac1545b48896775ba3fe9de4ea36e946d8f4ec16ca7ae58165e8ddc9189d5cc569888a59733529add4b213ea5c00ad3ed3709c0175b542513c90e18f2d4fa2301389102839d969e9f0d614943fe489750f27382f7ab273f51fcb995f449fa5fba108ad0955ed0819a0a62308021ac4ab0c97f04de9fb8533489b2685447ad71c7f9a9bc89975f9cdde87a3af89ae5bff37d1f192a31b7c5aad50486931bc07820d7dae398960965baba6cfc05c56df18b8ef0f5db488eb87be803fc94e3ad3bd6e4f358fe7ce15ca21c9a4752ddfa98337177a7c096d829886e8d71340a01644c64090c84e88235b11bd1fefe506d59733cdd82286fb466ee215914b06a138356e82c0ae6d5fd8e5fb310eb375540308d95b5d53832a5dae9652f91c1e8c14402991e38836813604dcaf272fc552e7682a6eaa7aacfd4ed1c7107b0232cdee00aef865c5577f2391937b76e34810f9d49fe31e54425b6f5e1d0e436e1366e9762d8295877e27ae495ace18fccfaafd850544c9be949d15d421cf6f4bb180225f7f86ca64480975c486df0eeb4fa80a4632cff28d36585cb5dc534553454ea810260983d02060caf6b1eb2b9443b1552ff73d243fecc9779635ed137a3bc8c04ef13f0329a7a5a54b2af0738218cc91be0ee63512f009435d8623ff4e8cdaf743818510b22e42b586a7e5e75525bb61dd2deb96adc95e07998a265d58fe4df4b9ead5b5f15b9daee510558fbdfae7a56931a6f4c729c18e0d29c467fed504810b7d9dfa0613d1657d9bfa5887e3f327cf46d7059a8a0fd654c60cb9c683c55439cd5186d1615f45f7108f261aff77791cf24c975120acf2b357dfbd2defafac0016525cff9400e0feeddff27910fbf2fa84c35fcaaec90863b605db5adbad0593601447605d68b943249861f8cd33c6419c7611403376a6bb438ee857ced2e6842f99ed1b4a9dc79f835813a4f8d07c14f1ef98773286e79cec1c9ce8c26e00418f1b27c7ef104fc96ea2b2ddefb46e2fec4feef2771a1d7e2643586b6fb97094a8d298de12a6f8f78d88e5d67442ed3310fb40aa6439b89c834e43ecd4a80c0a1d74ce6a90a67bcc996a7e93b6f397fe7ab2fa43711a72b84f8c94bd1e4ac62657b98a4b814d8ef2bb469165464a90d5353aa95d09b6ef4ffe
f081cab5e9dc12d743364f06d4118a585f7d455fd6e3b01434a728a768987c181409eb939e9396666560d394fb151fc67cb9cddea0a94d3e33382bd0617c95304da97994f110eafaaaff6eecb54421e01dc850dc73d77df18bbf68ecc8b37ee2fff7b6f88c139f7d88d763248deb8b4e16a8fab216c0ce88faea030f3a5c994c6e4ef6a9a68cbc9310787232198b020a7c014a1fa32c1736885603dd4921cd360bfb7dca7aafcbe81d7621dbeb4e5c094c2584c339ce70176d7fd2a6cfc4bbea6b433377eff7320d412947ac774688010369b197ec4d0471b9cc73cf9a3e71bd10901beefb10ca1c53428b89ea63427aae9ede5ba104d3fb54d0447458dd9780cd4e925f1edad33f6f0884cc47da562a3c6e2f5a958a8d8723919c4b88d067343a246c6722b6f9f82018d5213648792f38fa8ea1e635b3983dc1f941630fb3762ef1814ee3f41691b24583ddca585289568b4e64f82448b54797d382916e562b3f4795e2d726facea988249e2c3f72d44ec7197b6f783c6c7a133004d5e131b7b4d6a9557c56942ca4bd1f070a2b46c3a6b81bb9a4d570ac6afea75de65ecd331dff1e0252e0f9095f974f47b2d340d67704343b2e8832232210d2f79665bebccab528745c1dc3b28a78aafa3785c29ce2eb6a8403e4d8eded1cc2554ece0a542aa2febd711164f7d7e3a492a87b01d6b4206e593b3aa6d431e908282fcfee0d14dae4b99176a16fa32f730c2d336dcfe7eff84a7aaab1fc32ac8c2e9ab6ebb72c0306bc6998ec22d6cf20c2b6660cfbbeb064b3047c1cf650df12bd153cd7eec5dc181e46575f07c8e292cc191117cd28302d1f9c72d79b1f4062dd683ca95c3a744ac310764e56b2f02a0c2850a2f24c1b298e712374e9adfe68e5414386d7671bd52f6f472eebfdf51677ce379afe7b8085459fb1e6966f5cef45b256489b7ec8a8939cd931009c8a26642f1ff78cab06a5d25522a922cd5e4541dcdbde4848177a42476b141ce9ea035d28742cee0e5e85eb78ceb2b720e112aeb76cd0eb3fc34574c7476110b3b9dff5c19fceae816715b31fc289c0e7149e8488a59e075ac6683f237886a63a25ad23bf903480b9acf3f724d5ace0ca3a842939d4828910cc735e6513dfc4055624d68a048a626fab6b910eaf558c1b43daf1cf26338bca68b5e308b734b61624c97bf70a82430d586a6c3cf59e1bab2532fd9fa1f6fe4f757c7ede0cabea52f2cbf00cc88ca7db4ccc0ff92c0836e7405ebef2ad2e4b7d3b455d8e4d9ae575d884347bdadb67f5e24058a44ae1335280b671ec3bb9d8247e28fecedf5c151fe892bb0f6e67351752e4b1bf75dcd5af3e62ab4aedc5aa32a1606b4a0de3156b356b0fe74e898065d1e720b81663453fc97f935da3b5755a0629f38d6ae5f8
e5e77eb64bbef5fc70d4081ebee7a9f7169df4f0e11796f7a79e9128ec996b6fbd8f6fa56e11f17db4925c27f4cd3ddbdee8a50e0b0d4d8f6e527302cbc4dbeef4b0338e6ac7515c1e796b39c8e83f457b50925c39d405f4cd3c1aaf3188c5ac62bf1dd362bc8c9d4e49d3d2b7c2dd2291fa4bb22d7cbe7963b654d92643b789366d1dce842f47919a1cf5073da8916701f907c4d2f8a710c58e85b59f590123d3f8e57cdc14df41a1481a893b9f9505dc0637ba9b27657b0ceab87b0e4bc742924e6d8bf895b407c54df8622018417f9e543fe49f5b10a7a5fc66e5589304af33a20ea108ddf63facebcb20d22eac2fdf4a97285ae6d3f87865fae1331d00e631dfe5366345e0d78bb39a8077484a941176bc63f469f001cfd230347580b6226d6adff5ab112dcd53e7118925296b1a05978a703e383e6ffa5158fc36781f74501564992ab244d3475e1ee8e7146033da2dc116489b84c378e4a750947eb9ccb982a197f13976bb105c81624618c697f32a5b9e03f3675b2315fe773e4922c2e3da7f68ac225107405ece58dc6bbe2bd8947f3e4269ce245589497cd892c750f9ace0440f48057090c8a6cbd5046d3d982d634b4ad6ba41c7a38b7b8b0f91cb6898e769479fc3c7e7d2010b7fb38ef13c17db705a36455a34969803323806009a4e141a5c42da0f7a5e4760d07250d7e483ca6274e57cc2885e5728c24c8b5102845e8bb74b1c394fa7a206ec052c953967380d64c148ca480ab0edbc5da1a7a1e649c2ebfd19fefc52d81aeed7cd83f3c1d2128bd66feb99d5d8fbced01383d2abbf9be47f3390dd336c22b533a731d1c59c3bc5361d781ca15430d84f3c67d6981ab99100f53b6b5623df9d8eecc99d24e02d9301d636c2d5988e98a54339d5b516379a67d50dd9994a28fae5b806c56b353a84cb31729487a6d9851960b83ebc5178be689720a80c5c412e67f8ed55724534c92ab15c3bbc5bf13dfbff02d41ce4c9bc112746b62dea2b21d034e9a31e276eacfeeafc672b95e701ec0fc7ebd4b020a73fc37361b3f136246a0e3a8378442eb5e60abd7da2032dca9b5556aa22e5007c901f438c5e1baeb5d3ec6128a84d310363c6ec17d4ffece27f502b5c63d20cb1d11d0cfc316074faa820a03e6c577389e5e82ebe5f0976b6f5266618f5eb56986714d5cc75fe87176e92dcf01c58029d2b838022c0812c933db17dc4566d233720075065fda26f44b0ed3a46b6143fe180b7a1e6c1558f87b875aedf8c2fa968e2c925f0c08c7e0f23a9cf1b46f7955d9f1db300dab801f5672e2a7231bb2b622b0dc0dd9f2ec64a5f10c239e613247f8685369ed60b2d262c038fcc43924c5aca318385c12412b10d89753f9dfca43eff5f2be7d7d7b2788b877efa8b46ec5c9e99f9
22839bef71c613cd44cba597cf68de366eaa8874032c14d8012b41e72fd66422f7031d26be0dc4fef8f36a3c124e4ae767a665a94233812984c4466f5bd698b5fc22153c9c2f4110d9defb23c00e722692983b32ee0e84514169910bb21b14066d048960b29b3ff4c090dd5723ca4dcdebd207d4f88da831f0ee7de4aa302a06589a4aba3ca696e7d3c3e9a93af79db91f7a06b0ad825a8652f74bdb72f580e9afb31aae58807e24067f08dd719abb4e6e458bc8aa272d7a5bbd00710c43a1fea220b9022a26b574997517d04573786a4c3e09d30f3ec32f328462e26d4f7ff015121758ce1a2fd51e7f419eb6d8ac04497ab812aa6ba2e981a312ca16c38ed887b2342b0a91348198797919671a23e2b0634b523f931e48ce0d8eb840c54045d9193afec069803901e5ec1108782503cabd0f43373a85acacfa8af44ef2b1d09e4589d2dd4fdcefbf435cb61254f189ad433fa6a4e190627732ae4ef2b0c85cfcbbbaa0137033034e70a3906112dc76ec101f3198e25fb38aad46261d6019690dbf059d66c44e7ada244589c55edfc2e7d18c0ddfcd2d3841bd54d8502763cd0f4696d44686ae3be29ba3063ff6e7aee14de126dc43302f7c0b57d59eb4fdfc4903ccbd3f7309225dd90b5f25c5ade49c14334c0e00fd18b1dc611b10fbbb98c560ad4908842e765c661b9bce005aeede6461254338b8dad3203ee1b58bac1062c7e02e2aa6d420283ed81525839f2c8ff54ac71cc105042c594fb7fd7b55c14cd1247347a197ea8f93c1bbeada1dbf3e59b798c9b15765ab23f856fcf4eeaa5892c3857646bcfd8ad2bf0a15607e0d6696a8548da32955f1f8476f8a20fe4f59b3e9bf4468730b8d46c824a370d37695d1bdcac521032804c5cc66505637701e653ccbddb052f4ecf185b3605d0ba3a4fd99161973e36a35bf79571841ef7506db822dd2a5c959f36418a8dd8acb5b3ecbf3e7918a73695501ef8f440aba43c6e4575880ba3bb83e0a839254fd8d8c6b979d79337a68d218565a5dcb1518c6c82aa73ce7f54a9434ceb5f5fd503137164d74a230e46ce298b98576fea88806bc51e393acdb2abac1da23219b4dbcfba366d834d40dd8e616d214c3478136050555539eba776bf506870c3d20c4a4645b9a7c4ffa976534068009840aadae71f578ef1a325717f64dff840b9dda81b123086a47a172e6793e68af6140b1492058fecd68c4c23db1cc13d2b57f52d0cba89cd4c26d1bd580dd2a054a1d934a80b9eda8ffb503b7e3e62d00a3d075235410149e976529d8029595e4daaae1aa685f3cbdac9b26916320e75b0846d2de8673600212bb648b26e3f1709df425136f33f46129afc90839d24de1e9fee51c685db8a280a5dd4c3ac1539664cc36ffd4537af480d4082146
e7395cd6de1f8b652bca8853ec742366702afd6ed79a5920e4ad1317545266f6dbb796ace0fdc731997cd94e1bd8e6689c856adcf153909cfe882b9b02650f4f9eb8620983f0c6b95b3558682d8134a9ec8fa97e174173041115b2eae21fa0b72d0a3c7c2bf9b022fa141a8b495de8321c152b0a9a942c5baf290a234ade4e8b579238a627196fa5621b196ecbe31583517ec4ed82e8d3fb21a892dfd65ccfccd2d36c5d32afa4d4bf201d684c4b1c8c1207db455dede5b908ac63d5fc0bd2b36e11df53bd52e5ce27a9af9444a8cc4391ccc82914b79ba2971ef4ea5d5c30372e7cdbe9bedfcea9ccc8140f8c3ad1bcda29d11fe51affc74f17c9832798e10222701e0d6e93fd109cc9a12df4ee5d38c531574d39a9f4357a60f8150ee509c68e469b4eb0e9be2e6ef9099f1bb949f738fa801d223316fbb1e179b74445228c8b3c40440306e4821077860c37d6b8c17230fcf7ea48d0bb0d98fd3f1f00655e11a8b2e0a7d5da8427784a8fc6d1a2d4d1d3adcc02030b50a700788ce4078c199fc733e2ad469dd9c775d7a8025b4db9b960619f0263b7f09d038cdf85045ac2a1cc5a18364048bf242af713ac4db889489d781ff16b1dcdf66acd89bd6c7651f25a17ce751b67697739dc4d1a125fdd5a8ecbb0cfaf31cd4179249e91171ef3e628dda697afed9d09b53260ae475d59ccb45a6ffd85a2c4241fd134462cf2ec21b51422439aac77954d1b2396761f16e1c6e3242b538f23f584b95cd4b811e35a526748050a7eaa02cebdf8887d94287c99500bf9c2afb7f36ff47e17906534097b02f10620958e889d2392d30660e513c22f580a505314eea4a865d97adb9136c495403e321f425348b56ce8f8e8e91ccd702ade0bbd1efdebef8344bb9defd471ef4b214976556f59f679e0fa39a2007bb9902f5a60ba044c4316c27f6b634241acdc3ce437c4fad599aabba291bfd71c05eca6d9df49abc33ae7709f6622e516c22418e7ab86144f6baf3697bfeeee65294175e5dc9ce5ec82da64537f5f5b83f5a938e41fa8f6f97f9102fda8bcbfb6a5c58f79648b97e948a074e459b9b75a1793cf7d9ca5d7ab27cf7035ece0612d348a23c0fed509c5e18d19b1e659af237c3b9aba4fa8477de805c5f8ccd0cbf3846b6ee1bc9ef76a190952115bd08a5108c8bba76d8d762184c122d081c6dc8b4c49a7f0e16ad4cbed86c6818d4f22c03a100c9afe3675a2f354bf1c2cde1f5e5a63b95761e10d27c9482539387e3aeeaadaeab59faaa20cf595d4d8c57509c751446282581ed28cc55736211e6fabb63d0f299e39ac1cd2af1431bfb03f86e5e59691dffad4e275d4611cb2d7d3be3defcb77907c94db86d989a2ca7e19729e3454eef23b0d58bff8203b08f41b40913f2d2dd2e8c9
8af09e5aaee76030d8201640d78e7bcfc6c1171e04cb39a6bd060ca41ebbfd090883d8b3569c39fc19cb5d87c15062c9f09138d4e3d3f3421227fb2ac48b224438b12702cb67e2db161a3c771d866c3cc55d15a094f72fe314092e846256e44a1dc513b02bbdd976321f470f81f36e719b9acf22179855d36ad0c50dab79da662e9ea7f9685ec0b44817271ffe2b7254ab7f3ddc389847e17edbd33fbf789bcd604ccca0c01c60deca286858b16dfa17c5875916e0159dfd4f0495c08bf6de51365e2175e47325d5ee71c96ea8ce24c4541886e0854bf7dd8a980aea1aba9add0316f3d052a2eea95c02c241523f3274ee62c883c4ac440d7626cdb4f0aba7a4ea686b2778cd7d7be220357de63cce55a3928aab4c200a2cd65b04d831ba0b54dc91cd6ea410359512130d2a0122f3c9752ba6210ea3b115caf891f0a0a7ef210d1988324a9af926cea8487640a473aefb2e3b4b9259ca4da66089d7f7800f87cb2bd068b8c268dfac897b9a2dd1ff4ac2b19a48b7e95a39ebc6afa2dceca7928ed8e43630d673e5c7ba1fb4afbbd40243ed411b6519420e738c24ab183f900872f10248190358636c789b842f156987d0593fa7cb813f5c688652f871aada7cb5a9c2e15ddedac147151b4d5a7bc4b33cecac961a3487984918868515ca73ebc647945fd9044f3c085b184b3f9d333a7b74927fbbe4a0d846744e0fd6bc36f9381f76422633946fe79e64c3fd63e30096ef400df8cd8c884bad1955b82c013c1a190db92699d39217e46d3db284f35b18b782e791d722d12b85c8a26ac98e9dea8356f9d3ca58833aef4ffd883953f24c96f5351438dccf33693230db5d72389905b49d7308cc30b805fa968532a976009a527bfce9ea921ff4ea9723be5b5972ace8553441a4dac7f0b2114edd3a25666d70c4f94131a63f4521dbd004309157bb32f9fc649058ffbe747bc3addc523f805f1b34787b0f446c9ed1d1966550c7d0c10e342316c6b34899064d0d2dcbb09087ac20572103ee01193a3eab06c06e3206cd60bdbe367af81dee5ab3e5dde9836c558e54c9bb6aa306a609225cf25a65b575fa97d9c962b72b798e9a7fd8192ba879964cedf623d544c8929af5c8dea56721d25578434e2b234289895c697c9c1bc4556e4f6df479a837d1e9132c011e47f9e23fd27b70e7601fdd24f28937efb9e46673b9f56914638c793f5c3b625664f2b221afb3fce5aee92a84d45bab5cda58c49777f82b2b1c8293d727fec90dd73581b087367add474dc7b4cad75cea1e43619ef3fa1b35175f5f0889c031c2083e764b0f4389fffeb307831b73763e73d2c3112adff579d4dcfa1c09d3f2c5927568a70027242e6bec83c5e2cf7e125d8b5e4ad2ec339fb79bb15b8b9a6db0ea9408fc6fb8
ca6efe9ae0c8c25900d859b17fc44c4a262c7a5e06ae9e2083fc6dc36bd08d648e9a1a3d8fcbedf12777d690ff15dc7096e7c8b33e71b19005c9e1b20d2c2b6f5c7c1204edc691b389b6ad04f896ed297922bb92b9e6d10a2df2a83dc71c15d2010b595c72d5677017d6d7938ca3538d671e13b8496583b4f9fa59fd481f1f438f92b01a6c5f7169d44b93c0b6863c1a183e871e7f50e26e6d41243a1c509d423309dc886dbb9ac245263ae9d6024456e72b57e17cb08ef00f4fa4dd9fd27de0685c4c6c680ad654e3d81dbb450f0a5e7821412d442c2034093e3fb10234e6a51b98fd388eafd0eec66b42c275a3547f72c7f3d16ed81395e9a2664faacdf99bb22327280e518e4ff047451e6f7420b562c68877c96e129d0cbe18896aff48d49da028dc97aa0108da9b29c540c5238d676dccafdf463694aea34ad4f513b6c7a58d071c335ff1313d41b7cdd902904b8c9fbea2ed34878b407ebb8144f603683ce4ce61eb0690a00d492978aac3a0f3010b7479667811c3332c06553c14809c723316c84d084530e93a63bf0b7658f7bf367d29577236e23ab658a685f2612f0216a932a24aa4f70b8d0609aa9ca14e4d91b8ed9fb62864ded646012ef675ea359117c07f528d7dfb742aab9ac892851e97c94f72d5c34d4feebc7f67e09fdc6f633f050833192f15a7acf4f8c8beb3adf3860fb26fee39a416ec362e4b6d9ced09fa57b3d5b7fb7de018e4fd93eb65634c08f6d4f1e2f490c2a8b1be2794a27de0dbecc9949fd1d5eefa0fc6f0033a2bdecfcaa267280b445e92385d2edd4c2b31bdc5d54ddd6cb30b3c370a893c217945d346d1c5b8b98ac754a01afeba6f5526939ccfe9f2432461a99c7b9b44a3983eb65fb064c32f8c72e18b8f6e42e72a1bac21b3cf94526f81089b235794412d1aed20f48324d742d4079e9546f495248cf7f42839852d604598ca2079fe44b125ae9970973b57c156e83fabe6d64c9aaab5c243d1dc71520d45317b913205979fe5bc075b0068d8a5ceb7c8ff9149c763c22b08d35a09feb8156bf7d8eda212a102906e251efcef1ebed894556f18444a0938b4c050f2b873505bdce97cd4fe539a944b94e281292f38850dec9e9f108d3b2d5a83837d114bcb3d6e6511629f310d194328eb05a7b88e7a053e97dd92881c89a1169e7d23a4fa1ebf532eed2579fc4482b9c93da2b5e9619f289f346160996cc61a3f380ea71b25e777af37dce79039cf90a2bf16ddd46733fe9c1cddbe7a42fc5faa7869c96ec463e9817495bc24a23cd9968213927522ddb0d6ba5db92f5736a5723135305a6c083a9bb54da7e43da3ebb07066ad94e597706062118fef17e9e65363f71d8859d30527a495f06bb025c1d26c6fc80e9b140c7108c57ee5583063bd8d
2a7efe6a3026a79f2294e09ce980be8ce1a017132ccf48a63eb32454b12506a6099d4e310f07612e77da46aa0caed8fb0446fd6091140db2cb1432bb93cbf681cefae9d849fee6b0d87898d52d31a209ca6f168b6305011e2c9a55fc5ad2237d7c2d06b98e0703ff2a89fc7af8471aecd2a6cc0a4745082db863bc8d46209d51135333a03b328345b86d6cfc23d6d7384fae5d8546f05725ab139e2c25b0dd9b2113b2774391aa058cf90915bc97a94e74ca0ff6785243122f12decdc48aaa8ff27200007f35e928e62269f7f07407802c9a10648a91180d559c5c37cf3f425c9949b9e38ce4c99b71810babe45344d929906776a66fab175e20bc5930f1dc4b5b888301028b6e0f92293e468d0c6b191f0840ed822c036e6257bbd4f0db8e931463826c0be855add67bff5fdc6d4de7347fa07e63d68f4b6876774a39dff1ae927614f8a879f128713e24b263850f1ab3176ed0e9ca9369af947bb8e862e927cf803ea7b53b68eb8c5f87f1cde2399122b7892ccd4071610f0873981ece2ed719bebb0d508037e46b95610d14e9a826549cfedecea1d32074aa439592929873b49d9434f35646adeabc8b52e323ec2dd6d0d6e27b530361fd8bf9e4e3a0a58e3079dc63156a684bd5cde53ba8c9c51da274bd61cdab187a3fc0a84d5005319f05fc7ddbda575f73f3178336413f8ba0b99cbfdd5c350a3a925260284d75fe06371716f951d76078df7cbe6f25beab46b8f4222c74f68822d6747314b688839540d3bb9bd0f45a028e780fe2b5c78e28dbce66680f1e57b68d6088101146aa9f976bad10933e4f5481444a46d40413ae5d00044a29dd3760c712c04771976280f793ac5bf8cc1187976096e4620d646358f207a9166b9d27030721fc00688a0df926e6f4944ba6e78dc862a8e55e3d1a20d2993d8c8410548e9bf1b6efa181daf8bc060bd1af3dbd8853d6d3f54bdd1f6270b20fcf7f90310109b98f6b366a4ebc6f717962e408bf865d0128fc9ed607f848d376ab1c50e66152f74916a28539a762c75387d144bdaf4a0b8b0e7baec532e8d531501674a8727547916fbcb2e45f9c7d41063bcfec3de1b0adee000e555397ab16fb0977a8c3ac1385dfc89eb7db5cceb9109077d36ca9ff5fcf9feed6b985693746a95ba34f7d2875f61ee8606302b6470f8ad17b781daab036e288e5ee083a3a36eb116a34f5ad97e1675181818289f514efe868feeec3b48b1a574b9405668aa536e572f0e2b46fdfccaea5b2f65285f6a9a05c020bf440f5db912c8ac289c67b9d724225eff88366992f08711f35112e66b765872d39b54cdb5c4c0719b2c17dfade7e2f19281e6ae7885708ee8a8f6f90ce79387e6e47b33f15f212c5b386a5aa5f93cb597698dae4b5999ccb4d652a08c41e
d27c45d2ecbd112a679374ddd6606ca76ceca9ab08f7f648d248622ddd633dfc121f9470930ae058cfa9455ddbd25a38aaf48f242ab6e0dc895c5b2af0d9ab0c996df526f144cce6297af5f3ac5fa1d159f52e072b827dbd273afcc6e3b8fa1151acaaca5965a4b6cf5b0ea6275da3208159c6bd6d716eb61309eb4ddfe1bbc4ef8d013d477668cb3506ebb4724ccc72affdab79dcdfaaee55a5946b4a3f768dae9fedddedc6c5712296f26c025ed2ee299cd15b1e692c616094f500fc53fcd9838401c0ea6b6ccb883c149a52d875501ec2e647b1d6720a8227e33cbc1f429ef60103f3334e3de2e40ed4a59d811b8cc51a695de25ebc66eca519222dafa22dbca634220097b1d3f9aeddc91d11019d7215629122b4dc6e3211ad842288b581c31e44fa79e1f7855d8fa77e7a224cf571aa3c16b5f4fe5feb16d7d1bdecc543b0e8ff01c677ec6801e87241ddaa02a5c83bbfd1d84c62e269f6ce8a708e693b86d8e5439f129431a4c1c0bc6ad47784c38e1cacf6c523da23f65a76c264b96aabb50aa9e299be6abd1c9d078ac3b2c5f2c3986b5707f143513b4ea91a2052731ef5b48780dd0cc6626a0f0c358454f6eb36df7caee6f8dfb3ea19a0ae79c0d1587140147be3efb2a0da1305d5fe056010c518e3471572d889304c4ce00acc78fed04a4b888d5e7e57d6cb5cf4e5cf1f8782e1b25ad948eb3e443db75af9233aaaf6659adbe0ef33d4b3ba5214b85e656719df2eba42235b2e268f80e3c5971d28957f8e93f5b04a3d5eaa607fd4bb838ae48661bd093342762cfd1ed60b21f04f5b95c3e5426ca6127b04810e2ee25bb56ae81d7840328d8d4f7d1bd341ed58b102d9860806f4a4d117c044f472c85ba422eab084faf8994cfe0a880bc46dc9c1a8c11995610756e2ac50c5fea8ebbcb53dcc76b1944ce364f8878f42310fe0f8cc211c62f627d12b20527dfd84b78c98b1122050cbcbdb70e08010f68294a6a805d3fab97e76cd695f918e73763ac2c3dfe4a8d75db87dc37e2399fd854f3284d29c7bae3d3e31c4375ad9e047f03a5204c2ba93b6025c112ea2c9fcd731e380a8aaa42860c859c2e2cfd333f0bee741e21f78776defea86e862711f0d0bbf64003ee848a8d1a12dd00c024cbee343d1093e653555c033c198401caeb951860392b5b1eed6200828aa310ed466e41d855dc4231464adc2b6b6fd66e03fd42736fb791387efec28b37d0686272a6bb181a621aae7be06866bdc1c4be69e94642c8d3782f5ab7cc8c890699008b52a11b149a517771b93bc2ae597dedaf0237ea8d9674e26fc75c3b468e04e2fc317d03484a75fb274f7ba1617bbb72ec16da1fd4109952d052e9de7c00761736dd17e70db0976692626ccf8bc9e88ad6c25ed88a2f7c2750add4c
eb95744f690ee5f2fa423a2b62ae57c1105958bd8e81025c9412fa71f5d1e81bd6cffa01f489fab7e90ab8a3c8aaffc8e3d594beb254c460347196473117ec2a416dea464eaff95da6cec26b5535954901298f11932ebeca52aded139f2d5aa2c24174e2f6c701ce1f4564c60861ce3b9cdac1cfecf071295c5ec581f0f075096fa457373c124b6c8cae3aaf915e4701ad94ec9c01e5ca0552019bd7f107a7d5afab9e4a5e7cc7b4c5416656ad064f4a0f89afbf7c5b884b69a12fbce8aa73a49b2e5c5728c67a7396bb8341afdf52213b2f7f8e84962cccbeaea63a3c7b24881ecdde39cc57b4f211cd57c6f982217758042f61b648496e62b612b7b8bbe1b9f15d237aeac42b54d15166b5c71eb27ccca1fc9e050adc62a267eb82ca2144ba323a73aa11e2fdaa87695c70316754faf7aec44a49b668362b0b35e884019227e7b9a35e8841e64e0009c713d7f3e4a74cc3feaecf4c99b8d0ecd85c8ff89771b63a38e3af990641f28fa7e4ea560577d600f43ccd467d6a347fef04d392d42f8e97659348c68b41299f94db4b713d61868adbd20a4db74f61bd0d1e7846bfc8b8f8bb50bf50c2fbfdaa87328933741aa2b1ca50cb759c1276f1a7930952ed656921f5ce5569ed16b31b2a1b6009c784199ae60ce2e35d573808a195974536f220cd14dd634bd06800435cf1219047f6246c2d9bdea5e489ab4862f0cb0f01439ad2ad1e2042b3f63b8611a87efbe842613c21761de4c79291a8491092c20134252b8e900e5d3cc70e75d32cc41452c5c33b66087213c34f67ae73fd56a183be858f1c3bcd73d814bb9e3f78cd18992b0ea401d8f25c3b60c055df8e6430b62899bc86167d0b5e2bbf16d75bf3f2b94c26542202bbfa0abe99be1a07c78140f42c12f51576007bb5439966a47cadf5c4ea624a75e7a4f01d8733aee57e3497c013de4a33cf54a94acad9b1aad837865a6881db9a725310eed49581d2223f2b0984757bf3fc5122c5dd572ecc781b48fc508122775779d2b2849e11684a585ce844d21352f8d35ea53f0f34d772bd9ca76cc4dc33aa3f2e72418c097614fa5260eaf3c2d724d3599dfa0991a9c0eec9c4d550886c85e1ab2541e9868a36afbe0d9c07c93e44c4c73c66f88e770e5d4e4ac331fafc6870c928fca85756c444c6e8f6cf75865859abf0cfecc8e89b8c806a2e6af7cb752215bec6201eeb41759b27d599931dc2ae75d605b3e387bf263ebfd09ce2154b81479675555ec74ad85150f8eb8c1b3c4f31f6409648f9c1b4678c82e8e2afa9c887f3210afffed160d1634ab0259e1bf5565d8598605a435bd289afbbc12034f67199b67bb0fddb4b9180908c483ae5a8eed16221687e1f524d010ce5db78d1b999069f225479fd6bf0681c7ee95d4665925b
c96399989b85284087e67d5a070f2713feb78bcb91bc019f3f19bf3abb7cf36ebb98f09fd64b61e2bddc9ae6335da48ba85b62562726e142bb9d9e5c8f278dbaa0657dfe3e410f03211a072555624d98790aefe8e7b0281ff6af3de79dd5a414632f9d4913a480e9cd6990f94350304f853ba5679a4cb3a647b98bf1eee6cf70f77581a1ff82a9ffd7296e8fd172d37b1b0d1621692cbfeff8de18658f04af5d5be08bce66e5dfec5989b674219f9ceb6a1037c80a8febdfac63d482debd34c3057a677420f0bdd66e2c2b25a9c1d34b76b4a998ad3ee21d1e49f812422c83016c12c201ac2b0f07ddc00638846f215bfa6c575cbfd577178eb0282ade2c459a13386f5dee8a7502321292a7de077f4fd12967b8c8055596e7a43287639843b6ebee58d463fa044562ec2da7f9c2a7f28cce685178eddd3b9fe7b10202997b6b170555a71555cfebd06cba6bb019f8cfac2ec5db3b1d1ca88acef9accf76b6a74600e590a0eba1c839d6a577d3877e7d6d010b04fc58e160ec9733bf200a9e0b24fe8ef32613cf2c7b1515008b8833e34d3967ccbc8bbe30fd1810f23bb153b814392eb37d8917e96260b3cb16895ef13b96d72c81a14b908224571680dd56d04a59a6583a232ec58e8cff16f6428b5e3dd19f362992608aba912b642aac9950777627ffa4eadfe9f31b73c3fbca11d2abb623b732f3d7c296806151257c9f2306dee1c84eb05d586e7a82a8750905716b3e51600250a1e3b4bf274130a1bfa47117cc8b6db3741ba04d977015b8ee250c3ffaf859fdf0372b88fec188830b5870f251889584333547f3436a548801fd3236da2ccb2b504f85ef1d259bc3e00f0ced934a4b297ecce0d668fb3ecb524d3ff4380a7856c7060006de31931d0b26ec1d084e0dce3b9a123741cdc326b441131d777799623c6340410c331c7e8a4a8175d7d250274cc4ffebd5d46d855bf90842888893c348f0a447998e3aaffc81c9b65e3a772eca5c2f0907ee13ab6a2babe99f388755fa3ac9dc79a2ba4ad7a869a876448ed1d4dd6a8c678065cfc90df8470b29c83719bfcbec7c5e3244a665a28593ad42ab84663bccf570a8e8b783565f909b5e6e8cd69ed6f79fc945ce5d845c998f25b9dc118c96dd2c0f592a73497dbd9e050632c8d82656a71460d0ae7f5f38636692a78083b2fffaa517dc2dfe18ae020e6a5562be54ed9046c7129b3a57dcbd1917efb0579fa9a3978690fded8e52e4860db75b2a93c77316a6e84df4965291a7531e2abc0fcc0d0016acc29680baa575cb7be1a03206236310eb5120ab4069e0f8f0cc3f6bd188ca91963eafc2bc66b1a42f8c49359cf3171a72eef94eddd8aab03f770cb2f489aece4e09a85fe6b9790ced5feced19e4cfe6bcafd1a5d99fe56b78f7
a14fdea11fd5e331e23191a3f74b32d8ff2740409f346aedf469eb8aca16b43dcc44c400ae3e6d1c4717ae1f18a2f70830aa0c4d5734922374dad8c006ab97e02a4263999ecad0b1e9f24ed0b599467c962932ec610e63c0b3ac845f5d4d10979c92bd884669908696172609e0da039728baa1f0dca8885d5439ca420e87f5c449908b2a5f69b65b60adbf5d74b21eb1f4e0d79558c59b4499c245a9952de8d3a51021f2e77c44e06a489df3b72d28e5d03ddd358ced4f5a1fe057e58b86f9e717cb9001cec6d6665cc0f5b9cf89873e6e7d10355746e99494766c937683684312b630337d1c411f3f2eddc52a8267e19d38ee12c810cc4e33193e26790b13d1847c56282ac86697996daa386b06ec2ceaa97fac9c018baf644622c74546177267b053a82292c1a1cf194909beba3f2670acf1d095b0caed4b8da2fe48c9da3dc61969d938707a62ce9cf55b89ceaa04a9069d38f4e89db794a335933c5b45fe215976e76dc71b7719c2ef29d06d2dbcfce0470007331a221dbce6baa3f418f989d7dd927d343152ee310d084799300e8d3801f9d464d9bbd5687e3203cfb8e589fbab39ad4851b07bd13b29d7f4b767858d13c5937a482207470f673593aa9abe339b3d63b7ea4ad60e51e7f9080381eb07213ad1996ba7bd28f8b44b7ea037e0bf9716f56820f908fd4027249df11aea06df25b3860cb18b68a7df5ed0d14730035291346049e1e5cbdefb30719548fde4f986bd9871a71b5bc7f6e03ea4fcf1c6ddfecb06413832ac27b08d203070acdaf432bafdb288908dfd673caddbfe41af8255ff7106d39db8d003ec1abcc3000bd7fe1daec2624bbe8417f81150f20a8a48324100ef1570a6de7c0a21e16f6991b23016671bc96ee55e99a97a5a0120af8ecb816137d5f40b9e71d56cbecf61569dcd2f850ede77437be06fd85b54d7220b9bcd13e682a8227c7a05a4efc8d258b0331b0f47cf45ec370b491d6b2e4e601e50483480d9437fdf570b6be69b28b964972fac047f8aaecbe567c8ee3d583a46d5b58fa3c361dd3ad73c91727e4d0594f428acfa977206c20995612834497928d507eb62aca1752a8f3048c932b9f0f80f7c627a87f2b50d581961b8739bddfe2afabb1c757f366acd1e639de808409f598755dad254c60b5aefbbdcbad52f72c756e5e4b286a6866af769593f66256fadc939d3d23d1db9096038b40ed224ace023f2e3ea84fb4092c974cb44ffbe489f0ddbdd79e66281ef9c44e81781b849b0d3101c17e54ebf8bd69393b9220c75c7d3c564862ef35d7dfedc855e2ea15a6159c6c2bd01d2c4f3c316ddc43f937cc295fe35365a69ffe68a2a3bfa7eff90c2fe8563f6438117c31ab48cbd5a3ef1c7a03a03a048be4a9fe0de1d6a86feb144731f4e84
f1b509db65d35b1b8ec3d0f462392da10694b207ef1d9fa2581b572f9c45012151f039ebed848b3fc211b2b4d6d48266e8bf800e68cb1165cfb17cb14af4fff107e57bc90b9e32006dd090ae12ff39b000c474f77da32549f51d07bb23d233485be9143c55849b5fa241337c050d48d88e4723f7f1032120cb609c584cb10cd777404556df84cd095c4a9668d392cb9a6197ce04e4234d48b47f8deaad83ee95292c9a9e9d42838c12e34046483ebd821284ac349fddb3d89c0e9a85716ca5f2c60569686d3580c6c7bce0a0ec4183fea724ad02763f66f85992fedf49c67a54c8ecc5b47d6e00cfeaf23b2425b795be93d65d92fe0ac761cca8b2feb4fd7a4bd21bc98a7328f178a61aabc2edf843e23ee94c757a457d448f3588b4e39cb14d855c35372c2060966df0e3382afe2d18988ee7676511e43afae09d6e16b50bfd290c1202c5c82520bfadb7b9eff22c2e9d202e7606f23182c08f0d405cfda6e8bf4b222a14a96015602cd77b2e0af5027938348075115b146166990bdccdaefa94626e140f8ea6fe6b51fb38fbf7ec39b89e68174db08d243a5da08a573545993db451bcd7462ba2c308849e6f54fd68eac003dff1971d19a00ae1d326d9db706197ce15397066ca114645ee39bb1a950c068908be503b2cf3ee74048dd92808e07172ba1362b3ad4103953c990e19b4581c54b5a240d90ec56150fdd5d9d1e497090941b541a9fa202d09f2790bd29f53fcf2adeddd4b4ecbff252921feca36cbe51e5185234641c8df314dec556280e408ad6605cb82f9fa5cbec32b2d478e876b4c3bc5019c344ee2f0bc33d26ae3b69e349771a8069f38f879d82e1c68f84d44516db921ca606b6e310e9ef0729b9fc76eaff94d3e44f865a6943eecc5ea1dc097e69e91344f7b287223fdf25ed3512e1fa34b0879ade1a2786571435e71d3fab19a6ba93b5d83e20f05afba10ab48ddee2c6feee813635318ac35bece3a339fb5c2278df5b9a6b7859343ff5530a2dbeda669a47a5eb0efc46c148ab00165563023536cf71f189c6b855ca6aaa056233ba82edf29e82d96c6118a0e6bf37d2ab2945ed1904f1dfd19ede3dfcf257aea6d560e3776159ffc384b3540deb1cc38d1022e530c2d46557a21eeb744ed5c00843f7b6d5953f1ff4770d26dde34c4cfbd308074e0df53264afc5a3a7ab8a57dae296c39bd72b88ad988319ba9e13ea529783d5c926d2f48599720695fd174f8873d0f660f002d8d0ee134271450c12e9dddb641b240795c2c09b958778e16081bc9180442c45fa916de16c83f16c50092eef58a56191bbcd906eb475b97d37b7f5cb00a79a9ad66a636e1052f9dd1e75d02a5af4840dfda7eac68c749bb857675e67b450a484d3e7b13a77fdabff0e97dfb705e5f4f
6cf1e95a5f6cc38e099634a020087f868580ce2ec0837525b8c58f08444d7fd4333a589c0356de22568b4fad8766ee3325cbd65843f2c713ecdb44c96411ea871c039915b546ed6fbafbd51805ac48d06c6924d3f7036e1814250f50f27342c8c4ded3e68b6b3f161d46379c1088a7a123f48f0e7cb5a348f472eb155956fe232fd301e64f341041683ce3b25bba7f290a10282a8dba3a2a3da24461a5be148c2241d627889adca5acad981583fac81d0ee4ef77038c1f80db9dfe740720904512691a9c8545a9d173c08c2e8599010c972c2c34287d91ac7803a5700a0d6e29b7774f8f487b70cf8d0ec9474443e2c0c051116b16aef491c3945a65e6ddcd7931a7259e56902a2866b95d3c0bb7a3ea61b1f3b54ae56e6a7366ea895056ea0d1c251cd74f7b82b0d47464826f4aca77434df3d909271a825b57890cd830011981d95229cc0427cdc97758ddbc76d6cc77ba06c92d19daac8bbecbf55535e98bd4754ec06a6e632225c43bc46068baa688636eaba53926ca093a7addcd6a696a902ac35631aa43d9d66f77270cc7bf66140dac239034ba304e1aa0a265131e9fb2b7f079861b0e4cb9c911ce82ef0b685002476baf26401dc8cc444543129f82ac6b103881c596b19d9eba8ed6b230c17914d5c34a0040c18dc54d8c4b637ee683637fc5a82ac1cf12691bb28fc0bbb307fc032ec3d2b06eaec56ed769b5e892816c7350dce89551e87918f67a117c39f256a368586c78c2e9614e9658161511a8dad53afe8cb9eebe67c6596a90eeea1d3d2466a4d77a1129c0a4409b98d8ac0b925c4b2b3500665a3cf4ceb82cb0b6732eea8a796f9b79d2ea49be97066bc1f606d9f1f59f41d2acbb878a0783093fc4ab0ef866ff60a6a1a58d3cee90307f09247b5212f8709856251ff5d8fb77657110bbb3f3aeff07898f049c821a82c11e27b0c176a9feb12de5d08498018f7607156c5065cb56bf9d6867a4495f26a07e0f01312c2ee897b82d8eba0cbc473da402814dba727521cfec6afac2cc59cdd6a75e1f8f40585e5cda51a7434a81ccf4b7de33c663dc174ba973cebc5a56831005d231c719ea34ce42999c471fccfdbdbaf1acd2f9c16f258e32c70511c475ab264173246ebf31459a05ecb4df443066b61a243903e80ff907af17a96d7afd9763df8f8c4fc49775bc805e2dc165bd6f1c4e06688521557ed9ddb6860fbed1e32957bea1174b3a9aa809d7fa6301fbbb6b3774cd856095f14c6378cfe98f05d4f06fae91769165dd0adfc51bf8f57d701ef14a99d608db0a104ea78fe5b13794cb8529afa5352d1dbc8235d96148c8f9c2e29d6e2359a8dbeba56c9376b26f8384c66548979f4d982fe0652cd86bb60e6f2463ec63dcdd5f93d4bfaefe48f8012c63b32ad3c02e
c9088896f6a0c8b1097c1ad911ada7a2d6f0d201a28b70752182885464dd688535bdfc045e8dafbe34b20eca00848e757b4a37de219be5a5fe7a4bc5cfaad29ed92e9eda2bed08407e0d0f53caf6b3590210067d8b9ef16f9a8f5612315dfa415f1efc8d7349394143a149480ce3ccd60ccaff0d9a8a797820f41b431ce3afc4adb2e07cde16015087e09e08bd13471dee960db35cbc3b53c187a5bca7ed50017e09b2ae2c837b1f6557753c7f5b004332ffa2b52d8a2269e7cf9cc397c6079aa5add61d7a560a894e71510e104f52a93622e34037b1db70a05bcfc546ea2ec7153e69a8df18fa9eadaae2c1438710477a9a23e0f7092c310c5288e2d39d362a0a33f9e3d8d9792b51a71d9014abcff66ee509baa3dad341b1e4b6c601a2966f77172a4df0f32170f3386a6600b0b63699fe21e26eeb475507e99f666e0ac349b9e23463450f4fa4498356887d9e1c5f7d18ade51e526d27ccae799d6775336ca9ca8e54d707639ecb0618a3c675533494e2435c0b3780a66defddd217d2cc464014bef8a051d8f292abf9e5cafa78c600c21ed3d40ede937b1e162a1e14757d39d77d4fad8711b6b46ae707b82ced0739f9fb6bcd9b557982e89bb3af5f3fb5448ea960f454f4475ee78970acda37501a8825a04cecf3e544651eea8933379da3c3e7de0a875d689003c00d276470fda3b6ed6473cf8094ab91784d1c0f9468379e8e9729dc1032a5ca14378f8147409f13cd6994de961e2245b35c814596087625d3d3267fc0c1e5614a4af94993091ead40bc9e1d3093228b70c188855ae9e914b15aacfd4f83fde83072af92b2cc968c93cac74e15322eaff32a7bbbb982fb725aeb71f34bf16323d9c0a11dbaf3ab676a9cd1dcfc3f8a0c66d1f082f23806133002c50b59d4513dbe3419d5002263287ff47abdba0862341effe669f26b375337170c8e0742113e1063e8141c4aa9eb4970471f3187f581b71e6f7fe2f8043d065620da8a066d112fedeb33525eb1061c0d0fe9fb415bddae8ed2eb5c3ae6aa0549230e436afacaddc389b2c66499d7fdec2090e7e13560ca0a64803554c7cd9cfcc1cb48427cf9ccd954bb7446c887e2756db2882ff12eaa64efae3a24b35d1d0402922efe90319510495420301d3360f4486d3f87e3dc4f9337bf3fb4e3c6a82850a840153a1936e7cf74086757b72a8db19d33a62a29f3dd4fdef454d9222031aa0958af21851b66aebc09a5c08efd204f3ff18cb1055e8181d6630309fcc91c0d6daef19e618a3ee23e817a586d02364710cfab0b9f2cf18502a34e67d112f1730d44ccae54dc221d7f3877bb828e7109878109f8e95e2e1407df4e588801d25d9c2a1c501e74890631e9a92d823ebbe6b5635488f7d48788ef77658e3bbaf287536b37
d3a7ab1ec1749656f2ebfe562765e71dd3e1b895d9b5c315fcf2b3a063c57e74ad1e7586b293ede4c77732f38d316c14210a121153fc50007f78ed64a8e207e9d04b312ae7f97a946c74d2a1181b67e845c3ac6e340b2428c8a5546679707fded3406fc221900b118a3279e13b74926c793e27fc4cc32ae478b4421d6eef75d3a273ff61d0e95b4981e8dd57e16bb00e09bfbbc2ce60cd844a9abb839b8b671fabddfd6e86a30c0a24e73c3c17770f34641951e5dc73ca11d8f8419a7407d483e0f5f1714df0a1775574b5500e8a5a28c655dbc28d7a1ca4b83fd4ebcc7ef2e4994c97c87659681acebe7417328c8612e8570e7ade7ead7f4fc711c9c539362779e6be525bdf5ec037f670b5235c06a1acd89b4ffc21668a7269cc73bf6d1399852eebb8b1dde8ef072e8d80832ba32c8e9480da2c4f5c3209c557f31beef41c00d22ee7c7e2c1bf9952ba8a03c1afae9b4aa63135d2b131f2b2804afcdcc762e1bcec8c8151f471572888933ce97dd787121ced446aa9718bf3766bb6d8a752692c59489d5b565e1693aa0f67b352f915808e415cba13a9864bbd33ebc97dfdc0d357d6769f2f545cc6529c0f634da901ae63bfcbab0a3896bc43faed6a6c23bb4e92f3d669d2e0ff485287cce322b98d02866f026cc556ec8aba6608ac2b5dbc29e104ef2e28d7b51ce63110025bdbfc5d44e8aa7a04ecece07b9860618a162e7289e8d672bb9b15b6ffc87f738b0c7a2b733c5794afe58b1beee4b6780ed453bf2ef2b584dcf32bf732c98fe359abced05fc115e531b088c61b0d5d5058af10120581d7db192e13a5b7b17874f000343aecc8d5005b91b13720bc831de5f1de5e3ddce27ba05213cd126a7cda0afa9745f498200269a5736f63b0faec36bbd646a868100c17cb7f6639f2f14b6c52198fab04c1645bed8763799acf8fef62b82fda1825a3379c000255002788d686695b4c17be3931e69db8980d0216024e9b7b0588cdf8c8102d11f55f971b3163c392cfaa796e0b85dd0bbacd6ca50b3ab80c2e90fa0c18d3526e05b2a46c2eab823c0511b43c71122d533e27ee6d6e34706fc411c67a3b87440a3429df3009996743ed3e4dc244fac98a789f17818a926a0aae81ecde260982b80acc299f57a570a86ee28d0414edc91fb6d5f9a88aeb31bf22270bf3517aefe1140b05be97123cc43df6e8e8e4df96803fdd59715c87afcf0189fb5448663eb35d2c4e5b13dd0233a95f8d6187bf0d5d3ba35adba59e162e877d5a0397d9495ebfc771ae68283be15d883e91b81b1bb0cd8da6c300df7e2bc8a21094cadc974c8270d8ee37fc7e7501a57eaecbc244ed61cfc8d556e38c0611a5269c3b930ee5f37a9771f0c152a5e28df07a104360c973b9a83d3ec5c0aa012bff141842
e9b68222647c7d022753dbaae024877f421ff36b3721c26a39b3009683c8c510ba0ba8b5dc1033f9b56e9a43b3141a92599378622a2ca8136f5f1f51cf7b7dce7d043f65f8562b33c4864adc30e7d4c808b10abbbd92f94272b68b063f7d7baf7fd6eb31cc76690042233bc8dee7253f89ce23de7a535af022dae95ac321694d6ce311744d9c152e4424a0a502d221b2e602ada71c60a2f15b7086d75867476b0633063297681fbb0a3e154efe552cdbd9d3203f2e447b60b643b823ea12f504f33f6b6c3bd20e54cf38e3c45c5d472814db60741687894e6cc3c78196d5e722499d202334fb742f14dc2ccb7d114ae0c4cd61ce2ed0cc7fe25a395d6b73c1dfee9174e59d129e7f3c42f93a246d918028d4e2dc804438799 ").unwrap(); - let signature = ledger.sign_transaction(&address, &huge_tx); - println!("Got {:?}", signature); - assert!(signature.is_ok()); - } + let signature = ledger.sign_transaction(&address, &huge_tx); + println!("Got {:?}", signature); + assert!(signature.is_ok()); + } } diff --git a/accounts/hw/src/lib.rs b/accounts/hw/src/lib.rs index a7da8da45..b36f43647 100644 --- a/accounts/hw/src/lib.rs +++ b/accounts/hw/src/lib.rs @@ -28,15 +28,20 @@ extern crate protobuf; extern crate semver; extern crate trezor_sys; -#[macro_use] extern crate log; -#[cfg(test)] extern crate rustc_hex; +#[macro_use] +extern crate log; +#[cfg(test)] +extern crate rustc_hex; mod ledger; mod trezor; -use std::sync::{Arc, atomic, atomic::AtomicBool, Weak}; -use std::{fmt, time::Duration}; -use std::thread; +use std::{ + fmt, + sync::{atomic, atomic::AtomicBool, Arc, Weak}, + thread, + time::Duration, +}; use ethereum_types::U256; use ethkey::{Address, Signature}; @@ -50,291 +55,311 @@ const USB_EVENT_POLLING_INTERVAL: Duration = Duration::from_millis(500); /// `HardwareWallet` device #[derive(Debug)] pub struct Device { - path: String, - info: WalletInfo, + path: String, + info: WalletInfo, } /// `Wallet` trait pub trait Wallet<'a> { - /// Error - type Error; - /// Transaction data format - type Transaction; + /// Error + type Error; + /// Transaction data format + type Transaction; - /// Sign transaction data with wallet managing 
`address`. - fn sign_transaction(&self, address: &Address, transaction: Self::Transaction) -> Result; + /// Sign transaction data with wallet managing `address`. + fn sign_transaction( + &self, + address: &Address, + transaction: Self::Transaction, + ) -> Result; - /// Set key derivation path for a chain. - fn set_key_path(&self, key_path: KeyPath); + /// Set key derivation path for a chain. + fn set_key_path(&self, key_path: KeyPath); - /// Re-populate device list - /// Note, this assumes all devices are iterated over and updated - fn update_devices(&self, device_direction: DeviceDirection) -> Result; + /// Re-populate device list + /// Note, this assumes all devices are iterated over and updated + fn update_devices(&self, device_direction: DeviceDirection) -> Result; - /// Read device info - fn read_device(&self, usb: &hidapi::HidApi, dev_info: &hidapi::HidDeviceInfo) -> Result; + /// Read device info + fn read_device( + &self, + usb: &hidapi::HidApi, + dev_info: &hidapi::HidDeviceInfo, + ) -> Result; - /// List connected and acknowledged wallets - fn list_devices(&self) -> Vec; + /// List connected and acknowledged wallets + fn list_devices(&self) -> Vec; - /// List locked wallets - /// This may be moved if it is the wrong assumption, for example this is not supported by Ledger - /// Then this method return a empty vector - fn list_locked_devices(&self) -> Vec; + /// List locked wallets + /// This may be moved if it is the wrong assumption, for example this is not supported by Ledger + /// Then this method return a empty vector + fn list_locked_devices(&self) -> Vec; - /// Get wallet info. - fn get_wallet(&self, address: &Address) -> Option; + /// Get wallet info. 
+ fn get_wallet(&self, address: &Address) -> Option; - /// Generate ethereum address for a Wallet - fn get_address(&self, device: &hidapi::HidDevice) -> Result, Self::Error>; + /// Generate ethereum address for a Wallet + fn get_address(&self, device: &hidapi::HidDevice) -> Result, Self::Error>; - /// Open a device using `device path` - /// Note, f - is a closure that borrows HidResult - /// HidDevice is in turn a type alias for a `c_void function pointer` - /// For further information see: - /// * - /// * - fn open_path(&self, f: F) -> Result - where F: Fn() -> Result; + /// Open a device using `device path` + /// Note, f - is a closure that borrows HidResult + /// HidDevice is in turn a type alias for a `c_void function pointer` + /// For further information see: + /// * + /// * + fn open_path(&self, f: F) -> Result + where + F: Fn() -> Result; } /// Hardware wallet error. #[derive(Debug)] pub enum Error { - /// Ledger device error. - LedgerDevice(ledger::Error), - /// Trezor device error - TrezorDevice(trezor::Error), - /// USB error. - Usb(libusb::Error), - /// HID error - Hid(String), - /// Hardware wallet not found for specified key. - KeyNotFound, + /// Ledger device error. + LedgerDevice(ledger::Error), + /// Trezor device error + TrezorDevice(trezor::Error), + /// USB error. + Usb(libusb::Error), + /// HID error + Hid(String), + /// Hardware wallet not found for specified key. + KeyNotFound, } /// This is the transaction info we need to supply to Trezor message. It's more /// or less a duplicate of `ethcore::transaction::Transaction`, but we can't /// import ethcore here as that would be a circular dependency. pub struct TransactionInfo { - /// Nonce - pub nonce: U256, - /// Gas price - pub gas_price: U256, - /// Gas limit - pub gas_limit: U256, - /// Receiver - pub to: Option
, - /// Value - pub value: U256, - /// Data - pub data: Vec, - /// Chain ID - pub chain_id: Option, + /// Nonce + pub nonce: U256, + /// Gas price + pub gas_price: U256, + /// Gas limit + pub gas_limit: U256, + /// Receiver + pub to: Option
, + /// Value + pub value: U256, + /// Data + pub data: Vec, + /// Chain ID + pub chain_id: Option, } /// Hardware wallet information. #[derive(Debug, Clone)] pub struct WalletInfo { - /// Wallet device name. - pub name: String, - /// Wallet device manufacturer. - pub manufacturer: String, - /// Wallet device serial number. - pub serial: String, - /// Ethereum address. - pub address: Address, + /// Wallet device name. + pub name: String, + /// Wallet device manufacturer. + pub manufacturer: String, + /// Wallet device serial number. + pub serial: String, + /// Ethereum address. + pub address: Address, } /// Key derivation paths used on hardware wallets. #[derive(Debug, Clone, Copy)] pub enum KeyPath { - /// Ethereum. - Ethereum, - /// Ethereum classic. - EthereumClassic, + /// Ethereum. + Ethereum, + /// Ethereum classic. + EthereumClassic, } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Error::KeyNotFound => write!(f, "Key not found for given address."), - Error::LedgerDevice(ref e) => write!(f, "{}", e), - Error::TrezorDevice(ref e) => write!(f, "{}", e), - Error::Usb(ref e) => write!(f, "{}", e), - Error::Hid(ref e) => write!(f, "{}", e), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::KeyNotFound => write!(f, "Key not found for given address."), + Error::LedgerDevice(ref e) => write!(f, "{}", e), + Error::TrezorDevice(ref e) => write!(f, "{}", e), + Error::Usb(ref e) => write!(f, "{}", e), + Error::Hid(ref e) => write!(f, "{}", e), + } + } } impl From for Error { - fn from(err: ledger::Error) -> Self { - match err { - ledger::Error::KeyNotFound => Error::KeyNotFound, - _ => Error::LedgerDevice(err), - } - } + fn from(err: ledger::Error) -> Self { + match err { + ledger::Error::KeyNotFound => Error::KeyNotFound, + _ => Error::LedgerDevice(err), + } + } } impl From for Error { - fn from(err: trezor::Error) -> Self { - match err { - 
trezor::Error::KeyNotFound => Error::KeyNotFound, - _ => Error::TrezorDevice(err), - } - } + fn from(err: trezor::Error) -> Self { + match err { + trezor::Error::KeyNotFound => Error::KeyNotFound, + _ => Error::TrezorDevice(err), + } + } } impl From for Error { - fn from(err: libusb::Error) -> Self { - Error::Usb(err) - } + fn from(err: libusb::Error) -> Self { + Error::Usb(err) + } } /// Specifies the direction of the `HardwareWallet` i.e, whether it arrived or left #[derive(Debug, Copy, Clone, PartialEq)] pub enum DeviceDirection { - /// Device arrived - Arrived, - /// Device left - Left, + /// Device arrived + Arrived, + /// Device left + Left, } impl fmt::Display for DeviceDirection { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - DeviceDirection::Arrived => write!(f, "arrived"), - DeviceDirection::Left => write!(f, "left"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + DeviceDirection::Arrived => write!(f, "arrived"), + DeviceDirection::Left => write!(f, "left"), + } + } } /// Hardware wallet management interface. 
pub struct HardwareWalletManager { - exiting: Arc, - ledger: Arc, - trezor: Arc, + exiting: Arc, + ledger: Arc, + trezor: Arc, } impl HardwareWalletManager { - /// Hardware wallet constructor - pub fn new() -> Result { - let exiting = Arc::new(AtomicBool::new(false)); - let hidapi = Arc::new(Mutex::new(hidapi::HidApi::new().map_err(|e| Error::Hid(e.to_string().clone()))?)); - let ledger = ledger::Manager::new(hidapi.clone()); - let trezor = trezor::Manager::new(hidapi.clone()); - let usb_context = Arc::new(libusb::Context::new()?); + /// Hardware wallet constructor + pub fn new() -> Result { + let exiting = Arc::new(AtomicBool::new(false)); + let hidapi = Arc::new(Mutex::new( + hidapi::HidApi::new().map_err(|e| Error::Hid(e.to_string().clone()))?, + )); + let ledger = ledger::Manager::new(hidapi.clone()); + let trezor = trezor::Manager::new(hidapi.clone()); + let usb_context = Arc::new(libusb::Context::new()?); - let l = ledger.clone(); - let t = trezor.clone(); - let exit = exiting.clone(); + let l = ledger.clone(); + let t = trezor.clone(); + let exit = exiting.clone(); - // Subscribe to all vendor IDs (VIDs) and product IDs (PIDs) - // This means that the `HardwareWalletManager` is responsible to validate the detected device - usb_context.register_callback( - None, None, Some(HID_USB_DEVICE_CLASS), - Box::new(EventHandler::new( - Arc::downgrade(&ledger), - Arc::downgrade(&trezor) - )) - )?; + // Subscribe to all vendor IDs (VIDs) and product IDs (PIDs) + // This means that the `HardwareWalletManager` is responsible to validate the detected device + usb_context.register_callback( + None, + None, + Some(HID_USB_DEVICE_CLASS), + Box::new(EventHandler::new( + Arc::downgrade(&ledger), + Arc::downgrade(&trezor), + )), + )?; - // Hardware event subscriber thread - thread::Builder::new() - .name("hw_wallet_manager".to_string()) - .spawn(move || { - if let Err(e) = l.update_devices(DeviceDirection::Arrived) { - debug!(target: "hw", "Ledger couldn't connect at startup, 
error: {}", e); - } - if let Err(e) = t.update_devices(DeviceDirection::Arrived) { - debug!(target: "hw", "Trezor couldn't connect at startup, error: {}", e); - } + // Hardware event subscriber thread + thread::Builder::new() + .name("hw_wallet_manager".to_string()) + .spawn(move || { + if let Err(e) = l.update_devices(DeviceDirection::Arrived) { + debug!(target: "hw", "Ledger couldn't connect at startup, error: {}", e); + } + if let Err(e) = t.update_devices(DeviceDirection::Arrived) { + debug!(target: "hw", "Trezor couldn't connect at startup, error: {}", e); + } - while !exit.load(atomic::Ordering::Acquire) { - if let Err(e) = usb_context.handle_events(Some(USB_EVENT_POLLING_INTERVAL)) { - debug!(target: "hw", "HardwareWalletManager event handler error: {}", e); - } - } - }) - .ok(); + while !exit.load(atomic::Ordering::Acquire) { + if let Err(e) = usb_context.handle_events(Some(USB_EVENT_POLLING_INTERVAL)) { + debug!(target: "hw", "HardwareWalletManager event handler error: {}", e); + } + } + }) + .ok(); - Ok(Self { - exiting, - trezor, - ledger, - }) - } + Ok(Self { + exiting, + trezor, + ledger, + }) + } - /// Select key derivation path for a chain. - /// Currently, only one hard-coded keypath is supported - /// It is managed by `ethcore/account_provider` - pub fn set_key_path(&self, key_path: KeyPath) { - self.ledger.set_key_path(key_path); - self.trezor.set_key_path(key_path); - } + /// Select key derivation path for a chain. + /// Currently, only one hard-coded keypath is supported + /// It is managed by `ethcore/account_provider` + pub fn set_key_path(&self, key_path: KeyPath) { + self.ledger.set_key_path(key_path); + self.trezor.set_key_path(key_path); + } - /// List connected wallets. This only returns wallets that are ready to be used. - pub fn list_wallets(&self) -> Vec { - let mut wallets = Vec::new(); - wallets.extend(self.ledger.list_devices()); - wallets.extend(self.trezor.list_devices()); - wallets - } + /// List connected wallets. 
This only returns wallets that are ready to be used. + pub fn list_wallets(&self) -> Vec { + let mut wallets = Vec::new(); + wallets.extend(self.ledger.list_devices()); + wallets.extend(self.trezor.list_devices()); + wallets + } - /// Return a list of paths to locked hardware wallets - /// This is only applicable to Trezor because Ledger only appears as - /// a device when it is unlocked - pub fn list_locked_wallets(&self) -> Result, Error> { - Ok(self.trezor.list_locked_devices()) - } + /// Return a list of paths to locked hardware wallets + /// This is only applicable to Trezor because Ledger only appears as + /// a device when it is unlocked + pub fn list_locked_wallets(&self) -> Result, Error> { + Ok(self.trezor.list_locked_devices()) + } - /// Get connected wallet info. - pub fn wallet_info(&self, address: &Address) -> Option { - if let Some(info) = self.ledger.get_wallet(address) { - Some(info) - } else { - self.trezor.get_wallet(address) - } - } + /// Get connected wallet info. + pub fn wallet_info(&self, address: &Address) -> Option { + if let Some(info) = self.ledger.get_wallet(address) { + Some(info) + } else { + self.trezor.get_wallet(address) + } + } - /// Sign a message with the wallet (only supported by Ledger) - pub fn sign_message(&self, address: &Address, msg: &[u8]) -> Result { - if self.ledger.get_wallet(address).is_some() { - Ok(self.ledger.sign_message(address, msg)?) - } else if self.trezor.get_wallet(address).is_some() { - Err(Error::TrezorDevice(trezor::Error::NoSigningMessage)) - } else { - Err(Error::KeyNotFound) - } - } + /// Sign a message with the wallet (only supported by Ledger) + pub fn sign_message(&self, address: &Address, msg: &[u8]) -> Result { + if self.ledger.get_wallet(address).is_some() { + Ok(self.ledger.sign_message(address, msg)?) 
+ } else if self.trezor.get_wallet(address).is_some() { + Err(Error::TrezorDevice(trezor::Error::NoSigningMessage)) + } else { + Err(Error::KeyNotFound) + } + } - /// Sign transaction data with wallet managing `address`. - pub fn sign_transaction(&self, address: &Address, t_info: &TransactionInfo, encoded_transaction: &[u8]) -> Result { - if self.ledger.get_wallet(address).is_some() { - Ok(self.ledger.sign_transaction(address, encoded_transaction)?) - } else if self.trezor.get_wallet(address).is_some() { - Ok(self.trezor.sign_transaction(address, t_info)?) - } else { - Err(Error::KeyNotFound) - } - } + /// Sign transaction data with wallet managing `address`. + pub fn sign_transaction( + &self, + address: &Address, + t_info: &TransactionInfo, + encoded_transaction: &[u8], + ) -> Result { + if self.ledger.get_wallet(address).is_some() { + Ok(self.ledger.sign_transaction(address, encoded_transaction)?) + } else if self.trezor.get_wallet(address).is_some() { + Ok(self.trezor.sign_transaction(address, t_info)?) + } else { + Err(Error::KeyNotFound) + } + } - /// Send a pin to a device at a certain path to unlock it - /// This is only applicable to Trezor because Ledger only appears as - /// a device when it is unlocked - pub fn pin_matrix_ack(&self, path: &str, pin: &str) -> Result { - self.trezor.pin_matrix_ack(path, pin).map_err(Error::TrezorDevice) - } + /// Send a pin to a device at a certain path to unlock it + /// This is only applicable to Trezor because Ledger only appears as + /// a device when it is unlocked + pub fn pin_matrix_ack(&self, path: &str, pin: &str) -> Result { + self.trezor + .pin_matrix_ack(path, pin) + .map_err(Error::TrezorDevice) + } } impl Drop for HardwareWalletManager { - fn drop(&mut self) { - // Indicate to the USB Hotplug handler that it - // shall terminate but don't wait for it to terminate. 
- // If it doesn't terminate for some reason USB Hotplug events will be handled - // even if the HardwareWalletManger has been dropped - self.exiting.store(true, atomic::Ordering::Release); - } + fn drop(&mut self) { + // Indicate to the USB Hotplug handler that it + // shall terminate but don't wait for it to terminate. + // If it doesn't terminate for some reason USB Hotplug events will be handled + // even if the HardwareWalletManger has been dropped + self.exiting.store(true, atomic::Ordering::Release); + } } /// Hardware wallet event handler @@ -342,61 +367,77 @@ impl Drop for HardwareWalletManager { /// Note, that this runs to completion and race-conditions can't occur but it can /// stop other events for being processed with an infinite loop or similar struct EventHandler { - ledger: Weak, - trezor: Weak, + ledger: Weak, + trezor: Weak, } impl EventHandler { - /// Trezor event handler constructor - pub fn new(ledger: Weak, trezor: Weak) -> Self { - Self { ledger, trezor } - } + /// Trezor event handler constructor + pub fn new(ledger: Weak, trezor: Weak) -> Self { + Self { ledger, trezor } + } - fn extract_device_info(device: &libusb::Device) -> Result<(u16, u16), Error> { - let desc = device.device_descriptor()?; - Ok((desc.vendor_id(), desc.product_id())) - } + fn extract_device_info(device: &libusb::Device) -> Result<(u16, u16), Error> { + let desc = device.device_descriptor()?; + Ok((desc.vendor_id(), desc.product_id())) + } } impl libusb::Hotplug for EventHandler { - fn device_arrived(&mut self, device: libusb::Device) { - // Upgrade reference to an Arc - if let (Some(ledger), Some(trezor)) = (self.ledger.upgrade(), self.trezor.upgrade()) { - // Version ID and Product ID are available - if let Ok((vid, pid)) = Self::extract_device_info(&device) { - if trezor::is_valid_trezor(vid, pid) { - if !trezor::try_connect_polling(&trezor, &MAX_POLLING_DURATION, DeviceDirection::Arrived) { - trace!(target: "hw", "Trezor device was detected but connection failed"); 
- } - } else if ledger::is_valid_ledger(vid, pid) { - if !ledger::try_connect_polling(&ledger, &MAX_POLLING_DURATION, DeviceDirection::Arrived) { - trace!(target: "hw", "Ledger device was detected but connection failed"); - } - } - } - } - } + fn device_arrived(&mut self, device: libusb::Device) { + // Upgrade reference to an Arc + if let (Some(ledger), Some(trezor)) = (self.ledger.upgrade(), self.trezor.upgrade()) { + // Version ID and Product ID are available + if let Ok((vid, pid)) = Self::extract_device_info(&device) { + if trezor::is_valid_trezor(vid, pid) { + if !trezor::try_connect_polling( + &trezor, + &MAX_POLLING_DURATION, + DeviceDirection::Arrived, + ) { + trace!(target: "hw", "Trezor device was detected but connection failed"); + } + } else if ledger::is_valid_ledger(vid, pid) { + if !ledger::try_connect_polling( + &ledger, + &MAX_POLLING_DURATION, + DeviceDirection::Arrived, + ) { + trace!(target: "hw", "Ledger device was detected but connection failed"); + } + } + } + } + } - fn device_left(&mut self, device: libusb::Device) { - // Upgrade reference to an Arc - if let (Some(ledger), Some(trezor)) = (self.ledger.upgrade(), self.trezor.upgrade()) { - // Version ID and Product ID are available - if let Ok((vid, pid)) = Self::extract_device_info(&device) { - if trezor::is_valid_trezor(vid, pid) { - if !trezor::try_connect_polling(&trezor, &MAX_POLLING_DURATION, DeviceDirection::Left) { - trace!(target: "hw", "Trezor device was detected but disconnection failed"); - } - } else if ledger::is_valid_ledger(vid, pid) { - if !ledger::try_connect_polling(&ledger, &MAX_POLLING_DURATION, DeviceDirection::Left) { - trace!(target: "hw", "Ledger device was detected but disconnection failed"); - } - } - } - } - } + fn device_left(&mut self, device: libusb::Device) { + // Upgrade reference to an Arc + if let (Some(ledger), Some(trezor)) = (self.ledger.upgrade(), self.trezor.upgrade()) { + // Version ID and Product ID are available + if let Ok((vid, pid)) = 
Self::extract_device_info(&device) { + if trezor::is_valid_trezor(vid, pid) { + if !trezor::try_connect_polling( + &trezor, + &MAX_POLLING_DURATION, + DeviceDirection::Left, + ) { + trace!(target: "hw", "Trezor device was detected but disconnection failed"); + } + } else if ledger::is_valid_ledger(vid, pid) { + if !ledger::try_connect_polling( + &ledger, + &MAX_POLLING_DURATION, + DeviceDirection::Left, + ) { + trace!(target: "hw", "Ledger device was detected but disconnection failed"); + } + } + } + } + } } /// Helper to determine if a device is a valid HID pub fn is_valid_hid_device(usage_page: u16, interface_number: i32) -> bool { - usage_page == HID_GLOBAL_USAGE_PAGE || interface_number == HID_USB_DEVICE_CLASS as i32 + usage_page == HID_GLOBAL_USAGE_PAGE || interface_number == HID_USB_DEVICE_CLASS as i32 } diff --git a/accounts/hw/src/trezor.rs b/accounts/hw/src/trezor.rs index b20123ed8..dd185c6e3 100644 --- a/accounts/hw/src/trezor.rs +++ b/accounts/hw/src/trezor.rs @@ -19,19 +19,26 @@ //! and //! for protocol details. 
-use std::cmp::{min, max}; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use std::fmt; +use std::{ + cmp::{max, min}, + fmt, + sync::Arc, + time::{Duration, Instant}, +}; -use ethereum_types::{U256, H256, Address}; +use super::{ + is_valid_hid_device, Device, DeviceDirection, KeyPath, TransactionInfo, Wallet, WalletInfo, +}; +use ethereum_types::{Address, H256, U256}; use ethkey::Signature; use hidapi; use libusb; use parking_lot::{Mutex, RwLock}; use protobuf::{self, Message, ProtobufEnum}; -use super::{DeviceDirection, WalletInfo, TransactionInfo, KeyPath, Wallet, Device, is_valid_hid_device}; -use trezor_sys::messages::{EthereumAddress, PinMatrixAck, MessageType, EthereumTxRequest, EthereumSignTx, EthereumGetAddress, EthereumTxAck, ButtonAck}; +use trezor_sys::messages::{ + ButtonAck, EthereumAddress, EthereumGetAddress, EthereumSignTx, EthereumTxAck, + EthereumTxRequest, MessageType, PinMatrixAck, +}; /// Trezor v1 vendor ID const TREZOR_VID: u16 = 0x534c; @@ -44,388 +51,450 @@ const ETC_DERIVATION_PATH: [u32; 5] = [0x8000_002C, 0x8000_003D, 0x8000_0000, 0, /// Hardware wallet error. #[derive(Debug)] pub enum Error { - /// Ethereum wallet protocol error. - Protocol(&'static str), - /// Hidapi error. - Usb(hidapi::HidError), - /// Libusb error - LibUsb(libusb::Error), - /// Device with request key is not available. - KeyNotFound, - /// Signing has been cancelled by user. - UserCancel, - /// The Message Type given in the trezor RPC call is not something we recognize - BadMessageType, - /// Trying to read from a closed device at the given path - LockedDevice(String), - /// Signing messages are not supported by Trezor - NoSigningMessage, - /// No device arrived - NoDeviceArrived, - /// No device left - NoDeviceLeft, - /// Invalid PID or VID - InvalidDevice, + /// Ethereum wallet protocol error. + Protocol(&'static str), + /// Hidapi error. 
+ Usb(hidapi::HidError), + /// Libusb error + LibUsb(libusb::Error), + /// Device with request key is not available. + KeyNotFound, + /// Signing has been cancelled by user. + UserCancel, + /// The Message Type given in the trezor RPC call is not something we recognize + BadMessageType, + /// Trying to read from a closed device at the given path + LockedDevice(String), + /// Signing messages are not supported by Trezor + NoSigningMessage, + /// No device arrived + NoDeviceArrived, + /// No device left + NoDeviceLeft, + /// Invalid PID or VID + InvalidDevice, } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Error::Protocol(ref s) => write!(f, "Trezor protocol error: {}", s), - Error::Usb(ref e) => write!(f, "USB communication error: {}", e), - Error::LibUsb(ref e) => write!(f, "LibUSB communication error: {}", e), - Error::KeyNotFound => write!(f, "Key not found"), - Error::UserCancel => write!(f, "Operation has been cancelled"), - Error::BadMessageType => write!(f, "Bad Message Type in RPC call"), - Error::LockedDevice(ref s) => write!(f, "Device is locked, needs PIN to perform operations: {}", s), - Error::NoSigningMessage=> write!(f, "Signing messages are not supported by Trezor"), - Error::NoDeviceArrived => write!(f, "No device arrived"), - Error::NoDeviceLeft => write!(f, "No device left"), - Error::InvalidDevice => write!(f, "Device with non-supported product ID or vendor ID was detected"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::Protocol(ref s) => write!(f, "Trezor protocol error: {}", s), + Error::Usb(ref e) => write!(f, "USB communication error: {}", e), + Error::LibUsb(ref e) => write!(f, "LibUSB communication error: {}", e), + Error::KeyNotFound => write!(f, "Key not found"), + Error::UserCancel => write!(f, "Operation has been cancelled"), + Error::BadMessageType => write!(f, "Bad Message Type in RPC call"), + 
Error::LockedDevice(ref s) => write!( + f, + "Device is locked, needs PIN to perform operations: {}", + s + ), + Error::NoSigningMessage => write!(f, "Signing messages are not supported by Trezor"), + Error::NoDeviceArrived => write!(f, "No device arrived"), + Error::NoDeviceLeft => write!(f, "No device left"), + Error::InvalidDevice => write!( + f, + "Device with non-supported product ID or vendor ID was detected" + ), + } + } } impl From for Error { - fn from(err: hidapi::HidError) -> Self { - Error::Usb(err) - } + fn from(err: hidapi::HidError) -> Self { + Error::Usb(err) + } } impl From for Error { - fn from(err: libusb::Error) -> Self { - Error::LibUsb(err) - } + fn from(err: libusb::Error) -> Self { + Error::LibUsb(err) + } } impl From for Error { - fn from(_: protobuf::ProtobufError) -> Self { - Error::Protocol(&"Could not read response from Trezor Device") - } + fn from(_: protobuf::ProtobufError) -> Self { + Error::Protocol(&"Could not read response from Trezor Device") + } } /// Trezor device manager pub struct Manager { - usb: Arc>, - devices: RwLock>, - locked_devices: RwLock>, - key_path: RwLock, + usb: Arc>, + devices: RwLock>, + locked_devices: RwLock>, + key_path: RwLock, } /// HID Version used for the Trezor device enum HidVersion { - V1, - V2, + V1, + V2, } impl Manager { - /// Create a new instance. - pub fn new(usb: Arc>) -> Arc { - Arc::new(Self { - usb, - devices: RwLock::new(Vec::new()), - locked_devices: RwLock::new(Vec::new()), - key_path: RwLock::new(KeyPath::Ethereum), - }) - } + /// Create a new instance. 
+ pub fn new(usb: Arc>) -> Arc { + Arc::new(Self { + usb, + devices: RwLock::new(Vec::new()), + locked_devices: RwLock::new(Vec::new()), + key_path: RwLock::new(KeyPath::Ethereum), + }) + } - pub fn pin_matrix_ack(&self, device_path: &str, pin: &str) -> Result { - let unlocked = { - let usb = self.usb.lock(); - let device = self.open_path(|| usb.open_path(&device_path))?; - let t = MessageType::MessageType_PinMatrixAck; - let mut m = PinMatrixAck::new(); - m.set_pin(pin.to_string()); - self.send_device_message(&device, t, &m)?; - let (resp_type, _) = self.read_device_response(&device)?; - match resp_type { - // Getting an Address back means it's unlocked, this is undocumented behavior - MessageType::MessageType_EthereumAddress => Ok(true), - // Getting anything else means we didn't unlock it - _ => Ok(false), + pub fn pin_matrix_ack(&self, device_path: &str, pin: &str) -> Result { + let unlocked = { + let usb = self.usb.lock(); + let device = self.open_path(|| usb.open_path(&device_path))?; + let t = MessageType::MessageType_PinMatrixAck; + let mut m = PinMatrixAck::new(); + m.set_pin(pin.to_string()); + self.send_device_message(&device, t, &m)?; + let (resp_type, _) = self.read_device_response(&device)?; + match resp_type { + // Getting an Address back means it's unlocked, this is undocumented behavior + MessageType::MessageType_EthereumAddress => Ok(true), + // Getting anything else means we didn't unlock it + _ => Ok(false), + } + }; + self.update_devices(DeviceDirection::Arrived)?; + unlocked + } - } - }; - self.update_devices(DeviceDirection::Arrived)?; - unlocked - } + fn u256_to_be_vec(&self, val: &U256) -> Vec { + let mut buf = [0_u8; 32]; + val.to_big_endian(&mut buf); + buf.iter().skip_while(|x| **x == 0).cloned().collect() + } - fn u256_to_be_vec(&self, val: &U256) -> Vec { - let mut buf = [0_u8; 32]; - val.to_big_endian(&mut buf); - buf.iter().skip_while(|x| **x == 0).cloned().collect() - } + fn signing_loop( + &self, + handle: &hidapi::HidDevice, + 
chain_id: &Option, + data: &[u8], + ) -> Result { + let (resp_type, bytes) = self.read_device_response(&handle)?; + match resp_type { + MessageType::MessageType_Cancel => Err(Error::UserCancel), + MessageType::MessageType_ButtonRequest => { + self.send_device_message( + handle, + MessageType::MessageType_ButtonAck, + &ButtonAck::new(), + )?; + // Signing loop goes back to the top and reading blocks + // for up to 5 minutes waiting for response from the device + // if the user doesn't click any button within 5 minutes you + // get a signing error and the device sort of locks up on the signing screen + self.signing_loop(handle, chain_id, data) + } + MessageType::MessageType_EthereumTxRequest => { + let resp: EthereumTxRequest = protobuf::core::parse_from_bytes(&bytes)?; + if resp.has_data_length() { + let mut msg = EthereumTxAck::new(); + let len = resp.get_data_length() as usize; + msg.set_data_chunk(data[..len].to_vec()); + self.send_device_message(handle, MessageType::MessageType_EthereumTxAck, &msg)?; + self.signing_loop(handle, chain_id, &data[len..]) + } else { + let v = resp.get_signature_v(); + let r = H256::from_slice(resp.get_signature_r()); + let s = H256::from_slice(resp.get_signature_s()); + if let Some(c_id) = *chain_id { + // If there is a chain_id supplied, Trezor will return a v + // part of the signature that is already adjusted for EIP-155, + // so v' = v + 2 * chain_id + 35, but code further down the + // pipeline will already do this transformation, so remove it here + let adjustment = 35 + 2 * c_id as u32; + Ok(Signature::from_rsv( + &r, + &s, + (max(v, adjustment) - adjustment) as u8, + )) + } else { + // If there isn't a chain_id, v will be returned as v + 27 + let adjusted_v = if v < 27 { v } else { v - 27 }; + Ok(Signature::from_rsv(&r, &s, adjusted_v as u8)) + } + } + } + MessageType::MessageType_Failure => { + Err(Error::Protocol("Last message sent to Trezor failed")) + } + _ => Err(Error::Protocol("Unexpected response from Trezor 
device.")), + } + } - fn signing_loop(&self, handle: &hidapi::HidDevice, chain_id: &Option, data: &[u8]) -> Result { - let (resp_type, bytes) = self.read_device_response(&handle)?; - match resp_type { - MessageType::MessageType_Cancel => Err(Error::UserCancel), - MessageType::MessageType_ButtonRequest => { - self.send_device_message(handle, MessageType::MessageType_ButtonAck, &ButtonAck::new())?; - // Signing loop goes back to the top and reading blocks - // for up to 5 minutes waiting for response from the device - // if the user doesn't click any button within 5 minutes you - // get a signing error and the device sort of locks up on the signing screen - self.signing_loop(handle, chain_id, data) - } - MessageType::MessageType_EthereumTxRequest => { - let resp: EthereumTxRequest = protobuf::core::parse_from_bytes(&bytes)?; - if resp.has_data_length() { - let mut msg = EthereumTxAck::new(); - let len = resp.get_data_length() as usize; - msg.set_data_chunk(data[..len].to_vec()); - self.send_device_message(handle, MessageType::MessageType_EthereumTxAck, &msg)?; - self.signing_loop(handle, chain_id, &data[len..]) - } else { - let v = resp.get_signature_v(); - let r = H256::from_slice(resp.get_signature_r()); - let s = H256::from_slice(resp.get_signature_s()); - if let Some(c_id) = *chain_id { - // If there is a chain_id supplied, Trezor will return a v - // part of the signature that is already adjusted for EIP-155, - // so v' = v + 2 * chain_id + 35, but code further down the - // pipeline will already do this transformation, so remove it here - let adjustment = 35 + 2 * c_id as u32; - Ok(Signature::from_rsv(&r, &s, (max(v, adjustment) - adjustment) as u8)) - } else { - // If there isn't a chain_id, v will be returned as v + 27 - let adjusted_v = if v < 27 { v } else { v - 27 }; - Ok(Signature::from_rsv(&r, &s, adjusted_v as u8)) - } - } - } - MessageType::MessageType_Failure => Err(Error::Protocol("Last message sent to Trezor failed")), - _ => 
Err(Error::Protocol("Unexpected response from Trezor device.")), - } - } + fn send_device_message( + &self, + device: &hidapi::HidDevice, + msg_type: MessageType, + msg: &Message, + ) -> Result { + let msg_id = msg_type as u16; + let mut message = msg.write_to_bytes()?; + let msg_size = message.len(); + let mut data = Vec::new(); + let hid_version = self.probe_hid_version(device)?; + // Magic constants + data.push(b'#'); + data.push(b'#'); + // Convert msg_id to BE and split into bytes + data.push(((msg_id >> 8) & 0xFF) as u8); + data.push((msg_id & 0xFF) as u8); + // Convert msg_size to BE and split into bytes + data.push(((msg_size >> 24) & 0xFF) as u8); + data.push(((msg_size >> 16) & 0xFF) as u8); + data.push(((msg_size >> 8) & 0xFF) as u8); + data.push((msg_size & 0xFF) as u8); + data.append(&mut message); + while data.len() % 63 > 0 { + data.push(0); + } + let mut total_written = 0; + for chunk in data.chunks(63) { + let mut padded_chunk = match hid_version { + HidVersion::V1 => vec![b'?'], + HidVersion::V2 => vec![0, b'?'], + }; + padded_chunk.extend_from_slice(&chunk); + total_written += device.write(&padded_chunk)?; + } + Ok(total_written) + } - fn send_device_message(&self, device: &hidapi::HidDevice, msg_type: MessageType, msg: &Message) -> Result { - let msg_id = msg_type as u16; - let mut message = msg.write_to_bytes()?; - let msg_size = message.len(); - let mut data = Vec::new(); - let hid_version = self.probe_hid_version(device)?; - // Magic constants - data.push(b'#'); - data.push(b'#'); - // Convert msg_id to BE and split into bytes - data.push(((msg_id >> 8) & 0xFF) as u8); - data.push((msg_id & 0xFF) as u8); - // Convert msg_size to BE and split into bytes - data.push(((msg_size >> 24) & 0xFF) as u8); - data.push(((msg_size >> 16) & 0xFF) as u8); - data.push(((msg_size >> 8) & 0xFF) as u8); - data.push((msg_size & 0xFF) as u8); - data.append(&mut message); - while data.len() % 63 > 0 { - data.push(0); - } - let mut total_written = 0; - for chunk 
in data.chunks(63) { - let mut padded_chunk = match hid_version { - HidVersion::V1 => vec![b'?'], - HidVersion::V2 => vec![0, b'?'], - }; - padded_chunk.extend_from_slice(&chunk); - total_written += device.write(&padded_chunk)?; - } - Ok(total_written) - } + fn probe_hid_version(&self, device: &hidapi::HidDevice) -> Result { + let mut buf2 = [0xFF_u8; 65]; + buf2[0] = 0; + buf2[1] = 63; + let mut buf1 = [0xFF_u8; 64]; + buf1[0] = 63; + if device.write(&buf2)? == 65 { + Ok(HidVersion::V2) + } else if device.write(&buf1)? == 64 { + Ok(HidVersion::V1) + } else { + Err(Error::Usb("Unable to determine HID Version")) + } + } - fn probe_hid_version(&self, device: &hidapi::HidDevice) -> Result { - let mut buf2 = [0xFF_u8; 65]; - buf2[0] = 0; - buf2[1] = 63; - let mut buf1 = [0xFF_u8; 64]; - buf1[0] = 63; - if device.write(&buf2)? == 65 { - Ok(HidVersion::V2) - } else if device.write(&buf1)? == 64 { - Ok(HidVersion::V1) - } else { - Err(Error::Usb("Unable to determine HID Version")) - } - } + fn read_device_response( + &self, + device: &hidapi::HidDevice, + ) -> Result<(MessageType, Vec), Error> { + let protocol_err = Error::Protocol(&"Unexpected wire response from Trezor Device"); + let mut buf = vec![0; 64]; - fn read_device_response(&self, device: &hidapi::HidDevice) -> Result<(MessageType, Vec), Error> { - let protocol_err = Error::Protocol(&"Unexpected wire response from Trezor Device"); - let mut buf = vec![0; 64]; - - let first_chunk = device.read_timeout(&mut buf, 300_000)?; - if first_chunk < 9 || buf[0] != b'?' 
|| buf[1] != b'#' || buf[2] != b'#' { - return Err(protocol_err); - } - let msg_type = MessageType::from_i32(((buf[3] as i32 & 0xFF) << 8) + (buf[4] as i32 & 0xFF)).ok_or(protocol_err)?; - let msg_size = ((buf[5] as u32 & 0xFF) << 24) + ((buf[6] as u32 & 0xFF) << 16) + ((buf[7] as u32 & 0xFF) << 8) + (buf[8] as u32 & 0xFF); - let mut data = Vec::new(); - data.extend_from_slice(&buf[9..]); - while data.len() < (msg_size as usize) { - device.read_timeout(&mut buf, 10_000)?; - data.extend_from_slice(&buf[1..]); - } - Ok((msg_type, data[..msg_size as usize].to_vec())) - } + let first_chunk = device.read_timeout(&mut buf, 300_000)?; + if first_chunk < 9 || buf[0] != b'?' || buf[1] != b'#' || buf[2] != b'#' { + return Err(protocol_err); + } + let msg_type = + MessageType::from_i32(((buf[3] as i32 & 0xFF) << 8) + (buf[4] as i32 & 0xFF)) + .ok_or(protocol_err)?; + let msg_size = ((buf[5] as u32 & 0xFF) << 24) + + ((buf[6] as u32 & 0xFF) << 16) + + ((buf[7] as u32 & 0xFF) << 8) + + (buf[8] as u32 & 0xFF); + let mut data = Vec::new(); + data.extend_from_slice(&buf[9..]); + while data.len() < (msg_size as usize) { + device.read_timeout(&mut buf, 10_000)?; + data.extend_from_slice(&buf[1..]); + } + Ok((msg_type, data[..msg_size as usize].to_vec())) + } } impl<'a> Wallet<'a> for Manager { - type Error = Error; - type Transaction = &'a TransactionInfo; + type Error = Error; + type Transaction = &'a TransactionInfo; - fn sign_transaction(&self, address: &Address, t_info: Self::Transaction) -> - Result { - let usb = self.usb.lock(); - let devices = self.devices.read(); - let device = devices.iter().find(|d| &d.info.address == address).ok_or(Error::KeyNotFound)?; - let handle = self.open_path(|| usb.open_path(&device.path))?; - let msg_type = MessageType::MessageType_EthereumSignTx; - let mut message = EthereumSignTx::new(); - match *self.key_path.read() { - KeyPath::Ethereum => message.set_address_n(ETH_DERIVATION_PATH.to_vec()), - KeyPath::EthereumClassic => 
message.set_address_n(ETC_DERIVATION_PATH.to_vec()), - } - message.set_nonce(self.u256_to_be_vec(&t_info.nonce)); - message.set_gas_limit(self.u256_to_be_vec(&t_info.gas_limit)); - message.set_gas_price(self.u256_to_be_vec(&t_info.gas_price)); - message.set_value(self.u256_to_be_vec(&t_info.value)); + fn sign_transaction( + &self, + address: &Address, + t_info: Self::Transaction, + ) -> Result { + let usb = self.usb.lock(); + let devices = self.devices.read(); + let device = devices + .iter() + .find(|d| &d.info.address == address) + .ok_or(Error::KeyNotFound)?; + let handle = self.open_path(|| usb.open_path(&device.path))?; + let msg_type = MessageType::MessageType_EthereumSignTx; + let mut message = EthereumSignTx::new(); + match *self.key_path.read() { + KeyPath::Ethereum => message.set_address_n(ETH_DERIVATION_PATH.to_vec()), + KeyPath::EthereumClassic => message.set_address_n(ETC_DERIVATION_PATH.to_vec()), + } + message.set_nonce(self.u256_to_be_vec(&t_info.nonce)); + message.set_gas_limit(self.u256_to_be_vec(&t_info.gas_limit)); + message.set_gas_price(self.u256_to_be_vec(&t_info.gas_price)); + message.set_value(self.u256_to_be_vec(&t_info.value)); - if let Some(addr) = t_info.to { - message.set_to(addr.to_vec()) - } - let first_chunk_length = min(t_info.data.len(), 1024); - let chunk = &t_info.data[0..first_chunk_length]; - message.set_data_initial_chunk(chunk.to_vec()); - message.set_data_length(t_info.data.len() as u32); - if let Some(c_id) = t_info.chain_id { - message.set_chain_id(c_id as u32); - } + if let Some(addr) = t_info.to { + message.set_to(addr.to_vec()) + } + let first_chunk_length = min(t_info.data.len(), 1024); + let chunk = &t_info.data[0..first_chunk_length]; + message.set_data_initial_chunk(chunk.to_vec()); + message.set_data_length(t_info.data.len() as u32); + if let Some(c_id) = t_info.chain_id { + message.set_chain_id(c_id as u32); + } - self.send_device_message(&handle, msg_type, &message)?; + self.send_device_message(&handle, 
msg_type, &message)?; - self.signing_loop(&handle, &t_info.chain_id, &t_info.data[first_chunk_length..]) - } + self.signing_loop( + &handle, + &t_info.chain_id, + &t_info.data[first_chunk_length..], + ) + } - fn set_key_path(&self, key_path: KeyPath) { - *self.key_path.write() = key_path; - } + fn set_key_path(&self, key_path: KeyPath) { + *self.key_path.write() = key_path; + } - fn update_devices(&self, device_direction: DeviceDirection) -> Result { - let mut usb = self.usb.lock(); - usb.refresh_devices(); - let devices = usb.devices(); - let num_prev_devices = self.devices.read().len(); + fn update_devices(&self, device_direction: DeviceDirection) -> Result { + let mut usb = self.usb.lock(); + usb.refresh_devices(); + let devices = usb.devices(); + let num_prev_devices = self.devices.read().len(); - let detected_devices = devices.iter() - .filter(|&d| is_valid_trezor(d.vendor_id, d.product_id) && - is_valid_hid_device(d.usage_page, d.interface_number) - ) - .fold(Vec::new(), |mut v, d| { - match self.read_device(&usb, &d) { - Ok(info) => { - trace!(target: "hw", "Found device: {:?}", info); - v.push(info); - } - Err(e) => trace!(target: "hw", "Error reading device info: {}", e), - }; - v - }); + let detected_devices = devices + .iter() + .filter(|&d| { + is_valid_trezor(d.vendor_id, d.product_id) + && is_valid_hid_device(d.usage_page, d.interface_number) + }) + .fold(Vec::new(), |mut v, d| { + match self.read_device(&usb, &d) { + Ok(info) => { + trace!(target: "hw", "Found device: {:?}", info); + v.push(info); + } + Err(e) => trace!(target: "hw", "Error reading device info: {}", e), + }; + v + }); - let num_curr_devices = detected_devices.len(); - *self.devices.write() = detected_devices; + let num_curr_devices = detected_devices.len(); + *self.devices.write() = detected_devices; - match device_direction { - DeviceDirection::Arrived => { - if num_curr_devices > num_prev_devices { - Ok(num_curr_devices - num_prev_devices) - } else { - Err(Error::NoDeviceArrived) - 
} - } - DeviceDirection::Left => { - if num_prev_devices > num_curr_devices { - Ok(num_prev_devices - num_curr_devices) - } else { - Err(Error::NoDeviceLeft) - } - } - } - } + match device_direction { + DeviceDirection::Arrived => { + if num_curr_devices > num_prev_devices { + Ok(num_curr_devices - num_prev_devices) + } else { + Err(Error::NoDeviceArrived) + } + } + DeviceDirection::Left => { + if num_prev_devices > num_curr_devices { + Ok(num_prev_devices - num_curr_devices) + } else { + Err(Error::NoDeviceLeft) + } + } + } + } - fn read_device(&self, usb: &hidapi::HidApi, dev_info: &hidapi::HidDeviceInfo) -> Result { - let handle = self.open_path(|| usb.open_path(&dev_info.path))?; - let manufacturer = dev_info.manufacturer_string.clone().unwrap_or_else(|| "Unknown".to_owned()); - let name = dev_info.product_string.clone().unwrap_or_else(|| "Unknown".to_owned()); - let serial = dev_info.serial_number.clone().unwrap_or_else(|| "Unknown".to_owned()); - match self.get_address(&handle) { - Ok(Some(addr)) => { - Ok(Device { - path: dev_info.path.clone(), - info: WalletInfo { - name, - manufacturer, - serial, - address: addr, - }, - }) - } - Ok(None) => Err(Error::LockedDevice(dev_info.path.clone())), - Err(e) => Err(e), - } - } + fn read_device( + &self, + usb: &hidapi::HidApi, + dev_info: &hidapi::HidDeviceInfo, + ) -> Result { + let handle = self.open_path(|| usb.open_path(&dev_info.path))?; + let manufacturer = dev_info + .manufacturer_string + .clone() + .unwrap_or_else(|| "Unknown".to_owned()); + let name = dev_info + .product_string + .clone() + .unwrap_or_else(|| "Unknown".to_owned()); + let serial = dev_info + .serial_number + .clone() + .unwrap_or_else(|| "Unknown".to_owned()); + match self.get_address(&handle) { + Ok(Some(addr)) => Ok(Device { + path: dev_info.path.clone(), + info: WalletInfo { + name, + manufacturer, + serial, + address: addr, + }, + }), + Ok(None) => Err(Error::LockedDevice(dev_info.path.clone())), + Err(e) => Err(e), + } + } - fn 
list_devices(&self) -> Vec { - self.devices.read().iter().map(|d| d.info.clone()).collect() - } + fn list_devices(&self) -> Vec { + self.devices.read().iter().map(|d| d.info.clone()).collect() + } - fn list_locked_devices(&self) -> Vec { - (*self.locked_devices.read()).clone() - } + fn list_locked_devices(&self) -> Vec { + (*self.locked_devices.read()).clone() + } - fn get_wallet(&self, address: &Address) -> Option { - self.devices.read().iter().find(|d| &d.info.address == address).map(|d| d.info.clone()) - } + fn get_wallet(&self, address: &Address) -> Option { + self.devices + .read() + .iter() + .find(|d| &d.info.address == address) + .map(|d| d.info.clone()) + } - fn get_address(&self, device: &hidapi::HidDevice) -> Result, Error> { - let typ = MessageType::MessageType_EthereumGetAddress; - let mut message = EthereumGetAddress::new(); - match *self.key_path.read() { - KeyPath::Ethereum => message.set_address_n(ETH_DERIVATION_PATH.to_vec()), - KeyPath::EthereumClassic => message.set_address_n(ETC_DERIVATION_PATH.to_vec()), - } - message.set_show_display(false); - self.send_device_message(&device, typ, &message)?; + fn get_address(&self, device: &hidapi::HidDevice) -> Result, Error> { + let typ = MessageType::MessageType_EthereumGetAddress; + let mut message = EthereumGetAddress::new(); + match *self.key_path.read() { + KeyPath::Ethereum => message.set_address_n(ETH_DERIVATION_PATH.to_vec()), + KeyPath::EthereumClassic => message.set_address_n(ETC_DERIVATION_PATH.to_vec()), + } + message.set_show_display(false); + self.send_device_message(&device, typ, &message)?; - let (resp_type, bytes) = self.read_device_response(&device)?; - match resp_type { - MessageType::MessageType_EthereumAddress => { - let response: EthereumAddress = protobuf::core::parse_from_bytes(&bytes)?; - Ok(Some(From::from(response.get_address()))) - } - _ => Ok(None), - } - } + let (resp_type, bytes) = self.read_device_response(&device)?; + match resp_type { + 
MessageType::MessageType_EthereumAddress => { + let response: EthereumAddress = protobuf::core::parse_from_bytes(&bytes)?; + Ok(Some(From::from(response.get_address()))) + } + _ => Ok(None), + } + } - fn open_path(&self, f: F) -> Result - where F: Fn() -> Result - { - f().map_err(Into::into) - } + fn open_path(&self, f: F) -> Result + where + F: Fn() -> Result, + { + f().map_err(Into::into) + } } /// Poll the device in maximum `max_polling_duration` if it doesn't succeed pub fn try_connect_polling(trezor: &Manager, duration: &Duration, dir: DeviceDirection) -> bool { - let start_time = Instant::now(); - while start_time.elapsed() <= *duration { - if let Ok(num_devices) = trezor.update_devices(dir) { - trace!(target: "hw", "{} Trezor devices {}", num_devices, dir); - return true - } - } - false + let start_time = Instant::now(); + while start_time.elapsed() <= *duration { + if let Ok(num_devices) = trezor.update_devices(dir) { + trace!(target: "hw", "{} Trezor devices {}", num_devices, dir); + return true; + } + } + false } /// Check if the detected device is a Trezor device by checking both the product ID and the vendor ID pub fn is_valid_trezor(vid: u16, pid: u16) -> bool { - vid == TREZOR_VID && TREZOR_PIDS.contains(&pid) + vid == TREZOR_VID && TREZOR_PIDS.contains(&pid) } #[test] @@ -433,31 +502,39 @@ pub fn is_valid_trezor(vid: u16, pid: u16) -> bool { /// This test can't be run without an actual trezor device connected /// (and unlocked) attached to the machine that's running the test fn test_signature() { - use ethereum_types::Address; - use MAX_POLLING_DURATION; - use super::HardwareWalletManager; + use super::HardwareWalletManager; + use ethereum_types::Address; + use MAX_POLLING_DURATION; - let manager = HardwareWalletManager::new().unwrap(); + let manager = HardwareWalletManager::new().unwrap(); - assert_eq!(try_connect_polling(&manager.trezor, &MAX_POLLING_DURATION, DeviceDirection::Arrived), true); + assert_eq!( + try_connect_polling( + &manager.trezor, 
+ &MAX_POLLING_DURATION, + DeviceDirection::Arrived + ), + true + ); - let addr: Address = manager.list_wallets() - .iter() - .filter(|d| d.name == "TREZOR".to_string() && d.manufacturer == "SatoshiLabs".to_string()) - .nth(0) - .map(|d| d.address) - .unwrap(); + let addr: Address = manager + .list_wallets() + .iter() + .filter(|d| d.name == "TREZOR".to_string() && d.manufacturer == "SatoshiLabs".to_string()) + .nth(0) + .map(|d| d.address) + .unwrap(); - let t_info = TransactionInfo { - nonce: U256::from(1), - gas_price: U256::from(100), - gas_limit: U256::from(21_000), - to: Some(Address::from(1337)), - chain_id: Some(1), - value: U256::from(1_000_000), - data: (&[1u8; 3000]).to_vec(), - }; + let t_info = TransactionInfo { + nonce: U256::from(1), + gas_price: U256::from(100), + gas_limit: U256::from(21_000), + to: Some(Address::from(1337)), + chain_id: Some(1), + value: U256::from(1_000_000), + data: (&[1u8; 3000]).to_vec(), + }; - let signature = manager.trezor.sign_transaction(&addr, &t_info); - assert!(signature.is_ok()); + let signature = manager.trezor.sign_transaction(&addr, &t_info); + assert!(signature.is_ok()); } diff --git a/accounts/src/account_data.rs b/accounts/src/account_data.rs index a36d38740..0c55e0bc0 100644 --- a/accounts/src/account_data.rs +++ b/accounts/src/account_data.rs @@ -16,58 +16,56 @@ //! Account Metadata -use std::{ - collections::HashMap, - time::Instant, -}; +use std::{collections::HashMap, time::Instant}; use ethkey::{Address, Password}; -use serde_derive::{Serialize, Deserialize}; +use serde_derive::{Deserialize, Serialize}; use serde_json; /// Type of unlock. #[derive(Clone, PartialEq)] pub enum Unlock { - /// If account is unlocked temporarily, it should be locked after first usage. - OneTime, - /// Account unlocked permanently can always sign message. - /// Use with caution. - Perm, - /// Account unlocked with a timeout - Timed(Instant), + /// If account is unlocked temporarily, it should be locked after first usage. 
+ OneTime, + /// Account unlocked permanently can always sign message. + /// Use with caution. + Perm, + /// Account unlocked with a timeout + Timed(Instant), } /// Data associated with account. #[derive(Clone)] pub struct AccountData { - pub unlock: Unlock, - pub password: Password, + pub unlock: Unlock, + pub password: Password, } /// Collected account metadata #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccountMeta { - /// The name of the account. - pub name: String, - /// The rest of the metadata of the account. - pub meta: String, - /// The 128-bit Uuid of the account, if it has one (brain-wallets don't). - pub uuid: Option, + /// The name of the account. + pub name: String, + /// The rest of the metadata of the account. + pub meta: String, + /// The 128-bit Uuid of the account, if it has one (brain-wallets don't). + pub uuid: Option, } impl AccountMeta { - /// Read a hash map of Address -> AccountMeta - pub fn read(reader: R) -> Result, serde_json::Error> where - R: ::std::io::Read, - { - serde_json::from_reader(reader) - } + /// Read a hash map of Address -> AccountMeta + pub fn read(reader: R) -> Result, serde_json::Error> + where + R: ::std::io::Read, + { + serde_json::from_reader(reader) + } - /// Write a hash map of Address -> AccountMeta - pub fn write(m: &HashMap, writer: &mut W) -> Result<(), serde_json::Error> where - W: ::std::io::Write, - { - serde_json::to_writer(writer, m) - } + /// Write a hash map of Address -> AccountMeta + pub fn write(m: &HashMap, writer: &mut W) -> Result<(), serde_json::Error> + where + W: ::std::io::Write, + { + serde_json::to_writer(writer, m) + } } - diff --git a/accounts/src/error.rs b/accounts/src/error.rs index 2aa3564ef..09c256eea 100644 --- a/accounts/src/error.rs +++ b/accounts/src/error.rs @@ -16,41 +16,41 @@ use std::fmt; -use ethstore::{Error as SSError}; -use hardware_wallet::{Error as HardwareError}; +use ethstore::Error as SSError; +use hardware_wallet::Error as 
HardwareError; /// Signing error #[derive(Debug)] pub enum SignError { - /// Account is not unlocked - NotUnlocked, - /// Account does not exist. - NotFound, - /// Low-level hardware device error. - Hardware(HardwareError), - /// Low-level error from store - SStore(SSError), + /// Account is not unlocked + NotUnlocked, + /// Account does not exist. + NotFound, + /// Low-level hardware device error. + Hardware(HardwareError), + /// Low-level error from store + SStore(SSError), } impl fmt::Display for SignError { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - SignError::NotUnlocked => write!(f, "Account is locked"), - SignError::NotFound => write!(f, "Account does not exist"), - SignError::Hardware(ref e) => write!(f, "{}", e), - SignError::SStore(ref e) => write!(f, "{}", e), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + SignError::NotUnlocked => write!(f, "Account is locked"), + SignError::NotFound => write!(f, "Account does not exist"), + SignError::Hardware(ref e) => write!(f, "{}", e), + SignError::SStore(ref e) => write!(f, "{}", e), + } + } } impl From for SignError { - fn from(e: HardwareError) -> Self { - SignError::Hardware(e) - } + fn from(e: HardwareError) -> Self { + SignError::Hardware(e) + } } impl From for SignError { - fn from(e: SSError) -> Self { - SignError::SStore(e) - } + fn from(e: SSError) -> Self { + SignError::SStore(e) + } } diff --git a/accounts/src/lib.rs b/accounts/src/lib.rs index 0107eadad..84fbd1fe3 100644 --- a/accounts/src/lib.rs +++ b/accounts/src/lib.rs @@ -25,727 +25,990 @@ mod stores; #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))] extern crate fake_hardware_wallet as hardware_wallet; -use self::account_data::{Unlock, AccountData}; -use self::stores::AddressBook; +use self::{ + account_data::{AccountData, Unlock}, + stores::AddressBook, +}; -use std::collections::HashMap; -use std::time::{Instant, Duration}; 
+use std::{ + collections::HashMap, + time::{Duration, Instant}, +}; use common_types::transaction::{Action, Transaction}; -use ethkey::{Address, Message, Public, Secret, Password, Random, Generator}; -use ethstore::accounts_dir::MemoryDirectory; +use ethkey::{Address, Generator, Message, Password, Public, Random, Secret}; use ethstore::{ - SimpleSecretStore, SecretStore, EthStore, EthMultiStore, - random_string, SecretVaultRef, StoreAccountRef, OpaqueSecret, + accounts_dir::MemoryDirectory, random_string, EthMultiStore, EthStore, OpaqueSecret, + SecretStore, SecretVaultRef, SimpleSecretStore, StoreAccountRef, }; -use log::{warn, debug}; +use log::{debug, warn}; use parking_lot::RwLock; pub use ethkey::Signature; -pub use ethstore::{Derivation, IndexDerivation, KeyFile, Error}; -pub use hardware_wallet::{Error as HardwareError, HardwareWalletManager, KeyPath, TransactionInfo}; +pub use ethstore::{Derivation, Error, IndexDerivation, KeyFile}; +pub use hardware_wallet::{ + Error as HardwareError, HardwareWalletManager, KeyPath, TransactionInfo, +}; -pub use self::account_data::AccountMeta; -pub use self::error::SignError; +pub use self::{account_data::AccountMeta, error::SignError}; type AccountToken = Password; /// Account management settings. #[derive(Debug, Default)] pub struct AccountProviderSettings { - /// Enable hardware wallet support. - pub enable_hardware_wallets: bool, - /// Use the classic chain key on the hardware wallet. - pub hardware_wallet_classic_key: bool, - /// Store raw account secret when unlocking the account permanently. - pub unlock_keep_secret: bool, - /// Disallowed accounts. - pub blacklisted_accounts: Vec
, + /// Enable hardware wallet support. + pub enable_hardware_wallets: bool, + /// Use the classic chain key on the hardware wallet. + pub hardware_wallet_classic_key: bool, + /// Store raw account secret when unlocking the account permanently. + pub unlock_keep_secret: bool, + /// Disallowed accounts. + pub blacklisted_accounts: Vec
, } /// Account management. /// Responsible for unlocking accounts. pub struct AccountProvider { - /// For performance reasons some methods can re-use unlocked secrets. - unlocked_secrets: RwLock>, - /// Unlocked account data. - unlocked: RwLock>, - /// Address book. - address_book: RwLock, - /// Accounts on disk - sstore: Box, - /// Accounts unlocked with rolling tokens - transient_sstore: EthMultiStore, - /// Accounts in hardware wallets. - hardware_store: Option, - /// When unlocking account permanently we additionally keep a raw secret in memory - /// to increase the performance of transaction signing. - unlock_keep_secret: bool, - /// Disallowed accounts. - blacklisted_accounts: Vec
, + /// For performance reasons some methods can re-use unlocked secrets. + unlocked_secrets: RwLock>, + /// Unlocked account data. + unlocked: RwLock>, + /// Address book. + address_book: RwLock, + /// Accounts on disk + sstore: Box, + /// Accounts unlocked with rolling tokens + transient_sstore: EthMultiStore, + /// Accounts in hardware wallets. + hardware_store: Option, + /// When unlocking account permanently we additionally keep a raw secret in memory + /// to increase the performance of transaction signing. + unlock_keep_secret: bool, + /// Disallowed accounts. + blacklisted_accounts: Vec
, } fn transient_sstore() -> EthMultiStore { - EthMultiStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory load always succeeds; qed") + EthMultiStore::open(Box::new(MemoryDirectory::default())) + .expect("MemoryDirectory load always succeeds; qed") } impl AccountProvider { - /// Creates new account provider. - pub fn new(sstore: Box, settings: AccountProviderSettings) -> Self { - let mut hardware_store = None; + /// Creates new account provider. + pub fn new(sstore: Box, settings: AccountProviderSettings) -> Self { + let mut hardware_store = None; - if settings.enable_hardware_wallets { - match HardwareWalletManager::new() { - Ok(manager) => { - manager.set_key_path(if settings.hardware_wallet_classic_key { KeyPath::EthereumClassic } else { KeyPath::Ethereum }); - hardware_store = Some(manager) - }, - Err(e) => debug!("Error initializing hardware wallets: {}", e), - } - } + if settings.enable_hardware_wallets { + match HardwareWalletManager::new() { + Ok(manager) => { + manager.set_key_path(if settings.hardware_wallet_classic_key { + KeyPath::EthereumClassic + } else { + KeyPath::Ethereum + }); + hardware_store = Some(manager) + } + Err(e) => debug!("Error initializing hardware wallets: {}", e), + } + } - if let Ok(accounts) = sstore.accounts() { - for account in accounts.into_iter().filter(|a| settings.blacklisted_accounts.contains(&a.address)) { - warn!("Local Account {} has a blacklisted (known to be weak) address and will be ignored", + if let Ok(accounts) = sstore.accounts() { + for account in accounts + .into_iter() + .filter(|a| settings.blacklisted_accounts.contains(&a.address)) + { + warn!("Local Account {} has a blacklisted (known to be weak) address and will be ignored", account.address); - } - } + } + } - // Remove blacklisted accounts from address book. 
- let mut address_book = AddressBook::new(&sstore.local_path()); - for addr in &settings.blacklisted_accounts { - address_book.remove(*addr); - } + // Remove blacklisted accounts from address book. + let mut address_book = AddressBook::new(&sstore.local_path()); + for addr in &settings.blacklisted_accounts { + address_book.remove(*addr); + } - AccountProvider { - unlocked_secrets: RwLock::new(HashMap::new()), - unlocked: RwLock::new(HashMap::new()), - address_book: RwLock::new(address_book), - sstore: sstore, - transient_sstore: transient_sstore(), - hardware_store: hardware_store, - unlock_keep_secret: settings.unlock_keep_secret, - blacklisted_accounts: settings.blacklisted_accounts, - } - } + AccountProvider { + unlocked_secrets: RwLock::new(HashMap::new()), + unlocked: RwLock::new(HashMap::new()), + address_book: RwLock::new(address_book), + sstore: sstore, + transient_sstore: transient_sstore(), + hardware_store: hardware_store, + unlock_keep_secret: settings.unlock_keep_secret, + blacklisted_accounts: settings.blacklisted_accounts, + } + } - /// Creates not disk backed provider. - pub fn transient_provider() -> Self { - AccountProvider { - unlocked_secrets: RwLock::new(HashMap::new()), - unlocked: RwLock::new(HashMap::new()), - address_book: RwLock::new(AddressBook::transient()), - sstore: Box::new(EthStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory load always succeeds; qed")), - transient_sstore: transient_sstore(), - hardware_store: None, - unlock_keep_secret: false, - blacklisted_accounts: vec![], - } - } + /// Creates not disk backed provider. 
+ pub fn transient_provider() -> Self { + AccountProvider { + unlocked_secrets: RwLock::new(HashMap::new()), + unlocked: RwLock::new(HashMap::new()), + address_book: RwLock::new(AddressBook::transient()), + sstore: Box::new( + EthStore::open(Box::new(MemoryDirectory::default())) + .expect("MemoryDirectory load always succeeds; qed"), + ), + transient_sstore: transient_sstore(), + hardware_store: None, + unlock_keep_secret: false, + blacklisted_accounts: vec![], + } + } - /// Creates new random account. - pub fn new_account(&self, password: &Password) -> Result { - self.new_account_and_public(password).map(|d| d.0) - } + /// Creates new random account. + pub fn new_account(&self, password: &Password) -> Result { + self.new_account_and_public(password).map(|d| d.0) + } - /// Creates new random account and returns address and public key - pub fn new_account_and_public(&self, password: &Password) -> Result<(Address, Public), Error> { - let acc = Random.generate().expect("secp context has generation capabilities; qed"); - let public = acc.public().clone(); - let secret = acc.secret().clone(); - let account = self.sstore.insert_account(SecretVaultRef::Root, secret, password)?; - Ok((account.address, public)) - } + /// Creates new random account and returns address and public key + pub fn new_account_and_public(&self, password: &Password) -> Result<(Address, Public), Error> { + let acc = Random + .generate() + .expect("secp context has generation capabilities; qed"); + let public = acc.public().clone(); + let secret = acc.secret().clone(); + let account = self + .sstore + .insert_account(SecretVaultRef::Root, secret, password)?; + Ok((account.address, public)) + } - /// Inserts new account into underlying store. - /// Does not unlock account! 
- pub fn insert_account(&self, secret: Secret, password: &Password) -> Result { - let account = self.sstore.insert_account(SecretVaultRef::Root, secret, password)?; - if self.blacklisted_accounts.contains(&account.address) { - self.sstore.remove_account(&account, password)?; - return Err(Error::InvalidAccount.into()); - } - Ok(account.address) - } + /// Inserts new account into underlying store. + /// Does not unlock account! + pub fn insert_account(&self, secret: Secret, password: &Password) -> Result { + let account = self + .sstore + .insert_account(SecretVaultRef::Root, secret, password)?; + if self.blacklisted_accounts.contains(&account.address) { + self.sstore.remove_account(&account, password)?; + return Err(Error::InvalidAccount.into()); + } + Ok(account.address) + } - /// Generates new derived account based on the existing one - /// If password is not provided, account must be unlocked - /// New account will be created with the same password (if save: true) - pub fn derive_account(&self, address: &Address, password: Option, derivation: Derivation, save: bool) - -> Result - { - let account = self.sstore.account_ref(&address)?; - let password = password.map(Ok).unwrap_or_else(|| self.password(&account))?; - Ok( - if save { self.sstore.insert_derived(SecretVaultRef::Root, &account, &password, derivation)?.address } - else { self.sstore.generate_derived(&account, &password, derivation)? } - ) - } + /// Generates new derived account based on the existing one + /// If password is not provided, account must be unlocked + /// New account will be created with the same password (if save: true) + pub fn derive_account( + &self, + address: &Address, + password: Option, + derivation: Derivation, + save: bool, + ) -> Result { + let account = self.sstore.account_ref(&address)?; + let password = password + .map(Ok) + .unwrap_or_else(|| self.password(&account))?; + Ok(if save { + self.sstore + .insert_derived(SecretVaultRef::Root, &account, &password, derivation)? 
+ .address + } else { + self.sstore + .generate_derived(&account, &password, derivation)? + }) + } - /// Import a new presale wallet. - pub fn import_presale(&self, presale_json: &[u8], password: &Password) -> Result { - let account = self.sstore.import_presale(SecretVaultRef::Root, presale_json, password)?; - Ok(Address::from(account.address).into()) - } + /// Import a new presale wallet. + pub fn import_presale( + &self, + presale_json: &[u8], + password: &Password, + ) -> Result { + let account = self + .sstore + .import_presale(SecretVaultRef::Root, presale_json, password)?; + Ok(Address::from(account.address).into()) + } - /// Import a new wallet. - pub fn import_wallet(&self, json: &[u8], password: &Password, gen_id: bool) -> Result { - let account = self.sstore.import_wallet(SecretVaultRef::Root, json, password, gen_id)?; - if self.blacklisted_accounts.contains(&account.address) { - self.sstore.remove_account(&account, password)?; - return Err(Error::InvalidAccount.into()); - } - Ok(Address::from(account.address).into()) - } + /// Import a new wallet. + pub fn import_wallet( + &self, + json: &[u8], + password: &Password, + gen_id: bool, + ) -> Result { + let account = self + .sstore + .import_wallet(SecretVaultRef::Root, json, password, gen_id)?; + if self.blacklisted_accounts.contains(&account.address) { + self.sstore.remove_account(&account, password)?; + return Err(Error::InvalidAccount.into()); + } + Ok(Address::from(account.address).into()) + } - /// Checks whether an account with a given address is present. - pub fn has_account(&self, address: Address) -> bool { - self.sstore.account_ref(&address).is_ok() && !self.blacklisted_accounts.contains(&address) - } + /// Checks whether an account with a given address is present. + pub fn has_account(&self, address: Address) -> bool { + self.sstore.account_ref(&address).is_ok() && !self.blacklisted_accounts.contains(&address) + } - /// Returns addresses of all accounts. 
- pub fn accounts(&self) -> Result, Error> { - let accounts = self.sstore.accounts()?; - Ok(accounts - .into_iter() - .map(|a| a.address) - .filter(|address| !self.blacklisted_accounts.contains(address)) - .collect() - ) - } + /// Returns addresses of all accounts. + pub fn accounts(&self) -> Result, Error> { + let accounts = self.sstore.accounts()?; + Ok(accounts + .into_iter() + .map(|a| a.address) + .filter(|address| !self.blacklisted_accounts.contains(address)) + .collect()) + } - /// Returns the address of default account. - pub fn default_account(&self) -> Result { - Ok(self.accounts()?.first().cloned().unwrap_or_default()) - } + /// Returns the address of default account. + pub fn default_account(&self) -> Result { + Ok(self.accounts()?.first().cloned().unwrap_or_default()) + } - /// Returns addresses of hardware accounts. - pub fn hardware_accounts(&self) -> Result, Error> { - if let Some(accounts) = self.hardware_store.as_ref().map(|h| h.list_wallets()) { - if !accounts.is_empty() { - return Ok(accounts.into_iter().map(|a| a.address).collect()); - } - } - Err(Error::Custom("No hardware wallet accounts were found".into())) - } + /// Returns addresses of hardware accounts. 
+ pub fn hardware_accounts(&self) -> Result, Error> { + if let Some(accounts) = self.hardware_store.as_ref().map(|h| h.list_wallets()) { + if !accounts.is_empty() { + return Ok(accounts.into_iter().map(|a| a.address).collect()); + } + } + Err(Error::Custom( + "No hardware wallet accounts were found".into(), + )) + } - /// Get a list of paths to locked hardware wallets - pub fn locked_hardware_accounts(&self) -> Result, SignError> { - match self.hardware_store.as_ref().map(|h| h.list_locked_wallets()) { - None => Err(SignError::NotFound), - Some(Err(e)) => Err(SignError::Hardware(e)), - Some(Ok(s)) => Ok(s), - } - } + /// Get a list of paths to locked hardware wallets + pub fn locked_hardware_accounts(&self) -> Result, SignError> { + match self + .hardware_store + .as_ref() + .map(|h| h.list_locked_wallets()) + { + None => Err(SignError::NotFound), + Some(Err(e)) => Err(SignError::Hardware(e)), + Some(Ok(s)) => Ok(s), + } + } - /// Provide a pin to a locked hardware wallet on USB path to unlock it - pub fn hardware_pin_matrix_ack(&self, path: &str, pin: &str) -> Result { - match self.hardware_store.as_ref().map(|h| h.pin_matrix_ack(path, pin)) { - None => Err(SignError::NotFound), - Some(Err(e)) => Err(SignError::Hardware(e)), - Some(Ok(s)) => Ok(s), - } - } + /// Provide a pin to a locked hardware wallet on USB path to unlock it + pub fn hardware_pin_matrix_ack(&self, path: &str, pin: &str) -> Result { + match self + .hardware_store + .as_ref() + .map(|h| h.pin_matrix_ack(path, pin)) + { + None => Err(SignError::NotFound), + Some(Err(e)) => Err(SignError::Hardware(e)), + Some(Ok(s)) => Ok(s), + } + } - /// Returns each address along with metadata. - pub fn addresses_info(&self) -> HashMap { - self.address_book.read().get() - } + /// Returns each address along with metadata. + pub fn addresses_info(&self) -> HashMap { + self.address_book.read().get() + } - /// Returns each address along with metadata. 
- pub fn set_address_name(&self, account: Address, name: String) { - self.address_book.write().set_name(account, name) - } + /// Returns each address along with metadata. + pub fn set_address_name(&self, account: Address, name: String) { + self.address_book.write().set_name(account, name) + } - /// Returns each address along with metadata. - pub fn set_address_meta(&self, account: Address, meta: String) { - self.address_book.write().set_meta(account, meta) - } + /// Returns each address along with metadata. + pub fn set_address_meta(&self, account: Address, meta: String) { + self.address_book.write().set_meta(account, meta) + } - /// Removes and address from the address book - pub fn remove_address(&self, addr: Address) { - self.address_book.write().remove(addr) - } + /// Removes and address from the address book + pub fn remove_address(&self, addr: Address) { + self.address_book.write().remove(addr) + } - /// Returns each account along with name and meta. - pub fn accounts_info(&self) -> Result, Error> { - let r = self.sstore.accounts()? - .into_iter() - .filter(|a| !self.blacklisted_accounts.contains(&a.address)) - .map(|a| (a.address.clone(), self.account_meta(a.address).ok().unwrap_or_default())) - .collect(); - Ok(r) - } + /// Returns each account along with name and meta. + pub fn accounts_info(&self) -> Result, Error> { + let r = self + .sstore + .accounts()? + .into_iter() + .filter(|a| !self.blacklisted_accounts.contains(&a.address)) + .map(|a| { + ( + a.address.clone(), + self.account_meta(a.address).ok().unwrap_or_default(), + ) + }) + .collect(); + Ok(r) + } - /// Returns each hardware account along with name and meta. - pub fn hardware_accounts_info(&self) -> Result, Error> { - let r = self.hardware_accounts()? - .into_iter() - .map(|address| (address.clone(), self.account_meta(address).ok().unwrap_or_default())) - .collect(); - Ok(r) - } + /// Returns each hardware account along with name and meta. 
+ pub fn hardware_accounts_info(&self) -> Result, Error> { + let r = self + .hardware_accounts()? + .into_iter() + .map(|address| { + ( + address.clone(), + self.account_meta(address).ok().unwrap_or_default(), + ) + }) + .collect(); + Ok(r) + } - /// Returns each hardware account along with name and meta. - pub fn is_hardware_address(&self, address: &Address) -> bool { - self.hardware_store.as_ref().and_then(|s| s.wallet_info(address)).is_some() - } + /// Returns each hardware account along with name and meta. + pub fn is_hardware_address(&self, address: &Address) -> bool { + self.hardware_store + .as_ref() + .and_then(|s| s.wallet_info(address)) + .is_some() + } - /// Returns each account along with name and meta. - pub fn account_meta(&self, address: Address) -> Result { - if let Some(info) = self.hardware_store.as_ref().and_then(|s| s.wallet_info(&address)) { - Ok(AccountMeta { - name: info.name, - meta: info.manufacturer, - uuid: None, - }) - } else { - let account = self.sstore.account_ref(&address)?; - Ok(AccountMeta { - name: self.sstore.name(&account)?, - meta: self.sstore.meta(&account)?, - uuid: self.sstore.uuid(&account).ok().map(Into::into), // allowed to not have a Uuid - }) - } - } + /// Returns each account along with name and meta. + pub fn account_meta(&self, address: Address) -> Result { + if let Some(info) = self + .hardware_store + .as_ref() + .and_then(|s| s.wallet_info(&address)) + { + Ok(AccountMeta { + name: info.name, + meta: info.manufacturer, + uuid: None, + }) + } else { + let account = self.sstore.account_ref(&address)?; + Ok(AccountMeta { + name: self.sstore.name(&account)?, + meta: self.sstore.meta(&account)?, + uuid: self.sstore.uuid(&account).ok().map(Into::into), // allowed to not have a Uuid + }) + } + } - /// Returns account public key. - pub fn account_public(&self, address: Address, password: &Password) -> Result { - self.sstore.public(&self.sstore.account_ref(&address)?, password) - } + /// Returns account public key. 
+ pub fn account_public(&self, address: Address, password: &Password) -> Result { + self.sstore + .public(&self.sstore.account_ref(&address)?, password) + } - /// Returns each account along with name and meta. - pub fn set_account_name(&self, address: Address, name: String) -> Result<(), Error> { - self.sstore.set_name(&self.sstore.account_ref(&address)?, name)?; - Ok(()) - } + /// Returns each account along with name and meta. + pub fn set_account_name(&self, address: Address, name: String) -> Result<(), Error> { + self.sstore + .set_name(&self.sstore.account_ref(&address)?, name)?; + Ok(()) + } - /// Returns each account along with name and meta. - pub fn set_account_meta(&self, address: Address, meta: String) -> Result<(), Error> { - self.sstore.set_meta(&self.sstore.account_ref(&address)?, meta)?; - Ok(()) - } + /// Returns each account along with name and meta. + pub fn set_account_meta(&self, address: Address, meta: String) -> Result<(), Error> { + self.sstore + .set_meta(&self.sstore.account_ref(&address)?, meta)?; + Ok(()) + } - /// Returns `true` if the password for `account` is `password`. `false` if not. - pub fn test_password(&self, address: &Address, password: &Password) -> Result { - self.sstore.test_password(&self.sstore.account_ref(&address)?, password) - .map_err(Into::into) - } + /// Returns `true` if the password for `account` is `password`. `false` if not. + pub fn test_password(&self, address: &Address, password: &Password) -> Result { + self.sstore + .test_password(&self.sstore.account_ref(&address)?, password) + .map_err(Into::into) + } - /// Permanently removes an account. - pub fn kill_account(&self, address: &Address, password: &Password) -> Result<(), Error> { - self.sstore.remove_account(&self.sstore.account_ref(&address)?, &password)?; - Ok(()) - } + /// Permanently removes an account. 
+ pub fn kill_account(&self, address: &Address, password: &Password) -> Result<(), Error> { + self.sstore + .remove_account(&self.sstore.account_ref(&address)?, &password)?; + Ok(()) + } - /// Changes the password of `account` from `password` to `new_password`. Fails if incorrect `password` given. - pub fn change_password(&self, address: &Address, password: Password, new_password: Password) -> Result<(), Error> { - self.sstore.change_password(&self.sstore.account_ref(address)?, &password, &new_password) - } + /// Changes the password of `account` from `password` to `new_password`. Fails if incorrect `password` given. + pub fn change_password( + &self, + address: &Address, + password: Password, + new_password: Password, + ) -> Result<(), Error> { + self.sstore + .change_password(&self.sstore.account_ref(address)?, &password, &new_password) + } - /// Exports an account for given address. - pub fn export_account(&self, address: &Address, password: Password) -> Result { - self.sstore.export_account(&self.sstore.account_ref(address)?, &password) - } + /// Exports an account for given address. + pub fn export_account(&self, address: &Address, password: Password) -> Result { + self.sstore + .export_account(&self.sstore.account_ref(address)?, &password) + } - /// Helper method used for unlocking accounts. - fn unlock_account(&self, address: Address, password: Password, unlock: Unlock) -> Result<(), Error> { - let account = self.sstore.account_ref(&address)?; + /// Helper method used for unlocking accounts. 
+ fn unlock_account( + &self, + address: Address, + password: Password, + unlock: Unlock, + ) -> Result<(), Error> { + let account = self.sstore.account_ref(&address)?; - // check if account is already unlocked permanently, if it is, do nothing - let mut unlocked = self.unlocked.write(); - if let Some(data) = unlocked.get(&account) { - if let Unlock::Perm = data.unlock { - return Ok(()) - } - } + // check if account is already unlocked permanently, if it is, do nothing + let mut unlocked = self.unlocked.write(); + if let Some(data) = unlocked.get(&account) { + if let Unlock::Perm = data.unlock { + return Ok(()); + } + } - if self.unlock_keep_secret && unlock == Unlock::Perm { - // verify password and get the secret - let secret = self.sstore.raw_secret(&account, &password)?; - self.unlocked_secrets.write().insert(account.clone(), secret); - } else { - // verify password by signing dump message - // result may be discarded - let _ = self.sstore.sign(&account, &password, &Default::default())?; - } + if self.unlock_keep_secret && unlock == Unlock::Perm { + // verify password and get the secret + let secret = self.sstore.raw_secret(&account, &password)?; + self.unlocked_secrets + .write() + .insert(account.clone(), secret); + } else { + // verify password by signing dump message + // result may be discarded + let _ = self.sstore.sign(&account, &password, &Default::default())?; + } - let data = AccountData { - unlock: unlock, - password: password, - }; + let data = AccountData { + unlock: unlock, + password: password, + }; - unlocked.insert(account, data); - Ok(()) - } + unlocked.insert(account, data); + Ok(()) + } - fn password(&self, account: &StoreAccountRef) -> Result { - let mut unlocked = self.unlocked.write(); - let data = unlocked.get(account).ok_or(SignError::NotUnlocked)?.clone(); - if let Unlock::OneTime = data.unlock { - unlocked.remove(account).expect("data exists: so key must exist: qed"); - } - if let Unlock::Timed(ref end) = data.unlock { - if 
Instant::now() > *end { - unlocked.remove(account).expect("data exists: so key must exist: qed"); - return Err(SignError::NotUnlocked); - } - } - Ok(data.password) - } + fn password(&self, account: &StoreAccountRef) -> Result { + let mut unlocked = self.unlocked.write(); + let data = unlocked.get(account).ok_or(SignError::NotUnlocked)?.clone(); + if let Unlock::OneTime = data.unlock { + unlocked + .remove(account) + .expect("data exists: so key must exist: qed"); + } + if let Unlock::Timed(ref end) = data.unlock { + if Instant::now() > *end { + unlocked + .remove(account) + .expect("data exists: so key must exist: qed"); + return Err(SignError::NotUnlocked); + } + } + Ok(data.password) + } - /// Unlocks account permanently. - pub fn unlock_account_permanently(&self, account: Address, password: Password) -> Result<(), Error> { - self.unlock_account(account, password, Unlock::Perm) - } + /// Unlocks account permanently. + pub fn unlock_account_permanently( + &self, + account: Address, + password: Password, + ) -> Result<(), Error> { + self.unlock_account(account, password, Unlock::Perm) + } - /// Unlocks account temporarily (for one signing). - pub fn unlock_account_temporarily(&self, account: Address, password: Password) -> Result<(), Error> { - self.unlock_account(account, password, Unlock::OneTime) - } + /// Unlocks account temporarily (for one signing). + pub fn unlock_account_temporarily( + &self, + account: Address, + password: Password, + ) -> Result<(), Error> { + self.unlock_account(account, password, Unlock::OneTime) + } - /// Unlocks account temporarily with a timeout. - pub fn unlock_account_timed(&self, account: Address, password: Password, duration: Duration) -> Result<(), Error> { - self.unlock_account(account, password, Unlock::Timed(Instant::now() + duration)) - } + /// Unlocks account temporarily with a timeout. 
+ pub fn unlock_account_timed( + &self, + account: Address, + password: Password, + duration: Duration, + ) -> Result<(), Error> { + self.unlock_account(account, password, Unlock::Timed(Instant::now() + duration)) + } - /// Checks if given account is unlocked - pub fn is_unlocked(&self, address: &Address) -> bool { - let unlocked = self.unlocked.read(); - let unlocked_secrets = self.unlocked_secrets.read(); - self.sstore.account_ref(address) - .map(|r| unlocked.get(&r).is_some() || unlocked_secrets.get(&r).is_some()) - .unwrap_or(false) - } + /// Checks if given account is unlocked + pub fn is_unlocked(&self, address: &Address) -> bool { + let unlocked = self.unlocked.read(); + let unlocked_secrets = self.unlocked_secrets.read(); + self.sstore + .account_ref(address) + .map(|r| unlocked.get(&r).is_some() || unlocked_secrets.get(&r).is_some()) + .unwrap_or(false) + } - /// Checks if given account is unlocked permanently - pub fn is_unlocked_permanently(&self, address: &Address) -> bool { - let unlocked = self.unlocked.read(); - self.sstore.account_ref(address) - .map(|r| unlocked.get(&r).map_or(false, |account| account.unlock == Unlock::Perm)) - .unwrap_or(false) - } + /// Checks if given account is unlocked permanently + pub fn is_unlocked_permanently(&self, address: &Address) -> bool { + let unlocked = self.unlocked.read(); + self.sstore + .account_ref(address) + .map(|r| { + unlocked + .get(&r) + .map_or(false, |account| account.unlock == Unlock::Perm) + }) + .unwrap_or(false) + } - /// Signs the message. If password is not provided the account must be unlocked. - pub fn sign(&self, address: Address, password: Option, message: Message) -> Result { - let account = self.sstore.account_ref(&address)?; - match self.unlocked_secrets.read().get(&account) { - Some(secret) => { - Ok(self.sstore.sign_with_secret(&secret, &message)?) 
- }, - None => { - let password = password.map(Ok).unwrap_or_else(|| self.password(&account))?; - Ok(self.sstore.sign(&account, &password, &message)?) - } - } - } + /// Signs the message. If password is not provided the account must be unlocked. + pub fn sign( + &self, + address: Address, + password: Option, + message: Message, + ) -> Result { + let account = self.sstore.account_ref(&address)?; + match self.unlocked_secrets.read().get(&account) { + Some(secret) => Ok(self.sstore.sign_with_secret(&secret, &message)?), + None => { + let password = password + .map(Ok) + .unwrap_or_else(|| self.password(&account))?; + Ok(self.sstore.sign(&account, &password, &message)?) + } + } + } - /// Signs message using the derived secret. If password is not provided the account must be unlocked. - pub fn sign_derived(&self, address: &Address, password: Option, derivation: Derivation, message: Message) - -> Result - { - let account = self.sstore.account_ref(address)?; - let password = password.map(Ok).unwrap_or_else(|| self.password(&account))?; - Ok(self.sstore.sign_derived(&account, &password, derivation, &message)?) - } + /// Signs message using the derived secret. If password is not provided the account must be unlocked. + pub fn sign_derived( + &self, + address: &Address, + password: Option, + derivation: Derivation, + message: Message, + ) -> Result { + let account = self.sstore.account_ref(address)?; + let password = password + .map(Ok) + .unwrap_or_else(|| self.password(&account))?; + Ok(self + .sstore + .sign_derived(&account, &password, derivation, &message)?) + } - /// Signs given message with supplied token. Returns a token to use in next signing within this session. - pub fn sign_with_token(&self, address: Address, token: AccountToken, message: Message) -> Result<(Signature, AccountToken), SignError> { - let account = self.sstore.account_ref(&address)?; - let is_std_password = self.sstore.test_password(&account, &token)?; + /// Signs given message with supplied token. 
Returns a token to use in next signing within this session. + pub fn sign_with_token( + &self, + address: Address, + token: AccountToken, + message: Message, + ) -> Result<(Signature, AccountToken), SignError> { + let account = self.sstore.account_ref(&address)?; + let is_std_password = self.sstore.test_password(&account, &token)?; - let new_token = Password::from(random_string(16)); - let signature = if is_std_password { - // Insert to transient store - self.sstore.copy_account(&self.transient_sstore, SecretVaultRef::Root, &account, &token, &new_token)?; - // sign - self.sstore.sign(&account, &token, &message)? - } else { - // check transient store - self.transient_sstore.change_password(&account, &token, &new_token)?; - // and sign - self.transient_sstore.sign(&account, &new_token, &message)? - }; + let new_token = Password::from(random_string(16)); + let signature = if is_std_password { + // Insert to transient store + self.sstore.copy_account( + &self.transient_sstore, + SecretVaultRef::Root, + &account, + &token, + &new_token, + )?; + // sign + self.sstore.sign(&account, &token, &message)? + } else { + // check transient store + self.transient_sstore + .change_password(&account, &token, &new_token)?; + // and sign + self.transient_sstore.sign(&account, &new_token, &message)? + }; - Ok((signature, new_token)) - } + Ok((signature, new_token)) + } - /// Decrypts a message with given token. Returns a token to use in next operation for this account. - pub fn decrypt_with_token(&self, address: Address, token: AccountToken, shared_mac: &[u8], message: &[u8]) - -> Result<(Vec, AccountToken), SignError> - { - let account = self.sstore.account_ref(&address)?; - let is_std_password = self.sstore.test_password(&account, &token)?; + /// Decrypts a message with given token. Returns a token to use in next operation for this account. 
+ pub fn decrypt_with_token( + &self, + address: Address, + token: AccountToken, + shared_mac: &[u8], + message: &[u8], + ) -> Result<(Vec, AccountToken), SignError> { + let account = self.sstore.account_ref(&address)?; + let is_std_password = self.sstore.test_password(&account, &token)?; - let new_token = Password::from(random_string(16)); - let message = if is_std_password { - // Insert to transient store - self.sstore.copy_account(&self.transient_sstore, SecretVaultRef::Root, &account, &token, &new_token)?; - // decrypt - self.sstore.decrypt(&account, &token, shared_mac, message)? - } else { - // check transient store - self.transient_sstore.change_password(&account, &token, &new_token)?; - // and decrypt - self.transient_sstore.decrypt(&account, &token, shared_mac, message)? - }; + let new_token = Password::from(random_string(16)); + let message = if is_std_password { + // Insert to transient store + self.sstore.copy_account( + &self.transient_sstore, + SecretVaultRef::Root, + &account, + &token, + &new_token, + )?; + // decrypt + self.sstore.decrypt(&account, &token, shared_mac, message)? + } else { + // check transient store + self.transient_sstore + .change_password(&account, &token, &new_token)?; + // and decrypt + self.transient_sstore + .decrypt(&account, &token, shared_mac, message)? + }; - Ok((message, new_token)) - } + Ok((message, new_token)) + } - /// Decrypts a message. If password is not provided the account must be unlocked. - pub fn decrypt(&self, address: Address, password: Option, shared_mac: &[u8], message: &[u8]) -> Result, SignError> { - let account = self.sstore.account_ref(&address)?; - let password = password.map(Ok).unwrap_or_else(|| self.password(&account))?; - Ok(self.sstore.decrypt(&account, &password, shared_mac, message)?) - } + /// Decrypts a message. If password is not provided the account must be unlocked. 
+ pub fn decrypt( + &self, + address: Address, + password: Option, + shared_mac: &[u8], + message: &[u8], + ) -> Result, SignError> { + let account = self.sstore.account_ref(&address)?; + let password = password + .map(Ok) + .unwrap_or_else(|| self.password(&account))?; + Ok(self + .sstore + .decrypt(&account, &password, shared_mac, message)?) + } - /// Agree on shared key. - pub fn agree(&self, address: Address, password: Option, other_public: &Public) -> Result { - let account = self.sstore.account_ref(&address)?; - let password = password.map(Ok).unwrap_or_else(|| self.password(&account))?; - Ok(self.sstore.agree(&account, &password, other_public)?) - } + /// Agree on shared key. + pub fn agree( + &self, + address: Address, + password: Option, + other_public: &Public, + ) -> Result { + let account = self.sstore.account_ref(&address)?; + let password = password + .map(Ok) + .unwrap_or_else(|| self.password(&account))?; + Ok(self.sstore.agree(&account, &password, other_public)?) + } - /// Returns the underlying `SecretStore` reference if one exists. - pub fn list_geth_accounts(&self, testnet: bool) -> Vec
{ - self.sstore.list_geth_accounts(testnet).into_iter().map(|a| Address::from(a).into()).collect() - } + /// Returns the underlying `SecretStore` reference if one exists. + pub fn list_geth_accounts(&self, testnet: bool) -> Vec
{ + self.sstore + .list_geth_accounts(testnet) + .into_iter() + .map(|a| Address::from(a).into()) + .collect() + } - /// Returns the underlying `SecretStore` reference if one exists. - pub fn import_geth_accounts(&self, desired: Vec
, testnet: bool) -> Result, Error> { - self.sstore.import_geth_accounts(SecretVaultRef::Root, desired, testnet) - .map(|a| a.into_iter().map(|a| a.address).collect()) - .map_err(Into::into) - } + /// Returns the underlying `SecretStore` reference if one exists. + pub fn import_geth_accounts( + &self, + desired: Vec
, + testnet: bool, + ) -> Result, Error> { + self.sstore + .import_geth_accounts(SecretVaultRef::Root, desired, testnet) + .map(|a| a.into_iter().map(|a| a.address).collect()) + .map_err(Into::into) + } - /// Create new vault. - pub fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error> { - self.sstore.create_vault(name, password) - .map_err(Into::into) - } + /// Create new vault. + pub fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error> { + self.sstore.create_vault(name, password).map_err(Into::into) + } - /// Open existing vault. - pub fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error> { - self.sstore.open_vault(name, password) - .map_err(Into::into) - } + /// Open existing vault. + pub fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error> { + self.sstore.open_vault(name, password).map_err(Into::into) + } - /// Close previously opened vault. - pub fn close_vault(&self, name: &str) -> Result<(), Error> { - self.sstore.close_vault(name) - .map_err(Into::into) - } + /// Close previously opened vault. + pub fn close_vault(&self, name: &str) -> Result<(), Error> { + self.sstore.close_vault(name).map_err(Into::into) + } - /// List all vaults - pub fn list_vaults(&self) -> Result, Error> { - self.sstore.list_vaults() - .map_err(Into::into) - } + /// List all vaults + pub fn list_vaults(&self) -> Result, Error> { + self.sstore.list_vaults().map_err(Into::into) + } - /// List all currently opened vaults - pub fn list_opened_vaults(&self) -> Result, Error> { - self.sstore.list_opened_vaults() - .map_err(Into::into) - } + /// List all currently opened vaults + pub fn list_opened_vaults(&self) -> Result, Error> { + self.sstore.list_opened_vaults().map_err(Into::into) + } - /// Change vault password. 
- pub fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error> { - self.sstore.change_vault_password(name, new_password) - .map_err(Into::into) - } + /// Change vault password. + pub fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error> { + self.sstore + .change_vault_password(name, new_password) + .map_err(Into::into) + } - /// Change vault of the given address. - pub fn change_vault(&self, address: Address, new_vault: &str) -> Result<(), Error> { - let new_vault_ref = if new_vault.is_empty() { SecretVaultRef::Root } else { SecretVaultRef::Vault(new_vault.to_owned()) }; - let old_account_ref = self.sstore.account_ref(&address)?; - self.sstore.change_account_vault(new_vault_ref, old_account_ref) - .map_err(Into::into) - .map(|_| ()) - } + /// Change vault of the given address. + pub fn change_vault(&self, address: Address, new_vault: &str) -> Result<(), Error> { + let new_vault_ref = if new_vault.is_empty() { + SecretVaultRef::Root + } else { + SecretVaultRef::Vault(new_vault.to_owned()) + }; + let old_account_ref = self.sstore.account_ref(&address)?; + self.sstore + .change_account_vault(new_vault_ref, old_account_ref) + .map_err(Into::into) + .map(|_| ()) + } - /// Get vault metadata string. - pub fn get_vault_meta(&self, name: &str) -> Result { - self.sstore.get_vault_meta(name) - .map_err(Into::into) - } + /// Get vault metadata string. + pub fn get_vault_meta(&self, name: &str) -> Result { + self.sstore.get_vault_meta(name).map_err(Into::into) + } - /// Set vault metadata string. - pub fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error> { - self.sstore.set_vault_meta(name, meta) - .map_err(Into::into) - } + /// Set vault metadata string. + pub fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error> { + self.sstore.set_vault_meta(name, meta).map_err(Into::into) + } - /// Sign message with hardware wallet. 
- pub fn sign_message_with_hardware(&self, address: &Address, message: &[u8]) -> Result { - match self.hardware_store.as_ref().map(|s| s.sign_message(address, message)) { - None | Some(Err(HardwareError::KeyNotFound)) => Err(SignError::NotFound), - Some(Err(e)) => Err(From::from(e)), - Some(Ok(s)) => Ok(s), - } - } + /// Sign message with hardware wallet. + pub fn sign_message_with_hardware( + &self, + address: &Address, + message: &[u8], + ) -> Result { + match self + .hardware_store + .as_ref() + .map(|s| s.sign_message(address, message)) + { + None | Some(Err(HardwareError::KeyNotFound)) => Err(SignError::NotFound), + Some(Err(e)) => Err(From::from(e)), + Some(Ok(s)) => Ok(s), + } + } - /// Sign transaction with hardware wallet. - pub fn sign_transaction_with_hardware(&self, address: &Address, transaction: &Transaction, chain_id: Option, rlp_encoded_transaction: &[u8]) -> Result { - let t_info = TransactionInfo { - nonce: transaction.nonce, - gas_price: transaction.gas_price, - gas_limit: transaction.gas, - to: match transaction.action { - Action::Create => None, - Action::Call(ref to) => Some(to.clone()), - }, - value: transaction.value, - data: transaction.data.to_vec(), - chain_id: chain_id, - }; - match self.hardware_store.as_ref().map(|s| s.sign_transaction(&address, &t_info, rlp_encoded_transaction)) { - None | Some(Err(HardwareError::KeyNotFound)) => Err(SignError::NotFound), - Some(Err(e)) => Err(From::from(e)), - Some(Ok(s)) => Ok(s), - } - } + /// Sign transaction with hardware wallet. 
+ pub fn sign_transaction_with_hardware( + &self, + address: &Address, + transaction: &Transaction, + chain_id: Option, + rlp_encoded_transaction: &[u8], + ) -> Result { + let t_info = TransactionInfo { + nonce: transaction.nonce, + gas_price: transaction.gas_price, + gas_limit: transaction.gas, + to: match transaction.action { + Action::Create => None, + Action::Call(ref to) => Some(to.clone()), + }, + value: transaction.value, + data: transaction.data.to_vec(), + chain_id: chain_id, + }; + match self + .hardware_store + .as_ref() + .map(|s| s.sign_transaction(&address, &t_info, rlp_encoded_transaction)) + { + None | Some(Err(HardwareError::KeyNotFound)) => Err(SignError::NotFound), + Some(Err(e)) => Err(From::from(e)), + Some(Ok(s)) => Ok(s), + } + } } #[cfg(test)] mod tests { - use super::{AccountProvider, Unlock}; - use std::time::{Duration, Instant}; - use ethkey::{Generator, Random, Address}; - use ethstore::{StoreAccountRef, Derivation}; - use ethereum_types::H256; + use super::{AccountProvider, Unlock}; + use ethereum_types::H256; + use ethkey::{Address, Generator, Random}; + use ethstore::{Derivation, StoreAccountRef}; + use std::time::{Duration, Instant}; - #[test] - fn unlock_account_temp() { - let kp = Random.generate().unwrap(); - let ap = AccountProvider::transient_provider(); - assert!(ap.insert_account(kp.secret().clone(), &"test".into()).is_ok()); - assert!(ap.unlock_account_temporarily(kp.address(), "test1".into()).is_err()); - assert!(ap.unlock_account_temporarily(kp.address(), "test".into()).is_ok()); - assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); - assert!(ap.sign(kp.address(), None, Default::default()).is_err()); - } + #[test] + fn unlock_account_temp() { + let kp = Random.generate().unwrap(); + let ap = AccountProvider::transient_provider(); + assert!(ap + .insert_account(kp.secret().clone(), &"test".into()) + .is_ok()); + assert!(ap + .unlock_account_temporarily(kp.address(), "test1".into()) + .is_err()); + assert!(ap + 
.unlock_account_temporarily(kp.address(), "test".into()) + .is_ok()); + assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); + assert!(ap.sign(kp.address(), None, Default::default()).is_err()); + } - #[test] - fn derived_account_nosave() { - let kp = Random.generate().unwrap(); - let ap = AccountProvider::transient_provider(); - assert!(ap.insert_account(kp.secret().clone(), &"base".into()).is_ok()); - assert!(ap.unlock_account_permanently(kp.address(), "base".into()).is_ok()); + #[test] + fn derived_account_nosave() { + let kp = Random.generate().unwrap(); + let ap = AccountProvider::transient_provider(); + assert!(ap + .insert_account(kp.secret().clone(), &"base".into()) + .is_ok()); + assert!(ap + .unlock_account_permanently(kp.address(), "base".into()) + .is_ok()); - let derived_addr = ap.derive_account( - &kp.address(), - None, - Derivation::SoftHash(H256::from(999)), - false, - ).expect("Derivation should not fail"); + let derived_addr = ap + .derive_account( + &kp.address(), + None, + Derivation::SoftHash(H256::from(999)), + false, + ) + .expect("Derivation should not fail"); - assert!(ap.unlock_account_permanently(derived_addr, "base".into()).is_err(), - "There should be an error because account is not supposed to be saved"); - } + assert!( + ap.unlock_account_permanently(derived_addr, "base".into()) + .is_err(), + "There should be an error because account is not supposed to be saved" + ); + } - #[test] - fn derived_account_save() { - let kp = Random.generate().unwrap(); - let ap = AccountProvider::transient_provider(); - assert!(ap.insert_account(kp.secret().clone(), &"base".into()).is_ok()); - assert!(ap.unlock_account_permanently(kp.address(), "base".into()).is_ok()); + #[test] + fn derived_account_save() { + let kp = Random.generate().unwrap(); + let ap = AccountProvider::transient_provider(); + assert!(ap + .insert_account(kp.secret().clone(), &"base".into()) + .is_ok()); + assert!(ap + .unlock_account_permanently(kp.address(), 
"base".into()) + .is_ok()); - let derived_addr = ap.derive_account( - &kp.address(), - None, - Derivation::SoftHash(H256::from(999)), - true, - ).expect("Derivation should not fail"); + let derived_addr = ap + .derive_account( + &kp.address(), + None, + Derivation::SoftHash(H256::from(999)), + true, + ) + .expect("Derivation should not fail"); - assert!(ap.unlock_account_permanently(derived_addr, "base_wrong".into()).is_err(), - "There should be an error because password is invalid"); + assert!( + ap.unlock_account_permanently(derived_addr, "base_wrong".into()) + .is_err(), + "There should be an error because password is invalid" + ); - assert!(ap.unlock_account_permanently(derived_addr, "base".into()).is_ok(), - "Should be ok because account is saved and password is valid"); - } + assert!( + ap.unlock_account_permanently(derived_addr, "base".into()) + .is_ok(), + "Should be ok because account is saved and password is valid" + ); + } - #[test] - fn derived_account_sign() { - let kp = Random.generate().unwrap(); - let ap = AccountProvider::transient_provider(); - assert!(ap.insert_account(kp.secret().clone(), &"base".into()).is_ok()); - assert!(ap.unlock_account_permanently(kp.address(), "base".into()).is_ok()); + #[test] + fn derived_account_sign() { + let kp = Random.generate().unwrap(); + let ap = AccountProvider::transient_provider(); + assert!(ap + .insert_account(kp.secret().clone(), &"base".into()) + .is_ok()); + assert!(ap + .unlock_account_permanently(kp.address(), "base".into()) + .is_ok()); - let derived_addr = ap.derive_account( - &kp.address(), - None, - Derivation::SoftHash(H256::from(1999)), - true, - ).expect("Derivation should not fail"); - ap.unlock_account_permanently(derived_addr, "base".into()) - .expect("Should be ok because account is saved and password is valid"); + let derived_addr = ap + .derive_account( + &kp.address(), + None, + Derivation::SoftHash(H256::from(1999)), + true, + ) + .expect("Derivation should not fail"); + 
ap.unlock_account_permanently(derived_addr, "base".into()) + .expect("Should be ok because account is saved and password is valid"); - let msg = Default::default(); - let signed_msg1 = ap.sign(derived_addr, None, msg) - .expect("Signing with existing unlocked account should not fail"); - let signed_msg2 = ap.sign_derived( - &kp.address(), - None, - Derivation::SoftHash(H256::from(1999)), - msg, - ).expect("Derived signing with existing unlocked account should not fail"); + let msg = Default::default(); + let signed_msg1 = ap + .sign(derived_addr, None, msg) + .expect("Signing with existing unlocked account should not fail"); + let signed_msg2 = ap + .sign_derived( + &kp.address(), + None, + Derivation::SoftHash(H256::from(1999)), + msg, + ) + .expect("Derived signing with existing unlocked account should not fail"); - assert_eq!(signed_msg1, signed_msg2, - "Signed messages should match"); - } + assert_eq!(signed_msg1, signed_msg2, "Signed messages should match"); + } - #[test] - fn unlock_account_perm() { - let kp = Random.generate().unwrap(); - let ap = AccountProvider::transient_provider(); - assert!(ap.insert_account(kp.secret().clone(), &"test".into()).is_ok()); - assert!(ap.unlock_account_permanently(kp.address(), "test1".into()).is_err()); - assert!(ap.unlock_account_permanently(kp.address(), "test".into()).is_ok()); - assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); - assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); - assert!(ap.unlock_account_temporarily(kp.address(), "test".into()).is_ok()); - assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); - assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); - } + #[test] + fn unlock_account_perm() { + let kp = Random.generate().unwrap(); + let ap = AccountProvider::transient_provider(); + assert!(ap + .insert_account(kp.secret().clone(), &"test".into()) + .is_ok()); + assert!(ap + .unlock_account_permanently(kp.address(), "test1".into()) + .is_err()); + 
assert!(ap + .unlock_account_permanently(kp.address(), "test".into()) + .is_ok()); + assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); + assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); + assert!(ap + .unlock_account_temporarily(kp.address(), "test".into()) + .is_ok()); + assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); + assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); + } - #[test] - fn unlock_account_timer() { - let kp = Random.generate().unwrap(); - let ap = AccountProvider::transient_provider(); - assert!(ap.insert_account(kp.secret().clone(), &"test".into()).is_ok()); - assert!(ap.unlock_account_timed(kp.address(), "test1".into(), Duration::from_secs(60)).is_err()); - assert!(ap.unlock_account_timed(kp.address(), "test".into(), Duration::from_secs(60)).is_ok()); - assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); - ap.unlocked.write().get_mut(&StoreAccountRef::root(kp.address())).unwrap().unlock = Unlock::Timed(Instant::now()); - assert!(ap.sign(kp.address(), None, Default::default()).is_err()); - } + #[test] + fn unlock_account_timer() { + let kp = Random.generate().unwrap(); + let ap = AccountProvider::transient_provider(); + assert!(ap + .insert_account(kp.secret().clone(), &"test".into()) + .is_ok()); + assert!(ap + .unlock_account_timed(kp.address(), "test1".into(), Duration::from_secs(60)) + .is_err()); + assert!(ap + .unlock_account_timed(kp.address(), "test".into(), Duration::from_secs(60)) + .is_ok()); + assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); + ap.unlocked + .write() + .get_mut(&StoreAccountRef::root(kp.address())) + .unwrap() + .unlock = Unlock::Timed(Instant::now()); + assert!(ap.sign(kp.address(), None, Default::default()).is_err()); + } - #[test] - fn should_sign_and_return_token() { - // given - let kp = Random.generate().unwrap(); - let ap = AccountProvider::transient_provider(); - assert!(ap.insert_account(kp.secret().clone(), 
&"test".into()).is_ok()); + #[test] + fn should_sign_and_return_token() { + // given + let kp = Random.generate().unwrap(); + let ap = AccountProvider::transient_provider(); + assert!(ap + .insert_account(kp.secret().clone(), &"test".into()) + .is_ok()); - // when - let (_signature, token) = ap.sign_with_token(kp.address(), "test".into(), Default::default()).unwrap(); + // when + let (_signature, token) = ap + .sign_with_token(kp.address(), "test".into(), Default::default()) + .unwrap(); - // then - ap.sign_with_token(kp.address(), token.clone(), Default::default()) - .expect("First usage of token should be correct."); - assert!(ap.sign_with_token(kp.address(), token, Default::default()).is_err(), "Second usage of the same token should fail."); - } + // then + ap.sign_with_token(kp.address(), token.clone(), Default::default()) + .expect("First usage of token should be correct."); + assert!( + ap.sign_with_token(kp.address(), token, Default::default()) + .is_err(), + "Second usage of the same token should fail." + ); + } - #[test] - fn should_not_return_blacklisted_account() { - // given - let mut ap = AccountProvider::transient_provider(); - let acc = ap.new_account(&"test".into()).unwrap(); - ap.blacklisted_accounts = vec![acc]; + #[test] + fn should_not_return_blacklisted_account() { + // given + let mut ap = AccountProvider::transient_provider(); + let acc = ap.new_account(&"test".into()).unwrap(); + ap.blacklisted_accounts = vec![acc]; - // then - assert_eq!(ap.accounts_info().unwrap().keys().cloned().collect::>(), vec![]); - assert_eq!(ap.accounts().unwrap(), vec![]); - } + // then + assert_eq!( + ap.accounts_info() + .unwrap() + .keys() + .cloned() + .collect::>(), + vec![] + ); + assert_eq!(ap.accounts().unwrap(), vec![]); + } } diff --git a/accounts/src/stores.rs b/accounts/src/stores.rs index baa26cc48..b2d4c8b9b 100644 --- a/accounts/src/stores.rs +++ b/accounts/src/stores.rs @@ -16,9 +16,11 @@ //! 
Address Book Store -use std::{fs, fmt, hash, ops}; -use std::collections::HashMap; -use std::path::{Path, PathBuf}; +use std::{ + collections::HashMap, + fmt, fs, hash, ops, + path::{Path, PathBuf}, +}; use ethkey::Address; use log::{trace, warn}; @@ -27,163 +29,209 @@ use crate::AccountMeta; /// Disk-backed map from Address to String. Uses JSON. pub struct AddressBook { - cache: DiskMap, + cache: DiskMap, } impl AddressBook { - /// Creates new address book at given directory. - pub fn new(path: &Path) -> Self { - let mut r = AddressBook { - cache: DiskMap::new(path, "address_book.json") - }; - r.cache.revert(AccountMeta::read); - r - } + /// Creates new address book at given directory. + pub fn new(path: &Path) -> Self { + let mut r = AddressBook { + cache: DiskMap::new(path, "address_book.json"), + }; + r.cache.revert(AccountMeta::read); + r + } - /// Creates transient address book (no changes are saved to disk). - pub fn transient() -> Self { - AddressBook { - cache: DiskMap::transient() - } - } + /// Creates transient address book (no changes are saved to disk). + pub fn transient() -> Self { + AddressBook { + cache: DiskMap::transient(), + } + } - /// Get the address book. - pub fn get(&self) -> HashMap { - self.cache.clone() - } + /// Get the address book. + pub fn get(&self) -> HashMap { + self.cache.clone() + } - fn save(&self) { - self.cache.save(AccountMeta::write) - } + fn save(&self) { + self.cache.save(AccountMeta::write) + } - /// Sets new name for given address. - pub fn set_name(&mut self, a: Address, name: String) { - { - let x = self.cache.entry(a) - .or_insert_with(|| AccountMeta {name: Default::default(), meta: "{}".to_owned(), uuid: None}); - x.name = name; - } - self.save(); - } + /// Sets new name for given address. 
+ pub fn set_name(&mut self, a: Address, name: String) { + { + let x = self.cache.entry(a).or_insert_with(|| AccountMeta { + name: Default::default(), + meta: "{}".to_owned(), + uuid: None, + }); + x.name = name; + } + self.save(); + } - /// Sets new meta for given address. - pub fn set_meta(&mut self, a: Address, meta: String) { - { - let x = self.cache.entry(a) - .or_insert_with(|| AccountMeta {name: "Anonymous".to_owned(), meta: Default::default(), uuid: None}); - x.meta = meta; - } - self.save(); - } + /// Sets new meta for given address. + pub fn set_meta(&mut self, a: Address, meta: String) { + { + let x = self.cache.entry(a).or_insert_with(|| AccountMeta { + name: "Anonymous".to_owned(), + meta: Default::default(), + uuid: None, + }); + x.meta = meta; + } + self.save(); + } - /// Removes an entry - pub fn remove(&mut self, a: Address) { - self.cache.remove(&a); - self.save(); - } + /// Removes an entry + pub fn remove(&mut self, a: Address) { + self.cache.remove(&a); + self.save(); + } } /// Disk-serializable HashMap #[derive(Debug)] struct DiskMap { - path: PathBuf, - cache: HashMap, - transient: bool, + path: PathBuf, + cache: HashMap, + transient: bool, } impl ops::Deref for DiskMap { - type Target = HashMap; - fn deref(&self) -> &Self::Target { - &self.cache - } + type Target = HashMap; + fn deref(&self) -> &Self::Target { + &self.cache + } } impl ops::DerefMut for DiskMap { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.cache - } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.cache + } } impl DiskMap { - pub fn new(path: &Path, file_name: &str) -> Self { - let mut path = path.to_owned(); - path.push(file_name); - trace!(target: "diskmap", "path={:?}", path); - DiskMap { - path: path, - cache: HashMap::new(), - transient: false, - } - } + pub fn new(path: &Path, file_name: &str) -> Self { + let mut path = path.to_owned(); + path.push(file_name); + trace!(target: "diskmap", "path={:?}", path); + DiskMap { + path: path, + cache: 
HashMap::new(), + transient: false, + } + } - pub fn transient() -> Self { - let mut map = DiskMap::new(&PathBuf::new(), "diskmap.json".into()); - map.transient = true; - map - } + pub fn transient() -> Self { + let mut map = DiskMap::new(&PathBuf::new(), "diskmap.json".into()); + map.transient = true; + map + } - fn revert(&mut self, read: F) where - F: Fn(fs::File) -> Result, E>, - E: fmt::Display, - { - if self.transient { return; } - trace!(target: "diskmap", "revert {:?}", self.path); - let _ = fs::File::open(self.path.clone()) - .map_err(|e| trace!(target: "diskmap", "Couldn't open disk map: {}", e)) - .and_then(|f| read(f).map_err(|e| warn!(target: "diskmap", "Couldn't read disk map: {}", e))) - .and_then(|m| { - self.cache = m; - Ok(()) - }); - } + fn revert(&mut self, read: F) + where + F: Fn(fs::File) -> Result, E>, + E: fmt::Display, + { + if self.transient { + return; + } + trace!(target: "diskmap", "revert {:?}", self.path); + let _ = fs::File::open(self.path.clone()) + .map_err(|e| trace!(target: "diskmap", "Couldn't open disk map: {}", e)) + .and_then(|f| { + read(f).map_err(|e| warn!(target: "diskmap", "Couldn't read disk map: {}", e)) + }) + .and_then(|m| { + self.cache = m; + Ok(()) + }); + } - fn save(&self, write: F) where - F: Fn(&HashMap, &mut fs::File) -> Result<(), E>, - E: fmt::Display, - { - if self.transient { return; } - trace!(target: "diskmap", "save {:?}", self.path); - let _ = fs::File::create(self.path.clone()) - .map_err(|e| warn!(target: "diskmap", "Couldn't open disk map for writing: {}", e)) - .and_then(|mut f| { - write(&self.cache, &mut f).map_err(|e| warn!(target: "diskmap", "Couldn't write to disk map: {}", e)) - }); - } + fn save(&self, write: F) + where + F: Fn(&HashMap, &mut fs::File) -> Result<(), E>, + E: fmt::Display, + { + if self.transient { + return; + } + trace!(target: "diskmap", "save {:?}", self.path); + let _ = fs::File::create(self.path.clone()) + .map_err(|e| warn!(target: "diskmap", "Couldn't open disk map 
for writing: {}", e)) + .and_then(|mut f| { + write(&self.cache, &mut f) + .map_err(|e| warn!(target: "diskmap", "Couldn't write to disk map: {}", e)) + }); + } } #[cfg(test)] mod tests { - use super::AddressBook; - use std::collections::HashMap; - use tempdir::TempDir; - use crate::account_data::AccountMeta; + use super::AddressBook; + use crate::account_data::AccountMeta; + use std::collections::HashMap; + use tempdir::TempDir; - #[test] - fn should_save_and_reload_address_book() { - let tempdir = TempDir::new("").unwrap(); - let mut b = AddressBook::new(tempdir.path()); - b.set_name(1.into(), "One".to_owned()); - b.set_meta(1.into(), "{1:1}".to_owned()); - let b = AddressBook::new(tempdir.path()); - assert_eq!(b.get(), vec![ - (1, AccountMeta {name: "One".to_owned(), meta: "{1:1}".to_owned(), uuid: None}) - ].into_iter().map(|(a, b)| (a.into(), b)).collect::>()); - } + #[test] + fn should_save_and_reload_address_book() { + let tempdir = TempDir::new("").unwrap(); + let mut b = AddressBook::new(tempdir.path()); + b.set_name(1.into(), "One".to_owned()); + b.set_meta(1.into(), "{1:1}".to_owned()); + let b = AddressBook::new(tempdir.path()); + assert_eq!( + b.get(), + vec![( + 1, + AccountMeta { + name: "One".to_owned(), + meta: "{1:1}".to_owned(), + uuid: None + } + )] + .into_iter() + .map(|(a, b)| (a.into(), b)) + .collect::>() + ); + } - #[test] - fn should_remove_address() { - let tempdir = TempDir::new("").unwrap(); - let mut b = AddressBook::new(tempdir.path()); + #[test] + fn should_remove_address() { + let tempdir = TempDir::new("").unwrap(); + let mut b = AddressBook::new(tempdir.path()); - b.set_name(1.into(), "One".to_owned()); - b.set_name(2.into(), "Two".to_owned()); - b.set_name(3.into(), "Three".to_owned()); - b.remove(2.into()); + b.set_name(1.into(), "One".to_owned()); + b.set_name(2.into(), "Two".to_owned()); + b.set_name(3.into(), "Three".to_owned()); + b.remove(2.into()); - let b = AddressBook::new(tempdir.path()); - assert_eq!(b.get(), vec![ - 
(1, AccountMeta{name: "One".to_owned(), meta: "{}".to_owned(), uuid: None}), - (3, AccountMeta{name: "Three".to_owned(), meta: "{}".to_owned(), uuid: None}), - ].into_iter().map(|(a, b)| (a.into(), b)).collect::>()); - } + let b = AddressBook::new(tempdir.path()); + assert_eq!( + b.get(), + vec![ + ( + 1, + AccountMeta { + name: "One".to_owned(), + meta: "{}".to_owned(), + uuid: None + } + ), + ( + 3, + AccountMeta { + name: "Three".to_owned(), + meta: "{}".to_owned(), + uuid: None + } + ), + ] + .into_iter() + .map(|(a, b)| (a.into(), b)) + .collect::>() + ); + } } diff --git a/chainspec/src/main.rs b/chainspec/src/main.rs index 45490fe7f..c18d888e9 100644 --- a/chainspec/src/main.rs +++ b/chainspec/src/main.rs @@ -14,36 +14,38 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -extern crate serde_json; extern crate ethjson; +extern crate serde_json; -use std::{fs, env, process}; use ethjson::spec::Spec; +use std::{env, fs, process}; fn quit(s: &str) -> ! 
{ - println!("{}", s); - process::exit(1); + println!("{}", s); + process::exit(1); } fn main() { - let mut args = env::args(); - if args.len() != 2 { - quit("You need to specify chainspec.json\n\ + let mut args = env::args(); + if args.len() != 2 { + quit( + "You need to specify chainspec.json\n\ \n\ - ./chainspec "); - } + ./chainspec ", + ); + } - let path = args.nth(1).expect("args.len() == 2; qed"); - let file = match fs::File::open(&path) { - Ok(file) => file, - Err(_) => quit(&format!("{} could not be opened", path)), - }; + let path = args.nth(1).expect("args.len() == 2; qed"); + let file = match fs::File::open(&path) { + Ok(file) => file, + Err(_) => quit(&format!("{} could not be opened", path)), + }; - let spec: Result = serde_json::from_reader(file); + let spec: Result = serde_json::from_reader(file); - if let Err(err) = spec { - quit(&format!("{} {}", path, err.to_string())); - } + if let Err(err) = spec { + quit(&format!("{} {}", path, err.to_string())); + } - println!("{} is valid", path); + println!("{} is valid", path); } diff --git a/cli-signer/rpc-client/src/client.rs b/cli-signer/rpc-client/src/client.rs index a9ca5e68a..94e2371ae 100644 --- a/cli-signer/rpc-client/src/client.rs +++ b/cli-signer/rpc-client/src/client.rs @@ -14,334 +14,314 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::fmt::{Debug, Formatter, Error as FmtError}; -use std::io::{BufReader, BufRead}; -use std::sync::Arc; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::collections::BTreeMap; -use std::thread; -use std::time; +use std::{ + collections::BTreeMap, + fmt::{Debug, Error as FmtError, Formatter}, + io::{BufRead, BufReader}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + thread, time, +}; -use std::path::PathBuf; use hash::keccak; use parking_lot::Mutex; +use std::{fs::File, path::PathBuf}; use url::Url; -use std::fs::File; use ws::ws::{ - self, - Request, - Handler, - Sender, - Handshake, - Error as WsError, - ErrorKind as WsErrorKind, - Message, - Result as WsResult, + self, Error as WsError, ErrorKind as WsErrorKind, Handler, Handshake, Message, Request, + Result as WsResult, Sender, }; use serde::de::DeserializeOwned; -use serde_json::{ - self as json, - Value as JsonValue, - Error as JsonError, +use serde_json::{self as json, Error as JsonError, Value as JsonValue}; + +use futures::{done, oneshot, Canceled, Complete, Future}; + +use jsonrpc_core::{ + request::MethodCall, + response::{Failure, Output, Success}, + Error as JsonRpcError, Id, Params, Version, }; -use futures::{Canceled, Complete, Future, oneshot, done}; - -use jsonrpc_core::{Id, Version, Params, Error as JsonRpcError}; -use jsonrpc_core::request::MethodCall; -use jsonrpc_core::response::{Output, Success, Failure}; - use BoxFuture; /// The actual websocket connection handler, passed into the /// event loop of ws-rs struct RpcHandler { - pending: Pending, - // Option is used here as temporary storage until connection - // is setup and the values are moved into the new `Rpc` - complete: Option>>, - auth_code: String, - out: Option, + pending: Pending, + // Option is used here as temporary storage until connection + // is setup and the values are moved into the new `Rpc` + complete: Option>>, + auth_code: String, + out: Option, } impl RpcHandler { - fn new( - out: Sender, - 
auth_code: String, - complete: Complete> - ) -> Self { - RpcHandler { - out: Some(out), - auth_code: auth_code, - pending: Pending::new(), - complete: Some(complete), - } - } + fn new(out: Sender, auth_code: String, complete: Complete>) -> Self { + RpcHandler { + out: Some(out), + auth_code: auth_code, + pending: Pending::new(), + complete: Some(complete), + } + } } impl Handler for RpcHandler { - fn build_request(&mut self, url: &Url) -> WsResult { - match Request::from_url(url) { - Ok(mut r) => { - let timestamp = time::UNIX_EPOCH.elapsed().map_err(|err| { - WsError::new(WsErrorKind::Internal, format!("{}", err)) - })?; - let secs = timestamp.as_secs(); - let hashed = keccak(format!("{}:{}", self.auth_code, secs)); - let proto = format!("{:x}_{}", hashed, secs); - r.add_protocol(&proto); - Ok(r) - }, - Err(e) => - Err(WsError::new(WsErrorKind::Internal, format!("{}", e))), - } - } - fn on_error(&mut self, err: WsError) { - match self.complete.take() { - Some(c) => match c.send(Err(RpcError::WsError(err))) { - Ok(_) => {}, - Err(_) => warn!(target: "rpc-client", "Unable to notify about error."), - }, - None => warn!(target: "rpc-client", "unexpected error: {}", err), - } - } - fn on_open(&mut self, _: Handshake) -> WsResult<()> { - match (self.complete.take(), self.out.take()) { - (Some(c), Some(out)) => { - let res = c.send(Ok(Rpc { - out: out, - counter: AtomicUsize::new(0), - pending: self.pending.clone(), - })); - if let Err(_) = res { - warn!(target: "rpc-client", "Unable to open a connection.") - } - Ok(()) - }, - _ => { - let msg = format!("on_open called twice"); - Err(WsError::new(WsErrorKind::Internal, msg)) - } - } - } - fn on_message(&mut self, msg: Message) -> WsResult<()> { - let ret: Result; - let response_id; - let string = &msg.to_string(); - match json::from_str::(&string) { - Ok(Output::Success(Success { result, id: Id::Num(id), .. 
})) => - { - ret = Ok(result); - response_id = id as usize; - } - Ok(Output::Failure(Failure { error, id: Id::Num(id), .. })) => { - ret = Err(error); - response_id = id as usize; - } - Err(e) => { - warn!( - target: "rpc-client", - "recieved invalid message: {}\n {:?}", - string, - e - ); - return Ok(()) - }, - _ => { - warn!( - target: "rpc-client", - "recieved invalid message: {}", - string - ); - return Ok(()) - } - } + fn build_request(&mut self, url: &Url) -> WsResult { + match Request::from_url(url) { + Ok(mut r) => { + let timestamp = time::UNIX_EPOCH + .elapsed() + .map_err(|err| WsError::new(WsErrorKind::Internal, format!("{}", err)))?; + let secs = timestamp.as_secs(); + let hashed = keccak(format!("{}:{}", self.auth_code, secs)); + let proto = format!("{:x}_{}", hashed, secs); + r.add_protocol(&proto); + Ok(r) + } + Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("{}", e))), + } + } + fn on_error(&mut self, err: WsError) { + match self.complete.take() { + Some(c) => match c.send(Err(RpcError::WsError(err))) { + Ok(_) => {} + Err(_) => warn!(target: "rpc-client", "Unable to notify about error."), + }, + None => warn!(target: "rpc-client", "unexpected error: {}", err), + } + } + fn on_open(&mut self, _: Handshake) -> WsResult<()> { + match (self.complete.take(), self.out.take()) { + (Some(c), Some(out)) => { + let res = c.send(Ok(Rpc { + out: out, + counter: AtomicUsize::new(0), + pending: self.pending.clone(), + })); + if let Err(_) = res { + warn!(target: "rpc-client", "Unable to open a connection.") + } + Ok(()) + } + _ => { + let msg = format!("on_open called twice"); + Err(WsError::new(WsErrorKind::Internal, msg)) + } + } + } + fn on_message(&mut self, msg: Message) -> WsResult<()> { + let ret: Result; + let response_id; + let string = &msg.to_string(); + match json::from_str::(&string) { + Ok(Output::Success(Success { + result, + id: Id::Num(id), + .. 
+ })) => { + ret = Ok(result); + response_id = id as usize; + } + Ok(Output::Failure(Failure { + error, + id: Id::Num(id), + .. + })) => { + ret = Err(error); + response_id = id as usize; + } + Err(e) => { + warn!( + target: "rpc-client", + "recieved invalid message: {}\n {:?}", + string, + e + ); + return Ok(()); + } + _ => { + warn!( + target: "rpc-client", + "recieved invalid message: {}", + string + ); + return Ok(()); + } + } - match self.pending.remove(response_id) { - Some(c) => if let Err(_) = c.send(ret.map_err(|err| RpcError::JsonRpc(err))) { - warn!(target: "rpc-client", "Unable to send response.") - }, - None => warn!( - target: "rpc-client", - "warning: unexpected id: {}", - response_id - ), - } - Ok(()) - } + match self.pending.remove(response_id) { + Some(c) => { + if let Err(_) = c.send(ret.map_err(|err| RpcError::JsonRpc(err))) { + warn!(target: "rpc-client", "Unable to send response.") + } + } + None => warn!( + target: "rpc-client", + "warning: unexpected id: {}", + response_id + ), + } + Ok(()) + } } /// Keeping track of issued requests to be matched up with responses #[derive(Clone)] -struct Pending( - Arc>>>> -); +struct Pending(Arc>>>>); impl Pending { - fn new() -> Self { - Pending(Arc::new(Mutex::new(BTreeMap::new()))) - } - fn insert(&mut self, k: usize, v: Complete>) { - self.0.lock().insert(k, v); - } - fn remove( - &mut self, - k: usize - ) -> Option>> { - self.0.lock().remove(&k) - } + fn new() -> Self { + Pending(Arc::new(Mutex::new(BTreeMap::new()))) + } + fn insert(&mut self, k: usize, v: Complete>) { + self.0.lock().insert(k, v); + } + fn remove(&mut self, k: usize) -> Option>> { + self.0.lock().remove(&k) + } } fn get_authcode(path: &PathBuf) -> Result { - if let Ok(fd) = File::open(path) { - if let Some(Ok(line)) = BufReader::new(fd).lines().next() { - let mut parts = line.split(';'); - let token = parts.next(); + if let Ok(fd) = File::open(path) { + if let Some(Ok(line)) = BufReader::new(fd).lines().next() { + let mut parts = 
line.split(';'); + let token = parts.next(); - if let Some(code) = token { - return Ok(code.into()); - } - } - } - Err(RpcError::NoAuthCode) + if let Some(code) = token { + return Ok(code.into()); + } + } + } + Err(RpcError::NoAuthCode) } /// The handle to the connection pub struct Rpc { - out: Sender, - counter: AtomicUsize, - pending: Pending, + out: Sender, + counter: AtomicUsize, + pending: Pending, } impl Rpc { - /// Blocking, returns a new initialized connection or RpcError - pub fn new(url: &str, authpath: &PathBuf) -> Result { - let rpc = Self::connect(url, authpath).map(|rpc| rpc).wait()?; - rpc - } + /// Blocking, returns a new initialized connection or RpcError + pub fn new(url: &str, authpath: &PathBuf) -> Result { + let rpc = Self::connect(url, authpath).map(|rpc| rpc).wait()?; + rpc + } - /// Non-blocking, returns a future - pub fn connect( - url: &str, authpath: &PathBuf - ) -> BoxFuture, Canceled> { - let (c, p) = oneshot::>(); - match get_authcode(authpath) { - Err(e) => return Box::new(done(Ok(Err(e)))), - Ok(code) => { - let url = String::from(url); - // The ws::connect takes a FnMut closure, which means c cannot - // be moved into it, since it's consumed on complete. - // Therefore we wrap it in an option and pick it out once. - let mut once = Some(c); - thread::spawn(move || { - let conn = ws::connect(url, |out| { - // this will panic if the closure is called twice, - // which it should never be. - let c = once.take() - .expect("connection closure called only once"); - RpcHandler::new(out, code.clone(), c) - }); - match conn { - Err(err) => { - // since ws::connect is only called once, it cannot - // both fail and succeed. 
- let c = once.take() - .expect("connection closure called only once"); - let _ = c.send(Err(RpcError::WsError(err))); - }, - // c will complete on the `on_open` event in the Handler - _ => () - } - }); - Box::new(p) - } - } - } + /// Non-blocking, returns a future + pub fn connect(url: &str, authpath: &PathBuf) -> BoxFuture, Canceled> { + let (c, p) = oneshot::>(); + match get_authcode(authpath) { + Err(e) => return Box::new(done(Ok(Err(e)))), + Ok(code) => { + let url = String::from(url); + // The ws::connect takes a FnMut closure, which means c cannot + // be moved into it, since it's consumed on complete. + // Therefore we wrap it in an option and pick it out once. + let mut once = Some(c); + thread::spawn(move || { + let conn = ws::connect(url, |out| { + // this will panic if the closure is called twice, + // which it should never be. + let c = once.take().expect("connection closure called only once"); + RpcHandler::new(out, code.clone(), c) + }); + match conn { + Err(err) => { + // since ws::connect is only called once, it cannot + // both fail and succeed. 
+ let c = once.take().expect("connection closure called only once"); + let _ = c.send(Err(RpcError::WsError(err))); + } + // c will complete on the `on_open` event in the Handler + _ => (), + } + }); + Box::new(p) + } + } + } - /// Non-blocking, returns a future of the request response - pub fn request( - &mut self, method: &'static str, params: Vec - ) -> BoxFuture, Canceled> - where T: DeserializeOwned + Send + Sized { + /// Non-blocking, returns a future of the request response + pub fn request( + &mut self, + method: &'static str, + params: Vec, + ) -> BoxFuture, Canceled> + where + T: DeserializeOwned + Send + Sized, + { + let (c, p) = oneshot::>(); - let (c, p) = oneshot::>(); + let id = self.counter.fetch_add(1, Ordering::Relaxed); + self.pending.insert(id, c); - let id = self.counter.fetch_add(1, Ordering::Relaxed); - self.pending.insert(id, c); + let request = MethodCall { + jsonrpc: Some(Version::V2), + method: method.to_owned(), + params: Params::Array(params), + id: Id::Num(id as u64), + }; - let request = MethodCall { - jsonrpc: Some(Version::V2), - method: method.to_owned(), - params: Params::Array(params), - id: Id::Num(id as u64), - }; + let serialized = json::to_string(&request).expect("request is serializable"); + let _ = self.out.send(serialized); - let serialized = json::to_string(&request) - .expect("request is serializable"); - let _ = self.out.send(serialized); - - Box::new(p.map(|result| { - match result { - Ok(json) => { - let t: T = json::from_value(json)?; - Ok(t) - }, - Err(err) => Err(err) - } - })) - } + Box::new(p.map(|result| match result { + Ok(json) => { + let t: T = json::from_value(json)?; + Ok(t) + } + Err(err) => Err(err), + })) + } } pub enum RpcError { - WrongVersion(String), - ParseError(JsonError), - MalformedResponse(String), - JsonRpc(JsonRpcError), - WsError(WsError), - Canceled(Canceled), - UnexpectedId, - NoAuthCode, + WrongVersion(String), + ParseError(JsonError), + MalformedResponse(String), + JsonRpc(JsonRpcError), 
+ WsError(WsError), + Canceled(Canceled), + UnexpectedId, + NoAuthCode, } impl Debug for RpcError { - fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { - match *self { - RpcError::WrongVersion(ref s) - => write!(f, "Expected version 2.0, got {}", s), - RpcError::ParseError(ref err) - => write!(f, "ParseError: {}", err), - RpcError::MalformedResponse(ref s) - => write!(f, "Malformed response: {}", s), - RpcError::JsonRpc(ref json) - => write!(f, "JsonRpc error: {:?}", json), - RpcError::WsError(ref s) - => write!(f, "Websocket error: {}", s), - RpcError::Canceled(ref s) - => write!(f, "Futures error: {:?}", s), - RpcError::UnexpectedId - => write!(f, "Unexpected response id"), - RpcError::NoAuthCode - => write!(f, "No authcodes available"), - } - } + fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { + match *self { + RpcError::WrongVersion(ref s) => write!(f, "Expected version 2.0, got {}", s), + RpcError::ParseError(ref err) => write!(f, "ParseError: {}", err), + RpcError::MalformedResponse(ref s) => write!(f, "Malformed response: {}", s), + RpcError::JsonRpc(ref json) => write!(f, "JsonRpc error: {:?}", json), + RpcError::WsError(ref s) => write!(f, "Websocket error: {}", s), + RpcError::Canceled(ref s) => write!(f, "Futures error: {:?}", s), + RpcError::UnexpectedId => write!(f, "Unexpected response id"), + RpcError::NoAuthCode => write!(f, "No authcodes available"), + } + } } impl From for RpcError { - fn from(err: JsonError) -> RpcError { - RpcError::ParseError(err) - } + fn from(err: JsonError) -> RpcError { + RpcError::ParseError(err) + } } impl From for RpcError { - fn from(err: WsError) -> RpcError { - RpcError::WsError(err) - } + fn from(err: WsError) -> RpcError { + RpcError::WsError(err) + } } impl From for RpcError { - fn from(err: Canceled) -> RpcError { - RpcError::Canceled(err) - } + fn from(err: Canceled) -> RpcError { + RpcError::Canceled(err) + } } diff --git a/cli-signer/rpc-client/src/lib.rs b/cli-signer/rpc-client/src/lib.rs 
index d0e087e59..11fdbacc8 100644 --- a/cli-signer/rpc-client/src/lib.rs +++ b/cli-signer/rpc-client/src/lib.rs @@ -21,12 +21,12 @@ extern crate ethereum_types; extern crate futures; extern crate jsonrpc_core; extern crate jsonrpc_ws_server as ws; +extern crate keccak_hash as hash; extern crate parity_rpc as rpc; extern crate parking_lot; extern crate serde; extern crate serde_json; extern crate url; -extern crate keccak_hash as hash; #[macro_use] extern crate log; @@ -36,56 +36,55 @@ extern crate log; extern crate matches; /// Boxed future response. -pub type BoxFuture = Box + Send>; +pub type BoxFuture = Box + Send>; #[cfg(test)] mod tests { - use futures::Future; - use std::path::PathBuf; - use client::{Rpc, RpcError}; - use rpc; + use client::{Rpc, RpcError}; + use futures::Future; + use rpc; + use std::path::PathBuf; - #[test] - fn test_connection_refused() { - let (_srv, port, mut authcodes) = rpc::tests::ws::serve(); + #[test] + fn test_connection_refused() { + let (_srv, port, mut authcodes) = rpc::tests::ws::serve(); - let _ = authcodes.generate_new(); - authcodes.to_file(&authcodes.path).unwrap(); + let _ = authcodes.generate_new(); + authcodes.to_file(&authcodes.path).unwrap(); - let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port - 1), - &authcodes.path); + let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port - 1), &authcodes.path); - let _ = connect.map(|conn| { - assert!(matches!(&conn, &Err(RpcError::WsError(_)))); - }).wait(); - } + let _ = connect + .map(|conn| { + assert!(matches!(&conn, &Err(RpcError::WsError(_)))); + }) + .wait(); + } - #[test] - fn test_authcode_fail() { - let (_srv, port, _) = rpc::tests::ws::serve(); - let path = PathBuf::from("nonexist"); + #[test] + fn test_authcode_fail() { + let (_srv, port, _) = rpc::tests::ws::serve(); + let path = PathBuf::from("nonexist"); - let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), &path); + let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), 
&path); - let _ = connect.map(|conn| { - assert!(matches!(&conn, &Err(RpcError::NoAuthCode))); - }).wait(); - } + let _ = connect + .map(|conn| { + assert!(matches!(&conn, &Err(RpcError::NoAuthCode))); + }) + .wait(); + } - #[test] - fn test_authcode_correct() { - let (_srv, port, mut authcodes) = rpc::tests::ws::serve(); + #[test] + fn test_authcode_correct() { + let (_srv, port, mut authcodes) = rpc::tests::ws::serve(); - let _ = authcodes.generate_new(); - authcodes.to_file(&authcodes.path).unwrap(); + let _ = authcodes.generate_new(); + authcodes.to_file(&authcodes.path).unwrap(); - let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), - &authcodes.path); - - let _ = connect.map(|conn| { - assert!(conn.is_ok()) - }).wait(); - } + let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), &authcodes.path); + let _ = connect.map(|conn| assert!(conn.is_ok())).wait(); + } } diff --git a/cli-signer/rpc-client/src/signer_client.rs b/cli-signer/rpc-client/src/signer_client.rs index 997841936..f2f380eac 100644 --- a/cli-signer/rpc-client/src/signer_client.rs +++ b/cli-signer/rpc-client/src/signer_client.rs @@ -16,48 +16,61 @@ use client::{Rpc, RpcError}; use ethereum_types::U256; -use rpc::signer::{ConfirmationRequest, TransactionModification, TransactionCondition}; +use futures::Canceled; +use rpc::signer::{ConfirmationRequest, TransactionCondition, TransactionModification}; use serde; -use serde_json::{Value as JsonValue, to_value}; +use serde_json::{to_value, Value as JsonValue}; use std::path::PathBuf; -use futures::{Canceled}; -use {BoxFuture}; +use BoxFuture; pub struct SignerRpc { - rpc: Rpc, + rpc: Rpc, } impl SignerRpc { - pub fn new(url: &str, authfile: &PathBuf) -> Result { - Ok(SignerRpc { rpc: Rpc::new(&url, authfile)? 
}) - } + pub fn new(url: &str, authfile: &PathBuf) -> Result { + Ok(SignerRpc { + rpc: Rpc::new(&url, authfile)?, + }) + } - pub fn requests_to_confirm(&mut self) -> BoxFuture, RpcError>, Canceled> { - self.rpc.request("signer_requestsToConfirm", vec![]) - } + pub fn requests_to_confirm( + &mut self, + ) -> BoxFuture, RpcError>, Canceled> { + self.rpc.request("signer_requestsToConfirm", vec![]) + } - pub fn confirm_request( - &mut self, - id: U256, - new_gas: Option, - new_gas_price: Option, - new_condition: Option>, - pwd: &str - ) -> BoxFuture, Canceled> { - self.rpc.request("signer_confirmRequest", vec![ - Self::to_value(&format!("{:#x}", id)), - Self::to_value(&TransactionModification { sender: None, gas_price: new_gas_price, gas: new_gas, condition: new_condition }), - Self::to_value(&pwd), - ]) - } + pub fn confirm_request( + &mut self, + id: U256, + new_gas: Option, + new_gas_price: Option, + new_condition: Option>, + pwd: &str, + ) -> BoxFuture, Canceled> { + self.rpc.request( + "signer_confirmRequest", + vec![ + Self::to_value(&format!("{:#x}", id)), + Self::to_value(&TransactionModification { + sender: None, + gas_price: new_gas_price, + gas: new_gas, + condition: new_condition, + }), + Self::to_value(&pwd), + ], + ) + } - pub fn reject_request(&mut self, id: U256) -> BoxFuture, Canceled> { - self.rpc.request("signer_rejectRequest", vec![ - JsonValue::String(format!("{:#x}", id)) - ]) - } + pub fn reject_request(&mut self, id: U256) -> BoxFuture, Canceled> { + self.rpc.request( + "signer_rejectRequest", + vec![JsonValue::String(format!("{:#x}", id))], + ) + } - fn to_value(v: &T) -> JsonValue { - to_value(v).expect("Our types are always serializable; qed") - } + fn to_value(v: &T) -> JsonValue { + to_value(v).expect("Our types are always serializable; qed") + } } diff --git a/cli-signer/src/lib.rs b/cli-signer/src/lib.rs index 3ef6e7054..d96072911 100644 --- a/cli-signer/src/lib.rs +++ b/cli-signer/src/lib.rs @@ -21,177 +21,142 @@ extern crate rpassword; 
extern crate parity_rpc as rpc; extern crate parity_rpc_client as client; +use client::signer_client::SignerRpc; use ethereum_types::U256; use rpc::signer::ConfirmationRequest; -use client::signer_client::SignerRpc; -use std::io::{Write, BufRead, BufReader, stdout, stdin}; -use std::path::PathBuf; -use std::fs::File; +use std::{ + fs::File, + io::{stdin, stdout, BufRead, BufReader, Write}, + path::PathBuf, +}; use futures::Future; -fn sign_interactive( - signer: &mut SignerRpc, - password: &str, - request: ConfirmationRequest -) { - print!("\n{}\nSign this transaction? (y)es/(N)o/(r)eject: ", request); - let _ = stdout().flush(); - match BufReader::new(stdin()).lines().next() { - Some(Ok(line)) => { - match line.to_lowercase().chars().nth(0) { - Some('y') => { - match sign_transaction(signer, request.id, password) { - Ok(s) | Err(s) => println!("{}", s), - } - } - Some('r') => { - match reject_transaction(signer, request.id) { - Ok(s) | Err(s) => println!("{}", s), - } - } - _ => () - } - } - _ => println!("Could not read from stdin") - } +fn sign_interactive(signer: &mut SignerRpc, password: &str, request: ConfirmationRequest) { + print!( + "\n{}\nSign this transaction? 
(y)es/(N)o/(r)eject: ", + request + ); + let _ = stdout().flush(); + match BufReader::new(stdin()).lines().next() { + Some(Ok(line)) => match line.to_lowercase().chars().nth(0) { + Some('y') => match sign_transaction(signer, request.id, password) { + Ok(s) | Err(s) => println!("{}", s), + }, + Some('r') => match reject_transaction(signer, request.id) { + Ok(s) | Err(s) => println!("{}", s), + }, + _ => (), + }, + _ => println!("Could not read from stdin"), + } } -fn sign_transactions( - signer: &mut SignerRpc, - password: String -) -> Result { - signer.requests_to_confirm().map(|reqs| { - match reqs { - Ok(ref reqs) if reqs.is_empty() => { - Ok("No transactions in signing queue".to_owned()) - } - Ok(reqs) => { - for r in reqs { - sign_interactive(signer, &password, r) - } - Ok("".to_owned()) - } - Err(err) => { - Err(format!("error: {:?}", err)) - } - } - }).map_err(|err| { - format!("{:?}", err) - }).wait()? +fn sign_transactions(signer: &mut SignerRpc, password: String) -> Result { + signer + .requests_to_confirm() + .map(|reqs| match reqs { + Ok(ref reqs) if reqs.is_empty() => Ok("No transactions in signing queue".to_owned()), + Ok(reqs) => { + for r in reqs { + sign_interactive(signer, &password, r) + } + Ok("".to_owned()) + } + Err(err) => Err(format!("error: {:?}", err)), + }) + .map_err(|err| format!("{:?}", err)) + .wait()? } fn list_transactions(signer: &mut SignerRpc) -> Result { - signer.requests_to_confirm().map(|reqs| { - match reqs { - Ok(ref reqs) if reqs.is_empty() => { - Ok("No transactions in signing queue".to_owned()) - } - Ok(ref reqs) => { - Ok(format!("Transaction queue:\n{}", reqs - .iter() - .map(|r| format!("{}", r)) - .collect::>() - .join("\n"))) - } - Err(err) => { - Err(format!("error: {:?}", err)) - } - } - }).map_err(|err| { - format!("{:?}", err) - }).wait()? 
+ signer + .requests_to_confirm() + .map(|reqs| match reqs { + Ok(ref reqs) if reqs.is_empty() => Ok("No transactions in signing queue".to_owned()), + Ok(ref reqs) => Ok(format!( + "Transaction queue:\n{}", + reqs.iter() + .map(|r| format!("{}", r)) + .collect::>() + .join("\n") + )), + Err(err) => Err(format!("error: {:?}", err)), + }) + .map_err(|err| format!("{:?}", err)) + .wait()? } -fn sign_transaction( - signer: &mut SignerRpc, id: U256, password: &str -) -> Result { - signer.confirm_request(id, None, None, None, password).map(|res| { - match res { - Ok(u) => Ok(format!("Signed transaction id: {:#x}", u)), - Err(e) => Err(format!("{:?}", e)), - } - }).map_err(|err| { - format!("{:?}", err) - }).wait()? +fn sign_transaction(signer: &mut SignerRpc, id: U256, password: &str) -> Result { + signer + .confirm_request(id, None, None, None, password) + .map(|res| match res { + Ok(u) => Ok(format!("Signed transaction id: {:#x}", u)), + Err(e) => Err(format!("{:?}", e)), + }) + .map_err(|err| format!("{:?}", err)) + .wait()? } -fn reject_transaction( - signer: &mut SignerRpc, id: U256) -> Result -{ - signer.reject_request(id).map(|res| { - match res { - Ok(true) => Ok(format!("Rejected transaction id {:#x}", id)), - Ok(false) => Err(format!("No such request")), - Err(e) => Err(format!("{:?}", e)), - } - }).map_err(|err| { - format!("{:?}", err) - }).wait()? +fn reject_transaction(signer: &mut SignerRpc, id: U256) -> Result { + signer + .reject_request(id) + .map(|res| match res { + Ok(true) => Ok(format!("Rejected transaction id {:#x}", id)), + Ok(false) => Err(format!("No such request")), + Err(e) => Err(format!("{:?}", e)), + }) + .map_err(|err| format!("{:?}", err)) + .wait()? 
} // cmds -pub fn signer_list( - signerport: u16, authfile: PathBuf -) -> Result { - let addr = &format!("ws://127.0.0.1:{}", signerport); - let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| { - format!("{:?}", err) - })?; - list_transactions(&mut signer) +pub fn signer_list(signerport: u16, authfile: PathBuf) -> Result { + let addr = &format!("ws://127.0.0.1:{}", signerport); + let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| format!("{:?}", err))?; + list_transactions(&mut signer) } pub fn signer_reject( - id: Option, signerport: u16, authfile: PathBuf + id: Option, + signerport: u16, + authfile: PathBuf, ) -> Result { - let id = id.ok_or(format!("id required for signer reject"))?; - let addr = &format!("ws://127.0.0.1:{}", signerport); - let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| { - format!("{:?}", err) - })?; - reject_transaction(&mut signer, U256::from(id)) + let id = id.ok_or(format!("id required for signer reject"))?; + let addr = &format!("ws://127.0.0.1:{}", signerport); + let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| format!("{:?}", err))?; + reject_transaction(&mut signer, U256::from(id)) } pub fn signer_sign( - id: Option, - pwfile: Option, - signerport: u16, - authfile: PathBuf + id: Option, + pwfile: Option, + signerport: u16, + authfile: PathBuf, ) -> Result { - let password; - match pwfile { - Some(pwfile) => { - match File::open(pwfile) { - Ok(fd) => { - match BufReader::new(fd).lines().next() { - Some(Ok(line)) => password = line, - _ => return Err(format!("No password in file")) - } - }, - Err(e) => - return Err(format!("Could not open password file: {}", e)) - } - } - None => { - password = match rpassword::prompt_password_stdout("Password: ") { - Ok(p) => p, - Err(e) => return Err(format!("{}", e)), - } - } - } + let password; + match pwfile { + Some(pwfile) => match File::open(pwfile) { + Ok(fd) => match BufReader::new(fd).lines().next() { + Some(Ok(line)) => password = line, + _ 
=> return Err(format!("No password in file")), + }, + Err(e) => return Err(format!("Could not open password file: {}", e)), + }, + None => { + password = match rpassword::prompt_password_stdout("Password: ") { + Ok(p) => p, + Err(e) => return Err(format!("{}", e)), + } + } + } - let addr = &format!("ws://127.0.0.1:{}", signerport); - let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| { - format!("{:?}", err) - })?; + let addr = &format!("ws://127.0.0.1:{}", signerport); + let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| format!("{:?}", err))?; - match id { - Some(id) => { - sign_transaction(&mut signer, U256::from(id), &password) - }, - None => { - sign_transactions(&mut signer, password) - } - } + match id { + Some(id) => sign_transaction(&mut signer, U256::from(id), &password), + None => sign_transactions(&mut signer, password), + } } diff --git a/ethash/benches/basic.rs b/ethash/benches/basic.rs index 5bc10e948..86659fc5f 100644 --- a/ethash/benches/basic.rs +++ b/ethash/benches/basic.rs @@ -21,93 +21,106 @@ extern crate ethash; use criterion::Criterion; use ethash::{NodeCacheBuilder, OptimizeFor}; -const HASH: [u8; 32] = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, - 0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, - 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72]; +const HASH: [u8; 32] = [ + 0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a, + 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72, +]; const NONCE: u64 = 0xd7b3ac70a301a249; criterion_group!( - basic, - bench_light_compute_memmap, - bench_light_compute_memory, - bench_light_new_round_trip_memmap, - bench_light_new_round_trip_memory, - bench_light_from_file_round_trip_memory, - bench_light_from_file_round_trip_memmap + basic, + bench_light_compute_memmap, + bench_light_compute_memory, + bench_light_new_round_trip_memmap, + 
bench_light_new_round_trip_memory, + bench_light_from_file_round_trip_memory, + bench_light_from_file_round_trip_memmap ); criterion_main!(basic); fn bench_light_compute_memmap(b: &mut Criterion) { - use std::env; + use std::env; - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let light = builder.light(&env::temp_dir(), 486382); + let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let light = builder.light(&env::temp_dir(), 486382); - b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| light.compute(&HASH, NONCE, u64::max_value()))); + b.bench_function("bench_light_compute_memmap", move |b| { + b.iter(|| light.compute(&HASH, NONCE, u64::max_value())) + }); } fn bench_light_compute_memory(b: &mut Criterion) { - use std::env; + use std::env; - let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value()); - let light = builder.light(&env::temp_dir(), 486382); + let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value()); + let light = builder.light(&env::temp_dir(), 486382); - b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| light.compute(&HASH, NONCE, u64::max_value()))); + b.bench_function("bench_light_compute_memmap", move |b| { + b.iter(|| light.compute(&HASH, NONCE, u64::max_value())) + }); } fn bench_light_new_round_trip_memmap(b: &mut Criterion) { - use std::env; + use std::env; - b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| { - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let light = builder.light(&env::temp_dir(), 486382); - light.compute(&HASH, NONCE, u64::max_value()); - })); + b.bench_function("bench_light_compute_memmap", move |b| { + b.iter(|| { + let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let light = builder.light(&env::temp_dir(), 486382); + light.compute(&HASH, NONCE, u64::max_value()); + }) + }); } fn bench_light_new_round_trip_memory(b: &mut 
Criterion) { - use std::env; + use std::env; - b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| { - let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value()); - let light = builder.light(&env::temp_dir(), 486382); - light.compute(&HASH, NONCE, u64::max_value()); - })); + b.bench_function("bench_light_compute_memmap", move |b| { + b.iter(|| { + let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value()); + let light = builder.light(&env::temp_dir(), 486382); + light.compute(&HASH, NONCE, u64::max_value()); + }) + }); } fn bench_light_from_file_round_trip_memory(b: &mut Criterion) { - use std::env; + use std::env; - let dir = env::temp_dir(); - let height = 486382; - { - let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value()); - let mut dummy = builder.light(&dir, height); - dummy.to_file().unwrap(); - } + let dir = env::temp_dir(); + let height = 486382; + { + let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value()); + let mut dummy = builder.light(&dir, height); + dummy.to_file().unwrap(); + } - b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| { - let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value()); - let light = builder.light_from_file(&dir, 486382).unwrap(); - light.compute(&HASH, NONCE, u64::max_value()); - })); + b.bench_function("bench_light_compute_memmap", move |b| { + b.iter(|| { + let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value()); + let light = builder.light_from_file(&dir, 486382).unwrap(); + light.compute(&HASH, NONCE, u64::max_value()); + }) + }); } fn bench_light_from_file_round_trip_memmap(b: &mut Criterion) { - use std::env; + use std::env; - let dir = env::temp_dir(); - let height = 486382; + let dir = env::temp_dir(); + let height = 486382; - { - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let mut dummy = builder.light(&dir, height); - dummy.to_file().unwrap(); - } + { + let builder = 
NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let mut dummy = builder.light(&dir, height); + dummy.to_file().unwrap(); + } - b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| { - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let light = builder.light_from_file(&dir, 486382).unwrap(); - light.compute(&HASH, NONCE, u64::max_value()); - })); + b.bench_function("bench_light_compute_memmap", move |b| { + b.iter(|| { + let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let light = builder.light_from_file(&dir, 486382).unwrap(); + light.compute(&HASH, NONCE, u64::max_value()); + }) + }); } diff --git a/ethash/benches/progpow.rs b/ethash/benches/progpow.rs index e086a14b4..939818e7b 100644 --- a/ethash/benches/progpow.rs +++ b/ethash/benches/progpow.rs @@ -7,80 +7,71 @@ extern crate tempdir; use criterion::Criterion; use ethash::progpow; -use tempdir::TempDir; +use ethash::{compute::light_compute, NodeCacheBuilder, OptimizeFor}; use rustc_hex::FromHex; -use ethash::{NodeCacheBuilder, OptimizeFor}; -use ethash::compute::light_compute; +use tempdir::TempDir; fn bench_hashimoto_light(c: &mut Criterion) { - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let tempdir = TempDir::new("").unwrap(); - let light = builder.light(&tempdir.path(), 1); - let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f").unwrap(); - let mut hash = [0; 32]; - hash.copy_from_slice(&h); + let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let tempdir = TempDir::new("").unwrap(); + let light = builder.light(&tempdir.path(), 1); + let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f") + .unwrap(); + let mut hash = [0; 32]; + hash.copy_from_slice(&h); - c.bench_function("hashimoto_light", move |b| { - b.iter(|| light_compute(&light, &hash, 0)) - }); + 
c.bench_function("hashimoto_light", move |b| { + b.iter(|| light_compute(&light, &hash, 0)) + }); } fn bench_progpow_light(c: &mut Criterion) { - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let tempdir = TempDir::new("").unwrap(); - let cache = builder.new_cache(tempdir.into_path(), 0); + let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let tempdir = TempDir::new("").unwrap(); + let cache = builder.new_cache(tempdir.into_path(), 0); - let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f").unwrap(); - let mut hash = [0; 32]; - hash.copy_from_slice(&h); + let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f") + .unwrap(); + let mut hash = [0; 32]; + hash.copy_from_slice(&h); - c.bench_function("progpow_light", move |b| { - b.iter(|| { - let c_dag = progpow::generate_cdag(cache.as_ref()); - progpow::progpow( - hash, - 0, - 0, - cache.as_ref(), - &c_dag, - ); - }) - }); + c.bench_function("progpow_light", move |b| { + b.iter(|| { + let c_dag = progpow::generate_cdag(cache.as_ref()); + progpow::progpow(hash, 0, 0, cache.as_ref(), &c_dag); + }) + }); } fn bench_progpow_optimal_light(c: &mut Criterion) { - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let tempdir = TempDir::new("").unwrap(); - let cache = builder.new_cache(tempdir.into_path(), 0); - let c_dag = progpow::generate_cdag(cache.as_ref()); + let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let tempdir = TempDir::new("").unwrap(); + let cache = builder.new_cache(tempdir.into_path(), 0); + let c_dag = progpow::generate_cdag(cache.as_ref()); - let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f").unwrap(); - let mut hash = [0; 32]; - hash.copy_from_slice(&h); + let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f") + .unwrap(); + let mut 
hash = [0; 32]; + hash.copy_from_slice(&h); - c.bench_function("progpow_optimal_light", move |b| { - b.iter(|| { - progpow::progpow( - hash, - 0, - 0, - cache.as_ref(), - &c_dag, - ); - }) - }); + c.bench_function("progpow_optimal_light", move |b| { + b.iter(|| { + progpow::progpow(hash, 0, 0, cache.as_ref(), &c_dag); + }) + }); } fn bench_keccak_f800_long(c: &mut Criterion) { - c.bench_function("keccak_f800_long(0, 0, 0)", |b| { - b.iter(|| progpow::keccak_f800_long([0; 32], 0, [0; 8])) - }); + c.bench_function("keccak_f800_long(0, 0, 0)", |b| { + b.iter(|| progpow::keccak_f800_long([0; 32], 0, [0; 8])) + }); } -criterion_group!(benches, - bench_hashimoto_light, - bench_progpow_light, - bench_progpow_optimal_light, - bench_keccak_f800_long, +criterion_group!( + benches, + bench_hashimoto_light, + bench_progpow_light, + bench_progpow_optimal_light, + bench_keccak_f800_long, ); criterion_main!(benches); diff --git a/ethash/src/cache.rs b/ethash/src/cache.rs index b16d27314..e3b8a6096 100644 --- a/ethash/src/cache.rs +++ b/ethash/src/cache.rs @@ -16,291 +16,298 @@ use compute::Light; use either::Either; -use keccak::{H256, keccak_512}; +use keccak::{keccak_512, H256}; use memmap::MmapMut; use parking_lot::Mutex; use seed_compute::SeedHashCompute; -use shared::{ETHASH_CACHE_ROUNDS, NODE_BYTES, NODE_DWORDS, Node, epoch, get_cache_size, to_hex}; +use shared::{epoch, get_cache_size, to_hex, Node, ETHASH_CACHE_ROUNDS, NODE_BYTES, NODE_DWORDS}; -use std::borrow::Cow; -use std::fs; -use std::io::{self, Read, Write}; -use std::path::{Path, PathBuf}; -use std::slice; -use std::sync::Arc; +use std::{ + borrow::Cow, + fs, + io::{self, Read, Write}, + path::{Path, PathBuf}, + slice, + sync::Arc, +}; type Cache = Either, MmapMut>; #[derive(PartialEq, Eq, Debug, Clone, Copy)] pub enum OptimizeFor { - Cpu, - Memory, + Cpu, + Memory, } impl Default for OptimizeFor { - fn default() -> Self { - OptimizeFor::Cpu - } + fn default() -> Self { + OptimizeFor::Cpu + } } fn byte_size(cache: 
&Cache) -> usize { - use self::Either::{Left, Right}; + use self::Either::{Left, Right}; - match *cache { - Left(ref vec) => vec.len() * NODE_BYTES, - Right(ref mmap) => mmap.len(), - } + match *cache { + Left(ref vec) => vec.len() * NODE_BYTES, + Right(ref mmap) => mmap.len(), + } } fn new_buffer(path: &Path, num_nodes: usize, ident: &H256, optimize_for: OptimizeFor) -> Cache { - let memmap = match optimize_for { - OptimizeFor::Cpu => None, - OptimizeFor::Memory => make_memmapped_cache(path, num_nodes, ident).ok(), - }; + let memmap = match optimize_for { + OptimizeFor::Cpu => None, + OptimizeFor::Memory => make_memmapped_cache(path, num_nodes, ident).ok(), + }; - memmap.map(Either::Right).unwrap_or_else(|| { - Either::Left(make_memory_cache(num_nodes, ident)) - }) + memmap + .map(Either::Right) + .unwrap_or_else(|| Either::Left(make_memory_cache(num_nodes, ident))) } #[derive(Clone)] pub struct NodeCacheBuilder { - // TODO: Remove this locking and just use an `Rc`? - seedhash: Arc>, - optimize_for: OptimizeFor, - progpow_transition: u64, + // TODO: Remove this locking and just use an `Rc`? 
+ seedhash: Arc>, + optimize_for: OptimizeFor, + progpow_transition: u64, } // TODO: Abstract the "optimize for" logic pub struct NodeCache { - builder: NodeCacheBuilder, - cache_dir: Cow<'static, Path>, - cache_path: PathBuf, - epoch: u64, - cache: Cache, + builder: NodeCacheBuilder, + cache_dir: Cow<'static, Path>, + cache_path: PathBuf, + epoch: u64, + cache: Cache, } impl NodeCacheBuilder { - pub fn light(&self, cache_dir: &Path, block_number: u64) -> Light { - Light::new_with_builder(self, cache_dir, block_number, self.progpow_transition) - } + pub fn light(&self, cache_dir: &Path, block_number: u64) -> Light { + Light::new_with_builder(self, cache_dir, block_number, self.progpow_transition) + } - pub fn light_from_file(&self, cache_dir: &Path, block_number: u64) -> io::Result { - Light::from_file_with_builder(self, cache_dir, block_number, self.progpow_transition) - } + pub fn light_from_file(&self, cache_dir: &Path, block_number: u64) -> io::Result { + Light::from_file_with_builder(self, cache_dir, block_number, self.progpow_transition) + } - pub fn new>>(optimize_for: T, progpow_transition: u64) -> Self { - NodeCacheBuilder { - seedhash: Arc::new(Mutex::new(SeedHashCompute::default())), - optimize_for: optimize_for.into().unwrap_or_default(), - progpow_transition - } - } + pub fn new>>(optimize_for: T, progpow_transition: u64) -> Self { + NodeCacheBuilder { + seedhash: Arc::new(Mutex::new(SeedHashCompute::default())), + optimize_for: optimize_for.into().unwrap_or_default(), + progpow_transition, + } + } - fn block_number_to_ident(&self, block_number: u64) -> H256 { - self.seedhash.lock().hash_block_number(block_number) - } + fn block_number_to_ident(&self, block_number: u64) -> H256 { + self.seedhash.lock().hash_block_number(block_number) + } - fn epoch_to_ident(&self, epoch: u64) -> H256 { - self.seedhash.lock().hash_epoch(epoch) - } + fn epoch_to_ident(&self, epoch: u64) -> H256 { + self.seedhash.lock().hash_epoch(epoch) + } - pub fn from_file>>( - &self, 
- cache_dir: P, - block_number: u64, - ) -> io::Result { - let cache_dir = cache_dir.into(); - let ident = self.block_number_to_ident(block_number); + pub fn from_file>>( + &self, + cache_dir: P, + block_number: u64, + ) -> io::Result { + let cache_dir = cache_dir.into(); + let ident = self.block_number_to_ident(block_number); - let path = cache_path(cache_dir.as_ref(), &ident); + let path = cache_path(cache_dir.as_ref(), &ident); - let cache = cache_from_path(&path, self.optimize_for)?; - let expected_cache_size = get_cache_size(block_number); + let cache = cache_from_path(&path, self.optimize_for)?; + let expected_cache_size = get_cache_size(block_number); - if byte_size(&cache) == expected_cache_size { - Ok(NodeCache { - builder: self.clone(), - epoch: epoch(block_number), - cache_dir: cache_dir, - cache_path: path, - cache: cache, - }) - } else { - Err(io::Error::new( - io::ErrorKind::InvalidData, - "Node cache is of incorrect size", - )) - } - } + if byte_size(&cache) == expected_cache_size { + Ok(NodeCache { + builder: self.clone(), + epoch: epoch(block_number), + cache_dir: cache_dir, + cache_path: path, + cache: cache, + }) + } else { + Err(io::Error::new( + io::ErrorKind::InvalidData, + "Node cache is of incorrect size", + )) + } + } - pub fn new_cache>>( - &self, - cache_dir: P, - block_number: u64, - ) -> NodeCache { - let cache_dir = cache_dir.into(); - let ident = self.block_number_to_ident(block_number); + pub fn new_cache>>( + &self, + cache_dir: P, + block_number: u64, + ) -> NodeCache { + let cache_dir = cache_dir.into(); + let ident = self.block_number_to_ident(block_number); - let cache_size = get_cache_size(block_number); + let cache_size = get_cache_size(block_number); - // We use `debug_assert` since it is impossible for `get_cache_size` to return an unaligned - // value with the current implementation. If the implementation changes, CI will catch it. 
- debug_assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size"); - let num_nodes = cache_size / NODE_BYTES; + // We use `debug_assert` since it is impossible for `get_cache_size` to return an unaligned + // value with the current implementation. If the implementation changes, CI will catch it. + debug_assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size"); + let num_nodes = cache_size / NODE_BYTES; - let path = cache_path(cache_dir.as_ref(), &ident); - let nodes = new_buffer(&path, num_nodes, &ident, self.optimize_for); + let path = cache_path(cache_dir.as_ref(), &ident); + let nodes = new_buffer(&path, num_nodes, &ident, self.optimize_for); - NodeCache { - builder: self.clone(), - epoch: epoch(block_number), - cache_dir: cache_dir.into(), - cache_path: path, - cache: nodes, - } - } + NodeCache { + builder: self.clone(), + epoch: epoch(block_number), + cache_dir: cache_dir.into(), + cache_path: path, + cache: nodes, + } + } } impl NodeCache { - pub fn cache_path(&self) -> &Path { - &self.cache_path - } + pub fn cache_path(&self) -> &Path { + &self.cache_path + } - pub fn flush(&mut self) -> io::Result<()> { - if let Some(last) = self.epoch.checked_sub(2).map(|ep| { - cache_path(self.cache_dir.as_ref(), &self.builder.epoch_to_ident(ep)) - }) - { - fs::remove_file(last).unwrap_or_else(|error| match error.kind() { - io::ErrorKind::NotFound => (), - _ => warn!("Error removing stale DAG cache: {:?}", error), - }); - } + pub fn flush(&mut self) -> io::Result<()> { + if let Some(last) = self + .epoch + .checked_sub(2) + .map(|ep| cache_path(self.cache_dir.as_ref(), &self.builder.epoch_to_ident(ep))) + { + fs::remove_file(last).unwrap_or_else(|error| match error.kind() { + io::ErrorKind::NotFound => (), + _ => warn!("Error removing stale DAG cache: {:?}", error), + }); + } - consume_cache(&mut self.cache, &self.cache_path) - } + consume_cache(&mut self.cache, &self.cache_path) + } } fn make_memmapped_cache(path: &Path, num_nodes: usize, ident: &H256) -> 
io::Result { - use std::fs::OpenOptions; + use std::fs::OpenOptions; - let file = OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path)?; - file.set_len((num_nodes * NODE_BYTES) as _)?; + let file = OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(&path)?; + file.set_len((num_nodes * NODE_BYTES) as _)?; - let mut memmap = unsafe { MmapMut::map_mut(&file)? }; + let mut memmap = unsafe { MmapMut::map_mut(&file)? }; - unsafe { initialize_memory(memmap.as_mut_ptr() as *mut Node, num_nodes, ident) }; + unsafe { initialize_memory(memmap.as_mut_ptr() as *mut Node, num_nodes, ident) }; - Ok(memmap) + Ok(memmap) } fn make_memory_cache(num_nodes: usize, ident: &H256) -> Vec { - let mut nodes: Vec = Vec::with_capacity(num_nodes); - // Use uninit instead of unnecessarily writing `size_of::() * num_nodes` 0s - unsafe { - initialize_memory(nodes.as_mut_ptr(), num_nodes, ident); - nodes.set_len(num_nodes); - } + let mut nodes: Vec = Vec::with_capacity(num_nodes); + // Use uninit instead of unnecessarily writing `size_of::() * num_nodes` 0s + unsafe { + initialize_memory(nodes.as_mut_ptr(), num_nodes, ident); + nodes.set_len(num_nodes); + } - nodes + nodes } fn cache_path<'a, P: Into>>(path: P, ident: &H256) -> PathBuf { - let mut buf = path.into().into_owned(); - buf.push(to_hex(ident)); - buf + let mut buf = path.into().into_owned(); + buf.push(to_hex(ident)); + buf } fn consume_cache(cache: &mut Cache, path: &Path) -> io::Result<()> { - use std::fs::OpenOptions; + use std::fs::OpenOptions; - match *cache { - Either::Left(ref mut vec) => { - let mut file = OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path)?; + match *cache { + Either::Left(ref mut vec) => { + let mut file = OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(&path)?; - let buf = unsafe { - slice::from_raw_parts_mut(vec.as_mut_ptr() as *mut u8, vec.len() * NODE_BYTES) - }; + let buf = unsafe { + 
slice::from_raw_parts_mut(vec.as_mut_ptr() as *mut u8, vec.len() * NODE_BYTES) + }; - file.write_all(buf).map(|_| ()) - } - Either::Right(ref mmap) => { - mmap.flush() - } - } + file.write_all(buf).map(|_| ()) + } + Either::Right(ref mmap) => mmap.flush(), + } } fn cache_from_path(path: &Path, optimize_for: OptimizeFor) -> io::Result { - let memmap = match optimize_for { - OptimizeFor::Cpu => None, - OptimizeFor::Memory => { - let file = fs::OpenOptions::new().read(true).write(true).create(true).open(path)?; - unsafe { MmapMut::map_mut(&file).ok() } - }, - }; + let memmap = match optimize_for { + OptimizeFor::Cpu => None, + OptimizeFor::Memory => { + let file = fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(path)?; + unsafe { MmapMut::map_mut(&file).ok() } + } + }; - memmap.map(Either::Right).ok_or(()).or_else(|_| { - read_from_path(path).map(Either::Left) - }) + memmap + .map(Either::Right) + .ok_or(()) + .or_else(|_| read_from_path(path).map(Either::Left)) } fn read_from_path(path: &Path) -> io::Result> { - use std::fs::File; - use std::mem; + use std::{fs::File, mem}; - let mut file = File::open(path)?; + let mut file = File::open(path)?; - let mut nodes: Vec = Vec::with_capacity(file.metadata().map(|m| m.len() as _).unwrap_or( - NODE_BYTES * 1_000_000, - )); - file.read_to_end(&mut nodes)?; + let mut nodes: Vec = Vec::with_capacity( + file.metadata() + .map(|m| m.len() as _) + .unwrap_or(NODE_BYTES * 1_000_000), + ); + file.read_to_end(&mut nodes)?; - nodes.shrink_to_fit(); + nodes.shrink_to_fit(); - if nodes.len() % NODE_BYTES != 0 || nodes.capacity() % NODE_BYTES != 0 { - return Err(io::Error::new( - io::ErrorKind::Other, - "Node cache is not a multiple of node size", - )); - } + if nodes.len() % NODE_BYTES != 0 || nodes.capacity() % NODE_BYTES != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "Node cache is not a multiple of node size", + )); + } - let out: Vec = unsafe { - Vec::from_raw_parts( - nodes.as_mut_ptr() as 
*mut _, - nodes.len() / NODE_BYTES, - nodes.capacity() / NODE_BYTES, - ) - }; + let out: Vec = unsafe { + Vec::from_raw_parts( + nodes.as_mut_ptr() as *mut _, + nodes.len() / NODE_BYTES, + nodes.capacity() / NODE_BYTES, + ) + }; - mem::forget(nodes); + mem::forget(nodes); - Ok(out) + Ok(out) } impl AsRef<[Node]> for NodeCache { - fn as_ref(&self) -> &[Node] { - match self.cache { - Either::Left(ref vec) => vec, - Either::Right(ref mmap) => unsafe { - let bytes = mmap.as_ptr(); - // This isn't a safety issue, so we can keep this a debug lint. We don't care about - // people manually messing with the files unless it can cause unsafety, but if we're - // generating incorrect files then we want to catch that in CI. - debug_assert_eq!(mmap.len() % NODE_BYTES, 0); - slice::from_raw_parts(bytes as _, mmap.len() / NODE_BYTES) - }, - } - } + fn as_ref(&self) -> &[Node] { + match self.cache { + Either::Left(ref vec) => vec, + Either::Right(ref mmap) => unsafe { + let bytes = mmap.as_ptr(); + // This isn't a safety issue, so we can keep this a debug lint. We don't care about + // people manually messing with the files unless it can cause unsafety, but if we're + // generating incorrect files then we want to catch that in CI. + debug_assert_eq!(mmap.len() % NODE_BYTES, 0); + slice::from_raw_parts(bytes as _, mmap.len() / NODE_BYTES) + }, + } + } } // This takes a raw pointer and a counter because `memory` may be uninitialized. `memory` _must_ be @@ -311,47 +318,47 @@ impl AsRef<[Node]> for NodeCache { // out. It counts as a read and causes all writes afterwards to be elided. Yes, really. I know, I // want to refactor this to use less `unsafe` as much as the next rustacean. 
unsafe fn initialize_memory(memory: *mut Node, num_nodes: usize, ident: &H256) { - let dst = memory as *mut u8; + let dst = memory as *mut u8; - debug_assert_eq!(ident.len(), 32); - keccak_512::unchecked(dst, NODE_BYTES, ident.as_ptr(), ident.len()); + debug_assert_eq!(ident.len(), 32); + keccak_512::unchecked(dst, NODE_BYTES, ident.as_ptr(), ident.len()); - for i in 1..num_nodes { - // We use raw pointers here, see above - let dst = memory.offset(i as _) as *mut u8; - let src = memory.offset(i as isize - 1) as *mut u8; + for i in 1..num_nodes { + // We use raw pointers here, see above + let dst = memory.offset(i as _) as *mut u8; + let src = memory.offset(i as isize - 1) as *mut u8; - keccak_512::unchecked(dst, NODE_BYTES, src, NODE_BYTES); - } + keccak_512::unchecked(dst, NODE_BYTES, src, NODE_BYTES); + } - // Now this is initialized, we can treat it as a slice. - let nodes: &mut [Node] = slice::from_raw_parts_mut(memory, num_nodes); + // Now this is initialized, we can treat it as a slice. + let nodes: &mut [Node] = slice::from_raw_parts_mut(memory, num_nodes); - // For `unroll!`, see below. If the literal in `unroll!` is not the same as the RHS here then - // these have got out of sync! Don't let this happen! - debug_assert_eq!(NODE_DWORDS, 8); + // For `unroll!`, see below. If the literal in `unroll!` is not the same as the RHS here then + // these have got out of sync! Don't let this happen! + debug_assert_eq!(NODE_DWORDS, 8); - // This _should_ get unrolled by the compiler, since it's not using the loop variable. - for _ in 0..ETHASH_CACHE_ROUNDS { - for i in 0..num_nodes { - let data_idx = (num_nodes - 1 + i) % num_nodes; - let idx = nodes.get_unchecked_mut(i).as_words()[0] as usize % num_nodes; + // This _should_ get unrolled by the compiler, since it's not using the loop variable. 
+ for _ in 0..ETHASH_CACHE_ROUNDS { + for i in 0..num_nodes { + let data_idx = (num_nodes - 1 + i) % num_nodes; + let idx = nodes.get_unchecked_mut(i).as_words()[0] as usize % num_nodes; - let data = { - let mut data: Node = nodes.get_unchecked(data_idx).clone(); - let rhs: &Node = nodes.get_unchecked(idx); + let data = { + let mut data: Node = nodes.get_unchecked(data_idx).clone(); + let rhs: &Node = nodes.get_unchecked(idx); - unroll! { - for w in 0..8 { - *data.as_dwords_mut().get_unchecked_mut(w) ^= - *rhs.as_dwords().get_unchecked(w); - } - } + unroll! { + for w in 0..8 { + *data.as_dwords_mut().get_unchecked_mut(w) ^= + *rhs.as_dwords().get_unchecked(w); + } + } - data - }; + data + }; - keccak_512::write(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes); - } - } + keccak_512::write(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes); + } + } } diff --git a/ethash/src/compute.rs b/ethash/src/compute.rs index 36826121d..6e75b4c2a 100644 --- a/ethash/src/compute.rs +++ b/ethash/src/compute.rs @@ -19,15 +19,14 @@ // TODO: fix endianess for big endian -use keccak::{keccak_512, keccak_256, H256}; use cache::{NodeCache, NodeCacheBuilder}; -use progpow::{CDag, generate_cdag, progpow, keccak_f800_short, keccak_f800_long}; +use keccak::{keccak_256, keccak_512, H256}; +use progpow::{generate_cdag, keccak_f800_long, keccak_f800_short, progpow, CDag}; use seed_compute::SeedHashCompute; use shared::*; use std::io; -use std::{mem, ptr}; -use std::path::Path; +use std::{mem, path::Path, ptr}; const MIX_WORDS: usize = ETHASH_MIX_BYTES / 4; const MIX_NODES: usize = MIX_WORDS / NODE_WORDS; @@ -35,92 +34,99 @@ pub const FNV_PRIME: u32 = 0x01000193; /// Computation result pub struct ProofOfWork { - /// Difficulty boundary - pub value: H256, - /// Mix - pub mix_hash: H256, + /// Difficulty boundary + pub value: H256, + /// Mix + pub mix_hash: H256, } enum Algorithm { - Hashimoto, - Progpow(Box), + Hashimoto, + Progpow(Box), } pub struct Light { - block_number: u64, - cache: 
NodeCache, - algorithm: Algorithm, + block_number: u64, + cache: NodeCache, + algorithm: Algorithm, } /// Light cache structure impl Light { - pub fn new_with_builder( - builder: &NodeCacheBuilder, - cache_dir: &Path, - block_number: u64, - progpow_transition: u64, - ) -> Self { - let cache = builder.new_cache(cache_dir.to_path_buf(), block_number); + pub fn new_with_builder( + builder: &NodeCacheBuilder, + cache_dir: &Path, + block_number: u64, + progpow_transition: u64, + ) -> Self { + let cache = builder.new_cache(cache_dir.to_path_buf(), block_number); - let algorithm = if block_number >= progpow_transition { - Algorithm::Progpow(Box::new(generate_cdag(cache.as_ref()))) - } else { - Algorithm::Hashimoto - }; + let algorithm = if block_number >= progpow_transition { + Algorithm::Progpow(Box::new(generate_cdag(cache.as_ref()))) + } else { + Algorithm::Hashimoto + }; - Light { block_number, cache, algorithm } - } + Light { + block_number, + cache, + algorithm, + } + } - /// Calculate the light boundary data - /// `header_hash` - The header hash to pack into the mix - /// `nonce` - The nonce to pack into the mix - pub fn compute(&self, header_hash: &H256, nonce: u64, block_number: u64) -> ProofOfWork { - match self.algorithm { - Algorithm::Progpow(ref c_dag) => { - let (value, mix_hash) = progpow( - *header_hash, - nonce, - block_number, - self.cache.as_ref(), - c_dag, - ); + /// Calculate the light boundary data + /// `header_hash` - The header hash to pack into the mix + /// `nonce` - The nonce to pack into the mix + pub fn compute(&self, header_hash: &H256, nonce: u64, block_number: u64) -> ProofOfWork { + match self.algorithm { + Algorithm::Progpow(ref c_dag) => { + let (value, mix_hash) = progpow( + *header_hash, + nonce, + block_number, + self.cache.as_ref(), + c_dag, + ); - ProofOfWork { value, mix_hash } - }, - Algorithm::Hashimoto => light_compute(self, header_hash, nonce), - } + ProofOfWork { value, mix_hash } + } + Algorithm::Hashimoto => 
light_compute(self, header_hash, nonce), + } + } - } + pub fn from_file_with_builder( + builder: &NodeCacheBuilder, + cache_dir: &Path, + block_number: u64, + progpow_transition: u64, + ) -> io::Result { + let cache = builder.from_file(cache_dir.to_path_buf(), block_number)?; - pub fn from_file_with_builder( - builder: &NodeCacheBuilder, - cache_dir: &Path, - block_number: u64, - progpow_transition: u64, - ) -> io::Result { - let cache = builder.from_file(cache_dir.to_path_buf(), block_number)?; + let algorithm = if block_number >= progpow_transition { + Algorithm::Progpow(Box::new(generate_cdag(cache.as_ref()))) + } else { + Algorithm::Hashimoto + }; - let algorithm = if block_number >= progpow_transition { - Algorithm::Progpow(Box::new(generate_cdag(cache.as_ref()))) - } else { - Algorithm::Hashimoto - }; + Ok(Light { + block_number, + cache, + algorithm, + }) + } - Ok(Light { block_number, cache, algorithm }) - } - - pub fn to_file(&mut self) -> io::Result<&Path> { - self.cache.flush()?; - Ok(self.cache.cache_path()) - } + pub fn to_file(&mut self) -> io::Result<&Path> { + self.cache.flush()?; + Ok(self.cache.cache_path()) + } } pub fn slow_hash_block_number(block_number: u64) -> H256 { - SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, block_number / ETHASH_EPOCH_LENGTH) + SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, block_number / ETHASH_EPOCH_LENGTH) } fn fnv_hash(x: u32, y: u32) -> u32 { - return x.wrapping_mul(FNV_PRIME) ^ y; + return x.wrapping_mul(FNV_PRIME) ^ y; } /// Difficulty quick check for POW preverification @@ -129,33 +135,38 @@ fn fnv_hash(x: u32, y: u32) -> u32 { /// `nonce` The block's nonce /// `mix_hash` The mix digest hash /// Boundary recovered from mix hash -pub fn quick_get_difficulty(header_hash: &H256, nonce: u64, mix_hash: &H256, progpow: bool) -> H256 { - unsafe { - if progpow { - let seed = keccak_f800_short(*header_hash, nonce, [0u32; 8]); - keccak_f800_long(*header_hash, seed, mem::transmute(*mix_hash)) - } else { - 
// This is safe - the `keccak_512` call below reads the first 40 bytes (which we explicitly set - // with two `copy_nonoverlapping` calls) but writes the first 64, and then we explicitly write - // the next 32 bytes before we read the whole thing with `keccak_256`. - // - // This cannot be elided by the compiler as it doesn't know the implementation of - // `keccak_512`. - let mut buf: [u8; 64 + 32] = mem::uninitialized(); +pub fn quick_get_difficulty( + header_hash: &H256, + nonce: u64, + mix_hash: &H256, + progpow: bool, +) -> H256 { + unsafe { + if progpow { + let seed = keccak_f800_short(*header_hash, nonce, [0u32; 8]); + keccak_f800_long(*header_hash, seed, mem::transmute(*mix_hash)) + } else { + // This is safe - the `keccak_512` call below reads the first 40 bytes (which we explicitly set + // with two `copy_nonoverlapping` calls) but writes the first 64, and then we explicitly write + // the next 32 bytes before we read the whole thing with `keccak_256`. + // + // This cannot be elided by the compiler as it doesn't know the implementation of + // `keccak_512`. 
+ let mut buf: [u8; 64 + 32] = mem::uninitialized(); - ptr::copy_nonoverlapping(header_hash.as_ptr(), buf.as_mut_ptr(), 32); - ptr::copy_nonoverlapping(&nonce as *const u64 as *const u8, buf[32..].as_mut_ptr(), 8); + ptr::copy_nonoverlapping(header_hash.as_ptr(), buf.as_mut_ptr(), 32); + ptr::copy_nonoverlapping(&nonce as *const u64 as *const u8, buf[32..].as_mut_ptr(), 8); - keccak_512::unchecked(buf.as_mut_ptr(), 64, buf.as_ptr(), 40); - ptr::copy_nonoverlapping(mix_hash.as_ptr(), buf[64..].as_mut_ptr(), 32); + keccak_512::unchecked(buf.as_mut_ptr(), 64, buf.as_ptr(), 40); + ptr::copy_nonoverlapping(mix_hash.as_ptr(), buf[64..].as_mut_ptr(), 32); - // This is initialized in `keccak_256` - let mut hash: [u8; 32] = mem::uninitialized(); - keccak_256::unchecked(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len()); + // This is initialized in `keccak_256` + let mut hash: [u8; 32] = mem::uninitialized(); + keccak_256::unchecked(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len()); - hash - } - } + hash + } + } } /// Calculate the light client data @@ -163,289 +174,310 @@ pub fn quick_get_difficulty(header_hash: &H256, nonce: u64, mix_hash: &H256, pro /// `header_hash` - The header hash to pack into the mix /// `nonce` - The nonce to pack into the mix pub fn light_compute(light: &Light, header_hash: &H256, nonce: u64) -> ProofOfWork { - let full_size = get_data_size(light.block_number); - hash_compute(light, full_size, header_hash, nonce) + let full_size = get_data_size(light.block_number); + hash_compute(light, full_size, header_hash, nonce) } fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64) -> ProofOfWork { - macro_rules! make_const_array { - ($n:expr, $value:expr) => {{ - // We use explicit lifetimes to ensure that val's borrow is invalidated until the - // transmuted val dies. - unsafe fn make_const_array(val: &mut [T]) -> &mut [U; $n] { - use ::std::mem; + macro_rules! 
make_const_array { + ($n:expr, $value:expr) => {{ + // We use explicit lifetimes to ensure that val's borrow is invalidated until the + // transmuted val dies. + unsafe fn make_const_array(val: &mut [T]) -> &mut [U; $n] { + use std::mem; - debug_assert_eq!(val.len() * mem::size_of::(), $n * mem::size_of::()); - &mut *(val.as_mut_ptr() as *mut [U; $n]) - } + debug_assert_eq!(val.len() * mem::size_of::(), $n * mem::size_of::()); + &mut *(val.as_mut_ptr() as *mut [U; $n]) + } - make_const_array($value) - }} - } + make_const_array($value) + }}; + } - #[repr(C)] - struct MixBuf { - half_mix: Node, - compress_bytes: [u8; MIX_WORDS], - }; + #[repr(C)] + struct MixBuf { + half_mix: Node, + compress_bytes: [u8; MIX_WORDS], + }; - if full_size % MIX_WORDS != 0 { - panic!("Unaligned full size"); - } + if full_size % MIX_WORDS != 0 { + panic!("Unaligned full size"); + } - // You may be asking yourself: what in the name of Crypto Jesus is going on here? So: we need - // `half_mix` and `compress_bytes` in a single array later down in the code (we hash them - // together to create `value`) so that we can hash the full array. However, we do a bunch of - // reading and writing to these variables first. We originally allocated two arrays and then - // stuck them together with `ptr::copy_nonoverlapping` at the end, but this method is - // _significantly_ faster - by my benchmarks, a consistent 3-5%. This is the most ridiculous - // optimization I have ever done and I am so sorry. I can only chalk it up to cache locality - // improvements, since I can't imagine that 3-5% of our runtime is taken up by catting two - // arrays together. - let mut buf: MixBuf = MixBuf { - half_mix: unsafe { - // Pack `header_hash` and `nonce` together - // We explicitly write the first 40 bytes, leaving the last 24 as uninitialized. Then - // `keccak_512` reads the first 40 bytes (4th parameter) and overwrites the entire array, - // leaving it fully initialized. 
- let mut out: [u8; NODE_BYTES] = mem::uninitialized(); + // You may be asking yourself: what in the name of Crypto Jesus is going on here? So: we need + // `half_mix` and `compress_bytes` in a single array later down in the code (we hash them + // together to create `value`) so that we can hash the full array. However, we do a bunch of + // reading and writing to these variables first. We originally allocated two arrays and then + // stuck them together with `ptr::copy_nonoverlapping` at the end, but this method is + // _significantly_ faster - by my benchmarks, a consistent 3-5%. This is the most ridiculous + // optimization I have ever done and I am so sorry. I can only chalk it up to cache locality + // improvements, since I can't imagine that 3-5% of our runtime is taken up by catting two + // arrays together. + let mut buf: MixBuf = MixBuf { + half_mix: unsafe { + // Pack `header_hash` and `nonce` together + // We explicitly write the first 40 bytes, leaving the last 24 as uninitialized. Then + // `keccak_512` reads the first 40 bytes (4th parameter) and overwrites the entire array, + // leaving it fully initialized. 
+ let mut out: [u8; NODE_BYTES] = mem::uninitialized(); - ptr::copy_nonoverlapping(header_hash.as_ptr(), out.as_mut_ptr(), header_hash.len()); - ptr::copy_nonoverlapping( - &nonce as *const u64 as *const u8, - out[header_hash.len()..].as_mut_ptr(), - mem::size_of::(), - ); + ptr::copy_nonoverlapping(header_hash.as_ptr(), out.as_mut_ptr(), header_hash.len()); + ptr::copy_nonoverlapping( + &nonce as *const u64 as *const u8, + out[header_hash.len()..].as_mut_ptr(), + mem::size_of::(), + ); - // compute keccak-512 hash and replicate across mix - keccak_512::unchecked( - out.as_mut_ptr(), - NODE_BYTES, - out.as_ptr(), - header_hash.len() + mem::size_of::(), - ); + // compute keccak-512 hash and replicate across mix + keccak_512::unchecked( + out.as_mut_ptr(), + NODE_BYTES, + out.as_ptr(), + header_hash.len() + mem::size_of::(), + ); - Node { bytes: out } - }, - // This is fully initialized before being read, see `let mut compress = ...` below - compress_bytes: unsafe { mem::uninitialized() }, - }; + Node { bytes: out } + }, + // This is fully initialized before being read, see `let mut compress = ...` below + compress_bytes: unsafe { mem::uninitialized() }, + }; - let mut mix: [_; MIX_NODES] = [buf.half_mix.clone(), buf.half_mix.clone()]; + let mut mix: [_; MIX_NODES] = [buf.half_mix.clone(), buf.half_mix.clone()]; - let page_size = 4 * MIX_WORDS; - let num_full_pages = (full_size / page_size) as u32; - // deref once for better performance - let cache: &[Node] = light.cache.as_ref(); - let first_val = buf.half_mix.as_words()[0]; + let page_size = 4 * MIX_WORDS; + let num_full_pages = (full_size / page_size) as u32; + // deref once for better performance + let cache: &[Node] = light.cache.as_ref(); + let first_val = buf.half_mix.as_words()[0]; - debug_assert_eq!(MIX_NODES, 2); - debug_assert_eq!(NODE_WORDS, 16); + debug_assert_eq!(MIX_NODES, 2); + debug_assert_eq!(NODE_WORDS, 16); - for i in 0..ETHASH_ACCESSES as u32 { - let index = { - // This is trivially safe, but 
does not work on big-endian. The safety of this is - // asserted in debug builds (see the definition of `make_const_array!`). - let mix_words: &mut [u32; MIX_WORDS] = - unsafe { make_const_array!(MIX_WORDS, &mut mix) }; + for i in 0..ETHASH_ACCESSES as u32 { + let index = { + // This is trivially safe, but does not work on big-endian. The safety of this is + // asserted in debug builds (see the definition of `make_const_array!`). + let mix_words: &mut [u32; MIX_WORDS] = + unsafe { make_const_array!(MIX_WORDS, &mut mix) }; - fnv_hash(first_val ^ i, mix_words[i as usize % MIX_WORDS]) % num_full_pages - }; + fnv_hash(first_val ^ i, mix_words[i as usize % MIX_WORDS]) % num_full_pages + }; - unroll! { - // MIX_NODES - for n in 0..2 { - let tmp_node = calculate_dag_item( - index * MIX_NODES as u32 + n as u32, - cache, - ); + unroll! { + // MIX_NODES + for n in 0..2 { + let tmp_node = calculate_dag_item( + index * MIX_NODES as u32 + n as u32, + cache, + ); - unroll! { - // NODE_WORDS - for w in 0..16 { - mix[n].as_words_mut()[w] = - fnv_hash( - mix[n].as_words()[w], - tmp_node.as_words()[w], - ); - } - } - } - } - } + unroll! { + // NODE_WORDS + for w in 0..16 { + mix[n].as_words_mut()[w] = + fnv_hash( + mix[n].as_words()[w], + tmp_node.as_words()[w], + ); + } + } + } + } + } - let mix_words: [u32; MIX_WORDS] = unsafe { mem::transmute(mix) }; + let mix_words: [u32; MIX_WORDS] = unsafe { mem::transmute(mix) }; - { - // This is an uninitialized buffer to begin with, but we iterate precisely `compress.len()` - // times and set each index, leaving the array fully initialized. THIS ONLY WORKS ON LITTLE- - // ENDIAN MACHINES. See a future PR to make this and the rest of the code work correctly on - // big-endian arches like mips. 
- let compress: &mut [u32; MIX_WORDS / 4] = - unsafe { make_const_array!(MIX_WORDS / 4, &mut buf.compress_bytes) }; + { + // This is an uninitialized buffer to begin with, but we iterate precisely `compress.len()` + // times and set each index, leaving the array fully initialized. THIS ONLY WORKS ON LITTLE- + // ENDIAN MACHINES. See a future PR to make this and the rest of the code work correctly on + // big-endian arches like mips. + let compress: &mut [u32; MIX_WORDS / 4] = + unsafe { make_const_array!(MIX_WORDS / 4, &mut buf.compress_bytes) }; - // Compress mix - debug_assert_eq!(MIX_WORDS / 4, 8); - unroll! { - for i in 0..8 { - let w = i * 4; + // Compress mix + debug_assert_eq!(MIX_WORDS / 4, 8); + unroll! { + for i in 0..8 { + let w = i * 4; - let mut reduction = mix_words[w + 0]; - reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 1]; - reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 2]; - reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 3]; - compress[i] = reduction; - } - } - } + let mut reduction = mix_words[w + 0]; + reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 1]; + reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 2]; + reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 3]; + compress[i] = reduction; + } + } + } - let mix_hash = buf.compress_bytes; + let mix_hash = buf.compress_bytes; - let value: H256 = { - // We can interpret the buffer as an array of `u8`s, since it's `repr(C)`. - let read_ptr: *const u8 = &buf as *const MixBuf as *const u8; - // We overwrite the second half since `keccak_256` has an internal buffer and so allows - // overlapping arrays as input. 
- let write_ptr: *mut u8 = &mut buf.compress_bytes as *mut [u8; 32] as *mut u8; - unsafe { - keccak_256::unchecked( - write_ptr, - buf.compress_bytes.len(), - read_ptr, - buf.half_mix.bytes.len() + buf.compress_bytes.len(), - ); - } - buf.compress_bytes - }; + let value: H256 = { + // We can interpret the buffer as an array of `u8`s, since it's `repr(C)`. + let read_ptr: *const u8 = &buf as *const MixBuf as *const u8; + // We overwrite the second half since `keccak_256` has an internal buffer and so allows + // overlapping arrays as input. + let write_ptr: *mut u8 = &mut buf.compress_bytes as *mut [u8; 32] as *mut u8; + unsafe { + keccak_256::unchecked( + write_ptr, + buf.compress_bytes.len(), + read_ptr, + buf.half_mix.bytes.len() + buf.compress_bytes.len(), + ); + } + buf.compress_bytes + }; - ProofOfWork { mix_hash: mix_hash, value: value } + ProofOfWork { + mix_hash: mix_hash, + value: value, + } } // TODO: Use the `simd` crate pub fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node { - let num_parent_nodes = cache.len(); - let mut ret = cache[node_index as usize % num_parent_nodes].clone(); - ret.as_words_mut()[0] ^= node_index; + let num_parent_nodes = cache.len(); + let mut ret = cache[node_index as usize % num_parent_nodes].clone(); + ret.as_words_mut()[0] ^= node_index; - keccak_512::inplace(ret.as_bytes_mut()); + keccak_512::inplace(ret.as_bytes_mut()); - debug_assert_eq!(NODE_WORDS, 16); - for i in 0..ETHASH_DATASET_PARENTS as u32 { - let parent_index = fnv_hash(node_index ^ i, ret.as_words()[i as usize % NODE_WORDS]) % - num_parent_nodes as u32; - let parent = &cache[parent_index as usize]; + debug_assert_eq!(NODE_WORDS, 16); + for i in 0..ETHASH_DATASET_PARENTS as u32 { + let parent_index = fnv_hash(node_index ^ i, ret.as_words()[i as usize % NODE_WORDS]) + % num_parent_nodes as u32; + let parent = &cache[parent_index as usize]; - unroll! 
{ - for w in 0..16 { - ret.as_words_mut()[w] = fnv_hash(ret.as_words()[w], parent.as_words()[w]); - } - } - } + unroll! { + for w in 0..16 { + ret.as_words_mut()[w] = fnv_hash(ret.as_words()[w], parent.as_words()[w]); + } + } + } - keccak_512::inplace(ret.as_bytes_mut()); + keccak_512::inplace(ret.as_bytes_mut()); - ret + ret } #[cfg(test)] mod test { - use super::*; - use std::fs; - use tempdir::TempDir; + use super::*; + use std::fs; + use tempdir::TempDir; - #[test] - fn test_get_cache_size() { - // https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes - assert_eq!(16776896usize, get_cache_size(0)); - assert_eq!(16776896usize, get_cache_size(1)); - assert_eq!(16776896usize, get_cache_size(ETHASH_EPOCH_LENGTH - 1)); - assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH)); - assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH + 1)); - assert_eq!(284950208usize, get_cache_size(2046 * ETHASH_EPOCH_LENGTH)); - assert_eq!(285081536usize, get_cache_size(2047 * ETHASH_EPOCH_LENGTH)); - assert_eq!(285081536usize, get_cache_size(2048 * ETHASH_EPOCH_LENGTH - 1)); - } + #[test] + fn test_get_cache_size() { + // https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes + assert_eq!(16776896usize, get_cache_size(0)); + assert_eq!(16776896usize, get_cache_size(1)); + assert_eq!(16776896usize, get_cache_size(ETHASH_EPOCH_LENGTH - 1)); + assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH)); + assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH + 1)); + assert_eq!(284950208usize, get_cache_size(2046 * ETHASH_EPOCH_LENGTH)); + assert_eq!(285081536usize, get_cache_size(2047 * ETHASH_EPOCH_LENGTH)); + assert_eq!( + 285081536usize, + get_cache_size(2048 * ETHASH_EPOCH_LENGTH - 1) + ); + } - #[test] - fn test_get_data_size() { - // https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes - assert_eq!(1073739904usize, 
get_data_size(0)); - assert_eq!(1073739904usize, get_data_size(1)); - assert_eq!(1073739904usize, get_data_size(ETHASH_EPOCH_LENGTH - 1)); - assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH)); - assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH + 1)); - assert_eq!(18236833408usize, get_data_size(2046 * ETHASH_EPOCH_LENGTH)); - assert_eq!(18245220736usize, get_data_size(2047 * ETHASH_EPOCH_LENGTH)); - } + #[test] + fn test_get_data_size() { + // https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes + assert_eq!(1073739904usize, get_data_size(0)); + assert_eq!(1073739904usize, get_data_size(1)); + assert_eq!(1073739904usize, get_data_size(ETHASH_EPOCH_LENGTH - 1)); + assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH)); + assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH + 1)); + assert_eq!(18236833408usize, get_data_size(2046 * ETHASH_EPOCH_LENGTH)); + assert_eq!(18245220736usize, get_data_size(2047 * ETHASH_EPOCH_LENGTH)); + } - #[test] - fn test_difficulty_test() { - let hash = [ - 0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, - 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, - 0x05, 0x52, 0x7d, 0x72, - ]; - let mix_hash = [ - 0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, - 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, - 0x64, 0x31, 0xab, 0x6d, - ]; - let nonce = 0xd7b3ac70a301a249; - let boundary_good = [ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, - 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, - 0xe9, 0x7e, 0x53, 0x84, - ]; - assert_eq!(quick_get_difficulty(&hash, nonce, &mix_hash, false)[..], boundary_good[..]); - let boundary_bad = [ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, - 0x4a, 0x8e, 0x95, 0x69, 
0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, - 0xe9, 0x7e, 0x53, 0x84, - ]; - assert!(quick_get_difficulty(&hash, nonce, &mix_hash, false)[..] != boundary_bad[..]); - } + #[test] + fn test_difficulty_test() { + let hash = [ + 0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, + 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, + 0x05, 0x52, 0x7d, 0x72, + ]; + let mix_hash = [ + 0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, + 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, + 0x64, 0x31, 0xab, 0x6d, + ]; + let nonce = 0xd7b3ac70a301a249; + let boundary_good = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, + 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, + 0xe9, 0x7e, 0x53, 0x84, + ]; + assert_eq!( + quick_get_difficulty(&hash, nonce, &mix_hash, false)[..], + boundary_good[..] + ); + let boundary_bad = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, + 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, + 0xe9, 0x7e, 0x53, 0x84, + ]; + assert!(quick_get_difficulty(&hash, nonce, &mix_hash, false)[..] 
!= boundary_bad[..]); + } - #[test] - fn test_light_compute() { - let hash = [ - 0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, - 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, - 0x05, 0x52, 0x7d, 0x72, - ]; - let mix_hash = [ - 0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, - 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, - 0x64, 0x31, 0xab, 0x6d, - ]; - let boundary = [ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, - 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, - 0xe9, 0x7e, 0x53, 0x84, - ]; - let nonce = 0xd7b3ac70a301a249; + #[test] + fn test_light_compute() { + let hash = [ + 0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, + 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, + 0x05, 0x52, 0x7d, 0x72, + ]; + let mix_hash = [ + 0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, + 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, + 0x64, 0x31, 0xab, 0x6d, + ]; + let boundary = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, + 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, + 0xe9, 0x7e, 0x53, 0x84, + ]; + let nonce = 0xd7b3ac70a301a249; - let tempdir = TempDir::new("").unwrap(); - // difficulty = 0x085657254bd9u64; - let light = NodeCacheBuilder::new(None, u64::max_value()).light(tempdir.path(), 486382); - let result = light_compute(&light, &hash, nonce); - assert_eq!(result.mix_hash[..], mix_hash[..]); - assert_eq!(result.value[..], boundary[..]); - } + let tempdir = TempDir::new("").unwrap(); + // difficulty = 0x085657254bd9u64; + let light = NodeCacheBuilder::new(None, u64::max_value()).light(tempdir.path(), 486382); + let result 
= light_compute(&light, &hash, nonce); + assert_eq!(result.mix_hash[..], mix_hash[..]); + assert_eq!(result.value[..], boundary[..]); + } - #[test] - fn test_drop_old_data() { - let tempdir = TempDir::new("").unwrap(); - let builder = NodeCacheBuilder::new(None, u64::max_value()); - let first = builder.light(tempdir.path(), 0).to_file().unwrap().to_owned(); + #[test] + fn test_drop_old_data() { + let tempdir = TempDir::new("").unwrap(); + let builder = NodeCacheBuilder::new(None, u64::max_value()); + let first = builder + .light(tempdir.path(), 0) + .to_file() + .unwrap() + .to_owned(); - let second = builder.light(tempdir.path(), ETHASH_EPOCH_LENGTH).to_file().unwrap().to_owned(); - assert!(fs::metadata(&first).is_ok()); + let second = builder + .light(tempdir.path(), ETHASH_EPOCH_LENGTH) + .to_file() + .unwrap() + .to_owned(); + assert!(fs::metadata(&first).is_ok()); - let _ = builder.light(tempdir.path(), ETHASH_EPOCH_LENGTH * 2).to_file(); - assert!(fs::metadata(&first).is_err()); - assert!(fs::metadata(&second).is_ok()); + let _ = builder + .light(tempdir.path(), ETHASH_EPOCH_LENGTH * 2) + .to_file(); + assert!(fs::metadata(&first).is_err()); + assert!(fs::metadata(&second).is_ok()); - let _ = builder.light(tempdir.path(), ETHASH_EPOCH_LENGTH * 3).to_file(); - assert!(fs::metadata(&second).is_err()); - } + let _ = builder + .light(tempdir.path(), ETHASH_EPOCH_LENGTH * 3) + .to_file(); + assert!(fs::metadata(&second).is_err()); + } } diff --git a/ethash/src/keccak.rs b/ethash/src/keccak.rs index 3f7576c7b..f9ee8716c 100644 --- a/ethash/src/keccak.rs +++ b/ethash/src/keccak.rs @@ -19,38 +19,48 @@ extern crate keccak_hash as hash; pub type H256 = [u8; 32]; pub mod keccak_512 { - use super::hash; + use super::hash; - pub use self::hash::keccak_512_unchecked as unchecked; + pub use self::hash::keccak_512_unchecked as unchecked; - pub fn write(input: &[u8], output: &mut [u8]) { - hash::keccak_512(input, output); - } + pub fn write(input: &[u8], output: &mut [u8]) { 
+ hash::keccak_512(input, output); + } - pub fn inplace(input: &mut [u8]) { - // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This - // means that we can reuse the input buffer for both input and output. - unsafe { - hash::keccak_512_unchecked(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()); - } - } + pub fn inplace(input: &mut [u8]) { + // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This + // means that we can reuse the input buffer for both input and output. + unsafe { + hash::keccak_512_unchecked( + input.as_mut_ptr(), + input.len(), + input.as_ptr(), + input.len(), + ); + } + } } pub mod keccak_256 { - use super::hash; + use super::hash; - pub use self::hash::keccak_256_unchecked as unchecked; + pub use self::hash::keccak_256_unchecked as unchecked; - #[allow(dead_code)] - pub fn write(input: &[u8], output: &mut [u8]) { - hash::keccak_256(input, output); - } + #[allow(dead_code)] + pub fn write(input: &[u8], output: &mut [u8]) { + hash::keccak_256(input, output); + } - pub fn inplace(input: &mut [u8]) { - // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This - // means that we can reuse the input buffer for both input and output. - unsafe { - hash::keccak_256_unchecked(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()); - } - } + pub fn inplace(input: &mut [u8]) { + // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This + // means that we can reuse the input buffer for both input and output. 
+ unsafe { + hash::keccak_256_unchecked( + input.as_mut_ptr(), + input.len(), + input.as_ptr(), + input.len(), + ); + } + } } diff --git a/ethash/src/lib.rs b/ethash/src/lib.rs index e40c08920..99c758632 100644 --- a/ethash/src/lib.rs +++ b/ethash/src/lib.rs @@ -39,9 +39,9 @@ pub mod compute; #[cfg(not(feature = "bench"))] mod compute; -mod seed_compute; mod cache; mod keccak; +mod seed_compute; mod shared; #[cfg(feature = "bench")] @@ -50,190 +50,221 @@ pub mod progpow; mod progpow; pub use cache::{NodeCacheBuilder, OptimizeFor}; -pub use compute::{ProofOfWork, quick_get_difficulty, slow_hash_block_number}; use compute::Light; +pub use compute::{quick_get_difficulty, slow_hash_block_number, ProofOfWork}; use ethereum_types::{U256, U512}; use keccak::H256; use parking_lot::Mutex; pub use seed_compute::SeedHashCompute; pub use shared::ETHASH_EPOCH_LENGTH; -use std::mem; -use std::path::{Path, PathBuf}; +use std::{ + mem, + path::{Path, PathBuf}, +}; use std::sync::Arc; struct LightCache { - recent_epoch: Option, - recent: Option>, - prev_epoch: Option, - prev: Option>, + recent_epoch: Option, + recent: Option>, + prev_epoch: Option, + prev: Option>, } /// Light/Full cache manager. 
pub struct EthashManager { - nodecache_builder: NodeCacheBuilder, - cache: Mutex, - cache_dir: PathBuf, - progpow_transition: u64, + nodecache_builder: NodeCacheBuilder, + cache: Mutex, + cache_dir: PathBuf, + progpow_transition: u64, } impl EthashManager { - /// Create a new new instance of ethash manager - pub fn new>>(cache_dir: &Path, optimize_for: T, progpow_transition: u64) -> EthashManager { - EthashManager { - cache_dir: cache_dir.to_path_buf(), - nodecache_builder: NodeCacheBuilder::new(optimize_for.into().unwrap_or_default(), progpow_transition), - progpow_transition: progpow_transition, - cache: Mutex::new(LightCache { - recent_epoch: None, - recent: None, - prev_epoch: None, - prev: None, - }), - } - } + /// Create a new new instance of ethash manager + pub fn new>>( + cache_dir: &Path, + optimize_for: T, + progpow_transition: u64, + ) -> EthashManager { + EthashManager { + cache_dir: cache_dir.to_path_buf(), + nodecache_builder: NodeCacheBuilder::new( + optimize_for.into().unwrap_or_default(), + progpow_transition, + ), + progpow_transition: progpow_transition, + cache: Mutex::new(LightCache { + recent_epoch: None, + recent: None, + prev_epoch: None, + prev: None, + }), + } + } - /// Calculate the light client data - /// `block_number` - Block number to check - /// `light` - The light client handler - /// `header_hash` - The header hash to pack into the mix - /// `nonce` - The nonce to pack into the mix - pub fn compute_light(&self, block_number: u64, header_hash: &H256, nonce: u64) -> ProofOfWork { - let epoch = block_number / ETHASH_EPOCH_LENGTH; - let light = { - let mut lights = self.cache.lock(); - let light = if block_number == self.progpow_transition { - // we need to regenerate the cache to trigger algorithm change to progpow inside `Light` - None - } else { - match lights.recent_epoch.clone() { - Some(ref e) if *e == epoch => lights.recent.clone(), - _ => match lights.prev_epoch.clone() { - Some(e) if e == epoch => { - // don't swap if recent 
is newer. - if lights.recent_epoch > lights.prev_epoch { - None - } else { - // swap - let t = lights.prev_epoch; - lights.prev_epoch = lights.recent_epoch; - lights.recent_epoch = t; - let t = lights.prev.clone(); - lights.prev = lights.recent.clone(); - lights.recent = t; - lights.recent.clone() - } - } - _ => None, - }, - } - }; + /// Calculate the light client data + /// `block_number` - Block number to check + /// `light` - The light client handler + /// `header_hash` - The header hash to pack into the mix + /// `nonce` - The nonce to pack into the mix + pub fn compute_light(&self, block_number: u64, header_hash: &H256, nonce: u64) -> ProofOfWork { + let epoch = block_number / ETHASH_EPOCH_LENGTH; + let light = { + let mut lights = self.cache.lock(); + let light = if block_number == self.progpow_transition { + // we need to regenerate the cache to trigger algorithm change to progpow inside `Light` + None + } else { + match lights.recent_epoch.clone() { + Some(ref e) if *e == epoch => lights.recent.clone(), + _ => match lights.prev_epoch.clone() { + Some(e) if e == epoch => { + // don't swap if recent is newer. 
+ if lights.recent_epoch > lights.prev_epoch { + None + } else { + // swap + let t = lights.prev_epoch; + lights.prev_epoch = lights.recent_epoch; + lights.recent_epoch = t; + let t = lights.prev.clone(); + lights.prev = lights.recent.clone(); + lights.recent = t; + lights.recent.clone() + } + } + _ => None, + }, + } + }; - match light { - None => { - let light = match self.nodecache_builder.light_from_file( - &self.cache_dir, - block_number, - ) { - Ok(light) => Arc::new(light), - Err(e) => { - debug!("Light cache file not found for {}:{}", block_number, e); - let mut light = self.nodecache_builder.light( - &self.cache_dir, - block_number, - ); - if let Err(e) = light.to_file() { - warn!("Light cache file write error: {}", e); - } - Arc::new(light) - } - }; - lights.prev_epoch = mem::replace(&mut lights.recent_epoch, Some(epoch)); - lights.prev = mem::replace(&mut lights.recent, Some(light.clone())); - light - } - Some(light) => light, - } - }; - light.compute(header_hash, nonce, block_number) - } + match light { + None => { + let light = match self + .nodecache_builder + .light_from_file(&self.cache_dir, block_number) + { + Ok(light) => Arc::new(light), + Err(e) => { + debug!("Light cache file not found for {}:{}", block_number, e); + let mut light = + self.nodecache_builder.light(&self.cache_dir, block_number); + if let Err(e) = light.to_file() { + warn!("Light cache file write error: {}", e); + } + Arc::new(light) + } + }; + lights.prev_epoch = mem::replace(&mut lights.recent_epoch, Some(epoch)); + lights.prev = mem::replace(&mut lights.recent, Some(light.clone())); + light + } + Some(light) => light, + } + }; + light.compute(header_hash, nonce, block_number) + } } /// Convert an Ethash boundary to its original difficulty. Basically just `f(x) = 2^256 / x`. 
pub fn boundary_to_difficulty(boundary: ðereum_types::H256) -> U256 { - difficulty_to_boundary_aux(&**boundary) + difficulty_to_boundary_aux(&**boundary) } /// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`. pub fn difficulty_to_boundary(difficulty: &U256) -> ethereum_types::H256 { - difficulty_to_boundary_aux(difficulty).into() + difficulty_to_boundary_aux(difficulty).into() } fn difficulty_to_boundary_aux>(difficulty: T) -> ethereum_types::U256 { - let difficulty = difficulty.into(); + let difficulty = difficulty.into(); - assert!(!difficulty.is_zero()); + assert!(!difficulty.is_zero()); - if difficulty == U512::one() { - U256::max_value() - } else { - // difficulty > 1, so result should never overflow 256 bits - U256::from((U512::one() << 256) / difficulty) - } + if difficulty == U512::one() { + U256::max_value() + } else { + // difficulty > 1, so result should never overflow 256 bits + U256::from((U512::one() << 256) / difficulty) + } } #[test] fn test_lru() { - use tempdir::TempDir; + use tempdir::TempDir; - let tempdir = TempDir::new("").unwrap(); - let ethash = EthashManager::new(tempdir.path(), None, u64::max_value()); - let hash = [0u8; 32]; - ethash.compute_light(1, &hash, 1); - ethash.compute_light(50000, &hash, 1); - assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 1); - assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0); - ethash.compute_light(1, &hash, 1); - assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 0); - assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 1); - ethash.compute_light(70000, &hash, 1); - assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 2); - assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0); + let tempdir = TempDir::new("").unwrap(); + let ethash = EthashManager::new(tempdir.path(), None, u64::max_value()); + let hash = [0u8; 32]; + ethash.compute_light(1, &hash, 1); + ethash.compute_light(50000, &hash, 1); + assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 1); 
+ assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0); + ethash.compute_light(1, &hash, 1); + assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 0); + assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 1); + ethash.compute_light(70000, &hash, 1); + assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 2); + assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0); } #[test] fn test_difficulty_to_boundary() { - use ethereum_types::H256; - use std::str::FromStr; + use ethereum_types::H256; + use std::str::FromStr; - assert_eq!(difficulty_to_boundary(&U256::from(1)), H256::from(U256::max_value())); - assert_eq!(difficulty_to_boundary(&U256::from(2)), H256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap()); - assert_eq!(difficulty_to_boundary(&U256::from(4)), H256::from_str("4000000000000000000000000000000000000000000000000000000000000000").unwrap()); - assert_eq!(difficulty_to_boundary(&U256::from(32)), H256::from_str("0800000000000000000000000000000000000000000000000000000000000000").unwrap()); + assert_eq!( + difficulty_to_boundary(&U256::from(1)), + H256::from(U256::max_value()) + ); + assert_eq!( + difficulty_to_boundary(&U256::from(2)), + H256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap() + ); + assert_eq!( + difficulty_to_boundary(&U256::from(4)), + H256::from_str("4000000000000000000000000000000000000000000000000000000000000000").unwrap() + ); + assert_eq!( + difficulty_to_boundary(&U256::from(32)), + H256::from_str("0800000000000000000000000000000000000000000000000000000000000000").unwrap() + ); } #[test] fn test_difficulty_to_boundary_regression() { - use ethereum_types::H256; + use ethereum_types::H256; - // the last bit was originally being truncated when performing the conversion - // https://github.com/paritytech/parity-ethereum/issues/8397 - for difficulty in 1..9 { - assert_eq!(U256::from(difficulty), boundary_to_difficulty(&difficulty_to_boundary(&difficulty.into()))); - 
assert_eq!(H256::from(difficulty), difficulty_to_boundary(&boundary_to_difficulty(&difficulty.into()))); - assert_eq!(U256::from(difficulty), boundary_to_difficulty(&boundary_to_difficulty(&difficulty.into()).into())); - assert_eq!(H256::from(difficulty), difficulty_to_boundary(&difficulty_to_boundary(&difficulty.into()).into())); - } + // the last bit was originally being truncated when performing the conversion + // https://github.com/paritytech/parity-ethereum/issues/8397 + for difficulty in 1..9 { + assert_eq!( + U256::from(difficulty), + boundary_to_difficulty(&difficulty_to_boundary(&difficulty.into())) + ); + assert_eq!( + H256::from(difficulty), + difficulty_to_boundary(&boundary_to_difficulty(&difficulty.into())) + ); + assert_eq!( + U256::from(difficulty), + boundary_to_difficulty(&boundary_to_difficulty(&difficulty.into()).into()) + ); + assert_eq!( + H256::from(difficulty), + difficulty_to_boundary(&difficulty_to_boundary(&difficulty.into()).into()) + ); + } } #[test] #[should_panic] fn test_difficulty_to_boundary_panics_on_zero() { - difficulty_to_boundary(&U256::from(0)); + difficulty_to_boundary(&U256::from(0)); } #[test] #[should_panic] fn test_boundary_to_difficulty_panics_on_zero() { - boundary_to_difficulty(ðereum_types::H256::from(0)); + boundary_to_difficulty(ðereum_types::H256::from(0)); } diff --git a/ethash/src/progpow.rs b/ethash/src/progpow.rs index 038f38c22..137269a02 100644 --- a/ethash/src/progpow.rs +++ b/ethash/src/progpow.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use compute::{FNV_PRIME, calculate_dag_item}; +use compute::{calculate_dag_item, FNV_PRIME}; use keccak::H256; -use shared::{ETHASH_ACCESSES, ETHASH_MIX_BYTES, Node, get_data_size}; +use shared::{get_data_size, Node, ETHASH_ACCESSES, ETHASH_MIX_BYTES}; const PROGPOW_CACHE_BYTES: usize = 16 * 1024; const PROGPOW_CACHE_WORDS: usize = PROGPOW_CACHE_BYTES / 4; @@ -32,564 +32,551 @@ const PROGPOW_REGS: usize = 32; const FNV_HASH: u32 = 0x811c9dc5; const KECCAKF_RNDC: [u32; 24] = [ - 0x00000001, 0x00008082, 0x0000808a, 0x80008000, 0x0000808b, 0x80000001, - 0x80008081, 0x00008009, 0x0000008a, 0x00000088, 0x80008009, 0x8000000a, - 0x8000808b, 0x0000008b, 0x00008089, 0x00008003, 0x00008002, 0x00000080, - 0x0000800a, 0x8000000a, 0x80008081, 0x00008080, 0x80000001, 0x80008008 + 0x00000001, 0x00008082, 0x0000808a, 0x80008000, 0x0000808b, 0x80000001, 0x80008081, 0x00008009, + 0x0000008a, 0x00000088, 0x80008009, 0x8000000a, 0x8000808b, 0x0000008b, 0x00008089, 0x00008003, + 0x00008002, 0x00000080, 0x0000800a, 0x8000000a, 0x80008081, 0x00008080, 0x80000001, 0x80008008, ]; const KECCAKF_ROTC: [u32; 24] = [ - 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, - 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44, ]; const KECCAKF_PILN: [usize; 24] = [ - 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, - 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1, ]; fn keccak_f800_round(st: &mut [u32; 25], r: usize) { - // Theta - let mut bc = [0u32; 5]; - for i in 0..bc.len() { - bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20]; - } + // Theta + let mut bc = [0u32; 5]; + for i in 0..bc.len() { + bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20]; + } - for i in 0..bc.len() { - let t = bc[(i + 4) % 5] ^ bc[(i + 1) % 5].rotate_left(1); - for j in (0..st.len()).step_by(5) { - st[j + i] ^= t; - } - } + for i in 
0..bc.len() { + let t = bc[(i + 4) % 5] ^ bc[(i + 1) % 5].rotate_left(1); + for j in (0..st.len()).step_by(5) { + st[j + i] ^= t; + } + } - // Rho Pi - let mut t = st[1]; + // Rho Pi + let mut t = st[1]; - debug_assert_eq!(KECCAKF_ROTC.len(), 24); - for i in 0..24 { - let j = KECCAKF_PILN[i]; - bc[0] = st[j]; - st[j] = t.rotate_left(KECCAKF_ROTC[i]); - t = bc[0]; - } + debug_assert_eq!(KECCAKF_ROTC.len(), 24); + for i in 0..24 { + let j = KECCAKF_PILN[i]; + bc[0] = st[j]; + st[j] = t.rotate_left(KECCAKF_ROTC[i]); + t = bc[0]; + } - // Chi - for j in (0..st.len()).step_by(5) { - for i in 0..bc.len() { - bc[i] = st[j + i]; - } - for i in 0..bc.len() { - st[j + i] ^= (!bc[(i + 1) % 5]) & bc[(i + 2) % 5]; - } - } + // Chi + for j in (0..st.len()).step_by(5) { + for i in 0..bc.len() { + bc[i] = st[j + i]; + } + for i in 0..bc.len() { + st[j + i] ^= (!bc[(i + 1) % 5]) & bc[(i + 2) % 5]; + } + } - // Iota - debug_assert!(r < KECCAKF_RNDC.len()); - st[0] ^= KECCAKF_RNDC[r]; + // Iota + debug_assert!(r < KECCAKF_RNDC.len()); + st[0] ^= KECCAKF_RNDC[r]; } fn keccak_f800(header_hash: H256, nonce: u64, result: [u32; 8], st: &mut [u32; 25]) { - for i in 0..8 { - st[i] = (header_hash[4 * i] as u32) + - ((header_hash[4 * i + 1] as u32) << 8) + - ((header_hash[4 * i + 2] as u32) << 16) + - ((header_hash[4 * i + 3] as u32) << 24); - } + for i in 0..8 { + st[i] = (header_hash[4 * i] as u32) + + ((header_hash[4 * i + 1] as u32) << 8) + + ((header_hash[4 * i + 2] as u32) << 16) + + ((header_hash[4 * i + 3] as u32) << 24); + } - st[8] = nonce as u32; - st[9] = (nonce >> 32) as u32; + st[8] = nonce as u32; + st[9] = (nonce >> 32) as u32; - for i in 0..8 { - st[10 + i] = result[i]; - } + for i in 0..8 { + st[10 + i] = result[i]; + } - for r in 0..22 { - keccak_f800_round(st, r); - } + for r in 0..22 { + keccak_f800_round(st, r); + } } pub fn keccak_f800_short(header_hash: H256, nonce: u64, result: [u32; 8]) -> u64 { - let mut st = [0u32; 25]; - keccak_f800(header_hash, nonce, result, 
&mut st); - (st[0].swap_bytes() as u64) << 32 | st[1].swap_bytes() as u64 + let mut st = [0u32; 25]; + keccak_f800(header_hash, nonce, result, &mut st); + (st[0].swap_bytes() as u64) << 32 | st[1].swap_bytes() as u64 } pub fn keccak_f800_long(header_hash: H256, nonce: u64, result: [u32; 8]) -> H256 { - let mut st = [0u32; 25]; - keccak_f800(header_hash, nonce, result, &mut st); + let mut st = [0u32; 25]; + keccak_f800(header_hash, nonce, result, &mut st); - // NOTE: transmute from `[u32; 8]` to `[u8; 32]` - unsafe { - std::mem::transmute( - [st[0], st[1], st[2], st[3], st[4], st[5], st[6], st[7]] - ) - } + // NOTE: transmute from `[u32; 8]` to `[u8; 32]` + unsafe { std::mem::transmute([st[0], st[1], st[2], st[3], st[4], st[5], st[6], st[7]]) } } #[inline] fn fnv1a_hash(h: u32, d: u32) -> u32 { - (h ^ d).wrapping_mul(FNV_PRIME) + (h ^ d).wrapping_mul(FNV_PRIME) } #[derive(Clone)] struct Kiss99 { - z: u32, - w: u32, - jsr: u32, - jcong: u32, + z: u32, + w: u32, + jsr: u32, + jcong: u32, } impl Kiss99 { - fn new(z: u32, w: u32, jsr: u32, jcong: u32) -> Kiss99 { - Kiss99 { z, w, jsr, jcong } - } + fn new(z: u32, w: u32, jsr: u32, jcong: u32) -> Kiss99 { + Kiss99 { z, w, jsr, jcong } + } - #[inline] - fn next_u32(&mut self) -> u32 { - self.z = 36969u32.wrapping_mul(self.z & 65535).wrapping_add(self.z >> 16); - self.w = 18000u32.wrapping_mul(self.w & 65535).wrapping_add(self.w >> 16); - let mwc = (self.z << 16).wrapping_add(self.w); - self.jsr ^= self.jsr << 17; - self.jsr ^= self.jsr >> 13; - self.jsr ^= self.jsr << 5; - self.jcong = 69069u32.wrapping_mul(self.jcong).wrapping_add(1234567); + #[inline] + fn next_u32(&mut self) -> u32 { + self.z = 36969u32 + .wrapping_mul(self.z & 65535) + .wrapping_add(self.z >> 16); + self.w = 18000u32 + .wrapping_mul(self.w & 65535) + .wrapping_add(self.w >> 16); + let mwc = (self.z << 16).wrapping_add(self.w); + self.jsr ^= self.jsr << 17; + self.jsr ^= self.jsr >> 13; + self.jsr ^= self.jsr << 5; + self.jcong = 
69069u32.wrapping_mul(self.jcong).wrapping_add(1234567); - (mwc ^ self.jcong).wrapping_add(self.jsr) - } + (mwc ^ self.jcong).wrapping_add(self.jsr) + } } fn fill_mix(seed: u64, lane_id: u32) -> [u32; PROGPOW_REGS] { - // Use FNV to expand the per-warp seed to per-lane - // Use KISS to expand the per-lane seed to fill mix - let z = fnv1a_hash(FNV_HASH, seed as u32); - let w = fnv1a_hash(z, (seed >> 32) as u32); - let jsr = fnv1a_hash(w, lane_id); - let jcong = fnv1a_hash(jsr, lane_id); + // Use FNV to expand the per-warp seed to per-lane + // Use KISS to expand the per-lane seed to fill mix + let z = fnv1a_hash(FNV_HASH, seed as u32); + let w = fnv1a_hash(z, (seed >> 32) as u32); + let jsr = fnv1a_hash(w, lane_id); + let jcong = fnv1a_hash(jsr, lane_id); - let mut rnd = Kiss99::new(z, w, jsr, jcong); + let mut rnd = Kiss99::new(z, w, jsr, jcong); - let mut mix = [0; PROGPOW_REGS]; + let mut mix = [0; PROGPOW_REGS]; - debug_assert_eq!(PROGPOW_REGS, 32); - for i in 0..32 { - mix[i] = rnd.next_u32(); - } + debug_assert_eq!(PROGPOW_REGS, 32); + for i in 0..32 { + mix[i] = rnd.next_u32(); + } - mix + mix } // Merge new data from b into the value in a. 
Assuming A has high entropy only // do ops that retain entropy even if B is low entropy (IE don't do A&B) fn merge(a: u32, b: u32, r: u32) -> u32 { - match r % 4 { - 0 => a.wrapping_mul(33).wrapping_add(b), - 1 => (a ^ b).wrapping_mul(33), - 2 => a.rotate_left(((r >> 16) % 31) + 1) ^ b, - _ => a.rotate_right(((r >> 16) % 31) + 1) ^ b, - } + match r % 4 { + 0 => a.wrapping_mul(33).wrapping_add(b), + 1 => (a ^ b).wrapping_mul(33), + 2 => a.rotate_left(((r >> 16) % 31) + 1) ^ b, + _ => a.rotate_right(((r >> 16) % 31) + 1) ^ b, + } } fn math(a: u32, b: u32, r: u32) -> u32 { - match r % 11 { - 0 => a.wrapping_add(b), - 1 => a.wrapping_mul(b), - 2 => ((a as u64).wrapping_mul(b as u64) >> 32) as u32, - 3 => a.min(b), - 4 => a.rotate_left(b), - 5 => a.rotate_right(b), - 6 => a & b, - 7 => a | b, - 8 => a ^ b, - 9 => a.leading_zeros() + b.leading_zeros(), - _ => a.count_ones() + b.count_ones(), - } + match r % 11 { + 0 => a.wrapping_add(b), + 1 => a.wrapping_mul(b), + 2 => ((a as u64).wrapping_mul(b as u64) >> 32) as u32, + 3 => a.min(b), + 4 => a.rotate_left(b), + 5 => a.rotate_right(b), + 6 => a & b, + 7 => a | b, + 8 => a ^ b, + 9 => a.leading_zeros() + b.leading_zeros(), + _ => a.count_ones() + b.count_ones(), + } } fn progpow_init(seed: u64) -> (Kiss99, [u32; PROGPOW_REGS], [u32; PROGPOW_REGS]) { - let z = fnv1a_hash(FNV_HASH, seed as u32); - let w = fnv1a_hash(z, (seed >> 32) as u32); - let jsr = fnv1a_hash(w, seed as u32); - let jcong = fnv1a_hash(jsr, (seed >> 32) as u32); + let z = fnv1a_hash(FNV_HASH, seed as u32); + let w = fnv1a_hash(z, (seed >> 32) as u32); + let jsr = fnv1a_hash(w, seed as u32); + let jcong = fnv1a_hash(jsr, (seed >> 32) as u32); - let mut rnd = Kiss99::new(z, w, jsr, jcong); + let mut rnd = Kiss99::new(z, w, jsr, jcong); - // Create a random sequence of mix destinations for merge() and mix sources - // for cache reads guarantees every destination merged once and guarantees - // no duplicate cache reads, which could be optimized away. 
Uses - // Fisher-Yates shuffle. - let mut mix_seq_dst = [0u32; PROGPOW_REGS]; - let mut mix_seq_cache = [0u32; PROGPOW_REGS]; - for i in 0..mix_seq_dst.len() { - mix_seq_dst[i] = i as u32; - mix_seq_cache[i] = i as u32; - } + // Create a random sequence of mix destinations for merge() and mix sources + // for cache reads guarantees every destination merged once and guarantees + // no duplicate cache reads, which could be optimized away. Uses + // Fisher-Yates shuffle. + let mut mix_seq_dst = [0u32; PROGPOW_REGS]; + let mut mix_seq_cache = [0u32; PROGPOW_REGS]; + for i in 0..mix_seq_dst.len() { + mix_seq_dst[i] = i as u32; + mix_seq_cache[i] = i as u32; + } - for i in (1..mix_seq_dst.len()).rev() { - let j = rnd.next_u32() as usize % (i + 1); - mix_seq_dst.swap(i, j); + for i in (1..mix_seq_dst.len()).rev() { + let j = rnd.next_u32() as usize % (i + 1); + mix_seq_dst.swap(i, j); - let j = rnd.next_u32() as usize % (i + 1); - mix_seq_cache.swap(i, j); - } + let j = rnd.next_u32() as usize % (i + 1); + mix_seq_cache.swap(i, j); + } - (rnd, mix_seq_dst, mix_seq_cache) + (rnd, mix_seq_dst, mix_seq_cache) } pub type CDag = [u32; PROGPOW_CACHE_WORDS]; fn progpow_loop( - seed: u64, - loop_: usize, - mix: &mut [[u32; PROGPOW_REGS]; PROGPOW_LANES], - cache: &[Node], - c_dag: &CDag, - data_size: usize, + seed: u64, + loop_: usize, + mix: &mut [[u32; PROGPOW_REGS]; PROGPOW_LANES], + cache: &[Node], + c_dag: &CDag, + data_size: usize, ) { - // All lanes share a base address for the global load. Global offset uses - // mix[0] to guarantee it depends on the load result. - let g_offset = mix[loop_ % PROGPOW_LANES][0] as usize % - (64 * data_size / (PROGPOW_LANES * PROGPOW_DAG_LOADS)); + // All lanes share a base address for the global load. Global offset uses + // mix[0] to guarantee it depends on the load result. 
+ let g_offset = mix[loop_ % PROGPOW_LANES][0] as usize + % (64 * data_size / (PROGPOW_LANES * PROGPOW_DAG_LOADS)); - // 256 bytes of dag data - let mut dag_item = [0u32; 64]; + // 256 bytes of dag data + let mut dag_item = [0u32; 64]; - // Fetch DAG nodes (64 bytes each) - for l in 0..PROGPOW_DAG_LOADS { - let index = g_offset * PROGPOW_LANES * PROGPOW_DAG_LOADS + l * 16; - let node = calculate_dag_item(index as u32 / 16, cache); - dag_item[l * 16..(l + 1) * 16].clone_from_slice(node.as_words()); - } + // Fetch DAG nodes (64 bytes each) + for l in 0..PROGPOW_DAG_LOADS { + let index = g_offset * PROGPOW_LANES * PROGPOW_DAG_LOADS + l * 16; + let node = calculate_dag_item(index as u32 / 16, cache); + dag_item[l * 16..(l + 1) * 16].clone_from_slice(node.as_words()); + } - let (rnd, mix_seq_dst, mix_seq_cache) = progpow_init(seed); + let (rnd, mix_seq_dst, mix_seq_cache) = progpow_init(seed); - // Lanes can execute in parallel and will be convergent - for l in 0..mix.len() { - let mut rnd = rnd.clone(); + // Lanes can execute in parallel and will be convergent + for l in 0..mix.len() { + let mut rnd = rnd.clone(); - // Initialize the seed and mix destination sequence - let mut mix_seq_dst_cnt = 0; - let mut mix_seq_cache_cnt = 0; + // Initialize the seed and mix destination sequence + let mut mix_seq_dst_cnt = 0; + let mut mix_seq_cache_cnt = 0; - let mut mix_dst = || { - let res = mix_seq_dst[mix_seq_dst_cnt % PROGPOW_REGS] as usize; - mix_seq_dst_cnt += 1; - res - }; - let mut mix_cache = || { - let res = mix_seq_cache[mix_seq_cache_cnt % PROGPOW_REGS] as usize; - mix_seq_cache_cnt += 1; - res - }; + let mut mix_dst = || { + let res = mix_seq_dst[mix_seq_dst_cnt % PROGPOW_REGS] as usize; + mix_seq_dst_cnt += 1; + res + }; + let mut mix_cache = || { + let res = mix_seq_cache[mix_seq_cache_cnt % PROGPOW_REGS] as usize; + mix_seq_cache_cnt += 1; + res + }; - for i in 0..PROGPOW_CNT_CACHE.max(PROGPOW_CNT_MATH) { - if i < PROGPOW_CNT_CACHE { - // Cached memory access, 
lanes access random 32-bit locations - // within the first portion of the DAG - let offset = mix[l][mix_cache()] as usize % PROGPOW_CACHE_WORDS; - let data = c_dag[offset]; - let dst = mix_dst(); + for i in 0..PROGPOW_CNT_CACHE.max(PROGPOW_CNT_MATH) { + if i < PROGPOW_CNT_CACHE { + // Cached memory access, lanes access random 32-bit locations + // within the first portion of the DAG + let offset = mix[l][mix_cache()] as usize % PROGPOW_CACHE_WORDS; + let data = c_dag[offset]; + let dst = mix_dst(); - mix[l][dst] = merge(mix[l][dst], data, rnd.next_u32()); - } + mix[l][dst] = merge(mix[l][dst], data, rnd.next_u32()); + } - if i < PROGPOW_CNT_MATH { - // Random math - // Generate 2 unique sources - let src_rnd = rnd.next_u32() % (PROGPOW_REGS * (PROGPOW_REGS - 1)) as u32; - let src1 = src_rnd % PROGPOW_REGS as u32; // 0 <= src1 < PROGPOW_REGS - let mut src2 = src_rnd / PROGPOW_REGS as u32; // 0 <= src2 < PROGPOW_REGS - 1 - if src2 >= src1 { - src2 += 1; // src2 is now any reg other than src1 - } + if i < PROGPOW_CNT_MATH { + // Random math + // Generate 2 unique sources + let src_rnd = rnd.next_u32() % (PROGPOW_REGS * (PROGPOW_REGS - 1)) as u32; + let src1 = src_rnd % PROGPOW_REGS as u32; // 0 <= src1 < PROGPOW_REGS + let mut src2 = src_rnd / PROGPOW_REGS as u32; // 0 <= src2 < PROGPOW_REGS - 1 + if src2 >= src1 { + src2 += 1; // src2 is now any reg other than src1 + } - let data = math(mix[l][src1 as usize], mix[l][src2 as usize], rnd.next_u32()); - let dst = mix_dst(); + let data = math(mix[l][src1 as usize], mix[l][src2 as usize], rnd.next_u32()); + let dst = mix_dst(); - mix[l][dst] = merge(mix[l][dst], data, rnd.next_u32()); - } - } + mix[l][dst] = merge(mix[l][dst], data, rnd.next_u32()); + } + } - // Global load to sequential locations - let mut data_g = [0u32; PROGPOW_DAG_LOADS]; - let index = ((l ^ loop_) % PROGPOW_LANES) * PROGPOW_DAG_LOADS; - for i in 0..PROGPOW_DAG_LOADS { - data_g[i] = dag_item[index + i]; - } + // Global load to sequential locations + 
let mut data_g = [0u32; PROGPOW_DAG_LOADS]; + let index = ((l ^ loop_) % PROGPOW_LANES) * PROGPOW_DAG_LOADS; + for i in 0..PROGPOW_DAG_LOADS { + data_g[i] = dag_item[index + i]; + } - // Consume the global load data at the very end of the loop to allow - // full latency hiding. Always merge into `mix[0]` to feed the offset - // calculation. - mix[l][0] = merge(mix[l][0], data_g[0], rnd.next_u32()); - for i in 1..PROGPOW_DAG_LOADS { - let dst = mix_dst(); - mix[l][dst] = merge(mix[l][dst], data_g[i], rnd.next_u32()); - } - } + // Consume the global load data at the very end of the loop to allow + // full latency hiding. Always merge into `mix[0]` to feed the offset + // calculation. + mix[l][0] = merge(mix[l][0], data_g[0], rnd.next_u32()); + for i in 1..PROGPOW_DAG_LOADS { + let dst = mix_dst(); + mix[l][dst] = merge(mix[l][dst], data_g[i], rnd.next_u32()); + } + } } pub fn progpow( - header_hash: H256, - nonce: u64, - block_number: u64, - cache: &[Node], - c_dag: &CDag, + header_hash: H256, + nonce: u64, + block_number: u64, + cache: &[Node], + c_dag: &CDag, ) -> (H256, H256) { - let mut mix = [[0u32; PROGPOW_REGS]; PROGPOW_LANES]; - let mut lane_results = [0u32; PROGPOW_LANES]; - let mut result = [0u32; 8]; + let mut mix = [[0u32; PROGPOW_REGS]; PROGPOW_LANES]; + let mut lane_results = [0u32; PROGPOW_LANES]; + let mut result = [0u32; 8]; - let data_size = get_data_size(block_number) / PROGPOW_MIX_BYTES; + let data_size = get_data_size(block_number) / PROGPOW_MIX_BYTES; - // NOTE: This assert is required to aid the optimizer elide the non-zero - // remainder check in `progpow_loop`. - assert!(data_size > 0); + // NOTE: This assert is required to aid the optimizer elide the non-zero + // remainder check in `progpow_loop`. 
+ assert!(data_size > 0); - // Initialize mix for all lanes - let seed = keccak_f800_short(header_hash, nonce, result); + // Initialize mix for all lanes + let seed = keccak_f800_short(header_hash, nonce, result); - for l in 0..mix.len() { - mix[l] = fill_mix(seed, l as u32); - } + for l in 0..mix.len() { + mix[l] = fill_mix(seed, l as u32); + } - // Execute the randomly generated inner loop - let period = block_number / PROGPOW_PERIOD_LENGTH as u64; - for i in 0..PROGPOW_CNT_DAG { - progpow_loop( - period, - i, - &mut mix, - cache, - c_dag, - data_size, - ); - } + // Execute the randomly generated inner loop + let period = block_number / PROGPOW_PERIOD_LENGTH as u64; + for i in 0..PROGPOW_CNT_DAG { + progpow_loop(period, i, &mut mix, cache, c_dag, data_size); + } - // Reduce mix data to a single per-lane result - for l in 0..lane_results.len() { - lane_results[l] = FNV_HASH; - for i in 0..PROGPOW_REGS { - lane_results[l] = fnv1a_hash(lane_results[l], mix[l][i]); - } - } + // Reduce mix data to a single per-lane result + for l in 0..lane_results.len() { + lane_results[l] = FNV_HASH; + for i in 0..PROGPOW_REGS { + lane_results[l] = fnv1a_hash(lane_results[l], mix[l][i]); + } + } - // Reduce all lanes to a single 128-bit result - result = [FNV_HASH; 8]; - for l in 0..PROGPOW_LANES { - result[l % 8] = fnv1a_hash(result[l % 8], lane_results[l]); - } + // Reduce all lanes to a single 128-bit result + result = [FNV_HASH; 8]; + for l in 0..PROGPOW_LANES { + result[l % 8] = fnv1a_hash(result[l % 8], lane_results[l]); + } - let digest = keccak_f800_long(header_hash, seed, result); + let digest = keccak_f800_long(header_hash, seed, result); - // NOTE: transmute from `[u32; 8]` to `[u8; 32]` - let result = unsafe { ::std::mem::transmute(result) }; + // NOTE: transmute from `[u32; 8]` to `[u8; 32]` + let result = unsafe { ::std::mem::transmute(result) }; - (digest, result) + (digest, result) } pub fn generate_cdag(cache: &[Node]) -> CDag { - let mut c_dag = [0u32; 
PROGPOW_CACHE_WORDS]; + let mut c_dag = [0u32; PROGPOW_CACHE_WORDS]; - for i in 0..PROGPOW_CACHE_WORDS / 16 { - let node = calculate_dag_item(i as u32, cache); - for j in 0..16 { - c_dag[i * 16 + j] = node.as_words()[j]; - } - } + for i in 0..PROGPOW_CACHE_WORDS / 16 { + let node = calculate_dag_item(i as u32, cache); + for j in 0..16 { + c_dag[i * 16 + j] = node.as_words()[j]; + } + } - c_dag + c_dag } #[cfg(test)] mod test { - use tempdir::TempDir; + use tempdir::TempDir; - use cache::{NodeCacheBuilder, OptimizeFor}; - use keccak::H256; - use rustc_hex::FromHex; - use serde_json::{self, Value}; - use std::collections::VecDeque; - use super::*; + use super::*; + use cache::{NodeCacheBuilder, OptimizeFor}; + use keccak::H256; + use rustc_hex::FromHex; + use serde_json::{self, Value}; + use std::collections::VecDeque; - fn h256(hex: &str) -> H256 { - let bytes = FromHex::from_hex(hex).unwrap(); - let mut res = [0; 32]; - res.copy_from_slice(&bytes); - res - } + fn h256(hex: &str) -> H256 { + let bytes = FromHex::from_hex(hex).unwrap(); + let mut res = [0; 32]; + res.copy_from_slice(&bytes); + res + } - #[test] - fn test_cdag() { - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let tempdir = TempDir::new("").unwrap(); - let cache = builder.new_cache(tempdir.into_path(), 0); + #[test] + fn test_cdag() { + let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let tempdir = TempDir::new("").unwrap(); + let cache = builder.new_cache(tempdir.into_path(), 0); - let c_dag = generate_cdag(cache.as_ref()); + let c_dag = generate_cdag(cache.as_ref()); - let expected = vec![ - 690150178u32, 1181503948, 2248155602, 2118233073, 2193871115, - 1791778428, 1067701239, 724807309, 530799275, 3480325829, 3899029234, - 1998124059, 2541974622, 1100859971, 1297211151, 3268320000, 2217813733, - 2690422980, 3172863319, 2651064309 - ]; + let expected = vec![ + 690150178u32, + 1181503948, + 2248155602, + 2118233073, + 2193871115, + 
1791778428, + 1067701239, + 724807309, + 530799275, + 3480325829, + 3899029234, + 1998124059, + 2541974622, + 1100859971, + 1297211151, + 3268320000, + 2217813733, + 2690422980, + 3172863319, + 2651064309, + ]; - assert_eq!( - c_dag.iter().take(20).cloned().collect::>(), - expected, - ); - } + assert_eq!(c_dag.iter().take(20).cloned().collect::>(), expected,); + } - #[test] - fn test_random_merge() { - let tests = [ - (1000000u32, 101u32, 33000101u32), - (2000000, 102, 66003366), - (3000000, 103, 6000103), - (4000000, 104, 2000104), - (1000000, 0, 33000000), - (2000000, 0, 66000000), - (3000000, 0, 6000000), - (4000000, 0, 2000000), - ]; + #[test] + fn test_random_merge() { + let tests = [ + (1000000u32, 101u32, 33000101u32), + (2000000, 102, 66003366), + (3000000, 103, 6000103), + (4000000, 104, 2000104), + (1000000, 0, 33000000), + (2000000, 0, 66000000), + (3000000, 0, 6000000), + (4000000, 0, 2000000), + ]; - for (i, &(a, b, expected)) in tests.iter().enumerate() { - assert_eq!( - merge(a, b, i as u32), - expected, - ); - } - } + for (i, &(a, b, expected)) in tests.iter().enumerate() { + assert_eq!(merge(a, b, i as u32), expected,); + } + } - #[test] - fn test_random_math() { - let tests = [ - (20u32, 22u32, 42u32), - (70000, 80000, 1305032704), - (70000, 80000, 1), - (1, 2, 1), - (3, 10000, 196608), - (3, 0, 3), - (3, 6, 2), - (3, 6, 7), - (3, 6, 5), - (0, 0xffffffff, 32), - (3 << 13, 1 << 5, 3), - (22, 20, 42), - (80000, 70000, 1305032704), - (80000, 70000, 1), - (2, 1, 1), - (10000, 3, 80000), - (0, 3, 0), - (6, 3, 2), - (6, 3, 7), - (6, 3, 5), - (0, 0xffffffff, 32), - (3 << 13, 1 << 5, 3), - ]; + #[test] + fn test_random_math() { + let tests = [ + (20u32, 22u32, 42u32), + (70000, 80000, 1305032704), + (70000, 80000, 1), + (1, 2, 1), + (3, 10000, 196608), + (3, 0, 3), + (3, 6, 2), + (3, 6, 7), + (3, 6, 5), + (0, 0xffffffff, 32), + (3 << 13, 1 << 5, 3), + (22, 20, 42), + (80000, 70000, 1305032704), + (80000, 70000, 1), + (2, 1, 1), + (10000, 3, 80000), + (0, 
3, 0), + (6, 3, 2), + (6, 3, 7), + (6, 3, 5), + (0, 0xffffffff, 32), + (3 << 13, 1 << 5, 3), + ]; - for (i, &(a, b, expected)) in tests.iter().enumerate() { - assert_eq!( - math(a, b, i as u32), - expected, - ); - } - } + for (i, &(a, b, expected)) in tests.iter().enumerate() { + assert_eq!(math(a, b, i as u32), expected,); + } + } - #[test] - fn test_keccak_256() { - let expected = "5dd431e5fbc604f499bfa0232f45f8f142d0ff5178f539e5a7800bf0643697af"; - assert_eq!( - keccak_f800_long([0; 32], 0, [0; 8]), - h256(expected), - ); - } + #[test] + fn test_keccak_256() { + let expected = "5dd431e5fbc604f499bfa0232f45f8f142d0ff5178f539e5a7800bf0643697af"; + assert_eq!(keccak_f800_long([0; 32], 0, [0; 8]), h256(expected),); + } - #[test] - fn test_keccak_64() { - let expected: u64 = 0x5dd431e5fbc604f4; - assert_eq!( - keccak_f800_short([0; 32], 0, [0; 8]), - expected, - ); - } + #[test] + fn test_keccak_64() { + let expected: u64 = 0x5dd431e5fbc604f4; + assert_eq!(keccak_f800_short([0; 32], 0, [0; 8]), expected,); + } - #[test] - fn test_progpow_hash() { - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let tempdir = TempDir::new("").unwrap(); - let cache = builder.new_cache(tempdir.into_path(), 0); - let c_dag = generate_cdag(cache.as_ref()); + #[test] + fn test_progpow_hash() { + let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let tempdir = TempDir::new("").unwrap(); + let cache = builder.new_cache(tempdir.into_path(), 0); + let c_dag = generate_cdag(cache.as_ref()); - let header_hash = [0; 32]; + let header_hash = [0; 32]; - let (digest, result) = progpow( - header_hash, - 0, - 0, - cache.as_ref(), - &c_dag, - ); + let (digest, result) = progpow(header_hash, 0, 0, cache.as_ref(), &c_dag); - let expected_digest = FromHex::from_hex("63155f732f2bf556967f906155b510c917e48e99685ead76ea83f4eca03ab12b").unwrap(); - let expected_result = 
FromHex::from_hex("faeb1be51075b03a4ff44b335067951ead07a3b078539ace76fd56fc410557a3").unwrap(); + let expected_digest = + FromHex::from_hex("63155f732f2bf556967f906155b510c917e48e99685ead76ea83f4eca03ab12b") + .unwrap(); + let expected_result = + FromHex::from_hex("faeb1be51075b03a4ff44b335067951ead07a3b078539ace76fd56fc410557a3") + .unwrap(); - assert_eq!( - digest.to_vec(), - expected_digest, - ); + assert_eq!(digest.to_vec(), expected_digest,); - assert_eq!( - result.to_vec(), - expected_result, - ); - } + assert_eq!(result.to_vec(), expected_result,); + } - #[test] - fn test_progpow_testvectors() { - struct ProgpowTest { - block_number: u64, - header_hash: H256, - nonce: u64, - mix_hash: H256, - final_hash: H256, - } + #[test] + fn test_progpow_testvectors() { + struct ProgpowTest { + block_number: u64, + header_hash: H256, + nonce: u64, + mix_hash: H256, + final_hash: H256, + } - let tests: Vec> = - serde_json::from_slice(include_bytes!("../res/progpow_testvectors.json")).unwrap(); + let tests: Vec> = + serde_json::from_slice(include_bytes!("../res/progpow_testvectors.json")).unwrap(); - let tests: Vec = tests.into_iter().map(|mut test: VecDeque| { - assert!(test.len() == 5); + let tests: Vec = tests + .into_iter() + .map(|mut test: VecDeque| { + assert!(test.len() == 5); - let block_number: u64 = serde_json::from_value(test.pop_front().unwrap()).unwrap(); - let header_hash: String = serde_json::from_value(test.pop_front().unwrap()).unwrap(); - let nonce: String = serde_json::from_value(test.pop_front().unwrap()).unwrap(); - let mix_hash: String = serde_json::from_value(test.pop_front().unwrap()).unwrap(); - let final_hash: String = serde_json::from_value(test.pop_front().unwrap()).unwrap(); + let block_number: u64 = serde_json::from_value(test.pop_front().unwrap()).unwrap(); + let header_hash: String = + serde_json::from_value(test.pop_front().unwrap()).unwrap(); + let nonce: String = serde_json::from_value(test.pop_front().unwrap()).unwrap(); + let mix_hash: 
String = serde_json::from_value(test.pop_front().unwrap()).unwrap(); + let final_hash: String = serde_json::from_value(test.pop_front().unwrap()).unwrap(); - ProgpowTest { - block_number, - header_hash: h256(&header_hash), - nonce: u64::from_str_radix(&nonce, 16).unwrap(), - mix_hash: h256(&mix_hash), - final_hash: h256(&final_hash), - } - }).collect(); + ProgpowTest { + block_number, + header_hash: h256(&header_hash), + nonce: u64::from_str_radix(&nonce, 16).unwrap(), + mix_hash: h256(&mix_hash), + final_hash: h256(&final_hash), + } + }) + .collect(); - for test in tests { - let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); - let tempdir = TempDir::new("").unwrap(); - let cache = builder.new_cache(tempdir.path().to_owned(), test.block_number); - let c_dag = generate_cdag(cache.as_ref()); + for test in tests { + let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value()); + let tempdir = TempDir::new("").unwrap(); + let cache = builder.new_cache(tempdir.path().to_owned(), test.block_number); + let c_dag = generate_cdag(cache.as_ref()); - let (digest, result) = progpow( - test.header_hash, - test.nonce, - test.block_number, - cache.as_ref(), - &c_dag, - ); + let (digest, result) = progpow( + test.header_hash, + test.nonce, + test.block_number, + cache.as_ref(), + &c_dag, + ); - assert_eq!(digest, test.final_hash); - assert_eq!(result, test.mix_hash); - } - } + assert_eq!(digest, test.final_hash); + assert_eq!(result, test.mix_hash); + } + } } diff --git a/ethash/src/seed_compute.rs b/ethash/src/seed_compute.rs index 517828032..899a73eed 100644 --- a/ethash/src/seed_compute.rs +++ b/ethash/src/seed_compute.rs @@ -14,89 +14,97 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use shared; use keccak::{keccak_256, H256}; +use shared; use std::cell::Cell; #[derive(Default)] pub struct SeedHashCompute { - prev_epoch: Cell, - prev_seedhash: Cell, + prev_epoch: Cell, + prev_seedhash: Cell, } impl SeedHashCompute { - #[inline] - fn reset_cache(&self) { - self.prev_epoch.set(0); - self.prev_seedhash.set([0u8; 32]); - } + #[inline] + fn reset_cache(&self) { + self.prev_epoch.set(0); + self.prev_seedhash.set([0u8; 32]); + } - #[inline] - pub fn hash_block_number(&self, block_number: u64) -> H256 { - self.hash_epoch(shared::epoch(block_number)) - } + #[inline] + pub fn hash_block_number(&self, block_number: u64) -> H256 { + self.hash_epoch(shared::epoch(block_number)) + } - #[inline] - pub fn hash_epoch(&self, epoch: u64) -> H256 { - if epoch < self.prev_epoch.get() { - // can't build on previous hash if requesting an older block - self.reset_cache(); - } - if epoch > self.prev_epoch.get() { - let seed_hash = SeedHashCompute::resume_compute_seedhash( - self.prev_seedhash.get(), - self.prev_epoch.get(), - epoch, - ); - self.prev_seedhash.set(seed_hash); - self.prev_epoch.set(epoch); - } - self.prev_seedhash.get() - } + #[inline] + pub fn hash_epoch(&self, epoch: u64) -> H256 { + if epoch < self.prev_epoch.get() { + // can't build on previous hash if requesting an older block + self.reset_cache(); + } + if epoch > self.prev_epoch.get() { + let seed_hash = SeedHashCompute::resume_compute_seedhash( + self.prev_seedhash.get(), + self.prev_epoch.get(), + epoch, + ); + self.prev_seedhash.set(seed_hash); + self.prev_epoch.set(epoch); + } + self.prev_seedhash.get() + } - #[inline] - pub fn resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 { - for _ in start_epoch..end_epoch { - keccak_256::inplace(&mut hash); - } - hash - } + #[inline] + pub fn resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 { + for _ in start_epoch..end_epoch { + keccak_256::inplace(&mut hash); + } + hash + } } 
#[cfg(test)] mod tests { - use super::SeedHashCompute; + use super::SeedHashCompute; - #[test] - fn test_seed_compute_once() { - let seed_compute = SeedHashCompute::default(); - let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162]; - assert_eq!(seed_compute.hash_block_number(486382), hash); - } + #[test] + fn test_seed_compute_once() { + let seed_compute = SeedHashCompute::default(); + let hash = [ + 241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, + 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162, + ]; + assert_eq!(seed_compute.hash_block_number(486382), hash); + } - #[test] - fn test_seed_compute_zero() { - let seed_compute = SeedHashCompute::default(); - assert_eq!(seed_compute.hash_block_number(0), [0u8; 32]); - } + #[test] + fn test_seed_compute_zero() { + let seed_compute = SeedHashCompute::default(); + assert_eq!(seed_compute.hash_block_number(0), [0u8; 32]); + } - #[test] - fn test_seed_compute_after_older() { - let seed_compute = SeedHashCompute::default(); - // calculating an older value first shouldn't affect the result - let _ = seed_compute.hash_block_number(50000); - let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162]; - assert_eq!(seed_compute.hash_block_number(486382), hash); - } - - #[test] - fn test_seed_compute_after_newer() { - let seed_compute = SeedHashCompute::default(); - // calculating an newer value first shouldn't affect the result - let _ = seed_compute.hash_block_number(972764); - let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162]; - assert_eq!(seed_compute.hash_block_number(486382), hash); - } + #[test] + fn test_seed_compute_after_older() { + let seed_compute = 
SeedHashCompute::default(); + // calculating an older value first shouldn't affect the result + let _ = seed_compute.hash_block_number(50000); + let hash = [ + 241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, + 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162, + ]; + assert_eq!(seed_compute.hash_block_number(486382), hash); + } + #[test] + fn test_seed_compute_after_newer() { + let seed_compute = SeedHashCompute::default(); + // calculating an newer value first shouldn't affect the result + let _ = seed_compute.hash_block_number(972764); + let hash = [ + 241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, + 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162, + ]; + assert_eq!(seed_compute.hash_block_number(486382), hash); + } } diff --git a/ethash/src/shared.rs b/ethash/src/shared.rs index 2c9a9fa9d..6a5586ada 100644 --- a/ethash/src/shared.rs +++ b/ethash/src/shared.rs @@ -31,38 +31,39 @@ pub const NODE_WORDS: usize = NODE_BYTES / 4; pub const NODE_BYTES: usize = 64; pub fn epoch(block_number: u64) -> u64 { - block_number / ETHASH_EPOCH_LENGTH + block_number / ETHASH_EPOCH_LENGTH } static CHARS: &'static [u8] = b"0123456789abcdef"; pub fn to_hex(bytes: &[u8]) -> String { - let mut v = Vec::with_capacity(bytes.len() * 2); - for &byte in bytes.iter() { - v.push(CHARS[(byte >> 4) as usize]); - v.push(CHARS[(byte & 0xf) as usize]); - } + let mut v = Vec::with_capacity(bytes.len() * 2); + for &byte in bytes.iter() { + v.push(CHARS[(byte >> 4) as usize]); + v.push(CHARS[(byte & 0xf) as usize]); + } - unsafe { String::from_utf8_unchecked(v) } + unsafe { String::from_utf8_unchecked(v) } } pub fn get_cache_size(block_number: u64) -> usize { - // TODO: Memoise - let mut sz: u64 = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH); - sz = sz - NODE_BYTES as u64; - while !is_prime(sz / NODE_BYTES as u64) { - sz = sz - 2 * NODE_BYTES as u64; - } - sz as usize + // 
TODO: Memoise + let mut sz: u64 = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH); + sz = sz - NODE_BYTES as u64; + while !is_prime(sz / NODE_BYTES as u64) { + sz = sz - 2 * NODE_BYTES as u64; + } + sz as usize } pub fn get_data_size(block_number: u64) -> usize { - // TODO: Memoise - let mut sz: u64 = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH); - sz = sz - ETHASH_MIX_BYTES as u64; - while !is_prime(sz / ETHASH_MIX_BYTES as u64) { - sz = sz - 2 * ETHASH_MIX_BYTES as u64; - } - sz as usize + // TODO: Memoise + let mut sz: u64 = + DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH); + sz = sz - ETHASH_MIX_BYTES as u64; + while !is_prime(sz / ETHASH_MIX_BYTES as u64) { + sz = sz - 2 * ETHASH_MIX_BYTES as u64; + } + sz as usize } pub type NodeBytes = [u8; NODE_BYTES]; @@ -100,15 +101,19 @@ static_assert_size_eq!(Node, NodeBytes, NodeWords, NodeDwords); #[repr(C)] pub union Node { - pub dwords: NodeDwords, - pub words: NodeWords, - pub bytes: NodeBytes, + pub dwords: NodeDwords, + pub words: NodeWords, + pub bytes: NodeBytes, } impl Clone for Node { - fn clone(&self) -> Self { - unsafe { Node { bytes: *&self.bytes } } - } + fn clone(&self) -> Self { + unsafe { + Node { + bytes: *&self.bytes, + } + } + } } // We use `inline(always)` because I was experiencing an 100% slowdown and `perf` showed that these @@ -117,33 +122,33 @@ impl Clone for Node { // performance regression. It's not caused by the `debug_assert_eq!` either, your guess is as good // as mine. 
impl Node { - #[inline(always)] - pub fn as_bytes(&self) -> &NodeBytes { - unsafe { &self.bytes } - } + #[inline(always)] + pub fn as_bytes(&self) -> &NodeBytes { + unsafe { &self.bytes } + } - #[inline(always)] - pub fn as_bytes_mut(&mut self) -> &mut NodeBytes { - unsafe { &mut self.bytes } - } + #[inline(always)] + pub fn as_bytes_mut(&mut self) -> &mut NodeBytes { + unsafe { &mut self.bytes } + } - #[inline(always)] - pub fn as_words(&self) -> &NodeWords { - unsafe { &self.words } - } + #[inline(always)] + pub fn as_words(&self) -> &NodeWords { + unsafe { &self.words } + } - #[inline(always)] - pub fn as_words_mut(&mut self) -> &mut NodeWords { - unsafe { &mut self.words } - } + #[inline(always)] + pub fn as_words_mut(&mut self) -> &mut NodeWords { + unsafe { &mut self.words } + } - #[inline(always)] - pub fn as_dwords(&self) -> &NodeDwords { - unsafe { &self.dwords } - } + #[inline(always)] + pub fn as_dwords(&self) -> &NodeDwords { + unsafe { &self.dwords } + } - #[inline(always)] - pub fn as_dwords_mut(&mut self) -> &mut NodeDwords { - unsafe { &mut self.dwords } - } + #[inline(always)] + pub fn as_dwords_mut(&mut self) -> &mut NodeDwords { + unsafe { &mut self.dwords } + } } diff --git a/ethcore/benches/builtin.rs b/ethcore/benches/builtin.rs index e3e56eb7e..b25ab7745 100644 --- a/ethcore/benches/builtin.rs +++ b/ethcore/benches/builtin.rs @@ -19,129 +19,126 @@ extern crate criterion; #[macro_use] extern crate lazy_static; -extern crate ethcore_builtin; extern crate ethcore; +extern crate ethcore_builtin; extern crate ethereum_types; extern crate parity_bytes as bytes; extern crate rustc_hex; -use criterion::{Criterion, Bencher}; use bytes::BytesRef; +use criterion::{Bencher, Criterion}; +use ethcore::{ethereum::new_byzantium_test_machine, machine::EthereumMachine}; use ethcore_builtin::Builtin; -use ethcore::machine::EthereumMachine; use ethereum_types::U256; -use ethcore::ethereum::new_byzantium_test_machine; use rustc_hex::FromHex; lazy_static! 
{ - static ref BYZANTIUM_MACHINE: EthereumMachine = new_byzantium_test_machine(); + static ref BYZANTIUM_MACHINE: EthereumMachine = new_byzantium_test_machine(); } struct BuiltinBenchmark<'a> { - builtin: &'a Builtin, - input: Vec, - expected: Vec, + builtin: &'a Builtin, + input: Vec, + expected: Vec, } impl<'a> BuiltinBenchmark<'a> { - fn new(builtin_address: &'static str, input: &str, expected: &str) -> BuiltinBenchmark<'a> { - let builtins = BYZANTIUM_MACHINE.builtins(); + fn new(builtin_address: &'static str, input: &str, expected: &str) -> BuiltinBenchmark<'a> { + let builtins = BYZANTIUM_MACHINE.builtins(); - let builtin = builtins.get(&builtin_address.into()).unwrap().clone(); - let input = FromHex::from_hex(input).unwrap(); - let expected = FromHex::from_hex(expected).unwrap(); + let builtin = builtins.get(&builtin_address.into()).unwrap().clone(); + let input = FromHex::from_hex(input).unwrap(); + let expected = FromHex::from_hex(expected).unwrap(); - BuiltinBenchmark { - builtin, input, expected - } - } + BuiltinBenchmark { + builtin, + input, + expected, + } + } - fn run(&self, b: &mut Bencher) { - let mut output = vec![0; self.expected.len()]; + fn run(&self, b: &mut Bencher) { + let mut output = vec![0; self.expected.len()]; - b.iter(|| { - self.builtin.execute(&self.input, &mut BytesRef::Fixed(&mut output)).unwrap(); - }); + b.iter(|| { + self.builtin + .execute(&self.input, &mut BytesRef::Fixed(&mut output)) + .unwrap(); + }); - assert_eq!(self.expected[..], output[..]); - } + assert_eq!(self.expected[..], output[..]); + } } -fn bench( - id: &str, - builtin_address: &'static str, - input: &str, - expected: &str, - b: &mut Criterion, -) { - let bench = BuiltinBenchmark::new(builtin_address, input, expected); - b.bench_function(id, move |b| bench.run(b)); +fn bench(id: &str, builtin_address: &'static str, input: &str, expected: &str, b: &mut Criterion) { + let bench = BuiltinBenchmark::new(builtin_address, input, expected); + b.bench_function(id, move 
|b| bench.run(b)); } criterion_group!( - builtin, - ecrecover, - sha256, - ripemd, - identity, - modexp_eip_example1, - modexp_eip_example2, - modexp_nagydani_1_square, - modexp_nagydani_1_qube, - modexp_nagydani_1_pow0x10001, - modexp_nagydani_2_square, - modexp_nagydani_2_qube, - modexp_nagydani_2_pow0x10001, - modexp_nagydani_3_square, - modexp_nagydani_3_qube, - modexp_nagydani_3_pow0x10001, - modexp_nagydani_4_square, - modexp_nagydani_4_qube, - modexp_nagydani_4_pow0x10001, - modexp_nagydani_5_square, - modexp_nagydani_5_qube, - modexp_nagydani_5_pow0x10001, - alt_bn128_add_chfast1, - alt_bn128_add_chfast2, - alt_bn128_add_cdetrio1, - alt_bn128_add_cdetrio2, - alt_bn128_add_cdetrio3, - alt_bn128_add_cdetrio4, - alt_bn128_add_cdetrio5, - alt_bn128_add_cdetrio6, - alt_bn128_add_cdetrio7, - alt_bn128_add_cdetrio8, - alt_bn128_add_cdetrio9, - alt_bn128_add_cdetrio10, - alt_bn128_add_cdetrio11, - alt_bn128_add_cdetrio12, - alt_bn128_add_cdetrio13, - alt_bn128_add_cdetrio14, - alt_bn128_mul_chfast1, - alt_bn128_mul_chfast2, - alt_bn128_mul_chfast3, - alt_bn128_mul_cdetrio1, - alt_bn128_mul_cdetrio6, - alt_bn128_mul_cdetrio11, - alt_bn128_pairing_jeff1, - alt_bn128_pairing_jeff2, - alt_bn128_pairing_jeff3, - alt_bn128_pairing_jeff4, - alt_bn128_pairing_jeff5, - alt_bn128_pairing_jeff6, - alt_bn128_pairing_empty_data, - alt_bn128_pairing_one_point, - alt_bn128_pairing_two_point_match_2, - alt_bn128_pairing_two_point_match_3, - alt_bn128_pairing_two_point_match_4, - alt_bn128_pairing_ten_point_match_1, - alt_bn128_pairing_ten_point_match_2, - alt_bn128_pairing_ten_point_match_3 + builtin, + ecrecover, + sha256, + ripemd, + identity, + modexp_eip_example1, + modexp_eip_example2, + modexp_nagydani_1_square, + modexp_nagydani_1_qube, + modexp_nagydani_1_pow0x10001, + modexp_nagydani_2_square, + modexp_nagydani_2_qube, + modexp_nagydani_2_pow0x10001, + modexp_nagydani_3_square, + modexp_nagydani_3_qube, + modexp_nagydani_3_pow0x10001, + modexp_nagydani_4_square, + 
modexp_nagydani_4_qube, + modexp_nagydani_4_pow0x10001, + modexp_nagydani_5_square, + modexp_nagydani_5_qube, + modexp_nagydani_5_pow0x10001, + alt_bn128_add_chfast1, + alt_bn128_add_chfast2, + alt_bn128_add_cdetrio1, + alt_bn128_add_cdetrio2, + alt_bn128_add_cdetrio3, + alt_bn128_add_cdetrio4, + alt_bn128_add_cdetrio5, + alt_bn128_add_cdetrio6, + alt_bn128_add_cdetrio7, + alt_bn128_add_cdetrio8, + alt_bn128_add_cdetrio9, + alt_bn128_add_cdetrio10, + alt_bn128_add_cdetrio11, + alt_bn128_add_cdetrio12, + alt_bn128_add_cdetrio13, + alt_bn128_add_cdetrio14, + alt_bn128_mul_chfast1, + alt_bn128_mul_chfast2, + alt_bn128_mul_chfast3, + alt_bn128_mul_cdetrio1, + alt_bn128_mul_cdetrio6, + alt_bn128_mul_cdetrio11, + alt_bn128_pairing_jeff1, + alt_bn128_pairing_jeff2, + alt_bn128_pairing_jeff3, + alt_bn128_pairing_jeff4, + alt_bn128_pairing_jeff5, + alt_bn128_pairing_jeff6, + alt_bn128_pairing_empty_data, + alt_bn128_pairing_one_point, + alt_bn128_pairing_two_point_match_2, + alt_bn128_pairing_two_point_match_3, + alt_bn128_pairing_two_point_match_4, + alt_bn128_pairing_ten_point_match_1, + alt_bn128_pairing_ten_point_match_2, + alt_bn128_pairing_ten_point_match_3 ); criterion_main!(builtin); fn ecrecover(b: &mut Criterion) { - bench( + bench( "ecrecover", "0000000000000000000000000000000000000001", // ecrecover "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", @@ -151,7 +148,7 @@ fn ecrecover(b: &mut Criterion) { } fn sha256(b: &mut Criterion) { - bench( + bench( "sha256", "0000000000000000000000000000000000000002", // sha256 
"38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", @@ -161,7 +158,7 @@ fn sha256(b: &mut Criterion) { } fn ripemd(b: &mut Criterion) { - bench( + bench( "ripemd", "0000000000000000000000000000000000000003", // ripemd "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", @@ -171,7 +168,7 @@ fn ripemd(b: &mut Criterion) { } fn identity(b: &mut Criterion) { - bench( + bench( "identity", "0000000000000000000000000000000000000004", // identity "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", @@ -181,7 +178,7 @@ fn identity(b: &mut Criterion) { } fn modexp_eip_example1(b: &mut Criterion) { - bench( + bench( "modexp_eip_example1", "0000000000000000000000000000000000000005", // modexp "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002003fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", @@ -191,7 +188,7 @@ fn modexp_eip_example1(b: &mut Criterion) { } fn modexp_eip_example2(b: &mut Criterion) { - bench( + bench( "modexp_eip_example2", "0000000000000000000000000000000000000005", // modexp 
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", @@ -201,7 +198,7 @@ fn modexp_eip_example2(b: &mut Criterion) { } fn modexp_nagydani_1_square(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_1_square", "0000000000000000000000000000000000000005", // modexp "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb502fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", @@ -211,7 +208,7 @@ fn modexp_nagydani_1_square(b: &mut Criterion) { } fn modexp_nagydani_1_qube(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_1_qube", "0000000000000000000000000000000000000005", // modexp "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb503fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", @@ -221,7 +218,7 @@ fn modexp_nagydani_1_qube(b: &mut Criterion) { } fn modexp_nagydani_1_pow0x10001(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_1_pow0x10001", "0000000000000000000000000000000000000005", // modexp 
"000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5010001fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", @@ -231,7 +228,7 @@ fn modexp_nagydani_1_pow0x10001(b: &mut Criterion) { } fn modexp_nagydani_2_square(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_2_square", "0000000000000000000000000000000000000005", // modexp "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5102e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", @@ -241,7 +238,7 @@ fn modexp_nagydani_2_square(b: &mut Criterion) { } fn modexp_nagydani_2_qube(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_2_qube", "0000000000000000000000000000000000000005", // modexp 
"000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5103e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", @@ -251,7 +248,7 @@ fn modexp_nagydani_2_qube(b: &mut Criterion) { } fn modexp_nagydani_2_pow0x10001(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_2_pow0x10001", "0000000000000000000000000000000000000005", // modexp "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51010001e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", @@ -261,7 +258,7 @@ fn modexp_nagydani_2_pow0x10001(b: &mut Criterion) { } fn modexp_nagydani_3_square(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_3_square", "0000000000000000000000000000000000000005", // modexp 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb02d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", @@ -271,7 +268,7 @@ fn modexp_nagydani_3_square(b: &mut Criterion) { } fn modexp_nagydani_3_qube(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_3_qube", "0000000000000000000000000000000000000005", // modexp 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb03d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", @@ -281,7 +278,7 @@ fn modexp_nagydani_3_qube(b: &mut Criterion) { } fn modexp_nagydani_3_pow0x10001(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_3_pow0x10001", "0000000000000000000000000000000000000005", // modexp 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb010001d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", @@ -291,7 +288,7 @@ fn modexp_nagydani_3_pow0x10001(b: &mut Criterion) { } fn modexp_nagydani_4_square(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_4_square", "0000000000000000000000000000000000000005", // modexp 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8102df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b94
4462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", @@ -301,7 +298,7 @@ fn modexp_nagydani_4_square(b: &mut Criterion) { } fn modexp_nagydani_4_qube(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_4_qube", "0000000000000000000000000000000000000005", // modexp "000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8103df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3
ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", @@ -311,7 +308,7 @@ fn modexp_nagydani_4_qube(b: &mut Criterion) { } fn modexp_nagydani_4_pow0x10001(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_4_pow0x10001", "0000000000000000000000000000000000000005", // modexp "000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b067113
3218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81010001df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", @@ -321,7 +318,7 @@ fn modexp_nagydani_4_pow0x10001(b: &mut Criterion) { } fn modexp_nagydani_5_square(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_5_square", "0000000000000000000000000000000000000005", // modexp 
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf6
8acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf02e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e2874
6293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", @@ -331,7 +328,7 @@ fn modexp_nagydani_5_square(b: &mut Criterion) { } fn modexp_nagydani_5_qube(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_5_qube", "0000000000000000000000000000000000000005", // modexp "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b
78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf03e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca
1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", @@ -341,7 +338,7 @@ fn modexp_nagydani_5_qube(b: &mut Criterion) { } fn modexp_nagydani_5_pow0x10001(b: &mut Criterion) { - bench( + bench( "modexp_nagydani_5_pow0x10001", "0000000000000000000000000000000000000005", // modexp 
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf6
8acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf010001e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e
28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", @@ -351,7 +348,7 @@ fn modexp_nagydani_5_pow0x10001(b: &mut Criterion) { } fn alt_bn128_add_chfast1(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_chfast1", "0000000000000000000000000000000000000006", // alt_bn128_add "18b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f3726607c2b7f58a84bd6145f00c9c2bc0bb1a187f20ff2c92963a88019e7c6a014eed06614e20c147e940f2d70da3f74c9a17df361706a4485c742bd6788478fa17d7", @@ -361,7 +358,7 @@ fn alt_bn128_add_chfast1(b: &mut Criterion) { } fn alt_bn128_add_chfast2(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_chfast2", "0000000000000000000000000000000000000006", // alt_bn128_add "2243525c5efd4b9c3d3c45ac0ca3fe4dd85e830a4ce6b65fa1eeaee202839703301d1d33be6da8e509df21cc35964723180eed7532537db9ae5e7d48f195c91518b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f37266", @@ -371,7 +368,7 @@ fn alt_bn128_add_chfast2(b: &mut Criterion) { } fn alt_bn128_add_cdetrio1(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio1", "0000000000000000000000000000000000000006", // alt_bn128_add "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -381,7 +378,7 @@ fn alt_bn128_add_cdetrio1(b: &mut Criterion) { } fn alt_bn128_add_cdetrio2(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio2", "0000000000000000000000000000000000000006", // alt_bn128_add 
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -391,7 +388,7 @@ fn alt_bn128_add_cdetrio2(b: &mut Criterion) { } fn alt_bn128_add_cdetrio3(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio3", "0000000000000000000000000000000000000006", // alt_bn128_add "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -401,7 +398,7 @@ fn alt_bn128_add_cdetrio3(b: &mut Criterion) { } fn alt_bn128_add_cdetrio4(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio4", "0000000000000000000000000000000000000006", // alt_bn128_add "", @@ -411,7 +408,7 @@ fn alt_bn128_add_cdetrio4(b: &mut Criterion) { } fn alt_bn128_add_cdetrio5(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio5", "0000000000000000000000000000000000000006", // alt_bn128_add "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -421,7 +418,7 @@ fn alt_bn128_add_cdetrio5(b: &mut Criterion) { } fn alt_bn128_add_cdetrio6(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio6", "0000000000000000000000000000000000000006", // alt_bn128_add "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", @@ -431,7 +428,7 @@ fn alt_bn128_add_cdetrio6(b: &mut Criterion) { } fn alt_bn128_add_cdetrio7(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio7", 
"0000000000000000000000000000000000000006", // alt_bn128_add "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -441,7 +438,7 @@ fn alt_bn128_add_cdetrio7(b: &mut Criterion) { } fn alt_bn128_add_cdetrio8(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio8", "0000000000000000000000000000000000000006", // alt_bn128_add "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", @@ -451,7 +448,7 @@ fn alt_bn128_add_cdetrio8(b: &mut Criterion) { } fn alt_bn128_add_cdetrio9(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio9", "0000000000000000000000000000000000000006", // alt_bn128_add "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -461,7 +458,7 @@ fn alt_bn128_add_cdetrio9(b: &mut Criterion) { } fn alt_bn128_add_cdetrio10(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio10", "0000000000000000000000000000000000000006", // alt_bn128_add "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -471,7 +468,7 @@ fn alt_bn128_add_cdetrio10(b: &mut Criterion) { } fn alt_bn128_add_cdetrio11(b: &mut 
Criterion) { - bench( + bench( "alt_bn128_add_cdetrio11", "0000000000000000000000000000000000000006", // alt_bn128_add "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", @@ -481,7 +478,7 @@ fn alt_bn128_add_cdetrio11(b: &mut Criterion) { } fn alt_bn128_add_cdetrio12(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio12", "0000000000000000000000000000000000000006", // alt_bn128_add "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -491,7 +488,7 @@ fn alt_bn128_add_cdetrio12(b: &mut Criterion) { } fn alt_bn128_add_cdetrio13(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio13", "0000000000000000000000000000000000000006", // alt_bn128_add "17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d98", @@ -501,7 +498,7 @@ fn alt_bn128_add_cdetrio13(b: &mut Criterion) { } fn alt_bn128_add_cdetrio14(b: &mut Criterion) { - bench( + bench( "alt_bn128_add_cdetrio14", "0000000000000000000000000000000000000006", // alt_bn128_add 
"17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa92e83f8d734803fc370eba25ed1f6b8768bd6d83887b87165fc2434fe11a830cb00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -511,7 +508,7 @@ fn alt_bn128_add_cdetrio14(b: &mut Criterion) { } fn alt_bn128_mul_chfast1(b: &mut Criterion) { - bench( + bench( "alt_bn128_mul_chfast1", "0000000000000000000000000000000000000007", // alt_bn128_mul "2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb721611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb20400000000000000000000000000000000000000000000000011138ce750fa15c2", @@ -521,7 +518,7 @@ fn alt_bn128_mul_chfast1(b: &mut Criterion) { } fn alt_bn128_mul_chfast2(b: &mut Criterion) { - bench( + bench( "alt_bn128_mul_chfast2", "0000000000000000000000000000000000000007", // alt_bn128_mul "070a8d6a982153cae4be29d434e8faef8a47b274a053f5a4ee2a6c9c13c31e5c031b8ce914eba3a9ffb989f9cdd5b0f01943074bf4f0f315690ec3cec6981afc30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46", @@ -531,7 +528,7 @@ fn alt_bn128_mul_chfast2(b: &mut Criterion) { } fn alt_bn128_mul_chfast3(b: &mut Criterion) { - bench( + bench( "alt_bn128_mul_chfast3", "0000000000000000000000000000000000000007", // alt_bn128_mul "025a6f4181d2b4ea8b724290ffb40156eb0adb514c688556eb79cdea0752c2bb2eff3f31dea215f1eb86023a133a996eb6300b44da664d64251d05381bb8a02e183227397098d014dc2822db40c0ac2ecbc0b548b438e5469e10460b6c3e7ea3", @@ -541,7 +538,7 @@ fn alt_bn128_mul_chfast3(b: &mut Criterion) { } fn alt_bn128_mul_cdetrio1(b: &mut Criterion) { - bench( + bench( "alt_bn128_mul_cdetrio1", "0000000000000000000000000000000000000007", // alt_bn128_mul 
"1a87b0584ce92f4593d161480614f2989035225609f08058ccfa3d0f940febe31a2f3c951f6dadcc7ee9007dff81504b0fcd6d7cf59996efdc33d92bf7f9f8f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", @@ -551,7 +548,7 @@ fn alt_bn128_mul_cdetrio1(b: &mut Criterion) { } fn alt_bn128_mul_cdetrio6(b: &mut Criterion) { - bench( + bench( "alt_bn128_mul_cdetrio6", "0000000000000000000000000000000000000007", // alt_bn128_mul "17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", @@ -561,7 +558,7 @@ fn alt_bn128_mul_cdetrio6(b: &mut Criterion) { } fn alt_bn128_mul_cdetrio11(b: &mut Criterion) { - bench( + bench( "alt_bn128_mul_cdetrio11", "0000000000000000000000000000000000000007", // alt_bn128_mul "039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d98ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", @@ -571,7 +568,7 @@ fn alt_bn128_mul_cdetrio11(b: &mut Criterion) { } fn alt_bn128_pairing_jeff1(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_jeff1", "0000000000000000000000000000000000000008", // alt_bn128_pairing 
"1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", @@ -581,7 +578,7 @@ fn alt_bn128_pairing_jeff1(b: &mut Criterion) { } fn alt_bn128_pairing_jeff2(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_jeff2", "0000000000000000000000000000000000000008", // alt_bn128_pairing "2eca0c7238bf16e83e7a1e6c5d49540685ff51380f309842a98561558019fc0203d3260361bb8451de5ff5ecd17f010ff22f5c31cdf184e9020b06fa5997db841213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f06967a1237ebfeca9aaae0d6d0bab8e28c198c5a339ef8a2407e31cdac516db922160fa257a5fd5b280642ff47b65eca77e626cb685c84fa6d3b6882a283ddd1198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", @@ -591,7 +588,7 @@ fn alt_bn128_pairing_jeff2(b: &mut Criterion) { } fn alt_bn128_pairing_jeff3(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_jeff3", "0000000000000000000000000000000000000008", // alt_bn128_pairing 
"0f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd216da2f5cb6be7a0aa72c440c53c9bbdfec6c36c7d515536431b3a865468acbba2e89718ad33c8bed92e210e81d1853435399a271913a6520736a4729cf0d51eb01a9e2ffa2e92599b68e44de5bcf354fa2642bd4f26b259daa6f7ce3ed57aeb314a9a87b789a58af499b314e13c3d65bede56c07ea2d418d6874857b70763713178fb49a2d6cd347dc58973ff49613a20757d0fcc22079f9abd10c3baee245901b9e027bd5cfc2cb5db82d4dc9677ac795ec500ecd47deee3b5da006d6d049b811d7511c78158de484232fc68daf8a45cf217d1c2fae693ff5871e8752d73b21198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", @@ -601,7 +598,7 @@ fn alt_bn128_pairing_jeff3(b: &mut Criterion) { } fn alt_bn128_pairing_jeff4(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_jeff4", "0000000000000000000000000000000000000008", // alt_bn128_pairing "2f2ea0b3da1e8ef11914acf8b2e1b32d99df51f5f4f206fc6b947eae860eddb6068134ddb33dc888ef446b648d72338684d678d2eb2371c61a50734d78da4b7225f83c8b6ab9de74e7da488ef02645c5a16a6652c3c71a15dc37fe3a5dcb7cb122acdedd6308e3bb230d226d16a105295f523a8a02bfc5e8bd2da135ac4c245d065bbad92e7c4e31bf3757f1fe7362a63fbfee50e7dc68da116e67d600d9bf6806d302580dc0661002994e7cd3a7f224e7ddc27802777486bf80f40e4ca3cfdb186bac5188a98c45e6016873d107f5cd131f3a3e339d0375e58bd6219347b008122ae2b09e539e152ec5364e7e2204b03d11d3caa038bfc7cd499f8176aacbee1f39e4e4afc4bc74790a4a028aff2c3d2538731fb755edefd8cb48d6ea589b5e283f150794b6736f670d6a1033f9b46c6f5204f50813eb85c8dc4b59db1c5d39140d97ee4d2b36d99bc49974d18ecca3e7ad51011956051b464d9e27d46cc25e0764bb98575bd466d32db7b15f582b2d5c452b36aa394b789366e5e3ca5aabd415794ab061441e51d01e94640b7e3084a07e02c78cf3103c542bc5b298669f211b88da1679b0b64a63b7e0e7bfe52aae524f73a55be7fe70c7e9bfc94b4cf0da1213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db7251
3a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f", @@ -611,7 +608,7 @@ fn alt_bn128_pairing_jeff4(b: &mut Criterion) { } fn alt_bn128_pairing_jeff5(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_jeff5", "0000000000000000000000000000000000000008", // alt_bn128_pairing "20a754d2071d4d53903e3b31a7e98ad6882d58aec240ef981fdf0a9d22c5926a29c853fcea789887315916bbeb89ca37edb355b4f980c9a12a94f30deeed30211213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f1abb4a25eb9379ae96c84fff9f0540abcfc0a0d11aeda02d4f37e4baf74cb0c11073b3ff2cdbb38755f8691ea59e9606696b3ff278acfc098fa8226470d03869217cee0a9ad79a4493b5253e2e4e3a39fc2df38419f230d341f60cb064a0ac290a3d76f140db8418ba512272381446eb73958670f00cf46f1d9e64cba057b53c26f64a8ec70387a13e41430ed3ee4a7db2059cc5fc13c067194bcc0cb49a98552fd72bd9edb657346127da132e5b82ab908f5816c826acb499e22f2412d1a2d70f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd2198a1f162a73261f112401aa2db79c7dab1533c9935c77290a6ce3b191f2318d198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", @@ -621,7 +618,7 @@ fn alt_bn128_pairing_jeff5(b: &mut Criterion) { } fn alt_bn128_pairing_jeff6(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_jeff6", "0000000000000000000000000000000000000008", // alt_bn128_pairing 
"1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c103188585e2364128fe25c70558f1560f4f9350baf3959e603cc91486e110936198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", @@ -631,17 +628,17 @@ fn alt_bn128_pairing_jeff6(b: &mut Criterion) { } fn alt_bn128_pairing_empty_data(b: &mut Criterion) { - bench( - "alt_bn128_pairing_empty_data", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - "", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + bench( + "alt_bn128_pairing_empty_data", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + "", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_one_point(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_one_point", "0000000000000000000000000000000000000008", // alt_bn128_pairing "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", @@ -651,7 +648,7 @@ fn alt_bn128_pairing_one_point(b: &mut Criterion) { } fn alt_bn128_pairing_two_point_match_2(b: &mut Criterion) { - bench( + bench( 
"alt_bn128_pairing_two_point_match_2", "0000000000000000000000000000000000000008", // alt_bn128_pairing "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d", @@ -661,7 +658,7 @@ fn alt_bn128_pairing_two_point_match_2(b: &mut Criterion) { } fn alt_bn128_pairing_two_point_match_3(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_two_point_match_3", "0000000000000000000000000000000000000008", // alt_bn128_pairing "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", @@ -671,7 +668,7 @@ fn alt_bn128_pairing_two_point_match_3(b: &mut Criterion) { } fn 
alt_bn128_pairing_two_point_match_4(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_two_point_match_4", "0000000000000000000000000000000000000008", // alt_bn128_pairing "105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75", @@ -681,7 +678,7 @@ fn alt_bn128_pairing_two_point_match_4(b: &mut Criterion) { } fn alt_bn128_pairing_ten_point_match_1(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_ten_point_match_1", "0000000000000000000000000000000000000008", // alt_bn128_pairing 
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa0000000000000000000000000000000000000000000000000000000000000001000000000000000
0000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d", @@ -691,7 +688,7 @@ fn alt_bn128_pairing_ten_point_match_1(b: &mut Criterion) { } fn alt_bn128_pairing_ten_point_match_2(b: &mut Criterion) { - bench( + 
bench( "alt_bn128_pairing_ten_point_match_2", "0000000000000000000000000000000000000008", // alt_bn128_pairing "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c48309
44a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", @@ -701,7 +698,7 @@ fn 
alt_bn128_pairing_ten_point_match_2(b: &mut Criterion) { } fn alt_bn128_pairing_ten_point_match_3(b: &mut Criterion) { - bench( + bench( "alt_bn128_pairing_ten_point_match_3", "0000000000000000000000000000000000000008", // alt_bn128_pairing "105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75", diff --git a/ethcore/blockchain/src/best_block.rs b/ethcore/blockchain/src/best_block.rs index 20f247391..1de5e80fd 100644 --- a/ethcore/blockchain/src/best_block.rs +++ b/ethcore/blockchain/src/best_block.rs @@ -16,8 +16,7 @@ use ethereum_types::{H256, U256}; -use common_types::{encoded, BlockNumber}; -use common_types::header::Header; +use common_types::{encoded, header::Header, BlockNumber}; /// Contains information on a best block that is specific to the consensus engine. /// @@ -26,19 +25,19 @@ use common_types::header::Header; /// /// Sometimes refered as 'latest block'. pub struct BestBlock { - /// Best block decoded header. - pub header: Header, - /// Best block uncompressed bytes. - pub block: encoded::Block, - /// Best block total difficulty. - pub total_difficulty: U256, + /// Best block decoded header. + pub header: Header, + /// Best block uncompressed bytes. + pub block: encoded::Block, + /// Best block total difficulty. 
+ pub total_difficulty: U256, } /// Best ancient block info. If the blockchain has a gap this keeps track of where it starts. #[derive(Default)] pub struct BestAncientBlock { - /// Best block hash. - pub hash: H256, - /// Best block number. - pub number: BlockNumber, + /// Best block hash. + pub hash: H256, + /// Best block number. + pub number: BlockNumber, } diff --git a/ethcore/blockchain/src/block_info.rs b/ethcore/blockchain/src/block_info.rs index 15f71ecb8..0c6f73430 100644 --- a/ethcore/blockchain/src/block_info.rs +++ b/ethcore/blockchain/src/block_info.rs @@ -14,41 +14,41 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use ethereum_types::{H256, U256}; use common_types::BlockNumber; +use ethereum_types::{H256, U256}; /// Brief info about inserted block. #[derive(Clone)] pub struct BlockInfo { - /// Block hash. - pub hash: H256, - /// Block number. - pub number: BlockNumber, - /// Total block difficulty. - pub total_difficulty: U256, - /// Block location in blockchain. - pub location: BlockLocation + /// Block hash. + pub hash: H256, + /// Block number. + pub number: BlockNumber, + /// Total block difficulty. + pub total_difficulty: U256, + /// Block location in blockchain. + pub location: BlockLocation, } /// Describes location of newly inserted block. #[derive(Debug, Clone, PartialEq)] pub enum BlockLocation { - /// It's part of the canon chain. - CanonChain, - /// It's not a part of the canon chain. - Branch, - /// It's part of the fork which should become canon chain, - /// because its total difficulty is higher than current - /// canon chain difficulty. - BranchBecomingCanonChain(BranchBecomingCanonChainData), + /// It's part of the canon chain. + CanonChain, + /// It's not a part of the canon chain. + Branch, + /// It's part of the fork which should become canon chain, + /// because its total difficulty is higher than current + /// canon chain difficulty. 
+ BranchBecomingCanonChain(BranchBecomingCanonChainData), } #[derive(Debug, Clone, PartialEq)] pub struct BranchBecomingCanonChainData { - /// Hash of the newest common ancestor with old canon chain. - pub ancestor: H256, - /// Hashes of the blocks between ancestor and this block. - pub enacted: Vec, - /// Hashes of the blocks which were invalidated. - pub retracted: Vec, + /// Hash of the newest common ancestor with old canon chain. + pub ancestor: H256, + /// Hashes of the blocks between ancestor and this block. + pub enacted: Vec, + /// Hashes of the blocks which were invalidated. + pub retracted: Vec, } diff --git a/ethcore/blockchain/src/blockchain.rs b/ethcore/blockchain/src/blockchain.rs index 1ff1f71f8..808ef8d62 100644 --- a/ethcore/blockchain/src/blockchain.rs +++ b/ethcore/blockchain/src/blockchain.rs @@ -16,368 +16,439 @@ //! Blockchain database. -use std::collections::{HashMap, HashSet}; -use std::{mem, io}; -use std::path::Path; -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + io, mem, + path::Path, + sync::Arc, +}; use ansi_term::Colour; use blooms_db; -use common_types::BlockNumber; -use common_types::blockchain_info::BlockChainInfo; -use common_types::encoded; -use common_types::engines::ForkChoice; -use common_types::engines::epoch::{Transition as EpochTransition, PendingTransition as PendingEpochTransition}; -use common_types::header::{Header, ExtendedHeader}; -use common_types::log_entry::{LogEntry, LocalizedLogEntry}; -use common_types::receipt::Receipt; -use common_types::transaction::LocalizedTransaction; -use common_types::tree_route::TreeRoute; -use common_types::view; -use common_types::views::{BlockView, HeaderView}; -use ethcore_db::cache_manager::CacheManager; -use ethcore_db::keys::{BlockReceipts, BlockDetails, TransactionAddress, EPOCH_KEY_PREFIX, EpochTransitions}; -use ethcore_db::{self as db, Writable, Readable, CacheUpdatePolicy}; -use ethereum_types::{H256, Bloom, BloomRef, U256}; +use common_types::{ + 
blockchain_info::BlockChainInfo, + encoded, + engines::{ + epoch::{PendingTransition as PendingEpochTransition, Transition as EpochTransition}, + ForkChoice, + }, + header::{ExtendedHeader, Header}, + log_entry::{LocalizedLogEntry, LogEntry}, + receipt::Receipt, + transaction::LocalizedTransaction, + tree_route::TreeRoute, + view, + views::{BlockView, HeaderView}, + BlockNumber, +}; +use ethcore_db::{ + self as db, + cache_manager::CacheManager, + keys::{BlockDetails, BlockReceipts, EpochTransitions, TransactionAddress, EPOCH_KEY_PREFIX}, + CacheUpdatePolicy, Readable, Writable, +}; +use ethereum_types::{Bloom, BloomRef, H256, U256}; use heapsize::HeapSizeOf; use itertools::Itertools; use kvdb::{DBTransaction, KeyValueDB}; -use log::{trace, warn, info}; +use log::{info, trace, warn}; use parity_bytes::Bytes; use parking_lot::{Mutex, RwLock}; use rayon::prelude::*; use rlp::RlpStream; -use rlp_compress::{compress, decompress, blocks_swapper}; +use rlp_compress::{blocks_swapper, compress, decompress}; -use crate::best_block::{BestBlock, BestAncientBlock}; -use crate::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}; -use crate::update::{ExtrasUpdate, ExtrasInsert}; -use crate::{CacheSize, ImportRoute, Config}; +use crate::{ + best_block::{BestAncientBlock, BestBlock}, + block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}, + update::{ExtrasInsert, ExtrasUpdate}, + CacheSize, Config, ImportRoute, +}; /// Database backing `BlockChain`. pub trait BlockChainDB: Send + Sync { - /// Generic key value store. - fn key_value(&self) -> &Arc; + /// Generic key value store. + fn key_value(&self) -> &Arc; - /// Header blooms database. - fn blooms(&self) -> &blooms_db::Database; + /// Header blooms database. + fn blooms(&self) -> &blooms_db::Database; - /// Trace blooms database. - fn trace_blooms(&self) -> &blooms_db::Database; + /// Trace blooms database. 
+ fn trace_blooms(&self) -> &blooms_db::Database; - /// Restore the DB from the given path - fn restore(&self, new_db: &str) -> Result<(), io::Error> { - // First, close the Blooms databases - self.blooms().close()?; - self.trace_blooms().close()?; + /// Restore the DB from the given path + fn restore(&self, new_db: &str) -> Result<(), io::Error> { + // First, close the Blooms databases + self.blooms().close()?; + self.trace_blooms().close()?; - // Restore the key_value DB - self.key_value().restore(new_db)?; + // Restore the key_value DB + self.key_value().restore(new_db)?; - // Re-open the Blooms databases - self.blooms().reopen()?; - self.trace_blooms().reopen()?; - Ok(()) - } + // Re-open the Blooms databases + self.blooms().reopen()?; + self.trace_blooms().reopen()?; + Ok(()) + } } /// Generic database handler. This trait contains one function `open`. When called, it opens database with a /// predefined config. pub trait BlockChainDBHandler: Send + Sync { - /// Open the predefined key-value database. - fn open(&self, path: &Path) -> io::Result>; + /// Open the predefined key-value database. + fn open(&self, path: &Path) -> io::Result>; } /// Interface for querying blocks by hash and by number. pub trait BlockProvider { - /// Returns true if the given block is known - /// (though not necessarily a part of the canon chain). - fn is_known(&self, hash: &H256) -> bool; + /// Returns true if the given block is known + /// (though not necessarily a part of the canon chain). + fn is_known(&self, hash: &H256) -> bool; - /// Returns true if the given block is known and in the canon chain. - fn is_canon(&self, hash: &H256) -> bool { - let is_canon = || Some(hash == &self.block_hash(self.block_number(hash)?)?); - is_canon().unwrap_or(false) - } + /// Returns true if the given block is known and in the canon chain. 
+ fn is_canon(&self, hash: &H256) -> bool { + let is_canon = || Some(hash == &self.block_hash(self.block_number(hash)?)?); + is_canon().unwrap_or(false) + } - /// Get the first block of the best part of the chain. - /// Return `None` if there is no gap and the first block is the genesis. - /// Any queries of blocks which precede this one are not guaranteed to - /// succeed. - fn first_block(&self) -> Option; + /// Get the first block of the best part of the chain. + /// Return `None` if there is no gap and the first block is the genesis. + /// Any queries of blocks which precede this one are not guaranteed to + /// succeed. + fn first_block(&self) -> Option; - /// Get the number of the first block. - fn first_block_number(&self) -> Option { - self.first_block().map(|b| self.block_number(&b).expect("First block is always set to an existing block or `None`. Existing block always has a number; qed")) - } + /// Get the number of the first block. + fn first_block_number(&self) -> Option { + self.first_block().map(|b| self.block_number(&b).expect("First block is always set to an existing block or `None`. Existing block always has a number; qed")) + } - /// Get the best block of an first block sequence if there is a gap. - fn best_ancient_block(&self) -> Option; + /// Get the best block of an first block sequence if there is a gap. + fn best_ancient_block(&self) -> Option; - /// Get the number of the first block. - fn best_ancient_number(&self) -> Option { - self.best_ancient_block().map(|h| self.block_number(&h).expect("Ancient block is always set to an existing block or `None`. Existing block always has a number; qed")) - } - /// Get raw block data - fn block(&self, hash: &H256) -> Option; + /// Get the number of the first block. + fn best_ancient_number(&self) -> Option { + self.best_ancient_block().map(|h| self.block_number(&h).expect("Ancient block is always set to an existing block or `None`. 
Existing block always has a number; qed")) + } + /// Get raw block data + fn block(&self, hash: &H256) -> Option; - /// Get the familial details concerning a block. - fn block_details(&self, hash: &H256) -> Option; + /// Get the familial details concerning a block. + fn block_details(&self, hash: &H256) -> Option; - /// Get the hash of given block's number. - fn block_hash(&self, index: BlockNumber) -> Option; + /// Get the hash of given block's number. + fn block_hash(&self, index: BlockNumber) -> Option; - /// Get the address of transaction with given hash. - fn transaction_address(&self, hash: &H256) -> Option; + /// Get the address of transaction with given hash. + fn transaction_address(&self, hash: &H256) -> Option; - /// Get receipts of block with given hash. - fn block_receipts(&self, hash: &H256) -> Option; + /// Get receipts of block with given hash. + fn block_receipts(&self, hash: &H256) -> Option; - /// Get the header RLP of a block. - fn block_header_data(&self, hash: &H256) -> Option; + /// Get the header RLP of a block. + fn block_header_data(&self, hash: &H256) -> Option; - /// Get the block body (uncles and transactions). - fn block_body(&self, hash: &H256) -> Option; + /// Get the block body (uncles and transactions). + fn block_body(&self, hash: &H256) -> Option; - /// Get a list of uncles for a given block. - /// Returns None if block does not exist. - fn uncles(&self, hash: &H256) -> Option> { - self.block_body(hash).map(|body| body.uncles()) - } + /// Get a list of uncles for a given block. + /// Returns None if block does not exist. + fn uncles(&self, hash: &H256) -> Option> { + self.block_body(hash).map(|body| body.uncles()) + } - /// Get a list of uncle hashes for a given block. - /// Returns None if block does not exist. - fn uncle_hashes(&self, hash: &H256) -> Option> { - self.block_body(hash).map(|body| body.uncle_hashes()) - } + /// Get a list of uncle hashes for a given block. + /// Returns None if block does not exist. 
+ fn uncle_hashes(&self, hash: &H256) -> Option> { + self.block_body(hash).map(|body| body.uncle_hashes()) + } - /// Get the number of given block's hash. - fn block_number(&self, hash: &H256) -> Option { - self.block_header_data(hash).map(|header| header.number()) - } + /// Get the number of given block's hash. + fn block_number(&self, hash: &H256) -> Option { + self.block_header_data(hash).map(|header| header.number()) + } - /// Get transaction with given transaction hash. - fn transaction(&self, address: &TransactionAddress) -> Option { - self.block_body(&address.block_hash) - .and_then(|body| self.block_number(&address.block_hash) - .and_then(|n| body.view().localized_transaction_at(&address.block_hash, n, address.index))) - } + /// Get transaction with given transaction hash. + fn transaction(&self, address: &TransactionAddress) -> Option { + self.block_body(&address.block_hash).and_then(|body| { + self.block_number(&address.block_hash).and_then(|n| { + body.view() + .localized_transaction_at(&address.block_hash, n, address.index) + }) + }) + } - /// Get a list of transactions for a given block. - /// Returns None if block does not exist. - fn transactions(&self, hash: &H256) -> Option> { - self.block_body(hash) - .and_then(|body| self.block_number(hash) - .map(|n| body.view().localized_transactions(hash, n))) - } + /// Get a list of transactions for a given block. + /// Returns None if block does not exist. + fn transactions(&self, hash: &H256) -> Option> { + self.block_body(hash).and_then(|body| { + self.block_number(hash) + .map(|n| body.view().localized_transactions(hash, n)) + }) + } - /// Returns reference to genesis hash. - fn genesis_hash(&self) -> H256 { - self.block_hash(0).expect("Genesis hash should always exist") - } + /// Returns reference to genesis hash. + fn genesis_hash(&self) -> H256 { + self.block_hash(0) + .expect("Genesis hash should always exist") + } - /// Returns the header of the genesis block. 
- fn genesis_header(&self) -> encoded::Header { - self.block_header_data(&self.genesis_hash()) - .expect("Genesis header always stored; qed") - } + /// Returns the header of the genesis block. + fn genesis_header(&self) -> encoded::Header { + self.block_header_data(&self.genesis_hash()) + .expect("Genesis header always stored; qed") + } - /// Returns numbers of blocks containing given bloom. - fn blocks_with_bloom<'a, B, I, II>(&self, blooms: II, from_block: BlockNumber, to_block: BlockNumber) -> Vec - where - BloomRef<'a>: From, - II: IntoIterator + Copy, - I: Iterator, - Self: Sized; + /// Returns numbers of blocks containing given bloom. + fn blocks_with_bloom<'a, B, I, II>( + &self, + blooms: II, + from_block: BlockNumber, + to_block: BlockNumber, + ) -> Vec + where + BloomRef<'a>: From, + II: IntoIterator + Copy, + I: Iterator, + Self: Sized; - /// Returns logs matching given filter. - fn logs(&self, blocks: Vec, matches: F, limit: Option) -> Vec - where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized; + /// Returns logs matching given filter. + fn logs( + &self, + blocks: Vec, + matches: F, + limit: Option, + ) -> Vec + where + F: Fn(&LogEntry) -> bool + Send + Sync, + Self: Sized; } /// Interface for querying blocks with pending db transaction by hash and by number. trait InTransactionBlockProvider { - /// Get the familial details concerning a block. - fn uncommitted_block_details(&self, hash: &H256) -> Option; + /// Get the familial details concerning a block. + fn uncommitted_block_details(&self, hash: &H256) -> Option; } #[derive(Debug, Hash, Eq, PartialEq, Clone)] enum CacheId { - BlockHeader(H256), - BlockBody(H256), - BlockDetails(H256), - BlockHashes(BlockNumber), - TransactionAddresses(H256), - BlockReceipts(H256), + BlockHeader(H256), + BlockBody(H256), + BlockDetails(H256), + BlockHashes(BlockNumber), + TransactionAddresses(H256), + BlockReceipts(H256), } /// Structure providing fast access to blockchain data. 
/// /// **Does not do input data verification.** pub struct BlockChain { - // All locks must be captured in the order declared here. - best_block: RwLock, - // Stores best block of the first uninterrupted sequence of blocks. `None` if there are no gaps. - // Only updated with `insert_unordered_block`. - best_ancient_block: RwLock>, - // Stores the last block of the last sequence of blocks. `None` if there are no gaps. - // This is calculated on start and does not get updated. - first_block: Option, + // All locks must be captured in the order declared here. + best_block: RwLock, + // Stores best block of the first uninterrupted sequence of blocks. `None` if there are no gaps. + // Only updated with `insert_unordered_block`. + best_ancient_block: RwLock>, + // Stores the last block of the last sequence of blocks. `None` if there are no gaps. + // This is calculated on start and does not get updated. + first_block: Option, - // block cache - block_headers: RwLock>, - block_bodies: RwLock>, + // block cache + block_headers: RwLock>, + block_bodies: RwLock>, - // extra caches - block_details: RwLock>, - block_hashes: RwLock>, - transaction_addresses: RwLock>, - block_receipts: RwLock>, + // extra caches + block_details: RwLock>, + block_hashes: RwLock>, + transaction_addresses: RwLock>, + block_receipts: RwLock>, - db: Arc, + db: Arc, - cache_man: Mutex>, + cache_man: Mutex>, - pending_best_ancient_block: RwLock>>, - pending_best_block: RwLock>, - pending_block_hashes: RwLock>, - pending_block_details: RwLock>, - pending_transaction_addresses: RwLock>>, + pending_best_ancient_block: RwLock>>, + pending_best_block: RwLock>, + pending_block_hashes: RwLock>, + pending_block_details: RwLock>, + pending_transaction_addresses: RwLock>>, } impl BlockProvider for BlockChain { - /// Returns true if the given block is known - /// (though not necessarily a part of the canon chain). 
- fn is_known(&self, hash: &H256) -> bool { - self.db.key_value().exists_with_cache(db::COL_EXTRA, &self.block_details, hash) - } + /// Returns true if the given block is known + /// (though not necessarily a part of the canon chain). + fn is_known(&self, hash: &H256) -> bool { + self.db + .key_value() + .exists_with_cache(db::COL_EXTRA, &self.block_details, hash) + } - fn first_block(&self) -> Option { - self.first_block.clone() - } + fn first_block(&self) -> Option { + self.first_block.clone() + } - fn best_ancient_block(&self) -> Option { - self.best_ancient_block.read().as_ref().map(|b| b.hash) - } + fn best_ancient_block(&self) -> Option { + self.best_ancient_block.read().as_ref().map(|b| b.hash) + } - fn best_ancient_number(&self) -> Option { - self.best_ancient_block.read().as_ref().map(|b| b.number) - } + fn best_ancient_number(&self) -> Option { + self.best_ancient_block.read().as_ref().map(|b| b.number) + } - /// Get raw block data - fn block(&self, hash: &H256) -> Option { - let header = self.block_header_data(hash)?; - let body = self.block_body(hash)?; - Some(encoded::Block::new_from_header_and_body(&header.view(), &body.view())) - } + /// Get raw block data + fn block(&self, hash: &H256) -> Option { + let header = self.block_header_data(hash)?; + let body = self.block_body(hash)?; + Some(encoded::Block::new_from_header_and_body( + &header.view(), + &body.view(), + )) + } - /// Get block header data - fn block_header_data(&self, hash: &H256) -> Option { - // Check cache first - { - let read = self.block_headers.read(); - if let Some(v) = read.get(hash) { - return Some(v.clone()); - } - } + /// Get block header data + fn block_header_data(&self, hash: &H256) -> Option { + // Check cache first + { + let read = self.block_headers.read(); + if let Some(v) = read.get(hash) { + return Some(v.clone()); + } + } - // Check if it's the best block - { - let best_block = self.best_block.read(); - if &best_block.header.hash() == hash { - return 
Some(best_block.header.encoded()) - } - } + // Check if it's the best block + { + let best_block = self.best_block.read(); + if &best_block.header.hash() == hash { + return Some(best_block.header.encoded()); + } + } - // Read from DB and populate cache - let b = self.db.key_value().get(db::COL_HEADERS, hash) - .expect("Low level database error when fetching block header data. Some issue with disk?")?; + // Read from DB and populate cache + let b = self.db.key_value().get(db::COL_HEADERS, hash).expect( + "Low level database error when fetching block header data. Some issue with disk?", + )?; - let header = encoded::Header::new(decompress(&b, blocks_swapper()).into_vec()); - let mut write = self.block_headers.write(); - write.insert(*hash, header.clone()); + let header = encoded::Header::new(decompress(&b, blocks_swapper()).into_vec()); + let mut write = self.block_headers.write(); + write.insert(*hash, header.clone()); - self.cache_man.lock().note_used(CacheId::BlockHeader(*hash)); - Some(header) - } + self.cache_man.lock().note_used(CacheId::BlockHeader(*hash)); + Some(header) + } - /// Get block body data - fn block_body(&self, hash: &H256) -> Option { - // Check cache first - { - let read = self.block_bodies.read(); - if let Some(v) = read.get(hash) { - return Some(v.clone()); - } - } + /// Get block body data + fn block_body(&self, hash: &H256) -> Option { + // Check cache first + { + let read = self.block_bodies.read(); + if let Some(v) = read.get(hash) { + return Some(v.clone()); + } + } - // Check if it's the best block - { - let best_block = self.best_block.read(); - if &best_block.header.hash() == hash { - return Some(encoded::Body::new(Self::block_to_body(best_block.block.rlp().as_raw()))); - } - } + // Check if it's the best block + { + let best_block = self.best_block.read(); + if &best_block.header.hash() == hash { + return Some(encoded::Body::new(Self::block_to_body( + best_block.block.rlp().as_raw(), + ))); + } + } - // Read from DB and populate cache 
- let b = self.db.key_value().get(db::COL_BODIES, hash) - .expect("Low level database error when fetching block body data. Some issue with disk?")?; + // Read from DB and populate cache + let b = self.db.key_value().get(db::COL_BODIES, hash).expect( + "Low level database error when fetching block body data. Some issue with disk?", + )?; - let body = encoded::Body::new(decompress(&b, blocks_swapper()).into_vec()); - let mut write = self.block_bodies.write(); - write.insert(*hash, body.clone()); + let body = encoded::Body::new(decompress(&b, blocks_swapper()).into_vec()); + let mut write = self.block_bodies.write(); + write.insert(*hash, body.clone()); - self.cache_man.lock().note_used(CacheId::BlockBody(*hash)); - Some(body) - } + self.cache_man.lock().note_used(CacheId::BlockBody(*hash)); + Some(body) + } - /// Get the familial details concerning a block. - fn block_details(&self, hash: &H256) -> Option { - let result = self.db.key_value().read_with_cache(db::COL_EXTRA, &self.block_details, hash)?; - self.cache_man.lock().note_used(CacheId::BlockDetails(*hash)); - Some(result) - } + /// Get the familial details concerning a block. + fn block_details(&self, hash: &H256) -> Option { + let result = + self.db + .key_value() + .read_with_cache(db::COL_EXTRA, &self.block_details, hash)?; + self.cache_man + .lock() + .note_used(CacheId::BlockDetails(*hash)); + Some(result) + } - /// Get the hash of given block's number. - fn block_hash(&self, index: BlockNumber) -> Option { - let result = self.db.key_value().read_with_cache(db::COL_EXTRA, &self.block_hashes, &index)?; - self.cache_man.lock().note_used(CacheId::BlockHashes(index)); - Some(result) - } + /// Get the hash of given block's number. 
+ fn block_hash(&self, index: BlockNumber) -> Option { + let result = + self.db + .key_value() + .read_with_cache(db::COL_EXTRA, &self.block_hashes, &index)?; + self.cache_man.lock().note_used(CacheId::BlockHashes(index)); + Some(result) + } - /// Get the address of transaction with given hash. - fn transaction_address(&self, hash: &H256) -> Option { - let result = self.db.key_value().read_with_cache(db::COL_EXTRA, &self.transaction_addresses, hash)?; - self.cache_man.lock().note_used(CacheId::TransactionAddresses(*hash)); - Some(result) - } + /// Get the address of transaction with given hash. + fn transaction_address(&self, hash: &H256) -> Option { + let result = self.db.key_value().read_with_cache( + db::COL_EXTRA, + &self.transaction_addresses, + hash, + )?; + self.cache_man + .lock() + .note_used(CacheId::TransactionAddresses(*hash)); + Some(result) + } - /// Get receipts of block with given hash. - fn block_receipts(&self, hash: &H256) -> Option { - let result = self.db.key_value().read_with_cache(db::COL_EXTRA, &self.block_receipts, hash)?; - self.cache_man.lock().note_used(CacheId::BlockReceipts(*hash)); - Some(result) - } + /// Get receipts of block with given hash. + fn block_receipts(&self, hash: &H256) -> Option { + let result = + self.db + .key_value() + .read_with_cache(db::COL_EXTRA, &self.block_receipts, hash)?; + self.cache_man + .lock() + .note_used(CacheId::BlockReceipts(*hash)); + Some(result) + } - /// Returns numbers of blocks containing given bloom. - fn blocks_with_bloom<'a, B, I, II>(&self, blooms: II, from_block: BlockNumber, to_block: BlockNumber) -> Vec - where - BloomRef<'a>: From, - II: IntoIterator + Copy, - I: Iterator { - self.db.blooms() - .filter(from_block, to_block, blooms) - .expect("Low level database error when searching blooms. Some issue with disk?") - } + /// Returns numbers of blocks containing given bloom. 
+ fn blocks_with_bloom<'a, B, I, II>( + &self, + blooms: II, + from_block: BlockNumber, + to_block: BlockNumber, + ) -> Vec + where + BloomRef<'a>: From, + II: IntoIterator + Copy, + I: Iterator, + { + self.db + .blooms() + .filter(from_block, to_block, blooms) + .expect("Low level database error when searching blooms. Some issue with disk?") + } - /// Returns logs matching given filter. The order of logs returned will be the same as the order of the blocks - /// provided. And it's the callers responsibility to sort blocks provided in advance. - fn logs(&self, mut blocks: Vec, matches: F, limit: Option) -> Vec - where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized { - // sort in reverse order - blocks.reverse(); + /// Returns logs matching given filter. The order of logs returned will be the same as the order of the blocks + /// provided. And it's the callers responsibility to sort blocks provided in advance. + fn logs( + &self, + mut blocks: Vec, + matches: F, + limit: Option, + ) -> Vec + where + F: Fn(&LogEntry) -> bool + Send + Sync, + Self: Sized, + { + // sort in reverse order + blocks.reverse(); - let mut logs = blocks + let mut logs = blocks .chunks(128) .flat_map(move |blocks_chunk| { blocks_chunk.into_par_iter() @@ -425,2182 +496,2625 @@ impl BlockProvider for BlockChain { }) .take(limit.unwrap_or(::std::usize::MAX)) .collect::>(); - logs.reverse(); - logs - } + logs.reverse(); + logs + } } impl InTransactionBlockProvider for BlockChain { - fn uncommitted_block_details(&self, hash: &H256) -> Option { - let result = self.db.key_value().read_with_two_layer_cache( - db::COL_EXTRA, - &self.pending_block_details, - &self.block_details, - hash - )?; - self.cache_man.lock().note_used(CacheId::BlockDetails(*hash)); - Some(result) - } + fn uncommitted_block_details(&self, hash: &H256) -> Option { + let result = self.db.key_value().read_with_two_layer_cache( + db::COL_EXTRA, + &self.pending_block_details, + &self.block_details, + hash, + )?; + self.cache_man + 
.lock() + .note_used(CacheId::BlockDetails(*hash)); + Some(result) + } } /// An iterator which walks the blockchain towards the genesis. #[derive(Clone)] pub struct AncestryIter<'a> { - current: H256, - chain: &'a BlockChain, + current: H256, + chain: &'a BlockChain, } impl<'a> Iterator for AncestryIter<'a> { - type Item = H256; - fn next(&mut self) -> Option { - if self.current.is_zero() { - None - } else { - self.chain.block_details(&self.current) - .map(|details| mem::replace(&mut self.current, details.parent)) - } - } + type Item = H256; + fn next(&mut self) -> Option { + if self.current.is_zero() { + None + } else { + self.chain + .block_details(&self.current) + .map(|details| mem::replace(&mut self.current, details.parent)) + } + } } /// An iterator which walks the blockchain towards the genesis, with metadata information. pub struct AncestryWithMetadataIter<'a> { - current: H256, - chain: &'a BlockChain, + current: H256, + chain: &'a BlockChain, } impl<'a> Iterator for AncestryWithMetadataIter<'a> { - type Item = ExtendedHeader; - fn next(&mut self) -> Option { - if self.current.is_zero() { - None - } else { - let details = self.chain.block_details(&self.current); - let header = self.chain.block_header_data(&self.current) - .map(|h| h.decode().expect("Stored block header data is valid RLP; qed")); + type Item = ExtendedHeader; + fn next(&mut self) -> Option { + if self.current.is_zero() { + None + } else { + let details = self.chain.block_details(&self.current); + let header = self.chain.block_header_data(&self.current).map(|h| { + h.decode() + .expect("Stored block header data is valid RLP; qed") + }); - match (details, header) { - (Some(details), Some(header)) => { - self.current = details.parent; - Some(ExtendedHeader { - parent_total_difficulty: details.total_difficulty - *header.difficulty(), - is_finalized: details.is_finalized, - header, - }) - }, - _ => { - self.current = H256::default(); - None - }, - } - } - } + match (details, header) { + 
(Some(details), Some(header)) => { + self.current = details.parent; + Some(ExtendedHeader { + parent_total_difficulty: details.total_difficulty - *header.difficulty(), + is_finalized: details.is_finalized, + header, + }) + } + _ => { + self.current = H256::default(); + None + } + } + } + } } /// An iterator which walks all epoch transitions. /// Returns epoch transitions. pub struct EpochTransitionIter<'a> { - chain: &'a BlockChain, - prefix_iter: Box, Box<[u8]>)> + 'a>, + chain: &'a BlockChain, + prefix_iter: Box, Box<[u8]>)> + 'a>, } impl<'a> Iterator for EpochTransitionIter<'a> { - type Item = (u64, EpochTransition); + type Item = (u64, EpochTransition); - fn next(&mut self) -> Option { - loop { - // some epochs never occurred on the main chain. - let (key, val) = self.prefix_iter.next()?; + fn next(&mut self) -> Option { + loop { + // some epochs never occurred on the main chain. + let (key, val) = self.prefix_iter.next()?; - // iterator may continue beyond values beginning with this - // prefix. - if !key.starts_with(&EPOCH_KEY_PREFIX[..]) { - return None - } + // iterator may continue beyond values beginning with this + // prefix. + if !key.starts_with(&EPOCH_KEY_PREFIX[..]) { + return None; + } - let transitions: EpochTransitions = ::rlp::decode(&val[..]).expect("decode error: the db is corrupted or the data structure has changed"); + let transitions: EpochTransitions = ::rlp::decode(&val[..]) + .expect("decode error: the db is corrupted or the data structure has changed"); - // if there are multiple candidates, at most one will be on the - // canon chain. - for transition in transitions.candidates.into_iter() { - let is_in_canon_chain = self.chain.block_hash(transition.block_number) - .map_or(false, |hash| hash == transition.block_hash); + // if there are multiple candidates, at most one will be on the + // canon chain. 
+ for transition in transitions.candidates.into_iter() { + let is_in_canon_chain = self + .chain + .block_hash(transition.block_number) + .map_or(false, |hash| hash == transition.block_hash); - // if the transition is within the block gap, there will only be - // one candidate, and it will be from a snapshot restored from. - let is_ancient = self.chain.first_block_number() - .map_or(false, |first| first > transition.block_number); + // if the transition is within the block gap, there will only be + // one candidate, and it will be from a snapshot restored from. + let is_ancient = self + .chain + .first_block_number() + .map_or(false, |first| first > transition.block_number); - if is_ancient || is_in_canon_chain { - return Some((transitions.number, transition)) - } - } - } - } + if is_ancient || is_in_canon_chain { + return Some((transitions.number, transition)); + } + } + } + } } impl BlockChain { - /// Create new instance of blockchain from given Genesis. - pub fn new(config: Config, genesis: &[u8], db: Arc) -> BlockChain { - // 400 is the average size of the key - let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400); + /// Create new instance of blockchain from given Genesis. + pub fn new(config: Config, genesis: &[u8], db: Arc) -> BlockChain { + // 400 is the average size of the key + let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400); - let mut bc = BlockChain { - first_block: None, - best_block: RwLock::new(BestBlock { - // BestBlock will be overwritten anyway. 
- header: Default::default(), - total_difficulty: Default::default(), - block: encoded::Block::new(genesis.into()), - }), - best_ancient_block: RwLock::new(None), - block_headers: RwLock::new(HashMap::new()), - block_bodies: RwLock::new(HashMap::new()), - block_details: RwLock::new(HashMap::new()), - block_hashes: RwLock::new(HashMap::new()), - transaction_addresses: RwLock::new(HashMap::new()), - block_receipts: RwLock::new(HashMap::new()), - db: db.clone(), - cache_man: Mutex::new(cache_man), - pending_best_ancient_block: RwLock::new(None), - pending_best_block: RwLock::new(None), - pending_block_hashes: RwLock::new(HashMap::new()), - pending_block_details: RwLock::new(HashMap::new()), - pending_transaction_addresses: RwLock::new(HashMap::new()), - }; + let mut bc = BlockChain { + first_block: None, + best_block: RwLock::new(BestBlock { + // BestBlock will be overwritten anyway. + header: Default::default(), + total_difficulty: Default::default(), + block: encoded::Block::new(genesis.into()), + }), + best_ancient_block: RwLock::new(None), + block_headers: RwLock::new(HashMap::new()), + block_bodies: RwLock::new(HashMap::new()), + block_details: RwLock::new(HashMap::new()), + block_hashes: RwLock::new(HashMap::new()), + transaction_addresses: RwLock::new(HashMap::new()), + block_receipts: RwLock::new(HashMap::new()), + db: db.clone(), + cache_man: Mutex::new(cache_man), + pending_best_ancient_block: RwLock::new(None), + pending_best_block: RwLock::new(None), + pending_block_hashes: RwLock::new(HashMap::new()), + pending_block_details: RwLock::new(HashMap::new()), + pending_transaction_addresses: RwLock::new(HashMap::new()), + }; - // load best block - let best_block_hash = match bc.db.key_value().get(db::COL_EXTRA, b"best") - .expect("Low-level database error when fetching 'best' block. 
Some issue with disk?") - { - Some(best) => { - H256::from_slice(&best) - } - None => { - // best block does not exist - // we need to insert genesis into the cache - let block = view!(BlockView, genesis); - let header = block.header_view(); - let hash = block.hash(); + // load best block + let best_block_hash = + match bc.db.key_value().get(db::COL_EXTRA, b"best").expect( + "Low-level database error when fetching 'best' block. Some issue with disk?", + ) { + Some(best) => H256::from_slice(&best), + None => { + // best block does not exist + // we need to insert genesis into the cache + let block = view!(BlockView, genesis); + let header = block.header_view(); + let hash = block.hash(); - let details = BlockDetails { - number: header.number(), - total_difficulty: header.difficulty(), - parent: header.parent_hash(), - children: vec![], - is_finalized: false, - }; + let details = BlockDetails { + number: header.number(), + total_difficulty: header.difficulty(), + parent: header.parent_hash(), + children: vec![], + is_finalized: false, + }; - let mut batch = DBTransaction::new(); - batch.put(db::COL_HEADERS, &hash, block.header_rlp().as_raw()); - batch.put(db::COL_BODIES, &hash, &Self::block_to_body(genesis)); + let mut batch = DBTransaction::new(); + batch.put(db::COL_HEADERS, &hash, block.header_rlp().as_raw()); + batch.put(db::COL_BODIES, &hash, &Self::block_to_body(genesis)); - batch.write(db::COL_EXTRA, &hash, &details); - batch.write(db::COL_EXTRA, &header.number(), &hash); + batch.write(db::COL_EXTRA, &hash, &details); + batch.write(db::COL_EXTRA, &header.number(), &hash); - batch.put(db::COL_EXTRA, b"best", &hash); - bc.db.key_value().write(batch).expect("Low level database error when fetching 'best' block. Some issue with disk?"); - hash - } - }; + batch.put(db::COL_EXTRA, b"best", &hash); + bc.db.key_value().write(batch).expect( + "Low level database error when fetching 'best' block. 
Some issue with disk?", + ); + hash + } + }; - { - // Fetch best block details - let best_block_total_difficulty = bc.block_details(&best_block_hash) + { + // Fetch best block details + let best_block_total_difficulty = bc.block_details(&best_block_hash) .expect("Best block is from a known block hash; a known block hash always comes with a known block detail; qed") .total_difficulty; - let best_block_rlp = bc.block(&best_block_hash) - .expect("Best block is from a known block hash; qed"); + let best_block_rlp = bc + .block(&best_block_hash) + .expect("Best block is from a known block hash; qed"); - // and write them to the cache. - let mut best_block = bc.best_block.write(); - *best_block = BestBlock { - total_difficulty: best_block_total_difficulty, - header: best_block_rlp.decode_header(), - block: best_block_rlp, - }; - } + // and write them to the cache. + let mut best_block = bc.best_block.write(); + *best_block = BestBlock { + total_difficulty: best_block_total_difficulty, + header: best_block_rlp.decode_header(), + block: best_block_rlp, + }; + } - { - let best_block_number = bc.best_block.read().header.number(); - // Fetch first and best ancient block details - let raw_first = bc.db.key_value().get(db::COL_EXTRA, b"first") - .expect("Low level database error when fetching 'first' block. Some issue with disk?") - .map(|v| v.into_vec()); - let mut best_ancient = bc.db.key_value().get(db::COL_EXTRA, b"ancient") + { + let best_block_number = bc.best_block.read().header.number(); + // Fetch first and best ancient block details + let raw_first = bc + .db + .key_value() + .get(db::COL_EXTRA, b"first") + .expect( + "Low level database error when fetching 'first' block. Some issue with disk?", + ) + .map(|v| v.into_vec()); + let mut best_ancient = bc.db.key_value().get(db::COL_EXTRA, b"ancient") .expect("Low level database error when fetching 'best ancient' block. 
Some issue with disk?") .map(|h| H256::from_slice(&h)); - let best_ancient_number; - if best_ancient.is_none() && best_block_number > 1 && bc.block_hash(1).is_none() { - best_ancient = Some(bc.genesis_hash()); - best_ancient_number = Some(0); - } else { - best_ancient_number = best_ancient.as_ref().and_then(|h| bc.block_number(h)); - } + let best_ancient_number; + if best_ancient.is_none() && best_block_number > 1 && bc.block_hash(1).is_none() { + best_ancient = Some(bc.genesis_hash()); + best_ancient_number = Some(0); + } else { + best_ancient_number = best_ancient.as_ref().and_then(|h| bc.block_number(h)); + } - // binary search for the first block. - match raw_first { - None => { - let (mut f, mut hash) = (best_block_number, best_block_hash); - let mut l = best_ancient_number.unwrap_or(0); + // binary search for the first block. + match raw_first { + None => { + let (mut f, mut hash) = (best_block_number, best_block_hash); + let mut l = best_ancient_number.unwrap_or(0); - loop { - if l >= f { break; } + loop { + if l >= f { + break; + } - let step = (f - l) >> 1; - let m = l + step; + let step = (f - l) >> 1; + let m = l + step; - match bc.block_hash(m) { - Some(h) => { f = m; hash = h }, - None => { l = m + 1 }, - } - } + match bc.block_hash(m) { + Some(h) => { + f = m; + hash = h + } + None => l = m + 1, + } + } - if hash != bc.genesis_hash() { - trace!("First block calculated: {:?}", hash); - let mut batch = db.key_value().transaction(); - batch.put(db::COL_EXTRA, b"first", &hash); - db.key_value().write(batch).expect("Low level database error when writing 'first' block. 
Some issue with disk?"); - bc.first_block = Some(hash); - } - }, - Some(raw_first) => { - bc.first_block = Some(H256::from_slice(&raw_first)); - }, - } + if hash != bc.genesis_hash() { + trace!("First block calculated: {:?}", hash); + let mut batch = db.key_value().transaction(); + batch.put(db::COL_EXTRA, b"first", &hash); + db.key_value().write(batch).expect("Low level database error when writing 'first' block. Some issue with disk?"); + bc.first_block = Some(hash); + } + } + Some(raw_first) => { + bc.first_block = Some(H256::from_slice(&raw_first)); + } + } - // and write them - if let (Some(hash), Some(number)) = (best_ancient, best_ancient_number) { - let mut best_ancient_block = bc.best_ancient_block.write(); - *best_ancient_block = Some(BestAncientBlock { - hash: hash, - number: number, - }); - } - } + // and write them + if let (Some(hash), Some(number)) = (best_ancient, best_ancient_number) { + let mut best_ancient_block = bc.best_ancient_block.write(); + *best_ancient_block = Some(BestAncientBlock { + hash: hash, + number: number, + }); + } + } - bc - } + bc + } - /// Returns true if the given parent block has given child - /// (though not necessarily a part of the canon chain). - fn is_known_child(&self, parent: &H256, hash: &H256) -> bool { - self.db.key_value().read_with_cache(db::COL_EXTRA, &self.block_details, parent).map_or(false, |d| d.children.contains(hash)) - } + /// Returns true if the given parent block has given child + /// (though not necessarily a part of the canon chain). + fn is_known_child(&self, parent: &H256, hash: &H256) -> bool { + self.db + .key_value() + .read_with_cache(db::COL_EXTRA, &self.block_details, parent) + .map_or(false, |d| d.children.contains(hash)) + } - /// Returns a tree route between `from` and `to`, which is a tuple of: - /// - /// - a vector of hashes of all blocks, ordered from `from` to `to`. - /// - /// - common ancestor of these blocks. - /// - /// - an index where best common ancestor would be - /// - /// 1.) 
from newer to older - /// - /// - bc: `A1 -> A2 -> A3 -> A4 -> A5` - /// - from: A5, to: A4 - /// - route: - /// - /// ```json - /// { blocks: [A5], ancestor: A4, index: 1 } - /// ``` - /// - /// 2.) from older to newer - /// - /// - bc: `A1 -> A2 -> A3 -> A4 -> A5` - /// - from: A3, to: A4 - /// - route: - /// - /// ```json - /// { blocks: [A4], ancestor: A3, index: 0 } - /// ``` - /// - /// 3.) fork: - /// - /// - bc: - /// - /// ```text - /// A1 -> A2 -> A3 -> A4 - /// -> B3 -> B4 - /// ``` - /// - from: B4, to: A4 - /// - route: - /// - /// ```json - /// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 } - /// ``` - /// - /// If the tree route verges into pruned or unknown blocks, - /// `None` is returned. - /// - /// `is_from_route_finalized` returns whether the `from` part of the - /// route contains a finalized block. This only holds if the two parts (from - /// and to) are on different branches, ie. on 2 different forks. - pub fn tree_route(&self, from: H256, to: H256) -> Option { - let mut from_branch = vec![]; - let mut is_from_route_finalized = false; - let mut to_branch = vec![]; + /// Returns a tree route between `from` and `to`, which is a tuple of: + /// + /// - a vector of hashes of all blocks, ordered from `from` to `to`. + /// + /// - common ancestor of these blocks. + /// + /// - an index where best common ancestor would be + /// + /// 1.) from newer to older + /// + /// - bc: `A1 -> A2 -> A3 -> A4 -> A5` + /// - from: A5, to: A4 + /// - route: + /// + /// ```json + /// { blocks: [A5], ancestor: A4, index: 1 } + /// ``` + /// + /// 2.) from older to newer + /// + /// - bc: `A1 -> A2 -> A3 -> A4 -> A5` + /// - from: A3, to: A4 + /// - route: + /// + /// ```json + /// { blocks: [A4], ancestor: A3, index: 0 } + /// ``` + /// + /// 3.) 
fork: + /// + /// - bc: + /// + /// ```text + /// A1 -> A2 -> A3 -> A4 + /// -> B3 -> B4 + /// ``` + /// - from: B4, to: A4 + /// - route: + /// + /// ```json + /// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 } + /// ``` + /// + /// If the tree route verges into pruned or unknown blocks, + /// `None` is returned. + /// + /// `is_from_route_finalized` returns whether the `from` part of the + /// route contains a finalized block. This only holds if the two parts (from + /// and to) are on different branches, ie. on 2 different forks. + pub fn tree_route(&self, from: H256, to: H256) -> Option { + let mut from_branch = vec![]; + let mut is_from_route_finalized = false; + let mut to_branch = vec![]; - let mut from_details = self.block_details(&from)?; - let mut to_details = self.block_details(&to)?; - let mut current_from = from; - let mut current_to = to; + let mut from_details = self.block_details(&from)?; + let mut to_details = self.block_details(&to)?; + let mut current_from = from; + let mut current_to = to; - // reset from && to to the same level - while from_details.number > to_details.number { - from_branch.push(current_from); - is_from_route_finalized = is_from_route_finalized || from_details.is_finalized; - current_from = from_details.parent.clone(); - from_details = self.block_details(&from_details.parent)?; - } + // reset from && to to the same level + while from_details.number > to_details.number { + from_branch.push(current_from); + is_from_route_finalized = is_from_route_finalized || from_details.is_finalized; + current_from = from_details.parent.clone(); + from_details = self.block_details(&from_details.parent)?; + } - while to_details.number > from_details.number { - to_branch.push(current_to); - current_to = to_details.parent.clone(); - to_details = self.block_details(&to_details.parent)?; - } + while to_details.number > from_details.number { + to_branch.push(current_to); + current_to = to_details.parent.clone(); + to_details = 
self.block_details(&to_details.parent)?; + } - assert_eq!(from_details.number, to_details.number); + assert_eq!(from_details.number, to_details.number); - // move to shared parent - while current_from != current_to { - from_branch.push(current_from); - is_from_route_finalized = is_from_route_finalized || from_details.is_finalized; - current_from = from_details.parent.clone(); - from_details = self.block_details(&from_details.parent)?; + // move to shared parent + while current_from != current_to { + from_branch.push(current_from); + is_from_route_finalized = is_from_route_finalized || from_details.is_finalized; + current_from = from_details.parent.clone(); + from_details = self.block_details(&from_details.parent)?; - to_branch.push(current_to); - current_to = to_details.parent.clone(); - to_details = self.block_details(&to_details.parent)?; - } + to_branch.push(current_to); + current_to = to_details.parent.clone(); + to_details = self.block_details(&to_details.parent)?; + } - let index = from_branch.len(); + let index = from_branch.len(); - from_branch.extend(to_branch.into_iter().rev()); + from_branch.extend(to_branch.into_iter().rev()); - Some(TreeRoute { - blocks: from_branch, - ancestor: current_from, - index: index, - is_from_route_finalized: is_from_route_finalized, - }) - } + Some(TreeRoute { + blocks: from_branch, + ancestor: current_from, + index: index, + is_from_route_finalized: is_from_route_finalized, + }) + } - /// Inserts a verified, known block from the canonical chain. - /// - /// Can be performed out-of-order, but care must be taken that the final chain is in a correct state. - /// This is used by snapshot restoration and when downloading missing blocks for the chain gap. - /// `is_best` forces the best block to be updated to this block. - /// `is_ancient` forces the best block of the first block sequence to be updated to this block. 
- /// `parent_td` is a parent total diffuculty - /// Supply a dummy parent total difficulty when the parent block may not be in the chain. - /// Returns true if the block is disconnected. - pub fn insert_unordered_block(&self, batch: &mut DBTransaction, block: encoded::Block, receipts: Vec, parent_td: Option, is_best: bool, is_ancient: bool) -> bool { - let block_number = block.header_view().number(); - let block_parent_hash = block.header_view().parent_hash(); - let block_difficulty = block.header_view().difficulty(); - let hash = block.header_view().hash(); + /// Inserts a verified, known block from the canonical chain. + /// + /// Can be performed out-of-order, but care must be taken that the final chain is in a correct state. + /// This is used by snapshot restoration and when downloading missing blocks for the chain gap. + /// `is_best` forces the best block to be updated to this block. + /// `is_ancient` forces the best block of the first block sequence to be updated to this block. + /// `parent_td` is a parent total diffuculty + /// Supply a dummy parent total difficulty when the parent block may not be in the chain. + /// Returns true if the block is disconnected. 
+ pub fn insert_unordered_block( + &self, + batch: &mut DBTransaction, + block: encoded::Block, + receipts: Vec, + parent_td: Option, + is_best: bool, + is_ancient: bool, + ) -> bool { + let block_number = block.header_view().number(); + let block_parent_hash = block.header_view().parent_hash(); + let block_difficulty = block.header_view().difficulty(); + let hash = block.header_view().hash(); - if self.is_known(&hash) { - return false; - } + if self.is_known(&hash) { + return false; + } - assert!(self.pending_best_block.read().is_none()); + assert!(self.pending_best_block.read().is_none()); - let compressed_header = compress(block.header_view().rlp().as_raw(), blocks_swapper()); - let compressed_body = compress(&Self::block_to_body(block.raw()), blocks_swapper()); + let compressed_header = compress(block.header_view().rlp().as_raw(), blocks_swapper()); + let compressed_body = compress(&Self::block_to_body(block.raw()), blocks_swapper()); - // store block in db - batch.put(db::COL_HEADERS, &hash, &compressed_header); - batch.put(db::COL_BODIES, &hash, &compressed_body); + // store block in db + batch.put(db::COL_HEADERS, &hash, &compressed_header); + batch.put(db::COL_BODIES, &hash, &compressed_body); - let maybe_parent = self.uncommitted_block_details(&block_parent_hash); + let maybe_parent = self.uncommitted_block_details(&block_parent_hash); - if let Some(parent_details) = maybe_parent { - // parent known to be in chain. - let info = BlockInfo { - hash: hash, - number: block_number, - total_difficulty: parent_details.total_difficulty + block_difficulty, - location: BlockLocation::CanonChain, - }; + if let Some(parent_details) = maybe_parent { + // parent known to be in chain. 
+ let info = BlockInfo { + hash: hash, + number: block_number, + total_difficulty: parent_details.total_difficulty + block_difficulty, + location: BlockLocation::CanonChain, + }; - self.prepare_update(batch, ExtrasUpdate { - block_hashes: self.prepare_block_hashes_update(&info), - block_details: self.prepare_block_details_update(block_parent_hash, &info, false), - block_receipts: self.prepare_block_receipts_update(receipts, &info), - blocks_blooms: self.prepare_block_blooms_update(block.header_view().log_bloom(), &info), - transactions_addresses: self.prepare_transaction_addresses_update(block.view().transaction_hashes(), &info), - info: info, - block, - }, is_best); + self.prepare_update( + batch, + ExtrasUpdate { + block_hashes: self.prepare_block_hashes_update(&info), + block_details: self.prepare_block_details_update( + block_parent_hash, + &info, + false, + ), + block_receipts: self.prepare_block_receipts_update(receipts, &info), + blocks_blooms: self + .prepare_block_blooms_update(block.header_view().log_bloom(), &info), + transactions_addresses: self.prepare_transaction_addresses_update( + block.view().transaction_hashes(), + &info, + ), + info: info, + block, + }, + is_best, + ); - if is_ancient { - self.set_best_ancient_block(block_number, &hash, batch); - } + if is_ancient { + self.set_best_ancient_block(block_number, &hash, batch); + } - false - } else { - // parent not in the chain yet. we need the parent difficulty to proceed. - let d = parent_td + false + } else { + // parent not in the chain yet. we need the parent difficulty to proceed. + let d = parent_td .expect("parent total difficulty always supplied for first block in chunk. 
only first block can have missing parent; qed"); - let info = BlockInfo { - hash: hash, - number: block_number, - total_difficulty: d + block_difficulty, - location: BlockLocation::CanonChain, - }; + let info = BlockInfo { + hash: hash, + number: block_number, + total_difficulty: d + block_difficulty, + location: BlockLocation::CanonChain, + }; - // TODO [sorpaas] support warp sync insertion of finalization and metadata. - let block_details = BlockDetails { - number: block_number, - total_difficulty: info.total_difficulty, - parent: block_parent_hash, - children: Vec::new(), - is_finalized: false, - }; + // TODO [sorpaas] support warp sync insertion of finalization and metadata. + let block_details = BlockDetails { + number: block_number, + total_difficulty: info.total_difficulty, + parent: block_parent_hash, + children: Vec::new(), + is_finalized: false, + }; - let mut update = HashMap::new(); - update.insert(hash, block_details); + let mut update = HashMap::new(); + update.insert(hash, block_details); - self.prepare_update(batch, ExtrasUpdate { - block_hashes: self.prepare_block_hashes_update(&info), - block_details: update, - block_receipts: self.prepare_block_receipts_update(receipts, &info), - blocks_blooms: self.prepare_block_blooms_update(block.header_view().log_bloom(), &info), - transactions_addresses: self.prepare_transaction_addresses_update(block.view().transaction_hashes(), &info), - info: info, - block, - }, is_best); - true - } - } + self.prepare_update( + batch, + ExtrasUpdate { + block_hashes: self.prepare_block_hashes_update(&info), + block_details: update, + block_receipts: self.prepare_block_receipts_update(receipts, &info), + blocks_blooms: self + .prepare_block_blooms_update(block.header_view().log_bloom(), &info), + transactions_addresses: self.prepare_transaction_addresses_update( + block.view().transaction_hashes(), + &info, + ), + info: info, + block, + }, + is_best, + ); + true + } + } - /// clears all caches, re-loads best block from 
disk for testing purposes - pub fn clear_cache(&self) { - self.block_bodies.write().clear(); - self.block_details.write().clear(); - self.block_hashes.write().clear(); - self.block_headers.write().clear(); - // Fetch best block details from disk - let best_block_hash = self.db.key_value().get(db::COL_EXTRA, b"best") - .expect("Low-level database error when fetching 'best' block. Some issue with disk?") - .as_ref() - .map(|r| H256::from_slice(r)) - .unwrap(); - let best_block_total_difficulty = self.block_details(&best_block_hash) + /// clears all caches, re-loads best block from disk for testing purposes + pub fn clear_cache(&self) { + self.block_bodies.write().clear(); + self.block_details.write().clear(); + self.block_hashes.write().clear(); + self.block_headers.write().clear(); + // Fetch best block details from disk + let best_block_hash = self + .db + .key_value() + .get(db::COL_EXTRA, b"best") + .expect("Low-level database error when fetching 'best' block. Some issue with disk?") + .as_ref() + .map(|r| H256::from_slice(r)) + .unwrap(); + let best_block_total_difficulty = self.block_details(&best_block_hash) .expect("Best block is from a known block hash; a known block hash always comes with a known block detail; qed") .total_difficulty; - let best_block_rlp = self.block(&best_block_hash) - .expect("Best block is from a known block hash; qed"); + let best_block_rlp = self + .block(&best_block_hash) + .expect("Best block is from a known block hash; qed"); - // and write them to the cache - let mut best_block = self.best_block.write(); - *best_block = BestBlock { - total_difficulty: best_block_total_difficulty, - header: best_block_rlp.decode_header(), - block: best_block_rlp, - }; - } + // and write them to the cache + let mut best_block = self.best_block.write(); + *best_block = BestBlock { + total_difficulty: best_block_total_difficulty, + header: best_block_rlp.decode_header(), + block: best_block_rlp, + }; + } - /// Update the best ancient block to the 
given hash, after checking that - /// it's directly linked to the currently known best ancient block - pub fn update_best_ancient_block(&self, hash: &H256) { - // Get the block view of the next ancient block (it must - // be in DB at this point) - let block_view = match self.block(hash) { - Some(v) => v, - None => return, - }; + /// Update the best ancient block to the given hash, after checking that + /// it's directly linked to the currently known best ancient block + pub fn update_best_ancient_block(&self, hash: &H256) { + // Get the block view of the next ancient block (it must + // be in DB at this point) + let block_view = match self.block(hash) { + Some(v) => v, + None => return, + }; - // So that `best_ancient_block` gets unlocked before calling - // `set_best_ancient_block` - { - // Get the target hash ; if there are no ancient block, - // it means that the chain is already fully linked - // Release the `best_ancient_block` RwLock - let target_hash = { - let best_ancient_block = self.best_ancient_block.read(); - let cur_ancient_block = match *best_ancient_block { - Some(ref b) => b, - None => return, - }; + // So that `best_ancient_block` gets unlocked before calling + // `set_best_ancient_block` + { + // Get the target hash ; if there are no ancient block, + // it means that the chain is already fully linked + // Release the `best_ancient_block` RwLock + let target_hash = { + let best_ancient_block = self.best_ancient_block.read(); + let cur_ancient_block = match *best_ancient_block { + Some(ref b) => b, + None => return, + }; - // Ensure that the new best ancient block is after the current one - if block_view.number() <= cur_ancient_block.number { - return; - } + // Ensure that the new best ancient block is after the current one + if block_view.number() <= cur_ancient_block.number { + return; + } - cur_ancient_block.hash.clone() - }; + cur_ancient_block.hash.clone() + }; - let mut block_hash = *hash; - let mut is_linked = false; + let mut block_hash = 
*hash; + let mut is_linked = false; - loop { - if block_hash == target_hash { - is_linked = true; - break; - } + loop { + if block_hash == target_hash { + is_linked = true; + break; + } - match self.block_details(&block_hash) { - Some(block_details) => { - block_hash = block_details.parent; - }, - None => break, - } - } + match self.block_details(&block_hash) { + Some(block_details) => { + block_hash = block_details.parent; + } + None => break, + } + } - if !is_linked { - trace!(target: "blockchain", "The given block {:x} is not linked to the known ancient block {:x}", hash, target_hash); - return; - } - } + if !is_linked { + trace!(target: "blockchain", "The given block {:x} is not linked to the known ancient block {:x}", hash, target_hash); + return; + } + } - let mut batch = self.db.key_value().transaction(); - self.set_best_ancient_block(block_view.number(), hash, &mut batch); - self.db.key_value().write(batch).expect("Low level database error."); - } + let mut batch = self.db.key_value().transaction(); + self.set_best_ancient_block(block_view.number(), hash, &mut batch); + self.db + .key_value() + .write(batch) + .expect("Low level database error."); + } - /// Set the best ancient block with the given value: private method - /// `best_ancient_block` must not be locked, otherwise a DeadLock would occur - fn set_best_ancient_block(&self, block_number: BlockNumber, block_hash: &H256, batch: &mut DBTransaction) { - let mut pending_best_ancient_block = self.pending_best_ancient_block.write(); - let ancient_number = self.best_ancient_block.read().as_ref().map_or(0, |b| b.number); - if self.block_hash(block_number + 1).is_some() { - trace!(target: "blockchain", "The two ends of the chain have met."); - batch.delete(db::COL_EXTRA, b"ancient"); - *pending_best_ancient_block = Some(None); - } else if block_number > ancient_number { - trace!(target: "blockchain", "Updating the best ancient block to {}.", block_number); - batch.put(db::COL_EXTRA, b"ancient", &block_hash); 
- *pending_best_ancient_block = Some(Some(BestAncientBlock { - hash: *block_hash, - number: block_number, - })); - } - } + /// Set the best ancient block with the given value: private method + /// `best_ancient_block` must not be locked, otherwise a DeadLock would occur + fn set_best_ancient_block( + &self, + block_number: BlockNumber, + block_hash: &H256, + batch: &mut DBTransaction, + ) { + let mut pending_best_ancient_block = self.pending_best_ancient_block.write(); + let ancient_number = self + .best_ancient_block + .read() + .as_ref() + .map_or(0, |b| b.number); + if self.block_hash(block_number + 1).is_some() { + trace!(target: "blockchain", "The two ends of the chain have met."); + batch.delete(db::COL_EXTRA, b"ancient"); + *pending_best_ancient_block = Some(None); + } else if block_number > ancient_number { + trace!(target: "blockchain", "Updating the best ancient block to {}.", block_number); + batch.put(db::COL_EXTRA, b"ancient", &block_hash); + *pending_best_ancient_block = Some(Some(BestAncientBlock { + hash: *block_hash, + number: block_number, + })); + } + } - /// Insert an epoch transition. Provide an epoch number being transitioned to - /// and epoch transition object. - /// - /// The block the transition occurred at should have already been inserted into the chain. - pub fn insert_epoch_transition(&self, batch: &mut DBTransaction, epoch_num: u64, transition: EpochTransition) { - let mut transitions = match self.db.key_value().read(db::COL_EXTRA, &epoch_num) { - Some(existing) => existing, - None => EpochTransitions { - number: epoch_num, - candidates: Vec::with_capacity(1), - } - }; + /// Insert an epoch transition. Provide an epoch number being transitioned to + /// and epoch transition object. + /// + /// The block the transition occurred at should have already been inserted into the chain. 
+ pub fn insert_epoch_transition( + &self, + batch: &mut DBTransaction, + epoch_num: u64, + transition: EpochTransition, + ) { + let mut transitions = match self.db.key_value().read(db::COL_EXTRA, &epoch_num) { + Some(existing) => existing, + None => EpochTransitions { + number: epoch_num, + candidates: Vec::with_capacity(1), + }, + }; - // ensure we don't write any duplicates. - if transitions.candidates.iter().find(|c| c.block_hash == transition.block_hash).is_none() { - transitions.candidates.push(transition); - batch.write(db::COL_EXTRA, &epoch_num, &transitions); - } - } + // ensure we don't write any duplicates. + if transitions + .candidates + .iter() + .find(|c| c.block_hash == transition.block_hash) + .is_none() + { + transitions.candidates.push(transition); + batch.write(db::COL_EXTRA, &epoch_num, &transitions); + } + } - /// Iterate over all epoch transitions. - /// This will only return transitions within the canonical chain. - pub fn epoch_transitions(&self) -> EpochTransitionIter { - let iter = self.db.key_value().iter_from_prefix(db::COL_EXTRA, &EPOCH_KEY_PREFIX[..]); - EpochTransitionIter { - chain: self, - prefix_iter: iter, - } - } + /// Iterate over all epoch transitions. + /// This will only return transitions within the canonical chain. + pub fn epoch_transitions(&self) -> EpochTransitionIter { + let iter = self + .db + .key_value() + .iter_from_prefix(db::COL_EXTRA, &EPOCH_KEY_PREFIX[..]); + EpochTransitionIter { + chain: self, + prefix_iter: iter, + } + } - /// Get a specific epoch transition by block number and provided block hash. - pub fn epoch_transition(&self, block_num: u64, block_hash: H256) -> Option { - trace!(target: "blockchain", "Loading epoch transition at block {}, {}", + /// Get a specific epoch transition by block number and provided block hash. 
+ pub fn epoch_transition(&self, block_num: u64, block_hash: H256) -> Option { + trace!(target: "blockchain", "Loading epoch transition at block {}, {}", block_num, block_hash); - self.db.key_value().read(db::COL_EXTRA, &block_num).and_then(|transitions: EpochTransitions| { - transitions.candidates.into_iter().find(|c| c.block_hash == block_hash) - }) - } - - /// Get the transition to the epoch the given parent hash is part of - /// or transitions to. - /// This will give the epoch that any children of this parent belong to. - /// - /// The block corresponding the the parent hash must be stored already. - pub fn epoch_transition_for(&self, parent_hash: H256) -> Option { - // slow path: loop back block by block - for hash in self.ancestry_iter(parent_hash)? { - let details = self.block_details(&hash)?; - - // look for transition in database. - if let Some(transition) = self.epoch_transition(details.number, hash) { - return Some(transition) - } - - // canonical hash -> fast breakout: - // get the last epoch transition up to this block. - // - // if `block_hash` is canonical it will only return transitions up to - // the parent. - if self.block_hash(details.number)? == hash { - return self.epoch_transitions() - .map(|(_, t)| t) - .take_while(|t| t.block_number <= details.number) - .last() - } - } - - // should never happen as the loop will encounter genesis before concluding. - None - } - - /// Write a pending epoch transition by block hash. - pub fn insert_pending_transition(&self, batch: &mut DBTransaction, hash: H256, t: PendingEpochTransition) { - batch.write(db::COL_EXTRA, &hash, &t); - } - - /// Get a pending epoch transition by block hash. - // TODO: implement removal safely: this can only be done upon finality of a block - // that _uses_ the pending transition. - pub fn get_pending_transition(&self, hash: H256) -> Option { - self.db.key_value().read(db::COL_EXTRA, &hash) - } - - /// Add a child to a given block. 
Assumes that the block hash is in - /// the chain and the child's parent is this block. - /// - /// Used in snapshots to glue the chunks together at the end. - pub fn add_child(&self, batch: &mut DBTransaction, block_hash: H256, child_hash: H256) { - let mut parent_details = self.uncommitted_block_details(&block_hash) - .unwrap_or_else(|| panic!("Invalid block hash: {:?}", block_hash)); - - parent_details.children.push(child_hash); - - let mut update = HashMap::new(); - update.insert(block_hash, parent_details); - - let mut write_details = self.block_details.write(); - batch.extend_with_cache(db::COL_EXTRA, &mut *write_details, update, CacheUpdatePolicy::Overwrite); - - self.cache_man.lock().note_used(CacheId::BlockDetails(block_hash)); - } - - /// Inserts the block into backing cache database. - /// Expects the block to be valid and already verified. - /// If the block is already known, does nothing. - pub fn insert_block(&self, batch: &mut DBTransaction, block: encoded::Block, receipts: Vec, extras: ExtrasInsert) -> ImportRoute { - let parent_hash = block.header_view().parent_hash(); - let best_hash = self.best_block_hash(); - - let route = self.tree_route(best_hash, parent_hash).expect("forks are only kept when it has common ancestors; tree route from best to prospective's parent always exists; qed"); - - self.insert_block_with_route(batch, block, receipts, route, extras) - } - - /// Inserts the block into backing cache database with already generated route information. - /// Expects the block to be valid and already verified and route is tree route information from current best block to new block's parent. - /// If the block is already known, does nothing. 
- pub fn insert_block_with_route(&self, batch: &mut DBTransaction, block: encoded::Block, receipts: Vec, route: TreeRoute, extras: ExtrasInsert) -> ImportRoute { - let hash = block.header_view().hash(); - let parent_hash = block.header_view().parent_hash(); - - if self.is_known_child(&parent_hash, &hash) { - return ImportRoute::none(); - } - - assert!(self.pending_best_block.read().is_none()); - - let compressed_header = compress(block.header_view().rlp().as_raw(), blocks_swapper()); - let compressed_body = compress(&Self::block_to_body(block.raw()), blocks_swapper()); - - // store block in db - batch.put(db::COL_HEADERS, &hash, &compressed_header); - batch.put(db::COL_BODIES, &hash, &compressed_body); - - let info = self.block_info(&block.header_view(), route, &extras); - - if let BlockLocation::BranchBecomingCanonChain(ref d) = info.location { - info!(target: "reorg", "Reorg to {} ({} {} {})", - Colour::Yellow.bold().paint(format!("#{} {}", info.number, info.hash)), - Colour::Red.paint(d.retracted.iter().join(" ")), - Colour::White.paint(format!("#{} {}", self.block_details(&d.ancestor).expect("`ancestor` is in the route; qed").number, d.ancestor)), - Colour::Green.paint(d.enacted.iter().join(" ")) - ); - } - - self.prepare_update(batch, ExtrasUpdate { - block_hashes: self.prepare_block_hashes_update(&info), - block_details: self.prepare_block_details_update(parent_hash, &info, extras.is_finalized), - block_receipts: self.prepare_block_receipts_update(receipts, &info), - blocks_blooms: self.prepare_block_blooms_update(block.header_view().log_bloom(), &info), - transactions_addresses: self.prepare_transaction_addresses_update(block.view().transaction_hashes(), &info), - info: info.clone(), - block, - }, true); - - ImportRoute::from(info) - } - - /// Get inserted block info which is critical to prepare extras updates. 
- fn block_info(&self, header: &HeaderView, route: TreeRoute, extras: &ExtrasInsert) -> BlockInfo { - let hash = header.hash(); - let number = header.number(); - let parent_hash = header.parent_hash(); - let parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash)); - - BlockInfo { - hash: hash, - number: number, - total_difficulty: parent_details.total_difficulty + header.difficulty(), - location: match extras.fork_choice { - ForkChoice::New => { - // On new best block we need to make sure that all ancestors - // are moved to "canon chain" - // find the route between old best block and the new one - match route.blocks.len() { - 0 => BlockLocation::CanonChain, - _ => { - let retracted = route.blocks.iter().take(route.index).cloned().collect::>().into_iter().collect::>(); - let enacted = route.blocks.into_iter().skip(route.index).collect::>(); - BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData { - ancestor: route.ancestor, - enacted: enacted, - retracted: retracted, - }) - } - } - }, - ForkChoice::Old => BlockLocation::Branch, - }, - } - } - - /// Mark a block to be considered finalized. Returns `Some(())` if the operation succeeds, and `None` if the block - /// hash is not found. - pub fn mark_finalized(&self, batch: &mut DBTransaction, block_hash: H256) -> Option<()> { - let mut block_details = self.uncommitted_block_details(&block_hash)?; - block_details.is_finalized = true; - - self.update_block_details(batch, block_hash, block_details); - Some(()) - } - - /// Prepares extras block detail update. - fn update_block_details(&self, batch: &mut DBTransaction, block_hash: H256, block_details: BlockDetails) { - let mut details_map = HashMap::new(); - details_map.insert(block_hash, block_details); - - // We're only updating one existing value. So it shouldn't suffer from cache decoherence problem. 
- let mut write_details = self.pending_block_details.write(); - batch.extend_with_cache(db::COL_EXTRA, &mut *write_details, details_map, CacheUpdatePolicy::Overwrite); - } - - /// Prepares extras update. - fn prepare_update(&self, batch: &mut DBTransaction, update: ExtrasUpdate, is_best: bool) { - - { - let mut write_receipts = self.block_receipts.write(); - batch.extend_with_cache(db::COL_EXTRA, &mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove); - } - - if let Some((block, blooms)) = update.blocks_blooms { - self.db.blooms() - .insert_blooms(block, blooms.iter()) - .expect("Low level database error when updating blooms. Some issue with disk?"); - } - - // These cached values must be updated last with all four locks taken to avoid - // cache decoherence - { - let mut best_block = self.pending_best_block.write(); - if is_best && update.info.location != BlockLocation::Branch { - batch.put(db::COL_EXTRA, b"best", &update.info.hash); - *best_block = Some(BestBlock { - total_difficulty: update.info.total_difficulty, - header: update.block.decode_header(), - block: update.block, - }); - } - - let mut write_hashes = self.pending_block_hashes.write(); - let mut write_details = self.pending_block_details.write(); - let mut write_txs = self.pending_transaction_addresses.write(); - - batch.extend_with_cache(db::COL_EXTRA, &mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite); - batch.extend_with_cache(db::COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Overwrite); - batch.extend_with_option_cache(db::COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite); - } - } - - /// Apply pending insertion updates - pub fn commit(&self) { - let mut pending_best_ancient_block = self.pending_best_ancient_block.write(); - let mut pending_best_block = self.pending_best_block.write(); - let mut pending_write_hashes = self.pending_block_hashes.write(); - let mut pending_block_details = 
self.pending_block_details.write(); - let mut pending_write_txs = self.pending_transaction_addresses.write(); - - let mut best_block = self.best_block.write(); - let mut best_ancient_block = self.best_ancient_block.write(); - let mut write_block_details = self.block_details.write(); - let mut write_hashes = self.block_hashes.write(); - let mut write_txs = self.transaction_addresses.write(); - // update best ancient block - if let Some(block_option) = pending_best_ancient_block.take() { - *best_ancient_block = block_option; - } - // update best block - if let Some(block) = pending_best_block.take() { - *best_block = block; - } - - let pending_txs = mem::replace(&mut *pending_write_txs, HashMap::new()); - let (retracted_txs, enacted_txs) = pending_txs.into_iter().partition::, _>(|&(_, ref value)| value.is_none()); - - let pending_hashes_keys: Vec<_> = pending_write_hashes.keys().cloned().collect(); - let enacted_txs_keys: Vec<_> = enacted_txs.keys().cloned().collect(); - let pending_block_hashes: Vec<_> = pending_block_details.keys().cloned().collect(); - - write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new())); - write_txs.extend(enacted_txs.into_iter().map(|(k, v)| (k, v.expect("Transactions were partitioned; qed")))); - write_block_details.extend(mem::replace(&mut *pending_block_details, HashMap::new())); - - for hash in retracted_txs.keys() { - write_txs.remove(hash); - } - - let mut cache_man = self.cache_man.lock(); - for n in pending_hashes_keys { - cache_man.note_used(CacheId::BlockHashes(n)); - } - - for hash in enacted_txs_keys { - cache_man.note_used(CacheId::TransactionAddresses(hash)); - } - - for hash in pending_block_hashes { - cache_man.note_used(CacheId::BlockDetails(hash)); - } - } - - /// Iterator that lists `first` and then all of `first`'s ancestors, by hash. 
- pub fn ancestry_iter(&self, first: H256) -> Option { - if self.is_known(&first) { - Some(AncestryIter { - current: first, - chain: self, - }) - } else { - None - } - } - - /// Iterator that lists `first` and then all of `first`'s ancestors, by extended header. - pub fn ancestry_with_metadata_iter<'a>(&'a self, first: H256) -> AncestryWithMetadataIter { - AncestryWithMetadataIter { - current: if self.is_known(&first) { - first - } else { - H256::default() // zero hash - }, - chain: self - } - } - - /// Given a block's `parent`, find every block header which represents a valid possible uncle. - pub fn find_uncle_headers(&self, parent: &H256, uncle_generations: usize) -> Option> { - self.find_uncle_hashes(parent, uncle_generations) - .map(|v| v.into_iter().filter_map(|h| self.block_header_data(&h)).collect()) - } - - /// Given a block's `parent`, find every block hash which represents a valid possible uncle. - pub fn find_uncle_hashes(&self, parent: &H256, uncle_generations: usize) -> Option> { - if !self.is_known(parent) { - return None; - } - - let mut excluded = HashSet::new(); - let ancestry = self.ancestry_iter(parent.clone())?; - - for a in ancestry.clone().take(uncle_generations) { - if let Some(uncles) = self.uncle_hashes(&a) { - excluded.extend(uncles); - excluded.insert(a); - } else { - break - } - } - - let mut ret = Vec::new(); - for a in ancestry.skip(1).take(uncle_generations) { - if let Some(details) = self.block_details(&a) { - ret.extend(details.children.iter().filter(|h| !excluded.contains(h))) - } else { - break - } - } - - Some(ret) - } - - /// This function returns modified block hashes. 
- fn prepare_block_hashes_update(&self, info: &BlockInfo) -> HashMap { - let mut block_hashes = HashMap::new(); - - match info.location { - BlockLocation::Branch => (), - BlockLocation::CanonChain => { - block_hashes.insert(info.number, info.hash); - }, - BlockLocation::BranchBecomingCanonChain(ref data) => { - let ancestor_number = self.block_number(&data.ancestor).expect("Block number of ancestor is always in DB"); - let start_number = ancestor_number + 1; - - for (index, hash) in data.enacted.iter().cloned().enumerate() { - block_hashes.insert(start_number + index as BlockNumber, hash); - } - - block_hashes.insert(info.number, info.hash); - } - } - - block_hashes - } - - /// This function returns modified block details. - /// Uses the given parent details or attempts to load them from the database. - fn prepare_block_details_update(&self, parent_hash: H256, info: &BlockInfo, is_finalized: bool) -> HashMap { - // update parent - let mut parent_details = self.uncommitted_block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash)); - parent_details.children.push(info.hash); - - // create current block details. - let details = BlockDetails { - number: info.number, - total_difficulty: info.total_difficulty, - parent: parent_hash, - children: vec![], - is_finalized: is_finalized, - }; - - // write to batch - let mut block_details = HashMap::new(); - block_details.insert(parent_hash, parent_details); - block_details.insert(info.hash, details); - block_details - } - - /// This function returns modified block receipts. - fn prepare_block_receipts_update(&self, receipts: Vec, info: &BlockInfo) -> HashMap { - let mut block_receipts = HashMap::new(); - block_receipts.insert(info.hash, BlockReceipts::new(receipts)); - block_receipts - } - - /// This function returns modified transaction addresses. 
- fn prepare_transaction_addresses_update(&self, transaction_hashes: Vec, info: &BlockInfo) -> HashMap> { - match info.location { - BlockLocation::CanonChain => { - transaction_hashes.into_iter() - .enumerate() - .map(|(i ,tx_hash)| { - (tx_hash, Some(TransactionAddress { - block_hash: info.hash, - index: i - })) - }) - .collect() - }, - BlockLocation::BranchBecomingCanonChain(ref data) => { - let addresses = data.enacted.iter() - .flat_map(|hash| { - let body = self.block_body(hash).expect("Enacted block must be in database."); - let hashes = body.transaction_hashes(); - hashes.into_iter() - .enumerate() - .map(|(i, tx_hash)| (tx_hash, Some(TransactionAddress { - block_hash: *hash, - index: i, - }))) - .collect::>>() - }); - - let current_addresses = transaction_hashes.into_iter() - .enumerate() - .map(|(i ,tx_hash)| { - (tx_hash, Some(TransactionAddress { - block_hash: info.hash, - index: i - })) - }); - - let retracted = data.retracted.iter().flat_map(|hash| { - let body = self.block_body(hash).expect("Retracted block must be in database."); - let hashes = body.transaction_hashes(); - hashes.into_iter().map(|hash| (hash, None)).collect::>>() - }); - - // The order here is important! Don't remove transaction if it was part of enacted blocks as well. - retracted.chain(addresses).chain(current_addresses).collect() - }, - BlockLocation::Branch => HashMap::new(), - } - } - - /// This functions returns modified blocks blooms. - /// - /// To accelerate blooms lookups, blomms are stored in multiple - /// layers (BLOOM_LEVELS, currently 3). - /// ChainFilter is responsible for building and rebuilding these layers. - /// It returns them in HashMap, where values are Blooms and - /// keys are BloomIndexes. BloomIndex represents bloom location on one - /// of these layers. 
- /// - /// To reduce number of queries to databse, block blooms are stored - /// in BlocksBlooms structure which contains info about several - /// (BLOOM_INDEX_SIZE, currently 16) consecutive blocks blooms. - /// - /// Later, BloomIndexer is used to map bloom location on filter layer (BloomIndex) - /// to bloom location in database (BlocksBloomLocation). - /// - fn prepare_block_blooms_update(&self, log_bloom: Bloom, info: &BlockInfo) -> Option<(u64, Vec)> { - match info.location { - BlockLocation::Branch => None, - BlockLocation::CanonChain => { - if log_bloom.is_zero() { - None - } else { - Some((info.number, vec![log_bloom])) - } - }, - BlockLocation::BranchBecomingCanonChain(ref data) => { - let ancestor_number = self.block_number(&data.ancestor) + self.db + .key_value() + .read(db::COL_EXTRA, &block_num) + .and_then(|transitions: EpochTransitions| { + transitions + .candidates + .into_iter() + .find(|c| c.block_hash == block_hash) + }) + } + + /// Get the transition to the epoch the given parent hash is part of + /// or transitions to. + /// This will give the epoch that any children of this parent belong to. + /// + /// The block corresponding the the parent hash must be stored already. + pub fn epoch_transition_for(&self, parent_hash: H256) -> Option { + // slow path: loop back block by block + for hash in self.ancestry_iter(parent_hash)? { + let details = self.block_details(&hash)?; + + // look for transition in database. + if let Some(transition) = self.epoch_transition(details.number, hash) { + return Some(transition); + } + + // canonical hash -> fast breakout: + // get the last epoch transition up to this block. + // + // if `block_hash` is canonical it will only return transitions up to + // the parent. + if self.block_hash(details.number)? 
== hash { + return self + .epoch_transitions() + .map(|(_, t)| t) + .take_while(|t| t.block_number <= details.number) + .last(); + } + } + + // should never happen as the loop will encounter genesis before concluding. + None + } + + /// Write a pending epoch transition by block hash. + pub fn insert_pending_transition( + &self, + batch: &mut DBTransaction, + hash: H256, + t: PendingEpochTransition, + ) { + batch.write(db::COL_EXTRA, &hash, &t); + } + + /// Get a pending epoch transition by block hash. + // TODO: implement removal safely: this can only be done upon finality of a block + // that _uses_ the pending transition. + pub fn get_pending_transition(&self, hash: H256) -> Option { + self.db.key_value().read(db::COL_EXTRA, &hash) + } + + /// Add a child to a given block. Assumes that the block hash is in + /// the chain and the child's parent is this block. + /// + /// Used in snapshots to glue the chunks together at the end. + pub fn add_child(&self, batch: &mut DBTransaction, block_hash: H256, child_hash: H256) { + let mut parent_details = self + .uncommitted_block_details(&block_hash) + .unwrap_or_else(|| panic!("Invalid block hash: {:?}", block_hash)); + + parent_details.children.push(child_hash); + + let mut update = HashMap::new(); + update.insert(block_hash, parent_details); + + let mut write_details = self.block_details.write(); + batch.extend_with_cache( + db::COL_EXTRA, + &mut *write_details, + update, + CacheUpdatePolicy::Overwrite, + ); + + self.cache_man + .lock() + .note_used(CacheId::BlockDetails(block_hash)); + } + + /// Inserts the block into backing cache database. + /// Expects the block to be valid and already verified. + /// If the block is already known, does nothing. 
+ pub fn insert_block( + &self, + batch: &mut DBTransaction, + block: encoded::Block, + receipts: Vec, + extras: ExtrasInsert, + ) -> ImportRoute { + let parent_hash = block.header_view().parent_hash(); + let best_hash = self.best_block_hash(); + + let route = self.tree_route(best_hash, parent_hash).expect("forks are only kept when it has common ancestors; tree route from best to prospective's parent always exists; qed"); + + self.insert_block_with_route(batch, block, receipts, route, extras) + } + + /// Inserts the block into backing cache database with already generated route information. + /// Expects the block to be valid and already verified and route is tree route information from current best block to new block's parent. + /// If the block is already known, does nothing. + pub fn insert_block_with_route( + &self, + batch: &mut DBTransaction, + block: encoded::Block, + receipts: Vec, + route: TreeRoute, + extras: ExtrasInsert, + ) -> ImportRoute { + let hash = block.header_view().hash(); + let parent_hash = block.header_view().parent_hash(); + + if self.is_known_child(&parent_hash, &hash) { + return ImportRoute::none(); + } + + assert!(self.pending_best_block.read().is_none()); + + let compressed_header = compress(block.header_view().rlp().as_raw(), blocks_swapper()); + let compressed_body = compress(&Self::block_to_body(block.raw()), blocks_swapper()); + + // store block in db + batch.put(db::COL_HEADERS, &hash, &compressed_header); + batch.put(db::COL_BODIES, &hash, &compressed_body); + + let info = self.block_info(&block.header_view(), route, &extras); + + if let BlockLocation::BranchBecomingCanonChain(ref d) = info.location { + info!(target: "reorg", "Reorg to {} ({} {} {})", + Colour::Yellow.bold().paint(format!("#{} {}", info.number, info.hash)), + Colour::Red.paint(d.retracted.iter().join(" ")), + Colour::White.paint(format!("#{} {}", self.block_details(&d.ancestor).expect("`ancestor` is in the route; qed").number, d.ancestor)), + 
Colour::Green.paint(d.enacted.iter().join(" ")) + ); + } + + self.prepare_update( + batch, + ExtrasUpdate { + block_hashes: self.prepare_block_hashes_update(&info), + block_details: self.prepare_block_details_update( + parent_hash, + &info, + extras.is_finalized, + ), + block_receipts: self.prepare_block_receipts_update(receipts, &info), + blocks_blooms: self + .prepare_block_blooms_update(block.header_view().log_bloom(), &info), + transactions_addresses: self + .prepare_transaction_addresses_update(block.view().transaction_hashes(), &info), + info: info.clone(), + block, + }, + true, + ); + + ImportRoute::from(info) + } + + /// Get inserted block info which is critical to prepare extras updates. + fn block_info( + &self, + header: &HeaderView, + route: TreeRoute, + extras: &ExtrasInsert, + ) -> BlockInfo { + let hash = header.hash(); + let number = header.number(); + let parent_hash = header.parent_hash(); + let parent_details = self + .block_details(&parent_hash) + .unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash)); + + BlockInfo { + hash: hash, + number: number, + total_difficulty: parent_details.total_difficulty + header.difficulty(), + location: match extras.fork_choice { + ForkChoice::New => { + // On new best block we need to make sure that all ancestors + // are moved to "canon chain" + // find the route between old best block and the new one + match route.blocks.len() { + 0 => BlockLocation::CanonChain, + _ => { + let retracted = route + .blocks + .iter() + .take(route.index) + .cloned() + .collect::>() + .into_iter() + .collect::>(); + let enacted = route + .blocks + .into_iter() + .skip(route.index) + .collect::>(); + BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData { + ancestor: route.ancestor, + enacted: enacted, + retracted: retracted, + }) + } + } + } + ForkChoice::Old => BlockLocation::Branch, + }, + } + } + + /// Mark a block to be considered finalized. 
Returns `Some(())` if the operation succeeds, and `None` if the block + /// hash is not found. + pub fn mark_finalized(&self, batch: &mut DBTransaction, block_hash: H256) -> Option<()> { + let mut block_details = self.uncommitted_block_details(&block_hash)?; + block_details.is_finalized = true; + + self.update_block_details(batch, block_hash, block_details); + Some(()) + } + + /// Prepares extras block detail update. + fn update_block_details( + &self, + batch: &mut DBTransaction, + block_hash: H256, + block_details: BlockDetails, + ) { + let mut details_map = HashMap::new(); + details_map.insert(block_hash, block_details); + + // We're only updating one existing value. So it shouldn't suffer from cache decoherence problem. + let mut write_details = self.pending_block_details.write(); + batch.extend_with_cache( + db::COL_EXTRA, + &mut *write_details, + details_map, + CacheUpdatePolicy::Overwrite, + ); + } + + /// Prepares extras update. + fn prepare_update(&self, batch: &mut DBTransaction, update: ExtrasUpdate, is_best: bool) { + { + let mut write_receipts = self.block_receipts.write(); + batch.extend_with_cache( + db::COL_EXTRA, + &mut *write_receipts, + update.block_receipts, + CacheUpdatePolicy::Remove, + ); + } + + if let Some((block, blooms)) = update.blocks_blooms { + self.db + .blooms() + .insert_blooms(block, blooms.iter()) + .expect("Low level database error when updating blooms. 
Some issue with disk?"); + } + + // These cached values must be updated last with all four locks taken to avoid + // cache decoherence + { + let mut best_block = self.pending_best_block.write(); + if is_best && update.info.location != BlockLocation::Branch { + batch.put(db::COL_EXTRA, b"best", &update.info.hash); + *best_block = Some(BestBlock { + total_difficulty: update.info.total_difficulty, + header: update.block.decode_header(), + block: update.block, + }); + } + + let mut write_hashes = self.pending_block_hashes.write(); + let mut write_details = self.pending_block_details.write(); + let mut write_txs = self.pending_transaction_addresses.write(); + + batch.extend_with_cache( + db::COL_EXTRA, + &mut *write_details, + update.block_details, + CacheUpdatePolicy::Overwrite, + ); + batch.extend_with_cache( + db::COL_EXTRA, + &mut *write_hashes, + update.block_hashes, + CacheUpdatePolicy::Overwrite, + ); + batch.extend_with_option_cache( + db::COL_EXTRA, + &mut *write_txs, + update.transactions_addresses, + CacheUpdatePolicy::Overwrite, + ); + } + } + + /// Apply pending insertion updates + pub fn commit(&self) { + let mut pending_best_ancient_block = self.pending_best_ancient_block.write(); + let mut pending_best_block = self.pending_best_block.write(); + let mut pending_write_hashes = self.pending_block_hashes.write(); + let mut pending_block_details = self.pending_block_details.write(); + let mut pending_write_txs = self.pending_transaction_addresses.write(); + + let mut best_block = self.best_block.write(); + let mut best_ancient_block = self.best_ancient_block.write(); + let mut write_block_details = self.block_details.write(); + let mut write_hashes = self.block_hashes.write(); + let mut write_txs = self.transaction_addresses.write(); + // update best ancient block + if let Some(block_option) = pending_best_ancient_block.take() { + *best_ancient_block = block_option; + } + // update best block + if let Some(block) = pending_best_block.take() { + *best_block = 
block; + } + + let pending_txs = mem::replace(&mut *pending_write_txs, HashMap::new()); + let (retracted_txs, enacted_txs) = pending_txs + .into_iter() + .partition::, _>(|&(_, ref value)| value.is_none()); + + let pending_hashes_keys: Vec<_> = pending_write_hashes.keys().cloned().collect(); + let enacted_txs_keys: Vec<_> = enacted_txs.keys().cloned().collect(); + let pending_block_hashes: Vec<_> = pending_block_details.keys().cloned().collect(); + + write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new())); + write_txs.extend( + enacted_txs + .into_iter() + .map(|(k, v)| (k, v.expect("Transactions were partitioned; qed"))), + ); + write_block_details.extend(mem::replace(&mut *pending_block_details, HashMap::new())); + + for hash in retracted_txs.keys() { + write_txs.remove(hash); + } + + let mut cache_man = self.cache_man.lock(); + for n in pending_hashes_keys { + cache_man.note_used(CacheId::BlockHashes(n)); + } + + for hash in enacted_txs_keys { + cache_man.note_used(CacheId::TransactionAddresses(hash)); + } + + for hash in pending_block_hashes { + cache_man.note_used(CacheId::BlockDetails(hash)); + } + } + + /// Iterator that lists `first` and then all of `first`'s ancestors, by hash. + pub fn ancestry_iter(&self, first: H256) -> Option { + if self.is_known(&first) { + Some(AncestryIter { + current: first, + chain: self, + }) + } else { + None + } + } + + /// Iterator that lists `first` and then all of `first`'s ancestors, by extended header. + pub fn ancestry_with_metadata_iter<'a>(&'a self, first: H256) -> AncestryWithMetadataIter { + AncestryWithMetadataIter { + current: if self.is_known(&first) { + first + } else { + H256::default() // zero hash + }, + chain: self, + } + } + + /// Given a block's `parent`, find every block header which represents a valid possible uncle. 
+ pub fn find_uncle_headers( + &self, + parent: &H256, + uncle_generations: usize, + ) -> Option> { + self.find_uncle_hashes(parent, uncle_generations).map(|v| { + v.into_iter() + .filter_map(|h| self.block_header_data(&h)) + .collect() + }) + } + + /// Given a block's `parent`, find every block hash which represents a valid possible uncle. + pub fn find_uncle_hashes(&self, parent: &H256, uncle_generations: usize) -> Option> { + if !self.is_known(parent) { + return None; + } + + let mut excluded = HashSet::new(); + let ancestry = self.ancestry_iter(parent.clone())?; + + for a in ancestry.clone().take(uncle_generations) { + if let Some(uncles) = self.uncle_hashes(&a) { + excluded.extend(uncles); + excluded.insert(a); + } else { + break; + } + } + + let mut ret = Vec::new(); + for a in ancestry.skip(1).take(uncle_generations) { + if let Some(details) = self.block_details(&a) { + ret.extend(details.children.iter().filter(|h| !excluded.contains(h))) + } else { + break; + } + } + + Some(ret) + } + + /// This function returns modified block hashes. + fn prepare_block_hashes_update(&self, info: &BlockInfo) -> HashMap { + let mut block_hashes = HashMap::new(); + + match info.location { + BlockLocation::Branch => (), + BlockLocation::CanonChain => { + block_hashes.insert(info.number, info.hash); + } + BlockLocation::BranchBecomingCanonChain(ref data) => { + let ancestor_number = self + .block_number(&data.ancestor) + .expect("Block number of ancestor is always in DB"); + let start_number = ancestor_number + 1; + + for (index, hash) in data.enacted.iter().cloned().enumerate() { + block_hashes.insert(start_number + index as BlockNumber, hash); + } + + block_hashes.insert(info.number, info.hash); + } + } + + block_hashes + } + + /// This function returns modified block details. + /// Uses the given parent details or attempts to load them from the database. 
+ fn prepare_block_details_update( + &self, + parent_hash: H256, + info: &BlockInfo, + is_finalized: bool, + ) -> HashMap { + // update parent + let mut parent_details = self + .uncommitted_block_details(&parent_hash) + .unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash)); + parent_details.children.push(info.hash); + + // create current block details. + let details = BlockDetails { + number: info.number, + total_difficulty: info.total_difficulty, + parent: parent_hash, + children: vec![], + is_finalized: is_finalized, + }; + + // write to batch + let mut block_details = HashMap::new(); + block_details.insert(parent_hash, parent_details); + block_details.insert(info.hash, details); + block_details + } + + /// This function returns modified block receipts. + fn prepare_block_receipts_update( + &self, + receipts: Vec, + info: &BlockInfo, + ) -> HashMap { + let mut block_receipts = HashMap::new(); + block_receipts.insert(info.hash, BlockReceipts::new(receipts)); + block_receipts + } + + /// This function returns modified transaction addresses. 
+ fn prepare_transaction_addresses_update( + &self, + transaction_hashes: Vec, + info: &BlockInfo, + ) -> HashMap> { + match info.location { + BlockLocation::CanonChain => transaction_hashes + .into_iter() + .enumerate() + .map(|(i, tx_hash)| { + ( + tx_hash, + Some(TransactionAddress { + block_hash: info.hash, + index: i, + }), + ) + }) + .collect(), + BlockLocation::BranchBecomingCanonChain(ref data) => { + let addresses = data.enacted.iter().flat_map(|hash| { + let body = self + .block_body(hash) + .expect("Enacted block must be in database."); + let hashes = body.transaction_hashes(); + hashes + .into_iter() + .enumerate() + .map(|(i, tx_hash)| { + ( + tx_hash, + Some(TransactionAddress { + block_hash: *hash, + index: i, + }), + ) + }) + .collect::>>() + }); + + let current_addresses = + transaction_hashes + .into_iter() + .enumerate() + .map(|(i, tx_hash)| { + ( + tx_hash, + Some(TransactionAddress { + block_hash: info.hash, + index: i, + }), + ) + }); + + let retracted = data.retracted.iter().flat_map(|hash| { + let body = self + .block_body(hash) + .expect("Retracted block must be in database."); + let hashes = body.transaction_hashes(); + hashes + .into_iter() + .map(|hash| (hash, None)) + .collect::>>() + }); + + // The order here is important! Don't remove transaction if it was part of enacted blocks as well. + retracted + .chain(addresses) + .chain(current_addresses) + .collect() + } + BlockLocation::Branch => HashMap::new(), + } + } + + /// This functions returns modified blocks blooms. + /// + /// To accelerate blooms lookups, blomms are stored in multiple + /// layers (BLOOM_LEVELS, currently 3). + /// ChainFilter is responsible for building and rebuilding these layers. + /// It returns them in HashMap, where values are Blooms and + /// keys are BloomIndexes. BloomIndex represents bloom location on one + /// of these layers. 
+ /// + /// To reduce number of queries to databse, block blooms are stored + /// in BlocksBlooms structure which contains info about several + /// (BLOOM_INDEX_SIZE, currently 16) consecutive blocks blooms. + /// + /// Later, BloomIndexer is used to map bloom location on filter layer (BloomIndex) + /// to bloom location in database (BlocksBloomLocation). + /// + fn prepare_block_blooms_update( + &self, + log_bloom: Bloom, + info: &BlockInfo, + ) -> Option<(u64, Vec)> { + match info.location { + BlockLocation::Branch => None, + BlockLocation::CanonChain => { + if log_bloom.is_zero() { + None + } else { + Some((info.number, vec![log_bloom])) + } + } + BlockLocation::BranchBecomingCanonChain(ref data) => { + let ancestor_number = self.block_number(&data.ancestor) .expect("hash belongs to an ancestor of an inserted block; this branch is only reachable for normal block insertion (non-ancient); ancestors of an inserted block are always available for normal block insertion; block number of an inserted block is always available; qed"); - let start_number = ancestor_number + 1; + let start_number = ancestor_number + 1; - let mut blooms: Vec = data.enacted.iter() + let mut blooms: Vec = data.enacted.iter() .map(|hash| self.block_header_data(hash) .expect("hash belongs to an inserted block; block header data of an inserted block is always available; qed")) .map(|h| h.log_bloom()) .collect(); - blooms.push(log_bloom); - Some((start_number, blooms)) - } - } - } + blooms.push(log_bloom); + Some((start_number, blooms)) + } + } + } - /// Get best block hash. - pub fn best_block_hash(&self) -> H256 { - self.best_block.read().header.hash() - } + /// Get best block hash. + pub fn best_block_hash(&self) -> H256 { + self.best_block.read().header.hash() + } - /// Get best block number. - pub fn best_block_number(&self) -> BlockNumber { - self.best_block.read().header.number() - } + /// Get best block number. 
+ pub fn best_block_number(&self) -> BlockNumber { + self.best_block.read().header.number() + } - /// Get best block timestamp. - pub fn best_block_timestamp(&self) -> u64 { - self.best_block.read().header.timestamp() - } + /// Get best block timestamp. + pub fn best_block_timestamp(&self) -> u64 { + self.best_block.read().header.timestamp() + } - /// Get best block total difficulty. - pub fn best_block_total_difficulty(&self) -> U256 { - self.best_block.read().total_difficulty - } + /// Get best block total difficulty. + pub fn best_block_total_difficulty(&self) -> U256 { + self.best_block.read().total_difficulty + } - /// Get best block header - pub fn best_block_header(&self) -> Header { - self.best_block.read().header.clone() - } + /// Get best block header + pub fn best_block_header(&self) -> Header { + self.best_block.read().header.clone() + } - /// Get current cache size. - pub fn cache_size(&self) -> CacheSize { - CacheSize { - blocks: self.block_headers.read().heap_size_of_children() + self.block_bodies.read().heap_size_of_children(), - block_details: self.block_details.read().heap_size_of_children(), - transaction_addresses: self.transaction_addresses.read().heap_size_of_children(), - block_receipts: self.block_receipts.read().heap_size_of_children(), - } - } + /// Get current cache size. + pub fn cache_size(&self) -> CacheSize { + CacheSize { + blocks: self.block_headers.read().heap_size_of_children() + + self.block_bodies.read().heap_size_of_children(), + block_details: self.block_details.read().heap_size_of_children(), + transaction_addresses: self.transaction_addresses.read().heap_size_of_children(), + block_receipts: self.block_receipts.read().heap_size_of_children(), + } + } - /// Ticks our cache system and throws out any old data. - pub fn collect_garbage(&self) { - let current_size = self.cache_size().total(); + /// Ticks our cache system and throws out any old data. 
+ pub fn collect_garbage(&self) { + let current_size = self.cache_size().total(); - let mut block_headers = self.block_headers.write(); - let mut block_bodies = self.block_bodies.write(); - let mut block_details = self.block_details.write(); - let mut block_hashes = self.block_hashes.write(); - let mut transaction_addresses = self.transaction_addresses.write(); - let mut block_receipts = self.block_receipts.write(); + let mut block_headers = self.block_headers.write(); + let mut block_bodies = self.block_bodies.write(); + let mut block_details = self.block_details.write(); + let mut block_hashes = self.block_hashes.write(); + let mut transaction_addresses = self.transaction_addresses.write(); + let mut block_receipts = self.block_receipts.write(); - let mut cache_man = self.cache_man.lock(); - cache_man.collect_garbage(current_size, | ids | { - for id in &ids { - match *id { - CacheId::BlockHeader(ref h) => { block_headers.remove(h); }, - CacheId::BlockBody(ref h) => { block_bodies.remove(h); }, - CacheId::BlockDetails(ref h) => { block_details.remove(h); } - CacheId::BlockHashes(ref h) => { block_hashes.remove(h); } - CacheId::TransactionAddresses(ref h) => { transaction_addresses.remove(h); } - CacheId::BlockReceipts(ref h) => { block_receipts.remove(h); } - } - } + let mut cache_man = self.cache_man.lock(); + cache_man.collect_garbage(current_size, |ids| { + for id in &ids { + match *id { + CacheId::BlockHeader(ref h) => { + block_headers.remove(h); + } + CacheId::BlockBody(ref h) => { + block_bodies.remove(h); + } + CacheId::BlockDetails(ref h) => { + block_details.remove(h); + } + CacheId::BlockHashes(ref h) => { + block_hashes.remove(h); + } + CacheId::TransactionAddresses(ref h) => { + transaction_addresses.remove(h); + } + CacheId::BlockReceipts(ref h) => { + block_receipts.remove(h); + } + } + } - block_headers.shrink_to_fit(); - block_bodies.shrink_to_fit(); - block_details.shrink_to_fit(); - block_hashes.shrink_to_fit(); - 
transaction_addresses.shrink_to_fit(); - block_receipts.shrink_to_fit(); + block_headers.shrink_to_fit(); + block_bodies.shrink_to_fit(); + block_details.shrink_to_fit(); + block_hashes.shrink_to_fit(); + transaction_addresses.shrink_to_fit(); + block_receipts.shrink_to_fit(); - block_headers.heap_size_of_children() + - block_bodies.heap_size_of_children() + - block_details.heap_size_of_children() + - block_hashes.heap_size_of_children() + - transaction_addresses.heap_size_of_children() + - block_receipts.heap_size_of_children() - }); - } + block_headers.heap_size_of_children() + + block_bodies.heap_size_of_children() + + block_details.heap_size_of_children() + + block_hashes.heap_size_of_children() + + transaction_addresses.heap_size_of_children() + + block_receipts.heap_size_of_children() + }); + } - /// Create a block body from a block. - pub fn block_to_body(block: &[u8]) -> Bytes { - let mut body = RlpStream::new_list(2); - let block_view = view!(BlockView, block); - body.append_raw(block_view.transactions_rlp().as_raw(), 1); - body.append_raw(block_view.uncles_rlp().as_raw(), 1); - body.out() - } + /// Create a block body from a block. + pub fn block_to_body(block: &[u8]) -> Bytes { + let mut body = RlpStream::new_list(2); + let block_view = view!(BlockView, block); + body.append_raw(block_view.transactions_rlp().as_raw(), 1); + body.append_raw(block_view.uncles_rlp().as_raw(), 1); + body.out() + } - /// Returns general blockchain information - pub fn chain_info(&self) -> BlockChainInfo { - // Make sure to call internal methods first to avoid - // recursive locking of `best_block`. - let first_block_hash = self.first_block(); - let first_block_number = self.first_block_number().into(); - let genesis_hash = self.genesis_hash(); + /// Returns general blockchain information + pub fn chain_info(&self) -> BlockChainInfo { + // Make sure to call internal methods first to avoid + // recursive locking of `best_block`. 
+ let first_block_hash = self.first_block(); + let first_block_number = self.first_block_number().into(); + let genesis_hash = self.genesis_hash(); - // ensure data consistencly by locking everything first - let best_block = self.best_block.read(); - let best_ancient_block = self.best_ancient_block.read(); - BlockChainInfo { - total_difficulty: best_block.total_difficulty, - pending_total_difficulty: best_block.total_difficulty, - genesis_hash, - best_block_hash: best_block.header.hash(), - best_block_number: best_block.header.number(), - best_block_timestamp: best_block.header.timestamp(), - first_block_hash, - first_block_number, - ancient_block_hash: best_ancient_block.as_ref().map(|b| b.hash), - ancient_block_number: best_ancient_block.as_ref().map(|b| b.number), - } - } + // ensure data consistencly by locking everything first + let best_block = self.best_block.read(); + let best_ancient_block = self.best_ancient_block.read(); + BlockChainInfo { + total_difficulty: best_block.total_difficulty, + pending_total_difficulty: best_block.total_difficulty, + genesis_hash, + best_block_hash: best_block.header.hash(), + best_block_number: best_block.header.number(), + best_block_timestamp: best_block.header.timestamp(), + first_block_hash, + first_block_number, + ancient_block_hash: best_ancient_block.as_ref().map(|b| b.hash), + ancient_block_number: best_ancient_block.as_ref().map(|b| b.number), + } + } } #[cfg(test)] mod tests { - use super::*; - - use std::iter; - - use common_types::receipt::{Receipt, TransactionOutcome}; - use common_types::transaction::{Transaction, Action}; - use crate::generator::{BlockGenerator, BlockBuilder, BlockOptions}; - use ethkey::Secret; - use keccak_hash::keccak; - use rustc_hex::FromHex; - use tempdir::TempDir; - - struct TestBlockChainDB { - _blooms_dir: TempDir, - _trace_blooms_dir: TempDir, - blooms: blooms_db::Database, - trace_blooms: blooms_db::Database, - key_value: Arc, - } - - impl BlockChainDB for TestBlockChainDB { - fn 
key_value(&self) -> &Arc { - &self.key_value - } - - fn blooms(&self) -> &blooms_db::Database { - &self.blooms - } - - fn trace_blooms(&self) -> &blooms_db::Database { - &self.trace_blooms - } - } - - /// Creates new test instance of `BlockChainDB` - pub fn new_db() -> Arc { - let blooms_dir = TempDir::new("").unwrap(); - let trace_blooms_dir = TempDir::new("").unwrap(); - - let db = TestBlockChainDB { - blooms: blooms_db::Database::open(blooms_dir.path()).unwrap(), - trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(), - _blooms_dir: blooms_dir, - _trace_blooms_dir: trace_blooms_dir, - key_value: Arc::new(kvdb_memorydb::create(ethcore_db::NUM_COLUMNS.unwrap())) - }; - - Arc::new(db) - } - - fn new_chain(genesis: encoded::Block, db: Arc) -> BlockChain { - BlockChain::new(Config::default(), genesis.raw(), db) - } - - fn insert_block(db: &Arc, bc: &BlockChain, block: encoded::Block, receipts: Vec) -> ImportRoute { - insert_block_commit(db, bc, block, receipts, true) - } - - fn insert_block_commit(db: &Arc, bc: &BlockChain, block: encoded::Block, receipts: Vec, commit: bool) -> ImportRoute { - let mut batch = db.key_value().transaction(); - let res = insert_block_batch(&mut batch, bc, block, receipts); - db.key_value().write(batch).unwrap(); - if commit { - bc.commit(); - } - res - } - - fn insert_block_batch(batch: &mut DBTransaction, bc: &BlockChain, block: encoded::Block, receipts: Vec) -> ImportRoute { - use crate::ExtrasInsert; - - let fork_choice = { - let header = block.header_view(); - let parent_hash = header.parent_hash(); - let parent_details = bc.uncommitted_block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash)); - let block_total_difficulty = parent_details.total_difficulty + header.difficulty(); - if block_total_difficulty > bc.best_block_total_difficulty() { - common_types::engines::ForkChoice::New - } else { - common_types::engines::ForkChoice::Old - } - }; - - bc.insert_block(batch, block, 
receipts, ExtrasInsert { - fork_choice: fork_choice, - is_finalized: false, - }) - } - - #[test] - fn should_cache_best_block() { - // given - let genesis = BlockBuilder::genesis(); - let first = genesis.add_block(); - - let db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - assert_eq!(bc.best_block_number(), 0); - - // when - insert_block_commit(&db, &bc, first.last().encoded(), vec![], false); - assert_eq!(bc.best_block_number(), 0); - bc.commit(); - // NOTE no db.write here (we want to check if best block is cached) - - // then - assert_eq!(bc.best_block_number(), 1); - assert!(bc.block(&bc.best_block_hash()).is_some(), "Best block should be queryable even without DB write."); - } - - #[test] - fn basic_blockchain_insert() { - let genesis = BlockBuilder::genesis(); - let first = genesis.add_block(); - - let genesis = genesis.last(); - let first = first.last(); - let genesis_hash = genesis.hash(); - let first_hash = first.hash(); - - let db = new_db(); - let bc = new_chain(genesis.encoded(), db.clone()); - - assert_eq!(bc.genesis_hash(), genesis_hash); - assert_eq!(bc.best_block_hash(), genesis_hash); - assert_eq!(bc.block_hash(0), Some(genesis_hash)); - assert_eq!(bc.block_hash(1), None); - assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]); - - let mut batch = db.key_value().transaction(); - insert_block_batch(&mut batch, &bc, first.encoded(), vec![]); - db.key_value().write(batch).unwrap(); - bc.commit(); - - assert_eq!(bc.block_hash(0), Some(genesis_hash)); - assert_eq!(bc.best_block_number(), 1); - assert_eq!(bc.best_block_hash(), first_hash); - assert_eq!(bc.block_hash(1), Some(first_hash)); - assert_eq!(bc.block_details(&first_hash).unwrap().parent, genesis_hash); - assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![first_hash]); - assert_eq!(bc.block_hash(2), None); - } - - #[test] - fn check_ancestry_iter() { - let genesis = BlockBuilder::genesis(); - let first_10 = genesis.add_blocks(10); - 
let generator = BlockGenerator::new(vec![first_10]); - - let db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - - let mut block_hashes = vec![genesis.last().hash()]; - let mut batch = db.key_value().transaction(); - for block in generator { - block_hashes.push(block.hash()); - insert_block_batch(&mut batch, &bc, block.encoded(), vec![]); - bc.commit(); - } - db.key_value().write(batch).unwrap(); - - block_hashes.reverse(); - - assert_eq!(bc.ancestry_iter(block_hashes[0].clone()).unwrap().collect::>(), block_hashes); - assert_eq!(block_hashes.len(), 11); - } - - #[test] - fn test_find_uncles() { - let genesis = BlockBuilder::genesis(); - let b1a = genesis.add_block(); - let b2a = b1a.add_block(); - let b3a = b2a.add_block(); - let b4a = b3a.add_block(); - let b5a = b4a.add_block(); - - let b1b = genesis.add_block_with_difficulty(9); - let b2b = b1a.add_block_with_difficulty(9); - let b3b = b2a.add_block_with_difficulty(9); - let b4b = b3a.add_block_with_difficulty(9); - let b5b = b4a.add_block_with_difficulty(9); - - let uncle_headers = vec![ - b4b.last().header().encoded(), - b3b.last().header().encoded(), - b2b.last().header().encoded(), - ]; - let b4a_hash = b4a.last().hash(); - - let generator = BlockGenerator::new( - vec![b1a, b1b, b2a, b2b, b3a, b3b, b4a, b4b, b5a, b5b] - ); - - let db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - - for b in generator { - insert_block(&db, &bc, b.encoded(), vec![]); - } - - assert_eq!(uncle_headers, bc.find_uncle_headers(&b4a_hash, 3).unwrap()); - // TODO: insert block that already includes one of them as an uncle to check it's not allowed. 
- } - - fn secret() -> Secret { - keccak("").into() - } - - #[test] - fn test_fork_transaction_addresses() { - let t1 = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 100.into(), - data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), - }.sign(&secret(), None); - - let t1_hash = t1.hash(); - - let genesis = BlockBuilder::genesis(); - let b1a = genesis.add_block_with_transactions(iter::once(t1)); - let b1b = genesis.add_block_with_difficulty(9); - let b2 = b1b.add_block(); - - let b1a_hash = b1a.last().hash(); - let b2_hash = b2.last().hash(); - - let db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - - let mut batch = db.key_value().transaction(); - let _ = insert_block_batch(&mut batch, &bc, b1a.last().encoded(), vec![]); - bc.commit(); - let _ = insert_block_batch(&mut batch, &bc, b1b.last().encoded(), vec![]); - bc.commit(); - db.key_value().write(batch).unwrap(); - - assert_eq!(bc.best_block_hash(), b1a_hash); - assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress { - block_hash: b1a_hash, - index: 0, - })); - - // now let's make forked chain the canon chain - let mut batch = db.key_value().transaction(); - let _ = insert_block_batch(&mut batch, &bc, b2.last().encoded(), vec![]); - bc.commit(); - db.key_value().write(batch).unwrap(); - - // Transaction should be retracted - assert_eq!(bc.best_block_hash(), b2_hash); - assert_eq!(bc.transaction_address(&t1_hash), None); - } - - #[test] - fn test_overwriting_transaction_addresses() { - let t1 = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 100.into(), - data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), - }.sign(&secret(), None); - - let t2 = Transaction { - nonce: 1.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 100.into(), - 
data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), - }.sign(&secret(), None); - - let t3 = Transaction { - nonce: 2.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 100.into(), - data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), - }.sign(&secret(), None); - - let genesis = BlockBuilder::genesis(); - let b1a = genesis.add_block_with_transactions(vec![t1.clone(), t2.clone()]); - // insert transactions in different order, - // the block has lower difficulty, so the hash is also different - let b1b = genesis.add_block_with(|| BlockOptions { - difficulty: 9.into(), - transactions: vec![t2.clone(), t1.clone()], - ..Default::default() - }); - let b2 = b1b.add_block_with_transactions(iter::once(t3.clone())); - - let b1a_hash = b1a.last().hash(); - let b1b_hash = b1b.last().hash(); - let b2_hash = b2.last().hash(); - - let t1_hash = t1.hash(); - let t2_hash = t2.hash(); - let t3_hash = t3.hash(); - - let db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - - let mut batch = db.key_value().transaction(); - let _ = insert_block_batch(&mut batch, &bc, b1a.last().encoded(), vec![]); - bc.commit(); - let _ = insert_block_batch(&mut batch, &bc, b1b.last().encoded(), vec![]); - bc.commit(); - db.key_value().write(batch).unwrap(); - - assert_eq!(bc.best_block_hash(), b1a_hash); - assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress { - block_hash: b1a_hash, - index: 0, - })); - assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress { - block_hash: b1a_hash, - index: 1, - })); - - // now let's make forked chain the canon chain - let mut batch = db.key_value().transaction(); - let _ = insert_block_batch(&mut batch, &bc, b2.last().encoded(), vec![]); - bc.commit(); - db.key_value().write(batch).unwrap(); - - assert_eq!(bc.best_block_hash(), b2_hash); - assert_eq!(bc.transaction_address(&t1_hash), 
Some(TransactionAddress { - block_hash: b1b_hash, - index: 1, - })); - assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress { - block_hash: b1b_hash, - index: 0, - })); - assert_eq!(bc.transaction_address(&t3_hash), Some(TransactionAddress { - block_hash: b2_hash, - index: 0, - })); - } - - #[test] - fn test_small_fork() { - let genesis = BlockBuilder::genesis(); - let b1 = genesis.add_block(); - let b2 = b1.add_block(); - let b3a = b2.add_block(); - let b3b = b2.add_block_with_difficulty(9); - - let genesis_hash = genesis.last().hash(); - let b1_hash = b1.last().hash(); - let b2_hash = b2.last().hash(); - let b3a_hash = b3a.last().hash(); - let b3b_hash = b3b.last().hash(); - - // b3a is a part of canon chain, whereas b3b is part of sidechain - let best_block_hash = b3a_hash; - - let db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - - let mut batch = db.key_value().transaction(); - let ir1 = insert_block_batch(&mut batch, &bc, b1.last().encoded(), vec![]); - bc.commit(); - let ir2 = insert_block_batch(&mut batch, &bc, b2.last().encoded(), vec![]); - bc.commit(); - let ir3b = insert_block_batch(&mut batch, &bc, b3b.last().encoded(), vec![]); - bc.commit(); - db.key_value().write(batch).unwrap(); - assert_eq!(bc.block_hash(3).unwrap(), b3b_hash); - let mut batch = db.key_value().transaction(); - let ir3a = insert_block_batch(&mut batch, &bc, b3a.last().encoded(), vec![]); - bc.commit(); - db.key_value().write(batch).unwrap(); - - assert_eq!(ir1, ImportRoute { - enacted: vec![b1_hash], - retracted: vec![], - omitted: vec![], - }); - - assert_eq!(ir2, ImportRoute { - enacted: vec![b2_hash], - retracted: vec![], - omitted: vec![], - }); - - assert_eq!(ir3b, ImportRoute { - enacted: vec![b3b_hash], - retracted: vec![], - omitted: vec![], - }); - - assert_eq!(ir3a, ImportRoute { - enacted: vec![b3a_hash], - retracted: vec![b3b_hash], - omitted: vec![], - }); - - assert_eq!(bc.best_block_hash(), best_block_hash); - 
assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0); - assert_eq!(bc.block_number(&b1_hash).unwrap(), 1); - assert_eq!(bc.block_number(&b2_hash).unwrap(), 2); - assert_eq!(bc.block_number(&b3a_hash).unwrap(), 3); - assert_eq!(bc.block_number(&b3b_hash).unwrap(), 3); - - assert_eq!(bc.block_hash(0).unwrap(), genesis_hash); - assert_eq!(bc.block_hash(1).unwrap(), b1_hash); - assert_eq!(bc.block_hash(2).unwrap(), b2_hash); - assert_eq!(bc.block_hash(3).unwrap(), b3a_hash); - - // test trie route - let r0_1 = bc.tree_route(genesis_hash, b1_hash).unwrap(); - assert_eq!(r0_1.ancestor, genesis_hash); - assert_eq!(r0_1.blocks, [b1_hash]); - assert_eq!(r0_1.index, 0); - - let r0_2 = bc.tree_route(genesis_hash, b2_hash).unwrap(); - assert_eq!(r0_2.ancestor, genesis_hash); - assert_eq!(r0_2.blocks, [b1_hash, b2_hash]); - assert_eq!(r0_2.index, 0); - - let r1_3a = bc.tree_route(b1_hash, b3a_hash).unwrap(); - assert_eq!(r1_3a.ancestor, b1_hash); - assert_eq!(r1_3a.blocks, [b2_hash, b3a_hash]); - assert_eq!(r1_3a.index, 0); - - let r1_3b = bc.tree_route(b1_hash, b3b_hash).unwrap(); - assert_eq!(r1_3b.ancestor, b1_hash); - assert_eq!(r1_3b.blocks, [b2_hash, b3b_hash]); - assert_eq!(r1_3b.index, 0); - - let r3a_3b = bc.tree_route(b3a_hash, b3b_hash).unwrap(); - assert_eq!(r3a_3b.ancestor, b2_hash); - assert_eq!(r3a_3b.blocks, [b3a_hash, b3b_hash]); - assert_eq!(r3a_3b.index, 1); - - let r1_0 = bc.tree_route(b1_hash, genesis_hash).unwrap(); - assert_eq!(r1_0.ancestor, genesis_hash); - assert_eq!(r1_0.blocks, [b1_hash]); - assert_eq!(r1_0.index, 1); - - let r2_0 = bc.tree_route(b2_hash, genesis_hash).unwrap(); - assert_eq!(r2_0.ancestor, genesis_hash); - assert_eq!(r2_0.blocks, [b2_hash, b1_hash]); - assert_eq!(r2_0.index, 2); - - let r3a_1 = bc.tree_route(b3a_hash, b1_hash).unwrap(); - assert_eq!(r3a_1.ancestor, b1_hash); - assert_eq!(r3a_1.blocks, [b3a_hash, b2_hash]); - assert_eq!(r3a_1.index, 2); - - let r3b_1 = bc.tree_route(b3b_hash, b1_hash).unwrap(); - 
assert_eq!(r3b_1.ancestor, b1_hash); - assert_eq!(r3b_1.blocks, [b3b_hash, b2_hash]); - assert_eq!(r3b_1.index, 2); - - let r3b_3a = bc.tree_route(b3b_hash, b3a_hash).unwrap(); - assert_eq!(r3b_3a.ancestor, b2_hash); - assert_eq!(r3b_3a.blocks, [b3b_hash, b3a_hash]); - assert_eq!(r3b_3a.index, 1); - } - - #[test] - fn test_reopen_blockchain_db() { - let genesis = BlockBuilder::genesis(); - let first = genesis.add_block(); - let genesis_hash = genesis.last().hash(); - let first_hash = first.last().hash(); - - let db = new_db(); - - { - let bc = new_chain(genesis.last().encoded(), db.clone()); - assert_eq!(bc.best_block_hash(), genesis_hash); - let mut batch = db.key_value().transaction(); - insert_block_batch(&mut batch, &bc, first.last().encoded(), vec![]); - db.key_value().write(batch).unwrap(); - bc.commit(); - assert_eq!(bc.best_block_hash(), first_hash); - } - - { - let bc = new_chain(genesis.last().encoded(), db.clone()); - - assert_eq!(bc.best_block_hash(), first_hash); - } - } - - #[test] - fn find_transaction_by_hash() { - let genesis = 
"f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0af81e09f8c46ca322193edfda764fa7e88e81923f802f1d325ec0b0308ac2cd0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200008083023e38808454c98c8142a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421880102030405060708c0c0".from_hex().unwrap(); - let b1 = 
"f904a8f901faa0ce1f26f798dd03c8782d63b3e42e79a64eaea5694ea686ac5d7ce3df5171d1aea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0a65c2364cd0f1542d761823dc0109c6b072f14c20459598c5455c274601438f4a070616ebd7ad2ed6fb7860cf7e9df00163842351c38a87cac2c1cb193895035a2a05c5b4fc43c2d45787f54e1ae7d27afdb4ad16dfc567c5692070d5c4556e0b1d7b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000183023ec683021536845685109780a029f07836e4e59229b3a065913afc27702642c683bba689910b2b2fd45db310d3888957e6d004a31802f902a7f85f800a8255f094aaaf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca0575da4e21b66fa764be5f74da9389e67693d066fb0d1312e19e17e501da00ecda06baf5a5327595f6619dfc2fcb3f2e6fb410b5810af3cb52d0e7508038e91a188f85f010a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba04fa966bf34b93abc1bcd665554b7f316b50f928477b50be0f3285ead29d18c5ba017bba0eeec1625ab433746955e125d46d80b7fdc97386c51266f842d8e02192ef85f020a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca004377418ae981cc32b1312b4a427a1d69a821b28db8584f5f2bd8c6d42458adaa053a1dba1af177fac92f3b6af0a9fa46a22adf56e686c93794b6a012bf254abf5f85f030a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca04fe13febd28a05f4fcb2f451d7ddc2dda56486d9f8c79a62b0ba4da775122615a0651b2382dd402df9ebc27f8cb4b2e0f3cea68dda2dca0ee9603608f0b6f51668f85f040a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba078e6a0ba086a08f8450e208a399bb2f2d2a0d984acd2517c7c7df66ccfab567da013254002cd45a97fac049ae00afbc43ed0d9961d0c56a3b2382c80ce41c198d
df85f050a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba0a7174d8f43ea71c8e3ca9477691add8d80ac8e0ed89d8d8b572041eef81f4a54a0534ea2e28ec4da3b5b944b18c51ec84a5cf35f5b3343c5fb86521fd2d388f506f85f060a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba034bd04065833536a10c77ee2a43a5371bc6d34837088b861dd9d4b7f44074b59a078807715786a13876d3455716a6b9cb2186b7a4887a5c31160fc877454958616c0".from_hex().unwrap(); - let b1_hash: H256 = "f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3".into(); - - let db = new_db(); - let bc = new_chain(encoded::Block::new(genesis), db.clone()); - let mut batch = db.key_value().transaction(); - insert_block_batch(&mut batch, &bc, encoded::Block::new(b1), vec![]); - db.key_value().write(batch).unwrap(); - bc.commit(); - - let transactions = bc.transactions(&b1_hash).unwrap(); - assert_eq!(transactions.len(), 7); - for t in transactions { - assert_eq!(bc.transaction(&bc.transaction_address(&t.hash()).unwrap()).unwrap(), t); - } - } - - #[test] - fn test_logs() { - let t1 = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 101.into(), - data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), - }.sign(&secret(), None); - let t2 = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 102.into(), - data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), - }.sign(&secret(), None); - let t3 = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 103.into(), - data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), - }.sign(&secret(), None); - let t4 = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 104.into(), - data: 
"601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), - }.sign(&secret(), None); - let tx_hash1 = t1.hash(); - let tx_hash2 = t2.hash(); - let tx_hash3 = t3.hash(); - let tx_hash4 = t4.hash(); - - let genesis = BlockBuilder::genesis(); - let b1 = genesis.add_block_with_transactions(vec![t1, t2]); - let b2 = b1.add_block_with_transactions(iter::once(t3)); - let b3 = genesis.add_block_with(|| BlockOptions { - transactions: vec![t4.clone()], - difficulty: U256::from(9), - ..Default::default() - }); // Branch block - let b1_hash = b1.last().hash(); - let b1_number = b1.last().number(); - let b2_hash = b2.last().hash(); - let b2_number = b2.last().number(); - let b3_hash = b3.last().hash(); - let b3_number = b3.last().number(); - - let db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - insert_block(&db, &bc, b1.last().encoded(), vec![Receipt { - outcome: TransactionOutcome::StateRoot(H256::default()), - gas_used: 10_000.into(), - log_bloom: Default::default(), - logs: vec![ - LogEntry { address: Default::default(), topics: vec![], data: vec![1], }, - LogEntry { address: Default::default(), topics: vec![], data: vec![2], }, - ], - }, - Receipt { - outcome: TransactionOutcome::StateRoot(H256::default()), - gas_used: 10_000.into(), - log_bloom: Default::default(), - logs: vec![ - LogEntry { address: Default::default(), topics: vec![], data: vec![3], }, - ], - }]); - insert_block(&db, &bc, b2.last().encoded(), vec![ - Receipt { - outcome: TransactionOutcome::StateRoot(H256::default()), - gas_used: 10_000.into(), - log_bloom: Default::default(), - logs: vec![ - LogEntry { address: Default::default(), topics: vec![], data: vec![4], }, - ], - } - ]); - insert_block(&db, &bc, b3.last().encoded(), vec![ - Receipt { - outcome: TransactionOutcome::StateRoot(H256::default()), - gas_used: 10_000.into(), - log_bloom: Default::default(), - logs: vec![ - LogEntry { address: Default::default(), topics: vec![], data: vec![5], }, - ], 
- } - ]); - - // when - let logs1 = bc.logs(vec![b1_hash, b2_hash], |_| true, None); - let logs2 = bc.logs(vec![b1_hash, b2_hash], |_| true, Some(1)); - let logs3 = bc.logs(vec![b3_hash], |_| true, None); - - // then - assert_eq!(logs1, vec![ - LocalizedLogEntry { - entry: LogEntry { address: Default::default(), topics: vec![], data: vec![1] }, - block_hash: b1_hash, - block_number: b1_number, - transaction_hash: tx_hash1, - transaction_index: 0, - transaction_log_index: 0, - log_index: 0, - }, - LocalizedLogEntry { - entry: LogEntry { address: Default::default(), topics: vec![], data: vec![2] }, - block_hash: b1_hash, - block_number: b1_number, - transaction_hash: tx_hash1, - transaction_index: 0, - transaction_log_index: 1, - log_index: 1, - }, - LocalizedLogEntry { - entry: LogEntry { address: Default::default(), topics: vec![], data: vec![3] }, - block_hash: b1_hash, - block_number: b1_number, - transaction_hash: tx_hash2, - transaction_index: 1, - transaction_log_index: 0, - log_index: 2, - }, - LocalizedLogEntry { - entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] }, - block_hash: b2_hash, - block_number: b2_number, - transaction_hash: tx_hash3, - transaction_index: 0, - transaction_log_index: 0, - log_index: 0, - } - ]); - assert_eq!(logs2, vec![ - LocalizedLogEntry { - entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] }, - block_hash: b2_hash, - block_number: b2_number, - transaction_hash: tx_hash3, - transaction_index: 0, - transaction_log_index: 0, - log_index: 0, - } - ]); - assert_eq!(logs3, vec![ - LocalizedLogEntry { - entry: LogEntry { address: Default::default(), topics: vec![], data: vec![5] }, - block_hash: b3_hash, - block_number: b3_number, - transaction_hash: tx_hash4, - transaction_index: 0, - transaction_log_index: 0, - log_index: 0, - } - ]); - } - - #[test] - fn test_bloom_filter_simple() { - let bloom_b1: Bloom = 
"00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000".into(); - - let bloom_b2: Bloom = "00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); - - let bloom_ba: Bloom = "00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); - - let genesis = BlockBuilder::genesis(); - let b1 = genesis.add_block_with(|| BlockOptions { - bloom: bloom_b1.clone(), - difficulty: 9.into(), - ..Default::default() - }); - let b2 = b1.add_block_with_bloom(bloom_b2); - let b3 = b2.add_block_with_bloom(bloom_ba); - - let b1a = genesis.add_block_with_bloom(bloom_ba); - let b2a = b1a.add_block_with_bloom(bloom_ba); - - let 
db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - - let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); - let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); - assert!(blocks_b1.is_empty()); - assert!(blocks_b2.is_empty()); - - insert_block(&db, &bc, b1.last().encoded(), vec![]); - let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); - let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); - assert_eq!(blocks_b1, vec![1]); - assert!(blocks_b2.is_empty()); - - insert_block(&db, &bc, b2.last().encoded(), vec![]); - let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); - let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); - assert_eq!(blocks_b1, vec![1]); - assert_eq!(blocks_b2, vec![2]); - - // hasn't been forked yet - insert_block(&db, &bc, b1a.last().encoded(), vec![]); - let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); - let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); - let blocks_ba = bc.blocks_with_bloom(Some(&bloom_ba), 0, 5); - assert_eq!(blocks_b1, vec![1]); - assert_eq!(blocks_b2, vec![2]); - assert!(blocks_ba.is_empty()); - - // fork has happend - insert_block(&db, &bc, b2a.last().encoded(), vec![]); - let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); - let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); - let blocks_ba = bc.blocks_with_bloom(Some(&bloom_ba), 0, 5); - assert!(blocks_b1.is_empty()); - assert!(blocks_b2.is_empty()); - assert_eq!(blocks_ba, vec![1, 2]); - - // fork back - insert_block(&db, &bc, b3.last().encoded(), vec![]); - let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); - let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); - let blocks_ba = bc.blocks_with_bloom(Some(&bloom_ba), 0, 5); - assert_eq!(blocks_b1, vec![1]); - assert_eq!(blocks_b2, vec![2]); - assert_eq!(blocks_ba, vec![3]); - } - - #[test] - fn test_insert_unordered() { - let bloom_b1: Bloom = 
"00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000".into(); - - let bloom_b2: Bloom = "00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); - - let bloom_b3: Bloom = "00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); - - let genesis = BlockBuilder::genesis(); - let b1 = genesis.add_block_with_bloom(bloom_b1); - let b2 = b1.add_block_with_bloom(bloom_b2); - let b3 = b2.add_block_with_bloom(bloom_b3); - let b1_total_difficulty = genesis.last().difficulty() + b1.last().difficulty(); - - let db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - let mut batch = 
db.key_value().transaction(); - bc.insert_unordered_block(&mut batch, b2.last().encoded(), vec![], Some(b1_total_difficulty), false, false); - bc.commit(); - bc.insert_unordered_block(&mut batch, b3.last().encoded(), vec![], None, true, false); - bc.commit(); - bc.insert_unordered_block(&mut batch, b1.last().encoded(), vec![], None, false, false); - bc.commit(); - db.key_value().write(batch).unwrap(); - - assert_eq!(bc.best_block_hash(), b3.last().hash()); - assert_eq!(bc.block_hash(1).unwrap(), b1.last().hash()); - assert_eq!(bc.block_hash(2).unwrap(), b2.last().hash()); - assert_eq!(bc.block_hash(3).unwrap(), b3.last().hash()); - - let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 3); - let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 3); - let blocks_b3 = bc.blocks_with_bloom(Some(&bloom_b3), 0, 3); - - assert_eq!(blocks_b1, vec![1]); - assert_eq!(blocks_b2, vec![2]); - assert_eq!(blocks_b3, vec![3]); - } - - #[test] - fn test_best_block_update() { - let genesis = BlockBuilder::genesis(); - let next_5 = genesis.add_blocks(5); - let uncle = genesis.add_block_with_difficulty(9); - let generator = BlockGenerator::new(iter::once(next_5)); - - let db = new_db(); - { - let bc = new_chain(genesis.last().encoded(), db.clone()); - - let mut batch = db.key_value().transaction(); - // create a longer fork - for block in generator { - insert_block_batch(&mut batch, &bc, block.encoded(), vec![]); - bc.commit(); - } - - assert_eq!(bc.best_block_number(), 5); - insert_block_batch(&mut batch, &bc, uncle.last().encoded(), vec![]); - db.key_value().write(batch).unwrap(); - bc.commit(); - } - - // re-loading the blockchain should load the correct best block. 
- let bc = new_chain(genesis.last().encoded(), db); - assert_eq!(bc.best_block_number(), 5); - } - - #[test] - fn epoch_transitions_iter() { - use common_types::engines::epoch::Transition as EpochTransition; - - let genesis = BlockBuilder::genesis(); - let next_5 = genesis.add_blocks(5); - let uncle = genesis.add_block_with_difficulty(9); - let generator = BlockGenerator::new(iter::once(next_5)); - - let db = new_db(); - { - let bc = new_chain(genesis.last().encoded(), db.clone()); - - let mut batch = db.key_value().transaction(); - // create a longer fork - for (i, block) in generator.into_iter().enumerate() { - - insert_block_batch(&mut batch, &bc, block.encoded(), vec![]); - bc.insert_epoch_transition(&mut batch, i as u64, EpochTransition { - block_hash: block.hash(), - block_number: i as u64 + 1, - proof: vec![], - }); - bc.commit(); - } - - assert_eq!(bc.best_block_number(), 5); - - insert_block_batch(&mut batch, &bc, uncle.last().encoded(), vec![]); - bc.insert_epoch_transition(&mut batch, 999, EpochTransition { - block_hash: uncle.last().hash(), - block_number: 1, - proof: vec![], - }); - - db.key_value().write(batch).unwrap(); - bc.commit(); - - // epoch 999 not in canonical chain. - assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::>(), vec![0, 1, 2, 3, 4]); - } - - // re-loading the blockchain should load the correct best block. 
- let bc = new_chain(genesis.last().encoded(), db); - - assert_eq!(bc.best_block_number(), 5); - assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::>(), vec![0, 1, 2, 3, 4]); - } - - #[test] - fn epoch_transition_for() { - use common_types::engines::epoch::Transition as EpochTransition; - - let genesis = BlockBuilder::genesis(); - let fork_7 = genesis.add_blocks_with(7, || BlockOptions { - difficulty: 9.into(), - ..Default::default() - }); - let next_10 = genesis.add_blocks(10); - let fork_generator = BlockGenerator::new(iter::once(fork_7)); - let next_generator = BlockGenerator::new(iter::once(next_10)); - - let db = new_db(); - - let bc = new_chain(genesis.last().encoded(), db.clone()); - - let mut batch = db.key_value().transaction(); - bc.insert_epoch_transition(&mut batch, 0, EpochTransition { - block_hash: bc.genesis_hash(), - block_number: 0, - proof: vec![], - }); - db.key_value().write(batch).unwrap(); - - // set up a chain where we have a canonical chain of 10 blocks - // and a non-canonical fork of 8 from genesis. - let fork_hash = { - for block in fork_generator { - insert_block(&db, &bc, block.encoded(), vec![]); - } - - assert_eq!(bc.best_block_number(), 7); - bc.chain_info().best_block_hash - }; - - for block in next_generator { - insert_block(&db, &bc, block.encoded(), vec![]); - } - - assert_eq!(bc.best_block_number(), 10); - - let mut batch = db.key_value().transaction(); - bc.insert_epoch_transition(&mut batch, 4, EpochTransition { - block_hash: bc.block_hash(4).unwrap(), - block_number: 4, - proof: vec![], - }); - db.key_value().write(batch).unwrap(); - - // blocks where the parent is one of the first 4 will be part of genesis epoch. - for i in 0..4 { - let hash = bc.block_hash(i).unwrap(); - assert_eq!(bc.epoch_transition_for(hash).unwrap().block_number, 0); - } - - // blocks where the parent is the transition at 4 or after will be - // part of that epoch. 
- for i in 4..11 { - let hash = bc.block_hash(i).unwrap(); - assert_eq!(bc.epoch_transition_for(hash).unwrap().block_number, 4); - } - - let fork_hashes = bc.ancestry_iter(fork_hash).unwrap().collect::>(); - assert_eq!(fork_hashes.len(), 8); - - // non-canonical fork blocks should all have genesis transition - for fork_hash in fork_hashes { - assert_eq!(bc.epoch_transition_for(fork_hash).unwrap().block_number, 0); - } - } - - #[test] - fn tree_rout_with_finalization() { - let genesis = BlockBuilder::genesis(); - let a = genesis.add_block(); - // First branch - let a1 = a.add_block_with_random_transactions(); - let a2 = a1.add_block_with_random_transactions(); - let a3 = a2.add_block_with_random_transactions(); - // Second branch - let b1 = a.add_block_with_random_transactions(); - let b2 = b1.add_block_with_random_transactions(); - - let a_hash = a.last().hash(); - let a1_hash = a1.last().hash(); - let a2_hash = a2.last().hash(); - let a3_hash = a3.last().hash(); - let b2_hash = b2.last().hash(); - - let bootstrap_chain = |blocks: Vec<&BlockBuilder>| { - let db = new_db(); - let bc = new_chain(genesis.last().encoded(), db.clone()); - let mut batch = db.key_value().transaction(); - for block in blocks { - insert_block_batch(&mut batch, &bc, block.last().encoded(), vec![]); - bc.commit(); - } - db.key_value().write(batch).unwrap(); - (db, bc) - }; - - let mark_finalized = |block_hash: H256, db: &Arc, bc: &BlockChain| { - let mut batch = db.key_value().transaction(); - bc.mark_finalized(&mut batch, block_hash).unwrap(); - bc.commit(); - db.key_value().write(batch).unwrap(); - }; - - // Case 1: fork, with finalized common ancestor - { - let (db, bc) = bootstrap_chain(vec![&a, &a1, &a2, &a3, &b1, &b2]); - assert_eq!(bc.best_block_hash(), a3_hash); - assert_eq!(bc.block_hash(2).unwrap(), a1_hash); - - mark_finalized(a_hash, &db, &bc); - assert!(!bc.tree_route(a3_hash, b2_hash).unwrap().is_from_route_finalized); - assert!(!bc.tree_route(b2_hash, 
a3_hash).unwrap().is_from_route_finalized); - } - - // Case 2: fork with a finalized block on a branch - { - let (db, bc) = bootstrap_chain(vec![&a, &a1, &a2, &a3, &b1, &b2]); - assert_eq!(bc.best_block_hash(), a3_hash); - assert_eq!(bc.block_hash(2).unwrap(), a1_hash); - - mark_finalized(a2_hash, &db, &bc); - assert!(bc.tree_route(a3_hash, b2_hash).unwrap().is_from_route_finalized); - assert!(!bc.tree_route(b2_hash, a3_hash).unwrap().is_from_route_finalized); - } - - // Case 3: no-fork, with a finalized block - { - let (db, bc) = bootstrap_chain(vec![&a, &a1, &a2]); - assert_eq!(bc.best_block_hash(), a2_hash); - - mark_finalized(a1_hash, &db, &bc); - assert!(!bc.tree_route(a1_hash, a2_hash).unwrap().is_from_route_finalized); - assert!(!bc.tree_route(a2_hash, a1_hash).unwrap().is_from_route_finalized); - } - } + use super::*; + + use std::iter; + + use crate::generator::{BlockBuilder, BlockGenerator, BlockOptions}; + use common_types::{ + receipt::{Receipt, TransactionOutcome}, + transaction::{Action, Transaction}, + }; + use ethkey::Secret; + use keccak_hash::keccak; + use rustc_hex::FromHex; + use tempdir::TempDir; + + struct TestBlockChainDB { + _blooms_dir: TempDir, + _trace_blooms_dir: TempDir, + blooms: blooms_db::Database, + trace_blooms: blooms_db::Database, + key_value: Arc, + } + + impl BlockChainDB for TestBlockChainDB { + fn key_value(&self) -> &Arc { + &self.key_value + } + + fn blooms(&self) -> &blooms_db::Database { + &self.blooms + } + + fn trace_blooms(&self) -> &blooms_db::Database { + &self.trace_blooms + } + } + + /// Creates new test instance of `BlockChainDB` + pub fn new_db() -> Arc { + let blooms_dir = TempDir::new("").unwrap(); + let trace_blooms_dir = TempDir::new("").unwrap(); + + let db = TestBlockChainDB { + blooms: blooms_db::Database::open(blooms_dir.path()).unwrap(), + trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(), + _blooms_dir: blooms_dir, + _trace_blooms_dir: trace_blooms_dir, + key_value: 
Arc::new(kvdb_memorydb::create(ethcore_db::NUM_COLUMNS.unwrap())), + }; + + Arc::new(db) + } + + fn new_chain(genesis: encoded::Block, db: Arc) -> BlockChain { + BlockChain::new(Config::default(), genesis.raw(), db) + } + + fn insert_block( + db: &Arc, + bc: &BlockChain, + block: encoded::Block, + receipts: Vec, + ) -> ImportRoute { + insert_block_commit(db, bc, block, receipts, true) + } + + fn insert_block_commit( + db: &Arc, + bc: &BlockChain, + block: encoded::Block, + receipts: Vec, + commit: bool, + ) -> ImportRoute { + let mut batch = db.key_value().transaction(); + let res = insert_block_batch(&mut batch, bc, block, receipts); + db.key_value().write(batch).unwrap(); + if commit { + bc.commit(); + } + res + } + + fn insert_block_batch( + batch: &mut DBTransaction, + bc: &BlockChain, + block: encoded::Block, + receipts: Vec, + ) -> ImportRoute { + use crate::ExtrasInsert; + + let fork_choice = { + let header = block.header_view(); + let parent_hash = header.parent_hash(); + let parent_details = bc + .uncommitted_block_details(&parent_hash) + .unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash)); + let block_total_difficulty = parent_details.total_difficulty + header.difficulty(); + if block_total_difficulty > bc.best_block_total_difficulty() { + common_types::engines::ForkChoice::New + } else { + common_types::engines::ForkChoice::Old + } + }; + + bc.insert_block( + batch, + block, + receipts, + ExtrasInsert { + fork_choice: fork_choice, + is_finalized: false, + }, + ) + } + + #[test] + fn should_cache_best_block() { + // given + let genesis = BlockBuilder::genesis(); + let first = genesis.add_block(); + + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + assert_eq!(bc.best_block_number(), 0); + + // when + insert_block_commit(&db, &bc, first.last().encoded(), vec![], false); + assert_eq!(bc.best_block_number(), 0); + bc.commit(); + // NOTE no db.write here (we want to check if best block is cached) + + // then + 
assert_eq!(bc.best_block_number(), 1); + assert!( + bc.block(&bc.best_block_hash()).is_some(), + "Best block should be queryable even without DB write." + ); + } + + #[test] + fn basic_blockchain_insert() { + let genesis = BlockBuilder::genesis(); + let first = genesis.add_block(); + + let genesis = genesis.last(); + let first = first.last(); + let genesis_hash = genesis.hash(); + let first_hash = first.hash(); + + let db = new_db(); + let bc = new_chain(genesis.encoded(), db.clone()); + + assert_eq!(bc.genesis_hash(), genesis_hash); + assert_eq!(bc.best_block_hash(), genesis_hash); + assert_eq!(bc.block_hash(0), Some(genesis_hash)); + assert_eq!(bc.block_hash(1), None); + assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]); + + let mut batch = db.key_value().transaction(); + insert_block_batch(&mut batch, &bc, first.encoded(), vec![]); + db.key_value().write(batch).unwrap(); + bc.commit(); + + assert_eq!(bc.block_hash(0), Some(genesis_hash)); + assert_eq!(bc.best_block_number(), 1); + assert_eq!(bc.best_block_hash(), first_hash); + assert_eq!(bc.block_hash(1), Some(first_hash)); + assert_eq!(bc.block_details(&first_hash).unwrap().parent, genesis_hash); + assert_eq!( + bc.block_details(&genesis_hash).unwrap().children, + vec![first_hash] + ); + assert_eq!(bc.block_hash(2), None); + } + + #[test] + fn check_ancestry_iter() { + let genesis = BlockBuilder::genesis(); + let first_10 = genesis.add_blocks(10); + let generator = BlockGenerator::new(vec![first_10]); + + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + + let mut block_hashes = vec![genesis.last().hash()]; + let mut batch = db.key_value().transaction(); + for block in generator { + block_hashes.push(block.hash()); + insert_block_batch(&mut batch, &bc, block.encoded(), vec![]); + bc.commit(); + } + db.key_value().write(batch).unwrap(); + + block_hashes.reverse(); + + assert_eq!( + bc.ancestry_iter(block_hashes[0].clone()) + .unwrap() + .collect::>(), + 
block_hashes + ); + assert_eq!(block_hashes.len(), 11); + } + + #[test] + fn test_find_uncles() { + let genesis = BlockBuilder::genesis(); + let b1a = genesis.add_block(); + let b2a = b1a.add_block(); + let b3a = b2a.add_block(); + let b4a = b3a.add_block(); + let b5a = b4a.add_block(); + + let b1b = genesis.add_block_with_difficulty(9); + let b2b = b1a.add_block_with_difficulty(9); + let b3b = b2a.add_block_with_difficulty(9); + let b4b = b3a.add_block_with_difficulty(9); + let b5b = b4a.add_block_with_difficulty(9); + + let uncle_headers = vec![ + b4b.last().header().encoded(), + b3b.last().header().encoded(), + b2b.last().header().encoded(), + ]; + let b4a_hash = b4a.last().hash(); + + let generator = BlockGenerator::new(vec![b1a, b1b, b2a, b2b, b3a, b3b, b4a, b4b, b5a, b5b]); + + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + + for b in generator { + insert_block(&db, &bc, b.encoded(), vec![]); + } + + assert_eq!(uncle_headers, bc.find_uncle_headers(&b4a_hash, 3).unwrap()); + // TODO: insert block that already includes one of them as an uncle to check it's not allowed. 
+ } + + fn secret() -> Secret { + keccak("").into() + } + + #[test] + fn test_fork_transaction_addresses() { + let t1 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555" + .from_hex() + .unwrap(), + } + .sign(&secret(), None); + + let t1_hash = t1.hash(); + + let genesis = BlockBuilder::genesis(); + let b1a = genesis.add_block_with_transactions(iter::once(t1)); + let b1b = genesis.add_block_with_difficulty(9); + let b2 = b1b.add_block(); + + let b1a_hash = b1a.last().hash(); + let b2_hash = b2.last().hash(); + + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + + let mut batch = db.key_value().transaction(); + let _ = insert_block_batch(&mut batch, &bc, b1a.last().encoded(), vec![]); + bc.commit(); + let _ = insert_block_batch(&mut batch, &bc, b1b.last().encoded(), vec![]); + bc.commit(); + db.key_value().write(batch).unwrap(); + + assert_eq!(bc.best_block_hash(), b1a_hash); + assert_eq!( + bc.transaction_address(&t1_hash), + Some(TransactionAddress { + block_hash: b1a_hash, + index: 0, + }) + ); + + // now let's make forked chain the canon chain + let mut batch = db.key_value().transaction(); + let _ = insert_block_batch(&mut batch, &bc, b2.last().encoded(), vec![]); + bc.commit(); + db.key_value().write(batch).unwrap(); + + // Transaction should be retracted + assert_eq!(bc.best_block_hash(), b2_hash); + assert_eq!(bc.transaction_address(&t1_hash), None); + } + + #[test] + fn test_overwriting_transaction_addresses() { + let t1 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555" + .from_hex() + .unwrap(), + } + .sign(&secret(), None); + + let t2 = Transaction { + nonce: 1.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: 
Action::Create, + value: 100.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555" + .from_hex() + .unwrap(), + } + .sign(&secret(), None); + + let t3 = Transaction { + nonce: 2.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555" + .from_hex() + .unwrap(), + } + .sign(&secret(), None); + + let genesis = BlockBuilder::genesis(); + let b1a = genesis.add_block_with_transactions(vec![t1.clone(), t2.clone()]); + // insert transactions in different order, + // the block has lower difficulty, so the hash is also different + let b1b = genesis.add_block_with(|| BlockOptions { + difficulty: 9.into(), + transactions: vec![t2.clone(), t1.clone()], + ..Default::default() + }); + let b2 = b1b.add_block_with_transactions(iter::once(t3.clone())); + + let b1a_hash = b1a.last().hash(); + let b1b_hash = b1b.last().hash(); + let b2_hash = b2.last().hash(); + + let t1_hash = t1.hash(); + let t2_hash = t2.hash(); + let t3_hash = t3.hash(); + + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + + let mut batch = db.key_value().transaction(); + let _ = insert_block_batch(&mut batch, &bc, b1a.last().encoded(), vec![]); + bc.commit(); + let _ = insert_block_batch(&mut batch, &bc, b1b.last().encoded(), vec![]); + bc.commit(); + db.key_value().write(batch).unwrap(); + + assert_eq!(bc.best_block_hash(), b1a_hash); + assert_eq!( + bc.transaction_address(&t1_hash), + Some(TransactionAddress { + block_hash: b1a_hash, + index: 0, + }) + ); + assert_eq!( + bc.transaction_address(&t2_hash), + Some(TransactionAddress { + block_hash: b1a_hash, + index: 1, + }) + ); + + // now let's make forked chain the canon chain + let mut batch = db.key_value().transaction(); + let _ = insert_block_batch(&mut batch, &bc, b2.last().encoded(), vec![]); + bc.commit(); + db.key_value().write(batch).unwrap(); + + assert_eq!(bc.best_block_hash(), 
b2_hash); + assert_eq!( + bc.transaction_address(&t1_hash), + Some(TransactionAddress { + block_hash: b1b_hash, + index: 1, + }) + ); + assert_eq!( + bc.transaction_address(&t2_hash), + Some(TransactionAddress { + block_hash: b1b_hash, + index: 0, + }) + ); + assert_eq!( + bc.transaction_address(&t3_hash), + Some(TransactionAddress { + block_hash: b2_hash, + index: 0, + }) + ); + } + + #[test] + fn test_small_fork() { + let genesis = BlockBuilder::genesis(); + let b1 = genesis.add_block(); + let b2 = b1.add_block(); + let b3a = b2.add_block(); + let b3b = b2.add_block_with_difficulty(9); + + let genesis_hash = genesis.last().hash(); + let b1_hash = b1.last().hash(); + let b2_hash = b2.last().hash(); + let b3a_hash = b3a.last().hash(); + let b3b_hash = b3b.last().hash(); + + // b3a is a part of canon chain, whereas b3b is part of sidechain + let best_block_hash = b3a_hash; + + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + + let mut batch = db.key_value().transaction(); + let ir1 = insert_block_batch(&mut batch, &bc, b1.last().encoded(), vec![]); + bc.commit(); + let ir2 = insert_block_batch(&mut batch, &bc, b2.last().encoded(), vec![]); + bc.commit(); + let ir3b = insert_block_batch(&mut batch, &bc, b3b.last().encoded(), vec![]); + bc.commit(); + db.key_value().write(batch).unwrap(); + assert_eq!(bc.block_hash(3).unwrap(), b3b_hash); + let mut batch = db.key_value().transaction(); + let ir3a = insert_block_batch(&mut batch, &bc, b3a.last().encoded(), vec![]); + bc.commit(); + db.key_value().write(batch).unwrap(); + + assert_eq!( + ir1, + ImportRoute { + enacted: vec![b1_hash], + retracted: vec![], + omitted: vec![], + } + ); + + assert_eq!( + ir2, + ImportRoute { + enacted: vec![b2_hash], + retracted: vec![], + omitted: vec![], + } + ); + + assert_eq!( + ir3b, + ImportRoute { + enacted: vec![b3b_hash], + retracted: vec![], + omitted: vec![], + } + ); + + assert_eq!( + ir3a, + ImportRoute { + enacted: vec![b3a_hash], + retracted: 
vec![b3b_hash], + omitted: vec![], + } + ); + + assert_eq!(bc.best_block_hash(), best_block_hash); + assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0); + assert_eq!(bc.block_number(&b1_hash).unwrap(), 1); + assert_eq!(bc.block_number(&b2_hash).unwrap(), 2); + assert_eq!(bc.block_number(&b3a_hash).unwrap(), 3); + assert_eq!(bc.block_number(&b3b_hash).unwrap(), 3); + + assert_eq!(bc.block_hash(0).unwrap(), genesis_hash); + assert_eq!(bc.block_hash(1).unwrap(), b1_hash); + assert_eq!(bc.block_hash(2).unwrap(), b2_hash); + assert_eq!(bc.block_hash(3).unwrap(), b3a_hash); + + // test trie route + let r0_1 = bc.tree_route(genesis_hash, b1_hash).unwrap(); + assert_eq!(r0_1.ancestor, genesis_hash); + assert_eq!(r0_1.blocks, [b1_hash]); + assert_eq!(r0_1.index, 0); + + let r0_2 = bc.tree_route(genesis_hash, b2_hash).unwrap(); + assert_eq!(r0_2.ancestor, genesis_hash); + assert_eq!(r0_2.blocks, [b1_hash, b2_hash]); + assert_eq!(r0_2.index, 0); + + let r1_3a = bc.tree_route(b1_hash, b3a_hash).unwrap(); + assert_eq!(r1_3a.ancestor, b1_hash); + assert_eq!(r1_3a.blocks, [b2_hash, b3a_hash]); + assert_eq!(r1_3a.index, 0); + + let r1_3b = bc.tree_route(b1_hash, b3b_hash).unwrap(); + assert_eq!(r1_3b.ancestor, b1_hash); + assert_eq!(r1_3b.blocks, [b2_hash, b3b_hash]); + assert_eq!(r1_3b.index, 0); + + let r3a_3b = bc.tree_route(b3a_hash, b3b_hash).unwrap(); + assert_eq!(r3a_3b.ancestor, b2_hash); + assert_eq!(r3a_3b.blocks, [b3a_hash, b3b_hash]); + assert_eq!(r3a_3b.index, 1); + + let r1_0 = bc.tree_route(b1_hash, genesis_hash).unwrap(); + assert_eq!(r1_0.ancestor, genesis_hash); + assert_eq!(r1_0.blocks, [b1_hash]); + assert_eq!(r1_0.index, 1); + + let r2_0 = bc.tree_route(b2_hash, genesis_hash).unwrap(); + assert_eq!(r2_0.ancestor, genesis_hash); + assert_eq!(r2_0.blocks, [b2_hash, b1_hash]); + assert_eq!(r2_0.index, 2); + + let r3a_1 = bc.tree_route(b3a_hash, b1_hash).unwrap(); + assert_eq!(r3a_1.ancestor, b1_hash); + assert_eq!(r3a_1.blocks, [b3a_hash, b2_hash]); + 
assert_eq!(r3a_1.index, 2); + + let r3b_1 = bc.tree_route(b3b_hash, b1_hash).unwrap(); + assert_eq!(r3b_1.ancestor, b1_hash); + assert_eq!(r3b_1.blocks, [b3b_hash, b2_hash]); + assert_eq!(r3b_1.index, 2); + + let r3b_3a = bc.tree_route(b3b_hash, b3a_hash).unwrap(); + assert_eq!(r3b_3a.ancestor, b2_hash); + assert_eq!(r3b_3a.blocks, [b3b_hash, b3a_hash]); + assert_eq!(r3b_3a.index, 1); + } + + #[test] + fn test_reopen_blockchain_db() { + let genesis = BlockBuilder::genesis(); + let first = genesis.add_block(); + let genesis_hash = genesis.last().hash(); + let first_hash = first.last().hash(); + + let db = new_db(); + + { + let bc = new_chain(genesis.last().encoded(), db.clone()); + assert_eq!(bc.best_block_hash(), genesis_hash); + let mut batch = db.key_value().transaction(); + insert_block_batch(&mut batch, &bc, first.last().encoded(), vec![]); + db.key_value().write(batch).unwrap(); + bc.commit(); + assert_eq!(bc.best_block_hash(), first_hash); + } + + { + let bc = new_chain(genesis.last().encoded(), db.clone()); + + assert_eq!(bc.best_block_hash(), first_hash); + } + } + + #[test] + fn find_transaction_by_hash() { + let genesis = 
"f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0af81e09f8c46ca322193edfda764fa7e88e81923f802f1d325ec0b0308ac2cd0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200008083023e38808454c98c8142a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421880102030405060708c0c0".from_hex().unwrap(); + let b1 = 
"f904a8f901faa0ce1f26f798dd03c8782d63b3e42e79a64eaea5694ea686ac5d7ce3df5171d1aea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0a65c2364cd0f1542d761823dc0109c6b072f14c20459598c5455c274601438f4a070616ebd7ad2ed6fb7860cf7e9df00163842351c38a87cac2c1cb193895035a2a05c5b4fc43c2d45787f54e1ae7d27afdb4ad16dfc567c5692070d5c4556e0b1d7b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000183023ec683021536845685109780a029f07836e4e59229b3a065913afc27702642c683bba689910b2b2fd45db310d3888957e6d004a31802f902a7f85f800a8255f094aaaf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca0575da4e21b66fa764be5f74da9389e67693d066fb0d1312e19e17e501da00ecda06baf5a5327595f6619dfc2fcb3f2e6fb410b5810af3cb52d0e7508038e91a188f85f010a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba04fa966bf34b93abc1bcd665554b7f316b50f928477b50be0f3285ead29d18c5ba017bba0eeec1625ab433746955e125d46d80b7fdc97386c51266f842d8e02192ef85f020a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca004377418ae981cc32b1312b4a427a1d69a821b28db8584f5f2bd8c6d42458adaa053a1dba1af177fac92f3b6af0a9fa46a22adf56e686c93794b6a012bf254abf5f85f030a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca04fe13febd28a05f4fcb2f451d7ddc2dda56486d9f8c79a62b0ba4da775122615a0651b2382dd402df9ebc27f8cb4b2e0f3cea68dda2dca0ee9603608f0b6f51668f85f040a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba078e6a0ba086a08f8450e208a399bb2f2d2a0d984acd2517c7c7df66ccfab567da013254002cd45a97fac049ae00afbc43ed0d9961d0c56a3b2382c80ce41c198d
df85f050a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba0a7174d8f43ea71c8e3ca9477691add8d80ac8e0ed89d8d8b572041eef81f4a54a0534ea2e28ec4da3b5b944b18c51ec84a5cf35f5b3343c5fb86521fd2d388f506f85f060a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba034bd04065833536a10c77ee2a43a5371bc6d34837088b861dd9d4b7f44074b59a078807715786a13876d3455716a6b9cb2186b7a4887a5c31160fc877454958616c0".from_hex().unwrap(); + let b1_hash: H256 = + "f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3".into(); + + let db = new_db(); + let bc = new_chain(encoded::Block::new(genesis), db.clone()); + let mut batch = db.key_value().transaction(); + insert_block_batch(&mut batch, &bc, encoded::Block::new(b1), vec![]); + db.key_value().write(batch).unwrap(); + bc.commit(); + + let transactions = bc.transactions(&b1_hash).unwrap(); + assert_eq!(transactions.len(), 7); + for t in transactions { + assert_eq!( + bc.transaction(&bc.transaction_address(&t.hash()).unwrap()) + .unwrap(), + t + ); + } + } + + #[test] + fn test_logs() { + let t1 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 101.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555" + .from_hex() + .unwrap(), + } + .sign(&secret(), None); + let t2 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 102.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555" + .from_hex() + .unwrap(), + } + .sign(&secret(), None); + let t3 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 103.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555" + .from_hex() + .unwrap(), + } + .sign(&secret(), None); + let t4 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 104.into(), + data: 
"601080600c6000396000f3006000355415600957005b60203560003555" + .from_hex() + .unwrap(), + } + .sign(&secret(), None); + let tx_hash1 = t1.hash(); + let tx_hash2 = t2.hash(); + let tx_hash3 = t3.hash(); + let tx_hash4 = t4.hash(); + + let genesis = BlockBuilder::genesis(); + let b1 = genesis.add_block_with_transactions(vec![t1, t2]); + let b2 = b1.add_block_with_transactions(iter::once(t3)); + let b3 = genesis.add_block_with(|| BlockOptions { + transactions: vec![t4.clone()], + difficulty: U256::from(9), + ..Default::default() + }); // Branch block + let b1_hash = b1.last().hash(); + let b1_number = b1.last().number(); + let b2_hash = b2.last().hash(); + let b2_number = b2.last().number(); + let b3_hash = b3.last().hash(); + let b3_number = b3.last().number(); + + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + insert_block( + &db, + &bc, + b1.last().encoded(), + vec![ + Receipt { + outcome: TransactionOutcome::StateRoot(H256::default()), + gas_used: 10_000.into(), + log_bloom: Default::default(), + logs: vec![ + LogEntry { + address: Default::default(), + topics: vec![], + data: vec![1], + }, + LogEntry { + address: Default::default(), + topics: vec![], + data: vec![2], + }, + ], + }, + Receipt { + outcome: TransactionOutcome::StateRoot(H256::default()), + gas_used: 10_000.into(), + log_bloom: Default::default(), + logs: vec![LogEntry { + address: Default::default(), + topics: vec![], + data: vec![3], + }], + }, + ], + ); + insert_block( + &db, + &bc, + b2.last().encoded(), + vec![Receipt { + outcome: TransactionOutcome::StateRoot(H256::default()), + gas_used: 10_000.into(), + log_bloom: Default::default(), + logs: vec![LogEntry { + address: Default::default(), + topics: vec![], + data: vec![4], + }], + }], + ); + insert_block( + &db, + &bc, + b3.last().encoded(), + vec![Receipt { + outcome: TransactionOutcome::StateRoot(H256::default()), + gas_used: 10_000.into(), + log_bloom: Default::default(), + logs: vec![LogEntry { + address: 
Default::default(), + topics: vec![], + data: vec![5], + }], + }], + ); + + // when + let logs1 = bc.logs(vec![b1_hash, b2_hash], |_| true, None); + let logs2 = bc.logs(vec![b1_hash, b2_hash], |_| true, Some(1)); + let logs3 = bc.logs(vec![b3_hash], |_| true, None); + + // then + assert_eq!( + logs1, + vec![ + LocalizedLogEntry { + entry: LogEntry { + address: Default::default(), + topics: vec![], + data: vec![1] + }, + block_hash: b1_hash, + block_number: b1_number, + transaction_hash: tx_hash1, + transaction_index: 0, + transaction_log_index: 0, + log_index: 0, + }, + LocalizedLogEntry { + entry: LogEntry { + address: Default::default(), + topics: vec![], + data: vec![2] + }, + block_hash: b1_hash, + block_number: b1_number, + transaction_hash: tx_hash1, + transaction_index: 0, + transaction_log_index: 1, + log_index: 1, + }, + LocalizedLogEntry { + entry: LogEntry { + address: Default::default(), + topics: vec![], + data: vec![3] + }, + block_hash: b1_hash, + block_number: b1_number, + transaction_hash: tx_hash2, + transaction_index: 1, + transaction_log_index: 0, + log_index: 2, + }, + LocalizedLogEntry { + entry: LogEntry { + address: Default::default(), + topics: vec![], + data: vec![4] + }, + block_hash: b2_hash, + block_number: b2_number, + transaction_hash: tx_hash3, + transaction_index: 0, + transaction_log_index: 0, + log_index: 0, + } + ] + ); + assert_eq!( + logs2, + vec![LocalizedLogEntry { + entry: LogEntry { + address: Default::default(), + topics: vec![], + data: vec![4] + }, + block_hash: b2_hash, + block_number: b2_number, + transaction_hash: tx_hash3, + transaction_index: 0, + transaction_log_index: 0, + log_index: 0, + }] + ); + assert_eq!( + logs3, + vec![LocalizedLogEntry { + entry: LogEntry { + address: Default::default(), + topics: vec![], + data: vec![5] + }, + block_hash: b3_hash, + block_number: b3_number, + transaction_hash: tx_hash4, + transaction_index: 0, + transaction_log_index: 0, + log_index: 0, + }] + ); + } + + #[test] + fn 
test_bloom_filter_simple() { + let bloom_b1: Bloom = "00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000".into(); + + let bloom_b2: Bloom = "00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); + + let bloom_ba: Bloom = "00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); + + let genesis = BlockBuilder::genesis(); + let b1 = genesis.add_block_with(|| BlockOptions { + bloom: bloom_b1.clone(), + difficulty: 9.into(), + ..Default::default() + }); + let b2 = b1.add_block_with_bloom(bloom_b2); + let b3 = b2.add_block_with_bloom(bloom_ba); + + let b1a = genesis.add_block_with_bloom(bloom_ba); + 
let b2a = b1a.add_block_with_bloom(bloom_ba); + + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + + let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); + let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); + assert!(blocks_b1.is_empty()); + assert!(blocks_b2.is_empty()); + + insert_block(&db, &bc, b1.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); + let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); + assert_eq!(blocks_b1, vec![1]); + assert!(blocks_b2.is_empty()); + + insert_block(&db, &bc, b2.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); + let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); + assert_eq!(blocks_b1, vec![1]); + assert_eq!(blocks_b2, vec![2]); + + // hasn't been forked yet + insert_block(&db, &bc, b1a.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); + let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); + let blocks_ba = bc.blocks_with_bloom(Some(&bloom_ba), 0, 5); + assert_eq!(blocks_b1, vec![1]); + assert_eq!(blocks_b2, vec![2]); + assert!(blocks_ba.is_empty()); + + // fork has happened + insert_block(&db, &bc, b2a.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); + let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); + let blocks_ba = bc.blocks_with_bloom(Some(&bloom_ba), 0, 5); + assert!(blocks_b1.is_empty()); + assert!(blocks_b2.is_empty()); + assert_eq!(blocks_ba, vec![1, 2]); + + // fork back + insert_block(&db, &bc, b3.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5); + let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5); + let blocks_ba = bc.blocks_with_bloom(Some(&bloom_ba), 0, 5); + assert_eq!(blocks_b1, vec![1]); + assert_eq!(blocks_b2, vec![2]); + assert_eq!(blocks_ba, vec![3]); + } + + #[test] + fn test_insert_unordered() { + let bloom_b1: 
Bloom = "00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000".into(); + + let bloom_b2: Bloom = "00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); + + let bloom_b3: Bloom = "00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); + + let genesis = BlockBuilder::genesis(); + let b1 = genesis.add_block_with_bloom(bloom_b1); + let b2 = b1.add_block_with_bloom(bloom_b2); + let b3 = b2.add_block_with_bloom(bloom_b3); + let b1_total_difficulty = genesis.last().difficulty() + b1.last().difficulty(); + + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + let mut batch = 
db.key_value().transaction(); + bc.insert_unordered_block( + &mut batch, + b2.last().encoded(), + vec![], + Some(b1_total_difficulty), + false, + false, + ); + bc.commit(); + bc.insert_unordered_block(&mut batch, b3.last().encoded(), vec![], None, true, false); + bc.commit(); + bc.insert_unordered_block(&mut batch, b1.last().encoded(), vec![], None, false, false); + bc.commit(); + db.key_value().write(batch).unwrap(); + + assert_eq!(bc.best_block_hash(), b3.last().hash()); + assert_eq!(bc.block_hash(1).unwrap(), b1.last().hash()); + assert_eq!(bc.block_hash(2).unwrap(), b2.last().hash()); + assert_eq!(bc.block_hash(3).unwrap(), b3.last().hash()); + + let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 3); + let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 3); + let blocks_b3 = bc.blocks_with_bloom(Some(&bloom_b3), 0, 3); + + assert_eq!(blocks_b1, vec![1]); + assert_eq!(blocks_b2, vec![2]); + assert_eq!(blocks_b3, vec![3]); + } + + #[test] + fn test_best_block_update() { + let genesis = BlockBuilder::genesis(); + let next_5 = genesis.add_blocks(5); + let uncle = genesis.add_block_with_difficulty(9); + let generator = BlockGenerator::new(iter::once(next_5)); + + let db = new_db(); + { + let bc = new_chain(genesis.last().encoded(), db.clone()); + + let mut batch = db.key_value().transaction(); + // create a longer fork + for block in generator { + insert_block_batch(&mut batch, &bc, block.encoded(), vec![]); + bc.commit(); + } + + assert_eq!(bc.best_block_number(), 5); + insert_block_batch(&mut batch, &bc, uncle.last().encoded(), vec![]); + db.key_value().write(batch).unwrap(); + bc.commit(); + } + + // re-loading the blockchain should load the correct best block. 
+ let bc = new_chain(genesis.last().encoded(), db); + assert_eq!(bc.best_block_number(), 5); + } + + #[test] + fn epoch_transitions_iter() { + use common_types::engines::epoch::Transition as EpochTransition; + + let genesis = BlockBuilder::genesis(); + let next_5 = genesis.add_blocks(5); + let uncle = genesis.add_block_with_difficulty(9); + let generator = BlockGenerator::new(iter::once(next_5)); + + let db = new_db(); + { + let bc = new_chain(genesis.last().encoded(), db.clone()); + + let mut batch = db.key_value().transaction(); + // create a longer fork + for (i, block) in generator.into_iter().enumerate() { + insert_block_batch(&mut batch, &bc, block.encoded(), vec![]); + bc.insert_epoch_transition( + &mut batch, + i as u64, + EpochTransition { + block_hash: block.hash(), + block_number: i as u64 + 1, + proof: vec![], + }, + ); + bc.commit(); + } + + assert_eq!(bc.best_block_number(), 5); + + insert_block_batch(&mut batch, &bc, uncle.last().encoded(), vec![]); + bc.insert_epoch_transition( + &mut batch, + 999, + EpochTransition { + block_hash: uncle.last().hash(), + block_number: 1, + proof: vec![], + }, + ); + + db.key_value().write(batch).unwrap(); + bc.commit(); + + // epoch 999 not in canonical chain. + assert_eq!( + bc.epoch_transitions().map(|(i, _)| i).collect::>(), + vec![0, 1, 2, 3, 4] + ); + } + + // re-loading the blockchain should load the correct best block. 
+ let bc = new_chain(genesis.last().encoded(), db); + + assert_eq!(bc.best_block_number(), 5); + assert_eq!( + bc.epoch_transitions().map(|(i, _)| i).collect::>(), + vec![0, 1, 2, 3, 4] + ); + } + + #[test] + fn epoch_transition_for() { + use common_types::engines::epoch::Transition as EpochTransition; + + let genesis = BlockBuilder::genesis(); + let fork_7 = genesis.add_blocks_with(7, || BlockOptions { + difficulty: 9.into(), + ..Default::default() + }); + let next_10 = genesis.add_blocks(10); + let fork_generator = BlockGenerator::new(iter::once(fork_7)); + let next_generator = BlockGenerator::new(iter::once(next_10)); + + let db = new_db(); + + let bc = new_chain(genesis.last().encoded(), db.clone()); + + let mut batch = db.key_value().transaction(); + bc.insert_epoch_transition( + &mut batch, + 0, + EpochTransition { + block_hash: bc.genesis_hash(), + block_number: 0, + proof: vec![], + }, + ); + db.key_value().write(batch).unwrap(); + + // set up a chain where we have a canonical chain of 10 blocks + // and a non-canonical fork of 8 from genesis. + let fork_hash = { + for block in fork_generator { + insert_block(&db, &bc, block.encoded(), vec![]); + } + + assert_eq!(bc.best_block_number(), 7); + bc.chain_info().best_block_hash + }; + + for block in next_generator { + insert_block(&db, &bc, block.encoded(), vec![]); + } + + assert_eq!(bc.best_block_number(), 10); + + let mut batch = db.key_value().transaction(); + bc.insert_epoch_transition( + &mut batch, + 4, + EpochTransition { + block_hash: bc.block_hash(4).unwrap(), + block_number: 4, + proof: vec![], + }, + ); + db.key_value().write(batch).unwrap(); + + // blocks where the parent is one of the first 4 will be part of genesis epoch. + for i in 0..4 { + let hash = bc.block_hash(i).unwrap(); + assert_eq!(bc.epoch_transition_for(hash).unwrap().block_number, 0); + } + + // blocks where the parent is the transition at 4 or after will be + // part of that epoch. 
+ for i in 4..11 { + let hash = bc.block_hash(i).unwrap(); + assert_eq!(bc.epoch_transition_for(hash).unwrap().block_number, 4); + } + + let fork_hashes = bc.ancestry_iter(fork_hash).unwrap().collect::>(); + assert_eq!(fork_hashes.len(), 8); + + // non-canonical fork blocks should all have genesis transition + for fork_hash in fork_hashes { + assert_eq!(bc.epoch_transition_for(fork_hash).unwrap().block_number, 0); + } + } + + #[test] + fn tree_rout_with_finalization() { + let genesis = BlockBuilder::genesis(); + let a = genesis.add_block(); + // First branch + let a1 = a.add_block_with_random_transactions(); + let a2 = a1.add_block_with_random_transactions(); + let a3 = a2.add_block_with_random_transactions(); + // Second branch + let b1 = a.add_block_with_random_transactions(); + let b2 = b1.add_block_with_random_transactions(); + + let a_hash = a.last().hash(); + let a1_hash = a1.last().hash(); + let a2_hash = a2.last().hash(); + let a3_hash = a3.last().hash(); + let b2_hash = b2.last().hash(); + + let bootstrap_chain = |blocks: Vec<&BlockBuilder>| { + let db = new_db(); + let bc = new_chain(genesis.last().encoded(), db.clone()); + let mut batch = db.key_value().transaction(); + for block in blocks { + insert_block_batch(&mut batch, &bc, block.last().encoded(), vec![]); + bc.commit(); + } + db.key_value().write(batch).unwrap(); + (db, bc) + }; + + let mark_finalized = |block_hash: H256, db: &Arc, bc: &BlockChain| { + let mut batch = db.key_value().transaction(); + bc.mark_finalized(&mut batch, block_hash).unwrap(); + bc.commit(); + db.key_value().write(batch).unwrap(); + }; + + // Case 1: fork, with finalized common ancestor + { + let (db, bc) = bootstrap_chain(vec![&a, &a1, &a2, &a3, &b1, &b2]); + assert_eq!(bc.best_block_hash(), a3_hash); + assert_eq!(bc.block_hash(2).unwrap(), a1_hash); + + mark_finalized(a_hash, &db, &bc); + assert!( + !bc.tree_route(a3_hash, b2_hash) + .unwrap() + .is_from_route_finalized + ); + assert!( + !bc.tree_route(b2_hash, a3_hash) + 
.unwrap() + .is_from_route_finalized + ); + } + + // Case 2: fork with a finalized block on a branch + { + let (db, bc) = bootstrap_chain(vec![&a, &a1, &a2, &a3, &b1, &b2]); + assert_eq!(bc.best_block_hash(), a3_hash); + assert_eq!(bc.block_hash(2).unwrap(), a1_hash); + + mark_finalized(a2_hash, &db, &bc); + assert!( + bc.tree_route(a3_hash, b2_hash) + .unwrap() + .is_from_route_finalized + ); + assert!( + !bc.tree_route(b2_hash, a3_hash) + .unwrap() + .is_from_route_finalized + ); + } + + // Case 3: no-fork, with a finalized block + { + let (db, bc) = bootstrap_chain(vec![&a, &a1, &a2]); + assert_eq!(bc.best_block_hash(), a2_hash); + + mark_finalized(a1_hash, &db, &bc); + assert!( + !bc.tree_route(a1_hash, a2_hash) + .unwrap() + .is_from_route_finalized + ); + assert!( + !bc.tree_route(a2_hash, a1_hash) + .unwrap() + .is_from_route_finalized + ); + } + } } diff --git a/ethcore/blockchain/src/cache.rs b/ethcore/blockchain/src/cache.rs index f17afbb27..0fa126b26 100644 --- a/ethcore/blockchain/src/cache.rs +++ b/ethcore/blockchain/src/cache.rs @@ -17,19 +17,19 @@ /// Represents blockchain's in-memory cache size in bytes. #[derive(Debug)] pub struct CacheSize { - /// Blocks cache size. - pub blocks: usize, - /// BlockDetails cache size. - pub block_details: usize, - /// Transaction addresses cache size. - pub transaction_addresses: usize, - /// Block receipts size. - pub block_receipts: usize, + /// Blocks cache size. + pub blocks: usize, + /// BlockDetails cache size. + pub block_details: usize, + /// Transaction addresses cache size. + pub transaction_addresses: usize, + /// Block receipts size. + pub block_receipts: usize, } impl CacheSize { - /// Total amount used by the cache. - pub fn total(&self) -> usize { - self.blocks + self.block_details + self.transaction_addresses + self.block_receipts - } + /// Total amount used by the cache. 
+ pub fn total(&self) -> usize { + self.blocks + self.block_details + self.transaction_addresses + self.block_receipts + } } diff --git a/ethcore/blockchain/src/config.rs b/ethcore/blockchain/src/config.rs index 8cd84b593..b09394015 100644 --- a/ethcore/blockchain/src/config.rs +++ b/ethcore/blockchain/src/config.rs @@ -19,17 +19,17 @@ /// Blockchain configuration. #[derive(Debug, PartialEq, Clone)] pub struct Config { - /// Preferred cache size in bytes. - pub pref_cache_size: usize, - /// Maximum cache size in bytes. - pub max_cache_size: usize, + /// Preferred cache size in bytes. + pub pref_cache_size: usize, + /// Maximum cache size in bytes. + pub max_cache_size: usize, } impl Default for Config { - fn default() -> Self { - Config { - pref_cache_size: 1 << 14, - max_cache_size: 1 << 20, - } - } + fn default() -> Self { + Config { + pref_cache_size: 1 << 14, + max_cache_size: 1 << 20, + } + } } diff --git a/ethcore/blockchain/src/generator.rs b/ethcore/blockchain/src/generator.rs index e5161d409..adda9851d 100644 --- a/ethcore/blockchain/src/generator.rs +++ b/ethcore/blockchain/src/generator.rs @@ -16,14 +16,16 @@ //! Blockchain generator for tests. +use ethereum_types::{Bloom, H256, U256}; use std::collections::VecDeque; -use ethereum_types::{U256, H256, Bloom}; -use common_types::encoded; -use common_types::header::Header; -use common_types::transaction::{SignedTransaction, Transaction, Action}; -use common_types::view; -use common_types::views::BlockView; +use common_types::{ + encoded, + header::Header, + transaction::{Action, SignedTransaction, Transaction}, + view, + views::BlockView, +}; use keccak_hash::keccak; use rlp::encode; use rlp_derive::RlpEncodable; @@ -32,247 +34,260 @@ use triehash_ethereum::ordered_trie_root; /// Helper structure, used for encoding blocks. #[derive(Default, Clone, RlpEncodable)] pub struct Block { - /// Block header - pub header: Header, - /// Block transactions - pub transactions: Vec, - /// Block uncles - pub uncles: Vec
+ /// Block header + pub header: Header, + /// Block transactions + pub transactions: Vec, + /// Block uncles + pub uncles: Vec
, } impl Block { - /// Get a copy of the header - #[inline] - pub fn header(&self) -> Header { - self.header.clone() - } + /// Get a copy of the header + #[inline] + pub fn header(&self) -> Header { + self.header.clone() + } - /// Get block hash - #[inline] - pub fn hash(&self) -> H256 { - view!(BlockView, &self.encoded().raw()).header_view().hash() - } + /// Get block hash + #[inline] + pub fn hash(&self) -> H256 { + view!(BlockView, &self.encoded().raw()).header_view().hash() + } - /// Get block number - #[inline] - pub fn number(&self) -> u64 { - self.header.number() - } + /// Get block number + #[inline] + pub fn number(&self) -> u64 { + self.header.number() + } - /// Get RLP encoding of this block - #[inline] - pub fn encoded(&self) -> encoded::Block { - encoded::Block::new(encode(self)) - } + /// Get RLP encoding of this block + #[inline] + pub fn encoded(&self) -> encoded::Block { + encoded::Block::new(encode(self)) + } - /// Get block difficulty - #[inline] - pub fn difficulty(&self) -> U256 { - *self.header.difficulty() - } + /// Get block difficulty + #[inline] + pub fn difficulty(&self) -> U256 { + *self.header.difficulty() + } } /// Specify block options for generator #[derive(Debug)] pub struct BlockOptions { - /// Difficulty - pub difficulty: U256, - /// Set bloom filter - pub bloom: Bloom, - /// Transactions included in blocks - pub transactions: Vec, + /// Difficulty + pub difficulty: U256, + /// Set bloom filter + pub bloom: Bloom, + /// Transactions included in blocks + pub transactions: Vec, } impl Default for BlockOptions { - fn default() -> Self { - BlockOptions { - difficulty: 10.into(), - bloom: Bloom::default(), - transactions: Vec::new(), - } - } + fn default() -> Self { + BlockOptions { + difficulty: 10.into(), + bloom: Bloom::default(), + transactions: Vec::new(), + } + } } /// Utility to create blocks #[derive(Clone)] pub struct BlockBuilder { - blocks: VecDeque, + blocks: VecDeque, } impl BlockBuilder { - /// Create new BlockBuilder 
starting at genesis. - pub fn genesis() -> Self { - let mut blocks = VecDeque::with_capacity(1); - blocks.push_back(Block::default()); + /// Create new BlockBuilder starting at genesis. + pub fn genesis() -> Self { + let mut blocks = VecDeque::with_capacity(1); + blocks.push_back(Block::default()); - BlockBuilder { - blocks, - } - } + BlockBuilder { blocks } + } - /// Add new block with default options. - #[inline] - pub fn add_block(&self) -> Self { - self.add_block_with(|| BlockOptions::default()) - } + /// Add new block with default options. + #[inline] + pub fn add_block(&self) -> Self { + self.add_block_with(|| BlockOptions::default()) + } - /// Add `count` number of blocks with default options. - #[inline] - pub fn add_blocks(&self, count: usize) -> Self { - self.add_blocks_with(count, || BlockOptions::default()) - } + /// Add `count` number of blocks with default options. + #[inline] + pub fn add_blocks(&self, count: usize) -> Self { + self.add_blocks_with(count, || BlockOptions::default()) + } - /// Add block with specified options. - #[inline] - pub fn add_block_with(&self, get_metadata: T) -> Self where T: Fn() -> BlockOptions { - self.add_blocks_with(1, get_metadata) - } + /// Add block with specified options. + #[inline] + pub fn add_block_with(&self, get_metadata: T) -> Self + where + T: Fn() -> BlockOptions, + { + self.add_blocks_with(1, get_metadata) + } - /// Add a block with given difficulty - #[inline] - pub fn add_block_with_difficulty(&self, difficulty: T) -> Self where T: Into { - let difficulty = difficulty.into(); - self.add_blocks_with(1, move || BlockOptions { - difficulty, - ..Default::default() - }) - } + /// Add a block with given difficulty + #[inline] + pub fn add_block_with_difficulty(&self, difficulty: T) -> Self + where + T: Into, + { + let difficulty = difficulty.into(); + self.add_blocks_with(1, move || BlockOptions { + difficulty, + ..Default::default() + }) + } - /// Add a block with randomly generated transactions. 
- #[inline] - pub fn add_block_with_random_transactions(&self) -> Self { - // Maximum of ~50 transactions - let count = rand::random::() as usize / 5; - let transactions = std::iter::repeat_with(|| { - let data_len = rand::random::(); - let data = std::iter::repeat_with(|| rand::random::()) - .take(data_len as usize) - .collect::>(); - Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 100.into(), - data, - }.sign(&keccak("").into(), None) - }).take(count); + /// Add a block with randomly generated transactions. + #[inline] + pub fn add_block_with_random_transactions(&self) -> Self { + // Maximum of ~50 transactions + let count = rand::random::() as usize / 5; + let transactions = std::iter::repeat_with(|| { + let data_len = rand::random::(); + let data = std::iter::repeat_with(|| rand::random::()) + .take(data_len as usize) + .collect::>(); + Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data, + } + .sign(&keccak("").into(), None) + }) + .take(count); - self.add_block_with_transactions(transactions) - } + self.add_block_with_transactions(transactions) + } - /// Add a block with given transactions. - #[inline] - pub fn add_block_with_transactions(&self, transactions: T) -> Self - where T: IntoIterator { - let transactions = transactions.into_iter().collect::>(); - self.add_blocks_with(1, || BlockOptions { - transactions: transactions.clone(), - ..Default::default() - }) - } + /// Add a block with given transactions. + #[inline] + pub fn add_block_with_transactions(&self, transactions: T) -> Self + where + T: IntoIterator, + { + let transactions = transactions.into_iter().collect::>(); + self.add_blocks_with(1, || BlockOptions { + transactions: transactions.clone(), + ..Default::default() + }) + } - /// Add a block with given bloom filter. 
- #[inline] - pub fn add_block_with_bloom(&self, bloom: Bloom) -> Self { - self.add_blocks_with(1, move || BlockOptions { - bloom, - ..Default::default() - }) - } + /// Add a block with given bloom filter. + #[inline] + pub fn add_block_with_bloom(&self, bloom: Bloom) -> Self { + self.add_blocks_with(1, move || BlockOptions { + bloom, + ..Default::default() + }) + } - /// Add a bunch of blocks with given metadata. - pub fn add_blocks_with(&self, count: usize, get_metadata: T) -> Self where T: Fn() -> BlockOptions { - assert!(count > 0, "There must be at least 1 block"); - let mut parent_hash = self.last().hash(); - let mut parent_number = self.last().number(); - let mut blocks = VecDeque::with_capacity(count); - for _ in 0..count { - let mut block = Block::default(); - let metadata = get_metadata(); - let block_number = parent_number + 1; - let transactions = metadata.transactions; - let transactions_root = ordered_trie_root(transactions.iter().map(rlp::encode)); + /// Add a bunch of blocks with given metadata. 
+ pub fn add_blocks_with(&self, count: usize, get_metadata: T) -> Self + where + T: Fn() -> BlockOptions, + { + assert!(count > 0, "There must be at least 1 block"); + let mut parent_hash = self.last().hash(); + let mut parent_number = self.last().number(); + let mut blocks = VecDeque::with_capacity(count); + for _ in 0..count { + let mut block = Block::default(); + let metadata = get_metadata(); + let block_number = parent_number + 1; + let transactions = metadata.transactions; + let transactions_root = ordered_trie_root(transactions.iter().map(rlp::encode)); - block.header.set_parent_hash(parent_hash); - block.header.set_number(block_number); - block.header.set_log_bloom(metadata.bloom); - block.header.set_difficulty(metadata.difficulty); - block.header.set_transactions_root(transactions_root); - block.transactions = transactions; + block.header.set_parent_hash(parent_hash); + block.header.set_number(block_number); + block.header.set_log_bloom(metadata.bloom); + block.header.set_difficulty(metadata.difficulty); + block.header.set_transactions_root(transactions_root); + block.transactions = transactions; - parent_hash = block.hash(); - parent_number = block_number; + parent_hash = block.hash(); + parent_number = block_number; - blocks.push_back(block); - } + blocks.push_back(block); + } - BlockBuilder { - blocks, - } - } + BlockBuilder { blocks } + } - /// Get a reference to the last generated block. - #[inline] - pub fn last(&self) -> &Block { - self.blocks.back().expect("There is always at least 1 block") - } + /// Get a reference to the last generated block. + #[inline] + pub fn last(&self) -> &Block { + self.blocks + .back() + .expect("There is always at least 1 block") + } } /// Generates a blockchain from given block builders (blocks will be concatenated). #[derive(Clone)] pub struct BlockGenerator { - builders: VecDeque, + builders: VecDeque, } impl BlockGenerator { - /// Create new block generator. 
- pub fn new(builders: T) -> Self where T: IntoIterator { - BlockGenerator { - builders: builders.into_iter().collect(), - } - } + /// Create new block generator. + pub fn new(builders: T) -> Self + where + T: IntoIterator, + { + BlockGenerator { + builders: builders.into_iter().collect(), + } + } } impl Iterator for BlockGenerator { - type Item = Block; + type Item = Block; - fn next(&mut self) -> Option { - loop { - match self.builders.front_mut() { - Some(ref mut builder) => { - if let Some(block) = builder.blocks.pop_front() { - return Some(block); - } - }, - None => return None, - } - self.builders.pop_front(); - } - - } + fn next(&mut self) -> Option { + loop { + match self.builders.front_mut() { + Some(ref mut builder) => { + if let Some(block) = builder.blocks.pop_front() { + return Some(block); + } + } + None => return None, + } + self.builders.pop_front(); + } + } } #[cfg(test)] mod tests { - use super::{BlockBuilder, BlockOptions, BlockGenerator}; + use super::{BlockBuilder, BlockGenerator, BlockOptions}; - #[test] - fn test_block_builder() { - let genesis = BlockBuilder::genesis(); - let block_1 = genesis.add_block(); - let block_1001 = block_1.add_blocks(1000); - let block_1002 = block_1001.add_block_with(|| BlockOptions::default()); - let generator = BlockGenerator::new(vec![genesis, block_1, block_1001, block_1002]); - assert_eq!(generator.count(), 1003); - } + #[test] + fn test_block_builder() { + let genesis = BlockBuilder::genesis(); + let block_1 = genesis.add_block(); + let block_1001 = block_1.add_blocks(1000); + let block_1002 = block_1001.add_block_with(|| BlockOptions::default()); + let generator = BlockGenerator::new(vec![genesis, block_1, block_1001, block_1002]); + assert_eq!(generator.count(), 1003); + } - #[test] - fn test_block_builder_fork() { - let genesis = BlockBuilder::genesis(); - let block_10a = genesis.add_blocks(10); - let block_11b = genesis.add_blocks(11); - assert_eq!(block_10a.last().number(), 10); - 
assert_eq!(block_11b.last().number(), 11); - } + #[test] + fn test_block_builder_fork() { + let genesis = BlockBuilder::genesis(); + let block_10a = genesis.add_blocks(10); + let block_11b = genesis.add_blocks(11); + assert_eq!(block_10a.last().number(), 10); + assert_eq!(block_11b.last().number(), 11); + } } diff --git a/ethcore/blockchain/src/import_route.rs b/ethcore/blockchain/src/import_route.rs index 8c635b4e5..9146f8f13 100644 --- a/ethcore/blockchain/src/import_route.rs +++ b/ethcore/blockchain/src/import_route.rs @@ -16,120 +16,132 @@ //! Import route. -use ethereum_types::H256; use crate::block_info::{BlockInfo, BlockLocation}; +use ethereum_types::H256; /// Import route for newly inserted block. #[derive(Debug, PartialEq, Clone)] pub struct ImportRoute { - /// Blocks that were invalidated by new block. - pub retracted: Vec, - /// Blocks that were validated by new block. - pub enacted: Vec, - /// Blocks which are neither retracted nor enacted. - pub omitted: Vec, + /// Blocks that were invalidated by new block. + pub retracted: Vec, + /// Blocks that were validated by new block. + pub enacted: Vec, + /// Blocks which are neither retracted nor enacted. + pub omitted: Vec, } impl ImportRoute { - /// Empty import route. - pub fn none() -> Self { - ImportRoute { - retracted: vec![], - enacted: vec![], - omitted: vec![], - } - } + /// Empty import route. 
+ pub fn none() -> Self { + ImportRoute { + retracted: vec![], + enacted: vec![], + omitted: vec![], + } + } } impl From for ImportRoute { - fn from(info: BlockInfo) -> ImportRoute { - match info.location { - BlockLocation::CanonChain => ImportRoute { - retracted: vec![], - enacted: vec![info.hash], - omitted: vec![], - }, - BlockLocation::Branch => ImportRoute { - retracted: vec![], - enacted: vec![], - omitted: vec![info.hash], - }, - BlockLocation::BranchBecomingCanonChain(mut data) => { - data.enacted.push(info.hash); - ImportRoute { - retracted: data.retracted, - enacted: data.enacted, - omitted: vec![], - } - } - } - } + fn from(info: BlockInfo) -> ImportRoute { + match info.location { + BlockLocation::CanonChain => ImportRoute { + retracted: vec![], + enacted: vec![info.hash], + omitted: vec![], + }, + BlockLocation::Branch => ImportRoute { + retracted: vec![], + enacted: vec![], + omitted: vec![info.hash], + }, + BlockLocation::BranchBecomingCanonChain(mut data) => { + data.enacted.push(info.hash); + ImportRoute { + retracted: data.retracted, + enacted: data.enacted, + omitted: vec![], + } + } + } + } } #[cfg(test)] mod tests { - use ethereum_types::{H256, U256}; - use crate::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}; - use super::ImportRoute; + use super::ImportRoute; + use crate::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}; + use ethereum_types::{H256, U256}; - #[test] - fn import_route_none() { - assert_eq!(ImportRoute::none(), ImportRoute { - enacted: vec![], - retracted: vec![], - omitted: vec![], - }); - } + #[test] + fn import_route_none() { + assert_eq!( + ImportRoute::none(), + ImportRoute { + enacted: vec![], + retracted: vec![], + omitted: vec![], + } + ); + } - #[test] - fn import_route_branch() { - let info = BlockInfo { - hash: H256::from(U256::from(1)), - number: 0, - total_difficulty: U256::from(0), - location: BlockLocation::Branch, - }; + #[test] + fn import_route_branch() { + let info = 
BlockInfo { + hash: H256::from(U256::from(1)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::Branch, + }; - assert_eq!(ImportRoute::from(info), ImportRoute { - retracted: vec![], - enacted: vec![], - omitted: vec![H256::from(U256::from(1))], - }); - } + assert_eq!( + ImportRoute::from(info), + ImportRoute { + retracted: vec![], + enacted: vec![], + omitted: vec![H256::from(U256::from(1))], + } + ); + } - #[test] - fn import_route_canon_chain() { - let info = BlockInfo { - hash: H256::from(U256::from(1)), - number: 0, - total_difficulty: U256::from(0), - location: BlockLocation::CanonChain, - }; + #[test] + fn import_route_canon_chain() { + let info = BlockInfo { + hash: H256::from(U256::from(1)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::CanonChain, + }; - assert_eq!(ImportRoute::from(info), ImportRoute { - retracted: vec![], - enacted: vec![H256::from(U256::from(1))], - omitted: vec![], - }); - } + assert_eq!( + ImportRoute::from(info), + ImportRoute { + retracted: vec![], + enacted: vec![H256::from(U256::from(1))], + omitted: vec![], + } + ); + } - #[test] - fn import_route_branch_becoming_canon_chain() { - let info = BlockInfo { - hash: H256::from(U256::from(2)), - number: 0, - total_difficulty: U256::from(0), - location: BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData { - ancestor: H256::from(U256::from(0)), - enacted: vec![H256::from(U256::from(1))], - retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], - }) - }; + #[test] + fn import_route_branch_becoming_canon_chain() { + let info = BlockInfo { + hash: H256::from(U256::from(2)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData { + ancestor: H256::from(U256::from(0)), + enacted: vec![H256::from(U256::from(1))], + retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + }), + }; - assert_eq!(ImportRoute::from(info), 
ImportRoute { - retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], - enacted: vec![H256::from(U256::from(1)), H256::from(U256::from(2))], - omitted: vec![], - }); - } + assert_eq!( + ImportRoute::from(info), + ImportRoute { + retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + enacted: vec![H256::from(U256::from(1)), H256::from(U256::from(2))], + omitted: vec![], + } + ); + } } diff --git a/ethcore/blockchain/src/lib.rs b/ethcore/blockchain/src/lib.rs index 3f07a6d80..3314739b9 100644 --- a/ethcore/blockchain/src/lib.rs +++ b/ethcore/blockchain/src/lib.rs @@ -28,10 +28,12 @@ mod update; pub mod generator; -pub use self::blockchain::{BlockProvider, BlockChain, BlockChainDB, BlockChainDBHandler}; -pub use self::cache::CacheSize; -pub use self::config::Config; -pub use self::import_route::ImportRoute; -pub use self::update::ExtrasInsert; -pub use ethcore_db::keys::{BlockReceipts, BlockDetails, TransactionAddress, BlockNumberKey}; +pub use self::{ + blockchain::{BlockChain, BlockChainDB, BlockChainDBHandler, BlockProvider}, + cache::CacheSize, + config::Config, + import_route::ImportRoute, + update::ExtrasInsert, +}; pub use common_types::tree_route::TreeRoute; +pub use ethcore_db::keys::{BlockDetails, BlockNumberKey, BlockReceipts, TransactionAddress}; diff --git a/ethcore/blockchain/src/update.rs b/ethcore/blockchain/src/update.rs index 959f55fdf..0adcd7183 100644 --- a/ethcore/blockchain/src/update.rs +++ b/ethcore/blockchain/src/update.rs @@ -16,36 +16,34 @@ use std::collections::HashMap; -use common_types::BlockNumber; -use common_types::encoded::Block; -use common_types::engines::ForkChoice; +use common_types::{encoded::Block, engines::ForkChoice, BlockNumber}; use ethcore_db::keys::{BlockDetails, BlockReceipts, TransactionAddress}; -use ethereum_types::{H256, Bloom}; +use ethereum_types::{Bloom, H256}; use crate::block_info::BlockInfo; /// Block extras update info. pub struct ExtrasUpdate { - /// Block info. 
- pub info: BlockInfo, - /// Current block uncompressed rlp bytes - pub block: Block, - /// Modified block hashes. - pub block_hashes: HashMap, - /// Modified block details. - pub block_details: HashMap, - /// Modified block receipts. - pub block_receipts: HashMap, - /// Modified blocks blooms. - pub blocks_blooms: Option<(u64, Vec)>, - /// Modified transaction addresses (None signifies removed transactions). - pub transactions_addresses: HashMap>, + /// Block info. + pub info: BlockInfo, + /// Current block uncompressed rlp bytes + pub block: Block, + /// Modified block hashes. + pub block_hashes: HashMap, + /// Modified block details. + pub block_details: HashMap, + /// Modified block receipts. + pub block_receipts: HashMap, + /// Modified blocks blooms. + pub blocks_blooms: Option<(u64, Vec)>, + /// Modified transaction addresses (None signifies removed transactions). + pub transactions_addresses: HashMap>, } /// Extra information in block insertion. pub struct ExtrasInsert { - /// The primitive fork choice before applying finalization rules. - pub fork_choice: ForkChoice, - /// Is the inserted block considered finalized. - pub is_finalized: bool, + /// The primitive fork choice before applying finalization rules. + pub fork_choice: ForkChoice, + /// Is the inserted block considered finalized. 
+ pub is_finalized: bool, } diff --git a/ethcore/builtin/src/lib.rs b/ethcore/builtin/src/lib.rs index f03e85d0f..e2155e1b1 100644 --- a/ethcore/builtin/src/lib.rs +++ b/ethcore/builtin/src/lib.rs @@ -19,35 +19,35 @@ #![warn(missing_docs)] use std::{ - cmp::{max, min}, - collections::BTreeMap, - convert::{TryFrom, TryInto}, - io::{self, Read, Cursor}, - mem::size_of, - str::FromStr + cmp::{max, min}, + collections::BTreeMap, + convert::{TryFrom, TryInto}, + io::{self, Cursor, Read}, + mem::size_of, + str::FromStr, }; use byteorder::{BigEndian, LittleEndian, ReadBytesExt}; +use eip_152::compress; use ethereum_types::{H256, U256}; use ethjson; -use ethkey::{Signature, recover as ec_recover}; -use eip_152::compress; +use ethkey::{recover as ec_recover, Signature}; use keccak_hash::keccak; -use log::{warn, trace}; -use num::{BigUint, Zero, One}; +use log::{trace, warn}; +use num::{BigUint, One, Zero}; use parity_bytes::BytesRef; use parity_crypto::digest; /// Native implementation of a built-in contract. pub trait Implementation: Send + Sync { - /// execute this built-in on the given input, writing to the given output. - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str>; + /// execute this built-in on the given input, writing to the given output. + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str>; } /// A gas pricing scheme for built-in contracts. trait Pricer: Send + Sync { - /// The gas cost of running this built-in for the given input data at block number `at` - fn cost(&self, input: &[u8]) -> U256; + /// The gas cost of running this built-in for the given input data at block number `at` + fn cost(&self, input: &[u8]) -> U256; } /// Pricing for the Blake2 compression function (aka "F"). 
@@ -56,155 +56,164 @@ trait Pricer: Send + Sync { pub type Blake2FPricer = u64; impl Pricer for Blake2FPricer { - fn cost(&self, input: &[u8]) -> U256 { - const FOUR: usize = std::mem::size_of::(); - // Returning zero if the conversion fails is fine because `execute()` will check the length - // and bail with the appropriate error. - if input.len() < FOUR { - return U256::zero(); - } - let (rounds_bytes, _) = input.split_at(FOUR); - let rounds = u32::from_be_bytes(rounds_bytes.try_into().unwrap_or([0u8; 4])); - U256::from(*self as u64 * rounds as u64) - } + fn cost(&self, input: &[u8]) -> U256 { + const FOUR: usize = std::mem::size_of::(); + // Returning zero if the conversion fails is fine because `execute()` will check the length + // and bail with the appropriate error. + if input.len() < FOUR { + return U256::zero(); + } + let (rounds_bytes, _) = input.split_at(FOUR); + let rounds = u32::from_be_bytes(rounds_bytes.try_into().unwrap_or([0u8; 4])); + U256::from(*self as u64 * rounds as u64) + } } /// Pricing model #[derive(Debug)] enum Pricing { - AltBn128Pairing(AltBn128PairingPricer), - AltBn128ConstOperations(AltBn128ConstOperations), - Blake2F(Blake2FPricer), - Linear(Linear), - Modexp(ModexpPricer), + AltBn128Pairing(AltBn128PairingPricer), + AltBn128ConstOperations(AltBn128ConstOperations), + Blake2F(Blake2FPricer), + Linear(Linear), + Modexp(ModexpPricer), } impl Pricer for Pricing { - fn cost(&self, input: &[u8]) -> U256 { - match self { - Pricing::AltBn128Pairing(inner) => inner.cost(input), - Pricing::AltBn128ConstOperations(inner) => inner.cost(input), - Pricing::Blake2F(inner) => inner.cost(input), - Pricing::Linear(inner) => inner.cost(input), - Pricing::Modexp(inner) => inner.cost(input), - } - } + fn cost(&self, input: &[u8]) -> U256 { + match self { + Pricing::AltBn128Pairing(inner) => inner.cost(input), + Pricing::AltBn128ConstOperations(inner) => inner.cost(input), + Pricing::Blake2F(inner) => inner.cost(input), + Pricing::Linear(inner) => 
inner.cost(input), + Pricing::Modexp(inner) => inner.cost(input), + } + } } /// A linear pricing model. This computes a price using a base cost and a cost per-word. #[derive(Debug)] struct Linear { - base: u64, - word: u64, + base: u64, + word: u64, } /// A special pricing model for modular exponentiation. #[derive(Debug)] struct ModexpPricer { - divisor: u64, + divisor: u64, } impl Pricer for Linear { - fn cost(&self, input: &[u8]) -> U256 { - U256::from(self.base) + U256::from(self.word) * U256::from((input.len() + 31) / 32) - } + fn cost(&self, input: &[u8]) -> U256 { + U256::from(self.base) + U256::from(self.word) * U256::from((input.len() + 31) / 32) + } } /// alt_bn128 pairing price #[derive(Debug, Copy, Clone)] struct AltBn128PairingPrice { - base: u64, - pair: u64, + base: u64, + pair: u64, } /// alt_bn128_pairing pricing model. This computes a price using a base cost and a cost per pair. #[derive(Debug)] struct AltBn128PairingPricer { - price: AltBn128PairingPrice, + price: AltBn128PairingPrice, } /// Pricing for constant alt_bn128 operations (ECADD and ECMUL) #[derive(Debug, Copy, Clone)] pub struct AltBn128ConstOperations { - /// Fixed price. - pub price: u64, + /// Fixed price. 
+ pub price: u64, } impl Pricer for AltBn128ConstOperations { - fn cost(&self, _input: &[u8]) -> U256 { - self.price.into() - } + fn cost(&self, _input: &[u8]) -> U256 { + self.price.into() + } } impl Pricer for AltBn128PairingPricer { - fn cost(&self, input: &[u8]) -> U256 { - U256::from(self.price.base) + U256::from(self.price.pair) * U256::from(input.len() / 192) - } + fn cost(&self, input: &[u8]) -> U256 { + U256::from(self.price.base) + U256::from(self.price.pair) * U256::from(input.len() / 192) + } } impl Pricer for ModexpPricer { - fn cost(&self, input: &[u8]) -> U256 { - let mut reader = input.chain(io::repeat(0)); - let mut buf = [0; 32]; + fn cost(&self, input: &[u8]) -> U256 { + let mut reader = input.chain(io::repeat(0)); + let mut buf = [0; 32]; - // read lengths as U256 here for accurate gas calculation. - let mut read_len = || { - reader.read_exact(&mut buf[..]).expect("reading from zero-extended memory cannot fail; qed"); - U256::from(H256::from_slice(&buf[..])) - }; - let base_len = read_len(); - let exp_len = read_len(); - let mod_len = read_len(); + // read lengths as U256 here for accurate gas calculation. 
+ let mut read_len = || { + reader + .read_exact(&mut buf[..]) + .expect("reading from zero-extended memory cannot fail; qed"); + U256::from(H256::from_slice(&buf[..])) + }; + let base_len = read_len(); + let exp_len = read_len(); + let mod_len = read_len(); - if mod_len.is_zero() && base_len.is_zero() { - return U256::zero() - } + if mod_len.is_zero() && base_len.is_zero() { + return U256::zero(); + } - let max_len = U256::from(u32::max_value() / 2); - if base_len > max_len || mod_len > max_len || exp_len > max_len { - return U256::max_value(); - } - let (base_len, exp_len, mod_len) = (base_len.low_u64(), exp_len.low_u64(), mod_len.low_u64()); + let max_len = U256::from(u32::max_value() / 2); + if base_len > max_len || mod_len > max_len || exp_len > max_len { + return U256::max_value(); + } + let (base_len, exp_len, mod_len) = + (base_len.low_u64(), exp_len.low_u64(), mod_len.low_u64()); - let m = max(mod_len, base_len); - // read fist 32-byte word of the exponent. - let exp_low = if base_len + 96 >= input.len() as u64 { - U256::zero() - } else { - buf.iter_mut().for_each(|b| *b = 0); - let mut reader = input[(96 + base_len as usize)..].chain(io::repeat(0)); - let len = min(exp_len, 32) as usize; - reader.read_exact(&mut buf[(32 - len)..]).expect("reading from zero-extended memory cannot fail; qed"); - U256::from(H256::from_slice(&buf[..])) - }; + let m = max(mod_len, base_len); + // read fist 32-byte word of the exponent. 
+ let exp_low = if base_len + 96 >= input.len() as u64 { + U256::zero() + } else { + buf.iter_mut().for_each(|b| *b = 0); + let mut reader = input[(96 + base_len as usize)..].chain(io::repeat(0)); + let len = min(exp_len, 32) as usize; + reader + .read_exact(&mut buf[(32 - len)..]) + .expect("reading from zero-extended memory cannot fail; qed"); + U256::from(H256::from_slice(&buf[..])) + }; - let adjusted_exp_len = Self::adjusted_exp_len(exp_len, exp_low); + let adjusted_exp_len = Self::adjusted_exp_len(exp_len, exp_low); - let (gas, overflow) = Self::mult_complexity(m).overflowing_mul(max(adjusted_exp_len, 1)); - if overflow { - return U256::max_value(); - } - (gas / self.divisor as u64).into() - } + let (gas, overflow) = Self::mult_complexity(m).overflowing_mul(max(adjusted_exp_len, 1)); + if overflow { + return U256::max_value(); + } + (gas / self.divisor as u64).into() + } } impl ModexpPricer { - fn adjusted_exp_len(len: u64, exp_low: U256) -> u64 { - let bit_index = if exp_low.is_zero() { 0 } else { (255 - exp_low.leading_zeros()) as u64 }; - if len <= 32 { - bit_index - } else { - 8 * (len - 32) + bit_index - } - } + fn adjusted_exp_len(len: u64, exp_low: U256) -> u64 { + let bit_index = if exp_low.is_zero() { + 0 + } else { + (255 - exp_low.leading_zeros()) as u64 + }; + if len <= 32 { + bit_index + } else { + 8 * (len - 32) + bit_index + } + } - fn mult_complexity(x: u64) -> u64 { - match x { - x if x <= 64 => x * x, - x if x <= 1024 => (x * x) / 4 + 96 * x - 3072, - x => (x * x) / 16 + 480 * x - 199_680, - } - } + fn mult_complexity(x: u64) -> u64 { + match x { + x if x <= 64 => x * x, + x if x <= 1024 => (x * x) / 4 + 96 * x - 3072, + x => (x * x) / 16 + 480 * x - 199_680, + } + } } /// Pricing scheme, execution definition, and activation block for a built-in contract. 
@@ -212,150 +221,146 @@ impl ModexpPricer { /// Call `cost` to compute cost for the given input, `execute` to execute the contract /// on the given input, and `is_active` to determine whether the contract is active. pub struct Builtin { - pricer: BTreeMap, - native: EthereumBuiltin, + pricer: BTreeMap, + native: EthereumBuiltin, } impl Builtin { - /// Simple forwarder for cost. - /// - /// Return the cost of the most recently activated pricer at the current block number. - /// - /// If no pricer is actived `zero` is returned - /// - /// If multiple `activation_at` has the same block number the last one is used - /// (follows `BTreeMap` semantics). - #[inline] - pub fn cost(&self, input: &[u8], at: u64) -> U256 { - if let Some((_, pricer)) = self.pricer.range(0..=at).last() { - pricer.cost(input) - } else { - U256::zero() - } - } + /// Simple forwarder for cost. + /// + /// Return the cost of the most recently activated pricer at the current block number. + /// + /// If no pricer is actived `zero` is returned + /// + /// If multiple `activation_at` has the same block number the last one is used + /// (follows `BTreeMap` semantics). + #[inline] + pub fn cost(&self, input: &[u8], at: u64) -> U256 { + if let Some((_, pricer)) = self.pricer.range(0..=at).last() { + pricer.cost(input) + } else { + U256::zero() + } + } - /// Simple forwarder for execute. - #[inline] - pub fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - self.native.execute(input, output) - } + /// Simple forwarder for execute. + #[inline] + pub fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + self.native.execute(input, output) + } - /// Whether the builtin is activated at the given block number. - #[inline] - pub fn is_active(&self, at: u64) -> bool { - self.pricer.range(0..=at).last().is_some() - } + /// Whether the builtin is activated at the given block number. 
+ #[inline] + pub fn is_active(&self, at: u64) -> bool { + self.pricer.range(0..=at).last().is_some() + } } impl TryFrom for Builtin { - type Error = String; + type Error = String; - fn try_from(b: ethjson::spec::builtin::Builtin) -> Result { - let native = EthereumBuiltin::from_str(&b.name)?; - let mut pricer = BTreeMap::new(); + fn try_from(b: ethjson::spec::builtin::Builtin) -> Result { + let native = EthereumBuiltin::from_str(&b.name)?; + let mut pricer = BTreeMap::new(); - for (activate_at, p) in b.pricing { - pricer.insert(activate_at, p.price.into()); - } + for (activate_at, p) in b.pricing { + pricer.insert(activate_at, p.price.into()); + } - Ok(Self { pricer, native }) - } + Ok(Self { pricer, native }) + } } impl From for Pricing { - fn from(pricing: ethjson::spec::builtin::Pricing) -> Self { - match pricing { - ethjson::spec::builtin::Pricing::Blake2F { gas_per_round } => { - Pricing::Blake2F(gas_per_round) - } - ethjson::spec::builtin::Pricing::Linear(linear) => { - Pricing::Linear(Linear { - base: linear.base, - word: linear.word, - }) - } - ethjson::spec::builtin::Pricing::Modexp(exp) => { - Pricing::Modexp(ModexpPricer { - divisor: if exp.divisor == 0 { - warn!(target: "builtin", "Zero modexp divisor specified. 
Falling back to default: 10."); - 10 - } else { - exp.divisor - } - }) - } - ethjson::spec::builtin::Pricing::AltBn128Pairing(pricer) => { - Pricing::AltBn128Pairing(AltBn128PairingPricer { - price: AltBn128PairingPrice { - base: pricer.base, - pair: pricer.pair, - }, - }) - } - ethjson::spec::builtin::Pricing::AltBn128ConstOperations(pricer) => { - Pricing::AltBn128ConstOperations(AltBn128ConstOperations { - price: pricer.price - }) - } - } - } + fn from(pricing: ethjson::spec::builtin::Pricing) -> Self { + match pricing { + ethjson::spec::builtin::Pricing::Blake2F { gas_per_round } => { + Pricing::Blake2F(gas_per_round) + } + ethjson::spec::builtin::Pricing::Linear(linear) => Pricing::Linear(Linear { + base: linear.base, + word: linear.word, + }), + ethjson::spec::builtin::Pricing::Modexp(exp) => Pricing::Modexp(ModexpPricer { + divisor: if exp.divisor == 0 { + warn!(target: "builtin", "Zero modexp divisor specified. Falling back to default: 10."); + 10 + } else { + exp.divisor + }, + }), + ethjson::spec::builtin::Pricing::AltBn128Pairing(pricer) => { + Pricing::AltBn128Pairing(AltBn128PairingPricer { + price: AltBn128PairingPrice { + base: pricer.base, + pair: pricer.pair, + }, + }) + } + ethjson::spec::builtin::Pricing::AltBn128ConstOperations(pricer) => { + Pricing::AltBn128ConstOperations(AltBn128ConstOperations { + price: pricer.price, + }) + } + } + } } /// Ethereum builtins: enum EthereumBuiltin { - /// The identity function - Identity(Identity), - /// ec recovery - EcRecover(EcRecover), - /// sha256 - Sha256(Sha256), - /// ripemd160 - Ripemd160(Ripemd160), - /// modexp (EIP 198) - Modexp(Modexp), - /// alt_bn128_add - Bn128Add(Bn128Add), - /// alt_bn128_mul - Bn128Mul(Bn128Mul), - /// alt_bn128_pairing - Bn128Pairing(Bn128Pairing), - /// blake2_f (The Blake2 compression function F, EIP-152) - Blake2F(Blake2F) + /// The identity function + Identity(Identity), + /// ec recovery + EcRecover(EcRecover), + /// sha256 + Sha256(Sha256), + /// ripemd160 + 
Ripemd160(Ripemd160), + /// modexp (EIP 198) + Modexp(Modexp), + /// alt_bn128_add + Bn128Add(Bn128Add), + /// alt_bn128_mul + Bn128Mul(Bn128Mul), + /// alt_bn128_pairing + Bn128Pairing(Bn128Pairing), + /// blake2_f (The Blake2 compression function F, EIP-152) + Blake2F(Blake2F), } impl FromStr for EthereumBuiltin { - type Err = String; + type Err = String; - fn from_str(name: &str) -> Result { - match name { - "identity" => Ok(EthereumBuiltin::Identity(Identity)), - "ecrecover" => Ok(EthereumBuiltin::EcRecover(EcRecover)), - "sha256" => Ok(EthereumBuiltin::Sha256(Sha256)), - "ripemd160" => Ok(EthereumBuiltin::Ripemd160(Ripemd160)), - "modexp" => Ok(EthereumBuiltin::Modexp(Modexp)), - "alt_bn128_add" => Ok(EthereumBuiltin::Bn128Add(Bn128Add)), - "alt_bn128_mul" => Ok(EthereumBuiltin::Bn128Mul(Bn128Mul)), - "alt_bn128_pairing" => Ok(EthereumBuiltin::Bn128Pairing(Bn128Pairing)), - "blake2_f" => Ok(EthereumBuiltin::Blake2F(Blake2F)), - _ => return Err(format!("invalid builtin name: {}", name)), - } - } + fn from_str(name: &str) -> Result { + match name { + "identity" => Ok(EthereumBuiltin::Identity(Identity)), + "ecrecover" => Ok(EthereumBuiltin::EcRecover(EcRecover)), + "sha256" => Ok(EthereumBuiltin::Sha256(Sha256)), + "ripemd160" => Ok(EthereumBuiltin::Ripemd160(Ripemd160)), + "modexp" => Ok(EthereumBuiltin::Modexp(Modexp)), + "alt_bn128_add" => Ok(EthereumBuiltin::Bn128Add(Bn128Add)), + "alt_bn128_mul" => Ok(EthereumBuiltin::Bn128Mul(Bn128Mul)), + "alt_bn128_pairing" => Ok(EthereumBuiltin::Bn128Pairing(Bn128Pairing)), + "blake2_f" => Ok(EthereumBuiltin::Blake2F(Blake2F)), + _ => return Err(format!("invalid builtin name: {}", name)), + } + } } impl Implementation for EthereumBuiltin { - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - match self { - EthereumBuiltin::Identity(inner) => inner.execute(input, output), - EthereumBuiltin::EcRecover(inner) => inner.execute(input, output), - EthereumBuiltin::Sha256(inner) => 
inner.execute(input, output), - EthereumBuiltin::Ripemd160(inner) => inner.execute(input, output), - EthereumBuiltin::Modexp(inner) => inner.execute(input, output), - EthereumBuiltin::Bn128Add(inner) => inner.execute(input, output), - EthereumBuiltin::Bn128Mul(inner) => inner.execute(input, output), - EthereumBuiltin::Bn128Pairing(inner) => inner.execute(input, output), - EthereumBuiltin::Blake2F(inner) => inner.execute(input, output), - } - } + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + match self { + EthereumBuiltin::Identity(inner) => inner.execute(input, output), + EthereumBuiltin::EcRecover(inner) => inner.execute(input, output), + EthereumBuiltin::Sha256(inner) => inner.execute(input, output), + EthereumBuiltin::Ripemd160(inner) => inner.execute(input, output), + EthereumBuiltin::Modexp(inner) => inner.execute(input, output), + EthereumBuiltin::Bn128Add(inner) => inner.execute(input, output), + EthereumBuiltin::Bn128Mul(inner) => inner.execute(input, output), + EthereumBuiltin::Bn128Pairing(inner) => inner.execute(input, output), + EthereumBuiltin::Blake2F(inner) => inner.execute(input, output), + } + } } #[derive(Debug)] @@ -386,1125 +391,1284 @@ pub struct Bn128Pairing; pub struct Blake2F; impl Implementation for Identity { - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - output.write(0, input); - Ok(()) - } + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + output.write(0, input); + Ok(()) + } } impl Implementation for EcRecover { - fn execute(&self, i: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - let len = min(i.len(), 128); + fn execute(&self, i: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + let len = min(i.len(), 128); - let mut input = [0; 128]; - input[..len].copy_from_slice(&i[..len]); + let mut input = [0; 128]; + input[..len].copy_from_slice(&i[..len]); - let hash = 
H256::from_slice(&input[0..32]); - let v = H256::from_slice(&input[32..64]); - let r = H256::from_slice(&input[64..96]); - let s = H256::from_slice(&input[96..128]); + let hash = H256::from_slice(&input[0..32]); + let v = H256::from_slice(&input[32..64]); + let r = H256::from_slice(&input[64..96]); + let s = H256::from_slice(&input[96..128]); - let bit = match v[31] { - 27 | 28 if v.0[..31] == [0; 31] => v[31] - 27, - _ => { return Ok(()); }, - }; + let bit = match v[31] { + 27 | 28 if v.0[..31] == [0; 31] => v[31] - 27, + _ => { + return Ok(()); + } + }; - let s = Signature::from_rsv(&r, &s, bit); - if s.is_valid() { - if let Ok(p) = ec_recover(&s, &hash) { - let r = keccak(p); - output.write(0, &[0; 12]); - output.write(12, &r[12..r.len()]); - } - } + let s = Signature::from_rsv(&r, &s, bit); + if s.is_valid() { + if let Ok(p) = ec_recover(&s, &hash) { + let r = keccak(p); + output.write(0, &[0; 12]); + output.write(12, &r[12..r.len()]); + } + } - Ok(()) - } + Ok(()) + } } impl Implementation for Sha256 { - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - let d = digest::sha256(input); - output.write(0, &*d); - Ok(()) - } + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + let d = digest::sha256(input); + output.write(0, &*d); + Ok(()) + } } impl Implementation for Blake2F { - /// Format of `input`: - /// [4 bytes for rounds][64 bytes for h][128 bytes for m][8 bytes for t_0][8 bytes for t_1][1 byte for f] - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - const BLAKE2_F_ARG_LEN: usize = 213; - const PROOF: &str = "Checked the length of the input above; qed"; + /// Format of `input`: + /// [4 bytes for rounds][64 bytes for h][128 bytes for m][8 bytes for t_0][8 bytes for t_1][1 byte for f] + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + const BLAKE2_F_ARG_LEN: usize = 213; + const PROOF: &str = "Checked the 
length of the input above; qed"; - if input.len() != BLAKE2_F_ARG_LEN { - trace!(target: "builtin", "input length for Blake2 F precompile should be exactly 213 bytes, was {}", input.len()); - return Err("input length for Blake2 F precompile should be exactly 213 bytes".into()) - } + if input.len() != BLAKE2_F_ARG_LEN { + trace!(target: "builtin", "input length for Blake2 F precompile should be exactly 213 bytes, was {}", input.len()); + return Err("input length for Blake2 F precompile should be exactly 213 bytes".into()); + } - let mut cursor = Cursor::new(input); - let rounds = cursor.read_u32::().expect(PROOF); + let mut cursor = Cursor::new(input); + let rounds = cursor.read_u32::().expect(PROOF); - // state vector, h - let mut h = [0u64; 8]; - for state_word in &mut h { - *state_word = cursor.read_u64::().expect(PROOF); - } + // state vector, h + let mut h = [0u64; 8]; + for state_word in &mut h { + *state_word = cursor.read_u64::().expect(PROOF); + } - // message block vector, m - let mut m = [0u64; 16]; - for msg_word in &mut m { - *msg_word = cursor.read_u64::().expect(PROOF); - } + // message block vector, m + let mut m = [0u64; 16]; + for msg_word in &mut m { + *msg_word = cursor.read_u64::().expect(PROOF); + } - // 2w-bit offset counter, t - let t = [ - cursor.read_u64::().expect(PROOF), - cursor.read_u64::().expect(PROOF), - ]; + // 2w-bit offset counter, t + let t = [ + cursor.read_u64::().expect(PROOF), + cursor.read_u64::().expect(PROOF), + ]; - // final block indicator flag, "f" - let f = match input.last() { - Some(1) => true, - Some(0) => false, - _ => { - trace!(target: "builtin", "incorrect final block indicator flag, was: {:?}", input.last()); - return Err("incorrect final block indicator flag".into()) - } - }; + // final block indicator flag, "f" + let f = match input.last() { + Some(1) => true, + Some(0) => false, + _ => { + trace!(target: "builtin", "incorrect final block indicator flag, was: {:?}", input.last()); + return Err("incorrect 
final block indicator flag".into()); + } + }; - compress(&mut h, m, t, f, rounds as usize); + compress(&mut h, m, t, f, rounds as usize); - let mut output_buf = [0u8; 8 * size_of::()]; - for (i, state_word) in h.iter().enumerate() { - output_buf[i*8..(i+1)*8].copy_from_slice(&state_word.to_le_bytes()); - } - output.write(0, &output_buf[..]); - Ok(()) - } + let mut output_buf = [0u8; 8 * size_of::()]; + for (i, state_word) in h.iter().enumerate() { + output_buf[i * 8..(i + 1) * 8].copy_from_slice(&state_word.to_le_bytes()); + } + output.write(0, &output_buf[..]); + Ok(()) + } } impl Implementation for Ripemd160 { - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - let hash = digest::ripemd160(input); - output.write(0, &[0; 12][..]); - output.write(12, &hash); - Ok(()) - } + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + let hash = digest::ripemd160(input); + output.write(0, &[0; 12][..]); + output.write(12, &hash); + Ok(()) + } } // calculate modexp: left-to-right binary exponentiation to keep multiplicands lower fn modexp(mut base: BigUint, exp: Vec, modulus: BigUint) -> BigUint { - const BITS_PER_DIGIT: usize = 8; + const BITS_PER_DIGIT: usize = 8; - // n^m % 0 || n^m % 1 - if modulus <= BigUint::one() { - return BigUint::zero(); - } + // n^m % 0 || n^m % 1 + if modulus <= BigUint::one() { + return BigUint::zero(); + } - // normalize exponent - let mut exp = exp.into_iter().skip_while(|d| *d == 0).peekable(); + // normalize exponent + let mut exp = exp.into_iter().skip_while(|d| *d == 0).peekable(); - // n^0 % m - if exp.peek().is_none() { - return BigUint::one(); - } + // n^0 % m + if exp.peek().is_none() { + return BigUint::one(); + } - // 0^n % m, n > 0 - if base.is_zero() { - return BigUint::zero(); - } + // 0^n % m, n > 0 + if base.is_zero() { + return BigUint::zero(); + } - base %= &modulus; + base %= &modulus; - // Fast path for base divisible by modulus. 
- if base.is_zero() { return BigUint::zero() } + // Fast path for base divisible by modulus. + if base.is_zero() { + return BigUint::zero(); + } - // Left-to-right binary exponentiation (Handbook of Applied Cryptography - Algorithm 14.79). - // http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf - let mut result = BigUint::one(); + // Left-to-right binary exponentiation (Handbook of Applied Cryptography - Algorithm 14.79). + // http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf + let mut result = BigUint::one(); - for digit in exp { - let mut mask = 1 << (BITS_PER_DIGIT - 1); + for digit in exp { + let mut mask = 1 << (BITS_PER_DIGIT - 1); - for _ in 0..BITS_PER_DIGIT { - result = &result * &result % &modulus; + for _ in 0..BITS_PER_DIGIT { + result = &result * &result % &modulus; - if digit & mask > 0 { - result = result * &base % &modulus; - } + if digit & mask > 0 { + result = result * &base % &modulus; + } - mask >>= 1; - } - } + mask >>= 1; + } + } - result + result } impl Implementation for Modexp { - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - let mut reader = input.chain(io::repeat(0)); - let mut buf = [0; 32]; + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + let mut reader = input.chain(io::repeat(0)); + let mut buf = [0; 32]; - // read lengths as usize. - // ignoring the first 24 bytes might technically lead us to fall out of consensus, - // but so would running out of addressable memory! - let mut read_len = |reader: &mut io::Chain<&[u8], io::Repeat>| { - reader.read_exact(&mut buf[..]).expect("reading from zero-extended memory cannot fail; qed"); - let mut len_bytes = [0u8; 8]; - len_bytes.copy_from_slice(&buf[24..]); - u64::from_be_bytes(len_bytes) as usize - }; + // read lengths as usize. + // ignoring the first 24 bytes might technically lead us to fall out of consensus, + // but so would running out of addressable memory! 
+ let mut read_len = |reader: &mut io::Chain<&[u8], io::Repeat>| { + reader + .read_exact(&mut buf[..]) + .expect("reading from zero-extended memory cannot fail; qed"); + let mut len_bytes = [0u8; 8]; + len_bytes.copy_from_slice(&buf[24..]); + u64::from_be_bytes(len_bytes) as usize + }; - let base_len = read_len(&mut reader); - let exp_len = read_len(&mut reader); - let mod_len = read_len(&mut reader); + let base_len = read_len(&mut reader); + let exp_len = read_len(&mut reader); + let mod_len = read_len(&mut reader); - // Gas formula allows arbitrary large exp_len when base and modulus are empty, so we need to handle empty base first. - let r = if base_len == 0 && mod_len == 0 { - BigUint::zero() - } else { - // read the numbers themselves. - let mut buf = vec![0; max(mod_len, max(base_len, exp_len))]; - let mut read_num = |reader: &mut io::Chain<&[u8], io::Repeat>, len: usize| { - reader.read_exact(&mut buf[..len]).expect("reading from zero-extended memory cannot fail; qed"); - BigUint::from_bytes_be(&buf[..len]) - }; + // Gas formula allows arbitrary large exp_len when base and modulus are empty, so we need to handle empty base first. + let r = if base_len == 0 && mod_len == 0 { + BigUint::zero() + } else { + // read the numbers themselves. 
+ let mut buf = vec![0; max(mod_len, max(base_len, exp_len))]; + let mut read_num = |reader: &mut io::Chain<&[u8], io::Repeat>, len: usize| { + reader + .read_exact(&mut buf[..len]) + .expect("reading from zero-extended memory cannot fail; qed"); + BigUint::from_bytes_be(&buf[..len]) + }; - let base = read_num(&mut reader, base_len); + let base = read_num(&mut reader, base_len); - let mut exp_buf = vec![0; exp_len]; - reader.read_exact(&mut exp_buf[..exp_len]).expect("reading from zero-extended memory cannot fail; qed"); + let mut exp_buf = vec![0; exp_len]; + reader + .read_exact(&mut exp_buf[..exp_len]) + .expect("reading from zero-extended memory cannot fail; qed"); - let modulus = read_num(&mut reader, mod_len); + let modulus = read_num(&mut reader, mod_len); - modexp(base, exp_buf, modulus) - }; + modexp(base, exp_buf, modulus) + }; - // write output to given memory, left padded and same length as the modulus. - let bytes = r.to_bytes_be(); + // write output to given memory, left padded and same length as the modulus. + let bytes = r.to_bytes_be(); - // always true except in the case of zero-length modulus, which leads to - // output of length and value 1. - if bytes.len() <= mod_len { - let res_start = mod_len - bytes.len(); - output.write(res_start, &bytes); - } + // always true except in the case of zero-length modulus, which leads to + // output of length and value 1. 
+ if bytes.len() <= mod_len { + let res_start = mod_len - bytes.len(); + output.write(res_start, &bytes); + } - Ok(()) - } + Ok(()) + } } fn read_fr(reader: &mut io::Chain<&[u8], io::Repeat>) -> Result { - let mut buf = [0u8; 32]; + let mut buf = [0u8; 32]; - reader.read_exact(&mut buf[..]).expect("reading from zero-extended memory cannot fail; qed"); - bn::Fr::from_slice(&buf[0..32]).map_err(|_| "Invalid field element") + reader + .read_exact(&mut buf[..]) + .expect("reading from zero-extended memory cannot fail; qed"); + bn::Fr::from_slice(&buf[0..32]).map_err(|_| "Invalid field element") } fn read_point(reader: &mut io::Chain<&[u8], io::Repeat>) -> Result { - use bn::{Fq, AffineG1, G1, Group}; + use bn::{AffineG1, Fq, Group, G1}; - let mut buf = [0u8; 32]; + let mut buf = [0u8; 32]; - reader.read_exact(&mut buf[..]).expect("reading from zero-extended memory cannot fail; qed"); - let px = Fq::from_slice(&buf[0..32]).map_err(|_| "Invalid point x coordinate")?; + reader + .read_exact(&mut buf[..]) + .expect("reading from zero-extended memory cannot fail; qed"); + let px = Fq::from_slice(&buf[0..32]).map_err(|_| "Invalid point x coordinate")?; - reader.read_exact(&mut buf[..]).expect("reading from zero-extended memory cannot fail; qed"); - let py = Fq::from_slice(&buf[0..32]).map_err(|_| "Invalid point y coordinate")?; - Ok( - if px == Fq::zero() && py == Fq::zero() { - G1::zero() - } else { - AffineG1::new(px, py).map_err(|_| "Invalid curve point")?.into() - } - ) + reader + .read_exact(&mut buf[..]) + .expect("reading from zero-extended memory cannot fail; qed"); + let py = Fq::from_slice(&buf[0..32]).map_err(|_| "Invalid point y coordinate")?; + Ok(if px == Fq::zero() && py == Fq::zero() { + G1::zero() + } else { + AffineG1::new(px, py) + .map_err(|_| "Invalid curve point")? 
+ .into() + }) } impl Implementation for Bn128Add { - // Can fail if any of the 2 points does not belong the bn128 curve - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - use bn::AffineG1; + // Can fail if any of the 2 points does not belong the bn128 curve + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + use bn::AffineG1; - let mut padded_input = input.chain(io::repeat(0)); - let p1 = read_point(&mut padded_input)?; - let p2 = read_point(&mut padded_input)?; + let mut padded_input = input.chain(io::repeat(0)); + let p1 = read_point(&mut padded_input)?; + let p2 = read_point(&mut padded_input)?; - let mut write_buf = [0u8; 64]; - if let Some(sum) = AffineG1::from_jacobian(p1 + p2) { - // point not at infinity - sum.x().to_big_endian(&mut write_buf[0..32]).expect("Cannot fail since 0..32 is 32-byte length"); - sum.y().to_big_endian(&mut write_buf[32..64]).expect("Cannot fail since 32..64 is 32-byte length"); - } - output.write(0, &write_buf); + let mut write_buf = [0u8; 64]; + if let Some(sum) = AffineG1::from_jacobian(p1 + p2) { + // point not at infinity + sum.x() + .to_big_endian(&mut write_buf[0..32]) + .expect("Cannot fail since 0..32 is 32-byte length"); + sum.y() + .to_big_endian(&mut write_buf[32..64]) + .expect("Cannot fail since 32..64 is 32-byte length"); + } + output.write(0, &write_buf); - Ok(()) - } + Ok(()) + } } impl Implementation for Bn128Mul { - // Can fail if first paramter (bn128 curve point) does not actually belong to the curve - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - use bn::AffineG1; + // Can fail if first paramter (bn128 curve point) does not actually belong to the curve + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + use bn::AffineG1; - let mut padded_input = input.chain(io::repeat(0)); - let p = read_point(&mut padded_input)?; - let fr = read_fr(&mut padded_input)?; + let 
mut padded_input = input.chain(io::repeat(0)); + let p = read_point(&mut padded_input)?; + let fr = read_fr(&mut padded_input)?; - let mut write_buf = [0u8; 64]; - if let Some(sum) = AffineG1::from_jacobian(p * fr) { - // point not at infinity - sum.x().to_big_endian(&mut write_buf[0..32]).expect("Cannot fail since 0..32 is 32-byte length"); - sum.y().to_big_endian(&mut write_buf[32..64]).expect("Cannot fail since 32..64 is 32-byte length"); - } - output.write(0, &write_buf); - Ok(()) - } + let mut write_buf = [0u8; 64]; + if let Some(sum) = AffineG1::from_jacobian(p * fr) { + // point not at infinity + sum.x() + .to_big_endian(&mut write_buf[0..32]) + .expect("Cannot fail since 0..32 is 32-byte length"); + sum.y() + .to_big_endian(&mut write_buf[32..64]) + .expect("Cannot fail since 32..64 is 32-byte length"); + } + output.write(0, &write_buf); + Ok(()) + } } impl Implementation for Bn128Pairing { - /// Can fail if: - /// - input length is not a multiple of 192 - /// - any of odd points does not belong to bn128 curve - /// - any of even points does not belong to the twisted bn128 curve over the field F_p^2 = F_p[i] / (i^2 + 1) - fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - if input.len() % 192 != 0 { - return Err("Invalid input length, must be multiple of 192 (3 * (32*2))".into()) - } + /// Can fail if: + /// - input length is not a multiple of 192 + /// - any of odd points does not belong to bn128 curve + /// - any of even points does not belong to the twisted bn128 curve over the field F_p^2 = F_p[i] / (i^2 + 1) + fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + if input.len() % 192 != 0 { + return Err("Invalid input length, must be multiple of 192 (3 * (32*2))".into()); + } - if let Err(err) = self.execute_with_error(input, output) { - trace!(target: "builtin", "Pairing error: {:?}", err); - return Err(err) - } - Ok(()) - } + if let Err(err) = self.execute_with_error(input, output) 
{ + trace!(target: "builtin", "Pairing error: {:?}", err); + return Err(err); + } + Ok(()) + } } impl Bn128Pairing { - fn execute_with_error(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { - use bn::{AffineG1, AffineG2, Fq, Fq2, pairing, G1, G2, Gt, Group}; + fn execute_with_error(&self, input: &[u8], output: &mut BytesRef) -> Result<(), &'static str> { + use bn::{pairing, AffineG1, AffineG2, Fq, Fq2, Group, Gt, G1, G2}; - let ret_val = if input.is_empty() { - U256::one() - } else { - // (a, b_a, b_b - each 64-byte affine coordinates) - let elements = input.len() / 192; - let mut vals = Vec::new(); - for idx in 0..elements { - let a_x = Fq::from_slice(&input[idx*192..idx*192+32]) - .map_err(|_| "Invalid a argument x coordinate")?; + let ret_val = if input.is_empty() { + U256::one() + } else { + // (a, b_a, b_b - each 64-byte affine coordinates) + let elements = input.len() / 192; + let mut vals = Vec::new(); + for idx in 0..elements { + let a_x = Fq::from_slice(&input[idx * 192..idx * 192 + 32]) + .map_err(|_| "Invalid a argument x coordinate")?; - let a_y = Fq::from_slice(&input[idx*192+32..idx*192+64]) - .map_err(|_| "Invalid a argument y coordinate")?; + let a_y = Fq::from_slice(&input[idx * 192 + 32..idx * 192 + 64]) + .map_err(|_| "Invalid a argument y coordinate")?; - let b_a_y = Fq::from_slice(&input[idx*192+64..idx*192+96]) - .map_err(|_| "Invalid b argument imaginary coeff x coordinate")?; + let b_a_y = Fq::from_slice(&input[idx * 192 + 64..idx * 192 + 96]) + .map_err(|_| "Invalid b argument imaginary coeff x coordinate")?; - let b_a_x = Fq::from_slice(&input[idx*192+96..idx*192+128]) - .map_err(|_| "Invalid b argument imaginary coeff y coordinate")?; + let b_a_x = Fq::from_slice(&input[idx * 192 + 96..idx * 192 + 128]) + .map_err(|_| "Invalid b argument imaginary coeff y coordinate")?; - let b_b_y = Fq::from_slice(&input[idx*192+128..idx*192+160]) - .map_err(|_| "Invalid b argument real coeff x coordinate")?; + let b_b_y = 
Fq::from_slice(&input[idx * 192 + 128..idx * 192 + 160]) + .map_err(|_| "Invalid b argument real coeff x coordinate")?; - let b_b_x = Fq::from_slice(&input[idx*192+160..idx*192+192]) - .map_err(|_| "Invalid b argument real coeff y coordinate")?; + let b_b_x = Fq::from_slice(&input[idx * 192 + 160..idx * 192 + 192]) + .map_err(|_| "Invalid b argument real coeff y coordinate")?; - let b_a = Fq2::new(b_a_x, b_a_y); - let b_b = Fq2::new(b_b_x, b_b_y); - let b = if b_a.is_zero() && b_b.is_zero() { - G2::zero() - } else { - G2::from(AffineG2::new(b_a, b_b).map_err(|_| "Invalid b argument - not on curve")?) - }; - let a = if a_x.is_zero() && a_y.is_zero() { - G1::zero() - } else { - G1::from(AffineG1::new(a_x, a_y).map_err(|_| "Invalid a argument - not on curve")?) - }; - vals.push((a, b)); - }; + let b_a = Fq2::new(b_a_x, b_a_y); + let b_b = Fq2::new(b_b_x, b_b_y); + let b = if b_a.is_zero() && b_b.is_zero() { + G2::zero() + } else { + G2::from( + AffineG2::new(b_a, b_b).map_err(|_| "Invalid b argument - not on curve")?, + ) + }; + let a = if a_x.is_zero() && a_y.is_zero() { + G1::zero() + } else { + G1::from( + AffineG1::new(a_x, a_y).map_err(|_| "Invalid a argument - not on curve")?, + ) + }; + vals.push((a, b)); + } - let mul = vals.into_iter().fold(Gt::one(), |s, (a, b)| s * pairing(a, b)); + let mul = vals + .into_iter() + .fold(Gt::one(), |s, (a, b)| s * pairing(a, b)); - if mul == Gt::one() { - U256::one() - } else { - U256::zero() - } - }; + if mul == Gt::one() { + U256::one() + } else { + U256::zero() + } + }; - let mut buf = [0u8; 32]; - ret_val.to_big_endian(&mut buf); - output.write(0, &buf); + let mut buf = [0u8; 32]; + ret_val.to_big_endian(&mut buf); + output.write(0, &buf); - Ok(()) - } + Ok(()) + } } #[cfg(test)] mod tests { - use std::convert::TryFrom; - use ethereum_types::U256; - use ethjson::spec::builtin::{ - Builtin as JsonBuiltin, Linear as JsonLinearPricing, - PricingAt, AltBn128Pairing as JsonAltBn128PairingPricing, Pricing as JsonPricing, - }; 
- use hex_literal::hex; - use macros::map; - use num::{BigUint, Zero, One}; - use parity_bytes::BytesRef; - use super::{ - BTreeMap, Builtin, EthereumBuiltin, FromStr, Implementation, Linear, - ModexpPricer, modexp as me, Pricing - }; + use super::{ + modexp as me, BTreeMap, Builtin, EthereumBuiltin, FromStr, Implementation, Linear, + ModexpPricer, Pricing, + }; + use ethereum_types::U256; + use ethjson::spec::builtin::{ + AltBn128Pairing as JsonAltBn128PairingPricing, Builtin as JsonBuiltin, + Linear as JsonLinearPricing, Pricing as JsonPricing, PricingAt, + }; + use hex_literal::hex; + use macros::map; + use num::{BigUint, One, Zero}; + use parity_bytes::BytesRef; + use std::convert::TryFrom; - #[test] - fn blake2f_cost() { - let f = Builtin { - pricer: map![0 => Pricing::Blake2F(123)], - native: EthereumBuiltin::from_str("blake2_f").unwrap(), - }; - // 5 rounds - let input = hex!("0000000548c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); - let mut output = [0u8; 64]; - f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).unwrap(); + #[test] + fn blake2f_cost() { + let f = Builtin { + pricer: map![0 => Pricing::Blake2F(123)], + native: EthereumBuiltin::from_str("blake2_f").unwrap(), + }; + // 5 rounds + let input = 
hex!("0000000548c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); + let mut output = [0u8; 64]; + f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .unwrap(); - assert_eq!(f.cost(&input[..], 0), U256::from(123*5)); - } + assert_eq!(f.cost(&input[..], 0), U256::from(123 * 5)); + } - #[test] - fn blake2f_cost_on_invalid_length() { - let f = Builtin { - pricer: map![0 => Pricing::Blake2F(123)], - native: EthereumBuiltin::from_str("blake2_f").expect("known builtin"), - }; - // invalid input (too short) - let input = hex!("00"); + #[test] + fn blake2f_cost_on_invalid_length() { + let f = Builtin { + pricer: map![0 => Pricing::Blake2F(123)], + native: EthereumBuiltin::from_str("blake2_f").expect("known builtin"), + }; + // invalid input (too short) + let input = hex!("00"); - assert_eq!(f.cost(&input[..], 0), U256::from(0)); - } + assert_eq!(f.cost(&input[..], 0), U256::from(0)); + } - #[test] - fn blake2_f_is_err_on_invalid_length() { - let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); - // Test vector 1 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-1 - let input = hex!("00000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); - let mut out = [0u8; 64]; + #[test] + fn 
blake2_f_is_err_on_invalid_length() { + let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); + // Test vector 1 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-1 + let input = hex!("00000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); + let mut out = [0u8; 64]; - let result = blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), "input length for Blake2 F precompile should be exactly 213 bytes"); - } + let result = blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])); + assert!(result.is_err()); + assert_eq!( + result.unwrap_err(), + "input length for Blake2 F precompile should be exactly 213 bytes" + ); + } - #[test] - fn blake2_f_is_err_on_invalid_length_2() { - let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); - // Test vector 2 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-2 - let input = hex!("000000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); - let mut out = [0u8; 64]; + #[test] + fn blake2_f_is_err_on_invalid_length_2() { + let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); + // Test vector 2 and expected 
output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-2 + let input = hex!("000000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); + let mut out = [0u8; 64]; - let result = blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), "input length for Blake2 F precompile should be exactly 213 bytes"); - } + let result = blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])); + assert!(result.is_err()); + assert_eq!( + result.unwrap_err(), + "input length for Blake2 F precompile should be exactly 213 bytes" + ); + } - #[test] - fn blake2_f_is_err_on_bad_finalization_flag() { - let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); - // Test vector 3 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-3 - let input = hex!("0000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000002"); - let mut out = [0u8; 64]; + #[test] + fn blake2_f_is_err_on_bad_finalization_flag() { + let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); + // Test vector 3 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-3 + let input = 
hex!("0000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000002"); + let mut out = [0u8; 64]; - let result = blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), "incorrect final block indicator flag"); - } + let result = blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), "incorrect final block indicator flag"); + } - #[test] - fn blake2_f_zero_rounds_is_ok_test_vector_4() { - let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); - // Test vector 4 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-4 - let input = hex!("0000000048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); - let expected = hex!("08c9bcf367e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d282e6ad7f520e511f6c3e2b8c68059b9442be0454267ce079217e1319cde05b"); - let mut output = [0u8; 64]; - blake2.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).unwrap(); - assert_eq!(&output[..], &expected[..]); - } + #[test] + fn blake2_f_zero_rounds_is_ok_test_vector_4() { + let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); + // Test vector 4 and expected output 
from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-4 + let input = hex!("0000000048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); + let expected = hex!("08c9bcf367e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d282e6ad7f520e511f6c3e2b8c68059b9442be0454267ce079217e1319cde05b"); + let mut output = [0u8; 64]; + blake2 + .execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .unwrap(); + assert_eq!(&output[..], &expected[..]); + } - #[test] - fn blake2_f_test_vector_5() { - let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); - // Test vector 5 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-5 - let input = hex!("0000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); - let expected = hex!("ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d17d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923"); - let mut out = [0u8; 64]; - blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])).unwrap(); - assert_eq!(&out[..], &expected[..]); - } + #[test] + fn blake2_f_test_vector_5() { + let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); + // Test vector 5 and expected output from 
https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-5 + let input = hex!("0000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); + let expected = hex!("ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d17d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923"); + let mut out = [0u8; 64]; + blake2 + .execute(&input[..], &mut BytesRef::Fixed(&mut out[..])) + .unwrap(); + assert_eq!(&out[..], &expected[..]); + } - #[test] - fn blake2_f_test_vector_6() { - let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); - // Test vector 6 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-6 - let input = hex!("0000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000"); - let expected = hex!("75ab69d3190a562c51aef8d88f1c2775876944407270c42c9844252c26d2875298743e7f6d5ea2f2d3e8d226039cd31b4e426ac4f2d3d666a610c2116fde4735"); - let mut out = [0u8; 64]; - blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])).unwrap(); - assert_eq!(&out[..], &expected[..]); - } + #[test] + fn blake2_f_test_vector_6() { + let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); + // Test vector 6 and expected output from 
https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-6 + let input = hex!("0000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000"); + let expected = hex!("75ab69d3190a562c51aef8d88f1c2775876944407270c42c9844252c26d2875298743e7f6d5ea2f2d3e8d226039cd31b4e426ac4f2d3d666a610c2116fde4735"); + let mut out = [0u8; 64]; + blake2 + .execute(&input[..], &mut BytesRef::Fixed(&mut out[..])) + .unwrap(); + assert_eq!(&out[..], &expected[..]); + } - #[test] - fn blake2_f_test_vector_7() { - let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); - // Test vector 7 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-7 - let input = hex!("0000000148c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); - let expected = hex!("b63a380cb2897d521994a85234ee2c181b5f844d2c624c002677e9703449d2fba551b3a8333bcdf5f2f7e08993d53923de3d64fcc68c034e717b9293fed7a421"); - let mut out = [0u8; 64]; - blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])).unwrap(); - assert_eq!(&out[..], &expected[..]); - } + #[test] + fn blake2_f_test_vector_7() { + let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); + // Test vector 7 and expected output from 
https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-7 + let input = hex!("0000000148c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); + let expected = hex!("b63a380cb2897d521994a85234ee2c181b5f844d2c624c002677e9703449d2fba551b3a8333bcdf5f2f7e08993d53923de3d64fcc68c034e717b9293fed7a421"); + let mut out = [0u8; 64]; + blake2 + .execute(&input[..], &mut BytesRef::Fixed(&mut out[..])) + .unwrap(); + assert_eq!(&out[..], &expected[..]); + } - #[ignore] - #[test] - fn blake2_f_test_vector_8() { - let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); - // Test vector 8 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-8 - // Note this test is slow, 4294967295/0xffffffff rounds take a while. 
- let input = hex!("ffffffff48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); - let expected = hex!("fc59093aafa9ab43daae0e914c57635c5402d8e3d2130eb9b3cc181de7f0ecf9b22bf99a7815ce16419e200e01846e6b5df8cc7703041bbceb571de6631d2615"); - let mut out = [0u8; 64]; - blake2.execute(&input[..], &mut BytesRef::Fixed(&mut out[..])).unwrap(); - assert_eq!(&out[..], &expected[..]); - } + #[ignore] + #[test] + fn blake2_f_test_vector_8() { + let blake2 = EthereumBuiltin::from_str("blake2_f").unwrap(); + // Test vector 8 and expected output from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#test-vector-8 + // Note this test is slow, 4294967295/0xffffffff rounds take a while. 
+ let input = hex!("ffffffff48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"); + let expected = hex!("fc59093aafa9ab43daae0e914c57635c5402d8e3d2130eb9b3cc181de7f0ecf9b22bf99a7815ce16419e200e01846e6b5df8cc7703041bbceb571de6631d2615"); + let mut out = [0u8; 64]; + blake2 + .execute(&input[..], &mut BytesRef::Fixed(&mut out[..])) + .unwrap(); + assert_eq!(&out[..], &expected[..]); + } - #[test] - fn modexp_func() { - // n^0 % m == 1 - let mut base = BigUint::parse_bytes(b"12345", 10).unwrap(); - let mut exp = BigUint::zero(); - let mut modulus = BigUint::parse_bytes(b"789", 10).unwrap(); - assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::one()); + #[test] + fn modexp_func() { + // n^0 % m == 1 + let mut base = BigUint::parse_bytes(b"12345", 10).unwrap(); + let mut exp = BigUint::zero(); + let mut modulus = BigUint::parse_bytes(b"789", 10).unwrap(); + assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::one()); - // 0^n % m == 0 - base = BigUint::zero(); - exp = BigUint::parse_bytes(b"12345", 10).unwrap(); - modulus = BigUint::parse_bytes(b"789", 10).unwrap(); - assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::zero()); + // 0^n % m == 0 + base = BigUint::zero(); + exp = BigUint::parse_bytes(b"12345", 10).unwrap(); + modulus = BigUint::parse_bytes(b"789", 10).unwrap(); + assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::zero()); - // n^m % 1 == 0 - base = BigUint::parse_bytes(b"12345", 10).unwrap(); - exp = BigUint::parse_bytes(b"789", 10).unwrap(); - modulus = BigUint::one(); - assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::zero()); + // n^m % 1 == 0 + base = 
BigUint::parse_bytes(b"12345", 10).unwrap(); + exp = BigUint::parse_bytes(b"789", 10).unwrap(); + modulus = BigUint::one(); + assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::zero()); - // if n % d == 0, then n^m % d == 0 - base = BigUint::parse_bytes(b"12345", 10).unwrap(); - exp = BigUint::parse_bytes(b"789", 10).unwrap(); - modulus = BigUint::parse_bytes(b"15", 10).unwrap(); - assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::zero()); + // if n % d == 0, then n^m % d == 0 + base = BigUint::parse_bytes(b"12345", 10).unwrap(); + exp = BigUint::parse_bytes(b"789", 10).unwrap(); + modulus = BigUint::parse_bytes(b"15", 10).unwrap(); + assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::zero()); - // others - base = BigUint::parse_bytes(b"12345", 10).unwrap(); - exp = BigUint::parse_bytes(b"789", 10).unwrap(); - modulus = BigUint::parse_bytes(b"97", 10).unwrap(); - assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::parse_bytes(b"55", 10).unwrap()); - } + // others + base = BigUint::parse_bytes(b"12345", 10).unwrap(); + exp = BigUint::parse_bytes(b"789", 10).unwrap(); + modulus = BigUint::parse_bytes(b"97", 10).unwrap(); + assert_eq!( + me(base, exp.to_bytes_be(), modulus), + BigUint::parse_bytes(b"55", 10).unwrap() + ); + } - #[test] - fn identity() { - let f = EthereumBuiltin::from_str("identity").unwrap(); - let i = [0u8, 1, 2, 3]; + #[test] + fn identity() { + let f = EthereumBuiltin::from_str("identity").unwrap(); + let i = [0u8, 1, 2, 3]; - let mut o2 = [255u8; 2]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o2[..])).expect("Builtin should not fail"); - assert_eq!(i[0..2], o2); + let mut o2 = [255u8; 2]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o2[..])) + .expect("Builtin should not fail"); + assert_eq!(i[0..2], o2); - let mut o4 = [255u8; 4]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o4[..])).expect("Builtin should not fail"); - assert_eq!(i, o4); + let mut o4 = [255u8; 4]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut 
o4[..])) + .expect("Builtin should not fail"); + assert_eq!(i, o4); - let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])).expect("Builtin should not fail"); - assert_eq!(i, o8[..4]); - assert_eq!([255u8; 4], o8[4..]); - } + let mut o8 = [255u8; 8]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])) + .expect("Builtin should not fail"); + assert_eq!(i, o8[..4]); + assert_eq!([255u8; 4], o8[4..]); + } - #[test] - fn sha256() { - let f = EthereumBuiltin::from_str("sha256").unwrap(); - let i = [0u8; 0]; + #[test] + fn sha256() { + let f = EthereumBuiltin::from_str("sha256").unwrap(); + let i = [0u8; 0]; - let mut o = [255u8; 32]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(&o[..], hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")); + let mut o = [255u8; 32]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o[..], + hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") + ); - let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])).expect("Builtin should not fail"); - assert_eq!(&o8[..], hex!("e3b0c44298fc1c14")); + let mut o8 = [255u8; 8]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])) + .expect("Builtin should not fail"); + assert_eq!(&o8[..], hex!("e3b0c44298fc1c14")); - let mut o34 = [255u8; 34]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])).expect("Builtin should not fail"); - assert_eq!(&o34[..], &hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855ffff")[..]); + let mut o34 = [255u8; 34]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o34[..], + &hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855ffff")[..] 
+ ); - let mut ov = vec![]; - f.execute(&i[..], &mut BytesRef::Flexible(&mut ov)).expect("Builtin should not fail"); - assert_eq!(&ov[..], &hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")[..]); - } + let mut ov = vec![]; + f.execute(&i[..], &mut BytesRef::Flexible(&mut ov)) + .expect("Builtin should not fail"); + assert_eq!( + &ov[..], + &hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")[..] + ); + } - #[test] - fn ripemd160() { - let f = EthereumBuiltin::from_str("ripemd160").unwrap(); - let i = [0u8; 0]; + #[test] + fn ripemd160() { + let f = EthereumBuiltin::from_str("ripemd160").unwrap(); + let i = [0u8; 0]; - let mut o = [255u8; 32]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(&o[..], &hex!("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31")[..]); + let mut o = [255u8; 32]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o[..], + &hex!("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31")[..] + ); - let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])).expect("Builtin should not fail"); - assert_eq!(&o8[..], &hex!("0000000000000000")[..]); + let mut o8 = [255u8; 8]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])) + .expect("Builtin should not fail"); + assert_eq!(&o8[..], &hex!("0000000000000000")[..]); - let mut o34 = [255u8; 34]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])).expect("Builtin should not fail"); - assert_eq!(&o34[..], &hex!("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31ffff")[..]); - } + let mut o34 = [255u8; 34]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o34[..], + &hex!("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31ffff")[..] 
+ ); + } - #[test] - fn ecrecover() { - let f = EthereumBuiltin::from_str("ecrecover").unwrap(); + #[test] + fn ecrecover() { + let f = EthereumBuiltin::from_str("ecrecover").unwrap(); - let i = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03"); + let i = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03"); - let mut o = [255u8; 32]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(&o[..], &hex!("000000000000000000000000c08b5542d177ac6686946920409741463a15dddb")[..]); + let mut o = [255u8; 32]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o[..], + &hex!("000000000000000000000000c08b5542d177ac6686946920409741463a15dddb")[..] + ); - let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])).expect("Builtin should not fail"); - assert_eq!(&o8[..], &hex!("0000000000000000")[..]); + let mut o8 = [255u8; 8]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])) + .expect("Builtin should not fail"); + assert_eq!(&o8[..], &hex!("0000000000000000")[..]); - let mut o34 = [255u8; 34]; - f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])).expect("Builtin should not fail"); - assert_eq!(&o34[..], &hex!("000000000000000000000000c08b5542d177ac6686946920409741463a15dddbffff")[..]); + let mut o34 = [255u8; 34]; + f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o34[..], + &hex!("000000000000000000000000c08b5542d177ac6686946920409741463a15dddbffff")[..] 
+ ); - let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001a650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03"); - let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(&o[..], &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..]); + let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001a650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03"); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o[..], + &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..] 
+ ); - let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000"); - let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(&o[..], &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..]); + let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000"); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o[..], + &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..] 
+ ); - let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b"); - let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(&o[..], &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..]); + let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b"); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o[..], + &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..] 
+ ); - let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000001b"); - let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(&o[..], &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..]); + let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000001b"); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o[..], + &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..] 
+ ); - let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(&o[..], &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..]); + let i_bad = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!( + &o[..], + &hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")[..] + ); - // TODO: Should this (corrupted version of the above) fail rather than returning some address? - /* let i_bad = FromHex::from_hex("48173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); - let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); - assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);*/ - } + // TODO: Should this (corrupted version of the above) fail rather than returning some address? 
+ /* let i_bad = FromHex::from_hex("48173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); + assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);*/ + } - #[test] - fn modexp() { - let f = Builtin { - pricer: map![0 => Pricing::Modexp(ModexpPricer { divisor: 20 })], - native: EthereumBuiltin::from_str("modexp").unwrap(), - }; + #[test] + fn modexp() { + let f = Builtin { + pricer: map![0 => Pricing::Modexp(ModexpPricer { divisor: 20 })], + native: EthereumBuiltin::from_str("modexp").unwrap(), + }; - // test for potential gas cost multiplication overflow - { - let input = hex!("0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000003b27bafd00000000000000000000000000000000000000000000000000000000503c8ac3"); - let expected_cost = U256::max_value(); - assert_eq!(f.cost(&input[..], 0), expected_cost); - } + // test for potential gas cost multiplication overflow + { + let input = hex!("0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000003b27bafd00000000000000000000000000000000000000000000000000000000503c8ac3"); + let expected_cost = U256::max_value(); + assert_eq!(f.cost(&input[..], 0), expected_cost); + } - // test for potential exp len overflow - { - let input = hex!(" + // test for potential exp len overflow + { + let input = hex!( + " 00000000000000000000000000000000000000000000000000000000000000ff 2a1e530000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000" - ); + ); - let mut output = vec![0u8; 32]; - 
let expected = hex!("0000000000000000000000000000000000000000000000000000000000000000"); - let expected_cost = U256::max_value(); + let mut output = vec![0u8; 32]; + let expected = hex!("0000000000000000000000000000000000000000000000000000000000000000"); + let expected_cost = U256::max_value(); - f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).expect("Builtin should fail"); - assert_eq!(output, expected); - assert_eq!(f.cost(&input[..], 0), expected_cost); - } + f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .expect("Builtin should fail"); + assert_eq!(output, expected); + assert_eq!(f.cost(&input[..], 0), expected_cost); + } - // fermat's little theorem example. - { - let input = hex!(" + // fermat's little theorem example. + { + let input = hex!( + " 0000000000000000000000000000000000000000000000000000000000000001 0000000000000000000000000000000000000000000000000000000000000020 0000000000000000000000000000000000000000000000000000000000000020 03 fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" - ); + ); - let mut output = vec![0u8; 32]; - let expected = hex!("0000000000000000000000000000000000000000000000000000000000000001"); - let expected_cost = 13056; + let mut output = vec![0u8; 32]; + let expected = hex!("0000000000000000000000000000000000000000000000000000000000000001"); + let expected_cost = 13056; - f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).expect("Builtin should not fail"); - assert_eq!(output, expected); - assert_eq!(f.cost(&input[..], 0), expected_cost.into()); - } + f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .expect("Builtin should not fail"); + assert_eq!(output, expected); + assert_eq!(f.cost(&input[..], 0), expected_cost.into()); + } - // second example from EIP: zero base. - { - let input = hex!(" + // second example from EIP: zero base. 
+ { + let input = hex!( + " 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000020 0000000000000000000000000000000000000000000000000000000000000020 fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" - ); + ); - let mut output = vec![0u8; 32]; - let expected = hex!("0000000000000000000000000000000000000000000000000000000000000000"); - let expected_cost = 13056; + let mut output = vec![0u8; 32]; + let expected = hex!("0000000000000000000000000000000000000000000000000000000000000000"); + let expected_cost = 13056; - f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).expect("Builtin should not fail"); - assert_eq!(output, expected); - assert_eq!(f.cost(&input[..], 0), expected_cost.into()); - } + f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .expect("Builtin should not fail"); + assert_eq!(output, expected); + assert_eq!(f.cost(&input[..], 0), expected_cost.into()); + } - // another example from EIP: zero-padding - { - let input = hex!(" + // another example from EIP: zero-padding + { + let input = hex!( + " 0000000000000000000000000000000000000000000000000000000000000001 0000000000000000000000000000000000000000000000000000000000000002 0000000000000000000000000000000000000000000000000000000000000020 03 ffff 80" - ); + ); - let mut output = vec![0u8; 32]; - let expected = hex!("3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab"); - let expected_cost = 768; + let mut output = vec![0u8; 32]; + let expected = hex!("3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab"); + let expected_cost = 768; - f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).expect("Builtin should not fail"); - assert_eq!(output, expected); - assert_eq!(f.cost(&input[..], 0), expected_cost.into()); - } + f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .expect("Builtin 
should not fail"); + assert_eq!(output, expected); + assert_eq!(f.cost(&input[..], 0), expected_cost.into()); + } - // zero-length modulus. - { - let input = hex!(" + // zero-length modulus. + { + let input = hex!( + " 0000000000000000000000000000000000000000000000000000000000000001 0000000000000000000000000000000000000000000000000000000000000002 0000000000000000000000000000000000000000000000000000000000000000 03 ffff" - ); + ); - let mut output = vec![]; - let expected_cost = 0; + let mut output = vec![]; + let expected_cost = 0; - f.execute(&input[..], &mut BytesRef::Flexible(&mut output)).expect("Builtin should not fail"); - assert_eq!(output.len(), 0); // shouldn't have written any output. - assert_eq!(f.cost(&input[..], 0), expected_cost.into()); - } - } + f.execute(&input[..], &mut BytesRef::Flexible(&mut output)) + .expect("Builtin should not fail"); + assert_eq!(output.len(), 0); // shouldn't have written any output. + assert_eq!(f.cost(&input[..], 0), expected_cost.into()); + } + } - #[test] - fn bn128_add() { + #[test] + fn bn128_add() { + let f = Builtin { + pricer: map![0 => Pricing::Linear(Linear { base: 0, word: 0 })], + native: EthereumBuiltin::from_str("alt_bn128_add").unwrap(), + }; - let f = Builtin { - pricer: map![0 => Pricing::Linear(Linear { base: 0, word: 0 })], - native: EthereumBuiltin::from_str("alt_bn128_add").unwrap(), - }; - - // zero-points additions - { - let input = hex!(" + // zero-points additions + { + let input = hex!( + " 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000" - ); + ); - let mut output = vec![0u8; 64]; - let expected = hex!(" + let mut output = vec![0u8; 64]; + let expected = hex!( + " 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000" - ); + ); - f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).expect("Builtin should not fail"); - assert_eq!(output, &expected[..]); - } + f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .expect("Builtin should not fail"); + assert_eq!(output, &expected[..]); + } - // no input, should not fail - { - let mut empty = [0u8; 0]; - let input = BytesRef::Fixed(&mut empty); + // no input, should not fail + { + let mut empty = [0u8; 0]; + let input = BytesRef::Fixed(&mut empty); - let mut output = vec![0u8; 64]; - let expected = hex!(" + let mut output = vec![0u8; 64]; + let expected = hex!( + " 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000" - ); + ); - f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).expect("Builtin should not fail"); - assert_eq!(output, &expected[..]); - } + f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .expect("Builtin should not fail"); + assert_eq!(output, &expected[..]); + } - // should fail - point not on curve - { - let input = hex!(" + // should fail - point not on curve + { + let input = hex!( + " 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111" - ); + ); - let mut output = vec![0u8; 64]; + let mut output = vec![0u8; 64]; - let res = f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])); - assert!(res.is_err(), "There should be built-in error here"); - } - } + let res = f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])); + assert!(res.is_err(), "There should be built-in error here"); + } + } - #[test] - fn bn128_mul() { - let f = Builtin { - pricer: map![0 => Pricing::Linear(Linear { base: 0, word: 0 })], - native: 
EthereumBuiltin::from_str("alt_bn128_mul").unwrap(), - }; + #[test] + fn bn128_mul() { + let f = Builtin { + pricer: map![0 => Pricing::Linear(Linear { base: 0, word: 0 })], + native: EthereumBuiltin::from_str("alt_bn128_mul").unwrap(), + }; - // zero-point multiplication - { - let input = hex!(" + // zero-point multiplication + { + let input = hex!( + " 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0200000000000000000000000000000000000000000000000000000000000000" - ); + ); - let mut output = vec![0u8; 64]; - let expected = hex!(" + let mut output = vec![0u8; 64]; + let expected = hex!( + " 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000" - ); + ); - f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).expect("Builtin should not fail"); - assert_eq!(output, &expected[..]); - } + f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .expect("Builtin should not fail"); + assert_eq!(output, &expected[..]); + } - // should fail - point not on curve - { - let input = hex!(" + // should fail - point not on curve + { + let input = hex!( + " 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111 0f00000000000000000000000000000000000000000000000000000000000000" - ); + ); - let mut output = vec![0u8; 64]; + let mut output = vec![0u8; 64]; - let res = f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])); - assert!(res.is_err(), "There should be built-in error here"); - } - } + let res = f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])); + assert!(res.is_err(), "There should be built-in error here"); + } + } - fn builtin_pairing() -> Builtin { - Builtin { - pricer: map![0 => Pricing::Linear(Linear { base: 0, word: 0 })], - native: EthereumBuiltin::from_str("alt_bn128_pairing").unwrap(), - } - } + fn 
builtin_pairing() -> Builtin { + Builtin { + pricer: map![0 => Pricing::Linear(Linear { base: 0, word: 0 })], + native: EthereumBuiltin::from_str("alt_bn128_pairing").unwrap(), + } + } - fn empty_test(f: Builtin, expected: Vec) { - let mut empty = [0u8; 0]; - let input = BytesRef::Fixed(&mut empty); + fn empty_test(f: Builtin, expected: Vec) { + let mut empty = [0u8; 0]; + let input = BytesRef::Fixed(&mut empty); - let mut output = vec![0u8; expected.len()]; + let mut output = vec![0u8; expected.len()]; - f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).expect("Builtin should not fail"); - assert_eq!(output, expected); - } + f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])) + .expect("Builtin should not fail"); + assert_eq!(output, expected); + } - fn error_test(f: Builtin, input: &[u8], msg_contains: Option<&str>) { - let mut output = vec![0u8; 64]; - let res = f.execute(input, &mut BytesRef::Fixed(&mut output[..])); - if let Some(msg) = msg_contains { - if let Err(e) = res { - if !e.contains(msg) { - panic!("There should be error containing '{}' here, but got: '{}'", msg, e); - } - } - } else { - assert!(res.is_err(), "There should be built-in error here"); - } - } + fn error_test(f: Builtin, input: &[u8], msg_contains: Option<&str>) { + let mut output = vec![0u8; 64]; + let res = f.execute(input, &mut BytesRef::Fixed(&mut output[..])); + if let Some(msg) = msg_contains { + if let Err(e) = res { + if !e.contains(msg) { + panic!( + "There should be error containing '{}' here, but got: '{}'", + msg, e + ); + } + } + } else { + assert!(res.is_err(), "There should be built-in error here"); + } + } - #[test] - fn bn128_pairing_empty() { - // should not fail, because empty input is a valid input of 0 elements - empty_test( - builtin_pairing(), - hex!("0000000000000000000000000000000000000000000000000000000000000001").to_vec(), - ); - } + #[test] + fn bn128_pairing_empty() { + // should not fail, because empty input is a valid input of 0 elements 
+ empty_test( + builtin_pairing(), + hex!("0000000000000000000000000000000000000000000000000000000000000001").to_vec(), + ); + } - #[test] - fn bn128_pairing_notcurve() { - // should fail - point not on curve - error_test( - builtin_pairing(), - &hex!(" + #[test] + fn bn128_pairing_notcurve() { + // should fail - point not on curve + error_test( + builtin_pairing(), + &hex!( + " 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111" - ), - Some("not on curve"), - ); - } + ), + Some("not on curve"), + ); + } - #[test] - fn bn128_pairing_fragmented() { - // should fail - input length is invalid - error_test( - builtin_pairing(), - &hex!(" + #[test] + fn bn128_pairing_fragmented() { + // should fail - input length is invalid + error_test( + builtin_pairing(), + &hex!( + " 1111111111111111111111111111111111111111111111111111111111111111 1111111111111111111111111111111111111111111111111111111111111111 111111111111111111111111111111" - ), - Some("Invalid input length"), - ); - } + ), + Some("Invalid input length"), + ); + } - #[test] - #[should_panic] - fn from_unknown_linear() { - let _ = EthereumBuiltin::from_str("foo").unwrap(); - } + #[test] + #[should_panic] + fn from_unknown_linear() { + let _ = EthereumBuiltin::from_str("foo").unwrap(); + } - #[test] - fn is_active() { - let pricer = Pricing::Linear(Linear { base: 10, word: 20 }); - let b = Builtin { - pricer: map![100_000 => pricer], - native: EthereumBuiltin::from_str("identity").unwrap(), - }; + #[test] + fn is_active() { + let pricer = Pricing::Linear(Linear { base: 10, word: 20 }); + let b = Builtin { + pricer: map![100_000 => pricer], + native: 
EthereumBuiltin::from_str("identity").unwrap(), + }; - assert!(!b.is_active(99_999)); - assert!(b.is_active(100_000)); - assert!(b.is_active(100_001)); - } + assert!(!b.is_active(99_999)); + assert!(b.is_active(100_000)); + assert!(b.is_active(100_001)); + } - #[test] - fn from_named_linear() { - let pricer = Pricing::Linear(Linear { base: 10, word: 20 }); - let b = Builtin { - pricer: map![0 => pricer], - native: EthereumBuiltin::from_str("identity").unwrap(), - }; + #[test] + fn from_named_linear() { + let pricer = Pricing::Linear(Linear { base: 10, word: 20 }); + let b = Builtin { + pricer: map![0 => pricer], + native: EthereumBuiltin::from_str("identity").unwrap(), + }; - assert_eq!(b.cost(&[0; 0], 0), U256::from(10)); - assert_eq!(b.cost(&[0; 1], 0), U256::from(30)); - assert_eq!(b.cost(&[0; 32], 0), U256::from(30)); - assert_eq!(b.cost(&[0; 33], 0), U256::from(50)); + assert_eq!(b.cost(&[0; 0], 0), U256::from(10)); + assert_eq!(b.cost(&[0; 1], 0), U256::from(30)); + assert_eq!(b.cost(&[0; 32], 0), U256::from(30)); + assert_eq!(b.cost(&[0; 33], 0), U256::from(50)); - let i = [0u8, 1, 2, 3]; - let mut o = [255u8; 4]; - b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(i, o); - } + let i = [0u8, 1, 2, 3]; + let mut o = [255u8; 4]; + b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!(i, o); + } - #[test] - fn from_json() { - let b = Builtin::try_from(ethjson::spec::Builtin { - name: "identity".to_owned(), - pricing: map![ - 0 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { base: 10, word: 20 }) - } - ] - }).expect("known builtin"); + #[test] + fn from_json() { + let b = Builtin::try_from(ethjson::spec::Builtin { + name: "identity".to_owned(), + pricing: map![ + 0 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { base: 10, word: 20 }) + } + ], + }) + .expect("known builtin"); - assert_eq!(b.cost(&[0; 0], 
0), U256::from(10)); - assert_eq!(b.cost(&[0; 1], 0), U256::from(30)); - assert_eq!(b.cost(&[0; 32], 0), U256::from(30)); - assert_eq!(b.cost(&[0; 33], 0), U256::from(50)); + assert_eq!(b.cost(&[0; 0], 0), U256::from(10)); + assert_eq!(b.cost(&[0; 1], 0), U256::from(30)); + assert_eq!(b.cost(&[0; 32], 0), U256::from(30)); + assert_eq!(b.cost(&[0; 33], 0), U256::from(50)); - let i = [0u8, 1, 2, 3]; - let mut o = [255u8; 4]; - b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])).expect("Builtin should not fail"); - assert_eq!(i, o); - } + let i = [0u8, 1, 2, 3]; + let mut o = [255u8; 4]; + b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])) + .expect("Builtin should not fail"); + assert_eq!(i, o); + } - #[test] - fn bn128_pairing_eip1108_transition() { - let b = Builtin::try_from(JsonBuiltin { - name: "alt_bn128_pairing".to_owned(), - pricing: map![ - 10 => PricingAt { - info: None, - price: JsonPricing::AltBn128Pairing(JsonAltBn128PairingPricing { - base: 100_000, - pair: 80_000, - }), - }, - 20 => PricingAt { - info: None, - price: JsonPricing::AltBn128Pairing(JsonAltBn128PairingPricing { - base: 45_000, - pair: 34_000, - }), - } - ], - }).unwrap(); + #[test] + fn bn128_pairing_eip1108_transition() { + let b = Builtin::try_from(JsonBuiltin { + name: "alt_bn128_pairing".to_owned(), + pricing: map![ + 10 => PricingAt { + info: None, + price: JsonPricing::AltBn128Pairing(JsonAltBn128PairingPricing { + base: 100_000, + pair: 80_000, + }), + }, + 20 => PricingAt { + info: None, + price: JsonPricing::AltBn128Pairing(JsonAltBn128PairingPricing { + base: 45_000, + pair: 34_000, + }), + } + ], + }) + .unwrap(); - assert_eq!(b.cost(&[0; 192 * 3], 10), U256::from(340_000), "80 000 * 3 + 100 000 == 340 000"); - assert_eq!(b.cost(&[0; 192 * 7], 20), U256::from(283_000), "34 000 * 7 + 45 000 == 283 000"); - } + assert_eq!( + b.cost(&[0; 192 * 3], 10), + U256::from(340_000), + "80 000 * 3 + 100 000 == 340 000" + ); + assert_eq!( + b.cost(&[0; 192 * 7], 20), + U256::from(283_000), 
+ "34 000 * 7 + 45 000 == 283 000" + ); + } - #[test] - fn bn128_add_eip1108_transition() { - let b = Builtin::try_from(JsonBuiltin { - name: "alt_bn128_add".to_owned(), - pricing: map![ - 10 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 500, - word: 0, - }), - }, - 20 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 150, - word: 0, - }), - } - ], - }).unwrap(); + #[test] + fn bn128_add_eip1108_transition() { + let b = Builtin::try_from(JsonBuiltin { + name: "alt_bn128_add".to_owned(), + pricing: map![ + 10 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 500, + word: 0, + }), + }, + 20 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 150, + word: 0, + }), + } + ], + }) + .unwrap(); - assert_eq!(b.cost(&[0; 192], 10), U256::from(500)); - assert_eq!(b.cost(&[0; 10], 20), U256::from(150), "after istanbul hardfork gas cost for add should be 150"); - } + assert_eq!(b.cost(&[0; 192], 10), U256::from(500)); + assert_eq!( + b.cost(&[0; 10], 20), + U256::from(150), + "after istanbul hardfork gas cost for add should be 150" + ); + } - #[test] - fn bn128_mul_eip1108_transition() { - let b = Builtin::try_from(JsonBuiltin { - name: "alt_bn128_mul".to_owned(), - pricing: map![ - 10 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 40_000, - word: 0, - }), - }, - 20 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 6_000, - word: 0, - }), - } - ], - }).unwrap(); + #[test] + fn bn128_mul_eip1108_transition() { + let b = Builtin::try_from(JsonBuiltin { + name: "alt_bn128_mul".to_owned(), + pricing: map![ + 10 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 40_000, + word: 0, + }), + }, + 20 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 6_000, + word: 0, + }), + } + ], + }) + .unwrap(); 
- assert_eq!(b.cost(&[0; 192], 10), U256::from(40_000)); - assert_eq!(b.cost(&[0; 10], 20), U256::from(6_000), "after istanbul hardfork gas cost for mul should be 6 000"); - } + assert_eq!(b.cost(&[0; 192], 10), U256::from(40_000)); + assert_eq!( + b.cost(&[0; 10], 20), + U256::from(6_000), + "after istanbul hardfork gas cost for mul should be 6 000" + ); + } + #[test] + fn multimap_use_most_recent_on_activate() { + let b = Builtin::try_from(JsonBuiltin { + name: "alt_bn128_mul".to_owned(), + pricing: map![ + 10 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 40_000, + word: 0, + }), + }, + 20 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 6_000, + word: 0, + }) + }, + 100 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 1_337, + word: 0, + }) + } + ], + }) + .unwrap(); - #[test] - fn multimap_use_most_recent_on_activate() { - let b = Builtin::try_from(JsonBuiltin { - name: "alt_bn128_mul".to_owned(), - pricing: map![ - 10 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 40_000, - word: 0, - }), - }, - 20 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 6_000, - word: 0, - }) - }, - 100 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 1_337, - word: 0, - }) - } - ] - }).unwrap(); + assert_eq!( + b.cost(&[0; 2], 0), + U256::zero(), + "not activated yet; should be zero" + ); + assert_eq!(b.cost(&[0; 3], 10), U256::from(40_000), "use price #1"); + assert_eq!(b.cost(&[0; 4], 20), U256::from(6_000), "use price #2"); + assert_eq!(b.cost(&[0; 1], 99), U256::from(6_000), "use price #2"); + assert_eq!(b.cost(&[0; 1], 100), U256::from(1_337), "use price #3"); + assert_eq!( + b.cost(&[0; 1], u64::max_value()), + U256::from(1_337), + "use price #3 indefinitely" + ); + } - assert_eq!(b.cost(&[0; 2], 0), U256::zero(), "not activated yet; should be zero"); - 
assert_eq!(b.cost(&[0; 3], 10), U256::from(40_000), "use price #1"); - assert_eq!(b.cost(&[0; 4], 20), U256::from(6_000), "use price #2"); - assert_eq!(b.cost(&[0; 1], 99), U256::from(6_000), "use price #2"); - assert_eq!(b.cost(&[0; 1], 100), U256::from(1_337), "use price #3"); - assert_eq!(b.cost(&[0; 1], u64::max_value()), U256::from(1_337), "use price #3 indefinitely"); - } + #[test] + fn multimap_use_last_with_same_activate_at() { + let b = Builtin::try_from(JsonBuiltin { + name: "alt_bn128_mul".to_owned(), + pricing: map![ + 1 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 40_000, + word: 0, + }), + }, + 1 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 6_000, + word: 0, + }), + }, + 1 => PricingAt { + info: None, + price: JsonPricing::Linear(JsonLinearPricing { + base: 1_337, + word: 0, + }), + } + ], + }) + .unwrap(); - - #[test] - fn multimap_use_last_with_same_activate_at() { - let b = Builtin::try_from(JsonBuiltin { - name: "alt_bn128_mul".to_owned(), - pricing: map![ - 1 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 40_000, - word: 0, - }), - }, - 1 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 6_000, - word: 0, - }), - }, - 1 => PricingAt { - info: None, - price: JsonPricing::Linear(JsonLinearPricing { - base: 1_337, - word: 0, - }), - } - ], - }).unwrap(); - - assert_eq!(b.cost(&[0; 1], 0), U256::from(0), "not activated yet"); - assert_eq!(b.cost(&[0; 1], 1), U256::from(1_337)); - } + assert_eq!(b.cost(&[0; 1], 0), U256::from(0), "not activated yet"); + assert_eq!(b.cost(&[0; 1], 1), U256::from(1_337)); + } } diff --git a/ethcore/call-contract/src/call_contract.rs b/ethcore/call-contract/src/call_contract.rs index 8b042f083..1870c698d 100644 --- a/ethcore/call-contract/src/call_contract.rs +++ b/ethcore/call-contract/src/call_contract.rs @@ -22,12 +22,12 @@ use types::ids::BlockId; /// Provides 
`call_contract` method pub trait CallContract { - /// Like `call`, but with various defaults. Designed to be used for calling contracts. - fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result; + /// Like `call`, but with various defaults. Designed to be used for calling contracts. + fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result; } /// Provides information on a blockchain service and it's registry pub trait RegistryInfo { - /// Get the address of a particular blockchain service, if available. - fn registry_address(&self, name: String, block: BlockId) -> Option
; + /// Get the address of a particular blockchain service, if available. + fn registry_address(&self, name: String, block: BlockId) -> Option
; } diff --git a/ethcore/call-contract/src/lib.rs b/ethcore/call-contract/src/lib.rs index 1cbfb1137..9235b094c 100644 --- a/ethcore/call-contract/src/lib.rs +++ b/ethcore/call-contract/src/lib.rs @@ -17,7 +17,7 @@ #![warn(missing_docs)] //! Call Contract module -//! +//! //! This crate exposes traits required to call contracts at particular block. //! All utilities that depend on on-chain data should use those traits to access it. diff --git a/ethcore/db/src/cache_manager.rs b/ethcore/db/src/cache_manager.rs index 34a02d721..cf664423d 100644 --- a/ethcore/db/src/cache_manager.rs +++ b/ethcore/db/src/cache_manager.rs @@ -16,67 +16,91 @@ //! Database cache manager -use std::collections::{VecDeque, HashSet}; -use std::hash::Hash; +use std::{ + collections::{HashSet, VecDeque}, + hash::Hash, +}; const COLLECTION_QUEUE_SIZE: usize = 8; /// DB cache manager pub struct CacheManager { - pref_cache_size: usize, - max_cache_size: usize, - bytes_per_cache_entry: usize, - cache_usage: VecDeque> + pref_cache_size: usize, + max_cache_size: usize, + bytes_per_cache_entry: usize, + cache_usage: VecDeque>, } -impl CacheManager where T: Eq + Hash { - /// Create new cache manager with preferred (heap) sizes. - pub fn new(pref_cache_size: usize, max_cache_size: usize, bytes_per_cache_entry: usize) -> Self { - CacheManager { - pref_cache_size: pref_cache_size, - max_cache_size: max_cache_size, - bytes_per_cache_entry: bytes_per_cache_entry, - cache_usage: (0..COLLECTION_QUEUE_SIZE).into_iter().map(|_| Default::default()).collect(), - } - } +impl CacheManager +where + T: Eq + Hash, +{ + /// Create new cache manager with preferred (heap) sizes. 
+ pub fn new( + pref_cache_size: usize, + max_cache_size: usize, + bytes_per_cache_entry: usize, + ) -> Self { + CacheManager { + pref_cache_size: pref_cache_size, + max_cache_size: max_cache_size, + bytes_per_cache_entry: bytes_per_cache_entry, + cache_usage: (0..COLLECTION_QUEUE_SIZE) + .into_iter() + .map(|_| Default::default()) + .collect(), + } + } - /// Mark element as used. - pub fn note_used(&mut self, id: T) { - if !self.cache_usage[0].contains(&id) { - if let Some(c) = self.cache_usage.iter_mut().skip(1).find(|e| e.contains(&id)) { - c.remove(&id); - } - self.cache_usage[0].insert(id); - } - } + /// Mark element as used. + pub fn note_used(&mut self, id: T) { + if !self.cache_usage[0].contains(&id) { + if let Some(c) = self + .cache_usage + .iter_mut() + .skip(1) + .find(|e| e.contains(&id)) + { + c.remove(&id); + } + self.cache_usage[0].insert(id); + } + } - /// Collects unused objects from cache. - /// First params is the current size of the cache. - /// Second one is an with objects to remove. It should also return new size of the cache. - pub fn collect_garbage(&mut self, current_size: usize, mut notify_unused: F) where F: FnMut(HashSet) -> usize { - if current_size < self.pref_cache_size { - self.rotate_cache_if_needed(); - return; - } + /// Collects unused objects from cache. + /// First params is the current size of the cache. + /// Second one is an with objects to remove. It should also return new size of the cache. 
+ pub fn collect_garbage(&mut self, current_size: usize, mut notify_unused: F) + where + F: FnMut(HashSet) -> usize, + { + if current_size < self.pref_cache_size { + self.rotate_cache_if_needed(); + return; + } - for _ in 0..COLLECTION_QUEUE_SIZE { - if let Some(back) = self.cache_usage.pop_back() { - let current_size = notify_unused(back); - self.cache_usage.push_front(Default::default()); - if current_size < self.max_cache_size { - break - } - } - } - } + for _ in 0..COLLECTION_QUEUE_SIZE { + if let Some(back) = self.cache_usage.pop_back() { + let current_size = notify_unused(back); + self.cache_usage.push_front(Default::default()); + if current_size < self.max_cache_size { + break; + } + } + } + } - fn rotate_cache_if_needed(&mut self) { - if self.cache_usage.is_empty() { return } + fn rotate_cache_if_needed(&mut self) { + if self.cache_usage.is_empty() { + return; + } - if self.cache_usage[0].len() * self.bytes_per_cache_entry > self.pref_cache_size / COLLECTION_QUEUE_SIZE { - if let Some(cache) = self.cache_usage.pop_back() { - self.cache_usage.push_front(cache); - } - } - } + if self.cache_usage[0].len() * self.bytes_per_cache_entry + > self.pref_cache_size / COLLECTION_QUEUE_SIZE + { + if let Some(cache) = self.cache_usage.pop_back() { + self.cache_usage.push_front(cache); + } + } + } } diff --git a/ethcore/db/src/db.rs b/ethcore/db/src/db.rs index 192bb75a8..5fc837691 100644 --- a/ethcore/db/src/db.rs +++ b/ethcore/db/src/db.rs @@ -16,11 +16,9 @@ //! Database utilities and definitions. -use std::ops::Deref; -use std::hash::Hash; -use std::collections::HashMap; -use parking_lot::RwLock; use kvdb::{DBTransaction, KeyValueDB}; +use parking_lot::RwLock; +use std::{collections::HashMap, hash::Hash, ops::Deref}; use rlp; @@ -47,208 +45,270 @@ pub const NUM_COLUMNS: Option = Some(8); /// Modes for updating caches. #[derive(Clone, Copy)] pub enum CacheUpdatePolicy { - /// Overwrite entries. - Overwrite, - /// Remove entries. - Remove, + /// Overwrite entries. 
+ Overwrite, + /// Remove entries. + Remove, } /// A cache for arbitrary key-value pairs. pub trait Cache { - /// Insert an entry into the cache and get the old value. - fn insert(&mut self, k: K, v: V) -> Option; + /// Insert an entry into the cache and get the old value. + fn insert(&mut self, k: K, v: V) -> Option; - /// Remove an entry from the cache, getting the old value if it existed. - fn remove(&mut self, k: &K) -> Option; + /// Remove an entry from the cache, getting the old value if it existed. + fn remove(&mut self, k: &K) -> Option; - /// Query the cache for a key's associated value. - fn get(&self, k: &K) -> Option<&V>; + /// Query the cache for a key's associated value. + fn get(&self, k: &K) -> Option<&V>; } -impl Cache for HashMap where K: Hash + Eq { - fn insert(&mut self, k: K, v: V) -> Option { - HashMap::insert(self, k, v) - } +impl Cache for HashMap +where + K: Hash + Eq, +{ + fn insert(&mut self, k: K, v: V) -> Option { + HashMap::insert(self, k, v) + } - fn remove(&mut self, k: &K) -> Option { - HashMap::remove(self, k) - } + fn remove(&mut self, k: &K) -> Option { + HashMap::remove(self, k) + } - fn get(&self, k: &K) -> Option<&V> { - HashMap::get(self, k) - } + fn get(&self, k: &K) -> Option<&V> { + HashMap::get(self, k) + } } /// Should be used to get database key associated with given value. pub trait Key { - /// The db key associated with this value. - type Target: Deref; + /// The db key associated with this value. + type Target: Deref; - /// Returns db key. - fn key(&self) -> Self::Target; + /// Returns db key. + fn key(&self) -> Self::Target; } /// Should be used to write value into database. pub trait Writable { - /// Writes the value into the database. - fn write(&mut self, col: Option, key: &Key, value: &T) where T: rlp::Encodable, R: Deref; + /// Writes the value into the database. + fn write(&mut self, col: Option, key: &Key, value: &T) + where + T: rlp::Encodable, + R: Deref; - /// Deletes key from the databse. 
- fn delete(&mut self, col: Option, key: &Key) where T: rlp::Encodable, R: Deref; + /// Deletes key from the databse. + fn delete(&mut self, col: Option, key: &Key) + where + T: rlp::Encodable, + R: Deref; - /// Writes the value into the database and updates the cache. - fn write_with_cache(&mut self, col: Option, cache: &mut Cache, key: K, value: T, policy: CacheUpdatePolicy) where - K: Key + Hash + Eq, - T: rlp::Encodable, - R: Deref { - self.write(col, &key, &value); - match policy { - CacheUpdatePolicy::Overwrite => { - cache.insert(key, value); - }, - CacheUpdatePolicy::Remove => { - cache.remove(&key); - } - } - } + /// Writes the value into the database and updates the cache. + fn write_with_cache( + &mut self, + col: Option, + cache: &mut Cache, + key: K, + value: T, + policy: CacheUpdatePolicy, + ) where + K: Key + Hash + Eq, + T: rlp::Encodable, + R: Deref, + { + self.write(col, &key, &value); + match policy { + CacheUpdatePolicy::Overwrite => { + cache.insert(key, value); + } + CacheUpdatePolicy::Remove => { + cache.remove(&key); + } + } + } - /// Writes the values into the database and updates the cache. - fn extend_with_cache(&mut self, col: Option, cache: &mut Cache, values: HashMap, policy: CacheUpdatePolicy) where - K: Key + Hash + Eq, - T: rlp::Encodable, - R: Deref { - match policy { - CacheUpdatePolicy::Overwrite => { - for (key, value) in values { - self.write(col, &key, &value); - cache.insert(key, value); - } - }, - CacheUpdatePolicy::Remove => { - for (key, value) in &values { - self.write(col, key, value); - cache.remove(key); - } - }, - } - } - - /// Writes and removes the values into the database and updates the cache. 
- fn extend_with_option_cache(&mut self, col: Option, cache: &mut Cache>, values: HashMap>, policy: CacheUpdatePolicy) where - K: Key + Hash + Eq, - T: rlp::Encodable, - R: Deref { - match policy { - CacheUpdatePolicy::Overwrite => { - for (key, value) in values { - match value { - Some(ref v) => self.write(col, &key, v), - None => self.delete(col, &key), - } - cache.insert(key, value); - } - }, - CacheUpdatePolicy::Remove => { - for (key, value) in values { - match value { - Some(v) => self.write(col, &key, &v), - None => self.delete(col, &key), - } - cache.remove(&key); - } - }, - } - } + /// Writes the values into the database and updates the cache. + fn extend_with_cache( + &mut self, + col: Option, + cache: &mut Cache, + values: HashMap, + policy: CacheUpdatePolicy, + ) where + K: Key + Hash + Eq, + T: rlp::Encodable, + R: Deref, + { + match policy { + CacheUpdatePolicy::Overwrite => { + for (key, value) in values { + self.write(col, &key, &value); + cache.insert(key, value); + } + } + CacheUpdatePolicy::Remove => { + for (key, value) in &values { + self.write(col, key, value); + cache.remove(key); + } + } + } + } + /// Writes and removes the values into the database and updates the cache. + fn extend_with_option_cache( + &mut self, + col: Option, + cache: &mut Cache>, + values: HashMap>, + policy: CacheUpdatePolicy, + ) where + K: Key + Hash + Eq, + T: rlp::Encodable, + R: Deref, + { + match policy { + CacheUpdatePolicy::Overwrite => { + for (key, value) in values { + match value { + Some(ref v) => self.write(col, &key, v), + None => self.delete(col, &key), + } + cache.insert(key, value); + } + } + CacheUpdatePolicy::Remove => { + for (key, value) in values { + match value { + Some(v) => self.write(col, &key, &v), + None => self.delete(col, &key), + } + cache.remove(&key); + } + } + } + } } /// Should be used to read values from database. pub trait Readable { - /// Returns value for given key. 
- fn read(&self, col: Option, key: &Key) -> Option where - T: rlp::Decodable, - R: Deref; + /// Returns value for given key. + fn read(&self, col: Option, key: &Key) -> Option + where + T: rlp::Decodable, + R: Deref; - /// Returns value for given key either in cache or in database. - fn read_with_cache(&self, col: Option, cache: &RwLock, key: &K) -> Option where - K: Key + Eq + Hash + Clone, - T: Clone + rlp::Decodable, - C: Cache { - { - let read = cache.read(); - if let Some(v) = read.get(key) { - return Some(v.clone()); - } - } + /// Returns value for given key either in cache or in database. + fn read_with_cache(&self, col: Option, cache: &RwLock, key: &K) -> Option + where + K: Key + Eq + Hash + Clone, + T: Clone + rlp::Decodable, + C: Cache, + { + { + let read = cache.read(); + if let Some(v) = read.get(key) { + return Some(v.clone()); + } + } - self.read(col, key).map(|value: T|{ - let mut write = cache.write(); - write.insert(key.clone(), value.clone()); - value - }) - } + self.read(col, key).map(|value: T| { + let mut write = cache.write(); + write.insert(key.clone(), value.clone()); + value + }) + } - /// Returns value for given key either in two-layered cache or in database. - fn read_with_two_layer_cache(&self, col: Option, l1_cache: &RwLock, l2_cache: &RwLock, key: &K) -> Option where - K: Key + Eq + Hash + Clone, - T: Clone + rlp::Decodable, - C: Cache { - { - let read = l1_cache.read(); - if let Some(v) = read.get(key) { - return Some(v.clone()); - } - } + /// Returns value for given key either in two-layered cache or in database. 
+ fn read_with_two_layer_cache( + &self, + col: Option, + l1_cache: &RwLock, + l2_cache: &RwLock, + key: &K, + ) -> Option + where + K: Key + Eq + Hash + Clone, + T: Clone + rlp::Decodable, + C: Cache, + { + { + let read = l1_cache.read(); + if let Some(v) = read.get(key) { + return Some(v.clone()); + } + } - self.read_with_cache(col, l2_cache, key) - } + self.read_with_cache(col, l2_cache, key) + } - /// Returns true if given value exists. - fn exists(&self, col: Option, key: &Key) -> bool where R: Deref; + /// Returns true if given value exists. + fn exists(&self, col: Option, key: &Key) -> bool + where + R: Deref; - /// Returns true if given value exists either in cache or in database. - fn exists_with_cache(&self, col: Option, cache: &RwLock, key: &K) -> bool where - K: Eq + Hash + Key, - R: Deref, - C: Cache { - { - let read = cache.read(); - if read.get(key).is_some() { - return true; - } - } + /// Returns true if given value exists either in cache or in database. + fn exists_with_cache(&self, col: Option, cache: &RwLock, key: &K) -> bool + where + K: Eq + Hash + Key, + R: Deref, + C: Cache, + { + { + let read = cache.read(); + if read.get(key).is_some() { + return true; + } + } - self.exists::(col, key) - } + self.exists::(col, key) + } } impl Writable for DBTransaction { - fn write(&mut self, col: Option, key: &Key, value: &T) where T: rlp::Encodable, R: Deref { - self.put(col, &key.key(), &rlp::encode(value)); - } + fn write(&mut self, col: Option, key: &Key, value: &T) + where + T: rlp::Encodable, + R: Deref, + { + self.put(col, &key.key(), &rlp::encode(value)); + } - fn delete(&mut self, col: Option, key: &Key) where T: rlp::Encodable, R: Deref { - self.delete(col, &key.key()); - } + fn delete(&mut self, col: Option, key: &Key) + where + T: rlp::Encodable, + R: Deref, + { + self.delete(col, &key.key()); + } } impl Readable for KVDB { - fn read(&self, col: Option, key: &Key) -> Option - where T: rlp::Decodable, R: Deref { - self.get(col, &key.key()) - 
.expect(&format!("db get failed, key: {:?}", &key.key() as &[u8])) - .map(|v| rlp::decode(&v).expect("decode db value failed") ) + fn read(&self, col: Option, key: &Key) -> Option + where + T: rlp::Decodable, + R: Deref, + { + self.get(col, &key.key()) + .expect(&format!("db get failed, key: {:?}", &key.key() as &[u8])) + .map(|v| rlp::decode(&v).expect("decode db value failed")) + } - } + fn exists(&self, col: Option, key: &Key) -> bool + where + R: Deref, + { + let result = self.get(col, &key.key()); - fn exists(&self, col: Option, key: &Key) -> bool where R: Deref { - let result = self.get(col, &key.key()); - - match result { - Ok(v) => v.is_some(), - Err(err) => { - panic!("db get failed, key: {:?}, err: {:?}", &key.key() as &[u8], err); - } - } - } + match result { + Ok(v) => v.is_some(), + Err(err) => { + panic!( + "db get failed, key: {:?}, err: {:?}", + &key.key() as &[u8], + err + ); + } + } + } } diff --git a/ethcore/db/src/keys.rs b/ethcore/db/src/keys.rs index 96ecde85f..0b04f0a84 100644 --- a/ethcore/db/src/keys.rs +++ b/ethcore/db/src/keys.rs @@ -16,99 +16,96 @@ //! Blockchain DB extras. 
-use std::io::Write; -use std::ops; +use std::{io::Write, ops}; -use common_types::BlockNumber; -use common_types::engines::epoch::Transition as EpochTransition; -use common_types::receipt::Receipt; +use common_types::{engines::epoch::Transition as EpochTransition, receipt::Receipt, BlockNumber}; use ethereum_types::{H256, H264, U256}; use heapsize::HeapSizeOf; use kvdb::PREFIX_LEN as DB_PREFIX_LEN; use rlp; -use rlp_derive::{RlpEncodableWrapper, RlpDecodableWrapper, RlpEncodable, RlpDecodable}; +use rlp_derive::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; use crate::db::Key; /// Represents index of extra data in database #[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] pub enum ExtrasIndex { - /// Block details index - BlockDetails = 0, - /// Block hash index - BlockHash = 1, - /// Transaction address index - TransactionAddress = 2, - /// Block receipts index - BlockReceipts = 4, - /// Epoch transition data index. - EpochTransitions = 5, - /// Pending epoch transition data index. - PendingEpochTransition = 6, + /// Block details index + BlockDetails = 0, + /// Block hash index + BlockHash = 1, + /// Transaction address index + TransactionAddress = 2, + /// Block receipts index + BlockReceipts = 4, + /// Epoch transition data index. + EpochTransitions = 5, + /// Pending epoch transition data index. + PendingEpochTransition = 6, } fn with_index(hash: &H256, i: ExtrasIndex) -> H264 { - let mut result = H264::default(); - result[0] = i as u8; - (*result)[1..].clone_from_slice(hash); - result + let mut result = H264::default(); + result[0] = i as u8; + (*result)[1..].clone_from_slice(hash); + result } /// Wrapper for block number used as a DB key. 
pub struct BlockNumberKey([u8; 5]); impl ops::Deref for BlockNumberKey { - type Target = [u8]; + type Target = [u8]; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl Key for BlockNumber { - type Target = BlockNumberKey; + type Target = BlockNumberKey; - fn key(&self) -> Self::Target { - let mut result = [0u8; 5]; - result[0] = ExtrasIndex::BlockHash as u8; - result[1] = (self >> 24) as u8; - result[2] = (self >> 16) as u8; - result[3] = (self >> 8) as u8; - result[4] = *self as u8; - BlockNumberKey(result) - } + fn key(&self) -> Self::Target { + let mut result = [0u8; 5]; + result[0] = ExtrasIndex::BlockHash as u8; + result[1] = (self >> 24) as u8; + result[2] = (self >> 16) as u8; + result[3] = (self >> 8) as u8; + result[4] = *self as u8; + BlockNumberKey(result) + } } impl Key for H256 { - type Target = H264; + type Target = H264; - fn key(&self) -> H264 { - with_index(self, ExtrasIndex::BlockDetails) - } + fn key(&self) -> H264 { + with_index(self, ExtrasIndex::BlockDetails) + } } impl Key for H256 { - type Target = H264; + type Target = H264; - fn key(&self) -> H264 { - with_index(self, ExtrasIndex::TransactionAddress) - } + fn key(&self) -> H264 { + with_index(self, ExtrasIndex::TransactionAddress) + } } impl Key for H256 { - type Target = H264; + type Target = H264; - fn key(&self) -> H264 { - with_index(self, ExtrasIndex::BlockReceipts) - } + fn key(&self) -> H264 { + with_index(self, ExtrasIndex::BlockReceipts) + } } impl Key for H256 { - type Target = H264; + type Target = H264; - fn key(&self) -> H264 { - with_index(self, ExtrasIndex::PendingEpochTransition) - } + fn key(&self) -> H264 { + with_index(self, ExtrasIndex::PendingEpochTransition) + } } /// length of epoch keys. @@ -117,153 +114,170 @@ pub const EPOCH_KEY_LEN: usize = DB_PREFIX_LEN + 16; /// epoch key prefix. /// used to iterate over all epoch transitions in order from genesis. 
pub const EPOCH_KEY_PREFIX: &'static [u8; DB_PREFIX_LEN] = &[ - ExtrasIndex::EpochTransitions as u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ExtrasIndex::EpochTransitions as u8, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, ]; /// Epoch transitions key pub struct EpochTransitionsKey([u8; EPOCH_KEY_LEN]); impl ops::Deref for EpochTransitionsKey { - type Target = [u8]; + type Target = [u8]; - fn deref(&self) -> &[u8] { &self.0[..] } + fn deref(&self) -> &[u8] { + &self.0[..] + } } impl Key for u64 { - type Target = EpochTransitionsKey; + type Target = EpochTransitionsKey; - fn key(&self) -> Self::Target { - let mut arr = [0u8; EPOCH_KEY_LEN]; - arr[..DB_PREFIX_LEN].copy_from_slice(&EPOCH_KEY_PREFIX[..]); + fn key(&self) -> Self::Target { + let mut arr = [0u8; EPOCH_KEY_LEN]; + arr[..DB_PREFIX_LEN].copy_from_slice(&EPOCH_KEY_PREFIX[..]); - write!(&mut arr[DB_PREFIX_LEN..], "{:016x}", self) - .expect("format arg is valid; no more than 16 chars will be written; qed"); + write!(&mut arr[DB_PREFIX_LEN..], "{:016x}", self) + .expect("format arg is valid; no more than 16 chars will be written; qed"); - EpochTransitionsKey(arr) - } + EpochTransitionsKey(arr) + } } /// Familial details concerning a block #[derive(Debug, Clone)] pub struct BlockDetails { - /// Block number - pub number: BlockNumber, - /// Total difficulty of the block and all its parents - pub total_difficulty: U256, - /// Parent block hash - pub parent: H256, - /// List of children block hashes - pub children: Vec, - /// Whether the block is considered finalized - pub is_finalized: bool, + /// Block number + pub number: BlockNumber, + /// Total difficulty of the block and all its parents + pub total_difficulty: U256, + /// Parent block hash + pub parent: H256, + /// List of children block hashes + pub children: Vec, + /// Whether the block is considered finalized + pub is_finalized: bool, } impl rlp::Encodable for BlockDetails { - fn rlp_append(&self, stream: &mut rlp::RlpStream) { - let use_short_version 
= !self.is_finalized; + fn rlp_append(&self, stream: &mut rlp::RlpStream) { + let use_short_version = !self.is_finalized; - match use_short_version { - true => { stream.begin_list(4); }, - false => { stream.begin_list(5); }, - } + match use_short_version { + true => { + stream.begin_list(4); + } + false => { + stream.begin_list(5); + } + } - stream.append(&self.number); - stream.append(&self.total_difficulty); - stream.append(&self.parent); - stream.append_list(&self.children); - if !use_short_version { - stream.append(&self.is_finalized); - } - } + stream.append(&self.number); + stream.append(&self.total_difficulty); + stream.append(&self.parent); + stream.append_list(&self.children); + if !use_short_version { + stream.append(&self.is_finalized); + } + } } impl rlp::Decodable for BlockDetails { - fn decode(rlp: &rlp::Rlp) -> Result { - let use_short_version = match rlp.item_count()? { - 4 => true, - 5 => false, - _ => return Err(rlp::DecoderError::RlpIncorrectListLen), - }; + fn decode(rlp: &rlp::Rlp) -> Result { + let use_short_version = match rlp.item_count()? { + 4 => true, + 5 => false, + _ => return Err(rlp::DecoderError::RlpIncorrectListLen), + }; - Ok(BlockDetails { - number: rlp.val_at(0)?, - total_difficulty: rlp.val_at(1)?, - parent: rlp.val_at(2)?, - children: rlp.list_at(3)?, - is_finalized: if use_short_version { - false - } else { - rlp.val_at(4)? - }, - }) - } + Ok(BlockDetails { + number: rlp.val_at(0)?, + total_difficulty: rlp.val_at(1)?, + parent: rlp.val_at(2)?, + children: rlp.list_at(3)?, + is_finalized: if use_short_version { + false + } else { + rlp.val_at(4)? 
+ }, + }) + } } impl HeapSizeOf for BlockDetails { - fn heap_size_of_children(&self) -> usize { - self.children.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.children.heap_size_of_children() + } } /// Represents address of certain transaction within block #[derive(Debug, PartialEq, Clone, RlpEncodable, RlpDecodable)] pub struct TransactionAddress { - /// Block hash - pub block_hash: H256, - /// Transaction index within the block - pub index: usize + /// Block hash + pub block_hash: H256, + /// Transaction index within the block + pub index: usize, } impl HeapSizeOf for TransactionAddress { - fn heap_size_of_children(&self) -> usize { 0 } + fn heap_size_of_children(&self) -> usize { + 0 + } } /// Contains all block receipts. #[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper)] pub struct BlockReceipts { - /// Block receipts - pub receipts: Vec, + /// Block receipts + pub receipts: Vec, } impl BlockReceipts { - /// Create new block receipts wrapper. - pub fn new(receipts: Vec) -> Self { - BlockReceipts { - receipts: receipts - } - } + /// Create new block receipts wrapper. + pub fn new(receipts: Vec) -> Self { + BlockReceipts { receipts: receipts } + } } impl HeapSizeOf for BlockReceipts { - fn heap_size_of_children(&self) -> usize { - self.receipts.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.receipts.heap_size_of_children() + } } /// Candidate transitions to an epoch with specific number. 
#[derive(Clone, RlpEncodable, RlpDecodable)] pub struct EpochTransitions { - /// Epoch number - pub number: u64, - /// List of candidate transitions - pub candidates: Vec, + /// Epoch number + pub number: u64, + /// List of candidate transitions + pub candidates: Vec, } #[cfg(test)] mod tests { - use rlp::*; + use rlp::*; - use super::BlockReceipts; + use super::BlockReceipts; - #[test] - fn encode_block_receipts() { - let br = BlockReceipts::new(Vec::new()); + #[test] + fn encode_block_receipts() { + let br = BlockReceipts::new(Vec::new()); - let mut s = RlpStream::new_list(2); - s.append(&br); - assert!(!s.is_finished(), "List shouldn't finished yet"); - s.append(&br); - assert!(s.is_finished(), "List should be finished now"); - s.out(); - } + let mut s = RlpStream::new_list(2); + s.append(&br); + assert!(!s.is_finished(), "List shouldn't finished yet"); + s.append(&br); + assert!(s.is_finished(), "List should be finished now"); + s.out(); + } } diff --git a/ethcore/db/src/lib.rs b/ethcore/db/src/lib.rs index 3fdb368a1..7deb7a7ab 100644 --- a/ethcore/db/src/lib.rs +++ b/ethcore/db/src/lib.rs @@ -20,7 +20,7 @@ mod db; -pub mod keys; pub mod cache_manager; +pub mod keys; pub use self::db::*; diff --git a/ethcore/evm/benches/basic.rs b/ethcore/evm/benches/basic.rs index c86afcc57..643b0ee79 100644 --- a/ethcore/evm/benches/basic.rs +++ b/ethcore/evm/benches/basic.rs @@ -20,140 +20,140 @@ extern crate criterion; extern crate bit_set; extern crate ethereum_types; -extern crate parking_lot; -extern crate heapsize; -extern crate vm; extern crate evm; +extern crate heapsize; extern crate keccak_hash as hash; extern crate memory_cache; extern crate parity_bytes as bytes; +extern crate parking_lot; extern crate rustc_hex; +extern crate vm; -use criterion::{Criterion, Bencher, black_box}; -use std::str::FromStr; -use std::sync::Arc; -use ethereum_types::{U256, Address}; -use vm::{ActionParams, Result, GasLeft, Ext}; -use vm::tests::FakeExt; +use criterion::{black_box, 
Bencher, Criterion}; +use ethereum_types::{Address, U256}; use evm::Factory; use rustc_hex::FromHex; +use std::{str::FromStr, sync::Arc}; +use vm::{tests::FakeExt, ActionParams, Ext, GasLeft, Result}; criterion_group!( - basic, - simple_loop_log0_usize, - simple_loop_log0_u256, - mem_gas_calculation_same_usize, - mem_gas_calculation_same_u256, - mem_gas_calculation_increasing_usize, - mem_gas_calculation_increasing_u256, - blockhash_mulmod_small, - blockhash_mulmod_large, + basic, + simple_loop_log0_usize, + simple_loop_log0_u256, + mem_gas_calculation_same_usize, + mem_gas_calculation_same_u256, + mem_gas_calculation_increasing_usize, + mem_gas_calculation_increasing_u256, + blockhash_mulmod_small, + blockhash_mulmod_large, ); criterion_main!(basic); fn simple_loop_log0_usize(b: &mut Criterion) { - b.bench_function("simple_loop_log0_usize", |b| { - simple_loop_log0(U256::from(::std::usize::MAX), b); - }); + b.bench_function("simple_loop_log0_usize", |b| { + simple_loop_log0(U256::from(::std::usize::MAX), b); + }); } fn simple_loop_log0_u256(b: &mut Criterion) { - b.bench_function("simple_loop_log0_u256", |b| { - simple_loop_log0(!U256::zero(), b); - }); + b.bench_function("simple_loop_log0_u256", |b| { + simple_loop_log0(!U256::zero(), b); + }); } fn simple_loop_log0(gas: U256, b: &mut Bencher) { - let factory = Factory::default(); - let mut ext = FakeExt::new(); + let factory = Factory::default(); + let mut ext = FakeExt::new(); - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let code = black_box( - "62ffffff5b600190036000600fa0600357".from_hex().unwrap() - ); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let code = black_box("62ffffff5b600190036000600fa0600357".from_hex().unwrap()); - b.iter(|| { - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = gas; - params.code = Some(Arc::new(code.clone())); + b.iter(|| { + let mut params = 
ActionParams::default(); + params.address = address.clone(); + params.gas = gas; + params.code = Some(Arc::new(code.clone())); - let vm = factory.create(params, ext.schedule(), 0); + let vm = factory.create(params, ext.schedule(), 0); - result(vm.exec(&mut ext).ok().unwrap()) - }); + result(vm.exec(&mut ext).ok().unwrap()) + }); } fn mem_gas_calculation_same_usize(b: &mut Criterion) { - b.bench_function("mem_gas_calculation_same_usize", |b| { - mem_gas_calculation_same(U256::from(::std::usize::MAX), b); - }); + b.bench_function("mem_gas_calculation_same_usize", |b| { + mem_gas_calculation_same(U256::from(::std::usize::MAX), b); + }); } fn mem_gas_calculation_same_u256(b: &mut Criterion) { - b.bench_function("mem_gas_calculation_same_u256", |b| { - mem_gas_calculation_same(!U256::zero(), b); - }); + b.bench_function("mem_gas_calculation_same_u256", |b| { + mem_gas_calculation_same(!U256::zero(), b); + }); } fn mem_gas_calculation_same(gas: U256, b: &mut Bencher) { - let factory = Factory::default(); - let mut ext = FakeExt::new(); + let factory = Factory::default(); + let mut ext = FakeExt::new(); - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - b.iter(|| { - let code = black_box( - "6110006001556001546000555b610fff805560016000540380600055600c57".from_hex().unwrap() - ); + b.iter(|| { + let code = black_box( + "6110006001556001546000555b610fff805560016000540380600055600c57" + .from_hex() + .unwrap(), + ); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = gas; - params.code = Some(Arc::new(code.clone())); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = gas; + params.code = Some(Arc::new(code.clone())); - let vm = factory.create(params, ext.schedule(), 0); + let vm = factory.create(params, ext.schedule(), 0); - result(vm.exec(&mut ext).ok().unwrap()) 
- }); + result(vm.exec(&mut ext).ok().unwrap()) + }); } fn mem_gas_calculation_increasing_usize(b: &mut Criterion) { - b.bench_function("mem_gas_calculation_increasing_usize", |b| { - mem_gas_calculation_increasing(U256::from(::std::usize::MAX), b); - }); + b.bench_function("mem_gas_calculation_increasing_usize", |b| { + mem_gas_calculation_increasing(U256::from(::std::usize::MAX), b); + }); } fn mem_gas_calculation_increasing_u256(b: &mut Criterion) { - b.bench_function("mem_gas_calculation_increasing_u256", |b| { - mem_gas_calculation_increasing(!U256::zero(), b); - }); + b.bench_function("mem_gas_calculation_increasing_u256", |b| { + mem_gas_calculation_increasing(!U256::zero(), b); + }); } fn mem_gas_calculation_increasing(gas: U256, b: &mut Bencher) { - let factory = Factory::default(); - let mut ext = FakeExt::new(); + let factory = Factory::default(); + let mut ext = FakeExt::new(); - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - b.iter(|| { - let code = black_box( - "6110006001556001546000555b610fff60005401805560016000540380600055600c57".from_hex().unwrap() - ); + b.iter(|| { + let code = black_box( + "6110006001556001546000555b610fff60005401805560016000540380600055600c57" + .from_hex() + .unwrap(), + ); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = gas; - params.code = Some(Arc::new(code.clone())); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = gas; + params.code = Some(Arc::new(code.clone())); - let vm = factory.create(params, ext.schedule(), 0); + let vm = factory.create(params, ext.schedule(), 0); - result(vm.exec(&mut ext).ok().unwrap()) - }); + result(vm.exec(&mut ext).ok().unwrap()) + }); } fn blockhash_mulmod_small(b: &mut Criterion) { - b.bench_function("blockhash_mulmod_small", |b| { + 
b.bench_function("blockhash_mulmod_small", |b| { let factory = Factory::default(); let mut ext = FakeExt::new(); @@ -177,7 +177,7 @@ fn blockhash_mulmod_small(b: &mut Criterion) { } fn blockhash_mulmod_large(b: &mut Criterion) { - b.bench_function("blockhash_mulmod_large", |b| { + b.bench_function("blockhash_mulmod_large", |b| { let factory = Factory::default(); let mut ext = FakeExt::new(); @@ -201,9 +201,9 @@ fn blockhash_mulmod_large(b: &mut Criterion) { } fn result(r: Result) -> U256 { - match r { - Ok(GasLeft::Known(gas_left)) => gas_left, - Ok(GasLeft::NeedsReturn { gas_left, .. }) => gas_left, - _ => U256::zero(), - } + match r { + Ok(GasLeft::Known(gas_left)) => gas_left, + Ok(GasLeft::NeedsReturn { gas_left, .. }) => gas_left, + _ => U256::zero(), + } } diff --git a/ethcore/evm/src/evm.rs b/ethcore/evm/src/evm.rs index 3c88155f2..0d76b78b3 100644 --- a/ethcore/evm/src/evm.rs +++ b/ethcore/evm/src/evm.rs @@ -16,20 +16,20 @@ //! Evm interface. -use std::{ops, cmp, fmt}; use ethereum_types::{U128, U256, U512}; -use vm::{Ext, Result, ReturnData, GasLeft, Error}; +use std::{cmp, fmt, ops}; +use vm::{Error, Ext, GasLeft, Result, ReturnData}; /// Finalization result. Gas Left: either it is a known value, or it needs to be computed by processing /// a return instruction. #[derive(Debug)] pub struct FinalizationResult { - /// Final amount of gas left. - pub gas_left: U256, - /// Apply execution state changes or revert them. - pub apply_state: bool, - /// Return data buffer. - pub return_data: ReturnData, + /// Final amount of gas left. + pub gas_left: U256, + /// Apply execution state changes or revert them. + pub apply_state: bool, + /// Return data buffer. + pub return_data: ReturnData, } /// Types that can be "finalized" using an EVM. @@ -37,177 +37,188 @@ pub struct FinalizationResult { /// In practice, this is just used to define an inherent impl on /// `Reult>`. 
pub trait Finalize { - /// Consume the externalities, call return if necessary, and produce call result. - fn finalize(self, ext: E) -> Result; + /// Consume the externalities, call return if necessary, and produce call result. + fn finalize(self, ext: E) -> Result; } impl Finalize for Result { - fn finalize(self, ext: E) -> Result { - match self { - Ok(GasLeft::Known(gas_left)) => { - Ok(FinalizationResult { - gas_left, - apply_state: true, - return_data: ReturnData::empty() - }) - }, - Ok(GasLeft::NeedsReturn { gas_left, data, apply_state }) => { - ext.ret(&gas_left, &data, apply_state).map(|gas_left| - FinalizationResult { gas_left, apply_state, return_data: data } - ) - }, - Err(err) => Err(err), - } - } + fn finalize(self, ext: E) -> Result { + match self { + Ok(GasLeft::Known(gas_left)) => Ok(FinalizationResult { + gas_left, + apply_state: true, + return_data: ReturnData::empty(), + }), + Ok(GasLeft::NeedsReturn { + gas_left, + data, + apply_state, + }) => ext + .ret(&gas_left, &data, apply_state) + .map(|gas_left| FinalizationResult { + gas_left, + apply_state, + return_data: data, + }), + Err(err) => Err(err), + } + } } impl Finalize for Error { - fn finalize(self, _ext: E) -> Result { - Err(self) - } + fn finalize(self, _ext: E) -> Result { + Err(self) + } } /// Cost calculation type. 
For low-gas usage we calculate costs using usize instead of U256 -pub trait CostType: Sized + From + Copy + Send - + ops::Mul + ops::Div + ops::Add + ops::Sub - + ops::Shr + ops::Shl - + cmp::Ord + fmt::Debug { - /// Converts this cost into `U256` - fn as_u256(&self) -> U256; - /// Tries to fit `U256` into this `Cost` type - fn from_u256(val: U256) -> Result; - /// Convert to usize (may panic) - fn as_usize(&self) -> usize; - /// Add with overflow - fn overflow_add(self, other: Self) -> (Self, bool); - /// Multiple with overflow - fn overflow_mul(self, other: Self) -> (Self, bool); - /// Single-step full multiplication and shift: `(self*other) >> shr` - /// Should not overflow on intermediate steps - fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool); +pub trait CostType: + Sized + + From + + Copy + + Send + + ops::Mul + + ops::Div + + ops::Add + + ops::Sub + + ops::Shr + + ops::Shl + + cmp::Ord + + fmt::Debug +{ + /// Converts this cost into `U256` + fn as_u256(&self) -> U256; + /// Tries to fit `U256` into this `Cost` type + fn from_u256(val: U256) -> Result; + /// Convert to usize (may panic) + fn as_usize(&self) -> usize; + /// Add with overflow + fn overflow_add(self, other: Self) -> (Self, bool); + /// Multiple with overflow + fn overflow_mul(self, other: Self) -> (Self, bool); + /// Single-step full multiplication and shift: `(self*other) >> shr` + /// Should not overflow on intermediate steps + fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool); } impl CostType for U256 { - fn as_u256(&self) -> U256 { - *self - } + fn as_u256(&self) -> U256 { + *self + } - fn from_u256(val: U256) -> Result { - Ok(val) - } + fn from_u256(val: U256) -> Result { + Ok(val) + } - fn as_usize(&self) -> usize { - self.as_u64() as usize - } + fn as_usize(&self) -> usize { + self.as_u64() as usize + } - fn overflow_add(self, other: Self) -> (Self, bool) { - self.overflowing_add(other) - } + fn overflow_add(self, other: Self) -> (Self, bool) { + 
self.overflowing_add(other) + } - fn overflow_mul(self, other: Self) -> (Self, bool) { - self.overflowing_mul(other) - } + fn overflow_mul(self, other: Self) -> (Self, bool) { + self.overflowing_mul(other) + } - fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) { - let x = self.full_mul(other); - let U512(parts) = x; - let overflow = (parts[4] | parts[5] | parts[6] | parts[7]) > 0; - let U512(parts) = x >> shr; - ( - U256([parts[0], parts[1], parts[2], parts[3]]), - overflow - ) - } + fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) { + let x = self.full_mul(other); + let U512(parts) = x; + let overflow = (parts[4] | parts[5] | parts[6] | parts[7]) > 0; + let U512(parts) = x >> shr; + (U256([parts[0], parts[1], parts[2], parts[3]]), overflow) + } } impl CostType for usize { - fn as_u256(&self) -> U256 { - U256::from(*self) - } + fn as_u256(&self) -> U256 { + U256::from(*self) + } - fn from_u256(val: U256) -> Result { - let res = val.low_u64() as usize; + fn from_u256(val: U256) -> Result { + let res = val.low_u64() as usize; - // validate if value fits into usize - if U256::from(res) != val { - return Err(Error::OutOfGas); - } + // validate if value fits into usize + if U256::from(res) != val { + return Err(Error::OutOfGas); + } - Ok(res) - } + Ok(res) + } - fn as_usize(&self) -> usize { - *self - } + fn as_usize(&self) -> usize { + *self + } - fn overflow_add(self, other: Self) -> (Self, bool) { - self.overflowing_add(other) - } + fn overflow_add(self, other: Self) -> (Self, bool) { + self.overflowing_add(other) + } - fn overflow_mul(self, other: Self) -> (Self, bool) { - self.overflowing_mul(other) - } + fn overflow_mul(self, other: Self) -> (Self, bool) { + self.overflowing_mul(other) + } - fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) { - let (c, o) = U128::from(self).overflowing_mul(U128::from(other)); - let U128(parts) = c; - let overflow = o | (parts[1] > 0); - let U128(parts) = c >> shr; - let result = 
parts[0] as usize; - let overflow = overflow | (parts[0] > result as u64); - (result, overflow) - } + fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) { + let (c, o) = U128::from(self).overflowing_mul(U128::from(other)); + let U128(parts) = c; + let overflow = o | (parts[1] > 0); + let U128(parts) = c >> shr; + let result = parts[0] as usize; + let overflow = overflow | (parts[0] > result as u64); + (result, overflow) + } } #[cfg(test)] mod tests { - use ethereum_types::U256; - use super::CostType; + use super::CostType; + use ethereum_types::U256; - #[test] - fn should_calculate_overflow_mul_shr_without_overflow() { - // given - let num = 1048576; + #[test] + fn should_calculate_overflow_mul_shr_without_overflow() { + // given + let num = 1048576; - // when - let (res1, o1) = U256::from(num).overflow_mul_shr(U256::from(num), 20); - let (res2, o2) = num.overflow_mul_shr(num, 20); + // when + let (res1, o1) = U256::from(num).overflow_mul_shr(U256::from(num), 20); + let (res2, o2) = num.overflow_mul_shr(num, 20); - // then - assert_eq!(res1, U256::from(num)); - assert!(!o1); - assert_eq!(res2, num); - assert!(!o2); - } + // then + assert_eq!(res1, U256::from(num)); + assert!(!o1); + assert_eq!(res2, num); + assert!(!o2); + } - #[test] - fn should_calculate_overflow_mul_shr_with_overflow() { - // given - let max = u64::max_value(); - let num1 = U256([max, max, max, max]); - let num2 = usize::max_value(); + #[test] + fn should_calculate_overflow_mul_shr_with_overflow() { + // given + let max = u64::max_value(); + let num1 = U256([max, max, max, max]); + let num2 = usize::max_value(); - // when - let (res1, o1) = num1.overflow_mul_shr(num1, 256); - let (res2, o2) = num2.overflow_mul_shr(num2, 64); + // when + let (res1, o1) = num1.overflow_mul_shr(num1, 256); + let (res2, o2) = num2.overflow_mul_shr(num2, 64); - // then - assert_eq!(res2, num2 - 1); - assert!(o2); + // then + assert_eq!(res2, num2 - 1); + assert!(o2); - assert_eq!(res1, !U256::zero() - 
U256::one()); - assert!(o1); - } + assert_eq!(res1, !U256::zero() - U256::one()); + assert!(o1); + } - #[test] - fn should_validate_u256_to_usize_conversion() { - // given - let v = U256::from(usize::max_value()) + U256::from(1); + #[test] + fn should_validate_u256_to_usize_conversion() { + // given + let v = U256::from(usize::max_value()) + U256::from(1); - // when - let res = usize::from_u256(v); + // when + let res = usize::from_u256(v); - // then - assert!(res.is_err()); - } + // then + assert!(res.is_err()); + } } diff --git a/ethcore/evm/src/factory.rs b/ethcore/evm/src/factory.rs index 5dbaf4f82..f9eae766f 100644 --- a/ethcore/evm/src/factory.rs +++ b/ethcore/evm/src/factory.rs @@ -16,67 +16,76 @@ //! Evm factory. //! +use super::{interpreter::SharedCache, vm::ActionParams, vmtype::VMType}; +use ethereum_types::U256; use std::sync::Arc; use vm::{Exec, Schedule}; -use ethereum_types::U256; -use super::vm::ActionParams; -use super::interpreter::SharedCache; -use super::vmtype::VMType; /// Evm factory. Creates appropriate Evm. #[derive(Clone)] pub struct Factory { - evm: VMType, - evm_cache: Arc, + evm: VMType, + evm_cache: Arc, } impl Factory { - /// Create fresh instance of VM - /// Might choose implementation depending on supplied gas. - pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box { - match self.evm { - VMType::Interpreter => if Self::can_fit_in_usize(¶ms.gas) { - Box::new(super::interpreter::Interpreter::::new(params, self.evm_cache.clone(), schedule, depth)) - } else { - Box::new(super::interpreter::Interpreter::::new(params, self.evm_cache.clone(), schedule, depth)) - } - } - } + /// Create fresh instance of VM + /// Might choose implementation depending on supplied gas. 
+ pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box { + match self.evm { + VMType::Interpreter => { + if Self::can_fit_in_usize(¶ms.gas) { + Box::new(super::interpreter::Interpreter::::new( + params, + self.evm_cache.clone(), + schedule, + depth, + )) + } else { + Box::new(super::interpreter::Interpreter::::new( + params, + self.evm_cache.clone(), + schedule, + depth, + )) + } + } + } + } - /// Create new instance of specific `VMType` factory, with a size in bytes - /// for caching jump destinations. - pub fn new(evm: VMType, cache_size: usize) -> Self { - Factory { - evm, - evm_cache: Arc::new(SharedCache::new(cache_size)), - } - } + /// Create new instance of specific `VMType` factory, with a size in bytes + /// for caching jump destinations. + pub fn new(evm: VMType, cache_size: usize) -> Self { + Factory { + evm, + evm_cache: Arc::new(SharedCache::new(cache_size)), + } + } - fn can_fit_in_usize(gas: &U256) -> bool { - gas == &U256::from(gas.low_u64() as usize) - } + fn can_fit_in_usize(gas: &U256) -> bool { + gas == &U256::from(gas.low_u64() as usize) + } } impl Default for Factory { - /// Returns native rust evm factory - fn default() -> Factory { - Factory { - evm: VMType::Interpreter, - evm_cache: Arc::new(SharedCache::default()), - } - } + /// Returns native rust evm factory + fn default() -> Factory { + Factory { + evm: VMType::Interpreter, + evm_cache: Arc::new(SharedCache::default()), + } + } } #[test] fn test_create_vm() { - use vm::Ext; - use vm::tests::FakeExt; - use bytes::Bytes; + use bytes::Bytes; + use vm::{tests::FakeExt, Ext}; - let mut params = ActionParams::default(); - params.code = Some(Arc::new(Bytes::default())); - let ext = FakeExt::new(); - let _vm = Factory::default().create(params, ext.schedule(), ext.depth()); + let mut params = ActionParams::default(); + params.code = Some(Arc::new(Bytes::default())); + let ext = FakeExt::new(); + let _vm = Factory::default().create(params, ext.schedule(), 
ext.depth()); } /// Create tests by injecting different VM factories diff --git a/ethcore/evm/src/instructions.rs b/ethcore/evm/src/instructions.rs index b0a66c159..266bf7ded 100644 --- a/ethcore/evm/src/instructions.rs +++ b/ethcore/evm/src/instructions.rs @@ -43,560 +43,565 @@ macro_rules! enum_with_from_u8 { } enum_with_from_u8! { - #[doc = "Virtual machine bytecode instruction."] - #[repr(u8)] - #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug, Hash)] - pub enum Instruction { - #[doc = "halts execution"] - STOP = 0x00, - #[doc = "addition operation"] - ADD = 0x01, - #[doc = "mulitplication operation"] - MUL = 0x02, - #[doc = "subtraction operation"] - SUB = 0x03, - #[doc = "integer division operation"] - DIV = 0x04, - #[doc = "signed integer division operation"] - SDIV = 0x05, - #[doc = "modulo remainder operation"] - MOD = 0x06, - #[doc = "signed modulo remainder operation"] - SMOD = 0x07, - #[doc = "unsigned modular addition"] - ADDMOD = 0x08, - #[doc = "unsigned modular multiplication"] - MULMOD = 0x09, - #[doc = "exponential operation"] - EXP = 0x0a, - #[doc = "extend length of signed integer"] - SIGNEXTEND = 0x0b, + #[doc = "Virtual machine bytecode instruction."] + #[repr(u8)] + #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug, Hash)] + pub enum Instruction { + #[doc = "halts execution"] + STOP = 0x00, + #[doc = "addition operation"] + ADD = 0x01, + #[doc = "mulitplication operation"] + MUL = 0x02, + #[doc = "subtraction operation"] + SUB = 0x03, + #[doc = "integer division operation"] + DIV = 0x04, + #[doc = "signed integer division operation"] + SDIV = 0x05, + #[doc = "modulo remainder operation"] + MOD = 0x06, + #[doc = "signed modulo remainder operation"] + SMOD = 0x07, + #[doc = "unsigned modular addition"] + ADDMOD = 0x08, + #[doc = "unsigned modular multiplication"] + MULMOD = 0x09, + #[doc = "exponential operation"] + EXP = 0x0a, + #[doc = "extend length of signed integer"] + SIGNEXTEND = 0x0b, - #[doc = "less-than 
comparision"] - LT = 0x10, - #[doc = "greater-than comparision"] - GT = 0x11, - #[doc = "signed less-than comparision"] - SLT = 0x12, - #[doc = "signed greater-than comparision"] - SGT = 0x13, - #[doc = "equality comparision"] - EQ = 0x14, - #[doc = "simple not operator"] - ISZERO = 0x15, - #[doc = "bitwise AND operation"] - AND = 0x16, - #[doc = "bitwise OR operation"] - OR = 0x17, - #[doc = "bitwise XOR operation"] - XOR = 0x18, - #[doc = "bitwise NOT opertation"] - NOT = 0x19, - #[doc = "retrieve single byte from word"] - BYTE = 0x1a, - #[doc = "shift left operation"] - SHL = 0x1b, - #[doc = "logical shift right operation"] - SHR = 0x1c, - #[doc = "arithmetic shift right operation"] - SAR = 0x1d, + #[doc = "less-than comparision"] + LT = 0x10, + #[doc = "greater-than comparision"] + GT = 0x11, + #[doc = "signed less-than comparision"] + SLT = 0x12, + #[doc = "signed greater-than comparision"] + SGT = 0x13, + #[doc = "equality comparision"] + EQ = 0x14, + #[doc = "simple not operator"] + ISZERO = 0x15, + #[doc = "bitwise AND operation"] + AND = 0x16, + #[doc = "bitwise OR operation"] + OR = 0x17, + #[doc = "bitwise XOR operation"] + XOR = 0x18, + #[doc = "bitwise NOT opertation"] + NOT = 0x19, + #[doc = "retrieve single byte from word"] + BYTE = 0x1a, + #[doc = "shift left operation"] + SHL = 0x1b, + #[doc = "logical shift right operation"] + SHR = 0x1c, + #[doc = "arithmetic shift right operation"] + SAR = 0x1d, - #[doc = "compute SHA3-256 hash"] - SHA3 = 0x20, + #[doc = "compute SHA3-256 hash"] + SHA3 = 0x20, - #[doc = "get address of currently executing account"] - ADDRESS = 0x30, - #[doc = "get balance of the given account"] - BALANCE = 0x31, - #[doc = "get execution origination address"] - ORIGIN = 0x32, - #[doc = "get caller address"] - CALLER = 0x33, - #[doc = "get deposited value by the instruction/transaction responsible for this execution"] - CALLVALUE = 0x34, - #[doc = "get input data of current environment"] - CALLDATALOAD = 0x35, - #[doc = "get size 
of input data in current environment"] - CALLDATASIZE = 0x36, - #[doc = "copy input data in current environment to memory"] - CALLDATACOPY = 0x37, - #[doc = "get size of code running in current environment"] - CODESIZE = 0x38, - #[doc = "copy code running in current environment to memory"] - CODECOPY = 0x39, - #[doc = "get price of gas in current environment"] - GASPRICE = 0x3a, - #[doc = "get external code size (from another contract)"] - EXTCODESIZE = 0x3b, - #[doc = "copy external code (from another contract)"] - EXTCODECOPY = 0x3c, - #[doc = "get the size of the return data buffer for the last call"] - RETURNDATASIZE = 0x3d, - #[doc = "copy return data buffer to memory"] - RETURNDATACOPY = 0x3e, - #[doc = "return the keccak256 hash of contract code"] - EXTCODEHASH = 0x3f, + #[doc = "get address of currently executing account"] + ADDRESS = 0x30, + #[doc = "get balance of the given account"] + BALANCE = 0x31, + #[doc = "get execution origination address"] + ORIGIN = 0x32, + #[doc = "get caller address"] + CALLER = 0x33, + #[doc = "get deposited value by the instruction/transaction responsible for this execution"] + CALLVALUE = 0x34, + #[doc = "get input data of current environment"] + CALLDATALOAD = 0x35, + #[doc = "get size of input data in current environment"] + CALLDATASIZE = 0x36, + #[doc = "copy input data in current environment to memory"] + CALLDATACOPY = 0x37, + #[doc = "get size of code running in current environment"] + CODESIZE = 0x38, + #[doc = "copy code running in current environment to memory"] + CODECOPY = 0x39, + #[doc = "get price of gas in current environment"] + GASPRICE = 0x3a, + #[doc = "get external code size (from another contract)"] + EXTCODESIZE = 0x3b, + #[doc = "copy external code (from another contract)"] + EXTCODECOPY = 0x3c, + #[doc = "get the size of the return data buffer for the last call"] + RETURNDATASIZE = 0x3d, + #[doc = "copy return data buffer to memory"] + RETURNDATACOPY = 0x3e, + #[doc = "return the keccak256 hash of 
contract code"] + EXTCODEHASH = 0x3f, - #[doc = "get hash of most recent complete block"] - BLOCKHASH = 0x40, - #[doc = "get the block's coinbase address"] - COINBASE = 0x41, - #[doc = "get the block's timestamp"] - TIMESTAMP = 0x42, - #[doc = "get the block's number"] - NUMBER = 0x43, - #[doc = "get the block's difficulty"] - DIFFICULTY = 0x44, - #[doc = "get the block's gas limit"] - GASLIMIT = 0x45, - #[doc = "get chain ID"] - CHAINID = 0x46, - #[doc = "get balance of own account"] - SELFBALANCE = 0x47, + #[doc = "get hash of most recent complete block"] + BLOCKHASH = 0x40, + #[doc = "get the block's coinbase address"] + COINBASE = 0x41, + #[doc = "get the block's timestamp"] + TIMESTAMP = 0x42, + #[doc = "get the block's number"] + NUMBER = 0x43, + #[doc = "get the block's difficulty"] + DIFFICULTY = 0x44, + #[doc = "get the block's gas limit"] + GASLIMIT = 0x45, + #[doc = "get chain ID"] + CHAINID = 0x46, + #[doc = "get balance of own account"] + SELFBALANCE = 0x47, - #[doc = "remove item from stack"] - POP = 0x50, - #[doc = "load word from memory"] - MLOAD = 0x51, - #[doc = "save word to memory"] - MSTORE = 0x52, - #[doc = "save byte to memory"] - MSTORE8 = 0x53, - #[doc = "load word from storage"] - SLOAD = 0x54, - #[doc = "save word to storage"] - SSTORE = 0x55, - #[doc = "alter the program counter"] - JUMP = 0x56, - #[doc = "conditionally alter the program counter"] - JUMPI = 0x57, - #[doc = "get the program counter"] - PC = 0x58, - #[doc = "get the size of active memory"] - MSIZE = 0x59, - #[doc = "get the amount of available gas"] - GAS = 0x5a, - #[doc = "set a potential jump destination"] - JUMPDEST = 0x5b, + #[doc = "remove item from stack"] + POP = 0x50, + #[doc = "load word from memory"] + MLOAD = 0x51, + #[doc = "save word to memory"] + MSTORE = 0x52, + #[doc = "save byte to memory"] + MSTORE8 = 0x53, + #[doc = "load word from storage"] + SLOAD = 0x54, + #[doc = "save word to storage"] + SSTORE = 0x55, + #[doc = "alter the program counter"] + JUMP = 
0x56, + #[doc = "conditionally alter the program counter"] + JUMPI = 0x57, + #[doc = "get the program counter"] + PC = 0x58, + #[doc = "get the size of active memory"] + MSIZE = 0x59, + #[doc = "get the amount of available gas"] + GAS = 0x5a, + #[doc = "set a potential jump destination"] + JUMPDEST = 0x5b, - #[doc = "place 1 byte item on stack"] - PUSH1 = 0x60, - #[doc = "place 2 byte item on stack"] - PUSH2 = 0x61, - #[doc = "place 3 byte item on stack"] - PUSH3 = 0x62, - #[doc = "place 4 byte item on stack"] - PUSH4 = 0x63, - #[doc = "place 5 byte item on stack"] - PUSH5 = 0x64, - #[doc = "place 6 byte item on stack"] - PUSH6 = 0x65, - #[doc = "place 7 byte item on stack"] - PUSH7 = 0x66, - #[doc = "place 8 byte item on stack"] - PUSH8 = 0x67, - #[doc = "place 9 byte item on stack"] - PUSH9 = 0x68, - #[doc = "place 10 byte item on stack"] - PUSH10 = 0x69, - #[doc = "place 11 byte item on stack"] - PUSH11 = 0x6a, - #[doc = "place 12 byte item on stack"] - PUSH12 = 0x6b, - #[doc = "place 13 byte item on stack"] - PUSH13 = 0x6c, - #[doc = "place 14 byte item on stack"] - PUSH14 = 0x6d, - #[doc = "place 15 byte item on stack"] - PUSH15 = 0x6e, - #[doc = "place 16 byte item on stack"] - PUSH16 = 0x6f, - #[doc = "place 17 byte item on stack"] - PUSH17 = 0x70, - #[doc = "place 18 byte item on stack"] - PUSH18 = 0x71, - #[doc = "place 19 byte item on stack"] - PUSH19 = 0x72, - #[doc = "place 20 byte item on stack"] - PUSH20 = 0x73, - #[doc = "place 21 byte item on stack"] - PUSH21 = 0x74, - #[doc = "place 22 byte item on stack"] - PUSH22 = 0x75, - #[doc = "place 23 byte item on stack"] - PUSH23 = 0x76, - #[doc = "place 24 byte item on stack"] - PUSH24 = 0x77, - #[doc = "place 25 byte item on stack"] - PUSH25 = 0x78, - #[doc = "place 26 byte item on stack"] - PUSH26 = 0x79, - #[doc = "place 27 byte item on stack"] - PUSH27 = 0x7a, - #[doc = "place 28 byte item on stack"] - PUSH28 = 0x7b, - #[doc = "place 29 byte item on stack"] - PUSH29 = 0x7c, - #[doc = "place 30 byte 
item on stack"] - PUSH30 = 0x7d, - #[doc = "place 31 byte item on stack"] - PUSH31 = 0x7e, - #[doc = "place 32 byte item on stack"] - PUSH32 = 0x7f, + #[doc = "place 1 byte item on stack"] + PUSH1 = 0x60, + #[doc = "place 2 byte item on stack"] + PUSH2 = 0x61, + #[doc = "place 3 byte item on stack"] + PUSH3 = 0x62, + #[doc = "place 4 byte item on stack"] + PUSH4 = 0x63, + #[doc = "place 5 byte item on stack"] + PUSH5 = 0x64, + #[doc = "place 6 byte item on stack"] + PUSH6 = 0x65, + #[doc = "place 7 byte item on stack"] + PUSH7 = 0x66, + #[doc = "place 8 byte item on stack"] + PUSH8 = 0x67, + #[doc = "place 9 byte item on stack"] + PUSH9 = 0x68, + #[doc = "place 10 byte item on stack"] + PUSH10 = 0x69, + #[doc = "place 11 byte item on stack"] + PUSH11 = 0x6a, + #[doc = "place 12 byte item on stack"] + PUSH12 = 0x6b, + #[doc = "place 13 byte item on stack"] + PUSH13 = 0x6c, + #[doc = "place 14 byte item on stack"] + PUSH14 = 0x6d, + #[doc = "place 15 byte item on stack"] + PUSH15 = 0x6e, + #[doc = "place 16 byte item on stack"] + PUSH16 = 0x6f, + #[doc = "place 17 byte item on stack"] + PUSH17 = 0x70, + #[doc = "place 18 byte item on stack"] + PUSH18 = 0x71, + #[doc = "place 19 byte item on stack"] + PUSH19 = 0x72, + #[doc = "place 20 byte item on stack"] + PUSH20 = 0x73, + #[doc = "place 21 byte item on stack"] + PUSH21 = 0x74, + #[doc = "place 22 byte item on stack"] + PUSH22 = 0x75, + #[doc = "place 23 byte item on stack"] + PUSH23 = 0x76, + #[doc = "place 24 byte item on stack"] + PUSH24 = 0x77, + #[doc = "place 25 byte item on stack"] + PUSH25 = 0x78, + #[doc = "place 26 byte item on stack"] + PUSH26 = 0x79, + #[doc = "place 27 byte item on stack"] + PUSH27 = 0x7a, + #[doc = "place 28 byte item on stack"] + PUSH28 = 0x7b, + #[doc = "place 29 byte item on stack"] + PUSH29 = 0x7c, + #[doc = "place 30 byte item on stack"] + PUSH30 = 0x7d, + #[doc = "place 31 byte item on stack"] + PUSH31 = 0x7e, + #[doc = "place 32 byte item on stack"] + PUSH32 = 0x7f, - #[doc = 
"copies the highest item in the stack to the top of the stack"] - DUP1 = 0x80, - #[doc = "copies the second highest item in the stack to the top of the stack"] - DUP2 = 0x81, - #[doc = "copies the third highest item in the stack to the top of the stack"] - DUP3 = 0x82, - #[doc = "copies the 4th highest item in the stack to the top of the stack"] - DUP4 = 0x83, - #[doc = "copies the 5th highest item in the stack to the top of the stack"] - DUP5 = 0x84, - #[doc = "copies the 6th highest item in the stack to the top of the stack"] - DUP6 = 0x85, - #[doc = "copies the 7th highest item in the stack to the top of the stack"] - DUP7 = 0x86, - #[doc = "copies the 8th highest item in the stack to the top of the stack"] - DUP8 = 0x87, - #[doc = "copies the 9th highest item in the stack to the top of the stack"] - DUP9 = 0x88, - #[doc = "copies the 10th highest item in the stack to the top of the stack"] - DUP10 = 0x89, - #[doc = "copies the 11th highest item in the stack to the top of the stack"] - DUP11 = 0x8a, - #[doc = "copies the 12th highest item in the stack to the top of the stack"] - DUP12 = 0x8b, - #[doc = "copies the 13th highest item in the stack to the top of the stack"] - DUP13 = 0x8c, - #[doc = "copies the 14th highest item in the stack to the top of the stack"] - DUP14 = 0x8d, - #[doc = "copies the 15th highest item in the stack to the top of the stack"] - DUP15 = 0x8e, - #[doc = "copies the 16th highest item in the stack to the top of the stack"] - DUP16 = 0x8f, + #[doc = "copies the highest item in the stack to the top of the stack"] + DUP1 = 0x80, + #[doc = "copies the second highest item in the stack to the top of the stack"] + DUP2 = 0x81, + #[doc = "copies the third highest item in the stack to the top of the stack"] + DUP3 = 0x82, + #[doc = "copies the 4th highest item in the stack to the top of the stack"] + DUP4 = 0x83, + #[doc = "copies the 5th highest item in the stack to the top of the stack"] + DUP5 = 0x84, + #[doc = "copies the 6th highest item 
in the stack to the top of the stack"] + DUP6 = 0x85, + #[doc = "copies the 7th highest item in the stack to the top of the stack"] + DUP7 = 0x86, + #[doc = "copies the 8th highest item in the stack to the top of the stack"] + DUP8 = 0x87, + #[doc = "copies the 9th highest item in the stack to the top of the stack"] + DUP9 = 0x88, + #[doc = "copies the 10th highest item in the stack to the top of the stack"] + DUP10 = 0x89, + #[doc = "copies the 11th highest item in the stack to the top of the stack"] + DUP11 = 0x8a, + #[doc = "copies the 12th highest item in the stack to the top of the stack"] + DUP12 = 0x8b, + #[doc = "copies the 13th highest item in the stack to the top of the stack"] + DUP13 = 0x8c, + #[doc = "copies the 14th highest item in the stack to the top of the stack"] + DUP14 = 0x8d, + #[doc = "copies the 15th highest item in the stack to the top of the stack"] + DUP15 = 0x8e, + #[doc = "copies the 16th highest item in the stack to the top of the stack"] + DUP16 = 0x8f, - #[doc = "swaps the highest and second highest value on the stack"] - SWAP1 = 0x90, - #[doc = "swaps the highest and third highest value on the stack"] - SWAP2 = 0x91, - #[doc = "swaps the highest and 4th highest value on the stack"] - SWAP3 = 0x92, - #[doc = "swaps the highest and 5th highest value on the stack"] - SWAP4 = 0x93, - #[doc = "swaps the highest and 6th highest value on the stack"] - SWAP5 = 0x94, - #[doc = "swaps the highest and 7th highest value on the stack"] - SWAP6 = 0x95, - #[doc = "swaps the highest and 8th highest value on the stack"] - SWAP7 = 0x96, - #[doc = "swaps the highest and 9th highest value on the stack"] - SWAP8 = 0x97, - #[doc = "swaps the highest and 10th highest value on the stack"] - SWAP9 = 0x98, - #[doc = "swaps the highest and 11th highest value on the stack"] - SWAP10 = 0x99, - #[doc = "swaps the highest and 12th highest value on the stack"] - SWAP11 = 0x9a, - #[doc = "swaps the highest and 13th highest value on the stack"] - SWAP12 = 0x9b, - 
#[doc = "swaps the highest and 14th highest value on the stack"] - SWAP13 = 0x9c, - #[doc = "swaps the highest and 15th highest value on the stack"] - SWAP14 = 0x9d, - #[doc = "swaps the highest and 16th highest value on the stack"] - SWAP15 = 0x9e, - #[doc = "swaps the highest and 17th highest value on the stack"] - SWAP16 = 0x9f, + #[doc = "swaps the highest and second highest value on the stack"] + SWAP1 = 0x90, + #[doc = "swaps the highest and third highest value on the stack"] + SWAP2 = 0x91, + #[doc = "swaps the highest and 4th highest value on the stack"] + SWAP3 = 0x92, + #[doc = "swaps the highest and 5th highest value on the stack"] + SWAP4 = 0x93, + #[doc = "swaps the highest and 6th highest value on the stack"] + SWAP5 = 0x94, + #[doc = "swaps the highest and 7th highest value on the stack"] + SWAP6 = 0x95, + #[doc = "swaps the highest and 8th highest value on the stack"] + SWAP7 = 0x96, + #[doc = "swaps the highest and 9th highest value on the stack"] + SWAP8 = 0x97, + #[doc = "swaps the highest and 10th highest value on the stack"] + SWAP9 = 0x98, + #[doc = "swaps the highest and 11th highest value on the stack"] + SWAP10 = 0x99, + #[doc = "swaps the highest and 12th highest value on the stack"] + SWAP11 = 0x9a, + #[doc = "swaps the highest and 13th highest value on the stack"] + SWAP12 = 0x9b, + #[doc = "swaps the highest and 14th highest value on the stack"] + SWAP13 = 0x9c, + #[doc = "swaps the highest and 15th highest value on the stack"] + SWAP14 = 0x9d, + #[doc = "swaps the highest and 16th highest value on the stack"] + SWAP15 = 0x9e, + #[doc = "swaps the highest and 17th highest value on the stack"] + SWAP16 = 0x9f, - #[doc = "Makes a log entry, no topics."] - LOG0 = 0xa0, - #[doc = "Makes a log entry, 1 topic."] - LOG1 = 0xa1, - #[doc = "Makes a log entry, 2 topics."] - LOG2 = 0xa2, - #[doc = "Makes a log entry, 3 topics."] - LOG3 = 0xa3, - #[doc = "Makes a log entry, 4 topics."] - LOG4 = 0xa4, + #[doc = "Makes a log entry, no topics."] + 
LOG0 = 0xa0, + #[doc = "Makes a log entry, 1 topic."] + LOG1 = 0xa1, + #[doc = "Makes a log entry, 2 topics."] + LOG2 = 0xa2, + #[doc = "Makes a log entry, 3 topics."] + LOG3 = 0xa3, + #[doc = "Makes a log entry, 4 topics."] + LOG4 = 0xa4, - #[doc = "create a new account with associated code"] - CREATE = 0xf0, - #[doc = "message-call into an account"] - CALL = 0xf1, - #[doc = "message-call with another account's code only"] - CALLCODE = 0xf2, - #[doc = "halt execution returning output data"] - RETURN = 0xf3, - #[doc = "like CALLCODE but keeps caller's value and sender"] - DELEGATECALL = 0xf4, - #[doc = "create a new account and set creation address to sha3(sender + sha3(init code)) % 2**160"] - CREATE2 = 0xf5, - #[doc = "stop execution and revert state changes. Return output data."] - REVERT = 0xfd, - #[doc = "like CALL but it does not take value, nor modify the state"] - STATICCALL = 0xfa, - #[doc = "halt execution and register account for later deletion"] - SUICIDE = 0xff, - } + #[doc = "create a new account with associated code"] + CREATE = 0xf0, + #[doc = "message-call into an account"] + CALL = 0xf1, + #[doc = "message-call with another account's code only"] + CALLCODE = 0xf2, + #[doc = "halt execution returning output data"] + RETURN = 0xf3, + #[doc = "like CALLCODE but keeps caller's value and sender"] + DELEGATECALL = 0xf4, + #[doc = "create a new account and set creation address to sha3(sender + sha3(init code)) % 2**160"] + CREATE2 = 0xf5, + #[doc = "stop execution and revert state changes. Return output data."] + REVERT = 0xfd, + #[doc = "like CALL but it does not take value, nor modify the state"] + STATICCALL = 0xfa, + #[doc = "halt execution and register account for later deletion"] + SUICIDE = 0xff, + } } impl Instruction { - /// Returns true if given instruction is `PUSHN` instruction. - pub fn is_push(&self) -> bool { - *self >= PUSH1 && *self <= PUSH32 - } + /// Returns true if given instruction is `PUSHN` instruction. 
+ pub fn is_push(&self) -> bool { + *self >= PUSH1 && *self <= PUSH32 + } - /// Returns number of bytes to read for `PUSHN` instruction - /// PUSH1 -> 1 - pub fn push_bytes(&self) -> Option<usize> { - if self.is_push() { - Some(((*self as u8) - (PUSH1 as u8) + 1) as usize) - } else { - None - } - } + /// Returns number of bytes to read for `PUSHN` instruction + /// PUSH1 -> 1 + pub fn push_bytes(&self) -> Option<usize> { + if self.is_push() { + Some(((*self as u8) - (PUSH1 as u8) + 1) as usize) + } else { + None + } + } - /// Returns stack position of item to duplicate - /// DUP1 -> 0 - pub fn dup_position(&self) -> Option<usize> { - if *self >= DUP1 && *self <= DUP16 { - Some(((*self as u8) - (DUP1 as u8)) as usize) - } else { - None - } - } + /// Returns stack position of item to duplicate + /// DUP1 -> 0 + pub fn dup_position(&self) -> Option<usize> { + if *self >= DUP1 && *self <= DUP16 { + Some(((*self as u8) - (DUP1 as u8)) as usize) + } else { + None + } + } - /// Returns stack position of item to SWAP top with - /// SWAP1 -> 1 - pub fn swap_position(&self) -> Option<usize> { - if *self >= SWAP1 && *self <= SWAP16 { - Some(((*self as u8) - (SWAP1 as u8) + 1) as usize) - } else { - None - } - } + /// Returns stack position of item to SWAP top with + /// SWAP1 -> 1 + pub fn swap_position(&self) -> Option<usize> { + if *self >= SWAP1 && *self <= SWAP16 { + Some(((*self as u8) - (SWAP1 as u8) + 1) as usize) + } else { + None + } + } - /// Returns number of topics to take from stack - /// LOG0 -> 0 - pub fn log_topics(&self) -> Option<usize> { - if *self >= LOG0 && *self <= LOG4 { - Some(((*self as u8) - (LOG0 as u8)) as usize) - } else { - None - } - } + /// Returns number of topics to take from stack + /// LOG0 -> 0 + pub fn log_topics(&self) -> Option<usize> { + if *self >= LOG0 && *self <= LOG4 { + Some(((*self as u8) - (LOG0 as u8)) as usize) + } else { + None + } + } - /// Returns the instruction info. 
- pub fn info(&self) -> &'static InstructionInfo { - INSTRUCTIONS[*self as usize].as_ref().expect("A instruction is defined in Instruction enum, but it is not found in InstructionInfo struct; this indicates a logic failure in the code.") - } + /// Returns the instruction info. + pub fn info(&self) -> &'static InstructionInfo { + INSTRUCTIONS[*self as usize].as_ref().expect("A instruction is defined in Instruction enum, but it is not found in InstructionInfo struct; this indicates a logic failure in the code.") + } } #[derive(PartialEq, Clone, Copy)] pub enum GasPriceTier { - /// 0 Zero - Zero, - /// 2 Quick - Base, - /// 3 Fastest - VeryLow, - /// 5 Fast - Low, - /// 8 Mid - Mid, - /// 10 Slow - High, - /// 20 Ext - Ext, - /// Multiparam or otherwise special - Special, + /// 0 Zero + Zero, + /// 2 Quick + Base, + /// 3 Fastest + VeryLow, + /// 5 Fast + Low, + /// 8 Mid + Mid, + /// 10 Slow + High, + /// 20 Ext + Ext, + /// Multiparam or otherwise special + Special, } impl GasPriceTier { - /// Returns the index in schedule for specific `GasPriceTier` - pub fn idx(&self) -> usize { - match self { - &GasPriceTier::Zero => 0, - &GasPriceTier::Base => 1, - &GasPriceTier::VeryLow => 2, - &GasPriceTier::Low => 3, - &GasPriceTier::Mid => 4, - &GasPriceTier::High => 5, - &GasPriceTier::Ext => 6, - &GasPriceTier::Special => 7, - } - } + /// Returns the index in schedule for specific `GasPriceTier` + pub fn idx(&self) -> usize { + match self { + &GasPriceTier::Zero => 0, + &GasPriceTier::Base => 1, + &GasPriceTier::VeryLow => 2, + &GasPriceTier::Low => 3, + &GasPriceTier::Mid => 4, + &GasPriceTier::High => 5, + &GasPriceTier::Ext => 6, + &GasPriceTier::Special => 7, + } + } } /// EVM instruction information. #[derive(Copy, Clone)] pub struct InstructionInfo { - /// Mnemonic name. - pub name: &'static str, - /// Number of stack arguments. - pub args: usize, - /// Number of returned stack items. - pub ret: usize, - /// Gas price tier. 
- pub tier: GasPriceTier + /// Mnemonic name. + pub name: &'static str, + /// Number of stack arguments. + pub args: usize, + /// Number of returned stack items. + pub ret: usize, + /// Gas price tier. + pub tier: GasPriceTier, } impl InstructionInfo { - /// Create new instruction info. - pub fn new(name: &'static str, args: usize, ret: usize, tier: GasPriceTier) -> Self { - InstructionInfo { name, args, ret, tier } - } + /// Create new instruction info. + pub fn new(name: &'static str, args: usize, ret: usize, tier: GasPriceTier) -> Self { + InstructionInfo { + name, + args, + ret, + tier, + } + } } lazy_static! { - /// Static instruction table. - static ref INSTRUCTIONS: [Option<InstructionInfo>; 0x100] = { - let mut arr = [None; 0x100]; - arr[STOP as usize] = Some(InstructionInfo::new("STOP", 0, 0, GasPriceTier::Zero)); - arr[ADD as usize] = Some(InstructionInfo::new("ADD", 2, 1, GasPriceTier::VeryLow)); - arr[SUB as usize] = Some(InstructionInfo::new("SUB", 2, 1, GasPriceTier::VeryLow)); - arr[MUL as usize] = Some(InstructionInfo::new("MUL", 2, 1, GasPriceTier::Low)); - arr[DIV as usize] = Some(InstructionInfo::new("DIV", 2, 1, GasPriceTier::Low)); - arr[SDIV as usize] = Some(InstructionInfo::new("SDIV", 2, 1, GasPriceTier::Low)); - arr[MOD as usize] = Some(InstructionInfo::new("MOD", 2, 1, GasPriceTier::Low)); - arr[SMOD as usize] = Some(InstructionInfo::new("SMOD", 2, 1, GasPriceTier::Low)); - arr[EXP as usize] = Some(InstructionInfo::new("EXP", 2, 1, GasPriceTier::Special)); - arr[NOT as usize] = Some(InstructionInfo::new("NOT", 1, 1, GasPriceTier::VeryLow)); - arr[LT as usize] = Some(InstructionInfo::new("LT", 2, 1, GasPriceTier::VeryLow)); - arr[GT as usize] = Some(InstructionInfo::new("GT", 2, 1, GasPriceTier::VeryLow)); - arr[SLT as usize] = Some(InstructionInfo::new("SLT", 2, 1, GasPriceTier::VeryLow)); - arr[SGT as usize] = Some(InstructionInfo::new("SGT", 2, 1, GasPriceTier::VeryLow)); - arr[EQ as usize] = Some(InstructionInfo::new("EQ", 2, 1, GasPriceTier::VeryLow)); 
- arr[ISZERO as usize] = Some(InstructionInfo::new("ISZERO", 1, 1, GasPriceTier::VeryLow)); - arr[AND as usize] = Some(InstructionInfo::new("AND", 2, 1, GasPriceTier::VeryLow)); - arr[OR as usize] = Some(InstructionInfo::new("OR", 2, 1, GasPriceTier::VeryLow)); - arr[XOR as usize] = Some(InstructionInfo::new("XOR", 2, 1, GasPriceTier::VeryLow)); - arr[BYTE as usize] = Some(InstructionInfo::new("BYTE", 2, 1, GasPriceTier::VeryLow)); - arr[SHL as usize] = Some(InstructionInfo::new("SHL", 2, 1, GasPriceTier::VeryLow)); - arr[SHR as usize] = Some(InstructionInfo::new("SHR", 2, 1, GasPriceTier::VeryLow)); - arr[SAR as usize] = Some(InstructionInfo::new("SAR", 2, 1, GasPriceTier::VeryLow)); - arr[ADDMOD as usize] = Some(InstructionInfo::new("ADDMOD", 3, 1, GasPriceTier::Mid)); - arr[MULMOD as usize] = Some(InstructionInfo::new("MULMOD", 3, 1, GasPriceTier::Mid)); - arr[SIGNEXTEND as usize] = Some(InstructionInfo::new("SIGNEXTEND", 2, 1, GasPriceTier::Low)); - arr[RETURNDATASIZE as usize] = Some(InstructionInfo::new("RETURNDATASIZE", 0, 1, GasPriceTier::Base)); - arr[RETURNDATACOPY as usize] = Some(InstructionInfo::new("RETURNDATACOPY", 3, 0, GasPriceTier::VeryLow)); - arr[SHA3 as usize] = Some(InstructionInfo::new("SHA3", 2, 1, GasPriceTier::Special)); - arr[ADDRESS as usize] = Some(InstructionInfo::new("ADDRESS", 0, 1, GasPriceTier::Base)); - arr[BALANCE as usize] = Some(InstructionInfo::new("BALANCE", 1, 1, GasPriceTier::Special)); - arr[ORIGIN as usize] = Some(InstructionInfo::new("ORIGIN", 0, 1, GasPriceTier::Base)); - arr[CALLER as usize] = Some(InstructionInfo::new("CALLER", 0, 1, GasPriceTier::Base)); - arr[CALLVALUE as usize] = Some(InstructionInfo::new("CALLVALUE", 0, 1, GasPriceTier::Base)); - arr[CALLDATALOAD as usize] = Some(InstructionInfo::new("CALLDATALOAD", 1, 1, GasPriceTier::VeryLow)); - arr[CALLDATASIZE as usize] = Some(InstructionInfo::new("CALLDATASIZE", 0, 1, GasPriceTier::Base)); - arr[CALLDATACOPY as usize] = 
Some(InstructionInfo::new("CALLDATACOPY", 3, 0, GasPriceTier::VeryLow)); - arr[EXTCODEHASH as usize] = Some(InstructionInfo::new("EXTCODEHASH", 1, 1, GasPriceTier::Special)); - arr[CODESIZE as usize] = Some(InstructionInfo::new("CODESIZE", 0, 1, GasPriceTier::Base)); - arr[CODECOPY as usize] = Some(InstructionInfo::new("CODECOPY", 3, 0, GasPriceTier::VeryLow)); - arr[GASPRICE as usize] = Some(InstructionInfo::new("GASPRICE", 0, 1, GasPriceTier::Base)); - arr[EXTCODESIZE as usize] = Some(InstructionInfo::new("EXTCODESIZE", 1, 1, GasPriceTier::Special)); - arr[EXTCODECOPY as usize] = Some(InstructionInfo::new("EXTCODECOPY", 4, 0, GasPriceTier::Special)); - arr[BLOCKHASH as usize] = Some(InstructionInfo::new("BLOCKHASH", 1, 1, GasPriceTier::Ext)); - arr[COINBASE as usize] = Some(InstructionInfo::new("COINBASE", 0, 1, GasPriceTier::Base)); - arr[TIMESTAMP as usize] = Some(InstructionInfo::new("TIMESTAMP", 0, 1, GasPriceTier::Base)); - arr[NUMBER as usize] = Some(InstructionInfo::new("NUMBER", 0, 1, GasPriceTier::Base)); - arr[DIFFICULTY as usize] = Some(InstructionInfo::new("DIFFICULTY", 0, 1, GasPriceTier::Base)); - arr[GASLIMIT as usize] = Some(InstructionInfo::new("GASLIMIT", 0, 1, GasPriceTier::Base)); - arr[CHAINID as usize] = Some(InstructionInfo::new("CHAINID", 0, 1, GasPriceTier::Base)); - arr[SELFBALANCE as usize] = Some(InstructionInfo::new("SELFBALANCE", 0, 1, GasPriceTier::Low)); - arr[POP as usize] = Some(InstructionInfo::new("POP", 1, 0, GasPriceTier::Base)); - arr[MLOAD as usize] = Some(InstructionInfo::new("MLOAD", 1, 1, GasPriceTier::VeryLow)); - arr[MSTORE as usize] = Some(InstructionInfo::new("MSTORE", 2, 0, GasPriceTier::VeryLow)); - arr[MSTORE8 as usize] = Some(InstructionInfo::new("MSTORE8", 2, 0, GasPriceTier::VeryLow)); - arr[SLOAD as usize] = Some(InstructionInfo::new("SLOAD", 1, 1, GasPriceTier::Special)); - arr[SSTORE as usize] = Some(InstructionInfo::new("SSTORE", 2, 0, GasPriceTier::Special)); - arr[JUMP as usize] = 
Some(InstructionInfo::new("JUMP", 1, 0, GasPriceTier::Mid)); - arr[JUMPI as usize] = Some(InstructionInfo::new("JUMPI", 2, 0, GasPriceTier::High)); - arr[PC as usize] = Some(InstructionInfo::new("PC", 0, 1, GasPriceTier::Base)); - arr[MSIZE as usize] = Some(InstructionInfo::new("MSIZE", 0, 1, GasPriceTier::Base)); - arr[GAS as usize] = Some(InstructionInfo::new("GAS", 0, 1, GasPriceTier::Base)); - arr[JUMPDEST as usize] = Some(InstructionInfo::new("JUMPDEST", 0, 0, GasPriceTier::Special)); - arr[PUSH1 as usize] = Some(InstructionInfo::new("PUSH1", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH2 as usize] = Some(InstructionInfo::new("PUSH2", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH3 as usize] = Some(InstructionInfo::new("PUSH3", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH4 as usize] = Some(InstructionInfo::new("PUSH4", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH5 as usize] = Some(InstructionInfo::new("PUSH5", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH6 as usize] = Some(InstructionInfo::new("PUSH6", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH7 as usize] = Some(InstructionInfo::new("PUSH7", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH8 as usize] = Some(InstructionInfo::new("PUSH8", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH9 as usize] = Some(InstructionInfo::new("PUSH9", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH10 as usize] = Some(InstructionInfo::new("PUSH10", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH11 as usize] = Some(InstructionInfo::new("PUSH11", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH12 as usize] = Some(InstructionInfo::new("PUSH12", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH13 as usize] = Some(InstructionInfo::new("PUSH13", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH14 as usize] = Some(InstructionInfo::new("PUSH14", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH15 as usize] = Some(InstructionInfo::new("PUSH15", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH16 as usize] = Some(InstructionInfo::new("PUSH16", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH17 as usize] = 
Some(InstructionInfo::new("PUSH17", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH18 as usize] = Some(InstructionInfo::new("PUSH18", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH19 as usize] = Some(InstructionInfo::new("PUSH19", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH20 as usize] = Some(InstructionInfo::new("PUSH20", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH21 as usize] = Some(InstructionInfo::new("PUSH21", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH22 as usize] = Some(InstructionInfo::new("PUSH22", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH23 as usize] = Some(InstructionInfo::new("PUSH23", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH24 as usize] = Some(InstructionInfo::new("PUSH24", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH25 as usize] = Some(InstructionInfo::new("PUSH25", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH26 as usize] = Some(InstructionInfo::new("PUSH26", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH27 as usize] = Some(InstructionInfo::new("PUSH27", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH28 as usize] = Some(InstructionInfo::new("PUSH28", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH29 as usize] = Some(InstructionInfo::new("PUSH29", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH30 as usize] = Some(InstructionInfo::new("PUSH30", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH31 as usize] = Some(InstructionInfo::new("PUSH31", 0, 1, GasPriceTier::VeryLow)); - arr[PUSH32 as usize] = Some(InstructionInfo::new("PUSH32", 0, 1, GasPriceTier::VeryLow)); - arr[DUP1 as usize] = Some(InstructionInfo::new("DUP1", 1, 2, GasPriceTier::VeryLow)); - arr[DUP2 as usize] = Some(InstructionInfo::new("DUP2", 2, 3, GasPriceTier::VeryLow)); - arr[DUP3 as usize] = Some(InstructionInfo::new("DUP3", 3, 4, GasPriceTier::VeryLow)); - arr[DUP4 as usize] = Some(InstructionInfo::new("DUP4", 4, 5, GasPriceTier::VeryLow)); - arr[DUP5 as usize] = Some(InstructionInfo::new("DUP5", 5, 6, GasPriceTier::VeryLow)); - arr[DUP6 as usize] = Some(InstructionInfo::new("DUP6", 6, 7, GasPriceTier::VeryLow)); - arr[DUP7 as usize] = 
Some(InstructionInfo::new("DUP7", 7, 8, GasPriceTier::VeryLow)); - arr[DUP8 as usize] = Some(InstructionInfo::new("DUP8", 8, 9, GasPriceTier::VeryLow)); - arr[DUP9 as usize] = Some(InstructionInfo::new("DUP9", 9, 10, GasPriceTier::VeryLow)); - arr[DUP10 as usize] = Some(InstructionInfo::new("DUP10", 10, 11, GasPriceTier::VeryLow)); - arr[DUP11 as usize] = Some(InstructionInfo::new("DUP11", 11, 12, GasPriceTier::VeryLow)); - arr[DUP12 as usize] = Some(InstructionInfo::new("DUP12", 12, 13, GasPriceTier::VeryLow)); - arr[DUP13 as usize] = Some(InstructionInfo::new("DUP13", 13, 14, GasPriceTier::VeryLow)); - arr[DUP14 as usize] = Some(InstructionInfo::new("DUP14", 14, 15, GasPriceTier::VeryLow)); - arr[DUP15 as usize] = Some(InstructionInfo::new("DUP15", 15, 16, GasPriceTier::VeryLow)); - arr[DUP16 as usize] = Some(InstructionInfo::new("DUP16", 16, 17, GasPriceTier::VeryLow)); - arr[SWAP1 as usize] = Some(InstructionInfo::new("SWAP1", 2, 2, GasPriceTier::VeryLow)); - arr[SWAP2 as usize] = Some(InstructionInfo::new("SWAP2", 3, 3, GasPriceTier::VeryLow)); - arr[SWAP3 as usize] = Some(InstructionInfo::new("SWAP3", 4, 4, GasPriceTier::VeryLow)); - arr[SWAP4 as usize] = Some(InstructionInfo::new("SWAP4", 5, 5, GasPriceTier::VeryLow)); - arr[SWAP5 as usize] = Some(InstructionInfo::new("SWAP5", 6, 6, GasPriceTier::VeryLow)); - arr[SWAP6 as usize] = Some(InstructionInfo::new("SWAP6", 7, 7, GasPriceTier::VeryLow)); - arr[SWAP7 as usize] = Some(InstructionInfo::new("SWAP7", 8, 8, GasPriceTier::VeryLow)); - arr[SWAP8 as usize] = Some(InstructionInfo::new("SWAP8", 9, 9, GasPriceTier::VeryLow)); - arr[SWAP9 as usize] = Some(InstructionInfo::new("SWAP9", 10, 10, GasPriceTier::VeryLow)); - arr[SWAP10 as usize] = Some(InstructionInfo::new("SWAP10", 11, 11, GasPriceTier::VeryLow)); - arr[SWAP11 as usize] = Some(InstructionInfo::new("SWAP11", 12, 12, GasPriceTier::VeryLow)); - arr[SWAP12 as usize] = Some(InstructionInfo::new("SWAP12", 13, 13, GasPriceTier::VeryLow)); - arr[SWAP13 as 
usize] = Some(InstructionInfo::new("SWAP13", 14, 14, GasPriceTier::VeryLow)); - arr[SWAP14 as usize] = Some(InstructionInfo::new("SWAP14", 15, 15, GasPriceTier::VeryLow)); - arr[SWAP15 as usize] = Some(InstructionInfo::new("SWAP15", 16, 16, GasPriceTier::VeryLow)); - arr[SWAP16 as usize] = Some(InstructionInfo::new("SWAP16", 17, 17, GasPriceTier::VeryLow)); - arr[LOG0 as usize] = Some(InstructionInfo::new("LOG0", 2, 0, GasPriceTier::Special)); - arr[LOG1 as usize] = Some(InstructionInfo::new("LOG1", 3, 0, GasPriceTier::Special)); - arr[LOG2 as usize] = Some(InstructionInfo::new("LOG2", 4, 0, GasPriceTier::Special)); - arr[LOG3 as usize] = Some(InstructionInfo::new("LOG3", 5, 0, GasPriceTier::Special)); - arr[LOG4 as usize] = Some(InstructionInfo::new("LOG4", 6, 0, GasPriceTier::Special)); - arr[CREATE as usize] = Some(InstructionInfo::new("CREATE", 3, 1, GasPriceTier::Special)); - arr[CALL as usize] = Some(InstructionInfo::new("CALL", 7, 1, GasPriceTier::Special)); - arr[CALLCODE as usize] = Some(InstructionInfo::new("CALLCODE", 7, 1, GasPriceTier::Special)); - arr[RETURN as usize] = Some(InstructionInfo::new("RETURN", 2, 0, GasPriceTier::Zero)); - arr[DELEGATECALL as usize] = Some(InstructionInfo::new("DELEGATECALL", 6, 1, GasPriceTier::Special)); - arr[STATICCALL as usize] = Some(InstructionInfo::new("STATICCALL", 6, 1, GasPriceTier::Special)); - arr[SUICIDE as usize] = Some(InstructionInfo::new("SUICIDE", 1, 0, GasPriceTier::Special)); - arr[CREATE2 as usize] = Some(InstructionInfo::new("CREATE2", 4, 1, GasPriceTier::Special)); - arr[REVERT as usize] = Some(InstructionInfo::new("REVERT", 2, 0, GasPriceTier::Zero)); - arr - }; + /// Static instruction table. 
+ static ref INSTRUCTIONS: [Option<InstructionInfo>; 0x100] = { + let mut arr = [None; 0x100]; + arr[STOP as usize] = Some(InstructionInfo::new("STOP", 0, 0, GasPriceTier::Zero)); + arr[ADD as usize] = Some(InstructionInfo::new("ADD", 2, 1, GasPriceTier::VeryLow)); + arr[SUB as usize] = Some(InstructionInfo::new("SUB", 2, 1, GasPriceTier::VeryLow)); + arr[MUL as usize] = Some(InstructionInfo::new("MUL", 2, 1, GasPriceTier::Low)); + arr[DIV as usize] = Some(InstructionInfo::new("DIV", 2, 1, GasPriceTier::Low)); + arr[SDIV as usize] = Some(InstructionInfo::new("SDIV", 2, 1, GasPriceTier::Low)); + arr[MOD as usize] = Some(InstructionInfo::new("MOD", 2, 1, GasPriceTier::Low)); + arr[SMOD as usize] = Some(InstructionInfo::new("SMOD", 2, 1, GasPriceTier::Low)); + arr[EXP as usize] = Some(InstructionInfo::new("EXP", 2, 1, GasPriceTier::Special)); + arr[NOT as usize] = Some(InstructionInfo::new("NOT", 1, 1, GasPriceTier::VeryLow)); + arr[LT as usize] = Some(InstructionInfo::new("LT", 2, 1, GasPriceTier::VeryLow)); + arr[GT as usize] = Some(InstructionInfo::new("GT", 2, 1, GasPriceTier::VeryLow)); + arr[SLT as usize] = Some(InstructionInfo::new("SLT", 2, 1, GasPriceTier::VeryLow)); + arr[SGT as usize] = Some(InstructionInfo::new("SGT", 2, 1, GasPriceTier::VeryLow)); + arr[EQ as usize] = Some(InstructionInfo::new("EQ", 2, 1, GasPriceTier::VeryLow)); + arr[ISZERO as usize] = Some(InstructionInfo::new("ISZERO", 1, 1, GasPriceTier::VeryLow)); + arr[AND as usize] = Some(InstructionInfo::new("AND", 2, 1, GasPriceTier::VeryLow)); + arr[OR as usize] = Some(InstructionInfo::new("OR", 2, 1, GasPriceTier::VeryLow)); + arr[XOR as usize] = Some(InstructionInfo::new("XOR", 2, 1, GasPriceTier::VeryLow)); + arr[BYTE as usize] = Some(InstructionInfo::new("BYTE", 2, 1, GasPriceTier::VeryLow)); + arr[SHL as usize] = Some(InstructionInfo::new("SHL", 2, 1, GasPriceTier::VeryLow)); + arr[SHR as usize] = Some(InstructionInfo::new("SHR", 2, 1, GasPriceTier::VeryLow)); + arr[SAR as usize] = 
Some(InstructionInfo::new("SAR", 2, 1, GasPriceTier::VeryLow)); + arr[ADDMOD as usize] = Some(InstructionInfo::new("ADDMOD", 3, 1, GasPriceTier::Mid)); + arr[MULMOD as usize] = Some(InstructionInfo::new("MULMOD", 3, 1, GasPriceTier::Mid)); + arr[SIGNEXTEND as usize] = Some(InstructionInfo::new("SIGNEXTEND", 2, 1, GasPriceTier::Low)); + arr[RETURNDATASIZE as usize] = Some(InstructionInfo::new("RETURNDATASIZE", 0, 1, GasPriceTier::Base)); + arr[RETURNDATACOPY as usize] = Some(InstructionInfo::new("RETURNDATACOPY", 3, 0, GasPriceTier::VeryLow)); + arr[SHA3 as usize] = Some(InstructionInfo::new("SHA3", 2, 1, GasPriceTier::Special)); + arr[ADDRESS as usize] = Some(InstructionInfo::new("ADDRESS", 0, 1, GasPriceTier::Base)); + arr[BALANCE as usize] = Some(InstructionInfo::new("BALANCE", 1, 1, GasPriceTier::Special)); + arr[ORIGIN as usize] = Some(InstructionInfo::new("ORIGIN", 0, 1, GasPriceTier::Base)); + arr[CALLER as usize] = Some(InstructionInfo::new("CALLER", 0, 1, GasPriceTier::Base)); + arr[CALLVALUE as usize] = Some(InstructionInfo::new("CALLVALUE", 0, 1, GasPriceTier::Base)); + arr[CALLDATALOAD as usize] = Some(InstructionInfo::new("CALLDATALOAD", 1, 1, GasPriceTier::VeryLow)); + arr[CALLDATASIZE as usize] = Some(InstructionInfo::new("CALLDATASIZE", 0, 1, GasPriceTier::Base)); + arr[CALLDATACOPY as usize] = Some(InstructionInfo::new("CALLDATACOPY", 3, 0, GasPriceTier::VeryLow)); + arr[EXTCODEHASH as usize] = Some(InstructionInfo::new("EXTCODEHASH", 1, 1, GasPriceTier::Special)); + arr[CODESIZE as usize] = Some(InstructionInfo::new("CODESIZE", 0, 1, GasPriceTier::Base)); + arr[CODECOPY as usize] = Some(InstructionInfo::new("CODECOPY", 3, 0, GasPriceTier::VeryLow)); + arr[GASPRICE as usize] = Some(InstructionInfo::new("GASPRICE", 0, 1, GasPriceTier::Base)); + arr[EXTCODESIZE as usize] = Some(InstructionInfo::new("EXTCODESIZE", 1, 1, GasPriceTier::Special)); + arr[EXTCODECOPY as usize] = Some(InstructionInfo::new("EXTCODECOPY", 4, 0, GasPriceTier::Special)); + 
arr[BLOCKHASH as usize] = Some(InstructionInfo::new("BLOCKHASH", 1, 1, GasPriceTier::Ext)); + arr[COINBASE as usize] = Some(InstructionInfo::new("COINBASE", 0, 1, GasPriceTier::Base)); + arr[TIMESTAMP as usize] = Some(InstructionInfo::new("TIMESTAMP", 0, 1, GasPriceTier::Base)); + arr[NUMBER as usize] = Some(InstructionInfo::new("NUMBER", 0, 1, GasPriceTier::Base)); + arr[DIFFICULTY as usize] = Some(InstructionInfo::new("DIFFICULTY", 0, 1, GasPriceTier::Base)); + arr[GASLIMIT as usize] = Some(InstructionInfo::new("GASLIMIT", 0, 1, GasPriceTier::Base)); + arr[CHAINID as usize] = Some(InstructionInfo::new("CHAINID", 0, 1, GasPriceTier::Base)); + arr[SELFBALANCE as usize] = Some(InstructionInfo::new("SELFBALANCE", 0, 1, GasPriceTier::Low)); + arr[POP as usize] = Some(InstructionInfo::new("POP", 1, 0, GasPriceTier::Base)); + arr[MLOAD as usize] = Some(InstructionInfo::new("MLOAD", 1, 1, GasPriceTier::VeryLow)); + arr[MSTORE as usize] = Some(InstructionInfo::new("MSTORE", 2, 0, GasPriceTier::VeryLow)); + arr[MSTORE8 as usize] = Some(InstructionInfo::new("MSTORE8", 2, 0, GasPriceTier::VeryLow)); + arr[SLOAD as usize] = Some(InstructionInfo::new("SLOAD", 1, 1, GasPriceTier::Special)); + arr[SSTORE as usize] = Some(InstructionInfo::new("SSTORE", 2, 0, GasPriceTier::Special)); + arr[JUMP as usize] = Some(InstructionInfo::new("JUMP", 1, 0, GasPriceTier::Mid)); + arr[JUMPI as usize] = Some(InstructionInfo::new("JUMPI", 2, 0, GasPriceTier::High)); + arr[PC as usize] = Some(InstructionInfo::new("PC", 0, 1, GasPriceTier::Base)); + arr[MSIZE as usize] = Some(InstructionInfo::new("MSIZE", 0, 1, GasPriceTier::Base)); + arr[GAS as usize] = Some(InstructionInfo::new("GAS", 0, 1, GasPriceTier::Base)); + arr[JUMPDEST as usize] = Some(InstructionInfo::new("JUMPDEST", 0, 0, GasPriceTier::Special)); + arr[PUSH1 as usize] = Some(InstructionInfo::new("PUSH1", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH2 as usize] = Some(InstructionInfo::new("PUSH2", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH3 
as usize] = Some(InstructionInfo::new("PUSH3", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH4 as usize] = Some(InstructionInfo::new("PUSH4", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH5 as usize] = Some(InstructionInfo::new("PUSH5", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH6 as usize] = Some(InstructionInfo::new("PUSH6", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH7 as usize] = Some(InstructionInfo::new("PUSH7", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH8 as usize] = Some(InstructionInfo::new("PUSH8", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH9 as usize] = Some(InstructionInfo::new("PUSH9", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH10 as usize] = Some(InstructionInfo::new("PUSH10", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH11 as usize] = Some(InstructionInfo::new("PUSH11", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH12 as usize] = Some(InstructionInfo::new("PUSH12", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH13 as usize] = Some(InstructionInfo::new("PUSH13", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH14 as usize] = Some(InstructionInfo::new("PUSH14", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH15 as usize] = Some(InstructionInfo::new("PUSH15", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH16 as usize] = Some(InstructionInfo::new("PUSH16", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH17 as usize] = Some(InstructionInfo::new("PUSH17", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH18 as usize] = Some(InstructionInfo::new("PUSH18", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH19 as usize] = Some(InstructionInfo::new("PUSH19", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH20 as usize] = Some(InstructionInfo::new("PUSH20", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH21 as usize] = Some(InstructionInfo::new("PUSH21", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH22 as usize] = Some(InstructionInfo::new("PUSH22", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH23 as usize] = Some(InstructionInfo::new("PUSH23", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH24 as usize] = Some(InstructionInfo::new("PUSH24", 0, 1, GasPriceTier::VeryLow)); + 
arr[PUSH25 as usize] = Some(InstructionInfo::new("PUSH25", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH26 as usize] = Some(InstructionInfo::new("PUSH26", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH27 as usize] = Some(InstructionInfo::new("PUSH27", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH28 as usize] = Some(InstructionInfo::new("PUSH28", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH29 as usize] = Some(InstructionInfo::new("PUSH29", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH30 as usize] = Some(InstructionInfo::new("PUSH30", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH31 as usize] = Some(InstructionInfo::new("PUSH31", 0, 1, GasPriceTier::VeryLow)); + arr[PUSH32 as usize] = Some(InstructionInfo::new("PUSH32", 0, 1, GasPriceTier::VeryLow)); + arr[DUP1 as usize] = Some(InstructionInfo::new("DUP1", 1, 2, GasPriceTier::VeryLow)); + arr[DUP2 as usize] = Some(InstructionInfo::new("DUP2", 2, 3, GasPriceTier::VeryLow)); + arr[DUP3 as usize] = Some(InstructionInfo::new("DUP3", 3, 4, GasPriceTier::VeryLow)); + arr[DUP4 as usize] = Some(InstructionInfo::new("DUP4", 4, 5, GasPriceTier::VeryLow)); + arr[DUP5 as usize] = Some(InstructionInfo::new("DUP5", 5, 6, GasPriceTier::VeryLow)); + arr[DUP6 as usize] = Some(InstructionInfo::new("DUP6", 6, 7, GasPriceTier::VeryLow)); + arr[DUP7 as usize] = Some(InstructionInfo::new("DUP7", 7, 8, GasPriceTier::VeryLow)); + arr[DUP8 as usize] = Some(InstructionInfo::new("DUP8", 8, 9, GasPriceTier::VeryLow)); + arr[DUP9 as usize] = Some(InstructionInfo::new("DUP9", 9, 10, GasPriceTier::VeryLow)); + arr[DUP10 as usize] = Some(InstructionInfo::new("DUP10", 10, 11, GasPriceTier::VeryLow)); + arr[DUP11 as usize] = Some(InstructionInfo::new("DUP11", 11, 12, GasPriceTier::VeryLow)); + arr[DUP12 as usize] = Some(InstructionInfo::new("DUP12", 12, 13, GasPriceTier::VeryLow)); + arr[DUP13 as usize] = Some(InstructionInfo::new("DUP13", 13, 14, GasPriceTier::VeryLow)); + arr[DUP14 as usize] = Some(InstructionInfo::new("DUP14", 14, 15, GasPriceTier::VeryLow)); + arr[DUP15 
as usize] = Some(InstructionInfo::new("DUP15", 15, 16, GasPriceTier::VeryLow)); + arr[DUP16 as usize] = Some(InstructionInfo::new("DUP16", 16, 17, GasPriceTier::VeryLow)); + arr[SWAP1 as usize] = Some(InstructionInfo::new("SWAP1", 2, 2, GasPriceTier::VeryLow)); + arr[SWAP2 as usize] = Some(InstructionInfo::new("SWAP2", 3, 3, GasPriceTier::VeryLow)); + arr[SWAP3 as usize] = Some(InstructionInfo::new("SWAP3", 4, 4, GasPriceTier::VeryLow)); + arr[SWAP4 as usize] = Some(InstructionInfo::new("SWAP4", 5, 5, GasPriceTier::VeryLow)); + arr[SWAP5 as usize] = Some(InstructionInfo::new("SWAP5", 6, 6, GasPriceTier::VeryLow)); + arr[SWAP6 as usize] = Some(InstructionInfo::new("SWAP6", 7, 7, GasPriceTier::VeryLow)); + arr[SWAP7 as usize] = Some(InstructionInfo::new("SWAP7", 8, 8, GasPriceTier::VeryLow)); + arr[SWAP8 as usize] = Some(InstructionInfo::new("SWAP8", 9, 9, GasPriceTier::VeryLow)); + arr[SWAP9 as usize] = Some(InstructionInfo::new("SWAP9", 10, 10, GasPriceTier::VeryLow)); + arr[SWAP10 as usize] = Some(InstructionInfo::new("SWAP10", 11, 11, GasPriceTier::VeryLow)); + arr[SWAP11 as usize] = Some(InstructionInfo::new("SWAP11", 12, 12, GasPriceTier::VeryLow)); + arr[SWAP12 as usize] = Some(InstructionInfo::new("SWAP12", 13, 13, GasPriceTier::VeryLow)); + arr[SWAP13 as usize] = Some(InstructionInfo::new("SWAP13", 14, 14, GasPriceTier::VeryLow)); + arr[SWAP14 as usize] = Some(InstructionInfo::new("SWAP14", 15, 15, GasPriceTier::VeryLow)); + arr[SWAP15 as usize] = Some(InstructionInfo::new("SWAP15", 16, 16, GasPriceTier::VeryLow)); + arr[SWAP16 as usize] = Some(InstructionInfo::new("SWAP16", 17, 17, GasPriceTier::VeryLow)); + arr[LOG0 as usize] = Some(InstructionInfo::new("LOG0", 2, 0, GasPriceTier::Special)); + arr[LOG1 as usize] = Some(InstructionInfo::new("LOG1", 3, 0, GasPriceTier::Special)); + arr[LOG2 as usize] = Some(InstructionInfo::new("LOG2", 4, 0, GasPriceTier::Special)); + arr[LOG3 as usize] = Some(InstructionInfo::new("LOG3", 5, 0, GasPriceTier::Special)); + 
arr[LOG4 as usize] = Some(InstructionInfo::new("LOG4", 6, 0, GasPriceTier::Special)); + arr[CREATE as usize] = Some(InstructionInfo::new("CREATE", 3, 1, GasPriceTier::Special)); + arr[CALL as usize] = Some(InstructionInfo::new("CALL", 7, 1, GasPriceTier::Special)); + arr[CALLCODE as usize] = Some(InstructionInfo::new("CALLCODE", 7, 1, GasPriceTier::Special)); + arr[RETURN as usize] = Some(InstructionInfo::new("RETURN", 2, 0, GasPriceTier::Zero)); + arr[DELEGATECALL as usize] = Some(InstructionInfo::new("DELEGATECALL", 6, 1, GasPriceTier::Special)); + arr[STATICCALL as usize] = Some(InstructionInfo::new("STATICCALL", 6, 1, GasPriceTier::Special)); + arr[SUICIDE as usize] = Some(InstructionInfo::new("SUICIDE", 1, 0, GasPriceTier::Special)); + arr[CREATE2 as usize] = Some(InstructionInfo::new("CREATE2", 4, 1, GasPriceTier::Special)); + arr[REVERT as usize] = Some(InstructionInfo::new("REVERT", 2, 0, GasPriceTier::Zero)); + arr + }; } /// Maximal number of topics for log instructions @@ -604,40 +609,40 @@ pub const MAX_NO_OF_TOPICS: usize = 4; #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn test_is_push() { - assert!(PUSH1.is_push()); - assert!(PUSH32.is_push()); - assert!(!DUP1.is_push()); - } + #[test] + fn test_is_push() { + assert!(PUSH1.is_push()); + assert!(PUSH32.is_push()); + assert!(!DUP1.is_push()); + } - #[test] - fn test_get_push_bytes() { - assert_eq!(PUSH1.push_bytes(), Some(1)); - assert_eq!(PUSH3.push_bytes(), Some(3)); - assert_eq!(PUSH32.push_bytes(), Some(32)); - } + #[test] + fn test_get_push_bytes() { + assert_eq!(PUSH1.push_bytes(), Some(1)); + assert_eq!(PUSH3.push_bytes(), Some(3)); + assert_eq!(PUSH32.push_bytes(), Some(32)); + } - #[test] - fn test_get_dup_position() { - assert_eq!(DUP1.dup_position(), Some(0)); - assert_eq!(DUP5.dup_position(), Some(4)); - assert_eq!(DUP10.dup_position(), Some(9)); - } + #[test] + fn test_get_dup_position() { + assert_eq!(DUP1.dup_position(), Some(0)); + assert_eq!(DUP5.dup_position(), 
Some(4)); + assert_eq!(DUP10.dup_position(), Some(9)); + } - #[test] - fn test_get_swap_position() { - assert_eq!(SWAP1.swap_position(), Some(1)); - assert_eq!(SWAP5.swap_position(), Some(5)); - assert_eq!(SWAP10.swap_position(), Some(10)); - } + #[test] + fn test_get_swap_position() { + assert_eq!(SWAP1.swap_position(), Some(1)); + assert_eq!(SWAP5.swap_position(), Some(5)); + assert_eq!(SWAP10.swap_position(), Some(10)); + } - #[test] - fn test_get_log_topics() { - assert_eq!(LOG0.log_topics(), Some(0)); - assert_eq!(LOG2.log_topics(), Some(2)); - assert_eq!(LOG4.log_topics(), Some(4)); - } + #[test] + fn test_get_log_topics() { + assert_eq!(LOG0.log_topics(), Some(0)); + assert_eq!(LOG2.log_topics(), Some(2)); + assert_eq!(LOG4.log_topics(), Some(4)); + } } diff --git a/ethcore/evm/src/interpreter/gasometer.rs b/ethcore/evm/src/interpreter/gasometer.rs index b90540d9e..acb3fdb1c 100644 --- a/ethcore/evm/src/interpreter/gasometer.rs +++ b/ethcore/evm/src/interpreter/gasometer.rs @@ -14,475 +14,503 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::cmp; -use ethereum_types::{U256, H256}; use super::u256_to_address; +use ethereum_types::{H256, U256}; +use std::cmp; -use {evm, vm}; +use evm; use instructions::{self, Instruction, InstructionInfo}; use interpreter::stack::Stack; -use vm::Schedule; +use vm::{self, Schedule}; macro_rules! 
overflowing { - ($x: expr) => {{ - let (v, overflow) = $x; - if overflow { return Err(vm::Error::OutOfGas); } - v - }} + ($x: expr) => {{ + let (v, overflow) = $x; + if overflow { + return Err(vm::Error::OutOfGas); + } + v + }}; } enum Request { - Gas(Cost), - GasMem(Cost, Cost), - GasMemProvide(Cost, Cost, Option), - GasMemCopy(Cost, Cost, Cost) + Gas(Cost), + GasMem(Cost, Cost), + GasMemProvide(Cost, Cost, Option), + GasMemCopy(Cost, Cost, Cost), } pub struct InstructionRequirements { - pub gas_cost: Cost, - pub provide_gas: Option, - pub memory_total_gas: Cost, - pub memory_required_size: usize, + pub gas_cost: Cost, + pub provide_gas: Option, + pub memory_total_gas: Cost, + pub memory_required_size: usize, } pub struct Gasometer { - pub current_gas: Gas, - pub current_mem_gas: Gas, + pub current_gas: Gas, + pub current_mem_gas: Gas, } impl Gasometer { + pub fn new(current_gas: Gas) -> Self { + Gasometer { + current_gas: current_gas, + current_mem_gas: Gas::from(0), + } + } - pub fn new(current_gas: Gas) -> Self { - Gasometer { - current_gas: current_gas, - current_mem_gas: Gas::from(0), - } - } + pub fn verify_gas(&self, gas_cost: &Gas) -> vm::Result<()> { + match &self.current_gas < gas_cost { + true => Err(vm::Error::OutOfGas), + false => Ok(()), + } + } - pub fn verify_gas(&self, gas_cost: &Gas) -> vm::Result<()> { - match &self.current_gas < gas_cost { - true => Err(vm::Error::OutOfGas), - false => Ok(()) - } - } + /// How much gas is provided to a CALL/CREATE, given that we need to deduct `needed` for this operation + /// and that we `requested` some. 
+ pub fn gas_provided( + &self, + schedule: &Schedule, + needed: Gas, + requested: Option, + ) -> vm::Result { + // Try converting requested gas to `Gas` (`U256/u64`) + // but in EIP150 even if we request more we should never fail from OOG + let requested = requested.map(Gas::from_u256); - /// How much gas is provided to a CALL/CREATE, given that we need to deduct `needed` for this operation - /// and that we `requested` some. - pub fn gas_provided(&self, schedule: &Schedule, needed: Gas, requested: Option) -> vm::Result { - // Try converting requested gas to `Gas` (`U256/u64`) - // but in EIP150 even if we request more we should never fail from OOG - let requested = requested.map(Gas::from_u256); + match schedule.sub_gas_cap_divisor { + Some(cap_divisor) if self.current_gas >= needed => { + let gas_remaining = self.current_gas - needed; + let max_gas_provided = match cap_divisor { + 64 => gas_remaining - (gas_remaining >> 6), + cap_divisor => gas_remaining - gas_remaining / Gas::from(cap_divisor), + }; - match schedule.sub_gas_cap_divisor { - Some(cap_divisor) if self.current_gas >= needed => { - let gas_remaining = self.current_gas - needed; - let max_gas_provided = match cap_divisor { - 64 => gas_remaining - (gas_remaining >> 6), - cap_divisor => gas_remaining - gas_remaining / Gas::from(cap_divisor), - }; + if let Some(Ok(r)) = requested { + Ok(cmp::min(r, max_gas_provided)) + } else { + Ok(max_gas_provided) + } + } + _ => { + if let Some(r) = requested { + r + } else if self.current_gas >= needed { + Ok(self.current_gas - needed) + } else { + Ok(0.into()) + } + } + } + } - if let Some(Ok(r)) = requested { - Ok(cmp::min(r, max_gas_provided)) - } else { - Ok(max_gas_provided) - } - }, - _ => { - if let Some(r) = requested { - r - } else if self.current_gas >= needed { - Ok(self.current_gas - needed) - } else { - Ok(0.into()) - } - }, - } - } + /// Determine how much gas is used by the given instruction, given the machine's state. 
+ /// + /// We guarantee that the final element of the returned tuple (`provided`) will be `Some` + /// iff the `instruction` is one of `CREATE`, or any of the `CALL` variants. In this case, + /// it will be the amount of gas that the current context provides to the child context. + pub fn requirements( + &mut self, + ext: &vm::Ext, + instruction: Instruction, + info: &InstructionInfo, + stack: &Stack, + current_mem_size: usize, + ) -> vm::Result> { + let schedule = ext.schedule(); + let tier = info.tier.idx(); + let default_gas = Gas::from(schedule.tier_step_gas[tier]); - /// Determine how much gas is used by the given instruction, given the machine's state. - /// - /// We guarantee that the final element of the returned tuple (`provided`) will be `Some` - /// iff the `instruction` is one of `CREATE`, or any of the `CALL` variants. In this case, - /// it will be the amount of gas that the current context provides to the child context. - pub fn requirements( - &mut self, - ext: &vm::Ext, - instruction: Instruction, - info: &InstructionInfo, - stack: &Stack, - current_mem_size: usize, - ) -> vm::Result> { - let schedule = ext.schedule(); - let tier = info.tier.idx(); - let default_gas = Gas::from(schedule.tier_step_gas[tier]); + let cost = match instruction { + instructions::JUMPDEST => Request::Gas(Gas::from(1)), + instructions::SSTORE => { + if schedule.eip1706 && self.current_gas <= Gas::from(schedule.call_stipend) { + return Err(vm::Error::OutOfGas); + } + let address = H256::from(stack.peek(0)); + let newval = stack.peek(1); + let val = U256::from(&*ext.storage_at(&address)?); - let cost = match instruction { - instructions::JUMPDEST => { - Request::Gas(Gas::from(1)) - }, - instructions::SSTORE => { - if schedule.eip1706 && self.current_gas <= Gas::from(schedule.call_stipend) { - return Err(vm::Error::OutOfGas); - } - let address = H256::from(stack.peek(0)); - let newval = stack.peek(1); - let val = U256::from(&*ext.storage_at(&address)?); + let gas = if 
schedule.eip1283 { + let orig = U256::from(&*ext.initial_storage_at(&address)?); + calculate_eip1283_sstore_gas(schedule, &orig, &val, &newval) + } else { + if val.is_zero() && !newval.is_zero() { + schedule.sstore_set_gas + } else { + // Refund for below case is added when actually executing sstore + // !is_zero(&val) && is_zero(newval) + schedule.sstore_reset_gas + } + }; + Request::Gas(Gas::from(gas)) + } + instructions::SLOAD => Request::Gas(Gas::from(schedule.sload_gas)), + instructions::BALANCE => Request::Gas(Gas::from(schedule.balance_gas)), + instructions::EXTCODESIZE => Request::Gas(Gas::from(schedule.extcodesize_gas)), + instructions::EXTCODEHASH => Request::Gas(Gas::from(schedule.extcodehash_gas)), + instructions::SUICIDE => { + let mut gas = Gas::from(schedule.suicide_gas); - let gas = if schedule.eip1283 { - let orig = U256::from(&*ext.initial_storage_at(&address)?); - calculate_eip1283_sstore_gas(schedule, &orig, &val, &newval) - } else { - if val.is_zero() && !newval.is_zero() { - schedule.sstore_set_gas - } else { - // Refund for below case is added when actually executing sstore - // !is_zero(&val) && is_zero(newval) - schedule.sstore_reset_gas - } - }; - Request::Gas(Gas::from(gas)) - }, - instructions::SLOAD => { - Request::Gas(Gas::from(schedule.sload_gas)) - }, - instructions::BALANCE => { - Request::Gas(Gas::from(schedule.balance_gas)) - }, - instructions::EXTCODESIZE => { - Request::Gas(Gas::from(schedule.extcodesize_gas)) - }, - instructions::EXTCODEHASH => { - Request::Gas(Gas::from(schedule.extcodehash_gas)) - }, - instructions::SUICIDE => { - let mut gas = Gas::from(schedule.suicide_gas); + let is_value_transfer = !ext.origin_balance()?.is_zero(); + let address = u256_to_address(stack.peek(0)); + if (!schedule.no_empty && !ext.exists(&address)?) + || (schedule.no_empty + && is_value_transfer + && !ext.exists_and_not_null(&address)?) 
+ { + gas = + overflowing!(gas.overflow_add(schedule.suicide_to_new_account_cost.into())); + } - let is_value_transfer = !ext.origin_balance()?.is_zero(); - let address = u256_to_address(stack.peek(0)); - if ( - !schedule.no_empty && !ext.exists(&address)? - ) || ( - schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)? - ) { - gas = overflowing!(gas.overflow_add(schedule.suicide_to_new_account_cost.into())); - } + Request::Gas(gas) + } + instructions::MSTORE | instructions::MLOAD => { + Request::GasMem(default_gas, mem_needed_const(stack.peek(0), 32)?) + } + instructions::MSTORE8 => { + Request::GasMem(default_gas, mem_needed_const(stack.peek(0), 1)?) + } + instructions::RETURN | instructions::REVERT => { + Request::GasMem(default_gas, mem_needed(stack.peek(0), stack.peek(1))?) + } + instructions::SHA3 => { + let words = overflowing!(to_word_size(Gas::from_u256(*stack.peek(1))?)); + let gas = overflowing!(Gas::from(schedule.sha3_gas).overflow_add(overflowing!( + Gas::from(schedule.sha3_word_gas).overflow_mul(words) + ))); + Request::GasMem(gas, mem_needed(stack.peek(0), stack.peek(1))?) + } + instructions::CALLDATACOPY | instructions::CODECOPY | instructions::RETURNDATACOPY => { + Request::GasMemCopy( + default_gas, + mem_needed(stack.peek(0), stack.peek(2))?, + Gas::from_u256(*stack.peek(2))?, + ) + } + instructions::EXTCODECOPY => Request::GasMemCopy( + schedule.extcodecopy_base_gas.into(), + mem_needed(stack.peek(1), stack.peek(3))?, + Gas::from_u256(*stack.peek(3))?, + ), + instructions::LOG0 + | instructions::LOG1 + | instructions::LOG2 + | instructions::LOG3 + | instructions::LOG4 => { + let no_of_topics = instruction + .log_topics() + .expect("log_topics always return some for LOG* instructions; qed"); + let log_gas = schedule.log_gas + schedule.log_topic_gas * no_of_topics; - Request::Gas(gas) - }, - instructions::MSTORE | instructions::MLOAD => { - Request::GasMem(default_gas, mem_needed_const(stack.peek(0), 32)?) 
- }, - instructions::MSTORE8 => { - Request::GasMem(default_gas, mem_needed_const(stack.peek(0), 1)?) - }, - instructions::RETURN | instructions::REVERT => { - Request::GasMem(default_gas, mem_needed(stack.peek(0), stack.peek(1))?) - }, - instructions::SHA3 => { - let words = overflowing!(to_word_size(Gas::from_u256(*stack.peek(1))?)); - let gas = overflowing!(Gas::from(schedule.sha3_gas).overflow_add(overflowing!(Gas::from(schedule.sha3_word_gas).overflow_mul(words)))); - Request::GasMem(gas, mem_needed(stack.peek(0), stack.peek(1))?) - }, - instructions::CALLDATACOPY | instructions::CODECOPY | instructions::RETURNDATACOPY => { - Request::GasMemCopy(default_gas, mem_needed(stack.peek(0), stack.peek(2))?, Gas::from_u256(*stack.peek(2))?) - }, - instructions::EXTCODECOPY => { - Request::GasMemCopy(schedule.extcodecopy_base_gas.into(), mem_needed(stack.peek(1), stack.peek(3))?, Gas::from_u256(*stack.peek(3))?) - }, - instructions::LOG0 | instructions::LOG1 | instructions::LOG2 | instructions::LOG3 | instructions::LOG4 => { - let no_of_topics = instruction.log_topics().expect("log_topics always return some for LOG* instructions; qed"); - let log_gas = schedule.log_gas + schedule.log_topic_gas * no_of_topics; + let data_gas = + overflowing!(Gas::from_u256(*stack.peek(1))? + .overflow_mul(Gas::from(schedule.log_data_gas))); + let gas = overflowing!(data_gas.overflow_add(Gas::from(log_gas))); + Request::GasMem(gas, mem_needed(stack.peek(0), stack.peek(1))?) + } + instructions::CALL | instructions::CALLCODE => { + let mut gas = Gas::from(schedule.call_gas); + let mem = cmp::max( + mem_needed(stack.peek(5), stack.peek(6))?, + mem_needed(stack.peek(3), stack.peek(4))?, + ); - let data_gas = overflowing!(Gas::from_u256(*stack.peek(1))?.overflow_mul(Gas::from(schedule.log_data_gas))); - let gas = overflowing!(data_gas.overflow_add(Gas::from(log_gas))); - Request::GasMem(gas, mem_needed(stack.peek(0), stack.peek(1))?) 
- }, - instructions::CALL | instructions::CALLCODE => { - let mut gas = Gas::from(schedule.call_gas); - let mem = cmp::max( - mem_needed(stack.peek(5), stack.peek(6))?, - mem_needed(stack.peek(3), stack.peek(4))? - ); + let address = u256_to_address(stack.peek(1)); + let is_value_transfer = !stack.peek(2).is_zero(); - let address = u256_to_address(stack.peek(1)); - let is_value_transfer = !stack.peek(2).is_zero(); + if instruction == instructions::CALL + && ((!schedule.no_empty && !ext.exists(&address)?) + || (schedule.no_empty + && is_value_transfer + && !ext.exists_and_not_null(&address)?)) + { + gas = overflowing!(gas.overflow_add(schedule.call_new_account_gas.into())); + } - if instruction == instructions::CALL && ( - (!schedule.no_empty && !ext.exists(&address)?) - || - (schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)?) - ) { - gas = overflowing!(gas.overflow_add(schedule.call_new_account_gas.into())); - } + if is_value_transfer { + gas = overflowing!(gas.overflow_add(schedule.call_value_transfer_gas.into())); + } - if is_value_transfer { - gas = overflowing!(gas.overflow_add(schedule.call_value_transfer_gas.into())); - } + let requested = *stack.peek(0); - let requested = *stack.peek(0); + Request::GasMemProvide(gas, mem, Some(requested)) + } + instructions::DELEGATECALL | instructions::STATICCALL => { + let gas = Gas::from(schedule.call_gas); + let mem = cmp::max( + mem_needed(stack.peek(4), stack.peek(5))?, + mem_needed(stack.peek(2), stack.peek(3))?, + ); + let requested = *stack.peek(0); - Request::GasMemProvide(gas, mem, Some(requested)) - }, - instructions::DELEGATECALL | instructions::STATICCALL => { - let gas = Gas::from(schedule.call_gas); - let mem = cmp::max( - mem_needed(stack.peek(4), stack.peek(5))?, - mem_needed(stack.peek(2), stack.peek(3))? 
- ); - let requested = *stack.peek(0); + Request::GasMemProvide(gas, mem, Some(requested)) + } + instructions::CREATE => { + let start = stack.peek(1); + let len = stack.peek(2); - Request::GasMemProvide(gas, mem, Some(requested)) - }, - instructions::CREATE => { - let start = stack.peek(1); - let len = stack.peek(2); + let gas = Gas::from(schedule.create_gas); + let mem = mem_needed(start, len)?; - let gas = Gas::from(schedule.create_gas); - let mem = mem_needed(start, len)?; + Request::GasMemProvide(gas, mem, None) + } + instructions::CREATE2 => { + let start = stack.peek(1); + let len = stack.peek(2); - Request::GasMemProvide(gas, mem, None) - }, - instructions::CREATE2 => { - let start = stack.peek(1); - let len = stack.peek(2); + let base = Gas::from(schedule.create_gas); + let word = overflowing!(to_word_size(Gas::from_u256(*len)?)); + let word_gas = overflowing!(Gas::from(schedule.sha3_word_gas).overflow_mul(word)); + let gas = overflowing!(base.overflow_add(word_gas)); + let mem = mem_needed(start, len)?; - let base = Gas::from(schedule.create_gas); - let word = overflowing!(to_word_size(Gas::from_u256(*len)?)); - let word_gas = overflowing!(Gas::from(schedule.sha3_word_gas).overflow_mul(word)); - let gas = overflowing!(base.overflow_add(word_gas)); - let mem = mem_needed(start, len)?; + Request::GasMemProvide(gas, mem, None) + } + instructions::EXP => { + let expon = stack.peek(1); + let bytes = ((expon.bits() + 7) / 8) as usize; + let gas = Gas::from(schedule.exp_gas + schedule.exp_byte_gas * bytes); + Request::Gas(gas) + } + instructions::BLOCKHASH => Request::Gas(Gas::from(schedule.blockhash_gas)), + _ => Request::Gas(default_gas), + }; - Request::GasMemProvide(gas, mem, None) - }, - instructions::EXP => { - let expon = stack.peek(1); - let bytes = ((expon.bits() + 7) / 8) as usize; - let gas = Gas::from(schedule.exp_gas + schedule.exp_byte_gas * bytes); - Request::Gas(gas) - }, - instructions::BLOCKHASH => { - 
Request::Gas(Gas::from(schedule.blockhash_gas)) - }, - _ => Request::Gas(default_gas), - }; + Ok(match cost { + Request::Gas(gas) => InstructionRequirements { + gas_cost: gas, + provide_gas: None, + memory_required_size: 0, + memory_total_gas: self.current_mem_gas, + }, + Request::GasMem(gas, mem_size) => { + let (mem_gas_cost, new_mem_gas, new_mem_size) = + self.mem_gas_cost(schedule, current_mem_size, &mem_size)?; + let gas = overflowing!(gas.overflow_add(mem_gas_cost)); + InstructionRequirements { + gas_cost: gas, + provide_gas: None, + memory_required_size: new_mem_size, + memory_total_gas: new_mem_gas, + } + } + Request::GasMemProvide(gas, mem_size, requested) => { + let (mem_gas_cost, new_mem_gas, new_mem_size) = + self.mem_gas_cost(schedule, current_mem_size, &mem_size)?; + let gas = overflowing!(gas.overflow_add(mem_gas_cost)); + let provided = self.gas_provided(schedule, gas, requested)?; + let total_gas = overflowing!(gas.overflow_add(provided)); - Ok(match cost { - Request::Gas(gas) => { - InstructionRequirements { - gas_cost: gas, - provide_gas: None, - memory_required_size: 0, - memory_total_gas: self.current_mem_gas, - } - }, - Request::GasMem(gas, mem_size) => { - let (mem_gas_cost, new_mem_gas, new_mem_size) = self.mem_gas_cost(schedule, current_mem_size, &mem_size)?; - let gas = overflowing!(gas.overflow_add(mem_gas_cost)); - InstructionRequirements { - gas_cost: gas, - provide_gas: None, - memory_required_size: new_mem_size, - memory_total_gas: new_mem_gas, - } - }, - Request::GasMemProvide(gas, mem_size, requested) => { - let (mem_gas_cost, new_mem_gas, new_mem_size) = self.mem_gas_cost(schedule, current_mem_size, &mem_size)?; - let gas = overflowing!(gas.overflow_add(mem_gas_cost)); - let provided = self.gas_provided(schedule, gas, requested)?; - let total_gas = overflowing!(gas.overflow_add(provided)); + InstructionRequirements { + gas_cost: total_gas, + provide_gas: Some(provided), + memory_required_size: new_mem_size, + memory_total_gas: 
new_mem_gas, + } + } + Request::GasMemCopy(gas, mem_size, copy) => { + let (mem_gas_cost, new_mem_gas, new_mem_size) = + self.mem_gas_cost(schedule, current_mem_size, &mem_size)?; + let copy = overflowing!(to_word_size(copy)); + let copy_gas = overflowing!(Gas::from(schedule.copy_gas).overflow_mul(copy)); + let gas = overflowing!(gas.overflow_add(copy_gas)); + let gas = overflowing!(gas.overflow_add(mem_gas_cost)); - InstructionRequirements { - gas_cost: total_gas, - provide_gas: Some(provided), - memory_required_size: new_mem_size, - memory_total_gas: new_mem_gas, - } - }, - Request::GasMemCopy(gas, mem_size, copy) => { - let (mem_gas_cost, new_mem_gas, new_mem_size) = self.mem_gas_cost(schedule, current_mem_size, &mem_size)?; - let copy = overflowing!(to_word_size(copy)); - let copy_gas = overflowing!(Gas::from(schedule.copy_gas).overflow_mul(copy)); - let gas = overflowing!(gas.overflow_add(copy_gas)); - let gas = overflowing!(gas.overflow_add(mem_gas_cost)); + InstructionRequirements { + gas_cost: gas, + provide_gas: None, + memory_required_size: new_mem_size, + memory_total_gas: new_mem_gas, + } + } + }) + } - InstructionRequirements { - gas_cost: gas, - provide_gas: None, - memory_required_size: new_mem_size, - memory_total_gas: new_mem_gas, - } - }, - }) - } + fn mem_gas_cost( + &self, + schedule: &Schedule, + current_mem_size: usize, + mem_size: &Gas, + ) -> vm::Result<(Gas, Gas, usize)> { + let gas_for_mem = |mem_size: Gas| { + let s = mem_size >> 5; + // s * memory_gas + s * s / quad_coeff_div + let a = overflowing!(s.overflow_mul(Gas::from(schedule.memory_gas))); - fn mem_gas_cost(&self, schedule: &Schedule, current_mem_size: usize, mem_size: &Gas) -> vm::Result<(Gas, Gas, usize)> { - let gas_for_mem = |mem_size: Gas| { - let s = mem_size >> 5; - // s * memory_gas + s * s / quad_coeff_div - let a = overflowing!(s.overflow_mul(Gas::from(schedule.memory_gas))); + // Calculate s*s/quad_coeff_div + assert_eq!(schedule.quad_coeff_div, 512); + let b = 
overflowing!(s.overflow_mul_shr(s, 9)); + Ok(overflowing!(a.overflow_add(b))) + }; - // Calculate s*s/quad_coeff_div - assert_eq!(schedule.quad_coeff_div, 512); - let b = overflowing!(s.overflow_mul_shr(s, 9)); - Ok(overflowing!(a.overflow_add(b))) - }; + let current_mem_size = Gas::from(current_mem_size); + let req_mem_size_rounded = overflowing!(to_word_size(*mem_size)) << 5; - let current_mem_size = Gas::from(current_mem_size); - let req_mem_size_rounded = overflowing!(to_word_size(*mem_size)) << 5; + let (mem_gas_cost, new_mem_gas) = if req_mem_size_rounded > current_mem_size { + let new_mem_gas = gas_for_mem(req_mem_size_rounded)?; + (new_mem_gas - self.current_mem_gas, new_mem_gas) + } else { + (Gas::from(0), self.current_mem_gas) + }; - let (mem_gas_cost, new_mem_gas) = if req_mem_size_rounded > current_mem_size { - let new_mem_gas = gas_for_mem(req_mem_size_rounded)?; - (new_mem_gas - self.current_mem_gas, new_mem_gas) - } else { - (Gas::from(0), self.current_mem_gas) - }; - - Ok((mem_gas_cost, new_mem_gas, req_mem_size_rounded.as_usize())) - } + Ok((mem_gas_cost, new_mem_gas, req_mem_size_rounded.as_usize())) + } } #[inline] fn mem_needed_const(mem: &U256, add: usize) -> vm::Result { - Gas::from_u256(overflowing!(mem.overflowing_add(U256::from(add)))) + Gas::from_u256(overflowing!(mem.overflowing_add(U256::from(add)))) } #[inline] fn mem_needed(offset: &U256, size: &U256) -> vm::Result { - if size.is_zero() { - return Ok(Gas::from(0)); - } + if size.is_zero() { + return Ok(Gas::from(0)); + } - Gas::from_u256(overflowing!(offset.overflowing_add(*size))) + Gas::from_u256(overflowing!(offset.overflowing_add(*size))) } #[inline] fn add_gas_usize(value: Gas, num: usize) -> (Gas, bool) { - value.overflow_add(Gas::from(num)) + value.overflow_add(Gas::from(num)) } #[inline] fn to_word_size(value: Gas) -> (Gas, bool) { - let (gas, overflow) = add_gas_usize(value, 31); - if overflow { - return (gas, overflow); - } + let (gas, overflow) = add_gas_usize(value, 31); + 
if overflow { + return (gas, overflow); + } - (gas >> 5, false) + (gas >> 5, false) } #[inline] -fn calculate_eip1283_sstore_gas(schedule: &Schedule, original: &U256, current: &U256, new: &U256) -> Gas { - Gas::from( - if current == new { - // 1. If current value equals new value (this is a no-op), 200 gas is deducted. - schedule.sload_gas - } else { - // 2. If current value does not equal new value - if original == current { - // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context) - if original.is_zero() { - // 2.1.1. If original value is 0, 20000 gas is deducted. - schedule.sstore_set_gas - } else { - // 2.1.2. Otherwise, 5000 gas is deducted. - schedule.sstore_reset_gas +fn calculate_eip1283_sstore_gas( + schedule: &Schedule, + original: &U256, + current: &U256, + new: &U256, +) -> Gas { + Gas::from(if current == new { + // 1. If current value equals new value (this is a no-op), 200 gas is deducted. + schedule.sload_gas + } else { + // 2. If current value does not equal new value + if original == current { + // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context) + if original.is_zero() { + // 2.1.1. If original value is 0, 20000 gas is deducted. + schedule.sstore_set_gas + } else { + // 2.1.2. Otherwise, 5000 gas is deducted. + schedule.sstore_reset_gas - // 2.1.2.1. If new value is 0, add 15000 gas to refund counter. - } - } else { - // 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. - schedule.sload_gas + // 2.1.2.1. If new value is 0, add 15000 gas to refund counter. + } + } else { + // 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. + schedule.sload_gas - // 2.2.1. If original value is not 0 - // 2.2.1.1. 
If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. - // 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. + // 2.2.1. If original value is not 0 + // 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. + // 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. - // 2.2.2. If original value equals new value (this storage slot is reset) - // 2.2.2.1. If original value is 0, add 19800 gas to refund counter. - // 2.2.2.2. Otherwise, add 4800 gas to refund counter. - } - } - ) + // 2.2.2. If original value equals new value (this storage slot is reset) + // 2.2.2.1. If original value is 0, add 19800 gas to refund counter. + // 2.2.2.2. Otherwise, add 4800 gas to refund counter. + } + }) } -pub fn handle_eip1283_sstore_clears_refund(ext: &mut vm::Ext, original: &U256, current: &U256, new: &U256) { - let sstore_clears_schedule = ext.schedule().sstore_refund_gas; +pub fn handle_eip1283_sstore_clears_refund( + ext: &mut vm::Ext, + original: &U256, + current: &U256, + new: &U256, +) { + let sstore_clears_schedule = ext.schedule().sstore_refund_gas; - if current == new { - // 1. If current value equals new value (this is a no-op), 200 gas is deducted. - } else { - // 2. If current value does not equal new value - if original == current { - // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context) - if original.is_zero() { - // 2.1.1. If original value is 0, 20000 gas is deducted. - } else { - // 2.1.2. Otherwise, 5000 gas is deducted. - if new.is_zero() { - // 2.1.2.1. If new value is 0, add 15000 gas to refund counter. - ext.add_sstore_refund(sstore_clears_schedule); - } - } - } else { - // 2.2. 
If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. + if current == new { + // 1. If current value equals new value (this is a no-op), 200 gas is deducted. + } else { + // 2. If current value does not equal new value + if original == current { + // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context) + if original.is_zero() { + // 2.1.1. If original value is 0, 20000 gas is deducted. + } else { + // 2.1.2. Otherwise, 5000 gas is deducted. + if new.is_zero() { + // 2.1.2.1. If new value is 0, add 15000 gas to refund counter. + ext.add_sstore_refund(sstore_clears_schedule); + } + } + } else { + // 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. - if !original.is_zero() { - // 2.2.1. If original value is not 0 - if current.is_zero() { - // 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. - ext.sub_sstore_refund(sstore_clears_schedule); - } else if new.is_zero() { - // 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. - ext.add_sstore_refund(sstore_clears_schedule); - } - } + if !original.is_zero() { + // 2.2.1. If original value is not 0 + if current.is_zero() { + // 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. + ext.sub_sstore_refund(sstore_clears_schedule); + } else if new.is_zero() { + // 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. + ext.add_sstore_refund(sstore_clears_schedule); + } + } - if original == new { - // 2.2.2. 
If original value equals new value (this storage slot is reset) - if original.is_zero() { - // 2.2.2.1. If original value is 0, add 19800 gas to refund counter. - let refund = ext.schedule().sstore_set_gas - ext.schedule().sload_gas; - ext.add_sstore_refund(refund); - } else { - // 2.2.2.2. Otherwise, add 4800 gas to refund counter. - let refund = ext.schedule().sstore_reset_gas - ext.schedule().sload_gas; - ext.add_sstore_refund(refund); - } - } - } - } + if original == new { + // 2.2.2. If original value equals new value (this storage slot is reset) + if original.is_zero() { + // 2.2.2.1. If original value is 0, add 19800 gas to refund counter. + let refund = ext.schedule().sstore_set_gas - ext.schedule().sload_gas; + ext.add_sstore_refund(refund); + } else { + // 2.2.2.2. Otherwise, add 4800 gas to refund counter. + let refund = ext.schedule().sstore_reset_gas - ext.schedule().sload_gas; + ext.add_sstore_refund(refund); + } + } + } + } } #[test] fn test_mem_gas_cost() { - // given - let gasometer = Gasometer::::new(U256::zero()); - let schedule = Schedule::default(); - let current_mem_size = 5; - let mem_size = !U256::zero(); + // given + let gasometer = Gasometer::::new(U256::zero()); + let schedule = Schedule::default(); + let current_mem_size = 5; + let mem_size = !U256::zero(); - // when - let result = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size); + // when + let result = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size); - // then - if result.is_ok() { - assert!(false, "Should fail with OutOfGas"); - } + // then + if result.is_ok() { + assert!(false, "Should fail with OutOfGas"); + } } #[test] fn test_calculate_mem_cost() { - // given - let gasometer = Gasometer::::new(0); - let schedule = Schedule::default(); - let current_mem_size = 0; - let mem_size = 5; + // given + let gasometer = Gasometer::::new(0); + let schedule = Schedule::default(); + let current_mem_size = 0; + let mem_size = 5; - // when - let (mem_cost, 
new_mem_gas, mem_size) = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size).unwrap(); + // when + let (mem_cost, new_mem_gas, mem_size) = gasometer + .mem_gas_cost(&schedule, current_mem_size, &mem_size) + .unwrap(); - // then - assert_eq!(mem_cost, 3); - assert_eq!(new_mem_gas, 3); - assert_eq!(mem_size, 32); + // then + assert_eq!(mem_cost, 3); + assert_eq!(new_mem_gas, 3); + assert_eq!(mem_size, 32); } diff --git a/ethcore/evm/src/interpreter/informant.rs b/ethcore/evm/src/interpreter/informant.rs index 93d459f41..f5d45eb8d 100644 --- a/ethcore/evm/src/interpreter/informant.rs +++ b/ethcore/evm/src/interpreter/informant.rs @@ -19,144 +19,156 @@ pub use self::inner::*; #[macro_use] #[cfg(not(feature = "evm-debug"))] mod inner { - macro_rules! evm_debug { - ($x: expr) => {} - } + macro_rules! evm_debug { + ($x: expr) => {}; + } - pub struct EvmInformant; - impl EvmInformant { - pub fn new(_depth: usize) -> Self { - EvmInformant {} - } - pub fn done(&mut self) {} - } + pub struct EvmInformant; + impl EvmInformant { + pub fn new(_depth: usize) -> Self { + EvmInformant {} + } + pub fn done(&mut self) {} + } } #[macro_use] #[cfg(feature = "evm-debug")] mod inner { - use std::iter; - use std::collections::HashMap; - use std::time::{Instant, Duration}; + use std::{ + collections::HashMap, + iter, + time::{Duration, Instant}, + }; - use ethereum_types::U256; + use ethereum_types::U256; - use interpreter::stack::Stack; - use instructions::{Instruction, InstructionInfo}; - use CostType; + use instructions::{Instruction, InstructionInfo}; + use interpreter::stack::Stack; + use CostType; - macro_rules! evm_debug { - ($x: expr) => { - $x - } - } + macro_rules! 
evm_debug { + ($x: expr) => { + $x + }; + } - fn print(data: String) { - if cfg!(feature = "evm-debug-tests") { - println!("{}", data); - } else { - debug!(target: "evm", "{}", data); - } - } + fn print(data: String) { + if cfg!(feature = "evm-debug-tests") { + println!("{}", data); + } else { + debug!(target: "evm", "{}", data); + } + } - pub struct EvmInformant { - spacing: String, - last_instruction: Instant, - stats: HashMap, - } + pub struct EvmInformant { + spacing: String, + last_instruction: Instant, + stats: HashMap, + } - impl EvmInformant { + impl EvmInformant { + fn color(instruction: Instruction, name: &str) -> String { + let c = instruction as usize % 6; + let colors = [31, 34, 33, 32, 35, 36]; + format!("\x1B[1;{}m{}\x1B[0m", colors[c], name) + } - fn color(instruction: Instruction, name: &str) -> String { - let c = instruction as usize % 6; - let colors = [31, 34, 33, 32, 35, 36]; - format!("\x1B[1;{}m{}\x1B[0m", colors[c], name) - } + fn as_micro(duration: &Duration) -> u64 { + let mut sec = duration.as_secs(); + let subsec = duration.subsec_nanos() as u64; + sec = sec.saturating_mul(1_000_000u64); + sec += subsec / 1_000; + sec + } - fn as_micro(duration: &Duration) -> u64 { - let mut sec = duration.as_secs(); - let subsec = duration.subsec_nanos() as u64; - sec = sec.saturating_mul(1_000_000u64); - sec += subsec / 1_000; - sec - } + pub fn new(depth: usize) -> Self { + EvmInformant { + spacing: iter::repeat(".").take(depth).collect(), + last_instruction: Instant::now(), + stats: HashMap::new(), + } + } - pub fn new(depth: usize) -> Self { - EvmInformant { - spacing: iter::repeat(".").take(depth).collect(), - last_instruction: Instant::now(), - stats: HashMap::new(), - } - } + pub fn before_instruction( + &mut self, + pc: usize, + instruction: Instruction, + info: &InstructionInfo, + current_gas: &Cost, + stack: &Stack, + ) { + let time = self.last_instruction.elapsed(); + self.last_instruction = Instant::now(); - pub fn before_instruction(&mut 
self, pc: usize, instruction: Instruction, info: &InstructionInfo, current_gas: &Cost, stack: &Stack) { - let time = self.last_instruction.elapsed(); - self.last_instruction = Instant::now(); + print(format!( + "{}[0x{:<3x}][{:>19}(0x{:<2x}) Gas Left: {:6?} (Previous took: {:10}μs)", + &self.spacing, + pc, + Self::color(instruction, info.name), + instruction as u8, + current_gas, + Self::as_micro(&time), + )); - print(format!("{}[0x{:<3x}][{:>19}(0x{:<2x}) Gas Left: {:6?} (Previous took: {:10}μs)", - &self.spacing, - pc, - Self::color(instruction, info.name), - instruction as u8, - current_gas, - Self::as_micro(&time), - )); + if info.args > 0 { + for (idx, item) in stack.peek_top(info.args).iter().enumerate() { + print(format!("{} |{:2}: {:?}", self.spacing, idx, item)); + } + } + } - if info.args > 0 { - for (idx, item) in stack.peek_top(info.args).iter().enumerate() { - print(format!("{} |{:2}: {:?}", self.spacing, idx, item)); - } - } - } + pub fn after_instruction(&mut self, instruction: Instruction) { + let stats = self + .stats + .entry(instruction) + .or_insert_with(|| Stats::default()); + let took = self.last_instruction.elapsed(); + stats.note(took); + } - pub fn after_instruction(&mut self, instruction: Instruction) { - let stats = self.stats.entry(instruction).or_insert_with(|| Stats::default()); - let took = self.last_instruction.elapsed(); - stats.note(took); - } + pub fn done(&mut self) { + // Print out stats + let mut stats: Vec<(_, _)> = self.stats.drain().collect(); + stats.sort_by(|ref a, ref b| b.1.avg().cmp(&a.1.avg())); - pub fn done(&mut self) { - // Print out stats - let mut stats: Vec<(_,_)> = self.stats.drain().collect(); - stats.sort_by(|ref a, ref b| b.1.avg().cmp(&a.1.avg())); + print(format!("\n{}-------OPCODE STATS:", self.spacing)); + for (instruction, stats) in stats.into_iter() { + let info = instruction.info(); + print(format!( + "{}-------{:>19}(0x{:<2x}) count: {:4}, avg: {:10}μs", + self.spacing, + Self::color(instruction, 
info.name), + instruction as u8, + stats.count, + stats.avg(), + )); + } + } + } - print(format!("\n{}-------OPCODE STATS:", self.spacing)); - for (instruction, stats) in stats.into_iter() { - let info = instruction.info(); - print(format!("{}-------{:>19}(0x{:<2x}) count: {:4}, avg: {:10}μs", - self.spacing, - Self::color(instruction, info.name), - instruction as u8, - stats.count, - stats.avg(), - )); - } - } + struct Stats { + count: u64, + total_duration: Duration, + } - } + impl Default for Stats { + fn default() -> Self { + Stats { + count: 0, + total_duration: Duration::from_secs(0), + } + } + } - struct Stats { - count: u64, - total_duration: Duration, - } + impl Stats { + fn note(&mut self, took: Duration) { + self.count += 1; + self.total_duration += took; + } - impl Default for Stats { - fn default() -> Self { - Stats { - count: 0, - total_duration: Duration::from_secs(0), - } - } - } - - impl Stats { - fn note(&mut self, took: Duration) { - self.count += 1; - self.total_duration += took; - } - - fn avg(&self) -> u64 { - EvmInformant::as_micro(&self.total_duration) / self.count - } - } + fn avg(&self) -> u64 { + EvmInformant::as_micro(&self.total_duration) / self.count + } + } } diff --git a/ethcore/evm/src/interpreter/memory.rs b/ethcore/evm/src/interpreter/memory.rs index 16c575d5e..d834a3065 100644 --- a/ethcore/evm/src/interpreter/memory.rs +++ b/ethcore/evm/src/interpreter/memory.rs @@ -20,172 +20,175 @@ use vm::ReturnData; const MAX_RETURN_WASTE_BYTES: usize = 16384; pub trait Memory { - /// Retrieve current size of the memory - fn size(&self) -> usize; - /// Resize (shrink or expand) the memory to specified size (fills 0) - fn resize(&mut self, new_size: usize); - /// Resize the memory only if its smaller - fn expand(&mut self, new_size: usize); - /// Write single byte to memory - fn write_byte(&mut self, offset: U256, value: U256); - /// Write a word to memory. Does not resize memory! 
- fn write(&mut self, offset: U256, value: U256); - /// Read a word from memory - fn read(&self, offset: U256) -> U256; - /// Write slice of bytes to memory. Does not resize memory! - fn write_slice(&mut self, offset: U256, &[u8]); - /// Retrieve part of the memory between offset and offset + size - fn read_slice(&self, offset: U256, size: U256) -> &[u8]; - /// Retrieve writeable part of memory - fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut[u8]; - /// Convert memory into return data. - fn into_return_data(self, offset: U256, size: U256) -> ReturnData; + /// Retrieve current size of the memory + fn size(&self) -> usize; + /// Resize (shrink or expand) the memory to specified size (fills 0) + fn resize(&mut self, new_size: usize); + /// Resize the memory only if its smaller + fn expand(&mut self, new_size: usize); + /// Write single byte to memory + fn write_byte(&mut self, offset: U256, value: U256); + /// Write a word to memory. Does not resize memory! + fn write(&mut self, offset: U256, value: U256); + /// Read a word from memory + fn read(&self, offset: U256) -> U256; + /// Write slice of bytes to memory. Does not resize memory! + fn write_slice(&mut self, offset: U256, &[u8]); + /// Retrieve part of the memory between offset and offset + size + fn read_slice(&self, offset: U256, size: U256) -> &[u8]; + /// Retrieve writeable part of memory + fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8]; + /// Convert memory into return data. 
+ fn into_return_data(self, offset: U256, size: U256) -> ReturnData; } /// Checks whether offset and size is valid memory range -pub fn is_valid_range(off: usize, size: usize) -> bool { - // When size is zero we haven't actually expanded the memory - let overflow = off.overflowing_add(size).1; - size > 0 && !overflow +pub fn is_valid_range(off: usize, size: usize) -> bool { + // When size is zero we haven't actually expanded the memory + let overflow = off.overflowing_add(size).1; + size > 0 && !overflow } impl Memory for Vec { - fn size(&self) -> usize { - self.len() - } + fn size(&self) -> usize { + self.len() + } - fn read_slice(&self, init_off_u: U256, init_size_u: U256) -> &[u8] { - let off = init_off_u.low_u64() as usize; - let size = init_size_u.low_u64() as usize; - if !is_valid_range(off, size) { - &self[0..0] - } else { - &self[off..off+size] - } - } + fn read_slice(&self, init_off_u: U256, init_size_u: U256) -> &[u8] { + let off = init_off_u.low_u64() as usize; + let size = init_size_u.low_u64() as usize; + if !is_valid_range(off, size) { + &self[0..0] + } else { + &self[off..off + size] + } + } - fn read(&self, offset: U256) -> U256 { - let off = offset.low_u64() as usize; - U256::from(&self[off..off+32]) - } + fn read(&self, offset: U256) -> U256 { + let off = offset.low_u64() as usize; + U256::from(&self[off..off + 32]) + } - fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8] { - let off = offset.low_u64() as usize; - let s = size.low_u64() as usize; - if !is_valid_range(off, s) { - &mut self[0..0] - } else { - &mut self[off..off+s] - } - } + fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8] { + let off = offset.low_u64() as usize; + let s = size.low_u64() as usize; + if !is_valid_range(off, s) { + &mut self[0..0] + } else { + &mut self[off..off + s] + } + } - fn write_slice(&mut self, offset: U256, slice: &[u8]) { - if !slice.is_empty() { - let off = offset.low_u64() as usize; - 
self[off..off+slice.len()].copy_from_slice(slice); - } - } + fn write_slice(&mut self, offset: U256, slice: &[u8]) { + if !slice.is_empty() { + let off = offset.low_u64() as usize; + self[off..off + slice.len()].copy_from_slice(slice); + } + } - fn write(&mut self, offset: U256, value: U256) { - let off = offset.low_u64() as usize; - value.to_big_endian(&mut self[off..off+32]); - } + fn write(&mut self, offset: U256, value: U256) { + let off = offset.low_u64() as usize; + value.to_big_endian(&mut self[off..off + 32]); + } - fn write_byte(&mut self, offset: U256, value: U256) { - let off = offset.low_u64() as usize; - let val = value.low_u64() as u64; - self[off] = val as u8; - } + fn write_byte(&mut self, offset: U256, value: U256) { + let off = offset.low_u64() as usize; + let val = value.low_u64() as u64; + self[off] = val as u8; + } - fn resize(&mut self, new_size: usize) { - self.resize(new_size, 0); - } + fn resize(&mut self, new_size: usize) { + self.resize(new_size, 0); + } - fn expand(&mut self, size: usize) { - if size > self.len() { - Memory::resize(self, size) - } - } + fn expand(&mut self, size: usize) { + if size > self.len() { + Memory::resize(self, size) + } + } - fn into_return_data(mut self, offset: U256, size: U256) -> ReturnData { - let mut offset = offset.low_u64() as usize; - let size = size.low_u64() as usize; + fn into_return_data(mut self, offset: U256, size: U256) -> ReturnData { + let mut offset = offset.low_u64() as usize; + let size = size.low_u64() as usize; - if !is_valid_range(offset, size) { - return ReturnData::empty(); - } + if !is_valid_range(offset, size) { + return ReturnData::empty(); + } - if self.len() - size > MAX_RETURN_WASTE_BYTES { - if offset == 0 { - self.truncate(size); - self.shrink_to_fit(); - } else { - self = self[offset..(offset + size)].to_vec(); - offset = 0; - } - } - ReturnData::new(self, offset, size) - } + if self.len() - size > MAX_RETURN_WASTE_BYTES { + if offset == 0 { + self.truncate(size); + 
self.shrink_to_fit(); + } else { + self = self[offset..(offset + size)].to_vec(); + offset = 0; + } + } + ReturnData::new(self, offset, size) + } } #[cfg(test)] mod tests { - use ethereum_types::U256; - use super::Memory; + use super::Memory; + use ethereum_types::U256; - #[test] - fn test_memory_read_and_write() { - // given - let mem: &mut Memory = &mut vec![]; - mem.resize(0x80 + 32); + #[test] + fn test_memory_read_and_write() { + // given + let mem: &mut Memory = &mut vec![]; + mem.resize(0x80 + 32); - // when - mem.write(U256::from(0x80), U256::from(0xabcdef)); + // when + mem.write(U256::from(0x80), U256::from(0xabcdef)); - // then - assert_eq!(mem.read(U256::from(0x80)), U256::from(0xabcdef)); - } + // then + assert_eq!(mem.read(U256::from(0x80)), U256::from(0xabcdef)); + } - #[test] - fn test_memory_read_and_write_byte() { - // given - let mem: &mut Memory = &mut vec![]; - mem.resize(32); + #[test] + fn test_memory_read_and_write_byte() { + // given + let mem: &mut Memory = &mut vec![]; + mem.resize(32); - // when - mem.write_byte(U256::from(0x1d), U256::from(0xab)); - mem.write_byte(U256::from(0x1e), U256::from(0xcd)); - mem.write_byte(U256::from(0x1f), U256::from(0xef)); + // when + mem.write_byte(U256::from(0x1d), U256::from(0xab)); + mem.write_byte(U256::from(0x1e), U256::from(0xcd)); + mem.write_byte(U256::from(0x1f), U256::from(0xef)); - // then - assert_eq!(mem.read(U256::from(0x00)), U256::from(0xabcdef)); - } + // then + assert_eq!(mem.read(U256::from(0x00)), U256::from(0xabcdef)); + } - #[test] - fn test_memory_read_slice_and_write_slice() { - let mem: &mut Memory = &mut vec![]; - mem.resize(32); + #[test] + fn test_memory_read_slice_and_write_slice() { + let mem: &mut Memory = &mut vec![]; + mem.resize(32); - { - let slice = "abcdefghijklmnopqrstuvwxyz012345".as_bytes(); - mem.write_slice(U256::from(0), slice); + { + let slice = "abcdefghijklmnopqrstuvwxyz012345".as_bytes(); + mem.write_slice(U256::from(0), slice); - 
assert_eq!(mem.read_slice(U256::from(0), U256::from(32)), slice); - } + assert_eq!(mem.read_slice(U256::from(0), U256::from(32)), slice); + } - // write again - { - let slice = "67890".as_bytes(); - mem.write_slice(U256::from(0x1), slice); + // write again + { + let slice = "67890".as_bytes(); + mem.write_slice(U256::from(0x1), slice); - assert_eq!(mem.read_slice(U256::from(0), U256::from(7)), "a67890g".as_bytes()); - } + assert_eq!( + mem.read_slice(U256::from(0), U256::from(7)), + "a67890g".as_bytes() + ); + } - // write empty slice out of bounds - { - let slice = []; - mem.write_slice(U256::from(0x1000), &slice); - assert_eq!(mem.size(), 32); - } - } + // write empty slice out of bounds + { + let slice = []; + mem.write_slice(U256::from(0x1000), &slice); + assert_eq!(mem.size(), 32); + } + } } diff --git a/ethcore/evm/src/interpreter/mod.rs b/ethcore/evm/src/interpreter/mod.rs index 21e7b463b..4ee8c5661 100644 --- a/ethcore/evm/src/interpreter/mod.rs +++ b/ethcore/evm/src/interpreter/mod.rs @@ -19,31 +19,30 @@ #[macro_use] mod informant; mod gasometer; -mod stack; mod memory; mod shared_cache; +mod stack; -use std::marker::PhantomData; -use std::{cmp, mem}; -use std::sync::Arc; -use hash::keccak; use bytes::Bytes; -use ethereum_types::{U256, H256, Address}; +use ethereum_types::{Address, H256, U256}; +use hash::keccak; use num_bigint::BigUint; +use std::{cmp, marker::PhantomData, mem, sync::Arc}; use vm::{ - self, ActionParams, ParamsType, ActionValue, CallType, MessageCallResult, - ContractCreateResult, CreateContractAddress, ReturnData, GasLeft, Schedule, - TrapKind, TrapError + self, ActionParams, ActionValue, CallType, ContractCreateResult, CreateContractAddress, + GasLeft, MessageCallResult, ParamsType, ReturnData, Schedule, TrapError, TrapKind, }; use evm::CostType; use instructions::{self, Instruction, InstructionInfo}; -use self::gasometer::Gasometer; -use self::stack::{Stack, VecStack}; -use self::memory::Memory; pub use self::shared_cache::SharedCache; 
+use self::{ + gasometer::Gasometer, + memory::Memory, + stack::{Stack, VecStack}, +}; use bit_set::BitSet; @@ -63,1212 +62,1453 @@ const TWO_POW_224: U256 = U256([0, 0, 0, 0x100000000]); //0x1 00000000 00000000 const TWO_POW_248: U256 = U256([0, 0, 0, 0x100000000000000]); //0x1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 000000 fn to_biguint(x: U256) -> BigUint { - let mut bytes = [0u8; 32]; - x.to_little_endian(&mut bytes); - BigUint::from_bytes_le(&bytes) + let mut bytes = [0u8; 32]; + x.to_little_endian(&mut bytes); + BigUint::from_bytes_le(&bytes) } fn from_biguint(x: BigUint) -> U256 { - let bytes = x.to_bytes_le(); - U256::from_little_endian(&bytes) + let bytes = x.to_bytes_le(); + U256::from_little_endian(&bytes) } /// Abstraction over raw vector of Bytes. Easier state management of PC. struct CodeReader { - position: ProgramCounter, - code: Arc, + position: ProgramCounter, + code: Arc, } impl CodeReader { - /// Create new code reader - starting at position 0. - fn new(code: Arc) -> Self { - CodeReader { - code, - position: 0, - } - } + /// Create new code reader - starting at position 0. + fn new(code: Arc) -> Self { + CodeReader { code, position: 0 } + } - /// Get `no_of_bytes` from code and convert to U256. Move PC - fn read(&mut self, no_of_bytes: usize) -> U256 { - let pos = self.position; - self.position += no_of_bytes; - let max = cmp::min(pos + no_of_bytes, self.code.len()); - U256::from(&self.code[pos..max]) - } + /// Get `no_of_bytes` from code and convert to U256. Move PC + fn read(&mut self, no_of_bytes: usize) -> U256 { + let pos = self.position; + self.position += no_of_bytes; + let max = cmp::min(pos + no_of_bytes, self.code.len()); + U256::from(&self.code[pos..max]) + } - fn len(&self) -> usize { - self.code.len() - } + fn len(&self) -> usize { + self.code.len() + } } enum InstructionResult { - Ok, - UnusedGas(Gas), - JumpToPosition(U256), - StopExecutionNeedsReturn { - /// Gas left. - gas: Gas, - /// Return data offset. 
- init_off: U256, - /// Return data size. - init_size: U256, - /// Apply or revert state changes. - apply: bool, - }, - StopExecution, - Trap(TrapKind), + Ok, + UnusedGas(Gas), + JumpToPosition(U256), + StopExecutionNeedsReturn { + /// Gas left. + gas: Gas, + /// Return data offset. + init_off: U256, + /// Return data size. + init_size: U256, + /// Apply or revert state changes. + apply: bool, + }, + StopExecution, + Trap(TrapKind), } /// ActionParams without code, so that it can be feed into CodeReader. #[derive(Debug)] struct InterpreterParams { - /// Address of currently executed code. - pub code_address: Address, - /// Hash of currently executed code. - pub code_hash: Option, - /// Receive address. Usually equal to code_address, - /// except when called using CALLCODE. - pub address: Address, - /// Sender of current part of the transaction. - pub sender: Address, - /// Transaction initiator. - pub origin: Address, - /// Gas paid up front for transaction execution - pub gas: U256, - /// Gas price. - pub gas_price: U256, - /// Transaction value. - pub value: ActionValue, - /// Input data. - pub data: Option, - /// Type of call - pub call_type: CallType, - /// Param types encoding - pub params_type: ParamsType, + /// Address of currently executed code. + pub code_address: Address, + /// Hash of currently executed code. + pub code_hash: Option, + /// Receive address. Usually equal to code_address, + /// except when called using CALLCODE. + pub address: Address, + /// Sender of current part of the transaction. + pub sender: Address, + /// Transaction initiator. + pub origin: Address, + /// Gas paid up front for transaction execution + pub gas: U256, + /// Gas price. + pub gas_price: U256, + /// Transaction value. + pub value: ActionValue, + /// Input data. 
+ pub data: Option, + /// Type of call + pub call_type: CallType, + /// Param types encoding + pub params_type: ParamsType, } impl From for InterpreterParams { - fn from(params: ActionParams) -> Self { - InterpreterParams { - code_address: params.code_address, - code_hash: params.code_hash, - address: params.address, - sender: params.sender, - origin: params.origin, - gas: params.gas, - gas_price: params.gas_price, - value: params.value, - data: params.data, - call_type: params.call_type, - params_type: params.params_type, - } - } + fn from(params: ActionParams) -> Self { + InterpreterParams { + code_address: params.code_address, + code_hash: params.code_hash, + address: params.address, + sender: params.sender, + origin: params.origin, + gas: params.gas, + gas_price: params.gas_price, + value: params.value, + data: params.data, + call_type: params.call_type, + params_type: params.params_type, + } + } } /// Stepping result returned by interpreter. pub enum InterpreterResult { - /// The VM has already stopped. - Stopped, - /// The VM has just finished execution in the current step. - Done(vm::Result), - /// The VM can continue to run. - Continue, - Trap(TrapKind), + /// The VM has already stopped. + Stopped, + /// The VM has just finished execution in the current step. + Done(vm::Result), + /// The VM can continue to run. 
+ Continue, + Trap(TrapKind), } /// Intepreter EVM implementation pub struct Interpreter { - mem: Vec, - cache: Arc, - params: InterpreterParams, - reader: CodeReader, - return_data: ReturnData, - informant: informant::EvmInformant, - do_trace: bool, - done: bool, - valid_jump_destinations: Option>, - gasometer: Option>, - stack: VecStack, - resume_output_range: Option<(U256, U256)>, - resume_result: Option>, - last_stack_ret_len: usize, - _type: PhantomData, + mem: Vec, + cache: Arc, + params: InterpreterParams, + reader: CodeReader, + return_data: ReturnData, + informant: informant::EvmInformant, + do_trace: bool, + done: bool, + valid_jump_destinations: Option>, + gasometer: Option>, + stack: VecStack, + resume_output_range: Option<(U256, U256)>, + resume_result: Option>, + last_stack_ret_len: usize, + _type: PhantomData, } impl vm::Exec for Interpreter { - fn exec(mut self: Box, ext: &mut vm::Ext) -> vm::ExecTrapResult { - loop { - let result = self.step(ext); - match result { - InterpreterResult::Continue => {}, - InterpreterResult::Done(value) => return Ok(value), - InterpreterResult::Trap(trap) => match trap { - TrapKind::Call(params) => { - return Err(TrapError::Call(params, self)); - }, - TrapKind::Create(params, address) => { - return Err(TrapError::Create(params, address, self)); - }, - }, - InterpreterResult::Stopped => panic!("Attempted to execute an already stopped VM.") - } - } - } + fn exec(mut self: Box, ext: &mut vm::Ext) -> vm::ExecTrapResult { + loop { + let result = self.step(ext); + match result { + InterpreterResult::Continue => {} + InterpreterResult::Done(value) => return Ok(value), + InterpreterResult::Trap(trap) => match trap { + TrapKind::Call(params) => { + return Err(TrapError::Call(params, self)); + } + TrapKind::Create(params, address) => { + return Err(TrapError::Create(params, address, self)); + } + }, + InterpreterResult::Stopped => panic!("Attempted to execute an already stopped VM."), + } + } + } } impl vm::ResumeCall for 
Interpreter { - fn resume_call(mut self: Box, result: MessageCallResult) -> Box { - { - let this = &mut *self; - let (out_off, out_size) = this.resume_output_range.take().expect("Box is obtained from a call opcode; resume_output_range is always set after those opcodes are executed; qed"); + fn resume_call(mut self: Box, result: MessageCallResult) -> Box { + { + let this = &mut *self; + let (out_off, out_size) = this.resume_output_range.take().expect("Box is obtained from a call opcode; resume_output_range is always set after those opcodes are executed; qed"); - match result { - MessageCallResult::Success(gas_left, data) => { - let output = this.mem.writeable_slice(out_off, out_size); - let len = cmp::min(output.len(), data.len()); - (&mut output[..len]).copy_from_slice(&data[..len]); + match result { + MessageCallResult::Success(gas_left, data) => { + let output = this.mem.writeable_slice(out_off, out_size); + let len = cmp::min(output.len(), data.len()); + (&mut output[..len]).copy_from_slice(&data[..len]); - this.return_data = data; - this.stack.push(U256::one()); - this.resume_result = Some(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater than current one"))); - }, - MessageCallResult::Reverted(gas_left, data) => { - let output = this.mem.writeable_slice(out_off, out_size); - let len = cmp::min(output.len(), data.len()); - (&mut output[..len]).copy_from_slice(&data[..len]); + this.return_data = data; + this.stack.push(U256::one()); + this.resume_result = Some(InstructionResult::UnusedGas( + Cost::from_u256(gas_left) + .expect("Gas left cannot be greater than current one"), + )); + } + MessageCallResult::Reverted(gas_left, data) => { + let output = this.mem.writeable_slice(out_off, out_size); + let len = cmp::min(output.len(), data.len()); + (&mut output[..len]).copy_from_slice(&data[..len]); - this.return_data = data; - this.stack.push(U256::zero()); - this.resume_result = 
Some(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater than current one"))); - }, - MessageCallResult::Failed => { - this.stack.push(U256::zero()); - this.resume_result = Some(InstructionResult::Ok); - }, - } - } - self - } + this.return_data = data; + this.stack.push(U256::zero()); + this.resume_result = Some(InstructionResult::UnusedGas( + Cost::from_u256(gas_left) + .expect("Gas left cannot be greater than current one"), + )); + } + MessageCallResult::Failed => { + this.stack.push(U256::zero()); + this.resume_result = Some(InstructionResult::Ok); + } + } + } + self + } } impl vm::ResumeCreate for Interpreter { - fn resume_create(mut self: Box, result: ContractCreateResult) -> Box { - match result { - ContractCreateResult::Created(address, gas_left) => { - self.stack.push(address_to_u256(address)); - self.resume_result = Some(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater."))); - }, - ContractCreateResult::Reverted(gas_left, return_data) => { - self.stack.push(U256::zero()); - self.return_data = return_data; - self.resume_result = Some(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater."))); - }, - ContractCreateResult::Failed => { - self.stack.push(U256::zero()); - self.resume_result = Some(InstructionResult::Ok); - }, - } - self - } + fn resume_create(mut self: Box, result: ContractCreateResult) -> Box { + match result { + ContractCreateResult::Created(address, gas_left) => { + self.stack.push(address_to_u256(address)); + self.resume_result = Some(InstructionResult::UnusedGas( + Cost::from_u256(gas_left).expect("Gas left cannot be greater."), + )); + } + ContractCreateResult::Reverted(gas_left, return_data) => { + self.stack.push(U256::zero()); + self.return_data = return_data; + self.resume_result = Some(InstructionResult::UnusedGas( + Cost::from_u256(gas_left).expect("Gas left cannot be greater."), + )); + } + ContractCreateResult::Failed 
=> { + self.stack.push(U256::zero()); + self.resume_result = Some(InstructionResult::Ok); + } + } + self + } } impl Interpreter { - /// Create a new `Interpreter` instance with shared cache. - pub fn new(mut params: ActionParams, cache: Arc, schedule: &Schedule, depth: usize) -> Interpreter { - let reader = CodeReader::new(params.code.take().expect("VM always called with code; qed")); - let params = InterpreterParams::from(params); - let informant = informant::EvmInformant::new(depth); - let valid_jump_destinations = None; - let gasometer = Cost::from_u256(params.gas).ok().map(|gas| Gasometer::::new(gas)); - let stack = VecStack::with_capacity(schedule.stack_limit, U256::zero()); - - Interpreter { - cache, params, reader, informant, - valid_jump_destinations, gasometer, stack, - done: false, - // Overridden in `step_inner` based on - // the result of `ext.trace_next_instruction`. - do_trace: true, - mem: Vec::new(), - return_data: ReturnData::empty(), - last_stack_ret_len: 0, - resume_output_range: None, - resume_result: None, - _type: PhantomData, - } - } - - /// Execute a single step on the VM. - #[inline(always)] - pub fn step(&mut self, ext: &mut vm::Ext) -> InterpreterResult { - if self.done { - return InterpreterResult::Stopped; - } - - let result = if self.gasometer.is_none() { - InterpreterResult::Done(Err(vm::Error::OutOfGas)) - } else if self.reader.len() == 0 { - let current_gas = self.gasometer - .as_ref() - .expect("Gasometer None case is checked above; qed") - .current_gas - .as_u256(); - InterpreterResult::Done(Ok(GasLeft::Known(current_gas))) - } else { - self.step_inner(ext) - }; - - if let &InterpreterResult::Done(_) = &result { - self.done = true; - self.informant.done(); - } - result - } - - /// Inner helper function for step. 
- #[inline(always)] - fn step_inner(&mut self, ext: &mut dyn vm::Ext) -> InterpreterResult { - let result = match self.resume_result.take() { - Some(result) => result, - None => { - let opcode = self.reader.code[self.reader.position]; - let instruction = Instruction::from_u8(opcode); - self.reader.position += 1; - - // TODO: make compile-time removable if too much of a performance hit. - self.do_trace = self.do_trace && ext.trace_next_instruction( - self.reader.position - 1, opcode, self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas.as_u256(), - ); - - let instruction = match instruction { - Some(i) => i, - None => return InterpreterResult::Done(Err(vm::Error::BadInstruction { - instruction: opcode - })), - }; - - let info = instruction.info(); - self.last_stack_ret_len = info.ret; - if let Err(e) = self.verify_instruction(ext, instruction, info) { - return InterpreterResult::Done(Err(e)); - }; - - // Calculate gas cost - let requirements = match self.gasometer.as_mut().expect(GASOMETER_PROOF).requirements(ext, instruction, info, &self.stack, self.mem.size()) { - Ok(t) => t, - Err(e) => return InterpreterResult::Done(Err(e)), - }; - if self.do_trace { - ext.trace_prepare_execute(self.reader.position - 1, opcode, requirements.gas_cost.as_u256(), Self::mem_written(instruction, &self.stack), Self::store_written(instruction, &self.stack)); - } - if let Err(e) = self.gasometer.as_mut().expect(GASOMETER_PROOF).verify_gas(&requirements.gas_cost) { - if self.do_trace { - ext.trace_failed(); - } - return InterpreterResult::Done(Err(e)); - } - self.mem.expand(requirements.memory_required_size); - self.gasometer.as_mut().expect(GASOMETER_PROOF).current_mem_gas = requirements.memory_total_gas; - self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas = self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas - requirements.gas_cost; - - evm_debug!({ self.informant.before_instruction(self.reader.position, instruction, info, 
&self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas, &self.stack) }); - - // Execute instruction - let current_gas = self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas; - let result = match self.exec_instruction( - current_gas, ext, instruction, requirements.provide_gas - ) { - Err(x) => { - if self.do_trace { - ext.trace_failed(); - } - return InterpreterResult::Done(Err(x)); - }, - Ok(x) => x, - }; - evm_debug!({ self.informant.after_instruction(instruction) }); - result - }, - }; - - if let InstructionResult::Trap(trap) = result { - return InterpreterResult::Trap(trap); - } - - if let InstructionResult::UnusedGas(ref gas) = result { - self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas = self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas + *gas; - } - - if self.do_trace { - ext.trace_executed( - self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas.as_u256(), - self.stack.peek_top(self.last_stack_ret_len), - &self.mem, - ); - } - - // Advance - match result { - InstructionResult::JumpToPosition(position) => { - if self.valid_jump_destinations.is_none() { - self.valid_jump_destinations = Some(self.cache.jump_destinations(&self.params.code_hash, &self.reader.code)); - } - let jump_destinations = self.valid_jump_destinations.as_ref().expect("jump_destinations are initialized on first jump; qed"); - let pos = match self.verify_jump(position, jump_destinations) { - Ok(x) => x, - Err(e) => return InterpreterResult::Done(Err(e)) - }; - self.reader.position = pos; - }, - InstructionResult::StopExecutionNeedsReturn {gas, init_off, init_size, apply} => { - let mem = mem::replace(&mut self.mem, Vec::new()); - return InterpreterResult::Done(Ok(GasLeft::NeedsReturn { - gas_left: gas.as_u256(), - data: mem.into_return_data(init_off, init_size), - apply_state: apply - })); - }, - InstructionResult::StopExecution => { - return 
InterpreterResult::Done(Ok(GasLeft::Known(self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas.as_u256()))); - }, - _ => {}, - } - - if self.reader.position >= self.reader.len() { - return InterpreterResult::Done(Ok(GasLeft::Known(self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas.as_u256()))); - } - - InterpreterResult::Continue - } - - fn verify_instruction(&self, ext: &vm::Ext, instruction: Instruction, info: &InstructionInfo) -> vm::Result<()> { - let schedule = ext.schedule(); - - if (instruction == instructions::DELEGATECALL && !schedule.have_delegate_call) || - (instruction == instructions::CREATE2 && !schedule.have_create2) || - (instruction == instructions::STATICCALL && !schedule.have_static_call) || - ((instruction == instructions::RETURNDATACOPY || instruction == instructions::RETURNDATASIZE) && !schedule.have_return_data) || - (instruction == instructions::REVERT && !schedule.have_revert) || - ((instruction == instructions::SHL || instruction == instructions::SHR || instruction == instructions::SAR) && !schedule.have_bitwise_shifting) || - (instruction == instructions::EXTCODEHASH && !schedule.have_extcodehash) || - (instruction == instructions::CHAINID && !schedule.have_chain_id) || - (instruction == instructions::SELFBALANCE && !schedule.have_selfbalance) - { - return Err(vm::Error::BadInstruction { - instruction: instruction as u8 - }); - } - - if !self.stack.has(info.args) { - Err(vm::Error::StackUnderflow { - instruction: info.name, - wanted: info.args, - on_stack: self.stack.size() - }) - } else if self.stack.size() - info.args + info.ret > schedule.stack_limit { - Err(vm::Error::OutOfStack { - instruction: info.name, - wanted: info.ret - info.args, - limit: schedule.stack_limit - }) - } else { - Ok(()) - } - } - - fn mem_written( - instruction: Instruction, - stack: &Stack - ) -> Option<(usize, usize)> { - let read = |pos| stack.peek(pos).low_u64() as usize; - let written = match instruction { - instructions::MSTORE | 
instructions::MLOAD => Some((read(0), 32)), - instructions::MSTORE8 => Some((read(0), 1)), - instructions::CALLDATACOPY | instructions::CODECOPY | instructions::RETURNDATACOPY => Some((read(0), read(2))), - instructions::EXTCODECOPY => Some((read(1), read(3))), - instructions::CALL | instructions::CALLCODE => Some((read(5), read(6))), - instructions::DELEGATECALL | instructions::STATICCALL => Some((read(4), read(5))), - _ => None, - }; - - match written { - Some((offset, size)) if !memory::is_valid_range(offset, size) => None, - written => written, - } - } - - fn store_written( - instruction: Instruction, - stack: &Stack - ) -> Option<(U256, U256)> { - match instruction { - instructions::SSTORE => Some((stack.peek(0).clone(), stack.peek(1).clone())), - _ => None, - } - } - - fn exec_instruction( - &mut self, - gas: Cost, - ext: &mut vm::Ext, - instruction: Instruction, - provided: Option - ) -> vm::Result> { - match instruction { - instructions::JUMP => { - let jump = self.stack.pop_back(); - return Ok(InstructionResult::JumpToPosition( - jump - )); - }, - instructions::JUMPI => { - let jump = self.stack.pop_back(); - let condition = self.stack.pop_back(); - if !condition.is_zero() { - return Ok(InstructionResult::JumpToPosition( - jump - )); - } - }, - instructions::JUMPDEST => { - // ignore - }, - instructions::CREATE | instructions::CREATE2 => { - let endowment = self.stack.pop_back(); - let init_off = self.stack.pop_back(); - let init_size = self.stack.pop_back(); - let address_scheme = match instruction { - instructions::CREATE => CreateContractAddress::FromSenderAndNonce, - instructions::CREATE2 => CreateContractAddress::FromSenderSaltAndCodeHash(self.stack.pop_back().into()), - _ => unreachable!("instruction can only be CREATE/CREATE2 checked above; qed"), - }; - - let create_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is 
`CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is `CREATE`; qed"); - - if ext.is_static() { - return Err(vm::Error::MutableCallInStaticContext); - } - - // clear return data buffer before creating new call frame. - self.return_data = ReturnData::empty(); - - let can_create = ext.balance(&self.params.address)? >= endowment && ext.depth() < ext.schedule().max_depth; - if !can_create { - self.stack.push(U256::zero()); - return Ok(InstructionResult::UnusedGas(create_gas)); - } - - let contract_code = self.mem.read_slice(init_off, init_size); - - let create_result = ext.create(&create_gas.as_u256(), &endowment, contract_code, address_scheme, true); - return match create_result { - Ok(ContractCreateResult::Created(address, gas_left)) => { - self.stack.push(address_to_u256(address)); - Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater."))) - }, - Ok(ContractCreateResult::Reverted(gas_left, return_data)) => { - self.stack.push(U256::zero()); - self.return_data = return_data; - Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater."))) - }, - Ok(ContractCreateResult::Failed) => { - self.stack.push(U256::zero()); - Ok(InstructionResult::Ok) - }, - Err(trap) => { - Ok(InstructionResult::Trap(trap)) - }, - }; - }, - instructions::CALL | instructions::CALLCODE | instructions::DELEGATECALL | instructions::STATICCALL => { - assert!(ext.schedule().call_value_transfer_gas > ext.schedule().call_stipend, "overflow possible"); - - self.stack.pop_back(); - let call_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is one of `CALL`/`CALLCODE`/`DELEGATECALL`; qed"); - let code_address = self.stack.pop_back(); - let code_address = u256_to_address(&code_address); - - let value = if instruction == instructions::DELEGATECALL { - None - } else if instruction == 
instructions::STATICCALL { - Some(U256::zero()) - } else { - Some(self.stack.pop_back()) - }; - - let in_off = self.stack.pop_back(); - let in_size = self.stack.pop_back(); - let out_off = self.stack.pop_back(); - let out_size = self.stack.pop_back(); - - // Add stipend (only CALL|CALLCODE when value > 0) - let call_gas = call_gas.overflow_add(value.map_or_else(|| Cost::from(0), |val| match val.is_zero() { - false => Cost::from(ext.schedule().call_stipend), - true => Cost::from(0), - })).0; - - // Get sender & receive addresses, check if we have balance - let (sender_address, receive_address, has_balance, call_type) = match instruction { - instructions::CALL => { - if ext.is_static() && value.map_or(false, |v| !v.is_zero()) { - return Err(vm::Error::MutableCallInStaticContext); - } - let has_balance = ext.balance(&self.params.address)? >= value.expect("value set for all but delegate call; qed"); - (&self.params.address, &code_address, has_balance, CallType::Call) - }, - instructions::CALLCODE => { - let has_balance = ext.balance(&self.params.address)? >= value.expect("value set for all but delegate call; qed"); - (&self.params.address, &self.params.address, has_balance, CallType::CallCode) - }, - instructions::DELEGATECALL => (&self.params.sender, &self.params.address, true, CallType::DelegateCall), - instructions::STATICCALL => (&self.params.address, &code_address, true, CallType::StaticCall), - _ => panic!(format!("Unexpected instruction {:?} in CALL branch.", instruction)) - }; - - // clear return data buffer before creating new call frame. 
- self.return_data = ReturnData::empty(); - - let can_call = has_balance && ext.depth() < ext.schedule().max_depth; - if !can_call { - self.stack.push(U256::zero()); - return Ok(InstructionResult::UnusedGas(call_gas)); - } - - let call_result = { - let input = self.mem.read_slice(in_off, in_size); - ext.call(&call_gas.as_u256(), sender_address, receive_address, value, input, &code_address, call_type, true) - }; - - self.resume_output_range = Some((out_off, out_size)); - - return match call_result { - Ok(MessageCallResult::Success(gas_left, data)) => { - let output = self.mem.writeable_slice(out_off, out_size); - let len = cmp::min(output.len(), data.len()); - (&mut output[..len]).copy_from_slice(&data[..len]); - - self.stack.push(U256::one()); - self.return_data = data; - Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater than current one"))) - }, - Ok(MessageCallResult::Reverted(gas_left, data)) => { - let output = self.mem.writeable_slice(out_off, out_size); - let len = cmp::min(output.len(), data.len()); - (&mut output[..len]).copy_from_slice(&data[..len]); - - self.stack.push(U256::zero()); - self.return_data = data; - Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater than current one"))) - }, - Ok(MessageCallResult::Failed) => { - self.stack.push(U256::zero()); - Ok(InstructionResult::Ok) - }, - Err(trap) => { - Ok(InstructionResult::Trap(trap)) - }, - }; - }, - instructions::RETURN => { - let init_off = self.stack.pop_back(); - let init_size = self.stack.pop_back(); - - return Ok(InstructionResult::StopExecutionNeedsReturn {gas: gas, init_off: init_off, init_size: init_size, apply: true}) - }, - instructions::REVERT => { - let init_off = self.stack.pop_back(); - let init_size = self.stack.pop_back(); - - return Ok(InstructionResult::StopExecutionNeedsReturn {gas: gas, init_off: init_off, init_size: init_size, apply: false}) - }, - instructions::STOP => { - return 
Ok(InstructionResult::StopExecution); - }, - instructions::SUICIDE => { - let address = self.stack.pop_back(); - ext.suicide(&u256_to_address(&address))?; - return Ok(InstructionResult::StopExecution); - }, - instructions::LOG0 | instructions::LOG1 | instructions::LOG2 | instructions::LOG3 | instructions::LOG4 => { - let no_of_topics = instruction.log_topics().expect("log_topics always return some for LOG* instructions; qed"); - - let offset = self.stack.pop_back(); - let size = self.stack.pop_back(); - let topics = self.stack.pop_n(no_of_topics) - .iter() - .map(H256::from) - .collect(); - ext.log(topics, self.mem.read_slice(offset, size))?; - }, - instructions::PUSH1 | instructions::PUSH2 | instructions::PUSH3 | instructions::PUSH4 | - instructions::PUSH5 | instructions::PUSH6 | instructions::PUSH7 | instructions::PUSH8 | - instructions::PUSH9 | instructions::PUSH10 | instructions::PUSH11 | instructions::PUSH12 | - instructions::PUSH13 | instructions::PUSH14 | instructions::PUSH15 | instructions::PUSH16 | - instructions::PUSH17 | instructions::PUSH18 | instructions::PUSH19 | instructions::PUSH20 | - instructions::PUSH21 | instructions::PUSH22 | instructions::PUSH23 | instructions::PUSH24 | - instructions::PUSH25 | instructions::PUSH26 | instructions::PUSH27 | instructions::PUSH28 | - instructions::PUSH29 | instructions::PUSH30 | instructions::PUSH31 | instructions::PUSH32 => { - let bytes = instruction.push_bytes().expect("push_bytes always return some for PUSH* instructions"); - let val = self.reader.read(bytes); - self.stack.push(val); - }, - instructions::MLOAD => { - let word = self.mem.read(self.stack.pop_back()); - self.stack.push(U256::from(word)); - }, - instructions::MSTORE => { - let offset = self.stack.pop_back(); - let word = self.stack.pop_back(); - Memory::write(&mut self.mem, offset, word); - }, - instructions::MSTORE8 => { - let offset = self.stack.pop_back(); - let byte = self.stack.pop_back(); - self.mem.write_byte(offset, byte); - }, - 
instructions::MSIZE => { - self.stack.push(U256::from(self.mem.size())); - }, - instructions::SHA3 => { - let offset = self.stack.pop_back(); - let size = self.stack.pop_back(); - let k = keccak(self.mem.read_slice(offset, size)); - self.stack.push(U256::from(&*k)); - }, - instructions::SLOAD => { - let key = H256::from(&self.stack.pop_back()); - let word = U256::from(&*ext.storage_at(&key)?); - self.stack.push(word); - }, - instructions::SSTORE => { - let address = H256::from(&self.stack.pop_back()); - let val = self.stack.pop_back(); - - let current_val = U256::from(&*ext.storage_at(&address)?); - // Increase refund for clear - if ext.schedule().eip1283 { - let original_val = U256::from(&*ext.initial_storage_at(&address)?); - gasometer::handle_eip1283_sstore_clears_refund(ext, &original_val, &current_val, &val); - } else { - if !current_val.is_zero() && val.is_zero() { - let sstore_clears_schedule = ext.schedule().sstore_refund_gas; - ext.add_sstore_refund(sstore_clears_schedule); - } - } - ext.set_storage(address, H256::from(&val))?; - }, - instructions::PC => { - self.stack.push(U256::from(self.reader.position - 1)); - }, - instructions::GAS => { - self.stack.push(gas.as_u256()); - }, - instructions::ADDRESS => { - self.stack.push(address_to_u256(self.params.address.clone())); - }, - instructions::ORIGIN => { - self.stack.push(address_to_u256(self.params.origin.clone())); - }, - instructions::BALANCE => { - let address = u256_to_address(&self.stack.pop_back()); - let balance = ext.balance(&address)?; - self.stack.push(balance); - }, - instructions::CALLER => { - self.stack.push(address_to_u256(self.params.sender.clone())); - }, - instructions::CALLVALUE => { - self.stack.push(match self.params.value { - ActionValue::Transfer(val) | ActionValue::Apparent(val) => val - }); - }, - instructions::CALLDATALOAD => { - let big_id = self.stack.pop_back(); - let id = big_id.low_u64() as usize; - let max = id.wrapping_add(32); - if let Some(data) = self.params.data.as_ref() { - 
let bound = cmp::min(data.len(), max); - if id < bound && big_id < U256::from(data.len()) { - let mut v = [0u8; 32]; - v[0..bound-id].clone_from_slice(&data[id..bound]); - self.stack.push(U256::from(&v[..])) - } else { - self.stack.push(U256::zero()) - } - } else { - self.stack.push(U256::zero()) - } - }, - instructions::CALLDATASIZE => { - self.stack.push(U256::from(self.params.data.as_ref().map_or(0, |l| l.len()))); - }, - instructions::CODESIZE => { - self.stack.push(U256::from(self.reader.len())); - }, - instructions::RETURNDATASIZE => { - self.stack.push(U256::from(self.return_data.len())) - }, - instructions::EXTCODESIZE => { - let address = u256_to_address(&self.stack.pop_back()); - let len = ext.extcodesize(&address)?.unwrap_or(0); - self.stack.push(U256::from(len)); - }, - instructions::EXTCODEHASH => { - let address = u256_to_address(&self.stack.pop_back()); - let hash = ext.extcodehash(&address)?.unwrap_or_else(H256::zero); - self.stack.push(U256::from(hash)); - }, - instructions::CALLDATACOPY => { - Self::copy_data_to_memory(&mut self.mem, &mut self.stack, &self.params.data.as_ref().map_or_else(|| &[] as &[u8], |d| &*d as &[u8])); - }, - instructions::RETURNDATACOPY => { - { - let source_offset = self.stack.peek(1); - let size = self.stack.peek(2); - let return_data_len = U256::from(self.return_data.len()); - if source_offset.saturating_add(*size) > return_data_len { - return Err(vm::Error::OutOfBounds); - } - } - Self::copy_data_to_memory(&mut self.mem, &mut self.stack, &*self.return_data); - }, - instructions::CODECOPY => { - Self::copy_data_to_memory(&mut self.mem, &mut self.stack, &self.reader.code); - }, - instructions::EXTCODECOPY => { - let address = u256_to_address(&self.stack.pop_back()); - let code = ext.extcode(&address)?; - Self::copy_data_to_memory( - &mut self.mem, - &mut self.stack, - code.as_ref().map(|c| &(*c)[..]).unwrap_or(&[]) - ); - }, - instructions::GASPRICE => { - self.stack.push(self.params.gas_price.clone()); - }, - 
instructions::BLOCKHASH => { - let block_number = self.stack.pop_back(); - let block_hash = ext.blockhash(&block_number); - self.stack.push(U256::from(&*block_hash)); - }, - instructions::COINBASE => { - self.stack.push(address_to_u256(ext.env_info().author.clone())); - }, - instructions::TIMESTAMP => { - self.stack.push(U256::from(ext.env_info().timestamp)); - }, - instructions::NUMBER => { - self.stack.push(U256::from(ext.env_info().number)); - }, - instructions::DIFFICULTY => { - self.stack.push(ext.env_info().difficulty.clone()); - }, - instructions::GASLIMIT => { - self.stack.push(ext.env_info().gas_limit.clone()); - }, - instructions::CHAINID => { - self.stack.push(ext.chain_id().into()) - }, - instructions::SELFBALANCE => { - self.stack.push(ext.balance(&self.params.address)?); - } - - // Stack instructions - - instructions::DUP1 | instructions::DUP2 | instructions::DUP3 | instructions::DUP4 | - instructions::DUP5 | instructions::DUP6 | instructions::DUP7 | instructions::DUP8 | - instructions::DUP9 | instructions::DUP10 | instructions::DUP11 | instructions::DUP12 | - instructions::DUP13 | instructions::DUP14 | instructions::DUP15 | instructions::DUP16 => { - let position = instruction.dup_position().expect("dup_position always return some for DUP* instructions"); - let val = self.stack.peek(position).clone(); - self.stack.push(val); - }, - instructions::SWAP1 | instructions::SWAP2 | instructions::SWAP3 | instructions::SWAP4 | - instructions::SWAP5 | instructions::SWAP6 | instructions::SWAP7 | instructions::SWAP8 | - instructions::SWAP9 | instructions::SWAP10 | instructions::SWAP11 | instructions::SWAP12 | - instructions::SWAP13 | instructions::SWAP14 | instructions::SWAP15 | instructions::SWAP16 => { - let position = instruction.swap_position().expect("swap_position always return some for SWAP* instructions"); - self.stack.swap_with_top(position) - }, - instructions::POP => { - self.stack.pop_back(); - }, - instructions::ADD => { - let a = 
self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(a.overflowing_add(b).0); - }, - instructions::MUL => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(a.overflowing_mul(b).0); - }, - instructions::SUB => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(a.overflowing_sub(b).0); - }, - instructions::DIV => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(if !b.is_zero() { - match b { - ONE => a, - TWO => a >> 1, - TWO_POW_5 => a >> 5, - TWO_POW_8 => a >> 8, - TWO_POW_16 => a >> 16, - TWO_POW_24 => a >> 24, - TWO_POW_64 => a >> 64, - TWO_POW_96 => a >> 96, - TWO_POW_224 => a >> 224, - TWO_POW_248 => a >> 248, - _ => a / b, - } - } else { - U256::zero() - }); - }, - instructions::MOD => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(if !b.is_zero() { - a % b - } else { - U256::zero() - }); - }, - instructions::SDIV => { - let (a, sign_a) = get_and_reset_sign(self.stack.pop_back()); - let (b, sign_b) = get_and_reset_sign(self.stack.pop_back()); - - // -2^255 - let min = (U256::one() << 255) - U256::one(); - self.stack.push(if b.is_zero() { - U256::zero() - } else if a == min && b == !U256::zero() { - min - } else { - let c = a / b; - set_sign(c, sign_a ^ sign_b) - }); - }, - instructions::SMOD => { - let ua = self.stack.pop_back(); - let ub = self.stack.pop_back(); - let (a, sign_a) = get_and_reset_sign(ua); - let b = get_and_reset_sign(ub).0; - - self.stack.push(if !b.is_zero() { - let c = a % b; - set_sign(c, sign_a) - } else { - U256::zero() - }); - }, - instructions::EXP => { - let base = self.stack.pop_back(); - let expon = self.stack.pop_back(); - let res = base.overflowing_pow(expon).0; - self.stack.push(res); - }, - instructions::NOT => { - let a = self.stack.pop_back(); - self.stack.push(!a); - }, - instructions::LT => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - 
self.stack.push(Self::bool_to_u256(a < b)); - }, - instructions::SLT => { - let (a, neg_a) = get_and_reset_sign(self.stack.pop_back()); - let (b, neg_b) = get_and_reset_sign(self.stack.pop_back()); - - let is_positive_lt = a < b && !(neg_a | neg_b); - let is_negative_lt = a > b && (neg_a & neg_b); - let has_different_signs = neg_a && !neg_b; - - self.stack.push(Self::bool_to_u256(is_positive_lt | is_negative_lt | has_different_signs)); - }, - instructions::GT => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(Self::bool_to_u256(a > b)); - }, - instructions::SGT => { - let (a, neg_a) = get_and_reset_sign(self.stack.pop_back()); - let (b, neg_b) = get_and_reset_sign(self.stack.pop_back()); - - let is_positive_gt = a > b && !(neg_a | neg_b); - let is_negative_gt = a < b && (neg_a & neg_b); - let has_different_signs = !neg_a && neg_b; - - self.stack.push(Self::bool_to_u256(is_positive_gt | is_negative_gt | has_different_signs)); - }, - instructions::EQ => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(Self::bool_to_u256(a == b)); - }, - instructions::ISZERO => { - let a = self.stack.pop_back(); - self.stack.push(Self::bool_to_u256(a.is_zero())); - }, - instructions::AND => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(a & b); - }, - instructions::OR => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(a | b); - }, - instructions::XOR => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - self.stack.push(a ^ b); - }, - instructions::BYTE => { - let word = self.stack.pop_back(); - let val = self.stack.pop_back(); - let byte = match word < U256::from(32) { - true => (val >> (8 * (31 - word.low_u64() as usize))) & U256::from(0xff), - false => U256::zero() - }; - self.stack.push(byte); - }, - instructions::ADDMOD => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - let c = 
self.stack.pop_back(); - - self.stack.push(if !c.is_zero() { - let a_num = to_biguint(a); - let b_num = to_biguint(b); - let c_num = to_biguint(c); - let res = a_num + b_num; - let x = res % c_num; - from_biguint(x) - } else { - U256::zero() - }); - }, - instructions::MULMOD => { - let a = self.stack.pop_back(); - let b = self.stack.pop_back(); - let c = self.stack.pop_back(); - - self.stack.push(if !c.is_zero() { - let a_num = to_biguint(a); - let b_num = to_biguint(b); - let c_num = to_biguint(c); - let res = a_num * b_num; - let x = res % c_num; - from_biguint(x) - } else { - U256::zero() - }); - }, - instructions::SIGNEXTEND => { - let bit = self.stack.pop_back(); - if bit < U256::from(32) { - let number = self.stack.pop_back(); - let bit_position = (bit.low_u64() * 8 + 7) as usize; - - let bit = number.bit(bit_position); - let mask = (U256::one() << bit_position) - U256::one(); - self.stack.push(if bit { - number | !mask - } else { - number & mask - }); - } - }, - instructions::SHL => { - const CONST_256: U256 = U256([256, 0, 0, 0]); - - let shift = self.stack.pop_back(); - let value = self.stack.pop_back(); - - let result = if shift >= CONST_256 { - U256::zero() - } else { - value << (shift.as_u32() as usize) - }; - self.stack.push(result); - }, - instructions::SHR => { - const CONST_256: U256 = U256([256, 0, 0, 0]); - - let shift = self.stack.pop_back(); - let value = self.stack.pop_back(); - - let result = if shift >= CONST_256 { - U256::zero() - } else { - value >> (shift.as_u32() as usize) - }; - self.stack.push(result); - }, - instructions::SAR => { - // We cannot use get_and_reset_sign/set_sign here, because the rounding looks different. 
- - const CONST_256: U256 = U256([256, 0, 0, 0]); - const CONST_HIBIT: U256 = U256([0, 0, 0, 0x8000000000000000]); - - let shift = self.stack.pop_back(); - let value = self.stack.pop_back(); - let sign = value & CONST_HIBIT != U256::zero(); - - let result = if shift >= CONST_256 { - if sign { - U256::max_value() - } else { - U256::zero() - } - } else { - let shift = shift.as_u32() as usize; - let mut shifted = value >> shift; - if sign { - shifted = shifted | (U256::max_value() << (256 - shift)); - } - shifted - }; - self.stack.push(result); - }, - }; - Ok(InstructionResult::Ok) - } - - fn copy_data_to_memory(mem: &mut Vec, stack: &mut Stack, source: &[u8]) { - let dest_offset = stack.pop_back(); - let source_offset = stack.pop_back(); - let size = stack.pop_back(); - let source_size = U256::from(source.len()); - - let output_end = match source_offset > source_size || size > source_size || source_offset + size > source_size { - true => { - let zero_slice = if source_offset > source_size { - mem.writeable_slice(dest_offset, size) - } else { - mem.writeable_slice(dest_offset + source_size - source_offset, source_offset + size - source_size) - }; - for i in zero_slice.iter_mut() { - *i = 0; - } - source.len() - }, - false => (size.low_u64() + source_offset.low_u64()) as usize - }; - - if source_offset < source_size { - let output_begin = source_offset.low_u64() as usize; - mem.write_slice(dest_offset, &source[output_begin..output_end]); - } - } - - fn verify_jump(&self, jump_u: U256, valid_jump_destinations: &BitSet) -> vm::Result { - let jump = jump_u.low_u64() as usize; - - if valid_jump_destinations.contains(jump) && U256::from(jump) == jump_u { - Ok(jump) - } else { - Err(vm::Error::BadJumpDestination { - destination: jump - }) - } - } - - fn bool_to_u256(val: bool) -> U256 { - if val { - U256::one() - } else { - U256::zero() - } - } + /// Create a new `Interpreter` instance with shared cache. 
+ pub fn new( + mut params: ActionParams, + cache: Arc, + schedule: &Schedule, + depth: usize, + ) -> Interpreter { + let reader = CodeReader::new(params.code.take().expect("VM always called with code; qed")); + let params = InterpreterParams::from(params); + let informant = informant::EvmInformant::new(depth); + let valid_jump_destinations = None; + let gasometer = Cost::from_u256(params.gas) + .ok() + .map(|gas| Gasometer::::new(gas)); + let stack = VecStack::with_capacity(schedule.stack_limit, U256::zero()); + + Interpreter { + cache, + params, + reader, + informant, + valid_jump_destinations, + gasometer, + stack, + done: false, + // Overridden in `step_inner` based on + // the result of `ext.trace_next_instruction`. + do_trace: true, + mem: Vec::new(), + return_data: ReturnData::empty(), + last_stack_ret_len: 0, + resume_output_range: None, + resume_result: None, + _type: PhantomData, + } + } + + /// Execute a single step on the VM. + #[inline(always)] + pub fn step(&mut self, ext: &mut vm::Ext) -> InterpreterResult { + if self.done { + return InterpreterResult::Stopped; + } + + let result = if self.gasometer.is_none() { + InterpreterResult::Done(Err(vm::Error::OutOfGas)) + } else if self.reader.len() == 0 { + let current_gas = self + .gasometer + .as_ref() + .expect("Gasometer None case is checked above; qed") + .current_gas + .as_u256(); + InterpreterResult::Done(Ok(GasLeft::Known(current_gas))) + } else { + self.step_inner(ext) + }; + + if let &InterpreterResult::Done(_) = &result { + self.done = true; + self.informant.done(); + } + result + } + + /// Inner helper function for step. 
+ #[inline(always)] + fn step_inner(&mut self, ext: &mut dyn vm::Ext) -> InterpreterResult { + let result = match self.resume_result.take() { + Some(result) => result, + None => { + let opcode = self.reader.code[self.reader.position]; + let instruction = Instruction::from_u8(opcode); + self.reader.position += 1; + + // TODO: make compile-time removable if too much of a performance hit. + self.do_trace = self.do_trace + && ext.trace_next_instruction( + self.reader.position - 1, + opcode, + self.gasometer + .as_mut() + .expect(GASOMETER_PROOF) + .current_gas + .as_u256(), + ); + + let instruction = match instruction { + Some(i) => i, + None => { + return InterpreterResult::Done(Err(vm::Error::BadInstruction { + instruction: opcode, + })) + } + }; + + let info = instruction.info(); + self.last_stack_ret_len = info.ret; + if let Err(e) = self.verify_instruction(ext, instruction, info) { + return InterpreterResult::Done(Err(e)); + }; + + // Calculate gas cost + let requirements = match self + .gasometer + .as_mut() + .expect(GASOMETER_PROOF) + .requirements(ext, instruction, info, &self.stack, self.mem.size()) + { + Ok(t) => t, + Err(e) => return InterpreterResult::Done(Err(e)), + }; + if self.do_trace { + ext.trace_prepare_execute( + self.reader.position - 1, + opcode, + requirements.gas_cost.as_u256(), + Self::mem_written(instruction, &self.stack), + Self::store_written(instruction, &self.stack), + ); + } + if let Err(e) = self + .gasometer + .as_mut() + .expect(GASOMETER_PROOF) + .verify_gas(&requirements.gas_cost) + { + if self.do_trace { + ext.trace_failed(); + } + return InterpreterResult::Done(Err(e)); + } + self.mem.expand(requirements.memory_required_size); + self.gasometer + .as_mut() + .expect(GASOMETER_PROOF) + .current_mem_gas = requirements.memory_total_gas; + self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas = + self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas + - requirements.gas_cost; + + evm_debug!({ + 
self.informant.before_instruction( + self.reader.position, + instruction, + info, + &self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas, + &self.stack, + ) + }); + + // Execute instruction + let current_gas = self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas; + let result = match self.exec_instruction( + current_gas, + ext, + instruction, + requirements.provide_gas, + ) { + Err(x) => { + if self.do_trace { + ext.trace_failed(); + } + return InterpreterResult::Done(Err(x)); + } + Ok(x) => x, + }; + evm_debug!({ self.informant.after_instruction(instruction) }); + result + } + }; + + if let InstructionResult::Trap(trap) = result { + return InterpreterResult::Trap(trap); + } + + if let InstructionResult::UnusedGas(ref gas) = result { + self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas = + self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas + *gas; + } + + if self.do_trace { + ext.trace_executed( + self.gasometer + .as_mut() + .expect(GASOMETER_PROOF) + .current_gas + .as_u256(), + self.stack.peek_top(self.last_stack_ret_len), + &self.mem, + ); + } + + // Advance + match result { + InstructionResult::JumpToPosition(position) => { + if self.valid_jump_destinations.is_none() { + self.valid_jump_destinations = Some( + self.cache + .jump_destinations(&self.params.code_hash, &self.reader.code), + ); + } + let jump_destinations = self + .valid_jump_destinations + .as_ref() + .expect("jump_destinations are initialized on first jump; qed"); + let pos = match self.verify_jump(position, jump_destinations) { + Ok(x) => x, + Err(e) => return InterpreterResult::Done(Err(e)), + }; + self.reader.position = pos; + } + InstructionResult::StopExecutionNeedsReturn { + gas, + init_off, + init_size, + apply, + } => { + let mem = mem::replace(&mut self.mem, Vec::new()); + return InterpreterResult::Done(Ok(GasLeft::NeedsReturn { + gas_left: gas.as_u256(), + data: mem.into_return_data(init_off, init_size), + apply_state: apply, + })); + } + 
InstructionResult::StopExecution => { + return InterpreterResult::Done(Ok(GasLeft::Known( + self.gasometer + .as_mut() + .expect(GASOMETER_PROOF) + .current_gas + .as_u256(), + ))); + } + _ => {} + } + + if self.reader.position >= self.reader.len() { + return InterpreterResult::Done(Ok(GasLeft::Known( + self.gasometer + .as_mut() + .expect(GASOMETER_PROOF) + .current_gas + .as_u256(), + ))); + } + + InterpreterResult::Continue + } + + fn verify_instruction( + &self, + ext: &vm::Ext, + instruction: Instruction, + info: &InstructionInfo, + ) -> vm::Result<()> { + let schedule = ext.schedule(); + + if (instruction == instructions::DELEGATECALL && !schedule.have_delegate_call) + || (instruction == instructions::CREATE2 && !schedule.have_create2) + || (instruction == instructions::STATICCALL && !schedule.have_static_call) + || ((instruction == instructions::RETURNDATACOPY + || instruction == instructions::RETURNDATASIZE) + && !schedule.have_return_data) + || (instruction == instructions::REVERT && !schedule.have_revert) + || ((instruction == instructions::SHL + || instruction == instructions::SHR + || instruction == instructions::SAR) + && !schedule.have_bitwise_shifting) + || (instruction == instructions::EXTCODEHASH && !schedule.have_extcodehash) + || (instruction == instructions::CHAINID && !schedule.have_chain_id) + || (instruction == instructions::SELFBALANCE && !schedule.have_selfbalance) + { + return Err(vm::Error::BadInstruction { + instruction: instruction as u8, + }); + } + + if !self.stack.has(info.args) { + Err(vm::Error::StackUnderflow { + instruction: info.name, + wanted: info.args, + on_stack: self.stack.size(), + }) + } else if self.stack.size() - info.args + info.ret > schedule.stack_limit { + Err(vm::Error::OutOfStack { + instruction: info.name, + wanted: info.ret - info.args, + limit: schedule.stack_limit, + }) + } else { + Ok(()) + } + } + + fn mem_written(instruction: Instruction, stack: &Stack) -> Option<(usize, usize)> { + let read = |pos| 
stack.peek(pos).low_u64() as usize; + let written = match instruction { + instructions::MSTORE | instructions::MLOAD => Some((read(0), 32)), + instructions::MSTORE8 => Some((read(0), 1)), + instructions::CALLDATACOPY | instructions::CODECOPY | instructions::RETURNDATACOPY => { + Some((read(0), read(2))) + } + instructions::EXTCODECOPY => Some((read(1), read(3))), + instructions::CALL | instructions::CALLCODE => Some((read(5), read(6))), + instructions::DELEGATECALL | instructions::STATICCALL => Some((read(4), read(5))), + _ => None, + }; + + match written { + Some((offset, size)) if !memory::is_valid_range(offset, size) => None, + written => written, + } + } + + fn store_written(instruction: Instruction, stack: &Stack) -> Option<(U256, U256)> { + match instruction { + instructions::SSTORE => Some((stack.peek(0).clone(), stack.peek(1).clone())), + _ => None, + } + } + + fn exec_instruction( + &mut self, + gas: Cost, + ext: &mut vm::Ext, + instruction: Instruction, + provided: Option, + ) -> vm::Result> { + match instruction { + instructions::JUMP => { + let jump = self.stack.pop_back(); + return Ok(InstructionResult::JumpToPosition(jump)); + } + instructions::JUMPI => { + let jump = self.stack.pop_back(); + let condition = self.stack.pop_back(); + if !condition.is_zero() { + return Ok(InstructionResult::JumpToPosition(jump)); + } + } + instructions::JUMPDEST => { + // ignore + } + instructions::CREATE | instructions::CREATE2 => { + let endowment = self.stack.pop_back(); + let init_off = self.stack.pop_back(); + let init_size = self.stack.pop_back(); + let address_scheme = match instruction { + instructions::CREATE => CreateContractAddress::FromSenderAndNonce, + instructions::CREATE2 => CreateContractAddress::FromSenderSaltAndCodeHash( + self.stack.pop_back().into(), + ), + _ => unreachable!("instruction can only be CREATE/CREATE2 checked above; qed"), + }; + + let create_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; 
`gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is `CREATE`; qed"); + + if ext.is_static() { + return Err(vm::Error::MutableCallInStaticContext); + } + + // clear return data buffer before creating new call frame. + self.return_data = ReturnData::empty(); + + let can_create = ext.balance(&self.params.address)? >= endowment + && ext.depth() < ext.schedule().max_depth; + if !can_create { + self.stack.push(U256::zero()); + return Ok(InstructionResult::UnusedGas(create_gas)); + } + + let contract_code = self.mem.read_slice(init_off, init_size); + + let create_result = ext.create( + &create_gas.as_u256(), + &endowment, + contract_code, + address_scheme, + true, + ); + return match create_result { + Ok(ContractCreateResult::Created(address, gas_left)) => { + self.stack.push(address_to_u256(address)); + Ok(InstructionResult::UnusedGas( + Cost::from_u256(gas_left).expect("Gas left cannot be greater."), + )) + } + Ok(ContractCreateResult::Reverted(gas_left, return_data)) => { + self.stack.push(U256::zero()); + self.return_data = return_data; + Ok(InstructionResult::UnusedGas( + Cost::from_u256(gas_left).expect("Gas left cannot be greater."), + )) + } + Ok(ContractCreateResult::Failed) => { + self.stack.push(U256::zero()); + Ok(InstructionResult::Ok) + } + Err(trap) => Ok(InstructionResult::Trap(trap)), + }; + } + instructions::CALL + | instructions::CALLCODE + | instructions::DELEGATECALL + | instructions::STATICCALL => { + assert!( + ext.schedule().call_value_transfer_gas > ext.schedule().call_stipend, + "overflow possible" + ); + + self.stack.pop_back(); + let call_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is one of `CALL`/`CALLCODE`/`DELEGATECALL`; qed"); + let code_address = self.stack.pop_back(); + let code_address = u256_to_address(&code_address); + + let 
value = if instruction == instructions::DELEGATECALL { + None + } else if instruction == instructions::STATICCALL { + Some(U256::zero()) + } else { + Some(self.stack.pop_back()) + }; + + let in_off = self.stack.pop_back(); + let in_size = self.stack.pop_back(); + let out_off = self.stack.pop_back(); + let out_size = self.stack.pop_back(); + + // Add stipend (only CALL|CALLCODE when value > 0) + let call_gas = call_gas + .overflow_add(value.map_or_else( + || Cost::from(0), + |val| match val.is_zero() { + false => Cost::from(ext.schedule().call_stipend), + true => Cost::from(0), + }, + )) + .0; + + // Get sender & receive addresses, check if we have balance + let (sender_address, receive_address, has_balance, call_type) = match instruction { + instructions::CALL => { + if ext.is_static() && value.map_or(false, |v| !v.is_zero()) { + return Err(vm::Error::MutableCallInStaticContext); + } + let has_balance = ext.balance(&self.params.address)? + >= value.expect("value set for all but delegate call; qed"); + ( + &self.params.address, + &code_address, + has_balance, + CallType::Call, + ) + } + instructions::CALLCODE => { + let has_balance = ext.balance(&self.params.address)? + >= value.expect("value set for all but delegate call; qed"); + ( + &self.params.address, + &self.params.address, + has_balance, + CallType::CallCode, + ) + } + instructions::DELEGATECALL => ( + &self.params.sender, + &self.params.address, + true, + CallType::DelegateCall, + ), + instructions::STATICCALL => ( + &self.params.address, + &code_address, + true, + CallType::StaticCall, + ), + _ => panic!(format!( + "Unexpected instruction {:?} in CALL branch.", + instruction + )), + }; + + // clear return data buffer before creating new call frame. 
+ self.return_data = ReturnData::empty(); + + let can_call = has_balance && ext.depth() < ext.schedule().max_depth; + if !can_call { + self.stack.push(U256::zero()); + return Ok(InstructionResult::UnusedGas(call_gas)); + } + + let call_result = { + let input = self.mem.read_slice(in_off, in_size); + ext.call( + &call_gas.as_u256(), + sender_address, + receive_address, + value, + input, + &code_address, + call_type, + true, + ) + }; + + self.resume_output_range = Some((out_off, out_size)); + + return match call_result { + Ok(MessageCallResult::Success(gas_left, data)) => { + let output = self.mem.writeable_slice(out_off, out_size); + let len = cmp::min(output.len(), data.len()); + (&mut output[..len]).copy_from_slice(&data[..len]); + + self.stack.push(U256::one()); + self.return_data = data; + Ok(InstructionResult::UnusedGas( + Cost::from_u256(gas_left) + .expect("Gas left cannot be greater than current one"), + )) + } + Ok(MessageCallResult::Reverted(gas_left, data)) => { + let output = self.mem.writeable_slice(out_off, out_size); + let len = cmp::min(output.len(), data.len()); + (&mut output[..len]).copy_from_slice(&data[..len]); + + self.stack.push(U256::zero()); + self.return_data = data; + Ok(InstructionResult::UnusedGas( + Cost::from_u256(gas_left) + .expect("Gas left cannot be greater than current one"), + )) + } + Ok(MessageCallResult::Failed) => { + self.stack.push(U256::zero()); + Ok(InstructionResult::Ok) + } + Err(trap) => Ok(InstructionResult::Trap(trap)), + }; + } + instructions::RETURN => { + let init_off = self.stack.pop_back(); + let init_size = self.stack.pop_back(); + + return Ok(InstructionResult::StopExecutionNeedsReturn { + gas: gas, + init_off: init_off, + init_size: init_size, + apply: true, + }); + } + instructions::REVERT => { + let init_off = self.stack.pop_back(); + let init_size = self.stack.pop_back(); + + return Ok(InstructionResult::StopExecutionNeedsReturn { + gas: gas, + init_off: init_off, + init_size: init_size, + apply: false, + 
}); + } + instructions::STOP => { + return Ok(InstructionResult::StopExecution); + } + instructions::SUICIDE => { + let address = self.stack.pop_back(); + ext.suicide(&u256_to_address(&address))?; + return Ok(InstructionResult::StopExecution); + } + instructions::LOG0 + | instructions::LOG1 + | instructions::LOG2 + | instructions::LOG3 + | instructions::LOG4 => { + let no_of_topics = instruction + .log_topics() + .expect("log_topics always return some for LOG* instructions; qed"); + + let offset = self.stack.pop_back(); + let size = self.stack.pop_back(); + let topics = self + .stack + .pop_n(no_of_topics) + .iter() + .map(H256::from) + .collect(); + ext.log(topics, self.mem.read_slice(offset, size))?; + } + instructions::PUSH1 + | instructions::PUSH2 + | instructions::PUSH3 + | instructions::PUSH4 + | instructions::PUSH5 + | instructions::PUSH6 + | instructions::PUSH7 + | instructions::PUSH8 + | instructions::PUSH9 + | instructions::PUSH10 + | instructions::PUSH11 + | instructions::PUSH12 + | instructions::PUSH13 + | instructions::PUSH14 + | instructions::PUSH15 + | instructions::PUSH16 + | instructions::PUSH17 + | instructions::PUSH18 + | instructions::PUSH19 + | instructions::PUSH20 + | instructions::PUSH21 + | instructions::PUSH22 + | instructions::PUSH23 + | instructions::PUSH24 + | instructions::PUSH25 + | instructions::PUSH26 + | instructions::PUSH27 + | instructions::PUSH28 + | instructions::PUSH29 + | instructions::PUSH30 + | instructions::PUSH31 + | instructions::PUSH32 => { + let bytes = instruction + .push_bytes() + .expect("push_bytes always return some for PUSH* instructions"); + let val = self.reader.read(bytes); + self.stack.push(val); + } + instructions::MLOAD => { + let word = self.mem.read(self.stack.pop_back()); + self.stack.push(U256::from(word)); + } + instructions::MSTORE => { + let offset = self.stack.pop_back(); + let word = self.stack.pop_back(); + Memory::write(&mut self.mem, offset, word); + } + instructions::MSTORE8 => { + let offset = 
self.stack.pop_back(); + let byte = self.stack.pop_back(); + self.mem.write_byte(offset, byte); + } + instructions::MSIZE => { + self.stack.push(U256::from(self.mem.size())); + } + instructions::SHA3 => { + let offset = self.stack.pop_back(); + let size = self.stack.pop_back(); + let k = keccak(self.mem.read_slice(offset, size)); + self.stack.push(U256::from(&*k)); + } + instructions::SLOAD => { + let key = H256::from(&self.stack.pop_back()); + let word = U256::from(&*ext.storage_at(&key)?); + self.stack.push(word); + } + instructions::SSTORE => { + let address = H256::from(&self.stack.pop_back()); + let val = self.stack.pop_back(); + + let current_val = U256::from(&*ext.storage_at(&address)?); + // Increase refund for clear + if ext.schedule().eip1283 { + let original_val = U256::from(&*ext.initial_storage_at(&address)?); + gasometer::handle_eip1283_sstore_clears_refund( + ext, + &original_val, + &current_val, + &val, + ); + } else { + if !current_val.is_zero() && val.is_zero() { + let sstore_clears_schedule = ext.schedule().sstore_refund_gas; + ext.add_sstore_refund(sstore_clears_schedule); + } + } + ext.set_storage(address, H256::from(&val))?; + } + instructions::PC => { + self.stack.push(U256::from(self.reader.position - 1)); + } + instructions::GAS => { + self.stack.push(gas.as_u256()); + } + instructions::ADDRESS => { + self.stack + .push(address_to_u256(self.params.address.clone())); + } + instructions::ORIGIN => { + self.stack.push(address_to_u256(self.params.origin.clone())); + } + instructions::BALANCE => { + let address = u256_to_address(&self.stack.pop_back()); + let balance = ext.balance(&address)?; + self.stack.push(balance); + } + instructions::CALLER => { + self.stack.push(address_to_u256(self.params.sender.clone())); + } + instructions::CALLVALUE => { + self.stack.push(match self.params.value { + ActionValue::Transfer(val) | ActionValue::Apparent(val) => val, + }); + } + instructions::CALLDATALOAD => { + let big_id = self.stack.pop_back(); + let id = 
big_id.low_u64() as usize; + let max = id.wrapping_add(32); + if let Some(data) = self.params.data.as_ref() { + let bound = cmp::min(data.len(), max); + if id < bound && big_id < U256::from(data.len()) { + let mut v = [0u8; 32]; + v[0..bound - id].clone_from_slice(&data[id..bound]); + self.stack.push(U256::from(&v[..])) + } else { + self.stack.push(U256::zero()) + } + } else { + self.stack.push(U256::zero()) + } + } + instructions::CALLDATASIZE => { + self.stack + .push(U256::from(self.params.data.as_ref().map_or(0, |l| l.len()))); + } + instructions::CODESIZE => { + self.stack.push(U256::from(self.reader.len())); + } + instructions::RETURNDATASIZE => self.stack.push(U256::from(self.return_data.len())), + instructions::EXTCODESIZE => { + let address = u256_to_address(&self.stack.pop_back()); + let len = ext.extcodesize(&address)?.unwrap_or(0); + self.stack.push(U256::from(len)); + } + instructions::EXTCODEHASH => { + let address = u256_to_address(&self.stack.pop_back()); + let hash = ext.extcodehash(&address)?.unwrap_or_else(H256::zero); + self.stack.push(U256::from(hash)); + } + instructions::CALLDATACOPY => { + Self::copy_data_to_memory( + &mut self.mem, + &mut self.stack, + &self + .params + .data + .as_ref() + .map_or_else(|| &[] as &[u8], |d| &*d as &[u8]), + ); + } + instructions::RETURNDATACOPY => { + { + let source_offset = self.stack.peek(1); + let size = self.stack.peek(2); + let return_data_len = U256::from(self.return_data.len()); + if source_offset.saturating_add(*size) > return_data_len { + return Err(vm::Error::OutOfBounds); + } + } + Self::copy_data_to_memory(&mut self.mem, &mut self.stack, &*self.return_data); + } + instructions::CODECOPY => { + Self::copy_data_to_memory(&mut self.mem, &mut self.stack, &self.reader.code); + } + instructions::EXTCODECOPY => { + let address = u256_to_address(&self.stack.pop_back()); + let code = ext.extcode(&address)?; + Self::copy_data_to_memory( + &mut self.mem, + &mut self.stack, + code.as_ref().map(|c| 
&(*c)[..]).unwrap_or(&[]), + ); + } + instructions::GASPRICE => { + self.stack.push(self.params.gas_price.clone()); + } + instructions::BLOCKHASH => { + let block_number = self.stack.pop_back(); + let block_hash = ext.blockhash(&block_number); + self.stack.push(U256::from(&*block_hash)); + } + instructions::COINBASE => { + self.stack + .push(address_to_u256(ext.env_info().author.clone())); + } + instructions::TIMESTAMP => { + self.stack.push(U256::from(ext.env_info().timestamp)); + } + instructions::NUMBER => { + self.stack.push(U256::from(ext.env_info().number)); + } + instructions::DIFFICULTY => { + self.stack.push(ext.env_info().difficulty.clone()); + } + instructions::GASLIMIT => { + self.stack.push(ext.env_info().gas_limit.clone()); + } + instructions::CHAINID => self.stack.push(ext.chain_id().into()), + instructions::SELFBALANCE => { + self.stack.push(ext.balance(&self.params.address)?); + } + + // Stack instructions + instructions::DUP1 + | instructions::DUP2 + | instructions::DUP3 + | instructions::DUP4 + | instructions::DUP5 + | instructions::DUP6 + | instructions::DUP7 + | instructions::DUP8 + | instructions::DUP9 + | instructions::DUP10 + | instructions::DUP11 + | instructions::DUP12 + | instructions::DUP13 + | instructions::DUP14 + | instructions::DUP15 + | instructions::DUP16 => { + let position = instruction + .dup_position() + .expect("dup_position always return some for DUP* instructions"); + let val = self.stack.peek(position).clone(); + self.stack.push(val); + } + instructions::SWAP1 + | instructions::SWAP2 + | instructions::SWAP3 + | instructions::SWAP4 + | instructions::SWAP5 + | instructions::SWAP6 + | instructions::SWAP7 + | instructions::SWAP8 + | instructions::SWAP9 + | instructions::SWAP10 + | instructions::SWAP11 + | instructions::SWAP12 + | instructions::SWAP13 + | instructions::SWAP14 + | instructions::SWAP15 + | instructions::SWAP16 => { + let position = instruction + .swap_position() + .expect("swap_position always return some for 
SWAP* instructions"); + self.stack.swap_with_top(position) + } + instructions::POP => { + self.stack.pop_back(); + } + instructions::ADD => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a.overflowing_add(b).0); + } + instructions::MUL => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a.overflowing_mul(b).0); + } + instructions::SUB => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a.overflowing_sub(b).0); + } + instructions::DIV => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(if !b.is_zero() { + match b { + ONE => a, + TWO => a >> 1, + TWO_POW_5 => a >> 5, + TWO_POW_8 => a >> 8, + TWO_POW_16 => a >> 16, + TWO_POW_24 => a >> 24, + TWO_POW_64 => a >> 64, + TWO_POW_96 => a >> 96, + TWO_POW_224 => a >> 224, + TWO_POW_248 => a >> 248, + _ => a / b, + } + } else { + U256::zero() + }); + } + instructions::MOD => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack + .push(if !b.is_zero() { a % b } else { U256::zero() }); + } + instructions::SDIV => { + let (a, sign_a) = get_and_reset_sign(self.stack.pop_back()); + let (b, sign_b) = get_and_reset_sign(self.stack.pop_back()); + + // -2^255 + let min = (U256::one() << 255) - U256::one(); + self.stack.push(if b.is_zero() { + U256::zero() + } else if a == min && b == !U256::zero() { + min + } else { + let c = a / b; + set_sign(c, sign_a ^ sign_b) + }); + } + instructions::SMOD => { + let ua = self.stack.pop_back(); + let ub = self.stack.pop_back(); + let (a, sign_a) = get_and_reset_sign(ua); + let b = get_and_reset_sign(ub).0; + + self.stack.push(if !b.is_zero() { + let c = a % b; + set_sign(c, sign_a) + } else { + U256::zero() + }); + } + instructions::EXP => { + let base = self.stack.pop_back(); + let expon = self.stack.pop_back(); + let res = base.overflowing_pow(expon).0; + self.stack.push(res); + } + instructions::NOT => { + let a = 
self.stack.pop_back(); + self.stack.push(!a); + } + instructions::LT => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(Self::bool_to_u256(a < b)); + } + instructions::SLT => { + let (a, neg_a) = get_and_reset_sign(self.stack.pop_back()); + let (b, neg_b) = get_and_reset_sign(self.stack.pop_back()); + + let is_positive_lt = a < b && !(neg_a | neg_b); + let is_negative_lt = a > b && (neg_a & neg_b); + let has_different_signs = neg_a && !neg_b; + + self.stack.push(Self::bool_to_u256( + is_positive_lt | is_negative_lt | has_different_signs, + )); + } + instructions::GT => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(Self::bool_to_u256(a > b)); + } + instructions::SGT => { + let (a, neg_a) = get_and_reset_sign(self.stack.pop_back()); + let (b, neg_b) = get_and_reset_sign(self.stack.pop_back()); + + let is_positive_gt = a > b && !(neg_a | neg_b); + let is_negative_gt = a < b && (neg_a & neg_b); + let has_different_signs = !neg_a && neg_b; + + self.stack.push(Self::bool_to_u256( + is_positive_gt | is_negative_gt | has_different_signs, + )); + } + instructions::EQ => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(Self::bool_to_u256(a == b)); + } + instructions::ISZERO => { + let a = self.stack.pop_back(); + self.stack.push(Self::bool_to_u256(a.is_zero())); + } + instructions::AND => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a & b); + } + instructions::OR => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a | b); + } + instructions::XOR => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a ^ b); + } + instructions::BYTE => { + let word = self.stack.pop_back(); + let val = self.stack.pop_back(); + let byte = match word < U256::from(32) { + true => (val >> (8 * (31 - word.low_u64() as usize))) & U256::from(0xff), + false => U256::zero(), + }; 
+ self.stack.push(byte); + } + instructions::ADDMOD => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + let c = self.stack.pop_back(); + + self.stack.push(if !c.is_zero() { + let a_num = to_biguint(a); + let b_num = to_biguint(b); + let c_num = to_biguint(c); + let res = a_num + b_num; + let x = res % c_num; + from_biguint(x) + } else { + U256::zero() + }); + } + instructions::MULMOD => { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + let c = self.stack.pop_back(); + + self.stack.push(if !c.is_zero() { + let a_num = to_biguint(a); + let b_num = to_biguint(b); + let c_num = to_biguint(c); + let res = a_num * b_num; + let x = res % c_num; + from_biguint(x) + } else { + U256::zero() + }); + } + instructions::SIGNEXTEND => { + let bit = self.stack.pop_back(); + if bit < U256::from(32) { + let number = self.stack.pop_back(); + let bit_position = (bit.low_u64() * 8 + 7) as usize; + + let bit = number.bit(bit_position); + let mask = (U256::one() << bit_position) - U256::one(); + self.stack + .push(if bit { number | !mask } else { number & mask }); + } + } + instructions::SHL => { + const CONST_256: U256 = U256([256, 0, 0, 0]); + + let shift = self.stack.pop_back(); + let value = self.stack.pop_back(); + + let result = if shift >= CONST_256 { + U256::zero() + } else { + value << (shift.as_u32() as usize) + }; + self.stack.push(result); + } + instructions::SHR => { + const CONST_256: U256 = U256([256, 0, 0, 0]); + + let shift = self.stack.pop_back(); + let value = self.stack.pop_back(); + + let result = if shift >= CONST_256 { + U256::zero() + } else { + value >> (shift.as_u32() as usize) + }; + self.stack.push(result); + } + instructions::SAR => { + // We cannot use get_and_reset_sign/set_sign here, because the rounding looks different. 
+ + const CONST_256: U256 = U256([256, 0, 0, 0]); + const CONST_HIBIT: U256 = U256([0, 0, 0, 0x8000000000000000]); + + let shift = self.stack.pop_back(); + let value = self.stack.pop_back(); + let sign = value & CONST_HIBIT != U256::zero(); + + let result = if shift >= CONST_256 { + if sign { + U256::max_value() + } else { + U256::zero() + } + } else { + let shift = shift.as_u32() as usize; + let mut shifted = value >> shift; + if sign { + shifted = shifted | (U256::max_value() << (256 - shift)); + } + shifted + }; + self.stack.push(result); + } + }; + Ok(InstructionResult::Ok) + } + + fn copy_data_to_memory(mem: &mut Vec, stack: &mut Stack, source: &[u8]) { + let dest_offset = stack.pop_back(); + let source_offset = stack.pop_back(); + let size = stack.pop_back(); + let source_size = U256::from(source.len()); + + let output_end = match source_offset > source_size + || size > source_size + || source_offset + size > source_size + { + true => { + let zero_slice = if source_offset > source_size { + mem.writeable_slice(dest_offset, size) + } else { + mem.writeable_slice( + dest_offset + source_size - source_offset, + source_offset + size - source_size, + ) + }; + for i in zero_slice.iter_mut() { + *i = 0; + } + source.len() + } + false => (size.low_u64() + source_offset.low_u64()) as usize, + }; + + if source_offset < source_size { + let output_begin = source_offset.low_u64() as usize; + mem.write_slice(dest_offset, &source[output_begin..output_end]); + } + } + + fn verify_jump(&self, jump_u: U256, valid_jump_destinations: &BitSet) -> vm::Result { + let jump = jump_u.low_u64() as usize; + + if valid_jump_destinations.contains(jump) && U256::from(jump) == jump_u { + Ok(jump) + } else { + Err(vm::Error::BadJumpDestination { destination: jump }) + } + } + + fn bool_to_u256(val: bool) -> U256 { + if val { + U256::one() + } else { + U256::zero() + } + } } fn get_and_reset_sign(value: U256) -> (U256, bool) { - let U256(arr) = value; - let sign = arr[3].leading_zeros() == 0; 
- (set_sign(value, sign), sign) + let U256(arr) = value; + let sign = arr[3].leading_zeros() == 0; + (set_sign(value, sign), sign) } fn set_sign(value: U256, sign: bool) -> U256 { - if sign { - (!U256::zero() ^ value).overflowing_add(U256::one()).0 - } else { - value - } + if sign { + (!U256::zero() ^ value).overflowing_add(U256::one()).0 + } else { + value + } } #[inline] fn u256_to_address(value: &U256) -> Address { - Address::from(H256::from(value)) + Address::from(H256::from(value)) } #[inline] fn address_to_u256(value: Address) -> U256 { - U256::from(&*H256::from(value)) + U256::from(&*H256::from(value)) } #[cfg(test)] mod tests { - use std::sync::Arc; - use rustc_hex::FromHex; - use vmtype::VMType; - use factory::Factory; - use vm::{self, Exec, ActionParams, ActionValue}; - use vm::tests::{FakeExt, test_finalize}; + use factory::Factory; + use rustc_hex::FromHex; + use std::sync::Arc; + use vm::{ + self, + tests::{test_finalize, FakeExt}, + ActionParams, ActionValue, Exec, + }; + use vmtype::VMType; - fn interpreter(params: ActionParams, ext: &vm::Ext) -> Box { - Factory::new(VMType::Interpreter, 1).create(params, ext.schedule(), ext.depth()) - } + fn interpreter(params: ActionParams, ext: &vm::Ext) -> Box { + Factory::new(VMType::Interpreter, 1).create(params, ext.schedule(), ext.depth()) + } - #[test] - fn should_not_fail_on_tracing_mem() { - let code = "7feeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff006000527faaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffaa6020526000620f120660406000601773945304eb96065b2a98b57a48a06ae28d285a71b56101f4f1600055".from_hex().unwrap(); + #[test] + fn should_not_fail_on_tracing_mem() { + let code = "7feeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff006000527faaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffaa6020526000620f120660406000601773945304eb96065b2a98b57a48a06ae28d285a71b56101f4f1600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - 
params.address = 5.into(); - params.gas = 300_000.into(); - params.gas_price = 1.into(); - params.value = ActionValue::Transfer(100_000.into()); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); - ext.balances.insert(5.into(), 1_000_000_000.into()); - ext.tracing = true; + let mut params = ActionParams::default(); + params.address = 5.into(); + params.gas = 300_000.into(); + params.gas_price = 1.into(); + params.value = ActionValue::Transfer(100_000.into()); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); + ext.balances.insert(5.into(), 1_000_000_000.into()); + ext.tracing = true; - let gas_left = { - let mut vm = interpreter(params, &ext); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = interpreter(params, &ext); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(ext.calls.len(), 1); - assert_eq!(gas_left, 248_212.into()); - } + assert_eq!(ext.calls.len(), 1); + assert_eq!(gas_left, 248_212.into()); + } - #[test] - fn should_not_overflow_returndata() { - let code = "6001600160000360003e00".from_hex().unwrap(); + #[test] + fn should_not_overflow_returndata() { + let code = "6001600160000360003e00".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.address = 5.into(); - params.gas = 300_000.into(); - params.gas_price = 1.into(); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new_byzantium(); - ext.balances.insert(5.into(), 1_000_000_000.into()); - ext.tracing = true; + let mut params = ActionParams::default(); + params.address = 5.into(); + params.gas = 300_000.into(); + params.gas_price = 1.into(); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new_byzantium(); + ext.balances.insert(5.into(), 1_000_000_000.into()); + ext.tracing = true; - let err = { - let mut vm = interpreter(params, &ext); - test_finalize(vm.exec(&mut ext).ok().unwrap()).err().unwrap() - }; + let err = { + let mut vm = 
interpreter(params, &ext); + test_finalize(vm.exec(&mut ext).ok().unwrap()) + .err() + .unwrap() + }; - assert_eq!(err, ::vm::Error::OutOfBounds); - } + assert_eq!(err, ::vm::Error::OutOfBounds); + } } diff --git a/ethcore/evm/src/interpreter/shared_cache.rs b/ethcore/evm/src/interpreter/shared_cache.rs index da7c03efa..c74383341 100644 --- a/ethcore/evm/src/interpreter/shared_cache.rs +++ b/ethcore/evm/src/interpreter/shared_cache.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; +use super::super::instructions::{self, Instruction}; +use bit_set::BitSet; +use ethereum_types::H256; use hash::KECCAK_EMPTY; use heapsize::HeapSizeOf; -use ethereum_types::H256; -use parking_lot::Mutex; use memory_cache::MemoryLruCache; -use bit_set::BitSet; -use super::super::instructions::{self, Instruction}; +use parking_lot::Mutex; +use std::sync::Arc; const DEFAULT_CACHE_SIZE: usize = 4 * 1024 * 1024; @@ -29,84 +29,86 @@ const DEFAULT_CACHE_SIZE: usize = 4 * 1024 * 1024; struct Bits(Arc); impl HeapSizeOf for Bits { - fn heap_size_of_children(&self) -> usize { - // dealing in bits here - self.0.capacity() * 8 - } + fn heap_size_of_children(&self) -> usize { + // dealing in bits here + self.0.capacity() * 8 + } } /// Global cache for EVM interpreter pub struct SharedCache { - jump_destinations: Mutex>, + jump_destinations: Mutex>, } impl SharedCache { - /// Create a jump destinations cache with a maximum size in bytes - /// to cache. - pub fn new(max_size: usize) -> Self { - SharedCache { - jump_destinations: Mutex::new(MemoryLruCache::new(max_size)), - } - } + /// Create a jump destinations cache with a maximum size in bytes + /// to cache. + pub fn new(max_size: usize) -> Self { + SharedCache { + jump_destinations: Mutex::new(MemoryLruCache::new(max_size)), + } + } - /// Get jump destinations bitmap for a contract. 
- pub fn jump_destinations(&self, code_hash: &Option, code: &[u8]) -> Arc { - if let Some(ref code_hash) = code_hash { - if code_hash == &KECCAK_EMPTY { - return Self::find_jump_destinations(code); - } + /// Get jump destinations bitmap for a contract. + pub fn jump_destinations(&self, code_hash: &Option, code: &[u8]) -> Arc { + if let Some(ref code_hash) = code_hash { + if code_hash == &KECCAK_EMPTY { + return Self::find_jump_destinations(code); + } - if let Some(d) = self.jump_destinations.lock().get_mut(code_hash) { - return d.0.clone(); - } - } + if let Some(d) = self.jump_destinations.lock().get_mut(code_hash) { + return d.0.clone(); + } + } - let d = Self::find_jump_destinations(code); + let d = Self::find_jump_destinations(code); - if let Some(ref code_hash) = code_hash { - self.jump_destinations.lock().insert(*code_hash, Bits(d.clone())); - } + if let Some(ref code_hash) = code_hash { + self.jump_destinations + .lock() + .insert(*code_hash, Bits(d.clone())); + } - d - } + d + } - fn find_jump_destinations(code: &[u8]) -> Arc { - let mut jump_dests = BitSet::with_capacity(code.len()); - let mut position = 0; + fn find_jump_destinations(code: &[u8]) -> Arc { + let mut jump_dests = BitSet::with_capacity(code.len()); + let mut position = 0; - while position < code.len() { - let instruction = Instruction::from_u8(code[position]); + while position < code.len() { + let instruction = Instruction::from_u8(code[position]); - if let Some(instruction) = instruction { - if instruction == instructions::JUMPDEST { - jump_dests.insert(position); - } else if let Some(push_bytes) = instruction.push_bytes() { - position += push_bytes; - } - } - position += 1; - } + if let Some(instruction) = instruction { + if instruction == instructions::JUMPDEST { + jump_dests.insert(position); + } else if let Some(push_bytes) = instruction.push_bytes() { + position += push_bytes; + } + } + position += 1; + } - jump_dests.shrink_to_fit(); - Arc::new(jump_dests) - } + 
jump_dests.shrink_to_fit(); + Arc::new(jump_dests) + } } impl Default for SharedCache { - fn default() -> Self { - SharedCache::new(DEFAULT_CACHE_SIZE) - } + fn default() -> Self { + SharedCache::new(DEFAULT_CACHE_SIZE) + } } #[test] fn test_find_jump_destinations() { - use rustc_hex::FromHex; - // given - let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b01600055".from_hex().unwrap(); + use rustc_hex::FromHex; + // given + let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b01600055".from_hex().unwrap(); - // when - let valid_jump_destinations = SharedCache::find_jump_destinations(&code); + // when + let valid_jump_destinations = SharedCache::find_jump_destinations(&code); - // then - assert!(valid_jump_destinations.contains(66)); + // then + assert!(valid_jump_destinations.contains(66)); } diff --git a/ethcore/evm/src/interpreter/stack.rs b/ethcore/evm/src/interpreter/stack.rs index 87e14bdad..cc2e82310 100644 --- a/ethcore/evm/src/interpreter/stack.rs +++ b/ethcore/evm/src/interpreter/stack.rs @@ -14,80 +14,85 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt; use instructions; +use std::fmt; /// Stack trait with VM-friendly API pub trait Stack { - /// Returns `Stack[len(Stack) - no_from_top]` - fn peek(&self, no_from_top: usize) -> &T; - /// Swaps Stack[len(Stack)] and Stack[len(Stack) - no_from_top] - fn swap_with_top(&mut self, no_from_top: usize); - /// Returns true if Stack has at least `no_of_elems` elements - fn has(&self, no_of_elems: usize) -> bool; - /// Get element from top and remove it from Stack. Panics if stack is empty. - fn pop_back(&mut self) -> T; - /// Get (up to `instructions::MAX_NO_OF_TOPICS`) elements from top and remove them from Stack. Panics if stack is empty. 
- fn pop_n(&mut self, no_of_elems: usize) -> &[T]; - /// Add element on top of the Stack - fn push(&mut self, elem: T); - /// Get number of elements on Stack - fn size(&self) -> usize; - /// Returns all data on stack. - fn peek_top(&self, no_of_elems: usize) -> &[T]; + /// Returns `Stack[len(Stack) - no_from_top]` + fn peek(&self, no_from_top: usize) -> &T; + /// Swaps Stack[len(Stack)] and Stack[len(Stack) - no_from_top] + fn swap_with_top(&mut self, no_from_top: usize); + /// Returns true if Stack has at least `no_of_elems` elements + fn has(&self, no_of_elems: usize) -> bool; + /// Get element from top and remove it from Stack. Panics if stack is empty. + fn pop_back(&mut self) -> T; + /// Get (up to `instructions::MAX_NO_OF_TOPICS`) elements from top and remove them from Stack. Panics if stack is empty. + fn pop_n(&mut self, no_of_elems: usize) -> &[T]; + /// Add element on top of the Stack + fn push(&mut self, elem: T); + /// Get number of elements on Stack + fn size(&self) -> usize; + /// Returns all data on stack. 
+ fn peek_top(&self, no_of_elems: usize) -> &[T]; } pub struct VecStack { - stack: Vec, - logs: [S; instructions::MAX_NO_OF_TOPICS] + stack: Vec, + logs: [S; instructions::MAX_NO_OF_TOPICS], } -impl VecStack { - pub fn with_capacity(capacity: usize, zero: S) -> Self { - VecStack { - stack: Vec::with_capacity(capacity), - logs: [zero; instructions::MAX_NO_OF_TOPICS] - } - } +impl VecStack { + pub fn with_capacity(capacity: usize, zero: S) -> Self { + VecStack { + stack: Vec::with_capacity(capacity), + logs: [zero; instructions::MAX_NO_OF_TOPICS], + } + } } -impl Stack for VecStack { - fn peek(&self, no_from_top: usize) -> &S { - &self.stack[self.stack.len() - no_from_top - 1] - } +impl Stack for VecStack { + fn peek(&self, no_from_top: usize) -> &S { + &self.stack[self.stack.len() - no_from_top - 1] + } - fn swap_with_top(&mut self, no_from_top: usize) { - let len = self.stack.len(); - self.stack.swap(len - no_from_top - 1, len - 1); - } + fn swap_with_top(&mut self, no_from_top: usize) { + let len = self.stack.len(); + self.stack.swap(len - no_from_top - 1, len - 1); + } - fn has(&self, no_of_elems: usize) -> bool { - self.stack.len() >= no_of_elems - } + fn has(&self, no_of_elems: usize) -> bool { + self.stack.len() >= no_of_elems + } - fn pop_back(&mut self) -> S { - self.stack.pop().expect("instruction validation prevents from popping too many items; qed") - } + fn pop_back(&mut self) -> S { + self.stack + .pop() + .expect("instruction validation prevents from popping too many items; qed") + } - fn pop_n(&mut self, no_of_elems: usize) -> &[S] { - assert!(no_of_elems <= instructions::MAX_NO_OF_TOPICS); + fn pop_n(&mut self, no_of_elems: usize) -> &[S] { + assert!(no_of_elems <= instructions::MAX_NO_OF_TOPICS); - for i in 0..no_of_elems { - self.logs[i] = self.pop_back(); - } - &self.logs[0..no_of_elems] - } + for i in 0..no_of_elems { + self.logs[i] = self.pop_back(); + } + &self.logs[0..no_of_elems] + } - fn push(&mut self, elem: S) { - self.stack.push(elem); - 
} + fn push(&mut self, elem: S) { + self.stack.push(elem); + } - fn size(&self) -> usize { - self.stack.len() - } + fn size(&self) -> usize { + self.stack.len() + } - fn peek_top(&self, no_from_top: usize) -> &[S] { - assert!(self.stack.len() >= no_from_top, "peek_top asked for more items than exist."); - &self.stack[self.stack.len() - no_from_top .. self.stack.len()] - } + fn peek_top(&self, no_from_top: usize) -> &[S] { + assert!( + self.stack.len() >= no_from_top, + "peek_top asked for more items than exist." + ); + &self.stack[self.stack.len() - no_from_top..self.stack.len()] + } } diff --git a/ethcore/evm/src/lib.rs b/ethcore/evm/src/lib.rs index 6e9409375..ec87ebccf 100644 --- a/ethcore/evm/src/lib.rs +++ b/ethcore/evm/src/lib.rs @@ -18,13 +18,13 @@ extern crate bit_set; extern crate ethereum_types; -extern crate parking_lot; extern crate heapsize; -extern crate vm; extern crate keccak_hash as hash; extern crate memory_cache; -extern crate parity_bytes as bytes; extern crate num_bigint; +extern crate parity_bytes as bytes; +extern crate parking_lot; +extern crate vm; #[macro_use] extern crate lazy_static; @@ -32,28 +32,29 @@ extern crate lazy_static; #[cfg_attr(feature = "evm-debug", macro_use)] extern crate log; -#[cfg(test)] -extern crate rustc_hex; #[cfg(test)] extern crate hex_literal; +#[cfg(test)] +extern crate rustc_hex; pub mod evm; pub mod interpreter; #[macro_use] pub mod factory; -mod vmtype; mod instructions; +mod vmtype; #[cfg(test)] mod tests; -pub use vm::{ - Schedule, CleanDustMode, EnvInfo, CallType, ActionParams, Ext, - ContractCreateResult, MessageCallResult, CreateContractAddress, - GasLeft, ReturnData +pub use self::{ + evm::{CostType, FinalizationResult, Finalize}, + factory::Factory, + instructions::{Instruction, InstructionInfo}, + vmtype::VMType, +}; +pub use vm::{ + ActionParams, CallType, CleanDustMode, ContractCreateResult, CreateContractAddress, EnvInfo, + Ext, GasLeft, MessageCallResult, ReturnData, Schedule, }; -pub use 
self::evm::{Finalize, FinalizationResult, CostType}; -pub use self::instructions::{InstructionInfo, Instruction}; -pub use self::vmtype::VMType; -pub use self::factory::Factory; diff --git a/ethcore/evm/src/tests.rs b/ethcore/evm/src/tests.rs index 73a176ee0..4df52cbb1 100644 --- a/ethcore/evm/src/tests.rs +++ b/ethcore/evm/src/tests.rs @@ -14,1104 +14,1425 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt::Debug; -use std::str::FromStr; -use std::hash::Hash; -use std::sync::Arc; -use std::collections::{HashMap, HashSet}; -use rustc_hex::FromHex; -use ethereum_types::{U256, H256, Address}; -use vm::{self, ActionParams, ActionValue, Ext}; -use vm::tests::{FakeExt, FakeCall, FakeCallType, test_finalize}; +use ethereum_types::{Address, H256, U256}; use factory::Factory; -use vmtype::VMType; use hex_literal::hex; +use rustc_hex::FromHex; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + hash::Hash, + str::FromStr, + sync::Arc, +}; +use vm::{ + self, + tests::{test_finalize, FakeCall, FakeCallType, FakeExt}, + ActionParams, ActionValue, Ext, +}; +use vmtype::VMType; -evm_test!{test_add: test_add_int} +evm_test! 
{test_add: test_add_int} fn test_add(factory: super::Factory) { - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01600055".from_hex().unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_988)); - assert_store(&ext, 0, "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"); + assert_eq!(gas_left, U256::from(79_988)); + assert_store( + &ext, + 0, + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", + ); } -evm_test!{test_sha3: test_sha3_int} +evm_test! 
{test_sha3: test_sha3_int} fn test_sha3(factory: super::Factory) { - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let code = "6000600020600055".from_hex().unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let code = "6000600020600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_961)); - assert_store(&ext, 0, "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); + assert_eq!(gas_left, U256::from(79_961)); + assert_store( + &ext, + 0, + "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + ); } -evm_test!{test_address: test_address_int} +evm_test! 
{test_address: test_address_int} fn test_address(factory: super::Factory) { - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let code = "30600055".from_hex().unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let code = "30600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_995)); - assert_store(&ext, 0, "0000000000000000000000000f572e5295c57f15886f9b263e2f6d2d6c7b5ec6"); + assert_eq!(gas_left, U256::from(79_995)); + assert_store( + &ext, + 0, + "0000000000000000000000000f572e5295c57f15886f9b263e2f6d2d6c7b5ec6", + ); } -evm_test!{test_origin: test_origin_int} +evm_test! 
{test_origin: test_origin_int} fn test_origin(factory: super::Factory) { - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let origin = Address::from_str("cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); - let code = "32600055".from_hex().unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let origin = Address::from_str("cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); + let code = "32600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.origin = origin.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.origin = origin.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_995)); - assert_store(&ext, 0, "000000000000000000000000cd1722f2947def4cf144679da39c4c32bdc35681"); + assert_eq!(gas_left, U256::from(79_995)); + assert_store( + &ext, + 0, + "000000000000000000000000cd1722f2947def4cf144679da39c4c32bdc35681", + ); } -evm_test!{test_selfbalance: test_selfbalance_int} +evm_test! 
{test_selfbalance: test_selfbalance_int} fn test_selfbalance(factory: super::Factory) { - let own_addr = Address::from_str("1337000000000000000000000000000000000000").unwrap(); - // 47 SELFBALANCE - // 60 ff PUSH ff - // 55 SSTORE - let code = hex!("47 60 ff 55").to_vec(); + let own_addr = Address::from_str("1337000000000000000000000000000000000000").unwrap(); + // 47 SELFBALANCE + // 60 ff PUSH ff + // 55 SSTORE + let code = hex!("47 60 ff 55").to_vec(); - let mut params = ActionParams::default(); - params.address = own_addr.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new_istanbul(); - ext.balances = { - let mut x = HashMap::new(); - x.insert(own_addr, U256::from(1_025)); // 0x401 - x - }; - let gas_left = { - let vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; - assert_eq!(gas_left, U256::from(79_992)); // TODO[dvdplm]: do the sums here, SELFBALANCE-5 + PUSH1-3 + ONEBYTE-4 + SSTORE-?? = 100_000 - 79_992 - assert_store(&ext, 0xff, "0000000000000000000000000000000000000000000000000000000000000401"); + let mut params = ActionParams::default(); + params.address = own_addr.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new_istanbul(); + ext.balances = { + let mut x = HashMap::new(); + x.insert(own_addr, U256::from(1_025)); // 0x401 + x + }; + let gas_left = { + let vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; + assert_eq!(gas_left, U256::from(79_992)); // TODO[dvdplm]: do the sums here, SELFBALANCE-5 + PUSH1-3 + ONEBYTE-4 + SSTORE-?? = 100_000 - 79_992 + assert_store( + &ext, + 0xff, + "0000000000000000000000000000000000000000000000000000000000000401", + ); } -evm_test!{test_sender: test_sender_int} +evm_test! 
{test_sender: test_sender_int} fn test_sender(factory: super::Factory) { - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let sender = Address::from_str("cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); - let code = "33600055".from_hex().unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let sender = Address::from_str("cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); + let code = "33600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_995)); - assert_store(&ext, 0, "000000000000000000000000cd1722f2947def4cf144679da39c4c32bdc35681"); + assert_eq!(gas_left, U256::from(79_995)); + assert_store( + &ext, + 0, + "000000000000000000000000cd1722f2947def4cf144679da39c4c32bdc35681", + ); } -evm_test!{test_chain_id: test_chain_id_int} +evm_test! 
{test_chain_id: test_chain_id_int} fn test_chain_id(factory: super::Factory) { - // 46 CHAINID - // 60 00 PUSH 0 - // 55 SSTORE - let code = hex!("46 60 00 55").to_vec(); + // 46 CHAINID + // 60 00 PUSH 0 + // 55 SSTORE + let code = hex!("46 60 00 55").to_vec(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new_istanbul().with_chain_id(9); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new_istanbul().with_chain_id(9); - let gas_left = { - let vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_995)); - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000009"); + assert_eq!(gas_left, U256::from(79_995)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000000009", + ); } -evm_test!{test_extcodecopy: test_extcodecopy_int} +evm_test! 
{test_extcodecopy: test_extcodecopy_int} fn test_extcodecopy(factory: super::Factory) { - // 33 - sender - // 3b - extcodesize - // 60 00 - push 0 - // 60 00 - push 0 - // 33 - sender - // 3c - extcodecopy - // 60 00 - push 0 - // 51 - load word from memory - // 60 00 - push 0 - // 55 - sstore + // 33 - sender + // 3b - extcodesize + // 60 00 - push 0 + // 60 00 - push 0 + // 33 - sender + // 3c - extcodecopy + // 60 00 - push 0 + // 51 - load word from memory + // 60 00 - push 0 + // 55 - sstore - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let sender = Address::from_str("cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); - let code = "333b60006000333c600051600055".from_hex().unwrap(); - let sender_code = "6005600055".from_hex().unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let sender = Address::from_str("cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); + let code = "333b60006000333c600051600055".from_hex().unwrap(); + let sender_code = "6005600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); - ext.codes.insert(sender, Arc::new(sender_code)); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); + ext.codes.insert(sender, Arc::new(sender_code)); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_935)); - assert_store(&ext, 0, 
"6005600055000000000000000000000000000000000000000000000000000000"); + assert_eq!(gas_left, U256::from(79_935)); + assert_store( + &ext, + 0, + "6005600055000000000000000000000000000000000000000000000000000000", + ); } -evm_test!{test_log_empty: test_log_empty_int} +evm_test! {test_log_empty: test_log_empty_int} fn test_log_empty(factory: super::Factory) { - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let code = "60006000a0".from_hex().unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let code = "60006000a0".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(99_619)); - assert_eq!(ext.logs.len(), 1); - assert_eq!(ext.logs[0].topics.len(), 0); - assert!(ext.logs[0].data.is_empty()); + assert_eq!(gas_left, U256::from(99_619)); + assert_eq!(ext.logs.len(), 1); + assert_eq!(ext.logs[0].topics.len(), 0); + assert!(ext.logs[0].data.is_empty()); } -evm_test!{test_log_sender: test_log_sender_int} +evm_test! 
{test_log_sender: test_log_sender_int} fn test_log_sender(factory: super::Factory) { - // 60 ff - push ff - // 60 00 - push 00 - // 53 - mstore - // 33 - sender - // 60 20 - push 20 - // 60 00 - push 0 - // a1 - log with 1 topic + // 60 ff - push ff + // 60 00 - push 00 + // 53 - mstore + // 33 - sender + // 60 20 - push 20 + // 60 00 - push 0 + // a1 - log with 1 topic - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let code = "60ff6000533360206000a1".from_hex().unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let code = "60ff6000533360206000a1".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(98_974)); - assert_eq!(ext.logs.len(), 1); - assert_eq!(ext.logs[0].topics.len(), 1); - assert_eq!(ext.logs[0].topics[0], H256::from_str("000000000000000000000000cd1722f3947def4cf144679da39c4c32bdc35681").unwrap()); - assert_eq!(ext.logs[0].data, "ff00000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap()); + assert_eq!(gas_left, U256::from(98_974)); + 
assert_eq!(ext.logs.len(), 1); + assert_eq!(ext.logs[0].topics.len(), 1); + assert_eq!( + ext.logs[0].topics[0], + H256::from_str("000000000000000000000000cd1722f3947def4cf144679da39c4c32bdc35681").unwrap() + ); + assert_eq!( + ext.logs[0].data, + "ff00000000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap() + ); } -evm_test!{test_blockhash: test_blockhash_int} +evm_test! {test_blockhash: test_blockhash_int} fn test_blockhash(factory: super::Factory) { - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let code = "600040600055".from_hex().unwrap(); - let blockhash = H256::from_str("123400000000000000000000cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let code = "600040600055".from_hex().unwrap(); + let blockhash = + H256::from_str("123400000000000000000000cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); - ext.blockhashes.insert(U256::zero(), blockhash.clone()); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); + ext.blockhashes.insert(U256::zero(), blockhash.clone()); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_974)); - assert_eq!(ext.store.get(&H256::new()).unwrap(), &blockhash); + assert_eq!(gas_left, U256::from(79_974)); + assert_eq!(ext.store.get(&H256::new()).unwrap(), &blockhash); } 
-evm_test!{test_calldataload: test_calldataload_int} +evm_test! {test_calldataload: test_calldataload_int} fn test_calldataload(factory: super::Factory) { - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let code = "600135600055".from_hex().unwrap(); - let data = "0123ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff23".from_hex().unwrap(); + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let code = "600135600055".from_hex().unwrap(); + let data = "0123ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff23" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(data); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.data = Some(data); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_991)); - assert_store(&ext, 0, "23ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff23"); + assert_eq!(gas_left, U256::from(79_991)); + assert_store( + &ext, + 0, + "23ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff23", + ); } -evm_test!{test_author: test_author_int} +evm_test! 
{test_author: test_author_int} fn test_author(factory: super::Factory) { - let author = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let code = "41600055".from_hex().unwrap(); + let author = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let code = "41600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); - ext.info.author = author; + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); + ext.info.author = author; - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_995)); - assert_store(&ext, 0, "0000000000000000000000000f572e5295c57f15886f9b263e2f6d2d6c7b5ec6"); + assert_eq!(gas_left, U256::from(79_995)); + assert_store( + &ext, + 0, + "0000000000000000000000000f572e5295c57f15886f9b263e2f6d2d6c7b5ec6", + ); } -evm_test!{test_timestamp: test_timestamp_int} +evm_test! 
{test_timestamp: test_timestamp_int} fn test_timestamp(factory: super::Factory) { - let timestamp = 0x1234; - let code = "42600055".from_hex().unwrap(); + let timestamp = 0x1234; + let code = "42600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); - ext.info.timestamp = timestamp; + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); + ext.info.timestamp = timestamp; - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_995)); - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000001234"); + assert_eq!(gas_left, U256::from(79_995)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000001234", + ); } -evm_test!{test_number: test_number_int} +evm_test! 
{test_number: test_number_int} fn test_number(factory: super::Factory) { - let number = 0x1234; - let code = "43600055".from_hex().unwrap(); + let number = 0x1234; + let code = "43600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); - ext.info.number = number; + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); + ext.info.number = number; - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_995)); - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000001234"); + assert_eq!(gas_left, U256::from(79_995)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000001234", + ); } -evm_test!{test_difficulty: test_difficulty_int} +evm_test! 
{test_difficulty: test_difficulty_int} fn test_difficulty(factory: super::Factory) { - let difficulty = U256::from(0x1234); - let code = "44600055".from_hex().unwrap(); + let difficulty = U256::from(0x1234); + let code = "44600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); - ext.info.difficulty = difficulty; + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); + ext.info.difficulty = difficulty; - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_995)); - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000001234"); + assert_eq!(gas_left, U256::from(79_995)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000001234", + ); } -evm_test!{test_gas_limit: test_gas_limit_int} +evm_test! 
{test_gas_limit: test_gas_limit_int} fn test_gas_limit(factory: super::Factory) { - let gas_limit = U256::from(0x1234); - let code = "45600055".from_hex().unwrap(); + let gas_limit = U256::from(0x1234); + let code = "45600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); - ext.info.gas_limit = gas_limit; + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); + ext.info.gas_limit = gas_limit; - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(79_995)); - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000001234"); + assert_eq!(gas_left, U256::from(79_995)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000001234", + ); } -evm_test!{test_mul: test_mul_int} +evm_test! 
{test_mul: test_mul_int} fn test_mul(factory: super::Factory) { - let code = "65012365124623626543219002600055".from_hex().unwrap(); + let code = "65012365124623626543219002600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "000000000000000000000000000000000000000000000000734349397b853383"); - assert_eq!(gas_left, U256::from(79_983)); + assert_store( + &ext, + 0, + "000000000000000000000000000000000000000000000000734349397b853383", + ); + assert_eq!(gas_left, U256::from(79_983)); } -evm_test!{test_sub: test_sub_int} +evm_test! 
{test_sub: test_sub_int} fn test_sub(factory: super::Factory) { - let code = "65012365124623626543219003600055".from_hex().unwrap(); + let code = "65012365124623626543219003600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000012364ad0302"); - assert_eq!(gas_left, U256::from(79_985)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000012364ad0302", + ); + assert_eq!(gas_left, U256::from(79_985)); } -evm_test!{test_div: test_div_int} +evm_test! 
{test_div: test_div_int} fn test_div(factory: super::Factory) { - let code = "65012365124623626543219004600055".from_hex().unwrap(); + let code = "65012365124623626543219004600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "000000000000000000000000000000000000000000000000000000000002e0ac"); - assert_eq!(gas_left, U256::from(79_983)); + assert_store( + &ext, + 0, + "000000000000000000000000000000000000000000000000000000000002e0ac", + ); + assert_eq!(gas_left, U256::from(79_983)); } -evm_test!{test_div_zero: test_div_zero_int} +evm_test! 
{test_div_zero: test_div_zero_int} fn test_div_zero(factory: super::Factory) { - let code = "6501236512462360009004600055".from_hex().unwrap(); + let code = "6501236512462360009004600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_eq!(gas_left, U256::from(94_983)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_eq!(gas_left, U256::from(94_983)); } -evm_test!{test_mod: test_mod_int} +evm_test! 
{test_mod: test_mod_int} fn test_mod(factory: super::Factory) { - let code = "650123651246236265432290066000556501236512462360009006600155".from_hex().unwrap(); + let code = "650123651246236265432290066000556501236512462360009006600155" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000076b4b"); - assert_store(&ext, 1, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_eq!(gas_left, U256::from(74_966)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000076b4b", + ); + assert_store( + &ext, + 1, + "0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_eq!(gas_left, U256::from(74_966)); } -evm_test!{test_smod: test_smod_int} +evm_test! 
{test_smod: test_smod_int} fn test_smod(factory: super::Factory) { - let code = "650123651246236265432290076000556501236512462360009007600155".from_hex().unwrap(); + let code = "650123651246236265432290076000556501236512462360009007600155" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000076b4b"); - assert_store(&ext, 1, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_eq!(gas_left, U256::from(74_966)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000076b4b", + ); + assert_store( + &ext, + 1, + "0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_eq!(gas_left, U256::from(74_966)); } -evm_test!{test_sdiv: test_sdiv_int} +evm_test! 
{test_sdiv: test_sdiv_int} fn test_sdiv(factory: super::Factory) { - let code = "650123651246236265432290056000556501236512462360009005600155".from_hex().unwrap(); + let code = "650123651246236265432290056000556501236512462360009005600155" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "000000000000000000000000000000000000000000000000000000000002e0ac"); - assert_store(&ext, 1, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_eq!(gas_left, U256::from(74_966)); + assert_store( + &ext, + 0, + "000000000000000000000000000000000000000000000000000000000002e0ac", + ); + assert_store( + &ext, + 1, + "0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_eq!(gas_left, U256::from(74_966)); } -evm_test!{test_exp: test_exp_int} +evm_test! 
{test_exp: test_exp_int} fn test_exp(factory: super::Factory) { - let code = "6016650123651246230a6000556001650123651246230a6001556000650123651246230a600255".from_hex().unwrap(); + let code = "6016650123651246230a6000556001650123651246230a6001556000650123651246230a600255" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "90fd23767b60204c3d6fc8aec9e70a42a3f127140879c133a20129a597ed0c59"); - assert_store(&ext, 1, "0000000000000000000000000000000000000000000000000000012365124623"); - assert_store(&ext, 2, "0000000000000000000000000000000000000000000000000000000000000001"); - assert_eq!(gas_left, U256::from(39_923)); + assert_store( + &ext, + 0, + "90fd23767b60204c3d6fc8aec9e70a42a3f127140879c133a20129a597ed0c59", + ); + assert_store( + &ext, + 1, + "0000000000000000000000000000000000000000000000000000012365124623", + ); + assert_store( + &ext, + 2, + "0000000000000000000000000000000000000000000000000000000000000001", + ); + assert_eq!(gas_left, U256::from(39_923)); } -evm_test!{test_comparison: test_comparison_int} +evm_test! 
{test_comparison: test_comparison_int} fn test_comparison(factory: super::Factory) { - let code = "601665012365124623818181811060005511600155146002556415235412358014600355".from_hex().unwrap(); + let code = "601665012365124623818181811060005511600155146002556415235412358014600355" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_store(&ext, 1, "0000000000000000000000000000000000000000000000000000000000000001"); - assert_store(&ext, 2, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_store(&ext, 3, "0000000000000000000000000000000000000000000000000000000000000001"); - assert_eq!(gas_left, U256::from(49_952)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_store( + &ext, + 1, + "0000000000000000000000000000000000000000000000000000000000000001", + ); + assert_store( + &ext, + 2, + "0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_store( + &ext, + 3, + "0000000000000000000000000000000000000000000000000000000000000001", + ); + assert_eq!(gas_left, U256::from(49_952)); } -evm_test!{test_signed_comparison: test_signed_comparison_int} +evm_test! 
{test_signed_comparison: test_signed_comparison_int} fn test_signed_comparison(factory: super::Factory) { - let code = "60106000036010818112600055136001556010601060000381811260025513600355".from_hex().unwrap(); + let code = "60106000036010818112600055136001556010601060000381811260025513600355" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_store(&ext, 1, "0000000000000000000000000000000000000000000000000000000000000001"); - assert_store(&ext, 2, "0000000000000000000000000000000000000000000000000000000000000001"); - assert_store(&ext, 3, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_eq!(gas_left, U256::from(49_940)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_store( + &ext, + 1, + "0000000000000000000000000000000000000000000000000000000000000001", + ); + assert_store( + &ext, + 2, + "0000000000000000000000000000000000000000000000000000000000000001", + ); + assert_store( + &ext, + 3, + "0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_eq!(gas_left, U256::from(49_940)); } -evm_test!{test_bitops: test_bitops_int} +evm_test! 
{test_bitops: test_bitops_int} fn test_bitops(factory: super::Factory) { - let code = "60ff610ff08181818116600055176001551860025560008015600355198015600455600555".from_hex().unwrap(); + let code = "60ff610ff08181818116600055176001551860025560008015600355198015600455600555" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(150_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(150_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "00000000000000000000000000000000000000000000000000000000000000f0"); - assert_store(&ext, 1, "0000000000000000000000000000000000000000000000000000000000000fff"); - assert_store(&ext, 2, "0000000000000000000000000000000000000000000000000000000000000f0f"); - assert_store(&ext, 3, "0000000000000000000000000000000000000000000000000000000000000001"); - assert_store(&ext, 4, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_store(&ext, 5, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - assert_eq!(gas_left, U256::from(44_937)); + assert_store( + &ext, + 0, + "00000000000000000000000000000000000000000000000000000000000000f0", + ); + assert_store( + &ext, + 1, + "0000000000000000000000000000000000000000000000000000000000000fff", + ); + assert_store( + &ext, + 2, + "0000000000000000000000000000000000000000000000000000000000000f0f", + ); + assert_store( + &ext, + 3, + "0000000000000000000000000000000000000000000000000000000000000001", + ); + assert_store( + &ext, + 4, + 
"0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_store( + &ext, + 5, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + assert_eq!(gas_left, U256::from(44_937)); } -evm_test!{test_addmod_mulmod: test_addmod_mulmod_int} +evm_test! {test_addmod_mulmod: test_addmod_mulmod_int} fn test_addmod_mulmod(factory: super::Factory) { - let code = "60ff60f060108282820860005509600155600060f0601082828208196002550919600355".from_hex().unwrap(); + let code = "60ff60f060108282820860005509600155600060f0601082828208196002550919600355" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000001"); - assert_store(&ext, 1, "000000000000000000000000000000000000000000000000000000000000000f"); - assert_store(&ext, 2, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - assert_store(&ext, 3, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - assert_eq!(gas_left, U256::from(19_914)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000000001", + ); + assert_store( + &ext, + 1, + "000000000000000000000000000000000000000000000000000000000000000f", + ); + assert_store( + &ext, + 2, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + assert_store( + &ext, + 3, + 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + assert_eq!(gas_left, U256::from(19_914)); } -evm_test!{test_byte: test_byte_int} +evm_test! {test_byte: test_byte_int} fn test_byte(factory: super::Factory) { - let code = "60f061ffff1a600055610fff601f1a600155".from_hex().unwrap(); + let code = "60f061ffff1a600055610fff601f1a600155".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000000"); - assert_store(&ext, 1, "00000000000000000000000000000000000000000000000000000000000000ff"); - assert_eq!(gas_left, U256::from(74_976)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000000000", + ); + assert_store( + &ext, + 1, + "00000000000000000000000000000000000000000000000000000000000000ff", + ); + assert_eq!(gas_left, U256::from(74_976)); } -evm_test!{test_signextend: test_signextend_int} +evm_test! 
{test_signextend: test_signextend_int} fn test_signextend(factory: super::Factory) { - let code = "610fff60020b60005560ff60200b600155".from_hex().unwrap(); + let code = "610fff60020b60005560ff60200b600155".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000fff"); - assert_store(&ext, 1, "00000000000000000000000000000000000000000000000000000000000000ff"); - assert_eq!(gas_left, U256::from(59_972)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000000fff", + ); + assert_store( + &ext, + 1, + "00000000000000000000000000000000000000000000000000000000000000ff", + ); + assert_eq!(gas_left, U256::from(59_972)); } #[test] // JIT just returns out of gas fn test_badinstruction_int() { - let factory = super::Factory::new(VMType::Interpreter, 1024 * 32); - let code = "af".from_hex().unwrap(); + let factory = super::Factory::new(VMType::Interpreter, 1024 * 32); + let code = "af".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let err = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - 
test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap_err() - }; + let err = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap_err() + }; - match err { - vm::Error::BadInstruction { instruction: 0xaf } => (), - _ => assert!(false, "Expected bad instruction") - } + match err { + vm::Error::BadInstruction { instruction: 0xaf } => (), + _ => assert!(false, "Expected bad instruction"), + } } -evm_test!{test_pop: test_pop_int} +evm_test! {test_pop: test_pop_int} fn test_pop(factory: super::Factory) { - let code = "60f060aa50600055".from_hex().unwrap(); + let code = "60f060aa50600055".from_hex().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "00000000000000000000000000000000000000000000000000000000000000f0"); - assert_eq!(gas_left, U256::from(79_989)); + assert_store( + &ext, + 0, + "00000000000000000000000000000000000000000000000000000000000000f0", + ); + assert_eq!(gas_left, U256::from(79_989)); } -evm_test!{test_extops: test_extops_int} +evm_test! 
{test_extops: test_extops_int} fn test_extops(factory: super::Factory) { - let code = "5a6001555836553a600255386003553460045560016001526016590454600555".from_hex().unwrap(); + let code = "5a6001555836553a600255386003553460045560016001526016590454600555" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(150_000); - params.gas_price = U256::from(0x32); - params.value = ActionValue::Transfer(U256::from(0x99)); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(150_000); + params.gas_price = U256::from(0x32); + params.value = ActionValue::Transfer(U256::from(0x99)); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000004"); // PC / CALLDATASIZE - assert_store(&ext, 1, "00000000000000000000000000000000000000000000000000000000000249ee"); // GAS - assert_store(&ext, 2, "0000000000000000000000000000000000000000000000000000000000000032"); // GASPRICE - assert_store(&ext, 3, "0000000000000000000000000000000000000000000000000000000000000020"); // CODESIZE - assert_store(&ext, 4, "0000000000000000000000000000000000000000000000000000000000000099"); // CALLVALUE - assert_store(&ext, 5, "0000000000000000000000000000000000000000000000000000000000000032"); - assert_eq!(gas_left, U256::from(29_898)); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000000004", + ); // PC / CALLDATASIZE + assert_store( + &ext, + 1, + "00000000000000000000000000000000000000000000000000000000000249ee", + ); // GAS + assert_store( + 
&ext, + 2, + "0000000000000000000000000000000000000000000000000000000000000032", + ); // GASPRICE + assert_store( + &ext, + 3, + "0000000000000000000000000000000000000000000000000000000000000020", + ); // CODESIZE + assert_store( + &ext, + 4, + "0000000000000000000000000000000000000000000000000000000000000099", + ); // CALLVALUE + assert_store( + &ext, + 5, + "0000000000000000000000000000000000000000000000000000000000000032", + ); + assert_eq!(gas_left, U256::from(29_898)); } -evm_test!{test_jumps: test_jumps_int} +evm_test! {test_jumps: test_jumps_int} fn test_jumps(factory: super::Factory) { - let code = "600160015560066000555b60016000540380806000551560245760015402600155600a565b".from_hex().unwrap(); + let code = "600160015560066000555b60016000540380806000551560245760015402600155600a565b" + .from_hex() + .unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(150_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new(); + let mut params = ActionParams::default(); + params.gas = U256::from(150_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(ext.sstore_clears, ext.schedule().sstore_refund_gas as i128); - assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000000"); // 5! - assert_store(&ext, 1, "0000000000000000000000000000000000000000000000000000000000000078"); // 5! - assert_eq!(gas_left, U256::from(54_117)); + assert_eq!(ext.sstore_clears, ext.schedule().sstore_refund_gas as i128); + assert_store( + &ext, + 0, + "0000000000000000000000000000000000000000000000000000000000000000", + ); // 5! 
+ assert_store( + &ext, + 1, + "0000000000000000000000000000000000000000000000000000000000000078", + ); // 5! + assert_eq!(gas_left, U256::from(54_117)); } -evm_test!{test_calls: test_calls_int} +evm_test! {test_calls: test_calls_int} fn test_calls(factory: super::Factory) { - let code = "600054602d57600160005560006000600060006050610998610100f160006000600060006050610998610100f25b".from_hex().unwrap(); + let code = "600054602d57600160005560006000600060006050610998610100f160006000600060006050610998610100f25b".from_hex().unwrap(); - let address = Address::from(0x155); - let code_address = Address::from(0x998); - let mut params = ActionParams::default(); - params.gas = U256::from(150_000); - params.code = Some(Arc::new(code)); - params.address = address.clone(); - let mut ext = FakeExt::new(); - ext.balances = { - let mut s = HashMap::new(); - s.insert(params.address.clone(), params.gas); - s - }; + let address = Address::from(0x155); + let code_address = Address::from(0x998); + let mut params = ActionParams::default(); + params.gas = U256::from(150_000); + params.code = Some(Arc::new(code)); + params.address = address.clone(); + let mut ext = FakeExt::new(); + ext.balances = { + let mut s = HashMap::new(); + s.insert(params.address.clone(), params.gas); + s + }; - let gas_left = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_set_contains(&ext.calls, &FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: U256::from(2556), - sender_address: Some(address.clone()), - receive_address: Some(code_address.clone()), - value: Some(U256::from(0x50)), - data: vec!(), - code_address: Some(code_address.clone()) - }); - assert_set_contains(&ext.calls, &FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - 
gas: U256::from(2556), - sender_address: Some(address.clone()), - receive_address: Some(address.clone()), - value: Some(U256::from(0x50)), - data: vec!(), - code_address: Some(code_address.clone()) - }); - assert_eq!(gas_left, U256::from(91_405)); - assert_eq!(ext.calls.len(), 2); + assert_set_contains( + &ext.calls, + &FakeCall { + call_type: FakeCallType::Call, + create_scheme: None, + gas: U256::from(2556), + sender_address: Some(address.clone()), + receive_address: Some(code_address.clone()), + value: Some(U256::from(0x50)), + data: vec![], + code_address: Some(code_address.clone()), + }, + ); + assert_set_contains( + &ext.calls, + &FakeCall { + call_type: FakeCallType::Call, + create_scheme: None, + gas: U256::from(2556), + sender_address: Some(address.clone()), + receive_address: Some(address.clone()), + value: Some(U256::from(0x50)), + data: vec![], + code_address: Some(code_address.clone()), + }, + ); + assert_eq!(gas_left, U256::from(91_405)); + assert_eq!(ext.calls.len(), 2); } -evm_test!{test_create_in_staticcall: test_create_in_staticcall_int} +evm_test! 
{test_create_in_staticcall: test_create_in_staticcall_int} fn test_create_in_staticcall(factory: super::Factory) { - let code = "600060006064f000".from_hex().unwrap(); + let code = "600060006064f000".from_hex().unwrap(); - let address = Address::from(0x155); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.address = address.clone(); - let mut ext = FakeExt::new_byzantium(); - ext.is_static = true; + let address = Address::from(0x155); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.address = address.clone(); + let mut ext = FakeExt::new_byzantium(); + ext.is_static = true; - let err = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap_err() - }; + let err = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap_err() + }; - assert_eq!(err, vm::Error::MutableCallInStaticContext); - assert_eq!(ext.calls.len(), 0); + assert_eq!(err, vm::Error::MutableCallInStaticContext); + assert_eq!(ext.calls.len(), 0); } -evm_test!{test_shl: test_shl_int} +evm_test! 
{test_shl: test_shl_int} fn test_shl(factory: super::Factory) { - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "0000000000000000000000000000000000000000000000000000000000000001", - "00", - "0000000000000000000000000000000000000000000000000000000000000001"); - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "0000000000000000000000000000000000000000000000000000000000000001", - "01", - "0000000000000000000000000000000000000000000000000000000000000002"); - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "0000000000000000000000000000000000000000000000000000000000000001", - "ff", - "8000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "0000000000000000000000000000000000000000000000000000000000000001", - "0100", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "0000000000000000000000000000000000000000000000000000000000000001", - "0101", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "00", - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "01", - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"); - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "ff", - "8000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0100", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - 
&factory, - 0x1b, - "0000000000000000000000000000000000000000000000000000000000000000", - "01", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1b, - "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "01", - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "0000000000000000000000000000000000000000000000000000000000000001", + "00", + "0000000000000000000000000000000000000000000000000000000000000001", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "0000000000000000000000000000000000000000000000000000000000000001", + "01", + "0000000000000000000000000000000000000000000000000000000000000002", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "0000000000000000000000000000000000000000000000000000000000000001", + "ff", + "8000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "0000000000000000000000000000000000000000000000000000000000000001", + "0100", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "0000000000000000000000000000000000000000000000000000000000000001", + "0101", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "00", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01", + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "ff", + "8000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0100", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "0000000000000000000000000000000000000000000000000000000000000000", + "01", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1b, + "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01", + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", + ); } -evm_test!{test_shr: test_shr_int} +evm_test! {test_shr: test_shr_int} fn test_shr(factory: super::Factory) { - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "0000000000000000000000000000000000000000000000000000000000000001", - "00", - "0000000000000000000000000000000000000000000000000000000000000001"); - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "0000000000000000000000000000000000000000000000000000000000000001", - "01", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "8000000000000000000000000000000000000000000000000000000000000000", - "01", - "4000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "8000000000000000000000000000000000000000000000000000000000000000", - "ff", - "0000000000000000000000000000000000000000000000000000000000000001"); - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "8000000000000000000000000000000000000000000000000000000000000000", - "0100", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - 
&factory, - 0x1c, - "8000000000000000000000000000000000000000000000000000000000000000", - "0101", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "00", - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "01", - "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "ff", - "0000000000000000000000000000000000000000000000000000000000000001"); - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0100", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1c, - "0000000000000000000000000000000000000000000000000000000000000000", - "01", - "0000000000000000000000000000000000000000000000000000000000000000"); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "0000000000000000000000000000000000000000000000000000000000000001", + "00", + "0000000000000000000000000000000000000000000000000000000000000001", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "0000000000000000000000000000000000000000000000000000000000000001", + "01", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "8000000000000000000000000000000000000000000000000000000000000000", + "01", + "4000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "8000000000000000000000000000000000000000000000000000000000000000", + "ff", + 
"0000000000000000000000000000000000000000000000000000000000000001", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "8000000000000000000000000000000000000000000000000000000000000000", + "0100", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "8000000000000000000000000000000000000000000000000000000000000000", + "0101", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "00", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01", + "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "ff", + "0000000000000000000000000000000000000000000000000000000000000001", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0100", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1c, + "0000000000000000000000000000000000000000000000000000000000000000", + "01", + "0000000000000000000000000000000000000000000000000000000000000000", + ); } -evm_test!{test_sar: test_sar_int} +evm_test! 
{test_sar: test_sar_int} fn test_sar(factory: super::Factory) { - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "0000000000000000000000000000000000000000000000000000000000000001", - "00", - "0000000000000000000000000000000000000000000000000000000000000001"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "0000000000000000000000000000000000000000000000000000000000000001", - "01", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "8000000000000000000000000000000000000000000000000000000000000000", - "01", - "c000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "8000000000000000000000000000000000000000000000000000000000000000", - "ff", - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "8000000000000000000000000000000000000000000000000000000000000000", - "0100", - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "8000000000000000000000000000000000000000000000000000000000000000", - "0101", - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "00", - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "01", - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "ff", - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - 
&factory, - 0x1d, - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0100", - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "0000000000000000000000000000000000000000000000000000000000000000", - "01", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "4000000000000000000000000000000000000000000000000000000000000000", - "fe", - "0000000000000000000000000000000000000000000000000000000000000001"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "f8", - "000000000000000000000000000000000000000000000000000000000000007f"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "fe", - "0000000000000000000000000000000000000000000000000000000000000001"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "ff", - "0000000000000000000000000000000000000000000000000000000000000000"); - push_two_pop_one_constantinople_test( - &factory, - 0x1d, - "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0100", - "0000000000000000000000000000000000000000000000000000000000000000"); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "0000000000000000000000000000000000000000000000000000000000000001", + "00", + "0000000000000000000000000000000000000000000000000000000000000001", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "0000000000000000000000000000000000000000000000000000000000000001", + "01", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "8000000000000000000000000000000000000000000000000000000000000000", + "01", + 
"c000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "8000000000000000000000000000000000000000000000000000000000000000", + "ff", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "8000000000000000000000000000000000000000000000000000000000000000", + "0100", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "8000000000000000000000000000000000000000000000000000000000000000", + "0101", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "00", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "01", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "ff", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0100", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "0000000000000000000000000000000000000000000000000000000000000000", + "01", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "4000000000000000000000000000000000000000000000000000000000000000", + "fe", + "0000000000000000000000000000000000000000000000000000000000000001", + ); + 
push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "f8", + "000000000000000000000000000000000000000000000000000000000000007f", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "fe", + "0000000000000000000000000000000000000000000000000000000000000001", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "ff", + "0000000000000000000000000000000000000000000000000000000000000000", + ); + push_two_pop_one_constantinople_test( + &factory, + 0x1d, + "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0100", + "0000000000000000000000000000000000000000000000000000000000000000", + ); } -fn push_two_pop_one_constantinople_test(factory: &super::Factory, opcode: u8, push1: &str, push2: &str, result: &str) { - let mut push1 = push1.from_hex().unwrap(); - let mut push2 = push2.from_hex().unwrap(); - assert!(push1.len() <= 32 && push1.len() != 0); - assert!(push2.len() <= 32 && push2.len() != 0); +fn push_two_pop_one_constantinople_test( + factory: &super::Factory, + opcode: u8, + push1: &str, + push2: &str, + result: &str, +) { + let mut push1 = push1.from_hex().unwrap(); + let mut push2 = push2.from_hex().unwrap(); + assert!(push1.len() <= 32 && push1.len() != 0); + assert!(push2.len() <= 32 && push2.len() != 0); - let mut code = Vec::new(); - code.push(0x60 + ((push1.len() - 1) as u8)); - code.append(&mut push1); - code.push(0x60 + ((push2.len() - 1) as u8)); - code.append(&mut push2); - code.push(opcode); - code.append(&mut vec![0x60, 0x00, 0x55]); + let mut code = Vec::new(); + code.push(0x60 + ((push1.len() - 1) as u8)); + code.append(&mut push1); + code.push(0x60 + ((push2.len() - 1) as u8)); + code.append(&mut push2); + code.push(opcode); + code.append(&mut vec![0x60, 0x00, 0x55]); - let mut params = 
ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new_constantinople(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new_constantinople(); - let _ = { - let mut vm = factory.create(params, ext.schedule(), ext.depth()); - test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() - }; + let _ = { + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_store(&ext, 0, result); + assert_store(&ext, 0, result); } -fn assert_set_contains(set: &HashSet, val: &T) { - let contains = set.contains(val); - if !contains { - println!("Set: {:?}", set); - println!("Elem: {:?}", val); - } - assert!(contains, "Element not found in HashSet"); +fn assert_set_contains(set: &HashSet, val: &T) { + let contains = set.contains(val); + if !contains { + println!("Set: {:?}", set); + println!("Elem: {:?}", val); + } + assert!(contains, "Element not found in HashSet"); } fn assert_store(ext: &FakeExt, pos: u64, val: &str) { - assert_eq!(ext.store.get(&H256::from(pos)).unwrap(), &H256::from_str(val).unwrap()); + assert_eq!( + ext.store.get(&H256::from(pos)).unwrap(), + &H256::from_str(val).unwrap() + ); } diff --git a/ethcore/evm/src/vmtype.rs b/ethcore/evm/src/vmtype.rs index 2ae40e2c1..14ab677b3 100644 --- a/ethcore/evm/src/vmtype.rs +++ b/ethcore/evm/src/vmtype.rs @@ -19,27 +19,31 @@ use std::fmt; /// Type of EVM to use. 
#[derive(Debug, PartialEq, Clone)] pub enum VMType { - /// RUST EVM - Interpreter + /// RUST EVM + Interpreter, } impl fmt::Display for VMType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", match *self { - VMType::Interpreter => "INT" - }) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}", + match *self { + VMType::Interpreter => "INT", + } + ) + } } impl Default for VMType { - fn default() -> Self { - VMType::Interpreter - } + fn default() -> Self { + VMType::Interpreter + } } impl VMType { - /// Return all possible VMs (Interpreter) - pub fn all() -> Vec { - vec![VMType::Interpreter] - } + /// Return all possible VMs (Interpreter) + pub fn all() -> Vec { + vec![VMType::Interpreter] + } } diff --git a/ethcore/light/src/cache.rs b/ethcore/light/src/cache.rs index d75e0ff7d..8d131c748 100644 --- a/ethcore/light/src/cache.rs +++ b/ethcore/light/src/cache.rs @@ -20,11 +20,9 @@ //! Furthermore, stores a "gas price corpus" of relative recency, which is a sorted //! vector of all gas prices from a recent range of blocks. -use std::time::{Instant, Duration}; +use std::time::{Duration, Instant}; -use common_types::encoded; -use common_types::BlockNumber; -use common_types::receipt::Receipt; +use common_types::{encoded, receipt::Receipt, BlockNumber}; use ethereum_types::{H256, U256}; use heapsize::HeapSizeOf; use memory_cache::MemoryLruCache; @@ -33,29 +31,29 @@ use stats::Corpus; /// Configuration for how much data to cache. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct CacheSizes { - /// Maximum size, in bytes, of cached headers. - pub headers: usize, - /// Maximum size, in bytes, of cached canonical hashes. - pub canon_hashes: usize, - /// Maximum size, in bytes, of cached block bodies. - pub bodies: usize, - /// Maximum size, in bytes, of cached block receipts. - pub receipts: usize, - /// Maximum size, in bytes, of cached chain score for the block. 
- pub chain_score: usize, + /// Maximum size, in bytes, of cached headers. + pub headers: usize, + /// Maximum size, in bytes, of cached canonical hashes. + pub canon_hashes: usize, + /// Maximum size, in bytes, of cached block bodies. + pub bodies: usize, + /// Maximum size, in bytes, of cached block receipts. + pub receipts: usize, + /// Maximum size, in bytes, of cached chain score for the block. + pub chain_score: usize, } impl Default for CacheSizes { - fn default() -> Self { - const MB: usize = 1024 * 1024; - CacheSizes { - headers: 10 * MB, - canon_hashes: 3 * MB, - bodies: 20 * MB, - receipts: 10 * MB, - chain_score: 7 * MB, - } - } + fn default() -> Self { + const MB: usize = 1024 * 1024; + CacheSizes { + headers: 10 * MB, + canon_hashes: 3 * MB, + bodies: 20 * MB, + receipts: 10 * MB, + chain_score: 7 * MB, + } + } } /// The light client data cache. @@ -64,131 +62,131 @@ impl Default for CacheSizes { /// the underlying LRU-caches on read. /// [LRU-cache](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_Recently_Used_.28LRU.29) pub struct Cache { - headers: MemoryLruCache, - canon_hashes: MemoryLruCache, - bodies: MemoryLruCache, - receipts: MemoryLruCache>, - chain_score: MemoryLruCache, - corpus: Option<(Corpus, Instant)>, - corpus_expiration: Duration, + headers: MemoryLruCache, + canon_hashes: MemoryLruCache, + bodies: MemoryLruCache, + receipts: MemoryLruCache>, + chain_score: MemoryLruCache, + corpus: Option<(Corpus, Instant)>, + corpus_expiration: Duration, } impl Cache { - /// Create a new data cache with the given sizes and gas price corpus expiration time. 
- pub fn new(sizes: CacheSizes, corpus_expiration: Duration) -> Self { - Cache { - headers: MemoryLruCache::new(sizes.headers), - canon_hashes: MemoryLruCache::new(sizes.canon_hashes), - bodies: MemoryLruCache::new(sizes.bodies), - receipts: MemoryLruCache::new(sizes.receipts), - chain_score: MemoryLruCache::new(sizes.chain_score), - corpus: None, - corpus_expiration, - } - } + /// Create a new data cache with the given sizes and gas price corpus expiration time. + pub fn new(sizes: CacheSizes, corpus_expiration: Duration) -> Self { + Cache { + headers: MemoryLruCache::new(sizes.headers), + canon_hashes: MemoryLruCache::new(sizes.canon_hashes), + bodies: MemoryLruCache::new(sizes.bodies), + receipts: MemoryLruCache::new(sizes.receipts), + chain_score: MemoryLruCache::new(sizes.chain_score), + corpus: None, + corpus_expiration, + } + } - /// Query header by hash. - pub fn block_header(&mut self, hash: &H256) -> Option { - self.headers.get_mut(hash).cloned() - } + /// Query header by hash. + pub fn block_header(&mut self, hash: &H256) -> Option { + self.headers.get_mut(hash).cloned() + } - /// Query hash by number. - pub fn block_hash(&mut self, num: BlockNumber) -> Option { - self.canon_hashes.get_mut(&num).map(|h| *h) - } + /// Query hash by number. + pub fn block_hash(&mut self, num: BlockNumber) -> Option { + self.canon_hashes.get_mut(&num).map(|h| *h) + } - /// Query block body by block hash. - pub fn block_body(&mut self, hash: &H256) -> Option { - self.bodies.get_mut(hash).cloned() - } + /// Query block body by block hash. + pub fn block_body(&mut self, hash: &H256) -> Option { + self.bodies.get_mut(hash).cloned() + } - /// Query block receipts by block hash. - pub fn block_receipts(&mut self, hash: &H256) -> Option> { - self.receipts.get_mut(hash).cloned() - } + /// Query block receipts by block hash. + pub fn block_receipts(&mut self, hash: &H256) -> Option> { + self.receipts.get_mut(hash).cloned() + } - /// Query chain score by block hash. 
- pub fn chain_score(&mut self, hash: &H256) -> Option { - self.chain_score.get_mut(hash).map(|h| *h) - } + /// Query chain score by block hash. + pub fn chain_score(&mut self, hash: &H256) -> Option { + self.chain_score.get_mut(hash).map(|h| *h) + } - /// Cache the given header. - pub fn insert_block_header(&mut self, hash: H256, hdr: encoded::Header) { - self.headers.insert(hash, hdr); - } + /// Cache the given header. + pub fn insert_block_header(&mut self, hash: H256, hdr: encoded::Header) { + self.headers.insert(hash, hdr); + } - /// Cache the given canonical block hash. - pub fn insert_block_hash(&mut self, num: BlockNumber, hash: H256) { - self.canon_hashes.insert(num, hash); - } + /// Cache the given canonical block hash. + pub fn insert_block_hash(&mut self, num: BlockNumber, hash: H256) { + self.canon_hashes.insert(num, hash); + } - /// Cache the given block body. - pub fn insert_block_body(&mut self, hash: H256, body: encoded::Body) { - self.bodies.insert(hash, body); - } + /// Cache the given block body. + pub fn insert_block_body(&mut self, hash: H256, body: encoded::Body) { + self.bodies.insert(hash, body); + } - /// Cache the given block receipts. - pub fn insert_block_receipts(&mut self, hash: H256, receipts: Vec) { - self.receipts.insert(hash, receipts); - } + /// Cache the given block receipts. + pub fn insert_block_receipts(&mut self, hash: H256, receipts: Vec) { + self.receipts.insert(hash, receipts); + } - /// Cache the given chain scoring. - pub fn insert_chain_score(&mut self, hash: H256, score: U256) { - self.chain_score.insert(hash, score); - } + /// Cache the given chain scoring. + pub fn insert_chain_score(&mut self, hash: H256, score: U256) { + self.chain_score.insert(hash, score); + } - /// Get gas price corpus, if recent enough. - pub fn gas_price_corpus(&self) -> Option> { - let now = Instant::now(); + /// Get gas price corpus, if recent enough. 
+ pub fn gas_price_corpus(&self) -> Option> { + let now = Instant::now(); - self.corpus.as_ref().and_then(|&(ref corpus, ref tm)| { - if *tm + self.corpus_expiration >= now { - Some(corpus.clone()) - } else { - None - } - }) - } + self.corpus.as_ref().and_then(|&(ref corpus, ref tm)| { + if *tm + self.corpus_expiration >= now { + Some(corpus.clone()) + } else { + None + } + }) + } - /// Set the cached gas price corpus. - pub fn set_gas_price_corpus(&mut self, corpus: Corpus) { - self.corpus = Some((corpus, Instant::now())) - } + /// Set the cached gas price corpus. + pub fn set_gas_price_corpus(&mut self, corpus: Corpus) { + self.corpus = Some((corpus, Instant::now())) + } - /// Get the memory used. - pub fn mem_used(&self) -> usize { - self.heap_size_of_children() - } + /// Get the memory used. + pub fn mem_used(&self) -> usize { + self.heap_size_of_children() + } } impl HeapSizeOf for Cache { - fn heap_size_of_children(&self) -> usize { - self.headers.current_size() - + self.canon_hashes.current_size() - + self.bodies.current_size() - + self.receipts.current_size() - + self.chain_score.current_size() - // TODO: + corpus - } + fn heap_size_of_children(&self) -> usize { + self.headers.current_size() + + self.canon_hashes.current_size() + + self.bodies.current_size() + + self.receipts.current_size() + + self.chain_score.current_size() + // TODO: + corpus + } } #[cfg(test)] mod tests { - use super::Cache; - use std::time::Duration; + use super::Cache; + use std::time::Duration; - #[test] - fn corpus_inaccessible() { - let duration = Duration::from_secs(20); - let mut cache = Cache::new(Default::default(), duration.clone()); + #[test] + fn corpus_inaccessible() { + let duration = Duration::from_secs(20); + let mut cache = Cache::new(Default::default(), duration.clone()); - cache.set_gas_price_corpus(vec![].into()); - assert_eq!(cache.gas_price_corpus(), Some(vec![].into())); + cache.set_gas_price_corpus(vec![].into()); + assert_eq!(cache.gas_price_corpus(), 
Some(vec![].into())); - { - let corpus_time = &mut cache.corpus.as_mut().unwrap().1; - *corpus_time = *corpus_time - duration; - } - assert!(cache.gas_price_corpus().is_none()); - } + { + let corpus_time = &mut cache.corpus.as_mut().unwrap().1; + *corpus_time = *corpus_time - duration; + } + assert!(cache.gas_price_corpus().is_none()); + } } diff --git a/ethcore/light/src/cht.rs b/ethcore/light/src/cht.rs index a9bc5d7f2..7b056b302 100644 --- a/ethcore/light/src/cht.rs +++ b/ethcore/light/src/cht.rs @@ -23,29 +23,31 @@ //! root has. A correct proof implies that the claimed block is identical to the one //! we discarded. +use bytes::Bytes; use common_types::ids::BlockId; use ethereum_types::{H256, U256}; +use ethtrie::{self, TrieDB, TrieDBMut}; use hash_db::HashDB; +use journaldb::new_memory_db; use keccak_hasher::KeccakHasher; use kvdb::DBValue; use memory_db::MemoryDB; -use journaldb::new_memory_db; -use bytes::Bytes; -use trie::{TrieMut, Trie, Recorder}; -use ethtrie::{self, TrieDB, TrieDBMut}; -use rlp::{RlpStream, Rlp}; +use rlp::{Rlp, RlpStream}; +use trie::{Recorder, Trie, TrieMut}; // encode a key. macro_rules! key { - ($num: expr) => { ::rlp::encode(&$num) } + ($num: expr) => { + ::rlp::encode(&$num) + }; } macro_rules! val { - ($hash: expr, $td: expr) => {{ - let mut stream = RlpStream::new_list(2); - stream.append(&$hash).append(&$td); - stream.drain() - }} + ($hash: expr, $td: expr) => {{ + let mut stream = RlpStream::new_list(2); + stream.append(&$hash).append(&$td); + stream.drain() + }}; } /// The size of each CHT. @@ -55,96 +57,104 @@ pub const SIZE: u64 = 2048; /// See module docs for more details. #[derive(Debug, Clone)] pub struct CHT> { - db: DB, - root: H256, // the root of this CHT. - number: u64, + db: DB, + root: H256, // the root of this CHT. + number: u64, } impl> CHT { - /// Query the root of the CHT. - pub fn root(&self) -> H256 { self.root } + /// Query the root of the CHT. 
+ pub fn root(&self) -> H256 { + self.root + } - /// Query the number of the CHT. - pub fn number(&self) -> u64 { self.number } + /// Query the number of the CHT. + pub fn number(&self) -> u64 { + self.number + } - /// Generate an inclusion proof for the entry at a specific block. - /// Nodes before level `from_level` will be omitted. - /// Returns an error on an incomplete trie, and `Ok(None)` on an unprovable request. - pub fn prove(&self, num: u64, from_level: u32) -> ethtrie::Result>> { - if block_to_cht_number(num) != Some(self.number) { return Ok(None) } + /// Generate an inclusion proof for the entry at a specific block. + /// Nodes before level `from_level` will be omitted. + /// Returns an error on an incomplete trie, and `Ok(None)` on an unprovable request. + pub fn prove(&self, num: u64, from_level: u32) -> ethtrie::Result>> { + if block_to_cht_number(num) != Some(self.number) { + return Ok(None); + } - let mut recorder = Recorder::with_depth(from_level); - let db: &HashDB<_,_> = &self.db; - let t = TrieDB::new(&db, &self.root)?; - t.get_with(&key!(num), &mut recorder)?; + let mut recorder = Recorder::with_depth(from_level); + let db: &HashDB<_, _> = &self.db; + let t = TrieDB::new(&db, &self.root)?; + t.get_with(&key!(num), &mut recorder)?; - Ok(Some(recorder.drain().into_iter().map(|x| x.data).collect())) - } + Ok(Some(recorder.drain().into_iter().map(|x| x.data).collect())) + } } /// Block information necessary to build a CHT. pub struct BlockInfo { - /// The block's hash. - pub hash: H256, - /// The block's parent's hash. - pub parent_hash: H256, - /// The block's total difficulty. - pub total_difficulty: U256, + /// The block's hash. + pub hash: H256, + /// The block's parent's hash. + pub parent_hash: H256, + /// The block's total difficulty. + pub total_difficulty: U256, } /// Build an in-memory CHT from a closure which provides necessary information /// about blocks. 
If the fetcher ever fails to provide the info, the CHT /// will not be generated. pub fn build(cht_num: u64, mut fetcher: F) -> Option>> - where F: FnMut(BlockId) -> Option +where + F: FnMut(BlockId) -> Option, { - let mut db = new_memory_db(); + let mut db = new_memory_db(); - // start from the last block by number and work backwards. - let last_num = start_number(cht_num + 1) - 1; - let mut id = BlockId::Number(last_num); + // start from the last block by number and work backwards. + let last_num = start_number(cht_num + 1) - 1; + let mut id = BlockId::Number(last_num); - let mut root = H256::default(); + let mut root = H256::default(); - { - let mut t = TrieDBMut::new(&mut db, &mut root); - for blk_num in (0..SIZE).map(|n| last_num - n) { - let info = match fetcher(id) { - Some(info) => info, - None => return None, - }; + { + let mut t = TrieDBMut::new(&mut db, &mut root); + for blk_num in (0..SIZE).map(|n| last_num - n) { + let info = match fetcher(id) { + Some(info) => info, + None => return None, + }; - id = BlockId::Hash(info.parent_hash); - t.insert(&key!(blk_num), &val!(info.hash, info.total_difficulty)) - .expect("fresh in-memory database is infallible; qed"); - } - } + id = BlockId::Hash(info.parent_hash); + t.insert(&key!(blk_num), &val!(info.hash, info.total_difficulty)) + .expect("fresh in-memory database is infallible; qed"); + } + } - Some(CHT { - db, - root, - number: cht_num, - }) + Some(CHT { + db, + root, + number: cht_num, + }) } /// Compute a CHT root from an iterator of (hash, td) pairs. Fails if shorter than /// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`. /// Discards the trie's nodes. 
pub fn compute_root(cht_num: u64, iterable: I) -> Option - where I: IntoIterator +where + I: IntoIterator, { - let mut v = Vec::with_capacity(SIZE as usize); - let start_num = start_number(cht_num) as usize; + let mut v = Vec::with_capacity(SIZE as usize); + let start_num = start_number(cht_num) as usize; - for (i, (h, td)) in iterable.into_iter().take(SIZE as usize).enumerate() { - v.push((key!(i + start_num), val!(h, td))) - } + for (i, (h, td)) in iterable.into_iter().take(SIZE as usize).enumerate() { + v.push((key!(i + start_num), val!(h, td))) + } - if v.len() == SIZE as usize { - Some(::triehash::trie_root(v)) - } else { - None - } + if v.len() == SIZE as usize { + Some(::triehash::trie_root(v)) + } else { + None + } } /// Check a proof for a CHT. @@ -152,32 +162,34 @@ pub fn compute_root(cht_num: u64, iterable: I) -> Option /// verify the given trie branch and extract the canonical hash and total difficulty. // TODO: better support for partially-checked queries. pub fn check_proof(proof: &[Bytes], num: u64, root: H256) -> Option<(H256, U256)> { - let mut db = new_memory_db(); + let mut db = new_memory_db(); - for node in proof { db.insert(&node[..]); } - let res = match TrieDB::new(&db, &root) { - Err(_) => return None, - Ok(trie) => trie.get_with(&key!(num), |val: &[u8]| { - let rlp = Rlp::new(val); - rlp.val_at::(0) - .and_then(|h| rlp.val_at::(1).map(|td| (h, td))) - .ok() - }) - }; + for node in proof { + db.insert(&node[..]); + } + let res = match TrieDB::new(&db, &root) { + Err(_) => return None, + Ok(trie) => trie.get_with(&key!(num), |val: &[u8]| { + let rlp = Rlp::new(val); + rlp.val_at::(0) + .and_then(|h| rlp.val_at::(1).map(|td| (h, td))) + .ok() + }), + }; - match res { - Ok(Some(Some((hash, td)))) => Some((hash, td)), - _ => None, - } + match res { + Ok(Some(Some((hash, td)))) => Some((hash, td)), + _ => None, + } } /// Convert a block number to a CHT number. /// Returns `None` for `block_num` == 0, `Some` otherwise. 
pub fn block_to_cht_number(block_num: u64) -> Option { - match block_num { - 0 => None, - n => Some((n - 1) / SIZE), - } + match block_num { + 0 => None, + n => Some((n - 1) / SIZE), + } } /// Get the starting block of a given CHT. @@ -187,29 +199,29 @@ pub fn block_to_cht_number(block_num: u64) -> Option { /// This is because the genesis hash is assumed to be known /// and including it would be redundant. pub fn start_number(cht_num: u64) -> u64 { - (cht_num * SIZE) + 1 + (cht_num * SIZE) + 1 } #[cfg(test)] mod tests { - #[test] - fn size_is_lt_usize() { - // to ensure safe casting on the target platform. - assert!(::cht::SIZE < usize::max_value() as u64) - } + #[test] + fn size_is_lt_usize() { + // to ensure safe casting on the target platform. + assert!(::cht::SIZE < usize::max_value() as u64) + } - #[test] - fn block_to_cht_number() { - assert!(::cht::block_to_cht_number(0).is_none()); - assert_eq!(::cht::block_to_cht_number(1).unwrap(), 0); - assert_eq!(::cht::block_to_cht_number(::cht::SIZE + 1).unwrap(), 1); - assert_eq!(::cht::block_to_cht_number(::cht::SIZE).unwrap(), 0); - } + #[test] + fn block_to_cht_number() { + assert!(::cht::block_to_cht_number(0).is_none()); + assert_eq!(::cht::block_to_cht_number(1).unwrap(), 0); + assert_eq!(::cht::block_to_cht_number(::cht::SIZE + 1).unwrap(), 1); + assert_eq!(::cht::block_to_cht_number(::cht::SIZE).unwrap(), 0); + } - #[test] - fn start_number() { - assert_eq!(::cht::start_number(0), 1); - assert_eq!(::cht::start_number(1), ::cht::SIZE + 1); - assert_eq!(::cht::start_number(2), ::cht::SIZE * 2 + 1); - } + #[test] + fn start_number() { + assert_eq!(::cht::start_number(0), 1); + assert_eq!(::cht::start_number(1), ::cht::SIZE + 1); + assert_eq!(::cht::start_number(2), ::cht::SIZE * 2 + 1); + } } diff --git a/ethcore/light/src/client/fetch.rs b/ethcore/light/src/client/fetch.rs index 86a3770bf..0be6283dd 100644 --- a/ethcore/light/src/client/fetch.rs +++ b/ethcore/light/src/client/fetch.rs @@ -18,68 +18,70 @@ use 
std::sync::Arc; -use common_types::encoded; -use common_types::header::Header; -use common_types::receipt::Receipt; -use ethcore::engines::{EthEngine, StateDependentProof}; -use ethcore::machine::EthereumMachine; +use common_types::{encoded, header::Header, receipt::Receipt}; +use ethcore::{ + engines::{EthEngine, StateDependentProof}, + machine::EthereumMachine, +}; use ethereum_types::H256; use futures::future::IntoFuture; /// Provides full chain data. pub trait ChainDataFetcher: Send + Sync + 'static { - /// Error type when data unavailable. - type Error: ::std::fmt::Debug; + /// Error type when data unavailable. + type Error: ::std::fmt::Debug; - /// Future for fetching block body. - type Body: IntoFuture; - /// Future for fetching block receipts. - type Receipts: IntoFuture, Error=Self::Error>; - /// Future for fetching epoch transition - type Transition: IntoFuture, Error=Self::Error>; + /// Future for fetching block body. + type Body: IntoFuture; + /// Future for fetching block receipts. + type Receipts: IntoFuture, Error = Self::Error>; + /// Future for fetching epoch transition + type Transition: IntoFuture, Error = Self::Error>; - /// Fetch a block body. - fn block_body(&self, header: &Header) -> Self::Body; + /// Fetch a block body. + fn block_body(&self, header: &Header) -> Self::Body; - /// Fetch block receipts. - fn block_receipts(&self, header: &Header) -> Self::Receipts; + /// Fetch block receipts. + fn block_receipts(&self, header: &Header) -> Self::Receipts; - /// Fetch epoch transition proof at given header. - fn epoch_transition( - &self, - _hash: H256, - _engine: Arc, - _checker: Arc> - ) -> Self::Transition; + /// Fetch epoch transition proof at given header. + fn epoch_transition( + &self, + _hash: H256, + _engine: Arc, + _checker: Arc>, + ) -> Self::Transition; } /// Fetcher implementation which cannot fetch anything. pub struct Unavailable; /// Create a fetcher which has all data unavailable. 
-pub fn unavailable() -> Unavailable { Unavailable } +pub fn unavailable() -> Unavailable { + Unavailable +} impl ChainDataFetcher for Unavailable { - type Error = &'static str; + type Error = &'static str; - type Body = Result; - type Receipts = Result, &'static str>; - type Transition = Result, &'static str>; + type Body = Result; + type Receipts = Result, &'static str>; + type Transition = Result, &'static str>; - fn block_body(&self, _header: &Header) -> Self::Body { - Err("fetching block bodies unavailable") - } + fn block_body(&self, _header: &Header) -> Self::Body { + Err("fetching block bodies unavailable") + } - fn block_receipts(&self, _header: &Header) -> Self::Receipts { - Err("fetching block receipts unavailable") - } + fn block_receipts(&self, _header: &Header) -> Self::Receipts { + Err("fetching block receipts unavailable") + } - fn epoch_transition( - &self, - _hash: H256, - _engine: Arc, - _checker: Arc> - ) -> Self::Transition { - Err("fetching epoch transition proofs unavailable") - } + fn epoch_transition( + &self, + _hash: H256, + _engine: Arc, + _checker: Arc>, + ) -> Self::Transition { + Err("fetching epoch transition proofs unavailable") + } } diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index b72a099d0..0946712cb 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -25,24 +25,22 @@ //! - It stores only headers (and a pruned subset of them) //! - To allow for flexibility in the database layout.. 
-use std::collections::BTreeMap; -use std::sync::Arc; +use std::{collections::BTreeMap, sync::Arc}; use cache::Cache; use cht; -use common_types::block_status::BlockStatus; -use common_types::encoded; -use common_types::header::Header; -use common_types::ids::BlockId; -use ethcore::engines::epoch::{Transition as EpochTransition, PendingTransition as PendingEpochTransition}; -use ethcore::error::{Error, EthcoreResult, ErrorKind as EthcoreErrorKind, BlockError}; -use ethcore::spec::{Spec, SpecHardcodedSync}; +use common_types::{block_status::BlockStatus, encoded, header::Header, ids::BlockId}; +use ethcore::{ + engines::epoch::{PendingTransition as PendingEpochTransition, Transition as EpochTransition}, + error::{BlockError, Error, ErrorKind as EthcoreErrorKind, EthcoreResult}, + spec::{Spec, SpecHardcodedSync}, +}; use ethereum_types::{H256, H264, U256}; +use fastmap::H256FastMap; use heapsize::HeapSizeOf; use kvdb::{DBTransaction, KeyValueDB}; use parking_lot::{Mutex, RwLock}; -use fastmap::H256FastMap; -use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; use smallvec::SmallVec; /// Store at least this many candidate headers at all times. @@ -59,459 +57,492 @@ const LAST_CANONICAL_TRANSITION: &[u8] = &*b"canonical_transition"; /// Information about a block. #[derive(Debug, Clone)] pub struct BlockDescriptor { - /// The block's hash - pub hash: H256, - /// The block's number - pub number: u64, - /// The block's total difficulty. - pub total_difficulty: U256, + /// The block's hash + pub hash: H256, + /// The block's number + pub number: u64, + /// The block's total difficulty. 
+ pub total_difficulty: U256, } // best block data #[derive(RlpEncodable, RlpDecodable)] struct BestAndLatest { - best_num: u64, - latest_num: u64 + best_num: u64, + latest_num: u64, } impl BestAndLatest { - fn new(best_num: u64, latest_num: u64) -> Self { - BestAndLatest { - best_num, - latest_num, - } - } + fn new(best_num: u64, latest_num: u64) -> Self { + BestAndLatest { + best_num, + latest_num, + } + } } // candidate block description. struct Candidate { - hash: H256, - parent_hash: H256, - total_difficulty: U256, + hash: H256, + parent_hash: H256, + total_difficulty: U256, } struct Entry { - candidates: SmallVec<[Candidate; 3]>, // 3 arbitrarily chosen - canonical_hash: H256, + candidates: SmallVec<[Candidate; 3]>, // 3 arbitrarily chosen + canonical_hash: H256, } impl HeapSizeOf for Entry { - fn heap_size_of_children(&self) -> usize { - if self.candidates.spilled() { - self.candidates.capacity() * ::std::mem::size_of::() - } else { - 0 - } - } + fn heap_size_of_children(&self) -> usize { + if self.candidates.spilled() { + self.candidates.capacity() * ::std::mem::size_of::() + } else { + 0 + } + } } impl Encodable for Entry { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(self.candidates.len()); + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.candidates.len()); - for candidate in &self.candidates { - s.begin_list(3) - .append(&candidate.hash) - .append(&candidate.parent_hash) - .append(&candidate.total_difficulty); - } - } + for candidate in &self.candidates { + s.begin_list(3) + .append(&candidate.hash) + .append(&candidate.parent_hash) + .append(&candidate.total_difficulty); + } + } } impl Decodable for Entry { - fn decode(rlp: &Rlp) -> Result { - let mut candidates = SmallVec::<[Candidate; 3]>::new(); + fn decode(rlp: &Rlp) -> Result { + let mut candidates = SmallVec::<[Candidate; 3]>::new(); - for item in rlp.iter() { - candidates.push(Candidate { - hash: item.val_at(0)?, - parent_hash: item.val_at(1)?, - total_difficulty: 
item.val_at(2)?, - }) - } + for item in rlp.iter() { + candidates.push(Candidate { + hash: item.val_at(0)?, + parent_hash: item.val_at(1)?, + total_difficulty: item.val_at(2)?, + }) + } - if candidates.is_empty() { return Err(DecoderError::Custom("Empty candidates vector submitted.")) } + if candidates.is_empty() { + return Err(DecoderError::Custom("Empty candidates vector submitted.")); + } - // rely on the invariant that the canonical entry is always first. - let canon_hash = candidates[0].hash; - Ok(Entry { - candidates, - canonical_hash: canon_hash, - }) - } + // rely on the invariant that the canonical entry is always first. + let canon_hash = candidates[0].hash; + Ok(Entry { + candidates, + canonical_hash: canon_hash, + }) + } } fn cht_key(number: u64) -> String { - format!("{:08x}_canonical", number) + format!("{:08x}_canonical", number) } fn era_key(number: u64) -> String { - format!("candidates_{}", number) + format!("candidates_{}", number) } fn pending_transition_key(block_hash: H256) -> H264 { - const LEADING: u8 = 1; + const LEADING: u8 = 1; - let mut key = H264::default(); + let mut key = H264::default(); - key[0] = LEADING; - key.0[1..].copy_from_slice(&block_hash.0[..]); + key[0] = LEADING; + key.0[1..].copy_from_slice(&block_hash.0[..]); - key + key } fn transition_key(block_hash: H256) -> H264 { - const LEADING: u8 = 2; + const LEADING: u8 = 2; - let mut key = H264::default(); + let mut key = H264::default(); - key[0] = LEADING; - key.0[1..].copy_from_slice(&block_hash.0[..]); + key[0] = LEADING; + key.0[1..].copy_from_slice(&block_hash.0[..]); - key + key } // encode last canonical transition entry: header and proof. fn encode_canonical_transition(header: &Header, proof: &[u8]) -> Vec { - let mut stream = RlpStream::new_list(2); - stream.append(header).append(&proof); - stream.out() + let mut stream = RlpStream::new_list(2); + stream.append(header).append(&proof); + stream.out() } // decode last canonical transition entry. 
fn decode_canonical_transition(t: &[u8]) -> Result<(Header, &[u8]), DecoderError> { - let rlp = Rlp::new(t); + let rlp = Rlp::new(t); - Ok((rlp.val_at(0)?, rlp.at(1)?.data()?)) + Ok((rlp.val_at(0)?, rlp.at(1)?.data()?)) } /// Pending changes from `insert` to be applied after the database write has finished. pub struct PendingChanges { - best_block: Option, // new best block. + best_block: Option, // new best block. } /// Whether or not the hardcoded sync feature is allowed. pub enum HardcodedSync { - Allow, - Deny, + Allow, + Deny, } /// Header chain. See module docs for more details. pub struct HeaderChain { - genesis_header: encoded::Header, // special-case the genesis. - candidates: RwLock>, - best_block: RwLock, - live_epoch_proofs: RwLock>, - db: Arc, - col: Option, - cache: Arc>, + genesis_header: encoded::Header, // special-case the genesis. + candidates: RwLock>, + best_block: RwLock, + live_epoch_proofs: RwLock>, + db: Arc, + col: Option, + cache: Arc>, } impl HeaderChain { - /// Create a new header chain given this genesis block and database to read from. - pub fn new( - db: Arc, - col: Option, - spec: &Spec, - cache: Arc>, - allow_hs: HardcodedSync, - ) -> Result { - let mut live_epoch_proofs = ::std::collections::HashMap::default(); + /// Create a new header chain given this genesis block and database to read from. + pub fn new( + db: Arc, + col: Option, + spec: &Spec, + cache: Arc>, + allow_hs: HardcodedSync, + ) -> Result { + let mut live_epoch_proofs = ::std::collections::HashMap::default(); - let genesis = ::rlp::encode(&spec.genesis_header()); - let decoded_header = spec.genesis_header(); + let genesis = ::rlp::encode(&spec.genesis_header()); + let decoded_header = spec.genesis_header(); - let chain = if let Some(current) = db.get(col, CURRENT_KEY)? { - let curr : BestAndLatest = ::rlp::decode(¤t).expect("decoding db value failed"); + let chain = if let Some(current) = db.get(col, CURRENT_KEY)? 
{ + let curr: BestAndLatest = ::rlp::decode(¤t).expect("decoding db value failed"); - let mut cur_number = curr.latest_num; - let mut candidates = BTreeMap::new(); + let mut cur_number = curr.latest_num; + let mut candidates = BTreeMap::new(); - // load all era entries, referenced headers within them, - // and live epoch proofs. - while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? { - let entry: Entry = ::rlp::decode(&entry).expect("decoding db value failed"); - trace!(target: "chain", "loaded header chain entry for era {} with {} candidates", + // load all era entries, referenced headers within them, + // and live epoch proofs. + while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? { + let entry: Entry = ::rlp::decode(&entry).expect("decoding db value failed"); + trace!(target: "chain", "loaded header chain entry for era {} with {} candidates", cur_number, entry.candidates.len()); - for c in &entry.candidates { - let key = transition_key(c.hash); + for c in &entry.candidates { + let key = transition_key(c.hash); - if let Some(proof) = db.get(col, &*key)? { - live_epoch_proofs.insert(c.hash, EpochTransition { - block_hash: c.hash, - block_number: cur_number, - proof: proof.into_vec(), - }); - } - } - candidates.insert(cur_number, entry); + if let Some(proof) = db.get(col, &*key)? { + live_epoch_proofs.insert( + c.hash, + EpochTransition { + block_hash: c.hash, + block_number: cur_number, + proof: proof.into_vec(), + }, + ); + } + } + candidates.insert(cur_number, entry); - cur_number -= 1; - } + cur_number -= 1; + } - // fill best block block descriptor. - let best_block = { - let era = match candidates.get(&curr.best_num) { - Some(era) => era, - None => bail!("Database corrupt: highest block referenced but no data."), - }; + // fill best block block descriptor. 
+ let best_block = { + let era = match candidates.get(&curr.best_num) { + Some(era) => era, + None => bail!("Database corrupt: highest block referenced but no data."), + }; - let best = &era.candidates[0]; - BlockDescriptor { - hash: best.hash, - number: curr.best_num, - total_difficulty: best.total_difficulty, - } - }; + let best = &era.candidates[0]; + BlockDescriptor { + hash: best.hash, + number: curr.best_num, + total_difficulty: best.total_difficulty, + } + }; - HeaderChain { - genesis_header: encoded::Header::new(genesis), - best_block: RwLock::new(best_block), - candidates: RwLock::new(candidates), - live_epoch_proofs: RwLock::new(live_epoch_proofs), - db, - col, - cache, - } + HeaderChain { + genesis_header: encoded::Header::new(genesis), + best_block: RwLock::new(best_block), + candidates: RwLock::new(candidates), + live_epoch_proofs: RwLock::new(live_epoch_proofs), + db, + col, + cache, + } + } else { + let chain = HeaderChain { + genesis_header: encoded::Header::new(genesis), + best_block: RwLock::new(BlockDescriptor { + hash: decoded_header.hash(), + number: 0, + total_difficulty: *decoded_header.difficulty(), + }), + candidates: RwLock::new(BTreeMap::new()), + live_epoch_proofs: RwLock::new(live_epoch_proofs), + db: db.clone(), + col, + cache, + }; - } else { - let chain = HeaderChain { - genesis_header: encoded::Header::new(genesis), - best_block: RwLock::new(BlockDescriptor { - hash: decoded_header.hash(), - number: 0, - total_difficulty: *decoded_header.difficulty(), - }), - candidates: RwLock::new(BTreeMap::new()), - live_epoch_proofs: RwLock::new(live_epoch_proofs), - db: db.clone(), - col, - cache, - }; + // insert the hardcoded sync into the database. + if let (&Some(ref hardcoded_sync), HardcodedSync::Allow) = + (&spec.hardcoded_sync, allow_hs) + { + let mut batch = db.transaction(); - // insert the hardcoded sync into the database. 
- if let (&Some(ref hardcoded_sync), HardcodedSync::Allow) = (&spec.hardcoded_sync, allow_hs) { - let mut batch = db.transaction(); + // insert the hardcoded CHT roots into the database. + for (cht_num, cht_root) in hardcoded_sync.chts.iter().enumerate() { + batch.put( + col, + cht_key(cht_num as u64).as_bytes(), + &::rlp::encode(cht_root), + ); + } - // insert the hardcoded CHT roots into the database. - for (cht_num, cht_root) in hardcoded_sync.chts.iter().enumerate() { - batch.put(col, cht_key(cht_num as u64).as_bytes(), &::rlp::encode(cht_root)); - } + let decoded_header = hardcoded_sync.header.decode()?; + let decoded_header_num = decoded_header.number(); - let decoded_header = hardcoded_sync.header.decode()?; - let decoded_header_num = decoded_header.number(); + // write the block in the DB. + info!(target: "chain", "Inserting hardcoded block #{} in chain", decoded_header_num); + let pending = chain.insert_with_td( + &mut batch, + &decoded_header, + hardcoded_sync.total_difficulty, + None, + )?; - // write the block in the DB. - info!(target: "chain", "Inserting hardcoded block #{} in chain", decoded_header_num); - let pending = chain.insert_with_td(&mut batch, &decoded_header, - hardcoded_sync.total_difficulty, None)?; - - // check that we have enough hardcoded CHT roots. avoids panicking later. - let cht_num = cht::block_to_cht_number(decoded_header_num - 1) - .expect("specs provided a hardcoded block with height 0"); - if cht_num >= hardcoded_sync.chts.len() as u64 { - warn!(target: "chain", "specs didn't provide enough CHT roots for its \ + // check that we have enough hardcoded CHT roots. avoids panicking later. 
+ let cht_num = cht::block_to_cht_number(decoded_header_num - 1) + .expect("specs provided a hardcoded block with height 0"); + if cht_num >= hardcoded_sync.chts.len() as u64 { + warn!(target: "chain", "specs didn't provide enough CHT roots for its \ hardcoded block ; falling back to non-hardcoded sync \ mode"); - } else { - db.write_buffered(batch); - chain.apply_pending(pending); - } - } + } else { + db.write_buffered(batch); + chain.apply_pending(pending); + } + } - chain - }; + chain + }; - // instantiate genesis epoch data if it doesn't exist. - if chain.db.get(col, LAST_CANONICAL_TRANSITION)?.is_none() { - let genesis_data = spec.genesis_epoch_data()?; + // instantiate genesis epoch data if it doesn't exist. + if chain.db.get(col, LAST_CANONICAL_TRANSITION)?.is_none() { + let genesis_data = spec.genesis_epoch_data()?; - { - let mut batch = chain.db.transaction(); - let data = encode_canonical_transition(&decoded_header, &genesis_data); - batch.put_vec(col, LAST_CANONICAL_TRANSITION, data); - chain.db.write(batch)?; - } - } + { + let mut batch = chain.db.transaction(); + let data = encode_canonical_transition(&decoded_header, &genesis_data); + batch.put_vec(col, LAST_CANONICAL_TRANSITION, data); + chain.db.write(batch)?; + } + } - Ok(chain) - } + Ok(chain) + } - /// Insert a pre-verified header. - /// - /// This blindly trusts that the data given to it is sensible. - /// Returns a set of pending changes to be applied with `apply_pending` - /// before the next call to insert and after the transaction has been written. - /// - /// If the block is an epoch transition, provide the transition along with - /// the header. - pub fn insert( - &self, - transaction: &mut DBTransaction, - header: &Header, - transition_proof: Option>, - ) -> EthcoreResult { - self.insert_inner(transaction, header, None, transition_proof) - } + /// Insert a pre-verified header. + /// + /// This blindly trusts that the data given to it is sensible. 
+ /// Returns a set of pending changes to be applied with `apply_pending` + /// before the next call to insert and after the transaction has been written. + /// + /// If the block is an epoch transition, provide the transition along with + /// the header. + pub fn insert( + &self, + transaction: &mut DBTransaction, + header: &Header, + transition_proof: Option>, + ) -> EthcoreResult { + self.insert_inner(transaction, header, None, transition_proof) + } - /// Insert a pre-verified header, with a known total difficulty. Similary to `insert`. - /// - /// This blindly trusts that the data given to it is sensible. - pub fn insert_with_td( - &self, - transaction: &mut DBTransaction, - header: &Header, - total_difficulty: U256, - transition_proof: Option>, - ) -> EthcoreResult { - self.insert_inner(transaction, header, Some(total_difficulty), transition_proof) - } + /// Insert a pre-verified header, with a known total difficulty. Similary to `insert`. + /// + /// This blindly trusts that the data given to it is sensible. 
+ pub fn insert_with_td( + &self, + transaction: &mut DBTransaction, + header: &Header, + total_difficulty: U256, + transition_proof: Option>, + ) -> EthcoreResult { + self.insert_inner( + transaction, + header, + Some(total_difficulty), + transition_proof, + ) + } - fn insert_inner( - &self, - transaction: &mut DBTransaction, - header: &Header, - total_difficulty: Option, - transition_proof: Option>, - ) -> EthcoreResult { - let hash = header.hash(); - let number = header.number(); - let parent_hash = *header.parent_hash(); - let transition = transition_proof.map(|proof| EpochTransition { - block_hash: hash, - block_number: number, - proof, - }); + fn insert_inner( + &self, + transaction: &mut DBTransaction, + header: &Header, + total_difficulty: Option, + transition_proof: Option>, + ) -> EthcoreResult { + let hash = header.hash(); + let number = header.number(); + let parent_hash = *header.parent_hash(); + let transition = transition_proof.map(|proof| EpochTransition { + block_hash: hash, + block_number: number, + proof, + }); - let mut pending = PendingChanges { - best_block: None, - }; + let mut pending = PendingChanges { best_block: None }; - // hold candidates the whole time to guard import order. - let mut candidates = self.candidates.write(); + // hold candidates the whole time to guard import order. + let mut candidates = self.candidates.write(); - // find total difficulty. - let total_difficulty = match total_difficulty { - Some(td) => td, - None => { - let parent_td = - if number == 1 { - self.genesis_header.difficulty() - } else { - candidates.get(&(number - 1)) - .and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash)) - .map(|c| c.total_difficulty) - .ok_or_else(|| BlockError::UnknownParent(parent_hash)) - .map_err(EthcoreErrorKind::Block)? - }; + // find total difficulty. 
+ let total_difficulty = match total_difficulty { + Some(td) => td, + None => { + let parent_td = if number == 1 { + self.genesis_header.difficulty() + } else { + candidates + .get(&(number - 1)) + .and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash)) + .map(|c| c.total_difficulty) + .ok_or_else(|| BlockError::UnknownParent(parent_hash)) + .map_err(EthcoreErrorKind::Block)? + }; - parent_td + *header.difficulty() - }, - }; + parent_td + *header.difficulty() + } + }; - // insert headers and candidates entries and write era to disk. - { - let cur_era = candidates.entry(number) - .or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash }); - cur_era.candidates.push(Candidate { - hash, - parent_hash, - total_difficulty, - }); + // insert headers and candidates entries and write era to disk. + { + let cur_era = candidates.entry(number).or_insert_with(|| Entry { + candidates: SmallVec::new(), + canonical_hash: hash, + }); + cur_era.candidates.push(Candidate { + hash, + parent_hash, + total_difficulty, + }); - // fix ordering of era before writing. - if total_difficulty > cur_era.candidates[0].total_difficulty { - let cur_pos = cur_era.candidates.len() - 1; - cur_era.candidates.swap(cur_pos, 0); - cur_era.canonical_hash = hash; - } + // fix ordering of era before writing. 
+ if total_difficulty > cur_era.candidates[0].total_difficulty { + let cur_pos = cur_era.candidates.len() - 1; + cur_era.candidates.swap(cur_pos, 0); + cur_era.canonical_hash = hash; + } - transaction.put(self.col, era_key(number).as_bytes(), &::rlp::encode(&*cur_era)) - } + transaction.put( + self.col, + era_key(number).as_bytes(), + &::rlp::encode(&*cur_era), + ) + } - if let Some(transition) = transition { - transaction.put(self.col, &*transition_key(hash), &transition.proof); - self.live_epoch_proofs.write().insert(hash, transition); - } + if let Some(transition) = transition { + transaction.put(self.col, &*transition_key(hash), &transition.proof); + self.live_epoch_proofs.write().insert(hash, transition); + } - let raw = header.encoded().into_inner(); - transaction.put_vec(self.col, &hash[..], raw); + let raw = header.encoded().into_inner(); + transaction.put_vec(self.col, &hash[..], raw); - // TODO: For engines when required, use cryptoeconomic guarantees. - let (best_num, is_new_best) = { - let cur_best = self.best_block.read(); - if cur_best.total_difficulty < total_difficulty { - (number, true) - } else { - (cur_best.number, false) - } - }; + // TODO: For engines when required, use cryptoeconomic guarantees. + let (best_num, is_new_best) = { + let cur_best = self.best_block.read(); + if cur_best.total_difficulty < total_difficulty { + (number, true) + } else { + (cur_best.number, false) + } + }; - // reorganize ancestors so canonical entries are first in their - // respective candidates vectors. - if is_new_best { - let mut canon_hash = hash; - for (&height, entry) in candidates.iter_mut().rev().skip_while(|&(height, _)| *height > number) { - if height != number && entry.canonical_hash == canon_hash { break; } + // reorganize ancestors so canonical entries are first in their + // respective candidates vectors. 
+ if is_new_best { + let mut canon_hash = hash; + for (&height, entry) in candidates + .iter_mut() + .rev() + .skip_while(|&(height, _)| *height > number) + { + if height != number && entry.canonical_hash == canon_hash { + break; + } - trace!(target: "chain", "Setting new canonical block {} for block height {}", + trace!(target: "chain", "Setting new canonical block {} for block height {}", canon_hash, height); - let canon_pos = entry.candidates.iter().position(|x| x.hash == canon_hash) + let canon_pos = entry.candidates.iter().position(|x| x.hash == canon_hash) .expect("blocks are only inserted if parent is present; or this is the block we just added; qed"); - // move the new canonical entry to the front and set the - // era's canonical hash. - entry.candidates.swap(0, canon_pos); - entry.canonical_hash = canon_hash; + // move the new canonical entry to the front and set the + // era's canonical hash. + entry.candidates.swap(0, canon_pos); + entry.canonical_hash = canon_hash; - // what about reorgs > cht::SIZE + HISTORY? - // resetting to the last block of a given CHT should be possible. - canon_hash = entry.candidates[0].parent_hash; + // what about reorgs > cht::SIZE + HISTORY? + // resetting to the last block of a given CHT should be possible. 
+ canon_hash = entry.candidates[0].parent_hash; - // write altered era to disk - if height != number { - let rlp_era = ::rlp::encode(&*entry); - transaction.put(self.col, era_key(height).as_bytes(), &rlp_era); - } - } + // write altered era to disk + if height != number { + let rlp_era = ::rlp::encode(&*entry); + transaction.put(self.col, era_key(height).as_bytes(), &rlp_era); + } + } - trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty); - pending.best_block = Some(BlockDescriptor { - hash, - number, - total_difficulty, - }); + trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty); + pending.best_block = Some(BlockDescriptor { + hash, + number, + total_difficulty, + }); - // produce next CHT root if it's time. - let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); - if earliest_era + HISTORY + cht::SIZE <= number { - let cht_num = cht::block_to_cht_number(earliest_era) - .expect("fails only for number == 0; genesis never imported; qed"); + // produce next CHT root if it's time. + let earliest_era = *candidates + .keys() + .next() + .expect("at least one era just created; qed"); + if earliest_era + HISTORY + cht::SIZE <= number { + let cht_num = cht::block_to_cht_number(earliest_era) + .expect("fails only for number == 0; genesis never imported; qed"); - let mut last_canonical_transition = None; - let cht_root = { - let mut i = earliest_era; - let mut live_epoch_proofs = self.live_epoch_proofs.write(); + let mut last_canonical_transition = None; + let cht_root = { + let mut i = earliest_era; + let mut live_epoch_proofs = self.live_epoch_proofs.write(); - // iterable function which removes the candidates as it goes - // along. this will only be called until the CHT is complete. 
- let iter = || { - let era_entry = candidates.remove(&i) - .expect("all eras are sequential with no gaps; qed"); - transaction.delete(self.col, era_key(i).as_bytes()); + // iterable function which removes the candidates as it goes + // along. this will only be called until the CHT is complete. + let iter = || { + let era_entry = candidates + .remove(&i) + .expect("all eras are sequential with no gaps; qed"); + transaction.delete(self.col, era_key(i).as_bytes()); - i += 1; + i += 1; - // prune old blocks and epoch proofs. - for ancient in &era_entry.candidates { - let maybe_transition = live_epoch_proofs.remove(&ancient.hash); - if let Some(epoch_transition) = maybe_transition { - transaction.delete(self.col, &*transition_key(ancient.hash)); + // prune old blocks and epoch proofs. + for ancient in &era_entry.candidates { + let maybe_transition = live_epoch_proofs.remove(&ancient.hash); + if let Some(epoch_transition) = maybe_transition { + transaction.delete(self.col, &*transition_key(ancient.hash)); - if ancient.hash == era_entry.canonical_hash { - last_canonical_transition = match self.db.get(self.col, &ancient.hash) { + if ancient.hash == era_entry.canonical_hash { + last_canonical_transition = match self.db.get(self.col, &ancient.hash) { Err(e) => { warn!(target: "chain", "Error reading from DB: {}\n ", e); @@ -523,703 +554,794 @@ impl HeaderChain { ::rlp::decode(&header).expect("decoding value from db failed") )), }; - } - } + } + } - transaction.delete(self.col, &ancient.hash); - } + transaction.delete(self.col, &ancient.hash); + } - let canon = &era_entry.candidates[0]; - (canon.hash, canon.total_difficulty) - }; - cht::compute_root(cht_num, ::itertools::repeat_call(iter)) - .expect("fails only when too few items; this is checked; qed") - }; + let canon = &era_entry.candidates[0]; + (canon.hash, canon.total_difficulty) + }; + cht::compute_root(cht_num, ::itertools::repeat_call(iter)) + .expect("fails only when too few items; this is checked; qed") + }; - // 
write the CHT root to the database. - debug!(target: "chain", "Produced CHT {} root: {:?}", cht_num, cht_root); - transaction.put(self.col, cht_key(cht_num).as_bytes(), &::rlp::encode(&cht_root)); + // write the CHT root to the database. + debug!(target: "chain", "Produced CHT {} root: {:?}", cht_num, cht_root); + transaction.put( + self.col, + cht_key(cht_num).as_bytes(), + &::rlp::encode(&cht_root), + ); - // update the last canonical transition proof - if let Some((epoch_transition, header)) = last_canonical_transition { - let x = encode_canonical_transition(&header, &epoch_transition.proof); - transaction.put_vec(self.col, LAST_CANONICAL_TRANSITION, x); - } - } - } + // update the last canonical transition proof + if let Some((epoch_transition, header)) = last_canonical_transition { + let x = encode_canonical_transition(&header, &epoch_transition.proof); + transaction.put_vec(self.col, LAST_CANONICAL_TRANSITION, x); + } + } + } - // write the best and latest eras to the database. - { - let latest_num = *candidates.iter().rev().next().expect("at least one era just inserted; qed").0; - let curr = BestAndLatest::new(best_num, latest_num); - transaction.put(self.col, CURRENT_KEY, &::rlp::encode(&curr)) - } - Ok(pending) - } + // write the best and latest eras to the database. + { + let latest_num = *candidates + .iter() + .rev() + .next() + .expect("at least one era just inserted; qed") + .0; + let curr = BestAndLatest::new(best_num, latest_num); + transaction.put(self.col, CURRENT_KEY, &::rlp::encode(&curr)) + } + Ok(pending) + } - /// Generates the specifications for hardcoded sync. This is typically only called manually - /// from time to time by a Parity developer in order to update the chain specifications. - /// - /// Returns `None` if we are at the genesis block, or if an error happens . - pub fn read_hardcoded_sync(&self) -> Result, Error> { - let mut chts = Vec::new(); - let mut cht_num = 0; + /// Generates the specifications for hardcoded sync. 
This is typically only called manually + /// from time to time by a Parity developer in order to update the chain specifications. + /// + /// Returns `None` if we are at the genesis block, or if an error happens . + pub fn read_hardcoded_sync(&self) -> Result, Error> { + let mut chts = Vec::new(); + let mut cht_num = 0; - loop { - let cht = match self.cht_root(cht_num) { - Some(cht) => cht, - None if cht_num != 0 => { - // end of the iteration - let h_num = 1 + cht_num as u64 * cht::SIZE; - let header = if let Some(header) = self.block_header(BlockId::Number(h_num)) { - header - } else { - let msg = format!("header of block #{} not found in DB ; database in an \ - inconsistent state", h_num); - bail!(msg); - }; + loop { + let cht = match self.cht_root(cht_num) { + Some(cht) => cht, + None if cht_num != 0 => { + // end of the iteration + let h_num = 1 + cht_num as u64 * cht::SIZE; + let header = if let Some(header) = self.block_header(BlockId::Number(h_num)) { + header + } else { + let msg = format!( + "header of block #{} not found in DB ; database in an \ + inconsistent state", + h_num + ); + bail!(msg); + }; - let decoded = header.decode().expect("decoding db value failed"); + let decoded = header.decode().expect("decoding db value failed"); - let entry: Entry = { - let bytes = self.db.get(self.col, era_key(h_num).as_bytes())? - .ok_or_else(|| { - format!("entry for era #{} not found in DB ; database \ - in an inconsistent state", h_num) - })?; - ::rlp::decode(&bytes).expect("decoding db value failed") - }; + let entry: Entry = { + let bytes = self + .db + .get(self.col, era_key(h_num).as_bytes())? 
+ .ok_or_else(|| { + format!( + "entry for era #{} not found in DB ; database \ + in an inconsistent state", + h_num + ) + })?; + ::rlp::decode(&bytes).expect("decoding db value failed") + }; - let total_difficulty = entry.candidates.iter() - .find(|c| c.hash == decoded.hash()) - .ok_or_else(|| { - "no candidate matching block found in DB ; database in an \ + let total_difficulty = entry + .candidates + .iter() + .find(|c| c.hash == decoded.hash()) + .ok_or_else(|| { + "no candidate matching block found in DB ; database in an \ inconsistent state" - })? - .total_difficulty; + })? + .total_difficulty; - break Ok(Some(SpecHardcodedSync { - header, - total_difficulty, - chts, - })); - }, - None => { - break Ok(None); - }, - }; + break Ok(Some(SpecHardcodedSync { + header, + total_difficulty, + chts, + })); + } + None => { + break Ok(None); + } + }; - chts.push(cht); - cht_num += 1; - } - } + chts.push(cht); + cht_num += 1; + } + } - /// Apply pending changes from a previous `insert` operation. - /// Must be done before the next `insert` call. - pub fn apply_pending(&self, pending: PendingChanges) { - if let Some(best_block) = pending.best_block { - *self.best_block.write() = best_block; - } - } + /// Apply pending changes from a previous `insert` operation. + /// Must be done before the next `insert` call. + pub fn apply_pending(&self, pending: PendingChanges) { + if let Some(best_block) = pending.best_block { + *self.best_block.write() = best_block; + } + } - /// Get a block's hash by ID. In the case of query by number, only canonical results - /// will be returned. 
- pub fn block_hash(&self, id: BlockId) -> Option { - match id { - BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_hash()), - BlockId::Hash(hash) => Some(hash), - BlockId::Number(num) => { - if self.best_block.read().number < num { return None } - self.candidates.read().get(&num).map(|entry| entry.canonical_hash) - } - BlockId::Latest => { - Some(self.best_block.read().hash) - } - } - } + /// Get a block's hash by ID. In the case of query by number, only canonical results + /// will be returned. + pub fn block_hash(&self, id: BlockId) -> Option { + match id { + BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_hash()), + BlockId::Hash(hash) => Some(hash), + BlockId::Number(num) => { + if self.best_block.read().number < num { + return None; + } + self.candidates + .read() + .get(&num) + .map(|entry| entry.canonical_hash) + } + BlockId::Latest => Some(self.best_block.read().hash), + } + } - /// Get a block header. In the case of query by number, only canonical blocks - /// will be returned. - pub fn block_header(&self, id: BlockId) -> Option { - let load_from_db = |hash: H256| { - let mut cache = self.cache.lock(); + /// Get a block header. In the case of query by number, only canonical blocks + /// will be returned. 
+ pub fn block_header(&self, id: BlockId) -> Option { + let load_from_db = |hash: H256| { + let mut cache = self.cache.lock(); - match cache.block_header(&hash) { - Some(header) => Some(header), - None => { - match self.db.get(self.col, &hash) { - Ok(db_value) => { - db_value.map(|x| x.into_vec()).map(encoded::Header::new) - .and_then(|header| { - cache.insert_block_header(hash, header.clone()); - Some(header) - }) - }, - Err(e) => { - warn!(target: "chain", "Failed to read from database: {}", e); - None - } - } - } - } - }; + match cache.block_header(&hash) { + Some(header) => Some(header), + None => match self.db.get(self.col, &hash) { + Ok(db_value) => db_value + .map(|x| x.into_vec()) + .map(encoded::Header::new) + .and_then(|header| { + cache.insert_block_header(hash, header.clone()); + Some(header) + }), + Err(e) => { + warn!(target: "chain", "Failed to read from database: {}", e); + None + } + }, + } + }; - match id { - BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.clone()), - BlockId::Hash(hash) if hash == self.genesis_hash() => { Some(self.genesis_header.clone()) } - BlockId::Hash(hash) => load_from_db(hash), - BlockId::Number(num) => { - if self.best_block.read().number < num { return None } + match id { + BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.clone()), + BlockId::Hash(hash) if hash == self.genesis_hash() => Some(self.genesis_header.clone()), + BlockId::Hash(hash) => load_from_db(hash), + BlockId::Number(num) => { + if self.best_block.read().number < num { + return None; + } - self.candidates.read().get(&num).map(|entry| entry.canonical_hash) - .and_then(load_from_db) - } - BlockId::Latest => { - // hold candidates hear to prevent deletion of the header - // as we read it. 
- let _candidates = self.candidates.read(); - let hash = { - let best = self.best_block.read(); - if best.number == 0 { - return Some(self.genesis_header.clone()) - } + self.candidates + .read() + .get(&num) + .map(|entry| entry.canonical_hash) + .and_then(load_from_db) + } + BlockId::Latest => { + // hold candidates hear to prevent deletion of the header + // as we read it. + let _candidates = self.candidates.read(); + let hash = { + let best = self.best_block.read(); + if best.number == 0 { + return Some(self.genesis_header.clone()); + } - best.hash - }; + best.hash + }; - load_from_db(hash) - } - } - } + load_from_db(hash) + } + } + } - /// Get a block's chain score. - /// Returns nothing for non-canonical blocks. - pub fn score(&self, id: BlockId) -> Option { - let genesis_hash = self.genesis_hash(); - match id { - BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.difficulty()), - BlockId::Hash(hash) if hash == genesis_hash => Some(self.genesis_header.difficulty()), - BlockId::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { - Some(header) => self.candidates.read().get(&header.number()) - .and_then(|era| era.candidates.iter().find(|e| e.hash == hash)) - .map(|c| c.total_difficulty), - None => None, - }, - BlockId::Number(num) => { - let candidates = self.candidates.read(); - if self.best_block.read().number < num { return None } - candidates.get(&num).map(|era| era.candidates[0].total_difficulty) - } - BlockId::Latest => Some(self.best_block.read().total_difficulty) - } - } + /// Get a block's chain score. + /// Returns nothing for non-canonical blocks. 
+ pub fn score(&self, id: BlockId) -> Option { + let genesis_hash = self.genesis_hash(); + match id { + BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.difficulty()), + BlockId::Hash(hash) if hash == genesis_hash => Some(self.genesis_header.difficulty()), + BlockId::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { + Some(header) => self + .candidates + .read() + .get(&header.number()) + .and_then(|era| era.candidates.iter().find(|e| e.hash == hash)) + .map(|c| c.total_difficulty), + None => None, + }, + BlockId::Number(num) => { + let candidates = self.candidates.read(); + if self.best_block.read().number < num { + return None; + } + candidates + .get(&num) + .map(|era| era.candidates[0].total_difficulty) + } + BlockId::Latest => Some(self.best_block.read().total_difficulty), + } + } - /// Get the best block's header. - pub fn best_header(&self) -> encoded::Header { - self.block_header(BlockId::Latest).expect("Header for best block always stored; qed") - } + /// Get the best block's header. + pub fn best_header(&self) -> encoded::Header { + self.block_header(BlockId::Latest) + .expect("Header for best block always stored; qed") + } - /// Get an iterator over a block and its ancestry. - pub fn ancestry_iter(&self, start: BlockId) -> AncestryIter { - AncestryIter { - next: self.block_header(start), - chain: self, - } - } + /// Get an iterator over a block and its ancestry. + pub fn ancestry_iter(&self, start: BlockId) -> AncestryIter { + AncestryIter { + next: self.block_header(start), + chain: self, + } + } - /// Get the nth CHT root, if it's been computed. - /// - /// CHT root 0 is from block `1..2048`. - /// CHT root 1 is from block `2049..4096` - /// and so on. - /// - /// This is because it's assumed that the genesis hash is known, - /// so including it within a CHT would be redundant. 
- pub fn cht_root(&self, n: usize) -> Option { - match self.db.get(self.col, cht_key(n as u64).as_bytes()) { - Ok(db_fetch) => db_fetch.map(|bytes| ::rlp::decode(&bytes).expect("decoding value from db failed")), - Err(e) => { - warn!(target: "chain", "Error reading from database: {}", e); - None - } - } - } + /// Get the nth CHT root, if it's been computed. + /// + /// CHT root 0 is from block `1..2048`. + /// CHT root 1 is from block `2049..4096` + /// and so on. + /// + /// This is because it's assumed that the genesis hash is known, + /// so including it within a CHT would be redundant. + pub fn cht_root(&self, n: usize) -> Option { + match self.db.get(self.col, cht_key(n as u64).as_bytes()) { + Ok(db_fetch) => { + db_fetch.map(|bytes| ::rlp::decode(&bytes).expect("decoding value from db failed")) + } + Err(e) => { + warn!(target: "chain", "Error reading from database: {}", e); + None + } + } + } - /// Get the genesis hash. - pub fn genesis_hash(&self) -> H256 { - self.genesis_header.hash() - } + /// Get the genesis hash. + pub fn genesis_hash(&self) -> H256 { + self.genesis_header.hash() + } - /// Get the best block's data. - pub fn best_block(&self) -> BlockDescriptor { - self.best_block.read().clone() - } + /// Get the best block's data. + pub fn best_block(&self) -> BlockDescriptor { + self.best_block.read().clone() + } - /// If there is a gap between the genesis and the rest - /// of the stored blocks, return the first post-gap block. 
- pub fn first_block(&self) -> Option { - let candidates = self.candidates.read(); - match candidates.iter().next() { - None | Some((&1, _)) => None, - Some((&height, entry)) => Some(BlockDescriptor { - number: height, - hash: entry.canonical_hash, - total_difficulty: entry.candidates.iter().find(|x| x.hash == entry.canonical_hash) - .expect("entry always stores canonical candidate; qed").total_difficulty, - }) - } - } + /// If there is a gap between the genesis and the rest + /// of the stored blocks, return the first post-gap block. + pub fn first_block(&self) -> Option { + let candidates = self.candidates.read(); + match candidates.iter().next() { + None | Some((&1, _)) => None, + Some((&height, entry)) => Some(BlockDescriptor { + number: height, + hash: entry.canonical_hash, + total_difficulty: entry + .candidates + .iter() + .find(|x| x.hash == entry.canonical_hash) + .expect("entry always stores canonical candidate; qed") + .total_difficulty, + }), + } + } - /// Get block status. - pub fn status(&self, hash: &H256) -> BlockStatus { - if self.db.get(self.col, hash).ok().map_or(false, |x| x.is_some()) { - BlockStatus::InChain - } else { - BlockStatus::Unknown - } - } + /// Get block status. + pub fn status(&self, hash: &H256) -> BlockStatus { + if self + .db + .get(self.col, hash) + .ok() + .map_or(false, |x| x.is_some()) + { + BlockStatus::InChain + } else { + BlockStatus::Unknown + } + } - /// Insert a pending transition. - pub fn insert_pending_transition(&self, batch: &mut DBTransaction, hash: H256, t: &PendingEpochTransition) { - let key = pending_transition_key(hash); - batch.put(self.col, &*key, &*::rlp::encode(t)); - } + /// Insert a pending transition. + pub fn insert_pending_transition( + &self, + batch: &mut DBTransaction, + hash: H256, + t: &PendingEpochTransition, + ) { + let key = pending_transition_key(hash); + batch.put(self.col, &*key, &*::rlp::encode(t)); + } - /// Get pending transition for a specific block hash. 
- pub fn pending_transition(&self, hash: H256) -> Option { - let key = pending_transition_key(hash); - match self.db.get(self.col, &*key) { - Ok(db_fetch) => db_fetch.map(|bytes| ::rlp::decode(&bytes).expect("decoding value from db failed")), - Err(e) => { - warn!(target: "chain", "Error reading from database: {}", e); - None - } - } - } + /// Get pending transition for a specific block hash. + pub fn pending_transition(&self, hash: H256) -> Option { + let key = pending_transition_key(hash); + match self.db.get(self.col, &*key) { + Ok(db_fetch) => { + db_fetch.map(|bytes| ::rlp::decode(&bytes).expect("decoding value from db failed")) + } + Err(e) => { + warn!(target: "chain", "Error reading from database: {}", e); + None + } + } + } - /// Get the transition to the epoch the given parent hash is part of - /// or transitions to. - /// This will give the epoch that any children of this parent belong to. - /// - /// The header corresponding the the parent hash must be stored already. - pub fn epoch_transition_for(&self, parent_hash: H256) -> Option<(Header, Vec)> { - // slow path: loop back block by block - let live_proofs = self.live_epoch_proofs.read(); + /// Get the transition to the epoch the given parent hash is part of + /// or transitions to. + /// This will give the epoch that any children of this parent belong to. + /// + /// The header corresponding the the parent hash must be stored already. 
+ pub fn epoch_transition_for(&self, parent_hash: H256) -> Option<(Header, Vec)> { + // slow path: loop back block by block + let live_proofs = self.live_epoch_proofs.read(); - for hdr in self.ancestry_iter(BlockId::Hash(parent_hash)) { - if let Some(transition) = live_proofs.get(&hdr.hash()).cloned() { - return hdr.decode().map(|decoded_hdr| { - (decoded_hdr, transition.proof) - }).ok(); - } - } + for hdr in self.ancestry_iter(BlockId::Hash(parent_hash)) { + if let Some(transition) = live_proofs.get(&hdr.hash()).cloned() { + return hdr + .decode() + .map(|decoded_hdr| (decoded_hdr, transition.proof)) + .ok(); + } + } - // any blocks left must be descendants of the last canonical transition block. - match self.db.get(self.col, LAST_CANONICAL_TRANSITION) { - Ok(x) => { - let x = x.expect("last canonical transition always instantiated; qed"); + // any blocks left must be descendants of the last canonical transition block. + match self.db.get(self.col, LAST_CANONICAL_TRANSITION) { + Ok(x) => { + let x = x.expect("last canonical transition always instantiated; qed"); - let (hdr, proof) = decode_canonical_transition(&x) - .expect("last canonical transition always encoded correctly; qed"); + let (hdr, proof) = decode_canonical_transition(&x) + .expect("last canonical transition always encoded correctly; qed"); - Some((hdr, proof.to_vec())) - } - Err(e) => { - warn!("Error reading from DB: {}", e); - None - } - } - } + Some((hdr, proof.to_vec())) + } + Err(e) => { + warn!("Error reading from DB: {}", e); + None + } + } + } } impl HeapSizeOf for HeaderChain { - fn heap_size_of_children(&self) -> usize { - self.candidates.read().heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.candidates.read().heap_size_of_children() + } } /// Iterator over a block's ancestry. 
pub struct AncestryIter<'a> { - next: Option, - chain: &'a HeaderChain, + next: Option, + chain: &'a HeaderChain, } impl<'a> Iterator for AncestryIter<'a> { - type Item = encoded::Header; + type Item = encoded::Header; - fn next(&mut self) -> Option { - let next = self.next.take(); - if let Some(p_hash) = next.as_ref().map(|hdr| hdr.parent_hash()) { - self.next = self.chain.block_header(BlockId::Hash(p_hash)); - } + fn next(&mut self) -> Option { + let next = self.next.take(); + if let Some(p_hash) = next.as_ref().map(|hdr| hdr.parent_hash()) { + self.next = self.chain.block_header(BlockId::Hash(p_hash)); + } - next - } + next + } } #[cfg(test)] mod tests { - use super::{HeaderChain, HardcodedSync}; - use std::sync::Arc; - - use cache::Cache; - use common_types::header::Header; - use common_types::ids::BlockId; - use ethcore::spec::Spec; - use ethereum_types::U256; - use kvdb::KeyValueDB; - use kvdb_memorydb; - - use std::time::Duration; - use parking_lot::Mutex; - - fn make_db() -> Arc { - Arc::new(kvdb_memorydb::create(0)) - } - - #[test] - fn basic_chain() { - let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); - let db = make_db(); - - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - - let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap(); - - let mut parent_hash = genesis_header.hash(); - let mut rolling_timestamp = genesis_header.timestamp(); - for i in 1..10000 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - header.set_timestamp(rolling_timestamp); - header.set_difficulty(*genesis_header.difficulty() * i as u32); - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, &header, None).unwrap(); - db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 10; - } - - assert!(chain.block_header(BlockId::Number(10)).is_none()); - 
assert!(chain.block_header(BlockId::Number(9000)).is_some()); - assert!(chain.cht_root(2).is_some()); - assert!(chain.cht_root(3).is_none()); - } - - #[test] - fn reorganize() { - let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); - let db = make_db(); - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - - let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap(); - - let mut parent_hash = genesis_header.hash(); - let mut rolling_timestamp = genesis_header.timestamp(); - for i in 1..6 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - header.set_timestamp(rolling_timestamp); - header.set_difficulty(*genesis_header.difficulty() * i as u32); - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, &header, None).unwrap(); - db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 10; - } - - { - let mut rolling_timestamp = rolling_timestamp; - let mut parent_hash = parent_hash; - for i in 6..16 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - header.set_timestamp(rolling_timestamp); - header.set_difficulty(*genesis_header.difficulty() * i as u32); - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, &header, None).unwrap(); - db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 10; - } - } - - assert_eq!(chain.best_block().number, 15); - - { - let mut rolling_timestamp = rolling_timestamp; - let mut parent_hash = parent_hash; - - // import a shorter chain which has better TD. 
- for i in 6..13 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - header.set_timestamp(rolling_timestamp); - header.set_difficulty(*genesis_header.difficulty() * U256::from(i * i)); - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, &header, None).unwrap(); - db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 11; - } - } - - let (mut num, mut canon_hash) = (chain.best_block().number, chain.best_block().hash); - assert_eq!(num, 12); - - while num > 0 { - let header = chain.block_header(BlockId::Number(num)).unwrap(); - assert_eq!(header.hash(), canon_hash); - - canon_hash = header.parent_hash(); - num -= 1; - } - } - - #[test] - fn earliest_is_latest() { - let spec = Spec::new_test(); - let db = make_db(); - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - - let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap(); - - assert!(chain.block_header(BlockId::Earliest).is_some()); - assert!(chain.block_header(BlockId::Latest).is_some()); - } - - #[test] - fn restore_from_db() { - let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); - let db = make_db(); - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - - { - let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(), - HardcodedSync::Allow).unwrap(); - let mut parent_hash = genesis_header.hash(); - let mut rolling_timestamp = genesis_header.timestamp(); - for i in 1..10000 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - header.set_timestamp(rolling_timestamp); - header.set_difficulty(*genesis_header.difficulty() * i as u32); - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, &header, None).unwrap(); - 
db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 10; - } - } - - let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(), - HardcodedSync::Allow).unwrap(); - assert!(chain.block_header(BlockId::Number(10)).is_none()); - assert!(chain.block_header(BlockId::Number(9000)).is_some()); - assert!(chain.cht_root(2).is_some()); - assert!(chain.cht_root(3).is_none()); - assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 9999); - } - - #[test] - fn restore_higher_non_canonical() { - let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); - let db = make_db(); - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - - { - let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(), - HardcodedSync::Allow).unwrap(); - let mut parent_hash = genesis_header.hash(); - let mut rolling_timestamp = genesis_header.timestamp(); - - // push 100 low-difficulty blocks. - for i in 1..101 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - header.set_timestamp(rolling_timestamp); - header.set_difficulty(*genesis_header.difficulty() * i as u32); - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, &header, None).unwrap(); - db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 10; - } - - // push fewer high-difficulty blocks. 
- for i in 1..11 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - header.set_timestamp(rolling_timestamp); - header.set_difficulty(*genesis_header.difficulty() * U256::from(i as u32 * 1000u32)); - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, &header, None).unwrap(); - db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 10; - } - - assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); - } - - // after restoration, non-canonical eras should still be loaded. - let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(), - HardcodedSync::Allow).unwrap(); - assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); - assert!(chain.candidates.read().get(&100).is_some()) - } - - #[test] - fn genesis_header_available() { - let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); - let db = make_db(); - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - - let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(), - HardcodedSync::Allow).unwrap(); - - assert!(chain.block_header(BlockId::Earliest).is_some()); - assert!(chain.block_header(BlockId::Number(0)).is_some()); - assert!(chain.block_header(BlockId::Hash(genesis_header.hash())).is_some()); - } - - #[test] - fn epoch_transitions_available_after_cht() { - let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); - let db = make_db(); - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - - let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap(); - - let mut parent_hash = genesis_header.hash(); - let mut rolling_timestamp = genesis_header.timestamp(); - for i in 1..6 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - 
header.set_timestamp(rolling_timestamp); - header.set_difficulty(*genesis_header.difficulty() * i as u32); - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let epoch_proof = if i == 3 { - Some(vec![1, 2, 3, 4]) - } else { - None - }; - - let pending = chain.insert(&mut tx, &header, epoch_proof).unwrap(); - db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 10; - } - - // these 3 should end up falling back to the genesis epoch proof in DB - for i in 0..3 { - let hash = chain.block_hash(BlockId::Number(i)).unwrap(); - assert_eq!(chain.epoch_transition_for(hash).unwrap().1, Vec::::new()); - } - - // these are live. - for i in 3..6 { - let hash = chain.block_hash(BlockId::Number(i)).unwrap(); - assert_eq!(chain.epoch_transition_for(hash).unwrap().1, vec![1, 2, 3, 4]); - } - - for i in 6..10000 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - header.set_timestamp(rolling_timestamp); - header.set_difficulty(*genesis_header.difficulty() * i as u32); - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, &header, None).unwrap(); - db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 10; - } - - // no live blocks have associated epoch proofs -- make sure we aren't leaking memory. 
- assert!(chain.live_epoch_proofs.read().is_empty()); - assert_eq!(chain.epoch_transition_for(parent_hash).unwrap().1, vec![1, 2, 3, 4]); - } - - #[test] - fn hardcoded_sync_gen() { - let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); - let db = make_db(); - - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - - let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).expect("failed to instantiate a new HeaderChain"); - - let mut parent_hash = genesis_header.hash(); - let mut rolling_timestamp = genesis_header.timestamp(); - let mut total_difficulty = *genesis_header.difficulty(); - let h_num = 3 * ::cht::SIZE + 1; - for i in 1..10000 { - let mut header = Header::new(); - header.set_parent_hash(parent_hash); - header.set_number(i); - header.set_timestamp(rolling_timestamp); - let diff = *genesis_header.difficulty() * i as u32; - header.set_difficulty(diff); - if i <= h_num { - total_difficulty = total_difficulty + diff; - } - parent_hash = header.hash(); - - let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, &header, None).expect("failed inserting a transaction"); - db.write(tx).unwrap(); - chain.apply_pending(pending); - - rolling_timestamp += 10; - } - - let hardcoded_sync = chain.read_hardcoded_sync().expect("failed reading hardcoded sync").expect("failed unwrapping hardcoded sync"); - assert_eq!(hardcoded_sync.chts.len(), 3); - assert_eq!(hardcoded_sync.total_difficulty, total_difficulty); - let decoded: Header = hardcoded_sync.header.decode().expect("decoding failed"); - assert_eq!(decoded.number(), h_num); - } + use super::{HardcodedSync, HeaderChain}; + use std::sync::Arc; + + use cache::Cache; + use common_types::{header::Header, ids::BlockId}; + use ethcore::spec::Spec; + use ethereum_types::U256; + use kvdb::KeyValueDB; + use kvdb_memorydb; + + use parking_lot::Mutex; + use std::time::Duration; + + fn make_db() -> Arc { + 
Arc::new(kvdb_memorydb::create(0)) + } + + #[test] + fn basic_chain() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); + + let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap(); + + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + for i in 1..10000 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i as u32); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, &header, None).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + assert!(chain.block_header(BlockId::Number(10)).is_none()); + assert!(chain.block_header(BlockId::Number(9000)).is_some()); + assert!(chain.cht_root(2).is_some()); + assert!(chain.cht_root(3).is_none()); + } + + #[test] + fn reorganize() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); + + let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap(); + + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + for i in 1..6 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i as u32); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, &header, None).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + 
+ rolling_timestamp += 10; + } + + { + let mut rolling_timestamp = rolling_timestamp; + let mut parent_hash = parent_hash; + for i in 6..16 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i as u32); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, &header, None).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + } + + assert_eq!(chain.best_block().number, 15); + + { + let mut rolling_timestamp = rolling_timestamp; + let mut parent_hash = parent_hash; + + // import a shorter chain which has better TD. + for i in 6..13 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * U256::from(i * i)); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, &header, None).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 11; + } + } + + let (mut num, mut canon_hash) = (chain.best_block().number, chain.best_block().hash); + assert_eq!(num, 12); + + while num > 0 { + let header = chain.block_header(BlockId::Number(num)).unwrap(); + assert_eq!(header.hash(), canon_hash); + + canon_hash = header.parent_hash(); + num -= 1; + } + } + + #[test] + fn earliest_is_latest() { + let spec = Spec::new_test(); + let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); + + let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap(); + + assert!(chain.block_header(BlockId::Earliest).is_some()); + assert!(chain.block_header(BlockId::Latest).is_some()); + } + + #[test] + fn restore_from_db() { + let spec = 
Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); + + { + let chain = + HeaderChain::new(db.clone(), None, &spec, cache.clone(), HardcodedSync::Allow) + .unwrap(); + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + for i in 1..10000 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i as u32); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, &header, None).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + } + + let chain = + HeaderChain::new(db.clone(), None, &spec, cache.clone(), HardcodedSync::Allow).unwrap(); + assert!(chain.block_header(BlockId::Number(10)).is_none()); + assert!(chain.block_header(BlockId::Number(9000)).is_some()); + assert!(chain.cht_root(2).is_some()); + assert!(chain.cht_root(3).is_none()); + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 9999); + } + + #[test] + fn restore_higher_non_canonical() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); + + { + let chain = + HeaderChain::new(db.clone(), None, &spec, cache.clone(), HardcodedSync::Allow) + .unwrap(); + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + + // push 100 low-difficulty blocks. 
+ for i in 1..101 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i as u32); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, &header, None).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + // push fewer high-difficulty blocks. + for i in 1..11 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header + .set_difficulty(*genesis_header.difficulty() * U256::from(i as u32 * 1000u32)); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, &header, None).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); + } + + // after restoration, non-canonical eras should still be loaded. 
+ let chain = + HeaderChain::new(db.clone(), None, &spec, cache.clone(), HardcodedSync::Allow).unwrap(); + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); + assert!(chain.candidates.read().get(&100).is_some()) + } + + #[test] + fn genesis_header_available() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); + + let chain = + HeaderChain::new(db.clone(), None, &spec, cache.clone(), HardcodedSync::Allow).unwrap(); + + assert!(chain.block_header(BlockId::Earliest).is_some()); + assert!(chain.block_header(BlockId::Number(0)).is_some()); + assert!(chain + .block_header(BlockId::Hash(genesis_header.hash())) + .is_some()); + } + + #[test] + fn epoch_transitions_available_after_cht() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); + + let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap(); + + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + for i in 1..6 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i as u32); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let epoch_proof = if i == 3 { Some(vec![1, 2, 3, 4]) } else { None }; + + let pending = chain.insert(&mut tx, &header, epoch_proof).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + // these 3 should end up falling back to the genesis epoch proof in DB + for i in 0..3 { + let hash = chain.block_hash(BlockId::Number(i)).unwrap(); + assert_eq!( + 
chain.epoch_transition_for(hash).unwrap().1, + Vec::::new() + ); + } + + // these are live. + for i in 3..6 { + let hash = chain.block_hash(BlockId::Number(i)).unwrap(); + assert_eq!( + chain.epoch_transition_for(hash).unwrap().1, + vec![1, 2, 3, 4] + ); + } + + for i in 6..10000 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i as u32); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, &header, None).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + // no live blocks have associated epoch proofs -- make sure we aren't leaking memory. + assert!(chain.live_epoch_proofs.read().is_empty()); + assert_eq!( + chain.epoch_transition_for(parent_hash).unwrap().1, + vec![1, 2, 3, 4] + ); + } + + #[test] + fn hardcoded_sync_gen() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); + + let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow) + .expect("failed to instantiate a new HeaderChain"); + + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + let mut total_difficulty = *genesis_header.difficulty(); + let h_num = 3 * ::cht::SIZE + 1; + for i in 1..10000 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + let diff = *genesis_header.difficulty() * i as u32; + header.set_difficulty(diff); + if i <= h_num { + total_difficulty = total_difficulty + diff; + } + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain + .insert(&mut tx, &header, None) + .expect("failed 
inserting a transaction"); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + let hardcoded_sync = chain + .read_hardcoded_sync() + .expect("failed reading hardcoded sync") + .expect("failed unwrapping hardcoded sync"); + assert_eq!(hardcoded_sync.chts.len(), 3); + assert_eq!(hardcoded_sync.total_difficulty, total_difficulty); + let decoded: Header = hardcoded_sync.header.decode().expect("decoding failed"); + assert_eq!(decoded.number(), h_num); + } } diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index ee42e6829..d4c3f3280 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -16,29 +16,31 @@ //! Light client implementation. Stores data from light sync -use std::sync::{Weak, Arc}; +use std::sync::{Arc, Weak}; -use ethcore::client::{ClientReport, EnvInfo, ClientIoMessage, traits::ForceUpdateSealing}; -use ethcore::engines::{epoch, EthEngine, EpochChange, EpochTransition, Proof}; -use ethcore::machine::EthereumMachine; -use ethcore::error::{Error, EthcoreResult}; -use ethcore::verification::queue::{self, HeaderQueue}; -use ethcore::spec::{Spec, SpecHardcodedSync}; +use common_types::{ + block_status::BlockStatus, blockchain_info::BlockChainInfo, encoded, header::Header, + ids::BlockId, BlockNumber, +}; +use ethcore::{ + client::{traits::ForceUpdateSealing, ClientIoMessage, ClientReport, EnvInfo}, + engines::{epoch, EpochChange, EpochTransition, EthEngine, Proof}, + error::{Error, EthcoreResult}, + machine::EthereumMachine, + spec::{Spec, SpecHardcodedSync}, + verification::queue::{self, HeaderQueue}, +}; +use ethereum_types::{H256, U256}; +use futures::{Future, IntoFuture}; use io::IoChannel; use parking_lot::{Mutex, RwLock}; -use ethereum_types::{H256, U256}; -use futures::{IntoFuture, Future}; -use common_types::BlockNumber; -use common_types::block_status::BlockStatus; -use common_types::blockchain_info::BlockChainInfo; -use common_types::encoded; -use 
common_types::header::Header; -use common_types::ids::BlockId; use kvdb::KeyValueDB; -use self::fetch::ChainDataFetcher; -use self::header_chain::{AncestryIter, HeaderChain, HardcodedSync}; +use self::{ + fetch::ChainDataFetcher, + header_chain::{AncestryIter, HardcodedSync, HeaderChain}, +}; use cache::Cache; @@ -52,595 +54,629 @@ pub mod fetch; /// Configuration for the light client. #[derive(Debug, Clone)] pub struct Config { - /// Verification queue config. - pub queue: queue::Config, - /// Chain column in database. - pub chain_column: Option, - /// Should it do full verification of blocks? - pub verify_full: bool, - /// Should it check the seal of blocks? - pub check_seal: bool, - /// Disable hardcoded sync. - pub no_hardcoded_sync: bool, + /// Verification queue config. + pub queue: queue::Config, + /// Chain column in database. + pub chain_column: Option, + /// Should it do full verification of blocks? + pub verify_full: bool, + /// Should it check the seal of blocks? + pub check_seal: bool, + /// Disable hardcoded sync. + pub no_hardcoded_sync: bool, } impl Default for Config { - fn default() -> Config { - Config { - queue: Default::default(), - chain_column: None, - verify_full: true, - check_seal: true, - no_hardcoded_sync: false, - } - } + fn default() -> Config { + Config { + queue: Default::default(), + chain_column: None, + verify_full: true, + check_seal: true, + no_hardcoded_sync: false, + } + } } /// Trait for interacting with the header chain abstractly. pub trait LightChainClient: Send + Sync { - /// Adds a new `LightChainNotify` listener. - fn add_listener(&self, listener: Weak); + /// Adds a new `LightChainNotify` listener. + fn add_listener(&self, listener: Weak); - /// Get chain info. - fn chain_info(&self) -> BlockChainInfo; + /// Get chain info. + fn chain_info(&self) -> BlockChainInfo; - /// Queue header to be verified. Required that all headers queued have their - /// parent queued prior. 
- fn queue_header(&self, header: Header) -> EthcoreResult; + /// Queue header to be verified. Required that all headers queued have their + /// parent queued prior. + fn queue_header(&self, header: Header) -> EthcoreResult; - /// Attempt to get a block hash by block id. - fn block_hash(&self, id: BlockId) -> Option; + /// Attempt to get a block hash by block id. + fn block_hash(&self, id: BlockId) -> Option; - /// Attempt to get block header by block id. - fn block_header(&self, id: BlockId) -> Option; + /// Attempt to get block header by block id. + fn block_header(&self, id: BlockId) -> Option; - /// Get the best block header. - fn best_block_header(&self) -> encoded::Header; + /// Get the best block header. + fn best_block_header(&self) -> encoded::Header; - /// Get a block's chain score by ID. - fn score(&self, id: BlockId) -> Option; + /// Get a block's chain score by ID. + fn score(&self, id: BlockId) -> Option; - /// Get an iterator over a block and its ancestry. - fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box + 'a>; + /// Get an iterator over a block and its ancestry. + fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box + 'a>; - /// Get the signing chain ID. - fn signing_chain_id(&self) -> Option; + /// Get the signing chain ID. + fn signing_chain_id(&self) -> Option; - /// Get environment info for execution at a given block. - /// Fails if that block's header is not stored. - fn env_info(&self, id: BlockId) -> Option; + /// Get environment info for execution at a given block. + /// Fails if that block's header is not stored. + fn env_info(&self, id: BlockId) -> Option; - /// Get a handle to the consensus engine. - fn engine(&self) -> &Arc; + /// Get a handle to the consensus engine. + fn engine(&self) -> &Arc; - /// Query whether a block is known. - fn is_known(&self, hash: &H256) -> bool; + /// Query whether a block is known. + fn is_known(&self, hash: &H256) -> bool; - /// Set the chain via a spec name. 
- fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()>; + /// Set the chain via a spec name. + fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()>; - /// Clear the queue. - fn clear_queue(&self); + /// Clear the queue. + fn clear_queue(&self); - /// Flush the queue. - fn flush_queue(&self); + /// Flush the queue. + fn flush_queue(&self); - /// Get queue info. - fn queue_info(&self) -> queue::QueueInfo; + /// Get queue info. + fn queue_info(&self) -> queue::QueueInfo; - /// Get the `i`th CHT root. - fn cht_root(&self, i: usize) -> Option; + /// Get the `i`th CHT root. + fn cht_root(&self, i: usize) -> Option; - /// Get a report of import activity since the last call. - fn report(&self) -> ClientReport; + /// Get a report of import activity since the last call. + fn report(&self) -> ClientReport; } /// An actor listening to light chain events. pub trait LightChainNotify: Send + Sync { - /// Notifies about imported headers. - fn new_headers(&self, good: &[H256]); + /// Notifies about imported headers. + fn new_headers(&self, good: &[H256]); } /// Something which can be treated as a `LightChainClient`. pub trait AsLightClient { - /// The kind of light client this can be treated as. - type Client: LightChainClient; + /// The kind of light client this can be treated as. + type Client: LightChainClient; - /// Access the underlying light client. - fn as_light_client(&self) -> &Self::Client; + /// Access the underlying light client. + fn as_light_client(&self) -> &Self::Client; } impl AsLightClient for T { - type Client = Self; + type Client = Self; - fn as_light_client(&self) -> &Self { self } + fn as_light_client(&self) -> &Self { + self + } } /// Light client implementation. 
pub struct Client { - queue: HeaderQueue, - engine: Arc, - chain: HeaderChain, - report: RwLock, - import_lock: Mutex<()>, - db: Arc, - listeners: RwLock>>, - fetcher: T, - verify_full: bool, - /// A closure to call when we want to restart the client - exit_handler: Mutex>>, + queue: HeaderQueue, + engine: Arc, + chain: HeaderChain, + report: RwLock, + import_lock: Mutex<()>, + db: Arc, + listeners: RwLock>>, + fetcher: T, + verify_full: bool, + /// A closure to call when we want to restart the client + exit_handler: Mutex>>, } impl Client { - /// Create a new `Client`. - pub fn new( - config: Config, - db: Arc, - chain_col: Option, - spec: &Spec, - fetcher: T, - io_channel: IoChannel, - cache: Arc> - ) -> Result { - Ok(Self { - queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, config.check_seal), - engine: spec.engine.clone(), - chain: { - let hs_cfg = if config.no_hardcoded_sync { HardcodedSync::Deny } else { HardcodedSync::Allow }; - HeaderChain::new(db.clone(), chain_col, &spec, cache, hs_cfg)? - }, - report: RwLock::new(ClientReport::default()), - import_lock: Mutex::new(()), - db, - listeners: RwLock::new(vec![]), - fetcher, - verify_full: config.verify_full, - exit_handler: Mutex::new(None), - }) - } + /// Create a new `Client`. + pub fn new( + config: Config, + db: Arc, + chain_col: Option, + spec: &Spec, + fetcher: T, + io_channel: IoChannel, + cache: Arc>, + ) -> Result { + Ok(Self { + queue: HeaderQueue::new( + config.queue, + spec.engine.clone(), + io_channel, + config.check_seal, + ), + engine: spec.engine.clone(), + chain: { + let hs_cfg = if config.no_hardcoded_sync { + HardcodedSync::Deny + } else { + HardcodedSync::Allow + }; + HeaderChain::new(db.clone(), chain_col, &spec, cache, hs_cfg)? 
+ }, + report: RwLock::new(ClientReport::default()), + import_lock: Mutex::new(()), + db, + listeners: RwLock::new(vec![]), + fetcher, + verify_full: config.verify_full, + exit_handler: Mutex::new(None), + }) + } - /// Generates the specifications for hardcoded sync. This is typically only called manually - /// from time to time by a Parity developer in order to update the chain specifications. - /// - /// Returns `None` if we are at the genesis block. - pub fn read_hardcoded_sync(&self) -> Result, Error> { - self.chain.read_hardcoded_sync() - } + /// Generates the specifications for hardcoded sync. This is typically only called manually + /// from time to time by a Parity developer in order to update the chain specifications. + /// + /// Returns `None` if we are at the genesis block. + pub fn read_hardcoded_sync(&self) -> Result, Error> { + self.chain.read_hardcoded_sync() + } - /// Adds a new `LightChainNotify` listener. - pub fn add_listener(&self, listener: Weak) { - self.listeners.write().push(listener); - } + /// Adds a new `LightChainNotify` listener. + pub fn add_listener(&self, listener: Weak) { + self.listeners.write().push(listener); + } - /// Import a header to the queue for additional verification. - pub fn import_header(&self, header: Header) -> EthcoreResult { - self.queue.import(header).map_err(|(_, e)| e) - } + /// Import a header to the queue for additional verification. + pub fn import_header(&self, header: Header) -> EthcoreResult { + self.queue.import(header).map_err(|(_, e)| e) + } - /// Inquire about the status of a given header. - pub fn status(&self, hash: &H256) -> BlockStatus { - match self.queue.status(hash) { - queue::Status::Unknown => self.chain.status(hash), - other => other.into(), - } - } + /// Inquire about the status of a given header. 
+ pub fn status(&self, hash: &H256) -> BlockStatus { + match self.queue.status(hash) { + queue::Status::Unknown => self.chain.status(hash), + other => other.into(), + } + } - /// Get the chain info. - pub fn chain_info(&self) -> BlockChainInfo { - let best_hdr = self.chain.best_header(); - let best_td = self.chain.best_block().total_difficulty; + /// Get the chain info. + pub fn chain_info(&self) -> BlockChainInfo { + let best_hdr = self.chain.best_header(); + let best_td = self.chain.best_block().total_difficulty; - let first_block = self.chain.first_block(); - let genesis_hash = self.chain.genesis_hash(); + let first_block = self.chain.first_block(); + let genesis_hash = self.chain.genesis_hash(); - BlockChainInfo { - total_difficulty: best_td, - pending_total_difficulty: best_td + self.queue.total_difficulty(), - genesis_hash, - best_block_hash: best_hdr.hash(), - best_block_number: best_hdr.number(), - best_block_timestamp: best_hdr.timestamp(), - ancient_block_hash: if first_block.is_some() { Some(genesis_hash) } else { None }, - ancient_block_number: if first_block.is_some() { Some(0) } else { None }, - first_block_hash: first_block.as_ref().map(|first| first.hash), - first_block_number: first_block.as_ref().map(|first| first.number), - } - } + BlockChainInfo { + total_difficulty: best_td, + pending_total_difficulty: best_td + self.queue.total_difficulty(), + genesis_hash, + best_block_hash: best_hdr.hash(), + best_block_number: best_hdr.number(), + best_block_timestamp: best_hdr.timestamp(), + ancient_block_hash: if first_block.is_some() { + Some(genesis_hash) + } else { + None + }, + ancient_block_number: if first_block.is_some() { Some(0) } else { None }, + first_block_hash: first_block.as_ref().map(|first| first.hash), + first_block_number: first_block.as_ref().map(|first| first.number), + } + } - /// Get the header queue info. - pub fn queue_info(&self) -> queue::QueueInfo { - self.queue.queue_info() - } + /// Get the header queue info. 
+ pub fn queue_info(&self) -> queue::QueueInfo { + self.queue.queue_info() + } - /// Attempt to get a block hash by block id. - pub fn block_hash(&self, id: BlockId) -> Option { - self.chain.block_hash(id) - } + /// Attempt to get a block hash by block id. + pub fn block_hash(&self, id: BlockId) -> Option { + self.chain.block_hash(id) + } - /// Get a block header by Id. - pub fn block_header(&self, id: BlockId) -> Option { - self.chain.block_header(id) - } + /// Get a block header by Id. + pub fn block_header(&self, id: BlockId) -> Option { + self.chain.block_header(id) + } - /// Get the best block header. - pub fn best_block_header(&self) -> encoded::Header { - self.chain.best_header() - } + /// Get the best block header. + pub fn best_block_header(&self) -> encoded::Header { + self.chain.best_header() + } - /// Get a block's chain score. - pub fn score(&self, id: BlockId) -> Option { - self.chain.score(id) - } + /// Get a block's chain score. + pub fn score(&self, id: BlockId) -> Option { + self.chain.score(id) + } - /// Get an iterator over a block and its ancestry. - pub fn ancestry_iter(&self, start: BlockId) -> AncestryIter { - self.chain.ancestry_iter(start) - } + /// Get an iterator over a block and its ancestry. + pub fn ancestry_iter(&self, start: BlockId) -> AncestryIter { + self.chain.ancestry_iter(start) + } - /// Get the signing chain id. - pub fn signing_chain_id(&self) -> Option { - self.engine.signing_chain_id(&self.latest_env_info()) - } + /// Get the signing chain id. + pub fn signing_chain_id(&self) -> Option { + self.engine.signing_chain_id(&self.latest_env_info()) + } - /// Flush the header queue. - pub fn flush_queue(&self) { - self.queue.flush() - } + /// Flush the header queue. + pub fn flush_queue(&self) { + self.queue.flush() + } - /// Get the `i`th CHT root. - pub fn cht_root(&self, i: usize) -> Option { - self.chain.cht_root(i) - } + /// Get the `i`th CHT root. 
+ pub fn cht_root(&self, i: usize) -> Option { + self.chain.cht_root(i) + } - /// Import a set of pre-verified headers from the queue. - pub fn import_verified(&self) { - const MAX: usize = 256; + /// Import a set of pre-verified headers from the queue. + pub fn import_verified(&self) { + const MAX: usize = 256; - let _lock = self.import_lock.lock(); + let _lock = self.import_lock.lock(); - let mut bad = Vec::new(); - let mut good = Vec::new(); - for verified_header in self.queue.drain(MAX) { - let (num, hash) = (verified_header.number(), verified_header.hash()); - trace!(target: "client", "importing block {}", num); + let mut bad = Vec::new(); + let mut good = Vec::new(); + for verified_header in self.queue.drain(MAX) { + let (num, hash) = (verified_header.number(), verified_header.hash()); + trace!(target: "client", "importing block {}", num); - if self.verify_full && !self.check_header(&mut bad, &verified_header) { - continue - } + if self.verify_full && !self.check_header(&mut bad, &verified_header) { + continue; + } - let write_proof_result = match self.check_epoch_signal(&verified_header) { - Ok(Some(proof)) => self.write_pending_proof(&verified_header, proof), - Ok(None) => Ok(()), - Err(e) => - panic!("Unable to fetch epoch transition proof: {:?}", e), - }; + let write_proof_result = match self.check_epoch_signal(&verified_header) { + Ok(Some(proof)) => self.write_pending_proof(&verified_header, proof), + Ok(None) => Ok(()), + Err(e) => panic!("Unable to fetch epoch transition proof: {:?}", e), + }; - if let Err(e) = write_proof_result { - warn!(target: "client", "Error writing pending transition proof to DB: {:?} \ + if let Err(e) = write_proof_result { + warn!(target: "client", "Error writing pending transition proof to DB: {:?} \ The node may not be able to synchronize further.", e); - } + } - let epoch_proof = self.engine.is_epoch_end_light( - &verified_header, - &|h| self.chain.block_header(BlockId::Hash(h)).and_then(|hdr| hdr.decode().ok()), - &|h| 
self.chain.pending_transition(h), - ); + let epoch_proof = self.engine.is_epoch_end_light( + &verified_header, + &|h| { + self.chain + .block_header(BlockId::Hash(h)) + .and_then(|hdr| hdr.decode().ok()) + }, + &|h| self.chain.pending_transition(h), + ); - let mut tx = self.db.transaction(); - let pending = match self.chain.insert(&mut tx, &verified_header, epoch_proof) { - Ok(pending) => { - good.push(hash); - self.report.write().blocks_imported += 1; - pending - } - Err(e) => { - debug!(target: "client", "Error importing header {:?}: {:?}", (num, hash), e); - bad.push(hash); - continue; - } - }; + let mut tx = self.db.transaction(); + let pending = match self.chain.insert(&mut tx, &verified_header, epoch_proof) { + Ok(pending) => { + good.push(hash); + self.report.write().blocks_imported += 1; + pending + } + Err(e) => { + debug!(target: "client", "Error importing header {:?}: {:?}", (num, hash), e); + bad.push(hash); + continue; + } + }; - self.db.write_buffered(tx); - self.chain.apply_pending(pending); - } + self.db.write_buffered(tx); + self.chain.apply_pending(pending); + } - if let Err(e) = self.db.flush() { - panic!("Database flush failed: {}. Check disk health and space.", e); - } + if let Err(e) = self.db.flush() { + panic!("Database flush failed: {}. Check disk health and space.", e); + } - self.queue.mark_as_bad(&bad); - self.queue.mark_as_good(&good); + self.queue.mark_as_bad(&bad); + self.queue.mark_as_good(&good); - self.notify(|listener| listener.new_headers(&good)); - } + self.notify(|listener| listener.new_headers(&good)); + } - /// Get a report about blocks imported. - pub fn report(&self) -> ClientReport { - self.report.read().clone() - } + /// Get a report about blocks imported. + pub fn report(&self) -> ClientReport { + self.report.read().clone() + } - /// Get blockchain mem usage in bytes. - pub fn chain_mem_used(&self) -> usize { - use heapsize::HeapSizeOf; + /// Get blockchain mem usage in bytes. 
+ pub fn chain_mem_used(&self) -> usize { + use heapsize::HeapSizeOf; - self.chain.heap_size_of_children() - } + self.chain.heap_size_of_children() + } - /// Set a closure to call when the client wants to be restarted. - /// - /// The parameter passed to the callback is the name of the new chain spec to use after - /// the restart. - pub fn set_exit_handler(&self, f: F) where F: Fn(String) + 'static + Send { - *self.exit_handler.lock() = Some(Box::new(f)); - } + /// Set a closure to call when the client wants to be restarted. + /// + /// The parameter passed to the callback is the name of the new chain spec to use after + /// the restart. + pub fn set_exit_handler(&self, f: F) + where + F: Fn(String) + 'static + Send, + { + *self.exit_handler.lock() = Some(Box::new(f)); + } - /// Get a handle to the verification engine. - pub fn engine(&self) -> &Arc { - &self.engine - } + /// Get a handle to the verification engine. + pub fn engine(&self) -> &Arc { + &self.engine + } - /// Get the latest environment info. - pub fn latest_env_info(&self) -> EnvInfo { - self.env_info(BlockId::Latest) - .expect("Best block header and recent hashes always stored; qed") - } + /// Get the latest environment info. + pub fn latest_env_info(&self) -> EnvInfo { + self.env_info(BlockId::Latest) + .expect("Best block header and recent hashes always stored; qed") + } - /// Get environment info for a given block. - pub fn env_info(&self, id: BlockId) -> Option { - let header = match self.block_header(id) { - Some(hdr) => hdr, - None => return None, - }; + /// Get environment info for a given block. 
+ pub fn env_info(&self, id: BlockId) -> Option { + let header = match self.block_header(id) { + Some(hdr) => hdr, + None => return None, + }; - Some(EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: self.build_last_hashes(header.parent_hash()), - gas_used: Default::default(), - gas_limit: header.gas_limit(), - }) - } + Some(EnvInfo { + number: header.number(), + author: header.author(), + timestamp: header.timestamp(), + difficulty: header.difficulty(), + last_hashes: self.build_last_hashes(header.parent_hash()), + gas_used: Default::default(), + gas_limit: header.gas_limit(), + }) + } - fn build_last_hashes(&self, mut parent_hash: H256) -> Arc> { - let mut v = Vec::with_capacity(256); - for _ in 0..255 { - v.push(parent_hash); - match self.block_header(BlockId::Hash(parent_hash)) { - Some(header) => parent_hash = header.hash(), - None => break, - } - } + fn build_last_hashes(&self, mut parent_hash: H256) -> Arc> { + let mut v = Vec::with_capacity(256); + for _ in 0..255 { + v.push(parent_hash); + match self.block_header(BlockId::Hash(parent_hash)) { + Some(header) => parent_hash = header.hash(), + None => break, + } + } - Arc::new(v) - } + Arc::new(v) + } - fn notify(&self, f: F) { - for listener in &*self.listeners.read() { - if let Some(listener) = listener.upgrade() { - f(&*listener) - } - } - } + fn notify(&self, f: F) { + for listener in &*self.listeners.read() { + if let Some(listener) = listener.upgrade() { + f(&*listener) + } + } + } - // return false if should skip, true otherwise. may push onto bad if - // should skip. 
- fn check_header(&self, bad: &mut Vec, verified_header: &Header) -> bool { - let hash = verified_header.hash(); - let parent_header = match self.chain.block_header(BlockId::Hash(*verified_header.parent_hash())) { - Some(header) => header, - None => { - trace!(target: "client", "No parent for block ({}, {})", + // return false if should skip, true otherwise. may push onto bad if + // should skip. + fn check_header(&self, bad: &mut Vec, verified_header: &Header) -> bool { + let hash = verified_header.hash(); + let parent_header = match self + .chain + .block_header(BlockId::Hash(*verified_header.parent_hash())) + { + Some(header) => header, + None => { + trace!(target: "client", "No parent for block ({}, {})", verified_header.number(), hash); - return false // skip import of block with missing parent. - } - }; + return false; // skip import of block with missing parent. + } + }; - // Verify Block Family + // Verify Block Family - let verify_family_result = { - parent_header.decode() - .map_err(|dec_err| dec_err.into()) - .and_then(|decoded| { - self.engine.verify_block_family(&verified_header, &decoded) - }) - - }; - if let Err(e) = verify_family_result { - warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", + let verify_family_result = { + parent_header + .decode() + .map_err(|dec_err| dec_err.into()) + .and_then(|decoded| self.engine.verify_block_family(&verified_header, &decoded)) + }; + if let Err(e) = verify_family_result { + warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", verified_header.number(), verified_header.hash(), e); - bad.push(hash); - return false; - }; + bad.push(hash); + return false; + }; - // "external" verification. - let verify_external_result = self.engine.verify_block_external(&verified_header); - if let Err(e) = verify_external_result { - warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", + // "external" verification. 
+ let verify_external_result = self.engine.verify_block_external(&verified_header); + if let Err(e) = verify_external_result { + warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", verified_header.number(), verified_header.hash(), e); - bad.push(hash); - return false; - }; + bad.push(hash); + return false; + }; - true - } + true + } - fn check_epoch_signal(&self, verified_header: &Header) -> Result>, T::Error> { - use ethcore::machine::{AuxiliaryRequest, AuxiliaryData}; + fn check_epoch_signal( + &self, + verified_header: &Header, + ) -> Result>, T::Error> { + use ethcore::machine::{AuxiliaryData, AuxiliaryRequest}; - let mut block: Option> = None; - let mut receipts: Option> = None; + let mut block: Option> = None; + let mut receipts: Option> = None; - loop { + loop { + let is_signal = { + let auxiliary = AuxiliaryData { + bytes: block.as_ref().map(|x| &x[..]), + receipts: receipts.as_ref().map(|x| &x[..]), + }; - let is_signal = { - let auxiliary = AuxiliaryData { - bytes: block.as_ref().map(|x| &x[..]), - receipts: receipts.as_ref().map(|x| &x[..]), - }; + self.engine.signals_epoch_end(verified_header, auxiliary) + }; - self.engine.signals_epoch_end(verified_header, auxiliary) - }; + // check with any auxiliary data fetched so far + match is_signal { + EpochChange::No => return Ok(None), + EpochChange::Yes(proof) => return Ok(Some(proof)), + EpochChange::Unsure(unsure) => { + let (b, r) = match unsure { + AuxiliaryRequest::Body => { + (Some(self.fetcher.block_body(verified_header)), None) + } + AuxiliaryRequest::Receipts => { + (None, Some(self.fetcher.block_receipts(verified_header))) + } + AuxiliaryRequest::Both => ( + Some(self.fetcher.block_body(verified_header)), + Some(self.fetcher.block_receipts(verified_header)), + ), + }; - // check with any auxiliary data fetched so far - match is_signal { - EpochChange::No => return Ok(None), - EpochChange::Yes(proof) => return Ok(Some(proof)), - EpochChange::Unsure(unsure) => { - 
let (b, r) = match unsure { - AuxiliaryRequest::Body => - (Some(self.fetcher.block_body(verified_header)), None), - AuxiliaryRequest::Receipts => - (None, Some(self.fetcher.block_receipts(verified_header))), - AuxiliaryRequest::Both => ( - Some(self.fetcher.block_body(verified_header)), - Some(self.fetcher.block_receipts(verified_header)), - ), - }; + if let Some(b) = b { + block = Some(b.into_future().wait()?.into_inner()); + } - if let Some(b) = b { - block = Some(b.into_future().wait()?.into_inner()); - } + if let Some(r) = r { + receipts = Some(r.into_future().wait()?); + } + } + } + } + } - if let Some(r) = r { - receipts = Some(r.into_future().wait()?); - } - } - } - } - } + // attempts to fetch the epoch proof from the network until successful. + fn write_pending_proof( + &self, + header: &Header, + proof: Proof, + ) -> Result<(), T::Error> { + let proof = match proof { + Proof::Known(known) => known, + Proof::WithState(state_dependent) => self + .fetcher + .epoch_transition(header.hash(), self.engine.clone(), state_dependent) + .into_future() + .wait()?, + }; - // attempts to fetch the epoch proof from the network until successful. - fn write_pending_proof(&self, header: &Header, proof: Proof) -> Result<(), T::Error> { - let proof = match proof { - Proof::Known(known) => known, - Proof::WithState(state_dependent) => { - self.fetcher.epoch_transition( - header.hash(), - self.engine.clone(), - state_dependent - ).into_future().wait()? 
- } - }; - - let mut batch = self.db.transaction(); - self.chain.insert_pending_transition(&mut batch, header.hash(), &epoch::PendingTransition { - proof, - }); - self.db.write_buffered(batch); - Ok(()) - } + let mut batch = self.db.transaction(); + self.chain.insert_pending_transition( + &mut batch, + header.hash(), + &epoch::PendingTransition { proof }, + ); + self.db.write_buffered(batch); + Ok(()) + } } impl LightChainClient for Client { - fn add_listener(&self, listener: Weak) { - Client::add_listener(self, listener) - } + fn add_listener(&self, listener: Weak) { + Client::add_listener(self, listener) + } - fn chain_info(&self) -> BlockChainInfo { Client::chain_info(self) } + fn chain_info(&self) -> BlockChainInfo { + Client::chain_info(self) + } - fn queue_header(&self, header: Header) -> EthcoreResult { - self.import_header(header) - } + fn queue_header(&self, header: Header) -> EthcoreResult { + self.import_header(header) + } - fn block_hash(&self, id: BlockId) -> Option { - Client::block_hash(self, id) - } + fn block_hash(&self, id: BlockId) -> Option { + Client::block_hash(self, id) + } - fn block_header(&self, id: BlockId) -> Option { - Client::block_header(self, id) - } + fn block_header(&self, id: BlockId) -> Option { + Client::block_header(self, id) + } - fn best_block_header(&self) -> encoded::Header { - Client::best_block_header(self) - } + fn best_block_header(&self) -> encoded::Header { + Client::best_block_header(self) + } - fn score(&self, id: BlockId) -> Option { - Client::score(self, id) - } + fn score(&self, id: BlockId) -> Option { + Client::score(self, id) + } - fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box + 'a> { - Box::new(Client::ancestry_iter(self, start)) - } + fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box + 'a> { + Box::new(Client::ancestry_iter(self, start)) + } - fn signing_chain_id(&self) -> Option { - Client::signing_chain_id(self) - } + fn signing_chain_id(&self) -> Option { + Client::signing_chain_id(self) + } - 
fn env_info(&self, id: BlockId) -> Option { - Client::env_info(self, id) - } + fn env_info(&self, id: BlockId) -> Option { + Client::env_info(self, id) + } - fn engine(&self) -> &Arc { - Client::engine(self) - } + fn engine(&self) -> &Arc { + Client::engine(self) + } - fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()> { - trace!(target: "mode", "Client::set_spec_name({:?})", new_spec_name); - if let Some(ref h) = *self.exit_handler.lock() { - (*h)(new_spec_name); - Ok(()) - } else { - warn!("Not hypervised; cannot change chain."); - Err(()) - } - } + fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()> { + trace!(target: "mode", "Client::set_spec_name({:?})", new_spec_name); + if let Some(ref h) = *self.exit_handler.lock() { + (*h)(new_spec_name); + Ok(()) + } else { + warn!("Not hypervised; cannot change chain."); + Err(()) + } + } - fn is_known(&self, hash: &H256) -> bool { - self.status(hash) == BlockStatus::InChain - } + fn is_known(&self, hash: &H256) -> bool { + self.status(hash) == BlockStatus::InChain + } - fn clear_queue(&self) { - self.queue.clear() - } + fn clear_queue(&self) { + self.queue.clear() + } - fn flush_queue(&self) { - Client::flush_queue(self); - } + fn flush_queue(&self) { + Client::flush_queue(self); + } - fn queue_info(&self) -> queue::QueueInfo { - self.queue.queue_info() - } + fn queue_info(&self) -> queue::QueueInfo { + self.queue.queue_info() + } - fn cht_root(&self, i: usize) -> Option { - Client::cht_root(self, i) - } + fn cht_root(&self, i: usize) -> Option { + Client::cht_root(self, i) + } - fn report(&self) -> ClientReport { - Client::report(self) - } + fn report(&self) -> ClientReport { + Client::report(self) + } } impl ::ethcore::client::ChainInfo for Client { - fn chain_info(&self) -> BlockChainInfo { - Client::chain_info(self) - } + fn chain_info(&self) -> BlockChainInfo { + Client::chain_info(self) + } } impl ::ethcore::client::EngineClient for Client { - fn update_sealing(&self, _force: 
ForceUpdateSealing) {} - fn submit_seal(&self, _block_hash: H256, _seal: Vec>) { } - fn broadcast_consensus_message(&self, _message: Vec) { } + fn update_sealing(&self, _force: ForceUpdateSealing) {} + fn submit_seal(&self, _block_hash: H256, _seal: Vec>) {} + fn broadcast_consensus_message(&self, _message: Vec) {} - fn epoch_transition_for(&self, parent_hash: H256) -> Option { - self.chain.epoch_transition_for(parent_hash).map(|(hdr, proof)| EpochTransition { - block_hash: hdr.hash(), - block_number: hdr.number(), - proof, - }) - } + fn epoch_transition_for(&self, parent_hash: H256) -> Option { + self.chain + .epoch_transition_for(parent_hash) + .map(|(hdr, proof)| EpochTransition { + block_hash: hdr.hash(), + block_number: hdr.number(), + proof, + }) + } - fn as_full_client(&self) -> Option<&::ethcore::client::BlockChainClient> { - None - } + fn as_full_client(&self) -> Option<&::ethcore::client::BlockChainClient> { + None + } - fn block_number(&self, id: BlockId) -> Option { - self.block_header(id).map(|hdr| hdr.number()) - } + fn block_number(&self, id: BlockId) -> Option { + self.block_header(id).map(|hdr| hdr.number()) + } - fn block_header(&self, id: BlockId) -> Option { - Client::block_header(self, id) - } + fn block_header(&self, id: BlockId) -> Option { + Client::block_header(self, id) + } } diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index 9672974fc..3e03dde4b 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -17,118 +17,126 @@ //! Minimal IO service for light client. //! Just handles block import messages and passes them to the client. 
-use std::fmt; -use std::sync::Arc; +use std::{fmt, sync::Arc}; -use ethcore_db as db; +use ethcore::{client::ClientIoMessage, error::Error as CoreError, spec::Spec}; use ethcore_blockchain::BlockChainDB; -use ethcore::client::ClientIoMessage; -use ethcore::error::Error as CoreError; -use ethcore::spec::Spec; +use ethcore_db as db; use io::{IoContext, IoError, IoHandler, IoService}; use cache::Cache; use parking_lot::Mutex; -use super::{ChainDataFetcher, LightChainNotify, Client, Config as ClientConfig}; +use super::{ChainDataFetcher, Client, Config as ClientConfig, LightChainNotify}; /// Errors on service initialization. #[derive(Debug)] pub enum Error { - /// Core error. - Core(CoreError), - /// I/O service error. - Io(IoError), + /// Core error. + Core(CoreError), + /// I/O service error. + Io(IoError), } impl From for Error { - #[inline] - fn from(err: CoreError) -> Error { - Error::Core(err) - } + #[inline] + fn from(err: CoreError) -> Error { + Error::Core(err) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Core(ref msg) => write!(f, "Core error: {}", msg), - Error::Io(ref err) => write!(f, "I/O service error: {}", err), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Core(ref msg) => write!(f, "Core error: {}", msg), + Error::Io(ref err) => write!(f, "I/O service error: {}", err), + } + } } /// Light client service. pub struct Service { - client: Arc>, - io_service: IoService, + client: Arc>, + io_service: IoService, } impl Service { - /// Start the service: initialize I/O workers and client itself. 
- pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, db: Arc, cache: Arc>) -> Result { - let io_service = IoService::::start().map_err(Error::Io)?; - let client = Arc::new(Client::new(config, - db.key_value().clone(), - db::COL_LIGHT_CHAIN, - spec, - fetcher, - io_service.channel(), - cache, - )?); + /// Start the service: initialize I/O workers and client itself. + pub fn start( + config: ClientConfig, + spec: &Spec, + fetcher: T, + db: Arc, + cache: Arc>, + ) -> Result { + let io_service = IoService::::start().map_err(Error::Io)?; + let client = Arc::new(Client::new( + config, + db.key_value().clone(), + db::COL_LIGHT_CHAIN, + spec, + fetcher, + io_service.channel(), + cache, + )?); - io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?; - spec.engine.register_client(Arc::downgrade(&client) as _); + io_service + .register_handler(Arc::new(ImportBlocks(client.clone()))) + .map_err(Error::Io)?; + spec.engine.register_client(Arc::downgrade(&client) as _); - Ok(Service { - client, - io_service, - }) - } + Ok(Service { client, io_service }) + } - /// Set the actor to be notified on certain chain events - pub fn add_notify(&self, notify: Arc) { - self.client.add_listener(Arc::downgrade(¬ify)); - } + /// Set the actor to be notified on certain chain events + pub fn add_notify(&self, notify: Arc) { + self.client.add_listener(Arc::downgrade(¬ify)); + } - /// Register an I/O handler on the service. - pub fn register_handler(&self, handler: Arc + Send>) -> Result<(), IoError> { - self.io_service.register_handler(handler) - } + /// Register an I/O handler on the service. + pub fn register_handler( + &self, + handler: Arc + Send>, + ) -> Result<(), IoError> { + self.io_service.register_handler(handler) + } - /// Get a handle to the client. - pub fn client(&self) -> &Arc> { - &self.client - } + /// Get a handle to the client. 
+ pub fn client(&self) -> &Arc> { + &self.client + } } struct ImportBlocks(Arc>); impl IoHandler for ImportBlocks { - fn message(&self, _io: &IoContext, message: &ClientIoMessage) { - if let ClientIoMessage::BlockVerified = *message { - self.0.import_verified(); - } - } + fn message(&self, _io: &IoContext, message: &ClientIoMessage) { + if let ClientIoMessage::BlockVerified = *message { + self.0.import_verified(); + } + } } #[cfg(test)] mod tests { - use super::Service; - use ethcore::spec::Spec; + use super::Service; + use ethcore::spec::Spec; - use std::sync::Arc; - use cache::Cache; - use client::fetch; - use std::time::Duration; - use parking_lot::Mutex; - use ethcore::test_helpers; + use cache::Cache; + use client::fetch; + use ethcore::test_helpers; + use parking_lot::Mutex; + use std::{sync::Arc, time::Duration}; - #[test] - fn it_works() { - let db = test_helpers::new_db(); - let spec = Spec::new_test(); - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); + #[test] + fn it_works() { + let db = test_helpers::new_db(); + let spec = Spec::new_test(); + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); - Service::start(Default::default(), &spec, fetch::unavailable(), db, cache).unwrap(); - } + Service::start(Default::default(), &spec, fetch::unavailable(), db, cache).unwrap(); + } } diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index 93e912e1d..2ebd6d98f 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -32,20 +32,22 @@ #![deny(missing_docs)] -pub mod client; +pub mod cache; pub mod cht; +pub mod client; pub mod net; pub mod on_demand; -pub mod transaction_queue; -pub mod cache; pub mod provider; +pub mod transaction_queue; mod types; -pub use self::cache::Cache; -pub use self::provider::{Provider, MAX_HEADERS_PER_REQUEST}; -pub use self::transaction_queue::TransactionQueue; -pub use types::request as request; +pub use 
self::{ + cache::Cache, + provider::{Provider, MAX_HEADERS_PER_REQUEST}, + transaction_queue::TransactionQueue, +}; +pub use types::request; #[macro_use] extern crate serde_derive; @@ -55,41 +57,41 @@ extern crate log; extern crate bincode; extern crate common_types; +extern crate ethcore; extern crate ethcore_blockchain; extern crate ethcore_db; extern crate ethcore_io as io; extern crate ethcore_network as network; -extern crate parity_bytes as bytes; extern crate ethereum_types; -extern crate ethcore; +extern crate failsafe; +extern crate fastmap; +extern crate futures; extern crate hash_db; extern crate heapsize; -extern crate failsafe; -extern crate futures; extern crate itertools; extern crate keccak_hasher; extern crate memory_db; -extern crate trie_db as trie; +extern crate parity_bytes as bytes; +extern crate parking_lot; extern crate patricia_trie_ethereum as ethtrie; -extern crate fastmap; extern crate rand; extern crate rlp; -extern crate parking_lot; +extern crate trie_db as trie; #[macro_use] extern crate rlp_derive; +extern crate keccak_hash as hash; +extern crate kvdb; +extern crate memory_cache; extern crate serde; extern crate smallvec; extern crate stats; -extern crate vm; -extern crate keccak_hash as hash; extern crate triehash_ethereum as triehash; -extern crate kvdb; -extern crate memory_cache; +extern crate vm; #[macro_use] extern crate error_chain; +extern crate journaldb; #[cfg(test)] extern crate kvdb_memorydb; #[cfg(test)] extern crate tempdir; -extern crate journaldb; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 6e38959cc..5aa4b34a1 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -16,178 +16,180 @@ //! I/O and event context generalizations. 
-use network::{NetworkContext, PeerId, NodeId}; +use network::{NetworkContext, NodeId, PeerId}; -use super::{Announcement, LightProtocol, ReqId}; -use super::error::Error; +use super::{error::Error, Announcement, LightProtocol, ReqId}; use request::NetworkRequests as Requests; /// An I/O context which allows sending and receiving packets as well as /// disconnecting peers. This is used as a generalization of the portions /// of a p2p network which the light protocol structure makes use of. pub trait IoContext { - /// Send a packet to a specific peer. - fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec); + /// Send a packet to a specific peer. + fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec); - /// Respond to a peer's message. Only works if this context is a byproduct - /// of a packet handler. - fn respond(&self, packet_id: u8, packet_body: Vec); + /// Respond to a peer's message. Only works if this context is a byproduct + /// of a packet handler. + fn respond(&self, packet_id: u8, packet_body: Vec); - /// Disconnect a peer. - fn disconnect_peer(&self, peer: PeerId); + /// Disconnect a peer. + fn disconnect_peer(&self, peer: PeerId); - /// Disable a peer -- this is a disconnect + a time-out. - fn disable_peer(&self, peer: PeerId); + /// Disable a peer -- this is a disconnect + a time-out. + fn disable_peer(&self, peer: PeerId); - /// Get a peer's protocol version. - fn protocol_version(&self, peer: PeerId) -> Option; + /// Get a peer's protocol version. 
+ fn protocol_version(&self, peer: PeerId) -> Option; - /// Persistent peer id - fn persistent_peer_id(&self, peer: PeerId) -> Option; + /// Persistent peer id + fn persistent_peer_id(&self, peer: PeerId) -> Option; - /// Whether given peer id is reserved peer - fn is_reserved_peer(&self, peer: PeerId) -> bool; + /// Whether given peer id is reserved peer + fn is_reserved_peer(&self, peer: PeerId) -> bool; } -impl IoContext for T where T: ?Sized + NetworkContext { - fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { - if let Err(e) = self.send(peer, packet_id, packet_body) { - debug!(target: "pip", "Error sending packet to peer {}: {}", peer, e); - } - } +impl IoContext for T +where + T: ?Sized + NetworkContext, +{ + fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { + if let Err(e) = self.send(peer, packet_id, packet_body) { + debug!(target: "pip", "Error sending packet to peer {}: {}", peer, e); + } + } - fn respond(&self, packet_id: u8, packet_body: Vec) { - if let Err(e) = self.respond(packet_id, packet_body) { - debug!(target: "pip", "Error responding to peer message: {}", e); - } - } + fn respond(&self, packet_id: u8, packet_body: Vec) { + if let Err(e) = self.respond(packet_id, packet_body) { + debug!(target: "pip", "Error responding to peer message: {}", e); + } + } - fn disconnect_peer(&self, peer: PeerId) { - trace!(target: "pip", "Initiating disconnect of peer {}", peer); - NetworkContext::disconnect_peer(self, peer); - } + fn disconnect_peer(&self, peer: PeerId) { + trace!(target: "pip", "Initiating disconnect of peer {}", peer); + NetworkContext::disconnect_peer(self, peer); + } - fn disable_peer(&self, peer: PeerId) { - trace!(target: "pip", "Initiating disable of peer {}", peer); - NetworkContext::disable_peer(self, peer); - } + fn disable_peer(&self, peer: PeerId) { + trace!(target: "pip", "Initiating disable of peer {}", peer); + NetworkContext::disable_peer(self, peer); + } - fn protocol_version(&self, peer: PeerId) -> 
Option { - self.protocol_version(self.subprotocol_name(), peer) - } + fn protocol_version(&self, peer: PeerId) -> Option { + self.protocol_version(self.subprotocol_name(), peer) + } - fn persistent_peer_id(&self, peer: PeerId) -> Option { - self.session_info(peer).and_then(|info| info.id) - } + fn persistent_peer_id(&self, peer: PeerId) -> Option { + self.session_info(peer).and_then(|info| info.id) + } - fn is_reserved_peer(&self, peer: PeerId) -> bool { - NetworkContext::is_reserved_peer(self, peer) - } + fn is_reserved_peer(&self, peer: PeerId) -> bool { + NetworkContext::is_reserved_peer(self, peer) + } } /// Basic context for the protocol. pub trait BasicContext { - /// Returns the relevant's peer persistent Id (aka NodeId). - fn persistent_peer_id(&self, peer: PeerId) -> Option; + /// Returns the relevant's peer persistent Id (aka NodeId). + fn persistent_peer_id(&self, peer: PeerId) -> Option; - /// Make a request from a peer. - /// - /// Fails on: nonexistent peer, network error, peer not server, - /// insufficient credits. Does not check capabilities before sending. - /// On success, returns a request id which can later be coordinated - /// with an event. - fn request_from(&self, peer: PeerId, request: Requests) -> Result; + /// Make a request from a peer. + /// + /// Fails on: nonexistent peer, network error, peer not server, + /// insufficient credits. Does not check capabilities before sending. + /// On success, returns a request id which can later be coordinated + /// with an event. + fn request_from(&self, peer: PeerId, request: Requests) -> Result; - /// Make an announcement of new capabilities to the rest of the peers. - // TODO: maybe just put this on a timer in LightProtocol? - fn make_announcement(&self, announcement: Announcement); + /// Make an announcement of new capabilities to the rest of the peers. + // TODO: maybe just put this on a timer in LightProtocol? + fn make_announcement(&self, announcement: Announcement); - /// Disconnect a peer. 
- fn disconnect_peer(&self, peer: PeerId); + /// Disconnect a peer. + fn disconnect_peer(&self, peer: PeerId); - /// Disable a peer. - fn disable_peer(&self, peer: PeerId); + /// Disable a peer. + fn disable_peer(&self, peer: PeerId); } /// Context for a protocol event which has a peer ID attached. pub trait EventContext: BasicContext { - /// Get the peer relevant to the event e.g. message sender, - /// disconnected/connected peer. - fn peer(&self) -> PeerId; + /// Get the peer relevant to the event e.g. message sender, + /// disconnected/connected peer. + fn peer(&self) -> PeerId; - /// Treat the event context as a basic context. - fn as_basic(&self) -> &BasicContext; + /// Treat the event context as a basic context. + fn as_basic(&self) -> &BasicContext; } /// Basic context. pub struct TickCtx<'a> { - /// Io context to enable dispatch. - pub io: &'a IoContext, - /// Protocol implementation. - pub proto: &'a LightProtocol, + /// Io context to enable dispatch. + pub io: &'a IoContext, + /// Protocol implementation. 
+ pub proto: &'a LightProtocol, } impl<'a> BasicContext for TickCtx<'a> { - fn persistent_peer_id(&self, id: PeerId) -> Option { - self.io.persistent_peer_id(id) - } + fn persistent_peer_id(&self, id: PeerId) -> Option { + self.io.persistent_peer_id(id) + } - fn request_from(&self, peer: PeerId, requests: Requests) -> Result { - self.proto.request_from(self.io, peer, requests) - } + fn request_from(&self, peer: PeerId, requests: Requests) -> Result { + self.proto.request_from(self.io, peer, requests) + } - fn make_announcement(&self, announcement: Announcement) { - self.proto.make_announcement(self.io, announcement); - } + fn make_announcement(&self, announcement: Announcement) { + self.proto.make_announcement(self.io, announcement); + } - fn disconnect_peer(&self, peer: PeerId) { - self.io.disconnect_peer(peer); - } + fn disconnect_peer(&self, peer: PeerId) { + self.io.disconnect_peer(peer); + } - fn disable_peer(&self, peer: PeerId) { - self.io.disable_peer(peer); - } + fn disable_peer(&self, peer: PeerId) { + self.io.disable_peer(peer); + } } /// Concrete implementation of `EventContext` over the light protocol struct and /// an io context. pub struct Ctx<'a> { - /// Io context to enable immediate response to events. - pub io: &'a IoContext, - /// Protocol implementation. - pub proto: &'a LightProtocol, - /// Relevant peer for event. - pub peer: PeerId, + /// Io context to enable immediate response to events. + pub io: &'a IoContext, + /// Protocol implementation. + pub proto: &'a LightProtocol, + /// Relevant peer for event. 
+ pub peer: PeerId, } impl<'a> BasicContext for Ctx<'a> { - fn persistent_peer_id(&self, id: PeerId) -> Option { - self.io.persistent_peer_id(id) - } + fn persistent_peer_id(&self, id: PeerId) -> Option { + self.io.persistent_peer_id(id) + } - fn request_from(&self, peer: PeerId, requests: Requests) -> Result { - self.proto.request_from(self.io, peer, requests) - } + fn request_from(&self, peer: PeerId, requests: Requests) -> Result { + self.proto.request_from(self.io, peer, requests) + } - fn make_announcement(&self, announcement: Announcement) { - self.proto.make_announcement(self.io, announcement); - } + fn make_announcement(&self, announcement: Announcement) { + self.proto.make_announcement(self.io, announcement); + } - fn disconnect_peer(&self, peer: PeerId) { - self.io.disconnect_peer(peer); - } + fn disconnect_peer(&self, peer: PeerId) { + self.io.disconnect_peer(peer); + } - fn disable_peer(&self, peer: PeerId) { - self.io.disable_peer(peer); - } + fn disable_peer(&self, peer: PeerId) { + self.io.disable_peer(peer); + } } impl<'a> EventContext for Ctx<'a> { - fn peer(&self) -> PeerId { - self.peer - } + fn peer(&self) -> PeerId { + self.peer + } - fn as_basic(&self) -> &BasicContext { - &*self - } + fn as_basic(&self) -> &BasicContext { + &*self + } } diff --git a/ethcore/light/src/net/error.rs b/ethcore/light/src/net/error.rs index b29327c60..614287e79 100644 --- a/ethcore/light/src/net/error.rs +++ b/ethcore/light/src/net/error.rs @@ -17,8 +17,9 @@ //! Defines error types and levels of punishment to use upon //! encountering. +use network; +use rlp; use std::fmt; -use {rlp, network}; /// Levels of punishment. /// @@ -27,98 +28,100 @@ use {rlp, network}; // In ascending order #[derive(Debug, PartialEq, Eq)] pub enum Punishment { - /// Perform no punishment. - None, - /// Disconnect the peer, but don't prevent them from reconnecting. - Disconnect, - /// Disconnect the peer and prevent them from reconnecting. - Disable, + /// Perform no punishment. 
+ None, + /// Disconnect the peer, but don't prevent them from reconnecting. + Disconnect, + /// Disconnect the peer and prevent them from reconnecting. + Disable, } /// Kinds of errors which can be encountered in the course of LES. #[derive(Debug)] pub enum Error { - /// An RLP decoding error. - Rlp(rlp::DecoderError), - /// A network error. - Network(network::Error), - /// Out of credits. - NoCredits, - /// Unrecognized packet code. - UnrecognizedPacket(u8), - /// Unexpected handshake. - UnexpectedHandshake, - /// Peer on wrong network (wrong NetworkId or genesis hash) - WrongNetwork, - /// Unknown peer. - UnknownPeer, - /// Unsolicited response. - UnsolicitedResponse, - /// Bad back-reference in request. - BadBackReference, - /// Not a server. - NotServer, - /// Unsupported protocol version. - UnsupportedProtocolVersion(u8), - /// Bad protocol version. - BadProtocolVersion, - /// Peer is overburdened. - Overburdened, - /// No handler kept the peer. - RejectedByHandlers, + /// An RLP decoding error. + Rlp(rlp::DecoderError), + /// A network error. + Network(network::Error), + /// Out of credits. + NoCredits, + /// Unrecognized packet code. + UnrecognizedPacket(u8), + /// Unexpected handshake. + UnexpectedHandshake, + /// Peer on wrong network (wrong NetworkId or genesis hash) + WrongNetwork, + /// Unknown peer. + UnknownPeer, + /// Unsolicited response. + UnsolicitedResponse, + /// Bad back-reference in request. + BadBackReference, + /// Not a server. + NotServer, + /// Unsupported protocol version. + UnsupportedProtocolVersion(u8), + /// Bad protocol version. + BadProtocolVersion, + /// Peer is overburdened. + Overburdened, + /// No handler kept the peer. + RejectedByHandlers, } impl Error { - /// What level of punishment does this error warrant? 
- pub fn punishment(&self) -> Punishment { - match *self { - Error::Rlp(_) => Punishment::Disable, - Error::Network(_) => Punishment::None, - Error::NoCredits => Punishment::Disable, - Error::UnrecognizedPacket(_) => Punishment::Disconnect, - Error::UnexpectedHandshake => Punishment::Disconnect, - Error::WrongNetwork => Punishment::Disable, - Error::UnknownPeer => Punishment::Disconnect, - Error::UnsolicitedResponse => Punishment::Disable, - Error::BadBackReference => Punishment::Disable, - Error::NotServer => Punishment::Disable, - Error::UnsupportedProtocolVersion(_) => Punishment::Disable, - Error::BadProtocolVersion => Punishment::Disable, - Error::Overburdened => Punishment::None, - Error::RejectedByHandlers => Punishment::Disconnect, - } - } + /// What level of punishment does this error warrant? + pub fn punishment(&self) -> Punishment { + match *self { + Error::Rlp(_) => Punishment::Disable, + Error::Network(_) => Punishment::None, + Error::NoCredits => Punishment::Disable, + Error::UnrecognizedPacket(_) => Punishment::Disconnect, + Error::UnexpectedHandshake => Punishment::Disconnect, + Error::WrongNetwork => Punishment::Disable, + Error::UnknownPeer => Punishment::Disconnect, + Error::UnsolicitedResponse => Punishment::Disable, + Error::BadBackReference => Punishment::Disable, + Error::NotServer => Punishment::Disable, + Error::UnsupportedProtocolVersion(_) => Punishment::Disable, + Error::BadProtocolVersion => Punishment::Disable, + Error::Overburdened => Punishment::None, + Error::RejectedByHandlers => Punishment::Disconnect, + } + } } impl From for Error { - fn from(err: rlp::DecoderError) -> Self { - Error::Rlp(err) - } + fn from(err: rlp::DecoderError) -> Self { + Error::Rlp(err) + } } impl From for Error { - fn from(err: network::Error) -> Self { - Error::Network(err) - } + fn from(err: network::Error) -> Self { + Error::Network(err) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - 
Error::Rlp(ref err) => err.fmt(f), - Error::Network(ref err) => err.fmt(f), - Error::NoCredits => write!(f, "Out of request credits"), - Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code), - Error::UnexpectedHandshake => write!(f, "Unexpected handshake"), - Error::WrongNetwork => write!(f, "Wrong network"), - Error::UnknownPeer => write!(f, "Unknown peer"), - Error::UnsolicitedResponse => write!(f, "Peer provided unsolicited data"), - Error::BadBackReference => write!(f, "Bad back-reference in request."), - Error::NotServer => write!(f, "Peer not a server."), - Error::UnsupportedProtocolVersion(pv) => write!(f, "Unsupported protocol version: {}", pv), - Error::BadProtocolVersion => write!(f, "Bad protocol version in handshake"), - Error::Overburdened => write!(f, "Peer overburdened"), - Error::RejectedByHandlers => write!(f, "No handler kept this peer"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Rlp(ref err) => err.fmt(f), + Error::Network(ref err) => err.fmt(f), + Error::NoCredits => write!(f, "Out of request credits"), + Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code), + Error::UnexpectedHandshake => write!(f, "Unexpected handshake"), + Error::WrongNetwork => write!(f, "Wrong network"), + Error::UnknownPeer => write!(f, "Unknown peer"), + Error::UnsolicitedResponse => write!(f, "Peer provided unsolicited data"), + Error::BadBackReference => write!(f, "Bad back-reference in request."), + Error::NotServer => write!(f, "Peer not a server."), + Error::UnsupportedProtocolVersion(pv) => { + write!(f, "Unsupported protocol version: {}", pv) + } + Error::BadProtocolVersion => write!(f, "Bad protocol version in handshake"), + Error::Overburdened => write!(f, "Peer overburdened"), + Error::RejectedByHandlers => write!(f, "No handler kept this peer"), + } + } } diff --git a/ethcore/light/src/net/load_timer.rs b/ethcore/light/src/net/load_timer.rs index 
9dc033c47..1736fce0f 100644 --- a/ethcore/light/src/net/load_timer.rs +++ b/ethcore/light/src/net/load_timer.rs @@ -24,15 +24,17 @@ //! length `TIME_PERIOD_MS`, with the exception that time periods where no data is //! gathered are excluded. -use std::collections::{HashMap, VecDeque}; -use std::fs::File; -use std::path::PathBuf; -use std::time::{Duration, Instant}; +use std::{ + collections::{HashMap, VecDeque}, + fs::File, + path::PathBuf, + time::{Duration, Instant}, +}; use request::{CompleteRequest, Kind}; use bincode; -use parking_lot::{RwLock, Mutex}; +use parking_lot::{Mutex, RwLock}; /// Number of time periods samples should be kept for. pub const MOVING_SAMPLE_SIZE: usize = 256; @@ -40,11 +42,11 @@ pub const MOVING_SAMPLE_SIZE: usize = 256; /// Stores rolling load timer samples. // TODO: switch to bigint if possible (FP casts aren't available) pub trait SampleStore: Send + Sync { - /// Load samples. - fn load(&self) -> HashMap>; + /// Load samples. + fn load(&self) -> HashMap>; - /// Store all samples. - fn store(&self, samples: &HashMap>); + /// Store all samples. + fn store(&self, samples: &HashMap>); } // get a hardcoded, arbitrarily determined (but intended overestimate) @@ -52,231 +54,252 @@ pub trait SampleStore: Send + Sync { // // TODO: seed this with empirical data. fn hardcoded_serve_time(kind: Kind) -> Duration { - Duration::new(0, match kind { - Kind::Headers => 500_000, - Kind::HeaderProof => 500_000, - Kind::TransactionIndex => 500_000, - Kind::Receipts => 1_000_000, - Kind::Body => 1_000_000, - Kind::Account => 1_500_000, - Kind::Storage => 2_000_000, - Kind::Code => 1_500_000, - Kind::Execution => 250, // per gas. 
- Kind::Signal => 500_000, - }) + Duration::new( + 0, + match kind { + Kind::Headers => 500_000, + Kind::HeaderProof => 500_000, + Kind::TransactionIndex => 500_000, + Kind::Receipts => 1_000_000, + Kind::Body => 1_000_000, + Kind::Account => 1_500_000, + Kind::Storage => 2_000_000, + Kind::Code => 1_500_000, + Kind::Execution => 250, // per gas. + Kind::Signal => 500_000, + }, + ) } /// A no-op store. pub struct NullStore; impl SampleStore for NullStore { - fn load(&self) -> HashMap> { HashMap::new() } - fn store(&self, _samples: &HashMap>) { } + fn load(&self) -> HashMap> { + HashMap::new() + } + fn store(&self, _samples: &HashMap>) {} } /// Request load distributions. pub struct LoadDistribution { - active_period: RwLock>>, - samples: RwLock>>, + active_period: RwLock>>, + samples: RwLock>>, } impl LoadDistribution { - /// Load rolling samples from the given store. - pub fn load(store: &SampleStore) -> Self { - let mut samples = store.load(); + /// Load rolling samples from the given store. + pub fn load(store: &SampleStore) -> Self { + let mut samples = store.load(); - for kind_samples in samples.values_mut() { - while kind_samples.len() > MOVING_SAMPLE_SIZE { - kind_samples.pop_front(); - } - } + for kind_samples in samples.values_mut() { + while kind_samples.len() > MOVING_SAMPLE_SIZE { + kind_samples.pop_front(); + } + } - LoadDistribution { - active_period: RwLock::new(HashMap::new()), - samples: RwLock::new(samples), - } - } + LoadDistribution { + active_period: RwLock::new(HashMap::new()), + samples: RwLock::new(samples), + } + } - /// Begin a timer. - pub fn begin_timer<'a>(&'a self, req: &CompleteRequest) -> LoadTimer<'a> { - let kind = req.kind(); - let n = match *req { - CompleteRequest::Headers(ref req) => req.max, - CompleteRequest::Execution(ref req) => req.gas.low_u64(), - _ => 1, - }; + /// Begin a timer. 
+ pub fn begin_timer<'a>(&'a self, req: &CompleteRequest) -> LoadTimer<'a> { + let kind = req.kind(); + let n = match *req { + CompleteRequest::Headers(ref req) => req.max, + CompleteRequest::Execution(ref req) => req.gas.low_u64(), + _ => 1, + }; - LoadTimer { - start: Instant::now(), - n, - dist: self, - kind, - } - } + LoadTimer { + start: Instant::now(), + n, + dist: self, + kind, + } + } - /// Calculate EMA of load for a specific request kind. - /// If there is no data for the given request kind, no EMA will be calculated, - /// but a hardcoded time will be returned. - pub fn expected_time(&self, kind: Kind) -> Duration { - let samples = self.samples.read(); - samples.get(&kind).and_then(|s| { - if s.is_empty() { return None } + /// Calculate EMA of load for a specific request kind. + /// If there is no data for the given request kind, no EMA will be calculated, + /// but a hardcoded time will be returned. + pub fn expected_time(&self, kind: Kind) -> Duration { + let samples = self.samples.read(); + samples + .get(&kind) + .and_then(|s| { + if s.is_empty() { + return None; + } - let alpha: f64 = 1_f64 / s.len() as f64; - let start = *s.front().expect("length known to be non-zero; qed") as f64; - let ema = s.iter().skip(1).fold(start, |a, &c| { - (alpha * c as f64) + ((1.0 - alpha) * a) - }); + let alpha: f64 = 1_f64 / s.len() as f64; + let start = *s.front().expect("length known to be non-zero; qed") as f64; + let ema = s + .iter() + .skip(1) + .fold(start, |a, &c| (alpha * c as f64) + ((1.0 - alpha) * a)); - Some(Duration::from_nanos(ema as u64)) - }).unwrap_or_else(move || hardcoded_serve_time(kind)) - } + Some(Duration::from_nanos(ema as u64)) + }) + .unwrap_or_else(move || hardcoded_serve_time(kind)) + } - /// End the current time period. Provide a store to - pub fn end_period(&self, store: &SampleStore) { - let active_period = self.active_period.read(); - let mut samples = self.samples.write(); + /// End the current time period. 
Provide a store to + pub fn end_period(&self, store: &SampleStore) { + let active_period = self.active_period.read(); + let mut samples = self.samples.write(); - for (&kind, set) in active_period.iter() { - let (elapsed, n) = ::std::mem::replace(&mut *set.lock(), (0, 0)); - if n == 0 { continue } + for (&kind, set) in active_period.iter() { + let (elapsed, n) = ::std::mem::replace(&mut *set.lock(), (0, 0)); + if n == 0 { + continue; + } - let kind_samples = samples.entry(kind) - .or_insert_with(|| VecDeque::with_capacity(MOVING_SAMPLE_SIZE)); + let kind_samples = samples + .entry(kind) + .or_insert_with(|| VecDeque::with_capacity(MOVING_SAMPLE_SIZE)); - if kind_samples.len() == MOVING_SAMPLE_SIZE { kind_samples.pop_front(); } - kind_samples.push_back(elapsed / n); - } + if kind_samples.len() == MOVING_SAMPLE_SIZE { + kind_samples.pop_front(); + } + kind_samples.push_back(elapsed / n); + } - store.store(&*samples); - } + store.store(&*samples); + } - fn update(&self, kind: Kind, elapsed: Duration, n: u64) { - macro_rules! update_counters { - ($counters: expr) => { - $counters.0 = $counters.0.saturating_add({ elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64 }); - $counters.1 = $counters.1.saturating_add(n); - } - }; + fn update(&self, kind: Kind, elapsed: Duration, n: u64) { + macro_rules! 
update_counters { + ($counters: expr) => { + $counters.0 = $counters.0.saturating_add({ + elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64 + }); + $counters.1 = $counters.1.saturating_add(n); + }; + }; - { - let set = self.active_period.read(); - if let Some(counters) = set.get(&kind) { - let mut counters = counters.lock(); - update_counters!(counters); - return; - } - } + { + let set = self.active_period.read(); + if let Some(counters) = set.get(&kind) { + let mut counters = counters.lock(); + update_counters!(counters); + return; + } + } - let mut set = self.active_period.write(); - let counters = set - .entry(kind) - .or_insert_with(|| Mutex::new((0, 0))); + let mut set = self.active_period.write(); + let counters = set.entry(kind).or_insert_with(|| Mutex::new((0, 0))); - update_counters!(counters.get_mut()); - } + update_counters!(counters.get_mut()); + } } /// A timer for a single request. /// On drop, this will update the distribution. pub struct LoadTimer<'a> { - start: Instant, - n: u64, - dist: &'a LoadDistribution, - kind: Kind, + start: Instant, + n: u64, + dist: &'a LoadDistribution, + kind: Kind, } impl<'a> Drop for LoadTimer<'a> { - fn drop(&mut self) { - let elapsed = self.start.elapsed(); - self.dist.update(self.kind, elapsed, self.n); - } + fn drop(&mut self) { + let elapsed = self.start.elapsed(); + self.dist.update(self.kind, elapsed, self.n); + } } /// A store which writes directly to a file. 
pub struct FileStore(pub PathBuf); impl SampleStore for FileStore { - fn load(&self) -> HashMap> { - File::open(&self.0) - .map_err(|e| Box::new(bincode::ErrorKind::IoError(e))) - .and_then(|mut file| bincode::deserialize_from(&mut file, bincode::Infinite)) - .unwrap_or_else(|_| HashMap::new()) - } + fn load(&self) -> HashMap> { + File::open(&self.0) + .map_err(|e| Box::new(bincode::ErrorKind::IoError(e))) + .and_then(|mut file| bincode::deserialize_from(&mut file, bincode::Infinite)) + .unwrap_or_else(|_| HashMap::new()) + } - fn store(&self, samples: &HashMap>) { - let res = File::create(&self.0) - .map_err(|e| Box::new(bincode::ErrorKind::IoError(e))) - .and_then(|mut file| bincode::serialize_into(&mut file, samples, bincode::Infinite)); + fn store(&self, samples: &HashMap>) { + let res = File::create(&self.0) + .map_err(|e| Box::new(bincode::ErrorKind::IoError(e))) + .and_then(|mut file| bincode::serialize_into(&mut file, samples, bincode::Infinite)); - if let Err(e) = res { - warn!(target: "pip", "Error writing light request timing samples to file: {}", e); - } - } + if let Err(e) = res { + warn!(target: "pip", "Error writing light request timing samples to file: {}", e); + } + } } #[cfg(test)] mod tests { - use super::*; - use request::Kind; + use super::*; + use request::Kind; - #[test] - fn hardcoded_before_data() { - let dist = LoadDistribution::load(&NullStore); - assert_eq!(dist.expected_time(Kind::Headers), hardcoded_serve_time(Kind::Headers)); + #[test] + fn hardcoded_before_data() { + let dist = LoadDistribution::load(&NullStore); + assert_eq!( + dist.expected_time(Kind::Headers), + hardcoded_serve_time(Kind::Headers) + ); - dist.update(Kind::Headers, Duration::new(0, 100_000), 100); - dist.end_period(&NullStore); + dist.update(Kind::Headers, Duration::new(0, 100_000), 100); + dist.end_period(&NullStore); - assert_eq!(dist.expected_time(Kind::Headers), Duration::new(0, 1000)); - } + assert_eq!(dist.expected_time(Kind::Headers), Duration::new(0, 
1000)); + } - #[test] - fn moving_average() { - let dist = LoadDistribution::load(&NullStore); + #[test] + fn moving_average() { + let dist = LoadDistribution::load(&NullStore); - let mut sum = 0; + let mut sum = 0; - for (i, x) in (0..10).map(|x| x * 10_000).enumerate() { - dist.update(Kind::Headers, Duration::new(0, x), 1); - dist.end_period(&NullStore); + for (i, x) in (0..10).map(|x| x * 10_000).enumerate() { + dist.update(Kind::Headers, Duration::new(0, x), 1); + dist.end_period(&NullStore); - sum += x; - if i == 0 { continue } + sum += x; + if i == 0 { + continue; + } - let moving_average = dist.expected_time(Kind::Headers); + let moving_average = dist.expected_time(Kind::Headers); - // should be weighted below the maximum entry. - let arith_average = (sum as f64 / (i + 1) as f64) as u32; - assert!(moving_average < Duration::new(0, x)); + // should be weighted below the maximum entry. + let arith_average = (sum as f64 / (i + 1) as f64) as u32; + assert!(moving_average < Duration::new(0, x)); - // when there are only 2 entries, they should be equal due to choice of - // ALPHA = 1/N. - // otherwise, the weight should be below the arithmetic mean because the much - // smaller previous values are discounted less. - if i == 1 { - assert_eq!(moving_average, Duration::new(0, arith_average)); - } else { - assert!(moving_average < Duration::new(0, arith_average)) - } - } - } + // when there are only 2 entries, they should be equal due to choice of + // ALPHA = 1/N. + // otherwise, the weight should be below the arithmetic mean because the much + // smaller previous values are discounted less. 
+ if i == 1 { + assert_eq!(moving_average, Duration::new(0, arith_average)); + } else { + assert!(moving_average < Duration::new(0, arith_average)) + } + } + } - #[test] - fn file_store() { - let tempdir = ::tempdir::TempDir::new("").unwrap(); - let path = tempdir.path().join("file"); - let store = FileStore(path); + #[test] + fn file_store() { + let tempdir = ::tempdir::TempDir::new("").unwrap(); + let path = tempdir.path().join("file"); + let store = FileStore(path); - let mut samples = store.load(); - assert!(samples.is_empty()); - samples.insert(Kind::Headers, vec![5, 2, 7, 2, 2, 4].into()); - samples.insert(Kind::Execution, vec![1, 1, 100, 250].into()); + let mut samples = store.load(); + assert!(samples.is_empty()); + samples.insert(Kind::Headers, vec![5, 2, 7, 2, 2, 4].into()); + samples.insert(Kind::Execution, vec![1, 1, 100, 250].into()); - store.store(&samples); + store.store(&samples); - let dup = store.load(); + let dup = store.load(); - assert_eq!(samples, dup); - } + assert_eq!(samples, dup); + } } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 2fd6c340e..632156418 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -22,40 +22,48 @@ use common_types::transaction::UnverifiedTransaction; use ethereum_types::{H256, U256}; use io::TimerToken; use kvdb::DBValue; -use network::{NetworkProtocolHandler, NetworkContext, PeerId}; +use network::{NetworkContext, NetworkProtocolHandler, PeerId}; use parking_lot::{Mutex, RwLock}; use provider::Provider; -use request::{Request, NetworkRequests as Requests, Response}; -use rlp::{RlpStream, Rlp}; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::fmt; -use std::ops::{BitOr, BitAnd, Not}; -use std::sync::Arc; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::time::{Duration, Instant}; +use request::{NetworkRequests as Requests, Request, Response}; +use rlp::{Rlp, RlpStream}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + fmt, + 
ops::{BitAnd, BitOr, Not}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; -use self::request_credits::{Credits, FlowParams}; -use self::context::{Ctx, TickCtx}; -use self::error::Punishment; -use self::load_timer::{LoadDistribution, NullStore, MOVING_SAMPLE_SIZE}; -use self::request_set::RequestSet; -use self::id_guard::IdGuard; +use self::{ + context::{Ctx, TickCtx}, + error::Punishment, + id_guard::IdGuard, + load_timer::{LoadDistribution, NullStore, MOVING_SAMPLE_SIZE}, + request_credits::{Credits, FlowParams}, + request_set::RequestSet, +}; mod context; mod error; mod load_timer; -mod status; mod request_set; +mod status; #[cfg(test)] mod tests; pub mod request_credits; -pub use self::context::{BasicContext, EventContext, IoContext}; -pub use self::error::Error; -pub use self::load_timer::{SampleStore, FileStore}; -pub use self::status::{Status, Capabilities, Announcement}; +pub use self::{ + context::{BasicContext, EventContext, IoContext}, + error::Error, + load_timer::{FileStore, SampleStore}, + status::{Announcement, Capabilities, Status}, +}; const TIMEOUT: TimerToken = 0; const TIMEOUT_INTERVAL: Duration = Duration::from_secs(1); @@ -86,54 +94,52 @@ const UPDATE_INTERVAL: Duration = Duration::from_millis(5000); const PACKET_COUNT_V1: u8 = 9; /// Supported protocol versions. -pub const PROTOCOL_VERSIONS: &[(u8, u8)] = &[ - (1, PACKET_COUNT_V1), -]; +pub const PROTOCOL_VERSIONS: &[(u8, u8)] = &[(1, PACKET_COUNT_V1)]; /// Max protocol version. pub const MAX_PROTOCOL_VERSION: u8 = 1; // packet ID definitions. mod packet { - // the status packet. - pub const STATUS: u8 = 0x00; + // the status packet. + pub const STATUS: u8 = 0x00; - // announcement of new block hashes or capabilities. - pub const ANNOUNCE: u8 = 0x01; + // announcement of new block hashes or capabilities. + pub const ANNOUNCE: u8 = 0x01; - // request and response. 
- pub const REQUEST: u8 = 0x02; - pub const RESPONSE: u8 = 0x03; + // request and response. + pub const REQUEST: u8 = 0x02; + pub const RESPONSE: u8 = 0x03; - // request credits update and acknowledgement. - pub const UPDATE_CREDITS: u8 = 0x04; - pub const ACKNOWLEDGE_UPDATE: u8 = 0x05; + // request credits update and acknowledgement. + pub const UPDATE_CREDITS: u8 = 0x04; + pub const ACKNOWLEDGE_UPDATE: u8 = 0x05; - // relay transactions to peers. - pub const SEND_TRANSACTIONS: u8 = 0x06; + // relay transactions to peers. + pub const SEND_TRANSACTIONS: u8 = 0x06; - // two packets were previously meant to be reserved for epoch proofs. - // these have since been moved to requests. + // two packets were previously meant to be reserved for epoch proofs. + // these have since been moved to requests. } // timeouts for different kinds of requests. all values are in milliseconds. mod timeout { - use std::time::Duration; + use std::time::Duration; - pub const HANDSHAKE: Duration = Duration::from_millis(4_000); - pub const ACKNOWLEDGE_UPDATE: Duration = Duration::from_millis(5_000); - pub const BASE: u64 = 2_500; // base timeout for packet. + pub const HANDSHAKE: Duration = Duration::from_millis(4_000); + pub const ACKNOWLEDGE_UPDATE: Duration = Duration::from_millis(5_000); + pub const BASE: u64 = 2_500; // base timeout for packet. - // timeouts per request within packet. - pub const HEADERS: u64 = 250; // per header? - pub const TRANSACTION_INDEX: u64 = 100; - pub const BODY: u64 = 50; - pub const RECEIPT: u64 = 50; - pub const PROOF: u64 = 100; // state proof - pub const CONTRACT_CODE: u64 = 100; - pub const HEADER_PROOF: u64 = 100; - pub const TRANSACTION_PROOF: u64 = 1000; // per gas? - pub const EPOCH_SIGNAL: u64 = 200; + // timeouts per request within packet. + pub const HEADERS: u64 = 250; // per header? 
+ pub const TRANSACTION_INDEX: u64 = 100; + pub const BODY: u64 = 50; + pub const RECEIPT: u64 = 50; + pub const PROOF: u64 = 100; // state proof + pub const CONTRACT_CODE: u64 = 100; + pub const HEADER_PROOF: u64 = 100; + pub const TRANSACTION_PROOF: u64 = 1000; // per gas? + pub const EPOCH_SIGNAL: u64 = 200; } /// A request id. @@ -146,81 +152,81 @@ pub struct ReqId(usize); pub struct ReqId(pub usize); impl fmt::Display for ReqId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Request #{}", self.0) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Request #{}", self.0) + } } // A pending peer: one we've sent our status to but // may not have received one for. struct PendingPeer { - sent_head: H256, - last_update: Instant, + sent_head: H256, + last_update: Instant, } /// Relevant data to each peer. Not accessible publicly, only `pub` due to /// limitations of the privacy system. pub struct Peer { - local_credits: Credits, // their credits relative to us - status: Status, - capabilities: Capabilities, - remote_flow: Option<(Credits, FlowParams)>, - sent_head: H256, // last chain head we've given them. - last_update: Instant, - pending_requests: RequestSet, - failed_requests: Vec, - propagated_transactions: HashSet, - skip_update: bool, - local_flow: Arc, - awaiting_acknowledge: Option<(Instant, Arc)>, + local_credits: Credits, // their credits relative to us + status: Status, + capabilities: Capabilities, + remote_flow: Option<(Credits, FlowParams)>, + sent_head: H256, // last chain head we've given them. 
+ last_update: Instant, + pending_requests: RequestSet, + failed_requests: Vec, + propagated_transactions: HashSet, + skip_update: bool, + local_flow: Arc, + awaiting_acknowledge: Option<(Instant, Arc)>, } /// Whether or not a peer was kept by a handler #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum PeerStatus { - /// The peer was kept - Kept, - /// The peer was not kept - Unkept, + /// The peer was kept + Kept, + /// The peer was not kept + Unkept, } impl Not for PeerStatus { - type Output = Self; + type Output = Self; - fn not(self) -> Self { - use self::PeerStatus::*; + fn not(self) -> Self { + use self::PeerStatus::*; - match self { - Kept => Unkept, - Unkept => Kept, - } - } + match self { + Kept => Unkept, + Unkept => Kept, + } + } } impl BitAnd for PeerStatus { - type Output = Self; + type Output = Self; - fn bitand(self, other: Self) -> Self { - use self::PeerStatus::*; + fn bitand(self, other: Self) -> Self { + use self::PeerStatus::*; - match (self, other) { - (Kept, Kept) => Kept, - _ => Unkept, - } - } + match (self, other) { + (Kept, Kept) => Kept, + _ => Unkept, + } + } } impl BitOr for PeerStatus { - type Output = Self; + type Output = Self; - fn bitor(self, other: Self) -> Self { - use self::PeerStatus::*; + fn bitor(self, other: Self) -> Self { + use self::PeerStatus::*; - match (self, other) { - (_, Kept) | (Kept, _) => Kept, - _ => Unkept, - } - } + match (self, other) { + (_, Kept) | (Kept, _) => Kept, + _ => Unkept, + } + } } /// A light protocol event handler. @@ -233,64 +239,66 @@ impl BitOr for PeerStatus { /// Response handlers are not given a copy of the original request; it is assumed /// that relevant data will be stored by interested handlers. pub trait Handler: Send + Sync { - /// Called when a peer connects. 
- fn on_connect( - &self, - _ctx: &EventContext, - _status: &Status, - _capabilities: &Capabilities - ) -> PeerStatus { PeerStatus::Kept } - /// Called when a peer disconnects, with a list of unfulfilled request IDs as - /// of yet. - fn on_disconnect(&self, _ctx: &EventContext, _unfulfilled: &[ReqId]) { } - /// Called when a peer makes an announcement. - fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) { } - /// Called when a peer requests relay of some transactions. - fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) { } - /// Called when a peer responds to requests. - /// Responses not guaranteed to contain valid data and are not yet checked against - /// the requests they correspond to. - fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _responses: &[Response]) { } - /// Called when a peer responds with a transaction proof. Each proof is a vector of state items. - fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { } - /// Called to "tick" the handler periodically. - fn tick(&self, _ctx: &BasicContext) { } - /// Called on abort. This signals to handlers that they should clean up - /// and ignore peers. - // TODO: coreresponding `on_activate`? - fn on_abort(&self) { } + /// Called when a peer connects. + fn on_connect( + &self, + _ctx: &EventContext, + _status: &Status, + _capabilities: &Capabilities, + ) -> PeerStatus { + PeerStatus::Kept + } + /// Called when a peer disconnects, with a list of unfulfilled request IDs as + /// of yet. + fn on_disconnect(&self, _ctx: &EventContext, _unfulfilled: &[ReqId]) {} + /// Called when a peer makes an announcement. + fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) {} + /// Called when a peer requests relay of some transactions. + fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) {} + /// Called when a peer responds to requests. 
+ /// Responses not guaranteed to contain valid data and are not yet checked against + /// the requests they correspond to. + fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _responses: &[Response]) {} + /// Called when a peer responds with a transaction proof. Each proof is a vector of state items. + fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) {} + /// Called to "tick" the handler periodically. + fn tick(&self, _ctx: &BasicContext) {} + /// Called on abort. This signals to handlers that they should clean up + /// and ignore peers. + // TODO: coreresponding `on_activate`? + fn on_abort(&self) {} } /// Configuration. pub struct Config { - /// How many stored seconds of credits peers should be able to accumulate. - pub max_stored_seconds: u64, - /// The network config median peers (used as default peer count) - pub median_peers: f64, + /// How many stored seconds of credits peers should be able to accumulate. + pub max_stored_seconds: u64, + /// The network config median peers (used as default peer count) + pub median_peers: f64, } impl Default for Config { - fn default() -> Self { - const MEDIAN_PEERS: f64 = 25.0; - const MAX_ACCUMULATED: u64 = 60 * 5; // only charge for 5 minutes. + fn default() -> Self { + const MEDIAN_PEERS: f64 = 25.0; + const MAX_ACCUMULATED: u64 = 60 * 5; // only charge for 5 minutes. - Config { - max_stored_seconds: MAX_ACCUMULATED, - median_peers: MEDIAN_PEERS, - } - } + Config { + max_stored_seconds: MAX_ACCUMULATED, + median_peers: MEDIAN_PEERS, + } + } } /// Protocol initialization parameters. pub struct Params { - /// Network id. - pub network_id: u64, - /// Config. - pub config: Config, - /// Initial capabilities. - pub capabilities: Capabilities, - /// The sample store (`None` if data shouldn't persist between runs). - pub sample_store: Option>, + /// Network id. + pub network_id: u64, + /// Config. + pub config: Config, + /// Initial capabilities. 
+ pub capabilities: Capabilities, + /// The sample store (`None` if data shouldn't persist between runs). + pub sample_store: Option>, } /// Type alias for convenience. @@ -298,86 +306,90 @@ pub type PeerMap = HashMap>; mod id_guard { - use network::PeerId; - use parking_lot::RwLockReadGuard; + use network::PeerId; + use parking_lot::RwLockReadGuard; - use super::{PeerMap, ReqId}; + use super::{PeerMap, ReqId}; - // Guards success or failure of given request. - // On drop, inserts the req_id into the "failed requests" - // set for the peer unless defused. In separate module to enforce correct usage. - pub struct IdGuard<'a> { - peers: RwLockReadGuard<'a, PeerMap>, - peer_id: PeerId, - req_id: ReqId, - active: bool, - } + // Guards success or failure of given request. + // On drop, inserts the req_id into the "failed requests" + // set for the peer unless defused. In separate module to enforce correct usage. + pub struct IdGuard<'a> { + peers: RwLockReadGuard<'a, PeerMap>, + peer_id: PeerId, + req_id: ReqId, + active: bool, + } - impl<'a> IdGuard<'a> { - /// Create a new `IdGuard`, which will prevent access of the inner ReqId - /// (for forming responses, triggering handlers) until defused - pub fn new(peers: RwLockReadGuard<'a, PeerMap>, peer_id: PeerId, req_id: ReqId) -> Self { - IdGuard { - peers, - peer_id, - req_id, - active: true, - } - } + impl<'a> IdGuard<'a> { + /// Create a new `IdGuard`, which will prevent access of the inner ReqId + /// (for forming responses, triggering handlers) until defused + pub fn new(peers: RwLockReadGuard<'a, PeerMap>, peer_id: PeerId, req_id: ReqId) -> Self { + IdGuard { + peers, + peer_id, + req_id, + active: true, + } + } - /// Defuse the guard, signalling that the request has been successfully decoded. - pub fn defuse(mut self) -> ReqId { - // can't use the mem::forget trick here since we need the - // read guard to drop. 
- self.active = false; - self.req_id - } - } + /// Defuse the guard, signalling that the request has been successfully decoded. + pub fn defuse(mut self) -> ReqId { + // can't use the mem::forget trick here since we need the + // read guard to drop. + self.active = false; + self.req_id + } + } - impl<'a> Drop for IdGuard<'a> { - fn drop(&mut self) { - if !self.active { return } - if let Some(p) = self.peers.get(&self.peer_id) { - p.lock().failed_requests.push(self.req_id); - } - } - } + impl<'a> Drop for IdGuard<'a> { + fn drop(&mut self) { + if !self.active { + return; + } + if let Some(p) = self.peers.get(&self.peer_id) { + p.lock().failed_requests.push(self.req_id); + } + } + } } /// Provides various statistics that could /// be used to compute costs pub struct Statistics { - /// Samples of peer count - peer_counts: VecDeque, + /// Samples of peer count + peer_counts: VecDeque, } impl Statistics { - /// Create a new Statistics instance - pub fn new() -> Self { - Statistics { - peer_counts: VecDeque::with_capacity(MOVING_SAMPLE_SIZE), - } - } + /// Create a new Statistics instance + pub fn new() -> Self { + Statistics { + peer_counts: VecDeque::with_capacity(MOVING_SAMPLE_SIZE), + } + } - /// Add a new peer_count sample - pub fn add_peer_count(&mut self, peer_count: usize) { - while self.peer_counts.len() >= MOVING_SAMPLE_SIZE { - self.peer_counts.pop_front(); - } - self.peer_counts.push_back(peer_count); - } + /// Add a new peer_count sample + pub fn add_peer_count(&mut self, peer_count: usize) { + while self.peer_counts.len() >= MOVING_SAMPLE_SIZE { + self.peer_counts.pop_front(); + } + self.peer_counts.push_back(peer_count); + } - /// Get the average peer count from previous samples. 
Is always >= 1.0 - pub fn avg_peer_count(&self) -> f64 { - let len = self.peer_counts.len(); - if len == 0 { - return 1.0; - } - let avg = self.peer_counts.iter() - .fold(0, |sum: u32, &v| sum.saturating_add(v as u32)) as f64 - / len as f64; - avg.max(1.0) - } + /// Get the average peer count from previous samples. Is always >= 1.0 + pub fn avg_peer_count(&self) -> f64 { + let len = self.peer_counts.len(); + if len == 0 { + return 1.0; + } + let avg = self + .peer_counts + .iter() + .fold(0, |sum: u32, &v| sum.saturating_add(v as u32)) as f64 + / len as f64; + avg.max(1.0) + } } /// This is an implementation of the light ethereum network protocol, abstracted @@ -391,158 +403,172 @@ impl Statistics { // Locks must be acquired in the order declared, and when holding a read lock // on the peers, only one peer may be held at a time. pub struct LightProtocol { - provider: Arc, - config: Config, - genesis_hash: H256, - network_id: u64, - pending_peers: RwLock>, - peers: RwLock, - capabilities: RwLock, - flow_params: RwLock>, - free_flow_params: Arc, - handlers: Vec>, - req_id: AtomicUsize, - sample_store: Box, - load_distribution: LoadDistribution, - statistics: RwLock, + provider: Arc, + config: Config, + genesis_hash: H256, + network_id: u64, + pending_peers: RwLock>, + peers: RwLock, + capabilities: RwLock, + flow_params: RwLock>, + free_flow_params: Arc, + handlers: Vec>, + req_id: AtomicUsize, + sample_store: Box, + load_distribution: LoadDistribution, + statistics: RwLock, } impl LightProtocol { - /// Create a new instance of the protocol manager. - pub fn new(provider: Arc, params: Params) -> Self { - debug!(target: "pip", "Initializing light protocol handler"); + /// Create a new instance of the protocol manager. 
+ pub fn new(provider: Arc, params: Params) -> Self { + debug!(target: "pip", "Initializing light protocol handler"); - let genesis_hash = provider.chain_info().genesis_hash; - let sample_store = params.sample_store.unwrap_or_else(|| Box::new(NullStore)); - let load_distribution = LoadDistribution::load(&*sample_store); - // Default load share relative to median peers - let load_share = MAX_LIGHTSERV_LOAD / params.config.median_peers; - let flow_params = FlowParams::from_request_times( - |kind| load_distribution.expected_time(kind), - load_share, - Duration::from_secs(params.config.max_stored_seconds), - ); + let genesis_hash = provider.chain_info().genesis_hash; + let sample_store = params.sample_store.unwrap_or_else(|| Box::new(NullStore)); + let load_distribution = LoadDistribution::load(&*sample_store); + // Default load share relative to median peers + let load_share = MAX_LIGHTSERV_LOAD / params.config.median_peers; + let flow_params = FlowParams::from_request_times( + |kind| load_distribution.expected_time(kind), + load_share, + Duration::from_secs(params.config.max_stored_seconds), + ); - LightProtocol { - provider, - config: params.config, - genesis_hash, - network_id: params.network_id, - pending_peers: RwLock::new(HashMap::new()), - peers: RwLock::new(HashMap::new()), - capabilities: RwLock::new(params.capabilities), - flow_params: RwLock::new(Arc::new(flow_params)), - free_flow_params: Arc::new(FlowParams::free()), - handlers: Vec::new(), - req_id: AtomicUsize::new(0), - sample_store, - load_distribution, - statistics: RwLock::new(Statistics::new()), - } - } + LightProtocol { + provider, + config: params.config, + genesis_hash, + network_id: params.network_id, + pending_peers: RwLock::new(HashMap::new()), + peers: RwLock::new(HashMap::new()), + capabilities: RwLock::new(params.capabilities), + flow_params: RwLock::new(Arc::new(flow_params)), + free_flow_params: Arc::new(FlowParams::free()), + handlers: Vec::new(), + req_id: AtomicUsize::new(0), + 
sample_store, + load_distribution, + statistics: RwLock::new(Statistics::new()), + } + } - /// Attempt to get peer status. - pub fn peer_status(&self, peer: PeerId) -> Option { - self.peers.read().get(&peer) - .map(|peer| peer.lock().status.clone()) - } + /// Attempt to get peer status. + pub fn peer_status(&self, peer: PeerId) -> Option { + self.peers + .read() + .get(&peer) + .map(|peer| peer.lock().status.clone()) + } - /// Get number of (connected, active) peers. - pub fn peer_count(&self) -> (usize, usize) { - let num_pending = self.pending_peers.read().len(); - let peers = self.peers.read(); - ( - num_pending + peers.len(), - peers.values().filter(|p| !p.lock().pending_requests.is_empty()).count(), - ) - } + /// Get number of (connected, active) peers. + pub fn peer_count(&self) -> (usize, usize) { + let num_pending = self.pending_peers.read().len(); + let peers = self.peers.read(); + ( + num_pending + peers.len(), + peers + .values() + .filter(|p| !p.lock().pending_requests.is_empty()) + .count(), + ) + } - /// Get the number of active light peers downloading from the - /// node - pub fn leecher_count(&self) -> usize { - let credit_limit = *self.flow_params.read().limit(); - // Count the number of peers that used some credit - self.peers.read().iter() - .filter(|(_, p)| p.lock().local_credits.current() < credit_limit) - .count() - } + /// Get the number of active light peers downloading from the + /// node + pub fn leecher_count(&self) -> usize { + let credit_limit = *self.flow_params.read().limit(); + // Count the number of peers that used some credit + self.peers + .read() + .iter() + .filter(|(_, p)| p.lock().local_credits.current() < credit_limit) + .count() + } - /// Make a request to a peer. - /// - /// Fails on: nonexistent peer, network error, peer not server, - /// insufficient credits. Does not check capabilities before sending. - /// On success, returns a request id which can later be coordinated - /// with an event. 
- pub fn request_from(&self, io: &IoContext, peer_id: PeerId, requests: Requests) -> Result { - let peers = self.peers.read(); - let peer = match peers.get(&peer_id) { - Some(peer) => peer, - None => return Err(Error::UnknownPeer), - }; + /// Make a request to a peer. + /// + /// Fails on: nonexistent peer, network error, peer not server, + /// insufficient credits. Does not check capabilities before sending. + /// On success, returns a request id which can later be coordinated + /// with an event. + pub fn request_from( + &self, + io: &IoContext, + peer_id: PeerId, + requests: Requests, + ) -> Result { + let peers = self.peers.read(); + let peer = match peers.get(&peer_id) { + Some(peer) => peer, + None => return Err(Error::UnknownPeer), + }; - let mut peer = peer.lock(); - let peer = &mut *peer; - match peer.remote_flow { - None => Err(Error::NotServer), - Some((ref mut creds, ref params)) => { - // apply recharge to credits if there's no pending requests. - if peer.pending_requests.is_empty() { - params.recharge(creds); - } + let mut peer = peer.lock(); + let peer = &mut *peer; + match peer.remote_flow { + None => Err(Error::NotServer), + Some((ref mut creds, ref params)) => { + // apply recharge to credits if there's no pending requests. + if peer.pending_requests.is_empty() { + params.recharge(creds); + } - // compute and deduct cost. - let pre_creds = creds.current(); - let cost = match params.compute_cost_multi(requests.requests()) { - Some(cost) => cost, - None => return Err(Error::NotServer), - }; + // compute and deduct cost. + let pre_creds = creds.current(); + let cost = match params.compute_cost_multi(requests.requests()) { + Some(cost) => cost, + None => return Err(Error::NotServer), + }; - creds.deduct_cost(cost)?; + creds.deduct_cost(cost)?; - trace!(target: "pip", "requesting from peer {}. Cost: {}; Available: {}", + trace!(target: "pip", "requesting from peer {}. 
Cost: {}; Available: {}", peer_id, cost, pre_creds); - let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); - io.send(peer_id, packet::REQUEST, { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id.0).append_list(&requests.requests()); - stream.out() - }); + let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); + io.send(peer_id, packet::REQUEST, { + let mut stream = RlpStream::new_list(2); + stream.append(&req_id.0).append_list(&requests.requests()); + stream.out() + }); - // begin timeout. - peer.pending_requests.insert(req_id, requests, cost, Instant::now()); - Ok(req_id) - } - } - } + // begin timeout. + peer.pending_requests + .insert(req_id, requests, cost, Instant::now()); + Ok(req_id) + } + } + } - /// Make an announcement of new chain head and capabilities to all peers. - /// The announcement is expected to be valid. - pub fn make_announcement(&self, io: &IoContext, mut announcement: Announcement) { - let mut reorgs_map = HashMap::new(); - let now = Instant::now(); + /// Make an announcement of new chain head and capabilities to all peers. + /// The announcement is expected to be valid. + pub fn make_announcement(&self, io: &IoContext, mut announcement: Announcement) { + let mut reorgs_map = HashMap::new(); + let now = Instant::now(); - // update stored capabilities - self.capabilities.write().update_from(&announcement); + // update stored capabilities + self.capabilities.write().update_from(&announcement); - // calculate reorg info and send packets - for (peer_id, peer_info) in self.peers.read().iter() { - let mut peer_info = peer_info.lock(); + // calculate reorg info and send packets + for (peer_id, peer_info) in self.peers.read().iter() { + let mut peer_info = peer_info.lock(); - // TODO: "urgent" announcements like new blocks? - // the timer approach will skip 1 (possibly 2) in rare occasions. - if peer_info.sent_head == announcement.head_hash || + // TODO: "urgent" announcements like new blocks? 
+ // the timer approach will skip 1 (possibly 2) in rare occasions. + if peer_info.sent_head == announcement.head_hash || peer_info.status.head_num >= announcement.head_num || // fix for underflow reported in // https://github.com/paritytech/parity-ethereum/issues/10419 now < peer_info.last_update || - now - peer_info.last_update < UPDATE_INTERVAL { - continue - } + now - peer_info.last_update < UPDATE_INTERVAL + { + continue; + } - peer_info.last_update = now; + peer_info.last_update = now; - let reorg_depth = reorgs_map.entry(peer_info.sent_head) + let reorg_depth = reorgs_map.entry(peer_info.sent_head) .or_insert_with(|| { match self.provider.reorg_depth(&announcement.head_hash, &peer_info.sent_head) { Some(depth) => depth, @@ -556,644 +582,717 @@ impl LightProtocol { } }); - peer_info.sent_head = announcement.head_hash; - announcement.reorg_depth = *reorg_depth; + peer_info.sent_head = announcement.head_hash; + announcement.reorg_depth = *reorg_depth; - io.send(*peer_id, packet::ANNOUNCE, status::write_announcement(&announcement)); - } - } + io.send( + *peer_id, + packet::ANNOUNCE, + status::write_announcement(&announcement), + ); + } + } - /// Add an event handler. - /// - /// These are intended to be added when the protocol structure - /// is initialized as a means of customizing its behavior, - /// and dispatching requests immediately upon events. - pub fn add_handler(&mut self, handler: Arc) { - self.handlers.push(handler); - } + /// Add an event handler. + /// + /// These are intended to be added when the protocol structure + /// is initialized as a means of customizing its behavior, + /// and dispatching requests immediately upon events. + pub fn add_handler(&mut self, handler: Arc) { + self.handlers.push(handler); + } - /// Signal to handlers that network activity is being aborted - /// and clear peer data. 
- pub fn abort(&self) { - for handler in &self.handlers { - handler.on_abort(); - } + /// Signal to handlers that network activity is being aborted + /// and clear peer data. + pub fn abort(&self) { + for handler in &self.handlers { + handler.on_abort(); + } - // acquire in order and hold. - let mut pending_peers = self.pending_peers.write(); - let mut peers = self.peers.write(); + // acquire in order and hold. + let mut pending_peers = self.pending_peers.write(); + let mut peers = self.peers.write(); - pending_peers.clear(); - peers.clear(); - } + pending_peers.clear(); + peers.clear(); + } - // Does the common pre-verification of responses before the response itself - // is actually decoded: - // - check whether peer exists - // - check whether request was made - // - check whether request kinds match - fn pre_verify_response(&self, peer: PeerId, raw: &Rlp) -> Result { - let req_id = ReqId(raw.val_at(0)?); - let cur_credits: U256 = raw.val_at(1)?; + // Does the common pre-verification of responses before the response itself + // is actually decoded: + // - check whether peer exists + // - check whether request was made + // - check whether request kinds match + fn pre_verify_response(&self, peer: PeerId, raw: &Rlp) -> Result { + let req_id = ReqId(raw.val_at(0)?); + let cur_credits: U256 = raw.val_at(1)?; - trace!(target: "pip", "pre-verifying response for {} from peer {}", req_id, peer); + trace!(target: "pip", "pre-verifying response for {} from peer {}", req_id, peer); - let peers = self.peers.read(); - let res = match peers.get(&peer) { - Some(peer_info) => { - let mut peer_info = peer_info.lock(); - let peer_info: &mut Peer = &mut *peer_info; - let req_info = peer_info.pending_requests.remove(req_id, Instant::now()); - let last_batched = peer_info.pending_requests.is_empty(); - let flow_info = peer_info.remote_flow.as_mut(); + let peers = self.peers.read(); + let res = match peers.get(&peer) { + Some(peer_info) => { + let mut peer_info = peer_info.lock(); + 
let peer_info: &mut Peer = &mut *peer_info; + let req_info = peer_info.pending_requests.remove(req_id, Instant::now()); + let last_batched = peer_info.pending_requests.is_empty(); + let flow_info = peer_info.remote_flow.as_mut(); - match (req_info, flow_info) { - (Some(_), Some(flow_info)) => { - let &mut (ref mut c, ref mut flow) = flow_info; + match (req_info, flow_info) { + (Some(_), Some(flow_info)) => { + let &mut (ref mut c, ref mut flow) = flow_info; - // only update if the cumulative cost of the request set is zero. - // and this response wasn't from before request costs were updated. - if !peer_info.skip_update && last_batched { - let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); - c.update_to(actual_credits); - } + // only update if the cumulative cost of the request set is zero. + // and this response wasn't from before request costs were updated. + if !peer_info.skip_update && last_batched { + let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); + c.update_to(actual_credits); + } - if last_batched { peer_info.skip_update = false } + if last_batched { + peer_info.skip_update = false + } - Ok(()) - } - (None, _) => Err(Error::UnsolicitedResponse), - (_, None) => Err(Error::NotServer), // really should be impossible. - } - } - None => Err(Error::UnknownPeer), // probably only occurs in a race of some kind. - }; + Ok(()) + } + (None, _) => Err(Error::UnsolicitedResponse), + (_, None) => Err(Error::NotServer), // really should be impossible. + } + } + None => Err(Error::UnknownPeer), // probably only occurs in a race of some kind. + }; - res.map(|_| IdGuard::new(peers, peer, req_id)) - } + res.map(|_| IdGuard::new(peers, peer, req_id)) + } - /// Handle a packet using the given io context. - /// Packet data is _untrusted_, which means that invalid data won't lead to - /// issues. 
- pub fn handle_packet(&self, io: &IoContext, peer: PeerId, packet_id: u8, data: &[u8]) { - let rlp = Rlp::new(data); + /// Handle a packet using the given io context. + /// Packet data is _untrusted_, which means that invalid data won't lead to + /// issues. + pub fn handle_packet(&self, io: &IoContext, peer: PeerId, packet_id: u8, data: &[u8]) { + let rlp = Rlp::new(data); - trace!(target: "pip", "Incoming packet {} from peer {}", packet_id, peer); + trace!(target: "pip", "Incoming packet {} from peer {}", packet_id, peer); - // handle the packet - let res = match packet_id { - packet::STATUS => self.status(peer, io, &rlp), - packet::ANNOUNCE => self.announcement(peer, io, &rlp), + // handle the packet + let res = match packet_id { + packet::STATUS => self.status(peer, io, &rlp), + packet::ANNOUNCE => self.announcement(peer, io, &rlp), - packet::REQUEST => self.request(peer, io, &rlp), - packet::RESPONSE => self.response(peer, io, &rlp), + packet::REQUEST => self.request(peer, io, &rlp), + packet::RESPONSE => self.response(peer, io, &rlp), - packet::UPDATE_CREDITS => self.update_credits(peer, io, &rlp), - packet::ACKNOWLEDGE_UPDATE => self.acknowledge_update(peer, io, &rlp), + packet::UPDATE_CREDITS => self.update_credits(peer, io, &rlp), + packet::ACKNOWLEDGE_UPDATE => self.acknowledge_update(peer, io, &rlp), - packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, &rlp), + packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, &rlp), - other => { - Err(Error::UnrecognizedPacket(other)) - } - }; + other => Err(Error::UnrecognizedPacket(other)), + }; - if let Err(e) = res { - punish(peer, io, &e); - } - } + if let Err(e) = res { + punish(peer, io, &e); + } + } - // check timeouts and punish peers. - fn timeout_check(&self, io: &IoContext) { - let now = Instant::now(); + // check timeouts and punish peers. 
+ fn timeout_check(&self, io: &IoContext) { + let now = Instant::now(); - // handshake timeout - { - let mut pending = self.pending_peers.write(); - let slowpokes: Vec<_> = pending.iter() - .filter(|&(_, ref peer)| { - peer.last_update + timeout::HANDSHAKE <= now - }) - .map(|(&p, _)| p) - .collect(); + // handshake timeout + { + let mut pending = self.pending_peers.write(); + let slowpokes: Vec<_> = pending + .iter() + .filter(|&(_, ref peer)| peer.last_update + timeout::HANDSHAKE <= now) + .map(|(&p, _)| p) + .collect(); - for slowpoke in slowpokes { - debug!(target: "pip", "Peer {} handshake timed out", slowpoke); - pending.remove(&slowpoke); - io.disconnect_peer(slowpoke); - } - } + for slowpoke in slowpokes { + debug!(target: "pip", "Peer {} handshake timed out", slowpoke); + pending.remove(&slowpoke); + io.disconnect_peer(slowpoke); + } + } - // request and update ack timeouts - let ack_duration = timeout::ACKNOWLEDGE_UPDATE; - { - for (peer_id, peer) in self.peers.read().iter() { - let peer = peer.lock(); - if peer.pending_requests.check_timeout(now) { - debug!(target: "pip", "Peer {} request timeout", peer_id); - io.disconnect_peer(*peer_id); - } + // request and update ack timeouts + let ack_duration = timeout::ACKNOWLEDGE_UPDATE; + { + for (peer_id, peer) in self.peers.read().iter() { + let peer = peer.lock(); + if peer.pending_requests.check_timeout(now) { + debug!(target: "pip", "Peer {} request timeout", peer_id); + io.disconnect_peer(*peer_id); + } - if let Some((ref start, _)) = peer.awaiting_acknowledge { - if *start + ack_duration <= now { - debug!(target: "pip", "Peer {} update acknowledgement timeout", peer_id); - io.disconnect_peer(*peer_id); - } - } - } - } - } + if let Some((ref start, _)) = peer.awaiting_acknowledge { + if *start + ack_duration <= now { + debug!(target: "pip", "Peer {} update acknowledgement timeout", peer_id); + io.disconnect_peer(*peer_id); + } + } + } + } + } - // propagate transactions to relay peers. 
- // if we aren't on the mainnet, we just propagate to all relay peers - fn propagate_transactions(&self, io: &IoContext) { - if self.capabilities.read().tx_relay { return } + // propagate transactions to relay peers. + // if we aren't on the mainnet, we just propagate to all relay peers + fn propagate_transactions(&self, io: &IoContext) { + if self.capabilities.read().tx_relay { + return; + } - let ready_transactions = self.provider.transactions_to_propagate(); - if ready_transactions.is_empty() { return } + let ready_transactions = self.provider.transactions_to_propagate(); + if ready_transactions.is_empty() { + return; + } - trace!(target: "pip", "propagate transactions: {} ready", ready_transactions.len()); + trace!(target: "pip", "propagate transactions: {} ready", ready_transactions.len()); - let all_transaction_hashes: HashSet<_> = ready_transactions.iter().map(|tx| tx.hash()).collect(); - let mut buf = Vec::new(); + let all_transaction_hashes: HashSet<_> = + ready_transactions.iter().map(|tx| tx.hash()).collect(); + let mut buf = Vec::new(); - let peers = self.peers.read(); - for (peer_id, peer_info) in peers.iter() { - let mut peer_info = peer_info.lock(); - if !peer_info.capabilities.tx_relay { continue } + let peers = self.peers.read(); + for (peer_id, peer_info) in peers.iter() { + let mut peer_info = peer_info.lock(); + if !peer_info.capabilities.tx_relay { + continue; + } - let prop_filter = &mut peer_info.propagated_transactions; - *prop_filter = &*prop_filter & &all_transaction_hashes; + let prop_filter = &mut peer_info.propagated_transactions; + *prop_filter = &*prop_filter & &all_transaction_hashes; - // fill the buffer with all non-propagated transactions. - let to_propagate = ready_transactions.iter() - .filter(|tx| prop_filter.insert(tx.hash())) - .map(|tx| &tx.transaction); + // fill the buffer with all non-propagated transactions. 
+ let to_propagate = ready_transactions + .iter() + .filter(|tx| prop_filter.insert(tx.hash())) + .map(|tx| &tx.transaction); - buf.extend(to_propagate); + buf.extend(to_propagate); - // propagate to the given peer. - if buf.is_empty() { continue } - io.send(*peer_id, packet::SEND_TRANSACTIONS, { - let mut stream = RlpStream::new_list(buf.len()); - for pending_tx in buf.drain(..) { - stream.append(pending_tx); - } + // propagate to the given peer. + if buf.is_empty() { + continue; + } + io.send(*peer_id, packet::SEND_TRANSACTIONS, { + let mut stream = RlpStream::new_list(buf.len()); + for pending_tx in buf.drain(..) { + stream.append(pending_tx); + } - stream.out() - }) - } - } + stream.out() + }) + } + } - /// called when a peer connects. - pub fn on_connect(&self, peer: PeerId, io: &IoContext) { - let proto_version = match io.protocol_version(peer).ok_or(Error::WrongNetwork) { - Ok(pv) => pv, - Err(e) => { punish(peer, io, &e); return } - }; + /// called when a peer connects. + pub fn on_connect(&self, peer: PeerId, io: &IoContext) { + let proto_version = match io.protocol_version(peer).ok_or(Error::WrongNetwork) { + Ok(pv) => pv, + Err(e) => { + punish(peer, io, &e); + return; + } + }; - if PROTOCOL_VERSIONS.iter().find(|x| x.0 == proto_version).is_none() { - punish(peer, io, &Error::UnsupportedProtocolVersion(proto_version)); - return; - } + if PROTOCOL_VERSIONS + .iter() + .find(|x| x.0 == proto_version) + .is_none() + { + punish(peer, io, &Error::UnsupportedProtocolVersion(proto_version)); + return; + } - let chain_info = self.provider.chain_info(); + let chain_info = self.provider.chain_info(); - let status = Status { - head_td: chain_info.total_difficulty, - head_hash: chain_info.best_block_hash, - head_num: chain_info.best_block_number, - genesis_hash: chain_info.genesis_hash, - protocol_version: proto_version as u32, // match peer proto version - network_id: self.network_id, - last_head: None, - }; + let status = Status { + head_td: 
chain_info.total_difficulty, + head_hash: chain_info.best_block_hash, + head_num: chain_info.best_block_number, + genesis_hash: chain_info.genesis_hash, + protocol_version: proto_version as u32, // match peer proto version + network_id: self.network_id, + last_head: None, + }; - let capabilities = self.capabilities.read(); - let cost_local_flow = self.flow_params.read(); - let local_flow = if io.is_reserved_peer(peer) { - &*self.free_flow_params - } else { - &**cost_local_flow - }; - let status_packet = status::write_handshake(&status, &capabilities, Some(local_flow)); + let capabilities = self.capabilities.read(); + let cost_local_flow = self.flow_params.read(); + let local_flow = if io.is_reserved_peer(peer) { + &*self.free_flow_params + } else { + &**cost_local_flow + }; + let status_packet = status::write_handshake(&status, &capabilities, Some(local_flow)); - self.pending_peers.write().insert(peer, PendingPeer { - sent_head: chain_info.best_block_hash, - last_update: Instant::now(), - }); + self.pending_peers.write().insert( + peer, + PendingPeer { + sent_head: chain_info.best_block_hash, + last_update: Instant::now(), + }, + ); - trace!(target: "pip", "Sending status to peer {}", peer); - io.send(peer, packet::STATUS, status_packet); - } + trace!(target: "pip", "Sending status to peer {}", peer); + io.send(peer, packet::STATUS, status_packet); + } - /// called when a peer disconnects. - pub fn on_disconnect(&self, peer: PeerId, io: &IoContext) { - trace!(target: "pip", "Peer {} disconnecting", peer); + /// called when a peer disconnects. 
+ pub fn on_disconnect(&self, peer: PeerId, io: &IoContext) { + trace!(target: "pip", "Peer {} disconnecting", peer); - self.pending_peers.write().remove(&peer); - let unfulfilled = match self.peers.write().remove(&peer) { - None => return, - Some(peer_info) => { - let peer_info = peer_info.into_inner(); - let mut unfulfilled: Vec<_> = peer_info.pending_requests.collect_ids(); - unfulfilled.extend(peer_info.failed_requests); + self.pending_peers.write().remove(&peer); + let unfulfilled = match self.peers.write().remove(&peer) { + None => return, + Some(peer_info) => { + let peer_info = peer_info.into_inner(); + let mut unfulfilled: Vec<_> = peer_info.pending_requests.collect_ids(); + unfulfilled.extend(peer_info.failed_requests); - unfulfilled - } - }; + unfulfilled + } + }; - for handler in &self.handlers { - handler.on_disconnect(&Ctx { - peer, - io, - proto: self, - }, &unfulfilled) - } - } + for handler in &self.handlers { + handler.on_disconnect( + &Ctx { + peer, + io, + proto: self, + }, + &unfulfilled, + ) + } + } - /// Execute the given closure with a basic context derived from the I/O context. - pub fn with_context(&self, io: &IoContext, f: F) -> T - where F: FnOnce(&BasicContext) -> T - { - f(&TickCtx { - io, - proto: self, - }) - } + /// Execute the given closure with a basic context derived from the I/O context. 
+ pub fn with_context(&self, io: &IoContext, f: F) -> T + where + F: FnOnce(&BasicContext) -> T, + { + f(&TickCtx { io, proto: self }) + } - fn tick_handlers(&self, io: &IoContext) { - for handler in &self.handlers { - handler.tick(&TickCtx { - io, - proto: self, - }) - } - } + fn tick_handlers(&self, io: &IoContext) { + for handler in &self.handlers { + handler.tick(&TickCtx { io, proto: self }) + } + } - fn begin_new_cost_period(&self, io: &IoContext) { - self.load_distribution.end_period(&*self.sample_store); + fn begin_new_cost_period(&self, io: &IoContext) { + self.load_distribution.end_period(&*self.sample_store); - let avg_peer_count = self.statistics.read().avg_peer_count(); - // Load share relative to average peer count +LEECHER_COUNT_FACTOR% - let load_share = MAX_LIGHTSERV_LOAD / (avg_peer_count * LEECHER_COUNT_FACTOR); - let new_params = Arc::new(FlowParams::from_request_times( - |kind| self.load_distribution.expected_time(kind), - load_share, - Duration::from_secs(self.config.max_stored_seconds), - )); - *self.flow_params.write() = new_params.clone(); - trace!(target: "pip", "New cost period: avg_peers={} ; cost_table:{:?}", avg_peer_count, new_params.cost_table()); + let avg_peer_count = self.statistics.read().avg_peer_count(); + // Load share relative to average peer count +LEECHER_COUNT_FACTOR% + let load_share = MAX_LIGHTSERV_LOAD / (avg_peer_count * LEECHER_COUNT_FACTOR); + let new_params = Arc::new(FlowParams::from_request_times( + |kind| self.load_distribution.expected_time(kind), + load_share, + Duration::from_secs(self.config.max_stored_seconds), + )); + *self.flow_params.write() = new_params.clone(); + trace!(target: "pip", "New cost period: avg_peers={} ; cost_table:{:?}", avg_peer_count, new_params.cost_table()); - let peers = self.peers.read(); - let now = Instant::now(); + let peers = self.peers.read(); + let now = Instant::now(); - let packet_body = { - let mut stream = RlpStream::new_list(3); - stream.append(new_params.limit()) - 
.append(new_params.recharge_rate()) - .append(new_params.cost_table()); - stream.out() - }; + let packet_body = { + let mut stream = RlpStream::new_list(3); + stream + .append(new_params.limit()) + .append(new_params.recharge_rate()) + .append(new_params.cost_table()); + stream.out() + }; - for (peer_id, peer_info) in peers.iter() { - let mut peer_info = peer_info.lock(); + for (peer_id, peer_info) in peers.iter() { + let mut peer_info = peer_info.lock(); - io.send(*peer_id, packet::UPDATE_CREDITS, packet_body.clone()); - peer_info.awaiting_acknowledge = Some((now, new_params.clone())); - } - } + io.send(*peer_id, packet::UPDATE_CREDITS, packet_body.clone()); + peer_info.awaiting_acknowledge = Some((now, new_params.clone())); + } + } - fn tick_statistics(&self) { - let leecher_count = self.leecher_count(); - self.statistics.write().add_peer_count(leecher_count); - } + fn tick_statistics(&self) { + let leecher_count = self.leecher_count(); + self.statistics.write().add_peer_count(leecher_count); + } } impl LightProtocol { - // Handle status message from peer. - fn status(&self, peer: PeerId, io: &IoContext, data: &Rlp) -> Result<(), Error> { - let pending = match self.pending_peers.write().remove(&peer) { - Some(pending) => pending, - None => { - return Err(Error::UnexpectedHandshake); - } - }; + // Handle status message from peer. 
+ fn status(&self, peer: PeerId, io: &IoContext, data: &Rlp) -> Result<(), Error> { + let pending = match self.pending_peers.write().remove(&peer) { + Some(pending) => pending, + None => { + return Err(Error::UnexpectedHandshake); + } + }; - let (status, capabilities, flow_params) = status::parse_handshake(data)?; + let (status, capabilities, flow_params) = status::parse_handshake(data)?; - trace!(target: "pip", "Connected peer with chain head {:?}", (status.head_hash, status.head_num)); + trace!(target: "pip", "Connected peer with chain head {:?}", (status.head_hash, status.head_num)); - if (status.network_id, status.genesis_hash) != (self.network_id, self.genesis_hash) { - trace!(target: "pip", "peer {} wrong network: network_id is {} vs our {}, gh is {} vs our {}", + if (status.network_id, status.genesis_hash) != (self.network_id, self.genesis_hash) { + trace!(target: "pip", "peer {} wrong network: network_id is {} vs our {}, gh is {} vs our {}", peer, status.network_id, self.network_id, status.genesis_hash, self.genesis_hash); - return Err(Error::WrongNetwork); - } + return Err(Error::WrongNetwork); + } - if Some(status.protocol_version as u8) != io.protocol_version(peer) { - return Err(Error::BadProtocolVersion); - } + if Some(status.protocol_version as u8) != io.protocol_version(peer) { + return Err(Error::BadProtocolVersion); + } - let remote_flow = flow_params.map(|params| (params.create_credits(), params)); - let local_flow = if io.is_reserved_peer(peer) { - self.free_flow_params.clone() - } else { - self.flow_params.read().clone() - }; + let remote_flow = flow_params.map(|params| (params.create_credits(), params)); + let local_flow = if io.is_reserved_peer(peer) { + self.free_flow_params.clone() + } else { + self.flow_params.read().clone() + }; - self.peers.write().insert(peer, Mutex::new(Peer { - local_credits: local_flow.create_credits(), - status: status.clone(), - capabilities, - remote_flow, - sent_head: pending.sent_head, - last_update: 
pending.last_update, - pending_requests: RequestSet::default(), - failed_requests: Vec::new(), - propagated_transactions: HashSet::new(), - skip_update: false, - local_flow, - awaiting_acknowledge: None, - })); + self.peers.write().insert( + peer, + Mutex::new(Peer { + local_credits: local_flow.create_credits(), + status: status.clone(), + capabilities, + remote_flow, + sent_head: pending.sent_head, + last_update: pending.last_update, + pending_requests: RequestSet::default(), + failed_requests: Vec::new(), + propagated_transactions: HashSet::new(), + skip_update: false, + local_flow, + awaiting_acknowledge: None, + }), + ); - let any_kept = self.handlers.iter().map( - |handler| handler.on_connect( - &Ctx { - peer, - io, - proto: self, - }, - &status, - &capabilities - ) - ).fold(PeerStatus::Kept, PeerStatus::bitor); + let any_kept = self + .handlers + .iter() + .map(|handler| { + handler.on_connect( + &Ctx { + peer, + io, + proto: self, + }, + &status, + &capabilities, + ) + }) + .fold(PeerStatus::Kept, PeerStatus::bitor); - if any_kept == PeerStatus::Unkept { - Err(Error::RejectedByHandlers) - } else { - Ok(()) - } - } + if any_kept == PeerStatus::Unkept { + Err(Error::RejectedByHandlers) + } else { + Ok(()) + } + } - // Handle an announcement. - fn announcement(&self, peer: PeerId, io: &IoContext, data: &Rlp) -> Result<(), Error> { - if !self.peers.read().contains_key(&peer) { - debug!(target: "pip", "Ignoring announcement from unknown peer"); - return Ok(()) - } + // Handle an announcement. + fn announcement(&self, peer: PeerId, io: &IoContext, data: &Rlp) -> Result<(), Error> { + if !self.peers.read().contains_key(&peer) { + debug!(target: "pip", "Ignoring announcement from unknown peer"); + return Ok(()); + } - let announcement = status::parse_announcement(data)?; + let announcement = status::parse_announcement(data)?; - // scope to ensure locks are dropped before moving into handler-space. 
- { - let peers = self.peers.read(); - let peer_info = match peers.get(&peer) { - Some(info) => info, - None => return Ok(()), - }; + // scope to ensure locks are dropped before moving into handler-space. + { + let peers = self.peers.read(); + let peer_info = match peers.get(&peer) { + Some(info) => info, + None => return Ok(()), + }; - let mut peer_info = peer_info.lock(); + let mut peer_info = peer_info.lock(); - // update status. - { - // TODO: punish peer if they've moved backwards. - let status = &mut peer_info.status; - let last_head = status.head_hash; - status.head_hash = announcement.head_hash; - status.head_td = announcement.head_td; - status.head_num = announcement.head_num; - status.last_head = Some((last_head, announcement.reorg_depth)); - } + // update status. + { + // TODO: punish peer if they've moved backwards. + let status = &mut peer_info.status; + let last_head = status.head_hash; + status.head_hash = announcement.head_hash; + status.head_td = announcement.head_td; + status.head_num = announcement.head_num; + status.last_head = Some((last_head, announcement.reorg_depth)); + } - // update capabilities. - peer_info.capabilities.update_from(&announcement); - } + // update capabilities. + peer_info.capabilities.update_from(&announcement); + } - for handler in &self.handlers { - handler.on_announcement(&Ctx { - peer, - io, - proto: self, - }, &announcement); - } + for handler in &self.handlers { + handler.on_announcement( + &Ctx { + peer, + io, + proto: self, + }, + &announcement, + ); + } - Ok(()) - } + Ok(()) + } - // Receive requests from a peer. - fn request(&self, peer_id: PeerId, io: &IoContext, raw: &Rlp) -> Result<(), Error> { - // the maximum amount of requests we'll fill in a single packet. - const MAX_REQUESTS: usize = 256; + // Receive requests from a peer. + fn request(&self, peer_id: PeerId, io: &IoContext, raw: &Rlp) -> Result<(), Error> { + // the maximum amount of requests we'll fill in a single packet. 
+ const MAX_REQUESTS: usize = 256; - use ::request::Builder; - use ::request::CompleteRequest; + use request::{Builder, CompleteRequest}; - let peers = self.peers.read(); - let peer = match peers.get(&peer_id) { - Some(peer) => peer, - None => { - debug!(target: "pip", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - let peer: &mut Peer = &mut *peer; + let peers = self.peers.read(); + let peer = match peers.get(&peer_id) { + Some(peer) => peer, + None => { + debug!(target: "pip", "Ignoring request from unknown peer"); + return Ok(()); + } + }; + let mut peer = peer.lock(); + let peer: &mut Peer = &mut *peer; - let req_id: u64 = raw.val_at(0)?; - let mut request_builder = Builder::default(); + let req_id: u64 = raw.val_at(0)?; + let mut request_builder = Builder::default(); - trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); + trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); - // deserialize requests, check costs and request validity. - peer.local_flow.recharge(&mut peer.local_credits); + // deserialize requests, check costs and request validity. 
+ peer.local_flow.recharge(&mut peer.local_credits); - peer.local_credits.deduct_cost(peer.local_flow.base_cost())?; - for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { - let request: Request = request_rlp.as_val()?; - let cost = peer.local_flow.compute_cost(&request).ok_or(Error::NotServer)?; - peer.local_credits.deduct_cost(cost)?; - request_builder.push(request).map_err(|_| Error::BadBackReference)?; - } + peer.local_credits + .deduct_cost(peer.local_flow.base_cost())?; + for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { + let request: Request = request_rlp.as_val()?; + let cost = peer + .local_flow + .compute_cost(&request) + .ok_or(Error::NotServer)?; + peer.local_credits.deduct_cost(cost)?; + request_builder + .push(request) + .map_err(|_| Error::BadBackReference)?; + } - let requests = request_builder.build(); - let num_requests = requests.requests().len(); - trace!(target: "pip", "Beginning to respond to requests (id: {}) from peer {}", req_id, peer_id); + let requests = request_builder.build(); + let num_requests = requests.requests().len(); + trace!(target: "pip", "Beginning to respond to requests (id: {}) from peer {}", req_id, peer_id); - // respond to all requests until one fails. 
- let responses = requests.respond_to_all(|complete_req| { - let _timer = self.load_distribution.begin_timer(&complete_req); - match complete_req { - CompleteRequest::Headers(req) => self.provider.block_headers(req).map(Response::Headers), - CompleteRequest::HeaderProof(req) => self.provider.header_proof(req).map(Response::HeaderProof), - CompleteRequest::TransactionIndex(req) => self.provider.transaction_index(req).map(Response::TransactionIndex), - CompleteRequest::Body(req) => self.provider.block_body(req).map(Response::Body), - CompleteRequest::Receipts(req) => self.provider.block_receipts(req).map(Response::Receipts), - CompleteRequest::Account(req) => self.provider.account_proof(req).map(Response::Account), - CompleteRequest::Storage(req) => self.provider.storage_proof(req).map(Response::Storage), - CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code), - CompleteRequest::Execution(req) => self.provider.transaction_proof(req).map(Response::Execution), - CompleteRequest::Signal(req) => self.provider.epoch_signal(req).map(Response::Signal), - } - }); + // respond to all requests until one fails. 
+ let responses = requests.respond_to_all(|complete_req| { + let _timer = self.load_distribution.begin_timer(&complete_req); + match complete_req { + CompleteRequest::Headers(req) => { + self.provider.block_headers(req).map(Response::Headers) + } + CompleteRequest::HeaderProof(req) => { + self.provider.header_proof(req).map(Response::HeaderProof) + } + CompleteRequest::TransactionIndex(req) => self + .provider + .transaction_index(req) + .map(Response::TransactionIndex), + CompleteRequest::Body(req) => self.provider.block_body(req).map(Response::Body), + CompleteRequest::Receipts(req) => { + self.provider.block_receipts(req).map(Response::Receipts) + } + CompleteRequest::Account(req) => { + self.provider.account_proof(req).map(Response::Account) + } + CompleteRequest::Storage(req) => { + self.provider.storage_proof(req).map(Response::Storage) + } + CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code), + CompleteRequest::Execution(req) => self + .provider + .transaction_proof(req) + .map(Response::Execution), + CompleteRequest::Signal(req) => { + self.provider.epoch_signal(req).map(Response::Signal) + } + } + }); - trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id); - trace!(target: "pip", "Peer {} has {} credits remaining.", peer_id, peer.local_credits.current()); + trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id); + trace!(target: "pip", "Peer {} has {} credits remaining.", peer_id, peer.local_credits.current()); - io.respond(packet::RESPONSE, { - let mut stream = RlpStream::new_list(3); - let cur_credits = peer.local_credits.current(); - stream.append(&req_id).append(&cur_credits).append_list(&responses); - stream.out() - }); - Ok(()) - } + io.respond(packet::RESPONSE, { + let mut stream = RlpStream::new_list(3); + let cur_credits = peer.local_credits.current(); + stream + .append(&req_id) + .append(&cur_credits) + 
.append_list(&responses); + stream.out() + }); + Ok(()) + } - // handle a packet with responses. - fn response(&self, peer: PeerId, io: &IoContext, raw: &Rlp) -> Result<(), Error> { - let (req_id, responses) = { - let id_guard = self.pre_verify_response(peer, &raw)?; - let responses: Vec = raw.list_at(2)?; - (id_guard.defuse(), responses) - }; + // handle a packet with responses. + fn response(&self, peer: PeerId, io: &IoContext, raw: &Rlp) -> Result<(), Error> { + let (req_id, responses) = { + let id_guard = self.pre_verify_response(peer, &raw)?; + let responses: Vec = raw.list_at(2)?; + (id_guard.defuse(), responses) + }; - for handler in &self.handlers { - handler.on_responses(&Ctx { - io, - proto: self, - peer, - }, req_id, &responses); - } + for handler in &self.handlers { + handler.on_responses( + &Ctx { + io, + proto: self, + peer, + }, + req_id, + &responses, + ); + } - Ok(()) - } + Ok(()) + } - // handle an update of request credits parameters. - fn update_credits(&self, peer_id: PeerId, io: &IoContext, raw: &Rlp) -> Result<(), Error> { - let peers = self.peers.read(); + // handle an update of request credits parameters. 
+ fn update_credits(&self, peer_id: PeerId, io: &IoContext, raw: &Rlp) -> Result<(), Error> { + let peers = self.peers.read(); - let peer = peers.get(&peer_id).ok_or(Error::UnknownPeer)?; - let mut peer = peer.lock(); + let peer = peers.get(&peer_id).ok_or(Error::UnknownPeer)?; + let mut peer = peer.lock(); - trace!(target: "pip", "Received an update to request credit params from peer {}", peer_id); + trace!(target: "pip", "Received an update to request credit params from peer {}", peer_id); - { - let &mut (ref mut credits, ref mut old_params) = peer.remote_flow.as_mut().ok_or(Error::NotServer)?; - old_params.recharge(credits); + { + let &mut (ref mut credits, ref mut old_params) = + peer.remote_flow.as_mut().ok_or(Error::NotServer)?; + old_params.recharge(credits); - let new_params = FlowParams::new( - raw.val_at(0)?, // limit - raw.val_at(2)?, // cost table - raw.val_at(1)?, // recharge. - ); + let new_params = FlowParams::new( + raw.val_at(0)?, // limit + raw.val_at(2)?, // cost table + raw.val_at(1)?, // recharge. + ); - // preserve ratio of current : limit when updating params. - credits.maintain_ratio(*old_params.limit(), *new_params.limit()); - *old_params = new_params; - } + // preserve ratio of current : limit when updating params. + credits.maintain_ratio(*old_params.limit(), *new_params.limit()); + *old_params = new_params; + } - // set flag to true when there is an in-flight request - // corresponding to old flow params. - if !peer.pending_requests.is_empty() { - peer.skip_update = true; - } + // set flag to true when there is an in-flight request + // corresponding to old flow params. + if !peer.pending_requests.is_empty() { + peer.skip_update = true; + } - // let peer know we've acknowledged the update. - io.respond(packet::ACKNOWLEDGE_UPDATE, Vec::new()); - Ok(()) - } + // let peer know we've acknowledged the update. + io.respond(packet::ACKNOWLEDGE_UPDATE, Vec::new()); + Ok(()) + } - // handle an acknowledgement of request credits update. 
- fn acknowledge_update(&self, peer_id: PeerId, _io: &IoContext, _raw: &Rlp) -> Result<(), Error> { - let peers = self.peers.read(); - let peer = peers.get(&peer_id).ok_or(Error::UnknownPeer)?; - let mut peer = peer.lock(); + // handle an acknowledgement of request credits update. + fn acknowledge_update( + &self, + peer_id: PeerId, + _io: &IoContext, + _raw: &Rlp, + ) -> Result<(), Error> { + let peers = self.peers.read(); + let peer = peers.get(&peer_id).ok_or(Error::UnknownPeer)?; + let mut peer = peer.lock(); - trace!(target: "pip", "Received an acknowledgement for new request credit params from peer {}", peer_id); + trace!(target: "pip", "Received an acknowledgement for new request credit params from peer {}", peer_id); - let (_, new_params) = match peer.awaiting_acknowledge.take() { - Some(x) => x, - None => return Err(Error::UnsolicitedResponse), - }; + let (_, new_params) = match peer.awaiting_acknowledge.take() { + Some(x) => x, + None => return Err(Error::UnsolicitedResponse), + }; - let old_limit = *peer.local_flow.limit(); - peer.local_credits.maintain_ratio(old_limit, *new_params.limit()); - peer.local_flow = new_params; - Ok(()) - } + let old_limit = *peer.local_flow.limit(); + peer.local_credits + .maintain_ratio(old_limit, *new_params.limit()); + peer.local_flow = new_params; + Ok(()) + } - // Receive a set of transactions to relay. - fn relay_transactions(&self, peer: PeerId, io: &IoContext, data: &Rlp) -> Result<(), Error> { - const MAX_TRANSACTIONS: usize = 256; + // Receive a set of transactions to relay. 
+ fn relay_transactions(&self, peer: PeerId, io: &IoContext, data: &Rlp) -> Result<(), Error> { + const MAX_TRANSACTIONS: usize = 256; - let txs: Vec<_> = data.iter() - .take(MAX_TRANSACTIONS) - .map(|x| x.as_val::()) - .collect::>()?; + let txs: Vec<_> = data + .iter() + .take(MAX_TRANSACTIONS) + .map(|x| x.as_val::()) + .collect::>()?; - debug!(target: "pip", "Received {} transactions to relay from peer {}", txs.len(), peer); + debug!(target: "pip", "Received {} transactions to relay from peer {}", txs.len(), peer); - for handler in &self.handlers { - handler.on_transactions(&Ctx { - peer, - io, - proto: self, - }, &txs); - } + for handler in &self.handlers { + handler.on_transactions( + &Ctx { + peer, + io, + proto: self, + }, + &txs, + ); + } - Ok(()) - } + Ok(()) + } } // if something went wrong, figure out how much to punish the peer. fn punish(peer: PeerId, io: &IoContext, e: &Error) { - match e.punishment() { - Punishment::None => {} - Punishment::Disconnect => { - debug!(target: "pip", "Disconnecting peer {}: {}", peer, e); - io.disconnect_peer(peer) - } - Punishment::Disable => { - debug!(target: "pip", "Disabling peer {}: {}", peer, e); - io.disable_peer(peer) - } - } + match e.punishment() { + Punishment::None => {} + Punishment::Disconnect => { + debug!(target: "pip", "Disconnecting peer {}: {}", peer, e); + io.disconnect_peer(peer) + } + Punishment::Disable => { + debug!(target: "pip", "Disabling peer {}: {}", peer, e); + io.disable_peer(peer) + } + } } impl NetworkProtocolHandler for LightProtocol { - fn initialize(&self, io: &NetworkContext) { - io.register_timer(TIMEOUT, TIMEOUT_INTERVAL) - .expect("Error registering sync timer."); - io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL) - .expect("Error registering sync timer."); - io.register_timer(PROPAGATE_TIMEOUT, PROPAGATE_TIMEOUT_INTERVAL) - .expect("Error registering sync timer."); - io.register_timer(RECALCULATE_COSTS_TIMEOUT, RECALCULATE_COSTS_INTERVAL) - .expect("Error registering 
request timer interval token."); - io.register_timer(STATISTICS_TIMEOUT, STATISTICS_INTERVAL) - .expect("Error registering statistics timer."); - } + fn initialize(&self, io: &NetworkContext) { + io.register_timer(TIMEOUT, TIMEOUT_INTERVAL) + .expect("Error registering sync timer."); + io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL) + .expect("Error registering sync timer."); + io.register_timer(PROPAGATE_TIMEOUT, PROPAGATE_TIMEOUT_INTERVAL) + .expect("Error registering sync timer."); + io.register_timer(RECALCULATE_COSTS_TIMEOUT, RECALCULATE_COSTS_INTERVAL) + .expect("Error registering request timer interval token."); + io.register_timer(STATISTICS_TIMEOUT, STATISTICS_INTERVAL) + .expect("Error registering statistics timer."); + } - fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - self.handle_packet(&io, *peer, packet_id, data); - } + fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + self.handle_packet(&io, *peer, packet_id, data); + } - fn connected(&self, io: &NetworkContext, peer: &PeerId) { - self.on_connect(*peer, &io); - } + fn connected(&self, io: &NetworkContext, peer: &PeerId) { + self.on_connect(*peer, &io); + } - fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { - self.on_disconnect(*peer, &io); - } + fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { + self.on_disconnect(*peer, &io); + } - fn timeout(&self, io: &NetworkContext, timer: TimerToken) { - match timer { - TIMEOUT => self.timeout_check(&io), - TICK_TIMEOUT => self.tick_handlers(&io), - PROPAGATE_TIMEOUT => self.propagate_transactions(&io), - RECALCULATE_COSTS_TIMEOUT => self.begin_new_cost_period(&io), - STATISTICS_TIMEOUT => self.tick_statistics(), - _ => warn!(target: "pip", "received timeout on unknown token {}", timer), - } - } + fn timeout(&self, io: &NetworkContext, timer: TimerToken) { + match timer { + TIMEOUT => self.timeout_check(&io), + TICK_TIMEOUT => self.tick_handlers(&io), + 
PROPAGATE_TIMEOUT => self.propagate_transactions(&io), + RECALCULATE_COSTS_TIMEOUT => self.begin_new_cost_period(&io), + STATISTICS_TIMEOUT => self.tick_statistics(), + _ => warn!(target: "pip", "received timeout on unknown token {}", timer), + } + } } diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index c3fc139f4..01abb8fe3 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -26,11 +26,11 @@ //! Current default costs are picked completely arbitrarily, not based //! on any empirical timings or mathematical models. -use request::{self, Request}; use super::error::Error; +use request::{self, Request}; -use rlp::{Rlp, RlpStream, Decodable, Encodable, DecoderError}; use ethereum_types::U256; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; use std::time::{Duration, Instant}; /// Credits value. @@ -40,416 +40,434 @@ use std::time::{Duration, Instant}; /// point to the time of the update. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Credits { - estimate: U256, - recharge_point: Instant, + estimate: U256, + recharge_point: Instant, } impl Credits { - /// Get the current amount of credits.. - pub fn current(&self) -> U256 { self.estimate } + /// Get the current amount of credits.. + pub fn current(&self) -> U256 { + self.estimate + } - /// Make a definitive update. - /// This will be the value obtained after receiving - /// a response to a request. - pub fn update_to(&mut self, value: U256) { - self.estimate = value; - self.recharge_point = Instant::now(); - } + /// Make a definitive update. + /// This will be the value obtained after receiving + /// a response to a request. + pub fn update_to(&mut self, value: U256) { + self.estimate = value; + self.recharge_point = Instant::now(); + } - /// Maintain ratio to current limit against an old limit. 
- pub fn maintain_ratio(&mut self, old_limit: U256, new_limit: U256) { - self.estimate = (new_limit * self.estimate) / old_limit; - } + /// Maintain ratio to current limit against an old limit. + pub fn maintain_ratio(&mut self, old_limit: U256, new_limit: U256) { + self.estimate = (new_limit * self.estimate) / old_limit; + } - /// Attempt to apply the given cost to the amount of credits. - /// - /// If successful, the cost will be deducted successfully. - /// - /// If unsuccessful, the structure will be unaltered an an - /// error will be produced. - pub fn deduct_cost(&mut self, cost: U256) -> Result<(), Error> { - if cost > self.estimate { - Err(Error::NoCredits) - } else { - self.estimate = self.estimate - cost; - Ok(()) - } - } + /// Attempt to apply the given cost to the amount of credits. + /// + /// If successful, the cost will be deducted successfully. + /// + /// If unsuccessful, the structure will be unaltered an an + /// error will be produced. + pub fn deduct_cost(&mut self, cost: U256) -> Result<(), Error> { + if cost > self.estimate { + Err(Error::NoCredits) + } else { + self.estimate = self.estimate - cost; + Ok(()) + } + } } /// A cost table, mapping requests to base and per-request costs. /// Costs themselves may be missing. #[derive(Debug, Clone, PartialEq, Eq)] pub struct CostTable { - base: U256, // cost per packet. - headers: Option, // cost per header - transaction_index: Option, - body: Option, - receipts: Option, - account: Option, - storage: Option, - code: Option, - header_proof: Option, - transaction_proof: Option, // cost per gas. - epoch_signal: Option, + base: U256, // cost per packet. + headers: Option, // cost per header + transaction_index: Option, + body: Option, + receipts: Option, + account: Option, + storage: Option, + code: Option, + header_proof: Option, + transaction_proof: Option, // cost per gas. 
+ epoch_signal: Option, } impl CostTable { - fn costs_set(&self) -> usize { - let mut num_set = 0; + fn costs_set(&self) -> usize { + let mut num_set = 0; - { - let mut incr_if_set = |cost: &Option<_>| if cost.is_some() { num_set += 1 }; - incr_if_set(&self.headers); - incr_if_set(&self.transaction_index); - incr_if_set(&self.body); - incr_if_set(&self.receipts); - incr_if_set(&self.account); - incr_if_set(&self.storage); - incr_if_set(&self.code); - incr_if_set(&self.header_proof); - incr_if_set(&self.transaction_proof); - incr_if_set(&self.epoch_signal); - } + { + let mut incr_if_set = |cost: &Option<_>| { + if cost.is_some() { + num_set += 1 + } + }; + incr_if_set(&self.headers); + incr_if_set(&self.transaction_index); + incr_if_set(&self.body); + incr_if_set(&self.receipts); + incr_if_set(&self.account); + incr_if_set(&self.storage); + incr_if_set(&self.code); + incr_if_set(&self.header_proof); + incr_if_set(&self.transaction_proof); + incr_if_set(&self.epoch_signal); + } - num_set - } + num_set + } } impl Default for CostTable { - fn default() -> Self { - // arbitrarily chosen constants. - CostTable { - base: 100_000.into(), - headers: Some(10000.into()), - transaction_index: Some(10000.into()), - body: Some(15000.into()), - receipts: Some(5000.into()), - account: Some(25000.into()), - storage: Some(25000.into()), - code: Some(20000.into()), - header_proof: Some(15000.into()), - transaction_proof: Some(2.into()), - epoch_signal: Some(10000.into()), - } - } + fn default() -> Self { + // arbitrarily chosen constants. 
+ CostTable { + base: 100_000.into(), + headers: Some(10000.into()), + transaction_index: Some(10000.into()), + body: Some(15000.into()), + receipts: Some(5000.into()), + account: Some(25000.into()), + storage: Some(25000.into()), + code: Some(20000.into()), + header_proof: Some(15000.into()), + transaction_proof: Some(2.into()), + epoch_signal: Some(10000.into()), + } + } } impl Encodable for CostTable { - fn rlp_append(&self, s: &mut RlpStream) { - fn append_cost(s: &mut RlpStream, cost: &Option, kind: request::Kind) { - if let Some(ref cost) = *cost { - s.begin_list(2); - // hack around https://github.com/paritytech/parity-ethereum/issues/4356 - Encodable::rlp_append(&kind, s); - s.append(cost); - } - } + fn rlp_append(&self, s: &mut RlpStream) { + fn append_cost(s: &mut RlpStream, cost: &Option, kind: request::Kind) { + if let Some(ref cost) = *cost { + s.begin_list(2); + // hack around https://github.com/paritytech/parity-ethereum/issues/4356 + Encodable::rlp_append(&kind, s); + s.append(cost); + } + } - s.begin_list(1 + self.costs_set()).append(&self.base); - append_cost(s, &self.headers, request::Kind::Headers); - append_cost(s, &self.transaction_index, request::Kind::TransactionIndex); - append_cost(s, &self.body, request::Kind::Body); - append_cost(s, &self.receipts, request::Kind::Receipts); - append_cost(s, &self.account, request::Kind::Account); - append_cost(s, &self.storage, request::Kind::Storage); - append_cost(s, &self.code, request::Kind::Code); - append_cost(s, &self.header_proof, request::Kind::HeaderProof); - append_cost(s, &self.transaction_proof, request::Kind::Execution); - append_cost(s, &self.epoch_signal, request::Kind::Signal); - } + s.begin_list(1 + self.costs_set()).append(&self.base); + append_cost(s, &self.headers, request::Kind::Headers); + append_cost(s, &self.transaction_index, request::Kind::TransactionIndex); + append_cost(s, &self.body, request::Kind::Body); + append_cost(s, &self.receipts, request::Kind::Receipts); + 
append_cost(s, &self.account, request::Kind::Account); + append_cost(s, &self.storage, request::Kind::Storage); + append_cost(s, &self.code, request::Kind::Code); + append_cost(s, &self.header_proof, request::Kind::HeaderProof); + append_cost(s, &self.transaction_proof, request::Kind::Execution); + append_cost(s, &self.epoch_signal, request::Kind::Signal); + } } impl Decodable for CostTable { - fn decode(rlp: &Rlp) -> Result { - let base = rlp.val_at(0)?; + fn decode(rlp: &Rlp) -> Result { + let base = rlp.val_at(0)?; - let mut headers = None; - let mut transaction_index = None; - let mut body = None; - let mut receipts = None; - let mut account = None; - let mut storage = None; - let mut code = None; - let mut header_proof = None; - let mut transaction_proof = None; - let mut epoch_signal = None; + let mut headers = None; + let mut transaction_index = None; + let mut body = None; + let mut receipts = None; + let mut account = None; + let mut storage = None; + let mut code = None; + let mut header_proof = None; + let mut transaction_proof = None; + let mut epoch_signal = None; - for cost_list in rlp.iter().skip(1) { - let cost = cost_list.val_at(1)?; - match cost_list.val_at(0)? { - request::Kind::Headers => headers = Some(cost), - request::Kind::TransactionIndex => transaction_index = Some(cost), - request::Kind::Body => body = Some(cost), - request::Kind::Receipts => receipts = Some(cost), - request::Kind::Account => account = Some(cost), - request::Kind::Storage => storage = Some(cost), - request::Kind::Code => code = Some(cost), - request::Kind::HeaderProof => header_proof = Some(cost), - request::Kind::Execution => transaction_proof = Some(cost), - request::Kind::Signal => epoch_signal = Some(cost), - } - } + for cost_list in rlp.iter().skip(1) { + let cost = cost_list.val_at(1)?; + match cost_list.val_at(0)? 
{ + request::Kind::Headers => headers = Some(cost), + request::Kind::TransactionIndex => transaction_index = Some(cost), + request::Kind::Body => body = Some(cost), + request::Kind::Receipts => receipts = Some(cost), + request::Kind::Account => account = Some(cost), + request::Kind::Storage => storage = Some(cost), + request::Kind::Code => code = Some(cost), + request::Kind::HeaderProof => header_proof = Some(cost), + request::Kind::Execution => transaction_proof = Some(cost), + request::Kind::Signal => epoch_signal = Some(cost), + } + } - let table = CostTable { - base, - headers, - transaction_index, - body, - receipts, - account, - storage, - code, - header_proof, - transaction_proof, - epoch_signal, - }; + let table = CostTable { + base, + headers, + transaction_index, + body, + receipts, + account, + storage, + code, + header_proof, + transaction_proof, + epoch_signal, + }; - if table.costs_set() == 0 { - Err(DecoderError::Custom("no cost types set.")) - } else { - Ok(table) - } - } + if table.costs_set() == 0 { + Err(DecoderError::Custom("no cost types set.")) + } else { + Ok(table) + } + } } /// Handles costs, recharge, limits of request credits. #[derive(Debug, Clone, PartialEq)] pub struct FlowParams { - costs: CostTable, - limit: U256, - recharge: U256, + costs: CostTable, + limit: U256, + recharge: U256, } impl FlowParams { - /// Create new flow parameters from a request cost table, - /// credit limit, and (minimum) rate of recharge. - pub fn new(limit: U256, costs: CostTable, recharge: U256) -> Self { - FlowParams { - costs, - limit, - recharge, - } - } + /// Create new flow parameters from a request cost table, + /// credit limit, and (minimum) rate of recharge. + pub fn new(limit: U256, costs: CostTable, recharge: U256) -> Self { + FlowParams { + costs, + limit, + recharge, + } + } - /// Create new flow parameters from , - /// proportion of total capacity which should be given to a peer, - /// and stored capacity a peer can accumulate. 
- pub fn from_request_times Duration>( - request_time: F, - load_share: f64, - max_stored: Duration - ) -> Self { - use request::Kind; + /// Create new flow parameters from , + /// proportion of total capacity which should be given to a peer, + /// and stored capacity a peer can accumulate. + pub fn from_request_times Duration>( + request_time: F, + load_share: f64, + max_stored: Duration, + ) -> Self { + use request::Kind; - let load_share = load_share.abs(); + let load_share = load_share.abs(); - let recharge: u64 = 100_000_000; - let max = { - let sec = max_stored.as_secs().saturating_mul(recharge); - let nanos = (max_stored.subsec_nanos() as u64).saturating_mul(recharge) / 1_000_000_000; - sec + nanos - }; + let recharge: u64 = 100_000_000; + let max = { + let sec = max_stored.as_secs().saturating_mul(recharge); + let nanos = (max_stored.subsec_nanos() as u64).saturating_mul(recharge) / 1_000_000_000; + sec + nanos + }; - let cost_for_kind = |kind| { - // how many requests we can handle per second - let rq_dur = request_time(kind); - let second_duration = { - let as_ns = rq_dur.as_secs() as f64 * 1_000_000_000f64 + rq_dur.subsec_nanos() as f64; - 1_000_000_000f64 / as_ns - }; + let cost_for_kind = |kind| { + // how many requests we can handle per second + let rq_dur = request_time(kind); + let second_duration = { + let as_ns = + rq_dur.as_secs() as f64 * 1_000_000_000f64 + rq_dur.subsec_nanos() as f64; + 1_000_000_000f64 / as_ns + }; - // scale by share of the load given to this peer. - let serve_per_second = second_duration * load_share; - let serve_per_second = serve_per_second.max(1.0 / 10_000.0); + // scale by share of the load given to this peer. + let serve_per_second = second_duration * load_share; + let serve_per_second = serve_per_second.max(1.0 / 10_000.0); - // as a percentage of the recharge per second. - Some(U256::from((recharge as f64 / serve_per_second) as u64)) - }; + // as a percentage of the recharge per second. 
+ Some(U256::from((recharge as f64 / serve_per_second) as u64)) + }; - let costs = CostTable { - base: 0.into(), - headers: cost_for_kind(Kind::Headers), - transaction_index: cost_for_kind(Kind::TransactionIndex), - body: cost_for_kind(Kind::Body), - receipts: cost_for_kind(Kind::Receipts), - account: cost_for_kind(Kind::Account), - storage: cost_for_kind(Kind::Storage), - code: cost_for_kind(Kind::Code), - header_proof: cost_for_kind(Kind::HeaderProof), - transaction_proof: cost_for_kind(Kind::Execution), - epoch_signal: cost_for_kind(Kind::Signal), - }; + let costs = CostTable { + base: 0.into(), + headers: cost_for_kind(Kind::Headers), + transaction_index: cost_for_kind(Kind::TransactionIndex), + body: cost_for_kind(Kind::Body), + receipts: cost_for_kind(Kind::Receipts), + account: cost_for_kind(Kind::Account), + storage: cost_for_kind(Kind::Storage), + code: cost_for_kind(Kind::Code), + header_proof: cost_for_kind(Kind::HeaderProof), + transaction_proof: cost_for_kind(Kind::Execution), + epoch_signal: cost_for_kind(Kind::Signal), + }; - FlowParams { - costs, - limit: max.into(), - recharge: recharge.into(), - } - } + FlowParams { + costs, + limit: max.into(), + recharge: recharge.into(), + } + } - /// Create effectively infinite flow params. - pub fn free() -> Self { - let free_cost: Option = Some(0.into()); - FlowParams { - limit: (!0_u64).into(), - recharge: 1.into(), - costs: CostTable { - base: 0.into(), - headers: free_cost, - transaction_index: free_cost, - body: free_cost, - receipts: free_cost, - account: free_cost, - storage: free_cost, - code: free_cost, - header_proof: free_cost, - transaction_proof: free_cost, - epoch_signal: free_cost, - } - } - } + /// Create effectively infinite flow params. 
+ pub fn free() -> Self { + let free_cost: Option = Some(0.into()); + FlowParams { + limit: (!0_u64).into(), + recharge: 1.into(), + costs: CostTable { + base: 0.into(), + headers: free_cost, + transaction_index: free_cost, + body: free_cost, + receipts: free_cost, + account: free_cost, + storage: free_cost, + code: free_cost, + header_proof: free_cost, + transaction_proof: free_cost, + epoch_signal: free_cost, + }, + } + } - /// Get a reference to the credit limit. - pub fn limit(&self) -> &U256 { &self.limit } + /// Get a reference to the credit limit. + pub fn limit(&self) -> &U256 { + &self.limit + } - /// Get a reference to the cost table. - pub fn cost_table(&self) -> &CostTable { &self.costs } + /// Get a reference to the cost table. + pub fn cost_table(&self) -> &CostTable { + &self.costs + } - /// Get the base cost of a request. - pub fn base_cost(&self) -> U256 { self.costs.base } + /// Get the base cost of a request. + pub fn base_cost(&self) -> U256 { + self.costs.base + } - /// Get a reference to the recharge rate. - pub fn recharge_rate(&self) -> &U256 { &self.recharge } + /// Get a reference to the recharge rate. + pub fn recharge_rate(&self) -> &U256 { + &self.recharge + } - /// Compute the actual cost of a request, given the kind of request - /// and number of requests made. 
- pub fn compute_cost(&self, request: &Request) -> Option { - match *request { - Request::Headers(ref req) => self.costs.headers.map(|c| c * U256::from(req.max)), - Request::HeaderProof(_) => self.costs.header_proof, - Request::TransactionIndex(_) => self.costs.transaction_index, - Request::Body(_) => self.costs.body, - Request::Receipts(_) => self.costs.receipts, - Request::Account(_) => self.costs.account, - Request::Storage(_) => self.costs.storage, - Request::Code(_) => self.costs.code, - Request::Execution(ref req) => self.costs.transaction_proof.map(|c| c * req.gas), - Request::Signal(_) => self.costs.epoch_signal, - } - } + /// Compute the actual cost of a request, given the kind of request + /// and number of requests made. + pub fn compute_cost(&self, request: &Request) -> Option { + match *request { + Request::Headers(ref req) => self.costs.headers.map(|c| c * U256::from(req.max)), + Request::HeaderProof(_) => self.costs.header_proof, + Request::TransactionIndex(_) => self.costs.transaction_index, + Request::Body(_) => self.costs.body, + Request::Receipts(_) => self.costs.receipts, + Request::Account(_) => self.costs.account, + Request::Storage(_) => self.costs.storage, + Request::Code(_) => self.costs.code, + Request::Execution(ref req) => self.costs.transaction_proof.map(|c| c * req.gas), + Request::Signal(_) => self.costs.epoch_signal, + } + } - /// Compute the cost of a set of requests. - /// This is the base cost plus the cost of each individual request. - pub fn compute_cost_multi(&self, requests: &[Request]) -> Option { - let mut cost = self.costs.base; - for request in requests { - match self.compute_cost(request) { - Some(c) => cost = cost + c, - None => return None, - } - } + /// Compute the cost of a set of requests. + /// This is the base cost plus the cost of each individual request. 
+ pub fn compute_cost_multi(&self, requests: &[Request]) -> Option { + let mut cost = self.costs.base; + for request in requests { + match self.compute_cost(request) { + Some(c) => cost = cost + c, + None => return None, + } + } - Some(cost) - } + Some(cost) + } - /// Create initial credits. - pub fn create_credits(&self) -> Credits { - Credits { - estimate: self.limit, - recharge_point: Instant::now(), - } - } + /// Create initial credits. + pub fn create_credits(&self) -> Credits { + Credits { + estimate: self.limit, + recharge_point: Instant::now(), + } + } - /// Recharge the given credits based on time passed since last - /// update. - pub fn recharge(&self, credits: &mut Credits) { - let now = Instant::now(); + /// Recharge the given credits based on time passed since last + /// update. + pub fn recharge(&self, credits: &mut Credits) { + let now = Instant::now(); - // recompute and update only in terms of full seconds elapsed - // in order to keep the estimate as an underestimate. - let elapsed = (now - credits.recharge_point).as_secs(); - credits.recharge_point += Duration::from_secs(elapsed); + // recompute and update only in terms of full seconds elapsed + // in order to keep the estimate as an underestimate. + let elapsed = (now - credits.recharge_point).as_secs(); + credits.recharge_point += Duration::from_secs(elapsed); - let elapsed: U256 = elapsed.into(); + let elapsed: U256 = elapsed.into(); - credits.estimate = ::std::cmp::min(self.limit, credits.estimate + (elapsed * self.recharge)); - } + credits.estimate = + ::std::cmp::min(self.limit, credits.estimate + (elapsed * self.recharge)); + } - /// Refund some credits which were previously deducted. - /// Does not update the recharge timestamp. - pub fn refund(&self, credits: &mut Credits, refund_amount: U256) { - credits.estimate = credits.estimate + refund_amount; + /// Refund some credits which were previously deducted. + /// Does not update the recharge timestamp. 
+ pub fn refund(&self, credits: &mut Credits, refund_amount: U256) { + credits.estimate = credits.estimate + refund_amount; - if credits.estimate > self.limit { - credits.estimate = self.limit - } - } + if credits.estimate > self.limit { + credits.estimate = self.limit + } + } } impl Default for FlowParams { - fn default() -> Self { - FlowParams { - limit: 50_000_000.into(), - costs: CostTable::default(), - recharge: 100_000.into(), - } - } + fn default() -> Self { + FlowParams { + limit: 50_000_000.into(), + costs: CostTable::default(), + recharge: 100_000.into(), + } + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn should_serialize_cost_table() { - let costs = CostTable::default(); - let serialized = ::rlp::encode(&costs); + #[test] + fn should_serialize_cost_table() { + let costs = CostTable::default(); + let serialized = ::rlp::encode(&costs); - let new_costs: CostTable = ::rlp::decode(&*serialized).unwrap(); + let new_costs: CostTable = ::rlp::decode(&*serialized).unwrap(); - assert_eq!(costs, new_costs); - } + assert_eq!(costs, new_costs); + } - #[test] - fn credits_mechanism() { - use std::thread; - use std::time::Duration; + #[test] + fn credits_mechanism() { + use std::{thread, time::Duration}; - let flow_params = FlowParams::new(100.into(), Default::default(), 20.into()); - let mut credits = flow_params.create_credits(); + let flow_params = FlowParams::new(100.into(), Default::default(), 20.into()); + let mut credits = flow_params.create_credits(); - assert!(credits.deduct_cost(101.into()).is_err()); - assert!(credits.deduct_cost(10.into()).is_ok()); + assert!(credits.deduct_cost(101.into()).is_err()); + assert!(credits.deduct_cost(10.into()).is_ok()); - thread::sleep(Duration::from_secs(1)); + thread::sleep(Duration::from_secs(1)); - flow_params.recharge(&mut credits); + flow_params.recharge(&mut credits); - assert_eq!(credits.estimate, 100.into()); - } + assert_eq!(credits.estimate, 100.into()); + } - #[test] - fn 
scale_by_load_share_and_time() { - let flow_params = FlowParams::from_request_times( - |_| Duration::new(0, 10_000), - 0.05, - Duration::from_secs(60), - ); + #[test] + fn scale_by_load_share_and_time() { + let flow_params = FlowParams::from_request_times( + |_| Duration::new(0, 10_000), + 0.05, + Duration::from_secs(60), + ); - let flow_params2 = FlowParams::from_request_times( - |_| Duration::new(0, 10_000), - 0.1, - Duration::from_secs(60), - ); + let flow_params2 = FlowParams::from_request_times( + |_| Duration::new(0, 10_000), + 0.1, + Duration::from_secs(60), + ); - let flow_params3 = FlowParams::from_request_times( - |_| Duration::new(0, 5_000), - 0.05, - Duration::from_secs(60), - ); + let flow_params3 = FlowParams::from_request_times( + |_| Duration::new(0, 5_000), + 0.05, + Duration::from_secs(60), + ); - assert_eq!(flow_params2.costs, flow_params3.costs); - assert_eq!(flow_params.costs.headers.unwrap(), flow_params2.costs.headers.unwrap() * 2u32); - } + assert_eq!(flow_params2.costs, flow_params3.costs); + assert_eq!( + flow_params.costs.headers.unwrap(), + flow_params2.costs.headers.unwrap() * 2u32 + ); + } } diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index f3ec63547..f12a4d878 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -21,14 +21,15 @@ //! //! Whenever a request becomes the earliest, its timeout period begins at that moment. -use std::collections::{BTreeMap, HashMap}; -use std::iter::FromIterator; -use std::time::{Duration, Instant}; +use std::{ + collections::{BTreeMap, HashMap}, + iter::FromIterator, + time::{Duration, Instant}, +}; -use request::Request; -use request::NetworkRequests as Requests; -use net::{timeout, ReqId}; use ethereum_types::U256; +use net::{timeout, ReqId}; +use request::{NetworkRequests as Requests, Request}; // Request set entry: requests + cost. #[derive(Debug)] @@ -37,154 +38,174 @@ struct Entry(Requests, U256); /// Request set. 
#[derive(Debug)] pub struct RequestSet { - counter: u64, - cumulative_cost: U256, - base: Option, - ids: HashMap, - reqs: BTreeMap, + counter: u64, + cumulative_cost: U256, + base: Option, + ids: HashMap, + reqs: BTreeMap, } impl Default for RequestSet { - fn default() -> Self { - RequestSet { - counter: 0, - cumulative_cost: 0.into(), - base: None, - ids: HashMap::new(), - reqs: BTreeMap::new(), - } - } + fn default() -> Self { + RequestSet { + counter: 0, + cumulative_cost: 0.into(), + base: None, + ids: HashMap::new(), + reqs: BTreeMap::new(), + } + } } impl RequestSet { - /// Push requests onto the stack. - pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: Instant) { - let counter = self.counter; - self.cumulative_cost = self.cumulative_cost + cost; + /// Push requests onto the stack. + pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: Instant) { + let counter = self.counter; + self.cumulative_cost = self.cumulative_cost + cost; - self.ids.insert(req_id, counter); - self.reqs.insert(counter, Entry(req, cost)); + self.ids.insert(req_id, counter); + self.reqs.insert(counter, Entry(req, cost)); - if self.reqs.keys().next().map_or(true, |x| *x == counter) { - self.base = Some(now); - } + if self.reqs.keys().next().map_or(true, |x| *x == counter) { + self.base = Some(now); + } - self.counter += 1; - } + self.counter += 1; + } - /// Remove a set of requests from the stack. - pub fn remove(&mut self, req_id: ReqId, now: Instant) -> Option { - let id = match self.ids.remove(&req_id) { - Some(id) => id, - None => return None, - }; + /// Remove a set of requests from the stack. 
+ pub fn remove(&mut self, req_id: ReqId, now: Instant) -> Option { + let id = match self.ids.remove(&req_id) { + Some(id) => id, + None => return None, + }; - let Entry(req, cost) = self.reqs.remove(&id).expect("entry in `ids` implies entry in `reqs`; qed"); + let Entry(req, cost) = self + .reqs + .remove(&id) + .expect("entry in `ids` implies entry in `reqs`; qed"); - match self.reqs.keys().next() { - Some(k) if *k > id => self.base = Some(now), - None => self.base = None, - _ => {} - } + match self.reqs.keys().next() { + Some(k) if *k > id => self.base = Some(now), + None => self.base = None, + _ => {} + } - self.cumulative_cost = self.cumulative_cost - cost; - Some(req) - } + self.cumulative_cost = self.cumulative_cost - cost; + Some(req) + } - /// Check for timeout against the given time. Returns true if - /// has timed out, false otherwise. - pub fn check_timeout(&self, now: Instant) -> bool { - let base = match self.base.as_ref().cloned() { - Some(base) => base, - None => return false, - }; + /// Check for timeout against the given time. Returns true if + /// has timed out, false otherwise. + pub fn check_timeout(&self, now: Instant) -> bool { + let base = match self.base.as_ref().cloned() { + Some(base) => base, + None => return false, + }; - let first_req = self.reqs.values().next() - .expect("base existing implies `reqs` non-empty; qed"); + let first_req = self + .reqs + .values() + .next() + .expect("base existing implies `reqs` non-empty; qed"); - base + compute_timeout(&first_req.0) <= now - } + base + compute_timeout(&first_req.0) <= now + } - /// Collect all pending request ids. - pub fn collect_ids(&self) -> F where F: FromIterator { - self.ids.keys().cloned().collect() - } + /// Collect all pending request ids. + pub fn collect_ids(&self) -> F + where + F: FromIterator, + { + self.ids.keys().cloned().collect() + } - /// Number of requests in the set. - pub fn len(&self) -> usize { - self.ids.len() - } + /// Number of requests in the set. 
+ pub fn len(&self) -> usize { + self.ids.len() + } - /// Whether the set is empty. - pub fn is_empty(&self) -> bool { self.len() == 0 } + /// Whether the set is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } - /// The cumulative cost of all requests in the set. - // this may be useful later for load balancing. - #[allow(dead_code)] - pub fn cumulative_cost(&self) -> U256 { self.cumulative_cost } + /// The cumulative cost of all requests in the set. + // this may be useful later for load balancing. + #[allow(dead_code)] + pub fn cumulative_cost(&self) -> U256 { + self.cumulative_cost + } } // helper to calculate timeout for a specific set of requests. // it's a base amount + some amount per request. fn compute_timeout(reqs: &Requests) -> Duration { - Duration::from_millis(reqs.requests().iter().fold(timeout::BASE, |tm, req| { - tm + match *req { - Request::Headers(_) => timeout::HEADERS, - Request::HeaderProof(_) => timeout::HEADER_PROOF, - Request::TransactionIndex(_) => timeout::TRANSACTION_INDEX, - Request::Receipts(_) => timeout::RECEIPT, - Request::Body(_) => timeout::BODY, - Request::Account(_) => timeout::PROOF, - Request::Storage(_) => timeout::PROOF, - Request::Code(_) => timeout::CONTRACT_CODE, - Request::Execution(_) => timeout::TRANSACTION_PROOF, - Request::Signal(_) => timeout::EPOCH_SIGNAL, - } - })) + Duration::from_millis(reqs.requests().iter().fold(timeout::BASE, |tm, req| { + tm + match *req { + Request::Headers(_) => timeout::HEADERS, + Request::HeaderProof(_) => timeout::HEADER_PROOF, + Request::TransactionIndex(_) => timeout::TRANSACTION_INDEX, + Request::Receipts(_) => timeout::RECEIPT, + Request::Body(_) => timeout::BODY, + Request::Account(_) => timeout::PROOF, + Request::Storage(_) => timeout::PROOF, + Request::Code(_) => timeout::CONTRACT_CODE, + Request::Execution(_) => timeout::TRANSACTION_PROOF, + Request::Signal(_) => timeout::EPOCH_SIGNAL, + } + })) } #[cfg(test)] mod tests { - use net::ReqId; - use request::Builder; 
- use std::time::{Instant, Duration}; - use super::{RequestSet, compute_timeout}; + use super::{compute_timeout, RequestSet}; + use net::ReqId; + use request::Builder; + use std::time::{Duration, Instant}; - #[test] - fn multi_timeout() { - let test_begin = Instant::now(); - let mut req_set = RequestSet::default(); + #[test] + fn multi_timeout() { + let test_begin = Instant::now(); + let mut req_set = RequestSet::default(); - let the_req = Builder::default().build(); - let req_time = compute_timeout(&the_req); - req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin); - req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::from_secs(1)); + let the_req = Builder::default().build(); + let req_time = compute_timeout(&the_req); + req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin); + req_set.insert( + ReqId(1), + the_req, + 0.into(), + test_begin + Duration::from_secs(1), + ); - assert_eq!(req_set.base, Some(test_begin)); + assert_eq!(req_set.base, Some(test_begin)); - let test_end = test_begin + req_time; - assert!(req_set.check_timeout(test_end)); + let test_end = test_begin + req_time; + assert!(req_set.check_timeout(test_end)); - req_set.remove(ReqId(0), test_begin + Duration::from_secs(1)).unwrap(); - assert!(!req_set.check_timeout(test_end)); - assert!(req_set.check_timeout(test_end + Duration::from_secs(1))); - } + req_set + .remove(ReqId(0), test_begin + Duration::from_secs(1)) + .unwrap(); + assert!(!req_set.check_timeout(test_end)); + assert!(req_set.check_timeout(test_end + Duration::from_secs(1))); + } - #[test] - fn cumulative_cost() { - let the_req = Builder::default().build(); - let test_begin = Instant::now(); - let test_end = test_begin + Duration::from_secs(1); - let mut req_set = RequestSet::default(); + #[test] + fn cumulative_cost() { + let the_req = Builder::default().build(); + let test_begin = Instant::now(); + let test_end = test_begin + Duration::from_secs(1); + let mut req_set = RequestSet::default(); - for i 
in 0..5 { - req_set.insert(ReqId(i), the_req.clone(), 1.into(), test_begin); - assert_eq!(req_set.cumulative_cost, (i + 1).into()); - } + for i in 0..5 { + req_set.insert(ReqId(i), the_req.clone(), 1.into(), test_begin); + assert_eq!(req_set.cumulative_cost, (i + 1).into()); + } - for i in (0..5).rev() { - assert!(req_set.remove(ReqId(i), test_end).is_some()); - assert_eq!(req_set.cumulative_cost, i.into()); - } - } + for i in (0..5).rev() { + assert!(req_set.remove(ReqId(i), test_end).is_some()); + assert_eq!(req_set.cumulative_cost, i.into()); + } + } } diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs index ecbe1d3cc..4f79938f7 100644 --- a/ethcore/light/src/net/status.rs +++ b/ethcore/light/src/net/status.rs @@ -17,7 +17,7 @@ //! Peer status and capabilities. use ethereum_types::{H256, U256}; -use rlp::{DecoderError, Encodable, Decodable, RlpStream, Rlp}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; use super::request_credits::FlowParams; @@ -26,550 +26,544 @@ use super::request_credits::FlowParams; // their string values are defined in the LES spec. #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)] enum Key { - ProtocolVersion, - NetworkId, - HeadTD, - HeadHash, - HeadNum, - GenesisHash, - ServeHeaders, - ServeChainSince, - ServeStateSince, - TxRelay, - BufferLimit, - BufferCostTable, - BufferRechargeRate, + ProtocolVersion, + NetworkId, + HeadTD, + HeadHash, + HeadNum, + GenesisHash, + ServeHeaders, + ServeChainSince, + ServeStateSince, + TxRelay, + BufferLimit, + BufferCostTable, + BufferRechargeRate, } impl Key { - // get the string value of this key. 
- fn as_str(self) -> &'static str { - match self { - Key::ProtocolVersion => "protocolVersion", - Key::NetworkId => "networkId", - Key::HeadTD => "headTd", - Key::HeadHash => "headHash", - Key::HeadNum => "headNum", - Key::GenesisHash => "genesisHash", - Key::ServeHeaders => "serveHeaders", - Key::ServeChainSince => "serveChainSince", - Key::ServeStateSince => "serveStateSince", - Key::TxRelay => "txRelay", - Key::BufferLimit => "flowControl/BL", - Key::BufferCostTable => "flowControl/MRC", - Key::BufferRechargeRate => "flowControl/MRR", - } - } + // get the string value of this key. + fn as_str(self) -> &'static str { + match self { + Key::ProtocolVersion => "protocolVersion", + Key::NetworkId => "networkId", + Key::HeadTD => "headTd", + Key::HeadHash => "headHash", + Key::HeadNum => "headNum", + Key::GenesisHash => "genesisHash", + Key::ServeHeaders => "serveHeaders", + Key::ServeChainSince => "serveChainSince", + Key::ServeStateSince => "serveStateSince", + Key::TxRelay => "txRelay", + Key::BufferLimit => "flowControl/BL", + Key::BufferCostTable => "flowControl/MRC", + Key::BufferRechargeRate => "flowControl/MRR", + } + } - // try to parse the key value from a string. - fn from_str(s: &str) -> Option { - match s { - "protocolVersion" => Some(Key::ProtocolVersion), - "networkId" => Some(Key::NetworkId), - "headTd" => Some(Key::HeadTD), - "headHash" => Some(Key::HeadHash), - "headNum" => Some(Key::HeadNum), - "genesisHash" => Some(Key::GenesisHash), - "serveHeaders" => Some(Key::ServeHeaders), - "serveChainSince" => Some(Key::ServeChainSince), - "serveStateSince" => Some(Key::ServeStateSince), - "txRelay" => Some(Key::TxRelay), - "flowControl/BL" => Some(Key::BufferLimit), - "flowControl/MRC" => Some(Key::BufferCostTable), - "flowControl/MRR" => Some(Key::BufferRechargeRate), - _ => None - } - } + // try to parse the key value from a string. 
+ fn from_str(s: &str) -> Option { + match s { + "protocolVersion" => Some(Key::ProtocolVersion), + "networkId" => Some(Key::NetworkId), + "headTd" => Some(Key::HeadTD), + "headHash" => Some(Key::HeadHash), + "headNum" => Some(Key::HeadNum), + "genesisHash" => Some(Key::GenesisHash), + "serveHeaders" => Some(Key::ServeHeaders), + "serveChainSince" => Some(Key::ServeChainSince), + "serveStateSince" => Some(Key::ServeStateSince), + "txRelay" => Some(Key::TxRelay), + "flowControl/BL" => Some(Key::BufferLimit), + "flowControl/MRC" => Some(Key::BufferCostTable), + "flowControl/MRR" => Some(Key::BufferRechargeRate), + _ => None, + } + } } // helper for decoding key-value pairs in the handshake or an announcement. struct Parser<'a> { - pos: usize, - rlp: &'a Rlp<'a>, + pos: usize, + rlp: &'a Rlp<'a>, } impl<'a> Parser<'a> { - // expect a specific next key, and decode the value. - // error on unexpected key or invalid value. - fn expect(&mut self, key: Key) -> Result { - self.expect_raw(key).and_then(|item| item.as_val()) - } + // expect a specific next key, and decode the value. + // error on unexpected key or invalid value. + fn expect(&mut self, key: Key) -> Result { + self.expect_raw(key).and_then(|item| item.as_val()) + } - // expect a specific next key, and get the value's RLP. - // if the key isn't found, the position isn't advanced. - fn expect_raw(&mut self, key: Key) -> Result, DecoderError> { - trace!(target: "les", "Expecting key {}", key.as_str()); - let pre_pos = self.pos; - if let Some((k, val)) = self.get_next()? { - if k == key { return Ok(val) } - } + // expect a specific next key, and get the value's RLP. + // if the key isn't found, the position isn't advanced. + fn expect_raw(&mut self, key: Key) -> Result, DecoderError> { + trace!(target: "les", "Expecting key {}", key.as_str()); + let pre_pos = self.pos; + if let Some((k, val)) = self.get_next()? 
{ + if k == key { + return Ok(val); + } + } - self.pos = pre_pos; - Err(DecoderError::Custom("Missing expected key")) - } + self.pos = pre_pos; + Err(DecoderError::Custom("Missing expected key")) + } - // get the next key and value RLP. - fn get_next(&mut self) -> Result)>, DecoderError> { - while self.pos < self.rlp.item_count()? { - let pair = self.rlp.at(self.pos)?; - let k: String = pair.val_at(0)?; + // get the next key and value RLP. + fn get_next(&mut self) -> Result)>, DecoderError> { + while self.pos < self.rlp.item_count()? { + let pair = self.rlp.at(self.pos)?; + let k: String = pair.val_at(0)?; - self.pos += 1; - match Key::from_str(&k) { - Some(key) => return Ok(Some((key , pair.at(1)?))), - None => continue, - } - } + self.pos += 1; + match Key::from_str(&k) { + Some(key) => return Ok(Some((key, pair.at(1)?))), + None => continue, + } + } - Ok(None) - } + Ok(None) + } } // Helper for encoding a key-value pair fn encode_pair(key: Key, val: &T) -> Vec { - let mut s = RlpStream::new_list(2); - s.append(&key.as_str()).append(val); - s.out() + let mut s = RlpStream::new_list(2); + s.append(&key.as_str()).append(val); + s.out() } // Helper for encoding a flag. fn encode_flag(key: Key) -> Vec { - let mut s = RlpStream::new_list(2); - s.append(&key.as_str()).append_empty_data(); - s.out() + let mut s = RlpStream::new_list(2); + s.append(&key.as_str()).append_empty_data(); + s.out() } /// A peer status message. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Status { - /// Protocol version. - pub protocol_version: u32, - /// Network id of this peer. - pub network_id: u64, - /// Total difficulty of the head of the chain. - pub head_td: U256, - /// Hash of the best block. - pub head_hash: H256, - /// Number of the best block. - pub head_num: u64, - /// Genesis hash - pub genesis_hash: H256, - /// Last announced chain head and reorg depth to common ancestor. - pub last_head: Option<(H256, u64)>, + /// Protocol version. 
+ pub protocol_version: u32, + /// Network id of this peer. + pub network_id: u64, + /// Total difficulty of the head of the chain. + pub head_td: U256, + /// Hash of the best block. + pub head_hash: H256, + /// Number of the best block. + pub head_num: u64, + /// Genesis hash + pub genesis_hash: H256, + /// Last announced chain head and reorg depth to common ancestor. + pub last_head: Option<(H256, u64)>, } impl Status { - /// Update the status from an announcement. - pub fn update_from(&mut self, announcement: &Announcement) { - self.last_head = Some((self.head_hash, announcement.reorg_depth)); - self.head_td = announcement.head_td; - self.head_hash = announcement.head_hash; - self.head_num = announcement.head_num; - } + /// Update the status from an announcement. + pub fn update_from(&mut self, announcement: &Announcement) { + self.last_head = Some((self.head_hash, announcement.reorg_depth)); + self.head_td = announcement.head_td; + self.head_hash = announcement.head_hash; + self.head_num = announcement.head_num; + } } /// Peer capabilities. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Capabilities { - /// Whether this peer can serve headers - pub serve_headers: bool, - /// Earliest block number it can serve block/receipt requests for. - /// `None` means no requests will be servable. - pub serve_chain_since: Option, - /// Earliest block number it can serve state requests for. - /// `None` means no requests will be servable. - pub serve_state_since: Option, - /// Whether it can relay transactions to the eth network. - pub tx_relay: bool, + /// Whether this peer can serve headers + pub serve_headers: bool, + /// Earliest block number it can serve block/receipt requests for. + /// `None` means no requests will be servable. + pub serve_chain_since: Option, + /// Earliest block number it can serve state requests for. + /// `None` means no requests will be servable. + pub serve_state_since: Option, + /// Whether it can relay transactions to the eth network. 
+ pub tx_relay: bool, } impl Default for Capabilities { - fn default() -> Self { - Capabilities { - serve_headers: true, - serve_chain_since: None, - serve_state_since: None, - tx_relay: false, - } - } + fn default() -> Self { + Capabilities { + serve_headers: true, + serve_chain_since: None, + serve_state_since: None, + tx_relay: false, + } + } } impl Capabilities { - /// Update the capabilities from an announcement. - pub fn update_from(&mut self, announcement: &Announcement) { - self.serve_headers = self.serve_headers || announcement.serve_headers; - self.serve_state_since = self.serve_state_since.or(announcement.serve_state_since); - self.serve_chain_since = self.serve_chain_since.or(announcement.serve_chain_since); - self.tx_relay = self.tx_relay || announcement.tx_relay; - } + /// Update the capabilities from an announcement. + pub fn update_from(&mut self, announcement: &Announcement) { + self.serve_headers = self.serve_headers || announcement.serve_headers; + self.serve_state_since = self.serve_state_since.or(announcement.serve_state_since); + self.serve_chain_since = self.serve_chain_since.or(announcement.serve_chain_since); + self.tx_relay = self.tx_relay || announcement.tx_relay; + } } /// Attempt to parse a handshake message into its three parts: /// - chain status /// - serving capabilities /// - request credit parameters -pub fn parse_handshake(rlp: &Rlp) -> Result<(Status, Capabilities, Option), DecoderError> { - let mut parser = Parser { - pos: 0, - rlp, - }; +pub fn parse_handshake( + rlp: &Rlp, +) -> Result<(Status, Capabilities, Option), DecoderError> { + let mut parser = Parser { pos: 0, rlp }; - let status = Status { - protocol_version: parser.expect(Key::ProtocolVersion)?, - network_id: parser.expect(Key::NetworkId)?, - head_td: parser.expect(Key::HeadTD)?, - head_hash: parser.expect(Key::HeadHash)?, - head_num: parser.expect(Key::HeadNum)?, - genesis_hash: parser.expect(Key::GenesisHash)?, - last_head: None, - }; + let status = Status { + 
protocol_version: parser.expect(Key::ProtocolVersion)?, + network_id: parser.expect(Key::NetworkId)?, + head_td: parser.expect(Key::HeadTD)?, + head_hash: parser.expect(Key::HeadHash)?, + head_num: parser.expect(Key::HeadNum)?, + genesis_hash: parser.expect(Key::GenesisHash)?, + last_head: None, + }; - let capabilities = Capabilities { - serve_headers: parser.expect_raw(Key::ServeHeaders).is_ok(), - serve_chain_since: parser.expect(Key::ServeChainSince).ok(), - serve_state_since: parser.expect(Key::ServeStateSince).ok(), - tx_relay: parser.expect_raw(Key::TxRelay).is_ok(), - }; + let capabilities = Capabilities { + serve_headers: parser.expect_raw(Key::ServeHeaders).is_ok(), + serve_chain_since: parser.expect(Key::ServeChainSince).ok(), + serve_state_since: parser.expect(Key::ServeStateSince).ok(), + tx_relay: parser.expect_raw(Key::TxRelay).is_ok(), + }; - let flow_params = match ( - parser.expect(Key::BufferLimit), - parser.expect(Key::BufferCostTable), - parser.expect(Key::BufferRechargeRate) - ) { - (Ok(bl), Ok(bct), Ok(brr)) => Some(FlowParams::new(bl, bct, brr)), - _ => None, - }; + let flow_params = match ( + parser.expect(Key::BufferLimit), + parser.expect(Key::BufferCostTable), + parser.expect(Key::BufferRechargeRate), + ) { + (Ok(bl), Ok(bct), Ok(brr)) => Some(FlowParams::new(bl, bct, brr)), + _ => None, + }; - Ok((status, capabilities, flow_params)) + Ok((status, capabilities, flow_params)) } /// Write a handshake, given status, capabilities, and flow parameters. 
-pub fn write_handshake(status: &Status, capabilities: &Capabilities, flow_params: Option<&FlowParams>) -> Vec { - let mut pairs = Vec::new(); - pairs.push(encode_pair(Key::ProtocolVersion, &status.protocol_version)); - pairs.push(encode_pair(Key::NetworkId, &(status.network_id as u64))); - pairs.push(encode_pair(Key::HeadTD, &status.head_td)); - pairs.push(encode_pair(Key::HeadHash, &status.head_hash)); - pairs.push(encode_pair(Key::HeadNum, &status.head_num)); - pairs.push(encode_pair(Key::GenesisHash, &status.genesis_hash)); +pub fn write_handshake( + status: &Status, + capabilities: &Capabilities, + flow_params: Option<&FlowParams>, +) -> Vec { + let mut pairs = Vec::new(); + pairs.push(encode_pair(Key::ProtocolVersion, &status.protocol_version)); + pairs.push(encode_pair(Key::NetworkId, &(status.network_id as u64))); + pairs.push(encode_pair(Key::HeadTD, &status.head_td)); + pairs.push(encode_pair(Key::HeadHash, &status.head_hash)); + pairs.push(encode_pair(Key::HeadNum, &status.head_num)); + pairs.push(encode_pair(Key::GenesisHash, &status.genesis_hash)); - if capabilities.serve_headers { - pairs.push(encode_flag(Key::ServeHeaders)); - } - if let Some(ref serve_chain_since) = capabilities.serve_chain_since { - pairs.push(encode_pair(Key::ServeChainSince, serve_chain_since)); - } - if let Some(ref serve_state_since) = capabilities.serve_state_since { - pairs.push(encode_pair(Key::ServeStateSince, serve_state_since)); - } - if capabilities.tx_relay { - pairs.push(encode_flag(Key::TxRelay)); - } + if capabilities.serve_headers { + pairs.push(encode_flag(Key::ServeHeaders)); + } + if let Some(ref serve_chain_since) = capabilities.serve_chain_since { + pairs.push(encode_pair(Key::ServeChainSince, serve_chain_since)); + } + if let Some(ref serve_state_since) = capabilities.serve_state_since { + pairs.push(encode_pair(Key::ServeStateSince, serve_state_since)); + } + if capabilities.tx_relay { + pairs.push(encode_flag(Key::TxRelay)); + } - if let Some(flow_params) = 
flow_params { - pairs.push(encode_pair(Key::BufferLimit, flow_params.limit())); - pairs.push(encode_pair(Key::BufferCostTable, flow_params.cost_table())); - pairs.push(encode_pair(Key::BufferRechargeRate, flow_params.recharge_rate())); - } + if let Some(flow_params) = flow_params { + pairs.push(encode_pair(Key::BufferLimit, flow_params.limit())); + pairs.push(encode_pair(Key::BufferCostTable, flow_params.cost_table())); + pairs.push(encode_pair( + Key::BufferRechargeRate, + flow_params.recharge_rate(), + )); + } - let mut stream = RlpStream::new_list(pairs.len()); + let mut stream = RlpStream::new_list(pairs.len()); - for pair in pairs { - stream.append_raw(&pair, 1); - } + for pair in pairs { + stream.append_raw(&pair, 1); + } - stream.out() + stream.out() } /// An announcement of new chain head or capabilities made by a peer. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Announcement { - /// Hash of the best block. - pub head_hash: H256, - /// Number of the best block. - pub head_num: u64, - /// Head total difficulty - pub head_td: U256, - /// reorg depth to common ancestor of last announced head. - pub reorg_depth: u64, - /// optional new header-serving capability. false means "no change" - pub serve_headers: bool, - /// optional new state-serving capability - pub serve_state_since: Option, - /// optional new chain-serving capability - pub serve_chain_since: Option, - /// optional new transaction-relay capability. false means "no change" - pub tx_relay: bool, - // TODO: changes in request credits. + /// Hash of the best block. + pub head_hash: H256, + /// Number of the best block. + pub head_num: u64, + /// Head total difficulty + pub head_td: U256, + /// reorg depth to common ancestor of last announced head. + pub reorg_depth: u64, + /// optional new header-serving capability. 
false means "no change" + pub serve_headers: bool, + /// optional new state-serving capability + pub serve_state_since: Option, + /// optional new chain-serving capability + pub serve_chain_since: Option, + /// optional new transaction-relay capability. false means "no change" + pub tx_relay: bool, + // TODO: changes in request credits. } /// Parse an announcement. pub fn parse_announcement(rlp: &Rlp) -> Result { - let mut last_key = None; + let mut last_key = None; - let mut announcement = Announcement { - head_hash: rlp.val_at(0)?, - head_num: rlp.val_at(1)?, - head_td: rlp.val_at(2)?, - reorg_depth: rlp.val_at(3)?, - serve_headers: false, - serve_state_since: None, - serve_chain_since: None, - tx_relay: false, - }; + let mut announcement = Announcement { + head_hash: rlp.val_at(0)?, + head_num: rlp.val_at(1)?, + head_td: rlp.val_at(2)?, + reorg_depth: rlp.val_at(3)?, + serve_headers: false, + serve_state_since: None, + serve_chain_since: None, + tx_relay: false, + }; - let mut parser = Parser { - pos: 4, - rlp, - }; + let mut parser = Parser { pos: 4, rlp }; - while let Some((key, item)) = parser.get_next()? { - if Some(key) <= last_key { return Err(DecoderError::Custom("Invalid announcement key ordering")) } - last_key = Some(key); + while let Some((key, item)) = parser.get_next()? 
{ + if Some(key) <= last_key { + return Err(DecoderError::Custom("Invalid announcement key ordering")); + } + last_key = Some(key); - match key { - Key::ServeHeaders => announcement.serve_headers = true, - Key::ServeStateSince => announcement.serve_state_since = Some(item.as_val()?), - Key::ServeChainSince => announcement.serve_chain_since = Some(item.as_val()?), - Key::TxRelay => announcement.tx_relay = true, - _ => return Err(DecoderError::Custom("Nonsensical key in announcement")), - } - } + match key { + Key::ServeHeaders => announcement.serve_headers = true, + Key::ServeStateSince => announcement.serve_state_since = Some(item.as_val()?), + Key::ServeChainSince => announcement.serve_chain_since = Some(item.as_val()?), + Key::TxRelay => announcement.tx_relay = true, + _ => return Err(DecoderError::Custom("Nonsensical key in announcement")), + } + } - Ok(announcement) + Ok(announcement) } /// Write an announcement out. pub fn write_announcement(announcement: &Announcement) -> Vec { - let mut pairs = Vec::new(); - if announcement.serve_headers { - pairs.push(encode_flag(Key::ServeHeaders)); - } - if let Some(ref serve_chain_since) = announcement.serve_chain_since { - pairs.push(encode_pair(Key::ServeChainSince, serve_chain_since)); - } - if let Some(ref serve_state_since) = announcement.serve_state_since { - pairs.push(encode_pair(Key::ServeStateSince, serve_state_since)); - } - if announcement.tx_relay { - pairs.push(encode_flag(Key::TxRelay)); - } + let mut pairs = Vec::new(); + if announcement.serve_headers { + pairs.push(encode_flag(Key::ServeHeaders)); + } + if let Some(ref serve_chain_since) = announcement.serve_chain_since { + pairs.push(encode_pair(Key::ServeChainSince, serve_chain_since)); + } + if let Some(ref serve_state_since) = announcement.serve_state_since { + pairs.push(encode_pair(Key::ServeStateSince, serve_state_since)); + } + if announcement.tx_relay { + pairs.push(encode_flag(Key::TxRelay)); + } - let mut stream = RlpStream::new_list(4 + 
pairs.len()); - stream - .append(&announcement.head_hash) - .append(&announcement.head_num) - .append(&announcement.head_td) - .append(&announcement.reorg_depth); + let mut stream = RlpStream::new_list(4 + pairs.len()); + stream + .append(&announcement.head_hash) + .append(&announcement.head_num) + .append(&announcement.head_td) + .append(&announcement.reorg_depth); - for item in pairs { - stream.append_raw(&item, 1); - } + for item in pairs { + stream.append_raw(&item, 1); + } - stream.out() + stream.out() } #[cfg(test)] mod tests { - use super::*; - use super::super::request_credits::FlowParams; - use ethereum_types::{U256, H256}; - use rlp::{RlpStream, Rlp}; + use super::{super::request_credits::FlowParams, *}; + use ethereum_types::{H256, U256}; + use rlp::{Rlp, RlpStream}; - #[test] - fn full_handshake() { - let status = Status { - protocol_version: 1, - network_id: 1, - head_td: U256::default(), - head_hash: H256::default(), - head_num: 10, - genesis_hash: H256::zero(), - last_head: None, - }; + #[test] + fn full_handshake() { + let status = Status { + protocol_version: 1, + network_id: 1, + head_td: U256::default(), + head_hash: H256::default(), + head_num: 10, + genesis_hash: H256::zero(), + last_head: None, + }; - let capabilities = Capabilities { - serve_headers: true, - serve_chain_since: Some(5), - serve_state_since: Some(8), - tx_relay: true, - }; + let capabilities = Capabilities { + serve_headers: true, + serve_chain_since: Some(5), + serve_state_since: Some(8), + tx_relay: true, + }; - let flow_params = FlowParams::new( - 1_000_000.into(), - Default::default(), - 1000.into(), - ); + let flow_params = FlowParams::new(1_000_000.into(), Default::default(), 1000.into()); - let handshake = write_handshake(&status, &capabilities, Some(&flow_params)); + let handshake = write_handshake(&status, &capabilities, Some(&flow_params)); - let (read_status, read_capabilities, read_flow) - = parse_handshake(&Rlp::new(&handshake)).unwrap(); + let (read_status, 
read_capabilities, read_flow) = + parse_handshake(&Rlp::new(&handshake)).unwrap(); - assert_eq!(read_status, status); - assert_eq!(read_capabilities, capabilities); - assert_eq!(read_flow.unwrap(), flow_params); - } + assert_eq!(read_status, status); + assert_eq!(read_capabilities, capabilities); + assert_eq!(read_flow.unwrap(), flow_params); + } - #[test] - fn partial_handshake() { - let status = Status { - protocol_version: 1, - network_id: 1, - head_td: U256::default(), - head_hash: H256::default(), - head_num: 10, - genesis_hash: H256::zero(), - last_head: None, - }; + #[test] + fn partial_handshake() { + let status = Status { + protocol_version: 1, + network_id: 1, + head_td: U256::default(), + head_hash: H256::default(), + head_num: 10, + genesis_hash: H256::zero(), + last_head: None, + }; - let capabilities = Capabilities { - serve_headers: false, - serve_chain_since: Some(5), - serve_state_since: None, - tx_relay: true, - }; + let capabilities = Capabilities { + serve_headers: false, + serve_chain_since: Some(5), + serve_state_since: None, + tx_relay: true, + }; - let flow_params = FlowParams::new( - 1_000_000.into(), - Default::default(), - 1000.into(), - ); + let flow_params = FlowParams::new(1_000_000.into(), Default::default(), 1000.into()); - let handshake = write_handshake(&status, &capabilities, Some(&flow_params)); + let handshake = write_handshake(&status, &capabilities, Some(&flow_params)); - let (read_status, read_capabilities, read_flow) - = parse_handshake(&Rlp::new(&handshake)).unwrap(); + let (read_status, read_capabilities, read_flow) = + parse_handshake(&Rlp::new(&handshake)).unwrap(); - assert_eq!(read_status, status); - assert_eq!(read_capabilities, capabilities); - assert_eq!(read_flow.unwrap(), flow_params); - } + assert_eq!(read_status, status); + assert_eq!(read_capabilities, capabilities); + assert_eq!(read_flow.unwrap(), flow_params); + } - #[test] - fn skip_unknown_keys() { - let status = Status { - protocol_version: 1, - 
network_id: 1, - head_td: U256::default(), - head_hash: H256::default(), - head_num: 10, - genesis_hash: H256::zero(), - last_head: None, - }; + #[test] + fn skip_unknown_keys() { + let status = Status { + protocol_version: 1, + network_id: 1, + head_td: U256::default(), + head_hash: H256::default(), + head_num: 10, + genesis_hash: H256::zero(), + last_head: None, + }; - let capabilities = Capabilities { - serve_headers: false, - serve_chain_since: Some(5), - serve_state_since: None, - tx_relay: true, - }; + let capabilities = Capabilities { + serve_headers: false, + serve_chain_since: Some(5), + serve_state_since: None, + tx_relay: true, + }; - let flow_params = FlowParams::new( - 1_000_000.into(), - Default::default(), - 1000.into(), - ); + let flow_params = FlowParams::new(1_000_000.into(), Default::default(), 1000.into()); - let handshake = write_handshake(&status, &capabilities, Some(&flow_params)); - let interleaved = { - let handshake = Rlp::new(&handshake); - let mut stream = RlpStream::new_list(handshake.item_count().unwrap_or(0) * 3); + let handshake = write_handshake(&status, &capabilities, Some(&flow_params)); + let interleaved = { + let handshake = Rlp::new(&handshake); + let mut stream = RlpStream::new_list(handshake.item_count().unwrap_or(0) * 3); - for item in handshake.iter() { - stream.append_raw(item.as_raw(), 1); - let (mut s1, mut s2) = (RlpStream::new_list(2), RlpStream::new_list(2)); - s1.append(&"foo").append_empty_data(); - s2.append(&"bar").append_empty_data(); - stream.append_raw(&s1.out(), 1); - stream.append_raw(&s2.out(), 1); - } + for item in handshake.iter() { + stream.append_raw(item.as_raw(), 1); + let (mut s1, mut s2) = (RlpStream::new_list(2), RlpStream::new_list(2)); + s1.append(&"foo").append_empty_data(); + s2.append(&"bar").append_empty_data(); + stream.append_raw(&s1.out(), 1); + stream.append_raw(&s2.out(), 1); + } - stream.out() - }; + stream.out() + }; - let (read_status, read_capabilities, read_flow) - = 
parse_handshake(&Rlp::new(&interleaved)).unwrap(); + let (read_status, read_capabilities, read_flow) = + parse_handshake(&Rlp::new(&interleaved)).unwrap(); - assert_eq!(read_status, status); - assert_eq!(read_capabilities, capabilities); - assert_eq!(read_flow.unwrap(), flow_params); - } + assert_eq!(read_status, status); + assert_eq!(read_capabilities, capabilities); + assert_eq!(read_flow.unwrap(), flow_params); + } - #[test] - fn announcement_roundtrip() { - let announcement = Announcement { - head_hash: H256::random(), - head_num: 100_000, - head_td: 1_000_000.into(), - reorg_depth: 4, - serve_headers: false, - serve_state_since: Some(99_000), - serve_chain_since: Some(1), - tx_relay: true, - }; + #[test] + fn announcement_roundtrip() { + let announcement = Announcement { + head_hash: H256::random(), + head_num: 100_000, + head_td: 1_000_000.into(), + reorg_depth: 4, + serve_headers: false, + serve_state_since: Some(99_000), + serve_chain_since: Some(1), + tx_relay: true, + }; - let serialized = write_announcement(&announcement); - let read = parse_announcement(&Rlp::new(&serialized)).unwrap(); + let serialized = write_announcement(&announcement); + let read = parse_announcement(&Rlp::new(&serialized)).unwrap(); - assert_eq!(read, announcement); - } + assert_eq!(read, announcement); + } - #[test] - fn keys_out_of_order() { - use super::{Key, encode_pair, encode_flag}; + #[test] + fn keys_out_of_order() { + use super::{encode_flag, encode_pair, Key}; - let mut stream = RlpStream::new_list(6); - stream - .append(&H256::zero()) - .append(&10_u64) - .append(&100_000_u64) - .append(&2_u64) - .append_raw(&encode_pair(Key::ServeStateSince, &44_u64), 1) - .append_raw(&encode_flag(Key::ServeHeaders), 1); + let mut stream = RlpStream::new_list(6); + stream + .append(&H256::zero()) + .append(&10_u64) + .append(&100_000_u64) + .append(&2_u64) + .append_raw(&encode_pair(Key::ServeStateSince, &44_u64), 1) + .append_raw(&encode_flag(Key::ServeHeaders), 1); - let out = 
stream.drain(); - assert!(parse_announcement(&Rlp::new(&out)).is_err()); + let out = stream.drain(); + assert!(parse_announcement(&Rlp::new(&out)).is_err()); - let mut stream = RlpStream::new_list(6); - stream - .append(&H256::zero()) - .append(&10_u64) - .append(&100_000_u64) - .append(&2_u64) - .append_raw(&encode_flag(Key::ServeHeaders), 1) - .append_raw(&encode_pair(Key::ServeStateSince, &44_u64), 1); + let mut stream = RlpStream::new_list(6); + stream + .append(&H256::zero()) + .append(&10_u64) + .append(&100_000_u64) + .append(&2_u64) + .append_raw(&encode_flag(Key::ServeHeaders), 1) + .append_raw(&encode_pair(Key::ServeStateSince, &44_u64), 1); - let out = stream.drain(); - assert!(parse_announcement(&Rlp::new(&out)).is_ok()); - } + let out = stream.drain(); + assert!(parse_announcement(&Rlp::new(&out)).is_ok()); + } - #[test] - fn optional_flow() { - let status = Status { - protocol_version: 1, - network_id: 1, - head_td: U256::default(), - head_hash: H256::default(), - head_num: 10, - genesis_hash: H256::zero(), - last_head: None, - }; + #[test] + fn optional_flow() { + let status = Status { + protocol_version: 1, + network_id: 1, + head_td: U256::default(), + head_hash: H256::default(), + head_num: 10, + genesis_hash: H256::zero(), + last_head: None, + }; - let capabilities = Capabilities { - serve_headers: true, - serve_chain_since: Some(5), - serve_state_since: Some(8), - tx_relay: true, - }; + let capabilities = Capabilities { + serve_headers: true, + serve_chain_since: Some(5), + serve_state_since: Some(8), + tx_relay: true, + }; - let handshake = write_handshake(&status, &capabilities, None); + let handshake = write_handshake(&status, &capabilities, None); - let (read_status, read_capabilities, read_flow) - = parse_handshake(&Rlp::new(&handshake)).unwrap(); + let (read_status, read_capabilities, read_flow) = + parse_handshake(&Rlp::new(&handshake)).unwrap(); - assert_eq!(read_status, status); - assert_eq!(read_capabilities, capabilities); - 
assert!(read_flow.is_none()); - } + assert_eq!(read_status, status); + assert_eq!(read_capabilities, capabilities); + assert!(read_flow.is_none()); + } } diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 2ca7477f2..fc1f3d017 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -17,285 +17,319 @@ //! Tests for the `LightProtocol` implementation. //! These don't test of the higher level logic on top of -use common_types::blockchain_info::BlockChainInfo; -use common_types::encoded; -use common_types::ids::BlockId; -use common_types::transaction::{Action, PendingTransaction}; +use common_types::{ + blockchain_info::BlockChainInfo, + encoded, + ids::BlockId, + transaction::{Action, PendingTransaction}, +}; use ethcore::client::{EachBlockWith, TestBlockChainClient}; -use ethereum_types::{H256, U256, Address}; -use net::context::IoContext; -use net::load_timer::MOVING_SAMPLE_SIZE; -use net::status::{Capabilities, Status}; -use net::{LightProtocol, Params, packet, Peer, Statistics}; -use network::{PeerId, NodeId}; +use ethereum_types::{Address, H256, U256}; +use net::{ + context::IoContext, + load_timer::MOVING_SAMPLE_SIZE, + packet, + status::{Capabilities, Status}, + LightProtocol, Params, Peer, Statistics, +}; +use network::{NodeId, PeerId}; use provider::Provider; -use request::*; -use request; +use request::{self, *}; use rlp::{Rlp, RlpStream}; -use std::sync::Arc; -use std::time::Instant; +use std::{sync::Arc, time::Instant}; // helper for encoding a single request into a packet. // panics on bad backreference. fn encode_single(request: Request) -> NetworkRequests { - let mut builder = Builder::default(); - builder.push(request).unwrap(); - builder.build() + let mut builder = Builder::default(); + builder.push(request).unwrap(); + builder.build() } // helper for making a packet out of `Requests`. 
fn make_packet(req_id: usize, requests: &NetworkRequests) -> Vec { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).append_list(&requests.requests()); - stream.out() + let mut stream = RlpStream::new_list(2); + stream.append(&req_id).append_list(&requests.requests()); + stream.out() } // expected result from a call. #[derive(Debug, PartialEq, Eq)] enum Expect { - /// Expect to have message sent to peer. - Send(PeerId, u8, Vec), - /// Expect this response. - Respond(u8, Vec), - /// Expect a punishment (disconnect/disable) - Punish(PeerId), - /// Expect nothing. - Nothing, + /// Expect to have message sent to peer. + Send(PeerId, u8, Vec), + /// Expect this response. + Respond(u8, Vec), + /// Expect a punishment (disconnect/disable) + Punish(PeerId), + /// Expect nothing. + Nothing, } impl IoContext for Expect { - fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { - assert_eq!(self, &Expect::Send(peer, packet_id, packet_body)); - } + fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { + assert_eq!(self, &Expect::Send(peer, packet_id, packet_body)); + } - fn respond(&self, packet_id: u8, packet_body: Vec) { - assert_eq!(self, &Expect::Respond(packet_id, packet_body)); - } + fn respond(&self, packet_id: u8, packet_body: Vec) { + assert_eq!(self, &Expect::Respond(packet_id, packet_body)); + } - fn disconnect_peer(&self, peer: PeerId) { - assert_eq!(self, &Expect::Punish(peer)); - } + fn disconnect_peer(&self, peer: PeerId) { + assert_eq!(self, &Expect::Punish(peer)); + } - fn disable_peer(&self, peer: PeerId) { - assert_eq!(self, &Expect::Punish(peer)); - } + fn disable_peer(&self, peer: PeerId) { + assert_eq!(self, &Expect::Punish(peer)); + } - fn protocol_version(&self, _peer: PeerId) -> Option { - Some(super::MAX_PROTOCOL_VERSION) - } + fn protocol_version(&self, _peer: PeerId) -> Option { + Some(super::MAX_PROTOCOL_VERSION) + } - fn persistent_peer_id(&self, _peer: PeerId) -> Option { - None - } + fn 
persistent_peer_id(&self, _peer: PeerId) -> Option { + None + } - fn is_reserved_peer(&self, peer: PeerId) -> bool { - peer == 0xff - } + fn is_reserved_peer(&self, peer: PeerId) -> bool { + peer == 0xff + } } // can't implement directly for Arc due to cross-crate orphan rules. struct TestProvider(Arc); struct TestProviderInner { - client: TestBlockChainClient, + client: TestBlockChainClient, } impl Provider for TestProvider { - fn chain_info(&self) -> BlockChainInfo { - self.0.client.chain_info() - } + fn chain_info(&self) -> BlockChainInfo { + self.0.client.chain_info() + } - fn reorg_depth(&self, a: &H256, b: &H256) -> Option { - self.0.client.reorg_depth(a, b) - } + fn reorg_depth(&self, a: &H256, b: &H256) -> Option { + self.0.client.reorg_depth(a, b) + } - fn earliest_state(&self) -> Option { - None - } + fn earliest_state(&self) -> Option { + None + } - fn block_header(&self, id: BlockId) -> Option { - self.0.client.block_header(id) - } + fn block_header(&self, id: BlockId) -> Option { + self.0.client.block_header(id) + } - fn transaction_index(&self, req: request::CompleteTransactionIndexRequest) - -> Option - { - Some(request::TransactionIndexResponse { - num: 100, - hash: req.hash, - index: 55, - }) - } + fn transaction_index( + &self, + req: request::CompleteTransactionIndexRequest, + ) -> Option { + Some(request::TransactionIndexResponse { + num: 100, + hash: req.hash, + index: 55, + }) + } - fn block_body(&self, req: request::CompleteBodyRequest) -> Option { - self.0.client.block_body(req) - } + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { + self.0.client.block_body(req) + } - fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { - self.0.client.block_receipts(req) - } + fn block_receipts( + &self, + req: request::CompleteReceiptsRequest, + ) -> Option { + self.0.client.block_receipts(req) + } - fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { - // sort of a leaf node - let mut 
stream = RlpStream::new_list(2); - stream.append(&req.address_hash).append_empty_data(); - Some(AccountResponse { - proof: vec![stream.out()], - balance: 10.into(), - nonce: 100.into(), - code_hash: Default::default(), - storage_root: Default::default(), - }) - } + fn account_proof( + &self, + req: request::CompleteAccountRequest, + ) -> Option { + // sort of a leaf node + let mut stream = RlpStream::new_list(2); + stream.append(&req.address_hash).append_empty_data(); + Some(AccountResponse { + proof: vec![stream.out()], + balance: 10.into(), + nonce: 100.into(), + code_hash: Default::default(), + storage_root: Default::default(), + }) + } - fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { - Some(StorageResponse { - proof: vec![::rlp::encode(&req.key_hash)], - value: req.key_hash | req.address_hash, - }) - } + fn storage_proof( + &self, + req: request::CompleteStorageRequest, + ) -> Option { + Some(StorageResponse { + proof: vec![::rlp::encode(&req.key_hash)], + value: req.key_hash | req.address_hash, + }) + } - fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { - Some(CodeResponse { - code: req.block_hash.iter().chain(req.code_hash.iter()).cloned().collect(), - }) - } + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + Some(CodeResponse { + code: req + .block_hash + .iter() + .chain(req.code_hash.iter()) + .cloned() + .collect(), + }) + } - fn header_proof(&self, _req: request::CompleteHeaderProofRequest) -> Option { - None - } + fn header_proof( + &self, + _req: request::CompleteHeaderProofRequest, + ) -> Option { + None + } - fn transaction_proof(&self, _req: request::CompleteExecutionRequest) -> Option { - None - } + fn transaction_proof( + &self, + _req: request::CompleteExecutionRequest, + ) -> Option { + None + } - fn epoch_signal(&self, _req: request::CompleteSignalRequest) -> Option { - Some(request::SignalResponse { - signal: vec![1, 2, 3, 4], - }) - } + fn epoch_signal( + &self, + _req: 
request::CompleteSignalRequest, + ) -> Option { + Some(request::SignalResponse { + signal: vec![1, 2, 3, 4], + }) + } - fn transactions_to_propagate(&self) -> Vec { - self.0.client.transactions_to_propagate() - } + fn transactions_to_propagate(&self) -> Vec { + self.0.client.transactions_to_propagate() + } } fn capabilities() -> Capabilities { - Capabilities { - serve_headers: true, - serve_chain_since: Some(1), - serve_state_since: Some(1), - tx_relay: true, - } + Capabilities { + serve_headers: true, + serve_chain_since: Some(1), + serve_state_since: Some(1), + tx_relay: true, + } } fn write_handshake(status: &Status, capabilities: &Capabilities, proto: &LightProtocol) -> Vec { - let flow_params = proto.flow_params.read().clone(); - ::net::status::write_handshake(status, capabilities, Some(&*flow_params)) + let flow_params = proto.flow_params.read().clone(); + ::net::status::write_handshake(status, capabilities, Some(&*flow_params)) } -fn write_free_handshake(status: &Status, capabilities: &Capabilities, proto: &LightProtocol) -> Vec { - ::net::status::write_handshake(status, capabilities, Some(&proto.free_flow_params)) +fn write_free_handshake( + status: &Status, + capabilities: &Capabilities, + proto: &LightProtocol, +) -> Vec { + ::net::status::write_handshake(status, capabilities, Some(&proto.free_flow_params)) } // helper for setting up the protocol handler and provider. 
fn setup(capabilities: Capabilities) -> (Arc, LightProtocol) { - let provider = Arc::new(TestProviderInner { - client: TestBlockChainClient::new(), - }); + let provider = Arc::new(TestProviderInner { + client: TestBlockChainClient::new(), + }); - let proto = LightProtocol::new(Arc::new(TestProvider(provider.clone())), Params { - network_id: 2, - config: Default::default(), - capabilities: capabilities, - sample_store: None, - }); + let proto = LightProtocol::new( + Arc::new(TestProvider(provider.clone())), + Params { + network_id: 2, + config: Default::default(), + capabilities: capabilities, + sample_store: None, + }, + ); - (provider, proto) + (provider, proto) } fn status(chain_info: BlockChainInfo) -> Status { - Status { - protocol_version: 1, - network_id: 2, - head_td: chain_info.total_difficulty, - head_hash: chain_info.best_block_hash, - head_num: chain_info.best_block_number, - genesis_hash: chain_info.genesis_hash, - last_head: None, - } + Status { + protocol_version: 1, + network_id: 2, + head_td: chain_info.total_difficulty, + head_hash: chain_info.best_block_hash, + head_num: chain_info.best_block_number, + genesis_hash: chain_info.genesis_hash, + last_head: None, + } } #[test] fn handshake_expected() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); + let (provider, proto) = setup(capabilities); - let status = status(provider.client.chain_info()); + let status = status(provider.client.chain_info()); - let packet_body = write_handshake(&status, &capabilities, &proto); + let packet_body = write_handshake(&status, &capabilities, &proto); - proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); } #[test] fn reserved_handshake_expected() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); + let (provider, proto) = 
setup(capabilities); - let status = status(provider.client.chain_info()); + let status = status(provider.client.chain_info()); - let packet_body = write_free_handshake(&status, &capabilities, &proto); + let packet_body = write_free_handshake(&status, &capabilities, &proto); - proto.on_connect(0xff, &Expect::Send(0xff, packet::STATUS, packet_body)); + proto.on_connect(0xff, &Expect::Send(0xff, packet::STATUS, packet_body)); } #[test] #[should_panic] fn genesis_mismatch() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); + let (provider, proto) = setup(capabilities); - let mut status = status(provider.client.chain_info()); - status.genesis_hash = H256::default(); + let mut status = status(provider.client.chain_info()); + status.genesis_hash = H256::default(); - let packet_body = write_handshake(&status, &capabilities, &proto); + let packet_body = write_handshake(&status, &capabilities, &proto); - proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); } #[test] fn credit_overflow() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); + let (provider, proto) = setup(capabilities); - let status = status(provider.client.chain_info()); + let status = status(provider.client.chain_info()); - { - let packet_body = write_handshake(&status, &capabilities, &proto); - proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); - } + { + let packet_body = write_handshake(&status, &capabilities, &proto); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); + } - { - let my_status = write_handshake(&status, &capabilities, &proto); - proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status); - } + { + let my_status = write_handshake(&status, &capabilities, &proto); + proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, 
&my_status); + } - // 1 billion requests is far too many for the default flow params. - let requests = encode_single(Request::Headers(IncompleteHeadersRequest { - start: HashOrNumber::Number(1).into(), - max: 1_000_000_000, - skip: 0, - reverse: false, - })); - let request = make_packet(111, &requests); + // 1 billion requests is far too many for the default flow params. + let requests = encode_single(Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(1).into(), + max: 1_000_000_000, + skip: 0, + reverse: false, + })); + let request = make_packet(111, &requests); - proto.handle_packet(&Expect::Punish(1), 1, packet::REQUEST, &request); + proto.handle_packet(&Expect::Punish(1), 1, packet::REQUEST, &request); } // test the basic request types -- these just make sure that requests are parsed @@ -303,512 +337,591 @@ fn credit_overflow() { #[test] fn get_block_headers() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); - let flow_params = proto.flow_params.read().clone(); + let (provider, proto) = setup(capabilities); + let flow_params = proto.flow_params.read().clone(); - let cur_status = status(provider.client.chain_info()); - let my_status = write_handshake(&cur_status, &capabilities, &proto); + let cur_status = status(provider.client.chain_info()); + let my_status = write_handshake(&cur_status, &capabilities, &proto); - provider.client.add_blocks(100, EachBlockWith::Nothing); + provider.client.add_blocks(100, EachBlockWith::Nothing); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.client.chain_info()); - { - let packet_body = write_handshake(&cur_status, &capabilities, &proto); - proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); - proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status); - } + { + let packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(1, 
&Expect::Send(1, packet::STATUS, packet_body)); + proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status); + } - let request = Request::Headers(IncompleteHeadersRequest { - start: HashOrNumber::Number(1).into(), - max: 10, - skip: 0, - reverse: false, - }); + let request = Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(1).into(), + max: 10, + skip: 0, + reverse: false, + }); - let req_id = 111; + let req_id = 111; - let requests = encode_single(request.clone()); - let request_body = make_packet(req_id, &requests); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); - let response = { - let headers: Vec<_> = (0..10).map(|i| provider.client.block_header(BlockId::Number(i + 1)).unwrap()).collect(); - assert_eq!(headers.len(), 10); + let response = { + let headers: Vec<_> = (0..10) + .map(|i| { + provider + .client + .block_header(BlockId::Number(i + 1)) + .unwrap() + }) + .collect(); + assert_eq!(headers.len(), 10); - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); + let new_creds = + *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); - let response = vec![Response::Headers(HeadersResponse { headers })]; + let response = vec![Response::Headers(HeadersResponse { headers })]; - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&new_creds).append_list(&response); + let mut stream = RlpStream::new_list(3); + stream + .append(&req_id) + .append(&new_creds) + .append_list(&response); - stream.out() - }; + stream.out() + }; - let expected = Expect::Respond(packet::RESPONSE, response); - proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); } #[test] fn get_block_bodies() { - let capabilities = capabilities(); + let capabilities = 
capabilities(); - let (provider, proto) = setup(capabilities); - let flow_params = proto.flow_params.read().clone(); + let (provider, proto) = setup(capabilities); + let flow_params = proto.flow_params.read().clone(); - let cur_status = status(provider.client.chain_info()); - let my_status = write_handshake(&cur_status, &capabilities, &proto); + let cur_status = status(provider.client.chain_info()); + let my_status = write_handshake(&cur_status, &capabilities, &proto); - provider.client.add_blocks(100, EachBlockWith::Nothing); + provider.client.add_blocks(100, EachBlockWith::Nothing); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.client.chain_info()); - { - let packet_body = write_handshake(&cur_status, &capabilities, &proto); - proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); - proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status); - } + { + let packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); + proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status); + } - let mut builder = Builder::default(); - let mut bodies = Vec::new(); + let mut builder = Builder::default(); + let mut bodies = Vec::new(); - for i in 0..10 { - let hash = provider.client.block_header(BlockId::Number(i)).unwrap().hash(); - builder.push(Request::Body(IncompleteBodyRequest { - hash: hash.into(), - })).unwrap(); - bodies.push(Response::Body(provider.client.block_body(CompleteBodyRequest { - hash: hash, - }).unwrap())); - } - let req_id = 111; - let requests = builder.build(); - let request_body = make_packet(req_id, &requests); + for i in 0..10 { + let hash = provider + .client + .block_header(BlockId::Number(i)) + .unwrap() + .hash(); + builder + .push(Request::Body(IncompleteBodyRequest { hash: hash.into() })) + .unwrap(); + bodies.push(Response::Body( + provider + .client + .block_body(CompleteBodyRequest { hash: 
hash }) + .unwrap(), + )); + } + let req_id = 111; + let requests = builder.build(); + let request_body = make_packet(req_id, &requests); - let response = { - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); + let response = { + let new_creds = + *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); - let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).append_list(&bodies); - response_stream.out() - }; + let mut response_stream = RlpStream::new_list(3); + response_stream + .append(&req_id) + .append(&new_creds) + .append_list(&bodies); + response_stream.out() + }; - let expected = Expect::Respond(packet::RESPONSE, response); - proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); } #[test] fn get_block_receipts() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); - let flow_params = proto.flow_params.read().clone(); + let (provider, proto) = setup(capabilities); + let flow_params = proto.flow_params.read().clone(); - let cur_status = status(provider.client.chain_info()); - let my_status = write_handshake(&cur_status, &capabilities, &proto); + let cur_status = status(provider.client.chain_info()); + let my_status = write_handshake(&cur_status, &capabilities, &proto); - provider.client.add_blocks(1000, EachBlockWith::Nothing); + provider.client.add_blocks(1000, EachBlockWith::Nothing); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.client.chain_info()); - { - let packet_body = write_handshake(&cur_status, &capabilities, &proto); - proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); - proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status); - } + { + 
let packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body)); + proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status); + } - // find the first 10 block hashes starting with `f` because receipts are only provided - // by the test client in that case. - let block_hashes: Vec = (0..1000) - .map(|i| provider.client.block_header(BlockId::Number(i)).unwrap().hash()) - .filter(|hash| format!("{}", hash).starts_with("0xf")) - .take(10) - .collect(); + // find the first 10 block hashes starting with `f` because receipts are only provided + // by the test client in that case. + let block_hashes: Vec = (0..1000) + .map(|i| { + provider + .client + .block_header(BlockId::Number(i)) + .unwrap() + .hash() + }) + .filter(|hash| format!("{}", hash).starts_with("0xf")) + .take(10) + .collect(); - let mut builder = Builder::default(); - let mut receipts = Vec::new(); - for hash in block_hashes.iter().cloned() { - builder.push(Request::Receipts(IncompleteReceiptsRequest { hash: hash.into() })).unwrap(); - receipts.push(Response::Receipts(provider.client.block_receipts(CompleteReceiptsRequest { - hash: hash - }).unwrap())); - } + let mut builder = Builder::default(); + let mut receipts = Vec::new(); + for hash in block_hashes.iter().cloned() { + builder + .push(Request::Receipts(IncompleteReceiptsRequest { + hash: hash.into(), + })) + .unwrap(); + receipts.push(Response::Receipts( + provider + .client + .block_receipts(CompleteReceiptsRequest { hash: hash }) + .unwrap(), + )); + } - let req_id = 111; - let requests = builder.build(); - let request_body = make_packet(req_id, &requests); + let req_id = 111; + let requests = builder.build(); + let request_body = make_packet(req_id, &requests); - let response = { - assert_eq!(receipts.len(), 10); + let response = { + assert_eq!(receipts.len(), 10); - let new_creds = *flow_params.limit() - 
flow_params.compute_cost_multi(requests.requests()).unwrap(); + let new_creds = + *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); - let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).append_list(&receipts); - response_stream.out() - }; + let mut response_stream = RlpStream::new_list(3); + response_stream + .append(&req_id) + .append(&new_creds) + .append_list(&receipts); + response_stream.out() + }; - let expected = Expect::Respond(packet::RESPONSE, response); - proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); } #[test] fn get_state_proofs() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); - let flow_params = proto.flow_params.read().clone(); + let (provider, proto) = setup(capabilities); + let flow_params = proto.flow_params.read().clone(); - let provider = TestProvider(provider); + let provider = TestProvider(provider); - let cur_status = status(provider.0.client.chain_info()); + let cur_status = status(provider.0.client.chain_info()); - { - let packet_body = write_handshake(&cur_status, &capabilities, &proto); - proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone())); - proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); - } + { + let packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone())); + proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); + } - let req_id = 112; - let key1: H256 = U256::from(11223344).into(); - let key2: H256 = U256::from(99988887).into(); + let req_id = 112; + let key1: H256 = U256::from(11223344).into(); + let key2: H256 = U256::from(99988887).into(); - let mut builder = 
Builder::default(); - builder.push(Request::Account(IncompleteAccountRequest { - block_hash: H256::default().into(), - address_hash: key1.into(), - })).unwrap(); - builder.push(Request::Storage(IncompleteStorageRequest { - block_hash: H256::default().into(), - address_hash: key1.into(), - key_hash: key2.into(), - })).unwrap(); + let mut builder = Builder::default(); + builder + .push(Request::Account(IncompleteAccountRequest { + block_hash: H256::default().into(), + address_hash: key1.into(), + })) + .unwrap(); + builder + .push(Request::Storage(IncompleteStorageRequest { + block_hash: H256::default().into(), + address_hash: key1.into(), + key_hash: key2.into(), + })) + .unwrap(); - let requests = builder.build(); + let requests = builder.build(); - let request_body = make_packet(req_id, &requests); - let response = { - let responses = vec![ - Response::Account(provider.account_proof(CompleteAccountRequest { - block_hash: H256::default(), - address_hash: key1, - }).unwrap()), - Response::Storage(provider.storage_proof(CompleteStorageRequest { - block_hash: H256::default(), - address_hash: key1, - key_hash: key2, - }).unwrap()), - ]; + let request_body = make_packet(req_id, &requests); + let response = { + let responses = vec![ + Response::Account( + provider + .account_proof(CompleteAccountRequest { + block_hash: H256::default(), + address_hash: key1, + }) + .unwrap(), + ), + Response::Storage( + provider + .storage_proof(CompleteStorageRequest { + block_hash: H256::default(), + address_hash: key1, + key_hash: key2, + }) + .unwrap(), + ), + ]; - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); + let new_creds = + *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); - let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).append_list(&responses); - response_stream.out() - }; + let mut response_stream = RlpStream::new_list(3); + 
response_stream + .append(&req_id) + .append(&new_creds) + .append_list(&responses); + response_stream.out() + }; - let expected = Expect::Respond(packet::RESPONSE, response); - proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); } #[test] fn get_contract_code() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); - let flow_params = proto.flow_params.read().clone(); + let (provider, proto) = setup(capabilities); + let flow_params = proto.flow_params.read().clone(); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.client.chain_info()); - { - let packet_body = write_handshake(&cur_status, &capabilities, &proto); - proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone())); - proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); - } + { + let packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone())); + proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); + } - let req_id = 112; - let key1: H256 = U256::from(11223344).into(); - let key2: H256 = U256::from(99988887).into(); + let req_id = 112; + let key1: H256 = U256::from(11223344).into(); + let key2: H256 = U256::from(99988887).into(); - let request = Request::Code(IncompleteCodeRequest { - block_hash: key1.into(), - code_hash: key2.into(), - }); + let request = Request::Code(IncompleteCodeRequest { + block_hash: key1.into(), + code_hash: key2.into(), + }); - let requests = encode_single(request.clone()); - let request_body = make_packet(req_id, &requests); - let response = { - let response = vec![Response::Code(CodeResponse { - code: key1.iter().chain(key2.iter()).cloned().collect(), - })]; + let requests = 
encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let response = { + let response = vec![Response::Code(CodeResponse { + code: key1.iter().chain(key2.iter()).cloned().collect(), + })]; - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); + let new_creds = + *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); - let mut response_stream = RlpStream::new_list(3); + let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).append_list(&response); - response_stream.out() - }; + response_stream + .append(&req_id) + .append(&new_creds) + .append_list(&response); + response_stream.out() + }; - let expected = Expect::Respond(packet::RESPONSE, response); - proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); } #[test] fn epoch_signal() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); - let flow_params = proto.flow_params.read().clone(); + let (provider, proto) = setup(capabilities); + let flow_params = proto.flow_params.read().clone(); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.client.chain_info()); - { - let packet_body = write_handshake(&cur_status, &capabilities, &proto); - proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone())); - proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); - } + { + let packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone())); + proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); + } - let req_id = 112; - let request = Request::Signal(request::IncompleteSignalRequest { - 
block_hash: H256([1; 32]).into(), - }); + let req_id = 112; + let request = Request::Signal(request::IncompleteSignalRequest { + block_hash: H256([1; 32]).into(), + }); - let requests = encode_single(request.clone()); - let request_body = make_packet(req_id, &requests); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); - let response = { - let response = vec![Response::Signal(SignalResponse { - signal: vec![1, 2, 3, 4], - })]; + let response = { + let response = vec![Response::Signal(SignalResponse { + signal: vec![1, 2, 3, 4], + })]; - let limit = *flow_params.limit(); - let cost = flow_params.compute_cost_multi(requests.requests()).unwrap(); + let limit = *flow_params.limit(); + let cost = flow_params.compute_cost_multi(requests.requests()).unwrap(); - let new_creds = limit - cost; + let new_creds = limit - cost; - let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).append_list(&response); + let mut response_stream = RlpStream::new_list(3); + response_stream + .append(&req_id) + .append(&new_creds) + .append_list(&response); - response_stream.out() - }; + response_stream.out() + }; - let expected = Expect::Respond(packet::RESPONSE, response); - proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); } #[test] fn proof_of_execution() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); - let flow_params = proto.flow_params.read().clone(); + let (provider, proto) = setup(capabilities); + let flow_params = proto.flow_params.read().clone(); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.client.chain_info()); - { - let packet_body = write_handshake(&cur_status, &capabilities, &proto); - proto.on_connect(1, 
&Expect::Send(1, packet::STATUS, packet_body.clone())); - proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); - } + { + let packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone())); + proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); + } - let req_id = 112; - let mut request = Request::Execution(request::IncompleteExecutionRequest { - block_hash: H256::default().into(), - from: Address::default(), - action: Action::Call(Address::default()), - gas: 100.into(), - gas_price: 0.into(), - value: 0.into(), - data: Vec::new(), - }); + let req_id = 112; + let mut request = Request::Execution(request::IncompleteExecutionRequest { + block_hash: H256::default().into(), + from: Address::default(), + action: Action::Call(Address::default()), + gas: 100.into(), + gas_price: 0.into(), + value: 0.into(), + data: Vec::new(), + }); - // first: a valid amount to request execution of. - let requests = encode_single(request.clone()); - let request_body = make_packet(req_id, &requests); + // first: a valid amount to request execution of. 
+ let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); - let response = { - let limit = *flow_params.limit(); - let cost = flow_params.compute_cost_multi(requests.requests()).unwrap(); + let response = { + let limit = *flow_params.limit(); + let cost = flow_params.compute_cost_multi(requests.requests()).unwrap(); - let new_creds = limit - cost; + let new_creds = limit - cost; - let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).begin_list(0); + let mut response_stream = RlpStream::new_list(3); + response_stream + .append(&req_id) + .append(&new_creds) + .begin_list(0); - response_stream.out() - }; + response_stream.out() + }; - let expected = Expect::Respond(packet::RESPONSE, response); - proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); - // next: way too much requested gas. - if let Request::Execution(ref mut req) = request { - req.gas = 100_000_000.into(); - } - let req_id = 113; - let requests = encode_single(request.clone()); - let request_body = make_packet(req_id, &requests); + // next: way too much requested gas. 
+ if let Request::Execution(ref mut req) = request { + req.gas = 100_000_000.into(); + } + let req_id = 113; + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); - let expected = Expect::Punish(1); - proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); + let expected = Expect::Punish(1); + proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); } #[test] fn id_guard() { - use super::request_set::RequestSet; - use super::ReqId; + use super::{request_set::RequestSet, ReqId}; - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); - let flow_params = proto.flow_params.read().clone(); + let (provider, proto) = setup(capabilities); + let flow_params = proto.flow_params.read().clone(); - let req_id_1 = ReqId(5143); - let req_id_2 = ReqId(1111); + let req_id_1 = ReqId(5143); + let req_id_2 = ReqId(1111); - let req = encode_single(Request::Headers(IncompleteHeadersRequest { - start: HashOrNumber::Number(5u64).into(), - max: 100, - skip: 0, - reverse: false, - })); + let req = encode_single(Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(5u64).into(), + max: 100, + skip: 0, + reverse: false, + })); - let peer_id = 9876; + let peer_id = 9876; - let mut pending_requests = RequestSet::default(); + let mut pending_requests = RequestSet::default(); - pending_requests.insert(req_id_1, req.clone(), 0.into(), Instant::now()); - pending_requests.insert(req_id_2, req, 1.into(), Instant::now()); + pending_requests.insert(req_id_1, req.clone(), 0.into(), Instant::now()); + pending_requests.insert(req_id_2, req, 1.into(), Instant::now()); - proto.peers.write().insert(peer_id, ::parking_lot::Mutex::new(Peer { - local_credits: flow_params.create_credits(), - status: status(provider.client.chain_info()), - capabilities, - remote_flow: Some((flow_params.create_credits(), (&*flow_params).clone())), - sent_head: 
provider.client.chain_info().best_block_hash, - last_update: Instant::now(), - pending_requests: pending_requests, - failed_requests: Vec::new(), - propagated_transactions: Default::default(), - skip_update: false, - local_flow: flow_params, - awaiting_acknowledge: None, - })); + proto.peers.write().insert( + peer_id, + ::parking_lot::Mutex::new(Peer { + local_credits: flow_params.create_credits(), + status: status(provider.client.chain_info()), + capabilities, + remote_flow: Some((flow_params.create_credits(), (&*flow_params).clone())), + sent_head: provider.client.chain_info().best_block_hash, + last_update: Instant::now(), + pending_requests: pending_requests, + failed_requests: Vec::new(), + propagated_transactions: Default::default(), + skip_update: false, + local_flow: flow_params, + awaiting_acknowledge: None, + }), + ); - // first, malformed responses. - { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id_1.0); - stream.append(&4_000_000_usize); - stream.begin_list(2).append(&125_usize).append(&3_usize); + // first, malformed responses. + { + let mut stream = RlpStream::new_list(3); + stream.append(&req_id_1.0); + stream.append(&4_000_000_usize); + stream.begin_list(2).append(&125_usize).append(&3_usize); - let packet = stream.out(); - assert!(proto.response(peer_id, &Expect::Nothing, &Rlp::new(&packet)).is_err()); - } + let packet = stream.out(); + assert!(proto + .response(peer_id, &Expect::Nothing, &Rlp::new(&packet)) + .is_err()); + } - // next, do an unexpected response. - { - let mut stream = RlpStream::new_list(3); - stream.append(&10000_usize); - stream.append(&3_000_000_usize); - stream.begin_list(0); + // next, do an unexpected response. 
+ { + let mut stream = RlpStream::new_list(3); + stream.append(&10000_usize); + stream.append(&3_000_000_usize); + stream.begin_list(0); - let packet = stream.out(); - assert!(proto.response(peer_id, &Expect::Nothing, &Rlp::new(&packet)).is_err()); - } + let packet = stream.out(); + assert!(proto + .response(peer_id, &Expect::Nothing, &Rlp::new(&packet)) + .is_err()); + } - // lastly, do a valid (but empty) response. - { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id_2.0); - stream.append(&3_000_000_usize); - stream.begin_list(0); + // lastly, do a valid (but empty) response. + { + let mut stream = RlpStream::new_list(3); + stream.append(&req_id_2.0); + stream.append(&3_000_000_usize); + stream.begin_list(0); - let packet = stream.out(); - assert!(proto.response(peer_id, &Expect::Nothing, &Rlp::new(&packet)).is_ok()); - } + let packet = stream.out(); + assert!(proto + .response(peer_id, &Expect::Nothing, &Rlp::new(&packet)) + .is_ok()); + } - let peers = proto.peers.read(); - if let Some(ref peer_info) = peers.get(&peer_id) { - let peer_info = peer_info.lock(); - assert!(peer_info.pending_requests.collect_ids::>().is_empty()); - assert_eq!(peer_info.failed_requests, &[req_id_1]); - } + let peers = proto.peers.read(); + if let Some(ref peer_info) = peers.get(&peer_id) { + let peer_info = peer_info.lock(); + assert!(peer_info + .pending_requests + .collect_ids::>() + .is_empty()); + assert_eq!(peer_info.failed_requests, &[req_id_1]); + } } #[test] fn get_transaction_index() { - let capabilities = capabilities(); + let capabilities = capabilities(); - let (provider, proto) = setup(capabilities); - let flow_params = proto.flow_params.read().clone(); + let (provider, proto) = setup(capabilities); + let flow_params = proto.flow_params.read().clone(); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.client.chain_info()); - { - let packet_body = write_handshake(&cur_status, &capabilities, &proto); - 
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone())); - proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); - } + { + let packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone())); + proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body); + } - let req_id = 112; - let key1: H256 = U256::from(11223344).into(); + let req_id = 112; + let key1: H256 = U256::from(11223344).into(); - let request = Request::TransactionIndex(IncompleteTransactionIndexRequest { - hash: key1.into(), - }); + let request = + Request::TransactionIndex(IncompleteTransactionIndexRequest { hash: key1.into() }); - let requests = encode_single(request.clone()); - let request_body = make_packet(req_id, &requests); - let response = { - let response = vec![Response::TransactionIndex(TransactionIndexResponse { - num: 100, - hash: key1, - index: 55, - })]; + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let response = { + let response = vec![Response::TransactionIndex(TransactionIndexResponse { + num: 100, + hash: key1, + index: 55, + })]; - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); + let new_creds = + *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); - let mut response_stream = RlpStream::new_list(3); + let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).append_list(&response); - response_stream.out() - }; + response_stream + .append(&req_id) + .append(&new_creds) + .append_list(&response); + response_stream.out() + }; - let expected = Expect::Respond(packet::RESPONSE, response); - proto.handle_packet(&expected, 1, packet::REQUEST, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, 1, packet::REQUEST, 
&request_body); } #[test] fn sync_statistics() { - let mut stats = Statistics::new(); + let mut stats = Statistics::new(); - // Empty set should return 1.0 - assert_eq!(stats.avg_peer_count(), 1.0); + // Empty set should return 1.0 + assert_eq!(stats.avg_peer_count(), 1.0); - // Average < 1.0 should return 1.0 - stats.add_peer_count(0); - assert_eq!(stats.avg_peer_count(), 1.0); + // Average < 1.0 should return 1.0 + stats.add_peer_count(0); + assert_eq!(stats.avg_peer_count(), 1.0); - stats = Statistics::new(); + stats = Statistics::new(); - const N: f64 = 50.0; + const N: f64 = 50.0; - for i in 1..(N as usize + 1) { - stats.add_peer_count(i); - } + for i in 1..(N as usize + 1) { + stats.add_peer_count(i); + } - // Compute the average for the sum 1..N - assert_eq!(stats.avg_peer_count(), N * (N + 1.0) / 2.0 / N); + // Compute the average for the sum 1..N + assert_eq!(stats.avg_peer_count(), N * (N + 1.0) / 2.0 / N); - for _ in 1..(MOVING_SAMPLE_SIZE + 1) { - stats.add_peer_count(40); - } + for _ in 1..(MOVING_SAMPLE_SIZE + 1) { + stats.add_peer_count(40); + } - // Test that it returns the average of the last - // `MOVING_SAMPLE_SIZE` values - assert_eq!(stats.avg_peer_count(), 40.0); + // Test that it returns the average of the last + // `MOVING_SAMPLE_SIZE` values + assert_eq!(stats.avg_peer_count(), 40.0); } diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 7d1f4fabf..981edadd5 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -18,32 +18,30 @@ //! The request service is implemented using Futures. Higher level request handlers //! will take the raw data received here and extract meaningful results from it. 
-use std::cmp; -use std::collections::HashMap; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::Duration; +use std::{cmp, collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; -use futures::{Poll, Future, Async}; -use futures::sync::oneshot::{self, Receiver}; +use futures::{ + sync::oneshot::{self, Receiver}, + Async, Future, Poll, +}; use network::PeerId; -use parking_lot::{RwLock, Mutex}; -use rand; -use rand::Rng; +use parking_lot::{Mutex, RwLock}; +use rand::{self, Rng}; use net::{ - Handler, PeerStatus, Status, Capabilities, - Announcement, EventContext, BasicContext, ReqId, + Announcement, BasicContext, Capabilities, EventContext, Handler, PeerStatus, ReqId, Status, }; +use self::request::CheckedRequest; use cache::Cache; use request::{self as basic_request, Request as NetworkRequest}; -use self::request::CheckedRequest; +pub use self::{ + request::{Error as ValidityError, HeaderRef, Request, Response}, + request_guard::{Error as RequestError, RequestGuard}, + response_guard::{Error as ResponseGuardError, Inner as ResponseGuardInner, ResponseGuard}, +}; pub use ethcore::executed::ExecutionResult; -pub use self::request::{Request, Response, HeaderRef, Error as ValidityError}; -pub use self::request_guard::{RequestGuard, Error as RequestError}; -pub use self::response_guard::{ResponseGuard, Error as ResponseGuardError, Inner as ResponseGuardInner}; pub use types::request::ResponseError; #[cfg(test)] @@ -66,76 +64,80 @@ pub const DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS: usize = 1; /// OnDemand related errors pub mod error { - // Silence: `use of deprecated item 'std::error::Error::cause': replaced by Error::source, which can support downcasting` - // https://github.com/paritytech/parity-ethereum/issues/10302 - #![allow(deprecated)] + // Silence: `use of deprecated item 'std::error::Error::cause': replaced by Error::source, which can support downcasting` + // https://github.com/paritytech/parity-ethereum/issues/10302 + 
#![allow(deprecated)] - use futures::sync::oneshot::Canceled; + use futures::sync::oneshot::Canceled; - error_chain! { + error_chain! { - foreign_links { - ChannelCanceled(Canceled) #[doc = "Canceled oneshot channel"]; - } + foreign_links { + ChannelCanceled(Canceled) #[doc = "Canceled oneshot channel"]; + } - errors { - #[doc = "Timeout bad response"] - BadResponse(err: String) { - description("Max response evaluation time exceeded") - display("{}", err) - } + errors { + #[doc = "Timeout bad response"] + BadResponse(err: String) { + description("Max response evaluation time exceeded") + display("{}", err) + } - #[doc = "OnDemand requests limit exceeded"] - RequestLimit { - description("OnDemand request maximum backoff iterations exceeded") - display("OnDemand request maximum backoff iterations exceeded") - } - } - } + #[doc = "OnDemand requests limit exceeded"] + RequestLimit { + description("OnDemand request maximum backoff iterations exceeded") + display("OnDemand request maximum backoff iterations exceeded") + } + } + } } /// Public interface for performing network requests `OnDemand` pub trait OnDemandRequester: Send + Sync { - /// Submit a strongly-typed batch of requests. - /// - /// Fails if back-reference are not coherent. - fn request(&self, ctx: &BasicContext, requests: T) -> Result, basic_request::NoSuchOutput> - where - T: request::RequestAdapter; + /// Submit a strongly-typed batch of requests. + /// + /// Fails if back-reference are not coherent. + fn request( + &self, + ctx: &BasicContext, + requests: T, + ) -> Result, basic_request::NoSuchOutput> + where + T: request::RequestAdapter; - /// Submit a vector of requests to be processed together. - /// - /// Fails if back-references are not coherent. - /// The returned vector of responses will correspond to the requests exactly. - fn request_raw(&self, ctx: &BasicContext, requests: Vec) - -> Result, basic_request::NoSuchOutput>; + /// Submit a vector of requests to be processed together. 
+ /// + /// Fails if back-references are not coherent. + /// The returned vector of responses will correspond to the requests exactly. + fn request_raw( + &self, + ctx: &BasicContext, + requests: Vec, + ) -> Result, basic_request::NoSuchOutput>; } - // relevant peer info. #[derive(Debug, Clone, PartialEq, Eq)] struct Peer { - status: Status, - capabilities: Capabilities, + status: Status, + capabilities: Capabilities, } impl Peer { - // whether this peer can fulfill the necessary capabilities for the given - // request. - fn can_fulfill(&self, request: &Capabilities) -> bool { - let local_caps = &self.capabilities; - let can_serve_since = |req, local| { - match (req, local) { - (Some(request_block), Some(serve_since)) => request_block >= serve_since, - (Some(_), None) => false, - (None, _) => true, - } - }; + // whether this peer can fulfill the necessary capabilities for the given + // request. + fn can_fulfill(&self, request: &Capabilities) -> bool { + let local_caps = &self.capabilities; + let can_serve_since = |req, local| match (req, local) { + (Some(request_block), Some(serve_since)) => request_block >= serve_since, + (Some(_), None) => false, + (None, _) => true, + }; - local_caps.serve_headers >= request.serve_headers && - can_serve_since(request.serve_chain_since, local_caps.serve_chain_since) && - can_serve_since(request.serve_state_since, local_caps.serve_state_since) - } + local_caps.serve_headers >= request.serve_headers + && can_serve_since(request.serve_chain_since, local_caps.serve_chain_since) + && can_serve_since(request.serve_state_since, local_caps.serve_state_since) + } } /// Either an array of responses or a single error. @@ -143,217 +145,235 @@ type PendingResponse = self::error::Result>; // Attempted request info and sender to put received value. 
struct Pending { - requests: basic_request::Batch, - net_requests: basic_request::Batch, - required_capabilities: Capabilities, - responses: Vec, - sender: oneshot::Sender, - request_guard: RequestGuard, - response_guard: ResponseGuard, + requests: basic_request::Batch, + net_requests: basic_request::Batch, + required_capabilities: Capabilities, + responses: Vec, + sender: oneshot::Sender, + request_guard: RequestGuard, + response_guard: ResponseGuard, } impl Pending { - // answer as many of the given requests from the supplied cache as possible. - // TODO: support re-shuffling. - fn answer_from_cache(&mut self, cache: &Mutex) { - while !self.requests.is_complete() { - let idx = self.requests.num_answered(); - match self.requests[idx].respond_local(cache) { - Some(response) => { - self.requests.supply_response_unchecked(&response); + // answer as many of the given requests from the supplied cache as possible. + // TODO: support re-shuffling. + fn answer_from_cache(&mut self, cache: &Mutex) { + while !self.requests.is_complete() { + let idx = self.requests.num_answered(); + match self.requests[idx].respond_local(cache) { + Some(response) => { + self.requests.supply_response_unchecked(&response); - // update header and back-references after each from-cache - // response to ensure that the requests are left in a consistent - // state and increase the likelihood of being able to answer - // the next request from cache. - self.update_header_refs(idx, &response); - self.fill_unanswered(); + // update header and back-references after each from-cache + // response to ensure that the requests are left in a consistent + // state and increase the likelihood of being able to answer + // the next request from cache. 
+ self.update_header_refs(idx, &response); + self.fill_unanswered(); - self.responses.push(response); - } - None => break, - } - } - } + self.responses.push(response); + } + None => break, + } + } + } - // update header refs if the given response contains a header future requests require for - // verification. - // `idx` is the index of the request the response corresponds to. - fn update_header_refs(&mut self, idx: usize, response: &Response) { - if let Response::HeaderByHash(ref hdr) = *response { - // fill the header for all requests waiting on this one. - // TODO: could be faster if we stored a map usize => Vec - // but typical use just has one header request that others - // depend on. - for r in self.requests.iter_mut().skip(idx + 1) { - if r.needs_header().map_or(false, |(i, _)| i == idx) { - r.provide_header(hdr.clone()) - } - } - } - } + // update header refs if the given response contains a header future requests require for + // verification. + // `idx` is the index of the request the response corresponds to. + fn update_header_refs(&mut self, idx: usize, response: &Response) { + if let Response::HeaderByHash(ref hdr) = *response { + // fill the header for all requests waiting on this one. + // TODO: could be faster if we stored a map usize => Vec + // but typical use just has one header request that others + // depend on. + for r in self.requests.iter_mut().skip(idx + 1) { + if r.needs_header().map_or(false, |(i, _)| i == idx) { + r.provide_header(hdr.clone()) + } + } + } + } - // supply a response. - fn supply_response(&mut self, cache: &Mutex, response: &basic_request::Response) - -> Result<(), basic_request::ResponseError> - { - match self.requests.supply_response(&cache, response) { - Ok(response) => { - let idx = self.responses.len(); - self.update_header_refs(idx, &response); - self.responses.push(response); - Ok(()) - } - Err(e) => Err(e), - } - } + // supply a response. 
+ fn supply_response( + &mut self, + cache: &Mutex, + response: &basic_request::Response, + ) -> Result<(), basic_request::ResponseError> { + match self.requests.supply_response(&cache, response) { + Ok(response) => { + let idx = self.responses.len(); + self.update_header_refs(idx, &response); + self.responses.push(response); + Ok(()) + } + Err(e) => Err(e), + } + } - // if the requests are complete, send the result and consume self. - fn try_complete(self) -> Option { - if self.requests.is_complete() { - if self.sender.send(Ok(self.responses)).is_err() { - debug!(target: "on_demand", "Dropped oneshot channel receiver on request"); - } - None - } else { - Some(self) - } - } + // if the requests are complete, send the result and consume self. + fn try_complete(self) -> Option { + if self.requests.is_complete() { + if self.sender.send(Ok(self.responses)).is_err() { + debug!(target: "on_demand", "Dropped oneshot channel receiver on request"); + } + None + } else { + Some(self) + } + } - fn fill_unanswered(&mut self) { - self.requests.fill_unanswered(); - } + fn fill_unanswered(&mut self) { + self.requests.fill_unanswered(); + } - // update the cached network requests. - fn update_net_requests(&mut self) { - use request::IncompleteRequest; + // update the cached network requests. 
+ fn update_net_requests(&mut self) { + use request::IncompleteRequest; - let mut builder = basic_request::Builder::default(); - let num_answered = self.requests.num_answered(); - let mut mapping = move |idx| idx - num_answered; + let mut builder = basic_request::Builder::default(); + let num_answered = self.requests.num_answered(); + let mut mapping = move |idx| idx - num_answered; - for request in self.requests.iter().skip(num_answered) { - let mut net_req = request.clone().into_net_request(); + for request in self.requests.iter().skip(num_answered) { + let mut net_req = request.clone().into_net_request(); - // all back-references with request index less than `num_answered` have - // been filled by now. all remaining requests point to nothing earlier - // than the next unanswered request. - net_req.adjust_refs(&mut mapping); - builder.push(net_req) - .expect("all back-references to answered requests have been filled; qed"); - } + // all back-references with request index less than `num_answered` have + // been filled by now. all remaining requests point to nothing earlier + // than the next unanswered request. + net_req.adjust_refs(&mut mapping); + builder + .push(net_req) + .expect("all back-references to answered requests have been filled; qed"); + } - // update pending fields. - let capabilities = guess_capabilities(&self.requests[num_answered..]); - self.net_requests = builder.build(); - self.required_capabilities = capabilities; - } + // update pending fields. 
+ let capabilities = guess_capabilities(&self.requests[num_answered..]); + self.net_requests = builder.build(); + self.required_capabilities = capabilities; + } - // received too many empty responses, may be away to indicate a faulty request - fn bad_response(self, response_err: ResponseGuardError) { - let reqs: Vec<&str> = self.requests.requests().iter().map(|req| { - match req { - CheckedRequest::HeaderProof(_, _) => "HeaderProof", - CheckedRequest::HeaderByHash(_, _) => "HeaderByHash", - CheckedRequest::HeaderWithAncestors(_, _) => "HeaderWithAncestors", - CheckedRequest::TransactionIndex(_, _) => "TransactionIndex", - CheckedRequest::Receipts(_, _) => "Receipts", - CheckedRequest::Body(_, _) => "Body", - CheckedRequest::Account(_, _) => "Account", - CheckedRequest::Code(_, _) => "Code", - CheckedRequest::Execution(_, _) => "Execution", - CheckedRequest::Signal(_, _) => "Signal", - } - }).collect(); + // received too many empty responses, may be away to indicate a faulty request + fn bad_response(self, response_err: ResponseGuardError) { + let reqs: Vec<&str> = self + .requests + .requests() + .iter() + .map(|req| match req { + CheckedRequest::HeaderProof(_, _) => "HeaderProof", + CheckedRequest::HeaderByHash(_, _) => "HeaderByHash", + CheckedRequest::HeaderWithAncestors(_, _) => "HeaderWithAncestors", + CheckedRequest::TransactionIndex(_, _) => "TransactionIndex", + CheckedRequest::Receipts(_, _) => "Receipts", + CheckedRequest::Body(_, _) => "Body", + CheckedRequest::Account(_, _) => "Account", + CheckedRequest::Code(_, _) => "Code", + CheckedRequest::Execution(_, _) => "Execution", + CheckedRequest::Signal(_, _) => "Signal", + }) + .collect(); - let err = format!("Bad response on {}: [ {} ]. {}", - if reqs.len() > 1 { "requests" } else { "request" }, - reqs.join(", "), - response_err - ); + let err = format!( + "Bad response on {}: [ {} ]. 
{}", + if reqs.len() > 1 { + "requests" + } else { + "request" + }, + reqs.join(", "), + response_err + ); - let err = self::error::ErrorKind::BadResponse(err); - if self.sender.send(Err(err.into())).is_err() { - debug!(target: "on_demand", "Dropped oneshot channel receiver on no response"); - } - } + let err = self::error::ErrorKind::BadResponse(err); + if self.sender.send(Err(err.into())).is_err() { + debug!(target: "on_demand", "Dropped oneshot channel receiver on no response"); + } + } - // returning a peer discovery timeout during query attempts - fn request_limit_reached(self) { - let err = self::error::ErrorKind::RequestLimit; - if self.sender.send(Err(err.into())).is_err() { - debug!(target: "on_demand", "Dropped oneshot channel receiver on time out"); - } - } + // returning a peer discovery timeout during query attempts + fn request_limit_reached(self) { + let err = self::error::ErrorKind::RequestLimit; + if self.sender.send(Err(err.into())).is_err() { + debug!(target: "on_demand", "Dropped oneshot channel receiver on time out"); + } + } } // helper to guess capabilities required for a given batch of network requests. fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities { - let mut caps = Capabilities { - serve_headers: false, - serve_chain_since: None, - serve_state_since: None, - tx_relay: false, - }; + let mut caps = Capabilities { + serve_headers: false, + serve_chain_since: None, + serve_state_since: None, + tx_relay: false, + }; - let update_since = |current: &mut Option, new| - *current = match *current { - Some(x) => Some(::std::cmp::min(x, new)), - None => Some(new), - }; + let update_since = |current: &mut Option, new| { + *current = match *current { + Some(x) => Some(::std::cmp::min(x, new)), + None => Some(new), + } + }; - for request in requests { - match *request { - // TODO: might be worth returning a required block number for this also. 
- CheckedRequest::HeaderProof(_, _) => - caps.serve_headers = true, - CheckedRequest::HeaderByHash(_, _) => - caps.serve_headers = true, - CheckedRequest::HeaderWithAncestors(_, _) => - caps.serve_headers = true, - CheckedRequest::TransactionIndex(_, _) => {} // hashes yield no info. - CheckedRequest::Signal(_, _) => - caps.serve_headers = true, - CheckedRequest::Body(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() { - update_since(&mut caps.serve_chain_since, hdr.number()); - }, - CheckedRequest::Receipts(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() { - update_since(&mut caps.serve_chain_since, hdr.number()); - }, - CheckedRequest::Account(ref req, _) => if let Ok(ref hdr) = req.header.as_ref() { - update_since(&mut caps.serve_state_since, hdr.number()); - }, - CheckedRequest::Code(ref req, _) => if let Ok(ref hdr) = req.header.as_ref() { - update_since(&mut caps.serve_state_since, hdr.number()); - }, - CheckedRequest::Execution(ref req, _) => if let Ok(ref hdr) = req.header.as_ref() { - update_since(&mut caps.serve_state_since, hdr.number()); - }, - } - } + for request in requests { + match *request { + // TODO: might be worth returning a required block number for this also. + CheckedRequest::HeaderProof(_, _) => caps.serve_headers = true, + CheckedRequest::HeaderByHash(_, _) => caps.serve_headers = true, + CheckedRequest::HeaderWithAncestors(_, _) => caps.serve_headers = true, + CheckedRequest::TransactionIndex(_, _) => {} // hashes yield no info. 
+ CheckedRequest::Signal(_, _) => caps.serve_headers = true, + CheckedRequest::Body(ref req, _) => { + if let Ok(ref hdr) = req.0.as_ref() { + update_since(&mut caps.serve_chain_since, hdr.number()); + } + } + CheckedRequest::Receipts(ref req, _) => { + if let Ok(ref hdr) = req.0.as_ref() { + update_since(&mut caps.serve_chain_since, hdr.number()); + } + } + CheckedRequest::Account(ref req, _) => { + if let Ok(ref hdr) = req.header.as_ref() { + update_since(&mut caps.serve_state_since, hdr.number()); + } + } + CheckedRequest::Code(ref req, _) => { + if let Ok(ref hdr) = req.header.as_ref() { + update_since(&mut caps.serve_state_since, hdr.number()); + } + } + CheckedRequest::Execution(ref req, _) => { + if let Ok(ref hdr) = req.header.as_ref() { + update_since(&mut caps.serve_state_since, hdr.number()); + } + } + } + } - caps + caps } /// A future extracting the concrete output type of the generic adapter /// from a vector of responses. pub struct OnResponses { - receiver: Receiver, - _marker: PhantomData, + receiver: Receiver, + _marker: PhantomData, } impl Future for OnResponses { - type Item = T::Out; - type Error = self::error::Error; + type Item = T::Out; + type Error = self::error::Error; - fn poll(&mut self) -> Poll { - match self.receiver.poll() { - Ok(Async::Ready(Ok(v))) => Ok(Async::Ready(T::extract_from(v))), - Ok(Async::Ready(Err(e))) => Err(e), - Ok(Async::NotReady) => Ok(Async::NotReady), - Err(e) => Err(e.into()), - } - } + fn poll(&mut self) -> Poll { + match self.receiver.poll() { + Ok(Async::Ready(Ok(v))) => Ok(Async::Ready(T::extract_from(v))), + Ok(Async::Ready(Err(e))) => Err(e), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(e) => Err(e.into()), + } + } } /// On demand request service. See module docs for more details. @@ -361,169 +381,188 @@ impl Future for OnResponses { /// requests to them accordingly. // lock in declaration order. 
pub struct OnDemand { - pending: RwLock>, - peers: RwLock>, - in_transit: RwLock>, - cache: Arc>, - no_immediate_dispatch: bool, - response_time_window: Duration, - request_backoff_start: Duration, - request_backoff_max: Duration, - request_backoff_rounds_max: usize, - request_number_of_consecutive_errors: usize + pending: RwLock>, + peers: RwLock>, + in_transit: RwLock>, + cache: Arc>, + no_immediate_dispatch: bool, + response_time_window: Duration, + request_backoff_start: Duration, + request_backoff_max: Duration, + request_backoff_rounds_max: usize, + request_number_of_consecutive_errors: usize, } impl OnDemandRequester for OnDemand { - fn request_raw(&self, ctx: &BasicContext, requests: Vec) - -> Result, basic_request::NoSuchOutput> - { - let (sender, receiver) = oneshot::channel(); - if requests.is_empty() { - assert!(sender.send(Ok(Vec::new())).is_ok(), "receiver still in scope; qed"); - return Ok(receiver); - } + fn request_raw( + &self, + ctx: &BasicContext, + requests: Vec, + ) -> Result, basic_request::NoSuchOutput> { + let (sender, receiver) = oneshot::channel(); + if requests.is_empty() { + assert!( + sender.send(Ok(Vec::new())).is_ok(), + "receiver still in scope; qed" + ); + return Ok(receiver); + } - let mut builder = basic_request::Builder::default(); + let mut builder = basic_request::Builder::default(); - let responses = Vec::with_capacity(requests.len()); + let responses = Vec::with_capacity(requests.len()); - let mut header_producers = HashMap::new(); - for (i, request) in requests.into_iter().enumerate() { - let request = CheckedRequest::from(request); + let mut header_producers = HashMap::new(); + for (i, request) in requests.into_iter().enumerate() { + let request = CheckedRequest::from(request); - // ensure that all requests needing headers will get them. 
- if let Some((idx, field)) = request.needs_header() { - // a request chain with a header back-reference is valid only if it both - // points to a request that returns a header and has the same back-reference - // for the block hash. - match header_producers.get(&idx) { - Some(ref f) if &field == *f => {} - _ => return Err(basic_request::NoSuchOutput), - } - } - if let CheckedRequest::HeaderByHash(ref req, _) = request { - header_producers.insert(i, req.0); - } + // ensure that all requests needing headers will get them. + if let Some((idx, field)) = request.needs_header() { + // a request chain with a header back-reference is valid only if it both + // points to a request that returns a header and has the same back-reference + // for the block hash. + match header_producers.get(&idx) { + Some(ref f) if &field == *f => {} + _ => return Err(basic_request::NoSuchOutput), + } + } + if let CheckedRequest::HeaderByHash(ref req, _) = request { + header_producers.insert(i, req.0); + } - builder.push(request)?; - } + builder.push(request)?; + } - let requests = builder.build(); - let net_requests = requests.clone().map_requests(|req| req.into_net_request()); - let capabilities = guess_capabilities(requests.requests()); + let requests = builder.build(); + let net_requests = requests.clone().map_requests(|req| req.into_net_request()); + let capabilities = guess_capabilities(requests.requests()); - self.submit_pending(ctx, Pending { - requests, - net_requests, - required_capabilities: capabilities, - responses, - sender, - request_guard: RequestGuard::new( - self.request_number_of_consecutive_errors as u32, - self.request_backoff_rounds_max, - self.request_backoff_start, - self.request_backoff_max, - ), - response_guard: ResponseGuard::new(self.response_time_window), - }); + self.submit_pending( + ctx, + Pending { + requests, + net_requests, + required_capabilities: capabilities, + responses, + sender, + request_guard: RequestGuard::new( + 
self.request_number_of_consecutive_errors as u32, + self.request_backoff_rounds_max, + self.request_backoff_start, + self.request_backoff_max, + ), + response_guard: ResponseGuard::new(self.response_time_window), + }, + ); - Ok(receiver) - } - - fn request(&self, ctx: &BasicContext, requests: T) -> Result, basic_request::NoSuchOutput> - where T: request::RequestAdapter - { - self.request_raw(ctx, requests.make_requests()).map(|recv| OnResponses { - receiver: recv, - _marker: PhantomData, - }) - } + Ok(receiver) + } + fn request( + &self, + ctx: &BasicContext, + requests: T, + ) -> Result, basic_request::NoSuchOutput> + where + T: request::RequestAdapter, + { + self.request_raw(ctx, requests.make_requests()) + .map(|recv| OnResponses { + receiver: recv, + _marker: PhantomData, + }) + } } impl OnDemand { + /// Create a new `OnDemand` service with the given cache. + pub fn new( + cache: Arc>, + response_time_window: Duration, + request_backoff_start: Duration, + request_backoff_max: Duration, + request_backoff_rounds_max: usize, + request_number_of_consecutive_errors: usize, + ) -> Self { + Self { + pending: RwLock::new(Vec::new()), + peers: RwLock::new(HashMap::new()), + in_transit: RwLock::new(HashMap::new()), + cache, + no_immediate_dispatch: false, + response_time_window: Self::sanitize_circuit_breaker_input( + response_time_window, + "Response time window", + ), + request_backoff_start: Self::sanitize_circuit_breaker_input( + request_backoff_start, + "Request initial backoff time window", + ), + request_backoff_max: Self::sanitize_circuit_breaker_input( + request_backoff_max, + "Request maximum backoff time window", + ), + request_backoff_rounds_max, + request_number_of_consecutive_errors, + } + } - /// Create a new `OnDemand` service with the given cache. 
- pub fn new( - cache: Arc>, - response_time_window: Duration, - request_backoff_start: Duration, - request_backoff_max: Duration, - request_backoff_rounds_max: usize, - request_number_of_consecutive_errors: usize, - ) -> Self { - - Self { - pending: RwLock::new(Vec::new()), - peers: RwLock::new(HashMap::new()), - in_transit: RwLock::new(HashMap::new()), - cache, - no_immediate_dispatch: false, - response_time_window: Self::sanitize_circuit_breaker_input(response_time_window, "Response time window"), - request_backoff_start: Self::sanitize_circuit_breaker_input(request_backoff_start, "Request initial backoff time window"), - request_backoff_max: Self::sanitize_circuit_breaker_input(request_backoff_max, "Request maximum backoff time window"), - request_backoff_rounds_max, - request_number_of_consecutive_errors, - } - } - - fn sanitize_circuit_breaker_input(dur: Duration, name: &'static str) -> Duration { - if dur.as_secs() < 1 { - warn!(target: "on_demand", + fn sanitize_circuit_breaker_input(dur: Duration, name: &'static str) -> Duration { + if dur.as_secs() < 1 { + warn!(target: "on_demand", "{} is too short must be at least 1 second, configuring it to 1 second", name); - Duration::from_secs(1) - } else { - dur - } - } + Duration::from_secs(1) + } else { + dur + } + } - // make a test version: this doesn't dispatch pending requests - // until you trigger it manually. - #[cfg(test)] - fn new_test( - cache: Arc>, - request_ttl: Duration, - request_backoff_start: Duration, - request_backoff_max: Duration, - request_backoff_rounds_max: usize, - request_number_of_consecutive_errors: usize, - ) -> Self { - let mut me = OnDemand::new( - cache, - request_ttl, - request_backoff_start, - request_backoff_max, - request_backoff_rounds_max, - request_number_of_consecutive_errors, - ); - me.no_immediate_dispatch = true; + // make a test version: this doesn't dispatch pending requests + // until you trigger it manually. 
+ #[cfg(test)] + fn new_test( + cache: Arc>, + request_ttl: Duration, + request_backoff_start: Duration, + request_backoff_max: Duration, + request_backoff_rounds_max: usize, + request_number_of_consecutive_errors: usize, + ) -> Self { + let mut me = OnDemand::new( + cache, + request_ttl, + request_backoff_start, + request_backoff_max, + request_backoff_rounds_max, + request_number_of_consecutive_errors, + ); + me.no_immediate_dispatch = true; - me - } + me + } + // maybe dispatch pending requests. + // sometimes + fn attempt_dispatch(&self, ctx: &BasicContext) { + if !self.no_immediate_dispatch { + self.dispatch_pending(ctx) + } + } - // maybe dispatch pending requests. - // sometimes - fn attempt_dispatch(&self, ctx: &BasicContext) { - if !self.no_immediate_dispatch { - self.dispatch_pending(ctx) - } - } + // dispatch pending requests, and discard those for which the corresponding + // receiver has been dropped. + fn dispatch_pending(&self, ctx: &BasicContext) { + if self.pending.read().is_empty() { + return; + } - // dispatch pending requests, and discard those for which the corresponding - // receiver has been dropped. - fn dispatch_pending(&self, ctx: &BasicContext) { - if self.pending.read().is_empty() { - return - } + let mut pending = self.pending.write(); - let mut pending = self.pending.write(); + // iterate over all pending requests, and check them for hang-up. + // then, try and find a peer who can serve it. + let peers = self.peers.read(); - // iterate over all pending requests, and check them for hang-up. - // then, try and find a peer who can serve it. 
- let peers = self.peers.read(); - - *pending = ::std::mem::replace(&mut *pending, Vec::new()) + *pending = ::std::mem::replace(&mut *pending, Vec::new()) .into_iter() .filter(|pending| !pending.sender.is_canceled()) .filter_map(|mut pending| { @@ -562,108 +601,119 @@ impl OnDemand { }) .collect(); // `pending` now contains all requests we couldn't dispatch - trace!(target: "on_demand", "Was unable to dispatch {} requests.", pending.len()); - } + trace!(target: "on_demand", "Was unable to dispatch {} requests.", pending.len()); + } - // submit a pending request set. attempts to answer from cache before - // going to the network. if complete, sends response and consumes the struct. - fn submit_pending(&self, ctx: &BasicContext, mut pending: Pending) { - // answer as many requests from cache as we can, and schedule for dispatch - // if incomplete. + // submit a pending request set. attempts to answer from cache before + // going to the network. if complete, sends response and consumes the struct. + fn submit_pending(&self, ctx: &BasicContext, mut pending: Pending) { + // answer as many requests from cache as we can, and schedule for dispatch + // if incomplete. 
- pending.answer_from_cache(&*self.cache); - if let Some(mut pending) = pending.try_complete() { - // update cached requests - pending.update_net_requests(); - // push into `pending` buffer - self.pending.write().push(pending); - // try to dispatch - self.attempt_dispatch(ctx); - } - } + pending.answer_from_cache(&*self.cache); + if let Some(mut pending) = pending.try_complete() { + // update cached requests + pending.update_net_requests(); + // push into `pending` buffer + self.pending.write().push(pending); + // try to dispatch + self.attempt_dispatch(ctx); + } + } } impl Handler for OnDemand { - fn on_connect( - &self, - ctx: &EventContext, - status: &Status, - capabilities: &Capabilities - ) -> PeerStatus { - self.peers.write().insert( - ctx.peer(), - Peer { status: status.clone(), capabilities: *capabilities } - ); - self.attempt_dispatch(ctx.as_basic()); - PeerStatus::Kept - } + fn on_connect( + &self, + ctx: &EventContext, + status: &Status, + capabilities: &Capabilities, + ) -> PeerStatus { + self.peers.write().insert( + ctx.peer(), + Peer { + status: status.clone(), + capabilities: *capabilities, + }, + ); + self.attempt_dispatch(ctx.as_basic()); + PeerStatus::Kept + } - fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) { - self.peers.write().remove(&ctx.peer()); - let ctx = ctx.as_basic(); + fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) { + self.peers.write().remove(&ctx.peer()); + let ctx = ctx.as_basic(); - { - let mut pending = self.pending.write(); - for unfulfilled in unfulfilled { - if let Some(unfulfilled) = self.in_transit.write().remove(unfulfilled) { - trace!(target: "on_demand", "Attempting to reassign dropped request"); - pending.push(unfulfilled); - } - } - } + { + let mut pending = self.pending.write(); + for unfulfilled in unfulfilled { + if let Some(unfulfilled) = self.in_transit.write().remove(unfulfilled) { + trace!(target: "on_demand", "Attempting to reassign dropped request"); + 
pending.push(unfulfilled); + } + } + } - self.attempt_dispatch(ctx); - } + self.attempt_dispatch(ctx); + } - fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { - { - let mut peers = self.peers.write(); - if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) { - peer.status.update_from(&announcement); - peer.capabilities.update_from(&announcement); - } - } + fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { + { + let mut peers = self.peers.write(); + if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) { + peer.status.update_from(&announcement); + peer.capabilities.update_from(&announcement); + } + } - self.attempt_dispatch(ctx.as_basic()); - } + self.attempt_dispatch(ctx.as_basic()); + } - fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) { - let mut pending = match self.in_transit.write().remove(&req_id) { - Some(req) => req, - None => return, - }; + fn on_responses( + &self, + ctx: &EventContext, + req_id: ReqId, + responses: &[basic_request::Response], + ) { + let mut pending = match self.in_transit.write().remove(&req_id) { + Some(req) => req, + None => return, + }; - if responses.is_empty() { - // Max number of `bad` responses reached, drop the request - if let Err(e) = pending.response_guard.register_error(&ResponseError::Validity(ValidityError::Empty)) { - pending.bad_response(e); - return; - } - } + if responses.is_empty() { + // Max number of `bad` responses reached, drop the request + if let Err(e) = pending + .response_guard + .register_error(&ResponseError::Validity(ValidityError::Empty)) + { + pending.bad_response(e); + return; + } + } - // for each incoming response - // 1. ensure verification data filled. - // 2. pending.requests.supply_response - // 3. if extracted on-demand response, keep it for later. 
- for response in responses { - if let Err(e) = pending.supply_response(&*self.cache, response) { - let peer = ctx.peer(); - debug!(target: "on_demand", "Peer {} gave bad response: {:?}", peer, e); - ctx.disable_peer(peer); + // for each incoming response + // 1. ensure verification data filled. + // 2. pending.requests.supply_response + // 3. if extracted on-demand response, keep it for later. + for response in responses { + if let Err(e) = pending.supply_response(&*self.cache, response) { + let peer = ctx.peer(); + debug!(target: "on_demand", "Peer {} gave bad response: {:?}", peer, e); + ctx.disable_peer(peer); - // Max number of `bad` responses reached, drop the request - if let Err(err) = pending.response_guard.register_error(&e) { - pending.bad_response(err); - return; - } - } - } + // Max number of `bad` responses reached, drop the request + if let Err(err) = pending.response_guard.register_error(&e) { + pending.bad_response(err); + return; + } + } + } - pending.fill_unanswered(); - self.submit_pending(ctx.as_basic(), pending); - } + pending.fill_unanswered(); + self.submit_pending(ctx.as_basic(), pending); + } - fn tick(&self, ctx: &BasicContext) { - self.attempt_dispatch(ctx) - } + fn tick(&self, ctx: &BasicContext) { + self.attempt_dispatch(ctx) + } } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index a183dcbca..7704903a2 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -16,130 +16,139 @@ //! Request types, verification, and verification errors. 
-use std::cmp; -use std::sync::Arc; +use std::{cmp, sync::Arc}; use bytes::Bytes; -use common_types::basic_account::BasicAccount; -use common_types::encoded; -use common_types::receipt::Receipt; -use common_types::transaction::SignedTransaction; -use ethcore::engines::{EthEngine, StateDependentProof}; -use ethcore::machine::EthereumMachine; -use ethcore::state::{self, ProvedExecution}; -use ethereum_types::{H256, U256, Address}; -use ethtrie::{TrieError, TrieDB}; -use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY, KECCAK_EMPTY_LIST_RLP, keccak}; +use common_types::{ + basic_account::BasicAccount, encoded, receipt::Receipt, transaction::SignedTransaction, +}; +use ethcore::{ + engines::{EthEngine, StateDependentProof}, + machine::EthereumMachine, + state::{self, ProvedExecution}, +}; +use ethereum_types::{Address, H256, U256}; +use ethtrie::{TrieDB, TrieError}; +use hash::{keccak, KECCAK_EMPTY, KECCAK_EMPTY_LIST_RLP, KECCAK_NULL_RLP}; use hash_db::HashDB; use kvdb::DBValue; use parking_lot::Mutex; -use request::{self as net_request, IncompleteRequest, CompleteRequest, Output, OutputKind, Field}; -use rlp::{RlpStream, Rlp}; +use request::{self as net_request, CompleteRequest, Field, IncompleteRequest, Output, OutputKind}; +use rlp::{Rlp, RlpStream}; use trie::Trie; use vm::EnvInfo; -const SUPPLIED_MATCHES: &str = "supplied responses always match produced requests; enforced by `check_response`; qed"; +const SUPPLIED_MATCHES: &str = + "supplied responses always match produced requests; enforced by `check_response`; qed"; /// Core unit of the API: submit batches of these to be answered with `Response`s. #[derive(Clone)] pub enum Request { - /// A request for a header proof. - HeaderProof(HeaderProof), - /// A request for a header by hash. - HeaderByHash(HeaderByHash), - /// A request for a header by hash with a range of its ancestors. - HeaderWithAncestors(HeaderWithAncestors), - /// A request for the index of a transaction. 
- TransactionIndex(TransactionIndex), - /// A request for block receipts. - Receipts(BlockReceipts), - /// A request for a block body. - Body(Body), - /// A request for an account. - Account(Account), - /// A request for a contract's code. - Code(Code), - /// A request for proof of execution. - Execution(TransactionProof), - /// A request for epoch change signal. - Signal(Signal), + /// A request for a header proof. + HeaderProof(HeaderProof), + /// A request for a header by hash. + HeaderByHash(HeaderByHash), + /// A request for a header by hash with a range of its ancestors. + HeaderWithAncestors(HeaderWithAncestors), + /// A request for the index of a transaction. + TransactionIndex(TransactionIndex), + /// A request for block receipts. + Receipts(BlockReceipts), + /// A request for a block body. + Body(Body), + /// A request for an account. + Account(Account), + /// A request for a contract's code. + Code(Code), + /// A request for proof of execution. + Execution(TransactionProof), + /// A request for epoch change signal. + Signal(Signal), } /// A request argument. pub trait RequestArg { - /// the response type. - type Out; + /// the response type. + type Out; - /// Create the request type. - /// `extract` must not fail when presented with the corresponding - /// `Response`. - fn make(self) -> Request; + /// Create the request type. + /// `extract` must not fail when presented with the corresponding + /// `Response`. + fn make(self) -> Request; - /// May not panic if the response corresponds with the request - /// from `make`. - /// Is free to panic otherwise. - fn extract(r: Response) -> Self::Out; + /// May not panic if the response corresponds with the request + /// from `make`. + /// Is free to panic otherwise. + fn extract(r: Response) -> Self::Out; } /// An adapter can be thought of as a grouping of request argument types. /// This is implemented for various tuples and convenient types. pub trait RequestAdapter { - /// The output type. 
- type Out; + /// The output type. + type Out; - /// Infallibly produce requests. When `extract_from` is presented - /// with the corresponding response vector, it may not fail. - fn make_requests(self) -> Vec; + /// Infallibly produce requests. When `extract_from` is presented + /// with the corresponding response vector, it may not fail. + fn make_requests(self) -> Vec; - /// Extract the output type from the given responses. - /// If they are the corresponding responses to the requests - /// made by `make_requests`, do not panic. - fn extract_from(Vec) -> Self::Out; + /// Extract the output type from the given responses. + /// If they are the corresponding responses to the requests + /// made by `make_requests`, do not panic. + fn extract_from(Vec) -> Self::Out; } impl RequestAdapter for Vec { - type Out = Vec; + type Out = Vec; - fn make_requests(self) -> Vec { - self.into_iter().map(RequestArg::make).collect() - } + fn make_requests(self) -> Vec { + self.into_iter().map(RequestArg::make).collect() + } - fn extract_from(r: Vec) -> Self::Out { - r.into_iter().map(T::extract).collect() - } + fn extract_from(r: Vec) -> Self::Out { + r.into_iter().map(T::extract).collect() + } } // helper to implement `RequestArg` and `From` for a single request kind. macro_rules! 
impl_single { - ($variant: ident, $me: ty, $out: ty) => { - impl RequestArg for $me { - type Out = $out; + ($variant: ident, $me: ty, $out: ty) => { + impl RequestArg for $me { + type Out = $out; - fn make(self) -> Request { - Request::$variant(self) - } + fn make(self) -> Request { + Request::$variant(self) + } - fn extract(r: Response) -> $out { - match r { - Response::$variant(x) => x, - _ => panic!(SUPPLIED_MATCHES), - } - } - } + fn extract(r: Response) -> $out { + match r { + Response::$variant(x) => x, + _ => panic!(SUPPLIED_MATCHES), + } + } + } - impl From<$me> for Request { - fn from(me: $me) -> Request { - Request::$variant(me) - } - } - } + impl From<$me> for Request { + fn from(me: $me) -> Request { + Request::$variant(me) + } + } + }; } // implement traits for each kind of request. impl_single!(HeaderProof, HeaderProof, (H256, U256)); impl_single!(HeaderByHash, HeaderByHash, encoded::Header); -impl_single!(HeaderWithAncestors, HeaderWithAncestors, Vec); -impl_single!(TransactionIndex, TransactionIndex, net_request::TransactionIndexResponse); +impl_single!( + HeaderWithAncestors, + HeaderWithAncestors, + Vec +); +impl_single!( + TransactionIndex, + TransactionIndex, + net_request::TransactionIndexResponse +); impl_single!(Receipts, BlockReceipts, Vec); impl_single!(Body, Body, encoded::Block); impl_single!(Account, Account, Option); @@ -191,55 +200,55 @@ macro_rules! impl_args { } mod impls { - #![allow(non_snake_case)] + #![allow(non_snake_case)] - use super::{RequestAdapter, RequestArg, Request, Response, SUPPLIED_MATCHES}; + use super::{Request, RequestAdapter, RequestArg, Response, SUPPLIED_MATCHES}; - impl_args!(A, B, C, D, E, F, G, H, I, J, K, L,); + impl_args!(A, B, C, D, E, F, G, H, I, J, K, L,); } /// A block header to be used for verification. /// May be stored or an unresolved output of a prior request. #[derive(Debug, Clone, PartialEq, Eq)] pub enum HeaderRef { - /// A stored header. - Stored(encoded::Header), - /// An unresolved header. 
The first item here is the index of the request which - /// will return the header. The second is a back-reference pointing to a block hash - /// which can be used to make requests until that header is resolved. - Unresolved(usize, Field), + /// A stored header. + Stored(encoded::Header), + /// An unresolved header. The first item here is the index of the request which + /// will return the header. The second is a back-reference pointing to a block hash + /// which can be used to make requests until that header is resolved. + Unresolved(usize, Field), } impl HeaderRef { - /// Attempt to inspect the header. - pub fn as_ref(&self) -> Result<&encoded::Header, Error> { - match *self { - HeaderRef::Stored(ref hdr) => Ok(hdr), - HeaderRef::Unresolved(idx, _) => Err(Error::UnresolvedHeader(idx)), - } - } + /// Attempt to inspect the header. + pub fn as_ref(&self) -> Result<&encoded::Header, Error> { + match *self { + HeaderRef::Stored(ref hdr) => Ok(hdr), + HeaderRef::Unresolved(idx, _) => Err(Error::UnresolvedHeader(idx)), + } + } - // get the blockhash field to be used in requests. - fn field(&self) -> Field { - match *self { - HeaderRef::Stored(ref hdr) => Field::Scalar(hdr.hash()), - HeaderRef::Unresolved(_, field) => field, - } - } + // get the blockhash field to be used in requests. + fn field(&self) -> Field { + match *self { + HeaderRef::Stored(ref hdr) => Field::Scalar(hdr.hash()), + HeaderRef::Unresolved(_, field) => field, + } + } - // yield the index of the request which will produce the header. - fn needs_header(&self) -> Option<(usize, Field)> { - match *self { - HeaderRef::Stored(_) => None, - HeaderRef::Unresolved(idx, field) => Some((idx, field)), - } - } + // yield the index of the request which will produce the header. 
+ fn needs_header(&self) -> Option<(usize, Field)> { + match *self { + HeaderRef::Stored(_) => None, + HeaderRef::Unresolved(idx, field) => Some((idx, field)), + } + } } impl From for HeaderRef { - fn from(header: encoded::Header) -> Self { - HeaderRef::Stored(header) - } + fn from(header: encoded::Header) -> Self { + HeaderRef::Stored(header) + } } /// Requests coupled with their required data for verification. @@ -247,614 +256,665 @@ impl From for HeaderRef { #[derive(Clone)] #[allow(missing_docs)] pub enum CheckedRequest { - HeaderProof(HeaderProof, net_request::IncompleteHeaderProofRequest), - HeaderByHash(HeaderByHash, net_request::IncompleteHeadersRequest), - HeaderWithAncestors(HeaderWithAncestors, net_request::IncompleteHeadersRequest), - TransactionIndex(TransactionIndex, net_request::IncompleteTransactionIndexRequest), - Receipts(BlockReceipts, net_request::IncompleteReceiptsRequest), - Body(Body, net_request::IncompleteBodyRequest), - Account(Account, net_request::IncompleteAccountRequest), - Code(Code, net_request::IncompleteCodeRequest), - Execution(TransactionProof, net_request::IncompleteExecutionRequest), - Signal(Signal, net_request::IncompleteSignalRequest) + HeaderProof(HeaderProof, net_request::IncompleteHeaderProofRequest), + HeaderByHash(HeaderByHash, net_request::IncompleteHeadersRequest), + HeaderWithAncestors(HeaderWithAncestors, net_request::IncompleteHeadersRequest), + TransactionIndex( + TransactionIndex, + net_request::IncompleteTransactionIndexRequest, + ), + Receipts(BlockReceipts, net_request::IncompleteReceiptsRequest), + Body(Body, net_request::IncompleteBodyRequest), + Account(Account, net_request::IncompleteAccountRequest), + Code(Code, net_request::IncompleteCodeRequest), + Execution(TransactionProof, net_request::IncompleteExecutionRequest), + Signal(Signal, net_request::IncompleteSignalRequest), } impl From for CheckedRequest { - fn from(req: Request) -> Self { - match req { - Request::HeaderByHash(req) => { - let net_req = 
net_request::IncompleteHeadersRequest { - start: req.0.map(Into::into), - skip: 0, - max: 1, - reverse: false, - }; - trace!(target: "on_demand", "HeaderByHash Request, {:?}", net_req); - CheckedRequest::HeaderByHash(req, net_req) - } - Request::HeaderWithAncestors(req) => { - let net_req = net_request::IncompleteHeadersRequest { - start: req.block_hash.map(Into::into), - skip: 0, - max: req.ancestor_count + 1, - reverse: true, - }; - trace!(target: "on_demand", "HeaderWithAncestors Request, {:?}", net_req); - CheckedRequest::HeaderWithAncestors(req, net_req) - } - Request::HeaderProof(req) => { - let net_req = net_request::IncompleteHeaderProofRequest { - num: req.num().into(), - }; - trace!(target: "on_demand", "HeaderProof Request, {:?}", net_req); - CheckedRequest::HeaderProof(req, net_req) - } - Request::TransactionIndex(req) => { - let net_req = net_request::IncompleteTransactionIndexRequest { - hash: req.0, - }; - trace!(target: "on_demand", "TransactionIndex Request, {:?}", net_req); - CheckedRequest::TransactionIndex(req, net_req) - } - Request::Body(req) => { - let net_req = net_request::IncompleteBodyRequest { - hash: req.0.field(), - }; - trace!(target: "on_demand", "Body Request, {:?}", net_req); - CheckedRequest::Body(req, net_req) - } - Request::Receipts(req) => { - let net_req = net_request::IncompleteReceiptsRequest { - hash: req.0.field(), - }; - trace!(target: "on_demand", "Receipt Request, {:?}", net_req); - CheckedRequest::Receipts(req, net_req) - } - Request::Account(req) => { - let net_req = net_request::IncompleteAccountRequest { - block_hash: req.header.field(), - address_hash: ::hash::keccak(&req.address).into(), - }; - trace!(target: "on_demand", "Account Request, {:?}", net_req); - CheckedRequest::Account(req, net_req) - } - Request::Code(req) => { - let net_req = net_request::IncompleteCodeRequest { - block_hash: req.header.field(), - code_hash: req.code_hash, - }; - trace!(target: "on_demand", "Code Request, {:?}", net_req); - 
CheckedRequest::Code(req, net_req) - } - Request::Execution(req) => { - let net_req = net_request::IncompleteExecutionRequest { - block_hash: req.header.field(), - from: req.tx.sender(), - gas: req.tx.gas, - gas_price: req.tx.gas_price, - action: req.tx.action.clone(), - value: req.tx.value, - data: req.tx.data.clone(), - }; - trace!(target: "on_demand", "Execution request, {:?}", net_req); - CheckedRequest::Execution(req, net_req) - } - Request::Signal(req) => { - let net_req = net_request::IncompleteSignalRequest { - block_hash: req.hash.into(), - }; - trace!(target: "on_demand", "Signal Request, {:?}", net_req); - CheckedRequest::Signal(req, net_req) - } - } - } + fn from(req: Request) -> Self { + match req { + Request::HeaderByHash(req) => { + let net_req = net_request::IncompleteHeadersRequest { + start: req.0.map(Into::into), + skip: 0, + max: 1, + reverse: false, + }; + trace!(target: "on_demand", "HeaderByHash Request, {:?}", net_req); + CheckedRequest::HeaderByHash(req, net_req) + } + Request::HeaderWithAncestors(req) => { + let net_req = net_request::IncompleteHeadersRequest { + start: req.block_hash.map(Into::into), + skip: 0, + max: req.ancestor_count + 1, + reverse: true, + }; + trace!(target: "on_demand", "HeaderWithAncestors Request, {:?}", net_req); + CheckedRequest::HeaderWithAncestors(req, net_req) + } + Request::HeaderProof(req) => { + let net_req = net_request::IncompleteHeaderProofRequest { + num: req.num().into(), + }; + trace!(target: "on_demand", "HeaderProof Request, {:?}", net_req); + CheckedRequest::HeaderProof(req, net_req) + } + Request::TransactionIndex(req) => { + let net_req = net_request::IncompleteTransactionIndexRequest { hash: req.0 }; + trace!(target: "on_demand", "TransactionIndex Request, {:?}", net_req); + CheckedRequest::TransactionIndex(req, net_req) + } + Request::Body(req) => { + let net_req = net_request::IncompleteBodyRequest { + hash: req.0.field(), + }; + trace!(target: "on_demand", "Body Request, {:?}", net_req); + 
CheckedRequest::Body(req, net_req) + } + Request::Receipts(req) => { + let net_req = net_request::IncompleteReceiptsRequest { + hash: req.0.field(), + }; + trace!(target: "on_demand", "Receipt Request, {:?}", net_req); + CheckedRequest::Receipts(req, net_req) + } + Request::Account(req) => { + let net_req = net_request::IncompleteAccountRequest { + block_hash: req.header.field(), + address_hash: ::hash::keccak(&req.address).into(), + }; + trace!(target: "on_demand", "Account Request, {:?}", net_req); + CheckedRequest::Account(req, net_req) + } + Request::Code(req) => { + let net_req = net_request::IncompleteCodeRequest { + block_hash: req.header.field(), + code_hash: req.code_hash, + }; + trace!(target: "on_demand", "Code Request, {:?}", net_req); + CheckedRequest::Code(req, net_req) + } + Request::Execution(req) => { + let net_req = net_request::IncompleteExecutionRequest { + block_hash: req.header.field(), + from: req.tx.sender(), + gas: req.tx.gas, + gas_price: req.tx.gas_price, + action: req.tx.action.clone(), + value: req.tx.value, + data: req.tx.data.clone(), + }; + trace!(target: "on_demand", "Execution request, {:?}", net_req); + CheckedRequest::Execution(req, net_req) + } + Request::Signal(req) => { + let net_req = net_request::IncompleteSignalRequest { + block_hash: req.hash.into(), + }; + trace!(target: "on_demand", "Signal Request, {:?}", net_req); + CheckedRequest::Signal(req, net_req) + } + } + } } impl CheckedRequest { - /// Convert this into a network request. - pub fn into_net_request(self) -> net_request::Request { - use ::request::Request as NetRequest; + /// Convert this into a network request. 
+ pub fn into_net_request(self) -> net_request::Request { + use request::Request as NetRequest; - match self { - CheckedRequest::HeaderProof(_, req) => NetRequest::HeaderProof(req), - CheckedRequest::HeaderByHash(_, req) => NetRequest::Headers(req), - CheckedRequest::HeaderWithAncestors(_, req) => NetRequest::Headers(req), - CheckedRequest::TransactionIndex(_, req) => NetRequest::TransactionIndex(req), - CheckedRequest::Receipts(_, req) => NetRequest::Receipts(req), - CheckedRequest::Body(_, req) => NetRequest::Body(req), - CheckedRequest::Account(_, req) => NetRequest::Account(req), - CheckedRequest::Code(_, req) => NetRequest::Code(req), - CheckedRequest::Execution(_, req) => NetRequest::Execution(req), - CheckedRequest::Signal(_, req) => NetRequest::Signal(req), - } - } + match self { + CheckedRequest::HeaderProof(_, req) => NetRequest::HeaderProof(req), + CheckedRequest::HeaderByHash(_, req) => NetRequest::Headers(req), + CheckedRequest::HeaderWithAncestors(_, req) => NetRequest::Headers(req), + CheckedRequest::TransactionIndex(_, req) => NetRequest::TransactionIndex(req), + CheckedRequest::Receipts(_, req) => NetRequest::Receipts(req), + CheckedRequest::Body(_, req) => NetRequest::Body(req), + CheckedRequest::Account(_, req) => NetRequest::Account(req), + CheckedRequest::Code(_, req) => NetRequest::Code(req), + CheckedRequest::Execution(_, req) => NetRequest::Execution(req), + CheckedRequest::Signal(_, req) => NetRequest::Signal(req), + } + } - /// Whether this needs a header from a prior request. - /// Returns `Some` with the index of the request returning the header - /// and the field giving the hash - /// if so, `None` otherwise. 
- pub fn needs_header(&self) -> Option<(usize, Field)> { - match *self { - CheckedRequest::Receipts(ref x, _) => x.0.needs_header(), - CheckedRequest::Body(ref x, _) => x.0.needs_header(), - CheckedRequest::Account(ref x, _) => x.header.needs_header(), - CheckedRequest::Code(ref x, _) => x.header.needs_header(), - CheckedRequest::Execution(ref x, _) => x.header.needs_header(), - _ => None, - } - } + /// Whether this needs a header from a prior request. + /// Returns `Some` with the index of the request returning the header + /// and the field giving the hash + /// if so, `None` otherwise. + pub fn needs_header(&self) -> Option<(usize, Field)> { + match *self { + CheckedRequest::Receipts(ref x, _) => x.0.needs_header(), + CheckedRequest::Body(ref x, _) => x.0.needs_header(), + CheckedRequest::Account(ref x, _) => x.header.needs_header(), + CheckedRequest::Code(ref x, _) => x.header.needs_header(), + CheckedRequest::Execution(ref x, _) => x.header.needs_header(), + _ => None, + } + } - /// Provide a header where one was needed. Should only be called if `needs_header` - /// returns `Some`, and for correctness, only use the header yielded by the correct - /// request. - pub fn provide_header(&mut self, header: encoded::Header) { - match *self { - CheckedRequest::Receipts(ref mut x, _) => x.0 = HeaderRef::Stored(header), - CheckedRequest::Body(ref mut x, _) => x.0 = HeaderRef::Stored(header), - CheckedRequest::Account(ref mut x, _) => x.header = HeaderRef::Stored(header), - CheckedRequest::Code(ref mut x, _) => x.header = HeaderRef::Stored(header), - CheckedRequest::Execution(ref mut x, _) => x.header = HeaderRef::Stored(header), - _ => {}, - } - } + /// Provide a header where one was needed. Should only be called if `needs_header` + /// returns `Some`, and for correctness, only use the header yielded by the correct + /// request. 
+ pub fn provide_header(&mut self, header: encoded::Header) { + match *self { + CheckedRequest::Receipts(ref mut x, _) => x.0 = HeaderRef::Stored(header), + CheckedRequest::Body(ref mut x, _) => x.0 = HeaderRef::Stored(header), + CheckedRequest::Account(ref mut x, _) => x.header = HeaderRef::Stored(header), + CheckedRequest::Code(ref mut x, _) => x.header = HeaderRef::Stored(header), + CheckedRequest::Execution(ref mut x, _) => x.header = HeaderRef::Stored(header), + _ => {} + } + } - /// Attempt to complete the request based on data in the cache. - pub fn respond_local(&self, cache: &Mutex<::cache::Cache>) -> Option { - match *self { - CheckedRequest::HeaderProof(ref check, _) => { - let mut cache = cache.lock(); - cache.block_hash(check.num) - .and_then(|h| cache.chain_score(&h).map(|s| (h, s))) - .map(|(h, s)| Response::HeaderProof((h, s))) - } - CheckedRequest::HeaderByHash(_, ref req) => { - if let Some(&net_request::HashOrNumber::Hash(ref h)) = req.start.as_ref() { - return cache.lock().block_header(h).map(Response::HeaderByHash); - } + /// Attempt to complete the request based on data in the cache. 
+ pub fn respond_local(&self, cache: &Mutex<::cache::Cache>) -> Option { + match *self { + CheckedRequest::HeaderProof(ref check, _) => { + let mut cache = cache.lock(); + cache + .block_hash(check.num) + .and_then(|h| cache.chain_score(&h).map(|s| (h, s))) + .map(|(h, s)| Response::HeaderProof((h, s))) + } + CheckedRequest::HeaderByHash(_, ref req) => { + if let Some(&net_request::HashOrNumber::Hash(ref h)) = req.start.as_ref() { + return cache.lock().block_header(h).map(Response::HeaderByHash); + } - None - } - CheckedRequest::HeaderWithAncestors(_, ref req) => { - if req.skip != 1 || !req.reverse { - return None; - } + None + } + CheckedRequest::HeaderWithAncestors(_, ref req) => { + if req.skip != 1 || !req.reverse { + return None; + } - if let Some(&net_request::HashOrNumber::Hash(start)) = req.start.as_ref() { - let mut result = Vec::with_capacity(req.max as usize); - let mut hash = start; - let mut cache = cache.lock(); - for _ in 0..req.max { - match cache.block_header(&hash) { - Some(header) => { - hash = header.parent_hash(); - result.push(header); - } - None => return None, - } - } - Some(Response::HeaderWithAncestors(result)) - } else { None } - } - CheckedRequest::Receipts(ref check, ref req) => { - // empty transactions -> no receipts - if check.0.as_ref().ok().map_or(false, |hdr| hdr.receipts_root() == KECCAK_NULL_RLP) { - return Some(Response::Receipts(Vec::new())); - } + if let Some(&net_request::HashOrNumber::Hash(start)) = req.start.as_ref() { + let mut result = Vec::with_capacity(req.max as usize); + let mut hash = start; + let mut cache = cache.lock(); + for _ in 0..req.max { + match cache.block_header(&hash) { + Some(header) => { + hash = header.parent_hash(); + result.push(header); + } + None => return None, + } + } + Some(Response::HeaderWithAncestors(result)) + } else { + None + } + } + CheckedRequest::Receipts(ref check, ref req) => { + // empty transactions -> no receipts + if check + .0 + .as_ref() + .ok() + .map_or(false, |hdr| 
hdr.receipts_root() == KECCAK_NULL_RLP) + { + return Some(Response::Receipts(Vec::new())); + } - req.hash.as_ref() - .and_then(|hash| cache.lock().block_receipts(hash)) - .map(Response::Receipts) - } - CheckedRequest::Body(ref check, ref req) => { - // check for empty body. - if let Ok(hdr) = check.0.as_ref() { - if hdr.transactions_root() == KECCAK_NULL_RLP && hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { - let mut stream = RlpStream::new_list(3); - stream.append_raw(hdr.rlp().as_raw(), 1); - stream.begin_list(0); - stream.begin_list(0); + req.hash + .as_ref() + .and_then(|hash| cache.lock().block_receipts(hash)) + .map(Response::Receipts) + } + CheckedRequest::Body(ref check, ref req) => { + // check for empty body. + if let Ok(hdr) = check.0.as_ref() { + if hdr.transactions_root() == KECCAK_NULL_RLP + && hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP + { + let mut stream = RlpStream::new_list(3); + stream.append_raw(hdr.rlp().as_raw(), 1); + stream.begin_list(0); + stream.begin_list(0); - return Some(Response::Body(encoded::Block::new(stream.out()))); - } - } + return Some(Response::Body(encoded::Block::new(stream.out()))); + } + } - // otherwise, check for cached body and header. - let block_hash = req.hash.as_ref() - .cloned() - .or_else(|| check.0.as_ref().ok().map(|hdr| hdr.hash())); - let block_hash = match block_hash { - Some(hash) => hash, - None => return None, - }; + // otherwise, check for cached body and header. + let block_hash = req + .hash + .as_ref() + .cloned() + .or_else(|| check.0.as_ref().ok().map(|hdr| hdr.hash())); + let block_hash = match block_hash { + Some(hash) => hash, + None => return None, + }; - let mut cache = cache.lock(); - let cached_header; + let mut cache = cache.lock(); + let cached_header; - // can't use as_ref here although it seems like you would be able to: - // it complains about uninitialized `cached_header`. 
- let block_header = match check.0.as_ref().ok() { - Some(hdr) => Some(hdr), - None => { - cached_header = cache.block_header(&block_hash); - cached_header.as_ref() - } - }; + // can't use as_ref here although it seems like you would be able to: + // it complains about uninitialized `cached_header`. + let block_header = match check.0.as_ref().ok() { + Some(hdr) => Some(hdr), + None => { + cached_header = cache.block_header(&block_hash); + cached_header.as_ref() + } + }; - block_header - .and_then(|hdr| cache.block_body(&block_hash).map(|b| (hdr, b))) - .map(|(hdr, body)| { - Response::Body(encoded::Block::new_from_header_and_body(&hdr.view(), &body.view())) - }) - } - CheckedRequest::Code(_, ref req) => { - if req.code_hash.as_ref().map_or(false, |&h| h == KECCAK_EMPTY) { - Some(Response::Code(Vec::new())) - } else { - None - } - } - _ => None, - } - } + block_header + .and_then(|hdr| cache.block_body(&block_hash).map(|b| (hdr, b))) + .map(|(hdr, body)| { + Response::Body(encoded::Block::new_from_header_and_body( + &hdr.view(), + &body.view(), + )) + }) + } + CheckedRequest::Code(_, ref req) => { + if req.code_hash.as_ref().map_or(false, |&h| h == KECCAK_EMPTY) { + Some(Response::Code(Vec::new())) + } else { + None + } + } + _ => None, + } + } } macro_rules! 
match_me { - ($me: expr, ($check: pat, $req: pat) => $e: expr) => { - match $me { - CheckedRequest::HeaderProof($check, $req) => $e, - CheckedRequest::HeaderByHash($check, $req) => $e, - CheckedRequest::HeaderWithAncestors($check, $req) => $e, - CheckedRequest::TransactionIndex($check, $req) => $e, - CheckedRequest::Receipts($check, $req) => $e, - CheckedRequest::Body($check, $req) => $e, - CheckedRequest::Account($check, $req) => $e, - CheckedRequest::Code($check, $req) => $e, - CheckedRequest::Execution($check, $req) => $e, - CheckedRequest::Signal($check, $req) => $e, - } - } + ($me: expr, ($check: pat, $req: pat) => $e: expr) => { + match $me { + CheckedRequest::HeaderProof($check, $req) => $e, + CheckedRequest::HeaderByHash($check, $req) => $e, + CheckedRequest::HeaderWithAncestors($check, $req) => $e, + CheckedRequest::TransactionIndex($check, $req) => $e, + CheckedRequest::Receipts($check, $req) => $e, + CheckedRequest::Body($check, $req) => $e, + CheckedRequest::Account($check, $req) => $e, + CheckedRequest::Code($check, $req) => $e, + CheckedRequest::Execution($check, $req) => $e, + CheckedRequest::Signal($check, $req) => $e, + } + }; } impl IncompleteRequest for CheckedRequest { - type Complete = CompleteRequest; - type Response = net_request::Response; + type Complete = CompleteRequest; + type Response = net_request::Response; - fn check_outputs(&self, mut f: F) -> Result<(), net_request::NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), net_request::NoSuchOutput> - { - match *self { - CheckedRequest::HeaderProof(_, ref req) => req.check_outputs(f), - CheckedRequest::HeaderByHash(ref check, ref req) => { - req.check_outputs(&mut f)?; + fn check_outputs(&self, mut f: F) -> Result<(), net_request::NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), net_request::NoSuchOutput>, + { + match *self { + CheckedRequest::HeaderProof(_, ref req) => req.check_outputs(f), + CheckedRequest::HeaderByHash(ref check, ref req) => 
{ + req.check_outputs(&mut f)?; - // make sure the output given is definitively a hash. - match check.0 { - Field::BackReference(r, idx) => f(r, idx, OutputKind::Hash), - _ => Ok(()), - } - } - CheckedRequest::HeaderWithAncestors(ref check, ref req) => { - req.check_outputs(&mut f)?; + // make sure the output given is definitively a hash. + match check.0 { + Field::BackReference(r, idx) => f(r, idx, OutputKind::Hash), + _ => Ok(()), + } + } + CheckedRequest::HeaderWithAncestors(ref check, ref req) => { + req.check_outputs(&mut f)?; - // make sure the output given is definitively a hash. - match check.block_hash { - Field::BackReference(r, idx) => f(r, idx, OutputKind::Hash), - _ => Ok(()), - } - } - CheckedRequest::TransactionIndex(_, ref req) => req.check_outputs(f), - CheckedRequest::Receipts(_, ref req) => req.check_outputs(f), - CheckedRequest::Body(_, ref req) => req.check_outputs(f), - CheckedRequest::Account(_, ref req) => req.check_outputs(f), - CheckedRequest::Code(_, ref req) => req.check_outputs(f), - CheckedRequest::Execution(_, ref req) => req.check_outputs(f), - CheckedRequest::Signal(_, ref req) => req.check_outputs(f), - } - } + // make sure the output given is definitively a hash. 
+ match check.block_hash { + Field::BackReference(r, idx) => f(r, idx, OutputKind::Hash), + _ => Ok(()), + } + } + CheckedRequest::TransactionIndex(_, ref req) => req.check_outputs(f), + CheckedRequest::Receipts(_, ref req) => req.check_outputs(f), + CheckedRequest::Body(_, ref req) => req.check_outputs(f), + CheckedRequest::Account(_, ref req) => req.check_outputs(f), + CheckedRequest::Code(_, ref req) => req.check_outputs(f), + CheckedRequest::Execution(_, ref req) => req.check_outputs(f), + CheckedRequest::Signal(_, ref req) => req.check_outputs(f), + } + } - fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind) { - match_me!(*self, (_, ref req) => req.note_outputs(f)) - } + fn note_outputs(&self, f: F) + where + F: FnMut(usize, OutputKind), + { + match_me!(*self, (_, ref req) => req.note_outputs(f)) + } - fn fill(&mut self, f: F) where F: Fn(usize, usize) -> Result { - match_me!(*self, (_, ref mut req) => req.fill(f)) - } + fn fill(&mut self, f: F) + where + F: Fn(usize, usize) -> Result, + { + match_me!(*self, (_, ref mut req) => req.fill(f)) + } - fn complete(self) -> Result { - match self { - CheckedRequest::HeaderProof(_, req) => { - trace!(target: "on_demand", "HeaderProof request completed {:?}", req); - req.complete().map(CompleteRequest::HeaderProof) - } - CheckedRequest::HeaderByHash(_, req) => { - trace!(target: "on_demand", "HeaderByHash request completed {:?}", req); - req.complete().map(CompleteRequest::Headers) - } - CheckedRequest::HeaderWithAncestors(_, req) => { - trace!(target: "on_demand", "HeaderWithAncestors request completed {:?}", req); - req.complete().map(CompleteRequest::Headers) - } - CheckedRequest::TransactionIndex(_, req) => { - trace!(target: "on_demand", "TransactionIndex request completed {:?}", req); - req.complete().map(CompleteRequest::TransactionIndex) - } - CheckedRequest::Receipts(_, req) => { - trace!(target: "on_demand", "Receipt request completed {:?}", req); - req.complete().map(CompleteRequest::Receipts) - } 
- CheckedRequest::Body(_, req) => { - trace!(target: "on_demand", "Block request completed {:?}", req); - req.complete().map(CompleteRequest::Body) - } - CheckedRequest::Account(_, req) => { - trace!(target: "on_demand", "Account request completed {:?}", req); - req.complete().map(CompleteRequest::Account) - } - CheckedRequest::Code(_, req) => { - trace!(target: "on_demand", "Code request completed {:?}", req); - req.complete().map(CompleteRequest::Code) - } - CheckedRequest::Execution(_, req) => { - trace!(target: "on_demand", "Execution request completed {:?}", req); - req.complete().map(CompleteRequest::Execution) - } - CheckedRequest::Signal(_, req) => { - trace!(target: "on_demand", "Signal request completed {:?}", req); - req.complete().map(CompleteRequest::Signal) - } - } - } + fn complete(self) -> Result { + match self { + CheckedRequest::HeaderProof(_, req) => { + trace!(target: "on_demand", "HeaderProof request completed {:?}", req); + req.complete().map(CompleteRequest::HeaderProof) + } + CheckedRequest::HeaderByHash(_, req) => { + trace!(target: "on_demand", "HeaderByHash request completed {:?}", req); + req.complete().map(CompleteRequest::Headers) + } + CheckedRequest::HeaderWithAncestors(_, req) => { + trace!(target: "on_demand", "HeaderWithAncestors request completed {:?}", req); + req.complete().map(CompleteRequest::Headers) + } + CheckedRequest::TransactionIndex(_, req) => { + trace!(target: "on_demand", "TransactionIndex request completed {:?}", req); + req.complete().map(CompleteRequest::TransactionIndex) + } + CheckedRequest::Receipts(_, req) => { + trace!(target: "on_demand", "Receipt request completed {:?}", req); + req.complete().map(CompleteRequest::Receipts) + } + CheckedRequest::Body(_, req) => { + trace!(target: "on_demand", "Block request completed {:?}", req); + req.complete().map(CompleteRequest::Body) + } + CheckedRequest::Account(_, req) => { + trace!(target: "on_demand", "Account request completed {:?}", req); + 
req.complete().map(CompleteRequest::Account) + } + CheckedRequest::Code(_, req) => { + trace!(target: "on_demand", "Code request completed {:?}", req); + req.complete().map(CompleteRequest::Code) + } + CheckedRequest::Execution(_, req) => { + trace!(target: "on_demand", "Execution request completed {:?}", req); + req.complete().map(CompleteRequest::Execution) + } + CheckedRequest::Signal(_, req) => { + trace!(target: "on_demand", "Signal request completed {:?}", req); + req.complete().map(CompleteRequest::Signal) + } + } + } - fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { - match_me!(*self, (_, ref mut req) => req.adjust_refs(mapping)) - } + fn adjust_refs(&mut self, mapping: F) + where + F: FnMut(usize) -> usize, + { + match_me!(*self, (_, ref mut req) => req.adjust_refs(mapping)) + } } impl net_request::CheckedRequest for CheckedRequest { - type Extract = Response; - type Error = Error; - type Environment = Mutex<::cache::Cache>; + type Extract = Response; + type Error = Error; + type Environment = Mutex<::cache::Cache>; - /// Check whether the response matches (beyond the type). - fn check_response(&self, complete: &Self::Complete, cache: &Mutex<::cache::Cache>, response: &Self::Response) -> Result { - use ::request::Response as NetResponse; + /// Check whether the response matches (beyond the type). + fn check_response( + &self, + complete: &Self::Complete, + cache: &Mutex<::cache::Cache>, + response: &Self::Response, + ) -> Result { + use request::Response as NetResponse; - // helper for expecting a specific response for a given request. - macro_rules! expect { - ($res: pat => $e: expr) => {{ - match (response, complete) { - $res => $e, - _ => Err(Error::WrongKind), - } - }} - } + // helper for expecting a specific response for a given request. + macro_rules! expect { + ($res: pat => $e: expr) => {{ + match (response, complete) { + $res => $e, + _ => Err(Error::WrongKind), + } + }}; + } - // check response against contained prover. 
- match *self { - CheckedRequest::HeaderProof(ref prover, _) => - expect!((&NetResponse::HeaderProof(ref res), _) => - prover.check_response(cache, &res.proof).map(Response::HeaderProof)), - CheckedRequest::HeaderByHash(ref prover, _) => - expect!((&NetResponse::Headers(ref res), &CompleteRequest::Headers(ref req)) => - prover.check_response(cache, &req.start, &res.headers).map(Response::HeaderByHash)), - CheckedRequest::HeaderWithAncestors(ref prover, _) => - expect!((&NetResponse::Headers(ref res), &CompleteRequest::Headers(ref req)) => - prover.check_response(cache, &req.start, &res.headers).map(Response::HeaderWithAncestors)), - CheckedRequest::TransactionIndex(ref prover, _) => - expect!((&NetResponse::TransactionIndex(ref res), _) => - prover.check_response(cache, res).map(Response::TransactionIndex)), - CheckedRequest::Receipts(ref prover, _) => - expect!((&NetResponse::Receipts(ref res), _) => - prover.check_response(cache, &res.receipts).map(Response::Receipts)), - CheckedRequest::Body(ref prover, _) => - expect!((&NetResponse::Body(ref res), _) => + // check response against contained prover. 
+ match *self { + CheckedRequest::HeaderProof(ref prover, _) => { + expect!((&NetResponse::HeaderProof(ref res), _) => + prover.check_response(cache, &res.proof).map(Response::HeaderProof)) + } + CheckedRequest::HeaderByHash(ref prover, _) => { + expect!((&NetResponse::Headers(ref res), &CompleteRequest::Headers(ref req)) => + prover.check_response(cache, &req.start, &res.headers).map(Response::HeaderByHash)) + } + CheckedRequest::HeaderWithAncestors(ref prover, _) => { + expect!((&NetResponse::Headers(ref res), &CompleteRequest::Headers(ref req)) => + prover.check_response(cache, &req.start, &res.headers).map(Response::HeaderWithAncestors)) + } + CheckedRequest::TransactionIndex(ref prover, _) => { + expect!((&NetResponse::TransactionIndex(ref res), _) => + prover.check_response(cache, res).map(Response::TransactionIndex)) + } + CheckedRequest::Receipts(ref prover, _) => { + expect!((&NetResponse::Receipts(ref res), _) => + prover.check_response(cache, &res.receipts).map(Response::Receipts)) + } + CheckedRequest::Body(ref prover, _) => expect!((&NetResponse::Body(ref res), _) => prover.check_response(cache, &res.body).map(Response::Body)), - CheckedRequest::Account(ref prover, _) => - expect!((&NetResponse::Account(ref res), _) => - prover.check_response(cache, &res.proof).map(Response::Account)), - CheckedRequest::Code(ref prover, _) => - expect!((&NetResponse::Code(ref res), &CompleteRequest::Code(ref req)) => - prover.check_response(cache, &req.code_hash, &res.code).map(Response::Code)), - CheckedRequest::Execution(ref prover, _) => - expect!((&NetResponse::Execution(ref res), _) => - prover.check_response(cache, &res.items).map(Response::Execution)), - CheckedRequest::Signal(ref prover, _) => - expect!((&NetResponse::Signal(ref res), _) => + CheckedRequest::Account(ref prover, _) => { + expect!((&NetResponse::Account(ref res), _) => + prover.check_response(cache, &res.proof).map(Response::Account)) + } + CheckedRequest::Code(ref prover, _) => { + 
expect!((&NetResponse::Code(ref res), &CompleteRequest::Code(ref req)) => + prover.check_response(cache, &req.code_hash, &res.code).map(Response::Code)) + } + CheckedRequest::Execution(ref prover, _) => { + expect!((&NetResponse::Execution(ref res), _) => + prover.check_response(cache, &res.items).map(Response::Execution)) + } + CheckedRequest::Signal(ref prover, _) => expect!((&NetResponse::Signal(ref res), _) => prover.check_response(cache, &res.signal).map(Response::Signal)), - } - } + } + } } /// Responses to on-demand requests. /// All of these are checked. pub enum Response { - /// Response to a header proof request. - /// Returns the hash and chain score. - HeaderProof((H256, U256)), - /// Response to a header-by-hash request. - HeaderByHash(encoded::Header), - /// Response to a header-by-hash with ancestors request. - HeaderWithAncestors(Vec), - /// Response to a transaction-index request. - TransactionIndex(net_request::TransactionIndexResponse), - /// Response to a receipts request. - Receipts(Vec), - /// Response to a block body request. - Body(encoded::Block), - /// Response to an Account request. - // TODO: `unwrap_or(engine_defaults)` - Account(Option), - /// Response to a request for code. - Code(Vec), - /// Response to a request for proved execution. - Execution(super::ExecutionResult), - /// Response to a request for epoch change signal. - Signal(Vec), + /// Response to a header proof request. + /// Returns the hash and chain score. + HeaderProof((H256, U256)), + /// Response to a header-by-hash request. + HeaderByHash(encoded::Header), + /// Response to a header-by-hash with ancestors request. + HeaderWithAncestors(Vec), + /// Response to a transaction-index request. + TransactionIndex(net_request::TransactionIndexResponse), + /// Response to a receipts request. + Receipts(Vec), + /// Response to a block body request. + Body(encoded::Block), + /// Response to an Account request. 
+ // TODO: `unwrap_or(engine_defaults)` + Account(Option), + /// Response to a request for code. + Code(Vec), + /// Response to a request for proved execution. + Execution(super::ExecutionResult), + /// Response to a request for epoch change signal. + Signal(Vec), } impl net_request::ResponseLike for Response { - fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { - match *self { - Response::HeaderProof((ref hash, _)) => f(0, Output::Hash(*hash)), - Response::Account(None) => { - f(0, Output::Hash(KECCAK_EMPTY)); // code hash - f(1, Output::Hash(KECCAK_NULL_RLP)); // storage root. - } - Response::Account(Some(ref acc)) => { - f(0, Output::Hash(acc.code_hash)); - f(1, Output::Hash(acc.storage_root)); - } - _ => {} - } - } + fn fill_outputs(&self, mut f: F) + where + F: FnMut(usize, Output), + { + match *self { + Response::HeaderProof((ref hash, _)) => f(0, Output::Hash(*hash)), + Response::Account(None) => { + f(0, Output::Hash(KECCAK_EMPTY)); // code hash + f(1, Output::Hash(KECCAK_NULL_RLP)); // storage root. + } + Response::Account(Some(ref acc)) => { + f(0, Output::Hash(acc.code_hash)); + f(1, Output::Hash(acc.storage_root)); + } + _ => {} + } + } } /// Errors in verification. #[derive(Debug, PartialEq)] pub enum Error { - /// RLP decoder error. - Decoder(::rlp::DecoderError), - /// Empty response. - Empty, - /// Response data length exceeds request max. - TooManyResults(u64, u64), - /// Response data is incomplete. - TooFewResults(u64, u64), - /// Trie lookup error (result of bad proof) - Trie(TrieError), - /// Bad inclusion proof - BadProof, - /// Header by number instead of hash. - HeaderByNumber, - /// Unresolved header reference. - UnresolvedHeader(usize), - /// Wrong header number. - WrongNumber(u64, u64), - /// Wrong hash. - WrongHash(H256, H256), - /// Wrong trie root. - WrongTrieRoot(H256, H256), - /// Wrong response kind. - WrongKind, - /// Wrong sequence of headers. - WrongHeaderSequence, + /// RLP decoder error. 
+ Decoder(::rlp::DecoderError), + /// Empty response. + Empty, + /// Response data length exceeds request max. + TooManyResults(u64, u64), + /// Response data is incomplete. + TooFewResults(u64, u64), + /// Trie lookup error (result of bad proof) + Trie(TrieError), + /// Bad inclusion proof + BadProof, + /// Header by number instead of hash. + HeaderByNumber, + /// Unresolved header reference. + UnresolvedHeader(usize), + /// Wrong header number. + WrongNumber(u64, u64), + /// Wrong hash. + WrongHash(H256, H256), + /// Wrong trie root. + WrongTrieRoot(H256, H256), + /// Wrong response kind. + WrongKind, + /// Wrong sequence of headers. + WrongHeaderSequence, } impl From<::rlp::DecoderError> for Error { - fn from(err: ::rlp::DecoderError) -> Self { - Error::Decoder(err) - } + fn from(err: ::rlp::DecoderError) -> Self { + Error::Decoder(err) + } } impl From> for Error { - fn from(err: Box) -> Self { - Error::Trie(*err) - } + fn from(err: Box) -> Self { + Error::Trie(*err) + } } /// Request for header proof by number #[derive(Debug, Clone, PartialEq, Eq)] pub struct HeaderProof { - /// The header's number. - num: u64, - /// The cht number for the given block number. - cht_num: u64, - /// The root of the CHT containing this header. - cht_root: H256, + /// The header's number. + num: u64, + /// The cht number for the given block number. + cht_num: u64, + /// The root of the CHT containing this header. + cht_root: H256, } impl HeaderProof { - /// Construct a new header-by-number request. Fails if the given number is 0. - /// Provide the expected CHT root to compare against. - pub fn new(num: u64, cht_root: H256) -> Option { - ::cht::block_to_cht_number(num).map(|cht_num| HeaderProof { - num, - cht_num, - cht_root, - }) - } + /// Construct a new header-by-number request. Fails if the given number is 0. + /// Provide the expected CHT root to compare against. 
+ pub fn new(num: u64, cht_root: H256) -> Option { + ::cht::block_to_cht_number(num).map(|cht_num| HeaderProof { + num, + cht_num, + cht_root, + }) + } - /// Access the requested block number. - pub fn num(&self) -> u64 { self.num } + /// Access the requested block number. + pub fn num(&self) -> u64 { + self.num + } - /// Access the CHT number. - pub fn cht_num(&self) -> u64 { self.cht_num } + /// Access the CHT number. + pub fn cht_num(&self) -> u64 { + self.cht_num + } - /// Access the expected CHT root. - pub fn cht_root(&self) -> H256 { self.cht_root } + /// Access the expected CHT root. + pub fn cht_root(&self) -> H256 { + self.cht_root + } - /// Check a response with a CHT proof, get a hash and total difficulty back. - pub fn check_response(&self, cache: &Mutex<::cache::Cache>, proof: &[Bytes]) -> Result<(H256, U256), Error> { - match ::cht::check_proof(proof, self.num, self.cht_root) { - Some((expected_hash, td)) => { - let mut cache = cache.lock(); - cache.insert_block_hash(self.num, expected_hash); - cache.insert_chain_score(expected_hash, td); + /// Check a response with a CHT proof, get a hash and total difficulty back. + pub fn check_response( + &self, + cache: &Mutex<::cache::Cache>, + proof: &[Bytes], + ) -> Result<(H256, U256), Error> { + match ::cht::check_proof(proof, self.num, self.cht_root) { + Some((expected_hash, td)) => { + let mut cache = cache.lock(); + cache.insert_block_hash(self.num, expected_hash); + cache.insert_chain_score(expected_hash, td); - Ok((expected_hash, td)) - } - None => Err(Error::BadProof), - } - } + Ok((expected_hash, td)) + } + None => Err(Error::BadProof), + } + } } /// Request for a header by hash with a range of ancestors. #[derive(Debug, Clone, PartialEq, Eq)] pub struct HeaderWithAncestors { - /// Hash of the last block in the range to fetch. - pub block_hash: Field, - /// Number of headers before the last block to fetch in addition. - pub ancestor_count: u64, + /// Hash of the last block in the range to fetch. 
+ pub block_hash: Field, + /// Number of headers before the last block to fetch in addition. + pub ancestor_count: u64, } impl HeaderWithAncestors { - /// Check a response for the headers. - pub fn check_response( - &self, - cache: &Mutex<::cache::Cache>, - start: &net_request::HashOrNumber, - headers: &[encoded::Header] - ) -> Result, Error> { - let expected_hash = match (self.block_hash, start) { - (Field::Scalar(h), &net_request::HashOrNumber::Hash(h2)) => { - if h != h2 { return Err(Error::WrongHash(h, h2)) } - h - } - (_, &net_request::HashOrNumber::Hash(h2)) => h2, - _ => return Err(Error::HeaderByNumber), - }; + /// Check a response for the headers. + pub fn check_response( + &self, + cache: &Mutex<::cache::Cache>, + start: &net_request::HashOrNumber, + headers: &[encoded::Header], + ) -> Result, Error> { + let expected_hash = match (self.block_hash, start) { + (Field::Scalar(h), &net_request::HashOrNumber::Hash(h2)) => { + if h != h2 { + return Err(Error::WrongHash(h, h2)); + } + h + } + (_, &net_request::HashOrNumber::Hash(h2)) => h2, + _ => return Err(Error::HeaderByNumber), + }; - let start_header = headers.first().ok_or(Error::Empty)?; - let start_hash = start_header.hash(); - if start_hash != expected_hash { - return Err(Error::WrongHash(expected_hash, start_hash)); - } + let start_header = headers.first().ok_or(Error::Empty)?; + let start_hash = start_header.hash(); + if start_hash != expected_hash { + return Err(Error::WrongHash(expected_hash, start_hash)); + } - let expected_len = 1 + cmp::min(self.ancestor_count, start_header.number()); - let actual_len = headers.len() as u64; - match actual_len.cmp(&expected_len) { - cmp::Ordering::Less => - return Err(Error::TooFewResults(expected_len, actual_len)), - cmp::Ordering::Greater => - return Err(Error::TooManyResults(expected_len, actual_len)), - cmp::Ordering::Equal => (), - }; + let expected_len = 1 + cmp::min(self.ancestor_count, start_header.number()); + let actual_len = headers.len() as u64; + 
match actual_len.cmp(&expected_len) { + cmp::Ordering::Less => return Err(Error::TooFewResults(expected_len, actual_len)), + cmp::Ordering::Greater => return Err(Error::TooManyResults(expected_len, actual_len)), + cmp::Ordering::Equal => (), + }; - for (header, prev_header) in headers.iter().zip(headers[1..].iter()) { - if header.number() != prev_header.number() + 1 || - header.parent_hash() != prev_header.hash() - { - return Err(Error::WrongHeaderSequence) - } - } + for (header, prev_header) in headers.iter().zip(headers[1..].iter()) { + if header.number() != prev_header.number() + 1 + || header.parent_hash() != prev_header.hash() + { + return Err(Error::WrongHeaderSequence); + } + } - let mut cache = cache.lock(); - for header in headers { - cache.insert_block_header(header.hash(), header.clone()); - } + let mut cache = cache.lock(); + for header in headers { + cache.insert_block_header(header.hash(), header.clone()); + } - Ok(headers.to_vec()) - } + Ok(headers.to_vec()) + } } /// Request for a header by hash. @@ -862,31 +922,33 @@ impl HeaderWithAncestors { pub struct HeaderByHash(pub Field); impl HeaderByHash { - /// Check a response for the header. - pub fn check_response( - &self, - cache: &Mutex<::cache::Cache>, - start: &net_request::HashOrNumber, - headers: &[encoded::Header] - ) -> Result { - let expected_hash = match (self.0, start) { - (Field::Scalar(h), &net_request::HashOrNumber::Hash(h2)) => { - if h != h2 { return Err(Error::WrongHash(h, h2)) } - h - } - (_, &net_request::HashOrNumber::Hash(h2)) => h2, - _ => return Err(Error::HeaderByNumber), - }; + /// Check a response for the header. 
+ pub fn check_response( + &self, + cache: &Mutex<::cache::Cache>, + start: &net_request::HashOrNumber, + headers: &[encoded::Header], + ) -> Result { + let expected_hash = match (self.0, start) { + (Field::Scalar(h), &net_request::HashOrNumber::Hash(h2)) => { + if h != h2 { + return Err(Error::WrongHash(h, h2)); + } + h + } + (_, &net_request::HashOrNumber::Hash(h2)) => h2, + _ => return Err(Error::HeaderByNumber), + }; - let header = headers.get(0).ok_or(Error::Empty)?; - let hash = header.hash(); - if hash == expected_hash { - cache.lock().insert_block_header(hash, header.clone()); - Ok(header.clone()) - } else { - Err(Error::WrongHash(expected_hash, hash)) - } - } + let header = headers.get(0).ok_or(Error::Empty)?; + let hash = header.hash(); + if hash == expected_hash { + cache.lock().insert_block_header(hash, header.clone()); + Ok(header.clone()) + } else { + Err(Error::WrongHash(expected_hash, hash)) + } + } } /// Request for a transaction index. @@ -894,26 +956,26 @@ impl HeaderByHash { pub struct TransactionIndex(pub Field); impl TransactionIndex { - /// Check a response for the transaction index. - // - // TODO: proper checking involves looking at canonicality of the - // hash w.r.t. the current best block header. - // - // unlike all other forms of request, we don't know the header to check - // until we make this request. - // - // This would require lookups in the database or perhaps CHT requests, - // which aren't currently possible. - // - // Also, returning a result that is not locally canonical doesn't necessarily - // indicate misbehavior, so the punishment scheme would need to be revised. - pub fn check_response( - &self, - _cache: &Mutex<::cache::Cache>, - res: &net_request::TransactionIndexResponse, - ) -> Result { - Ok(res.clone()) - } + /// Check a response for the transaction index. + // + // TODO: proper checking involves looking at canonicality of the + // hash w.r.t. the current best block header. 
+ // + // unlike all other forms of request, we don't know the header to check + // until we make this request. + // + // This would require lookups in the database or perhaps CHT requests, + // which aren't currently possible. + // + // Also, returning a result that is not locally canonical doesn't necessarily + // indicate misbehavior, so the punishment scheme would need to be revised. + pub fn check_response( + &self, + _cache: &Mutex<::cache::Cache>, + res: &net_request::TransactionIndexResponse, + ) -> Result { + Ok(res.clone()) + } } /// Request for a block, with header for verification. @@ -921,28 +983,33 @@ impl TransactionIndex { pub struct Body(pub HeaderRef); impl Body { - /// Check a response for this block body. - pub fn check_response(&self, cache: &Mutex<::cache::Cache>, body: &encoded::Body) -> Result { - // check the integrity of the the body against the header - let header = self.0.as_ref()?; - let tx_root = ::triehash::ordered_trie_root(body.transactions_rlp().iter().map(|r| r.as_raw())); - if tx_root != header.transactions_root() { - trace!(target: "on_demand", "Body Response: \"WrongTrieRoot\" tx_root: {:?} header_root: {:?}", tx_root, header.transactions_root()); - return Err(Error::WrongTrieRoot(header.transactions_root(), tx_root)); - } + /// Check a response for this block body. 
+ pub fn check_response( + &self, + cache: &Mutex<::cache::Cache>, + body: &encoded::Body, + ) -> Result { + // check the integrity of the the body against the header + let header = self.0.as_ref()?; + let tx_root = + ::triehash::ordered_trie_root(body.transactions_rlp().iter().map(|r| r.as_raw())); + if tx_root != header.transactions_root() { + trace!(target: "on_demand", "Body Response: \"WrongTrieRoot\" tx_root: {:?} header_root: {:?}", tx_root, header.transactions_root()); + return Err(Error::WrongTrieRoot(header.transactions_root(), tx_root)); + } - let uncles_hash = keccak(body.uncles_rlp().as_raw()); - if uncles_hash != header.uncles_hash() { - trace!(target: "on_demand", "Body Response: \"WrongHash\" tx_root: {:?} header_root: {:?}", uncles_hash, header.uncles_hash()); - return Err(Error::WrongHash(header.uncles_hash(), uncles_hash)); - } + let uncles_hash = keccak(body.uncles_rlp().as_raw()); + if uncles_hash != header.uncles_hash() { + trace!(target: "on_demand", "Body Response: \"WrongHash\" tx_root: {:?} header_root: {:?}", uncles_hash, header.uncles_hash()); + return Err(Error::WrongHash(header.uncles_hash(), uncles_hash)); + } - // concatenate the header and the body. - let block = encoded::Block::new_from_header_and_body(&header.view(), &body.view()); + // concatenate the header and the body. + let block = encoded::Block::new_from_header_and_body(&header.view(), &body.view()); - cache.lock().insert_block_body(header.hash(), body.clone()); - Ok(block) - } + cache.lock().insert_block_body(header.hash(), body.clone()); + Ok(block) + } } /// Request for a block's receipts with header for verification. @@ -950,392 +1017,471 @@ impl Body { pub struct BlockReceipts(pub HeaderRef); impl BlockReceipts { - /// Check a response with receipts against the stored header. 
- pub fn check_response(&self, cache: &Mutex<::cache::Cache>, receipts: &[Receipt]) -> Result, Error> { - let receipts_root = self.0.as_ref()?.receipts_root(); - let found_root = ::triehash::ordered_trie_root(receipts.iter().map(|r| ::rlp::encode(r))); + /// Check a response with receipts against the stored header. + pub fn check_response( + &self, + cache: &Mutex<::cache::Cache>, + receipts: &[Receipt], + ) -> Result, Error> { + let receipts_root = self.0.as_ref()?.receipts_root(); + let found_root = ::triehash::ordered_trie_root(receipts.iter().map(|r| ::rlp::encode(r))); - if receipts_root == found_root { - cache.lock().insert_block_receipts(receipts_root, receipts.to_vec()); - Ok(receipts.to_vec()) - } else { - trace!(target: "on_demand", "Receipt Reponse: \"WrongTrieRoot\" receipts_root: {:?} found_root: {:?}", receipts_root, found_root); - Err(Error::WrongTrieRoot(receipts_root, found_root)) - } - } + if receipts_root == found_root { + cache + .lock() + .insert_block_receipts(receipts_root, receipts.to_vec()); + Ok(receipts.to_vec()) + } else { + trace!(target: "on_demand", "Receipt Reponse: \"WrongTrieRoot\" receipts_root: {:?} found_root: {:?}", receipts_root, found_root); + Err(Error::WrongTrieRoot(receipts_root, found_root)) + } + } } /// Request for an account structure. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Account { - /// Header for verification. - pub header: HeaderRef, - /// Address requested. - pub address: Address, + /// Header for verification. + pub header: HeaderRef, + /// Address requested. + pub address: Address, } impl Account { - /// Check a response with an account against the stored header. - pub fn check_response(&self, _: &Mutex<::cache::Cache>, proof: &[Bytes]) -> Result, Error> { - let header = self.header.as_ref()?; - let state_root = header.state_root(); + /// Check a response with an account against the stored header. 
+ pub fn check_response( + &self, + _: &Mutex<::cache::Cache>, + proof: &[Bytes], + ) -> Result, Error> { + let header = self.header.as_ref()?; + let state_root = header.state_root(); - let mut db = journaldb::new_memory_db(); - for node in proof { db.insert(&node[..]); } + let mut db = journaldb::new_memory_db(); + for node in proof { + db.insert(&node[..]); + } - match TrieDB::new(&db, &state_root).and_then(|t| t.get(&keccak(&self.address)))? { - Some(val) => { - let rlp = Rlp::new(&val); - Ok(Some(BasicAccount { - nonce: rlp.val_at(0)?, - balance: rlp.val_at(1)?, - storage_root: rlp.val_at(2)?, - code_hash: rlp.val_at(3)?, - })) - }, - None => { - trace!(target: "on_demand", "Account {:?} not found", self.address); - Ok(None) - } - } - } + match TrieDB::new(&db, &state_root).and_then(|t| t.get(&keccak(&self.address)))? { + Some(val) => { + let rlp = Rlp::new(&val); + Ok(Some(BasicAccount { + nonce: rlp.val_at(0)?, + balance: rlp.val_at(1)?, + storage_root: rlp.val_at(2)?, + code_hash: rlp.val_at(3)?, + })) + } + None => { + trace!(target: "on_demand", "Account {:?} not found", self.address); + Ok(None) + } + } + } } /// Request for account code. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Code { - /// Header reference. - pub header: HeaderRef, - /// Account's code hash. - pub code_hash: Field, + /// Header reference. + pub header: HeaderRef, + /// Account's code hash. + pub code_hash: Field, } impl Code { - /// Check a response with code against the code hash. - pub fn check_response( - &self, - _: &Mutex<::cache::Cache>, - code_hash: &H256, - code: &[u8] - ) -> Result, Error> { - let found_hash = keccak(code); - if &found_hash == code_hash { - Ok(code.to_vec()) - } else { - Err(Error::WrongHash(*code_hash, found_hash)) - } - } + /// Check a response with code against the code hash. 
+ pub fn check_response( + &self, + _: &Mutex<::cache::Cache>, + code_hash: &H256, + code: &[u8], + ) -> Result, Error> { + let found_hash = keccak(code); + if &found_hash == code_hash { + Ok(code.to_vec()) + } else { + Err(Error::WrongHash(*code_hash, found_hash)) + } + } } /// Request for transaction execution, along with the parts necessary to verify the proof. #[derive(Clone)] pub struct TransactionProof { - /// The transaction to request proof of. - pub tx: SignedTransaction, - /// Block header. - pub header: HeaderRef, - /// Transaction environment info. - // TODO: it's not really possible to provide this if the header is unknown. - pub env_info: EnvInfo, - /// Consensus engine. - pub engine: Arc, + /// The transaction to request proof of. + pub tx: SignedTransaction, + /// Block header. + pub header: HeaderRef, + /// Transaction environment info. + // TODO: it's not really possible to provide this if the header is unknown. + pub env_info: EnvInfo, + /// Consensus engine. + pub engine: Arc, } impl TransactionProof { - /// Check the proof, returning the proved execution or indicate that the proof was bad. - pub fn check_response(&self, _: &Mutex<::cache::Cache>, state_items: &[DBValue]) -> Result { - let root = self.header.as_ref()?.state_root(); + /// Check the proof, returning the proved execution or indicate that the proof was bad. 
+ pub fn check_response( + &self, + _: &Mutex<::cache::Cache>, + state_items: &[DBValue], + ) -> Result { + let root = self.header.as_ref()?.state_root(); - let mut env_info = self.env_info.clone(); - env_info.gas_limit = self.tx.gas; + let mut env_info = self.env_info.clone(); + env_info.gas_limit = self.tx.gas; - let proved_execution = state::check_proof( - state_items, - root, - &self.tx, - self.engine.machine(), - &self.env_info, - ); + let proved_execution = state::check_proof( + state_items, + root, + &self.tx, + self.engine.machine(), + &self.env_info, + ); - match proved_execution { - ProvedExecution::BadProof => { - trace!(target: "on_demand", "BadExecution Proof"); - Err(Error::BadProof) - } - ProvedExecution::Failed(e) => { - trace!(target: "on_demand", "Execution Proof failed: {:?}", e); - Ok(Err(e)) - } - ProvedExecution::Complete(e) => { - trace!(target: "on_demand", "Execution successful: {:?}", e); - Ok(Ok(e)) - } - } - } + match proved_execution { + ProvedExecution::BadProof => { + trace!(target: "on_demand", "BadExecution Proof"); + Err(Error::BadProof) + } + ProvedExecution::Failed(e) => { + trace!(target: "on_demand", "Execution Proof failed: {:?}", e); + Ok(Err(e)) + } + ProvedExecution::Complete(e) => { + trace!(target: "on_demand", "Execution successful: {:?}", e); + Ok(Ok(e)) + } + } + } } /// Request for epoch signal. /// Provide engine and state-dependent proof checker. #[derive(Clone)] pub struct Signal { - /// Block hash and number to fetch proof for. - pub hash: H256, - /// Consensus engine, used to check the proof. - pub engine: Arc, - /// Special checker for the proof. - pub proof_check: Arc>, + /// Block hash and number to fetch proof for. + pub hash: H256, + /// Consensus engine, used to check the proof. + pub engine: Arc, + /// Special checker for the proof. + pub proof_check: Arc>, } impl Signal { - /// Check the signal, returning the signal or indicate that it's bad. 
- pub fn check_response(&self, _: &Mutex<::cache::Cache>, signal: &[u8]) -> Result, Error> { - self.proof_check.check_proof(self.engine.machine(), signal) - .map(|_| signal.to_owned()) - .map_err(|_| Error::BadProof) - } + /// Check the signal, returning the signal or indicate that it's bad. + pub fn check_response( + &self, + _: &Mutex<::cache::Cache>, + signal: &[u8], + ) -> Result, Error> { + self.proof_check + .check_proof(self.engine.machine(), signal) + .map(|_| signal.to_owned()) + .map_err(|_| Error::BadProof) + } } #[cfg(test)] mod tests { - use super::*; - use std::time::Duration; - use ethereum_types::{H256, Address}; - use parking_lot::Mutex; - use trie::{Trie, TrieMut}; - use ethtrie::{SecTrieDB, SecTrieDBMut}; - use trie::Recorder; - use hash::keccak; + use super::*; + use ethereum_types::{Address, H256}; + use ethtrie::{SecTrieDB, SecTrieDBMut}; + use hash::keccak; + use parking_lot::Mutex; + use std::time::Duration; + use trie::{Recorder, Trie, TrieMut}; - use ethcore::client::{BlockChainClient, BlockInfo, TestBlockChainClient, EachBlockWith}; - use common_types::header::Header; - use common_types::encoded; - use common_types::receipt::{Receipt, TransactionOutcome}; + use common_types::{ + encoded, + header::Header, + receipt::{Receipt, TransactionOutcome}, + }; + use ethcore::client::{BlockChainClient, BlockInfo, EachBlockWith, TestBlockChainClient}; - fn make_cache() -> ::cache::Cache { - ::cache::Cache::new(Default::default(), Duration::from_secs(1)) - } + fn make_cache() -> ::cache::Cache { + ::cache::Cache::new(Default::default(), Duration::from_secs(1)) + } - #[test] - fn no_invalid_header_by_number() { - assert!(HeaderProof::new(0, Default::default()).is_none()) - } + #[test] + fn no_invalid_header_by_number() { + assert!(HeaderProof::new(0, Default::default()).is_none()) + } - #[test] - fn check_header_proof() { - use ::cht; + #[test] + fn check_header_proof() { + use cht; - let test_client = TestBlockChainClient::new(); - 
test_client.add_blocks(10500, EachBlockWith::Nothing); + let test_client = TestBlockChainClient::new(); + test_client.add_blocks(10500, EachBlockWith::Nothing); - let cht = { - let fetcher = |id| { - let hdr = test_client.block_header(id).unwrap(); - let td = test_client.block_total_difficulty(id).unwrap(); - Some(cht::BlockInfo { - hash: hdr.hash(), - parent_hash: hdr.parent_hash(), - total_difficulty: td, - }) - }; + let cht = { + let fetcher = |id| { + let hdr = test_client.block_header(id).unwrap(); + let td = test_client.block_total_difficulty(id).unwrap(); + Some(cht::BlockInfo { + hash: hdr.hash(), + parent_hash: hdr.parent_hash(), + total_difficulty: td, + }) + }; - cht::build(cht::block_to_cht_number(10_000).unwrap(), fetcher).unwrap() - }; + cht::build(cht::block_to_cht_number(10_000).unwrap(), fetcher).unwrap() + }; - let proof = cht.prove(10_000, 0).unwrap().unwrap(); - let req = HeaderProof::new(10_000, cht.root()).unwrap(); + let proof = cht.prove(10_000, 0).unwrap().unwrap(); + let req = HeaderProof::new(10_000, cht.root()).unwrap(); - let cache = Mutex::new(make_cache()); - assert!(req.check_response(&cache, &proof[..]).is_ok()); - } + let cache = Mutex::new(make_cache()); + assert!(req.check_response(&cache, &proof[..]).is_ok()); + } - #[test] - fn check_header_by_hash() { - let mut header = Header::new(); - header.set_number(10_000); - header.set_extra_data(b"test_header".to_vec()); - let hash = header.hash(); - let raw_header = encoded::Header::new(::rlp::encode(&header)); + #[test] + fn check_header_by_hash() { + let mut header = Header::new(); + header.set_number(10_000); + header.set_extra_data(b"test_header".to_vec()); + let hash = header.hash(); + let raw_header = encoded::Header::new(::rlp::encode(&header)); - let cache = Mutex::new(make_cache()); - assert!(HeaderByHash(hash.into()).check_response(&cache, &hash.into(), &[raw_header]).is_ok()) - } + let cache = Mutex::new(make_cache()); + assert!(HeaderByHash(hash.into()) + 
.check_response(&cache, &hash.into(), &[raw_header]) + .is_ok()) + } - #[test] - fn check_header_with_ancestors() { - let mut last_header_hash = H256::default(); - let mut headers = (0..11).map(|num| { - let mut header = Header::new(); - header.set_number(num); - header.set_parent_hash(last_header_hash); + #[test] + fn check_header_with_ancestors() { + let mut last_header_hash = H256::default(); + let mut headers = (0..11) + .map(|num| { + let mut header = Header::new(); + header.set_number(num); + header.set_parent_hash(last_header_hash); - last_header_hash = header.hash(); - header - }).collect::>(); + last_header_hash = header.hash(); + header + }) + .collect::>(); - headers.reverse(); // because responses are in reverse order + headers.reverse(); // because responses are in reverse order - let raw_headers = headers.iter() - .map(|hdr| encoded::Header::new(::rlp::encode(hdr))) - .collect::>(); + let raw_headers = headers + .iter() + .map(|hdr| encoded::Header::new(::rlp::encode(hdr))) + .collect::>(); - let mut invalid_successor = Header::new(); - invalid_successor.set_number(11); - invalid_successor.set_parent_hash(headers[1].hash()); + let mut invalid_successor = Header::new(); + invalid_successor.set_number(11); + invalid_successor.set_parent_hash(headers[1].hash()); - let raw_invalid_successor = encoded::Header::new(::rlp::encode(&invalid_successor)); + let raw_invalid_successor = encoded::Header::new(::rlp::encode(&invalid_successor)); - let cache = Mutex::new(make_cache()); + let cache = Mutex::new(make_cache()); - let header_with_ancestors = |hash, count| { - HeaderWithAncestors { - block_hash: hash, - ancestor_count: count - } - }; + let header_with_ancestors = |hash, count| HeaderWithAncestors { + block_hash: hash, + ancestor_count: count, + }; - // Correct responses - assert!(header_with_ancestors(headers[0].hash().into(), 0) - .check_response(&cache, &headers[0].hash().into(), &raw_headers[0..1]).is_ok()); - 
assert!(header_with_ancestors(headers[0].hash().into(), 2) - .check_response(&cache, &headers[0].hash().into(), &raw_headers[0..3]).is_ok()); - assert!(header_with_ancestors(headers[0].hash().into(), 10) - .check_response(&cache, &headers[0].hash().into(), &raw_headers[0..11]).is_ok()); - assert!(header_with_ancestors(headers[2].hash().into(), 2) - .check_response(&cache, &headers[2].hash().into(), &raw_headers[2..5]).is_ok()); - assert!(header_with_ancestors(headers[2].hash().into(), 10) - .check_response(&cache, &headers[2].hash().into(), &raw_headers[2..11]).is_ok()); - assert!(header_with_ancestors(invalid_successor.hash().into(), 0) - .check_response(&cache, &invalid_successor.hash().into(), &[raw_invalid_successor.clone()]).is_ok()); + // Correct responses + assert!(header_with_ancestors(headers[0].hash().into(), 0) + .check_response(&cache, &headers[0].hash().into(), &raw_headers[0..1]) + .is_ok()); + assert!(header_with_ancestors(headers[0].hash().into(), 2) + .check_response(&cache, &headers[0].hash().into(), &raw_headers[0..3]) + .is_ok()); + assert!(header_with_ancestors(headers[0].hash().into(), 10) + .check_response(&cache, &headers[0].hash().into(), &raw_headers[0..11]) + .is_ok()); + assert!(header_with_ancestors(headers[2].hash().into(), 2) + .check_response(&cache, &headers[2].hash().into(), &raw_headers[2..5]) + .is_ok()); + assert!(header_with_ancestors(headers[2].hash().into(), 10) + .check_response(&cache, &headers[2].hash().into(), &raw_headers[2..11]) + .is_ok()); + assert!(header_with_ancestors(invalid_successor.hash().into(), 0) + .check_response( + &cache, + &invalid_successor.hash().into(), + &[raw_invalid_successor.clone()] + ) + .is_ok()); - // Incorrect responses - assert_eq!(header_with_ancestors(invalid_successor.hash().into(), 0) - .check_response(&cache, &headers[0].hash().into(), &raw_headers[0..1]), - Err(Error::WrongHash(invalid_successor.hash(), headers[0].hash()))); - assert_eq!(header_with_ancestors(headers[0].hash().into(), 
0) - .check_response(&cache, &headers[0].hash().into(), &[]), - Err(Error::Empty)); - assert_eq!(header_with_ancestors(headers[0].hash().into(), 10) - .check_response(&cache, &headers[0].hash().into(), &raw_headers[0..10]), - Err(Error::TooFewResults(11, 10))); - assert_eq!(header_with_ancestors(headers[0].hash().into(), 9) - .check_response(&cache, &headers[0].hash().into(), &raw_headers[0..11]), - Err(Error::TooManyResults(10, 11))); + // Incorrect responses + assert_eq!( + header_with_ancestors(invalid_successor.hash().into(), 0).check_response( + &cache, + &headers[0].hash().into(), + &raw_headers[0..1] + ), + Err(Error::WrongHash( + invalid_successor.hash(), + headers[0].hash() + )) + ); + assert_eq!( + header_with_ancestors(headers[0].hash().into(), 0).check_response( + &cache, + &headers[0].hash().into(), + &[] + ), + Err(Error::Empty) + ); + assert_eq!( + header_with_ancestors(headers[0].hash().into(), 10).check_response( + &cache, + &headers[0].hash().into(), + &raw_headers[0..10] + ), + Err(Error::TooFewResults(11, 10)) + ); + assert_eq!( + header_with_ancestors(headers[0].hash().into(), 9).check_response( + &cache, + &headers[0].hash().into(), + &raw_headers[0..11] + ), + Err(Error::TooManyResults(10, 11)) + ); - let response = &[raw_headers[0].clone(), raw_headers[2].clone()]; - assert_eq!(header_with_ancestors(headers[0].hash().into(), 1) - .check_response(&cache, &headers[0].hash().into(), response), - Err(Error::WrongHeaderSequence)); + let response = &[raw_headers[0].clone(), raw_headers[2].clone()]; + assert_eq!( + header_with_ancestors(headers[0].hash().into(), 1).check_response( + &cache, + &headers[0].hash().into(), + response + ), + Err(Error::WrongHeaderSequence) + ); - let response = &[raw_invalid_successor.clone(), raw_headers[0].clone()]; - assert_eq!(header_with_ancestors(invalid_successor.hash().into(), 1) - .check_response(&cache, &invalid_successor.hash().into(), response), - Err(Error::WrongHeaderSequence)); + let response = 
&[raw_invalid_successor.clone(), raw_headers[0].clone()]; + assert_eq!( + header_with_ancestors(invalid_successor.hash().into(), 1).check_response( + &cache, + &invalid_successor.hash().into(), + response + ), + Err(Error::WrongHeaderSequence) + ); - let response = &[raw_invalid_successor.clone(), raw_headers[1].clone()]; - assert_eq!(header_with_ancestors(invalid_successor.hash().into(), 1) - .check_response(&cache, &invalid_successor.hash().into(), response), - Err(Error::WrongHeaderSequence)); - } + let response = &[raw_invalid_successor.clone(), raw_headers[1].clone()]; + assert_eq!( + header_with_ancestors(invalid_successor.hash().into(), 1).check_response( + &cache, + &invalid_successor.hash().into(), + response + ), + Err(Error::WrongHeaderSequence) + ); + } - #[test] - fn check_body() { - use rlp::RlpStream; + #[test] + fn check_body() { + use rlp::RlpStream; - let header = Header::new(); - let mut body_stream = RlpStream::new_list(2); - body_stream.begin_list(0).begin_list(0); + let header = Header::new(); + let mut body_stream = RlpStream::new_list(2); + body_stream.begin_list(0).begin_list(0); - let req = Body(encoded::Header::new(::rlp::encode(&header)).into()); + let req = Body(encoded::Header::new(::rlp::encode(&header)).into()); - let cache = Mutex::new(make_cache()); - let response = encoded::Body::new(body_stream.drain()); - assert!(req.check_response(&cache, &response).is_ok()) - } + let cache = Mutex::new(make_cache()); + let response = encoded::Body::new(body_stream.drain()); + assert!(req.check_response(&cache, &response).is_ok()) + } - #[test] - fn check_receipts() { - let receipts = (0..5).map(|_| Receipt { - outcome: TransactionOutcome::StateRoot(H256::random()), - gas_used: 21_000u64.into(), - log_bloom: Default::default(), - logs: Vec::new(), - }).collect::>(); + #[test] + fn check_receipts() { + let receipts = (0..5) + .map(|_| Receipt { + outcome: TransactionOutcome::StateRoot(H256::random()), + gas_used: 21_000u64.into(), + log_bloom: 
Default::default(), + logs: Vec::new(), + }) + .collect::>(); - let mut header = Header::new(); - let receipts_root = ::triehash::ordered_trie_root( - receipts.iter().map(|x| ::rlp::encode(x)) - ); + let mut header = Header::new(); + let receipts_root = + ::triehash::ordered_trie_root(receipts.iter().map(|x| ::rlp::encode(x))); - header.set_receipts_root(receipts_root); + header.set_receipts_root(receipts_root); - let req = BlockReceipts(encoded::Header::new(::rlp::encode(&header)).into()); + let req = BlockReceipts(encoded::Header::new(::rlp::encode(&header)).into()); - let cache = Mutex::new(make_cache()); - assert!(req.check_response(&cache, &receipts).is_ok()) - } + let cache = Mutex::new(make_cache()); + assert!(req.check_response(&cache, &receipts).is_ok()) + } - #[test] - fn check_state_proof() { - use rlp::RlpStream; + #[test] + fn check_state_proof() { + use rlp::RlpStream; - let mut root = H256::default(); - let mut db = journaldb::new_memory_db(); - let mut header = Header::new(); - header.set_number(123_456); - header.set_extra_data(b"test_header".to_vec()); + let mut root = H256::default(); + let mut db = journaldb::new_memory_db(); + let mut header = Header::new(); + header.set_number(123_456); + header.set_extra_data(b"test_header".to_vec()); - let addr = Address::random(); - let rand_acc = || { - let mut stream = RlpStream::new_list(4); - stream.append(&2u64) - .append(&100_000_000u64) - .append(&H256::random()) - .append(&H256::random()); + let addr = Address::random(); + let rand_acc = || { + let mut stream = RlpStream::new_list(4); + stream + .append(&2u64) + .append(&100_000_000u64) + .append(&H256::random()) + .append(&H256::random()); - stream.out() - }; - { - let mut trie = SecTrieDBMut::new(&mut db, &mut root); - for _ in 0..100 { - let address = Address::random(); - trie.insert(&*address, &rand_acc()).unwrap(); - } + stream.out() + }; + { + let mut trie = SecTrieDBMut::new(&mut db, &mut root); + for _ in 0..100 { + let address = 
Address::random(); + trie.insert(&*address, &rand_acc()).unwrap(); + } - trie.insert(&*addr, &rand_acc()).unwrap(); - } + trie.insert(&*addr, &rand_acc()).unwrap(); + } - let proof = { - let trie = SecTrieDB::new(&db, &root).unwrap(); - let mut recorder = Recorder::new(); + let proof = { + let trie = SecTrieDB::new(&db, &root).unwrap(); + let mut recorder = Recorder::new(); - trie.get_with(&*addr, &mut recorder).unwrap().unwrap(); + trie.get_with(&*addr, &mut recorder).unwrap().unwrap(); - recorder.drain().into_iter().map(|r| r.data).collect::>() - }; + recorder + .drain() + .into_iter() + .map(|r| r.data) + .collect::>() + }; - header.set_state_root(root.clone()); + header.set_state_root(root.clone()); - let req = Account { - header: encoded::Header::new(::rlp::encode(&header)).into(), - address: addr, - }; + let req = Account { + header: encoded::Header::new(::rlp::encode(&header)).into(), + address: addr, + }; - let cache = Mutex::new(make_cache()); - assert!(req.check_response(&cache, &proof[..]).is_ok()); - } + let cache = Mutex::new(make_cache()); + assert!(req.check_response(&cache, &proof[..]).is_ok()); + } - #[test] - fn check_code() { - let code = vec![1u8; 256]; - let code_hash = keccak(&code); - let header = Header::new(); - let req = Code { - header: encoded::Header::new(::rlp::encode(&header)).into(), - code_hash: code_hash.into(), - }; + #[test] + fn check_code() { + let code = vec![1u8; 256]; + let code_hash = keccak(&code); + let header = Header::new(); + let req = Code { + header: encoded::Header::new(::rlp::encode(&header)).into(), + code_hash: code_hash.into(), + }; - let cache = Mutex::new(make_cache()); - assert!(req.check_response(&cache, &code_hash, &code).is_ok()); - assert!(req.check_response(&cache, &code_hash, &[]).is_err()); - } + let cache = Mutex::new(make_cache()); + assert!(req.check_response(&cache, &code_hash, &code).is_ok()); + assert!(req.check_response(&cache, &code_hash, &[]).is_err()); + } } diff --git 
a/ethcore/light/src/on_demand/request_guard.rs b/ethcore/light/src/on_demand/request_guard.rs index 1c67ab0c8..6dbdb6f24 100644 --- a/ethcore/light/src/on_demand/request_guard.rs +++ b/ethcore/light/src/on_demand/request_guard.rs @@ -22,102 +22,112 @@ type RequestPolicy = failsafe::failure_policy::ConsecutiveFailures, + backoff_round: usize, + max_backoff_rounds: usize, + state: failsafe::StateMachine, } impl RequestGuard { - /// Constructor - pub fn new( - consecutive_failures: u32, - max_backoff_rounds: usize, - start_backoff: Duration, - max_backoff: Duration, - ) -> Self { - let backoff = failsafe::backoff::exponential(start_backoff, max_backoff); - // success_rate not used because only errors are registered - let policy = failsafe::failure_policy::consecutive_failures(consecutive_failures as u32, backoff); + /// Constructor + pub fn new( + consecutive_failures: u32, + max_backoff_rounds: usize, + start_backoff: Duration, + max_backoff: Duration, + ) -> Self { + let backoff = failsafe::backoff::exponential(start_backoff, max_backoff); + // success_rate not used because only errors are registered + let policy = + failsafe::failure_policy::consecutive_failures(consecutive_failures as u32, backoff); - Self { - backoff_round: 0, - max_backoff_rounds, - state: failsafe::StateMachine::new(policy, ()), - } - } + Self { + backoff_round: 0, + max_backoff_rounds, + state: failsafe::StateMachine::new(policy, ()), + } + } - /// Update the state after a `faulty` call - pub fn register_error(&mut self) -> Error { - trace!(target: "circuit_breaker", "RequestGuard; backoff_round: {}/{}, state {:?}", + /// Update the state after a `faulty` call + pub fn register_error(&mut self) -> Error { + trace!(target: "circuit_breaker", "RequestGuard; backoff_round: {}/{}, state {:?}", self.backoff_round, self.max_backoff_rounds, self.state); - if self.backoff_round >= self.max_backoff_rounds { - Error::ReachedLimit - } else if self.state.is_call_permitted() { - self.state.on_error(); - if 
self.state.is_call_permitted() { - Error::LetThrough - } else { - self.backoff_round += 1; - Error::Rejected - } - } else { - Error::Rejected - } - } + if self.backoff_round >= self.max_backoff_rounds { + Error::ReachedLimit + } else if self.state.is_call_permitted() { + self.state.on_error(); + if self.state.is_call_permitted() { + Error::LetThrough + } else { + self.backoff_round += 1; + Error::Rejected + } + } else { + Error::Rejected + } + } - /// Poll the circuit breaker, to check if the call is permitted - pub fn is_call_permitted(&self) -> bool { - self.state.is_call_permitted() - } + /// Poll the circuit breaker, to check if the call is permitted + pub fn is_call_permitted(&self) -> bool { + self.state.is_call_permitted() + } } #[cfg(test)] mod tests { - use std::iter; - use std::time::Instant; - use super::*; + use super::*; + use std::{iter, time::Instant}; - #[test] - fn one_consecutive_failure_with_10_backoffs() { - // 1, 2, 4, 5, 5 .... 5 - let binary_exp_backoff = vec![1_u64, 2, 4].into_iter().chain(iter::repeat(5_u64).take(7)); - let mut guard = RequestGuard::new(1, 10, Duration::from_secs(1), Duration::from_secs(5)); - for backoff in binary_exp_backoff { - assert_eq!(guard.register_error(), Error::Rejected); - let now = Instant::now(); - while now.elapsed() <= Duration::from_secs(backoff) {} - } - assert_eq!(guard.register_error(), Error::ReachedLimit, "10 backoffs should be error"); - } + #[test] + fn one_consecutive_failure_with_10_backoffs() { + // 1, 2, 4, 5, 5 .... 
5 + let binary_exp_backoff = vec![1_u64, 2, 4] + .into_iter() + .chain(iter::repeat(5_u64).take(7)); + let mut guard = RequestGuard::new(1, 10, Duration::from_secs(1), Duration::from_secs(5)); + for backoff in binary_exp_backoff { + assert_eq!(guard.register_error(), Error::Rejected); + let now = Instant::now(); + while now.elapsed() <= Duration::from_secs(backoff) {} + } + assert_eq!( + guard.register_error(), + Error::ReachedLimit, + "10 backoffs should be error" + ); + } - #[test] - fn five_consecutive_failures_with_3_backoffs() { - let mut guard = RequestGuard::new(5, 3, Duration::from_secs(1), Duration::from_secs(30)); + #[test] + fn five_consecutive_failures_with_3_backoffs() { + let mut guard = RequestGuard::new(5, 3, Duration::from_secs(1), Duration::from_secs(30)); - // register five errors - for _ in 0..4 { - assert_eq!(guard.register_error(), Error::LetThrough); - } + // register five errors + for _ in 0..4 { + assert_eq!(guard.register_error(), Error::LetThrough); + } - let binary_exp_backoff = [1, 2, 4]; - for backoff in &binary_exp_backoff { - assert_eq!(guard.register_error(), Error::Rejected); - let now = Instant::now(); - while now.elapsed() <= Duration::from_secs(*backoff) {} - } + let binary_exp_backoff = [1, 2, 4]; + for backoff in &binary_exp_backoff { + assert_eq!(guard.register_error(), Error::Rejected); + let now = Instant::now(); + while now.elapsed() <= Duration::from_secs(*backoff) {} + } - assert_eq!(guard.register_error(), Error::ReachedLimit, "3 backoffs should be an error"); - } + assert_eq!( + guard.register_error(), + Error::ReachedLimit, + "3 backoffs should be an error" + ); + } } diff --git a/ethcore/light/src/on_demand/response_guard.rs b/ethcore/light/src/on_demand/response_guard.rs index c4c2ac23a..c862d13ae 100644 --- a/ethcore/light/src/on_demand/response_guard.rs +++ b/ethcore/light/src/on_demand/response_guard.rs @@ -20,154 +20,180 @@ //! 1) Register non-successful responses which will reported back if it fails //! 
2) A timeout mechanism that will wait for successful response at most t seconds -use std::time::{Duration, Instant}; -use std::collections::HashMap; -use std::fmt; +use std::{ + collections::HashMap, + fmt, + time::{Duration, Instant}, +}; use super::{ResponseError, ValidityError}; /// Response guard error type #[derive(Debug, Eq, PartialEq)] pub enum Error { - /// No majority, the error reason can't be determined - NoMajority(usize), - /// Majority, with the error reason - Majority(Inner, usize, usize), + /// No majority, the error reason can't be determined + NoMajority(usize), + /// Majority, with the error reason + Majority(Inner, usize, usize), } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::Majority(err, majority, total) => { - write!(f, "Error cause was {:?}, (majority count: {} / total: {})", - err, majority, total) - } - Error::NoMajority(total) => { - write!(f, "Error cause couldn't be determined, the total number of responses was {}", total) - } - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::Majority(err, majority, total) => write!( + f, + "Error cause was {:?}, (majority count: {} / total: {})", + err, majority, total + ), + Error::NoMajority(total) => write!( + f, + "Error cause couldn't be determined, the total number of responses was {}", + total + ), + } + } } /// Dummy type to convert a generic type with no trait bounds #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)] pub enum Inner { - /// Bad execution proof - BadProof, - /// RLP decoding - Decoder, - /// Empty response - EmptyResponse, - /// Wrong header sequence - HeaderByNumber, - /// Too few results - TooFewResults, - /// Too many results - TooManyResults, - /// Trie error - Trie, - /// Unresolved header - UnresolvedHeader, - /// No responses expected. 
- Unexpected, - /// Wrong hash - WrongHash, - /// Wrong Header sequence - WrongHeaderSequence, - /// Wrong response kind - WrongKind, - /// Wrong number - WrongNumber, - /// Wrong Trie Root - WrongTrieRoot, + /// Bad execution proof + BadProof, + /// RLP decoding + Decoder, + /// Empty response + EmptyResponse, + /// Wrong header sequence + HeaderByNumber, + /// Too few results + TooFewResults, + /// Too many results + TooManyResults, + /// Trie error + Trie, + /// Unresolved header + UnresolvedHeader, + /// No responses expected. + Unexpected, + /// Wrong hash + WrongHash, + /// Wrong Header sequence + WrongHeaderSequence, + /// Wrong response kind + WrongKind, + /// Wrong number + WrongNumber, + /// Wrong Trie Root + WrongTrieRoot, } /// Handle and register responses that can fail #[derive(Debug)] pub struct ResponseGuard { - request_start: Instant, - time_to_live: Duration, - responses: HashMap, - number_responses: usize, + request_start: Instant, + time_to_live: Duration, + responses: HashMap, + number_responses: usize, } impl ResponseGuard { - /// Constructor - pub fn new(time_to_live: Duration) -> Self { - Self { - request_start: Instant::now(), - time_to_live, - responses: HashMap::new(), - number_responses: 0, - } - } + /// Constructor + pub fn new(time_to_live: Duration) -> Self { + Self { + request_start: Instant::now(), + time_to_live, + responses: HashMap::new(), + number_responses: 0, + } + } - fn into_reason(&self, err: &ResponseError) -> Inner { - match err { - ResponseError::Unexpected => Inner::Unexpected, - ResponseError::Validity(ValidityError::BadProof) => Inner::BadProof, - ResponseError::Validity(ValidityError::Decoder(_)) => Inner::Decoder, - ResponseError::Validity(ValidityError::Empty) => Inner::EmptyResponse, - ResponseError::Validity(ValidityError::HeaderByNumber) => Inner::HeaderByNumber, - ResponseError::Validity(ValidityError::TooFewResults(_, _)) => Inner::TooFewResults, - ResponseError::Validity(ValidityError::TooManyResults(_, _)) 
=> Inner::TooManyResults, - ResponseError::Validity(ValidityError::Trie(_)) => Inner::Trie, - ResponseError::Validity(ValidityError::UnresolvedHeader(_)) => Inner::UnresolvedHeader, - ResponseError::Validity(ValidityError::WrongHash(_, _)) => Inner::WrongHash, - ResponseError::Validity(ValidityError::WrongHeaderSequence) => Inner::WrongHeaderSequence, - ResponseError::Validity(ValidityError::WrongKind) => Inner::WrongKind, - ResponseError::Validity(ValidityError::WrongNumber(_, _)) => Inner::WrongNumber, - ResponseError::Validity(ValidityError::WrongTrieRoot(_, _)) => Inner::WrongTrieRoot, - } - } + fn into_reason(&self, err: &ResponseError) -> Inner { + match err { + ResponseError::Unexpected => Inner::Unexpected, + ResponseError::Validity(ValidityError::BadProof) => Inner::BadProof, + ResponseError::Validity(ValidityError::Decoder(_)) => Inner::Decoder, + ResponseError::Validity(ValidityError::Empty) => Inner::EmptyResponse, + ResponseError::Validity(ValidityError::HeaderByNumber) => Inner::HeaderByNumber, + ResponseError::Validity(ValidityError::TooFewResults(_, _)) => Inner::TooFewResults, + ResponseError::Validity(ValidityError::TooManyResults(_, _)) => Inner::TooManyResults, + ResponseError::Validity(ValidityError::Trie(_)) => Inner::Trie, + ResponseError::Validity(ValidityError::UnresolvedHeader(_)) => Inner::UnresolvedHeader, + ResponseError::Validity(ValidityError::WrongHash(_, _)) => Inner::WrongHash, + ResponseError::Validity(ValidityError::WrongHeaderSequence) => { + Inner::WrongHeaderSequence + } + ResponseError::Validity(ValidityError::WrongKind) => Inner::WrongKind, + ResponseError::Validity(ValidityError::WrongNumber(_, _)) => Inner::WrongNumber, + ResponseError::Validity(ValidityError::WrongTrieRoot(_, _)) => Inner::WrongTrieRoot, + } + } - /// Update the state after a `faulty` call - pub fn register_error(&mut self, err: &ResponseError) -> Result<(), Error> { - let err = self.into_reason(err); - *self.responses.entry(err).or_insert(0) += 1; - 
self.number_responses = self.number_responses.saturating_add(1); - trace!(target: "circuit_breaker", "ResponseGuard: {:?}", self.responses); - // The request has exceeded its timeout - if self.request_start.elapsed() >= self.time_to_live { - let (&err, &max_count) = self.responses.iter().max_by_key(|(_k, v)| *v).expect("got at least one element; qed"); - let majority = self.responses.values().filter(|v| **v == max_count).count() == 1; - if majority { - Err(Error::Majority(err, max_count, self.number_responses)) - } else { - Err(Error::NoMajority(self.number_responses)) - } - } else { - Ok(()) - } - } + /// Update the state after a `faulty` call + pub fn register_error( + &mut self, + err: &ResponseError, + ) -> Result<(), Error> { + let err = self.into_reason(err); + *self.responses.entry(err).or_insert(0) += 1; + self.number_responses = self.number_responses.saturating_add(1); + trace!(target: "circuit_breaker", "ResponseGuard: {:?}", self.responses); + // The request has exceeded its timeout + if self.request_start.elapsed() >= self.time_to_live { + let (&err, &max_count) = self + .responses + .iter() + .max_by_key(|(_k, v)| *v) + .expect("got at least one element; qed"); + let majority = self.responses.values().filter(|v| **v == max_count).count() == 1; + if majority { + Err(Error::Majority(err, max_count, self.number_responses)) + } else { + Err(Error::NoMajority(self.number_responses)) + } + } else { + Ok(()) + } + } } #[cfg(test)] mod tests { - use std::thread; - use super::*; + use super::*; + use std::thread; - #[test] - fn test_basic_by_majority() { - let mut guard = ResponseGuard::new(Duration::from_secs(5)); - guard.register_error(&ResponseError::Validity(ValidityError::Empty)).unwrap(); - guard.register_error(&ResponseError::Unexpected).unwrap(); - guard.register_error(&ResponseError::Unexpected).unwrap(); - guard.register_error(&ResponseError::Unexpected).unwrap(); - thread::sleep(Duration::from_secs(5)); + #[test] + fn test_basic_by_majority() { + let 
mut guard = ResponseGuard::new(Duration::from_secs(5)); + guard + .register_error(&ResponseError::Validity(ValidityError::Empty)) + .unwrap(); + guard.register_error(&ResponseError::Unexpected).unwrap(); + guard.register_error(&ResponseError::Unexpected).unwrap(); + guard.register_error(&ResponseError::Unexpected).unwrap(); + thread::sleep(Duration::from_secs(5)); - assert_eq!(guard.register_error(&ResponseError::Validity(ValidityError::WrongKind)), Err(Error::Majority(Inner::Unexpected, 3, 5))); - } + assert_eq!( + guard.register_error(&ResponseError::Validity(ValidityError::WrongKind)), + Err(Error::Majority(Inner::Unexpected, 3, 5)) + ); + } - #[test] - fn test_no_majority() { - let mut guard = ResponseGuard::new(Duration::from_secs(5)); - guard.register_error(&ResponseError::Validity(ValidityError::Empty)).unwrap(); - guard.register_error(&ResponseError::Validity(ValidityError::Empty)).unwrap(); - guard.register_error(&ResponseError::Unexpected).unwrap(); - guard.register_error(&ResponseError::Unexpected).unwrap(); - thread::sleep(Duration::from_secs(5)); + #[test] + fn test_no_majority() { + let mut guard = ResponseGuard::new(Duration::from_secs(5)); + guard + .register_error(&ResponseError::Validity(ValidityError::Empty)) + .unwrap(); + guard + .register_error(&ResponseError::Validity(ValidityError::Empty)) + .unwrap(); + guard.register_error(&ResponseError::Unexpected).unwrap(); + guard.register_error(&ResponseError::Unexpected).unwrap(); + thread::sleep(Duration::from_secs(5)); - assert_eq!(guard.register_error(&ResponseError::Validity(ValidityError::WrongKind)), Err(Error::NoMajority(5))); - } + assert_eq!( + guard.register_error(&ResponseError::Validity(ValidityError::WrongKind)), + Err(Error::NoMajority(5)) + ); + } } diff --git a/ethcore/light/src/on_demand/tests.rs b/ethcore/light/src/on_demand/tests.rs index 49ec35f10..2085174a7 100644 --- a/ethcore/light/src/on_demand/tests.rs +++ b/ethcore/light/src/on_demand/tests.rs @@ -17,582 +17,700 @@ //! 
Tests for the on-demand service. use cache::Cache; -use futures::Future; -use network::{PeerId, NodeId}; -use net::*; use common_types::header::Header; use ethereum_types::H256; +use futures::Future; +use net::*; +use network::{NodeId, PeerId}; use parking_lot::Mutex; use request::{self as basic_request, Response}; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use std::thread; +use std::{ + sync::Arc, + thread, + time::{Duration, Instant}, +}; -use super::{request, OnDemand, OnDemandRequester, Peer, HeaderRef}; +use super::{request, HeaderRef, OnDemand, OnDemandRequester, Peer}; // useful contexts to give the service. enum Context { - NoOp, - WithPeer(PeerId), - RequestFrom(PeerId, ReqId), - Punish(PeerId), - FaultyRequest, + NoOp, + WithPeer(PeerId), + RequestFrom(PeerId, ReqId), + Punish(PeerId), + FaultyRequest, } impl EventContext for Context { - fn peer(&self) -> PeerId { - match *self { - Context::WithPeer(id) - | Context::RequestFrom(id, _) - | Context::Punish(id) => id, - | Context::FaultyRequest => 0, - _ => panic!("didn't expect to have peer queried."), - } - } + fn peer(&self) -> PeerId { + match *self { + Context::WithPeer(id) | Context::RequestFrom(id, _) | Context::Punish(id) => id, + Context::FaultyRequest => 0, + _ => panic!("didn't expect to have peer queried."), + } + } - fn as_basic(&self) -> &BasicContext { self } + fn as_basic(&self) -> &BasicContext { + self + } } impl BasicContext for Context { - /// Returns the relevant's peer persistent Id (aka NodeId). - fn persistent_peer_id(&self, _: PeerId) -> Option { - panic!("didn't expect to provide persistent ID") - } + /// Returns the relevant's peer persistent Id (aka NodeId). 
+ fn persistent_peer_id(&self, _: PeerId) -> Option { + panic!("didn't expect to provide persistent ID") + } - fn request_from(&self, peer_id: PeerId, _: ::request::NetworkRequests) -> Result { - match *self { - Context::RequestFrom(id, req_id) => if peer_id == id { Ok(req_id) } else { Err(Error::NoCredits) }, - Context::FaultyRequest => Err(Error::NoCredits), - _ => panic!("didn't expect to have requests dispatched."), - } - } + fn request_from(&self, peer_id: PeerId, _: ::request::NetworkRequests) -> Result { + match *self { + Context::RequestFrom(id, req_id) => { + if peer_id == id { + Ok(req_id) + } else { + Err(Error::NoCredits) + } + } + Context::FaultyRequest => Err(Error::NoCredits), + _ => panic!("didn't expect to have requests dispatched."), + } + } - fn make_announcement(&self, _: Announcement) { - panic!("didn't expect to make announcement") - } + fn make_announcement(&self, _: Announcement) { + panic!("didn't expect to make announcement") + } - fn disconnect_peer(&self, id: PeerId) { - self.disable_peer(id) - } + fn disconnect_peer(&self, id: PeerId) { + self.disable_peer(id) + } - fn disable_peer(&self, peer_id: PeerId) { - match *self { - Context::Punish(id) if id == peer_id => {}, - _ => panic!("Unexpectedly punished peer."), - } - } + fn disable_peer(&self, peer_id: PeerId) { + match *self { + Context::Punish(id) if id == peer_id => {} + _ => panic!("Unexpectedly punished peer."), + } + } } // test harness. 
struct Harness { - service: OnDemand, + service: OnDemand, } impl Harness { - fn create() -> Self { - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(60)))); - Harness { - service: OnDemand::new_test( - cache, - // Response `time_to_live` - Duration::from_secs(5), - // Request start backoff - Duration::from_secs(1), - // Request max backoff - Duration::from_secs(20), - super::DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS, - super::DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS - ) - } - } + fn create() -> Self { + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(60), + ))); + Harness { + service: OnDemand::new_test( + cache, + // Response `time_to_live` + Duration::from_secs(5), + // Request start backoff + Duration::from_secs(1), + // Request max backoff + Duration::from_secs(20), + super::DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS, + super::DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS, + ), + } + } - fn inject_peer(&self, id: PeerId, peer: Peer) { - self.service.peers.write().insert(id, peer); - } + fn inject_peer(&self, id: PeerId, peer: Peer) { + self.service.peers.write().insert(id, peer); + } } fn dummy_status() -> Status { - Status { - protocol_version: 1, - network_id: 999, - head_td: 1.into(), - head_hash: H256::default(), - head_num: 1359, - genesis_hash: H256::default(), - last_head: None, - } + Status { + protocol_version: 1, + network_id: 999, + head_td: 1.into(), + head_hash: H256::default(), + head_num: 1359, + genesis_hash: H256::default(), + last_head: None, + } } fn dummy_capabilities() -> Capabilities { - Capabilities { - serve_headers: true, - serve_chain_since: Some(1), - serve_state_since: Some(1), - tx_relay: true, - } + Capabilities { + serve_headers: true, + serve_chain_since: Some(1), + serve_state_since: Some(1), + tx_relay: true, + } } #[test] fn detects_hangup() { - let on_demand = Harness::create().service; - let result = on_demand.request_raw( - &Context::NoOp, - 
vec![request::HeaderByHash(H256::default().into()).into()], - ); + let on_demand = Harness::create().service; + let result = on_demand.request_raw( + &Context::NoOp, + vec![request::HeaderByHash(H256::default().into()).into()], + ); - assert_eq!(on_demand.pending.read().len(), 1); - drop(result); + assert_eq!(on_demand.pending.read().len(), 1); + drop(result); - on_demand.dispatch_pending(&Context::NoOp); - assert!(on_demand.pending.read().is_empty()); + on_demand.dispatch_pending(&Context::NoOp); + assert!(on_demand.pending.read().is_empty()); } #[test] fn single_request() { - let harness = Harness::create(); + let harness = Harness::create(); - let peer_id = 10101; - let req_id = ReqId(14426); + let peer_id = 10101; + let req_id = ReqId(14426); - harness.inject_peer(peer_id, Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - }); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - let header = Header::default(); - let encoded = header.encoded(); + let header = Header::default(); + let encoded = header.encoded(); - let recv = harness.service.request_raw( - &Context::NoOp, - vec![request::HeaderByHash(header.hash().into()).into()] - ).unwrap(); + let recv = harness + .service + .request_raw( + &Context::NoOp, + vec![request::HeaderByHash(header.hash().into()).into()], + ) + .unwrap(); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id)); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_id, req_id)); - assert_eq!(harness.service.pending.read().len(), 0); + assert_eq!(harness.service.pending.read().len(), 0); - harness.service.on_responses( - &Context::WithPeer(peer_id), - req_id, - &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] })] - ); + harness.service.on_responses( + &Context::WithPeer(peer_id), + 
req_id, + &[Response::Headers(basic_request::HeadersResponse { + headers: vec![encoded], + })], + ); - assert!(recv.wait().is_ok()); + assert!(recv.wait().is_ok()); } #[test] fn no_capabilities() { - let harness = Harness::create(); + let harness = Harness::create(); - let peer_id = 10101; + let peer_id = 10101; - let mut capabilities = dummy_capabilities(); - capabilities.serve_headers = false; + let mut capabilities = dummy_capabilities(); + capabilities.serve_headers = false; - harness.inject_peer(peer_id, Peer { - status: dummy_status(), - capabilities: capabilities, - }); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: capabilities, + }, + ); - let _recv = harness.service.request_raw( - &Context::NoOp, - vec![request::HeaderByHash(H256::default().into()).into()] - ).unwrap(); + let _recv = harness + .service + .request_raw( + &Context::NoOp, + vec![request::HeaderByHash(H256::default().into()).into()], + ) + .unwrap(); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.service.dispatch_pending(&Context::NoOp); + harness.service.dispatch_pending(&Context::NoOp); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); } #[test] fn reassign() { - let harness = Harness::create(); + let harness = Harness::create(); - let peer_ids = (10101, 12345); - let req_ids = (ReqId(14426), ReqId(555)); + let peer_ids = (10101, 12345); + let req_ids = (ReqId(14426), ReqId(555)); - harness.inject_peer(peer_ids.0, Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - }); + harness.inject_peer( + peer_ids.0, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - let header = Header::default(); - let encoded = header.encoded(); + let header = Header::default(); + let encoded = header.encoded(); - let recv = harness.service.request_raw( - &Context::NoOp, - 
vec![request::HeaderByHash(header.hash().into()).into()] - ).unwrap(); + let recv = harness + .service + .request_raw( + &Context::NoOp, + vec![request::HeaderByHash(header.hash().into()).into()], + ) + .unwrap(); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.service.dispatch_pending(&Context::RequestFrom(peer_ids.0, req_ids.0)); - assert_eq!(harness.service.pending.read().len(), 0); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_ids.0, req_ids.0)); + assert_eq!(harness.service.pending.read().len(), 0); - harness.service.on_disconnect(&Context::WithPeer(peer_ids.0), &[req_ids.0]); - assert_eq!(harness.service.pending.read().len(), 1); + harness + .service + .on_disconnect(&Context::WithPeer(peer_ids.0), &[req_ids.0]); + assert_eq!(harness.service.pending.read().len(), 1); - harness.inject_peer(peer_ids.1, Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - }); + harness.inject_peer( + peer_ids.1, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - harness.service.dispatch_pending(&Context::RequestFrom(peer_ids.1, req_ids.1)); - assert_eq!(harness.service.pending.read().len(), 0); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_ids.1, req_ids.1)); + assert_eq!(harness.service.pending.read().len(), 0); - harness.service.on_responses( - &Context::WithPeer(peer_ids.1), - req_ids.1, - &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] })] - ); + harness.service.on_responses( + &Context::WithPeer(peer_ids.1), + req_ids.1, + &[Response::Headers(basic_request::HeadersResponse { + headers: vec![encoded], + })], + ); - assert!(recv.wait().is_ok()); + assert!(recv.wait().is_ok()); } #[test] fn partial_response() { - let harness = Harness::create(); + let harness = Harness::create(); - let peer_id = 111; - let req_ids = (ReqId(14426), ReqId(555)); + let peer_id = 111; + let req_ids = 
(ReqId(14426), ReqId(555)); - harness.inject_peer(peer_id, Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - }); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - let make = |num| { - let mut hdr = Header::default(); - hdr.set_number(num); + let make = |num| { + let mut hdr = Header::default(); + hdr.set_number(num); - let encoded = hdr.encoded(); - (hdr, encoded) - }; + let encoded = hdr.encoded(); + (hdr, encoded) + }; - let (header1, encoded1) = make(5); - let (header2, encoded2) = make(23452); + let (header1, encoded1) = make(5); + let (header2, encoded2) = make(23452); - // request two headers. - let recv = harness.service.request_raw( - &Context::NoOp, - vec![ - request::HeaderByHash(header1.hash().into()).into(), - request::HeaderByHash(header2.hash().into()).into(), - ], - ).unwrap(); + // request two headers. + let recv = harness + .service + .request_raw( + &Context::NoOp, + vec![ + request::HeaderByHash(header1.hash().into()).into(), + request::HeaderByHash(header2.hash().into()).into(), + ], + ) + .unwrap(); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0)); - assert_eq!(harness.service.pending.read().len(), 0); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0)); + assert_eq!(harness.service.pending.read().len(), 0); - // supply only the first one. - harness.service.on_responses( - &Context::WithPeer(peer_id), - req_ids.0, - &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded1] })] - ); + // supply only the first one. 
+ harness.service.on_responses( + &Context::WithPeer(peer_id), + req_ids.0, + &[Response::Headers(basic_request::HeadersResponse { + headers: vec![encoded1], + })], + ); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1)); - assert_eq!(harness.service.pending.read().len(), 0); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1)); + assert_eq!(harness.service.pending.read().len(), 0); - // supply the next one. - harness.service.on_responses( - &Context::WithPeer(peer_id), - req_ids.1, - &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded2] })] - ); + // supply the next one. + harness.service.on_responses( + &Context::WithPeer(peer_id), + req_ids.1, + &[Response::Headers(basic_request::HeadersResponse { + headers: vec![encoded2], + })], + ); - assert!(recv.wait().is_ok()); + assert!(recv.wait().is_ok()); } #[test] fn part_bad_part_good() { - let harness = Harness::create(); + let harness = Harness::create(); - let peer_id = 111; - let req_ids = (ReqId(14426), ReqId(555)); + let peer_id = 111; + let req_ids = (ReqId(14426), ReqId(555)); - harness.inject_peer(peer_id, Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - }); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - let make = |num| { - let mut hdr = Header::default(); - hdr.set_number(num); + let make = |num| { + let mut hdr = Header::default(); + hdr.set_number(num); - let encoded = hdr.encoded(); - (hdr, encoded) - }; + let encoded = hdr.encoded(); + (hdr, encoded) + }; - let (header1, encoded1) = make(5); - let (header2, encoded2) = make(23452); + let (header1, encoded1) = make(5); + let (header2, encoded2) = make(23452); - // request two headers. 
- let recv = harness.service.request_raw( - &Context::NoOp, - vec![ - request::HeaderByHash(header1.hash().into()).into(), - request::HeaderByHash(header2.hash().into()).into(), - ], - ).unwrap(); + // request two headers. + let recv = harness + .service + .request_raw( + &Context::NoOp, + vec![ + request::HeaderByHash(header1.hash().into()).into(), + request::HeaderByHash(header2.hash().into()).into(), + ], + ) + .unwrap(); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0)); - assert_eq!(harness.service.pending.read().len(), 0); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0)); + assert_eq!(harness.service.pending.read().len(), 0); - // supply only the first one, but followed by the wrong kind of response. - // the first header should be processed. - harness.service.on_responses( - &Context::Punish(peer_id), - req_ids.0, - &[ - Response::Headers(basic_request::HeadersResponse { headers: vec![encoded1] }), - Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] } ), - ] - ); + // supply only the first one, but followed by the wrong kind of response. + // the first header should be processed. 
+ harness.service.on_responses( + &Context::Punish(peer_id), + req_ids.0, + &[ + Response::Headers(basic_request::HeadersResponse { + headers: vec![encoded1], + }), + Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }), + ], + ); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.inject_peer(peer_id, Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - }); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1)); - assert_eq!(harness.service.pending.read().len(), 0); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1)); + assert_eq!(harness.service.pending.read().len(), 0); - // supply the next one. - harness.service.on_responses( - &Context::WithPeer(peer_id), - req_ids.1, - &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded2] })] - ); + // supply the next one. 
+ harness.service.on_responses( + &Context::WithPeer(peer_id), + req_ids.1, + &[Response::Headers(basic_request::HeadersResponse { + headers: vec![encoded2], + })], + ); - assert!(recv.wait().is_ok()); + assert!(recv.wait().is_ok()); } #[test] fn wrong_kind() { - let harness = Harness::create(); + let harness = Harness::create(); - let peer_id = 10101; - let req_id = ReqId(14426); + let peer_id = 10101; + let req_id = ReqId(14426); - harness.inject_peer(peer_id, Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - }); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - let _recv = harness.service.request_raw( - &Context::NoOp, - vec![request::HeaderByHash(H256::default().into()).into()] - ).unwrap(); + let _recv = harness + .service + .request_raw( + &Context::NoOp, + vec![request::HeaderByHash(H256::default().into()).into()], + ) + .unwrap(); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id)); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_id, req_id)); - assert_eq!(harness.service.pending.read().len(), 0); + assert_eq!(harness.service.pending.read().len(), 0); - harness.service.on_responses( - &Context::Punish(peer_id), - req_id, - &[Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] })] - ); + harness.service.on_responses( + &Context::Punish(peer_id), + req_id, + &[Response::Receipts(basic_request::ReceiptsResponse { + receipts: vec![], + })], + ); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); } #[test] fn back_references() { - let harness = Harness::create(); + let harness = Harness::create(); - let peer_id = 10101; - let req_id = ReqId(14426); + let peer_id = 10101; + let req_id = ReqId(14426); - harness.inject_peer(peer_id, Peer { - status: 
dummy_status(), - capabilities: dummy_capabilities(), - }); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - let header = Header::default(); - let encoded = header.encoded(); + let header = Header::default(); + let encoded = header.encoded(); - let recv = harness.service.request_raw( - &Context::NoOp, - vec![ - request::HeaderByHash(header.hash().into()).into(), - request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(), - ] - ).unwrap(); + let recv = harness + .service + .request_raw( + &Context::NoOp, + vec![ + request::HeaderByHash(header.hash().into()).into(), + request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(), + ], + ) + .unwrap(); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id)); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_id, req_id)); - assert_eq!(harness.service.pending.read().len(), 0); + assert_eq!(harness.service.pending.read().len(), 0); - harness.service.on_responses( - &Context::WithPeer(peer_id), - req_id, - &[ - Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] }), - Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }), - ] - ); + harness.service.on_responses( + &Context::WithPeer(peer_id), + req_id, + &[ + Response::Headers(basic_request::HeadersResponse { + headers: vec![encoded], + }), + Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }), + ], + ); - assert!(recv.wait().is_ok()); + assert!(recv.wait().is_ok()); } #[test] #[should_panic] fn bad_back_reference() { - let harness = Harness::create(); + let harness = Harness::create(); - let header = Header::default(); + let header = Header::default(); - let _ = harness.service.request_raw( - &Context::NoOp, - vec![ - 
request::HeaderByHash(header.hash().into()).into(), - request::BlockReceipts(HeaderRef::Unresolved(1, header.hash().into())).into(), - ] - ).unwrap(); + let _ = harness + .service + .request_raw( + &Context::NoOp, + vec![ + request::HeaderByHash(header.hash().into()).into(), + request::BlockReceipts(HeaderRef::Unresolved(1, header.hash().into())).into(), + ], + ) + .unwrap(); } #[test] fn fill_from_cache() { - let harness = Harness::create(); + let harness = Harness::create(); - let peer_id = 10101; - let req_id = ReqId(14426); + let peer_id = 10101; + let req_id = ReqId(14426); - harness.inject_peer(peer_id, Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - }); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - let header = Header::default(); - let encoded = header.encoded(); + let header = Header::default(); + let encoded = header.encoded(); - let recv = harness.service.request_raw( - &Context::NoOp, - vec![ - request::HeaderByHash(header.hash().into()).into(), - request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(), - ] - ).unwrap(); + let recv = harness + .service + .request_raw( + &Context::NoOp, + vec![ + request::HeaderByHash(header.hash().into()).into(), + request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(), + ], + ) + .unwrap(); - assert_eq!(harness.service.pending.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 1); - harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id)); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_id, req_id)); - assert_eq!(harness.service.pending.read().len(), 0); + assert_eq!(harness.service.pending.read().len(), 0); - harness.service.on_responses( - &Context::WithPeer(peer_id), - req_id, - &[ - Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] }), - ] - ); + harness.service.on_responses( + 
&Context::WithPeer(peer_id), + req_id, + &[Response::Headers(basic_request::HeadersResponse { + headers: vec![encoded], + })], + ); - assert!(recv.wait().is_ok()); + assert!(recv.wait().is_ok()); } #[test] fn request_without_response_should_backoff_and_then_be_dropped() { - let harness = Harness::create(); - let peer_id = 0; - let req_id = ReqId(13); + let harness = Harness::create(); + let peer_id = 0; + let req_id = ReqId(13); - harness.inject_peer( - peer_id, - Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - } - ); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - let binary_exp_backoff: Vec = vec![1, 2, 4, 8, 16, 20, 20, 20, 20, 20]; + let binary_exp_backoff: Vec = vec![1, 2, 4, 8, 16, 20, 20, 20, 20, 20]; - let _recv = harness.service.request_raw( - &Context::RequestFrom(peer_id, req_id), - vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()], - ).unwrap(); - assert_eq!(harness.service.pending.read().len(), 1); + let _recv = harness + .service + .request_raw( + &Context::RequestFrom(peer_id, req_id), + vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()], + ) + .unwrap(); + assert_eq!(harness.service.pending.read().len(), 1); - for backoff in &binary_exp_backoff { - harness.service.dispatch_pending(&Context::FaultyRequest); - assert_eq!(harness.service.pending.read().len(), 1, "Request should not be dropped"); - let now = Instant::now(); - while now.elapsed() < Duration::from_secs(*backoff) {} - } + for backoff in &binary_exp_backoff { + harness.service.dispatch_pending(&Context::FaultyRequest); + assert_eq!( + harness.service.pending.read().len(), + 1, + "Request should not be dropped" + ); + let now = Instant::now(); + while now.elapsed() < Duration::from_secs(*backoff) {} + } - harness.service.dispatch_pending(&Context::FaultyRequest); - assert_eq!(harness.service.pending.read().len(), 0, "Request exceeded the 10 
backoff rounds should be dropped"); + harness.service.dispatch_pending(&Context::FaultyRequest); + assert_eq!( + harness.service.pending.read().len(), + 0, + "Request exceeded the 10 backoff rounds should be dropped" + ); } #[test] fn empty_responses_exceeds_limit_should_be_dropped() { - let harness = Harness::create(); - let peer_id = 0; - let req_id = ReqId(13); + let harness = Harness::create(); + let peer_id = 0; + let req_id = ReqId(13); - harness.inject_peer( - peer_id, - Peer { - status: dummy_status(), - capabilities: dummy_capabilities(), - } - ); + harness.inject_peer( + peer_id, + Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }, + ); - let _recv = harness.service.request_raw( - &Context::RequestFrom(peer_id, req_id), - vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()], - ).unwrap(); + let _recv = harness + .service + .request_raw( + &Context::RequestFrom(peer_id, req_id), + vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()], + ) + .unwrap(); - harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id)); + harness + .service + .dispatch_pending(&Context::RequestFrom(peer_id, req_id)); - assert_eq!(harness.service.pending.read().len(), 0); - assert_eq!(harness.service.in_transit.read().len(), 1); + assert_eq!(harness.service.pending.read().len(), 0); + assert_eq!(harness.service.in_transit.read().len(), 1); - let now = Instant::now(); + let now = Instant::now(); - // Send `empty responses` in the current time window - // Use only half of the `time_window` because we can't be sure exactly - // when the window started and the clock accurancy - while now.elapsed() < harness.service.response_time_window / 2 { - harness.service.on_responses( - &Context::RequestFrom(13, req_id), - req_id, - &[] - ); - assert!(harness.service.pending.read().len() != 0); - let pending = harness.service.pending.write().remove(0); - harness.service.in_transit.write().insert(req_id, 
pending); - } + // Send `empty responses` in the current time window + // Use only half of the `time_window` because we can't be sure exactly + // when the window started and the clock accurancy + while now.elapsed() < harness.service.response_time_window / 2 { + harness + .service + .on_responses(&Context::RequestFrom(13, req_id), req_id, &[]); + assert!(harness.service.pending.read().len() != 0); + let pending = harness.service.pending.write().remove(0); + harness.service.in_transit.write().insert(req_id, pending); + } - // Make sure we passed the first `time window` - thread::sleep(Duration::from_secs(5)); + // Make sure we passed the first `time window` + thread::sleep(Duration::from_secs(5)); - // Now, response is in failure state but need another response to be `polled` - harness.service.on_responses( - &Context::RequestFrom(13, req_id), - req_id, - &[] - ); + // Now, response is in failure state but need another response to be `polled` + harness + .service + .on_responses(&Context::RequestFrom(13, req_id), req_id, &[]); - assert!(harness.service.in_transit.read().is_empty()); - assert!(harness.service.pending.read().is_empty()); + assert!(harness.service.in_transit.read().is_empty()); + assert!(harness.service.pending.read().is_empty()); } diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 309ff6ec1..20d736857 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -19,16 +19,17 @@ use std::sync::Arc; -use common_types::blockchain_info::BlockChainInfo; -use common_types::encoded; -use common_types::ids::BlockId; -use common_types::transaction::PendingTransaction; -use ethcore::client::{BlockChainClient, ProvingBlockChainClient, ChainInfo, BlockInfo as ClientBlockInfo}; +use common_types::{ + blockchain_info::BlockChainInfo, encoded, ids::BlockId, transaction::PendingTransaction, +}; +use ethcore::client::{ + BlockChainClient, BlockInfo as ClientBlockInfo, ChainInfo, ProvingBlockChainClient, +}; use 
ethereum_types::H256; use parking_lot::RwLock; use cht::{self, BlockInfo}; -use client::{LightChainClient, AsLightClient}; +use client::{AsLightClient, LightChainClient}; use transaction_queue::TransactionQueue; use request; @@ -38,373 +39,440 @@ pub const MAX_HEADERS_PER_REQUEST: u64 = 512; /// Defines the operations that a provider for the light subprotocol must fulfill. pub trait Provider: Send + Sync { - /// Provide current blockchain info. - fn chain_info(&self) -> BlockChainInfo; + /// Provide current blockchain info. + fn chain_info(&self) -> BlockChainInfo; - /// Find the depth of a common ancestor between two blocks. - /// If either block is unknown or an ancestor can't be found - /// then return `None`. - fn reorg_depth(&self, a: &H256, b: &H256) -> Option; + /// Find the depth of a common ancestor between two blocks. + /// If either block is unknown or an ancestor can't be found + /// then return `None`. + fn reorg_depth(&self, a: &H256, b: &H256) -> Option; - /// Earliest block where state queries are available. - /// If `None`, no state queries are servable. - fn earliest_state(&self) -> Option; + /// Earliest block where state queries are available. + /// If `None`, no state queries are servable. + fn earliest_state(&self) -> Option; - /// Provide a list of headers starting at the requested block, - /// possibly in reverse and skipping `skip` at a time. - /// - /// The returned vector may have any length in the range [0, `max`], but the - /// results within must adhere to the `skip` and `reverse` parameters. - fn block_headers(&self, req: request::CompleteHeadersRequest) -> Option { - use request::HashOrNumber; + /// Provide a list of headers starting at the requested block, + /// possibly in reverse and skipping `skip` at a time. + /// + /// The returned vector may have any length in the range [0, `max`], but the + /// results within must adhere to the `skip` and `reverse` parameters. 
+ fn block_headers( + &self, + req: request::CompleteHeadersRequest, + ) -> Option { + use request::HashOrNumber; - if req.max == 0 { return None } + if req.max == 0 { + return None; + } - let best_num = self.chain_info().best_block_number; - let start_num = match req.start { - HashOrNumber::Number(start_num) => start_num, - HashOrNumber::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { - None => { - trace!(target: "pip_provider", "Unknown block hash {} requested", hash); - return None; - } - Some(header) => { - let num = header.number(); - let canon_hash = self.block_header(BlockId::Number(num)) - .map(|h| h.hash()); + let best_num = self.chain_info().best_block_number; + let start_num = match req.start { + HashOrNumber::Number(start_num) => start_num, + HashOrNumber::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { + None => { + trace!(target: "pip_provider", "Unknown block hash {} requested", hash); + return None; + } + Some(header) => { + let num = header.number(); + let canon_hash = self.block_header(BlockId::Number(num)).map(|h| h.hash()); - if req.max == 1 || canon_hash != Some(hash) { - // Non-canonical header or single header requested. - return Some(::request::HeadersResponse { - headers: vec![header], - }) - } + if req.max == 1 || canon_hash != Some(hash) { + // Non-canonical header or single header requested. 
+ return Some(::request::HeadersResponse { + headers: vec![header], + }); + } - num - } - } - }; + num + } + }, + }; - let max = ::std::cmp::min(MAX_HEADERS_PER_REQUEST, req.max); + let max = ::std::cmp::min(MAX_HEADERS_PER_REQUEST, req.max); - let headers: Vec<_> = (0_u64..max) - .map(|x: u64| x.saturating_mul(req.skip.saturating_add(1))) - .take_while(|&x| if req.reverse { x < start_num } else { best_num.saturating_sub(start_num) >= x }) - .map(|x| if req.reverse { start_num.saturating_sub(x) } else { start_num.saturating_add(x) }) - .map(|x| self.block_header(BlockId::Number(x))) - .take_while(|x| x.is_some()) - .flat_map(|x| x) - .collect(); + let headers: Vec<_> = (0_u64..max) + .map(|x: u64| x.saturating_mul(req.skip.saturating_add(1))) + .take_while(|&x| { + if req.reverse { + x < start_num + } else { + best_num.saturating_sub(start_num) >= x + } + }) + .map(|x| { + if req.reverse { + start_num.saturating_sub(x) + } else { + start_num.saturating_add(x) + } + }) + .map(|x| self.block_header(BlockId::Number(x))) + .take_while(|x| x.is_some()) + .flat_map(|x| x) + .collect(); - if headers.is_empty() { - None - } else { - Some(::request::HeadersResponse { headers }) - } - } + if headers.is_empty() { + None + } else { + Some(::request::HeadersResponse { headers }) + } + } - /// Get a block header by id. - fn block_header(&self, id: BlockId) -> Option; + /// Get a block header by id. + fn block_header(&self, id: BlockId) -> Option; - /// Get a transaction index by hash. - fn transaction_index(&self, req: request::CompleteTransactionIndexRequest) - -> Option; + /// Get a transaction index by hash. + fn transaction_index( + &self, + req: request::CompleteTransactionIndexRequest, + ) -> Option; - /// Fulfill a block body request. - fn block_body(&self, req: request::CompleteBodyRequest) -> Option; + /// Fulfill a block body request. + fn block_body(&self, req: request::CompleteBodyRequest) -> Option; - /// Fulfill a request for block receipts. 
- fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option; + /// Fulfill a request for block receipts. + fn block_receipts( + &self, + req: request::CompleteReceiptsRequest, + ) -> Option; - /// Get an account proof. - fn account_proof(&self, req: request::CompleteAccountRequest) -> Option; + /// Get an account proof. + fn account_proof( + &self, + req: request::CompleteAccountRequest, + ) -> Option; - /// Get a storage proof. - fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option; + /// Get a storage proof. + fn storage_proof( + &self, + req: request::CompleteStorageRequest, + ) -> Option; - /// Provide contract code for the specified (block_hash, code_hash) pair. - fn contract_code(&self, req: request::CompleteCodeRequest) -> Option; + /// Provide contract code for the specified (block_hash, code_hash) pair. + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option; - /// Provide a header proof from a given Canonical Hash Trie as well as the - /// corresponding header. - fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option; + /// Provide a header proof from a given Canonical Hash Trie as well as the + /// corresponding header. + fn header_proof( + &self, + req: request::CompleteHeaderProofRequest, + ) -> Option; - /// Provide pending transactions. - fn transactions_to_propagate(&self) -> Vec; + /// Provide pending transactions. + fn transactions_to_propagate(&self) -> Vec; - /// Provide a proof-of-execution for the given transaction proof request. - /// Returns a vector of all state items necessary to execute the transaction. - fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option; + /// Provide a proof-of-execution for the given transaction proof request. + /// Returns a vector of all state items necessary to execute the transaction. 
+ fn transaction_proof( + &self, + req: request::CompleteExecutionRequest, + ) -> Option; - /// Provide epoch signal data at given block hash. This should be just the - fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option; + /// Provide epoch signal data at given block hash. This should be just the + fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option; } // Implementation of a light client data provider for a client. impl Provider for T { - fn chain_info(&self) -> BlockChainInfo { - ChainInfo::chain_info(self) - } + fn chain_info(&self) -> BlockChainInfo { + ChainInfo::chain_info(self) + } - fn reorg_depth(&self, a: &H256, b: &H256) -> Option { - self.tree_route(a, b).map(|route| route.index as u64) - } + fn reorg_depth(&self, a: &H256, b: &H256) -> Option { + self.tree_route(a, b).map(|route| route.index as u64) + } - fn earliest_state(&self) -> Option { - Some(self.pruning_info().earliest_state) - } + fn earliest_state(&self) -> Option { + Some(self.pruning_info().earliest_state) + } - fn block_header(&self, id: BlockId) -> Option { - ClientBlockInfo::block_header(self, id) - } + fn block_header(&self, id: BlockId) -> Option { + ClientBlockInfo::block_header(self, id) + } - fn transaction_index(&self, req: request::CompleteTransactionIndexRequest) - -> Option - { - use common_types::ids::TransactionId; + fn transaction_index( + &self, + req: request::CompleteTransactionIndexRequest, + ) -> Option { + use common_types::ids::TransactionId; - self.transaction_receipt(TransactionId::Hash(req.hash)).map(|receipt| request::TransactionIndexResponse { - num: receipt.block_number, - hash: receipt.block_hash, - index: receipt.transaction_index as u64, - }) - } + self.transaction_receipt(TransactionId::Hash(req.hash)) + .map(|receipt| request::TransactionIndexResponse { + num: receipt.block_number, + hash: receipt.block_hash, + index: receipt.transaction_index as u64, + }) + } - fn block_body(&self, req: request::CompleteBodyRequest) -> 
Option { - BlockChainClient::block_body(self, BlockId::Hash(req.hash)) - .map(|body| ::request::BodyResponse { body }) - } + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { + BlockChainClient::block_body(self, BlockId::Hash(req.hash)) + .map(|body| ::request::BodyResponse { body }) + } - fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { - BlockChainClient::block_receipts(self, &req.hash) - .map(|x| ::request::ReceiptsResponse { receipts: x.receipts }) - } + fn block_receipts( + &self, + req: request::CompleteReceiptsRequest, + ) -> Option { + BlockChainClient::block_receipts(self, &req.hash).map(|x| ::request::ReceiptsResponse { + receipts: x.receipts, + }) + } - fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { - self.prove_account(req.address_hash, BlockId::Hash(req.block_hash)).map(|(proof, acc)| { - ::request::AccountResponse { - proof, - nonce: acc.nonce, - balance: acc.balance, - code_hash: acc.code_hash, - storage_root: acc.storage_root, - } - }) - } + fn account_proof( + &self, + req: request::CompleteAccountRequest, + ) -> Option { + self.prove_account(req.address_hash, BlockId::Hash(req.block_hash)) + .map(|(proof, acc)| ::request::AccountResponse { + proof, + nonce: acc.nonce, + balance: acc.balance, + code_hash: acc.code_hash, + storage_root: acc.storage_root, + }) + } - fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { - self.prove_storage(req.address_hash, req.key_hash, BlockId::Hash(req.block_hash)).map(|(proof, item) | { - ::request::StorageResponse { - proof, - value: item, - } - }) - } + fn storage_proof( + &self, + req: request::CompleteStorageRequest, + ) -> Option { + self.prove_storage( + req.address_hash, + req.key_hash, + BlockId::Hash(req.block_hash), + ) + .map(|(proof, item)| ::request::StorageResponse { proof, value: item }) + } - fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { - self.state_data(&req.code_hash) - 
.map(|code| ::request::CodeResponse { code }) - } + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + self.state_data(&req.code_hash) + .map(|code| ::request::CodeResponse { code }) + } - fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option { - let cht_number = match cht::block_to_cht_number(req.num) { - Some(cht_num) => cht_num, - None => { - debug!(target: "pip_provider", "Requested CHT proof with invalid block number"); - return None; - } - }; + fn header_proof( + &self, + req: request::CompleteHeaderProofRequest, + ) -> Option { + let cht_number = match cht::block_to_cht_number(req.num) { + Some(cht_num) => cht_num, + None => { + debug!(target: "pip_provider", "Requested CHT proof with invalid block number"); + return None; + } + }; - let mut needed = None; + let mut needed = None; - // build the CHT, caching the requested header as we pass through it. - let cht = { - let block_info = |id| { - let hdr = self.block_header(id); - let td = self.block_total_difficulty(id); + // build the CHT, caching the requested header as we pass through it. + let cht = { + let block_info = |id| { + let hdr = self.block_header(id); + let td = self.block_total_difficulty(id); - match (hdr, td) { - (Some(hdr), Some(td)) => { - let info = BlockInfo { - hash: hdr.hash(), - parent_hash: hdr.parent_hash(), - total_difficulty: td, - }; + match (hdr, td) { + (Some(hdr), Some(td)) => { + let info = BlockInfo { + hash: hdr.hash(), + parent_hash: hdr.parent_hash(), + total_difficulty: td, + }; - if hdr.number() == req.num { - needed = Some((hdr, td)); - } + if hdr.number() == req.num { + needed = Some((hdr, td)); + } - Some(info) - } - _ => None, - } - }; + Some(info) + } + _ => None, + } + }; - match cht::build(cht_number, block_info) { - Some(cht) => cht, - None => return None, // incomplete CHT. - } - }; + match cht::build(cht_number, block_info) { + Some(cht) => cht, + None => return None, // incomplete CHT. 
+ } + }; - let (needed_hdr, needed_td) = needed.expect("`needed` always set in loop, number checked before; qed"); + let (needed_hdr, needed_td) = + needed.expect("`needed` always set in loop, number checked before; qed"); - // prove our result. - match cht.prove(req.num, 0) { - Ok(Some(proof)) => Some(::request::HeaderProofResponse { - proof, - hash: needed_hdr.hash(), - td: needed_td, - }), - Ok(None) => None, - Err(e) => { - debug!(target: "pip_provider", "Error looking up number in freshly-created CHT: {}", e); - None - } - } - } + // prove our result. + match cht.prove(req.num, 0) { + Ok(Some(proof)) => Some(::request::HeaderProofResponse { + proof, + hash: needed_hdr.hash(), + td: needed_td, + }), + Ok(None) => None, + Err(e) => { + debug!(target: "pip_provider", "Error looking up number in freshly-created CHT: {}", e); + None + } + } + } - fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option { - use common_types::transaction::Transaction; + fn transaction_proof( + &self, + req: request::CompleteExecutionRequest, + ) -> Option { + use common_types::transaction::Transaction; - let id = BlockId::Hash(req.block_hash); - let nonce = match self.nonce(&req.from, id) { - Some(nonce) => nonce, - None => return None, - }; - let transaction = Transaction { - nonce, - gas: req.gas, - gas_price: req.gas_price, - action: req.action, - value: req.value, - data: req.data, - }.fake_sign(req.from); + let id = BlockId::Hash(req.block_hash); + let nonce = match self.nonce(&req.from, id) { + Some(nonce) => nonce, + None => return None, + }; + let transaction = Transaction { + nonce, + gas: req.gas, + gas_price: req.gas_price, + action: req.action, + value: req.value, + data: req.data, + } + .fake_sign(req.from); - self.prove_transaction(transaction, id) - .map(|(_, proof)| ::request::ExecutionResponse { items: proof }) - } + self.prove_transaction(transaction, id) + .map(|(_, proof)| ::request::ExecutionResponse { items: proof }) + } - fn 
transactions_to_propagate(&self) -> Vec { - BlockChainClient::transactions_to_propagate(self) - .into_iter() - .map(|tx| tx.pending().clone()) - .collect() - } + fn transactions_to_propagate(&self) -> Vec { + BlockChainClient::transactions_to_propagate(self) + .into_iter() + .map(|tx| tx.pending().clone()) + .collect() + } - fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option { - self.epoch_signal(req.block_hash).map(|signal| request::SignalResponse { - signal, - }) - } + fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option { + self.epoch_signal(req.block_hash) + .map(|signal| request::SignalResponse { signal }) + } } /// The light client "provider" implementation. This wraps a `LightClient` and /// a light transaction queue. pub struct LightProvider { - client: Arc, - txqueue: Arc>, + client: Arc, + txqueue: Arc>, } impl LightProvider { - /// Create a new `LightProvider` from the given client and transaction queue. - pub fn new(client: Arc, txqueue: Arc>) -> Self { - LightProvider { - client, - txqueue, - } - } + /// Create a new `LightProvider` from the given client and transaction queue. 
+ pub fn new(client: Arc, txqueue: Arc>) -> Self { + LightProvider { client, txqueue } + } } // TODO: draw from cache (shared between this and the RPC layer) impl Provider for LightProvider { - fn chain_info(&self) -> BlockChainInfo { - self.client.as_light_client().chain_info() - } + fn chain_info(&self) -> BlockChainInfo { + self.client.as_light_client().chain_info() + } - fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option { - None - } + fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option { + None + } - fn earliest_state(&self) -> Option { - None - } + fn earliest_state(&self) -> Option { + None + } - fn block_header(&self, id: BlockId) -> Option { - self.client.as_light_client().block_header(id) - } + fn block_header(&self, id: BlockId) -> Option { + self.client.as_light_client().block_header(id) + } - fn transaction_index(&self, _req: request::CompleteTransactionIndexRequest) - -> Option - { - None - } + fn transaction_index( + &self, + _req: request::CompleteTransactionIndexRequest, + ) -> Option { + None + } - fn block_body(&self, _req: request::CompleteBodyRequest) -> Option { - None - } + fn block_body(&self, _req: request::CompleteBodyRequest) -> Option { + None + } - fn block_receipts(&self, _req: request::CompleteReceiptsRequest) -> Option { - None - } + fn block_receipts( + &self, + _req: request::CompleteReceiptsRequest, + ) -> Option { + None + } - fn account_proof(&self, _req: request::CompleteAccountRequest) -> Option { - None - } + fn account_proof( + &self, + _req: request::CompleteAccountRequest, + ) -> Option { + None + } - fn storage_proof(&self, _req: request::CompleteStorageRequest) -> Option { - None - } + fn storage_proof( + &self, + _req: request::CompleteStorageRequest, + ) -> Option { + None + } - fn contract_code(&self, _req: request::CompleteCodeRequest) -> Option { - None - } + fn contract_code(&self, _req: request::CompleteCodeRequest) -> Option { + None + } - fn header_proof(&self, _req: request::CompleteHeaderProofRequest) -> 
Option { - None - } + fn header_proof( + &self, + _req: request::CompleteHeaderProofRequest, + ) -> Option { + None + } - fn transaction_proof(&self, _req: request::CompleteExecutionRequest) -> Option { - None - } + fn transaction_proof( + &self, + _req: request::CompleteExecutionRequest, + ) -> Option { + None + } - fn epoch_signal(&self, _req: request::CompleteSignalRequest) -> Option { - None - } + fn epoch_signal( + &self, + _req: request::CompleteSignalRequest, + ) -> Option { + None + } - fn transactions_to_propagate(&self) -> Vec { - let chain_info = self.chain_info(); - self.txqueue.read() - .ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp) - } + fn transactions_to_propagate(&self) -> Vec { + let chain_info = self.chain_info(); + self.txqueue.read().ready_transactions( + chain_info.best_block_number, + chain_info.best_block_timestamp, + ) + } } impl AsLightClient for LightProvider { - type Client = L::Client; + type Client = L::Client; - fn as_light_client(&self) -> &L::Client { - self.client.as_light_client() - } + fn as_light_client(&self) -> &L::Client { + self.client.as_light_client() + } } #[cfg(test)] mod tests { - use ethcore::client::{EachBlockWith, TestBlockChainClient}; - use super::Provider; + use super::Provider; + use ethcore::client::{EachBlockWith, TestBlockChainClient}; - #[test] - fn cht_proof() { - let client = TestBlockChainClient::new(); - client.add_blocks(2000, EachBlockWith::Nothing); + #[test] + fn cht_proof() { + let client = TestBlockChainClient::new(); + client.add_blocks(2000, EachBlockWith::Nothing); - let req = ::request::CompleteHeaderProofRequest { - num: 1500, - }; + let req = ::request::CompleteHeaderProofRequest { num: 1500 }; - assert!(client.header_proof(req.clone()).is_none()); + assert!(client.header_proof(req.clone()).is_none()); - client.add_blocks(48, EachBlockWith::Nothing); + client.add_blocks(48, EachBlockWith::Nothing); - assert!(client.header_proof(req.clone()).is_some()); - } + 
assert!(client.header_proof(req.clone()).is_some()); + } } diff --git a/ethcore/light/src/transaction_queue.rs b/ethcore/light/src/transaction_queue.rs index 65e646d84..980f71417 100644 --- a/ethcore/light/src/transaction_queue.rs +++ b/ethcore/light/src/transaction_queue.rs @@ -23,107 +23,110 @@ //! accounts for which they create transactions, this queue is structured in an //! address-wise manner. -use std::fmt; -use std::collections::{BTreeMap, HashMap}; -use std::collections::hash_map::Entry; +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap}, + fmt, +}; use common_types::transaction::{self, Condition, PendingTransaction, SignedTransaction}; -use ethereum_types::{H256, U256, Address}; +use ethereum_types::{Address, H256, U256}; use fastmap::H256FastMap; // Knowledge of an account's current nonce. #[derive(Debug, Clone, PartialEq, Eq)] enum CurrentNonce { - // Assumed current nonce. - Assumed(U256), - // Known current nonce. - Known(U256), + // Assumed current nonce. + Assumed(U256), + // Known current nonce. + Known(U256), } impl CurrentNonce { - // whether this nonce is assumed - fn is_assumed(&self) -> bool { - match *self { - CurrentNonce::Assumed(_) => true, - CurrentNonce::Known(_) => false, - } - } + // whether this nonce is assumed + fn is_assumed(&self) -> bool { + match *self { + CurrentNonce::Assumed(_) => true, + CurrentNonce::Known(_) => false, + } + } - // whether this nonce is known for certain from an external source. - fn is_known(&self) -> bool { - !self.is_assumed() - } + // whether this nonce is known for certain from an external source. + fn is_known(&self) -> bool { + !self.is_assumed() + } - // the current nonce's value. - fn value(&self) -> &U256 { - match *self { - CurrentNonce::Assumed(ref val) => val, - CurrentNonce::Known(ref val) => val, - } - } + // the current nonce's value. 
+ fn value(&self) -> &U256 { + match *self { + CurrentNonce::Assumed(ref val) => val, + CurrentNonce::Known(ref val) => val, + } + } } #[derive(Debug, Clone, PartialEq, Eq)] struct TransactionInfo { - hash: H256, - nonce: U256, - condition: Option, + hash: H256, + nonce: U256, + condition: Option, } impl<'a> From<&'a PendingTransaction> for TransactionInfo { - fn from(tx: &'a PendingTransaction) -> Self { - TransactionInfo { - hash: tx.hash(), - nonce: tx.nonce, - condition: tx.condition.clone(), - } - } + fn from(tx: &'a PendingTransaction) -> Self { + TransactionInfo { + hash: tx.hash(), + nonce: tx.nonce, + condition: tx.condition.clone(), + } + } } // transactions associated with a specific account. #[derive(Debug, Clone, PartialEq, Eq)] struct AccountTransactions { - // believed current nonce (gotten from initial given TX or `cull` calls). - cur_nonce: CurrentNonce, - current: Vec, // ordered "current" transactions (cur_nonce onwards) - future: BTreeMap, // "future" transactions. + // believed current nonce (gotten from initial given TX or `cull` calls). + cur_nonce: CurrentNonce, + current: Vec, // ordered "current" transactions (cur_nonce onwards) + future: BTreeMap, // "future" transactions. } impl AccountTransactions { - fn is_empty(&self) -> bool { - self.current.is_empty() && self.future.is_empty() - } + fn is_empty(&self) -> bool { + self.current.is_empty() && self.future.is_empty() + } - fn next_nonce(&self) -> U256 { - self.current.last().map(|last| last.nonce.saturating_add(1.into())) - .unwrap_or_else(|| *self.cur_nonce.value()) - } + fn next_nonce(&self) -> U256 { + self.current + .last() + .map(|last| last.nonce.saturating_add(1.into())) + .unwrap_or_else(|| *self.cur_nonce.value()) + } - // attempt to move transactions from the future queue into the current queue. 
- fn adjust_future(&mut self) -> Vec { - let mut promoted = Vec::new(); - let mut next_nonce = self.next_nonce(); + // attempt to move transactions from the future queue into the current queue. + fn adjust_future(&mut self) -> Vec { + let mut promoted = Vec::new(); + let mut next_nonce = self.next_nonce(); - while let Some(tx) = self.future.remove(&next_nonce) { - promoted.push(tx.hash); - self.current.push(tx); - next_nonce = next_nonce.saturating_add(1.into()); - } + while let Some(tx) = self.future.remove(&next_nonce) { + promoted.push(tx.hash); + self.current.push(tx); + next_nonce = next_nonce.saturating_add(1.into()); + } - promoted - } + promoted + } } /// Transaction import result. pub enum ImportDestination { - /// Transaction has been imported to the current queue. - /// - /// It's going to be propagated to peers. - Current, - /// Transaction has been imported to future queue. - /// - /// It means it won't be propagated until the gap is filled. - Future, + /// Transaction has been imported to the current queue. + /// + /// It's going to be propagated to peers. + Current, + /// Transaction has been imported to future queue. + /// + /// It means it won't be propagated until the gap is filled. + Future, } type Listener = Box; @@ -131,129 +134,150 @@ type Listener = Box; /// Light transaction queue. See module docs for more details. 
#[derive(Default)] pub struct TransactionQueue { - by_account: HashMap, - by_hash: H256FastMap, - listeners: Vec, + by_account: HashMap, + by_hash: H256FastMap, + listeners: Vec, } impl fmt::Debug for TransactionQueue { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("TransactionQueue") - .field("by_account", &self.by_account) - .field("by_hash", &self.by_hash) - .field("listeners", &self.listeners.len()) - .finish() - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("TransactionQueue") + .field("by_account", &self.by_account) + .field("by_hash", &self.by_hash) + .field("listeners", &self.listeners.len()) + .finish() + } } impl TransactionQueue { - /// Import a pending transaction to be queued. - pub fn import(&mut self, tx: PendingTransaction) -> Result { - let sender = tx.sender(); - let hash = tx.hash(); - let nonce = tx.nonce; - let tx_info = TransactionInfo::from(&tx); + /// Import a pending transaction to be queued. + pub fn import( + &mut self, + tx: PendingTransaction, + ) -> Result { + let sender = tx.sender(); + let hash = tx.hash(); + let nonce = tx.nonce; + let tx_info = TransactionInfo::from(&tx); - if self.by_hash.contains_key(&hash) { return Err(transaction::Error::AlreadyImported) } + if self.by_hash.contains_key(&hash) { + return Err(transaction::Error::AlreadyImported); + } - let (res, promoted) = match self.by_account.entry(sender) { - Entry::Vacant(entry) => { - entry.insert(AccountTransactions { - cur_nonce: CurrentNonce::Assumed(nonce), - current: vec![tx_info], - future: BTreeMap::new(), - }); + let (res, promoted) = match self.by_account.entry(sender) { + Entry::Vacant(entry) => { + entry.insert(AccountTransactions { + cur_nonce: CurrentNonce::Assumed(nonce), + current: vec![tx_info], + future: BTreeMap::new(), + }); - (ImportDestination::Current, vec![hash]) - } - Entry::Occupied(mut entry) => { - let acct_txs = entry.get_mut(); - if nonce < *acct_txs.cur_nonce.value() { - // 
don't accept txs from before known current nonce. - if acct_txs.cur_nonce.is_known() { - return Err(transaction::Error::Old) - } + (ImportDestination::Current, vec![hash]) + } + Entry::Occupied(mut entry) => { + let acct_txs = entry.get_mut(); + if nonce < *acct_txs.cur_nonce.value() { + // don't accept txs from before known current nonce. + if acct_txs.cur_nonce.is_known() { + return Err(transaction::Error::Old); + } - // lower our assumption until corrected later. - acct_txs.cur_nonce = CurrentNonce::Assumed(nonce); - } + // lower our assumption until corrected later. + acct_txs.cur_nonce = CurrentNonce::Assumed(nonce); + } - match acct_txs.current.binary_search_by(|x| x.nonce.cmp(&nonce)) { - Ok(idx) => { - trace!(target: "txqueue", "Replacing existing transaction from {} with nonce {}", + match acct_txs.current.binary_search_by(|x| x.nonce.cmp(&nonce)) { + Ok(idx) => { + trace!(target: "txqueue", "Replacing existing transaction from {} with nonce {}", sender, nonce); - let old = ::std::mem::replace(&mut acct_txs.current[idx], tx_info); - self.by_hash.remove(&old.hash); + let old = ::std::mem::replace(&mut acct_txs.current[idx], tx_info); + self.by_hash.remove(&old.hash); - (ImportDestination::Current, vec![hash]) - } - Err(idx) => { - let cur_len = acct_txs.current.len(); - let incr_nonce = nonce + 1; + (ImportDestination::Current, vec![hash]) + } + Err(idx) => { + let cur_len = acct_txs.current.len(); + let incr_nonce = nonce + 1; - // current is sorted with one tx per nonce, - // so if a tx with given nonce wasn't found that means it is either - // earlier in nonce than all other "current" transactions or later. - assert!(idx == 0 || idx == cur_len); + // current is sorted with one tx per nonce, + // so if a tx with given nonce wasn't found that means it is either + // earlier in nonce than all other "current" transactions or later. 
+ assert!(idx == 0 || idx == cur_len); - if idx == 0 && acct_txs.current.first().map_or(false, |f| f.nonce != incr_nonce) { - let old_cur = ::std::mem::replace(&mut acct_txs.current, vec![tx_info]); + if idx == 0 + && acct_txs + .current + .first() + .map_or(false, |f| f.nonce != incr_nonce) + { + let old_cur = ::std::mem::replace(&mut acct_txs.current, vec![tx_info]); - trace!(target: "txqueue", "Moving {} transactions with nonce > {} to future", + trace!(target: "txqueue", "Moving {} transactions with nonce > {} to future", old_cur.len(), incr_nonce); - for future in old_cur { - let future_nonce = future.nonce; - acct_txs.future.insert(future_nonce, future); - } + for future in old_cur { + let future_nonce = future.nonce; + acct_txs.future.insert(future_nonce, future); + } - (ImportDestination::Current, vec![hash]) - } else if idx == cur_len && acct_txs.current.last().map_or(false, |f| f.nonce + 1 != nonce) { - trace!(target: "txqueue", "Queued future transaction for {}, nonce={}", sender, nonce); - let future_nonce = nonce; - acct_txs.future.insert(future_nonce, tx_info); + (ImportDestination::Current, vec![hash]) + } else if idx == cur_len + && acct_txs + .current + .last() + .map_or(false, |f| f.nonce + 1 != nonce) + { + trace!(target: "txqueue", "Queued future transaction for {}, nonce={}", sender, nonce); + let future_nonce = nonce; + acct_txs.future.insert(future_nonce, tx_info); - (ImportDestination::Future, vec![]) - } else { - trace!(target: "txqueue", "Queued current transaction for {}, nonce={}", sender, nonce); + (ImportDestination::Future, vec![]) + } else { + trace!(target: "txqueue", "Queued current transaction for {}, nonce={}", sender, nonce); - // insert, then check if we've filled any gaps. - acct_txs.current.insert(idx, tx_info); - let mut promoted = acct_txs.adjust_future(); - promoted.insert(0, hash); + // insert, then check if we've filled any gaps. 
+ acct_txs.current.insert(idx, tx_info); + let mut promoted = acct_txs.adjust_future(); + promoted.insert(0, hash); - (ImportDestination::Current, promoted) - } - } - } - } - }; + (ImportDestination::Current, promoted) + } + } + } + } + }; - self.by_hash.insert(hash, tx); - self.notify(&promoted); - Ok(res) - } + self.by_hash.insert(hash, tx); + self.notify(&promoted); + Ok(res) + } - /// Get pending transaction by hash. - pub fn transaction(&self, hash: &H256) -> Option { - self.by_hash.get(hash).map(|tx| (&**tx).clone()) - } + /// Get pending transaction by hash. + pub fn transaction(&self, hash: &H256) -> Option { + self.by_hash.get(hash).map(|tx| (&**tx).clone()) + } - /// Get the next nonce for a given address based on what's within the queue. - /// If the address has no queued transactions, then `None` will be returned - /// and the next nonce will have to be deduced via other means. - pub fn next_nonce(&self, address: &Address) -> Option { - self.by_account.get(address).map(AccountTransactions::next_nonce) - } + /// Get the next nonce for a given address based on what's within the queue. + /// If the address has no queued transactions, then `None` will be returned + /// and the next nonce will have to be deduced via other means. + pub fn next_nonce(&self, address: &Address) -> Option { + self.by_account + .get(address) + .map(AccountTransactions::next_nonce) + } - /// Get all transactions ready to be propagated. - /// `best_block_number` and `best_block_timestamp` are used to filter out conditionally - /// propagated transactions. - /// - /// Returned transactions are batched by sender, in order of ascending nonce. - pub fn ready_transactions(&self, best_block_number: u64, best_block_timestamp: u64) -> Vec { - self.by_account.values() + /// Get all transactions ready to be propagated. + /// `best_block_number` and `best_block_timestamp` are used to filter out conditionally + /// propagated transactions. 
+ /// + /// Returned transactions are batched by sender, in order of ascending nonce. + pub fn ready_transactions( + &self, + best_block_number: u64, + best_block_timestamp: u64, + ) -> Vec { + self.by_account.values() .flat_map(|acct_txs| { acct_txs.current.iter().take_while(|tx| match tx.condition { None => true, @@ -270,15 +294,19 @@ impl TransactionQueue { } }) .collect() - } + } - /// Get all transactions not ready to be propagated. - /// `best_block_number` and `best_block_timestamp` are used to filter out conditionally - /// propagated transactions. - /// - /// Returned transactions are batched by sender, in order of ascending nonce. - pub fn future_transactions(&self, best_block_number: u64, best_block_timestamp: u64) -> Vec { - self.by_account.values() + /// Get all transactions not ready to be propagated. + /// `best_block_number` and `best_block_timestamp` are used to filter out conditionally + /// propagated transactions. + /// + /// Returned transactions are batched by sender, in order of ascending nonce. + pub fn future_transactions( + &self, + best_block_number: u64, + best_block_timestamp: u64, + ) -> Vec { + self.by_account.values() .flat_map(|acct_txs| { acct_txs.current.iter().skip_while(|tx| match tx.condition { None => true, @@ -295,267 +323,275 @@ impl TransactionQueue { } }) .collect() - } + } - /// Addresses for which we store transactions. - pub fn queued_senders(&self) -> Vec
{ - self.by_account.keys().cloned().collect() - } + /// Addresses for which we store transactions. + pub fn queued_senders(&self) -> Vec
{ + self.by_account.keys().cloned().collect() + } - /// Cull out all transactions by the given address which are invalidated by the given nonce. - pub fn cull(&mut self, address: Address, cur_nonce: U256) { - let mut removed_hashes = vec![]; - if let Entry::Occupied(mut entry) = self.by_account.entry(address) { - { - let acct_txs = entry.get_mut(); - acct_txs.cur_nonce = CurrentNonce::Known(cur_nonce); + /// Cull out all transactions by the given address which are invalidated by the given nonce. + pub fn cull(&mut self, address: Address, cur_nonce: U256) { + let mut removed_hashes = vec![]; + if let Entry::Occupied(mut entry) = self.by_account.entry(address) { + { + let acct_txs = entry.get_mut(); + acct_txs.cur_nonce = CurrentNonce::Known(cur_nonce); - // cull old "future" keys. - let old_future: Vec<_> = acct_txs.future.keys().take_while(|&&k| k < cur_nonce).cloned().collect(); + // cull old "future" keys. + let old_future: Vec<_> = acct_txs + .future + .keys() + .take_while(|&&k| k < cur_nonce) + .cloned() + .collect(); - for old in old_future { - let hash = acct_txs.future.remove(&old) - .expect("key extracted from keys iterator; known to exist; qed") - .hash; - removed_hashes.push(hash); - } + for old in old_future { + let hash = acct_txs + .future + .remove(&old) + .expect("key extracted from keys iterator; known to exist; qed") + .hash; + removed_hashes.push(hash); + } - // then cull from "current". - let valid_pos = acct_txs.current.iter().position(|tx| tx.nonce >= cur_nonce); - match valid_pos { - None => - removed_hashes.extend(acct_txs.current.drain(..).map(|tx| tx.hash)), - Some(valid) => - removed_hashes.extend(acct_txs.current.drain(..valid).map(|tx| tx.hash)), - } + // then cull from "current". 
+ let valid_pos = acct_txs.current.iter().position(|tx| tx.nonce >= cur_nonce); + match valid_pos { + None => removed_hashes.extend(acct_txs.current.drain(..).map(|tx| tx.hash)), + Some(valid) => { + removed_hashes.extend(acct_txs.current.drain(..valid).map(|tx| tx.hash)) + } + } - // now try and move stuff out of future into current. - acct_txs.adjust_future(); - } + // now try and move stuff out of future into current. + acct_txs.adjust_future(); + } - if entry.get_mut().is_empty() { - trace!(target: "txqueue", "No more queued transactions for {} after nonce {}", + if entry.get_mut().is_empty() { + trace!(target: "txqueue", "No more queued transactions for {} after nonce {}", address, cur_nonce); - entry.remove(); - } - } + entry.remove(); + } + } - trace!(target: "txqueue", "Culled {} old transactions from sender {} (nonce={})", + trace!(target: "txqueue", "Culled {} old transactions from sender {} (nonce={})", removed_hashes.len(), address, cur_nonce); - for hash in removed_hashes { - self.by_hash.remove(&hash); - } - } + for hash in removed_hashes { + self.by_hash.remove(&hash); + } + } - /// Get a transaction by hash. - pub fn get(&self, hash: &H256) -> Option<&PendingTransaction> { - self.by_hash.get(&hash) - } + /// Get a transaction by hash. + pub fn get(&self, hash: &H256) -> Option<&PendingTransaction> { + self.by_hash.get(&hash) + } - /// Add a transaction queue listener. - pub fn add_listener(&mut self, f: Listener) { - self.listeners.push(f); - } + /// Add a transaction queue listener. + pub fn add_listener(&mut self, f: Listener) { + self.listeners.push(f); + } - /// Notifies all listeners about new pending transaction. - fn notify(&self, hashes: &[H256]) { - for listener in &self.listeners { - listener(hashes) - } - } + /// Notifies all listeners about new pending transaction. 
+ fn notify(&self, hashes: &[H256]) { + for listener in &self.listeners { + listener(hashes) + } + } } #[cfg(test)] mod tests { - use super::TransactionQueue; - use ethereum_types::Address; - use common_types::transaction::{Transaction, PendingTransaction, Condition}; + use super::TransactionQueue; + use common_types::transaction::{Condition, PendingTransaction, Transaction}; + use ethereum_types::Address; - #[test] - fn queued_senders() { - let sender = Address::default(); - let mut txq = TransactionQueue::default(); - let tx = Transaction::default().fake_sign(sender); + #[test] + fn queued_senders() { + let sender = Address::default(); + let mut txq = TransactionQueue::default(); + let tx = Transaction::default().fake_sign(sender); - txq.import(tx.into()).unwrap(); + txq.import(tx.into()).unwrap(); - assert_eq!(txq.queued_senders(), vec![sender]); + assert_eq!(txq.queued_senders(), vec![sender]); - txq.cull(sender, 1.into()); + txq.cull(sender, 1.into()); - assert_eq!(txq.queued_senders(), vec![]); - assert!(txq.by_hash.is_empty()); - } + assert_eq!(txq.queued_senders(), vec![]); + assert!(txq.by_hash.is_empty()); + } - #[test] - fn next_nonce() { - let sender = Address::default(); - let mut txq = TransactionQueue::default(); + #[test] + fn next_nonce() { + let sender = Address::default(); + let mut txq = TransactionQueue::default(); - for i in (0..5).chain(10..15) { - let mut tx = Transaction::default(); - tx.nonce = i.into(); + for i in (0..5).chain(10..15) { + let mut tx = Transaction::default(); + tx.nonce = i.into(); - let tx = tx.fake_sign(sender); + let tx = tx.fake_sign(sender); - txq.import(tx.into()).unwrap(); - } + txq.import(tx.into()).unwrap(); + } - // current: 0..5, future: 10..15 - assert_eq!(txq.ready_transactions(0, 0).len(), 5); - assert_eq!(txq.next_nonce(&sender).unwrap(), 5.into()); + // current: 0..5, future: 10..15 + assert_eq!(txq.ready_transactions(0, 0).len(), 5); + assert_eq!(txq.next_nonce(&sender).unwrap(), 5.into()); - 
txq.cull(sender, 8.into()); + txq.cull(sender, 8.into()); - // current: empty, future: 10..15 - assert_eq!(txq.ready_transactions(0, 0).len(), 0); - assert_eq!(txq.next_nonce(&sender).unwrap(), 8.into()); + // current: empty, future: 10..15 + assert_eq!(txq.ready_transactions(0, 0).len(), 0); + assert_eq!(txq.next_nonce(&sender).unwrap(), 8.into()); - txq.cull(sender, 10.into()); + txq.cull(sender, 10.into()); - // current: 10..15, future: empty - assert_eq!(txq.ready_transactions(0, 0).len(), 5); - assert_eq!(txq.next_nonce(&sender).unwrap(), 15.into()); - } + // current: 10..15, future: empty + assert_eq!(txq.ready_transactions(0, 0).len(), 5); + assert_eq!(txq.next_nonce(&sender).unwrap(), 15.into()); + } - #[test] - fn current_to_future() { - let sender = Address::default(); - let mut txq = TransactionQueue::default(); + #[test] + fn current_to_future() { + let sender = Address::default(); + let mut txq = TransactionQueue::default(); - for i in 5..10 { - let mut tx = Transaction::default(); - tx.nonce = i.into(); + for i in 5..10 { + let mut tx = Transaction::default(); + tx.nonce = i.into(); - let tx = tx.fake_sign(sender); + let tx = tx.fake_sign(sender); - txq.import(tx.into()).unwrap(); - } + txq.import(tx.into()).unwrap(); + } - assert_eq!(txq.ready_transactions(0, 0).len(), 5); - assert_eq!(txq.next_nonce(&sender).unwrap(), 10.into()); + assert_eq!(txq.ready_transactions(0, 0).len(), 5); + assert_eq!(txq.next_nonce(&sender).unwrap(), 10.into()); - for i in 0..3 { - let mut tx = Transaction::default(); - tx.nonce = i.into(); + for i in 0..3 { + let mut tx = Transaction::default(); + tx.nonce = i.into(); - let tx = tx.fake_sign(sender); + let tx = tx.fake_sign(sender); - txq.import(tx.into()).unwrap(); - } + txq.import(tx.into()).unwrap(); + } - assert_eq!(txq.ready_transactions(0, 0).len(), 3); - assert_eq!(txq.next_nonce(&sender).unwrap(), 3.into()); + assert_eq!(txq.ready_transactions(0, 0).len(), 3); + assert_eq!(txq.next_nonce(&sender).unwrap(), 
3.into()); - for i in 3..5 { - let mut tx = Transaction::default(); - tx.nonce = i.into(); + for i in 3..5 { + let mut tx = Transaction::default(); + tx.nonce = i.into(); - let tx = tx.fake_sign(sender); + let tx = tx.fake_sign(sender); - txq.import(tx.into()).unwrap(); - } + txq.import(tx.into()).unwrap(); + } - assert_eq!(txq.ready_transactions(0, 0).len(), 10); - assert_eq!(txq.next_nonce(&sender).unwrap(), 10.into()); - } + assert_eq!(txq.ready_transactions(0, 0).len(), 10); + assert_eq!(txq.next_nonce(&sender).unwrap(), 10.into()); + } - #[test] - fn conditional() { - let mut txq = TransactionQueue::default(); - let sender = Address::default(); + #[test] + fn conditional() { + let mut txq = TransactionQueue::default(); + let sender = Address::default(); - for i in 0..5 { - let mut tx = Transaction::default(); - tx.nonce = i.into(); - let tx = tx.fake_sign(sender); + for i in 0..5 { + let mut tx = Transaction::default(); + tx.nonce = i.into(); + let tx = tx.fake_sign(sender); - txq.import(match i { - 3 => PendingTransaction::new(tx, Some(Condition::Number(100))), - 4 => PendingTransaction::new(tx, Some(Condition::Timestamp(1234))), - _ => tx.into(), - }).unwrap(); - } + txq.import(match i { + 3 => PendingTransaction::new(tx, Some(Condition::Number(100))), + 4 => PendingTransaction::new(tx, Some(Condition::Timestamp(1234))), + _ => tx.into(), + }) + .unwrap(); + } - assert_eq!(txq.ready_transactions(0, 0).len(), 3); - assert_eq!(txq.ready_transactions(0, 1234).len(), 3); - assert_eq!(txq.ready_transactions(100, 0).len(), 4); - assert_eq!(txq.ready_transactions(100, 1234).len(), 5); - } + assert_eq!(txq.ready_transactions(0, 0).len(), 3); + assert_eq!(txq.ready_transactions(0, 1234).len(), 3); + assert_eq!(txq.ready_transactions(100, 0).len(), 4); + assert_eq!(txq.ready_transactions(100, 1234).len(), 5); + } - #[test] - fn cull_from_future() { - let sender = Address::default(); - let mut txq = TransactionQueue::default(); + #[test] + fn cull_from_future() { + let 
sender = Address::default(); + let mut txq = TransactionQueue::default(); - for i in (0..1).chain(3..10) { - let mut tx = Transaction::default(); - tx.nonce = i.into(); + for i in (0..1).chain(3..10) { + let mut tx = Transaction::default(); + tx.nonce = i.into(); - let tx = tx.fake_sign(sender); + let tx = tx.fake_sign(sender); - txq.import(tx.into()).unwrap(); - } + txq.import(tx.into()).unwrap(); + } - txq.cull(sender, 6.into()); + txq.cull(sender, 6.into()); - assert_eq!(txq.ready_transactions(0, 0).len(), 4); - assert_eq!(txq.next_nonce(&sender).unwrap(), 10.into()); - } + assert_eq!(txq.ready_transactions(0, 0).len(), 4); + assert_eq!(txq.next_nonce(&sender).unwrap(), 10.into()); + } - #[test] - fn import_old() { - let sender = Address::default(); - let mut txq = TransactionQueue::default(); + #[test] + fn import_old() { + let sender = Address::default(); + let mut txq = TransactionQueue::default(); - let mut tx_a = Transaction::default(); - tx_a.nonce = 3.into(); + let mut tx_a = Transaction::default(); + tx_a.nonce = 3.into(); - let mut tx_b = Transaction::default(); - tx_b.nonce = 2.into(); + let mut tx_b = Transaction::default(); + tx_b.nonce = 2.into(); - txq.import(tx_a.fake_sign(sender).into()).unwrap(); - txq.cull(sender, 3.into()); + txq.import(tx_a.fake_sign(sender).into()).unwrap(); + txq.cull(sender, 3.into()); - assert!(txq.import(tx_b.fake_sign(sender).into()).is_err()) - } + assert!(txq.import(tx_b.fake_sign(sender).into()).is_err()) + } - #[test] - fn replace_is_removed() { - let sender = Address::default(); - let mut txq = TransactionQueue::default(); + #[test] + fn replace_is_removed() { + let sender = Address::default(); + let mut txq = TransactionQueue::default(); - let tx_b: PendingTransaction = Transaction::default().fake_sign(sender).into(); - let tx_a: PendingTransaction = { - let mut tx_a = Transaction::default(); - tx_a.gas_price = tx_b.gas_price + 1; - tx_a.fake_sign(sender).into() - }; + let tx_b: PendingTransaction = 
Transaction::default().fake_sign(sender).into(); + let tx_a: PendingTransaction = { + let mut tx_a = Transaction::default(); + tx_a.gas_price = tx_b.gas_price + 1; + tx_a.fake_sign(sender).into() + }; - let hash = tx_a.hash(); + let hash = tx_a.hash(); - txq.import(tx_a).unwrap(); - txq.import(tx_b).unwrap(); + txq.import(tx_a).unwrap(); + txq.import(tx_b).unwrap(); - assert!(txq.transaction(&hash).is_none()); - } + assert!(txq.transaction(&hash).is_none()); + } - #[test] - fn future_transactions() { - let sender = Address::default(); - let mut txq = TransactionQueue::default(); + #[test] + fn future_transactions() { + let sender = Address::default(); + let mut txq = TransactionQueue::default(); - for i in (0..1).chain(3..10) { - let mut tx = Transaction::default(); - tx.nonce = i.into(); + for i in (0..1).chain(3..10) { + let mut tx = Transaction::default(); + tx.nonce = i.into(); - let tx = tx.fake_sign(sender); + let tx = tx.fake_sign(sender); - txq.import(tx.into()).unwrap(); - } + txq.import(tx.into()).unwrap(); + } - assert_eq!(txq.future_transactions(0, 0).len(), 7); - assert_eq!(txq.next_nonce(&sender).unwrap(), 1.into()); - } + assert_eq!(txq.future_transactions(0, 0).len(), 7); + assert_eq!(txq.next_nonce(&sender).unwrap(), 1.into()); + } } diff --git a/ethcore/light/src/types/request/batch.rs b/ethcore/light/src/types/request/batch.rs index 63641b5da..739d67a2a 100644 --- a/ethcore/light/src/types/request/batch.rs +++ b/ethcore/light/src/types/request/batch.rs @@ -18,300 +18,358 @@ //! Push requests with `push`. Back-references and data required to verify responses must be //! supplied as well. -use std::collections::HashMap; -use std::ops::{Deref, DerefMut}; -use request::{ - IncompleteRequest, OutputKind, Output, NoSuchOutput, ResponseError, ResponseLike, +use request::{IncompleteRequest, NoSuchOutput, Output, OutputKind, ResponseError, ResponseLike}; +use std::{ + collections::HashMap, + ops::{Deref, DerefMut}, }; /// Build chained requests. 
Push them onto the series with `push`, /// and produce a `Batch` object with `build`. Outputs are checked for consistency. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Builder { - output_kinds: HashMap<(usize, usize), OutputKind>, - requests: Vec, + output_kinds: HashMap<(usize, usize), OutputKind>, + requests: Vec, } impl Default for Builder { - fn default() -> Self { - Builder { - output_kinds: HashMap::new(), - requests: Vec::new(), - } - } + fn default() -> Self { + Builder { + output_kinds: HashMap::new(), + requests: Vec::new(), + } + } } impl Builder { - /// Attempt to push a request onto the request chain. Fails if the request - /// references a non-existent output of a prior request. - pub fn push(&mut self, request: T) -> Result<(), NoSuchOutput> { - request.check_outputs(|req, idx, kind| { - match self.output_kinds.get(&(req, idx)) { - Some(k) if k == &kind => Ok(()), - _ => Err(NoSuchOutput), - } - })?; - let req_idx = self.requests.len(); - request.note_outputs(|idx, kind| { self.output_kinds.insert((req_idx, idx), kind); }); - self.requests.push(request); - Ok(()) - } + /// Attempt to push a request onto the request chain. Fails if the request + /// references a non-existent output of a prior request. + pub fn push(&mut self, request: T) -> Result<(), NoSuchOutput> { + request.check_outputs(|req, idx, kind| match self.output_kinds.get(&(req, idx)) { + Some(k) if k == &kind => Ok(()), + _ => Err(NoSuchOutput), + })?; + let req_idx = self.requests.len(); + request.note_outputs(|idx, kind| { + self.output_kinds.insert((req_idx, idx), kind); + }); + self.requests.push(request); + Ok(()) + } - /// Get a reference to the output kinds map. - pub fn output_kinds(&self) -> &HashMap<(usize, usize), OutputKind> { - &self.output_kinds - } + /// Get a reference to the output kinds map. + pub fn output_kinds(&self) -> &HashMap<(usize, usize), OutputKind> { + &self.output_kinds + } - /// Convert this into a "batch" object. 
- pub fn build(self) -> Batch { - Batch { - outputs: HashMap::new(), - requests: self.requests, - answered: 0, - } - } + /// Convert this into a "batch" object. + pub fn build(self) -> Batch { + Batch { + outputs: HashMap::new(), + requests: self.requests, + answered: 0, + } + } } /// Requests pending responses. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Batch { - outputs: HashMap<(usize, usize), Output>, - requests: Vec, - answered: usize, + outputs: HashMap<(usize, usize), Output>, + requests: Vec, + answered: usize, } impl Batch { - /// Get access to the underlying slice of requests. - // TODO: unimplemented -> Vec, // do we _have to_ allocate? - pub fn requests(&self) -> &[T] { &self.requests } + /// Get access to the underlying slice of requests. + // TODO: unimplemented -> Vec, // do we _have to_ allocate? + pub fn requests(&self) -> &[T] { + &self.requests + } - /// Get the number of answered requests. - pub fn num_answered(&self) -> usize { self.answered } + /// Get the number of answered requests. + pub fn num_answered(&self) -> usize { + self.answered + } - /// Whether the batch is complete. - pub fn is_complete(&self) -> bool { - self.answered == self.requests.len() - } + /// Whether the batch is complete. + pub fn is_complete(&self) -> bool { + self.answered == self.requests.len() + } - /// Map requests from one type into another. - pub fn map_requests(self, f: F) -> Batch - where F: FnMut(T) -> U, U: IncompleteRequest - { - Batch { - outputs: self.outputs, - requests: self.requests.into_iter().map(f).collect(), - answered: self.answered, - } - } + /// Map requests from one type into another. + pub fn map_requests(self, f: F) -> Batch + where + F: FnMut(T) -> U, + U: IncompleteRequest, + { + Batch { + outputs: self.outputs, + requests: self.requests.into_iter().map(f).collect(), + answered: self.answered, + } + } } impl Batch { - /// Get the next request as a filled request. Returns `None` when all requests answered. 
- pub fn next_complete(&self) -> Option { - if self.is_complete() { - None - } else { - Some(self.requests[self.answered].clone() - .complete() - .expect("All outputs checked as invariant of `Batch` object; qed")) - } - } + /// Get the next request as a filled request. Returns `None` when all requests answered. + pub fn next_complete(&self) -> Option { + if self.is_complete() { + None + } else { + Some( + self.requests[self.answered] + .clone() + .complete() + .expect("All outputs checked as invariant of `Batch` object; qed"), + ) + } + } - /// Sweep through all unanswered requests, filling them as necessary. - pub fn fill_unanswered(&mut self) { - let outputs = &mut self.outputs; + /// Sweep through all unanswered requests, filling them as necessary. + pub fn fill_unanswered(&mut self) { + let outputs = &mut self.outputs; - for req in self.requests.iter_mut().skip(self.answered) { - req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) - } - } + for req in self.requests.iter_mut().skip(self.answered) { + req.fill(|req_idx, out_idx| { + outputs + .get(&(req_idx, out_idx)) + .cloned() + .ok_or(NoSuchOutput) + }) + } + } - /// Supply a response, asserting its correctness. - /// Fill outputs based upon it. - pub fn supply_response_unchecked(&mut self, response: &R) { - if self.is_complete() { return } + /// Supply a response, asserting its correctness. + /// Fill outputs based upon it. + pub fn supply_response_unchecked(&mut self, response: &R) { + if self.is_complete() { + return; + } - let outputs = &mut self.outputs; - let idx = self.answered; - response.fill_outputs(|out_idx, output| { - // we don't need to check output kinds here because all back-references - // are validated in the builder. - // TODO: optimization for only storing outputs we "care about"? 
- outputs.insert((idx, out_idx), output); - }); + let outputs = &mut self.outputs; + let idx = self.answered; + response.fill_outputs(|out_idx, output| { + // we don't need to check output kinds here because all back-references + // are validated in the builder. + // TODO: optimization for only storing outputs we "care about"? + outputs.insert((idx, out_idx), output); + }); - self.answered += 1; + self.answered += 1; - // fill as much of the next request as we can. - if let Some(ref mut req) = self.requests.get_mut(self.answered) { - req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) - } - } + // fill as much of the next request as we can. + if let Some(ref mut req) = self.requests.get_mut(self.answered) { + req.fill(|req_idx, out_idx| { + outputs + .get(&(req_idx, out_idx)) + .cloned() + .ok_or(NoSuchOutput) + }) + } + } } impl Batch { - /// Supply a response for the next request. - /// Fails on: wrong request kind, all requests answered already. - pub fn supply_response(&mut self, env: &T::Environment, response: &T::Response) - -> Result> - { - let idx = self.answered; + /// Supply a response for the next request. + /// Fails on: wrong request kind, all requests answered already. + pub fn supply_response( + &mut self, + env: &T::Environment, + response: &T::Response, + ) -> Result> { + let idx = self.answered; - // check validity. - if idx == self.requests.len() { return Err(ResponseError::Unexpected) } - let completed = self.next_complete() - .expect("only fails when all requests have been answered; this just checked against; qed"); + // check validity. 
+ if idx == self.requests.len() { + return Err(ResponseError::Unexpected); + } + let completed = self.next_complete().expect( + "only fails when all requests have been answered; this just checked against; qed", + ); - let extracted = self.requests[idx] - .check_response(&completed, env, response).map_err(ResponseError::Validity)?; + let extracted = self.requests[idx] + .check_response(&completed, env, response) + .map_err(ResponseError::Validity)?; - self.supply_response_unchecked(response); - Ok(extracted) - } + self.supply_response_unchecked(response); + Ok(extracted) + } } impl Batch { - /// For each request, produce a response. - /// The responses vector produced goes up to the point where the responder - /// first returns `None`, an invalid response, or until all requests have been responded to. - pub fn respond_to_all(mut self, responder: F) -> Vec - where F: Fn(super::CompleteRequest) -> Option - { - let mut responses = Vec::new(); + /// For each request, produce a response. + /// The responses vector produced goes up to the point where the responder + /// first returns `None`, an invalid response, or until all requests have been responded to. 
+ pub fn respond_to_all(mut self, responder: F) -> Vec + where + F: Fn(super::CompleteRequest) -> Option, + { + let mut responses = Vec::new(); - while let Some(response) = self.next_complete().and_then(&responder) { - match self.supply_response(&(), &response) { - Ok(()) => responses.push(response), - Err(e) => { - debug!(target: "pip", "produced bad response to request: {:?}", e); - return responses; - } - } - } + while let Some(response) = self.next_complete().and_then(&responder) { + match self.supply_response(&(), &response) { + Ok(()) => responses.push(response), + Err(e) => { + debug!(target: "pip", "produced bad response to request: {:?}", e); + return responses; + } + } + } - responses - } + responses + } } impl Deref for Batch { - type Target = [T]; + type Target = [T]; - fn deref(&self) -> &[T] { - &self.requests[..] - } + fn deref(&self) -> &[T] { + &self.requests[..] + } } impl DerefMut for Batch { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.requests[..] - } + fn deref_mut(&mut self) -> &mut [T] { + &mut self.requests[..] 
+ } } #[cfg(test)] mod tests { - use request::*; - use super::Builder; - use ethereum_types::H256; + use super::Builder; + use ethereum_types::H256; + use request::*; - #[test] - fn all_scalar() { - let mut builder = Builder::default(); - builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { - num: 100.into(), - })).unwrap(); - builder.push(Request::Receipts(IncompleteReceiptsRequest { - hash: H256::default().into(), - })).unwrap(); - } + #[test] + fn all_scalar() { + let mut builder = Builder::default(); + builder + .push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), + })) + .unwrap(); + builder + .push(Request::Receipts(IncompleteReceiptsRequest { + hash: H256::default().into(), + })) + .unwrap(); + } - #[test] - #[should_panic] - fn missing_backref() { - let mut builder = Builder::default(); - builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { - num: Field::BackReference(100, 3), - })).unwrap(); - } + #[test] + #[should_panic] + fn missing_backref() { + let mut builder = Builder::default(); + builder + .push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: Field::BackReference(100, 3), + })) + .unwrap(); + } - #[test] - #[should_panic] - fn wrong_kind() { - let mut builder = Builder::default(); - assert!(builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { - num: 100.into(), - })).is_ok()); - builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { - num: Field::BackReference(0, 0), - })).unwrap(); - } + #[test] + #[should_panic] + fn wrong_kind() { + let mut builder = Builder::default(); + assert!(builder + .push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), + })) + .is_ok()); + builder + .push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: Field::BackReference(0, 0), + })) + .unwrap(); + } - #[test] - fn good_backreference() { - let mut builder = Builder::default(); - builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { - num: 
100.into(), // header proof puts hash at output 0. - })).unwrap(); - builder.push(Request::Receipts(IncompleteReceiptsRequest { - hash: Field::BackReference(0, 0), - })).unwrap(); - } + #[test] + fn good_backreference() { + let mut builder = Builder::default(); + builder + .push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), // header proof puts hash at output 0. + })) + .unwrap(); + builder + .push(Request::Receipts(IncompleteReceiptsRequest { + hash: Field::BackReference(0, 0), + })) + .unwrap(); + } - #[test] - fn batch_tx_index_backreference() { - let mut builder = Builder::default(); - builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { - num: 100.into(), // header proof puts hash at output 0. - })).unwrap(); - builder.push(Request::TransactionIndex(IncompleteTransactionIndexRequest { - hash: Field::BackReference(0, 0), - })).unwrap(); + #[test] + fn batch_tx_index_backreference() { + let mut builder = Builder::default(); + builder + .push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), // header proof puts hash at output 0. + })) + .unwrap(); + builder + .push(Request::TransactionIndex( + IncompleteTransactionIndexRequest { + hash: Field::BackReference(0, 0), + }, + )) + .unwrap(); - let mut batch = builder.build(); - batch.requests[1].fill(|_req_idx, _out_idx| Ok(Output::Hash(42.into()))); + let mut batch = builder.build(); + batch.requests[1].fill(|_req_idx, _out_idx| Ok(Output::Hash(42.into()))); - assert!(batch.next_complete().is_some()); - batch.answered += 1; - assert!(batch.next_complete().is_some()); - } + assert!(batch.next_complete().is_some()); + batch.answered += 1; + assert!(batch.next_complete().is_some()); + } - #[test] - fn batch_tx_index_backreference_public_api() { - let mut builder = Builder::default(); - builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { - num: 100.into(), // header proof puts hash at output 0. 
- })).unwrap(); - builder.push(Request::TransactionIndex(IncompleteTransactionIndexRequest { - hash: Field::BackReference(0, 0), - })).unwrap(); + #[test] + fn batch_tx_index_backreference_public_api() { + let mut builder = Builder::default(); + builder + .push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), // header proof puts hash at output 0. + })) + .unwrap(); + builder + .push(Request::TransactionIndex( + IncompleteTransactionIndexRequest { + hash: Field::BackReference(0, 0), + }, + )) + .unwrap(); - let mut batch = builder.build(); + let mut batch = builder.build(); - assert!(batch.next_complete().is_some()); - let hdr_proof_res = header_proof::Response { - proof: vec![], - hash: 12.into(), - td: 21.into(), - }; - batch.supply_response_unchecked(&hdr_proof_res); + assert!(batch.next_complete().is_some()); + let hdr_proof_res = header_proof::Response { + proof: vec![], + hash: 12.into(), + td: 21.into(), + }; + batch.supply_response_unchecked(&hdr_proof_res); - assert!(batch.next_complete().is_some()); - } + assert!(batch.next_complete().is_some()); + } - #[test] - fn batch_receipts_backreference() { - let mut builder = Builder::default(); - builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { - num: 100.into(), // header proof puts hash at output 0. - })).unwrap(); - builder.push(Request::Receipts(IncompleteReceiptsRequest { - hash: Field::BackReference(0, 0), - })).unwrap(); + #[test] + fn batch_receipts_backreference() { + let mut builder = Builder::default(); + builder + .push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), // header proof puts hash at output 0. 
+ })) + .unwrap(); + builder + .push(Request::Receipts(IncompleteReceiptsRequest { + hash: Field::BackReference(0, 0), + })) + .unwrap(); - let mut batch = builder.build(); - batch.requests[1].fill(|_req_idx, _out_idx| Ok(Output::Hash(42.into()))); + let mut batch = builder.build(); + batch.requests[1].fill(|_req_idx, _out_idx| Ok(Output::Hash(42.into()))); - assert!(batch.next_complete().is_some()); - batch.answered += 1; - assert!(batch.next_complete().is_some()); - } + assert!(batch.next_complete().is_some()); + batch.answered += 1; + assert!(batch.next_complete().is_some()); + } } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index cacfbcbe5..2aa7de027 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -16,61 +16,53 @@ //! Light protocol request types. -use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; use ethereum_types::H256; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; mod batch; // re-exports of request types. 
-pub use self::header::{ - Complete as CompleteHeadersRequest, - Incomplete as IncompleteHeadersRequest, - Response as HeadersResponse -}; -pub use self::header_proof::{ - Complete as CompleteHeaderProofRequest, - Incomplete as IncompleteHeaderProofRequest, - Response as HeaderProofResponse -}; -pub use self::transaction_index::{ - Complete as CompleteTransactionIndexRequest, - Incomplete as IncompleteTransactionIndexRequest, - Response as TransactionIndexResponse -}; -pub use self::block_body::{ - Complete as CompleteBodyRequest, - Incomplete as IncompleteBodyRequest, - Response as BodyResponse -}; -pub use self::block_receipts::{ - Complete as CompleteReceiptsRequest, - Incomplete as IncompleteReceiptsRequest, - Response as ReceiptsResponse -}; -pub use self::account::{ - Complete as CompleteAccountRequest, - Incomplete as IncompleteAccountRequest, - Response as AccountResponse, -}; -pub use self::storage::{ - Complete as CompleteStorageRequest, - Incomplete as IncompleteStorageRequest, - Response as StorageResponse -}; -pub use self::contract_code::{ - Complete as CompleteCodeRequest, - Incomplete as IncompleteCodeRequest, - Response as CodeResponse, -}; -pub use self::execution::{ - Complete as CompleteExecutionRequest, - Incomplete as IncompleteExecutionRequest, - Response as ExecutionResponse, -}; -pub use self::epoch_signal::{ - Complete as CompleteSignalRequest, - Incomplete as IncompleteSignalRequest, - Response as SignalResponse, +pub use self::{ + account::{ + Complete as CompleteAccountRequest, Incomplete as IncompleteAccountRequest, + Response as AccountResponse, + }, + block_body::{ + Complete as CompleteBodyRequest, Incomplete as IncompleteBodyRequest, + Response as BodyResponse, + }, + block_receipts::{ + Complete as CompleteReceiptsRequest, Incomplete as IncompleteReceiptsRequest, + Response as ReceiptsResponse, + }, + contract_code::{ + Complete as CompleteCodeRequest, Incomplete as IncompleteCodeRequest, + Response as CodeResponse, + }, + 
epoch_signal::{ + Complete as CompleteSignalRequest, Incomplete as IncompleteSignalRequest, + Response as SignalResponse, + }, + execution::{ + Complete as CompleteExecutionRequest, Incomplete as IncompleteExecutionRequest, + Response as ExecutionResponse, + }, + header::{ + Complete as CompleteHeadersRequest, Incomplete as IncompleteHeadersRequest, + Response as HeadersResponse, + }, + header_proof::{ + Complete as CompleteHeaderProofRequest, Incomplete as IncompleteHeaderProofRequest, + Response as HeaderProofResponse, + }, + storage::{ + Complete as CompleteStorageRequest, Incomplete as IncompleteStorageRequest, + Response as StorageResponse, + }, + transaction_index::{ + Complete as CompleteTransactionIndexRequest, + Incomplete as IncompleteTransactionIndexRequest, Response as TransactionIndexResponse, + }, }; pub use self::batch::{Batch, Builder}; @@ -86,155 +78,162 @@ pub struct WrongKind; /// Error on processing a response. #[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseError { - /// Error in validity. - Validity(T), - /// No responses expected. - Unexpected, + /// Error in validity. + Validity(T), + /// No responses expected. + Unexpected, } /// An input to a request. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Field { - /// A pre-specified input. - Scalar(T), - /// An input which can be resolved later on. - /// (Request index, output index) - BackReference(usize, usize), + /// A pre-specified input. + Scalar(T), + /// An input which can be resolved later on. + /// (Request index, output index) + BackReference(usize, usize), } impl Field { - /// Helper for creating a new back-reference field. - pub fn back_ref(idx: usize, req: usize) -> Self { - Field::BackReference(idx, req) - } + /// Helper for creating a new back-reference field. + pub fn back_ref(idx: usize, req: usize) -> Self { + Field::BackReference(idx, req) + } - /// map a scalar into some other item. 
- pub fn map(self, f: F) -> Field where F: FnOnce(T) -> U { - match self { - Field::Scalar(x) => Field::Scalar(f(x)), - Field::BackReference(req, idx) => Field::BackReference(req, idx), - } - } + /// map a scalar into some other item. + pub fn map(self, f: F) -> Field + where + F: FnOnce(T) -> U, + { + match self { + Field::Scalar(x) => Field::Scalar(f(x)), + Field::BackReference(req, idx) => Field::BackReference(req, idx), + } + } - /// Attempt to get a reference to the inner scalar. - pub fn as_ref(&self) -> Option<&T> { - match *self { - Field::Scalar(ref x) => Some(x), - Field::BackReference(_, _) => None, - } - } + /// Attempt to get a reference to the inner scalar. + pub fn as_ref(&self) -> Option<&T> { + match *self { + Field::Scalar(ref x) => Some(x), + Field::BackReference(_, _) => None, + } + } - // attempt conversion into scalar value. - fn into_scalar(self) -> Result { - match self { - Field::Scalar(val) => Ok(val), - _ => Err(NoSuchOutput), - } - } + // attempt conversion into scalar value. + fn into_scalar(self) -> Result { + match self { + Field::Scalar(val) => Ok(val), + _ => Err(NoSuchOutput), + } + } - fn adjust_req(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { - if let Field::BackReference(ref mut req_idx, _) = *self { - *req_idx = mapping(*req_idx) - } - } + fn adjust_req(&mut self, mut mapping: F) + where + F: FnMut(usize) -> usize, + { + if let Field::BackReference(ref mut req_idx, _) = *self { + *req_idx = mapping(*req_idx) + } + } } impl From for Field { - fn from(val: T) -> Self { - Field::Scalar(val) - } + fn from(val: T) -> Self { + Field::Scalar(val) + } } impl Decodable for Field { - fn decode(rlp: &Rlp) -> Result { - match rlp.val_at::(0)? { - 0 => Ok(Field::Scalar(rlp.val_at::(1)?)), - 1 => Ok({ - let inner_rlp = rlp.at(1)?; - Field::BackReference(inner_rlp.val_at(0)?, inner_rlp.val_at(1)?) 
- }), - _ => Err(DecoderError::Custom("Unknown discriminant for PIP field.")), - } - } + fn decode(rlp: &Rlp) -> Result { + match rlp.val_at::(0)? { + 0 => Ok(Field::Scalar(rlp.val_at::(1)?)), + 1 => Ok({ + let inner_rlp = rlp.at(1)?; + Field::BackReference(inner_rlp.val_at(0)?, inner_rlp.val_at(1)?) + }), + _ => Err(DecoderError::Custom("Unknown discriminant for PIP field.")), + } + } } impl Encodable for Field { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2); - match *self { - Field::Scalar(ref data) => { - s.append(&0u8).append(data); - } - Field::BackReference(ref req, ref idx) => { - s.append(&1u8).begin_list(2).append(req).append(idx); - } - } - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + match *self { + Field::Scalar(ref data) => { + s.append(&0u8).append(data); + } + Field::BackReference(ref req, ref idx) => { + s.append(&1u8).begin_list(2).append(req).append(idx); + } + } + } } /// Request outputs which can be reused as inputs. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Output { - /// A 32-byte hash output. - Hash(H256), - /// An unsigned-integer output. - Number(u64), + /// A 32-byte hash output. + Hash(H256), + /// An unsigned-integer output. + Number(u64), } impl Output { - /// Get the output kind. - pub fn kind(&self) -> OutputKind { - match *self { - Output::Hash(_) => OutputKind::Hash, - Output::Number(_) => OutputKind::Number, - } - } + /// Get the output kind. + pub fn kind(&self) -> OutputKind { + match *self { + Output::Hash(_) => OutputKind::Hash, + Output::Number(_) => OutputKind::Number, + } + } } /// Response output kinds which can be used as back-references. #[derive(Debug, Clone, PartialEq, Eq)] pub enum OutputKind { - /// A 32-byte hash output. - Hash, - /// An unsigned-integer output. - Number, + /// A 32-byte hash output. + Hash, + /// An unsigned-integer output. + Number, } /// Either a hash or a number. #[derive(Debug, Clone, PartialEq, Eq)] pub enum HashOrNumber { - /// Block hash variant. 
- Hash(H256), - /// Block number variant. - Number(u64), + /// Block hash variant. + Hash(H256), + /// Block number variant. + Number(u64), } impl From for HashOrNumber { - fn from(hash: H256) -> Self { - HashOrNumber::Hash(hash) - } + fn from(hash: H256) -> Self { + HashOrNumber::Hash(hash) + } } impl From for HashOrNumber { - fn from(num: u64) -> Self { - HashOrNumber::Number(num) - } + fn from(num: u64) -> Self { + HashOrNumber::Number(num) + } } impl Decodable for HashOrNumber { - fn decode(rlp: &Rlp) -> Result { - rlp.as_val::().map(HashOrNumber::Hash) - .or_else(|_| rlp.as_val().map(HashOrNumber::Number)) - } + fn decode(rlp: &Rlp) -> Result { + rlp.as_val::() + .map(HashOrNumber::Hash) + .or_else(|_| rlp.as_val().map(HashOrNumber::Number)) + } } impl Encodable for HashOrNumber { - fn rlp_append(&self, s: &mut RlpStream) { - match *self { - HashOrNumber::Hash(ref hash) => s.append(hash), - HashOrNumber::Number(ref num) => s.append(num), - }; - } + fn rlp_append(&self, s: &mut RlpStream) { + match *self { + HashOrNumber::Hash(ref hash) => s.append(hash), + HashOrNumber::Number(ref num) => s.append(num), + }; + } } /// Type alias for "network requests". @@ -245,222 +244,237 @@ pub type NetworkRequests = Batch; /// of prior requests. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Request { - /// A request for block headers. - Headers(IncompleteHeadersRequest), - /// A request for a header proof (from a CHT) - HeaderProof(IncompleteHeaderProofRequest), - /// A request for a transaction index by hash. - TransactionIndex(IncompleteTransactionIndexRequest), - /// A request for a block's receipts. - Receipts(IncompleteReceiptsRequest), - /// A request for a block body. - Body(IncompleteBodyRequest), - /// A request for a merkle proof of an account. - Account(IncompleteAccountRequest), - /// A request for a merkle proof of contract storage. - Storage(IncompleteStorageRequest), - /// A request for contract code. 
- Code(IncompleteCodeRequest), - /// A request for proof of execution, - Execution(IncompleteExecutionRequest), - /// A request for an epoch signal. - Signal(IncompleteSignalRequest), + /// A request for block headers. + Headers(IncompleteHeadersRequest), + /// A request for a header proof (from a CHT) + HeaderProof(IncompleteHeaderProofRequest), + /// A request for a transaction index by hash. + TransactionIndex(IncompleteTransactionIndexRequest), + /// A request for a block's receipts. + Receipts(IncompleteReceiptsRequest), + /// A request for a block body. + Body(IncompleteBodyRequest), + /// A request for a merkle proof of an account. + Account(IncompleteAccountRequest), + /// A request for a merkle proof of contract storage. + Storage(IncompleteStorageRequest), + /// A request for contract code. + Code(IncompleteCodeRequest), + /// A request for proof of execution, + Execution(IncompleteExecutionRequest), + /// A request for an epoch signal. + Signal(IncompleteSignalRequest), } /// All request types, in an answerable state. #[derive(Debug, Clone, PartialEq, Eq)] pub enum CompleteRequest { - /// A request for block headers. - Headers(CompleteHeadersRequest), - /// A request for a header proof (from a CHT) - HeaderProof(CompleteHeaderProofRequest), - /// A request for a transaction index by hash. - TransactionIndex(CompleteTransactionIndexRequest), - /// A request for a block's receipts. - Receipts(CompleteReceiptsRequest), - /// A request for a block body. - Body(CompleteBodyRequest), - /// A request for a merkle proof of an account. - Account(CompleteAccountRequest), - /// A request for a merkle proof of contract storage. - Storage(CompleteStorageRequest), - /// A request for contract code. - Code(CompleteCodeRequest), - /// A request for proof of execution, - Execution(CompleteExecutionRequest), - /// A request for an epoch signal. - Signal(CompleteSignalRequest), + /// A request for block headers. 
+ Headers(CompleteHeadersRequest), + /// A request for a header proof (from a CHT) + HeaderProof(CompleteHeaderProofRequest), + /// A request for a transaction index by hash. + TransactionIndex(CompleteTransactionIndexRequest), + /// A request for a block's receipts. + Receipts(CompleteReceiptsRequest), + /// A request for a block body. + Body(CompleteBodyRequest), + /// A request for a merkle proof of an account. + Account(CompleteAccountRequest), + /// A request for a merkle proof of contract storage. + Storage(CompleteStorageRequest), + /// A request for contract code. + Code(CompleteCodeRequest), + /// A request for proof of execution, + Execution(CompleteExecutionRequest), + /// A request for an epoch signal. + Signal(CompleteSignalRequest), } impl CompleteRequest { - /// Inspect the kind of this response. - pub fn kind(&self) -> Kind { - match *self { - CompleteRequest::Headers(_) => Kind::Headers, - CompleteRequest::HeaderProof(_) => Kind::HeaderProof, - CompleteRequest::TransactionIndex(_) => Kind::TransactionIndex, - CompleteRequest::Receipts(_) => Kind::Receipts, - CompleteRequest::Body(_) => Kind::Body, - CompleteRequest::Account(_) => Kind::Account, - CompleteRequest::Storage(_) => Kind::Storage, - CompleteRequest::Code(_) => Kind::Code, - CompleteRequest::Execution(_) => Kind::Execution, - CompleteRequest::Signal(_) => Kind::Signal, - } - } + /// Inspect the kind of this response. 
+ pub fn kind(&self) -> Kind { + match *self { + CompleteRequest::Headers(_) => Kind::Headers, + CompleteRequest::HeaderProof(_) => Kind::HeaderProof, + CompleteRequest::TransactionIndex(_) => Kind::TransactionIndex, + CompleteRequest::Receipts(_) => Kind::Receipts, + CompleteRequest::Body(_) => Kind::Body, + CompleteRequest::Account(_) => Kind::Account, + CompleteRequest::Storage(_) => Kind::Storage, + CompleteRequest::Code(_) => Kind::Code, + CompleteRequest::Execution(_) => Kind::Execution, + CompleteRequest::Signal(_) => Kind::Signal, + } + } } impl Request { - /// Get the request kind. - pub fn kind(&self) -> Kind { - match *self { - Request::Headers(_) => Kind::Headers, - Request::HeaderProof(_) => Kind::HeaderProof, - Request::TransactionIndex(_) => Kind::TransactionIndex, - Request::Receipts(_) => Kind::Receipts, - Request::Body(_) => Kind::Body, - Request::Account(_) => Kind::Account, - Request::Storage(_) => Kind::Storage, - Request::Code(_) => Kind::Code, - Request::Execution(_) => Kind::Execution, - Request::Signal(_) => Kind::Signal, - } - } + /// Get the request kind. + pub fn kind(&self) -> Kind { + match *self { + Request::Headers(_) => Kind::Headers, + Request::HeaderProof(_) => Kind::HeaderProof, + Request::TransactionIndex(_) => Kind::TransactionIndex, + Request::Receipts(_) => Kind::Receipts, + Request::Body(_) => Kind::Body, + Request::Account(_) => Kind::Account, + Request::Storage(_) => Kind::Storage, + Request::Code(_) => Kind::Code, + Request::Execution(_) => Kind::Execution, + Request::Signal(_) => Kind::Signal, + } + } } impl Decodable for Request { - fn decode(rlp: &Rlp) -> Result { - match rlp.val_at::(0)? 
{ - Kind::Headers => Ok(Request::Headers(rlp.val_at(1)?)), - Kind::HeaderProof => Ok(Request::HeaderProof(rlp.val_at(1)?)), - Kind::TransactionIndex => Ok(Request::TransactionIndex(rlp.val_at(1)?)), - Kind::Receipts => Ok(Request::Receipts(rlp.val_at(1)?)), - Kind::Body => Ok(Request::Body(rlp.val_at(1)?)), - Kind::Account => Ok(Request::Account(rlp.val_at(1)?)), - Kind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), - Kind::Code => Ok(Request::Code(rlp.val_at(1)?)), - Kind::Execution => Ok(Request::Execution(rlp.val_at(1)?)), - Kind::Signal => Ok(Request::Signal(rlp.val_at(1)?)), - } - } + fn decode(rlp: &Rlp) -> Result { + match rlp.val_at::(0)? { + Kind::Headers => Ok(Request::Headers(rlp.val_at(1)?)), + Kind::HeaderProof => Ok(Request::HeaderProof(rlp.val_at(1)?)), + Kind::TransactionIndex => Ok(Request::TransactionIndex(rlp.val_at(1)?)), + Kind::Receipts => Ok(Request::Receipts(rlp.val_at(1)?)), + Kind::Body => Ok(Request::Body(rlp.val_at(1)?)), + Kind::Account => Ok(Request::Account(rlp.val_at(1)?)), + Kind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), + Kind::Code => Ok(Request::Code(rlp.val_at(1)?)), + Kind::Execution => Ok(Request::Execution(rlp.val_at(1)?)), + Kind::Signal => Ok(Request::Signal(rlp.val_at(1)?)), + } + } } impl Encodable for Request { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2); + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); - // hack around https://github.com/paritytech/parity-ethereum/issues/4356 - Encodable::rlp_append(&self.kind(), s); + // hack around https://github.com/paritytech/parity-ethereum/issues/4356 + Encodable::rlp_append(&self.kind(), s); - match *self { - Request::Headers(ref req) => s.append(req), - Request::HeaderProof(ref req) => s.append(req), - Request::TransactionIndex(ref req) => s.append(req), - Request::Receipts(ref req) => s.append(req), - Request::Body(ref req) => s.append(req), - Request::Account(ref req) => s.append(req), - Request::Storage(ref req) => s.append(req), - 
Request::Code(ref req) => s.append(req), - Request::Execution(ref req) => s.append(req), - Request::Signal(ref req) => s.append(req), - }; - } + match *self { + Request::Headers(ref req) => s.append(req), + Request::HeaderProof(ref req) => s.append(req), + Request::TransactionIndex(ref req) => s.append(req), + Request::Receipts(ref req) => s.append(req), + Request::Body(ref req) => s.append(req), + Request::Account(ref req) => s.append(req), + Request::Storage(ref req) => s.append(req), + Request::Code(ref req) => s.append(req), + Request::Execution(ref req) => s.append(req), + Request::Signal(ref req) => s.append(req), + }; + } } impl IncompleteRequest for Request { - type Complete = CompleteRequest; - type Response = Response; + type Complete = CompleteRequest; + type Response = Response; - fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - match *self { - Request::Headers(ref req) => req.check_outputs(f), - Request::HeaderProof(ref req) => req.check_outputs(f), - Request::TransactionIndex(ref req) => req.check_outputs(f), - Request::Receipts(ref req) => req.check_outputs(f), - Request::Body(ref req) => req.check_outputs(f), - Request::Account(ref req) => req.check_outputs(f), - Request::Storage(ref req) => req.check_outputs(f), - Request::Code(ref req) => req.check_outputs(f), - Request::Execution(ref req) => req.check_outputs(f), - Request::Signal(ref req) => req.check_outputs(f), - } - } + fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + match *self { + Request::Headers(ref req) => req.check_outputs(f), + Request::HeaderProof(ref req) => req.check_outputs(f), + Request::TransactionIndex(ref req) => req.check_outputs(f), + Request::Receipts(ref req) => req.check_outputs(f), + Request::Body(ref req) => req.check_outputs(f), + Request::Account(ref req) => req.check_outputs(f), + 
Request::Storage(ref req) => req.check_outputs(f), + Request::Code(ref req) => req.check_outputs(f), + Request::Execution(ref req) => req.check_outputs(f), + Request::Signal(ref req) => req.check_outputs(f), + } + } - fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind) { - match *self { - Request::Headers(ref req) => req.note_outputs(f), - Request::HeaderProof(ref req) => req.note_outputs(f), - Request::TransactionIndex(ref req) => req.note_outputs(f), - Request::Receipts(ref req) => req.note_outputs(f), - Request::Body(ref req) => req.note_outputs(f), - Request::Account(ref req) => req.note_outputs(f), - Request::Storage(ref req) => req.note_outputs(f), - Request::Code(ref req) => req.note_outputs(f), - Request::Execution(ref req) => req.note_outputs(f), - Request::Signal(ref req) => req.note_outputs(f), - } - } + fn note_outputs(&self, f: F) + where + F: FnMut(usize, OutputKind), + { + match *self { + Request::Headers(ref req) => req.note_outputs(f), + Request::HeaderProof(ref req) => req.note_outputs(f), + Request::TransactionIndex(ref req) => req.note_outputs(f), + Request::Receipts(ref req) => req.note_outputs(f), + Request::Body(ref req) => req.note_outputs(f), + Request::Account(ref req) => req.note_outputs(f), + Request::Storage(ref req) => req.note_outputs(f), + Request::Code(ref req) => req.note_outputs(f), + Request::Execution(ref req) => req.note_outputs(f), + Request::Signal(ref req) => req.note_outputs(f), + } + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - match *self { - Request::Headers(ref mut req) => req.fill(oracle), - Request::HeaderProof(ref mut req) => req.fill(oracle), - Request::TransactionIndex(ref mut req) => req.fill(oracle), - Request::Receipts(ref mut req) => req.fill(oracle), - Request::Body(ref mut req) => req.fill(oracle), - Request::Account(ref mut req) => req.fill(oracle), - Request::Storage(ref mut req) => req.fill(oracle), - Request::Code(ref mut req) => req.fill(oracle), - 
Request::Execution(ref mut req) => req.fill(oracle), - Request::Signal(ref mut req) => req.fill(oracle), - } - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + match *self { + Request::Headers(ref mut req) => req.fill(oracle), + Request::HeaderProof(ref mut req) => req.fill(oracle), + Request::TransactionIndex(ref mut req) => req.fill(oracle), + Request::Receipts(ref mut req) => req.fill(oracle), + Request::Body(ref mut req) => req.fill(oracle), + Request::Account(ref mut req) => req.fill(oracle), + Request::Storage(ref mut req) => req.fill(oracle), + Request::Code(ref mut req) => req.fill(oracle), + Request::Execution(ref mut req) => req.fill(oracle), + Request::Signal(ref mut req) => req.fill(oracle), + } + } - fn complete(self) -> Result { - match self { - Request::Headers(req) => req.complete().map(CompleteRequest::Headers), - Request::HeaderProof(req) => req.complete().map(CompleteRequest::HeaderProof), - Request::TransactionIndex(req) => req.complete().map(CompleteRequest::TransactionIndex), - Request::Receipts(req) => req.complete().map(CompleteRequest::Receipts), - Request::Body(req) => req.complete().map(CompleteRequest::Body), - Request::Account(req) => req.complete().map(CompleteRequest::Account), - Request::Storage(req) => req.complete().map(CompleteRequest::Storage), - Request::Code(req) => req.complete().map(CompleteRequest::Code), - Request::Execution(req) => req.complete().map(CompleteRequest::Execution), - Request::Signal(req) => req.complete().map(CompleteRequest::Signal), - } - } + fn complete(self) -> Result { + match self { + Request::Headers(req) => req.complete().map(CompleteRequest::Headers), + Request::HeaderProof(req) => req.complete().map(CompleteRequest::HeaderProof), + Request::TransactionIndex(req) => req.complete().map(CompleteRequest::TransactionIndex), + Request::Receipts(req) => req.complete().map(CompleteRequest::Receipts), + Request::Body(req) => req.complete().map(CompleteRequest::Body), + 
Request::Account(req) => req.complete().map(CompleteRequest::Account), + Request::Storage(req) => req.complete().map(CompleteRequest::Storage), + Request::Code(req) => req.complete().map(CompleteRequest::Code), + Request::Execution(req) => req.complete().map(CompleteRequest::Execution), + Request::Signal(req) => req.complete().map(CompleteRequest::Signal), + } + } - fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { - match *self { - Request::Headers(ref mut req) => req.adjust_refs(mapping), - Request::HeaderProof(ref mut req) => req.adjust_refs(mapping), - Request::TransactionIndex(ref mut req) => req.adjust_refs(mapping), - Request::Receipts(ref mut req) => req.adjust_refs(mapping), - Request::Body(ref mut req) => req.adjust_refs(mapping), - Request::Account(ref mut req) => req.adjust_refs(mapping), - Request::Storage(ref mut req) => req.adjust_refs(mapping), - Request::Code(ref mut req) => req.adjust_refs(mapping), - Request::Execution(ref mut req) => req.adjust_refs(mapping), - Request::Signal(ref mut req) => req.adjust_refs(mapping), - } - } + fn adjust_refs(&mut self, mapping: F) + where + F: FnMut(usize) -> usize, + { + match *self { + Request::Headers(ref mut req) => req.adjust_refs(mapping), + Request::HeaderProof(ref mut req) => req.adjust_refs(mapping), + Request::TransactionIndex(ref mut req) => req.adjust_refs(mapping), + Request::Receipts(ref mut req) => req.adjust_refs(mapping), + Request::Body(ref mut req) => req.adjust_refs(mapping), + Request::Account(ref mut req) => req.adjust_refs(mapping), + Request::Storage(ref mut req) => req.adjust_refs(mapping), + Request::Code(ref mut req) => req.adjust_refs(mapping), + Request::Execution(ref mut req) => req.adjust_refs(mapping), + Request::Signal(ref mut req) => req.adjust_refs(mapping), + } + } } impl CheckedRequest for Request { - type Extract = (); - type Error = WrongKind; - type Environment = (); + type Extract = (); + type Error = WrongKind; + type Environment = (); - fn 
check_response(&self, _: &Self::Complete, _: &(), response: &Response) -> Result<(), WrongKind> { - if self.kind() == response.kind() { - Ok(()) - } else { - Err(WrongKind) - } - } + fn check_response( + &self, + _: &Self::Complete, + _: &(), + response: &Response, + ) -> Result<(), WrongKind> { + if self.kind() == response.kind() { + Ok(()) + } else { + Err(WrongKind) + } + } } /// Kinds of requests. @@ -468,1471 +482,1654 @@ impl CheckedRequest for Request { #[repr(u8)] #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)] pub enum Kind { - /// A request for headers. - Headers = 0, - /// A request for a header proof. - HeaderProof = 1, - /// A request for a transaction index. - TransactionIndex = 2, - /// A request for block receipts. - Receipts = 3, - /// A request for a block body. - Body = 4, - /// A request for an account + merkle proof. - Account = 5, - /// A request for contract storage + merkle proof - Storage = 6, - /// A request for contract. - Code = 7, - /// A request for transaction execution + state proof. - Execution = 8, - /// A request for epoch transition signal. - Signal = 9, + /// A request for headers. + Headers = 0, + /// A request for a header proof. + HeaderProof = 1, + /// A request for a transaction index. + TransactionIndex = 2, + /// A request for block receipts. + Receipts = 3, + /// A request for a block body. + Body = 4, + /// A request for an account + merkle proof. + Account = 5, + /// A request for contract storage + merkle proof + Storage = 6, + /// A request for contract. + Code = 7, + /// A request for transaction execution + state proof. + Execution = 8, + /// A request for epoch transition signal. + Signal = 9, } impl Decodable for Kind { - fn decode(rlp: &Rlp) -> Result { - match rlp.as_val::()? 
{ - 0 => Ok(Kind::Headers), - 1 => Ok(Kind::HeaderProof), - 2 => Ok(Kind::TransactionIndex), - 3 => Ok(Kind::Receipts), - 4 => Ok(Kind::Body), - 5 => Ok(Kind::Account), - 6 => Ok(Kind::Storage), - 7 => Ok(Kind::Code), - 8 => Ok(Kind::Execution), - 9 => Ok(Kind::Signal), - _ => Err(DecoderError::Custom("Unknown PIP request ID.")), - } - } + fn decode(rlp: &Rlp) -> Result { + match rlp.as_val::()? { + 0 => Ok(Kind::Headers), + 1 => Ok(Kind::HeaderProof), + 2 => Ok(Kind::TransactionIndex), + 3 => Ok(Kind::Receipts), + 4 => Ok(Kind::Body), + 5 => Ok(Kind::Account), + 6 => Ok(Kind::Storage), + 7 => Ok(Kind::Code), + 8 => Ok(Kind::Execution), + 9 => Ok(Kind::Signal), + _ => Err(DecoderError::Custom("Unknown PIP request ID.")), + } + } } impl Encodable for Kind { - fn rlp_append(&self, s: &mut RlpStream) { - s.append(&(*self as u8)); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&(*self as u8)); + } } /// All response types. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Response { - /// A response for block headers. - Headers(HeadersResponse), - /// A response for a header proof (from a CHT) - HeaderProof(HeaderProofResponse), - /// A response for a transaction index. - TransactionIndex(TransactionIndexResponse), - /// A response for a block's receipts. - Receipts(ReceiptsResponse), - /// A response for a block body. - Body(BodyResponse), - /// A response for a merkle proof of an account. - Account(AccountResponse), - /// A response for a merkle proof of contract storage. - Storage(StorageResponse), - /// A response for contract code. - Code(CodeResponse), - /// A response for proof of execution, - Execution(ExecutionResponse), - /// A response for epoch change signal. - Signal(SignalResponse), + /// A response for block headers. + Headers(HeadersResponse), + /// A response for a header proof (from a CHT) + HeaderProof(HeaderProofResponse), + /// A response for a transaction index. 
+ TransactionIndex(TransactionIndexResponse), + /// A response for a block's receipts. + Receipts(ReceiptsResponse), + /// A response for a block body. + Body(BodyResponse), + /// A response for a merkle proof of an account. + Account(AccountResponse), + /// A response for a merkle proof of contract storage. + Storage(StorageResponse), + /// A response for contract code. + Code(CodeResponse), + /// A response for proof of execution, + Execution(ExecutionResponse), + /// A response for epoch change signal. + Signal(SignalResponse), } impl ResponseLike for Response { - /// Fill reusable outputs by writing them into the function. - fn fill_outputs(&self, f: F) where F: FnMut(usize, Output) { - match *self { - Response::Headers(ref res) => res.fill_outputs(f), - Response::HeaderProof(ref res) => res.fill_outputs(f), - Response::TransactionIndex(ref res) => res.fill_outputs(f), - Response::Receipts(ref res) => res.fill_outputs(f), - Response::Body(ref res) => res.fill_outputs(f), - Response::Account(ref res) => res.fill_outputs(f), - Response::Storage(ref res) => res.fill_outputs(f), - Response::Code(ref res) => res.fill_outputs(f), - Response::Execution(ref res) => res.fill_outputs(f), - Response::Signal(ref res) => res.fill_outputs(f), - } - } + /// Fill reusable outputs by writing them into the function. 
+ fn fill_outputs(&self, f: F) + where + F: FnMut(usize, Output), + { + match *self { + Response::Headers(ref res) => res.fill_outputs(f), + Response::HeaderProof(ref res) => res.fill_outputs(f), + Response::TransactionIndex(ref res) => res.fill_outputs(f), + Response::Receipts(ref res) => res.fill_outputs(f), + Response::Body(ref res) => res.fill_outputs(f), + Response::Account(ref res) => res.fill_outputs(f), + Response::Storage(ref res) => res.fill_outputs(f), + Response::Code(ref res) => res.fill_outputs(f), + Response::Execution(ref res) => res.fill_outputs(f), + Response::Signal(ref res) => res.fill_outputs(f), + } + } } impl Response { - /// Inspect the kind of this response. - pub fn kind(&self) -> Kind { - match *self { - Response::Headers(_) => Kind::Headers, - Response::HeaderProof(_) => Kind::HeaderProof, - Response::TransactionIndex(_) => Kind::TransactionIndex, - Response::Receipts(_) => Kind::Receipts, - Response::Body(_) => Kind::Body, - Response::Account(_) => Kind::Account, - Response::Storage(_) => Kind::Storage, - Response::Code(_) => Kind::Code, - Response::Execution(_) => Kind::Execution, - Response::Signal(_) => Kind::Signal, - } - } + /// Inspect the kind of this response. + pub fn kind(&self) -> Kind { + match *self { + Response::Headers(_) => Kind::Headers, + Response::HeaderProof(_) => Kind::HeaderProof, + Response::TransactionIndex(_) => Kind::TransactionIndex, + Response::Receipts(_) => Kind::Receipts, + Response::Body(_) => Kind::Body, + Response::Account(_) => Kind::Account, + Response::Storage(_) => Kind::Storage, + Response::Code(_) => Kind::Code, + Response::Execution(_) => Kind::Execution, + Response::Signal(_) => Kind::Signal, + } + } } impl Decodable for Response { - fn decode(rlp: &Rlp) -> Result { - match rlp.val_at::(0)? 
{ - Kind::Headers => Ok(Response::Headers(rlp.val_at(1)?)), - Kind::HeaderProof => Ok(Response::HeaderProof(rlp.val_at(1)?)), - Kind::TransactionIndex => Ok(Response::TransactionIndex(rlp.val_at(1)?)), - Kind::Receipts => Ok(Response::Receipts(rlp.val_at(1)?)), - Kind::Body => Ok(Response::Body(rlp.val_at(1)?)), - Kind::Account => Ok(Response::Account(rlp.val_at(1)?)), - Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)), - Kind::Code => Ok(Response::Code(rlp.val_at(1)?)), - Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)), - Kind::Signal => Ok(Response::Signal(rlp.val_at(1)?)), - } - } + fn decode(rlp: &Rlp) -> Result { + match rlp.val_at::(0)? { + Kind::Headers => Ok(Response::Headers(rlp.val_at(1)?)), + Kind::HeaderProof => Ok(Response::HeaderProof(rlp.val_at(1)?)), + Kind::TransactionIndex => Ok(Response::TransactionIndex(rlp.val_at(1)?)), + Kind::Receipts => Ok(Response::Receipts(rlp.val_at(1)?)), + Kind::Body => Ok(Response::Body(rlp.val_at(1)?)), + Kind::Account => Ok(Response::Account(rlp.val_at(1)?)), + Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)), + Kind::Code => Ok(Response::Code(rlp.val_at(1)?)), + Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)), + Kind::Signal => Ok(Response::Signal(rlp.val_at(1)?)), + } + } } impl Encodable for Response { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2); + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); - // hack around https://github.com/paritytech/parity-ethereum/issues/4356 - Encodable::rlp_append(&self.kind(), s); + // hack around https://github.com/paritytech/parity-ethereum/issues/4356 + Encodable::rlp_append(&self.kind(), s); - match *self { - Response::Headers(ref res) => s.append(res), - Response::HeaderProof(ref res) => s.append(res), - Response::TransactionIndex(ref res) => s.append(res), - Response::Receipts(ref res) => s.append(res), - Response::Body(ref res) => s.append(res), - Response::Account(ref res) => s.append(res), - 
Response::Storage(ref res) => s.append(res), - Response::Code(ref res) => s.append(res), - Response::Execution(ref res) => s.append(res), - Response::Signal(ref res) => s.append(res), - }; - } + match *self { + Response::Headers(ref res) => s.append(res), + Response::HeaderProof(ref res) => s.append(res), + Response::TransactionIndex(ref res) => s.append(res), + Response::Receipts(ref res) => s.append(res), + Response::Body(ref res) => s.append(res), + Response::Account(ref res) => s.append(res), + Response::Storage(ref res) => s.append(res), + Response::Code(ref res) => s.append(res), + Response::Execution(ref res) => s.append(res), + Response::Signal(ref res) => s.append(res), + }; + } } /// A potentially incomplete request. pub trait IncompleteRequest: Sized { - /// The complete variant of this request. - type Complete; - /// The response to this request. - type Response: ResponseLike; + /// The complete variant of this request. + type Complete; + /// The response to this request. + type Response: ResponseLike; - /// Check prior outputs against the needed inputs. - /// - /// This is called to ensure consistency of this request with - /// others in the same packet. - fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>; + /// Check prior outputs against the needed inputs. + /// + /// This is called to ensure consistency of this request with + /// others in the same packet. + fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>; - /// Note that this request will produce the following outputs. - fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind); + /// Note that this request will produce the following outputs. + fn note_outputs(&self, f: F) + where + F: FnMut(usize, OutputKind); - /// Fill fields of the request. 
- /// - /// This function is provided an "output oracle" which allows fetching of - /// prior request outputs. - /// Only outputs previously checked with `check_outputs` may be available. - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result; + /// Fill fields of the request. + /// + /// This function is provided an "output oracle" which allows fetching of + /// prior request outputs. + /// Only outputs previously checked with `check_outputs` may be available. + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result; - /// Attempt to convert this request into its complete variant. - /// Will succeed if all fields have been filled, will fail otherwise. - fn complete(self) -> Result; + /// Attempt to convert this request into its complete variant. + /// Will succeed if all fields have been filled, will fail otherwise. + fn complete(self) -> Result; - /// Adjust back-reference request indices. - fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize; + /// Adjust back-reference request indices. + fn adjust_refs(&mut self, mapping: F) + where + F: FnMut(usize) -> usize; } /// A request which can be checked against its response for more validity. pub trait CheckedRequest: IncompleteRequest { - /// Data extracted during the check. - type Extract; - /// Error encountered during the check. - type Error; - /// Environment passed to response check. - type Environment; + /// Data extracted during the check. + type Extract; + /// Error encountered during the check. + type Error; + /// Environment passed to response check. + type Environment; - /// Check whether the response matches (beyond the type). - fn check_response(&self, &Self::Complete, &Self::Environment, &Self::Response) -> Result; + /// Check whether the response matches (beyond the type). + fn check_response( + &self, + &Self::Complete, + &Self::Environment, + &Self::Response, + ) -> Result; } /// A response-like object. /// /// These contain re-usable outputs. 
pub trait ResponseLike { - /// Write all re-usable outputs into the provided function. - fn fill_outputs(&self, output_store: F) where F: FnMut(usize, Output); + /// Write all re-usable outputs into the provided function. + fn fill_outputs(&self, output_store: F) + where + F: FnMut(usize, Output); } /// Header request. pub mod header { - use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; - use common_types::encoded; - use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; + use super::{Field, HashOrNumber, NoSuchOutput, Output, OutputKind}; + use common_types::encoded; + use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; - /// Potentially incomplete headers request. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Incomplete { - /// Start block. - pub start: Field, - /// Skip between. - pub skip: u64, - /// Maximum to return. - pub max: u64, - /// Whether to reverse from start. - pub reverse: bool, - } + /// Potentially incomplete headers request. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Incomplete { + /// Start block. + pub start: Field, + /// Skip between. + pub skip: u64, + /// Maximum to return. + pub max: u64, + /// Whether to reverse from start. 
+ pub reverse: bool, + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - match self.start { - Field::Scalar(_) => Ok(()), - Field::BackReference(req, idx) => - f(req, idx, OutputKind::Hash).or_else(|_| f(req, idx, OutputKind::Number)) - } - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + match self.start { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => { + f(req, idx, OutputKind::Hash).or_else(|_| f(req, idx, OutputKind::Number)) + } + } + } - fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) { } + fn note_outputs(&self, _: F) + where + F: FnMut(usize, OutputKind), + { + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.start { - self.start = match oracle(req, idx) { - Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), - Ok(Output::Number(num)) => Field::Scalar(num.into()), - Err(_) => Field::BackReference(req, idx), - } - } - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.start { + self.start = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), + Ok(Output::Number(num)) => Field::Scalar(num.into()), + Err(_) => Field::BackReference(req, idx), + } + } + } - fn complete(self) -> Result { - Ok(Complete { - start: self.start.into_scalar()?, - skip: self.skip, - max: self.max, - reverse: self.reverse, - }) - } + fn complete(self) -> Result { + Ok(Complete { + start: self.start.into_scalar()?, + skip: self.skip, + max: self.max, + reverse: self.reverse, + }) + } - fn 
adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { - self.start.adjust_req(mapping) - } - } + fn adjust_refs(&mut self, mapping: F) + where + F: FnMut(usize) -> usize, + { + self.start.adjust_req(mapping) + } + } - /// A complete header request. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// Start block. - pub start: HashOrNumber, - /// Skip between. - pub skip: u64, - /// Maximum to return. - pub max: u64, - /// Whether to reverse from start. - pub reverse: bool, - } + /// A complete header request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Start block. + pub start: HashOrNumber, + /// Skip between. + pub skip: u64, + /// Maximum to return. + pub max: u64, + /// Whether to reverse from start. + pub reverse: bool, + } - /// The output of a request for headers. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Response { - /// The headers requested. - pub headers: Vec, - } + /// The output of a request for headers. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The headers requested. + pub headers: Vec, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by writing them into the function. - fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) { } - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by writing them into the function. + fn fill_outputs(&self, _: F) + where + F: FnMut(usize, Output), + { + } + } - impl Decodable for Response { - fn decode(rlp: &Rlp) -> Result { - use common_types::header::Header as FullHeader; + impl Decodable for Response { + fn decode(rlp: &Rlp) -> Result { + use common_types::header::Header as FullHeader; - let mut headers = Vec::new(); + let mut headers = Vec::new(); - for item in rlp.iter() { - // check that it's a valid encoding. - // TODO: just return full headers here? 
- let _: FullHeader = item.as_val()?; - headers.push(encoded::Header::new(item.as_raw().to_owned())); - } + for item in rlp.iter() { + // check that it's a valid encoding. + // TODO: just return full headers here? + let _: FullHeader = item.as_val()?; + headers.push(encoded::Header::new(item.as_raw().to_owned())); + } - Ok(Response { headers }) - } - } + Ok(Response { headers }) + } + } - impl Encodable for Response { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(self.headers.len()); - for header in &self.headers { - s.append_raw(header.rlp().as_raw(), 1); - } - } - } + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.headers.len()); + for header in &self.headers { + s.append_raw(header.rlp().as_raw(), 1); + } + } + } } /// Request and response for header proofs. pub mod header_proof { - use super::{Field, NoSuchOutput, OutputKind, Output}; - use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; - use ethereum_types::{H256, U256}; - use bytes::Bytes; + use super::{Field, NoSuchOutput, Output, OutputKind}; + use bytes::Bytes; + use ethereum_types::{H256, U256}; + use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; - /// Potentially incomplete header proof request. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Incomplete { - /// Block number. - pub num: Field, - } + /// Potentially incomplete header proof request. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Incomplete { + /// Block number. 
+ pub num: Field, + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - match self.num { - Field::Scalar(_) => Ok(()), - Field::BackReference(req, idx) => f(req, idx, OutputKind::Number), - } - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + match self.num { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Number), + } + } - fn note_outputs(&self, mut note: F) where F: FnMut(usize, OutputKind) { - note(0, OutputKind::Hash); - } + fn note_outputs(&self, mut note: F) + where + F: FnMut(usize, OutputKind), + { + note(0, OutputKind::Hash); + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.num { - self.num = match oracle(req, idx) { - Ok(Output::Number(num)) => Field::Scalar(num), - _ => Field::BackReference(req, idx), - } - } - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.num { + self.num = match oracle(req, idx) { + Ok(Output::Number(num)) => Field::Scalar(num), + _ => Field::BackReference(req, idx), + } + } + } - fn complete(self) -> Result { - Ok(Complete { - num: self.num.into_scalar()?, - }) - } + fn complete(self) -> Result { + Ok(Complete { + num: self.num.into_scalar()?, + }) + } - fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { - self.num.adjust_req(mapping) - } - } + fn adjust_refs(&mut self, mapping: F) + where + F: FnMut(usize) -> usize, + { + self.num.adjust_req(mapping) + } + } - /// A complete header proof request. 
- #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// The number to get a header proof for. - pub num: u64, - } + /// A complete header proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The number to get a header proof for. + pub num: u64, + } - /// The output of a request for a header proof. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Response { - /// Inclusion proof of the header and total difficulty in the CHT. - pub proof: Vec, - /// The proved header's hash. - pub hash: H256, - /// The proved header's total difficulty. - pub td: U256, - } + /// The output of a request for a header proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion proof of the header and total difficulty in the CHT. + pub proof: Vec, + /// The proved header's hash. + pub hash: H256, + /// The proved header's total difficulty. + pub td: U256, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by providing them to the function. - fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { - f(0, Output::Hash(self.hash)); - } - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. 
+ fn fill_outputs(&self, mut f: F) + where + F: FnMut(usize, Output), + { + f(0, Output::Hash(self.hash)); + } + } - impl Decodable for Response { - fn decode(rlp: &Rlp) -> Result { - Ok(Response { - proof: rlp.list_at(0)?, - hash: rlp.val_at(1)?, - td: rlp.val_at(2)?, - }) - } - } + impl Decodable for Response { + fn decode(rlp: &Rlp) -> Result { + Ok(Response { + proof: rlp.list_at(0)?, + hash: rlp.val_at(1)?, + td: rlp.val_at(2)?, + }) + } + } - impl Encodable for Response { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3) - .append_list::,_>(&self.proof[..]) - .append(&self.hash) - .append(&self.td); - } - } + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3) + .append_list::, _>(&self.proof[..]) + .append(&self.hash) + .append(&self.td); + } + } } /// Request and response for transaction index. pub mod transaction_index { - use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethereum_types::H256; + use super::{Field, NoSuchOutput, Output, OutputKind}; + use ethereum_types::H256; - /// Potentially incomplete transaction index request. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Incomplete { - /// Transaction hash to get index for. - pub hash: Field, - } + /// Potentially incomplete transaction index request. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Incomplete { + /// Transaction hash to get index for. 
+ pub hash: Field, + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - match self.hash { - Field::Scalar(_) => Ok(()), - Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), - } - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + match self.hash { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), + } + } - fn note_outputs(&self, mut f: F) where F: FnMut(usize, OutputKind) { - f(0, OutputKind::Number); - f(1, OutputKind::Hash); - } + fn note_outputs(&self, mut f: F) + where + F: FnMut(usize, OutputKind), + { + f(0, OutputKind::Number); + f(1, OutputKind::Hash); + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.hash { - self.hash = match oracle(req, idx) { - Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), - _ => Field::BackReference(req, idx), - } - } - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.hash { + self.hash = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), + _ => Field::BackReference(req, idx), + } + } + } - fn complete(self) -> Result { - Ok(Complete { - hash: self.hash.into_scalar()?, - }) - } + fn complete(self) -> Result { + Ok(Complete { + hash: self.hash.into_scalar()?, + }) + } - fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { - self.hash.adjust_req(mapping) - } - } + fn adjust_refs(&mut self, mapping: F) + where + F: FnMut(usize) -> usize, + { + self.hash.adjust_req(mapping) + } + } - /// A 
complete transaction index request. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// The transaction hash to get index for. - pub hash: H256, - } + /// A complete transaction index request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The transaction hash to get index for. + pub hash: H256, + } - /// The output of a request for transaction index. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Response { - /// Block number. - pub num: u64, - /// Block hash - pub hash: H256, - /// Index in block. - pub index: u64, - } + /// The output of a request for transaction index. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Response { + /// Block number. + pub num: u64, + /// Block hash + pub hash: H256, + /// Index in block. + pub index: u64, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by providing them to the function. - fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { - f(0, Output::Number(self.num)); - f(1, Output::Hash(self.hash)); - } - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. + fn fill_outputs(&self, mut f: F) + where + F: FnMut(usize, Output), + { + f(0, Output::Number(self.num)); + f(1, Output::Hash(self.hash)); + } + } } /// Request and response for block receipts pub mod block_receipts { - use super::{Field, NoSuchOutput, OutputKind, Output}; - use common_types::receipt::Receipt; - use ethereum_types::H256; + use super::{Field, NoSuchOutput, Output, OutputKind}; + use common_types::receipt::Receipt; + use ethereum_types::H256; - /// Potentially incomplete block receipts request. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Incomplete { - /// Block hash to get receipts for. - pub hash: Field, - } + /// Potentially incomplete block receipts request. 
+ #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Incomplete { + /// Block hash to get receipts for. + pub hash: Field, + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - match self.hash { - Field::Scalar(_) => Ok(()), - Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), - } - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + match self.hash { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), + } + } - fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + fn note_outputs(&self, _: F) + where + F: FnMut(usize, OutputKind), + { + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.hash { - self.hash = match oracle(req, idx) { - Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), - _ => Field::BackReference(req, idx), - } - } - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.hash { + self.hash = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), + _ => Field::BackReference(req, idx), + } + } + } - fn complete(self) -> Result { - Ok(Complete { - hash: self.hash.into_scalar()?, - }) - } + fn complete(self) -> Result { + Ok(Complete { + hash: self.hash.into_scalar()?, + }) + } - fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { - self.hash.adjust_req(mapping) - } - } + fn adjust_refs(&mut self, mapping: F) + where + F: FnMut(usize) -> usize, + { + self.hash.adjust_req(mapping) + 
} + } - /// A complete block receipts request. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// The number to get block receipts for. - pub hash: H256, - } + /// A complete block receipts request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The number to get block receipts for. + pub hash: H256, + } - /// The output of a request for block receipts. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] - pub struct Response { - /// The block receipts. - pub receipts: Vec - } + /// The output of a request for block receipts. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] + pub struct Response { + /// The block receipts. + pub receipts: Vec, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by providing them to the function. - fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. + fn fill_outputs(&self, _: F) + where + F: FnMut(usize, Output), + { + } + } } /// Request and response for a block body pub mod block_body { - use super::{Field, NoSuchOutput, OutputKind, Output}; - use common_types::encoded; - use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; - use ethereum_types::H256; + use super::{Field, NoSuchOutput, Output, OutputKind}; + use common_types::encoded; + use ethereum_types::H256; + use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; - /// Potentially incomplete block body request. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Incomplete { - /// Block hash to get receipts for. - pub hash: Field, - } + /// Potentially incomplete block body request. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Incomplete { + /// Block hash to get receipts for. 
+ pub hash: Field, + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - match self.hash { - Field::Scalar(_) => Ok(()), - Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), - } - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + match self.hash { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), + } + } - fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + fn note_outputs(&self, _: F) + where + F: FnMut(usize, OutputKind), + { + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.hash { - self.hash = match oracle(req, idx) { - Ok(Output::Hash(hash)) => Field::Scalar(hash), - _ => Field::BackReference(req, idx), - } - } - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.hash { + self.hash = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash), + _ => Field::BackReference(req, idx), + } + } + } - fn complete(self) -> Result { - Ok(Complete { - hash: self.hash.into_scalar()?, - }) - } + fn complete(self) -> Result { + Ok(Complete { + hash: self.hash.into_scalar()?, + }) + } - fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { - self.hash.adjust_req(mapping) - } - } + fn adjust_refs(&mut self, mapping: F) + where + F: FnMut(usize) -> usize, + { + self.hash.adjust_req(mapping) + } + } - /// A complete block body request. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// The hash to get a block body for. 
- pub hash: H256, - } + /// A complete block body request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The hash to get a block body for. + pub hash: H256, + } - /// The output of a request for block body. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Response { - /// The block body. - pub body: encoded::Body, - } + /// The output of a request for block body. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The block body. + pub body: encoded::Body, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by providing them to the function. - fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. + fn fill_outputs(&self, _: F) + where + F: FnMut(usize, Output), + { + } + } - impl Decodable for Response { - fn decode(rlp: &Rlp) -> Result { - use common_types::header::Header as FullHeader; - use common_types::transaction::UnverifiedTransaction; + impl Decodable for Response { + fn decode(rlp: &Rlp) -> Result { + use common_types::{header::Header as FullHeader, transaction::UnverifiedTransaction}; - // check body validity. - let _: Vec = rlp.list_at(0)?; - let _: Vec = rlp.list_at(1)?; + // check body validity. + let _: Vec = rlp.list_at(0)?; + let _: Vec = rlp.list_at(1)?; - Ok(Response { - body: encoded::Body::new(rlp.as_raw().to_owned()), - }) - } - } + Ok(Response { + body: encoded::Body::new(rlp.as_raw().to_owned()), + }) + } + } - impl Encodable for Response { - fn rlp_append(&self, s: &mut RlpStream) { - s.append_raw(&self.body.rlp().as_raw(), 1); - } - } + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append_raw(&self.body.rlp().as_raw(), 1); + } + } } /// A request for an account proof. 
pub mod account { - use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethereum_types::{H256, U256}; - use bytes::Bytes; + use super::{Field, NoSuchOutput, Output, OutputKind}; + use bytes::Bytes; + use ethereum_types::{H256, U256}; - /// Potentially incomplete request for an account proof. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Incomplete { - /// Block hash to request state proof for. - pub block_hash: Field, - /// Hash of the account's address. - pub address_hash: Field, - } + /// Potentially incomplete request for an account proof. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Incomplete { + /// Block hash to request state proof for. + pub block_hash: Field, + /// Hash of the account's address. + pub address_hash: Field, + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - if let Field::BackReference(req, idx) = self.block_hash { - f(req, idx, OutputKind::Hash)? - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)? + } - if let Field::BackReference(req, idx) = self.address_hash { - f(req, idx, OutputKind::Hash)? - } + if let Field::BackReference(req, idx) = self.address_hash { + f(req, idx, OutputKind::Hash)? 
+ } - Ok(()) - } + Ok(()) + } - fn note_outputs(&self, mut f: F) where F: FnMut(usize, OutputKind) { - f(0, OutputKind::Hash); - f(1, OutputKind::Hash); - } + fn note_outputs(&self, mut f: F) + where + F: FnMut(usize, OutputKind), + { + f(0, OutputKind::Hash); + f(1, OutputKind::Hash); + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.block_hash { - self.block_hash = match oracle(req, idx) { - Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), - _ => Field::BackReference(req, idx), - } - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), + _ => Field::BackReference(req, idx), + } + } - if let Field::BackReference(req, idx) = self.address_hash { - self.address_hash = match oracle(req, idx) { - Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash), - _ => Field::BackReference(req, idx), - } - } - } + if let Field::BackReference(req, idx) = self.address_hash { + self.address_hash = match oracle(req, idx) { + Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash), + _ => Field::BackReference(req, idx), + } + } + } - fn complete(self) -> Result { - Ok(Complete { - block_hash: self.block_hash.into_scalar()?, - address_hash: self.address_hash.into_scalar()?, - }) - } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + address_hash: self.address_hash.into_scalar()?, + }) + } - fn adjust_refs(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { - self.block_hash.adjust_req(&mut mapping); - self.address_hash.adjust_req(&mut mapping); - } - } + fn adjust_refs(&mut self, mut mapping: F) + where + F: FnMut(usize) -> usize, + { + self.block_hash.adjust_req(&mut mapping); + self.address_hash.adjust_req(&mut mapping); + } + } - /// A complete 
request for an account. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// Block hash to request state proof for. - pub block_hash: H256, - /// Hash of the account's address. - pub address_hash: H256, - } + /// A complete request for an account. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Block hash to request state proof for. + pub block_hash: H256, + /// Hash of the account's address. + pub address_hash: H256, + } - /// The output of a request for an account state proof. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Response { - /// Inclusion/exclusion proof - pub proof: Vec, - /// Account nonce. - pub nonce: U256, - /// Account balance. - pub balance: U256, - /// Account's code hash. - pub code_hash: H256, - /// Account's storage trie root. - pub storage_root: H256, - } + /// The output of a request for an account state proof. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Response { + /// Inclusion/exclusion proof + pub proof: Vec, + /// Account nonce. + pub nonce: U256, + /// Account balance. + pub balance: U256, + /// Account's code hash. + pub code_hash: H256, + /// Account's storage trie root. + pub storage_root: H256, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by providing them to the function. - fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { - f(0, Output::Hash(self.code_hash)); - f(1, Output::Hash(self.storage_root)); - } - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. + fn fill_outputs(&self, mut f: F) + where + F: FnMut(usize, Output), + { + f(0, Output::Hash(self.code_hash)); + f(1, Output::Hash(self.storage_root)); + } + } } /// A request for a storage proof. 
pub mod storage { - use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethereum_types::H256; - use bytes::Bytes; + use super::{Field, NoSuchOutput, Output, OutputKind}; + use bytes::Bytes; + use ethereum_types::H256; - /// Potentially incomplete request for an storage proof. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Incomplete { - /// Block hash to request state proof for. - pub block_hash: Field, - /// Hash of the account's address. - pub address_hash: Field, - /// Hash of the storage key. - pub key_hash: Field, - } + /// Potentially incomplete request for an storage proof. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Incomplete { + /// Block hash to request state proof for. + pub block_hash: Field, + /// Hash of the account's address. + pub address_hash: Field, + /// Hash of the storage key. + pub key_hash: Field, + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - if let Field::BackReference(req, idx) = self.block_hash { - f(req, idx, OutputKind::Hash)? - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)? + } - if let Field::BackReference(req, idx) = self.address_hash { - f(req, idx, OutputKind::Hash)? - } + if let Field::BackReference(req, idx) = self.address_hash { + f(req, idx, OutputKind::Hash)? + } - if let Field::BackReference(req, idx) = self.key_hash { - f(req, idx, OutputKind::Hash)? - } + if let Field::BackReference(req, idx) = self.key_hash { + f(req, idx, OutputKind::Hash)? 
+ } - Ok(()) - } + Ok(()) + } - fn note_outputs(&self, mut f: F) where F: FnMut(usize, OutputKind) { - f(0, OutputKind::Hash); - } + fn note_outputs(&self, mut f: F) + where + F: FnMut(usize, OutputKind), + { + f(0, OutputKind::Hash); + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.block_hash { - self.block_hash = match oracle(req, idx) { - Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), - _ => Field::BackReference(req, idx), - } - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), + _ => Field::BackReference(req, idx), + } + } - if let Field::BackReference(req, idx) = self.address_hash { - self.address_hash = match oracle(req, idx) { - Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash), - _ => Field::BackReference(req, idx), - } - } + if let Field::BackReference(req, idx) = self.address_hash { + self.address_hash = match oracle(req, idx) { + Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash), + _ => Field::BackReference(req, idx), + } + } - if let Field::BackReference(req, idx) = self.key_hash { - self.key_hash = match oracle(req, idx) { - Ok(Output::Hash(key_hash)) => Field::Scalar(key_hash), - _ => Field::BackReference(req, idx), - } - } - } + if let Field::BackReference(req, idx) = self.key_hash { + self.key_hash = match oracle(req, idx) { + Ok(Output::Hash(key_hash)) => Field::Scalar(key_hash), + _ => Field::BackReference(req, idx), + } + } + } - fn complete(self) -> Result { - Ok(Complete { - block_hash: self.block_hash.into_scalar()?, - address_hash: self.address_hash.into_scalar()?, - key_hash: self.key_hash.into_scalar()?, - }) - } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + address_hash: 
self.address_hash.into_scalar()?, + key_hash: self.key_hash.into_scalar()?, + }) + } - fn adjust_refs(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { - self.block_hash.adjust_req(&mut mapping); - self.address_hash.adjust_req(&mut mapping); - self.key_hash.adjust_req(&mut mapping); - } - } + fn adjust_refs(&mut self, mut mapping: F) + where + F: FnMut(usize) -> usize, + { + self.block_hash.adjust_req(&mut mapping); + self.address_hash.adjust_req(&mut mapping); + self.key_hash.adjust_req(&mut mapping); + } + } - /// A complete request for a storage proof. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// Block hash to request state proof for. - pub block_hash: H256, - /// Hash of the account's address. - pub address_hash: H256, - /// Storage key hash. - pub key_hash: H256, - } + /// A complete request for a storage proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Block hash to request state proof for. + pub block_hash: H256, + /// Hash of the account's address. + pub address_hash: H256, + /// Storage key hash. + pub key_hash: H256, + } - /// The output of a request for an account state proof. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Response { - /// Inclusion/exclusion proof - pub proof: Vec, - /// Storage value. - pub value: H256, - } + /// The output of a request for an account state proof. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Response { + /// Inclusion/exclusion proof + pub proof: Vec, + /// Storage value. + pub value: H256, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by providing them to the function. - fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { - f(0, Output::Hash(self.value)); - } - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. 
+ fn fill_outputs(&self, mut f: F) + where + F: FnMut(usize, Output), + { + f(0, Output::Hash(self.value)); + } + } } /// A request for contract code. pub mod contract_code { - use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethereum_types::H256; - use bytes::Bytes; + use super::{Field, NoSuchOutput, Output, OutputKind}; + use bytes::Bytes; + use ethereum_types::H256; - /// Potentially incomplete contract code request. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Incomplete { - /// The block hash to request the state for. - pub block_hash: Field, - /// The code hash. - pub code_hash: Field, - } + /// Potentially incomplete contract code request. + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Incomplete { + /// The block hash to request the state for. + pub block_hash: Field, + /// The code hash. + pub code_hash: Field, + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - if let Field::BackReference(req, idx) = self.block_hash { - f(req, idx, OutputKind::Hash)?; - } - if let Field::BackReference(req, idx) = self.code_hash { - f(req, idx, OutputKind::Hash)?; - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } + if let Field::BackReference(req, idx) = self.code_hash { + f(req, idx, OutputKind::Hash)?; + } - Ok(()) - } + Ok(()) + } - fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + fn note_outputs(&self, _: F) + where + F: FnMut(usize, OutputKind), + { + } - fn fill(&mut self, 
oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.block_hash { - self.block_hash = match oracle(req, idx) { - Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), - _ => Field::BackReference(req, idx), - } - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), + _ => Field::BackReference(req, idx), + } + } - if let Field::BackReference(req, idx) = self.code_hash { - self.code_hash = match oracle(req, idx) { - Ok(Output::Hash(code_hash)) => Field::Scalar(code_hash), - _ => Field::BackReference(req, idx), - } - } - } + if let Field::BackReference(req, idx) = self.code_hash { + self.code_hash = match oracle(req, idx) { + Ok(Output::Hash(code_hash)) => Field::Scalar(code_hash), + _ => Field::BackReference(req, idx), + } + } + } - fn complete(self) -> Result { - Ok(Complete { - block_hash: self.block_hash.into_scalar()?, - code_hash: self.code_hash.into_scalar()?, - }) - } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + code_hash: self.code_hash.into_scalar()?, + }) + } - fn adjust_refs(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { - self.block_hash.adjust_req(&mut mapping); - self.code_hash.adjust_req(&mut mapping); - } - } + fn adjust_refs(&mut self, mut mapping: F) + where + F: FnMut(usize) -> usize, + { + self.block_hash.adjust_req(&mut mapping); + self.code_hash.adjust_req(&mut mapping); + } + } - /// A complete request. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// The block hash to request the state for. - pub block_hash: H256, - /// The code hash. - pub code_hash: H256, - } + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the state for. 
+ pub block_hash: H256, + /// The code hash. + pub code_hash: H256, + } - /// The output of a request for - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] - pub struct Response { - /// The requested code. - pub code: Bytes, - } + /// The output of a request for + #[derive(Debug, Clone, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] + pub struct Response { + /// The requested code. + pub code: Bytes, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by providing them to the function. - fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. + fn fill_outputs(&self, _: F) + where + F: FnMut(usize, Output), + { + } + } } /// A request for proof of execution. pub mod execution { - use super::{Field, NoSuchOutput, OutputKind, Output}; - use common_types::transaction::Action; - use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; - use ethereum_types::{H256, U256, Address}; - use kvdb::DBValue; - use bytes::Bytes; + use super::{Field, NoSuchOutput, Output, OutputKind}; + use bytes::Bytes; + use common_types::transaction::Action; + use ethereum_types::{Address, H256, U256}; + use kvdb::DBValue; + use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; - /// Potentially incomplete execution proof request. - #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] - pub struct Incomplete { - /// The block hash to request the state for. - pub block_hash: Field, - /// The address the transaction should be from. - pub from: Address, - /// The action of the transaction. - pub action: Action, - /// The amount of gas to prove. - pub gas: U256, - /// The gas price. - pub gas_price: U256, - /// The value to transfer. - pub value: U256, - /// Call data. - pub data: Bytes, - } + /// Potentially incomplete execution proof request. 
+ #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] + pub struct Incomplete { + /// The block hash to request the state for. + pub block_hash: Field, + /// The address the transaction should be from. + pub from: Address, + /// The action of the transaction. + pub action: Action, + /// The amount of gas to prove. + pub gas: U256, + /// The gas price. + pub gas_price: U256, + /// The value to transfer. + pub value: U256, + /// Call data. + pub data: Bytes, + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - if let Field::BackReference(req, idx) = self.block_hash { - f(req, idx, OutputKind::Hash)?; - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } - Ok(()) - } + Ok(()) + } - fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + fn note_outputs(&self, _: F) + where + F: FnMut(usize, OutputKind), + { + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.block_hash { - self.block_hash = match oracle(req, idx) { - Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), - _ => Field::BackReference(req, idx), - } - } - } - fn complete(self) -> Result { - Ok(Complete { - block_hash: self.block_hash.into_scalar()?, - from: self.from, - action: self.action, - gas: self.gas, - gas_price: self.gas_price, - value: self.value, - data: self.data, - }) - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.block_hash { + 
self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), + _ => Field::BackReference(req, idx), + } + } + } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + from: self.from, + action: self.action, + gas: self.gas, + gas_price: self.gas_price, + value: self.value, + data: self.data, + }) + } - fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { - self.block_hash.adjust_req(mapping); - } - } + fn adjust_refs(&mut self, mapping: F) + where + F: FnMut(usize) -> usize, + { + self.block_hash.adjust_req(mapping); + } + } - /// A complete request. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// The block hash to request the state for. - pub block_hash: H256, - /// The address the transaction should be from. - pub from: Address, - /// The action of the transaction. - pub action: Action, - /// The amount of gas to prove. - pub gas: U256, - /// The gas price. - pub gas_price: U256, - /// The value to transfer. - pub value: U256, - /// Call data. - pub data: Bytes, - } + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the state for. + pub block_hash: H256, + /// The address the transaction should be from. + pub from: Address, + /// The action of the transaction. + pub action: Action, + /// The amount of gas to prove. + pub gas: U256, + /// The gas price. + pub gas_price: U256, + /// The value to transfer. + pub value: U256, + /// Call data. + pub data: Bytes, + } - /// The output of a request for proof of execution - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Response { - /// All state items (trie nodes, code) necessary to re-prove the transaction. 
- pub items: Vec, - } + /// The output of a request for proof of execution + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// All state items (trie nodes, code) necessary to re-prove the transaction. + pub items: Vec, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by providing them to the function. - fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. + fn fill_outputs(&self, _: F) + where + F: FnMut(usize, Output), + { + } + } - impl Decodable for Response { - fn decode(rlp: &Rlp) -> Result { - let mut items = Vec::new(); - for raw_item in rlp.iter() { - let mut item = DBValue::new(); - item.append_slice(raw_item.data()?); - items.push(item); - } + impl Decodable for Response { + fn decode(rlp: &Rlp) -> Result { + let mut items = Vec::new(); + for raw_item in rlp.iter() { + let mut item = DBValue::new(); + item.append_slice(raw_item.data()?); + items.push(item); + } - Ok(Response { items }) - } - } + Ok(Response { items }) + } + } - impl Encodable for Response { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(self.items.len()); + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.items.len()); - for item in &self.items { - s.append(&&**item); - } - } - } + for item in &self.items { + s.append(&&**item); + } + } + } } /// A request for epoch signal data. pub mod epoch_signal { - use super::{Field, NoSuchOutput, OutputKind, Output}; - use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; - use ethereum_types::H256; - use bytes::Bytes; + use super::{Field, NoSuchOutput, Output, OutputKind}; + use bytes::Bytes; + use ethereum_types::H256; + use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; - /// Potentially incomplete epoch signal request. 
- #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Incomplete { - /// The block hash to request the signal for. - pub block_hash: Field, - } + /// Potentially incomplete epoch signal request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// The block hash to request the signal for. + pub block_hash: Field, + } - impl Decodable for Incomplete { - fn decode(rlp: &Rlp) -> Result { - Ok(Incomplete { - block_hash: rlp.val_at(0)?, - }) - } - } + impl Decodable for Incomplete { + fn decode(rlp: &Rlp) -> Result { + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + }) + } + } - impl Encodable for Incomplete { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(1).append(&self.block_hash); - } - } + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.block_hash); + } + } - impl super::IncompleteRequest for Incomplete { - type Complete = Complete; - type Response = Response; + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; - fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> - where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> - { - if let Field::BackReference(req, idx) = self.block_hash { - f(req, idx, OutputKind::Hash)?; - } + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where + F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>, + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } - Ok(()) - } + Ok(()) + } - fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + fn note_outputs(&self, _: F) + where + F: FnMut(usize, OutputKind), + { + } - fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { - if let Field::BackReference(req, idx) = self.block_hash { - self.block_hash = match oracle(req, idx) { - Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), - _ => Field::BackReference(req, idx), - } - 
} - } + fn fill(&mut self, oracle: F) + where + F: Fn(usize, usize) -> Result, + { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash), + _ => Field::BackReference(req, idx), + } + } + } - fn complete(self) -> Result { - Ok(Complete { - block_hash: self.block_hash.into_scalar()?, - }) - } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + }) + } - fn adjust_refs(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { - self.block_hash.adjust_req(&mut mapping); - } - } + fn adjust_refs(&mut self, mut mapping: F) + where + F: FnMut(usize) -> usize, + { + self.block_hash.adjust_req(&mut mapping); + } + } - /// A complete request. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Complete { - /// The block hash to request the epoch signal for. - pub block_hash: H256, - } + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the epoch signal for. + pub block_hash: H256, + } - /// The output of a request for an epoch signal. - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Response { - /// The requested epoch signal. - pub signal: Bytes, - } + /// The output of a request for an epoch signal. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The requested epoch signal. + pub signal: Bytes, + } - impl super::ResponseLike for Response { - /// Fill reusable outputs by providing them to the function. - fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} - } + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. 
+ fn fill_outputs(&self, _: F) + where + F: FnMut(usize, Output), + { + } + } - impl Decodable for Response { - fn decode(rlp: &Rlp) -> Result { + impl Decodable for Response { + fn decode(rlp: &Rlp) -> Result { + Ok(Response { + signal: rlp.as_val()?, + }) + } + } - Ok(Response { - signal: rlp.as_val()?, - }) - } - } - - impl Encodable for Response { - fn rlp_append(&self, s: &mut RlpStream) { - s.append(&self.signal); - } - } + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.signal); + } + } } #[cfg(test)] mod tests { - use super::*; - use common_types::header::Header; + use super::*; + use common_types::header::Header; - fn check_roundtrip(val: T) - where T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + ::std::fmt::Debug - { - // check as single value. - let bytes = ::rlp::encode(&val); - let new_val: T = ::rlp::decode(&bytes).unwrap(); - assert_eq!(val, new_val); + fn check_roundtrip(val: T) + where + T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + ::std::fmt::Debug, + { + // check as single value. + let bytes = ::rlp::encode(&val); + let new_val: T = ::rlp::decode(&bytes).unwrap(); + assert_eq!(val, new_val); - // check as list containing single value. - let list = [val]; - let bytes = ::rlp::encode_list(&list); - let new_list: Vec = ::rlp::decode_list(&bytes); - assert_eq!(&list, &new_list[..]); - } + // check as list containing single value. 
+ let list = [val]; + let bytes = ::rlp::encode_list(&list); + let new_list: Vec = ::rlp::decode_list(&bytes); + assert_eq!(&list, &new_list[..]); + } - #[test] - fn hash_or_number_roundtrip() { - let hash = HashOrNumber::Hash(H256::default()); - let number = HashOrNumber::Number(5); + #[test] + fn hash_or_number_roundtrip() { + let hash = HashOrNumber::Hash(H256::default()); + let number = HashOrNumber::Number(5); - check_roundtrip(hash); - check_roundtrip(number); - } + check_roundtrip(hash); + check_roundtrip(number); + } - #[test] - fn field_roundtrip() { - let field_scalar = Field::Scalar(5usize); - let field_back: Field = Field::BackReference(1, 2); + #[test] + fn field_roundtrip() { + let field_scalar = Field::Scalar(5usize); + let field_back: Field = Field::BackReference(1, 2); - check_roundtrip(field_scalar); - check_roundtrip(field_back); - } + check_roundtrip(field_scalar); + check_roundtrip(field_back); + } - #[test] - fn headers_roundtrip() { - let req = IncompleteHeadersRequest { - start: Field::Scalar(5u64.into()), - skip: 0, - max: 100, - reverse: false, - }; + #[test] + fn headers_roundtrip() { + let req = IncompleteHeadersRequest { + start: Field::Scalar(5u64.into()), + skip: 0, + max: 100, + reverse: false, + }; - let full_req = Request::Headers(req.clone()); - let res = HeadersResponse { - headers: vec![ - ::common_types::encoded::Header::new(::rlp::encode(&Header::default())) - ] - }; - let full_res = Response::Headers(res.clone()); + let full_req = Request::Headers(req.clone()); + let res = HeadersResponse { + headers: vec![::common_types::encoded::Header::new(::rlp::encode( + &Header::default(), + ))], + }; + let full_res = Response::Headers(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } - #[test] - fn header_proof_roundtrip() { - let req = 
IncompleteHeaderProofRequest { - num: Field::BackReference(1, 234), - }; + #[test] + fn header_proof_roundtrip() { + let req = IncompleteHeaderProofRequest { + num: Field::BackReference(1, 234), + }; - let full_req = Request::HeaderProof(req.clone()); - let res = HeaderProofResponse { - proof: vec![vec![1, 2, 3], vec![4, 5, 6]], - hash: Default::default(), - td: 100.into(), - }; - let full_res = Response::HeaderProof(res.clone()); + let full_req = Request::HeaderProof(req.clone()); + let res = HeaderProofResponse { + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], + hash: Default::default(), + td: 100.into(), + }; + let full_res = Response::HeaderProof(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } - #[test] - fn transaction_index_roundtrip() { - let req = IncompleteTransactionIndexRequest { - hash: Field::Scalar(Default::default()), - }; + #[test] + fn transaction_index_roundtrip() { + let req = IncompleteTransactionIndexRequest { + hash: Field::Scalar(Default::default()), + }; - let full_req = Request::TransactionIndex(req.clone()); - let res = TransactionIndexResponse { - num: 1000, - hash: ::ethereum_types::H256::random(), - index: 4, - }; - let full_res = Response::TransactionIndex(res.clone()); + let full_req = Request::TransactionIndex(req.clone()); + let res = TransactionIndexResponse { + num: 1000, + hash: ::ethereum_types::H256::random(), + index: 4, + }; + let full_res = Response::TransactionIndex(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } - #[test] - fn receipts_roundtrip() { - use common_types::receipt::{Receipt, TransactionOutcome}; - let req = IncompleteReceiptsRequest { - hash: 
Field::Scalar(Default::default()), - }; + #[test] + fn receipts_roundtrip() { + use common_types::receipt::{Receipt, TransactionOutcome}; + let req = IncompleteReceiptsRequest { + hash: Field::Scalar(Default::default()), + }; - let full_req = Request::Receipts(req.clone()); - let receipt = Receipt::new(TransactionOutcome::Unknown, Default::default(), Vec::new()); - let res = ReceiptsResponse { - receipts: vec![receipt.clone(), receipt], - }; - let full_res = Response::Receipts(res.clone()); + let full_req = Request::Receipts(req.clone()); + let receipt = Receipt::new(TransactionOutcome::Unknown, Default::default(), Vec::new()); + let res = ReceiptsResponse { + receipts: vec![receipt.clone(), receipt], + }; + let full_res = Response::Receipts(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } - #[test] - fn body_roundtrip() { - use common_types::transaction::{Transaction, UnverifiedTransaction}; - let req = IncompleteBodyRequest { - hash: Field::Scalar(Default::default()), - }; + #[test] + fn body_roundtrip() { + use common_types::transaction::{Transaction, UnverifiedTransaction}; + let req = IncompleteBodyRequest { + hash: Field::Scalar(Default::default()), + }; - let full_req = Request::Body(req.clone()); - let res = BodyResponse { - body: { - let header = ::common_types::header::Header::default(); - let tx = UnverifiedTransaction::from(Transaction::default().fake_sign(Default::default())); - let mut stream = RlpStream::new_list(2); - stream.begin_list(2).append(&tx).append(&tx) - .begin_list(1).append(&header); + let full_req = Request::Body(req.clone()); + let res = BodyResponse { + body: { + let header = ::common_types::header::Header::default(); + let tx = UnverifiedTransaction::from( + Transaction::default().fake_sign(Default::default()), + ); + let mut stream = 
RlpStream::new_list(2); + stream + .begin_list(2) + .append(&tx) + .append(&tx) + .begin_list(1) + .append(&header); - ::common_types::encoded::Body::new(stream.out()) - }, - }; - let full_res = Response::Body(res.clone()); + ::common_types::encoded::Body::new(stream.out()) + }, + }; + let full_res = Response::Body(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } - #[test] - fn account_roundtrip() { - let req = IncompleteAccountRequest { - block_hash: Field::Scalar(Default::default()), - address_hash: Field::BackReference(1, 2), - }; + #[test] + fn account_roundtrip() { + let req = IncompleteAccountRequest { + block_hash: Field::Scalar(Default::default()), + address_hash: Field::BackReference(1, 2), + }; - let full_req = Request::Account(req.clone()); - let res = AccountResponse { - proof: vec![vec![1, 2, 3], vec![4, 5, 6]], - nonce: 100.into(), - balance: 123456.into(), - code_hash: Default::default(), - storage_root: Default::default(), - }; - let full_res = Response::Account(res.clone()); + let full_req = Request::Account(req.clone()); + let res = AccountResponse { + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], + nonce: 100.into(), + balance: 123456.into(), + code_hash: Default::default(), + storage_root: Default::default(), + }; + let full_res = Response::Account(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } - #[test] - fn storage_roundtrip() { - let req = IncompleteStorageRequest { - block_hash: Field::Scalar(Default::default()), - address_hash: Field::BackReference(1, 2), - key_hash: Field::BackReference(3, 2), - }; + #[test] + fn storage_roundtrip() { + let req = IncompleteStorageRequest { + 
block_hash: Field::Scalar(Default::default()), + address_hash: Field::BackReference(1, 2), + key_hash: Field::BackReference(3, 2), + }; - let full_req = Request::Storage(req.clone()); - let res = StorageResponse { - proof: vec![vec![1, 2, 3], vec![4, 5, 6]], - value: H256::default(), - }; - let full_res = Response::Storage(res.clone()); + let full_req = Request::Storage(req.clone()); + let res = StorageResponse { + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], + value: H256::default(), + }; + let full_res = Response::Storage(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } - #[test] - fn code_roundtrip() { - let req = IncompleteCodeRequest { - block_hash: Field::Scalar(Default::default()), - code_hash: Field::BackReference(3, 2), - }; + #[test] + fn code_roundtrip() { + let req = IncompleteCodeRequest { + block_hash: Field::Scalar(Default::default()), + code_hash: Field::BackReference(3, 2), + }; - let full_req = Request::Code(req.clone()); - let res = CodeResponse { - code: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4], - }; - let full_res = Response::Code(res.clone()); + let full_req = Request::Code(req.clone()); + let res = CodeResponse { + code: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4], + }; + let full_res = Response::Code(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } - #[test] - fn execution_roundtrip() { - use kvdb::DBValue; + #[test] + fn execution_roundtrip() { + use kvdb::DBValue; - let req = IncompleteExecutionRequest { - block_hash: Field::Scalar(Default::default()), - from: Default::default(), - action: ::common_types::transaction::Action::Create, - gas: 100_000.into(), - gas_price: 0.into(), - value: 
100_000_001.into(), - data: vec![1, 2, 3, 2, 1], - }; + let req = IncompleteExecutionRequest { + block_hash: Field::Scalar(Default::default()), + from: Default::default(), + action: ::common_types::transaction::Action::Create, + gas: 100_000.into(), + gas_price: 0.into(), + value: 100_000_001.into(), + data: vec![1, 2, 3, 2, 1], + }; - let full_req = Request::Execution(req.clone()); - let res = ExecutionResponse { - items: vec![DBValue::new(), { - let mut value = DBValue::new(); - value.append_slice(&[1, 1, 1, 2, 3]); - value - }], - }; - let full_res = Response::Execution(res.clone()); + let full_req = Request::Execution(req.clone()); + let res = ExecutionResponse { + items: vec![DBValue::new(), { + let mut value = DBValue::new(); + value.append_slice(&[1, 1, 1, 2, 3]); + value + }], + }; + let full_res = Response::Execution(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } - #[test] - fn vec_test() { - use rlp::*; + #[test] + fn vec_test() { + use rlp::*; - let reqs: Vec<_> = (0..10).map(|_| IncompleteExecutionRequest { - block_hash: Field::Scalar(Default::default()), - from: Default::default(), - action: ::common_types::transaction::Action::Create, - gas: 100_000.into(), - gas_price: 0.into(), - value: 100_000_001.into(), - data: vec![1, 2, 3, 2, 1], - }).map(Request::Execution).collect(); + let reqs: Vec<_> = (0..10) + .map(|_| IncompleteExecutionRequest { + block_hash: Field::Scalar(Default::default()), + from: Default::default(), + action: ::common_types::transaction::Action::Create, + gas: 100_000.into(), + gas_price: 0.into(), + value: 100_000_001.into(), + data: vec![1, 2, 3, 2, 1], + }) + .map(Request::Execution) + .collect(); - let mut stream = RlpStream::new_list(2); - stream.append(&100usize).append_list(&reqs); - let out = stream.out(); + let mut stream = 
RlpStream::new_list(2); + stream.append(&100usize).append_list(&reqs); + let out = stream.out(); - let rlp = Rlp::new(&out); - assert_eq!(rlp.val_at::(0).unwrap(), 100usize); - assert_eq!(rlp.list_at::(1).unwrap(), reqs); - } + let rlp = Rlp::new(&out); + assert_eq!(rlp.val_at::(0).unwrap(), 100usize); + assert_eq!(rlp.list_at::(1).unwrap(), reqs); + } - #[test] - fn responses_vec() { - use common_types::receipt::{Receipt, TransactionOutcome}; - let mut stream = RlpStream::new_list(2); - stream.begin_list(0).begin_list(0); + #[test] + fn responses_vec() { + use common_types::receipt::{Receipt, TransactionOutcome}; + let mut stream = RlpStream::new_list(2); + stream.begin_list(0).begin_list(0); - let body = ::common_types::encoded::Body::new(stream.out()); - let reqs = vec![ - Response::Headers(HeadersResponse { headers: vec![] }), - Response::HeaderProof(HeaderProofResponse { proof: vec![], hash: Default::default(), td: 100.into()}), - Response::Receipts(ReceiptsResponse { receipts: vec![Receipt::new(TransactionOutcome::Unknown, Default::default(), Vec::new())] }), - Response::Body(BodyResponse { body: body }), - Response::Account(AccountResponse { - proof: vec![], - nonce: 100.into(), - balance: 123.into(), - code_hash: Default::default(), - storage_root: Default::default() - }), - Response::Storage(StorageResponse { proof: vec![], value: H256::default() }), - Response::Code(CodeResponse { code: vec![1, 2, 3, 4, 5] }), - Response::Execution(ExecutionResponse { items: vec![] }), - ]; + let body = ::common_types::encoded::Body::new(stream.out()); + let reqs = vec![ + Response::Headers(HeadersResponse { headers: vec![] }), + Response::HeaderProof(HeaderProofResponse { + proof: vec![], + hash: Default::default(), + td: 100.into(), + }), + Response::Receipts(ReceiptsResponse { + receipts: vec![Receipt::new( + TransactionOutcome::Unknown, + Default::default(), + Vec::new(), + )], + }), + Response::Body(BodyResponse { body: body }), + Response::Account(AccountResponse { 
+ proof: vec![], + nonce: 100.into(), + balance: 123.into(), + code_hash: Default::default(), + storage_root: Default::default(), + }), + Response::Storage(StorageResponse { + proof: vec![], + value: H256::default(), + }), + Response::Code(CodeResponse { + code: vec![1, 2, 3, 4, 5], + }), + Response::Execution(ExecutionResponse { items: vec![] }), + ]; - let raw = ::rlp::encode_list(&reqs); - assert_eq!(::rlp::decode_list::(&raw), reqs); - } + let raw = ::rlp::encode_list(&reqs); + assert_eq!(::rlp::decode_list::(&raw), reqs); + } - #[test] - fn epoch_signal_roundtrip() { - let req = IncompleteSignalRequest { - block_hash: Field::Scalar(Default::default()), - }; + #[test] + fn epoch_signal_roundtrip() { + let req = IncompleteSignalRequest { + block_hash: Field::Scalar(Default::default()), + }; - let full_req = Request::Signal(req.clone()); - let res = SignalResponse { - signal: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4], - }; - let full_res = Response::Signal(res.clone()); + let full_req = Request::Signal(req.clone()); + let res = SignalResponse { + signal: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4], + }; + let full_res = Response::Signal(res.clone()); - check_roundtrip(req); - check_roundtrip(full_req); - check_roundtrip(res); - check_roundtrip(full_res); - } + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } } diff --git a/ethcore/node-filter/src/lib.rs b/ethcore/node-filter/src/lib.rs index 816bb84a8..237ee7845 100644 --- a/ethcore/node-filter/src/lib.rs +++ b/ethcore/node-filter/src/lib.rs @@ -39,93 +39,106 @@ extern crate log; use std::sync::Weak; -use ethcore::client::{BlockChainClient, BlockId}; -use ethereum_types::{H256, Address}; -use ethabi::FunctionOutputDecoder; -use network::{ConnectionFilter, ConnectionDirection}; use devp2p::NodeId; +use ethabi::FunctionOutputDecoder; +use ethcore::client::{BlockChainClient, BlockId}; +use ethereum_types::{Address, H256}; +use network::{ConnectionDirection, 
ConnectionFilter}; use_contract!(peer_set, "res/peer_set.json"); /// Connection filter that uses a contract to manage permissions. pub struct NodeFilter { - client: Weak, - contract_address: Address, + client: Weak, + contract_address: Address, } impl NodeFilter { - /// Create a new instance. Accepts a contract address. - pub fn new(client: Weak, contract_address: Address) -> NodeFilter { - NodeFilter { - client, - contract_address, - } - } + /// Create a new instance. Accepts a contract address. + pub fn new(client: Weak, contract_address: Address) -> NodeFilter { + NodeFilter { + client, + contract_address, + } + } } impl ConnectionFilter for NodeFilter { - fn connection_allowed(&self, own_id: &NodeId, connecting_id: &NodeId, _direction: ConnectionDirection) -> bool { - let client = match self.client.upgrade() { - Some(client) => client, - None => return false, - }; + fn connection_allowed( + &self, + own_id: &NodeId, + connecting_id: &NodeId, + _direction: ConnectionDirection, + ) -> bool { + let client = match self.client.upgrade() { + Some(client) => client, + None => return false, + }; - let address = self.contract_address; - let own_low = H256::from_slice(&own_id[0..32]); - let own_high = H256::from_slice(&own_id[32..64]); - let id_low = H256::from_slice(&connecting_id[0..32]); - let id_high = H256::from_slice(&connecting_id[32..64]); + let address = self.contract_address; + let own_low = H256::from_slice(&own_id[0..32]); + let own_high = H256::from_slice(&own_id[32..64]); + let id_low = H256::from_slice(&connecting_id[0..32]); + let id_high = H256::from_slice(&connecting_id[32..64]); - let (data, decoder) = peer_set::functions::connection_allowed::call(own_low, own_high, id_low, id_high); - let allowed = client.call_contract(BlockId::Latest, address, data) - .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string())) - .unwrap_or_else(|e| { - debug!("Error callling peer set contract: {:?}", e); - false - }); + let (data, decoder) = + 
peer_set::functions::connection_allowed::call(own_low, own_high, id_low, id_high); + let allowed = client + .call_contract(BlockId::Latest, address, data) + .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string())) + .unwrap_or_else(|e| { + debug!("Error callling peer set contract: {:?}", e); + false + }); - allowed - } + allowed + } } #[cfg(test)] mod test { - use std::sync::{Arc, Weak}; - use ethcore::spec::Spec; - use ethcore::client::{BlockChainClient, Client, ClientConfig}; - use ethcore::miner::Miner; - use ethcore::test_helpers; - use network::{ConnectionDirection, ConnectionFilter, NodeId}; - use io::IoChannel; - use super::NodeFilter; - use tempdir::TempDir; + use super::NodeFilter; + use ethcore::{ + client::{BlockChainClient, Client, ClientConfig}, + miner::Miner, + spec::Spec, + test_helpers, + }; + use io::IoChannel; + use network::{ConnectionDirection, ConnectionFilter, NodeId}; + use std::sync::{Arc, Weak}; + use tempdir::TempDir; - /// Contract code: https://gist.github.com/arkpar/467dbcc73cbb85b0997a7a10ffa0695f - #[test] - fn node_filter() { - let contract_addr = "0000000000000000000000000000000000000005".into(); - let data = include_bytes!("../res/node_filter.json"); - let tempdir = TempDir::new("").unwrap(); - let spec = Spec::load(&tempdir.path(), &data[..]).unwrap(); - let client_db = test_helpers::new_db(); + /// Contract code: https://gist.github.com/arkpar/467dbcc73cbb85b0997a7a10ffa0695f + #[test] + fn node_filter() { + let contract_addr = "0000000000000000000000000000000000000005".into(); + let data = include_bytes!("../res/node_filter.json"); + let tempdir = TempDir::new("").unwrap(); + let spec = Spec::load(&tempdir.path(), &data[..]).unwrap(); + let client_db = test_helpers::new_db(); - let client = Client::new( - ClientConfig::default(), - &spec, - client_db, - Arc::new(Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); - let filter = NodeFilter::new(Arc::downgrade(&client) as Weak, 
contract_addr); - let self1: NodeId = "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002".into(); - let self2: NodeId = "00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003".into(); - let node1: NodeId = "00000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012".into(); - let node2: NodeId = "00000000000000000000000000000000000000000000000000000000000000210000000000000000000000000000000000000000000000000000000000000022".into(); - let nodex: NodeId = "77000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); + let client = Client::new( + ClientConfig::default(), + &spec, + client_db, + Arc::new(Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + let filter = NodeFilter::new( + Arc::downgrade(&client) as Weak, + contract_addr, + ); + let self1: NodeId = "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002".into(); + let self2: NodeId = "00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003".into(); + let node1: NodeId = "00000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012".into(); + let node2: NodeId = "00000000000000000000000000000000000000000000000000000000000000210000000000000000000000000000000000000000000000000000000000000022".into(); + let nodex: NodeId = "77000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); - assert!(filter.connection_allowed(&self1, &node1, ConnectionDirection::Inbound)); - 
assert!(filter.connection_allowed(&self1, &nodex, ConnectionDirection::Inbound)); - assert!(filter.connection_allowed(&self2, &node1, ConnectionDirection::Inbound)); - assert!(filter.connection_allowed(&self2, &node2, ConnectionDirection::Inbound)); - } + assert!(filter.connection_allowed(&self1, &node1, ConnectionDirection::Inbound)); + assert!(filter.connection_allowed(&self1, &nodex, ConnectionDirection::Inbound)); + assert!(filter.connection_allowed(&self2, &node1, ConnectionDirection::Inbound)); + assert!(filter.connection_allowed(&self2, &node2, ConnectionDirection::Inbound)); + } } diff --git a/ethcore/private-tx/src/encryptor.rs b/ethcore/private-tx/src/encryptor.rs index 6a24cf930..a95692747 100644 --- a/ethcore/private-tx/src/encryptor.rs +++ b/ethcore/private-tx/src/encryptor.rs @@ -16,25 +16,25 @@ //! Encryption providers. -use std::io::Read; -use std::str::FromStr; -use std::sync::Arc; -use std::iter::repeat; -use std::time::{Instant, Duration}; -use std::collections::HashMap; -use std::collections::hash_map::Entry; -use parking_lot::Mutex; -use ethereum_types::{H128, H256, Address}; -use ethjson; -use ethkey::{Signature, Public}; -use crypto; -use futures::Future; -use fetch::{Fetch, Client as FetchClient, Method, BodyReader, Request}; +use super::{key_server_keys::address_to_key, Signer}; use bytes::{Bytes, ToPretty}; +use crypto; use error::Error; +use ethereum_types::{Address, H128, H256}; +use ethjson; +use ethkey::{Public, Signature}; +use fetch::{BodyReader, Client as FetchClient, Fetch, Method, Request}; +use futures::Future; +use parking_lot::Mutex; +use std::{ + collections::{hash_map::Entry, HashMap}, + io::Read, + iter::repeat, + str::FromStr, + sync::Arc, + time::{Duration, Instant}, +}; use url::Url; -use super::Signer; -use super::key_server_keys::address_to_key; /// Initialization vector length. 
const INIT_VEC_LEN: usize = 16; @@ -44,206 +44,228 @@ const ENCRYPTION_SESSION_DURATION: u64 = 30 * 1000; /// Trait for encryption/decryption operations. pub trait Encryptor: Send + Sync + 'static { - /// Generate unique contract key && encrypt passed data. Encryption can only be performed once. - fn encrypt( - &self, - contract_address: &Address, - initialisation_vector: &H128, - plain_data: &[u8], - ) -> Result; + /// Generate unique contract key && encrypt passed data. Encryption can only be performed once. + fn encrypt( + &self, + contract_address: &Address, + initialisation_vector: &H128, + plain_data: &[u8], + ) -> Result; - /// Decrypt data using previously generated contract key. - fn decrypt( - &self, - contract_address: &Address, - cypher: &[u8], - ) -> Result; + /// Decrypt data using previously generated contract key. + fn decrypt(&self, contract_address: &Address, cypher: &[u8]) -> Result; } /// Configuration for key server encryptor #[derive(Default, PartialEq, Debug, Clone)] pub struct EncryptorConfig { - /// URL to key server - pub base_url: Option, - /// Key server's threshold - pub threshold: u32, - /// Account used for signing requests to key server - pub key_server_account: Option
, + /// URL to key server + pub base_url: Option, + /// Key server's threshold + pub threshold: u32, + /// Account used for signing requests to key server + pub key_server_account: Option
, } struct EncryptionSession { - key: Bytes, - end_time: Instant, + key: Bytes, + end_time: Instant, } /// SecretStore-based encryption/decryption operations. pub struct SecretStoreEncryptor { - config: EncryptorConfig, - client: FetchClient, - sessions: Mutex>, - signer: Arc, + config: EncryptorConfig, + client: FetchClient, + sessions: Mutex>, + signer: Arc, } impl SecretStoreEncryptor { - /// Create new encryptor - pub fn new( - config: EncryptorConfig, - client: FetchClient, - signer: Arc, - ) -> Result { - Ok(SecretStoreEncryptor { - config, - client, - signer, - sessions: Mutex::default(), - }) - } + /// Create new encryptor + pub fn new( + config: EncryptorConfig, + client: FetchClient, + signer: Arc, + ) -> Result { + Ok(SecretStoreEncryptor { + config, + client, + signer, + sessions: Mutex::default(), + }) + } - /// Ask secret store for key && decrypt the key. - fn retrieve_key( - &self, - url_suffix: &str, - use_post: bool, - contract_address: &Address, - ) -> Result { - // check if the key was already cached - if let Some(key) = self.obtained_key(contract_address) { - return Ok(key); - } - let contract_address_signature = self.sign_contract_address(contract_address)?; - let requester = self.config.key_server_account.ok_or_else(|| Error::KeyServerAccountNotSet)?; + /// Ask secret store for key && decrypt the key. 
+ fn retrieve_key( + &self, + url_suffix: &str, + use_post: bool, + contract_address: &Address, + ) -> Result { + // check if the key was already cached + if let Some(key) = self.obtained_key(contract_address) { + return Ok(key); + } + let contract_address_signature = self.sign_contract_address(contract_address)?; + let requester = self + .config + .key_server_account + .ok_or_else(|| Error::KeyServerAccountNotSet)?; - // key id in SS is H256 && we have H160 here => expand with assitional zeros - let contract_address_extended: H256 = contract_address.into(); - let base_url = self.config.base_url.clone().ok_or_else(|| Error::KeyServerNotSet)?; + // key id in SS is H256 && we have H160 here => expand with assitional zeros + let contract_address_extended: H256 = contract_address.into(); + let base_url = self + .config + .base_url + .clone() + .ok_or_else(|| Error::KeyServerNotSet)?; - // prepare request url - let url = format!("{}/{}/{}{}", - base_url, - contract_address_extended.to_hex(), - contract_address_signature, - url_suffix, - ); + // prepare request url + let url = format!( + "{}/{}/{}{}", + base_url, + contract_address_extended.to_hex(), + contract_address_signature, + url_suffix, + ); - // send HTTP request - let method = if use_post { - Method::POST - } else { - Method::GET - }; + // send HTTP request + let method = if use_post { Method::POST } else { Method::GET }; - let url = Url::from_str(&url).map_err(|e| Error::Encrypt(e.to_string()))?; - let response = self.client.fetch(Request::new(url, method), Default::default()).wait() - .map_err(|e| Error::Encrypt(e.to_string()))?; + let url = Url::from_str(&url).map_err(|e| Error::Encrypt(e.to_string()))?; + let response = self + .client + .fetch(Request::new(url, method), Default::default()) + .wait() + .map_err(|e| Error::Encrypt(e.to_string()))?; - if response.is_not_found() { - return Err(Error::EncryptionKeyNotFound(*contract_address)); - } + if response.is_not_found() { + return 
Err(Error::EncryptionKeyNotFound(*contract_address)); + } - if !response.is_success() { - return Err(Error::Encrypt(response.status().canonical_reason().unwrap_or("unknown").into())); - } + if !response.is_success() { + return Err(Error::Encrypt( + response + .status() + .canonical_reason() + .unwrap_or("unknown") + .into(), + )); + } - // read HTTP response - let mut result = String::new(); - BodyReader::new(response).read_to_string(&mut result)?; + // read HTTP response + let mut result = String::new(); + BodyReader::new(response).read_to_string(&mut result)?; - // response is JSON string (which is, in turn, hex-encoded, encrypted Public) - let encrypted_bytes: ethjson::bytes::Bytes = result.trim_matches('\"').parse().map_err(|e| Error::Encrypt(e))?; + // response is JSON string (which is, in turn, hex-encoded, encrypted Public) + let encrypted_bytes: ethjson::bytes::Bytes = result + .trim_matches('\"') + .parse() + .map_err(|e| Error::Encrypt(e))?; - // decrypt Public - let decrypted_bytes = self.signer.decrypt(requester, &crypto::DEFAULT_MAC, &encrypted_bytes)?; - let decrypted_key = Public::from_slice(&decrypted_bytes); + // decrypt Public + let decrypted_bytes = + self.signer + .decrypt(requester, &crypto::DEFAULT_MAC, &encrypted_bytes)?; + let decrypted_key = Public::from_slice(&decrypted_bytes); - // and now take x coordinate of Public as a key - let key: Bytes = (*decrypted_key)[..INIT_VEC_LEN].into(); + // and now take x coordinate of Public as a key + let key: Bytes = (*decrypted_key)[..INIT_VEC_LEN].into(); - // cache the key in the session and clear expired sessions - self.sessions.lock().insert(*contract_address, EncryptionSession{ - key: key.clone(), - end_time: Instant::now() + Duration::from_millis(ENCRYPTION_SESSION_DURATION), - }); - self.clean_expired_sessions(); - Ok(key) - } + // cache the key in the session and clear expired sessions + self.sessions.lock().insert( + *contract_address, + EncryptionSession { + key: key.clone(), + end_time: 
Instant::now() + Duration::from_millis(ENCRYPTION_SESSION_DURATION), + }, + ); + self.clean_expired_sessions(); + Ok(key) + } - fn clean_expired_sessions(&self) { - let mut sessions = self.sessions.lock(); - sessions.retain(|_, session| session.end_time < Instant::now()); - } + fn clean_expired_sessions(&self) { + let mut sessions = self.sessions.lock(); + sessions.retain(|_, session| session.end_time < Instant::now()); + } - fn obtained_key(&self, contract_address: &Address) -> Option { - let mut sessions = self.sessions.lock(); - let stored_session = sessions.entry(*contract_address); - match stored_session { - Entry::Occupied(session) => { - if Instant::now() > session.get().end_time { - session.remove_entry(); - None - } else { - Some(session.get().key.clone()) - } - } - Entry::Vacant(_) => None, - } - } + fn obtained_key(&self, contract_address: &Address) -> Option { + let mut sessions = self.sessions.lock(); + let stored_session = sessions.entry(*contract_address); + match stored_session { + Entry::Occupied(session) => { + if Instant::now() > session.get().end_time { + session.remove_entry(); + None + } else { + Some(session.get().key.clone()) + } + } + Entry::Vacant(_) => None, + } + } - fn sign_contract_address(&self, contract_address: &Address) -> Result { - let key_server_account = self.config.key_server_account.ok_or_else(|| Error::KeyServerAccountNotSet)?; - Ok(self.signer.sign(key_server_account, address_to_key(contract_address))?) - } + fn sign_contract_address(&self, contract_address: &Address) -> Result { + let key_server_account = self + .config + .key_server_account + .ok_or_else(|| Error::KeyServerAccountNotSet)?; + Ok(self + .signer + .sign(key_server_account, address_to_key(contract_address))?) 
+ } } impl Encryptor for SecretStoreEncryptor { - fn encrypt( - &self, - contract_address: &Address, - initialisation_vector: &H128, - plain_data: &[u8], - ) -> Result { - // retrieve the key, try to generate it if it doesn't exist yet - let key = match self.retrieve_key("", false, contract_address) { - Ok(key) => Ok(key), - Err(Error::EncryptionKeyNotFound(_)) => { - trace!(target: "privatetx", "Key for account wasnt found in sstore. Creating. Address: {:?}", contract_address); - self.retrieve_key(&format!("/{}", self.config.threshold), true, contract_address) - } - Err(err) => Err(err), - }?; + fn encrypt( + &self, + contract_address: &Address, + initialisation_vector: &H128, + plain_data: &[u8], + ) -> Result { + // retrieve the key, try to generate it if it doesn't exist yet + let key = match self.retrieve_key("", false, contract_address) { + Ok(key) => Ok(key), + Err(Error::EncryptionKeyNotFound(_)) => { + trace!(target: "privatetx", "Key for account wasnt found in sstore. Creating. Address: {:?}", contract_address); + self.retrieve_key( + &format!("/{}", self.config.threshold), + true, + contract_address, + ) + } + Err(err) => Err(err), + }?; - // encrypt data - let mut cypher = Vec::with_capacity(plain_data.len() + initialisation_vector.len()); - cypher.extend(repeat(0).take(plain_data.len())); - crypto::aes::encrypt_128_ctr(&key, initialisation_vector, plain_data, &mut cypher) - .map_err(|e| Error::Encrypt(e.to_string()))?; - cypher.extend_from_slice(&initialisation_vector); + // encrypt data + let mut cypher = Vec::with_capacity(plain_data.len() + initialisation_vector.len()); + cypher.extend(repeat(0).take(plain_data.len())); + crypto::aes::encrypt_128_ctr(&key, initialisation_vector, plain_data, &mut cypher) + .map_err(|e| Error::Encrypt(e.to_string()))?; + cypher.extend_from_slice(&initialisation_vector); - Ok(cypher) - } + Ok(cypher) + } - /// Decrypt data using previously generated contract key. 
- fn decrypt( - &self, - contract_address: &Address, - cypher: &[u8], - ) -> Result { - // initialization vector takes INIT_VEC_LEN bytes - let cypher_len = cypher.len(); - if cypher_len < INIT_VEC_LEN { - return Err(Error::Decrypt("Invalid cypher".into())); - } + /// Decrypt data using previously generated contract key. + fn decrypt(&self, contract_address: &Address, cypher: &[u8]) -> Result { + // initialization vector takes INIT_VEC_LEN bytes + let cypher_len = cypher.len(); + if cypher_len < INIT_VEC_LEN { + return Err(Error::Decrypt("Invalid cypher".into())); + } - // retrieve existing key - let key = self.retrieve_key("", false, contract_address)?; + // retrieve existing key + let key = self.retrieve_key("", false, contract_address)?; - // use symmetric decryption to decrypt document - let (cypher, iv) = cypher.split_at(cypher_len - INIT_VEC_LEN); - let mut plain_data = Vec::with_capacity(cypher_len - INIT_VEC_LEN); - plain_data.extend(repeat(0).take(cypher_len - INIT_VEC_LEN)); - crypto::aes::decrypt_128_ctr(&key, &iv, cypher, &mut plain_data) - .map_err(|e| Error::Decrypt(e.to_string()))?; - Ok(plain_data) - } + // use symmetric decryption to decrypt document + let (cypher, iv) = cypher.split_at(cypher_len - INIT_VEC_LEN); + let mut plain_data = Vec::with_capacity(cypher_len - INIT_VEC_LEN); + plain_data.extend(repeat(0).take(cypher_len - INIT_VEC_LEN)); + crypto::aes::decrypt_128_ctr(&key, &iv, cypher, &mut plain_data) + .map_err(|e| Error::Decrypt(e.to_string()))?; + Ok(plain_data) + } } /// Dummy encryptor. 
@@ -251,20 +273,16 @@ impl Encryptor for SecretStoreEncryptor { pub struct NoopEncryptor; impl Encryptor for NoopEncryptor { - fn encrypt( - &self, - _contract_address: &Address, - _initialisation_vector: &H128, - data: &[u8], - ) -> Result { - Ok(data.to_vec()) - } + fn encrypt( + &self, + _contract_address: &Address, + _initialisation_vector: &H128, + data: &[u8], + ) -> Result { + Ok(data.to_vec()) + } - fn decrypt( - &self, - _contract_address: &Address, - data: &[u8], - ) -> Result { - Ok(data.to_vec()) - } + fn decrypt(&self, _contract_address: &Address, data: &[u8]) -> Result { + Ok(data.to_vec()) + } } diff --git a/ethcore/private-tx/src/error.rs b/ethcore/private-tx/src/error.rs index eda08b2a5..347f0ced3 100644 --- a/ethcore/private-tx/src/error.rs +++ b/ethcore/private-tx/src/error.rs @@ -14,187 +14,189 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::error; use derive_more::Display; -use ethereum_types::Address; -use rlp::DecoderError; -use ethtrie::TrieError; use ethcore::error::{Error as EthcoreError, ExecutionError}; -use types::transaction::Error as TransactionError; -use ethkey::Error as KeyError; -use ethkey::crypto::Error as CryptoError; -use txpool::VerifiedTransaction; +use ethereum_types::Address; +use ethkey::{crypto::Error as CryptoError, Error as KeyError}; +use ethtrie::TrieError; use private_transactions::VerifiedPrivateTransaction; +use rlp::DecoderError; +use std::error; +use txpool::VerifiedTransaction; +use types::transaction::Error as TransactionError; type TxPoolError = txpool::Error<::Hash>; #[derive(Debug, Display)] pub enum Error { - /// Error concerning the Rust standard library's IO subsystem. - #[display(fmt = "Io Error: {}", _0)] - Io(::std::io::Error), - /// RLP decoding error. - #[display(fmt = "Decoder Error: {}", _0)] - Decoder(DecoderError), - /// Error concerning TrieDBs. 
- #[display(fmt = "Trie Error: {}", _0)] - Trie(TrieError), - /// Transaction pool error. - #[display(fmt = "Transaction Pool Error: {}", _0)] - TxPool(TxPoolError), - /// Crypto error. - #[display(fmt = "Crypto Error {}", _0)] - Crypto(CryptoError), - /// Encryption error. - #[display(fmt = "Encryption error. ({})", _0)] - Encrypt(String), - /// Decryption error. - #[display(fmt = "Decryption error. ({})", _0)] - Decrypt(String), - /// Address not authorized. - #[display(fmt = "Private transaction execution is not authorised for {}", _0)] - NotAuthorised(Address), - /// Transaction creates more than one contract. - #[display(fmt = "Private transaction created too many contracts")] - TooManyContracts, - /// Contract call error. - #[display(fmt = "Contract call error. ({})", _0)] - Call(String), - /// State is not available. - #[display(fmt = "State is not available")] - StatePruned, - /// State is incorrect. - #[display(fmt = "State is incorrect")] - StateIncorrect, - /// Wrong private transaction type. - #[display(fmt = "Wrong private transaction type")] - BadTransactionType, - /// Contract does not exist or was not created. - #[display(fmt = "Contract does not exist or was not created")] - ContractDoesNotExist, - /// Reference to the client is corrupted. - #[display(fmt = "Reference to the client is corrupted")] - ClientIsMalformed, - /// Queue of private transactions for verification is full. - #[display(fmt = "Queue of private transactions for verification is full")] - QueueIsFull, - /// The transaction already exists in queue of private transactions. - #[display(fmt = "The transaction already exists in queue of private transactions.")] - PrivateTransactionAlreadyImported, - /// The information about private transaction is not found in the store. - #[display(fmt = "The information about private transaction is not found in the store.")] - PrivateTransactionNotFound, - /// Account for signing public transactions not set. 
- #[display(fmt = "Account for signing public transactions not set.")] - SignerAccountNotSet, - /// Account for validating private transactions not set. - #[display(fmt = "Account for validating private transactions not set.")] - ValidatorAccountNotSet, - /// Account for signing requests to key server not set. - #[display(fmt = "Account for signing requests to key server not set.")] - KeyServerAccountNotSet, - /// Encryption key is not found on key server. - #[display(fmt = "Encryption key is not found on key server for {}", _0)] - EncryptionKeyNotFound(Address), - /// Key server URL is not set. - #[display(fmt = "Key server URL is not set.")] - KeyServerNotSet, - /// VM execution error. - #[display(fmt = "VM execution error {}", _0)] - Execution(ExecutionError), - /// General signing error. - #[display(fmt = "General signing error {}", _0)] - Key(KeyError), - /// Error of transactions processing. - #[display(fmt = "Error of transactions processing {}", _0)] - Transaction(TransactionError), - /// General ethcore error. - #[display(fmt = "General ethcore error {}", _0)] - Ethcore(EthcoreError), - /// A convenient variant for String. - #[display(fmt = "{}", _0)] - Msg(String), + /// Error concerning the Rust standard library's IO subsystem. + #[display(fmt = "Io Error: {}", _0)] + Io(::std::io::Error), + /// RLP decoding error. + #[display(fmt = "Decoder Error: {}", _0)] + Decoder(DecoderError), + /// Error concerning TrieDBs. + #[display(fmt = "Trie Error: {}", _0)] + Trie(TrieError), + /// Transaction pool error. + #[display(fmt = "Transaction Pool Error: {}", _0)] + TxPool(TxPoolError), + /// Crypto error. + #[display(fmt = "Crypto Error {}", _0)] + Crypto(CryptoError), + /// Encryption error. + #[display(fmt = "Encryption error. ({})", _0)] + Encrypt(String), + /// Decryption error. + #[display(fmt = "Decryption error. ({})", _0)] + Decrypt(String), + /// Address not authorized. 
+ #[display(fmt = "Private transaction execution is not authorised for {}", _0)] + NotAuthorised(Address), + /// Transaction creates more than one contract. + #[display(fmt = "Private transaction created too many contracts")] + TooManyContracts, + /// Contract call error. + #[display(fmt = "Contract call error. ({})", _0)] + Call(String), + /// State is not available. + #[display(fmt = "State is not available")] + StatePruned, + /// State is incorrect. + #[display(fmt = "State is incorrect")] + StateIncorrect, + /// Wrong private transaction type. + #[display(fmt = "Wrong private transaction type")] + BadTransactionType, + /// Contract does not exist or was not created. + #[display(fmt = "Contract does not exist or was not created")] + ContractDoesNotExist, + /// Reference to the client is corrupted. + #[display(fmt = "Reference to the client is corrupted")] + ClientIsMalformed, + /// Queue of private transactions for verification is full. + #[display(fmt = "Queue of private transactions for verification is full")] + QueueIsFull, + /// The transaction already exists in queue of private transactions. + #[display(fmt = "The transaction already exists in queue of private transactions.")] + PrivateTransactionAlreadyImported, + /// The information about private transaction is not found in the store. + #[display(fmt = "The information about private transaction is not found in the store.")] + PrivateTransactionNotFound, + /// Account for signing public transactions not set. + #[display(fmt = "Account for signing public transactions not set.")] + SignerAccountNotSet, + /// Account for validating private transactions not set. + #[display(fmt = "Account for validating private transactions not set.")] + ValidatorAccountNotSet, + /// Account for signing requests to key server not set. + #[display(fmt = "Account for signing requests to key server not set.")] + KeyServerAccountNotSet, + /// Encryption key is not found on key server. 
+ #[display(fmt = "Encryption key is not found on key server for {}", _0)] + EncryptionKeyNotFound(Address), + /// Key server URL is not set. + #[display(fmt = "Key server URL is not set.")] + KeyServerNotSet, + /// VM execution error. + #[display(fmt = "VM execution error {}", _0)] + Execution(ExecutionError), + /// General signing error. + #[display(fmt = "General signing error {}", _0)] + Key(KeyError), + /// Error of transactions processing. + #[display(fmt = "Error of transactions processing {}", _0)] + Transaction(TransactionError), + /// General ethcore error. + #[display(fmt = "General ethcore error {}", _0)] + Ethcore(EthcoreError), + /// A convenient variant for String. + #[display(fmt = "{}", _0)] + Msg(String), } impl error::Error for Error { - fn source(&self) -> Option<&(error::Error + 'static)> { - match self { - Error::Io(e) => Some(e), - Error::Decoder(e) => Some(e), - Error::Trie(e) => Some(e), - Error::TxPool(e) => Some(e), - Error::Crypto(e) => Some(e), - Error::Execution(e) => Some(e), - Error::Key(e) => Some(e), - Error::Transaction(e) => Some(e), - Error::Ethcore(e) => Some(e), - _ => None, - } - } + fn source(&self) -> Option<&(error::Error + 'static)> { + match self { + Error::Io(e) => Some(e), + Error::Decoder(e) => Some(e), + Error::Trie(e) => Some(e), + Error::TxPool(e) => Some(e), + Error::Crypto(e) => Some(e), + Error::Execution(e) => Some(e), + Error::Key(e) => Some(e), + Error::Transaction(e) => Some(e), + Error::Ethcore(e) => Some(e), + _ => None, + } + } } impl From for Error { - fn from(s: String) -> Self { - Error::Msg(s) - } + fn from(s: String) -> Self { + Error::Msg(s) + } } impl From for Error { - fn from(err: std::io::Error) -> Self { - Error::Io(err).into() - } + fn from(err: std::io::Error) -> Self { + Error::Io(err).into() + } } impl From for Error { - fn from(err: KeyError) -> Self { - Error::Key(err).into() - } + fn from(err: KeyError) -> Self { + Error::Key(err).into() + } } impl From for Error { - fn from(err: 
CryptoError) -> Self { - Error::Crypto(err).into() - } + fn from(err: CryptoError) -> Self { + Error::Crypto(err).into() + } } impl From for Error { - fn from(err: DecoderError) -> Self { - Error::Decoder(err).into() - } + fn from(err: DecoderError) -> Self { + Error::Decoder(err).into() + } } impl From for Error { - fn from(err: ExecutionError) -> Self { - Error::Execution(err).into() - } + fn from(err: ExecutionError) -> Self { + Error::Execution(err).into() + } } impl From for Error { - fn from(err: TransactionError) -> Self { - Error::Transaction(err).into() - } + fn from(err: TransactionError) -> Self { + Error::Transaction(err).into() + } } impl From for Error { - fn from(err: TrieError) -> Self { - Error::Trie(err).into() - } + fn from(err: TrieError) -> Self { + Error::Trie(err).into() + } } impl From for Error { - fn from(err: TxPoolError) -> Self { - Error::TxPool(err).into() - } + fn from(err: TxPoolError) -> Self { + Error::TxPool(err).into() + } } impl From for Error { - fn from(err: EthcoreError) -> Self { - Error::Ethcore(err).into() - } + fn from(err: EthcoreError) -> Self { + Error::Ethcore(err).into() + } } -impl From> for Error where Error: From { - fn from(err: Box) -> Error { - Error::from(*err) - } +impl From> for Error +where + Error: From, +{ + fn from(err: Box) -> Error { + Error::from(*err) + } } diff --git a/ethcore/private-tx/src/key_server_keys.rs b/ethcore/private-tx/src/key_server_keys.rs index 28d9b3cb9..cab4d6adc 100644 --- a/ethcore/private-tx/src/key_server_keys.rs +++ b/ethcore/private-tx/src/key_server_keys.rs @@ -16,12 +16,12 @@ //! Wrapper around key server responsible for access keys processing. 
-use std::sync::Arc; -use parking_lot::RwLock; -use ethereum_types::{H256, Address}; use call_contract::{CallContract, RegistryInfo}; -use ethcore::client::BlockId; use ethabi::FunctionOutputDecoder; +use ethcore::client::BlockId; +use ethereum_types::{Address, H256}; +use parking_lot::RwLock; +use std::sync::Arc; const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; @@ -29,145 +29,167 @@ use_contract!(keys_acl_contract, "res/keys_acl.json"); /// Returns the address (of the contract), that corresponds to the key pub fn key_to_address(key: &H256) -> Address { - Address::from_slice(&key.to_vec()[..10]) + Address::from_slice(&key.to_vec()[..10]) } /// Returns the key from the key server associated with the contract pub fn address_to_key(contract_address: &Address) -> H256 { - // Current solution uses contract address extended with 0 as id - let contract_address_extended: H256 = contract_address.into(); + // Current solution uses contract address extended with 0 as id + let contract_address_extended: H256 = contract_address.into(); - H256::from_slice(&contract_address_extended) + H256::from_slice(&contract_address_extended) } /// Trait for keys server keys provider. pub trait KeyProvider: Send + Sync + 'static { - /// Account, that is used for communication with key server - fn key_server_account(&self) -> Option
; + /// Account, that is used for communication with key server + fn key_server_account(&self) -> Option
; - /// List of keys available for the account - fn available_keys(&self, block: BlockId, account: &Address) -> Option>; + /// List of keys available for the account + fn available_keys(&self, block: BlockId, account: &Address) -> Option>; - /// Update permissioning contract - fn update_acl_contract(&self); + /// Update permissioning contract + fn update_acl_contract(&self); } /// Secret Store keys provider -pub struct SecretStoreKeys where C: CallContract + RegistryInfo + Send + Sync + 'static { - client: Arc, - key_server_account: Option
, - keys_acl_contract: RwLock>, +pub struct SecretStoreKeys +where + C: CallContract + RegistryInfo + Send + Sync + 'static, +{ + client: Arc, + key_server_account: Option
, + keys_acl_contract: RwLock>, } -impl SecretStoreKeys where C: CallContract + RegistryInfo + Send + Sync + 'static { - /// Create provider - pub fn new(client: Arc, key_server_account: Option
) -> Self { - SecretStoreKeys { - client, - key_server_account, - keys_acl_contract: RwLock::new(None), - } - } +impl SecretStoreKeys +where + C: CallContract + RegistryInfo + Send + Sync + 'static, +{ + /// Create provider + pub fn new(client: Arc, key_server_account: Option
) -> Self { + SecretStoreKeys { + client, + key_server_account, + keys_acl_contract: RwLock::new(None), + } + } } -impl KeyProvider for SecretStoreKeys where C: CallContract + RegistryInfo + Send + Sync + 'static { - fn key_server_account(&self) -> Option
{ - self.key_server_account - } +impl KeyProvider for SecretStoreKeys +where + C: CallContract + RegistryInfo + Send + Sync + 'static, +{ + fn key_server_account(&self) -> Option
{ + self.key_server_account + } - fn available_keys(&self, block: BlockId, account: &Address) -> Option> { - match *self.keys_acl_contract.read() { - Some(acl_contract_address) => { - let (data, decoder) = keys_acl_contract::functions::available_keys::call(*account); - if let Ok(value) = self.client.call_contract(block, acl_contract_address, data) { - decoder.decode(&value).ok().map(|key_values| { - key_values.iter().map(key_to_address).collect() - }) - } else { - None - } - } - None => None, - } - } + fn available_keys(&self, block: BlockId, account: &Address) -> Option> { + match *self.keys_acl_contract.read() { + Some(acl_contract_address) => { + let (data, decoder) = keys_acl_contract::functions::available_keys::call(*account); + if let Ok(value) = self.client.call_contract(block, acl_contract_address, data) { + decoder + .decode(&value) + .ok() + .map(|key_values| key_values.iter().map(key_to_address).collect()) + } else { + None + } + } + None => None, + } + } - fn update_acl_contract(&self) { - let contract_address = self.client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.into(), BlockId::Latest); - if *self.keys_acl_contract.read() != contract_address { - trace!(target: "privatetx", "Configuring for ACL checker contract from address {:?}", + fn update_acl_contract(&self) { + let contract_address = self + .client + .registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.into(), BlockId::Latest); + if *self.keys_acl_contract.read() != contract_address { + trace!(target: "privatetx", "Configuring for ACL checker contract from address {:?}", contract_address); - *self.keys_acl_contract.write() = contract_address; - } - } + *self.keys_acl_contract.write() = contract_address; + } + } } /// Dummy keys provider. pub struct StoringKeyProvider { - available_keys: RwLock>>, - key_server_account: Option
, + available_keys: RwLock>>, + key_server_account: Option
, } impl StoringKeyProvider { - /// Store available keys - pub fn set_available_keys(&self, keys: &Vec
) { - *self.available_keys.write() = Some(keys.clone()) - } + /// Store available keys + pub fn set_available_keys(&self, keys: &Vec
) { + *self.available_keys.write() = Some(keys.clone()) + } } impl Default for StoringKeyProvider { - fn default() -> Self { - StoringKeyProvider { - available_keys: RwLock::new(None), - key_server_account: Some(Address::default()), - } - } + fn default() -> Self { + StoringKeyProvider { + available_keys: RwLock::new(None), + key_server_account: Some(Address::default()), + } + } } impl KeyProvider for StoringKeyProvider { - fn key_server_account(&self) -> Option
{ - self.key_server_account - } + fn key_server_account(&self) -> Option
{ + self.key_server_account + } - fn available_keys(&self, _block: BlockId, _account: &Address) -> Option> { - self.available_keys.read().clone() - } + fn available_keys(&self, _block: BlockId, _account: &Address) -> Option> { + self.available_keys.read().clone() + } - fn update_acl_contract(&self) {} + fn update_acl_contract(&self) {} } #[cfg(test)] mod tests { - use std::sync::Arc; - use ethkey::{Secret, KeyPair}; - use bytes::Bytes; - use super::*; + use super::*; + use bytes::Bytes; + use ethkey::{KeyPair, Secret}; + use std::sync::Arc; - struct DummyRegistryClient { - registry_address: Option
, - } + struct DummyRegistryClient { + registry_address: Option
, + } - impl DummyRegistryClient { - pub fn new(registry_address: Option
) -> Self { - DummyRegistryClient { - registry_address - } - } - } + impl DummyRegistryClient { + pub fn new(registry_address: Option
) -> Self { + DummyRegistryClient { registry_address } + } + } - impl RegistryInfo for DummyRegistryClient { - fn registry_address(&self, _name: String, _block: BlockId) -> Option
{ self.registry_address } - } + impl RegistryInfo for DummyRegistryClient { + fn registry_address(&self, _name: String, _block: BlockId) -> Option
{ + self.registry_address + } + } - impl CallContract for DummyRegistryClient { - fn call_contract(&self, _id: BlockId, _address: Address, _data: Bytes) -> Result { Ok(vec![]) } - } + impl CallContract for DummyRegistryClient { + fn call_contract( + &self, + _id: BlockId, + _address: Address, + _data: Bytes, + ) -> Result { + Ok(vec![]) + } + } - #[test] - fn should_update_acl_contract() { - let key = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000011")).unwrap(); - let client = DummyRegistryClient::new(Some(key.address())); - let keys_data = SecretStoreKeys::new(Arc::new(client), None); - keys_data.update_acl_contract(); - assert_eq!(keys_data.keys_acl_contract.read().unwrap(), key.address()); - } -} \ No newline at end of file + #[test] + fn should_update_acl_contract() { + let key = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000011", + )) + .unwrap(); + let client = DummyRegistryClient::new(Some(key.address())); + let keys_data = SecretStoreKeys::new(Arc::new(client), None); + keys_data.update_acl_contract(); + assert_eq!(keys_data.keys_acl_contract.read().unwrap(), key.address()); + } +} diff --git a/ethcore/private-tx/src/lib.rs b/ethcore/private-tx/src/lib.rs index 6ff35a4be..a48ae87de 100644 --- a/ethcore/private-tx/src/lib.rs +++ b/ethcore/private-tx/src/lib.rs @@ -18,13 +18,13 @@ // Recursion limit required because of // error_chain foreign_links. 
-#![recursion_limit="256"] +#![recursion_limit = "256"] mod encryptor; -mod key_server_keys; -mod private_transactions; -mod messages; mod error; +mod key_server_keys; +mod messages; +mod private_transactions; extern crate common_types as types; extern crate ethabi; @@ -42,11 +42,11 @@ extern crate keccak_hash as hash; extern crate parity_bytes as bytes; extern crate parity_crypto as crypto; extern crate parking_lot; -extern crate trie_db as trie; extern crate patricia_trie_ethereum as ethtrie; extern crate rlp; extern crate rustc_hex; extern crate transaction_pool as txpool; +extern crate trie_db as trie; extern crate url; #[macro_use] extern crate log; @@ -58,40 +58,45 @@ extern crate derive_more; #[macro_use] extern crate rlp_derive; -#[cfg(test)] -extern crate rand; #[cfg(test)] extern crate env_logger; +#[cfg(test)] +extern crate rand; -pub use encryptor::{Encryptor, SecretStoreEncryptor, EncryptorConfig, NoopEncryptor}; -pub use key_server_keys::{KeyProvider, SecretStoreKeys, StoringKeyProvider}; -pub use private_transactions::{VerifiedPrivateTransaction, VerificationStore, PrivateTransactionSigningDesc, SigningStore}; -pub use messages::{PrivateTransaction, SignedPrivateTransaction}; +pub use encryptor::{Encryptor, EncryptorConfig, NoopEncryptor, SecretStoreEncryptor}; pub use error::Error; - -use std::sync::{Arc, Weak}; -use std::collections::{HashMap, HashSet, BTreeMap}; -use ethereum_types::{H128, H256, U256, Address}; -use hash::keccak; -use rlp::*; -use parking_lot::RwLock; -use bytes::Bytes; -use ethkey::{Signature, recover, public_to_address}; -use io::IoChannel; -use ethcore::executive::{Executive, TransactOptions}; -use ethcore::executed::{Executed}; -use types::transaction::{SignedTransaction, Transaction, Action, UnverifiedTransaction}; -use ethcore::{contract_address as ethcore_contract_address}; -use ethcore::client::{ - Client, ChainNotify, NewBlocks, ChainMessageType, ClientIoMessage, BlockId, - Call, BlockInfo +pub use 
key_server_keys::{KeyProvider, SecretStoreKeys, StoringKeyProvider}; +pub use messages::{PrivateTransaction, SignedPrivateTransaction}; +pub use private_transactions::{ + PrivateTransactionSigningDesc, SigningStore, VerificationStore, VerifiedPrivateTransaction, }; -use ethcore::miner::{self, Miner, MinerService, pool_client::NonceCache}; -use ethcore::{state, state_db}; -use ethcore::trace::{Tracer, VMTracer}; + +use bytes::Bytes; use call_contract::CallContract; -use rustc_hex::FromHex; use ethabi::FunctionOutputDecoder; +use ethcore::{ + client::{ + BlockId, BlockInfo, Call, ChainMessageType, ChainNotify, Client, ClientIoMessage, NewBlocks, + }, + contract_address as ethcore_contract_address, + executed::Executed, + executive::{Executive, TransactOptions}, + miner::{self, pool_client::NonceCache, Miner, MinerService}, + state, state_db, + trace::{Tracer, VMTracer}, +}; +use ethereum_types::{Address, H128, H256, U256}; +use ethkey::{public_to_address, recover, Signature}; +use hash::keccak; +use io::IoChannel; +use parking_lot::RwLock; +use rlp::*; +use rustc_hex::FromHex; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + sync::{Arc, Weak}, +}; +use types::transaction::{Action, SignedTransaction, Transaction, UnverifiedTransaction}; // Source avaiable at https://github.com/parity-contracts/private-tx/blob/master/contracts/PrivateContract.sol const DEFAULT_STUB_CONTRACT: &'static str = include_str!("../res/private.evm"); @@ -113,598 +118,813 @@ const PRIVATE_CONTRACT_WITH_NOTIFICATION_VER: usize = 2; /// Configurtion for private transaction provider #[derive(Default, PartialEq, Debug, Clone)] pub struct ProviderConfig { - /// Accounts that can be used for validation - pub validator_accounts: Vec
, - /// Account used for signing public transactions created from private transactions - pub signer_account: Option
, + /// Accounts that can be used for validation + pub validator_accounts: Vec
, + /// Account used for signing public transactions created from private transactions + pub signer_account: Option
, } #[derive(Debug)] /// Private transaction execution receipt. pub struct Receipt { - /// Private transaction hash. - pub hash: H256, - /// Contract address. - pub contract_address: Address, - /// Execution status. - pub status_code: u8, + /// Private transaction hash. + pub hash: H256, + /// Contract address. + pub contract_address: Address, + /// Execution status. + pub status_code: u8, } /// Payload signing and decrypting capabilities. pub trait Signer: Send + Sync { - /// Decrypt payload using private key of given address. - fn decrypt(&self, account: Address, shared_mac: &[u8], payload: &[u8]) -> Result, Error>; - /// Sign given hash using provided account. - fn sign(&self, account: Address, hash: ethkey::Message) -> Result; + /// Decrypt payload using private key of given address. + fn decrypt( + &self, + account: Address, + shared_mac: &[u8], + payload: &[u8], + ) -> Result, Error>; + /// Sign given hash using provided account. + fn sign(&self, account: Address, hash: ethkey::Message) -> Result; } /// Signer implementation that errors on any request. pub struct DummySigner; impl Signer for DummySigner { - fn decrypt(&self, _account: Address, _shared_mac: &[u8], _payload: &[u8]) -> Result, Error> { - Err("Decrypting is not supported.".to_owned())? - } + fn decrypt( + &self, + _account: Address, + _shared_mac: &[u8], + _payload: &[u8], + ) -> Result, Error> { + Err("Decrypting is not supported.".to_owned())? + } - fn sign(&self, _account: Address, _hash: ethkey::Message) -> Result { - Err("Signing is not supported.".to_owned())? - } + fn sign(&self, _account: Address, _hash: ethkey::Message) -> Result { + Err("Signing is not supported.".to_owned())? 
+ } } /// Signer implementation using multiple keypairs pub struct KeyPairSigner(pub Vec); impl Signer for KeyPairSigner { - fn decrypt(&self, account: Address, shared_mac: &[u8], payload: &[u8]) -> Result, Error> { - let kp = self.0.iter().find(|k| k.address() == account).ok_or(ethkey::Error::InvalidAddress)?; - Ok(ethkey::crypto::ecies::decrypt(kp.secret(), shared_mac, payload)?) - } + fn decrypt( + &self, + account: Address, + shared_mac: &[u8], + payload: &[u8], + ) -> Result, Error> { + let kp = self + .0 + .iter() + .find(|k| k.address() == account) + .ok_or(ethkey::Error::InvalidAddress)?; + Ok(ethkey::crypto::ecies::decrypt( + kp.secret(), + shared_mac, + payload, + )?) + } - fn sign(&self, account: Address, hash: ethkey::Message) -> Result { - let kp = self.0.iter().find(|k| k.address() == account).ok_or(ethkey::Error::InvalidAddress)?; - Ok(ethkey::sign(kp.secret(), &hash)?) - } + fn sign(&self, account: Address, hash: ethkey::Message) -> Result { + let kp = self + .0 + .iter() + .find(|k| k.address() == account) + .ok_or(ethkey::Error::InvalidAddress)?; + Ok(ethkey::sign(kp.secret(), &hash)?) + } } /// Manager of private transactions pub struct Provider { - encryptor: Box, - validator_accounts: HashSet
, - signer_account: Option
, - notify: RwLock>>, - transactions_for_signing: RwLock, - transactions_for_verification: VerificationStore, - client: Arc, - miner: Arc, - accounts: Arc, - channel: IoChannel, - keys_provider: Arc, + encryptor: Box, + validator_accounts: HashSet
, + signer_account: Option
, + notify: RwLock>>, + transactions_for_signing: RwLock, + transactions_for_verification: VerificationStore, + client: Arc, + miner: Arc, + accounts: Arc, + channel: IoChannel, + keys_provider: Arc, } #[derive(Debug)] -pub struct PrivateExecutionResult where T: Tracer, V: VMTracer { - code: Option, - state: Bytes, - contract_address: Address, - result: Executed, +pub struct PrivateExecutionResult +where + T: Tracer, + V: VMTracer, +{ + code: Option, + state: Bytes, + contract_address: Address, + result: Executed, } impl Provider { - /// Create a new provider. - pub fn new( - client: Arc, - miner: Arc, - accounts: Arc, - encryptor: Box, - config: ProviderConfig, - channel: IoChannel, - keys_provider: Arc, - ) -> Self { - keys_provider.update_acl_contract(); - Provider { - encryptor, - validator_accounts: config.validator_accounts.into_iter().collect(), - signer_account: config.signer_account, - notify: RwLock::default(), - transactions_for_signing: RwLock::default(), - transactions_for_verification: VerificationStore::default(), - client, - miner, - accounts, - channel, - keys_provider, - } - } + /// Create a new provider. + pub fn new( + client: Arc, + miner: Arc, + accounts: Arc, + encryptor: Box, + config: ProviderConfig, + channel: IoChannel, + keys_provider: Arc, + ) -> Self { + keys_provider.update_acl_contract(); + Provider { + encryptor, + validator_accounts: config.validator_accounts.into_iter().collect(), + signer_account: config.signer_account, + notify: RwLock::default(), + transactions_for_signing: RwLock::default(), + transactions_for_verification: VerificationStore::default(), + client, + miner, + accounts, + channel, + keys_provider, + } + } - // TODO [ToDr] Don't use `ChainNotify` here! - // Better to create a separate notification type for this. - /// Adds an actor to be notified on certain events - pub fn add_notify(&self, target: Arc) { - self.notify.write().push(Arc::downgrade(&target)); - } + // TODO [ToDr] Don't use `ChainNotify` here! 
+ // Better to create a separate notification type for this. + /// Adds an actor to be notified on certain events + pub fn add_notify(&self, target: Arc) { + self.notify.write().push(Arc::downgrade(&target)); + } - fn notify(&self, f: F) where F: Fn(&ChainNotify) { - for np in self.notify.read().iter() { - if let Some(n) = np.upgrade() { - f(&*n); - } - } - } + fn notify(&self, f: F) + where + F: Fn(&ChainNotify), + { + for np in self.notify.read().iter() { + if let Some(n) = np.upgrade() { + f(&*n); + } + } + } - /// 1. Create private transaction from the signed transaction - /// 2. Executes private transaction - /// 3. Save it with state returned on prev step to the queue for signing - /// 4. Broadcast corresponding message to the chain - pub fn create_private_transaction(&self, signed_transaction: SignedTransaction) -> Result { - trace!(target: "privatetx", "Creating private transaction from regular transaction: {:?}", signed_transaction); - if self.signer_account.is_none() { - warn!(target: "privatetx", "Signing account not set"); - return Err(Error::SignerAccountNotSet); - } - let tx_hash = signed_transaction.hash(); - let contract = Self::contract_address_from_transaction(&signed_transaction).map_err(|_| Error::BadTransactionType)?; - let data = signed_transaction.rlp_bytes(); - let encrypted_transaction = self.encrypt(&contract, &Self::iv_from_transaction(&signed_transaction), &data)?; - let private = PrivateTransaction::new(encrypted_transaction, contract); - // TODO #9825 [ToDr] Using BlockId::Latest is bad here, - // the block may change in the middle of execution - // causing really weird stuff to happen. - // We should retrieve hash and stick to that. IMHO - // best would be to change the API and only allow H256 instead of BlockID - // in private-tx to avoid such mistakes. 
- let contract_nonce = self.get_contract_nonce(&contract, BlockId::Latest)?; - let private_state = self.execute_private_transaction(BlockId::Latest, &signed_transaction)?; - trace!(target: "privatetx", "Private transaction created, encrypted transaction: {:?}, private state: {:?}", private, private_state); - let contract_validators = self.get_validators(BlockId::Latest, &contract)?; - trace!(target: "privatetx", "Required validators: {:?}", contract_validators); - let private_state_hash = self.calculate_state_hash(&private_state, contract_nonce); - trace!(target: "privatetx", "Hashed effective private state for sender: {:?}", private_state_hash); - self.transactions_for_signing.write().add_transaction(private.hash(), signed_transaction, contract_validators, private_state, contract_nonce)?; - self.broadcast_private_transaction(private.hash(), private.rlp_bytes()); - Ok(Receipt { - hash: tx_hash, - contract_address: contract, - status_code: 0, - }) - } + /// 1. Create private transaction from the signed transaction + /// 2. Executes private transaction + /// 3. Save it with state returned on prev step to the queue for signing + /// 4. 
Broadcast corresponding message to the chain + pub fn create_private_transaction( + &self, + signed_transaction: SignedTransaction, + ) -> Result { + trace!(target: "privatetx", "Creating private transaction from regular transaction: {:?}", signed_transaction); + if self.signer_account.is_none() { + warn!(target: "privatetx", "Signing account not set"); + return Err(Error::SignerAccountNotSet); + } + let tx_hash = signed_transaction.hash(); + let contract = Self::contract_address_from_transaction(&signed_transaction) + .map_err(|_| Error::BadTransactionType)?; + let data = signed_transaction.rlp_bytes(); + let encrypted_transaction = self.encrypt( + &contract, + &Self::iv_from_transaction(&signed_transaction), + &data, + )?; + let private = PrivateTransaction::new(encrypted_transaction, contract); + // TODO #9825 [ToDr] Using BlockId::Latest is bad here, + // the block may change in the middle of execution + // causing really weird stuff to happen. + // We should retrieve hash and stick to that. IMHO + // best would be to change the API and only allow H256 instead of BlockID + // in private-tx to avoid such mistakes. 
+ let contract_nonce = self.get_contract_nonce(&contract, BlockId::Latest)?; + let private_state = + self.execute_private_transaction(BlockId::Latest, &signed_transaction)?; + trace!(target: "privatetx", "Private transaction created, encrypted transaction: {:?}, private state: {:?}", private, private_state); + let contract_validators = self.get_validators(BlockId::Latest, &contract)?; + trace!(target: "privatetx", "Required validators: {:?}", contract_validators); + let private_state_hash = self.calculate_state_hash(&private_state, contract_nonce); + trace!(target: "privatetx", "Hashed effective private state for sender: {:?}", private_state_hash); + self.transactions_for_signing.write().add_transaction( + private.hash(), + signed_transaction, + contract_validators, + private_state, + contract_nonce, + )?; + self.broadcast_private_transaction(private.hash(), private.rlp_bytes()); + Ok(Receipt { + hash: tx_hash, + contract_address: contract, + status_code: 0, + }) + } - /// Calculate hash from united private state and contract nonce - pub fn calculate_state_hash(&self, state: &Bytes, nonce: U256) -> H256 { - let state_hash = keccak(state); - let mut state_buf = [0u8; 64]; - state_buf[..32].clone_from_slice(&state_hash); - state_buf[32..].clone_from_slice(&H256::from(nonce)); - keccak(AsRef::<[u8]>::as_ref(&state_buf[..])) - } + /// Calculate hash from united private state and contract nonce + pub fn calculate_state_hash(&self, state: &Bytes, nonce: U256) -> H256 { + let state_hash = keccak(state); + let mut state_buf = [0u8; 64]; + state_buf[..32].clone_from_slice(&state_hash); + state_buf[32..].clone_from_slice(&H256::from(nonce)); + keccak(AsRef::<[u8]>::as_ref(&state_buf[..])) + } - fn pool_client<'a>(&'a self, nonce_cache: &'a NonceCache, local_accounts: &'a HashSet
) -> miner::pool_client::PoolClient<'a, Client> { - let engine = self.client.engine(); - miner::pool_client::PoolClient::new( - &*self.client, - nonce_cache, - engine, - local_accounts, - None, // refuse_service_transactions = true - ) - } + fn pool_client<'a>( + &'a self, + nonce_cache: &'a NonceCache, + local_accounts: &'a HashSet
, + ) -> miner::pool_client::PoolClient<'a, Client> { + let engine = self.client.engine(); + miner::pool_client::PoolClient::new( + &*self.client, + nonce_cache, + engine, + local_accounts, + None, // refuse_service_transactions = true + ) + } - /// Retrieve and verify the first available private transaction for every sender - fn process_verification_queue(&self) -> Result<(), Error> { - let process_transaction = |transaction: &VerifiedPrivateTransaction| -> Result<_, String> { - let private_hash = transaction.private_transaction.hash(); - match transaction.validator_account { - None => { - trace!(target: "privatetx", "Propagating transaction further"); - self.broadcast_private_transaction(private_hash, transaction.private_transaction.rlp_bytes()); - return Ok(()); - } - Some(validator_account) => { - if !self.validator_accounts.contains(&validator_account) { - trace!(target: "privatetx", "Propagating transaction further"); - self.broadcast_private_transaction(private_hash, transaction.private_transaction.rlp_bytes()); - return Ok(()); - } - let contract = Self::contract_address_from_transaction(&transaction.transaction) - .map_err(|_| "Incorrect type of action for the transaction")?; - // TODO #9825 [ToDr] Usage of BlockId::Latest - let contract_nonce = self.get_contract_nonce(&contract, BlockId::Latest); - if let Err(e) = contract_nonce { - return Err(format!("Cannot retrieve contract nonce: {:?}", e).into()); - } - let contract_nonce = contract_nonce.expect("Error was checked before"); - let private_state = self.execute_private_transaction(BlockId::Latest, &transaction.transaction); - if let Err(e) = private_state { - return Err(format!("Cannot retrieve private state: {:?}", e).into()); - } - let private_state = private_state.expect("Error was checked before"); - let private_state_hash = self.calculate_state_hash(&private_state, contract_nonce); - trace!(target: "privatetx", "Hashed effective private state for validator: {:?}", private_state_hash); - let 
signed_state = self.accounts.sign(validator_account, private_state_hash); - if let Err(e) = signed_state { - return Err(format!("Cannot sign the state: {:?}", e).into()); - } - let signed_state = signed_state.expect("Error was checked before"); - let signed_private_transaction = SignedPrivateTransaction::new(private_hash, signed_state, None); - trace!(target: "privatetx", "Sending signature for private transaction: {:?}", signed_private_transaction); - self.broadcast_signed_private_transaction(signed_private_transaction.hash(), signed_private_transaction.rlp_bytes()); - } - } - Ok(()) - }; - let nonce_cache = NonceCache::new(NONCE_CACHE_SIZE); - let local_accounts = HashSet::new(); - let ready_transactions = self.transactions_for_verification.drain(self.pool_client(&nonce_cache, &local_accounts)); - for transaction in ready_transactions { - if let Err(e) = process_transaction(&transaction) { - warn!(target: "privatetx", "Error: {:?}", e); - } - } - Ok(()) - } + /// Retrieve and verify the first available private transaction for every sender + fn process_verification_queue(&self) -> Result<(), Error> { + let process_transaction = |transaction: &VerifiedPrivateTransaction| -> Result<_, String> { + let private_hash = transaction.private_transaction.hash(); + match transaction.validator_account { + None => { + trace!(target: "privatetx", "Propagating transaction further"); + self.broadcast_private_transaction( + private_hash, + transaction.private_transaction.rlp_bytes(), + ); + return Ok(()); + } + Some(validator_account) => { + if !self.validator_accounts.contains(&validator_account) { + trace!(target: "privatetx", "Propagating transaction further"); + self.broadcast_private_transaction( + private_hash, + transaction.private_transaction.rlp_bytes(), + ); + return Ok(()); + } + let contract = + Self::contract_address_from_transaction(&transaction.transaction) + .map_err(|_| "Incorrect type of action for the transaction")?; + // TODO #9825 [ToDr] Usage of 
BlockId::Latest + let contract_nonce = self.get_contract_nonce(&contract, BlockId::Latest); + if let Err(e) = contract_nonce { + return Err(format!("Cannot retrieve contract nonce: {:?}", e).into()); + } + let contract_nonce = contract_nonce.expect("Error was checked before"); + let private_state = + self.execute_private_transaction(BlockId::Latest, &transaction.transaction); + if let Err(e) = private_state { + return Err(format!("Cannot retrieve private state: {:?}", e).into()); + } + let private_state = private_state.expect("Error was checked before"); + let private_state_hash = + self.calculate_state_hash(&private_state, contract_nonce); + trace!(target: "privatetx", "Hashed effective private state for validator: {:?}", private_state_hash); + let signed_state = self.accounts.sign(validator_account, private_state_hash); + if let Err(e) = signed_state { + return Err(format!("Cannot sign the state: {:?}", e).into()); + } + let signed_state = signed_state.expect("Error was checked before"); + let signed_private_transaction = + SignedPrivateTransaction::new(private_hash, signed_state, None); + trace!(target: "privatetx", "Sending signature for private transaction: {:?}", signed_private_transaction); + self.broadcast_signed_private_transaction( + signed_private_transaction.hash(), + signed_private_transaction.rlp_bytes(), + ); + } + } + Ok(()) + }; + let nonce_cache = NonceCache::new(NONCE_CACHE_SIZE); + let local_accounts = HashSet::new(); + let ready_transactions = self + .transactions_for_verification + .drain(self.pool_client(&nonce_cache, &local_accounts)); + for transaction in ready_transactions { + if let Err(e) = process_transaction(&transaction) { + warn!(target: "privatetx", "Error: {:?}", e); + } + } + Ok(()) + } - /// Add signed private transaction into the store - /// Creates corresponding public transaction if last required signature collected and sends it to the chain - pub fn process_signature(&self, signed_tx: &SignedPrivateTransaction) -> Result<(), 
Error> { - trace!(target: "privatetx", "Processing signed private transaction"); - let private_hash = signed_tx.private_transaction_hash(); - let desc = match self.transactions_for_signing.read().get(&private_hash) { - None => { - // Not our transaction, broadcast further to peers - self.broadcast_signed_private_transaction(signed_tx.hash(), signed_tx.rlp_bytes()); - return Ok(()); - }, - Some(desc) => desc, - }; - let last = self.last_required_signature(&desc, signed_tx.signature())?; + /// Add signed private transaction into the store + /// Creates corresponding public transaction if last required signature collected and sends it to the chain + pub fn process_signature(&self, signed_tx: &SignedPrivateTransaction) -> Result<(), Error> { + trace!(target: "privatetx", "Processing signed private transaction"); + let private_hash = signed_tx.private_transaction_hash(); + let desc = match self.transactions_for_signing.read().get(&private_hash) { + None => { + // Not our transaction, broadcast further to peers + self.broadcast_signed_private_transaction(signed_tx.hash(), signed_tx.rlp_bytes()); + return Ok(()); + } + Some(desc) => desc, + }; + let last = self.last_required_signature(&desc, signed_tx.signature())?; - if last { - let mut signatures = desc.received_signatures.clone(); - signatures.push(signed_tx.signature()); - let rsv: Vec = signatures.into_iter().map(|sign| sign.into_electrum().into()).collect(); - // Create public transaction - let signer_account = self.signer_account.ok_or_else(|| Error::SignerAccountNotSet)?; - let state = self.client.state_at(BlockId::Latest).ok_or(Error::StatePruned)?; - let nonce = state.nonce(&signer_account)?; - let public_tx = self.public_transaction( - desc.state.clone(), - &desc.original_transaction, - &rsv, - nonce, - desc.original_transaction.gas_price - )?; - trace!(target: "privatetx", "Last required signature received, public transaction created: {:?}", public_tx); - // Sign and add it to the queue - let chain_id = 
desc.original_transaction.chain_id(); - let hash = public_tx.hash(chain_id); - let signature = self.accounts.sign(signer_account, hash)?; - let signed = SignedTransaction::new(public_tx.with_signature(signature, chain_id))?; - match self.miner.import_own_transaction(&*self.client, signed.into()) { - Ok(_) => trace!(target: "privatetx", "Public transaction added to queue"), - Err(err) => { - warn!(target: "privatetx", "Failed to add transaction to queue, error: {:?}", err); - return Err(err.into()); - } - } - // Notify about state changes - let contract = Self::contract_address_from_transaction(&desc.original_transaction)?; - // TODO #9825 Usage of BlockId::Latest - if self.get_contract_version(BlockId::Latest, &contract) >= PRIVATE_CONTRACT_WITH_NOTIFICATION_VER { - match self.state_changes_notify(BlockId::Latest, &contract, &desc.original_transaction.sender(), desc.original_transaction.hash()) { - Ok(_) => trace!(target: "privatetx", "Notification about private state changes sent"), - Err(err) => warn!(target: "privatetx", "Failed to send private state changed notification, error: {:?}", err), - } - } - // Remove from store for signing - if let Err(err) = self.transactions_for_signing.write().remove(&private_hash) { - warn!(target: "privatetx", "Failed to remove transaction from signing store, error: {:?}", err); - return Err(err); - } - } else { - // Add signature to the store - match self.transactions_for_signing.write().add_signature(&private_hash, signed_tx.signature()) { - Ok(_) => trace!(target: "privatetx", "Signature stored for private transaction"), - Err(err) => { - warn!(target: "privatetx", "Failed to add signature to signing store, error: {:?}", err); - return Err(err); - } - } - } - Ok(()) - } + if last { + let mut signatures = desc.received_signatures.clone(); + signatures.push(signed_tx.signature()); + let rsv: Vec = signatures + .into_iter() + .map(|sign| sign.into_electrum().into()) + .collect(); + // Create public transaction + let 
signer_account = self + .signer_account + .ok_or_else(|| Error::SignerAccountNotSet)?; + let state = self + .client + .state_at(BlockId::Latest) + .ok_or(Error::StatePruned)?; + let nonce = state.nonce(&signer_account)?; + let public_tx = self.public_transaction( + desc.state.clone(), + &desc.original_transaction, + &rsv, + nonce, + desc.original_transaction.gas_price, + )?; + trace!(target: "privatetx", "Last required signature received, public transaction created: {:?}", public_tx); + // Sign and add it to the queue + let chain_id = desc.original_transaction.chain_id(); + let hash = public_tx.hash(chain_id); + let signature = self.accounts.sign(signer_account, hash)?; + let signed = SignedTransaction::new(public_tx.with_signature(signature, chain_id))?; + match self + .miner + .import_own_transaction(&*self.client, signed.into()) + { + Ok(_) => trace!(target: "privatetx", "Public transaction added to queue"), + Err(err) => { + warn!(target: "privatetx", "Failed to add transaction to queue, error: {:?}", err); + return Err(err.into()); + } + } + // Notify about state changes + let contract = Self::contract_address_from_transaction(&desc.original_transaction)?; + // TODO #9825 Usage of BlockId::Latest + if self.get_contract_version(BlockId::Latest, &contract) + >= PRIVATE_CONTRACT_WITH_NOTIFICATION_VER + { + match self.state_changes_notify( + BlockId::Latest, + &contract, + &desc.original_transaction.sender(), + desc.original_transaction.hash(), + ) { + Ok(_) => { + trace!(target: "privatetx", "Notification about private state changes sent") + } + Err(err) => { + warn!(target: "privatetx", "Failed to send private state changed notification, error: {:?}", err) + } + } + } + // Remove from store for signing + if let Err(err) = self.transactions_for_signing.write().remove(&private_hash) { + warn!(target: "privatetx", "Failed to remove transaction from signing store, error: {:?}", err); + return Err(err); + } + } else { + // Add signature to the store + match self + 
.transactions_for_signing + .write() + .add_signature(&private_hash, signed_tx.signature()) + { + Ok(_) => trace!(target: "privatetx", "Signature stored for private transaction"), + Err(err) => { + warn!(target: "privatetx", "Failed to add signature to signing store, error: {:?}", err); + return Err(err); + } + } + } + Ok(()) + } - fn contract_address_from_transaction(transaction: &SignedTransaction) -> Result { - match transaction.action { - Action::Call(contract) => Ok(contract), - _ => { - warn!(target: "privatetx", "Incorrect type of action for the transaction"); - return Err(Error::BadTransactionType); - } - } - } + fn contract_address_from_transaction( + transaction: &SignedTransaction, + ) -> Result { + match transaction.action { + Action::Call(contract) => Ok(contract), + _ => { + warn!(target: "privatetx", "Incorrect type of action for the transaction"); + return Err(Error::BadTransactionType); + } + } + } - fn last_required_signature(&self, desc: &PrivateTransactionSigningDesc, sign: Signature) -> Result { - if desc.received_signatures.contains(&sign) { - return Ok(false); - } - let state_hash = self.calculate_state_hash(&desc.state, desc.contract_nonce); - match recover(&sign, &state_hash) { - Ok(public) => { - let sender = public_to_address(&public); - match desc.validators.contains(&sender) { - true => { - Ok(desc.received_signatures.len() + 1 == desc.validators.len()) - } - false => { - warn!(target: "privatetx", "Sender's state doesn't correspond to validator's"); - return Err(Error::StateIncorrect); - } - } - } - Err(err) => { - warn!(target: "privatetx", "Sender's state doesn't correspond to validator's, error {:?}", err); - return Err(err.into()); - } - } - } + fn last_required_signature( + &self, + desc: &PrivateTransactionSigningDesc, + sign: Signature, + ) -> Result { + if desc.received_signatures.contains(&sign) { + return Ok(false); + } + let state_hash = self.calculate_state_hash(&desc.state, desc.contract_nonce); + match recover(&sign, 
&state_hash) { + Ok(public) => { + let sender = public_to_address(&public); + match desc.validators.contains(&sender) { + true => Ok(desc.received_signatures.len() + 1 == desc.validators.len()), + false => { + warn!(target: "privatetx", "Sender's state doesn't correspond to validator's"); + return Err(Error::StateIncorrect); + } + } + } + Err(err) => { + warn!(target: "privatetx", "Sender's state doesn't correspond to validator's, error {:?}", err); + return Err(err.into()); + } + } + } - /// Broadcast the private transaction message to the chain - fn broadcast_private_transaction(&self, transaction_hash: H256, message: Bytes) { - self.notify(|notify| notify.broadcast(ChainMessageType::PrivateTransaction(transaction_hash, message.clone()))); - } + /// Broadcast the private transaction message to the chain + fn broadcast_private_transaction(&self, transaction_hash: H256, message: Bytes) { + self.notify(|notify| { + notify.broadcast(ChainMessageType::PrivateTransaction( + transaction_hash, + message.clone(), + )) + }); + } - /// Broadcast signed private transaction message to the chain - fn broadcast_signed_private_transaction(&self, transaction_hash: H256, message: Bytes) { - self.notify(|notify| notify.broadcast(ChainMessageType::SignedPrivateTransaction(transaction_hash, message.clone()))); - } + /// Broadcast signed private transaction message to the chain + fn broadcast_signed_private_transaction(&self, transaction_hash: H256, message: Bytes) { + self.notify(|notify| { + notify.broadcast(ChainMessageType::SignedPrivateTransaction( + transaction_hash, + message.clone(), + )) + }); + } - fn iv_from_transaction(transaction: &SignedTransaction) -> H128 { - let nonce = keccak(&transaction.nonce.rlp_bytes()); - let (iv, _) = nonce.split_at(INIT_VEC_LEN); - H128::from_slice(iv) - } + fn iv_from_transaction(transaction: &SignedTransaction) -> H128 { + let nonce = keccak(&transaction.nonce.rlp_bytes()); + let (iv, _) = nonce.split_at(INIT_VEC_LEN); + H128::from_slice(iv) 
+ } - fn iv_from_address(contract_address: &Address) -> H128 { - let address = keccak(&contract_address.rlp_bytes()); - let (iv, _) = address.split_at(INIT_VEC_LEN); - H128::from_slice(iv) - } + fn iv_from_address(contract_address: &Address) -> H128 { + let address = keccak(&contract_address.rlp_bytes()); + let (iv, _) = address.split_at(INIT_VEC_LEN); + H128::from_slice(iv) + } - fn encrypt(&self, contract_address: &Address, initialisation_vector: &H128, data: &[u8]) -> Result { - trace!(target: "privatetx", "Encrypt data using key(address): {:?}", contract_address); - Ok(self.encryptor.encrypt(contract_address, initialisation_vector, data)?) - } + fn encrypt( + &self, + contract_address: &Address, + initialisation_vector: &H128, + data: &[u8], + ) -> Result { + trace!(target: "privatetx", "Encrypt data using key(address): {:?}", contract_address); + Ok(self + .encryptor + .encrypt(contract_address, initialisation_vector, data)?) + } - fn decrypt(&self, contract_address: &Address, data: &[u8]) -> Result { - trace!(target: "privatetx", "Decrypt data using key(address): {:?}", contract_address); - Ok(self.encryptor.decrypt(contract_address, data)?) - } + fn decrypt(&self, contract_address: &Address, data: &[u8]) -> Result { + trace!(target: "privatetx", "Decrypt data using key(address): {:?}", contract_address); + Ok(self.encryptor.decrypt(contract_address, data)?) 
+ } - fn get_decrypted_state(&self, address: &Address, block: BlockId) -> Result { - let (data, decoder) = private_contract::functions::state::call(); - let value = self.client.call_contract(block, *address, data)?; - let state = decoder.decode(&value).map_err(|e| Error::Call(format!("Contract call failed {:?}", e)))?; - self.decrypt(address, &state) - } + fn get_decrypted_state(&self, address: &Address, block: BlockId) -> Result { + let (data, decoder) = private_contract::functions::state::call(); + let value = self.client.call_contract(block, *address, data)?; + let state = decoder + .decode(&value) + .map_err(|e| Error::Call(format!("Contract call failed {:?}", e)))?; + self.decrypt(address, &state) + } - fn get_decrypted_code(&self, address: &Address, block: BlockId) -> Result { - let (data, decoder) = private_contract::functions::code::call(); - let value = self.client.call_contract(block, *address, data)?; - let state = decoder.decode(&value).map_err(|e| Error::Call(format!("Contract call failed {:?}", e)))?; - self.decrypt(address, &state) - } + fn get_decrypted_code(&self, address: &Address, block: BlockId) -> Result { + let (data, decoder) = private_contract::functions::code::call(); + let value = self.client.call_contract(block, *address, data)?; + let state = decoder + .decode(&value) + .map_err(|e| Error::Call(format!("Contract call failed {:?}", e)))?; + self.decrypt(address, &state) + } - pub fn get_contract_nonce(&self, address: &Address, block: BlockId) -> Result { - let (data, decoder) = private_contract::functions::nonce::call(); - let value = self.client.call_contract(block, *address, data)?; - decoder.decode(&value).map_err(|e| Error::Call(format!("Contract call failed {:?}", e)).into()) - } + pub fn get_contract_nonce(&self, address: &Address, block: BlockId) -> Result { + let (data, decoder) = private_contract::functions::nonce::call(); + let value = self.client.call_contract(block, *address, data)?; + decoder + .decode(&value) + .map_err(|e| 
Error::Call(format!("Contract call failed {:?}", e)).into()) + } - fn snapshot_to_storage(raw: Bytes) -> HashMap { - let items = raw.len() / 64; - (0..items).map(|i| { - let offset = i * 64; - let key = H256::from_slice(&raw[offset..(offset + 32)]); - let value = H256::from_slice(&raw[(offset + 32)..(offset + 64)]); - (key, value) - }).collect() - } + fn snapshot_to_storage(raw: Bytes) -> HashMap { + let items = raw.len() / 64; + (0..items) + .map(|i| { + let offset = i * 64; + let key = H256::from_slice(&raw[offset..(offset + 32)]); + let value = H256::from_slice(&raw[(offset + 32)..(offset + 64)]); + (key, value) + }) + .collect() + } - fn snapshot_from_storage(storage: &HashMap) -> Bytes { - let mut raw = Vec::with_capacity(storage.len() * 64); - // Sort the storage to guarantee the order for all parties - let sorted_storage: BTreeMap<&H256, &H256> = storage.iter().collect(); - for (key, value) in sorted_storage { - raw.extend_from_slice(key); - raw.extend_from_slice(value); - }; - raw - } + fn snapshot_from_storage(storage: &HashMap) -> Bytes { + let mut raw = Vec::with_capacity(storage.len() * 64); + // Sort the storage to guarantee the order for all parties + let sorted_storage: BTreeMap<&H256, &H256> = storage.iter().collect(); + for (key, value) in sorted_storage { + raw.extend_from_slice(key); + raw.extend_from_slice(value); + } + raw + } - fn patch_account_state(&self, contract_address: &Address, block: BlockId, state: &mut state::State) -> Result<(), Error> { - let contract_code = Arc::new(self.get_decrypted_code(contract_address, block)?); - let contract_state = self.get_decrypted_state(contract_address, block)?; - trace!(target: "privatetx", "Patching contract at {:?}, code: {:?}, state: {:?}", contract_address, contract_code, contract_state); - state.patch_account(contract_address, contract_code, Self::snapshot_to_storage(contract_state))?; - Ok(()) - } + fn patch_account_state( + &self, + contract_address: &Address, + block: BlockId, + state: &mut 
state::State, + ) -> Result<(), Error> { + let contract_code = Arc::new(self.get_decrypted_code(contract_address, block)?); + let contract_state = self.get_decrypted_state(contract_address, block)?; + trace!(target: "privatetx", "Patching contract at {:?}, code: {:?}, state: {:?}", contract_address, contract_code, contract_state); + state.patch_account( + contract_address, + contract_code, + Self::snapshot_to_storage(contract_state), + )?; + Ok(()) + } - pub fn execute_private(&self, transaction: &SignedTransaction, options: TransactOptions, block: BlockId) -> Result, Error> - where - T: Tracer, - V: VMTracer, - { - let mut env_info = self.client.env_info(block).ok_or(Error::StatePruned)?; - env_info.gas_limit = transaction.gas; + pub fn execute_private( + &self, + transaction: &SignedTransaction, + options: TransactOptions, + block: BlockId, + ) -> Result, Error> + where + T: Tracer, + V: VMTracer, + { + let mut env_info = self.client.env_info(block).ok_or(Error::StatePruned)?; + env_info.gas_limit = transaction.gas; - let mut state = self.client.state_at(block).ok_or(Error::StatePruned)?; - // TODO #9825 in case of BlockId::Latest these need to operate on the same state - let contract_address = match transaction.action { - Action::Call(ref contract_address) => { - // Patch current contract state - self.patch_account_state(contract_address, block, &mut state)?; - Some(*contract_address) - }, - Action::Create => None, - }; + let mut state = self.client.state_at(block).ok_or(Error::StatePruned)?; + // TODO #9825 in case of BlockId::Latest these need to operate on the same state + let contract_address = match transaction.action { + Action::Call(ref contract_address) => { + // Patch current contract state + self.patch_account_state(contract_address, block, &mut state)?; + Some(*contract_address) + } + Action::Create => None, + }; - let engine = self.client.engine(); - let sender = transaction.sender(); - let nonce = state.nonce(&sender)?; - let contract_address = 
contract_address.unwrap_or_else(|| { - let (new_address, _) = ethcore_contract_address(engine.create_address_scheme(env_info.number), &sender, &nonce, &transaction.data); - new_address - }); - // Patch other available private contracts' states as well - // TODO: #10133 patch only required for the contract states - if let Some(key_server_account) = self.keys_provider.key_server_account() { - if let Some(available_contracts) = self.keys_provider.available_keys(block, &key_server_account) { - for private_contract in available_contracts { - if private_contract == contract_address { - continue; - } - self.patch_account_state(&private_contract, block, &mut state)?; - } - } - } - let machine = engine.machine(); - let schedule = machine.schedule(env_info.number); - let result = Executive::new(&mut state, &env_info, &machine, &schedule).transact_virtual(transaction, options)?; - let (encrypted_code, encrypted_storage) = { - let (code, storage) = state.into_account(&contract_address)?; - trace!(target: "privatetx", "Private contract executed. code: {:?}, state: {:?}, result: {:?}", code, storage, result.output); - let enc_code = match code { - Some(c) => Some(self.encrypt(&contract_address, &Self::iv_from_address(&contract_address), &c)?), - None => None, - }; - (enc_code, self.encrypt(&contract_address, &Self::iv_from_transaction(transaction), &Self::snapshot_from_storage(&storage))?) 
- }; - Ok(PrivateExecutionResult { - code: encrypted_code, - state: encrypted_storage, - contract_address: contract_address, - result, - }) - } + let engine = self.client.engine(); + let sender = transaction.sender(); + let nonce = state.nonce(&sender)?; + let contract_address = contract_address.unwrap_or_else(|| { + let (new_address, _) = ethcore_contract_address( + engine.create_address_scheme(env_info.number), + &sender, + &nonce, + &transaction.data, + ); + new_address + }); + // Patch other available private contracts' states as well + // TODO: #10133 patch only required for the contract states + if let Some(key_server_account) = self.keys_provider.key_server_account() { + if let Some(available_contracts) = self + .keys_provider + .available_keys(block, &key_server_account) + { + for private_contract in available_contracts { + if private_contract == contract_address { + continue; + } + self.patch_account_state(&private_contract, block, &mut state)?; + } + } + } + let machine = engine.machine(); + let schedule = machine.schedule(env_info.number); + let result = Executive::new(&mut state, &env_info, &machine, &schedule) + .transact_virtual(transaction, options)?; + let (encrypted_code, encrypted_storage) = { + let (code, storage) = state.into_account(&contract_address)?; + trace!(target: "privatetx", "Private contract executed. 
code: {:?}, state: {:?}, result: {:?}", code, storage, result.output); + let enc_code = match code { + Some(c) => Some(self.encrypt( + &contract_address, + &Self::iv_from_address(&contract_address), + &c, + )?), + None => None, + }; + ( + enc_code, + self.encrypt( + &contract_address, + &Self::iv_from_transaction(transaction), + &Self::snapshot_from_storage(&storage), + )?, + ) + }; + Ok(PrivateExecutionResult { + code: encrypted_code, + state: encrypted_storage, + contract_address: contract_address, + result, + }) + } - fn generate_constructor(validators: &[Address], code: Bytes, storage: Bytes) -> Bytes { - let constructor_code = DEFAULT_STUB_CONTRACT.from_hex().expect("Default contract code is valid"); - private_contract::constructor(constructor_code, validators.iter().map(|a| *a).collect::>(), code, storage) - } + fn generate_constructor(validators: &[Address], code: Bytes, storage: Bytes) -> Bytes { + let constructor_code = DEFAULT_STUB_CONTRACT + .from_hex() + .expect("Default contract code is valid"); + private_contract::constructor( + constructor_code, + validators.iter().map(|a| *a).collect::>(), + code, + storage, + ) + } - fn generate_set_state_call(signatures: &[Signature], storage: Bytes) -> Bytes { - private_contract::functions::set_state::encode_input( - storage, - signatures.iter().map(|s| { - let mut v: [u8; 32] = [0; 32]; - v[31] = s.v(); - v - }).collect::>(), - signatures.iter().map(|s| s.r()).collect::>(), - signatures.iter().map(|s| s.s()).collect::>() - ) - } + fn generate_set_state_call(signatures: &[Signature], storage: Bytes) -> Bytes { + private_contract::functions::set_state::encode_input( + storage, + signatures + .iter() + .map(|s| { + let mut v: [u8; 32] = [0; 32]; + v[31] = s.v(); + v + }) + .collect::>(), + signatures.iter().map(|s| s.r()).collect::>(), + signatures.iter().map(|s| s.s()).collect::>(), + ) + } - /// Returns the key from the key server associated with the contract - pub fn contract_key_id(&self, contract_address: 
&Address) -> Result { - Ok(key_server_keys::address_to_key(contract_address)) - } + /// Returns the key from the key server associated with the contract + pub fn contract_key_id(&self, contract_address: &Address) -> Result { + Ok(key_server_keys::address_to_key(contract_address)) + } - /// Create encrypted public contract deployment transaction. - pub fn public_creation_transaction(&self, block: BlockId, source: &SignedTransaction, validators: &[Address], gas_price: U256) -> Result<(Transaction, Address), Error> { - if let Action::Call(_) = source.action { - return Err(Error::BadTransactionType); - } - let sender = source.sender(); - let state = self.client.state_at(block).ok_or(Error::StatePruned)?; - let nonce = state.nonce(&sender)?; - let executed = self.execute_private(source, TransactOptions::with_no_tracing(), block)?; - let header = self.client.block_header(block) - .ok_or(Error::StatePruned) - .and_then(|h| h.decode().map_err(|_| Error::StateIncorrect).into())?; - let (executed_code, executed_state) = (executed.code.unwrap_or_default(), executed.state); - let tx_data = Self::generate_constructor(validators, executed_code.clone(), executed_state.clone()); - let mut tx = Transaction { - nonce: nonce, - action: Action::Create, - gas: u64::max_value().into(), - gas_price: gas_price, - value: source.value, - data: tx_data, - }; - tx.gas = match self.client.estimate_gas(&tx.clone().fake_sign(sender), &state, &header) { - Ok(estimated_gas) => estimated_gas, - Err(_) => self.estimate_tx_gas(validators, &executed_code, &executed_state, &[]), - }; + /// Create encrypted public contract deployment transaction. 
+ pub fn public_creation_transaction( + &self, + block: BlockId, + source: &SignedTransaction, + validators: &[Address], + gas_price: U256, + ) -> Result<(Transaction, Address), Error> { + if let Action::Call(_) = source.action { + return Err(Error::BadTransactionType); + } + let sender = source.sender(); + let state = self.client.state_at(block).ok_or(Error::StatePruned)?; + let nonce = state.nonce(&sender)?; + let executed = self.execute_private(source, TransactOptions::with_no_tracing(), block)?; + let header = self + .client + .block_header(block) + .ok_or(Error::StatePruned) + .and_then(|h| h.decode().map_err(|_| Error::StateIncorrect).into())?; + let (executed_code, executed_state) = (executed.code.unwrap_or_default(), executed.state); + let tx_data = + Self::generate_constructor(validators, executed_code.clone(), executed_state.clone()); + let mut tx = Transaction { + nonce: nonce, + action: Action::Create, + gas: u64::max_value().into(), + gas_price: gas_price, + value: source.value, + data: tx_data, + }; + tx.gas = match self + .client + .estimate_gas(&tx.clone().fake_sign(sender), &state, &header) + { + Ok(estimated_gas) => estimated_gas, + Err(_) => self.estimate_tx_gas(validators, &executed_code, &executed_state, &[]), + }; - Ok((tx, executed.contract_address)) - } + Ok((tx, executed.contract_address)) + } - fn estimate_tx_gas(&self, validators: &[Address], code: &Bytes, state: &Bytes, signatures: &[Signature]) -> U256 { - let default_gas = 650000 + - validators.len() as u64 * 30000 + - code.len() as u64 * 8000 + - signatures.len() as u64 * 50000 + - state.len() as u64 * 8000; - default_gas.into() - } + fn estimate_tx_gas( + &self, + validators: &[Address], + code: &Bytes, + state: &Bytes, + signatures: &[Signature], + ) -> U256 { + let default_gas = 650000 + + validators.len() as u64 * 30000 + + code.len() as u64 * 8000 + + signatures.len() as u64 * 50000 + + state.len() as u64 * 8000; + default_gas.into() + } - /// Create encrypted public contract 
deployment transaction. Returns updated encrypted state. - pub fn execute_private_transaction(&self, block: BlockId, source: &SignedTransaction) -> Result { - if let Action::Create = source.action { - return Err(Error::BadTransactionType); - } - let result = self.execute_private(source, TransactOptions::with_no_tracing(), block)?; - Ok(result.state) - } + /// Create encrypted public contract deployment transaction. Returns updated encrypted state. + pub fn execute_private_transaction( + &self, + block: BlockId, + source: &SignedTransaction, + ) -> Result { + if let Action::Create = source.action { + return Err(Error::BadTransactionType); + } + let result = self.execute_private(source, TransactOptions::with_no_tracing(), block)?; + Ok(result.state) + } - /// Create encrypted public transaction from private transaction. - pub fn public_transaction(&self, state: Bytes, source: &SignedTransaction, signatures: &[Signature], nonce: U256, gas_price: U256) -> Result { - let gas = self.estimate_tx_gas(&[], &Vec::new(), &state, signatures); - Ok(Transaction { - nonce: nonce, - action: source.action.clone(), - gas: gas.into(), - gas_price: gas_price, - value: 0.into(), - data: Self::generate_set_state_call(signatures, state) - }) - } + /// Create encrypted public transaction from private transaction. + pub fn public_transaction( + &self, + state: Bytes, + source: &SignedTransaction, + signatures: &[Signature], + nonce: U256, + gas_price: U256, + ) -> Result { + let gas = self.estimate_tx_gas(&[], &Vec::new(), &state, signatures); + Ok(Transaction { + nonce: nonce, + action: source.action.clone(), + gas: gas.into(), + gas_price: gas_price, + value: 0.into(), + data: Self::generate_set_state_call(signatures, state), + }) + } - /// Call into private contract. 
- pub fn private_call(&self, block: BlockId, transaction: &SignedTransaction) -> Result { - let result = self.execute_private(transaction, TransactOptions::with_no_tracing(), block)?; - Ok(result.result) - } + /// Call into private contract. + pub fn private_call( + &self, + block: BlockId, + transaction: &SignedTransaction, + ) -> Result { + let result = + self.execute_private(transaction, TransactOptions::with_no_tracing(), block)?; + Ok(result.result) + } - /// Returns private validators for a contract. - pub fn get_validators(&self, block: BlockId, address: &Address) -> Result, Error> { - let (data, decoder) = private_contract::functions::get_validators::call(); - let value = self.client.call_contract(block, *address, data)?; - decoder.decode(&value).map_err(|e| Error::Call(format!("Contract call failed {:?}", e)).into()) - } + /// Returns private validators for a contract. + pub fn get_validators(&self, block: BlockId, address: &Address) -> Result, Error> { + let (data, decoder) = private_contract::functions::get_validators::call(); + let value = self.client.call_contract(block, *address, data)?; + decoder + .decode(&value) + .map_err(|e| Error::Call(format!("Contract call failed {:?}", e)).into()) + } - fn get_contract_version(&self, block: BlockId, address: &Address) -> usize { - let (data, decoder) = private_contract::functions::get_version::call(); - match self.client.call_contract(block, *address, data) - .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string())) { - Ok(version) => version.low_u64() as usize, - Err(_) => INITIAL_PRIVATE_CONTRACT_VER, - } - } + fn get_contract_version(&self, block: BlockId, address: &Address) -> usize { + let (data, decoder) = private_contract::functions::get_version::call(); + match self + .client + .call_contract(block, *address, data) + .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string())) + { + Ok(version) => version.low_u64() as usize, + Err(_) => INITIAL_PRIVATE_CONTRACT_VER, + } + } - fn 
state_changes_notify(&self, block: BlockId, address: &Address, originator: &Address, transaction_hash: H256) -> Result<(), Error> { - let (data, _) = private_contract::functions::notify_changes::call(*originator, transaction_hash.0.to_vec()); - let _value = self.client.call_contract(block, *address, data)?; - Ok(()) - } + fn state_changes_notify( + &self, + block: BlockId, + address: &Address, + originator: &Address, + transaction_hash: H256, + ) -> Result<(), Error> { + let (data, _) = private_contract::functions::notify_changes::call( + *originator, + transaction_hash.0.to_vec(), + ); + let _value = self.client.call_contract(block, *address, data)?; + Ok(()) + } } pub trait Importer { - /// Process received private transaction - fn import_private_transaction(&self, _rlp: &[u8]) -> Result; + /// Process received private transaction + fn import_private_transaction(&self, _rlp: &[u8]) -> Result; - /// Add signed private transaction into the store - /// - /// Creates corresponding public transaction if last required signature collected and sends it to the chain - fn import_signed_private_transaction(&self, _rlp: &[u8]) -> Result; + /// Add signed private transaction into the store + /// + /// Creates corresponding public transaction if last required signature collected and sends it to the chain + fn import_signed_private_transaction(&self, _rlp: &[u8]) -> Result; } // TODO [ToDr] Offload more heavy stuff to the IoService thread. @@ -713,70 +933,72 @@ pub trait Importer { // for both verification and execution. 
impl Importer for Arc { - fn import_private_transaction(&self, rlp: &[u8]) -> Result { - trace!(target: "privatetx", "Private transaction received"); - let private_tx: PrivateTransaction = Rlp::new(rlp).as_val()?; - let private_tx_hash = private_tx.hash(); - let contract = private_tx.contract(); - let contract_validators = self.get_validators(BlockId::Latest, &contract)?; + fn import_private_transaction(&self, rlp: &[u8]) -> Result { + trace!(target: "privatetx", "Private transaction received"); + let private_tx: PrivateTransaction = Rlp::new(rlp).as_val()?; + let private_tx_hash = private_tx.hash(); + let contract = private_tx.contract(); + let contract_validators = self.get_validators(BlockId::Latest, &contract)?; - let validation_account = contract_validators - .iter() - .find(|address| self.validator_accounts.contains(address)); + let validation_account = contract_validators + .iter() + .find(|address| self.validator_accounts.contains(address)); - // Extract the original transaction - let encrypted_data = private_tx.encrypted(); - let transaction_bytes = self.decrypt(&contract, &encrypted_data)?; - let original_tx: UnverifiedTransaction = Rlp::new(&transaction_bytes).as_val()?; - let nonce_cache = NonceCache::new(NONCE_CACHE_SIZE); - let local_accounts = HashSet::new(); - // Add to the queue for further verification - self.transactions_for_verification.add_transaction( - original_tx, - validation_account.map(|&account| account), - private_tx, - self.pool_client(&nonce_cache, &local_accounts), - )?; - let provider = Arc::downgrade(self); - let result = self.channel.send(ClientIoMessage::execute(move |_| { - if let Some(provider) = provider.upgrade() { - if let Err(e) = provider.process_verification_queue() { - warn!(target: "privatetx", "Unable to process the queue: {}", e); - } - } - })); - if let Err(e) = result { - warn!(target: "privatetx", "Error sending NewPrivateTransaction message: {:?}", e); - } - Ok(private_tx_hash) - } + // Extract the original 
transaction + let encrypted_data = private_tx.encrypted(); + let transaction_bytes = self.decrypt(&contract, &encrypted_data)?; + let original_tx: UnverifiedTransaction = Rlp::new(&transaction_bytes).as_val()?; + let nonce_cache = NonceCache::new(NONCE_CACHE_SIZE); + let local_accounts = HashSet::new(); + // Add to the queue for further verification + self.transactions_for_verification.add_transaction( + original_tx, + validation_account.map(|&account| account), + private_tx, + self.pool_client(&nonce_cache, &local_accounts), + )?; + let provider = Arc::downgrade(self); + let result = self.channel.send(ClientIoMessage::execute(move |_| { + if let Some(provider) = provider.upgrade() { + if let Err(e) = provider.process_verification_queue() { + warn!(target: "privatetx", "Unable to process the queue: {}", e); + } + } + })); + if let Err(e) = result { + warn!(target: "privatetx", "Error sending NewPrivateTransaction message: {:?}", e); + } + Ok(private_tx_hash) + } - fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result { - let tx: SignedPrivateTransaction = Rlp::new(rlp).as_val()?; - trace!(target: "privatetx", "Signature for private transaction received: {:?}", tx); - let private_hash = tx.private_transaction_hash(); - let provider = Arc::downgrade(self); - let result = self.channel.send(ClientIoMessage::execute(move |_| { - if let Some(provider) = provider.upgrade() { - if let Err(e) = provider.process_signature(&tx) { - warn!(target: "privatetx", "Unable to process the signature: {}", e); - } - } - })); - if let Err(e) = result { - warn!(target: "privatetx", "Error sending NewSignedPrivateTransaction message: {:?}", e); - } - Ok(private_hash) - } + fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result { + let tx: SignedPrivateTransaction = Rlp::new(rlp).as_val()?; + trace!(target: "privatetx", "Signature for private transaction received: {:?}", tx); + let private_hash = tx.private_transaction_hash(); + let provider = Arc::downgrade(self); + 
let result = self.channel.send(ClientIoMessage::execute(move |_| { + if let Some(provider) = provider.upgrade() { + if let Err(e) = provider.process_signature(&tx) { + warn!(target: "privatetx", "Unable to process the signature: {}", e); + } + } + })); + if let Err(e) = result { + warn!(target: "privatetx", "Error sending NewSignedPrivateTransaction message: {:?}", e); + } + Ok(private_hash) + } } impl ChainNotify for Provider { - fn new_blocks(&self, new_blocks: NewBlocks) { - if new_blocks.imported.is_empty() || new_blocks.has_more_blocks_to_import { return } - trace!(target: "privatetx", "New blocks imported, try to prune the queue"); - if let Err(err) = self.process_verification_queue() { - warn!(target: "privatetx", "Cannot prune private transactions queue. error: {:?}", err); - } - self.keys_provider.update_acl_contract(); - } + fn new_blocks(&self, new_blocks: NewBlocks) { + if new_blocks.imported.is_empty() || new_blocks.has_more_blocks_to_import { + return; + } + trace!(target: "privatetx", "New blocks imported, try to prune the queue"); + if let Err(err) = self.process_verification_queue() { + warn!(target: "privatetx", "Cannot prune private transactions queue. error: {:?}", err); + } + self.keys_provider.update_acl_contract(); + } } diff --git a/ethcore/private-tx/src/messages.rs b/ethcore/private-tx/src/messages.rs index 2990fb9b0..0953565de 100644 --- a/ethcore/private-tx/src/messages.rs +++ b/ethcore/private-tx/src/messages.rs @@ -14,102 +14,106 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use ethereum_types::{H256, U256, Address}; use bytes::Bytes; +use ethereum_types::{Address, H256, U256}; +use ethkey::Signature; use hash::keccak; use rlp::Encodable; -use ethkey::Signature; use types::transaction::signature::{add_chain_replay_protection, check_replay_protection}; /// Message with private transaction encrypted #[derive(Default, Debug, Clone, PartialEq, RlpEncodable, RlpDecodable, Eq)] pub struct PrivateTransaction { - /// Encrypted data - encrypted: Bytes, - /// Address of the contract - contract: Address, - /// Hash - hash: H256, + /// Encrypted data + encrypted: Bytes, + /// Address of the contract + contract: Address, + /// Hash + hash: H256, } impl PrivateTransaction { - /// Constructor - pub fn new(encrypted: Bytes, contract: Address) -> Self { - PrivateTransaction { - encrypted, - contract, - hash: 0.into(), - }.compute_hash() - } + /// Constructor + pub fn new(encrypted: Bytes, contract: Address) -> Self { + PrivateTransaction { + encrypted, + contract, + hash: 0.into(), + } + .compute_hash() + } - fn compute_hash(mut self) -> PrivateTransaction { - self.hash = keccak(&*self.rlp_bytes()); - self - } + fn compute_hash(mut self) -> PrivateTransaction { + self.hash = keccak(&*self.rlp_bytes()); + self + } - /// Hash of the private transaction - pub fn hash(&self) -> H256 { - self.hash - } + /// Hash of the private transaction + pub fn hash(&self) -> H256 { + self.hash + } - /// Address of the contract - pub fn contract(&self) -> Address { - self.contract - } + /// Address of the contract + pub fn contract(&self) -> Address { + self.contract + } - /// Encrypted data - pub fn encrypted(&self) -> Bytes { - self.encrypted.clone() - } + /// Encrypted data + pub fn encrypted(&self) -> Bytes { + self.encrypted.clone() + } } /// Message about private transaction's signing #[derive(Default, Debug, Clone, PartialEq, RlpEncodable, RlpDecodable, Eq)] pub struct SignedPrivateTransaction { - /// Hash of the corresponding private transaction - 
private_transaction_hash: H256, - /// Signature of the validator - /// The V field of the signature - v: u64, - /// The R field of the signature - r: U256, - /// The S field of the signature - s: U256, - /// Hash - hash: H256, + /// Hash of the corresponding private transaction + private_transaction_hash: H256, + /// Signature of the validator + /// The V field of the signature + v: u64, + /// The R field of the signature + r: U256, + /// The S field of the signature + s: U256, + /// Hash + hash: H256, } impl SignedPrivateTransaction { - /// Construct a signed private transaction message - pub fn new(private_transaction_hash: H256, sig: Signature, chain_id: Option) -> Self { - SignedPrivateTransaction { - private_transaction_hash: private_transaction_hash, - r: sig.r().into(), - s: sig.s().into(), - v: add_chain_replay_protection(sig.v() as u64, chain_id), - hash: 0.into(), - }.compute_hash() - } + /// Construct a signed private transaction message + pub fn new(private_transaction_hash: H256, sig: Signature, chain_id: Option) -> Self { + SignedPrivateTransaction { + private_transaction_hash: private_transaction_hash, + r: sig.r().into(), + s: sig.s().into(), + v: add_chain_replay_protection(sig.v() as u64, chain_id), + hash: 0.into(), + } + .compute_hash() + } - fn compute_hash(mut self) -> SignedPrivateTransaction { - self.hash = keccak(&*self.rlp_bytes()); - self - } + fn compute_hash(mut self) -> SignedPrivateTransaction { + self.hash = keccak(&*self.rlp_bytes()); + self + } - pub fn standard_v(&self) -> u8 { check_replay_protection(self.v) } + pub fn standard_v(&self) -> u8 { + check_replay_protection(self.v) + } - /// Construct a signature object from the sig. - pub fn signature(&self) -> Signature { - Signature::from_rsv(&self.r.into(), &self.s.into(), self.standard_v()) - } + /// Construct a signature object from the sig. 
+ pub fn signature(&self) -> Signature { + Signature::from_rsv(&self.r.into(), &self.s.into(), self.standard_v()) + } - /// Get the hash of of the original transaction. - pub fn private_transaction_hash(&self) -> H256 { - self.private_transaction_hash - } + /// Get the hash of of the original transaction. + pub fn private_transaction_hash(&self) -> H256 { + self.private_transaction_hash + } - /// Own hash - pub fn hash(&self) -> H256 { - self.hash - } + /// Own hash + pub fn hash(&self) -> H256 { + self.hash + } } diff --git a/ethcore/private-tx/src/private_transactions.rs b/ethcore/private-tx/src/private_transactions.rs index d0456657b..d20efcffe 100644 --- a/ethcore/private-tx/src/private_transactions.rs +++ b/ethcore/private-tx/src/private_transactions.rs @@ -14,21 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::cmp; -use std::collections::{HashMap, HashSet}; +use std::{ + cmp, + collections::{HashMap, HashSet}, + sync::Arc, +}; use bytes::Bytes; +use error::Error; use ethcore_miner::pool; -use ethereum_types::{H256, U256, Address}; -use heapsize::HeapSizeOf; +use ethereum_types::{Address, H256, U256}; use ethkey::Signature; +use heapsize::HeapSizeOf; use messages::PrivateTransaction; use parking_lot::RwLock; -use types::transaction::{UnverifiedTransaction, SignedTransaction}; -use txpool; -use txpool::{VerifiedTransaction, Verifier}; -use error::Error; +use txpool::{self, VerifiedTransaction, Verifier}; +use types::transaction::{SignedTransaction, UnverifiedTransaction}; type Pool = txpool::Pool; @@ -38,227 +39,240 @@ const MAX_QUEUE_LEN: usize = 8312; /// Private transaction stored in queue for verification #[derive(Debug, Clone, PartialEq, Eq)] pub struct VerifiedPrivateTransaction { - /// Original private transaction - pub private_transaction: PrivateTransaction, - /// Address that should be used for verification - pub validator_account: Option
, - /// Resulting verified transaction - pub transaction: SignedTransaction, - /// Original transaction hash - pub transaction_hash: H256, - /// Original transaction sender - pub transaction_sender: Address, + /// Original private transaction + pub private_transaction: PrivateTransaction, + /// Address that should be used for verification + pub validator_account: Option
, + /// Resulting verified transaction + pub transaction: SignedTransaction, + /// Original transaction hash + pub transaction_hash: H256, + /// Original transaction sender + pub transaction_sender: Address, } impl txpool::VerifiedTransaction for VerifiedPrivateTransaction { - type Hash = H256; - type Sender = Address; + type Hash = H256; + type Sender = Address; - fn hash(&self) -> &H256 { - &self.transaction_hash - } + fn hash(&self) -> &H256 { + &self.transaction_hash + } - fn mem_usage(&self) -> usize { - self.transaction.heap_size_of_children() - } + fn mem_usage(&self) -> usize { + self.transaction.heap_size_of_children() + } - fn sender(&self) -> &Address { - &self.transaction_sender - } + fn sender(&self) -> &Address { + &self.transaction_sender + } } impl pool::ScoredTransaction for VerifiedPrivateTransaction { - fn priority(&self) -> pool::Priority { - pool::Priority::Regular - } + fn priority(&self) -> pool::Priority { + pool::Priority::Regular + } - /// Gets transaction gas price. - fn gas_price(&self) -> &U256 { - &self.transaction.gas_price - } + /// Gets transaction gas price. + fn gas_price(&self) -> &U256 { + &self.transaction.gas_price + } - /// Gets transaction nonce. - fn nonce(&self) -> U256 { - self.transaction.nonce - } + /// Gets transaction nonce. + fn nonce(&self) -> U256 { + self.transaction.nonce + } } /// Checks readiness of transactions by looking if the transaction from sender already exists. /// Guarantees only one transaction per sender #[derive(Debug)] pub struct PrivateReadyState { - senders: HashSet
, - state: C, + senders: HashSet
, + state: C, } impl PrivateReadyState { - /// Create new State checker, given client interface. - pub fn new( - state: C, - ) -> Self { - PrivateReadyState { - senders: Default::default(), - state, - } - } + /// Create new State checker, given client interface. + pub fn new(state: C) -> Self { + PrivateReadyState { + senders: Default::default(), + state, + } + } } -impl txpool::Ready for PrivateReadyState { - fn is_ready(&mut self, tx: &VerifiedPrivateTransaction) -> txpool::Readiness { - let sender = tx.sender(); - let state = &self.state; - let state_nonce = state.account_nonce(sender); - if self.senders.contains(sender) { - txpool::Readiness::Future - } else { - self.senders.insert(*sender); - match tx.transaction.nonce.cmp(&state_nonce) { - cmp::Ordering::Greater => txpool::Readiness::Future, - cmp::Ordering::Less => txpool::Readiness::Stale, - cmp::Ordering::Equal => txpool::Readiness::Ready, - } - } - } +impl txpool::Ready + for PrivateReadyState +{ + fn is_ready(&mut self, tx: &VerifiedPrivateTransaction) -> txpool::Readiness { + let sender = tx.sender(); + let state = &self.state; + let state_nonce = state.account_nonce(sender); + if self.senders.contains(sender) { + txpool::Readiness::Future + } else { + self.senders.insert(*sender); + match tx.transaction.nonce.cmp(&state_nonce) { + cmp::Ordering::Greater => txpool::Readiness::Future, + cmp::Ordering::Less => txpool::Readiness::Stale, + cmp::Ordering::Equal => txpool::Readiness::Ready, + } + } + } } /// Storage for private transactions for verification pub struct VerificationStore { - verification_pool: RwLock, - verification_options: pool::verifier::Options, + verification_pool: RwLock, + verification_options: pool::verifier::Options, } impl Default for VerificationStore { - fn default() -> Self { - VerificationStore { - verification_pool: RwLock::new( - txpool::Pool::new( - txpool::NoopListener, - pool::scoring::NonceAndGasPrice(pool::PrioritizationStrategy::GasPriceOnly), - pool::Options { - 
max_count: MAX_QUEUE_LEN, - max_per_sender: MAX_QUEUE_LEN / 10, - max_mem_usage: 8 * 1024 * 1024, - }, - ) - ), - verification_options: pool::verifier::Options { - // TODO [ToDr] This should probably be based on some real values? - minimal_gas_price: 0.into(), - block_gas_limit: 8_000_000.into(), - tx_gas_limit: U256::max_value(), - no_early_reject: false, - }, - } - } + fn default() -> Self { + VerificationStore { + verification_pool: RwLock::new(txpool::Pool::new( + txpool::NoopListener, + pool::scoring::NonceAndGasPrice(pool::PrioritizationStrategy::GasPriceOnly), + pool::Options { + max_count: MAX_QUEUE_LEN, + max_per_sender: MAX_QUEUE_LEN / 10, + max_mem_usage: 8 * 1024 * 1024, + }, + )), + verification_options: pool::verifier::Options { + // TODO [ToDr] This should probably be based on some real values? + minimal_gas_price: 0.into(), + block_gas_limit: 8_000_000.into(), + tx_gas_limit: U256::max_value(), + no_early_reject: false, + }, + } + } } impl VerificationStore { - /// Adds private transaction for verification into the store - pub fn add_transaction( - &self, - transaction: UnverifiedTransaction, - validator_account: Option
, - private_transaction: PrivateTransaction, - client: C, - ) -> Result<(), Error> { + /// Adds private transaction for verification into the store + pub fn add_transaction( + &self, + transaction: UnverifiedTransaction, + validator_account: Option
, + private_transaction: PrivateTransaction, + client: C, + ) -> Result<(), Error> { + let options = self.verification_options.clone(); + // Use pool's verifying pipeline for original transaction's verification + let verifier = + pool::verifier::Verifier::new(client.clone(), options, Default::default(), None); + let unverified = pool::verifier::Transaction::Unverified(transaction); + let verified_tx = verifier.verify_transaction(unverified)?; + let signed_tx: SignedTransaction = verified_tx.signed().clone(); + let signed_hash = signed_tx.hash(); + let signed_sender = signed_tx.sender(); + let verified = VerifiedPrivateTransaction { + private_transaction, + validator_account, + transaction: signed_tx, + transaction_hash: signed_hash, + transaction_sender: signed_sender, + }; + let replace = pool::replace::ReplaceByScoreAndReadiness::new( + self.verification_pool.read().scoring().clone(), + client, + ); + self.verification_pool.write().import(verified, &replace)?; + Ok(()) + } - let options = self.verification_options.clone(); - // Use pool's verifying pipeline for original transaction's verification - let verifier = pool::verifier::Verifier::new(client.clone(), options, Default::default(), None); - let unverified = pool::verifier::Transaction::Unverified(transaction); - let verified_tx = verifier.verify_transaction(unverified)?; - let signed_tx: SignedTransaction = verified_tx.signed().clone(); - let signed_hash = signed_tx.hash(); - let signed_sender = signed_tx.sender(); - let verified = VerifiedPrivateTransaction { - private_transaction, - validator_account, - transaction: signed_tx, - transaction_hash: signed_hash, - transaction_sender: signed_sender, - }; - let replace = pool::replace::ReplaceByScoreAndReadiness::new( - self.verification_pool.read().scoring().clone(), client); - self.verification_pool.write().import(verified, &replace)?; - Ok(()) - } - - /// Drains transactions ready for verification from the pool - /// Returns only one transaction per sender 
because several cannot be verified in a row without verification from other peers - pub fn drain(&self, client: C) -> Vec> { - let ready = PrivateReadyState::new(client); - let transactions: Vec<_> = self.verification_pool.read().pending(ready).collect(); - let mut pool = self.verification_pool.write(); - for tx in &transactions { - pool.remove(tx.hash(), true); - } - transactions - } + /// Drains transactions ready for verification from the pool + /// Returns only one transaction per sender because several cannot be verified in a row without verification from other peers + pub fn drain( + &self, + client: C, + ) -> Vec> { + let ready = PrivateReadyState::new(client); + let transactions: Vec<_> = self.verification_pool.read().pending(ready).collect(); + let mut pool = self.verification_pool.write(); + for tx in &transactions { + pool.remove(tx.hash(), true); + } + transactions + } } /// Desriptor for private transaction stored in queue for signing #[derive(Debug, Clone)] pub struct PrivateTransactionSigningDesc { - /// Original unsigned transaction - pub original_transaction: SignedTransaction, - /// Supposed validators from the contract - pub validators: Vec
, - /// Already obtained signatures - pub received_signatures: Vec, - /// State after transaction execution to compare further with received from validators - pub state: Bytes, - /// Build-in nonce of the contract - pub contract_nonce: U256, + /// Original unsigned transaction + pub original_transaction: SignedTransaction, + /// Supposed validators from the contract + pub validators: Vec
, + /// Already obtained signatures + pub received_signatures: Vec, + /// State after transaction execution to compare further with received from validators + pub state: Bytes, + /// Build-in nonce of the contract + pub contract_nonce: U256, } /// Storage for private transactions for signing #[derive(Default)] pub struct SigningStore { - /// Transactions and descriptors for signing - transactions: HashMap, + /// Transactions and descriptors for signing + transactions: HashMap, } impl SigningStore { - /// Adds new private transaction into the store for signing - pub fn add_transaction( - &mut self, - private_hash: H256, - transaction: SignedTransaction, - validators: Vec
, - state: Bytes, - contract_nonce: U256, - ) -> Result<(), Error> { - if self.transactions.len() > MAX_QUEUE_LEN { - return Err(Error::QueueIsFull); - } + /// Adds new private transaction into the store for signing + pub fn add_transaction( + &mut self, + private_hash: H256, + transaction: SignedTransaction, + validators: Vec
, + state: Bytes, + contract_nonce: U256, + ) -> Result<(), Error> { + if self.transactions.len() > MAX_QUEUE_LEN { + return Err(Error::QueueIsFull); + } - self.transactions.insert(private_hash, PrivateTransactionSigningDesc { - original_transaction: transaction.clone(), - validators: validators.clone(), - received_signatures: Vec::new(), - state, - contract_nonce, - }); - Ok(()) - } + self.transactions.insert( + private_hash, + PrivateTransactionSigningDesc { + original_transaction: transaction.clone(), + validators: validators.clone(), + received_signatures: Vec::new(), + state, + contract_nonce, + }, + ); + Ok(()) + } - /// Get copy of private transaction's description from the storage - pub fn get(&self, private_hash: &H256) -> Option { - self.transactions.get(private_hash).cloned() - } + /// Get copy of private transaction's description from the storage + pub fn get(&self, private_hash: &H256) -> Option { + self.transactions.get(private_hash).cloned() + } - /// Removes desc from the store (after verification is completed) - pub fn remove(&mut self, private_hash: &H256) -> Result<(), Error> { - self.transactions.remove(private_hash); - Ok(()) - } + /// Removes desc from the store (after verification is completed) + pub fn remove(&mut self, private_hash: &H256) -> Result<(), Error> { + self.transactions.remove(private_hash); + Ok(()) + } - /// Adds received signature for the stored private transaction - pub fn add_signature(&mut self, private_hash: &H256, signature: Signature) -> Result<(), Error> { - let desc = self.transactions.get_mut(private_hash).ok_or_else(|| Error::PrivateTransactionNotFound)?; - if !desc.received_signatures.contains(&signature) { - desc.received_signatures.push(signature); - } - Ok(()) - } + /// Adds received signature for the stored private transaction + pub fn add_signature( + &mut self, + private_hash: &H256, + signature: Signature, + ) -> Result<(), Error> { + let desc = self + .transactions + .get_mut(private_hash) + .ok_or_else(|| 
Error::PrivateTransactionNotFound)?; + if !desc.received_signatures.contains(&signature) { + desc.received_signatures.push(signature); + } + Ok(()) + } } diff --git a/ethcore/private-tx/tests/private_contract.rs b/ethcore/private-tx/tests/private_contract.rs index 6365b10ee..23dc5d98d 100644 --- a/ethcore/private-tx/tests/private_contract.rs +++ b/ethcore/private-tx/tests/private_contract.rs @@ -28,248 +28,394 @@ extern crate rustc_hex; #[macro_use] extern crate log; -use std::sync::Arc; use rustc_hex::{FromHex, ToHex}; +use std::sync::Arc; -use types::ids::BlockId; -use types::transaction::{Transaction, Action}; -use ethcore::CreateContractAddress; -use ethcore::client::BlockChainClient; -use ethcore::executive::{contract_address}; -use ethcore::miner::Miner; -use ethcore::test_helpers::{generate_dummy_client, push_block_with_transactions}; -use ethkey::{Secret, KeyPair, Signature}; +use ethcore::{ + client::BlockChainClient, + executive::contract_address, + miner::Miner, + test_helpers::{generate_dummy_client, push_block_with_transactions}, + CreateContractAddress, +}; +use ethkey::{KeyPair, Secret, Signature}; use hash::keccak; +use types::{ + ids::BlockId, + transaction::{Action, Transaction}, +}; use ethcore_private_tx::{NoopEncryptor, Provider, ProviderConfig, StoringKeyProvider}; #[test] fn private_contract() { - // This uses a simple private contract: contract Test1 { bytes32 public x; function setX(bytes32 _x) { x = _x; } } - let _ = ::env_logger::try_init(); - let client = generate_dummy_client(0); - let chain_id = client.signing_chain_id(); - let key1 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000011")).unwrap(); - let _key2 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000012")).unwrap(); - let key3 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000013")).unwrap(); - let key4 = 
KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000014")).unwrap(); + // This uses a simple private contract: contract Test1 { bytes32 public x; function setX(bytes32 _x) { x = _x; } } + let _ = ::env_logger::try_init(); + let client = generate_dummy_client(0); + let chain_id = client.signing_chain_id(); + let key1 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000011", + )) + .unwrap(); + let _key2 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000012", + )) + .unwrap(); + let key3 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000013", + )) + .unwrap(); + let key4 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000014", + )) + .unwrap(); - let signer = Arc::new(ethcore_private_tx::KeyPairSigner(vec![key1.clone(), key3.clone(), key4.clone()])); + let signer = Arc::new(ethcore_private_tx::KeyPairSigner(vec![ + key1.clone(), + key3.clone(), + key4.clone(), + ])); - let config = ProviderConfig{ - validator_accounts: vec![key3.address(), key4.address()], - signer_account: None, - }; + let config = ProviderConfig { + validator_accounts: vec![key3.address(), key4.address()], + signer_account: None, + }; - let io = ethcore_io::IoChannel::disconnected(); - let miner = Arc::new(Miner::new_for_tests(&::ethcore::spec::Spec::new_test(), None)); - let private_keys = Arc::new(StoringKeyProvider::default()); - let pm = Arc::new(Provider::new( - client.clone(), - miner, - signer.clone(), - Box::new(NoopEncryptor::default()), - config, - io, - private_keys, - )); + let io = ethcore_io::IoChannel::disconnected(); + let miner = Arc::new(Miner::new_for_tests( + &::ethcore::spec::Spec::new_test(), + None, + )); + let private_keys = Arc::new(StoringKeyProvider::default()); + let pm = Arc::new(Provider::new( + client.clone(), + miner, + 
signer.clone(), + Box::new(NoopEncryptor::default()), + config, + io, + private_keys, + )); - let (address, _) = contract_address(CreateContractAddress::FromSenderAndNonce, &key1.address(), &0.into(), &[]); + let (address, _) = contract_address( + CreateContractAddress::FromSenderAndNonce, + &key1.address(), + &0.into(), + &[], + ); - trace!("Creating private contract"); - let private_contract_test = "6060604052341561000f57600080fd5b60d88061001d6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630c55699c146046578063bc64b76d14607457600080fd5b3415605057600080fd5b60566098565b60405180826000191660001916815260200191505060405180910390f35b3415607e57600080fd5b6096600480803560001916906020019091905050609e565b005b60005481565b8060008160001916905550505600a165627a7a723058206acbdf4b15ca4c2d43e1b1879b830451a34f1e9d02ff1f2f394d8d857e79d2080029".from_hex().unwrap(); - let mut private_create_tx = Transaction::default(); - private_create_tx.action = Action::Create; - private_create_tx.data = private_contract_test; - private_create_tx.gas = 200000.into(); - let private_create_tx_signed = private_create_tx.sign(&key1.secret(), None); - let validators = vec![key3.address(), key4.address()]; - let (public_tx, _) = pm.public_creation_transaction(BlockId::Latest, &private_create_tx_signed, &validators, 0.into()).unwrap(); - let public_tx = public_tx.sign(&key1.secret(), chain_id); - trace!("Transaction created. 
Pushing block"); - push_block_with_transactions(&client, &[public_tx]); + trace!("Creating private contract"); + let private_contract_test = "6060604052341561000f57600080fd5b60d88061001d6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630c55699c146046578063bc64b76d14607457600080fd5b3415605057600080fd5b60566098565b60405180826000191660001916815260200191505060405180910390f35b3415607e57600080fd5b6096600480803560001916906020019091905050609e565b005b60005481565b8060008160001916905550505600a165627a7a723058206acbdf4b15ca4c2d43e1b1879b830451a34f1e9d02ff1f2f394d8d857e79d2080029".from_hex().unwrap(); + let mut private_create_tx = Transaction::default(); + private_create_tx.action = Action::Create; + private_create_tx.data = private_contract_test; + private_create_tx.gas = 200000.into(); + let private_create_tx_signed = private_create_tx.sign(&key1.secret(), None); + let validators = vec![key3.address(), key4.address()]; + let (public_tx, _) = pm + .public_creation_transaction( + BlockId::Latest, + &private_create_tx_signed, + &validators, + 0.into(), + ) + .unwrap(); + let public_tx = public_tx.sign(&key1.secret(), chain_id); + trace!("Transaction created. 
Pushing block"); + push_block_with_transactions(&client, &[public_tx]); - trace!("Querying default private state"); - let mut query_tx = Transaction::default(); - query_tx.action = Action::Call(address.clone()); - query_tx.data = "0c55699c".from_hex().unwrap(); // getX - query_tx.gas = 50000.into(); - query_tx.nonce = 1.into(); - let query_tx = query_tx.sign(&key1.secret(), chain_id); - let result = pm.private_call(BlockId::Latest, &query_tx).unwrap(); - assert_eq!(&result.output[..], &("0000000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap()[..])); - assert_eq!(pm.get_validators(BlockId::Latest, &address).unwrap(), validators); + trace!("Querying default private state"); + let mut query_tx = Transaction::default(); + query_tx.action = Action::Call(address.clone()); + query_tx.data = "0c55699c".from_hex().unwrap(); // getX + query_tx.gas = 50000.into(); + query_tx.nonce = 1.into(); + let query_tx = query_tx.sign(&key1.secret(), chain_id); + let result = pm.private_call(BlockId::Latest, &query_tx).unwrap(); + assert_eq!( + &result.output[..], + &("0000000000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap()[..]) + ); + assert_eq!( + pm.get_validators(BlockId::Latest, &address).unwrap(), + validators + ); - trace!("Modifying private state"); - let mut private_tx = Transaction::default(); - private_tx.action = Action::Call(address.clone()); - private_tx.data = "bc64b76d2a00000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap(); //setX(42) - private_tx.gas = 120000.into(); - private_tx.nonce = 1.into(); - let private_tx = private_tx.sign(&key1.secret(), None); - let private_contract_nonce = pm.get_contract_nonce(&address, BlockId::Latest).unwrap(); - let private_state = pm.execute_private_transaction(BlockId::Latest, &private_tx).unwrap(); - let nonced_state_hash = pm.calculate_state_hash(&private_state, private_contract_nonce); - let signatures: Vec<_> = [&key3, 
&key4].iter().map(|k| - Signature::from(::ethkey::sign(&k.secret(), &nonced_state_hash).unwrap().into_electrum())).collect(); - let public_tx = pm.public_transaction(private_state, &private_tx, &signatures, 1.into(), 0.into()).unwrap(); - let public_tx = public_tx.sign(&key1.secret(), chain_id); - push_block_with_transactions(&client, &[public_tx]); + trace!("Modifying private state"); + let mut private_tx = Transaction::default(); + private_tx.action = Action::Call(address.clone()); + private_tx.data = "bc64b76d2a00000000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap(); //setX(42) + private_tx.gas = 120000.into(); + private_tx.nonce = 1.into(); + let private_tx = private_tx.sign(&key1.secret(), None); + let private_contract_nonce = pm.get_contract_nonce(&address, BlockId::Latest).unwrap(); + let private_state = pm + .execute_private_transaction(BlockId::Latest, &private_tx) + .unwrap(); + let nonced_state_hash = pm.calculate_state_hash(&private_state, private_contract_nonce); + let signatures: Vec<_> = [&key3, &key4] + .iter() + .map(|k| { + Signature::from( + ::ethkey::sign(&k.secret(), &nonced_state_hash) + .unwrap() + .into_electrum(), + ) + }) + .collect(); + let public_tx = pm + .public_transaction(private_state, &private_tx, &signatures, 1.into(), 0.into()) + .unwrap(); + let public_tx = public_tx.sign(&key1.secret(), chain_id); + push_block_with_transactions(&client, &[public_tx]); - trace!("Querying private state"); - let mut query_tx = Transaction::default(); - query_tx.action = Action::Call(address.clone()); - query_tx.data = "0c55699c".from_hex().unwrap(); // getX - query_tx.gas = 50000.into(); - query_tx.nonce = 2.into(); - let query_tx = query_tx.sign(&key1.secret(), chain_id); - let result = pm.private_call(BlockId::Latest, &query_tx).unwrap(); - assert_eq!(&result.output[..], &("2a00000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap()[..])); - assert_eq!(pm.get_validators(BlockId::Latest, 
&address).unwrap(), validators); + trace!("Querying private state"); + let mut query_tx = Transaction::default(); + query_tx.action = Action::Call(address.clone()); + query_tx.data = "0c55699c".from_hex().unwrap(); // getX + query_tx.gas = 50000.into(); + query_tx.nonce = 2.into(); + let query_tx = query_tx.sign(&key1.secret(), chain_id); + let result = pm.private_call(BlockId::Latest, &query_tx).unwrap(); + assert_eq!( + &result.output[..], + &("2a00000000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap()[..]) + ); + assert_eq!( + pm.get_validators(BlockId::Latest, &address).unwrap(), + validators + ); - // Now try modification with just one signature - trace!("Modifying private state"); - let mut private_tx = Transaction::default(); - private_tx.action = Action::Call(address.clone()); - private_tx.data = "bc64b76d2b00000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap(); //setX(43) - private_tx.gas = 120000.into(); - private_tx.nonce = 2.into(); - let private_tx = private_tx.sign(&key1.secret(), None); - let private_state = pm.execute_private_transaction(BlockId::Latest, &private_tx).unwrap(); - let private_state_hash = keccak(&private_state); - let signatures: Vec<_> = [&key4].iter().map(|k| - Signature::from(::ethkey::sign(&k.secret(), &private_state_hash).unwrap().into_electrum())).collect(); - let public_tx = pm.public_transaction(private_state, &private_tx, &signatures, 2.into(), 0.into()).unwrap(); - let public_tx = public_tx.sign(&key1.secret(), chain_id); - push_block_with_transactions(&client, &[public_tx]); + // Now try modification with just one signature + trace!("Modifying private state"); + let mut private_tx = Transaction::default(); + private_tx.action = Action::Call(address.clone()); + private_tx.data = "bc64b76d2b00000000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap(); //setX(43) + private_tx.gas = 120000.into(); + private_tx.nonce = 2.into(); + let 
private_tx = private_tx.sign(&key1.secret(), None); + let private_state = pm + .execute_private_transaction(BlockId::Latest, &private_tx) + .unwrap(); + let private_state_hash = keccak(&private_state); + let signatures: Vec<_> = [&key4] + .iter() + .map(|k| { + Signature::from( + ::ethkey::sign(&k.secret(), &private_state_hash) + .unwrap() + .into_electrum(), + ) + }) + .collect(); + let public_tx = pm + .public_transaction(private_state, &private_tx, &signatures, 2.into(), 0.into()) + .unwrap(); + let public_tx = public_tx.sign(&key1.secret(), chain_id); + push_block_with_transactions(&client, &[public_tx]); - trace!("Querying private state"); - let mut query_tx = Transaction::default(); - query_tx.action = Action::Call(address.clone()); - query_tx.data = "0c55699c".from_hex().unwrap(); // getX - query_tx.gas = 50000.into(); - query_tx.nonce = 3.into(); - let query_tx = query_tx.sign(&key1.secret(), chain_id); - let result = pm.private_call(BlockId::Latest, &query_tx).unwrap(); - assert_eq!(result.output, "2a00000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap()); + trace!("Querying private state"); + let mut query_tx = Transaction::default(); + query_tx.action = Action::Call(address.clone()); + query_tx.data = "0c55699c".from_hex().unwrap(); // getX + query_tx.gas = 50000.into(); + query_tx.nonce = 3.into(); + let query_tx = query_tx.sign(&key1.secret(), chain_id); + let result = pm.private_call(BlockId::Latest, &query_tx).unwrap(); + assert_eq!( + result.output, + "2a00000000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap() + ); } #[test] fn call_other_private_contract() { - // This test verifies calls private contract methods from another one - // Two contract will be deployed - // The same contract A: - // contract Test1 { - // bytes32 public x; - // function setX(bytes32 _x) { - // x = _x; - // } - // } - // And the following contract B: - // contract Deployed { - // function setX(uint) {} - // 
function x() returns (uint) {} - //} - // contract Existing { - // Deployed dc; - // function Existing(address t) { - // dc = Deployed(t); - // } - // function getX() returns (uint) { - // return dc.x(); - // } - // } - //ethcore_logger::init_log(); + // This test verifies calls private contract methods from another one + // Two contract will be deployed + // The same contract A: + // contract Test1 { + // bytes32 public x; + // function setX(bytes32 _x) { + // x = _x; + // } + // } + // And the following contract B: + // contract Deployed { + // function setX(uint) {} + // function x() returns (uint) {} + //} + // contract Existing { + // Deployed dc; + // function Existing(address t) { + // dc = Deployed(t); + // } + // function getX() returns (uint) { + // return dc.x(); + // } + // } + //ethcore_logger::init_log(); - // Create client and provider - let client = generate_dummy_client(0); - let chain_id = client.signing_chain_id(); - let key1 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000011")).unwrap(); - let _key2 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000012")).unwrap(); - let key3 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000013")).unwrap(); - let key4 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000014")).unwrap(); - let signer = Arc::new(ethcore_private_tx::KeyPairSigner(vec![key1.clone(), key3.clone(), key4.clone()])); + // Create client and provider + let client = generate_dummy_client(0); + let chain_id = client.signing_chain_id(); + let key1 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000011", + )) + .unwrap(); + let _key2 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000012", + )) + .unwrap(); + let key3 = KeyPair::from_secret(Secret::from( 
+ "0000000000000000000000000000000000000000000000000000000000000013", + )) + .unwrap(); + let key4 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000014", + )) + .unwrap(); + let signer = Arc::new(ethcore_private_tx::KeyPairSigner(vec![ + key1.clone(), + key3.clone(), + key4.clone(), + ])); - let config = ProviderConfig{ - validator_accounts: vec![key3.address(), key4.address()], - signer_account: None, - }; + let config = ProviderConfig { + validator_accounts: vec![key3.address(), key4.address()], + signer_account: None, + }; - let io = ethcore_io::IoChannel::disconnected(); - let miner = Arc::new(Miner::new_for_tests(&::ethcore::spec::Spec::new_test(), None)); - let private_keys = Arc::new(StoringKeyProvider::default()); - let pm = Arc::new(Provider::new( - client.clone(), - miner, - signer.clone(), - Box::new(NoopEncryptor::default()), - config, - io, - private_keys.clone(), - )); + let io = ethcore_io::IoChannel::disconnected(); + let miner = Arc::new(Miner::new_for_tests( + &::ethcore::spec::Spec::new_test(), + None, + )); + let private_keys = Arc::new(StoringKeyProvider::default()); + let pm = Arc::new(Provider::new( + client.clone(), + miner, + signer.clone(), + Box::new(NoopEncryptor::default()), + config, + io, + private_keys.clone(), + )); - // Deploy contract A - let (address_a, _) = contract_address(CreateContractAddress::FromSenderAndNonce, &key1.address(), &0.into(), &[]); - trace!("Creating private contract A"); - let private_contract_a_test = 
"6060604052341561000f57600080fd5b60d88061001d6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630c55699c146046578063bc64b76d14607457600080fd5b3415605057600080fd5b60566098565b60405180826000191660001916815260200191505060405180910390f35b3415607e57600080fd5b6096600480803560001916906020019091905050609e565b005b60005481565b8060008160001916905550505600a165627a7a723058206acbdf4b15ca4c2d43e1b1879b830451a34f1e9d02ff1f2f394d8d857e79d2080029".from_hex().unwrap(); - let mut private_create_tx1 = Transaction::default(); - private_create_tx1.action = Action::Create; - private_create_tx1.data = private_contract_a_test; - private_create_tx1.gas = 200000.into(); - private_create_tx1.nonce = 0.into(); - let private_create_tx_signed = private_create_tx1.sign(&key1.secret(), None); - let validators = vec![key3.address(), key4.address()]; - let (public_tx1, _) = pm.public_creation_transaction(BlockId::Latest, &private_create_tx_signed, &validators, 0.into()).unwrap(); - let public_tx1 = public_tx1.sign(&key1.secret(), chain_id); - trace!("Transaction created. 
Pushing block"); - push_block_with_transactions(&client, &[public_tx1]); + // Deploy contract A + let (address_a, _) = contract_address( + CreateContractAddress::FromSenderAndNonce, + &key1.address(), + &0.into(), + &[], + ); + trace!("Creating private contract A"); + let private_contract_a_test = "6060604052341561000f57600080fd5b60d88061001d6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630c55699c146046578063bc64b76d14607457600080fd5b3415605057600080fd5b60566098565b60405180826000191660001916815260200191505060405180910390f35b3415607e57600080fd5b6096600480803560001916906020019091905050609e565b005b60005481565b8060008160001916905550505600a165627a7a723058206acbdf4b15ca4c2d43e1b1879b830451a34f1e9d02ff1f2f394d8d857e79d2080029".from_hex().unwrap(); + let mut private_create_tx1 = Transaction::default(); + private_create_tx1.action = Action::Create; + private_create_tx1.data = private_contract_a_test; + private_create_tx1.gas = 200000.into(); + private_create_tx1.nonce = 0.into(); + let private_create_tx_signed = private_create_tx1.sign(&key1.secret(), None); + let validators = vec![key3.address(), key4.address()]; + let (public_tx1, _) = pm + .public_creation_transaction( + BlockId::Latest, + &private_create_tx_signed, + &validators, + 0.into(), + ) + .unwrap(); + let public_tx1 = public_tx1.sign(&key1.secret(), chain_id); + trace!("Transaction created. 
Pushing block"); + push_block_with_transactions(&client, &[public_tx1]); - // Deploy contract B - let (address_b, _) = contract_address(CreateContractAddress::FromSenderAndNonce, &key1.address(), &1.into(), &[]); - trace!("Creating private contract B"); - // Build constructor data - let mut deploy_data = "6060604052341561000f57600080fd5b6040516020806101c583398101604052808051906020019091905050806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505061014a8061007b6000396000f300606060405260043610610041576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680635197c7aa14610046575b600080fd5b341561005157600080fd5b61005961006f565b6040518082815260200191505060405180910390f35b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16630c55699c6000604051602001526040518163ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401602060405180830381600087803b15156100fe57600080fd5b6102c65a03f1151561010f57600080fd5b505050604051805190509050905600a165627a7a723058207f8994e02725b47d76ec73e5c54a338d27b306dd1c830276bff2d75fcd1a5c920029000000000000000000000000".to_string(); - deploy_data.push_str(&address_a.to_vec().to_hex()); - let private_contract_b_test = deploy_data.from_hex().unwrap(); - let mut private_create_tx2 = Transaction::default(); - private_create_tx2.action = Action::Create; - private_create_tx2.data = private_contract_b_test; - private_create_tx2.gas = 200000.into(); - private_create_tx2.nonce = 1.into(); - let private_create_tx_signed = private_create_tx2.sign(&key1.secret(), None); - let (public_tx2, _) = pm.public_creation_transaction(BlockId::Latest, &private_create_tx_signed, &validators, 0.into()).unwrap(); - let public_tx2 = public_tx2.sign(&key1.secret(), chain_id); - trace!("Transaction created. 
Pushing block"); - push_block_with_transactions(&client, &[public_tx2]); + // Deploy contract B + let (address_b, _) = contract_address( + CreateContractAddress::FromSenderAndNonce, + &key1.address(), + &1.into(), + &[], + ); + trace!("Creating private contract B"); + // Build constructor data + let mut deploy_data = "6060604052341561000f57600080fd5b6040516020806101c583398101604052808051906020019091905050806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505061014a8061007b6000396000f300606060405260043610610041576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680635197c7aa14610046575b600080fd5b341561005157600080fd5b61005961006f565b6040518082815260200191505060405180910390f35b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16630c55699c6000604051602001526040518163ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401602060405180830381600087803b15156100fe57600080fd5b6102c65a03f1151561010f57600080fd5b505050604051805190509050905600a165627a7a723058207f8994e02725b47d76ec73e5c54a338d27b306dd1c830276bff2d75fcd1a5c920029000000000000000000000000".to_string(); + deploy_data.push_str(&address_a.to_vec().to_hex()); + let private_contract_b_test = deploy_data.from_hex().unwrap(); + let mut private_create_tx2 = Transaction::default(); + private_create_tx2.action = Action::Create; + private_create_tx2.data = private_contract_b_test; + private_create_tx2.gas = 200000.into(); + private_create_tx2.nonce = 1.into(); + let private_create_tx_signed = private_create_tx2.sign(&key1.secret(), None); + let (public_tx2, _) = pm + .public_creation_transaction( + BlockId::Latest, + &private_create_tx_signed, + &validators, + 0.into(), + ) + .unwrap(); + let public_tx2 = public_tx2.sign(&key1.secret(), chain_id); + trace!("Transaction created. 
Pushing block"); + push_block_with_transactions(&client, &[public_tx2]); - // Let provider know, that it has access to both keys for A and B - private_keys.set_available_keys(&vec![address_a, address_b]); + // Let provider know, that it has access to both keys for A and B + private_keys.set_available_keys(&vec![address_a, address_b]); - // Call A.setx(42) - trace!("Modifying private state"); - let mut private_tx = Transaction::default(); - private_tx.action = Action::Call(address_a.clone()); - private_tx.data = "bc64b76d2a00000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap(); //setX(42) - private_tx.gas = 120000.into(); - private_tx.nonce = 2.into(); - let private_tx = private_tx.sign(&key1.secret(), None); - let private_contract_nonce = pm.get_contract_nonce(&address_b, BlockId::Latest).unwrap(); - let private_state = pm.execute_private_transaction(BlockId::Latest, &private_tx).unwrap(); - let nonced_state_hash = pm.calculate_state_hash(&private_state, private_contract_nonce); - let signatures: Vec<_> = [&key3, &key4].iter().map(|k| - Signature::from(::ethkey::sign(&k.secret(), &nonced_state_hash).unwrap().into_electrum())).collect(); - let public_tx = pm.public_transaction(private_state, &private_tx, &signatures, 2.into(), 0.into()).unwrap(); - let public_tx = public_tx.sign(&key1.secret(), chain_id); - push_block_with_transactions(&client, &[public_tx]); + // Call A.setx(42) + trace!("Modifying private state"); + let mut private_tx = Transaction::default(); + private_tx.action = Action::Call(address_a.clone()); + private_tx.data = "bc64b76d2a00000000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap(); //setX(42) + private_tx.gas = 120000.into(); + private_tx.nonce = 2.into(); + let private_tx = private_tx.sign(&key1.secret(), None); + let private_contract_nonce = pm.get_contract_nonce(&address_b, BlockId::Latest).unwrap(); + let private_state = pm + .execute_private_transaction(BlockId::Latest, 
&private_tx) + .unwrap(); + let nonced_state_hash = pm.calculate_state_hash(&private_state, private_contract_nonce); + let signatures: Vec<_> = [&key3, &key4] + .iter() + .map(|k| { + Signature::from( + ::ethkey::sign(&k.secret(), &nonced_state_hash) + .unwrap() + .into_electrum(), + ) + }) + .collect(); + let public_tx = pm + .public_transaction(private_state, &private_tx, &signatures, 2.into(), 0.into()) + .unwrap(); + let public_tx = public_tx.sign(&key1.secret(), chain_id); + push_block_with_transactions(&client, &[public_tx]); - // Call B.getX() - trace!("Querying private state"); - let mut query_tx = Transaction::default(); - query_tx.action = Action::Call(address_b.clone()); - query_tx.data = "5197c7aa".from_hex().unwrap(); // getX - query_tx.gas = 50000.into(); - query_tx.nonce = 3.into(); - let query_tx = query_tx.sign(&key1.secret(), chain_id); - let result = pm.private_call(BlockId::Latest, &query_tx).unwrap(); - assert_eq!(&result.output[..], &("2a00000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap()[..])); + // Call B.getX() + trace!("Querying private state"); + let mut query_tx = Transaction::default(); + query_tx.action = Action::Call(address_b.clone()); + query_tx.data = "5197c7aa".from_hex().unwrap(); // getX + query_tx.gas = 50000.into(); + query_tx.nonce = 3.into(); + let query_tx = query_tx.sign(&key1.secret(), chain_id); + let result = pm.private_call(BlockId::Latest, &query_tx).unwrap(); + assert_eq!( + &result.output[..], + &("2a00000000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap()[..]) + ); } diff --git a/ethcore/service/src/error.rs b/ethcore/service/src/error.rs index c73cb0dfc..0d03a02d4 100644 --- a/ethcore/service/src/error.rs +++ b/ethcore/service/src/error.rs @@ -19,13 +19,13 @@ #![allow(deprecated)] use ethcore; -use io; use ethcore_private_tx; +use io; error_chain! 
{ - foreign_links { - Ethcore(ethcore::error::Error); - IoError(io::IoError); - PrivateTransactions(ethcore_private_tx::Error); - } + foreign_links { + Ethcore(ethcore::error::Error); + IoError(io::IoError); + PrivateTransactions(ethcore_private_tx::Error); + } } diff --git a/ethcore/service/src/service.rs b/ethcore/service/src/service.rs index 095397037..69ec5b8d0 100644 --- a/ethcore/service/src/service.rs +++ b/ethcore/service/src/service.rs @@ -16,198 +16,205 @@ //! Creates and registers client and network services. -use std::sync::Arc; -use std::path::Path; -use std::time::Duration; +use std::{path::Path, sync::Arc, time::Duration}; use ansi_term::Colour; use ethereum_types::H256; -use io::{IoContext, TimerToken, IoHandler, IoService, IoError}; +use io::{IoContext, IoError, IoHandler, IoService, TimerToken}; use stop_guard::StopGuard; -use sync::PrivateTxHandler; use blockchain::{BlockChainDB, BlockChainDBHandler}; -use ethcore::client::{Client, ClientConfig, ChainNotify, ClientIoMessage}; -use ethcore::miner::Miner; -use ethcore::snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams}; -use ethcore::snapshot::{SnapshotService as _SnapshotService, RestorationStatus, Error as SnapshotError}; -use ethcore::spec::Spec; -use ethcore::error::{Error as EthcoreError, ErrorKind}; - +use ethcore::{ + client::{ChainNotify, Client, ClientConfig, ClientIoMessage}, + error::{Error as EthcoreError, ErrorKind}, + miner::Miner, + snapshot::{ + service::{Service as SnapshotService, ServiceParams as SnapServiceParams}, + Error as SnapshotError, RestorationStatus, SnapshotService as _SnapshotService, + }, + spec::Spec, +}; +use sync::PrivateTxHandler; use ethcore_private_tx::{self, Importer, Signer}; use Error; pub struct PrivateTxService { - provider: Arc, + provider: Arc, } impl PrivateTxService { - fn new(provider: Arc) -> Self { - PrivateTxService { - provider, - } - } + fn new(provider: Arc) -> Self { + PrivateTxService { provider } + } - /// 
Returns underlying provider. - pub fn provider(&self) -> Arc { - self.provider.clone() - } + /// Returns underlying provider. + pub fn provider(&self) -> Arc { + self.provider.clone() + } } impl PrivateTxHandler for PrivateTxService { - fn import_private_transaction(&self, rlp: &[u8]) -> Result { - match self.provider.import_private_transaction(rlp) { - Ok(import_result) => Ok(import_result), - Err(err) => { - warn!(target: "privatetx", "Unable to import private transaction packet: {}", err); - bail!(err.to_string()) - } - } - } + fn import_private_transaction(&self, rlp: &[u8]) -> Result { + match self.provider.import_private_transaction(rlp) { + Ok(import_result) => Ok(import_result), + Err(err) => { + warn!(target: "privatetx", "Unable to import private transaction packet: {}", err); + bail!(err.to_string()) + } + } + } - fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result { - match self.provider.import_signed_private_transaction(rlp) { - Ok(import_result) => Ok(import_result), - Err(err) => { - warn!(target: "privatetx", "Unable to import signed private transaction packet: {}", err); - bail!(err.to_string()) - } - } - } + fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result { + match self.provider.import_signed_private_transaction(rlp) { + Ok(import_result) => Ok(import_result), + Err(err) => { + warn!(target: "privatetx", "Unable to import signed private transaction packet: {}", err); + bail!(err.to_string()) + } + } + } } /// Client service setup. Creates and registers client and network services with the IO subsystem. pub struct ClientService { - io_service: Arc>, - client: Arc, - snapshot: Arc, - private_tx: Arc, - database: Arc, - _stop_guard: StopGuard, + io_service: Arc>, + client: Arc, + snapshot: Arc, + private_tx: Arc, + database: Arc, + _stop_guard: StopGuard, } impl ClientService { - /// Start the `ClientService`. 
- pub fn start( - config: ClientConfig, - spec: &Spec, - blockchain_db: Arc, - snapshot_path: &Path, - restoration_db_handler: Box, - _ipc_path: &Path, - miner: Arc, - signer: Arc, - encryptor: Box, - private_tx_conf: ethcore_private_tx::ProviderConfig, - private_encryptor_conf: ethcore_private_tx::EncryptorConfig, - ) -> Result - { - let io_service = IoService::::start()?; + /// Start the `ClientService`. + pub fn start( + config: ClientConfig, + spec: &Spec, + blockchain_db: Arc, + snapshot_path: &Path, + restoration_db_handler: Box, + _ipc_path: &Path, + miner: Arc, + signer: Arc, + encryptor: Box, + private_tx_conf: ethcore_private_tx::ProviderConfig, + private_encryptor_conf: ethcore_private_tx::EncryptorConfig, + ) -> Result { + let io_service = IoService::::start()?; - info!("Configured for {} using {} engine", Colour::White.bold().paint(spec.name.clone()), Colour::Yellow.bold().paint(spec.engine.name())); + info!( + "Configured for {} using {} engine", + Colour::White.bold().paint(spec.name.clone()), + Colour::Yellow.bold().paint(spec.engine.name()) + ); - let pruning = config.pruning; - let client = Client::new( - config, - &spec, - blockchain_db.clone(), - miner.clone(), - io_service.channel(), - )?; - miner.set_io_channel(io_service.channel()); - miner.set_in_chain_checker(&client.clone()); + let pruning = config.pruning; + let client = Client::new( + config, + &spec, + blockchain_db.clone(), + miner.clone(), + io_service.channel(), + )?; + miner.set_io_channel(io_service.channel()); + miner.set_in_chain_checker(&client.clone()); - let snapshot_params = SnapServiceParams { - engine: spec.engine.clone(), - genesis_block: spec.genesis_block(), - restoration_db_handler: restoration_db_handler, - pruning: pruning, - channel: io_service.channel(), - snapshot_root: snapshot_path.into(), - client: client.clone(), - }; - let snapshot = Arc::new(SnapshotService::new(snapshot_params)?); + let snapshot_params = SnapServiceParams { + engine: spec.engine.clone(), + 
genesis_block: spec.genesis_block(), + restoration_db_handler: restoration_db_handler, + pruning: pruning, + channel: io_service.channel(), + snapshot_root: snapshot_path.into(), + client: client.clone(), + }; + let snapshot = Arc::new(SnapshotService::new(snapshot_params)?); - let private_keys = Arc::new(ethcore_private_tx::SecretStoreKeys::new( - client.clone(), - private_encryptor_conf.key_server_account, - )); - let provider = Arc::new(ethcore_private_tx::Provider::new( - client.clone(), - miner, - signer, - encryptor, - private_tx_conf, - io_service.channel(), - private_keys, - )); - let private_tx = Arc::new(PrivateTxService::new(provider)); + let private_keys = Arc::new(ethcore_private_tx::SecretStoreKeys::new( + client.clone(), + private_encryptor_conf.key_server_account, + )); + let provider = Arc::new(ethcore_private_tx::Provider::new( + client.clone(), + miner, + signer, + encryptor, + private_tx_conf, + io_service.channel(), + private_keys, + )); + let private_tx = Arc::new(PrivateTxService::new(provider)); - let client_io = Arc::new(ClientIoHandler { - client: client.clone(), - snapshot: snapshot.clone(), - }); - io_service.register_handler(client_io)?; + let client_io = Arc::new(ClientIoHandler { + client: client.clone(), + snapshot: snapshot.clone(), + }); + io_service.register_handler(client_io)?; - spec.engine.register_client(Arc::downgrade(&client) as _); + spec.engine.register_client(Arc::downgrade(&client) as _); - let stop_guard = StopGuard::new(); + let stop_guard = StopGuard::new(); - Ok(ClientService { - io_service: Arc::new(io_service), - client: client, - snapshot: snapshot, - private_tx, - database: blockchain_db, - _stop_guard: stop_guard, - }) - } + Ok(ClientService { + io_service: Arc::new(io_service), + client: client, + snapshot: snapshot, + private_tx, + database: blockchain_db, + _stop_guard: stop_guard, + }) + } - /// Get general IO interface - pub fn register_io_handler(&self, handler: Arc + Send>) -> Result<(), IoError> { - 
self.io_service.register_handler(handler) - } + /// Get general IO interface + pub fn register_io_handler( + &self, + handler: Arc + Send>, + ) -> Result<(), IoError> { + self.io_service.register_handler(handler) + } - /// Get client interface - pub fn client(&self) -> Arc { - self.client.clone() - } + /// Get client interface + pub fn client(&self) -> Arc { + self.client.clone() + } - /// Get snapshot interface. - pub fn snapshot_service(&self) -> Arc { - self.snapshot.clone() - } + /// Get snapshot interface. + pub fn snapshot_service(&self) -> Arc { + self.snapshot.clone() + } - /// Get private transaction service. - pub fn private_tx_service(&self) -> Arc { - self.private_tx.clone() - } + /// Get private transaction service. + pub fn private_tx_service(&self) -> Arc { + self.private_tx.clone() + } - /// Get network service component - pub fn io(&self) -> Arc> { - self.io_service.clone() - } + /// Get network service component + pub fn io(&self) -> Arc> { + self.io_service.clone() + } - /// Set the actor to be notified on certain chain events - pub fn add_notify(&self, notify: Arc) { - self.client.add_notify(notify); - } + /// Set the actor to be notified on certain chain events + pub fn add_notify(&self, notify: Arc) { + self.client.add_notify(notify); + } - /// Get a handle to the database. - pub fn db(&self) -> Arc { self.database.clone() } + /// Get a handle to the database. 
+ pub fn db(&self) -> Arc { + self.database.clone() + } - /// Shutdown the Client Service - pub fn shutdown(&self) { - trace!(target: "shutdown", "Shutting down Client Service"); - self.snapshot.shutdown(); - } + /// Shutdown the Client Service + pub fn shutdown(&self) { + trace!(target: "shutdown", "Shutting down Client Service"); + self.snapshot.shutdown(); + } } /// IO interface for the Client handler struct ClientIoHandler { - client: Arc, - snapshot: Arc, + client: Arc, + snapshot: Arc, } const CLIENT_TICK_TIMER: TimerToken = 0; @@ -217,118 +224,125 @@ const CLIENT_TICK: Duration = Duration::from_secs(5); const SNAPSHOT_TICK: Duration = Duration::from_secs(10); impl IoHandler for ClientIoHandler { - fn initialize(&self, io: &IoContext) { - io.register_timer(CLIENT_TICK_TIMER, CLIENT_TICK).expect("Error registering client timer"); - io.register_timer(SNAPSHOT_TICK_TIMER, SNAPSHOT_TICK).expect("Error registering snapshot timer"); - } + fn initialize(&self, io: &IoContext) { + io.register_timer(CLIENT_TICK_TIMER, CLIENT_TICK) + .expect("Error registering client timer"); + io.register_timer(SNAPSHOT_TICK_TIMER, SNAPSHOT_TICK) + .expect("Error registering snapshot timer"); + } - fn timeout(&self, _io: &IoContext, timer: TimerToken) { - trace_time!("service::read"); - match timer { - CLIENT_TICK_TIMER => { - use ethcore::snapshot::SnapshotService; - let snapshot_restoration = if let RestorationStatus::Ongoing{..} = self.snapshot.status() { true } else { false }; - self.client.tick(snapshot_restoration) - }, - SNAPSHOT_TICK_TIMER => self.snapshot.tick(), - _ => warn!("IO service triggered unregistered timer '{}'", timer), - } - } + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + trace_time!("service::read"); + match timer { + CLIENT_TICK_TIMER => { + use ethcore::snapshot::SnapshotService; + let snapshot_restoration = + if let RestorationStatus::Ongoing { .. 
} = self.snapshot.status() { + true + } else { + false + }; + self.client.tick(snapshot_restoration) + } + SNAPSHOT_TICK_TIMER => self.snapshot.tick(), + _ => warn!("IO service triggered unregistered timer '{}'", timer), + } + } - fn message(&self, _io: &IoContext, net_message: &ClientIoMessage) { - trace_time!("service::message"); - use std::thread; + fn message(&self, _io: &IoContext, net_message: &ClientIoMessage) { + trace_time!("service::message"); + use std::thread; - match *net_message { - ClientIoMessage::BlockVerified => { - self.client.import_verified_blocks(); - } - ClientIoMessage::BeginRestoration(ref manifest) => { - if let Err(e) = self.snapshot.init_restore(manifest.clone(), true) { - warn!("Failed to initialize snapshot restoration: {}", e); - } - } - ClientIoMessage::FeedStateChunk(ref hash, ref chunk) => { - self.snapshot.feed_state_chunk(*hash, chunk) - } - ClientIoMessage::FeedBlockChunk(ref hash, ref chunk) => { - self.snapshot.feed_block_chunk(*hash, chunk) - } - ClientIoMessage::TakeSnapshot(num) => { - let client = self.client.clone(); - let snapshot = self.snapshot.clone(); + match *net_message { + ClientIoMessage::BlockVerified => { + self.client.import_verified_blocks(); + } + ClientIoMessage::BeginRestoration(ref manifest) => { + if let Err(e) = self.snapshot.init_restore(manifest.clone(), true) { + warn!("Failed to initialize snapshot restoration: {}", e); + } + } + ClientIoMessage::FeedStateChunk(ref hash, ref chunk) => { + self.snapshot.feed_state_chunk(*hash, chunk) + } + ClientIoMessage::FeedBlockChunk(ref hash, ref chunk) => { + self.snapshot.feed_block_chunk(*hash, chunk) + } + ClientIoMessage::TakeSnapshot(num) => { + let client = self.client.clone(); + let snapshot = self.snapshot.clone(); - let res = thread::Builder::new().name("Periodic Snapshot".into()).spawn(move || { - if let Err(e) = snapshot.take_snapshot(&*client, num) { - match e { - EthcoreError(ErrorKind::Snapshot(SnapshotError::SnapshotAborted), _) => 
info!("Snapshot aborted"), - _ => warn!("Failed to take snapshot at block #{}: {}", num, e), - } + let res = thread::Builder::new() + .name("Periodic Snapshot".into()) + .spawn(move || { + if let Err(e) = snapshot.take_snapshot(&*client, num) { + match e { + EthcoreError( + ErrorKind::Snapshot(SnapshotError::SnapshotAborted), + _, + ) => info!("Snapshot aborted"), + _ => warn!("Failed to take snapshot at block #{}: {}", num, e), + } + } + }); - } - }); - - if let Err(e) = res { - debug!(target: "snapshot", "Failed to initialize periodic snapshot thread: {:?}", e); - } - }, - ClientIoMessage::Execute(ref exec) => { - (*exec.0)(&self.client); - } - _ => {} // ignore other messages - } - } + if let Err(e) = res { + debug!(target: "snapshot", "Failed to initialize periodic snapshot thread: {:?}", e); + } + } + ClientIoMessage::Execute(ref exec) => { + (*exec.0)(&self.client); + } + _ => {} // ignore other messages + } + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::{time, thread}; + use std::{sync::Arc, thread, time}; - use tempdir::TempDir; + use tempdir::TempDir; - use ethcore_db::NUM_COLUMNS; - use ethcore::client::ClientConfig; - use ethcore::miner::Miner; - use ethcore::spec::Spec; - use ethcore::test_helpers; - use kvdb_rocksdb::{DatabaseConfig, CompactionProfile}; - use super::*; + use super::*; + use ethcore::{client::ClientConfig, miner::Miner, spec::Spec, test_helpers}; + use ethcore_db::NUM_COLUMNS; + use kvdb_rocksdb::{CompactionProfile, DatabaseConfig}; - use ethcore_private_tx; + use ethcore_private_tx; - #[test] - fn it_can_be_started() { - let tempdir = TempDir::new("").unwrap(); - let client_path = tempdir.path().join("client"); - let snapshot_path = tempdir.path().join("snapshot"); + #[test] + fn it_can_be_started() { + let tempdir = TempDir::new("").unwrap(); + let client_path = tempdir.path().join("client"); + let snapshot_path = tempdir.path().join("snapshot"); - let client_config = ClientConfig::default(); - let mut 
client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS); + let client_config = ClientConfig::default(); + let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS); - client_db_config.memory_budget = client_config.db_cache_size; - client_db_config.compaction = CompactionProfile::auto(&client_path); + client_db_config.memory_budget = client_config.db_cache_size; + client_db_config.compaction = CompactionProfile::auto(&client_path); - let client_db_handler = test_helpers::restoration_db_handler(client_db_config.clone()); - let client_db = client_db_handler.open(&client_path).unwrap(); - let restoration_db_handler = test_helpers::restoration_db_handler(client_db_config); + let client_db_handler = test_helpers::restoration_db_handler(client_db_config.clone()); + let client_db = client_db_handler.open(&client_path).unwrap(); + let restoration_db_handler = test_helpers::restoration_db_handler(client_db_config); - let spec = Spec::new_test(); - let service = ClientService::start( - ClientConfig::default(), - &spec, - client_db, - &snapshot_path, - restoration_db_handler, - tempdir.path(), - Arc::new(Miner::new_for_tests(&spec, None)), - Arc::new(ethcore_private_tx::DummySigner), - Box::new(ethcore_private_tx::NoopEncryptor), - Default::default(), - Default::default(), - ); - assert!(service.is_ok()); - drop(service.unwrap()); - thread::park_timeout(time::Duration::from_millis(100)); - } + let spec = Spec::new_test(); + let service = ClientService::start( + ClientConfig::default(), + &spec, + client_db, + &snapshot_path, + restoration_db_handler, + tempdir.path(), + Arc::new(Miner::new_for_tests(&spec, None)), + Arc::new(ethcore_private_tx::DummySigner), + Box::new(ethcore_private_tx::NoopEncryptor), + Default::default(), + Default::default(), + ); + assert!(service.is_ok()); + drop(service.unwrap()); + thread::park_timeout(time::Duration::from_millis(100)); + } } diff --git a/ethcore/service/src/stop_guard.rs b/ethcore/service/src/stop_guard.rs index 
168219520..3284bf83f 100644 --- a/ethcore/service/src/stop_guard.rs +++ b/ethcore/service/src/stop_guard.rs @@ -16,25 +16,24 @@ //! Stop guard mod -use std::sync::Arc; -use std::sync::atomic::*; +use std::sync::{atomic::*, Arc}; /// Stop guard that will set a stop flag on drop pub struct StopGuard { - flag: Arc, + flag: Arc, } impl StopGuard { - /// Create a stop guard - pub fn new() -> StopGuard { - StopGuard { - flag: Arc::new(AtomicBool::new(false)) - } - } + /// Create a stop guard + pub fn new() -> StopGuard { + StopGuard { + flag: Arc::new(AtomicBool::new(false)), + } + } } impl Drop for StopGuard { - fn drop(&mut self) { - self.flag.store(true, Ordering::Relaxed) - } + fn drop(&mut self) { + self.flag.store(true, Ordering::Relaxed) + } } diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index a389c009b..ee90086fa 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -16,8 +16,8 @@ //! DB backend wrapper for Account trie use ethereum_types::H256; -use hash::{KECCAK_NULL_RLP, keccak}; -use hash_db::{HashDB, AsHashDB}; +use hash::{keccak, KECCAK_NULL_RLP}; +use hash_db::{AsHashDB, HashDB}; use keccak_hasher::KeccakHasher; use kvdb::DBValue; use rlp::NULL_RLP; @@ -29,255 +29,284 @@ use ethereum_types::Address; // leaves the first 96 bits untouched in order to support partial key lookup. #[inline] fn combine_key<'a>(address_hash: &'a H256, key: &'a H256) -> H256 { - let mut dst = key.clone(); - { - let last_src: &[u8] = &*address_hash; - let last_dst: &mut [u8] = &mut *dst; - for (k, a) in last_dst[12..].iter_mut().zip(&last_src[12..]) { - *k ^= *a - } - } + let mut dst = key.clone(); + { + let last_src: &[u8] = &*address_hash; + let last_dst: &mut [u8] = &mut *dst; + for (k, a) in last_dst[12..].iter_mut().zip(&last_src[12..]) { + *k ^= *a + } + } - dst + dst } /// A factory for different kinds of account dbs. #[derive(Debug, Clone)] pub enum Factory { - /// Mangle hashes based on address. This is the default. 
- Mangled, - /// Don't mangle hashes. - Plain, + /// Mangle hashes based on address. This is the default. + Mangled, + /// Don't mangle hashes. + Plain, } impl Default for Factory { - fn default() -> Self { Factory::Mangled } + fn default() -> Self { + Factory::Mangled + } } impl Factory { - /// Create a read-only accountdb. - /// This will panic when write operations are called. - pub fn readonly<'db>(&self, db: &'db HashDB, address_hash: H256) -> Box + 'db> { - match *self { - Factory::Mangled => Box::new(AccountDB::from_hash(db, address_hash)), - Factory::Plain => Box::new(Wrapping(db)), - } - } + /// Create a read-only accountdb. + /// This will panic when write operations are called. + pub fn readonly<'db>( + &self, + db: &'db HashDB, + address_hash: H256, + ) -> Box + 'db> { + match *self { + Factory::Mangled => Box::new(AccountDB::from_hash(db, address_hash)), + Factory::Plain => Box::new(Wrapping(db)), + } + } - /// Create a new mutable hashdb. - pub fn create<'db>(&self, db: &'db mut HashDB, address_hash: H256) -> Box + 'db> { - match *self { - Factory::Mangled => Box::new(AccountDBMut::from_hash(db, address_hash)), - Factory::Plain => Box::new(WrappingMut(db)), - } - } + /// Create a new mutable hashdb. + pub fn create<'db>( + &self, + db: &'db mut HashDB, + address_hash: H256, + ) -> Box + 'db> { + match *self { + Factory::Mangled => Box::new(AccountDBMut::from_hash(db, address_hash)), + Factory::Plain => Box::new(WrappingMut(db)), + } + } } // TODO: introduce HashDBMut? /// DB backend wrapper for Account trie /// Transforms trie node keys for the database pub struct AccountDB<'db> { - db: &'db HashDB, - address_hash: H256, + db: &'db HashDB, + address_hash: H256, } impl<'db> AccountDB<'db> { - /// Create a new AccountDB from an address. - #[cfg(test)] - pub fn new(db: &'db HashDB, address: &Address) -> Self { - Self::from_hash(db, keccak(address)) - } + /// Create a new AccountDB from an address. 
+ #[cfg(test)] + pub fn new(db: &'db HashDB, address: &Address) -> Self { + Self::from_hash(db, keccak(address)) + } - /// Create a new AcountDB from an address' hash. - pub fn from_hash(db: &'db HashDB, address_hash: H256) -> Self { - AccountDB { - db: db, - address_hash: address_hash, - } - } + /// Create a new AcountDB from an address' hash. + pub fn from_hash(db: &'db HashDB, address_hash: H256) -> Self { + AccountDB { + db: db, + address_hash: address_hash, + } + } } impl<'db> AsHashDB for AccountDB<'db> { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } impl<'db> HashDB for AccountDB<'db> { - fn get(&self, key: &H256) -> Option { - if key == &KECCAK_NULL_RLP { - return Some(DBValue::from_slice(&NULL_RLP)); - } - self.db.get(&combine_key(&self.address_hash, key)) - } + fn get(&self, key: &H256) -> Option { + if key == &KECCAK_NULL_RLP { + return Some(DBValue::from_slice(&NULL_RLP)); + } + self.db.get(&combine_key(&self.address_hash, key)) + } - fn contains(&self, key: &H256) -> bool { - if key == &KECCAK_NULL_RLP { - return true; - } - self.db.contains(&combine_key(&self.address_hash, key)) - } + fn contains(&self, key: &H256) -> bool { + if key == &KECCAK_NULL_RLP { + return true; + } + self.db.contains(&combine_key(&self.address_hash, key)) + } - fn insert(&mut self, _value: &[u8]) -> H256 { - unimplemented!() - } + fn insert(&mut self, _value: &[u8]) -> H256 { + unimplemented!() + } - fn emplace(&mut self, _key: H256, _value: DBValue) { - unimplemented!() - } + fn emplace(&mut self, _key: H256, _value: DBValue) { + unimplemented!() + } - fn remove(&mut self, _key: &H256) { - unimplemented!() - } + fn remove(&mut self, _key: &H256) { + unimplemented!() + } } /// DB backend wrapper for Account trie pub struct AccountDBMut<'db> { - db: &'db mut HashDB, - address_hash: H256, + db: &'db mut HashDB, + 
address_hash: H256, } impl<'db> AccountDBMut<'db> { - /// Create a new AccountDB from an address. - #[cfg(test)] - pub fn new(db: &'db mut HashDB, address: &Address) -> Self { - Self::from_hash(db, keccak(address)) - } + /// Create a new AccountDB from an address. + #[cfg(test)] + pub fn new(db: &'db mut HashDB, address: &Address) -> Self { + Self::from_hash(db, keccak(address)) + } - /// Create a new AcountDB from an address' hash. - pub fn from_hash(db: &'db mut HashDB, address_hash: H256) -> Self { - AccountDBMut { - db: db, - address_hash: address_hash, - } - } + /// Create a new AcountDB from an address' hash. + pub fn from_hash(db: &'db mut HashDB, address_hash: H256) -> Self { + AccountDBMut { + db: db, + address_hash: address_hash, + } + } - #[cfg(test)] - pub fn immutable(&'db self) -> AccountDB<'db> { - AccountDB { db: self.db, address_hash: self.address_hash.clone() } - } + #[cfg(test)] + pub fn immutable(&'db self) -> AccountDB<'db> { + AccountDB { + db: self.db, + address_hash: self.address_hash.clone(), + } + } } -impl<'db> HashDB for AccountDBMut<'db>{ - fn get(&self, key: &H256) -> Option { - if key == &KECCAK_NULL_RLP { - return Some(DBValue::from_slice(&NULL_RLP)); - } - self.db.get(&combine_key(&self.address_hash, key)) - } +impl<'db> HashDB for AccountDBMut<'db> { + fn get(&self, key: &H256) -> Option { + if key == &KECCAK_NULL_RLP { + return Some(DBValue::from_slice(&NULL_RLP)); + } + self.db.get(&combine_key(&self.address_hash, key)) + } - fn contains(&self, key: &H256) -> bool { - if key == &KECCAK_NULL_RLP { - return true; - } - self.db.contains(&combine_key(&self.address_hash, key)) - } + fn contains(&self, key: &H256) -> bool { + if key == &KECCAK_NULL_RLP { + return true; + } + self.db.contains(&combine_key(&self.address_hash, key)) + } - fn insert(&mut self, value: &[u8]) -> H256 { - if value == &NULL_RLP { - return KECCAK_NULL_RLP.clone(); - } - let k = keccak(value); - let ak = combine_key(&self.address_hash, &k); - self.db.emplace(ak, 
DBValue::from_slice(value)); - k - } + fn insert(&mut self, value: &[u8]) -> H256 { + if value == &NULL_RLP { + return KECCAK_NULL_RLP.clone(); + } + let k = keccak(value); + let ak = combine_key(&self.address_hash, &k); + self.db.emplace(ak, DBValue::from_slice(value)); + k + } - fn emplace(&mut self, key: H256, value: DBValue) { - if key == KECCAK_NULL_RLP { - return; - } - let key = combine_key(&self.address_hash, &key); - self.db.emplace(key, value) - } + fn emplace(&mut self, key: H256, value: DBValue) { + if key == KECCAK_NULL_RLP { + return; + } + let key = combine_key(&self.address_hash, &key); + self.db.emplace(key, value) + } - fn remove(&mut self, key: &H256) { - if key == &KECCAK_NULL_RLP { - return; - } - let key = combine_key(&self.address_hash, key); - self.db.remove(&key) - } + fn remove(&mut self, key: &H256) { + if key == &KECCAK_NULL_RLP { + return; + } + let key = combine_key(&self.address_hash, key); + self.db.remove(&key) + } } impl<'db> AsHashDB for AccountDBMut<'db> { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } struct Wrapping<'db>(&'db HashDB); impl<'db> AsHashDB for Wrapping<'db> { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } impl<'db> HashDB for Wrapping<'db> { - fn get(&self, key: &H256) -> Option { - if key == &KECCAK_NULL_RLP { - return Some(DBValue::from_slice(&NULL_RLP)); - } - self.0.get(key) - } + fn get(&self, key: &H256) -> Option { + if key == &KECCAK_NULL_RLP { + return Some(DBValue::from_slice(&NULL_RLP)); + } + self.0.get(key) + } - fn contains(&self, key: &H256) -> bool { - if key == &KECCAK_NULL_RLP { - return true; - } - self.0.contains(key) - } + fn contains(&self, key: &H256) -> bool { + if key == 
&KECCAK_NULL_RLP { + return true; + } + self.0.contains(key) + } - fn insert(&mut self, _value: &[u8]) -> H256 { - unimplemented!() - } + fn insert(&mut self, _value: &[u8]) -> H256 { + unimplemented!() + } - fn emplace(&mut self, _key: H256, _value: DBValue) { - unimplemented!() - } + fn emplace(&mut self, _key: H256, _value: DBValue) { + unimplemented!() + } - fn remove(&mut self, _key: &H256) { - unimplemented!() - } + fn remove(&mut self, _key: &H256) { + unimplemented!() + } } struct WrappingMut<'db>(&'db mut HashDB); impl<'db> AsHashDB for WrappingMut<'db> { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } -impl<'db> HashDB for WrappingMut<'db>{ - fn get(&self, key: &H256) -> Option { - if key == &KECCAK_NULL_RLP { - return Some(DBValue::from_slice(&NULL_RLP)); - } - self.0.get(key) - } +impl<'db> HashDB for WrappingMut<'db> { + fn get(&self, key: &H256) -> Option { + if key == &KECCAK_NULL_RLP { + return Some(DBValue::from_slice(&NULL_RLP)); + } + self.0.get(key) + } - fn contains(&self, key: &H256) -> bool { - if key == &KECCAK_NULL_RLP { - return true; - } - self.0.contains(key) - } + fn contains(&self, key: &H256) -> bool { + if key == &KECCAK_NULL_RLP { + return true; + } + self.0.contains(key) + } - fn insert(&mut self, value: &[u8]) -> H256 { - if value == &NULL_RLP { - return KECCAK_NULL_RLP.clone(); - } - self.0.insert(value) - } + fn insert(&mut self, value: &[u8]) -> H256 { + if value == &NULL_RLP { + return KECCAK_NULL_RLP.clone(); + } + self.0.insert(value) + } - fn emplace(&mut self, key: H256, value: DBValue) { - if key == KECCAK_NULL_RLP { - return; - } - self.0.emplace(key, value) - } + fn emplace(&mut self, key: H256, value: DBValue) { + if key == KECCAK_NULL_RLP { + return; + } + self.0.emplace(key, value) + } - fn remove(&mut self, key: &H256) { - if key == &KECCAK_NULL_RLP { - return; 
- } - self.0.remove(key) - } + fn remove(&mut self, key: &H256) { + if key == &KECCAK_NULL_RLP { + return; + } + self.0.remove(key) + } } diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 56cfc1c4c..6761d0448 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -31,18 +31,16 @@ //! `ExecutedBlock` is an underlaying data structure used by all structs above to store block //! related info. -use std::{cmp, ops}; -use std::collections::HashSet; -use std::sync::Arc; +use std::{cmp, collections::HashSet, ops, sync::Arc}; use bytes::Bytes; -use ethereum_types::{H256, U256, Address, Bloom}; +use ethereum_types::{Address, Bloom, H256, U256}; use engines::EthEngine; -use error::{Error, BlockError}; +use error::{BlockError, Error}; use factory::Factories; -use state_db::StateDB; use state::State; +use state_db::StateDB; use trace::Tracing; use triehash::ordered_trie_root; use unexpected::{Mismatch, OutOfBounds}; @@ -50,18 +48,20 @@ use verification::PreverifiedBlock; use vm::{EnvInfo, LastHashes}; use hash::keccak; -use rlp::{RlpStream, Encodable, encode_list}; -use types::transaction::{SignedTransaction, Error as TransactionError}; -use types::header::{Header, ExtendedHeader}; -use types::receipt::{Receipt, TransactionOutcome}; +use rlp::{encode_list, Encodable, RlpStream}; +use types::{ + header::{ExtendedHeader, Header}, + receipt::{Receipt, TransactionOutcome}, + transaction::{Error as TransactionError, SignedTransaction}, +}; /// Block that is ready for transactions to be added. /// /// It's a bit like a Vec, except that whenever a transaction is pushed, we execute it and /// maintain the system `state()`. We also archive execution receipts in preparation for later block creation. 
pub struct OpenBlock<'x> { - block: ExecutedBlock, - engine: &'x EthEngine, + block: ExecutedBlock, + engine: &'x EthEngine, } /// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields, @@ -70,8 +70,8 @@ pub struct OpenBlock<'x> { /// There is no function available to push a transaction. #[derive(Clone)] pub struct ClosedBlock { - block: ExecutedBlock, - unclosed_state: State, + block: ExecutedBlock, + unclosed_state: State, } /// Just like `ClosedBlock` except that we can't reopen it and it's faster. @@ -79,629 +79,771 @@ pub struct ClosedBlock { /// We actually store the post-`Engine::on_close_block` state, unlike in `ClosedBlock` where it's the pre. #[derive(Clone)] pub struct LockedBlock { - block: ExecutedBlock, + block: ExecutedBlock, } /// A block that has a valid seal. /// /// The block's header has valid seal arguments. The block cannot be reversed into a `ClosedBlock` or `OpenBlock`. pub struct SealedBlock { - block: ExecutedBlock, + block: ExecutedBlock, } /// An internal type for a block's common elements. #[derive(Clone)] pub struct ExecutedBlock { - /// Executed block header. - pub header: Header, - /// Executed transactions. - pub transactions: Vec, - /// Uncles. - pub uncles: Vec
, - /// Transaction receipts. - pub receipts: Vec, - /// Hashes of already executed transactions. - pub transactions_set: HashSet, - /// Underlaying state. - pub state: State, - /// Transaction traces. - pub traces: Tracing, - /// Hashes of last 256 blocks. - pub last_hashes: Arc, + /// Executed block header. + pub header: Header, + /// Executed transactions. + pub transactions: Vec, + /// Uncles. + pub uncles: Vec
, + /// Transaction receipts. + pub receipts: Vec, + /// Hashes of already executed transactions. + pub transactions_set: HashSet, + /// Underlaying state. + pub state: State, + /// Transaction traces. + pub traces: Tracing, + /// Hashes of last 256 blocks. + pub last_hashes: Arc, } impl ExecutedBlock { - /// Create a new block from the given `state`. - fn new(state: State, last_hashes: Arc, tracing: bool) -> ExecutedBlock { - ExecutedBlock { - header: Default::default(), - transactions: Default::default(), - uncles: Default::default(), - receipts: Default::default(), - transactions_set: Default::default(), - state: state, - traces: if tracing { - Tracing::enabled() - } else { - Tracing::Disabled - }, - last_hashes: last_hashes, - } - } + /// Create a new block from the given `state`. + fn new(state: State, last_hashes: Arc, tracing: bool) -> ExecutedBlock { + ExecutedBlock { + header: Default::default(), + transactions: Default::default(), + uncles: Default::default(), + receipts: Default::default(), + transactions_set: Default::default(), + state: state, + traces: if tracing { + Tracing::enabled() + } else { + Tracing::Disabled + }, + last_hashes: last_hashes, + } + } - /// Get the environment info concerning this block. - pub fn env_info(&self) -> EnvInfo { - // TODO: memoise. - EnvInfo { - number: self.header.number(), - author: self.header.author().clone(), - timestamp: self.header.timestamp(), - difficulty: self.header.difficulty().clone(), - last_hashes: self.last_hashes.clone(), - gas_used: self.receipts.last().map_or(U256::zero(), |r| r.gas_used), - gas_limit: self.header.gas_limit().clone(), - } - } + /// Get the environment info concerning this block. + pub fn env_info(&self) -> EnvInfo { + // TODO: memoise. 
+ EnvInfo { + number: self.header.number(), + author: self.header.author().clone(), + timestamp: self.header.timestamp(), + difficulty: self.header.difficulty().clone(), + last_hashes: self.last_hashes.clone(), + gas_used: self.receipts.last().map_or(U256::zero(), |r| r.gas_used), + gas_limit: self.header.gas_limit().clone(), + } + } - /// Get mutable access to a state. - pub fn state_mut(&mut self) -> &mut State { - &mut self.state - } + /// Get mutable access to a state. + pub fn state_mut(&mut self) -> &mut State { + &mut self.state + } - /// Get mutable reference to traces. - pub fn traces_mut(&mut self) -> &mut Tracing { - &mut self.traces - } + /// Get mutable reference to traces. + pub fn traces_mut(&mut self) -> &mut Tracing { + &mut self.traces + } } /// Trait for an object that owns an `ExecutedBlock` pub trait Drain { - /// Returns `ExecutedBlock` - fn drain(self) -> ExecutedBlock; + /// Returns `ExecutedBlock` + fn drain(self) -> ExecutedBlock; } impl<'x> OpenBlock<'x> { - /// Create a new `OpenBlock` ready for transaction pushing. - pub fn new<'a, I: IntoIterator>( - engine: &'x EthEngine, - factories: Factories, - tracing: bool, - db: StateDB, - parent: &Header, - last_hashes: Arc, - author: Address, - gas_range_target: (U256, U256), - extra_data: Bytes, - is_epoch_begin: bool, - ancestry: I, - ) -> Result { - let number = parent.number() + 1; - let state = State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce(number), factories)?; - let mut r = OpenBlock { - block: ExecutedBlock::new(state, last_hashes, tracing), - engine: engine, - }; + /// Create a new `OpenBlock` ready for transaction pushing. 
+ pub fn new<'a, I: IntoIterator>( + engine: &'x EthEngine, + factories: Factories, + tracing: bool, + db: StateDB, + parent: &Header, + last_hashes: Arc, + author: Address, + gas_range_target: (U256, U256), + extra_data: Bytes, + is_epoch_begin: bool, + ancestry: I, + ) -> Result { + let number = parent.number() + 1; + let state = State::from_existing( + db, + parent.state_root().clone(), + engine.account_start_nonce(number), + factories, + )?; + let mut r = OpenBlock { + block: ExecutedBlock::new(state, last_hashes, tracing), + engine: engine, + }; - r.block.header.set_parent_hash(parent.hash()); - r.block.header.set_number(number); - r.block.header.set_author(author); - r.block.header.set_timestamp(engine.open_block_header_timestamp(parent.timestamp())); - r.block.header.set_extra_data(extra_data); + r.block.header.set_parent_hash(parent.hash()); + r.block.header.set_number(number); + r.block.header.set_author(author); + r.block + .header + .set_timestamp(engine.open_block_header_timestamp(parent.timestamp())); + r.block.header.set_extra_data(extra_data); - let gas_floor_target = cmp::max(gas_range_target.0, engine.params().min_gas_limit); - let gas_ceil_target = cmp::max(gas_range_target.1, gas_floor_target); + let gas_floor_target = cmp::max(gas_range_target.0, engine.params().min_gas_limit); + let gas_ceil_target = cmp::max(gas_range_target.1, gas_floor_target); - engine.machine().populate_from_parent(&mut r.block.header, parent, gas_floor_target, gas_ceil_target); - engine.populate_from_parent(&mut r.block.header, parent); + engine.machine().populate_from_parent( + &mut r.block.header, + parent, + gas_floor_target, + gas_ceil_target, + ); + engine.populate_from_parent(&mut r.block.header, parent); - engine.machine().on_new_block(&mut r.block)?; - engine.on_new_block(&mut r.block, is_epoch_begin, &mut ancestry.into_iter())?; + engine.machine().on_new_block(&mut r.block)?; + engine.on_new_block(&mut r.block, is_epoch_begin, &mut ancestry.into_iter())?; - Ok(r) 
- } + Ok(r) + } - /// Alter the timestamp of the block. - pub fn set_timestamp(&mut self, timestamp: u64) { - self.block.header.set_timestamp(timestamp); - } + /// Alter the timestamp of the block. + pub fn set_timestamp(&mut self, timestamp: u64) { + self.block.header.set_timestamp(timestamp); + } - /// Removes block gas limit. - pub fn remove_gas_limit(&mut self) { - self.block.header.set_gas_limit(U256::max_value()); - } + /// Removes block gas limit. + pub fn remove_gas_limit(&mut self) { + self.block.header.set_gas_limit(U256::max_value()); + } - /// Add an uncle to the block, if possible. - /// - /// NOTE Will check chain constraints and the uncle number but will NOT check - /// that the header itself is actually valid. - pub fn push_uncle(&mut self, valid_uncle_header: Header) -> Result<(), BlockError> { - let max_uncles = self.engine.maximum_uncle_count(self.block.header.number()); - if self.block.uncles.len() + 1 > max_uncles { - return Err(BlockError::TooManyUncles(OutOfBounds{ - min: None, - max: Some(max_uncles), - found: self.block.uncles.len() + 1, - })); - } - // TODO: check number - // TODO: check not a direct ancestor (use last_hashes for that) - self.block.uncles.push(valid_uncle_header); - Ok(()) - } + /// Add an uncle to the block, if possible. + /// + /// NOTE Will check chain constraints and the uncle number but will NOT check + /// that the header itself is actually valid. + pub fn push_uncle(&mut self, valid_uncle_header: Header) -> Result<(), BlockError> { + let max_uncles = self.engine.maximum_uncle_count(self.block.header.number()); + if self.block.uncles.len() + 1 > max_uncles { + return Err(BlockError::TooManyUncles(OutOfBounds { + min: None, + max: Some(max_uncles), + found: self.block.uncles.len() + 1, + })); + } + // TODO: check number + // TODO: check not a direct ancestor (use last_hashes for that) + self.block.uncles.push(valid_uncle_header); + Ok(()) + } - /// Push a transaction into the block. 
- /// - /// If valid, it will be executed, and archived together with the receipt. - pub fn push_transaction(&mut self, t: SignedTransaction, h: Option) -> Result<&Receipt, Error> { - if self.block.transactions_set.contains(&t.hash()) { - return Err(TransactionError::AlreadyImported.into()); - } + /// Push a transaction into the block. + /// + /// If valid, it will be executed, and archived together with the receipt. + pub fn push_transaction( + &mut self, + t: SignedTransaction, + h: Option, + ) -> Result<&Receipt, Error> { + if self.block.transactions_set.contains(&t.hash()) { + return Err(TransactionError::AlreadyImported.into()); + } - let env_info = self.block.env_info(); - let outcome = self.block.state.apply(&env_info, self.engine.machine(), &t, self.block.traces.is_enabled())?; + let env_info = self.block.env_info(); + let outcome = self.block.state.apply( + &env_info, + self.engine.machine(), + &t, + self.block.traces.is_enabled(), + )?; - self.block.transactions_set.insert(h.unwrap_or_else(||t.hash())); - self.block.transactions.push(t.into()); - if let Tracing::Enabled(ref mut traces) = self.block.traces { - traces.push(outcome.trace.into()); - } - self.block.receipts.push(outcome.receipt); - Ok(self.block.receipts.last().expect("receipt just pushed; qed")) - } + self.block + .transactions_set + .insert(h.unwrap_or_else(|| t.hash())); + self.block.transactions.push(t.into()); + if let Tracing::Enabled(ref mut traces) = self.block.traces { + traces.push(outcome.trace.into()); + } + self.block.receipts.push(outcome.receipt); + Ok(self + .block + .receipts + .last() + .expect("receipt just pushed; qed")) + } - /// Push transactions onto the block. - #[cfg(not(feature = "slow-blocks"))] - fn push_transactions(&mut self, transactions: Vec) -> Result<(), Error> { - for t in transactions { - self.push_transaction(t, None)?; - } - Ok(()) - } + /// Push transactions onto the block. 
+ #[cfg(not(feature = "slow-blocks"))] + fn push_transactions(&mut self, transactions: Vec) -> Result<(), Error> { + for t in transactions { + self.push_transaction(t, None)?; + } + Ok(()) + } - /// Push transactions onto the block. - #[cfg(feature = "slow-blocks")] - fn push_transactions(&mut self, transactions: Vec) -> Result<(), Error> { - use std::time; + /// Push transactions onto the block. + #[cfg(feature = "slow-blocks")] + fn push_transactions(&mut self, transactions: Vec) -> Result<(), Error> { + use std::time; - let slow_tx = option_env!("SLOW_TX_DURATION").and_then(|v| v.parse().ok()).unwrap_or(100); - for t in transactions { - let hash = t.hash(); - let start = time::Instant::now(); - self.push_transaction(t, None)?; - let took = start.elapsed(); - let took_ms = took.as_secs() * 1000 + took.subsec_nanos() as u64 / 1000000; - if took > time::Duration::from_millis(slow_tx) { - warn!("Heavy ({} ms) transaction in block {:?}: {:?}", took_ms, self.block.header().number(), hash); - } - debug!(target: "tx", "Transaction {:?} took: {} ms", hash, took_ms); - } + let slow_tx = option_env!("SLOW_TX_DURATION") + .and_then(|v| v.parse().ok()) + .unwrap_or(100); + for t in transactions { + let hash = t.hash(); + let start = time::Instant::now(); + self.push_transaction(t, None)?; + let took = start.elapsed(); + let took_ms = took.as_secs() * 1000 + took.subsec_nanos() as u64 / 1000000; + if took > time::Duration::from_millis(slow_tx) { + warn!( + "Heavy ({} ms) transaction in block {:?}: {:?}", + took_ms, + self.block.header().number(), + hash + ); + } + debug!(target: "tx", "Transaction {:?} took: {} ms", hash, took_ms); + } - Ok(()) - } + Ok(()) + } - /// Populate self from a header. 
- fn populate_from(&mut self, header: &Header) { - self.block.header.set_difficulty(*header.difficulty()); - self.block.header.set_gas_limit(*header.gas_limit()); - self.block.header.set_timestamp(header.timestamp()); - self.block.header.set_uncles_hash(*header.uncles_hash()); - self.block.header.set_transactions_root(*header.transactions_root()); - // TODO: that's horrible. set only for backwards compatibility - if header.extra_data().len() > self.engine.maximum_extra_data_size() { - warn!("Couldn't set extradata. Ignoring."); - } else { - self.block.header.set_extra_data(header.extra_data().clone()); - } - } + /// Populate self from a header. + fn populate_from(&mut self, header: &Header) { + self.block.header.set_difficulty(*header.difficulty()); + self.block.header.set_gas_limit(*header.gas_limit()); + self.block.header.set_timestamp(header.timestamp()); + self.block.header.set_uncles_hash(*header.uncles_hash()); + self.block + .header + .set_transactions_root(*header.transactions_root()); + // TODO: that's horrible. set only for backwards compatibility + if header.extra_data().len() > self.engine.maximum_extra_data_size() { + warn!("Couldn't set extradata. Ignoring."); + } else { + self.block + .header + .set_extra_data(header.extra_data().clone()); + } + } - /// Turn this into a `ClosedBlock`. - pub fn close(self) -> Result { - let unclosed_state = self.block.state.clone(); - let locked = self.close_and_lock()?; + /// Turn this into a `ClosedBlock`. + pub fn close(self) -> Result { + let unclosed_state = self.block.state.clone(); + let locked = self.close_and_lock()?; - Ok(ClosedBlock { - block: locked.block, - unclosed_state, - }) - } + Ok(ClosedBlock { + block: locked.block, + unclosed_state, + }) + } - /// Turn this into a `LockedBlock`. - pub fn close_and_lock(self) -> Result { - let mut s = self; + /// Turn this into a `LockedBlock`. 
+ pub fn close_and_lock(self) -> Result { + let mut s = self; - s.engine.on_close_block(&mut s.block)?; - s.block.state.commit()?; + s.engine.on_close_block(&mut s.block)?; + s.block.state.commit()?; - s.block.header.set_transactions_root(ordered_trie_root(s.block.transactions.iter().map(|e| e.rlp_bytes()))); - let uncle_bytes = encode_list(&s.block.uncles); - s.block.header.set_uncles_hash(keccak(&uncle_bytes)); - s.block.header.set_state_root(s.block.state.root().clone()); - s.block.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes()))); - s.block.header.set_log_bloom(s.block.receipts.iter().fold(Bloom::zero(), |mut b, r| { - b.accrue_bloom(&r.log_bloom); - b - })); - s.block.header.set_gas_used(s.block.receipts.last().map_or_else(U256::zero, |r| r.gas_used)); + s.block.header.set_transactions_root(ordered_trie_root( + s.block.transactions.iter().map(|e| e.rlp_bytes()), + )); + let uncle_bytes = encode_list(&s.block.uncles); + s.block.header.set_uncles_hash(keccak(&uncle_bytes)); + s.block.header.set_state_root(s.block.state.root().clone()); + s.block.header.set_receipts_root(ordered_trie_root( + s.block.receipts.iter().map(|r| r.rlp_bytes()), + )); + s.block + .header + .set_log_bloom(s.block.receipts.iter().fold(Bloom::zero(), |mut b, r| { + b.accrue_bloom(&r.log_bloom); + b + })); + s.block.header.set_gas_used( + s.block + .receipts + .last() + .map_or_else(U256::zero, |r| r.gas_used), + ); - Ok(LockedBlock { - block: s.block, - }) - } + Ok(LockedBlock { block: s.block }) + } - #[cfg(test)] - /// Return mutable block reference. To be used in tests only. - pub fn block_mut(&mut self) -> &mut ExecutedBlock { &mut self.block } + #[cfg(test)] + /// Return mutable block reference. To be used in tests only. 
+ pub fn block_mut(&mut self) -> &mut ExecutedBlock { + &mut self.block + } } impl<'a> ops::Deref for OpenBlock<'a> { - type Target = ExecutedBlock; + type Target = ExecutedBlock; - fn deref(&self) -> &Self::Target { - &self.block - } + fn deref(&self) -> &Self::Target { + &self.block + } } impl ops::Deref for ClosedBlock { - type Target = ExecutedBlock; + type Target = ExecutedBlock; - fn deref(&self) -> &Self::Target { - &self.block - } + fn deref(&self) -> &Self::Target { + &self.block + } } impl ops::Deref for LockedBlock { - type Target = ExecutedBlock; + type Target = ExecutedBlock; - fn deref(&self) -> &Self::Target { - &self.block - } + fn deref(&self) -> &Self::Target { + &self.block + } } impl ops::Deref for SealedBlock { - type Target = ExecutedBlock; + type Target = ExecutedBlock; - fn deref(&self) -> &Self::Target { - &self.block - } + fn deref(&self) -> &Self::Target { + &self.block + } } impl ClosedBlock { - /// Turn this into a `LockedBlock`, unable to be reopened again. - pub fn lock(self) -> LockedBlock { - LockedBlock { - block: self.block, - } - } + /// Turn this into a `LockedBlock`, unable to be reopened again. + pub fn lock(self) -> LockedBlock { + LockedBlock { block: self.block } + } - /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. - pub fn reopen(self, engine: &EthEngine) -> OpenBlock { - // revert rewards (i.e. set state back at last transaction's state). - let mut block = self.block; - block.state = self.unclosed_state; - OpenBlock { - block: block, - engine: engine, - } - } + /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. + pub fn reopen(self, engine: &EthEngine) -> OpenBlock { + // revert rewards (i.e. set state back at last transaction's state). + let mut block = self.block; + block.state = self.unclosed_state; + OpenBlock { + block: block, + engine: engine, + } + } } impl LockedBlock { - /// Removes outcomes from receipts and updates the receipt root. 
- /// - /// This is done after the block is enacted for historical reasons. - /// We allow inconsistency in receipts for some chains if `validate_receipts_transition` - /// is set to non-zero value, so the check only happens if we detect - /// unmatching root first and then fall back to striped receipts. - pub fn strip_receipts_outcomes(&mut self) { - for receipt in &mut self.block.receipts { - receipt.outcome = TransactionOutcome::Unknown; - } - self.block.header.set_receipts_root( - ordered_trie_root(self.block.receipts.iter().map(|r| r.rlp_bytes())) - ); - } + /// Removes outcomes from receipts and updates the receipt root. + /// + /// This is done after the block is enacted for historical reasons. + /// We allow inconsistency in receipts for some chains if `validate_receipts_transition` + /// is set to non-zero value, so the check only happens if we detect + /// unmatching root first and then fall back to striped receipts. + pub fn strip_receipts_outcomes(&mut self) { + for receipt in &mut self.block.receipts { + receipt.outcome = TransactionOutcome::Unknown; + } + self.block.header.set_receipts_root(ordered_trie_root( + self.block.receipts.iter().map(|r| r.rlp_bytes()), + )); + } - /// Provide a valid seal in order to turn this into a `SealedBlock`. - /// - /// NOTE: This does not check the validity of `seal` with the engine. - pub fn seal(self, engine: &EthEngine, seal: Vec) -> Result { - let expected_seal_fields = engine.seal_fields(&self.header); - let mut s = self; - if seal.len() != expected_seal_fields { - Err(BlockError::InvalidSealArity(Mismatch { - expected: expected_seal_fields, - found: seal.len() - }))?; - } + /// Provide a valid seal in order to turn this into a `SealedBlock`. + /// + /// NOTE: This does not check the validity of `seal` with the engine. 
+ pub fn seal(self, engine: &EthEngine, seal: Vec) -> Result { + let expected_seal_fields = engine.seal_fields(&self.header); + let mut s = self; + if seal.len() != expected_seal_fields { + Err(BlockError::InvalidSealArity(Mismatch { + expected: expected_seal_fields, + found: seal.len(), + }))?; + } - s.block.header.set_seal(seal); - engine.on_seal_block(&mut s.block)?; - s.block.header.compute_hash(); + s.block.header.set_seal(seal); + engine.on_seal_block(&mut s.block)?; + s.block.header.compute_hash(); - Ok(SealedBlock { - block: s.block - }) - } + Ok(SealedBlock { block: s.block }) + } - /// Provide a valid seal in order to turn this into a `SealedBlock`. - /// This does check the validity of `seal` with the engine. - /// Returns the `ClosedBlock` back again if the seal is no good. - /// TODO(https://github.com/paritytech/parity-ethereum/issues/10407): This is currently only used in POW chain call paths, we should really merge it with seal() above. - pub fn try_seal( - self, - engine: &EthEngine, - seal: Vec, - ) -> Result { - let mut s = self; - s.block.header.set_seal(seal); - s.block.header.compute_hash(); + /// Provide a valid seal in order to turn this into a `SealedBlock`. + /// This does check the validity of `seal` with the engine. + /// Returns the `ClosedBlock` back again if the seal is no good. + /// TODO(https://github.com/paritytech/parity-ethereum/issues/10407): This is currently only used in POW chain call paths, we should really merge it with seal() above. + pub fn try_seal(self, engine: &EthEngine, seal: Vec) -> Result { + let mut s = self; + s.block.header.set_seal(seal); + s.block.header.compute_hash(); - // TODO: passing state context to avoid engines owning it? - engine.verify_local_seal(&s.block.header)?; - Ok(SealedBlock { - block: s.block - }) - } + // TODO: passing state context to avoid engines owning it? 
+ engine.verify_local_seal(&s.block.header)?; + Ok(SealedBlock { block: s.block }) + } } impl Drain for LockedBlock { - fn drain(self) -> ExecutedBlock { - self.block - } + fn drain(self) -> ExecutedBlock { + self.block + } } impl SealedBlock { - /// Get the RLP-encoding of the block. - pub fn rlp_bytes(&self) -> Bytes { - let mut block_rlp = RlpStream::new_list(3); - block_rlp.append(&self.block.header); - block_rlp.append_list(&self.block.transactions); - block_rlp.append_list(&self.block.uncles); - block_rlp.out() - } + /// Get the RLP-encoding of the block. + pub fn rlp_bytes(&self) -> Bytes { + let mut block_rlp = RlpStream::new_list(3); + block_rlp.append(&self.block.header); + block_rlp.append_list(&self.block.transactions); + block_rlp.append_list(&self.block.uncles); + block_rlp.out() + } } impl Drain for SealedBlock { - fn drain(self) -> ExecutedBlock { - self.block - } + fn drain(self) -> ExecutedBlock { + self.block + } } /// Enact the block given by block header, transactions and uncles pub(crate) fn enact( - header: Header, - transactions: Vec, - uncles: Vec
, - engine: &EthEngine, - tracing: bool, - db: StateDB, - parent: &Header, - last_hashes: Arc, - factories: Factories, - is_epoch_begin: bool, - ancestry: &mut Iterator, + header: Header, + transactions: Vec, + uncles: Vec
, + engine: &EthEngine, + tracing: bool, + db: StateDB, + parent: &Header, + last_hashes: Arc, + factories: Factories, + is_epoch_begin: bool, + ancestry: &mut Iterator, ) -> Result { - // For trace log - let trace_state = if log_enabled!(target: "enact", ::log::Level::Trace) { - Some(State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce(parent.number() + 1), factories.clone())?) - } else { - None - }; + // For trace log + let trace_state = if log_enabled!(target: "enact", ::log::Level::Trace) { + Some(State::from_existing( + db.boxed_clone(), + parent.state_root().clone(), + engine.account_start_nonce(parent.number() + 1), + factories.clone(), + )?) + } else { + None + }; - let mut b = OpenBlock::new( - engine, - factories, - tracing, - db, - parent, - last_hashes, - // Engine such as Clique will calculate author from extra_data. - // this is only important for executing contracts as the 'executive_author'. - engine.executive_author(&header)?, - (3141562.into(), 31415620.into()), - vec![], - is_epoch_begin, - ancestry, - )?; + let mut b = OpenBlock::new( + engine, + factories, + tracing, + db, + parent, + last_hashes, + // Engine such as Clique will calculate author from extra_data. + // this is only important for executing contracts as the 'executive_author'. 
+ engine.executive_author(&header)?, + (3141562.into(), 31415620.into()), + vec![], + is_epoch_begin, + ancestry, + )?; - if let Some(ref s) = trace_state { - let env = b.env_info(); - let root = s.root(); - let author_balance = s.balance(&env.author)?; - trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", + if let Some(ref s) = trace_state { + let env = b.env_info(); + let root = s.root(); + let author_balance = s.balance(&env.author)?; + trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", b.block.header.number(), root, env.author, author_balance); - } + } - b.populate_from(&header); - b.push_transactions(transactions)?; + b.populate_from(&header); + b.push_transactions(transactions)?; - for u in uncles { - b.push_uncle(u)?; - } + for u in uncles { + b.push_uncle(u)?; + } - b.close_and_lock() + b.close_and_lock() } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header pub fn enact_verified( - block: PreverifiedBlock, - engine: &EthEngine, - tracing: bool, - db: StateDB, - parent: &Header, - last_hashes: Arc, - factories: Factories, - is_epoch_begin: bool, - ancestry: &mut Iterator, + block: PreverifiedBlock, + engine: &EthEngine, + tracing: bool, + db: StateDB, + parent: &Header, + last_hashes: Arc, + factories: Factories, + is_epoch_begin: bool, + ancestry: &mut Iterator, ) -> Result { - - enact( - block.header, - block.transactions, - block.uncles, - engine, - tracing, - db, - parent, - last_hashes, - factories, - is_epoch_begin, - ancestry, - ) + enact( + block.header, + block.transactions, + block.uncles, + engine, + tracing, + db, + parent, + last_hashes, + factories, + is_epoch_begin, + ancestry, + ) } #[cfg(test)] mod tests { - use test_helpers::get_temp_state_db; - use super::*; - use engines::EthEngine; - use vm::LastHashes; - use error::Error; - use factory::Factories; - use state_db::StateDB; - use ethereum_types::Address; - use std::sync::Arc; - use 
verification::queue::kind::blocks::Unverified; - use types::transaction::SignedTransaction; - use types::header::Header; - use types::view; - use types::views::BlockView; + use super::*; + use engines::EthEngine; + use error::Error; + use ethereum_types::Address; + use factory::Factories; + use state_db::StateDB; + use std::sync::Arc; + use test_helpers::get_temp_state_db; + use types::{header::Header, transaction::SignedTransaction, view, views::BlockView}; + use verification::queue::kind::blocks::Unverified; + use vm::LastHashes; - /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header - fn enact_bytes( - block_bytes: Vec, - engine: &EthEngine, - tracing: bool, - db: StateDB, - parent: &Header, - last_hashes: Arc, - factories: Factories, - ) -> Result { + /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header + fn enact_bytes( + block_bytes: Vec, + engine: &EthEngine, + tracing: bool, + db: StateDB, + parent: &Header, + last_hashes: Arc, + factories: Factories, + ) -> Result { + let block = Unverified::from_rlp(block_bytes)?; + let header = block.header; + let transactions: Result, Error> = block + .transactions + .into_iter() + .map(SignedTransaction::new) + .map(|r| r.map_err(Into::into)) + .collect(); + let transactions = transactions?; - let block = Unverified::from_rlp(block_bytes)?; - let header = block.header; - let transactions: Result, Error> = block - .transactions - .into_iter() - .map(SignedTransaction::new) - .map(|r| r.map_err(Into::into)) - .collect(); - let transactions = transactions?; - - { - if ::log::max_level() >= ::log::Level::Trace { - let s = State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce(parent.number() + 1), factories.clone())?; - trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", + { + if ::log::max_level() >= ::log::Level::Trace { + let s = 
State::from_existing( + db.boxed_clone(), + parent.state_root().clone(), + engine.account_start_nonce(parent.number() + 1), + factories.clone(), + )?; + trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", header.number(), s.root(), header.author(), s.balance(&header.author())?); - } - } + } + } - let mut b = OpenBlock::new( - engine, - factories, - tracing, - db, - parent, - last_hashes, - Address::new(), - (3141562.into(), 31415620.into()), - vec![], - false, - None, - )?; + let mut b = OpenBlock::new( + engine, + factories, + tracing, + db, + parent, + last_hashes, + Address::new(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + )?; - b.populate_from(&header); - b.push_transactions(transactions)?; + b.populate_from(&header); + b.push_transactions(transactions)?; - for u in block.uncles { - b.push_uncle(u)?; - } + for u in block.uncles { + b.push_uncle(u)?; + } - b.close_and_lock() - } + b.close_and_lock() + } - /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards - fn enact_and_seal( - block_bytes: Vec, - engine: &EthEngine, - tracing: bool, - db: StateDB, - parent: &Header, - last_hashes: Arc, - factories: Factories, - ) -> Result { - let header = Unverified::from_rlp(block_bytes.clone())?.header; - Ok(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, factories)? - .seal(engine, header.seal().to_vec())?) - } + /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards + fn enact_and_seal( + block_bytes: Vec, + engine: &EthEngine, + tracing: bool, + db: StateDB, + parent: &Header, + last_hashes: Arc, + factories: Factories, + ) -> Result { + let header = Unverified::from_rlp(block_bytes.clone())?.header; + Ok(enact_bytes( + block_bytes, + engine, + tracing, + db, + parent, + last_hashes, + factories, + )? 
+ .seal(engine, header.seal().to_vec())?) + } - #[test] - fn open_block() { - use spec::*; - let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b = b.close_and_lock().unwrap(); - let _ = b.seal(&*spec.engine, vec![]); - } + #[test] + fn open_block() { + use spec::*; + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let b = OpenBlock::new( + &*spec.engine, + Default::default(), + false, + db, + &genesis_header, + last_hashes, + Address::zero(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b = b.close_and_lock().unwrap(); + let _ = b.seal(&*spec.engine, vec![]); + } - #[test] - fn enact_block() { - use spec::*; - let spec = Spec::new_test(); - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); + #[test] + fn enact_block() { + use spec::*; + let spec = Spec::new_test(); + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap() - .close_and_lock().unwrap().seal(engine, vec![]).unwrap(); - let orig_bytes = b.rlp_bytes(); - let orig_db = b.drain().state.drop().1; + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + 
.unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let b = OpenBlock::new( + engine, + Default::default(), + false, + db, + &genesis_header, + last_hashes.clone(), + Address::zero(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap() + .close_and_lock() + .unwrap() + .seal(engine, vec![]) + .unwrap(); + let orig_bytes = b.rlp_bytes(); + let orig_db = b.drain().state.drop().1; - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let e = enact_and_seal(orig_bytes.clone(), engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let e = enact_and_seal( + orig_bytes.clone(), + engine, + false, + db, + &genesis_header, + last_hashes, + Default::default(), + ) + .unwrap(); - assert_eq!(e.rlp_bytes(), orig_bytes); + assert_eq!(e.rlp_bytes(), orig_bytes); - let db = e.drain().state.drop().1; - assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys()); - assert!(orig_db.journal_db().keys().iter().filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)).next() == None); - } + let db = e.drain().state.drop().1; + assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys()); + assert!( + orig_db + .journal_db() + .keys() + .iter() + .filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)) + .next() + == None + ); + } - #[test] - fn enact_block_with_uncle() { - use spec::*; - let spec = Spec::new_test(); - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); + #[test] + fn enact_block_with_uncle() { + use spec::*; + let spec = Spec::new_test(); + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let last_hashes = Arc::new(vec![genesis_header.hash()]); - let mut open_block = OpenBlock::new(engine, Default::default(), false, 
db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let mut uncle1_header = Header::new(); - uncle1_header.set_extra_data(b"uncle1".to_vec()); - let mut uncle2_header = Header::new(); - uncle2_header.set_extra_data(b"uncle2".to_vec()); - open_block.push_uncle(uncle1_header).unwrap(); - open_block.push_uncle(uncle2_header).unwrap(); - let b = open_block.close_and_lock().unwrap().seal(engine, vec![]).unwrap(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let mut open_block = OpenBlock::new( + engine, + Default::default(), + false, + db, + &genesis_header, + last_hashes.clone(), + Address::zero(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let mut uncle1_header = Header::new(); + uncle1_header.set_extra_data(b"uncle1".to_vec()); + let mut uncle2_header = Header::new(); + uncle2_header.set_extra_data(b"uncle2".to_vec()); + open_block.push_uncle(uncle1_header).unwrap(); + open_block.push_uncle(uncle2_header).unwrap(); + let b = open_block + .close_and_lock() + .unwrap() + .seal(engine, vec![]) + .unwrap(); - let orig_bytes = b.rlp_bytes(); - let orig_db = b.drain().state.drop().1; + let orig_bytes = b.rlp_bytes(); + let orig_db = b.drain().state.drop().1; - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let e = enact_and_seal(orig_bytes.clone(), engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let e = enact_and_seal( + orig_bytes.clone(), + engine, + false, + db, + &genesis_header, + last_hashes, + Default::default(), + ) + .unwrap(); - let bytes = e.rlp_bytes(); - assert_eq!(bytes, orig_bytes); - let uncles = view!(BlockView, &bytes).uncles(); - assert_eq!(uncles[1].extra_data(), b"uncle2"); + let 
bytes = e.rlp_bytes(); + assert_eq!(bytes, orig_bytes); + let uncles = view!(BlockView, &bytes).uncles(); + assert_eq!(uncles[1].extra_data(), b"uncle2"); - let db = e.drain().state.drop().1; - assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys()); - assert!(orig_db.journal_db().keys().iter().filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)).next() == None); - } + let db = e.drain().state.drop().1; + assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys()); + assert!( + orig_db + .journal_db() + .keys() + .iter() + .filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)) + .next() + == None + ); + } } diff --git a/ethcore/src/client/ancient_import.rs b/ethcore/src/client/ancient_import.rs index 2a0a970cd..de5f4b8ee 100644 --- a/ethcore/src/client/ancient_import.rs +++ b/ethcore/src/client/ancient_import.rs @@ -18,7 +18,7 @@ use std::sync::Arc; -use engines::{EthEngine, EpochVerifier}; +use engines::{EpochVerifier, EthEngine}; use machine::EthereumMachine; use blockchain::BlockChain; @@ -32,70 +32,78 @@ const HEAVY_VERIFY_RATE: f32 = 0.02; /// Ancient block verifier: import an ancient sequence of blocks in order from a starting /// epoch. pub struct AncientVerifier { - cur_verifier: RwLock>>>, - engine: Arc, + cur_verifier: RwLock>>>, + engine: Arc, } impl AncientVerifier { - /// Create a new ancient block verifier with the given engine. - pub fn new(engine: Arc) -> Self { - AncientVerifier { - cur_verifier: RwLock::new(None), - engine, - } - } + /// Create a new ancient block verifier with the given engine. + pub fn new(engine: Arc) -> Self { + AncientVerifier { + cur_verifier: RwLock::new(None), + engine, + } + } - /// Verify the next block header, randomly choosing whether to do heavy or light - /// verification. If the block is the end of an epoch, updates the epoch verifier. 
- pub fn verify( - &self, - rng: &mut R, - header: &Header, - chain: &BlockChain, - ) -> Result<(), ::error::Error> { - // perform verification - let verified = if let Some(ref cur_verifier) = *self.cur_verifier.read() { - match rng.gen::() <= HEAVY_VERIFY_RATE { - true => cur_verifier.verify_heavy(header)?, - false => cur_verifier.verify_light(header)?, - } - true - } else { - false - }; + /// Verify the next block header, randomly choosing whether to do heavy or light + /// verification. If the block is the end of an epoch, updates the epoch verifier. + pub fn verify( + &self, + rng: &mut R, + header: &Header, + chain: &BlockChain, + ) -> Result<(), ::error::Error> { + // perform verification + let verified = if let Some(ref cur_verifier) = *self.cur_verifier.read() { + match rng.gen::() <= HEAVY_VERIFY_RATE { + true => cur_verifier.verify_heavy(header)?, + false => cur_verifier.verify_light(header)?, + } + true + } else { + false + }; - // when there is no verifier initialize it. - // We use a bool flag to avoid double locking in the happy case - if !verified { - { - let mut cur_verifier = self.cur_verifier.write(); - if cur_verifier.is_none() { - *cur_verifier = Some(self.initial_verifier(header, chain)?); - } - } - // Call again to verify. - return self.verify(rng, header, chain); - } + // when there is no verifier initialize it. + // We use a bool flag to avoid double locking in the happy case + if !verified { + { + let mut cur_verifier = self.cur_verifier.write(); + if cur_verifier.is_none() { + *cur_verifier = Some(self.initial_verifier(header, chain)?); + } + } + // Call again to verify. + return self.verify(rng, header, chain); + } - // ancient import will only use transitions obtained from the snapshot. 
- if let Some(transition) = chain.epoch_transition(header.number(), header.hash()) { - let v = self.engine.epoch_verifier(&header, &transition.proof).known_confirmed()?; - *self.cur_verifier.write() = Some(v); - } + // ancient import will only use transitions obtained from the snapshot. + if let Some(transition) = chain.epoch_transition(header.number(), header.hash()) { + let v = self + .engine + .epoch_verifier(&header, &transition.proof) + .known_confirmed()?; + *self.cur_verifier.write() = Some(v); + } - Ok(()) - } + Ok(()) + } - fn initial_verifier(&self, header: &Header, chain: &BlockChain) - -> Result>, ::error::Error> - { - trace!(target: "client", "Initializing ancient block restoration."); - let current_epoch_data = chain.epoch_transitions() - .take_while(|&(_, ref t)| t.block_number < header.number()) - .last() - .map(|(_, t)| t.proof) - .expect("At least one epoch entry (genesis) always stored; qed"); + fn initial_verifier( + &self, + header: &Header, + chain: &BlockChain, + ) -> Result>, ::error::Error> { + trace!(target: "client", "Initializing ancient block restoration."); + let current_epoch_data = chain + .epoch_transitions() + .take_while(|&(_, ref t)| t.block_number < header.number()) + .last() + .map(|(_, t)| t.proof) + .expect("At least one epoch entry (genesis) always stored; qed"); - self.engine.epoch_verifier(&header, ¤t_epoch_data).known_confirmed() - } + self.engine + .epoch_verifier(&header, ¤t_epoch_data) + .known_confirmed() + } } diff --git a/ethcore/src/client/bad_blocks.rs b/ethcore/src/client/bad_blocks.rs index 6af24cc40..5757f35b8 100644 --- a/ethcore/src/client/bad_blocks.rs +++ b/ethcore/src/client/bad_blocks.rs @@ -25,57 +25,62 @@ use verification::queue::kind::blocks::Unverified; /// Recently seen bad blocks. 
pub struct BadBlocks { - last_blocks: RwLock>, + last_blocks: RwLock>, } impl Default for BadBlocks { - fn default() -> Self { - BadBlocks { - last_blocks: RwLock::new(MemoryLruCache::new(8 * 1024 * 1024)), - } - } + fn default() -> Self { + BadBlocks { + last_blocks: RwLock::new(MemoryLruCache::new(8 * 1024 * 1024)), + } + } } impl BadBlocks { - /// Reports given RLP as invalid block. - pub fn report(&self, raw: Bytes, message: String) { - match Unverified::from_rlp(raw) { - Ok(unverified) => { - error!( - target: "client", - "\nBad block detected: {}\nRLP: {}\nHeader: {:?}\nUncles: {}\nTransactions:{}\n", - message, - unverified.bytes.to_hex(), - unverified.header, - unverified.uncles - .iter() - .enumerate() - .map(|(index, uncle)| format!("[Uncle {}] {:?}", index, uncle)) - .join("\n"), - unverified.transactions - .iter() - .enumerate() - .map(|(index, tx)| format!("[Tx {}] {:?}", index, tx)) - .join("\n"), - ); - self.last_blocks.write().insert(unverified.header.hash(), (unverified, message)); - }, - Err(err) => { - error!(target: "client", "Bad undecodable block detected: {}\n{:?}", message, err); - }, - } - } + /// Reports given RLP as invalid block. 
+ pub fn report(&self, raw: Bytes, message: String) { + match Unverified::from_rlp(raw) { + Ok(unverified) => { + error!( + target: "client", + "\nBad block detected: {}\nRLP: {}\nHeader: {:?}\nUncles: {}\nTransactions:{}\n", + message, + unverified.bytes.to_hex(), + unverified.header, + unverified.uncles + .iter() + .enumerate() + .map(|(index, uncle)| format!("[Uncle {}] {:?}", index, uncle)) + .join("\n"), + unverified.transactions + .iter() + .enumerate() + .map(|(index, tx)| format!("[Tx {}] {:?}", index, tx)) + .join("\n"), + ); + self.last_blocks + .write() + .insert(unverified.header.hash(), (unverified, message)); + } + Err(err) => { + error!(target: "client", "Bad undecodable block detected: {}\n{:?}", message, err); + } + } + } - /// Returns a list of recently detected bad blocks with error descriptions. - pub fn bad_blocks(&self) -> Vec<(Unverified, String)> { - self.last_blocks.read() - .backstore() - .iter() - .map(|(_k, (unverified, message))| ( - Unverified::from_rlp(unverified.bytes.clone()) - .expect("Bytes coming from UnverifiedBlock so decodable; qed"), - message.clone(), - )) - .collect() - } + /// Returns a list of recently detected bad blocks with error descriptions. + pub fn bad_blocks(&self) -> Vec<(Unverified, String)> { + self.last_blocks + .read() + .backstore() + .iter() + .map(|(_k, (unverified, message))| { + ( + Unverified::from_rlp(unverified.bytes.clone()) + .expect("Bytes coming from UnverifiedBlock so decodable; qed"), + message.clone(), + ) + }) + .collect() + } } diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs index 5f9b8ed31..302ab8546 100644 --- a/ethcore/src/client/chain_notify.rs +++ b/ethcore/src/client/chain_notify.rs @@ -14,180 +14,188 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use blockchain::ImportRoute; use bytes::Bytes; use ethereum_types::{H256, U256}; +use std::{collections::HashMap, time::Duration}; use types::transaction::UnverifiedTransaction; -use blockchain::ImportRoute; -use std::time::Duration; -use std::collections::HashMap; /// Messages to broadcast via chain pub enum ChainMessageType { - /// Consensus message - Consensus(Vec), - /// Message with private transaction - PrivateTransaction(H256, Vec), - /// Message with signed private transaction - SignedPrivateTransaction(H256, Vec), + /// Consensus message + Consensus(Vec), + /// Message with private transaction + PrivateTransaction(H256, Vec), + /// Message with signed private transaction + SignedPrivateTransaction(H256, Vec), } /// Route type to indicate whether it is enacted or retracted. #[derive(Clone)] pub enum ChainRouteType { - /// Enacted block - Enacted, - /// Retracted block - Retracted + /// Enacted block + Enacted, + /// Retracted block + Retracted, } /// A complete chain enacted retracted route. #[derive(Default, Clone)] pub struct ChainRoute { - route: Vec<(H256, ChainRouteType)>, - enacted: Vec, - retracted: Vec, + route: Vec<(H256, ChainRouteType)>, + enacted: Vec, + retracted: Vec, } impl<'a> From<&'a [ImportRoute]> for ChainRoute { - fn from(import_results: &'a [ImportRoute]) -> ChainRoute { - ChainRoute::new(import_results.iter().flat_map(|route| { - route.retracted.iter().map(|h| (*h, ChainRouteType::Retracted)) - .chain(route.enacted.iter().map(|h| (*h, ChainRouteType::Enacted))) - }).collect()) - } + fn from(import_results: &'a [ImportRoute]) -> ChainRoute { + ChainRoute::new( + import_results + .iter() + .flat_map(|route| { + route + .retracted + .iter() + .map(|h| (*h, ChainRouteType::Retracted)) + .chain(route.enacted.iter().map(|h| (*h, ChainRouteType::Enacted))) + }) + .collect(), + ) + } } impl ChainRoute { - /// Create a new ChainRoute based on block hash and route type pairs. 
- pub fn new(route: Vec<(H256, ChainRouteType)>) -> Self { - let (enacted, retracted) = Self::to_enacted_retracted(&route); + /// Create a new ChainRoute based on block hash and route type pairs. + pub fn new(route: Vec<(H256, ChainRouteType)>) -> Self { + let (enacted, retracted) = Self::to_enacted_retracted(&route); - Self { route, enacted, retracted } - } + Self { + route, + enacted, + retracted, + } + } - /// Gather all non-duplicate enacted and retracted blocks. - fn to_enacted_retracted(route: &[(H256, ChainRouteType)]) -> (Vec, Vec) { - fn map_to_vec(map: Vec<(H256, bool)>) -> Vec { - map.into_iter().map(|(k, _v)| k).collect() - } + /// Gather all non-duplicate enacted and retracted blocks. + fn to_enacted_retracted(route: &[(H256, ChainRouteType)]) -> (Vec, Vec) { + fn map_to_vec(map: Vec<(H256, bool)>) -> Vec { + map.into_iter().map(|(k, _v)| k).collect() + } - // Because we are doing multiple inserts some of the blocks that were enacted in import `k` - // could be retracted in import `k+1`. This is why to understand if after all inserts - // the block is enacted or retracted we iterate over all routes and at the end final state - // will be in the hashmap - let map = route.iter().fold(HashMap::new(), |mut map, route| { - match &route.1 { - &ChainRouteType::Enacted => { - map.insert(route.0, true); - }, - &ChainRouteType::Retracted => { - map.insert(route.0, false); - }, - } - map - }); + // Because we are doing multiple inserts some of the blocks that were enacted in import `k` + // could be retracted in import `k+1`. 
This is why to understand if after all inserts + // the block is enacted or retracted we iterate over all routes and at the end final state + // will be in the hashmap + let map = route.iter().fold(HashMap::new(), |mut map, route| { + match &route.1 { + &ChainRouteType::Enacted => { + map.insert(route.0, true); + } + &ChainRouteType::Retracted => { + map.insert(route.0, false); + } + } + map + }); - // Split to enacted retracted (using hashmap value) - let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v); - // And convert tuples to keys - (map_to_vec(enacted), map_to_vec(retracted)) - } + // Split to enacted retracted (using hashmap value) + let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v); + // And convert tuples to keys + (map_to_vec(enacted), map_to_vec(retracted)) + } - /// Consume route and return the enacted retracted form. - pub fn into_enacted_retracted(self) -> (Vec, Vec) { - (self.enacted, self.retracted) - } + /// Consume route and return the enacted retracted form. + pub fn into_enacted_retracted(self) -> (Vec, Vec) { + (self.enacted, self.retracted) + } - /// All non-duplicate enacted blocks. - pub fn enacted(&self) -> &[H256] { - &self.enacted - } + /// All non-duplicate enacted blocks. + pub fn enacted(&self) -> &[H256] { + &self.enacted + } - /// All non-duplicate retracted blocks. - pub fn retracted(&self) -> &[H256] { - &self.retracted - } + /// All non-duplicate retracted blocks. + pub fn retracted(&self) -> &[H256] { + &self.retracted + } - /// All blocks in the route. - pub fn route(&self) -> &[(H256, ChainRouteType)] { - &self.route - } + /// All blocks in the route. + pub fn route(&self) -> &[(H256, ChainRouteType)] { + &self.route + } } /// Used by `ChainNotify` `new_blocks()` pub struct NewBlocks { - /// Imported blocks - pub imported: Vec, - /// Invalid blocks - pub invalid: Vec, - /// Route - pub route: ChainRoute, - /// Sealed - pub sealed: Vec, - /// Block bytes. 
- pub proposed: Vec, - /// Duration - pub duration: Duration, - /// Has more blocks to import - pub has_more_blocks_to_import: bool, + /// Imported blocks + pub imported: Vec, + /// Invalid blocks + pub invalid: Vec, + /// Route + pub route: ChainRoute, + /// Sealed + pub sealed: Vec, + /// Block bytes. + pub proposed: Vec, + /// Duration + pub duration: Duration, + /// Has more blocks to import + pub has_more_blocks_to_import: bool, } impl NewBlocks { - /// Constructor - pub fn new( - imported: Vec, - invalid: Vec, - route: ChainRoute, - sealed: Vec, - proposed: Vec, - duration: Duration, - has_more_blocks_to_import: bool, - ) -> NewBlocks { - NewBlocks { - imported, - invalid, - route, - sealed, - proposed, - duration, - has_more_blocks_to_import, - } - } + /// Constructor + pub fn new( + imported: Vec, + invalid: Vec, + route: ChainRoute, + sealed: Vec, + proposed: Vec, + duration: Duration, + has_more_blocks_to_import: bool, + ) -> NewBlocks { + NewBlocks { + imported, + invalid, + route, + sealed, + proposed, + duration, + has_more_blocks_to_import, + } + } } /// Represents what has to be handled by actor listening to chain events -pub trait ChainNotify : Send + Sync { - /// fires when chain has new blocks. - fn new_blocks(&self, _new_blocks: NewBlocks) { - // does nothing by default - } +pub trait ChainNotify: Send + Sync { + /// fires when chain has new blocks. 
+ fn new_blocks(&self, _new_blocks: NewBlocks) { + // does nothing by default + } - /// fires when chain achieves active mode - fn start(&self) { - // does nothing by default - } + /// fires when chain achieves active mode + fn start(&self) { + // does nothing by default + } - /// fires when chain achieves passive mode - fn stop(&self) { - // does nothing by default - } + /// fires when chain achieves passive mode + fn stop(&self) { + // does nothing by default + } - /// fires when chain broadcasts a message - fn broadcast(&self, _message_type: ChainMessageType) { - // does nothing by default - } + /// fires when chain broadcasts a message + fn broadcast(&self, _message_type: ChainMessageType) { + // does nothing by default + } - /// fires when new block is about to be imported - /// implementations should be light - fn block_pre_import(&self, _bytes: &Bytes, _hash: &H256, _difficulty: &U256) { - // does nothing by default - } + /// fires when new block is about to be imported + /// implementations should be light + fn block_pre_import(&self, _bytes: &Bytes, _hash: &H256, _difficulty: &U256) { + // does nothing by default + } - /// fires when new transactions are received from a peer - fn transactions_received(&self, - _txs: &[UnverifiedTransaction], - _peer_id: usize, - ) { - // does nothing by default - } + /// fires when new transactions are received from a peer + fn transactions_received(&self, _txs: &[UnverifiedTransaction], _peer_id: usize) { + // does nothing by default + } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 43cf42bbd..848960c3b 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -14,86 +14,92 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::cmp; -use std::collections::{HashSet, BTreeMap, VecDeque}; -use std::str::FromStr; -use std::str::from_utf8; -use std::convert::TryFrom; -use std::sync::atomic::{AtomicUsize, AtomicI64, AtomicBool, Ordering as AtomicOrdering}; -use std::sync::{Arc, Weak}; -use std::io::{BufReader, BufRead}; -use std::time::{Duration, Instant}; +use std::{ + cmp, + collections::{BTreeMap, HashSet, VecDeque}, + convert::TryFrom, + io::{BufRead, BufReader}, + str::{from_utf8, FromStr}, + sync::{ + atomic::{AtomicBool, AtomicI64, AtomicUsize, Ordering as AtomicOrdering}, + Arc, Weak, + }, + time::{Duration, Instant}, +}; -use blockchain::{BlockReceipts, BlockChain, BlockChainDB, BlockProvider, TreeRoute, ImportRoute, TransactionAddress, ExtrasInsert, BlockNumberKey}; -use bytes::Bytes; -use bytes::ToPretty; +use blockchain::{ + BlockChain, BlockChainDB, BlockNumberKey, BlockProvider, BlockReceipts, ExtrasInsert, + ImportRoute, TransactionAddress, TreeRoute, +}; +use bytes::{Bytes, ToPretty}; +use call_contract::CallContract; use error::Error; +use ethcore_miner::pool::VerifiedTransaction; use ethereum_types::{Address, H256, H264, U256}; use hash::keccak; -use call_contract::CallContract; -use ethcore_miner::pool::VerifiedTransaction; use itertools::Itertools; use kvdb::{DBTransaction, DBValue, KeyValueDB}; use parking_lot::{Mutex, RwLock}; use rand::OsRng; use rlp::PayloadInfo; use rustc_hex::FromHex; -use types::transaction::{self, LocalizedTransaction, UnverifiedTransaction, SignedTransaction, Action}; -use trie::{TrieSpec, TrieFactory, Trie}; -use types::ancestry_action::AncestryAction; -use types::encoded; -use types::filter::Filter; -use types::log_entry::LocalizedLogEntry; -use types::receipt::{Receipt, LocalizedReceipt}; -use types::{BlockNumber, header::{Header, ExtendedHeader}}; +use trie::{Trie, TrieFactory, TrieSpec}; +use types::{ + ancestry_action::AncestryAction, + data_format::DataFormat, + encoded, + filter::Filter, + header::{ExtendedHeader, Header}, + 
log_entry::LocalizedLogEntry, + receipt::{LocalizedReceipt, Receipt}, + transaction::{self, Action, LocalizedTransaction, SignedTransaction, UnverifiedTransaction}, + BlockNumber, +}; use vm::{EnvInfo, LastHashes}; -use types::data_format::DataFormat; -use block::{LockedBlock, Drain, ClosedBlock, OpenBlock, enact_verified, SealedBlock}; -use client::ancient_import::AncientVerifier; +use ansi_term::Colour; +use block::{enact_verified, ClosedBlock, Drain, LockedBlock, OpenBlock, SealedBlock}; +use call_contract::RegistryInfo; use client::{ - Nonce, Balance, ChainInfo, BlockInfo, TransactionInfo, - ReopenBlock, PrepareOpenBlock, ScheduleInfo, ImportSealedBlock, - BroadcastProposalBlock, ImportBlock, StateOrBlock, StateInfo, StateClient, Call, - AccountData, BlockChain as BlockChainTrait, BlockProducer, SealedBlockImporter, - ClientIoMessage, BlockChainReset, ImportExportBlocks + ancient_import::AncientVerifier, bad_blocks, traits::ForceUpdateSealing, AccountData, + BadBlocks, Balance, BlockChain as BlockChainTrait, BlockChainClient, BlockChainReset, BlockId, + BlockInfo, BlockProducer, BroadcastProposalBlock, Call, CallAnalytics, ChainInfo, + ChainMessageType, ChainNotify, ChainRoute, ClientConfig, ClientIoMessage, EngineInfo, + ImportBlock, ImportExportBlocks, ImportSealedBlock, IoClient, Mode, NewBlocks, Nonce, + PrepareOpenBlock, ProvingBlockChainClient, PruningInfo, ReopenBlock, ScheduleInfo, + SealedBlockImporter, StateClient, StateInfo, StateOrBlock, TraceFilter, TraceId, TransactionId, + TransactionInfo, UncleId, }; -use client::{ - BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient, - TraceFilter, CallAnalytics, Mode, - ChainNotify, NewBlocks, ChainRoute, PruningInfo, ProvingBlockChainClient, EngineInfo, ChainMessageType, - IoClient, BadBlocks, traits::ForceUpdateSealing +use engines::{ + epoch::PendingTransition, EngineError, EpochTransition, EthEngine, ForkChoice, MAX_UNCLE_AGE, }; -use client::bad_blocks; -use engines::{MAX_UNCLE_AGE, 
EthEngine, EpochTransition, ForkChoice, EngineError}; -use engines::epoch::PendingTransition; use error::{ - ImportErrorKind, ExecutionError, CallError, BlockError, ImportError, - QueueError, QueueErrorKind, Error as EthcoreError, EthcoreResult, ErrorKind as EthcoreErrorKind + BlockError, CallError, Error as EthcoreError, ErrorKind as EthcoreErrorKind, EthcoreResult, + ExecutionError, ImportError, ImportErrorKind, QueueError, QueueErrorKind, }; -use executive::{Executive, Executed, TransactOptions, contract_address}; +use executive::{contract_address, Executed, Executive, TransactOptions}; use factory::{Factories, VmFactory}; +use io::IoChannel; use miner::{Miner, MinerService}; use snapshot::{self, io as snapshot_io, SnapshotClient}; use spec::Spec; use state::{self, State}; use state_db::StateDB; -use trace::{self, TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; +use trace::{ + self, Database as TraceDatabase, ImportRequest as TraceImportRequest, LocalizedTrace, TraceDB, +}; use transaction_ext::Transaction; -use verification::queue::kind::BlockLike; -use verification::queue::kind::blocks::Unverified; -use verification::{PreverifiedBlock, Verifier, BlockQueue}; -use verification; -use ansi_term::Colour; -use call_contract::RegistryInfo; -use io::IoChannel; +use verification::{ + self, + queue::kind::{blocks::Unverified, BlockLike}, + BlockQueue, PreverifiedBlock, Verifier, +}; use vm::Schedule; // re-export -pub use types::blockchain_info::BlockChainInfo; -pub use types::block_status::BlockStatus; pub use blockchain::CacheSize as BlockChainCacheSize; +use db::{keys::BlockDetails, Readable, Writable}; +pub use types::{block_status::BlockStatus, blockchain_info::BlockChainInfo}; pub use verification::QueueInfo as BlockQueueInfo; -use db::{Writable, Readable, keys::BlockDetails}; use_contract!(registry, "res/contracts/registrar.json"); @@ -106,2406 +112,2758 @@ const MIN_HISTORY_SIZE: u64 = 8; /// Report on the status of a 
client. #[derive(Default, Clone, Debug, Eq, PartialEq)] pub struct ClientReport { - /// How many blocks have been imported so far. - pub blocks_imported: usize, - /// How many transactions have been applied so far. - pub transactions_applied: usize, - /// How much gas has been processed so far. - pub gas_processed: U256, - /// Memory used by state DB - pub state_db_mem: usize, + /// How many blocks have been imported so far. + pub blocks_imported: usize, + /// How many transactions have been applied so far. + pub transactions_applied: usize, + /// How much gas has been processed so far. + pub gas_processed: U256, + /// Memory used by state DB + pub state_db_mem: usize, } impl ClientReport { - /// Alter internal reporting to reflect the additional `block` has been processed. - pub fn accrue_block(&mut self, header: &Header, transactions: usize) { - self.blocks_imported += 1; - self.transactions_applied += transactions; - self.gas_processed = self.gas_processed + *header.gas_used(); - } + /// Alter internal reporting to reflect the additional `block` has been processed. 
+ pub fn accrue_block(&mut self, header: &Header, transactions: usize) { + self.blocks_imported += 1; + self.transactions_applied += transactions; + self.gas_processed = self.gas_processed + *header.gas_used(); + } } impl<'a> ::std::ops::Sub<&'a ClientReport> for ClientReport { - type Output = Self; + type Output = Self; - fn sub(mut self, other: &'a ClientReport) -> Self { - let higher_mem = ::std::cmp::max(self.state_db_mem, other.state_db_mem); - let lower_mem = ::std::cmp::min(self.state_db_mem, other.state_db_mem); + fn sub(mut self, other: &'a ClientReport) -> Self { + let higher_mem = ::std::cmp::max(self.state_db_mem, other.state_db_mem); + let lower_mem = ::std::cmp::min(self.state_db_mem, other.state_db_mem); - self.blocks_imported -= other.blocks_imported; - self.transactions_applied -= other.transactions_applied; - self.gas_processed = self.gas_processed - other.gas_processed; - self.state_db_mem = higher_mem - lower_mem; + self.blocks_imported -= other.blocks_imported; + self.transactions_applied -= other.transactions_applied; + self.gas_processed = self.gas_processed - other.gas_processed; + self.state_db_mem = higher_mem - lower_mem; - self - } + self + } } struct SleepState { - last_activity: Option, - last_autosleep: Option, + last_activity: Option, + last_autosleep: Option, } impl SleepState { - fn new(awake: bool) -> Self { - SleepState { - last_activity: match awake { false => None, true => Some(Instant::now()) }, - last_autosleep: match awake { false => Some(Instant::now()), true => None }, - } - } + fn new(awake: bool) -> Self { + SleepState { + last_activity: match awake { + false => None, + true => Some(Instant::now()), + }, + last_autosleep: match awake { + false => Some(Instant::now()), + true => None, + }, + } + } } struct Importer { - /// Lock used during block import - pub import_lock: Mutex<()>, // FIXME Maybe wrap the whole `Importer` instead? 
+ /// Lock used during block import + pub import_lock: Mutex<()>, // FIXME Maybe wrap the whole `Importer` instead? - /// Used to verify blocks - pub verifier: Box>, + /// Used to verify blocks + pub verifier: Box>, - /// Queue containing pending blocks - pub block_queue: BlockQueue, + /// Queue containing pending blocks + pub block_queue: BlockQueue, - /// Handles block sealing - pub miner: Arc, + /// Handles block sealing + pub miner: Arc, - /// Ancient block verifier: import an ancient sequence of blocks in order from a starting epoch - pub ancient_verifier: AncientVerifier, + /// Ancient block verifier: import an ancient sequence of blocks in order from a starting epoch + pub ancient_verifier: AncientVerifier, - /// Ethereum engine to be used during import - pub engine: Arc, + /// Ethereum engine to be used during import + pub engine: Arc, - /// A lru cache of recently detected bad blocks - pub bad_blocks: bad_blocks::BadBlocks, + /// A lru cache of recently detected bad blocks + pub bad_blocks: bad_blocks::BadBlocks, } /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. /// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. pub struct Client { - /// Flag used to disable the client forever. Not to be confused with `liveness`. - /// - /// For example, auto-updater will disable client forever if there is a - /// hard fork registered on-chain that we don't have capability for. - /// When hard fork block rolls around, the client (if `update` is false) - /// knows it can't proceed further. - enabled: AtomicBool, + /// Flag used to disable the client forever. Not to be confused with `liveness`. + /// + /// For example, auto-updater will disable client forever if there is a + /// hard fork registered on-chain that we don't have capability for. + /// When hard fork block rolls around, the client (if `update` is false) + /// knows it can't proceed further. 
+ enabled: AtomicBool, - /// Operating mode for the client - mode: Mutex, + /// Operating mode for the client + mode: Mutex, - chain: RwLock>, - tracedb: RwLock>, - engine: Arc, + chain: RwLock>, + tracedb: RwLock>, + engine: Arc, - /// Client configuration - config: ClientConfig, + /// Client configuration + config: ClientConfig, - /// Database pruning strategy to use for StateDB - pruning: journaldb::Algorithm, + /// Database pruning strategy to use for StateDB + pruning: journaldb::Algorithm, - /// Client uses this to store blocks, traces, etc. - db: RwLock>, + /// Client uses this to store blocks, traces, etc. + db: RwLock>, - state_db: RwLock, + state_db: RwLock, - /// Report on the status of client - report: RwLock, + /// Report on the status of client + report: RwLock, - sleep_state: Mutex, + sleep_state: Mutex, - /// Flag changed by `sleep` and `wake_up` methods. Not to be confused with `enabled`. - liveness: AtomicBool, - io_channel: RwLock>, + /// Flag changed by `sleep` and `wake_up` methods. Not to be confused with `enabled`. + liveness: AtomicBool, + io_channel: RwLock>, - /// List of actors to be notified on certain chain events - notify: RwLock>>, + /// List of actors to be notified on certain chain events + notify: RwLock>>, - /// Queued transactions from IO - queue_transactions: IoChannelQueue, - /// Ancient blocks import queue - queue_ancient_blocks: IoChannelQueue, - /// Queued ancient blocks, make sure they are imported in order. - queued_ancient_blocks: Arc, - VecDeque<(Unverified, Bytes)> - )>>, - ancient_blocks_import_lock: Arc>, - /// Consensus messages import queue - queue_consensus_message: IoChannelQueue, + /// Queued transactions from IO + queue_transactions: IoChannelQueue, + /// Ancient blocks import queue + queue_ancient_blocks: IoChannelQueue, + /// Queued ancient blocks, make sure they are imported in order. 
+ queued_ancient_blocks: Arc, VecDeque<(Unverified, Bytes)>)>>, + ancient_blocks_import_lock: Arc>, + /// Consensus messages import queue + queue_consensus_message: IoChannelQueue, - last_hashes: RwLock>, - factories: Factories, + last_hashes: RwLock>, + factories: Factories, - /// Number of eras kept in a journal before they are pruned - history: u64, + /// Number of eras kept in a journal before they are pruned + history: u64, - /// An action to be done if a mode/spec_name change happens - on_user_defaults_change: Mutex) + 'static + Send>>>, + /// An action to be done if a mode/spec_name change happens + on_user_defaults_change: Mutex) + 'static + Send>>>, - registrar_address: Option
, + registrar_address: Option
, - /// A closure to call when we want to restart the client - exit_handler: Mutex>>, + /// A closure to call when we want to restart the client + exit_handler: Mutex>>, - importer: Importer, + importer: Importer, } impl Importer { - pub fn new( - config: &ClientConfig, - engine: Arc, - message_channel: IoChannel, - miner: Arc, - ) -> Result { - let block_queue = BlockQueue::new( - config.queue.clone(), - engine.clone(), - message_channel.clone(), - config.verifier_type.verifying_seal() - ); - - Ok(Importer { - import_lock: Mutex::new(()), - verifier: verification::new(config.verifier_type.clone()), - block_queue, - miner, - ancient_verifier: AncientVerifier::new(engine.clone()), - engine, - bad_blocks: Default::default(), - }) - } - - /// This is triggered by a message coming from a block queue when the block is ready for insertion - pub fn import_verified_blocks(&self, client: &Client) -> usize { - // Shortcut out if we know we're incapable of syncing the chain. - if !client.enabled.load(AtomicOrdering::Relaxed) { - return 0; - } - - let max_blocks_to_import = client.config.max_round_blocks_to_import; - let (imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration, has_more_blocks_to_import) = { - let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); - let mut invalid_blocks = HashSet::new(); - let mut proposed_blocks = Vec::with_capacity(max_blocks_to_import); - let mut import_results = Vec::with_capacity(max_blocks_to_import); - - let _import_lock = self.import_lock.lock(); - let blocks = self.block_queue.drain(max_blocks_to_import); - if blocks.is_empty() { - return 0; - } - trace_time!("import_verified_blocks"); - let start = Instant::now(); - - for block in blocks { - let header = block.header.clone(); - let bytes = block.bytes.clone(); - let hash = header.hash(); - - let is_invalid = invalid_blocks.contains(header.parent_hash()); - if is_invalid { - invalid_blocks.insert(hash); - continue; - } - - match 
self.check_and_lock_block(&bytes, block, client) { - Ok((closed_block, pending)) => { - imported_blocks.push(hash); - let transactions_len = closed_block.transactions.len(); - let route = self.commit_block(closed_block, &header, encoded::Block::new(bytes), pending, client); - import_results.push(route); - client.report.write().accrue_block(&header, transactions_len); - }, - Err(err) => { - self.bad_blocks.report(bytes, format!("{:?}", err)); - invalid_blocks.insert(hash); - }, - } - } - - let imported = imported_blocks.len(); - let invalid_blocks = invalid_blocks.into_iter().collect::>(); - - if !invalid_blocks.is_empty() { - self.block_queue.mark_as_bad(&invalid_blocks); - } - let has_more_blocks_to_import = !self.block_queue.mark_as_good(&imported_blocks); - (imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, start.elapsed(), has_more_blocks_to_import) - }; - - { - if !imported_blocks.is_empty() { - let route = ChainRoute::from(import_results.as_ref()); - - if !has_more_blocks_to_import { - self.miner.chain_new_blocks(client, &imported_blocks, &invalid_blocks, route.enacted(), route.retracted(), false); - } - - client.notify(|notify| { - notify.new_blocks( - NewBlocks::new( - imported_blocks.clone(), - invalid_blocks.clone(), - route.clone(), - Vec::new(), - proposed_blocks.clone(), - duration, - has_more_blocks_to_import, - ) - ); - }); - } - } - - let db = client.db.read(); - db.key_value().flush().expect("DB flush failed."); - imported - } - - fn check_and_lock_block(&self, bytes: &[u8], block: PreverifiedBlock, client: &Client) -> EthcoreResult<(LockedBlock, Option)> { - let engine = &*self.engine; - let header = block.header.clone(); - - // Check the block isn't so old we won't be able to enact it. 
- let best_block_number = client.chain.read().best_block_number(); - if client.pruning_info().earliest_state > header.number() { - warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); - bail!("Block is ancient"); - } - - // Check if parent is in chain - let parent = match client.block_header_decoded(BlockId::Hash(*header.parent_hash())) { - Some(h) => h, - None => { - warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash()); - bail!("Parent not found"); - } - }; - - let chain = client.chain.read(); - // Verify Block Family - let verify_family_result = self.verifier.verify_block_family( - &header, - &parent, - engine, - Some(verification::FullFamilyParams { - block: &block, - block_provider: &**chain, - client - }), - ); - - if let Err(e) = verify_family_result { - warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - bail!(e); - }; - - let verify_external_result = self.verifier.verify_block_external(&header, engine); - if let Err(e) = verify_external_result { - warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - bail!(e); - }; - - // Enact Verified Block - let last_hashes = client.build_last_hashes(header.parent_hash()); - let db = client.state_db.read().boxed_clone_canon(header.parent_hash()); - - let is_epoch_begin = chain.epoch_transition(parent.number(), *header.parent_hash()).is_some(); - - let enact_result = enact_verified( - block, - engine, - client.tracedb.read().tracing_enabled(), - db, - &parent, - last_hashes, - client.factories.clone(), - is_epoch_begin, - &mut chain.ancestry_with_metadata_iter(*header.parent_hash()), - ); - - let mut locked_block = match enact_result { - Ok(b) => b, - Err(e) => { - warn!(target: "client", "Block 
import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - bail!(e); - } - }; - - // Strip receipts for blocks before validate_receipts_transition, - // if the expected receipts root header does not match. - // (i.e. allow inconsistency in receipts outcome before the transition block) - if header.number() < engine.params().validate_receipts_transition - && header.receipts_root() != locked_block.header.receipts_root() - { - locked_block.strip_receipts_outcomes(); - } - - // Final Verification - if let Err(e) = self.verifier.verify_block_final(&header, &locked_block.header) { - warn!(target: "client", "Stage 5 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - bail!(e); - } - - let pending = self.check_epoch_end_signal( - &header, - bytes, - &locked_block.receipts, - locked_block.state.db(), - client - )?; - - Ok((locked_block, pending)) - } - - /// Import a block with transaction receipts. - /// - /// The block is guaranteed to be the next best blocks in the - /// first block sequence. Does no sealing or transaction validation. - fn import_old_block(&self, unverified: Unverified, receipts_bytes: &[u8], db: &KeyValueDB, chain: &BlockChain) -> EthcoreResult<()> { - let receipts = ::rlp::decode_list(receipts_bytes); - let _import_lock = self.import_lock.lock(); - - { - trace_time!("import_old_block"); - // verify the block, passing the chain for updating the epoch verifier. - let mut rng = OsRng::new()?; - self.ancient_verifier.verify(&mut rng, &unverified.header, &chain)?; - - // Commit results - let mut batch = DBTransaction::new(); - chain.insert_unordered_block(&mut batch, encoded::Block::new(unverified.bytes), receipts, None, false, true); - // Final commit to the DB - db.write_buffered(batch); - chain.commit(); - } - db.flush().expect("DB flush failed."); - Ok(()) - } - - // NOTE: the header of the block passed here is not necessarily sealed, as - // it is for reconstructing the state transition. 
- // - // The header passed is from the original block data and is sealed. - // TODO: should return an error if ImportRoute is none, issue #9910 - fn commit_block( - &self, - block: B, - header: &Header, - block_data: encoded::Block, - pending: Option, - client: &Client - ) -> ImportRoute - where B: Drain - { - let hash = &header.hash(); - let number = header.number(); - let parent = header.parent_hash(); - let chain = client.chain.read(); - let mut is_finalized = false; - - // Commit results - let block = block.drain(); - debug_assert_eq!(header.hash(), block_data.header_view().hash()); - - let mut batch = DBTransaction::new(); - - let ancestry_actions = self.engine.ancestry_actions(&header, &mut chain.ancestry_with_metadata_iter(*parent)); - - let receipts = block.receipts; - let traces = block.traces.drain(); - let best_hash = chain.best_block_hash(); - - let new = ExtendedHeader { - header: header.clone(), - is_finalized, - parent_total_difficulty: chain.block_details(&parent).expect("Parent block is in the database; qed").total_difficulty - }; - - let best = { - let hash = best_hash; - let header = chain.block_header_data(&hash) - .expect("Best block is in the database; qed") - .decode() - .expect("Stored block header is valid RLP; qed"); - let details = chain.block_details(&hash) - .expect("Best block is in the database; qed"); - - ExtendedHeader { - parent_total_difficulty: details.total_difficulty - *header.difficulty(), - is_finalized: details.is_finalized, - header: header, - } - }; - - let route = chain.tree_route(best_hash, *parent).expect("forks are only kept when it has common ancestors; tree route from best to prospective's parent always exists; qed"); - let fork_choice = if route.is_from_route_finalized { - ForkChoice::Old - } else { - self.engine.fork_choice(&new, &best) - }; - - // CHECK! I *think* this is fine, even if the state_root is equal to another - // already-imported block of the same number. - // TODO: Prove it with a test. 
- let mut state = block.state.drop().1; - - // check epoch end signal, potentially generating a proof on the current - // state. - if let Some(pending) = pending { - chain.insert_pending_transition(&mut batch, header.hash(), pending); - } - - state.journal_under(&mut batch, number, hash).expect("DB commit failed"); - - let finalized: Vec<_> = ancestry_actions.into_iter().map(|ancestry_action| { - let AncestryAction::MarkFinalized(a) = ancestry_action; - - if a != header.hash() { - chain.mark_finalized(&mut batch, a).expect("Engine's ancestry action must be known blocks; qed"); - } else { - // we're finalizing the current block - is_finalized = true; - } - - a - }).collect(); - - let route = chain.insert_block(&mut batch, block_data, receipts.clone(), ExtrasInsert { - fork_choice: fork_choice, - is_finalized, - }); - - client.tracedb.read().import(&mut batch, TraceImportRequest { - traces: traces.into(), - block_hash: hash.clone(), - block_number: number, - enacted: route.enacted.clone(), - retracted: route.retracted.len() - }); - - let is_canon = route.enacted.last().map_or(false, |h| h == hash); - state.sync_cache(&route.enacted, &route.retracted, is_canon); - // Final commit to the DB - client.db.read().key_value().write_buffered(batch); - chain.commit(); - - self.check_epoch_end(&header, &finalized, &chain, client); - - client.update_last_hashes(&parent, hash); - - if let Err(e) = client.prune_ancient(state, &chain) { - warn!("Failed to prune ancient state data: {}", e); - } - - route - } - - // check for epoch end signal and write pending transition if it occurs. - // state for the given block must be available. 
- fn check_epoch_end_signal( - &self, - header: &Header, - block_bytes: &[u8], - receipts: &[Receipt], - state_db: &StateDB, - client: &Client, - ) -> EthcoreResult> { - use engines::EpochChange; - - let hash = header.hash(); - let auxiliary = ::machine::AuxiliaryData { - bytes: Some(block_bytes), - receipts: Some(&receipts), - }; - - match self.engine.signals_epoch_end(header, auxiliary) { - EpochChange::Yes(proof) => { - use engines::Proof; - - let proof = match proof { - Proof::Known(proof) => proof, - Proof::WithState(with_state) => { - let env_info = EnvInfo { - number: header.number(), - author: header.author().clone(), - timestamp: header.timestamp(), - difficulty: header.difficulty().clone(), - last_hashes: client.build_last_hashes(header.parent_hash()), - gas_used: U256::default(), - gas_limit: u64::max_value().into(), - }; - - let call = move |addr, data| { - let mut state_db = state_db.boxed_clone(); - let backend = ::state::backend::Proving::new(state_db.as_hash_db_mut()); - - let transaction = - client.contract_call_tx(BlockId::Hash(*header.parent_hash()), addr, data); - - let mut state = State::from_existing( - backend, - header.state_root().clone(), - self.engine.account_start_nonce(header.number()), - client.factories.clone(), - ).expect("state known to be available for just-imported block; qed"); - - let options = TransactOptions::with_no_tracing().dont_check_nonce(); - let machine = self.engine.machine(); - let schedule = machine.schedule(env_info.number); - let res = Executive::new(&mut state, &env_info, &machine, &schedule) - .transact(&transaction, options); - - let res = match res { - Err(e) => { - trace!(target: "client", "Proved call failed: {}", e); - Err(e.to_string()) - } - Ok(res) => Ok((res.output, state.drop().1.extract_proof())), - }; - - res.map(|(output, proof)| (output, proof.into_iter().map(|x| x.into_vec()).collect())) - }; - - match with_state.generate_proof(&call) { - Ok(proof) => proof, - Err(e) => { - warn!(target: "client", 
"Failed to generate transition proof for block {}: {}", hash, e); - warn!(target: "client", "Snapshots produced by this client may be incomplete"); - return Err(EngineError::FailedSystemCall(e).into()) - } - } - } - }; - - debug!(target: "client", "Block {} signals epoch end.", hash); - - Ok(Some(PendingTransition { proof: proof })) - }, - EpochChange::No => Ok(None), - EpochChange::Unsure(_) => { - warn!(target: "client", "Detected invalid engine implementation."); - warn!(target: "client", "Engine claims to require more block data, but everything provided."); - Err(EngineError::InvalidEngine.into()) - } - } - } - - // check for ending of epoch and write transition if it occurs. - fn check_epoch_end<'a>(&self, header: &'a Header, finalized: &'a [H256], chain: &BlockChain, client: &Client) { - let is_epoch_end = self.engine.is_epoch_end( - header, - finalized, - &(|hash| client.block_header_decoded(BlockId::Hash(hash))), - &(|hash| chain.get_pending_transition(hash)), // TODO: limit to current epoch. - ); - - if let Some(proof) = is_epoch_end { - debug!(target: "client", "Epoch transition at block {}", header.hash()); - - let mut batch = DBTransaction::new(); - chain.insert_epoch_transition(&mut batch, header.number(), EpochTransition { - block_hash: header.hash(), - block_number: header.number(), - proof: proof, - }); - - // always write the batch directly since epoch transition proofs are - // fetched from a DB iterator and DB iterators are only available on - // flushed data. 
- client.db.read().key_value().write(batch).expect("DB flush failed"); - } - } + pub fn new( + config: &ClientConfig, + engine: Arc, + message_channel: IoChannel, + miner: Arc, + ) -> Result { + let block_queue = BlockQueue::new( + config.queue.clone(), + engine.clone(), + message_channel.clone(), + config.verifier_type.verifying_seal(), + ); + + Ok(Importer { + import_lock: Mutex::new(()), + verifier: verification::new(config.verifier_type.clone()), + block_queue, + miner, + ancient_verifier: AncientVerifier::new(engine.clone()), + engine, + bad_blocks: Default::default(), + }) + } + + /// This is triggered by a message coming from a block queue when the block is ready for insertion + pub fn import_verified_blocks(&self, client: &Client) -> usize { + // Shortcut out if we know we're incapable of syncing the chain. + if !client.enabled.load(AtomicOrdering::Relaxed) { + return 0; + } + + let max_blocks_to_import = client.config.max_round_blocks_to_import; + let ( + imported_blocks, + import_results, + invalid_blocks, + imported, + proposed_blocks, + duration, + has_more_blocks_to_import, + ) = { + let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); + let mut invalid_blocks = HashSet::new(); + let mut proposed_blocks = Vec::with_capacity(max_blocks_to_import); + let mut import_results = Vec::with_capacity(max_blocks_to_import); + + let _import_lock = self.import_lock.lock(); + let blocks = self.block_queue.drain(max_blocks_to_import); + if blocks.is_empty() { + return 0; + } + trace_time!("import_verified_blocks"); + let start = Instant::now(); + + for block in blocks { + let header = block.header.clone(); + let bytes = block.bytes.clone(); + let hash = header.hash(); + + let is_invalid = invalid_blocks.contains(header.parent_hash()); + if is_invalid { + invalid_blocks.insert(hash); + continue; + } + + match self.check_and_lock_block(&bytes, block, client) { + Ok((closed_block, pending)) => { + imported_blocks.push(hash); + let transactions_len = 
closed_block.transactions.len(); + let route = self.commit_block( + closed_block, + &header, + encoded::Block::new(bytes), + pending, + client, + ); + import_results.push(route); + client + .report + .write() + .accrue_block(&header, transactions_len); + } + Err(err) => { + self.bad_blocks.report(bytes, format!("{:?}", err)); + invalid_blocks.insert(hash); + } + } + } + + let imported = imported_blocks.len(); + let invalid_blocks = invalid_blocks.into_iter().collect::>(); + + if !invalid_blocks.is_empty() { + self.block_queue.mark_as_bad(&invalid_blocks); + } + let has_more_blocks_to_import = !self.block_queue.mark_as_good(&imported_blocks); + ( + imported_blocks, + import_results, + invalid_blocks, + imported, + proposed_blocks, + start.elapsed(), + has_more_blocks_to_import, + ) + }; + + { + if !imported_blocks.is_empty() { + let route = ChainRoute::from(import_results.as_ref()); + + if !has_more_blocks_to_import { + self.miner.chain_new_blocks( + client, + &imported_blocks, + &invalid_blocks, + route.enacted(), + route.retracted(), + false, + ); + } + + client.notify(|notify| { + notify.new_blocks(NewBlocks::new( + imported_blocks.clone(), + invalid_blocks.clone(), + route.clone(), + Vec::new(), + proposed_blocks.clone(), + duration, + has_more_blocks_to_import, + )); + }); + } + } + + let db = client.db.read(); + db.key_value().flush().expect("DB flush failed."); + imported + } + + fn check_and_lock_block( + &self, + bytes: &[u8], + block: PreverifiedBlock, + client: &Client, + ) -> EthcoreResult<(LockedBlock, Option)> { + let engine = &*self.engine; + let header = block.header.clone(); + + // Check the block isn't so old we won't be able to enact it. 
+ let best_block_number = client.chain.read().best_block_number(); + if client.pruning_info().earliest_state > header.number() { + warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); + bail!("Block is ancient"); + } + + // Check if parent is in chain + let parent = match client.block_header_decoded(BlockId::Hash(*header.parent_hash())) { + Some(h) => h, + None => { + warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash()); + bail!("Parent not found"); + } + }; + + let chain = client.chain.read(); + // Verify Block Family + let verify_family_result = self.verifier.verify_block_family( + &header, + &parent, + engine, + Some(verification::FullFamilyParams { + block: &block, + block_provider: &**chain, + client, + }), + ); + + if let Err(e) = verify_family_result { + warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + bail!(e); + }; + + let verify_external_result = self.verifier.verify_block_external(&header, engine); + if let Err(e) = verify_external_result { + warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + bail!(e); + }; + + // Enact Verified Block + let last_hashes = client.build_last_hashes(header.parent_hash()); + let db = client + .state_db + .read() + .boxed_clone_canon(header.parent_hash()); + + let is_epoch_begin = chain + .epoch_transition(parent.number(), *header.parent_hash()) + .is_some(); + + let enact_result = enact_verified( + block, + engine, + client.tracedb.read().tracing_enabled(), + db, + &parent, + last_hashes, + client.factories.clone(), + is_epoch_begin, + &mut chain.ancestry_with_metadata_iter(*header.parent_hash()), + ); + + let mut locked_block = match enact_result { + Ok(b) => b, + Err(e) => { + warn!(target: 
"client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + bail!(e); + } + }; + + // Strip receipts for blocks before validate_receipts_transition, + // if the expected receipts root header does not match. + // (i.e. allow inconsistency in receipts outcome before the transition block) + if header.number() < engine.params().validate_receipts_transition + && header.receipts_root() != locked_block.header.receipts_root() + { + locked_block.strip_receipts_outcomes(); + } + + // Final Verification + if let Err(e) = self + .verifier + .verify_block_final(&header, &locked_block.header) + { + warn!(target: "client", "Stage 5 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + bail!(e); + } + + let pending = self.check_epoch_end_signal( + &header, + bytes, + &locked_block.receipts, + locked_block.state.db(), + client, + )?; + + Ok((locked_block, pending)) + } + + /// Import a block with transaction receipts. + /// + /// The block is guaranteed to be the next best blocks in the + /// first block sequence. Does no sealing or transaction validation. + fn import_old_block( + &self, + unverified: Unverified, + receipts_bytes: &[u8], + db: &KeyValueDB, + chain: &BlockChain, + ) -> EthcoreResult<()> { + let receipts = ::rlp::decode_list(receipts_bytes); + let _import_lock = self.import_lock.lock(); + + { + trace_time!("import_old_block"); + // verify the block, passing the chain for updating the epoch verifier. 
+ let mut rng = OsRng::new()?; + self.ancient_verifier + .verify(&mut rng, &unverified.header, &chain)?; + + // Commit results + let mut batch = DBTransaction::new(); + chain.insert_unordered_block( + &mut batch, + encoded::Block::new(unverified.bytes), + receipts, + None, + false, + true, + ); + // Final commit to the DB + db.write_buffered(batch); + chain.commit(); + } + db.flush().expect("DB flush failed."); + Ok(()) + } + + // NOTE: the header of the block passed here is not necessarily sealed, as + // it is for reconstructing the state transition. + // + // The header passed is from the original block data and is sealed. + // TODO: should return an error if ImportRoute is none, issue #9910 + fn commit_block( + &self, + block: B, + header: &Header, + block_data: encoded::Block, + pending: Option, + client: &Client, + ) -> ImportRoute + where + B: Drain, + { + let hash = &header.hash(); + let number = header.number(); + let parent = header.parent_hash(); + let chain = client.chain.read(); + let mut is_finalized = false; + + // Commit results + let block = block.drain(); + debug_assert_eq!(header.hash(), block_data.header_view().hash()); + + let mut batch = DBTransaction::new(); + + let ancestry_actions = self + .engine + .ancestry_actions(&header, &mut chain.ancestry_with_metadata_iter(*parent)); + + let receipts = block.receipts; + let traces = block.traces.drain(); + let best_hash = chain.best_block_hash(); + + let new = ExtendedHeader { + header: header.clone(), + is_finalized, + parent_total_difficulty: chain + .block_details(&parent) + .expect("Parent block is in the database; qed") + .total_difficulty, + }; + + let best = { + let hash = best_hash; + let header = chain + .block_header_data(&hash) + .expect("Best block is in the database; qed") + .decode() + .expect("Stored block header is valid RLP; qed"); + let details = chain + .block_details(&hash) + .expect("Best block is in the database; qed"); + + ExtendedHeader { + parent_total_difficulty: 
details.total_difficulty - *header.difficulty(), + is_finalized: details.is_finalized, + header: header, + } + }; + + let route = chain.tree_route(best_hash, *parent).expect("forks are only kept when it has common ancestors; tree route from best to prospective's parent always exists; qed"); + let fork_choice = if route.is_from_route_finalized { + ForkChoice::Old + } else { + self.engine.fork_choice(&new, &best) + }; + + // CHECK! I *think* this is fine, even if the state_root is equal to another + // already-imported block of the same number. + // TODO: Prove it with a test. + let mut state = block.state.drop().1; + + // check epoch end signal, potentially generating a proof on the current + // state. + if let Some(pending) = pending { + chain.insert_pending_transition(&mut batch, header.hash(), pending); + } + + state + .journal_under(&mut batch, number, hash) + .expect("DB commit failed"); + + let finalized: Vec<_> = ancestry_actions + .into_iter() + .map(|ancestry_action| { + let AncestryAction::MarkFinalized(a) = ancestry_action; + + if a != header.hash() { + chain + .mark_finalized(&mut batch, a) + .expect("Engine's ancestry action must be known blocks; qed"); + } else { + // we're finalizing the current block + is_finalized = true; + } + + a + }) + .collect(); + + let route = chain.insert_block( + &mut batch, + block_data, + receipts.clone(), + ExtrasInsert { + fork_choice: fork_choice, + is_finalized, + }, + ); + + client.tracedb.read().import( + &mut batch, + TraceImportRequest { + traces: traces.into(), + block_hash: hash.clone(), + block_number: number, + enacted: route.enacted.clone(), + retracted: route.retracted.len(), + }, + ); + + let is_canon = route.enacted.last().map_or(false, |h| h == hash); + state.sync_cache(&route.enacted, &route.retracted, is_canon); + // Final commit to the DB + client.db.read().key_value().write_buffered(batch); + chain.commit(); + + self.check_epoch_end(&header, &finalized, &chain, client); + + 
client.update_last_hashes(&parent, hash); + + if let Err(e) = client.prune_ancient(state, &chain) { + warn!("Failed to prune ancient state data: {}", e); + } + + route + } + + // check for epoch end signal and write pending transition if it occurs. + // state for the given block must be available. + fn check_epoch_end_signal( + &self, + header: &Header, + block_bytes: &[u8], + receipts: &[Receipt], + state_db: &StateDB, + client: &Client, + ) -> EthcoreResult> { + use engines::EpochChange; + + let hash = header.hash(); + let auxiliary = ::machine::AuxiliaryData { + bytes: Some(block_bytes), + receipts: Some(&receipts), + }; + + match self.engine.signals_epoch_end(header, auxiliary) { + EpochChange::Yes(proof) => { + use engines::Proof; + + let proof = match proof { + Proof::Known(proof) => proof, + Proof::WithState(with_state) => { + let env_info = EnvInfo { + number: header.number(), + author: header.author().clone(), + timestamp: header.timestamp(), + difficulty: header.difficulty().clone(), + last_hashes: client.build_last_hashes(header.parent_hash()), + gas_used: U256::default(), + gas_limit: u64::max_value().into(), + }; + + let call = move |addr, data| { + let mut state_db = state_db.boxed_clone(); + let backend = ::state::backend::Proving::new(state_db.as_hash_db_mut()); + + let transaction = client.contract_call_tx( + BlockId::Hash(*header.parent_hash()), + addr, + data, + ); + + let mut state = State::from_existing( + backend, + header.state_root().clone(), + self.engine.account_start_nonce(header.number()), + client.factories.clone(), + ) + .expect("state known to be available for just-imported block; qed"); + + let options = TransactOptions::with_no_tracing().dont_check_nonce(); + let machine = self.engine.machine(); + let schedule = machine.schedule(env_info.number); + let res = Executive::new(&mut state, &env_info, &machine, &schedule) + .transact(&transaction, options); + + let res = match res { + Err(e) => { + trace!(target: "client", "Proved call 
failed: {}", e); + Err(e.to_string()) + } + Ok(res) => Ok((res.output, state.drop().1.extract_proof())), + }; + + res.map(|(output, proof)| { + (output, proof.into_iter().map(|x| x.into_vec()).collect()) + }) + }; + + match with_state.generate_proof(&call) { + Ok(proof) => proof, + Err(e) => { + warn!(target: "client", "Failed to generate transition proof for block {}: {}", hash, e); + warn!(target: "client", "Snapshots produced by this client may be incomplete"); + return Err(EngineError::FailedSystemCall(e).into()); + } + } + } + }; + + debug!(target: "client", "Block {} signals epoch end.", hash); + + Ok(Some(PendingTransition { proof: proof })) + } + EpochChange::No => Ok(None), + EpochChange::Unsure(_) => { + warn!(target: "client", "Detected invalid engine implementation."); + warn!(target: "client", "Engine claims to require more block data, but everything provided."); + Err(EngineError::InvalidEngine.into()) + } + } + } + + // check for ending of epoch and write transition if it occurs. + fn check_epoch_end<'a>( + &self, + header: &'a Header, + finalized: &'a [H256], + chain: &BlockChain, + client: &Client, + ) { + let is_epoch_end = self.engine.is_epoch_end( + header, + finalized, + &(|hash| client.block_header_decoded(BlockId::Hash(hash))), + &(|hash| chain.get_pending_transition(hash)), // TODO: limit to current epoch. + ); + + if let Some(proof) = is_epoch_end { + debug!(target: "client", "Epoch transition at block {}", header.hash()); + + let mut batch = DBTransaction::new(); + chain.insert_epoch_transition( + &mut batch, + header.number(), + EpochTransition { + block_hash: header.hash(), + block_number: header.number(), + proof: proof, + }, + ); + + // always write the batch directly since epoch transition proofs are + // fetched from a DB iterator and DB iterators are only available on + // flushed data. 
+ client + .db + .read() + .key_value() + .write(batch) + .expect("DB flush failed"); + } + } } impl Client { - /// Create a new client with given parameters. - /// The database is assumed to have been initialized with the correct columns. - pub fn new( - config: ClientConfig, - spec: &Spec, - db: Arc, - miner: Arc, - message_channel: IoChannel, - ) -> Result, ::error::Error> { - let trie_spec = match config.fat_db { - true => TrieSpec::Fat, - false => TrieSpec::Secure, - }; + /// Create a new client with given parameters. + /// The database is assumed to have been initialized with the correct columns. + pub fn new( + config: ClientConfig, + spec: &Spec, + db: Arc, + miner: Arc, + message_channel: IoChannel, + ) -> Result, ::error::Error> { + let trie_spec = match config.fat_db { + true => TrieSpec::Fat, + false => TrieSpec::Secure, + }; - let trie_factory = TrieFactory::new(trie_spec); - let factories = Factories { - vm: VmFactory::new(config.vm_type.clone(), config.jump_table_size), - trie: trie_factory, - accountdb: Default::default(), - }; + let trie_factory = TrieFactory::new(trie_spec); + let factories = Factories { + vm: VmFactory::new(config.vm_type.clone(), config.jump_table_size), + trie: trie_factory, + accountdb: Default::default(), + }; - let journal_db = journaldb::new(db.key_value().clone(), config.pruning, ::db::COL_STATE); - let mut state_db = StateDB::new(journal_db, config.state_cache_size); - if state_db.journal_db().is_empty() { - // Sets the correct state root. - state_db = spec.ensure_db_good(state_db, &factories)?; - let mut batch = DBTransaction::new(); - state_db.journal_under(&mut batch, 0, &spec.genesis_header().hash())?; - db.key_value().write(batch)?; - } + let journal_db = journaldb::new(db.key_value().clone(), config.pruning, ::db::COL_STATE); + let mut state_db = StateDB::new(journal_db, config.state_cache_size); + if state_db.journal_db().is_empty() { + // Sets the correct state root. 
+ state_db = spec.ensure_db_good(state_db, &factories)?; + let mut batch = DBTransaction::new(); + state_db.journal_under(&mut batch, 0, &spec.genesis_header().hash())?; + db.key_value().write(batch)?; + } - let gb = spec.genesis_block(); - let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone())); - let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone())); + let gb = spec.genesis_block(); + let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone())); + let tracedb = RwLock::new(TraceDB::new( + config.tracing.clone(), + db.clone(), + chain.clone(), + )); - trace!("Cleanup journal: DB Earliest = {:?}, Latest = {:?}", state_db.journal_db().earliest_era(), state_db.journal_db().latest_era()); + trace!( + "Cleanup journal: DB Earliest = {:?}, Latest = {:?}", + state_db.journal_db().earliest_era(), + state_db.journal_db().latest_era() + ); - let history = if config.history < MIN_HISTORY_SIZE { - info!(target: "client", "Ignoring pruning history parameter of {}\ + let history = if config.history < MIN_HISTORY_SIZE { + info!(target: "client", "Ignoring pruning history parameter of {}\ , falling back to minimum of {}", config.history, MIN_HISTORY_SIZE); - MIN_HISTORY_SIZE - } else { - config.history - }; - - if !chain.block_header_data(&chain.best_block_hash()).map_or(true, |h| state_db.journal_db().contains(&h.state_root())) { - warn!("State root not found for block #{} ({:x})", chain.best_block_number(), chain.best_block_hash()); - } - - let engine = spec.engine.clone(); - - let awake = match config.mode { Mode::Dark(..) 
| Mode::Off => false, _ => true }; - - let importer = Importer::new(&config, engine.clone(), message_channel.clone(), miner)?; - - let registrar_address = engine.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()); - if let Some(ref addr) = registrar_address { - trace!(target: "client", "Found registrar at {}", addr); - } - - let client = Arc::new(Client { - enabled: AtomicBool::new(true), - sleep_state: Mutex::new(SleepState::new(awake)), - liveness: AtomicBool::new(awake), - mode: Mutex::new(config.mode.clone()), - chain: RwLock::new(chain), - tracedb, - engine, - pruning: config.pruning.clone(), - db: RwLock::new(db.clone()), - state_db: RwLock::new(state_db), - report: RwLock::new(Default::default()), - io_channel: RwLock::new(message_channel), - notify: RwLock::new(Vec::new()), - queue_transactions: IoChannelQueue::new(config.transaction_verification_queue_size), - queue_ancient_blocks: IoChannelQueue::new(MAX_ANCIENT_BLOCKS_QUEUE_SIZE), - queued_ancient_blocks: Default::default(), - ancient_blocks_import_lock: Default::default(), - queue_consensus_message: IoChannelQueue::new(usize::max_value()), - last_hashes: RwLock::new(VecDeque::new()), - factories, - history, - on_user_defaults_change: Mutex::new(None), - registrar_address, - exit_handler: Mutex::new(None), - importer, - config, - }); - - // prune old states. - { - let state_db = client.state_db.read().boxed_clone(); - let chain = client.chain.read(); - client.prune_ancient(state_db, &chain)?; - } - - // ensure genesis epoch proof in the DB. - { - let chain = client.chain.read(); - let gh = spec.genesis_header(); - if chain.epoch_transition(0, gh.hash()).is_none() { - trace!(target: "client", "No genesis transition found."); - - let proof = client.with_proving_caller( - BlockId::Number(0), - |call| client.engine.genesis_epoch_data(&gh, call) - ); - let proof = match proof { - Ok(proof) => proof, - Err(e) => { - warn!(target: "client", "Error generating genesis epoch data: {}. 
Snapshots generated may not be complete.", e); - Vec::new() - } - }; - - debug!(target: "client", "Obtained genesis transition proof: {:?}", proof); - - let mut batch = DBTransaction::new(); - chain.insert_epoch_transition(&mut batch, 0, EpochTransition { - block_hash: gh.hash(), - block_number: 0, - proof: proof, - }); - - client.db.read().key_value().write_buffered(batch); - } - } - - // ensure buffered changes are flushed. - client.db.read().key_value().flush()?; - Ok(client) - } - - /// Wakes up client if it's a sleep. - pub fn keep_alive(&self) { - let should_wake = match *self.mode.lock() { - Mode::Dark(..) | Mode::Passive(..) => true, - _ => false, - }; - if should_wake { - self.wake_up(); - (*self.sleep_state.lock()).last_activity = Some(Instant::now()); - } - } - - /// Adds an actor to be notified on certain events - pub fn add_notify(&self, target: Arc) { - self.notify.write().push(Arc::downgrade(&target)); - } - - /// Set a closure to call when the client wants to be restarted. - /// - /// The parameter passed to the callback is the name of the new chain spec to use after - /// the restart. - pub fn set_exit_handler(&self, f: F) where F: Fn(String) + 'static + Send { - *self.exit_handler.lock() = Some(Box::new(f)); - } - - /// Returns engine reference. - pub fn engine(&self) -> &EthEngine { - &*self.engine - } - - fn notify(&self, f: F) where F: Fn(&ChainNotify) { - for np in &*self.notify.read() { - if let Some(n) = np.upgrade() { - f(&*n); - } - } - } - - /// Register an action to be done if a mode/spec_name change happens. - pub fn on_user_defaults_change(&self, f: F) where F: 'static + FnMut(Option) + Send { - *self.on_user_defaults_change.lock() = Some(Box::new(f)); - } - - /// Flush the block import queue. - pub fn flush_queue(&self) { - self.importer.block_queue.flush(); - while !self.importer.block_queue.is_empty() { - self.import_verified_blocks(); - } - } - - /// The env info as of the best block. 
- pub fn latest_env_info(&self) -> EnvInfo { - self.env_info(BlockId::Latest).expect("Best block header always stored; qed") - } - - /// The env info as of a given block. - /// returns `None` if the block unknown. - pub fn env_info(&self, id: BlockId) -> Option { - self.block_header(id).map(|header| { - EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: self.build_last_hashes(&header.parent_hash()), - gas_used: U256::default(), - gas_limit: header.gas_limit(), - } - }) - } - - fn build_last_hashes(&self, parent_hash: &H256) -> Arc { - { - let hashes = self.last_hashes.read(); - if hashes.front().map_or(false, |h| h == parent_hash) { - let mut res = Vec::from(hashes.clone()); - res.resize(256, H256::default()); - return Arc::new(res); - } - } - let mut last_hashes = LastHashes::new(); - last_hashes.resize(256, H256::default()); - last_hashes[0] = parent_hash.clone(); - let chain = self.chain.read(); - for i in 0..255 { - match chain.block_details(&last_hashes[i]) { - Some(details) => { - last_hashes[i + 1] = details.parent.clone(); - }, - None => break, - } - } - let mut cached_hashes = self.last_hashes.write(); - *cached_hashes = VecDeque::from(last_hashes.clone()); - Arc::new(last_hashes) - } - - /// This is triggered by a message coming from a block queue when the block is ready for insertion - pub fn import_verified_blocks(&self) -> usize { - self.importer.import_verified_blocks(self) - } - - // use a state-proving closure for the given block. - fn with_proving_caller(&self, id: BlockId, with_call: F) -> T - where F: FnOnce(&::machine::Call) -> T - { - let call = |a, d| { - let tx = self.contract_call_tx(id, a, d); - let (result, items) = self.prove_transaction(tx, id) - .ok_or_else(|| format!("Unable to make call. 
State unavailable?"))?; - - let items = items.into_iter().map(|x| x.to_vec()).collect(); - Ok((result, items)) - }; - - with_call(&call) - } - - // prune ancient states until below the memory limit or only the minimum amount remain. - fn prune_ancient(&self, mut state_db: StateDB, chain: &BlockChain) -> Result<(), ::error::Error> { - let number = match state_db.journal_db().latest_era() { - Some(n) => n, - None => return Ok(()), - }; - - // prune all ancient eras until we're below the memory target, - // but have at least the minimum number of states. - loop { - let needs_pruning = state_db.journal_db().is_pruned() && - state_db.journal_db().journal_size() >= self.config.history_mem; - - if !needs_pruning { break } - match state_db.journal_db().earliest_era() { - Some(era) if era + self.history <= number => { - trace!(target: "client", "Pruning state for ancient era {}", era); - match chain.block_hash(era) { - Some(ancient_hash) => { - let mut batch = DBTransaction::new(); - state_db.mark_canonical(&mut batch, era, &ancient_hash)?; - self.db.read().key_value().write_buffered(batch); - state_db.journal_db().flush(); - } - None => - debug!(target: "client", "Missing expected hash for block {}", era), - } - } - _ => break, // means that every era is kept, no pruning necessary. - } - } - - Ok(()) - } - - fn update_last_hashes(&self, parent: &H256, hash: &H256) { - let mut hashes = self.last_hashes.write(); - if hashes.front().map_or(false, |h| h == parent) { - if hashes.len() > 255 { - hashes.pop_back(); - } - hashes.push_front(hash.clone()); - } - } - - /// Get shared miner reference. - #[cfg(test)] - pub fn miner(&self) -> Arc { - self.importer.miner.clone() - } - - #[cfg(test)] - pub fn state_db(&self) -> ::parking_lot::RwLockReadGuard { - self.state_db.read() - } - - #[cfg(test)] - pub fn chain(&self) -> Arc { - self.chain.read().clone() - } - - /// Replace io channel. Useful for testing. 
- pub fn set_io_channel(&self, io_channel: IoChannel) { - *self.io_channel.write() = io_channel; - } - - /// Get a copy of the best block's state. - pub fn latest_state_and_header(&self) -> (State, Header) { - let header = self.best_block_header(); - let state = State::from_existing( - self.state_db.read().boxed_clone_canon(&header.hash()), - *header.state_root(), - self.engine.account_start_nonce(header.number()), - self.factories.clone() - ) - .expect("State root of best block header always valid."); - (state, header) - } - - /// Attempt to get a copy of a specific block's final state. - /// - /// This will not fail if given BlockId::Latest. - /// Otherwise, this can fail (but may not) if the DB prunes state or the block - /// is unknown. - pub fn state_at(&self, id: BlockId) -> Option> { - // fast path for latest state. - if let BlockId::Latest = id { - let (state, _) = self.latest_state_and_header(); - return Some(state) - } - - let block_number = match self.block_number(id) { - Some(num) => num, - None => return None, - }; - - self.block_header(id).and_then(|header| { - let db = self.state_db.read().boxed_clone(); - - // early exit for pruned blocks - if db.is_pruned() && self.pruning_info().earliest_state > block_number { - return None; - } - - let root = header.state_root(); - State::from_existing(db, root, self.engine.account_start_nonce(block_number), self.factories.clone()).ok() - }) - } - - /// Attempt to get a copy of a specific block's beginning state. - /// - /// This will not fail if given BlockId::Latest. - /// Otherwise, this can fail (but may not) if the DB prunes state. - pub fn state_at_beginning(&self, id: BlockId) -> Option> { - match self.block_number(id) { - None => None, - Some(0) => self.state_at(id), - Some(n) => self.state_at(BlockId::Number(n - 1)), - } - } - - /// Get a copy of the best block's state. - pub fn state(&self) -> impl StateInfo { - let (state, _) = self.latest_state_and_header(); - state - } - - /// Get info on the cache. 
- pub fn blockchain_cache_info(&self) -> BlockChainCacheSize { - self.chain.read().cache_size() - } - - /// Get the report. - pub fn report(&self) -> ClientReport { - let mut report = self.report.read().clone(); - report.state_db_mem = self.state_db.read().mem_used(); - report - } - - /// Tick the client. - // TODO: manage by real events. - pub fn tick(&self, prevent_sleep: bool) { - self.check_garbage(); - if !prevent_sleep { - self.check_snooze(); - } - } - - fn check_garbage(&self) { - self.chain.read().collect_garbage(); - self.importer.block_queue.collect_garbage(); - self.tracedb.read().collect_garbage(); - } - - fn check_snooze(&self) { - let mode = self.mode.lock().clone(); - match mode { - Mode::Dark(timeout) => { - let mut ss = self.sleep_state.lock(); - if let Some(t) = ss.last_activity { - if Instant::now() > t + timeout { - self.sleep(false); - ss.last_activity = None; - } - } - } - Mode::Passive(timeout, wakeup_after) => { - let mut ss = self.sleep_state.lock(); - let now = Instant::now(); - if let Some(t) = ss.last_activity { - if now > t + timeout { - self.sleep(false); - ss.last_activity = None; - ss.last_autosleep = Some(now); - } - } - if let Some(t) = ss.last_autosleep { - if now > t + wakeup_after { - self.wake_up(); - ss.last_activity = Some(now); - ss.last_autosleep = None; - } - } - } - _ => {} - } - } - - /// Take a snapshot at the given block. - /// If the ID given is "latest", this will default to 1000 blocks behind. 
- pub fn take_snapshot( - &self, - writer: W, - at: BlockId, - p: &snapshot::Progress, - ) -> Result<(), EthcoreError> { - let db = self.state_db.read().journal_db().boxed_clone(); - let best_block_number = self.chain_info().best_block_number; - let block_number = self.block_number(at).ok_or_else(|| snapshot::Error::InvalidStartingBlock(at))?; - - if db.is_pruned() && self.pruning_info().earliest_state > block_number { - return Err(snapshot::Error::OldBlockPrunedDB.into()); - } - - let history = ::std::cmp::min(self.history, 1000); - - let start_hash = match at { - BlockId::Latest => { - let start_num = match db.earliest_era() { - Some(era) => ::std::cmp::max(era, best_block_number.saturating_sub(history)), - None => best_block_number.saturating_sub(history), - }; - - match self.block_hash(BlockId::Number(start_num)) { - Some(h) => h, - None => return Err(snapshot::Error::InvalidStartingBlock(at).into()), - } - } - _ => match self.block_hash(at) { - Some(hash) => hash, - None => return Err(snapshot::Error::InvalidStartingBlock(at).into()), - }, - }; - - let processing_threads = self.config.snapshot.processing_threads; - let chunker = self.engine.snapshot_components().ok_or(snapshot::Error::SnapshotsUnsupported)?; - snapshot::take_snapshot( - chunker, - &self.chain.read(), - start_hash, - db.as_hash_db(), - writer, - p, - processing_threads, - )?; - Ok(()) - } - - /// Ask the client what the history parameter is. 
- pub fn pruning_history(&self) -> u64 { - self.history - } - - fn block_hash(chain: &BlockChain, id: BlockId) -> Option { - match id { - BlockId::Hash(hash) => Some(hash), - BlockId::Number(number) => chain.block_hash(number), - BlockId::Earliest => chain.block_hash(0), - BlockId::Latest => Some(chain.best_block_hash()), - } - } - - fn transaction_address(&self, id: TransactionId) -> Option { - match id { - TransactionId::Hash(ref hash) => self.chain.read().transaction_address(hash), - TransactionId::Location(id, index) => Self::block_hash(&self.chain.read(), id).map(|hash| TransactionAddress { - block_hash: hash, - index: index, - }) - } - } - - fn wake_up(&self) { - if !self.liveness.load(AtomicOrdering::Relaxed) { - self.liveness.store(true, AtomicOrdering::Relaxed); - self.notify(|n| n.start()); - info!(target: "mode", "wake_up: Waking."); - } - } - - fn sleep(&self, force: bool) { - if self.liveness.load(AtomicOrdering::Relaxed) { - // only sleep if the import queue is mostly empty. - if force || (self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON) { - self.liveness.store(false, AtomicOrdering::Relaxed); - self.notify(|n| n.stop()); - info!(target: "mode", "sleep: Sleeping."); - } else { - info!(target: "mode", "sleep: Cannot sleep - syncing ongoing."); - // TODO: Consider uncommenting. - //(*self.sleep_state.lock()).last_activity = Some(Instant::now()); - } - } - } - - // transaction for calling contracts from services like engine. - // from the null sender, with 50M gas. 
- fn contract_call_tx(&self, block_id: BlockId, address: Address, data: Bytes) -> SignedTransaction { - let from = Address::default(); - transaction::Transaction { - nonce: self.nonce(&from, block_id).unwrap_or_else(|| self.engine.account_start_nonce(0)), - action: Action::Call(address), - gas: U256::from(50_000_000), - gas_price: U256::default(), - value: U256::default(), - data: data, - }.fake_sign(from) - } - - fn do_virtual_call( - machine: &::machine::EthereumMachine, - env_info: &EnvInfo, - state: &mut State, - t: &SignedTransaction, - analytics: CallAnalytics, - ) -> Result { - fn call( - state: &mut State, - env_info: &EnvInfo, - machine: &::machine::EthereumMachine, - state_diff: bool, - transaction: &SignedTransaction, - options: TransactOptions, - ) -> Result, CallError> where - T: trace::Tracer, - V: trace::VMTracer, - { - let options = options - .dont_check_nonce() - .save_output_from_contract(); - let original_state = if state_diff { Some(state.clone()) } else { None }; - let schedule = machine.schedule(env_info.number); - - let mut ret = Executive::new(state, env_info, &machine, &schedule).transact_virtual(transaction, options)?; - - if let Some(original) = original_state { - ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); - } - Ok(ret) - } - - let state_diff = analytics.state_diffing; - - match (analytics.transaction_tracing, analytics.vm_tracing) { - (true, true) => call(state, env_info, machine, state_diff, t, TransactOptions::with_tracing_and_vm_tracing()), - (true, false) => call(state, env_info, machine, state_diff, t, TransactOptions::with_tracing()), - (false, true) => call(state, env_info, machine, state_diff, t, TransactOptions::with_vm_tracing()), - (false, false) => call(state, env_info, machine, state_diff, t, TransactOptions::with_no_tracing()), - } - } - - fn block_number_ref(&self, id: &BlockId) -> Option { - match *id { - BlockId::Number(number) => Some(number), - BlockId::Hash(ref hash) => 
self.chain.read().block_number(hash), - BlockId::Earliest => Some(0), - BlockId::Latest => Some(self.chain.read().best_block_number()), - } - } - - /// Retrieve a decoded header given `BlockId` - /// - /// This method optimizes access patterns for latest block header - /// to avoid excessive RLP encoding, decoding and hashing. - fn block_header_decoded(&self, id: BlockId) -> Option
{ - match id { - BlockId::Latest - => Some(self.chain.read().best_block_header()), - BlockId::Hash(ref hash) if hash == &self.chain.read().best_block_hash() - => Some(self.chain.read().best_block_header()), - BlockId::Number(number) if number == self.chain.read().best_block_number() - => Some(self.chain.read().best_block_header()), - _ => self.block_header(id).and_then(|h| h.decode().ok()) - } - } + MIN_HISTORY_SIZE + } else { + config.history + }; + + if !chain + .block_header_data(&chain.best_block_hash()) + .map_or(true, |h| state_db.journal_db().contains(&h.state_root())) + { + warn!( + "State root not found for block #{} ({:x})", + chain.best_block_number(), + chain.best_block_hash() + ); + } + + let engine = spec.engine.clone(); + + let awake = match config.mode { + Mode::Dark(..) | Mode::Off => false, + _ => true, + }; + + let importer = Importer::new(&config, engine.clone(), message_channel.clone(), miner)?; + + let registrar_address = engine + .additional_params() + .get("registrar") + .and_then(|s| Address::from_str(s).ok()); + if let Some(ref addr) = registrar_address { + trace!(target: "client", "Found registrar at {}", addr); + } + + let client = Arc::new(Client { + enabled: AtomicBool::new(true), + sleep_state: Mutex::new(SleepState::new(awake)), + liveness: AtomicBool::new(awake), + mode: Mutex::new(config.mode.clone()), + chain: RwLock::new(chain), + tracedb, + engine, + pruning: config.pruning.clone(), + db: RwLock::new(db.clone()), + state_db: RwLock::new(state_db), + report: RwLock::new(Default::default()), + io_channel: RwLock::new(message_channel), + notify: RwLock::new(Vec::new()), + queue_transactions: IoChannelQueue::new(config.transaction_verification_queue_size), + queue_ancient_blocks: IoChannelQueue::new(MAX_ANCIENT_BLOCKS_QUEUE_SIZE), + queued_ancient_blocks: Default::default(), + ancient_blocks_import_lock: Default::default(), + queue_consensus_message: IoChannelQueue::new(usize::max_value()), + last_hashes: 
RwLock::new(VecDeque::new()), + factories, + history, + on_user_defaults_change: Mutex::new(None), + registrar_address, + exit_handler: Mutex::new(None), + importer, + config, + }); + + // prune old states. + { + let state_db = client.state_db.read().boxed_clone(); + let chain = client.chain.read(); + client.prune_ancient(state_db, &chain)?; + } + + // ensure genesis epoch proof in the DB. + { + let chain = client.chain.read(); + let gh = spec.genesis_header(); + if chain.epoch_transition(0, gh.hash()).is_none() { + trace!(target: "client", "No genesis transition found."); + + let proof = client.with_proving_caller(BlockId::Number(0), |call| { + client.engine.genesis_epoch_data(&gh, call) + }); + let proof = match proof { + Ok(proof) => proof, + Err(e) => { + warn!(target: "client", "Error generating genesis epoch data: {}. Snapshots generated may not be complete.", e); + Vec::new() + } + }; + + debug!(target: "client", "Obtained genesis transition proof: {:?}", proof); + + let mut batch = DBTransaction::new(); + chain.insert_epoch_transition( + &mut batch, + 0, + EpochTransition { + block_hash: gh.hash(), + block_number: 0, + proof: proof, + }, + ); + + client.db.read().key_value().write_buffered(batch); + } + } + + // ensure buffered changes are flushed. + client.db.read().key_value().flush()?; + Ok(client) + } + + /// Wakes up client if it's a sleep. + pub fn keep_alive(&self) { + let should_wake = match *self.mode.lock() { + Mode::Dark(..) | Mode::Passive(..) => true, + _ => false, + }; + if should_wake { + self.wake_up(); + (*self.sleep_state.lock()).last_activity = Some(Instant::now()); + } + } + + /// Adds an actor to be notified on certain events + pub fn add_notify(&self, target: Arc) { + self.notify.write().push(Arc::downgrade(&target)); + } + + /// Set a closure to call when the client wants to be restarted. + /// + /// The parameter passed to the callback is the name of the new chain spec to use after + /// the restart. 
+ pub fn set_exit_handler(&self, f: F) + where + F: Fn(String) + 'static + Send, + { + *self.exit_handler.lock() = Some(Box::new(f)); + } + + /// Returns engine reference. + pub fn engine(&self) -> &EthEngine { + &*self.engine + } + + fn notify(&self, f: F) + where + F: Fn(&ChainNotify), + { + for np in &*self.notify.read() { + if let Some(n) = np.upgrade() { + f(&*n); + } + } + } + + /// Register an action to be done if a mode/spec_name change happens. + pub fn on_user_defaults_change(&self, f: F) + where + F: 'static + FnMut(Option) + Send, + { + *self.on_user_defaults_change.lock() = Some(Box::new(f)); + } + + /// Flush the block import queue. + pub fn flush_queue(&self) { + self.importer.block_queue.flush(); + while !self.importer.block_queue.is_empty() { + self.import_verified_blocks(); + } + } + + /// The env info as of the best block. + pub fn latest_env_info(&self) -> EnvInfo { + self.env_info(BlockId::Latest) + .expect("Best block header always stored; qed") + } + + /// The env info as of a given block. + /// returns `None` if the block unknown. 
+ pub fn env_info(&self, id: BlockId) -> Option { + self.block_header(id).map(|header| EnvInfo { + number: header.number(), + author: header.author(), + timestamp: header.timestamp(), + difficulty: header.difficulty(), + last_hashes: self.build_last_hashes(&header.parent_hash()), + gas_used: U256::default(), + gas_limit: header.gas_limit(), + }) + } + + fn build_last_hashes(&self, parent_hash: &H256) -> Arc { + { + let hashes = self.last_hashes.read(); + if hashes.front().map_or(false, |h| h == parent_hash) { + let mut res = Vec::from(hashes.clone()); + res.resize(256, H256::default()); + return Arc::new(res); + } + } + let mut last_hashes = LastHashes::new(); + last_hashes.resize(256, H256::default()); + last_hashes[0] = parent_hash.clone(); + let chain = self.chain.read(); + for i in 0..255 { + match chain.block_details(&last_hashes[i]) { + Some(details) => { + last_hashes[i + 1] = details.parent.clone(); + } + None => break, + } + } + let mut cached_hashes = self.last_hashes.write(); + *cached_hashes = VecDeque::from(last_hashes.clone()); + Arc::new(last_hashes) + } + + /// This is triggered by a message coming from a block queue when the block is ready for insertion + pub fn import_verified_blocks(&self) -> usize { + self.importer.import_verified_blocks(self) + } + + // use a state-proving closure for the given block. + fn with_proving_caller(&self, id: BlockId, with_call: F) -> T + where + F: FnOnce(&::machine::Call) -> T, + { + let call = |a, d| { + let tx = self.contract_call_tx(id, a, d); + let (result, items) = self + .prove_transaction(tx, id) + .ok_or_else(|| format!("Unable to make call. State unavailable?"))?; + + let items = items.into_iter().map(|x| x.to_vec()).collect(); + Ok((result, items)) + }; + + with_call(&call) + } + + // prune ancient states until below the memory limit or only the minimum amount remain. 
+ fn prune_ancient( + &self, + mut state_db: StateDB, + chain: &BlockChain, + ) -> Result<(), ::error::Error> { + let number = match state_db.journal_db().latest_era() { + Some(n) => n, + None => return Ok(()), + }; + + // prune all ancient eras until we're below the memory target, + // but have at least the minimum number of states. + loop { + let needs_pruning = state_db.journal_db().is_pruned() + && state_db.journal_db().journal_size() >= self.config.history_mem; + + if !needs_pruning { + break; + } + match state_db.journal_db().earliest_era() { + Some(era) if era + self.history <= number => { + trace!(target: "client", "Pruning state for ancient era {}", era); + match chain.block_hash(era) { + Some(ancient_hash) => { + let mut batch = DBTransaction::new(); + state_db.mark_canonical(&mut batch, era, &ancient_hash)?; + self.db.read().key_value().write_buffered(batch); + state_db.journal_db().flush(); + } + None => debug!(target: "client", "Missing expected hash for block {}", era), + } + } + _ => break, // means that every era is kept, no pruning necessary. + } + } + + Ok(()) + } + + fn update_last_hashes(&self, parent: &H256, hash: &H256) { + let mut hashes = self.last_hashes.write(); + if hashes.front().map_or(false, |h| h == parent) { + if hashes.len() > 255 { + hashes.pop_back(); + } + hashes.push_front(hash.clone()); + } + } + + /// Get shared miner reference. + #[cfg(test)] + pub fn miner(&self) -> Arc { + self.importer.miner.clone() + } + + #[cfg(test)] + pub fn state_db(&self) -> ::parking_lot::RwLockReadGuard { + self.state_db.read() + } + + #[cfg(test)] + pub fn chain(&self) -> Arc { + self.chain.read().clone() + } + + /// Replace io channel. Useful for testing. + pub fn set_io_channel(&self, io_channel: IoChannel) { + *self.io_channel.write() = io_channel; + } + + /// Get a copy of the best block's state. 
+ pub fn latest_state_and_header(&self) -> (State, Header) { + let header = self.best_block_header(); + let state = State::from_existing( + self.state_db.read().boxed_clone_canon(&header.hash()), + *header.state_root(), + self.engine.account_start_nonce(header.number()), + self.factories.clone(), + ) + .expect("State root of best block header always valid."); + (state, header) + } + + /// Attempt to get a copy of a specific block's final state. + /// + /// This will not fail if given BlockId::Latest. + /// Otherwise, this can fail (but may not) if the DB prunes state or the block + /// is unknown. + pub fn state_at(&self, id: BlockId) -> Option> { + // fast path for latest state. + if let BlockId::Latest = id { + let (state, _) = self.latest_state_and_header(); + return Some(state); + } + + let block_number = match self.block_number(id) { + Some(num) => num, + None => return None, + }; + + self.block_header(id).and_then(|header| { + let db = self.state_db.read().boxed_clone(); + + // early exit for pruned blocks + if db.is_pruned() && self.pruning_info().earliest_state > block_number { + return None; + } + + let root = header.state_root(); + State::from_existing( + db, + root, + self.engine.account_start_nonce(block_number), + self.factories.clone(), + ) + .ok() + }) + } + + /// Attempt to get a copy of a specific block's beginning state. + /// + /// This will not fail if given BlockId::Latest. + /// Otherwise, this can fail (but may not) if the DB prunes state. + pub fn state_at_beginning(&self, id: BlockId) -> Option> { + match self.block_number(id) { + None => None, + Some(0) => self.state_at(id), + Some(n) => self.state_at(BlockId::Number(n - 1)), + } + } + + /// Get a copy of the best block's state. + pub fn state(&self) -> impl StateInfo { + let (state, _) = self.latest_state_and_header(); + state + } + + /// Get info on the cache. + pub fn blockchain_cache_info(&self) -> BlockChainCacheSize { + self.chain.read().cache_size() + } + + /// Get the report. 
+ pub fn report(&self) -> ClientReport { + let mut report = self.report.read().clone(); + report.state_db_mem = self.state_db.read().mem_used(); + report + } + + /// Tick the client. + // TODO: manage by real events. + pub fn tick(&self, prevent_sleep: bool) { + self.check_garbage(); + if !prevent_sleep { + self.check_snooze(); + } + } + + fn check_garbage(&self) { + self.chain.read().collect_garbage(); + self.importer.block_queue.collect_garbage(); + self.tracedb.read().collect_garbage(); + } + + fn check_snooze(&self) { + let mode = self.mode.lock().clone(); + match mode { + Mode::Dark(timeout) => { + let mut ss = self.sleep_state.lock(); + if let Some(t) = ss.last_activity { + if Instant::now() > t + timeout { + self.sleep(false); + ss.last_activity = None; + } + } + } + Mode::Passive(timeout, wakeup_after) => { + let mut ss = self.sleep_state.lock(); + let now = Instant::now(); + if let Some(t) = ss.last_activity { + if now > t + timeout { + self.sleep(false); + ss.last_activity = None; + ss.last_autosleep = Some(now); + } + } + if let Some(t) = ss.last_autosleep { + if now > t + wakeup_after { + self.wake_up(); + ss.last_activity = Some(now); + ss.last_autosleep = None; + } + } + } + _ => {} + } + } + + /// Take a snapshot at the given block. + /// If the ID given is "latest", this will default to 1000 blocks behind. 
+ pub fn take_snapshot( + &self, + writer: W, + at: BlockId, + p: &snapshot::Progress, + ) -> Result<(), EthcoreError> { + let db = self.state_db.read().journal_db().boxed_clone(); + let best_block_number = self.chain_info().best_block_number; + let block_number = self + .block_number(at) + .ok_or_else(|| snapshot::Error::InvalidStartingBlock(at))?; + + if db.is_pruned() && self.pruning_info().earliest_state > block_number { + return Err(snapshot::Error::OldBlockPrunedDB.into()); + } + + let history = ::std::cmp::min(self.history, 1000); + + let start_hash = match at { + BlockId::Latest => { + let start_num = match db.earliest_era() { + Some(era) => ::std::cmp::max(era, best_block_number.saturating_sub(history)), + None => best_block_number.saturating_sub(history), + }; + + match self.block_hash(BlockId::Number(start_num)) { + Some(h) => h, + None => return Err(snapshot::Error::InvalidStartingBlock(at).into()), + } + } + _ => match self.block_hash(at) { + Some(hash) => hash, + None => return Err(snapshot::Error::InvalidStartingBlock(at).into()), + }, + }; + + let processing_threads = self.config.snapshot.processing_threads; + let chunker = self + .engine + .snapshot_components() + .ok_or(snapshot::Error::SnapshotsUnsupported)?; + snapshot::take_snapshot( + chunker, + &self.chain.read(), + start_hash, + db.as_hash_db(), + writer, + p, + processing_threads, + )?; + Ok(()) + } + + /// Ask the client what the history parameter is. 
+ pub fn pruning_history(&self) -> u64 { + self.history + } + + fn block_hash(chain: &BlockChain, id: BlockId) -> Option { + match id { + BlockId::Hash(hash) => Some(hash), + BlockId::Number(number) => chain.block_hash(number), + BlockId::Earliest => chain.block_hash(0), + BlockId::Latest => Some(chain.best_block_hash()), + } + } + + fn transaction_address(&self, id: TransactionId) -> Option { + match id { + TransactionId::Hash(ref hash) => self.chain.read().transaction_address(hash), + TransactionId::Location(id, index) => { + Self::block_hash(&self.chain.read(), id).map(|hash| TransactionAddress { + block_hash: hash, + index: index, + }) + } + } + } + + fn wake_up(&self) { + if !self.liveness.load(AtomicOrdering::Relaxed) { + self.liveness.store(true, AtomicOrdering::Relaxed); + self.notify(|n| n.start()); + info!(target: "mode", "wake_up: Waking."); + } + } + + fn sleep(&self, force: bool) { + if self.liveness.load(AtomicOrdering::Relaxed) { + // only sleep if the import queue is mostly empty. + if force || (self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON) { + self.liveness.store(false, AtomicOrdering::Relaxed); + self.notify(|n| n.stop()); + info!(target: "mode", "sleep: Sleeping."); + } else { + info!(target: "mode", "sleep: Cannot sleep - syncing ongoing."); + // TODO: Consider uncommenting. + //(*self.sleep_state.lock()).last_activity = Some(Instant::now()); + } + } + } + + // transaction for calling contracts from services like engine. + // from the null sender, with 50M gas. 
+ fn contract_call_tx( + &self, + block_id: BlockId, + address: Address, + data: Bytes, + ) -> SignedTransaction { + let from = Address::default(); + transaction::Transaction { + nonce: self + .nonce(&from, block_id) + .unwrap_or_else(|| self.engine.account_start_nonce(0)), + action: Action::Call(address), + gas: U256::from(50_000_000), + gas_price: U256::default(), + value: U256::default(), + data: data, + } + .fake_sign(from) + } + + fn do_virtual_call( + machine: &::machine::EthereumMachine, + env_info: &EnvInfo, + state: &mut State, + t: &SignedTransaction, + analytics: CallAnalytics, + ) -> Result { + fn call( + state: &mut State, + env_info: &EnvInfo, + machine: &::machine::EthereumMachine, + state_diff: bool, + transaction: &SignedTransaction, + options: TransactOptions, + ) -> Result, CallError> + where + T: trace::Tracer, + V: trace::VMTracer, + { + let options = options.dont_check_nonce().save_output_from_contract(); + let original_state = if state_diff { + Some(state.clone()) + } else { + None + }; + let schedule = machine.schedule(env_info.number); + + let mut ret = Executive::new(state, env_info, &machine, &schedule) + .transact_virtual(transaction, options)?; + + if let Some(original) = original_state { + ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); + } + Ok(ret) + } + + let state_diff = analytics.state_diffing; + + match (analytics.transaction_tracing, analytics.vm_tracing) { + (true, true) => call( + state, + env_info, + machine, + state_diff, + t, + TransactOptions::with_tracing_and_vm_tracing(), + ), + (true, false) => call( + state, + env_info, + machine, + state_diff, + t, + TransactOptions::with_tracing(), + ), + (false, true) => call( + state, + env_info, + machine, + state_diff, + t, + TransactOptions::with_vm_tracing(), + ), + (false, false) => call( + state, + env_info, + machine, + state_diff, + t, + TransactOptions::with_no_tracing(), + ), + } + } + + fn block_number_ref(&self, id: &BlockId) -> Option { 
+ match *id { + BlockId::Number(number) => Some(number), + BlockId::Hash(ref hash) => self.chain.read().block_number(hash), + BlockId::Earliest => Some(0), + BlockId::Latest => Some(self.chain.read().best_block_number()), + } + } + + /// Retrieve a decoded header given `BlockId` + /// + /// This method optimizes access patterns for latest block header + /// to avoid excessive RLP encoding, decoding and hashing. + fn block_header_decoded(&self, id: BlockId) -> Option
{ + match id { + BlockId::Latest => Some(self.chain.read().best_block_header()), + BlockId::Hash(ref hash) if hash == &self.chain.read().best_block_hash() => { + Some(self.chain.read().best_block_header()) + } + BlockId::Number(number) if number == self.chain.read().best_block_number() => { + Some(self.chain.read().best_block_header()) + } + _ => self.block_header(id).and_then(|h| h.decode().ok()), + } + } } impl snapshot::DatabaseRestore for Client { - /// Restart the client with a new backend - fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> { - trace!(target: "snapshot", "Replacing client database with {:?}", new_db); + /// Restart the client with a new backend + fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> { + trace!(target: "snapshot", "Replacing client database with {:?}", new_db); - let _import_lock = self.importer.import_lock.lock(); - let mut state_db = self.state_db.write(); - let mut chain = self.chain.write(); - let mut tracedb = self.tracedb.write(); - self.importer.miner.clear(); - let db = self.db.write(); - db.restore(new_db)?; + let _import_lock = self.importer.import_lock.lock(); + let mut state_db = self.state_db.write(); + let mut chain = self.chain.write(); + let mut tracedb = self.tracedb.write(); + self.importer.miner.clear(); + let db = self.db.write(); + db.restore(new_db)?; - let cache_size = state_db.cache_size(); - *state_db = StateDB::new(journaldb::new(db.key_value().clone(), self.pruning, ::db::COL_STATE), cache_size); - *chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone())); - *tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()); - Ok(()) - } + let cache_size = state_db.cache_size(); + *state_db = StateDB::new( + journaldb::new(db.key_value().clone(), self.pruning, ::db::COL_STATE), + cache_size, + ); + *chain = Arc::new(BlockChain::new( + self.config.blockchain.clone(), + &[], + db.clone(), + )); + *tracedb = 
TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()); + Ok(()) + } } impl BlockChainReset for Client { - fn reset(&self, num: u32) -> Result<(), String> { - if num as u64 > self.pruning_history() { - return Err("Attempting to reset to block with pruned state".into()) - } else if num == 0 { - return Err("invalid number of blocks to reset".into()) - } + fn reset(&self, num: u32) -> Result<(), String> { + if num as u64 > self.pruning_history() { + return Err("Attempting to reset to block with pruned state".into()); + } else if num == 0 { + return Err("invalid number of blocks to reset".into()); + } - let mut blocks_to_delete = Vec::with_capacity(num as usize); - let mut best_block_hash = self.chain.read().best_block_hash(); - let mut batch = DBTransaction::with_capacity(blocks_to_delete.len()); + let mut blocks_to_delete = Vec::with_capacity(num as usize); + let mut best_block_hash = self.chain.read().best_block_hash(); + let mut batch = DBTransaction::with_capacity(blocks_to_delete.len()); - for _ in 0..num { - let current_header = self.chain.read().block_header_data(&best_block_hash) - .expect("best_block_hash was fetched from db; block_header_data should exist in db; qed"); - best_block_hash = current_header.parent_hash(); + for _ in 0..num { + let current_header = self + .chain + .read() + .block_header_data(&best_block_hash) + .expect( + "best_block_hash was fetched from db; block_header_data should exist in db; qed", + ); + best_block_hash = current_header.parent_hash(); - let (number, hash) = (current_header.number(), current_header.hash()); - batch.delete(::db::COL_HEADERS, &hash); - batch.delete(::db::COL_BODIES, &hash); - Writable::delete:: - (&mut batch, ::db::COL_EXTRA, &hash); - Writable::delete:: - (&mut batch, ::db::COL_EXTRA, &number); + let (number, hash) = (current_header.number(), current_header.hash()); + batch.delete(::db::COL_HEADERS, &hash); + batch.delete(::db::COL_BODIES, &hash); + Writable::delete::(&mut batch, 
::db::COL_EXTRA, &hash);
+ Writable::delete::<H256, BlockNumberKey>(&mut batch, ::db::COL_EXTRA, &number);

- blocks_to_delete.push((number, hash));
- }
+ blocks_to_delete.push((number, hash));
+ }

- let hashes = blocks_to_delete.iter().map(|(_, hash)| hash).collect::<Vec<_>>();
- info!("Deleting block hashes {}",
- Colour::Red
- .bold()
- .paint(format!("{:#?}", hashes))
- );
+ let hashes = blocks_to_delete
+ .iter()
+ .map(|(_, hash)| hash)
+ .collect::<Vec<_>>();
+ info!(
+ "Deleting block hashes {}",
+ Colour::Red.bold().paint(format!("{:#?}", hashes))
+ );

- let mut best_block_details = Readable::read::<BlockDetails, H264>(
- &**self.db.read().key_value(),
- ::db::COL_EXTRA,
- &best_block_hash
- ).expect("block was previously imported; best_block_details should exist; qed");
+ let mut best_block_details = Readable::read::<BlockDetails, H264>(
+ &**self.db.read().key_value(),
+ ::db::COL_EXTRA,
+ &best_block_hash,
+ )
+ .expect("block was previously imported; best_block_details should exist; qed");

- let (_, last_hash) = blocks_to_delete.last()
- .expect("num is > 0; blocks_to_delete can't be empty; qed");
- // remove the last block as a child so that it can be re-imported
- // ethcore/blockchain/src/blockchain.rs/Blockchain::is_known_child()
- best_block_details.children.retain(|h| *h != *last_hash);
- batch.write(
- ::db::COL_EXTRA,
- &best_block_hash,
- &best_block_details
- );
- // update the new best block hash
- batch.put(::db::COL_EXTRA, b"best", &best_block_hash);
+ let (_, last_hash) = blocks_to_delete
+ .last()
+ .expect("num is > 0; blocks_to_delete can't be empty; qed");
+ // remove the last block as a child so that it can be re-imported
+ // ethcore/blockchain/src/blockchain.rs/Blockchain::is_known_child()
+ best_block_details.children.retain(|h| *h != *last_hash);
+ batch.write(::db::COL_EXTRA, &best_block_hash, &best_block_details);
+ // update the new best block hash
+ batch.put(::db::COL_EXTRA, b"best", &best_block_hash);

- self.db.read()
- .key_value()
- .write(batch)
- .map_err(|err| format!("could not delete blocks; 
io error occurred: {}", err))?;
+ self.db
+ .read()
+ .key_value()
+ .write(batch)
+ .map_err(|err| format!("could not delete blocks; io error occurred: {}", err))?;

- info!("New best block hash {}", Colour::Green.bold().paint(format!("{:?}", best_block_hash)));
+ info!(
+ "New best block hash {}",
+ Colour::Green.bold().paint(format!("{:?}", best_block_hash))
+ );

- Ok(())
- }
+ Ok(())
+ }
}

impl Nonce for Client {
- fn nonce(&self, address: &Address, id: BlockId) -> Option<U256> {
- self.state_at(id).and_then(|s| s.nonce(address).ok())
- }
+ fn nonce(&self, address: &Address, id: BlockId) -> Option<U256> {
+ self.state_at(id).and_then(|s| s.nonce(address).ok())
+ }
}

impl Balance for Client {
- fn balance(&self, address: &Address, state: StateOrBlock) -> Option<U256> {
- match state {
- StateOrBlock::State(s) => s.balance(address).ok(),
- StateOrBlock::Block(id) => self.state_at(id).and_then(|s| s.balance(address).ok())
- }
- }
+ fn balance(&self, address: &Address, state: StateOrBlock) -> Option<U256> {
+ match state {
+ StateOrBlock::State(s) => s.balance(address).ok(),
+ StateOrBlock::Block(id) => self.state_at(id).and_then(|s| s.balance(address).ok()),
+ }
+ }
}

impl AccountData for Client {}

impl ChainInfo for Client {
- fn chain_info(&self) -> BlockChainInfo {
- let mut chain_info = self.chain.read().chain_info();
- chain_info.pending_total_difficulty = chain_info.total_difficulty + self.importer.block_queue.total_difficulty();
- chain_info
- }
+ fn chain_info(&self) -> BlockChainInfo {
+ let mut chain_info = self.chain.read().chain_info();
+ chain_info.pending_total_difficulty =
+ chain_info.total_difficulty + self.importer.block_queue.total_difficulty();
+ chain_info
+ }
}

impl BlockInfo for Client {
- fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
- let chain = self.chain.read();

- Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash))
- }
+ fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
+ let chain = self.chain.read();

+ Self::block_hash(&chain, 
id).and_then(|hash| chain.block_header_data(&hash)) + } - fn best_block_header(&self) -> Header { - self.chain.read().best_block_header() - } + fn best_block_header(&self) -> Header { + self.chain.read().best_block_header() + } - fn block(&self, id: BlockId) -> Option { - let chain = self.chain.read(); + fn block(&self, id: BlockId) -> Option { + let chain = self.chain.read(); - Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash)) - } + Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash)) + } - fn code_hash(&self, address: &Address, id: BlockId) -> Option { - self.state_at(id).and_then(|s| s.code_hash(address).unwrap_or(None)) - } + fn code_hash(&self, address: &Address, id: BlockId) -> Option { + self.state_at(id) + .and_then(|s| s.code_hash(address).unwrap_or(None)) + } } impl TransactionInfo for Client { - fn transaction_block(&self, id: TransactionId) -> Option { - self.transaction_address(id).map(|addr| addr.block_hash) - } + fn transaction_block(&self, id: TransactionId) -> Option { + self.transaction_address(id).map(|addr| addr.block_hash) + } } impl BlockChainTrait for Client {} impl RegistryInfo for Client { - fn registry_address(&self, name: String, block: BlockId) -> Option
{ - use ethabi::FunctionOutputDecoder; + fn registry_address(&self, name: String, block: BlockId) -> Option
{ + use ethabi::FunctionOutputDecoder; - let address = self.registrar_address?; + let address = self.registrar_address?; - let (data, decoder) = registry::functions::get_address::call(keccak(name.as_bytes()), "A"); - let value = decoder.decode(&self.call_contract(block, address, data).ok()?).ok()?; - if value.is_zero() { - None - } else { - Some(value) - } - } + let (data, decoder) = registry::functions::get_address::call(keccak(name.as_bytes()), "A"); + let value = decoder + .decode(&self.call_contract(block, address, data).ok()?) + .ok()?; + if value.is_zero() { + None + } else { + Some(value) + } + } } impl CallContract for Client { - fn call_contract(&self, block_id: BlockId, address: Address, data: Bytes) -> Result { - let state_pruned = || CallError::StatePruned.to_string(); - let state = &mut self.state_at(block_id).ok_or_else(&state_pruned)?; - let header = self.block_header_decoded(block_id).ok_or_else(&state_pruned)?; + fn call_contract( + &self, + block_id: BlockId, + address: Address, + data: Bytes, + ) -> Result { + let state_pruned = || CallError::StatePruned.to_string(); + let state = &mut self.state_at(block_id).ok_or_else(&state_pruned)?; + let header = self + .block_header_decoded(block_id) + .ok_or_else(&state_pruned)?; - let transaction = self.contract_call_tx(block_id, address, data); + let transaction = self.contract_call_tx(block_id, address, data); - self.call(&transaction, Default::default(), state, &header) - .map_err(|e| format!("{:?}", e)) - .map(|executed| executed.output) - } + self.call(&transaction, Default::default(), state, &header) + .map_err(|e| format!("{:?}", e)) + .map(|executed| executed.output) + } } impl ImportBlock for Client { - fn import_block(&self, unverified: Unverified) -> EthcoreResult { - if self.chain.read().is_known(&unverified.hash()) { - bail!(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain)); - } + fn import_block(&self, unverified: Unverified) -> EthcoreResult { + if 
self.chain.read().is_known(&unverified.hash()) { + bail!(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain)); + } - let status = self.block_status(BlockId::Hash(unverified.parent_hash())); - if status == BlockStatus::Unknown { - bail!(EthcoreErrorKind::Block(BlockError::UnknownParent(unverified.parent_hash()))); - } + let status = self.block_status(BlockId::Hash(unverified.parent_hash())); + if status == BlockStatus::Unknown { + bail!(EthcoreErrorKind::Block(BlockError::UnknownParent( + unverified.parent_hash() + ))); + } - let raw = if self.importer.block_queue.is_empty() { - Some(( - unverified.bytes.clone(), - unverified.header.hash(), - *unverified.header.difficulty(), - )) - } else { None }; + let raw = if self.importer.block_queue.is_empty() { + Some(( + unverified.bytes.clone(), + unverified.header.hash(), + *unverified.header.difficulty(), + )) + } else { + None + }; - match self.importer.block_queue.import(unverified) { - Ok(hash) => { - if let Some((raw, hash, difficulty)) = raw { - self.notify(move |n| n.block_pre_import(&raw, &hash, &difficulty)); - } - Ok(hash) - }, - // we only care about block errors (not import errors) - Err((block, EthcoreError(EthcoreErrorKind::Block(err), _))) => { - self.importer.bad_blocks.report(block.bytes, format!("{:?}", err)); - bail!(EthcoreErrorKind::Block(err)) - }, - Err((_, e)) => Err(e), - } - } + match self.importer.block_queue.import(unverified) { + Ok(hash) => { + if let Some((raw, hash, difficulty)) = raw { + self.notify(move |n| n.block_pre_import(&raw, &hash, &difficulty)); + } + Ok(hash) + } + // we only care about block errors (not import errors) + Err((block, EthcoreError(EthcoreErrorKind::Block(err), _))) => { + self.importer + .bad_blocks + .report(block.bytes, format!("{:?}", err)); + bail!(EthcoreErrorKind::Block(err)) + } + Err((_, e)) => Err(e), + } + } } impl StateClient for Client { - type State = State<::state_db::StateDB>; + type State = State<::state_db::StateDB>; - fn 
latest_state_and_header(&self) -> (Self::State, Header) { - Client::latest_state_and_header(self) - } + fn latest_state_and_header(&self) -> (Self::State, Header) { + Client::latest_state_and_header(self) + } - fn state_at(&self, id: BlockId) -> Option { - Client::state_at(self, id) - } + fn state_at(&self, id: BlockId) -> Option { + Client::state_at(self, id) + } } impl Call for Client { - type State = State<::state_db::StateDB>; + type State = State<::state_db::StateDB>; - fn call(&self, transaction: &SignedTransaction, analytics: CallAnalytics, state: &mut Self::State, header: &Header) -> Result { - let env_info = EnvInfo { - number: header.number(), - author: header.author().clone(), - timestamp: header.timestamp(), - difficulty: header.difficulty().clone(), - last_hashes: self.build_last_hashes(header.parent_hash()), - gas_used: U256::default(), - gas_limit: U256::max_value(), - }; - let machine = self.engine.machine(); + fn call( + &self, + transaction: &SignedTransaction, + analytics: CallAnalytics, + state: &mut Self::State, + header: &Header, + ) -> Result { + let env_info = EnvInfo { + number: header.number(), + author: header.author().clone(), + timestamp: header.timestamp(), + difficulty: header.difficulty().clone(), + last_hashes: self.build_last_hashes(header.parent_hash()), + gas_used: U256::default(), + gas_limit: U256::max_value(), + }; + let machine = self.engine.machine(); - Self::do_virtual_call(&machine, &env_info, state, transaction, analytics) - } + Self::do_virtual_call(&machine, &env_info, state, transaction, analytics) + } - fn call_many(&self, transactions: &[(SignedTransaction, CallAnalytics)], state: &mut Self::State, header: &Header) -> Result, CallError> { - let mut env_info = EnvInfo { - number: header.number(), - author: header.author().clone(), - timestamp: header.timestamp(), - difficulty: header.difficulty().clone(), - last_hashes: self.build_last_hashes(header.parent_hash()), - gas_used: U256::default(), - gas_limit: 
U256::max_value(), - }; + fn call_many( + &self, + transactions: &[(SignedTransaction, CallAnalytics)], + state: &mut Self::State, + header: &Header, + ) -> Result, CallError> { + let mut env_info = EnvInfo { + number: header.number(), + author: header.author().clone(), + timestamp: header.timestamp(), + difficulty: header.difficulty().clone(), + last_hashes: self.build_last_hashes(header.parent_hash()), + gas_used: U256::default(), + gas_limit: U256::max_value(), + }; - let mut results = Vec::with_capacity(transactions.len()); - let machine = self.engine.machine(); + let mut results = Vec::with_capacity(transactions.len()); + let machine = self.engine.machine(); - for &(ref t, analytics) in transactions { - let ret = Self::do_virtual_call(machine, &env_info, state, t, analytics)?; - env_info.gas_used = ret.cumulative_gas_used; - results.push(ret); - } + for &(ref t, analytics) in transactions { + let ret = Self::do_virtual_call(machine, &env_info, state, t, analytics)?; + env_info.gas_used = ret.cumulative_gas_used; + results.push(ret); + } - Ok(results) - } + Ok(results) + } - fn estimate_gas(&self, t: &SignedTransaction, state: &Self::State, header: &Header) -> Result { - let (mut upper, max_upper, env_info) = { - let init = *header.gas_limit(); - let max = init * U256::from(10); + fn estimate_gas( + &self, + t: &SignedTransaction, + state: &Self::State, + header: &Header, + ) -> Result { + let (mut upper, max_upper, env_info) = { + let init = *header.gas_limit(); + let max = init * U256::from(10); - let env_info = EnvInfo { - number: header.number(), - author: header.author().clone(), - timestamp: header.timestamp(), - difficulty: header.difficulty().clone(), - last_hashes: self.build_last_hashes(header.parent_hash()), - gas_used: U256::default(), - gas_limit: max, - }; + let env_info = EnvInfo { + number: header.number(), + author: header.author().clone(), + timestamp: header.timestamp(), + difficulty: header.difficulty().clone(), + last_hashes: 
self.build_last_hashes(header.parent_hash()), + gas_used: U256::default(), + gas_limit: max, + }; - (init, max, env_info) - }; + (init, max, env_info) + }; - let sender = t.sender(); - let options = || TransactOptions::with_tracing().dont_check_nonce(); + let sender = t.sender(); + let options = || TransactOptions::with_tracing().dont_check_nonce(); - let exec = |gas| { - let mut tx = t.as_unsigned().clone(); - tx.gas = gas; - let tx = tx.fake_sign(sender); + let exec = |gas| { + let mut tx = t.as_unsigned().clone(); + tx.gas = gas; + let tx = tx.fake_sign(sender); - let mut clone = state.clone(); - let machine = self.engine.machine(); - let schedule = machine.schedule(env_info.number); - Executive::new(&mut clone, &env_info, &machine, &schedule) - .transact_virtual(&tx, options()) - }; + let mut clone = state.clone(); + let machine = self.engine.machine(); + let schedule = machine.schedule(env_info.number); + Executive::new(&mut clone, &env_info, &machine, &schedule) + .transact_virtual(&tx, options()) + }; - let cond = |gas| { - exec(gas) - .ok() - .map_or(false, |r| r.exception.is_none()) - }; + let cond = |gas| exec(gas).ok().map_or(false, |r| r.exception.is_none()); - if !cond(upper) { - upper = max_upper; - match exec(upper) { - Ok(v) => { - if let Some(exception) = v.exception { - return Err(CallError::Exceptional(exception)) - } - }, - Err(_e) => { - trace!(target: "estimate_gas", "estimate_gas failed with {}", upper); - let err = ExecutionError::Internal(format!("Requires higher than upper limit of {}", upper)); - return Err(err.into()) - } - } - } - let lower = t.gas_required(&self.engine.schedule(env_info.number)).into(); - if cond(lower) { - trace!(target: "estimate_gas", "estimate_gas succeeded with {}", lower); - return Ok(lower) - } + if !cond(upper) { + upper = max_upper; + match exec(upper) { + Ok(v) => { + if let Some(exception) = v.exception { + return Err(CallError::Exceptional(exception)); + } + } + Err(_e) => { + trace!(target: "estimate_gas", 
"estimate_gas failed with {}", upper); + let err = ExecutionError::Internal(format!( + "Requires higher than upper limit of {}", + upper + )); + return Err(err.into()); + } + } + } + let lower = t + .gas_required(&self.engine.schedule(env_info.number)) + .into(); + if cond(lower) { + trace!(target: "estimate_gas", "estimate_gas succeeded with {}", lower); + return Ok(lower); + } - /// Find transition point between `lower` and `upper` where `cond` changes from `false` to `true`. - /// Returns the lowest value between `lower` and `upper` for which `cond` returns true. - /// We assert: `cond(lower) = false`, `cond(upper) = true` - fn binary_chop(mut lower: U256, mut upper: U256, mut cond: F) -> Result - where F: FnMut(U256) -> bool - { - while upper - lower > 1.into() { - let mid = (lower + upper) / 2; - trace!(target: "estimate_gas", "{} .. {} .. {}", lower, mid, upper); - let c = cond(mid); - match c { - true => upper = mid, - false => lower = mid, - }; - trace!(target: "estimate_gas", "{} => {} .. {}", c, lower, upper); - } - Ok(upper) - } + /// Find transition point between `lower` and `upper` where `cond` changes from `false` to `true`. + /// Returns the lowest value between `lower` and `upper` for which `cond` returns true. + /// We assert: `cond(lower) = false`, `cond(upper) = true` + fn binary_chop(mut lower: U256, mut upper: U256, mut cond: F) -> Result + where + F: FnMut(U256) -> bool, + { + while upper - lower > 1.into() { + let mid = (lower + upper) / 2; + trace!(target: "estimate_gas", "{} .. {} .. {}", lower, mid, upper); + let c = cond(mid); + match c { + true => upper = mid, + false => lower = mid, + }; + trace!(target: "estimate_gas", "{} => {} .. {}", c, lower, upper); + } + Ok(upper) + } - // binary chop to non-excepting call with gas somewhere between 21000 and block gas limit - trace!(target: "estimate_gas", "estimate_gas chopping {} .. 
{}", lower, upper); - binary_chop(lower, upper, cond) - } + // binary chop to non-excepting call with gas somewhere between 21000 and block gas limit + trace!(target: "estimate_gas", "estimate_gas chopping {} .. {}", lower, upper); + binary_chop(lower, upper, cond) + } } impl EngineInfo for Client { - fn engine(&self) -> &EthEngine { - Client::engine(self) - } + fn engine(&self) -> &EthEngine { + Client::engine(self) + } } impl BadBlocks for Client { - fn bad_blocks(&self) -> Vec<(Unverified, String)> { - self.importer.bad_blocks.bad_blocks() - } + fn bad_blocks(&self) -> Vec<(Unverified, String)> { + self.importer.bad_blocks.bad_blocks() + } } impl BlockChainClient for Client { - fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result { - let address = self.transaction_address(id).ok_or(CallError::TransactionNotFound)?; - let block = BlockId::Hash(address.block_hash); - - const PROOF: &'static str = "The transaction address contains a valid index within block; qed"; - Ok(self.replay_block_transactions(block, analytics)?.nth(address.index).expect(PROOF).1) - } - - fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result>, CallError> { - let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?; - let body = self.block_body(block).ok_or(CallError::StatePruned)?; - let mut state = self.state_at_beginning(block).ok_or(CallError::StatePruned)?; - let txs = body.transactions(); - let engine = self.engine.clone(); - - const PROOF: &'static str = "Transactions fetched from blockchain; blockchain transactions are valid; qed"; - const EXECUTE_PROOF: &'static str = "Transaction replayed; qed"; - - Ok(Box::new(txs.into_iter() - .map(move |t| { - let transaction_hash = t.hash(); - let t = SignedTransaction::new(t).expect(PROOF); - let machine = engine.machine(); - let x = Self::do_virtual_call(machine, &env_info, &mut state, &t, analytics).expect(EXECUTE_PROOF); - env_info.gas_used = env_info.gas_used + x.gas_used; - 
(transaction_hash, x) - }))) - } - - fn mode(&self) -> Mode { - let r = self.mode.lock().clone().into(); - trace!(target: "mode", "Asked for mode = {:?}. returning {:?}", &*self.mode.lock(), r); - r - } - - fn disable(&self) { - self.set_mode(Mode::Off); - self.enabled.store(false, AtomicOrdering::Relaxed); - self.clear_queue(); - } - - fn set_mode(&self, new_mode: Mode) { - trace!(target: "mode", "Client::set_mode({:?})", new_mode); - if !self.enabled.load(AtomicOrdering::Relaxed) { - return; - } - { - let mut mode = self.mode.lock(); - *mode = new_mode.clone().into(); - trace!(target: "mode", "Mode now {:?}", &*mode); - if let Some(ref mut f) = *self.on_user_defaults_change.lock() { - trace!(target: "mode", "Making callback..."); - f(Some((&*mode).clone())) - } - } - match new_mode { - Mode::Active => self.wake_up(), - Mode::Off => self.sleep(true), - _ => {(*self.sleep_state.lock()).last_activity = Some(Instant::now()); } - } - } - - fn spec_name(&self) -> String { - self.config.spec_name.clone() - } - - fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()> { - trace!(target: "mode", "Client::set_spec_name({:?})", new_spec_name); - if !self.enabled.load(AtomicOrdering::Relaxed) { - return Err(()); - } - if let Some(ref h) = *self.exit_handler.lock() { - (*h)(new_spec_name); - Ok(()) - } else { - warn!("Not hypervised; cannot change chain."); - Err(()) - } - } - - fn block_number(&self, id: BlockId) -> Option { - self.block_number_ref(&id) - } - - fn block_body(&self, id: BlockId) -> Option { - let chain = self.chain.read(); - - Self::block_hash(&chain, id).and_then(|hash| chain.block_body(&hash)) - } - - fn block_status(&self, id: BlockId) -> BlockStatus { - let chain = self.chain.read(); - match Self::block_hash(&chain, id) { - Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, - Some(hash) => self.importer.block_queue.status(&hash).into(), - None => BlockStatus::Unknown - } - } - - fn block_total_difficulty(&self, id: BlockId) -> Option 
{ - let chain = self.chain.read(); - - Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) - } - - fn storage_root(&self, address: &Address, id: BlockId) -> Option { - self.state_at(id).and_then(|s| s.storage_root(address).ok()).and_then(|x| x) - } - - fn block_hash(&self, id: BlockId) -> Option { - let chain = self.chain.read(); - Self::block_hash(&chain, id) - } - - fn code(&self, address: &Address, state: StateOrBlock) -> Option> { - let result = match state { - StateOrBlock::State(s) => s.code(address).ok(), - StateOrBlock::Block(id) => self.state_at(id).and_then(|s| s.code(address).ok()) - }; - - // Converting from `Option>>` to `Option>` - result.map(|c| c.map(|c| (&*c).clone())) - } - - fn storage_at(&self, address: &Address, position: &H256, state: StateOrBlock) -> Option { - match state { - StateOrBlock::State(s) => s.storage_at(address, position).ok(), - StateOrBlock::Block(id) => self.state_at(id).and_then(|s| s.storage_at(address, position).ok()) - } - } - - fn list_accounts(&self, id: BlockId, after: Option<&Address>, count: u64) -> Option> { - if !self.factories.trie.is_fat() { - trace!(target: "fatdb", "list_accounts: Not a fat DB"); - return None; - } - - let state = match self.state_at(id) { - Some(state) => state, - _ => return None, - }; - - let (root, db) = state.drop(); - let db = &db.as_hash_db(); - let trie = match self.factories.trie.readonly(db, &root) { - Ok(trie) => trie, - _ => { - trace!(target: "fatdb", "list_accounts: Couldn't open the DB"); - return None; - } - }; - - let mut iter = match trie.iter() { - Ok(iter) => iter, - _ => return None, - }; - - if let Some(after) = after { - if let Err(e) = iter.seek(after) { - trace!(target: "fatdb", "list_accounts: Couldn't seek the DB: {:?}", e); - } else { - // Position the iterator after the `after` element - iter.next(); - } - } - - let accounts = iter.filter_map(|item| { - item.ok().map(|(addr, _)| Address::from_slice(&addr)) - 
}).take(count as usize).collect(); - - Some(accounts) - } - - fn list_storage(&self, id: BlockId, account: &Address, after: Option<&H256>, count: u64) -> Option> { - if !self.factories.trie.is_fat() { - trace!(target: "fatdb", "list_storage: Not a fat DB"); - return None; - } - - let state = match self.state_at(id) { - Some(state) => state, - _ => return None, - }; - - let root = match state.storage_root(account) { - Ok(Some(root)) => root, - _ => return None, - }; - - let (_, db) = state.drop(); - let account_db = &self.factories.accountdb.readonly(db.as_hash_db(), keccak(account)); - let account_db = &account_db.as_hash_db(); - let trie = match self.factories.trie.readonly(account_db, &root) { - Ok(trie) => trie, - _ => { - trace!(target: "fatdb", "list_storage: Couldn't open the DB"); - return None; - } - }; - - let mut iter = match trie.iter() { - Ok(iter) => iter, - _ => return None, - }; - - if let Some(after) = after { - if let Err(e) = iter.seek(after) { - trace!(target: "fatdb", "list_storage: Couldn't seek the DB: {:?}", e); - } else { - // Position the iterator after the `after` element - iter.next(); - } - } - - let keys = iter.filter_map(|item| { - item.ok().map(|(key, _)| H256::from_slice(&key)) - }).take(count as usize).collect(); - - Some(keys) - } - - fn transaction(&self, id: TransactionId) -> Option { - self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address)) - } - - fn uncle(&self, id: UncleId) -> Option { - let index = id.position; - self.block_body(id.block).and_then(|body| body.view().uncle_rlp_at(index)) - .map(encoded::Header::new) - } - - fn transaction_receipt(&self, id: TransactionId) -> Option { - // NOTE Don't use block_receipts here for performance reasons - let address = self.transaction_address(id)?; - let hash = address.block_hash; - let chain = self.chain.read(); - let number = chain.block_number(&hash)?; - let body = chain.block_body(&hash)?; - let mut receipts = 
chain.block_receipts(&hash)?.receipts; - receipts.truncate(address.index + 1); - - let transaction = body.view().localized_transaction_at(&hash, number, address.index)?; - let receipt = receipts.pop()?; - let gas_used = receipts.last().map_or_else(|| 0.into(), |r| r.gas_used); - let no_of_logs = receipts.into_iter().map(|receipt| receipt.logs.len()).sum::(); - - let receipt = transaction_receipt(self.engine().machine(), transaction, receipt, gas_used, no_of_logs); - Some(receipt) - } - - fn localized_block_receipts(&self, id: BlockId) -> Option> { - let hash = self.block_hash(id)?; - - let chain = self.chain.read(); - let receipts = chain.block_receipts(&hash)?; - let number = chain.block_number(&hash)?; - let body = chain.block_body(&hash)?; - let engine = self.engine.clone(); - - let mut gas_used = 0.into(); - let mut no_of_logs = 0; - - Some(body - .view() - .localized_transactions(&hash, number) - .into_iter() - .zip(receipts.receipts) - .map(move |(transaction, receipt)| { - let result = transaction_receipt(engine.machine(), transaction, receipt, gas_used, no_of_logs); - gas_used = result.cumulative_gas_used; - no_of_logs += result.logs.len(); - result - }) - .collect() - ) - } - - fn tree_route(&self, from: &H256, to: &H256) -> Option { - let chain = self.chain.read(); - match chain.is_known(from) && chain.is_known(to) { - true => chain.tree_route(from.clone(), to.clone()), - false => None - } - } - - fn find_uncles(&self, hash: &H256) -> Option> { - self.chain.read().find_uncle_hashes(hash, MAX_UNCLE_AGE) - } - - fn state_data(&self, hash: &H256) -> Option { - self.state_db.read().journal_db().state(hash) - } - - fn block_receipts(&self, hash: &H256) -> Option { - self.chain.read().block_receipts(hash) - } - - fn queue_info(&self) -> BlockQueueInfo { - self.importer.block_queue.queue_info() - } - - fn is_queue_empty(&self) -> bool { - self.importer.block_queue.is_empty() - } - - fn clear_queue(&self) { - self.importer.block_queue.clear(); - } - - fn 
additional_params(&self) -> BTreeMap { - self.engine.additional_params().into_iter().collect() - } - - fn logs(&self, filter: Filter) -> Result, BlockId> { - let chain = self.chain.read(); - - // First, check whether `filter.from_block` and `filter.to_block` is on the canon chain. If so, we can use the - // optimized version. - let is_canon = |id| { - match id { - // If it is referred by number, then it is always on the canon chain. - &BlockId::Earliest | &BlockId::Latest | &BlockId::Number(_) => true, - // If it is referred by hash, we see whether a hash -> number -> hash conversion gives us the same - // result. - &BlockId::Hash(ref hash) => chain.is_canon(hash), - } - }; - - let blocks = if is_canon(&filter.from_block) && is_canon(&filter.to_block) { - // If we are on the canon chain, use bloom filter to fetch required hashes. - // - // If we are sure the block does not exist (where val > best_block_number), then return error. Note that we - // don't need to care about pending blocks here because RPC query sets pending back to latest (or handled - // pending logs themselves). - let from = match self.block_number_ref(&filter.from_block) { - Some(val) if val <= chain.best_block_number() => val, - _ => return Err(filter.from_block.clone()), - }; - let to = match self.block_number_ref(&filter.to_block) { - Some(val) if val <= chain.best_block_number() => val, - _ => return Err(filter.to_block.clone()), - }; - - // If from is greater than to, then the current bloom filter behavior is to just return empty - // result. There's no point to continue here. - if from > to { - return Err(filter.to_block.clone()); - } - - chain.blocks_with_bloom(&filter.bloom_possibilities(), from, to) - .into_iter() - .filter_map(|n| chain.block_hash(n)) - .collect::>() - } else { - // Otherwise, we use a slower version that finds a link between from_block and to_block. 
- let from_hash = match Self::block_hash(&chain, filter.from_block) {
- Some(val) => val,
- None => return Err(filter.from_block.clone()),
- };
- let from_number = match chain.block_number(&from_hash) {
- Some(val) => val,
- None => return Err(BlockId::Hash(from_hash)),
- };
- let to_hash = match Self::block_hash(&chain, filter.to_block) {
- Some(val) => val,
- None => return Err(filter.to_block.clone()),
- };
-
- let blooms = filter.bloom_possibilities();
- let bloom_match = |header: &encoded::Header| {
- blooms.iter().any(|bloom| header.log_bloom().contains_bloom(bloom))
- };
-
- let (blocks, last_hash) = {
- let mut blocks = Vec::new();
- let mut current_hash = to_hash;
-
- loop {
- let header = match chain.block_header_data(&current_hash) {
- Some(val) => val,
- None => return Err(BlockId::Hash(current_hash)),
- };
- if bloom_match(&header) {
- blocks.push(current_hash);
- }
-
- // Stop if `from` block is reached.
- if header.number() <= from_number {
- break;
- }
- current_hash = header.parent_hash();
- }
-
- blocks.reverse();
- (blocks, current_hash)
- };
-
- // Check if we've actually reached the expected `from` block.
- if last_hash != from_hash || blocks.is_empty() {
- // In this case, from_hash is the cause (for not matching last_hash). 
- return Err(BlockId::Hash(from_hash)); - } - - blocks - }; - - Ok(chain.logs(blocks, |entry| filter.matches(entry), filter.limit)) - } - - fn filter_traces(&self, filter: TraceFilter) -> Option> { - if !self.tracedb.read().tracing_enabled() { - return None; - } - - let start = self.block_number(filter.range.start)?; - let end = self.block_number(filter.range.end)?; - - let db_filter = trace::Filter { - range: start as usize..end as usize, - from_address: filter.from_address.into(), - to_address: filter.to_address.into(), - }; - - let traces = self.tracedb.read() - .filter(&db_filter) - .into_iter() - .skip(filter.after.unwrap_or(0)) - .take(filter.count.unwrap_or(usize::max_value())) - .collect(); - Some(traces) - } - - fn trace(&self, trace: TraceId) -> Option { - if !self.tracedb.read().tracing_enabled() { - return None; - } - - let trace_address = trace.address; - self.transaction_address(trace.transaction) - .and_then(|tx_address| { - self.block_number(BlockId::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.read().trace(number, tx_address.index, trace_address)) - }) - } - - fn transaction_traces(&self, transaction: TransactionId) -> Option> { - if !self.tracedb.read().tracing_enabled() { - return None; - } - - self.transaction_address(transaction) - .and_then(|tx_address| { - self.block_number(BlockId::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.read().transaction_traces(number, tx_address.index)) - }) - } - - fn block_traces(&self, block: BlockId) -> Option> { - if !self.tracedb.read().tracing_enabled() { - return None; - } - - self.block_number(block) - .and_then(|number| self.tracedb.read().block_traces(number)) - } - - fn last_hashes(&self) -> LastHashes { - (*self.build_last_hashes(&self.chain.read().best_block_hash())).clone() - } - - fn transactions_to_propagate(&self) -> Vec> { - const PROPAGATE_FOR_BLOCKS: u32 = 4; - const MIN_TX_TO_PROPAGATE: usize = 256; - - let block_gas_limit = 
*self.best_block_header().gas_limit(); - let min_tx_gas: U256 = self.latest_schedule().tx_gas.into(); - - let max_len = if min_tx_gas.is_zero() { - usize::max_value() - } else { - cmp::max( - MIN_TX_TO_PROPAGATE, - cmp::min( - (block_gas_limit / min_tx_gas) * PROPAGATE_FOR_BLOCKS, - // never more than usize - usize::max_value().into() - ).as_u64() as usize - ) - }; - self.importer.miner.ready_transactions(self, max_len, ::miner::PendingOrdering::Priority) - } - - fn signing_chain_id(&self) -> Option { - self.engine.signing_chain_id(&self.latest_env_info()) - } - - fn block_extra_info(&self, id: BlockId) -> Option> { - self.block_header_decoded(id) - .map(|header| self.engine.extra_info(&header)) - } - - fn uncle_extra_info(&self, id: UncleId) -> Option> { - self.uncle(id) - .and_then(|h| { - h.decode().map(|dh| { - self.engine.extra_info(&dh) - }).ok() - }) - } - - fn pruning_info(&self) -> PruningInfo { - PruningInfo { - earliest_chain: self.chain.read().first_block_number().unwrap_or(1), - earliest_state: self.state_db.read().journal_db().earliest_era().unwrap_or(0), - } - } - - fn transact_contract(&self, address: Address, data: Bytes) -> Result<(), transaction::Error> { - let authoring_params = self.importer.miner.authoring_params(); - let service_transaction_checker = self.importer.miner.service_transaction_checker(); - let gas_price = if let Some(checker) = service_transaction_checker { - match checker.check_address(self, authoring_params.author) { - Ok(true) => U256::zero(), - _ => self.importer.miner.sensible_gas_price(), - } - } else { - self.importer.miner.sensible_gas_price() - }; - let transaction = transaction::Transaction { - nonce: self.latest_nonce(&authoring_params.author), - action: Action::Call(address), - gas: self.importer.miner.sensible_gas_limit(), - gas_price, - value: U256::zero(), - data: data, - }; - let chain_id = self.engine.signing_chain_id(&self.latest_env_info()); - let signature = self.engine.sign(transaction.hash(chain_id)) - 
.map_err(|e| transaction::Error::InvalidSignature(e.to_string()))?; - let signed = SignedTransaction::new(transaction.with_signature(signature, chain_id))?; - self.importer.miner.import_own_transaction(self, signed.into()) - } - - fn registrar_address(&self) -> Option
{ - self.registrar_address.clone() - } + fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result { + let address = self + .transaction_address(id) + .ok_or(CallError::TransactionNotFound)?; + let block = BlockId::Hash(address.block_hash); + + const PROOF: &'static str = + "The transaction address contains a valid index within block; qed"; + Ok(self + .replay_block_transactions(block, analytics)? + .nth(address.index) + .expect(PROOF) + .1) + } + + fn replay_block_transactions( + &self, + block: BlockId, + analytics: CallAnalytics, + ) -> Result>, CallError> { + let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?; + let body = self.block_body(block).ok_or(CallError::StatePruned)?; + let mut state = self + .state_at_beginning(block) + .ok_or(CallError::StatePruned)?; + let txs = body.transactions(); + let engine = self.engine.clone(); + + const PROOF: &'static str = + "Transactions fetched from blockchain; blockchain transactions are valid; qed"; + const EXECUTE_PROOF: &'static str = "Transaction replayed; qed"; + + Ok(Box::new(txs.into_iter().map(move |t| { + let transaction_hash = t.hash(); + let t = SignedTransaction::new(t).expect(PROOF); + let machine = engine.machine(); + let x = Self::do_virtual_call(machine, &env_info, &mut state, &t, analytics) + .expect(EXECUTE_PROOF); + env_info.gas_used = env_info.gas_used + x.gas_used; + (transaction_hash, x) + }))) + } + + fn mode(&self) -> Mode { + let r = self.mode.lock().clone().into(); + trace!(target: "mode", "Asked for mode = {:?}. 
returning {:?}", &*self.mode.lock(), r); + r + } + + fn disable(&self) { + self.set_mode(Mode::Off); + self.enabled.store(false, AtomicOrdering::Relaxed); + self.clear_queue(); + } + + fn set_mode(&self, new_mode: Mode) { + trace!(target: "mode", "Client::set_mode({:?})", new_mode); + if !self.enabled.load(AtomicOrdering::Relaxed) { + return; + } + { + let mut mode = self.mode.lock(); + *mode = new_mode.clone().into(); + trace!(target: "mode", "Mode now {:?}", &*mode); + if let Some(ref mut f) = *self.on_user_defaults_change.lock() { + trace!(target: "mode", "Making callback..."); + f(Some((&*mode).clone())) + } + } + match new_mode { + Mode::Active => self.wake_up(), + Mode::Off => self.sleep(true), + _ => { + (*self.sleep_state.lock()).last_activity = Some(Instant::now()); + } + } + } + + fn spec_name(&self) -> String { + self.config.spec_name.clone() + } + + fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()> { + trace!(target: "mode", "Client::set_spec_name({:?})", new_spec_name); + if !self.enabled.load(AtomicOrdering::Relaxed) { + return Err(()); + } + if let Some(ref h) = *self.exit_handler.lock() { + (*h)(new_spec_name); + Ok(()) + } else { + warn!("Not hypervised; cannot change chain."); + Err(()) + } + } + + fn block_number(&self, id: BlockId) -> Option { + self.block_number_ref(&id) + } + + fn block_body(&self, id: BlockId) -> Option { + let chain = self.chain.read(); + + Self::block_hash(&chain, id).and_then(|hash| chain.block_body(&hash)) + } + + fn block_status(&self, id: BlockId) -> BlockStatus { + let chain = self.chain.read(); + match Self::block_hash(&chain, id) { + Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, + Some(hash) => self.importer.block_queue.status(&hash).into(), + None => BlockStatus::Unknown, + } + } + + fn block_total_difficulty(&self, id: BlockId) -> Option { + let chain = self.chain.read(); + + Self::block_hash(&chain, id) + .and_then(|hash| chain.block_details(&hash)) + .map(|d| d.total_difficulty) + 
} + + fn storage_root(&self, address: &Address, id: BlockId) -> Option { + self.state_at(id) + .and_then(|s| s.storage_root(address).ok()) + .and_then(|x| x) + } + + fn block_hash(&self, id: BlockId) -> Option { + let chain = self.chain.read(); + Self::block_hash(&chain, id) + } + + fn code(&self, address: &Address, state: StateOrBlock) -> Option> { + let result = match state { + StateOrBlock::State(s) => s.code(address).ok(), + StateOrBlock::Block(id) => self.state_at(id).and_then(|s| s.code(address).ok()), + }; + + // Converting from `Option>>` to `Option>` + result.map(|c| c.map(|c| (&*c).clone())) + } + + fn storage_at(&self, address: &Address, position: &H256, state: StateOrBlock) -> Option { + match state { + StateOrBlock::State(s) => s.storage_at(address, position).ok(), + StateOrBlock::Block(id) => self + .state_at(id) + .and_then(|s| s.storage_at(address, position).ok()), + } + } + + fn list_accounts( + &self, + id: BlockId, + after: Option<&Address>, + count: u64, + ) -> Option> { + if !self.factories.trie.is_fat() { + trace!(target: "fatdb", "list_accounts: Not a fat DB"); + return None; + } + + let state = match self.state_at(id) { + Some(state) => state, + _ => return None, + }; + + let (root, db) = state.drop(); + let db = &db.as_hash_db(); + let trie = match self.factories.trie.readonly(db, &root) { + Ok(trie) => trie, + _ => { + trace!(target: "fatdb", "list_accounts: Couldn't open the DB"); + return None; + } + }; + + let mut iter = match trie.iter() { + Ok(iter) => iter, + _ => return None, + }; + + if let Some(after) = after { + if let Err(e) = iter.seek(after) { + trace!(target: "fatdb", "list_accounts: Couldn't seek the DB: {:?}", e); + } else { + // Position the iterator after the `after` element + iter.next(); + } + } + + let accounts = iter + .filter_map(|item| item.ok().map(|(addr, _)| Address::from_slice(&addr))) + .take(count as usize) + .collect(); + + Some(accounts) + } + + fn list_storage( + &self, + id: BlockId, + account: &Address, + 
after: Option<&H256>, + count: u64, + ) -> Option> { + if !self.factories.trie.is_fat() { + trace!(target: "fatdb", "list_storage: Not a fat DB"); + return None; + } + + let state = match self.state_at(id) { + Some(state) => state, + _ => return None, + }; + + let root = match state.storage_root(account) { + Ok(Some(root)) => root, + _ => return None, + }; + + let (_, db) = state.drop(); + let account_db = &self + .factories + .accountdb + .readonly(db.as_hash_db(), keccak(account)); + let account_db = &account_db.as_hash_db(); + let trie = match self.factories.trie.readonly(account_db, &root) { + Ok(trie) => trie, + _ => { + trace!(target: "fatdb", "list_storage: Couldn't open the DB"); + return None; + } + }; + + let mut iter = match trie.iter() { + Ok(iter) => iter, + _ => return None, + }; + + if let Some(after) = after { + if let Err(e) = iter.seek(after) { + trace!(target: "fatdb", "list_storage: Couldn't seek the DB: {:?}", e); + } else { + // Position the iterator after the `after` element + iter.next(); + } + } + + let keys = iter + .filter_map(|item| item.ok().map(|(key, _)| H256::from_slice(&key))) + .take(count as usize) + .collect(); + + Some(keys) + } + + fn transaction(&self, id: TransactionId) -> Option { + self.transaction_address(id) + .and_then(|address| self.chain.read().transaction(&address)) + } + + fn uncle(&self, id: UncleId) -> Option { + let index = id.position; + self.block_body(id.block) + .and_then(|body| body.view().uncle_rlp_at(index)) + .map(encoded::Header::new) + } + + fn transaction_receipt(&self, id: TransactionId) -> Option { + // NOTE Don't use block_receipts here for performance reasons + let address = self.transaction_address(id)?; + let hash = address.block_hash; + let chain = self.chain.read(); + let number = chain.block_number(&hash)?; + let body = chain.block_body(&hash)?; + let mut receipts = chain.block_receipts(&hash)?.receipts; + receipts.truncate(address.index + 1); + + let transaction = body + .view() + 
.localized_transaction_at(&hash, number, address.index)?; + let receipt = receipts.pop()?; + let gas_used = receipts.last().map_or_else(|| 0.into(), |r| r.gas_used); + let no_of_logs = receipts + .into_iter() + .map(|receipt| receipt.logs.len()) + .sum::(); + + let receipt = transaction_receipt( + self.engine().machine(), + transaction, + receipt, + gas_used, + no_of_logs, + ); + Some(receipt) + } + + fn localized_block_receipts(&self, id: BlockId) -> Option> { + let hash = self.block_hash(id)?; + + let chain = self.chain.read(); + let receipts = chain.block_receipts(&hash)?; + let number = chain.block_number(&hash)?; + let body = chain.block_body(&hash)?; + let engine = self.engine.clone(); + + let mut gas_used = 0.into(); + let mut no_of_logs = 0; + + Some( + body.view() + .localized_transactions(&hash, number) + .into_iter() + .zip(receipts.receipts) + .map(move |(transaction, receipt)| { + let result = transaction_receipt( + engine.machine(), + transaction, + receipt, + gas_used, + no_of_logs, + ); + gas_used = result.cumulative_gas_used; + no_of_logs += result.logs.len(); + result + }) + .collect(), + ) + } + + fn tree_route(&self, from: &H256, to: &H256) -> Option { + let chain = self.chain.read(); + match chain.is_known(from) && chain.is_known(to) { + true => chain.tree_route(from.clone(), to.clone()), + false => None, + } + } + + fn find_uncles(&self, hash: &H256) -> Option> { + self.chain.read().find_uncle_hashes(hash, MAX_UNCLE_AGE) + } + + fn state_data(&self, hash: &H256) -> Option { + self.state_db.read().journal_db().state(hash) + } + + fn block_receipts(&self, hash: &H256) -> Option { + self.chain.read().block_receipts(hash) + } + + fn queue_info(&self) -> BlockQueueInfo { + self.importer.block_queue.queue_info() + } + + fn is_queue_empty(&self) -> bool { + self.importer.block_queue.is_empty() + } + + fn clear_queue(&self) { + self.importer.block_queue.clear(); + } + + fn additional_params(&self) -> BTreeMap { + 
self.engine.additional_params().into_iter().collect() + } + + fn logs(&self, filter: Filter) -> Result, BlockId> { + let chain = self.chain.read(); + + // First, check whether `filter.from_block` and `filter.to_block` is on the canon chain. If so, we can use the + // optimized version. + let is_canon = |id| { + match id { + // If it is referred by number, then it is always on the canon chain. + &BlockId::Earliest | &BlockId::Latest | &BlockId::Number(_) => true, + // If it is referred by hash, we see whether a hash -> number -> hash conversion gives us the same + // result. + &BlockId::Hash(ref hash) => chain.is_canon(hash), + } + }; + + let blocks = if is_canon(&filter.from_block) && is_canon(&filter.to_block) { + // If we are on the canon chain, use bloom filter to fetch required hashes. + // + // If we are sure the block does not exist (where val > best_block_number), then return error. Note that we + // don't need to care about pending blocks here because RPC query sets pending back to latest (or handled + // pending logs themselves). + let from = match self.block_number_ref(&filter.from_block) { + Some(val) if val <= chain.best_block_number() => val, + _ => return Err(filter.from_block.clone()), + }; + let to = match self.block_number_ref(&filter.to_block) { + Some(val) if val <= chain.best_block_number() => val, + _ => return Err(filter.to_block.clone()), + }; + + // If from is greater than to, then the current bloom filter behavior is to just return empty + // result. There's no point to continue here. + if from > to { + return Err(filter.to_block.clone()); + } + + chain + .blocks_with_bloom(&filter.bloom_possibilities(), from, to) + .into_iter() + .filter_map(|n| chain.block_hash(n)) + .collect::>() + } else { + // Otherwise, we use a slower version that finds a link between from_block and to_block. 
+ let from_hash = match Self::block_hash(&chain, filter.from_block) { + Some(val) => val, + None => return Err(filter.from_block.clone()), + }; + let from_number = match chain.block_number(&from_hash) { + Some(val) => val, + None => return Err(BlockId::Hash(from_hash)), + }; + let to_hash = match Self::block_hash(&chain, filter.to_block) { + Some(val) => val, + None => return Err(filter.to_block.clone()), + }; + + let blooms = filter.bloom_possibilities(); + let bloom_match = |header: &encoded::Header| { + blooms + .iter() + .any(|bloom| header.log_bloom().contains_bloom(bloom)) + }; + + let (blocks, last_hash) = { + let mut blocks = Vec::new(); + let mut current_hash = to_hash; + + loop { + let header = match chain.block_header_data(¤t_hash) { + Some(val) => val, + None => return Err(BlockId::Hash(current_hash)), + }; + if bloom_match(&header) { + blocks.push(current_hash); + } + + // Stop if `from` block is reached. + if header.number() <= from_number { + break; + } + current_hash = header.parent_hash(); + } + + blocks.reverse(); + (blocks, current_hash) + }; + + // Check if we've actually reached the expected `from` block. + if last_hash != from_hash || blocks.is_empty() { + // In this case, from_hash is the cause (for not matching last_hash). 
+ return Err(BlockId::Hash(from_hash)); + } + + blocks + }; + + Ok(chain.logs(blocks, |entry| filter.matches(entry), filter.limit)) + } + + fn filter_traces(&self, filter: TraceFilter) -> Option> { + if !self.tracedb.read().tracing_enabled() { + return None; + } + + let start = self.block_number(filter.range.start)?; + let end = self.block_number(filter.range.end)?; + + let db_filter = trace::Filter { + range: start as usize..end as usize, + from_address: filter.from_address.into(), + to_address: filter.to_address.into(), + }; + + let traces = self + .tracedb + .read() + .filter(&db_filter) + .into_iter() + .skip(filter.after.unwrap_or(0)) + .take(filter.count.unwrap_or(usize::max_value())) + .collect(); + Some(traces) + } + + fn trace(&self, trace: TraceId) -> Option { + if !self.tracedb.read().tracing_enabled() { + return None; + } + + let trace_address = trace.address; + self.transaction_address(trace.transaction) + .and_then(|tx_address| { + self.block_number(BlockId::Hash(tx_address.block_hash)) + .and_then(|number| { + self.tracedb + .read() + .trace(number, tx_address.index, trace_address) + }) + }) + } + + fn transaction_traces(&self, transaction: TransactionId) -> Option> { + if !self.tracedb.read().tracing_enabled() { + return None; + } + + self.transaction_address(transaction) + .and_then(|tx_address| { + self.block_number(BlockId::Hash(tx_address.block_hash)) + .and_then(|number| { + self.tracedb + .read() + .transaction_traces(number, tx_address.index) + }) + }) + } + + fn block_traces(&self, block: BlockId) -> Option> { + if !self.tracedb.read().tracing_enabled() { + return None; + } + + self.block_number(block) + .and_then(|number| self.tracedb.read().block_traces(number)) + } + + fn last_hashes(&self) -> LastHashes { + (*self.build_last_hashes(&self.chain.read().best_block_hash())).clone() + } + + fn transactions_to_propagate(&self) -> Vec> { + const PROPAGATE_FOR_BLOCKS: u32 = 4; + const MIN_TX_TO_PROPAGATE: usize = 256; + + let block_gas_limit = 
*self.best_block_header().gas_limit(); + let min_tx_gas: U256 = self.latest_schedule().tx_gas.into(); + + let max_len = if min_tx_gas.is_zero() { + usize::max_value() + } else { + cmp::max( + MIN_TX_TO_PROPAGATE, + cmp::min( + (block_gas_limit / min_tx_gas) * PROPAGATE_FOR_BLOCKS, + // never more than usize + usize::max_value().into(), + ) + .as_u64() as usize, + ) + }; + self.importer + .miner + .ready_transactions(self, max_len, ::miner::PendingOrdering::Priority) + } + + fn signing_chain_id(&self) -> Option { + self.engine.signing_chain_id(&self.latest_env_info()) + } + + fn block_extra_info(&self, id: BlockId) -> Option> { + self.block_header_decoded(id) + .map(|header| self.engine.extra_info(&header)) + } + + fn uncle_extra_info(&self, id: UncleId) -> Option> { + self.uncle(id) + .and_then(|h| h.decode().map(|dh| self.engine.extra_info(&dh)).ok()) + } + + fn pruning_info(&self) -> PruningInfo { + PruningInfo { + earliest_chain: self.chain.read().first_block_number().unwrap_or(1), + earliest_state: self + .state_db + .read() + .journal_db() + .earliest_era() + .unwrap_or(0), + } + } + + fn transact_contract(&self, address: Address, data: Bytes) -> Result<(), transaction::Error> { + let authoring_params = self.importer.miner.authoring_params(); + let service_transaction_checker = self.importer.miner.service_transaction_checker(); + let gas_price = if let Some(checker) = service_transaction_checker { + match checker.check_address(self, authoring_params.author) { + Ok(true) => U256::zero(), + _ => self.importer.miner.sensible_gas_price(), + } + } else { + self.importer.miner.sensible_gas_price() + }; + let transaction = transaction::Transaction { + nonce: self.latest_nonce(&authoring_params.author), + action: Action::Call(address), + gas: self.importer.miner.sensible_gas_limit(), + gas_price, + value: U256::zero(), + data: data, + }; + let chain_id = self.engine.signing_chain_id(&self.latest_env_info()); + let signature = self + .engine + 
.sign(transaction.hash(chain_id)) + .map_err(|e| transaction::Error::InvalidSignature(e.to_string()))?; + let signed = SignedTransaction::new(transaction.with_signature(signature, chain_id))?; + self.importer + .miner + .import_own_transaction(self, signed.into()) + } + + fn registrar_address(&self) -> Option
{ + self.registrar_address.clone() + } } impl IoClient for Client { - fn queue_transactions(&self, transactions: Vec, peer_id: usize) { - trace_time!("queue_transactions"); - let len = transactions.len(); - self.queue_transactions.queue(&self.io_channel.read(), len, move |client| { - trace_time!("import_queued_transactions"); + fn queue_transactions(&self, transactions: Vec, peer_id: usize) { + trace_time!("queue_transactions"); + let len = transactions.len(); + self.queue_transactions + .queue(&self.io_channel.read(), len, move |client| { + trace_time!("import_queued_transactions"); - let txs: Vec = transactions - .iter() - .filter_map(|bytes| client.engine.decode_transaction(bytes).ok()) - .collect(); + let txs: Vec = transactions + .iter() + .filter_map(|bytes| client.engine.decode_transaction(bytes).ok()) + .collect(); - client.notify(|notify| { - notify.transactions_received(&txs, peer_id); - }); + client.notify(|notify| { + notify.transactions_received(&txs, peer_id); + }); - client.importer.miner.import_external_transactions(client, txs); - }).unwrap_or_else(|e| { - debug!(target: "client", "Ignoring {} transactions: {}", len, e); - }); - } + client + .importer + .miner + .import_external_transactions(client, txs); + }) + .unwrap_or_else(|e| { + debug!(target: "client", "Ignoring {} transactions: {}", len, e); + }); + } - fn queue_ancient_block(&self, unverified: Unverified, receipts_bytes: Bytes) -> EthcoreResult { - trace_time!("queue_ancient_block"); + fn queue_ancient_block( + &self, + unverified: Unverified, + receipts_bytes: Bytes, + ) -> EthcoreResult { + trace_time!("queue_ancient_block"); - let hash = unverified.hash(); - { - // check block order - if self.chain.read().is_known(&hash) { - bail!(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain)); - } - let parent_hash = unverified.parent_hash(); - // NOTE To prevent race condition with import, make sure to check queued blocks first - // (and attempt to acquire lock) - let is_parent_pending = 
self.queued_ancient_blocks.read().0.contains(&parent_hash); - if !is_parent_pending && !self.chain.read().is_known(&parent_hash) { - bail!(EthcoreErrorKind::Block(BlockError::UnknownParent(parent_hash))); - } - } + let hash = unverified.hash(); + { + // check block order + if self.chain.read().is_known(&hash) { + bail!(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain)); + } + let parent_hash = unverified.parent_hash(); + // NOTE To prevent race condition with import, make sure to check queued blocks first + // (and attempt to acquire lock) + let is_parent_pending = self.queued_ancient_blocks.read().0.contains(&parent_hash); + if !is_parent_pending && !self.chain.read().is_known(&parent_hash) { + bail!(EthcoreErrorKind::Block(BlockError::UnknownParent( + parent_hash + ))); + } + } - // we queue blocks here and trigger an IO message. - { - let mut queued = self.queued_ancient_blocks.write(); - queued.0.insert(hash); - queued.1.push_back((unverified, receipts_bytes)); - } + // we queue blocks here and trigger an IO message. + { + let mut queued = self.queued_ancient_blocks.write(); + queued.0.insert(hash); + queued.1.push_back((unverified, receipts_bytes)); + } - let queued = self.queued_ancient_blocks.clone(); - let lock = self.ancient_blocks_import_lock.clone(); - self.queue_ancient_blocks.queue(&self.io_channel.read(), 1, move |client| { - trace_time!("import_ancient_block"); - // Make sure to hold the lock here to prevent importing out of order. - // We use separate lock, cause we don't want to block queueing. 
- let _lock = lock.lock(); - for _i in 0..MAX_ANCIENT_BLOCKS_TO_IMPORT { - let first = queued.write().1.pop_front(); - if let Some((unverified, receipts_bytes)) = first { - let hash = unverified.hash(); - let result = client.importer.import_old_block( - unverified, - &receipts_bytes, - &**client.db.read().key_value(), - &*client.chain.read(), - ); - if let Err(e) = result { - error!(target: "client", "Error importing ancient block: {}", e); + let queued = self.queued_ancient_blocks.clone(); + let lock = self.ancient_blocks_import_lock.clone(); + self.queue_ancient_blocks + .queue(&self.io_channel.read(), 1, move |client| { + trace_time!("import_ancient_block"); + // Make sure to hold the lock here to prevent importing out of order. + // We use separate lock, cause we don't want to block queueing. + let _lock = lock.lock(); + for _i in 0..MAX_ANCIENT_BLOCKS_TO_IMPORT { + let first = queued.write().1.pop_front(); + if let Some((unverified, receipts_bytes)) = first { + let hash = unverified.hash(); + let result = client.importer.import_old_block( + unverified, + &receipts_bytes, + &**client.db.read().key_value(), + &*client.chain.read(), + ); + if let Err(e) = result { + error!(target: "client", "Error importing ancient block: {}", e); - let mut queued = queued.write(); - queued.0.clear(); - queued.1.clear(); - } - // remove from pending - queued.write().0.remove(&hash); - } else { - break; - } - } - })?; + let mut queued = queued.write(); + queued.0.clear(); + queued.1.clear(); + } + // remove from pending + queued.write().0.remove(&hash); + } else { + break; + } + } + })?; - Ok(hash) - } + Ok(hash) + } - fn queue_consensus_message(&self, message: Bytes) { - match self.queue_consensus_message.queue(&self.io_channel.read(), 1, move |client| { - if let Err(e) = client.engine().handle_message(&message) { - debug!(target: "poa", "Invalid message received: {}", e); - } - }) { - Ok(_) => (), - Err(e) => { - debug!(target: "poa", "Ignoring the message, error queueing: {}", 
e); - } - } - } + fn queue_consensus_message(&self, message: Bytes) { + match self + .queue_consensus_message + .queue(&self.io_channel.read(), 1, move |client| { + if let Err(e) = client.engine().handle_message(&message) { + debug!(target: "poa", "Invalid message received: {}", e); + } + }) { + Ok(_) => (), + Err(e) => { + debug!(target: "poa", "Ignoring the message, error queueing: {}", e); + } + } + } } impl ReopenBlock for Client { - fn reopen_block(&self, block: ClosedBlock) -> OpenBlock { - let engine = &*self.engine; - let mut block = block.reopen(engine); - let max_uncles = engine.maximum_uncle_count(block.header.number()); - if block.uncles.len() < max_uncles { - let chain = self.chain.read(); - let h = chain.best_block_hash(); - // Add new uncles - let uncles = chain - .find_uncle_hashes(&h, MAX_UNCLE_AGE) - .unwrap_or_else(Vec::new); + fn reopen_block(&self, block: ClosedBlock) -> OpenBlock { + let engine = &*self.engine; + let mut block = block.reopen(engine); + let max_uncles = engine.maximum_uncle_count(block.header.number()); + if block.uncles.len() < max_uncles { + let chain = self.chain.read(); + let h = chain.best_block_hash(); + // Add new uncles + let uncles = chain + .find_uncle_hashes(&h, MAX_UNCLE_AGE) + .unwrap_or_else(Vec::new); - for h in uncles { - if !block.uncles.iter().any(|header| header.hash() == h) { - let uncle = chain.block_header_data(&h).expect("find_uncle_hashes only returns hashes for existing headers; qed"); - let uncle = uncle.decode().expect("decoding failure"); - block.push_uncle(uncle).expect("pushing up to maximum_uncle_count; + for h in uncles { + if !block.uncles.iter().any(|header| header.hash() == h) { + let uncle = chain + .block_header_data(&h) + .expect("find_uncle_hashes only returns hashes for existing headers; qed"); + let uncle = uncle.decode().expect("decoding failure"); + block.push_uncle(uncle).expect( + "pushing up to maximum_uncle_count; push_uncle is not ok only if more than maximum_uncle_count is 
pushed; so all push_uncle are Ok; - qed"); - if block.uncles.len() >= max_uncles { break } - } - } - - } - block - } + qed", + ); + if block.uncles.len() >= max_uncles { + break; + } + } + } + } + block + } } impl PrepareOpenBlock for Client { - fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> Result { - let engine = &*self.engine; - let chain = self.chain.read(); - let best_header = chain.best_block_header(); - let h = best_header.hash(); + fn prepare_open_block( + &self, + author: Address, + gas_range_target: (U256, U256), + extra_data: Bytes, + ) -> Result { + let engine = &*self.engine; + let chain = self.chain.read(); + let best_header = chain.best_block_header(); + let h = best_header.hash(); - let is_epoch_begin = chain.epoch_transition(best_header.number(), h).is_some(); - let mut open_block = OpenBlock::new( - engine, - self.factories.clone(), - self.tracedb.read().tracing_enabled(), - self.state_db.read().boxed_clone_canon(&h), - &best_header, - self.build_last_hashes(&h), - author, - gas_range_target, - extra_data, - is_epoch_begin, - chain.ancestry_with_metadata_iter(best_header.hash()), - )?; + let is_epoch_begin = chain.epoch_transition(best_header.number(), h).is_some(); + let mut open_block = OpenBlock::new( + engine, + self.factories.clone(), + self.tracedb.read().tracing_enabled(), + self.state_db.read().boxed_clone_canon(&h), + &best_header, + self.build_last_hashes(&h), + author, + gas_range_target, + extra_data, + is_epoch_begin, + chain.ancestry_with_metadata_iter(best_header.hash()), + )?; - // Add uncles - chain - .find_uncle_headers(&h, MAX_UNCLE_AGE) - .unwrap_or_else(Vec::new) - .into_iter() - .take(engine.maximum_uncle_count(open_block.header.number())) - .foreach(|h| { - open_block.push_uncle(h.decode().expect("decoding failure")).expect("pushing maximum_uncle_count; + // Add uncles + chain + .find_uncle_headers(&h, MAX_UNCLE_AGE) + .unwrap_or_else(Vec::new) + .into_iter() + 
.take(engine.maximum_uncle_count(open_block.header.number())) + .foreach(|h| { + open_block + .push_uncle(h.decode().expect("decoding failure")) + .expect( + "pushing maximum_uncle_count; open_block was just created; push_uncle is not ok only if more than maximum_uncle_count is pushed; so all push_uncle are Ok; - qed"); - }); + qed", + ); + }); - Ok(open_block) - } + Ok(open_block) + } } impl BlockProducer for Client {} impl ScheduleInfo for Client { - fn latest_schedule(&self) -> Schedule { - self.engine.schedule(self.latest_env_info().number) - } + fn latest_schedule(&self) -> Schedule { + self.engine.schedule(self.latest_env_info().number) + } } impl ImportSealedBlock for Client { - fn import_sealed_block(&self, block: SealedBlock) -> EthcoreResult { - let start = Instant::now(); - let raw = block.rlp_bytes(); - let header = block.header.clone(); - let hash = header.hash(); - self.notify(|n| { - n.block_pre_import(&raw, &hash, header.difficulty()) - }); + fn import_sealed_block(&self, block: SealedBlock) -> EthcoreResult { + let start = Instant::now(); + let raw = block.rlp_bytes(); + let header = block.header.clone(); + let hash = header.hash(); + self.notify(|n| n.block_pre_import(&raw, &hash, header.difficulty())); - let route = { - // Do a super duper basic verification to detect potential bugs - if let Err(e) = self.engine.verify_block_basic(&header) { - self.importer.bad_blocks.report( - block.rlp_bytes(), - format!("Detected an issue with locally sealed block: {}", e), - ); - return Err(e.into()); - } + let route = { + // Do a super duper basic verification to detect potential bugs + if let Err(e) = self.engine.verify_block_basic(&header) { + self.importer.bad_blocks.report( + block.rlp_bytes(), + format!("Detected an issue with locally sealed block: {}", e), + ); + return Err(e.into()); + } - // scope for self.import_lock - let _import_lock = self.importer.import_lock.lock(); - trace_time!("import_sealed_block"); + // scope for self.import_lock + let 
_import_lock = self.importer.import_lock.lock(); + trace_time!("import_sealed_block"); - let block_data = block.rlp_bytes(); + let block_data = block.rlp_bytes(); - let pending = self.importer.check_epoch_end_signal( - &header, - &block_data, - &block.receipts, - block.state.db(), - self - )?; - let route = self.importer.commit_block( - block, - &header, - encoded::Block::new(block_data), - pending, - self - ); - trace!(target: "client", "Imported sealed block #{} ({})", header.number(), hash); - self.state_db.write().sync_cache(&route.enacted, &route.retracted, false); - route - }; - let route = ChainRoute::from([route].as_ref()); - self.importer.miner.chain_new_blocks( - self, - &[hash], - &[], - route.enacted(), - route.retracted(), - self.engine.seals_internally().is_some(), - ); - self.notify(|notify| { - notify.new_blocks( - NewBlocks::new( - vec![hash], - vec![], - route.clone(), - vec![hash], - vec![], - start.elapsed(), - false - ) - ); - }); - self.db.read().key_value().flush().expect("DB flush failed."); - Ok(hash) - } + let pending = self.importer.check_epoch_end_signal( + &header, + &block_data, + &block.receipts, + block.state.db(), + self, + )?; + let route = self.importer.commit_block( + block, + &header, + encoded::Block::new(block_data), + pending, + self, + ); + trace!(target: "client", "Imported sealed block #{} ({})", header.number(), hash); + self.state_db + .write() + .sync_cache(&route.enacted, &route.retracted, false); + route + }; + let route = ChainRoute::from([route].as_ref()); + self.importer.miner.chain_new_blocks( + self, + &[hash], + &[], + route.enacted(), + route.retracted(), + self.engine.seals_internally().is_some(), + ); + self.notify(|notify| { + notify.new_blocks(NewBlocks::new( + vec![hash], + vec![], + route.clone(), + vec![hash], + vec![], + start.elapsed(), + false, + )); + }); + self.db + .read() + .key_value() + .flush() + .expect("DB flush failed."); + Ok(hash) + } } impl BroadcastProposalBlock for Client { - fn 
broadcast_proposal_block(&self, block: SealedBlock) { - const DURATION_ZERO: Duration = Duration::from_millis(0); - self.notify(|notify| { - notify.new_blocks( - NewBlocks::new( - vec![], - vec![], - ChainRoute::default(), - vec![], - vec![block.rlp_bytes()], - DURATION_ZERO, - false - ) - ); - }); - } + fn broadcast_proposal_block(&self, block: SealedBlock) { + const DURATION_ZERO: Duration = Duration::from_millis(0); + self.notify(|notify| { + notify.new_blocks(NewBlocks::new( + vec![], + vec![], + ChainRoute::default(), + vec![], + vec![block.rlp_bytes()], + DURATION_ZERO, + false, + )); + }); + } } impl SealedBlockImporter for Client {} @@ -2514,474 +2872,525 @@ impl ::miner::TransactionVerifierClient for Client {} impl ::miner::BlockChainClient for Client {} impl super::traits::EngineClient for Client { - fn update_sealing(&self, force: ForceUpdateSealing) { - self.importer.miner.update_sealing(self, force) - } + fn update_sealing(&self, force: ForceUpdateSealing) { + self.importer.miner.update_sealing(self, force) + } - fn submit_seal(&self, block_hash: H256, seal: Vec) { - let import = self.importer.miner.submit_seal(block_hash, seal) - .and_then(|block| self.import_sealed_block(block)); - if let Err(err) = import { - warn!(target: "poa", "Wrong internal seal submission! {:?}", err); - } - } + fn submit_seal(&self, block_hash: H256, seal: Vec) { + let import = self + .importer + .miner + .submit_seal(block_hash, seal) + .and_then(|block| self.import_sealed_block(block)); + if let Err(err) = import { + warn!(target: "poa", "Wrong internal seal submission! 
{:?}", err); + } + } - fn broadcast_consensus_message(&self, message: Bytes) { - self.notify(|notify| { - notify.broadcast(ChainMessageType::Consensus(message.clone())) - }); - } + fn broadcast_consensus_message(&self, message: Bytes) { + self.notify(|notify| notify.broadcast(ChainMessageType::Consensus(message.clone()))); + } - fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition> { - self.chain.read().epoch_transition_for(parent_hash) - } + fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition> { + self.chain.read().epoch_transition_for(parent_hash) + } - fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) } + fn as_full_client(&self) -> Option<&BlockChainClient> { + Some(self) + } - fn block_number(&self, id: BlockId) -> Option { - BlockChainClient::block_number(self, id) - } + fn block_number(&self, id: BlockId) -> Option { + BlockChainClient::block_number(self, id) + } - fn block_header(&self, id: BlockId) -> Option { - BlockChainClient::block_header(self, id) - } + fn block_header(&self, id: BlockId) -> Option { + BlockChainClient::block_header(self, id) + } } impl ProvingBlockChainClient for Client { - fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)> { - self.state_at(id) - .and_then(move |state| state.prove_storage(key1, key2).ok()) - } + fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)> { + self.state_at(id) + .and_then(move |state| state.prove_storage(key1, key2).ok()) + } - fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec, ::types::basic_account::BasicAccount)> { - self.state_at(id) - .and_then(move |state| state.prove_account(key1).ok()) - } + fn prove_account( + &self, + key1: H256, + id: BlockId, + ) -> Option<(Vec, ::types::basic_account::BasicAccount)> { + self.state_at(id) + .and_then(move |state| state.prove_account(key1).ok()) + } - fn prove_transaction(&self, transaction: 
SignedTransaction, id: BlockId) -> Option<(Bytes, Vec)> { - let (header, mut env_info) = match (self.block_header(id), self.env_info(id)) { - (Some(s), Some(e)) => (s, e), - _ => return None, - }; + fn prove_transaction( + &self, + transaction: SignedTransaction, + id: BlockId, + ) -> Option<(Bytes, Vec)> { + let (header, mut env_info) = match (self.block_header(id), self.env_info(id)) { + (Some(s), Some(e)) => (s, e), + _ => return None, + }; - env_info.gas_limit = transaction.gas.clone(); - let mut jdb = self.state_db.read().journal_db().boxed_clone(); + env_info.gas_limit = transaction.gas.clone(); + let mut jdb = self.state_db.read().journal_db().boxed_clone(); - state::prove_transaction_virtual( - jdb.as_hash_db_mut(), - header.state_root().clone(), - &transaction, - self.engine.machine(), - &env_info, - self.factories.clone(), - ) - } + state::prove_transaction_virtual( + jdb.as_hash_db_mut(), + header.state_root().clone(), + &transaction, + self.engine.machine(), + &env_info, + self.factories.clone(), + ) + } - fn epoch_signal(&self, hash: H256) -> Option> { - // pending transitions are never deleted, and do not contain - // finality proofs by definition. - self.chain.read().get_pending_transition(hash).map(|pending| pending.proof) - } + fn epoch_signal(&self, hash: H256) -> Option> { + // pending transitions are never deleted, and do not contain + // finality proofs by definition. 
+ self.chain + .read() + .get_pending_transition(hash) + .map(|pending| pending.proof) + } } impl SnapshotClient for Client {} - impl ImportExportBlocks for Client { - fn export_blocks<'a>( - &self, - mut out: Box, - from: BlockId, - to: BlockId, - format: Option - ) -> Result<(), String> { - let from = self.block_number(from).ok_or("Starting block could not be found")?; - let to = self.block_number(to).ok_or("End block could not be found")?; - let format = format.unwrap_or_default(); + fn export_blocks<'a>( + &self, + mut out: Box, + from: BlockId, + to: BlockId, + format: Option, + ) -> Result<(), String> { + let from = self + .block_number(from) + .ok_or("Starting block could not be found")?; + let to = self + .block_number(to) + .ok_or("End block could not be found")?; + let format = format.unwrap_or_default(); - for i in from..=to { - if i % 10000 == 0 { - info!("#{}", i); - } - let b = self.block(BlockId::Number(i)) - .ok_or("Error exporting incomplete chain")? - .into_inner(); - match format { - DataFormat::Binary => { - out.write(&b) - .map_err(|e| { - format!("Couldn't write to stream. Cause: {}", e) - })?; - } - DataFormat::Hex => { - out.write_fmt(format_args!("{}\n", b.pretty())) - .map_err(|e| { - format!("Couldn't write to stream. Cause: {}", e) - })?; - } - } - } - Ok(()) - } + for i in from..=to { + if i % 10000 == 0 { + info!("#{}", i); + } + let b = self + .block(BlockId::Number(i)) + .ok_or("Error exporting incomplete chain")? + .into_inner(); + match format { + DataFormat::Binary => { + out.write(&b) + .map_err(|e| format!("Couldn't write to stream. Cause: {}", e))?; + } + DataFormat::Hex => { + out.write_fmt(format_args!("{}\n", b.pretty())) + .map_err(|e| format!("Couldn't write to stream. 
Cause: {}", e))?; + } + } + } + Ok(()) + } - fn import_blocks<'a>( - &self, - mut source: Box, - format: Option - ) -> Result<(), String> { - const READAHEAD_BYTES: usize = 8; + fn import_blocks<'a>( + &self, + mut source: Box, + format: Option, + ) -> Result<(), String> { + const READAHEAD_BYTES: usize = 8; - let mut first_bytes: Vec = vec![0; READAHEAD_BYTES]; - let mut first_read = 0; + let mut first_bytes: Vec = vec![0; READAHEAD_BYTES]; + let mut first_read = 0; - let format = match format { - Some(format) => format, - None => { - first_read = source.read(&mut first_bytes) - .map_err(|_| { - "Error reading from the file/stream." - })?; - match first_bytes[0] { - 0xf9 => DataFormat::Binary, - _ => DataFormat::Hex, - } - } - }; + let format = match format { + Some(format) => format, + None => { + first_read = source + .read(&mut first_bytes) + .map_err(|_| "Error reading from the file/stream.")?; + match first_bytes[0] { + 0xf9 => DataFormat::Binary, + _ => DataFormat::Hex, + } + } + }; - let do_import = |bytes: Vec| { - let block = Unverified::from_rlp(bytes).map_err(|_| "Invalid block rlp")?; - let number = block.header.number(); - while self.queue_info().is_full() { - std::thread::sleep(Duration::from_secs(1)); - } - match self.import_block(block) { - Err(Error(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { - trace!("Skipping block #{}: already in chain.", number); - } - Err(e) => { - return Err(format!("Cannot import block #{}: {:?}", number, e)); - }, - Ok(_) => {}, - } - Ok(()) - }; + let do_import = |bytes: Vec| { + let block = Unverified::from_rlp(bytes).map_err(|_| "Invalid block rlp")?; + let number = block.header.number(); + while self.queue_info().is_full() { + std::thread::sleep(Duration::from_secs(1)); + } + match self.import_block(block) { + Err(Error(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { + trace!("Skipping block #{}: already in chain.", number); + } + Err(e) => { + return Err(format!("Cannot 
import block #{}: {:?}", number, e)); + } + Ok(_) => {} + } + Ok(()) + }; - match format { - DataFormat::Binary => { - loop { - let (mut bytes, n) = if first_read > 0 { - (first_bytes.clone(), first_read) - } else { - let mut bytes = vec![0; READAHEAD_BYTES]; - let n = source.read(&mut bytes) - .map_err(|err| { - format!("Error reading from the file/stream: {:?}", err) - })?; - (bytes, n) - }; - if n == 0 { break; } - first_read = 0; - let s = PayloadInfo::from(&bytes) - .map_err(|e| { - format!("Invalid RLP in the file/stream: {:?}", e) - })?.total(); - bytes.resize(s, 0); - source.read_exact(&mut bytes[n..]) - .map_err(|err| { - format!("Error reading from the file/stream: {:?}", err) - })?; - do_import(bytes)?; - } - } - DataFormat::Hex => { - for line in BufReader::new(source).lines() { - let s = line - .map_err(|err| { - format!("Error reading from the file/stream: {:?}", err) - })?; - let s = if first_read > 0 { - from_utf8(&first_bytes) - .map_err(|err| { - format!("Invalid UTF-8: {:?}", err) - })? - .to_owned() + &(s[..]) - } else { - s - }; - first_read = 0; - let bytes = s.from_hex() - .map_err(|err| { - format!("Invalid hex in file/stream: {:?}", err) - })?; - do_import(bytes)?; - } - } - }; - self.flush_queue(); - Ok(()) - } + match format { + DataFormat::Binary => loop { + let (mut bytes, n) = if first_read > 0 { + (first_bytes.clone(), first_read) + } else { + let mut bytes = vec![0; READAHEAD_BYTES]; + let n = source + .read(&mut bytes) + .map_err(|err| format!("Error reading from the file/stream: {:?}", err))?; + (bytes, n) + }; + if n == 0 { + break; + } + first_read = 0; + let s = PayloadInfo::from(&bytes) + .map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))? 
+ .total(); + bytes.resize(s, 0); + source + .read_exact(&mut bytes[n..]) + .map_err(|err| format!("Error reading from the file/stream: {:?}", err))?; + do_import(bytes)?; + }, + DataFormat::Hex => { + for line in BufReader::new(source).lines() { + let s = line + .map_err(|err| format!("Error reading from the file/stream: {:?}", err))?; + let s = if first_read > 0 { + from_utf8(&first_bytes) + .map_err(|err| format!("Invalid UTF-8: {:?}", err))? + .to_owned() + + &(s[..]) + } else { + s + }; + first_read = 0; + let bytes = s + .from_hex() + .map_err(|err| format!("Invalid hex in file/stream: {:?}", err))?; + do_import(bytes)?; + } + } + }; + self.flush_queue(); + Ok(()) + } } /// Returns `LocalizedReceipt` given `LocalizedTransaction` /// and a vector of receipts from given block up to transaction index. fn transaction_receipt( - machine: &::machine::EthereumMachine, - mut tx: LocalizedTransaction, - receipt: Receipt, - prior_gas_used: U256, - prior_no_of_logs: usize, + machine: &::machine::EthereumMachine, + mut tx: LocalizedTransaction, + receipt: Receipt, + prior_gas_used: U256, + prior_no_of_logs: usize, ) -> LocalizedReceipt { - let sender = tx.sender(); - let transaction_hash = tx.hash(); - let block_hash = tx.block_hash; - let block_number = tx.block_number; - let transaction_index = tx.transaction_index; + let sender = tx.sender(); + let transaction_hash = tx.hash(); + let block_hash = tx.block_hash; + let block_number = tx.block_number; + let transaction_index = tx.transaction_index; - LocalizedReceipt { - from: sender, - to: match tx.action { - Action::Create => None, - Action::Call(ref address) => Some(address.clone().into()) - }, - transaction_hash: transaction_hash, - transaction_index: transaction_index, - block_hash: block_hash, - block_number: block_number, - cumulative_gas_used: receipt.gas_used, - gas_used: receipt.gas_used - prior_gas_used, - contract_address: match tx.action { - Action::Call(_) => None, - Action::Create => 
Some(contract_address(machine.create_address_scheme(block_number), &sender, &tx.nonce, &tx.data).0) - }, - logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry { - entry: log, - block_hash: block_hash, - block_number: block_number, - transaction_hash: transaction_hash, - transaction_index: transaction_index, - transaction_log_index: i, - log_index: prior_no_of_logs + i, - }).collect(), - log_bloom: receipt.log_bloom, - outcome: receipt.outcome, - } + LocalizedReceipt { + from: sender, + to: match tx.action { + Action::Create => None, + Action::Call(ref address) => Some(address.clone().into()), + }, + transaction_hash: transaction_hash, + transaction_index: transaction_index, + block_hash: block_hash, + block_number: block_number, + cumulative_gas_used: receipt.gas_used, + gas_used: receipt.gas_used - prior_gas_used, + contract_address: match tx.action { + Action::Call(_) => None, + Action::Create => Some( + contract_address( + machine.create_address_scheme(block_number), + &sender, + &tx.nonce, + &tx.data, + ) + .0, + ), + }, + logs: receipt + .logs + .into_iter() + .enumerate() + .map(|(i, log)| LocalizedLogEntry { + entry: log, + block_hash: block_hash, + block_number: block_number, + transaction_hash: transaction_hash, + transaction_index: transaction_index, + transaction_log_index: i, + log_index: prior_no_of_logs + i, + }) + .collect(), + log_bloom: receipt.log_bloom, + outcome: receipt.outcome, + } } /// Queue some items to be processed by IO client. struct IoChannelQueue { - /// Using a *signed* integer for counting currently queued messages since the - /// order in which the counter is incremented and decremented is not defined. - /// Using an unsigned integer can (and will) result in integer underflow, - /// incorrectly rejecting messages and returning a FullQueue error. 
- currently_queued: Arc, - limit: i64, + /// Using a *signed* integer for counting currently queued messages since the + /// order in which the counter is incremented and decremented is not defined. + /// Using an unsigned integer can (and will) result in integer underflow, + /// incorrectly rejecting messages and returning a FullQueue error. + currently_queued: Arc, + limit: i64, } impl IoChannelQueue { - pub fn new(limit: usize) -> Self { - let limit = i64::try_from(limit).unwrap_or(i64::max_value()); - IoChannelQueue { - currently_queued: Default::default(), - limit, - } - } + pub fn new(limit: usize) -> Self { + let limit = i64::try_from(limit).unwrap_or(i64::max_value()); + IoChannelQueue { + currently_queued: Default::default(), + limit, + } + } - pub fn queue(&self, channel: &IoChannel, count: usize, fun: F) -> EthcoreResult<()> where - F: Fn(&Client) + Send + Sync + 'static, - { - let queue_size = self.currently_queued.load(AtomicOrdering::Relaxed); - if queue_size >= self.limit { - let err_limit = usize::try_from(self.limit).unwrap_or(usize::max_value()); - bail!("The queue is full ({})", err_limit); - }; + pub fn queue( + &self, + channel: &IoChannel, + count: usize, + fun: F, + ) -> EthcoreResult<()> + where + F: Fn(&Client) + Send + Sync + 'static, + { + let queue_size = self.currently_queued.load(AtomicOrdering::Relaxed); + if queue_size >= self.limit { + let err_limit = usize::try_from(self.limit).unwrap_or(usize::max_value()); + bail!("The queue is full ({})", err_limit); + }; - let count = i64::try_from(count).unwrap_or(i64::max_value()); + let count = i64::try_from(count).unwrap_or(i64::max_value()); - let currently_queued = self.currently_queued.clone(); - let _ok = channel.send(ClientIoMessage::execute(move |client| { - currently_queued.fetch_sub(count, AtomicOrdering::SeqCst); - fun(client); - }))?; + let currently_queued = self.currently_queued.clone(); + let _ok = channel.send(ClientIoMessage::execute(move |client| { + 
currently_queued.fetch_sub(count, AtomicOrdering::SeqCst); + fun(client); + }))?; - self.currently_queued.fetch_add(count, AtomicOrdering::SeqCst); - Ok(()) - } + self.currently_queued + .fetch_add(count, AtomicOrdering::SeqCst); + Ok(()) + } } #[cfg(test)] mod tests { - use test_helpers::{generate_dummy_client, generate_dummy_client_with_data, generate_dummy_client_with_spec_and_data, get_good_dummy_block_hash}; - use blockchain::{BlockProvider, ExtrasInsert}; - use spec::Spec; + use blockchain::{BlockProvider, ExtrasInsert}; + use spec::Spec; + use test_helpers::{ + generate_dummy_client, generate_dummy_client_with_data, + generate_dummy_client_with_spec_and_data, get_good_dummy_block_hash, + }; - #[test] - fn should_not_cache_details_before_commit() { - use client::{BlockChainClient, ChainInfo}; - use test_helpers::{generate_dummy_client, get_good_dummy_block_hash}; + #[test] + fn should_not_cache_details_before_commit() { + use client::{BlockChainClient, ChainInfo}; + use test_helpers::{generate_dummy_client, get_good_dummy_block_hash}; - use std::thread; - use std::time::Duration; - use std::sync::Arc; - use std::sync::atomic::{AtomicBool, Ordering}; - use kvdb::DBTransaction; - use types::encoded; + use kvdb::DBTransaction; + use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread, + time::Duration, + }; + use types::encoded; - let client = generate_dummy_client(0); - let genesis = client.chain_info().best_block_hash; - let (new_hash, new_block) = get_good_dummy_block_hash(); + let client = generate_dummy_client(0); + let genesis = client.chain_info().best_block_hash; + let (new_hash, new_block) = get_good_dummy_block_hash(); - let go = { - // Separate thread uncommitted transaction - let go = Arc::new(AtomicBool::new(false)); - let go_thread = go.clone(); - let another_client = client.clone(); - thread::spawn(move || { - let mut batch = DBTransaction::new(); - another_client.chain.read().insert_block(&mut batch, 
encoded::Block::new(new_block), Vec::new(), ExtrasInsert { - fork_choice: ::engines::ForkChoice::New, - is_finalized: false, - }); - go_thread.store(true, Ordering::SeqCst); - }); - go - }; + let go = { + // Separate thread uncommitted transaction + let go = Arc::new(AtomicBool::new(false)); + let go_thread = go.clone(); + let another_client = client.clone(); + thread::spawn(move || { + let mut batch = DBTransaction::new(); + another_client.chain.read().insert_block( + &mut batch, + encoded::Block::new(new_block), + Vec::new(), + ExtrasInsert { + fork_choice: ::engines::ForkChoice::New, + is_finalized: false, + }, + ); + go_thread.store(true, Ordering::SeqCst); + }); + go + }; - while !go.load(Ordering::SeqCst) { thread::park_timeout(Duration::from_millis(5)); } + while !go.load(Ordering::SeqCst) { + thread::park_timeout(Duration::from_millis(5)); + } - assert!(client.tree_route(&genesis, &new_hash).is_none()); - } + assert!(client.tree_route(&genesis, &new_hash).is_none()); + } - #[test] - fn should_return_block_receipts() { - use client::{BlockChainClient, BlockId, TransactionId}; - use test_helpers::{generate_dummy_client_with_data}; + #[test] + fn should_return_block_receipts() { + use client::{BlockChainClient, BlockId, TransactionId}; + use test_helpers::generate_dummy_client_with_data; - let client = generate_dummy_client_with_data(2, 2, &[1.into(), 1.into()]); - let receipts = client.localized_block_receipts(BlockId::Latest).unwrap(); + let client = generate_dummy_client_with_data(2, 2, &[1.into(), 1.into()]); + let receipts = client.localized_block_receipts(BlockId::Latest).unwrap(); - assert_eq!(receipts.len(), 2); - assert_eq!(receipts[0].transaction_index, 0); - assert_eq!(receipts[0].block_number, 2); - assert_eq!(receipts[0].cumulative_gas_used, 53_000.into()); - assert_eq!(receipts[0].gas_used, 53_000.into()); + assert_eq!(receipts.len(), 2); + assert_eq!(receipts[0].transaction_index, 0); + assert_eq!(receipts[0].block_number, 2); + 
assert_eq!(receipts[0].cumulative_gas_used, 53_000.into()); + assert_eq!(receipts[0].gas_used, 53_000.into()); - assert_eq!(receipts[1].transaction_index, 1); - assert_eq!(receipts[1].block_number, 2); - assert_eq!(receipts[1].cumulative_gas_used, 106_000.into()); - assert_eq!(receipts[1].gas_used, 53_000.into()); + assert_eq!(receipts[1].transaction_index, 1); + assert_eq!(receipts[1].block_number, 2); + assert_eq!(receipts[1].cumulative_gas_used, 106_000.into()); + assert_eq!(receipts[1].gas_used, 53_000.into()); - let receipt = client.transaction_receipt(TransactionId::Hash(receipts[0].transaction_hash)); - assert_eq!(receipt, Some(receipts[0].clone())); + let receipt = client.transaction_receipt(TransactionId::Hash(receipts[0].transaction_hash)); + assert_eq!(receipt, Some(receipts[0].clone())); - let receipt = client.transaction_receipt(TransactionId::Hash(receipts[1].transaction_hash)); - assert_eq!(receipt, Some(receipts[1].clone())); - } + let receipt = client.transaction_receipt(TransactionId::Hash(receipts[1].transaction_hash)); + assert_eq!(receipt, Some(receipts[1].clone())); + } - #[test] - fn should_return_correct_log_index() { - use hash::keccak; - use super::transaction_receipt; - use ethkey::KeyPair; - use types::log_entry::{LogEntry, LocalizedLogEntry}; - use types::receipt::{Receipt, LocalizedReceipt, TransactionOutcome}; - use types::transaction::{Transaction, LocalizedTransaction, Action}; + #[test] + fn should_return_correct_log_index() { + use super::transaction_receipt; + use ethkey::KeyPair; + use hash::keccak; + use types::{ + log_entry::{LocalizedLogEntry, LogEntry}, + receipt::{LocalizedReceipt, Receipt, TransactionOutcome}, + transaction::{Action, LocalizedTransaction, Transaction}, + }; - // given - let key = KeyPair::from_secret_slice(&keccak("test")).unwrap(); - let secret = key.secret(); - let machine = ::ethereum::new_frontier_test_machine(); + // given + let key = KeyPair::from_secret_slice(&keccak("test")).unwrap(); + let secret 
= key.secret(); + let machine = ::ethereum::new_frontier_test_machine(); - let block_number = 1; - let block_hash = 5.into(); - let state_root = 99.into(); - let gas_used = 10.into(); - let raw_tx = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 21000.into(), - action: Action::Call(10.into()), - value: 0.into(), - data: vec![], - }; - let tx1 = raw_tx.clone().sign(secret, None); - let transaction = LocalizedTransaction { - signed: tx1.clone().into(), - block_number: block_number, - block_hash: block_hash, - transaction_index: 1, - cached_sender: Some(tx1.sender()), - }; - let logs = vec![LogEntry { - address: 5.into(), - topics: vec![], - data: vec![], - }, LogEntry { - address: 15.into(), - topics: vec![], - data: vec![], - }]; - let receipt = Receipt { - outcome: TransactionOutcome::StateRoot(state_root), - gas_used: gas_used, - log_bloom: Default::default(), - logs: logs.clone(), - }; + let block_number = 1; + let block_hash = 5.into(); + let state_root = 99.into(); + let gas_used = 10.into(); + let raw_tx = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 21000.into(), + action: Action::Call(10.into()), + value: 0.into(), + data: vec![], + }; + let tx1 = raw_tx.clone().sign(secret, None); + let transaction = LocalizedTransaction { + signed: tx1.clone().into(), + block_number: block_number, + block_hash: block_hash, + transaction_index: 1, + cached_sender: Some(tx1.sender()), + }; + let logs = vec![ + LogEntry { + address: 5.into(), + topics: vec![], + data: vec![], + }, + LogEntry { + address: 15.into(), + topics: vec![], + data: vec![], + }, + ]; + let receipt = Receipt { + outcome: TransactionOutcome::StateRoot(state_root), + gas_used: gas_used, + log_bloom: Default::default(), + logs: logs.clone(), + }; - // when - let receipt = transaction_receipt(&machine, transaction, receipt, 5.into(), 1); + // when + let receipt = transaction_receipt(&machine, transaction, receipt, 5.into(), 1); - // then - assert_eq!(receipt, 
LocalizedReceipt { - from: tx1.sender().into(), - to: match tx1.action { - Action::Create => None, - Action::Call(ref address) => Some(address.clone().into()) - }, - transaction_hash: tx1.hash(), - transaction_index: 1, - block_hash: block_hash, - block_number: block_number, - cumulative_gas_used: gas_used, - gas_used: gas_used - 5, - contract_address: None, - logs: vec![LocalizedLogEntry { - entry: logs[0].clone(), - block_hash: block_hash, - block_number: block_number, - transaction_hash: tx1.hash(), - transaction_index: 1, - transaction_log_index: 0, - log_index: 1, - }, LocalizedLogEntry { - entry: logs[1].clone(), - block_hash: block_hash, - block_number: block_number, - transaction_hash: tx1.hash(), - transaction_index: 1, - transaction_log_index: 1, - log_index: 2, - }], - log_bloom: Default::default(), - outcome: TransactionOutcome::StateRoot(state_root), - }); - } + // then + assert_eq!( + receipt, + LocalizedReceipt { + from: tx1.sender().into(), + to: match tx1.action { + Action::Create => None, + Action::Call(ref address) => Some(address.clone().into()), + }, + transaction_hash: tx1.hash(), + transaction_index: 1, + block_hash: block_hash, + block_number: block_number, + cumulative_gas_used: gas_used, + gas_used: gas_used - 5, + contract_address: None, + logs: vec![ + LocalizedLogEntry { + entry: logs[0].clone(), + block_hash: block_hash, + block_number: block_number, + transaction_hash: tx1.hash(), + transaction_index: 1, + transaction_log_index: 0, + log_index: 1, + }, + LocalizedLogEntry { + entry: logs[1].clone(), + block_hash: block_hash, + block_number: block_number, + transaction_hash: tx1.hash(), + transaction_index: 1, + transaction_log_index: 1, + log_index: 2, + } + ], + log_bloom: Default::default(), + outcome: TransactionOutcome::StateRoot(state_root), + } + ); + } - #[test] - fn should_mark_finalization_correctly_for_parent() { - let client = generate_dummy_client_with_spec_and_data(Spec::new_test_with_finality, 2, 0, &[]); - let chain = 
client.chain(); + #[test] + fn should_mark_finalization_correctly_for_parent() { + let client = + generate_dummy_client_with_spec_and_data(Spec::new_test_with_finality, 2, 0, &[]); + let chain = client.chain(); - let block1_details = chain.block_hash(1).and_then(|h| chain.block_details(&h)); - assert!(block1_details.is_some()); - let block1_details = block1_details.unwrap(); - assert_eq!(block1_details.children.len(), 1); - assert!(block1_details.is_finalized); + let block1_details = chain.block_hash(1).and_then(|h| chain.block_details(&h)); + assert!(block1_details.is_some()); + let block1_details = block1_details.unwrap(); + assert_eq!(block1_details.children.len(), 1); + assert!(block1_details.is_finalized); - let block2_details = chain.block_hash(2).and_then(|h| chain.block_details(&h)); - assert!(block2_details.is_some()); - let block2_details = block2_details.unwrap(); - assert_eq!(block2_details.children.len(), 0); - assert!(!block2_details.is_finalized); - } + let block2_details = chain.block_hash(2).and_then(|h| chain.block_details(&h)); + assert!(block2_details.is_some()); + let block2_details = block2_details.unwrap(); + assert_eq!(block2_details.children.len(), 0); + assert!(!block2_details.is_finalized); + } } - diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 59fc4f813..77ec4b27b 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -14,159 +14,164 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::str::FromStr; -use std::fmt::{Display, Formatter, Error as FmtError}; +use std::{ + fmt::{Display, Error as FmtError, Formatter}, + str::FromStr, +}; -use verification::{VerifierType, QueueConfig}; use journaldb; use snapshot::SnapshotConfiguration; +use verification::{QueueConfig, VerifierType}; -pub use std::time::Duration; pub use blockchain::Config as BlockChainConfig; -pub use trace::Config as TraceConfig; pub use evm::VMType; +pub use std::time::Duration; +pub use trace::Config as TraceConfig; /// Client state db compaction profile #[derive(Debug, PartialEq, Clone)] pub enum DatabaseCompactionProfile { - /// Try to determine compaction profile automatically - Auto, - /// SSD compaction profile - SSD, - /// HDD or other slow storage io compaction profile - HDD, + /// Try to determine compaction profile automatically + Auto, + /// SSD compaction profile + SSD, + /// HDD or other slow storage io compaction profile + HDD, } impl Default for DatabaseCompactionProfile { - fn default() -> Self { - DatabaseCompactionProfile::Auto - } + fn default() -> Self { + DatabaseCompactionProfile::Auto + } } impl FromStr for DatabaseCompactionProfile { - type Err = String; + type Err = String; - fn from_str(s: &str) -> Result { - match s { - "auto" => Ok(DatabaseCompactionProfile::Auto), - "ssd" => Ok(DatabaseCompactionProfile::SSD), - "hdd" => Ok(DatabaseCompactionProfile::HDD), - _ => Err("Invalid compaction profile given. Expected default/hdd/ssd.".into()), - } - } + fn from_str(s: &str) -> Result { + match s { + "auto" => Ok(DatabaseCompactionProfile::Auto), + "ssd" => Ok(DatabaseCompactionProfile::SSD), + "hdd" => Ok(DatabaseCompactionProfile::HDD), + _ => Err("Invalid compaction profile given. Expected default/hdd/ssd.".into()), + } + } } /// Operating mode for the client. #[derive(Debug, Eq, PartialEq, Clone)] pub enum Mode { - /// Always on. 
- Active, - /// Goes offline after client is inactive for some (given) time, but - /// comes back online after a while of inactivity. - Passive(Duration, Duration), - /// Goes offline after client is inactive for some (given) time and - /// stays inactive. - Dark(Duration), - /// Always off. - Off, + /// Always on. + Active, + /// Goes offline after client is inactive for some (given) time, but + /// comes back online after a while of inactivity. + Passive(Duration, Duration), + /// Goes offline after client is inactive for some (given) time and + /// stays inactive. + Dark(Duration), + /// Always off. + Off, } impl Display for Mode { - fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { - match *self { - Mode::Active => write!(f, "active"), - Mode::Passive(..) => write!(f, "passive"), - Mode::Dark(..) => write!(f, "dark"), - Mode::Off => write!(f, "offline"), - } - } + fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { + match *self { + Mode::Active => write!(f, "active"), + Mode::Passive(..) => write!(f, "passive"), + Mode::Dark(..) => write!(f, "dark"), + Mode::Off => write!(f, "offline"), + } + } } /// Client configuration. Includes configs for all sub-systems. #[derive(Debug, PartialEq, Clone)] pub struct ClientConfig { - /// Block queue configuration. - pub queue: QueueConfig, - /// Blockchain configuration. - pub blockchain: BlockChainConfig, - /// Trace configuration. - pub tracing: TraceConfig, - /// VM type. - pub vm_type: VMType, - /// Fat DB enabled? - pub fat_db: bool, - /// The JournalDB ("pruning") algorithm to use. - pub pruning: journaldb::Algorithm, - /// The name of the client instance. - pub name: String, - /// RocksDB column cache-size if not default - pub db_cache_size: Option, - /// State db compaction profile - pub db_compaction: DatabaseCompactionProfile, - /// Operating mode - pub mode: Mode, - /// The chain spec name - pub spec_name: String, - /// Type of block verifier used by client. 
- pub verifier_type: VerifierType, - /// State db cache-size. - pub state_cache_size: usize, - /// EVM jump-tables cache size. - pub jump_table_size: usize, - /// Minimum state pruning history size. - pub history: u64, - /// Ideal memory usage for state pruning history. - pub history_mem: usize, - /// Check seal valididity on block import - pub check_seal: bool, - /// Maximal number of transactions queued for verification in a separate thread. - pub transaction_verification_queue_size: usize, - /// Maximal number of blocks to import at each round. - pub max_round_blocks_to_import: usize, - /// Snapshot configuration - pub snapshot: SnapshotConfiguration, + /// Block queue configuration. + pub queue: QueueConfig, + /// Blockchain configuration. + pub blockchain: BlockChainConfig, + /// Trace configuration. + pub tracing: TraceConfig, + /// VM type. + pub vm_type: VMType, + /// Fat DB enabled? + pub fat_db: bool, + /// The JournalDB ("pruning") algorithm to use. + pub pruning: journaldb::Algorithm, + /// The name of the client instance. + pub name: String, + /// RocksDB column cache-size if not default + pub db_cache_size: Option, + /// State db compaction profile + pub db_compaction: DatabaseCompactionProfile, + /// Operating mode + pub mode: Mode, + /// The chain spec name + pub spec_name: String, + /// Type of block verifier used by client. + pub verifier_type: VerifierType, + /// State db cache-size. + pub state_cache_size: usize, + /// EVM jump-tables cache size. + pub jump_table_size: usize, + /// Minimum state pruning history size. + pub history: u64, + /// Ideal memory usage for state pruning history. + pub history_mem: usize, + /// Check seal valididity on block import + pub check_seal: bool, + /// Maximal number of transactions queued for verification in a separate thread. + pub transaction_verification_queue_size: usize, + /// Maximal number of blocks to import at each round. 
+ pub max_round_blocks_to_import: usize, + /// Snapshot configuration + pub snapshot: SnapshotConfiguration, } impl Default for ClientConfig { - fn default() -> Self { - let mb = 1024 * 1024; - ClientConfig { - queue: Default::default(), - blockchain: Default::default(), - tracing: Default::default(), - vm_type: Default::default(), - fat_db: false, - pruning: journaldb::Algorithm::OverlayRecent, - name: "default".into(), - db_cache_size: None, - db_compaction: Default::default(), - mode: Mode::Active, - spec_name: "".into(), - verifier_type: VerifierType::Canon, - state_cache_size: 1 * mb, - jump_table_size: 1 * mb, - history: 64, - history_mem: 32 * mb, - check_seal: true, - transaction_verification_queue_size: 8192, - max_round_blocks_to_import: 12, - snapshot: Default::default(), - } - } + fn default() -> Self { + let mb = 1024 * 1024; + ClientConfig { + queue: Default::default(), + blockchain: Default::default(), + tracing: Default::default(), + vm_type: Default::default(), + fat_db: false, + pruning: journaldb::Algorithm::OverlayRecent, + name: "default".into(), + db_cache_size: None, + db_compaction: Default::default(), + mode: Mode::Active, + spec_name: "".into(), + verifier_type: VerifierType::Canon, + state_cache_size: 1 * mb, + jump_table_size: 1 * mb, + history: 64, + history_mem: 32 * mb, + check_seal: true, + transaction_verification_queue_size: 8192, + max_round_blocks_to_import: 12, + snapshot: Default::default(), + } + } } #[cfg(test)] mod test { - use super::DatabaseCompactionProfile; + use super::DatabaseCompactionProfile; - #[test] - fn test_default_compaction_profile() { - assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Auto); - } + #[test] + fn test_default_compaction_profile() { + assert_eq!( + DatabaseCompactionProfile::default(), + DatabaseCompactionProfile::Auto + ); + } - #[test] - fn test_parsing_compaction_profile() { - assert_eq!(DatabaseCompactionProfile::Auto, "auto".parse().unwrap()); - 
assert_eq!(DatabaseCompactionProfile::SSD, "ssd".parse().unwrap()); - assert_eq!(DatabaseCompactionProfile::HDD, "hdd".parse().unwrap()); - } + #[test] + fn test_parsing_compaction_profile() { + assert_eq!(DatabaseCompactionProfile::Auto, "auto".parse().unwrap()); + assert_eq!(DatabaseCompactionProfile::SSD, "ssd".parse().unwrap()); + assert_eq!(DatabaseCompactionProfile::HDD, "hdd".parse().unwrap()); + } } diff --git a/ethcore/src/client/evm_test_client.rs b/ethcore/src/client/evm_test_client.rs index be5857b0e..b331a5bcb 100644 --- a/ethcore/src/client/evm_test_client.rs +++ b/ethcore/src/client/evm_test_client.rs @@ -16,48 +16,56 @@ //! Simple Client used for EVM tests. -use std::fmt; -use std::sync::Arc; -use ethereum_types::{H256, U256, H160}; -use {factory, journaldb, trie, kvdb_memorydb}; -use kvdb::{self, KeyValueDB}; -use {state, state_db, client, executive, trace, db, spec, pod_state}; -use types::{log_entry, receipt, transaction}; -use factory::Factories; -use evm::{VMType, FinalizationResult}; -use vm::{self, ActionParams}; +use client; +use db; +use ethereum_types::{H160, H256, U256}; use ethtrie; +use evm::{FinalizationResult, VMType}; +use executive; +use factory::{self, Factories}; +use journaldb; +use kvdb::{self, KeyValueDB}; +use kvdb_memorydb; +use pod_state; +use spec; +use state; +use state_db; +use std::{fmt, sync::Arc}; +use trace; +use trie; +use types::{log_entry, receipt, transaction}; +use vm::{self, ActionParams}; /// EVM test Error. #[derive(Debug)] pub enum EvmTestError { - /// Trie integrity error. - Trie(Box), - /// EVM error. - Evm(vm::Error), - /// Initialization error. - ClientError(::error::Error), - /// Post-condition failure, - PostCondition(String), + /// Trie integrity error. + Trie(Box), + /// EVM error. + Evm(vm::Error), + /// Initialization error. 
+ ClientError(::error::Error), + /// Post-condition failure, + PostCondition(String), } impl> From for EvmTestError { - fn from(err: E) -> Self { - EvmTestError::ClientError(err.into()) - } + fn from(err: E) -> Self { + EvmTestError::ClientError(err.into()) + } } impl fmt::Display for EvmTestError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::EvmTestError::*; + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use self::EvmTestError::*; - match *self { - Trie(ref err) => write!(fmt, "Trie: {}", err), - Evm(ref err) => write!(fmt, "EVM: {}", err), - ClientError(ref err) => write!(fmt, "{}", err), - PostCondition(ref err) => write!(fmt, "{}", err), - } - } + match *self { + Trie(ref err) => write!(fmt, "Trie: {}", err), + Evm(ref err) => write!(fmt, "EVM: {}", err), + ClientError(ref err) => write!(fmt, "{}", err), + PostCondition(ref err) => write!(fmt, "{}", err), + } + } } use ethereum; @@ -65,283 +73,330 @@ use ethjson::spec::ForkSpec; /// Simplified, single-block EVM test client. pub struct EvmTestClient<'a> { - state: state::State, - spec: &'a spec::Spec, - dump_state: fn(&state::State) -> Option, + state: state::State, + spec: &'a spec::Spec, + dump_state: fn(&state::State) -> Option, } fn no_dump_state(_: &state::State) -> Option { - None + None } impl<'a> fmt::Debug for EvmTestClient<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("EvmTestClient") - .field("state", &self.state) - .field("spec", &self.spec.name) - .finish() - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("EvmTestClient") + .field("state", &self.state) + .field("spec", &self.spec.name) + .finish() + } } impl<'a> EvmTestClient<'a> { - /// Converts a json spec definition into spec. 
- pub fn spec_from_json(spec: &ForkSpec) -> Option { - match *spec { - ForkSpec::Frontier => Some(ethereum::new_frontier_test()), - ForkSpec::Homestead => Some(ethereum::new_homestead_test()), - ForkSpec::EIP150 => Some(ethereum::new_eip150_test()), - ForkSpec::EIP158 => Some(ethereum::new_eip161_test()), - ForkSpec::Byzantium => Some(ethereum::new_byzantium_test()), - ForkSpec::Constantinople => Some(ethereum::new_constantinople_test()), - ForkSpec::ConstantinopleFix => Some(ethereum::new_constantinople_fix_test()), - ForkSpec::Istanbul => Some(ethereum::new_istanbul_test()), - ForkSpec::EIP158ToByzantiumAt5 => Some(ethereum::new_transition_test()), - ForkSpec::FrontierToHomesteadAt5 | ForkSpec::HomesteadToDaoAt5 | ForkSpec::HomesteadToEIP150At5 => None, - } - } + /// Converts a json spec definition into spec. + pub fn spec_from_json(spec: &ForkSpec) -> Option { + match *spec { + ForkSpec::Frontier => Some(ethereum::new_frontier_test()), + ForkSpec::Homestead => Some(ethereum::new_homestead_test()), + ForkSpec::EIP150 => Some(ethereum::new_eip150_test()), + ForkSpec::EIP158 => Some(ethereum::new_eip161_test()), + ForkSpec::Byzantium => Some(ethereum::new_byzantium_test()), + ForkSpec::Constantinople => Some(ethereum::new_constantinople_test()), + ForkSpec::ConstantinopleFix => Some(ethereum::new_constantinople_fix_test()), + ForkSpec::Istanbul => Some(ethereum::new_istanbul_test()), + ForkSpec::EIP158ToByzantiumAt5 => Some(ethereum::new_transition_test()), + ForkSpec::FrontierToHomesteadAt5 + | ForkSpec::HomesteadToDaoAt5 + | ForkSpec::HomesteadToEIP150At5 => None, + } + } - /// Change default function for dump state (default does not dump) - pub fn set_dump_state_fn(&mut self, dump_state: fn(&state::State) -> Option) { - self.dump_state = dump_state; - } + /// Change default function for dump state (default does not dump) + pub fn set_dump_state_fn( + &mut self, + dump_state: fn(&state::State) -> Option, + ) { + self.dump_state = dump_state; + } - /// Creates new 
EVM test client with in-memory DB initialized with genesis of given Spec. - /// Takes a `TrieSpec` to set the type of trie. - pub fn new_with_trie(spec: &'a spec::Spec, trie_spec: trie::TrieSpec) -> Result { - let factories = Self::factories(trie_spec); - let state = Self::state_from_spec(spec, &factories)?; + /// Creates new EVM test client with in-memory DB initialized with genesis of given Spec. + /// Takes a `TrieSpec` to set the type of trie. + pub fn new_with_trie( + spec: &'a spec::Spec, + trie_spec: trie::TrieSpec, + ) -> Result { + let factories = Self::factories(trie_spec); + let state = Self::state_from_spec(spec, &factories)?; - Ok(EvmTestClient { - state, - spec, - dump_state: no_dump_state, - }) - } + Ok(EvmTestClient { + state, + spec, + dump_state: no_dump_state, + }) + } - /// Creates new EVM test client with an in-memory DB initialized with genesis of given chain Spec. - pub fn new(spec: &'a spec::Spec) -> Result { - Self::new_with_trie(spec, trie::TrieSpec::Secure) - } + /// Creates new EVM test client with an in-memory DB initialized with genesis of given chain Spec. + pub fn new(spec: &'a spec::Spec) -> Result { + Self::new_with_trie(spec, trie::TrieSpec::Secure) + } - /// Creates new EVM test client with an in-memory DB initialized with given PodState. - /// Takes a `TrieSpec` to set the type of trie. - pub fn from_pod_state_with_trie(spec: &'a spec::Spec, pod_state: pod_state::PodState, trie_spec: trie::TrieSpec) -> Result { - let factories = Self::factories(trie_spec); - let state = Self::state_from_pod(spec, &factories, pod_state)?; + /// Creates new EVM test client with an in-memory DB initialized with given PodState. + /// Takes a `TrieSpec` to set the type of trie. 
+ pub fn from_pod_state_with_trie( + spec: &'a spec::Spec, + pod_state: pod_state::PodState, + trie_spec: trie::TrieSpec, + ) -> Result { + let factories = Self::factories(trie_spec); + let state = Self::state_from_pod(spec, &factories, pod_state)?; - Ok(EvmTestClient { - state, - spec, - dump_state: no_dump_state, - }) - } + Ok(EvmTestClient { + state, + spec, + dump_state: no_dump_state, + }) + } - /// Creates new EVM test client with an in-memory DB initialized with given PodState. - pub fn from_pod_state(spec: &'a spec::Spec, pod_state: pod_state::PodState) -> Result { - Self::from_pod_state_with_trie(spec, pod_state, trie::TrieSpec::Secure) - } + /// Creates new EVM test client with an in-memory DB initialized with given PodState. + pub fn from_pod_state( + spec: &'a spec::Spec, + pod_state: pod_state::PodState, + ) -> Result { + Self::from_pod_state_with_trie(spec, pod_state, trie::TrieSpec::Secure) + } - fn factories(trie_spec: trie::TrieSpec) -> Factories { - Factories { - vm: factory::VmFactory::new(VMType::Interpreter, 5 * 1024), - trie: trie::TrieFactory::new(trie_spec), - accountdb: Default::default(), - } - } + fn factories(trie_spec: trie::TrieSpec) -> Factories { + Factories { + vm: factory::VmFactory::new(VMType::Interpreter, 5 * 1024), + trie: trie::TrieFactory::new(trie_spec), + accountdb: Default::default(), + } + } - fn state_from_spec(spec: &'a spec::Spec, factories: &Factories) -> Result, EvmTestError> { - let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed"))); - let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE); - let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024); - state_db = spec.ensure_db_good(state_db, factories)?; + fn state_from_spec( + spec: &'a spec::Spec, + factories: &Factories, + ) -> Result, EvmTestError> { + let db = Arc::new(kvdb_memorydb::create( + db::NUM_COLUMNS.expect("We use column-based DB; qed"), + )); + let journal_db 
= + journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE); + let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024); + state_db = spec.ensure_db_good(state_db, factories)?; - let genesis = spec.genesis_header(); - // Write DB - { - let mut batch = kvdb::DBTransaction::new(); - state_db.journal_under(&mut batch, 0, &genesis.hash())?; - db.write(batch)?; - } + let genesis = spec.genesis_header(); + // Write DB + { + let mut batch = kvdb::DBTransaction::new(); + state_db.journal_under(&mut batch, 0, &genesis.hash())?; + db.write(batch)?; + } - state::State::from_existing( - state_db, - *genesis.state_root(), - spec.engine.account_start_nonce(0), - factories.clone() - ).map_err(EvmTestError::Trie) - } + state::State::from_existing( + state_db, + *genesis.state_root(), + spec.engine.account_start_nonce(0), + factories.clone(), + ) + .map_err(EvmTestError::Trie) + } - fn state_from_pod(spec: &'a spec::Spec, factories: &Factories, pod_state: pod_state::PodState) -> Result, EvmTestError> { - let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed"))); - let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE); - let state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024); - let mut state = state::State::new( - state_db, - spec.engine.account_start_nonce(0), - factories.clone(), - ); - state.populate_from(pod_state); - state.commit()?; - Ok(state) - } + fn state_from_pod( + spec: &'a spec::Spec, + factories: &Factories, + pod_state: pod_state::PodState, + ) -> Result, EvmTestError> { + let db = Arc::new(kvdb_memorydb::create( + db::NUM_COLUMNS.expect("We use column-based DB; qed"), + )); + let journal_db = + journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE); + let state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024); + let mut state = state::State::new( + state_db, + spec.engine.account_start_nonce(0), + factories.clone(), 
+ ); + state.populate_from(pod_state); + state.commit()?; + Ok(state) + } - /// Return current state. - pub fn state(&self) -> &state::State { - &self.state - } + /// Return current state. + pub fn state(&self) -> &state::State { + &self.state + } - /// Execute the VM given ActionParams and tracer. - /// Returns amount of gas left and the output. - pub fn call( - &mut self, - params: ActionParams, - tracer: &mut T, - vm_tracer: &mut V, - ) -> Result - { - let genesis = self.spec.genesis_header(); - let info = client::EnvInfo { - number: genesis.number(), - author: *genesis.author(), - timestamp: genesis.timestamp(), - difficulty: *genesis.difficulty(), - last_hashes: Arc::new([H256::default(); 256].to_vec()), - gas_used: 0.into(), - gas_limit: *genesis.gas_limit(), - }; - self.call_envinfo(params, tracer, vm_tracer, info) - } + /// Execute the VM given ActionParams and tracer. + /// Returns amount of gas left and the output. + pub fn call( + &mut self, + params: ActionParams, + tracer: &mut T, + vm_tracer: &mut V, + ) -> Result { + let genesis = self.spec.genesis_header(); + let info = client::EnvInfo { + number: genesis.number(), + author: *genesis.author(), + timestamp: genesis.timestamp(), + difficulty: *genesis.difficulty(), + last_hashes: Arc::new([H256::default(); 256].to_vec()), + gas_used: 0.into(), + gas_limit: *genesis.gas_limit(), + }; + self.call_envinfo(params, tracer, vm_tracer, info) + } - /// Execute the VM given envinfo, ActionParams and tracer. - /// Returns amount of gas left and the output. 
- pub fn call_envinfo( - &mut self, - params: ActionParams, - tracer: &mut T, - vm_tracer: &mut V, - info: client::EnvInfo, - ) -> Result - { - let mut substate = state::Substate::new(); - let machine = self.spec.engine.machine(); - let schedule = machine.schedule(info.number); - let mut executive = executive::Executive::new(&mut self.state, &info, &machine, &schedule); - executive.call( - params, - &mut substate, - tracer, - vm_tracer, - ).map_err(EvmTestError::Evm) - } + /// Execute the VM given envinfo, ActionParams and tracer. + /// Returns amount of gas left and the output. + pub fn call_envinfo( + &mut self, + params: ActionParams, + tracer: &mut T, + vm_tracer: &mut V, + info: client::EnvInfo, + ) -> Result { + let mut substate = state::Substate::new(); + let machine = self.spec.engine.machine(); + let schedule = machine.schedule(info.number); + let mut executive = executive::Executive::new(&mut self.state, &info, &machine, &schedule); + executive + .call(params, &mut substate, tracer, vm_tracer) + .map_err(EvmTestError::Evm) + } - /// Executes a SignedTransaction within context of the provided state and `EnvInfo`. - /// Returns the state root, gas left and the output. - pub fn transact( - &mut self, - env_info: &client::EnvInfo, - transaction: transaction::SignedTransaction, - tracer: T, - vm_tracer: V, - ) -> std::result::Result, TransactErr> { - let initial_gas = transaction.gas; - // Verify transaction - let is_ok = transaction.verify_basic(true, None); - if let Err(error) = is_ok { - return Err( - TransactErr{ - state_root: *self.state.root(), - error: error.into(), - end_state: (self.dump_state)(&self.state), - }); - } + /// Executes a SignedTransaction within context of the provided state and `EnvInfo`. + /// Returns the state root, gas left and the output. 
+ pub fn transact( + &mut self, + env_info: &client::EnvInfo, + transaction: transaction::SignedTransaction, + tracer: T, + vm_tracer: V, + ) -> std::result::Result, TransactErr> { + let initial_gas = transaction.gas; + // Verify transaction + let is_ok = transaction.verify_basic(true, None); + if let Err(error) = is_ok { + return Err(TransactErr { + state_root: *self.state.root(), + error: error.into(), + end_state: (self.dump_state)(&self.state), + }); + } - // Apply transaction - let result = self.state.apply_with_tracing(&env_info, self.spec.engine.machine(), &transaction, tracer, vm_tracer); - let scheme = self.spec.engine.machine().create_address_scheme(env_info.number); + // Apply transaction + let result = self.state.apply_with_tracing( + &env_info, + self.spec.engine.machine(), + &transaction, + tracer, + vm_tracer, + ); + let scheme = self + .spec + .engine + .machine() + .create_address_scheme(env_info.number); - // Touch the coinbase at the end of the test to simulate - // miner reward. - // Details: https://github.com/paritytech/parity-ethereum/issues/9431 - let schedule = self.spec.engine.machine().schedule(env_info.number); - self.state.add_balance(&env_info.author, &0.into(), if schedule.no_empty { - state::CleanupMode::NoEmpty - } else { - state::CleanupMode::ForceCreate - }).ok(); - // Touching also means that we should remove the account if it's within eip161 - // conditions. - self.state.kill_garbage( - &vec![env_info.author].into_iter().collect(), - schedule.kill_empty, - &None, - false - ).ok(); + // Touch the coinbase at the end of the test to simulate + // miner reward. 
+ // Details: https://github.com/paritytech/parity-ethereum/issues/9431 + let schedule = self.spec.engine.machine().schedule(env_info.number); + self.state + .add_balance( + &env_info.author, + &0.into(), + if schedule.no_empty { + state::CleanupMode::NoEmpty + } else { + state::CleanupMode::ForceCreate + }, + ) + .ok(); + // Touching also means that we should remove the account if it's within eip161 + // conditions. + self.state + .kill_garbage( + &vec![env_info.author].into_iter().collect(), + schedule.kill_empty, + &None, + false, + ) + .ok(); - self.state.commit().ok(); + self.state.commit().ok(); - let state_root = *self.state.root(); + let state_root = *self.state.root(); - let end_state = (self.dump_state)(&self.state); + let end_state = (self.dump_state)(&self.state); - match result { - Ok(result) => { - Ok(TransactSuccess { - state_root, - gas_left: initial_gas - result.receipt.gas_used, - outcome: result.receipt.outcome, - output: result.output, - trace: result.trace, - vm_trace: result.vm_trace, - logs: result.receipt.logs, - contract_address: if let transaction::Action::Create = transaction.action { - Some(executive::contract_address(scheme, &transaction.sender(), &transaction.nonce, &transaction.data).0) - } else { - None - }, - end_state, - } - )}, - Err(error) => Err(TransactErr { - state_root, - error, - end_state, - }), - } - } + match result { + Ok(result) => Ok(TransactSuccess { + state_root, + gas_left: initial_gas - result.receipt.gas_used, + outcome: result.receipt.outcome, + output: result.output, + trace: result.trace, + vm_trace: result.vm_trace, + logs: result.receipt.logs, + contract_address: if let transaction::Action::Create = transaction.action { + Some( + executive::contract_address( + scheme, + &transaction.sender(), + &transaction.nonce, + &transaction.data, + ) + .0, + ) + } else { + None + }, + end_state, + }), + Err(error) => Err(TransactErr { + state_root, + error, + end_state, + }), + } + } } /// To be returned inside a 
std::result::Result::Ok after a successful /// transaction completed. #[allow(dead_code)] pub struct TransactSuccess { - /// State root - pub state_root: H256, - /// Amount of gas left - pub gas_left: U256, - /// Output - pub output: Vec, - /// Traces - pub trace: Vec, - /// VM Traces - pub vm_trace: Option, - /// Created contract address (if any) - pub contract_address: Option, - /// Generated logs - pub logs: Vec, - /// outcome - pub outcome: receipt::TransactionOutcome, - /// end state if needed - pub end_state: Option, + /// State root + pub state_root: H256, + /// Amount of gas left + pub gas_left: U256, + /// Output + pub output: Vec, + /// Traces + pub trace: Vec, + /// VM Traces + pub vm_trace: Option, + /// Created contract address (if any) + pub contract_address: Option, + /// Generated logs + pub logs: Vec, + /// outcome + pub outcome: receipt::TransactionOutcome, + /// end state if needed + pub end_state: Option, } /// To be returned inside a std::result::Result::Err after a failed /// transaction. #[allow(dead_code)] pub struct TransactErr { - /// State root - pub state_root: H256, - /// Execution error - pub error: ::error::Error, - /// end state if needed - pub end_state: Option, + /// State root + pub state_root: H256, + /// Execution error + pub error: ::error::Error, + /// end state if needed + pub end_state: Option, } diff --git a/ethcore/src/client/io_message.rs b/ethcore/src/client/io_message.rs index 92e2d3e25..7d57f694c 100644 --- a/ethcore/src/client/io_message.rs +++ b/ethcore/src/client/io_message.rs @@ -14,43 +14,43 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::fmt; use bytes::Bytes; use client::Client; use ethereum_types::H256; use snapshot::ManifestData; +use std::fmt; /// Message type for external and internal events #[derive(Debug)] pub enum ClientIoMessage { - /// Best Block Hash in chain has been changed - NewChainHead, - /// A block is ready - BlockVerified, - /// Begin snapshot restoration - BeginRestoration(ManifestData), - /// Feed a state chunk to the snapshot service - FeedStateChunk(H256, Bytes), - /// Feed a block chunk to the snapshot service - FeedBlockChunk(H256, Bytes), - /// Take a snapshot for the block with given number. - TakeSnapshot(u64), - /// Execute wrapped closure - Execute(Callback), + /// Best Block Hash in chain has been changed + NewChainHead, + /// A block is ready + BlockVerified, + /// Begin snapshot restoration + BeginRestoration(ManifestData), + /// Feed a state chunk to the snapshot service + FeedStateChunk(H256, Bytes), + /// Feed a block chunk to the snapshot service + FeedBlockChunk(H256, Bytes), + /// Take a snapshot for the block with given number. + TakeSnapshot(u64), + /// Execute wrapped closure + Execute(Callback), } impl ClientIoMessage { - /// Create new `ClientIoMessage` that executes given procedure. - pub fn execute(fun: F) -> Self { - ClientIoMessage::Execute(Callback(Box::new(fun))) - } + /// Create new `ClientIoMessage` that executes given procedure. + pub fn execute(fun: F) -> Self { + ClientIoMessage::Execute(Callback(Box::new(fun))) + } } /// A function to invoke in the client thread. 
pub struct Callback(pub Box); impl fmt::Debug for Callback { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "") - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "") + } } diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 31ec21047..3c5337d6d 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -27,29 +27,32 @@ mod io_message; pub mod test_client; mod trace; -pub use self::client::*; -pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockChainConfig, VMType}; #[cfg(any(test, feature = "test-helpers"))] pub use self::evm_test_client::{EvmTestClient, EvmTestError, TransactErr, TransactSuccess}; -pub use self::io_message::ClientIoMessage; #[cfg(any(test, feature = "test-helpers"))] -pub use self::test_client::{TestBlockChainClient, EachBlockWith}; -pub use self::chain_notify::{ChainNotify, NewBlocks, ChainRoute, ChainRouteType, ChainMessageType}; -pub use self::traits::{ - Nonce, Balance, ChainInfo, BlockInfo, ReopenBlock, PrepareOpenBlock, TransactionInfo, ScheduleInfo, ImportSealedBlock, BroadcastProposalBlock, ImportBlock, - StateOrBlock, StateClient, Call, EngineInfo, AccountData, BlockChain, BlockProducer, SealedBlockImporter, BadBlocks, - BlockChainReset, ImportExportBlocks +pub use self::test_client::{EachBlockWith, TestBlockChainClient}; +pub use self::{ + chain_notify::{ChainMessageType, ChainNotify, ChainRoute, ChainRouteType, NewBlocks}, + client::*, + config::{BlockChainConfig, ClientConfig, DatabaseCompactionProfile, Mode, VMType}, + io_message::ClientIoMessage, + traits::{ + AccountData, BadBlocks, Balance, BlockChain, BlockChainClient, BlockChainReset, BlockInfo, + BlockProducer, BroadcastProposalBlock, Call, ChainInfo, EngineClient, EngineInfo, + ImportBlock, ImportExportBlocks, ImportSealedBlock, IoClient, Nonce, PrepareOpenBlock, + ProvingBlockChainClient, ReopenBlock, ScheduleInfo, SealedBlockImporter, StateClient, + StateOrBlock, 
TransactionInfo, + }, }; pub use state::StateInfo; -pub use self::traits::{BlockChainClient, EngineClient, ProvingBlockChainClient, IoClient}; -pub use types::ids::*; -pub use types::trace_filter::Filter as TraceFilter; -pub use types::pruning_info::PruningInfo; -pub use types::call_analytics::CallAnalytics; +pub use types::{ + call_analytics::CallAnalytics, ids::*, pruning_info::PruningInfo, + trace_filter::Filter as TraceFilter, +}; pub use executive::{Executed, Executive, TransactOptions}; -pub use vm::{LastHashes, EnvInfo}; +pub use vm::{EnvInfo, LastHashes}; pub use error::TransactionImportError; pub use verification::VerifierType; diff --git a/ethcore/src/client/private_notify.rs b/ethcore/src/client/private_notify.rs index 4be183873..2c264eb75 100644 --- a/ethcore/src/client/private_notify.rs +++ b/ethcore/src/client/private_notify.rs @@ -17,7 +17,7 @@ use error::TransactionImportError; /// Represent private transactions handler inside the client -pub trait PrivateNotify : Send + Sync { - /// fires when private transaction message queued via client io queue - fn private_transaction_queued(&self) -> Result<(), TransactionImportError>; +pub trait PrivateNotify: Send + Sync { + /// fires when private transaction message queued via client io queue + fn private_transaction_queued(&self) -> Result<(), TransactionImportError>; } diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index e3ccc71fb..6221a3f53 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -16,16 +16,20 @@ //! Test client. 
-use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrder}; -use std::sync::Arc; -use std::collections::{HashMap, BTreeMap}; -use std::mem; +use std::{ + collections::{BTreeMap, HashMap}, + mem, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrder}, + Arc, + }, +}; -use blockchain::{TreeRoute, BlockReceipts}; +use blockchain::{BlockReceipts, TreeRoute}; use bytes::Bytes; -use db::{NUM_COLUMNS, COL_STATE}; +use db::{COL_STATE, NUM_COLUMNS}; use ethcore_miner::pool::VerifiedTransaction; -use ethereum_types::{H256, U256, Address}; +use ethereum_types::{Address, H256, U256}; use ethkey::{Generator, Random}; use ethtrie; use hash::keccak; @@ -35,28 +39,30 @@ use kvdb_memorydb; use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; use rustc_hex::FromHex; -use types::transaction::{self, Transaction, LocalizedTransaction, SignedTransaction, Action}; -use types::BlockNumber; -use types::basic_account::BasicAccount; -use types::encoded; -use types::filter::Filter; -use types::header::Header; -use types::log_entry::LocalizedLogEntry; -use types::pruning_info::PruningInfo; -use types::receipt::{Receipt, LocalizedReceipt, TransactionOutcome}; -use types::view; -use types::views::BlockView; +use types::{ + basic_account::BasicAccount, + encoded, + filter::Filter, + header::Header, + log_entry::LocalizedLogEntry, + pruning_info::PruningInfo, + receipt::{LocalizedReceipt, Receipt, TransactionOutcome}, + transaction::{self, Action, LocalizedTransaction, SignedTransaction, Transaction}, + view, + views::BlockView, + BlockNumber, +}; use vm::Schedule; -use block::{OpenBlock, SealedBlock, ClosedBlock}; +use block::{ClosedBlock, OpenBlock, SealedBlock}; use call_contract::{CallContract, RegistryInfo}; use client::{ - Nonce, Balance, ChainInfo, BlockInfo, ReopenBlock, TransactionInfo, - PrepareOpenBlock, BlockChainClient, BlockChainInfo, BlockStatus, BlockId, Mode, - TransactionId, UncleId, TraceId, TraceFilter, LastHashes, CallAnalytics, - 
ProvingBlockChainClient, ScheduleInfo, ImportSealedBlock, BroadcastProposalBlock, ImportBlock, StateOrBlock, - Call, StateClient, EngineInfo, AccountData, BlockChain, BlockProducer, SealedBlockImporter, IoClient, - BadBlocks, traits::ForceUpdateSealing + traits::ForceUpdateSealing, AccountData, BadBlocks, Balance, BlockChain, BlockChainClient, + BlockChainInfo, BlockId, BlockInfo, BlockProducer, BlockStatus, BroadcastProposalBlock, Call, + CallAnalytics, ChainInfo, EngineInfo, ImportBlock, ImportSealedBlock, IoClient, LastHashes, + Mode, Nonce, PrepareOpenBlock, ProvingBlockChainClient, ReopenBlock, ScheduleInfo, + SealedBlockImporter, StateClient, StateOrBlock, TraceFilter, TraceId, TransactionId, + TransactionInfo, UncleId, }; use engines::EthEngine; use error::{Error, EthcoreResult}; @@ -68,378 +74,398 @@ use spec::Spec; use state::StateInfo; use state_db::StateDB; use trace::LocalizedTrace; -use verification::queue::QueueInfo; -use verification::queue::kind::blocks::Unverified; +use verification::queue::{kind::blocks::Unverified, QueueInfo}; /// Test client. pub struct TestBlockChainClient { - /// Blocks. - pub blocks: RwLock>, - /// Mapping of numbers to hashes. - pub numbers: RwLock>, - /// Genesis block hash. - pub genesis_hash: H256, - /// Last block hash. - pub last_hash: RwLock, - /// Extra data do set for each block - pub extra_data: Bytes, - /// Difficulty. - pub difficulty: RwLock, - /// Balances. - pub balances: RwLock>, - /// Nonces. - pub nonces: RwLock>, - /// Storage. - pub storage: RwLock>, - /// Code. - pub code: RwLock>, - /// Execution result. - pub execution_result: RwLock>>, - /// Transaction receipts. - pub receipts: RwLock>, - /// Logs - pub logs: RwLock>, - /// Should return errors on logs. - pub error_on_logs: RwLock>, - /// Block queue size. 
- pub queue_size: AtomicUsize, - /// Miner - pub miner: Arc, - /// Spec - pub spec: Spec, - /// Timestamp assigned to latest sealed block - pub latest_block_timestamp: RwLock, - /// Ancient block info. - pub ancient_block: RwLock>, - /// First block info. - pub first_block: RwLock>, - /// Traces to return - pub traces: RwLock>>, - /// Pruning history size to report. - pub history: RwLock>, - /// Is disabled - pub disabled: AtomicBool, + /// Blocks. + pub blocks: RwLock>, + /// Mapping of numbers to hashes. + pub numbers: RwLock>, + /// Genesis block hash. + pub genesis_hash: H256, + /// Last block hash. + pub last_hash: RwLock, + /// Extra data do set for each block + pub extra_data: Bytes, + /// Difficulty. + pub difficulty: RwLock, + /// Balances. + pub balances: RwLock>, + /// Nonces. + pub nonces: RwLock>, + /// Storage. + pub storage: RwLock>, + /// Code. + pub code: RwLock>, + /// Execution result. + pub execution_result: RwLock>>, + /// Transaction receipts. + pub receipts: RwLock>, + /// Logs + pub logs: RwLock>, + /// Should return errors on logs. + pub error_on_logs: RwLock>, + /// Block queue size. + pub queue_size: AtomicUsize, + /// Miner + pub miner: Arc, + /// Spec + pub spec: Spec, + /// Timestamp assigned to latest sealed block + pub latest_block_timestamp: RwLock, + /// Ancient block info. + pub ancient_block: RwLock>, + /// First block info. + pub first_block: RwLock>, + /// Traces to return + pub traces: RwLock>>, + /// Pruning history size to report. + pub history: RwLock>, + /// Is disabled + pub disabled: AtomicBool, } /// Used for generating test client blocks. #[derive(Clone, Copy)] pub enum EachBlockWith { - /// Plain block. - Nothing, - /// Block with an uncle. - Uncle, - /// Block with a transaction. - Transaction, - /// Block with multiple transactions. - Transactions(usize), - /// Block with an uncle and transaction. - UncleAndTransaction + /// Plain block. + Nothing, + /// Block with an uncle. + Uncle, + /// Block with a transaction. 
+ Transaction, + /// Block with multiple transactions. + Transactions(usize), + /// Block with an uncle and transaction. + UncleAndTransaction, } impl Default for TestBlockChainClient { - fn default() -> Self { - TestBlockChainClient::new() - } + fn default() -> Self { + TestBlockChainClient::new() + } } impl TestBlockChainClient { - /// Creates new test client. - pub fn new() -> Self { - Self::new_with_extra_data(Bytes::new()) - } + /// Creates new test client. + pub fn new() -> Self { + Self::new_with_extra_data(Bytes::new()) + } - /// Creates new test client with specified extra data for each block - pub fn new_with_extra_data(extra_data: Bytes) -> Self { - let spec = Spec::new_test(); - TestBlockChainClient::new_with_spec_and_extra(spec, extra_data) - } + /// Creates new test client with specified extra data for each block + pub fn new_with_extra_data(extra_data: Bytes) -> Self { + let spec = Spec::new_test(); + TestBlockChainClient::new_with_spec_and_extra(spec, extra_data) + } - /// Create test client with custom spec. - pub fn new_with_spec(spec: Spec) -> Self { - TestBlockChainClient::new_with_spec_and_extra(spec, Bytes::new()) - } + /// Create test client with custom spec. + pub fn new_with_spec(spec: Spec) -> Self { + TestBlockChainClient::new_with_spec_and_extra(spec, Bytes::new()) + } - /// Create test client with custom spec and extra data. - pub fn new_with_spec_and_extra(spec: Spec, extra_data: Bytes) -> Self { - let genesis_block = spec.genesis_block(); - let genesis_hash = spec.genesis_header().hash(); + /// Create test client with custom spec and extra data. 
+ pub fn new_with_spec_and_extra(spec: Spec, extra_data: Bytes) -> Self { + let genesis_block = spec.genesis_block(); + let genesis_hash = spec.genesis_header().hash(); - let mut client = TestBlockChainClient { - blocks: RwLock::new(HashMap::new()), - numbers: RwLock::new(HashMap::new()), - genesis_hash: H256::new(), - extra_data: extra_data, - last_hash: RwLock::new(H256::new()), - difficulty: RwLock::new(spec.genesis_header().difficulty().clone()), - balances: RwLock::new(HashMap::new()), - nonces: RwLock::new(HashMap::new()), - storage: RwLock::new(HashMap::new()), - code: RwLock::new(HashMap::new()), - execution_result: RwLock::new(None), - receipts: RwLock::new(HashMap::new()), - logs: RwLock::new(Vec::new()), - queue_size: AtomicUsize::new(0), - miner: Arc::new(Miner::new_for_tests(&spec, None)), - spec: spec, - latest_block_timestamp: RwLock::new(10_000_000), - ancient_block: RwLock::new(None), - first_block: RwLock::new(None), - traces: RwLock::new(None), - history: RwLock::new(None), - disabled: AtomicBool::new(false), - error_on_logs: RwLock::new(None), - }; + let mut client = TestBlockChainClient { + blocks: RwLock::new(HashMap::new()), + numbers: RwLock::new(HashMap::new()), + genesis_hash: H256::new(), + extra_data: extra_data, + last_hash: RwLock::new(H256::new()), + difficulty: RwLock::new(spec.genesis_header().difficulty().clone()), + balances: RwLock::new(HashMap::new()), + nonces: RwLock::new(HashMap::new()), + storage: RwLock::new(HashMap::new()), + code: RwLock::new(HashMap::new()), + execution_result: RwLock::new(None), + receipts: RwLock::new(HashMap::new()), + logs: RwLock::new(Vec::new()), + queue_size: AtomicUsize::new(0), + miner: Arc::new(Miner::new_for_tests(&spec, None)), + spec: spec, + latest_block_timestamp: RwLock::new(10_000_000), + ancient_block: RwLock::new(None), + first_block: RwLock::new(None), + traces: RwLock::new(None), + history: RwLock::new(None), + disabled: AtomicBool::new(false), + error_on_logs: RwLock::new(None), + 
}; - // insert genesis hash. - client.blocks.get_mut().insert(genesis_hash, genesis_block); - client.numbers.get_mut().insert(0, genesis_hash); - *client.last_hash.get_mut() = genesis_hash; - client.genesis_hash = genesis_hash; - client - } + // insert genesis hash. + client.blocks.get_mut().insert(genesis_hash, genesis_block); + client.numbers.get_mut().insert(0, genesis_hash); + *client.last_hash.get_mut() = genesis_hash; + client.genesis_hash = genesis_hash; + client + } - /// Set the transaction receipt result - pub fn set_transaction_receipt(&self, id: TransactionId, receipt: LocalizedReceipt) { - self.receipts.write().insert(id, receipt); - } + /// Set the transaction receipt result + pub fn set_transaction_receipt(&self, id: TransactionId, receipt: LocalizedReceipt) { + self.receipts.write().insert(id, receipt); + } - /// Set the execution result. - pub fn set_execution_result(&self, result: Result) { - *self.execution_result.write() = Some(result); - } + /// Set the execution result. + pub fn set_execution_result(&self, result: Result) { + *self.execution_result.write() = Some(result); + } - /// Set the balance of account `address` to `balance`. - pub fn set_balance(&self, address: Address, balance: U256) { - self.balances.write().insert(address, balance); - } + /// Set the balance of account `address` to `balance`. + pub fn set_balance(&self, address: Address, balance: U256) { + self.balances.write().insert(address, balance); + } - /// Set nonce of account `address` to `nonce`. - pub fn set_nonce(&self, address: Address, nonce: U256) { - self.nonces.write().insert(address, nonce); - } + /// Set nonce of account `address` to `nonce`. + pub fn set_nonce(&self, address: Address, nonce: U256) { + self.nonces.write().insert(address, nonce); + } - /// Set `code` at `address`. - pub fn set_code(&self, address: Address, code: Bytes) { - self.code.write().insert(address, code); - } + /// Set `code` at `address`. 
+ pub fn set_code(&self, address: Address, code: Bytes) { + self.code.write().insert(address, code); + } - /// Set storage `position` to `value` for account `address`. - pub fn set_storage(&self, address: Address, position: H256, value: H256) { - self.storage.write().insert((address, position), value); - } + /// Set storage `position` to `value` for account `address`. + pub fn set_storage(&self, address: Address, position: H256, value: H256) { + self.storage.write().insert((address, position), value); + } - /// Set block queue size for testing - pub fn set_queue_size(&self, size: usize) { - self.queue_size.store(size, AtomicOrder::Relaxed); - } + /// Set block queue size for testing + pub fn set_queue_size(&self, size: usize) { + self.queue_size.store(size, AtomicOrder::Relaxed); + } - /// Set timestamp assigned to latest sealed block - pub fn set_latest_block_timestamp(&self, ts: u64) { - *self.latest_block_timestamp.write() = ts; - } + /// Set timestamp assigned to latest sealed block + pub fn set_latest_block_timestamp(&self, ts: u64) { + *self.latest_block_timestamp.write() = ts; + } - /// Set logs to return for each logs call. - pub fn set_logs(&self, logs: Vec) { - *self.logs.write() = logs; - } + /// Set logs to return for each logs call. + pub fn set_logs(&self, logs: Vec) { + *self.logs.write() = logs; + } - /// Set return errors on logs. - pub fn set_error_on_logs(&self, val: Option) { - *self.error_on_logs.write() = val; - } + /// Set return errors on logs. + pub fn set_error_on_logs(&self, val: Option) { + *self.error_on_logs.write() = val; + } - /// Add a block to test client. - pub fn add_block(&self, with: EachBlockWith, hook: F) - where F: Fn(Header) -> Header - { - let n = self.numbers.read().len(); + /// Add a block to test client. 
+ pub fn add_block(&self, with: EachBlockWith, hook: F) + where + F: Fn(Header) -> Header, + { + let n = self.numbers.read().len(); - let mut header = Header::new(); - header.set_difficulty(From::from(n)); - header.set_parent_hash(self.last_hash.read().clone()); - header.set_number(n as BlockNumber); - header.set_gas_limit(U256::from(1_000_000)); - header.set_extra_data(self.extra_data.clone()); + let mut header = Header::new(); + header.set_difficulty(From::from(n)); + header.set_parent_hash(self.last_hash.read().clone()); + header.set_number(n as BlockNumber); + header.set_gas_limit(U256::from(1_000_000)); + header.set_extra_data(self.extra_data.clone()); - header = hook(header); + header = hook(header); - let uncles = match with { - EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { - let mut uncles = RlpStream::new_list(1); - let mut uncle_header = Header::new(); - uncle_header.set_difficulty(From::from(n)); - uncle_header.set_parent_hash(self.last_hash.read().clone()); - uncle_header.set_number(n as BlockNumber); - uncles.append(&uncle_header); - header.set_uncles_hash(keccak(uncles.as_raw())); - uncles - }, - _ => RlpStream::new_list(0) - }; - let txs = match with { - EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction | EachBlockWith::Transactions(_) => { - let num_transactions = match with { - EachBlockWith::Transactions(num) => num, - _ => 1, - }; - let mut txs = RlpStream::new_list(num_transactions); - let keypair = Random.generate().unwrap(); - let mut nonce = U256::zero(); + let uncles = match with { + EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { + let mut uncles = RlpStream::new_list(1); + let mut uncle_header = Header::new(); + uncle_header.set_difficulty(From::from(n)); + uncle_header.set_parent_hash(self.last_hash.read().clone()); + uncle_header.set_number(n as BlockNumber); + uncles.append(&uncle_header); + header.set_uncles_hash(keccak(uncles.as_raw())); + uncles + } + _ => RlpStream::new_list(0), + }; + 
let txs = match with { + EachBlockWith::Transaction + | EachBlockWith::UncleAndTransaction + | EachBlockWith::Transactions(_) => { + let num_transactions = match with { + EachBlockWith::Transactions(num) => num, + _ => 1, + }; + let mut txs = RlpStream::new_list(num_transactions); + let keypair = Random.generate().unwrap(); + let mut nonce = U256::zero(); - for _ in 0..num_transactions { - // Update nonces value - let tx = Transaction { - action: Action::Create, - value: U256::from(100), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::from(200_000_000_000u64), - nonce: nonce - }; - let signed_tx = tx.sign(keypair.secret(), None); - txs.append(&signed_tx); - nonce += U256::one(); - } + for _ in 0..num_transactions { + // Update nonces value + let tx = Transaction { + action: Action::Create, + value: U256::from(100), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::from(200_000_000_000u64), + nonce: nonce, + }; + let signed_tx = tx.sign(keypair.secret(), None); + txs.append(&signed_tx); + nonce += U256::one(); + } - self.nonces.write().insert(keypair.address(), nonce); - txs.out() - }, - _ => ::rlp::EMPTY_LIST_RLP.to_vec() - }; + self.nonces.write().insert(keypair.address(), nonce); + txs.out() + } + _ => ::rlp::EMPTY_LIST_RLP.to_vec(), + }; - let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&txs, 1); - rlp.append_raw(uncles.as_raw(), 1); - let unverified = Unverified::from_rlp(rlp.out()).unwrap(); - self.import_block(unverified).unwrap(); - } + let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&txs, 1); + rlp.append_raw(uncles.as_raw(), 1); + let unverified = Unverified::from_rlp(rlp.out()).unwrap(); + self.import_block(unverified).unwrap(); + } - /// Add a sequence of blocks to test client. 
- pub fn add_blocks(&self, count: usize, with: EachBlockWith) { - for _ in 0..count { - self.add_block(with, |header| header); - } - } + /// Add a sequence of blocks to test client. + pub fn add_blocks(&self, count: usize, with: EachBlockWith) { + for _ in 0..count { + self.add_block(with, |header| header); + } + } - /// Make a bad block by setting invalid parent hash. - pub fn corrupt_block_parent(&self, n: BlockNumber) { - let hash = self.block_hash(BlockId::Number(n)).unwrap(); - let mut header: Header = self.block_header(BlockId::Number(n)).unwrap().decode().expect("decoding failed"); - header.set_parent_hash(H256::from(42)); - let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&::rlp::NULL_RLP, 1); - rlp.append_raw(&::rlp::NULL_RLP, 1); - self.blocks.write().insert(hash, rlp.out()); - } + /// Make a bad block by setting invalid parent hash. + pub fn corrupt_block_parent(&self, n: BlockNumber) { + let hash = self.block_hash(BlockId::Number(n)).unwrap(); + let mut header: Header = self + .block_header(BlockId::Number(n)) + .unwrap() + .decode() + .expect("decoding failed"); + header.set_parent_hash(H256::from(42)); + let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&::rlp::NULL_RLP, 1); + rlp.append_raw(&::rlp::NULL_RLP, 1); + self.blocks.write().insert(hash, rlp.out()); + } - /// Get block hash with `delta` as offset from the most recent blocks. - pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 { - let blocks_read = self.numbers.read(); - let index = blocks_read.len() - delta; - blocks_read[&index].clone() - } + /// Get block hash with `delta` as offset from the most recent blocks. 
+ pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 { + let blocks_read = self.numbers.read(); + let index = blocks_read.len() - delta; + blocks_read[&index].clone() + } - fn block_hash(&self, id: BlockId) -> Option { - match id { - BlockId::Hash(hash) => Some(hash), - BlockId::Number(n) => self.numbers.read().get(&(n as usize)).cloned(), - BlockId::Earliest => self.numbers.read().get(&0).cloned(), - BlockId::Latest => self.numbers.read().get(&(self.numbers.read().len() - 1)).cloned() - } - } + fn block_hash(&self, id: BlockId) -> Option { + match id { + BlockId::Hash(hash) => Some(hash), + BlockId::Number(n) => self.numbers.read().get(&(n as usize)).cloned(), + BlockId::Earliest => self.numbers.read().get(&0).cloned(), + BlockId::Latest => self + .numbers + .read() + .get(&(self.numbers.read().len() - 1)) + .cloned(), + } + } - /// Inserts a transaction with given gas price to miners transactions queue. - pub fn insert_transaction_with_gas_price_to_queue(&self, gas_price: U256) -> H256 { - let keypair = Random.generate().unwrap(); - let tx = Transaction { - action: Action::Create, - value: U256::from(100), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: gas_price, - nonce: U256::zero() - }; - let signed_tx = tx.sign(keypair.secret(), None); - self.set_balance(signed_tx.sender(), 10_000_000_000_000_000_000u64.into()); - let hash = signed_tx.hash(); - let res = self.miner.import_external_transactions(self, vec![signed_tx.into()]); - let res = res.into_iter().next().unwrap(); - assert!(res.is_ok()); - hash - } + /// Inserts a transaction with given gas price to miners transactions queue. 
+ pub fn insert_transaction_with_gas_price_to_queue(&self, gas_price: U256) -> H256 { + let keypair = Random.generate().unwrap(); + let tx = Transaction { + action: Action::Create, + value: U256::from(100), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: gas_price, + nonce: U256::zero(), + }; + let signed_tx = tx.sign(keypair.secret(), None); + self.set_balance(signed_tx.sender(), 10_000_000_000_000_000_000u64.into()); + let hash = signed_tx.hash(); + let res = self + .miner + .import_external_transactions(self, vec![signed_tx.into()]); + let res = res.into_iter().next().unwrap(); + assert!(res.is_ok()); + hash + } - /// Inserts a transaction to miners transactions queue. - pub fn insert_transaction_to_queue(&self) -> H256 { - self.insert_transaction_with_gas_price_to_queue(U256::from(20_000_000_000u64)) - } + /// Inserts a transaction to miners transactions queue. + pub fn insert_transaction_to_queue(&self) -> H256 { + self.insert_transaction_with_gas_price_to_queue(U256::from(20_000_000_000u64)) + } - /// Set reported history size. - pub fn set_history(&self, h: Option) { - *self.history.write() = h; - } + /// Set reported history size. + pub fn set_history(&self, h: Option) { + *self.history.write() = h; + } - /// Returns true if the client has been disabled. - pub fn is_disabled(&self) -> bool { - self.disabled.load(AtomicOrder::Relaxed) - } + /// Returns true if the client has been disabled. 
+ pub fn is_disabled(&self) -> bool { + self.disabled.load(AtomicOrder::Relaxed) + } } pub fn get_temp_state_db() -> StateDB { - let db = kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0)); - let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE); - StateDB::new(journal_db, 1024 * 1024) + let db = kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0)); + let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE); + StateDB::new(journal_db, 1024 * 1024) } impl ReopenBlock for TestBlockChainClient { - fn reopen_block(&self, block: ClosedBlock) -> OpenBlock { - block.reopen(&*self.spec.engine) - } + fn reopen_block(&self, block: ClosedBlock) -> OpenBlock { + block.reopen(&*self.spec.engine) + } } impl PrepareOpenBlock for TestBlockChainClient { - fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> Result { - let engine = &*self.spec.engine; - let genesis_header = self.spec.genesis_header(); - let db = self.spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); + fn prepare_open_block( + &self, + author: Address, + gas_range_target: (U256, U256), + extra_data: Bytes, + ) -> Result { + let engine = &*self.spec.engine; + let genesis_header = self.spec.genesis_header(); + let db = self + .spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); - let last_hashes = vec![genesis_header.hash()]; - let mut open_block = OpenBlock::new( - engine, - Default::default(), - false, - db, - &genesis_header, - Arc::new(last_hashes), - author, - gas_range_target, - extra_data, - false, - None, - )?; - // TODO [todr] Override timestamp for predictability - open_block.set_timestamp(*self.latest_block_timestamp.read()); - Ok(open_block) - } + let last_hashes = vec![genesis_header.hash()]; + let mut open_block = OpenBlock::new( + engine, + Default::default(), + false, + db, + &genesis_header, + Arc::new(last_hashes), + author, + gas_range_target, + 
extra_data, + false, + None, + )?; + // TODO [todr] Override timestamp for predictability + open_block.set_timestamp(*self.latest_block_timestamp.read()); + Ok(open_block) + } } impl ScheduleInfo for TestBlockChainClient { - fn latest_schedule(&self) -> Schedule { - Schedule::new_post_eip150(24576, true, true, true) - } + fn latest_schedule(&self) -> Schedule { + Schedule::new_post_eip150(24576, true, true, true) + } } impl ImportSealedBlock for TestBlockChainClient { - fn import_sealed_block(&self, _block: SealedBlock) -> EthcoreResult { - Ok(H256::default()) - } + fn import_sealed_block(&self, _block: SealedBlock) -> EthcoreResult { + Ok(H256::default()) + } } impl BlockProducer for TestBlockChainClient {} impl BroadcastProposalBlock for TestBlockChainClient { - fn broadcast_proposal_block(&self, _block: SealedBlock) {} + fn broadcast_proposal_block(&self, _block: SealedBlock) {} } impl SealedBlockImporter for TestBlockChainClient {} @@ -448,513 +474,651 @@ impl ::miner::TransactionVerifierClient for TestBlockChainClient {} impl ::miner::BlockChainClient for TestBlockChainClient {} impl Nonce for TestBlockChainClient { - fn nonce(&self, address: &Address, id: BlockId) -> Option { - match id { - BlockId::Latest => Some(self.nonces.read().get(address).cloned().unwrap_or(self.spec.params().account_start_nonce)), - _ => None, - } - } + fn nonce(&self, address: &Address, id: BlockId) -> Option { + match id { + BlockId::Latest => Some( + self.nonces + .read() + .get(address) + .cloned() + .unwrap_or(self.spec.params().account_start_nonce), + ), + _ => None, + } + } - fn latest_nonce(&self, address: &Address) -> U256 { - self.nonce(address, BlockId::Latest).unwrap() - } + fn latest_nonce(&self, address: &Address) -> U256 { + self.nonce(address, BlockId::Latest).unwrap() + } } impl Balance for TestBlockChainClient { - fn balance(&self, address: &Address, state: StateOrBlock) -> Option { - match state { - StateOrBlock::Block(BlockId::Latest) | StateOrBlock::State(_) => 
Some(self.balances.read().get(address).cloned().unwrap_or_else(U256::zero)), - _ => None, - } - } + fn balance(&self, address: &Address, state: StateOrBlock) -> Option { + match state { + StateOrBlock::Block(BlockId::Latest) | StateOrBlock::State(_) => Some( + self.balances + .read() + .get(address) + .cloned() + .unwrap_or_else(U256::zero), + ), + _ => None, + } + } - fn latest_balance(&self, address: &Address) -> U256 { - self.balance(address, BlockId::Latest.into()).unwrap() - } + fn latest_balance(&self, address: &Address) -> U256 { + self.balance(address, BlockId::Latest.into()).unwrap() + } } impl AccountData for TestBlockChainClient {} impl ChainInfo for TestBlockChainClient { - fn chain_info(&self) -> BlockChainInfo { - let number = self.blocks.read().len() as BlockNumber - 1; - BlockChainInfo { - total_difficulty: *self.difficulty.read(), - pending_total_difficulty: *self.difficulty.read(), - genesis_hash: self.genesis_hash.clone(), - best_block_hash: self.last_hash.read().clone(), - best_block_number: number, - best_block_timestamp: number, - first_block_hash: self.first_block.read().as_ref().map(|x| x.0), - first_block_number: self.first_block.read().as_ref().map(|x| x.1), - ancient_block_hash: self.ancient_block.read().as_ref().map(|x| x.0), - ancient_block_number: self.ancient_block.read().as_ref().map(|x| x.1) - } - } + fn chain_info(&self) -> BlockChainInfo { + let number = self.blocks.read().len() as BlockNumber - 1; + BlockChainInfo { + total_difficulty: *self.difficulty.read(), + pending_total_difficulty: *self.difficulty.read(), + genesis_hash: self.genesis_hash.clone(), + best_block_hash: self.last_hash.read().clone(), + best_block_number: number, + best_block_timestamp: number, + first_block_hash: self.first_block.read().as_ref().map(|x| x.0), + first_block_number: self.first_block.read().as_ref().map(|x| x.1), + ancient_block_hash: self.ancient_block.read().as_ref().map(|x| x.0), + ancient_block_number: 
self.ancient_block.read().as_ref().map(|x| x.1), + } + } } impl BlockInfo for TestBlockChainClient { - fn block_header(&self, id: BlockId) -> Option { - self.block_hash(id) - .and_then(|hash| self.blocks.read().get(&hash).map(|r| view!(BlockView, r).header_rlp().as_raw().to_vec())) - .map(encoded::Header::new) - } + fn block_header(&self, id: BlockId) -> Option { + self.block_hash(id) + .and_then(|hash| { + self.blocks + .read() + .get(&hash) + .map(|r| view!(BlockView, r).header_rlp().as_raw().to_vec()) + }) + .map(encoded::Header::new) + } - fn best_block_header(&self) -> Header { - self.block_header(BlockId::Hash(self.chain_info().best_block_hash)) - .expect("Best block always has header.") - .decode() - .expect("decoding failed") - } + fn best_block_header(&self) -> Header { + self.block_header(BlockId::Hash(self.chain_info().best_block_hash)) + .expect("Best block always has header.") + .decode() + .expect("decoding failed") + } - fn block(&self, id: BlockId) -> Option { - self.block_hash(id) - .and_then(|hash| self.blocks.read().get(&hash).cloned()) - .map(encoded::Block::new) - } + fn block(&self, id: BlockId) -> Option { + self.block_hash(id) + .and_then(|hash| self.blocks.read().get(&hash).cloned()) + .map(encoded::Block::new) + } - fn code_hash(&self, address: &Address, id: BlockId) -> Option { - match id { - BlockId::Latest => self.code.read().get(address).map(|c| keccak(&c)), - _ => None, - } - } + fn code_hash(&self, address: &Address, id: BlockId) -> Option { + match id { + BlockId::Latest => self.code.read().get(address).map(|c| keccak(&c)), + _ => None, + } + } } impl CallContract for TestBlockChainClient { - fn call_contract(&self, _id: BlockId, _address: Address, _data: Bytes) -> Result { Ok(vec![]) } + fn call_contract( + &self, + _id: BlockId, + _address: Address, + _data: Bytes, + ) -> Result { + Ok(vec![]) + } } impl TransactionInfo for TestBlockChainClient { - fn transaction_block(&self, _id: TransactionId) -> Option { - None // Simple 
default. - } + fn transaction_block(&self, _id: TransactionId) -> Option { + None // Simple default. + } } impl BlockChain for TestBlockChainClient {} impl RegistryInfo for TestBlockChainClient { - fn registry_address(&self, _name: String, _block: BlockId) -> Option
{ None } + fn registry_address(&self, _name: String, _block: BlockId) -> Option
{ + None + } } impl ImportBlock for TestBlockChainClient { - fn import_block(&self, unverified: Unverified) -> EthcoreResult { - let header = unverified.header; - let h = header.hash(); - let number: usize = header.number() as usize; - if number > self.blocks.read().len() { - panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().len(), number); - } - if number > 0 { - match self.blocks.read().get(header.parent_hash()) { - Some(parent) => { - let parent = view!(BlockView, parent).header(); - if parent.number() != (header.number() - 1) { - panic!("Unexpected block parent"); - } - }, - None => { - panic!("Unknown block parent {:?} for block {}", header.parent_hash(), number); - } - } - } - let len = self.numbers.read().len(); - if number == len { - { - let mut difficulty = self.difficulty.write(); - *difficulty = *difficulty + header.difficulty().clone(); - } - mem::replace(&mut *self.last_hash.write(), h.clone()); - self.blocks.write().insert(h.clone(), unverified.bytes); - self.numbers.write().insert(number, h.clone()); - let mut parent_hash = header.parent_hash().clone(); - if number > 0 { - let mut n = number - 1; - while n > 0 && self.numbers.read()[&n] != parent_hash { - *self.numbers.write().get_mut(&n).unwrap() = parent_hash.clone(); - n -= 1; - parent_hash = view!(BlockView, &self.blocks.read()[&parent_hash]).header().parent_hash().clone(); - } - } - } - else { - self.blocks.write().insert(h.clone(), unverified.bytes); - } - Ok(h) - } + fn import_block(&self, unverified: Unverified) -> EthcoreResult { + let header = unverified.header; + let h = header.hash(); + let number: usize = header.number() as usize; + if number > self.blocks.read().len() { + panic!( + "Unexpected block number. 
Expected {}, got {}", + self.blocks.read().len(), + number + ); + } + if number > 0 { + match self.blocks.read().get(header.parent_hash()) { + Some(parent) => { + let parent = view!(BlockView, parent).header(); + if parent.number() != (header.number() - 1) { + panic!("Unexpected block parent"); + } + } + None => { + panic!( + "Unknown block parent {:?} for block {}", + header.parent_hash(), + number + ); + } + } + } + let len = self.numbers.read().len(); + if number == len { + { + let mut difficulty = self.difficulty.write(); + *difficulty = *difficulty + header.difficulty().clone(); + } + mem::replace(&mut *self.last_hash.write(), h.clone()); + self.blocks.write().insert(h.clone(), unverified.bytes); + self.numbers.write().insert(number, h.clone()); + let mut parent_hash = header.parent_hash().clone(); + if number > 0 { + let mut n = number - 1; + while n > 0 && self.numbers.read()[&n] != parent_hash { + *self.numbers.write().get_mut(&n).unwrap() = parent_hash.clone(); + n -= 1; + parent_hash = view!(BlockView, &self.blocks.read()[&parent_hash]) + .header() + .parent_hash() + .clone(); + } + } + } else { + self.blocks.write().insert(h.clone(), unverified.bytes); + } + Ok(h) + } } impl Call for TestBlockChainClient { - // State will not be used by test client anyway, since all methods that accept state are mocked - type State = TestState; + // State will not be used by test client anyway, since all methods that accept state are mocked + type State = TestState; - fn call(&self, _t: &SignedTransaction, _analytics: CallAnalytics, _state: &mut Self::State, _header: &Header) -> Result { - self.execution_result.read().clone().unwrap() - } + fn call( + &self, + _t: &SignedTransaction, + _analytics: CallAnalytics, + _state: &mut Self::State, + _header: &Header, + ) -> Result { + self.execution_result.read().clone().unwrap() + } - fn call_many(&self, txs: &[(SignedTransaction, CallAnalytics)], state: &mut Self::State, header: &Header) -> Result, CallError> { - let mut res = 
Vec::with_capacity(txs.len()); - for &(ref tx, analytics) in txs { - res.push(self.call(tx, analytics, state, header)?); - } - Ok(res) - } + fn call_many( + &self, + txs: &[(SignedTransaction, CallAnalytics)], + state: &mut Self::State, + header: &Header, + ) -> Result, CallError> { + let mut res = Vec::with_capacity(txs.len()); + for &(ref tx, analytics) in txs { + res.push(self.call(tx, analytics, state, header)?); + } + Ok(res) + } - fn estimate_gas(&self, _t: &SignedTransaction, _state: &Self::State, _header: &Header) -> Result { - Ok(21000.into()) - } + fn estimate_gas( + &self, + _t: &SignedTransaction, + _state: &Self::State, + _header: &Header, + ) -> Result { + Ok(21000.into()) + } } /// NewType wrapper around `()` to impersonate `State` in trait impls. State will not be used by /// test client, since all methods that accept state are mocked. pub struct TestState; impl StateInfo for TestState { - fn nonce(&self, _address: &Address) -> ethtrie::Result { unimplemented!() } - fn balance(&self, _address: &Address) -> ethtrie::Result { unimplemented!() } - fn storage_at(&self, _address: &Address, _key: &H256) -> ethtrie::Result { unimplemented!() } - fn code(&self, _address: &Address) -> ethtrie::Result>> { unimplemented!() } + fn nonce(&self, _address: &Address) -> ethtrie::Result { + unimplemented!() + } + fn balance(&self, _address: &Address) -> ethtrie::Result { + unimplemented!() + } + fn storage_at(&self, _address: &Address, _key: &H256) -> ethtrie::Result { + unimplemented!() + } + fn code(&self, _address: &Address) -> ethtrie::Result>> { + unimplemented!() + } } impl StateClient for TestBlockChainClient { - // State will not be used by test client anyway, since all methods that accept state are mocked - type State = TestState; + // State will not be used by test client anyway, since all methods that accept state are mocked + type State = TestState; - fn latest_state_and_header(&self) -> (Self::State, Header) { - (TestState, self.best_block_header()) - } 
+ fn latest_state_and_header(&self) -> (Self::State, Header) { + (TestState, self.best_block_header()) + } - fn state_at(&self, _id: BlockId) -> Option { - Some(TestState) - } + fn state_at(&self, _id: BlockId) -> Option { + Some(TestState) + } } impl EngineInfo for TestBlockChainClient { - fn engine(&self) -> &EthEngine { - unimplemented!() - } + fn engine(&self) -> &EthEngine { + unimplemented!() + } } impl BadBlocks for TestBlockChainClient { - fn bad_blocks(&self) -> Vec<(Unverified, String)> { - vec![ - (Unverified { - header: Default::default(), - transactions: vec![], - uncles: vec![], - bytes: vec![1, 2, 3], - }, "Invalid block".into()) - ] - } + fn bad_blocks(&self) -> Vec<(Unverified, String)> { + vec![( + Unverified { + header: Default::default(), + transactions: vec![], + uncles: vec![], + bytes: vec![1, 2, 3], + }, + "Invalid block".into(), + )] + } } impl BlockChainClient for TestBlockChainClient { - fn replay(&self, _id: TransactionId, _analytics: CallAnalytics) -> Result { - self.execution_result.read().clone().unwrap() - } + fn replay(&self, _id: TransactionId, _analytics: CallAnalytics) -> Result { + self.execution_result.read().clone().unwrap() + } - fn replay_block_transactions(&self, _block: BlockId, _analytics: CallAnalytics) -> Result>, CallError> { - Ok(Box::new(self.traces.read().clone().unwrap().into_iter().map(|t| t.transaction_hash.unwrap_or(H256::new())).zip(self.execution_result.read().clone().unwrap().into_iter()))) - } + fn replay_block_transactions( + &self, + _block: BlockId, + _analytics: CallAnalytics, + ) -> Result>, CallError> { + Ok(Box::new( + self.traces + .read() + .clone() + .unwrap() + .into_iter() + .map(|t| t.transaction_hash.unwrap_or(H256::new())) + .zip(self.execution_result.read().clone().unwrap().into_iter()), + )) + } - fn block_total_difficulty(&self, _id: BlockId) -> Option { - Some(U256::zero()) - } + fn block_total_difficulty(&self, _id: BlockId) -> Option { + Some(U256::zero()) + } - fn block_hash(&self, id: 
BlockId) -> Option { - Self::block_hash(self, id) - } + fn block_hash(&self, id: BlockId) -> Option { + Self::block_hash(self, id) + } - fn storage_root(&self, _address: &Address, _id: BlockId) -> Option { - None - } + fn storage_root(&self, _address: &Address, _id: BlockId) -> Option { + None + } - fn code(&self, address: &Address, state: StateOrBlock) -> Option> { - match state { - StateOrBlock::Block(BlockId::Latest) => Some(self.code.read().get(address).cloned()), - _ => None, - } - } + fn code(&self, address: &Address, state: StateOrBlock) -> Option> { + match state { + StateOrBlock::Block(BlockId::Latest) => Some(self.code.read().get(address).cloned()), + _ => None, + } + } - fn storage_at(&self, address: &Address, position: &H256, state: StateOrBlock) -> Option { - match state { - StateOrBlock::Block(BlockId::Latest) => Some(self.storage.read().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new)), - _ => None, - } - } + fn storage_at(&self, address: &Address, position: &H256, state: StateOrBlock) -> Option { + match state { + StateOrBlock::Block(BlockId::Latest) => Some( + self.storage + .read() + .get(&(address.clone(), position.clone())) + .cloned() + .unwrap_or_else(H256::new), + ), + _ => None, + } + } - fn list_accounts(&self, _id: BlockId, _after: Option<&Address>, _count: u64) -> Option> { - None - } + fn list_accounts( + &self, + _id: BlockId, + _after: Option<&Address>, + _count: u64, + ) -> Option> { + None + } - fn list_storage(&self, _id: BlockId, _account: &Address, _after: Option<&H256>, _count: u64) -> Option> { - None - } - fn transaction(&self, _id: TransactionId) -> Option { - None // Simple default. - } + fn list_storage( + &self, + _id: BlockId, + _account: &Address, + _after: Option<&H256>, + _count: u64, + ) -> Option> { + None + } + fn transaction(&self, _id: TransactionId) -> Option { + None // Simple default. + } - fn uncle(&self, _id: UncleId) -> Option { - None // Simple default. 
- } + fn uncle(&self, _id: UncleId) -> Option { + None // Simple default. + } - fn uncle_extra_info(&self, _id: UncleId) -> Option> { - None - } + fn uncle_extra_info(&self, _id: UncleId) -> Option> { + None + } - fn transaction_receipt(&self, id: TransactionId) -> Option { - self.receipts.read().get(&id).cloned() - } + fn transaction_receipt(&self, id: TransactionId) -> Option { + self.receipts.read().get(&id).cloned() + } - fn localized_block_receipts(&self, _id: BlockId) -> Option> { - Some(self.receipts.read().values().cloned().collect()) - } + fn localized_block_receipts(&self, _id: BlockId) -> Option> { + Some(self.receipts.read().values().cloned().collect()) + } - fn logs(&self, filter: Filter) -> Result, BlockId> { - match self.error_on_logs.read().as_ref() { - Some(id) => return Err(id.clone()), - None => (), - } + fn logs(&self, filter: Filter) -> Result, BlockId> { + match self.error_on_logs.read().as_ref() { + Some(id) => return Err(id.clone()), + None => (), + } - let mut logs = self.logs.read().clone(); - let len = logs.len(); - Ok(match filter.limit { - Some(limit) if limit <= len => logs.split_off(len - limit), - _ => logs, - }) - } + let mut logs = self.logs.read().clone(); + let len = logs.len(); + Ok(match filter.limit { + Some(limit) if limit <= len => logs.split_off(len - limit), + _ => logs, + }) + } - fn last_hashes(&self) -> LastHashes { - unimplemented!(); - } + fn last_hashes(&self) -> LastHashes { + unimplemented!(); + } - fn block_number(&self, id: BlockId) -> Option { - match id { - BlockId::Number(number) => Some(number), - BlockId::Earliest => Some(0), - BlockId::Latest => Some(self.chain_info().best_block_number), - BlockId::Hash(ref h) => - self.numbers.read().iter().find(|&(_, hash)| hash == h).map(|e| *e.0 as u64) - } - } + fn block_number(&self, id: BlockId) -> Option { + match id { + BlockId::Number(number) => Some(number), + BlockId::Earliest => Some(0), + BlockId::Latest => Some(self.chain_info().best_block_number), + 
BlockId::Hash(ref h) => self + .numbers + .read() + .iter() + .find(|&(_, hash)| hash == h) + .map(|e| *e.0 as u64), + } + } - fn block_body(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| { - let block = view!(BlockView, r); - let mut stream = RlpStream::new_list(2); - stream.append_raw(block.transactions_rlp().as_raw(), 1); - stream.append_raw(block.uncles_rlp().as_raw(), 1); - encoded::Body::new(stream.out()) - })) - } + fn block_body(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| { + self.blocks.read().get(&hash).map(|r| { + let block = view!(BlockView, r); + let mut stream = RlpStream::new_list(2); + stream.append_raw(block.transactions_rlp().as_raw(), 1); + stream.append_raw(block.uncles_rlp().as_raw(), 1); + encoded::Body::new(stream.out()) + }) + }) + } - fn block_extra_info(&self, id: BlockId) -> Option> { - self.block(id) - .map(|block| block.view().header()) - .map(|header| self.spec.engine.extra_info(&header)) - } + fn block_extra_info(&self, id: BlockId) -> Option> { + self.block(id) + .map(|block| block.view().header()) + .map(|header| self.spec.engine.extra_info(&header)) + } - fn block_status(&self, id: BlockId) -> BlockStatus { - match id { - BlockId::Number(number) if (number as usize) < self.blocks.read().len() => BlockStatus::InChain, - BlockId::Hash(ref hash) if self.blocks.read().get(hash).is_some() => BlockStatus::InChain, - BlockId::Latest | BlockId::Earliest => BlockStatus::InChain, - _ => BlockStatus::Unknown, - } - } + fn block_status(&self, id: BlockId) -> BlockStatus { + match id { + BlockId::Number(number) if (number as usize) < self.blocks.read().len() => { + BlockStatus::InChain + } + BlockId::Hash(ref hash) if self.blocks.read().get(hash).is_some() => { + BlockStatus::InChain + } + BlockId::Latest | BlockId::Earliest => BlockStatus::InChain, + _ => BlockStatus::Unknown, + } + } - // works only if blocks are one after another 1 -> 2 -> 3 - fn 
tree_route(&self, from: &H256, to: &H256) -> Option { - Some(TreeRoute { - ancestor: H256::new(), - index: 0, - blocks: { - let numbers_read = self.numbers.read(); - let mut adding = false; + // works only if blocks are one after another 1 -> 2 -> 3 + fn tree_route(&self, from: &H256, to: &H256) -> Option { + Some(TreeRoute { + ancestor: H256::new(), + index: 0, + blocks: { + let numbers_read = self.numbers.read(); + let mut adding = false; - let mut blocks = Vec::new(); - for (_, hash) in numbers_read.iter().sorted_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) { - if hash == to { - if adding { - blocks.push(hash.clone()); - } - adding = false; - break; - } - if hash == from { - adding = true; - } - if adding { - blocks.push(hash.clone()); - } - } - if adding { Vec::new() } else { blocks } - }, - is_from_route_finalized: false, - }) - } + let mut blocks = Vec::new(); + for (_, hash) in numbers_read + .iter() + .sorted_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) + { + if hash == to { + if adding { + blocks.push(hash.clone()); + } + adding = false; + break; + } + if hash == from { + adding = true; + } + if adding { + blocks.push(hash.clone()); + } + } + if adding { + Vec::new() + } else { + blocks + } + }, + is_from_route_finalized: false, + }) + } - fn find_uncles(&self, _hash: &H256) -> Option> { - None - } + fn find_uncles(&self, _hash: &H256) -> Option> { + None + } - // TODO: returns just hashes instead of node state rlp(?) - fn state_data(&self, hash: &H256) -> Option { - // starts with 'f' ? - if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { - let mut rlp = RlpStream::new(); - rlp.append(&hash.clone()); - return Some(rlp.out()); - } - None - } + // TODO: returns just hashes instead of node state rlp(?) + fn state_data(&self, hash: &H256) -> Option { + // starts with 'f' ? 
+ if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { + let mut rlp = RlpStream::new(); + rlp.append(&hash.clone()); + return Some(rlp.out()); + } + None + } - fn block_receipts(&self, hash: &H256) -> Option { - // starts with 'f' ? - if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { - let receipt = BlockReceipts::new(vec![Receipt::new( - TransactionOutcome::StateRoot(H256::zero()), - U256::zero(), - vec![])]); - return Some(receipt); - } - None - } + fn block_receipts(&self, hash: &H256) -> Option { + // starts with 'f' ? + if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { + let receipt = BlockReceipts::new(vec![Receipt::new( + TransactionOutcome::StateRoot(H256::zero()), + U256::zero(), + vec![], + )]); + return Some(receipt); + } + None + } - fn queue_info(&self) -> QueueInfo { - QueueInfo { - verified_queue_size: self.queue_size.load(AtomicOrder::Relaxed), - unverified_queue_size: 0, - verifying_queue_size: 0, - max_queue_size: 0, - max_mem_use: 0, - mem_used: 0, - } - } + fn queue_info(&self) -> QueueInfo { + QueueInfo { + verified_queue_size: self.queue_size.load(AtomicOrder::Relaxed), + unverified_queue_size: 0, + verifying_queue_size: 0, + max_queue_size: 0, + max_mem_use: 0, + mem_used: 0, + } + } - fn clear_queue(&self) { - } + fn clear_queue(&self) {} - fn additional_params(&self) -> BTreeMap { - Default::default() - } + fn additional_params(&self) -> BTreeMap { + Default::default() + } - fn filter_traces(&self, _filter: TraceFilter) -> Option> { - self.traces.read().clone() - } + fn filter_traces(&self, _filter: TraceFilter) -> Option> { + self.traces.read().clone() + } - fn trace(&self, _trace: TraceId) -> Option { - self.traces.read().clone().and_then(|vec| vec.into_iter().next()) - } + fn trace(&self, _trace: TraceId) -> Option { + self.traces + .read() + .clone() + .and_then(|vec| vec.into_iter().next()) + } - fn 
transaction_traces(&self, _trace: TransactionId) -> Option> { - self.traces.read().clone() - } + fn transaction_traces(&self, _trace: TransactionId) -> Option> { + self.traces.read().clone() + } - fn block_traces(&self, _trace: BlockId) -> Option> { - self.traces.read().clone() - } + fn block_traces(&self, _trace: BlockId) -> Option> { + self.traces.read().clone() + } - fn transactions_to_propagate(&self) -> Vec> { - self.miner.ready_transactions(self, 4096, miner::PendingOrdering::Priority) - } + fn transactions_to_propagate(&self) -> Vec> { + self.miner + .ready_transactions(self, 4096, miner::PendingOrdering::Priority) + } - fn signing_chain_id(&self) -> Option { None } + fn signing_chain_id(&self) -> Option { + None + } - fn mode(&self) -> Mode { Mode::Active } + fn mode(&self) -> Mode { + Mode::Active + } - fn set_mode(&self, _: Mode) { unimplemented!(); } + fn set_mode(&self, _: Mode) { + unimplemented!(); + } - fn spec_name(&self) -> String { "foundation".into() } + fn spec_name(&self) -> String { + "foundation".into() + } - fn set_spec_name(&self, _: String) -> Result<(), ()> { unimplemented!(); } + fn set_spec_name(&self, _: String) -> Result<(), ()> { + unimplemented!(); + } - fn disable(&self) { self.disabled.store(true, AtomicOrder::Relaxed); } + fn disable(&self) { + self.disabled.store(true, AtomicOrder::Relaxed); + } - fn pruning_info(&self) -> PruningInfo { - let best_num = self.chain_info().best_block_number; - PruningInfo { - earliest_chain: 1, - earliest_state: self.history.read().as_ref().map(|x| best_num - x).unwrap_or(0), - } - } + fn pruning_info(&self) -> PruningInfo { + let best_num = self.chain_info().best_block_number; + PruningInfo { + earliest_chain: 1, + earliest_state: self + .history + .read() + .as_ref() + .map(|x| best_num - x) + .unwrap_or(0), + } + } - fn transact_contract(&self, address: Address, data: Bytes) -> Result<(), transaction::Error> { - let transaction = Transaction { - nonce: 
self.latest_nonce(&self.miner.authoring_params().author), - action: Action::Call(address), - gas: self.spec.gas_limit, - gas_price: U256::zero(), - value: U256::default(), - data: data, - }; - let chain_id = Some(self.spec.chain_id()); - let sig = self.spec.engine.sign(transaction.hash(chain_id)).unwrap(); - let signed = SignedTransaction::new(transaction.with_signature(sig, chain_id)).unwrap(); - self.miner.import_own_transaction(self, signed.into()) - } + fn transact_contract(&self, address: Address, data: Bytes) -> Result<(), transaction::Error> { + let transaction = Transaction { + nonce: self.latest_nonce(&self.miner.authoring_params().author), + action: Action::Call(address), + gas: self.spec.gas_limit, + gas_price: U256::zero(), + value: U256::default(), + data: data, + }; + let chain_id = Some(self.spec.chain_id()); + let sig = self.spec.engine.sign(transaction.hash(chain_id)).unwrap(); + let signed = SignedTransaction::new(transaction.with_signature(sig, chain_id)).unwrap(); + self.miner.import_own_transaction(self, signed.into()) + } - fn registrar_address(&self) -> Option
{ None } + fn registrar_address(&self) -> Option
{ + None + } } impl IoClient for TestBlockChainClient { - fn queue_transactions(&self, transactions: Vec, _peer_id: usize) { - // import right here - let txs = transactions.into_iter().filter_map(|bytes| Rlp::new(&bytes).as_val().ok()).collect(); - self.miner.import_external_transactions(self, txs); - } + fn queue_transactions(&self, transactions: Vec, _peer_id: usize) { + // import right here + let txs = transactions + .into_iter() + .filter_map(|bytes| Rlp::new(&bytes).as_val().ok()) + .collect(); + self.miner.import_external_transactions(self, txs); + } - fn queue_ancient_block(&self, unverified: Unverified, _r: Bytes) -> EthcoreResult { - self.import_block(unverified) - } + fn queue_ancient_block(&self, unverified: Unverified, _r: Bytes) -> EthcoreResult { + self.import_block(unverified) + } - fn queue_consensus_message(&self, message: Bytes) { - self.spec.engine.handle_message(&message).unwrap(); - } + fn queue_consensus_message(&self, message: Bytes) { + self.spec.engine.handle_message(&message).unwrap(); + } } impl ProvingBlockChainClient for TestBlockChainClient { - fn prove_storage(&self, _: H256, _: H256, _: BlockId) -> Option<(Vec, H256)> { - None - } + fn prove_storage(&self, _: H256, _: H256, _: BlockId) -> Option<(Vec, H256)> { + None + } - fn prove_account(&self, _: H256, _: BlockId) -> Option<(Vec, BasicAccount)> { - None - } + fn prove_account(&self, _: H256, _: BlockId) -> Option<(Vec, BasicAccount)> { + None + } - fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option<(Bytes, Vec)> { - None - } + fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option<(Bytes, Vec)> { + None + } - fn epoch_signal(&self, _: H256) -> Option> { - None - } + fn epoch_signal(&self, _: H256) -> Option> { + None + } } impl super::traits::EngineClient for TestBlockChainClient { - fn update_sealing(&self, force: ForceUpdateSealing) { - self.miner.update_sealing(self, force) - } + fn update_sealing(&self, force: ForceUpdateSealing) { + 
self.miner.update_sealing(self, force) + } - fn submit_seal(&self, block_hash: H256, seal: Vec) { - let import = self.miner.submit_seal(block_hash, seal).and_then(|block| self.import_sealed_block(block)); - if let Err(err) = import { - warn!(target: "poa", "Wrong internal seal submission! {:?}", err); - } - } + fn submit_seal(&self, block_hash: H256, seal: Vec) { + let import = self + .miner + .submit_seal(block_hash, seal) + .and_then(|block| self.import_sealed_block(block)); + if let Err(err) = import { + warn!(target: "poa", "Wrong internal seal submission! {:?}", err); + } + } - fn broadcast_consensus_message(&self, _message: Bytes) {} + fn broadcast_consensus_message(&self, _message: Bytes) {} - fn epoch_transition_for(&self, _block_hash: H256) -> Option<::engines::EpochTransition> { - None - } + fn epoch_transition_for(&self, _block_hash: H256) -> Option<::engines::EpochTransition> { + None + } - fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) } + fn as_full_client(&self) -> Option<&BlockChainClient> { + Some(self) + } - fn block_number(&self, id: BlockId) -> Option { - BlockChainClient::block_number(self, id) - } + fn block_number(&self, id: BlockId) -> Option { + BlockChainClient::block_number(self, id) + } - fn block_header(&self, id: BlockId) -> Option { - BlockChainClient::block_header(self, id) - } + fn block_header(&self, id: BlockId) -> Option { + BlockChainClient::block_header(self, id) + } } diff --git a/ethcore/src/client/trace.rs b/ethcore/src/client/trace.rs index 73563a1d0..25d34b733 100644 --- a/ethcore/src/client/trace.rs +++ b/ethcore/src/client/trace.rs @@ -22,19 +22,20 @@ use trace::DatabaseExtras as TraceDatabaseExtras; use types::BlockNumber; impl TraceDatabaseExtras for BlockChain { - fn block_hash(&self, block_number: BlockNumber) -> Option { - (self as &BlockProvider).block_hash(block_number) - } + fn block_hash(&self, block_number: BlockNumber) -> Option { + (self as &BlockProvider).block_hash(block_number) + } - fn 
transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option { - (self as &BlockProvider).block_hash(block_number) - .and_then(|block_hash| { - let tx_address = TransactionAddress { - block_hash: block_hash, - index: tx_position - }; - self.transaction(&tx_address) - }) - .map(|tx| tx.hash()) - } + fn transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option { + (self as &BlockProvider) + .block_hash(block_number) + .and_then(|block_hash| { + let tx_address = TransactionAddress { + block_hash: block_hash, + index: tx_position, + }; + self.transaction(&tx_address) + }) + .map(|tx| tx.hash()) + } } diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 16c185e69..72050737d 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -14,35 +14,36 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::BTreeMap; -use std::sync::Arc; +use std::{collections::BTreeMap, sync::Arc}; use blockchain::{BlockReceipts, TreeRoute}; use bytes::Bytes; use call_contract::{CallContract, RegistryInfo}; use ethcore_miner::pool::VerifiedTransaction; -use ethereum_types::{H256, U256, Address}; +use ethereum_types::{Address, H256, U256}; use evm::Schedule; use itertools::Itertools; use kvdb::DBValue; -use types::transaction::{self, LocalizedTransaction, SignedTransaction}; -use types::BlockNumber; -use types::basic_account::BasicAccount; -use types::block_status::BlockStatus; -use types::blockchain_info::BlockChainInfo; -use types::call_analytics::CallAnalytics; -use types::data_format::DataFormat; -use types::encoded; -use types::filter::Filter; -use types::header::Header; -use types::ids::*; -use types::log_entry::LocalizedLogEntry; -use types::pruning_info::PruningInfo; -use types::receipt::LocalizedReceipt; -use types::trace_filter::Filter as TraceFilter; +use types::{ + basic_account::BasicAccount, + 
block_status::BlockStatus, + blockchain_info::BlockChainInfo, + call_analytics::CallAnalytics, + data_format::DataFormat, + encoded, + filter::Filter, + header::Header, + ids::*, + log_entry::LocalizedLogEntry, + pruning_info::PruningInfo, + receipt::LocalizedReceipt, + trace_filter::Filter as TraceFilter, + transaction::{self, LocalizedTransaction, SignedTransaction}, + BlockNumber, +}; use vm::LastHashes; -use block::{OpenBlock, SealedBlock, ClosedBlock}; +use block::{ClosedBlock, OpenBlock, SealedBlock}; use client::Mode; use engines::EthEngine; use error::{Error, EthcoreResult}; @@ -50,64 +51,65 @@ use executed::CallError; use executive::Executed; use state::StateInfo; use trace::LocalizedTrace; -use verification::queue::QueueInfo as BlockQueueInfo; -use verification::queue::kind::blocks::Unverified; +use verification::queue::{kind::blocks::Unverified, QueueInfo as BlockQueueInfo}; /// State information to be used during client query pub enum StateOrBlock { - /// State to be used, may be pending - State(Box), + /// State to be used, may be pending + State(Box), - /// Id of an existing block from a chain to get state from - Block(BlockId) + /// Id of an existing block from a chain to get state from + Block(BlockId), } impl From for StateOrBlock { - fn from(info: S) -> StateOrBlock { - StateOrBlock::State(Box::new(info) as Box<_>) - } + fn from(info: S) -> StateOrBlock { + StateOrBlock::State(Box::new(info) as Box<_>) + } } impl From> for StateOrBlock { - fn from(info: Box) -> StateOrBlock { - StateOrBlock::State(info) - } + fn from(info: Box) -> StateOrBlock { + StateOrBlock::State(info) + } } impl From for StateOrBlock { - fn from(id: BlockId) -> StateOrBlock { - StateOrBlock::Block(id) - } + fn from(id: BlockId) -> StateOrBlock { + StateOrBlock::Block(id) + } } /// Provides `nonce` and `latest_nonce` methods pub trait Nonce { - /// Attempt to get address nonce at given block. - /// May not fail on BlockId::Latest. 
- fn nonce(&self, address: &Address, id: BlockId) -> Option; + /// Attempt to get address nonce at given block. + /// May not fail on BlockId::Latest. + fn nonce(&self, address: &Address, id: BlockId) -> Option; - /// Get address nonce at the latest block's state. - fn latest_nonce(&self, address: &Address) -> U256 { - self.nonce(address, BlockId::Latest) - .expect("nonce will return Some when given BlockId::Latest. nonce was given BlockId::Latest. \ - Therefore nonce has returned Some; qed") - } + /// Get address nonce at the latest block's state. + fn latest_nonce(&self, address: &Address) -> U256 { + self.nonce(address, BlockId::Latest).expect( + "nonce will return Some when given BlockId::Latest. nonce was given BlockId::Latest. \ + Therefore nonce has returned Some; qed", + ) + } } /// Provides `balance` and `latest_balance` methods pub trait Balance { - /// Get address balance at the given block's state. - /// - /// May not return None if given BlockId::Latest. - /// Returns None if and only if the block's root hash has been pruned from the DB. - fn balance(&self, address: &Address, state: StateOrBlock) -> Option; + /// Get address balance at the given block's state. + /// + /// May not return None if given BlockId::Latest. + /// Returns None if and only if the block's root hash has been pruned from the DB. + fn balance(&self, address: &Address, state: StateOrBlock) -> Option; - /// Get address balance at the latest block's state. - fn latest_balance(&self, address: &Address) -> U256 { - self.balance(address, BlockId::Latest.into()) - .expect("balance will return Some if given BlockId::Latest. balance was given BlockId::Latest \ - Therefore balance has returned Some; qed") - } + /// Get address balance at the latest block's state. + fn latest_balance(&self, address: &Address) -> U256 { + self.balance(address, BlockId::Latest.into()).expect( + "balance will return Some if given BlockId::Latest. 
balance was given BlockId::Latest \ + Therefore balance has returned Some; qed", + ) + } } /// Provides methods to access account info @@ -115,45 +117,45 @@ pub trait AccountData: Nonce + Balance {} /// Provides `chain_info` method pub trait ChainInfo { - /// Get blockchain information. - fn chain_info(&self) -> BlockChainInfo; + /// Get blockchain information. + fn chain_info(&self) -> BlockChainInfo; } /// Provides various information on a block by it's ID pub trait BlockInfo { - /// Get raw block header data by block id. - fn block_header(&self, id: BlockId) -> Option; + /// Get raw block header data by block id. + fn block_header(&self, id: BlockId) -> Option; - /// Get the best block header. - fn best_block_header(&self) -> Header; + /// Get the best block header. + fn best_block_header(&self) -> Header; - /// Get raw block data by block header hash. - fn block(&self, id: BlockId) -> Option; + /// Get raw block data by block header hash. + fn block(&self, id: BlockId) -> Option; - /// Get address code hash at given block's state. - fn code_hash(&self, address: &Address, id: BlockId) -> Option; + /// Get address code hash at given block's state. + fn code_hash(&self, address: &Address, id: BlockId) -> Option; } /// Provides various information on a transaction by it's ID pub trait TransactionInfo { - /// Get the hash of block that contains the transaction, if any. - fn transaction_block(&self, id: TransactionId) -> Option; + /// Get the hash of block that contains the transaction, if any. + fn transaction_block(&self, id: TransactionId) -> Option; } /// Provides methods to access chain state pub trait StateClient { - /// Type representing chain state - type State: StateInfo; + /// Type representing chain state + type State: StateInfo; - /// Get a copy of the best block's state and header. - fn latest_state_and_header(&self) -> (Self::State, Header); + /// Get a copy of the best block's state and header. 
+ fn latest_state_and_header(&self) -> (Self::State, Header); - /// Attempt to get a copy of a specific block's final state. - /// - /// This will not fail if given BlockId::Latest. - /// Otherwise, this can fail (but may not) if the DB prunes state or the block - /// is unknown. - fn state_at(&self, id: BlockId) -> Option; + /// Attempt to get a copy of a specific block's final state. + /// + /// This will not fail if given BlockId::Latest. + /// Otherwise, this can fail (but may not) if the DB prunes state or the block + /// is unknown. + fn state_at(&self, id: BlockId) -> Option; } /// Provides various blockchain information, like block header, chain state etc. @@ -162,241 +164,289 @@ pub trait BlockChain: ChainInfo + BlockInfo + TransactionInfo {} // FIXME Why these methods belong to BlockChainClient and not MiningBlockChainClient? /// Provides methods to import block into blockchain pub trait ImportBlock { - /// Import a block into the blockchain. - fn import_block(&self, block: Unverified) -> EthcoreResult; + /// Import a block into the blockchain. + fn import_block(&self, block: Unverified) -> EthcoreResult; } /// Provides `call` and `call_many` methods pub trait Call { - /// Type representing chain state - type State: StateInfo; + /// Type representing chain state + type State: StateInfo; - /// Makes a non-persistent transaction call. - fn call(&self, tx: &SignedTransaction, analytics: CallAnalytics, state: &mut Self::State, header: &Header) -> Result; + /// Makes a non-persistent transaction call. + fn call( + &self, + tx: &SignedTransaction, + analytics: CallAnalytics, + state: &mut Self::State, + header: &Header, + ) -> Result; - /// Makes multiple non-persistent but dependent transaction calls. - /// Returns a vector of successes or a failure if any of the transaction fails. 
- fn call_many(&self, txs: &[(SignedTransaction, CallAnalytics)], state: &mut Self::State, header: &Header) -> Result, CallError>; + /// Makes multiple non-persistent but dependent transaction calls. + /// Returns a vector of successes or a failure if any of the transaction fails. + fn call_many( + &self, + txs: &[(SignedTransaction, CallAnalytics)], + state: &mut Self::State, + header: &Header, + ) -> Result, CallError>; - /// Estimates how much gas will be necessary for a call. - fn estimate_gas(&self, t: &SignedTransaction, state: &Self::State, header: &Header) -> Result; + /// Estimates how much gas will be necessary for a call. + fn estimate_gas( + &self, + t: &SignedTransaction, + state: &Self::State, + header: &Header, + ) -> Result; } /// Provides `engine` method pub trait EngineInfo { - /// Get underlying engine object - fn engine(&self) -> &EthEngine; + /// Get underlying engine object + fn engine(&self) -> &EthEngine; } /// IO operations that should off-load heavy work to another thread. pub trait IoClient: Sync + Send { - /// Queue transactions for importing. - fn queue_transactions(&self, transactions: Vec, peer_id: usize); + /// Queue transactions for importing. + fn queue_transactions(&self, transactions: Vec, peer_id: usize); - /// Queue block import with transaction receipts. Does no sealing and transaction validation. - fn queue_ancient_block(&self, block_bytes: Unverified, receipts_bytes: Bytes) -> EthcoreResult; + /// Queue block import with transaction receipts. Does no sealing and transaction validation. + fn queue_ancient_block( + &self, + block_bytes: Unverified, + receipts_bytes: Bytes, + ) -> EthcoreResult; - /// Queue conensus engine message. - fn queue_consensus_message(&self, message: Bytes); + /// Queue conensus engine message. + fn queue_consensus_message(&self, message: Bytes); } /// Provides recently seen bad blocks. pub trait BadBlocks { - /// Returns a list of blocks that were recently not imported because they were invalid. 
- fn bad_blocks(&self) -> Vec<(Unverified, String)>; + /// Returns a list of blocks that were recently not imported because they were invalid. + fn bad_blocks(&self) -> Vec<(Unverified, String)>; } /// Blockchain database client. Owns and manages a blockchain and a block queue. -pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContract + RegistryInfo + ImportBlock -+ IoClient + BadBlocks { - /// Look up the block number for the given block ID. - fn block_number(&self, id: BlockId) -> Option; +pub trait BlockChainClient: + Sync + + Send + + AccountData + + BlockChain + + CallContract + + RegistryInfo + + ImportBlock + + IoClient + + BadBlocks +{ + /// Look up the block number for the given block ID. + fn block_number(&self, id: BlockId) -> Option; - /// Get raw block body data by block id. - /// Block body is an RLP list of two items: uncles and transactions. - fn block_body(&self, id: BlockId) -> Option; + /// Get raw block body data by block id. + /// Block body is an RLP list of two items: uncles and transactions. + fn block_body(&self, id: BlockId) -> Option; - /// Get block status by block header hash. - fn block_status(&self, id: BlockId) -> BlockStatus; + /// Get block status by block header hash. + fn block_status(&self, id: BlockId) -> BlockStatus; - /// Get block total difficulty. - fn block_total_difficulty(&self, id: BlockId) -> Option; + /// Get block total difficulty. + fn block_total_difficulty(&self, id: BlockId) -> Option; - /// Attempt to get address storage root at given block. - /// May not fail on BlockId::Latest. - fn storage_root(&self, address: &Address, id: BlockId) -> Option; + /// Attempt to get address storage root at given block. + /// May not fail on BlockId::Latest. + fn storage_root(&self, address: &Address, id: BlockId) -> Option; - /// Get block hash. - fn block_hash(&self, id: BlockId) -> Option; + /// Get block hash. 
+ fn block_hash(&self, id: BlockId) -> Option; - /// Get address code at given block's state. - fn code(&self, address: &Address, state: StateOrBlock) -> Option>; + /// Get address code at given block's state. + fn code(&self, address: &Address, state: StateOrBlock) -> Option>; - /// Get address code at the latest block's state. - fn latest_code(&self, address: &Address) -> Option { - self.code(address, BlockId::Latest.into()) - .expect("code will return Some if given BlockId::Latest; qed") - } + /// Get address code at the latest block's state. + fn latest_code(&self, address: &Address) -> Option { + self.code(address, BlockId::Latest.into()) + .expect("code will return Some if given BlockId::Latest; qed") + } - /// Get address code hash at given block's state. + /// Get address code hash at given block's state. - /// Get value of the storage at given position at the given block's state. - /// - /// May not return None if given BlockId::Latest. - /// Returns None if and only if the block's root hash has been pruned from the DB. - fn storage_at(&self, address: &Address, position: &H256, state: StateOrBlock) -> Option; + /// Get value of the storage at given position at the given block's state. + /// + /// May not return None if given BlockId::Latest. + /// Returns None if and only if the block's root hash has been pruned from the DB. + fn storage_at(&self, address: &Address, position: &H256, state: StateOrBlock) -> Option; - /// Get value of the storage at given position at the latest block's state. - fn latest_storage_at(&self, address: &Address, position: &H256) -> H256 { - self.storage_at(address, position, BlockId::Latest.into()) + /// Get value of the storage at given position at the latest block's state. + fn latest_storage_at(&self, address: &Address, position: &H256) -> H256 { + self.storage_at(address, position, BlockId::Latest.into()) .expect("storage_at will return Some if given BlockId::Latest. storage_at was given BlockId::Latest. 
\ Therefore storage_at has returned Some; qed") - } + } - /// Get a list of all accounts in the block `id`, if fat DB is in operation, otherwise `None`. - /// If `after` is set the list starts with the following item. - fn list_accounts(&self, id: BlockId, after: Option<&Address>, count: u64) -> Option>; + /// Get a list of all accounts in the block `id`, if fat DB is in operation, otherwise `None`. + /// If `after` is set the list starts with the following item. + fn list_accounts( + &self, + id: BlockId, + after: Option<&Address>, + count: u64, + ) -> Option>; - /// Get a list of all storage keys in the block `id`, if fat DB is in operation, otherwise `None`. - /// If `after` is set the list starts with the following item. - fn list_storage(&self, id: BlockId, account: &Address, after: Option<&H256>, count: u64) -> Option>; + /// Get a list of all storage keys in the block `id`, if fat DB is in operation, otherwise `None`. + /// If `after` is set the list starts with the following item. + fn list_storage( + &self, + id: BlockId, + account: &Address, + after: Option<&H256>, + count: u64, + ) -> Option>; - /// Get transaction with given hash. - fn transaction(&self, id: TransactionId) -> Option; + /// Get transaction with given hash. + fn transaction(&self, id: TransactionId) -> Option; - /// Get uncle with given id. - fn uncle(&self, id: UncleId) -> Option; + /// Get uncle with given id. + fn uncle(&self, id: UncleId) -> Option; - /// Get transaction receipt with given hash. - fn transaction_receipt(&self, id: TransactionId) -> Option; + /// Get transaction receipt with given hash. + fn transaction_receipt(&self, id: TransactionId) -> Option; - /// Get localized receipts for all transaction in given block. - fn localized_block_receipts(&self, id: BlockId) -> Option>; + /// Get localized receipts for all transaction in given block. + fn localized_block_receipts(&self, id: BlockId) -> Option>; - /// Get a tree route between `from` and `to`. 
- /// See `BlockChain::tree_route`. - fn tree_route(&self, from: &H256, to: &H256) -> Option; + /// Get a tree route between `from` and `to`. + /// See `BlockChain::tree_route`. + fn tree_route(&self, from: &H256, to: &H256) -> Option; - /// Get all possible uncle hashes for a block. - fn find_uncles(&self, hash: &H256) -> Option>; + /// Get all possible uncle hashes for a block. + fn find_uncles(&self, hash: &H256) -> Option>; - /// Get latest state node - fn state_data(&self, hash: &H256) -> Option; + /// Get latest state node + fn state_data(&self, hash: &H256) -> Option; - /// Get block receipts data by block header hash. - fn block_receipts(&self, hash: &H256) -> Option; + /// Get block receipts data by block header hash. + fn block_receipts(&self, hash: &H256) -> Option; - /// Get block queue information. - fn queue_info(&self) -> BlockQueueInfo; + /// Get block queue information. + fn queue_info(&self) -> BlockQueueInfo; - /// Returns true if block queue is empty. - fn is_queue_empty(&self) -> bool { - self.queue_info().is_empty() - } + /// Returns true if block queue is empty. + fn is_queue_empty(&self) -> bool { + self.queue_info().is_empty() + } - /// Clear block queue and abort all import activity. - fn clear_queue(&self); + /// Clear block queue and abort all import activity. + fn clear_queue(&self); - /// Get the registrar address, if it exists. - fn additional_params(&self) -> BTreeMap; + /// Get the registrar address, if it exists. + fn additional_params(&self) -> BTreeMap; - /// Returns logs matching given filter. If one of the filtering block cannot be found, returns the block id that caused the error. - fn logs(&self, filter: Filter) -> Result, BlockId>; + /// Returns logs matching given filter. If one of the filtering block cannot be found, returns the block id that caused the error. + fn logs(&self, filter: Filter) -> Result, BlockId>; - /// Replays a given transaction for inspection. 
- fn replay(&self, t: TransactionId, analytics: CallAnalytics) -> Result; + /// Replays a given transaction for inspection. + fn replay(&self, t: TransactionId, analytics: CallAnalytics) -> Result; - /// Replays all the transactions in a given block for inspection. - fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result>, CallError>; + /// Replays all the transactions in a given block for inspection. + fn replay_block_transactions( + &self, + block: BlockId, + analytics: CallAnalytics, + ) -> Result>, CallError>; - /// Returns traces matching given filter. - fn filter_traces(&self, filter: TraceFilter) -> Option>; + /// Returns traces matching given filter. + fn filter_traces(&self, filter: TraceFilter) -> Option>; - /// Returns trace with given id. - fn trace(&self, trace: TraceId) -> Option; + /// Returns trace with given id. + fn trace(&self, trace: TraceId) -> Option; - /// Returns traces created by transaction. - fn transaction_traces(&self, trace: TransactionId) -> Option>; + /// Returns traces created by transaction. + fn transaction_traces(&self, trace: TransactionId) -> Option>; - /// Returns traces created by transaction from block. - fn block_traces(&self, trace: BlockId) -> Option>; + /// Returns traces created by transaction from block. + fn block_traces(&self, trace: BlockId) -> Option>; - /// Get last hashes starting from best block. - fn last_hashes(&self) -> LastHashes; + /// Get last hashes starting from best block. + fn last_hashes(&self) -> LastHashes; - /// List all ready transactions that should be propagated to other peers. - fn transactions_to_propagate(&self) -> Vec>; + /// List all ready transactions that should be propagated to other peers. + fn transactions_to_propagate(&self) -> Vec>; - /// Sorted list of transaction gas prices from at least last sample_size blocks. 
- fn gas_price_corpus(&self, sample_size: usize) -> ::stats::Corpus { - let mut h = self.chain_info().best_block_hash; - let mut corpus = Vec::new(); - while corpus.is_empty() { - for _ in 0..sample_size { - let block = match self.block(BlockId::Hash(h)) { - Some(block) => block, - None => return corpus.into(), - }; + /// Sorted list of transaction gas prices from at least last sample_size blocks. + fn gas_price_corpus(&self, sample_size: usize) -> ::stats::Corpus { + let mut h = self.chain_info().best_block_hash; + let mut corpus = Vec::new(); + while corpus.is_empty() { + for _ in 0..sample_size { + let block = match self.block(BlockId::Hash(h)) { + Some(block) => block, + None => return corpus.into(), + }; - if block.number() == 0 { - return corpus.into(); - } - block.transaction_views().iter().foreach(|t| corpus.push(t.gas_price())); - h = block.parent_hash().clone(); - } - } - corpus.into() - } + if block.number() == 0 { + return corpus.into(); + } + block + .transaction_views() + .iter() + .foreach(|t| corpus.push(t.gas_price())); + h = block.parent_hash().clone(); + } + } + corpus.into() + } - /// Get the preferred chain ID to sign on - fn signing_chain_id(&self) -> Option; + /// Get the preferred chain ID to sign on + fn signing_chain_id(&self) -> Option; - /// Get the mode. - fn mode(&self) -> Mode; + /// Get the mode. + fn mode(&self) -> Mode; - /// Set the mode. - fn set_mode(&self, mode: Mode); + /// Set the mode. + fn set_mode(&self, mode: Mode); - /// Get the chain spec name. - fn spec_name(&self) -> String; + /// Get the chain spec name. + fn spec_name(&self) -> String; - /// Set the chain via a spec name. - fn set_spec_name(&self, spec_name: String) -> Result<(), ()>; + /// Set the chain via a spec name. + fn set_spec_name(&self, spec_name: String) -> Result<(), ()>; - /// Disable the client from importing blocks. 
This cannot be undone in this session and indicates - /// that a subsystem has reason to believe this executable incapable of syncing the chain. - fn disable(&self); + /// Disable the client from importing blocks. This cannot be undone in this session and indicates + /// that a subsystem has reason to believe this executable incapable of syncing the chain. + fn disable(&self); - /// Returns engine-related extra info for `BlockId`. - fn block_extra_info(&self, id: BlockId) -> Option>; + /// Returns engine-related extra info for `BlockId`. + fn block_extra_info(&self, id: BlockId) -> Option>; - /// Returns engine-related extra info for `UncleId`. - fn uncle_extra_info(&self, id: UncleId) -> Option>; + /// Returns engine-related extra info for `UncleId`. + fn uncle_extra_info(&self, id: UncleId) -> Option>; - /// Returns information about pruning/data availability. - fn pruning_info(&self) -> PruningInfo; + /// Returns information about pruning/data availability. + fn pruning_info(&self) -> PruningInfo; - /// Schedule state-altering transaction to be executed on the next pending block. - fn transact_contract(&self, address: Address, data: Bytes) -> Result<(), transaction::Error>; + /// Schedule state-altering transaction to be executed on the next pending block. + fn transact_contract(&self, address: Address, data: Bytes) -> Result<(), transaction::Error>; - /// Get the address of the registry itself. - fn registrar_address(&self) -> Option
; + /// Get the address of the registry itself. + fn registrar_address(&self) -> Option
; } /// Provides `reopen_block` method pub trait ReopenBlock { - /// Reopens an OpenBlock and updates uncles. - fn reopen_block(&self, block: ClosedBlock) -> OpenBlock; + /// Reopens an OpenBlock and updates uncles. + fn reopen_block(&self, block: ClosedBlock) -> OpenBlock; } /// Provides `prepare_open_block` method pub trait PrepareOpenBlock { - /// Returns OpenBlock prepared for closing. - fn prepare_open_block(&self, - author: Address, - gas_range_target: (U256, U256), - extra_data: Bytes - ) -> Result; + /// Returns OpenBlock prepared for closing. + fn prepare_open_block( + &self, + author: Address, + gas_range_target: (U256, U256), + extra_data: Bytes, + ) -> Result; } /// Provides methods used for sealing new state @@ -404,20 +454,20 @@ pub trait BlockProducer: PrepareOpenBlock + ReopenBlock {} /// Provides `latest_schedule` method pub trait ScheduleInfo { - /// Returns latest schedule. - fn latest_schedule(&self) -> Schedule; + /// Returns latest schedule. + fn latest_schedule(&self) -> Schedule; } ///Provides `import_sealed_block` method pub trait ImportSealedBlock { - /// Import sealed block. Skips all verifications. - fn import_sealed_block(&self, block: SealedBlock) -> EthcoreResult; + /// Import sealed block. Skips all verifications. + fn import_sealed_block(&self, block: SealedBlock) -> EthcoreResult; } /// Provides `broadcast_proposal_block` method pub trait BroadcastProposalBlock { - /// Broadcast a block proposal. - fn broadcast_proposal_block(&self, block: SealedBlock); + /// Broadcast a block proposal. + fn broadcast_proposal_block(&self, block: SealedBlock); } /// Provides methods to import sealed block and broadcast a block proposal @@ -426,66 +476,70 @@ pub trait SealedBlockImporter: ImportSealedBlock + BroadcastProposalBlock {} /// Do we want to force update sealing? #[derive(Debug, Copy, Clone, PartialEq)] pub enum ForceUpdateSealing { - /// Ideally you want to use `No` at all times as `Yes` skips `reseal_required` checks. 
- Yes, - /// Don't skip `reseal_required` checks - No + /// Ideally you want to use `No` at all times as `Yes` skips `reseal_required` checks. + Yes, + /// Don't skip `reseal_required` checks + No, } /// Client facilities used by internally sealing Engines. pub trait EngineClient: Sync + Send + ChainInfo { - /// Make a new block and seal it. - fn update_sealing(&self, force: ForceUpdateSealing); + /// Make a new block and seal it. + fn update_sealing(&self, force: ForceUpdateSealing); - /// Submit a seal for a block in the mining queue. - fn submit_seal(&self, block_hash: H256, seal: Vec); + /// Submit a seal for a block in the mining queue. + fn submit_seal(&self, block_hash: H256, seal: Vec); - /// Broadcast a consensus message to the network. - fn broadcast_consensus_message(&self, message: Bytes); + /// Broadcast a consensus message to the network. + fn broadcast_consensus_message(&self, message: Bytes); - /// Get the transition to the epoch the given parent hash is part of - /// or transitions to. - /// This will give the epoch that any children of this parent belong to. - /// - /// The block corresponding the the parent hash must be stored already. - fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition>; + /// Get the transition to the epoch the given parent hash is part of + /// or transitions to. + /// This will give the epoch that any children of this parent belong to. + /// + /// The block corresponding the the parent hash must be stored already. + fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition>; - /// Attempt to cast the engine client to a full client. - fn as_full_client(&self) -> Option<&BlockChainClient>; + /// Attempt to cast the engine client to a full client. + fn as_full_client(&self) -> Option<&BlockChainClient>; - /// Get a block number by ID. - fn block_number(&self, id: BlockId) -> Option; + /// Get a block number by ID. 
+ fn block_number(&self, id: BlockId) -> Option; - /// Get raw block header data by block id. - fn block_header(&self, id: BlockId) -> Option; + /// Get raw block header data by block id. + fn block_header(&self, id: BlockId) -> Option; } /// Extended client interface for providing proofs of the state. pub trait ProvingBlockChainClient: BlockChainClient { - /// Prove account storage at a specific block id. - /// - /// Both provided keys assume a secure trie. - /// Returns a vector of raw trie nodes (in order from the root) proving the storage query. - fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)>; + /// Prove account storage at a specific block id. + /// + /// Both provided keys assume a secure trie. + /// Returns a vector of raw trie nodes (in order from the root) proving the storage query. + fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)>; - /// Prove account existence at a specific block id. - /// The key is the keccak hash of the account's address. - /// Returns a vector of raw trie nodes (in order from the root) proving the query. - fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec, BasicAccount)>; + /// Prove account existence at a specific block id. + /// The key is the keccak hash of the account's address. + /// Returns a vector of raw trie nodes (in order from the root) proving the query. + fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec, BasicAccount)>; - /// Prove execution of a transaction at the given block. - /// Returns the output of the call and a vector of database items necessary - /// to reproduce it. - fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec)>; + /// Prove execution of a transaction at the given block. + /// Returns the output of the call and a vector of database items necessary + /// to reproduce it. 
+ fn prove_transaction( + &self, + transaction: SignedTransaction, + id: BlockId, + ) -> Option<(Bytes, Vec)>; - /// Get an epoch change signal by block hash. - fn epoch_signal(&self, hash: H256) -> Option>; + /// Get an epoch change signal by block hash. + fn epoch_signal(&self, hash: H256) -> Option>; } /// resets the blockchain pub trait BlockChainReset { - /// reset to best_block - n - fn reset(&self, num: u32) -> Result<(), String>; + /// reset to best_block - n + fn reset(&self, num: u32) -> Result<(), String>; } /// Provides a method for importing/exporting blocks @@ -494,22 +548,22 @@ pub trait ImportExportBlocks { /// destination could be a file or stdout. /// If the format is hex, each block is written on a new line. /// For binary exports, all block data is written to the same line. - fn export_blocks<'a>( + fn export_blocks<'a>( &self, destination: Box, from: BlockId, to: BlockId, - format: Option + format: Option, ) -> Result<(), String>; - /// Import blocks from destination, with the given format argument - /// Source could be a file or stdout. - /// For hex format imports, it attempts to read the blocks on a line by line basis. - /// For binary format imports, reads the 8 byte RLP header in order to decode the block - /// length to be read. - fn import_blocks<'a>( - &self, - source: Box, - format: Option - ) -> Result<(), String>; + /// Import blocks from destination, with the given format argument + /// Source could be a file or stdout. + /// For hex format imports, it attempts to read the blocks on a line by line basis. + /// For binary format imports, reads the 8 byte RLP header in order to decode the block + /// length to be read. 
+ fn import_blocks<'a>( + &self, + source: Box, + format: Option, + ) -> Result<(), String>; } diff --git a/ethcore/src/engines/authority_round/finality.rs b/ethcore/src/engines/authority_round/finality.rs index af57278c9..461be6213 100644 --- a/ethcore/src/engines/authority_round/finality.rs +++ b/ethcore/src/engines/authority_round/finality.rs @@ -16,10 +16,12 @@ //! Finality proof generation and checking. -use std::collections::{VecDeque}; -use std::collections::hash_map::{HashMap, Entry}; +use std::collections::{ + hash_map::{Entry, HashMap}, + VecDeque, +}; -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; use engines::validator_set::SimpleList; @@ -30,104 +32,122 @@ pub struct UnknownValidator; /// Rolling finality checker for authority round consensus. /// Stores a chain of unfinalized hashes that can be pushed onto. pub struct RollingFinality { - headers: VecDeque<(H256, Vec
)>, - signers: SimpleList, - sign_count: HashMap, - last_pushed: Option, + headers: VecDeque<(H256, Vec
)>, + signers: SimpleList, + sign_count: HashMap, + last_pushed: Option, } impl RollingFinality { - /// Create a blank finality checker under the given validator set. - pub fn blank(signers: Vec
) -> Self { - RollingFinality { - headers: VecDeque::new(), - signers: SimpleList::new(signers), - sign_count: HashMap::new(), - last_pushed: None, - } - } + /// Create a blank finality checker under the given validator set. + pub fn blank(signers: Vec
) -> Self { + RollingFinality { + headers: VecDeque::new(), + signers: SimpleList::new(signers), + sign_count: HashMap::new(), + last_pushed: None, + } + } - /// Extract unfinalized subchain from ancestry iterator. - /// Clears the current subchain. - /// - /// Fails if any provided signature isn't part of the signers set. - pub fn build_ancestry_subchain(&mut self, iterable: I) -> Result<(), UnknownValidator> - where I: IntoIterator)> - { - self.clear(); - for (hash, signers) in iterable { - if signers.iter().any(|s| !self.signers.contains(s)) { return Err(UnknownValidator) } - if self.last_pushed.is_none() { self.last_pushed = Some(hash) } + /// Extract unfinalized subchain from ancestry iterator. + /// Clears the current subchain. + /// + /// Fails if any provided signature isn't part of the signers set. + pub fn build_ancestry_subchain(&mut self, iterable: I) -> Result<(), UnknownValidator> + where + I: IntoIterator)>, + { + self.clear(); + for (hash, signers) in iterable { + if signers.iter().any(|s| !self.signers.contains(s)) { + return Err(UnknownValidator); + } + if self.last_pushed.is_none() { + self.last_pushed = Some(hash) + } - // break when we've got our first finalized block. - { - let current_signed = self.sign_count.len(); + // break when we've got our first finalized block. 
+ { + let current_signed = self.sign_count.len(); - let new_signers = signers.iter().filter(|s| !self.sign_count.contains_key(s)).count(); - let would_be_finalized = (current_signed + new_signers) * 2 > self.signers.len(); + let new_signers = signers + .iter() + .filter(|s| !self.sign_count.contains_key(s)) + .count(); + let would_be_finalized = (current_signed + new_signers) * 2 > self.signers.len(); - if would_be_finalized { - trace!(target: "finality", "Encountered already finalized block {}", hash); - break - } + if would_be_finalized { + trace!(target: "finality", "Encountered already finalized block {}", hash); + break; + } - for signer in signers.iter() { - *self.sign_count.entry(*signer).or_insert(0) += 1; - } - } + for signer in signers.iter() { + *self.sign_count.entry(*signer).or_insert(0) += 1; + } + } - self.headers.push_front((hash, signers)); - } + self.headers.push_front((hash, signers)); + } - trace!(target: "finality", "Rolling finality state: {:?}", self.headers); - Ok(()) - } + trace!(target: "finality", "Rolling finality state: {:?}", self.headers); + Ok(()) + } - /// Clear the finality status, but keeps the validator set. - pub fn clear(&mut self) { - self.headers.clear(); - self.sign_count.clear(); - self.last_pushed = None; - } + /// Clear the finality status, but keeps the validator set. + pub fn clear(&mut self) { + self.headers.clear(); + self.sign_count.clear(); + self.last_pushed = None; + } - /// Returns the last pushed hash. - pub fn subchain_head(&self) -> Option { - self.last_pushed - } + /// Returns the last pushed hash. + pub fn subchain_head(&self) -> Option { + self.last_pushed + } - /// Get an iterator over stored hashes in order. - #[cfg(test)] - pub fn unfinalized_hashes(&self) -> impl Iterator { - self.headers.iter().map(|(h, _)| h) - } + /// Get an iterator over stored hashes in order. + #[cfg(test)] + pub fn unfinalized_hashes(&self) -> impl Iterator { + self.headers.iter().map(|(h, _)| h) + } - /// Get the validator set. 
- pub fn validators(&self) -> &SimpleList { &self.signers } + /// Get the validator set. + pub fn validators(&self) -> &SimpleList { + &self.signers + } - /// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent) - /// - /// Fails if `signer` isn't a member of the active validator set. - /// Returns a list of all newly finalized headers. - // TODO: optimize with smallvec. - pub fn push_hash(&mut self, head: H256, signers: Vec
) -> Result, UnknownValidator> { - if signers.iter().any(|s| !self.signers.contains(s)) { return Err(UnknownValidator) } + /// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent) + /// + /// Fails if `signer` isn't a member of the active validator set. + /// Returns a list of all newly finalized headers. + // TODO: optimize with smallvec. + pub fn push_hash( + &mut self, + head: H256, + signers: Vec
, + ) -> Result, UnknownValidator> { + if signers.iter().any(|s| !self.signers.contains(s)) { + return Err(UnknownValidator); + } - for signer in signers.iter() { - *self.sign_count.entry(*signer).or_insert(0) += 1; - } + for signer in signers.iter() { + *self.sign_count.entry(*signer).or_insert(0) += 1; + } - self.headers.push_back((head, signers)); + self.headers.push_back((head, signers)); - let mut newly_finalized = Vec::new(); + let mut newly_finalized = Vec::new(); - while self.sign_count.len() * 2 > self.signers.len() { - let (hash, signers) = self.headers.pop_front() - .expect("headers length always greater than sign count length; qed"); + while self.sign_count.len() * 2 > self.signers.len() { + let (hash, signers) = self + .headers + .pop_front() + .expect("headers length always greater than sign count length; qed"); - newly_finalized.push(hash); + newly_finalized.push(hash); - for signer in signers { - match self.sign_count.entry(signer) { + for signer in signers { + match self.sign_count.entry(signer) { Entry::Occupied(mut entry) => { // decrement count for this signer and purge on zero. 
*entry.get_mut() -= 1; @@ -138,82 +158,100 @@ impl RollingFinality { } Entry::Vacant(_) => panic!("all hashes in `header` should have entries in `sign_count` for their signers; qed"), } - } - } + } + } - trace!(target: "finality", "Blocks finalized by {:?}: {:?}", head, newly_finalized); + trace!(target: "finality", "Blocks finalized by {:?}: {:?}", head, newly_finalized); - self.last_pushed = Some(head); - Ok(newly_finalized) - } + self.last_pushed = Some(head); + Ok(newly_finalized) + } } #[cfg(test)] mod tests { - use ethereum_types::{H256, Address}; - use super::RollingFinality; + use super::RollingFinality; + use ethereum_types::{Address, H256}; - #[test] - fn rejects_unknown_signers() { - let signers = (0..3).map(|_| Address::random()).collect::>(); - let mut finality = RollingFinality::blank(signers.clone()); - assert!(finality.push_hash(H256::random(), vec![signers[0], Address::random()]).is_err()); - } + #[test] + fn rejects_unknown_signers() { + let signers = (0..3).map(|_| Address::random()).collect::>(); + let mut finality = RollingFinality::blank(signers.clone()); + assert!(finality + .push_hash(H256::random(), vec![signers[0], Address::random()]) + .is_err()); + } - #[test] - fn finalize_multiple() { - let signers: Vec<_> = (0..6).map(|_| Address::random()).collect(); + #[test] + fn finalize_multiple() { + let signers: Vec<_> = (0..6).map(|_| Address::random()).collect(); - let mut finality = RollingFinality::blank(signers.clone()); - let hashes: Vec<_> = (0..7).map(|_| H256::random()).collect(); + let mut finality = RollingFinality::blank(signers.clone()); + let hashes: Vec<_> = (0..7).map(|_| H256::random()).collect(); - // 3 / 6 signers is < 51% so no finality. - for (i, hash) in hashes.iter().take(6).cloned().enumerate() { - let i = i % 3; - assert!(finality.push_hash(hash, vec![signers[i]]).unwrap().len() == 0); - } + // 3 / 6 signers is < 51% so no finality. 
+ for (i, hash) in hashes.iter().take(6).cloned().enumerate() { + let i = i % 3; + assert!(finality.push_hash(hash, vec![signers[i]]).unwrap().len() == 0); + } - // after pushing a block signed by a fourth validator, the first four - // blocks of the unverified chain become verified. - assert_eq!(finality.push_hash(hashes[6], vec![signers[4]]).unwrap(), - vec![hashes[0], hashes[1], hashes[2], hashes[3]]); - } + // after pushing a block signed by a fourth validator, the first four + // blocks of the unverified chain become verified. + assert_eq!( + finality.push_hash(hashes[6], vec![signers[4]]).unwrap(), + vec![hashes[0], hashes[1], hashes[2], hashes[3]] + ); + } - #[test] - fn finalize_multiple_signers() { - let signers: Vec<_> = (0..6).map(|_| Address::random()).collect(); - let mut finality = RollingFinality::blank(signers.clone()); - let hash = H256::random(); + #[test] + fn finalize_multiple_signers() { + let signers: Vec<_> = (0..6).map(|_| Address::random()).collect(); + let mut finality = RollingFinality::blank(signers.clone()); + let hash = H256::random(); - // after pushing a block signed by four validators, it becomes verified right away. - assert_eq!(finality.push_hash(hash, signers[0..4].to_vec()).unwrap(), vec![hash]); - } + // after pushing a block signed by four validators, it becomes verified right away. 
+ assert_eq!( + finality.push_hash(hash, signers[0..4].to_vec()).unwrap(), + vec![hash] + ); + } - #[test] - fn from_ancestry() { - let signers: Vec<_> = (0..6).map(|_| Address::random()).collect(); - let hashes: Vec<_> = (0..12).map(|i| (H256::random(), vec![signers[i % 6]])).collect(); + #[test] + fn from_ancestry() { + let signers: Vec<_> = (0..6).map(|_| Address::random()).collect(); + let hashes: Vec<_> = (0..12) + .map(|i| (H256::random(), vec![signers[i % 6]])) + .collect(); - let mut finality = RollingFinality::blank(signers.clone()); - finality.build_ancestry_subchain(hashes.iter().rev().cloned()).unwrap(); + let mut finality = RollingFinality::blank(signers.clone()); + finality + .build_ancestry_subchain(hashes.iter().rev().cloned()) + .unwrap(); - assert_eq!(finality.unfinalized_hashes().count(), 3); - assert_eq!(finality.subchain_head(), Some(hashes[11].0)); - } + assert_eq!(finality.unfinalized_hashes().count(), 3); + assert_eq!(finality.subchain_head(), Some(hashes[11].0)); + } - #[test] - fn from_ancestry_multiple_signers() { - let signers: Vec<_> = (0..6).map(|_| Address::random()).collect(); - let hashes: Vec<_> = (0..12).map(|i| { - (H256::random(), vec![signers[i % 6], signers[(i + 1) % 6], signers[(i + 2) % 6]]) - }).collect(); + #[test] + fn from_ancestry_multiple_signers() { + let signers: Vec<_> = (0..6).map(|_| Address::random()).collect(); + let hashes: Vec<_> = (0..12) + .map(|i| { + ( + H256::random(), + vec![signers[i % 6], signers[(i + 1) % 6], signers[(i + 2) % 6]], + ) + }) + .collect(); - let mut finality = RollingFinality::blank(signers.clone()); - finality.build_ancestry_subchain(hashes.iter().rev().cloned()).unwrap(); + let mut finality = RollingFinality::blank(signers.clone()); + finality + .build_ancestry_subchain(hashes.iter().rev().cloned()) + .unwrap(); - // only the last hash has < 51% of authorities' signatures - assert_eq!(finality.unfinalized_hashes().count(), 1); - assert_eq!(finality.unfinalized_hashes().next(), 
Some(&hashes[11].0)); - assert_eq!(finality.subchain_head(), Some(hashes[11].0)); - } + // only the last hash has < 51% of authorities' signatures + assert_eq!(finality.unfinalized_hashes().count(), 1); + assert_eq!(finality.unfinalized_hashes().next(), Some(&hashes[11].0)); + assert_eq!(finality.subchain_head(), Some(hashes[11].0)); + } } diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs index 3350498e9..dfdf2e48b 100644 --- a/ethcore/src/engines/authority_round/mod.rs +++ b/ethcore/src/engines/authority_round/mod.rs @@ -16,273 +16,300 @@ //! A blockchain engine that supports a non-instant BFT proof-of-authority. -use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::{cmp, fmt}; -use std::iter::FromIterator; -use std::ops::Deref; -use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; -use std::sync::{Weak, Arc}; -use std::time::{UNIX_EPOCH, Duration}; +use std::{ + cmp, + collections::{BTreeMap, BTreeSet, HashSet}, + fmt, + iter::FromIterator, + ops::Deref, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering}, + Arc, Weak, + }, + time::{Duration, UNIX_EPOCH}, +}; -use block::*; -use client::{EngineClient, traits::ForceUpdateSealing}; -use engines::{Engine, Seal, EngineError, ConstructedVerifier}; -use engines::block_reward; -use engines::block_reward::{BlockRewardContract, RewardKind}; -use error::{Error, ErrorKind, BlockError}; -use ethjson; -use machine::{AuxiliaryData, Call, EthereumMachine}; -use hash::keccak; -use super::signer::EngineSigner; -use super::validator_set::{ValidatorSet, SimpleList, new_validator_set}; use self::finality::RollingFinality; +use super::{ + signer::EngineSigner, + validator_set::{new_validator_set, SimpleList, ValidatorSet}, +}; +use block::*; +use client::{traits::ForceUpdateSealing, EngineClient}; +use engines::{ + block_reward, + block_reward::{BlockRewardContract, RewardKind}, + ConstructedVerifier, Engine, EngineError, Seal, 
+}; +use error::{BlockError, Error, ErrorKind}; +use ethereum_types::{Address, H256, H520, U128, U256}; +use ethjson; use ethkey::{self, Signature}; -use io::{IoContext, IoHandler, TimerToken, IoService}; +use hash::keccak; +use io::{IoContext, IoHandler, IoService, TimerToken}; use itertools::{self, Itertools}; -use rlp::{encode, Decodable, DecoderError, Encodable, RlpStream, Rlp}; -use ethereum_types::{H256, H520, Address, U128, U256}; +use machine::{AuxiliaryData, Call, EthereumMachine}; use parking_lot::{Mutex, RwLock}; +use rlp::{encode, Decodable, DecoderError, Encodable, Rlp, RlpStream}; use time_utils::CheckedSystemTime; -use types::BlockNumber; -use types::header::{Header, ExtendedHeader}; -use types::ancestry_action::AncestryAction; +use types::{ + ancestry_action::AncestryAction, + header::{ExtendedHeader, Header}, + BlockNumber, +}; use unexpected::{Mismatch, OutOfBounds}; mod finality; /// `AuthorityRound` params. pub struct AuthorityRoundParams { - /// Time to wait before next block or authority switching, - /// in seconds. - /// - /// Deliberately typed as u16 as too high of a value leads - /// to slow block issuance. - pub step_duration: u16, - /// Starting step, - pub start_step: Option, - /// Valid validators. - pub validators: Box, - /// Chain score validation transition block. - pub validate_score_transition: u64, - /// Monotonic step validation transition block. - pub validate_step_transition: u64, - /// Immediate transitions. - pub immediate_transitions: bool, - /// Block reward in base units. - pub block_reward: U256, - /// Block reward contract transition block. - pub block_reward_contract_transition: u64, - /// Block reward contract. - pub block_reward_contract: Option, - /// Number of accepted uncles transition block. - pub maximum_uncle_count_transition: u64, - /// Number of accepted uncles. - pub maximum_uncle_count: usize, - /// Empty step messages transition block. 
- pub empty_steps_transition: u64, - /// Number of accepted empty steps. - pub maximum_empty_steps: usize, - /// Transition block to strict empty steps validation. - pub strict_empty_steps_transition: u64, + /// Time to wait before next block or authority switching, + /// in seconds. + /// + /// Deliberately typed as u16 as too high of a value leads + /// to slow block issuance. + pub step_duration: u16, + /// Starting step, + pub start_step: Option, + /// Valid validators. + pub validators: Box, + /// Chain score validation transition block. + pub validate_score_transition: u64, + /// Monotonic step validation transition block. + pub validate_step_transition: u64, + /// Immediate transitions. + pub immediate_transitions: bool, + /// Block reward in base units. + pub block_reward: U256, + /// Block reward contract transition block. + pub block_reward_contract_transition: u64, + /// Block reward contract. + pub block_reward_contract: Option, + /// Number of accepted uncles transition block. + pub maximum_uncle_count_transition: u64, + /// Number of accepted uncles. + pub maximum_uncle_count: usize, + /// Empty step messages transition block. + pub empty_steps_transition: u64, + /// Number of accepted empty steps. + pub maximum_empty_steps: usize, + /// Transition block to strict empty steps validation. 
+ pub strict_empty_steps_transition: u64, } const U16_MAX: usize = ::std::u16::MAX as usize; impl From for AuthorityRoundParams { - fn from(p: ethjson::spec::AuthorityRoundParams) -> Self { - let mut step_duration_usize: usize = p.step_duration.into(); - if step_duration_usize > U16_MAX { - step_duration_usize = U16_MAX; - warn!(target: "engine", "step_duration is too high ({}), setting it to {}", step_duration_usize, U16_MAX); - } - AuthorityRoundParams { - step_duration: step_duration_usize as u16, - validators: new_validator_set(p.validators), - start_step: p.start_step.map(Into::into), - validate_score_transition: p.validate_score_transition.map_or(0, Into::into), - validate_step_transition: p.validate_step_transition.map_or(0, Into::into), - immediate_transitions: p.immediate_transitions.unwrap_or(false), - block_reward: p.block_reward.map_or_else(Default::default, Into::into), - block_reward_contract_transition: p.block_reward_contract_transition.map_or(0, Into::into), - block_reward_contract: match (p.block_reward_contract_code, p.block_reward_contract_address) { - (Some(code), _) => Some(BlockRewardContract::new_from_code(Arc::new(code.into()))), - (_, Some(address)) => Some(BlockRewardContract::new_from_address(address.into())), - (None, None) => None, - }, - maximum_uncle_count_transition: p.maximum_uncle_count_transition.map_or(0, Into::into), - maximum_uncle_count: p.maximum_uncle_count.map_or(0, Into::into), - empty_steps_transition: p.empty_steps_transition.map_or(u64::max_value(), |n| ::std::cmp::max(n.into(), 1)), - maximum_empty_steps: p.maximum_empty_steps.map_or(0, Into::into), - strict_empty_steps_transition: p.strict_empty_steps_transition.map_or(0, Into::into), - } - } + fn from(p: ethjson::spec::AuthorityRoundParams) -> Self { + let mut step_duration_usize: usize = p.step_duration.into(); + if step_duration_usize > U16_MAX { + step_duration_usize = U16_MAX; + warn!(target: "engine", "step_duration is too high ({}), setting it to {}", 
step_duration_usize, U16_MAX); + } + AuthorityRoundParams { + step_duration: step_duration_usize as u16, + validators: new_validator_set(p.validators), + start_step: p.start_step.map(Into::into), + validate_score_transition: p.validate_score_transition.map_or(0, Into::into), + validate_step_transition: p.validate_step_transition.map_or(0, Into::into), + immediate_transitions: p.immediate_transitions.unwrap_or(false), + block_reward: p.block_reward.map_or_else(Default::default, Into::into), + block_reward_contract_transition: p + .block_reward_contract_transition + .map_or(0, Into::into), + block_reward_contract: match ( + p.block_reward_contract_code, + p.block_reward_contract_address, + ) { + (Some(code), _) => Some(BlockRewardContract::new_from_code(Arc::new(code.into()))), + (_, Some(address)) => Some(BlockRewardContract::new_from_address(address.into())), + (None, None) => None, + }, + maximum_uncle_count_transition: p.maximum_uncle_count_transition.map_or(0, Into::into), + maximum_uncle_count: p.maximum_uncle_count.map_or(0, Into::into), + empty_steps_transition: p + .empty_steps_transition + .map_or(u64::max_value(), |n| ::std::cmp::max(n.into(), 1)), + maximum_empty_steps: p.maximum_empty_steps.map_or(0, Into::into), + strict_empty_steps_transition: p.strict_empty_steps_transition.map_or(0, Into::into), + } + } } // Helper for managing the step. #[derive(Debug)] struct Step { - calibrate: bool, // whether calibration is enabled. - inner: AtomicUsize, - duration: u16, + calibrate: bool, // whether calibration is enabled. 
+ inner: AtomicUsize, + duration: u16, } impl Step { - fn load(&self) -> u64 { self.inner.load(AtomicOrdering::SeqCst) as u64 } - fn duration_remaining(&self) -> Duration { - let now = unix_now(); - let expected_seconds = self.load() - .checked_add(1) - .and_then(|ctr| ctr.checked_mul(self.duration as u64)) - .map(Duration::from_secs); + fn load(&self) -> u64 { + self.inner.load(AtomicOrdering::SeqCst) as u64 + } + fn duration_remaining(&self) -> Duration { + let now = unix_now(); + let expected_seconds = self + .load() + .checked_add(1) + .and_then(|ctr| ctr.checked_mul(self.duration as u64)) + .map(Duration::from_secs); - match expected_seconds { - Some(step_end) if step_end > now => step_end - now, - Some(_) => Duration::from_secs(0), - None => { - let ctr = self.load(); - error!(target: "engine", "Step counter is too high: {}, aborting", ctr); - panic!("step counter is too high: {}", ctr) - }, - } + match expected_seconds { + Some(step_end) if step_end > now => step_end - now, + Some(_) => Duration::from_secs(0), + None => { + let ctr = self.load(); + error!(target: "engine", "Step counter is too high: {}, aborting", ctr); + panic!("step counter is too high: {}", ctr) + } + } + } - } + fn increment(&self) { + use std::usize; + // fetch_add won't panic on overflow but will rather wrap + // around, leading to zero as the step counter, which might + // lead to unexpected situations, so it's better to shut down. + if self.inner.fetch_add(1, AtomicOrdering::SeqCst) == usize::MAX { + error!(target: "engine", "Step counter is too high: {}, aborting", usize::MAX); + panic!("step counter is too high: {}", usize::MAX); + } + } - fn increment(&self) { - use std::usize; - // fetch_add won't panic on overflow but will rather wrap - // around, leading to zero as the step counter, which might - // lead to unexpected situations, so it's better to shut down. 
- if self.inner.fetch_add(1, AtomicOrdering::SeqCst) == usize::MAX { - error!(target: "engine", "Step counter is too high: {}, aborting", usize::MAX); - panic!("step counter is too high: {}", usize::MAX); - } + fn calibrate(&self) { + if self.calibrate { + let new_step = unix_now().as_secs() / (self.duration as u64); + self.inner.store(new_step as usize, AtomicOrdering::SeqCst); + } + } - } + fn check_future(&self, given: u64) -> Result<(), Option>> { + const REJECTED_STEP_DRIFT: u64 = 4; - fn calibrate(&self) { - if self.calibrate { - let new_step = unix_now().as_secs() / (self.duration as u64); - self.inner.store(new_step as usize, AtomicOrdering::SeqCst); - } - } + // Verify if the step is correct. + if given <= self.load() { + return Ok(()); + } - fn check_future(&self, given: u64) -> Result<(), Option>> { - const REJECTED_STEP_DRIFT: u64 = 4; + // Make absolutely sure that the given step is incorrect. + self.calibrate(); + let current = self.load(); - // Verify if the step is correct. - if given <= self.load() { - return Ok(()); - } - - // Make absolutely sure that the given step is incorrect. 
- self.calibrate(); - let current = self.load(); - - // reject blocks too far in the future - if given > current + REJECTED_STEP_DRIFT { - Err(None) - // wait a bit for blocks in near future - } else if given > current { - let d = self.duration as u64; - Err(Some(OutOfBounds { - min: None, - max: Some(d * current), - found: d * given, - })) - } else { - Ok(()) - } - } + // reject blocks too far in the future + if given > current + REJECTED_STEP_DRIFT { + Err(None) + // wait a bit for blocks in near future + } else if given > current { + let d = self.duration as u64; + Err(Some(OutOfBounds { + min: None, + max: Some(d * current), + found: d * given, + })) + } else { + Ok(()) + } + } } // Chain scoring: total weight is sqrt(U256::max_value())*height - step fn calculate_score(parent_step: u64, current_step: u64, current_empty_steps: usize) -> U256 { - U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) + U256::from(current_empty_steps) + U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) + + U256::from(current_empty_steps) } struct EpochManager { - epoch_transition_hash: H256, - epoch_transition_number: BlockNumber, - finality_checker: RollingFinality, - force: bool, + epoch_transition_hash: H256, + epoch_transition_number: BlockNumber, + finality_checker: RollingFinality, + force: bool, } impl EpochManager { - fn blank() -> Self { - EpochManager { - epoch_transition_hash: H256::default(), - epoch_transition_number: 0, - finality_checker: RollingFinality::blank(Vec::new()), - force: true, - } - } + fn blank() -> Self { + EpochManager { + epoch_transition_hash: H256::default(), + epoch_transition_number: 0, + finality_checker: RollingFinality::blank(Vec::new()), + force: true, + } + } - // zoom to epoch for given header. returns true if succeeded, false otherwise. 
- fn zoom_to(&mut self, client: &EngineClient, machine: &EthereumMachine, validators: &ValidatorSet, header: &Header) -> bool { - let last_was_parent = self.finality_checker.subchain_head() == Some(*header.parent_hash()); + // zoom to epoch for given header. returns true if succeeded, false otherwise. + fn zoom_to( + &mut self, + client: &EngineClient, + machine: &EthereumMachine, + validators: &ValidatorSet, + header: &Header, + ) -> bool { + let last_was_parent = self.finality_checker.subchain_head() == Some(*header.parent_hash()); - // early exit for current target == chain head, but only if the epochs are - // the same. - if last_was_parent && !self.force { - return true; - } + // early exit for current target == chain head, but only if the epochs are + // the same. + if last_was_parent && !self.force { + return true; + } - self.force = false; - debug!(target: "engine", "Zooming to epoch for block {}", header.hash()); + self.force = false; + debug!(target: "engine", "Zooming to epoch for block {}", header.hash()); - // epoch_transition_for can be an expensive call, but in the absence of - // forks it will only need to be called for the block directly after - // epoch transition, in which case it will be O(1) and require a single - // DB lookup. - let last_transition = match client.epoch_transition_for(*header.parent_hash()) { - Some(t) => t, - None => { - // this really should never happen unless the block passed - // hasn't got a parent in the database. - debug!(target: "engine", "No genesis transition found."); - return false; - } - }; + // epoch_transition_for can be an expensive call, but in the absence of + // forks it will only need to be called for the block directly after + // epoch transition, in which case it will be O(1) and require a single + // DB lookup. 
+ let last_transition = match client.epoch_transition_for(*header.parent_hash()) { + Some(t) => t, + None => { + // this really should never happen unless the block passed + // hasn't got a parent in the database. + debug!(target: "engine", "No genesis transition found."); + return false; + } + }; - // extract other epoch set if it's not the same as the last. - if last_transition.block_hash != self.epoch_transition_hash { - let (signal_number, set_proof, _) = destructure_proofs(&last_transition.proof) - .expect("proof produced by this engine; therefore it is valid; qed"); + // extract other epoch set if it's not the same as the last. + if last_transition.block_hash != self.epoch_transition_hash { + let (signal_number, set_proof, _) = destructure_proofs(&last_transition.proof) + .expect("proof produced by this engine; therefore it is valid; qed"); - trace!(target: "engine", "extracting epoch set for epoch ({}, {}) signalled at #{}", + trace!(target: "engine", "extracting epoch set for epoch ({}, {}) signalled at #{}", last_transition.block_number, last_transition.block_hash, signal_number); - let first = signal_number == 0; - let epoch_set = validators.epoch_set( - first, - machine, - signal_number, // use signal number so multi-set first calculation is correct. - set_proof, - ) - .ok() - .map(|(list, _)| list.into_inner()) - .expect("proof produced by this engine; therefore it is valid; qed"); + let first = signal_number == 0; + let epoch_set = validators + .epoch_set( + first, + machine, + signal_number, // use signal number so multi-set first calculation is correct. 
+ set_proof, + ) + .ok() + .map(|(list, _)| list.into_inner()) + .expect("proof produced by this engine; therefore it is valid; qed"); - self.finality_checker = RollingFinality::blank(epoch_set); - } + self.finality_checker = RollingFinality::blank(epoch_set); + } - self.epoch_transition_hash = last_transition.block_hash; - self.epoch_transition_number = last_transition.block_number; + self.epoch_transition_hash = last_transition.block_hash; + self.epoch_transition_number = last_transition.block_number; - true - } + true + } - // note new epoch hash. this will force the next block to re-load - // the epoch set - // TODO: optimize and don't require re-loading after epoch change. - fn note_new_epoch(&mut self) { - self.force = true; - } + // note new epoch hash. this will force the next block to re-load + // the epoch set + // TODO: optimize and don't require re-loading after epoch change. + fn note_new_epoch(&mut self) { + self.force = true; + } - /// Get validator set. Zoom to the correct epoch first. - fn validators(&self) -> &SimpleList { - self.finality_checker.validators() - } + /// Get validator set. Zoom to the correct epoch first. + fn validators(&self) -> &SimpleList { + self.finality_checker.validators() + } } /// A message broadcast by authorities when it's their turn to seal a block but there are no @@ -290,90 +317,103 @@ impl EpochManager { /// proof. 
#[derive(Clone, Debug, PartialEq, Eq)] struct EmptyStep { - signature: H520, - step: u64, - parent_hash: H256, + signature: H520, + step: u64, + parent_hash: H256, } impl PartialOrd for EmptyStep { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } impl Ord for EmptyStep { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.step.cmp(&other.step) - .then_with(|| self.parent_hash.cmp(&other.parent_hash)) - .then_with(|| self.signature.cmp(&other.signature)) - } + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.step + .cmp(&other.step) + .then_with(|| self.parent_hash.cmp(&other.parent_hash)) + .then_with(|| self.signature.cmp(&other.signature)) + } } impl EmptyStep { - fn from_sealed(sealed_empty_step: SealedEmptyStep, parent_hash: &H256) -> EmptyStep { - let signature = sealed_empty_step.signature; - let step = sealed_empty_step.step; - let parent_hash = parent_hash.clone(); - EmptyStep { signature, step, parent_hash } - } + fn from_sealed(sealed_empty_step: SealedEmptyStep, parent_hash: &H256) -> EmptyStep { + let signature = sealed_empty_step.signature; + let step = sealed_empty_step.step; + let parent_hash = parent_hash.clone(); + EmptyStep { + signature, + step, + parent_hash, + } + } - fn verify(&self, validators: &ValidatorSet) -> Result { - let message = keccak(empty_step_rlp(self.step, &self.parent_hash)); - let correct_proposer = step_proposer(validators, &self.parent_hash, self.step); + fn verify(&self, validators: &ValidatorSet) -> Result { + let message = keccak(empty_step_rlp(self.step, &self.parent_hash)); + let correct_proposer = step_proposer(validators, &self.parent_hash, self.step); - ethkey::verify_address(&correct_proposer, &self.signature.into(), &message) - .map_err(|e| e.into()) - } + ethkey::verify_address(&correct_proposer, &self.signature.into(), &message) + .map_err(|e| e.into()) + } - fn author(&self) -> Result { - let 
message = keccak(empty_step_rlp(self.step, &self.parent_hash)); - let public = ethkey::recover(&self.signature.into(), &message)?; - Ok(ethkey::public_to_address(&public)) - } + fn author(&self) -> Result { + let message = keccak(empty_step_rlp(self.step, &self.parent_hash)); + let public = ethkey::recover(&self.signature.into(), &message)?; + Ok(ethkey::public_to_address(&public)) + } - fn sealed(&self) -> SealedEmptyStep { - let signature = self.signature; - let step = self.step; - SealedEmptyStep { signature, step } - } + fn sealed(&self) -> SealedEmptyStep { + let signature = self.signature; + let step = self.step; + SealedEmptyStep { signature, step } + } } impl fmt::Display for EmptyStep { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, "({:x}, {}, {:x})", self.signature, self.step, self.parent_hash) - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!( + f, + "({:x}, {}, {:x})", + self.signature, self.step, self.parent_hash + ) + } } impl Encodable for EmptyStep { - fn rlp_append(&self, s: &mut RlpStream) { - let empty_step_rlp = empty_step_rlp(self.step, &self.parent_hash); - s.begin_list(2) - .append(&self.signature) - .append_raw(&empty_step_rlp, 1); - } + fn rlp_append(&self, s: &mut RlpStream) { + let empty_step_rlp = empty_step_rlp(self.step, &self.parent_hash); + s.begin_list(2) + .append(&self.signature) + .append_raw(&empty_step_rlp, 1); + } } impl Decodable for EmptyStep { - fn decode(rlp: &Rlp) -> Result { - let signature = rlp.val_at(0)?; - let empty_step_rlp = rlp.at(1)?; + fn decode(rlp: &Rlp) -> Result { + let signature = rlp.val_at(0)?; + let empty_step_rlp = rlp.at(1)?; - let step = empty_step_rlp.val_at(0)?; - let parent_hash = empty_step_rlp.val_at(1)?; + let step = empty_step_rlp.val_at(0)?; + let parent_hash = empty_step_rlp.val_at(1)?; - Ok(EmptyStep { signature, step, parent_hash }) - } + Ok(EmptyStep { + signature, + step, + parent_hash, + }) + } } pub fn 
empty_step_full_rlp(signature: &H520, empty_step_rlp: &[u8]) -> Vec { - let mut s = RlpStream::new_list(2); - s.append(signature).append_raw(empty_step_rlp, 1); - s.out() + let mut s = RlpStream::new_list(2); + s.append(signature).append_raw(empty_step_rlp, 1); + s.out() } pub fn empty_step_rlp(step: u64, parent_hash: &H256) -> Vec { - let mut s = RlpStream::new_list(2); - s.append(&step).append(parent_hash); - s.out() + let mut s = RlpStream::new_list(2); + s.append(&step).append(parent_hash); + s.out() } /// An empty step message that is included in a seal, the only difference is that it doesn't include @@ -381,475 +421,561 @@ pub fn empty_step_rlp(step: u64, parent_hash: &H256) -> Vec { /// message, which can be reconstructed by using the parent hash of the block in which this sealed /// empty message is included. struct SealedEmptyStep { - signature: H520, - step: u64, + signature: H520, + step: u64, } impl Encodable for SealedEmptyStep { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2) - .append(&self.signature) - .append(&self.step); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2).append(&self.signature).append(&self.step); + } } impl Decodable for SealedEmptyStep { - fn decode(rlp: &Rlp) -> Result { - let signature = rlp.val_at(0)?; - let step = rlp.val_at(1)?; + fn decode(rlp: &Rlp) -> Result { + let signature = rlp.val_at(0)?; + let step = rlp.val_at(1)?; - Ok(SealedEmptyStep { signature, step }) - } + Ok(SealedEmptyStep { signature, step }) + } } struct PermissionedStep { - inner: Step, - can_propose: AtomicBool, + inner: Step, + can_propose: AtomicBool, } /// Engine using `AuthorityRound` proof-of-authority BFT consensus. 
pub struct AuthorityRound { - transition_service: IoService<()>, - step: Arc, - client: Arc>>>, - signer: RwLock>>, - validators: Box, - validate_score_transition: u64, - validate_step_transition: u64, - empty_steps: Mutex>, - epoch_manager: Mutex, - immediate_transitions: bool, - block_reward: U256, - block_reward_contract_transition: u64, - block_reward_contract: Option, - maximum_uncle_count_transition: u64, - maximum_uncle_count: usize, - empty_steps_transition: u64, - strict_empty_steps_transition: u64, - maximum_empty_steps: usize, - machine: EthereumMachine, + transition_service: IoService<()>, + step: Arc, + client: Arc>>>, + signer: RwLock>>, + validators: Box, + validate_score_transition: u64, + validate_step_transition: u64, + empty_steps: Mutex>, + epoch_manager: Mutex, + immediate_transitions: bool, + block_reward: U256, + block_reward_contract_transition: u64, + block_reward_contract: Option, + maximum_uncle_count_transition: u64, + maximum_uncle_count: usize, + empty_steps_transition: u64, + strict_empty_steps_transition: u64, + maximum_empty_steps: usize, + machine: EthereumMachine, } // header-chain validator. struct EpochVerifier { - step: Arc, - subchain_validators: SimpleList, - empty_steps_transition: u64, + step: Arc, + subchain_validators: SimpleList, + empty_steps_transition: u64, } impl super::EpochVerifier for EpochVerifier { - fn verify_light(&self, header: &Header) -> Result<(), Error> { - // Validate the timestamp - verify_timestamp(&self.step.inner, header_step(header, self.empty_steps_transition)?)?; - // always check the seal since it's fast. - // nothing heavier to do. - verify_external(header, &self.subchain_validators, self.empty_steps_transition) - } + fn verify_light(&self, header: &Header) -> Result<(), Error> { + // Validate the timestamp + verify_timestamp( + &self.step.inner, + header_step(header, self.empty_steps_transition)?, + )?; + // always check the seal since it's fast. + // nothing heavier to do. 
+ verify_external( + header, + &self.subchain_validators, + self.empty_steps_transition, + ) + } - fn check_finality_proof(&self, proof: &[u8]) -> Option> { - let mut finality_checker = RollingFinality::blank(self.subchain_validators.clone().into_inner()); - let mut finalized = Vec::new(); + fn check_finality_proof(&self, proof: &[u8]) -> Option> { + let mut finality_checker = + RollingFinality::blank(self.subchain_validators.clone().into_inner()); + let mut finalized = Vec::new(); - let headers: Vec
= Rlp::new(proof).as_list().ok()?; + let headers: Vec
= Rlp::new(proof).as_list().ok()?; - { - let mut push_header = |parent_header: &Header, header: Option<&Header>| { - // ensure all headers have correct number of seal fields so we can `verify_external` - // and get `empty_steps` without panic. - if parent_header.seal().len() != header_expected_seal_fields(parent_header, self.empty_steps_transition) { - return None - } - if header.iter().any(|h| h.seal().len() != header_expected_seal_fields(h, self.empty_steps_transition)) { - return None - } + { + let mut push_header = |parent_header: &Header, header: Option<&Header>| { + // ensure all headers have correct number of seal fields so we can `verify_external` + // and get `empty_steps` without panic. + if parent_header.seal().len() + != header_expected_seal_fields(parent_header, self.empty_steps_transition) + { + return None; + } + if header.iter().any(|h| { + h.seal().len() != header_expected_seal_fields(h, self.empty_steps_transition) + }) { + return None; + } - // `verify_external` checks that signature is correct and author == signer. - verify_external(parent_header, &self.subchain_validators, self.empty_steps_transition).ok()?; + // `verify_external` checks that signature is correct and author == signer. + verify_external( + parent_header, + &self.subchain_validators, + self.empty_steps_transition, + ) + .ok()?; - let mut signers = match header { - Some(header) => header_empty_steps_signers(header, self.empty_steps_transition).ok()?, - _ => Vec::new(), - }; - signers.push(*parent_header.author()); + let mut signers = match header { + Some(header) => { + header_empty_steps_signers(header, self.empty_steps_transition).ok()? 
+ } + _ => Vec::new(), + }; + signers.push(*parent_header.author()); - let newly_finalized = finality_checker.push_hash(parent_header.hash(), signers).ok()?; - finalized.extend(newly_finalized); + let newly_finalized = finality_checker + .push_hash(parent_header.hash(), signers) + .ok()?; + finalized.extend(newly_finalized); - Some(()) - }; + Some(()) + }; - for window in headers.windows(2) { - push_header(&window[0], Some(&window[1]))?; - } + for window in headers.windows(2) { + push_header(&window[0], Some(&window[1]))?; + } - if let Some(last) = headers.last() { - push_header(last, None)?; - } - } + if let Some(last) = headers.last() { + push_header(last, None)?; + } + } - if finalized.is_empty() { None } else { Some(finalized) } - } + if finalized.is_empty() { + None + } else { + Some(finalized) + } + } } fn header_seal_hash(header: &Header, empty_steps_rlp: Option<&[u8]>) -> H256 { - match empty_steps_rlp { - Some(empty_steps_rlp) => { - let mut message = header.bare_hash().to_vec(); - message.extend_from_slice(empty_steps_rlp); - keccak(message) - }, - None => { - header.bare_hash() - }, - } + match empty_steps_rlp { + Some(empty_steps_rlp) => { + let mut message = header.bare_hash().to_vec(); + message.extend_from_slice(empty_steps_rlp); + keccak(message) + } + None => header.bare_hash(), + } } fn header_expected_seal_fields(header: &Header, empty_steps_transition: u64) -> usize { - if header.number() >= empty_steps_transition { - 3 - } else { - 2 - } + if header.number() >= empty_steps_transition { + 3 + } else { + 2 + } } fn header_step(header: &Header, empty_steps_transition: u64) -> Result { - Rlp::new(&header.seal().get(0).unwrap_or_else(|| + Rlp::new(&header.seal().get(0).unwrap_or_else(|| panic!("was either checked with verify_block_basic or is genesis; has {} fields; qed (Make sure the spec file has a correct genesis seal)", header_expected_seal_fields(header, empty_steps_transition)) )) .as_val() } -fn header_signature(header: &Header, 
empty_steps_transition: u64) -> Result { - Rlp::new(&header.seal().get(1).unwrap_or_else(|| - panic!("was checked with verify_block_basic; has {} fields; qed", - header_expected_seal_fields(header, empty_steps_transition)) - )) - .as_val::().map(Into::into) +fn header_signature( + header: &Header, + empty_steps_transition: u64, +) -> Result { + Rlp::new(&header.seal().get(1).unwrap_or_else(|| { + panic!( + "was checked with verify_block_basic; has {} fields; qed", + header_expected_seal_fields(header, empty_steps_transition) + ) + })) + .as_val::() + .map(Into::into) } // extracts the raw empty steps vec from the header seal. should only be called when there are 3 fields in the seal // (i.e. header.number() >= self.empty_steps_transition) fn header_empty_steps_raw(header: &Header) -> &[u8] { - header.seal().get(2).expect("was checked with verify_block_basic; has 3 fields; qed") + header + .seal() + .get(2) + .expect("was checked with verify_block_basic; has 3 fields; qed") } // extracts the empty steps from the header seal. should only be called when there are 3 fields in the seal // (i.e. header.number() >= self.empty_steps_transition). fn header_empty_steps(header: &Header) -> Result, ::rlp::DecoderError> { - let empty_steps = Rlp::new(header_empty_steps_raw(header)).as_list::()?; - Ok(empty_steps.into_iter().map(|s| EmptyStep::from_sealed(s, header.parent_hash())).collect()) + let empty_steps = Rlp::new(header_empty_steps_raw(header)).as_list::()?; + Ok(empty_steps + .into_iter() + .map(|s| EmptyStep::from_sealed(s, header.parent_hash())) + .collect()) } // gets the signers of empty step messages for the given header, does not include repeated signers -fn header_empty_steps_signers(header: &Header, empty_steps_transition: u64) -> Result, Error> { - if header.number() >= empty_steps_transition { - let mut signers = HashSet::new(); - for empty_step in header_empty_steps(header)? 
{ - signers.insert(empty_step.author()?); - } +fn header_empty_steps_signers( + header: &Header, + empty_steps_transition: u64, +) -> Result, Error> { + if header.number() >= empty_steps_transition { + let mut signers = HashSet::new(); + for empty_step in header_empty_steps(header)? { + signers.insert(empty_step.author()?); + } - Ok(Vec::from_iter(signers.into_iter())) - } else { - Ok(Vec::new()) - } + Ok(Vec::from_iter(signers.into_iter())) + } else { + Ok(Vec::new()) + } } fn step_proposer(validators: &ValidatorSet, bh: &H256, step: u64) -> Address { - let proposer = validators.get(bh, step as usize); - trace!(target: "engine", "Fetched proposer for step {}: {}", step, proposer); - proposer + let proposer = validators.get(bh, step as usize); + trace!(target: "engine", "Fetched proposer for step {}: {}", step, proposer); + proposer } fn is_step_proposer(validators: &ValidatorSet, bh: &H256, step: u64, address: &Address) -> bool { - step_proposer(validators, bh, step) == *address + step_proposer(validators, bh, step) == *address } fn verify_timestamp(step: &Step, header_step: u64) -> Result<(), BlockError> { - match step.check_future(header_step) { - Err(None) => { - trace!(target: "engine", "verify_timestamp: block from the future"); - Err(BlockError::InvalidSeal.into()) - }, - Err(Some(oob)) => { - // NOTE This error might be returned only in early stage of verification (Stage 1). - // Returning it further won't recover the sync process. - trace!(target: "engine", "verify_timestamp: block too early"); + match step.check_future(header_step) { + Err(None) => { + trace!(target: "engine", "verify_timestamp: block from the future"); + Err(BlockError::InvalidSeal.into()) + } + Err(Some(oob)) => { + // NOTE This error might be returned only in early stage of verification (Stage 1). + // Returning it further won't recover the sync process. 
+ trace!(target: "engine", "verify_timestamp: block too early"); - let found = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(oob.found)) - .ok_or(BlockError::TimestampOverflow)?; - let max = oob.max.and_then(|m| CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(m))); - let min = oob.min.and_then(|m| CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(m))); + let found = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(oob.found)) + .ok_or(BlockError::TimestampOverflow)?; + let max = oob + .max + .and_then(|m| CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(m))); + let min = oob + .min + .and_then(|m| CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(m))); - let new_oob = OutOfBounds { min, max, found }; + let new_oob = OutOfBounds { min, max, found }; - Err(BlockError::TemporarilyInvalid(new_oob).into()) - }, - Ok(_) => Ok(()), - } + Err(BlockError::TemporarilyInvalid(new_oob).into()) + } + Ok(_) => Ok(()), + } } -fn verify_external(header: &Header, validators: &ValidatorSet, empty_steps_transition: u64) -> Result<(), Error> { - let header_step = header_step(header, empty_steps_transition)?; +fn verify_external( + header: &Header, + validators: &ValidatorSet, + empty_steps_transition: u64, +) -> Result<(), Error> { + let header_step = header_step(header, empty_steps_transition)?; - let proposer_signature = header_signature(header, empty_steps_transition)?; - let correct_proposer = validators.get(header.parent_hash(), header_step as usize); - let is_invalid_proposer = *header.author() != correct_proposer || { - let empty_steps_rlp = if header.number() >= empty_steps_transition { - Some(header_empty_steps_raw(header)) - } else { - None - }; + let proposer_signature = header_signature(header, empty_steps_transition)?; + let correct_proposer = validators.get(header.parent_hash(), header_step as usize); + let is_invalid_proposer = *header.author() != correct_proposer || { + let 
empty_steps_rlp = if header.number() >= empty_steps_transition { + Some(header_empty_steps_raw(header)) + } else { + None + }; - let header_seal_hash = header_seal_hash(header, empty_steps_rlp); - !ethkey::verify_address(&correct_proposer, &proposer_signature, &header_seal_hash)? - }; + let header_seal_hash = header_seal_hash(header, empty_steps_rlp); + !ethkey::verify_address(&correct_proposer, &proposer_signature, &header_seal_hash)? + }; - if is_invalid_proposer { - trace!(target: "engine", "verify_block_external: bad proposer for step: {}", header_step); - Err(EngineError::NotProposer(Mismatch { expected: correct_proposer, found: *header.author() }))? - } else { - Ok(()) - } + if is_invalid_proposer { + trace!(target: "engine", "verify_block_external: bad proposer for step: {}", header_step); + Err(EngineError::NotProposer(Mismatch { + expected: correct_proposer, + found: *header.author(), + }))? + } else { + Ok(()) + } } fn combine_proofs(signal_number: BlockNumber, set_proof: &[u8], finality_proof: &[u8]) -> Vec { - let mut stream = ::rlp::RlpStream::new_list(3); - stream.append(&signal_number).append(&set_proof).append(&finality_proof); - stream.out() + let mut stream = ::rlp::RlpStream::new_list(3); + stream + .append(&signal_number) + .append(&set_proof) + .append(&finality_proof); + stream.out() } - fn destructure_proofs(combined: &[u8]) -> Result<(BlockNumber, &[u8], &[u8]), Error> { - let rlp = Rlp::new(combined); - Ok(( - rlp.at(0)?.as_val()?, - rlp.at(1)?.data()?, - rlp.at(2)?.data()?, - )) + let rlp = Rlp::new(combined); + Ok((rlp.at(0)?.as_val()?, rlp.at(1)?.data()?, rlp.at(2)?.data()?)) } trait AsMillis { - fn as_millis(&self) -> u64; + fn as_millis(&self) -> u64; } impl AsMillis for Duration { - fn as_millis(&self) -> u64 { - self.as_secs() * 1_000 + (self.subsec_nanos() / 1_000_000) as u64 - } + fn as_millis(&self) -> u64 { + self.as_secs() * 1_000 + (self.subsec_nanos() / 1_000_000) as u64 + } } // A type for storing owned or borrowed data that 
has a common type. // Useful for returning either a borrow or owned data from a function. enum CowLike<'a, A: 'a + ?Sized, B> { - Borrowed(&'a A), - Owned(B), + Borrowed(&'a A), + Owned(B), } -impl<'a, A: ?Sized, B> Deref for CowLike<'a, A, B> where B: AsRef { - type Target = A; - fn deref(&self) -> &A { - match self { - CowLike::Borrowed(b) => b, - CowLike::Owned(o) => o.as_ref(), - } - } +impl<'a, A: ?Sized, B> Deref for CowLike<'a, A, B> +where + B: AsRef, +{ + type Target = A; + fn deref(&self) -> &A { + match self { + CowLike::Borrowed(b) => b, + CowLike::Owned(o) => o.as_ref(), + } + } } impl AuthorityRound { - /// Create a new instance of AuthorityRound engine. - pub fn new(our_params: AuthorityRoundParams, machine: EthereumMachine) -> Result, Error> { - if our_params.step_duration == 0 { - error!(target: "engine", "Authority Round step duration can't be zero, aborting"); - panic!("authority_round: step duration can't be zero") - } - let should_timeout = our_params.start_step.is_none(); - let initial_step = our_params.start_step.unwrap_or_else(|| (unix_now().as_secs() / (our_params.step_duration as u64))); - let engine = Arc::new( - AuthorityRound { - transition_service: IoService::<()>::start()?, - step: Arc::new(PermissionedStep { - inner: Step { - inner: AtomicUsize::new(initial_step as usize), - calibrate: our_params.start_step.is_none(), - duration: our_params.step_duration, - }, - can_propose: AtomicBool::new(true), - }), - client: Arc::new(RwLock::new(None)), - signer: RwLock::new(None), - validators: our_params.validators, - validate_score_transition: our_params.validate_score_transition, - validate_step_transition: our_params.validate_step_transition, - empty_steps: Default::default(), - epoch_manager: Mutex::new(EpochManager::blank()), - immediate_transitions: our_params.immediate_transitions, - block_reward: our_params.block_reward, - block_reward_contract_transition: our_params.block_reward_contract_transition, - block_reward_contract: 
our_params.block_reward_contract, - maximum_uncle_count_transition: our_params.maximum_uncle_count_transition, - maximum_uncle_count: our_params.maximum_uncle_count, - empty_steps_transition: our_params.empty_steps_transition, - maximum_empty_steps: our_params.maximum_empty_steps, - strict_empty_steps_transition: our_params.strict_empty_steps_transition, - machine: machine, - }); + /// Create a new instance of AuthorityRound engine. + pub fn new( + our_params: AuthorityRoundParams, + machine: EthereumMachine, + ) -> Result, Error> { + if our_params.step_duration == 0 { + error!(target: "engine", "Authority Round step duration can't be zero, aborting"); + panic!("authority_round: step duration can't be zero") + } + let should_timeout = our_params.start_step.is_none(); + let initial_step = our_params + .start_step + .unwrap_or_else(|| (unix_now().as_secs() / (our_params.step_duration as u64))); + let engine = Arc::new(AuthorityRound { + transition_service: IoService::<()>::start()?, + step: Arc::new(PermissionedStep { + inner: Step { + inner: AtomicUsize::new(initial_step as usize), + calibrate: our_params.start_step.is_none(), + duration: our_params.step_duration, + }, + can_propose: AtomicBool::new(true), + }), + client: Arc::new(RwLock::new(None)), + signer: RwLock::new(None), + validators: our_params.validators, + validate_score_transition: our_params.validate_score_transition, + validate_step_transition: our_params.validate_step_transition, + empty_steps: Default::default(), + epoch_manager: Mutex::new(EpochManager::blank()), + immediate_transitions: our_params.immediate_transitions, + block_reward: our_params.block_reward, + block_reward_contract_transition: our_params.block_reward_contract_transition, + block_reward_contract: our_params.block_reward_contract, + maximum_uncle_count_transition: our_params.maximum_uncle_count_transition, + maximum_uncle_count: our_params.maximum_uncle_count, + empty_steps_transition: our_params.empty_steps_transition, + 
maximum_empty_steps: our_params.maximum_empty_steps, + strict_empty_steps_transition: our_params.strict_empty_steps_transition, + machine: machine, + }); - // Do not initialize timeouts for tests. - if should_timeout { - let handler = TransitionHandler { - step: engine.step.clone(), - client: engine.client.clone(), - }; - engine.transition_service.register_handler(Arc::new(handler))?; - } - Ok(engine) - } + // Do not initialize timeouts for tests. + if should_timeout { + let handler = TransitionHandler { + step: engine.step.clone(), + client: engine.client.clone(), + }; + engine + .transition_service + .register_handler(Arc::new(handler))?; + } + Ok(engine) + } - // fetch correct validator set for epoch at header, taking into account - // finality of previous transitions. - fn epoch_set<'a>(&'a self, header: &Header) -> Result<(CowLike, BlockNumber), Error> { - Ok(if self.immediate_transitions { - (CowLike::Borrowed(&*self.validators), header.number()) - } else { - let mut epoch_manager = self.epoch_manager.lock(); - let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { - Some(client) => client, - None => { - debug!(target: "engine", "Unable to verify sig: missing client ref."); - return Err(EngineError::RequiresClient.into()) - } - }; + // fetch correct validator set for epoch at header, taking into account + // finality of previous transitions. 
+ fn epoch_set<'a>( + &'a self, + header: &Header, + ) -> Result<(CowLike, BlockNumber), Error> { + Ok(if self.immediate_transitions { + (CowLike::Borrowed(&*self.validators), header.number()) + } else { + let mut epoch_manager = self.epoch_manager.lock(); + let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { + Some(client) => client, + None => { + debug!(target: "engine", "Unable to verify sig: missing client ref."); + return Err(EngineError::RequiresClient.into()); + } + }; - if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, header) { - debug!(target: "engine", "Unable to zoom to epoch."); - return Err(EngineError::RequiresClient.into()) - } + if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, header) { + debug!(target: "engine", "Unable to zoom to epoch."); + return Err(EngineError::RequiresClient.into()); + } - (CowLike::Owned(epoch_manager.validators().clone()), epoch_manager.epoch_transition_number) - }) - } + ( + CowLike::Owned(epoch_manager.validators().clone()), + epoch_manager.epoch_transition_number, + ) + }) + } - fn empty_steps(&self, from_step: u64, to_step: u64, parent_hash: H256) -> Vec { - let from = EmptyStep { - step: from_step + 1, - parent_hash, - signature: Default::default(), - }; - let to = EmptyStep { - step: to_step, - parent_hash: Default::default(), - signature: Default::default(), - }; + fn empty_steps(&self, from_step: u64, to_step: u64, parent_hash: H256) -> Vec { + let from = EmptyStep { + step: from_step + 1, + parent_hash, + signature: Default::default(), + }; + let to = EmptyStep { + step: to_step, + parent_hash: Default::default(), + signature: Default::default(), + }; - if from >= to { - return vec![]; - } + if from >= to { + return vec![]; + } - self.empty_steps.lock() - .range(from..to) - .filter(|e| e.parent_hash == parent_hash) - .cloned() - .collect() - } + self.empty_steps + .lock() + .range(from..to) + .filter(|e| e.parent_hash == parent_hash) + .cloned() 
+ .collect() + } - fn clear_empty_steps(&self, step: u64) { - // clear old `empty_steps` messages - let mut empty_steps = self.empty_steps.lock(); - *empty_steps = empty_steps.split_off(&EmptyStep { - step: step + 1, - parent_hash: Default::default(), - signature: Default::default(), - }); - } + fn clear_empty_steps(&self, step: u64) { + // clear old `empty_steps` messages + let mut empty_steps = self.empty_steps.lock(); + *empty_steps = empty_steps.split_off(&EmptyStep { + step: step + 1, + parent_hash: Default::default(), + signature: Default::default(), + }); + } - fn handle_empty_step_message(&self, empty_step: EmptyStep) { - self.empty_steps.lock().insert(empty_step); - } + fn handle_empty_step_message(&self, empty_step: EmptyStep) { + self.empty_steps.lock().insert(empty_step); + } - fn generate_empty_step(&self, parent_hash: &H256) { - let step = self.step.inner.load(); - let empty_step_rlp = empty_step_rlp(step, parent_hash); + fn generate_empty_step(&self, parent_hash: &H256) { + let step = self.step.inner.load(); + let empty_step_rlp = empty_step_rlp(step, parent_hash); - if let Ok(signature) = self.sign(keccak(&empty_step_rlp)).map(Into::into) { - let message_rlp = empty_step_full_rlp(&signature, &empty_step_rlp); + if let Ok(signature) = self.sign(keccak(&empty_step_rlp)).map(Into::into) { + let message_rlp = empty_step_full_rlp(&signature, &empty_step_rlp); - let parent_hash = *parent_hash; - let empty_step = EmptyStep { signature, step, parent_hash }; + let parent_hash = *parent_hash; + let empty_step = EmptyStep { + signature, + step, + parent_hash, + }; - trace!(target: "engine", "broadcasting empty step message: {:?}", empty_step); - self.broadcast_message(message_rlp); - self.handle_empty_step_message(empty_step); + trace!(target: "engine", "broadcasting empty step message: {:?}", empty_step); + self.broadcast_message(message_rlp); + self.handle_empty_step_message(empty_step); + } else { + warn!(target: "engine", "generate_empty_step: FAIL: 
accounts secret key unavailable"); + } + } - } else { - warn!(target: "engine", "generate_empty_step: FAIL: accounts secret key unavailable"); - } - } + fn broadcast_message(&self, message: Vec) { + if let Some(ref weak) = *self.client.read() { + if let Some(c) = weak.upgrade() { + c.broadcast_consensus_message(message); + } + } + } - fn broadcast_message(&self, message: Vec) { - if let Some(ref weak) = *self.client.read() { - if let Some(c) = weak.upgrade() { - c.broadcast_consensus_message(message); - } - } - } + fn report_skipped( + &self, + header: &Header, + current_step: u64, + parent_step: u64, + validators: &ValidatorSet, + set_number: u64, + ) { + // we're building on top of the genesis block so don't report any skipped steps + if header.number() == 1 { + return; + } - fn report_skipped(&self, header: &Header, current_step: u64, parent_step: u64, validators: &ValidatorSet, set_number: u64) { - // we're building on top of the genesis block so don't report any skipped steps - if header.number() == 1 { - return; - } - - if let (true, Some(me)) = (current_step > parent_step + 1, self.signer.read().as_ref().map(|s| s.address())) { - debug!(target: "engine", "Author {} built block with step gap. current step: {}, parent step: {}", + if let (true, Some(me)) = ( + current_step > parent_step + 1, + self.signer.read().as_ref().map(|s| s.address()), + ) { + debug!(target: "engine", "Author {} built block with step gap. current step: {}, parent step: {}", header.author(), current_step, parent_step); - let mut reported = HashSet::new(); - for step in parent_step + 1..current_step { - let skipped_primary = step_proposer(validators, header.parent_hash(), step); - // Do not report this signer. - if skipped_primary != me { - // Stop reporting once validators start repeating. 
- if !reported.insert(skipped_primary) { break; } - self.validators.report_benign(&skipped_primary, set_number, header.number()); - } - } - } - } + let mut reported = HashSet::new(); + for step in parent_step + 1..current_step { + let skipped_primary = step_proposer(validators, header.parent_hash(), step); + // Do not report this signer. + if skipped_primary != me { + // Stop reporting once validators start repeating. + if !reported.insert(skipped_primary) { + break; + } + self.validators + .report_benign(&skipped_primary, set_number, header.number()); + } + } + } + } - // Returns the hashes of all ancestor blocks that are finalized by the given `chain_head`. - fn build_finality(&self, chain_head: &Header, ancestry: &mut Iterator) -> Vec { - if self.immediate_transitions { return Vec::new() } + // Returns the hashes of all ancestor blocks that are finalized by the given `chain_head`. + fn build_finality( + &self, + chain_head: &Header, + ancestry: &mut Iterator, + ) -> Vec { + if self.immediate_transitions { + return Vec::new(); + } - let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { - Some(client) => client, - None => { - warn!(target: "engine", "Unable to apply ancestry actions: missing client ref."); - return Vec::new(); - } - }; + let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { + Some(client) => client, + None => { + warn!(target: "engine", "Unable to apply ancestry actions: missing client ref."); + return Vec::new(); + } + }; - let mut epoch_manager = self.epoch_manager.lock(); - if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, chain_head) { - return Vec::new(); - } + let mut epoch_manager = self.epoch_manager.lock(); + if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, chain_head) { + return Vec::new(); + } - if epoch_manager.finality_checker.subchain_head() != Some(*chain_head.parent_hash()) { - // build new finality checker from unfinalized ancestry of 
chain head, not including chain head itself yet. - trace!(target: "finality", "Building finality up to parent of {} ({})", + if epoch_manager.finality_checker.subchain_head() != Some(*chain_head.parent_hash()) { + // build new finality checker from unfinalized ancestry of chain head, not including chain head itself yet. + trace!(target: "finality", "Building finality up to parent of {} ({})", chain_head.hash(), chain_head.parent_hash()); - // the empty steps messages in a header signal approval of the - // parent header. - let mut parent_empty_steps_signers = match header_empty_steps_signers(&chain_head, self.empty_steps_transition) { - Ok(empty_step_signers) => empty_step_signers, - Err(_) => { - warn!(target: "finality", "Failed to get empty step signatures from block {}", chain_head.hash()); - return Vec::new(); - } - }; + // the empty steps messages in a header signal approval of the + // parent header. + let mut parent_empty_steps_signers = match header_empty_steps_signers( + &chain_head, + self.empty_steps_transition, + ) { + Ok(empty_step_signers) => empty_step_signers, + Err(_) => { + warn!(target: "finality", "Failed to get empty step signatures from block {}", chain_head.hash()); + return Vec::new(); + } + }; - let epoch_transition_hash = epoch_manager.epoch_transition_hash; - let ancestry_iter = ancestry.map(|header| { + let epoch_transition_hash = epoch_manager.epoch_transition_hash; + let ancestry_iter = ancestry.map(|header| { let mut signers = vec![*header.author()]; signers.extend(parent_empty_steps_signers.drain(..)); @@ -869,953 +995,1140 @@ impl AuthorityRound { .while_some() .take_while(|&(h, _)| h != epoch_transition_hash); - if let Err(e) = epoch_manager.finality_checker.build_ancestry_subchain(ancestry_iter) { - debug!(target: "engine", "inconsistent validator set within epoch: {:?}", e); - return Vec::new(); - } - } + if let Err(e) = epoch_manager + .finality_checker + .build_ancestry_subchain(ancestry_iter) + { + debug!(target: "engine", 
"inconsistent validator set within epoch: {:?}", e); + return Vec::new(); + } + } - let finalized = epoch_manager.finality_checker.push_hash(chain_head.hash(), vec![*chain_head.author()]); - finalized.unwrap_or_default() - } + let finalized = epoch_manager + .finality_checker + .push_hash(chain_head.hash(), vec![*chain_head.author()]); + finalized.unwrap_or_default() + } } fn unix_now() -> Duration { - UNIX_EPOCH.elapsed().expect("Valid time has to be set in your system.") + UNIX_EPOCH + .elapsed() + .expect("Valid time has to be set in your system.") } struct TransitionHandler { - step: Arc, - client: Arc>>>, + step: Arc, + client: Arc>>>, } const ENGINE_TIMEOUT_TOKEN: TimerToken = 23; impl IoHandler<()> for TransitionHandler { - fn initialize(&self, io: &IoContext<()>) { - let remaining = AsMillis::as_millis(&self.step.inner.duration_remaining()); - io.register_timer_once(ENGINE_TIMEOUT_TOKEN, Duration::from_millis(remaining)) - .unwrap_or_else(|e| warn!(target: "engine", "Failed to start consensus step timer: {}.", e)) - } + fn initialize(&self, io: &IoContext<()>) { + let remaining = AsMillis::as_millis(&self.step.inner.duration_remaining()); + io.register_timer_once(ENGINE_TIMEOUT_TOKEN, Duration::from_millis(remaining)) + .unwrap_or_else( + |e| warn!(target: "engine", "Failed to start consensus step timer: {}.", e), + ) + } - fn timeout(&self, io: &IoContext<()>, timer: TimerToken) { - if timer == ENGINE_TIMEOUT_TOKEN { - // NOTE we might be lagging by couple of steps in case the timeout - // has not been called fast enough. - // Make sure to advance up to the actual step. 
- while AsMillis::as_millis(&self.step.inner.duration_remaining()) == 0 { - self.step.inner.increment(); - self.step.can_propose.store(true, AtomicOrdering::SeqCst); - if let Some(ref weak) = *self.client.read() { - if let Some(c) = weak.upgrade() { - c.update_sealing(ForceUpdateSealing::No); - } - } - } + fn timeout(&self, io: &IoContext<()>, timer: TimerToken) { + if timer == ENGINE_TIMEOUT_TOKEN { + // NOTE we might be lagging by couple of steps in case the timeout + // has not been called fast enough. + // Make sure to advance up to the actual step. + while AsMillis::as_millis(&self.step.inner.duration_remaining()) == 0 { + self.step.inner.increment(); + self.step.can_propose.store(true, AtomicOrdering::SeqCst); + if let Some(ref weak) = *self.client.read() { + if let Some(c) = weak.upgrade() { + c.update_sealing(ForceUpdateSealing::No); + } + } + } - let next_run_at = AsMillis::as_millis(&self.step.inner.duration_remaining()) >> 2; - io.register_timer_once(ENGINE_TIMEOUT_TOKEN, Duration::from_millis(next_run_at)) - .unwrap_or_else(|e| warn!(target: "engine", "Failed to restart consensus step timer: {}.", e)) - } - } + let next_run_at = AsMillis::as_millis(&self.step.inner.duration_remaining()) >> 2; + io.register_timer_once(ENGINE_TIMEOUT_TOKEN, Duration::from_millis(next_run_at)) + .unwrap_or_else( + |e| warn!(target: "engine", "Failed to restart consensus step timer: {}.", e), + ) + } + } } impl Engine for AuthorityRound { - fn name(&self) -> &str { "AuthorityRound" } + fn name(&self) -> &str { + "AuthorityRound" + } - fn machine(&self) -> &EthereumMachine { &self.machine } + fn machine(&self) -> &EthereumMachine { + &self.machine + } - /// Three fields - consensus step and the corresponding proposer signature, and a list of empty - /// step messages (which should be empty if no steps are skipped) - fn seal_fields(&self, header: &Header) -> usize { - header_expected_seal_fields(header, self.empty_steps_transition) - } + /// Three fields - consensus step and 
the corresponding proposer signature, and a list of empty + /// step messages (which should be empty if no steps are skipped) + fn seal_fields(&self, header: &Header) -> usize { + header_expected_seal_fields(header, self.empty_steps_transition) + } - fn step(&self) { - self.step.inner.increment(); - self.step.can_propose.store(true, AtomicOrdering::SeqCst); - if let Some(ref weak) = *self.client.read() { - if let Some(c) = weak.upgrade() { - c.update_sealing(ForceUpdateSealing::No); - } - } - } + fn step(&self) { + self.step.inner.increment(); + self.step.can_propose.store(true, AtomicOrdering::SeqCst); + if let Some(ref weak) = *self.client.read() { + if let Some(c) = weak.upgrade() { + c.update_sealing(ForceUpdateSealing::No); + } + } + } - /// Additional engine-specific information for the user/developer concerning `header`. - fn extra_info(&self, header: &Header) -> BTreeMap { - if header.seal().len() < header_expected_seal_fields(header, self.empty_steps_transition) { - return BTreeMap::default(); - } + /// Additional engine-specific information for the user/developer concerning `header`. 
+ fn extra_info(&self, header: &Header) -> BTreeMap { + if header.seal().len() < header_expected_seal_fields(header, self.empty_steps_transition) { + return BTreeMap::default(); + } - let step = header_step(header, self.empty_steps_transition).as_ref() - .map(ToString::to_string) - .unwrap_or_default(); - let signature = header_signature(header, self.empty_steps_transition).as_ref() - .map(ToString::to_string) - .unwrap_or_default(); + let step = header_step(header, self.empty_steps_transition) + .as_ref() + .map(ToString::to_string) + .unwrap_or_default(); + let signature = header_signature(header, self.empty_steps_transition) + .as_ref() + .map(ToString::to_string) + .unwrap_or_default(); - let mut info = map![ - "step".into() => step, - "signature".into() => signature - ]; + let mut info = map![ + "step".into() => step, + "signature".into() => signature + ]; - if header.number() >= self.empty_steps_transition { - let empty_steps = - if let Ok(empty_steps) = header_empty_steps(header).as_ref() { - format!("[{}]", - empty_steps.iter().fold( - "".to_string(), - |acc, e| if acc.len() > 0 { acc + ","} else { acc } + &e.to_string())) + if header.number() >= self.empty_steps_transition { + let empty_steps = if let Ok(empty_steps) = header_empty_steps(header).as_ref() { + format!( + "[{}]", + empty_steps + .iter() + .fold("".to_string(), |acc, e| if acc.len() > 0 { + acc + "," + } else { + acc + } + &e.to_string()) + ) + } else { + "".into() + }; - } else { - "".into() - }; + info.insert("emptySteps".into(), empty_steps); + } - info.insert("emptySteps".into(), empty_steps); - } + info + } - info - } + fn maximum_uncle_count(&self, block: BlockNumber) -> usize { + if block >= self.maximum_uncle_count_transition { + self.maximum_uncle_count + } else { + // fallback to default value + 2 + } + } - fn maximum_uncle_count(&self, block: BlockNumber) -> usize { - if block >= self.maximum_uncle_count_transition { - self.maximum_uncle_count - } else { - // fallback to default 
value - 2 - } - } + fn populate_from_parent(&self, header: &mut Header, parent: &Header) { + let parent_step = header_step(parent, self.empty_steps_transition) + .expect("Header has been verified; qed"); + let current_step = self.step.inner.load(); - fn populate_from_parent(&self, header: &mut Header, parent: &Header) { - let parent_step = header_step(parent, self.empty_steps_transition).expect("Header has been verified; qed"); - let current_step = self.step.inner.load(); + let current_empty_steps_len = if header.number() >= self.empty_steps_transition { + self.empty_steps(parent_step, current_step, parent.hash()) + .len() + } else { + 0 + }; - let current_empty_steps_len = if header.number() >= self.empty_steps_transition { - self.empty_steps(parent_step, current_step, parent.hash()).len() - } else { - 0 - }; + let score = calculate_score(parent_step, current_step, current_empty_steps_len); + header.set_difficulty(score); + } - let score = calculate_score(parent_step, current_step, current_empty_steps_len); - header.set_difficulty(score); - } + fn seals_internally(&self) -> Option { + // TODO: accept a `&Call` here so we can query the validator set. + Some(self.signer.read().is_some()) + } - fn seals_internally(&self) -> Option { - // TODO: accept a `&Call` here so we can query the validator set. 
- Some(self.signer.read().is_some()) - } + fn handle_message(&self, rlp: &[u8]) -> Result<(), EngineError> { + fn fmt_err(x: T) -> EngineError { + EngineError::MalformedMessage(format!("{:?}", x)) + } - fn handle_message(&self, rlp: &[u8]) -> Result<(), EngineError> { - fn fmt_err(x: T) -> EngineError { - EngineError::MalformedMessage(format!("{:?}", x)) - } + let rlp = Rlp::new(rlp); + let empty_step: EmptyStep = rlp.as_val().map_err(fmt_err)?; - let rlp = Rlp::new(rlp); - let empty_step: EmptyStep = rlp.as_val().map_err(fmt_err)?;; + if empty_step.verify(&*self.validators).unwrap_or(false) { + if self.step.inner.check_future(empty_step.step).is_ok() { + trace!(target: "engine", "handle_message: received empty step message {:?}", empty_step); + self.handle_empty_step_message(empty_step); + } else { + trace!(target: "engine", "handle_message: empty step message from the future {:?}", empty_step); + } + } else { + trace!(target: "engine", "handle_message: received invalid step message {:?}", empty_step); + }; - if empty_step.verify(&*self.validators).unwrap_or(false) { - if self.step.inner.check_future(empty_step.step).is_ok() { - trace!(target: "engine", "handle_message: received empty step message {:?}", empty_step); - self.handle_empty_step_message(empty_step); - } else { - trace!(target: "engine", "handle_message: empty step message from the future {:?}", empty_step); - } - } else { - trace!(target: "engine", "handle_message: received invalid step message {:?}", empty_step); - }; + Ok(()) + } - Ok(()) - } + /// Attempt to seal the block internally. + /// + /// This operation is synchronous and may (quite reasonably) not be available, in which case + /// `Seal::None` will be returned. 
+ fn generate_seal(&self, block: &ExecutedBlock, parent: &Header) -> Seal { + // first check to avoid generating signature most of the time + // (but there's still a race to the `compare_and_swap`) + if !self.step.can_propose.load(AtomicOrdering::SeqCst) { + trace!(target: "engine", "Aborting seal generation. Can't propose."); + return Seal::None; + } - /// Attempt to seal the block internally. - /// - /// This operation is synchronous and may (quite reasonably) not be available, in which case - /// `Seal::None` will be returned. - fn generate_seal(&self, block: &ExecutedBlock, parent: &Header) -> Seal { - // first check to avoid generating signature most of the time - // (but there's still a race to the `compare_and_swap`) - if !self.step.can_propose.load(AtomicOrdering::SeqCst) { - trace!(target: "engine", "Aborting seal generation. Can't propose."); - return Seal::None; - } + let header = &block.header; + let parent_step = header_step(parent, self.empty_steps_transition) + .expect("Header has been verified; qed"); - let header = &block.header; - let parent_step = header_step(parent, self.empty_steps_transition) - .expect("Header has been verified; qed"); + let step = self.step.inner.load(); - let step = self.step.inner.load(); + // filter messages from old and future steps and different parents + let empty_steps = if header.number() >= self.empty_steps_transition { + self.empty_steps(parent_step.into(), step.into(), *header.parent_hash()) + } else { + Vec::new() + }; - // filter messages from old and future steps and different parents - let empty_steps = if header.number() >= self.empty_steps_transition { - self.empty_steps(parent_step.into(), step.into(), *header.parent_hash()) - } else { - Vec::new() - }; + let expected_diff = calculate_score(parent_step, step.into(), empty_steps.len().into()); - let expected_diff = calculate_score(parent_step, step.into(), empty_steps.len().into()); - - if header.difficulty() != &expected_diff { - debug!(target: "engine", 
"Aborting seal generation. The step or empty_steps have changed in the meantime. {:?} != {:?}", + if header.difficulty() != &expected_diff { + debug!(target: "engine", "Aborting seal generation. The step or empty_steps have changed in the meantime. {:?} != {:?}", header.difficulty(), expected_diff); - return Seal::None; - } + return Seal::None; + } - if parent_step > step.into() { - warn!(target: "engine", "Aborting seal generation for invalid step: {} > {}", parent_step, step); - return Seal::None; - } + if parent_step > step.into() { + warn!(target: "engine", "Aborting seal generation for invalid step: {} > {}", parent_step, step); + return Seal::None; + } - let (validators, set_number) = match self.epoch_set(header) { - Err(err) => { - warn!(target: "engine", "Unable to generate seal: {}", err); - return Seal::None; - }, - Ok(ok) => ok, - }; + let (validators, set_number) = match self.epoch_set(header) { + Err(err) => { + warn!(target: "engine", "Unable to generate seal: {}", err); + return Seal::None; + } + Ok(ok) => ok, + }; - if is_step_proposer(&*validators, header.parent_hash(), step, header.author()) { - // this is guarded against by `can_propose` unless the block was signed - // on the same step (implies same key) and on a different node. - if parent_step == step { - warn!("Attempted to seal block on the same step as parent. Is this authority sealing with more than one node?"); - return Seal::None; - } + if is_step_proposer(&*validators, header.parent_hash(), step, header.author()) { + // this is guarded against by `can_propose` unless the block was signed + // on the same step (implies same key) and on a different node. + if parent_step == step { + warn!("Attempted to seal block on the same step as parent. Is this authority sealing with more than one node?"); + return Seal::None; + } - // if there are no transactions to include in the block, we don't seal and instead broadcast a signed - // `EmptyStep(step, parent_hash)` message. 
If we exceed the maximum amount of `empty_step` rounds we proceed - // with the seal. - if header.number() >= self.empty_steps_transition && - block.transactions.is_empty() && - empty_steps.len() < self.maximum_empty_steps { + // if there are no transactions to include in the block, we don't seal and instead broadcast a signed + // `EmptyStep(step, parent_hash)` message. If we exceed the maximum amount of `empty_step` rounds we proceed + // with the seal. + if header.number() >= self.empty_steps_transition + && block.transactions.is_empty() + && empty_steps.len() < self.maximum_empty_steps + { + if self + .step + .can_propose + .compare_and_swap(true, false, AtomicOrdering::SeqCst) + { + self.generate_empty_step(header.parent_hash()); + } - if self.step.can_propose.compare_and_swap(true, false, AtomicOrdering::SeqCst) { - self.generate_empty_step(header.parent_hash()); - } + return Seal::None; + } - return Seal::None; - } + let empty_steps_rlp = if header.number() >= self.empty_steps_transition { + let empty_steps: Vec<_> = empty_steps.iter().map(|e| e.sealed()).collect(); + Some(::rlp::encode_list(&empty_steps)) + } else { + None + }; - let empty_steps_rlp = if header.number() >= self.empty_steps_transition { - let empty_steps: Vec<_> = empty_steps.iter().map(|e| e.sealed()).collect(); - Some(::rlp::encode_list(&empty_steps)) - } else { - None - }; + if let Ok(signature) = self.sign(header_seal_hash( + header, + empty_steps_rlp.as_ref().map(|e| &**e), + )) { + trace!(target: "engine", "generate_seal: Issuing a block for step {}.", step); - if let Ok(signature) = self.sign(header_seal_hash(header, empty_steps_rlp.as_ref().map(|e| &**e))) { - trace!(target: "engine", "generate_seal: Issuing a block for step {}.", step); + // only issue the seal if we were the first to reach the compare_and_swap. 
+ if self + .step + .can_propose + .compare_and_swap(true, false, AtomicOrdering::SeqCst) + { + // we can drop all accumulated empty step messages that are + // older than the parent step since we're including them in + // the seal + self.clear_empty_steps(parent_step); - // only issue the seal if we were the first to reach the compare_and_swap. - if self.step.can_propose.compare_and_swap(true, false, AtomicOrdering::SeqCst) { - // we can drop all accumulated empty step messages that are - // older than the parent step since we're including them in - // the seal - self.clear_empty_steps(parent_step); + // report any skipped primaries between the parent block and + // the block we're sealing, unless we have empty steps enabled + if header.number() < self.empty_steps_transition { + self.report_skipped(header, step, parent_step, &*validators, set_number); + } - // report any skipped primaries between the parent block and - // the block we're sealing, unless we have empty steps enabled - if header.number() < self.empty_steps_transition { - self.report_skipped(header, step, parent_step, &*validators, set_number); - } + let mut fields = + vec![encode(&step), encode(&(&H520::from(signature) as &[u8]))]; - let mut fields = vec![ - encode(&step), - encode(&(&H520::from(signature) as &[u8])), - ]; + if let Some(empty_steps_rlp) = empty_steps_rlp { + fields.push(empty_steps_rlp); + } - if let Some(empty_steps_rlp) = empty_steps_rlp { - fields.push(empty_steps_rlp); - } - - return Seal::Regular(fields); - } - } else { - warn!(target: "engine", "generate_seal: FAIL: Accounts secret key unavailable."); - } - } else { - trace!(target: "engine", "generate_seal: {} not a proposer for step {}.", + return Seal::Regular(fields); + } + } else { + warn!(target: "engine", "generate_seal: FAIL: Accounts secret key unavailable."); + } + } else { + trace!(target: "engine", "generate_seal: {} not a proposer for step {}.", header.author(), step); - } - - Seal::None - } - - fn 
verify_local_seal(&self, _header: &Header) -> Result<(), Error> { - Ok(()) - } - - fn on_new_block( - &self, - block: &mut ExecutedBlock, - epoch_begin: bool, - _ancestry: &mut Iterator, - ) -> Result<(), Error> { - // with immediate transitions, we don't use the epoch mechanism anyway. - // the genesis is always considered an epoch, but we ignore it intentionally. - if self.immediate_transitions || !epoch_begin { return Ok(()) } - - // genesis is never a new block, but might as well check. - let header = block.header.clone(); - let first = header.number() == 0; - - let mut call = |to, data| { - let result = self.machine.execute_as_system( - block, - to, - U256::max_value(), // unbounded gas? maybe make configurable. - Some(data), - ); - - result.map_err(|e| format!("{}", e)) - }; - - self.validators.on_epoch_begin(first, &header, &mut call) - } - - /// Apply the block reward on finalisation of the block. - fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { - let mut beneficiaries = Vec::new(); - if block.header.number() >= self.empty_steps_transition { - let empty_steps = if block.header.seal().is_empty() { - // this is a new block, calculate rewards based on the empty steps messages we have accumulated - let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { - Some(client) => client, - None => { - debug!(target: "engine", "Unable to close block: missing client ref."); - return Err(EngineError::RequiresClient.into()) - }, - }; - - let parent = client.block_header(::client::BlockId::Hash(*block.header.parent_hash())) - .expect("hash is from parent; parent header must exist; qed") - .decode()?; - - let parent_step = header_step(&parent, self.empty_steps_transition)?; - let current_step = self.step.inner.load(); - self.empty_steps(parent_step.into(), current_step.into(), parent.hash()) - } else { - // we're verifying a block, extract empty steps from the seal - header_empty_steps(&block.header)? 
- }; - - for empty_step in empty_steps { - let author = empty_step.author()?; - beneficiaries.push((author, RewardKind::EmptyStep)); - } - } - - let author = *block.header.author(); - beneficiaries.push((author, RewardKind::Author)); - - let rewards: Vec<_> = match self.block_reward_contract { - Some(ref c) if block.header.number() >= self.block_reward_contract_transition => { - let mut call = super::default_system_or_code_call(&self.machine, block); - - let rewards = c.reward(&beneficiaries, &mut call)?; - rewards.into_iter().map(|(author, amount)| (author, RewardKind::External, amount)).collect() - }, - _ => { - beneficiaries.into_iter().map(|(author, reward_kind)| (author, reward_kind, self.block_reward)).collect() - }, - }; - - block_reward::apply_block_rewards(&rewards, block, &self.machine) - } - - /// Check the number of seal fields. - fn verify_block_basic(&self, header: &Header) -> Result<(), Error> { - if header.number() >= self.validate_score_transition && *header.difficulty() >= U256::from(U128::max_value()) { - return Err(From::from(BlockError::DifficultyOutOfBounds( - OutOfBounds { min: None, max: Some(U256::from(U128::max_value())), found: *header.difficulty() } - ))); - } - - match verify_timestamp(&self.step.inner, header_step(header, self.empty_steps_transition)?) { - Err(BlockError::InvalidSeal) => { - // This check runs in Phase 1 where there is no guarantee that the parent block is - // already imported, therefore the call to `epoch_set` may fail. 
In that case we - // won't report the misbehavior but this is not a concern because: - // - Only authorities can report and it's expected that they'll be up-to-date and - // importing, therefore the parent header will most likely be available - // - Even if you are an authority that is syncing the chain, the contract will most - // likely ignore old reports - // - This specific check is only relevant if you're importing (since it checks - // against wall clock) - if let Ok((_, set_number)) = self.epoch_set(header) { - self.validators.report_benign(header.author(), set_number, header.number()); - } - - Err(BlockError::InvalidSeal.into()) - } - Err(e) => Err(e.into()), - Ok(()) => Ok(()), - } - } - - /// Do the step and gas limit validation. - fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> { - let step = header_step(header, self.empty_steps_transition)?; - let parent_step = header_step(parent, self.empty_steps_transition)?; - - let (validators, set_number) = self.epoch_set(header)?; - - // Ensure header is from the step after parent. - if step == parent_step - || (header.number() >= self.validate_step_transition && step <= parent_step) { - trace!(target: "engine", "Multiple blocks proposed for step {}.", parent_step); - - self.validators.report_malicious(header.author(), set_number, header.number(), Default::default()); - Err(EngineError::DoubleVote(*header.author()))?; - } - - // If empty step messages are enabled we will validate the messages in the seal, missing messages are not - // reported as there's no way to tell whether the empty step message was never sent or simply not included. 
- let empty_steps_len = if header.number() >= self.empty_steps_transition { - let validate_empty_steps = || -> Result { - let strict_empty_steps = header.number() >= self.strict_empty_steps_transition; - let empty_steps = header_empty_steps(header)?; - let empty_steps_len = empty_steps.len(); - let mut prev_empty_step = 0; - - for empty_step in empty_steps { - if empty_step.step <= parent_step || empty_step.step >= step { - Err(EngineError::InsufficientProof( - format!("empty step proof for invalid step: {:?}", empty_step.step)))?; - } - - if empty_step.parent_hash != *header.parent_hash() { - Err(EngineError::InsufficientProof( - format!("empty step proof for invalid parent hash: {:?}", empty_step.parent_hash)))?; - } - - if !empty_step.verify(&*validators).unwrap_or(false) { - Err(EngineError::InsufficientProof( - format!("invalid empty step proof: {:?}", empty_step)))?; - } - - if strict_empty_steps { - if empty_step.step <= prev_empty_step { - Err(EngineError::InsufficientProof(format!( - "{} empty step: {:?}", - if empty_step.step == prev_empty_step { "duplicate" } else { "unordered" }, - empty_step - )))?; - } - - prev_empty_step = empty_step.step; - } - } - - Ok(empty_steps_len) - }; - - match validate_empty_steps() { - Ok(len) => len, - Err(err) => { - self.validators.report_benign(header.author(), set_number, header.number()); - return Err(err); - }, - } - } else { - self.report_skipped(header, step, parent_step, &*validators, set_number); - - 0 - }; - - if header.number() >= self.validate_score_transition { - let expected_difficulty = calculate_score(parent_step.into(), step.into(), empty_steps_len.into()); - if header.difficulty() != &expected_difficulty { - return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: expected_difficulty, found: header.difficulty().clone() }))); - } - } - - Ok(()) - } - - // Check the validators. 
- fn verify_block_external(&self, header: &Header) -> Result<(), Error> { - let (validators, set_number) = self.epoch_set(header)?; - - // verify signature against fixed list, but reports should go to the - // contract itself. - let res = verify_external(header, &*validators, self.empty_steps_transition); - match res { - Err(Error(ErrorKind::Engine(EngineError::NotProposer(_)), _)) => { - self.validators.report_benign(header.author(), set_number, header.number()); - }, - Ok(_) => { - // we can drop all accumulated empty step messages that are older than this header's step - let header_step = header_step(header, self.empty_steps_transition)?; - self.clear_empty_steps(header_step.into()); - }, - _ => {}, - } - res - } - - fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { - self.validators.genesis_epoch_data(header, call) - .map(|set_proof| combine_proofs(0, &set_proof, &[])) - } - - fn signals_epoch_end(&self, header: &Header, aux: AuxiliaryData) - -> super::EpochChange - { - if self.immediate_transitions { return super::EpochChange::No } - - let first = header.number() == 0; - self.validators.signals_epoch_end(first, header, aux) - } - - fn is_epoch_end_light( - &self, - chain_head: &Header, - chain: &super::Headers
, - transition_store: &super::PendingTransitionStore, - ) -> Option> { - // epochs only matter if we want to support light clients. - if self.immediate_transitions { return None } - - let epoch_transition_hash = { - let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { - Some(client) => client, - None => { - warn!(target: "engine", "Unable to check for epoch end: missing client ref."); - return None; - } - }; - - let mut epoch_manager = self.epoch_manager.lock(); - if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, chain_head) { - return None; - } - - epoch_manager.epoch_transition_hash - }; - - let mut hash = *chain_head.parent_hash(); - - let mut ancestry = itertools::repeat_call(move || { - chain(hash).and_then(|header| { - if header.number() == 0 { return None } - hash = *header.parent_hash(); - Some(header) - }) - }) - .while_some() - .take_while(|header| header.hash() != epoch_transition_hash); - - let finalized = self.build_finality(chain_head, &mut ancestry); - - self.is_epoch_end(chain_head, &finalized, chain, transition_store) - } - - fn is_epoch_end( - &self, - chain_head: &Header, - finalized: &[H256], - chain: &super::Headers
, - transition_store: &super::PendingTransitionStore, - ) -> Option> { - // epochs only matter if we want to support light clients. - if self.immediate_transitions { return None } - - let first = chain_head.number() == 0; - - // Apply transitions that don't require finality and should be enacted immediately (e.g from chain spec) - if let Some(change) = self.validators.is_epoch_end(first, chain_head) { - info!(target: "engine", "Immediately applying validator set change signalled at block {}", chain_head.number()); - self.epoch_manager.lock().note_new_epoch(); - let change = combine_proofs(chain_head.number(), &change, &[]); - return Some(change) - } - - // check transition store for pending transitions against recently finalized blocks - for finalized_hash in finalized { - if let Some(pending) = transition_store(*finalized_hash) { - // walk the chain backwards from current head until finalized_hash - // to construct transition proof. author == ec_recover(sig) known - // since the blocks are in the DB. - let mut hash = chain_head.hash(); - let mut finality_proof: Vec<_> = itertools::repeat_call(move || { - chain(hash).and_then(|header| { - hash = *header.parent_hash(); - if header.number() == 0 { None } - else { Some(header) } - }) - }) - .while_some() - .take_while(|h| h.hash() != *finalized_hash) - .collect(); - - let finalized_header = if *finalized_hash == chain_head.hash() { - // chain closure only stores ancestry, but the chain head is also unfinalized. 
- chain_head.clone() - } else { - chain(*finalized_hash) - .expect("header is finalized; finalized headers must exist in the chain; qed") - }; - - let signal_number = finalized_header.number(); - info!(target: "engine", "Applying validator set change signalled at block {}", signal_number); - - finality_proof.push(finalized_header); - finality_proof.reverse(); - - let finality_proof = ::rlp::encode_list(&finality_proof); - - self.epoch_manager.lock().note_new_epoch(); - - // We turn off can_propose here because upon validator set change there can - // be two valid proposers for a single step: one from the old set and - // one from the new. - // - // This way, upon encountering an epoch change, the proposer from the - // new set will be forced to wait until the next step to avoid sealing a - // block that breaks the invariant that the parent's step < the block's step. - self.step.can_propose.store(false, AtomicOrdering::SeqCst); - return Some(combine_proofs(signal_number, &pending.proof, &*finality_proof)); - } - } - - None - } - - fn epoch_verifier<'a>(&self, _header: &Header, proof: &'a [u8]) -> ConstructedVerifier<'a, EthereumMachine> { - let (signal_number, set_proof, finality_proof) = match destructure_proofs(proof) { - Ok(x) => x, - Err(e) => return ConstructedVerifier::Err(e), - }; - - let first = signal_number == 0; - match self.validators.epoch_set(first, &self.machine, signal_number, set_proof) { - Ok((list, finalize)) => { - let verifier = Box::new(EpochVerifier { - step: self.step.clone(), - subchain_validators: list, - empty_steps_transition: self.empty_steps_transition, - }); - - match finalize { - Some(finalize) => ConstructedVerifier::Unconfirmed(verifier, finality_proof, finalize), - None => ConstructedVerifier::Trusted(verifier), - } - } - Err(e) => ConstructedVerifier::Err(e), - } - } - - fn register_client(&self, client: Weak) { - *self.client.write() = Some(client.clone()); - self.validators.register_client(client); - } - - fn set_signer(&self, 
signer: Box) { - *self.signer.write() = Some(signer); - } - - fn sign(&self, hash: H256) -> Result { - Ok(self.signer.read() - .as_ref() - .ok_or(ethkey::Error::InvalidAddress)? - .sign(hash)? - ) - } - - fn snapshot_components(&self) -> Option> { - if self.immediate_transitions { - None - } else { - Some(Box::new(::snapshot::PoaSnapshot)) - } - } - - fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { - super::total_difficulty_fork_choice(new, current) - } - - fn ancestry_actions(&self, header: &Header, ancestry: &mut Iterator) -> Vec { - let finalized = self.build_finality( - header, - &mut ancestry.take_while(|e| !e.is_finalized).map(|e| e.header), - ); - - if !finalized.is_empty() { - debug!(target: "finality", "Finalizing blocks: {:?}", finalized); - } - - finalized.into_iter().map(AncestryAction::MarkFinalized).collect() - } + } + + Seal::None + } + + fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> { + Ok(()) + } + + fn on_new_block( + &self, + block: &mut ExecutedBlock, + epoch_begin: bool, + _ancestry: &mut Iterator, + ) -> Result<(), Error> { + // with immediate transitions, we don't use the epoch mechanism anyway. + // the genesis is always considered an epoch, but we ignore it intentionally. + if self.immediate_transitions || !epoch_begin { + return Ok(()); + } + + // genesis is never a new block, but might as well check. + let header = block.header.clone(); + let first = header.number() == 0; + + let mut call = |to, data| { + let result = self.machine.execute_as_system( + block, + to, + U256::max_value(), // unbounded gas? maybe make configurable. + Some(data), + ); + + result.map_err(|e| format!("{}", e)) + }; + + self.validators.on_epoch_begin(first, &header, &mut call) + } + + /// Apply the block reward on finalisation of the block. 
+ fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + let mut beneficiaries = Vec::new(); + if block.header.number() >= self.empty_steps_transition { + let empty_steps = if block.header.seal().is_empty() { + // this is a new block, calculate rewards based on the empty steps messages we have accumulated + let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { + Some(client) => client, + None => { + debug!(target: "engine", "Unable to close block: missing client ref."); + return Err(EngineError::RequiresClient.into()); + } + }; + + let parent = client + .block_header(::client::BlockId::Hash(*block.header.parent_hash())) + .expect("hash is from parent; parent header must exist; qed") + .decode()?; + + let parent_step = header_step(&parent, self.empty_steps_transition)?; + let current_step = self.step.inner.load(); + self.empty_steps(parent_step.into(), current_step.into(), parent.hash()) + } else { + // we're verifying a block, extract empty steps from the seal + header_empty_steps(&block.header)? + }; + + for empty_step in empty_steps { + let author = empty_step.author()?; + beneficiaries.push((author, RewardKind::EmptyStep)); + } + } + + let author = *block.header.author(); + beneficiaries.push((author, RewardKind::Author)); + + let rewards: Vec<_> = match self.block_reward_contract { + Some(ref c) if block.header.number() >= self.block_reward_contract_transition => { + let mut call = super::default_system_or_code_call(&self.machine, block); + + let rewards = c.reward(&beneficiaries, &mut call)?; + rewards + .into_iter() + .map(|(author, amount)| (author, RewardKind::External, amount)) + .collect() + } + _ => beneficiaries + .into_iter() + .map(|(author, reward_kind)| (author, reward_kind, self.block_reward)) + .collect(), + }; + + block_reward::apply_block_rewards(&rewards, block, &self.machine) + } + + /// Check the number of seal fields. 
+ fn verify_block_basic(&self, header: &Header) -> Result<(), Error> { + if header.number() >= self.validate_score_transition + && *header.difficulty() >= U256::from(U128::max_value()) + { + return Err(From::from(BlockError::DifficultyOutOfBounds(OutOfBounds { + min: None, + max: Some(U256::from(U128::max_value())), + found: *header.difficulty(), + }))); + } + + match verify_timestamp( + &self.step.inner, + header_step(header, self.empty_steps_transition)?, + ) { + Err(BlockError::InvalidSeal) => { + // This check runs in Phase 1 where there is no guarantee that the parent block is + // already imported, therefore the call to `epoch_set` may fail. In that case we + // won't report the misbehavior but this is not a concern because: + // - Only authorities can report and it's expected that they'll be up-to-date and + // importing, therefore the parent header will most likely be available + // - Even if you are an authority that is syncing the chain, the contract will most + // likely ignore old reports + // - This specific check is only relevant if you're importing (since it checks + // against wall clock) + if let Ok((_, set_number)) = self.epoch_set(header) { + self.validators + .report_benign(header.author(), set_number, header.number()); + } + + Err(BlockError::InvalidSeal.into()) + } + Err(e) => Err(e.into()), + Ok(()) => Ok(()), + } + } + + /// Do the step and gas limit validation. + fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> { + let step = header_step(header, self.empty_steps_transition)?; + let parent_step = header_step(parent, self.empty_steps_transition)?; + + let (validators, set_number) = self.epoch_set(header)?; + + // Ensure header is from the step after parent. 
+ if step == parent_step + || (header.number() >= self.validate_step_transition && step <= parent_step) + { + trace!(target: "engine", "Multiple blocks proposed for step {}.", parent_step); + + self.validators.report_malicious( + header.author(), + set_number, + header.number(), + Default::default(), + ); + Err(EngineError::DoubleVote(*header.author()))?; + } + + // If empty step messages are enabled we will validate the messages in the seal, missing messages are not + // reported as there's no way to tell whether the empty step message was never sent or simply not included. + let empty_steps_len = if header.number() >= self.empty_steps_transition { + let validate_empty_steps = || -> Result { + let strict_empty_steps = header.number() >= self.strict_empty_steps_transition; + let empty_steps = header_empty_steps(header)?; + let empty_steps_len = empty_steps.len(); + let mut prev_empty_step = 0; + + for empty_step in empty_steps { + if empty_step.step <= parent_step || empty_step.step >= step { + Err(EngineError::InsufficientProof(format!( + "empty step proof for invalid step: {:?}", + empty_step.step + )))?; + } + + if empty_step.parent_hash != *header.parent_hash() { + Err(EngineError::InsufficientProof(format!( + "empty step proof for invalid parent hash: {:?}", + empty_step.parent_hash + )))?; + } + + if !empty_step.verify(&*validators).unwrap_or(false) { + Err(EngineError::InsufficientProof(format!( + "invalid empty step proof: {:?}", + empty_step + )))?; + } + + if strict_empty_steps { + if empty_step.step <= prev_empty_step { + Err(EngineError::InsufficientProof(format!( + "{} empty step: {:?}", + if empty_step.step == prev_empty_step { + "duplicate" + } else { + "unordered" + }, + empty_step + )))?; + } + + prev_empty_step = empty_step.step; + } + } + + Ok(empty_steps_len) + }; + + match validate_empty_steps() { + Ok(len) => len, + Err(err) => { + self.validators + .report_benign(header.author(), set_number, header.number()); + return Err(err); + } + } + } 
else { + self.report_skipped(header, step, parent_step, &*validators, set_number); + + 0 + }; + + if header.number() >= self.validate_score_transition { + let expected_difficulty = + calculate_score(parent_step.into(), step.into(), empty_steps_len.into()); + if header.difficulty() != &expected_difficulty { + return Err(From::from(BlockError::InvalidDifficulty(Mismatch { + expected: expected_difficulty, + found: header.difficulty().clone(), + }))); + } + } + + Ok(()) + } + + // Check the validators. + fn verify_block_external(&self, header: &Header) -> Result<(), Error> { + let (validators, set_number) = self.epoch_set(header)?; + + // verify signature against fixed list, but reports should go to the + // contract itself. + let res = verify_external(header, &*validators, self.empty_steps_transition); + match res { + Err(Error(ErrorKind::Engine(EngineError::NotProposer(_)), _)) => { + self.validators + .report_benign(header.author(), set_number, header.number()); + } + Ok(_) => { + // we can drop all accumulated empty step messages that are older than this header's step + let header_step = header_step(header, self.empty_steps_transition)?; + self.clear_empty_steps(header_step.into()); + } + _ => {} + } + res + } + + fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { + self.validators + .genesis_epoch_data(header, call) + .map(|set_proof| combine_proofs(0, &set_proof, &[])) + } + + fn signals_epoch_end( + &self, + header: &Header, + aux: AuxiliaryData, + ) -> super::EpochChange { + if self.immediate_transitions { + return super::EpochChange::No; + } + + let first = header.number() == 0; + self.validators.signals_epoch_end(first, header, aux) + } + + fn is_epoch_end_light( + &self, + chain_head: &Header, + chain: &super::Headers
, + transition_store: &super::PendingTransitionStore, + ) -> Option> { + // epochs only matter if we want to support light clients. + if self.immediate_transitions { + return None; + } + + let epoch_transition_hash = { + let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { + Some(client) => client, + None => { + warn!(target: "engine", "Unable to check for epoch end: missing client ref."); + return None; + } + }; + + let mut epoch_manager = self.epoch_manager.lock(); + if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, chain_head) { + return None; + } + + epoch_manager.epoch_transition_hash + }; + + let mut hash = *chain_head.parent_hash(); + + let mut ancestry = itertools::repeat_call(move || { + chain(hash).and_then(|header| { + if header.number() == 0 { + return None; + } + hash = *header.parent_hash(); + Some(header) + }) + }) + .while_some() + .take_while(|header| header.hash() != epoch_transition_hash); + + let finalized = self.build_finality(chain_head, &mut ancestry); + + self.is_epoch_end(chain_head, &finalized, chain, transition_store) + } + + fn is_epoch_end( + &self, + chain_head: &Header, + finalized: &[H256], + chain: &super::Headers
, + transition_store: &super::PendingTransitionStore, + ) -> Option> { + // epochs only matter if we want to support light clients. + if self.immediate_transitions { + return None; + } + + let first = chain_head.number() == 0; + + // Apply transitions that don't require finality and should be enacted immediately (e.g from chain spec) + if let Some(change) = self.validators.is_epoch_end(first, chain_head) { + info!(target: "engine", "Immediately applying validator set change signalled at block {}", chain_head.number()); + self.epoch_manager.lock().note_new_epoch(); + let change = combine_proofs(chain_head.number(), &change, &[]); + return Some(change); + } + + // check transition store for pending transitions against recently finalized blocks + for finalized_hash in finalized { + if let Some(pending) = transition_store(*finalized_hash) { + // walk the chain backwards from current head until finalized_hash + // to construct transition proof. author == ec_recover(sig) known + // since the blocks are in the DB. + let mut hash = chain_head.hash(); + let mut finality_proof: Vec<_> = itertools::repeat_call(move || { + chain(hash).and_then(|header| { + hash = *header.parent_hash(); + if header.number() == 0 { + None + } else { + Some(header) + } + }) + }) + .while_some() + .take_while(|h| h.hash() != *finalized_hash) + .collect(); + + let finalized_header = if *finalized_hash == chain_head.hash() { + // chain closure only stores ancestry, but the chain head is also unfinalized. 
+ chain_head.clone() + } else { + chain(*finalized_hash).expect( + "header is finalized; finalized headers must exist in the chain; qed", + ) + }; + + let signal_number = finalized_header.number(); + info!(target: "engine", "Applying validator set change signalled at block {}", signal_number); + + finality_proof.push(finalized_header); + finality_proof.reverse(); + + let finality_proof = ::rlp::encode_list(&finality_proof); + + self.epoch_manager.lock().note_new_epoch(); + + // We turn off can_propose here because upon validator set change there can + // be two valid proposers for a single step: one from the old set and + // one from the new. + // + // This way, upon encountering an epoch change, the proposer from the + // new set will be forced to wait until the next step to avoid sealing a + // block that breaks the invariant that the parent's step < the block's step. + self.step.can_propose.store(false, AtomicOrdering::SeqCst); + return Some(combine_proofs( + signal_number, + &pending.proof, + &*finality_proof, + )); + } + } + + None + } + + fn epoch_verifier<'a>( + &self, + _header: &Header, + proof: &'a [u8], + ) -> ConstructedVerifier<'a, EthereumMachine> { + let (signal_number, set_proof, finality_proof) = match destructure_proofs(proof) { + Ok(x) => x, + Err(e) => return ConstructedVerifier::Err(e), + }; + + let first = signal_number == 0; + match self + .validators + .epoch_set(first, &self.machine, signal_number, set_proof) + { + Ok((list, finalize)) => { + let verifier = Box::new(EpochVerifier { + step: self.step.clone(), + subchain_validators: list, + empty_steps_transition: self.empty_steps_transition, + }); + + match finalize { + Some(finalize) => { + ConstructedVerifier::Unconfirmed(verifier, finality_proof, finalize) + } + None => ConstructedVerifier::Trusted(verifier), + } + } + Err(e) => ConstructedVerifier::Err(e), + } + } + + fn register_client(&self, client: Weak) { + *self.client.write() = Some(client.clone()); + 
self.validators.register_client(client); + } + + fn set_signer(&self, signer: Box) { + *self.signer.write() = Some(signer); + } + + fn sign(&self, hash: H256) -> Result { + Ok(self + .signer + .read() + .as_ref() + .ok_or(ethkey::Error::InvalidAddress)? + .sign(hash)?) + } + + fn snapshot_components(&self) -> Option> { + if self.immediate_transitions { + None + } else { + Some(Box::new(::snapshot::PoaSnapshot)) + } + } + + fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { + super::total_difficulty_fork_choice(new, current) + } + + fn ancestry_actions( + &self, + header: &Header, + ancestry: &mut Iterator, + ) -> Vec { + let finalized = self.build_finality( + header, + &mut ancestry.take_while(|e| !e.is_finalized).map(|e| e.header), + ); + + if !finalized.is_empty() { + debug!(target: "finality", "Finalizing blocks: {:?}", finalized); + } + + finalized + .into_iter() + .map(AncestryAction::MarkFinalized) + .collect() + } } #[cfg(test)] mod tests { - use std::collections::BTreeMap; - use std::sync::Arc; - use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; - use hash::keccak; - use accounts::AccountProvider; - use ethereum_types::{Address, H520, H256, U256}; - use ethkey::Signature; - use types::header::Header; - use rlp::encode; - use block::*; - use test_helpers::{ - generate_dummy_client_with_spec, get_temp_state_db, - TestNotify - }; - use spec::Spec; - use types::transaction::{Action, Transaction}; - use engines::{Seal, Engine, EngineError, EthEngine}; - use engines::validator_set::{TestSet, SimpleList}; - use error::{Error, ErrorKind}; - use super::{AuthorityRoundParams, AuthorityRound, EmptyStep, SealedEmptyStep, calculate_score}; + use super::{ + calculate_score, AuthorityRound, AuthorityRoundParams, EmptyStep, SealedEmptyStep, + }; + use accounts::AccountProvider; + use block::*; + use engines::{ + validator_set::{SimpleList, TestSet}, + Engine, EngineError, EthEngine, Seal, + }; + use error::{Error, 
ErrorKind}; + use ethereum_types::{Address, H256, H520, U256}; + use ethkey::Signature; + use hash::keccak; + use rlp::encode; + use spec::Spec; + use std::{ + collections::BTreeMap, + sync::{ + atomic::{AtomicUsize, Ordering as AtomicOrdering}, + Arc, + }, + }; + use test_helpers::{generate_dummy_client_with_spec, get_temp_state_db, TestNotify}; + use types::{ + header::Header, + transaction::{Action, Transaction}, + }; - fn aura(f: F) -> Arc where - F: FnOnce(&mut AuthorityRoundParams), - { - let mut params = AuthorityRoundParams { - step_duration: 1, - start_step: Some(1), - validators: Box::new(TestSet::default()), - validate_score_transition: 0, - validate_step_transition: 0, - immediate_transitions: true, - maximum_uncle_count_transition: 0, - maximum_uncle_count: 0, - empty_steps_transition: u64::max_value(), - maximum_empty_steps: 0, - block_reward: Default::default(), - block_reward_contract_transition: 0, - block_reward_contract: Default::default(), - strict_empty_steps_transition: 0, - }; + fn aura(f: F) -> Arc + where + F: FnOnce(&mut AuthorityRoundParams), + { + let mut params = AuthorityRoundParams { + step_duration: 1, + start_step: Some(1), + validators: Box::new(TestSet::default()), + validate_score_transition: 0, + validate_step_transition: 0, + immediate_transitions: true, + maximum_uncle_count_transition: 0, + maximum_uncle_count: 0, + empty_steps_transition: u64::max_value(), + maximum_empty_steps: 0, + block_reward: Default::default(), + block_reward_contract_transition: 0, + block_reward_contract: Default::default(), + strict_empty_steps_transition: 0, + }; - // mutate aura params - f(&mut params); + // mutate aura params + f(&mut params); - // create engine - let mut c_params = ::spec::CommonParams::default(); - c_params.gas_limit_bound_divisor = 5.into(); - let machine = ::machine::EthereumMachine::regular(c_params, Default::default()); - AuthorityRound::new(params, machine).unwrap() - } - - #[test] - fn has_valid_metadata() { - let engine 
= Spec::new_test_round().engine; - assert!(!engine.name().is_empty()); - } - - #[test] - fn can_return_schedule() { - let engine = Spec::new_test_round().engine; - let schedule = engine.schedule(10000000); - - assert!(schedule.stack_limit > 0); - } - - #[test] - fn can_do_signature_verification_fail() { - let engine = Spec::new_test_round().engine; - let mut header: Header = Header::default(); - header.set_seal(vec![encode(&H520::default())]); - - let verify_result = engine.verify_block_external(&header); - assert!(verify_result.is_err()); - } - - #[test] - fn generates_seal_and_does_not_double_propose() { - let tap = Arc::new(AccountProvider::transient_provider()); - let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); - let addr2 = tap.insert_account(keccak("2").into(), &"2".into()).unwrap(); - - let spec = Spec::new_test_round(); - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b1 = b1.close_and_lock().unwrap(); - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b2 = b2.close_and_lock().unwrap(); - - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - if let Seal::Regular(seal) = engine.generate_seal(&b1, &genesis_header) { - assert!(b1.clone().try_seal(engine, seal).is_ok()); - // Second proposal is forbidden. 
- assert!(engine.generate_seal(&b1, &genesis_header) == Seal::None); - } - - engine.set_signer(Box::new((tap, addr2, "2".into()))); - if let Seal::Regular(seal) = engine.generate_seal(&b2, &genesis_header) { - assert!(b2.clone().try_seal(engine, seal).is_ok()); - // Second proposal is forbidden. - assert!(engine.generate_seal(&b2, &genesis_header) == Seal::None); - } - } - - #[test] - fn checks_difficulty_in_generate_seal() { - let tap = Arc::new(AccountProvider::transient_provider()); - let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); - let addr2 = tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); - - let spec = Spec::new_test_round(); - let engine = &*spec.engine; - - let genesis_header = spec.genesis_header(); - let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let last_hashes = Arc::new(vec![genesis_header.hash()]); - - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b1 = b1.close_and_lock().unwrap(); - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b2 = b2.close_and_lock().unwrap(); - - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - match engine.generate_seal(&b1, &genesis_header) { - Seal::None | Seal::Proposal(_) => panic!("wrong seal"), - Seal::Regular(_) => { - engine.step(); - - engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); - match engine.generate_seal(&b2, &genesis_header) { - Seal::Regular(_) | Seal::Proposal(_) => panic!("sealed despite wrong difficulty"), - Seal::None => {} - } - } - } - } - - #[test] - fn proposer_switching() { - let tap = AccountProvider::transient_provider(); - let addr = 
tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); - let mut parent_header: Header = Header::default(); - parent_header.set_seal(vec![encode(&0usize)]); - parent_header.set_gas_limit("222222".parse::().unwrap()); - let mut header: Header = Header::default(); - header.set_number(1); - header.set_gas_limit("222222".parse::().unwrap()); - header.set_author(addr); - - let engine = Spec::new_test_round().engine; - - // Two validators. - // Spec starts with step 2. - header.set_difficulty(calculate_score(0, 2, 0)); - let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap(); - header.set_seal(vec![encode(&2usize), encode(&(&*signature as &[u8]))]); - assert!(engine.verify_block_family(&header, &parent_header).is_ok()); - assert!(engine.verify_block_external(&header).is_err()); - header.set_difficulty(calculate_score(0, 1, 0)); - let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap(); - header.set_seal(vec![encode(&1usize), encode(&(&*signature as &[u8]))]); - assert!(engine.verify_block_family(&header, &parent_header).is_ok()); - assert!(engine.verify_block_external(&header).is_ok()); - } - - #[test] - fn rejects_future_block() { - let tap = AccountProvider::transient_provider(); - let addr = tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); - - let mut parent_header: Header = Header::default(); - parent_header.set_seal(vec![encode(&0usize)]); - parent_header.set_gas_limit("222222".parse::().unwrap()); - let mut header: Header = Header::default(); - header.set_number(1); - header.set_gas_limit("222222".parse::().unwrap()); - header.set_author(addr); - - let engine = Spec::new_test_round().engine; - - // Two validators. - // Spec starts with step 2. 
- header.set_difficulty(calculate_score(0, 1, 0)); - let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap(); - header.set_seal(vec![encode(&1usize), encode(&(&*signature as &[u8]))]); - assert!(engine.verify_block_family(&header, &parent_header).is_ok()); - assert!(engine.verify_block_external(&header).is_ok()); - header.set_seal(vec![encode(&5usize), encode(&(&*signature as &[u8]))]); - assert!(engine.verify_block_basic(&header).is_err()); - } - - #[test] - fn rejects_step_backwards() { - let tap = AccountProvider::transient_provider(); - let addr = tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); - - let mut parent_header: Header = Header::default(); - parent_header.set_seal(vec![encode(&4usize)]); - parent_header.set_gas_limit("222222".parse::().unwrap()); - let mut header: Header = Header::default(); - header.set_number(1); - header.set_gas_limit("222222".parse::().unwrap()); - header.set_author(addr); - - let engine = Spec::new_test_round().engine; - - let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap(); - // Two validators. - // Spec starts with step 2. 
- header.set_seal(vec![encode(&5usize), encode(&(&*signature as &[u8]))]); - header.set_difficulty(calculate_score(4, 5, 0)); - assert!(engine.verify_block_family(&header, &parent_header).is_ok()); - header.set_seal(vec![encode(&3usize), encode(&(&*signature as &[u8]))]); - header.set_difficulty(calculate_score(4, 3, 0)); - assert!(engine.verify_block_family(&header, &parent_header).is_err()); - } - - #[test] - fn reports_skipped() { - let last_benign = Arc::new(AtomicUsize::new(0)); - let aura = aura(|p| { - p.validators = Box::new(TestSet::new(Default::default(), last_benign.clone())); - }); - - let mut parent_header: Header = Header::default(); - parent_header.set_seal(vec![encode(&1usize)]); - parent_header.set_gas_limit("222222".parse::().unwrap()); - let mut header: Header = Header::default(); - header.set_difficulty(calculate_score(1, 3, 0)); - header.set_gas_limit("222222".parse::().unwrap()); - header.set_seal(vec![encode(&3usize)]); - - // Do not report when signer not present. - assert!(aura.verify_block_family(&header, &parent_header).is_ok()); - assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 0); - - aura.set_signer(Box::new((Arc::new(AccountProvider::transient_provider()), Default::default(), "".into()))); - - // Do not report on steps skipped between genesis and first block. - header.set_number(1); - assert!(aura.verify_block_family(&header, &parent_header).is_ok()); - assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 0); - - // Report on skipped steps otherwise. 
- header.set_number(2); - assert!(aura.verify_block_family(&header, &parent_header).is_ok()); - assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 2); - } - - #[test] - fn test_uncles_transition() { - let aura = aura(|params| { - params.maximum_uncle_count_transition = 1; - }); - - assert_eq!(aura.maximum_uncle_count(0), 2); - assert_eq!(aura.maximum_uncle_count(1), 0); - assert_eq!(aura.maximum_uncle_count(100), 0); - } + // create engine + let mut c_params = ::spec::CommonParams::default(); + c_params.gas_limit_bound_divisor = 5.into(); + let machine = ::machine::EthereumMachine::regular(c_params, Default::default()); + AuthorityRound::new(params, machine).unwrap() + } #[test] - #[should_panic(expected="counter is too high")] + fn has_valid_metadata() { + let engine = Spec::new_test_round().engine; + assert!(!engine.name().is_empty()); + } + + #[test] + fn can_return_schedule() { + let engine = Spec::new_test_round().engine; + let schedule = engine.schedule(10000000); + + assert!(schedule.stack_limit > 0); + } + + #[test] + fn can_do_signature_verification_fail() { + let engine = Spec::new_test_round().engine; + let mut header: Header = Header::default(); + header.set_seal(vec![encode(&H520::default())]); + + let verify_result = engine.verify_block_external(&header); + assert!(verify_result.is_err()); + } + + #[test] + fn generates_seal_and_does_not_double_propose() { + let tap = Arc::new(AccountProvider::transient_provider()); + let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); + let addr2 = tap.insert_account(keccak("2").into(), &"2".into()).unwrap(); + + let spec = Spec::new_test_round(); + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db1 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let db2 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let b1 = OpenBlock::new( + engine, 
+ Default::default(), + false, + db1, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b1 = b1.close_and_lock().unwrap(); + let b2 = OpenBlock::new( + engine, + Default::default(), + false, + db2, + &genesis_header, + last_hashes, + addr2, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b2 = b2.close_and_lock().unwrap(); + + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + if let Seal::Regular(seal) = engine.generate_seal(&b1, &genesis_header) { + assert!(b1.clone().try_seal(engine, seal).is_ok()); + // Second proposal is forbidden. + assert!(engine.generate_seal(&b1, &genesis_header) == Seal::None); + } + + engine.set_signer(Box::new((tap, addr2, "2".into()))); + if let Seal::Regular(seal) = engine.generate_seal(&b2, &genesis_header) { + assert!(b2.clone().try_seal(engine, seal).is_ok()); + // Second proposal is forbidden. + assert!(engine.generate_seal(&b2, &genesis_header) == Seal::None); + } + } + + #[test] + fn checks_difficulty_in_generate_seal() { + let tap = Arc::new(AccountProvider::transient_provider()); + let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); + let addr2 = tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); + + let spec = Spec::new_test_round(); + let engine = &*spec.engine; + + let genesis_header = spec.genesis_header(); + let db1 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let db2 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + + let b1 = OpenBlock::new( + engine, + Default::default(), + false, + db1, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b1 = b1.close_and_lock().unwrap(); + let b2 = OpenBlock::new( + engine, + Default::default(), + false, 
+ db2, + &genesis_header, + last_hashes, + addr2, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b2 = b2.close_and_lock().unwrap(); + + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + match engine.generate_seal(&b1, &genesis_header) { + Seal::None | Seal::Proposal(_) => panic!("wrong seal"), + Seal::Regular(_) => { + engine.step(); + + engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); + match engine.generate_seal(&b2, &genesis_header) { + Seal::Regular(_) | Seal::Proposal(_) => { + panic!("sealed despite wrong difficulty") + } + Seal::None => {} + } + } + } + } + + #[test] + fn proposer_switching() { + let tap = AccountProvider::transient_provider(); + let addr = tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); + let mut parent_header: Header = Header::default(); + parent_header.set_seal(vec![encode(&0usize)]); + parent_header.set_gas_limit("222222".parse::().unwrap()); + let mut header: Header = Header::default(); + header.set_number(1); + header.set_gas_limit("222222".parse::().unwrap()); + header.set_author(addr); + + let engine = Spec::new_test_round().engine; + + // Two validators. + // Spec starts with step 2. 
+ header.set_difficulty(calculate_score(0, 2, 0)); + let signature = tap + .sign(addr, Some("0".into()), header.bare_hash()) + .unwrap(); + header.set_seal(vec![encode(&2usize), encode(&(&*signature as &[u8]))]); + assert!(engine.verify_block_family(&header, &parent_header).is_ok()); + assert!(engine.verify_block_external(&header).is_err()); + header.set_difficulty(calculate_score(0, 1, 0)); + let signature = tap + .sign(addr, Some("0".into()), header.bare_hash()) + .unwrap(); + header.set_seal(vec![encode(&1usize), encode(&(&*signature as &[u8]))]); + assert!(engine.verify_block_family(&header, &parent_header).is_ok()); + assert!(engine.verify_block_external(&header).is_ok()); + } + + #[test] + fn rejects_future_block() { + let tap = AccountProvider::transient_provider(); + let addr = tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); + + let mut parent_header: Header = Header::default(); + parent_header.set_seal(vec![encode(&0usize)]); + parent_header.set_gas_limit("222222".parse::().unwrap()); + let mut header: Header = Header::default(); + header.set_number(1); + header.set_gas_limit("222222".parse::().unwrap()); + header.set_author(addr); + + let engine = Spec::new_test_round().engine; + + // Two validators. + // Spec starts with step 2. 
+ header.set_difficulty(calculate_score(0, 1, 0)); + let signature = tap + .sign(addr, Some("0".into()), header.bare_hash()) + .unwrap(); + header.set_seal(vec![encode(&1usize), encode(&(&*signature as &[u8]))]); + assert!(engine.verify_block_family(&header, &parent_header).is_ok()); + assert!(engine.verify_block_external(&header).is_ok()); + header.set_seal(vec![encode(&5usize), encode(&(&*signature as &[u8]))]); + assert!(engine.verify_block_basic(&header).is_err()); + } + + #[test] + fn rejects_step_backwards() { + let tap = AccountProvider::transient_provider(); + let addr = tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); + + let mut parent_header: Header = Header::default(); + parent_header.set_seal(vec![encode(&4usize)]); + parent_header.set_gas_limit("222222".parse::().unwrap()); + let mut header: Header = Header::default(); + header.set_number(1); + header.set_gas_limit("222222".parse::().unwrap()); + header.set_author(addr); + + let engine = Spec::new_test_round().engine; + + let signature = tap + .sign(addr, Some("0".into()), header.bare_hash()) + .unwrap(); + // Two validators. + // Spec starts with step 2. 
+ header.set_seal(vec![encode(&5usize), encode(&(&*signature as &[u8]))]); + header.set_difficulty(calculate_score(4, 5, 0)); + assert!(engine.verify_block_family(&header, &parent_header).is_ok()); + header.set_seal(vec![encode(&3usize), encode(&(&*signature as &[u8]))]); + header.set_difficulty(calculate_score(4, 3, 0)); + assert!(engine.verify_block_family(&header, &parent_header).is_err()); + } + + #[test] + fn reports_skipped() { + let last_benign = Arc::new(AtomicUsize::new(0)); + let aura = aura(|p| { + p.validators = Box::new(TestSet::new(Default::default(), last_benign.clone())); + }); + + let mut parent_header: Header = Header::default(); + parent_header.set_seal(vec![encode(&1usize)]); + parent_header.set_gas_limit("222222".parse::().unwrap()); + let mut header: Header = Header::default(); + header.set_difficulty(calculate_score(1, 3, 0)); + header.set_gas_limit("222222".parse::().unwrap()); + header.set_seal(vec![encode(&3usize)]); + + // Do not report when signer not present. + assert!(aura.verify_block_family(&header, &parent_header).is_ok()); + assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 0); + + aura.set_signer(Box::new(( + Arc::new(AccountProvider::transient_provider()), + Default::default(), + "".into(), + ))); + + // Do not report on steps skipped between genesis and first block. + header.set_number(1); + assert!(aura.verify_block_family(&header, &parent_header).is_ok()); + assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 0); + + // Report on skipped steps otherwise. 
+ header.set_number(2); + assert!(aura.verify_block_family(&header, &parent_header).is_ok()); + assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 2); + } + + #[test] + fn test_uncles_transition() { + let aura = aura(|params| { + params.maximum_uncle_count_transition = 1; + }); + + assert_eq!(aura.maximum_uncle_count(0), 2); + assert_eq!(aura.maximum_uncle_count(1), 0); + assert_eq!(aura.maximum_uncle_count(100), 0); + } + + #[test] + #[should_panic(expected = "counter is too high")] fn test_counter_increment_too_high() { use super::Step; let step = Step { @@ -1824,522 +2137,684 @@ mod tests { duration: 1, }; step.increment(); - } - - #[test] - #[should_panic(expected="counter is too high")] - fn test_counter_duration_remaining_too_high() { - use super::Step; - let step = Step { - calibrate: false, - inner: AtomicUsize::new(::std::usize::MAX), - duration: 1, - }; - step.duration_remaining(); - } - - #[test] - #[should_panic(expected="authority_round: step duration can't be zero")] - fn test_step_duration_zero() { - aura(|params| { - params.step_duration = 0; - }); - } - - fn setup_empty_steps() -> (Spec, Arc, Vec
) { - let spec = Spec::new_test_round_empty_steps(); - let tap = Arc::new(AccountProvider::transient_provider()); - - let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); - let addr2 = tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); - - let accounts = vec![addr1, addr2]; - - (spec, tap, accounts) - } - - fn empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> EmptyStep { - let empty_step_rlp = super::empty_step_rlp(step, parent_hash); - let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into(); - let parent_hash = parent_hash.clone(); - EmptyStep { step, signature, parent_hash } - } - - fn sealed_empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> SealedEmptyStep { - let empty_step_rlp = super::empty_step_rlp(step, parent_hash); - let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into(); - SealedEmptyStep { signature, step } - } - - fn set_empty_steps_seal(header: &mut Header, step: u64, block_signature: ðkey::Signature, empty_steps: &[SealedEmptyStep]) { - header.set_seal(vec![ - encode(&(step as usize)), - encode(&(&**block_signature as &[u8])), - ::rlp::encode_list(&empty_steps), - ]); - } - - fn assert_insufficient_proof(result: Result, contains: &str) { - match result { - Err(Error(ErrorKind::Engine(EngineError::InsufficientProof(ref s)), _)) =>{ - assert!(s.contains(contains), "Expected {:?} to contain {:?}", s, contains); - }, - e => assert!(false, "Unexpected result: {:?}", e), - } - } - - #[test] - fn broadcast_empty_step_message() { - let (spec, tap, accounts) = setup_empty_steps(); - - let addr1 = accounts[0]; - - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - - let last_hashes = Arc::new(vec![genesis_header.hash()]); - - let client = generate_dummy_client_with_spec(Spec::new_test_round_empty_steps); - let notify = Arc::new(TestNotify::default()); - 
client.add_notify(notify.clone()); - engine.register_client(Arc::downgrade(&client) as _); - - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b1 = b1.close_and_lock().unwrap(); - - // the block is empty so we don't seal and instead broadcast an empty step message - assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); - - // spec starts with step 2 - let empty_step_rlp = encode(&empty_step(engine, 2, &genesis_header.hash())); - - // we've received the message - assert!(notify.messages.read().contains(&empty_step_rlp)); - let len = notify.messages.read().len(); - - // make sure that we don't generate empty step for the second time - assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); - assert_eq!(len, notify.messages.read().len()); - } - - #[test] - fn seal_with_empty_steps() { - let (spec, tap, accounts) = setup_empty_steps(); - - let addr1 = accounts[0]; - let addr2 = accounts[1]; - - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - - let last_hashes = Arc::new(vec![genesis_header.hash()]); - - let client = generate_dummy_client_with_spec(Spec::new_test_round_empty_steps); - let notify = Arc::new(TestNotify::default()); - client.add_notify(notify.clone()); - engine.register_client(Arc::downgrade(&client) as _); - - // step 2 - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b1 = b1.close_and_lock().unwrap(); - - // since the block is empty it isn't sealed and we generate empty steps - 
engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); - engine.step(); - - // step 3 - let mut b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - b2.push_transaction(Transaction { - action: Action::Create, - nonce: U256::from(0), - gas_price: U256::from(3000), - gas: U256::from(53_000), - value: U256::from(1), - data: vec![], - }.fake_sign(addr2), None).unwrap(); - let b2 = b2.close_and_lock().unwrap(); - - // we will now seal a block with 1tx and include the accumulated empty step message - engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); - if let Seal::Regular(seal) = engine.generate_seal(&b2, &genesis_header) { - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - let empty_step2 = sealed_empty_step(engine, 2, &genesis_header.hash()); - let empty_steps = ::rlp::encode_list(&vec![empty_step2]); - - assert_eq!(seal[0], encode(&3usize)); - assert_eq!(seal[2], empty_steps); - } - } - - #[test] - fn seal_empty_block_with_empty_steps() { - let (spec, tap, accounts) = setup_empty_steps(); - - let addr1 = accounts[0]; - let addr2 = accounts[1]; - - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let db3 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - - let last_hashes = Arc::new(vec![genesis_header.hash()]); - - let client = generate_dummy_client_with_spec(Spec::new_test_round_empty_steps); - let notify = Arc::new(TestNotify::default()); - client.add_notify(notify.clone()); - engine.register_client(Arc::downgrade(&client) as _); - - // step 2 - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, 
last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b1 = b1.close_and_lock().unwrap(); - - // since the block is empty it isn't sealed and we generate empty steps - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); - engine.step(); - - // step 3 - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b2 = b2.close_and_lock().unwrap(); - engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); - assert_eq!(engine.generate_seal(&b2, &genesis_header), Seal::None); - engine.step(); - - // step 4 - // the spec sets the maximum_empty_steps to 2 so we will now seal an empty block and include the empty step messages - let b3 = OpenBlock::new(engine, Default::default(), false, db3, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b3 = b3.close_and_lock().unwrap(); - - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - if let Seal::Regular(seal) = engine.generate_seal(&b3, &genesis_header) { - let empty_step2 = sealed_empty_step(engine, 2, &genesis_header.hash()); - engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); - let empty_step3 = sealed_empty_step(engine, 3, &genesis_header.hash()); - - let empty_steps = ::rlp::encode_list(&vec![empty_step2, empty_step3]); - - assert_eq!(seal[0], encode(&4usize)); - assert_eq!(seal[2], empty_steps); - } - } - - #[test] - fn reward_empty_steps() { - let (spec, tap, accounts) = setup_empty_steps(); - - let addr1 = accounts[0]; - - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - - let 
last_hashes = Arc::new(vec![genesis_header.hash()]); - - let client = generate_dummy_client_with_spec(Spec::new_test_round_empty_steps); - engine.register_client(Arc::downgrade(&client) as _); - - // step 2 - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b1 = b1.close_and_lock().unwrap(); - - // since the block is empty it isn't sealed and we generate empty steps - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); - engine.step(); - - // step 3 - // the signer of the accumulated empty step message should be rewarded - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let addr1_balance = b2.state.balance(&addr1).unwrap(); - - // after closing the block `addr1` should be reward twice, one for the included empty step message and another for block creation - let b2 = b2.close_and_lock().unwrap(); - - // the spec sets the block reward to 10 - assert_eq!(b2.state.balance(&addr1).unwrap(), addr1_balance + (10 * 2)) - } - - #[test] - fn verify_seal_empty_steps() { - let (spec, tap, accounts) = setup_empty_steps(); - let addr1 = accounts[0]; - let addr2 = accounts[1]; - let engine = &*spec.engine; - - let mut parent_header: Header = Header::default(); - parent_header.set_seal(vec![encode(&0usize)]); - parent_header.set_gas_limit("222222".parse::().unwrap()); - - let mut header: Header = Header::default(); - header.set_parent_hash(parent_header.hash()); - header.set_number(1); - header.set_gas_limit("222222".parse::().unwrap()); - header.set_author(addr1); - - let signature = tap.sign(addr1, Some("1".into()), header.bare_hash()).unwrap(); - - // empty step with invalid step - let empty_steps = vec![SealedEmptyStep { signature: 0.into(), 
step: 2 }]; - set_empty_steps_seal(&mut header, 2, &signature, &empty_steps); - - assert_insufficient_proof( - engine.verify_block_family(&header, &parent_header), - "invalid step" - ); - - // empty step with invalid signature - let empty_steps = vec![SealedEmptyStep { signature: 0.into(), step: 1 }]; - set_empty_steps_seal(&mut header, 2, &signature, &empty_steps); - - assert_insufficient_proof( - engine.verify_block_family(&header, &parent_header), - "invalid empty step proof" - ); - - // empty step with valid signature from incorrect proposer for step - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - let empty_steps = vec![sealed_empty_step(engine, 1, &parent_header.hash())]; - set_empty_steps_seal(&mut header, 2, &signature, &empty_steps); - - assert_insufficient_proof( - engine.verify_block_family(&header, &parent_header), - "invalid empty step proof" - ); - - // valid empty steps - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - let empty_step2 = sealed_empty_step(engine, 2, &parent_header.hash()); - engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); - let empty_step3 = sealed_empty_step(engine, 3, &parent_header.hash()); - - let empty_steps = vec![empty_step2, empty_step3]; - header.set_difficulty(calculate_score(0, 4, 2)); - let signature = tap.sign(addr1, Some("1".into()), header.bare_hash()).unwrap(); - set_empty_steps_seal(&mut header, 4, &signature, &empty_steps); - - assert!(engine.verify_block_family(&header, &parent_header).is_ok()); - } - - #[test] - fn block_reward_contract() { - let spec = Spec::new_test_round_block_reward_contract(); - let tap = Arc::new(AccountProvider::transient_provider()); - - let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); - - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let db2 = spec.ensure_db_good(get_temp_state_db(), 
&Default::default()).unwrap(); - - let last_hashes = Arc::new(vec![genesis_header.hash()]); - - let client = generate_dummy_client_with_spec(Spec::new_test_round_block_reward_contract); - engine.register_client(Arc::downgrade(&client) as _); - - // step 2 - let b1 = OpenBlock::new( - engine, - Default::default(), - false, - db1, - &genesis_header, - last_hashes.clone(), - addr1, - (3141562.into(), 31415620.into()), - vec![], - false, - None, - ).unwrap(); - let b1 = b1.close_and_lock().unwrap(); - - // since the block is empty it isn't sealed and we generate empty steps - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); - engine.step(); - - // step 3 - // the signer of the accumulated empty step message should be rewarded - let b2 = OpenBlock::new( - engine, - Default::default(), - false, - db2, - &genesis_header, - last_hashes.clone(), - addr1, - (3141562.into(), 31415620.into()), - vec![], - false, - None, - ).unwrap(); - let addr1_balance = b2.state.balance(&addr1).unwrap(); - - // after closing the block `addr1` should be reward twice, one for the included empty step - // message and another for block creation - let b2 = b2.close_and_lock().unwrap(); - - // the contract rewards (1000 + kind) for each benefactor/reward kind - assert_eq!( - b2.state.balance(&addr1).unwrap(), - addr1_balance + (1000 + 0) + (1000 + 2), - ) - } - - #[test] - fn extra_info_from_seal() { - let (spec, tap, accounts) = setup_empty_steps(); - let engine = &*spec.engine; - - let addr1 = accounts[0]; - engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - - let mut header: Header = Header::default(); - let empty_step = empty_step(engine, 1, &header.parent_hash()); - let sealed_empty_step = empty_step.sealed(); - - header.set_number(2); - header.set_seal(vec![ - encode(&2usize), - encode(&H520::default()), - ::rlp::encode_list(&vec![sealed_empty_step]), - ]); - - let info = 
engine.extra_info(&header); - - let mut expected = BTreeMap::default(); - expected.insert("step".into(), "2".into()); - expected.insert("signature".into(), Signature::from(H520::default()).to_string()); - expected.insert("emptySteps".into(), format!("[{}]", empty_step)); - - assert_eq!(info, expected); - - header.set_seal(vec![]); - - assert_eq!( - engine.extra_info(&header), - BTreeMap::default(), - ); - } - - #[test] - fn test_empty_steps() { - let engine = aura(|p| { - p.step_duration = 4; - p.empty_steps_transition = 0; - p.maximum_empty_steps = 0; - }); - - let parent_hash: H256 = 1.into(); - let signature = H520::default(); - let step = |step: u64| EmptyStep { - step, - parent_hash, - signature, - }; - - engine.handle_empty_step_message(step(1)); - engine.handle_empty_step_message(step(3)); - engine.handle_empty_step_message(step(2)); - engine.handle_empty_step_message(step(1)); - - assert_eq!(engine.empty_steps(0, 4, parent_hash), vec![step(1), step(2), step(3)]); - assert_eq!(engine.empty_steps(2, 3, parent_hash), vec![]); - assert_eq!(engine.empty_steps(2, 4, parent_hash), vec![step(3)]); - - engine.clear_empty_steps(2); - - assert_eq!(engine.empty_steps(0, 3, parent_hash), vec![]); - assert_eq!(engine.empty_steps(0, 4, parent_hash), vec![step(3)]); - } - - #[test] - fn should_reject_duplicate_empty_steps() { - // given - let (_spec, tap, accounts) = setup_empty_steps(); - let engine = aura(|p| { - p.validators = Box::new(SimpleList::new(accounts.clone())); - p.step_duration = 4; - p.empty_steps_transition = 0; - p.maximum_empty_steps = 0; - }); - - let mut parent = Header::default(); - parent.set_seal(vec![encode(&0usize)]); - - let mut header = Header::default(); - header.set_number(parent.number() + 1); - header.set_parent_hash(parent.hash()); - header.set_author(accounts[0]); - - // when - engine.set_signer(Box::new((tap.clone(), accounts[1], "0".into()))); - let empty_steps = vec![ - sealed_empty_step(&*engine, 1, &parent.hash()), - 
sealed_empty_step(&*engine, 1, &parent.hash()), - ]; - let step = 2; - let signature = tap.sign(accounts[0], Some("1".into()), header.bare_hash()).unwrap(); - set_empty_steps_seal(&mut header, step, &signature, &empty_steps); - header.set_difficulty(calculate_score(0, step, empty_steps.len())); - - // then - assert_insufficient_proof( - engine.verify_block_family(&header, &parent), - "duplicate empty step" - ); - } - - #[test] - fn should_reject_empty_steps_out_of_order() { - // given - let (_spec, tap, accounts) = setup_empty_steps(); - let engine = aura(|p| { - p.validators = Box::new(SimpleList::new(accounts.clone())); - p.step_duration = 4; - p.empty_steps_transition = 0; - p.maximum_empty_steps = 0; - }); - - let mut parent = Header::default(); - parent.set_seal(vec![encode(&0usize)]); - - let mut header = Header::default(); - header.set_number(parent.number() + 1); - header.set_parent_hash(parent.hash()); - header.set_author(accounts[0]); - - // when - engine.set_signer(Box::new((tap.clone(), accounts[1], "0".into()))); - let es1 = sealed_empty_step(&*engine, 1, &parent.hash()); - engine.set_signer(Box::new((tap.clone(), accounts[0], "1".into()))); - let es2 = sealed_empty_step(&*engine, 2, &parent.hash()); - - let mut empty_steps = vec![es2, es1]; - - let step = 3; - let signature = tap.sign(accounts[1], Some("0".into()), header.bare_hash()).unwrap(); - set_empty_steps_seal(&mut header, step, &signature, &empty_steps); - header.set_difficulty(calculate_score(0, step, empty_steps.len())); - - // then make sure it's rejected because of the order - assert_insufficient_proof( - engine.verify_block_family(&header, &parent), - "unordered empty step" - ); - - // now try to fix the order - empty_steps.reverse(); - set_empty_steps_seal(&mut header, step, &signature, &empty_steps); - assert_eq!(engine.verify_block_family(&header, &parent).unwrap(), ()); - } + } + + #[test] + #[should_panic(expected = "counter is too high")] + fn 
test_counter_duration_remaining_too_high() { + use super::Step; + let step = Step { + calibrate: false, + inner: AtomicUsize::new(::std::usize::MAX), + duration: 1, + }; + step.duration_remaining(); + } + + #[test] + #[should_panic(expected = "authority_round: step duration can't be zero")] + fn test_step_duration_zero() { + aura(|params| { + params.step_duration = 0; + }); + } + + fn setup_empty_steps() -> (Spec, Arc, Vec
) { + let spec = Spec::new_test_round_empty_steps(); + let tap = Arc::new(AccountProvider::transient_provider()); + + let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); + let addr2 = tap.insert_account(keccak("0").into(), &"0".into()).unwrap(); + + let accounts = vec![addr1, addr2]; + + (spec, tap, accounts) + } + + fn empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> EmptyStep { + let empty_step_rlp = super::empty_step_rlp(step, parent_hash); + let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into(); + let parent_hash = parent_hash.clone(); + EmptyStep { + step, + signature, + parent_hash, + } + } + + fn sealed_empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> SealedEmptyStep { + let empty_step_rlp = super::empty_step_rlp(step, parent_hash); + let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into(); + SealedEmptyStep { signature, step } + } + + fn set_empty_steps_seal( + header: &mut Header, + step: u64, + block_signature: ðkey::Signature, + empty_steps: &[SealedEmptyStep], + ) { + header.set_seal(vec![ + encode(&(step as usize)), + encode(&(&**block_signature as &[u8])), + ::rlp::encode_list(&empty_steps), + ]); + } + + fn assert_insufficient_proof(result: Result, contains: &str) { + match result { + Err(Error(ErrorKind::Engine(EngineError::InsufficientProof(ref s)), _)) => { + assert!( + s.contains(contains), + "Expected {:?} to contain {:?}", + s, + contains + ); + } + e => assert!(false, "Unexpected result: {:?}", e), + } + } + + #[test] + fn broadcast_empty_step_message() { + let (spec, tap, accounts) = setup_empty_steps(); + + let addr1 = accounts[0]; + + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db1 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + + let last_hashes = Arc::new(vec![genesis_header.hash()]); + + let client = generate_dummy_client_with_spec(Spec::new_test_round_empty_steps); + let notify = 
Arc::new(TestNotify::default()); + client.add_notify(notify.clone()); + engine.register_client(Arc::downgrade(&client) as _); + + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + + let b1 = OpenBlock::new( + engine, + Default::default(), + false, + db1, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b1 = b1.close_and_lock().unwrap(); + + // the block is empty so we don't seal and instead broadcast an empty step message + assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); + + // spec starts with step 2 + let empty_step_rlp = encode(&empty_step(engine, 2, &genesis_header.hash())); + + // we've received the message + assert!(notify.messages.read().contains(&empty_step_rlp)); + let len = notify.messages.read().len(); + + // make sure that we don't generate empty step for the second time + assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); + assert_eq!(len, notify.messages.read().len()); + } + + #[test] + fn seal_with_empty_steps() { + let (spec, tap, accounts) = setup_empty_steps(); + + let addr1 = accounts[0]; + let addr2 = accounts[1]; + + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db1 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let db2 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + + let last_hashes = Arc::new(vec![genesis_header.hash()]); + + let client = generate_dummy_client_with_spec(Spec::new_test_round_empty_steps); + let notify = Arc::new(TestNotify::default()); + client.add_notify(notify.clone()); + engine.register_client(Arc::downgrade(&client) as _); + + // step 2 + let b1 = OpenBlock::new( + engine, + Default::default(), + false, + db1, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b1 = b1.close_and_lock().unwrap(); + 
+ // since the block is empty it isn't sealed and we generate empty steps + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); + engine.step(); + + // step 3 + let mut b2 = OpenBlock::new( + engine, + Default::default(), + false, + db2, + &genesis_header, + last_hashes.clone(), + addr2, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + b2.push_transaction( + Transaction { + action: Action::Create, + nonce: U256::from(0), + gas_price: U256::from(3000), + gas: U256::from(53_000), + value: U256::from(1), + data: vec![], + } + .fake_sign(addr2), + None, + ) + .unwrap(); + let b2 = b2.close_and_lock().unwrap(); + + // we will now seal a block with 1tx and include the accumulated empty step message + engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); + if let Seal::Regular(seal) = engine.generate_seal(&b2, &genesis_header) { + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + let empty_step2 = sealed_empty_step(engine, 2, &genesis_header.hash()); + let empty_steps = ::rlp::encode_list(&vec![empty_step2]); + + assert_eq!(seal[0], encode(&3usize)); + assert_eq!(seal[2], empty_steps); + } + } + + #[test] + fn seal_empty_block_with_empty_steps() { + let (spec, tap, accounts) = setup_empty_steps(); + + let addr1 = accounts[0]; + let addr2 = accounts[1]; + + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db1 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let db2 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let db3 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + + let last_hashes = Arc::new(vec![genesis_header.hash()]); + + let client = generate_dummy_client_with_spec(Spec::new_test_round_empty_steps); + let notify = Arc::new(TestNotify::default()); + client.add_notify(notify.clone()); + 
engine.register_client(Arc::downgrade(&client) as _); + + // step 2 + let b1 = OpenBlock::new( + engine, + Default::default(), + false, + db1, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b1 = b1.close_and_lock().unwrap(); + + // since the block is empty it isn't sealed and we generate empty steps + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); + engine.step(); + + // step 3 + let b2 = OpenBlock::new( + engine, + Default::default(), + false, + db2, + &genesis_header, + last_hashes.clone(), + addr2, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b2 = b2.close_and_lock().unwrap(); + engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); + assert_eq!(engine.generate_seal(&b2, &genesis_header), Seal::None); + engine.step(); + + // step 4 + // the spec sets the maximum_empty_steps to 2 so we will now seal an empty block and include the empty step messages + let b3 = OpenBlock::new( + engine, + Default::default(), + false, + db3, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b3 = b3.close_and_lock().unwrap(); + + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + if let Seal::Regular(seal) = engine.generate_seal(&b3, &genesis_header) { + let empty_step2 = sealed_empty_step(engine, 2, &genesis_header.hash()); + engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); + let empty_step3 = sealed_empty_step(engine, 3, &genesis_header.hash()); + + let empty_steps = ::rlp::encode_list(&vec![empty_step2, empty_step3]); + + assert_eq!(seal[0], encode(&4usize)); + assert_eq!(seal[2], empty_steps); + } + } + + #[test] + fn reward_empty_steps() { + let (spec, tap, accounts) = setup_empty_steps(); + + let addr1 = accounts[0]; + + let engine = 
&*spec.engine; + let genesis_header = spec.genesis_header(); + let db1 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let db2 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + + let last_hashes = Arc::new(vec![genesis_header.hash()]); + + let client = generate_dummy_client_with_spec(Spec::new_test_round_empty_steps); + engine.register_client(Arc::downgrade(&client) as _); + + // step 2 + let b1 = OpenBlock::new( + engine, + Default::default(), + false, + db1, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b1 = b1.close_and_lock().unwrap(); + + // since the block is empty it isn't sealed and we generate empty steps + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); + engine.step(); + + // step 3 + // the signer of the accumulated empty step message should be rewarded + let b2 = OpenBlock::new( + engine, + Default::default(), + false, + db2, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let addr1_balance = b2.state.balance(&addr1).unwrap(); + + // after closing the block `addr1` should be reward twice, one for the included empty step message and another for block creation + let b2 = b2.close_and_lock().unwrap(); + + // the spec sets the block reward to 10 + assert_eq!(b2.state.balance(&addr1).unwrap(), addr1_balance + (10 * 2)) + } + + #[test] + fn verify_seal_empty_steps() { + let (spec, tap, accounts) = setup_empty_steps(); + let addr1 = accounts[0]; + let addr2 = accounts[1]; + let engine = &*spec.engine; + + let mut parent_header: Header = Header::default(); + parent_header.set_seal(vec![encode(&0usize)]); + parent_header.set_gas_limit("222222".parse::().unwrap()); + + let mut header: Header = Header::default(); + 
header.set_parent_hash(parent_header.hash()); + header.set_number(1); + header.set_gas_limit("222222".parse::().unwrap()); + header.set_author(addr1); + + let signature = tap + .sign(addr1, Some("1".into()), header.bare_hash()) + .unwrap(); + + // empty step with invalid step + let empty_steps = vec![SealedEmptyStep { + signature: 0.into(), + step: 2, + }]; + set_empty_steps_seal(&mut header, 2, &signature, &empty_steps); + + assert_insufficient_proof( + engine.verify_block_family(&header, &parent_header), + "invalid step", + ); + + // empty step with invalid signature + let empty_steps = vec![SealedEmptyStep { + signature: 0.into(), + step: 1, + }]; + set_empty_steps_seal(&mut header, 2, &signature, &empty_steps); + + assert_insufficient_proof( + engine.verify_block_family(&header, &parent_header), + "invalid empty step proof", + ); + + // empty step with valid signature from incorrect proposer for step + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + let empty_steps = vec![sealed_empty_step(engine, 1, &parent_header.hash())]; + set_empty_steps_seal(&mut header, 2, &signature, &empty_steps); + + assert_insufficient_proof( + engine.verify_block_family(&header, &parent_header), + "invalid empty step proof", + ); + + // valid empty steps + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + let empty_step2 = sealed_empty_step(engine, 2, &parent_header.hash()); + engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); + let empty_step3 = sealed_empty_step(engine, 3, &parent_header.hash()); + + let empty_steps = vec![empty_step2, empty_step3]; + header.set_difficulty(calculate_score(0, 4, 2)); + let signature = tap + .sign(addr1, Some("1".into()), header.bare_hash()) + .unwrap(); + set_empty_steps_seal(&mut header, 4, &signature, &empty_steps); + + assert!(engine.verify_block_family(&header, &parent_header).is_ok()); + } + + #[test] + fn block_reward_contract() { + let spec = Spec::new_test_round_block_reward_contract(); + let tap 
= Arc::new(AccountProvider::transient_provider()); + + let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); + + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db1 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let db2 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + + let last_hashes = Arc::new(vec![genesis_header.hash()]); + + let client = generate_dummy_client_with_spec(Spec::new_test_round_block_reward_contract); + engine.register_client(Arc::downgrade(&client) as _); + + // step 2 + let b1 = OpenBlock::new( + engine, + Default::default(), + false, + db1, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b1 = b1.close_and_lock().unwrap(); + + // since the block is empty it isn't sealed and we generate empty steps + engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); + assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); + engine.step(); + + // step 3 + // the signer of the accumulated empty step message should be rewarded + let b2 = OpenBlock::new( + engine, + Default::default(), + false, + db2, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let addr1_balance = b2.state.balance(&addr1).unwrap(); + + // after closing the block `addr1` should be reward twice, one for the included empty step + // message and another for block creation + let b2 = b2.close_and_lock().unwrap(); + + // the contract rewards (1000 + kind) for each benefactor/reward kind + assert_eq!( + b2.state.balance(&addr1).unwrap(), + addr1_balance + (1000 + 0) + (1000 + 2), + ) + } + + #[test] + fn extra_info_from_seal() { + let (spec, tap, accounts) = setup_empty_steps(); + let engine = &*spec.engine; + + let addr1 = accounts[0]; + engine.set_signer(Box::new((tap.clone(), addr1, 
"1".into()))); + + let mut header: Header = Header::default(); + let empty_step = empty_step(engine, 1, &header.parent_hash()); + let sealed_empty_step = empty_step.sealed(); + + header.set_number(2); + header.set_seal(vec![ + encode(&2usize), + encode(&H520::default()), + ::rlp::encode_list(&vec![sealed_empty_step]), + ]); + + let info = engine.extra_info(&header); + + let mut expected = BTreeMap::default(); + expected.insert("step".into(), "2".into()); + expected.insert( + "signature".into(), + Signature::from(H520::default()).to_string(), + ); + expected.insert("emptySteps".into(), format!("[{}]", empty_step)); + + assert_eq!(info, expected); + + header.set_seal(vec![]); + + assert_eq!(engine.extra_info(&header), BTreeMap::default(),); + } + + #[test] + fn test_empty_steps() { + let engine = aura(|p| { + p.step_duration = 4; + p.empty_steps_transition = 0; + p.maximum_empty_steps = 0; + }); + + let parent_hash: H256 = 1.into(); + let signature = H520::default(); + let step = |step: u64| EmptyStep { + step, + parent_hash, + signature, + }; + + engine.handle_empty_step_message(step(1)); + engine.handle_empty_step_message(step(3)); + engine.handle_empty_step_message(step(2)); + engine.handle_empty_step_message(step(1)); + + assert_eq!( + engine.empty_steps(0, 4, parent_hash), + vec![step(1), step(2), step(3)] + ); + assert_eq!(engine.empty_steps(2, 3, parent_hash), vec![]); + assert_eq!(engine.empty_steps(2, 4, parent_hash), vec![step(3)]); + + engine.clear_empty_steps(2); + + assert_eq!(engine.empty_steps(0, 3, parent_hash), vec![]); + assert_eq!(engine.empty_steps(0, 4, parent_hash), vec![step(3)]); + } + + #[test] + fn should_reject_duplicate_empty_steps() { + // given + let (_spec, tap, accounts) = setup_empty_steps(); + let engine = aura(|p| { + p.validators = Box::new(SimpleList::new(accounts.clone())); + p.step_duration = 4; + p.empty_steps_transition = 0; + p.maximum_empty_steps = 0; + }); + + let mut parent = Header::default(); + 
parent.set_seal(vec![encode(&0usize)]); + + let mut header = Header::default(); + header.set_number(parent.number() + 1); + header.set_parent_hash(parent.hash()); + header.set_author(accounts[0]); + + // when + engine.set_signer(Box::new((tap.clone(), accounts[1], "0".into()))); + let empty_steps = vec![ + sealed_empty_step(&*engine, 1, &parent.hash()), + sealed_empty_step(&*engine, 1, &parent.hash()), + ]; + let step = 2; + let signature = tap + .sign(accounts[0], Some("1".into()), header.bare_hash()) + .unwrap(); + set_empty_steps_seal(&mut header, step, &signature, &empty_steps); + header.set_difficulty(calculate_score(0, step, empty_steps.len())); + + // then + assert_insufficient_proof( + engine.verify_block_family(&header, &parent), + "duplicate empty step", + ); + } + + #[test] + fn should_reject_empty_steps_out_of_order() { + // given + let (_spec, tap, accounts) = setup_empty_steps(); + let engine = aura(|p| { + p.validators = Box::new(SimpleList::new(accounts.clone())); + p.step_duration = 4; + p.empty_steps_transition = 0; + p.maximum_empty_steps = 0; + }); + + let mut parent = Header::default(); + parent.set_seal(vec![encode(&0usize)]); + + let mut header = Header::default(); + header.set_number(parent.number() + 1); + header.set_parent_hash(parent.hash()); + header.set_author(accounts[0]); + + // when + engine.set_signer(Box::new((tap.clone(), accounts[1], "0".into()))); + let es1 = sealed_empty_step(&*engine, 1, &parent.hash()); + engine.set_signer(Box::new((tap.clone(), accounts[0], "1".into()))); + let es2 = sealed_empty_step(&*engine, 2, &parent.hash()); + + let mut empty_steps = vec![es2, es1]; + + let step = 3; + let signature = tap + .sign(accounts[1], Some("0".into()), header.bare_hash()) + .unwrap(); + set_empty_steps_seal(&mut header, step, &signature, &empty_steps); + header.set_difficulty(calculate_score(0, step, empty_steps.len())); + + // then make sure it's rejected because of the order + assert_insufficient_proof( + 
engine.verify_block_family(&header, &parent), + "unordered empty step", + ); + + // now try to fix the order + empty_steps.reverse(); + set_empty_steps_seal(&mut header, step, &signature, &empty_steps); + assert_eq!(engine.verify_block_family(&header, &parent).unwrap(), ()); + } } diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 69b8d07c5..e54affdf7 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -16,269 +16,301 @@ //! A blockchain engine that supports a basic, non-BFT proof-of-authority. -use std::sync::Weak; -use ethereum_types::{H256, H520}; -use parking_lot::RwLock; -use ethkey::{self, Signature}; +use super::validator_set::{new_validator_set, SimpleList, ValidatorSet}; use block::*; -use engines::{Engine, Seal, ConstructedVerifier, EngineError}; -use engines::signer::EngineSigner; -use error::{BlockError, Error}; -use ethjson; use client::EngineClient; +use engines::{signer::EngineSigner, ConstructedVerifier, Engine, EngineError, Seal}; +use error::{BlockError, Error}; +use ethereum_types::{H256, H520}; +use ethjson; +use ethkey::{self, Signature}; use machine::{AuxiliaryData, Call, EthereumMachine}; -use types::header::{Header, ExtendedHeader}; -use super::validator_set::{ValidatorSet, SimpleList, new_validator_set}; +use parking_lot::RwLock; +use std::sync::Weak; +use types::header::{ExtendedHeader, Header}; /// `BasicAuthority` params. #[derive(Debug, PartialEq)] pub struct BasicAuthorityParams { - /// Valid signatories. - pub validators: ethjson::spec::ValidatorSet, + /// Valid signatories. 
+ pub validators: ethjson::spec::ValidatorSet, } impl From for BasicAuthorityParams { - fn from(p: ethjson::spec::BasicAuthorityParams) -> Self { - BasicAuthorityParams { - validators: p.validators, - } - } + fn from(p: ethjson::spec::BasicAuthorityParams) -> Self { + BasicAuthorityParams { + validators: p.validators, + } + } } struct EpochVerifier { - list: SimpleList, + list: SimpleList, } impl super::EpochVerifier for EpochVerifier { - fn verify_light(&self, header: &Header) -> Result<(), Error> { - verify_external(header, &self.list) - } + fn verify_light(&self, header: &Header) -> Result<(), Error> { + verify_external(header, &self.list) + } } fn verify_external(header: &Header, validators: &ValidatorSet) -> Result<(), Error> { - use rlp::Rlp; + use rlp::Rlp; - // Check if the signature belongs to a validator, can depend on parent state. - let sig = Rlp::new(&header.seal()[0]).as_val::()?; - let signer = ethkey::public_to_address(ðkey::recover(&sig.into(), &header.bare_hash())?); + // Check if the signature belongs to a validator, can depend on parent state. + let sig = Rlp::new(&header.seal()[0]).as_val::()?; + let signer = ethkey::public_to_address(ðkey::recover(&sig.into(), &header.bare_hash())?); - if *header.author() != signer { - return Err(EngineError::NotAuthorized(*header.author()).into()) - } + if *header.author() != signer { + return Err(EngineError::NotAuthorized(*header.author()).into()); + } - match validators.contains(header.parent_hash(), &signer) { - false => Err(BlockError::InvalidSeal.into()), - true => Ok(()) - } + match validators.contains(header.parent_hash(), &signer) { + false => Err(BlockError::InvalidSeal.into()), + true => Ok(()), + } } /// Engine using `BasicAuthority`, trivial proof-of-authority consensus. 
pub struct BasicAuthority { - machine: EthereumMachine, - signer: RwLock>>, - validators: Box, + machine: EthereumMachine, + signer: RwLock>>, + validators: Box, } impl BasicAuthority { - /// Create a new instance of BasicAuthority engine - pub fn new(our_params: BasicAuthorityParams, machine: EthereumMachine) -> Self { - BasicAuthority { - machine: machine, - signer: RwLock::new(None), - validators: new_validator_set(our_params.validators), - } - } + /// Create a new instance of BasicAuthority engine + pub fn new(our_params: BasicAuthorityParams, machine: EthereumMachine) -> Self { + BasicAuthority { + machine: machine, + signer: RwLock::new(None), + validators: new_validator_set(our_params.validators), + } + } } impl Engine for BasicAuthority { - fn name(&self) -> &str { "BasicAuthority" } + fn name(&self) -> &str { + "BasicAuthority" + } - fn machine(&self) -> &EthereumMachine { &self.machine } + fn machine(&self) -> &EthereumMachine { + &self.machine + } - // One field - the signature - fn seal_fields(&self, _header: &Header) -> usize { 1 } + // One field - the signature + fn seal_fields(&self, _header: &Header) -> usize { + 1 + } - fn seals_internally(&self) -> Option { - Some(self.signer.read().is_some()) - } + fn seals_internally(&self) -> Option { + Some(self.signer.read().is_some()) + } - /// Attempt to seal the block internally. - fn generate_seal(&self, block: &ExecutedBlock, _parent: &Header) -> Seal { - let header = &block.header; - let author = header.author(); - if self.validators.contains(header.parent_hash(), author) { - // account should be pernamently unlocked, otherwise sealing will fail - if let Ok(signature) = self.sign(header.bare_hash()) { - return Seal::Regular(vec![::rlp::encode(&(&H520::from(signature) as &[u8]))]); - } else { - trace!(target: "basicauthority", "generate_seal: FAIL: accounts secret key unavailable"); - } - } - Seal::None - } + /// Attempt to seal the block internally. 
+ fn generate_seal(&self, block: &ExecutedBlock, _parent: &Header) -> Seal { + let header = &block.header; + let author = header.author(); + if self.validators.contains(header.parent_hash(), author) { + // account should be pernamently unlocked, otherwise sealing will fail + if let Ok(signature) = self.sign(header.bare_hash()) { + return Seal::Regular(vec![::rlp::encode(&(&H520::from(signature) as &[u8]))]); + } else { + trace!(target: "basicauthority", "generate_seal: FAIL: accounts secret key unavailable"); + } + } + Seal::None + } - fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> { - Ok(()) - } + fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> { + Ok(()) + } - fn verify_block_external(&self, header: &Header) -> Result<(), Error> { - verify_external(header, &*self.validators) - } + fn verify_block_external(&self, header: &Header) -> Result<(), Error> { + verify_external(header, &*self.validators) + } - fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { - self.validators.genesis_epoch_data(header, call) - } + fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { + self.validators.genesis_epoch_data(header, call) + } - #[cfg(not(test))] - fn signals_epoch_end(&self, _header: &Header, _auxiliary: AuxiliaryData) - -> super::EpochChange - { - // don't bother signalling even though a contract might try. - super::EpochChange::No - } + #[cfg(not(test))] + fn signals_epoch_end( + &self, + _header: &Header, + _auxiliary: AuxiliaryData, + ) -> super::EpochChange { + // don't bother signalling even though a contract might try. + super::EpochChange::No + } - #[cfg(test)] - fn signals_epoch_end(&self, header: &Header, auxiliary: AuxiliaryData) - -> super::EpochChange - { - // in test mode, always signal even though they don't be finalized. 
- let first = header.number() == 0; - self.validators.signals_epoch_end(first, header, auxiliary) - } + #[cfg(test)] + fn signals_epoch_end( + &self, + header: &Header, + auxiliary: AuxiliaryData, + ) -> super::EpochChange { + // in test mode, always signal even though they don't be finalized. + let first = header.number() == 0; + self.validators.signals_epoch_end(first, header, auxiliary) + } - fn is_epoch_end( - &self, - chain_head: &Header, - _finalized: &[H256], - _chain: &super::Headers
, - _transition_store: &super::PendingTransitionStore, - ) -> Option> { - let first = chain_head.number() == 0; + fn is_epoch_end( + &self, + chain_head: &Header, + _finalized: &[H256], + _chain: &super::Headers
, + _transition_store: &super::PendingTransitionStore, + ) -> Option> { + let first = chain_head.number() == 0; - // finality never occurs so only apply immediate transitions. - self.validators.is_epoch_end(first, chain_head) - } + // finality never occurs so only apply immediate transitions. + self.validators.is_epoch_end(first, chain_head) + } - fn is_epoch_end_light( - &self, - chain_head: &Header, - chain: &super::Headers
, - transition_store: &super::PendingTransitionStore, - ) -> Option> { - self.is_epoch_end(chain_head, &[], chain, transition_store) - } + fn is_epoch_end_light( + &self, + chain_head: &Header, + chain: &super::Headers
, + transition_store: &super::PendingTransitionStore, + ) -> Option> { + self.is_epoch_end(chain_head, &[], chain, transition_store) + } - fn epoch_verifier<'a>(&self, header: &Header, proof: &'a [u8]) -> ConstructedVerifier<'a, EthereumMachine> { - let first = header.number() == 0; + fn epoch_verifier<'a>( + &self, + header: &Header, + proof: &'a [u8], + ) -> ConstructedVerifier<'a, EthereumMachine> { + let first = header.number() == 0; - match self.validators.epoch_set(first, &self.machine, header.number(), proof) { - Ok((list, finalize)) => { - let verifier = Box::new(EpochVerifier { list: list }); + match self + .validators + .epoch_set(first, &self.machine, header.number(), proof) + { + Ok((list, finalize)) => { + let verifier = Box::new(EpochVerifier { list: list }); - // our epoch verifier will ensure no unverified verifier is ever verified. - match finalize { - Some(finalize) => ConstructedVerifier::Unconfirmed(verifier, proof, finalize), - None => ConstructedVerifier::Trusted(verifier), - } - } - Err(e) => ConstructedVerifier::Err(e), - } - } + // our epoch verifier will ensure no unverified verifier is ever verified. + match finalize { + Some(finalize) => ConstructedVerifier::Unconfirmed(verifier, proof, finalize), + None => ConstructedVerifier::Trusted(verifier), + } + } + Err(e) => ConstructedVerifier::Err(e), + } + } - fn register_client(&self, client: Weak) { - self.validators.register_client(client); - } + fn register_client(&self, client: Weak) { + self.validators.register_client(client); + } - fn set_signer(&self, signer: Box) { - *self.signer.write() = Some(signer); - } + fn set_signer(&self, signer: Box) { + *self.signer.write() = Some(signer); + } - fn sign(&self, hash: H256) -> Result { - Ok(self.signer.read() - .as_ref() - .ok_or_else(|| ethkey::Error::InvalidAddress)? - .sign(hash)? - ) - } + fn sign(&self, hash: H256) -> Result { + Ok(self + .signer + .read() + .as_ref() + .ok_or_else(|| ethkey::Error::InvalidAddress)? + .sign(hash)?) 
+ } - fn snapshot_components(&self) -> Option> { - None - } + fn snapshot_components(&self) -> Option> { + None + } - fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { - super::total_difficulty_fork_choice(new, current) - } + fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { + super::total_difficulty_fork_choice(new, current) + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use hash::keccak; - use ethereum_types::H520; - use block::*; - use test_helpers::get_temp_state_db; - use accounts::AccountProvider; - use types::header::Header; - use spec::Spec; - use engines::Seal; - use tempdir::TempDir; + use accounts::AccountProvider; + use block::*; + use engines::Seal; + use ethereum_types::H520; + use hash::keccak; + use spec::Spec; + use std::sync::Arc; + use tempdir::TempDir; + use test_helpers::get_temp_state_db; + use types::header::Header; - /// Create a new test chain spec with `BasicAuthority` consensus engine. - fn new_test_authority() -> Spec { - let bytes: &[u8] = include_bytes!("../../res/basic_authority.json"); - let tempdir = TempDir::new("").unwrap(); - Spec::load(&tempdir.path(), bytes).expect("invalid chain spec") - } + /// Create a new test chain spec with `BasicAuthority` consensus engine. 
+ fn new_test_authority() -> Spec { + let bytes: &[u8] = include_bytes!("../../res/basic_authority.json"); + let tempdir = TempDir::new("").unwrap(); + Spec::load(&tempdir.path(), bytes).expect("invalid chain spec") + } - #[test] - fn has_valid_metadata() { - let engine = new_test_authority().engine; - assert!(!engine.name().is_empty()); - } + #[test] + fn has_valid_metadata() { + let engine = new_test_authority().engine; + assert!(!engine.name().is_empty()); + } - #[test] - fn can_return_schedule() { - let engine = new_test_authority().engine; - let schedule = engine.schedule(10000000); - assert!(schedule.stack_limit > 0); - } + #[test] + fn can_return_schedule() { + let engine = new_test_authority().engine; + let schedule = engine.schedule(10000000); + assert!(schedule.stack_limit > 0); + } - #[test] - fn can_do_signature_verification_fail() { - let engine = new_test_authority().engine; - let mut header: Header = Header::default(); - header.set_seal(vec![::rlp::encode(&H520::default())]); + #[test] + fn can_do_signature_verification_fail() { + let engine = new_test_authority().engine; + let mut header: Header = Header::default(); + header.set_seal(vec![::rlp::encode(&H520::default())]); - let verify_result = engine.verify_block_external(&header); - assert!(verify_result.is_err()); - } + let verify_result = engine.verify_block_external(&header); + assert!(verify_result.is_err()); + } - #[test] - fn can_generate_seal() { - let tap = AccountProvider::transient_provider(); - let addr = tap.insert_account(keccak("").into(), &"".into()).unwrap(); + #[test] + fn can_generate_seal() { + let tap = AccountProvider::transient_provider(); + let addr = tap.insert_account(keccak("").into(), &"".into()).unwrap(); - let spec = new_test_authority(); - let engine = &*spec.engine; - engine.set_signer(Box::new((Arc::new(tap), addr, "".into()))); - let genesis_header = spec.genesis_header(); - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let 
last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b = b.close_and_lock().unwrap(); - if let Seal::Regular(seal) = engine.generate_seal(&b, &genesis_header) { - assert!(b.try_seal(engine, seal).is_ok()); - } - } + let spec = new_test_authority(); + let engine = &*spec.engine; + engine.set_signer(Box::new((Arc::new(tap), addr, "".into()))); + let genesis_header = spec.genesis_header(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let b = OpenBlock::new( + engine, + Default::default(), + false, + db, + &genesis_header, + last_hashes, + addr, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b = b.close_and_lock().unwrap(); + if let Seal::Regular(seal) = engine.generate_seal(&b, &genesis_header) { + assert!(b.try_seal(engine, seal).is_ok()); + } + } - #[test] - fn seals_internally() { - let tap = AccountProvider::transient_provider(); - let authority = tap.insert_account(keccak("").into(), &"".into()).unwrap(); + #[test] + fn seals_internally() { + let tap = AccountProvider::transient_provider(); + let authority = tap.insert_account(keccak("").into(), &"".into()).unwrap(); - let engine = new_test_authority().engine; - assert!(!engine.seals_internally().unwrap()); - engine.set_signer(Box::new((Arc::new(tap), authority, "".into()))); - assert!(engine.seals_internally().unwrap()); - } + let engine = new_test_authority().engine; + assert!(!engine.seals_internally().unwrap()); + engine.set_signer(Box::new((Arc::new(tap), authority, "".into()))); + assert!(engine.seals_internally().unwrap()); + } } diff --git a/ethcore/src/engines/block_reward.rs b/ethcore/src/engines/block_reward.rs index 58b55408e..911f888cd 100644 --- a/ethcore/src/engines/block_reward.rs +++ 
b/ethcore/src/engines/block_reward.rs @@ -17,19 +17,17 @@ //! A module with types for declaring block rewards and a client interface for interacting with a //! block reward contract. -use ethabi; -use ethabi::ParamType; -use ethereum_types::{H160, Address, U256}; +use ethabi::{self, ParamType}; +use ethereum_types::{Address, H160, U256}; -use std::sync::Arc; -use hash::keccak; -use error::Error; -use machine::Machine; -use trace; -use types::BlockNumber; use super::{SystemOrCodeCall, SystemOrCodeCallKind}; -use trace::{Tracer, ExecutiveTracer, Tracing}; use block::ExecutedBlock; +use error::Error; +use hash::keccak; +use machine::Machine; +use std::sync::Arc; +use trace::{self, ExecutiveTracer, Tracer, Tracing}; +use types::BlockNumber; use_contract!(block_reward_contract, "res/contracts/block_reward.json"); @@ -38,203 +36,245 @@ use_contract!(block_reward_contract, "res/contracts/block_reward.json"); /// different semantics which could lead e.g. to different reward values. #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub enum RewardKind { - /// Reward attributed to the block author. - Author, - /// Reward attributed to the author(s) of empty step(s) included in the block (AuthorityRound engine). - EmptyStep, - /// Reward attributed by an external protocol (e.g. block reward contract). - External, - /// Reward attributed to the block uncle(s) with given difference. - Uncle(u8), + /// Reward attributed to the block author. + Author, + /// Reward attributed to the author(s) of empty step(s) included in the block (AuthorityRound engine). + EmptyStep, + /// Reward attributed by an external protocol (e.g. block reward contract). + External, + /// Reward attributed to the block uncle(s) with given difference. + Uncle(u8), } impl RewardKind { - /// Create `RewardKind::Uncle` from given current block number and uncle block number. 
- pub fn uncle(number: BlockNumber, uncle: BlockNumber) -> Self { - RewardKind::Uncle(if number > uncle && number - uncle <= u8::max_value().into() { (number - uncle) as u8 } else { 0 }) - } + /// Create `RewardKind::Uncle` from given current block number and uncle block number. + pub fn uncle(number: BlockNumber, uncle: BlockNumber) -> Self { + RewardKind::Uncle( + if number > uncle && number - uncle <= u8::max_value().into() { + (number - uncle) as u8 + } else { + 0 + }, + ) + } } impl From for u16 { - fn from(reward_kind: RewardKind) -> Self { - match reward_kind { - RewardKind::Author => 0, - RewardKind::EmptyStep => 2, - RewardKind::External => 3, + fn from(reward_kind: RewardKind) -> Self { + match reward_kind { + RewardKind::Author => 0, + RewardKind::EmptyStep => 2, + RewardKind::External => 3, - RewardKind::Uncle(depth) => 100 + depth as u16, - } - } + RewardKind::Uncle(depth) => 100 + depth as u16, + } + } } impl Into for RewardKind { - fn into(self) -> trace::RewardType { - match self { - RewardKind::Author => trace::RewardType::Block, - RewardKind::Uncle(_) => trace::RewardType::Uncle, - RewardKind::EmptyStep => trace::RewardType::EmptyStep, - RewardKind::External => trace::RewardType::External, - } - } + fn into(self) -> trace::RewardType { + match self { + RewardKind::Author => trace::RewardType::Block, + RewardKind::Uncle(_) => trace::RewardType::Uncle, + RewardKind::EmptyStep => trace::RewardType::EmptyStep, + RewardKind::External => trace::RewardType::External, + } + } } /// A client for the block reward contract. #[derive(PartialEq, Debug)] pub struct BlockRewardContract { - kind: SystemOrCodeCallKind, + kind: SystemOrCodeCallKind, } impl BlockRewardContract { - /// Create a new block reward contract client targeting the system call kind. - pub fn new(kind: SystemOrCodeCallKind) -> BlockRewardContract { - BlockRewardContract { - kind, - } - } + /// Create a new block reward contract client targeting the system call kind. 
+ pub fn new(kind: SystemOrCodeCallKind) -> BlockRewardContract { + BlockRewardContract { kind } + } - /// Create a new block reward contract client targeting the contract address. - pub fn new_from_address(address: Address) -> BlockRewardContract { - Self::new(SystemOrCodeCallKind::Address(address)) - } + /// Create a new block reward contract client targeting the contract address. + pub fn new_from_address(address: Address) -> BlockRewardContract { + Self::new(SystemOrCodeCallKind::Address(address)) + } - /// Create a new block reward contract client targeting the given code. - pub fn new_from_code(code: Arc>) -> BlockRewardContract { - let code_hash = keccak(&code[..]); + /// Create a new block reward contract client targeting the given code. + pub fn new_from_code(code: Arc>) -> BlockRewardContract { + let code_hash = keccak(&code[..]); - Self::new(SystemOrCodeCallKind::Code(code, code_hash)) - } + Self::new(SystemOrCodeCallKind::Code(code, code_hash)) + } - /// Calls the block reward contract with the given beneficiaries list (and associated reward kind) - /// and returns the reward allocation (address - value). The block reward contract *must* be - /// called by the system address so the `caller` must ensure that (e.g. using - /// `machine.execute_as_system`). - pub fn reward( - &self, - beneficiaries: &[(Address, RewardKind)], - caller: &mut SystemOrCodeCall, - ) -> Result, Error> { - let input = block_reward_contract::functions::reward::encode_input( - beneficiaries.iter().map(|&(address, _)| H160::from(address)), - beneficiaries.iter().map(|&(_, ref reward_kind)| u16::from(*reward_kind)), - ); + /// Calls the block reward contract with the given beneficiaries list (and associated reward kind) + /// and returns the reward allocation (address - value). The block reward contract *must* be + /// called by the system address so the `caller` must ensure that (e.g. using + /// `machine.execute_as_system`). 
+ pub fn reward( + &self, + beneficiaries: &[(Address, RewardKind)], + caller: &mut SystemOrCodeCall, + ) -> Result, Error> { + let input = block_reward_contract::functions::reward::encode_input( + beneficiaries + .iter() + .map(|&(address, _)| H160::from(address)), + beneficiaries + .iter() + .map(|&(_, ref reward_kind)| u16::from(*reward_kind)), + ); - let output = caller(self.kind.clone(), input) - .map_err(Into::into) - .map_err(::engines::EngineError::FailedSystemCall)?; + let output = caller(self.kind.clone(), input) + .map_err(Into::into) + .map_err(::engines::EngineError::FailedSystemCall)?; - // since this is a non-constant call we can't use ethabi's function output - // deserialization, sadness ensues. - let types = &[ - ParamType::Array(Box::new(ParamType::Address)), - ParamType::Array(Box::new(ParamType::Uint(256))), - ]; + // since this is a non-constant call we can't use ethabi's function output + // deserialization, sadness ensues. + let types = &[ + ParamType::Array(Box::new(ParamType::Address)), + ParamType::Array(Box::new(ParamType::Uint(256))), + ]; - let tokens = ethabi::decode(types, &output) - .map_err(|err| err.to_string()) - .map_err(::engines::EngineError::FailedSystemCall)?; + let tokens = ethabi::decode(types, &output) + .map_err(|err| err.to_string()) + .map_err(::engines::EngineError::FailedSystemCall)?; - assert!(tokens.len() == 2); + assert!(tokens.len() == 2); - let addresses = tokens[0].clone().to_array().expect("type checked by ethabi::decode; qed"); - let rewards = tokens[1].clone().to_array().expect("type checked by ethabi::decode; qed"); + let addresses = tokens[0] + .clone() + .to_array() + .expect("type checked by ethabi::decode; qed"); + let rewards = tokens[1] + .clone() + .to_array() + .expect("type checked by ethabi::decode; qed"); - if addresses.len() != rewards.len() { - return Err(::engines::EngineError::FailedSystemCall( - "invalid data returned by reward contract: both arrays must have the same size".into() - 
).into()); - } + if addresses.len() != rewards.len() { + return Err(::engines::EngineError::FailedSystemCall( + "invalid data returned by reward contract: both arrays must have the same size" + .into(), + ) + .into()); + } - let addresses = addresses.into_iter().map(|t| t.to_address().expect("type checked by ethabi::decode; qed")); - let rewards = rewards.into_iter().map(|t| t.to_uint().expect("type checked by ethabi::decode; qed")); + let addresses = addresses + .into_iter() + .map(|t| t.to_address().expect("type checked by ethabi::decode; qed")); + let rewards = rewards + .into_iter() + .map(|t| t.to_uint().expect("type checked by ethabi::decode; qed")); - Ok(addresses.zip(rewards).collect()) - } + Ok(addresses.zip(rewards).collect()) + } } /// Applies the given block rewards, i.e. adds the given balance to each beneficiary' address. /// If tracing is enabled the operations are recorded. pub fn apply_block_rewards( - rewards: &[(Address, RewardKind, U256)], - block: &mut ExecutedBlock, - machine: &M, + rewards: &[(Address, RewardKind, U256)], + block: &mut ExecutedBlock, + machine: &M, ) -> Result<(), M::Error> { - for &(ref author, _, ref block_reward) in rewards { - machine.add_balance(block, author, block_reward)?; - } + for &(ref author, _, ref block_reward) in rewards { + machine.add_balance(block, author, block_reward)?; + } - if let Tracing::Enabled(ref mut traces) = *block.traces_mut() { - let mut tracer = ExecutiveTracer::default(); + if let Tracing::Enabled(ref mut traces) = *block.traces_mut() { + let mut tracer = ExecutiveTracer::default(); - for &(address, reward_kind, amount) in rewards { - tracer.trace_reward(address, amount, reward_kind.into()); - } + for &(address, reward_kind, amount) in rewards { + tracer.trace_reward(address, amount, reward_kind.into()); + } - traces.push(tracer.drain().into()); - } + traces.push(tracer.drain().into()); + } - Ok(()) + Ok(()) } #[cfg(test)] mod test { - use client::PrepareOpenBlock; - use ethereum_types::U256; 
- use spec::Spec; - use test_helpers::generate_dummy_client_with_spec; + use client::PrepareOpenBlock; + use ethereum_types::U256; + use spec::Spec; + use test_helpers::generate_dummy_client_with_spec; - use engines::SystemOrCodeCallKind; - use super::{BlockRewardContract, RewardKind}; + use super::{BlockRewardContract, RewardKind}; + use engines::SystemOrCodeCallKind; - #[test] - fn block_reward_contract() { - let client = generate_dummy_client_with_spec(Spec::new_test_round_block_reward_contract); + #[test] + fn block_reward_contract() { + let client = generate_dummy_client_with_spec(Spec::new_test_round_block_reward_contract); - let machine = Spec::new_test_machine(); + let machine = Spec::new_test_machine(); - // the spec has a block reward contract defined at the given address - let block_reward_contract = BlockRewardContract::new_from_address( - "0000000000000000000000000000000000000042".into(), - ); + // the spec has a block reward contract defined at the given address + let block_reward_contract = BlockRewardContract::new_from_address( + "0000000000000000000000000000000000000042".into(), + ); - let mut call = |to, data| { - let mut block = client.prepare_open_block( - "0000000000000000000000000000000000000001".into(), - (3141562.into(), 31415620.into()), - vec![], - ).unwrap(); + let mut call = |to, data| { + let mut block = client + .prepare_open_block( + "0000000000000000000000000000000000000001".into(), + (3141562.into(), 31415620.into()), + vec![], + ) + .unwrap(); - let result = match to { - SystemOrCodeCallKind::Address(to) => { - machine.execute_as_system( - block.block_mut(), - to, - U256::max_value(), - Some(data), - ) - }, - _ => panic!("Test reward contract is created by an address, we never reach this branch."), - }; + let result = match to { + SystemOrCodeCallKind::Address(to) => { + machine.execute_as_system(block.block_mut(), to, U256::max_value(), Some(data)) + } + _ => panic!( + "Test reward contract is created by an address, we never reach 
this branch." + ), + }; - result.map_err(|e| format!("{}", e)) - }; + result.map_err(|e| format!("{}", e)) + }; - // if no beneficiaries are given no rewards are attributed - assert!(block_reward_contract.reward(&vec![], &mut call).unwrap().is_empty()); + // if no beneficiaries are given no rewards are attributed + assert!(block_reward_contract + .reward(&vec![], &mut call) + .unwrap() + .is_empty()); - // the contract rewards (1000 + kind) for each benefactor - let beneficiaries = vec![ - ("0000000000000000000000000000000000000033".into(), RewardKind::Author), - ("0000000000000000000000000000000000000034".into(), RewardKind::Uncle(1)), - ("0000000000000000000000000000000000000035".into(), RewardKind::EmptyStep), - ]; + // the contract rewards (1000 + kind) for each benefactor + let beneficiaries = vec![ + ( + "0000000000000000000000000000000000000033".into(), + RewardKind::Author, + ), + ( + "0000000000000000000000000000000000000034".into(), + RewardKind::Uncle(1), + ), + ( + "0000000000000000000000000000000000000035".into(), + RewardKind::EmptyStep, + ), + ]; - let rewards = block_reward_contract.reward(&beneficiaries, &mut call).unwrap(); - let expected = vec![ - ("0000000000000000000000000000000000000033".into(), U256::from(1000)), - ("0000000000000000000000000000000000000034".into(), U256::from(1000 + 101)), - ("0000000000000000000000000000000000000035".into(), U256::from(1000 + 2)), - ]; + let rewards = block_reward_contract + .reward(&beneficiaries, &mut call) + .unwrap(); + let expected = vec![ + ( + "0000000000000000000000000000000000000033".into(), + U256::from(1000), + ), + ( + "0000000000000000000000000000000000000034".into(), + U256::from(1000 + 101), + ), + ( + "0000000000000000000000000000000000000035".into(), + U256::from(1000 + 2), + ), + ]; - assert_eq!(expected, rewards); - } + assert_eq!(expected, rewards); + } } diff --git a/ethcore/src/engines/clique/block_state.rs b/ethcore/src/engines/clique/block_state.rs index 6518d8ac2..4e331e016 100644 
--- a/ethcore/src/engines/clique/block_state.rs +++ b/ethcore/src/engines/clique/block_state.rs @@ -14,98 +14,123 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::{HashMap, BTreeSet, VecDeque}; -use std::fmt; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::{ + collections::{BTreeSet, HashMap, VecDeque}, + fmt, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; -use engines::EngineError; -use engines::clique::util::{extract_signers, recover_creator}; -use engines::clique::{VoteType, DIFF_INTURN, DIFF_NOTURN, NULL_AUTHOR, SIGNING_DELAY_NOTURN_MS}; -use error::{Error, BlockError}; +use engines::{ + clique::{ + util::{extract_signers, recover_creator}, + VoteType, DIFF_INTURN, DIFF_NOTURN, NULL_AUTHOR, SIGNING_DELAY_NOTURN_MS, + }, + EngineError, +}; +use error::{BlockError, Error}; use ethereum_types::{Address, H64}; use rand::Rng; use time_utils::CheckedSystemTime; -use types::BlockNumber; -use types::header::Header; +use types::{header::Header, BlockNumber}; use unexpected::Mismatch; /// Type that keeps track of the state for a given vote // Votes that go against the proposal aren't counted since it's equivalent to not voting #[derive(Copy, Clone, Debug, PartialEq, PartialOrd)] pub struct VoteState { - kind: VoteType, - votes: u64, + kind: VoteType, + votes: u64, } /// Type that represent a vote #[derive(Copy, Clone, Debug, PartialEq, PartialOrd)] pub struct Vote { - block_number: BlockNumber, - beneficiary: Address, - kind: VoteType, - signer: Address, - reverted: bool, + block_number: BlockNumber, + beneficiary: Address, + kind: VoteType, + signer: Address, + reverted: bool, } /// Type that represent a pending vote #[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd)] pub struct PendingVote { - signer: Address, - beneficiary: Address, + signer: Address, + beneficiary: Address, } /// Clique state for each block. 
#[cfg(not(test))] #[derive(Clone, Debug, Default)] pub struct CliqueBlockState { - /// Current votes for a beneficiary - votes: HashMap, - /// A list of all votes for the given epoch - votes_history: Vec, - /// a list of all valid signer, sorted by ascending order. - signers: BTreeSet
, - /// a deque of recent signer, new entry should be pushed front, apply() modifies this. - recent_signers: VecDeque
, - /// inturn signing should wait until this time - pub next_timestamp_inturn: Option, - /// noturn signing should wait until this time - pub next_timestamp_noturn: Option, + /// Current votes for a beneficiary + votes: HashMap, + /// A list of all votes for the given epoch + votes_history: Vec, + /// a list of all valid signer, sorted by ascending order. + signers: BTreeSet
, + /// a deque of recent signer, new entry should be pushed front, apply() modifies this. + recent_signers: VecDeque
, + /// inturn signing should wait until this time + pub next_timestamp_inturn: Option, + /// noturn signing should wait until this time + pub next_timestamp_noturn: Option, } #[cfg(test)] #[derive(Clone, Debug, Default)] pub struct CliqueBlockState { - /// All recorded votes for a given signer, `Vec` is a stack of votes - pub votes: HashMap, - /// A list of all votes for the given epoch - pub votes_history: Vec, - /// a list of all valid signer, sorted by ascending order. - pub signers: BTreeSet
, - /// a deque of recent signer, new entry should be pushed front, apply() modifies this. - pub recent_signers: VecDeque
, - /// inturn signing should wait until this time - pub next_timestamp_inturn: Option, - /// noturn signing should wait until this time - pub next_timestamp_noturn: Option, + /// All recorded votes for a given signer, `Vec` is a stack of votes + pub votes: HashMap, + /// A list of all votes for the given epoch + pub votes_history: Vec, + /// a list of all valid signer, sorted by ascending order. + pub signers: BTreeSet
, + /// a deque of recent signer, new entry should be pushed front, apply() modifies this. + pub recent_signers: VecDeque
, + /// inturn signing should wait until this time + pub next_timestamp_inturn: Option, + /// noturn signing should wait until this time + pub next_timestamp_noturn: Option, } impl fmt::Display for CliqueBlockState { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let signers: Vec = self.signers.iter() - .map(|s| - format!("{} {:?}", - s, - self.votes.iter().map(|(v, s)| format!("[beneficiary {}, votes: {}]", v.beneficiary, s.votes)) - .collect::>() - ) - ) - .collect(); + let signers: Vec = self + .signers + .iter() + .map(|s| { + format!( + "{} {:?}", + s, + self.votes + .iter() + .map(|(v, s)| format!( + "[beneficiary {}, votes: {}]", + v.beneficiary, s.votes + )) + .collect::>() + ) + }) + .collect(); - let recent_signers: Vec = self.recent_signers.iter().map(|s| format!("{}", s)).collect(); - let num_votes = self.votes_history.len(); - let add_votes = self.votes_history.iter().filter(|v| v.kind == VoteType::Add).count(); - let rm_votes = self.votes_history.iter().filter(|v| v.kind == VoteType::Remove).count(); - let reverted_votes = self.votes_history.iter().filter(|v| v.reverted).count(); + let recent_signers: Vec = self + .recent_signers + .iter() + .map(|s| format!("{}", s)) + .collect(); + let num_votes = self.votes_history.len(); + let add_votes = self + .votes_history + .iter() + .filter(|v| v.kind == VoteType::Add) + .count(); + let rm_votes = self + .votes_history + .iter() + .filter(|v| v.kind == VoteType::Remove) + .count(); + let reverted_votes = self.votes_history.iter().filter(|v| v.reverted).count(); write!(f, "Votes {{ \n signers: {:?} \n recent_signers: {:?} \n number of votes: {} \n number of add votes {} @@ -115,253 +140,270 @@ impl fmt::Display for CliqueBlockState { } impl CliqueBlockState { - /// Create new state with given information, this is used creating new state from Checkpoint block. - pub fn new(signers: BTreeSet
) -> Self { - CliqueBlockState { - signers, - ..Default::default() - } - } + /// Create new state with given information, this is used creating new state from Checkpoint block. + pub fn new(signers: BTreeSet
) -> Self { + CliqueBlockState { + signers, + ..Default::default() + } + } - // see https://github.com/ethereum/go-ethereum/blob/master/consensus/clique/clique.go#L474 - fn verify(&self, header: &Header) -> Result { - let creator = recover_creator(header)?.clone(); + // see https://github.com/ethereum/go-ethereum/blob/master/consensus/clique/clique.go#L474 + fn verify(&self, header: &Header) -> Result { + let creator = recover_creator(header)?.clone(); - // The signer is not authorized - if !self.signers.contains(&creator) { - trace!(target: "engine", "current state: {}", self); - Err(EngineError::NotAuthorized(creator))? - } + // The signer is not authorized + if !self.signers.contains(&creator) { + trace!(target: "engine", "current state: {}", self); + Err(EngineError::NotAuthorized(creator))? + } - // The signer has signed a block too recently - if self.recent_signers.contains(&creator) { - trace!(target: "engine", "current state: {}", self); - Err(EngineError::CliqueTooRecentlySigned(creator))? - } + // The signer has signed a block too recently + if self.recent_signers.contains(&creator) { + trace!(target: "engine", "current state: {}", self); + Err(EngineError::CliqueTooRecentlySigned(creator))? + } - // Wrong difficulty - let inturn = self.is_inturn(header.number(), &creator); + // Wrong difficulty + let inturn = self.is_inturn(header.number(), &creator); - if inturn && *header.difficulty() != DIFF_INTURN { - Err(BlockError::InvalidDifficulty(Mismatch { - expected: DIFF_INTURN, - found: *header.difficulty(), - }))? - } + if inturn && *header.difficulty() != DIFF_INTURN { + Err(BlockError::InvalidDifficulty(Mismatch { + expected: DIFF_INTURN, + found: *header.difficulty(), + }))? + } - if !inturn && *header.difficulty() != DIFF_NOTURN { - Err(BlockError::InvalidDifficulty(Mismatch { - expected: DIFF_NOTURN, - found: *header.difficulty(), - }))? 
- } + if !inturn && *header.difficulty() != DIFF_NOTURN { + Err(BlockError::InvalidDifficulty(Mismatch { + expected: DIFF_NOTURN, + found: *header.difficulty(), + }))? + } - Ok(creator) - } + Ok(creator) + } - /// Verify and apply a new header to current state - pub fn apply(&mut self, header: &Header, is_checkpoint: bool) -> Result { - let creator = self.verify(header)?; - self.recent_signers.push_front(creator); - self.rotate_recent_signers(); + /// Verify and apply a new header to current state + pub fn apply(&mut self, header: &Header, is_checkpoint: bool) -> Result { + let creator = self.verify(header)?; + self.recent_signers.push_front(creator); + self.rotate_recent_signers(); - if is_checkpoint { - // checkpoint block should not affect previous tallying, so we check that. - let signers = extract_signers(header)?; - if self.signers != signers { - let invalid_signers: Vec = signers.into_iter() - .filter(|s| !self.signers.contains(s)) - .map(|s| format!("{}", s)) - .collect(); - Err(EngineError::CliqueFaultyRecoveredSigners(invalid_signers))? - }; + if is_checkpoint { + // checkpoint block should not affect previous tallying, so we check that. + let signers = extract_signers(header)?; + if self.signers != signers { + let invalid_signers: Vec = signers + .into_iter() + .filter(|s| !self.signers.contains(s)) + .map(|s| format!("{}", s)) + .collect(); + Err(EngineError::CliqueFaultyRecoveredSigners(invalid_signers))? 
+ }; - // TODO(niklasad1): I'm not sure if we should shrink here because it is likely that next epoch - // will need some memory and might be better for allocation algorithm to decide whether to shrink or not - // (typically doubles or halves the allocted memory when necessary) - self.votes.clear(); - self.votes_history.clear(); - self.votes.shrink_to_fit(); - self.votes_history.shrink_to_fit(); - } + // TODO(niklasad1): I'm not sure if we should shrink here because it is likely that next epoch + // will need some memory and might be better for allocation algorithm to decide whether to shrink or not + // (typically doubles or halves the allocted memory when necessary) + self.votes.clear(); + self.votes_history.clear(); + self.votes.shrink_to_fit(); + self.votes_history.shrink_to_fit(); + } - // Contains vote - if *header.author() != NULL_AUTHOR { - let decoded_seal = header.decode_seal::>()?; - if decoded_seal.len() != 2 { - Err(BlockError::InvalidSealArity(Mismatch { expected: 2, found: decoded_seal.len() }))? - } + // Contains vote + if *header.author() != NULL_AUTHOR { + let decoded_seal = header.decode_seal::>()?; + if decoded_seal.len() != 2 { + Err(BlockError::InvalidSealArity(Mismatch { + expected: 2, + found: decoded_seal.len(), + }))? 
+ } - let nonce: H64 = decoded_seal[1].into(); - self.update_signers_on_vote(VoteType::from_nonce(nonce)?, creator, *header.author(), header.number())?; - } + let nonce: H64 = decoded_seal[1].into(); + self.update_signers_on_vote( + VoteType::from_nonce(nonce)?, + creator, + *header.author(), + header.number(), + )?; + } - Ok(creator) - } + Ok(creator) + } - fn update_signers_on_vote( - &mut self, - kind: VoteType, - signer: Address, - beneficiary: Address, - block_number: u64 - ) -> Result<(), Error> { + fn update_signers_on_vote( + &mut self, + kind: VoteType, + signer: Address, + beneficiary: Address, + block_number: u64, + ) -> Result<(), Error> { + trace!(target: "engine", "Attempt vote {:?} {:?}", kind, beneficiary); - trace!(target: "engine", "Attempt vote {:?} {:?}", kind, beneficiary); + let pending_vote = PendingVote { + signer, + beneficiary, + }; - let pending_vote = PendingVote { signer, beneficiary }; + let reverted = if self.is_valid_vote(&beneficiary, kind) { + self.add_vote(pending_vote, kind) + } else { + // This case only happens if a `signer` wants to revert their previous vote + // (does nothing if no previous vote was found) + self.revert_vote(pending_vote) + }; - let reverted = if self.is_valid_vote(&beneficiary, kind) { - self.add_vote(pending_vote, kind) - } else { - // This case only happens if a `signer` wants to revert their previous vote - // (does nothing if no previous vote was found) - self.revert_vote(pending_vote) - }; + // Add all votes to the history + self.votes_history.push(Vote { + block_number, + beneficiary, + kind, + signer, + reverted, + }); - // Add all votes to the history - self.votes_history.push( - Vote { - block_number, - beneficiary, - kind, - signer, - reverted, - }); + // If no vote was found for the beneficiary return `early` but don't propogate an error + let (votes, vote_kind) = match self.get_current_votes_and_kind(beneficiary) { + Some((v, k)) => (v, k), + None => return Ok(()), + }; + let threshold = 
self.signers.len() / 2; - // If no vote was found for the beneficiary return `early` but don't propogate an error - let (votes, vote_kind) = match self.get_current_votes_and_kind(beneficiary) { - Some((v, k)) => (v, k), - None => return Ok(()), - }; - let threshold = self.signers.len() / 2; + debug!(target: "engine", "{}/{} votes to have consensus", votes, threshold + 1); + trace!(target: "engine", "votes: {:?}", votes); - debug!(target: "engine", "{}/{} votes to have consensus", votes, threshold + 1); - trace!(target: "engine", "votes: {:?}", votes); + if votes > threshold { + match vote_kind { + VoteType::Add => { + if self.signers.insert(beneficiary) { + debug!(target: "engine", "added new signer: {}", beneficiary); + } + } + VoteType::Remove => { + if self.signers.remove(&beneficiary) { + debug!(target: "engine", "removed signer: {}", beneficiary); + } + } + } - if votes > threshold { - match vote_kind { - VoteType::Add => { - if self.signers.insert(beneficiary) { - debug!(target: "engine", "added new signer: {}", beneficiary); - } - } - VoteType::Remove => { - if self.signers.remove(&beneficiary) { - debug!(target: "engine", "removed signer: {}", beneficiary); - } - } - } + self.rotate_recent_signers(); + self.remove_all_votes_from(beneficiary); + } - self.rotate_recent_signers(); - self.remove_all_votes_from(beneficiary); - } + Ok(()) + } - Ok(()) - } + /// Calculate the next timestamp for `inturn` and `noturn` fails if any of them can't be represented as + /// `SystemTime` + // TODO(niklasad1): refactor this method to be in constructor of `CliqueBlockState` instead. 
+ // This is a quite bad API because we must mutate both variables even when already `inturn` fails + // That's why we can't return early and must have the `if-else` in the end + pub fn calc_next_timestamp(&mut self, timestamp: u64, period: u64) -> Result<(), Error> { + let inturn = CheckedSystemTime::checked_add( + UNIX_EPOCH, + Duration::from_secs(timestamp.saturating_add(period)), + ); - /// Calculate the next timestamp for `inturn` and `noturn` fails if any of them can't be represented as - /// `SystemTime` - // TODO(niklasad1): refactor this method to be in constructor of `CliqueBlockState` instead. - // This is a quite bad API because we must mutate both variables even when already `inturn` fails - // That's why we can't return early and must have the `if-else` in the end - pub fn calc_next_timestamp(&mut self, timestamp: u64, period: u64) -> Result<(), Error> { - let inturn = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(timestamp.saturating_add(period))); + self.next_timestamp_inturn = inturn; - self.next_timestamp_inturn = inturn; + let delay = Duration::from_millis(rand::thread_rng().gen_range( + 0u64, + (self.signers.len() as u64 / 2 + 1) * SIGNING_DELAY_NOTURN_MS, + )); + self.next_timestamp_noturn = inturn.map(|inturn| inturn + delay); - let delay = Duration::from_millis( - rand::thread_rng().gen_range(0u64, (self.signers.len() as u64 / 2 + 1) * SIGNING_DELAY_NOTURN_MS)); - self.next_timestamp_noturn = inturn.map(|inturn| { - inturn + delay - }); + if self.next_timestamp_inturn.is_some() && self.next_timestamp_noturn.is_some() { + Ok(()) + } else { + Err(BlockError::TimestampOverflow)? + } + } - if self.next_timestamp_inturn.is_some() && self.next_timestamp_noturn.is_some() { - Ok(()) - } else { - Err(BlockError::TimestampOverflow)? 
- } - } + /// Returns true if the block difficulty should be `inturn` + pub fn is_inturn(&self, current_block_number: u64, author: &Address) -> bool { + if let Some(pos) = self.signers.iter().position(|x| *author == *x) { + return current_block_number % self.signers.len() as u64 == pos as u64; + } + false + } - /// Returns true if the block difficulty should be `inturn` - pub fn is_inturn(&self, current_block_number: u64, author: &Address) -> bool { - if let Some(pos) = self.signers.iter().position(|x| *author == *x) { - return current_block_number % self.signers.len() as u64 == pos as u64; - } - false - } + /// Returns whether the signer is authorized to sign a block + pub fn is_authorized(&self, author: &Address) -> bool { + self.signers.contains(author) && !self.recent_signers.contains(author) + } - /// Returns whether the signer is authorized to sign a block - pub fn is_authorized(&self, author: &Address) -> bool { - self.signers.contains(author) && !self.recent_signers.contains(author) - } + /// Returns whether it makes sense to cast the specified vote in the + /// current state (e.g. don't try to add an already authorized signer). + pub fn is_valid_vote(&self, address: &Address, vote_type: VoteType) -> bool { + let in_signer = self.signers.contains(address); + match vote_type { + VoteType::Add => !in_signer, + VoteType::Remove => in_signer, + } + } - /// Returns whether it makes sense to cast the specified vote in the - /// current state (e.g. don't try to add an already authorized signer). - pub fn is_valid_vote(&self, address: &Address, vote_type: VoteType) -> bool { - let in_signer = self.signers.contains(address); - match vote_type { - VoteType::Add => !in_signer, - VoteType::Remove => in_signer, - } - } + /// Returns the list of current signers + pub fn signers(&self) -> &BTreeSet
{ + &self.signers + } - /// Returns the list of current signers - pub fn signers(&self) -> &BTreeSet
{ - &self.signers - } + // Note this method will always return `true` but it is intended for a uniform `API` + fn add_vote(&mut self, pending_vote: PendingVote, kind: VoteType) -> bool { + self.votes + .entry(pending_vote) + .and_modify(|state| { + state.votes = state.votes.saturating_add(1); + }) + .or_insert_with(|| VoteState { kind, votes: 1 }); + true + } - // Note this method will always return `true` but it is intended for a uniform `API` - fn add_vote(&mut self, pending_vote: PendingVote, kind: VoteType) -> bool { + fn revert_vote(&mut self, pending_vote: PendingVote) -> bool { + let mut revert = false; + let mut remove = false; - self.votes.entry(pending_vote) - .and_modify(|state| { - state.votes = state.votes.saturating_add(1); - }) - .or_insert_with(|| VoteState { kind, votes: 1 }); - true - } + self.votes.entry(pending_vote).and_modify(|state| { + if state.votes.saturating_sub(1) == 0 { + remove = true; + } + revert = true; + }); - fn revert_vote(&mut self, pending_vote: PendingVote) -> bool { - let mut revert = false; - let mut remove = false; + if remove { + self.votes.remove(&pending_vote); + } - self.votes.entry(pending_vote).and_modify(|state| { - if state.votes.saturating_sub(1) == 0 { - remove = true; - } - revert = true; - }); + revert + } - if remove { - self.votes.remove(&pending_vote); - } + fn get_current_votes_and_kind(&self, beneficiary: Address) -> Option<(usize, VoteType)> { + let kind = self + .votes + .iter() + .find(|(v, _t)| v.beneficiary == beneficiary) + .map(|(_v, t)| t.kind)?; - revert - } + let votes = self + .votes + .keys() + .filter(|vote| vote.beneficiary == beneficiary) + .count(); - fn get_current_votes_and_kind(&self, beneficiary: Address) -> Option<(usize, VoteType)> { - let kind = self.votes.iter() - .find(|(v, _t)| v.beneficiary == beneficiary) - .map(|(_v, t)| t.kind)?; + Some((votes, kind)) + } - let votes = self.votes.keys() - .filter(|vote| vote.beneficiary == beneficiary) - .count(); + fn 
rotate_recent_signers(&mut self) { + if self.recent_signers.len() >= (self.signers.len() / 2) + 1 { + self.recent_signers.pop_back(); + } + } - Some((votes, kind)) - } - - fn rotate_recent_signers(&mut self) { - if self.recent_signers.len() >= ( self.signers.len() / 2 ) + 1 { - self.recent_signers.pop_back(); - } - } - - fn remove_all_votes_from(&mut self, beneficiary: Address) { - self.votes = std::mem::replace(&mut self.votes, HashMap::new()) - .into_iter() - .filter(|(v, _t)| v.signer != beneficiary && v.beneficiary != beneficiary) - .collect(); - } + fn remove_all_votes_from(&mut self, beneficiary: Address) { + self.votes = std::mem::replace(&mut self.votes, HashMap::new()) + .into_iter() + .filter(|(v, _t)| v.signer != beneficiary && v.beneficiary != beneficiary) + .collect(); + } } diff --git a/ethcore/src/engines/clique/mod.rs b/ethcore/src/engines/clique/mod.rs index 65d56fa28..a7bbdd424 100644 --- a/ethcore/src/engines/clique/mod.rs +++ b/ethcore/src/engines/clique/mod.rs @@ -57,22 +57,24 @@ /// 6. We call `Clique::on_seal_block()` which will allow us to modify the block header during seal generation. /// 7. Finally, `Clique::verify_local_seal()` is called. After this, the syncing code path will be followed /// in order to import the new block. 
- use std::cmp; -use std::collections::HashMap; -use std::collections::VecDeque; -use std::sync::{Arc, Weak}; -use std::thread; -use std::time; -use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH}; +use std::{ + collections::{HashMap, VecDeque}, + sync::{Arc, Weak}, + thread, time, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; +use super::signer::EngineSigner; use block::ExecutedBlock; use bytes::Bytes; -use client::{BlockId, EngineClient, traits::ForceUpdateSealing}; -use engines::clique::util::{extract_signers, recover_creator}; -use engines::{Engine, EngineError, Seal}; +use client::{traits::ForceUpdateSealing, BlockId, EngineClient}; +use engines::{ + clique::util::{extract_signers, recover_creator}, + Engine, EngineError, Seal, +}; use error::{BlockError, Error}; -use ethereum_types::{Address, H64, H160, H256, U256}; +use ethereum_types::{Address, H160, H256, H64, U256}; use ethkey::Signature; use hash::KECCAK_EMPTY_LIST_RLP; use itertools::Itertools; @@ -80,17 +82,17 @@ use lru_cache::LruCache; use machine::{Call, EthereumMachine}; use parking_lot::RwLock; use rand::Rng; -use super::signer::EngineSigner; -use unexpected::{Mismatch, OutOfBounds}; use time_utils::CheckedSystemTime; -use types::BlockNumber; -use types::header::{ExtendedHeader, Header}; +use types::{ + header::{ExtendedHeader, Header}, + BlockNumber, +}; +use unexpected::{Mismatch, OutOfBounds}; -use self::block_state::CliqueBlockState; -use self::params::CliqueParams; +use self::{block_state::CliqueBlockState, params::CliqueParams}; -mod params; mod block_state; +mod params; mod util; // TODO(niklasad1): extract tester types into a separate mod to be shared in the code base @@ -129,649 +131,683 @@ pub const STATE_CACHE_NUM: usize = 128; /// Vote to add or remove the beneficiary #[derive(Copy, Clone, Debug, PartialEq, PartialOrd)] pub enum VoteType { - Add, - Remove, + Add, + Remove, } impl VoteType { - /// Try to construct a `Vote` from a nonce - pub fn from_nonce(nonce: H64) 
-> Result { - if nonce == NONCE_AUTH_VOTE { - Ok(VoteType::Add) - } else if nonce == NONCE_DROP_VOTE { - Ok(VoteType::Remove) - } else { - Err(EngineError::CliqueInvalidNonce(nonce))? - } - } + /// Try to construct a `Vote` from a nonce + pub fn from_nonce(nonce: H64) -> Result { + if nonce == NONCE_AUTH_VOTE { + Ok(VoteType::Add) + } else if nonce == NONCE_DROP_VOTE { + Ok(VoteType::Remove) + } else { + Err(EngineError::CliqueInvalidNonce(nonce))? + } + } - /// Get the rlp encoding of the vote - pub fn as_rlp(&self) -> Vec> { - match self { - VoteType::Add => vec![rlp::encode(&NULL_MIXHASH), rlp::encode(&NONCE_AUTH_VOTE)], - VoteType::Remove => vec![rlp::encode(&NULL_MIXHASH), rlp::encode(&NONCE_DROP_VOTE)], - } - } + /// Get the rlp encoding of the vote + pub fn as_rlp(&self) -> Vec> { + match self { + VoteType::Add => vec![rlp::encode(&NULL_MIXHASH), rlp::encode(&NONCE_AUTH_VOTE)], + VoteType::Remove => vec![rlp::encode(&NULL_MIXHASH), rlp::encode(&NONCE_DROP_VOTE)], + } + } } /// Clique Engine implementation // block_state_by_hash -> block state indexed by header hash. #[cfg(not(test))] pub struct Clique { - epoch_length: u64, - period: u64, - machine: EthereumMachine, - client: RwLock>>, - block_state_by_hash: RwLock>, - proposals: RwLock>, - signer: RwLock>>, + epoch_length: u64, + period: u64, + machine: EthereumMachine, + client: RwLock>>, + block_state_by_hash: RwLock>, + proposals: RwLock>, + signer: RwLock>>, } #[cfg(test)] /// Test version of `CliqueEngine` to make all fields public pub struct Clique { - pub epoch_length: u64, - pub period: u64, - pub machine: EthereumMachine, - pub client: RwLock>>, - pub block_state_by_hash: RwLock>, - pub proposals: RwLock>, - pub signer: RwLock>>, + pub epoch_length: u64, + pub period: u64, + pub machine: EthereumMachine, + pub client: RwLock>>, + pub block_state_by_hash: RwLock>, + pub proposals: RwLock>, + pub signer: RwLock>>, } impl Clique { - /// Initialize Clique engine from empty state. 
- pub fn new(params: CliqueParams, machine: EthereumMachine) -> Result, Error> { - /// Step Clique at most every 2 seconds - const SEALING_FREQ: Duration = Duration::from_secs(2); + /// Initialize Clique engine from empty state. + pub fn new(params: CliqueParams, machine: EthereumMachine) -> Result, Error> { + /// Step Clique at most every 2 seconds + const SEALING_FREQ: Duration = Duration::from_secs(2); - let engine = Clique { - epoch_length: params.epoch, - period: params.period, - client: Default::default(), - block_state_by_hash: RwLock::new(LruCache::new(STATE_CACHE_NUM)), - proposals: Default::default(), - signer: Default::default(), - machine, - }; - let engine = Arc::new(engine); - let weak_eng = Arc::downgrade(&engine); + let engine = Clique { + epoch_length: params.epoch, + period: params.period, + client: Default::default(), + block_state_by_hash: RwLock::new(LruCache::new(STATE_CACHE_NUM)), + proposals: Default::default(), + signer: Default::default(), + machine, + }; + let engine = Arc::new(engine); + let weak_eng = Arc::downgrade(&engine); - thread::Builder::new().name("StepService".into()) - .spawn(move || { - loop { - let next_step_at = Instant::now() + SEALING_FREQ; - trace!(target: "miner", "StepService: triggering sealing"); - if let Some(eng) = weak_eng.upgrade() { - eng.step() - } else { - warn!(target: "shutdown", "StepService: engine is dropped; exiting."); - break; - } + thread::Builder::new() + .name("StepService".into()) + .spawn(move || loop { + let next_step_at = Instant::now() + SEALING_FREQ; + trace!(target: "miner", "StepService: triggering sealing"); + if let Some(eng) = weak_eng.upgrade() { + eng.step() + } else { + warn!(target: "shutdown", "StepService: engine is dropped; exiting."); + break; + } - let now = Instant::now(); - if now < next_step_at { - thread::sleep(next_step_at - now); - } - } - })?; - Ok(engine) - } + let now = Instant::now(); + if now < next_step_at { + thread::sleep(next_step_at - now); + } + })?; + Ok(engine) 
+ } - #[cfg(test)] - /// Initialize test variant of `CliqueEngine`, - /// Note we need to `mock` the miner and it is introduced to test block verification to trigger new blocks - /// to mainly test consensus edge cases - pub fn with_test(epoch_length: u64, period: u64) -> Self { - use spec::Spec; + #[cfg(test)] + /// Initialize test variant of `CliqueEngine`, + /// Note we need to `mock` the miner and it is introduced to test block verification to trigger new blocks + /// to mainly test consensus edge cases + pub fn with_test(epoch_length: u64, period: u64) -> Self { + use spec::Spec; - Self { - epoch_length, - period, - client: Default::default(), - block_state_by_hash: RwLock::new(LruCache::new(STATE_CACHE_NUM)), - proposals: Default::default(), - signer: Default::default(), - machine: Spec::new_test_machine(), - } - } + Self { + epoch_length, + period, + client: Default::default(), + block_state_by_hash: RwLock::new(LruCache::new(STATE_CACHE_NUM)), + proposals: Default::default(), + signer: Default::default(), + machine: Spec::new_test_machine(), + } + } - fn sign_header(&self, header: &Header) -> Result<(Signature, H256), Error> { + fn sign_header(&self, header: &Header) -> Result<(Signature, H256), Error> { + match self.signer.read().as_ref() { + None => Err(EngineError::RequiresSigner)?, + Some(signer) => { + let digest = header.hash(); + match signer.sign(digest) { + Ok(sig) => Ok((sig, digest)), + Err(e) => Err(EngineError::Custom(e.into()))?, + } + } + } + } - match self.signer.read().as_ref() { - None => { - Err(EngineError::RequiresSigner)? - } - Some(signer) => { - let digest = header.hash(); - match signer.sign(digest) { - Ok(sig) => Ok((sig, digest)), - Err(e) => Err(EngineError::Custom(e.into()))?, - } - } - } - } + /// Construct an new state from given checkpoint header. + fn new_checkpoint_state(&self, header: &Header) -> Result { + debug_assert_eq!(header.number() % self.epoch_length, 0); - /// Construct an new state from given checkpoint header. 
- fn new_checkpoint_state(&self, header: &Header) -> Result { - debug_assert_eq!(header.number() % self.epoch_length, 0); + let mut state = CliqueBlockState::new(extract_signers(header)?); - let mut state = CliqueBlockState::new( - extract_signers(header)?); + // TODO(niklasad1): refactor to perform this check in the `CliqueBlockState` constructor instead + state.calc_next_timestamp(header.timestamp(), self.period)?; - // TODO(niklasad1): refactor to perform this check in the `CliqueBlockState` constructor instead - state.calc_next_timestamp(header.timestamp(), self.period)?; + Ok(state) + } - Ok(state) - } + fn state_no_backfill(&self, hash: &H256) -> Option { + self.block_state_by_hash.write().get_mut(hash).cloned() + } - fn state_no_backfill(&self, hash: &H256) -> Option { - self.block_state_by_hash.write().get_mut(hash).cloned() - } + /// Get `CliqueBlockState` for given header, backfill from last checkpoint if needed. + fn state(&self, header: &Header) -> Result { + let mut block_state_by_hash = self.block_state_by_hash.write(); + if let Some(state) = block_state_by_hash.get_mut(&header.hash()) { + return Ok(state.clone()); + } + // If we are looking for an checkpoint block state, we can directly reconstruct it. + if header.number() % self.epoch_length == 0 { + let state = self.new_checkpoint_state(header)?; + block_state_by_hash.insert(header.hash(), state.clone()); + return Ok(state); + } + // BlockState is not found in memory, which means we need to reconstruct state from last checkpoint. + match self.client.read().as_ref().and_then(|w| w.upgrade()) { + None => { + return Err(EngineError::RequiresClient)?; + } + Some(c) => { + let last_checkpoint_number = + header.number() - header.number() % self.epoch_length as u64; + debug_assert_ne!(last_checkpoint_number, header.number()); - /// Get `CliqueBlockState` for given header, backfill from last checkpoint if needed. 
- fn state(&self, header: &Header) -> Result { - let mut block_state_by_hash = self.block_state_by_hash.write(); - if let Some(state) = block_state_by_hash.get_mut(&header.hash()) { - return Ok(state.clone()); - } - // If we are looking for an checkpoint block state, we can directly reconstruct it. - if header.number() % self.epoch_length == 0 { - let state = self.new_checkpoint_state(header)?; - block_state_by_hash.insert(header.hash(), state.clone()); - return Ok(state); - } - // BlockState is not found in memory, which means we need to reconstruct state from last checkpoint. - match self.client.read().as_ref().and_then(|w| w.upgrade()) { - None => { - return Err(EngineError::RequiresClient)?; - } - Some(c) => { - let last_checkpoint_number = header.number() - header.number() % self.epoch_length as u64; - debug_assert_ne!(last_checkpoint_number, header.number()); - - // Catching up state, note that we don't really store block state for intermediary blocks, - // for speed. - let backfill_start = time::Instant::now(); - trace!(target: "engine", + // Catching up state, note that we don't really store block state for intermediary blocks, + // for speed. + let backfill_start = time::Instant::now(); + trace!(target: "engine", "Back-filling block state. last_checkpoint_number: {}, target: {}({}).", last_checkpoint_number, header.number(), header.hash()); - let mut chain: &mut VecDeque
= &mut VecDeque::with_capacity( - (header.number() - last_checkpoint_number + 1) as usize); + let mut chain: &mut VecDeque
= &mut VecDeque::with_capacity( + (header.number() - last_checkpoint_number + 1) as usize, + ); - // Put ourselves in. - chain.push_front(header.clone()); + // Put ourselves in. + chain.push_front(header.clone()); - // populate chain to last checkpoint - loop { - let (last_parent_hash, last_num) = { - let l = chain.front().expect("chain has at least one element; qed"); - (*l.parent_hash(), l.number()) - }; + // populate chain to last checkpoint + loop { + let (last_parent_hash, last_num) = { + let l = chain.front().expect("chain has at least one element; qed"); + (*l.parent_hash(), l.number()) + }; - if last_num == last_checkpoint_number + 1 { - break; - } - match c.block_header(BlockId::Hash(last_parent_hash)) { - None => { - return Err(BlockError::UnknownParent(last_parent_hash))?; - } - Some(next) => { - chain.push_front(next.decode()?); - } - } - } + if last_num == last_checkpoint_number + 1 { + break; + } + match c.block_header(BlockId::Hash(last_parent_hash)) { + None => { + return Err(BlockError::UnknownParent(last_parent_hash))?; + } + Some(next) => { + chain.push_front(next.decode()?); + } + } + } - // Get the state for last checkpoint. - let last_checkpoint_hash = *chain.front() - .expect("chain has at least one element; qed") - .parent_hash(); + // Get the state for last checkpoint. + let last_checkpoint_hash = *chain + .front() + .expect("chain has at least one element; qed") + .parent_hash(); - let last_checkpoint_header = match c.block_header(BlockId::Hash(last_checkpoint_hash)) { - None => return Err(EngineError::CliqueMissingCheckpoint(last_checkpoint_hash))?, - Some(header) => header.decode()?, - }; + let last_checkpoint_header = + match c.block_header(BlockId::Hash(last_checkpoint_hash)) { + None => { + return Err(EngineError::CliqueMissingCheckpoint(last_checkpoint_hash))? 
+ } + Some(header) => header.decode()?, + }; - let last_checkpoint_state = match block_state_by_hash.get_mut(&last_checkpoint_hash) { - Some(state) => state.clone(), - None => self.new_checkpoint_state(&last_checkpoint_header)?, - }; + let last_checkpoint_state = match block_state_by_hash.get_mut(&last_checkpoint_hash) + { + Some(state) => state.clone(), + None => self.new_checkpoint_state(&last_checkpoint_header)?, + }; - block_state_by_hash.insert(last_checkpoint_header.hash(), last_checkpoint_state.clone()); + block_state_by_hash + .insert(last_checkpoint_header.hash(), last_checkpoint_state.clone()); - // Backfill! - let mut new_state = last_checkpoint_state.clone(); - for item in chain { - new_state.apply(item, false)?; - } - new_state.calc_next_timestamp(header.timestamp(), self.period)?; - block_state_by_hash.insert(header.hash(), new_state.clone()); + // Backfill! + let mut new_state = last_checkpoint_state.clone(); + for item in chain { + new_state.apply(item, false)?; + } + new_state.calc_next_timestamp(header.timestamp(), self.period)?; + block_state_by_hash.insert(header.hash(), new_state.clone()); - let elapsed = backfill_start.elapsed(); - trace!(target: "engine", "Back-filling succeed, took {} ms.", elapsed.as_millis()); - Ok(new_state) - } - } - } + let elapsed = backfill_start.elapsed(); + trace!(target: "engine", "Back-filling succeed, took {} ms.", elapsed.as_millis()); + Ok(new_state) + } + } + } } impl Engine for Clique { - fn name(&self) -> &str { "Clique" } + fn name(&self) -> &str { + "Clique" + } - fn machine(&self) -> &EthereumMachine { &self.machine } + fn machine(&self) -> &EthereumMachine { + &self.machine + } - // Clique use same fields, nonce + mixHash - fn seal_fields(&self, _header: &Header) -> usize { 2 } + // Clique use same fields, nonce + mixHash + fn seal_fields(&self, _header: &Header) -> usize { + 2 + } - fn maximum_uncle_count(&self, _block: BlockNumber) -> usize { 0 } + fn maximum_uncle_count(&self, _block: BlockNumber) -> 
usize { + 0 + } - fn on_new_block( - &self, - _block: &mut ExecutedBlock, - _epoch_begin: bool, - _ancestry: &mut Iterator, - ) -> Result<(), Error> { - Ok(()) - } + fn on_new_block( + &self, + _block: &mut ExecutedBlock, + _epoch_begin: bool, + _ancestry: &mut Iterator, + ) -> Result<(), Error> { + Ok(()) + } - // Clique has no block reward. - fn on_close_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { - Ok(()) - } + // Clique has no block reward. + fn on_close_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { + Ok(()) + } - fn on_seal_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { - trace!(target: "engine", "on_seal_block"); + fn on_seal_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + trace!(target: "engine", "on_seal_block"); - let header = &mut block.header; + let header = &mut block.header; - let state = self.state_no_backfill(header.parent_hash()) - .ok_or_else(|| BlockError::UnknownParent(*header.parent_hash()))?; + let state = self + .state_no_backfill(header.parent_hash()) + .ok_or_else(|| BlockError::UnknownParent(*header.parent_hash()))?; - let is_checkpoint = header.number() % self.epoch_length == 0; + let is_checkpoint = header.number() % self.epoch_length == 0; - header.set_author(NULL_AUTHOR); + header.set_author(NULL_AUTHOR); - // Cast a random Vote if not checkpoint - if !is_checkpoint { - // TODO(niklasad1): this will always be false because `proposals` is never written to - let votes = self.proposals.read().iter() - .filter(|(address, vote_type)| state.is_valid_vote(*address, **vote_type)) - .map(|(address, vote_type)| (*address, *vote_type)) - .collect_vec(); + // Cast a random Vote if not checkpoint + if !is_checkpoint { + // TODO(niklasad1): this will always be false because `proposals` is never written to + let votes = self + .proposals + .read() + .iter() + .filter(|(address, vote_type)| state.is_valid_vote(*address, **vote_type)) + .map(|(address, vote_type)| (*address, 
*vote_type)) + .collect_vec(); - if !votes.is_empty() { - // Pick a random vote. - let random_vote = rand::thread_rng().gen_range(0 as usize, votes.len()); - let (beneficiary, vote_type) = votes[random_vote]; + if !votes.is_empty() { + // Pick a random vote. + let random_vote = rand::thread_rng().gen_range(0 as usize, votes.len()); + let (beneficiary, vote_type) = votes[random_vote]; - trace!(target: "engine", "Casting vote: beneficiary {}, type {:?} ", beneficiary, vote_type); + trace!(target: "engine", "Casting vote: beneficiary {}, type {:?} ", beneficiary, vote_type); - header.set_author(beneficiary); - header.set_seal(vote_type.as_rlp()); - } - } + header.set_author(beneficiary); + header.set_seal(vote_type.as_rlp()); + } + } - // Work on clique seal. + // Work on clique seal. - let mut seal: Vec = Vec::with_capacity(VANITY_LENGTH + SIGNATURE_LENGTH); + let mut seal: Vec = Vec::with_capacity(VANITY_LENGTH + SIGNATURE_LENGTH); - // At this point, extra_data should only contain miner vanity. - if header.extra_data().len() != VANITY_LENGTH { - Err(BlockError::ExtraDataOutOfBounds(OutOfBounds { - min: Some(VANITY_LENGTH), - max: Some(VANITY_LENGTH), - found: header.extra_data().len() - }))?; - } - // vanity - { - seal.extend_from_slice(&header.extra_data()[0..VANITY_LENGTH]); - } + // At this point, extra_data should only contain miner vanity. + if header.extra_data().len() != VANITY_LENGTH { + Err(BlockError::ExtraDataOutOfBounds(OutOfBounds { + min: Some(VANITY_LENGTH), + max: Some(VANITY_LENGTH), + found: header.extra_data().len(), + }))?; + } + // vanity + { + seal.extend_from_slice(&header.extra_data()[0..VANITY_LENGTH]); + } - // If we are building an checkpoint block, add all signers now. - if is_checkpoint { - seal.reserve(state.signers().len() * 20); - state.signers().iter().foreach(|addr| { - seal.extend_from_slice(&addr[..]); - }); - } + // If we are building an checkpoint block, add all signers now. 
+ if is_checkpoint { + seal.reserve(state.signers().len() * 20); + state.signers().iter().foreach(|addr| { + seal.extend_from_slice(&addr[..]); + }); + } - header.set_extra_data(seal.clone()); + header.set_extra_data(seal.clone()); - // append signature onto extra_data - let (sig, _msg) = self.sign_header(&header)?; - seal.extend_from_slice(&sig[..]); - header.set_extra_data(seal.clone()); + // append signature onto extra_data + let (sig, _msg) = self.sign_header(&header)?; + seal.extend_from_slice(&sig[..]); + header.set_extra_data(seal.clone()); - header.compute_hash(); + header.compute_hash(); - // locally sealed block don't go through valid_block_family(), so we have to record state here. - let mut new_state = state.clone(); - new_state.apply(&header, is_checkpoint)?; - new_state.calc_next_timestamp(header.timestamp(), self.period)?; - self.block_state_by_hash.write().insert(header.hash(), new_state); + // locally sealed block don't go through valid_block_family(), so we have to record state here. + let mut new_state = state.clone(); + new_state.apply(&header, is_checkpoint)?; + new_state.calc_next_timestamp(header.timestamp(), self.period)?; + self.block_state_by_hash + .write() + .insert(header.hash(), new_state); - trace!(target: "engine", "on_seal_block: finished, final header: {:?}", header); + trace!(target: "engine", "on_seal_block: finished, final header: {:?}", header); - Ok(()) - } + Ok(()) + } - /// Clique doesn't require external work to seal, so we always return true here. - fn seals_internally(&self) -> Option { - Some(true) - } + /// Clique doesn't require external work to seal, so we always return true here. + fn seals_internally(&self) -> Option { + Some(true) + } - /// Returns if we are ready to seal, the real sealing (signing extra_data) is actually done in `on_seal_block()`. 
- fn generate_seal(&self, block: &ExecutedBlock, parent: &Header) -> Seal { - trace!(target: "engine", "tried to generate_seal"); - let null_seal = util::null_seal(); + /// Returns if we are ready to seal, the real sealing (signing extra_data) is actually done in `on_seal_block()`. + fn generate_seal(&self, block: &ExecutedBlock, parent: &Header) -> Seal { + trace!(target: "engine", "tried to generate_seal"); + let null_seal = util::null_seal(); - if block.header.number() == 0 { - trace!(target: "engine", "attempted to seal genesis block"); - return Seal::None; - } + if block.header.number() == 0 { + trace!(target: "engine", "attempted to seal genesis block"); + return Seal::None; + } - // if sealing period is 0, and not an checkpoint block, refuse to seal - if self.period == 0 { - if block.transactions.is_empty() && block.header.number() % self.epoch_length != 0 { - return Seal::None; - } - return Seal::Regular(null_seal); - } + // if sealing period is 0, and not an checkpoint block, refuse to seal + if self.period == 0 { + if block.transactions.is_empty() && block.header.number() % self.epoch_length != 0 { + return Seal::None; + } + return Seal::Regular(null_seal); + } - // Check we actually have authority to seal. - if let Some(author) = self.signer.read().as_ref().map(|x| x.address()) { - - // ensure the voting state exists - match self.state(&parent) { - Err(e) => { - warn!(target: "engine", "generate_seal: can't get parent state(number: {}, hash: {}): {} ", + // Check we actually have authority to seal. + if let Some(author) = self.signer.read().as_ref().map(|x| x.address()) { + // ensure the voting state exists + match self.state(&parent) { + Err(e) => { + warn!(target: "engine", "generate_seal: can't get parent state(number: {}, hash: {}): {} ", parent.number(), parent.hash(), e); - return Seal::None; - } - Ok(state) => { - // Are we authorized to seal? 
- if !state.is_authorized(&author) { - trace!(target: "engine", "generate_seal: Not authorized to sign right now."); - // wait for one third of period to try again. - thread::sleep(Duration::from_secs(self.period / 3 + 1)); - return Seal::None; - } + return Seal::None; + } + Ok(state) => { + // Are we authorized to seal? + if !state.is_authorized(&author) { + trace!(target: "engine", "generate_seal: Not authorized to sign right now."); + // wait for one third of period to try again. + thread::sleep(Duration::from_secs(self.period / 3 + 1)); + return Seal::None; + } - let inturn = state.is_inturn(block.header.number(), &author); + let inturn = state.is_inturn(block.header.number(), &author); - let now = SystemTime::now(); + let now = SystemTime::now(); - let limit = match inturn { - true => state.next_timestamp_inturn.unwrap_or(now), - false => state.next_timestamp_noturn.unwrap_or(now), - }; + let limit = match inturn { + true => state.next_timestamp_inturn.unwrap_or(now), + false => state.next_timestamp_noturn.unwrap_or(now), + }; - // Wait for the right moment. - if now < limit { - trace!(target: "engine", + // Wait for the right moment. 
+ if now < limit { + trace!(target: "engine", "generate_seal: sleeping to sign: inturn: {}, now: {:?}, to: {:?}.", inturn, now, limit); - match limit.duration_since(SystemTime::now()) { - Ok(duration) => { - thread::sleep(duration); - }, - Err(e) => { - warn!(target:"engine", "generate_seal: unable to sleep, err: {}", e); - return Seal::None; - } - } - } + match limit.duration_since(SystemTime::now()) { + Ok(duration) => { + thread::sleep(duration); + } + Err(e) => { + warn!(target:"engine", "generate_seal: unable to sleep, err: {}", e); + return Seal::None; + } + } + } - trace!(target: "engine", "generate_seal: seal ready for block {}, txs: {}.", + trace!(target: "engine", "generate_seal: seal ready for block {}, txs: {}.", block.header.number(), block.transactions.len()); - return Seal::Regular(null_seal); - } - } - } - Seal::None - } + return Seal::Regular(null_seal); + } + } + } + Seal::None + } - fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> { Ok(()) } + fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> { + Ok(()) + } - fn verify_block_basic(&self, header: &Header) -> Result<(), Error> { - // Largely same as https://github.com/ethereum/go-ethereum/blob/master/consensus/clique/clique.go#L275 + fn verify_block_basic(&self, header: &Header) -> Result<(), Error> { + // Largely same as https://github.com/ethereum/go-ethereum/blob/master/consensus/clique/clique.go#L275 - // Ignore genesis block. - if header.number() == 0 { - return Ok(()); - } + // Ignore genesis block. 
+ if header.number() == 0 { + return Ok(()); + } - // Don't waste time checking blocks from the future - { - let limit = CheckedSystemTime::checked_add(SystemTime::now(), Duration::from_secs(self.period)) - .ok_or(BlockError::TimestampOverflow)?; + // Don't waste time checking blocks from the future + { + let limit = + CheckedSystemTime::checked_add(SystemTime::now(), Duration::from_secs(self.period)) + .ok_or(BlockError::TimestampOverflow)?; - // This should succeed under the contraints that the system clock works - let limit_as_dur = limit.duration_since(UNIX_EPOCH).map_err(|e| { - Box::new(format!("Converting SystemTime to Duration failed: {}", e)) - })?; + // This should succeed under the contraints that the system clock works + let limit_as_dur = limit.duration_since(UNIX_EPOCH).map_err(|e| { + Box::new(format!("Converting SystemTime to Duration failed: {}", e)) + })?; - let hdr = Duration::from_secs(header.timestamp()); - if hdr > limit_as_dur { - let found = CheckedSystemTime::checked_add(UNIX_EPOCH, hdr).ok_or(BlockError::TimestampOverflow)?; + let hdr = Duration::from_secs(header.timestamp()); + if hdr > limit_as_dur { + let found = CheckedSystemTime::checked_add(UNIX_EPOCH, hdr) + .ok_or(BlockError::TimestampOverflow)?; - Err(BlockError::TemporarilyInvalid(OutOfBounds { - min: None, - max: Some(limit), - found, - }))? - } - } + Err(BlockError::TemporarilyInvalid(OutOfBounds { + min: None, + max: Some(limit), + found, + }))? 
+ } + } - let is_checkpoint = header.number() % self.epoch_length == 0; + let is_checkpoint = header.number() % self.epoch_length == 0; - if is_checkpoint && *header.author() != NULL_AUTHOR { - return Err(EngineError::CliqueWrongAuthorCheckpoint(Mismatch { - expected: 0.into(), - found: *header.author(), - }))?; - } + if is_checkpoint && *header.author() != NULL_AUTHOR { + return Err(EngineError::CliqueWrongAuthorCheckpoint(Mismatch { + expected: 0.into(), + found: *header.author(), + }))?; + } - let seal_fields = header.decode_seal::>()?; - if seal_fields.len() != 2 { - Err(BlockError::InvalidSealArity(Mismatch { - expected: 2, - found: seal_fields.len(), - }))? - } + let seal_fields = header.decode_seal::>()?; + if seal_fields.len() != 2 { + Err(BlockError::InvalidSealArity(Mismatch { + expected: 2, + found: seal_fields.len(), + }))? + } - let mixhash: H256 = seal_fields[0].into(); - let nonce: H64 = seal_fields[1].into(); + let mixhash: H256 = seal_fields[0].into(); + let nonce: H64 = seal_fields[1].into(); - // Nonce must be 0x00..0 or 0xff..f - if nonce != NONCE_DROP_VOTE && nonce != NONCE_AUTH_VOTE { - Err(EngineError::CliqueInvalidNonce(nonce))?; - } + // Nonce must be 0x00..0 or 0xff..f + if nonce != NONCE_DROP_VOTE && nonce != NONCE_AUTH_VOTE { + Err(EngineError::CliqueInvalidNonce(nonce))?; + } - if is_checkpoint && nonce != NULL_NONCE { - Err(EngineError::CliqueInvalidNonce(nonce))?; - } + if is_checkpoint && nonce != NULL_NONCE { + Err(EngineError::CliqueInvalidNonce(nonce))?; + } - // Ensure that the mix digest is zero as Clique don't have fork protection currently - if mixhash != NULL_MIXHASH { - Err(BlockError::MismatchedH256SealElement(Mismatch { - expected: NULL_MIXHASH, - found: mixhash, - }))? - } + // Ensure that the mix digest is zero as Clique don't have fork protection currently + if mixhash != NULL_MIXHASH { + Err(BlockError::MismatchedH256SealElement(Mismatch { + expected: NULL_MIXHASH, + found: mixhash, + }))? 
+ } - let extra_data_len = header.extra_data().len(); + let extra_data_len = header.extra_data().len(); - if extra_data_len < VANITY_LENGTH { - Err(EngineError::CliqueMissingVanity)? - } + if extra_data_len < VANITY_LENGTH { + Err(EngineError::CliqueMissingVanity)? + } - if extra_data_len < VANITY_LENGTH + SIGNATURE_LENGTH { - Err(EngineError::CliqueMissingSignature)? - } + if extra_data_len < VANITY_LENGTH + SIGNATURE_LENGTH { + Err(EngineError::CliqueMissingSignature)? + } - let signers = extra_data_len - (VANITY_LENGTH + SIGNATURE_LENGTH); + let signers = extra_data_len - (VANITY_LENGTH + SIGNATURE_LENGTH); - // Checkpoint blocks must at least contain one signer - if is_checkpoint && signers == 0 { - Err(EngineError::CliqueCheckpointNoSigner)? - } + // Checkpoint blocks must at least contain one signer + if is_checkpoint && signers == 0 { + Err(EngineError::CliqueCheckpointNoSigner)? + } - // Addresses must be be divisable by 20 - if is_checkpoint && signers % ADDRESS_LENGTH != 0 { - Err(EngineError::CliqueCheckpointInvalidSigners(signers))? - } + // Addresses must be be divisable by 20 + if is_checkpoint && signers % ADDRESS_LENGTH != 0 { + Err(EngineError::CliqueCheckpointInvalidSigners(signers))? + } - // Ensure that the block doesn't contain any uncles which are meaningless in PoA - if *header.uncles_hash() != NULL_UNCLES_HASH { - Err(BlockError::InvalidUnclesHash(Mismatch { - expected: NULL_UNCLES_HASH, - found: *header.uncles_hash(), - }))? - } + // Ensure that the block doesn't contain any uncles which are meaningless in PoA + if *header.uncles_hash() != NULL_UNCLES_HASH { + Err(BlockError::InvalidUnclesHash(Mismatch { + expected: NULL_UNCLES_HASH, + found: *header.uncles_hash(), + }))? 
+ } - // Ensure that the block's difficulty is meaningful (may not be correct at this point) - if *header.difficulty() != DIFF_INTURN && *header.difficulty() != DIFF_NOTURN { - Err(BlockError::DifficultyOutOfBounds(OutOfBounds { - min: Some(DIFF_NOTURN), - max: Some(DIFF_INTURN), - found: *header.difficulty(), - }))? - } + // Ensure that the block's difficulty is meaningful (may not be correct at this point) + if *header.difficulty() != DIFF_INTURN && *header.difficulty() != DIFF_NOTURN { + Err(BlockError::DifficultyOutOfBounds(OutOfBounds { + min: Some(DIFF_NOTURN), + max: Some(DIFF_INTURN), + found: *header.difficulty(), + }))? + } - // All basic checks passed, continue to next phase - Ok(()) - } + // All basic checks passed, continue to next phase + Ok(()) + } - fn verify_block_unordered(&self, _header: &Header) -> Result<(), Error> { - // Nothing to check here. - Ok(()) - } + fn verify_block_unordered(&self, _header: &Header) -> Result<(), Error> { + // Nothing to check here. + Ok(()) + } - /// Verify block family by looking up parent state (backfill if needed), then try to apply current header. - /// see https://github.com/ethereum/go-ethereum/blob/master/consensus/clique/clique.go#L338 - fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> { - // Ignore genesis block. - if header.number() == 0 { - return Ok(()); - } + /// Verify block family by looking up parent state (backfill if needed), then try to apply current header. + /// see https://github.com/ethereum/go-ethereum/blob/master/consensus/clique/clique.go#L338 + fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> { + // Ignore genesis block. + if header.number() == 0 { + return Ok(()); + } - // parent sanity check - if parent.hash() != *header.parent_hash() || header.number() != parent.number() + 1 { - Err(BlockError::UnknownParent(parent.hash()))? 
- } + // parent sanity check + if parent.hash() != *header.parent_hash() || header.number() != parent.number() + 1 { + Err(BlockError::UnknownParent(parent.hash()))? + } - // Ensure that the block's timestamp isn't too close to it's parent - let limit = parent.timestamp().saturating_add(self.period); - if limit > header.timestamp() { - let max = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(header.timestamp())); - let found = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(limit)) - .ok_or(BlockError::TimestampOverflow)?; + // Ensure that the block's timestamp isn't too close to it's parent + let limit = parent.timestamp().saturating_add(self.period); + if limit > header.timestamp() { + let max = + CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(header.timestamp())); + let found = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(limit)) + .ok_or(BlockError::TimestampOverflow)?; - Err(BlockError::InvalidTimestamp(OutOfBounds { - min: None, - max, - found, - }))? - } + Err(BlockError::InvalidTimestamp(OutOfBounds { + min: None, + max, + found, + }))? + } - // Retrieve the parent state - let parent_state = self.state(&parent)?; - // Try to apply current state, apply() will further check signer and recent signer. - let mut new_state = parent_state.clone(); - new_state.apply(header, header.number() % self.epoch_length == 0)?; - new_state.calc_next_timestamp(header.timestamp(), self.period)?; - self.block_state_by_hash.write().insert(header.hash(), new_state); + // Retrieve the parent state + let parent_state = self.state(&parent)?; + // Try to apply current state, apply() will further check signer and recent signer. 
+ let mut new_state = parent_state.clone(); + new_state.apply(header, header.number() % self.epoch_length == 0)?; + new_state.calc_next_timestamp(header.timestamp(), self.period)?; + self.block_state_by_hash + .write() + .insert(header.hash(), new_state); - Ok(()) - } + Ok(()) + } - fn genesis_epoch_data(&self, header: &Header, _call: &Call) -> Result, String> { - let mut state = self.new_checkpoint_state(header).expect("Unable to parse genesis data."); - state.calc_next_timestamp(header.timestamp(), self.period).map_err(|e| format!("{}", e))?; - self.block_state_by_hash.write().insert(header.hash(), state); + fn genesis_epoch_data(&self, header: &Header, _call: &Call) -> Result, String> { + let mut state = self + .new_checkpoint_state(header) + .expect("Unable to parse genesis data."); + state + .calc_next_timestamp(header.timestamp(), self.period) + .map_err(|e| format!("{}", e))?; + self.block_state_by_hash + .write() + .insert(header.hash(), state); - // no proof. - Ok(Vec::new()) - } + // no proof. + Ok(Vec::new()) + } - // Our task here is to set difficulty - fn populate_from_parent(&self, header: &mut Header, parent: &Header) { - // TODO(https://github.com/paritytech/parity-ethereum/issues/10410): this is a horrible hack, - // it is due to the fact that enact and miner both use OpenBlock::new() which will both call - // this function. more refactoring is definitely needed. - if header.extra_data().len() < VANITY_LENGTH + SIGNATURE_LENGTH { - trace!(target: "engine", "populate_from_parent in sealing"); + // Our task here is to set difficulty + fn populate_from_parent(&self, header: &mut Header, parent: &Header) { + // TODO(https://github.com/paritytech/parity-ethereum/issues/10410): this is a horrible hack, + // it is due to the fact that enact and miner both use OpenBlock::new() which will both call + // this function. more refactoring is definitely needed. 
+ if header.extra_data().len() < VANITY_LENGTH + SIGNATURE_LENGTH { + trace!(target: "engine", "populate_from_parent in sealing"); - // It's unclear how to prevent creating new blocks unless we are authorized, the best way (and geth does this too) - // it's just to ignore setting a correct difficulty here, we will check authorization in next step in generate_seal anyway. - if let Some(signer) = self.signer.read().as_ref() { - let state = match self.state(&parent) { - Err(e) => { - trace!(target: "engine", "populate_from_parent: Unable to find parent state: {}, ignored.", e); - return; - } - Ok(state) => state, - }; + // It's unclear how to prevent creating new blocks unless we are authorized, the best way (and geth does this too) + // it's just to ignore setting a correct difficulty here, we will check authorization in next step in generate_seal anyway. + if let Some(signer) = self.signer.read().as_ref() { + let state = match self.state(&parent) { + Err(e) => { + trace!(target: "engine", "populate_from_parent: Unable to find parent state: {}, ignored.", e); + return; + } + Ok(state) => state, + }; - if state.is_authorized(&signer.address()) { - if state.is_inturn(header.number(), &signer.address()) { - header.set_difficulty(DIFF_INTURN); - } else { - header.set_difficulty(DIFF_NOTURN); - } - } + if state.is_authorized(&signer.address()) { + if state.is_inturn(header.number(), &signer.address()) { + header.set_difficulty(DIFF_INTURN); + } else { + header.set_difficulty(DIFF_NOTURN); + } + } - let zero_padding_len = VANITY_LENGTH - header.extra_data().len(); - if zero_padding_len > 0 { - let mut resized_extra_data = header.extra_data().clone(); - resized_extra_data.resize(VANITY_LENGTH, 0); - header.set_extra_data(resized_extra_data); - } - } else { - trace!(target: "engine", "populate_from_parent: no signer registered"); - } - } - } + let zero_padding_len = VANITY_LENGTH - header.extra_data().len(); + if zero_padding_len > 0 { + let mut resized_extra_data = 
header.extra_data().clone(); + resized_extra_data.resize(VANITY_LENGTH, 0); + header.set_extra_data(resized_extra_data); + } + } else { + trace!(target: "engine", "populate_from_parent: no signer registered"); + } + } + } - fn set_signer(&self, signer: Box) { - trace!(target: "engine", "set_signer: {}", signer.address()); - *self.signer.write() = Some(signer); - } + fn set_signer(&self, signer: Box) { + trace!(target: "engine", "set_signer: {}", signer.address()); + *self.signer.write() = Some(signer); + } - fn register_client(&self, client: Weak) { - *self.client.write() = Some(client.clone()); - } + fn register_client(&self, client: Weak) { + *self.client.write() = Some(client.clone()); + } - fn step(&self) { - if self.signer.read().is_some() { - if let Some(ref weak) = *self.client.read() { - if let Some(c) = weak.upgrade() { - c.update_sealing(ForceUpdateSealing::No); - } - } - } - } + fn step(&self) { + if self.signer.read().is_some() { + if let Some(ref weak) = *self.client.read() { + if let Some(c) = weak.upgrade() { + c.update_sealing(ForceUpdateSealing::No); + } + } + } + } - /// Clique timestamp is set to parent + period , or current time which ever is higher. - fn open_block_header_timestamp(&self, parent_timestamp: u64) -> u64 { - let now = time::SystemTime::now().duration_since(time::UNIX_EPOCH).unwrap_or_default(); - cmp::max(now.as_secs() as u64, parent_timestamp.saturating_add(self.period)) - } + /// Clique timestamp is set to parent + period , or current time which ever is higher. 
+ fn open_block_header_timestamp(&self, parent_timestamp: u64) -> u64 { + let now = time::SystemTime::now() + .duration_since(time::UNIX_EPOCH) + .unwrap_or_default(); + cmp::max( + now.as_secs() as u64, + parent_timestamp.saturating_add(self.period), + ) + } - fn is_timestamp_valid(&self, header_timestamp: u64, parent_timestamp: u64) -> bool { - header_timestamp >= parent_timestamp.saturating_add(self.period) - } + fn is_timestamp_valid(&self, header_timestamp: u64, parent_timestamp: u64) -> bool { + header_timestamp >= parent_timestamp.saturating_add(self.period) + } - fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { - super::total_difficulty_fork_choice(new, current) - } + fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { + super::total_difficulty_fork_choice(new, current) + } - // Clique uses the author field for voting, the real author is hidden in the `extra_data` field. - // So when executing tx's (like in `enact()`) we want to use the executive author - fn executive_author(&self, header: &Header) -> Result { - recover_creator(header) - } + // Clique uses the author field for voting, the real author is hidden in the `extra_data` field. + // So when executing tx's (like in `enact()`) we want to use the executive author + fn executive_author(&self, header: &Header) -> Result { + recover_creator(header) + } } diff --git a/ethcore/src/engines/clique/params.rs b/ethcore/src/engines/clique/params.rs index e24edfcba..500715734 100644 --- a/ethcore/src/engines/clique/params.rs +++ b/ethcore/src/engines/clique/params.rs @@ -20,22 +20,19 @@ use ethjson; /// `Clique` params. 
pub struct CliqueParams { - /// Period as defined in EIP - pub period: u64, - /// Epoch length as defined in EIP - pub epoch: u64, + /// Period as defined in EIP + pub period: u64, + /// Epoch length as defined in EIP + pub epoch: u64, } impl From for CliqueParams { - fn from(p: ethjson::spec::CliqueParams) -> Self { - let period = p.period.map_or_else(|| 30000 as u64, Into::into); - let epoch = p.epoch.map_or_else(|| 15 as u64, Into::into); + fn from(p: ethjson::spec::CliqueParams) -> Self { + let period = p.period.map_or_else(|| 30000 as u64, Into::into); + let epoch = p.epoch.map_or_else(|| 15 as u64, Into::into); - assert!(epoch > 0); + assert!(epoch > 0); - CliqueParams { - period, - epoch, - } - } + CliqueParams { period, epoch } + } } diff --git a/ethcore/src/engines/clique/tests.rs b/ethcore/src/engines/clique/tests.rs index c7916192d..2f5816a08 100644 --- a/ethcore/src/engines/clique/tests.rs +++ b/ethcore/src/engines/clique/tests.rs @@ -16,789 +16,1321 @@ //! Consensus tests for `PoA Clique Engine`, see http://eips.ethereum.org/EIPS/eip-225 for more information +use super::*; use block::*; use engines::Engine; use error::{Error, ErrorKind}; use ethereum_types::{Address, H256}; -use ethkey::{Secret, KeyPair}; +use ethkey::{KeyPair, Secret}; use state_db::StateDB; -use super::*; use test_helpers::get_temp_state_db; -use std::sync::Arc; -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; /// Possible signers pub const SIGNER_TAGS: [char; 6] = ['A', 'B', 'C', 'D', 'E', 'F']; /// Clique block types pub enum CliqueBlockType { - /// Epoch transition block must contain list of signers - Checkpoint, - /// Block with no votes - Empty, - /// Vote - Vote(VoteType), + /// Epoch transition block must contain list of signers + Checkpoint, + /// Block with no votes + Empty, + /// Vote + Vote(VoteType), } /// Clique tester pub struct CliqueTester { - /// Mocked Clique - pub clique: Clique, - /// Mocked genesis state - pub genesis: Header, - /// 
StateDB - pub db: StateDB, - /// List of signers - pub signers: HashMap, + /// Mocked Clique + pub clique: Clique, + /// Mocked genesis state + pub genesis: Header, + /// StateDB + pub db: StateDB, + /// List of signers + pub signers: HashMap, } impl CliqueTester { - /// Create a `Clique` tester with settings - pub fn with(epoch: u64, period: u64, initial_signers: Vec) -> Self { - assert_eq!(initial_signers.iter().all(|s| SIGNER_TAGS.contains(s)), true, - "Not all the initial signers is in SIGNER_TAGS, possible keys are 'A' ..= 'F'"); + /// Create a `Clique` tester with settings + pub fn with(epoch: u64, period: u64, initial_signers: Vec) -> Self { + assert_eq!( + initial_signers.iter().all(|s| SIGNER_TAGS.contains(s)), + true, + "Not all the initial signers is in SIGNER_TAGS, possible keys are 'A' ..= 'F'" + ); - let clique = Clique::with_test(epoch, period); - let mut genesis = Header::default(); - let mut signers = HashMap::new(); + let clique = Clique::with_test(epoch, period); + let mut genesis = Header::default(); + let mut signers = HashMap::new(); - let call = |_a, _b| { - unimplemented!("Clique doesn't use Engine::Call"); - }; + let call = |_a, _b| { + unimplemented!("Clique doesn't use Engine::Call"); + }; - let mut extra_data = vec![0; VANITY_LENGTH]; + let mut extra_data = vec![0; VANITY_LENGTH]; - for &signer in SIGNER_TAGS.iter() { - let secret = Secret::from(H256::from(signer as u64)); - let keypair = KeyPair::from_secret(secret).unwrap(); - if initial_signers.contains(&signer) { - extra_data.extend(&*keypair.address()); - } - signers.insert(signer, keypair); - } + for &signer in SIGNER_TAGS.iter() { + let secret = Secret::from(H256::from(signer as u64)); + let keypair = KeyPair::from_secret(secret).unwrap(); + if initial_signers.contains(&signer) { + extra_data.extend(&*keypair.address()); + } + signers.insert(signer, keypair); + } - // append dummy signature - extra_data.extend(std::iter::repeat(0).take(SIGNATURE_LENGTH)); + // append dummy 
signature + extra_data.extend(std::iter::repeat(0).take(SIGNATURE_LENGTH)); - genesis.set_extra_data(extra_data); - genesis.set_gas_limit(U256::from(0xa00000)); - genesis.set_difficulty(U256::from(1)); - genesis.set_seal(util::null_seal()); + genesis.set_extra_data(extra_data); + genesis.set_gas_limit(U256::from(0xa00000)); + genesis.set_difficulty(U256::from(1)); + genesis.set_seal(util::null_seal()); - clique.genesis_epoch_data(&genesis, &call).expect("Create genesis failed"); - Self {clique, genesis, db: get_temp_state_db(), signers} - } + clique + .genesis_epoch_data(&genesis, &call) + .expect("Create genesis failed"); + Self { + clique, + genesis, + db: get_temp_state_db(), + signers, + } + } - /// Get difficulty for a given block - pub fn get_difficulty(&self, block_num: BlockNumber, header: &Header, signer: &Address) -> U256 { - let state = self.clique.state(header).unwrap(); - if state.is_inturn(block_num, signer) { - DIFF_INTURN - } else { - DIFF_NOTURN - } - } + /// Get difficulty for a given block + pub fn get_difficulty( + &self, + block_num: BlockNumber, + header: &Header, + signer: &Address, + ) -> U256 { + let state = self.clique.state(header).unwrap(); + if state.is_inturn(block_num, signer) { + DIFF_INTURN + } else { + DIFF_NOTURN + } + } - /// Get the state of a given block - // Note, this will read the cache and `will` not work with more than 128 blocks - pub fn get_state_at_block(&self, hash: &H256) -> CliqueBlockState { - self.clique.block_state_by_hash.write() - .get_mut(hash) - .expect("CliqueBlockState not found tested failed") - .clone() - } + /// Get the state of a given block + // Note, this will read the cache and `will` not work with more than 128 blocks + pub fn get_state_at_block(&self, hash: &H256) -> CliqueBlockState { + self.clique + .block_state_by_hash + .write() + .get_mut(hash) + .expect("CliqueBlockState not found tested failed") + .clone() + } - /// Get signers after a certain state - // This is generally used to fetch the 
state after a test has been executed and checked against - // the intial list of signers provided in the test - pub fn clique_signers(&self, hash: &H256) -> impl Iterator { - self.get_state_at_block(hash).signers().clone().into_iter() - } + /// Get signers after a certain state + // This is generally used to fetch the state after a test has been executed and checked against + // the intial list of signers provided in the test + pub fn clique_signers(&self, hash: &H256) -> impl Iterator { + self.get_state_at_block(hash).signers().clone().into_iter() + } - /// Fetches all addresses at current `block` and converts them back to `tags (char)` and sorts them - /// Addresses are supposed sorted based on address but these tests are using `tags` just for simplicity - /// and the order is not important! - pub fn into_tags>(&self, addr: T) -> Vec { - let mut tags: Vec = addr.filter_map(|addr| { - for (t, kp) in self.signers.iter() { - if addr == kp.address() { - return Some(*t) - } - } - None - }) - .collect(); + /// Fetches all addresses at current `block` and converts them back to `tags (char)` and sorts them + /// Addresses are supposed sorted based on address but these tests are using `tags` just for simplicity + /// and the order is not important! + pub fn into_tags>(&self, addr: T) -> Vec { + let mut tags: Vec = addr + .filter_map(|addr| { + for (t, kp) in self.signers.iter() { + if addr == kp.address() { + return Some(*t); + } + } + None + }) + .collect(); - tags.sort(); - tags - } + tags.sort(); + tags + } - /// Create a new `Clique` block and import - pub fn new_block_and_import( - &self, - block_type: CliqueBlockType, - last_header: &Header, - beneficary: Option
, - signer: char, - ) -> Result { + /// Create a new `Clique` block and import + pub fn new_block_and_import( + &self, + block_type: CliqueBlockType, + last_header: &Header, + beneficary: Option
, + signer: char, + ) -> Result { + let mut extra_data = vec![0; VANITY_LENGTH]; + let mut seal = util::null_seal(); + let last_hash = last_header.hash(); - let mut extra_data = vec![0; VANITY_LENGTH]; - let mut seal = util::null_seal(); - let last_hash = last_header.hash(); + match block_type { + CliqueBlockType::Checkpoint => { + let signers = self.clique.state(&last_header).unwrap().signers().clone(); + for signer in signers { + extra_data.extend(&*signer); + } + } + CliqueBlockType::Vote(v) => seal = v.as_rlp(), + CliqueBlockType::Empty => (), + }; - match block_type { - CliqueBlockType::Checkpoint => { - let signers = self.clique.state(&last_header).unwrap().signers().clone(); - for signer in signers { - extra_data.extend(&*signer); - } - } - CliqueBlockType::Vote(v) => seal = v.as_rlp(), - CliqueBlockType::Empty => (), - }; + let db = self.db.boxed_clone(); - let db = self.db.boxed_clone(); + let mut block = OpenBlock::new( + &self.clique, + Default::default(), + false, + db, + &last_header.clone(), + Arc::new(vec![last_hash]), + beneficary.unwrap_or_default(), + (3141562.into(), 31415620.into()), + extra_data, + false, + None, + ) + .unwrap(); - let mut block = OpenBlock::new( - &self.clique, - Default::default(), - false, - db, - &last_header.clone(), - Arc::new(vec![last_hash]), - beneficary.unwrap_or_default(), - (3141562.into(), 31415620.into()), - extra_data, - false, - None, - ).unwrap(); + { + let difficulty = self.get_difficulty( + block.header.number(), + last_header, + &self.signers[&signer].address(), + ); + let b = block.block_mut(); + b.header + .set_timestamp(last_header.timestamp() + self.clique.period); + b.header.set_difficulty(difficulty); + b.header.set_seal(seal); - { - let difficulty = self.get_difficulty(block.header.number(), last_header, &self.signers[&signer].address()); - let b = block.block_mut(); - b.header.set_timestamp(last_header.timestamp() + self.clique.period); - b.header.set_difficulty(difficulty); - 
b.header.set_seal(seal); + let sign = ethkey::sign(self.signers[&signer].secret(), &b.header.hash()).unwrap(); + let mut extra_data = b.header.extra_data().clone(); + extra_data.extend_from_slice(&*sign); + b.header.set_extra_data(extra_data); + } - let sign = ethkey::sign(self.signers[&signer].secret(), &b.header.hash()).unwrap(); - let mut extra_data = b.header.extra_data().clone(); - extra_data.extend_from_slice(&*sign); - b.header.set_extra_data(extra_data); - } + let current_header = &block.header; + self.clique.verify_block_basic(current_header)?; + self.clique + .verify_block_family(current_header, &last_header)?; - let current_header = &block.header; - self.clique.verify_block_basic(current_header)?; - self.clique.verify_block_family(current_header, &last_header)?; - - Ok(current_header.clone()) - } + Ok(current_header.clone()) + } } #[test] fn one_signer_with_no_votes() { - let tester = CliqueTester::with(10, 1, vec!['A']); + let tester = CliqueTester::with(10, 1, vec!['A']); - let empty_block = tester.new_block_and_import(CliqueBlockType::Empty, &tester.genesis, None, 'A').unwrap(); + let empty_block = tester + .new_block_and_import(CliqueBlockType::Empty, &tester.genesis, None, 'A') + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&empty_block.hash())); - assert_eq!(&tags, &['A']); + let tags = tester.into_tags(tester.clique_signers(&empty_block.hash())); + assert_eq!(&tags, &['A']); } #[test] fn one_signer_two_votes() { - let tester = CliqueTester::with(10, 1, vec!['A']); + let tester = CliqueTester::with(10, 1, vec!['A']); - // Add a vote for `B` signed by `A` - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &tester.genesis, - Some(tester.signers[&'B'].address()), 'A').unwrap(); - let tags = tester.into_tags(tester.clique_signers(&vote.hash())); - assert_eq!(&tags, &['A', 'B']); + // Add a vote for `B` signed by `A` + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + 
&tester.genesis, + Some(tester.signers[&'B'].address()), + 'A', + ) + .unwrap(); + let tags = tester.into_tags(tester.clique_signers(&vote.hash())); + assert_eq!(&tags, &['A', 'B']); - // Add a empty block signed by `B` - let empty = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'B').unwrap(); + // Add a empty block signed by `B` + let empty = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'B') + .unwrap(); - // Add vote for `C` signed by A but should not be accepted - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &empty, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + // Add vote for `C` signed by A but should not be accepted + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &empty, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&vote.hash())); - assert_eq!(&tags, &['A', 'B']); + let tags = tester.into_tags(tester.clique_signers(&vote.hash())); + assert_eq!(&tags, &['A', 'B']); } #[test] fn two_signers_six_votes_deny_last() { - let tester = CliqueTester::with(10, 1, vec!['A', 'B']); + let tester = CliqueTester::with(10, 1, vec!['A', 'B']); - let mut prev_header = tester.genesis.clone(); + let mut prev_header = tester.genesis.clone(); - // Add two votes for `C` signed by `A` and `B` - for &signer in SIGNER_TAGS.iter().take(2) { - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &prev_header, - Some(tester.signers[&'C'].address()), signer).unwrap(); - prev_header = vote.clone(); - } + // Add two votes for `C` signed by `A` and `B` + for &signer in SIGNER_TAGS.iter().take(2) { + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &prev_header, + Some(tester.signers[&'C'].address()), + signer, + ) + .unwrap(); + prev_header = vote.clone(); + } - // Add two votes for `D` signed by `A` and `B` - for &signer in 
SIGNER_TAGS.iter().take(2) { - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &prev_header, - Some(tester.signers[&'D'].address()), signer).unwrap(); - prev_header = vote.clone(); - } + // Add two votes for `D` signed by `A` and `B` + for &signer in SIGNER_TAGS.iter().take(2) { + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &prev_header, + Some(tester.signers[&'D'].address()), + signer, + ) + .unwrap(); + prev_header = vote.clone(); + } - // Add a empty block signed by `C` - let empty = tester.new_block_and_import(CliqueBlockType::Empty, &prev_header, None, 'C').unwrap(); - prev_header = empty.clone(); + // Add a empty block signed by `C` + let empty = tester + .new_block_and_import(CliqueBlockType::Empty, &prev_header, None, 'C') + .unwrap(); + prev_header = empty.clone(); - // Add two votes for `E` signed by `A` and `B` - for &signer in SIGNER_TAGS.iter().take(2) { - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &prev_header, - Some(tester.signers[&'E'].address()), signer).unwrap(); - prev_header = vote.clone(); - } + // Add two votes for `E` signed by `A` and `B` + for &signer in SIGNER_TAGS.iter().take(2) { + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &prev_header, + Some(tester.signers[&'E'].address()), + signer, + ) + .unwrap(); + prev_header = vote.clone(); + } - let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); - assert_eq!(&tags, &['A', 'B', 'C', 'D']); + let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); + assert_eq!(&tags, &['A', 'B', 'C', 'D']); } #[test] fn one_signer_dropping_itself() { - let tester = CliqueTester::with(10, 1, vec!['A']); - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &tester.genesis, - Some(tester.signers[&'A'].address()), 'A').unwrap(); - let signers = tester.clique_signers(&vote.hash()); - assert!(signers.count() 
== 0); + let tester = CliqueTester::with(10, 1, vec!['A']); + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'A'].address()), + 'A', + ) + .unwrap(); + let signers = tester.clique_signers(&vote.hash()); + assert!(signers.count() == 0); } #[test] fn two_signers_one_remove_vote_no_consensus() { - let tester = CliqueTester::with(10, 1, vec!['A', 'B']); - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &tester.genesis, - Some(tester.signers[&'B'].address()), 'A').unwrap(); + let tester = CliqueTester::with(10, 1, vec!['A', 'B']); + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'B'].address()), + 'A', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&vote.hash())); - assert_eq!(&tags, &['A', 'B']); + let tags = tester.into_tags(tester.clique_signers(&vote.hash())); + assert_eq!(&tags, &['A', 'B']); } #[test] fn two_signers_consensus_remove_b() { - let tester = CliqueTester::with(10, 1, vec!['A', 'B']); - let first_vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &tester.genesis, - Some(tester.signers[&'B'].address()), 'A').unwrap(); - let second_vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &first_vote, - Some(tester.signers[&'B'].address()), 'B').unwrap(); + let tester = CliqueTester::with(10, 1, vec!['A', 'B']); + let first_vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'B'].address()), + 'A', + ) + .unwrap(); + let second_vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &first_vote, + Some(tester.signers[&'B'].address()), + 'B', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&second_vote.hash())); - assert_eq!(&tags, &['A']); + let tags = 
tester.into_tags(tester.clique_signers(&second_vote.hash())); + assert_eq!(&tags, &['A']); } #[test] fn three_signers_consensus_remove_c() { - let tester = CliqueTester::with(10, 1, vec!['A', 'B', 'C']); - let first_vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &tester.genesis, - Some(tester.signers[&'C'].address()), 'A').unwrap(); - let second_vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &first_vote, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + let tester = CliqueTester::with(10, 1, vec!['A', 'B', 'C']); + let first_vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); + let second_vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &first_vote, + Some(tester.signers[&'C'].address()), + 'B', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&second_vote.hash())); - assert_eq!(&tags, &['A', 'B']); + let tags = tester.into_tags(tester.clique_signers(&second_vote.hash())); + assert_eq!(&tags, &['A', 'B']); } #[test] fn four_signers_half_no_consensus() { - let tester = CliqueTester::with(10, 1, vec!['A', 'B', 'C', 'D']); - let first_vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &tester.genesis, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + let tester = CliqueTester::with(10, 1, vec!['A', 'B', 'C', 'D']); + let first_vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); - let second_vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &first_vote, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + let second_vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &first_vote, + Some(tester.signers[&'C'].address()), + 'B', + ) + 
.unwrap(); - let tags = tester.into_tags(tester.clique_signers(&second_vote.hash())); - assert_eq!(&tags, &['A', 'B', 'C', 'D']); + let tags = tester.into_tags(tester.clique_signers(&second_vote.hash())); + assert_eq!(&tags, &['A', 'B', 'C', 'D']); } #[test] fn four_signers_three_consensus_rm() { - let tester = CliqueTester::with(10, 1, vec!['A', 'B', 'C', 'D']); + let tester = CliqueTester::with(10, 1, vec!['A', 'B', 'C', 'D']); - let mut prev_header = tester.genesis.clone(); + let mut prev_header = tester.genesis.clone(); - // Three votes to remove `D` signed by ['A', 'B', 'C'] - for signer in SIGNER_TAGS.iter().take(3) { - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &prev_header, - Some(tester.signers[&'D'].address()), *signer).unwrap(); - prev_header = vote.clone(); - } + // Three votes to remove `D` signed by ['A', 'B', 'C'] + for signer in SIGNER_TAGS.iter().take(3) { + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &prev_header, + Some(tester.signers[&'D'].address()), + *signer, + ) + .unwrap(); + prev_header = vote.clone(); + } - let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); - assert_eq!(&tags, &['A', 'B', 'C']); + let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); + assert_eq!(&tags, &['A', 'B', 'C']); } #[test] fn vote_add_only_counted_once_per_signer() { - let tester = CliqueTester::with(10, 1, vec!['A', 'B']); + let tester = CliqueTester::with(10, 1, vec!['A', 'B']); - // Add a vote for `C` signed by `A` - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &tester.genesis, - Some(tester.signers[&'C'].address()), 'A').unwrap(); - // Empty block signed by B` - let empty = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'B').unwrap(); + // Add a vote for `C` signed by `A` + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &tester.genesis, + 
Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); + // Empty block signed by B` + let empty = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'B') + .unwrap(); - // Add a vote for `C` signed by `A` - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &empty, - Some(tester.signers[&'C'].address()), 'A').unwrap(); - // Empty block signed by `B` - let empty = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'B').unwrap(); + // Add a vote for `C` signed by `A` + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &empty, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); + // Empty block signed by `B` + let empty = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'B') + .unwrap(); - // Add a vote for `C` signed by `A` - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &empty, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + // Add a vote for `C` signed by `A` + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &empty, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&vote.hash())); - assert_eq!(&tags, &['A', 'B']); + let tags = tester.into_tags(tester.clique_signers(&vote.hash())); + assert_eq!(&tags, &['A', 'B']); } #[test] fn vote_add_concurrently_is_permitted() { - let tester = CliqueTester::with(10, 1, vec!['A', 'B']); + let tester = CliqueTester::with(10, 1, vec!['A', 'B']); - // Add a vote for `C` signed by `A` - let b = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &tester.genesis, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + // Add a vote for `C` signed by `A` + let b = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &tester.genesis, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); - // Empty block signed by `B` - let 
b = tester.new_block_and_import(CliqueBlockType::Empty, &b, None, 'B').unwrap(); + // Empty block signed by `B` + let b = tester + .new_block_and_import(CliqueBlockType::Empty, &b, None, 'B') + .unwrap(); - // Add a vote for `D` signed by `A` - let b = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &b, - Some(tester.signers[&'D'].address()), 'A').unwrap(); + // Add a vote for `D` signed by `A` + let b = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &b, + Some(tester.signers[&'D'].address()), + 'A', + ) + .unwrap(); - // Empty block signed by `B` - let b = tester.new_block_and_import(CliqueBlockType::Empty, &b, None, 'B').unwrap(); + // Empty block signed by `B` + let b = tester + .new_block_and_import(CliqueBlockType::Empty, &b, None, 'B') + .unwrap(); - // Empty block signed by `A` - let b = tester.new_block_and_import(CliqueBlockType::Empty, &b, None, 'A').unwrap(); + // Empty block signed by `A` + let b = tester + .new_block_and_import(CliqueBlockType::Empty, &b, None, 'A') + .unwrap(); - // Add a vote for `D` signed by `B` - let b = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &b, - Some(tester.signers[&'D'].address()), 'B').unwrap(); + // Add a vote for `D` signed by `B` + let b = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &b, + Some(tester.signers[&'D'].address()), + 'B', + ) + .unwrap(); - // Empty block signed by `A` - let b = tester.new_block_and_import(CliqueBlockType::Empty, &b, None, 'A').unwrap(); + // Empty block signed by `A` + let b = tester + .new_block_and_import(CliqueBlockType::Empty, &b, None, 'A') + .unwrap(); - // Add a vote for `C` signed by `B` - let b = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &b, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + // Add a vote for `C` signed by `B` + let b = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &b, + Some(tester.signers[&'C'].address()), + 
'B', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&b.hash())); - assert_eq!(&tags, &['A', 'B', 'C', 'D']); + let tags = tester.into_tags(tester.clique_signers(&b.hash())); + assert_eq!(&tags, &['A', 'B', 'C', 'D']); } #[test] fn vote_rm_only_counted_once_per_signer() { - let tester = CliqueTester::with(10, 1, vec!['A', 'B']); + let tester = CliqueTester::with(10, 1, vec!['A', 'B']); - let mut prev_header = tester.genesis.clone(); + let mut prev_header = tester.genesis.clone(); - for _ in 0..2 { - // Vote to remove `B` signed by `A` - let b = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &prev_header, - Some(tester.signers[&'B'].address()), 'A').unwrap(); - // Empty block signed by `B` - let b = tester.new_block_and_import(CliqueBlockType::Empty, &b, None, 'B').unwrap(); + for _ in 0..2 { + // Vote to remove `B` signed by `A` + let b = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &prev_header, + Some(tester.signers[&'B'].address()), + 'A', + ) + .unwrap(); + // Empty block signed by `B` + let b = tester + .new_block_and_import(CliqueBlockType::Empty, &b, None, 'B') + .unwrap(); - prev_header = b.clone(); - } + prev_header = b.clone(); + } - // Add a vote for `B` signed by `A` - let b = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &prev_header, - Some(tester.signers[&'B'].address()), 'A').unwrap(); + // Add a vote for `B` signed by `A` + let b = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &prev_header, + Some(tester.signers[&'B'].address()), + 'A', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&b.hash())); - assert_eq!(&tags, &['A', 'B']); + let tags = tester.into_tags(tester.clique_signers(&b.hash())); + assert_eq!(&tags, &['A', 'B']); } #[test] fn vote_rm_concurrently_is_permitted() { - let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D']); + let tester = CliqueTester::with(100, 1, vec!['A', 'B', 
'C', 'D']); - // Add a vote for `C` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &tester.genesis, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + // Add a vote for `C` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); - // Empty block signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'B').unwrap(); - // Empty block signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'C').unwrap(); + // Empty block signed by `B` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'B') + .unwrap(); + // Empty block signed by `C` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'C') + .unwrap(); - // Add a vote for `D` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'A').unwrap(); + // Add a vote for `D` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'A', + ) + .unwrap(); - // Empty block signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'B').unwrap(); - // Empty block signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'C').unwrap(); - // Empty block signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'A').unwrap(); + // Empty block signed by `B` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'B') + .unwrap(); + // Empty block signed by `C` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'C') + .unwrap(); + // Empty block signed by `A` + let 
block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'A') + .unwrap(); - // Add a vote for `D` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'B').unwrap(); - // Add a vote for `D` signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'C').unwrap(); + // Add a vote for `D` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'B', + ) + .unwrap(); + // Add a vote for `D` signed by `C` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'C', + ) + .unwrap(); - // Empty block signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'A').unwrap(); - // Add a vote for `C` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + // Empty block signed by `A` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'A') + .unwrap(); + // Add a vote for `C` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'C'].address()), + 'B', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['A', 'B']); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!(&tags, &['A', 'B']); } #[test] fn vote_to_rm_are_immediate_and_ensure_votes_are_rm() { - let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C']); + let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C']); - // Vote to remove `B` signed by `C` - let block = 
tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &tester.genesis, - Some(tester.signers[&'B'].address()), 'C').unwrap(); - // Vote to remove `C` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'C'].address()), 'A').unwrap(); - // Vote to remove `C` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'C'].address()), 'B').unwrap(); - // Vote to remove `B` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'B'].address()), 'A').unwrap(); + // Vote to remove `B` signed by `C` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'B'].address()), + 'C', + ) + .unwrap(); + // Vote to remove `C` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); + // Vote to remove `C` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'C'].address()), + 'B', + ) + .unwrap(); + // Vote to remove `B` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'B'].address()), + 'A', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['A', 'B']); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!(&tags, &['A', 'B']); } #[test] fn vote_to_rm_are_immediate_and_votes_should_be_dropped_from_kicked_signer() { - let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C']); + let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C']); - // Vote to add `D` signed by `C` - let block = 
tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &tester.genesis, - Some(tester.signers[&'D'].address()), 'C').unwrap(); - // Vote to remove `C` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + // Vote to add `D` signed by `C` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &tester.genesis, + Some(tester.signers[&'D'].address()), + 'C', + ) + .unwrap(); + // Vote to remove `C` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); - // Vote to remove `C` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + // Vote to remove `C` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'C'].address()), + 'B', + ) + .unwrap(); - // Vote to add `D` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &block, - Some(tester.signers[&'D'].address()), 'A').unwrap(); + // Vote to add `D` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &block, + Some(tester.signers[&'D'].address()), + 'A', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['A', 'B']); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!(&tags, &['A', 'B']); } #[test] fn cascading_not_allowed() { - let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D']); + let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D']); - // Vote against `C` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), 
&tester.genesis, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + // Vote against `C` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); - // Empty block signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'B').unwrap(); + // Empty block signed by `B` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'B') + .unwrap(); - // Empty block signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'C').unwrap(); + // Empty block signed by `C` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'C') + .unwrap(); - // Vote against `D` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'A').unwrap(); + // Vote against `D` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'A', + ) + .unwrap(); - // Vote against `C` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + // Vote against `C` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'C'].address()), + 'B', + ) + .unwrap(); - // Empty block signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'C').unwrap(); + // Empty block signed by `C` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'C') + .unwrap(); - // Empty block signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'A').unwrap(); + // Empty block signed by `A` + let block = tester + 
.new_block_and_import(CliqueBlockType::Empty, &block, None, 'A') + .unwrap(); - // Vote against `D` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'B').unwrap(); + // Vote against `D` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'B', + ) + .unwrap(); - // Vote against `D` signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'C').unwrap(); + // Vote against `D` signed by `C` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'C', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['A', 'B', 'C']); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!(&tags, &['A', 'B', 'C']); } #[test] fn consensus_out_of_bounds_consensus_execute_on_touch() { - let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D']); + let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D']); - // Vote against `C` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &tester.genesis, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + // Vote against `C` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); - // Empty block signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'B').unwrap(); + // Empty block signed by `B` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'B') + .unwrap(); - // Empty block signed by `C` - let block = 
tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'C').unwrap(); + // Empty block signed by `C` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'C') + .unwrap(); - // Vote against `D` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'A').unwrap(); + // Vote against `D` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'A', + ) + .unwrap(); - // Vote against `C` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + // Vote against `C` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'C'].address()), + 'B', + ) + .unwrap(); - // Empty block signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'C').unwrap(); + // Empty block signed by `C` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'C') + .unwrap(); - // Empty block signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'A').unwrap(); + // Empty block signed by `A` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'A') + .unwrap(); - // Vote against `D` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'B').unwrap(); + // Vote against `D` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'B', + ) + .unwrap(); - // Vote against `D` signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, 
- Some(tester.signers[&'D'].address()), 'C').unwrap(); + // Vote against `D` signed by `C` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'C', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['A', 'B', 'C'], "D should have been removed after 3/4 remove votes"); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!( + &tags, + &['A', 'B', 'C'], + "D should have been removed after 3/4 remove votes" + ); - // Empty block signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'A').unwrap(); + // Empty block signed by `A` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'A') + .unwrap(); - // Vote for `C` signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &block, - Some(tester.signers[&'C'].address()), 'C').unwrap(); + // Vote for `C` signed by `C` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &block, + Some(tester.signers[&'C'].address()), + 'C', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['A', 'B']); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!(&tags, &['A', 'B']); } #[test] fn consensus_out_of_bounds_first_touch() { - let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D']); + let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D']); - // Vote against `C` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &tester.genesis, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + // Vote against `C` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &tester.genesis, + Some(tester.signers[&'C'].address()), + 'A', + ) + 
.unwrap(); - // Empty block signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'B').unwrap(); + // Empty block signed by `B` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'B') + .unwrap(); - // Empty block signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'C').unwrap(); + // Empty block signed by `C` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'C') + .unwrap(); - // Vote against `D` signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'A').unwrap(); + // Vote against `D` signed by `A` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'A', + ) + .unwrap(); - // Vote against `C` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + // Vote against `C` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'C'].address()), + 'B', + ) + .unwrap(); - // Empty block signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'C').unwrap(); + // Empty block signed by `C` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'C') + .unwrap(); - // Empty block signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'A').unwrap(); + // Empty block signed by `A` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'A') + .unwrap(); - // Vote against `D` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'B').unwrap(); + // Vote 
against `D` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'B', + ) + .unwrap(); - // Vote against `D` signed by `C` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &block, - Some(tester.signers[&'D'].address()), 'C').unwrap(); + // Vote against `D` signed by `C` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &block, + Some(tester.signers[&'D'].address()), + 'C', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['A', 'B', 'C']); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!(&tags, &['A', 'B', 'C']); - // Empty block signed by `A` - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'A').unwrap(); + // Empty block signed by `A` + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'A') + .unwrap(); - // Vote for `C` signed by `B` - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &block, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + // Vote for `C` signed by `B` + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &block, + Some(tester.signers[&'C'].address()), + 'B', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['A', 'B', 'C']); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!(&tags, &['A', 'B', 'C']); } #[test] fn pending_votes_doesnt_survive_authorization_changes() { - let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D', 'E']); + let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D', 'E']); - let mut prev_header = tester.genesis.clone(); + let mut prev_header = tester.genesis.clone(); - // Vote for `F` from [`A`, `B`, `C`] - for sign in 
SIGNER_TAGS.iter().take(3) { - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &prev_header, - Some(tester.signers[&'F'].address()), *sign).unwrap(); - prev_header = block.clone(); - } + // Vote for `F` from [`A`, `B`, `C`] + for sign in SIGNER_TAGS.iter().take(3) { + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &prev_header, + Some(tester.signers[&'F'].address()), + *sign, + ) + .unwrap(); + prev_header = block.clone(); + } - let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); - assert_eq!(&tags, &['A', 'B', 'C', 'D', 'E', 'F'], "F should have been added"); + let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); + assert_eq!( + &tags, + &['A', 'B', 'C', 'D', 'E', 'F'], + "F should have been added" + ); - // Vote against `F` from [`D`, `E`, `B`, `C`] - for sign in SIGNER_TAGS.iter().skip(3).chain(SIGNER_TAGS.iter().skip(1).take(2)) { - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &prev_header, - Some(tester.signers[&'F'].address()), *sign).unwrap(); - prev_header = block.clone(); - } + // Vote against `F` from [`D`, `E`, `B`, `C`] + for sign in SIGNER_TAGS + .iter() + .skip(3) + .chain(SIGNER_TAGS.iter().skip(1).take(2)) + { + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &prev_header, + Some(tester.signers[&'F'].address()), + *sign, + ) + .unwrap(); + prev_header = block.clone(); + } - let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); - assert_eq!(&tags, &['A', 'B', 'C', 'D', 'E'], "F should have been removed"); + let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); + assert_eq!( + &tags, + &['A', 'B', 'C', 'D', 'E'], + "F should have been removed" + ); - // Vote for `F` from [`D`, `E`] - for sign in SIGNER_TAGS.iter().skip(3).take(2) { - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &prev_header, - 
Some(tester.signers[&'F'].address()), *sign).unwrap(); - prev_header = block.clone(); - } + // Vote for `F` from [`D`, `E`] + for sign in SIGNER_TAGS.iter().skip(3).take(2) { + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &prev_header, + Some(tester.signers[&'F'].address()), + *sign, + ) + .unwrap(); + prev_header = block.clone(); + } - // Vote against `A` from [`B`, `C`, `D`] - for sign in SIGNER_TAGS.iter().skip(1).take(3) { - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Remove), &prev_header, - Some(tester.signers[&'A'].address()), *sign).unwrap(); - prev_header = block.clone(); - } + // Vote against `A` from [`B`, `C`, `D`] + for sign in SIGNER_TAGS.iter().skip(1).take(3) { + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Remove), + &prev_header, + Some(tester.signers[&'A'].address()), + *sign, + ) + .unwrap(); + prev_header = block.clone(); + } - let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); - assert_eq!(&tags, &['B', 'C', 'D', 'E'], "A should have been removed"); + let tags = tester.into_tags(tester.clique_signers(&prev_header.hash())); + assert_eq!(&tags, &['B', 'C', 'D', 'E'], "A should have been removed"); - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &prev_header, - Some(tester.signers[&'F'].address()), 'B').unwrap(); + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &prev_header, + Some(tester.signers[&'F'].address()), + 'B', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['B', 'C', 'D', 'E', 'F'], "F should have been added again"); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!( + &tags, + &['B', 'C', 'D', 'E', 'F'], + "F should have been added again" + ); } #[test] fn epoch_transition_reset_all_votes() { - let tester = CliqueTester::with(3, 1, vec!['A', 'B']); + 
let tester = CliqueTester::with(3, 1, vec!['A', 'B']); - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &tester.genesis, - Some(tester.signers[&'C'].address()), 'A').unwrap(); + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &tester.genesis, + Some(tester.signers[&'C'].address()), + 'A', + ) + .unwrap(); - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'B').unwrap(); - let block = tester.new_block_and_import(CliqueBlockType::Checkpoint, &block, None, 'A').unwrap(); + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'B') + .unwrap(); + let block = tester + .new_block_and_import(CliqueBlockType::Checkpoint, &block, None, 'A') + .unwrap(); - let block = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &block, - Some(tester.signers[&'C'].address()), 'B').unwrap(); + let block = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &block, + Some(tester.signers[&'C'].address()), + 'B', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&block.hash())); - assert_eq!(&tags, &['A', 'B'], "Votes should have been reset after checkpoint"); + let tags = tester.into_tags(tester.clique_signers(&block.hash())); + assert_eq!( + &tags, + &['A', 'B'], + "Votes should have been reset after checkpoint" + ); } #[test] fn unauthorized_signer_should_not_be_able_to_sign_block() { - let tester = CliqueTester::with(3, 1, vec!['A']); - let err = tester.new_block_and_import(CliqueBlockType::Empty, &tester.genesis, None, 'B').unwrap_err(); + let tester = CliqueTester::with(3, 1, vec!['A']); + let err = tester + .new_block_and_import(CliqueBlockType::Empty, &tester.genesis, None, 'B') + .unwrap_err(); - match err.kind() { - ErrorKind::Engine(EngineError::NotAuthorized(_)) => (), - _ => assert!(true == false, "Wrong error kind"), - } + match err.kind() { + ErrorKind::Engine(EngineError::NotAuthorized(_)) => (), 
+ _ => assert!(true == false, "Wrong error kind"), + } } #[test] fn signer_should_not_be_able_to_sign_two_consequtive_blocks() { - let tester = CliqueTester::with(3, 1, vec!['A', 'B']); - let b = tester.new_block_and_import(CliqueBlockType::Empty, &tester.genesis, None, 'A').unwrap(); - let err = tester.new_block_and_import(CliqueBlockType::Empty, &b, None, 'A').unwrap_err(); + let tester = CliqueTester::with(3, 1, vec!['A', 'B']); + let b = tester + .new_block_and_import(CliqueBlockType::Empty, &tester.genesis, None, 'A') + .unwrap(); + let err = tester + .new_block_and_import(CliqueBlockType::Empty, &b, None, 'A') + .unwrap_err(); - match err.kind() { - ErrorKind::Engine(EngineError::CliqueTooRecentlySigned(_)) => (), - _ => assert!(true == false, "Wrong error kind"), - } + match err.kind() { + ErrorKind::Engine(EngineError::CliqueTooRecentlySigned(_)) => (), + _ => assert!(true == false, "Wrong error kind"), + } } - #[test] fn recent_signers_should_not_reset_on_checkpoint() { - let tester = CliqueTester::with(3, 1, vec!['A', 'B', 'C']); + let tester = CliqueTester::with(3, 1, vec!['A', 'B', 'C']); - let block = tester.new_block_and_import(CliqueBlockType::Empty, &tester.genesis, None, 'A').unwrap(); - let block = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'B').unwrap(); - let block = tester.new_block_and_import(CliqueBlockType::Checkpoint, &block, None, 'A').unwrap(); + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &tester.genesis, None, 'A') + .unwrap(); + let block = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'B') + .unwrap(); + let block = tester + .new_block_and_import(CliqueBlockType::Checkpoint, &block, None, 'A') + .unwrap(); - let err = tester.new_block_and_import(CliqueBlockType::Empty, &block, None, 'A').unwrap_err(); + let err = tester + .new_block_and_import(CliqueBlockType::Empty, &block, None, 'A') + .unwrap_err(); - match err.kind() { - 
ErrorKind::Engine(EngineError::CliqueTooRecentlySigned(_)) => (), - _ => assert!(true == false, "Wrong error kind"), - } + match err.kind() { + ErrorKind::Engine(EngineError::CliqueTooRecentlySigned(_)) => (), + _ => assert!(true == false, "Wrong error kind"), + } } // Not part of http://eips.ethereum.org/EIPS/eip-225 #[test] fn bonus_consensus_should_keep_track_of_votes_before_latest_per_signer() { - let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D']); + let tester = CliqueTester::with(100, 1, vec!['A', 'B', 'C', 'D']); - // Add a vote for `E` signed by `A` - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &tester.genesis, - Some(tester.signers[&'E'].address()), 'A').unwrap(); - // Empty block signed by `B` - let vote = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'B').unwrap(); + // Add a vote for `E` signed by `A` + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &tester.genesis, + Some(tester.signers[&'E'].address()), + 'A', + ) + .unwrap(); + // Empty block signed by `B` + let vote = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'B') + .unwrap(); - // Empty block signed by `C` - let vote = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'C').unwrap(); + // Empty block signed by `C` + let vote = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'C') + .unwrap(); - // Empty block signed by `D` - let vote = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'D').unwrap(); + // Empty block signed by `D` + let vote = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'D') + .unwrap(); - // Add a vote for `F` signed by `A` - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &vote, - Some(tester.signers[&'F'].address()), 'A').unwrap(); - // Empty block signed by `C` - let vote = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 
'C').unwrap(); + // Add a vote for `F` signed by `A` + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &vote, + Some(tester.signers[&'F'].address()), + 'A', + ) + .unwrap(); + // Empty block signed by `C` + let vote = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'C') + .unwrap(); - // Empty block signed by `D` - let vote = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'D').unwrap(); + // Empty block signed by `D` + let vote = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'D') + .unwrap(); - // Add a vote for `E` signed by `B` - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &vote, - Some(tester.signers[&'E'].address()), 'B').unwrap(); - // Empty block signed by `A` - let vote = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'A').unwrap(); + // Add a vote for `E` signed by `B` + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &vote, + Some(tester.signers[&'E'].address()), + 'B', + ) + .unwrap(); + // Empty block signed by `A` + let vote = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'A') + .unwrap(); - // Empty block signed by `C` - let vote = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'C').unwrap(); + // Empty block signed by `C` + let vote = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'C') + .unwrap(); - // Empty block signed by `D` - let vote = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'D').unwrap(); + // Empty block signed by `D` + let vote = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'D') + .unwrap(); - // Add a vote for `F` signed by `B` - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &vote, - Some(tester.signers[&'F'].address()), 'B').unwrap(); + // Add a vote for `F` signed by `B` + let vote = tester + .new_block_and_import( + 
CliqueBlockType::Vote(VoteType::Add), + &vote, + Some(tester.signers[&'F'].address()), + 'B', + ) + .unwrap(); - // Empty block signed by A` - let vote = tester.new_block_and_import(CliqueBlockType::Empty, &vote, None, 'A').unwrap(); + // Empty block signed by A` + let vote = tester + .new_block_and_import(CliqueBlockType::Empty, &vote, None, 'A') + .unwrap(); - // Add a vote for `E` signed by `C` - let vote = tester.new_block_and_import(CliqueBlockType::Vote(VoteType::Add), &vote, - Some(tester.signers[&'E'].address()), 'C').unwrap(); + // Add a vote for `E` signed by `C` + let vote = tester + .new_block_and_import( + CliqueBlockType::Vote(VoteType::Add), + &vote, + Some(tester.signers[&'E'].address()), + 'C', + ) + .unwrap(); - let tags = tester.into_tags(tester.clique_signers(&vote.hash())); - assert_eq!(&tags, &['A', 'B', 'C', 'D', 'E']); + let tags = tester.into_tags(tester.clique_signers(&vote.hash())); + assert_eq!(&tags, &['A', 'B', 'C', 'D', 'E']); } diff --git a/ethcore/src/engines/clique/util.rs b/ethcore/src/engines/clique/util.rs index 3f75289e9..57dcc3657 100644 --- a/ethcore/src/engines/clique/util.rs +++ b/ethcore/src/engines/clique/util.rs @@ -16,8 +16,10 @@ use std::collections::BTreeSet; -use engines::EngineError; -use engines::clique::{ADDRESS_LENGTH, SIGNATURE_LENGTH, VANITY_LENGTH, NULL_NONCE, NULL_MIXHASH}; +use engines::{ + clique::{ADDRESS_LENGTH, NULL_MIXHASH, NULL_NONCE, SIGNATURE_LENGTH, VANITY_LENGTH}, + EngineError, +}; use error::Error; use ethereum_types::{Address, H256}; use ethkey::{public_to_address, recover as ec_recover, Signature}; @@ -29,49 +31,49 @@ use types::header::Header; /// How many recovered signature to cache in the memory. pub const CREATOR_CACHE_NUM: usize = 4096; lazy_static! 
{ - /// key: header hash - /// value: creator address - static ref CREATOR_BY_HASH: RwLock> = RwLock::new(LruCache::new(CREATOR_CACHE_NUM)); + /// key: header hash + /// value: creator address + static ref CREATOR_BY_HASH: RwLock> = RwLock::new(LruCache::new(CREATOR_CACHE_NUM)); } /// Recover block creator from signature pub fn recover_creator(header: &Header) -> Result { - // Initialization - let mut cache = CREATOR_BY_HASH.write(); + // Initialization + let mut cache = CREATOR_BY_HASH.write(); - if let Some(creator) = cache.get_mut(&header.hash()) { - return Ok(*creator); - } + if let Some(creator) = cache.get_mut(&header.hash()) { + return Ok(*creator); + } - let data = header.extra_data(); - if data.len() < VANITY_LENGTH { - Err(EngineError::CliqueMissingVanity)? - } + let data = header.extra_data(); + if data.len() < VANITY_LENGTH { + Err(EngineError::CliqueMissingVanity)? + } - if data.len() < VANITY_LENGTH + SIGNATURE_LENGTH { - Err(EngineError::CliqueMissingSignature)? - } + if data.len() < VANITY_LENGTH + SIGNATURE_LENGTH { + Err(EngineError::CliqueMissingSignature)? 
+ } - // Split `signed_extra data` and `signature` - let (signed_data_slice, signature_slice) = data.split_at(data.len() - SIGNATURE_LENGTH); + // Split `signed_extra data` and `signature` + let (signed_data_slice, signature_slice) = data.split_at(data.len() - SIGNATURE_LENGTH); - // convert `&[u8]` to `[u8; 65]` - let signature = { - let mut s = [0; SIGNATURE_LENGTH]; - s.copy_from_slice(signature_slice); - s - }; + // convert `&[u8]` to `[u8; 65]` + let signature = { + let mut s = [0; SIGNATURE_LENGTH]; + s.copy_from_slice(signature_slice); + s + }; - // modify header and hash it - let unsigned_header = &mut header.clone(); - unsigned_header.set_extra_data(signed_data_slice.to_vec()); - let msg = unsigned_header.hash(); + // modify header and hash it + let unsigned_header = &mut header.clone(); + unsigned_header.set_extra_data(signed_data_slice.to_vec()); + let msg = unsigned_header.hash(); - let pubkey = ec_recover(&Signature::from(signature), &msg)?; - let creator = public_to_address(&pubkey); + let pubkey = ec_recover(&Signature::from(signature), &msg)?; + let creator = public_to_address(&pubkey); - cache.insert(header.hash(), creator.clone()); - Ok(creator) + cache.insert(header.hash(), creator.clone()); + Ok(creator) } /// Extract signer list from extra_data. @@ -83,33 +85,35 @@ pub fn recover_creator(header: &Header) -> Result { /// Signature: 65 bytes /// -- pub fn extract_signers(header: &Header) -> Result, Error> { - let data = header.extra_data(); + let data = header.extra_data(); - if data.len() <= VANITY_LENGTH + SIGNATURE_LENGTH { - Err(EngineError::CliqueCheckpointNoSigner)? - } + if data.len() <= VANITY_LENGTH + SIGNATURE_LENGTH { + Err(EngineError::CliqueCheckpointNoSigner)? 
+ } - // extract only the portion of extra_data which includes the signer list - let signers_raw = &data[(VANITY_LENGTH)..data.len() - (SIGNATURE_LENGTH)]; + // extract only the portion of extra_data which includes the signer list + let signers_raw = &data[(VANITY_LENGTH)..data.len() - (SIGNATURE_LENGTH)]; - if signers_raw.len() % ADDRESS_LENGTH != 0 { - Err(EngineError::CliqueCheckpointInvalidSigners(signers_raw.len()))? - } + if signers_raw.len() % ADDRESS_LENGTH != 0 { + Err(EngineError::CliqueCheckpointInvalidSigners( + signers_raw.len(), + ))? + } - let num_signers = signers_raw.len() / 20; + let num_signers = signers_raw.len() / 20; - let signers: BTreeSet
= (0..num_signers) - .map(|i| { - let start = i * ADDRESS_LENGTH; - let end = start + ADDRESS_LENGTH; - signers_raw[start..end].into() - }) - .collect(); + let signers: BTreeSet
= (0..num_signers) + .map(|i| { + let start = i * ADDRESS_LENGTH; + let end = start + ADDRESS_LENGTH; + signers_raw[start..end].into() + }) + .collect(); - Ok(signers) + Ok(signers) } /// Retrieve `null_seal` pub fn null_seal() -> Vec> { - vec![encode(&NULL_MIXHASH.to_vec()), encode(&NULL_NONCE.to_vec())] + vec![encode(&NULL_MIXHASH.to_vec()), encode(&NULL_NONCE.to_vec())] } diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 677a8abc1..328035536 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -14,135 +14,160 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use block::ExecutedBlock; use engines::{Engine, Seal}; use machine::Machine; -use types::header::{Header, ExtendedHeader}; -use block::ExecutedBlock; use std::sync::atomic::{AtomicU64, Ordering}; +use types::header::{ExtendedHeader, Header}; /// `InstantSeal` params. #[derive(Default, Debug, PartialEq)] pub struct InstantSealParams { - /// Whether to use millisecond timestamp - pub millisecond_timestamp: bool, + /// Whether to use millisecond timestamp + pub millisecond_timestamp: bool, } impl From<::ethjson::spec::InstantSealParams> for InstantSealParams { - fn from(p: ::ethjson::spec::InstantSealParams) -> Self { - InstantSealParams { - millisecond_timestamp: p.millisecond_timestamp, - } - } + fn from(p: ::ethjson::spec::InstantSealParams) -> Self { + InstantSealParams { + millisecond_timestamp: p.millisecond_timestamp, + } + } } /// An engine which does not provide any consensus mechanism, just seals blocks internally. /// Only seals blocks which have transactions. pub struct InstantSeal { - params: InstantSealParams, - machine: M, - last_sealed_block: AtomicU64, + params: InstantSealParams, + machine: M, + last_sealed_block: AtomicU64, } impl InstantSeal { - /// Returns new instance of InstantSeal over the given state machine. 
- pub fn new(params: InstantSealParams, machine: M) -> Self { - InstantSeal { - params, - machine, - last_sealed_block: AtomicU64::new(0), - } - } + /// Returns new instance of InstantSeal over the given state machine. + pub fn new(params: InstantSealParams, machine: M) -> Self { + InstantSeal { + params, + machine, + last_sealed_block: AtomicU64::new(0), + } + } } impl Engine for InstantSeal { - fn name(&self) -> &str { - "InstantSeal" - } + fn name(&self) -> &str { + "InstantSeal" + } - fn machine(&self) -> &M { &self.machine } + fn machine(&self) -> &M { + &self.machine + } - fn seals_internally(&self) -> Option { Some(true) } + fn seals_internally(&self) -> Option { + Some(true) + } - fn should_reseal_on_update(&self) -> bool { - // We would like for the miner to `update_sealing` if there are local_pending_transactions - // in the pool to prevent transactions sent in parallel from stalling in the transaction - // pool. (see #9660) - true - } + fn should_reseal_on_update(&self) -> bool { + // We would like for the miner to `update_sealing` if there are local_pending_transactions + // in the pool to prevent transactions sent in parallel from stalling in the transaction + // pool. 
(see #9660) + true + } - fn generate_seal(&self, block: &ExecutedBlock, _parent: &Header) -> Seal { - if !block.transactions.is_empty() { - let block_number = block.header.number(); - let last_sealed_block = self.last_sealed_block.load(Ordering::SeqCst); - // Return a regular seal if the given block is _higher_ than - // the last sealed one - if block_number > last_sealed_block { - let prev_last_sealed_block = self.last_sealed_block.compare_and_swap(last_sealed_block, block_number, Ordering::SeqCst); - if prev_last_sealed_block == last_sealed_block { - return Seal::Regular(Vec::new()) - } - } - } - Seal::None - } + fn generate_seal(&self, block: &ExecutedBlock, _parent: &Header) -> Seal { + if !block.transactions.is_empty() { + let block_number = block.header.number(); + let last_sealed_block = self.last_sealed_block.load(Ordering::SeqCst); + // Return a regular seal if the given block is _higher_ than + // the last sealed one + if block_number > last_sealed_block { + let prev_last_sealed_block = self.last_sealed_block.compare_and_swap( + last_sealed_block, + block_number, + Ordering::SeqCst, + ); + if prev_last_sealed_block == last_sealed_block { + return Seal::Regular(Vec::new()); + } + } + } + Seal::None + } - fn verify_local_seal(&self, _header: &Header) -> Result<(), M::Error> { - Ok(()) - } + fn verify_local_seal(&self, _header: &Header) -> Result<(), M::Error> { + Ok(()) + } - fn open_block_header_timestamp(&self, parent_timestamp: u64) -> u64 { - use std::{time, cmp}; + fn open_block_header_timestamp(&self, parent_timestamp: u64) -> u64 { + use std::{cmp, time}; - let dur = time::SystemTime::now().duration_since(time::UNIX_EPOCH).unwrap_or_default(); - let mut now = dur.as_secs(); - if self.params.millisecond_timestamp { - now = now * 1000 + dur.subsec_millis() as u64; - } - cmp::max(now, parent_timestamp) - } + let dur = time::SystemTime::now() + .duration_since(time::UNIX_EPOCH) + .unwrap_or_default(); + let mut now = dur.as_secs(); + if 
self.params.millisecond_timestamp { + now = now * 1000 + dur.subsec_millis() as u64; + } + cmp::max(now, parent_timestamp) + } - fn is_timestamp_valid(&self, header_timestamp: u64, parent_timestamp: u64) -> bool { - header_timestamp >= parent_timestamp - } + fn is_timestamp_valid(&self, header_timestamp: u64, parent_timestamp: u64) -> bool { + header_timestamp >= parent_timestamp + } - fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { - super::total_difficulty_fork_choice(new, current) - } + fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { + super::total_difficulty_fork_choice(new, current) + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use ethereum_types::{H520, Address}; - use test_helpers::get_temp_state_db; - use spec::Spec; - use types::header::Header; - use block::*; - use engines::Seal; + use block::*; + use engines::Seal; + use ethereum_types::{Address, H520}; + use spec::Spec; + use std::sync::Arc; + use test_helpers::get_temp_state_db; + use types::header::Header; - #[test] - fn instant_can_seal() { - let spec = Spec::new_instant(); - let engine = &*spec.engine; - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let genesis_header = spec.genesis_header(); - let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::default(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b = b.close_and_lock().unwrap(); - if let Seal::Regular(seal) = engine.generate_seal(&b, &genesis_header) { - assert!(b.try_seal(engine, seal).is_ok()); - } - } + #[test] + fn instant_can_seal() { + let spec = Spec::new_instant(); + let engine = &*spec.engine; + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let genesis_header = spec.genesis_header(); + let last_hashes = 
Arc::new(vec![genesis_header.hash()]); + let b = OpenBlock::new( + engine, + Default::default(), + false, + db, + &genesis_header, + last_hashes, + Address::default(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b = b.close_and_lock().unwrap(); + if let Seal::Regular(seal) = engine.generate_seal(&b, &genesis_header) { + assert!(b.try_seal(engine, seal).is_ok()); + } + } - #[test] - fn instant_cant_verify() { - let engine = Spec::new_instant().engine; - let mut header: Header = Header::default(); + #[test] + fn instant_cant_verify() { + let engine = Spec::new_instant().engine; + let mut header: Header = Header::default(); - assert!(engine.verify_block_basic(&header).is_ok()); + assert!(engine.verify_block_basic(&header).is_ok()); - header.set_seal(vec![::rlp::encode(&H520::default())]); + header.set_seal(vec![::rlp::encode(&H520::default())]); - assert!(engine.verify_block_unordered(&header).is_ok()); - } + assert!(engine.verify_block_unordered(&header).is_ok()); + } } diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index 56e7b5d17..cb4942216 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -26,37 +26,45 @@ mod validator_set; pub mod block_reward; pub mod signer; -pub use self::authority_round::AuthorityRound; -pub use self::basic_authority::BasicAuthority; -pub use self::instant_seal::{InstantSeal, InstantSealParams}; -pub use self::null_engine::NullEngine; -pub use self::signer::EngineSigner; -pub use self::clique::Clique; +pub use self::{ + authority_round::AuthorityRound, + basic_authority::BasicAuthority, + clique::Clique, + instant_seal::{InstantSeal, InstantSealParams}, + null_engine::NullEngine, + signer::EngineSigner, +}; // TODO [ToDr] Remove re-export (#10130) -pub use types::engines::ForkChoice; -pub use types::engines::epoch::{self, Transition as EpochTransition}; +pub use types::engines::{ + epoch::{self, Transition as EpochTransition}, + ForkChoice, +}; -use 
std::sync::{Weak, Arc}; -use std::collections::{BTreeMap, HashMap}; -use std::{fmt, error}; +use std::{ + collections::{BTreeMap, HashMap}, + error, fmt, + sync::{Arc, Weak}, +}; use builtin::Builtin; -use vm::{EnvInfo, Schedule, CreateContractAddress, CallType, ActionValue}; use error::Error; -use types::BlockNumber; -use types::header::{Header, ExtendedHeader}; use snapshot::SnapshotComponents; use spec::CommonParams; -use types::transaction::{self, UnverifiedTransaction, SignedTransaction}; +use types::{ + header::{ExtendedHeader, Header}, + transaction::{self, SignedTransaction, UnverifiedTransaction}, + BlockNumber, +}; +use vm::{ActionValue, CallType, CreateContractAddress, EnvInfo, Schedule}; -use ethkey::{Signature}; -use machine::{self, Machine, AuxiliaryRequest, AuxiliaryData}; -use ethereum_types::{H64, H256, U256, Address}; -use unexpected::{Mismatch, OutOfBounds}; -use bytes::Bytes; -use types::ancestry_action::AncestryAction; use block::ExecutedBlock; +use bytes::Bytes; +use ethereum_types::{Address, H256, H64, U256}; +use ethkey::Signature; +use machine::{self, AuxiliaryData, AuxiliaryRequest, Machine}; +use types::ancestry_action::AncestryAction; +use unexpected::{Mismatch, OutOfBounds}; /// Default EIP-210 contract code. /// As defined in https://github.com/ethereum/EIPs/pull/210 @@ -67,142 +75,151 @@ pub const MAX_UNCLE_AGE: usize = 6; /// Voting errors. #[derive(Debug)] pub enum EngineError { - /// Signature or author field does not belong to an authority. - NotAuthorized(Address), - /// The same author issued different votes at the same step. - DoubleVote(Address), - /// The received block is from an incorrect proposer. - NotProposer(Mismatch
), - /// Message was not expected. - UnexpectedMessage, - /// Seal field has an unexpected size. - BadSealFieldSize(OutOfBounds), - /// Validation proof insufficient. - InsufficientProof(String), - /// Failed system call. - FailedSystemCall(String), - /// Malformed consensus message. - MalformedMessage(String), - /// Requires client ref, but none registered. - RequiresClient, - /// Invalid engine specification or implementation. - InvalidEngine, - /// Requires signer ref, but none registered. - RequiresSigner, - /// Checkpoint is missing - CliqueMissingCheckpoint(H256), - /// Missing vanity data - CliqueMissingVanity, - /// Missing signature - CliqueMissingSignature, - /// Missing signers - CliqueCheckpointNoSigner, - /// List of signers is invalid - CliqueCheckpointInvalidSigners(usize), - /// Wrong author on a checkpoint - CliqueWrongAuthorCheckpoint(Mismatch
), - /// Wrong checkpoint authors recovered - CliqueFaultyRecoveredSigners(Vec), - /// Invalid nonce (should contain vote) - CliqueInvalidNonce(H64), - /// The signer signed a block to recently - CliqueTooRecentlySigned(Address), - /// Custom - Custom(String), + /// Signature or author field does not belong to an authority. + NotAuthorized(Address), + /// The same author issued different votes at the same step. + DoubleVote(Address), + /// The received block is from an incorrect proposer. + NotProposer(Mismatch
), + /// Message was not expected. + UnexpectedMessage, + /// Seal field has an unexpected size. + BadSealFieldSize(OutOfBounds), + /// Validation proof insufficient. + InsufficientProof(String), + /// Failed system call. + FailedSystemCall(String), + /// Malformed consensus message. + MalformedMessage(String), + /// Requires client ref, but none registered. + RequiresClient, + /// Invalid engine specification or implementation. + InvalidEngine, + /// Requires signer ref, but none registered. + RequiresSigner, + /// Checkpoint is missing + CliqueMissingCheckpoint(H256), + /// Missing vanity data + CliqueMissingVanity, + /// Missing signature + CliqueMissingSignature, + /// Missing signers + CliqueCheckpointNoSigner, + /// List of signers is invalid + CliqueCheckpointInvalidSigners(usize), + /// Wrong author on a checkpoint + CliqueWrongAuthorCheckpoint(Mismatch
), + /// Wrong checkpoint authors recovered + CliqueFaultyRecoveredSigners(Vec), + /// Invalid nonce (should contain vote) + CliqueInvalidNonce(H64), + /// The signer signed a block to recently + CliqueTooRecentlySigned(Address), + /// Custom + Custom(String), } impl fmt::Display for EngineError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::EngineError::*; - let msg = match *self { - CliqueMissingCheckpoint(ref hash) => format!("Missing checkpoint block: {}", hash), - CliqueMissingVanity => format!("Extra data is missing vanity data"), - CliqueMissingSignature => format!("Extra data is missing signature"), - CliqueCheckpointInvalidSigners(len) => format!("Checkpoint block list was of length: {} of checkpoint but - it needs to be bigger than zero and a divisible by 20", len), - CliqueCheckpointNoSigner => format!("Checkpoint block list of signers was empty"), - CliqueInvalidNonce(ref mis) => format!("Unexpected nonce {} expected {} or {}", mis, 0_u64, u64::max_value()), - CliqueWrongAuthorCheckpoint(ref oob) => format!("Unexpected checkpoint author: {}", oob), - CliqueFaultyRecoveredSigners(ref mis) => format!("Faulty recovered signers {:?}", mis), - CliqueTooRecentlySigned(ref address) => format!("The signer: {} has signed a block too recently", address), - Custom(ref s) => s.clone(), - DoubleVote(ref address) => format!("Author {} issued too many blocks.", address), - NotProposer(ref mis) => format!("Author is not a current proposer: {}", mis), - NotAuthorized(ref address) => format!("Signer {} is not authorized.", address), - UnexpectedMessage => "This Engine should not be fed messages.".into(), - BadSealFieldSize(ref oob) => format!("Seal field has an unexpected length: {}", oob), - InsufficientProof(ref msg) => format!("Insufficient validation proof: {}", msg), - FailedSystemCall(ref msg) => format!("Failed to make system call: {}", msg), - MalformedMessage(ref msg) => format!("Received malformed consensus message: {}", msg), - 
RequiresClient => format!("Call requires client but none registered"), - RequiresSigner => format!("Call requires signer but none registered"), - InvalidEngine => format!("Invalid engine specification or implementation"), - }; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::EngineError::*; + let msg = match *self { + CliqueMissingCheckpoint(ref hash) => format!("Missing checkpoint block: {}", hash), + CliqueMissingVanity => format!("Extra data is missing vanity data"), + CliqueMissingSignature => format!("Extra data is missing signature"), + CliqueCheckpointInvalidSigners(len) => format!( + "Checkpoint block list was of length: {} of checkpoint but + it needs to be bigger than zero and a divisible by 20", + len + ), + CliqueCheckpointNoSigner => format!("Checkpoint block list of signers was empty"), + CliqueInvalidNonce(ref mis) => format!( + "Unexpected nonce {} expected {} or {}", + mis, + 0_u64, + u64::max_value() + ), + CliqueWrongAuthorCheckpoint(ref oob) => { + format!("Unexpected checkpoint author: {}", oob) + } + CliqueFaultyRecoveredSigners(ref mis) => format!("Faulty recovered signers {:?}", mis), + CliqueTooRecentlySigned(ref address) => { + format!("The signer: {} has signed a block too recently", address) + } + Custom(ref s) => s.clone(), + DoubleVote(ref address) => format!("Author {} issued too many blocks.", address), + NotProposer(ref mis) => format!("Author is not a current proposer: {}", mis), + NotAuthorized(ref address) => format!("Signer {} is not authorized.", address), + UnexpectedMessage => "This Engine should not be fed messages.".into(), + BadSealFieldSize(ref oob) => format!("Seal field has an unexpected length: {}", oob), + InsufficientProof(ref msg) => format!("Insufficient validation proof: {}", msg), + FailedSystemCall(ref msg) => format!("Failed to make system call: {}", msg), + MalformedMessage(ref msg) => format!("Received malformed consensus message: {}", msg), + RequiresClient => format!("Call requires 
client but none registered"), + RequiresSigner => format!("Call requires signer but none registered"), + InvalidEngine => format!("Invalid engine specification or implementation"), + }; - f.write_fmt(format_args!("Engine error ({})", msg)) - } + f.write_fmt(format_args!("Engine error ({})", msg)) + } } impl error::Error for EngineError { - fn description(&self) -> &str { - "Engine error" - } + fn description(&self) -> &str { + "Engine error" + } } /// Seal type. #[derive(Debug, PartialEq, Eq)] pub enum Seal { - /// Proposal seal; should be broadcasted, but not inserted into blockchain. - Proposal(Vec), - /// Regular block seal; should be part of the blockchain. - Regular(Vec), - /// Engine does not generate seal for this block right now. - None, + /// Proposal seal; should be broadcasted, but not inserted into blockchain. + Proposal(Vec), + /// Regular block seal; should be part of the blockchain. + Regular(Vec), + /// Engine does not generate seal for this block right now. + None, } /// A system-calling closure. Enacts calls on a block's state from the system address. pub type SystemCall<'a> = FnMut(Address, Vec) -> Result, String> + 'a; /// A system-calling closure. Enacts calls on a block's state with code either from an on-chain contract, or hard-coded EVM or WASM (if enabled on-chain) codes. -pub type SystemOrCodeCall<'a> = FnMut(SystemOrCodeCallKind, Vec) -> Result, String> + 'a; +pub type SystemOrCodeCall<'a> = + FnMut(SystemOrCodeCallKind, Vec) -> Result, String> + 'a; /// Kind of SystemOrCodeCall, this is either an on-chain address, or code. #[derive(PartialEq, Debug, Clone)] pub enum SystemOrCodeCallKind { - /// On-chain address. - Address(Address), - /// Hard-coded code. - Code(Arc>, H256), + /// On-chain address. + Address(Address), + /// Hard-coded code. + Code(Arc>, H256), } /// Default SystemOrCodeCall implementation. 
-pub fn default_system_or_code_call<'a>(machine: &'a ::machine::EthereumMachine, block: &'a mut ::block::ExecutedBlock) -> impl FnMut(SystemOrCodeCallKind, Vec) -> Result, String> + 'a { - move |to, data| { - let result = match to { - SystemOrCodeCallKind::Address(address) => { - machine.execute_as_system( - block, - address, - U256::max_value(), - Some(data), - ) - }, - SystemOrCodeCallKind::Code(code, code_hash) => { - machine.execute_code_as_system( - block, - None, - Some(code), - Some(code_hash), - Some(ActionValue::Apparent(U256::zero())), - U256::max_value(), - Some(data), - Some(CallType::StaticCall), - ) - }, - }; +pub fn default_system_or_code_call<'a>( + machine: &'a ::machine::EthereumMachine, + block: &'a mut ::block::ExecutedBlock, +) -> impl FnMut(SystemOrCodeCallKind, Vec) -> Result, String> + 'a { + move |to, data| { + let result = match to { + SystemOrCodeCallKind::Address(address) => { + machine.execute_as_system(block, address, U256::max_value(), Some(data)) + } + SystemOrCodeCallKind::Code(code, code_hash) => machine.execute_code_as_system( + block, + None, + Some(code), + Some(code_hash), + Some(ActionValue::Apparent(U256::zero())), + U256::max_value(), + Some(data), + Some(CallType::StaticCall), + ), + }; - result.map_err(|e| format!("{}", e)) - } + result.map_err(|e| format!("{}", e)) + } } /// Type alias for a function we can get headers by hash through. @@ -213,272 +230,308 @@ pub type PendingTransitionStore<'a> = Fn(H256) -> Option: Send + Sync { - /// Generate a proof, given the state. - fn generate_proof<'a>(&self, state: &machine::Call) -> Result, String>; - /// Check a proof generated elsewhere (potentially by a peer). - // `engine` needed to check state proofs, while really this should - // just be state machine params. - fn check_proof(&self, machine: &M, proof: &[u8]) -> Result<(), String>; + /// Generate a proof, given the state. 
+ fn generate_proof<'a>(&self, state: &machine::Call) -> Result, String>; + /// Check a proof generated elsewhere (potentially by a peer). + // `engine` needed to check state proofs, while really this should + // just be state machine params. + fn check_proof(&self, machine: &M, proof: &[u8]) -> Result<(), String>; } /// Proof generated on epoch change. pub enum Proof { - /// Known proof (extracted from signal) - Known(Vec), - /// State dependent proof. - WithState(Arc>), + /// Known proof (extracted from signal) + Known(Vec), + /// State dependent proof. + WithState(Arc>), } /// Generated epoch verifier. pub enum ConstructedVerifier<'a, M: Machine> { - /// Fully trusted verifier. - Trusted(Box>), - /// Verifier unconfirmed. Check whether given finality proof finalizes given hash - /// under previous epoch. - Unconfirmed(Box>, &'a [u8], H256), - /// Error constructing verifier. - Err(Error), + /// Fully trusted verifier. + Trusted(Box>), + /// Verifier unconfirmed. Check whether given finality proof finalizes given hash + /// under previous epoch. + Unconfirmed(Box>, &'a [u8], H256), + /// Error constructing verifier. + Err(Error), } impl<'a, M: Machine> ConstructedVerifier<'a, M> { - /// Convert to a result, indicating that any necessary confirmation has been done - /// already. - pub fn known_confirmed(self) -> Result>, Error> { - match self { - ConstructedVerifier::Trusted(v) | ConstructedVerifier::Unconfirmed(v, _, _) => Ok(v), - ConstructedVerifier::Err(e) => Err(e), - } - } + /// Convert to a result, indicating that any necessary confirmation has been done + /// already. + pub fn known_confirmed(self) -> Result>, Error> { + match self { + ConstructedVerifier::Trusted(v) | ConstructedVerifier::Unconfirmed(v, _, _) => Ok(v), + ConstructedVerifier::Err(e) => Err(e), + } + } } /// Results of a query of whether an epoch change occurred at the given block. pub enum EpochChange { - /// Cannot determine until more data is passed. 
- Unsure(AuxiliaryRequest), - /// No epoch change. - No, - /// The epoch will change, with proof. - Yes(Proof), + /// Cannot determine until more data is passed. + Unsure(AuxiliaryRequest), + /// No epoch change. + No, + /// The epoch will change, with proof. + Yes(Proof), } /// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based. /// Provides hooks into each of the major parts of block import. pub trait Engine: Sync + Send { - /// The name of this engine. - fn name(&self) -> &str; + /// The name of this engine. + fn name(&self) -> &str; - /// Get access to the underlying state machine. - // TODO: decouple. - fn machine(&self) -> &M; + /// Get access to the underlying state machine. + // TODO: decouple. + fn machine(&self) -> &M; - /// The number of additional header fields required for this engine. - fn seal_fields(&self, _header: &Header) -> usize { 0 } + /// The number of additional header fields required for this engine. + fn seal_fields(&self, _header: &Header) -> usize { + 0 + } - /// Additional engine-specific information for the user/developer concerning `header`. - fn extra_info(&self, _header: &Header) -> BTreeMap { BTreeMap::new() } + /// Additional engine-specific information for the user/developer concerning `header`. + fn extra_info(&self, _header: &Header) -> BTreeMap { + BTreeMap::new() + } - /// Maximum number of uncles a block is allowed to declare. - fn maximum_uncle_count(&self, _block: BlockNumber) -> usize { 0 } + /// Maximum number of uncles a block is allowed to declare. + fn maximum_uncle_count(&self, _block: BlockNumber) -> usize { + 0 + } - /// Optional maximum gas limit. - fn maximum_gas_limit(&self) -> Option { None } + /// Optional maximum gas limit. + fn maximum_gas_limit(&self) -> Option { + None + } - /// Block transformation functions, before the transactions. - /// `epoch_begin` set to true if this block kicks off an epoch. 
- fn on_new_block( - &self, - _block: &mut ExecutedBlock, - _epoch_begin: bool, - _ancestry: &mut Iterator, - ) -> Result<(), M::Error> { - Ok(()) - } + /// Block transformation functions, before the transactions. + /// `epoch_begin` set to true if this block kicks off an epoch. + fn on_new_block( + &self, + _block: &mut ExecutedBlock, + _epoch_begin: bool, + _ancestry: &mut Iterator, + ) -> Result<(), M::Error> { + Ok(()) + } - /// Block transformation functions, after the transactions. - fn on_close_block(&self, _block: &mut ExecutedBlock) -> Result<(), M::Error> { - Ok(()) - } + /// Block transformation functions, after the transactions. + fn on_close_block(&self, _block: &mut ExecutedBlock) -> Result<(), M::Error> { + Ok(()) + } - /// Allow mutating the header during seal generation. Currently only used by Clique. - fn on_seal_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { Ok(()) } + /// Allow mutating the header during seal generation. Currently only used by Clique. + fn on_seal_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { + Ok(()) + } - /// None means that it requires external input (e.g. PoW) to seal a block. - /// Some(true) means the engine is currently prime for seal generation (i.e. node is the current validator). - /// Some(false) means that the node might seal internally but is not qualified now. - fn seals_internally(&self) -> Option { None } + /// None means that it requires external input (e.g. PoW) to seal a block. + /// Some(true) means the engine is currently prime for seal generation (i.e. node is the current validator). + /// Some(false) means that the node might seal internally but is not qualified now. + fn seals_internally(&self) -> Option { + None + } - /// Called in `miner.chain_new_blocks` if the engine wishes to `update_sealing` - /// after a block was recently sealed. 
- /// - /// returns false by default - fn should_reseal_on_update(&self) -> bool { - false - } + /// Called in `miner.chain_new_blocks` if the engine wishes to `update_sealing` + /// after a block was recently sealed. + /// + /// returns false by default + fn should_reseal_on_update(&self) -> bool { + false + } - /// Attempt to seal the block internally. - /// - /// If `Some` is returned, then you get a valid seal. - /// - /// This operation is synchronous and may (quite reasonably) not be available, in which None will - /// be returned. - /// - /// It is fine to require access to state or a full client for this function, since - /// light clients do not generate seals. - fn generate_seal(&self, _block: &ExecutedBlock, _parent: &Header) -> Seal { Seal::None } + /// Attempt to seal the block internally. + /// + /// If `Some` is returned, then you get a valid seal. + /// + /// This operation is synchronous and may (quite reasonably) not be available, in which None will + /// be returned. + /// + /// It is fine to require access to state or a full client for this function, since + /// light clients do not generate seals. + fn generate_seal(&self, _block: &ExecutedBlock, _parent: &Header) -> Seal { + Seal::None + } - /// Verify a locally-generated seal of a header. - /// - /// If this engine seals internally, - /// no checks have to be done here, since all internally generated seals - /// should be valid. - /// - /// Externally-generated seals (e.g. PoW) will need to be checked for validity. - /// - /// It is fine to require access to state or a full client for this function, since - /// light clients do not generate seals. - fn verify_local_seal(&self, header: &Header) -> Result<(), M::Error>; + /// Verify a locally-generated seal of a header. + /// + /// If this engine seals internally, + /// no checks have to be done here, since all internally generated seals + /// should be valid. + /// + /// Externally-generated seals (e.g. 
PoW) will need to be checked for validity. + /// + /// It is fine to require access to state or a full client for this function, since + /// light clients do not generate seals. + fn verify_local_seal(&self, header: &Header) -> Result<(), M::Error>; - /// Phase 1 quick block verification. Only does checks that are cheap. Returns either a null `Ok` or a general error detailing the problem with import. - /// The verification module can optionally avoid checking the seal (`check_seal`), if seal verification is disabled this method won't be called. - fn verify_block_basic(&self, _header: &Header) -> Result<(), M::Error> { Ok(()) } + /// Phase 1 quick block verification. Only does checks that are cheap. Returns either a null `Ok` or a general error detailing the problem with import. + /// The verification module can optionally avoid checking the seal (`check_seal`), if seal verification is disabled this method won't be called. + fn verify_block_basic(&self, _header: &Header) -> Result<(), M::Error> { + Ok(()) + } - /// Phase 2 verification. Perform costly checks such as transaction signatures. Returns either a null `Ok` or a general error detailing the problem with import. - /// The verification module can optionally avoid checking the seal (`check_seal`), if seal verification is disabled this method won't be called. - fn verify_block_unordered(&self, _header: &Header) -> Result<(), M::Error> { Ok(()) } + /// Phase 2 verification. Perform costly checks such as transaction signatures. Returns either a null `Ok` or a general error detailing the problem with import. + /// The verification module can optionally avoid checking the seal (`check_seal`), if seal verification is disabled this method won't be called. + fn verify_block_unordered(&self, _header: &Header) -> Result<(), M::Error> { + Ok(()) + } - /// Phase 3 verification. Check block information against parent. Returns either a null `Ok` or a general error detailing the problem with import. 
- fn verify_block_family(&self, _header: &Header, _parent: &Header) -> Result<(), M::Error> { Ok(()) } + /// Phase 3 verification. Check block information against parent. Returns either a null `Ok` or a general error detailing the problem with import. + fn verify_block_family(&self, _header: &Header, _parent: &Header) -> Result<(), M::Error> { + Ok(()) + } - /// Phase 4 verification. Verify block header against potentially external data. - /// Should only be called when `register_client` has been called previously. - fn verify_block_external(&self, _header: &Header) -> Result<(), M::Error> { Ok(()) } + /// Phase 4 verification. Verify block header against potentially external data. + /// Should only be called when `register_client` has been called previously. + fn verify_block_external(&self, _header: &Header) -> Result<(), M::Error> { + Ok(()) + } - /// Genesis epoch data. - fn genesis_epoch_data<'a>(&self, _header: &Header, _state: &machine::Call) -> Result, String> { Ok(Vec::new()) } + /// Genesis epoch data. + fn genesis_epoch_data<'a>( + &self, + _header: &Header, + _state: &machine::Call, + ) -> Result, String> { + Ok(Vec::new()) + } - /// Whether an epoch change is signalled at the given header but will require finality. - /// If a change can be enacted immediately then return `No` from this function but - /// `Yes` from `is_epoch_end`. - /// - /// If auxiliary data of the block is required, return an auxiliary request and the function will be - /// called again with them. - /// Return `Yes` or `No` when the answer is definitively known. - /// - /// Should not interact with state. - fn signals_epoch_end<'a>(&self, _header: &Header, _aux: AuxiliaryData<'a>) - -> EpochChange - { - EpochChange::No - } + /// Whether an epoch change is signalled at the given header but will require finality. + /// If a change can be enacted immediately then return `No` from this function but + /// `Yes` from `is_epoch_end`. 
+ /// + /// If auxiliary data of the block is required, return an auxiliary request and the function will be + /// called again with them. + /// Return `Yes` or `No` when the answer is definitively known. + /// + /// Should not interact with state. + fn signals_epoch_end<'a>(&self, _header: &Header, _aux: AuxiliaryData<'a>) -> EpochChange { + EpochChange::No + } - /// Whether a block is the end of an epoch. - /// - /// This either means that an immediate transition occurs or a block signalling transition - /// has reached finality. The `Headers` given are not guaranteed to return any blocks - /// from any epoch other than the current. The client must keep track of finality and provide - /// the latest finalized headers to check against the transition store. - /// - /// Return optional transition proof. - fn is_epoch_end( - &self, - _chain_head: &Header, - _finalized: &[H256], - _chain: &Headers
, - _transition_store: &PendingTransitionStore, - ) -> Option> { - None - } + /// Whether a block is the end of an epoch. + /// + /// This either means that an immediate transition occurs or a block signalling transition + /// has reached finality. The `Headers` given are not guaranteed to return any blocks + /// from any epoch other than the current. The client must keep track of finality and provide + /// the latest finalized headers to check against the transition store. + /// + /// Return optional transition proof. + fn is_epoch_end( + &self, + _chain_head: &Header, + _finalized: &[H256], + _chain: &Headers
, + _transition_store: &PendingTransitionStore, + ) -> Option> { + None + } - /// Whether a block is the end of an epoch. - /// - /// This either means that an immediate transition occurs or a block signalling transition - /// has reached finality. The `Headers` given are not guaranteed to return any blocks - /// from any epoch other than the current. This is a specialized method to use for light - /// clients since the light client doesn't track finality of all blocks, and therefore finality - /// for blocks in the current epoch is built inside this method by the engine. - /// - /// Return optional transition proof. - fn is_epoch_end_light( - &self, - _chain_head: &Header, - _chain: &Headers
, - _transition_store: &PendingTransitionStore, - ) -> Option> { - None - } + /// Whether a block is the end of an epoch. + /// + /// This either means that an immediate transition occurs or a block signalling transition + /// has reached finality. The `Headers` given are not guaranteed to return any blocks + /// from any epoch other than the current. This is a specialized method to use for light + /// clients since the light client doesn't track finality of all blocks, and therefore finality + /// for blocks in the current epoch is built inside this method by the engine. + /// + /// Return optional transition proof. + fn is_epoch_end_light( + &self, + _chain_head: &Header, + _chain: &Headers
, + _transition_store: &PendingTransitionStore, + ) -> Option> { + None + } - /// Create an epoch verifier from validation proof and a flag indicating - /// whether finality is required. - fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> ConstructedVerifier<'a, M> { - ConstructedVerifier::Trusted(Box::new(NoOp)) - } + /// Create an epoch verifier from validation proof and a flag indicating + /// whether finality is required. + fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> ConstructedVerifier<'a, M> { + ConstructedVerifier::Trusted(Box::new(NoOp)) + } - /// Populate a header's fields based on its parent's header. - /// Usually implements the chain scoring rule based on weight. - fn populate_from_parent(&self, _header: &mut Header, _parent: &Header) { } + /// Populate a header's fields based on its parent's header. + /// Usually implements the chain scoring rule based on weight. + fn populate_from_parent(&self, _header: &mut Header, _parent: &Header) {} - /// Handle any potential consensus messages; - /// updating consensus state and potentially issuing a new one. - fn handle_message(&self, _message: &[u8]) -> Result<(), EngineError> { Err(EngineError::UnexpectedMessage) } + /// Handle any potential consensus messages; + /// updating consensus state and potentially issuing a new one. + fn handle_message(&self, _message: &[u8]) -> Result<(), EngineError> { + Err(EngineError::UnexpectedMessage) + } - /// Register a component which signs consensus messages. - fn set_signer(&self, _signer: Box) {} + /// Register a component which signs consensus messages. + fn set_signer(&self, _signer: Box) {} - /// Sign using the EngineSigner, to be used for consensus tx signing. - fn sign(&self, _hash: H256) -> Result { unimplemented!() } + /// Sign using the EngineSigner, to be used for consensus tx signing. 
+ fn sign(&self, _hash: H256) -> Result { + unimplemented!() + } - /// Add Client which can be used for sealing, potentially querying the state and sending messages. - fn register_client(&self, _client: Weak) {} + /// Add Client which can be used for sealing, potentially querying the state and sending messages. + fn register_client(&self, _client: Weak) {} - /// Trigger next step of the consensus engine. - fn step(&self) {} + /// Trigger next step of the consensus engine. + fn step(&self) {} - /// Create a factory for building snapshot chunks and restoring from them. - /// Returning `None` indicates that this engine doesn't support snapshot creation. - fn snapshot_components(&self) -> Option> { - None - } + /// Create a factory for building snapshot chunks and restoring from them. + /// Returning `None` indicates that this engine doesn't support snapshot creation. + fn snapshot_components(&self) -> Option> { + None + } - /// Whether this engine supports warp sync. - fn supports_warp(&self) -> bool { - self.snapshot_components().is_some() - } + /// Whether this engine supports warp sync. + fn supports_warp(&self) -> bool { + self.snapshot_components().is_some() + } - /// Return a new open block header timestamp based on the parent timestamp. - fn open_block_header_timestamp(&self, parent_timestamp: u64) -> u64 { - use std::{time, cmp}; + /// Return a new open block header timestamp based on the parent timestamp. + fn open_block_header_timestamp(&self, parent_timestamp: u64) -> u64 { + use std::{cmp, time}; - let now = time::SystemTime::now().duration_since(time::UNIX_EPOCH).unwrap_or_default(); - cmp::max(now.as_secs() as u64, parent_timestamp + 1) - } + let now = time::SystemTime::now() + .duration_since(time::UNIX_EPOCH) + .unwrap_or_default(); + cmp::max(now.as_secs() as u64, parent_timestamp + 1) + } - /// Check whether the parent timestamp is valid. 
- fn is_timestamp_valid(&self, header_timestamp: u64, parent_timestamp: u64) -> bool { - header_timestamp > parent_timestamp - } + /// Check whether the parent timestamp is valid. + fn is_timestamp_valid(&self, header_timestamp: u64, parent_timestamp: u64) -> bool { + header_timestamp > parent_timestamp + } - /// Gather all ancestry actions. Called at the last stage when a block is committed. The Engine must guarantee that - /// the ancestry exists. - fn ancestry_actions(&self, _header: &Header, _ancestry: &mut Iterator) -> Vec { - Vec::new() - } + /// Gather all ancestry actions. Called at the last stage when a block is committed. The Engine must guarantee that + /// the ancestry exists. + fn ancestry_actions( + &self, + _header: &Header, + _ancestry: &mut Iterator, + ) -> Vec { + Vec::new() + } - /// Check whether the given new block is the best block, after finalization check. - fn fork_choice(&self, new: &ExtendedHeader, best: &ExtendedHeader) -> ForkChoice; + /// Check whether the given new block is the best block, after finalization check. + fn fork_choice(&self, new: &ExtendedHeader, best: &ExtendedHeader) -> ForkChoice; - /// Returns author should used when executing tx's for this block. - fn executive_author(&self, header: &Header) -> Result { - Ok(*header.author()) - } + /// Returns author should used when executing tx's for this block. + fn executive_author(&self, header: &Header) -> Result { + Ok(*header.author()) + } } /// Check whether a given block is the best block based on the default total difficulty rule. pub fn total_difficulty_fork_choice(new: &ExtendedHeader, best: &ExtendedHeader) -> ForkChoice { - if new.total_score() > best.total_score() { - ForkChoice::New - } else { - ForkChoice::Old - } + if new.total_score() > best.total_score() { + ForkChoice::New + } else { + ForkChoice::Old + } } /// Common type alias for an engine coupled with an Ethereum-like state machine. 
@@ -486,109 +539,122 @@ pub fn total_difficulty_fork_choice(new: &ExtendedHeader, best: &ExtendedHeader) // fortunately the effect is largely the same since engines are mostly used // via trait objects. pub trait EthEngine: Engine<::machine::EthereumMachine> { - /// Get the general parameters of the chain. - fn params(&self) -> &CommonParams { - self.machine().params() - } + /// Get the general parameters of the chain. + fn params(&self) -> &CommonParams { + self.machine().params() + } - /// Get the EVM schedule for the given block number. - fn schedule(&self, block_number: BlockNumber) -> Schedule { - self.machine().schedule(block_number) - } + /// Get the EVM schedule for the given block number. + fn schedule(&self, block_number: BlockNumber) -> Schedule { + self.machine().schedule(block_number) + } - /// Builtin-contracts for the chain.. - fn builtins(&self) -> &BTreeMap { - self.machine().builtins() - } + /// Builtin-contracts for the chain.. + fn builtins(&self) -> &BTreeMap { + self.machine().builtins() + } - /// Attempt to get a handle to a built-in contract. - /// Only returns references to activated built-ins. - fn builtin(&self, a: &Address, block_number: BlockNumber) -> Option<&Builtin> { - self.machine().builtin(a, block_number) - } + /// Attempt to get a handle to a built-in contract. + /// Only returns references to activated built-ins. + fn builtin(&self, a: &Address, block_number: BlockNumber) -> Option<&Builtin> { + self.machine().builtin(a, block_number) + } - /// Some intrinsic operation parameters; by default they take their value from the `spec()`'s `engine_params`. - fn maximum_extra_data_size(&self) -> usize { - self.machine().maximum_extra_data_size() - } + /// Some intrinsic operation parameters; by default they take their value from the `spec()`'s `engine_params`. + fn maximum_extra_data_size(&self) -> usize { + self.machine().maximum_extra_data_size() + } - /// The nonce with which accounts begin at given block. 
- fn account_start_nonce(&self, block: BlockNumber) -> U256 { - self.machine().account_start_nonce(block) - } + /// The nonce with which accounts begin at given block. + fn account_start_nonce(&self, block: BlockNumber) -> U256 { + self.machine().account_start_nonce(block) + } - /// The network ID that transactions should be signed with. - fn signing_chain_id(&self, env_info: &EnvInfo) -> Option { - self.machine().signing_chain_id(env_info) - } + /// The network ID that transactions should be signed with. + fn signing_chain_id(&self, env_info: &EnvInfo) -> Option { + self.machine().signing_chain_id(env_info) + } - /// Returns new contract address generation scheme at given block number. - fn create_address_scheme(&self, number: BlockNumber) -> CreateContractAddress { - self.machine().create_address_scheme(number) - } + /// Returns new contract address generation scheme at given block number. + fn create_address_scheme(&self, number: BlockNumber) -> CreateContractAddress { + self.machine().create_address_scheme(number) + } - /// Verify a particular transaction is valid. - /// - /// Unordered verification doesn't rely on the transaction execution order, - /// i.e. it should only verify stuff that doesn't assume any previous transactions - /// has already been verified and executed. - /// - /// NOTE This function consumes an `UnverifiedTransaction` and produces `SignedTransaction` - /// which implies that a heavy check of the signature is performed here. - fn verify_transaction_unordered(&self, t: UnverifiedTransaction, header: &Header) -> Result { - self.machine().verify_transaction_unordered(t, header) - } + /// Verify a particular transaction is valid. + /// + /// Unordered verification doesn't rely on the transaction execution order, + /// i.e. it should only verify stuff that doesn't assume any previous transactions + /// has already been verified and executed. 
+ /// + /// NOTE This function consumes an `UnverifiedTransaction` and produces `SignedTransaction` + /// which implies that a heavy check of the signature is performed here. + fn verify_transaction_unordered( + &self, + t: UnverifiedTransaction, + header: &Header, + ) -> Result { + self.machine().verify_transaction_unordered(t, header) + } - /// Perform basic/cheap transaction verification. - /// - /// This should include all cheap checks that can be done before - /// actually checking the signature, like chain-replay protection. - /// - /// NOTE This is done before the signature is recovered so avoid - /// doing any state-touching checks that might be expensive. - /// - /// TODO: Add flags for which bits of the transaction to check. - /// TODO: consider including State in the params. - fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> Result<(), transaction::Error> { - self.machine().verify_transaction_basic(t, header) - } + /// Perform basic/cheap transaction verification. + /// + /// This should include all cheap checks that can be done before + /// actually checking the signature, like chain-replay protection. + /// + /// NOTE This is done before the signature is recovered so avoid + /// doing any state-touching checks that might be expensive. + /// + /// TODO: Add flags for which bits of the transaction to check. + /// TODO: consider including State in the params. + fn verify_transaction_basic( + &self, + t: &UnverifiedTransaction, + header: &Header, + ) -> Result<(), transaction::Error> { + self.machine().verify_transaction_basic(t, header) + } - /// Additional information. - fn additional_params(&self) -> HashMap { - self.machine().additional_params() - } + /// Additional information. 
+ fn additional_params(&self) -> HashMap { + self.machine().additional_params() + } - /// Performs pre-validation of RLP decoded transaction before other processing - fn decode_transaction(&self, transaction: &[u8]) -> Result { - self.machine().decode_transaction(transaction) - } + /// Performs pre-validation of RLP decoded transaction before other processing + fn decode_transaction( + &self, + transaction: &[u8], + ) -> Result { + self.machine().decode_transaction(transaction) + } } // convenience wrappers for existing functions. -impl EthEngine for T where T: Engine<::machine::EthereumMachine> { } +impl EthEngine for T where T: Engine<::machine::EthereumMachine> {} /// Verifier for all blocks within an epoch with self-contained state. pub trait EpochVerifier: Send + Sync { - /// Lightly verify the next block header. - /// This may not be a header belonging to a different epoch. - fn verify_light(&self, header: &Header) -> Result<(), M::Error>; + /// Lightly verify the next block header. + /// This may not be a header belonging to a different epoch. + fn verify_light(&self, header: &Header) -> Result<(), M::Error>; - /// Perform potentially heavier checks on the next block header. - fn verify_heavy(&self, header: &Header) -> Result<(), M::Error> { - self.verify_light(header) - } + /// Perform potentially heavier checks on the next block header. + fn verify_heavy(&self, header: &Header) -> Result<(), M::Error> { + self.verify_light(header) + } - /// Check a finality proof against this epoch verifier. - /// Returns `Some(hashes)` if the proof proves finality of these hashes. - /// Returns `None` if the proof doesn't prove anything. - fn check_finality_proof(&self, _proof: &[u8]) -> Option> { - None - } + /// Check a finality proof against this epoch verifier. + /// Returns `Some(hashes)` if the proof proves finality of these hashes. + /// Returns `None` if the proof doesn't prove anything. 
+ fn check_finality_proof(&self, _proof: &[u8]) -> Option> { + None + } } /// Special "no-op" verifier for stateless, epoch-less engines. pub struct NoOp; impl EpochVerifier for NoOp { - fn verify_light(&self, _header: &Header) -> Result<(), M::Error> { Ok(()) } + fn verify_light(&self, _header: &Header) -> Result<(), M::Error> { + Ok(()) + } } diff --git a/ethcore/src/engines/null_engine.rs b/ethcore/src/engines/null_engine.rs index ae9440911..eac037ce9 100644 --- a/ethcore/src/engines/null_engine.rs +++ b/ethcore/src/engines/null_engine.rs @@ -14,109 +14,130 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use engines::Engine; -use engines::block_reward::{self, RewardKind}; +use block::ExecutedBlock; +use engines::{ + block_reward::{self, RewardKind}, + Engine, +}; use ethereum_types::U256; use machine::Machine; -use types::BlockNumber; -use types::header::{Header, ExtendedHeader}; -use types::ancestry_action::AncestryAction; -use block::ExecutedBlock; +use types::{ + ancestry_action::AncestryAction, + header::{ExtendedHeader, Header}, + BlockNumber, +}; /// Params for a null engine. #[derive(Clone, Default)] pub struct NullEngineParams { - /// base reward for a block. - pub block_reward: U256, - /// Immediate finalization. - pub immediate_finalization: bool + /// base reward for a block. + pub block_reward: U256, + /// Immediate finalization. 
+ pub immediate_finalization: bool, } impl From<::ethjson::spec::NullEngineParams> for NullEngineParams { - fn from(p: ::ethjson::spec::NullEngineParams) -> Self { - NullEngineParams { - block_reward: p.block_reward.map_or_else(Default::default, Into::into), - immediate_finalization: p.immediate_finalization.unwrap_or(false) - } - } + fn from(p: ::ethjson::spec::NullEngineParams) -> Self { + NullEngineParams { + block_reward: p.block_reward.map_or_else(Default::default, Into::into), + immediate_finalization: p.immediate_finalization.unwrap_or(false), + } + } } /// An engine which does not provide any consensus mechanism and does not seal blocks. pub struct NullEngine { - params: NullEngineParams, - machine: M, + params: NullEngineParams, + machine: M, } impl NullEngine { - /// Returns new instance of NullEngine with default VM Factory - pub fn new(params: NullEngineParams, machine: M) -> Self { - NullEngine { - params: params, - machine: machine, - } - } + /// Returns new instance of NullEngine with default VM Factory + pub fn new(params: NullEngineParams, machine: M) -> Self { + NullEngine { + params: params, + machine: machine, + } + } } impl Default for NullEngine { - fn default() -> Self { - Self::new(Default::default(), Default::default()) - } + fn default() -> Self { + Self::new(Default::default(), Default::default()) + } } impl Engine for NullEngine { - fn name(&self) -> &str { - "NullEngine" - } + fn name(&self) -> &str { + "NullEngine" + } - fn machine(&self) -> &M { &self.machine } + fn machine(&self) -> &M { + &self.machine + } - fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), M::Error> { - use std::ops::Shr; + fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), M::Error> { + use std::ops::Shr; - let author = *block.header.author(); - let number = block.header.number(); + let author = *block.header.author(); + let number = block.header.number(); - let reward = self.params.block_reward; - if reward == U256::zero() { return 
Ok(()) } + let reward = self.params.block_reward; + if reward == U256::zero() { + return Ok(()); + } - let n_uncles = block.uncles.len(); + let n_uncles = block.uncles.len(); - let mut rewards = Vec::new(); + let mut rewards = Vec::new(); - // Bestow block reward - let result_block_reward = reward + reward.shr(5) * U256::from(n_uncles); - rewards.push((author, RewardKind::Author, result_block_reward)); + // Bestow block reward + let result_block_reward = reward + reward.shr(5) * U256::from(n_uncles); + rewards.push((author, RewardKind::Author, result_block_reward)); - // bestow uncle rewards. - for u in &block.uncles { - let uncle_author = u.author(); - let result_uncle_reward = (reward * U256::from(8 + u.number() - number)).shr(3); - rewards.push((*uncle_author, RewardKind::uncle(number, u.number()), result_uncle_reward)); - } + // bestow uncle rewards. + for u in &block.uncles { + let uncle_author = u.author(); + let result_uncle_reward = (reward * U256::from(8 + u.number() - number)).shr(3); + rewards.push(( + *uncle_author, + RewardKind::uncle(number, u.number()), + result_uncle_reward, + )); + } - block_reward::apply_block_rewards(&rewards, block, &self.machine) - } + block_reward::apply_block_rewards(&rewards, block, &self.machine) + } - fn maximum_uncle_count(&self, _block: BlockNumber) -> usize { 2 } + fn maximum_uncle_count(&self, _block: BlockNumber) -> usize { + 2 + } - fn verify_local_seal(&self, _header: &Header) -> Result<(), M::Error> { - Ok(()) - } + fn verify_local_seal(&self, _header: &Header) -> Result<(), M::Error> { + Ok(()) + } - fn snapshot_components(&self) -> Option> { - Some(Box::new(::snapshot::PowSnapshot::new(10000, 10000))) - } + fn snapshot_components(&self) -> Option> { + Some(Box::new(::snapshot::PowSnapshot::new(10000, 10000))) + } - fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { - super::total_difficulty_fork_choice(new, current) - } + fn fork_choice(&self, new: &ExtendedHeader, 
current: &ExtendedHeader) -> super::ForkChoice { + super::total_difficulty_fork_choice(new, current) + } - fn ancestry_actions(&self, _header: &Header, ancestry: &mut dyn Iterator) -> Vec { - if self.params.immediate_finalization { - // always mark parent finalized - ancestry.take(1).map(|e| AncestryAction::MarkFinalized(e.header.hash())).collect() - } else { - Vec::new() - } - } + fn ancestry_actions( + &self, + _header: &Header, + ancestry: &mut dyn Iterator, + ) -> Vec { + if self.params.immediate_finalization { + // always mark parent finalized + ancestry + .take(1) + .map(|e| AncestryAction::MarkFinalized(e.header.hash())) + .collect() + } else { + Vec::new() + } + } } diff --git a/ethcore/src/engines/signer.rs b/ethcore/src/engines/signer.rs index bccaca191..fa238dc5a 100644 --- a/ethcore/src/engines/signer.rs +++ b/ethcore/src/engines/signer.rs @@ -16,68 +16,68 @@ //! A signer used by Engines which need to sign messages. -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; use ethkey::{self, Signature}; /// Everything that an Engine needs to sign messages. pub trait EngineSigner: Send + Sync { - /// Sign a consensus message hash. - fn sign(&self, hash: H256) -> Result; + /// Sign a consensus message hash. + fn sign(&self, hash: H256) -> Result; - /// Signing address - fn address(&self) -> Address; + /// Signing address + fn address(&self) -> Address; } /// Creates a new `EngineSigner` from given key pair. 
pub fn from_keypair(keypair: ethkey::KeyPair) -> Box { - Box::new(Signer(keypair)) + Box::new(Signer(keypair)) } struct Signer(ethkey::KeyPair); impl EngineSigner for Signer { - fn sign(&self, hash: H256) -> Result { - ethkey::sign(self.0.secret(), &hash) - } + fn sign(&self, hash: H256) -> Result { + ethkey::sign(self.0.secret(), &hash) + } - fn address(&self) -> Address { - self.0.address() - } + fn address(&self) -> Address { + self.0.address() + } } #[cfg(test)] mod test_signer { - use std::sync::Arc; + use std::sync::Arc; - use ethkey::Password; - use accounts::{self, AccountProvider, SignError}; + use accounts::{self, AccountProvider, SignError}; + use ethkey::Password; - use super::*; + use super::*; - impl EngineSigner for (Arc, Address, Password) { - fn sign(&self, hash: H256) -> Result { - match self.0.sign(self.1, Some(self.2.clone()), hash) { - Err(SignError::NotUnlocked) => unreachable!(), - Err(SignError::NotFound) => Err(ethkey::Error::InvalidAddress), - Err(SignError::Hardware(err)) => { - warn!("Error using hardware wallet for engine: {:?}", err); - Err(ethkey::Error::InvalidSecret) - }, - Err(SignError::SStore(accounts::Error::EthKey(err))) => Err(err), - Err(SignError::SStore(accounts::Error::EthKeyCrypto(err))) => { - warn!("Low level crypto error: {:?}", err); - Err(ethkey::Error::InvalidSecret) - }, - Err(SignError::SStore(err)) => { - warn!("Error signing for engine: {:?}", err); - Err(ethkey::Error::InvalidSignature) - }, - Ok(ok) => Ok(ok), - } - } + impl EngineSigner for (Arc, Address, Password) { + fn sign(&self, hash: H256) -> Result { + match self.0.sign(self.1, Some(self.2.clone()), hash) { + Err(SignError::NotUnlocked) => unreachable!(), + Err(SignError::NotFound) => Err(ethkey::Error::InvalidAddress), + Err(SignError::Hardware(err)) => { + warn!("Error using hardware wallet for engine: {:?}", err); + Err(ethkey::Error::InvalidSecret) + } + Err(SignError::SStore(accounts::Error::EthKey(err))) => Err(err), + 
Err(SignError::SStore(accounts::Error::EthKeyCrypto(err))) => { + warn!("Low level crypto error: {:?}", err); + Err(ethkey::Error::InvalidSecret) + } + Err(SignError::SStore(err)) => { + warn!("Error signing for engine: {:?}", err); + Err(ethkey::Error::InvalidSignature) + } + Ok(ok) => Ok(ok), + } + } - fn address(&self) -> Address { - self.1 - } - } + fn address(&self) -> Address { + self.1 + } + } } diff --git a/ethcore/src/engines/validator_set/contract.rs b/ethcore/src/engines/validator_set/contract.rs index f0064a8c2..776858a3c 100644 --- a/ethcore/src/engines/validator_set/contract.rs +++ b/ethcore/src/engines/validator_set/contract.rs @@ -16,203 +16,251 @@ /// Validator set maintained in a contract, updated using `getValidators` method. /// It can also report validators for misbehaviour with two levels: `reportMalicious` and `reportBenign`. - use std::sync::Weak; use bytes::Bytes; -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; use machine::{AuxiliaryData, Call, EthereumMachine}; use parking_lot::RwLock; -use types::BlockNumber; -use types::header::Header; +use types::{header::Header, BlockNumber}; use client::EngineClient; -use super::{ValidatorSet, SimpleList, SystemCall}; -use super::safe_contract::ValidatorSafeContract; +use super::{safe_contract::ValidatorSafeContract, SimpleList, SystemCall, ValidatorSet}; use_contract!(validator_report, "res/contracts/validator_report.json"); /// A validator contract with reporting. 
pub struct ValidatorContract { - contract_address: Address, - validators: ValidatorSafeContract, - client: RwLock>>, // TODO [keorn]: remove + contract_address: Address, + validators: ValidatorSafeContract, + client: RwLock>>, // TODO [keorn]: remove } impl ValidatorContract { - pub fn new(contract_address: Address) -> Self { - ValidatorContract { - contract_address, - validators: ValidatorSafeContract::new(contract_address), - client: RwLock::new(None), - } - } + pub fn new(contract_address: Address) -> Self { + ValidatorContract { + contract_address, + validators: ValidatorSafeContract::new(contract_address), + client: RwLock::new(None), + } + } } impl ValidatorContract { - fn transact(&self, data: Bytes) -> Result<(), String> { - let client = self.client.read().as_ref() - .and_then(Weak::upgrade) - .ok_or_else(|| "No client!")?; + fn transact(&self, data: Bytes) -> Result<(), String> { + let client = self + .client + .read() + .as_ref() + .and_then(Weak::upgrade) + .ok_or_else(|| "No client!")?; - match client.as_full_client() { - Some(c) => { - c.transact_contract(self.contract_address, data) - .map_err(|e| format!("Transaction import error: {}", e))?; - Ok(()) - }, - None => Err("No full client!".into()), - } - } + match client.as_full_client() { + Some(c) => { + c.transact_contract(self.contract_address, data) + .map_err(|e| format!("Transaction import error: {}", e))?; + Ok(()) + } + None => Err("No full client!".into()), + } + } } impl ValidatorSet for ValidatorContract { - fn default_caller(&self, id: ::types::ids::BlockId) -> Box { - self.validators.default_caller(id) - } + fn default_caller(&self, id: ::types::ids::BlockId) -> Box { + self.validators.default_caller(id) + } - fn on_epoch_begin(&self, first: bool, header: &Header, call: &mut SystemCall) -> Result<(), ::error::Error> { - self.validators.on_epoch_begin(first, header, call) - } + fn on_epoch_begin( + &self, + first: bool, + header: &Header, + call: &mut SystemCall, + ) -> Result<(), 
::error::Error> { + self.validators.on_epoch_begin(first, header, call) + } - fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { - self.validators.genesis_epoch_data(header, call) - } + fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { + self.validators.genesis_epoch_data(header, call) + } - fn is_epoch_end(&self, first: bool, chain_head: &Header) -> Option> { - self.validators.is_epoch_end(first, chain_head) - } + fn is_epoch_end(&self, first: bool, chain_head: &Header) -> Option> { + self.validators.is_epoch_end(first, chain_head) + } - fn signals_epoch_end( - &self, - first: bool, - header: &Header, - aux: AuxiliaryData, - ) -> ::engines::EpochChange { - self.validators.signals_epoch_end(first, header, aux) - } + fn signals_epoch_end( + &self, + first: bool, + header: &Header, + aux: AuxiliaryData, + ) -> ::engines::EpochChange { + self.validators.signals_epoch_end(first, header, aux) + } - fn epoch_set(&self, first: bool, machine: &EthereumMachine, number: BlockNumber, proof: &[u8]) -> Result<(SimpleList, Option), ::error::Error> { - self.validators.epoch_set(first, machine, number, proof) - } + fn epoch_set( + &self, + first: bool, + machine: &EthereumMachine, + number: BlockNumber, + proof: &[u8], + ) -> Result<(SimpleList, Option), ::error::Error> { + self.validators.epoch_set(first, machine, number, proof) + } - fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool { - self.validators.contains_with_caller(bh, address, caller) - } + fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool { + self.validators.contains_with_caller(bh, address, caller) + } - fn get_with_caller(&self, bh: &H256, nonce: usize, caller: &Call) -> Address { - self.validators.get_with_caller(bh, nonce, caller) - } + fn get_with_caller(&self, bh: &H256, nonce: usize, caller: &Call) -> Address { + self.validators.get_with_caller(bh, nonce, caller) + } - fn 
count_with_caller(&self, bh: &H256, caller: &Call) -> usize { - self.validators.count_with_caller(bh, caller) - } + fn count_with_caller(&self, bh: &H256, caller: &Call) -> usize { + self.validators.count_with_caller(bh, caller) + } - fn report_malicious(&self, address: &Address, _set_block: BlockNumber, block: BlockNumber, proof: Bytes) { - let data = validator_report::functions::report_malicious::encode_input(*address, block, proof); - match self.transact(data) { - Ok(_) => warn!(target: "engine", "Reported malicious validator {}", address), - Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s), - } - } + fn report_malicious( + &self, + address: &Address, + _set_block: BlockNumber, + block: BlockNumber, + proof: Bytes, + ) { + let data = + validator_report::functions::report_malicious::encode_input(*address, block, proof); + match self.transact(data) { + Ok(_) => warn!(target: "engine", "Reported malicious validator {}", address), + Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s), + } + } - fn report_benign(&self, address: &Address, _set_block: BlockNumber, block: BlockNumber) { - let data = validator_report::functions::report_benign::encode_input(*address, block); - match self.transact(data) { - Ok(_) => warn!(target: "engine", "Reported benign validator misbehaviour {}", address), - Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s), - } - } + fn report_benign(&self, address: &Address, _set_block: BlockNumber, block: BlockNumber) { + let data = validator_report::functions::report_benign::encode_input(*address, block); + match self.transact(data) { + Ok(_) => warn!(target: "engine", "Reported benign validator misbehaviour {}", address), + Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s), + } + } - fn register_client(&self, client: Weak) { - self.validators.register_client(client.clone()); - *self.client.write() = 
Some(client); - } + fn register_client(&self, client: Weak) { + self.validators.register_client(client.clone()); + *self.client.write() = Some(client); + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use rustc_hex::FromHex; - use hash::keccak; - use ethereum_types::{H520, Address}; - use bytes::ToPretty; - use rlp::encode; - use spec::Spec; - use types::header::Header; - use accounts::AccountProvider; - use miner::{self, MinerService}; - use types::ids::BlockId; - use test_helpers::generate_dummy_client_with_spec; - use call_contract::CallContract; - use client::{BlockChainClient, ChainInfo, BlockInfo}; - use super::super::ValidatorSet; - use super::ValidatorContract; + use super::{super::ValidatorSet, ValidatorContract}; + use accounts::AccountProvider; + use bytes::ToPretty; + use call_contract::CallContract; + use client::{BlockChainClient, BlockInfo, ChainInfo}; + use ethereum_types::{Address, H520}; + use hash::keccak; + use miner::{self, MinerService}; + use rlp::encode; + use rustc_hex::FromHex; + use spec::Spec; + use std::sync::Arc; + use test_helpers::generate_dummy_client_with_spec; + use types::{header::Header, ids::BlockId}; - #[test] - fn fetches_validators() { - let client = generate_dummy_client_with_spec(Spec::new_validator_contract); - let vc = Arc::new(ValidatorContract::new("0000000000000000000000000000000000000005".parse::
().unwrap())); - vc.register_client(Arc::downgrade(&client) as _); - let last_hash = client.best_block_header().hash(); - assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::
().unwrap())); - assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::
().unwrap())); - } + #[test] + fn fetches_validators() { + let client = generate_dummy_client_with_spec(Spec::new_validator_contract); + let vc = Arc::new(ValidatorContract::new( + "0000000000000000000000000000000000000005" + .parse::
() + .unwrap(), + )); + vc.register_client(Arc::downgrade(&client) as _); + let last_hash = client.best_block_header().hash(); + assert!(vc.contains( + &last_hash, + &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e" + .parse::
() + .unwrap() + )); + assert!(vc.contains( + &last_hash, + &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1" + .parse::
() + .unwrap() + )); + } - #[test] - fn reports_validators() { - let tap = Arc::new(AccountProvider::transient_provider()); - let v1 = tap.insert_account(keccak("1").into(), &"".into()).unwrap(); - let client = generate_dummy_client_with_spec(Spec::new_validator_contract); - client.engine().register_client(Arc::downgrade(&client) as _); - let validator_contract = "0000000000000000000000000000000000000005".parse::
().unwrap(); + #[test] + fn reports_validators() { + let tap = Arc::new(AccountProvider::transient_provider()); + let v1 = tap.insert_account(keccak("1").into(), &"".into()).unwrap(); + let client = generate_dummy_client_with_spec(Spec::new_validator_contract); + client + .engine() + .register_client(Arc::downgrade(&client) as _); + let validator_contract = "0000000000000000000000000000000000000005" + .parse::
() + .unwrap(); - // Make sure reporting can be done. - client.miner().set_gas_range_target((1_000_000.into(), 1_000_000.into())); - let signer = Box::new((tap.clone(), v1, "".into())); - client.miner().set_author(miner::Author::Sealer(signer)); + // Make sure reporting can be done. + client + .miner() + .set_gas_range_target((1_000_000.into(), 1_000_000.into())); + let signer = Box::new((tap.clone(), v1, "".into())); + client.miner().set_author(miner::Author::Sealer(signer)); - // Check a block that is a bit in future, reject it but don't report the validator. - let mut header = Header::default(); - let seal = vec![encode(&4u8), encode(&(&H520::default() as &[u8]))]; - header.set_seal(seal); - header.set_author(v1); - header.set_number(2); - header.set_parent_hash(client.chain_info().best_block_hash); - assert!(client.engine().verify_block_external(&header).is_err()); - client.engine().step(); - assert_eq!(client.chain_info().best_block_number, 0); + // Check a block that is a bit in future, reject it but don't report the validator. + let mut header = Header::default(); + let seal = vec![encode(&4u8), encode(&(&H520::default() as &[u8]))]; + header.set_seal(seal); + header.set_author(v1); + header.set_number(2); + header.set_parent_hash(client.chain_info().best_block_hash); + assert!(client.engine().verify_block_external(&header).is_err()); + client.engine().step(); + assert_eq!(client.chain_info().best_block_number, 0); - // Now create one that is more in future. That one should be rejected and validator should be reported. - let mut header = Header::default(); - let seal = vec![encode(&8u8), encode(&(&H520::default() as &[u8]))]; - header.set_seal(seal); - header.set_author(v1); - header.set_number(2); - header.set_parent_hash(client.chain_info().best_block_hash); - // `reportBenign` when the designated proposer releases block from the future (bad clock). - assert!(client.engine().verify_block_basic(&header).is_err()); - // Seal a block. 
- client.engine().step(); - assert_eq!(client.chain_info().best_block_number, 1); - // Check if the unresponsive validator is `disliked`. - assert_eq!( - client.call_contract(BlockId::Latest, validator_contract, "d8f2e0bf".from_hex().unwrap()).unwrap().to_hex(), - "0000000000000000000000007d577a597b2742b498cb5cf0c26cdcd726d39e6e" - ); - // Simulate a misbehaving validator by handling a double proposal. - let header = client.best_block_header(); - assert!(client.engine().verify_block_family(&header, &header).is_err()); - // Seal a block. - client.engine().step(); - client.engine().step(); - assert_eq!(client.chain_info().best_block_number, 2); + // Now create one that is more in future. That one should be rejected and validator should be reported. + let mut header = Header::default(); + let seal = vec![encode(&8u8), encode(&(&H520::default() as &[u8]))]; + header.set_seal(seal); + header.set_author(v1); + header.set_number(2); + header.set_parent_hash(client.chain_info().best_block_hash); + // `reportBenign` when the designated proposer releases block from the future (bad clock). + assert!(client.engine().verify_block_basic(&header).is_err()); + // Seal a block. + client.engine().step(); + assert_eq!(client.chain_info().best_block_number, 1); + // Check if the unresponsive validator is `disliked`. + assert_eq!( + client + .call_contract( + BlockId::Latest, + validator_contract, + "d8f2e0bf".from_hex().unwrap() + ) + .unwrap() + .to_hex(), + "0000000000000000000000007d577a597b2742b498cb5cf0c26cdcd726d39e6e" + ); + // Simulate a misbehaving validator by handling a double proposal. + let header = client.best_block_header(); + assert!(client + .engine() + .verify_block_family(&header, &header) + .is_err()); + // Seal a block. + client.engine().step(); + client.engine().step(); + assert_eq!(client.chain_info().best_block_number, 2); - // Check if misbehaving validator was removed. 
- client.transact_contract(Default::default(), Default::default()).unwrap(); - client.engine().step(); - client.engine().step(); - assert_eq!(client.chain_info().best_block_number, 2); - } + // Check if misbehaving validator was removed. + client + .transact_contract(Default::default(), Default::default()) + .unwrap(); + client.engine().step(); + client.engine().step(); + assert_eq!(client.chain_info().best_block_number, 2); + } } diff --git a/ethcore/src/engines/validator_set/mod.rs b/ethcore/src/engines/validator_set/mod.rs index 915a3f9a1..ca3768960 100644 --- a/ethcore/src/engines/validator_set/mod.rs +++ b/ethcore/src/engines/validator_set/mod.rs @@ -14,132 +14,159 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +mod contract; +mod multi; +mod safe_contract; +mod simple_list; /// Validator lists. #[cfg(test)] mod test; -mod simple_list; -mod safe_contract; -mod contract; -mod multi; use std::sync::Weak; use bytes::Bytes; -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; use ethjson::spec::ValidatorSet as ValidatorSpec; use machine::{AuxiliaryData, Call, EthereumMachine}; -use types::BlockNumber; -use types::header::Header; -use types::ids::BlockId; +use types::{header::Header, ids::BlockId, BlockNumber}; use client::EngineClient; +pub use self::simple_list::SimpleList; #[cfg(test)] pub use self::test::TestSet; -pub use self::simple_list::SimpleList; -use self::contract::ValidatorContract; -use self::safe_contract::ValidatorSafeContract; -use self::multi::Multi; +use self::{contract::ValidatorContract, multi::Multi, safe_contract::ValidatorSafeContract}; use super::SystemCall; /// Creates a validator set from spec. 
pub fn new_validator_set(spec: ValidatorSpec) -> Box { - match spec { - ValidatorSpec::List(list) => Box::new(SimpleList::new(list.into_iter().map(Into::into).collect())), - ValidatorSpec::SafeContract(address) => Box::new(ValidatorSafeContract::new(address.into())), - ValidatorSpec::Contract(address) => Box::new(ValidatorContract::new(address.into())), - ValidatorSpec::Multi(sequence) => Box::new( - Multi::new(sequence.into_iter().map(|(block, set)| (block.into(), new_validator_set(set))).collect()) - ), - } + match spec { + ValidatorSpec::List(list) => { + Box::new(SimpleList::new(list.into_iter().map(Into::into).collect())) + } + ValidatorSpec::SafeContract(address) => { + Box::new(ValidatorSafeContract::new(address.into())) + } + ValidatorSpec::Contract(address) => Box::new(ValidatorContract::new(address.into())), + ValidatorSpec::Multi(sequence) => Box::new(Multi::new( + sequence + .into_iter() + .map(|(block, set)| (block.into(), new_validator_set(set))) + .collect(), + )), + } } /// A validator set. pub trait ValidatorSet: Send + Sync + 'static { - /// Get the default "Call" helper, for use in general operation. - // TODO [keorn]: this is a hack intended to migrate off of - // a strict dependency on state always being available. - fn default_caller(&self, block_id: BlockId) -> Box; + /// Get the default "Call" helper, for use in general operation. + // TODO [keorn]: this is a hack intended to migrate off of + // a strict dependency on state always being available. + fn default_caller(&self, block_id: BlockId) -> Box; - /// Checks if a given address is a validator, - /// using underlying, default call mechanism. - fn contains(&self, parent: &H256, address: &Address) -> bool { - let default = self.default_caller(BlockId::Hash(*parent)); - self.contains_with_caller(parent, address, &*default) - } + /// Checks if a given address is a validator, + /// using underlying, default call mechanism. 
+ fn contains(&self, parent: &H256, address: &Address) -> bool { + let default = self.default_caller(BlockId::Hash(*parent)); + self.contains_with_caller(parent, address, &*default) + } - /// Draws an validator nonce modulo number of validators. - fn get(&self, parent: &H256, nonce: usize) -> Address { - let default = self.default_caller(BlockId::Hash(*parent)); - self.get_with_caller(parent, nonce, &*default) - } + /// Draws an validator nonce modulo number of validators. + fn get(&self, parent: &H256, nonce: usize) -> Address { + let default = self.default_caller(BlockId::Hash(*parent)); + self.get_with_caller(parent, nonce, &*default) + } - /// Returns the current number of validators. - fn count(&self, parent: &H256) -> usize { - let default = self.default_caller(BlockId::Hash(*parent)); - self.count_with_caller(parent, &*default) - } + /// Returns the current number of validators. + fn count(&self, parent: &H256) -> usize { + let default = self.default_caller(BlockId::Hash(*parent)); + self.count_with_caller(parent, &*default) + } - /// Signalling that a new epoch has begun. - /// - /// All calls here will be from the `SYSTEM_ADDRESS`: 2^160 - 2 - /// and will have an effect on the block's state. - /// The caller provided here may not generate proofs. - /// - /// `first` is true if this is the first block in the set. - fn on_epoch_begin(&self, _first: bool, _header: &Header, _call: &mut SystemCall) -> Result<(), ::error::Error> { - Ok(()) - } + /// Signalling that a new epoch has begun. + /// + /// All calls here will be from the `SYSTEM_ADDRESS`: 2^160 - 2 + /// and will have an effect on the block's state. + /// The caller provided here may not generate proofs. + /// + /// `first` is true if this is the first block in the set. + fn on_epoch_begin( + &self, + _first: bool, + _header: &Header, + _call: &mut SystemCall, + ) -> Result<(), ::error::Error> { + Ok(()) + } - /// Extract genesis epoch data from the genesis state and header. 
- fn genesis_epoch_data(&self, _header: &Header, _call: &Call) -> Result, String> { Ok(Vec::new()) } + /// Extract genesis epoch data from the genesis state and header. + fn genesis_epoch_data(&self, _header: &Header, _call: &Call) -> Result, String> { + Ok(Vec::new()) + } - /// Whether this block is the last one in its epoch. - /// - /// Indicates that the validator set changed at the given block in a manner - /// that doesn't require finality. - /// - /// `first` is true if this is the first block in the set. - fn is_epoch_end(&self, first: bool, chain_head: &Header) -> Option>; + /// Whether this block is the last one in its epoch. + /// + /// Indicates that the validator set changed at the given block in a manner + /// that doesn't require finality. + /// + /// `first` is true if this is the first block in the set. + fn is_epoch_end(&self, first: bool, chain_head: &Header) -> Option>; - /// Whether the given block signals the end of an epoch, but change won't take effect - /// until finality. - /// - /// Engine should set `first` only if the header is genesis. Multiplexing validator - /// sets can set `first` to internal changes. - fn signals_epoch_end( - &self, - first: bool, - header: &Header, - aux: AuxiliaryData, - ) -> ::engines::EpochChange; + /// Whether the given block signals the end of an epoch, but change won't take effect + /// until finality. + /// + /// Engine should set `first` only if the header is genesis. Multiplexing validator + /// sets can set `first` to internal changes. + fn signals_epoch_end( + &self, + first: bool, + header: &Header, + aux: AuxiliaryData, + ) -> ::engines::EpochChange; - /// Recover the validator set from the given proof, the block number, and - /// whether this header is first in its set. - /// - /// May fail if the given header doesn't kick off an epoch or - /// the proof is invalid. - /// - /// Returns the set, along with a flag indicating whether finality of a specific - /// hash should be proven. 
- fn epoch_set(&self, first: bool, machine: &EthereumMachine, number: BlockNumber, proof: &[u8]) - -> Result<(SimpleList, Option), ::error::Error>; + /// Recover the validator set from the given proof, the block number, and + /// whether this header is first in its set. + /// + /// May fail if the given header doesn't kick off an epoch or + /// the proof is invalid. + /// + /// Returns the set, along with a flag indicating whether finality of a specific + /// hash should be proven. + fn epoch_set( + &self, + first: bool, + machine: &EthereumMachine, + number: BlockNumber, + proof: &[u8], + ) -> Result<(SimpleList, Option), ::error::Error>; - /// Checks if a given address is a validator, with the given function - /// for executing synchronous calls to contracts. - fn contains_with_caller(&self, parent_block_hash: &H256, address: &Address, caller: &Call) -> bool; + /// Checks if a given address is a validator, with the given function + /// for executing synchronous calls to contracts. + fn contains_with_caller( + &self, + parent_block_hash: &H256, + address: &Address, + caller: &Call, + ) -> bool; - /// Draws an validator nonce modulo number of validators. - fn get_with_caller(&self, parent_block_hash: &H256, nonce: usize, caller: &Call) -> Address; + /// Draws an validator nonce modulo number of validators. + fn get_with_caller(&self, parent_block_hash: &H256, nonce: usize, caller: &Call) -> Address; - /// Returns the current number of validators. - fn count_with_caller(&self, parent_block_hash: &H256, caller: &Call) -> usize; + /// Returns the current number of validators. + fn count_with_caller(&self, parent_block_hash: &H256, caller: &Call) -> usize; - /// Notifies about malicious behaviour. - fn report_malicious(&self, _validator: &Address, _set_block: BlockNumber, _block: BlockNumber, _proof: Bytes) {} - /// Notifies about benign misbehaviour. 
- fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, _block: BlockNumber) {} - /// Allows blockchain state access. - fn register_client(&self, _client: Weak) {} + /// Notifies about malicious behaviour. + fn report_malicious( + &self, + _validator: &Address, + _set_block: BlockNumber, + _block: BlockNumber, + _proof: Bytes, + ) { + } + /// Notifies about benign misbehaviour. + fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, _block: BlockNumber) {} + /// Allows blockchain state access. + fn register_client(&self, _client: Weak) {} } diff --git a/ethcore/src/engines/validator_set/multi.rs b/ethcore/src/engines/validator_set/multi.rs index dec6b5011..4a1f45b56 100644 --- a/ethcore/src/engines/validator_set/multi.rs +++ b/ethcore/src/engines/validator_set/multi.rs @@ -15,234 +15,282 @@ // along with Parity Ethereum. If not, see . /// Validator set changing at fork blocks. - use std::collections::BTreeMap; use std::sync::Weak; use bytes::Bytes; -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; use parking_lot::RwLock; -use types::BlockNumber; -use types::header::Header; -use types::ids::BlockId; +use types::{header::Header, ids::BlockId, BlockNumber}; +use super::{SystemCall, ValidatorSet}; use client::EngineClient; use machine::{AuxiliaryData, Call, EthereumMachine}; -use super::{SystemCall, ValidatorSet}; type BlockNumberLookup = Box Result + Send + Sync + 'static>; pub struct Multi { - sets: BTreeMap>, - block_number: RwLock, + sets: BTreeMap>, + block_number: RwLock, } impl Multi { - pub fn new(set_map: BTreeMap>) -> Self { - assert!(set_map.get(&0u64).is_some(), "ValidatorSet has to be specified from block 0."); - Multi { - sets: set_map, - block_number: RwLock::new(Box::new(move |_| Err("No client!".into()))), - } - } + pub fn new(set_map: BTreeMap>) -> Self { + assert!( + set_map.get(&0u64).is_some(), + "ValidatorSet has to be specified from block 0." 
+ ); + Multi { + sets: set_map, + block_number: RwLock::new(Box::new(move |_| Err("No client!".into()))), + } + } - fn correct_set(&self, id: BlockId) -> Option<&ValidatorSet> { - match self.block_number.read()(id).map(|parent_block| self.correct_set_by_number(parent_block)) { - Ok((_, set)) => Some(set), - Err(e) => { - debug!(target: "engine", "ValidatorSet could not be recovered: {}", e); - None - }, - } - } + fn correct_set(&self, id: BlockId) -> Option<&ValidatorSet> { + match self.block_number.read()(id) + .map(|parent_block| self.correct_set_by_number(parent_block)) + { + Ok((_, set)) => Some(set), + Err(e) => { + debug!(target: "engine", "ValidatorSet could not be recovered: {}", e); + None + } + } + } - // get correct set by block number, along with block number at which - // this set was activated. - fn correct_set_by_number(&self, parent_block: BlockNumber) -> (BlockNumber, &ValidatorSet) { - let (block, set) = self.sets.iter() + // get correct set by block number, along with block number at which + // this set was activated. 
+ fn correct_set_by_number(&self, parent_block: BlockNumber) -> (BlockNumber, &ValidatorSet) { + let (block, set) = self.sets.iter() .rev() .find(|&(block, _)| *block <= parent_block + 1) .expect("constructor validation ensures that there is at least one validator set for block 0; block 0 is less than any uint; qed"); - trace!(target: "engine", "Multi ValidatorSet retrieved for block {}.", block); - (*block, &**set) - } + trace!(target: "engine", "Multi ValidatorSet retrieved for block {}.", block); + (*block, &**set) + } } impl ValidatorSet for Multi { - fn default_caller(&self, block_id: BlockId) -> Box { - self.correct_set(block_id).map(|set| set.default_caller(block_id)) - .unwrap_or_else(|| Box::new(|_, _| Err("No validator set for given ID.".into()))) - } + fn default_caller(&self, block_id: BlockId) -> Box { + self.correct_set(block_id) + .map(|set| set.default_caller(block_id)) + .unwrap_or_else(|| Box::new(|_, _| Err("No validator set for given ID.".into()))) + } - fn on_epoch_begin(&self, _first: bool, header: &Header, call: &mut SystemCall) -> Result<(), ::error::Error> { - let (set_block, set) = self.correct_set_by_number(header.number()); - let first = set_block == header.number(); + fn on_epoch_begin( + &self, + _first: bool, + header: &Header, + call: &mut SystemCall, + ) -> Result<(), ::error::Error> { + let (set_block, set) = self.correct_set_by_number(header.number()); + let first = set_block == header.number(); - set.on_epoch_begin(first, header, call) - } + set.on_epoch_begin(first, header, call) + } - fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { - self.correct_set_by_number(0).1.genesis_epoch_data(header, call) - } + fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { + self.correct_set_by_number(0) + .1 + .genesis_epoch_data(header, call) + } - fn is_epoch_end(&self, _first: bool, chain_head: &Header) -> Option> { - let (set_block, set) = 
self.correct_set_by_number(chain_head.number()); - let first = set_block == chain_head.number(); + fn is_epoch_end(&self, _first: bool, chain_head: &Header) -> Option> { + let (set_block, set) = self.correct_set_by_number(chain_head.number()); + let first = set_block == chain_head.number(); - set.is_epoch_end(first, chain_head) - } + set.is_epoch_end(first, chain_head) + } - fn signals_epoch_end(&self, _first: bool, header: &Header, aux: AuxiliaryData) - -> ::engines::EpochChange - { - let (set_block, set) = self.correct_set_by_number(header.number()); - let first = set_block == header.number(); + fn signals_epoch_end( + &self, + _first: bool, + header: &Header, + aux: AuxiliaryData, + ) -> ::engines::EpochChange { + let (set_block, set) = self.correct_set_by_number(header.number()); + let first = set_block == header.number(); - set.signals_epoch_end(first, header, aux) - } + set.signals_epoch_end(first, header, aux) + } - fn epoch_set(&self, _first: bool, machine: &EthereumMachine, number: BlockNumber, proof: &[u8]) -> Result<(super::SimpleList, Option), ::error::Error> { - let (set_block, set) = self.correct_set_by_number(number); - let first = set_block == number; + fn epoch_set( + &self, + _first: bool, + machine: &EthereumMachine, + number: BlockNumber, + proof: &[u8], + ) -> Result<(super::SimpleList, Option), ::error::Error> { + let (set_block, set) = self.correct_set_by_number(number); + let first = set_block == number; - set.epoch_set(first, machine, number, proof) - } + set.epoch_set(first, machine, number, proof) + } - fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool { - self.correct_set(BlockId::Hash(*bh)) - .map_or(false, |set| set.contains_with_caller(bh, address, caller)) - } + fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool { + self.correct_set(BlockId::Hash(*bh)) + .map_or(false, |set| set.contains_with_caller(bh, address, caller)) + } - fn get_with_caller(&self, bh: &H256, 
nonce: usize, caller: &Call) -> Address { - self.correct_set(BlockId::Hash(*bh)) - .map_or_else(Default::default, |set| set.get_with_caller(bh, nonce, caller)) - } + fn get_with_caller(&self, bh: &H256, nonce: usize, caller: &Call) -> Address { + self.correct_set(BlockId::Hash(*bh)) + .map_or_else(Default::default, |set| { + set.get_with_caller(bh, nonce, caller) + }) + } - fn count_with_caller(&self, bh: &H256, caller: &Call) -> usize { - self.correct_set(BlockId::Hash(*bh)) - .map_or_else(usize::max_value, |set| set.count_with_caller(bh, caller)) - } + fn count_with_caller(&self, bh: &H256, caller: &Call) -> usize { + self.correct_set(BlockId::Hash(*bh)) + .map_or_else(usize::max_value, |set| set.count_with_caller(bh, caller)) + } - fn report_malicious(&self, validator: &Address, set_block: BlockNumber, block: BlockNumber, proof: Bytes) { - self.correct_set_by_number(set_block).1.report_malicious(validator, set_block, block, proof); - } + fn report_malicious( + &self, + validator: &Address, + set_block: BlockNumber, + block: BlockNumber, + proof: Bytes, + ) { + self.correct_set_by_number(set_block) + .1 + .report_malicious(validator, set_block, block, proof); + } - fn report_benign(&self, validator: &Address, set_block: BlockNumber, block: BlockNumber) { - self.correct_set_by_number(set_block).1.report_benign(validator, set_block, block); - } + fn report_benign(&self, validator: &Address, set_block: BlockNumber, block: BlockNumber) { + self.correct_set_by_number(set_block) + .1 + .report_benign(validator, set_block, block); + } - fn register_client(&self, client: Weak) { - for set in self.sets.values() { - set.register_client(client.clone()); - } - *self.block_number.write() = Box::new(move |id| client - .upgrade() - .ok_or_else(|| "No client!".into()) - .and_then(|c| c.block_number(id).ok_or_else(|| "Unknown block".into()))); - } + fn register_client(&self, client: Weak) { + for set in self.sets.values() { + set.register_client(client.clone()); + } + 
*self.block_number.write() = Box::new(move |id| { + client + .upgrade() + .ok_or_else(|| "No client!".into()) + .and_then(|c| c.block_number(id).ok_or_else(|| "Unknown block".into())) + }); + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::collections::BTreeMap; - use hash::keccak; - use accounts::AccountProvider; - use client::{BlockChainClient, ChainInfo, BlockInfo, ImportBlock, traits::ForceUpdateSealing}; - use engines::EpochChange; - use engines::validator_set::ValidatorSet; - use ethkey::Secret; - use types::header::Header; - use miner::{self, MinerService}; - use spec::Spec; - use test_helpers::{generate_dummy_client_with_spec, generate_dummy_client_with_spec_and_data}; - use types::ids::BlockId; - use ethereum_types::Address; - use verification::queue::kind::blocks::Unverified; + use accounts::AccountProvider; + use client::{traits::ForceUpdateSealing, BlockChainClient, BlockInfo, ChainInfo, ImportBlock}; + use engines::{validator_set::ValidatorSet, EpochChange}; + use ethereum_types::Address; + use ethkey::Secret; + use hash::keccak; + use miner::{self, MinerService}; + use spec::Spec; + use std::{collections::BTreeMap, sync::Arc}; + use test_helpers::{generate_dummy_client_with_spec, generate_dummy_client_with_spec_and_data}; + use types::{header::Header, ids::BlockId}; + use verification::queue::kind::blocks::Unverified; - use super::Multi; + use super::Multi; - #[test] - fn uses_current_set() { - let tap = Arc::new(AccountProvider::transient_provider()); - let s0: Secret = keccak("0").into(); - let v0 = tap.insert_account(s0.clone(), &"".into()).unwrap(); - let v1 = tap.insert_account(keccak("1").into(), &"".into()).unwrap(); - let client = generate_dummy_client_with_spec(Spec::new_validator_multi); - client.engine().register_client(Arc::downgrade(&client) as _); + #[test] + fn uses_current_set() { + let tap = Arc::new(AccountProvider::transient_provider()); + let s0: Secret = keccak("0").into(); + let v0 = tap.insert_account(s0.clone(), 
&"".into()).unwrap(); + let v1 = tap.insert_account(keccak("1").into(), &"".into()).unwrap(); + let client = generate_dummy_client_with_spec(Spec::new_validator_multi); + client + .engine() + .register_client(Arc::downgrade(&client) as _); - // Make sure txs go through. - client.miner().set_gas_range_target((1_000_000.into(), 1_000_000.into())); + // Make sure txs go through. + client + .miner() + .set_gas_range_target((1_000_000.into(), 1_000_000.into())); - // Wrong signer for the first block. - let signer = Box::new((tap.clone(), v1, "".into())); - client.miner().set_author(miner::Author::Sealer(signer)); - client.transact_contract(Default::default(), Default::default()).unwrap(); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); - assert_eq!(client.chain_info().best_block_number, 0); - // Right signer for the first block. - let signer = Box::new((tap.clone(), v0, "".into())); - client.miner().set_author(miner::Author::Sealer(signer)); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); - assert_eq!(client.chain_info().best_block_number, 1); - // This time v0 is wrong. - client.transact_contract(Default::default(), Default::default()).unwrap(); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); - assert_eq!(client.chain_info().best_block_number, 1); - let signer = Box::new((tap.clone(), v1, "".into())); - client.miner().set_author(miner::Author::Sealer(signer)); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); - assert_eq!(client.chain_info().best_block_number, 2); - // v1 is still good. - client.transact_contract(Default::default(), Default::default()).unwrap(); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); - assert_eq!(client.chain_info().best_block_number, 3); + // Wrong signer for the first block. 
+ let signer = Box::new((tap.clone(), v1, "".into())); + client.miner().set_author(miner::Author::Sealer(signer)); + client + .transact_contract(Default::default(), Default::default()) + .unwrap(); + ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + assert_eq!(client.chain_info().best_block_number, 0); + // Right signer for the first block. + let signer = Box::new((tap.clone(), v0, "".into())); + client.miner().set_author(miner::Author::Sealer(signer)); + ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + assert_eq!(client.chain_info().best_block_number, 1); + // This time v0 is wrong. + client + .transact_contract(Default::default(), Default::default()) + .unwrap(); + ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + assert_eq!(client.chain_info().best_block_number, 1); + let signer = Box::new((tap.clone(), v1, "".into())); + client.miner().set_author(miner::Author::Sealer(signer)); + ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + assert_eq!(client.chain_info().best_block_number, 2); + // v1 is still good. + client + .transact_contract(Default::default(), Default::default()) + .unwrap(); + ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + assert_eq!(client.chain_info().best_block_number, 3); - // Check syncing. - let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_multi, 0, 0, &[]); - sync_client.engine().register_client(Arc::downgrade(&sync_client) as _); - for i in 1..4 { - sync_client.import_block(Unverified::from_rlp(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap()).unwrap(); - } - sync_client.flush_queue(); - assert_eq!(sync_client.chain_info().best_block_number, 3); - } + // Check syncing. 
+ let sync_client = + generate_dummy_client_with_spec_and_data(Spec::new_validator_multi, 0, 0, &[]); + sync_client + .engine() + .register_client(Arc::downgrade(&sync_client) as _); + for i in 1..4 { + sync_client + .import_block( + Unverified::from_rlp(client.block(BlockId::Number(i)).unwrap().into_inner()) + .unwrap(), + ) + .unwrap(); + } + sync_client.flush_queue(); + assert_eq!(sync_client.chain_info().best_block_number, 3); + } - #[test] - fn transition_to_fixed_list_instant() { - use super::super::SimpleList; + #[test] + fn transition_to_fixed_list_instant() { + use super::super::SimpleList; - let mut map: BTreeMap<_, Box> = BTreeMap::new(); - let list1: Vec<_> = (0..10).map(|_| Address::random()).collect(); - let list2 = { - let mut list = list1.clone(); - list.push(Address::random()); - list - }; + let mut map: BTreeMap<_, Box> = BTreeMap::new(); + let list1: Vec<_> = (0..10).map(|_| Address::random()).collect(); + let list2 = { + let mut list = list1.clone(); + list.push(Address::random()); + list + }; - map.insert(0, Box::new(SimpleList::new(list1))); - map.insert(500, Box::new(SimpleList::new(list2))); + map.insert(0, Box::new(SimpleList::new(list1))); + map.insert(500, Box::new(SimpleList::new(list2))); - let multi = Multi::new(map); + let multi = Multi::new(map); - let mut header = Header::new(); - header.set_number(499); + let mut header = Header::new(); + header.set_number(499); - match multi.signals_epoch_end(false, &header, Default::default()) { - EpochChange::No => {}, - _ => panic!("Expected no epoch signal change."), - } - assert!(multi.is_epoch_end(false, &header).is_none()); + match multi.signals_epoch_end(false, &header, Default::default()) { + EpochChange::No => {} + _ => panic!("Expected no epoch signal change."), + } + assert!(multi.is_epoch_end(false, &header).is_none()); - header.set_number(500); + header.set_number(500); - match multi.signals_epoch_end(false, &header, Default::default()) { - EpochChange::No => {}, - _ => 
panic!("Expected no epoch signal change."), - } - assert!(multi.is_epoch_end(false, &header).is_some()); - } + match multi.signals_epoch_end(false, &header, Default::default()) { + EpochChange::No => {} + _ => panic!("Expected no epoch signal change."), + } + assert!(multi.is_epoch_end(false, &header).is_some()); + } } diff --git a/ethcore/src/engines/validator_set/safe_contract.rs b/ethcore/src/engines/validator_set/safe_contract.rs index 3596ff4b2..9b9872598 100644 --- a/ethcore/src/engines/validator_set/safe_contract.rs +++ b/ethcore/src/engines/validator_set/safe_contract.rs @@ -15,27 +15,22 @@ // along with Parity Ethereum. If not, see . /// Validator set maintained in a contract, updated using `getValidators` method. - -use std::sync::{Weak, Arc}; +use std::sync::{Arc, Weak}; use bytes::Bytes; use ethabi::FunctionOutputDecoder; -use ethereum_types::{H256, U256, Address, Bloom}; +use ethereum_types::{Address, Bloom, H256, U256}; use hash::keccak; use kvdb::DBValue; use memory_cache::MemoryLruCache; use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; -use types::header::Header; -use types::ids::BlockId; -use types::log_entry::LogEntry; -use types::receipt::Receipt; +use types::{header::Header, ids::BlockId, log_entry::LogEntry, receipt::Receipt}; use unexpected::Mismatch; +use super::{simple_list::SimpleList, SystemCall, ValidatorSet}; use client::EngineClient; -use machine::{AuxiliaryData, Call, EthereumMachine, AuxiliaryRequest}; -use super::{SystemCall, ValidatorSet}; -use super::simple_list::SimpleList; +use machine::{AuxiliaryData, AuxiliaryRequest, Call, EthereumMachine}; use_contract!(validator_set, "res/contracts/validator_set.json"); @@ -45,549 +40,637 @@ const MEMOIZE_CAPACITY: usize = 500; const EVENT_NAME: &'static [u8] = &*b"InitiateChange(bytes32,address[])"; lazy_static! 
{ - static ref EVENT_NAME_HASH: H256 = keccak(EVENT_NAME); + static ref EVENT_NAME_HASH: H256 = keccak(EVENT_NAME); } // state-dependent proofs for the safe contract: // only "first" proofs are such. struct StateProof { - contract_address: Address, - header: Header, + contract_address: Address, + header: Header, } impl ::engines::StateDependentProof for StateProof { - fn generate_proof(&self, caller: &Call) -> Result, String> { - prove_initial(self.contract_address, &self.header, caller) - } + fn generate_proof(&self, caller: &Call) -> Result, String> { + prove_initial(self.contract_address, &self.header, caller) + } - fn check_proof(&self, machine: &EthereumMachine, proof: &[u8]) -> Result<(), String> { - let (header, state_items) = decode_first_proof(&Rlp::new(proof)) - .map_err(|e| format!("proof incorrectly encoded: {}", e))?; - if &header != &self.header { - return Err("wrong header in proof".into()); - } + fn check_proof(&self, machine: &EthereumMachine, proof: &[u8]) -> Result<(), String> { + let (header, state_items) = decode_first_proof(&Rlp::new(proof)) + .map_err(|e| format!("proof incorrectly encoded: {}", e))?; + if &header != &self.header { + return Err("wrong header in proof".into()); + } - check_first_proof(machine, self.contract_address, header, &state_items).map(|_| ()) - } + check_first_proof(machine, self.contract_address, header, &state_items).map(|_| ()) + } } /// The validator contract should have the following interface: pub struct ValidatorSafeContract { - contract_address: Address, - validators: RwLock>, - client: RwLock>>, // TODO [keorn]: remove + contract_address: Address, + validators: RwLock>, + client: RwLock>>, // TODO [keorn]: remove } // first proof is just a state proof call of `getValidators` at header's state. 
fn encode_first_proof(header: &Header, state_items: &[Vec]) -> Bytes { - let mut stream = RlpStream::new_list(2); - stream.append(header).begin_list(state_items.len()); - for item in state_items { - stream.append(item); - } + let mut stream = RlpStream::new_list(2); + stream.append(header).begin_list(state_items.len()); + for item in state_items { + stream.append(item); + } - stream.out() + stream.out() } // check a first proof: fetch the validator set at the given block. -fn check_first_proof(machine: &EthereumMachine, contract_address: Address, old_header: Header, state_items: &[DBValue]) - -> Result, String> -{ - use types::transaction::{Action, Transaction}; +fn check_first_proof( + machine: &EthereumMachine, + contract_address: Address, + old_header: Header, + state_items: &[DBValue], +) -> Result, String> { + use types::transaction::{Action, Transaction}; - // TODO: match client contract_call_tx more cleanly without duplication. - const PROVIDED_GAS: u64 = 50_000_000; + // TODO: match client contract_call_tx more cleanly without duplication. + const PROVIDED_GAS: u64 = 50_000_000; - let env_info = ::vm::EnvInfo { - number: old_header.number(), - author: *old_header.author(), - difficulty: *old_header.difficulty(), - gas_limit: PROVIDED_GAS.into(), - timestamp: old_header.timestamp(), - last_hashes: { - // this will break if we don't inclue all 256 last hashes. - let mut last_hashes: Vec<_> = (0..256).map(|_| H256::default()).collect(); - last_hashes[255] = *old_header.parent_hash(); - Arc::new(last_hashes) - }, - gas_used: 0.into(), - }; + let env_info = ::vm::EnvInfo { + number: old_header.number(), + author: *old_header.author(), + difficulty: *old_header.difficulty(), + gas_limit: PROVIDED_GAS.into(), + timestamp: old_header.timestamp(), + last_hashes: { + // this will break if we don't inclue all 256 last hashes. 
+ let mut last_hashes: Vec<_> = (0..256).map(|_| H256::default()).collect(); + last_hashes[255] = *old_header.parent_hash(); + Arc::new(last_hashes) + }, + gas_used: 0.into(), + }; - // check state proof using given machine. - let number = old_header.number(); - let (data, decoder) = validator_set::functions::get_validators::call(); + // check state proof using given machine. + let number = old_header.number(); + let (data, decoder) = validator_set::functions::get_validators::call(); - let from = Address::default(); - let tx = Transaction { - nonce: machine.account_start_nonce(number), - action: Action::Call(contract_address), - gas: PROVIDED_GAS.into(), - gas_price: U256::default(), - value: U256::default(), - data, - }.fake_sign(from); + let from = Address::default(); + let tx = Transaction { + nonce: machine.account_start_nonce(number), + action: Action::Call(contract_address), + gas: PROVIDED_GAS.into(), + gas_price: U256::default(), + value: U256::default(), + data, + } + .fake_sign(from); - let res = ::state::check_proof( - state_items, - *old_header.state_root(), - &tx, - machine, - &env_info, - ); + let res = ::state::check_proof( + state_items, + *old_header.state_root(), + &tx, + machine, + &env_info, + ); - match res { - ::state::ProvedExecution::BadProof => Err("Bad proof".into()), - ::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)), - ::state::ProvedExecution::Complete(e) => decoder.decode(&e.output).map_err(|e| e.to_string()), - } + match res { + ::state::ProvedExecution::BadProof => Err("Bad proof".into()), + ::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)), + ::state::ProvedExecution::Complete(e) => { + decoder.decode(&e.output).map_err(|e| e.to_string()) + } + } } fn decode_first_proof(rlp: &Rlp) -> Result<(Header, Vec), ::error::Error> { - let header = rlp.val_at(0)?; - let state_items = rlp.at(1)?.iter().map(|x| { - let mut val = DBValue::new(); - val.append_slice(x.data()?); - Ok(val) - 
}).collect::>()?; + let header = rlp.val_at(0)?; + let state_items = rlp + .at(1)? + .iter() + .map(|x| { + let mut val = DBValue::new(); + val.append_slice(x.data()?); + Ok(val) + }) + .collect::>()?; - Ok((header, state_items)) + Ok((header, state_items)) } // inter-contract proofs are a header and receipts. // checking will involve ensuring that the receipts match the header and // extracting the validator set from the receipts. fn encode_proof(header: &Header, receipts: &[Receipt]) -> Bytes { - let mut stream = RlpStream::new_list(2); - stream.append(header).append_list(receipts); - stream.drain() + let mut stream = RlpStream::new_list(2); + stream.append(header).append_list(receipts); + stream.drain() } fn decode_proof(rlp: &Rlp) -> Result<(Header, Vec), ::error::Error> { - Ok((rlp.val_at(0)?, rlp.list_at(1)?)) + Ok((rlp.val_at(0)?, rlp.list_at(1)?)) } // given a provider and caller, generate proof. this will just be a state proof // of `getValidators`. -fn prove_initial(contract_address: Address, header: &Header, caller: &Call) -> Result, String> { - use std::cell::RefCell; +fn prove_initial( + contract_address: Address, + header: &Header, + caller: &Call, +) -> Result, String> { + use std::cell::RefCell; - let epoch_proof = RefCell::new(None); - let validators = { - let (data, decoder) = validator_set::functions::get_validators::call(); - let (value, proof) = caller(contract_address, data)?; - *epoch_proof.borrow_mut() = Some(encode_first_proof(header, &proof)); - decoder.decode(&value).map_err(|e| e.to_string())? - }; + let epoch_proof = RefCell::new(None); + let validators = { + let (data, decoder) = validator_set::functions::get_validators::call(); + let (value, proof) = caller(contract_address, data)?; + *epoch_proof.borrow_mut() = Some(encode_first_proof(header, &proof)); + decoder.decode(&value).map_err(|e| e.to_string())? 
+ }; - let proof = epoch_proof.into_inner().expect("epoch_proof always set after call; qed"); + let proof = epoch_proof + .into_inner() + .expect("epoch_proof always set after call; qed"); - trace!(target: "engine", "obtained proof for initial set: {} validators, {} bytes", + trace!(target: "engine", "obtained proof for initial set: {} validators, {} bytes", validators.len(), proof.len()); - info!(target: "engine", "Signal for switch to contract-based validator set."); - info!(target: "engine", "Initial contract validators: {:?}", validators); + info!(target: "engine", "Signal for switch to contract-based validator set."); + info!(target: "engine", "Initial contract validators: {:?}", validators); - Ok(proof) + Ok(proof) } impl ValidatorSafeContract { - pub fn new(contract_address: Address) -> Self { - ValidatorSafeContract { - contract_address, - validators: RwLock::new(MemoryLruCache::new(MEMOIZE_CAPACITY)), - client: RwLock::new(None), - } - } + pub fn new(contract_address: Address) -> Self { + ValidatorSafeContract { + contract_address, + validators: RwLock::new(MemoryLruCache::new(MEMOIZE_CAPACITY)), + client: RwLock::new(None), + } + } - /// Queries the state and gets the set of validators. - fn get_list(&self, caller: &Call) -> Option { - let contract_address = self.contract_address; + /// Queries the state and gets the set of validators. 
+ fn get_list(&self, caller: &Call) -> Option { + let contract_address = self.contract_address; - let (data, decoder) = validator_set::functions::get_validators::call(); - let value = caller(contract_address, data).and_then(|x| decoder.decode(&x.0).map_err(|e| e.to_string())); + let (data, decoder) = validator_set::functions::get_validators::call(); + let value = caller(contract_address, data) + .and_then(|x| decoder.decode(&x.0).map_err(|e| e.to_string())); - match value { - Ok(new) => { - debug!(target: "engine", "Set of validators obtained: {:?}", new); - Some(SimpleList::new(new)) - }, - Err(s) => { - debug!(target: "engine", "Set of validators could not be updated: {}", s); - None - }, - } - } + match value { + Ok(new) => { + debug!(target: "engine", "Set of validators obtained: {:?}", new); + Some(SimpleList::new(new)) + } + Err(s) => { + debug!(target: "engine", "Set of validators could not be updated: {}", s); + None + } + } + } - // Whether the header matches the expected bloom. - // - // The expected log should have 3 topics: - // 1. ETHABI-encoded log name. - // 2. the block's parent hash. - // 3. the "nonce": n for the nth transition in history. - // - // We can only search for the first 2, since we don't have the third - // just yet. - // - // The parent hash is included to prevent - // malicious actors from brute forcing other logs that would - // produce the same bloom. - // - // The log data is an array of all new validator addresses. - fn expected_bloom(&self, header: &Header) -> Bloom { - let topics = vec![*EVENT_NAME_HASH, *header.parent_hash()]; + // Whether the header matches the expected bloom. + // + // The expected log should have 3 topics: + // 1. ETHABI-encoded log name. + // 2. the block's parent hash. + // 3. the "nonce": n for the nth transition in history. + // + // We can only search for the first 2, since we don't have the third + // just yet. 
+ // + // The parent hash is included to prevent + // malicious actors from brute forcing other logs that would + // produce the same bloom. + // + // The log data is an array of all new validator addresses. + fn expected_bloom(&self, header: &Header) -> Bloom { + let topics = vec![*EVENT_NAME_HASH, *header.parent_hash()]; - debug!(target: "engine", "Expected topics for header {}: {:?}", + debug!(target: "engine", "Expected topics for header {}: {:?}", header.hash(), topics); - LogEntry { - address: self.contract_address, - topics: topics, - data: Vec::new(), // irrelevant for bloom. - }.bloom() - } + LogEntry { + address: self.contract_address, + topics: topics, + data: Vec::new(), // irrelevant for bloom. + } + .bloom() + } - // check receipts for log event. bloom should be `expected_bloom` for the - // header the receipts correspond to. - fn extract_from_event(&self, bloom: Bloom, header: &Header, receipts: &[Receipt]) -> Option { - let check_log = |log: &LogEntry| { - log.address == self.contract_address && - log.topics.len() == 2 && - log.topics[0] == *EVENT_NAME_HASH && - log.topics[1] == *header.parent_hash() - }; + // check receipts for log event. bloom should be `expected_bloom` for the + // header the receipts correspond to. + fn extract_from_event( + &self, + bloom: Bloom, + header: &Header, + receipts: &[Receipt], + ) -> Option { + let check_log = |log: &LogEntry| { + log.address == self.contract_address + && log.topics.len() == 2 + && log.topics[0] == *EVENT_NAME_HASH + && log.topics[1] == *header.parent_hash() + }; - //// iterate in reverse because only the _last_ change in a given - //// block actually has any effect. - //// the contract should only increment the nonce once. 
- let mut decoded_events = receipts.iter() - .rev() - .filter(|r| r.log_bloom.contains_bloom(&bloom)) - .flat_map(|r| r.logs.iter()) - .filter(move |l| check_log(l)) - .filter_map(|log| { - validator_set::events::initiate_change::parse_log((log.topics.clone(), log.data.clone()).into()).ok() - }); + //// iterate in reverse because only the _last_ change in a given + //// block actually has any effect. + //// the contract should only increment the nonce once. + let mut decoded_events = receipts + .iter() + .rev() + .filter(|r| r.log_bloom.contains_bloom(&bloom)) + .flat_map(|r| r.logs.iter()) + .filter(move |l| check_log(l)) + .filter_map(|log| { + validator_set::events::initiate_change::parse_log( + (log.topics.clone(), log.data.clone()).into(), + ) + .ok() + }); - // only last log is taken into account - match decoded_events.next() { - None => None, - Some(matched_event) => Some(SimpleList::new(matched_event.new_set)) - } - } + // only last log is taken into account + match decoded_events.next() { + None => None, + Some(matched_event) => Some(SimpleList::new(matched_event.new_set)), + } + } } impl ValidatorSet for ValidatorSafeContract { - fn default_caller(&self, id: BlockId) -> Box { - let client = self.client.read().clone(); - Box::new(move |addr, data| client.as_ref() - .and_then(Weak::upgrade) - .ok_or_else(|| "No client!".into()) - .and_then(|c| { - match c.as_full_client() { - Some(c) => c.call_contract(id, addr, data), - None => Err("No full client!".into()), - } - }) - .map(|out| (out, Vec::new()))) // generate no proofs in general - } + fn default_caller(&self, id: BlockId) -> Box { + let client = self.client.read().clone(); + Box::new(move |addr, data| { + client + .as_ref() + .and_then(Weak::upgrade) + .ok_or_else(|| "No client!".into()) + .and_then(|c| match c.as_full_client() { + Some(c) => c.call_contract(id, addr, data), + None => Err("No full client!".into()), + }) + .map(|out| (out, Vec::new())) + }) // generate no proofs in general + } - fn 
on_epoch_begin(&self, _first: bool, _header: &Header, caller: &mut SystemCall) -> Result<(), ::error::Error> { - let data = validator_set::functions::finalize_change::encode_input(); - caller(self.contract_address, data) - .map(|_| ()) - .map_err(::engines::EngineError::FailedSystemCall) - .map_err(Into::into) - } + fn on_epoch_begin( + &self, + _first: bool, + _header: &Header, + caller: &mut SystemCall, + ) -> Result<(), ::error::Error> { + let data = validator_set::functions::finalize_change::encode_input(); + caller(self.contract_address, data) + .map(|_| ()) + .map_err(::engines::EngineError::FailedSystemCall) + .map_err(Into::into) + } - fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { - prove_initial(self.contract_address, header, call) - } + fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result, String> { + prove_initial(self.contract_address, header, call) + } - fn is_epoch_end(&self, _first: bool, _chain_head: &Header) -> Option> { - None // no immediate transitions to contract. - } + fn is_epoch_end(&self, _first: bool, _chain_head: &Header) -> Option> { + None // no immediate transitions to contract. + } - fn signals_epoch_end(&self, first: bool, header: &Header, aux: AuxiliaryData) - -> ::engines::EpochChange - { - let receipts = aux.receipts; + fn signals_epoch_end( + &self, + first: bool, + header: &Header, + aux: AuxiliaryData, + ) -> ::engines::EpochChange { + let receipts = aux.receipts; - // transition to the first block of a contract requires finality but has no log event. - if first { - debug!(target: "engine", "signalling transition to fresh contract."); - let state_proof = Arc::new(StateProof { - contract_address: self.contract_address, - header: header.clone(), - }); - return ::engines::EpochChange::Yes(::engines::Proof::WithState(state_proof as Arc<_>)); - } + // transition to the first block of a contract requires finality but has no log event. 
+ if first { + debug!(target: "engine", "signalling transition to fresh contract."); + let state_proof = Arc::new(StateProof { + contract_address: self.contract_address, + header: header.clone(), + }); + return ::engines::EpochChange::Yes(::engines::Proof::WithState(state_proof as Arc<_>)); + } - // otherwise, we're checking for logs. - let bloom = self.expected_bloom(header); - let header_bloom = header.log_bloom(); + // otherwise, we're checking for logs. + let bloom = self.expected_bloom(header); + let header_bloom = header.log_bloom(); - if &bloom & header_bloom != bloom { return ::engines::EpochChange::No } + if &bloom & header_bloom != bloom { + return ::engines::EpochChange::No; + } - trace!(target: "engine", "detected epoch change event bloom"); + trace!(target: "engine", "detected epoch change event bloom"); - match receipts { - None => ::engines::EpochChange::Unsure(AuxiliaryRequest::Receipts), - Some(receipts) => match self.extract_from_event(bloom, header, receipts) { - None => ::engines::EpochChange::No, - Some(list) => { - info!(target: "engine", "Signal for transition within contract. New list: {:?}", + match receipts { + None => ::engines::EpochChange::Unsure(AuxiliaryRequest::Receipts), + Some(receipts) => match self.extract_from_event(bloom, header, receipts) { + None => ::engines::EpochChange::No, + Some(list) => { + info!(target: "engine", "Signal for transition within contract. 
New list: {:?}", &*list); - let proof = encode_proof(&header, receipts); - ::engines::EpochChange::Yes(::engines::Proof::Known(proof)) - } - }, - } - } + let proof = encode_proof(&header, receipts); + ::engines::EpochChange::Yes(::engines::Proof::Known(proof)) + } + }, + } + } - fn epoch_set(&self, first: bool, machine: &EthereumMachine, _number: ::types::BlockNumber, proof: &[u8]) - -> Result<(SimpleList, Option), ::error::Error> - { - let rlp = Rlp::new(proof); + fn epoch_set( + &self, + first: bool, + machine: &EthereumMachine, + _number: ::types::BlockNumber, + proof: &[u8], + ) -> Result<(SimpleList, Option), ::error::Error> { + let rlp = Rlp::new(proof); - if first { - trace!(target: "engine", "Recovering initial epoch set"); + if first { + trace!(target: "engine", "Recovering initial epoch set"); - let (old_header, state_items) = decode_first_proof(&rlp)?; - let number = old_header.number(); - let old_hash = old_header.hash(); - let addresses = check_first_proof(machine, self.contract_address, old_header, &state_items) - .map_err(::engines::EngineError::InsufficientProof)?; + let (old_header, state_items) = decode_first_proof(&rlp)?; + let number = old_header.number(); + let old_hash = old_header.hash(); + let addresses = + check_first_proof(machine, self.contract_address, old_header, &state_items) + .map_err(::engines::EngineError::InsufficientProof)?; - trace!(target: "engine", "extracted epoch set at #{}: {} addresses", + trace!(target: "engine", "extracted epoch set at #{}: {} addresses", number, addresses.len()); - Ok((SimpleList::new(addresses), Some(old_hash))) - } else { - let (old_header, receipts) = decode_proof(&rlp)?; + Ok((SimpleList::new(addresses), Some(old_hash))) + } else { + let (old_header, receipts) = decode_proof(&rlp)?; - // ensure receipts match header. - // TODO: optimize? these were just decoded. 
- let found_root = ::triehash::ordered_trie_root( - receipts.iter().map(::rlp::encode) - ); - if found_root != *old_header.receipts_root() { - return Err(::error::BlockError::InvalidReceiptsRoot( - Mismatch { expected: *old_header.receipts_root(), found: found_root } - ).into()); - } + // ensure receipts match header. + // TODO: optimize? these were just decoded. + let found_root = ::triehash::ordered_trie_root(receipts.iter().map(::rlp::encode)); + if found_root != *old_header.receipts_root() { + return Err(::error::BlockError::InvalidReceiptsRoot(Mismatch { + expected: *old_header.receipts_root(), + found: found_root, + }) + .into()); + } - let bloom = self.expected_bloom(&old_header); + let bloom = self.expected_bloom(&old_header); - match self.extract_from_event(bloom, &old_header, &receipts) { - Some(list) => Ok((list, Some(old_header.hash()))), - None => Err(::engines::EngineError::InsufficientProof("No log event in proof.".into()).into()), - } - } - } + match self.extract_from_event(bloom, &old_header, &receipts) { + Some(list) => Ok((list, Some(old_header.hash()))), + None => Err(::engines::EngineError::InsufficientProof( + "No log event in proof.".into(), + ) + .into()), + } + } + } - fn contains_with_caller(&self, block_hash: &H256, address: &Address, caller: &Call) -> bool { - let mut guard = self.validators.write(); - let maybe_existing = guard - .get_mut(block_hash) - .map(|list| list.contains(block_hash, address)); - maybe_existing - .unwrap_or_else(|| self - .get_list(caller) - .map_or(false, |list| { - let contains = list.contains(block_hash, address); - guard.insert(block_hash.clone(), list); - contains - })) - } + fn contains_with_caller(&self, block_hash: &H256, address: &Address, caller: &Call) -> bool { + let mut guard = self.validators.write(); + let maybe_existing = guard + .get_mut(block_hash) + .map(|list| list.contains(block_hash, address)); + maybe_existing.unwrap_or_else(|| { + self.get_list(caller).map_or(false, |list| { + let contains 
= list.contains(block_hash, address); + guard.insert(block_hash.clone(), list); + contains + }) + }) + } - fn get_with_caller(&self, block_hash: &H256, nonce: usize, caller: &Call) -> Address { - let mut guard = self.validators.write(); - let maybe_existing = guard - .get_mut(block_hash) - .map(|list| list.get(block_hash, nonce)); - maybe_existing - .unwrap_or_else(|| self - .get_list(caller) - .map_or_else(Default::default, |list| { - let address = list.get(block_hash, nonce); - guard.insert(block_hash.clone(), list); - address - })) - } + fn get_with_caller(&self, block_hash: &H256, nonce: usize, caller: &Call) -> Address { + let mut guard = self.validators.write(); + let maybe_existing = guard + .get_mut(block_hash) + .map(|list| list.get(block_hash, nonce)); + maybe_existing.unwrap_or_else(|| { + self.get_list(caller).map_or_else(Default::default, |list| { + let address = list.get(block_hash, nonce); + guard.insert(block_hash.clone(), list); + address + }) + }) + } - fn count_with_caller(&self, block_hash: &H256, caller: &Call) -> usize { - let mut guard = self.validators.write(); - let maybe_existing = guard - .get_mut(block_hash) - .map(|list| list.count(block_hash)); - maybe_existing - .unwrap_or_else(|| self - .get_list(caller) - .map_or_else(usize::max_value, |list| { - let address = list.count(block_hash); - guard.insert(block_hash.clone(), list); - address - })) - } + fn count_with_caller(&self, block_hash: &H256, caller: &Call) -> usize { + let mut guard = self.validators.write(); + let maybe_existing = guard.get_mut(block_hash).map(|list| list.count(block_hash)); + maybe_existing.unwrap_or_else(|| { + self.get_list(caller).map_or_else(usize::max_value, |list| { + let address = list.count(block_hash); + guard.insert(block_hash.clone(), list); + address + }) + }) + } - fn register_client(&self, client: Weak) { - trace!(target: "engine", "Setting up contract caller."); - *self.client.write() = Some(client); - } + fn register_client(&self, client: Weak) { 
+ trace!(target: "engine", "Setting up contract caller."); + *self.client.write() = Some(client); + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use rustc_hex::FromHex; - use hash::keccak; - use ethereum_types::Address; - use types::ids::BlockId; - use spec::Spec; - use accounts::AccountProvider; - use types::transaction::{Transaction, Action}; - use client::{ChainInfo, BlockInfo, ImportBlock, traits::{ForceUpdateSealing, EngineClient}}; - use ethkey::Secret; - use miner::{self, MinerService}; - use test_helpers::{generate_dummy_client_with_spec, generate_dummy_client_with_spec_and_data}; - use super::super::ValidatorSet; - use super::{ValidatorSafeContract, EVENT_NAME_HASH}; - use verification::queue::kind::blocks::Unverified; + use super::{super::ValidatorSet, ValidatorSafeContract, EVENT_NAME_HASH}; + use accounts::AccountProvider; + use client::{ + traits::{EngineClient, ForceUpdateSealing}, + BlockInfo, ChainInfo, ImportBlock, + }; + use ethereum_types::Address; + use ethkey::Secret; + use hash::keccak; + use miner::{self, MinerService}; + use rustc_hex::FromHex; + use spec::Spec; + use std::sync::Arc; + use test_helpers::{generate_dummy_client_with_spec, generate_dummy_client_with_spec_and_data}; + use types::{ + ids::BlockId, + transaction::{Action, Transaction}, + }; + use verification::queue::kind::blocks::Unverified; - #[test] - fn fetches_validators() { - let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); - let vc = Arc::new(ValidatorSafeContract::new("0000000000000000000000000000000000000005".parse::
().unwrap())); - vc.register_client(Arc::downgrade(&client) as _); - let last_hash = client.best_block_header().hash(); - assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::
().unwrap())); - assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::
().unwrap())); - } + #[test] + fn fetches_validators() { + let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); + let vc = Arc::new(ValidatorSafeContract::new( + "0000000000000000000000000000000000000005" + .parse::
() + .unwrap(), + )); + vc.register_client(Arc::downgrade(&client) as _); + let last_hash = client.best_block_header().hash(); + assert!(vc.contains( + &last_hash, + &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e" + .parse::
() + .unwrap() + )); + assert!(vc.contains( + &last_hash, + &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1" + .parse::
() + .unwrap() + )); + } - #[test] - fn knows_validators() { - let tap = Arc::new(AccountProvider::transient_provider()); - let s0: Secret = keccak("1").into(); - let v0 = tap.insert_account(s0.clone(), &"".into()).unwrap(); - let v1 = tap.insert_account(keccak("0").into(), &"".into()).unwrap(); - let chain_id = Spec::new_validator_safe_contract().chain_id(); - let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); - client.engine().register_client(Arc::downgrade(&client) as _); - let validator_contract = "0000000000000000000000000000000000000005".parse::
().unwrap(); - let signer = Box::new((tap.clone(), v1, "".into())); + #[test] + fn knows_validators() { + let tap = Arc::new(AccountProvider::transient_provider()); + let s0: Secret = keccak("1").into(); + let v0 = tap.insert_account(s0.clone(), &"".into()).unwrap(); + let v1 = tap.insert_account(keccak("0").into(), &"".into()).unwrap(); + let chain_id = Spec::new_validator_safe_contract().chain_id(); + let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); + client + .engine() + .register_client(Arc::downgrade(&client) as _); + let validator_contract = "0000000000000000000000000000000000000005" + .parse::
() + .unwrap(); + let signer = Box::new((tap.clone(), v1, "".into())); - client.miner().set_author(miner::Author::Sealer(signer)); - // Remove "1" validator. - let tx = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 500_000.into(), - action: Action::Call(validator_contract), - value: 0.into(), - data: "bfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(), - }.sign(&s0, Some(chain_id)); - client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); - EngineClient::update_sealing(&*client, ForceUpdateSealing::No); - assert_eq!(client.chain_info().best_block_number, 1); - // Add "1" validator back in. - let tx = Transaction { - nonce: 1.into(), - gas_price: 0.into(), - gas: 500_000.into(), - action: Action::Call(validator_contract), - value: 0.into(), - data: "4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(), - }.sign(&s0, Some(chain_id)); - client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); - EngineClient::update_sealing(&*client, ForceUpdateSealing::No); - // The transaction is not yet included so still unable to seal. - assert_eq!(client.chain_info().best_block_number, 1); + client.miner().set_author(miner::Author::Sealer(signer)); + // Remove "1" validator. + let tx = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 500_000.into(), + action: Action::Call(validator_contract), + value: 0.into(), + data: "bfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1" + .from_hex() + .unwrap(), + } + .sign(&s0, Some(chain_id)); + client + .miner() + .import_own_transaction(client.as_ref(), tx.into()) + .unwrap(); + EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + assert_eq!(client.chain_info().best_block_number, 1); + // Add "1" validator back in. 
+ let tx = Transaction { + nonce: 1.into(), + gas_price: 0.into(), + gas: 500_000.into(), + action: Action::Call(validator_contract), + value: 0.into(), + data: "4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1" + .from_hex() + .unwrap(), + } + .sign(&s0, Some(chain_id)); + client + .miner() + .import_own_transaction(client.as_ref(), tx.into()) + .unwrap(); + EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + // The transaction is not yet included so still unable to seal. + assert_eq!(client.chain_info().best_block_number, 1); - // Switch to the validator that is still there. - let signer = Box::new((tap.clone(), v0, "".into())); - client.miner().set_author(miner::Author::Sealer(signer)); - EngineClient::update_sealing(&*client, ForceUpdateSealing::No); - assert_eq!(client.chain_info().best_block_number, 2); - // Switch back to the added validator, since the state is updated. - let signer = Box::new((tap.clone(), v1, "".into())); - client.miner().set_author(miner::Author::Sealer(signer)); - let tx = Transaction { - nonce: 2.into(), - gas_price: 0.into(), - gas: 21000.into(), - action: Action::Call(Address::default()), - value: 0.into(), - data: Vec::new(), - }.sign(&s0, Some(chain_id)); - client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); - EngineClient::update_sealing(&*client, ForceUpdateSealing::No); - // Able to seal again. - assert_eq!(client.chain_info().best_block_number, 3); + // Switch to the validator that is still there. + let signer = Box::new((tap.clone(), v0, "".into())); + client.miner().set_author(miner::Author::Sealer(signer)); + EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + assert_eq!(client.chain_info().best_block_number, 2); + // Switch back to the added validator, since the state is updated. 
+ let signer = Box::new((tap.clone(), v1, "".into())); + client.miner().set_author(miner::Author::Sealer(signer)); + let tx = Transaction { + nonce: 2.into(), + gas_price: 0.into(), + gas: 21000.into(), + action: Action::Call(Address::default()), + value: 0.into(), + data: Vec::new(), + } + .sign(&s0, Some(chain_id)); + client + .miner() + .import_own_transaction(client.as_ref(), tx.into()) + .unwrap(); + EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + // Able to seal again. + assert_eq!(client.chain_info().best_block_number, 3); - // Check syncing. - let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_safe_contract, 0, 0, &[]); - sync_client.engine().register_client(Arc::downgrade(&sync_client) as _); - for i in 1..4 { - sync_client.import_block(Unverified::from_rlp(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap()).unwrap(); - } - sync_client.flush_queue(); - assert_eq!(sync_client.chain_info().best_block_number, 3); - } + // Check syncing. + let sync_client = + generate_dummy_client_with_spec_and_data(Spec::new_validator_safe_contract, 0, 0, &[]); + sync_client + .engine() + .register_client(Arc::downgrade(&sync_client) as _); + for i in 1..4 { + sync_client + .import_block( + Unverified::from_rlp(client.block(BlockId::Number(i)).unwrap().into_inner()) + .unwrap(), + ) + .unwrap(); + } + sync_client.flush_queue(); + assert_eq!(sync_client.chain_info().best_block_number, 3); + } - #[test] - fn detects_bloom() { - use engines::EpochChange; - use machine::AuxiliaryRequest; - use types::header::Header; - use types::log_entry::LogEntry; + #[test] + fn detects_bloom() { + use engines::EpochChange; + use machine::AuxiliaryRequest; + use types::{header::Header, log_entry::LogEntry}; - let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); - let engine = client.engine().clone(); - let validator_contract = "0000000000000000000000000000000000000005".parse::
().unwrap(); + let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); + let engine = client.engine().clone(); + let validator_contract = "0000000000000000000000000000000000000005" + .parse::
() + .unwrap(); - let last_hash = client.best_block_header().hash(); - let mut new_header = Header::default(); - new_header.set_parent_hash(last_hash); - new_header.set_number(1); // so the validator set looks for a log. + let last_hash = client.best_block_header().hash(); + let mut new_header = Header::default(); + new_header.set_parent_hash(last_hash); + new_header.set_number(1); // so the validator set looks for a log. - // first, try without the parent hash. - let mut event = LogEntry { - address: validator_contract, - topics: vec![*EVENT_NAME_HASH], - data: Vec::new(), - }; + // first, try without the parent hash. + let mut event = LogEntry { + address: validator_contract, + topics: vec![*EVENT_NAME_HASH], + data: Vec::new(), + }; - new_header.set_log_bloom(event.bloom()); - match engine.signals_epoch_end(&new_header, Default::default()) { - EpochChange::No => {}, - _ => panic!("Expected bloom to be unrecognized."), - }; + new_header.set_log_bloom(event.bloom()); + match engine.signals_epoch_end(&new_header, Default::default()) { + EpochChange::No => {} + _ => panic!("Expected bloom to be unrecognized."), + }; - // with the last hash, it should need the receipts. - event.topics.push(last_hash); - new_header.set_log_bloom(event.bloom()); + // with the last hash, it should need the receipts. 
+ event.topics.push(last_hash); + new_header.set_log_bloom(event.bloom()); - match engine.signals_epoch_end(&new_header, Default::default()) { - EpochChange::Unsure(AuxiliaryRequest::Receipts) => {}, - _ => panic!("Expected bloom to be recognized."), - }; - } + match engine.signals_epoch_end(&new_header, Default::default()) { + EpochChange::Unsure(AuxiliaryRequest::Receipts) => {} + _ => panic!("Expected bloom to be recognized."), + }; + } - #[test] - fn initial_contract_is_signal() { - use types::header::Header; - use engines::{EpochChange, Proof}; + #[test] + fn initial_contract_is_signal() { + use engines::{EpochChange, Proof}; + use types::header::Header; - let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); - let engine = client.engine().clone(); + let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); + let engine = client.engine().clone(); - let mut new_header = Header::default(); - new_header.set_number(0); // so the validator set doesn't look for a log + let mut new_header = Header::default(); + new_header.set_number(0); // so the validator set doesn't look for a log - match engine.signals_epoch_end(&new_header, Default::default()) { - EpochChange::Yes(Proof::WithState(_)) => {}, - _ => panic!("Expected state to be required to prove initial signal"), - }; - } + match engine.signals_epoch_end(&new_header, Default::default()) { + EpochChange::Yes(Proof::WithState(_)) => {} + _ => panic!("Expected state to be required to prove initial signal"), + }; + } } diff --git a/ethcore/src/engines/validator_set/simple_list.rs b/ethcore/src/engines/validator_set/simple_list.rs index 0a0294be9..6eeb02d61 100644 --- a/ethcore/src/engines/validator_set/simple_list.rs +++ b/ethcore/src/engines/validator_set/simple_list.rs @@ -14,95 +14,104 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use ethereum_types::{Address, H256}; /// Preconfigured validator list. - use heapsize::HeapSizeOf; -use ethereum_types::{H256, Address}; -use machine::{AuxiliaryData, Call, EthereumMachine}; -use types::BlockNumber; -use types::header::Header; use super::ValidatorSet; +use machine::{AuxiliaryData, Call, EthereumMachine}; +use types::{header::Header, BlockNumber}; /// Validator set containing a known set of addresses. #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct SimpleList { - validators: Vec
, + validators: Vec
, } impl SimpleList { - /// Create a new `SimpleList`. - pub fn new(validators: Vec
) -> Self { - SimpleList { - validators: validators, - } - } + /// Create a new `SimpleList`. + pub fn new(validators: Vec
) -> Self { + SimpleList { + validators: validators, + } + } - /// Convert into inner representation. - pub fn into_inner(self) -> Vec
{ - self.validators - } + /// Convert into inner representation. + pub fn into_inner(self) -> Vec
{ + self.validators + } } impl ::std::ops::Deref for SimpleList { - type Target = [Address]; + type Target = [Address]; - fn deref(&self) -> &[Address] { &self.validators } + fn deref(&self) -> &[Address] { + &self.validators + } } impl From> for SimpleList { - fn from(validators: Vec
) -> Self { - SimpleList { - validators: validators, - } - } + fn from(validators: Vec
) -> Self { + SimpleList { + validators: validators, + } + } } impl HeapSizeOf for SimpleList { - fn heap_size_of_children(&self) -> usize { - self.validators.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.validators.heap_size_of_children() + } } impl ValidatorSet for SimpleList { - fn default_caller(&self, _block_id: ::types::ids::BlockId) -> Box { - Box::new(|_, _| Err("Simple list doesn't require calls.".into())) - } + fn default_caller(&self, _block_id: ::types::ids::BlockId) -> Box { + Box::new(|_, _| Err("Simple list doesn't require calls.".into())) + } - fn is_epoch_end(&self, first: bool, _chain_head: &Header) -> Option> { - match first { - true => Some(Vec::new()), // allow transition to fixed list, and instantly - false => None, - } - } + fn is_epoch_end(&self, first: bool, _chain_head: &Header) -> Option> { + match first { + true => Some(Vec::new()), // allow transition to fixed list, and instantly + false => None, + } + } - fn signals_epoch_end(&self, _: bool, _: &Header, _: AuxiliaryData) - -> ::engines::EpochChange - { - ::engines::EpochChange::No - } + fn signals_epoch_end( + &self, + _: bool, + _: &Header, + _: AuxiliaryData, + ) -> ::engines::EpochChange { + ::engines::EpochChange::No + } - fn epoch_set(&self, _first: bool, _: &EthereumMachine, _: BlockNumber, _: &[u8]) -> Result<(SimpleList, Option), ::error::Error> { - Ok((self.clone(), None)) - } + fn epoch_set( + &self, + _first: bool, + _: &EthereumMachine, + _: BlockNumber, + _: &[u8], + ) -> Result<(SimpleList, Option), ::error::Error> { + Ok((self.clone(), None)) + } - fn contains_with_caller(&self, _bh: &H256, address: &Address, _: &Call) -> bool { - self.validators.contains(address) - } + fn contains_with_caller(&self, _bh: &H256, address: &Address, _: &Call) -> bool { + self.validators.contains(address) + } - fn get_with_caller(&self, _bh: &H256, nonce: usize, _: &Call) -> Address { - let validator_n = self.validators.len(); + fn get_with_caller(&self, 
_bh: &H256, nonce: usize, _: &Call) -> Address { + let validator_n = self.validators.len(); - if validator_n == 0 { - panic!("Cannot operate with an empty validator set."); - } + if validator_n == 0 { + panic!("Cannot operate with an empty validator set."); + } - self.validators.get(nonce % validator_n).expect("There are validator_n authorities; taking number modulo validator_n gives number in validator_n range; qed").clone() - } + self.validators.get(nonce % validator_n).expect("There are validator_n authorities; taking number modulo validator_n gives number in validator_n range; qed").clone() + } - fn count_with_caller(&self, _bh: &H256, _: &Call) -> usize { - self.validators.len() - } + fn count_with_caller(&self, _bh: &H256, _: &Call) -> usize { + self.validators.len() + } } impl AsRef for SimpleList { @@ -113,19 +122,18 @@ impl AsRef for SimpleList { #[cfg(test)] mod tests { - use std::str::FromStr; - use ethereum_types::Address; - use super::super::ValidatorSet; - use super::SimpleList; + use super::{super::ValidatorSet, SimpleList}; + use ethereum_types::Address; + use std::str::FromStr; - #[test] - fn simple_list() { - let a1 = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let a2 = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let list = SimpleList::new(vec![a1.clone(), a2.clone()]); - assert!(list.contains(&Default::default(), &a1)); - assert_eq!(list.get(&Default::default(), 0), a1); - assert_eq!(list.get(&Default::default(), 1), a2); - assert_eq!(list.get(&Default::default(), 2), a1); - } + #[test] + fn simple_list() { + let a1 = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let a2 = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let list = SimpleList::new(vec![a1.clone(), a2.clone()]); + assert!(list.contains(&Default::default(), &a1)); + assert_eq!(list.get(&Default::default(), 0), a1); + assert_eq!(list.get(&Default::default(), 1), a2); + 
assert_eq!(list.get(&Default::default(), 2), a1); + } } diff --git a/ethcore/src/engines/validator_set/test.rs b/ethcore/src/engines/validator_set/test.rs index c66ff14ad..7b61cef00 100644 --- a/ethcore/src/engines/validator_set/test.rs +++ b/ethcore/src/engines/validator_set/test.rs @@ -15,83 +15,105 @@ // along with Parity Ethereum. If not, see . /// Used for Engine testing. - use std::str::FromStr; -use std::sync::Arc; -use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; +use std::sync::{ + atomic::{AtomicUsize, Ordering as AtomicOrdering}, + Arc, +}; use bytes::Bytes; -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; use heapsize::HeapSizeOf; -use types::BlockNumber; -use types::header::Header; +use types::{header::Header, BlockNumber}; +use super::{SimpleList, ValidatorSet}; use machine::{AuxiliaryData, Call, EthereumMachine}; -use super::{ValidatorSet, SimpleList}; /// Set used for testing with a single validator. pub struct TestSet { - validator: SimpleList, - last_malicious: Arc, - last_benign: Arc, + validator: SimpleList, + last_malicious: Arc, + last_benign: Arc, } impl Default for TestSet { - fn default() -> Self { - TestSet::new(Default::default(), Default::default()) - } + fn default() -> Self { + TestSet::new(Default::default(), Default::default()) + } } impl TestSet { - pub fn new(last_malicious: Arc, last_benign: Arc) -> Self { - TestSet { - validator: SimpleList::new(vec![Address::from_str("7d577a597b2742b498cb5cf0c26cdcd726d39e6e").unwrap()]), - last_malicious, - last_benign, - } - } + pub fn new(last_malicious: Arc, last_benign: Arc) -> Self { + TestSet { + validator: SimpleList::new(vec![Address::from_str( + "7d577a597b2742b498cb5cf0c26cdcd726d39e6e", + ) + .unwrap()]), + last_malicious, + last_benign, + } + } } impl HeapSizeOf for TestSet { - fn heap_size_of_children(&self) -> usize { - self.validator.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + 
self.validator.heap_size_of_children() + } } impl ValidatorSet for TestSet { - fn default_caller(&self, _block_id: ::types::ids::BlockId) -> Box { - Box::new(|_, _| Err("Test set doesn't require calls.".into())) - } + fn default_caller(&self, _block_id: ::types::ids::BlockId) -> Box { + Box::new(|_, _| Err("Test set doesn't require calls.".into())) + } - fn is_epoch_end(&self, _first: bool, _chain_head: &Header) -> Option> { None } + fn is_epoch_end(&self, _first: bool, _chain_head: &Header) -> Option> { + None + } - fn signals_epoch_end(&self, _: bool, _: &Header, _: AuxiliaryData) - -> ::engines::EpochChange - { - ::engines::EpochChange::No - } + fn signals_epoch_end( + &self, + _: bool, + _: &Header, + _: AuxiliaryData, + ) -> ::engines::EpochChange { + ::engines::EpochChange::No + } - fn epoch_set(&self, _: bool, _: &EthereumMachine, _: BlockNumber, _: &[u8]) -> Result<(SimpleList, Option), ::error::Error> { - Ok((self.validator.clone(), None)) - } + fn epoch_set( + &self, + _: bool, + _: &EthereumMachine, + _: BlockNumber, + _: &[u8], + ) -> Result<(SimpleList, Option), ::error::Error> { + Ok((self.validator.clone(), None)) + } - fn contains_with_caller(&self, bh: &H256, address: &Address, _: &Call) -> bool { - self.validator.contains(bh, address) - } + fn contains_with_caller(&self, bh: &H256, address: &Address, _: &Call) -> bool { + self.validator.contains(bh, address) + } - fn get_with_caller(&self, bh: &H256, nonce: usize, _: &Call) -> Address { - self.validator.get(bh, nonce) - } + fn get_with_caller(&self, bh: &H256, nonce: usize, _: &Call) -> Address { + self.validator.get(bh, nonce) + } - fn count_with_caller(&self, _bh: &H256, _: &Call) -> usize { - 1 - } + fn count_with_caller(&self, _bh: &H256, _: &Call) -> usize { + 1 + } - fn report_malicious(&self, _validator: &Address, _set_block: BlockNumber, block: BlockNumber, _proof: Bytes) { - self.last_malicious.store(block as usize, AtomicOrdering::SeqCst) - } + fn report_malicious( + &self, + _validator: 
&Address, + _set_block: BlockNumber, + block: BlockNumber, + _proof: Bytes, + ) { + self.last_malicious + .store(block as usize, AtomicOrdering::SeqCst) + } - fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, block: BlockNumber) { - self.last_benign.store(block as usize, AtomicOrdering::SeqCst) - } + fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, block: BlockNumber) { + self.last_benign + .store(block as usize, AtomicOrdering::SeqCst) + } } diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index d5aa45ba0..277adb107 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -20,269 +20,280 @@ // https://github.com/paritytech/parity-ethereum/issues/10302 #![allow(deprecated)] -use std::{fmt, error}; -use std::time::SystemTime; +use std::{error, fmt, time::SystemTime}; -use ethereum_types::{H256, U256, Address, Bloom}; +use ethereum_types::{Address, Bloom, H256, U256}; use ethkey::Error as EthkeyError; use ethtrie::TrieError; use rlp; use snappy::InvalidInput; use snapshot::Error as SnapshotError; -use types::BlockNumber; -use types::transaction::Error as TransactionError; +use types::{transaction::Error as TransactionError, BlockNumber}; use unexpected::{Mismatch, OutOfBounds}; use engines::EngineError; -pub use executed::{ExecutionError, CallError}; +pub use executed::{CallError, ExecutionError}; #[derive(Debug, PartialEq, Clone, Eq)] /// Errors concerning block processing. pub enum BlockError { - /// Block has too many uncles. - TooManyUncles(OutOfBounds), - /// Extra data is of an invalid length. - ExtraDataOutOfBounds(OutOfBounds), - /// Seal is incorrect format. - InvalidSealArity(Mismatch), - /// Block has too much gas used. - TooMuchGasUsed(OutOfBounds), - /// Uncles hash in header is invalid. - InvalidUnclesHash(Mismatch), - /// An uncle is from a generation too old. - UncleTooOld(OutOfBounds), - /// An uncle is from the same generation as the block. 
- UncleIsBrother(OutOfBounds), - /// An uncle is already in the chain. - UncleInChain(H256), - /// An uncle is included twice. - DuplicateUncle(H256), - /// An uncle has a parent not in the chain. - UncleParentNotInChain(H256), - /// State root header field is invalid. - InvalidStateRoot(Mismatch), - /// Gas used header field is invalid. - InvalidGasUsed(Mismatch), - /// Transactions root header field is invalid. - InvalidTransactionsRoot(Mismatch), - /// Difficulty is out of range; this can be used as an looser error prior to getting a definitive - /// value for difficulty. This error needs only provide bounds of which it is out. - DifficultyOutOfBounds(OutOfBounds), - /// Difficulty header field is invalid; this is a strong error used after getting a definitive - /// value for difficulty (which is provided). - InvalidDifficulty(Mismatch), - /// Seal element of type H256 (max_hash for Ethash, but could be something else for - /// other seal engines) is out of bounds. - MismatchedH256SealElement(Mismatch), - /// Proof-of-work aspect of seal, which we assume is a 256-bit value, is invalid. - InvalidProofOfWork(OutOfBounds), - /// Some low-level aspect of the seal is incorrect. - InvalidSeal, - /// Gas limit header field is invalid. - InvalidGasLimit(OutOfBounds), - /// Receipts trie root header field is invalid. - InvalidReceiptsRoot(Mismatch), - /// Timestamp header field is invalid. - InvalidTimestamp(OutOfBounds), - /// Timestamp header field is too far in future. - TemporarilyInvalid(OutOfBounds), - /// Log bloom header field is invalid. - InvalidLogBloom(Box>), - /// Number field of header is invalid. - InvalidNumber(Mismatch), - /// Block number isn't sensible. - RidiculousNumber(OutOfBounds), - /// Timestamp header overflowed - TimestampOverflow, - /// Too many transactions from a particular address. - TooManyTransactions(Address), - /// Parent given is unknown. - UnknownParent(H256), - /// Uncle parent given is unknown. 
- UnknownUncleParent(H256), - /// No transition to epoch number. - UnknownEpochTransition(u64), + /// Block has too many uncles. + TooManyUncles(OutOfBounds), + /// Extra data is of an invalid length. + ExtraDataOutOfBounds(OutOfBounds), + /// Seal is incorrect format. + InvalidSealArity(Mismatch), + /// Block has too much gas used. + TooMuchGasUsed(OutOfBounds), + /// Uncles hash in header is invalid. + InvalidUnclesHash(Mismatch), + /// An uncle is from a generation too old. + UncleTooOld(OutOfBounds), + /// An uncle is from the same generation as the block. + UncleIsBrother(OutOfBounds), + /// An uncle is already in the chain. + UncleInChain(H256), + /// An uncle is included twice. + DuplicateUncle(H256), + /// An uncle has a parent not in the chain. + UncleParentNotInChain(H256), + /// State root header field is invalid. + InvalidStateRoot(Mismatch), + /// Gas used header field is invalid. + InvalidGasUsed(Mismatch), + /// Transactions root header field is invalid. + InvalidTransactionsRoot(Mismatch), + /// Difficulty is out of range; this can be used as an looser error prior to getting a definitive + /// value for difficulty. This error needs only provide bounds of which it is out. + DifficultyOutOfBounds(OutOfBounds), + /// Difficulty header field is invalid; this is a strong error used after getting a definitive + /// value for difficulty (which is provided). + InvalidDifficulty(Mismatch), + /// Seal element of type H256 (max_hash for Ethash, but could be something else for + /// other seal engines) is out of bounds. + MismatchedH256SealElement(Mismatch), + /// Proof-of-work aspect of seal, which we assume is a 256-bit value, is invalid. + InvalidProofOfWork(OutOfBounds), + /// Some low-level aspect of the seal is incorrect. + InvalidSeal, + /// Gas limit header field is invalid. + InvalidGasLimit(OutOfBounds), + /// Receipts trie root header field is invalid. + InvalidReceiptsRoot(Mismatch), + /// Timestamp header field is invalid. 
+ InvalidTimestamp(OutOfBounds), + /// Timestamp header field is too far in future. + TemporarilyInvalid(OutOfBounds), + /// Log bloom header field is invalid. + InvalidLogBloom(Box>), + /// Number field of header is invalid. + InvalidNumber(Mismatch), + /// Block number isn't sensible. + RidiculousNumber(OutOfBounds), + /// Timestamp header overflowed + TimestampOverflow, + /// Too many transactions from a particular address. + TooManyTransactions(Address), + /// Parent given is unknown. + UnknownParent(H256), + /// Uncle parent given is unknown. + UnknownUncleParent(H256), + /// No transition to epoch number. + UnknownEpochTransition(u64), } impl fmt::Display for BlockError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::BlockError::*; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::BlockError::*; - let msg = match *self { - TooManyUncles(ref oob) => format!("Block has too many uncles. {}", oob), - ExtraDataOutOfBounds(ref oob) => format!("Extra block data too long. {}", oob), - InvalidSealArity(ref mis) => format!("Block seal in incorrect format: {}", mis), - TooMuchGasUsed(ref oob) => format!("Block has too much gas used. {}", oob), - InvalidUnclesHash(ref mis) => format!("Block has invalid uncles hash: {}", mis), - UncleTooOld(ref oob) => format!("Uncle block is too old. {}", oob), - UncleIsBrother(ref oob) => format!("Uncle from same generation as block. 
{}", oob), - UncleInChain(ref hash) => format!("Uncle {} already in chain", hash), - DuplicateUncle(ref hash) => format!("Uncle {} already in the header", hash), - UncleParentNotInChain(ref hash) => format!("Uncle {} has a parent not in the chain", hash), - InvalidStateRoot(ref mis) => format!("Invalid state root in header: {}", mis), - InvalidGasUsed(ref mis) => format!("Invalid gas used in header: {}", mis), - InvalidTransactionsRoot(ref mis) => format!("Invalid transactions root in header: {}", mis), - DifficultyOutOfBounds(ref oob) => format!("Invalid block difficulty: {}", oob), - InvalidDifficulty(ref mis) => format!("Invalid block difficulty: {}", mis), - MismatchedH256SealElement(ref mis) => format!("Seal element out of bounds: {}", mis), - InvalidProofOfWork(ref oob) => format!("Block has invalid PoW: {}", oob), - InvalidSeal => "Block has invalid seal.".into(), - InvalidGasLimit(ref oob) => format!("Invalid gas limit: {}", oob), - InvalidReceiptsRoot(ref mis) => format!("Invalid receipts trie root in header: {}", mis), - InvalidTimestamp(ref oob) => { - let oob = oob.map(|st| st.elapsed().unwrap_or_default().as_secs()); - format!("Invalid timestamp in header: {}", oob) - }, - TemporarilyInvalid(ref oob) => { - let oob = oob.map(|st| st.elapsed().unwrap_or_default().as_secs()); - format!("Future timestamp in header: {}", oob) - }, - InvalidLogBloom(ref oob) => format!("Invalid log bloom in header: {}", oob), - InvalidNumber(ref mis) => format!("Invalid number in header: {}", mis), - RidiculousNumber(ref oob) => format!("Implausible block number. 
{}", oob), - UnknownParent(ref hash) => format!("Unknown parent: {}", hash), - UnknownUncleParent(ref hash) => format!("Unknown uncle parent: {}", hash), - UnknownEpochTransition(ref num) => format!("Unknown transition to epoch number: {}", num), - TimestampOverflow => format!("Timestamp overflow"), - TooManyTransactions(ref address) => format!("Too many transactions from: {}", address), - }; + let msg = match *self { + TooManyUncles(ref oob) => format!("Block has too many uncles. {}", oob), + ExtraDataOutOfBounds(ref oob) => format!("Extra block data too long. {}", oob), + InvalidSealArity(ref mis) => format!("Block seal in incorrect format: {}", mis), + TooMuchGasUsed(ref oob) => format!("Block has too much gas used. {}", oob), + InvalidUnclesHash(ref mis) => format!("Block has invalid uncles hash: {}", mis), + UncleTooOld(ref oob) => format!("Uncle block is too old. {}", oob), + UncleIsBrother(ref oob) => format!("Uncle from same generation as block. {}", oob), + UncleInChain(ref hash) => format!("Uncle {} already in chain", hash), + DuplicateUncle(ref hash) => format!("Uncle {} already in the header", hash), + UncleParentNotInChain(ref hash) => { + format!("Uncle {} has a parent not in the chain", hash) + } + InvalidStateRoot(ref mis) => format!("Invalid state root in header: {}", mis), + InvalidGasUsed(ref mis) => format!("Invalid gas used in header: {}", mis), + InvalidTransactionsRoot(ref mis) => { + format!("Invalid transactions root in header: {}", mis) + } + DifficultyOutOfBounds(ref oob) => format!("Invalid block difficulty: {}", oob), + InvalidDifficulty(ref mis) => format!("Invalid block difficulty: {}", mis), + MismatchedH256SealElement(ref mis) => format!("Seal element out of bounds: {}", mis), + InvalidProofOfWork(ref oob) => format!("Block has invalid PoW: {}", oob), + InvalidSeal => "Block has invalid seal.".into(), + InvalidGasLimit(ref oob) => format!("Invalid gas limit: {}", oob), + InvalidReceiptsRoot(ref mis) => { + format!("Invalid receipts 
trie root in header: {}", mis) + } + InvalidTimestamp(ref oob) => { + let oob = oob.map(|st| st.elapsed().unwrap_or_default().as_secs()); + format!("Invalid timestamp in header: {}", oob) + } + TemporarilyInvalid(ref oob) => { + let oob = oob.map(|st| st.elapsed().unwrap_or_default().as_secs()); + format!("Future timestamp in header: {}", oob) + } + InvalidLogBloom(ref oob) => format!("Invalid log bloom in header: {}", oob), + InvalidNumber(ref mis) => format!("Invalid number in header: {}", mis), + RidiculousNumber(ref oob) => format!("Implausible block number. {}", oob), + UnknownParent(ref hash) => format!("Unknown parent: {}", hash), + UnknownUncleParent(ref hash) => format!("Unknown uncle parent: {}", hash), + UnknownEpochTransition(ref num) => { + format!("Unknown transition to epoch number: {}", num) + } + TimestampOverflow => format!("Timestamp overflow"), + TooManyTransactions(ref address) => format!("Too many transactions from: {}", address), + }; - f.write_fmt(format_args!("Block error ({})", msg)) - } + f.write_fmt(format_args!("Block error ({})", msg)) + } } impl error::Error for BlockError { - fn description(&self) -> &str { - "Block error" - } + fn description(&self) -> &str { + "Block error" + } } error_chain! { - types { - QueueError, QueueErrorKind, QueueErrorResultExt, QueueErrorResult; - } + types { + QueueError, QueueErrorKind, QueueErrorResultExt, QueueErrorResult; + } - errors { - #[doc = "Queue is full"] - Full(limit: usize) { - description("Queue is full") - display("The queue is full ({})", limit) - } - } + errors { + #[doc = "Queue is full"] + Full(limit: usize) { + description("Queue is full") + display("The queue is full ({})", limit) + } + } - foreign_links { - Channel(::io::IoError) #[doc = "Io channel error"]; - } + foreign_links { + Channel(::io::IoError) #[doc = "Io channel error"]; + } } error_chain! 
{ - types { - ImportError, ImportErrorKind, ImportErrorResultExt, ImportErrorResult; - } + types { + ImportError, ImportErrorKind, ImportErrorResultExt, ImportErrorResult; + } - errors { - #[doc = "Already in the block chain."] - AlreadyInChain { - description("Block already in chain") - display("Block already in chain") - } + errors { + #[doc = "Already in the block chain."] + AlreadyInChain { + description("Block already in chain") + display("Block already in chain") + } - #[doc = "Already in the block queue"] - AlreadyQueued { - description("block already in the block queue") - display("block already in the block queue") - } + #[doc = "Already in the block queue"] + AlreadyQueued { + description("block already in the block queue") + display("block already in the block queue") + } - #[doc = "Already marked as bad from a previous import (could mean parent is bad)."] - KnownBad { - description("block known to be bad") - display("block known to be bad") - } - } + #[doc = "Already marked as bad from a previous import (could mean parent is bad)."] + KnownBad { + description("block known to be bad") + display("block known to be bad") + } + } } /// Api-level error for transaction import #[derive(Debug, Clone)] pub enum TransactionImportError { - /// Transaction error - Transaction(TransactionError), - /// Other error - Other(String), + /// Transaction error + Transaction(TransactionError), + /// Other error + Other(String), } impl From for TransactionImportError { - fn from(e: Error) -> Self { - match e { - Error(ErrorKind::Transaction(transaction_error), _) => TransactionImportError::Transaction(transaction_error), - _ => TransactionImportError::Other(format!("other block import error: {:?}", e)), - } - } + fn from(e: Error) -> Self { + match e { + Error(ErrorKind::Transaction(transaction_error), _) => { + TransactionImportError::Transaction(transaction_error) + } + _ => TransactionImportError::Other(format!("other block import error: {:?}", e)), + } + } } error_chain! 
{ - types { - Error, ErrorKind, ErrorResultExt, EthcoreResult; - } + types { + Error, ErrorKind, ErrorResultExt, EthcoreResult; + } - links { - Import(ImportError, ImportErrorKind) #[doc = "Error concerning block import." ]; - Queue(QueueError, QueueErrorKind) #[doc = "Io channel queue error"]; - } + links { + Import(ImportError, ImportErrorKind) #[doc = "Error concerning block import." ]; + Queue(QueueError, QueueErrorKind) #[doc = "Io channel queue error"]; + } - foreign_links { - Io(::io::IoError) #[doc = "Io create error"]; - StdIo(::std::io::Error) #[doc = "Error concerning the Rust standard library's IO subsystem."]; - Trie(TrieError) #[doc = "Error concerning TrieDBs."]; - Execution(ExecutionError) #[doc = "Error concerning EVM code execution."]; - Block(BlockError) #[doc = "Error concerning block processing."]; - Transaction(TransactionError) #[doc = "Error concerning transaction processing."]; - Snappy(InvalidInput) #[doc = "Snappy error."]; - Engine(EngineError) #[doc = "Consensus vote error."]; - Ethkey(EthkeyError) #[doc = "Ethkey error."]; - Decoder(rlp::DecoderError) #[doc = "RLP decoding errors"]; - } + foreign_links { + Io(::io::IoError) #[doc = "Io create error"]; + StdIo(::std::io::Error) #[doc = "Error concerning the Rust standard library's IO subsystem."]; + Trie(TrieError) #[doc = "Error concerning TrieDBs."]; + Execution(ExecutionError) #[doc = "Error concerning EVM code execution."]; + Block(BlockError) #[doc = "Error concerning block processing."]; + Transaction(TransactionError) #[doc = "Error concerning transaction processing."]; + Snappy(InvalidInput) #[doc = "Snappy error."]; + Engine(EngineError) #[doc = "Consensus vote error."]; + Ethkey(EthkeyError) #[doc = "Ethkey error."]; + Decoder(rlp::DecoderError) #[doc = "RLP decoding errors"]; + } - errors { - #[doc = "Snapshot error."] - Snapshot(err: SnapshotError) { - description("Snapshot error.") - display("Snapshot error {}", err) - } + errors { + #[doc = "Snapshot error."] + 
Snapshot(err: SnapshotError) { + description("Snapshot error.") + display("Snapshot error {}", err) + } - #[doc = "PoW hash is invalid or out of date."] - PowHashInvalid { - description("PoW hash is invalid or out of date.") - display("PoW hash is invalid or out of date.") - } + #[doc = "PoW hash is invalid or out of date."] + PowHashInvalid { + description("PoW hash is invalid or out of date.") + display("PoW hash is invalid or out of date.") + } - #[doc = "The value of the nonce or mishash is invalid."] - PowInvalid { - description("The value of the nonce or mishash is invalid.") - display("The value of the nonce or mishash is invalid.") - } + #[doc = "The value of the nonce or mishash is invalid."] + PowInvalid { + description("The value of the nonce or mishash is invalid.") + display("The value of the nonce or mishash is invalid.") + } - #[doc = "Unknown engine given"] - UnknownEngineName(name: String) { - description("Unknown engine name") - display("Unknown engine name ({})", name) - } - } + #[doc = "Unknown engine given"] + UnknownEngineName(name: String) { + description("Unknown engine name") + display("Unknown engine name ({})", name) + } + } } impl From for Error { - fn from(err: SnapshotError) -> Error { - match err { - SnapshotError::Io(err) => ErrorKind::StdIo(err).into(), - SnapshotError::Trie(err) => ErrorKind::Trie(err).into(), - SnapshotError::Decoder(err) => err.into(), - other => ErrorKind::Snapshot(other).into(), - } - } + fn from(err: SnapshotError) -> Error { + match err { + SnapshotError::Io(err) => ErrorKind::StdIo(err).into(), + SnapshotError::Trie(err) => ErrorKind::Trie(err).into(), + SnapshotError::Decoder(err) => err.into(), + other => ErrorKind::Snapshot(other).into(), + } + } } -impl From> for Error where Error: From { - fn from(err: Box) -> Error { - Error::from(*err) - } +impl From> for Error +where + Error: From, +{ + fn from(err: Box) -> Error { + Error::from(*err) + } } diff --git a/ethcore/src/ethereum/denominations.rs 
b/ethcore/src/ethereum/denominations.rs index 3bf4878c7..4019222ec 100644 --- a/ethcore/src/ethereum/denominations.rs +++ b/ethcore/src/ethereum/denominations.rs @@ -18,20 +18,30 @@ use ethereum_types::U256; #[inline] /// 1 Ether in Wei -pub fn ether() -> U256 { U256::exp10(18) } +pub fn ether() -> U256 { + U256::exp10(18) +} #[inline] /// 1 Finney in Wei -pub fn finney() -> U256 { U256::exp10(15) } +pub fn finney() -> U256 { + U256::exp10(15) +} #[inline] /// 1 Szabo in Wei -pub fn szabo() -> U256 { U256::exp10(12) } +pub fn szabo() -> U256 { + U256::exp10(12) +} #[inline] /// 1 Shannon in Wei -pub fn shannon() -> U256 { U256::exp10(9) } +pub fn shannon() -> U256 { + U256::exp10(9) +} #[inline] /// 1 Wei in Wei -pub fn wei() -> U256 { U256::exp10(0) } +pub fn wei() -> U256 { + U256::exp10(0) +} diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index 698e23cf7..6871a955c 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -14,22 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::cmp; -use std::collections::BTreeMap; -use std::path::Path; -use std::sync::Arc; +use std::{cmp, collections::BTreeMap, path::Path, sync::Arc}; use ethereum_types::{H256, H64, U256}; use ethjson; -use hash::{KECCAK_EMPTY_LIST_RLP}; +use hash::KECCAK_EMPTY_LIST_RLP; use rlp::Rlp; -use types::header::{Header, ExtendedHeader}; -use types::BlockNumber; -use unexpected::{OutOfBounds, Mismatch}; +use types::{ + header::{ExtendedHeader, Header}, + BlockNumber, +}; +use unexpected::{Mismatch, OutOfBounds}; use block::ExecutedBlock; -use engines::block_reward::{self, BlockRewardContract, RewardKind}; -use engines::{self, Engine}; +use engines::{ + self, + block_reward::{self, BlockRewardContract, RewardKind}, + Engine, +}; use error::{BlockError, Error}; use ethash::{self, quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor}; use machine::EthereumMachine; @@ -43,156 +45,170 @@ const MAX_SNAPSHOT_BLOCKS: u64 = 30000; /// Ethash specific seal #[derive(Debug, PartialEq)] pub struct Seal { - /// Ethash seal mix_hash - pub mix_hash: H256, - /// Ethash seal nonce - pub nonce: H64, + /// Ethash seal mix_hash + pub mix_hash: H256, + /// Ethash seal nonce + pub nonce: H64, } impl Seal { - /// Tries to parse rlp as ethash seal. - pub fn parse_seal>(seal: &[T]) -> Result { - if seal.len() != 2 { - return Err(BlockError::InvalidSealArity( - Mismatch { - expected: 2, - found: seal.len() - } - ).into()); - } + /// Tries to parse rlp as ethash seal. 
+ pub fn parse_seal>(seal: &[T]) -> Result { + if seal.len() != 2 { + return Err(BlockError::InvalidSealArity(Mismatch { + expected: 2, + found: seal.len(), + }) + .into()); + } - let mix_hash = Rlp::new(seal[0].as_ref()).as_val::()?; - let nonce = Rlp::new(seal[1].as_ref()).as_val::()?; - let seal = Seal { - mix_hash, - nonce, - }; + let mix_hash = Rlp::new(seal[0].as_ref()).as_val::()?; + let nonce = Rlp::new(seal[1].as_ref()).as_val::()?; + let seal = Seal { mix_hash, nonce }; - Ok(seal) - } + Ok(seal) + } } /// Ethash params. #[derive(Debug, PartialEq)] pub struct EthashParams { - /// Minimum difficulty. - pub minimum_difficulty: U256, - /// Difficulty bound divisor. - pub difficulty_bound_divisor: U256, - /// Difficulty increment divisor. - pub difficulty_increment_divisor: u64, - /// Metropolis difficulty increment divisor. - pub metropolis_difficulty_increment_divisor: u64, - /// Block duration. - pub duration_limit: u64, - /// Homestead transition block number. - pub homestead_transition: u64, - /// Transition block for a change of difficulty params (currently just bound_divisor). - pub difficulty_hardfork_transition: u64, - /// Difficulty param after the difficulty transition. - pub difficulty_hardfork_bound_divisor: U256, - /// Block on which there is no additional difficulty from the exponential bomb. - pub bomb_defuse_transition: u64, - /// Number of first block where EIP-100 rules begin. - pub eip100b_transition: u64, - /// Number of first block where ECIP-1010 begins. - pub ecip1010_pause_transition: u64, - /// Number of first block where ECIP-1010 ends. - pub ecip1010_continue_transition: u64, - /// Total block number for one ECIP-1017 era. - pub ecip1017_era_rounds: u64, - /// Block reward in base units. - pub block_reward: BTreeMap, - /// EXPIP-2 block height - pub expip2_transition: u64, - /// EXPIP-2 duration limit - pub expip2_duration_limit: u64, - /// Block reward contract transition block. 
- pub block_reward_contract_transition: u64, - /// Block reward contract. - pub block_reward_contract: Option, - /// Difficulty bomb delays. - pub difficulty_bomb_delays: BTreeMap, - /// Block to transition to progpow - pub progpow_transition: u64, + /// Minimum difficulty. + pub minimum_difficulty: U256, + /// Difficulty bound divisor. + pub difficulty_bound_divisor: U256, + /// Difficulty increment divisor. + pub difficulty_increment_divisor: u64, + /// Metropolis difficulty increment divisor. + pub metropolis_difficulty_increment_divisor: u64, + /// Block duration. + pub duration_limit: u64, + /// Homestead transition block number. + pub homestead_transition: u64, + /// Transition block for a change of difficulty params (currently just bound_divisor). + pub difficulty_hardfork_transition: u64, + /// Difficulty param after the difficulty transition. + pub difficulty_hardfork_bound_divisor: U256, + /// Block on which there is no additional difficulty from the exponential bomb. + pub bomb_defuse_transition: u64, + /// Number of first block where EIP-100 rules begin. + pub eip100b_transition: u64, + /// Number of first block where ECIP-1010 begins. + pub ecip1010_pause_transition: u64, + /// Number of first block where ECIP-1010 ends. + pub ecip1010_continue_transition: u64, + /// Total block number for one ECIP-1017 era. + pub ecip1017_era_rounds: u64, + /// Block reward in base units. + pub block_reward: BTreeMap, + /// EXPIP-2 block height + pub expip2_transition: u64, + /// EXPIP-2 duration limit + pub expip2_duration_limit: u64, + /// Block reward contract transition block. + pub block_reward_contract_transition: u64, + /// Block reward contract. + pub block_reward_contract: Option, + /// Difficulty bomb delays. 
+ pub difficulty_bomb_delays: BTreeMap, + /// Block to transition to progpow + pub progpow_transition: u64, } impl From for EthashParams { - fn from(p: ethjson::spec::EthashParams) -> Self { - EthashParams { - minimum_difficulty: p.minimum_difficulty.into(), - difficulty_bound_divisor: p.difficulty_bound_divisor.into(), - difficulty_increment_divisor: p.difficulty_increment_divisor.map_or(10, Into::into), - metropolis_difficulty_increment_divisor: p.metropolis_difficulty_increment_divisor.map_or(9, Into::into), - duration_limit: p.duration_limit.map_or(0, Into::into), - homestead_transition: p.homestead_transition.map_or(0, Into::into), - difficulty_hardfork_transition: p.difficulty_hardfork_transition.map_or(u64::max_value(), Into::into), - difficulty_hardfork_bound_divisor: p.difficulty_hardfork_bound_divisor.map_or(p.difficulty_bound_divisor.into(), Into::into), - bomb_defuse_transition: p.bomb_defuse_transition.map_or(u64::max_value(), Into::into), - eip100b_transition: p.eip100b_transition.map_or(u64::max_value(), Into::into), - ecip1010_pause_transition: p.ecip1010_pause_transition.map_or(u64::max_value(), Into::into), - ecip1010_continue_transition: p.ecip1010_continue_transition.map_or(u64::max_value(), Into::into), - ecip1017_era_rounds: p.ecip1017_era_rounds.map_or(u64::max_value(), Into::into), - block_reward: p.block_reward.map_or_else( - || { - let mut ret = BTreeMap::new(); - ret.insert(0, U256::zero()); - ret - }, - |reward| { - match reward { - ethjson::spec::BlockReward::Single(reward) => { - let mut ret = BTreeMap::new(); - ret.insert(0, reward.into()); - ret - }, - ethjson::spec::BlockReward::Multi(multi) => { - multi.into_iter() - .map(|(block, reward)| (block.into(), reward.into())) - .collect() - }, - } - }), - expip2_transition: p.expip2_transition.map_or(u64::max_value(), Into::into), - expip2_duration_limit: p.expip2_duration_limit.map_or(30, Into::into), - progpow_transition: p.progpow_transition.map_or(u64::max_value(), Into::into), - 
block_reward_contract_transition: p.block_reward_contract_transition.map_or(0, Into::into), - block_reward_contract: match (p.block_reward_contract_code, p.block_reward_contract_address) { - (Some(code), _) => Some(BlockRewardContract::new_from_code(Arc::new(code.into()))), - (_, Some(address)) => Some(BlockRewardContract::new_from_address(address.into())), - (None, None) => None, - }, - difficulty_bomb_delays: p.difficulty_bomb_delays.unwrap_or_default().into_iter() - .map(|(block, delay)| (block.into(), delay.into())) - .collect() - } - } + fn from(p: ethjson::spec::EthashParams) -> Self { + EthashParams { + minimum_difficulty: p.minimum_difficulty.into(), + difficulty_bound_divisor: p.difficulty_bound_divisor.into(), + difficulty_increment_divisor: p.difficulty_increment_divisor.map_or(10, Into::into), + metropolis_difficulty_increment_divisor: p + .metropolis_difficulty_increment_divisor + .map_or(9, Into::into), + duration_limit: p.duration_limit.map_or(0, Into::into), + homestead_transition: p.homestead_transition.map_or(0, Into::into), + difficulty_hardfork_transition: p + .difficulty_hardfork_transition + .map_or(u64::max_value(), Into::into), + difficulty_hardfork_bound_divisor: p + .difficulty_hardfork_bound_divisor + .map_or(p.difficulty_bound_divisor.into(), Into::into), + bomb_defuse_transition: p + .bomb_defuse_transition + .map_or(u64::max_value(), Into::into), + eip100b_transition: p.eip100b_transition.map_or(u64::max_value(), Into::into), + ecip1010_pause_transition: p + .ecip1010_pause_transition + .map_or(u64::max_value(), Into::into), + ecip1010_continue_transition: p + .ecip1010_continue_transition + .map_or(u64::max_value(), Into::into), + ecip1017_era_rounds: p.ecip1017_era_rounds.map_or(u64::max_value(), Into::into), + block_reward: p.block_reward.map_or_else( + || { + let mut ret = BTreeMap::new(); + ret.insert(0, U256::zero()); + ret + }, + |reward| match reward { + ethjson::spec::BlockReward::Single(reward) => { + let mut ret = 
BTreeMap::new(); + ret.insert(0, reward.into()); + ret + } + ethjson::spec::BlockReward::Multi(multi) => multi + .into_iter() + .map(|(block, reward)| (block.into(), reward.into())) + .collect(), + }, + ), + expip2_transition: p.expip2_transition.map_or(u64::max_value(), Into::into), + expip2_duration_limit: p.expip2_duration_limit.map_or(30, Into::into), + progpow_transition: p.progpow_transition.map_or(u64::max_value(), Into::into), + block_reward_contract_transition: p + .block_reward_contract_transition + .map_or(0, Into::into), + block_reward_contract: match ( + p.block_reward_contract_code, + p.block_reward_contract_address, + ) { + (Some(code), _) => Some(BlockRewardContract::new_from_code(Arc::new(code.into()))), + (_, Some(address)) => Some(BlockRewardContract::new_from_address(address.into())), + (None, None) => None, + }, + difficulty_bomb_delays: p + .difficulty_bomb_delays + .unwrap_or_default() + .into_iter() + .map(|(block, delay)| (block.into(), delay.into())) + .collect(), + } + } } /// Engine using Ethash proof-of-work consensus algorithm, suitable for Ethereum /// mainnet chains in the Olympic, Frontier and Homestead eras. 
pub struct Ethash { - ethash_params: EthashParams, - pow: EthashManager, - machine: EthereumMachine, + ethash_params: EthashParams, + pow: EthashManager, + machine: EthereumMachine, } impl Ethash { - /// Create a new instance of Ethash engine - pub fn new>>( - cache_dir: &Path, - ethash_params: EthashParams, - machine: EthereumMachine, - optimize_for: T, - ) -> Arc { - let progpow_transition = ethash_params.progpow_transition; + /// Create a new instance of Ethash engine + pub fn new>>( + cache_dir: &Path, + ethash_params: EthashParams, + machine: EthereumMachine, + optimize_for: T, + ) -> Arc { + let progpow_transition = ethash_params.progpow_transition; - Arc::new(Ethash { - ethash_params, - machine, - pow: EthashManager::new(cache_dir.as_ref(), optimize_for.into(), progpow_transition), - }) - } + Arc::new(Ethash { + ethash_params, + machine, + pow: EthashManager::new(cache_dir.as_ref(), optimize_for.into(), progpow_transition), + }) + } } // TODO [rphmeier] @@ -204,719 +220,924 @@ impl Ethash { // in the future, we might move the Ethash epoch // caching onto this mechanism as well. impl engines::EpochVerifier for Arc { - fn verify_light(&self, _header: &Header) -> Result<(), Error> { Ok(()) } - fn verify_heavy(&self, header: &Header) -> Result<(), Error> { - self.verify_block_unordered(header) - } + fn verify_light(&self, _header: &Header) -> Result<(), Error> { + Ok(()) + } + fn verify_heavy(&self, header: &Header) -> Result<(), Error> { + self.verify_block_unordered(header) + } } impl Engine for Arc { - fn name(&self) -> &str { "Ethash" } - fn machine(&self) -> &EthereumMachine { &self.machine } + fn name(&self) -> &str { + "Ethash" + } + fn machine(&self) -> &EthereumMachine { + &self.machine + } - // Two fields - nonce and mix. - fn seal_fields(&self, _header: &Header) -> usize { 2 } + // Two fields - nonce and mix. 
+ fn seal_fields(&self, _header: &Header) -> usize { + 2 + } - /// Additional engine-specific information for the user/developer concerning `header`. - fn extra_info(&self, header: &Header) -> BTreeMap { - match Seal::parse_seal(header.seal()) { - Ok(seal) => map![ - "nonce".to_owned() => format!("0x{:x}", seal.nonce), - "mixHash".to_owned() => format!("0x{:x}", seal.mix_hash) - ], - _ => BTreeMap::default() - } - } + /// Additional engine-specific information for the user/developer concerning `header`. + fn extra_info(&self, header: &Header) -> BTreeMap { + match Seal::parse_seal(header.seal()) { + Ok(seal) => map![ + "nonce".to_owned() => format!("0x{:x}", seal.nonce), + "mixHash".to_owned() => format!("0x{:x}", seal.mix_hash) + ], + _ => BTreeMap::default(), + } + } - fn maximum_uncle_count(&self, _block: BlockNumber) -> usize { 2 } + fn maximum_uncle_count(&self, _block: BlockNumber) -> usize { + 2 + } - fn maximum_gas_limit(&self) -> Option { Some(0x7fff_ffff_ffff_ffffu64.into()) } + fn maximum_gas_limit(&self) -> Option { + Some(0x7fff_ffff_ffff_ffffu64.into()) + } - fn populate_from_parent(&self, header: &mut Header, parent: &Header) { - let difficulty = self.calculate_difficulty(header, parent); - header.set_difficulty(difficulty); - } + fn populate_from_parent(&self, header: &mut Header, parent: &Header) { + let difficulty = self.calculate_difficulty(header, parent); + header.set_difficulty(difficulty); + } - /// Apply the block reward on finalisation of the block. - /// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current). - fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { - use std::ops::Shr; + /// Apply the block reward on finalisation of the block. + /// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current). 
+ fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + use std::ops::Shr; - let author = *block.header.author(); - let number = block.header.number(); + let author = *block.header.author(); + let number = block.header.number(); - let rewards = match self.ethash_params.block_reward_contract { - Some(ref c) if number >= self.ethash_params.block_reward_contract_transition => { - let mut beneficiaries = Vec::new(); + let rewards = match self.ethash_params.block_reward_contract { + Some(ref c) if number >= self.ethash_params.block_reward_contract_transition => { + let mut beneficiaries = Vec::new(); - beneficiaries.push((author, RewardKind::Author)); - for u in &block.uncles { - let uncle_author = u.author(); - beneficiaries.push((*uncle_author, RewardKind::uncle(number, u.number()))); - } + beneficiaries.push((author, RewardKind::Author)); + for u in &block.uncles { + let uncle_author = u.author(); + beneficiaries.push((*uncle_author, RewardKind::uncle(number, u.number()))); + } - let mut call = engines::default_system_or_code_call(&self.machine, block); + let mut call = engines::default_system_or_code_call(&self.machine, block); - let rewards = c.reward(&beneficiaries, &mut call)?; - rewards.into_iter().map(|(author, amount)| (author, RewardKind::External, amount)).collect() - }, - _ => { - let mut rewards = Vec::new(); + let rewards = c.reward(&beneficiaries, &mut call)?; + rewards + .into_iter() + .map(|(author, amount)| (author, RewardKind::External, amount)) + .collect() + } + _ => { + let mut rewards = Vec::new(); - let (_, reward) = self.ethash_params.block_reward.iter() + let (_, reward) = self.ethash_params.block_reward.iter() .rev() .find(|&(block, _)| *block <= number) .expect("Current block's reward is not found; this indicates a chain config error; qed"); - let reward = *reward; + let reward = *reward; - // Applies ECIP-1017 eras. 
- let eras_rounds = self.ethash_params.ecip1017_era_rounds; - let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, reward, number); + // Applies ECIP-1017 eras. + let eras_rounds = self.ethash_params.ecip1017_era_rounds; + let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, reward, number); - //let n_uncles = LiveBlock::uncles(&*block).len(); - let n_uncles = block.uncles.len(); + //let n_uncles = LiveBlock::uncles(&*block).len(); + let n_uncles = block.uncles.len(); - // Bestow block rewards. - let mut result_block_reward = reward + reward.shr(5) * U256::from(n_uncles); + // Bestow block rewards. + let mut result_block_reward = reward + reward.shr(5) * U256::from(n_uncles); - rewards.push((author, RewardKind::Author, result_block_reward)); + rewards.push((author, RewardKind::Author, result_block_reward)); - // Bestow uncle rewards. - for u in &block.uncles { - let uncle_author = u.author(); - let result_uncle_reward = if eras == 0 { - (reward * U256::from(8 + u.number() - number)).shr(3) - } else { - reward.shr(5) - }; + // Bestow uncle rewards. 
+ for u in &block.uncles { + let uncle_author = u.author(); + let result_uncle_reward = if eras == 0 { + (reward * U256::from(8 + u.number() - number)).shr(3) + } else { + reward.shr(5) + }; - rewards.push((*uncle_author, RewardKind::uncle(number, u.number()), result_uncle_reward)); - } + rewards.push(( + *uncle_author, + RewardKind::uncle(number, u.number()), + result_uncle_reward, + )); + } - rewards - }, - }; + rewards + } + }; - block_reward::apply_block_rewards(&rewards, block, &self.machine) - } + block_reward::apply_block_rewards(&rewards, block, &self.machine) + } - #[cfg(not(feature = "miner-debug"))] - fn verify_local_seal(&self, header: &Header) -> Result<(), Error> { - self.verify_block_basic(header) - .and_then(|_| self.verify_block_unordered(header)) - } + #[cfg(not(feature = "miner-debug"))] + fn verify_local_seal(&self, header: &Header) -> Result<(), Error> { + self.verify_block_basic(header) + .and_then(|_| self.verify_block_unordered(header)) + } - #[cfg(feature = "miner-debug")] - fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> { - warn!("Skipping seal verification, running in miner testing mode."); - Ok(()) - } + #[cfg(feature = "miner-debug")] + fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> { + warn!("Skipping seal verification, running in miner testing mode."); + Ok(()) + } - fn verify_block_basic(&self, header: &Header) -> Result<(), Error> { - // check the seal fields. - let seal = Seal::parse_seal(header.seal())?; + fn verify_block_basic(&self, header: &Header) -> Result<(), Error> { + // check the seal fields. + let seal = Seal::parse_seal(header.seal())?; - // TODO: consider removing these lines. - let min_difficulty = self.ethash_params.minimum_difficulty; - if header.difficulty() < &min_difficulty { - return Err(From::from(BlockError::DifficultyOutOfBounds(OutOfBounds { min: Some(min_difficulty), max: None, found: header.difficulty().clone() }))) - } + // TODO: consider removing these lines. 
+ let min_difficulty = self.ethash_params.minimum_difficulty; + if header.difficulty() < &min_difficulty { + return Err(From::from(BlockError::DifficultyOutOfBounds(OutOfBounds { + min: Some(min_difficulty), + max: None, + found: header.difficulty().clone(), + }))); + } - let difficulty = ethash::boundary_to_difficulty(&H256(quick_get_difficulty( - &header.bare_hash().0, - seal.nonce.low_u64(), - &seal.mix_hash.0, - header.number() >= self.ethash_params.progpow_transition - ))); + let difficulty = ethash::boundary_to_difficulty(&H256(quick_get_difficulty( + &header.bare_hash().0, + seal.nonce.low_u64(), + &seal.mix_hash.0, + header.number() >= self.ethash_params.progpow_transition, + ))); - if &difficulty < header.difficulty() { - return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty }))); - } + if &difficulty < header.difficulty() { + return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { + min: Some(header.difficulty().clone()), + max: None, + found: difficulty, + }))); + } - Ok(()) - } + Ok(()) + } - fn verify_block_unordered(&self, header: &Header) -> Result<(), Error> { - let seal = Seal::parse_seal(header.seal())?; + fn verify_block_unordered(&self, header: &Header) -> Result<(), Error> { + let seal = Seal::parse_seal(header.seal())?; - let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, seal.nonce.low_u64()); - let mix = H256(result.mix_hash); - let difficulty = ethash::boundary_to_difficulty(&H256(result.value)); - trace!(target: "miner", "num: {num}, seed: {seed}, h: {h}, non: {non}, mix: {mix}, res: {res}", + let result = self.pow.compute_light( + header.number() as u64, + &header.bare_hash().0, + seal.nonce.low_u64(), + ); + let mix = H256(result.mix_hash); + let difficulty = ethash::boundary_to_difficulty(&H256(result.value)); + trace!(target: "miner", "num: {num}, seed: {seed}, h: {h}, non: {non}, mix: {mix}, res: {res}", num = 
header.number() as u64, seed = H256(slow_hash_block_number(header.number() as u64)), h = header.bare_hash(), non = seal.nonce.low_u64(), mix = H256(result.mix_hash), res = H256(result.value)); - if mix != seal.mix_hash { - return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: seal.mix_hash }))); - } - if &difficulty < header.difficulty() { - return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty }))); - } - Ok(()) - } + if mix != seal.mix_hash { + return Err(From::from(BlockError::MismatchedH256SealElement( + Mismatch { + expected: mix, + found: seal.mix_hash, + }, + ))); + } + if &difficulty < header.difficulty() { + return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { + min: Some(header.difficulty().clone()), + max: None, + found: difficulty, + }))); + } + Ok(()) + } - fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> { - // we should not calculate difficulty for genesis blocks - if header.number() == 0 { - return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }))); - } + fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> { + // we should not calculate difficulty for genesis blocks + if header.number() == 0 { + return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { + min: Some(1), + max: None, + found: header.number(), + }))); + } - // Check difficulty is correct given the two timestamps. - let expected_difficulty = self.calculate_difficulty(header, parent); - if header.difficulty() != &expected_difficulty { - return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: expected_difficulty, found: header.difficulty().clone() }))) - } + // Check difficulty is correct given the two timestamps. 
+ let expected_difficulty = self.calculate_difficulty(header, parent); + if header.difficulty() != &expected_difficulty { + return Err(From::from(BlockError::InvalidDifficulty(Mismatch { + expected: expected_difficulty, + found: header.difficulty().clone(), + }))); + } - Ok(()) - } + Ok(()) + } - fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> engines::ConstructedVerifier<'a, EthereumMachine> { - engines::ConstructedVerifier::Trusted(Box::new(self.clone())) - } + fn epoch_verifier<'a>( + &self, + _header: &Header, + _proof: &'a [u8], + ) -> engines::ConstructedVerifier<'a, EthereumMachine> { + engines::ConstructedVerifier::Trusted(Box::new(self.clone())) + } - fn snapshot_components(&self) -> Option> { - Some(Box::new(::snapshot::PowSnapshot::new(SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS))) - } + fn snapshot_components(&self) -> Option> { + Some(Box::new(::snapshot::PowSnapshot::new( + SNAPSHOT_BLOCKS, + MAX_SNAPSHOT_BLOCKS, + ))) + } - fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> engines::ForkChoice { - engines::total_difficulty_fork_choice(new, current) - } + fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> engines::ForkChoice { + engines::total_difficulty_fork_choice(new, current) + } } impl Ethash { - fn calculate_difficulty(&self, header: &Header, parent: &Header) -> U256 { - const EXP_DIFF_PERIOD: u64 = 100_000; - if header.number() == 0 { - panic!("Can't calculate genesis block difficulty"); - } + fn calculate_difficulty(&self, header: &Header, parent: &Header) -> U256 { + const EXP_DIFF_PERIOD: u64 = 100_000; + if header.number() == 0 { + panic!("Can't calculate genesis block difficulty"); + } - let parent_has_uncles = parent.uncles_hash() != &KECCAK_EMPTY_LIST_RLP; + let parent_has_uncles = parent.uncles_hash() != &KECCAK_EMPTY_LIST_RLP; - let min_difficulty = self.ethash_params.minimum_difficulty; + let min_difficulty = self.ethash_params.minimum_difficulty; - let difficulty_hardfork = 
header.number() >= self.ethash_params.difficulty_hardfork_transition; - let difficulty_bound_divisor = if difficulty_hardfork { - self.ethash_params.difficulty_hardfork_bound_divisor - } else { - self.ethash_params.difficulty_bound_divisor - }; + let difficulty_hardfork = + header.number() >= self.ethash_params.difficulty_hardfork_transition; + let difficulty_bound_divisor = if difficulty_hardfork { + self.ethash_params.difficulty_hardfork_bound_divisor + } else { + self.ethash_params.difficulty_bound_divisor + }; - let expip2_hardfork = header.number() >= self.ethash_params.expip2_transition; - let duration_limit = if expip2_hardfork { - self.ethash_params.expip2_duration_limit - } else { - self.ethash_params.duration_limit - }; + let expip2_hardfork = header.number() >= self.ethash_params.expip2_transition; + let duration_limit = if expip2_hardfork { + self.ethash_params.expip2_duration_limit + } else { + self.ethash_params.duration_limit + }; - let frontier_limit = self.ethash_params.homestead_transition; + let frontier_limit = self.ethash_params.homestead_transition; - let mut target = if header.number() < frontier_limit { - if header.timestamp() >= parent.timestamp() + duration_limit { - *parent.difficulty() - (*parent.difficulty() / difficulty_bound_divisor) - } else { - *parent.difficulty() + (*parent.difficulty() / difficulty_bound_divisor) - } - } else { - trace!(target: "ethash", "Calculating difficulty parent.difficulty={}, header.timestamp={}, parent.timestamp={}", parent.difficulty(), header.timestamp(), parent.timestamp()); - //block_diff = parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99) - let (increment_divisor, threshold) = if header.number() < self.ethash_params.eip100b_transition { - (self.ethash_params.difficulty_increment_divisor, 1) - } else if parent_has_uncles { - (self.ethash_params.metropolis_difficulty_increment_divisor, 2) - } else { - 
(self.ethash_params.metropolis_difficulty_increment_divisor, 1) - }; + let mut target = if header.number() < frontier_limit { + if header.timestamp() >= parent.timestamp() + duration_limit { + *parent.difficulty() - (*parent.difficulty() / difficulty_bound_divisor) + } else { + *parent.difficulty() + (*parent.difficulty() / difficulty_bound_divisor) + } + } else { + trace!(target: "ethash", "Calculating difficulty parent.difficulty={}, header.timestamp={}, parent.timestamp={}", parent.difficulty(), header.timestamp(), parent.timestamp()); + //block_diff = parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99) + let (increment_divisor, threshold) = + if header.number() < self.ethash_params.eip100b_transition { + (self.ethash_params.difficulty_increment_divisor, 1) + } else if parent_has_uncles { + ( + self.ethash_params.metropolis_difficulty_increment_divisor, + 2, + ) + } else { + ( + self.ethash_params.metropolis_difficulty_increment_divisor, + 1, + ) + }; - let diff_inc = (header.timestamp() - parent.timestamp()) / increment_divisor; - if diff_inc <= threshold { - *parent.difficulty() + *parent.difficulty() / difficulty_bound_divisor * U256::from(threshold - diff_inc) - } else { - let multiplier: U256 = cmp::min(diff_inc - threshold, 99).into(); - parent.difficulty().saturating_sub( - *parent.difficulty() / difficulty_bound_divisor * multiplier - ) - } - }; - target = cmp::max(min_difficulty, target); - if header.number() < self.ethash_params.bomb_defuse_transition { - if header.number() < self.ethash_params.ecip1010_pause_transition { - let mut number = header.number(); - let original_number = number; - for (block, delay) in &self.ethash_params.difficulty_bomb_delays { - if original_number >= *block { - number = number.saturating_sub(*delay); - } - } - let period = (number / EXP_DIFF_PERIOD) as usize; - if period > 1 { - target = cmp::max(min_difficulty, target + (U256::from(1) << (period - 2))); - } - } else if 
header.number() < self.ethash_params.ecip1010_continue_transition { - let fixed_difficulty = ((self.ethash_params.ecip1010_pause_transition / EXP_DIFF_PERIOD) - 2) as usize; - target = cmp::max(min_difficulty, target + (U256::from(1) << fixed_difficulty)); - } else { - let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize; - let delay = ((self.ethash_params.ecip1010_continue_transition - self.ethash_params.ecip1010_pause_transition) / EXP_DIFF_PERIOD) as usize; - target = cmp::max(min_difficulty, target + (U256::from(1) << (period - delay - 2))); - } - } - target - } + let diff_inc = (header.timestamp() - parent.timestamp()) / increment_divisor; + if diff_inc <= threshold { + *parent.difficulty() + + *parent.difficulty() / difficulty_bound_divisor + * U256::from(threshold - diff_inc) + } else { + let multiplier: U256 = cmp::min(diff_inc - threshold, 99).into(); + parent + .difficulty() + .saturating_sub(*parent.difficulty() / difficulty_bound_divisor * multiplier) + } + }; + target = cmp::max(min_difficulty, target); + if header.number() < self.ethash_params.bomb_defuse_transition { + if header.number() < self.ethash_params.ecip1010_pause_transition { + let mut number = header.number(); + let original_number = number; + for (block, delay) in &self.ethash_params.difficulty_bomb_delays { + if original_number >= *block { + number = number.saturating_sub(*delay); + } + } + let period = (number / EXP_DIFF_PERIOD) as usize; + if period > 1 { + target = cmp::max(min_difficulty, target + (U256::from(1) << (period - 2))); + } + } else if header.number() < self.ethash_params.ecip1010_continue_transition { + let fixed_difficulty = + ((self.ethash_params.ecip1010_pause_transition / EXP_DIFF_PERIOD) - 2) as usize; + target = cmp::max(min_difficulty, target + (U256::from(1) << fixed_difficulty)); + } else { + let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize; + let delay = ((self.ethash_params.ecip1010_continue_transition + - 
self.ethash_params.ecip1010_pause_transition) + / EXP_DIFF_PERIOD) as usize; + target = cmp::max( + min_difficulty, + target + (U256::from(1) << (period - delay - 2)), + ); + } + } + target + } } -fn ecip1017_eras_block_reward(era_rounds: u64, mut reward: U256, block_number:u64) -> (u64, U256) { - let eras = if block_number != 0 && block_number % era_rounds == 0 { - block_number / era_rounds - 1 - } else { - block_number / era_rounds - }; - let mut divi = U256::from(1); - for _ in 0..eras { - reward = reward * U256::from(4); - divi = divi * U256::from(5); - } - reward = reward / divi; - (eras, reward) +fn ecip1017_eras_block_reward(era_rounds: u64, mut reward: U256, block_number: u64) -> (u64, U256) { + let eras = if block_number != 0 && block_number % era_rounds == 0 { + block_number / era_rounds - 1 + } else { + block_number / era_rounds + }; + let mut divi = U256::from(1); + for _ in 0..eras { + reward = reward * U256::from(4); + divi = divi * U256::from(5); + } + reward = reward / divi; + (eras, reward) } #[cfg(test)] mod tests { - use std::str::FromStr; - use std::sync::Arc; - use std::collections::BTreeMap; - use ethereum_types::{H64, H256, U256, Address}; - use block::*; - use test_helpers::get_temp_state_db; - use error::{BlockError, Error, ErrorKind}; - use types::header::Header; - use spec::Spec; - use engines::Engine; - use super::super::{new_morden, new_mcip3_test, new_homestead_test_machine}; - use super::{Ethash, EthashParams, ecip1017_eras_block_reward}; - use rlp; - use tempdir::TempDir; + use super::{ + super::{new_homestead_test_machine, new_mcip3_test, new_morden}, + ecip1017_eras_block_reward, Ethash, EthashParams, + }; + use block::*; + use engines::Engine; + use error::{BlockError, Error, ErrorKind}; + use ethereum_types::{Address, H256, H64, U256}; + use rlp; + use spec::Spec; + use std::{collections::BTreeMap, str::FromStr, sync::Arc}; + use tempdir::TempDir; + use test_helpers::get_temp_state_db; + use types::header::Header; - fn 
test_spec() -> Spec { - let tempdir = TempDir::new("").unwrap(); - new_morden(&tempdir.path()) - } + fn test_spec() -> Spec { + let tempdir = TempDir::new("").unwrap(); + new_morden(&tempdir.path()) + } - fn get_default_ethash_params() -> EthashParams { - EthashParams { - minimum_difficulty: U256::from(131072), - difficulty_bound_divisor: U256::from(2048), - difficulty_increment_divisor: 10, - metropolis_difficulty_increment_divisor: 9, - homestead_transition: 1150000, - duration_limit: 13, - block_reward: { - let mut ret = BTreeMap::new(); - ret.insert(0, 0.into()); - ret - }, - difficulty_hardfork_transition: u64::max_value(), - difficulty_hardfork_bound_divisor: U256::from(0), - bomb_defuse_transition: u64::max_value(), - eip100b_transition: u64::max_value(), - ecip1010_pause_transition: u64::max_value(), - ecip1010_continue_transition: u64::max_value(), - ecip1017_era_rounds: u64::max_value(), - expip2_transition: u64::max_value(), - expip2_duration_limit: 30, - block_reward_contract: None, - block_reward_contract_transition: 0, - difficulty_bomb_delays: BTreeMap::new(), - progpow_transition: u64::max_value(), - } - } + fn get_default_ethash_params() -> EthashParams { + EthashParams { + minimum_difficulty: U256::from(131072), + difficulty_bound_divisor: U256::from(2048), + difficulty_increment_divisor: 10, + metropolis_difficulty_increment_divisor: 9, + homestead_transition: 1150000, + duration_limit: 13, + block_reward: { + let mut ret = BTreeMap::new(); + ret.insert(0, 0.into()); + ret + }, + difficulty_hardfork_transition: u64::max_value(), + difficulty_hardfork_bound_divisor: U256::from(0), + bomb_defuse_transition: u64::max_value(), + eip100b_transition: u64::max_value(), + ecip1010_pause_transition: u64::max_value(), + ecip1010_continue_transition: u64::max_value(), + ecip1017_era_rounds: u64::max_value(), + expip2_transition: u64::max_value(), + expip2_duration_limit: 30, + block_reward_contract: None, + block_reward_contract_transition: 0, + 
difficulty_bomb_delays: BTreeMap::new(), + progpow_transition: u64::max_value(), + } + } - #[test] - fn on_close_block() { - let spec = test_spec(); - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b = b.close().unwrap(); - assert_eq!(b.state.balance(&Address::zero()).unwrap(), U256::from_str("4563918244f40000").unwrap()); - } + #[test] + fn on_close_block() { + let spec = test_spec(); + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let b = OpenBlock::new( + engine, + Default::default(), + false, + db, + &genesis_header, + last_hashes, + Address::zero(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b = b.close().unwrap(); + assert_eq!( + b.state.balance(&Address::zero()).unwrap(), + U256::from_str("4563918244f40000").unwrap() + ); + } - #[test] - fn has_valid_ecip1017_eras_block_reward() { - let eras_rounds = 5000000; + #[test] + fn has_valid_ecip1017_eras_block_reward() { + let eras_rounds = 5000000; - let start_reward: U256 = "4563918244F40000".parse().unwrap(); + let start_reward: U256 = "4563918244F40000".parse().unwrap(); - let block_number = 0; - let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); - assert_eq!(0, eras); - assert_eq!(U256::from_str("4563918244F40000").unwrap(), reward); + let block_number = 0; + let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); + assert_eq!(0, eras); + 
assert_eq!(U256::from_str("4563918244F40000").unwrap(), reward); - let block_number = 5000000; - let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); - assert_eq!(0, eras); - assert_eq!(U256::from_str("4563918244F40000").unwrap(), reward); + let block_number = 5000000; + let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); + assert_eq!(0, eras); + assert_eq!(U256::from_str("4563918244F40000").unwrap(), reward); - let block_number = 10000000; - let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); - assert_eq!(1, eras); - assert_eq!(U256::from_str("3782DACE9D900000").unwrap(), reward); + let block_number = 10000000; + let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); + assert_eq!(1, eras); + assert_eq!(U256::from_str("3782DACE9D900000").unwrap(), reward); - let block_number = 20000000; - let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); - assert_eq!(3, eras); - assert_eq!(U256::from_str("2386F26FC1000000").unwrap(), reward); + let block_number = 20000000; + let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); + assert_eq!(3, eras); + assert_eq!(U256::from_str("2386F26FC1000000").unwrap(), reward); - let block_number = 80000000; - let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); - assert_eq!(15, eras); - assert_eq!(U256::from_str("271000000000000").unwrap(), reward); + let block_number = 80000000; + let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); + assert_eq!(15, eras); + assert_eq!(U256::from_str("271000000000000").unwrap(), reward); - let block_number = 250000000; - let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); - assert_eq!(49, eras); - assert_eq!(U256::from_str("51212FFBAF0A").unwrap(), reward); - } + let 
block_number = 250000000; + let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number); + assert_eq!(49, eras); + assert_eq!(U256::from_str("51212FFBAF0A").unwrap(), reward); + } - #[test] - fn on_close_block_with_uncle() { - let spec = test_spec(); - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let last_hashes = Arc::new(vec![genesis_header.hash()]); - let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let mut uncle = Header::new(); - let uncle_author: Address = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into(); - uncle.set_author(uncle_author); - b.push_uncle(uncle).unwrap(); + #[test] + fn on_close_block_with_uncle() { + let spec = test_spec(); + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let mut b = OpenBlock::new( + engine, + Default::default(), + false, + db, + &genesis_header, + last_hashes, + Address::zero(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let mut uncle = Header::new(); + let uncle_author: Address = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into(); + uncle.set_author(uncle_author); + b.push_uncle(uncle).unwrap(); - let b = b.close().unwrap(); - assert_eq!(b.state.balance(&Address::zero()).unwrap(), "478eae0e571ba000".into()); - assert_eq!(b.state.balance(&uncle_author).unwrap(), "3cb71f51fc558000".into()); - } + let b = b.close().unwrap(); + assert_eq!( + b.state.balance(&Address::zero()).unwrap(), + "478eae0e571ba000".into() + ); + assert_eq!( + b.state.balance(&uncle_author).unwrap(), + "3cb71f51fc558000".into() + ); + } - #[test] - fn 
has_valid_mcip3_era_block_rewards() { - let spec = new_mcip3_test(); - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); - let b = b.close().unwrap(); + #[test] + fn has_valid_mcip3_era_block_rewards() { + let spec = new_mcip3_test(); + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let b = OpenBlock::new( + engine, + Default::default(), + false, + db, + &genesis_header, + last_hashes, + Address::zero(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b = b.close().unwrap(); - let ubi_contract: Address = "00efdd5883ec628983e9063c7d969fe268bbf310".into(); - let dev_contract: Address = "00756cf8159095948496617f5fb17ed95059f536".into(); - assert_eq!(b.state.balance(&Address::zero()).unwrap(), U256::from_str("d8d726b7177a80000").unwrap()); - assert_eq!(b.state.balance(&ubi_contract).unwrap(), U256::from_str("2b5e3af16b1880000").unwrap()); - assert_eq!(b.state.balance(&dev_contract).unwrap(), U256::from_str("c249fdd327780000").unwrap()); - } + let ubi_contract: Address = "00efdd5883ec628983e9063c7d969fe268bbf310".into(); + let dev_contract: Address = "00756cf8159095948496617f5fb17ed95059f536".into(); + assert_eq!( + b.state.balance(&Address::zero()).unwrap(), + U256::from_str("d8d726b7177a80000").unwrap() + ); + assert_eq!( + b.state.balance(&ubi_contract).unwrap(), + U256::from_str("2b5e3af16b1880000").unwrap() + ); + assert_eq!( + b.state.balance(&dev_contract).unwrap(), + U256::from_str("c249fdd327780000").unwrap() + 
); + } - #[test] - fn has_valid_metadata() { - let engine = test_spec().engine; - assert!(!engine.name().is_empty()); - } + #[test] + fn has_valid_metadata() { + let engine = test_spec().engine; + assert!(!engine.name().is_empty()); + } - #[test] - fn can_return_schedule() { - let engine = test_spec().engine; - let schedule = engine.schedule(10000000); - assert!(schedule.stack_limit > 0); + #[test] + fn can_return_schedule() { + let engine = test_spec().engine; + let schedule = engine.schedule(10000000); + assert!(schedule.stack_limit > 0); - let schedule = engine.schedule(100); - assert!(!schedule.have_delegate_call); - } + let schedule = engine.schedule(100); + assert!(!schedule.have_delegate_call); + } - #[test] - fn can_do_seal_verification_fail() { - let engine = test_spec().engine; - let header: Header = Header::default(); + #[test] + fn can_do_seal_verification_fail() { + let engine = test_spec().engine; + let header: Header = Header::default(); - let verify_result = engine.verify_block_basic(&header); + let verify_result = engine.verify_block_basic(&header); - match verify_result { - Err(Error(ErrorKind::Block(BlockError::InvalidSealArity(_)), _)) => {}, - Err(_) => { panic!("should be block seal-arity mismatch error (got {:?})", verify_result); }, - _ => { panic!("Should be error, got Ok"); }, - } - } + match verify_result { + Err(Error(ErrorKind::Block(BlockError::InvalidSealArity(_)), _)) => {} + Err(_) => { + panic!( + "should be block seal-arity mismatch error (got {:?})", + verify_result + ); + } + _ => { + panic!("Should be error, got Ok"); + } + } + } - #[test] - fn can_do_difficulty_verification_fail() { - let engine = test_spec().engine; - let mut header: Header = Header::default(); - header.set_seal(vec![rlp::encode(&H256::zero()), rlp::encode(&H64::zero())]); + #[test] + fn can_do_difficulty_verification_fail() { + let engine = test_spec().engine; + let mut header: Header = Header::default(); + header.set_seal(vec![rlp::encode(&H256::zero()), 
rlp::encode(&H64::zero())]); - let verify_result = engine.verify_block_basic(&header); + let verify_result = engine.verify_block_basic(&header); - match verify_result { - Err(Error(ErrorKind::Block(BlockError::DifficultyOutOfBounds(_)), _)) => {}, - Err(_) => { panic!("should be block difficulty error (got {:?})", verify_result); }, - _ => { panic!("Should be error, got Ok"); }, - } - } + match verify_result { + Err(Error(ErrorKind::Block(BlockError::DifficultyOutOfBounds(_)), _)) => {} + Err(_) => { + panic!("should be block difficulty error (got {:?})", verify_result); + } + _ => { + panic!("Should be error, got Ok"); + } + } + } - #[test] - fn can_do_proof_of_work_verification_fail() { - let engine = test_spec().engine; - let mut header: Header = Header::default(); - header.set_seal(vec![rlp::encode(&H256::zero()), rlp::encode(&H64::zero())]); - header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap()); + #[test] + fn can_do_proof_of_work_verification_fail() { + let engine = test_spec().engine; + let mut header: Header = Header::default(); + header.set_seal(vec![rlp::encode(&H256::zero()), rlp::encode(&H64::zero())]); + header.set_difficulty( + U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa") + .unwrap(), + ); - let verify_result = engine.verify_block_basic(&header); + let verify_result = engine.verify_block_basic(&header); - match verify_result { - Err(Error(ErrorKind::Block(BlockError::InvalidProofOfWork(_)), _)) => {}, - Err(_) => { panic!("should be invalid proof of work error (got {:?})", verify_result); }, - _ => { panic!("Should be error, got Ok"); }, - } - } + match verify_result { + Err(Error(ErrorKind::Block(BlockError::InvalidProofOfWork(_)), _)) => {} + Err(_) => { + panic!( + "should be invalid proof of work error (got {:?})", + verify_result + ); + } + _ => { + panic!("Should be error, got Ok"); + } + } + } - #[test] - fn 
can_do_seal_unordered_verification_fail() { - let engine = test_spec().engine; - let header = Header::default(); + #[test] + fn can_do_seal_unordered_verification_fail() { + let engine = test_spec().engine; + let header = Header::default(); - let verify_result = engine.verify_block_unordered(&header); + let verify_result = engine.verify_block_unordered(&header); - match verify_result { - Err(Error(ErrorKind::Block(BlockError::InvalidSealArity(_)), _)) => {}, - Err(_) => { panic!("should be block seal-arity mismatch error (got {:?})", verify_result); }, - _ => { panic!("Should be error, got Ok"); }, - } - } + match verify_result { + Err(Error(ErrorKind::Block(BlockError::InvalidSealArity(_)), _)) => {} + Err(_) => { + panic!( + "should be block seal-arity mismatch error (got {:?})", + verify_result + ); + } + _ => { + panic!("Should be error, got Ok"); + } + } + } - #[test] - fn can_do_seal_unordered_verification_fail2() { - let engine = test_spec().engine; - let mut header = Header::default(); - header.set_seal(vec![vec![], vec![]]); + #[test] + fn can_do_seal_unordered_verification_fail2() { + let engine = test_spec().engine; + let mut header = Header::default(); + header.set_seal(vec![vec![], vec![]]); - let verify_result = engine.verify_block_unordered(&header); - // rlp error, shouldn't panic - assert!(verify_result.is_err()); - } + let verify_result = engine.verify_block_unordered(&header); + // rlp error, shouldn't panic + assert!(verify_result.is_err()); + } - #[test] - fn can_do_seal256_verification_fail() { - let engine = test_spec().engine; - let mut header: Header = Header::default(); - header.set_seal(vec![rlp::encode(&H256::zero()), rlp::encode(&H64::zero())]); - let verify_result = engine.verify_block_unordered(&header); + #[test] + fn can_do_seal256_verification_fail() { + let engine = test_spec().engine; + let mut header: Header = Header::default(); + header.set_seal(vec![rlp::encode(&H256::zero()), rlp::encode(&H64::zero())]); + let verify_result = 
engine.verify_block_unordered(&header); - match verify_result { - Err(Error(ErrorKind::Block(BlockError::MismatchedH256SealElement(_)), _)) => {}, - Err(_) => { panic!("should be invalid 256-bit seal fail (got {:?})", verify_result); }, - _ => { panic!("Should be error, got Ok"); }, - } - } + match verify_result { + Err(Error(ErrorKind::Block(BlockError::MismatchedH256SealElement(_)), _)) => {} + Err(_) => { + panic!( + "should be invalid 256-bit seal fail (got {:?})", + verify_result + ); + } + _ => { + panic!("Should be error, got Ok"); + } + } + } - #[test] - fn can_do_proof_of_work_unordered_verification_fail() { - let engine = test_spec().engine; - let mut header: Header = Header::default(); - header.set_seal(vec![rlp::encode(&H256::from("b251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d")), rlp::encode(&H64::zero())]); - header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap()); + #[test] + fn can_do_proof_of_work_unordered_verification_fail() { + let engine = test_spec().engine; + let mut header: Header = Header::default(); + header.set_seal(vec![ + rlp::encode(&H256::from( + "b251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d", + )), + rlp::encode(&H64::zero()), + ]); + header.set_difficulty( + U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa") + .unwrap(), + ); - let verify_result = engine.verify_block_unordered(&header); + let verify_result = engine.verify_block_unordered(&header); - match verify_result { - Err(Error(ErrorKind::Block(BlockError::InvalidProofOfWork(_)), _)) => {}, - Err(_) => { panic!("should be invalid proof-of-work fail (got {:?})", verify_result); }, - _ => { panic!("Should be error, got Ok"); }, - } - } + match verify_result { + Err(Error(ErrorKind::Block(BlockError::InvalidProofOfWork(_)), _)) => {} + Err(_) => { + panic!( + "should be invalid proof-of-work fail (got {:?})", + verify_result + ); + } + _ => { + 
panic!("Should be error, got Ok"); + } + } + } - #[test] - fn can_verify_block_family_genesis_fail() { - let engine = test_spec().engine; - let header: Header = Header::default(); - let parent_header: Header = Header::default(); + #[test] + fn can_verify_block_family_genesis_fail() { + let engine = test_spec().engine; + let header: Header = Header::default(); + let parent_header: Header = Header::default(); - let verify_result = engine.verify_block_family(&header, &parent_header); + let verify_result = engine.verify_block_family(&header, &parent_header); - match verify_result { - Err(Error(ErrorKind::Block(BlockError::RidiculousNumber(_)), _)) => {}, - Err(_) => { panic!("should be invalid block number fail (got {:?})", verify_result); }, - _ => { panic!("Should be error, got Ok"); }, - } - } + match verify_result { + Err(Error(ErrorKind::Block(BlockError::RidiculousNumber(_)), _)) => {} + Err(_) => { + panic!( + "should be invalid block number fail (got {:?})", + verify_result + ); + } + _ => { + panic!("Should be error, got Ok"); + } + } + } - #[test] - fn can_verify_block_family_difficulty_fail() { - let engine = test_spec().engine; - let mut header: Header = Header::default(); - header.set_number(2); - let mut parent_header: Header = Header::default(); - parent_header.set_number(1); + #[test] + fn can_verify_block_family_difficulty_fail() { + let engine = test_spec().engine; + let mut header: Header = Header::default(); + header.set_number(2); + let mut parent_header: Header = Header::default(); + parent_header.set_number(1); - let verify_result = engine.verify_block_family(&header, &parent_header); + let verify_result = engine.verify_block_family(&header, &parent_header); - match verify_result { - Err(Error(ErrorKind::Block(BlockError::InvalidDifficulty(_)), _)) => {}, - Err(_) => { panic!("should be invalid difficulty fail (got {:?})", verify_result); }, - _ => { panic!("Should be error, got Ok"); }, - } - } + match verify_result { + 
Err(Error(ErrorKind::Block(BlockError::InvalidDifficulty(_)), _)) => {} + Err(_) => { + panic!( + "should be invalid difficulty fail (got {:?})", + verify_result + ); + } + _ => { + panic!("Should be error, got Ok"); + } + } + } - #[test] - fn difficulty_frontier() { - let machine = new_homestead_test_machine(); - let ethparams = get_default_ethash_params(); - let tempdir = TempDir::new("").unwrap(); - let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); + #[test] + fn difficulty_frontier() { + let machine = new_homestead_test_machine(); + let ethparams = get_default_ethash_params(); + let tempdir = TempDir::new("").unwrap(); + let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); - let mut parent_header = Header::default(); - parent_header.set_number(1000000); - parent_header.set_difficulty(U256::from_str("b69de81a22b").unwrap()); - parent_header.set_timestamp(1455404053); - let mut header = Header::default(); - header.set_number(parent_header.number() + 1); - header.set_timestamp(1455404058); + let mut parent_header = Header::default(); + parent_header.set_number(1000000); + parent_header.set_difficulty(U256::from_str("b69de81a22b").unwrap()); + parent_header.set_timestamp(1455404053); + let mut header = Header::default(); + header.set_number(parent_header.number() + 1); + header.set_timestamp(1455404058); - let difficulty = ethash.calculate_difficulty(&header, &parent_header); - assert_eq!(U256::from_str("b6b4bbd735f").unwrap(), difficulty); - } + let difficulty = ethash.calculate_difficulty(&header, &parent_header); + assert_eq!(U256::from_str("b6b4bbd735f").unwrap(), difficulty); + } - #[test] - fn difficulty_homestead() { - let machine = new_homestead_test_machine(); - let ethparams = get_default_ethash_params(); - let tempdir = TempDir::new("").unwrap(); - let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); + #[test] + fn difficulty_homestead() { + let machine = new_homestead_test_machine(); + let ethparams = 
get_default_ethash_params(); + let tempdir = TempDir::new("").unwrap(); + let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); - let mut parent_header = Header::default(); - parent_header.set_number(1500000); - parent_header.set_difficulty(U256::from_str("1fd0fd70792b").unwrap()); - parent_header.set_timestamp(1463003133); - let mut header = Header::default(); - header.set_number(parent_header.number() + 1); - header.set_timestamp(1463003177); + let mut parent_header = Header::default(); + parent_header.set_number(1500000); + parent_header.set_difficulty(U256::from_str("1fd0fd70792b").unwrap()); + parent_header.set_timestamp(1463003133); + let mut header = Header::default(); + header.set_number(parent_header.number() + 1); + header.set_timestamp(1463003177); - let difficulty = ethash.calculate_difficulty(&header, &parent_header); - assert_eq!(U256::from_str("1fc50f118efe").unwrap(), difficulty); - } + let difficulty = ethash.calculate_difficulty(&header, &parent_header); + assert_eq!(U256::from_str("1fc50f118efe").unwrap(), difficulty); + } - #[test] - fn difficulty_classic_bomb_delay() { - let machine = new_homestead_test_machine(); - let ethparams = EthashParams { - ecip1010_pause_transition: 3000000, - ..get_default_ethash_params() - }; - let tempdir = TempDir::new("").unwrap(); - let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); + #[test] + fn difficulty_classic_bomb_delay() { + let machine = new_homestead_test_machine(); + let ethparams = EthashParams { + ecip1010_pause_transition: 3000000, + ..get_default_ethash_params() + }; + let tempdir = TempDir::new("").unwrap(); + let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); - let mut parent_header = Header::default(); - parent_header.set_number(3500000); - parent_header.set_difficulty(U256::from_str("6F62EAF8D3C").unwrap()); - parent_header.set_timestamp(1452838500); - let mut header = Header::default(); - header.set_number(parent_header.number() + 1); + let mut 
parent_header = Header::default(); + parent_header.set_number(3500000); + parent_header.set_difficulty(U256::from_str("6F62EAF8D3C").unwrap()); + parent_header.set_timestamp(1452838500); + let mut header = Header::default(); + header.set_number(parent_header.number() + 1); - header.set_timestamp(parent_header.timestamp() + 20); - assert_eq!( - U256::from_str("6F55FE9B74B").unwrap(), - ethash.calculate_difficulty(&header, &parent_header) - ); - header.set_timestamp(parent_header.timestamp() + 5); - assert_eq!( - U256::from_str("6F71D75632D").unwrap(), - ethash.calculate_difficulty(&header, &parent_header) - ); - header.set_timestamp(parent_header.timestamp() + 80); - assert_eq!( - U256::from_str("6F02746B3A5").unwrap(), - ethash.calculate_difficulty(&header, &parent_header) - ); - } + header.set_timestamp(parent_header.timestamp() + 20); + assert_eq!( + U256::from_str("6F55FE9B74B").unwrap(), + ethash.calculate_difficulty(&header, &parent_header) + ); + header.set_timestamp(parent_header.timestamp() + 5); + assert_eq!( + U256::from_str("6F71D75632D").unwrap(), + ethash.calculate_difficulty(&header, &parent_header) + ); + header.set_timestamp(parent_header.timestamp() + 80); + assert_eq!( + U256::from_str("6F02746B3A5").unwrap(), + ethash.calculate_difficulty(&header, &parent_header) + ); + } - #[test] - fn test_difficulty_bomb_continue() { - let machine = new_homestead_test_machine(); - let ethparams = EthashParams { - ecip1010_pause_transition: 3000000, - ecip1010_continue_transition: 5000000, - ..get_default_ethash_params() - }; - let tempdir = TempDir::new("").unwrap(); - let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); + #[test] + fn test_difficulty_bomb_continue() { + let machine = new_homestead_test_machine(); + let ethparams = EthashParams { + ecip1010_pause_transition: 3000000, + ecip1010_continue_transition: 5000000, + ..get_default_ethash_params() + }; + let tempdir = TempDir::new("").unwrap(); + let ethash = Ethash::new(tempdir.path(), 
ethparams, machine, None); - let mut parent_header = Header::default(); - parent_header.set_number(5000102); - parent_header.set_difficulty(U256::from_str("14944397EE8B").unwrap()); - parent_header.set_timestamp(1513175023); - let mut header = Header::default(); - header.set_number(parent_header.number() + 1); - header.set_timestamp(parent_header.timestamp() + 6); - assert_eq!( - U256::from_str("1496E6206188").unwrap(), - ethash.calculate_difficulty(&header, &parent_header) - ); - parent_header.set_number(5100123); - parent_header.set_difficulty(U256::from_str("14D24B39C7CF").unwrap()); - parent_header.set_timestamp(1514609324); - header.set_number(parent_header.number() + 1); - header.set_timestamp(parent_header.timestamp() + 41); - assert_eq!( - U256::from_str("14CA9C5D9227").unwrap(), - ethash.calculate_difficulty(&header, &parent_header) - ); - parent_header.set_number(6150001); - parent_header.set_difficulty(U256::from_str("305367B57227").unwrap()); - parent_header.set_timestamp(1529664575); - header.set_number(parent_header.number() + 1); - header.set_timestamp(parent_header.timestamp() + 105); - assert_eq!( - U256::from_str("309D09E0C609").unwrap(), - ethash.calculate_difficulty(&header, &parent_header) - ); - parent_header.set_number(8000000); - parent_header.set_difficulty(U256::from_str("1180B36D4CE5B6A").unwrap()); - parent_header.set_timestamp(1535431724); - header.set_number(parent_header.number() + 1); - header.set_timestamp(parent_header.timestamp() + 420); - assert_eq!( - U256::from_str("5126FFD5BCBB9E7").unwrap(), - ethash.calculate_difficulty(&header, &parent_header) - ); - } + let mut parent_header = Header::default(); + parent_header.set_number(5000102); + parent_header.set_difficulty(U256::from_str("14944397EE8B").unwrap()); + parent_header.set_timestamp(1513175023); + let mut header = Header::default(); + header.set_number(parent_header.number() + 1); + header.set_timestamp(parent_header.timestamp() + 6); + assert_eq!( + 
U256::from_str("1496E6206188").unwrap(), + ethash.calculate_difficulty(&header, &parent_header) + ); + parent_header.set_number(5100123); + parent_header.set_difficulty(U256::from_str("14D24B39C7CF").unwrap()); + parent_header.set_timestamp(1514609324); + header.set_number(parent_header.number() + 1); + header.set_timestamp(parent_header.timestamp() + 41); + assert_eq!( + U256::from_str("14CA9C5D9227").unwrap(), + ethash.calculate_difficulty(&header, &parent_header) + ); + parent_header.set_number(6150001); + parent_header.set_difficulty(U256::from_str("305367B57227").unwrap()); + parent_header.set_timestamp(1529664575); + header.set_number(parent_header.number() + 1); + header.set_timestamp(parent_header.timestamp() + 105); + assert_eq!( + U256::from_str("309D09E0C609").unwrap(), + ethash.calculate_difficulty(&header, &parent_header) + ); + parent_header.set_number(8000000); + parent_header.set_difficulty(U256::from_str("1180B36D4CE5B6A").unwrap()); + parent_header.set_timestamp(1535431724); + header.set_number(parent_header.number() + 1); + header.set_timestamp(parent_header.timestamp() + 420); + assert_eq!( + U256::from_str("5126FFD5BCBB9E7").unwrap(), + ethash.calculate_difficulty(&header, &parent_header) + ); + } - #[test] - fn difficulty_max_timestamp() { - let machine = new_homestead_test_machine(); - let ethparams = get_default_ethash_params(); - let tempdir = TempDir::new("").unwrap(); - let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); + #[test] + fn difficulty_max_timestamp() { + let machine = new_homestead_test_machine(); + let ethparams = get_default_ethash_params(); + let tempdir = TempDir::new("").unwrap(); + let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); - let mut parent_header = Header::default(); - parent_header.set_number(1000000); - parent_header.set_difficulty(U256::from_str("b69de81a22b").unwrap()); - parent_header.set_timestamp(1455404053); - let mut header = Header::default(); - 
header.set_number(parent_header.number() + 1); - header.set_timestamp(u64::max_value()); + let mut parent_header = Header::default(); + parent_header.set_number(1000000); + parent_header.set_difficulty(U256::from_str("b69de81a22b").unwrap()); + parent_header.set_timestamp(1455404053); + let mut header = Header::default(); + header.set_number(parent_header.number() + 1); + header.set_timestamp(u64::max_value()); - let difficulty = ethash.calculate_difficulty(&header, &parent_header); - assert_eq!(U256::from(12543204905719u64), difficulty); - } + let difficulty = ethash.calculate_difficulty(&header, &parent_header); + assert_eq!(U256::from(12543204905719u64), difficulty); + } - #[test] - fn test_extra_info() { - let machine = new_homestead_test_machine(); - let ethparams = get_default_ethash_params(); - let tempdir = TempDir::new("").unwrap(); - let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); - let mut header = Header::default(); - header.set_seal(vec![rlp::encode(&H256::from("b251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d")), rlp::encode(&H64::zero())]); - let info = ethash.extra_info(&header); - assert_eq!(info["nonce"], "0x0000000000000000"); - assert_eq!(info["mixHash"], "0xb251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d"); - } + #[test] + fn test_extra_info() { + let machine = new_homestead_test_machine(); + let ethparams = get_default_ethash_params(); + let tempdir = TempDir::new("").unwrap(); + let ethash = Ethash::new(tempdir.path(), ethparams, machine, None); + let mut header = Header::default(); + header.set_seal(vec![ + rlp::encode(&H256::from( + "b251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d", + )), + rlp::encode(&H64::zero()), + ]); + let info = ethash.extra_info(&header); + assert_eq!(info["nonce"], "0x0000000000000000"); + assert_eq!( + info["mixHash"], + "0xb251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d" + ); + } } diff --git a/ethcore/src/ethereum/mod.rs 
b/ethcore/src/ethereum/mod.rs index 5ba9c2948..46d834d36 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -19,236 +19,398 @@ //! Contains all Ethereum network specific stuff, such as denominations and //! consensus specifications. -/// Export the ethash module. -pub mod ethash; /// Export the denominations module. pub mod denominations; +/// Export the ethash module. +pub mod ethash; -pub use self::ethash::{Ethash}; -pub use self::denominations::*; +pub use self::{denominations::*, ethash::Ethash}; -use machine::EthereumMachine; use super::spec::*; +use machine::EthereumMachine; /// Load chain spec from `SpecParams` and JSON. pub fn load<'a, T: Into>>>(params: T, b: &[u8]) -> Spec { - match params.into() { - Some(params) => Spec::load(params, b), - None => Spec::load(&::std::env::temp_dir(), b) - }.expect("chain spec is invalid") + match params.into() { + Some(params) => Spec::load(params, b), + None => Spec::load(&::std::env::temp_dir(), b), + } + .expect("chain spec is invalid") } fn load_machine(b: &[u8]) -> EthereumMachine { - Spec::load_machine(b).expect("chain spec is invalid") + Spec::load_machine(b).expect("chain spec is invalid") } /// Create a new Foundation mainnet chain spec. pub fn new_foundation<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/foundation.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/foundation.json"), + ) } /// Create a new Classic mainnet chain spec without the DAO hardfork. pub fn new_classic<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/classic.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/classic.json"), + ) } /// Create a new POA Network mainnet chain spec. 
pub fn new_poanet<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/poacore.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/poacore.json"), + ) } /// Create a new xDai chain spec. pub fn new_xdai<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/xdai.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/xdai.json"), + ) } /// Create a new Volta mainnet chain spec. pub fn new_volta<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/volta.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/volta.json"), + ) } /// Create a new EWC mainnet chain spec. pub fn new_ewc<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/ewc.json")) + load(params.into(), include_bytes!("../../res/ethereum/ewc.json")) } /// Create a new Expanse mainnet chain spec. pub fn new_expanse<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/expanse.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/expanse.json"), + ) } /// Create a new Musicoin mainnet chain spec. pub fn new_musicoin<'a, T: Into>>(params: T) -> Spec { - // The musicoin chain spec uses a block reward contract which can be found at - // https://gist.github.com/andresilva/6f2afaf9486732a0797f4bdeae018ee9 - load(params.into(), include_bytes!("../../res/ethereum/musicoin.json")) + // The musicoin chain spec uses a block reward contract which can be found at + // https://gist.github.com/andresilva/6f2afaf9486732a0797f4bdeae018ee9 + load( + params.into(), + include_bytes!("../../res/ethereum/musicoin.json"), + ) } /// Create a new Ellaism mainnet chain spec. 
pub fn new_ellaism<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/ellaism.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/ellaism.json"), + ) } /// Create a new MIX mainnet chain spec. pub fn new_mix<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/mix.json")) + load(params.into(), include_bytes!("../../res/ethereum/mix.json")) } /// Create a new Callisto chain spec pub fn new_callisto<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/callisto.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/callisto.json"), + ) } /// Create a new Morden testnet chain spec. pub fn new_morden<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/morden.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/morden.json"), + ) } /// Create a new Ropsten testnet chain spec. pub fn new_ropsten<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/ropsten.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/ropsten.json"), + ) } /// Create a new Kovan testnet chain spec. pub fn new_kovan<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/kovan.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/kovan.json"), + ) } /// Create a new Rinkeby testnet chain spec. pub fn new_rinkeby<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/rinkeby.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/rinkeby.json"), + ) } /// Create a new Görli testnet chain spec. pub fn new_goerli<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/goerli.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/goerli.json"), + ) } /// Create a new Kotti testnet chain spec. 
pub fn new_kotti<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/kotti.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/kotti.json"), + ) } /// Create a new POA Sokol testnet chain spec. pub fn new_sokol<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/poasokol.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/poasokol.json"), + ) } /// Create a new Morodor testnet chain spec. pub fn new_mordor<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/mordor.json")) + load( + params.into(), + include_bytes!("../../res/ethereum/mordor.json"), + ) } // For tests /// Create a new Foundation Frontier-era chain spec as though it never changes to Homestead. -pub fn new_frontier_test() -> Spec { load(None, include_bytes!("../../res/ethereum/frontier_test.json")) } +pub fn new_frontier_test() -> Spec { + load( + None, + include_bytes!("../../res/ethereum/frontier_test.json"), + ) +} /// Create a new Ropsten chain spec. -pub fn new_ropsten_test() -> Spec { load(None, include_bytes!("../../res/ethereum/ropsten.json")) } +pub fn new_ropsten_test() -> Spec { + load(None, include_bytes!("../../res/ethereum/ropsten.json")) +} /// Create a new Foundation Homestead-era chain spec as though it never changed from Frontier. -pub fn new_homestead_test() -> Spec { load(None, include_bytes!("../../res/ethereum/homestead_test.json")) } +pub fn new_homestead_test() -> Spec { + load( + None, + include_bytes!("../../res/ethereum/homestead_test.json"), + ) +} /// Create a new Foundation Homestead-EIP150-era chain spec as though it never changed from Homestead/Frontier. 
-pub fn new_eip150_test() -> Spec { load(None, include_bytes!("../../res/ethereum/eip150_test.json")) } +pub fn new_eip150_test() -> Spec { + load(None, include_bytes!("../../res/ethereum/eip150_test.json")) +} /// Create a new Foundation Homestead-EIP161-era chain spec as though it never changed from Homestead/Frontier. -pub fn new_eip161_test() -> Spec { load(None, include_bytes!("../../res/ethereum/eip161_test.json")) } +pub fn new_eip161_test() -> Spec { + load(None, include_bytes!("../../res/ethereum/eip161_test.json")) +} /// Create a new Foundation Frontier/Homestead/DAO chain spec with transition points at #5 and #8. -pub fn new_transition_test() -> Spec { load(None, include_bytes!("../../res/ethereum/transition_test.json")) } +pub fn new_transition_test() -> Spec { + load( + None, + include_bytes!("../../res/ethereum/transition_test.json"), + ) +} /// Create a new Foundation Mainnet chain spec without genesis accounts. -pub fn new_mainnet_like() -> Spec { load(None, include_bytes!("../../res/ethereum/frontier_like_test.json")) } +pub fn new_mainnet_like() -> Spec { + load( + None, + include_bytes!("../../res/ethereum/frontier_like_test.json"), + ) +} /// Create a new Foundation Byzantium era spec. -pub fn new_byzantium_test() -> Spec { load(None, include_bytes!("../../res/ethereum/byzantium_test.json")) } +pub fn new_byzantium_test() -> Spec { + load( + None, + include_bytes!("../../res/ethereum/byzantium_test.json"), + ) +} /// Create a new Foundation Constantinople era spec. -pub fn new_constantinople_test() -> Spec { load(None, include_bytes!("../../res/ethereum/constantinople_test.json")) } +pub fn new_constantinople_test() -> Spec { + load( + None, + include_bytes!("../../res/ethereum/constantinople_test.json"), + ) +} /// Create a new Foundation St. Peter's (Contantinople Fix) era spec. 
-pub fn new_constantinople_fix_test() -> Spec { load(None, include_bytes!("../../res/ethereum/st_peters_test.json")) } +pub fn new_constantinople_fix_test() -> Spec { + load( + None, + include_bytes!("../../res/ethereum/st_peters_test.json"), + ) +} /// Create a new Foundation Istanbul era spec. -pub fn new_istanbul_test() -> Spec { load(None, include_bytes!("../../res/ethereum/istanbul_test.json")) } +pub fn new_istanbul_test() -> Spec { + load( + None, + include_bytes!("../../res/ethereum/istanbul_test.json"), + ) +} /// Create a new Musicoin-MCIP3-era spec. -pub fn new_mcip3_test() -> Spec { load(None, include_bytes!("../../res/ethereum/mcip3_test.json")) } +pub fn new_mcip3_test() -> Spec { + load(None, include_bytes!("../../res/ethereum/mcip3_test.json")) +} // For tests /// Create a new Foundation Frontier-era chain spec as though it never changes to Homestead. -pub fn new_frontier_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/frontier_test.json")) } +pub fn new_frontier_test_machine() -> EthereumMachine { + load_machine(include_bytes!("../../res/ethereum/frontier_test.json")) +} /// Create a new Foundation Homestead-era chain spec as though it never changed from Frontier. -pub fn new_homestead_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/homestead_test.json")) } +pub fn new_homestead_test_machine() -> EthereumMachine { + load_machine(include_bytes!("../../res/ethereum/homestead_test.json")) +} /// Create a new Foundation Homestead-EIP210-era chain spec as though it never changed from Homestead/Frontier. -pub fn new_eip210_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/eip210_test.json")) } +pub fn new_eip210_test_machine() -> EthereumMachine { + load_machine(include_bytes!("../../res/ethereum/eip210_test.json")) +} /// Create a new Foundation Byzantium era spec. 
-pub fn new_byzantium_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/byzantium_test.json")) } +pub fn new_byzantium_test_machine() -> EthereumMachine { + load_machine(include_bytes!("../../res/ethereum/byzantium_test.json")) +} /// Create a new Foundation Constantinople era spec. -pub fn new_constantinople_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/constantinople_test.json")) } +pub fn new_constantinople_test_machine() -> EthereumMachine { + load_machine(include_bytes!( + "../../res/ethereum/constantinople_test.json" + )) +} /// Create a new Foundation St. Peter's (Contantinople Fix) era spec. -pub fn new_constantinople_fix_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/st_peters_test.json")) } +pub fn new_constantinople_fix_test_machine() -> EthereumMachine { + load_machine(include_bytes!("../../res/ethereum/st_peters_test.json")) +} /// Create a new Foundation Istanbul era spec. -pub fn new_istanbul_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/istanbul_test.json")) } +pub fn new_istanbul_test_machine() -> EthereumMachine { + load_machine(include_bytes!("../../res/ethereum/istanbul_test.json")) +} /// Create a new Musicoin-MCIP3-era spec. 
-pub fn new_mcip3_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/mcip3_test.json")) } +pub fn new_mcip3_test_machine() -> EthereumMachine { + load_machine(include_bytes!("../../res/ethereum/mcip3_test.json")) +} /// Create new Kovan spec with wasm activated at certain block -pub fn new_kovan_wasm_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/kovan_wasm_test.json")) } +pub fn new_kovan_wasm_test_machine() -> EthereumMachine { + load_machine(include_bytes!("../../res/ethereum/kovan_wasm_test.json")) +} #[cfg(test)] mod tests { - use ethereum_types::U256; - use state::*; - use super::*; - use test_helpers::get_temp_state_db; - use types::view; - use types::views::BlockView; + use super::*; + use ethereum_types::U256; + use state::*; + use test_helpers::get_temp_state_db; + use types::{view, views::BlockView}; - #[test] - fn ensure_db_good() { - let spec = new_morden(&::std::env::temp_dir()); - let engine = &spec.engine; - let genesis_header = spec.genesis_header(); - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(0), Default::default()).unwrap(); - assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()).unwrap(), 1u64.into()); - assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()).unwrap(), 1u64.into()); - assert_eq!(s.balance(&"0000000000000000000000000000000000000003".into()).unwrap(), 1u64.into()); - assert_eq!(s.balance(&"0000000000000000000000000000000000000004".into()).unwrap(), 1u64.into()); - assert_eq!(s.balance(&"102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c".into()).unwrap(), U256::from(1u64) << 200); - assert_eq!(s.balance(&"0000000000000000000000000000000000000000".into()).unwrap(), 0u64.into()); - } + #[test] + fn ensure_db_good() { + let spec = new_morden(&::std::env::temp_dir()); + let engine = &spec.engine; + let 
genesis_header = spec.genesis_header(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let s = State::from_existing( + db, + genesis_header.state_root().clone(), + engine.account_start_nonce(0), + Default::default(), + ) + .unwrap(); + assert_eq!( + s.balance(&"0000000000000000000000000000000000000001".into()) + .unwrap(), + 1u64.into() + ); + assert_eq!( + s.balance(&"0000000000000000000000000000000000000002".into()) + .unwrap(), + 1u64.into() + ); + assert_eq!( + s.balance(&"0000000000000000000000000000000000000003".into()) + .unwrap(), + 1u64.into() + ); + assert_eq!( + s.balance(&"0000000000000000000000000000000000000004".into()) + .unwrap(), + 1u64.into() + ); + assert_eq!( + s.balance(&"102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c".into()) + .unwrap(), + U256::from(1u64) << 200 + ); + assert_eq!( + s.balance(&"0000000000000000000000000000000000000000".into()) + .unwrap(), + 0u64.into() + ); + } - #[test] - fn morden() { - let morden = new_morden(&::std::env::temp_dir()); + #[test] + fn morden() { + let morden = new_morden(&::std::env::temp_dir()); - assert_eq!(morden.state_root(), "f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9".into()); - let genesis = morden.genesis_block(); - assert_eq!(view!(BlockView, &genesis).header_view().hash(), "0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303".into()); + assert_eq!( + morden.state_root(), + "f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9".into() + ); + let genesis = morden.genesis_block(); + assert_eq!( + view!(BlockView, &genesis).header_view().hash(), + "0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303".into() + ); - let _ = morden.engine; - } + let _ = morden.engine; + } - #[test] - fn frontier() { - let frontier = new_foundation(&::std::env::temp_dir()); + #[test] + fn frontier() { + let frontier = new_foundation(&::std::env::temp_dir()); - assert_eq!(frontier.state_root(), 
"d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544".into()); - let genesis = frontier.genesis_block(); - assert_eq!(view!(BlockView, &genesis).header_view().hash(), "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3".into()); + assert_eq!( + frontier.state_root(), + "d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544".into() + ); + let genesis = frontier.genesis_block(); + assert_eq!( + view!(BlockView, &genesis).header_view().hash(), + "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3".into() + ); - let _ = frontier.engine; - } + let _ = frontier.engine; + } } diff --git a/ethcore/src/executed.rs b/ethcore/src/executed.rs index 10e06fd05..ea5e24341 100644 --- a/ethcore/src/executed.rs +++ b/ethcore/src/executed.rs @@ -16,184 +16,204 @@ //! Transaction execution format module. -use ethereum_types::{U256, U512, Address}; use bytes::Bytes; +use ethereum_types::{Address, U256, U512}; use ethtrie; +use trace::{FlatTrace, VMTrace}; +use types::{log_entry::LogEntry, state_diff::StateDiff}; use vm; -use trace::{VMTrace, FlatTrace}; -use types::state_diff::StateDiff; -use types::log_entry::LogEntry; -use std::{fmt, error}; +use std::{error, fmt}; /// Transaction execution receipt. #[derive(Debug, PartialEq, Clone)] pub struct Executed { - /// True if the outer call/create resulted in an exceptional exit. - pub exception: Option, + /// True if the outer call/create resulted in an exceptional exit. + pub exception: Option, - /// Gas paid up front for execution of transaction. - pub gas: U256, + /// Gas paid up front for execution of transaction. + pub gas: U256, - /// Gas used during execution of transaction. - pub gas_used: U256, + /// Gas used during execution of transaction. + pub gas_used: U256, - /// Gas refunded after the execution of transaction. - /// To get gas that was required up front, add `refunded` and `gas_used`. - pub refunded: U256, + /// Gas refunded after the execution of transaction. 
+ /// To get gas that was required up front, add `refunded` and `gas_used`. + pub refunded: U256, - /// Cumulative gas used in current block so far. - /// - /// `cumulative_gas_used = gas_used(t0) + gas_used(t1) + ... gas_used(tn)` - /// - /// where `tn` is current transaction. - pub cumulative_gas_used: U256, + /// Cumulative gas used in current block so far. + /// + /// `cumulative_gas_used = gas_used(t0) + gas_used(t1) + ... gas_used(tn)` + /// + /// where `tn` is current transaction. + pub cumulative_gas_used: U256, - /// Vector of logs generated by transaction. - pub logs: Vec, + /// Vector of logs generated by transaction. + pub logs: Vec, - /// Addresses of contracts created during execution of transaction. - /// Ordered from earliest creation. - /// - /// eg. sender creates contract A and A in constructor creates contract B - /// - /// B creation ends first, and it will be the first element of the vector. - pub contracts_created: Vec
, - /// Transaction output. - pub output: Bytes, - /// The trace of this transaction. - pub trace: Vec, - /// The VM trace of this transaction. - pub vm_trace: Option, - /// The state diff, if we traced it. - pub state_diff: Option, + /// Addresses of contracts created during execution of transaction. + /// Ordered from earliest creation. + /// + /// eg. sender creates contract A and A in constructor creates contract B + /// + /// B creation ends first, and it will be the first element of the vector. + pub contracts_created: Vec
, + /// Transaction output. + pub output: Bytes, + /// The trace of this transaction. + pub trace: Vec, + /// The VM trace of this transaction. + pub vm_trace: Option, + /// The state diff, if we traced it. + pub state_diff: Option, } /// Result of executing the transaction. #[derive(PartialEq, Debug, Clone)] pub enum ExecutionError { - /// Returned when there gas paid for transaction execution is - /// lower than base gas required. - NotEnoughBaseGas { - /// Absolute minimum gas required. - required: U256, - /// Gas provided. - got: U256 - }, - /// Returned when block (gas_used + gas) > gas_limit. - /// - /// If gas =< gas_limit, upstream may try to execute the transaction - /// in next block. - BlockGasLimitReached { - /// Gas limit of block for transaction. - gas_limit: U256, - /// Gas used in block prior to transaction. - gas_used: U256, - /// Amount of gas in block. - gas: U256 - }, - /// Returned when transaction nonce does not match state nonce. - InvalidNonce { - /// Nonce expected. - expected: U256, - /// Nonce found. - got: U256 - }, - /// Returned when cost of transaction (value + gas_price * gas) exceeds - /// current sender balance. - NotEnoughCash { - /// Minimum required balance. - required: U512, - /// Actual balance. - got: U512 - }, - /// When execution tries to modify the state in static context - MutableCallInStaticContext, - /// Returned when transacting from a non-existing account with dust protection enabled. - SenderMustExist, - /// Returned when internal evm error occurs. - Internal(String), - /// Returned when generic transaction occurs - TransactionMalformed(String), + /// Returned when there gas paid for transaction execution is + /// lower than base gas required. + NotEnoughBaseGas { + /// Absolute minimum gas required. + required: U256, + /// Gas provided. + got: U256, + }, + /// Returned when block (gas_used + gas) > gas_limit. + /// + /// If gas =< gas_limit, upstream may try to execute the transaction + /// in next block. 
+ BlockGasLimitReached { + /// Gas limit of block for transaction. + gas_limit: U256, + /// Gas used in block prior to transaction. + gas_used: U256, + /// Amount of gas in block. + gas: U256, + }, + /// Returned when transaction nonce does not match state nonce. + InvalidNonce { + /// Nonce expected. + expected: U256, + /// Nonce found. + got: U256, + }, + /// Returned when cost of transaction (value + gas_price * gas) exceeds + /// current sender balance. + NotEnoughCash { + /// Minimum required balance. + required: U512, + /// Actual balance. + got: U512, + }, + /// When execution tries to modify the state in static context + MutableCallInStaticContext, + /// Returned when transacting from a non-existing account with dust protection enabled. + SenderMustExist, + /// Returned when internal evm error occurs. + Internal(String), + /// Returned when generic transaction occurs + TransactionMalformed(String), } impl From> for ExecutionError { - fn from(err: Box) -> Self { - ExecutionError::Internal(format!("{:?}", err)) - } + fn from(err: Box) -> Self { + ExecutionError::Internal(format!("{:?}", err)) + } } impl From for ExecutionError { - fn from(err: ethtrie::TrieError) -> Self { - ExecutionError::Internal(format!("{:?}", err)) - } + fn from(err: ethtrie::TrieError) -> Self { + ExecutionError::Internal(format!("{:?}", err)) + } } impl fmt::Display for ExecutionError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::ExecutionError::*; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::ExecutionError::*; - let msg = match *self { - NotEnoughBaseGas { ref required, ref got } => - format!("Not enough base gas. {} is required, but only {} paid", required, got), - BlockGasLimitReached { ref gas_limit, ref gas_used, ref gas } => - format!("Block gas limit reached. 
The limit is {}, {} has \ - already been used, and {} more is required", gas_limit, gas_used, gas), - InvalidNonce { ref expected, ref got } => - format!("Invalid transaction nonce: expected {}, found {}", expected, got), - NotEnoughCash { ref required, ref got } => - format!("Cost of transaction exceeds sender balance. {} is required \ - but the sender only has {}", required, got), - MutableCallInStaticContext => "Mutable Call in static context".to_owned(), - SenderMustExist => "Transacting from an empty account".to_owned(), - Internal(ref msg) => msg.clone(), - TransactionMalformed(ref err) => format!("Malformed transaction: {}", err), - }; + let msg = match *self { + NotEnoughBaseGas { + ref required, + ref got, + } => format!( + "Not enough base gas. {} is required, but only {} paid", + required, got + ), + BlockGasLimitReached { + ref gas_limit, + ref gas_used, + ref gas, + } => format!( + "Block gas limit reached. The limit is {}, {} has \ + already been used, and {} more is required", + gas_limit, gas_used, gas + ), + InvalidNonce { + ref expected, + ref got, + } => format!( + "Invalid transaction nonce: expected {}, found {}", + expected, got + ), + NotEnoughCash { + ref required, + ref got, + } => format!( + "Cost of transaction exceeds sender balance. {} is required \ + but the sender only has {}", + required, got + ), + MutableCallInStaticContext => "Mutable Call in static context".to_owned(), + SenderMustExist => "Transacting from an empty account".to_owned(), + Internal(ref msg) => msg.clone(), + TransactionMalformed(ref err) => format!("Malformed transaction: {}", err), + }; - f.write_fmt(format_args!("Transaction execution error ({}).", msg)) - } + f.write_fmt(format_args!("Transaction execution error ({}).", msg)) + } } impl error::Error for ExecutionError { - fn description(&self) -> &str { - "Transaction execution error" - } + fn description(&self) -> &str { + "Transaction execution error" + } } /// Result of executing the transaction. 
#[derive(PartialEq, Debug, Clone)] pub enum CallError { - /// Couldn't find the transaction in the chain. - TransactionNotFound, - /// Couldn't find requested block's state in the chain. - StatePruned, - /// Couldn't find an amount of gas that didn't result in an exception. - Exceptional(vm::Error), - /// Corrupt state. - StateCorrupt, - /// Error executing. - Execution(ExecutionError), + /// Couldn't find the transaction in the chain. + TransactionNotFound, + /// Couldn't find requested block's state in the chain. + StatePruned, + /// Couldn't find an amount of gas that didn't result in an exception. + Exceptional(vm::Error), + /// Corrupt state. + StateCorrupt, + /// Error executing. + Execution(ExecutionError), } impl From for CallError { - fn from(error: ExecutionError) -> Self { - CallError::Execution(error) - } + fn from(error: ExecutionError) -> Self { + CallError::Execution(error) + } } impl fmt::Display for CallError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::CallError::*; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::CallError::*; - let msg = match *self { - TransactionNotFound => "Transaction couldn't be found in the chain".into(), - StatePruned => "Couldn't find the transaction block's state in the chain".into(), - Exceptional(ref e) => format!("An exception ({}) happened in the execution", e), - StateCorrupt => "Stored state found to be corrupted.".into(), - Execution(ref e) => format!("{}", e), - }; + let msg = match *self { + TransactionNotFound => "Transaction couldn't be found in the chain".into(), + StatePruned => "Couldn't find the transaction block's state in the chain".into(), + Exceptional(ref e) => format!("An exception ({}) happened in the execution", e), + StateCorrupt => "Stored state found to be corrupted.".into(), + Execution(ref e) => format!("{}", e), + }; - f.write_fmt(format_args!("Transaction execution error ({}).", msg)) - } + f.write_fmt(format_args!("Transaction execution error 
({}).", msg)) + } } /// Transaction execution result. diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 17c86bc46..1731e630e 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -15,26 +15,25 @@ // along with Parity Ethereum. If not, see . //! Transaction Execution environment. -use std::cmp; -use std::sync::Arc; -use hash::keccak; -use ethereum_types::{H256, U256, U512, Address}; use bytes::{Bytes, BytesRef}; -use state::{Backend as StateBackend, State, Substate, CleanupMode}; -use executed::ExecutionError; -use machine::EthereumMachine as Machine; -use evm::{CallType, Finalize, FinalizationResult}; -use vm::{ - self, EnvInfo, CreateContractAddress, ReturnData, CleanDustMode, ActionParams, - ActionValue, Schedule, TrapError, ResumeCall, ResumeCreate -}; -use factory::VmFactory; -use externalities::*; -use trace::{self, Tracer, VMTracer}; -use types::transaction::{Action, SignedTransaction}; -use transaction_ext::Transaction; use crossbeam_utils::thread; +use ethereum_types::{Address, H256, U256, U512}; +use evm::{CallType, FinalizationResult, Finalize}; +use executed::ExecutionError; pub use executed::{Executed, ExecutionResult}; +use externalities::*; +use factory::VmFactory; +use hash::keccak; +use machine::EthereumMachine as Machine; +use state::{Backend as StateBackend, CleanupMode, State, Substate}; +use std::{cmp, sync::Arc}; +use trace::{self, Tracer, VMTracer}; +use transaction_ext::Transaction; +use types::transaction::{Action, SignedTransaction}; +use vm::{ + self, ActionParams, ActionValue, CleanDustMode, CreateContractAddress, EnvInfo, ResumeCall, + ResumeCreate, ReturnData, Schedule, TrapError, +}; #[cfg(debug_assertions)] /// Roughly estimate what stack size each level of evm depth will use. 
(Debug build) @@ -53,564 +52,825 @@ const STACK_SIZE_ENTRY_OVERHEAD: usize = 100 * 1024; const STACK_SIZE_ENTRY_OVERHEAD: usize = 20 * 1024; /// Returns new address created from address, nonce, and code hash -pub fn contract_address(address_scheme: CreateContractAddress, sender: &Address, nonce: &U256, code: &[u8]) -> (Address, Option) { - use rlp::RlpStream; +pub fn contract_address( + address_scheme: CreateContractAddress, + sender: &Address, + nonce: &U256, + code: &[u8], +) -> (Address, Option) { + use rlp::RlpStream; - match address_scheme { - CreateContractAddress::FromSenderAndNonce => { - let mut stream = RlpStream::new_list(2); - stream.append(sender); - stream.append(nonce); - (From::from(keccak(stream.as_raw())), None) - }, - CreateContractAddress::FromSenderSaltAndCodeHash(salt) => { - let code_hash = keccak(code); - let mut buffer = [0u8; 1 + 20 + 32 + 32]; - buffer[0] = 0xff; - &mut buffer[1..(1+20)].copy_from_slice(&sender[..]); - &mut buffer[(1+20)..(1+20+32)].copy_from_slice(&salt[..]); - &mut buffer[(1+20+32)..].copy_from_slice(&code_hash[..]); - (From::from(keccak(&buffer[..])), Some(code_hash)) - }, - CreateContractAddress::FromSenderAndCodeHash => { - let code_hash = keccak(code); - let mut buffer = [0u8; 20 + 32]; - &mut buffer[..20].copy_from_slice(&sender[..]); - &mut buffer[20..].copy_from_slice(&code_hash[..]); - (From::from(keccak(&buffer[..])), Some(code_hash)) - }, - } + match address_scheme { + CreateContractAddress::FromSenderAndNonce => { + let mut stream = RlpStream::new_list(2); + stream.append(sender); + stream.append(nonce); + (From::from(keccak(stream.as_raw())), None) + } + CreateContractAddress::FromSenderSaltAndCodeHash(salt) => { + let code_hash = keccak(code); + let mut buffer = [0u8; 1 + 20 + 32 + 32]; + buffer[0] = 0xff; + &mut buffer[1..(1 + 20)].copy_from_slice(&sender[..]); + &mut buffer[(1 + 20)..(1 + 20 + 32)].copy_from_slice(&salt[..]); + &mut buffer[(1 + 20 + 32)..].copy_from_slice(&code_hash[..]); + 
(From::from(keccak(&buffer[..])), Some(code_hash)) + } + CreateContractAddress::FromSenderAndCodeHash => { + let code_hash = keccak(code); + let mut buffer = [0u8; 20 + 32]; + &mut buffer[..20].copy_from_slice(&sender[..]); + &mut buffer[20..].copy_from_slice(&code_hash[..]); + (From::from(keccak(&buffer[..])), Some(code_hash)) + } + } } /// Convert a finalization result into a VM message call result. pub fn into_message_call_result(result: vm::Result) -> vm::MessageCallResult { - match result { - Ok(FinalizationResult { gas_left, return_data, apply_state: true }) => vm::MessageCallResult::Success(gas_left, return_data), - Ok(FinalizationResult { gas_left, return_data, apply_state: false }) => vm::MessageCallResult::Reverted(gas_left, return_data), - _ => vm::MessageCallResult::Failed - } + match result { + Ok(FinalizationResult { + gas_left, + return_data, + apply_state: true, + }) => vm::MessageCallResult::Success(gas_left, return_data), + Ok(FinalizationResult { + gas_left, + return_data, + apply_state: false, + }) => vm::MessageCallResult::Reverted(gas_left, return_data), + _ => vm::MessageCallResult::Failed, + } } /// Convert a finalization result into a VM contract create result. -pub fn into_contract_create_result(result: vm::Result, address: &Address, substate: &mut Substate) -> vm::ContractCreateResult { - match result { - Ok(FinalizationResult { gas_left, apply_state: true, .. }) => { - substate.contracts_created.push(address.clone()); - vm::ContractCreateResult::Created(address.clone(), gas_left) - }, - Ok(FinalizationResult { gas_left, apply_state: false, return_data }) => { - vm::ContractCreateResult::Reverted(gas_left, return_data) - }, - _ => vm::ContractCreateResult::Failed, - } +pub fn into_contract_create_result( + result: vm::Result, + address: &Address, + substate: &mut Substate, +) -> vm::ContractCreateResult { + match result { + Ok(FinalizationResult { + gas_left, + apply_state: true, + .. 
+ }) => { + substate.contracts_created.push(address.clone()); + vm::ContractCreateResult::Created(address.clone(), gas_left) + } + Ok(FinalizationResult { + gas_left, + apply_state: false, + return_data, + }) => vm::ContractCreateResult::Reverted(gas_left, return_data), + _ => vm::ContractCreateResult::Failed, + } } /// Transaction execution options. #[derive(Copy, Clone, PartialEq)] pub struct TransactOptions { - /// Enable call tracing. - pub tracer: T, - /// Enable VM tracing. - pub vm_tracer: V, - /// Check transaction nonce before execution. - pub check_nonce: bool, - /// Records the output from init contract calls. - pub output_from_init_contract: bool, + /// Enable call tracing. + pub tracer: T, + /// Enable VM tracing. + pub vm_tracer: V, + /// Check transaction nonce before execution. + pub check_nonce: bool, + /// Records the output from init contract calls. + pub output_from_init_contract: bool, } impl TransactOptions { - /// Create new `TransactOptions` with given tracer and VM tracer. - pub fn new(tracer: T, vm_tracer: V) -> Self { - TransactOptions { - tracer, - vm_tracer, - check_nonce: true, - output_from_init_contract: false, - } - } + /// Create new `TransactOptions` with given tracer and VM tracer. + pub fn new(tracer: T, vm_tracer: V) -> Self { + TransactOptions { + tracer, + vm_tracer, + check_nonce: true, + output_from_init_contract: false, + } + } - /// Disables the nonce check - pub fn dont_check_nonce(mut self) -> Self { - self.check_nonce = false; - self - } + /// Disables the nonce check + pub fn dont_check_nonce(mut self) -> Self { + self.check_nonce = false; + self + } - /// Saves the output from contract creation. - pub fn save_output_from_contract(mut self) -> Self { - self.output_from_init_contract = true; - self - } + /// Saves the output from contract creation. 
+ pub fn save_output_from_contract(mut self) -> Self { + self.output_from_init_contract = true; + self + } } impl TransactOptions { - /// Creates new `TransactOptions` with default tracing and VM tracing. - pub fn with_tracing_and_vm_tracing() -> Self { - TransactOptions { - tracer: trace::ExecutiveTracer::default(), - vm_tracer: trace::ExecutiveVMTracer::toplevel(), - check_nonce: true, - output_from_init_contract: false, - } - } + /// Creates new `TransactOptions` with default tracing and VM tracing. + pub fn with_tracing_and_vm_tracing() -> Self { + TransactOptions { + tracer: trace::ExecutiveTracer::default(), + vm_tracer: trace::ExecutiveVMTracer::toplevel(), + check_nonce: true, + output_from_init_contract: false, + } + } } impl TransactOptions { - /// Creates new `TransactOptions` with default tracing and no VM tracing. - pub fn with_tracing() -> Self { - TransactOptions { - tracer: trace::ExecutiveTracer::default(), - vm_tracer: trace::NoopVMTracer, - check_nonce: true, - output_from_init_contract: false, - } - } + /// Creates new `TransactOptions` with default tracing and no VM tracing. + pub fn with_tracing() -> Self { + TransactOptions { + tracer: trace::ExecutiveTracer::default(), + vm_tracer: trace::NoopVMTracer, + check_nonce: true, + output_from_init_contract: false, + } + } } impl TransactOptions { - /// Creates new `TransactOptions` with no tracing and default VM tracing. - pub fn with_vm_tracing() -> Self { - TransactOptions { - tracer: trace::NoopTracer, - vm_tracer: trace::ExecutiveVMTracer::toplevel(), - check_nonce: true, - output_from_init_contract: false, - } - } + /// Creates new `TransactOptions` with no tracing and default VM tracing. + pub fn with_vm_tracing() -> Self { + TransactOptions { + tracer: trace::NoopTracer, + vm_tracer: trace::ExecutiveVMTracer::toplevel(), + check_nonce: true, + output_from_init_contract: false, + } + } } impl TransactOptions { - /// Creates new `TransactOptions` without any tracing. 
- pub fn with_no_tracing() -> Self { - TransactOptions { - tracer: trace::NoopTracer, - vm_tracer: trace::NoopVMTracer, - check_nonce: true, - output_from_init_contract: false, - } - } + /// Creates new `TransactOptions` without any tracing. + pub fn with_no_tracing() -> Self { + TransactOptions { + tracer: trace::NoopTracer, + vm_tracer: trace::NoopVMTracer, + check_nonce: true, + output_from_init_contract: false, + } + } } /// Trap result returned by executive. -pub type ExecutiveTrapResult<'a, T> = vm::TrapResult, CallCreateExecutive<'a>>; +pub type ExecutiveTrapResult<'a, T> = + vm::TrapResult, CallCreateExecutive<'a>>; /// Trap error for executive. pub type ExecutiveTrapError<'a> = vm::TrapError, CallCreateExecutive<'a>>; enum CallCreateExecutiveKind { - Transfer(ActionParams), - CallBuiltin(ActionParams), - ExecCall(ActionParams, Substate), - ExecCreate(ActionParams, Substate), - ResumeCall(OriginInfo, Box, Substate), - ResumeCreate(OriginInfo, Box, Substate), + Transfer(ActionParams), + CallBuiltin(ActionParams), + ExecCall(ActionParams, Substate), + ExecCreate(ActionParams, Substate), + ResumeCall(OriginInfo, Box, Substate), + ResumeCreate(OriginInfo, Box, Substate), } /// Executive for a raw call/create action. pub struct CallCreateExecutive<'a> { - info: &'a EnvInfo, - machine: &'a Machine, - schedule: &'a Schedule, - factory: &'a VmFactory, - depth: usize, - stack_depth: usize, - static_flag: bool, - is_create: bool, - gas: U256, - kind: CallCreateExecutiveKind, + info: &'a EnvInfo, + machine: &'a Machine, + schedule: &'a Schedule, + factory: &'a VmFactory, + depth: usize, + stack_depth: usize, + static_flag: bool, + is_create: bool, + gas: U256, + kind: CallCreateExecutiveKind, } impl<'a> CallCreateExecutive<'a> { - /// Create a new call executive using raw data. 
- pub fn new_call_raw(params: ActionParams, info: &'a EnvInfo, machine: &'a Machine, schedule: &'a Schedule, factory: &'a VmFactory, depth: usize, stack_depth: usize, parent_static_flag: bool) -> Self { - trace!("Executive::call(params={:?}) self.env_info={:?}, parent_static={}", params, info, parent_static_flag); + /// Create a new call executive using raw data. + pub fn new_call_raw( + params: ActionParams, + info: &'a EnvInfo, + machine: &'a Machine, + schedule: &'a Schedule, + factory: &'a VmFactory, + depth: usize, + stack_depth: usize, + parent_static_flag: bool, + ) -> Self { + trace!( + "Executive::call(params={:?}) self.env_info={:?}, parent_static={}", + params, + info, + parent_static_flag + ); - let gas = params.gas; - let static_flag = parent_static_flag || params.call_type == CallType::StaticCall; + let gas = params.gas; + let static_flag = parent_static_flag || params.call_type == CallType::StaticCall; - // if destination is builtin, try to execute it - let kind = if let Some(builtin) = machine.builtin(¶ms.code_address, info.number) { - // Engines aren't supposed to return builtins until activation, but - // prefer to fail rather than silently break consensus. - if !builtin.is_active(info.number) { - panic!("Consensus failure: engine implementation prematurely enabled built-in at {}", params.code_address); - } + // if destination is builtin, try to execute it + let kind = if let Some(builtin) = machine.builtin(¶ms.code_address, info.number) { + // Engines aren't supposed to return builtins until activation, but + // prefer to fail rather than silently break consensus. 
+ if !builtin.is_active(info.number) { + panic!( + "Consensus failure: engine implementation prematurely enabled built-in at {}", + params.code_address + ); + } - CallCreateExecutiveKind::CallBuiltin(params) - } else { - if params.code.is_some() { - CallCreateExecutiveKind::ExecCall(params, Substate::new()) - } else { - CallCreateExecutiveKind::Transfer(params) - } - }; + CallCreateExecutiveKind::CallBuiltin(params) + } else { + if params.code.is_some() { + CallCreateExecutiveKind::ExecCall(params, Substate::new()) + } else { + CallCreateExecutiveKind::Transfer(params) + } + }; - Self { - info, machine, schedule, factory, depth, stack_depth, static_flag, kind, gas, - is_create: false, - } - } + Self { + info, + machine, + schedule, + factory, + depth, + stack_depth, + static_flag, + kind, + gas, + is_create: false, + } + } - /// Create a new create executive using raw data. - pub fn new_create_raw(params: ActionParams, info: &'a EnvInfo, machine: &'a Machine, schedule: &'a Schedule, factory: &'a VmFactory, depth: usize, stack_depth: usize, static_flag: bool) -> Self { - trace!("Executive::create(params={:?}) self.env_info={:?}, static={}", params, info, static_flag); + /// Create a new create executive using raw data. 
+ pub fn new_create_raw( + params: ActionParams, + info: &'a EnvInfo, + machine: &'a Machine, + schedule: &'a Schedule, + factory: &'a VmFactory, + depth: usize, + stack_depth: usize, + static_flag: bool, + ) -> Self { + trace!( + "Executive::create(params={:?}) self.env_info={:?}, static={}", + params, + info, + static_flag + ); - let gas = params.gas; + let gas = params.gas; - let kind = CallCreateExecutiveKind::ExecCreate(params, Substate::new()); + let kind = CallCreateExecutiveKind::ExecCreate(params, Substate::new()); - Self { - info, machine, schedule, factory, depth, stack_depth, static_flag, kind, gas, - is_create: true, - } - } + Self { + info, + machine, + schedule, + factory, + depth, + stack_depth, + static_flag, + kind, + gas, + is_create: true, + } + } - /// If this executive contains an unconfirmed substate, returns a mutable reference to it. - pub fn unconfirmed_substate(&mut self) -> Option<&mut Substate> { - match self.kind { - CallCreateExecutiveKind::ExecCall(_, ref mut unsub) => Some(unsub), - CallCreateExecutiveKind::ExecCreate(_, ref mut unsub) => Some(unsub), - CallCreateExecutiveKind::ResumeCreate(_, _, ref mut unsub) => Some(unsub), - CallCreateExecutiveKind::ResumeCall(_, _, ref mut unsub) => Some(unsub), - CallCreateExecutiveKind::Transfer(..) | CallCreateExecutiveKind::CallBuiltin(..) => None, - } - } + /// If this executive contains an unconfirmed substate, returns a mutable reference to it. + pub fn unconfirmed_substate(&mut self) -> Option<&mut Substate> { + match self.kind { + CallCreateExecutiveKind::ExecCall(_, ref mut unsub) => Some(unsub), + CallCreateExecutiveKind::ExecCreate(_, ref mut unsub) => Some(unsub), + CallCreateExecutiveKind::ResumeCreate(_, _, ref mut unsub) => Some(unsub), + CallCreateExecutiveKind::ResumeCall(_, _, ref mut unsub) => Some(unsub), + CallCreateExecutiveKind::Transfer(..) | CallCreateExecutiveKind::CallBuiltin(..) 
=> { + None + } + } + } - fn check_static_flag(params: &ActionParams, static_flag: bool, is_create: bool) -> vm::Result<()> { - if is_create { - if static_flag { - return Err(vm::Error::MutableCallInStaticContext); - } - } else { - if (static_flag && - (params.call_type == CallType::StaticCall || params.call_type == CallType::Call)) && - params.value.value() > U256::zero() - { - return Err(vm::Error::MutableCallInStaticContext); - } - } + fn check_static_flag( + params: &ActionParams, + static_flag: bool, + is_create: bool, + ) -> vm::Result<()> { + if is_create { + if static_flag { + return Err(vm::Error::MutableCallInStaticContext); + } + } else { + if (static_flag + && (params.call_type == CallType::StaticCall || params.call_type == CallType::Call)) + && params.value.value() > U256::zero() + { + return Err(vm::Error::MutableCallInStaticContext); + } + } - Ok(()) - } + Ok(()) + } - fn check_eip684(params: &ActionParams, state: &State) -> vm::Result<()> { - if state.exists_and_has_code_or_nonce(¶ms.address)? { - return Err(vm::Error::OutOfGas); - } + fn check_eip684( + params: &ActionParams, + state: &State, + ) -> vm::Result<()> { + if state.exists_and_has_code_or_nonce(¶ms.address)? 
{ + return Err(vm::Error::OutOfGas); + } - Ok(()) - } + Ok(()) + } - fn transfer_exec_balance(params: &ActionParams, schedule: &Schedule, state: &mut State, substate: &mut Substate) -> vm::Result<()> { - if let ActionValue::Transfer(val) = params.value { - state.transfer_balance(¶ms.sender, ¶ms.address, &val, substate.to_cleanup_mode(&schedule))?; - } + fn transfer_exec_balance( + params: &ActionParams, + schedule: &Schedule, + state: &mut State, + substate: &mut Substate, + ) -> vm::Result<()> { + if let ActionValue::Transfer(val) = params.value { + state.transfer_balance( + ¶ms.sender, + ¶ms.address, + &val, + substate.to_cleanup_mode(&schedule), + )?; + } - Ok(()) - } + Ok(()) + } - fn transfer_exec_balance_and_init_contract(params: &ActionParams, schedule: &Schedule, state: &mut State, substate: &mut Substate) -> vm::Result<()> { - let nonce_offset = if schedule.no_empty {1} else {0}.into(); - let prev_bal = state.balance(¶ms.address)?; - if let ActionValue::Transfer(val) = params.value { - state.sub_balance(¶ms.sender, &val, &mut substate.to_cleanup_mode(&schedule))?; - state.new_contract(¶ms.address, val.saturating_add(prev_bal), nonce_offset)?; - } else { - state.new_contract(¶ms.address, prev_bal, nonce_offset)?; - } + fn transfer_exec_balance_and_init_contract( + params: &ActionParams, + schedule: &Schedule, + state: &mut State, + substate: &mut Substate, + ) -> vm::Result<()> { + let nonce_offset = if schedule.no_empty { 1 } else { 0 }.into(); + let prev_bal = state.balance(¶ms.address)?; + if let ActionValue::Transfer(val) = params.value { + state.sub_balance( + ¶ms.sender, + &val, + &mut substate.to_cleanup_mode(&schedule), + )?; + state.new_contract(¶ms.address, val.saturating_add(prev_bal), nonce_offset)?; + } else { + state.new_contract(¶ms.address, prev_bal, nonce_offset)?; + } - Ok(()) - } + Ok(()) + } - fn enact_result(result: &vm::Result, state: &mut State, substate: &mut Substate, un_substate: Substate) { - match *result { - 
Err(vm::Error::OutOfGas) - | Err(vm::Error::BadJumpDestination {..}) - | Err(vm::Error::BadInstruction {.. }) - | Err(vm::Error::StackUnderflow {..}) - | Err(vm::Error::BuiltIn {..}) - | Err(vm::Error::Wasm {..}) - | Err(vm::Error::OutOfStack {..}) - | Err(vm::Error::MutableCallInStaticContext) - | Err(vm::Error::OutOfBounds) - | Err(vm::Error::Reverted) - | Ok(FinalizationResult { apply_state: false, .. }) => { - state.revert_to_checkpoint(); - }, - Ok(_) | Err(vm::Error::Internal(_)) => { - state.discard_checkpoint(); - substate.accrue(un_substate); - } - } - } + fn enact_result( + result: &vm::Result, + state: &mut State, + substate: &mut Substate, + un_substate: Substate, + ) { + match *result { + Err(vm::Error::OutOfGas) + | Err(vm::Error::BadJumpDestination { .. }) + | Err(vm::Error::BadInstruction { .. }) + | Err(vm::Error::StackUnderflow { .. }) + | Err(vm::Error::BuiltIn { .. }) + | Err(vm::Error::Wasm { .. }) + | Err(vm::Error::OutOfStack { .. }) + | Err(vm::Error::MutableCallInStaticContext) + | Err(vm::Error::OutOfBounds) + | Err(vm::Error::Reverted) + | Ok(FinalizationResult { + apply_state: false, .. + }) => { + state.revert_to_checkpoint(); + } + Ok(_) | Err(vm::Error::Internal(_)) => { + state.discard_checkpoint(); + substate.accrue(un_substate); + } + } + } - /// Creates `Externalities` from `Executive`. - fn as_externalities<'any, B: 'any + StateBackend, T, V>( - state: &'any mut State, - info: &'any EnvInfo, - machine: &'any Machine, - schedule: &'any Schedule, - depth: usize, - stack_depth: usize, - static_flag: bool, - origin_info: &'any OriginInfo, - substate: &'any mut Substate, - output: OutputPolicy, - tracer: &'any mut T, - vm_tracer: &'any mut V, - ) -> Externalities<'any, T, V, B> where T: Tracer, V: VMTracer { - Externalities::new(state, info, machine, schedule, depth, stack_depth, origin_info, substate, output, tracer, vm_tracer, static_flag) - } + /// Creates `Externalities` from `Executive`. 
+ fn as_externalities<'any, B: 'any + StateBackend, T, V>( + state: &'any mut State, + info: &'any EnvInfo, + machine: &'any Machine, + schedule: &'any Schedule, + depth: usize, + stack_depth: usize, + static_flag: bool, + origin_info: &'any OriginInfo, + substate: &'any mut Substate, + output: OutputPolicy, + tracer: &'any mut T, + vm_tracer: &'any mut V, + ) -> Externalities<'any, T, V, B> + where + T: Tracer, + V: VMTracer, + { + Externalities::new( + state, + info, + machine, + schedule, + depth, + stack_depth, + origin_info, + substate, + output, + tracer, + vm_tracer, + static_flag, + ) + } - /// Execute the executive. If a sub-call/create action is required, a resume trap error is returned. The caller is - /// then expected to call `resume_call` or `resume_create` to continue the execution. - /// - /// Current-level tracing is expected to be handled by caller. - pub fn exec(mut self, state: &mut State, substate: &mut Substate, tracer: &mut T, vm_tracer: &mut V) -> ExecutiveTrapResult<'a, FinalizationResult> { - match self.kind { - CallCreateExecutiveKind::Transfer(ref params) => { - assert!(!self.is_create); + /// Execute the executive. If a sub-call/create action is required, a resume trap error is returned. The caller is + /// then expected to call `resume_call` or `resume_create` to continue the execution. + /// + /// Current-level tracing is expected to be handled by caller. 
+ pub fn exec( + mut self, + state: &mut State, + substate: &mut Substate, + tracer: &mut T, + vm_tracer: &mut V, + ) -> ExecutiveTrapResult<'a, FinalizationResult> { + match self.kind { + CallCreateExecutiveKind::Transfer(ref params) => { + assert!(!self.is_create); - let mut inner = || { - Self::check_static_flag(params, self.static_flag, self.is_create)?; - Self::transfer_exec_balance(params, self.schedule, state, substate)?; + let mut inner = || { + Self::check_static_flag(params, self.static_flag, self.is_create)?; + Self::transfer_exec_balance(params, self.schedule, state, substate)?; - Ok(FinalizationResult { - gas_left: params.gas, - return_data: ReturnData::empty(), - apply_state: true, - }) - }; + Ok(FinalizationResult { + gas_left: params.gas, + return_data: ReturnData::empty(), + apply_state: true, + }) + }; - Ok(inner()) - }, - CallCreateExecutiveKind::CallBuiltin(ref params) => { - assert!(!self.is_create); + Ok(inner()) + } + CallCreateExecutiveKind::CallBuiltin(ref params) => { + assert!(!self.is_create); - let mut inner = || { - let builtin = self.machine.builtin(¶ms.code_address, self.info.number).expect("Builtin is_some is checked when creating this kind in new_call_raw; qed"); + let mut inner = || { + let builtin = self.machine.builtin(¶ms.code_address, self.info.number).expect("Builtin is_some is checked when creating this kind in new_call_raw; qed"); - Self::check_static_flag(¶ms, self.static_flag, self.is_create)?; - state.checkpoint(); - Self::transfer_exec_balance(¶ms, self.schedule, state, substate)?; + Self::check_static_flag(¶ms, self.static_flag, self.is_create)?; + state.checkpoint(); + Self::transfer_exec_balance(¶ms, self.schedule, state, substate)?; - let default = []; - let data = if let Some(ref d) = params.data { d as &[u8] } else { &default as &[u8] }; + let default = []; + let data = if let Some(ref d) = params.data { + d as &[u8] + } else { + &default as &[u8] + }; - // NOTE(niklasad1): block number is used by `builtin 
alt_bn128 ops` to enable eip1108 - let cost = builtin.cost(data, self.info.number); - if cost <= params.gas { - let mut builtin_out_buffer = Vec::new(); - let result = { - let mut builtin_output = BytesRef::Flexible(&mut builtin_out_buffer); - builtin.execute(data, &mut builtin_output) - }; - if let Err(e) = result { - state.revert_to_checkpoint(); + // NOTE(niklasad1): block number is used by `builtin alt_bn128 ops` to enable eip1108 + let cost = builtin.cost(data, self.info.number); + if cost <= params.gas { + let mut builtin_out_buffer = Vec::new(); + let result = { + let mut builtin_output = BytesRef::Flexible(&mut builtin_out_buffer); + builtin.execute(data, &mut builtin_output) + }; + if let Err(e) = result { + state.revert_to_checkpoint(); - Err(vm::Error::BuiltIn(e)) - } else { - state.discard_checkpoint(); + Err(vm::Error::BuiltIn(e)) + } else { + state.discard_checkpoint(); - let out_len = builtin_out_buffer.len(); - Ok(FinalizationResult { - gas_left: params.gas - cost, - return_data: ReturnData::new(builtin_out_buffer, 0, out_len), - apply_state: true, - }) - } - } else { - // just drain the whole gas - state.revert_to_checkpoint(); + let out_len = builtin_out_buffer.len(); + Ok(FinalizationResult { + gas_left: params.gas - cost, + return_data: ReturnData::new(builtin_out_buffer, 0, out_len), + apply_state: true, + }) + } + } else { + // just drain the whole gas + state.revert_to_checkpoint(); - Err(vm::Error::OutOfGas) - } - }; + Err(vm::Error::OutOfGas) + } + }; - Ok(inner()) - }, - CallCreateExecutiveKind::ExecCall(params, mut unconfirmed_substate) => { - assert!(!self.is_create); + Ok(inner()) + } + CallCreateExecutiveKind::ExecCall(params, mut unconfirmed_substate) => { + assert!(!self.is_create); - { - let static_flag = self.static_flag; - let is_create = self.is_create; - let schedule = self.schedule; + { + let static_flag = self.static_flag; + let is_create = self.is_create; + let schedule = self.schedule; - let mut pre_inner = || { - 
Self::check_static_flag(¶ms, static_flag, is_create)?; - state.checkpoint(); - Self::transfer_exec_balance(¶ms, schedule, state, substate)?; - Ok(()) - }; + let mut pre_inner = || { + Self::check_static_flag(¶ms, static_flag, is_create)?; + state.checkpoint(); + Self::transfer_exec_balance(¶ms, schedule, state, substate)?; + Ok(()) + }; - match pre_inner() { - Ok(()) => (), - Err(err) => return Ok(Err(err)), - } - } + match pre_inner() { + Ok(()) => (), + Err(err) => return Ok(Err(err)), + } + } - let origin_info = OriginInfo::from(¶ms); - let exec = self.factory.create(params, self.schedule, self.depth); + let origin_info = OriginInfo::from(¶ms); + let exec = self.factory.create(params, self.schedule, self.depth); - let out = { - let mut ext = Self::as_externalities(state, self.info, self.machine, self.schedule, self.depth, self.stack_depth, self.static_flag, &origin_info, &mut unconfirmed_substate, OutputPolicy::Return, tracer, vm_tracer); - match exec.exec(&mut ext) { - Ok(val) => Ok(val.finalize(ext)), - Err(err) => Err(err), - } - }; + let out = { + let mut ext = Self::as_externalities( + state, + self.info, + self.machine, + self.schedule, + self.depth, + self.stack_depth, + self.static_flag, + &origin_info, + &mut unconfirmed_substate, + OutputPolicy::Return, + tracer, + vm_tracer, + ); + match exec.exec(&mut ext) { + Ok(val) => Ok(val.finalize(ext)), + Err(err) => Err(err), + } + }; - let res = match out { - Ok(val) => val, - Err(TrapError::Call(subparams, resume)) => { - self.kind = CallCreateExecutiveKind::ResumeCall(origin_info, resume, unconfirmed_substate); - return Err(TrapError::Call(subparams, self)); - }, - Err(TrapError::Create(subparams, address, resume)) => { - self.kind = CallCreateExecutiveKind::ResumeCreate(origin_info, resume, unconfirmed_substate); - return Err(TrapError::Create(subparams, address, self)); - }, - }; + let res = match out { + Ok(val) => val, + Err(TrapError::Call(subparams, resume)) => { + self.kind = 
CallCreateExecutiveKind::ResumeCall( + origin_info, + resume, + unconfirmed_substate, + ); + return Err(TrapError::Call(subparams, self)); + } + Err(TrapError::Create(subparams, address, resume)) => { + self.kind = CallCreateExecutiveKind::ResumeCreate( + origin_info, + resume, + unconfirmed_substate, + ); + return Err(TrapError::Create(subparams, address, self)); + } + }; - Self::enact_result(&res, state, substate, unconfirmed_substate); - Ok(res) - }, - CallCreateExecutiveKind::ExecCreate(params, mut unconfirmed_substate) => { - assert!(self.is_create); + Self::enact_result(&res, state, substate, unconfirmed_substate); + Ok(res) + } + CallCreateExecutiveKind::ExecCreate(params, mut unconfirmed_substate) => { + assert!(self.is_create); - { - let static_flag = self.static_flag; - let is_create = self.is_create; - let schedule = self.schedule; + { + let static_flag = self.static_flag; + let is_create = self.is_create; + let schedule = self.schedule; - let mut pre_inner = || { - Self::check_eip684(¶ms, state)?; - Self::check_static_flag(¶ms, static_flag, is_create)?; - state.checkpoint(); - Self::transfer_exec_balance_and_init_contract(¶ms, schedule, state, substate)?; - Ok(()) - }; + let mut pre_inner = || { + Self::check_eip684(¶ms, state)?; + Self::check_static_flag(¶ms, static_flag, is_create)?; + state.checkpoint(); + Self::transfer_exec_balance_and_init_contract( + ¶ms, schedule, state, substate, + )?; + Ok(()) + }; - match pre_inner() { - Ok(()) => (), - Err(err) => return Ok(Err(err)), - } - } + match pre_inner() { + Ok(()) => (), + Err(err) => return Ok(Err(err)), + } + } - let origin_info = OriginInfo::from(¶ms); - let exec = self.factory.create(params, self.schedule, self.depth); + let origin_info = OriginInfo::from(¶ms); + let exec = self.factory.create(params, self.schedule, self.depth); - let out = { - let mut ext = Self::as_externalities(state, self.info, self.machine, self.schedule, self.depth, self.stack_depth, self.static_flag, &origin_info, &mut 
unconfirmed_substate, OutputPolicy::InitContract, tracer, vm_tracer); - match exec.exec(&mut ext) { - Ok(val) => Ok(val.finalize(ext)), - Err(err) => Err(err), - } - }; + let out = { + let mut ext = Self::as_externalities( + state, + self.info, + self.machine, + self.schedule, + self.depth, + self.stack_depth, + self.static_flag, + &origin_info, + &mut unconfirmed_substate, + OutputPolicy::InitContract, + tracer, + vm_tracer, + ); + match exec.exec(&mut ext) { + Ok(val) => Ok(val.finalize(ext)), + Err(err) => Err(err), + } + }; - let res = match out { - Ok(val) => val, - Err(TrapError::Call(subparams, resume)) => { - self.kind = CallCreateExecutiveKind::ResumeCall(origin_info, resume, unconfirmed_substate); - return Err(TrapError::Call(subparams, self)); - }, - Err(TrapError::Create(subparams, address, resume)) => { - self.kind = CallCreateExecutiveKind::ResumeCreate(origin_info, resume, unconfirmed_substate); - return Err(TrapError::Create(subparams, address, self)); - }, - }; + let res = match out { + Ok(val) => val, + Err(TrapError::Call(subparams, resume)) => { + self.kind = CallCreateExecutiveKind::ResumeCall( + origin_info, + resume, + unconfirmed_substate, + ); + return Err(TrapError::Call(subparams, self)); + } + Err(TrapError::Create(subparams, address, resume)) => { + self.kind = CallCreateExecutiveKind::ResumeCreate( + origin_info, + resume, + unconfirmed_substate, + ); + return Err(TrapError::Create(subparams, address, self)); + } + }; - Self::enact_result(&res, state, substate, unconfirmed_substate); - Ok(res) - }, - CallCreateExecutiveKind::ResumeCall(..) | CallCreateExecutiveKind::ResumeCreate(..) => panic!("This executive has already been executed once."), - } - } + Self::enact_result(&res, state, substate, unconfirmed_substate); + Ok(res) + } + CallCreateExecutiveKind::ResumeCall(..) | CallCreateExecutiveKind::ResumeCreate(..) 
=> { + panic!("This executive has already been executed once.") + } + } + } - /// Resume execution from a call trap previsouly trapped by `exec`. - /// - /// Current-level tracing is expected to be handled by caller. - pub fn resume_call(mut self, result: vm::MessageCallResult, state: &mut State, substate: &mut Substate, tracer: &mut T, vm_tracer: &mut V) -> ExecutiveTrapResult<'a, FinalizationResult> { - match self.kind { - CallCreateExecutiveKind::ResumeCall(origin_info, resume, mut unconfirmed_substate) => { - let out = { - let exec = resume.resume_call(result); + /// Resume execution from a call trap previsouly trapped by `exec`. + /// + /// Current-level tracing is expected to be handled by caller. + pub fn resume_call( + mut self, + result: vm::MessageCallResult, + state: &mut State, + substate: &mut Substate, + tracer: &mut T, + vm_tracer: &mut V, + ) -> ExecutiveTrapResult<'a, FinalizationResult> { + match self.kind { + CallCreateExecutiveKind::ResumeCall(origin_info, resume, mut unconfirmed_substate) => { + let out = { + let exec = resume.resume_call(result); - let mut ext = Self::as_externalities(state, self.info, self.machine, self.schedule, self.depth, self.stack_depth, self.static_flag, &origin_info, &mut unconfirmed_substate, if self.is_create { OutputPolicy::InitContract } else { OutputPolicy::Return }, tracer, vm_tracer); - match exec.exec(&mut ext) { - Ok(val) => Ok(val.finalize(ext)), - Err(err) => Err(err), - } - }; + let mut ext = Self::as_externalities( + state, + self.info, + self.machine, + self.schedule, + self.depth, + self.stack_depth, + self.static_flag, + &origin_info, + &mut unconfirmed_substate, + if self.is_create { + OutputPolicy::InitContract + } else { + OutputPolicy::Return + }, + tracer, + vm_tracer, + ); + match exec.exec(&mut ext) { + Ok(val) => Ok(val.finalize(ext)), + Err(err) => Err(err), + } + }; - let res = match out { - Ok(val) => val, - Err(TrapError::Call(subparams, resume)) => { - self.kind = 
CallCreateExecutiveKind::ResumeCall(origin_info, resume, unconfirmed_substate); - return Err(TrapError::Call(subparams, self)); - }, - Err(TrapError::Create(subparams, address, resume)) => { - self.kind = CallCreateExecutiveKind::ResumeCreate(origin_info, resume, unconfirmed_substate); - return Err(TrapError::Create(subparams, address, self)); - }, - }; + let res = match out { + Ok(val) => val, + Err(TrapError::Call(subparams, resume)) => { + self.kind = CallCreateExecutiveKind::ResumeCall( + origin_info, + resume, + unconfirmed_substate, + ); + return Err(TrapError::Call(subparams, self)); + } + Err(TrapError::Create(subparams, address, resume)) => { + self.kind = CallCreateExecutiveKind::ResumeCreate( + origin_info, + resume, + unconfirmed_substate, + ); + return Err(TrapError::Create(subparams, address, self)); + } + }; - Self::enact_result(&res, state, substate, unconfirmed_substate); - Ok(res) - }, - CallCreateExecutiveKind::ResumeCreate(..) => - panic!("Resumable as create, but called resume_call"), - CallCreateExecutiveKind::Transfer(..) | CallCreateExecutiveKind::CallBuiltin(..) | - CallCreateExecutiveKind::ExecCall(..) | CallCreateExecutiveKind::ExecCreate(..) => - panic!("Not resumable"), - } - } + Self::enact_result(&res, state, substate, unconfirmed_substate); + Ok(res) + } + CallCreateExecutiveKind::ResumeCreate(..) => { + panic!("Resumable as create, but called resume_call") + } + CallCreateExecutiveKind::Transfer(..) + | CallCreateExecutiveKind::CallBuiltin(..) + | CallCreateExecutiveKind::ExecCall(..) + | CallCreateExecutiveKind::ExecCreate(..) => panic!("Not resumable"), + } + } - /// Resume execution from a create trap previsouly trapped by `exec`. - /// - /// Current-level tracing is expected to be handled by caller. 
- pub fn resume_create(mut self, result: vm::ContractCreateResult, state: &mut State, substate: &mut Substate, tracer: &mut T, vm_tracer: &mut V) -> ExecutiveTrapResult<'a, FinalizationResult> { - match self.kind { - CallCreateExecutiveKind::ResumeCreate(origin_info, resume, mut unconfirmed_substate) => { - let out = { - let exec = resume.resume_create(result); + /// Resume execution from a create trap previsouly trapped by `exec`. + /// + /// Current-level tracing is expected to be handled by caller. + pub fn resume_create( + mut self, + result: vm::ContractCreateResult, + state: &mut State, + substate: &mut Substate, + tracer: &mut T, + vm_tracer: &mut V, + ) -> ExecutiveTrapResult<'a, FinalizationResult> { + match self.kind { + CallCreateExecutiveKind::ResumeCreate( + origin_info, + resume, + mut unconfirmed_substate, + ) => { + let out = { + let exec = resume.resume_create(result); - let mut ext = Self::as_externalities(state, self.info, self.machine, self.schedule, self.depth, self.stack_depth, self.static_flag, &origin_info, &mut unconfirmed_substate, if self.is_create { OutputPolicy::InitContract } else { OutputPolicy::Return }, tracer, vm_tracer); - match exec.exec(&mut ext) { - Ok(val) => Ok(val.finalize(ext)), - Err(err) => Err(err), - } - }; + let mut ext = Self::as_externalities( + state, + self.info, + self.machine, + self.schedule, + self.depth, + self.stack_depth, + self.static_flag, + &origin_info, + &mut unconfirmed_substate, + if self.is_create { + OutputPolicy::InitContract + } else { + OutputPolicy::Return + }, + tracer, + vm_tracer, + ); + match exec.exec(&mut ext) { + Ok(val) => Ok(val.finalize(ext)), + Err(err) => Err(err), + } + }; - let res = match out { - Ok(val) => val, - Err(TrapError::Call(subparams, resume)) => { - self.kind = CallCreateExecutiveKind::ResumeCall(origin_info, resume, unconfirmed_substate); - return Err(TrapError::Call(subparams, self)); - }, - Err(TrapError::Create(subparams, address, resume)) => { - self.kind = 
CallCreateExecutiveKind::ResumeCreate(origin_info, resume, unconfirmed_substate); - return Err(TrapError::Create(subparams, address, self)); - }, - }; + let res = match out { + Ok(val) => val, + Err(TrapError::Call(subparams, resume)) => { + self.kind = CallCreateExecutiveKind::ResumeCall( + origin_info, + resume, + unconfirmed_substate, + ); + return Err(TrapError::Call(subparams, self)); + } + Err(TrapError::Create(subparams, address, resume)) => { + self.kind = CallCreateExecutiveKind::ResumeCreate( + origin_info, + resume, + unconfirmed_substate, + ); + return Err(TrapError::Create(subparams, address, self)); + } + }; - Self::enact_result(&res, state, substate, unconfirmed_substate); - Ok(res) - }, - CallCreateExecutiveKind::ResumeCall(..) => - panic!("Resumable as call, but called resume_create"), - CallCreateExecutiveKind::Transfer(..) | CallCreateExecutiveKind::CallBuiltin(..) | - CallCreateExecutiveKind::ExecCall(..) | CallCreateExecutiveKind::ExecCreate(..) => - panic!("Not resumable"), - } - } + Self::enact_result(&res, state, substate, unconfirmed_substate); + Ok(res) + } + CallCreateExecutiveKind::ResumeCall(..) => { + panic!("Resumable as call, but called resume_create") + } + CallCreateExecutiveKind::Transfer(..) + | CallCreateExecutiveKind::CallBuiltin(..) + | CallCreateExecutiveKind::ExecCall(..) + | CallCreateExecutiveKind::ExecCreate(..) => panic!("Not resumable"), + } + } - /// Execute and consume the current executive. This function handles resume traps and sub-level tracing. The caller is expected to handle current-level tracing. - pub fn consume(self, state: &mut State, top_substate: &mut Substate, tracer: &mut T, vm_tracer: &mut V) -> vm::Result { - let mut last_res = Some((false, self.gas, self.exec(state, top_substate, tracer, vm_tracer))); + /// Execute and consume the current executive. This function handles resume traps and sub-level tracing. The caller is expected to handle current-level tracing. 
+ pub fn consume( + self, + state: &mut State, + top_substate: &mut Substate, + tracer: &mut T, + vm_tracer: &mut V, + ) -> vm::Result { + let mut last_res = Some(( + false, + self.gas, + self.exec(state, top_substate, tracer, vm_tracer), + )); - let mut callstack: Vec<(Option
, CallCreateExecutive<'a>)> = Vec::new(); - loop { - match last_res { + let mut callstack: Vec<(Option
, CallCreateExecutive<'a>)> = Vec::new(); + loop { + match last_res { None => { match callstack.pop() { Some((_, exec)) => { @@ -740,732 +1000,940 @@ impl<'a> CallCreateExecutive<'a> { last_res = None; }, } - } - } + } + } } /// Transaction executor. pub struct Executive<'a, B: 'a> { - state: &'a mut State, - info: &'a EnvInfo, - machine: &'a Machine, - schedule: &'a Schedule, - depth: usize, - static_flag: bool, + state: &'a mut State, + info: &'a EnvInfo, + machine: &'a Machine, + schedule: &'a Schedule, + depth: usize, + static_flag: bool, } impl<'a, B: 'a + StateBackend> Executive<'a, B> { - /// Basic constructor. - pub fn new(state: &'a mut State, info: &'a EnvInfo, machine: &'a Machine, schedule: &'a Schedule) -> Self { - Executive { - state: state, - info: info, - machine: machine, - schedule: schedule, - depth: 0, - static_flag: false, - } - } + /// Basic constructor. + pub fn new( + state: &'a mut State, + info: &'a EnvInfo, + machine: &'a Machine, + schedule: &'a Schedule, + ) -> Self { + Executive { + state: state, + info: info, + machine: machine, + schedule: schedule, + depth: 0, + static_flag: false, + } + } - /// Populates executive from parent properties. Increments executive depth. - pub fn from_parent(state: &'a mut State, info: &'a EnvInfo, machine: &'a Machine, schedule: &'a Schedule, parent_depth: usize, static_flag: bool) -> Self { - Executive { - state: state, - info: info, - machine: machine, - schedule: schedule, - depth: parent_depth + 1, - static_flag: static_flag, - } - } + /// Populates executive from parent properties. Increments executive depth. + pub fn from_parent( + state: &'a mut State, + info: &'a EnvInfo, + machine: &'a Machine, + schedule: &'a Schedule, + parent_depth: usize, + static_flag: bool, + ) -> Self { + Executive { + state: state, + info: info, + machine: machine, + schedule: schedule, + depth: parent_depth + 1, + static_flag: static_flag, + } + } - /// This function should be used to execute transaction. 
- pub fn transact(&'a mut self, t: &SignedTransaction, options: TransactOptions) - -> Result, ExecutionError> where T: Tracer, V: VMTracer, - { - self.transact_with_tracer(t, options.check_nonce, options.output_from_init_contract, options.tracer, options.vm_tracer) - } + /// This function should be used to execute transaction. + pub fn transact( + &'a mut self, + t: &SignedTransaction, + options: TransactOptions, + ) -> Result, ExecutionError> + where + T: Tracer, + V: VMTracer, + { + self.transact_with_tracer( + t, + options.check_nonce, + options.output_from_init_contract, + options.tracer, + options.vm_tracer, + ) + } - /// Execute a transaction in a "virtual" context. - /// This will ensure the caller has enough balance to execute the desired transaction. - /// Used for extra-block executions for things like consensus contracts and RPCs - pub fn transact_virtual(&'a mut self, t: &SignedTransaction, options: TransactOptions) - -> Result, ExecutionError> where T: Tracer, V: VMTracer, - { - let sender = t.sender(); - let balance = self.state.balance(&sender)?; - let needed_balance = t.value.saturating_add(t.gas.saturating_mul(t.gas_price)); - if balance < needed_balance { - // give the sender a sufficient balance - self.state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty)?; - } + /// Execute a transaction in a "virtual" context. + /// This will ensure the caller has enough balance to execute the desired transaction. 
+ /// Used for extra-block executions for things like consensus contracts and RPCs + pub fn transact_virtual( + &'a mut self, + t: &SignedTransaction, + options: TransactOptions, + ) -> Result, ExecutionError> + where + T: Tracer, + V: VMTracer, + { + let sender = t.sender(); + let balance = self.state.balance(&sender)?; + let needed_balance = t.value.saturating_add(t.gas.saturating_mul(t.gas_price)); + if balance < needed_balance { + // give the sender a sufficient balance + self.state + .add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty)?; + } - self.transact(t, options) - } + self.transact(t, options) + } - /// Execute transaction/call with tracing enabled - fn transact_with_tracer( - &'a mut self, - t: &SignedTransaction, - check_nonce: bool, - output_from_create: bool, - mut tracer: T, - mut vm_tracer: V - ) -> Result, ExecutionError> where T: Tracer, V: VMTracer { - let sender = t.sender(); - let nonce = self.state.nonce(&sender)?; + /// Execute transaction/call with tracing enabled + fn transact_with_tracer( + &'a mut self, + t: &SignedTransaction, + check_nonce: bool, + output_from_create: bool, + mut tracer: T, + mut vm_tracer: V, + ) -> Result, ExecutionError> + where + T: Tracer, + V: VMTracer, + { + let sender = t.sender(); + let nonce = self.state.nonce(&sender)?; - let schedule = self.schedule; - let base_gas_required = U256::from(t.gas_required(&schedule)); + let schedule = self.schedule; + let base_gas_required = U256::from(t.gas_required(&schedule)); - if t.gas < base_gas_required { - return Err(ExecutionError::NotEnoughBaseGas { required: base_gas_required, got: t.gas }); - } + if t.gas < base_gas_required { + return Err(ExecutionError::NotEnoughBaseGas { + required: base_gas_required, + got: t.gas, + }); + } - if !t.is_unsigned() && check_nonce && schedule.kill_dust != CleanDustMode::Off && !self.state.exists(&sender)? 
{ - return Err(ExecutionError::SenderMustExist); - } + if !t.is_unsigned() + && check_nonce + && schedule.kill_dust != CleanDustMode::Off + && !self.state.exists(&sender)? + { + return Err(ExecutionError::SenderMustExist); + } - let init_gas = t.gas - base_gas_required; + let init_gas = t.gas - base_gas_required; - // validate transaction nonce - if check_nonce && t.nonce != nonce { - return Err(ExecutionError::InvalidNonce { expected: nonce, got: t.nonce }); - } + // validate transaction nonce + if check_nonce && t.nonce != nonce { + return Err(ExecutionError::InvalidNonce { + expected: nonce, + got: t.nonce, + }); + } - // validate if transaction fits into given block - if self.info.gas_used + t.gas > self.info.gas_limit { - return Err(ExecutionError::BlockGasLimitReached { - gas_limit: self.info.gas_limit, - gas_used: self.info.gas_used, - gas: t.gas - }); - } + // validate if transaction fits into given block + if self.info.gas_used + t.gas > self.info.gas_limit { + return Err(ExecutionError::BlockGasLimitReached { + gas_limit: self.info.gas_limit, + gas_used: self.info.gas_used, + gas: t.gas, + }); + } - // TODO: we might need bigints here, or at least check overflows. - let balance = self.state.balance(&sender)?; - let gas_cost = t.gas.full_mul(t.gas_price); - let total_cost = U512::from(t.value) + gas_cost; + // TODO: we might need bigints here, or at least check overflows. 
+ let balance = self.state.balance(&sender)?; + let gas_cost = t.gas.full_mul(t.gas_price); + let total_cost = U512::from(t.value) + gas_cost; - // avoid unaffordable transactions - let balance512 = U512::from(balance); - if balance512 < total_cost { - return Err(ExecutionError::NotEnoughCash { required: total_cost, got: balance512 }); - } + // avoid unaffordable transactions + let balance512 = U512::from(balance); + if balance512 < total_cost { + return Err(ExecutionError::NotEnoughCash { + required: total_cost, + got: balance512, + }); + } - let mut substate = Substate::new(); + let mut substate = Substate::new(); - // NOTE: there can be no invalid transactions from this point. - if !schedule.keep_unsigned_nonce || !t.is_unsigned() { - self.state.inc_nonce(&sender)?; - } - self.state.sub_balance(&sender, &U256::from(gas_cost), &mut substate.to_cleanup_mode(&schedule))?; + // NOTE: there can be no invalid transactions from this point. + if !schedule.keep_unsigned_nonce || !t.is_unsigned() { + self.state.inc_nonce(&sender)?; + } + self.state.sub_balance( + &sender, + &U256::from(gas_cost), + &mut substate.to_cleanup_mode(&schedule), + )?; - let (result, output) = match t.action { - Action::Create => { - let (new_address, code_hash) = contract_address(self.machine.create_address_scheme(self.info.number), &sender, &nonce, &t.data); - let params = ActionParams { - code_address: new_address.clone(), - code_hash: code_hash, - address: new_address, - sender: sender.clone(), - origin: sender.clone(), - gas: init_gas, - gas_price: t.gas_price, - value: ActionValue::Transfer(t.value), - code: Some(Arc::new(t.data.clone())), - data: None, - call_type: CallType::None, - params_type: vm::ParamsType::Embedded, - }; - let res = self.create(params, &mut substate, &mut tracer, &mut vm_tracer); - let out = match &res { - Ok(res) if output_from_create => res.return_data.to_vec(), - _ => Vec::new(), - }; - (res, out) - }, - Action::Call(ref address) => { - let params = ActionParams { 
- code_address: address.clone(), - address: address.clone(), - sender: sender.clone(), - origin: sender.clone(), - gas: init_gas, - gas_price: t.gas_price, - value: ActionValue::Transfer(t.value), - code: self.state.code(address)?, - code_hash: self.state.code_hash(address)?, - data: Some(t.data.clone()), - call_type: CallType::Call, - params_type: vm::ParamsType::Separate, - }; - let res = self.call(params, &mut substate, &mut tracer, &mut vm_tracer); - let out = match &res { - Ok(res) => res.return_data.to_vec(), - _ => Vec::new(), - }; - (res, out) - } - }; + let (result, output) = match t.action { + Action::Create => { + let (new_address, code_hash) = contract_address( + self.machine.create_address_scheme(self.info.number), + &sender, + &nonce, + &t.data, + ); + let params = ActionParams { + code_address: new_address.clone(), + code_hash: code_hash, + address: new_address, + sender: sender.clone(), + origin: sender.clone(), + gas: init_gas, + gas_price: t.gas_price, + value: ActionValue::Transfer(t.value), + code: Some(Arc::new(t.data.clone())), + data: None, + call_type: CallType::None, + params_type: vm::ParamsType::Embedded, + }; + let res = self.create(params, &mut substate, &mut tracer, &mut vm_tracer); + let out = match &res { + Ok(res) if output_from_create => res.return_data.to_vec(), + _ => Vec::new(), + }; + (res, out) + } + Action::Call(ref address) => { + let params = ActionParams { + code_address: address.clone(), + address: address.clone(), + sender: sender.clone(), + origin: sender.clone(), + gas: init_gas, + gas_price: t.gas_price, + value: ActionValue::Transfer(t.value), + code: self.state.code(address)?, + code_hash: self.state.code_hash(address)?, + data: Some(t.data.clone()), + call_type: CallType::Call, + params_type: vm::ParamsType::Separate, + }; + let res = self.call(params, &mut substate, &mut tracer, &mut vm_tracer); + let out = match &res { + Ok(res) => res.return_data.to_vec(), + _ => Vec::new(), + }; + (res, out) + } + }; - // 
finalize here! - Ok(self.finalize(t, substate, result, output, tracer.drain(), vm_tracer.drain())?) - } + // finalize here! + Ok(self.finalize( + t, + substate, + result, + output, + tracer.drain(), + vm_tracer.drain(), + )?) + } - /// Calls contract function with given contract params and stack depth. - /// NOTE. It does not finalize the transaction (doesn't do refunds, nor suicides). - /// Modifies the substate and the output. - /// Returns either gas_left or `vm::Error`. - pub fn call_with_stack_depth( - &mut self, - params: ActionParams, - substate: &mut Substate, - stack_depth: usize, - tracer: &mut T, - vm_tracer: &mut V - ) -> vm::Result where T: Tracer, V: VMTracer { - tracer.prepare_trace_call(¶ms, self.depth, self.machine.builtin(¶ms.address, self.info.number).is_some()); - vm_tracer.prepare_subtrace(params.code.as_ref().map_or_else(|| &[] as &[u8], |d| &*d as &[u8])); + /// Calls contract function with given contract params and stack depth. + /// NOTE. It does not finalize the transaction (doesn't do refunds, nor suicides). + /// Modifies the substate and the output. + /// Returns either gas_left or `vm::Error`. 
+ pub fn call_with_stack_depth( + &mut self, + params: ActionParams, + substate: &mut Substate, + stack_depth: usize, + tracer: &mut T, + vm_tracer: &mut V, + ) -> vm::Result + where + T: Tracer, + V: VMTracer, + { + tracer.prepare_trace_call( + ¶ms, + self.depth, + self.machine + .builtin(¶ms.address, self.info.number) + .is_some(), + ); + vm_tracer.prepare_subtrace( + params + .code + .as_ref() + .map_or_else(|| &[] as &[u8], |d| &*d as &[u8]), + ); - let gas = params.gas; + let gas = params.gas; - let vm_factory = self.state.vm_factory(); - let result = CallCreateExecutive::new_call_raw( - params, - self.info, - self.machine, - self.schedule, - &vm_factory, - self.depth, - stack_depth, - self.static_flag - ).consume(self.state, substate, tracer, vm_tracer); + let vm_factory = self.state.vm_factory(); + let result = CallCreateExecutive::new_call_raw( + params, + self.info, + self.machine, + self.schedule, + &vm_factory, + self.depth, + stack_depth, + self.static_flag, + ) + .consume(self.state, substate, tracer, vm_tracer); - match result { - Ok(ref val) if val.apply_state => { - tracer.done_trace_call( - gas - val.gas_left, - &val.return_data, - ); - }, - Ok(_) => { - tracer.done_trace_failed(&vm::Error::Reverted); - }, - Err(ref err) => { - tracer.done_trace_failed(err); - }, - } - vm_tracer.done_subtrace(); + match result { + Ok(ref val) if val.apply_state => { + tracer.done_trace_call(gas - val.gas_left, &val.return_data); + } + Ok(_) => { + tracer.done_trace_failed(&vm::Error::Reverted); + } + Err(ref err) => { + tracer.done_trace_failed(err); + } + } + vm_tracer.done_subtrace(); - result - } + result + } - /// Calls contract function with given contract params, if the stack depth is above a threshold, create a new thread - /// to execute it. 
- pub fn call_with_crossbeam( - &mut self, - params: ActionParams, - substate: &mut Substate, - stack_depth: usize, - tracer: &mut T, - vm_tracer: &mut V - ) -> vm::Result where T: Tracer, V: VMTracer { - let local_stack_size = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get()); - let depth_threshold = local_stack_size.saturating_sub(STACK_SIZE_ENTRY_OVERHEAD) / STACK_SIZE_PER_DEPTH; + /// Calls contract function with given contract params, if the stack depth is above a threshold, create a new thread + /// to execute it. + pub fn call_with_crossbeam( + &mut self, + params: ActionParams, + substate: &mut Substate, + stack_depth: usize, + tracer: &mut T, + vm_tracer: &mut V, + ) -> vm::Result + where + T: Tracer, + V: VMTracer, + { + let local_stack_size = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get()); + let depth_threshold = + local_stack_size.saturating_sub(STACK_SIZE_ENTRY_OVERHEAD) / STACK_SIZE_PER_DEPTH; - if stack_depth != depth_threshold { - self.call_with_stack_depth(params, substate, stack_depth, tracer, vm_tracer) - } else { - thread::scope(|scope| { - let stack_size = cmp::max(self.schedule.max_depth.saturating_sub(depth_threshold) * STACK_SIZE_PER_DEPTH, local_stack_size); - scope.builder() - .stack_size(stack_size) - .spawn(|_| { - self.call_with_stack_depth(params, substate, stack_depth, tracer, vm_tracer) - }) - .expect("Sub-thread creation cannot fail; the host might run out of resources; qed") - .join() - }) - .expect("Sub-thread never panics; qed") - .expect("Sub-thread never panics; qed") - } - } + if stack_depth != depth_threshold { + self.call_with_stack_depth(params, substate, stack_depth, tracer, vm_tracer) + } else { + thread::scope(|scope| { + let stack_size = cmp::max( + self.schedule.max_depth.saturating_sub(depth_threshold) * STACK_SIZE_PER_DEPTH, + local_stack_size, + ); + scope + .builder() + .stack_size(stack_size) + .spawn(|_| { + self.call_with_stack_depth(params, substate, stack_depth, tracer, vm_tracer) + }) + .expect( + "Sub-thread creation 
cannot fail; the host might run out of resources; qed", + ) + .join() + }) + .expect("Sub-thread never panics; qed") + .expect("Sub-thread never panics; qed") + } + } - /// Calls contract function with given contract params. - pub fn call( - &mut self, - params: ActionParams, - substate: &mut Substate, - tracer: &mut T, - vm_tracer: &mut V - ) -> vm::Result where T: Tracer, V: VMTracer { - self.call_with_stack_depth(params, substate, 0, tracer, vm_tracer) - } + /// Calls contract function with given contract params. + pub fn call( + &mut self, + params: ActionParams, + substate: &mut Substate, + tracer: &mut T, + vm_tracer: &mut V, + ) -> vm::Result + where + T: Tracer, + V: VMTracer, + { + self.call_with_stack_depth(params, substate, 0, tracer, vm_tracer) + } - /// Creates contract with given contract params and stack depth. - /// NOTE. It does not finalize the transaction (doesn't do refunds, nor suicides). - /// Modifies the substate. - pub fn create_with_stack_depth( - &mut self, - params: ActionParams, - substate: &mut Substate, - stack_depth: usize, - tracer: &mut T, - vm_tracer: &mut V, - ) -> vm::Result where T: Tracer, V: VMTracer { - tracer.prepare_trace_create(¶ms); - vm_tracer.prepare_subtrace(params.code.as_ref().map_or_else(|| &[] as &[u8], |d| &*d as &[u8])); + /// Creates contract with given contract params and stack depth. + /// NOTE. It does not finalize the transaction (doesn't do refunds, nor suicides). + /// Modifies the substate. 
+ pub fn create_with_stack_depth( + &mut self, + params: ActionParams, + substate: &mut Substate, + stack_depth: usize, + tracer: &mut T, + vm_tracer: &mut V, + ) -> vm::Result + where + T: Tracer, + V: VMTracer, + { + tracer.prepare_trace_create(¶ms); + vm_tracer.prepare_subtrace( + params + .code + .as_ref() + .map_or_else(|| &[] as &[u8], |d| &*d as &[u8]), + ); - let address = params.address; - let gas = params.gas; + let address = params.address; + let gas = params.gas; - let vm_factory = self.state.vm_factory(); - let result = CallCreateExecutive::new_create_raw( - params, - self.info, - self.machine, - self.schedule, - &vm_factory, - self.depth, - stack_depth, - self.static_flag - ).consume(self.state, substate, tracer, vm_tracer); + let vm_factory = self.state.vm_factory(); + let result = CallCreateExecutive::new_create_raw( + params, + self.info, + self.machine, + self.schedule, + &vm_factory, + self.depth, + stack_depth, + self.static_flag, + ) + .consume(self.state, substate, tracer, vm_tracer); - match result { - Ok(ref val) if val.apply_state => { - tracer.done_trace_create( - gas - val.gas_left, - &val.return_data, - address, - ); - }, - Ok(_) => { - tracer.done_trace_failed(&vm::Error::Reverted); - }, - Err(ref err) => { - tracer.done_trace_failed(err); - }, - } - vm_tracer.done_subtrace(); + match result { + Ok(ref val) if val.apply_state => { + tracer.done_trace_create(gas - val.gas_left, &val.return_data, address); + } + Ok(_) => { + tracer.done_trace_failed(&vm::Error::Reverted); + } + Err(ref err) => { + tracer.done_trace_failed(err); + } + } + vm_tracer.done_subtrace(); - result - } + result + } - /// Creates contract with given contract params, if the stack depth is above a threshold, create a new thread to - /// execute it. 
- pub fn create_with_crossbeam( - &mut self, - params: ActionParams, - substate: &mut Substate, - stack_depth: usize, - tracer: &mut T, - vm_tracer: &mut V, - ) -> vm::Result where T: Tracer, V: VMTracer { - let local_stack_size = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get()); - let depth_threshold = local_stack_size.saturating_sub(STACK_SIZE_ENTRY_OVERHEAD) / STACK_SIZE_PER_DEPTH; + /// Creates contract with given contract params, if the stack depth is above a threshold, create a new thread to + /// execute it. + pub fn create_with_crossbeam( + &mut self, + params: ActionParams, + substate: &mut Substate, + stack_depth: usize, + tracer: &mut T, + vm_tracer: &mut V, + ) -> vm::Result + where + T: Tracer, + V: VMTracer, + { + let local_stack_size = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get()); + let depth_threshold = + local_stack_size.saturating_sub(STACK_SIZE_ENTRY_OVERHEAD) / STACK_SIZE_PER_DEPTH; - if stack_depth != depth_threshold { - self.create_with_stack_depth(params, substate, stack_depth, tracer, vm_tracer) - } else { - thread::scope(|scope| { - let stack_size = cmp::max(self.schedule.max_depth.saturating_sub(depth_threshold) * STACK_SIZE_PER_DEPTH, local_stack_size); - scope.builder() - .stack_size(stack_size) - .spawn(|_| { - self.create_with_stack_depth(params, substate, stack_depth, tracer, vm_tracer) - }) - .expect("Sub-thread creation cannot fail; the host might run out of resources; qed") - .join() - }) - .expect("Sub-thread never panics; qed") - .expect("Sub-thread never panics; qed") - } - } + if stack_depth != depth_threshold { + self.create_with_stack_depth(params, substate, stack_depth, tracer, vm_tracer) + } else { + thread::scope(|scope| { + let stack_size = cmp::max( + self.schedule.max_depth.saturating_sub(depth_threshold) * STACK_SIZE_PER_DEPTH, + local_stack_size, + ); + scope + .builder() + .stack_size(stack_size) + .spawn(|_| { + self.create_with_stack_depth( + params, + substate, + stack_depth, + tracer, + vm_tracer, + ) + }) + .expect( + 
"Sub-thread creation cannot fail; the host might run out of resources; qed", + ) + .join() + }) + .expect("Sub-thread never panics; qed") + .expect("Sub-thread never panics; qed") + } + } - /// Creates contract with given contract params. - pub fn create( - &mut self, - params: ActionParams, - substate: &mut Substate, - tracer: &mut T, - vm_tracer: &mut V, - ) -> vm::Result where T: Tracer, V: VMTracer { - self.create_with_stack_depth(params, substate, 0, tracer, vm_tracer) - } + /// Creates contract with given contract params. + pub fn create( + &mut self, + params: ActionParams, + substate: &mut Substate, + tracer: &mut T, + vm_tracer: &mut V, + ) -> vm::Result + where + T: Tracer, + V: VMTracer, + { + self.create_with_stack_depth(params, substate, 0, tracer, vm_tracer) + } - /// Finalizes the transaction (does refunds and suicides). - fn finalize( - &mut self, - t: &SignedTransaction, - mut substate: Substate, - result: vm::Result, - output: Bytes, - trace: Vec, - vm_trace: Option - ) -> Result, ExecutionError> { - let schedule = self.schedule; + /// Finalizes the transaction (does refunds and suicides). + fn finalize( + &mut self, + t: &SignedTransaction, + mut substate: Substate, + result: vm::Result, + output: Bytes, + trace: Vec, + vm_trace: Option, + ) -> Result, ExecutionError> { + let schedule = self.schedule; - // refunds from SSTORE nonzero -> zero - assert!(substate.sstore_clears_refund >= 0, "On transaction level, sstore clears refund cannot go below zero."); - let sstore_refunds = U256::from(substate.sstore_clears_refund as u64); - // refunds from contract suicides - let suicide_refunds = U256::from(schedule.suicide_refund_gas) * U256::from(substate.suicides.len()); - let refunds_bound = sstore_refunds + suicide_refunds; + // refunds from SSTORE nonzero -> zero + assert!( + substate.sstore_clears_refund >= 0, + "On transaction level, sstore clears refund cannot go below zero." 
+ ); + let sstore_refunds = U256::from(substate.sstore_clears_refund as u64); + // refunds from contract suicides + let suicide_refunds = + U256::from(schedule.suicide_refund_gas) * U256::from(substate.suicides.len()); + let refunds_bound = sstore_refunds + suicide_refunds; - // real ammount to refund - let gas_left_prerefund = match result { Ok(FinalizationResult{ gas_left, .. }) => gas_left, _ => 0.into() }; - let refunded = cmp::min(refunds_bound, (t.gas - gas_left_prerefund) >> 1); - let gas_left = gas_left_prerefund + refunded; + // real ammount to refund + let gas_left_prerefund = match result { + Ok(FinalizationResult { gas_left, .. }) => gas_left, + _ => 0.into(), + }; + let refunded = cmp::min(refunds_bound, (t.gas - gas_left_prerefund) >> 1); + let gas_left = gas_left_prerefund + refunded; - let gas_used = t.gas.saturating_sub(gas_left); - let (refund_value, overflow_1) = gas_left.overflowing_mul(t.gas_price); - let (fees_value, overflow_2) = gas_used.overflowing_mul(t.gas_price); - if overflow_1 || overflow_2 { - return Err(ExecutionError::TransactionMalformed("U256 Overflow".to_string())); - } + let gas_used = t.gas.saturating_sub(gas_left); + let (refund_value, overflow_1) = gas_left.overflowing_mul(t.gas_price); + let (fees_value, overflow_2) = gas_used.overflowing_mul(t.gas_price); + if overflow_1 || overflow_2 { + return Err(ExecutionError::TransactionMalformed( + "U256 Overflow".to_string(), + )); + } - - trace!("exec::finalize: t.gas={}, sstore_refunds={}, suicide_refunds={}, refunds_bound={}, gas_left_prerefund={}, refunded={}, gas_left={}, gas_used={}, refund_value={}, fees_value={}\n", + trace!("exec::finalize: t.gas={}, sstore_refunds={}, suicide_refunds={}, refunds_bound={}, gas_left_prerefund={}, refunded={}, gas_left={}, gas_used={}, refund_value={}, fees_value={}\n", t.gas, sstore_refunds, suicide_refunds, refunds_bound, gas_left_prerefund, refunded, gas_left, gas_used, refund_value, fees_value); - let sender = t.sender(); - 
trace!("exec::finalize: Refunding refund_value={}, sender={}\n", refund_value, sender); - // Below: NoEmpty is safe since the sender must already be non-null to have sent this transaction - self.state.add_balance(&sender, &refund_value, CleanupMode::NoEmpty)?; - trace!("exec::finalize: Compensating author: fees_value={}, author={}\n", fees_value, &self.info.author); - self.state.add_balance(&self.info.author, &fees_value, substate.to_cleanup_mode(&schedule))?; + let sender = t.sender(); + trace!( + "exec::finalize: Refunding refund_value={}, sender={}\n", + refund_value, + sender + ); + // Below: NoEmpty is safe since the sender must already be non-null to have sent this transaction + self.state + .add_balance(&sender, &refund_value, CleanupMode::NoEmpty)?; + trace!( + "exec::finalize: Compensating author: fees_value={}, author={}\n", + fees_value, + &self.info.author + ); + self.state.add_balance( + &self.info.author, + &fees_value, + substate.to_cleanup_mode(&schedule), + )?; - // perform suicides - for address in &substate.suicides { - self.state.kill_account(address); - } + // perform suicides + for address in &substate.suicides { + self.state.kill_account(address); + } - // perform garbage-collection - let min_balance = if schedule.kill_dust != CleanDustMode::Off { Some(U256::from(schedule.tx_gas).overflowing_mul(t.gas_price).0) } else { None }; - self.state.kill_garbage(&substate.touched, schedule.kill_empty, &min_balance, schedule.kill_dust == CleanDustMode::WithCodeAndStorage)?; + // perform garbage-collection + let min_balance = if schedule.kill_dust != CleanDustMode::Off { + Some(U256::from(schedule.tx_gas).overflowing_mul(t.gas_price).0) + } else { + None + }; + self.state.kill_garbage( + &substate.touched, + schedule.kill_empty, + &min_balance, + schedule.kill_dust == CleanDustMode::WithCodeAndStorage, + )?; - match result { - Err(vm::Error::Internal(msg)) => Err(ExecutionError::Internal(msg)), - Err(exception) => { - Ok(Executed { - exception: 
Some(exception), - gas: t.gas, - gas_used: t.gas, - refunded: U256::zero(), - cumulative_gas_used: self.info.gas_used + t.gas, - logs: vec![], - contracts_created: vec![], - output: output, - trace: trace, - vm_trace: vm_trace, - state_diff: None, - }) - }, - Ok(r) => { - Ok(Executed { - exception: if r.apply_state { None } else { Some(vm::Error::Reverted) }, - gas: t.gas, - gas_used: gas_used, - refunded: refunded, - cumulative_gas_used: self.info.gas_used + gas_used, - logs: substate.logs, - contracts_created: substate.contracts_created, - output: output, - trace: trace, - vm_trace: vm_trace, - state_diff: None, - }) - }, - } - } + match result { + Err(vm::Error::Internal(msg)) => Err(ExecutionError::Internal(msg)), + Err(exception) => Ok(Executed { + exception: Some(exception), + gas: t.gas, + gas_used: t.gas, + refunded: U256::zero(), + cumulative_gas_used: self.info.gas_used + t.gas, + logs: vec![], + contracts_created: vec![], + output: output, + trace: trace, + vm_trace: vm_trace, + state_diff: None, + }), + Ok(r) => Ok(Executed { + exception: if r.apply_state { + None + } else { + Some(vm::Error::Reverted) + }, + gas: t.gas, + gas_used: gas_used, + refunded: refunded, + cumulative_gas_used: self.info.gas_used + gas_used, + logs: substate.logs, + contracts_created: substate.contracts_created, + output: output, + trace: trace, + vm_trace: vm_trace, + state_diff: None, + }), + } + } } #[cfg(test)] #[allow(dead_code)] mod tests { - use std::sync::Arc; - use std::str::FromStr; - use rustc_hex::FromHex; - use ethkey::{Generator, Random}; - use super::*; - use ethereum_types::{H256, U256, U512, Address}; - use vm::{ActionParams, ActionValue, CallType, EnvInfo, CreateContractAddress}; - use evm::{Factory, VMType}; - use error::ExecutionError; - use machine::EthereumMachine; - use state::{Substate, CleanupMode}; - use test_helpers::{get_temp_state_with_factory, get_temp_state}; - use trace::trace; - use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer}; - use 
trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, VMTracer, NoopVMTracer, ExecutiveVMTracer}; - use types::transaction::{Action, Transaction}; + use super::*; + use error::ExecutionError; + use ethereum_types::{Address, H256, U256, U512}; + use ethkey::{Generator, Random}; + use evm::{Factory, VMType}; + use machine::EthereumMachine; + use rustc_hex::FromHex; + use state::{CleanupMode, Substate}; + use std::{str::FromStr, sync::Arc}; + use test_helpers::{get_temp_state, get_temp_state_with_factory}; + use trace::{ + trace, ExecutiveTracer, ExecutiveVMTracer, FlatTrace, MemoryDiff, NoopTracer, NoopVMTracer, + StorageDiff, Tracer, VMExecutedOperation, VMOperation, VMTrace, VMTracer, + }; + use types::transaction::{Action, Transaction}; + use vm::{ActionParams, ActionValue, CallType, CreateContractAddress, EnvInfo}; - fn make_frontier_machine(max_depth: usize) -> EthereumMachine { - let mut machine = ::ethereum::new_frontier_test_machine(); - machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); - machine - } + fn make_frontier_machine(max_depth: usize) -> EthereumMachine { + let mut machine = ::ethereum::new_frontier_test_machine(); + machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); + machine + } - fn make_byzantium_machine(max_depth: usize) -> EthereumMachine { - let mut machine = ::ethereum::new_byzantium_test_machine(); - machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); - machine - } + fn make_byzantium_machine(max_depth: usize) -> EthereumMachine { + let mut machine = ::ethereum::new_byzantium_test_machine(); + machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); + machine + } - #[test] - fn test_contract_address() { - let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let expected_address = Address::from_str("3f09c73a5ed19289fb9bdc72f1742566df146f56").unwrap(); - 
assert_eq!(expected_address, contract_address(CreateContractAddress::FromSenderAndNonce, &address, &U256::from(88), &[]).0); - } + #[test] + fn test_contract_address() { + let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let expected_address = + Address::from_str("3f09c73a5ed19289fb9bdc72f1742566df146f56").unwrap(); + assert_eq!( + expected_address, + contract_address( + CreateContractAddress::FromSenderAndNonce, + &address, + &U256::from(88), + &[] + ) + .0 + ); + } - // TODO: replace params with transactions! - evm_test!{test_sender_balance: test_sender_balance_int} - fn test_sender_balance(factory: Factory) { - let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new("3331600055".from_hex().unwrap())); - params.value = ActionValue::Transfer(U256::from(0x7)); - let mut state = get_temp_state_with_factory(factory); - state.add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty).unwrap(); - let info = EnvInfo::default(); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); + // TODO: replace params with transactions! + evm_test! 
{test_sender_balance: test_sender_balance_int} + fn test_sender_balance(factory: Factory) { + let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new("3331600055".from_hex().unwrap())); + params.value = ActionValue::Transfer(U256::from(0x7)); + let mut state = get_temp_state_with_factory(factory); + state + .add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty) + .unwrap(); + let info = EnvInfo::default(); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); - let FinalizationResult { gas_left, .. } = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() - }; + let FinalizationResult { gas_left, .. } = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .unwrap() + }; - assert_eq!(gas_left, U256::from(79_975)); - assert_eq!(state.storage_at(&address, &H256::new()).unwrap(), H256::from(&U256::from(0xf9u64))); - assert_eq!(state.balance(&sender).unwrap(), U256::from(0xf9)); - assert_eq!(state.balance(&address).unwrap(), U256::from(0x7)); - assert_eq!(substate.contracts_created.len(), 0); + assert_eq!(gas_left, U256::from(79_975)); + assert_eq!( + state.storage_at(&address, &H256::new()).unwrap(), + H256::from(&U256::from(0xf9u64)) + ); + assert_eq!(state.balance(&sender).unwrap(), U256::from(0xf9)); + assert_eq!(state.balance(&address).unwrap(), U256::from(0x7)); + assert_eq!(substate.contracts_created.len(), 0); - // TODO: just test state root. 
- } + // TODO: just test state root. + } - evm_test!{test_create_contract_out_of_depth: test_create_contract_out_of_depth_int} - fn test_create_contract_out_of_depth(factory: Factory) { - // code: - // - // 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes? - // 60 00 - push 0 - // 52 - // 60 1d - push 29 - // 60 03 - push 3 - // 60 17 - push 17 - // f0 - create - // 60 00 - push 0 - // 55 sstore - // - // other code: - // - // 60 10 - push 16 - // 80 - duplicate first stack item - // 60 0c - push 12 - // 60 00 - push 0 - // 39 - copy current code to memory - // 60 00 - push 0 - // f3 - return + evm_test! {test_create_contract_out_of_depth: test_create_contract_out_of_depth_int} + fn test_create_contract_out_of_depth(factory: Factory) { + // code: + // + // 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes? + // 60 00 - push 0 + // 52 + // 60 1d - push 29 + // 60 03 - push 3 + // 60 17 - push 17 + // f0 - create + // 60 00 - push 0 + // 55 sstore + // + // other code: + // + // 60 10 - push 16 + // 80 - duplicate first stack item + // 60 0c - push 12 + // 60 00 - push 0 + // 39 - copy current code to memory + // 60 00 - push 0 + // f3 - return - let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0600055".from_hex().unwrap(); + let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0600055".from_hex().unwrap(); - let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - // TODO: add tests for 'callcreate' - //let next_address = contract_address(&address, &U256::zero()); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.origin = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.value 
= ActionValue::Transfer(U256::from(100)); - let mut state = get_temp_state_with_factory(factory); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); - let info = EnvInfo::default(); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); + let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + // TODO: add tests for 'callcreate' + //let next_address = contract_address(&address, &U256::zero()); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::from(100)); + let mut state = get_temp_state_with_factory(factory); + state + .add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty) + .unwrap(); + let info = EnvInfo::default(); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); - let FinalizationResult { gas_left, .. } = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() - }; + let FinalizationResult { gas_left, .. 
} = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .unwrap() + }; - assert_eq!(gas_left, U256::from(62_976)); - // ended with max depth - assert_eq!(substate.contracts_created.len(), 0); - } + assert_eq!(gas_left, U256::from(62_976)); + // ended with max depth + assert_eq!(substate.contracts_created.len(), 0); + } - #[test] - fn test_call_to_precompiled_tracing() { - // code: - // - // 60 00 - push 00 out size - // 60 00 - push 00 out offset - // 60 00 - push 00 in size - // 60 00 - push 00 in offset - // 60 01 - push 01 value - // 60 03 - push 03 to - // 61 ffff - push fff gas - // f1 - CALL + #[test] + fn test_call_to_precompiled_tracing() { + // code: + // + // 60 00 - push 00 out size + // 60 00 - push 00 out offset + // 60 00 - push 00 in size + // 60 00 - push 00 in offset + // 60 01 - push 01 value + // 60 03 - push 03 to + // 61 ffff - push fff gas + // f1 - CALL - let code = "60006000600060006001600361fffff1".from_hex().unwrap(); - let sender = Address::from_str("4444444444444444444444444444444444444444").unwrap(); - let address = Address::from_str("5555555555555555555555555555555555555555").unwrap(); + let code = "60006000600060006001600361fffff1".from_hex().unwrap(); + let sender = Address::from_str("4444444444444444444444444444444444444444").unwrap(); + let address = Address::from_str("5555555555555555555555555555555555555555").unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.code_address = address.clone(); - params.sender = sender.clone(); - params.origin = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.value = ActionValue::Transfer(U256::from(100)); - params.call_type = CallType::Call; - let mut state = get_temp_state(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); - let info = EnvInfo::default(); - let machine = 
make_byzantium_machine(5); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); - let mut tracer = ExecutiveTracer::default(); - let mut vm_tracer = ExecutiveVMTracer::toplevel(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.code_address = address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::from(100)); + params.call_type = CallType::Call; + let mut state = get_temp_state(); + state + .add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty) + .unwrap(); + let info = EnvInfo::default(); + let machine = make_byzantium_machine(5); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); + let mut tracer = ExecutiveTracer::default(); + let mut vm_tracer = ExecutiveVMTracer::toplevel(); - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut substate, &mut tracer, &mut vm_tracer).unwrap(); + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.call(params, &mut substate, &mut tracer, &mut vm_tracer) + .unwrap(); - assert_eq!(tracer.drain(), vec![FlatTrace { - action: trace::Action::Call(trace::Call { - from: "4444444444444444444444444444444444444444".into(), - to: "5555555555555555555555555555555555555555".into(), - value: 100.into(), - gas: 100_000.into(), - input: vec![], - call_type: CallType::Call - }), - result: trace::Res::Call(trace::CallResult { - gas_used: 33021.into(), - output: vec![] - }), - subtraces: 1, - trace_address: Default::default() - }, FlatTrace { - action: trace::Action::Call(trace::Call { - from: "5555555555555555555555555555555555555555".into(), - to: "0000000000000000000000000000000000000003".into(), - value: 1.into(), - gas: 66560.into(), - input: vec![], - call_type: CallType::Call - }), result: 
trace::Res::Call(trace::CallResult { - gas_used: 600.into(), - output: vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 156, 17, 133, 165, 197, 233, 252, 84, 97, 40, 8, 151, 126, 232, 245, 72, 178, 37, 141, 49] - }), - subtraces: 0, - trace_address: vec![0].into_iter().collect(), - }]); - } + assert_eq!( + tracer.drain(), + vec![ + FlatTrace { + action: trace::Action::Call(trace::Call { + from: "4444444444444444444444444444444444444444".into(), + to: "5555555555555555555555555555555555555555".into(), + value: 100.into(), + gas: 100_000.into(), + input: vec![], + call_type: CallType::Call + }), + result: trace::Res::Call(trace::CallResult { + gas_used: 33021.into(), + output: vec![] + }), + subtraces: 1, + trace_address: Default::default() + }, + FlatTrace { + action: trace::Action::Call(trace::Call { + from: "5555555555555555555555555555555555555555".into(), + to: "0000000000000000000000000000000000000003".into(), + value: 1.into(), + gas: 66560.into(), + input: vec![], + call_type: CallType::Call + }), + result: trace::Res::Call(trace::CallResult { + gas_used: 600.into(), + output: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 156, 17, 133, 165, 197, 233, 252, + 84, 97, 40, 8, 151, 126, 232, 245, 72, 178, 37, 141, 49 + ] + }), + subtraces: 0, + trace_address: vec![0].into_iter().collect(), + } + ] + ); + } - #[test] - // Tracing is not suported in JIT - fn test_call_to_create() { - // code: - // - // 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes? 
- // 60 00 - push 0 - // 52 - // 60 1d - push 29 - // 60 03 - push 3 - // 60 17 - push 23 - // f0 - create - // 60 00 - push 0 - // 55 sstore - // - // other code: - // - // 60 10 - push 16 - // 80 - duplicate first stack item - // 60 0c - push 12 - // 60 00 - push 0 - // 39 - copy current code to memory - // 60 00 - push 0 - // f3 - return + #[test] + // Tracing is not suported in JIT + fn test_call_to_create() { + // code: + // + // 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes? + // 60 00 - push 0 + // 52 + // 60 1d - push 29 + // 60 03 - push 3 + // 60 17 - push 23 + // f0 - create + // 60 00 - push 0 + // 55 sstore + // + // other code: + // + // 60 10 - push 16 + // 80 - duplicate first stack item + // 60 0c - push 12 + // 60 00 - push 0 + // 39 - copy current code to memory + // 60 00 - push 0 + // f3 - return - let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0600055".from_hex().unwrap(); + let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0600055".from_hex().unwrap(); - let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - // TODO: add tests for 'callcreate' - //let next_address = contract_address(&address, &U256::zero()); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.code_address = address.clone(); - params.sender = sender.clone(); - params.origin = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.value = ActionValue::Transfer(U256::from(100)); - params.call_type = CallType::Call; - let mut state = get_temp_state(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); - let info = EnvInfo::default(); - let machine = make_frontier_machine(5); - let schedule = 
machine.schedule(info.number); - let mut substate = Substate::new(); - let mut tracer = ExecutiveTracer::default(); - let mut vm_tracer = ExecutiveVMTracer::toplevel(); + let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + // TODO: add tests for 'callcreate' + //let next_address = contract_address(&address, &U256::zero()); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.code_address = address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::from(100)); + params.call_type = CallType::Call; + let mut state = get_temp_state(); + state + .add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty) + .unwrap(); + let info = EnvInfo::default(); + let machine = make_frontier_machine(5); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); + let mut tracer = ExecutiveTracer::default(); + let mut vm_tracer = ExecutiveVMTracer::toplevel(); - let FinalizationResult { gas_left, .. } = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut substate, &mut tracer, &mut vm_tracer).unwrap() - }; + let FinalizationResult { gas_left, .. 
} = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.call(params, &mut substate, &mut tracer, &mut vm_tracer) + .unwrap() + }; - assert_eq!(gas_left, U256::from(44_752)); + assert_eq!(gas_left, U256::from(44_752)); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "cd1722f3947def4cf144679da39c4c32bdc35681".into(), - to: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), - value: 100.into(), - gas: 100000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(55_248), - output: vec![], - }), - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Create(trace::Create { - from: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), - value: 23.into(), - gas: 67979.into(), - init: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85] - }), - result: trace::Res::Create(trace::CreateResult { - gas_used: U256::from(3224), - address: Address::from_str("c6d80f262ae5e0f164e5fde365044d7ada2bfa34").unwrap(), - code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] - }), - }]; + let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: "cd1722f3947def4cf144679da39c4c32bdc35681".into(), + to: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), + value: 100.into(), + gas: 100000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(55_248), + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Create(trace::Create { + from: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), + value: 23.into(), + gas: 67979.into(), + init: 
vec![ + 96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, + 87, 0, 91, 96, 32, 53, 96, 0, 53, 85, + ], + }), + result: trace::Res::Create(trace::CreateResult { + gas_used: U256::from(3224), + address: Address::from_str("c6d80f262ae5e0f164e5fde365044d7ada2bfa34").unwrap(), + code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53], + }), + }, + ]; - assert_eq!(tracer.drain(), expected_trace); + assert_eq!(tracer.drain(), expected_trace); - let expected_vm_trace = VMTrace { + let expected_vm_trace = VMTrace { parent_step: 0, code: vec![124, 96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85, 96, 0, 82, 96, 29, 96, 3, 96, 23, 240, 96, 0, 85], operations: vec![ @@ -1496,704 +1964,986 @@ mod tests { } ] }; - assert_eq!(vm_tracer.drain().unwrap(), expected_vm_trace); - } - - #[test] - fn test_trace_reverted_create() { - // code: - // - // 65 60016000fd - push 5 bytes - // 60 00 - push 0 - // 52 mstore - // 60 05 - push 5 - // 60 1b - push 27 - // 60 17 - push 23 - // f0 - create - // 60 00 - push 0 - // 55 sstore - // - // other code: - // - // 60 01 - // 60 00 - // fd - revert - - let code = "6460016000fd6000526005601b6017f0600055".from_hex().unwrap(); - - let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - let mut params = ActionParams::default(); - params.address = address.clone(); - params.code_address = address.clone(); - params.sender = sender.clone(); - params.origin = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.value = ActionValue::Transfer(U256::from(100)); - params.call_type = CallType::Call; - let mut state = get_temp_state(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); - let info = EnvInfo::default(); - let machine = 
::ethereum::new_byzantium_test_machine(); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); - let mut tracer = ExecutiveTracer::default(); - let mut vm_tracer = ExecutiveVMTracer::toplevel(); - - let FinalizationResult { gas_left, .. } = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut substate, &mut tracer, &mut vm_tracer).unwrap() - }; - - assert_eq!(gas_left, U256::from(62967)); - - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "cd1722f3947def4cf144679da39c4c32bdc35681".into(), - to: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), - value: 100.into(), - gas: 100_000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(37_033), - output: vec![], - }), - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Create(trace::Create { - from: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), - value: 23.into(), - gas: 66_917.into(), - init: vec![0x60, 0x01, 0x60, 0x00, 0xfd] - }), - result: trace::Res::FailedCreate(vm::Error::Reverted.into()), - }]; - - assert_eq!(tracer.drain(), expected_trace); - } - - #[test] - fn test_create_contract() { - // Tracing is not supported in JIT - // code: - // - // 60 10 - push 16 - // 80 - duplicate first stack item - // 60 0c - push 12 - // 60 00 - push 0 - // 39 - copy current code to memory - // 60 00 - push 0 - // f3 - return - - let code = "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(); - - let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - // TODO: add tests for 'callcreate' - //let next_address = contract_address(&address, &U256::zero()); - let mut 
params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.origin = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.value = ActionValue::Transfer(100.into()); - let mut state = get_temp_state(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); - let info = EnvInfo::default(); - let machine = make_frontier_machine(5); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); - let mut tracer = ExecutiveTracer::default(); - let mut vm_tracer = ExecutiveVMTracer::toplevel(); - - let FinalizationResult { gas_left, .. } = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params.clone(), &mut substate, &mut tracer, &mut vm_tracer).unwrap() - }; - - assert_eq!(gas_left, U256::from(96_776)); - - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 0, - action: trace::Action::Create(trace::Create { - from: params.sender, - value: 100.into(), - gas: params.gas, - init: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85], - }), - result: trace::Res::Create(trace::CreateResult { - gas_used: U256::from(3224), - address: params.address, - code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] - }), - }]; - - assert_eq!(tracer.drain(), expected_trace); - - let expected_vm_trace = VMTrace { - parent_step: 0, - code: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85], - operations: vec![ - VMOperation { pc: 0, instruction: 96, gas_cost: 3.into(), executed: Some(VMExecutedOperation { gas_used: 99997.into(), stack_push: vec_into![16], mem_diff: None, store_diff: None }) }, - VMOperation { pc: 2, instruction: 128, gas_cost: 3.into(), executed: Some(VMExecutedOperation { gas_used: 99994.into(), stack_push: 
vec_into![16, 16], mem_diff: None, store_diff: None }) }, - VMOperation { pc: 3, instruction: 96, gas_cost: 3.into(), executed: Some(VMExecutedOperation { gas_used: 99991.into(), stack_push: vec_into![12], mem_diff: None, store_diff: None }) }, - VMOperation { pc: 5, instruction: 96, gas_cost: 3.into(), executed: Some(VMExecutedOperation { gas_used: 99988.into(), stack_push: vec_into![0], mem_diff: None, store_diff: None }) }, - VMOperation { pc: 7, instruction: 57, gas_cost: 9.into(), executed: Some(VMExecutedOperation { gas_used: 99979.into(), stack_push: vec_into![], mem_diff: Some(MemoryDiff { offset: 0, data: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] }), store_diff: None }) }, - VMOperation { pc: 8, instruction: 96, gas_cost: 3.into(), executed: Some(VMExecutedOperation { gas_used: 99976.into(), stack_push: vec_into![0], mem_diff: None, store_diff: None }) }, - VMOperation { pc: 10, instruction: 243, gas_cost: 0.into(), executed: Some(VMExecutedOperation { gas_used: 99976.into(), stack_push: vec_into![], mem_diff: None, store_diff: None }) } - ], - subs: vec![] - }; - assert_eq!(vm_tracer.drain().unwrap(), expected_vm_trace); - } - - evm_test!{test_create_contract_value_too_high: test_create_contract_value_too_high_int} - fn test_create_contract_value_too_high(factory: Factory) { - // code: - // - // 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes? - // 60 00 - push 0 - // 52 - // 60 1d - push 29 - // 60 03 - push 3 - // 60 e6 - push 230 - // f0 - create a contract trying to send 230. 
- // 60 00 - push 0 - // 55 sstore - // - // other code: - // - // 60 10 - push 16 - // 80 - duplicate first stack item - // 60 0c - push 12 - // 60 00 - push 0 - // 39 - copy current code to memory - // 60 00 - push 0 - // f3 - return - - let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d600360e6f0600055".from_hex().unwrap(); - - let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - // TODO: add tests for 'callcreate' - //let next_address = contract_address(&address, &U256::zero()); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.origin = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.value = ActionValue::Transfer(U256::from(100)); - let mut state = get_temp_state_with_factory(factory); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); - let info = EnvInfo::default(); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); - - let FinalizationResult { gas_left, .. } = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() - }; - - assert_eq!(gas_left, U256::from(62_976)); - assert_eq!(substate.contracts_created.len(), 0); - } - - evm_test!{test_create_contract_without_max_depth: test_create_contract_without_max_depth_int} - fn test_create_contract_without_max_depth(factory: Factory) { - // code: - // - // 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes? 
- // 60 00 - push 0 - // 52 - // 60 1d - push 29 - // 60 03 - push 3 - // 60 17 - push 17 - // f0 - create - // 60 00 - push 0 - // 55 sstore - // - // other code: - // - // 60 10 - push 16 - // 80 - duplicate first stack item - // 60 0c - push 12 - // 60 00 - push 0 - // 39 - copy current code to memory - // 60 00 - push 0 - // f3 - return - - let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0".from_hex().unwrap(); - - let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - let next_address = contract_address(CreateContractAddress::FromSenderAndNonce, &address, &U256::zero(), &[]).0; - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.origin = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.value = ActionValue::Transfer(U256::from(100)); - let mut state = get_temp_state_with_factory(factory); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); - let info = EnvInfo::default(); - let machine = make_frontier_machine(1024); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); - - { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap(); - } - - assert_eq!(substate.contracts_created.len(), 1); - assert_eq!(substate.contracts_created[0], next_address); - } - - // test is incorrect, mk - // TODO: fix (preferred) or remove - evm_test_ignore!{test_aba_calls: test_aba_calls_int} - fn test_aba_calls(factory: Factory) { - // 60 00 - push 0 - // 60 00 - push 0 - // 60 00 - push 0 - // 60 00 - push 0 - // 60 18 - push 18 - // 73 945304eb96065b2a98b57a48a06ae28d285a71b5 - push this address - // 61 03e8 - push 1000 - // f1 - 
message call - // 58 - get PC - // 55 - sstore - - let code_a = "6000600060006000601873945304eb96065b2a98b57a48a06ae28d285a71b56103e8f15855".from_hex().unwrap(); - - // 60 00 - push 0 - // 60 00 - push 0 - // 60 00 - push 0 - // 60 00 - push 0 - // 60 17 - push 17 - // 73 0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6 - push this address - // 61 0x01f4 - push 500 - // f1 - message call - // 60 01 - push 1 - // 01 - add - // 58 - get PC - // 55 - sstore - let code_b = "60006000600060006017730f572e5295c57f15886f9b263e2f6d2d6c7b5ec66101f4f16001015855".from_hex().unwrap(); - - let address_a = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let address_b = Address::from_str("945304eb96065b2a98b57a48a06ae28d285a71b5" ).unwrap(); - let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - - let mut params = ActionParams::default(); - params.address = address_a.clone(); - params.sender = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code_a.clone())); - params.value = ActionValue::Transfer(U256::from(100_000)); - - let mut state = get_temp_state_with_factory(factory); - state.init_code(&address_a, code_a.clone()).unwrap(); - state.init_code(&address_b, code_b.clone()).unwrap(); - state.add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty).unwrap(); - - let info = EnvInfo::default(); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); - - let FinalizationResult { gas_left, .. 
} = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() - }; - - assert_eq!(gas_left, U256::from(73_237)); - assert_eq!(state.storage_at(&address_a, &H256::from(&U256::from(0x23))).unwrap(), H256::from(&U256::from(1))); - } - - // test is incorrect, mk - // TODO: fix (preferred) or remove - evm_test_ignore!{test_recursive_bomb1: test_recursive_bomb1_int} - fn test_recursive_bomb1(factory: Factory) { - // 60 01 - push 1 - // 60 00 - push 0 - // 54 - sload - // 01 - add - // 60 00 - push 0 - // 55 - sstore - // 60 00 - push 0 - // 60 00 - push 0 - // 60 00 - push 0 - // 60 00 - push 0 - // 60 00 - push 0 - // 30 - load address - // 60 e0 - push e0 - // 5a - get gas - // 03 - sub - // f1 - message call (self in this case) - // 60 01 - push 1 - // 55 - sstore - let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let code = "600160005401600055600060006000600060003060e05a03f1600155".from_hex().unwrap(); - let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code.clone())); - let mut state = get_temp_state_with_factory(factory); - state.init_code(&address, code).unwrap(); - let info = EnvInfo::default(); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); - - let FinalizationResult { gas_left, .. 
} = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() - }; - - assert_eq!(gas_left, U256::from(59_870)); - assert_eq!(state.storage_at(&address, &H256::from(&U256::zero())).unwrap(), H256::from(&U256::from(1))); - assert_eq!(state.storage_at(&address, &H256::from(&U256::one())).unwrap(), H256::from(&U256::from(1))); - } - - // test is incorrect, mk - // TODO: fix (preferred) or remove - evm_test_ignore!{test_transact_simple: test_transact_simple_int} - fn test_transact_simple(factory: Factory) { - let keypair = Random.generate().unwrap(); - let t = Transaction { - action: Action::Create, - value: U256::from(17), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::zero(), - nonce: U256::zero() - }.sign(keypair.secret(), None); - let sender = t.sender(); - let contract = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - - let mut state = get_temp_state_with_factory(factory); - state.add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty).unwrap(); - let mut info = EnvInfo::default(); - info.gas_limit = U256::from(100_000); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - - let executed = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - let opts = TransactOptions::with_no_tracing(); - ex.transact(&t, opts).unwrap() - }; - - assert_eq!(executed.gas, U256::from(100_000)); - assert_eq!(executed.gas_used, U256::from(41_301)); - assert_eq!(executed.refunded, U256::from(58_699)); - assert_eq!(executed.cumulative_gas_used, U256::from(41_301)); - assert_eq!(executed.logs.len(), 0); - assert_eq!(executed.contracts_created.len(), 0); - assert_eq!(state.balance(&sender).unwrap(), U256::from(1)); - assert_eq!(state.balance(&contract).unwrap(), U256::from(17)); - assert_eq!(state.nonce(&sender).unwrap(), U256::from(1)); - 
assert_eq!(state.storage_at(&contract, &H256::new()).unwrap(), H256::from(&U256::from(1))); - } - - evm_test!{test_transact_invalid_nonce: test_transact_invalid_nonce_int} - fn test_transact_invalid_nonce(factory: Factory) { - let keypair = Random.generate().unwrap(); - let t = Transaction { - action: Action::Create, - value: U256::from(17), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::zero(), - nonce: U256::one() - }.sign(keypair.secret(), None); - let sender = t.sender(); - - let mut state = get_temp_state_with_factory(factory); - state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap(); - let mut info = EnvInfo::default(); - info.gas_limit = U256::from(100_000); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - - let res = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - let opts = TransactOptions::with_no_tracing(); - ex.transact(&t, opts) - }; - - match res { - Err(ExecutionError::InvalidNonce { expected, got }) - if expected == U256::zero() && got == U256::one() => (), - _ => assert!(false, "Expected invalid nonce error.") - } - } - - evm_test!{test_transact_gas_limit_reached: test_transact_gas_limit_reached_int} - fn test_transact_gas_limit_reached(factory: Factory) { - let keypair = Random.generate().unwrap(); - let t = Transaction { - action: Action::Create, - value: U256::from(17), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(80_001), - gas_price: U256::zero(), - nonce: U256::zero() - }.sign(keypair.secret(), None); - let sender = t.sender(); - - let mut state = get_temp_state_with_factory(factory); - state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap(); - let mut info = EnvInfo::default(); - info.gas_used = U256::from(20_000); - info.gas_limit = U256::from(100_000); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - - let res = { - let mut ex = 
Executive::new(&mut state, &info, &machine, &schedule); - let opts = TransactOptions::with_no_tracing(); - ex.transact(&t, opts) - }; - - match res { - Err(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, gas }) - if gas_limit == U256::from(100_000) && gas_used == U256::from(20_000) && gas == U256::from(80_001) => (), - _ => assert!(false, "Expected block gas limit error.") - } - } - - evm_test!{test_not_enough_cash: test_not_enough_cash_int} - fn test_not_enough_cash(factory: Factory) { - - let keypair = Random.generate().unwrap(); - let t = Transaction { - action: Action::Create, - value: U256::from(18), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::one(), - nonce: U256::zero() - }.sign(keypair.secret(), None); - let sender = t.sender(); - - let mut state = get_temp_state_with_factory(factory); - state.add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty).unwrap(); - let mut info = EnvInfo::default(); - info.gas_limit = U256::from(100_000); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - - let res = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - let opts = TransactOptions::with_no_tracing(); - ex.transact(&t, opts) - }; - - match res { - Err(ExecutionError::NotEnoughCash { required , got }) - if required == U512::from(100_018) && got == U512::from(100_017) => (), - _ => assert!(false, "Expected not enough cash error. 
{:?}", res) - } - } - - evm_test!{test_keccak: test_keccak_int} - fn test_keccak(factory: Factory) { - let code = "6064640fffffffff20600055".from_hex().unwrap(); - - let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &[]).0; - // TODO: add tests for 'callcreate' - //let next_address = contract_address(&address, &U256::zero()); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.origin = sender.clone(); - params.gas = U256::from(0x0186a0); - params.code = Some(Arc::new(code)); - params.value = ActionValue::Transfer(U256::from_str("0de0b6b3a7640000").unwrap()); - let mut state = get_temp_state_with_factory(factory); - state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap(), CleanupMode::NoEmpty).unwrap(); - let info = EnvInfo::default(); - let machine = make_frontier_machine(0); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); - - let result = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) - }; - - match result { - Err(_) => {}, - _ => panic!("Expected OutOfGas"), - } - } - - evm_test!{test_revert: test_revert_int} - fn test_revert(factory: Factory) { - let contract_address = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - // EIP-140 test case - let code = "6c726576657274656420646174616000557f726576657274206d657373616765000000000000000000000000000000000000600052600e6000fd".from_hex().unwrap(); - let returns = "726576657274206d657373616765".from_hex().unwrap(); - let mut state = get_temp_state_with_factory(factory.clone()); - state.add_balance(&sender, &U256::from_str("152d02c7e14af68000000").unwrap(), 
CleanupMode::NoEmpty).unwrap(); - state.commit().unwrap(); - - let mut params = ActionParams::default(); - params.address = contract_address.clone(); - params.sender = sender.clone(); - params.origin = sender.clone(); - params.gas = U256::from(20025); - params.code = Some(Arc::new(code)); - params.value = ActionValue::Transfer(U256::zero()); - let info = EnvInfo::default(); - let machine = ::ethereum::new_byzantium_test_machine(); - let schedule = machine.schedule(info.number); - let mut substate = Substate::new(); - - let mut output = [0u8; 14]; - let FinalizationResult { gas_left: result, return_data, .. } = { - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() - }; - (&mut output).copy_from_slice(&return_data[..(cmp::min(14, return_data.len()))]); - - assert_eq!(result, U256::from(1)); - assert_eq!(output[..], returns[..]); - assert_eq!(state.storage_at(&contract_address, &H256::from(&U256::zero())).unwrap(), H256::from(&U256::from(0))); - } - - evm_test!{test_eip1283: test_eip1283_int} - fn test_eip1283(factory: Factory) { - let x1 = Address::from(0x1000); - let x2 = Address::from(0x1001); - let y1 = Address::from(0x2001); - let y2 = Address::from(0x2002); - let operating_address = Address::from(0); - let k = H256::new(); - - let mut state = get_temp_state_with_factory(factory.clone()); - state.new_contract(&x1, U256::zero(), U256::from(1)).unwrap(); - state.init_code(&x1, "600160005560006000556001600055".from_hex().unwrap()).unwrap(); - state.new_contract(&x2, U256::zero(), U256::from(1)).unwrap(); - state.init_code(&x2, "600060005560016000556000600055".from_hex().unwrap()).unwrap(); - state.new_contract(&y1, U256::zero(), U256::from(1)).unwrap(); - state.init_code(&y1, "600060006000600061100062fffffff4".from_hex().unwrap()).unwrap(); - state.new_contract(&y2, U256::zero(), U256::from(1)).unwrap(); - state.init_code(&y2, 
"600060006000600061100162fffffff4".from_hex().unwrap()).unwrap(); - - let info = EnvInfo::default(); - let machine = ::ethereum::new_constantinople_test_machine(); - let schedule = machine.schedule(info.number); - - assert_eq!(state.storage_at(&operating_address, &k).unwrap(), H256::from(U256::from(0))); - // Test a call via top-level -> y1 -> x1 - let (FinalizationResult { gas_left, .. }, refund, gas) = { - let gas = U256::from(0xffffffffffu64); - let mut params = ActionParams::default(); - params.code = Some(Arc::new("6001600055600060006000600061200163fffffffff4".from_hex().unwrap())); - params.gas = gas; - let mut substate = Substate::new(); - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - let res = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap(); - - (res, substate.sstore_clears_refund, gas) - }; - let gas_used = gas - gas_left; - // sstore: 0 -> (1) -> () -> (1 -> 0 -> 1) - assert_eq!(gas_used, U256::from(41860)); - assert_eq!(refund, 19800); - - assert_eq!(state.storage_at(&operating_address, &k).unwrap(), H256::from(U256::from(1))); - // Test a call via top-level -> y2 -> x2 - let (FinalizationResult { gas_left, .. 
}, refund, gas) = { - let gas = U256::from(0xffffffffffu64); - let mut params = ActionParams::default(); - params.code = Some(Arc::new("6001600055600060006000600061200263fffffffff4".from_hex().unwrap())); - params.gas = gas; - let mut substate = Substate::new(); - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - let res = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap(); - - (res, substate.sstore_clears_refund, gas) - }; - let gas_used = gas - gas_left; - // sstore: 1 -> (1) -> () -> (0 -> 1 -> 0) - assert_eq!(gas_used, U256::from(11860)); - assert_eq!(refund, 19800); - } - - fn wasm_sample_code() -> Arc<Vec<u8>> { - Arc::new( + assert_eq!(vm_tracer.drain().unwrap(), expected_vm_trace); + } + + #[test] + fn test_trace_reverted_create() { + // code: + // + // 65 60016000fd - push 5 bytes + // 60 00 - push 0 + // 52 mstore + // 60 05 - push 5 + // 60 1b - push 27 + // 60 17 - push 23 + // f0 - create + // 60 00 - push 0 + // 55 sstore + // + // other code: + // + // 60 01 + // 60 00 + // fd - revert + + let code = "6460016000fd6000526005601b6017f0600055".from_hex().unwrap(); + + let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + let mut params = ActionParams::default(); + params.address = address.clone(); + params.code_address = address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::from(100)); + params.call_type = CallType::Call; + let mut state = get_temp_state(); + state + .add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty) + .unwrap(); + let info = EnvInfo::default(); + let machine = ::ethereum::new_byzantium_test_machine(); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new();
+ let mut tracer = ExecutiveTracer::default(); + let mut vm_tracer = ExecutiveVMTracer::toplevel(); + + let FinalizationResult { gas_left, .. } = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.call(params, &mut substate, &mut tracer, &mut vm_tracer) + .unwrap() + }; + + assert_eq!(gas_left, U256::from(62967)); + + let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: "cd1722f3947def4cf144679da39c4c32bdc35681".into(), + to: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), + value: 100.into(), + gas: 100_000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(37_033), + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Create(trace::Create { + from: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), + value: 23.into(), + gas: 66_917.into(), + init: vec![0x60, 0x01, 0x60, 0x00, 0xfd], + }), + result: trace::Res::FailedCreate(vm::Error::Reverted.into()), + }, + ]; + + assert_eq!(tracer.drain(), expected_trace); + } + + #[test] + fn test_create_contract() { + // Tracing is not supported in JIT + // code: + // + // 60 10 - push 16 + // 80 - duplicate first stack item + // 60 0c - push 12 + // 60 00 - push 0 + // 39 - copy current code to memory + // 60 00 - push 0 + // f3 - return + + let code = "601080600c6000396000f3006000355415600957005b60203560003555" + .from_hex() + .unwrap(); + + let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + // TODO: add tests for 'callcreate' + //let next_address = contract_address(&address, &U256::zero()); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = 
sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(100.into()); + let mut state = get_temp_state(); + state + .add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty) + .unwrap(); + let info = EnvInfo::default(); + let machine = make_frontier_machine(5); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); + let mut tracer = ExecutiveTracer::default(); + let mut vm_tracer = ExecutiveVMTracer::toplevel(); + + let FinalizationResult { gas_left, .. } = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.create(params.clone(), &mut substate, &mut tracer, &mut vm_tracer) + .unwrap() + }; + + assert_eq!(gas_left, U256::from(96_776)); + + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 0, + action: trace::Action::Create(trace::Create { + from: params.sender, + value: 100.into(), + gas: params.gas, + init: vec![ + 96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, + 91, 96, 32, 53, 96, 0, 53, 85, + ], + }), + result: trace::Res::Create(trace::CreateResult { + gas_used: U256::from(3224), + address: params.address, + code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53], + }), + }]; + + assert_eq!(tracer.drain(), expected_trace); + + let expected_vm_trace = VMTrace { + parent_step: 0, + code: vec![ + 96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, + 96, 32, 53, 96, 0, 53, 85, + ], + operations: vec![ + VMOperation { + pc: 0, + instruction: 96, + gas_cost: 3.into(), + executed: Some(VMExecutedOperation { + gas_used: 99997.into(), + stack_push: vec_into![16], + mem_diff: None, + store_diff: None, + }), + }, + VMOperation { + pc: 2, + instruction: 128, + gas_cost: 3.into(), + executed: Some(VMExecutedOperation { + gas_used: 99994.into(), + stack_push: vec_into![16, 16], + mem_diff: 
None, + store_diff: None, + }), + }, + VMOperation { + pc: 3, + instruction: 96, + gas_cost: 3.into(), + executed: Some(VMExecutedOperation { + gas_used: 99991.into(), + stack_push: vec_into![12], + mem_diff: None, + store_diff: None, + }), + }, + VMOperation { + pc: 5, + instruction: 96, + gas_cost: 3.into(), + executed: Some(VMExecutedOperation { + gas_used: 99988.into(), + stack_push: vec_into![0], + mem_diff: None, + store_diff: None, + }), + }, + VMOperation { + pc: 7, + instruction: 57, + gas_cost: 9.into(), + executed: Some(VMExecutedOperation { + gas_used: 99979.into(), + stack_push: vec_into![], + mem_diff: Some(MemoryDiff { + offset: 0, + data: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53], + }), + store_diff: None, + }), + }, + VMOperation { + pc: 8, + instruction: 96, + gas_cost: 3.into(), + executed: Some(VMExecutedOperation { + gas_used: 99976.into(), + stack_push: vec_into![0], + mem_diff: None, + store_diff: None, + }), + }, + VMOperation { + pc: 10, + instruction: 243, + gas_cost: 0.into(), + executed: Some(VMExecutedOperation { + gas_used: 99976.into(), + stack_push: vec_into![], + mem_diff: None, + store_diff: None, + }), + }, + ], + subs: vec![], + }; + assert_eq!(vm_tracer.drain().unwrap(), expected_vm_trace); + } + + evm_test! {test_create_contract_value_too_high: test_create_contract_value_too_high_int} + fn test_create_contract_value_too_high(factory: Factory) { + // code: + // + // 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes? + // 60 00 - push 0 + // 52 + // 60 1d - push 29 + // 60 03 - push 3 + // 60 e6 - push 230 + // f0 - create a contract trying to send 230. 
+ // 60 00 - push 0 + // 55 sstore + // + // other code: + // + // 60 10 - push 16 + // 80 - duplicate first stack item + // 60 0c - push 12 + // 60 00 - push 0 + // 39 - copy current code to memory + // 60 00 - push 0 + // f3 - return + + let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d600360e6f0600055".from_hex().unwrap(); + + let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + // TODO: add tests for 'callcreate' + //let next_address = contract_address(&address, &U256::zero()); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::from(100)); + let mut state = get_temp_state_with_factory(factory); + state + .add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty) + .unwrap(); + let info = EnvInfo::default(); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); + + let FinalizationResult { gas_left, .. } = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .unwrap() + }; + + assert_eq!(gas_left, U256::from(62_976)); + assert_eq!(substate.contracts_created.len(), 0); + } + + evm_test! {test_create_contract_without_max_depth: test_create_contract_without_max_depth_int} + fn test_create_contract_without_max_depth(factory: Factory) { + // code: + // + // 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes? 
+ // 60 00 - push 0 + // 52 + // 60 1d - push 29 + // 60 03 - push 3 + // 60 17 - push 17 + // f0 - create + // 60 00 - push 0 + // 55 sstore + // + // other code: + // + // 60 10 - push 16 + // 80 - duplicate first stack item + // 60 0c - push 12 + // 60 00 - push 0 + // 39 - copy current code to memory + // 60 00 - push 0 + // f3 - return + + let code = + "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0" + .from_hex() + .unwrap(); + + let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + let next_address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &address, + &U256::zero(), + &[], + ) + .0; + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::from(100)); + let mut state = get_temp_state_with_factory(factory); + state + .add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty) + .unwrap(); + let info = EnvInfo::default(); + let machine = make_frontier_machine(1024); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); + + { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .unwrap(); + } + + assert_eq!(substate.contracts_created.len(), 1); + assert_eq!(substate.contracts_created[0], next_address); + } + + // test is incorrect, mk + // TODO: fix (preferred) or remove + evm_test_ignore! 
{test_aba_calls: test_aba_calls_int} + fn test_aba_calls(factory: Factory) { + // 60 00 - push 0 + // 60 00 - push 0 + // 60 00 - push 0 + // 60 00 - push 0 + // 60 18 - push 18 + // 73 945304eb96065b2a98b57a48a06ae28d285a71b5 - push this address + // 61 03e8 - push 1000 + // f1 - message call + // 58 - get PC + // 55 - sstore + + let code_a = "6000600060006000601873945304eb96065b2a98b57a48a06ae28d285a71b56103e8f15855" + .from_hex() + .unwrap(); + + // 60 00 - push 0 + // 60 00 - push 0 + // 60 00 - push 0 + // 60 00 - push 0 + // 60 17 - push 17 + // 73 0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6 - push this address + // 61 0x01f4 - push 500 + // f1 - message call + // 60 01 - push 1 + // 01 - add + // 58 - get PC + // 55 - sstore + let code_b = + "60006000600060006017730f572e5295c57f15886f9b263e2f6d2d6c7b5ec66101f4f16001015855" + .from_hex() + .unwrap(); + + let address_a = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let address_b = Address::from_str("945304eb96065b2a98b57a48a06ae28d285a71b5").unwrap(); + let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + + let mut params = ActionParams::default(); + params.address = address_a.clone(); + params.sender = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code_a.clone())); + params.value = ActionValue::Transfer(U256::from(100_000)); + + let mut state = get_temp_state_with_factory(factory); + state.init_code(&address_a, code_a.clone()).unwrap(); + state.init_code(&address_b, code_b.clone()).unwrap(); + state + .add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty) + .unwrap(); + + let info = EnvInfo::default(); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); + + let FinalizationResult { gas_left, .. 
} = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .unwrap() + }; + + assert_eq!(gas_left, U256::from(73_237)); + assert_eq!( + state + .storage_at(&address_a, &H256::from(&U256::from(0x23))) + .unwrap(), + H256::from(&U256::from(1)) + ); + } + + // test is incorrect, mk + // TODO: fix (preferred) or remove + evm_test_ignore! {test_recursive_bomb1: test_recursive_bomb1_int} + fn test_recursive_bomb1(factory: Factory) { + // 60 01 - push 1 + // 60 00 - push 0 + // 54 - sload + // 01 - add + // 60 00 - push 0 + // 55 - sstore + // 60 00 - push 0 + // 60 00 - push 0 + // 60 00 - push 0 + // 60 00 - push 0 + // 60 00 - push 0 + // 30 - load address + // 60 e0 - push e0 + // 5a - get gas + // 03 - sub + // f1 - message call (self in this case) + // 60 01 - push 1 + // 55 - sstore + let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let code = "600160005401600055600060006000600060003060e05a03f1600155" + .from_hex() + .unwrap(); + let address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code.clone())); + let mut state = get_temp_state_with_factory(factory); + state.init_code(&address, code).unwrap(); + let info = EnvInfo::default(); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); + + let FinalizationResult { gas_left, .. 
} = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .unwrap() + }; + + assert_eq!(gas_left, U256::from(59_870)); + assert_eq!( + state + .storage_at(&address, &H256::from(&U256::zero())) + .unwrap(), + H256::from(&U256::from(1)) + ); + assert_eq!( + state + .storage_at(&address, &H256::from(&U256::one())) + .unwrap(), + H256::from(&U256::from(1)) + ); + } + + // test is incorrect, mk + // TODO: fix (preferred) or remove + evm_test_ignore! {test_transact_simple: test_transact_simple_int} + fn test_transact_simple(factory: Factory) { + let keypair = Random.generate().unwrap(); + let t = Transaction { + action: Action::Create, + value: U256::from(17), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::zero(), + nonce: U256::zero(), + } + .sign(keypair.secret(), None); + let sender = t.sender(); + let contract = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + + let mut state = get_temp_state_with_factory(factory); + state + .add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty) + .unwrap(); + let mut info = EnvInfo::default(); + info.gas_limit = U256::from(100_000); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + + let executed = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + let opts = TransactOptions::with_no_tracing(); + ex.transact(&t, opts).unwrap() + }; + + assert_eq!(executed.gas, U256::from(100_000)); + assert_eq!(executed.gas_used, U256::from(41_301)); + assert_eq!(executed.refunded, U256::from(58_699)); + assert_eq!(executed.cumulative_gas_used, U256::from(41_301)); + assert_eq!(executed.logs.len(), 0); + assert_eq!(executed.contracts_created.len(), 0); + assert_eq!(state.balance(&sender).unwrap(), U256::from(1)); + assert_eq!(state.balance(&contract).unwrap(), U256::from(17)); + 
assert_eq!(state.nonce(&sender).unwrap(), U256::from(1)); + assert_eq!( + state.storage_at(&contract, &H256::new()).unwrap(), + H256::from(&U256::from(1)) + ); + } + + evm_test! {test_transact_invalid_nonce: test_transact_invalid_nonce_int} + fn test_transact_invalid_nonce(factory: Factory) { + let keypair = Random.generate().unwrap(); + let t = Transaction { + action: Action::Create, + value: U256::from(17), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::zero(), + nonce: U256::one(), + } + .sign(keypair.secret(), None); + let sender = t.sender(); + + let mut state = get_temp_state_with_factory(factory); + state + .add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty) + .unwrap(); + let mut info = EnvInfo::default(); + info.gas_limit = U256::from(100_000); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + + let res = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + let opts = TransactOptions::with_no_tracing(); + ex.transact(&t, opts) + }; + + match res { + Err(ExecutionError::InvalidNonce { expected, got }) + if expected == U256::zero() && got == U256::one() => + { + () + } + _ => assert!(false, "Expected invalid nonce error."), + } + } + + evm_test! 
{test_transact_gas_limit_reached: test_transact_gas_limit_reached_int} + fn test_transact_gas_limit_reached(factory: Factory) { + let keypair = Random.generate().unwrap(); + let t = Transaction { + action: Action::Create, + value: U256::from(17), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(80_001), + gas_price: U256::zero(), + nonce: U256::zero(), + } + .sign(keypair.secret(), None); + let sender = t.sender(); + + let mut state = get_temp_state_with_factory(factory); + state + .add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty) + .unwrap(); + let mut info = EnvInfo::default(); + info.gas_used = U256::from(20_000); + info.gas_limit = U256::from(100_000); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + + let res = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + let opts = TransactOptions::with_no_tracing(); + ex.transact(&t, opts) + }; + + match res { + Err(ExecutionError::BlockGasLimitReached { + gas_limit, + gas_used, + gas, + }) if gas_limit == U256::from(100_000) + && gas_used == U256::from(20_000) + && gas == U256::from(80_001) => + { + () + } + _ => assert!(false, "Expected block gas limit error."), + } + } + + evm_test! 
{test_not_enough_cash: test_not_enough_cash_int} + fn test_not_enough_cash(factory: Factory) { + let keypair = Random.generate().unwrap(); + let t = Transaction { + action: Action::Create, + value: U256::from(18), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::one(), + nonce: U256::zero(), + } + .sign(keypair.secret(), None); + let sender = t.sender(); + + let mut state = get_temp_state_with_factory(factory); + state + .add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty) + .unwrap(); + let mut info = EnvInfo::default(); + info.gas_limit = U256::from(100_000); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + + let res = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + let opts = TransactOptions::with_no_tracing(); + ex.transact(&t, opts) + }; + + match res { + Err(ExecutionError::NotEnoughCash { required, got }) + if required == U512::from(100_018) && got == U512::from(100_017) => + { + () + } + _ => assert!(false, "Expected not enough cash error. {:?}", res), + } + } + + evm_test! 
{test_keccak: test_keccak_int} + fn test_keccak(factory: Factory) { + let code = "6064640fffffffff20600055".from_hex().unwrap(); + + let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let address = contract_address( + CreateContractAddress::FromSenderAndNonce, + &sender, + &U256::zero(), + &[], + ) + .0; + // TODO: add tests for 'callcreate' + //let next_address = contract_address(&address, &U256::zero()); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(0x0186a0); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::from_str("0de0b6b3a7640000").unwrap()); + let mut state = get_temp_state_with_factory(factory); + state + .add_balance( + &sender, + &U256::from_str("152d02c7e14af6800000").unwrap(), + CleanupMode::NoEmpty, + ) + .unwrap(); + let info = EnvInfo::default(); + let machine = make_frontier_machine(0); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); + + let result = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + }; + + match result { + Err(_) => {} + _ => panic!("Expected OutOfGas"), + } + } + + evm_test! 
{test_revert: test_revert_int} + fn test_revert(factory: Factory) { + let contract_address = + Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + // EIP-140 test case + let code = "6c726576657274656420646174616000557f726576657274206d657373616765000000000000000000000000000000000000600052600e6000fd".from_hex().unwrap(); + let returns = "726576657274206d657373616765".from_hex().unwrap(); + let mut state = get_temp_state_with_factory(factory.clone()); + state + .add_balance( + &sender, + &U256::from_str("152d02c7e14af68000000").unwrap(), + CleanupMode::NoEmpty, + ) + .unwrap(); + state.commit().unwrap(); + + let mut params = ActionParams::default(); + params.address = contract_address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(20025); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::zero()); + let info = EnvInfo::default(); + let machine = ::ethereum::new_byzantium_test_machine(); + let schedule = machine.schedule(info.number); + let mut substate = Substate::new(); + + let mut output = [0u8; 14]; + let FinalizationResult { + gas_left: result, + return_data, + .. + } = { + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .unwrap() + }; + (&mut output).copy_from_slice(&return_data[..(cmp::min(14, return_data.len()))]); + + assert_eq!(result, U256::from(1)); + assert_eq!(output[..], returns[..]); + assert_eq!( + state + .storage_at(&contract_address, &H256::from(&U256::zero())) + .unwrap(), + H256::from(&U256::from(0)) + ); + } + + evm_test! 
{test_eip1283: test_eip1283_int} + fn test_eip1283(factory: Factory) { + let x1 = Address::from(0x1000); + let x2 = Address::from(0x1001); + let y1 = Address::from(0x2001); + let y2 = Address::from(0x2002); + let operating_address = Address::from(0); + let k = H256::new(); + + let mut state = get_temp_state_with_factory(factory.clone()); + state + .new_contract(&x1, U256::zero(), U256::from(1)) + .unwrap(); + state + .init_code(&x1, "600160005560006000556001600055".from_hex().unwrap()) + .unwrap(); + state + .new_contract(&x2, U256::zero(), U256::from(1)) + .unwrap(); + state + .init_code(&x2, "600060005560016000556000600055".from_hex().unwrap()) + .unwrap(); + state + .new_contract(&y1, U256::zero(), U256::from(1)) + .unwrap(); + state + .init_code(&y1, "600060006000600061100062fffffff4".from_hex().unwrap()) + .unwrap(); + state + .new_contract(&y2, U256::zero(), U256::from(1)) + .unwrap(); + state + .init_code(&y2, "600060006000600061100162fffffff4".from_hex().unwrap()) + .unwrap(); + + let info = EnvInfo::default(); + let machine = ::ethereum::new_constantinople_test_machine(); + let schedule = machine.schedule(info.number); + + assert_eq!( + state.storage_at(&operating_address, &k).unwrap(), + H256::from(U256::from(0)) + ); + // Test a call via top-level -> y1 -> x1 + let (FinalizationResult { gas_left, .. 
}, refund, gas) = { + let gas = U256::from(0xffffffffffu64); + let mut params = ActionParams::default(); + params.code = Some(Arc::new( + "6001600055600060006000600061200163fffffffff4" + .from_hex() + .unwrap(), + )); + params.gas = gas; + let mut substate = Substate::new(); + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + let res = ex + .call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .unwrap(); + + (res, substate.sstore_clears_refund, gas) + }; + let gas_used = gas - gas_left; + // sstore: 0 -> (1) -> () -> (1 -> 0 -> 1) + assert_eq!(gas_used, U256::from(41860)); + assert_eq!(refund, 19800); + + assert_eq!( + state.storage_at(&operating_address, &k).unwrap(), + H256::from(U256::from(1)) + ); + // Test a call via top-level -> y2 -> x2 + let (FinalizationResult { gas_left, .. }, refund, gas) = { + let gas = U256::from(0xffffffffffu64); + let mut params = ActionParams::default(); + params.code = Some(Arc::new( + "6001600055600060006000600061200263fffffffff4" + .from_hex() + .unwrap(), + )); + params.gas = gas; + let mut substate = Substate::new(); + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + let res = ex + .call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .unwrap(); + + (res, substate.sstore_clears_refund, gas) + }; + let gas_used = gas - gas_left; + // sstore: 1 -> (1) -> () -> (0 -> 1 -> 0) + assert_eq!(gas_used, U256::from(11860)); + assert_eq!(refund, 19800); + } + + fn wasm_sample_code() -> Arc> { + Arc::new( 
"0061736d01000000010d0360027f7f0060017f0060000002270303656e7603726574000003656e760673656e646572000103656e76066d656d6f727902010110030201020404017000000501000708010463616c6c00020901000ac10101be0102057f017e4100410028020441c0006b22043602042004412c6a41106a220041003602002004412c6a41086a22014200370200200441186a41106a22024100360200200441186a41086a220342003703002004420037022c2004410036021c20044100360218200441186a1001200020022802002202360200200120032903002205370200200441106a2002360200200441086a200537030020042004290318220537022c200420053703002004411410004100200441c0006a3602040b0b0a010041040b0410c00000" .from_hex() .unwrap() ) - } + } - #[test] - fn wasm_activated_test() { - let contract_address = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + #[test] + fn wasm_activated_test() { + let contract_address = + Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let mut state = get_temp_state(); - state.add_balance(&sender, &U256::from(10000000000u64), CleanupMode::NoEmpty).unwrap(); - state.commit().unwrap(); + let mut state = get_temp_state(); + state + .add_balance(&sender, &U256::from(10000000000u64), CleanupMode::NoEmpty) + .unwrap(); + state.commit().unwrap(); - let mut params = ActionParams::default(); - params.origin = sender.clone(); - params.sender = sender.clone(); - params.address = contract_address.clone(); - params.gas = U256::from(20025); - params.code = Some(wasm_sample_code()); + let mut params = ActionParams::default(); + params.origin = sender.clone(); + params.sender = sender.clone(); + params.address = contract_address.clone(); + params.gas = U256::from(20025); + params.code = Some(wasm_sample_code()); - let mut info = EnvInfo::default(); + let mut info = EnvInfo::default(); - // 100 > 10 - info.number = 100; + // 100 > 10 + info.number = 
100; - // Network with wasm activated at block 10 - let machine = ::ethereum::new_kovan_wasm_test_machine(); + // Network with wasm activated at block 10 + let machine = ::ethereum::new_kovan_wasm_test_machine(); - let mut output = [0u8; 20]; - let FinalizationResult { gas_left: result, return_data, .. } = { - let schedule = machine.schedule(info.number); - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params.clone(), &mut Substate::new(), &mut NoopTracer, &mut NoopVMTracer).unwrap() - }; - (&mut output).copy_from_slice(&return_data[..(cmp::min(20, return_data.len()))]); + let mut output = [0u8; 20]; + let FinalizationResult { + gas_left: result, + return_data, + .. + } = { + let schedule = machine.schedule(info.number); + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.call( + params.clone(), + &mut Substate::new(), + &mut NoopTracer, + &mut NoopVMTracer, + ) + .unwrap() + }; + (&mut output).copy_from_slice(&return_data[..(cmp::min(20, return_data.len()))]); - assert_eq!(result, U256::from(18433)); - // Transaction successfully returned sender - assert_eq!(output[..], sender[..]); + assert_eq!(result, U256::from(18433)); + // Transaction successfully returned sender + assert_eq!(output[..], sender[..]); - // 1 < 10 - info.number = 1; + // 1 < 10 + info.number = 1; - let mut output = [0u8; 20]; - let FinalizationResult { gas_left: result, return_data, .. } = { - let schedule = machine.schedule(info.number); - let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut Substate::new(), &mut NoopTracer, &mut NoopVMTracer).unwrap() - }; - (&mut output[..((cmp::min(20, return_data.len())))]).copy_from_slice(&return_data[..(cmp::min(20, return_data.len()))]); + let mut output = [0u8; 20]; + let FinalizationResult { + gas_left: result, + return_data, + .. 
+ } = { + let schedule = machine.schedule(info.number); + let mut ex = Executive::new(&mut state, &info, &machine, &schedule); + ex.call( + params, + &mut Substate::new(), + &mut NoopTracer, + &mut NoopVMTracer, + ) + .unwrap() + }; + (&mut output[..(cmp::min(20, return_data.len()))]) + .copy_from_slice(&return_data[..(cmp::min(20, return_data.len()))]); - assert_eq!(result, U256::from(20025)); - // Since transaction errored due to wasm was not activated, result is just empty - assert_eq!(output[..], [0u8; 20][..]); - } + assert_eq!(result, U256::from(20025)); + // Since transaction errored due to wasm was not activated, result is just empty + assert_eq!(output[..], [0u8; 20][..]); + } } diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 27ef05365..7f5603300 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -15,636 +15,865 @@ // along with Parity Ethereum. If not, see . //! Transaction Execution environment. -use std::cmp; -use std::sync::Arc; -use ethereum_types::{H256, U256, Address}; use bytes::Bytes; -use state::{Backend as StateBackend, State, Substate, CleanupMode}; -use machine::EthereumMachine as Machine; +use ethereum_types::{Address, H256, U256}; use executive::*; -use vm::{ - self, ActionParams, ActionValue, EnvInfo, CallType, Schedule, - Ext, ContractCreateResult, MessageCallResult, CreateContractAddress, - ReturnData, TrapKind -}; -use types::transaction::UNSIGNED_SENDER; +use machine::EthereumMachine as Machine; +use state::{Backend as StateBackend, CleanupMode, State, Substate}; +use std::{cmp, sync::Arc}; use trace::{Tracer, VMTracer}; +use types::transaction::UNSIGNED_SENDER; +use vm::{ + self, ActionParams, ActionValue, CallType, ContractCreateResult, CreateContractAddress, + EnvInfo, Ext, MessageCallResult, ReturnData, Schedule, TrapKind, +}; /// Policy for handling output data on `RETURN` opcode. pub enum OutputPolicy { - /// Return reference to fixed sized output. 
- /// Used for message calls. - Return, - /// Init new contract as soon as `RETURN` is called. - InitContract, + /// Return reference to fixed sized output. + /// Used for message calls. + Return, + /// Init new contract as soon as `RETURN` is called. + InitContract, } /// Transaction properties that externalities need to know about. pub struct OriginInfo { - address: Address, - origin: Address, - gas_price: U256, - value: U256, + address: Address, + origin: Address, + gas_price: U256, + value: U256, } impl OriginInfo { - /// Populates origin info from action params. - pub fn from(params: &ActionParams) -> Self { - OriginInfo { - address: params.address.clone(), - origin: params.origin.clone(), - gas_price: params.gas_price, - value: match params.value { - ActionValue::Transfer(val) | ActionValue::Apparent(val) => val - }, - } - } + /// Populates origin info from action params. + pub fn from(params: &ActionParams) -> Self { + OriginInfo { + address: params.address.clone(), + origin: params.origin.clone(), + gas_price: params.gas_price, + value: match params.value { + ActionValue::Transfer(val) | ActionValue::Apparent(val) => val, + }, + } + } } /// Implementation of evm Externalities. 
pub struct Externalities<'a, T: 'a, V: 'a, B: 'a> { - state: &'a mut State, - env_info: &'a EnvInfo, - depth: usize, - stack_depth: usize, - origin_info: &'a OriginInfo, - substate: &'a mut Substate, - machine: &'a Machine, - schedule: &'a Schedule, - output: OutputPolicy, - tracer: &'a mut T, - vm_tracer: &'a mut V, - static_flag: bool, + state: &'a mut State, + env_info: &'a EnvInfo, + depth: usize, + stack_depth: usize, + origin_info: &'a OriginInfo, + substate: &'a mut Substate, + machine: &'a Machine, + schedule: &'a Schedule, + output: OutputPolicy, + tracer: &'a mut T, + vm_tracer: &'a mut V, + static_flag: bool, } impl<'a, T: 'a, V: 'a, B: 'a> Externalities<'a, T, V, B> - where T: Tracer, V: VMTracer, B: StateBackend +where + T: Tracer, + V: VMTracer, + B: StateBackend, { - /// Basic `Externalities` constructor. - pub fn new( - state: &'a mut State, - env_info: &'a EnvInfo, - machine: &'a Machine, - schedule: &'a Schedule, - depth: usize, - stack_depth: usize, - origin_info: &'a OriginInfo, - substate: &'a mut Substate, - output: OutputPolicy, - tracer: &'a mut T, - vm_tracer: &'a mut V, - static_flag: bool, - ) -> Self { - Externalities { - state: state, - env_info: env_info, - depth: depth, - stack_depth: stack_depth, - origin_info: origin_info, - substate: substate, - machine: machine, - schedule: schedule, - output: output, - tracer: tracer, - vm_tracer: vm_tracer, - static_flag: static_flag, - } - } + /// Basic `Externalities` constructor. 
+ pub fn new( + state: &'a mut State, + env_info: &'a EnvInfo, + machine: &'a Machine, + schedule: &'a Schedule, + depth: usize, + stack_depth: usize, + origin_info: &'a OriginInfo, + substate: &'a mut Substate, + output: OutputPolicy, + tracer: &'a mut T, + vm_tracer: &'a mut V, + static_flag: bool, + ) -> Self { + Externalities { + state: state, + env_info: env_info, + depth: depth, + stack_depth: stack_depth, + origin_info: origin_info, + substate: substate, + machine: machine, + schedule: schedule, + output: output, + tracer: tracer, + vm_tracer: vm_tracer, + static_flag: static_flag, + } + } } impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> - where T: Tracer, V: VMTracer, B: StateBackend +where + T: Tracer, + V: VMTracer, + B: StateBackend, { - fn initial_storage_at(&self, key: &H256) -> vm::Result { - if self.state.is_base_storage_root_unchanged(&self.origin_info.address)? { - self.state.checkpoint_storage_at(0, &self.origin_info.address, key).map(|v| v.unwrap_or_default()).map_err(Into::into) - } else { - warn!(target: "externalities", "Detected existing account {:#x} where a forced contract creation happened.", self.origin_info.address); - Ok(H256::zero()) - } - } + fn initial_storage_at(&self, key: &H256) -> vm::Result { + if self + .state + .is_base_storage_root_unchanged(&self.origin_info.address)? 
+ { + self.state + .checkpoint_storage_at(0, &self.origin_info.address, key) + .map(|v| v.unwrap_or_default()) + .map_err(Into::into) + } else { + warn!(target: "externalities", "Detected existing account {:#x} where a forced contract creation happened.", self.origin_info.address); + Ok(H256::zero()) + } + } - fn storage_at(&self, key: &H256) -> vm::Result { - self.state.storage_at(&self.origin_info.address, key).map_err(Into::into) - } + fn storage_at(&self, key: &H256) -> vm::Result { + self.state + .storage_at(&self.origin_info.address, key) + .map_err(Into::into) + } - fn set_storage(&mut self, key: H256, value: H256) -> vm::Result<()> { - if self.static_flag { - Err(vm::Error::MutableCallInStaticContext) - } else { - self.state.set_storage(&self.origin_info.address, key, value).map_err(Into::into) - } - } + fn set_storage(&mut self, key: H256, value: H256) -> vm::Result<()> { + if self.static_flag { + Err(vm::Error::MutableCallInStaticContext) + } else { + self.state + .set_storage(&self.origin_info.address, key, value) + .map_err(Into::into) + } + } - fn is_static(&self) -> bool { - return self.static_flag - } + fn is_static(&self) -> bool { + return self.static_flag; + } - fn exists(&self, address: &Address) -> vm::Result { - self.state.exists(address).map_err(Into::into) - } + fn exists(&self, address: &Address) -> vm::Result { + self.state.exists(address).map_err(Into::into) + } - fn exists_and_not_null(&self, address: &Address) -> vm::Result { - self.state.exists_and_not_null(address).map_err(Into::into) - } + fn exists_and_not_null(&self, address: &Address) -> vm::Result { + self.state.exists_and_not_null(address).map_err(Into::into) + } - fn origin_balance(&self) -> vm::Result { - self.balance(&self.origin_info.address).map_err(Into::into) - } + fn origin_balance(&self) -> vm::Result { + self.balance(&self.origin_info.address).map_err(Into::into) + } - fn balance(&self, address: &Address) -> vm::Result { - self.state.balance(address).map_err(Into::into) 
- } + fn balance(&self, address: &Address) -> vm::Result { + self.state.balance(address).map_err(Into::into) + } - fn blockhash(&mut self, number: &U256) -> H256 { - if self.env_info.number + 256 >= self.machine.params().eip210_transition { - let blockhash_contract_address = self.machine.params().eip210_contract_address; - let code_res = self.state.code(&blockhash_contract_address) - .and_then(|code| self.state.code_hash(&blockhash_contract_address).map(|hash| (code, hash))); + fn blockhash(&mut self, number: &U256) -> H256 { + if self.env_info.number + 256 >= self.machine.params().eip210_transition { + let blockhash_contract_address = self.machine.params().eip210_contract_address; + let code_res = self + .state + .code(&blockhash_contract_address) + .and_then(|code| { + self.state + .code_hash(&blockhash_contract_address) + .map(|hash| (code, hash)) + }); - let (code, code_hash) = match code_res { - Ok((code, hash)) => (code, hash), - Err(_) => return H256::zero(), - }; + let (code, code_hash) = match code_res { + Ok((code, hash)) => (code, hash), + Err(_) => return H256::zero(), + }; - let params = ActionParams { - sender: self.origin_info.address.clone(), - address: blockhash_contract_address.clone(), - value: ActionValue::Apparent(self.origin_info.value), - code_address: blockhash_contract_address.clone(), - origin: self.origin_info.origin.clone(), - gas: self.machine.params().eip210_contract_gas, - gas_price: 0.into(), - code: code, - code_hash: code_hash, - data: Some(H256::from(number).to_vec()), - call_type: CallType::Call, - params_type: vm::ParamsType::Separate, - }; + let params = ActionParams { + sender: self.origin_info.address.clone(), + address: blockhash_contract_address.clone(), + value: ActionValue::Apparent(self.origin_info.value), + code_address: blockhash_contract_address.clone(), + origin: self.origin_info.origin.clone(), + gas: self.machine.params().eip210_contract_gas, + gas_price: 0.into(), + code: code, + code_hash: code_hash, + data: 
Some(H256::from(number).to_vec()), + call_type: CallType::Call, + params_type: vm::ParamsType::Separate, + }; - let mut ex = Executive::new(self.state, self.env_info, self.machine, self.schedule); - let r = ex.call_with_crossbeam(params, self.substate, self.stack_depth + 1, self.tracer, self.vm_tracer); - let output = match &r { - Ok(ref r) => H256::from(&r.return_data[..32]), - _ => H256::new(), - }; - trace!("ext: blockhash contract({}) -> {:?}({}) self.env_info.number={}\n", number, r, output, self.env_info.number); - output - } else { - // TODO: comment out what this function expects from env_info, since it will produce panics if the latter is inconsistent - match *number < U256::from(self.env_info.number) && number.low_u64() >= cmp::max(256, self.env_info.number) - 256 { - true => { - let index = self.env_info.number - number.low_u64() - 1; - assert!(index < self.env_info.last_hashes.len() as u64, format!("Inconsistent env_info, should contain at least {:?} last hashes", index+1)); - let r = self.env_info.last_hashes[index as usize].clone(); - trace!("ext: blockhash({}) -> {} self.env_info.number={}\n", number, r, self.env_info.number); - r - }, - false => { - trace!("ext: blockhash({}) -> null self.env_info.number={}\n", number, self.env_info.number); - H256::zero() - }, - } - } - } + let mut ex = Executive::new(self.state, self.env_info, self.machine, self.schedule); + let r = ex.call_with_crossbeam( + params, + self.substate, + self.stack_depth + 1, + self.tracer, + self.vm_tracer, + ); + let output = match &r { + Ok(ref r) => H256::from(&r.return_data[..32]), + _ => H256::new(), + }; + trace!( + "ext: blockhash contract({}) -> {:?}({}) self.env_info.number={}\n", + number, + r, + output, + self.env_info.number + ); + output + } else { + // TODO: comment out what this function expects from env_info, since it will produce panics if the latter is inconsistent + match *number < U256::from(self.env_info.number) + && number.low_u64() >= cmp::max(256, 
self.env_info.number) - 256 + { + true => { + let index = self.env_info.number - number.low_u64() - 1; + assert!( + index < self.env_info.last_hashes.len() as u64, + format!( + "Inconsistent env_info, should contain at least {:?} last hashes", + index + 1 + ) + ); + let r = self.env_info.last_hashes[index as usize].clone(); + trace!( + "ext: blockhash({}) -> {} self.env_info.number={}\n", + number, + r, + self.env_info.number + ); + r + } + false => { + trace!( + "ext: blockhash({}) -> null self.env_info.number={}\n", + number, + self.env_info.number + ); + H256::zero() + } + } + } + } - fn create( - &mut self, - gas: &U256, - value: &U256, - code: &[u8], - address_scheme: CreateContractAddress, - trap: bool, - ) -> ::std::result::Result { - // create new contract address - let (address, code_hash) = match self.state.nonce(&self.origin_info.address) { - Ok(nonce) => contract_address(address_scheme, &self.origin_info.address, &nonce, &code), - Err(e) => { - debug!(target: "ext", "Database corruption encountered: {:?}", e); - return Ok(ContractCreateResult::Failed) - } - }; + fn create( + &mut self, + gas: &U256, + value: &U256, + code: &[u8], + address_scheme: CreateContractAddress, + trap: bool, + ) -> ::std::result::Result { + // create new contract address + let (address, code_hash) = match self.state.nonce(&self.origin_info.address) { + Ok(nonce) => contract_address(address_scheme, &self.origin_info.address, &nonce, &code), + Err(e) => { + debug!(target: "ext", "Database corruption encountered: {:?}", e); + return Ok(ContractCreateResult::Failed); + } + }; - // prepare the params - let params = ActionParams { - code_address: address.clone(), - address: address.clone(), - sender: self.origin_info.address.clone(), - origin: self.origin_info.origin.clone(), - gas: *gas, - gas_price: self.origin_info.gas_price, - value: ActionValue::Transfer(*value), - code: Some(Arc::new(code.to_vec())), - code_hash: code_hash, - data: None, - call_type: CallType::None, - 
params_type: vm::ParamsType::Embedded, - }; + // prepare the params + let params = ActionParams { + code_address: address.clone(), + address: address.clone(), + sender: self.origin_info.address.clone(), + origin: self.origin_info.origin.clone(), + gas: *gas, + gas_price: self.origin_info.gas_price, + value: ActionValue::Transfer(*value), + code: Some(Arc::new(code.to_vec())), + code_hash: code_hash, + data: None, + call_type: CallType::None, + params_type: vm::ParamsType::Embedded, + }; - if !self.static_flag { - if !self.schedule.keep_unsigned_nonce || params.sender != UNSIGNED_SENDER { - if let Err(e) = self.state.inc_nonce(&self.origin_info.address) { - debug!(target: "ext", "Database corruption encountered: {:?}", e); - return Ok(ContractCreateResult::Failed) - } - } - } + if !self.static_flag { + if !self.schedule.keep_unsigned_nonce || params.sender != UNSIGNED_SENDER { + if let Err(e) = self.state.inc_nonce(&self.origin_info.address) { + debug!(target: "ext", "Database corruption encountered: {:?}", e); + return Ok(ContractCreateResult::Failed); + } + } + } - if trap { - return Err(TrapKind::Create(params, address)); - } + if trap { + return Err(TrapKind::Create(params, address)); + } - // TODO: handle internal error separately - let mut ex = Executive::from_parent(self.state, self.env_info, self.machine, self.schedule, self.depth, self.static_flag); - let out = ex.create_with_crossbeam(params, self.substate, self.stack_depth + 1, self.tracer, self.vm_tracer); - Ok(into_contract_create_result(out, &address, self.substate)) - } + // TODO: handle internal error separately + let mut ex = Executive::from_parent( + self.state, + self.env_info, + self.machine, + self.schedule, + self.depth, + self.static_flag, + ); + let out = ex.create_with_crossbeam( + params, + self.substate, + self.stack_depth + 1, + self.tracer, + self.vm_tracer, + ); + Ok(into_contract_create_result(out, &address, self.substate)) + } - fn call( - &mut self, - gas: &U256, - sender_address: 
&Address, - receive_address: &Address, - value: Option, - data: &[u8], - code_address: &Address, - call_type: CallType, - trap: bool, - ) -> ::std::result::Result { - trace!(target: "externalities", "call"); + fn call( + &mut self, + gas: &U256, + sender_address: &Address, + receive_address: &Address, + value: Option, + data: &[u8], + code_address: &Address, + call_type: CallType, + trap: bool, + ) -> ::std::result::Result { + trace!(target: "externalities", "call"); - let code_res = self.state.code(code_address) - .and_then(|code| self.state.code_hash(code_address).map(|hash| (code, hash))); + let code_res = self + .state + .code(code_address) + .and_then(|code| self.state.code_hash(code_address).map(|hash| (code, hash))); - let (code, code_hash) = match code_res { - Ok((code, hash)) => (code, hash), - Err(_) => return Ok(MessageCallResult::Failed), - }; + let (code, code_hash) = match code_res { + Ok((code, hash)) => (code, hash), + Err(_) => return Ok(MessageCallResult::Failed), + }; - let mut params = ActionParams { - sender: sender_address.clone(), - address: receive_address.clone(), - value: ActionValue::Apparent(self.origin_info.value), - code_address: code_address.clone(), - origin: self.origin_info.origin.clone(), - gas: *gas, - gas_price: self.origin_info.gas_price, - code: code, - code_hash: code_hash, - data: Some(data.to_vec()), - call_type: call_type, - params_type: vm::ParamsType::Separate, - }; + let mut params = ActionParams { + sender: sender_address.clone(), + address: receive_address.clone(), + value: ActionValue::Apparent(self.origin_info.value), + code_address: code_address.clone(), + origin: self.origin_info.origin.clone(), + gas: *gas, + gas_price: self.origin_info.gas_price, + code: code, + code_hash: code_hash, + data: Some(data.to_vec()), + call_type: call_type, + params_type: vm::ParamsType::Separate, + }; - if let Some(value) = value { - params.value = ActionValue::Transfer(value); - } + if let Some(value) = value { + params.value = 
ActionValue::Transfer(value); + } - if trap { - return Err(TrapKind::Call(params)); - } + if trap { + return Err(TrapKind::Call(params)); + } - let mut ex = Executive::from_parent(self.state, self.env_info, self.machine, self.schedule, self.depth, self.static_flag); - let out = ex.call_with_crossbeam(params, self.substate, self.stack_depth + 1, self.tracer, self.vm_tracer); - Ok(into_message_call_result(out)) - } + let mut ex = Executive::from_parent( + self.state, + self.env_info, + self.machine, + self.schedule, + self.depth, + self.static_flag, + ); + let out = ex.call_with_crossbeam( + params, + self.substate, + self.stack_depth + 1, + self.tracer, + self.vm_tracer, + ); + Ok(into_message_call_result(out)) + } - fn extcode(&self, address: &Address) -> vm::Result>> { - Ok(self.state.code(address)?) - } + fn extcode(&self, address: &Address) -> vm::Result>> { + Ok(self.state.code(address)?) + } - fn extcodehash(&self, address: &Address) -> vm::Result> { - if self.state.exists_and_not_null(address)? { - Ok(self.state.code_hash(address)?) - } else { - Ok(None) - } - } + fn extcodehash(&self, address: &Address) -> vm::Result> { + if self.state.exists_and_not_null(address)? { + Ok(self.state.code_hash(address)?) + } else { + Ok(None) + } + } - fn extcodesize(&self, address: &Address) -> vm::Result> { - Ok(self.state.code_size(address)?) - } + fn extcodesize(&self, address: &Address) -> vm::Result> { + Ok(self.state.code_size(address)?) 
+ } - fn ret(self, gas: &U256, data: &ReturnData, apply_state: bool) -> vm::Result - where Self: Sized { - match self.output { - OutputPolicy::Return => { - Ok(*gas) - }, - OutputPolicy::InitContract if apply_state => { - let return_cost = U256::from(data.len()) * U256::from(self.schedule.create_data_gas); - if return_cost > *gas || data.len() > self.schedule.create_data_limit { - return match self.schedule.exceptional_failed_code_deposit { - true => Err(vm::Error::OutOfGas), - false => Ok(*gas) - } - } - self.state.init_code(&self.origin_info.address, data.to_vec())?; - Ok(*gas - return_cost) - }, - OutputPolicy::InitContract => { - Ok(*gas) - }, - } - } + fn ret(self, gas: &U256, data: &ReturnData, apply_state: bool) -> vm::Result + where + Self: Sized, + { + match self.output { + OutputPolicy::Return => Ok(*gas), + OutputPolicy::InitContract if apply_state => { + let return_cost = + U256::from(data.len()) * U256::from(self.schedule.create_data_gas); + if return_cost > *gas || data.len() > self.schedule.create_data_limit { + return match self.schedule.exceptional_failed_code_deposit { + true => Err(vm::Error::OutOfGas), + false => Ok(*gas), + }; + } + self.state + .init_code(&self.origin_info.address, data.to_vec())?; + Ok(*gas - return_cost) + } + OutputPolicy::InitContract => Ok(*gas), + } + } - fn log(&mut self, topics: Vec, data: &[u8]) -> vm::Result<()> { - use types::log_entry::LogEntry; + fn log(&mut self, topics: Vec, data: &[u8]) -> vm::Result<()> { + use types::log_entry::LogEntry; - if self.static_flag { - return Err(vm::Error::MutableCallInStaticContext); - } + if self.static_flag { + return Err(vm::Error::MutableCallInStaticContext); + } - let address = self.origin_info.address.clone(); - self.substate.logs.push(LogEntry { - address: address, - topics: topics, - data: data.to_vec() - }); + let address = self.origin_info.address.clone(); + self.substate.logs.push(LogEntry { + address: address, + topics: topics, + data: data.to_vec(), + }); - Ok(()) - 
} + Ok(()) + } - fn suicide(&mut self, refund_address: &Address) -> vm::Result<()> { - if self.static_flag { - return Err(vm::Error::MutableCallInStaticContext); - } + fn suicide(&mut self, refund_address: &Address) -> vm::Result<()> { + if self.static_flag { + return Err(vm::Error::MutableCallInStaticContext); + } - let address = self.origin_info.address.clone(); - let balance = self.balance(&address)?; - if &address == refund_address { - // TODO [todr] To be consistent with CPP client we set balance to 0 in that case. - self.state.sub_balance(&address, &balance, &mut CleanupMode::NoEmpty)?; - } else { - trace!(target: "ext", "Suiciding {} -> {} (xfer: {})", address, refund_address, balance); - self.state.transfer_balance( - &address, - refund_address, - &balance, - self.substate.to_cleanup_mode(&self.schedule) - )?; - } + let address = self.origin_info.address.clone(); + let balance = self.balance(&address)?; + if &address == refund_address { + // TODO [todr] To be consistent with CPP client we set balance to 0 in that case. 
+ self.state + .sub_balance(&address, &balance, &mut CleanupMode::NoEmpty)?; + } else { + trace!(target: "ext", "Suiciding {} -> {} (xfer: {})", address, refund_address, balance); + self.state.transfer_balance( + &address, + refund_address, + &balance, + self.substate.to_cleanup_mode(&self.schedule), + )?; + } - self.tracer.trace_suicide(address, balance, refund_address.clone()); - self.substate.suicides.insert(address); + self.tracer + .trace_suicide(address, balance, refund_address.clone()); + self.substate.suicides.insert(address); - Ok(()) - } + Ok(()) + } - fn schedule(&self) -> &Schedule { - &self.schedule - } + fn schedule(&self) -> &Schedule { + &self.schedule + } - fn env_info(&self) -> &EnvInfo { - self.env_info - } + fn env_info(&self) -> &EnvInfo { + self.env_info + } - fn chain_id(&self) -> u64 { - self.machine.params().chain_id - } + fn chain_id(&self) -> u64 { + self.machine.params().chain_id + } - fn depth(&self) -> usize { - self.depth - } + fn depth(&self) -> usize { + self.depth + } - fn add_sstore_refund(&mut self, value: usize) { - self.substate.sstore_clears_refund += value as i128; - } + fn add_sstore_refund(&mut self, value: usize) { + self.substate.sstore_clears_refund += value as i128; + } - fn sub_sstore_refund(&mut self, value: usize) { - self.substate.sstore_clears_refund -= value as i128; - } + fn sub_sstore_refund(&mut self, value: usize) { + self.substate.sstore_clears_refund -= value as i128; + } - fn trace_next_instruction(&mut self, pc: usize, instruction: u8, current_gas: U256) -> bool { - self.vm_tracer.trace_next_instruction(pc, instruction, current_gas) - } + fn trace_next_instruction(&mut self, pc: usize, instruction: u8, current_gas: U256) -> bool { + self.vm_tracer + .trace_next_instruction(pc, instruction, current_gas) + } - fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: U256, mem_written: Option<(usize, usize)>, store_written: Option<(U256, U256)>) { - self.vm_tracer.trace_prepare_execute(pc, 
instruction, gas_cost, mem_written, store_written) - } + fn trace_prepare_execute( + &mut self, + pc: usize, + instruction: u8, + gas_cost: U256, + mem_written: Option<(usize, usize)>, + store_written: Option<(U256, U256)>, + ) { + self.vm_tracer + .trace_prepare_execute(pc, instruction, gas_cost, mem_written, store_written) + } - fn trace_failed(&mut self) { - self.vm_tracer.trace_failed(); - } + fn trace_failed(&mut self) { + self.vm_tracer.trace_failed(); + } - fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem: &[u8]) { - self.vm_tracer.trace_executed(gas_used, stack_push, mem) - } + fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem: &[u8]) { + self.vm_tracer.trace_executed(gas_used, stack_push, mem) + } } #[cfg(test)] mod tests { - use ethereum_types::{U256, Address}; - use evm::{EnvInfo, Ext, CallType}; - use state::{State, Substate}; - use test_helpers::get_temp_state; - use super::*; - use trace::{NoopTracer, NoopVMTracer}; + use super::*; + use ethereum_types::{Address, U256}; + use evm::{CallType, EnvInfo, Ext}; + use state::{State, Substate}; + use test_helpers::get_temp_state; + use trace::{NoopTracer, NoopVMTracer}; - fn get_test_origin() -> OriginInfo { - OriginInfo { - address: Address::zero(), - origin: Address::zero(), - gas_price: U256::zero(), - value: U256::zero() - } - } + fn get_test_origin() -> OriginInfo { + OriginInfo { + address: Address::zero(), + origin: Address::zero(), + gas_price: U256::zero(), + value: U256::zero(), + } + } - fn get_test_env_info() -> EnvInfo { - EnvInfo { - number: 100, - author: 0.into(), - timestamp: 0, - difficulty: 0.into(), - last_hashes: Arc::new(vec![]), - gas_used: 0.into(), - gas_limit: 0.into(), - } - } + fn get_test_env_info() -> EnvInfo { + EnvInfo { + number: 100, + author: 0.into(), + timestamp: 0, + difficulty: 0.into(), + last_hashes: Arc::new(vec![]), + gas_used: 0.into(), + gas_limit: 0.into(), + } + } - struct TestSetup { - state: State<::state_db::StateDB>, 
- machine: ::machine::EthereumMachine, - schedule: Schedule, - sub_state: Substate, - env_info: EnvInfo - } + struct TestSetup { + state: State<::state_db::StateDB>, + machine: ::machine::EthereumMachine, + schedule: Schedule, + sub_state: Substate, + env_info: EnvInfo, + } - impl Default for TestSetup { - fn default() -> Self { - TestSetup::new() - } - } + impl Default for TestSetup { + fn default() -> Self { + TestSetup::new() + } + } - impl TestSetup { - fn new() -> Self { - let machine = ::spec::Spec::new_test_machine(); - let env_info = get_test_env_info(); - let schedule = machine.schedule(env_info.number); - TestSetup { - state: get_temp_state(), - schedule: schedule, - machine: machine, - sub_state: Substate::new(), - env_info: env_info, - } - } - } + impl TestSetup { + fn new() -> Self { + let machine = ::spec::Spec::new_test_machine(); + let env_info = get_test_env_info(); + let schedule = machine.schedule(env_info.number); + TestSetup { + state: get_temp_state(), + schedule: schedule, + machine: machine, + sub_state: Substate::new(), + env_info: env_info, + } + } + } - #[test] - fn can_be_created() { - let mut setup = TestSetup::new(); - let state = &mut setup.state; - let mut tracer = NoopTracer; - let mut vm_tracer = NoopVMTracer; - let origin_info = get_test_origin(); + #[test] + fn can_be_created() { + let mut setup = TestSetup::new(); + let state = &mut setup.state; + let mut tracer = NoopTracer; + let mut vm_tracer = NoopVMTracer; + let origin_info = get_test_origin(); - let ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, 0, &origin_info, &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); + let ext = Externalities::new( + state, + &setup.env_info, + &setup.machine, + &setup.schedule, + 0, + 0, + &origin_info, + &mut setup.sub_state, + OutputPolicy::InitContract, + &mut tracer, + &mut vm_tracer, + false, + ); - assert_eq!(ext.env_info().number, 100); - } + 
assert_eq!(ext.env_info().number, 100); + } - #[test] - fn can_return_block_hash_no_env() { - let mut setup = TestSetup::new(); - let state = &mut setup.state; - let mut tracer = NoopTracer; - let mut vm_tracer = NoopVMTracer; - let origin_info = get_test_origin(); + #[test] + fn can_return_block_hash_no_env() { + let mut setup = TestSetup::new(); + let state = &mut setup.state; + let mut tracer = NoopTracer; + let mut vm_tracer = NoopVMTracer; + let origin_info = get_test_origin(); - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, 0, &origin_info, &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); + let mut ext = Externalities::new( + state, + &setup.env_info, + &setup.machine, + &setup.schedule, + 0, + 0, + &origin_info, + &mut setup.sub_state, + OutputPolicy::InitContract, + &mut tracer, + &mut vm_tracer, + false, + ); - let hash = ext.blockhash(&"0000000000000000000000000000000000000000000000000000000000120000".parse::().unwrap()); + let hash = ext.blockhash( + &"0000000000000000000000000000000000000000000000000000000000120000" + .parse::() + .unwrap(), + ); - assert_eq!(hash, H256::zero()); - } + assert_eq!(hash, H256::zero()); + } - #[test] - fn can_return_block_hash() { - let test_hash = H256::from("afafafafafafafafafafafbcbcbcbcbcbcbcbcbcbeeeeeeeeeeeeedddddddddd"); - let test_env_number = 0x120001; + #[test] + fn can_return_block_hash() { + let test_hash = + H256::from("afafafafafafafafafafafbcbcbcbcbcbcbcbcbcbeeeeeeeeeeeeedddddddddd"); + let test_env_number = 0x120001; - let mut setup = TestSetup::new(); - { - let env_info = &mut setup.env_info; - env_info.number = test_env_number; - let mut last_hashes = (*env_info.last_hashes).clone(); - last_hashes.push(test_hash.clone()); - env_info.last_hashes = Arc::new(last_hashes); - } - let state = &mut setup.state; - let mut tracer = NoopTracer; - let mut vm_tracer = NoopVMTracer; - let origin_info = get_test_origin(); + let mut 
setup = TestSetup::new(); + { + let env_info = &mut setup.env_info; + env_info.number = test_env_number; + let mut last_hashes = (*env_info.last_hashes).clone(); + last_hashes.push(test_hash.clone()); + env_info.last_hashes = Arc::new(last_hashes); + } + let state = &mut setup.state; + let mut tracer = NoopTracer; + let mut vm_tracer = NoopVMTracer; + let origin_info = get_test_origin(); - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, 0, &origin_info, &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); + let mut ext = Externalities::new( + state, + &setup.env_info, + &setup.machine, + &setup.schedule, + 0, + 0, + &origin_info, + &mut setup.sub_state, + OutputPolicy::InitContract, + &mut tracer, + &mut vm_tracer, + false, + ); - let hash = ext.blockhash(&"0000000000000000000000000000000000000000000000000000000000120000".parse::().unwrap()); + let hash = ext.blockhash( + &"0000000000000000000000000000000000000000000000000000000000120000" + .parse::() + .unwrap(), + ); - assert_eq!(test_hash, hash); - } + assert_eq!(test_hash, hash); + } - #[test] - #[should_panic] - fn can_call_fail_empty() { - let mut setup = TestSetup::new(); - let state = &mut setup.state; - let mut tracer = NoopTracer; - let mut vm_tracer = NoopVMTracer; - let origin_info = get_test_origin(); + #[test] + #[should_panic] + fn can_call_fail_empty() { + let mut setup = TestSetup::new(); + let state = &mut setup.state; + let mut tracer = NoopTracer; + let mut vm_tracer = NoopVMTracer; + let origin_info = get_test_origin(); - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, 0, &origin_info, &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); + let mut ext = Externalities::new( + state, + &setup.env_info, + &setup.machine, + &setup.schedule, + 0, + 0, + &origin_info, + &mut setup.sub_state, + OutputPolicy::InitContract, + &mut tracer, + &mut 
vm_tracer, + false, + ); - // this should panic because we have no balance on any account - ext.call( - &"0000000000000000000000000000000000000000000000000000000000120000".parse::().unwrap(), - &Address::new(), - &Address::new(), - Some("0000000000000000000000000000000000000000000000000000000000150000".parse::().unwrap()), - &[], - &Address::new(), - CallType::Call, - false, - ).ok().unwrap(); - } + // this should panic because we have no balance on any account + ext.call( + &"0000000000000000000000000000000000000000000000000000000000120000" + .parse::() + .unwrap(), + &Address::new(), + &Address::new(), + Some( + "0000000000000000000000000000000000000000000000000000000000150000" + .parse::() + .unwrap(), + ), + &[], + &Address::new(), + CallType::Call, + false, + ) + .ok() + .unwrap(); + } - #[test] - fn can_log() { - let log_data = vec![120u8, 110u8]; - let log_topics = vec![H256::from("af0fa234a6af46afa23faf23bcbc1c1cb4bcb7bcbe7e7e7ee3ee2edddddddddd")]; + #[test] + fn can_log() { + let log_data = vec![120u8, 110u8]; + let log_topics = vec![H256::from( + "af0fa234a6af46afa23faf23bcbc1c1cb4bcb7bcbe7e7e7ee3ee2edddddddddd", + )]; - let mut setup = TestSetup::new(); - let state = &mut setup.state; - let mut tracer = NoopTracer; - let mut vm_tracer = NoopVMTracer; - let origin_info = get_test_origin(); + let mut setup = TestSetup::new(); + let state = &mut setup.state; + let mut tracer = NoopTracer; + let mut vm_tracer = NoopVMTracer; + let origin_info = get_test_origin(); - { - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, 0, &origin_info, &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); - ext.log(log_topics, &log_data).unwrap(); - } + { + let mut ext = Externalities::new( + state, + &setup.env_info, + &setup.machine, + &setup.schedule, + 0, + 0, + &origin_info, + &mut setup.sub_state, + OutputPolicy::InitContract, + &mut tracer, + &mut vm_tracer, + false, + ); + ext.log(log_topics, 
&log_data).unwrap(); + } - assert_eq!(setup.sub_state.logs.len(), 1); - } + assert_eq!(setup.sub_state.logs.len(), 1); + } - #[test] - fn can_suicide() { - let refund_account = &Address::new(); + #[test] + fn can_suicide() { + let refund_account = &Address::new(); - let mut setup = TestSetup::new(); - let state = &mut setup.state; - let mut tracer = NoopTracer; - let mut vm_tracer = NoopVMTracer; - let origin_info = get_test_origin(); + let mut setup = TestSetup::new(); + let state = &mut setup.state; + let mut tracer = NoopTracer; + let mut vm_tracer = NoopVMTracer; + let origin_info = get_test_origin(); - { - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, 0, &origin_info, &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); - ext.suicide(refund_account).unwrap(); - } + { + let mut ext = Externalities::new( + state, + &setup.env_info, + &setup.machine, + &setup.schedule, + 0, + 0, + &origin_info, + &mut setup.sub_state, + OutputPolicy::InitContract, + &mut tracer, + &mut vm_tracer, + false, + ); + ext.suicide(refund_account).unwrap(); + } - assert_eq!(setup.sub_state.suicides.len(), 1); - } + assert_eq!(setup.sub_state.suicides.len(), 1); + } - #[test] - fn can_create() { - use std::str::FromStr; + #[test] + fn can_create() { + use std::str::FromStr; - let mut setup = TestSetup::new(); - let state = &mut setup.state; - let mut tracer = NoopTracer; - let mut vm_tracer = NoopVMTracer; - let origin_info = get_test_origin(); + let mut setup = TestSetup::new(); + let state = &mut setup.state; + let mut tracer = NoopTracer; + let mut vm_tracer = NoopVMTracer; + let origin_info = get_test_origin(); - let address = { - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, 0, &origin_info, &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); - match ext.create(&U256::max_value(), &U256::zero(), &[], 
CreateContractAddress::FromSenderAndNonce, false) { - Ok(ContractCreateResult::Created(address, _)) => address, - _ => panic!("Test create failed; expected Created, got Failed/Reverted."), - } - }; + let address = { + let mut ext = Externalities::new( + state, + &setup.env_info, + &setup.machine, + &setup.schedule, + 0, + 0, + &origin_info, + &mut setup.sub_state, + OutputPolicy::InitContract, + &mut tracer, + &mut vm_tracer, + false, + ); + match ext.create( + &U256::max_value(), + &U256::zero(), + &[], + CreateContractAddress::FromSenderAndNonce, + false, + ) { + Ok(ContractCreateResult::Created(address, _)) => address, + _ => panic!("Test create failed; expected Created, got Failed/Reverted."), + } + }; - assert_eq!(address, Address::from_str("bd770416a3345f91e4b34576cb804a576fa48eb1").unwrap()); - } + assert_eq!( + address, + Address::from_str("bd770416a3345f91e4b34576cb804a576fa48eb1").unwrap() + ); + } - #[test] - fn can_create2() { - use std::str::FromStr; + #[test] + fn can_create2() { + use std::str::FromStr; - let mut setup = TestSetup::new(); - let state = &mut setup.state; - let mut tracer = NoopTracer; - let mut vm_tracer = NoopVMTracer; - let origin_info = get_test_origin(); + let mut setup = TestSetup::new(); + let state = &mut setup.state; + let mut tracer = NoopTracer; + let mut vm_tracer = NoopVMTracer; + let origin_info = get_test_origin(); - let address = { - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, 0, &origin_info, &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); + let address = { + let mut ext = Externalities::new( + state, + &setup.env_info, + &setup.machine, + &setup.schedule, + 0, + 0, + &origin_info, + &mut setup.sub_state, + OutputPolicy::InitContract, + &mut tracer, + &mut vm_tracer, + false, + ); - match ext.create(&U256::max_value(), &U256::zero(), &[], CreateContractAddress::FromSenderSaltAndCodeHash(H256::default()), false) { - 
Ok(ContractCreateResult::Created(address, _)) => address, - _ => panic!("Test create failed; expected Created, got Failed/Reverted."), - } - }; + match ext.create( + &U256::max_value(), + &U256::zero(), + &[], + CreateContractAddress::FromSenderSaltAndCodeHash(H256::default()), + false, + ) { + Ok(ContractCreateResult::Created(address, _)) => address, + _ => panic!("Test create failed; expected Created, got Failed/Reverted."), + } + }; - assert_eq!(address, Address::from_str("e33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0").unwrap()); - } + assert_eq!( + address, + Address::from_str("e33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0").unwrap() + ); + } } diff --git a/ethcore/src/factory.rs b/ethcore/src/factory.rs index 06b77da9a..444fa632b 100644 --- a/ethcore/src/factory.rs +++ b/ethcore/src/factory.rs @@ -14,49 +14,55 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use trie::TrieFactory; -use ethtrie::RlpCodec; use account_db::Factory as AccountFactory; +use ethtrie::RlpCodec; use evm::{Factory as EvmFactory, VMType}; -use vm::{Exec, ActionParams, Schedule}; -use wasm::WasmInterpreter; use keccak_hasher::KeccakHasher; +use trie::TrieFactory; +use vm::{ActionParams, Exec, Schedule}; +use wasm::WasmInterpreter; const WASM_MAGIC_NUMBER: &'static [u8; 4] = b"\0asm"; /// Virtual machine factory #[derive(Default, Clone)] pub struct VmFactory { - evm: EvmFactory, + evm: EvmFactory, } impl VmFactory { - pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box { - if schedule.wasm.is_some() && params.code.as_ref().map_or(false, |code| code.len() > 4 && &code[0..4] == WASM_MAGIC_NUMBER) { - Box::new(WasmInterpreter::new(params)) - } else { - self.evm.create(params, schedule, depth) - } - } + pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box { + if schedule.wasm.is_some() + && params.code.as_ref().map_or(false, |code| { + code.len() > 4 && &code[0..4] == 
WASM_MAGIC_NUMBER + }) + { + Box::new(WasmInterpreter::new(params)) + } else { + self.evm.create(params, schedule, depth) + } + } - pub fn new(evm: VMType, cache_size: usize) -> Self { - VmFactory { evm: EvmFactory::new(evm, cache_size) } - } + pub fn new(evm: VMType, cache_size: usize) -> Self { + VmFactory { + evm: EvmFactory::new(evm, cache_size), + } + } } impl From for VmFactory { - fn from(evm: EvmFactory) -> Self { - VmFactory { evm: evm } - } + fn from(evm: EvmFactory) -> Self { + VmFactory { evm: evm } + } } /// Collection of factories. #[derive(Default, Clone)] pub struct Factories { - /// factory for evm. - pub vm: VmFactory, - /// factory for tries. - pub trie: TrieFactory, - /// factory for account databases. - pub accountdb: AccountFactory, + /// factory for evm. + pub vm: VmFactory, + /// factory for tries. + pub trie: TrieFactory, + /// factory for account databases. + pub accountdb: AccountFactory, } diff --git a/ethcore/src/json_tests/chain.rs b/ethcore/src/json_tests/chain.rs index 4488d0f32..f95f1deea 100644 --- a/ethcore/src/json_tests/chain.rs +++ b/ethcore/src/json_tests/chain.rs @@ -14,184 +14,199 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::path::Path; -use std::sync::Arc; -use client::{EvmTestClient, Client, ClientConfig, ChainInfo, ImportBlock}; -use spec::Genesis; +use super::{HookType, SKIP_TEST_STATE}; +use client::{ChainInfo, Client, ClientConfig, EvmTestClient, ImportBlock}; use ethjson; -use miner::Miner; use io::IoChannel; +use miner::Miner; +use spec::Genesis; +use std::{path::Path, sync::Arc}; use test_helpers; -use verification::queue::kind::blocks::Unverified; -use verification::VerifierType; -use super::SKIP_TEST_STATE; -use super::HookType; +use verification::{queue::kind::blocks::Unverified, VerifierType}; /// Run chain jsontests on a given folder. 
pub fn run_test_path(p: &Path, skip: &[&'static str], h: &mut H) { - ::json_tests::test_common::run_test_path(p, skip, json_chain_test, h) + ::json_tests::test_common::run_test_path(p, skip, json_chain_test, h) } /// Run chain jsontests on a given file. pub fn run_test_file(p: &Path, h: &mut H) { - ::json_tests::test_common::run_test_file(p, json_chain_test, h) + ::json_tests::test_common::run_test_file(p, json_chain_test, h) } fn skip_test(name: &String) -> bool { - SKIP_TEST_STATE.block.iter().any(|block_test|block_test.subtests.contains(name)) + SKIP_TEST_STATE + .block + .iter() + .any(|block_test| block_test.subtests.contains(name)) } -pub fn json_chain_test(json_data: &[u8], start_stop_hook: &mut H) -> Vec { - let _ = ::env_logger::try_init(); - let tests = ethjson::blockchain::Test::load(json_data).unwrap(); - let mut failed = Vec::new(); +pub fn json_chain_test( + json_data: &[u8], + start_stop_hook: &mut H, +) -> Vec { + let _ = ::env_logger::try_init(); + let tests = ethjson::blockchain::Test::load(json_data).unwrap(); + let mut failed = Vec::new(); - for (name, blockchain) in tests.into_iter() { - if skip_test(&name) { - println!(" - {} | {:?} Ignoring tests because in skip list", name, blockchain.network); - continue; - } - start_stop_hook(&name, HookType::OnStart); + for (name, blockchain) in tests.into_iter() { + if skip_test(&name) { + println!( + " - {} | {:?} Ignoring tests because in skip list", + name, blockchain.network + ); + continue; + } + start_stop_hook(&name, HookType::OnStart); - let mut fail = false; - { - let mut fail_unless = |cond: bool| if !cond && !fail { - failed.push(name.clone()); - flushln!("FAIL"); - fail = true; - true - } else {false}; + let mut fail = false; + { + let mut fail_unless = |cond: bool| { + if !cond && !fail { + failed.push(name.clone()); + flushln!("FAIL"); + fail = true; + true + } else { + false + } + }; - flush!(" - {}...", name); + flush!(" - {}...", name); - let spec = { - let mut spec = match 
EvmTestClient::spec_from_json(&blockchain.network) { - Some(spec) => spec, - None => { - println!(" - {} | {:?} Ignoring tests because of missing spec", name, blockchain.network); - continue; - } - }; + let spec = { + let mut spec = match EvmTestClient::spec_from_json(&blockchain.network) { + Some(spec) => spec, + None => { + println!( + " - {} | {:?} Ignoring tests because of missing spec", + name, blockchain.network + ); + continue; + } + }; - let genesis = Genesis::from(blockchain.genesis()); - let state = From::from(blockchain.pre_state.clone()); - spec.set_genesis_state(state).expect("Failed to overwrite genesis state"); - spec.overwrite_genesis_params(genesis); - assert!(spec.is_state_root_valid()); - spec - }; + let genesis = Genesis::from(blockchain.genesis()); + let state = From::from(blockchain.pre_state.clone()); + spec.set_genesis_state(state) + .expect("Failed to overwrite genesis state"); + spec.overwrite_genesis_params(genesis); + assert!(spec.is_state_root_valid()); + spec + }; - { - let db = test_helpers::new_db(); - let mut config = ClientConfig::default(); - if ethjson::blockchain::Engine::NoProof == blockchain.engine { - config.verifier_type = VerifierType::CanonNoSeal; - config.check_seal = false; - } - config.history = 8; - let client = Client::new( - config, - &spec, - db, - Arc::new(Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); - for b in blockchain.blocks_rlp() { - if let Ok(block) = Unverified::from_rlp(b) { - let _ = client.import_block(block); - client.flush_queue(); - client.import_verified_blocks(); - } - } - fail_unless(client.chain_info().best_block_hash == blockchain.best_block.into()); - } - } + { + let db = test_helpers::new_db(); + let mut config = ClientConfig::default(); + if ethjson::blockchain::Engine::NoProof == blockchain.engine { + config.verifier_type = VerifierType::CanonNoSeal; + config.check_seal = false; + } + config.history = 8; + let client = Client::new( + config, + &spec, + db, + 
Arc::new(Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + for b in blockchain.blocks_rlp() { + if let Ok(block) = Unverified::from_rlp(b) { + let _ = client.import_block(block); + client.flush_queue(); + client.import_verified_blocks(); + } + } + fail_unless(client.chain_info().best_block_hash == blockchain.best_block.into()); + } + } - if !fail { - flushln!("ok"); - } + if !fail { + flushln!("ok"); + } - start_stop_hook(&name, HookType::OnStop); - } + start_stop_hook(&name, HookType::OnStop); + } - println!("!!! {:?} tests from failed.", failed.len()); - failed + println!("!!! {:?} tests from failed.", failed.len()); + failed } #[cfg(test)] mod block_tests { - use super::json_chain_test; - use json_tests::HookType; + use super::json_chain_test; + use json_tests::HookType; - fn do_json_test(json_data: &[u8], h: &mut H) -> Vec { - json_chain_test(json_data, h) - } + fn do_json_test(json_data: &[u8], h: &mut H) -> Vec { + json_chain_test(json_data, h) + } - declare_test!{BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} - declare_test!{BlockchainTests_bcExploitTest, "BlockchainTests/bcExploitTest"} - declare_test!{BlockchainTests_bcForgedTest, "BlockchainTests/bcForgedTest"} - declare_test!{BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} - declare_test!{BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} - declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} - declare_test!{BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} - declare_test!{BlockchainTests_bcRandomBlockhashTest, "BlockchainTests/bcRandomBlockhashTest"} - declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTests"} - declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} - declare_test!{BlockchainTests_bcUncleHeaderValidity, "BlockchainTests/bcUncleHeaderValidity"} - 
declare_test!{BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} - declare_test!{BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} - declare_test!{BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} + declare_test! {BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} + declare_test! {BlockchainTests_bcExploitTest, "BlockchainTests/bcExploitTest"} + declare_test! {BlockchainTests_bcForgedTest, "BlockchainTests/bcForgedTest"} + declare_test! {BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} + declare_test! {BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} + declare_test! {BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} + declare_test! {BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} + declare_test! {BlockchainTests_bcRandomBlockhashTest, "BlockchainTests/bcRandomBlockhashTest"} + declare_test! {BlockchainTests_bcStateTest, "BlockchainTests/bcStateTests"} + declare_test! {BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} + declare_test! {BlockchainTests_bcUncleHeaderValidity, "BlockchainTests/bcUncleHeaderValidity"} + declare_test! {BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} + declare_test! {BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} + declare_test! 
{BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} - declare_test!{BlockchainTests_GeneralStateTest_stArgsZeroOneBalance, "BlockchainTests/GeneralStateTests/stArgsZeroOneBalance/"} - declare_test!{BlockchainTests_GeneralStateTest_stAttackTest, "BlockchainTests/GeneralStateTests/stAttackTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stBadOpcodeTest, "BlockchainTests/GeneralStateTests/stBadOpcode/"} - declare_test!{BlockchainTests_GeneralStateTest_stBugsTest, "BlockchainTests/GeneralStateTests/stBugs/"} - declare_test!{BlockchainTests_GeneralStateTest_stCallCodes, "BlockchainTests/GeneralStateTests/stCallCodes/"} - declare_test!{BlockchainTests_GeneralStateTest_stCallCreateCallCodeTest, "BlockchainTests/GeneralStateTests/stCallCreateCallCodeTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stCallDelegateCodesCallCodeHomestead, "BlockchainTests/GeneralStateTests/stCallDelegateCodesCallCodeHomestead/"} - declare_test!{BlockchainTests_GeneralStateTest_stCallDelegateCodesHomestead, "BlockchainTests/GeneralStateTests/stCallDelegateCodesHomestead/"} - declare_test!{BlockchainTests_GeneralStateTest_stChangedEIP150, "BlockchainTests/GeneralStateTests/stChangedEIP150/"} - declare_test!{BlockchainTests_GeneralStateTest_stCodeSizeLimit, "BlockchainTests/GeneralStateTests/stCodeSizeLimit/"} - declare_test!{BlockchainTests_GeneralStateTest_stCreate2, "BlockchainTests/GeneralStateTests/stCreate2/"} - declare_test!{BlockchainTests_GeneralStateTest_stCreateTest, "BlockchainTests/GeneralStateTests/stCreateTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stDelegatecallTestHomestead, "BlockchainTests/GeneralStateTests/stDelegatecallTestHomestead/"} - declare_test!{BlockchainTests_GeneralStateTest_stEIP150singleCodeGasPrices, "BlockchainTests/GeneralStateTests/stEIP150singleCodeGasPrices/"} - declare_test!{BlockchainTests_GeneralStateTest_stEIP150Specific, "BlockchainTests/GeneralStateTests/stEIP150Specific/"} - 
declare_test!{BlockchainTests_GeneralStateTest_stEIP158Specific, "BlockchainTests/GeneralStateTests/stEIP158Specific/"} - declare_test!{BlockchainTests_GeneralStateTest_stExample, "BlockchainTests/GeneralStateTests/stExample/"} - declare_test!{BlockchainTests_GeneralStateTest_stHomesteadSpecific, "BlockchainTests/GeneralStateTests/stHomesteadSpecific/"} - declare_test!{BlockchainTests_GeneralStateTest_stInitCodeTest, "BlockchainTests/GeneralStateTests/stInitCodeTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stLogTests, "BlockchainTests/GeneralStateTests/stLogTests/"} - declare_test!{BlockchainTests_GeneralStateTest_stMemExpandingEIP150Calls, "BlockchainTests/GeneralStateTests/stMemExpandingEIP150Calls/"} - declare_test!{heavy => BlockchainTests_GeneralStateTest_stMemoryStressTest, "BlockchainTests/GeneralStateTests/stMemoryStressTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stMemoryTest, "BlockchainTests/GeneralStateTests/stMemoryTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stNonZeroCallsTest, "BlockchainTests/GeneralStateTests/stNonZeroCallsTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stPreCompiledContracts, "BlockchainTests/GeneralStateTests/stPreCompiledContracts/"} - declare_test!{BlockchainTests_GeneralStateTest_stPreCompiledContracts2, "BlockchainTests/GeneralStateTests/stPreCompiledContracts2/"} - declare_test!{heavy => BlockchainTests_GeneralStateTest_stQuadraticComplexityTest, "BlockchainTests/GeneralStateTests/stQuadraticComplexityTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stRandom, "BlockchainTests/GeneralStateTests/stRandom/"} - declare_test!{BlockchainTests_GeneralStateTest_stRandom2, "BlockchainTests/GeneralStateTests/stRandom2/"} - declare_test!{BlockchainTests_GeneralStateTest_stRecursiveCreate, "BlockchainTests/GeneralStateTests/stRecursiveCreate/"} - declare_test!{BlockchainTests_GeneralStateTest_stRefundTest, "BlockchainTests/GeneralStateTests/stRefundTest/"} - 
declare_test!{BlockchainTests_GeneralStateTest_stReturnDataTest, "BlockchainTests/GeneralStateTests/stReturnDataTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stRevertTest, "BlockchainTests/GeneralStateTests/stRevertTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stShift, "BlockchainTests/GeneralStateTests/stShift/"} - declare_test!{BlockchainTests_GeneralStateTest_stSolidityTest, "BlockchainTests/GeneralStateTests/stSolidityTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stSpecialTest, "BlockchainTests/GeneralStateTests/stSpecialTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stStackTests, "BlockchainTests/GeneralStateTests/stStackTests/"} - declare_test!{BlockchainTests_GeneralStateTest_stStaticCall, "BlockchainTests/GeneralStateTests/stStaticCall/"} - declare_test!{BlockchainTests_GeneralStateTest_stSystemOperationsTest, "BlockchainTests/GeneralStateTests/stSystemOperationsTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stTransactionTest, "BlockchainTests/GeneralStateTests/stTransactionTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stTransitionTest, "BlockchainTests/GeneralStateTests/stTransitionTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stWalletTest, "BlockchainTests/GeneralStateTests/stWalletTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stZeroCallsRevert, "BlockchainTests/GeneralStateTests/stZeroCallsRevert/"} - declare_test!{BlockchainTests_GeneralStateTest_stZeroCallsTest, "BlockchainTests/GeneralStateTests/stZeroCallsTest/"} - declare_test!{BlockchainTests_GeneralStateTest_stZeroKnowledge, "BlockchainTests/GeneralStateTests/stZeroKnowledge/"} - declare_test!{BlockchainTests_GeneralStateTest_stZeroKnowledge2, "BlockchainTests/GeneralStateTests/stZeroKnowledge2/"} - declare_test!{BlockchainTests_GeneralStateTest_stSStoreTest, "BlockchainTests/GeneralStateTests/stSStoreTest/"} + declare_test! 
{BlockchainTests_GeneralStateTest_stArgsZeroOneBalance, "BlockchainTests/GeneralStateTests/stArgsZeroOneBalance/"} + declare_test! {BlockchainTests_GeneralStateTest_stAttackTest, "BlockchainTests/GeneralStateTests/stAttackTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stBadOpcodeTest, "BlockchainTests/GeneralStateTests/stBadOpcode/"} + declare_test! {BlockchainTests_GeneralStateTest_stBugsTest, "BlockchainTests/GeneralStateTests/stBugs/"} + declare_test! {BlockchainTests_GeneralStateTest_stCallCodes, "BlockchainTests/GeneralStateTests/stCallCodes/"} + declare_test! {BlockchainTests_GeneralStateTest_stCallCreateCallCodeTest, "BlockchainTests/GeneralStateTests/stCallCreateCallCodeTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stCallDelegateCodesCallCodeHomestead, "BlockchainTests/GeneralStateTests/stCallDelegateCodesCallCodeHomestead/"} + declare_test! {BlockchainTests_GeneralStateTest_stCallDelegateCodesHomestead, "BlockchainTests/GeneralStateTests/stCallDelegateCodesHomestead/"} + declare_test! {BlockchainTests_GeneralStateTest_stChangedEIP150, "BlockchainTests/GeneralStateTests/stChangedEIP150/"} + declare_test! {BlockchainTests_GeneralStateTest_stCodeSizeLimit, "BlockchainTests/GeneralStateTests/stCodeSizeLimit/"} + declare_test! {BlockchainTests_GeneralStateTest_stCreate2, "BlockchainTests/GeneralStateTests/stCreate2/"} + declare_test! {BlockchainTests_GeneralStateTest_stCreateTest, "BlockchainTests/GeneralStateTests/stCreateTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stDelegatecallTestHomestead, "BlockchainTests/GeneralStateTests/stDelegatecallTestHomestead/"} + declare_test! {BlockchainTests_GeneralStateTest_stEIP150singleCodeGasPrices, "BlockchainTests/GeneralStateTests/stEIP150singleCodeGasPrices/"} + declare_test! {BlockchainTests_GeneralStateTest_stEIP150Specific, "BlockchainTests/GeneralStateTests/stEIP150Specific/"} + declare_test! 
{BlockchainTests_GeneralStateTest_stEIP158Specific, "BlockchainTests/GeneralStateTests/stEIP158Specific/"} + declare_test! {BlockchainTests_GeneralStateTest_stExample, "BlockchainTests/GeneralStateTests/stExample/"} + declare_test! {BlockchainTests_GeneralStateTest_stHomesteadSpecific, "BlockchainTests/GeneralStateTests/stHomesteadSpecific/"} + declare_test! {BlockchainTests_GeneralStateTest_stInitCodeTest, "BlockchainTests/GeneralStateTests/stInitCodeTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stLogTests, "BlockchainTests/GeneralStateTests/stLogTests/"} + declare_test! {BlockchainTests_GeneralStateTest_stMemExpandingEIP150Calls, "BlockchainTests/GeneralStateTests/stMemExpandingEIP150Calls/"} + declare_test! {heavy => BlockchainTests_GeneralStateTest_stMemoryStressTest, "BlockchainTests/GeneralStateTests/stMemoryStressTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stMemoryTest, "BlockchainTests/GeneralStateTests/stMemoryTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stNonZeroCallsTest, "BlockchainTests/GeneralStateTests/stNonZeroCallsTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stPreCompiledContracts, "BlockchainTests/GeneralStateTests/stPreCompiledContracts/"} + declare_test! {BlockchainTests_GeneralStateTest_stPreCompiledContracts2, "BlockchainTests/GeneralStateTests/stPreCompiledContracts2/"} + declare_test! {heavy => BlockchainTests_GeneralStateTest_stQuadraticComplexityTest, "BlockchainTests/GeneralStateTests/stQuadraticComplexityTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stRandom, "BlockchainTests/GeneralStateTests/stRandom/"} + declare_test! {BlockchainTests_GeneralStateTest_stRandom2, "BlockchainTests/GeneralStateTests/stRandom2/"} + declare_test! {BlockchainTests_GeneralStateTest_stRecursiveCreate, "BlockchainTests/GeneralStateTests/stRecursiveCreate/"} + declare_test! {BlockchainTests_GeneralStateTest_stRefundTest, "BlockchainTests/GeneralStateTests/stRefundTest/"} + declare_test! 
{BlockchainTests_GeneralStateTest_stReturnDataTest, "BlockchainTests/GeneralStateTests/stReturnDataTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stRevertTest, "BlockchainTests/GeneralStateTests/stRevertTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stShift, "BlockchainTests/GeneralStateTests/stShift/"} + declare_test! {BlockchainTests_GeneralStateTest_stSolidityTest, "BlockchainTests/GeneralStateTests/stSolidityTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stSpecialTest, "BlockchainTests/GeneralStateTests/stSpecialTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stStackTests, "BlockchainTests/GeneralStateTests/stStackTests/"} + declare_test! {BlockchainTests_GeneralStateTest_stStaticCall, "BlockchainTests/GeneralStateTests/stStaticCall/"} + declare_test! {BlockchainTests_GeneralStateTest_stSystemOperationsTest, "BlockchainTests/GeneralStateTests/stSystemOperationsTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stTransactionTest, "BlockchainTests/GeneralStateTests/stTransactionTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stTransitionTest, "BlockchainTests/GeneralStateTests/stTransitionTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stWalletTest, "BlockchainTests/GeneralStateTests/stWalletTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stZeroCallsRevert, "BlockchainTests/GeneralStateTests/stZeroCallsRevert/"} + declare_test! {BlockchainTests_GeneralStateTest_stZeroCallsTest, "BlockchainTests/GeneralStateTests/stZeroCallsTest/"} + declare_test! {BlockchainTests_GeneralStateTest_stZeroKnowledge, "BlockchainTests/GeneralStateTests/stZeroKnowledge/"} + declare_test! {BlockchainTests_GeneralStateTest_stZeroKnowledge2, "BlockchainTests/GeneralStateTests/stZeroKnowledge2/"} + declare_test! 
{BlockchainTests_GeneralStateTest_stSStoreTest, "BlockchainTests/GeneralStateTests/stSStoreTest/"} - declare_test!{BlockchainTests_TransitionTests_bcEIP158ToByzantium, "BlockchainTests/TransitionTests/bcEIP158ToByzantium/"} - declare_test!{BlockchainTests_TransitionTests_bcFrontierToHomestead, "BlockchainTests/TransitionTests/bcFrontierToHomestead/"} - declare_test!{BlockchainTests_TransitionTests_bcHomesteadToDao, "BlockchainTests/TransitionTests/bcHomesteadToDao/"} - declare_test!{BlockchainTests_TransitionTests_bcHomesteadToEIP150, "BlockchainTests/TransitionTests/bcHomesteadToEIP150/"} + declare_test! {BlockchainTests_TransitionTests_bcEIP158ToByzantium, "BlockchainTests/TransitionTests/bcEIP158ToByzantium/"} + declare_test! {BlockchainTests_TransitionTests_bcFrontierToHomestead, "BlockchainTests/TransitionTests/bcFrontierToHomestead/"} + declare_test! {BlockchainTests_TransitionTests_bcHomesteadToDao, "BlockchainTests/TransitionTests/bcHomesteadToDao/"} + declare_test! {BlockchainTests_TransitionTests_bcHomesteadToEIP150, "BlockchainTests/TransitionTests/bcHomesteadToEIP150/"} } diff --git a/ethcore/src/json_tests/difficulty.rs b/ethcore/src/json_tests/difficulty.rs index bf3a48fff..fc47a2768 100644 --- a/ethcore/src/json_tests/difficulty.rs +++ b/ethcore/src/json_tests/difficulty.rs @@ -14,85 +14,85 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use ethjson; -use types::header::Header; use ethereum_types::U256; +use ethjson; use spec::Spec; +use types::header::Header; use super::HookType; -pub fn json_difficulty_test(json_data: &[u8], spec: Spec, start_stop_hook: &mut H) -> Vec { - let _ = ::env_logger::try_init(); - let tests = ethjson::test::DifficultyTest::load(json_data).unwrap(); - let engine = &spec.engine; +pub fn json_difficulty_test( + json_data: &[u8], + spec: Spec, + start_stop_hook: &mut H, +) -> Vec { + let _ = ::env_logger::try_init(); + let tests = ethjson::test::DifficultyTest::load(json_data).unwrap(); + let engine = &spec.engine; - for (name, test) in tests.into_iter() { - start_stop_hook(&name, HookType::OnStart); + for (name, test) in tests.into_iter() { + start_stop_hook(&name, HookType::OnStart); - flush!(" - {}...", name); - println!(" - {}...", name); + flush!(" - {}...", name); + println!(" - {}...", name); - let mut parent_header = Header::new(); - let block_number: u64 = test.current_block_number.into(); - parent_header.set_number(block_number - 1); - parent_header.set_gas_limit(0x20000.into()); - parent_header.set_timestamp(test.parent_timestamp.into()); - parent_header.set_difficulty(test.parent_difficulty.into()); - parent_header.set_uncles_hash(test.parent_uncles.into()); - let mut header = Header::new(); - header.set_number(block_number); - header.set_timestamp(test.current_timestamp.into()); - engine.populate_from_parent(&mut header, &parent_header); - let expected_difficulty: U256 = test.current_difficulty.into(); - assert_eq!(header.difficulty(), &expected_difficulty); - flushln!("ok"); + let mut parent_header = Header::new(); + let block_number: u64 = test.current_block_number.into(); + parent_header.set_number(block_number - 1); + parent_header.set_gas_limit(0x20000.into()); + parent_header.set_timestamp(test.parent_timestamp.into()); + parent_header.set_difficulty(test.parent_difficulty.into()); + parent_header.set_uncles_hash(test.parent_uncles.into()); + let mut 
header = Header::new(); + header.set_number(block_number); + header.set_timestamp(test.current_timestamp.into()); + engine.populate_from_parent(&mut header, &parent_header); + let expected_difficulty: U256 = test.current_difficulty.into(); + assert_eq!(header.difficulty(), &expected_difficulty); + flushln!("ok"); - start_stop_hook(&name, HookType::OnStop); - } - vec![] + start_stop_hook(&name, HookType::OnStop); + } + vec![] } macro_rules! difficulty_json_test { - ( $spec:ident ) => { + ( $spec:ident ) => { + use super::json_difficulty_test; + use json_tests::HookType; + use tempdir::TempDir; - use super::json_difficulty_test; - use tempdir::TempDir; - use json_tests::HookType; - - fn do_json_test(json_data: &[u8], h: &mut H) -> Vec { - let tempdir = TempDir::new("").unwrap(); - json_difficulty_test(json_data, ::ethereum::$spec(&tempdir.path()), h) - } - - } + fn do_json_test(json_data: &[u8], h: &mut H) -> Vec { + let tempdir = TempDir::new("").unwrap(); + json_difficulty_test(json_data, ::ethereum::$spec(&tempdir.path()), h) + } + }; } macro_rules! difficulty_json_test_nopath { - ( $spec:ident ) => { + ( $spec:ident ) => { + use super::json_difficulty_test; + use json_tests::HookType; - use super::json_difficulty_test; - use json_tests::HookType; - - fn do_json_test(json_data: &[u8], h: &mut H) -> Vec { - json_difficulty_test(json_data, ::ethereum::$spec(), h) - } - - } + fn do_json_test(json_data: &[u8], h: &mut H) -> Vec { + json_difficulty_test(json_data, ::ethereum::$spec(), h) + } + }; } mod difficulty_test { - difficulty_json_test!(new_foundation); - declare_test!{DifficultyTests_difficulty, "BasicTests/difficulty.json"} + difficulty_json_test!(new_foundation); + declare_test! 
{DifficultyTests_difficulty, "BasicTests/difficulty.json"} } mod difficulty_test_byzantium { - difficulty_json_test_nopath!(new_byzantium_test); - declare_test!{DifficultyTests_difficultyByzantium, "BasicTests/difficultyByzantium.json"} + difficulty_json_test_nopath!(new_byzantium_test); + declare_test! {DifficultyTests_difficultyByzantium, "BasicTests/difficultyByzantium.json"} } mod difficulty_test_foundation { - difficulty_json_test!(new_foundation); - declare_test!{DifficultyTests_difficultyMainNetwork, "BasicTests/difficultyMainNetwork.json"} + difficulty_json_test!(new_foundation); + declare_test! {DifficultyTests_difficultyMainNetwork, "BasicTests/difficultyMainNetwork.json"} } // Disabling Ropsten diff tests; waiting for upstream ethereum/tests Constantinople update @@ -102,11 +102,11 @@ mod difficulty_test_foundation { //} mod difficulty_test_frontier { - difficulty_json_test_nopath!(new_frontier_test); - declare_test!{DifficultyTests_difficultyFrontier, "BasicTests/difficultyFrontier.json"} + difficulty_json_test_nopath!(new_frontier_test); + declare_test! {DifficultyTests_difficultyFrontier, "BasicTests/difficultyFrontier.json"} } mod difficulty_test_homestead { - difficulty_json_test_nopath!(new_homestead_test); - declare_test!{DifficultyTests_difficultyHomestead, "BasicTests/difficultyHomestead.json"} + difficulty_json_test_nopath!(new_homestead_test); + declare_test! {DifficultyTests_difficultyHomestead, "BasicTests/difficultyHomestead.json"} } diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index 0dae76e41..dd9b1ab7e 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -14,363 +14,404 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::path::Path; -use std::sync::Arc; use super::test_common::*; -use state::{Backend as StateBackend, State, Substate}; -use executive::*; -use evm::{VMType, Finalize}; -use vm::{ - self, ActionParams, CallType, Schedule, Ext, - ContractCreateResult, EnvInfo, MessageCallResult, - CreateContractAddress, ReturnData, -}; -use externalities::*; -use test_helpers::get_temp_state; -use ethjson; -use trace::{Tracer, NoopTracer}; -use trace::{VMTracer, NoopVMTracer}; use bytes::Bytes; +use ethjson; use ethtrie; -use rlp::RlpStream; +use evm::{Finalize, VMType}; +use executive::*; +use externalities::*; use hash::keccak; use machine::EthereumMachine as Machine; +use rlp::RlpStream; +use state::{Backend as StateBackend, State, Substate}; +use std::{path::Path, sync::Arc}; +use test_helpers::get_temp_state; +use trace::{NoopTracer, NoopVMTracer, Tracer, VMTracer}; +use vm::{ + self, ActionParams, CallType, ContractCreateResult, CreateContractAddress, EnvInfo, Ext, + MessageCallResult, ReturnData, Schedule, +}; use super::HookType; /// Run executive jsontests on a given folder. pub fn run_test_path(p: &Path, skip: &[&'static str], h: &mut H) { - ::json_tests::test_common::run_test_path(p, skip, do_json_test, h) + ::json_tests::test_common::run_test_path(p, skip, do_json_test, h) } /// Run executive jsontests on a given file. pub fn run_test_file(p: &Path, h: &mut H) { - ::json_tests::test_common::run_test_file(p, do_json_test, h) + ::json_tests::test_common::run_test_file(p, do_json_test, h) } #[derive(Debug, PartialEq, Clone)] struct CallCreate { - data: Bytes, - destination: Option
, - gas_limit: U256, - value: U256 + data: Bytes, + destination: Option
, + gas_limit: U256, + value: U256, } impl From for CallCreate { - fn from(c: ethjson::vm::Call) -> Self { - let dst: Option = c.destination.into(); - CallCreate { - data: c.data.into(), - destination: dst.map(Into::into), - gas_limit: c.gas_limit.into(), - value: c.value.into() - } - } + fn from(c: ethjson::vm::Call) -> Self { + let dst: Option = c.destination.into(); + CallCreate { + data: c.data.into(), + destination: dst.map(Into::into), + gas_limit: c.gas_limit.into(), + value: c.value.into(), + } + } } /// Tiny wrapper around executive externalities. /// Stores callcreates. struct TestExt<'a, T: 'a, V: 'a, B: 'a> - where T: Tracer, V: VMTracer, B: StateBackend +where + T: Tracer, + V: VMTracer, + B: StateBackend, { - ext: Externalities<'a, T, V, B>, - callcreates: Vec, - nonce: U256, - sender: Address, + ext: Externalities<'a, T, V, B>, + callcreates: Vec, + nonce: U256, + sender: Address, } impl<'a, T: 'a, V: 'a, B: 'a> TestExt<'a, T, V, B> - where T: Tracer, V: VMTracer, B: StateBackend, +where + T: Tracer, + V: VMTracer, + B: StateBackend, { - fn new( - state: &'a mut State, - info: &'a EnvInfo, - machine: &'a Machine, - schedule: &'a Schedule, - depth: usize, - origin_info: &'a OriginInfo, - substate: &'a mut Substate, - output: OutputPolicy, - address: Address, - tracer: &'a mut T, - vm_tracer: &'a mut V, - ) -> ethtrie::Result { - let static_call = false; - Ok(TestExt { - nonce: state.nonce(&address)?, - ext: Externalities::new(state, info, machine, schedule, depth, 0, origin_info, substate, output, tracer, vm_tracer, static_call), - callcreates: vec![], - sender: address, - }) - } + fn new( + state: &'a mut State, + info: &'a EnvInfo, + machine: &'a Machine, + schedule: &'a Schedule, + depth: usize, + origin_info: &'a OriginInfo, + substate: &'a mut Substate, + output: OutputPolicy, + address: Address, + tracer: &'a mut T, + vm_tracer: &'a mut V, + ) -> ethtrie::Result { + let static_call = false; + Ok(TestExt { + nonce: state.nonce(&address)?, + ext: 
Externalities::new( + state, + info, + machine, + schedule, + depth, + 0, + origin_info, + substate, + output, + tracer, + vm_tracer, + static_call, + ), + callcreates: vec![], + sender: address, + }) + } } impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B> - where T: Tracer, V: VMTracer, B: StateBackend +where + T: Tracer, + V: VMTracer, + B: StateBackend, { - fn storage_at(&self, key: &H256) -> vm::Result { - self.ext.storage_at(key) - } + fn storage_at(&self, key: &H256) -> vm::Result { + self.ext.storage_at(key) + } - fn initial_storage_at(&self, key: &H256) -> vm::Result { - self.ext.initial_storage_at(key) - } + fn initial_storage_at(&self, key: &H256) -> vm::Result { + self.ext.initial_storage_at(key) + } - fn set_storage(&mut self, key: H256, value: H256) -> vm::Result<()> { - self.ext.set_storage(key, value) - } + fn set_storage(&mut self, key: H256, value: H256) -> vm::Result<()> { + self.ext.set_storage(key, value) + } - fn exists(&self, address: &Address) -> vm::Result { - self.ext.exists(address) - } + fn exists(&self, address: &Address) -> vm::Result { + self.ext.exists(address) + } - fn exists_and_not_null(&self, address: &Address) -> vm::Result { - self.ext.exists_and_not_null(address) - } + fn exists_and_not_null(&self, address: &Address) -> vm::Result { + self.ext.exists_and_not_null(address) + } - fn balance(&self, address: &Address) -> vm::Result { - self.ext.balance(address) - } + fn balance(&self, address: &Address) -> vm::Result { + self.ext.balance(address) + } - fn origin_balance(&self) -> vm::Result { - self.ext.origin_balance() - } + fn origin_balance(&self) -> vm::Result { + self.ext.origin_balance() + } - fn blockhash(&mut self, number: &U256) -> H256 { - self.ext.blockhash(number) - } + fn blockhash(&mut self, number: &U256) -> H256 { + self.ext.blockhash(number) + } - fn create( - &mut self, - gas: &U256, - value: &U256, - code: &[u8], - address: CreateContractAddress, - _trap: bool - ) -> Result { - 
self.callcreates.push(CallCreate { - data: code.to_vec(), - destination: None, - gas_limit: *gas, - value: *value - }); - let contract_address = contract_address(address, &self.sender, &self.nonce, &code).0; - Ok(ContractCreateResult::Created(contract_address, *gas)) - } + fn create( + &mut self, + gas: &U256, + value: &U256, + code: &[u8], + address: CreateContractAddress, + _trap: bool, + ) -> Result { + self.callcreates.push(CallCreate { + data: code.to_vec(), + destination: None, + gas_limit: *gas, + value: *value, + }); + let contract_address = contract_address(address, &self.sender, &self.nonce, &code).0; + Ok(ContractCreateResult::Created(contract_address, *gas)) + } - fn call( - &mut self, - gas: &U256, - _sender_address: &Address, - receive_address: &Address, - value: Option, - data: &[u8], - _code_address: &Address, - _call_type: CallType, - _trap: bool - ) -> Result { - self.callcreates.push(CallCreate { - data: data.to_vec(), - destination: Some(receive_address.clone()), - gas_limit: *gas, - value: value.unwrap() - }); - Ok(MessageCallResult::Success(*gas, ReturnData::empty())) - } + fn call( + &mut self, + gas: &U256, + _sender_address: &Address, + receive_address: &Address, + value: Option, + data: &[u8], + _code_address: &Address, + _call_type: CallType, + _trap: bool, + ) -> Result { + self.callcreates.push(CallCreate { + data: data.to_vec(), + destination: Some(receive_address.clone()), + gas_limit: *gas, + value: value.unwrap(), + }); + Ok(MessageCallResult::Success(*gas, ReturnData::empty())) + } - fn extcode(&self, address: &Address) -> vm::Result>> { - self.ext.extcode(address) - } + fn extcode(&self, address: &Address) -> vm::Result>> { + self.ext.extcode(address) + } - fn extcodesize(&self, address: &Address) -> vm::Result> { - self.ext.extcodesize(address) - } + fn extcodesize(&self, address: &Address) -> vm::Result> { + self.ext.extcodesize(address) + } - fn extcodehash(&self, address: &Address) -> vm::Result> { - 
self.ext.extcodehash(address) - } + fn extcodehash(&self, address: &Address) -> vm::Result> { + self.ext.extcodehash(address) + } - fn log(&mut self, topics: Vec, data: &[u8]) -> vm::Result<()> { - self.ext.log(topics, data) - } + fn log(&mut self, topics: Vec, data: &[u8]) -> vm::Result<()> { + self.ext.log(topics, data) + } - fn ret(self, gas: &U256, data: &ReturnData, apply_state: bool) -> Result { - self.ext.ret(gas, data, apply_state) - } + fn ret(self, gas: &U256, data: &ReturnData, apply_state: bool) -> Result { + self.ext.ret(gas, data, apply_state) + } - fn suicide(&mut self, refund_address: &Address) -> vm::Result<()> { - self.ext.suicide(refund_address) - } + fn suicide(&mut self, refund_address: &Address) -> vm::Result<()> { + self.ext.suicide(refund_address) + } - fn schedule(&self) -> &Schedule { - self.ext.schedule() - } + fn schedule(&self) -> &Schedule { + self.ext.schedule() + } - fn env_info(&self) -> &EnvInfo { - self.ext.env_info() - } + fn env_info(&self) -> &EnvInfo { + self.ext.env_info() + } - fn chain_id(&self) -> u64 { 0 } + fn chain_id(&self) -> u64 { + 0 + } - fn depth(&self) -> usize { - 0 - } + fn depth(&self) -> usize { + 0 + } - fn is_static(&self) -> bool { - false - } + fn is_static(&self) -> bool { + false + } - fn add_sstore_refund(&mut self, value: usize) { - self.ext.add_sstore_refund(value) - } + fn add_sstore_refund(&mut self, value: usize) { + self.ext.add_sstore_refund(value) + } - fn sub_sstore_refund(&mut self, value: usize) { - self.ext.sub_sstore_refund(value) - } + fn sub_sstore_refund(&mut self, value: usize) { + self.ext.sub_sstore_refund(value) + } } fn do_json_test(json_data: &[u8], h: &mut H) -> Vec { - let vms = VMType::all(); - vms - .iter() - .flat_map(|vm| do_json_test_for(vm, json_data, h)) - .collect() + let vms = VMType::all(); + vms.iter() + .flat_map(|vm| do_json_test_for(vm, json_data, h)) + .collect() } -fn do_json_test_for(vm_type: &VMType, json_data: &[u8], start_stop_hook: &mut H) -> Vec { - let 
tests = ethjson::vm::Test::load(json_data).unwrap(); - let mut failed = Vec::new(); +fn do_json_test_for( + vm_type: &VMType, + json_data: &[u8], + start_stop_hook: &mut H, +) -> Vec { + let tests = ethjson::vm::Test::load(json_data).unwrap(); + let mut failed = Vec::new(); - for (name, vm) in tests.into_iter() { - start_stop_hook(&format!("{}-{}", name, vm_type), HookType::OnStart); + for (name, vm) in tests.into_iter() { + start_stop_hook(&format!("{}-{}", name, vm_type), HookType::OnStart); - info!(target: "jsontests", "name: {:?}", name); - let mut fail = false; + info!(target: "jsontests", "name: {:?}", name); + let mut fail = false; - let mut fail_unless = |cond: bool, s: &str | if !cond && !fail { - failed.push(format!("[{}] {}: {}", vm_type, name, s)); - fail = true - }; + let mut fail_unless = |cond: bool, s: &str| { + if !cond && !fail { + failed.push(format!("[{}] {}: {}", vm_type, name, s)); + fail = true + } + }; - macro_rules! try_fail { - ($e: expr) => { - match $e { - Ok(x) => x, - Err(e) => { - let msg = format!("Internal error: {}", e); - fail_unless(false, &msg); - continue - } - } - } - } + macro_rules! 
try_fail { + ($e: expr) => { + match $e { + Ok(x) => x, + Err(e) => { + let msg = format!("Internal error: {}", e); + fail_unless(false, &msg); + continue; + } + } + }; + } - let out_of_gas = vm.out_of_gas(); - let mut state = get_temp_state(); - state.populate_from(From::from(vm.pre_state.clone())); - let info: EnvInfo = From::from(vm.env); - let machine = { - let mut machine = ::ethereum::new_frontier_test_machine(); - machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = 1)); - machine - }; + let out_of_gas = vm.out_of_gas(); + let mut state = get_temp_state(); + state.populate_from(From::from(vm.pre_state.clone())); + let info: EnvInfo = From::from(vm.env); + let machine = { + let mut machine = ::ethereum::new_frontier_test_machine(); + machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = 1)); + machine + }; - let params = ActionParams::from(vm.transaction); + let params = ActionParams::from(vm.transaction); - let mut substate = Substate::new(); - let mut tracer = NoopTracer; - let mut vm_tracer = NoopVMTracer; - let vm_factory = state.vm_factory(); - let origin_info = OriginInfo::from(¶ms); + let mut substate = Substate::new(); + let mut tracer = NoopTracer; + let mut vm_tracer = NoopVMTracer; + let vm_factory = state.vm_factory(); + let origin_info = OriginInfo::from(¶ms); - // execute - let (res, callcreates) = { - let schedule = machine.schedule(info.number); - let mut ex = try_fail!(TestExt::new( - &mut state, - &info, - &machine, - &schedule, - 0, - &origin_info, - &mut substate, - OutputPolicy::Return, - params.address.clone(), - &mut tracer, - &mut vm_tracer, - )); - let mut evm = vm_factory.create(params, &schedule, 0); - let res = evm.exec(&mut ex).ok().expect("TestExt never trap; resume error never happens; qed"); - // a return in finalize will not alter callcreates - let callcreates = ex.callcreates.clone(); - (res.finalize(ex), callcreates) - }; + // execute + let (res, callcreates) = { + let schedule = 
machine.schedule(info.number); + let mut ex = try_fail!(TestExt::new( + &mut state, + &info, + &machine, + &schedule, + 0, + &origin_info, + &mut substate, + OutputPolicy::Return, + params.address.clone(), + &mut tracer, + &mut vm_tracer, + )); + let mut evm = vm_factory.create(params, &schedule, 0); + let res = evm + .exec(&mut ex) + .ok() + .expect("TestExt never trap; resume error never happens; qed"); + // a return in finalize will not alter callcreates + let callcreates = ex.callcreates.clone(); + (res.finalize(ex), callcreates) + }; - let output = match &res { - Ok(res) => res.return_data.to_vec(), - Err(_) => Vec::new(), - }; + let output = match &res { + Ok(res) => res.return_data.to_vec(), + Err(_) => Vec::new(), + }; - let log_hash = { - let mut rlp = RlpStream::new_list(substate.logs.len()); - for l in &substate.logs { - rlp.append(l); - } - keccak(&rlp.drain()) - }; + let log_hash = { + let mut rlp = RlpStream::new_list(substate.logs.len()); + for l in &substate.logs { + rlp.append(l); + } + keccak(&rlp.drain()) + }; - match res { - Err(_) => fail_unless(out_of_gas, "didn't expect to run out of gas."), - Ok(res) => { - fail_unless(!out_of_gas, "expected to run out of gas."); - fail_unless(Some(res.gas_left) == vm.gas_left.map(Into::into), "gas_left is incorrect"); - let vm_output: Option> = vm.output.map(Into::into); - fail_unless(Some(output) == vm_output, "output is incorrect"); - fail_unless(Some(log_hash) == vm.logs.map(|h| h.0), "logs are incorrect"); + match res { + Err(_) => fail_unless(out_of_gas, "didn't expect to run out of gas."), + Ok(res) => { + fail_unless(!out_of_gas, "expected to run out of gas."); + fail_unless( + Some(res.gas_left) == vm.gas_left.map(Into::into), + "gas_left is incorrect", + ); + let vm_output: Option> = vm.output.map(Into::into); + fail_unless(Some(output) == vm_output, "output is incorrect"); + fail_unless(Some(log_hash) == vm.logs.map(|h| h.0), "logs are incorrect"); - for (address, account) in 
vm.post_state.unwrap().into_iter() { - let address = address.into(); - let code: Vec = account.code.into(); - let found_code = try_fail!(state.code(&address)); - let found_balance = try_fail!(state.balance(&address)); - let found_nonce = try_fail!(state.nonce(&address)); + for (address, account) in vm.post_state.unwrap().into_iter() { + let address = address.into(); + let code: Vec = account.code.into(); + let found_code = try_fail!(state.code(&address)); + let found_balance = try_fail!(state.balance(&address)); + let found_nonce = try_fail!(state.nonce(&address)); - fail_unless(found_code.as_ref().map_or_else(|| code.is_empty(), |c| &**c == &code), "code is incorrect"); - fail_unless(found_balance == account.balance.into(), "balance is incorrect"); - fail_unless(found_nonce == account.nonce.into(), "nonce is incorrect"); - for (k, v) in account.storage { - let key: U256 = k.into(); - let value: U256 = v.into(); - let found_storage = try_fail!(state.storage_at(&address, &From::from(key))); - fail_unless(found_storage == From::from(value), "storage is incorrect"); - } - } + fail_unless( + found_code + .as_ref() + .map_or_else(|| code.is_empty(), |c| &**c == &code), + "code is incorrect", + ); + fail_unless( + found_balance == account.balance.into(), + "balance is incorrect", + ); + fail_unless(found_nonce == account.nonce.into(), "nonce is incorrect"); + for (k, v) in account.storage { + let key: U256 = k.into(); + let value: U256 = v.into(); + let found_storage = try_fail!(state.storage_at(&address, &From::from(key))); + fail_unless(found_storage == From::from(value), "storage is incorrect"); + } + } - let calls: Option> = vm.calls.map(|c| c.into_iter().map(From::from).collect()); - fail_unless(Some(callcreates) == calls, "callcreates does not match"); - } - }; + let calls: Option> = + vm.calls.map(|c| c.into_iter().map(From::from).collect()); + fail_unless(Some(callcreates) == calls, "callcreates does not match"); + } + }; - start_stop_hook(&format!("{}-{}", name, 
vm_type), HookType::OnStop); - } + start_stop_hook(&format!("{}-{}", name, vm_type), HookType::OnStop); + } - for f in &failed { - error!("FAILED: {:?}", f); - } + for f in &failed { + error!("FAILED: {:?}", f); + } - failed + failed } -declare_test!{ExecutiveTests_vmArithmeticTest, "VMTests/vmArithmeticTest"} -declare_test!{ExecutiveTests_vmBitwiseLogicOperationTest, "VMTests/vmBitwiseLogicOperation"} -declare_test!{ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"} - // TODO [todr] Fails with Signal 11 when using JIT -declare_test!{ExecutiveTests_vmEnvironmentalInfoTest, "VMTests/vmEnvironmentalInfo"} -declare_test!{ExecutiveTests_vmIOandFlowOperationsTest, "VMTests/vmIOandFlowOperations"} -declare_test!{ExecutiveTests_vmLogTest, "VMTests/vmLogTest"} -declare_test!{heavy => ExecutiveTests_vmPerformance, "VMTests/vmPerformance"} -declare_test!{ExecutiveTests_vmPushDupSwapTest, "VMTests/vmPushDupSwapTest"} -declare_test!{ExecutiveTests_vmRandomTest, "VMTests/vmRandomTest"} -declare_test!{ExecutiveTests_vmSha3Test, "VMTests/vmSha3Test"} -declare_test!{ExecutiveTests_vmSystemOperationsTest, "VMTests/vmSystemOperations"} -declare_test!{ExecutiveTests_vmTests, "VMTests/vmTests"} +declare_test! {ExecutiveTests_vmArithmeticTest, "VMTests/vmArithmeticTest"} +declare_test! {ExecutiveTests_vmBitwiseLogicOperationTest, "VMTests/vmBitwiseLogicOperation"} +declare_test! {ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"} +// TODO [todr] Fails with Signal 11 when using JIT +declare_test! {ExecutiveTests_vmEnvironmentalInfoTest, "VMTests/vmEnvironmentalInfo"} +declare_test! {ExecutiveTests_vmIOandFlowOperationsTest, "VMTests/vmIOandFlowOperations"} +declare_test! {ExecutiveTests_vmLogTest, "VMTests/vmLogTest"} +declare_test! {heavy => ExecutiveTests_vmPerformance, "VMTests/vmPerformance"} +declare_test! {ExecutiveTests_vmPushDupSwapTest, "VMTests/vmPushDupSwapTest"} +declare_test! {ExecutiveTests_vmRandomTest, "VMTests/vmRandomTest"} +declare_test! 
{ExecutiveTests_vmSha3Test, "VMTests/vmSha3Test"} +declare_test! {ExecutiveTests_vmSystemOperationsTest, "VMTests/vmSystemOperations"} +declare_test! {ExecutiveTests_vmTests, "VMTests/vmTests"} diff --git a/ethcore/src/json_tests/mod.rs b/ethcore/src/json_tests/mod.rs index 99cbdb21e..69572baaf 100644 --- a/ethcore/src/json_tests/mod.rs +++ b/ethcore/src/json_tests/mod.rs @@ -19,28 +19,32 @@ #[macro_use] mod test_common; -mod transaction; -mod executive; -mod state; mod chain; -mod trie; +mod executive; mod skip; +mod state; +mod transaction; +mod trie; #[cfg(test)] mod difficulty; pub use self::test_common::HookType; -pub use self::transaction::run_test_path as run_transaction_test_path; -pub use self::transaction::run_test_file as run_transaction_test_file; -pub use self::executive::run_test_path as run_executive_test_path; -pub use self::executive::run_test_file as run_executive_test_file; -pub use self::state::run_test_path as run_state_test_path; -pub use self::state::run_test_file as run_state_test_file; -pub use self::chain::run_test_path as run_chain_test_path; -pub use self::chain::run_test_file as run_chain_test_file; -pub use self::trie::run_generic_test_path as run_generic_trie_test_path; -pub use self::trie::run_generic_test_file as run_generic_trie_test_file; -pub use self::trie::run_secure_test_path as run_secure_trie_test_path; -pub use self::trie::run_secure_test_file as run_secure_trie_test_file; use self::skip::SKIP_TEST_STATE; +pub use self::{ + chain::{run_test_file as run_chain_test_file, run_test_path as run_chain_test_path}, + executive::{ + run_test_file as run_executive_test_file, run_test_path as run_executive_test_path, + }, + state::{run_test_file as run_state_test_file, run_test_path as run_state_test_path}, + transaction::{ + run_test_file as run_transaction_test_file, run_test_path as run_transaction_test_path, + }, + trie::{ + run_generic_test_file as run_generic_trie_test_file, + run_generic_test_path as run_generic_trie_test_path, 
+ run_secure_test_file as run_secure_trie_test_file, + run_secure_test_path as run_secure_trie_test_path, + }, +}; diff --git a/ethcore/src/json_tests/skip.rs b/ethcore/src/json_tests/skip.rs index b6ef9795f..6a993fe1c 100644 --- a/ethcore/src/json_tests/skip.rs +++ b/ethcore/src/json_tests/skip.rs @@ -18,17 +18,15 @@ use ethjson; -#[cfg(feature="ci-skip-tests")] -lazy_static!{ - pub static ref SKIP_TEST_STATE: ethjson::test::SkipStates = { - let skip_data = include_bytes!("../../res/ethereum/tests-issues/currents.json"); - ethjson::test::SkipStates::load(&skip_data[..]).expect("No invalid json allowed") - }; +#[cfg(feature = "ci-skip-tests")] +lazy_static! { + pub static ref SKIP_TEST_STATE: ethjson::test::SkipStates = { + let skip_data = include_bytes!("../../res/ethereum/tests-issues/currents.json"); + ethjson::test::SkipStates::load(&skip_data[..]).expect("No invalid json allowed") + }; } -#[cfg(not(feature="ci-skip-tests"))] -lazy_static!{ - pub static ref SKIP_TEST_STATE: ethjson::test::SkipStates = { - ethjson::test::SkipStates::empty() - }; +#[cfg(not(feature = "ci-skip-tests"))] +lazy_static! { + pub static ref SKIP_TEST_STATE: ethjson::test::SkipStates = ethjson::test::SkipStates::empty(); } diff --git a/ethcore/src/json_tests/state.rs b/ethcore/src/json_tests/state.rs index c51a2c361..302fa5d71 100644 --- a/ethcore/src/json_tests/state.rs +++ b/ethcore/src/json_tests/state.rs @@ -14,175 +14,194 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::path::Path; -use super::test_common::*; -use pod_state::PodState; -use trace; +use super::{test_common::*, HookType, SKIP_TEST_STATE}; use client::{EvmTestClient, EvmTestError, TransactErr, TransactSuccess}; use ethjson; +use pod_state::PodState; +use std::path::Path; +use trace; use types::transaction::SignedTransaction; use vm::EnvInfo; -use super::SKIP_TEST_STATE; -use super::HookType; /// Run state jsontests on a given folder. 
pub fn run_test_path(p: &Path, skip: &[&'static str], h: &mut H) { - ::json_tests::test_common::run_test_path(p, skip, json_chain_test, h) + ::json_tests::test_common::run_test_path(p, skip, json_chain_test, h) } /// Run state jsontests on a given file. pub fn run_test_file(p: &Path, h: &mut H) { - ::json_tests::test_common::run_test_file(p, json_chain_test, h) + ::json_tests::test_common::run_test_file(p, json_chain_test, h) } fn skip_test(subname: &str, chain: &String, number: usize) -> bool { - SKIP_TEST_STATE.state.iter().any(|state_test|{ - if let Some(subtest) = state_test.subtests.get(subname) { - chain == &subtest.chain && - (subtest.subnumbers[0] == "*" - || subtest.subnumbers.contains(&number.to_string())) - } else { - false - } - }) + SKIP_TEST_STATE.state.iter().any(|state_test| { + if let Some(subtest) = state_test.subtests.get(subname) { + chain == &subtest.chain + && (subtest.subnumbers[0] == "*" + || subtest.subnumbers.contains(&number.to_string())) + } else { + false + } + }) } -pub fn json_chain_test(json_data: &[u8], start_stop_hook: &mut H) -> Vec { - let _ = ::env_logger::try_init(); - let tests = ethjson::state::test::Test::load(json_data).unwrap(); - let mut failed = Vec::new(); +pub fn json_chain_test( + json_data: &[u8], + start_stop_hook: &mut H, +) -> Vec { + let _ = ::env_logger::try_init(); + let tests = ethjson::state::test::Test::load(json_data).unwrap(); + let mut failed = Vec::new(); - for (name, test) in tests.into_iter() { - start_stop_hook(&name, HookType::OnStart); + for (name, test) in tests.into_iter() { + start_stop_hook(&name, HookType::OnStart); - { - let multitransaction = test.transaction; - let env: EnvInfo = test.env.into(); - let pre: PodState = test.pre_state.into(); + { + let multitransaction = test.transaction; + let env: EnvInfo = test.env.into(); + let pre: PodState = test.pre_state.into(); - for (spec_name, states) in test.post_states { - let total = states.len(); - let spec = match 
EvmTestClient::spec_from_json(&spec_name) { - Some(spec) => spec, - None => { - println!(" - {} | {:?} Ignoring tests because of missing spec", name, spec_name); - continue; - } - }; + for (spec_name, states) in test.post_states { + let total = states.len(); + let spec = match EvmTestClient::spec_from_json(&spec_name) { + Some(spec) => spec, + None => { + println!( + " - {} | {:?} Ignoring tests because of missing spec", + name, spec_name + ); + continue; + } + }; - for (i, state) in states.into_iter().enumerate() { - let info = format!(" - {} | {:?} ({}/{}) ...", name, spec_name, i + 1, total); - if skip_test(&name, &spec.name, i + 1) { - println!("{} in skip list : SKIPPED", info); - continue; - } + for (i, state) in states.into_iter().enumerate() { + let info = format!(" - {} | {:?} ({}/{}) ...", name, spec_name, i + 1, total); + if skip_test(&name, &spec.name, i + 1) { + println!("{} in skip list : SKIPPED", info); + continue; + } - let post_root: H256 = state.hash.into(); - let transaction: SignedTransaction = multitransaction.select(&state.indexes).into(); + let post_root: H256 = state.hash.into(); + let transaction: SignedTransaction = + multitransaction.select(&state.indexes).into(); - let result = || -> Result<_, EvmTestError> { - Ok(EvmTestClient::from_pod_state(&spec, pre.clone())? - .transact(&env, transaction, trace::NoopTracer, trace::NoopVMTracer)) - }; - match result() { - Err(err) => { - println!("{} !!! Unexpected internal error: {:?}", info, err); - flushln!("{} fail", info); - failed.push(name.clone()); - }, - Ok(Ok(TransactSuccess { state_root, .. })) if state_root != post_root => { - println!("{} !!! State mismatch (got: {}, expect: {}", info, state_root, post_root); - flushln!("{} fail", info); - failed.push(name.clone()); - }, - Ok(Err(TransactErr { state_root, ref error, .. })) if state_root != post_root => { - println!("{} !!! State mismatch (got: {}, expect: {}", info, state_root, post_root); - println!("{} !!! 
Execution error: {:?}", info, error); - flushln!("{} fail", info); - failed.push(name.clone()); - }, - Ok(Err(TransactErr { error, .. })) => { - flushln!("{} ok ({:?})", info, error); - }, - Ok(_) => { - flushln!("{} ok", info); - }, - } - } - } - } + let result = || -> Result<_, EvmTestError> { + Ok(EvmTestClient::from_pod_state(&spec, pre.clone())?.transact( + &env, + transaction, + trace::NoopTracer, + trace::NoopVMTracer, + )) + }; + match result() { + Err(err) => { + println!("{} !!! Unexpected internal error: {:?}", info, err); + flushln!("{} fail", info); + failed.push(name.clone()); + } + Ok(Ok(TransactSuccess { state_root, .. })) if state_root != post_root => { + println!( + "{} !!! State mismatch (got: {}, expect: {}", + info, state_root, post_root + ); + flushln!("{} fail", info); + failed.push(name.clone()); + } + Ok(Err(TransactErr { + state_root, + ref error, + .. + })) if state_root != post_root => { + println!( + "{} !!! State mismatch (got: {}, expect: {}", + info, state_root, post_root + ); + println!("{} !!! Execution error: {:?}", info, error); + flushln!("{} fail", info); + failed.push(name.clone()); + } + Ok(Err(TransactErr { error, .. })) => { + flushln!("{} ok ({:?})", info, error); + } + Ok(_) => { + flushln!("{} ok", info); + } + } + } + } + } - start_stop_hook(&name, HookType::OnStop); - } + start_stop_hook(&name, HookType::OnStop); + } - if !failed.is_empty() { - println!("!!! {:?} tests failed.", failed.len()); - } - failed + if !failed.is_empty() { + println!("!!! 
{:?} tests failed.", failed.len()); + } + failed } #[cfg(test)] mod state_tests { - use super::json_chain_test; - use json_tests::HookType; + use super::json_chain_test; + use json_tests::HookType; - fn do_json_test(json_data: &[u8], h: &mut H) -> Vec { - json_chain_test(json_data, h) - } + fn do_json_test(json_data: &[u8], h: &mut H) -> Vec { + json_chain_test(json_data, h) + } - declare_test!{GeneralStateTest_stArgsZeroOneBalance, "GeneralStateTests/stArgsZeroOneBalance/"} - declare_test!{GeneralStateTest_stAttackTest, "GeneralStateTests/stAttackTest/"} - declare_test!{GeneralStateTest_stBadOpcodeTest, "GeneralStateTests/stBadOpcode/"} - declare_test!{GeneralStateTest_stBugs, "GeneralStateTests/stBugs/"} - declare_test!{GeneralStateTest_stCallCodes, "GeneralStateTests/stCallCodes/"} - declare_test!{GeneralStateTest_stCallCreateCallCodeTest, "GeneralStateTests/stCallCreateCallCodeTest/"} - declare_test!{GeneralStateTest_stCallDelegateCodesCallCodeHomestead, "GeneralStateTests/stCallDelegateCodesCallCodeHomestead/"} - declare_test!{GeneralStateTest_stCallDelegateCodesHomestead, "GeneralStateTests/stCallDelegateCodesHomestead/"} - declare_test!{GeneralStateTest_stChangedEIP150, "GeneralStateTests/stChangedEIP150/"} - declare_test!{GeneralStateTest_stCodeCopyTest, "GeneralStateTests/stCodeCopyTest/"} - declare_test!{GeneralStateTest_stCodeSizeLimit, "GeneralStateTests/stCodeSizeLimit/"} - declare_test!{GeneralStateTest_stCreate2Test, "GeneralStateTests/stCreate2/"} - declare_test!{GeneralStateTest_stCreateTest, "GeneralStateTests/stCreateTest/"} - declare_test!{GeneralStateTest_stDelegatecallTestHomestead, "GeneralStateTests/stDelegatecallTestHomestead/"} - declare_test!{GeneralStateTest_stEIP150singleCodeGasPrices, "GeneralStateTests/stEIP150singleCodeGasPrices/"} - declare_test!{GeneralStateTest_stEIP150Specific, "GeneralStateTests/stEIP150Specific/"} - declare_test!{GeneralStateTest_stEIP158Specific, "GeneralStateTests/stEIP158Specific/"} - 
declare_test!{GeneralStateTest_stEWASMTests, "GeneralStateTests/stEWASMTests/"} - declare_test!{GeneralStateTest_stExample, "GeneralStateTests/stExample/"} - declare_test!{GeneralStateTest_stHomesteadSpecific, "GeneralStateTests/stHomesteadSpecific/"} - declare_test!{GeneralStateTest_stInitCodeTest, "GeneralStateTests/stInitCodeTest/"} - declare_test!{GeneralStateTest_stLogTests, "GeneralStateTests/stLogTests/"} - declare_test!{GeneralStateTest_stMemExpandingEIP150Calls, "GeneralStateTests/stMemExpandingEIP150Calls/"} - declare_test!{heavy => GeneralStateTest_stMemoryStressTest, "GeneralStateTests/stMemoryStressTest/"} - declare_test!{GeneralStateTest_stMemoryTest, "GeneralStateTests/stMemoryTest/"} - declare_test!{GeneralStateTest_stNonZeroCallsTest, "GeneralStateTests/stNonZeroCallsTest/"} - declare_test!{GeneralStateTest_stPreCompiledContracts, "GeneralStateTests/stPreCompiledContracts/"} - declare_test!{GeneralStateTest_stPreCompiledContracts2, "GeneralStateTests/stPreCompiledContracts2/"} - declare_test!{heavy => GeneralStateTest_stQuadraticComplexityTest, "GeneralStateTests/stQuadraticComplexityTest/"} - declare_test!{GeneralStateTest_stRandom, "GeneralStateTests/stRandom/"} - declare_test!{GeneralStateTest_stRandom2, "GeneralStateTests/stRandom2/"} - declare_test!{GeneralStateTest_stRecursiveCreate, "GeneralStateTests/stRecursiveCreate/"} - declare_test!{GeneralStateTest_stRefundTest, "GeneralStateTests/stRefundTest/"} - declare_test!{GeneralStateTest_stReturnDataTest, "GeneralStateTests/stReturnDataTest/"} - declare_test!{GeneralStateTest_stRevertTest, "GeneralStateTests/stRevertTest/"} - declare_test!{GeneralStateTest_stSStoreTest, "GeneralStateTests/stSStoreTest/"} - declare_test!{GeneralStateTest_stShift, "GeneralStateTests/stShift/"} - declare_test!{GeneralStateTest_stSolidityTest, "GeneralStateTests/stSolidityTest/"} - declare_test!{GeneralStateTest_stSpecialTest, "GeneralStateTests/stSpecialTest/"} - declare_test!{GeneralStateTest_stStackTests, 
"GeneralStateTests/stStackTests/"} - declare_test!{GeneralStateTest_stStaticCall, "GeneralStateTests/stStaticCall/"} - declare_test!{GeneralStateTest_stSystemOperationsTest, "GeneralStateTests/stSystemOperationsTest/"} - declare_test!{GeneralStateTest_stTransactionTest, "GeneralStateTests/stTransactionTest/"} - declare_test!{GeneralStateTest_stTransitionTest, "GeneralStateTests/stTransitionTest/"} - declare_test!{GeneralStateTest_stWalletTest, "GeneralStateTests/stWalletTest/"} - declare_test!{GeneralStateTest_stZeroCallsRevert, "GeneralStateTests/stZeroCallsRevert/"} - declare_test!{GeneralStateTest_stZeroCallsTest, "GeneralStateTests/stZeroCallsTest/"} - declare_test!{GeneralStateTest_stZeroKnowledge, "GeneralStateTests/stZeroKnowledge/"} + declare_test! {GeneralStateTest_stArgsZeroOneBalance, "GeneralStateTests/stArgsZeroOneBalance/"} + declare_test! {GeneralStateTest_stAttackTest, "GeneralStateTests/stAttackTest/"} + declare_test! {GeneralStateTest_stBadOpcodeTest, "GeneralStateTests/stBadOpcode/"} + declare_test! {GeneralStateTest_stBugs, "GeneralStateTests/stBugs/"} + declare_test! {GeneralStateTest_stCallCodes, "GeneralStateTests/stCallCodes/"} + declare_test! {GeneralStateTest_stCallCreateCallCodeTest, "GeneralStateTests/stCallCreateCallCodeTest/"} + declare_test! {GeneralStateTest_stCallDelegateCodesCallCodeHomestead, "GeneralStateTests/stCallDelegateCodesCallCodeHomestead/"} + declare_test! {GeneralStateTest_stCallDelegateCodesHomestead, "GeneralStateTests/stCallDelegateCodesHomestead/"} + declare_test! {GeneralStateTest_stChangedEIP150, "GeneralStateTests/stChangedEIP150/"} + declare_test! {GeneralStateTest_stCodeCopyTest, "GeneralStateTests/stCodeCopyTest/"} + declare_test! {GeneralStateTest_stCodeSizeLimit, "GeneralStateTests/stCodeSizeLimit/"} + declare_test! {GeneralStateTest_stCreate2Test, "GeneralStateTests/stCreate2/"} + declare_test! {GeneralStateTest_stCreateTest, "GeneralStateTests/stCreateTest/"} + declare_test! 
{GeneralStateTest_stDelegatecallTestHomestead, "GeneralStateTests/stDelegatecallTestHomestead/"} + declare_test! {GeneralStateTest_stEIP150singleCodeGasPrices, "GeneralStateTests/stEIP150singleCodeGasPrices/"} + declare_test! {GeneralStateTest_stEIP150Specific, "GeneralStateTests/stEIP150Specific/"} + declare_test! {GeneralStateTest_stEIP158Specific, "GeneralStateTests/stEIP158Specific/"} + declare_test! {GeneralStateTest_stEWASMTests, "GeneralStateTests/stEWASMTests/"} + declare_test! {GeneralStateTest_stExample, "GeneralStateTests/stExample/"} + declare_test! {GeneralStateTest_stHomesteadSpecific, "GeneralStateTests/stHomesteadSpecific/"} + declare_test! {GeneralStateTest_stInitCodeTest, "GeneralStateTests/stInitCodeTest/"} + declare_test! {GeneralStateTest_stLogTests, "GeneralStateTests/stLogTests/"} + declare_test! {GeneralStateTest_stMemExpandingEIP150Calls, "GeneralStateTests/stMemExpandingEIP150Calls/"} + declare_test! {heavy => GeneralStateTest_stMemoryStressTest, "GeneralStateTests/stMemoryStressTest/"} + declare_test! {GeneralStateTest_stMemoryTest, "GeneralStateTests/stMemoryTest/"} + declare_test! {GeneralStateTest_stNonZeroCallsTest, "GeneralStateTests/stNonZeroCallsTest/"} + declare_test! {GeneralStateTest_stPreCompiledContracts, "GeneralStateTests/stPreCompiledContracts/"} + declare_test! {GeneralStateTest_stPreCompiledContracts2, "GeneralStateTests/stPreCompiledContracts2/"} + declare_test! {heavy => GeneralStateTest_stQuadraticComplexityTest, "GeneralStateTests/stQuadraticComplexityTest/"} + declare_test! {GeneralStateTest_stRandom, "GeneralStateTests/stRandom/"} + declare_test! {GeneralStateTest_stRandom2, "GeneralStateTests/stRandom2/"} + declare_test! {GeneralStateTest_stRecursiveCreate, "GeneralStateTests/stRecursiveCreate/"} + declare_test! {GeneralStateTest_stRefundTest, "GeneralStateTests/stRefundTest/"} + declare_test! {GeneralStateTest_stReturnDataTest, "GeneralStateTests/stReturnDataTest/"} + declare_test! 
{GeneralStateTest_stRevertTest, "GeneralStateTests/stRevertTest/"} + declare_test! {GeneralStateTest_stSStoreTest, "GeneralStateTests/stSStoreTest/"} + declare_test! {GeneralStateTest_stShift, "GeneralStateTests/stShift/"} + declare_test! {GeneralStateTest_stSolidityTest, "GeneralStateTests/stSolidityTest/"} + declare_test! {GeneralStateTest_stSpecialTest, "GeneralStateTests/stSpecialTest/"} + declare_test! {GeneralStateTest_stStackTests, "GeneralStateTests/stStackTests/"} + declare_test! {GeneralStateTest_stStaticCall, "GeneralStateTests/stStaticCall/"} + declare_test! {GeneralStateTest_stSystemOperationsTest, "GeneralStateTests/stSystemOperationsTest/"} + declare_test! {GeneralStateTest_stTransactionTest, "GeneralStateTests/stTransactionTest/"} + declare_test! {GeneralStateTest_stTransitionTest, "GeneralStateTests/stTransitionTest/"} + declare_test! {GeneralStateTest_stWalletTest, "GeneralStateTests/stWalletTest/"} + declare_test! {GeneralStateTest_stZeroCallsRevert, "GeneralStateTests/stZeroCallsRevert/"} + declare_test! {GeneralStateTest_stZeroCallsTest, "GeneralStateTests/stZeroCallsTest/"} + declare_test! 
{GeneralStateTest_stZeroKnowledge, "GeneralStateTests/stZeroKnowledge/"} - // Attempts to send a transaction that requires more than current balance: - // Tx: - // https://github.com/ethereum/tests/blob/726b161ba8a739691006cc1ba080672bb50a9d49/GeneralStateTests/stZeroKnowledge2/ecmul_0-3_5616_28000_96.json#L170 - // Balance: - // https://github.com/ethereum/tests/blob/726b161ba8a739691006cc1ba080672bb50a9d49/GeneralStateTests/stZeroKnowledge2/ecmul_0-3_5616_28000_96.json#L126 - declare_test!{GeneralStateTest_stZeroKnowledge2, "GeneralStateTests/stZeroKnowledge2/"} + // Attempts to send a transaction that requires more than current balance: + // Tx: + // https://github.com/ethereum/tests/blob/726b161ba8a739691006cc1ba080672bb50a9d49/GeneralStateTests/stZeroKnowledge2/ecmul_0-3_5616_28000_96.json#L170 + // Balance: + // https://github.com/ethereum/tests/blob/726b161ba8a739691006cc1ba080672bb50a9d49/GeneralStateTests/stZeroKnowledge2/ecmul_0-3_5616_28000_96.json#L126 + declare_test! {GeneralStateTest_stZeroKnowledge2, "GeneralStateTests/stZeroKnowledge2/"} } diff --git a/ethcore/src/json_tests/test_common.rs b/ethcore/src/json_tests/test_common.rs index 7e3842ecb..af066b288 100644 --- a/ethcore/src/json_tests/test_common.rs +++ b/ethcore/src/json_tests/test_common.rs @@ -14,137 +14,152 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::HashSet; -use std::io::Read; -use std::fs::{File, read_dir}; -use std::path::Path; -use std::ffi::OsString; -pub use ethereum_types::{H256, U256, Address}; +pub use ethereum_types::{Address, H256, U256}; +use std::{ + collections::HashSet, + ffi::OsString, + fs::{read_dir, File}, + io::Read, + path::Path, +}; /// Indicate when to run the hook passed to test functions. #[derive(Copy, Clone, Eq, PartialEq)] pub enum HookType { - /// Hook to code to run on test start. - OnStart, - /// Hook to code to run on test end. 
- OnStop + /// Hook to code to run on test start. + OnStart, + /// Hook to code to run on test end. + OnStop, } pub fn run_test_path( - p: &Path, skip: &[&'static str], - runner: fn(json_data: &[u8], start_stop_hook: &mut H) -> Vec, - start_stop_hook: &mut H + p: &Path, + skip: &[&'static str], + runner: fn(json_data: &[u8], start_stop_hook: &mut H) -> Vec, + start_stop_hook: &mut H, ) { - let mut errors = Vec::new(); - run_test_path_inner(p, skip, runner, start_stop_hook, &mut errors); - let empty: [String; 0] = []; - assert_eq!(errors, empty); + let mut errors = Vec::new(); + run_test_path_inner(p, skip, runner, start_stop_hook, &mut errors); + let empty: [String; 0] = []; + assert_eq!(errors, empty); } fn run_test_path_inner( - p: &Path, skip: &[&'static str], - runner: fn(json_data: &[u8], start_stop_hook: &mut H) -> Vec, - start_stop_hook: &mut H, - errors: &mut Vec + p: &Path, + skip: &[&'static str], + runner: fn(json_data: &[u8], start_stop_hook: &mut H) -> Vec, + start_stop_hook: &mut H, + errors: &mut Vec, ) { - let path = Path::new(p); - let s: HashSet = skip.iter().map(|s| { - let mut os: OsString = s.into(); - os.push(".json"); - os - }).collect(); - let extension = path.extension().and_then(|s| s.to_str()); - if path.is_dir() { - for p in read_dir(path).unwrap().filter_map(|e| { - let e = e.unwrap(); - if s.contains(&e.file_name()) { - None - } else { - Some(e.path()) - }}) { - run_test_path_inner(&p, skip, runner, start_stop_hook, errors); - } - } else if extension == Some("swp") || extension == None { - // Ignore junk - } else { - let mut path = p.to_path_buf(); - path.set_extension("json"); - run_test_file_append(&path, runner, start_stop_hook, errors) - } + let path = Path::new(p); + let s: HashSet = skip + .iter() + .map(|s| { + let mut os: OsString = s.into(); + os.push(".json"); + os + }) + .collect(); + let extension = path.extension().and_then(|s| s.to_str()); + if path.is_dir() { + for p in read_dir(path).unwrap().filter_map(|e| { + let e = 
e.unwrap(); + if s.contains(&e.file_name()) { + None + } else { + Some(e.path()) + } + }) { + run_test_path_inner(&p, skip, runner, start_stop_hook, errors); + } + } else if extension == Some("swp") || extension == None { + // Ignore junk + } else { + let mut path = p.to_path_buf(); + path.set_extension("json"); + run_test_file_append(&path, runner, start_stop_hook, errors) + } } fn run_test_file_append( - path: &Path, - runner: fn(json_data: &[u8], start_stop_hook: &mut H) -> Vec, - start_stop_hook: &mut H, - errors: &mut Vec + path: &Path, + runner: fn(json_data: &[u8], start_stop_hook: &mut H) -> Vec, + start_stop_hook: &mut H, + errors: &mut Vec, ) { - let mut data = Vec::new(); - let mut file = match File::open(&path) { - Ok(file) => file, - Err(_) => panic!("Error opening test file at: {:?}", path), - }; - file.read_to_end(&mut data).expect("Error reading test file"); - errors.append(&mut runner(&data, start_stop_hook)); + let mut data = Vec::new(); + let mut file = match File::open(&path) { + Ok(file) => file, + Err(_) => panic!("Error opening test file at: {:?}", path), + }; + file.read_to_end(&mut data) + .expect("Error reading test file"); + errors.append(&mut runner(&data, start_stop_hook)); } pub fn run_test_file( - path: &Path, - runner: fn(json_data: &[u8], start_stop_hook: &mut H) -> Vec, - start_stop_hook: &mut H + path: &Path, + runner: fn(json_data: &[u8], start_stop_hook: &mut H) -> Vec, + start_stop_hook: &mut H, ) { - let mut data = Vec::new(); - let mut file = match File::open(&path) { - Ok(file) => file, - Err(_) => panic!("Error opening test file at: {:?}", path), - }; - file.read_to_end(&mut data).expect("Error reading test file"); - let results = runner(&data, start_stop_hook); - let empty: [String; 0] = []; - assert_eq!(results, empty); + let mut data = Vec::new(); + let mut file = match File::open(&path) { + Ok(file) => file, + Err(_) => panic!("Error opening test file at: {:?}", path), + }; + file.read_to_end(&mut data) + .expect("Error 
reading test file"); + let results = runner(&data, start_stop_hook); + let empty: [String; 0] = []; + assert_eq!(results, empty); } #[cfg(test)] macro_rules! test { - ($name: expr, $skip: expr) => { - ::json_tests::test_common::run_test_path(::std::path::Path::new(concat!("res/ethereum/tests/", $name)), &$skip, do_json_test, &mut |_, _| ()); - } + ($name: expr, $skip: expr) => { + ::json_tests::test_common::run_test_path( + ::std::path::Path::new(concat!("res/ethereum/tests/", $name)), + &$skip, + do_json_test, + &mut |_, _| (), + ); + }; } #[macro_export] macro_rules! declare_test { - (skip => $arr: expr, $id: ident, $name: expr) => { - #[cfg(test)] - #[test] - #[allow(non_snake_case)] - fn $id() { - test!($name, $arr); - } - }; - (ignore => $id: ident, $name: expr) => { - #[cfg(test)] - #[ignore] - #[test] - #[allow(non_snake_case)] - fn $id() { - test!($name, []); - } - }; - (heavy => $id: ident, $name: expr) => { - #[cfg(test)] - #[cfg(feature = "test-heavy")] - #[test] - #[allow(non_snake_case)] - fn $id() { - test!($name, []); - } - }; - ($id: ident, $name: expr) => { - #[cfg(test)] - #[test] - #[allow(non_snake_case)] - fn $id() { - test!($name, []); - } - } + (skip => $arr: expr, $id: ident, $name: expr) => { + #[cfg(test)] + #[test] + #[allow(non_snake_case)] + fn $id() { + test!($name, $arr); + } + }; + (ignore => $id: ident, $name: expr) => { + #[cfg(test)] + #[ignore] + #[test] + #[allow(non_snake_case)] + fn $id() { + test!($name, []); + } + }; + (heavy => $id: ident, $name: expr) => { + #[cfg(test)] + #[cfg(feature = "test-heavy")] + #[test] + #[allow(non_snake_case)] + fn $id() { + test!($name, []); + } + }; + ($id: ident, $name: expr) => { + #[cfg(test)] + #[test] + #[allow(non_snake_case)] + fn $id() { + test!($name, []); + } + }; } diff --git a/ethcore/src/json_tests/transaction.rs b/ethcore/src/json_tests/transaction.rs index febc61404..d7a0b6b05 100644 --- a/ethcore/src/json_tests/transaction.rs +++ b/ethcore/src/json_tests/transaction.rs @@ 
-14,99 +14,110 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::path::Path; use super::test_common::*; use client::EvmTestClient; use ethjson; use rlp::Rlp; -use types::header::Header; -use types::transaction::UnverifiedTransaction; +use std::path::Path; use transaction_ext::Transaction; +use types::{header::Header, transaction::UnverifiedTransaction}; /// Run transaction jsontests on a given folder. pub fn run_test_path(p: &Path, skip: &[&'static str], h: &mut H) { - ::json_tests::test_common::run_test_path(p, skip, do_json_test, h) + ::json_tests::test_common::run_test_path(p, skip, do_json_test, h) } /// Run transaction jsontests on a given file. pub fn run_test_file(p: &Path, h: &mut H) { - ::json_tests::test_common::run_test_file(p, do_json_test, h) + ::json_tests::test_common::run_test_file(p, do_json_test, h) } // Block number used to run the tests. // Make sure that all the specified features are activated. 
const BLOCK_NUMBER: u64 = 0x6ffffffffffffe; -fn do_json_test(json_data: &[u8], start_stop_hook: &mut H) -> Vec { - let tests = ethjson::transaction::Test::load(json_data).unwrap(); - let mut failed = Vec::new(); - for (name, test) in tests.into_iter() { - start_stop_hook(&name, HookType::OnStart); +fn do_json_test( + json_data: &[u8], + start_stop_hook: &mut H, +) -> Vec { + let tests = ethjson::transaction::Test::load(json_data).unwrap(); + let mut failed = Vec::new(); + for (name, test) in tests.into_iter() { + start_stop_hook(&name, HookType::OnStart); - for (spec_name, result) in test.post_state { - let spec = match EvmTestClient::spec_from_json(&spec_name) { - Some(spec) => spec, - None => { - println!(" - {} | {:?} Ignoring tests because of missing spec", name, spec_name); - continue; - } - }; + for (spec_name, result) in test.post_state { + let spec = match EvmTestClient::spec_from_json(&spec_name) { + Some(spec) => spec, + None => { + println!( + " - {} | {:?} Ignoring tests because of missing spec", + name, spec_name + ); + continue; + } + }; - let mut fail_unless = |cond: bool, title: &str| if !cond { - failed.push(format!("{}-{:?}", name, spec_name)); - println!("Transaction failed: {:?}-{:?}: {:?}", name, spec_name, title); - }; + let mut fail_unless = |cond: bool, title: &str| { + if !cond { + failed.push(format!("{}-{:?}", name, spec_name)); + println!( + "Transaction failed: {:?}-{:?}: {:?}", + name, spec_name, title + ); + } + }; - let rlp: Vec = test.rlp.clone().into(); - let res = Rlp::new(&rlp) - .as_val() - .map_err(::error::Error::from) - .and_then(|t: UnverifiedTransaction| { - let mut header: Header = Default::default(); - // Use high enough number to activate all required features. 
- header.set_number(BLOCK_NUMBER); + let rlp: Vec = test.rlp.clone().into(); + let res = Rlp::new(&rlp) + .as_val() + .map_err(::error::Error::from) + .and_then(|t: UnverifiedTransaction| { + let mut header: Header = Default::default(); + // Use high enough number to activate all required features. + header.set_number(BLOCK_NUMBER); - let minimal = t.gas_required(&spec.engine.schedule(header.number())).into(); - if t.gas < minimal { - return Err(::types::transaction::Error::InsufficientGas { - minimal, got: t.gas, - }.into()); - } - spec.engine.verify_transaction_basic(&t, &header)?; - Ok(spec.engine.verify_transaction_unordered(t, &header)?) - }); + let minimal = t + .gas_required(&spec.engine.schedule(header.number())) + .into(); + if t.gas < minimal { + return Err(::types::transaction::Error::InsufficientGas { + minimal, + got: t.gas, + } + .into()); + } + spec.engine.verify_transaction_basic(&t, &header)?; + Ok(spec.engine.verify_transaction_unordered(t, &header)?) + }); - match (res, result.hash, result.sender) { - (Ok(t), Some(hash), Some(sender)) => { - fail_unless(t.sender() == sender.into(), "sender mismatch"); - fail_unless(t.hash() == hash.into(), "hash mismatch"); - }, - (Err(_), None, None) => {}, - data => { - fail_unless( - false, - &format!("Validity different: {:?}", data) - ); - } - } - } + match (res, result.hash, result.sender) { + (Ok(t), Some(hash), Some(sender)) => { + fail_unless(t.sender() == sender.into(), "sender mismatch"); + fail_unless(t.hash() == hash.into(), "hash mismatch"); + } + (Err(_), None, None) => {} + data => { + fail_unless(false, &format!("Validity different: {:?}", data)); + } + } + } - start_stop_hook(&name, HookType::OnStop); - } + start_stop_hook(&name, HookType::OnStop); + } - for f in &failed { - println!("FAILED: {:?}", f); - } - failed + for f in &failed { + println!("FAILED: {:?}", f); + } + failed } -declare_test!{TransactionTests_ttAddress, "TransactionTests/ttAddress"} -declare_test!{TransactionTests_ttData, 
"TransactionTests/ttData"} -declare_test!{TransactionTests_ttGasLimit, "TransactionTests/ttGasLimit"} -declare_test!{TransactionTests_ttGasPrice, "TransactionTests/ttGasPrice"} -declare_test!{TransactionTests_ttNonce, "TransactionTests/ttNonce"} -declare_test!{TransactionTests_ttRSValue, "TransactionTests/ttRSValue"} -declare_test!{TransactionTests_ttSignature, "TransactionTests/ttSignature"} -declare_test!{TransactionTests_ttValue, "TransactionTests/ttValue"} -declare_test!{TransactionTests_ttVValue, "TransactionTests/ttVValue"} -declare_test!{TransactionTests_ttWrongRLP, "TransactionTests/ttWrongRLP"} +declare_test! {TransactionTests_ttAddress, "TransactionTests/ttAddress"} +declare_test! {TransactionTests_ttData, "TransactionTests/ttData"} +declare_test! {TransactionTests_ttGasLimit, "TransactionTests/ttGasLimit"} +declare_test! {TransactionTests_ttGasPrice, "TransactionTests/ttGasPrice"} +declare_test! {TransactionTests_ttNonce, "TransactionTests/ttNonce"} +declare_test! {TransactionTests_ttRSValue, "TransactionTests/ttRSValue"} +declare_test! {TransactionTests_ttSignature, "TransactionTests/ttSignature"} +declare_test! {TransactionTests_ttValue, "TransactionTests/ttValue"} +declare_test! {TransactionTests_ttVValue, "TransactionTests/ttVValue"} +declare_test! {TransactionTests_ttWrongRLP, "TransactionTests/ttWrongRLP"} diff --git a/ethcore/src/json_tests/trie.rs b/ethcore/src/json_tests/trie.rs index d56490ec7..1745132e8 100644 --- a/ethcore/src/json_tests/trie.rs +++ b/ethcore/src/json_tests/trie.rs @@ -14,96 +14,102 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use ethjson; -use trie::{TrieFactory, TrieSpec}; -use ethtrie::RlpCodec; use ethereum_types::H256; +use ethjson; +use ethtrie::RlpCodec; +use trie::{TrieFactory, TrieSpec}; use super::HookType; -pub use self::generic::run_test_path as run_generic_test_path; -pub use self::generic::run_test_file as run_generic_test_file; -pub use self::secure::run_test_path as run_secure_test_path; -pub use self::secure::run_test_file as run_secure_test_file; +pub use self::{ + generic::{run_test_file as run_generic_test_file, run_test_path as run_generic_test_path}, + secure::{run_test_file as run_secure_test_file, run_test_path as run_secure_test_path}, +}; -fn test_trie(json: &[u8], trie: TrieSpec, start_stop_hook: &mut H) -> Vec { - let tests = ethjson::trie::Test::load(json).unwrap(); - let factory = TrieFactory::<_, RlpCodec>::new(trie); - let mut result = vec![]; +fn test_trie( + json: &[u8], + trie: TrieSpec, + start_stop_hook: &mut H, +) -> Vec { + let tests = ethjson::trie::Test::load(json).unwrap(); + let factory = TrieFactory::<_, RlpCodec>::new(trie); + let mut result = vec![]; - for (name, test) in tests.into_iter() { - start_stop_hook(&name, HookType::OnStart); + for (name, test) in tests.into_iter() { + start_stop_hook(&name, HookType::OnStart); - let mut memdb = journaldb::new_memory_db(); - let mut root = H256::default(); - let mut t = factory.create(&mut memdb, &mut root); + let mut memdb = journaldb::new_memory_db(); + let mut root = H256::default(); + let mut t = factory.create(&mut memdb, &mut root); - for (key, value) in test.input.data.into_iter() { - let key: Vec = key.into(); - let value: Vec = value.map_or_else(Vec::new, Into::into); - t.insert(&key, &value) - .expect(&format!("Trie test '{:?}' failed due to internal error", name)); - } + for (key, value) in test.input.data.into_iter() { + let key: Vec = key.into(); + let value: Vec = value.map_or_else(Vec::new, Into::into); + t.insert(&key, &value).expect(&format!( + "Trie test '{:?}' failed due to 
internal error", + name + )); + } - if *t.root() != test.root.into() { - result.push(format!("Trie test '{:?}' failed.", name)); - } + if *t.root() != test.root.into() { + result.push(format!("Trie test '{:?}' failed.", name)); + } - start_stop_hook(&name, HookType::OnStop); - } + start_stop_hook(&name, HookType::OnStop); + } - for i in &result { - println!("FAILED: {}", i); - } + for i in &result { + println!("FAILED: {}", i); + } - result + result } mod generic { - use std::path::Path; - use trie::TrieSpec; + use std::path::Path; + use trie::TrieSpec; - use super::HookType; + use super::HookType; - /// Run generic trie jsontests on a given folder. - pub fn run_test_path(p: &Path, skip: &[&'static str], h: &mut H) { - ::json_tests::test_common::run_test_path(p, skip, do_json_test, h) - } + /// Run generic trie jsontests on a given folder. + pub fn run_test_path(p: &Path, skip: &[&'static str], h: &mut H) { + ::json_tests::test_common::run_test_path(p, skip, do_json_test, h) + } - /// Run generic trie jsontests on a given file. - pub fn run_test_file(p: &Path, h: &mut H) { - ::json_tests::test_common::run_test_file(p, do_json_test, h) - } + /// Run generic trie jsontests on a given file. + pub fn run_test_file(p: &Path, h: &mut H) { + ::json_tests::test_common::run_test_file(p, do_json_test, h) + } - fn do_json_test(json: &[u8], h: &mut H) -> Vec { - super::test_trie(json, TrieSpec::Generic, h) - } + fn do_json_test(json: &[u8], h: &mut H) -> Vec { + super::test_trie(json, TrieSpec::Generic, h) + } - declare_test!{TrieTests_trietest, "TrieTests/trietest"} - declare_test!{TrieTests_trieanyorder, "TrieTests/trieanyorder"} + declare_test! {TrieTests_trietest, "TrieTests/trietest"} + declare_test! {TrieTests_trieanyorder, "TrieTests/trieanyorder"} } mod secure { - use std::path::Path; - use trie::TrieSpec; + use std::path::Path; + use trie::TrieSpec; - use super::HookType; + use super::HookType; - /// Run secure trie jsontests on a given folder. 
- pub fn run_test_path(p: &Path, skip: &[&'static str], h: &mut H) { - ::json_tests::test_common::run_test_path(p, skip, do_json_test, h) - } + /// Run secure trie jsontests on a given folder. + pub fn run_test_path(p: &Path, skip: &[&'static str], h: &mut H) { + ::json_tests::test_common::run_test_path(p, skip, do_json_test, h) + } - /// Run secure trie jsontests on a given file. - pub fn run_test_file(p: &Path, h: &mut H) { - ::json_tests::test_common::run_test_file(p, do_json_test, h) - } + /// Run secure trie jsontests on a given file. + pub fn run_test_file(p: &Path, h: &mut H) { + ::json_tests::test_common::run_test_file(p, do_json_test, h) + } - fn do_json_test(json: &[u8], h: &mut H) -> Vec { - super::test_trie(json, TrieSpec::Secure, h) - } + fn do_json_test(json: &[u8], h: &mut H) -> Vec { + super::test_trie(json, TrieSpec::Secure, h) + } - declare_test!{TrieTests_hex_encoded_secure, "TrieTests/hex_encoded_securetrie_test"} - declare_test!{TrieTests_trietest_secure, "TrieTests/trietest_secureTrie"} - declare_test!{TrieTests_trieanyorder_secure, "TrieTests/trieanyorder_secureTrie"} + declare_test! {TrieTests_hex_encoded_secure, "TrieTests/hex_encoded_securetrie_test"} + declare_test! {TrieTests_trietest_secure, "TrieTests/trietest_secureTrie"} + declare_test! {TrieTests_trieanyorder_secure, "TrieTests/trieanyorder_secureTrie"} } diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 09896d69e..685ec4fdb 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -55,7 +55,7 @@ // Recursion limit required because of // error_chain foreign_links. 
-#![recursion_limit="128"] +#![recursion_limit = "128"] extern crate ansi_term; extern crate byteorder; @@ -90,7 +90,6 @@ extern crate num_cpus; extern crate parity_bytes as bytes; extern crate parity_snappy as snappy; extern crate parking_lot; -extern crate trie_db as trie; extern crate patricia_trie_ethereum as ethtrie; extern crate rand; extern crate rayon; @@ -99,26 +98,27 @@ extern crate rustc_hex; extern crate serde; extern crate stats; extern crate time_utils; +extern crate trie_db as trie; extern crate triehash_ethereum as triehash; extern crate unexpected; extern crate using_queue; extern crate vm; extern crate wasm; -#[cfg(test)] -extern crate ethcore_accounts as accounts; -#[cfg(feature = "stratum")] -extern crate ethcore_stratum; -#[cfg(any(test, feature = "tempdir"))] -extern crate tempdir; -#[cfg(any(test, feature = "kvdb-rocksdb"))] -extern crate kvdb_rocksdb; #[cfg(any(test, feature = "blooms-db"))] extern crate blooms_db; #[cfg(any(test, feature = "env_logger"))] extern crate env_logger; #[cfg(test)] +extern crate ethcore_accounts as accounts; +#[cfg(feature = "stratum")] +extern crate ethcore_stratum; +#[cfg(any(test, feature = "kvdb-rocksdb"))] +extern crate kvdb_rocksdb; +#[cfg(test)] extern crate rlp_compress; +#[cfg(any(test, feature = "tempdir"))] +extern crate tempdir; #[cfg(test)] #[macro_use] extern crate hex_literal; @@ -160,8 +160,8 @@ pub mod executed; pub mod executive; pub mod machine; pub mod miner; -pub mod pod_state; pub mod pod_account; +pub mod pod_state; pub mod snapshot; pub mod spec; pub mod state; @@ -175,13 +175,13 @@ mod externalities; mod factory; mod tx_filter; -#[cfg(test)] -mod tests; #[cfg(feature = "json-tests")] pub mod json_tests; #[cfg(any(test, feature = "test-helpers"))] pub mod test_helpers; +#[cfg(test)] +mod tests; -pub use executive::contract_address; pub use evm::CreateContractAddress; +pub use executive::contract_address; pub use trie::TrieSpec; diff --git a/ethcore/src/machine/impls.rs 
b/ethcore/src/machine/impls.rs index 371d088d5..a2ee16b3d 100644 --- a/ethcore/src/machine/impls.rs +++ b/ethcore/src/machine/impls.rs @@ -16,17 +16,24 @@ //! Ethereum-like state machine definition. -use std::collections::{BTreeMap, HashMap}; -use std::cmp; -use std::sync::Arc; +use std::{ + cmp, + collections::{BTreeMap, HashMap}, + sync::Arc, +}; -use ethereum_types::{U256, H256, Address}; +use ethereum_types::{Address, H256, U256}; use rlp::Rlp; -use types::transaction::{self, SYSTEM_ADDRESS, UNSIGNED_SENDER, UnverifiedTransaction, SignedTransaction}; -use types::BlockNumber; -use types::header::Header; -use vm::{CallType, ActionParams, ActionValue, ParamsType}; -use vm::{EnvInfo, Schedule, CreateContractAddress}; +use types::{ + header::Header, + transaction::{ + self, SignedTransaction, UnverifiedTransaction, SYSTEM_ADDRESS, UNSIGNED_SENDER, + }, + BlockNumber, +}; +use vm::{ + ActionParams, ActionValue, CallType, CreateContractAddress, EnvInfo, ParamsType, Schedule, +}; use block::ExecutedBlock; use builtin::Builtin; @@ -45,25 +52,34 @@ pub const PARITY_GAS_LIMIT_DETERMINANT: U256 = U256([37, 0, 0, 0]); /// Ethash-specific extensions. #[derive(Debug, Clone)] pub struct EthashExtensions { - /// Homestead transition block number. - pub homestead_transition: BlockNumber, - /// DAO hard-fork transition block (X). - pub dao_hardfork_transition: u64, - /// DAO hard-fork refund contract address (C). - pub dao_hardfork_beneficiary: Address, - /// DAO hard-fork DAO accounts list (L) - pub dao_hardfork_accounts: Vec
, + /// Homestead transition block number. + pub homestead_transition: BlockNumber, + /// DAO hard-fork transition block (X). + pub dao_hardfork_transition: u64, + /// DAO hard-fork refund contract address (C). + pub dao_hardfork_beneficiary: Address, + /// DAO hard-fork DAO accounts list (L) + pub dao_hardfork_accounts: Vec
, } impl From<::ethjson::spec::EthashParams> for EthashExtensions { - fn from(p: ::ethjson::spec::EthashParams) -> Self { - EthashExtensions { - homestead_transition: p.homestead_transition.map_or(0, Into::into), - dao_hardfork_transition: p.dao_hardfork_transition.map_or(u64::max_value(), Into::into), - dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map_or_else(Address::new, Into::into), - dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or_else(Vec::new).into_iter().map(Into::into).collect(), - } - } + fn from(p: ::ethjson::spec::EthashParams) -> Self { + EthashExtensions { + homestead_transition: p.homestead_transition.map_or(0, Into::into), + dao_hardfork_transition: p + .dao_hardfork_transition + .map_or(u64::max_value(), Into::into), + dao_hardfork_beneficiary: p + .dao_hardfork_beneficiary + .map_or_else(Address::new, Into::into), + dao_hardfork_accounts: p + .dao_hardfork_accounts + .unwrap_or_else(Vec::new) + .into_iter() + .map(Into::into) + .collect(), + } + } } /// Special rules to be applied to the schedule. @@ -71,346 +87,394 @@ pub type ScheduleCreationRules = Fn(&mut Schedule, BlockNumber) + Sync + Send; /// An ethereum-like state machine. pub struct EthereumMachine { - params: CommonParams, - builtins: Arc>, - tx_filter: Option>, - ethash_extensions: Option, - schedule_rules: Option>, + params: CommonParams, + builtins: Arc>, + tx_filter: Option>, + ethash_extensions: Option, + schedule_rules: Option>, } impl EthereumMachine { - /// Regular ethereum machine. - pub fn regular(params: CommonParams, builtins: BTreeMap) -> EthereumMachine { - let tx_filter = TransactionFilter::from_params(¶ms).map(Arc::new); - EthereumMachine { - params: params, - builtins: Arc::new(builtins), - tx_filter: tx_filter, - ethash_extensions: None, - schedule_rules: None, - } - } + /// Regular ethereum machine. 
+ pub fn regular(params: CommonParams, builtins: BTreeMap) -> EthereumMachine { + let tx_filter = TransactionFilter::from_params(¶ms).map(Arc::new); + EthereumMachine { + params: params, + builtins: Arc::new(builtins), + tx_filter: tx_filter, + ethash_extensions: None, + schedule_rules: None, + } + } - /// Ethereum machine with ethash extensions. - // TODO: either unify or specify to mainnet specifically and include other specific-chain HFs? - pub fn with_ethash_extensions(params: CommonParams, builtins: BTreeMap, extensions: EthashExtensions) -> EthereumMachine { - let mut machine = EthereumMachine::regular(params, builtins); - machine.ethash_extensions = Some(extensions); - machine - } + /// Ethereum machine with ethash extensions. + // TODO: either unify or specify to mainnet specifically and include other specific-chain HFs? + pub fn with_ethash_extensions( + params: CommonParams, + builtins: BTreeMap, + extensions: EthashExtensions, + ) -> EthereumMachine { + let mut machine = EthereumMachine::regular(params, builtins); + machine.ethash_extensions = Some(extensions); + machine + } - /// Attach special rules to the creation of schedule. - pub fn set_schedule_creation_rules(&mut self, rules: Box) { - self.schedule_rules = Some(rules); - } + /// Attach special rules to the creation of schedule. + pub fn set_schedule_creation_rules(&mut self, rules: Box) { + self.schedule_rules = Some(rules); + } - /// Get a reference to the ethash-specific extensions. - pub fn ethash_extensions(&self) -> Option<&EthashExtensions> { - self.ethash_extensions.as_ref() - } + /// Get a reference to the ethash-specific extensions. + pub fn ethash_extensions(&self) -> Option<&EthashExtensions> { + self.ethash_extensions.as_ref() + } } impl EthereumMachine { - /// Execute a call as the system address. 
Block environment information passed to the - /// VM is modified to have its gas limit bounded at the upper limit of possible used - /// gases including this system call, capped at the maximum value able to be - /// represented by U256. This system call modifies the block state, but discards other - /// information. If suicides, logs or refunds happen within the system call, they - /// will not be executed or recorded. Gas used by this system call will not be counted - /// on the block. - pub fn execute_as_system( - &self, - block: &mut ExecutedBlock, - contract_address: Address, - gas: U256, - data: Option>, - ) -> Result, Error> { - let (code, code_hash) = { - let state = &block.state; + /// Execute a call as the system address. Block environment information passed to the + /// VM is modified to have its gas limit bounded at the upper limit of possible used + /// gases including this system call, capped at the maximum value able to be + /// represented by U256. This system call modifies the block state, but discards other + /// information. If suicides, logs or refunds happen within the system call, they + /// will not be executed or recorded. Gas used by this system call will not be counted + /// on the block. + pub fn execute_as_system( + &self, + block: &mut ExecutedBlock, + contract_address: Address, + gas: U256, + data: Option>, + ) -> Result, Error> { + let (code, code_hash) = { + let state = &block.state; - (state.code(&contract_address)?, - state.code_hash(&contract_address)?) - }; + ( + state.code(&contract_address)?, + state.code_hash(&contract_address)?, + ) + }; - self.execute_code_as_system( - block, - Some(contract_address), - code, - code_hash, - None, - gas, - data, - None, - ) - } + self.execute_code_as_system( + block, + Some(contract_address), + code, + code_hash, + None, + gas, + data, + None, + ) + } - /// Same as execute_as_system, but execute code directly. If contract address is None, use the null sender - /// address. 
If code is None, then this function has no effect. The call is executed without finalization, and does - /// not form a transaction. - pub fn execute_code_as_system( - &self, - block: &mut ExecutedBlock, - contract_address: Option
, - code: Option>>, - code_hash: Option, - value: Option, - gas: U256, - data: Option>, - call_type: Option, - ) -> Result, Error> { - let env_info = { - let mut env_info = block.env_info(); - env_info.gas_limit = env_info.gas_used.saturating_add(gas); - env_info - }; + /// Same as execute_as_system, but execute code directly. If contract address is None, use the null sender + /// address. If code is None, then this function has no effect. The call is executed without finalization, and does + /// not form a transaction. + pub fn execute_code_as_system( + &self, + block: &mut ExecutedBlock, + contract_address: Option
, + code: Option>>, + code_hash: Option, + value: Option, + gas: U256, + data: Option>, + call_type: Option, + ) -> Result, Error> { + let env_info = { + let mut env_info = block.env_info(); + env_info.gas_limit = env_info.gas_used.saturating_add(gas); + env_info + }; - let mut state = block.state_mut(); + let mut state = block.state_mut(); - let params = ActionParams { - code_address: contract_address.unwrap_or(UNSIGNED_SENDER), - address: contract_address.unwrap_or(UNSIGNED_SENDER), - sender: SYSTEM_ADDRESS, - origin: SYSTEM_ADDRESS, - gas, - gas_price: 0.into(), - value: value.unwrap_or_else(|| ActionValue::Transfer(0.into())), - code, - code_hash, - data, - call_type: call_type.unwrap_or(CallType::Call), - params_type: ParamsType::Separate, - }; - let schedule = self.schedule(env_info.number); - let mut ex = Executive::new(&mut state, &env_info, self, &schedule); - let mut substate = Substate::new(); + let params = ActionParams { + code_address: contract_address.unwrap_or(UNSIGNED_SENDER), + address: contract_address.unwrap_or(UNSIGNED_SENDER), + sender: SYSTEM_ADDRESS, + origin: SYSTEM_ADDRESS, + gas, + gas_price: 0.into(), + value: value.unwrap_or_else(|| ActionValue::Transfer(0.into())), + code, + code_hash, + data, + call_type: call_type.unwrap_or(CallType::Call), + params_type: ParamsType::Separate, + }; + let schedule = self.schedule(env_info.number); + let mut ex = Executive::new(&mut state, &env_info, self, &schedule); + let mut substate = Substate::new(); - let res = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).map_err(|e| ::engines::EngineError::FailedSystemCall(format!("{}", e)))?; - let output = res.return_data.to_vec(); + let res = ex + .call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + .map_err(|e| ::engines::EngineError::FailedSystemCall(format!("{}", e)))?; + let output = res.return_data.to_vec(); - Ok(output) - } + Ok(output) + } - /// Push last known block hash to the state. 
- fn push_last_hash(&self, block: &mut ExecutedBlock) -> Result<(), Error> { - let params = self.params(); - if block.header.number() == params.eip210_transition { - let state = block.state_mut(); - state.init_code(¶ms.eip210_contract_address, params.eip210_contract_code.clone())?; - } - if block.header.number() >= params.eip210_transition { - let parent_hash = *block.header.parent_hash(); - let _ = self.execute_as_system( - block, - params.eip210_contract_address, - params.eip210_contract_gas, - Some(parent_hash.to_vec()), - )?; - } - Ok(()) - } + /// Push last known block hash to the state. + fn push_last_hash(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + let params = self.params(); + if block.header.number() == params.eip210_transition { + let state = block.state_mut(); + state.init_code( + ¶ms.eip210_contract_address, + params.eip210_contract_code.clone(), + )?; + } + if block.header.number() >= params.eip210_transition { + let parent_hash = *block.header.parent_hash(); + let _ = self.execute_as_system( + block, + params.eip210_contract_address, + params.eip210_contract_gas, + Some(parent_hash.to_vec()), + )?; + } + Ok(()) + } - /// Logic to perform on a new block: updating last hashes and the DAO - /// fork, for ethash. - pub fn on_new_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { - self.push_last_hash(block)?; + /// Logic to perform on a new block: updating last hashes and the DAO + /// fork, for ethash. 
+ pub fn on_new_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + self.push_last_hash(block)?; - if let Some(ref ethash_params) = self.ethash_extensions { - if block.header.number() == ethash_params.dao_hardfork_transition { - let state = block.state_mut(); - for child in ðash_params.dao_hardfork_accounts { - let beneficiary = ðash_params.dao_hardfork_beneficiary; - state.balance(child) - .and_then(|b| state.transfer_balance(child, beneficiary, &b, CleanupMode::NoEmpty))?; - } - } - } + if let Some(ref ethash_params) = self.ethash_extensions { + if block.header.number() == ethash_params.dao_hardfork_transition { + let state = block.state_mut(); + for child in ðash_params.dao_hardfork_accounts { + let beneficiary = ðash_params.dao_hardfork_beneficiary; + state.balance(child).and_then(|b| { + state.transfer_balance(child, beneficiary, &b, CleanupMode::NoEmpty) + })?; + } + } + } - Ok(()) - } + Ok(()) + } - /// Populate a header's fields based on its parent's header. - /// Usually implements the chain scoring rule based on weight. - /// The gas floor target must not be lower than the engine's minimum gas limit. - pub fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, gas_ceil_target: U256) { - header.set_difficulty(parent.difficulty().clone()); - let gas_limit = parent.gas_limit().clone(); - assert!(!gas_limit.is_zero(), "Gas limit should be > 0"); + /// Populate a header's fields based on its parent's header. + /// Usually implements the chain scoring rule based on weight. + /// The gas floor target must not be lower than the engine's minimum gas limit. 
+ pub fn populate_from_parent( + &self, + header: &mut Header, + parent: &Header, + gas_floor_target: U256, + gas_ceil_target: U256, + ) { + header.set_difficulty(parent.difficulty().clone()); + let gas_limit = parent.gas_limit().clone(); + assert!(!gas_limit.is_zero(), "Gas limit should be > 0"); - if let Some(ref ethash_params) = self.ethash_extensions { - let gas_limit = { - let bound_divisor = self.params().gas_limit_bound_divisor; - let lower_limit = gas_limit - gas_limit / bound_divisor + 1; - let upper_limit = gas_limit + gas_limit / bound_divisor - 1; - let gas_limit = if gas_limit < gas_floor_target { - let gas_limit = cmp::min(gas_floor_target, upper_limit); - round_block_gas_limit(gas_limit, lower_limit, upper_limit) - } else if gas_limit > gas_ceil_target { - let gas_limit = cmp::max(gas_ceil_target, lower_limit); - round_block_gas_limit(gas_limit, lower_limit, upper_limit) - } else { - let total_lower_limit = cmp::max(lower_limit, gas_floor_target); - let total_upper_limit = cmp::min(upper_limit, gas_ceil_target); - let gas_limit = cmp::max(gas_floor_target, cmp::min(total_upper_limit, - lower_limit + (header.gas_used().clone() * 6u32 / 5) / bound_divisor)); - round_block_gas_limit(gas_limit, total_lower_limit, total_upper_limit) - }; - // ensure that we are not violating protocol limits - debug_assert!(gas_limit >= lower_limit); - debug_assert!(gas_limit <= upper_limit); - gas_limit - }; + if let Some(ref ethash_params) = self.ethash_extensions { + let gas_limit = { + let bound_divisor = self.params().gas_limit_bound_divisor; + let lower_limit = gas_limit - gas_limit / bound_divisor + 1; + let upper_limit = gas_limit + gas_limit / bound_divisor - 1; + let gas_limit = if gas_limit < gas_floor_target { + let gas_limit = cmp::min(gas_floor_target, upper_limit); + round_block_gas_limit(gas_limit, lower_limit, upper_limit) + } else if gas_limit > gas_ceil_target { + let gas_limit = cmp::max(gas_ceil_target, lower_limit); + round_block_gas_limit(gas_limit, 
lower_limit, upper_limit) + } else { + let total_lower_limit = cmp::max(lower_limit, gas_floor_target); + let total_upper_limit = cmp::min(upper_limit, gas_ceil_target); + let gas_limit = cmp::max( + gas_floor_target, + cmp::min( + total_upper_limit, + lower_limit + (header.gas_used().clone() * 6u32 / 5) / bound_divisor, + ), + ); + round_block_gas_limit(gas_limit, total_lower_limit, total_upper_limit) + }; + // ensure that we are not violating protocol limits + debug_assert!(gas_limit >= lower_limit); + debug_assert!(gas_limit <= upper_limit); + gas_limit + }; - header.set_gas_limit(gas_limit); - if header.number() >= ethash_params.dao_hardfork_transition && - header.number() <= ethash_params.dao_hardfork_transition + 9 { - header.set_extra_data(b"dao-hard-fork"[..].to_owned()); - } - return - } + header.set_gas_limit(gas_limit); + if header.number() >= ethash_params.dao_hardfork_transition + && header.number() <= ethash_params.dao_hardfork_transition + 9 + { + header.set_extra_data(b"dao-hard-fork"[..].to_owned()); + } + return; + } - header.set_gas_limit({ - let bound_divisor = self.params().gas_limit_bound_divisor; - if gas_limit < gas_floor_target { - cmp::min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1) - } else { - cmp::max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1) - } - }); - } + header.set_gas_limit({ + let bound_divisor = self.params().gas_limit_bound_divisor; + if gas_limit < gas_floor_target { + cmp::min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1) + } else { + cmp::max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1) + } + }); + } - /// Get the general parameters of the chain. - pub fn params(&self) -> &CommonParams { - &self.params - } + /// Get the general parameters of the chain. + pub fn params(&self) -> &CommonParams { + &self.params + } - /// Get the EVM schedule for the given block number. 
- pub fn schedule(&self, block_number: BlockNumber) -> Schedule { - let mut schedule = match self.ethash_extensions { - None => self.params.schedule(block_number), - Some(ref ext) => { - if block_number < ext.homestead_transition { - Schedule::new_frontier() - } else { - self.params.schedule(block_number) - } - } - }; + /// Get the EVM schedule for the given block number. + pub fn schedule(&self, block_number: BlockNumber) -> Schedule { + let mut schedule = match self.ethash_extensions { + None => self.params.schedule(block_number), + Some(ref ext) => { + if block_number < ext.homestead_transition { + Schedule::new_frontier() + } else { + self.params.schedule(block_number) + } + } + }; - if let Some(ref rules) = self.schedule_rules { - (rules)(&mut schedule, block_number) - } + if let Some(ref rules) = self.schedule_rules { + (rules)(&mut schedule, block_number) + } - schedule - } + schedule + } - /// Builtin-contracts for the chain.. - pub fn builtins(&self) -> &BTreeMap { - &*self.builtins - } + /// Builtin-contracts for the chain.. + pub fn builtins(&self) -> &BTreeMap { + &*self.builtins + } - /// Attempt to get a handle to a built-in contract. - /// Only returns references to activated built-ins. - // TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic - // from Spec into here and removing the Spec::builtins field. - pub fn builtin(&self, a: &Address, block_number: BlockNumber) -> Option<&Builtin> { - self.builtins() - .get(a) - .and_then(|b| if b.is_active(block_number) { Some(b) } else { None }) - } + /// Attempt to get a handle to a built-in contract. + /// Only returns references to activated built-ins. + // TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic + // from Spec into here and removing the Spec::builtins field. 
+ pub fn builtin(&self, a: &Address, block_number: BlockNumber) -> Option<&Builtin> { + self.builtins().get(a).and_then(|b| { + if b.is_active(block_number) { + Some(b) + } else { + None + } + }) + } - /// Some intrinsic operation parameters; by default they take their value from the `spec()`'s `engine_params`. - pub fn maximum_extra_data_size(&self) -> usize { self.params().maximum_extra_data_size } + /// Some intrinsic operation parameters; by default they take their value from the `spec()`'s `engine_params`. + pub fn maximum_extra_data_size(&self) -> usize { + self.params().maximum_extra_data_size + } - /// The nonce with which accounts begin at given block. - pub fn account_start_nonce(&self, block: u64) -> U256 { - let params = self.params(); + /// The nonce with which accounts begin at given block. + pub fn account_start_nonce(&self, block: u64) -> U256 { + let params = self.params(); - if block >= params.dust_protection_transition { - U256::from(params.nonce_cap_increment) * U256::from(block) - } else { - params.account_start_nonce - } - } + if block >= params.dust_protection_transition { + U256::from(params.nonce_cap_increment) * U256::from(block) + } else { + params.account_start_nonce + } + } - /// The network ID that transactions should be signed with. - pub fn signing_chain_id(&self, env_info: &EnvInfo) -> Option { - let params = self.params(); + /// The network ID that transactions should be signed with. + pub fn signing_chain_id(&self, env_info: &EnvInfo) -> Option { + let params = self.params(); - if env_info.number >= params.eip155_transition { - Some(params.chain_id) - } else { - None - } - } + if env_info.number >= params.eip155_transition { + Some(params.chain_id) + } else { + None + } + } - /// Returns new contract address generation scheme at given block number. 
- pub fn create_address_scheme(&self, _number: BlockNumber) -> CreateContractAddress { - CreateContractAddress::FromSenderAndNonce - } + /// Returns new contract address generation scheme at given block number. + pub fn create_address_scheme(&self, _number: BlockNumber) -> CreateContractAddress { + CreateContractAddress::FromSenderAndNonce + } - /// Verify a particular transaction is valid, regardless of order. - pub fn verify_transaction_unordered(&self, t: UnverifiedTransaction, _header: &Header) -> Result { - Ok(SignedTransaction::new(t)?) - } + /// Verify a particular transaction is valid, regardless of order. + pub fn verify_transaction_unordered( + &self, + t: UnverifiedTransaction, + _header: &Header, + ) -> Result { + Ok(SignedTransaction::new(t)?) + } - /// Does basic verification of the transaction. - pub fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> Result<(), transaction::Error> { - let check_low_s = match self.ethash_extensions { - Some(ref ext) => header.number() >= ext.homestead_transition, - None => true, - }; + /// Does basic verification of the transaction. 
+ pub fn verify_transaction_basic( + &self, + t: &UnverifiedTransaction, + header: &Header, + ) -> Result<(), transaction::Error> { + let check_low_s = match self.ethash_extensions { + Some(ref ext) => header.number() >= ext.homestead_transition, + None => true, + }; - let chain_id = if header.number() < self.params().validate_chain_id_transition { - t.chain_id() - } else if header.number() >= self.params().eip155_transition { - Some(self.params().chain_id) - } else { - None - }; - t.verify_basic(check_low_s, chain_id)?; + let chain_id = if header.number() < self.params().validate_chain_id_transition { + t.chain_id() + } else if header.number() >= self.params().eip155_transition { + Some(self.params().chain_id) + } else { + None + }; + t.verify_basic(check_low_s, chain_id)?; - Ok(()) - } + Ok(()) + } - /// Does verification of the transaction against the parent state. - pub fn verify_transaction(&self, t: &SignedTransaction, parent: &Header, client: &C) - -> Result<(), transaction::Error> - { - if let Some(ref filter) = self.tx_filter.as_ref() { - if !filter.transaction_allowed(&parent.hash(), parent.number() + 1, t, client) { - return Err(transaction::Error::NotAllowed.into()) - } - } + /// Does verification of the transaction against the parent state. + pub fn verify_transaction( + &self, + t: &SignedTransaction, + parent: &Header, + client: &C, + ) -> Result<(), transaction::Error> { + if let Some(ref filter) = self.tx_filter.as_ref() { + if !filter.transaction_allowed(&parent.hash(), parent.number() + 1, t, client) { + return Err(transaction::Error::NotAllowed.into()); + } + } - Ok(()) - } + Ok(()) + } - /// Additional params. - pub fn additional_params(&self) -> HashMap { - hash_map![ - "registrar".to_owned() => format!("{:x}", self.params.registrar) - ] - } + /// Additional params. 
+ pub fn additional_params(&self) -> HashMap { + hash_map![ + "registrar".to_owned() => format!("{:x}", self.params.registrar) + ] + } - /// Performs pre-validation of RLP decoded transaction before other processing - pub fn decode_transaction(&self, transaction: &[u8]) -> Result { - let rlp = Rlp::new(&transaction); - if rlp.as_raw().len() > self.params().max_transaction_size { - debug!("Rejected oversized transaction of {} bytes", rlp.as_raw().len()); - return Err(transaction::Error::TooBig) - } - rlp.as_val().map_err(|e| transaction::Error::InvalidRlp(e.to_string())) - } + /// Performs pre-validation of RLP decoded transaction before other processing + pub fn decode_transaction( + &self, + transaction: &[u8], + ) -> Result { + let rlp = Rlp::new(&transaction); + if rlp.as_raw().len() > self.params().max_transaction_size { + debug!( + "Rejected oversized transaction of {} bytes", + rlp.as_raw().len() + ); + return Err(transaction::Error::TooBig); + } + rlp.as_val() + .map_err(|e| transaction::Error::InvalidRlp(e.to_string())) + } } /// Auxiliary data fetcher for an Ethereum machine. In Ethereum-like machines /// there are two kinds of auxiliary data: bodies and receipts. #[derive(Default, Clone)] pub struct AuxiliaryData<'a> { - /// The full block bytes, including the header. - pub bytes: Option<&'a [u8]>, - /// The block receipts. - pub receipts: Option<&'a [::types::receipt::Receipt]>, + /// The full block bytes, including the header. + pub bytes: Option<&'a [u8]>, + /// The block receipts. + pub receipts: Option<&'a [::types::receipt::Receipt]>, } /// Type alias for a function we can make calls through synchronously. @@ -420,126 +484,165 @@ pub type Call<'a> = Fn(Address, Vec) -> Result<(Vec, Vec>), Stri /// Request for auxiliary data of a block. #[derive(Debug, Clone, Copy, PartialEq)] pub enum AuxiliaryRequest { - /// Needs the body. - Body, - /// Needs the receipts. - Receipts, - /// Needs both body and receipts. - Both, + /// Needs the body. 
+ Body, + /// Needs the receipts. + Receipts, + /// Needs both body and receipts. + Both, } impl super::Machine for EthereumMachine { - type EngineClient = ::client::EngineClient; + type EngineClient = ::client::EngineClient; - type Error = Error; + type Error = Error; - fn balance(&self, live: &ExecutedBlock, address: &Address) -> Result { - live.state.balance(address).map_err(Into::into) - } + fn balance(&self, live: &ExecutedBlock, address: &Address) -> Result { + live.state.balance(address).map_err(Into::into) + } - fn add_balance(&self, live: &mut ExecutedBlock, address: &Address, amount: &U256) -> Result<(), Error> { - live.state_mut().add_balance(address, amount, CleanupMode::NoEmpty).map_err(Into::into) - } + fn add_balance( + &self, + live: &mut ExecutedBlock, + address: &Address, + amount: &U256, + ) -> Result<(), Error> { + live.state_mut() + .add_balance(address, amount, CleanupMode::NoEmpty) + .map_err(Into::into) + } } // Try to round gas_limit a bit so that: // 1) it will still be in desired range // 2) it will be a nearest (with tendency to increase) multiple of PARITY_GAS_LIMIT_DETERMINANT fn round_block_gas_limit(gas_limit: U256, lower_limit: U256, upper_limit: U256) -> U256 { - let increased_gas_limit = gas_limit + (PARITY_GAS_LIMIT_DETERMINANT - gas_limit % PARITY_GAS_LIMIT_DETERMINANT); - if increased_gas_limit > upper_limit { - let decreased_gas_limit = increased_gas_limit - PARITY_GAS_LIMIT_DETERMINANT; - if decreased_gas_limit < lower_limit { - gas_limit - } else { - decreased_gas_limit - } - } else { - increased_gas_limit - } + let increased_gas_limit = + gas_limit + (PARITY_GAS_LIMIT_DETERMINANT - gas_limit % PARITY_GAS_LIMIT_DETERMINANT); + if increased_gas_limit > upper_limit { + let decreased_gas_limit = increased_gas_limit - PARITY_GAS_LIMIT_DETERMINANT; + if decreased_gas_limit < lower_limit { + gas_limit + } else { + decreased_gas_limit + } + } else { + increased_gas_limit + } } #[cfg(test)] mod tests { - use super::*; + use 
super::*; - fn get_default_ethash_extensions() -> EthashExtensions { - EthashExtensions { - homestead_transition: 1150000, - dao_hardfork_transition: u64::max_value(), - dao_hardfork_beneficiary: "0000000000000000000000000000000000000001".into(), - dao_hardfork_accounts: Vec::new(), - } - } + fn get_default_ethash_extensions() -> EthashExtensions { + EthashExtensions { + homestead_transition: 1150000, + dao_hardfork_transition: u64::max_value(), + dao_hardfork_beneficiary: "0000000000000000000000000000000000000001".into(), + dao_hardfork_accounts: Vec::new(), + } + } - #[test] - fn should_disallow_unsigned_transactions() { - let rlp = "ea80843b9aca0083015f90948921ebb5f79e9e3920abe571004d0b1d5119c154865af3107a400080038080"; - let transaction: UnverifiedTransaction = ::rlp::decode(&::rustc_hex::FromHex::from_hex(rlp).unwrap()).unwrap(); - let spec = ::ethereum::new_ropsten_test(); - let ethparams = get_default_ethash_extensions(); + #[test] + fn should_disallow_unsigned_transactions() { + let rlp = "ea80843b9aca0083015f90948921ebb5f79e9e3920abe571004d0b1d5119c154865af3107a400080038080"; + let transaction: UnverifiedTransaction = + ::rlp::decode(&::rustc_hex::FromHex::from_hex(rlp).unwrap()).unwrap(); + let spec = ::ethereum::new_ropsten_test(); + let ethparams = get_default_ethash_extensions(); - let machine = EthereumMachine::with_ethash_extensions( - spec.params().clone(), - Default::default(), - ethparams, - ); - let mut header = ::types::header::Header::new(); - header.set_number(15); + let machine = EthereumMachine::with_ethash_extensions( + spec.params().clone(), + Default::default(), + ethparams, + ); + let mut header = ::types::header::Header::new(); + header.set_number(15); - let res = machine.verify_transaction_basic(&transaction, &header); - assert_eq!(res, Err(transaction::Error::InvalidSignature("Crypto error (Invalid EC signature)".into()))); - } + let res = machine.verify_transaction_basic(&transaction, &header); + assert_eq!( + res, + 
Err(transaction::Error::InvalidSignature( + "Crypto error (Invalid EC signature)".into() + )) + ); + } - #[test] - fn ethash_gas_limit_is_multiple_of_determinant() { - use ethereum_types::U256; + #[test] + fn ethash_gas_limit_is_multiple_of_determinant() { + use ethereum_types::U256; - let spec = ::ethereum::new_homestead_test(); - let ethparams = get_default_ethash_extensions(); + let spec = ::ethereum::new_homestead_test(); + let ethparams = get_default_ethash_extensions(); - let machine = EthereumMachine::with_ethash_extensions( - spec.params().clone(), - Default::default(), - ethparams, - ); + let machine = EthereumMachine::with_ethash_extensions( + spec.params().clone(), + Default::default(), + ethparams, + ); - let mut parent = ::types::header::Header::new(); - let mut header = ::types::header::Header::new(); - header.set_number(1); + let mut parent = ::types::header::Header::new(); + let mut header = ::types::header::Header::new(); + header.set_number(1); - // this test will work for this constant only - assert_eq!(PARITY_GAS_LIMIT_DETERMINANT, U256::from(37)); + // this test will work for this constant only + assert_eq!(PARITY_GAS_LIMIT_DETERMINANT, U256::from(37)); - // when parent.gas_limit < gas_floor_target: - parent.set_gas_limit(U256::from(50_000)); - machine.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(200_000)); - assert_eq!(*header.gas_limit(), U256::from(50_024)); + // when parent.gas_limit < gas_floor_target: + parent.set_gas_limit(U256::from(50_000)); + machine.populate_from_parent( + &mut header, + &parent, + U256::from(100_000), + U256::from(200_000), + ); + assert_eq!(*header.gas_limit(), U256::from(50_024)); - // when parent.gas_limit > gas_ceil_target: - parent.set_gas_limit(U256::from(250_000)); - machine.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(200_000)); - assert_eq!(*header.gas_limit(), U256::from(249_787)); + // when parent.gas_limit > gas_ceil_target: + 
parent.set_gas_limit(U256::from(250_000)); + machine.populate_from_parent( + &mut header, + &parent, + U256::from(100_000), + U256::from(200_000), + ); + assert_eq!(*header.gas_limit(), U256::from(249_787)); - // when parent.gas_limit is in miner's range - header.set_gas_used(U256::from(150_000)); - parent.set_gas_limit(U256::from(150_000)); - machine.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(200_000)); - assert_eq!(*header.gas_limit(), U256::from(150_035)); + // when parent.gas_limit is in miner's range + header.set_gas_used(U256::from(150_000)); + parent.set_gas_limit(U256::from(150_000)); + machine.populate_from_parent( + &mut header, + &parent, + U256::from(100_000), + U256::from(200_000), + ); + assert_eq!(*header.gas_limit(), U256::from(150_035)); - // when parent.gas_limit is in miner's range - // && we can NOT increase it to be multiple of constant - header.set_gas_used(U256::from(150_000)); - parent.set_gas_limit(U256::from(150_000)); - machine.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(150_002)); - assert_eq!(*header.gas_limit(), U256::from(149_998)); + // when parent.gas_limit is in miner's range + // && we can NOT increase it to be multiple of constant + header.set_gas_used(U256::from(150_000)); + parent.set_gas_limit(U256::from(150_000)); + machine.populate_from_parent( + &mut header, + &parent, + U256::from(100_000), + U256::from(150_002), + ); + assert_eq!(*header.gas_limit(), U256::from(149_998)); - // when parent.gas_limit is in miner's range - // && we can NOT increase it to be multiple of constant - // && we can NOT decrease it to be multiple of constant - header.set_gas_used(U256::from(150_000)); - parent.set_gas_limit(U256::from(150_000)); - machine.populate_from_parent(&mut header, &parent, U256::from(150_000), U256::from(150_002)); - assert_eq!(*header.gas_limit(), U256::from(150_002)); - } + // when parent.gas_limit is in miner's range + // && we can NOT increase it to be 
multiple of constant + // && we can NOT decrease it to be multiple of constant + header.set_gas_used(U256::from(150_000)); + parent.set_gas_limit(U256::from(150_000)); + machine.populate_from_parent( + &mut header, + &parent, + U256::from(150_000), + U256::from(150_002), + ); + assert_eq!(*header.gas_limit(), U256::from(150_002)); + } } diff --git a/ethcore/src/machine/mod.rs b/ethcore/src/machine/mod.rs index 882dc011a..e702294ee 100644 --- a/ethcore/src/machine/mod.rs +++ b/ethcore/src/machine/mod.rs @@ -3,5 +3,4 @@ mod impls; mod traits; -pub use self::impls::*; -pub use self::traits::*; +pub use self::{impls::*, traits::*}; diff --git a/ethcore/src/machine/traits.rs b/ethcore/src/machine/traits.rs index 1523885e0..b481dc62f 100644 --- a/ethcore/src/machine/traits.rs +++ b/ethcore/src/machine/traits.rs @@ -17,21 +17,26 @@ //! Generalization of a state machine for a consensus engine. //! This will define traits for the header, block, and state of a blockchain. -use ethereum_types::{U256, Address}; use block::ExecutedBlock; +use ethereum_types::{Address, U256}; /// Generalization of types surrounding blockchain-suitable state machines. pub trait Machine: Send + Sync { - /// A handle to a blockchain client for this machine. - type EngineClient: ?Sized; + /// A handle to a blockchain client for this machine. + type EngineClient: ?Sized; - /// Errors which can occur when querying or interacting with the machine. - type Error; + /// Errors which can occur when querying or interacting with the machine. + type Error; - /// Get the balance, in base units, associated with an account. - /// Extracts data from the live block. - fn balance(&self, live: &ExecutedBlock, address: &Address) -> Result; + /// Get the balance, in base units, associated with an account. + /// Extracts data from the live block. + fn balance(&self, live: &ExecutedBlock, address: &Address) -> Result; - /// Increment the balance of an account in the state of the live block. 
- fn add_balance(&self, live: &mut ExecutedBlock, address: &Address, amount: &U256) -> Result<(), Self::Error>; + /// Increment the balance of an account in the state of the live block. + fn add_balance( + &self, + live: &mut ExecutedBlock, + address: &Address, + amount: &U256, + ) -> Result<(), Self::Error>; } diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 815aba6cf..5a0add234 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -14,46 +14,49 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::cmp; -use std::time::{Instant, Duration}; -use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::sync::Arc; +use std::{ + cmp, + collections::{BTreeMap, BTreeSet, HashSet}, + sync::Arc, + time::{Duration, Instant}, +}; use ansi_term::Colour; use bytes::Bytes; use call_contract::CallContract; -use ethcore_miner::gas_pricer::GasPricer; -use ethcore_miner::local_accounts::LocalAccounts; -use ethcore_miner::pool::{self, TransactionQueue, VerifiedTransaction, QueueStatus, PrioritizationStrategy}; -use ethcore_miner::service_transaction_checker::ServiceTransactionChecker; #[cfg(feature = "work-notify")] use ethcore_miner::work_notify::NotifyWork; -use ethereum_types::{H256, U256, Address}; +use ethcore_miner::{ + gas_pricer::GasPricer, + local_accounts::LocalAccounts, + pool::{self, PrioritizationStrategy, QueueStatus, TransactionQueue, VerifiedTransaction}, + service_transaction_checker::ServiceTransactionChecker, +}; +use ethereum_types::{Address, H256, U256}; use io::IoChannel; -use miner::pool_client::{PoolClient, CachedNonceClient, NonceCache}; -use miner::{self, MinerService}; +use miner::{ + self, + pool_client::{CachedNonceClient, NonceCache, PoolClient}, + MinerService, +}; use parking_lot::{Mutex, RwLock}; use rayon::prelude::*; -use types::transaction::{ - self, - Action, - UnverifiedTransaction, - SignedTransaction, - 
PendingTransaction, +use types::{ + block::Block, + header::Header, + receipt::RichReceipt, + transaction::{self, Action, PendingTransaction, SignedTransaction, UnverifiedTransaction}, + BlockNumber, }; -use types::BlockNumber; -use types::block::Block; -use types::header::Header; -use types::receipt::RichReceipt; -use using_queue::{UsingQueue, GetAction}; +use using_queue::{GetAction, UsingQueue}; use block::{ClosedBlock, SealedBlock}; use client::{ - BlockChain, ChainInfo, BlockProducer, SealedBlockImporter, Nonce, TransactionInfo, TransactionId + traits::{EngineClient, ForceUpdateSealing}, + BlockChain, BlockId, BlockProducer, ChainInfo, ClientIoMessage, Nonce, SealedBlockImporter, + TransactionId, TransactionInfo, }; -use client::{BlockId, ClientIoMessage}; -use client::traits::{EngineClient, ForceUpdateSealing}; -use engines::{EthEngine, Seal, EngineSigner}; +use engines::{EngineSigner, EthEngine, Seal}; use error::{Error, ErrorKind}; use executed::ExecutionError; use executive::contract_address; @@ -63,13 +66,13 @@ use state::State; /// Different possible definitions for pending transaction set. #[derive(Debug, PartialEq)] pub enum PendingSet { - /// Always just the transactions in the queue. These have had only cheap checks. - AlwaysQueue, - /// Always just the transactions in the sealing block. These have had full checks but - /// may be empty if the node is not actively mining or has no force_sealing enabled. - AlwaysSealing, - /// Takes from sealing if mining, from queue otherwise. - SealingOrElseQueue, + /// Always just the transactions in the queue. These have had only cheap checks. + AlwaysQueue, + /// Always just the transactions in the sealing block. These have had full checks but + /// may be empty if the node is not actively mining or has no force_sealing enabled. + AlwaysSealing, + /// Takes from sealing if mining, from queue otherwise. + SealingOrElseQueue, } /// Transaction queue penalization settings. 
@@ -78,24 +81,24 @@ pub enum PendingSet { /// will get lower priority. #[derive(Debug, PartialEq, Clone)] pub enum Penalization { - /// Penalization in transaction queue is disabled - Disabled, - /// Penalization in transaction queue is enabled - Enabled { - /// Upper limit of transaction processing time before penalizing. - offend_threshold: Duration, - }, + /// Penalization in transaction queue is disabled + Disabled, + /// Penalization in transaction queue is enabled + Enabled { + /// Upper limit of transaction processing time before penalizing. + offend_threshold: Duration, + }, } /// Pending block preparation status. #[derive(Debug, PartialEq)] pub enum BlockPreparationStatus { - /// We had to prepare new pending block and the preparation succeeded. - Succeeded, - /// We had to prepare new pending block but the preparation failed. - Failed, - /// We didn't have to prepare a new block. - NotPrepared, + /// We had to prepare new pending block and the preparation succeeded. + Succeeded, + /// We had to prepare new pending block but the preparation failed. + Failed, + /// We didn't have to prepare a new block. + NotPrepared, } /// Initial minimal gas price. @@ -118,1150 +121,1256 @@ const MAX_SKIPPED_TRANSACTIONS: usize = 128; /// Configures the behaviour of the miner. #[derive(Debug, PartialEq)] pub struct MinerOptions { - /// Force the miner to reseal, even when nobody has asked for work. - pub force_sealing: bool, - /// Reseal on receipt of new external transactions. - pub reseal_on_external_tx: bool, - /// Reseal on receipt of new local transactions. - pub reseal_on_own_tx: bool, - /// Reseal when new uncle block has been imported. - pub reseal_on_uncle: bool, - /// Minimum period between transaction-inspired reseals. - pub reseal_min_period: Duration, - /// Maximum period between blocks (enables force sealing after that). - pub reseal_max_period: Duration, - /// Whether we should fallback to providing all the queue's transactions or just pending. 
- pub pending_set: PendingSet, - /// How many historical work packages can we store before running out? - pub work_queue_size: usize, - /// Can we submit two different solutions for the same block and expect both to result in an import? - pub enable_resubmission: bool, - /// Create a pending block with maximal possible gas limit. - /// NOTE: Such block will contain all pending transactions but - /// will be invalid if mined. - pub infinite_pending_block: bool, + /// Force the miner to reseal, even when nobody has asked for work. + pub force_sealing: bool, + /// Reseal on receipt of new external transactions. + pub reseal_on_external_tx: bool, + /// Reseal on receipt of new local transactions. + pub reseal_on_own_tx: bool, + /// Reseal when new uncle block has been imported. + pub reseal_on_uncle: bool, + /// Minimum period between transaction-inspired reseals. + pub reseal_min_period: Duration, + /// Maximum period between blocks (enables force sealing after that). + pub reseal_max_period: Duration, + /// Whether we should fallback to providing all the queue's transactions or just pending. + pub pending_set: PendingSet, + /// How many historical work packages can we store before running out? + pub work_queue_size: usize, + /// Can we submit two different solutions for the same block and expect both to result in an import? + pub enable_resubmission: bool, + /// Create a pending block with maximal possible gas limit. + /// NOTE: Such block will contain all pending transactions but + /// will be invalid if mined. + pub infinite_pending_block: bool, - /// Strategy to use for prioritizing transactions in the queue. - pub tx_queue_strategy: PrioritizationStrategy, - /// Simple senders penalization. - pub tx_queue_penalization: Penalization, - /// Do we want to mark transactions recieved locally (e.g. RPC) as local if we don't have the sending account? - pub tx_queue_no_unfamiliar_locals: bool, - /// Do we refuse to accept service transactions even if sender is certified. 
- pub refuse_service_transactions: bool, - /// Transaction pool limits. - pub pool_limits: pool::Options, - /// Initial transaction verification options. - pub pool_verification_options: pool::verifier::Options, + /// Strategy to use for prioritizing transactions in the queue. + pub tx_queue_strategy: PrioritizationStrategy, + /// Simple senders penalization. + pub tx_queue_penalization: Penalization, + /// Do we want to mark transactions recieved locally (e.g. RPC) as local if we don't have the sending account? + pub tx_queue_no_unfamiliar_locals: bool, + /// Do we refuse to accept service transactions even if sender is certified. + pub refuse_service_transactions: bool, + /// Transaction pool limits. + pub pool_limits: pool::Options, + /// Initial transaction verification options. + pub pool_verification_options: pool::verifier::Options, } impl Default for MinerOptions { - fn default() -> Self { - MinerOptions { - force_sealing: false, - reseal_on_external_tx: false, - reseal_on_own_tx: true, - reseal_on_uncle: false, - reseal_min_period: Duration::from_secs(2), - reseal_max_period: Duration::from_secs(120), - pending_set: PendingSet::AlwaysQueue, - work_queue_size: 20, - enable_resubmission: true, - infinite_pending_block: false, - tx_queue_strategy: PrioritizationStrategy::GasPriceOnly, - tx_queue_penalization: Penalization::Disabled, - tx_queue_no_unfamiliar_locals: false, - refuse_service_transactions: false, - pool_limits: pool::Options { - max_count: 8_192, - max_per_sender: 81, - max_mem_usage: 4 * 1024 * 1024, - }, - pool_verification_options: pool::verifier::Options { - minimal_gas_price: DEFAULT_MINIMAL_GAS_PRICE.into(), - block_gas_limit: U256::max_value(), - tx_gas_limit: U256::max_value(), - no_early_reject: false, - }, - } - } + fn default() -> Self { + MinerOptions { + force_sealing: false, + reseal_on_external_tx: false, + reseal_on_own_tx: true, + reseal_on_uncle: false, + reseal_min_period: Duration::from_secs(2), + reseal_max_period: 
Duration::from_secs(120), + pending_set: PendingSet::AlwaysQueue, + work_queue_size: 20, + enable_resubmission: true, + infinite_pending_block: false, + tx_queue_strategy: PrioritizationStrategy::GasPriceOnly, + tx_queue_penalization: Penalization::Disabled, + tx_queue_no_unfamiliar_locals: false, + refuse_service_transactions: false, + pool_limits: pool::Options { + max_count: 8_192, + max_per_sender: 81, + max_mem_usage: 4 * 1024 * 1024, + }, + pool_verification_options: pool::verifier::Options { + minimal_gas_price: DEFAULT_MINIMAL_GAS_PRICE.into(), + block_gas_limit: U256::max_value(), + tx_gas_limit: U256::max_value(), + no_early_reject: false, + }, + } + } } /// Configurable parameters of block authoring. #[derive(Debug, Default, Clone)] pub struct AuthoringParams { - /// Lower and upper bound of block gas limit that we are targeting - pub gas_range_target: (U256, U256), - /// Block author - pub author: Address, - /// Block extra data - pub extra_data: Bytes, + /// Lower and upper bound of block gas limit that we are targeting + pub gas_range_target: (U256, U256), + /// Block author + pub author: Address, + /// Block extra data + pub extra_data: Bytes, } /// Block sealing mechanism pub enum Author { - /// Sealing block is external and we only need a reward beneficiary (i.e. PoW) - External(Address), - /// Sealing is done internally, we need a way to create signatures to seal block (i.e. PoA) - Sealer(Box), + /// Sealing block is external and we only need a reward beneficiary (i.e. PoW) + External(Address), + /// Sealing is done internally, we need a way to create signatures to seal block (i.e. PoA) + Sealer(Box), } impl Author { - /// Get author's address. - pub fn address(&self) -> Address { - match *self { - Author::External(address) => address, - Author::Sealer(ref sealer) => sealer.address(), - } - } + /// Get author's address. 
+ pub fn address(&self) -> Address { + match *self { + Author::External(address) => address, + Author::Sealer(ref sealer) => sealer.address(), + } + } } struct SealingWork { - queue: UsingQueue, - enabled: bool, - next_allowed_reseal: Instant, - next_mandatory_reseal: Instant, - // block number when sealing work was last requested - last_request: Option, + queue: UsingQueue, + enabled: bool, + next_allowed_reseal: Instant, + next_mandatory_reseal: Instant, + // block number when sealing work was last requested + last_request: Option, } impl SealingWork { - /// Are we allowed to do a non-mandatory reseal? - fn reseal_allowed(&self) -> bool { - Instant::now() > self.next_allowed_reseal - } + /// Are we allowed to do a non-mandatory reseal? + fn reseal_allowed(&self) -> bool { + Instant::now() > self.next_allowed_reseal + } } /// Keeps track of transactions using priority queue and holds currently mined block. /// Handles preparing work for "work sealing" or seals "internally" if Engine does not require work. pub struct Miner { - // NOTE [ToDr] When locking always lock in this order! - sealing: Mutex, - params: RwLock, - #[cfg(feature = "work-notify")] - listeners: RwLock>>, - nonce_cache: NonceCache, - gas_pricer: Mutex, - options: MinerOptions, - // TODO [ToDr] Arc is only required because of price updater - transaction_queue: Arc, - engine: Arc, - accounts: Arc, - io_channel: RwLock>>, - service_transaction_checker: Option, + // NOTE [ToDr] When locking always lock in this order! 
+ sealing: Mutex, + params: RwLock, + #[cfg(feature = "work-notify")] + listeners: RwLock>>, + nonce_cache: NonceCache, + gas_pricer: Mutex, + options: MinerOptions, + // TODO [ToDr] Arc is only required because of price updater + transaction_queue: Arc, + engine: Arc, + accounts: Arc, + io_channel: RwLock>>, + service_transaction_checker: Option, } impl Miner { - /// Push listener that will handle new jobs - #[cfg(feature = "work-notify")] - pub fn add_work_listener(&self, notifier: Box) { - self.listeners.write().push(notifier); - self.sealing.lock().enabled = true; - } - - /// Set a callback to be notified about imported transactions' hashes. - pub fn add_transactions_listener(&self, f: Box) { - self.transaction_queue.add_listener(f); - } - - /// Creates new instance of miner Arc. - pub fn new( - options: MinerOptions, - gas_pricer: GasPricer, - spec: &Spec, - accounts: A, - ) -> Self { - let limits = options.pool_limits.clone(); - let verifier_options = options.pool_verification_options.clone(); - let tx_queue_strategy = options.tx_queue_strategy; - let nonce_cache_size = cmp::max(4096, limits.max_count / 4); - let refuse_service_transactions = options.refuse_service_transactions; - let engine = spec.engine.clone(); - - Miner { - sealing: Mutex::new(SealingWork { - queue: UsingQueue::new(options.work_queue_size), - enabled: options.force_sealing - || spec.engine.seals_internally().is_some(), - next_allowed_reseal: Instant::now(), - next_mandatory_reseal: Instant::now() + options.reseal_max_period, - last_request: None, - }), - params: RwLock::new(AuthoringParams::default()), - #[cfg(feature = "work-notify")] - listeners: RwLock::new(vec![]), - gas_pricer: Mutex::new(gas_pricer), - nonce_cache: NonceCache::new(nonce_cache_size), - options, - transaction_queue: Arc::new(TransactionQueue::new(limits, verifier_options, tx_queue_strategy)), - accounts: Arc::new(accounts), - engine, - io_channel: RwLock::new(None), - service_transaction_checker: if 
refuse_service_transactions { - None - } else { - Some(ServiceTransactionChecker::default()) - }, - } - } - - /// Creates new instance of miner with given spec and accounts. - /// - /// NOTE This should be only used for tests. - pub fn new_for_tests(spec: &Spec, accounts: Option>) -> Miner { - let minimal_gas_price = 0.into(); - Miner::new(MinerOptions { - pool_verification_options: pool::verifier::Options { - minimal_gas_price, - block_gas_limit: U256::max_value(), - tx_gas_limit: U256::max_value(), - no_early_reject: false, - }, - reseal_min_period: Duration::from_secs(0), - ..Default::default() - }, GasPricer::new_fixed(minimal_gas_price), spec, accounts.unwrap_or_default()) - } - - /// Sets `IoChannel` - pub fn set_io_channel(&self, io_channel: IoChannel) { - *self.io_channel.write() = Some(io_channel); - } - - /// Sets in-blockchain checker for transactions. - pub fn set_in_chain_checker(&self, chain: &Arc) where - C: TransactionInfo + Send + Sync + 'static, - { - let client = Arc::downgrade(chain); - self.transaction_queue.set_in_chain_checker(move |hash| { - match client.upgrade() { - Some(info) => info.transaction_block(TransactionId::Hash(*hash)).is_some(), - None => false, - } - }); - } - - /// Clear all pending block states - pub fn clear(&self) { - self.sealing.lock().queue.reset(); - } - - /// Updates transaction queue verification limits. - /// - /// Limits consist of current block gas limit and minimal gas price. - pub fn update_transaction_queue_limits(&self, block_gas_limit: U256) { - trace!(target: "miner", "minimal_gas_price: recalibrating..."); - let txq = self.transaction_queue.clone(); - let mut options = self.options.pool_verification_options.clone(); - self.gas_pricer.lock().recalibrate(move |gas_price| { - debug!(target: "miner", "minimal_gas_price: Got gas price! 
{}", gas_price); - options.minimal_gas_price = gas_price; - options.block_gas_limit = block_gas_limit; - txq.set_verifier_options(options); - }); - } - - /// Returns ServiceTransactionChecker - pub fn service_transaction_checker(&self) -> Option { - self.service_transaction_checker.clone() - } - - /// Retrieves an existing pending block iff it's not older than given block number. - /// - /// NOTE: This will not prepare a new pending block if it's not existing. - fn map_existing_pending_block(&self, f: F, latest_block_number: BlockNumber) -> Option where - F: FnOnce(&ClosedBlock) -> T, - { - self.sealing.lock().queue - .peek_last_ref() - .and_then(|b| { - // to prevent a data race between block import and updating pending block - // we allow the number to be equal. - if b.header.number() >= latest_block_number { - Some(f(b)) - } else { - None - } - }) - } - - fn pool_client<'a, C: 'a>(&'a self, chain: &'a C) -> PoolClient<'a, C> where - C: BlockChain + CallContract, - { - PoolClient::new( - chain, - &self.nonce_cache, - &*self.engine, - &*self.accounts, - self.service_transaction_checker.as_ref(), - ) - } - - /// Prepares new block for sealing including top transactions from queue. - fn prepare_block(&self, chain: &C) -> Option<(ClosedBlock, Option)> where - C: BlockChain + CallContract + BlockProducer + Nonce + Sync, - { - trace_time!("prepare_block"); - let chain_info = chain.chain_info(); - - // Open block - let (mut open_block, original_work_hash) = { - let mut sealing = self.sealing.lock(); - let last_work_hash = sealing.queue.peek_last_ref().map(|pb| pb.header.hash()); - let best_hash = chain_info.best_block_hash; - - // check to see if last ClosedBlock in would_seals is actually same parent block. - // if so - // duplicate, re-open and push any new transactions. - // if at least one was pushed successfully, close and enqueue new ClosedBlock; - // otherwise, leave everything alone. - // otherwise, author a fresh block. 
- let mut open_block = match sealing.queue.get_pending_if(|b| b.header.parent_hash() == &best_hash) { - Some(old_block) => { - trace!(target: "miner", "prepare_block: Already have previous work; updating and returning"); - // add transactions to old_block - chain.reopen_block(old_block) - } - None => { - // block not found - create it. - trace!(target: "miner", "prepare_block: No existing work - making new block"); - let params = self.params.read().clone(); - - match chain.prepare_open_block( - params.author, - params.gas_range_target, - params.extra_data, - ) { - Ok(block) => block, - Err(err) => { - warn!(target: "miner", "Open new block failed with error {:?}. This is likely an error in chain specificiations or on-chain consensus smart contracts.", err); - return None; - } - } - } - }; - - if self.options.infinite_pending_block { - open_block.remove_gas_limit(); - } - - (open_block, last_work_hash) - }; - - let mut invalid_transactions = HashSet::new(); - let mut not_allowed_transactions = HashSet::new(); - let mut senders_to_penalize = HashSet::new(); - let block_number = open_block.header.number(); - - let mut tx_count = 0usize; - let mut skipped_transactions = 0usize; - - let client = self.pool_client(chain); - let engine_params = self.engine.params(); - let min_tx_gas: U256 = self.engine.schedule(chain_info.best_block_number).tx_gas.into(); - let nonce_cap: Option = if chain_info.best_block_number + 1 >= engine_params.dust_protection_transition { - Some((engine_params.nonce_cap_increment * (chain_info.best_block_number + 1)).into()) - } else { - None - }; - // we will never need more transactions than limit divided by min gas - let max_transactions = if min_tx_gas.is_zero() { - usize::max_value() - } else { - MAX_SKIPPED_TRANSACTIONS.saturating_add(cmp::min(*open_block.header.gas_limit() / min_tx_gas, u64::max_value().into()).as_u64() as usize) - }; - - let pending: Vec> = self.transaction_queue.pending( - client.clone(), - pool::PendingSettings { - 
block_number: chain_info.best_block_number, - current_timestamp: chain_info.best_block_timestamp, - nonce_cap, - max_len: max_transactions, - ordering: miner::PendingOrdering::Priority, - } - ); - - let took_ms = |elapsed: &Duration| { - elapsed.as_secs() * 1000 + elapsed.subsec_nanos() as u64 / 1_000_000 - }; - - let block_start = Instant::now(); - debug!(target: "miner", "Attempting to push {} transactions.", pending.len()); - - for tx in pending { - let start = Instant::now(); - - let transaction = tx.signed().clone(); - let hash = transaction.hash(); - let sender = transaction.sender(); - - // Re-verify transaction again vs current state. - let result = client.verify_for_pending_block(&transaction, &open_block.header) - .map_err(|e| e.into()) - .and_then(|_| { - open_block.push_transaction(transaction, None) - }); - - let took = start.elapsed(); - - // Check for heavy transactions - match self.options.tx_queue_penalization { - Penalization::Enabled { ref offend_threshold } if &took > offend_threshold => { - senders_to_penalize.insert(sender); - debug!(target: "miner", "Detected heavy transaction ({} ms). Penalizing sender.", took_ms(&took)); - }, - _ => {}, - } - - debug!(target: "miner", "Adding tx {:?} took {} ms", hash, took_ms(&took)); - match result { - Err(Error(ErrorKind::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, gas }), _)) => { - debug!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?} (limit: {:?}, used: {:?}, gas: {:?})", hash, gas_limit, gas_used, gas); - - // Penalize transaction if it's above current gas limit - if gas > gas_limit { - debug!(target: "txqueue", "[{:?}] Transaction above block gas limit.", hash); - invalid_transactions.insert(hash); - } - - // Exit early if gas left is smaller then min_tx_gas - let gas_left = gas_limit - gas_used; - if gas_left < min_tx_gas { - debug!(target: "miner", "Remaining gas is lower than minimal gas for a transaction. 
Block is full."); - break; - } - - // Avoid iterating over the entire queue in case block is almost full. - skipped_transactions += 1; - if skipped_transactions > MAX_SKIPPED_TRANSACTIONS { - debug!(target: "miner", "Reached skipped transactions threshold. Assuming block is full."); - break; - } - }, - // Invalid nonce error can happen only if previous transaction is skipped because of gas limit. - // If there is errornous state of transaction queue it will be fixed when next block is imported. - Err(Error(ErrorKind::Execution(ExecutionError::InvalidNonce { expected, got }), _)) => { - debug!(target: "miner", "Skipping adding transaction to block because of invalid nonce: {:?} (expected: {:?}, got: {:?})", hash, expected, got); - }, - // already have transaction - ignore - Err(Error(ErrorKind::Transaction(transaction::Error::AlreadyImported), _)) => {}, - Err(Error(ErrorKind::Transaction(transaction::Error::NotAllowed), _)) => { - not_allowed_transactions.insert(hash); - debug!(target: "miner", "Skipping non-allowed transaction for sender {:?}", hash); - }, - Err(e) => { - debug!(target: "txqueue", "[{:?}] Marking as invalid: {:?}.", hash, e); - debug!( - target: "miner", "Error adding transaction to block: number={}. transaction_hash={:?}, Error: {:?}", block_number, hash, e - ); - invalid_transactions.insert(hash); - }, - // imported ok - _ => tx_count += 1, - } - } - let elapsed = block_start.elapsed(); - debug!(target: "miner", "Pushed {} transactions in {} ms", tx_count, took_ms(&elapsed)); - - let block = match open_block.close() { - Ok(block) => block, - Err(err) => { - warn!(target: "miner", "Closing the block failed with error {:?}. 
This is likely an error in chain specificiations or on-chain consensus smart contracts.", err); - return None; - } - }; - - { - self.transaction_queue.remove(invalid_transactions.iter(), true); - self.transaction_queue.remove(not_allowed_transactions.iter(), false); - self.transaction_queue.penalize(senders_to_penalize.iter()); - } - - Some((block, original_work_hash)) - } - - /// Returns `true` if we should create pending block even if some other conditions are not met. - /// - /// In general we always seal iff: - /// 1. --force-sealing CLI parameter is provided - /// 2. There are listeners awaiting new work packages (e.g. remote work notifications or stratum). - fn forced_sealing(&self) -> bool { - let listeners_empty = { - #[cfg(feature = "work-notify")] - { self.listeners.read().is_empty() } - #[cfg(not(feature = "work-notify"))] - { true } - }; - - self.options.force_sealing || !listeners_empty - } - - /// Check is reseal is allowed and necessary. - fn requires_reseal(&self, best_block: BlockNumber) -> bool { - let mut sealing = self.sealing.lock(); - if !sealing.enabled { - trace!(target: "miner", "requires_reseal: sealing is disabled"); - return false - } - - if !sealing.reseal_allowed() { - trace!(target: "miner", "requires_reseal: reseal too early"); - return false - } - - trace!(target: "miner", "requires_reseal: sealing enabled"); - - // Disable sealing if there were no requests for SEALING_TIMEOUT_IN_BLOCKS - let had_requests = sealing.last_request.map(|last_request| - best_block.saturating_sub(last_request) <= SEALING_TIMEOUT_IN_BLOCKS - ).unwrap_or(false); - - // keep sealing enabled if any of the conditions is met - let sealing_enabled = self.forced_sealing() - || self.transaction_queue.has_local_pending_transactions() - || self.engine.seals_internally() == Some(true) - || had_requests; - - let should_disable_sealing = !sealing_enabled; - - trace!(target: "miner", "requires_reseal: should_disable_sealing={}; forced={:?}, has_local={:?}, 
internal={:?}, had_requests={:?}", - should_disable_sealing, - self.forced_sealing(), - self.transaction_queue.has_local_pending_transactions(), - self.engine.seals_internally(), - had_requests, - ); - - if should_disable_sealing { - trace!(target: "miner", "Miner sleeping (current {}, last {})", best_block, sealing.last_request.unwrap_or(0)); - sealing.enabled = false; - sealing.queue.reset(); - false - } else { - // sealing enabled and we don't want to sleep. - sealing.next_allowed_reseal = Instant::now() + self.options.reseal_min_period; - true - } - } - - // TODO: (https://github.com/paritytech/parity-ethereum/issues/10407) - // This is only used in authority_round path, and should be refactored to merge with the other seal() path. - // Attempts to perform internal sealing (one that does not require work) and handles the result depending on the - // type of Seal. - fn seal_and_import_block_internally(&self, chain: &C, block: ClosedBlock) -> bool - where C: BlockChain + SealedBlockImporter, - { - { - let sealing = self.sealing.lock(); - if block.transactions.is_empty() - && !self.forced_sealing() - && Instant::now() <= sealing.next_mandatory_reseal - { - return false - } - } - - trace!(target: "miner", "seal_block_internally: attempting internal seal."); - - let parent_header = match chain.block_header(BlockId::Hash(*block.header.parent_hash())) { - Some(h) => { - match h.decode() { - Ok(decoded_hdr) => decoded_hdr, - Err(_) => return false - } - } - None => return false, - }; - - match self.engine.generate_seal(&block, &parent_header) { - // Save proposal for later seal submission and broadcast it. 
- Seal::Proposal(seal) => { - trace!(target: "miner", "Received a Proposal seal."); - { - let mut sealing = self.sealing.lock(); - sealing.next_mandatory_reseal = Instant::now() + self.options.reseal_max_period; - sealing.queue.set_pending(block.clone()); - sealing.queue.use_last_ref(); - } - - block - .lock() - .seal(&*self.engine, seal) - .map(|sealed| { - chain.broadcast_proposal_block(sealed); - true - }) - .unwrap_or_else(|e| { - warn!("ERROR: seal failed when given internally generated seal: {}", e); - false - }) - }, - // Directly import a regular sealed block. - Seal::Regular(seal) => { - trace!(target: "miner", "Received a Regular seal."); - { - let mut sealing = self.sealing.lock(); - sealing.next_mandatory_reseal = Instant::now() + self.options.reseal_max_period; - } - - block - .lock() - .seal(&*self.engine, seal) - .map(|sealed| { - chain.import_sealed_block(sealed).is_ok() - }) - .unwrap_or_else(|e| { - warn!("ERROR: seal failed when given internally generated seal: {}", e); - false - }) - }, - Seal::None => false, - } - } - - /// Prepares work which has to be done to seal. - fn prepare_work(&self, block: ClosedBlock, original_work_hash: Option) { - let (work, is_new) = { - let block_header = block.header.clone(); - let block_hash = block_header.hash(); - - let mut sealing = self.sealing.lock(); - let last_work_hash = sealing.queue.peek_last_ref().map(|pb| pb.header.hash()); - - trace!( - target: "miner", - "prepare_work: Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", - original_work_hash, last_work_hash, block_hash - ); - - let (work, is_new) = if last_work_hash.map_or(true, |h| h != block_hash) { - trace!( - target: "miner", - "prepare_work: Pushing a new, refreshed or borrowed pending {}...", - block_hash - ); - let is_new = original_work_hash.map_or(true, |h| h != block_hash); - - sealing.queue.set_pending(block); - - #[cfg(feature = "work-notify")] - { - // If push notifications are enabled we assume all work items are used. 
- if is_new && !self.listeners.read().is_empty() { - sealing.queue.use_last_ref(); - } - } - - (Some((block_hash, *block_header.difficulty(), block_header.number())), is_new) - } else { - (None, false) - }; - trace!( - target: "miner", - "prepare_work: leaving (last={:?})", - sealing.queue.peek_last_ref().map(|b| b.header.hash()) - ); - (work, is_new) - }; - - #[cfg(feature = "work-notify")] - { - if is_new { - work.map(|(pow_hash, difficulty, number)| { - for notifier in self.listeners.read().iter() { - notifier.notify(pow_hash, difficulty, number) - } - }); - } - } - - // NB: hack to use variables to avoid warning. - #[cfg(not(feature = "work-notify"))] - { - let _work = work; - let _is_new = is_new; - } - } - - /// Prepare a pending block. Returns the preparation status. - fn prepare_pending_block(&self, client: &C) -> BlockPreparationStatus where - C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce + Sync, - { - trace!(target: "miner", "prepare_pending_block: entering"); - let prepare_new = { - let mut sealing = self.sealing.lock(); - let have_work = sealing.queue.peek_last_ref().is_some(); - trace!(target: "miner", "prepare_pending_block: have_work={}", have_work); - if !have_work { - sealing.enabled = true; - true - } else { - false - } - }; - - let preparation_status = if prepare_new { - // -------------------------------------------------------------------------- - // | NOTE Code below requires sealing locks. | - // | Make sure to release the locks before calling that method. 
| - // -------------------------------------------------------------------------- - match self.prepare_block(client) { - Some((block, original_work_hash)) => { - self.prepare_work(block, original_work_hash); - BlockPreparationStatus::Succeeded - }, - None => BlockPreparationStatus::Failed, - } - } else { - BlockPreparationStatus::NotPrepared - }; - - let best_number = client.chain_info().best_block_number; - let mut sealing = self.sealing.lock(); - if sealing.last_request != Some(best_number) { - trace!( - target: "miner", - "prepare_pending_block: Miner received request (was {}, now {}) - waking up.", - sealing.last_request.unwrap_or(0), best_number - ); - sealing.last_request = Some(best_number); - } - - preparation_status - } - -/// Prepare pending block, check whether sealing is needed, and then update sealing. - fn prepare_and_update_sealing(&self, chain: &C) { - - // Make sure to do it after transaction is imported and lock is dropped. - // We need to create pending block and enable sealing. - if self.engine.seals_internally().unwrap_or(false) || self.prepare_pending_block(chain) == BlockPreparationStatus::NotPrepared { - // If new block has not been prepared (means we already had one) - // or Engine might be able to seal internally, - // we need to update sealing. - self.update_sealing(chain, ForceUpdateSealing::No); - } - } + /// Push listener that will handle new jobs + #[cfg(feature = "work-notify")] + pub fn add_work_listener(&self, notifier: Box) { + self.listeners.write().push(notifier); + self.sealing.lock().enabled = true; + } + + /// Set a callback to be notified about imported transactions' hashes. + pub fn add_transactions_listener(&self, f: Box) { + self.transaction_queue.add_listener(f); + } + + /// Creates new instance of miner Arc. 
+ pub fn new( + options: MinerOptions, + gas_pricer: GasPricer, + spec: &Spec, + accounts: A, + ) -> Self { + let limits = options.pool_limits.clone(); + let verifier_options = options.pool_verification_options.clone(); + let tx_queue_strategy = options.tx_queue_strategy; + let nonce_cache_size = cmp::max(4096, limits.max_count / 4); + let refuse_service_transactions = options.refuse_service_transactions; + let engine = spec.engine.clone(); + + Miner { + sealing: Mutex::new(SealingWork { + queue: UsingQueue::new(options.work_queue_size), + enabled: options.force_sealing || spec.engine.seals_internally().is_some(), + next_allowed_reseal: Instant::now(), + next_mandatory_reseal: Instant::now() + options.reseal_max_period, + last_request: None, + }), + params: RwLock::new(AuthoringParams::default()), + #[cfg(feature = "work-notify")] + listeners: RwLock::new(vec![]), + gas_pricer: Mutex::new(gas_pricer), + nonce_cache: NonceCache::new(nonce_cache_size), + options, + transaction_queue: Arc::new(TransactionQueue::new( + limits, + verifier_options, + tx_queue_strategy, + )), + accounts: Arc::new(accounts), + engine, + io_channel: RwLock::new(None), + service_transaction_checker: if refuse_service_transactions { + None + } else { + Some(ServiceTransactionChecker::default()) + }, + } + } + + /// Creates new instance of miner with given spec and accounts. + /// + /// NOTE This should be only used for tests. 
+ pub fn new_for_tests(spec: &Spec, accounts: Option>) -> Miner { + let minimal_gas_price = 0.into(); + Miner::new( + MinerOptions { + pool_verification_options: pool::verifier::Options { + minimal_gas_price, + block_gas_limit: U256::max_value(), + tx_gas_limit: U256::max_value(), + no_early_reject: false, + }, + reseal_min_period: Duration::from_secs(0), + ..Default::default() + }, + GasPricer::new_fixed(minimal_gas_price), + spec, + accounts.unwrap_or_default(), + ) + } + + /// Sets `IoChannel` + pub fn set_io_channel(&self, io_channel: IoChannel) { + *self.io_channel.write() = Some(io_channel); + } + + /// Sets in-blockchain checker for transactions. + pub fn set_in_chain_checker(&self, chain: &Arc) + where + C: TransactionInfo + Send + Sync + 'static, + { + let client = Arc::downgrade(chain); + self.transaction_queue + .set_in_chain_checker(move |hash| match client.upgrade() { + Some(info) => info.transaction_block(TransactionId::Hash(*hash)).is_some(), + None => false, + }); + } + + /// Clear all pending block states + pub fn clear(&self) { + self.sealing.lock().queue.reset(); + } + + /// Updates transaction queue verification limits. + /// + /// Limits consist of current block gas limit and minimal gas price. + pub fn update_transaction_queue_limits(&self, block_gas_limit: U256) { + trace!(target: "miner", "minimal_gas_price: recalibrating..."); + let txq = self.transaction_queue.clone(); + let mut options = self.options.pool_verification_options.clone(); + self.gas_pricer.lock().recalibrate(move |gas_price| { + debug!(target: "miner", "minimal_gas_price: Got gas price! {}", gas_price); + options.minimal_gas_price = gas_price; + options.block_gas_limit = block_gas_limit; + txq.set_verifier_options(options); + }); + } + + /// Returns ServiceTransactionChecker + pub fn service_transaction_checker(&self) -> Option { + self.service_transaction_checker.clone() + } + + /// Retrieves an existing pending block iff it's not older than given block number. 
+ /// + /// NOTE: This will not prepare a new pending block if it's not existing. + fn map_existing_pending_block(&self, f: F, latest_block_number: BlockNumber) -> Option + where + F: FnOnce(&ClosedBlock) -> T, + { + self.sealing.lock().queue.peek_last_ref().and_then(|b| { + // to prevent a data race between block import and updating pending block + // we allow the number to be equal. + if b.header.number() >= latest_block_number { + Some(f(b)) + } else { + None + } + }) + } + + fn pool_client<'a, C: 'a>(&'a self, chain: &'a C) -> PoolClient<'a, C> + where + C: BlockChain + CallContract, + { + PoolClient::new( + chain, + &self.nonce_cache, + &*self.engine, + &*self.accounts, + self.service_transaction_checker.as_ref(), + ) + } + + /// Prepares new block for sealing including top transactions from queue. + fn prepare_block(&self, chain: &C) -> Option<(ClosedBlock, Option)> + where + C: BlockChain + CallContract + BlockProducer + Nonce + Sync, + { + trace_time!("prepare_block"); + let chain_info = chain.chain_info(); + + // Open block + let (mut open_block, original_work_hash) = { + let mut sealing = self.sealing.lock(); + let last_work_hash = sealing.queue.peek_last_ref().map(|pb| pb.header.hash()); + let best_hash = chain_info.best_block_hash; + + // check to see if last ClosedBlock in would_seals is actually same parent block. + // if so + // duplicate, re-open and push any new transactions. + // if at least one was pushed successfully, close and enqueue new ClosedBlock; + // otherwise, leave everything alone. + // otherwise, author a fresh block. + let mut open_block = match sealing + .queue + .get_pending_if(|b| b.header.parent_hash() == &best_hash) + { + Some(old_block) => { + trace!(target: "miner", "prepare_block: Already have previous work; updating and returning"); + // add transactions to old_block + chain.reopen_block(old_block) + } + None => { + // block not found - create it. 
+ trace!(target: "miner", "prepare_block: No existing work - making new block"); + let params = self.params.read().clone(); + + match chain.prepare_open_block( + params.author, + params.gas_range_target, + params.extra_data, + ) { + Ok(block) => block, + Err(err) => { + warn!(target: "miner", "Open new block failed with error {:?}. This is likely an error in chain specificiations or on-chain consensus smart contracts.", err); + return None; + } + } + } + }; + + if self.options.infinite_pending_block { + open_block.remove_gas_limit(); + } + + (open_block, last_work_hash) + }; + + let mut invalid_transactions = HashSet::new(); + let mut not_allowed_transactions = HashSet::new(); + let mut senders_to_penalize = HashSet::new(); + let block_number = open_block.header.number(); + + let mut tx_count = 0usize; + let mut skipped_transactions = 0usize; + + let client = self.pool_client(chain); + let engine_params = self.engine.params(); + let min_tx_gas: U256 = self + .engine + .schedule(chain_info.best_block_number) + .tx_gas + .into(); + let nonce_cap: Option = if chain_info.best_block_number + 1 + >= engine_params.dust_protection_transition + { + Some((engine_params.nonce_cap_increment * (chain_info.best_block_number + 1)).into()) + } else { + None + }; + // we will never need more transactions than limit divided by min gas + let max_transactions = if min_tx_gas.is_zero() { + usize::max_value() + } else { + MAX_SKIPPED_TRANSACTIONS.saturating_add( + cmp::min( + *open_block.header.gas_limit() / min_tx_gas, + u64::max_value().into(), + ) + .as_u64() as usize, + ) + }; + + let pending: Vec> = self.transaction_queue.pending( + client.clone(), + pool::PendingSettings { + block_number: chain_info.best_block_number, + current_timestamp: chain_info.best_block_timestamp, + nonce_cap, + max_len: max_transactions, + ordering: miner::PendingOrdering::Priority, + }, + ); + + let took_ms = |elapsed: &Duration| { + elapsed.as_secs() * 1000 + elapsed.subsec_nanos() as u64 / 1_000_000 + 
}; + + let block_start = Instant::now(); + debug!(target: "miner", "Attempting to push {} transactions.", pending.len()); + + for tx in pending { + let start = Instant::now(); + + let transaction = tx.signed().clone(); + let hash = transaction.hash(); + let sender = transaction.sender(); + + // Re-verify transaction again vs current state. + let result = client + .verify_for_pending_block(&transaction, &open_block.header) + .map_err(|e| e.into()) + .and_then(|_| open_block.push_transaction(transaction, None)); + + let took = start.elapsed(); + + // Check for heavy transactions + match self.options.tx_queue_penalization { + Penalization::Enabled { + ref offend_threshold, + } if &took > offend_threshold => { + senders_to_penalize.insert(sender); + debug!(target: "miner", "Detected heavy transaction ({} ms). Penalizing sender.", took_ms(&took)); + } + _ => {} + } + + debug!(target: "miner", "Adding tx {:?} took {} ms", hash, took_ms(&took)); + match result { + Err(Error( + ErrorKind::Execution(ExecutionError::BlockGasLimitReached { + gas_limit, + gas_used, + gas, + }), + _, + )) => { + debug!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?} (limit: {:?}, used: {:?}, gas: {:?})", hash, gas_limit, gas_used, gas); + + // Penalize transaction if it's above current gas limit + if gas > gas_limit { + debug!(target: "txqueue", "[{:?}] Transaction above block gas limit.", hash); + invalid_transactions.insert(hash); + } + + // Exit early if gas left is smaller then min_tx_gas + let gas_left = gas_limit - gas_used; + if gas_left < min_tx_gas { + debug!(target: "miner", "Remaining gas is lower than minimal gas for a transaction. Block is full."); + break; + } + + // Avoid iterating over the entire queue in case block is almost full. + skipped_transactions += 1; + if skipped_transactions > MAX_SKIPPED_TRANSACTIONS { + debug!(target: "miner", "Reached skipped transactions threshold. 
Assuming block is full."); + break; + } + } + // Invalid nonce error can happen only if previous transaction is skipped because of gas limit. + // If there is errornous state of transaction queue it will be fixed when next block is imported. + Err(Error( + ErrorKind::Execution(ExecutionError::InvalidNonce { expected, got }), + _, + )) => { + debug!(target: "miner", "Skipping adding transaction to block because of invalid nonce: {:?} (expected: {:?}, got: {:?})", hash, expected, got); + } + // already have transaction - ignore + Err(Error(ErrorKind::Transaction(transaction::Error::AlreadyImported), _)) => {} + Err(Error(ErrorKind::Transaction(transaction::Error::NotAllowed), _)) => { + not_allowed_transactions.insert(hash); + debug!(target: "miner", "Skipping non-allowed transaction for sender {:?}", hash); + } + Err(e) => { + debug!(target: "txqueue", "[{:?}] Marking as invalid: {:?}.", hash, e); + debug!( + target: "miner", "Error adding transaction to block: number={}. transaction_hash={:?}, Error: {:?}", block_number, hash, e + ); + invalid_transactions.insert(hash); + } + // imported ok + _ => tx_count += 1, + } + } + let elapsed = block_start.elapsed(); + debug!(target: "miner", "Pushed {} transactions in {} ms", tx_count, took_ms(&elapsed)); + + let block = match open_block.close() { + Ok(block) => block, + Err(err) => { + warn!(target: "miner", "Closing the block failed with error {:?}. This is likely an error in chain specificiations or on-chain consensus smart contracts.", err); + return None; + } + }; + + { + self.transaction_queue + .remove(invalid_transactions.iter(), true); + self.transaction_queue + .remove(not_allowed_transactions.iter(), false); + self.transaction_queue.penalize(senders_to_penalize.iter()); + } + + Some((block, original_work_hash)) + } + + /// Returns `true` if we should create pending block even if some other conditions are not met. + /// + /// In general we always seal iff: + /// 1. 
--force-sealing CLI parameter is provided + /// 2. There are listeners awaiting new work packages (e.g. remote work notifications or stratum). + fn forced_sealing(&self) -> bool { + let listeners_empty = { + #[cfg(feature = "work-notify")] + { + self.listeners.read().is_empty() + } + #[cfg(not(feature = "work-notify"))] + { + true + } + }; + + self.options.force_sealing || !listeners_empty + } + + /// Check is reseal is allowed and necessary. + fn requires_reseal(&self, best_block: BlockNumber) -> bool { + let mut sealing = self.sealing.lock(); + if !sealing.enabled { + trace!(target: "miner", "requires_reseal: sealing is disabled"); + return false; + } + + if !sealing.reseal_allowed() { + trace!(target: "miner", "requires_reseal: reseal too early"); + return false; + } + + trace!(target: "miner", "requires_reseal: sealing enabled"); + + // Disable sealing if there were no requests for SEALING_TIMEOUT_IN_BLOCKS + let had_requests = sealing + .last_request + .map(|last_request| { + best_block.saturating_sub(last_request) <= SEALING_TIMEOUT_IN_BLOCKS + }) + .unwrap_or(false); + + // keep sealing enabled if any of the conditions is met + let sealing_enabled = self.forced_sealing() + || self.transaction_queue.has_local_pending_transactions() + || self.engine.seals_internally() == Some(true) + || had_requests; + + let should_disable_sealing = !sealing_enabled; + + trace!(target: "miner", "requires_reseal: should_disable_sealing={}; forced={:?}, has_local={:?}, internal={:?}, had_requests={:?}", + should_disable_sealing, + self.forced_sealing(), + self.transaction_queue.has_local_pending_transactions(), + self.engine.seals_internally(), + had_requests, + ); + + if should_disable_sealing { + trace!(target: "miner", "Miner sleeping (current {}, last {})", best_block, sealing.last_request.unwrap_or(0)); + sealing.enabled = false; + sealing.queue.reset(); + false + } else { + // sealing enabled and we don't want to sleep. 
+ sealing.next_allowed_reseal = Instant::now() + self.options.reseal_min_period; + true + } + } + + // TODO: (https://github.com/paritytech/parity-ethereum/issues/10407) + // This is only used in authority_round path, and should be refactored to merge with the other seal() path. + // Attempts to perform internal sealing (one that does not require work) and handles the result depending on the + // type of Seal. + fn seal_and_import_block_internally(&self, chain: &C, block: ClosedBlock) -> bool + where + C: BlockChain + SealedBlockImporter, + { + { + let sealing = self.sealing.lock(); + if block.transactions.is_empty() + && !self.forced_sealing() + && Instant::now() <= sealing.next_mandatory_reseal + { + return false; + } + } + + trace!(target: "miner", "seal_block_internally: attempting internal seal."); + + let parent_header = match chain.block_header(BlockId::Hash(*block.header.parent_hash())) { + Some(h) => match h.decode() { + Ok(decoded_hdr) => decoded_hdr, + Err(_) => return false, + }, + None => return false, + }; + + match self.engine.generate_seal(&block, &parent_header) { + // Save proposal for later seal submission and broadcast it. + Seal::Proposal(seal) => { + trace!(target: "miner", "Received a Proposal seal."); + { + let mut sealing = self.sealing.lock(); + sealing.next_mandatory_reseal = Instant::now() + self.options.reseal_max_period; + sealing.queue.set_pending(block.clone()); + sealing.queue.use_last_ref(); + } + + block + .lock() + .seal(&*self.engine, seal) + .map(|sealed| { + chain.broadcast_proposal_block(sealed); + true + }) + .unwrap_or_else(|e| { + warn!( + "ERROR: seal failed when given internally generated seal: {}", + e + ); + false + }) + } + // Directly import a regular sealed block. 
+ Seal::Regular(seal) => { + trace!(target: "miner", "Received a Regular seal."); + { + let mut sealing = self.sealing.lock(); + sealing.next_mandatory_reseal = Instant::now() + self.options.reseal_max_period; + } + + block + .lock() + .seal(&*self.engine, seal) + .map(|sealed| chain.import_sealed_block(sealed).is_ok()) + .unwrap_or_else(|e| { + warn!( + "ERROR: seal failed when given internally generated seal: {}", + e + ); + false + }) + } + Seal::None => false, + } + } + + /// Prepares work which has to be done to seal. + fn prepare_work(&self, block: ClosedBlock, original_work_hash: Option) { + let (work, is_new) = { + let block_header = block.header.clone(); + let block_hash = block_header.hash(); + + let mut sealing = self.sealing.lock(); + let last_work_hash = sealing.queue.peek_last_ref().map(|pb| pb.header.hash()); + + trace!( + target: "miner", + "prepare_work: Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", + original_work_hash, last_work_hash, block_hash + ); + + let (work, is_new) = if last_work_hash.map_or(true, |h| h != block_hash) { + trace!( + target: "miner", + "prepare_work: Pushing a new, refreshed or borrowed pending {}...", + block_hash + ); + let is_new = original_work_hash.map_or(true, |h| h != block_hash); + + sealing.queue.set_pending(block); + + #[cfg(feature = "work-notify")] + { + // If push notifications are enabled we assume all work items are used. 
+ if is_new && !self.listeners.read().is_empty() { + sealing.queue.use_last_ref(); + } + } + + ( + Some(( + block_hash, + *block_header.difficulty(), + block_header.number(), + )), + is_new, + ) + } else { + (None, false) + }; + trace!( + target: "miner", + "prepare_work: leaving (last={:?})", + sealing.queue.peek_last_ref().map(|b| b.header.hash()) + ); + (work, is_new) + }; + + #[cfg(feature = "work-notify")] + { + if is_new { + work.map(|(pow_hash, difficulty, number)| { + for notifier in self.listeners.read().iter() { + notifier.notify(pow_hash, difficulty, number) + } + }); + } + } + + // NB: hack to use variables to avoid warning. + #[cfg(not(feature = "work-notify"))] + { + let _work = work; + let _is_new = is_new; + } + } + + /// Prepare a pending block. Returns the preparation status. + fn prepare_pending_block(&self, client: &C) -> BlockPreparationStatus + where + C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce + Sync, + { + trace!(target: "miner", "prepare_pending_block: entering"); + let prepare_new = { + let mut sealing = self.sealing.lock(); + let have_work = sealing.queue.peek_last_ref().is_some(); + trace!(target: "miner", "prepare_pending_block: have_work={}", have_work); + if !have_work { + sealing.enabled = true; + true + } else { + false + } + }; + + let preparation_status = if prepare_new { + // -------------------------------------------------------------------------- + // | NOTE Code below requires sealing locks. | + // | Make sure to release the locks before calling that method. 
| + // -------------------------------------------------------------------------- + match self.prepare_block(client) { + Some((block, original_work_hash)) => { + self.prepare_work(block, original_work_hash); + BlockPreparationStatus::Succeeded + } + None => BlockPreparationStatus::Failed, + } + } else { + BlockPreparationStatus::NotPrepared + }; + + let best_number = client.chain_info().best_block_number; + let mut sealing = self.sealing.lock(); + if sealing.last_request != Some(best_number) { + trace!( + target: "miner", + "prepare_pending_block: Miner received request (was {}, now {}) - waking up.", + sealing.last_request.unwrap_or(0), best_number + ); + sealing.last_request = Some(best_number); + } + + preparation_status + } + + /// Prepare pending block, check whether sealing is needed, and then update sealing. + fn prepare_and_update_sealing(&self, chain: &C) { + // Make sure to do it after transaction is imported and lock is dropped. + // We need to create pending block and enable sealing. + if self.engine.seals_internally().unwrap_or(false) + || self.prepare_pending_block(chain) == BlockPreparationStatus::NotPrepared + { + // If new block has not been prepared (means we already had one) + // or Engine might be able to seal internally, + // we need to update sealing. 
+ self.update_sealing(chain, ForceUpdateSealing::No); + } + } } -const SEALING_TIMEOUT_IN_BLOCKS : u64 = 5; +const SEALING_TIMEOUT_IN_BLOCKS: u64 = 5; impl miner::MinerService for Miner { - type State = State<::state_db::StateDB>; + type State = State<::state_db::StateDB>; - fn authoring_params(&self) -> AuthoringParams { - self.params.read().clone() - } + fn authoring_params(&self) -> AuthoringParams { + self.params.read().clone() + } - fn set_gas_range_target(&self, gas_range_target: (U256, U256)) { - self.params.write().gas_range_target = gas_range_target; - } + fn set_gas_range_target(&self, gas_range_target: (U256, U256)) { + self.params.write().gas_range_target = gas_range_target; + } - fn set_extra_data(&self, extra_data: Bytes) { - self.params.write().extra_data = extra_data; - } + fn set_extra_data(&self, extra_data: Bytes) { + self.params.write().extra_data = extra_data; + } - fn set_author(&self, author: Author) { - self.params.write().author = author.address(); + fn set_author(&self, author: Author) { + self.params.write().author = author.address(); - if let Author::Sealer(signer) = author { - if self.engine.seals_internally().is_some() { - // Enable sealing - self.sealing.lock().enabled = true; - // -------------------------------------------------------------------------- - // | NOTE Code below may require author and sealing locks | - // | (some `Engine`s call `EngineClient.update_sealing()`) | - // | Make sure to release the locks before calling that method. 
| - // -------------------------------------------------------------------------- - self.engine.set_signer(signer); - } else { - warn!("Setting an EngineSigner while Engine does not require one."); - } - } - } + if let Author::Sealer(signer) = author { + if self.engine.seals_internally().is_some() { + // Enable sealing + self.sealing.lock().enabled = true; + // -------------------------------------------------------------------------- + // | NOTE Code below may require author and sealing locks | + // | (some `Engine`s call `EngineClient.update_sealing()`) | + // | Make sure to release the locks before calling that method. | + // -------------------------------------------------------------------------- + self.engine.set_signer(signer); + } else { + warn!("Setting an EngineSigner while Engine does not require one."); + } + } + } - fn sensible_gas_price(&self) -> U256 { - // 10% above our minimum. - self.transaction_queue.current_worst_gas_price() * 110u32 / 100 - } + fn sensible_gas_price(&self) -> U256 { + // 10% above our minimum. 
+ self.transaction_queue.current_worst_gas_price() * 110u32 / 100 + } - fn sensible_gas_limit(&self) -> U256 { - self.params.read().gas_range_target.0 / 5 - } + fn sensible_gas_limit(&self) -> U256 { + self.params.read().gas_range_target.0 / 5 + } - fn set_minimal_gas_price(&self, new_price: U256) -> Result { - match *self.gas_pricer.lock() { - // Binding the gas pricer to `gp` here to prevent - // a deadlock when calling recalibrate() - ref mut gp @ GasPricer::Fixed(_) => { - trace!(target: "miner", "minimal_gas_price: recalibrating fixed..."); - *gp = GasPricer::new_fixed(new_price); + fn set_minimal_gas_price(&self, new_price: U256) -> Result { + match *self.gas_pricer.lock() { + // Binding the gas pricer to `gp` here to prevent + // a deadlock when calling recalibrate() + ref mut gp @ GasPricer::Fixed(_) => { + trace!(target: "miner", "minimal_gas_price: recalibrating fixed..."); + *gp = GasPricer::new_fixed(new_price); - let txq = self.transaction_queue.clone(); - let mut options = self.options.pool_verification_options.clone(); - gp.recalibrate(move |gas_price| { - debug!(target: "miner", "minimal_gas_price: Got gas price! {}", gas_price); - options.minimal_gas_price = gas_price; - txq.set_verifier_options(options); - }); + let txq = self.transaction_queue.clone(); + let mut options = self.options.pool_verification_options.clone(); + gp.recalibrate(move |gas_price| { + debug!(target: "miner", "minimal_gas_price: Got gas price! 
{}", gas_price); + options.minimal_gas_price = gas_price; + txq.set_verifier_options(options); + }); - Ok(true) - }, - #[cfg(feature = "price-info")] - GasPricer::Calibrated(_) => { - let error_msg = "Can't update fixed gas price while automatic gas calibration is enabled."; - return Err(error_msg); - }, - } - } + Ok(true) + } + #[cfg(feature = "price-info")] + GasPricer::Calibrated(_) => { + let error_msg = + "Can't update fixed gas price while automatic gas calibration is enabled."; + return Err(error_msg); + } + } + } - fn import_external_transactions( - &self, - chain: &C, - transactions: Vec - ) -> Vec> { - trace!(target: "external_tx", "Importing external transactions"); - let client = self.pool_client(chain); - let results = self.transaction_queue.import( - client, - transactions.into_iter().map(pool::verifier::Transaction::Unverified).collect(), - ); + fn import_external_transactions( + &self, + chain: &C, + transactions: Vec, + ) -> Vec> { + trace!(target: "external_tx", "Importing external transactions"); + let client = self.pool_client(chain); + let results = self.transaction_queue.import( + client, + transactions + .into_iter() + .map(pool::verifier::Transaction::Unverified) + .collect(), + ); - // -------------------------------------------------------------------------- - // | NOTE Code below requires sealing locks. | - // | Make sure to release the locks before calling that method. | - // -------------------------------------------------------------------------- - if !results.is_empty() && self.options.reseal_on_external_tx && self.sealing.lock().reseal_allowed() { - self.prepare_and_update_sealing(chain); - } + // -------------------------------------------------------------------------- + // | NOTE Code below requires sealing locks. | + // | Make sure to release the locks before calling that method. 
| + // -------------------------------------------------------------------------- + if !results.is_empty() + && self.options.reseal_on_external_tx + && self.sealing.lock().reseal_allowed() + { + self.prepare_and_update_sealing(chain); + } - results - } + results + } - fn import_own_transaction( - &self, - chain: &C, - pending: PendingTransaction - ) -> Result<(), transaction::Error> { - // note: you may want to use `import_claimed_local_transaction` instead of this one. + fn import_own_transaction( + &self, + chain: &C, + pending: PendingTransaction, + ) -> Result<(), transaction::Error> { + // note: you may want to use `import_claimed_local_transaction` instead of this one. - trace!(target: "own_tx", "Importing transaction: {:?}", pending); + trace!(target: "own_tx", "Importing transaction: {:?}", pending); - let client = self.pool_client(chain); - let imported = self.transaction_queue.import( - client, - vec![pool::verifier::Transaction::Local(pending)] - ).pop().expect("one result returned per added transaction; one added => one result; qed"); + let client = self.pool_client(chain); + let imported = self + .transaction_queue + .import(client, vec![pool::verifier::Transaction::Local(pending)]) + .pop() + .expect("one result returned per added transaction; one added => one result; qed"); - // -------------------------------------------------------------------------- - // | NOTE Code below requires sealing locks. | - // | Make sure to release the locks before calling that method. | - // -------------------------------------------------------------------------- - if imported.is_ok() && self.options.reseal_on_own_tx && self.sealing.lock().reseal_allowed() { - self.prepare_and_update_sealing(chain); - } + // -------------------------------------------------------------------------- + // | NOTE Code below requires sealing locks. | + // | Make sure to release the locks before calling that method. 
| + // -------------------------------------------------------------------------- + if imported.is_ok() && self.options.reseal_on_own_tx && self.sealing.lock().reseal_allowed() + { + self.prepare_and_update_sealing(chain); + } - imported - } + imported + } - fn import_claimed_local_transaction( - &self, - chain: &C, - pending: PendingTransaction, - trusted: bool - ) -> Result<(), transaction::Error> { - // treat the tx as local if the option is enabled, if we have the account, or if - // the account is specified as a Prioritized Local Addresses - let sender = pending.sender(); - let treat_as_local = trusted - || !self.options.tx_queue_no_unfamiliar_locals - || self.accounts.is_local(&sender); + fn import_claimed_local_transaction( + &self, + chain: &C, + pending: PendingTransaction, + trusted: bool, + ) -> Result<(), transaction::Error> { + // treat the tx as local if the option is enabled, if we have the account, or if + // the account is specified as a Prioritized Local Addresses + let sender = pending.sender(); + let treat_as_local = trusted + || !self.options.tx_queue_no_unfamiliar_locals + || self.accounts.is_local(&sender); - if treat_as_local { - self.import_own_transaction(chain, pending) - } else { - // We want to replicate behaviour for external transactions if we're not going to treat - // this as local. This is important with regards to sealing blocks - self.import_external_transactions(chain, vec![pending.transaction.into()]) - .pop().expect("one result per tx, as in `import_own_transaction`") - } - } + if treat_as_local { + self.import_own_transaction(chain, pending) + } else { + // We want to replicate behaviour for external transactions if we're not going to treat + // this as local. 
This is important with regards to sealing blocks + self.import_external_transactions(chain, vec![pending.transaction.into()]) + .pop() + .expect("one result per tx, as in `import_own_transaction`") + } + } - fn local_transactions(&self) -> BTreeMap { - self.transaction_queue.local_transactions() - } + fn local_transactions(&self) -> BTreeMap { + self.transaction_queue.local_transactions() + } - fn queued_transactions(&self) -> Vec> { - self.transaction_queue.all_transactions() - } + fn queued_transactions(&self) -> Vec> { + self.transaction_queue.all_transactions() + } - fn queued_transaction_hashes(&self) -> Vec { - self.transaction_queue.all_transaction_hashes() - } + fn queued_transaction_hashes(&self) -> Vec { + self.transaction_queue.all_transaction_hashes() + } - fn pending_transaction_hashes(&self, chain: &C) -> BTreeSet where - C: ChainInfo + Sync, - { - let chain_info = chain.chain_info(); + fn pending_transaction_hashes(&self, chain: &C) -> BTreeSet + where + C: ChainInfo + Sync, + { + let chain_info = chain.chain_info(); - let from_queue = || self.transaction_queue.pending_hashes( - |sender| self.nonce_cache.get(sender), - ); + let from_queue = || { + self.transaction_queue + .pending_hashes(|sender| self.nonce_cache.get(sender)) + }; - let from_pending = || { - self.map_existing_pending_block(|sealing| { - sealing.transactions - .iter() - .map(|signed| signed.hash()) - .collect() - }, chain_info.best_block_number) - }; + let from_pending = || { + self.map_existing_pending_block( + |sealing| { + sealing + .transactions + .iter() + .map(|signed| signed.hash()) + .collect() + }, + chain_info.best_block_number, + ) + }; - match self.options.pending_set { - PendingSet::AlwaysQueue => { - from_queue() - }, - PendingSet::AlwaysSealing => { - from_pending().unwrap_or_default() - }, - PendingSet::SealingOrElseQueue => { - from_pending().unwrap_or_else(from_queue) - }, - } - } + match self.options.pending_set { + PendingSet::AlwaysQueue => from_queue(), + 
PendingSet::AlwaysSealing => from_pending().unwrap_or_default(), + PendingSet::SealingOrElseQueue => from_pending().unwrap_or_else(from_queue), + } + } - fn ready_transactions(&self, chain: &C, max_len: usize, ordering: miner::PendingOrdering) - -> Vec> - where - C: ChainInfo + Nonce + Sync, - { - let chain_info = chain.chain_info(); + fn ready_transactions( + &self, + chain: &C, + max_len: usize, + ordering: miner::PendingOrdering, + ) -> Vec> + where + C: ChainInfo + Nonce + Sync, + { + let chain_info = chain.chain_info(); - let from_queue = || { - // We propagate transactions over the nonce cap. - // The mechanism is only to limit number of transactions in pending block - // those transactions are valid and will just be ready to be included in next block. - let nonce_cap = None; + let from_queue = || { + // We propagate transactions over the nonce cap. + // The mechanism is only to limit number of transactions in pending block + // those transactions are valid and will just be ready to be included in next block. 
+ let nonce_cap = None; - self.transaction_queue.pending( - CachedNonceClient::new(chain, &self.nonce_cache), - pool::PendingSettings { - block_number: chain_info.best_block_number, - current_timestamp: chain_info.best_block_timestamp, - nonce_cap, - max_len, - ordering, - }, - ) - }; + self.transaction_queue.pending( + CachedNonceClient::new(chain, &self.nonce_cache), + pool::PendingSettings { + block_number: chain_info.best_block_number, + current_timestamp: chain_info.best_block_timestamp, + nonce_cap, + max_len, + ordering, + }, + ) + }; - let from_pending = || { - self.map_existing_pending_block(|sealing| { - sealing.transactions - .iter() - .map(|signed| pool::VerifiedTransaction::from_pending_block_transaction(signed.clone())) - .map(Arc::new) - .take(max_len) - .collect() - }, chain_info.best_block_number) - }; + let from_pending = || { + self.map_existing_pending_block( + |sealing| { + sealing + .transactions + .iter() + .map(|signed| { + pool::VerifiedTransaction::from_pending_block_transaction( + signed.clone(), + ) + }) + .map(Arc::new) + .take(max_len) + .collect() + }, + chain_info.best_block_number, + ) + }; - match self.options.pending_set { - PendingSet::AlwaysQueue => { - from_queue() - }, - PendingSet::AlwaysSealing => { - from_pending().unwrap_or_default() - }, - PendingSet::SealingOrElseQueue => { - from_pending().unwrap_or_else(from_queue) - }, - } - } + match self.options.pending_set { + PendingSet::AlwaysQueue => from_queue(), + PendingSet::AlwaysSealing => from_pending().unwrap_or_default(), + PendingSet::SealingOrElseQueue => from_pending().unwrap_or_else(from_queue), + } + } - fn next_nonce(&self, chain: &C, address: &Address) -> U256 where - C: Nonce + Sync, - { - self.transaction_queue.next_nonce(CachedNonceClient::new(chain, &self.nonce_cache), address) - .unwrap_or_else(|| chain.latest_nonce(address)) - } + fn next_nonce(&self, chain: &C, address: &Address) -> U256 + where + C: Nonce + Sync, + { + self.transaction_queue + 
.next_nonce(CachedNonceClient::new(chain, &self.nonce_cache), address) + .unwrap_or_else(|| chain.latest_nonce(address)) + } - fn transaction(&self, hash: &H256) -> Option> { - self.transaction_queue.find(hash) - } + fn transaction(&self, hash: &H256) -> Option> { + self.transaction_queue.find(hash) + } - fn remove_transaction(&self, hash: &H256) -> Option> { - self.transaction_queue.remove(::std::iter::once(hash), false) - .pop() - .expect("remove() returns one result per hash; one hash passed; qed") - } + fn remove_transaction(&self, hash: &H256) -> Option> { + self.transaction_queue + .remove(::std::iter::once(hash), false) + .pop() + .expect("remove() returns one result per hash; one hash passed; qed") + } - fn queue_status(&self) -> QueueStatus { - self.transaction_queue.status() - } + fn queue_status(&self) -> QueueStatus { + self.transaction_queue.status() + } - fn pending_receipts(&self, best_block: BlockNumber) -> Option> { - self.map_existing_pending_block(|pending| { - let receipts = &pending.receipts; - pending.transactions - .iter() - .enumerate() - .map(|(index, tx)| { - let prev_gas = if index == 0 { Default::default() } else { receipts[index - 1].gas_used }; - let receipt = &receipts[index]; - RichReceipt { - from: tx.sender(), - to: match tx.action { - Action::Create => None, - Action::Call(ref address) => Some(*address), - }, - transaction_hash: tx.hash(), - transaction_index: index, - cumulative_gas_used: receipt.gas_used, - gas_used: receipt.gas_used - prev_gas, - contract_address: match tx.action { - Action::Call(_) => None, - Action::Create => { - let sender = tx.sender(); - Some(contract_address(self.engine.create_address_scheme(pending.header.number()), &sender, &tx.nonce, &tx.data).0) - } - }, - logs: receipt.logs.clone(), - log_bloom: receipt.log_bloom, - outcome: receipt.outcome.clone(), - } - }) - .collect() - }, best_block) - } + fn pending_receipts(&self, best_block: BlockNumber) -> Option> { + self.map_existing_pending_block( + 
|pending| { + let receipts = &pending.receipts; + pending + .transactions + .iter() + .enumerate() + .map(|(index, tx)| { + let prev_gas = if index == 0 { + Default::default() + } else { + receipts[index - 1].gas_used + }; + let receipt = &receipts[index]; + RichReceipt { + from: tx.sender(), + to: match tx.action { + Action::Create => None, + Action::Call(ref address) => Some(*address), + }, + transaction_hash: tx.hash(), + transaction_index: index, + cumulative_gas_used: receipt.gas_used, + gas_used: receipt.gas_used - prev_gas, + contract_address: match tx.action { + Action::Call(_) => None, + Action::Create => { + let sender = tx.sender(); + Some( + contract_address( + self.engine + .create_address_scheme(pending.header.number()), + &sender, + &tx.nonce, + &tx.data, + ) + .0, + ) + } + }, + logs: receipt.logs.clone(), + log_bloom: receipt.log_bloom, + outcome: receipt.outcome.clone(), + } + }) + .collect() + }, + best_block, + ) + } - /// Update sealing if required. - /// Prepare the block and work if the Engine does not seal internally. - fn update_sealing(&self, chain: &C, force: ForceUpdateSealing) where - C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce + Sync, - { - trace!(target: "miner", "update_sealing"); + /// Update sealing if required. + /// Prepare the block and work if the Engine does not seal internally. + fn update_sealing(&self, chain: &C, force: ForceUpdateSealing) + where + C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce + Sync, + { + trace!(target: "miner", "update_sealing"); - // Do nothing if we don't want to force update_sealing and reseal is not required. - // but note that `requires_reseal` updates internal state. - if force == ForceUpdateSealing::No && - !self.requires_reseal(chain.chain_info().best_block_number) - { - return; - } + // Do nothing if we don't want to force update_sealing and reseal is not required. + // but note that `requires_reseal` updates internal state. 
+ if force == ForceUpdateSealing::No + && !self.requires_reseal(chain.chain_info().best_block_number) + { + return; + } - // -------------------------------------------------------------------------- - // | NOTE Code below requires sealing locks. | - // | Make sure to release the locks before calling that method. | - // -------------------------------------------------------------------------- - trace!(target: "miner", "update_sealing: preparing a block"); - let (block, original_work_hash) = match self.prepare_block(chain) { - Some((block, original_work_hash)) => (block, original_work_hash), - None => return, - }; + // -------------------------------------------------------------------------- + // | NOTE Code below requires sealing locks. | + // | Make sure to release the locks before calling that method. | + // -------------------------------------------------------------------------- + trace!(target: "miner", "update_sealing: preparing a block"); + let (block, original_work_hash) = match self.prepare_block(chain) { + Some((block, original_work_hash)) => (block, original_work_hash), + None => return, + }; - // refuse to seal the first block of the chain if it contains hard forks - // which should be on by default. - if block.header.number() == 1 { - if let Some(name) = self.engine.params().nonzero_bugfix_hard_fork() { - warn!("Your chain specification contains one or more hard forks which are required to be \ + // refuse to seal the first block of the chain if it contains hard forks + // which should be on by default. + if block.header.number() == 1 { + if let Some(name) = self.engine.params().nonzero_bugfix_hard_fork() { + warn!("Your chain specification contains one or more hard forks which are required to be \ on by default. 
Please remove these forks and start your chain again: {}.", name); - return; - } - } + return; + } + } - match self.engine.seals_internally() { - Some(true) => { - trace!(target: "miner", "update_sealing: engine indicates internal sealing"); - if self.seal_and_import_block_internally(chain, block) { - trace!(target: "miner", "update_sealing: imported internally sealed block"); - } - return - }, - Some(false) => { - trace!(target: "miner", "update_sealing: engine is not keen to seal internally right now"); - // anyway, save the block for later use - self.sealing.lock().queue.set_pending(block); - }, - None => { - trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work"); - self.prepare_work(block, original_work_hash); - }, - }; - } + match self.engine.seals_internally() { + Some(true) => { + trace!(target: "miner", "update_sealing: engine indicates internal sealing"); + if self.seal_and_import_block_internally(chain, block) { + trace!(target: "miner", "update_sealing: imported internally sealed block"); + } + return; + } + Some(false) => { + trace!(target: "miner", "update_sealing: engine is not keen to seal internally right now"); + // anyway, save the block for later use + self.sealing.lock().queue.set_pending(block); + } + None => { + trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work"); + self.prepare_work(block, original_work_hash); + } + }; + } - fn is_currently_sealing(&self) -> bool { - self.sealing.lock().enabled - } + fn is_currently_sealing(&self) -> bool { + self.sealing.lock().enabled + } - fn work_package(&self, chain: &C) -> Option<(H256, BlockNumber, u64, U256)> where - C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce + Sync, - { - if self.engine.seals_internally().is_some() { - return None; - } + fn work_package(&self, chain: &C) -> Option<(H256, BlockNumber, u64, U256)> + where + C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce 
+ Sync, + { + if self.engine.seals_internally().is_some() { + return None; + } - self.prepare_pending_block(chain); + self.prepare_pending_block(chain); - self.sealing.lock().queue.use_last_ref().map(|b| { - let header = &b.header; - (header.hash(), header.number(), header.timestamp(), *header.difficulty()) - }) - } + self.sealing.lock().queue.use_last_ref().map(|b| { + let header = &b.header; + ( + header.hash(), + header.number(), + header.timestamp(), + *header.difficulty(), + ) + }) + } - // Note used for external submission (PoW) and internally by sealing engines. - fn submit_seal(&self, block_hash: H256, seal: Vec) -> Result { - let result = - if let Some(b) = self.sealing.lock().queue.get_used_if( - if self.options.enable_resubmission { - GetAction::Clone - } else { - GetAction::Take - }, - |b| &b.header.bare_hash() == &block_hash - ) { - trace!(target: "miner", "Submitted block {}={} with seal {:?}", block_hash, b.header.bare_hash(), seal); - b.lock().try_seal(&*self.engine, seal).or_else(|e| { - warn!(target: "miner", "Mined solution rejected: {}", e); - Err(ErrorKind::PowInvalid.into()) - }) - } else { - warn!(target: "miner", "Submitted solution rejected: Block unknown or out of date."); - Err(ErrorKind::PowHashInvalid.into()) - }; + // Note used for external submission (PoW) and internally by sealing engines. 
+ fn submit_seal(&self, block_hash: H256, seal: Vec) -> Result { + let result = if let Some(b) = self.sealing.lock().queue.get_used_if( + if self.options.enable_resubmission { + GetAction::Clone + } else { + GetAction::Take + }, + |b| &b.header.bare_hash() == &block_hash, + ) { + trace!(target: "miner", "Submitted block {}={} with seal {:?}", block_hash, b.header.bare_hash(), seal); + b.lock().try_seal(&*self.engine, seal).or_else(|e| { + warn!(target: "miner", "Mined solution rejected: {}", e); + Err(ErrorKind::PowInvalid.into()) + }) + } else { + warn!(target: "miner", "Submitted solution rejected: Block unknown or out of date."); + Err(ErrorKind::PowHashInvalid.into()) + }; - result.and_then(|sealed| { + result.and_then(|sealed| { let n = sealed.header.number(); let h = sealed.header.hash(); info!(target: "miner", "Submitted block imported OK. #{}: {}", Colour::White.bold().paint(format!("{}", n)), Colour::White.bold().paint(format!("{:x}", h))); Ok(sealed) }) - } + } - fn chain_new_blocks(&self, chain: &C, imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256], is_internal_import: bool) - where C: miner::BlockChainClient, - { - trace!(target: "miner", "chain_new_blocks"); + fn chain_new_blocks( + &self, + chain: &C, + imported: &[H256], + _invalid: &[H256], + enacted: &[H256], + retracted: &[H256], + is_internal_import: bool, + ) where + C: miner::BlockChainClient, + { + trace!(target: "miner", "chain_new_blocks"); - // 1. We ignore blocks that were `imported` unless resealing on new uncles is enabled. - // 2. We ignore blocks that are `invalid` because it doesn't have any meaning in terms of the transactions that - // are in those blocks + // 1. We ignore blocks that were `imported` unless resealing on new uncles is enabled. + // 2. 
We ignore blocks that are `invalid` because it doesn't have any meaning in terms of the transactions that + // are in those blocks - let has_new_best_block = enacted.len() > 0; + let has_new_best_block = enacted.len() > 0; - if has_new_best_block { - // Clear nonce cache - self.nonce_cache.clear(); - } + if has_new_best_block { + // Clear nonce cache + self.nonce_cache.clear(); + } - // First update gas limit in transaction queue and minimal gas price. - let gas_limit = *chain.best_block_header().gas_limit(); - self.update_transaction_queue_limits(gas_limit); + // First update gas limit in transaction queue and minimal gas price. + let gas_limit = *chain.best_block_header().gas_limit(); + self.update_transaction_queue_limits(gas_limit); - // Then import all transactions from retracted blocks. - let client = self.pool_client(chain); - { - retracted + // Then import all transactions from retracted blocks. + let client = self.pool_client(chain); + { + retracted .par_iter() .for_each(|hash| { let block = chain.block(BlockId::Hash(*hash)) @@ -1275,539 +1384,646 @@ impl miner::MinerService for Miner { txs, ); }); - } + } - if has_new_best_block || (imported.len() > 0 && self.options.reseal_on_uncle) { - // Reset `next_allowed_reseal` in case a block is imported. - // Even if min_period is high, we will always attempt to create - // new pending block. - self.sealing.lock().next_allowed_reseal = Instant::now(); + if has_new_best_block || (imported.len() > 0 && self.options.reseal_on_uncle) { + // Reset `next_allowed_reseal` in case a block is imported. + // Even if min_period is high, we will always attempt to create + // new pending block. + self.sealing.lock().next_allowed_reseal = Instant::now(); - if !is_internal_import { - // -------------------------------------------------------------------------- - // | NOTE Code below requires sealing locks. | - // | Make sure to release the locks before calling that method. 
| - // -------------------------------------------------------------------------- - self.update_sealing(chain, ForceUpdateSealing::No); - } - } + if !is_internal_import { + // -------------------------------------------------------------------------- + // | NOTE Code below requires sealing locks. | + // | Make sure to release the locks before calling that method. | + // -------------------------------------------------------------------------- + self.update_sealing(chain, ForceUpdateSealing::No); + } + } - if has_new_best_block { - // Make sure to cull transactions after we update sealing. - // Not culling won't lead to old transactions being added to the block - // (thanks to Ready), but culling can take significant amount of time, - // so best to leave it after we create some work for miners to prevent increased - // uncle rate. - // If the io_channel is available attempt to offload culling to a separate task - // to avoid blocking chain_new_blocks - if let Some(ref channel) = *self.io_channel.read() { - let queue = self.transaction_queue.clone(); - let nonce_cache = self.nonce_cache.clone(); - let engine = self.engine.clone(); - let accounts = self.accounts.clone(); - let service_transaction_checker = self.service_transaction_checker.clone(); + if has_new_best_block { + // Make sure to cull transactions after we update sealing. + // Not culling won't lead to old transactions being added to the block + // (thanks to Ready), but culling can take significant amount of time, + // so best to leave it after we create some work for miners to prevent increased + // uncle rate. 
+ // If the io_channel is available attempt to offload culling to a separate task + // to avoid blocking chain_new_blocks + if let Some(ref channel) = *self.io_channel.read() { + let queue = self.transaction_queue.clone(); + let nonce_cache = self.nonce_cache.clone(); + let engine = self.engine.clone(); + let accounts = self.accounts.clone(); + let service_transaction_checker = self.service_transaction_checker.clone(); - let cull = move |chain: &::client::Client| { - let client = PoolClient::new( - chain, - &nonce_cache, - &*engine, - &*accounts, - service_transaction_checker.as_ref(), - ); - queue.cull(client); - if engine.should_reseal_on_update() { - // force update_sealing here to skip `reseal_required` checks - chain.update_sealing(ForceUpdateSealing::Yes); - } - }; + let cull = move |chain: &::client::Client| { + let client = PoolClient::new( + chain, + &nonce_cache, + &*engine, + &*accounts, + service_transaction_checker.as_ref(), + ); + queue.cull(client); + if engine.should_reseal_on_update() { + // force update_sealing here to skip `reseal_required` checks + chain.update_sealing(ForceUpdateSealing::Yes); + } + }; - if let Err(e) = channel.send(ClientIoMessage::execute(cull)) { - warn!(target: "miner", "Error queueing cull: {:?}", e); - } - } else { - self.transaction_queue.cull(client); - if self.engine.should_reseal_on_update() { - // force update_sealing here to skip `reseal_required` checks - self.update_sealing(chain, ForceUpdateSealing::Yes); - } - } - } - if let Some(ref service_transaction_checker) = self.service_transaction_checker { - match service_transaction_checker.refresh_cache(chain) { - Ok(true) => { - trace!(target: "client", "Service transaction cache was refreshed successfully"); - }, - Ok(false) => { - trace!(target: "client", "Registrar or/and service transactions contract does not exist"); - }, - Err(e) => error!(target: "client", "Error occurred while refreshing service transaction cache: {}", e) - }; - }; - } + if let Err(e) = 
channel.send(ClientIoMessage::execute(cull)) { + warn!(target: "miner", "Error queueing cull: {:?}", e); + } + } else { + self.transaction_queue.cull(client); + if self.engine.should_reseal_on_update() { + // force update_sealing here to skip `reseal_required` checks + self.update_sealing(chain, ForceUpdateSealing::Yes); + } + } + } + if let Some(ref service_transaction_checker) = self.service_transaction_checker { + match service_transaction_checker.refresh_cache(chain) { + Ok(true) => { + trace!(target: "client", "Service transaction cache was refreshed successfully"); + } + Ok(false) => { + trace!(target: "client", "Registrar or/and service transactions contract does not exist"); + } + Err(e) => { + error!(target: "client", "Error occurred while refreshing service transaction cache: {}", e) + } + }; + }; + } - fn pending_state(&self, latest_block_number: BlockNumber) -> Option { - self.map_existing_pending_block(|b| b.state.clone(), latest_block_number) - } + fn pending_state(&self, latest_block_number: BlockNumber) -> Option { + self.map_existing_pending_block(|b| b.state.clone(), latest_block_number) + } - fn pending_block_header(&self, latest_block_number: BlockNumber) -> Option
{ - self.map_existing_pending_block(|b| b.header.clone(), latest_block_number) - } + fn pending_block_header(&self, latest_block_number: BlockNumber) -> Option
{ + self.map_existing_pending_block(|b| b.header.clone(), latest_block_number) + } - fn pending_block(&self, latest_block_number: BlockNumber) -> Option { - self.map_existing_pending_block(|b| { - Block { - header: b.header.clone(), - transactions: b.transactions.iter().cloned().map(Into::into).collect(), - uncles: b.uncles.to_vec(), - } - }, latest_block_number) - } + fn pending_block(&self, latest_block_number: BlockNumber) -> Option { + self.map_existing_pending_block( + |b| Block { + header: b.header.clone(), + transactions: b.transactions.iter().cloned().map(Into::into).collect(), + uncles: b.uncles.to_vec(), + }, + latest_block_number, + ) + } - fn pending_transactions(&self, latest_block_number: BlockNumber) -> Option> { - self.map_existing_pending_block(|b| b.transactions.iter().cloned().collect(), latest_block_number) - } + fn pending_transactions( + &self, + latest_block_number: BlockNumber, + ) -> Option> { + self.map_existing_pending_block( + |b| b.transactions.iter().cloned().collect(), + latest_block_number, + ) + } } #[cfg(test)] mod tests { - use std::iter::FromIterator; - - use super::*; - use accounts::AccountProvider; - use ethkey::{Generator, Random}; - use hash::keccak; - use rustc_hex::FromHex; - use types::BlockNumber; - - use client::{TestBlockChainClient, EachBlockWith, ChainInfo, ImportSealedBlock}; - use miner::{MinerService, PendingOrdering}; - use test_helpers::{generate_dummy_client, generate_dummy_client_with_spec}; - use types::transaction::{Transaction}; - - #[test] - fn should_prepare_block_to_seal() { - // given - let client = TestBlockChainClient::default(); - let miner = Miner::new_for_tests(&Spec::new_test(), None); - - // when - let sealing_work = miner.work_package(&client); - assert!(sealing_work.is_some(), "Expected closed block"); - } - - #[test] - fn should_still_work_after_a_couple_of_blocks() { - // given - let client = TestBlockChainClient::default(); - let miner = Miner::new_for_tests(&Spec::new_test(), None); - - let 
res = miner.work_package(&client); - let hash = res.unwrap().0; - let block = miner.submit_seal(hash, vec![]).unwrap(); - client.import_sealed_block(block).unwrap(); - - // two more blocks mined, work requested. - client.add_blocks(1, EachBlockWith::Uncle); - miner.work_package(&client); - - client.add_blocks(1, EachBlockWith::Uncle); - miner.work_package(&client); - - // solution to original work submitted. - assert!(miner.submit_seal(hash, vec![]).is_ok()); - } - - fn miner() -> Miner { - Miner::new( - MinerOptions { - force_sealing: false, - reseal_on_external_tx: false, - reseal_on_own_tx: true, - reseal_on_uncle: false, - reseal_min_period: Duration::from_secs(5), - reseal_max_period: Duration::from_secs(120), - pending_set: PendingSet::AlwaysSealing, - work_queue_size: 5, - enable_resubmission: true, - infinite_pending_block: false, - tx_queue_penalization: Penalization::Disabled, - tx_queue_strategy: PrioritizationStrategy::GasPriceOnly, - tx_queue_no_unfamiliar_locals: false, - refuse_service_transactions: false, - pool_limits: Default::default(), - pool_verification_options: pool::verifier::Options { - minimal_gas_price: 0.into(), - block_gas_limit: U256::max_value(), - tx_gas_limit: U256::max_value(), - no_early_reject: false, - }, - }, - GasPricer::new_fixed(0u64.into()), - &Spec::new_test(), - ::std::collections::HashSet::new(), // local accounts - ) - } - - const TEST_CHAIN_ID: u64 = 2; - - fn transaction() -> SignedTransaction { - transaction_with_chain_id(TEST_CHAIN_ID) - } - - fn transaction_with_chain_id(chain_id: u64) -> SignedTransaction { - let keypair = Random.generate().unwrap(); - Transaction { - action: Action::Create, - value: U256::zero(), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::zero(), - nonce: U256::zero(), - }.sign(keypair.secret(), Some(chain_id)) - } - - #[test] - fn should_make_pending_block_when_importing_own_transaction() { - // given - let client = TestBlockChainClient::default(); - 
let miner = miner(); - let transaction = transaction(); - let best_block = 0; - // when - let res = miner.import_own_transaction(&client, PendingTransaction::new(transaction, None)); - - // then - assert_eq!(res.unwrap(), ()); - assert_eq!(miner.pending_transactions(best_block).unwrap().len(), 1); - assert_eq!(miner.pending_receipts(best_block).unwrap().len(), 1); - assert_eq!(miner.ready_transactions(&client, 10, PendingOrdering::Priority).len(), 1); - // This method will let us know if pending block was created (before calling that method) - assert_eq!(miner.prepare_pending_block(&client), BlockPreparationStatus::NotPrepared); - } - - #[test] - fn should_not_return_stale_work_packages() { - // given - let client = TestBlockChainClient::default(); - let miner = miner(); - - // initial work package should create the pending block - let res = miner.work_package(&client); - assert_eq!(res.unwrap().1, 1); - // This should be true, since there were some requests. - assert_eq!(miner.requires_reseal(0), true); - - // when new block is imported - let client = generate_dummy_client(2); - let imported = [0.into()]; - let empty = &[]; - miner.chain_new_blocks(&*client, &imported, empty, &imported, empty, false); - - // then - // This should be false, because it's too early. 
- assert_eq!(miner.requires_reseal(2), false); - // but still work package should be ready - let res = miner.work_package(&*client); - assert_eq!(res.unwrap().1, 3); - assert_eq!(miner.prepare_pending_block(&*client), BlockPreparationStatus::NotPrepared); - } - - #[test] - fn should_not_use_pending_block_if_best_block_is_higher() { - // given - let client = TestBlockChainClient::default(); - let miner = miner(); - let transaction = transaction(); - let best_block = 10; - // when - let res = miner.import_own_transaction(&client, PendingTransaction::new(transaction, None)); - - // then - assert_eq!(res.unwrap(), ()); - assert_eq!(miner.pending_transactions(best_block), None); - assert_eq!(miner.pending_receipts(best_block), None); - assert_eq!(miner.ready_transactions(&client, 10, PendingOrdering::Priority).len(), 1); - } - - #[test] - fn should_import_external_transaction() { - // given - let client = TestBlockChainClient::default(); - let miner = miner(); - let transaction = transaction().into(); - let best_block = 0; - // when - let res = miner.import_external_transactions(&client, vec![transaction]).pop().unwrap(); - - // then - assert_eq!(res.unwrap(), ()); - // By default we don't reseal on external transactions - assert_eq!(miner.pending_transactions(best_block), None); - assert_eq!(miner.pending_receipts(best_block), None); - // By default we use PendingSet::AlwaysSealing, so no transactions yet. - assert_eq!(miner.ready_transactions(&client, 10, PendingOrdering::Priority).len(), 0); - // This method will let us know if pending block was created (before calling that method) - assert_eq!(miner.prepare_pending_block(&client), BlockPreparationStatus::Succeeded); - // After pending block is created we should see a transaction. 
- assert_eq!(miner.ready_transactions(&client, 10, PendingOrdering::Priority).len(), 1); - } - - #[test] - fn should_treat_unfamiliar_locals_selectively() { - // given - let keypair = Random.generate().unwrap(); - let client = TestBlockChainClient::default(); - let mut local_accounts = ::std::collections::HashSet::new(); - local_accounts.insert(keypair.address()); - - let miner = Miner::new( - MinerOptions { - tx_queue_no_unfamiliar_locals: true, - ..miner().options - }, - GasPricer::new_fixed(0u64.into()), - &Spec::new_test(), - local_accounts, - ); - let transaction = transaction(); - let best_block = 0; - // when - // This transaction should not be marked as local because our account_provider doesn't have the sender - let res = miner.import_claimed_local_transaction(&client, PendingTransaction::new(transaction.clone(), None), false); - - // then - // Check the same conditions as `should_import_external_transaction` first. Behaviour should be identical. - // That is: it's treated as though we added it through `import_external_transactions` - assert_eq!(res.unwrap(), ()); - assert_eq!(miner.pending_transactions(best_block), None); - assert_eq!(miner.pending_receipts(best_block), None); - assert_eq!(miner.ready_transactions(&client, 10, PendingOrdering::Priority).len(), 0); - assert_eq!(miner.prepare_pending_block(&client), BlockPreparationStatus::Succeeded); - assert_eq!(miner.ready_transactions(&client, 10, PendingOrdering::Priority).len(), 1); - - // when - 2nd part: create a local transaction from account_provider. - // Borrow the transaction used before & sign with our generated keypair. - let local_transaction = transaction.deconstruct().0.as_unsigned().clone().sign(keypair.secret(), Some(TEST_CHAIN_ID)); - let res2 = miner.import_claimed_local_transaction(&client, PendingTransaction::new(local_transaction, None), false); - - // then - 2nd part: we add on the results from the last pending block. 
- // This is borrowed from `should_make_pending_block_when_importing_own_transaction` and slightly modified. - assert_eq!(res2.unwrap(), ()); - assert_eq!(miner.pending_transactions(best_block).unwrap().len(), 2); - assert_eq!(miner.pending_receipts(best_block).unwrap().len(), 2); - assert_eq!(miner.ready_transactions(&client, 10, PendingOrdering::Priority).len(), 2); - assert_eq!(miner.prepare_pending_block(&client), BlockPreparationStatus::NotPrepared); - } - - #[test] - fn should_reject_local_transaction_with_invalid_chain_id() { - let spec = Spec::new_test(); - let miner = Miner::new_for_tests(&spec, None); - let client = TestBlockChainClient::default(); - let chain_id = spec.chain_id(); - - // chain_id + 100500 is invalid - let import = miner.import_claimed_local_transaction( - &client, - PendingTransaction::new(transaction_with_chain_id(chain_id + 10500), None), - false, - ); - assert_eq!(import, Err(transaction::Error::InvalidChainId)); - - // chain_id is valid - let import = miner.import_claimed_local_transaction( - &client, - PendingTransaction::new(transaction_with_chain_id(chain_id), None), - false, - ); - assert_eq!(import, Ok(())); - } - - #[test] - fn should_prioritize_locals() { - let client = TestBlockChainClient::default(); - let transaction = transaction(); - let miner = Miner::new( - MinerOptions { - tx_queue_no_unfamiliar_locals: true, // should work even with this enabled - ..miner().options - }, - GasPricer::new_fixed(0u64.into()), - &Spec::new_test(), - HashSet::from_iter(vec![transaction.sender()].into_iter()), - ); - let best_block = 0; - - // Miner with sender as a known local address should prioritize transactions from that address - let res2 = miner.import_claimed_local_transaction(&client, PendingTransaction::new(transaction, None), false); - - // check to make sure the prioritized transaction is pending - assert_eq!(res2.unwrap(), ()); - assert_eq!(miner.pending_transactions(best_block).unwrap().len(), 1); - 
assert_eq!(miner.pending_receipts(best_block).unwrap().len(), 1); - assert_eq!(miner.ready_transactions(&client, 10, PendingOrdering::Priority).len(), 1); - assert_eq!(miner.prepare_pending_block(&client), BlockPreparationStatus::NotPrepared); - } - - #[test] - fn should_not_seal_unless_enabled() { - let miner = miner(); - let client = TestBlockChainClient::default(); - // By default resealing is not required. - assert!(!miner.requires_reseal(1u8.into())); - - miner.import_external_transactions(&client, vec![transaction().into()]).pop().unwrap().unwrap(); - assert_eq!(miner.prepare_pending_block(&client), BlockPreparationStatus::Succeeded); - // Unless asked to prepare work. - assert!(miner.requires_reseal(1u8.into())); - } - - #[test] - fn internal_seals_without_work() { - let spec = Spec::new_instant(); - let miner = Miner::new_for_tests(&spec, None); - - let client = generate_dummy_client(2); - - let import = miner.import_external_transactions(&*client, vec![transaction_with_chain_id(spec.chain_id()).into()]).pop().unwrap(); - assert_eq!(import.unwrap(), ()); - - miner.update_sealing(&*client, ForceUpdateSealing::No); - client.flush_queue(); - assert!(miner.pending_block(0).is_none()); - assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber); - - assert!(miner.import_own_transaction(&*client, PendingTransaction::new(transaction_with_chain_id(spec.chain_id()).into(), None)).is_ok()); - - miner.update_sealing(&*client, ForceUpdateSealing::No); - client.flush_queue(); - assert!(miner.pending_block(0).is_none()); - assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber); - } - - #[test] - fn should_not_fail_setting_engine_signer_without_account_provider() { - let spec = Spec::new_test_round; - let tap = Arc::new(AccountProvider::transient_provider()); - let addr = tap.insert_account(keccak("1").into(), &"".into()).unwrap(); - let client = generate_dummy_client_with_spec(spec); - let engine_signer = Box::new((tap.clone(), addr, "".into())); - 
let msg = Default::default(); - assert!(client.engine().sign(msg).is_err()); - - // should set engine signer and miner author - client.miner().set_author(Author::Sealer(engine_signer)); - assert_eq!(client.miner().authoring_params().author, addr); - assert!(client.engine().sign(msg).is_ok()); - } - - #[test] - fn should_mine_if_internal_sealing_is_enabled() { - let spec = Spec::new_instant(); - let miner = Miner::new_for_tests(&spec, None); - - let client = generate_dummy_client(2); - miner.update_sealing(&*client, ForceUpdateSealing::No); - - assert!(miner.is_currently_sealing()); - } - - #[test] - fn should_not_mine_if_internal_sealing_is_disabled() { - let spec = Spec::new_test_round(); - let miner = Miner::new_for_tests(&spec, None); - - let client = generate_dummy_client(2); - miner.update_sealing(&*client, ForceUpdateSealing::No); - - assert!(!miner.is_currently_sealing()); - } - - #[test] - fn should_not_mine_if_no_fetch_work_request() { - let spec = Spec::new_test(); - let miner = Miner::new_for_tests(&spec, None); - - let client = generate_dummy_client(2); - miner.update_sealing(&*client, ForceUpdateSealing::No); - - assert!(!miner.is_currently_sealing()); - } - - #[cfg(feature = "work-notify")] - #[test] - fn should_mine_if_fetch_work_request() { - struct DummyNotifyWork; - - impl NotifyWork for DummyNotifyWork { - fn notify(&self, _pow_hash: H256, _difficulty: U256, _number: u64) { } - } - - let spec = Spec::new_test(); - let miner = Miner::new_for_tests(&spec, None); - miner.add_work_listener(Box::new(DummyNotifyWork)); - - let client = generate_dummy_client(2); - miner.update_sealing(&*client, ForceUpdateSealing::No); - - assert!(miner.is_currently_sealing()); - } - - #[test] - fn should_set_new_minimum_gas_price() { - // Creates a new GasPricer::Fixed behind the scenes - let miner = Miner::new_for_tests(&Spec::new_test(), None); - - let expected_minimum_gas_price: U256 = 0x1337.into(); - 
miner.set_minimal_gas_price(expected_minimum_gas_price).unwrap(); - - let txq_options = miner.transaction_queue.status().options; - let current_minimum_gas_price = txq_options.minimal_gas_price; - - assert!(current_minimum_gas_price == expected_minimum_gas_price); - } - - #[cfg(feature = "price-info")] - fn dynamic_gas_pricer() -> GasPricer { - use std::time::Duration; - use parity_runtime::Executor; - use fetch::Client as FetchClient; - use ethcore_miner::gas_price_calibrator::{GasPriceCalibrator, GasPriceCalibratorOptions}; - - // Don't really care about any of these settings since - // the gas pricer is never actually going to be used - let fetch = FetchClient::new(1).unwrap(); - let p = Executor::new_sync(); - - GasPricer::new_calibrated( - GasPriceCalibrator::new( - GasPriceCalibratorOptions { - usd_per_tx: 0.0, - recalibration_period: Duration::from_secs(0), - }, - fetch, - p, - "fake_endpoint".to_owned() - ) - ) - } - - #[test] - #[cfg(feature = "price-info")] - fn should_fail_to_set_new_minimum_gas_price() { - // We get a fixed gas pricer by default, need to change that - let miner = Miner::new_for_tests(&Spec::new_test(), None); - let calibrated_gas_pricer = dynamic_gas_pricer(); - *miner.gas_pricer.lock() = calibrated_gas_pricer; - - let expected_minimum_gas_price: U256 = 0x1337.into(); - let result = miner.set_minimal_gas_price(expected_minimum_gas_price); - assert!(result.is_err()); - - let received_error_msg = result.unwrap_err(); - let expected_error_msg = "Can't update fixed gas price while automatic gas calibration is enabled."; - - assert!(received_error_msg == expected_error_msg); - } + use std::iter::FromIterator; + + use super::*; + use accounts::AccountProvider; + use ethkey::{Generator, Random}; + use hash::keccak; + use rustc_hex::FromHex; + use types::BlockNumber; + + use client::{ChainInfo, EachBlockWith, ImportSealedBlock, TestBlockChainClient}; + use miner::{MinerService, PendingOrdering}; + use test_helpers::{generate_dummy_client, 
generate_dummy_client_with_spec}; + use types::transaction::Transaction; + + #[test] + fn should_prepare_block_to_seal() { + // given + let client = TestBlockChainClient::default(); + let miner = Miner::new_for_tests(&Spec::new_test(), None); + + // when + let sealing_work = miner.work_package(&client); + assert!(sealing_work.is_some(), "Expected closed block"); + } + + #[test] + fn should_still_work_after_a_couple_of_blocks() { + // given + let client = TestBlockChainClient::default(); + let miner = Miner::new_for_tests(&Spec::new_test(), None); + + let res = miner.work_package(&client); + let hash = res.unwrap().0; + let block = miner.submit_seal(hash, vec![]).unwrap(); + client.import_sealed_block(block).unwrap(); + + // two more blocks mined, work requested. + client.add_blocks(1, EachBlockWith::Uncle); + miner.work_package(&client); + + client.add_blocks(1, EachBlockWith::Uncle); + miner.work_package(&client); + + // solution to original work submitted. + assert!(miner.submit_seal(hash, vec![]).is_ok()); + } + + fn miner() -> Miner { + Miner::new( + MinerOptions { + force_sealing: false, + reseal_on_external_tx: false, + reseal_on_own_tx: true, + reseal_on_uncle: false, + reseal_min_period: Duration::from_secs(5), + reseal_max_period: Duration::from_secs(120), + pending_set: PendingSet::AlwaysSealing, + work_queue_size: 5, + enable_resubmission: true, + infinite_pending_block: false, + tx_queue_penalization: Penalization::Disabled, + tx_queue_strategy: PrioritizationStrategy::GasPriceOnly, + tx_queue_no_unfamiliar_locals: false, + refuse_service_transactions: false, + pool_limits: Default::default(), + pool_verification_options: pool::verifier::Options { + minimal_gas_price: 0.into(), + block_gas_limit: U256::max_value(), + tx_gas_limit: U256::max_value(), + no_early_reject: false, + }, + }, + GasPricer::new_fixed(0u64.into()), + &Spec::new_test(), + ::std::collections::HashSet::new(), // local accounts + ) + } + + const TEST_CHAIN_ID: u64 = 2; + + fn 
transaction() -> SignedTransaction { + transaction_with_chain_id(TEST_CHAIN_ID) + } + + fn transaction_with_chain_id(chain_id: u64) -> SignedTransaction { + let keypair = Random.generate().unwrap(); + Transaction { + action: Action::Create, + value: U256::zero(), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::zero(), + nonce: U256::zero(), + } + .sign(keypair.secret(), Some(chain_id)) + } + + #[test] + fn should_make_pending_block_when_importing_own_transaction() { + // given + let client = TestBlockChainClient::default(); + let miner = miner(); + let transaction = transaction(); + let best_block = 0; + // when + let res = miner.import_own_transaction(&client, PendingTransaction::new(transaction, None)); + + // then + assert_eq!(res.unwrap(), ()); + assert_eq!(miner.pending_transactions(best_block).unwrap().len(), 1); + assert_eq!(miner.pending_receipts(best_block).unwrap().len(), 1); + assert_eq!( + miner + .ready_transactions(&client, 10, PendingOrdering::Priority) + .len(), + 1 + ); + // This method will let us know if pending block was created (before calling that method) + assert_eq!( + miner.prepare_pending_block(&client), + BlockPreparationStatus::NotPrepared + ); + } + + #[test] + fn should_not_return_stale_work_packages() { + // given + let client = TestBlockChainClient::default(); + let miner = miner(); + + // initial work package should create the pending block + let res = miner.work_package(&client); + assert_eq!(res.unwrap().1, 1); + // This should be true, since there were some requests. + assert_eq!(miner.requires_reseal(0), true); + + // when new block is imported + let client = generate_dummy_client(2); + let imported = [0.into()]; + let empty = &[]; + miner.chain_new_blocks(&*client, &imported, empty, &imported, empty, false); + + // then + // This should be false, because it's too early. 
+ assert_eq!(miner.requires_reseal(2), false); + // but still work package should be ready + let res = miner.work_package(&*client); + assert_eq!(res.unwrap().1, 3); + assert_eq!( + miner.prepare_pending_block(&*client), + BlockPreparationStatus::NotPrepared + ); + } + + #[test] + fn should_not_use_pending_block_if_best_block_is_higher() { + // given + let client = TestBlockChainClient::default(); + let miner = miner(); + let transaction = transaction(); + let best_block = 10; + // when + let res = miner.import_own_transaction(&client, PendingTransaction::new(transaction, None)); + + // then + assert_eq!(res.unwrap(), ()); + assert_eq!(miner.pending_transactions(best_block), None); + assert_eq!(miner.pending_receipts(best_block), None); + assert_eq!( + miner + .ready_transactions(&client, 10, PendingOrdering::Priority) + .len(), + 1 + ); + } + + #[test] + fn should_import_external_transaction() { + // given + let client = TestBlockChainClient::default(); + let miner = miner(); + let transaction = transaction().into(); + let best_block = 0; + // when + let res = miner + .import_external_transactions(&client, vec![transaction]) + .pop() + .unwrap(); + + // then + assert_eq!(res.unwrap(), ()); + // By default we don't reseal on external transactions + assert_eq!(miner.pending_transactions(best_block), None); + assert_eq!(miner.pending_receipts(best_block), None); + // By default we use PendingSet::AlwaysSealing, so no transactions yet. + assert_eq!( + miner + .ready_transactions(&client, 10, PendingOrdering::Priority) + .len(), + 0 + ); + // This method will let us know if pending block was created (before calling that method) + assert_eq!( + miner.prepare_pending_block(&client), + BlockPreparationStatus::Succeeded + ); + // After pending block is created we should see a transaction. 
+ assert_eq!( + miner + .ready_transactions(&client, 10, PendingOrdering::Priority) + .len(), + 1 + ); + } + + #[test] + fn should_treat_unfamiliar_locals_selectively() { + // given + let keypair = Random.generate().unwrap(); + let client = TestBlockChainClient::default(); + let mut local_accounts = ::std::collections::HashSet::new(); + local_accounts.insert(keypair.address()); + + let miner = Miner::new( + MinerOptions { + tx_queue_no_unfamiliar_locals: true, + ..miner().options + }, + GasPricer::new_fixed(0u64.into()), + &Spec::new_test(), + local_accounts, + ); + let transaction = transaction(); + let best_block = 0; + // when + // This transaction should not be marked as local because our account_provider doesn't have the sender + let res = miner.import_claimed_local_transaction( + &client, + PendingTransaction::new(transaction.clone(), None), + false, + ); + + // then + // Check the same conditions as `should_import_external_transaction` first. Behaviour should be identical. + // That is: it's treated as though we added it through `import_external_transactions` + assert_eq!(res.unwrap(), ()); + assert_eq!(miner.pending_transactions(best_block), None); + assert_eq!(miner.pending_receipts(best_block), None); + assert_eq!( + miner + .ready_transactions(&client, 10, PendingOrdering::Priority) + .len(), + 0 + ); + assert_eq!( + miner.prepare_pending_block(&client), + BlockPreparationStatus::Succeeded + ); + assert_eq!( + miner + .ready_transactions(&client, 10, PendingOrdering::Priority) + .len(), + 1 + ); + + // when - 2nd part: create a local transaction from account_provider. + // Borrow the transaction used before & sign with our generated keypair. 
+ let local_transaction = transaction + .deconstruct() + .0 + .as_unsigned() + .clone() + .sign(keypair.secret(), Some(TEST_CHAIN_ID)); + let res2 = miner.import_claimed_local_transaction( + &client, + PendingTransaction::new(local_transaction, None), + false, + ); + + // then - 2nd part: we add on the results from the last pending block. + // This is borrowed from `should_make_pending_block_when_importing_own_transaction` and slightly modified. + assert_eq!(res2.unwrap(), ()); + assert_eq!(miner.pending_transactions(best_block).unwrap().len(), 2); + assert_eq!(miner.pending_receipts(best_block).unwrap().len(), 2); + assert_eq!( + miner + .ready_transactions(&client, 10, PendingOrdering::Priority) + .len(), + 2 + ); + assert_eq!( + miner.prepare_pending_block(&client), + BlockPreparationStatus::NotPrepared + ); + } + + #[test] + fn should_reject_local_transaction_with_invalid_chain_id() { + let spec = Spec::new_test(); + let miner = Miner::new_for_tests(&spec, None); + let client = TestBlockChainClient::default(); + let chain_id = spec.chain_id(); + + // chain_id + 100500 is invalid + let import = miner.import_claimed_local_transaction( + &client, + PendingTransaction::new(transaction_with_chain_id(chain_id + 10500), None), + false, + ); + assert_eq!(import, Err(transaction::Error::InvalidChainId)); + + // chain_id is valid + let import = miner.import_claimed_local_transaction( + &client, + PendingTransaction::new(transaction_with_chain_id(chain_id), None), + false, + ); + assert_eq!(import, Ok(())); + } + + #[test] + fn should_prioritize_locals() { + let client = TestBlockChainClient::default(); + let transaction = transaction(); + let miner = Miner::new( + MinerOptions { + tx_queue_no_unfamiliar_locals: true, // should work even with this enabled + ..miner().options + }, + GasPricer::new_fixed(0u64.into()), + &Spec::new_test(), + HashSet::from_iter(vec![transaction.sender()].into_iter()), + ); + let best_block = 0; + + // Miner with sender as a known local 
address should prioritize transactions from that address + let res2 = miner.import_claimed_local_transaction( + &client, + PendingTransaction::new(transaction, None), + false, + ); + + // check to make sure the prioritized transaction is pending + assert_eq!(res2.unwrap(), ()); + assert_eq!(miner.pending_transactions(best_block).unwrap().len(), 1); + assert_eq!(miner.pending_receipts(best_block).unwrap().len(), 1); + assert_eq!( + miner + .ready_transactions(&client, 10, PendingOrdering::Priority) + .len(), + 1 + ); + assert_eq!( + miner.prepare_pending_block(&client), + BlockPreparationStatus::NotPrepared + ); + } + + #[test] + fn should_not_seal_unless_enabled() { + let miner = miner(); + let client = TestBlockChainClient::default(); + // By default resealing is not required. + assert!(!miner.requires_reseal(1u8.into())); + + miner + .import_external_transactions(&client, vec![transaction().into()]) + .pop() + .unwrap() + .unwrap(); + assert_eq!( + miner.prepare_pending_block(&client), + BlockPreparationStatus::Succeeded + ); + // Unless asked to prepare work. 
+ assert!(miner.requires_reseal(1u8.into())); + } + + #[test] + fn internal_seals_without_work() { + let spec = Spec::new_instant(); + let miner = Miner::new_for_tests(&spec, None); + + let client = generate_dummy_client(2); + + let import = miner + .import_external_transactions( + &*client, + vec![transaction_with_chain_id(spec.chain_id()).into()], + ) + .pop() + .unwrap(); + assert_eq!(import.unwrap(), ()); + + miner.update_sealing(&*client, ForceUpdateSealing::No); + client.flush_queue(); + assert!(miner.pending_block(0).is_none()); + assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber); + + assert!(miner + .import_own_transaction( + &*client, + PendingTransaction::new(transaction_with_chain_id(spec.chain_id()).into(), None) + ) + .is_ok()); + + miner.update_sealing(&*client, ForceUpdateSealing::No); + client.flush_queue(); + assert!(miner.pending_block(0).is_none()); + assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber); + } + + #[test] + fn should_not_fail_setting_engine_signer_without_account_provider() { + let spec = Spec::new_test_round; + let tap = Arc::new(AccountProvider::transient_provider()); + let addr = tap.insert_account(keccak("1").into(), &"".into()).unwrap(); + let client = generate_dummy_client_with_spec(spec); + let engine_signer = Box::new((tap.clone(), addr, "".into())); + let msg = Default::default(); + assert!(client.engine().sign(msg).is_err()); + + // should set engine signer and miner author + client.miner().set_author(Author::Sealer(engine_signer)); + assert_eq!(client.miner().authoring_params().author, addr); + assert!(client.engine().sign(msg).is_ok()); + } + + #[test] + fn should_mine_if_internal_sealing_is_enabled() { + let spec = Spec::new_instant(); + let miner = Miner::new_for_tests(&spec, None); + + let client = generate_dummy_client(2); + miner.update_sealing(&*client, ForceUpdateSealing::No); + + assert!(miner.is_currently_sealing()); + } + + #[test] + fn 
should_not_mine_if_internal_sealing_is_disabled() { + let spec = Spec::new_test_round(); + let miner = Miner::new_for_tests(&spec, None); + + let client = generate_dummy_client(2); + miner.update_sealing(&*client, ForceUpdateSealing::No); + + assert!(!miner.is_currently_sealing()); + } + + #[test] + fn should_not_mine_if_no_fetch_work_request() { + let spec = Spec::new_test(); + let miner = Miner::new_for_tests(&spec, None); + + let client = generate_dummy_client(2); + miner.update_sealing(&*client, ForceUpdateSealing::No); + + assert!(!miner.is_currently_sealing()); + } + + #[cfg(feature = "work-notify")] + #[test] + fn should_mine_if_fetch_work_request() { + struct DummyNotifyWork; + + impl NotifyWork for DummyNotifyWork { + fn notify(&self, _pow_hash: H256, _difficulty: U256, _number: u64) {} + } + + let spec = Spec::new_test(); + let miner = Miner::new_for_tests(&spec, None); + miner.add_work_listener(Box::new(DummyNotifyWork)); + + let client = generate_dummy_client(2); + miner.update_sealing(&*client, ForceUpdateSealing::No); + + assert!(miner.is_currently_sealing()); + } + + #[test] + fn should_set_new_minimum_gas_price() { + // Creates a new GasPricer::Fixed behind the scenes + let miner = Miner::new_for_tests(&Spec::new_test(), None); + + let expected_minimum_gas_price: U256 = 0x1337.into(); + miner + .set_minimal_gas_price(expected_minimum_gas_price) + .unwrap(); + + let txq_options = miner.transaction_queue.status().options; + let current_minimum_gas_price = txq_options.minimal_gas_price; + + assert!(current_minimum_gas_price == expected_minimum_gas_price); + } + + #[cfg(feature = "price-info")] + fn dynamic_gas_pricer() -> GasPricer { + use ethcore_miner::gas_price_calibrator::{GasPriceCalibrator, GasPriceCalibratorOptions}; + use fetch::Client as FetchClient; + use parity_runtime::Executor; + use std::time::Duration; + + // Don't really care about any of these settings since + // the gas pricer is never actually going to be used + let fetch = 
FetchClient::new(1).unwrap(); + let p = Executor::new_sync(); + + GasPricer::new_calibrated(GasPriceCalibrator::new( + GasPriceCalibratorOptions { + usd_per_tx: 0.0, + recalibration_period: Duration::from_secs(0), + }, + fetch, + p, + "fake_endpoint".to_owned(), + )) + } + + #[test] + #[cfg(feature = "price-info")] + fn should_fail_to_set_new_minimum_gas_price() { + // We get a fixed gas pricer by default, need to change that + let miner = Miner::new_for_tests(&Spec::new_test(), None); + let calibrated_gas_pricer = dynamic_gas_pricer(); + *miner.gas_pricer.lock() = calibrated_gas_pricer; + + let expected_minimum_gas_price: U256 = 0x1337.into(); + let result = miner.set_minimal_gas_price(expected_minimum_gas_price); + assert!(result.is_err()); + + let received_error_msg = result.unwrap_err(); + let expected_error_msg = + "Can't update fixed gas price while automatic gas calibration is enabled."; + + assert!(received_error_msg == expected_error_msg); + } } diff --git a/ethcore/src/miner/mod.rs b/ethcore/src/miner/mod.rs index 12658192a..c993b6b1a 100644 --- a/ethcore/src/miner/mod.rs +++ b/ethcore/src/miner/mod.rs @@ -25,28 +25,30 @@ pub mod pool_client; #[cfg(feature = "stratum")] pub mod stratum; -pub use self::miner::{Miner, MinerOptions, Penalization, PendingSet, AuthoringParams, Author}; -pub use ethcore_miner::local_accounts::LocalAccounts; -pub use ethcore_miner::pool::PendingOrdering; +pub use self::miner::{Author, AuthoringParams, Miner, MinerOptions, Penalization, PendingSet}; +pub use ethcore_miner::{local_accounts::LocalAccounts, pool::PendingOrdering}; -use std::sync::Arc; -use std::collections::{BTreeSet, BTreeMap}; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; use bytes::Bytes; -use ethcore_miner::pool::{VerifiedTransaction, QueueStatus, local_transactions}; -use ethereum_types::{H256, U256, Address}; -use types::transaction::{self, UnverifiedTransaction, SignedTransaction, PendingTransaction}; -use types::BlockNumber; -use 
types::block::Block; -use types::header::Header; -use types::receipt::RichReceipt; +use ethcore_miner::pool::{local_transactions, QueueStatus, VerifiedTransaction}; +use ethereum_types::{Address, H256, U256}; +use types::{ + block::Block, + header::Header, + receipt::RichReceipt, + transaction::{self, PendingTransaction, SignedTransaction, UnverifiedTransaction}, + BlockNumber, +}; use block::SealedBlock; use call_contract::{CallContract, RegistryInfo}; use client::{ - ScheduleInfo, - BlockChain, BlockProducer, SealedBlockImporter, ChainInfo, - AccountData, Nonce, traits::ForceUpdateSealing + traits::ForceUpdateSealing, AccountData, BlockChain, BlockProducer, ChainInfo, Nonce, + ScheduleInfo, SealedBlockImporter, }; use error::Error; use state::StateInfo; @@ -60,153 +62,190 @@ pub trait TransactionVerifierClient: Send + Sync {} /// Extended client interface used for mining -pub trait BlockChainClient: TransactionVerifierClient + BlockProducer + SealedBlockImporter {} +pub trait BlockChainClient: + TransactionVerifierClient + BlockProducer + SealedBlockImporter +{ +} /// Miner client API -pub trait MinerService : Send + Sync { - /// Type representing chain state - type State: StateInfo + 'static; +pub trait MinerService: Send + Sync { + /// Type representing chain state + type State: StateInfo + 'static; - // Sealing + // Sealing - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. - fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result; + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result; - /// Is it currently sealing? - fn is_currently_sealing(&self) -> bool; + /// Is it currently sealing? + fn is_currently_sealing(&self) -> bool; - /// Get the sealing work package preparing it if doesn't exist yet. 
- /// - /// Returns `None` if engine seals internally. - fn work_package(&self, chain: &C) -> Option<(H256, BlockNumber, u64, U256)> - where C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce + Sync; + /// Get the sealing work package preparing it if doesn't exist yet. + /// + /// Returns `None` if engine seals internally. + fn work_package(&self, chain: &C) -> Option<(H256, BlockNumber, u64, U256)> + where + C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce + Sync; - /// Update current pending block - fn update_sealing(&self, chain: &C, force: ForceUpdateSealing) - where C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce + Sync; + /// Update current pending block + fn update_sealing(&self, chain: &C, force: ForceUpdateSealing) + where + C: BlockChain + CallContract + BlockProducer + SealedBlockImporter + Nonce + Sync; - // Notifications + // Notifications - /// Called when blocks are imported to chain, updates transactions queue. - /// `is_internal_import` indicates that the block has just been created in miner and internally sealed by the engine, - /// so we shouldn't attempt creating new block again. - fn chain_new_blocks(&self, chain: &C, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256], is_internal_import: bool) - where C: BlockChainClient; + /// Called when blocks are imported to chain, updates transactions queue. + /// `is_internal_import` indicates that the block has just been created in miner and internally sealed by the engine, + /// so we shouldn't attempt creating new block again. + fn chain_new_blocks( + &self, + chain: &C, + imported: &[H256], + invalid: &[H256], + enacted: &[H256], + retracted: &[H256], + is_internal_import: bool, + ) where + C: BlockChainClient; - // Pending block + // Pending block - /// Get a list of all pending receipts from pending block. 
- fn pending_receipts(&self, best_block: BlockNumber) -> Option>; + /// Get a list of all pending receipts from pending block. + fn pending_receipts(&self, best_block: BlockNumber) -> Option>; - /// Get a particular receipt from pending block. - fn pending_receipt(&self, best_block: BlockNumber, hash: &H256) -> Option { - let receipts = self.pending_receipts(best_block)?; - receipts.into_iter().find(|r| &r.transaction_hash == hash) - } + /// Get a particular receipt from pending block. + fn pending_receipt(&self, best_block: BlockNumber, hash: &H256) -> Option { + let receipts = self.pending_receipts(best_block)?; + receipts.into_iter().find(|r| &r.transaction_hash == hash) + } - /// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing. - fn pending_state(&self, latest_block_number: BlockNumber) -> Option; + /// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing. + fn pending_state(&self, latest_block_number: BlockNumber) -> Option; - /// Get `Some` `clone()` of the current pending block header or `None` if we're not sealing. - fn pending_block_header(&self, latest_block_number: BlockNumber) -> Option
; + /// Get `Some` `clone()` of the current pending block header or `None` if we're not sealing. + fn pending_block_header(&self, latest_block_number: BlockNumber) -> Option
; - /// Get `Some` `clone()` of the current pending block or `None` if we're not sealing. - fn pending_block(&self, latest_block_number: BlockNumber) -> Option; + /// Get `Some` `clone()` of the current pending block or `None` if we're not sealing. + fn pending_block(&self, latest_block_number: BlockNumber) -> Option; - /// Get `Some` `clone()` of the current pending block transactions or `None` if we're not sealing. - fn pending_transactions(&self, latest_block_number: BlockNumber) -> Option>; + /// Get `Some` `clone()` of the current pending block transactions or `None` if we're not sealing. + fn pending_transactions( + &self, + latest_block_number: BlockNumber, + ) -> Option>; - // Block authoring + // Block authoring - /// Get current authoring parameters. - fn authoring_params(&self) -> AuthoringParams; + /// Get current authoring parameters. + fn authoring_params(&self) -> AuthoringParams; - /// Set the lower and upper bound of gas limit we wish to target when sealing a new block. - fn set_gas_range_target(&self, gas_range_target: (U256, U256)); + /// Set the lower and upper bound of gas limit we wish to target when sealing a new block. + fn set_gas_range_target(&self, gas_range_target: (U256, U256)); - /// Set the extra_data that we will seal blocks with. - fn set_extra_data(&self, extra_data: Bytes); + /// Set the extra_data that we will seal blocks with. + fn set_extra_data(&self, extra_data: Bytes); - /// Set info necessary to sign consensus messages and block authoring. - /// - /// On chains where sealing is done externally (e.g. PoW) we provide only reward beneficiary. - fn set_author(&self, author: Author); + /// Set info necessary to sign consensus messages and block authoring. + /// + /// On chains where sealing is done externally (e.g. PoW) we provide only reward beneficiary. + fn set_author(&self, author: Author); - // Transaction Pool + // Transaction Pool - /// Imports transactions to transaction queue. 
- fn import_external_transactions(&self, client: &C, transactions: Vec) - -> Vec> - where C: BlockChainClient; + /// Imports transactions to transaction queue. + fn import_external_transactions( + &self, + client: &C, + transactions: Vec, + ) -> Vec> + where + C: BlockChainClient; - /// Imports own (node owner) transaction to queue. - fn import_own_transaction(&self, chain: &C, transaction: PendingTransaction) - -> Result<(), transaction::Error> - where C: BlockChainClient; + /// Imports own (node owner) transaction to queue. + fn import_own_transaction( + &self, + chain: &C, + transaction: PendingTransaction, + ) -> Result<(), transaction::Error> + where + C: BlockChainClient; - /// Imports transactions from potentially external sources, with behaviour determined - /// by the config flag `tx_queue_allow_unfamiliar_locals` - fn import_claimed_local_transaction(&self, chain: &C, transaction: PendingTransaction, trusted: bool) - -> Result<(), transaction::Error> - where C: BlockChainClient; + /// Imports transactions from potentially external sources, with behaviour determined + /// by the config flag `tx_queue_allow_unfamiliar_locals` + fn import_claimed_local_transaction( + &self, + chain: &C, + transaction: PendingTransaction, + trusted: bool, + ) -> Result<(), transaction::Error> + where + C: BlockChainClient; - /// Removes transaction from the pool. - /// - /// Attempts to "cancel" a transaction. If it was not propagated yet (or not accepted by other peers) - /// there is a good chance that the transaction will actually be removed. - /// NOTE: The transaction is not removed from pending block if there is one. - fn remove_transaction(&self, hash: &H256) -> Option>; + /// Removes transaction from the pool. + /// + /// Attempts to "cancel" a transaction. If it was not propagated yet (or not accepted by other peers) + /// there is a good chance that the transaction will actually be removed. 
+ /// NOTE: The transaction is not removed from pending block if there is one. + fn remove_transaction(&self, hash: &H256) -> Option>; - /// Query transaction from the pool given it's hash. - fn transaction(&self, hash: &H256) -> Option>; + /// Query transaction from the pool given it's hash. + fn transaction(&self, hash: &H256) -> Option>; - /// Returns next valid nonce for given address. - /// - /// This includes nonces of all transactions from this address in the pending queue - /// if they are consecutive. - /// NOTE: pool may contain some future transactions that will become pending after - /// transaction with nonce returned from this function is signed on. - fn next_nonce(&self, chain: &C, address: &Address) -> U256 - where C: Nonce + Sync; + /// Returns next valid nonce for given address. + /// + /// This includes nonces of all transactions from this address in the pending queue + /// if they are consecutive. + /// NOTE: pool may contain some future transactions that will become pending after + /// transaction with nonce returned from this function is signed on. + fn next_nonce(&self, chain: &C, address: &Address) -> U256 + where + C: Nonce + Sync; - /// Get a set of all pending transaction hashes. - /// - /// Depending on the settings may look in transaction pool or only in pending block. - fn pending_transaction_hashes(&self, chain: &C) -> BTreeSet where - C: ChainInfo + Sync; + /// Get a set of all pending transaction hashes. + /// + /// Depending on the settings may look in transaction pool or only in pending block. + fn pending_transaction_hashes(&self, chain: &C) -> BTreeSet + where + C: ChainInfo + Sync; - /// Get a list of all ready transactions either ordered by priority or unordered (cheaper). - /// - /// Depending on the settings may look in transaction pool or only in pending block. - /// If you don't need a full set of transactions, you can add `max_len` and create only a limited set of - /// transactions. 
- fn ready_transactions(&self, chain: &C, max_len: usize, ordering: PendingOrdering) -> Vec> - where C: ChainInfo + Nonce + Sync; + /// Get a list of all ready transactions either ordered by priority or unordered (cheaper). + /// + /// Depending on the settings may look in transaction pool or only in pending block. + /// If you don't need a full set of transactions, you can add `max_len` and create only a limited set of + /// transactions. + fn ready_transactions( + &self, + chain: &C, + max_len: usize, + ordering: PendingOrdering, + ) -> Vec> + where + C: ChainInfo + Nonce + Sync; - /// Get a list of all transactions in the pool (some of them might not be ready for inclusion yet). - fn queued_transactions(&self) -> Vec>; + /// Get a list of all transactions in the pool (some of them might not be ready for inclusion yet). + fn queued_transactions(&self) -> Vec>; - /// Get a list of all transaction hashes in the pool (some of them might not be ready for inclusion yet). - fn queued_transaction_hashes(&self) -> Vec; + /// Get a list of all transaction hashes in the pool (some of them might not be ready for inclusion yet). + fn queued_transaction_hashes(&self) -> Vec; - /// Get a list of local transactions with statuses. - fn local_transactions(&self) -> BTreeMap; + /// Get a list of local transactions with statuses. + fn local_transactions(&self) -> BTreeMap; - /// Get current queue status. - /// - /// Status includes verification thresholds and current pool utilization and limits. - fn queue_status(&self) -> QueueStatus; + /// Get current queue status. + /// + /// Status includes verification thresholds and current pool utilization and limits. + fn queue_status(&self) -> QueueStatus; - // Misc + // Misc - /// Suggested gas price. - fn sensible_gas_price(&self) -> U256; + /// Suggested gas price. + fn sensible_gas_price(&self) -> U256; - /// Suggested gas limit. - fn sensible_gas_limit(&self) -> U256; + /// Suggested gas limit. 
+ fn sensible_gas_limit(&self) -> U256; - /// Set a new minimum gas limit. - /// Will not work if dynamic gas calibration is set. - fn set_minimal_gas_price(&self, gas_price: U256) -> Result; + /// Set a new minimum gas limit. + /// Will not work if dynamic gas calibration is set. + fn set_minimal_gas_price(&self, gas_price: U256) -> Result; } diff --git a/ethcore/src/miner/pool_client.rs b/ethcore/src/miner/pool_client.rs index e6ff3a329..5946e2404 100644 --- a/ethcore/src/miner/pool_client.rs +++ b/ethcore/src/miner/pool_client.rs @@ -16,27 +16,21 @@ //! Blockchain access for transaction pool. -use std::{ - collections::HashMap, - fmt, - sync::Arc, -}; +use std::{collections::HashMap, fmt, sync::Arc}; -use ethereum_types::{H256, U256, Address}; -use ethcore_miner::local_accounts::LocalAccounts; -use ethcore_miner::pool; -use ethcore_miner::pool::client::NonceClient; -use ethcore_miner::service_transaction_checker::ServiceTransactionChecker; -use types::transaction::{ - self, - UnverifiedTransaction, - SignedTransaction, +use ethcore_miner::{ + local_accounts::LocalAccounts, pool, pool::client::NonceClient, + service_transaction_checker::ServiceTransactionChecker, }; -use types::header::Header; +use ethereum_types::{Address, H256, U256}; use parking_lot::RwLock; +use types::{ + header::Header, + transaction::{self, SignedTransaction, UnverifiedTransaction}, +}; use call_contract::CallContract; -use client::{TransactionId, BlockInfo, Nonce}; +use client::{BlockInfo, Nonce, TransactionId}; use engines::EthEngine; use miner; use transaction_ext::Transaction; @@ -44,209 +38,233 @@ use transaction_ext::Transaction; /// Cache for state nonces. #[derive(Debug, Clone)] pub struct NonceCache { - nonces: Arc>>, - limit: usize + nonces: Arc>>, + limit: usize, } impl NonceCache { - /// Create new cache with a limit of `limit` entries. 
- pub fn new(limit: usize) -> Self { - NonceCache { - nonces: Arc::new(RwLock::new(HashMap::with_capacity(limit / 2))), - limit, - } - } + /// Create new cache with a limit of `limit` entries. + pub fn new(limit: usize) -> Self { + NonceCache { + nonces: Arc::new(RwLock::new(HashMap::with_capacity(limit / 2))), + limit, + } + } - /// Retrieve a cached nonce for given sender. - pub fn get(&self, sender: &Address) -> Option { - self.nonces.read().get(sender).cloned() - } + /// Retrieve a cached nonce for given sender. + pub fn get(&self, sender: &Address) -> Option { + self.nonces.read().get(sender).cloned() + } - /// Clear all entries from the cache. - pub fn clear(&self) { - self.nonces.write().clear(); - } + /// Clear all entries from the cache. + pub fn clear(&self) { + self.nonces.write().clear(); + } } /// Blockchain accesss for transaction pool. pub struct PoolClient<'a, C: 'a> { - chain: &'a C, - cached_nonces: CachedNonceClient<'a, C>, - engine: &'a EthEngine, - accounts: &'a LocalAccounts, - best_block_header: Header, - service_transaction_checker: Option<&'a ServiceTransactionChecker>, + chain: &'a C, + cached_nonces: CachedNonceClient<'a, C>, + engine: &'a EthEngine, + accounts: &'a LocalAccounts, + best_block_header: Header, + service_transaction_checker: Option<&'a ServiceTransactionChecker>, } impl<'a, C: 'a> Clone for PoolClient<'a, C> { - fn clone(&self) -> Self { - PoolClient { - chain: self.chain, - cached_nonces: self.cached_nonces.clone(), - engine: self.engine, - accounts: self.accounts.clone(), - best_block_header: self.best_block_header.clone(), - service_transaction_checker: self.service_transaction_checker.clone(), - } - } + fn clone(&self) -> Self { + PoolClient { + chain: self.chain, + cached_nonces: self.cached_nonces.clone(), + engine: self.engine, + accounts: self.accounts.clone(), + best_block_header: self.best_block_header.clone(), + service_transaction_checker: self.service_transaction_checker.clone(), + } + } } -impl<'a, C: 'a> 
PoolClient<'a, C> where - C: BlockInfo + CallContract, +impl<'a, C: 'a> PoolClient<'a, C> +where + C: BlockInfo + CallContract, { - /// Creates new client given chain, nonce cache, accounts and service transaction verifier. - pub fn new( - chain: &'a C, - cache: &'a NonceCache, - engine: &'a EthEngine, - accounts: &'a LocalAccounts, - service_transaction_checker: Option<&'a ServiceTransactionChecker>, - ) -> Self { - let best_block_header = chain.best_block_header(); - PoolClient { - chain, - cached_nonces: CachedNonceClient::new(chain, cache), - engine, - accounts, - best_block_header, - service_transaction_checker, - } - } + /// Creates new client given chain, nonce cache, accounts and service transaction verifier. + pub fn new( + chain: &'a C, + cache: &'a NonceCache, + engine: &'a EthEngine, + accounts: &'a LocalAccounts, + service_transaction_checker: Option<&'a ServiceTransactionChecker>, + ) -> Self { + let best_block_header = chain.best_block_header(); + PoolClient { + chain, + cached_nonces: CachedNonceClient::new(chain, cache), + engine, + accounts, + best_block_header, + service_transaction_checker, + } + } - /// Verifies transaction against its block (before its import into this block) - /// Also Verifies if signed transaction is executable. - /// - /// This should perform any verifications that rely on chain status. - pub fn verify_for_pending_block(&self, tx: &SignedTransaction, header: &Header) -> Result<(), transaction::Error> { - self.engine.machine().verify_transaction_basic(tx, header)?; - self.engine.machine().verify_transaction(tx, &self.best_block_header, self.chain) - } + /// Verifies transaction against its block (before its import into this block) + /// Also Verifies if signed transaction is executable. + /// + /// This should perform any verifications that rely on chain status. 
+ pub fn verify_for_pending_block( + &self, + tx: &SignedTransaction, + header: &Header, + ) -> Result<(), transaction::Error> { + self.engine.machine().verify_transaction_basic(tx, header)?; + self.engine + .machine() + .verify_transaction(tx, &self.best_block_header, self.chain) + } } impl<'a, C: 'a> fmt::Debug for PoolClient<'a, C> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "PoolClient") - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "PoolClient") + } } -impl<'a, C: 'a> pool::client::Client for PoolClient<'a, C> where - C: miner::TransactionVerifierClient + Sync, +impl<'a, C: 'a> pool::client::Client for PoolClient<'a, C> +where + C: miner::TransactionVerifierClient + Sync, { - fn transaction_already_included(&self, hash: &H256) -> bool { - self.chain.transaction_block(TransactionId::Hash(*hash)).is_some() - } + fn transaction_already_included(&self, hash: &H256) -> bool { + self.chain + .transaction_block(TransactionId::Hash(*hash)) + .is_some() + } - fn verify_transaction_basic(&self, tx: &UnverifiedTransaction) -> Result<(), transaction::Error> { - self.engine.verify_transaction_basic(tx, &self.best_block_header)?; - Ok(()) - } + fn verify_transaction_basic( + &self, + tx: &UnverifiedTransaction, + ) -> Result<(), transaction::Error> { + self.engine + .verify_transaction_basic(tx, &self.best_block_header)?; + Ok(()) + } - fn verify_transaction(&self, tx: UnverifiedTransaction) -> Result { - self.engine.verify_transaction_basic(&tx, &self.best_block_header)?; - let tx = self.engine.verify_transaction_unordered(tx, &self.best_block_header)?; + fn verify_transaction( + &self, + tx: UnverifiedTransaction, + ) -> Result { + self.engine + .verify_transaction_basic(&tx, &self.best_block_header)?; + let tx = self + .engine + .verify_transaction_unordered(tx, &self.best_block_header)?; - self.engine.machine().verify_transaction(&tx, &self.best_block_header, self.chain)?; - Ok(tx) - } + self.engine + 
.machine() + .verify_transaction(&tx, &self.best_block_header, self.chain)?; + Ok(tx) + } - fn account_details(&self, address: &Address) -> pool::client::AccountDetails { - pool::client::AccountDetails { - nonce: self.cached_nonces.account_nonce(address), - balance: self.chain.latest_balance(address), - is_local: self.accounts.is_local(address), - } - } + fn account_details(&self, address: &Address) -> pool::client::AccountDetails { + pool::client::AccountDetails { + nonce: self.cached_nonces.account_nonce(address), + balance: self.chain.latest_balance(address), + is_local: self.accounts.is_local(address), + } + } - fn required_gas(&self, tx: &transaction::Transaction) -> U256 { - tx.gas_required(&self.chain.latest_schedule()).into() - } + fn required_gas(&self, tx: &transaction::Transaction) -> U256 { + tx.gas_required(&self.chain.latest_schedule()).into() + } - fn transaction_type(&self, tx: &SignedTransaction) -> pool::client::TransactionType { - match self.service_transaction_checker { - None => pool::client::TransactionType::Regular, - Some(ref checker) => match checker.check(self.chain, &tx) { - Ok(true) => pool::client::TransactionType::Service, - Ok(false) => pool::client::TransactionType::Regular, - Err(e) => { - debug!(target: "txqueue", "Unable to verify service transaction: {:?}", e); - pool::client::TransactionType::Regular - }, - } - } - } + fn transaction_type(&self, tx: &SignedTransaction) -> pool::client::TransactionType { + match self.service_transaction_checker { + None => pool::client::TransactionType::Regular, + Some(ref checker) => match checker.check(self.chain, &tx) { + Ok(true) => pool::client::TransactionType::Service, + Ok(false) => pool::client::TransactionType::Regular, + Err(e) => { + debug!(target: "txqueue", "Unable to verify service transaction: {:?}", e); + pool::client::TransactionType::Regular + } + }, + } + } - fn decode_transaction(&self, transaction: &[u8]) -> Result { - self.engine.decode_transaction(transaction) - } + fn 
decode_transaction( + &self, + transaction: &[u8], + ) -> Result { + self.engine.decode_transaction(transaction) + } } -impl<'a, C: 'a> NonceClient for PoolClient<'a, C> where - C: Nonce + Sync, +impl<'a, C: 'a> NonceClient for PoolClient<'a, C> +where + C: Nonce + Sync, { - fn account_nonce(&self, address: &Address) -> U256 { - self.cached_nonces.account_nonce(address) - } + fn account_nonce(&self, address: &Address) -> U256 { + self.cached_nonces.account_nonce(address) + } } pub(crate) struct CachedNonceClient<'a, C: 'a> { - client: &'a C, - cache: &'a NonceCache, + client: &'a C, + cache: &'a NonceCache, } impl<'a, C: 'a> Clone for CachedNonceClient<'a, C> { - fn clone(&self) -> Self { - CachedNonceClient { - client: self.client, - cache: self.cache, - } - } + fn clone(&self) -> Self { + CachedNonceClient { + client: self.client, + cache: self.cache, + } + } } impl<'a, C: 'a> fmt::Debug for CachedNonceClient<'a, C> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("CachedNonceClient") - .field("cache", &self.cache.nonces.read().len()) - .field("limit", &self.cache.limit) - .finish() - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("CachedNonceClient") + .field("cache", &self.cache.nonces.read().len()) + .field("limit", &self.cache.limit) + .finish() + } } impl<'a, C: 'a> CachedNonceClient<'a, C> { - pub fn new(client: &'a C, cache: &'a NonceCache) -> Self { - CachedNonceClient { - client, - cache, - } - } + pub fn new(client: &'a C, cache: &'a NonceCache) -> Self { + CachedNonceClient { client, cache } + } } -impl<'a, C: 'a> NonceClient for CachedNonceClient<'a, C> where - C: Nonce + Sync, +impl<'a, C: 'a> NonceClient for CachedNonceClient<'a, C> +where + C: Nonce + Sync, { - fn account_nonce(&self, address: &Address) -> U256 { - if let Some(nonce) = self.cache.nonces.read().get(address) { - return *nonce; - } + fn account_nonce(&self, address: &Address) -> U256 { + if let Some(nonce) = 
self.cache.nonces.read().get(address) { + return *nonce; + } - // We don't check again if cache has been populated. - // It's not THAT expensive to fetch the nonce from state. - let mut cache = self.cache.nonces.write(); - let nonce = self.client.latest_nonce(address); - cache.insert(*address, nonce); + // We don't check again if cache has been populated. + // It's not THAT expensive to fetch the nonce from state. + let mut cache = self.cache.nonces.write(); + let nonce = self.client.latest_nonce(address); + cache.insert(*address, nonce); - if cache.len() < self.cache.limit { - return nonce - } + if cache.len() < self.cache.limit { + return nonce; + } - debug!(target: "txpool", "NonceCache: reached limit."); - trace_time!("nonce_cache:clear"); + debug!(target: "txpool", "NonceCache: reached limit."); + trace_time!("nonce_cache:clear"); - // Remove excessive amount of entries from the cache - let to_remove: Vec<_> = cache.keys().take(self.cache.limit / 2).cloned().collect(); - for x in to_remove { - cache.remove(&x); - } + // Remove excessive amount of entries from the cache + let to_remove: Vec<_> = cache.keys().take(self.cache.limit / 2).cloned().collect(); + for x in to_remove { + cache.remove(&x); + } - nonce - } + nonce + } } diff --git a/ethcore/src/miner/stratum.rs b/ethcore/src/miner/stratum.rs index 35a35fbb2..4dfaa2151 100644 --- a/ethcore/src/miner/stratum.rs +++ b/ethcore/src/miner/stratum.rs @@ -16,20 +16,20 @@ //! 
Client-side stratum job dispatcher and mining notifier handler -use std::sync::{Arc, Weak}; -use std::net::{SocketAddr, AddrParseError}; -use std::fmt; +use std::{ + fmt, + net::{AddrParseError, SocketAddr}, + sync::{Arc, Weak}, +}; use client::{Client, ImportSealedBlock}; -use ethereum_types::{H64, H256, clean_0x, U256}; use ethash::{self, SeedHashCompute}; #[cfg(feature = "work-notify")] use ethcore_miner::work_notify::NotifyWork; #[cfg(feature = "work-notify")] use ethcore_stratum::PushWorkHandler; -use ethcore_stratum::{ - JobDispatcher, Stratum as StratumService, Error as StratumServiceError, -}; +use ethcore_stratum::{Error as StratumServiceError, JobDispatcher, Stratum as StratumService}; +use ethereum_types::{clean_0x, H256, H64, U256}; use miner::{Miner, MinerService}; use parking_lot::Mutex; use rlp::encode; @@ -37,212 +37,233 @@ use rlp::encode; /// Configures stratum server options. #[derive(Debug, PartialEq, Clone)] pub struct Options { - /// Working directory - pub io_path: String, - /// Network address - pub listen_addr: String, - /// Port - pub port: u16, - /// Secret for peers - pub secret: Option, + /// Working directory + pub io_path: String, + /// Network address + pub listen_addr: String, + /// Port + pub port: u16, + /// Secret for peers + pub secret: Option, } struct SubmitPayload { - nonce: H64, - pow_hash: H256, - mix_hash: H256, + nonce: H64, + pow_hash: H256, + mix_hash: H256, } impl SubmitPayload { - fn from_args(payload: Vec) -> Result { - if payload.len() != 3 { - return Err(PayloadError::ArgumentsAmountUnexpected(payload.len())); - } + fn from_args(payload: Vec) -> Result { + if payload.len() != 3 { + return Err(PayloadError::ArgumentsAmountUnexpected(payload.len())); + } - let nonce = match clean_0x(&payload[0]).parse::() { - Ok(nonce) => nonce, - Err(e) => { - warn!(target: "stratum", "submit_work ({}): invalid nonce ({:?})", &payload[0], e); - return Err(PayloadError::InvalidNonce(payload[0].clone())) - } - }; + let nonce = match 
clean_0x(&payload[0]).parse::() { + Ok(nonce) => nonce, + Err(e) => { + warn!(target: "stratum", "submit_work ({}): invalid nonce ({:?})", &payload[0], e); + return Err(PayloadError::InvalidNonce(payload[0].clone())); + } + }; - let pow_hash = match clean_0x(&payload[1]).parse::() { - Ok(pow_hash) => pow_hash, - Err(e) => { - warn!(target: "stratum", "submit_work ({}): invalid hash ({:?})", &payload[1], e); - return Err(PayloadError::InvalidPowHash(payload[1].clone())); - } - }; + let pow_hash = match clean_0x(&payload[1]).parse::() { + Ok(pow_hash) => pow_hash, + Err(e) => { + warn!(target: "stratum", "submit_work ({}): invalid hash ({:?})", &payload[1], e); + return Err(PayloadError::InvalidPowHash(payload[1].clone())); + } + }; - let mix_hash = match clean_0x(&payload[2]).parse::() { - Ok(mix_hash) => mix_hash, - Err(e) => { - warn!(target: "stratum", "submit_work ({}): invalid mix-hash ({:?})", &payload[2], e); - return Err(PayloadError::InvalidMixHash(payload[2].clone())); - } - }; + let mix_hash = match clean_0x(&payload[2]).parse::() { + Ok(mix_hash) => mix_hash, + Err(e) => { + warn!(target: "stratum", "submit_work ({}): invalid mix-hash ({:?})", &payload[2], e); + return Err(PayloadError::InvalidMixHash(payload[2].clone())); + } + }; - Ok(SubmitPayload { - nonce: nonce, - pow_hash: pow_hash, - mix_hash: mix_hash, - }) - } + Ok(SubmitPayload { + nonce: nonce, + pow_hash: pow_hash, + mix_hash: mix_hash, + }) + } } #[derive(Debug)] enum PayloadError { - ArgumentsAmountUnexpected(usize), - InvalidNonce(String), - InvalidPowHash(String), - InvalidMixHash(String), + ArgumentsAmountUnexpected(usize), + InvalidNonce(String), + InvalidPowHash(String), + InvalidMixHash(String), } impl fmt::Display for PayloadError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self, f) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self, f) + } } /// Job dispatcher for stratum service pub struct StratumJobDispatcher { - 
seed_compute: Mutex, - client: Weak, - miner: Weak, + seed_compute: Mutex, + client: Weak, + miner: Weak, } impl JobDispatcher for StratumJobDispatcher { - fn initial(&self) -> Option { - // initial payload may contain additional data, not in this case - self.job() - } + fn initial(&self) -> Option { + // initial payload may contain additional data, not in this case + self.job() + } - fn job(&self) -> Option { - self.with_core(|client, miner| miner.work_package(&*client).map(|(pow_hash, number, _timestamp, difficulty)| { - self.payload(pow_hash, difficulty, number) - })) - } + fn job(&self) -> Option { + self.with_core(|client, miner| { + miner + .work_package(&*client) + .map(|(pow_hash, number, _timestamp, difficulty)| { + self.payload(pow_hash, difficulty, number) + }) + }) + } - fn submit(&self, payload: Vec) -> Result<(), StratumServiceError> { - let payload = SubmitPayload::from_args(payload).map_err(|e| - StratumServiceError::Dispatch(e.to_string()) - )?; + fn submit(&self, payload: Vec) -> Result<(), StratumServiceError> { + let payload = SubmitPayload::from_args(payload) + .map_err(|e| StratumServiceError::Dispatch(e.to_string()))?; - trace!( - target: "stratum", - "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", - payload.nonce, - payload.pow_hash, - payload.mix_hash, - ); + trace!( + target: "stratum", + "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", + payload.nonce, + payload.pow_hash, + payload.mix_hash, + ); - self.with_core_result(|client, miner| { - let seal = vec![encode(&payload.mix_hash), encode(&payload.nonce)]; + self.with_core_result(|client, miner| { + let seal = vec![encode(&payload.mix_hash), encode(&payload.nonce)]; - let import = miner.submit_seal(payload.pow_hash, seal) - .and_then(|block| client.import_sealed_block(block)); - match import { - Ok(_) => Ok(()), - Err(e) => { - warn!(target: "stratum", "submit_seal error: {:?}", e); - Err(StratumServiceError::Dispatch(e.to_string())) - } - } - }) - } + let import = 
miner + .submit_seal(payload.pow_hash, seal) + .and_then(|block| client.import_sealed_block(block)); + match import { + Ok(_) => Ok(()), + Err(e) => { + warn!(target: "stratum", "submit_seal error: {:?}", e); + Err(StratumServiceError::Dispatch(e.to_string())) + } + } + }) + } } impl StratumJobDispatcher { - /// New stratum job dispatcher given the miner and client - fn new(miner: Weak, client: Weak) -> StratumJobDispatcher { - StratumJobDispatcher { - seed_compute: Mutex::new(SeedHashCompute::default()), - client: client, - miner: miner, - } - } + /// New stratum job dispatcher given the miner and client + fn new(miner: Weak, client: Weak) -> StratumJobDispatcher { + StratumJobDispatcher { + seed_compute: Mutex::new(SeedHashCompute::default()), + client: client, + miner: miner, + } + } - /// Serializes payload for stratum service - fn payload(&self, pow_hash: H256, difficulty: U256, number: u64) -> String { - // TODO: move this to engine - let target = ethash::difficulty_to_boundary(&difficulty); - let seed_hash = &self.seed_compute.lock().hash_block_number(number); - let seed_hash = H256::from_slice(&seed_hash[..]); - format!( - r#"["0x", "0x{:x}","0x{:x}","0x{:x}","0x{:x}"]"#, - pow_hash, seed_hash, target, number - ) - } + /// Serializes payload for stratum service + fn payload(&self, pow_hash: H256, difficulty: U256, number: u64) -> String { + // TODO: move this to engine + let target = ethash::difficulty_to_boundary(&difficulty); + let seed_hash = &self.seed_compute.lock().hash_block_number(number); + let seed_hash = H256::from_slice(&seed_hash[..]); + format!( + r#"["0x", "0x{:x}","0x{:x}","0x{:x}","0x{:x}"]"#, + pow_hash, seed_hash, target, number + ) + } - fn with_core(&self, f: F) -> Option where F: Fn(Arc, Arc) -> Option { - self.client.upgrade().and_then(|client| self.miner.upgrade().and_then(|miner| (f)(client, miner))) - } + fn with_core(&self, f: F) -> Option + where + F: Fn(Arc, Arc) -> Option, + { + self.client + .upgrade() + .and_then(|client| 
self.miner.upgrade().and_then(|miner| (f)(client, miner))) + } - fn with_core_result(&self, f: F) -> Result<(), StratumServiceError> where F: Fn(Arc, Arc) -> Result<(), StratumServiceError> { - match (self.client.upgrade(), self.miner.upgrade()) { - (Some(client), Some(miner)) => f(client, miner), - _ => Ok(()), - } - } + fn with_core_result(&self, f: F) -> Result<(), StratumServiceError> + where + F: Fn(Arc, Arc) -> Result<(), StratumServiceError>, + { + match (self.client.upgrade(), self.miner.upgrade()) { + (Some(client), Some(miner)) => f(client, miner), + _ => Ok(()), + } + } } /// Wrapper for dedicated stratum service pub struct Stratum { - dispatcher: Arc, - service: Arc, + dispatcher: Arc, + service: Arc, } #[derive(Debug)] /// Stratum error pub enum Error { - /// IPC sockets error - Service(StratumServiceError), - /// Invalid network address - Address(AddrParseError), + /// IPC sockets error + Service(StratumServiceError), + /// Invalid network address + Address(AddrParseError), } impl From for Error { - fn from(service_err: StratumServiceError) -> Error { Error::Service(service_err) } + fn from(service_err: StratumServiceError) -> Error { + Error::Service(service_err) + } } impl From for Error { - fn from(err: AddrParseError) -> Error { Error::Address(err) } + fn from(err: AddrParseError) -> Error { + Error::Address(err) + } } #[cfg(feature = "work-notify")] impl NotifyWork for Stratum { - fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) { - trace!(target: "stratum", "Notify work"); + fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) { + trace!(target: "stratum", "Notify work"); - self.service.push_work_all( - self.dispatcher.payload(pow_hash, difficulty, number) - ); - } + self.service + .push_work_all(self.dispatcher.payload(pow_hash, difficulty, number)); + } } impl Stratum { + /// New stratum job dispatcher, given the miner, client and dedicated stratum service + pub fn start( + options: &Options, + miner: Weak, + client: 
Weak, + ) -> Result { + use std::net::IpAddr; - /// New stratum job dispatcher, given the miner, client and dedicated stratum service - pub fn start(options: &Options, miner: Weak, client: Weak) -> Result { - use std::net::IpAddr; + let dispatcher = Arc::new(StratumJobDispatcher::new(miner, client)); - let dispatcher = Arc::new(StratumJobDispatcher::new(miner, client)); + let service = StratumService::start( + &SocketAddr::new(options.listen_addr.parse::()?, options.port), + dispatcher.clone(), + options.secret.clone(), + )?; - let service = StratumService::start( - &SocketAddr::new(options.listen_addr.parse::()?, options.port), - dispatcher.clone(), - options.secret.clone(), - )?; + Ok(Stratum { + dispatcher, + service, + }) + } - Ok(Stratum { dispatcher, service }) - } - - /// Start STRATUM job dispatcher and register it in the miner - #[cfg(feature = "work-notify")] - pub fn register(cfg: &Options, miner: Arc, client: Weak) -> Result<(), Error> { - let stratum = Stratum::start(cfg, Arc::downgrade(&miner.clone()), client)?; - miner.add_work_listener(Box::new(stratum) as Box); - Ok(()) - } + /// Start STRATUM job dispatcher and register it in the miner + #[cfg(feature = "work-notify")] + pub fn register(cfg: &Options, miner: Arc, client: Weak) -> Result<(), Error> { + let stratum = Stratum::start(cfg, Arc::downgrade(&miner.clone()), client)?; + miner.add_work_listener(Box::new(stratum) as Box); + Ok(()) + } } diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index 39dce6e36..0aac929a5 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -16,130 +16,159 @@ //! Account system expressed in Plain Old Data. 
-use std::fmt; -use std::collections::BTreeMap; -use itertools::Itertools; -use hash::{keccak}; -use ethereum_types::{H256, U256}; -use hash_db::HashDB; -use kvdb::DBValue; -use keccak_hasher::KeccakHasher; -use triehash::sec_trie_root; use bytes::Bytes; -use trie::TrieFactory; -use ethtrie::RlpCodec; -use state::Account; +use ethereum_types::{H256, U256}; use ethjson; -use types::account_diff::*; +use ethtrie::RlpCodec; +use hash::keccak; +use hash_db::HashDB; +use itertools::Itertools; +use keccak_hasher::KeccakHasher; +use kvdb::DBValue; use rlp::{self, RlpStream}; -use serde::Serializer; use rustc_hex::ToHex; +use serde::Serializer; +use state::Account; +use std::{collections::BTreeMap, fmt}; +use trie::TrieFactory; +use triehash::sec_trie_root; +use types::account_diff::*; #[derive(Debug, Clone, PartialEq, Eq, Serialize)] /// An account, expressed as Plain-Old-Data (hence the name). /// Does not have a DB overlay cache, code hash or anything like that. pub struct PodAccount { - /// The balance of the account. - pub balance: U256, - /// The nonce of the account. - pub nonce: U256, - #[serde(serialize_with="opt_bytes_to_hex")] - /// The code of the account or `None` in the special case that it is unknown. - pub code: Option, - /// The storage of the account. - pub storage: BTreeMap, + /// The balance of the account. + pub balance: U256, + /// The nonce of the account. + pub nonce: U256, + #[serde(serialize_with = "opt_bytes_to_hex")] + /// The code of the account or `None` in the special case that it is unknown. + pub code: Option, + /// The storage of the account. 
+ pub storage: BTreeMap, } fn opt_bytes_to_hex(opt_bytes: &Option, serializer: S) -> Result - where S: Serializer +where + S: Serializer, { - serializer.collect_str(&format_args!("0x{}",opt_bytes.as_ref().map_or("".to_string(), |b|b.to_hex()))) + serializer.collect_str(&format_args!( + "0x{}", + opt_bytes.as_ref().map_or("".to_string(), |b| b.to_hex()) + )) } impl PodAccount { - /// Convert Account to a PodAccount. - /// NOTE: This will silently fail unless the account is fully cached. - pub fn from_account(acc: &Account) -> PodAccount { - PodAccount { - balance: *acc.balance(), - nonce: *acc.nonce(), - storage: acc.storage_changes().iter().fold(BTreeMap::new(), |mut m, (k, v)| {m.insert(k.clone(), v.clone()); m}), - code: acc.code().map(|x| x.to_vec()), - } - } + /// Convert Account to a PodAccount. + /// NOTE: This will silently fail unless the account is fully cached. + pub fn from_account(acc: &Account) -> PodAccount { + PodAccount { + balance: *acc.balance(), + nonce: *acc.nonce(), + storage: acc + .storage_changes() + .iter() + .fold(BTreeMap::new(), |mut m, (k, v)| { + m.insert(k.clone(), v.clone()); + m + }), + code: acc.code().map(|x| x.to_vec()), + } + } - /// Returns the RLP for this account. - pub fn rlp(&self) -> Bytes { - let mut stream = RlpStream::new_list(4); - stream.append(&self.nonce); - stream.append(&self.balance); - stream.append(&sec_trie_root(self.storage.iter().map(|(k, v)| (k, rlp::encode(&U256::from(&**v)))))); - stream.append(&keccak(&self.code.as_ref().unwrap_or(&vec![]))); - stream.out() - } + /// Returns the RLP for this account. + pub fn rlp(&self) -> Bytes { + let mut stream = RlpStream::new_list(4); + stream.append(&self.nonce); + stream.append(&self.balance); + stream.append(&sec_trie_root( + self.storage + .iter() + .map(|(k, v)| (k, rlp::encode(&U256::from(&**v)))), + )); + stream.append(&keccak(&self.code.as_ref().unwrap_or(&vec![]))); + stream.out() + } - /// Place additional data into given hash DB. 
- pub fn insert_additional(&self, db: &mut HashDB, factory: &TrieFactory) { - match self.code { - Some(ref c) if !c.is_empty() => { db.insert(c); } - _ => {} - } - let mut r = H256::new(); - let mut t = factory.create(db, &mut r); - for (k, v) in &self.storage { - if let Err(e) = t.insert(k, &rlp::encode(&U256::from(&**v))) { - warn!("Encountered potential DB corruption: {}", e); - } - } - } + /// Place additional data into given hash DB. + pub fn insert_additional( + &self, + db: &mut HashDB, + factory: &TrieFactory, + ) { + match self.code { + Some(ref c) if !c.is_empty() => { + db.insert(c); + } + _ => {} + } + let mut r = H256::new(); + let mut t = factory.create(db, &mut r); + for (k, v) in &self.storage { + if let Err(e) = t.insert(k, &rlp::encode(&U256::from(&**v))) { + warn!("Encountered potential DB corruption: {}", e); + } + } + } } impl From for PodAccount { - fn from(a: ethjson::blockchain::Account) -> Self { - PodAccount { - balance: a.balance.into(), - nonce: a.nonce.into(), - code: Some(a.code.into()), - storage: a.storage.into_iter().map(|(key, value)| { - let key: U256 = key.into(); - let value: U256 = value.into(); - (H256::from(key), H256::from(value)) - }).collect(), - } - } + fn from(a: ethjson::blockchain::Account) -> Self { + PodAccount { + balance: a.balance.into(), + nonce: a.nonce.into(), + code: Some(a.code.into()), + storage: a + .storage + .into_iter() + .map(|(key, value)| { + let key: U256 = key.into(); + let value: U256 = value.into(); + (H256::from(key), H256::from(value)) + }) + .collect(), + } + } } impl From for PodAccount { - fn from(a: ethjson::spec::Account) -> Self { - PodAccount { - balance: a.balance.map_or_else(U256::zero, Into::into), - nonce: a.nonce.map_or_else(U256::zero, Into::into), - code: Some(a.code.map_or_else(Vec::new, Into::into)), - storage: a.storage.map_or_else(BTreeMap::new, |s| s.into_iter().map(|(key, value)| { - let key: U256 = key.into(); - let value: U256 = value.into(); - (H256::from(key), 
H256::from(value)) - }).collect()), - } - } + fn from(a: ethjson::spec::Account) -> Self { + PodAccount { + balance: a.balance.map_or_else(U256::zero, Into::into), + nonce: a.nonce.map_or_else(U256::zero, Into::into), + code: Some(a.code.map_or_else(Vec::new, Into::into)), + storage: a.storage.map_or_else(BTreeMap::new, |s| { + s.into_iter() + .map(|(key, value)| { + let key: U256 = key.into(); + let value: U256 = value.into(); + (H256::from(key), H256::from(value)) + }) + .collect() + }), + } + } } impl fmt::Display for PodAccount { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)", - self.balance, - self.nonce, - self.code.as_ref().map_or(0, |c| c.len()), - self.code.as_ref().map_or_else(H256::new, |c| keccak(c)), - self.storage.len(), - ) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)", + self.balance, + self.nonce, + self.code.as_ref().map_or(0, |c| c.len()), + self.code.as_ref().map_or_else(H256::new, |c| keccak(c)), + self.storage.len(), + ) + } } /// Determine difference between two optionally existant `Account`s. Returns None /// if they are the same. 
pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option { - match (pre, post) { + match (pre, post) { (None, Some(x)) => Some(AccountDiff { balance: Diff::Born(x.balance), nonce: Diff::Born(x.nonce), @@ -181,71 +210,108 @@ pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0] - }; - let b = PodAccount { - balance: 0.into(), - nonce: 0.into(), - code: Some(vec![]), - storage: map_into![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9] - }; - assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff { - balance: Diff::Same, - nonce: Diff::Same, - code: Diff::Same, - storage: map![ - 2.into() => Diff::new(2.into(), 3.into()), - 3.into() => Diff::new(3.into(), 0.into()), - 4.into() => Diff::new(4.into(), 0.into()), - 7.into() => Diff::new(0.into(), 7.into()), - 9.into() => Diff::new(0.into(), 9.into()) - ], - })); - } + #[test] + fn storage() { + let a = PodAccount { + balance: 0.into(), + nonce: 0.into(), + code: Some(vec![]), + storage: map_into![1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0], + }; + let b = PodAccount { + balance: 0.into(), + nonce: 0.into(), + code: Some(vec![]), + storage: map_into![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9], + }; + assert_eq!( + diff_pod(Some(&a), Some(&b)), + Some(AccountDiff { + balance: Diff::Same, + nonce: Diff::Same, + code: Diff::Same, + storage: map![ + 2.into() => Diff::new(2.into(), 3.into()), + 3.into() => Diff::new(3.into(), 0.into()), + 4.into() => Diff::new(4.into(), 0.into()), + 7.into() => Diff::new(0.into(), 7.into()), + 9.into() => Diff::new(0.into(), 9.into()) + ], + }) + ); + } } diff --git a/ethcore/src/pod_state.rs b/ethcore/src/pod_state.rs index c1130faa7..b7545178f 100644 --- a/ethcore/src/pod_state.rs +++ b/ethcore/src/pod_state.rs @@ -16,190 +16,229 @@ //! State of all accounts in the system expressed in Plain Old Data. 
-use std::fmt; -use std::collections::BTreeMap; -use itertools::Itertools; -use ethereum_types::{H256, Address}; -use triehash::sec_trie_root; -use pod_account::{self, PodAccount}; -use types::state_diff::StateDiff; +use ethereum_types::{Address, H256}; use ethjson; +use itertools::Itertools; +use pod_account::{self, PodAccount}; +use std::{collections::BTreeMap, fmt}; +use triehash::sec_trie_root; +use types::state_diff::StateDiff; /// State of all accounts in the system expressed in Plain Old Data. #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize)] pub struct PodState(BTreeMap); impl PodState { - /// Contruct a new object from the `m`. - pub fn new() -> PodState { Default::default() } + /// Contruct a new object from the `m`. + pub fn new() -> PodState { + Default::default() + } - /// Contruct a new object from the `m`. - pub fn from(m: BTreeMap) -> PodState { PodState(m) } + /// Contruct a new object from the `m`. + pub fn from(m: BTreeMap) -> PodState { + PodState(m) + } - /// Get the underlying map. - pub fn get(&self) -> &BTreeMap { &self.0 } + /// Get the underlying map. + pub fn get(&self) -> &BTreeMap { + &self.0 + } - /// Get the root hash of the trie of the RLP of this. - pub fn root(&self) -> H256 { - sec_trie_root(self.0.iter().map(|(k, v)| (k, v.rlp()))) - } + /// Get the root hash of the trie of the RLP of this. + pub fn root(&self) -> H256 { + sec_trie_root(self.0.iter().map(|(k, v)| (k, v.rlp()))) + } - /// Drain object to get the underlying map. - pub fn drain(self) -> BTreeMap { self.0 } + /// Drain object to get the underlying map. 
+ pub fn drain(self) -> BTreeMap { + self.0 + } } impl From for PodState { - fn from(s: ethjson::blockchain::State) -> PodState { - let state = s.into_iter().map(|(addr, acc)| (addr.into(), PodAccount::from(acc))).collect(); - PodState(state) - } + fn from(s: ethjson::blockchain::State) -> PodState { + let state = s + .into_iter() + .map(|(addr, acc)| (addr.into(), PodAccount::from(acc))) + .collect(); + PodState(state) + } } impl From for PodState { - fn from(s: ethjson::spec::State) -> PodState { - let state: BTreeMap<_,_> = s.into_iter() - .filter(|pair| !pair.1.is_empty()) - .map(|(addr, acc)| (addr.into(), PodAccount::from(acc))) - .collect(); - PodState(state) - } + fn from(s: ethjson::spec::State) -> PodState { + let state: BTreeMap<_, _> = s + .into_iter() + .filter(|pair| !pair.1.is_empty()) + .map(|(addr, acc)| (addr.into(), PodAccount::from(acc))) + .collect(); + PodState(state) + } } impl fmt::Display for PodState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (add, acc) in &self.0 { - writeln!(f, "{} => {}", add, acc)?; - } - Ok(()) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for (add, acc) in &self.0 { + writeln!(f, "{} => {}", add, acc)?; + } + Ok(()) + } } /// Calculate and return diff between `pre` state and `post` state. 
pub fn diff_pod(pre: &PodState, post: &PodState) -> StateDiff { - StateDiff { - raw: pre.get().keys() - .merge(post.get().keys()) - .filter_map(|acc| pod_account::diff_pod(pre.get().get(acc), post.get().get(acc)).map(|d| (acc.clone(), d))) - .collect() - } + StateDiff { + raw: pre + .get() + .keys() + .merge(post.get().keys()) + .filter_map(|acc| { + pod_account::diff_pod(pre.get().get(acc), post.get().get(acc)) + .map(|d| (acc.clone(), d)) + }) + .collect(), + } } #[cfg(test)] mod test { - use std::collections::BTreeMap; - use types::state_diff::*; - use types::account_diff::*; - use pod_account::PodAccount; - use super::PodState; + use super::PodState; + use pod_account::PodAccount; + use std::collections::BTreeMap; + use types::{account_diff::*, state_diff::*}; - #[test] - fn create_delete() { - let a = PodState::from(map![ - 1.into() => PodAccount { - balance: 69.into(), - nonce: 0.into(), - code: Some(Vec::new()), - storage: map![], - } - ]); - assert_eq!(super::diff_pod(&a, &PodState::new()), StateDiff { raw: map![ - 1.into() => AccountDiff{ - balance: Diff::Died(69.into()), - nonce: Diff::Died(0.into()), - code: Diff::Died(vec![]), - storage: map![], - } - ]}); - assert_eq!(super::diff_pod(&PodState::new(), &a), StateDiff{ raw: map![ - 1.into() => AccountDiff{ - balance: Diff::Born(69.into()), - nonce: Diff::Born(0.into()), - code: Diff::Born(vec![]), - storage: map![], - } - ]}); - } + #[test] + fn create_delete() { + let a = PodState::from(map![ + 1.into() => PodAccount { + balance: 69.into(), + nonce: 0.into(), + code: Some(Vec::new()), + storage: map![], + } + ]); + assert_eq!( + super::diff_pod(&a, &PodState::new()), + StateDiff { + raw: map![ + 1.into() => AccountDiff{ + balance: Diff::Died(69.into()), + nonce: Diff::Died(0.into()), + code: Diff::Died(vec![]), + storage: map![], + } + ] + } + ); + assert_eq!( + super::diff_pod(&PodState::new(), &a), + StateDiff { + raw: map![ + 1.into() => AccountDiff{ + balance: Diff::Born(69.into()), + nonce: 
Diff::Born(0.into()), + code: Diff::Born(vec![]), + storage: map![], + } + ] + } + ); + } - #[test] - fn create_delete_with_unchanged() { - let a = PodState::from(map![ - 1.into() => PodAccount { - balance: 69.into(), - nonce: 0.into(), - code: Some(Vec::new()), - storage: map![], - } - ]); - let b = PodState::from(map![ - 1.into() => PodAccount { - balance: 69.into(), - nonce: 0.into(), - code: Some(Vec::new()), - storage: map![], - }, - 2.into() => PodAccount { - balance: 69.into(), - nonce: 0.into(), - code: Some(Vec::new()), - storage: map![], - } - ]); - assert_eq!(super::diff_pod(&a, &b), StateDiff { raw: map![ - 2.into() => AccountDiff{ - balance: Diff::Born(69.into()), - nonce: Diff::Born(0.into()), - code: Diff::Born(vec![]), - storage: map![], - } - ]}); - assert_eq!(super::diff_pod(&b, &a), StateDiff { raw: map![ - 2.into() => AccountDiff{ - balance: Diff::Died(69.into()), - nonce: Diff::Died(0.into()), - code: Diff::Died(vec![]), - storage: map![], - } - ]}); - } - - #[test] - fn change_with_unchanged() { - let a = PodState::from(map![ - 1.into() => PodAccount { - balance: 69.into(), - nonce: 0.into(), - code: Some(Vec::new()), - storage: map![], - }, - 2.into() => PodAccount { - balance: 69.into(), - nonce: 0.into(), - code: Some(Vec::new()), - storage: map![], - } - ]); - let b = PodState::from(map![ - 1.into() => PodAccount { - balance: 69.into(), - nonce: 1.into(), - code: Some(Vec::new()), - storage: map![], - }, - 2.into() => PodAccount { - balance: 69.into(), - nonce: 0.into(), - code: Some(Vec::new()), - storage: map![], - } - ]); - assert_eq!(super::diff_pod(&a, &b), StateDiff { raw: map![ - 1.into() => AccountDiff{ - balance: Diff::Same, - nonce: Diff::Changed(0.into(), 1.into()), - code: Diff::Same, - storage: map![], - } - ]}); - } + #[test] + fn create_delete_with_unchanged() { + let a = PodState::from(map![ + 1.into() => PodAccount { + balance: 69.into(), + nonce: 0.into(), + code: Some(Vec::new()), + storage: map![], + } + ]); + let b = 
PodState::from(map![ + 1.into() => PodAccount { + balance: 69.into(), + nonce: 0.into(), + code: Some(Vec::new()), + storage: map![], + }, + 2.into() => PodAccount { + balance: 69.into(), + nonce: 0.into(), + code: Some(Vec::new()), + storage: map![], + } + ]); + assert_eq!( + super::diff_pod(&a, &b), + StateDiff { + raw: map![ + 2.into() => AccountDiff{ + balance: Diff::Born(69.into()), + nonce: Diff::Born(0.into()), + code: Diff::Born(vec![]), + storage: map![], + } + ] + } + ); + assert_eq!( + super::diff_pod(&b, &a), + StateDiff { + raw: map![ + 2.into() => AccountDiff{ + balance: Diff::Died(69.into()), + nonce: Diff::Died(0.into()), + code: Diff::Died(vec![]), + storage: map![], + } + ] + } + ); + } + #[test] + fn change_with_unchanged() { + let a = PodState::from(map![ + 1.into() => PodAccount { + balance: 69.into(), + nonce: 0.into(), + code: Some(Vec::new()), + storage: map![], + }, + 2.into() => PodAccount { + balance: 69.into(), + nonce: 0.into(), + code: Some(Vec::new()), + storage: map![], + } + ]); + let b = PodState::from(map![ + 1.into() => PodAccount { + balance: 69.into(), + nonce: 1.into(), + code: Some(Vec::new()), + storage: map![], + }, + 2.into() => PodAccount { + balance: 69.into(), + nonce: 0.into(), + code: Some(Vec::new()), + storage: map![], + } + ]); + assert_eq!( + super::diff_pod(&a, &b), + StateDiff { + raw: map![ + 1.into() => AccountDiff{ + balance: Diff::Same, + nonce: Diff::Changed(0.into(), 1.into()), + code: Diff::Same, + storage: map![], + } + ] + } + ); + } } diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index 2a9ac911f..c571f75b0 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -17,357 +17,449 @@ //! 
Account state encoding and decoding use account_db::{AccountDB, AccountDBMut}; -use types::basic_account::BasicAccount; use bytes::Bytes; use ethereum_types::{H256, U256}; use ethtrie::{TrieDB, TrieDBMut}; use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP}; use hash_db::HashDB; -use rlp::{RlpStream, Rlp}; +use rlp::{Rlp, RlpStream}; use snapshot::{Error, Progress}; -use std::collections::HashSet; +use std::{collections::HashSet, sync::atomic::Ordering}; use trie::{Trie, TrieMut}; -use std::sync::atomic::Ordering; +use types::basic_account::BasicAccount; // An empty account -- these were replaced with RLP null data for a space optimization in v1. const ACC_EMPTY: BasicAccount = BasicAccount { - nonce: U256([0, 0, 0, 0]), - balance: U256([0, 0, 0, 0]), - storage_root: KECCAK_NULL_RLP, - code_hash: KECCAK_EMPTY, + nonce: U256([0, 0, 0, 0]), + balance: U256([0, 0, 0, 0]), + storage_root: KECCAK_NULL_RLP, + code_hash: KECCAK_EMPTY, }; // whether an encoded account has code and how it is referred to. #[repr(u8)] enum CodeState { - // the account has no code. - Empty = 0, - // raw code is encoded. - Inline = 1, - // the code is referred to by hash. - Hash = 2, + // the account has no code. + Empty = 0, + // raw code is encoded. + Inline = 1, + // the code is referred to by hash. + Hash = 2, } impl CodeState { - fn from(x: u8) -> Result { - match x { - 0 => Ok(CodeState::Empty), - 1 => Ok(CodeState::Inline), - 2 => Ok(CodeState::Hash), - _ => Err(Error::UnrecognizedCodeState(x)) - } - } + fn from(x: u8) -> Result { + match x { + 0 => Ok(CodeState::Empty), + 1 => Ok(CodeState::Inline), + 2 => Ok(CodeState::Hash), + _ => Err(Error::UnrecognizedCodeState(x)), + } + } - fn raw(self) -> u8 { - self as u8 - } + fn raw(self) -> u8 { + self as u8 + } } // walk the account's storage trie, returning a vector of RLP items containing the // account address hash, account properties and the storage. 
Each item contains at most `max_storage_items` // storage records split according to snapshot format definition. pub fn to_fat_rlps( - account_hash: &H256, - acc: &BasicAccount, - acct_db: &AccountDB, - used_code: &mut HashSet, - first_chunk_size: usize, - max_chunk_size: usize, - p: &Progress, + account_hash: &H256, + acc: &BasicAccount, + acct_db: &AccountDB, + used_code: &mut HashSet, + first_chunk_size: usize, + max_chunk_size: usize, + p: &Progress, ) -> Result, Error> { - let db = &(acct_db as &dyn HashDB<_,_>); - let db = TrieDB::new(db, &acc.storage_root)?; - let mut chunks = Vec::new(); - let mut db_iter = db.iter()?; - let mut target_chunk_size = first_chunk_size; - let mut account_stream = RlpStream::new_list(2); - let mut leftover: Option> = None; - loop { - account_stream.append(account_hash); - account_stream.begin_list(5); + let db = &(acct_db as &dyn HashDB<_, _>); + let db = TrieDB::new(db, &acc.storage_root)?; + let mut chunks = Vec::new(); + let mut db_iter = db.iter()?; + let mut target_chunk_size = first_chunk_size; + let mut account_stream = RlpStream::new_list(2); + let mut leftover: Option> = None; + loop { + account_stream.append(account_hash); + account_stream.begin_list(5); - account_stream.append(&acc.nonce) - .append(&acc.balance); + account_stream.append(&acc.nonce).append(&acc.balance); - // [has_code, code_hash]. - if acc.code_hash == KECCAK_EMPTY { - account_stream.append(&CodeState::Empty.raw()).append_empty_data(); - } else if used_code.contains(&acc.code_hash) { - account_stream.append(&CodeState::Hash.raw()).append(&acc.code_hash); - } else { - match acct_db.get(&acc.code_hash) { - Some(c) => { - used_code.insert(acc.code_hash.clone()); - account_stream.append(&CodeState::Inline.raw()).append(&&*c); - } - None => { - warn!("code lookup failed during snapshot"); - account_stream.append(&false).append_empty_data(); - } - } - } + // [has_code, code_hash]. 
+ if acc.code_hash == KECCAK_EMPTY { + account_stream + .append(&CodeState::Empty.raw()) + .append_empty_data(); + } else if used_code.contains(&acc.code_hash) { + account_stream + .append(&CodeState::Hash.raw()) + .append(&acc.code_hash); + } else { + match acct_db.get(&acc.code_hash) { + Some(c) => { + used_code.insert(acc.code_hash.clone()); + account_stream.append(&CodeState::Inline.raw()).append(&&*c); + } + None => { + warn!("code lookup failed during snapshot"); + account_stream.append(&false).append_empty_data(); + } + } + } - account_stream.begin_unbounded_list(); - if account_stream.len() > target_chunk_size { - // account does not fit, push an empty record to mark a new chunk - target_chunk_size = max_chunk_size; - chunks.push(Vec::new()); - } + account_stream.begin_unbounded_list(); + if account_stream.len() > target_chunk_size { + // account does not fit, push an empty record to mark a new chunk + target_chunk_size = max_chunk_size; + chunks.push(Vec::new()); + } - if let Some(pair) = leftover.take() { - if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) { - return Err(Error::ChunkTooSmall); - } - } + if let Some(pair) = leftover.take() { + if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) { + return Err(Error::ChunkTooSmall); + } + } - loop { - if p.abort.load(Ordering::SeqCst) { - trace!(target: "snapshot", "to_fat_rlps: aborting snapshot"); - return Err(Error::SnapshotAborted); - } - match db_iter.next() { - Some(Ok((k, v))) => { - let pair = { - let mut stream = RlpStream::new_list(2); - stream.append(&k).append(&&*v); - stream.drain() - }; - if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) { - account_stream.complete_unbounded_list(); - let stream = ::std::mem::replace(&mut account_stream, RlpStream::new_list(2)); - chunks.push(stream.out()); - target_chunk_size = max_chunk_size; - leftover = Some(pair); - break; - } - }, - Some(Err(e)) => { - return Err(e.into()); - }, - None => { - 
account_stream.complete_unbounded_list(); - let stream = ::std::mem::replace(&mut account_stream, RlpStream::new_list(2)); - chunks.push(stream.out()); - return Ok(chunks); - } - } - - } - } + loop { + if p.abort.load(Ordering::SeqCst) { + trace!(target: "snapshot", "to_fat_rlps: aborting snapshot"); + return Err(Error::SnapshotAborted); + } + match db_iter.next() { + Some(Ok((k, v))) => { + let pair = { + let mut stream = RlpStream::new_list(2); + stream.append(&k).append(&&*v); + stream.drain() + }; + if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) { + account_stream.complete_unbounded_list(); + let stream = + ::std::mem::replace(&mut account_stream, RlpStream::new_list(2)); + chunks.push(stream.out()); + target_chunk_size = max_chunk_size; + leftover = Some(pair); + break; + } + } + Some(Err(e)) => { + return Err(e.into()); + } + None => { + account_stream.complete_unbounded_list(); + let stream = ::std::mem::replace(&mut account_stream, RlpStream::new_list(2)); + chunks.push(stream.out()); + return Ok(chunks); + } + } + } + } } // decode a fat rlp, and rebuild the storage trie as we go. // returns the account structure along with its newly recovered code, // if it exists. pub fn from_fat_rlp( - acct_db: &mut AccountDBMut, - rlp: Rlp, - mut storage_root: H256, + acct_db: &mut AccountDBMut, + rlp: Rlp, + mut storage_root: H256, ) -> Result<(BasicAccount, Option), Error> { + // check for special case of empty account. + if rlp.is_empty() { + return Ok((ACC_EMPTY, None)); + } - // check for special case of empty account. - if rlp.is_empty() { - return Ok((ACC_EMPTY, None)); - } + let nonce = rlp.val_at(0)?; + let balance = rlp.val_at(1)?; + let code_state: CodeState = { + let raw: u8 = rlp.val_at(2)?; + CodeState::from(raw)? + }; - let nonce = rlp.val_at(0)?; - let balance = rlp.val_at(1)?; - let code_state: CodeState = { - let raw: u8 = rlp.val_at(2)?; - CodeState::from(raw)? - }; + // load the code if it exists. 
+ let (code_hash, new_code) = match code_state { + CodeState::Empty => (KECCAK_EMPTY, None), + CodeState::Inline => { + let code: Bytes = rlp.val_at(3)?; + let code_hash = acct_db.insert(&code); - // load the code if it exists. - let (code_hash, new_code) = match code_state { - CodeState::Empty => (KECCAK_EMPTY, None), - CodeState::Inline => { - let code: Bytes = rlp.val_at(3)?; - let code_hash = acct_db.insert(&code); + (code_hash, Some(code)) + } + CodeState::Hash => { + let code_hash = rlp.val_at(3)?; - (code_hash, Some(code)) - } - CodeState::Hash => { - let code_hash = rlp.val_at(3)?; + (code_hash, None) + } + }; - (code_hash, None) - } - }; + { + let mut storage_trie = if storage_root.is_zero() { + TrieDBMut::new(acct_db, &mut storage_root) + } else { + TrieDBMut::from_existing(acct_db, &mut storage_root)? + }; + let pairs = rlp.at(4)?; + for pair_rlp in pairs.iter() { + let k: Bytes = pair_rlp.val_at(0)?; + let v: Bytes = pair_rlp.val_at(1)?; - { - let mut storage_trie = if storage_root.is_zero() { - TrieDBMut::new(acct_db, &mut storage_root) - } else { - TrieDBMut::from_existing(acct_db, &mut storage_root)? 
- }; - let pairs = rlp.at(4)?; - for pair_rlp in pairs.iter() { - let k: Bytes = pair_rlp.val_at(0)?; - let v: Bytes = pair_rlp.val_at(1)?; + storage_trie.insert(&k, &v)?; + } + } - storage_trie.insert(&k, &v)?; - } - } + let acc = BasicAccount { + nonce: nonce, + balance: balance, + storage_root: storage_root, + code_hash: code_hash, + }; - let acc = BasicAccount { - nonce: nonce, - balance: balance, - storage_root: storage_root, - code_hash: code_hash, - }; - - Ok((acc, new_code)) + Ok((acc, new_code)) } #[cfg(test)] mod tests { - use account_db::{AccountDB, AccountDBMut}; - use types::basic_account::BasicAccount; - use test_helpers::get_temp_state_db; - use snapshot::tests::helpers::fill_storage; - use snapshot::Progress; + use account_db::{AccountDB, AccountDBMut}; + use snapshot::{tests::helpers::fill_storage, Progress}; + use test_helpers::get_temp_state_db; + use types::basic_account::BasicAccount; - use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak}; - use ethereum_types::{H256, Address}; - use hash_db::HashDB; - use kvdb::DBValue; - use rlp::Rlp; + use ethereum_types::{Address, H256}; + use hash::{keccak, KECCAK_EMPTY, KECCAK_NULL_RLP}; + use hash_db::HashDB; + use kvdb::DBValue; + use rlp::Rlp; - use std::collections::HashSet; + use std::collections::HashSet; - use super::{ACC_EMPTY, to_fat_rlps, from_fat_rlp}; + use super::{from_fat_rlp, to_fat_rlps, ACC_EMPTY}; - #[test] - fn encoding_basic() { - let mut db = get_temp_state_db(); - let addr = Address::random(); + #[test] + fn encoding_basic() { + let mut db = get_temp_state_db(); + let addr = Address::random(); - let account = BasicAccount { - nonce: 50.into(), - balance: 123456789.into(), - storage_root: KECCAK_NULL_RLP, - code_hash: KECCAK_EMPTY, - }; + let account = BasicAccount { + nonce: 50.into(), + balance: 123456789.into(), + storage_root: KECCAK_NULL_RLP, + code_hash: KECCAK_EMPTY, + }; - let thin_rlp = ::rlp::encode(&account); - assert_eq!(::rlp::decode::(&thin_rlp).unwrap(), account); - let p 
= Progress::default(); - let fat_rlps = to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hash_db(), &addr), &mut Default::default(), usize::max_value(), usize::max_value(), &p).unwrap(); - let fat_rlp = Rlp::new(&fat_rlps[0]).at(1).unwrap(); - assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account); - } + let thin_rlp = ::rlp::encode(&account); + assert_eq!(::rlp::decode::(&thin_rlp).unwrap(), account); + let p = Progress::default(); + let fat_rlps = to_fat_rlps( + &keccak(&addr), + &account, + &AccountDB::new(db.as_hash_db(), &addr), + &mut Default::default(), + usize::max_value(), + usize::max_value(), + &p, + ) + .unwrap(); + let fat_rlp = Rlp::new(&fat_rlps[0]).at(1).unwrap(); + assert_eq!( + from_fat_rlp( + &mut AccountDBMut::new(db.as_hash_db_mut(), &addr), + fat_rlp, + H256::zero() + ) + .unwrap() + .0, + account + ); + } - #[test] - fn encoding_storage() { - let mut db = get_temp_state_db(); - let addr = Address::random(); + #[test] + fn encoding_storage() { + let mut db = get_temp_state_db(); + let addr = Address::random(); - let account = { - let acct_db = AccountDBMut::new(db.as_hash_db_mut(), &addr); - let mut root = KECCAK_NULL_RLP; - fill_storage(acct_db, &mut root, &mut H256::zero()); - BasicAccount { - nonce: 25.into(), - balance: 987654321.into(), - storage_root: root, - code_hash: KECCAK_EMPTY, - } - }; + let account = { + let acct_db = AccountDBMut::new(db.as_hash_db_mut(), &addr); + let mut root = KECCAK_NULL_RLP; + fill_storage(acct_db, &mut root, &mut H256::zero()); + BasicAccount { + nonce: 25.into(), + balance: 987654321.into(), + storage_root: root, + code_hash: KECCAK_EMPTY, + } + }; - let thin_rlp = ::rlp::encode(&account); - assert_eq!(::rlp::decode::(&thin_rlp).unwrap(), account); + let thin_rlp = ::rlp::encode(&account); + assert_eq!(::rlp::decode::(&thin_rlp).unwrap(), account); - let p = Progress::default(); + let p = Progress::default(); - let fat_rlp = 
to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hash_db(), &addr), &mut Default::default(), usize::max_value(), usize::max_value(), &p).unwrap(); - let fat_rlp = Rlp::new(&fat_rlp[0]).at(1).unwrap(); - assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account); - } + let fat_rlp = to_fat_rlps( + &keccak(&addr), + &account, + &AccountDB::new(db.as_hash_db(), &addr), + &mut Default::default(), + usize::max_value(), + usize::max_value(), + &p, + ) + .unwrap(); + let fat_rlp = Rlp::new(&fat_rlp[0]).at(1).unwrap(); + assert_eq!( + from_fat_rlp( + &mut AccountDBMut::new(db.as_hash_db_mut(), &addr), + fat_rlp, + H256::zero() + ) + .unwrap() + .0, + account + ); + } - #[test] - fn encoding_storage_split() { - let mut db = get_temp_state_db(); - let addr = Address::random(); + #[test] + fn encoding_storage_split() { + let mut db = get_temp_state_db(); + let addr = Address::random(); - let account = { - let acct_db = AccountDBMut::new(db.as_hash_db_mut(), &addr); - let mut root = KECCAK_NULL_RLP; - fill_storage(acct_db, &mut root, &mut H256::zero()); - BasicAccount { - nonce: 25.into(), - balance: 987654321.into(), - storage_root: root, - code_hash: KECCAK_EMPTY, - } - }; + let account = { + let acct_db = AccountDBMut::new(db.as_hash_db_mut(), &addr); + let mut root = KECCAK_NULL_RLP; + fill_storage(acct_db, &mut root, &mut H256::zero()); + BasicAccount { + nonce: 25.into(), + balance: 987654321.into(), + storage_root: root, + code_hash: KECCAK_EMPTY, + } + }; - let thin_rlp = ::rlp::encode(&account); - assert_eq!(::rlp::decode::(&thin_rlp).unwrap(), account); + let thin_rlp = ::rlp::encode(&account); + assert_eq!(::rlp::decode::(&thin_rlp).unwrap(), account); - let p = Progress::default(); - let fat_rlps = to_fat_rlps(&keccak(addr), &account, &AccountDB::new(db.as_hash_db(), &addr), &mut Default::default(), 500, 1000, &p).unwrap(); - let mut root = KECCAK_NULL_RLP; - let mut restored_account = None; - 
for rlp in fat_rlps { - let fat_rlp = Rlp::new(&rlp).at(1).unwrap(); - restored_account = Some(from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &addr), fat_rlp, root).unwrap().0); - root = restored_account.as_ref().unwrap().storage_root.clone(); - } - assert_eq!(restored_account, Some(account)); - } + let p = Progress::default(); + let fat_rlps = to_fat_rlps( + &keccak(addr), + &account, + &AccountDB::new(db.as_hash_db(), &addr), + &mut Default::default(), + 500, + 1000, + &p, + ) + .unwrap(); + let mut root = KECCAK_NULL_RLP; + let mut restored_account = None; + for rlp in fat_rlps { + let fat_rlp = Rlp::new(&rlp).at(1).unwrap(); + restored_account = Some( + from_fat_rlp( + &mut AccountDBMut::new(db.as_hash_db_mut(), &addr), + fat_rlp, + root, + ) + .unwrap() + .0, + ); + root = restored_account.as_ref().unwrap().storage_root.clone(); + } + assert_eq!(restored_account, Some(account)); + } - #[test] - fn encoding_code() { - let mut db = get_temp_state_db(); + #[test] + fn encoding_code() { + let mut db = get_temp_state_db(); - let addr1 = Address::random(); - let addr2 = Address::random(); + let addr1 = Address::random(); + let addr2 = Address::random(); - let code_hash = { - let mut acct_db = AccountDBMut::new(db.as_hash_db_mut(), &addr1); - acct_db.insert(b"this is definitely code") - }; + let code_hash = { + let mut acct_db = AccountDBMut::new(db.as_hash_db_mut(), &addr1); + acct_db.insert(b"this is definitely code") + }; - { - let mut acct_db = AccountDBMut::new(db.as_hash_db_mut(), &addr2); - acct_db.emplace(code_hash.clone(), DBValue::from_slice(b"this is definitely code")); - } + { + let mut acct_db = AccountDBMut::new(db.as_hash_db_mut(), &addr2); + acct_db.emplace( + code_hash.clone(), + DBValue::from_slice(b"this is definitely code"), + ); + } - let account1 = BasicAccount { - nonce: 50.into(), - balance: 123456789.into(), - storage_root: KECCAK_NULL_RLP, - code_hash, - }; + let account1 = BasicAccount { + nonce: 50.into(), + balance: 
123456789.into(), + storage_root: KECCAK_NULL_RLP, + code_hash, + }; - let account2 = BasicAccount { - nonce: 400.into(), - balance: 98765432123456789usize.into(), - storage_root: KECCAK_NULL_RLP, - code_hash, - }; + let account2 = BasicAccount { + nonce: 400.into(), + balance: 98765432123456789usize.into(), + storage_root: KECCAK_NULL_RLP, + code_hash, + }; - let mut used_code = HashSet::new(); - let p1 = Progress::default(); - let p2 = Progress::default(); - let fat_rlp1 = to_fat_rlps(&keccak(&addr1), &account1, &AccountDB::new(db.as_hash_db(), &addr1), &mut used_code, usize::max_value(), usize::max_value(), &p1).unwrap(); - let fat_rlp2 = to_fat_rlps(&keccak(&addr2), &account2, &AccountDB::new(db.as_hash_db(), &addr2), &mut used_code, usize::max_value(), usize::max_value(), &p2).unwrap(); - assert_eq!(used_code.len(), 1); + let mut used_code = HashSet::new(); + let p1 = Progress::default(); + let p2 = Progress::default(); + let fat_rlp1 = to_fat_rlps( + &keccak(&addr1), + &account1, + &AccountDB::new(db.as_hash_db(), &addr1), + &mut used_code, + usize::max_value(), + usize::max_value(), + &p1, + ) + .unwrap(); + let fat_rlp2 = to_fat_rlps( + &keccak(&addr2), + &account2, + &AccountDB::new(db.as_hash_db(), &addr2), + &mut used_code, + usize::max_value(), + usize::max_value(), + &p2, + ) + .unwrap(); + assert_eq!(used_code.len(), 1); - let fat_rlp1 = Rlp::new(&fat_rlp1[0]).at(1).unwrap(); - let fat_rlp2 = Rlp::new(&fat_rlp2[0]).at(1).unwrap(); + let fat_rlp1 = Rlp::new(&fat_rlp1[0]).at(1).unwrap(); + let fat_rlp2 = Rlp::new(&fat_rlp2[0]).at(1).unwrap(); - let (acc, maybe_code) = from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &addr2), fat_rlp2, H256::zero()).unwrap(); - assert!(maybe_code.is_none()); - assert_eq!(acc, account2); + let (acc, maybe_code) = from_fat_rlp( + &mut AccountDBMut::new(db.as_hash_db_mut(), &addr2), + fat_rlp2, + H256::zero(), + ) + .unwrap(); + assert!(maybe_code.is_none()); + assert_eq!(acc, account2); - let (acc, maybe_code) = 
from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &addr1), fat_rlp1, H256::zero()).unwrap(); - assert_eq!(maybe_code, Some(b"this is definitely code".to_vec())); - assert_eq!(acc, account1); - } + let (acc, maybe_code) = from_fat_rlp( + &mut AccountDBMut::new(db.as_hash_db_mut(), &addr1), + fat_rlp1, + H256::zero(), + ) + .unwrap(); + assert_eq!(maybe_code, Some(b"this is definitely code".to_vec())); + assert_eq!(acc, account1); + } - #[test] - fn encoding_empty_acc() { - let mut db = get_temp_state_db(); - assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &Address::zero()), Rlp::new(&::rlp::NULL_RLP), H256::zero()).unwrap(), (ACC_EMPTY, None)); - } + #[test] + fn encoding_empty_acc() { + let mut db = get_temp_state_db(); + assert_eq!( + from_fat_rlp( + &mut AccountDBMut::new(db.as_hash_db_mut(), &Address::zero()), + Rlp::new(&::rlp::NULL_RLP), + H256::zero() + ) + .unwrap(), + (ACC_EMPTY, None) + ); + } } diff --git a/ethcore/src/snapshot/block.rs b/ethcore/src/snapshot/block.rs index 0fc590763..a3abb5358 100644 --- a/ethcore/src/snapshot/block.rs +++ b/ethcore/src/snapshot/block.rs @@ -19,186 +19,184 @@ use bytes::Bytes; use ethereum_types::H256; use hash::keccak; -use rlp::{DecoderError, RlpStream, Rlp}; +use rlp::{DecoderError, Rlp, RlpStream}; use triehash::ordered_trie_root; -use types::block::Block; -use types::header::Header; -use types::views::BlockView; +use types::{block::Block, header::Header, views::BlockView}; const HEADER_FIELDS: usize = 8; const BLOCK_FIELDS: usize = 2; pub struct AbridgedBlock { - rlp: Bytes, + rlp: Bytes, } impl AbridgedBlock { - /// Create from rlp-compressed bytes. Does no verification. - pub fn from_raw(compressed: Bytes) -> Self { - AbridgedBlock { - rlp: compressed, - } - } + /// Create from rlp-compressed bytes. Does no verification. + pub fn from_raw(compressed: Bytes) -> Self { + AbridgedBlock { rlp: compressed } + } - /// Return the inner bytes. 
- pub fn into_inner(self) -> Bytes { - self.rlp - } + /// Return the inner bytes. + pub fn into_inner(self) -> Bytes { + self.rlp + } - /// Given a full block view, trim out the parent hash and block number, - /// producing new rlp. - pub fn from_block_view(block_view: &BlockView) -> Self { - let header = block_view.header_view(); - let seal_fields = header.seal(); + /// Given a full block view, trim out the parent hash and block number, + /// producing new rlp. + pub fn from_block_view(block_view: &BlockView) -> Self { + let header = block_view.header_view(); + let seal_fields = header.seal(); - // 10 header fields, unknown number of seal fields, and 2 block fields. - let mut stream = RlpStream::new_list( - HEADER_FIELDS + - seal_fields.len() + - BLOCK_FIELDS - ); + // 10 header fields, unknown number of seal fields, and 2 block fields. + let mut stream = RlpStream::new_list(HEADER_FIELDS + seal_fields.len() + BLOCK_FIELDS); - // write header values. - stream - .append(&header.author()) - .append(&header.state_root()) - .append(&header.log_bloom()) - .append(&header.difficulty()) - .append(&header.gas_limit()) - .append(&header.gas_used()) - .append(&header.timestamp()) - .append(&header.extra_data()); + // write header values. + stream + .append(&header.author()) + .append(&header.state_root()) + .append(&header.log_bloom()) + .append(&header.difficulty()) + .append(&header.gas_limit()) + .append(&header.gas_used()) + .append(&header.timestamp()) + .append(&header.extra_data()); - // write block values. - stream - .append_list(&block_view.transactions()) - .append_list(&block_view.uncles()); + // write block values. + stream + .append_list(&block_view.transactions()) + .append_list(&block_view.uncles()); - // write seal fields. - for field in seal_fields { - stream.append_raw(&field, 1); - } + // write seal fields. 
+ for field in seal_fields { + stream.append_raw(&field, 1); + } - AbridgedBlock { - rlp: stream.out(), - } - } + AbridgedBlock { rlp: stream.out() } + } - /// Flesh out an abridged block view with the provided parent hash and block number. - /// - /// Will fail if contains invalid rlp. - pub fn to_block(&self, parent_hash: H256, number: u64, receipts_root: H256) -> Result { - let rlp = Rlp::new(&self.rlp); + /// Flesh out an abridged block view with the provided parent hash and block number. + /// + /// Will fail if contains invalid rlp. + pub fn to_block( + &self, + parent_hash: H256, + number: u64, + receipts_root: H256, + ) -> Result { + let rlp = Rlp::new(&self.rlp); - let mut header: Header = Default::default(); - header.set_parent_hash(parent_hash); - header.set_author(rlp.val_at(0)?); - header.set_state_root(rlp.val_at(1)?); - header.set_log_bloom(rlp.val_at(2)?); - header.set_difficulty(rlp.val_at(3)?); - header.set_number(number); - header.set_gas_limit(rlp.val_at(4)?); - header.set_gas_used(rlp.val_at(5)?); - header.set_timestamp(rlp.val_at(6)?); - header.set_extra_data(rlp.val_at(7)?); + let mut header: Header = Default::default(); + header.set_parent_hash(parent_hash); + header.set_author(rlp.val_at(0)?); + header.set_state_root(rlp.val_at(1)?); + header.set_log_bloom(rlp.val_at(2)?); + header.set_difficulty(rlp.val_at(3)?); + header.set_number(number); + header.set_gas_limit(rlp.val_at(4)?); + header.set_gas_used(rlp.val_at(5)?); + header.set_timestamp(rlp.val_at(6)?); + header.set_extra_data(rlp.val_at(7)?); - let transactions = rlp.list_at(8)?; - let uncles: Vec
= rlp.list_at(9)?; + let transactions = rlp.list_at(8)?; + let uncles: Vec
= rlp.list_at(9)?; - header.set_transactions_root(ordered_trie_root( - rlp.at(8)?.iter().map(|r| r.as_raw()) - )); - header.set_receipts_root(receipts_root); + header.set_transactions_root(ordered_trie_root(rlp.at(8)?.iter().map(|r| r.as_raw()))); + header.set_receipts_root(receipts_root); - let mut uncles_rlp = RlpStream::new(); - uncles_rlp.append_list(&uncles); - header.set_uncles_hash(keccak(uncles_rlp.as_raw())); + let mut uncles_rlp = RlpStream::new(); + uncles_rlp.append_list(&uncles); + header.set_uncles_hash(keccak(uncles_rlp.as_raw())); - let mut seal_fields = Vec::new(); - for i in (HEADER_FIELDS + BLOCK_FIELDS)..rlp.item_count()? { - let seal_rlp = rlp.at(i)?; - seal_fields.push(seal_rlp.as_raw().to_owned()); - } + let mut seal_fields = Vec::new(); + for i in (HEADER_FIELDS + BLOCK_FIELDS)..rlp.item_count()? { + let seal_rlp = rlp.at(i)?; + seal_fields.push(seal_rlp.as_raw().to_owned()); + } - header.set_seal(seal_fields); + header.set_seal(seal_fields); - Ok(Block { - header: header, - transactions: transactions, - uncles: uncles, - }) - } + Ok(Block { + header: header, + transactions: transactions, + uncles: uncles, + }) + } } #[cfg(test)] mod tests { - use super::AbridgedBlock; + use super::AbridgedBlock; - use bytes::Bytes; - use ethereum_types::{H256, U256, Address}; - use types::transaction::{Action, Transaction}; - use types::block::Block; - use types::view; - use types::views::BlockView; + use bytes::Bytes; + use ethereum_types::{Address, H256, U256}; + use types::{ + block::Block, + transaction::{Action, Transaction}, + view, + views::BlockView, + }; - fn encode_block(b: &Block) -> Bytes { - b.rlp_bytes() - } + fn encode_block(b: &Block) -> Bytes { + b.rlp_bytes() + } - #[test] - fn empty_block_abridging() { - let b = Block::default(); - let receipts_root = b.header.receipts_root().clone(); - let encoded = encode_block(&b); + #[test] + fn empty_block_abridging() { + let b = Block::default(); + let receipts_root = 
b.header.receipts_root().clone(); + let encoded = encode_block(&b); - let abridged = AbridgedBlock::from_block_view(&view!(BlockView, &encoded)); - assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b); - } + let abridged = AbridgedBlock::from_block_view(&view!(BlockView, &encoded)); + assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b); + } - #[test] - #[should_panic] - fn wrong_number() { - let b = Block::default(); - let receipts_root = b.header.receipts_root().clone(); - let encoded = encode_block(&b); + #[test] + #[should_panic] + fn wrong_number() { + let b = Block::default(); + let receipts_root = b.header.receipts_root().clone(); + let encoded = encode_block(&b); - let abridged = AbridgedBlock::from_block_view(&view!(BlockView, &encoded)); - assert_eq!(abridged.to_block(H256::new(), 2, receipts_root).unwrap(), b); - } + let abridged = AbridgedBlock::from_block_view(&view!(BlockView, &encoded)); + assert_eq!(abridged.to_block(H256::new(), 2, receipts_root).unwrap(), b); + } - #[test] - fn with_transactions() { - let mut b = Block::default(); + #[test] + fn with_transactions() { + let mut b = Block::default(); - let t1 = Transaction { - action: Action::Create, - nonce: U256::from(42), - gas_price: U256::from(3000), - gas: U256::from(50_000), - value: U256::from(1), - data: b"Hello!".to_vec() - }.fake_sign(Address::from(0x69)); + let t1 = Transaction { + action: Action::Create, + nonce: U256::from(42), + gas_price: U256::from(3000), + gas: U256::from(50_000), + value: U256::from(1), + data: b"Hello!".to_vec(), + } + .fake_sign(Address::from(0x69)); - let t2 = Transaction { - action: Action::Create, - nonce: U256::from(88), - gas_price: U256::from(12345), - gas: U256::from(300000), - value: U256::from(1000000000), - data: "Eep!".into(), - }.fake_sign(Address::from(0x55)); + let t2 = Transaction { + action: Action::Create, + nonce: U256::from(88), + gas_price: U256::from(12345), + gas: U256::from(300000), + value: 
U256::from(1000000000), + data: "Eep!".into(), + } + .fake_sign(Address::from(0x55)); - b.transactions.push(t1.into()); - b.transactions.push(t2.into()); + b.transactions.push(t1.into()); + b.transactions.push(t2.into()); - let receipts_root = b.header.receipts_root().clone(); - b.header.set_transactions_root(::triehash::ordered_trie_root( - b.transactions.iter().map(::rlp::encode) - )); + let receipts_root = b.header.receipts_root().clone(); + b.header + .set_transactions_root(::triehash::ordered_trie_root( + b.transactions.iter().map(::rlp::encode), + )); - let encoded = encode_block(&b); + let encoded = encode_block(&b); - let abridged = AbridgedBlock::from_block_view(&view!(BlockView, &encoded[..])); - assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b); - } + let abridged = AbridgedBlock::from_block_view(&view!(BlockView, &encoded[..])); + assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b); + } } diff --git a/ethcore/src/snapshot/consensus/authority.rs b/ethcore/src/snapshot/consensus/authority.rs index 4423e0740..c38f95b9f 100644 --- a/ethcore/src/snapshot/consensus/authority.rs +++ b/ethcore/src/snapshot/consensus/authority.rs @@ -19,25 +19,24 @@ //! //! The chunks here contain state proofs of transitions, along with validator proofs. 
-use super::{SnapshotComponents, Rebuilder, ChunkSink}; +use super::{ChunkSink, Rebuilder, SnapshotComponents}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; -use engines::{EthEngine, EpochVerifier, EpochTransition}; +use engines::{EpochTransition, EpochVerifier, EthEngine}; use machine::EthereumMachine; use snapshot::{Error, ManifestData, Progress}; use blockchain::{BlockChain, BlockChainDB, BlockProvider}; use bytes::Bytes; use ethereum_types::{H256, U256}; -use itertools::{Position, Itertools}; +use itertools::{Itertools, Position}; use kvdb::KeyValueDB; -use rlp::{RlpStream, Rlp}; -use types::encoded; -use types::header::Header; -use types::ids::BlockId; -use types::receipt::Receipt; +use rlp::{Rlp, RlpStream}; +use types::{encoded, header::Header, ids::BlockId, receipt::Receipt}; /// Snapshot creation and restoration for PoA chains. /// Chunk format: @@ -53,343 +52,388 @@ use types::receipt::Receipt; pub struct PoaSnapshot; impl SnapshotComponents for PoaSnapshot { - fn chunk_all( - &mut self, - chain: &BlockChain, - block_at: H256, - sink: &mut ChunkSink, - _progress: &Progress, - preferred_size: usize, - ) -> Result<(), Error> { - let number = chain.block_number(&block_at) - .ok_or_else(|| Error::InvalidStartingBlock(BlockId::Hash(block_at)))?; + fn chunk_all( + &mut self, + chain: &BlockChain, + block_at: H256, + sink: &mut ChunkSink, + _progress: &Progress, + preferred_size: usize, + ) -> Result<(), Error> { + let number = chain + .block_number(&block_at) + .ok_or_else(|| Error::InvalidStartingBlock(BlockId::Hash(block_at)))?; - let mut pending_size = 0; - let mut rlps = Vec::new(); + let mut pending_size = 0; + let mut rlps = Vec::new(); - for (_, transition) in chain.epoch_transitions() - .take_while(|&(_, ref t)| t.block_number <= number) - { - // this can happen when our starting block is non-canonical. 
- if transition.block_number == number && transition.block_hash != block_at { - break - } + for (_, transition) in chain + .epoch_transitions() + .take_while(|&(_, ref t)| t.block_number <= number) + { + // this can happen when our starting block is non-canonical. + if transition.block_number == number && transition.block_hash != block_at { + break; + } - let header = chain.block_header_data(&transition.block_hash) - .ok_or_else(|| Error::BlockNotFound(transition.block_hash))?; + let header = chain + .block_header_data(&transition.block_hash) + .ok_or_else(|| Error::BlockNotFound(transition.block_hash))?; - let entry = { - let mut entry_stream = RlpStream::new_list(2); - entry_stream - .append_raw(&header.into_inner(), 1) - .append(&transition.proof); + let entry = { + let mut entry_stream = RlpStream::new_list(2); + entry_stream + .append_raw(&header.into_inner(), 1) + .append(&transition.proof); - entry_stream.out() - }; + entry_stream.out() + }; - // cut of the chunk if too large. - let new_loaded_size = pending_size + entry.len(); - pending_size = if new_loaded_size > preferred_size && !rlps.is_empty() { - write_chunk(false, &mut rlps, sink)?; - entry.len() - } else { - new_loaded_size - }; + // cut of the chunk if too large. 
+ let new_loaded_size = pending_size + entry.len(); + pending_size = if new_loaded_size > preferred_size && !rlps.is_empty() { + write_chunk(false, &mut rlps, sink)?; + entry.len() + } else { + new_loaded_size + }; - rlps.push(entry); - } + rlps.push(entry); + } - let (block, receipts) = chain.block(&block_at) - .and_then(|b| chain.block_receipts(&block_at).map(|r| (b, r))) - .ok_or_else(|| Error::BlockNotFound(block_at))?; - let block = block.decode()?; + let (block, receipts) = chain + .block(&block_at) + .and_then(|b| chain.block_receipts(&block_at).map(|r| (b, r))) + .ok_or_else(|| Error::BlockNotFound(block_at))?; + let block = block.decode()?; - let parent_td = chain.block_details(block.header.parent_hash()) - .map(|d| d.total_difficulty) - .ok_or_else(|| Error::BlockNotFound(block_at))?; + let parent_td = chain + .block_details(block.header.parent_hash()) + .map(|d| d.total_difficulty) + .ok_or_else(|| Error::BlockNotFound(block_at))?; - rlps.push({ - let mut stream = RlpStream::new_list(5); - stream - .append(&block.header) - .append_list(&block.transactions) - .append_list(&block.uncles) - .append(&receipts) - .append(&parent_td); - stream.out() - }); + rlps.push({ + let mut stream = RlpStream::new_list(5); + stream + .append(&block.header) + .append_list(&block.transactions) + .append_list(&block.uncles) + .append(&receipts) + .append(&parent_td); + stream.out() + }); - write_chunk(true, &mut rlps, sink)?; + write_chunk(true, &mut rlps, sink)?; - Ok(()) - } + Ok(()) + } - fn rebuilder( - &self, - chain: BlockChain, - db: Arc, - manifest: &ManifestData, - ) -> Result, ::error::Error> { - Ok(Box::new(ChunkRebuilder { - manifest: manifest.clone(), - warp_target: None, - chain: chain, - db: db.key_value().clone(), - had_genesis: false, - unverified_firsts: Vec::new(), - last_epochs: Vec::new(), - })) - } + fn rebuilder( + &self, + chain: BlockChain, + db: Arc, + manifest: &ManifestData, + ) -> Result, ::error::Error> { + Ok(Box::new(ChunkRebuilder { + 
manifest: manifest.clone(), + warp_target: None, + chain: chain, + db: db.key_value().clone(), + had_genesis: false, + unverified_firsts: Vec::new(), + last_epochs: Vec::new(), + })) + } - fn min_supported_version(&self) -> u64 { 3 } - fn current_version(&self) -> u64 { 3 } + fn min_supported_version(&self) -> u64 { + 3 + } + fn current_version(&self) -> u64 { + 3 + } } // writes a chunk composed of the inner RLPs here. // flag indicates whether the chunk is the last chunk. fn write_chunk(last: bool, chunk_data: &mut Vec, sink: &mut ChunkSink) -> Result<(), Error> { - let mut stream = RlpStream::new_list(1 + chunk_data.len()); + let mut stream = RlpStream::new_list(1 + chunk_data.len()); - stream.append(&last); - for item in chunk_data.drain(..) { - stream.append_raw(&item, 1); - } + stream.append(&last); + for item in chunk_data.drain(..) { + stream.append_raw(&item, 1); + } - (sink)(stream.out().as_slice()).map_err(Into::into) + (sink)(stream.out().as_slice()).map_err(Into::into) } // rebuilder checks state proofs for all transitions, and checks that each // transition header is verifiable from the epoch data of the one prior. struct ChunkRebuilder { - manifest: ManifestData, - warp_target: Option
, - chain: BlockChain, - db: Arc, - had_genesis: bool, + manifest: ManifestData, + warp_target: Option
, + chain: BlockChain, + db: Arc, + had_genesis: bool, - // sorted vectors of unverified first blocks in a chunk - // and epoch data from last blocks in chunks. - // verification for these will be done at the end. - unverified_firsts: Vec<(Header, Bytes, H256)>, - last_epochs: Vec<(Header, Box>)>, + // sorted vectors of unverified first blocks in a chunk + // and epoch data from last blocks in chunks. + // verification for these will be done at the end. + unverified_firsts: Vec<(Header, Bytes, H256)>, + last_epochs: Vec<(Header, Box>)>, } // verified data. struct Verified { - epoch_transition: EpochTransition, - header: Header, + epoch_transition: EpochTransition, + header: Header, } impl ChunkRebuilder { - fn verify_transition( - &mut self, - last_verifier: &mut Option>>, - transition_rlp: Rlp, - engine: &EthEngine, - ) -> Result { - use engines::ConstructedVerifier; + fn verify_transition( + &mut self, + last_verifier: &mut Option>>, + transition_rlp: Rlp, + engine: &EthEngine, + ) -> Result { + use engines::ConstructedVerifier; - // decode. - let header: Header = transition_rlp.val_at(0)?; - let epoch_data: Bytes = transition_rlp.val_at(1)?; + // decode. + let header: Header = transition_rlp.val_at(0)?; + let epoch_data: Bytes = transition_rlp.val_at(1)?; - trace!(target: "snapshot", "verifying transition to epoch at block {}", header.number()); + trace!(target: "snapshot", "verifying transition to epoch at block {}", header.number()); - // check current transition against validators of last epoch. - let new_verifier = match engine.epoch_verifier(&header, &epoch_data) { - ConstructedVerifier::Trusted(v) => v, - ConstructedVerifier::Unconfirmed(v, finality_proof, hash) => { - match *last_verifier { - Some(ref last) => - if last.check_finality_proof(finality_proof).map_or(true, |hashes| !hashes.contains(&hash)) - { - return Err(Error::BadEpochProof(header.number()).into()); - }, - None if header.number() != 0 => { - // genesis never requires additional validation. 
+ // check current transition against validators of last epoch. + let new_verifier = match engine.epoch_verifier(&header, &epoch_data) { + ConstructedVerifier::Trusted(v) => v, + ConstructedVerifier::Unconfirmed(v, finality_proof, hash) => { + match *last_verifier { + Some(ref last) => { + if last + .check_finality_proof(finality_proof) + .map_or(true, |hashes| !hashes.contains(&hash)) + { + return Err(Error::BadEpochProof(header.number()).into()); + } + } + None if header.number() != 0 => { + // genesis never requires additional validation. - let idx = self.unverified_firsts - .binary_search_by_key(&header.number(), |&(ref h, _, _)| h.number()) - .unwrap_or_else(|x| x); + let idx = self + .unverified_firsts + .binary_search_by_key(&header.number(), |&(ref h, _, _)| h.number()) + .unwrap_or_else(|x| x); - let entry = (header.clone(), finality_proof.to_owned(), hash); - self.unverified_firsts.insert(idx, entry); - } - None => {} - } + let entry = (header.clone(), finality_proof.to_owned(), hash); + self.unverified_firsts.insert(idx, entry); + } + None => {} + } - v - } - ConstructedVerifier::Err(e) => return Err(e), - }; + v + } + ConstructedVerifier::Err(e) => return Err(e), + }; - // create new epoch verifier. - *last_verifier = Some(new_verifier); + // create new epoch verifier. 
+ *last_verifier = Some(new_verifier); - Ok(Verified { - epoch_transition: EpochTransition { - block_hash: header.hash(), - block_number: header.number(), - proof: epoch_data, - }, - header: header, - }) - } + Ok(Verified { + epoch_transition: EpochTransition { + block_hash: header.hash(), + block_number: header.number(), + proof: epoch_data, + }, + header: header, + }) + } } impl Rebuilder for ChunkRebuilder { - fn feed( - &mut self, - chunk: &[u8], - engine: &EthEngine, - abort_flag: &AtomicBool, - ) -> Result<(), ::error::Error> { - let rlp = Rlp::new(chunk); - let is_last_chunk: bool = rlp.val_at(0)?; - let num_items = rlp.item_count()?; + fn feed( + &mut self, + chunk: &[u8], + engine: &EthEngine, + abort_flag: &AtomicBool, + ) -> Result<(), ::error::Error> { + let rlp = Rlp::new(chunk); + let is_last_chunk: bool = rlp.val_at(0)?; + let num_items = rlp.item_count()?; - // number of transitions in the chunk. - let num_transitions = if is_last_chunk { - num_items - 2 - } else { - num_items - 1 - }; + // number of transitions in the chunk. 
+ let num_transitions = if is_last_chunk { + num_items - 2 + } else { + num_items - 1 + }; - if num_transitions == 0 && !is_last_chunk { - return Err(Error::WrongChunkFormat("Found non-last chunk without any data.".into()).into()); - } + if num_transitions == 0 && !is_last_chunk { + return Err( + Error::WrongChunkFormat("Found non-last chunk without any data.".into()).into(), + ); + } - let mut last_verifier = None; - let mut last_number = None; - for transition_rlp in rlp.iter().skip(1).take(num_transitions).with_position() { - if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } + let mut last_verifier = None; + let mut last_number = None; + for transition_rlp in rlp.iter().skip(1).take(num_transitions).with_position() { + if !abort_flag.load(Ordering::SeqCst) { + return Err(Error::RestorationAborted.into()); + } - let (is_first, is_last) = match transition_rlp { - Position::First(_) => (true, false), - Position::Middle(_) => (false, false), - Position::Last(_) => (false, true), - Position::Only(_) => (true, true), - }; + let (is_first, is_last) = match transition_rlp { + Position::First(_) => (true, false), + Position::Middle(_) => (false, false), + Position::Last(_) => (false, true), + Position::Only(_) => (true, true), + }; - let transition_rlp = transition_rlp.into_inner(); - let verified = self.verify_transition( - &mut last_verifier, - transition_rlp, - engine, - )?; + let transition_rlp = transition_rlp.into_inner(); + let verified = self.verify_transition(&mut last_verifier, transition_rlp, engine)?; - if last_number.map_or(false, |num| verified.header.number() <= num) { - return Err(Error::WrongChunkFormat("Later epoch transition in earlier or same block.".into()).into()); - } + if last_number.map_or(false, |num| verified.header.number() <= num) { + return Err(Error::WrongChunkFormat( + "Later epoch transition in earlier or same block.".into(), + ) + .into()); + } - last_number = Some(verified.header.number()); + 
last_number = Some(verified.header.number()); - // book-keep borders for verification later. - if is_first { - // make sure the genesis transition was included, - // but it doesn't need verification later. - if verified.header.number() == 0 { - if verified.header.hash() != self.chain.genesis_hash() { - return Err(Error::WrongBlockHash(0, verified.header.hash(), self.chain.genesis_hash()).into()); - } + // book-keep borders for verification later. + if is_first { + // make sure the genesis transition was included, + // but it doesn't need verification later. + if verified.header.number() == 0 { + if verified.header.hash() != self.chain.genesis_hash() { + return Err(Error::WrongBlockHash( + 0, + verified.header.hash(), + self.chain.genesis_hash(), + ) + .into()); + } - self.had_genesis = true; - } - } - if is_last { - let idx = self.last_epochs - .binary_search_by_key(&verified.header.number(), |&(ref h, _)| h.number()) - .unwrap_or_else(|x| x); + self.had_genesis = true; + } + } + if is_last { + let idx = self + .last_epochs + .binary_search_by_key(&verified.header.number(), |&(ref h, _)| h.number()) + .unwrap_or_else(|x| x); - let entry = ( - verified.header.clone(), - last_verifier.take().expect("last_verifier always set after verify_transition; qed"), - ); - self.last_epochs.insert(idx, entry); - } + let entry = ( + verified.header.clone(), + last_verifier + .take() + .expect("last_verifier always set after verify_transition; qed"), + ); + self.last_epochs.insert(idx, entry); + } - // write epoch transition into database. - let mut batch = self.db.transaction(); - self.chain.insert_epoch_transition(&mut batch, verified.header.number(), - verified.epoch_transition); - self.db.write_buffered(batch); + // write epoch transition into database. 
+ let mut batch = self.db.transaction(); + self.chain.insert_epoch_transition( + &mut batch, + verified.header.number(), + verified.epoch_transition, + ); + self.db.write_buffered(batch); - trace!(target: "snapshot", "Verified epoch transition for epoch at block {}", verified.header.number()); - } + trace!(target: "snapshot", "Verified epoch transition for epoch at block {}", verified.header.number()); + } - if is_last_chunk { - use types::block::Block; + if is_last_chunk { + use types::block::Block; - let last_rlp = rlp.at(num_items - 1)?; - let block = Block { - header: last_rlp.val_at(0)?, - transactions: last_rlp.list_at(1)?, - uncles: last_rlp.list_at(2)?, - }; - let block_data = block.rlp_bytes(); - let receipts: Vec = last_rlp.list_at(3)?; + let last_rlp = rlp.at(num_items - 1)?; + let block = Block { + header: last_rlp.val_at(0)?, + transactions: last_rlp.list_at(1)?, + uncles: last_rlp.list_at(2)?, + }; + let block_data = block.rlp_bytes(); + let receipts: Vec = last_rlp.list_at(3)?; - { - let hash = block.header.hash(); - let best_hash = self.manifest.block_hash; - if hash != best_hash { - return Err(Error::WrongBlockHash(block.header.number(), best_hash, hash).into()) - } - } + { + let hash = block.header.hash(); + let best_hash = self.manifest.block_hash; + if hash != best_hash { + return Err( + Error::WrongBlockHash(block.header.number(), best_hash, hash).into(), + ); + } + } - let parent_td: U256 = last_rlp.val_at(4)?; + let parent_td: U256 = last_rlp.val_at(4)?; - let mut batch = self.db.transaction(); - self.chain.insert_unordered_block(&mut batch, encoded::Block::new(block_data), receipts, Some(parent_td), true, false); - self.db.write_buffered(batch); + let mut batch = self.db.transaction(); + self.chain.insert_unordered_block( + &mut batch, + encoded::Block::new(block_data), + receipts, + Some(parent_td), + true, + false, + ); + self.db.write_buffered(batch); - self.warp_target = Some(block.header); - } + self.warp_target = Some(block.header); + 
} - Ok(()) - } + Ok(()) + } - fn finalize(&mut self, _engine: &EthEngine) -> Result<(), ::error::Error> { - if !self.had_genesis { - return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into()); - } + fn finalize(&mut self, _engine: &EthEngine) -> Result<(), ::error::Error> { + if !self.had_genesis { + return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into()); + } - let target_header = match self.warp_target.take() { - Some(x) => x, - None => return Err(Error::WrongChunkFormat("Warp target block not included.".into()).into()), - }; + let target_header = match self.warp_target.take() { + Some(x) => x, + None => { + return Err( + Error::WrongChunkFormat("Warp target block not included.".into()).into(), + ) + } + }; - // verify the first entries of chunks we couldn't before. - // we store all last verifiers, but not all firsts. - // match each unverified first epoch with a last epoch verifier. - let mut lasts_reversed = self.last_epochs.iter().rev(); - for &(ref header, ref finality_proof, hash) in self.unverified_firsts.iter().rev() { - let mut found = false; - while let Some(&(ref last_header, ref last_verifier)) = lasts_reversed.next() { - if last_header.number() < header.number() { - if last_verifier.check_finality_proof(&finality_proof).map_or(true, |hashes| !hashes.contains(&hash)) { - return Err(Error::BadEpochProof(header.number()).into()); - } - found = true; - break; - } - } + // verify the first entries of chunks we couldn't before. + // we store all last verifiers, but not all firsts. + // match each unverified first epoch with a last epoch verifier. 
+ let mut lasts_reversed = self.last_epochs.iter().rev(); + for &(ref header, ref finality_proof, hash) in self.unverified_firsts.iter().rev() { + let mut found = false; + while let Some(&(ref last_header, ref last_verifier)) = lasts_reversed.next() { + if last_header.number() < header.number() { + if last_verifier + .check_finality_proof(&finality_proof) + .map_or(true, |hashes| !hashes.contains(&hash)) + { + return Err(Error::BadEpochProof(header.number()).into()); + } + found = true; + break; + } + } - if !found { - return Err(Error::WrongChunkFormat("Inconsistent chunk ordering.".into()).into()); - } - } + if !found { + return Err(Error::WrongChunkFormat("Inconsistent chunk ordering.".into()).into()); + } + } - // verify that the warp target verifies correctly the - // most recent epoch. if the warp target was a transition itself, - // it's already verified and doesn't need any more verification. - let &(ref header, ref last_epoch) = self.last_epochs.last() - .expect("last_epochs known to have at least one element by the check above; qed"); + // verify that the warp target verifies correctly the + // most recent epoch. if the warp target was a transition itself, + // it's already verified and doesn't need any more verification. + let &(ref header, ref last_epoch) = self + .last_epochs + .last() + .expect("last_epochs known to have at least one element by the check above; qed"); - if header != &target_header { - last_epoch.verify_heavy(&target_header)?; - } + if header != &target_header { + last_epoch.verify_heavy(&target_header)?; + } - Ok(()) - } + Ok(()) + } } diff --git a/ethcore/src/snapshot/consensus/mod.rs b/ethcore/src/snapshot/consensus/mod.rs index 907e9c520..8ec76ad63 100644 --- a/ethcore/src/snapshot/consensus/mod.rs +++ b/ethcore/src/snapshot/consensus/mod.rs @@ -17,8 +17,7 @@ //! Secondary chunk creation and restoration, implementations for different consensus //! engines. 
-use std::sync::atomic::AtomicBool; -use std::sync::Arc; +use std::sync::{atomic::AtomicBool, Arc}; use blockchain::{BlockChain, BlockChainDB}; use engines::EthEngine; @@ -29,68 +28,67 @@ use ethereum_types::H256; mod authority; mod work; -pub use self::authority::*; -pub use self::work::*; +pub use self::{authority::*, work::*}; /// A sink for produced chunks. pub type ChunkSink<'a> = FnMut(&[u8]) -> ::std::io::Result<()> + 'a; /// Components necessary for snapshot creation and restoration. pub trait SnapshotComponents: Send { - /// Create secondary snapshot chunks; these corroborate the state data - /// in the state chunks. - /// - /// Chunks shouldn't exceed the given preferred size, and should be fed - /// uncompressed into the sink. - /// - /// This will vary by consensus engine, so it's exposed as a trait. - fn chunk_all( - &mut self, - chain: &BlockChain, - block_at: H256, - chunk_sink: &mut ChunkSink, - progress: &Progress, - preferred_size: usize, - ) -> Result<(), Error>; + /// Create secondary snapshot chunks; these corroborate the state data + /// in the state chunks. + /// + /// Chunks shouldn't exceed the given preferred size, and should be fed + /// uncompressed into the sink. + /// + /// This will vary by consensus engine, so it's exposed as a trait. + fn chunk_all( + &mut self, + chain: &BlockChain, + block_at: H256, + chunk_sink: &mut ChunkSink, + progress: &Progress, + preferred_size: usize, + ) -> Result<(), Error>; - /// Create a rebuilder, which will have chunks fed into it in aribtrary - /// order and then be finalized. - /// - /// The manifest, a database, and fresh `BlockChain` are supplied. - /// - /// The engine passed to the `Rebuilder` methods will be the same instance - /// that created the `SnapshotComponents`. - fn rebuilder( - &self, - chain: BlockChain, - db: Arc, - manifest: &ManifestData, - ) -> Result, ::error::Error>; + /// Create a rebuilder, which will have chunks fed into it in aribtrary + /// order and then be finalized. 
+ /// + /// The manifest, a database, and fresh `BlockChain` are supplied. + /// + /// The engine passed to the `Rebuilder` methods will be the same instance + /// that created the `SnapshotComponents`. + fn rebuilder( + &self, + chain: BlockChain, + db: Arc, + manifest: &ManifestData, + ) -> Result, ::error::Error>; - /// Minimum supported snapshot version number. - fn min_supported_version(&self) -> u64; + /// Minimum supported snapshot version number. + fn min_supported_version(&self) -> u64; - /// Current version number - fn current_version(&self) -> u64; + /// Current version number + fn current_version(&self) -> u64; } /// Restore from secondary snapshot chunks. pub trait Rebuilder: Send { - /// Feed a chunk, potentially out of order. - /// - /// Check `abort_flag` periodically while doing heavy work. If set to `false`, should bail with - /// `Error::RestorationAborted`. - fn feed( - &mut self, - chunk: &[u8], - engine: &EthEngine, - abort_flag: &AtomicBool, - ) -> Result<(), ::error::Error>; + /// Feed a chunk, potentially out of order. + /// + /// Check `abort_flag` periodically while doing heavy work. If set to `false`, should bail with + /// `Error::RestorationAborted`. + fn feed( + &mut self, + chunk: &[u8], + engine: &EthEngine, + abort_flag: &AtomicBool, + ) -> Result<(), ::error::Error>; - /// Finalize the restoration. Will be done after all chunks have been - /// fed successfully. - /// - /// This should apply the necessary "glue" between chunks, - /// and verify against the restored state. - fn finalize(&mut self, engine: &EthEngine) -> Result<(), ::error::Error>; + /// Finalize the restoration. Will be done after all chunks have been + /// fed successfully. + /// + /// This should apply the necessary "glue" between chunks, + /// and verify against the restored state. 
+ fn finalize(&mut self, engine: &EthEngine) -> Result<(), ::error::Error>; } diff --git a/ethcore/src/snapshot/consensus/work.rs b/ethcore/src/snapshot/consensus/work.rs index 106fe4474..5a81eeabc 100644 --- a/ethcore/src/snapshot/consensus/work.rs +++ b/ethcore/src/snapshot/consensus/work.rs @@ -20,21 +20,24 @@ //! The secondary chunks in this instance are 30,000 "abridged blocks" from the head //! of the chain, which serve as an indication of valid chain. -use super::{SnapshotComponents, Rebuilder, ChunkSink}; +use super::{ChunkSink, Rebuilder, SnapshotComponents}; -use std::collections::VecDeque; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; +use std::{ + collections::VecDeque, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; use blockchain::{BlockChain, BlockChainDB, BlockProvider}; +use bytes::Bytes; use engines::EthEngine; -use snapshot::{Error, ManifestData, Progress}; -use snapshot::block::AbridgedBlock; use ethereum_types::H256; use kvdb::KeyValueDB; -use bytes::Bytes; -use rlp::{RlpStream, Rlp}; use rand::OsRng; +use rlp::{Rlp, RlpStream}; +use snapshot::{block::AbridgedBlock, Error, ManifestData, Progress}; use types::encoded; /// Snapshot creation and restoration for PoW chains. @@ -42,146 +45,170 @@ use types::encoded; /// loose assurance that the chain is valid. #[derive(Clone, Copy, PartialEq)] pub struct PowSnapshot { - /// Number of blocks from the head of the chain - /// to include in the snapshot. - pub blocks: u64, - /// Number of to allow in the snapshot when restoring. - pub max_restore_blocks: u64, + /// Number of blocks from the head of the chain + /// to include in the snapshot. + pub blocks: u64, + /// Number of to allow in the snapshot when restoring. + pub max_restore_blocks: u64, } impl PowSnapshot { - /// Create a new instance. 
- pub fn new(blocks: u64, max_restore_blocks: u64) -> PowSnapshot { - PowSnapshot { - blocks: blocks, - max_restore_blocks: max_restore_blocks, - } - } + /// Create a new instance. + pub fn new(blocks: u64, max_restore_blocks: u64) -> PowSnapshot { + PowSnapshot { + blocks: blocks, + max_restore_blocks: max_restore_blocks, + } + } } impl SnapshotComponents for PowSnapshot { - fn chunk_all( - &mut self, - chain: &BlockChain, - block_at: H256, - chunk_sink: &mut ChunkSink, - progress: &Progress, - preferred_size: usize, - ) -> Result<(), Error> { - PowWorker { - chain: chain, - rlps: VecDeque::new(), - current_hash: block_at, - writer: chunk_sink, - progress: progress, - preferred_size: preferred_size, - }.chunk_all(self.blocks) - } + fn chunk_all( + &mut self, + chain: &BlockChain, + block_at: H256, + chunk_sink: &mut ChunkSink, + progress: &Progress, + preferred_size: usize, + ) -> Result<(), Error> { + PowWorker { + chain: chain, + rlps: VecDeque::new(), + current_hash: block_at, + writer: chunk_sink, + progress: progress, + preferred_size: preferred_size, + } + .chunk_all(self.blocks) + } - fn rebuilder( - &self, - chain: BlockChain, - db: Arc, - manifest: &ManifestData, - ) -> Result, ::error::Error> { - PowRebuilder::new(chain, db.key_value().clone(), manifest, self.max_restore_blocks).map(|r| Box::new(r) as Box<_>) - } + fn rebuilder( + &self, + chain: BlockChain, + db: Arc, + manifest: &ManifestData, + ) -> Result, ::error::Error> { + PowRebuilder::new( + chain, + db.key_value().clone(), + manifest, + self.max_restore_blocks, + ) + .map(|r| Box::new(r) as Box<_>) + } - fn min_supported_version(&self) -> u64 { ::snapshot::MIN_SUPPORTED_STATE_CHUNK_VERSION } - fn current_version(&self) -> u64 { ::snapshot::STATE_CHUNK_VERSION } + fn min_supported_version(&self) -> u64 { + ::snapshot::MIN_SUPPORTED_STATE_CHUNK_VERSION + } + fn current_version(&self) -> u64 { + ::snapshot::STATE_CHUNK_VERSION + } } /// Used to build block chunks. 
struct PowWorker<'a> { - chain: &'a BlockChain, - // block, receipt rlp pairs. - rlps: VecDeque, - current_hash: H256, - writer: &'a mut ChunkSink<'a>, - progress: &'a Progress, - preferred_size: usize, + chain: &'a BlockChain, + // block, receipt rlp pairs. + rlps: VecDeque, + current_hash: H256, + writer: &'a mut ChunkSink<'a>, + progress: &'a Progress, + preferred_size: usize, } impl<'a> PowWorker<'a> { - // Repeatedly fill the buffers and writes out chunks, moving backwards from starting block hash. - // Loops until we reach the first desired block, and writes out the remainder. - fn chunk_all(&mut self, snapshot_blocks: u64) -> Result<(), Error> { - let mut loaded_size = 0; - let mut last = self.current_hash; + // Repeatedly fill the buffers and writes out chunks, moving backwards from starting block hash. + // Loops until we reach the first desired block, and writes out the remainder. + fn chunk_all(&mut self, snapshot_blocks: u64) -> Result<(), Error> { + let mut loaded_size = 0; + let mut last = self.current_hash; - let genesis_hash = self.chain.genesis_hash(); + let genesis_hash = self.chain.genesis_hash(); - for _ in 0..snapshot_blocks { - if self.current_hash == genesis_hash { break } + for _ in 0..snapshot_blocks { + if self.current_hash == genesis_hash { + break; + } - let (block, receipts) = self.chain.block(&self.current_hash) - .and_then(|b| self.chain.block_receipts(&self.current_hash).map(|r| (b, r))) - .ok_or_else(|| Error::BlockNotFound(self.current_hash))?; + let (block, receipts) = self + .chain + .block(&self.current_hash) + .and_then(|b| { + self.chain + .block_receipts(&self.current_hash) + .map(|r| (b, r)) + }) + .ok_or_else(|| Error::BlockNotFound(self.current_hash))?; - let abridged_rlp = AbridgedBlock::from_block_view(&block.view()).into_inner(); + let abridged_rlp = AbridgedBlock::from_block_view(&block.view()).into_inner(); - let pair = { - let mut pair_stream = RlpStream::new_list(2); - pair_stream.append_raw(&abridged_rlp, 
1).append(&receipts); - pair_stream.out() - }; + let pair = { + let mut pair_stream = RlpStream::new_list(2); + pair_stream.append_raw(&abridged_rlp, 1).append(&receipts); + pair_stream.out() + }; - let new_loaded_size = loaded_size + pair.len(); + let new_loaded_size = loaded_size + pair.len(); - // cut off the chunk if too large. + // cut off the chunk if too large. - if new_loaded_size > self.preferred_size && !self.rlps.is_empty() { - self.write_chunk(last)?; - loaded_size = pair.len(); - } else { - loaded_size = new_loaded_size; - } + if new_loaded_size > self.preferred_size && !self.rlps.is_empty() { + self.write_chunk(last)?; + loaded_size = pair.len(); + } else { + loaded_size = new_loaded_size; + } - self.rlps.push_front(pair); + self.rlps.push_front(pair); - last = self.current_hash; - self.current_hash = block.header_view().parent_hash(); - self.progress.blocks.fetch_add(1, Ordering::SeqCst); - } + last = self.current_hash; + self.current_hash = block.header_view().parent_hash(); + self.progress.blocks.fetch_add(1, Ordering::SeqCst); + } - if loaded_size != 0 { - self.write_chunk(last)?; - } + if loaded_size != 0 { + self.write_chunk(last)?; + } - Ok(()) - } + Ok(()) + } - // write out the data in the buffers to a chunk on disk - // - // we preface each chunk with the parent of the first block's details, - // obtained from the details of the last block written. - fn write_chunk(&mut self, last: H256) -> Result<(), Error> { - trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len()); + // write out the data in the buffers to a chunk on disk + // + // we preface each chunk with the parent of the first block's details, + // obtained from the details of the last block written. 
+ fn write_chunk(&mut self, last: H256) -> Result<(), Error> { + trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len()); - let (last_header, last_details) = self.chain.block_header_data(&last) - .and_then(|n| self.chain.block_details(&last).map(|d| (n, d))) - .ok_or_else(|| Error::BlockNotFound(last))?; + let (last_header, last_details) = self + .chain + .block_header_data(&last) + .and_then(|n| self.chain.block_details(&last).map(|d| (n, d))) + .ok_or_else(|| Error::BlockNotFound(last))?; - let parent_number = last_header.number() - 1; - let parent_hash = last_header.parent_hash(); - let parent_total_difficulty = last_details.total_difficulty - last_header.difficulty(); + let parent_number = last_header.number() - 1; + let parent_hash = last_header.parent_hash(); + let parent_total_difficulty = last_details.total_difficulty - last_header.difficulty(); - trace!(target: "snapshot", "parent last written block: {}", parent_hash); + trace!(target: "snapshot", "parent last written block: {}", parent_hash); - let num_entries = self.rlps.len(); - let mut rlp_stream = RlpStream::new_list(3 + num_entries); - rlp_stream.append(&parent_number).append(&parent_hash).append(&parent_total_difficulty); + let num_entries = self.rlps.len(); + let mut rlp_stream = RlpStream::new_list(3 + num_entries); + rlp_stream + .append(&parent_number) + .append(&parent_hash) + .append(&parent_total_difficulty); - for pair in self.rlps.drain(..) { - rlp_stream.append_raw(&pair, 1); - } + for pair in self.rlps.drain(..) { + rlp_stream.append_raw(&pair, 1); + } - let raw_data = rlp_stream.out(); + let raw_data = rlp_stream.out(); - (self.writer)(&raw_data)?; + (self.writer)(&raw_data)?; - Ok(()) - } + Ok(()) + } } /// Rebuilder for proof-of-work chains. @@ -193,134 +220,167 @@ impl<'a> PowWorker<'a> { /// /// After all chunks have been submitted, we "glue" the chunks together. 
pub struct PowRebuilder { - chain: BlockChain, - db: Arc, - rng: OsRng, - disconnected: Vec<(u64, H256)>, - best_number: u64, - best_hash: H256, - best_root: H256, - fed_blocks: u64, - snapshot_blocks: u64, + chain: BlockChain, + db: Arc, + rng: OsRng, + disconnected: Vec<(u64, H256)>, + best_number: u64, + best_hash: H256, + best_root: H256, + fed_blocks: u64, + snapshot_blocks: u64, } impl PowRebuilder { - /// Create a new PowRebuilder. - fn new(chain: BlockChain, db: Arc, manifest: &ManifestData, snapshot_blocks: u64) -> Result { - Ok(PowRebuilder { - chain: chain, - db: db, - rng: OsRng::new()?, - disconnected: Vec::new(), - best_number: manifest.block_number, - best_hash: manifest.block_hash, - best_root: manifest.state_root, - fed_blocks: 0, - snapshot_blocks: snapshot_blocks, - }) - } + /// Create a new PowRebuilder. + fn new( + chain: BlockChain, + db: Arc, + manifest: &ManifestData, + snapshot_blocks: u64, + ) -> Result { + Ok(PowRebuilder { + chain: chain, + db: db, + rng: OsRng::new()?, + disconnected: Vec::new(), + best_number: manifest.block_number, + best_hash: manifest.block_hash, + best_root: manifest.state_root, + fed_blocks: 0, + snapshot_blocks: snapshot_blocks, + }) + } } impl Rebuilder for PowRebuilder { - /// Feed the rebuilder an uncompressed block chunk. - /// Returns the number of blocks fed or any errors. - fn feed(&mut self, chunk: &[u8], engine: &EthEngine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> { - use snapshot::verify_old_block; - use ethereum_types::U256; - use triehash::ordered_trie_root; + /// Feed the rebuilder an uncompressed block chunk. + /// Returns the number of blocks fed or any errors. 
+ fn feed( + &mut self, + chunk: &[u8], + engine: &EthEngine, + abort_flag: &AtomicBool, + ) -> Result<(), ::error::Error> { + use ethereum_types::U256; + use snapshot::verify_old_block; + use triehash::ordered_trie_root; - let rlp = Rlp::new(chunk); - let item_count = rlp.item_count()?; - let num_blocks = (item_count - 3) as u64; + let rlp = Rlp::new(chunk); + let item_count = rlp.item_count()?; + let num_blocks = (item_count - 3) as u64; - trace!(target: "snapshot", "restoring block chunk with {} blocks.", num_blocks); + trace!(target: "snapshot", "restoring block chunk with {} blocks.", num_blocks); - if self.fed_blocks + num_blocks > self.snapshot_blocks { - return Err(Error::TooManyBlocks(self.snapshot_blocks, self.fed_blocks + num_blocks).into()) - } + if self.fed_blocks + num_blocks > self.snapshot_blocks { + return Err( + Error::TooManyBlocks(self.snapshot_blocks, self.fed_blocks + num_blocks).into(), + ); + } - // todo: assert here that these values are consistent with chunks being in order. - let mut cur_number = rlp.val_at::(0)? + 1; - let mut parent_hash = rlp.val_at::(1)?; - let parent_total_difficulty = rlp.val_at::(2)?; + // todo: assert here that these values are consistent with chunks being in order. + let mut cur_number = rlp.val_at::(0)? 
+ 1; + let mut parent_hash = rlp.val_at::(1)?; + let parent_total_difficulty = rlp.val_at::(2)?; - for idx in 3..item_count { - if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } + for idx in 3..item_count { + if !abort_flag.load(Ordering::SeqCst) { + return Err(Error::RestorationAborted.into()); + } - let pair = rlp.at(idx)?; - let abridged_rlp = pair.at(0)?.as_raw().to_owned(); - let abridged_block = AbridgedBlock::from_raw(abridged_rlp); - let receipts: Vec<::types::receipt::Receipt> = pair.list_at(1)?; - let receipts_root = ordered_trie_root(pair.at(1)?.iter().map(|r| r.as_raw())); + let pair = rlp.at(idx)?; + let abridged_rlp = pair.at(0)?.as_raw().to_owned(); + let abridged_block = AbridgedBlock::from_raw(abridged_rlp); + let receipts: Vec<::types::receipt::Receipt> = pair.list_at(1)?; + let receipts_root = ordered_trie_root(pair.at(1)?.iter().map(|r| r.as_raw())); - let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?; - let block_bytes = encoded::Block::new(block.rlp_bytes()); - let is_best = cur_number == self.best_number; + let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?; + let block_bytes = encoded::Block::new(block.rlp_bytes()); + let is_best = cur_number == self.best_number; - if is_best { - if block.header.hash() != self.best_hash { - return Err(Error::WrongBlockHash(cur_number, self.best_hash, block.header.hash()).into()) - } + if is_best { + if block.header.hash() != self.best_hash { + return Err(Error::WrongBlockHash( + cur_number, + self.best_hash, + block.header.hash(), + ) + .into()); + } - if block.header.state_root() != &self.best_root { - return Err(Error::WrongStateRoot(self.best_root, *block.header.state_root()).into()) - } - } + if block.header.state_root() != &self.best_root { + return Err( + Error::WrongStateRoot(self.best_root, *block.header.state_root()).into(), + ); + } + } - verify_old_block( - &mut self.rng, - &block.header, - engine, - 
&self.chain, - is_best - )?; + verify_old_block(&mut self.rng, &block.header, engine, &self.chain, is_best)?; - let mut batch = self.db.transaction(); + let mut batch = self.db.transaction(); - // special-case the first block in each chunk. - if idx == 3 { - if self.chain.insert_unordered_block(&mut batch, block_bytes, receipts, Some(parent_total_difficulty), is_best, false) { - self.disconnected.push((cur_number, block.header.hash())); - } - } else { - self.chain.insert_unordered_block(&mut batch, block_bytes, receipts, None, is_best, false); - } - self.db.write_buffered(batch); - self.chain.commit(); + // special-case the first block in each chunk. + if idx == 3 { + if self.chain.insert_unordered_block( + &mut batch, + block_bytes, + receipts, + Some(parent_total_difficulty), + is_best, + false, + ) { + self.disconnected.push((cur_number, block.header.hash())); + } + } else { + self.chain.insert_unordered_block( + &mut batch, + block_bytes, + receipts, + None, + is_best, + false, + ); + } + self.db.write_buffered(batch); + self.chain.commit(); - parent_hash = block.header.hash(); - cur_number += 1; - } + parent_hash = block.header.hash(); + cur_number += 1; + } - self.fed_blocks += num_blocks; + self.fed_blocks += num_blocks; - Ok(()) - } + Ok(()) + } - /// Glue together any disconnected chunks and check that the chain is complete. - fn finalize(&mut self, _: &EthEngine) -> Result<(), ::error::Error> { - let mut batch = self.db.transaction(); + /// Glue together any disconnected chunks and check that the chain is complete. + fn finalize(&mut self, _: &EthEngine) -> Result<(), ::error::Error> { + let mut batch = self.db.transaction(); - for (first_num, first_hash) in self.disconnected.drain(..) { - let parent_num = first_num - 1; + for (first_num, first_hash) in self.disconnected.drain(..) { + let parent_num = first_num - 1; - // check if the parent is even in the chain. 
- // since we don't restore every single block in the chain, - // the first block of the first chunks has nothing to connect to. - if let Some(parent_hash) = self.chain.block_hash(parent_num) { - // if so, add the child to it. - self.chain.add_child(&mut batch, parent_hash, first_hash); - } - } + // check if the parent is even in the chain. + // since we don't restore every single block in the chain, + // the first block of the first chunks has nothing to connect to. + if let Some(parent_hash) = self.chain.block_hash(parent_num) { + // if so, add the child to it. + self.chain.add_child(&mut batch, parent_hash, first_hash); + } + } - let genesis_hash = self.chain.genesis_hash(); - self.chain.insert_epoch_transition(&mut batch, 0, ::engines::EpochTransition { - block_number: 0, - block_hash: genesis_hash, - proof: vec![], - }); + let genesis_hash = self.chain.genesis_hash(); + self.chain.insert_epoch_transition( + &mut batch, + 0, + ::engines::EpochTransition { + block_number: 0, + block_hash: genesis_hash, + proof: vec![], + }, + ); - self.db.write_buffered(batch); - Ok(()) - } + self.db.write_buffered(batch); + Ok(()) + } } diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index 6faa19da2..0fd9dd41b 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -27,100 +27,125 @@ use rlp::DecoderError; /// Snapshot-related errors. #[derive(Debug)] pub enum Error { - /// Invalid starting block for snapshot. - InvalidStartingBlock(BlockId), - /// Block not found. - BlockNotFound(H256), - /// Incomplete chain. - IncompleteChain, - /// Best block has wrong state root. - WrongStateRoot(H256, H256), - /// Wrong block hash. - WrongBlockHash(u64, H256, H256), - /// Too many blocks contained within the snapshot. - TooManyBlocks(u64, u64), - /// Old starting block in a pruned database. - OldBlockPrunedDB, - /// Missing code. - MissingCode(Vec), - /// Unrecognized code encoding. 
- UnrecognizedCodeState(u8), - /// Restoration aborted. - RestorationAborted, - /// Trie error. - Trie(TrieError), - /// Decoder error. - Decoder(DecoderError), - /// Io error. - Io(::std::io::Error), - /// Snapshot version is not supported. - VersionNotSupported(u64), - /// Max chunk size is to small to fit basic account data. - ChunkTooSmall, - /// Oversized chunk - ChunkTooLarge, - /// Snapshots not supported by the consensus engine. - SnapshotsUnsupported, - /// Aborted snapshot - SnapshotAborted, - /// Bad epoch transition. - BadEpochProof(u64), - /// Wrong chunk format. - WrongChunkFormat(String), - /// Unlinked ancient block chain - UnlinkedAncientBlockChain, + /// Invalid starting block for snapshot. + InvalidStartingBlock(BlockId), + /// Block not found. + BlockNotFound(H256), + /// Incomplete chain. + IncompleteChain, + /// Best block has wrong state root. + WrongStateRoot(H256, H256), + /// Wrong block hash. + WrongBlockHash(u64, H256, H256), + /// Too many blocks contained within the snapshot. + TooManyBlocks(u64, u64), + /// Old starting block in a pruned database. + OldBlockPrunedDB, + /// Missing code. + MissingCode(Vec), + /// Unrecognized code encoding. + UnrecognizedCodeState(u8), + /// Restoration aborted. + RestorationAborted, + /// Trie error. + Trie(TrieError), + /// Decoder error. + Decoder(DecoderError), + /// Io error. + Io(::std::io::Error), + /// Snapshot version is not supported. + VersionNotSupported(u64), + /// Max chunk size is to small to fit basic account data. + ChunkTooSmall, + /// Oversized chunk + ChunkTooLarge, + /// Snapshots not supported by the consensus engine. + SnapshotsUnsupported, + /// Aborted snapshot + SnapshotAborted, + /// Bad epoch transition. + BadEpochProof(u64), + /// Wrong chunk format. 
+ WrongChunkFormat(String), + /// Unlinked ancient block chain + UnlinkedAncientBlockChain, } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::InvalidStartingBlock(ref id) => write!(f, "Invalid starting block: {:?}", id), - Error::BlockNotFound(ref hash) => write!(f, "Block not found in chain: {}", hash), - Error::IncompleteChain => write!(f, "Incomplete blockchain."), - Error::WrongStateRoot(ref expected, ref found) => write!(f, "Final block has wrong state root. Expected {:?}, got {:?}", expected, found), - Error::WrongBlockHash(ref num, ref expected, ref found) => - write!(f, "Block {} had wrong hash. expected {:?}, got {:?}", num, expected, found), - Error::TooManyBlocks(ref expected, ref found) => write!(f, "Snapshot contained too many blocks. Expected {}, got {}", expected, found), - Error::OldBlockPrunedDB => write!(f, "Attempted to create a snapshot at an old block while using \ - a pruned database. Please re-run with the --pruning archive flag."), - Error::MissingCode(ref missing) => write!(f, "Incomplete snapshot: {} contract codes not found.", missing.len()), - Error::UnrecognizedCodeState(state) => write!(f, "Unrecognized code encoding ({})", state), - Error::RestorationAborted => write!(f, "Snapshot restoration aborted."), - Error::Io(ref err) => err.fmt(f), - Error::Decoder(ref err) => err.fmt(f), - Error::Trie(ref err) => err.fmt(f), - Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver), - Error::ChunkTooSmall => write!(f, "Chunk size is too small."), - Error::ChunkTooLarge => write!(f, "Chunk size is too large."), - Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."), - Error::SnapshotAborted => write!(f, "Snapshot was aborted."), - Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i), - Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg), - 
Error::UnlinkedAncientBlockChain => write!(f, "Unlinked ancient blocks chain"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::InvalidStartingBlock(ref id) => write!(f, "Invalid starting block: {:?}", id), + Error::BlockNotFound(ref hash) => write!(f, "Block not found in chain: {}", hash), + Error::IncompleteChain => write!(f, "Incomplete blockchain."), + Error::WrongStateRoot(ref expected, ref found) => write!( + f, + "Final block has wrong state root. Expected {:?}, got {:?}", + expected, found + ), + Error::WrongBlockHash(ref num, ref expected, ref found) => write!( + f, + "Block {} had wrong hash. expected {:?}, got {:?}", + num, expected, found + ), + Error::TooManyBlocks(ref expected, ref found) => write!( + f, + "Snapshot contained too many blocks. Expected {}, got {}", + expected, found + ), + Error::OldBlockPrunedDB => write!( + f, + "Attempted to create a snapshot at an old block while using \ + a pruned database. Please re-run with the --pruning archive flag." 
+ ), + Error::MissingCode(ref missing) => write!( + f, + "Incomplete snapshot: {} contract codes not found.", + missing.len() + ), + Error::UnrecognizedCodeState(state) => { + write!(f, "Unrecognized code encoding ({})", state) + } + Error::RestorationAborted => write!(f, "Snapshot restoration aborted."), + Error::Io(ref err) => err.fmt(f), + Error::Decoder(ref err) => err.fmt(f), + Error::Trie(ref err) => err.fmt(f), + Error::VersionNotSupported(ref ver) => { + write!(f, "Snapshot version {} is not supprted.", ver) + } + Error::ChunkTooSmall => write!(f, "Chunk size is too small."), + Error::ChunkTooLarge => write!(f, "Chunk size is too large."), + Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."), + Error::SnapshotAborted => write!(f, "Snapshot was aborted."), + Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i), + Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg), + Error::UnlinkedAncientBlockChain => write!(f, "Unlinked ancient blocks chain"), + } + } } impl From<::std::io::Error> for Error { - fn from(err: ::std::io::Error) -> Self { - Error::Io(err) - } + fn from(err: ::std::io::Error) -> Self { + Error::Io(err) + } } impl From for Error { - fn from(err: TrieError) -> Self { - Error::Trie(err) - } + fn from(err: TrieError) -> Self { + Error::Trie(err) + } } impl From for Error { - fn from(err: DecoderError) -> Self { - Error::Decoder(err) - } + fn from(err: DecoderError) -> Self { + Error::Decoder(err) + } } -impl From> for Error where Error: From { - fn from(err: Box) -> Self { - Error::from(*err) - } +impl From> for Error +where + Error: From, +{ + fn from(err: Box) -> Self { + Error::from(*err) + } } diff --git a/ethcore/src/snapshot/io.rs b/ethcore/src/snapshot/io.rs index 536862e7b..704682099 100644 --- a/ethcore/src/snapshot/io.rs +++ b/ethcore/src/snapshot/io.rs @@ -20,14 +20,16 @@ //! 
Packed snapshots are written to a single file, and loose snapshots are //! written to multiple files in one directory. -use std::collections::HashMap; -use std::io::{self, Read, Seek, SeekFrom, Write}; -use std::fs::{self, File}; -use std::path::{Path, PathBuf}; +use std::{ + collections::HashMap, + fs::{self, File}, + io::{self, Read, Seek, SeekFrom, Write}, + path::{Path, PathBuf}, +}; use bytes::Bytes; use ethereum_types::H256; -use rlp::{RlpStream, Rlp}; +use rlp::{Rlp, RlpStream}; use super::ManifestData; @@ -37,15 +39,17 @@ const SNAPSHOT_VERSION: u64 = 2; /// Writing the same chunk multiple times will lead to implementation-defined /// behavior, and is not advised. pub trait SnapshotWriter { - /// Write a compressed state chunk. - fn write_state_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()>; + /// Write a compressed state chunk. + fn write_state_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()>; - /// Write a compressed block chunk. - fn write_block_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()>; + /// Write a compressed block chunk. + fn write_block_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()>; - /// Complete writing. The manifest's chunk lists must be consistent - /// with the chunks written. - fn finish(self, manifest: ManifestData) -> io::Result<()> where Self: Sized; + /// Complete writing. The manifest's chunk lists must be consistent + /// with the chunks written. + fn finish(self, manifest: ManifestData) -> io::Result<()> + where + Self: Sized; } // (hash, len, offset) @@ -63,358 +67,369 @@ struct ChunkInfo(H256, u64, u64); /// but also maps chunk hashes to their lengths and offsets in the file /// for easy reading. pub struct PackedWriter { - file: File, - state_hashes: Vec, - block_hashes: Vec, - cur_len: u64, + file: File, + state_hashes: Vec, + block_hashes: Vec, + cur_len: u64, } impl PackedWriter { - /// Create a new "PackedWriter", to write into the file at the given path. 
- pub fn new(path: &Path) -> io::Result { - Ok(PackedWriter { - file: File::create(path)?, - state_hashes: Vec::new(), - block_hashes: Vec::new(), - cur_len: 0, - }) - } + /// Create a new "PackedWriter", to write into the file at the given path. + pub fn new(path: &Path) -> io::Result { + Ok(PackedWriter { + file: File::create(path)?, + state_hashes: Vec::new(), + block_hashes: Vec::new(), + cur_len: 0, + }) + } } impl SnapshotWriter for PackedWriter { - fn write_state_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { - self.file.write_all(chunk)?; + fn write_state_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { + self.file.write_all(chunk)?; - let len = chunk.len() as u64; - self.state_hashes.push(ChunkInfo(hash, len, self.cur_len)); + let len = chunk.len() as u64; + self.state_hashes.push(ChunkInfo(hash, len, self.cur_len)); - self.cur_len += len; - Ok(()) - } + self.cur_len += len; + Ok(()) + } - fn write_block_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { - self.file.write_all(chunk)?; + fn write_block_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { + self.file.write_all(chunk)?; - let len = chunk.len() as u64; - self.block_hashes.push(ChunkInfo(hash, len, self.cur_len)); + let len = chunk.len() as u64; + self.block_hashes.push(ChunkInfo(hash, len, self.cur_len)); - self.cur_len += len; - Ok(()) - } + self.cur_len += len; + Ok(()) + } - fn finish(mut self, manifest: ManifestData) -> io::Result<()> { - // we ignore the hashes fields of the manifest under the assumption that - // they are consistent with ours. 
- let mut stream = RlpStream::new_list(6); - stream - .append(&SNAPSHOT_VERSION) - .append_list(&self.state_hashes) - .append_list(&self.block_hashes) - .append(&manifest.state_root) - .append(&manifest.block_number) - .append(&manifest.block_hash); + fn finish(mut self, manifest: ManifestData) -> io::Result<()> { + // we ignore the hashes fields of the manifest under the assumption that + // they are consistent with ours. + let mut stream = RlpStream::new_list(6); + stream + .append(&SNAPSHOT_VERSION) + .append_list(&self.state_hashes) + .append_list(&self.block_hashes) + .append(&manifest.state_root) + .append(&manifest.block_number) + .append(&manifest.block_hash); - let manifest_rlp = stream.out(); + let manifest_rlp = stream.out(); - self.file.write_all(&manifest_rlp)?; - let off = self.cur_len; - trace!(target: "snapshot_io", "writing manifest of len {} to offset {}", manifest_rlp.len(), off); + self.file.write_all(&manifest_rlp)?; + let off = self.cur_len; + trace!(target: "snapshot_io", "writing manifest of len {} to offset {}", manifest_rlp.len(), off); - let off_bytes: [u8; 8] = - [ - off as u8, - (off >> 8) as u8, - (off >> 16) as u8, - (off >> 24) as u8, - (off >> 32) as u8, - (off >> 40) as u8, - (off >> 48) as u8, - (off >> 56) as u8, - ]; + let off_bytes: [u8; 8] = [ + off as u8, + (off >> 8) as u8, + (off >> 16) as u8, + (off >> 24) as u8, + (off >> 32) as u8, + (off >> 40) as u8, + (off >> 48) as u8, + (off >> 56) as u8, + ]; - self.file.write_all(&off_bytes[..])?; + self.file.write_all(&off_bytes[..])?; - Ok(()) - } + Ok(()) + } } /// A "loose" writer writes chunk files into a directory. pub struct LooseWriter { - dir: PathBuf, + dir: PathBuf, } impl LooseWriter { - /// Create a new LooseWriter which will write into the given directory, - /// creating it if it doesn't exist. 
- pub fn new(path: PathBuf) -> io::Result { - fs::create_dir_all(&path)?; + /// Create a new LooseWriter which will write into the given directory, + /// creating it if it doesn't exist. + pub fn new(path: PathBuf) -> io::Result { + fs::create_dir_all(&path)?; - Ok(LooseWriter { - dir: path, - }) - } + Ok(LooseWriter { dir: path }) + } - // writing logic is the same for both kinds of chunks. - fn write_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { - let file_path = self.dir.join(format!("{:x}", hash)); - let mut file = File::create(file_path)?; - file.write_all(chunk)?; - Ok(()) - } + // writing logic is the same for both kinds of chunks. + fn write_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { + let file_path = self.dir.join(format!("{:x}", hash)); + let mut file = File::create(file_path)?; + file.write_all(chunk)?; + Ok(()) + } } impl SnapshotWriter for LooseWriter { - fn write_state_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { - self.write_chunk(hash, chunk) - } + fn write_state_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { + self.write_chunk(hash, chunk) + } - fn write_block_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { - self.write_chunk(hash, chunk) - } + fn write_block_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> { + self.write_chunk(hash, chunk) + } - fn finish(self, manifest: ManifestData) -> io::Result<()> { - let rlp = manifest.into_rlp(); - let mut path = self.dir.clone(); - path.push("MANIFEST"); + fn finish(self, manifest: ManifestData) -> io::Result<()> { + let rlp = manifest.into_rlp(); + let mut path = self.dir.clone(); + path.push("MANIFEST"); - let mut file = File::create(path)?; - file.write_all(&rlp[..])?; + let mut file = File::create(path)?; + file.write_all(&rlp[..])?; - Ok(()) - } + Ok(()) + } } /// Something which can read compressed snapshots. pub trait SnapshotReader { - /// Get the manifest data for this snapshot. 
- fn manifest(&self) -> &ManifestData; + /// Get the manifest data for this snapshot. + fn manifest(&self) -> &ManifestData; - /// Get raw chunk data by hash. implementation defined behavior - /// if a chunk not in the manifest is requested. - fn chunk(&self, hash: H256) -> io::Result; + /// Get raw chunk data by hash. implementation defined behavior + /// if a chunk not in the manifest is requested. + fn chunk(&self, hash: H256) -> io::Result; } /// Packed snapshot reader. pub struct PackedReader { - file: File, - state_hashes: HashMap, // len, offset - block_hashes: HashMap, // len, offset - manifest: ManifestData, + file: File, + state_hashes: HashMap, // len, offset + block_hashes: HashMap, // len, offset + manifest: ManifestData, } impl PackedReader { - /// Create a new `PackedReader` for the file at the given path. - /// This will fail if any io errors are encountered or the file - /// is not a valid packed snapshot. - pub fn new(path: &Path) -> Result, ::snapshot::error::Error> { - let mut file = File::open(path)?; - let file_len = file.metadata()?.len(); - if file_len < 8 { - // ensure we don't seek before beginning. - return Ok(None); - } + /// Create a new `PackedReader` for the file at the given path. + /// This will fail if any io errors are encountered or the file + /// is not a valid packed snapshot. + pub fn new(path: &Path) -> Result, ::snapshot::error::Error> { + let mut file = File::open(path)?; + let file_len = file.metadata()?.len(); + if file_len < 8 { + // ensure we don't seek before beginning. 
+ return Ok(None); + } - file.seek(SeekFrom::End(-8))?; - let mut off_bytes = [0u8; 8]; + file.seek(SeekFrom::End(-8))?; + let mut off_bytes = [0u8; 8]; - file.read_exact(&mut off_bytes[..])?; + file.read_exact(&mut off_bytes[..])?; - let manifest_off: u64 = - ((off_bytes[7] as u64) << 56) + - ((off_bytes[6] as u64) << 48) + - ((off_bytes[5] as u64) << 40) + - ((off_bytes[4] as u64) << 32) + - ((off_bytes[3] as u64) << 24) + - ((off_bytes[2] as u64) << 16) + - ((off_bytes[1] as u64) << 8) + - (off_bytes[0] as u64); + let manifest_off: u64 = ((off_bytes[7] as u64) << 56) + + ((off_bytes[6] as u64) << 48) + + ((off_bytes[5] as u64) << 40) + + ((off_bytes[4] as u64) << 32) + + ((off_bytes[3] as u64) << 24) + + ((off_bytes[2] as u64) << 16) + + ((off_bytes[1] as u64) << 8) + + (off_bytes[0] as u64); - let manifest_len = file_len - manifest_off - 8; - trace!(target: "snapshot", "loading manifest of length {} from offset {}", manifest_len, manifest_off); + let manifest_len = file_len - manifest_off - 8; + trace!(target: "snapshot", "loading manifest of length {} from offset {}", manifest_len, manifest_off); - let mut manifest_buf = vec![0; manifest_len as usize]; + let mut manifest_buf = vec![0; manifest_len as usize]; - file.seek(SeekFrom::Start(manifest_off))?; - file.read_exact(&mut manifest_buf)?; + file.seek(SeekFrom::Start(manifest_off))?; + file.read_exact(&mut manifest_buf)?; - let rlp = Rlp::new(&manifest_buf); + let rlp = Rlp::new(&manifest_buf); - let (start, version) = if rlp.item_count()? == 5 { - (0, 1) - } else { - (1, rlp.val_at(0)?) - }; + let (start, version) = if rlp.item_count()? == 5 { + (0, 1) + } else { + (1, rlp.val_at(0)?) 
+ }; - if version > SNAPSHOT_VERSION { - return Err(::snapshot::error::Error::VersionNotSupported(version)); - } + if version > SNAPSHOT_VERSION { + return Err(::snapshot::error::Error::VersionNotSupported(version)); + } - let state: Vec = rlp.list_at(0 + start)?; - let blocks: Vec = rlp.list_at(1 + start)?; + let state: Vec = rlp.list_at(0 + start)?; + let blocks: Vec = rlp.list_at(1 + start)?; - let manifest = ManifestData { - version: version, - state_hashes: state.iter().map(|c| c.0).collect(), - block_hashes: blocks.iter().map(|c| c.0).collect(), - state_root: rlp.val_at(2 + start)?, - block_number: rlp.val_at(3 + start)?, - block_hash: rlp.val_at(4 + start)?, - }; + let manifest = ManifestData { + version: version, + state_hashes: state.iter().map(|c| c.0).collect(), + block_hashes: blocks.iter().map(|c| c.0).collect(), + state_root: rlp.val_at(2 + start)?, + block_number: rlp.val_at(3 + start)?, + block_hash: rlp.val_at(4 + start)?, + }; - Ok(Some(PackedReader { - file: file, - state_hashes: state.into_iter().map(|c| (c.0, (c.1, c.2))).collect(), - block_hashes: blocks.into_iter().map(|c| (c.0, (c.1, c.2))).collect(), - manifest: manifest - })) - } + Ok(Some(PackedReader { + file: file, + state_hashes: state.into_iter().map(|c| (c.0, (c.1, c.2))).collect(), + block_hashes: blocks.into_iter().map(|c| (c.0, (c.1, c.2))).collect(), + manifest: manifest, + })) + } } impl SnapshotReader for PackedReader { - fn manifest(&self) -> &ManifestData { - &self.manifest - } + fn manifest(&self) -> &ManifestData { + &self.manifest + } - fn chunk(&self, hash: H256) -> io::Result { - let &(len, off) = self.state_hashes.get(&hash).or_else(|| self.block_hashes.get(&hash)) - .expect("only chunks in the manifest can be requested; qed"); + fn chunk(&self, hash: H256) -> io::Result { + let &(len, off) = self + .state_hashes + .get(&hash) + .or_else(|| self.block_hashes.get(&hash)) + .expect("only chunks in the manifest can be requested; qed"); - let mut file = &self.file; + let 
mut file = &self.file; - file.seek(SeekFrom::Start(off))?; - let mut buf = vec![0; len as usize]; + file.seek(SeekFrom::Start(off))?; + let mut buf = vec![0; len as usize]; - file.read_exact(&mut buf[..])?; + file.read_exact(&mut buf[..])?; - Ok(buf) - } + Ok(buf) + } } /// reader for "loose" snapshots pub struct LooseReader { - dir: PathBuf, - manifest: ManifestData, + dir: PathBuf, + manifest: ManifestData, } impl LooseReader { - /// Create a new `LooseReader` which will read the manifest and chunk data from - /// the given directory. - pub fn new(mut dir: PathBuf) -> Result { - let mut manifest_buf = Vec::new(); + /// Create a new `LooseReader` which will read the manifest and chunk data from + /// the given directory. + pub fn new(mut dir: PathBuf) -> Result { + let mut manifest_buf = Vec::new(); - dir.push("MANIFEST"); - let mut manifest_file = File::open(&dir)?; - manifest_file.read_to_end(&mut manifest_buf)?; + dir.push("MANIFEST"); + let mut manifest_file = File::open(&dir)?; + manifest_file.read_to_end(&mut manifest_buf)?; - let manifest = ManifestData::from_rlp(&manifest_buf[..])?; + let manifest = ManifestData::from_rlp(&manifest_buf[..])?; - dir.pop(); + dir.pop(); - Ok(LooseReader { dir, manifest }) - } + Ok(LooseReader { dir, manifest }) + } } impl SnapshotReader for LooseReader { - fn manifest(&self) -> &ManifestData { - &self.manifest - } + fn manifest(&self) -> &ManifestData { + &self.manifest + } - fn chunk(&self, hash: H256) -> io::Result { - let path = self.dir.join(format!("{:x}", hash)); - let mut buf = Vec::new(); - let mut file = File::open(&path)?; - file.read_to_end(&mut buf)?; - Ok(buf) - } + fn chunk(&self, hash: H256) -> io::Result { + let path = self.dir.join(format!("{:x}", hash)); + let mut buf = Vec::new(); + let mut file = File::open(&path)?; + file.read_to_end(&mut buf)?; + Ok(buf) + } } #[cfg(test)] mod tests { - use tempdir::TempDir; - use hash::keccak; + use hash::keccak; + use tempdir::TempDir; - use snapshot::ManifestData; - 
use super::{SnapshotWriter, SnapshotReader, PackedWriter, PackedReader, LooseWriter, LooseReader, SNAPSHOT_VERSION}; + use super::{ + LooseReader, LooseWriter, PackedReader, PackedWriter, SnapshotReader, SnapshotWriter, + SNAPSHOT_VERSION, + }; + use snapshot::ManifestData; - const STATE_CHUNKS: &'static [&'static [u8]] = &[b"dog", b"cat", b"hello world", b"hi", b"notarealchunk"]; - const BLOCK_CHUNKS: &'static [&'static [u8]] = &[b"hello!", b"goodbye!", b"abcdefg", b"hijklmnop", b"qrstuvwxy", b"and", b"z"]; + const STATE_CHUNKS: &'static [&'static [u8]] = + &[b"dog", b"cat", b"hello world", b"hi", b"notarealchunk"]; + const BLOCK_CHUNKS: &'static [&'static [u8]] = &[ + b"hello!", + b"goodbye!", + b"abcdefg", + b"hijklmnop", + b"qrstuvwxy", + b"and", + b"z", + ]; - #[test] - fn packed_write_and_read() { - let tempdir = TempDir::new("").unwrap(); - let path = tempdir.path().join("packed"); - let mut writer = PackedWriter::new(&path).unwrap(); + #[test] + fn packed_write_and_read() { + let tempdir = TempDir::new("").unwrap(); + let path = tempdir.path().join("packed"); + let mut writer = PackedWriter::new(&path).unwrap(); - let mut state_hashes = Vec::new(); - let mut block_hashes = Vec::new(); + let mut state_hashes = Vec::new(); + let mut block_hashes = Vec::new(); - for chunk in STATE_CHUNKS { - let hash = keccak(&chunk); - state_hashes.push(hash.clone()); - writer.write_state_chunk(hash, chunk).unwrap(); - } + for chunk in STATE_CHUNKS { + let hash = keccak(&chunk); + state_hashes.push(hash.clone()); + writer.write_state_chunk(hash, chunk).unwrap(); + } - for chunk in BLOCK_CHUNKS { - let hash = keccak(&chunk); - block_hashes.push(hash.clone()); - writer.write_block_chunk(keccak(&chunk), chunk).unwrap(); - } + for chunk in BLOCK_CHUNKS { + let hash = keccak(&chunk); + block_hashes.push(hash.clone()); + writer.write_block_chunk(keccak(&chunk), chunk).unwrap(); + } - let manifest = ManifestData { - version: SNAPSHOT_VERSION, - state_hashes: state_hashes, - 
block_hashes: block_hashes, - state_root: keccak(b"notarealroot"), - block_number: 12345678987654321, - block_hash: keccak(b"notarealblock"), - }; + let manifest = ManifestData { + version: SNAPSHOT_VERSION, + state_hashes: state_hashes, + block_hashes: block_hashes, + state_root: keccak(b"notarealroot"), + block_number: 12345678987654321, + block_hash: keccak(b"notarealblock"), + }; - writer.finish(manifest.clone()).unwrap(); + writer.finish(manifest.clone()).unwrap(); - let reader = PackedReader::new(&path).unwrap().unwrap(); - assert_eq!(reader.manifest(), &manifest); + let reader = PackedReader::new(&path).unwrap().unwrap(); + assert_eq!(reader.manifest(), &manifest); - for hash in manifest.state_hashes.iter().chain(&manifest.block_hashes) { - reader.chunk(hash.clone()).unwrap(); - } - } + for hash in manifest.state_hashes.iter().chain(&manifest.block_hashes) { + reader.chunk(hash.clone()).unwrap(); + } + } - #[test] - fn loose_write_and_read() { - let tempdir = TempDir::new("").unwrap(); - let mut writer = LooseWriter::new(tempdir.path().into()).unwrap(); + #[test] + fn loose_write_and_read() { + let tempdir = TempDir::new("").unwrap(); + let mut writer = LooseWriter::new(tempdir.path().into()).unwrap(); - let mut state_hashes = Vec::new(); - let mut block_hashes = Vec::new(); + let mut state_hashes = Vec::new(); + let mut block_hashes = Vec::new(); - for chunk in STATE_CHUNKS { - let hash = keccak(&chunk); - state_hashes.push(hash.clone()); - writer.write_state_chunk(hash, chunk).unwrap(); - } + for chunk in STATE_CHUNKS { + let hash = keccak(&chunk); + state_hashes.push(hash.clone()); + writer.write_state_chunk(hash, chunk).unwrap(); + } - for chunk in BLOCK_CHUNKS { - let hash = keccak(&chunk); - block_hashes.push(hash.clone()); - writer.write_block_chunk(keccak(&chunk), chunk).unwrap(); - } + for chunk in BLOCK_CHUNKS { + let hash = keccak(&chunk); + block_hashes.push(hash.clone()); + writer.write_block_chunk(keccak(&chunk), chunk).unwrap(); + } - let 
manifest = ManifestData { - version: SNAPSHOT_VERSION, - state_hashes: state_hashes, - block_hashes: block_hashes, - state_root: keccak(b"notarealroot"), - block_number: 12345678987654321, - block_hash: keccak(b"notarealblock)"), - }; + let manifest = ManifestData { + version: SNAPSHOT_VERSION, + state_hashes: state_hashes, + block_hashes: block_hashes, + state_root: keccak(b"notarealroot"), + block_number: 12345678987654321, + block_hash: keccak(b"notarealblock)"), + }; - writer.finish(manifest.clone()).unwrap(); + writer.finish(manifest.clone()).unwrap(); - let reader = LooseReader::new(tempdir.path().into()).unwrap(); - assert_eq!(reader.manifest(), &manifest); + let reader = LooseReader::new(tempdir.path().into()).unwrap(); + assert_eq!(reader.manifest(), &manifest); - for hash in manifest.state_hashes.iter().chain(&manifest.block_hashes) { - reader.chunk(hash.clone()).unwrap(); - } - } + for hash in manifest.state_hashes.iter().chain(&manifest.block_hashes) { + reader.chunk(hash.clone()).unwrap(); + } + } } diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 79b4eae94..b32b8a40f 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -19,49 +19,54 @@ //! Documentation of the format can be found at //! 
https://wiki.parity.io/Warp-Sync-Snapshot-Format -use std::collections::{HashMap, HashSet}; -use std::cmp; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}; -use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY}; +use hash::{keccak, KECCAK_EMPTY, KECCAK_NULL_RLP}; +use std::{ + cmp, + collections::{HashMap, HashSet}, + sync::{ + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + Arc, + }, +}; use account_db::{AccountDB, AccountDBMut}; use blockchain::{BlockChain, BlockProvider}; use engines::EthEngine; -use types::header::Header; -use types::ids::BlockId; +use types::{header::Header, ids::BlockId}; -use ethereum_types::{H256, U256}; -use hash_db::HashDB; -use keccak_hasher::KeccakHasher; -use snappy; -use bytes::Bytes; -use parking_lot::Mutex; -use journaldb::{self, Algorithm, JournalDB}; -use kvdb::{KeyValueDB, DBValue}; -use trie::{Trie, TrieMut}; -use ethtrie::{TrieDB, TrieDBMut}; -use rlp::{RlpStream, Rlp}; use bloom_journal::Bloom; +use bytes::Bytes; +use ethereum_types::{H256, U256}; +use ethtrie::{TrieDB, TrieDBMut}; +use hash_db::HashDB; +use journaldb::{self, Algorithm, JournalDB}; +use keccak_hasher::KeccakHasher; +use kvdb::{DBValue, KeyValueDB}; use num_cpus; +use parking_lot::Mutex; +use rlp::{Rlp, RlpStream}; +use snappy; +use trie::{Trie, TrieMut}; use self::io::SnapshotWriter; -use super::state_db::StateDB; -use super::state::Account as StateAccount; +use super::{state::Account as StateAccount, state_db::StateDB}; use crossbeam_utils::thread; -use rand::{Rng, OsRng}; +use rand::{OsRng, Rng}; pub use self::error::Error; -pub use self::consensus::*; -pub use self::service::{SnapshotClient, Service, DatabaseRestore}; -pub use self::traits::SnapshotService; -pub use self::watcher::Watcher; -pub use types::snapshot_manifest::ManifestData; -pub use types::restoration_status::RestorationStatus; -pub use types::basic_account::BasicAccount; +pub use self::{ + consensus::*, + service::{DatabaseRestore, Service, 
SnapshotClient}, + traits::SnapshotService, + watcher::Watcher, +}; +pub use types::{ + basic_account::BasicAccount, restoration_status::RestorationStatus, + snapshot_manifest::ManifestData, +}; pub mod io; pub mod service; @@ -97,77 +102,85 @@ const MAX_SNAPSHOT_SUBPARTS: usize = 256; /// Configuration for the Snapshot service #[derive(Debug, Clone, PartialEq)] pub struct SnapshotConfiguration { - /// If `true`, no periodic snapshots will be created - pub no_periodic: bool, - /// Number of threads for creating snapshots - pub processing_threads: usize, + /// If `true`, no periodic snapshots will be created + pub no_periodic: bool, + /// Number of threads for creating snapshots + pub processing_threads: usize, } impl Default for SnapshotConfiguration { - fn default() -> Self { - SnapshotConfiguration { - no_periodic: false, - processing_threads: ::std::cmp::max(1, num_cpus::get_physical() / 2), - } - } + fn default() -> Self { + SnapshotConfiguration { + no_periodic: false, + processing_threads: ::std::cmp::max(1, num_cpus::get_physical() / 2), + } + } } /// A progress indicator for snapshots. #[derive(Debug, Default)] pub struct Progress { - accounts: AtomicUsize, - blocks: AtomicUsize, - size: AtomicU64, - done: AtomicBool, - abort: AtomicBool, + accounts: AtomicUsize, + blocks: AtomicUsize, + size: AtomicU64, + done: AtomicBool, + abort: AtomicBool, } impl Progress { - /// Reset the progress. - pub fn reset(&self) { - self.accounts.store(0, Ordering::Release); - self.blocks.store(0, Ordering::Release); - self.size.store(0, Ordering::Release); - self.abort.store(false, Ordering::Release); + /// Reset the progress. + pub fn reset(&self) { + self.accounts.store(0, Ordering::Release); + self.blocks.store(0, Ordering::Release); + self.size.store(0, Ordering::Release); + self.abort.store(false, Ordering::Release); - // atomic fence here to ensure the others are written first? - // logs might very rarely get polluted if not. 
- self.done.store(false, Ordering::Release); - } + // atomic fence here to ensure the others are written first? + // logs might very rarely get polluted if not. + self.done.store(false, Ordering::Release); + } - /// Get the number of accounts snapshotted thus far. - pub fn accounts(&self) -> usize { self.accounts.load(Ordering::Acquire) } + /// Get the number of accounts snapshotted thus far. + pub fn accounts(&self) -> usize { + self.accounts.load(Ordering::Acquire) + } - /// Get the number of blocks snapshotted thus far. - pub fn blocks(&self) -> usize { self.blocks.load(Ordering::Acquire) } + /// Get the number of blocks snapshotted thus far. + pub fn blocks(&self) -> usize { + self.blocks.load(Ordering::Acquire) + } - /// Get the written size of the snapshot in bytes. - pub fn size(&self) -> u64 { self.size.load(Ordering::Acquire) } - - /// Whether the snapshot is complete. - pub fn done(&self) -> bool { self.done.load(Ordering::Acquire) } + /// Get the written size of the snapshot in bytes. + pub fn size(&self) -> u64 { + self.size.load(Ordering::Acquire) + } + /// Whether the snapshot is complete. + pub fn done(&self) -> bool { + self.done.load(Ordering::Acquire) + } } /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer. 
pub fn take_snapshot( - chunker: Box, - chain: &BlockChain, - block_hash: H256, - state_db: &dyn HashDB, - writer: W, - p: &Progress, - processing_threads: usize, + chunker: Box, + chain: &BlockChain, + block_hash: H256, + state_db: &dyn HashDB, + writer: W, + p: &Progress, + processing_threads: usize, ) -> Result<(), Error> { - let start_header = chain.block_header_data(&block_hash) - .ok_or_else(|| Error::InvalidStartingBlock(BlockId::Hash(block_hash)))?; - let state_root = start_header.state_root(); - let block_number = start_header.number(); + let start_header = chain + .block_header_data(&block_hash) + .ok_or_else(|| Error::InvalidStartingBlock(BlockId::Hash(block_hash)))?; + let state_root = start_header.state_root(); + let block_number = start_header.number(); - info!("Taking snapshot starting at block {}", block_number); + info!("Taking snapshot starting at block {}", block_number); - let version = chunker.current_version(); - let writer = Mutex::new(writer); - let (state_hashes, block_hashes) = thread::scope(|scope| -> Result<(Vec, Vec), Error> { + let version = chunker.current_version(); + let writer = Mutex::new(writer); + let (state_hashes, block_hashes) = thread::scope(|scope| -> Result<(Vec, Vec), Error> { let writer = &writer; let block_guard = scope.spawn(move |_| { chunk_secondary(chunker, chain, block_hash, writer, p) @@ -207,22 +220,22 @@ pub fn take_snapshot( Ok((state_hashes, block_hashes)) }).expect("Sub-thread never panics; qed")?; - info!(target: "snapshot", "produced {} state chunks and {} block chunks.", state_hashes.len(), block_hashes.len()); + info!(target: "snapshot", "produced {} state chunks and {} block chunks.", state_hashes.len(), block_hashes.len()); - let manifest_data = ManifestData { - version, - state_hashes, - block_hashes, - state_root, - block_number, - block_hash, - }; + let manifest_data = ManifestData { + version, + state_hashes, + block_hashes, + state_root, + block_number, + block_hash, + }; - 
writer.into_inner().finish(manifest_data)?; + writer.into_inner().finish(manifest_data)?; - p.done.store(true, Ordering::SeqCst); + p.done.store(true, Ordering::SeqCst); - Ok(()) + Ok(()) } /// Create and write out all secondary chunks to disk, returning a vector of all @@ -232,96 +245,100 @@ pub fn take_snapshot( /// in the state chunks. /// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis. pub fn chunk_secondary<'a>( - mut chunker: Box, - chain: &'a BlockChain, - start_hash: H256, - writer: &Mutex, - progress: &'a Progress + mut chunker: Box, + chain: &'a BlockChain, + start_hash: H256, + writer: &Mutex, + progress: &'a Progress, ) -> Result, Error> { - let mut chunk_hashes = Vec::new(); - let mut snappy_buffer = vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)]; + let mut chunk_hashes = Vec::new(); + let mut snappy_buffer = vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)]; - { - let mut chunk_sink = |raw_data: &[u8]| { - let compressed_size = snappy::compress_into(raw_data, &mut snappy_buffer); - let compressed = &snappy_buffer[..compressed_size]; - let hash = keccak(&compressed); - let size = compressed.len(); + { + let mut chunk_sink = |raw_data: &[u8]| { + let compressed_size = snappy::compress_into(raw_data, &mut snappy_buffer); + let compressed = &snappy_buffer[..compressed_size]; + let hash = keccak(&compressed); + let size = compressed.len(); - writer.lock().write_block_chunk(hash, compressed)?; - trace!(target: "snapshot", "wrote secondary chunk. hash: {:x}, size: {}, uncompressed size: {}", + writer.lock().write_block_chunk(hash, compressed)?; + trace!(target: "snapshot", "wrote secondary chunk. 
hash: {:x}, size: {}, uncompressed size: {}", hash, size, raw_data.len()); - progress.size.fetch_add(size as u64, Ordering::SeqCst); - chunk_hashes.push(hash); - Ok(()) - }; + progress.size.fetch_add(size as u64, Ordering::SeqCst); + chunk_hashes.push(hash); + Ok(()) + }; - chunker.chunk_all( - chain, - start_hash, - &mut chunk_sink, - progress, - PREFERRED_CHUNK_SIZE, - )?; - } + chunker.chunk_all( + chain, + start_hash, + &mut chunk_sink, + progress, + PREFERRED_CHUNK_SIZE, + )?; + } - Ok(chunk_hashes) + Ok(chunk_hashes) } /// State trie chunker. struct StateChunker<'a> { - hashes: Vec, - rlps: Vec, - cur_size: usize, - snappy_buffer: Vec, - writer: &'a Mutex, - progress: &'a Progress, - thread_idx: usize, + hashes: Vec, + rlps: Vec, + cur_size: usize, + snappy_buffer: Vec, + writer: &'a Mutex, + progress: &'a Progress, + thread_idx: usize, } impl<'a> StateChunker<'a> { - // Push a key, value pair to be encoded. - // - // If the buffer is greater than the desired chunk size, - // this will write out the data to disk. - fn push(&mut self, data: Bytes) -> Result<(), Error> { - self.cur_size += data.len(); - self.rlps.push(data); - Ok(()) - } + // Push a key, value pair to be encoded. + // + // If the buffer is greater than the desired chunk size, + // this will write out the data to disk. + fn push(&mut self, data: Bytes) -> Result<(), Error> { + self.cur_size += data.len(); + self.rlps.push(data); + Ok(()) + } - // Write out the buffer to disk, pushing the created chunk's hash to - // the list. - fn write_chunk(&mut self) -> Result<(), Error> { - let num_entries = self.rlps.len(); - let mut stream = RlpStream::new_list(num_entries); - for rlp in self.rlps.drain(..) { - stream.append_raw(&rlp, 1); - } + // Write out the buffer to disk, pushing the created chunk's hash to + // the list. + fn write_chunk(&mut self) -> Result<(), Error> { + let num_entries = self.rlps.len(); + let mut stream = RlpStream::new_list(num_entries); + for rlp in self.rlps.drain(..) 
{ + stream.append_raw(&rlp, 1); + } - let raw_data = stream.out(); + let raw_data = stream.out(); - let compressed_size = snappy::compress_into(&raw_data, &mut self.snappy_buffer); - let compressed = &self.snappy_buffer[..compressed_size]; - let hash = keccak(&compressed); + let compressed_size = snappy::compress_into(&raw_data, &mut self.snappy_buffer); + let compressed = &self.snappy_buffer[..compressed_size]; + let hash = keccak(&compressed); - self.writer.lock().write_state_chunk(hash, compressed)?; - trace!(target: "snapshot", "Thread {} wrote state chunk. size: {}, uncompressed size: {}", self.thread_idx, compressed_size, raw_data.len()); + self.writer.lock().write_state_chunk(hash, compressed)?; + trace!(target: "snapshot", "Thread {} wrote state chunk. size: {}, uncompressed size: {}", self.thread_idx, compressed_size, raw_data.len()); - self.progress.accounts.fetch_add(num_entries, Ordering::SeqCst); - self.progress.size.fetch_add(compressed_size as u64, Ordering::SeqCst); + self.progress + .accounts + .fetch_add(num_entries, Ordering::SeqCst); + self.progress + .size + .fetch_add(compressed_size as u64, Ordering::SeqCst); - self.hashes.push(hash); - self.cur_size = 0; + self.hashes.push(hash); + self.cur_size = 0; - Ok(()) - } + Ok(()) + } - // Get current chunk size. - fn chunk_size(&self) -> usize { - self.cur_size - } + // Get current chunk size. + fn chunk_size(&self) -> usize { + self.cur_size + } } /// Walk the given state database starting from the given root, @@ -332,244 +349,269 @@ impl<'a> StateChunker<'a> { /// Returns a list of hashes of chunks created, or any error it may /// have encountered. 
pub fn chunk_state<'a>( - db: &dyn HashDB, - root: &H256, - writer: &Mutex, - progress: &'a Progress, - part: Option, - thread_idx: usize, + db: &dyn HashDB, + root: &H256, + writer: &Mutex, + progress: &'a Progress, + part: Option, + thread_idx: usize, ) -> Result, Error> { - let account_trie = TrieDB::new(&db, &root)?; + let account_trie = TrieDB::new(&db, &root)?; - let mut chunker = StateChunker { - hashes: Vec::new(), - rlps: Vec::new(), - cur_size: 0, - snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)], - writer, - progress, - thread_idx, - }; + let mut chunker = StateChunker { + hashes: Vec::new(), + rlps: Vec::new(), + cur_size: 0, + snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)], + writer, + progress, + thread_idx, + }; - let mut used_code = HashSet::new(); + let mut used_code = HashSet::new(); - // account_key here is the address' hash. - let mut account_iter = account_trie.iter()?; + // account_key here is the address' hash. + let mut account_iter = account_trie.iter()?; - let mut seek_to = None; + let mut seek_to = None; - if let Some(part) = part { - assert!(part < 16, "Wrong chunk state part number (must be <16) in snapshot creation."); + if let Some(part) = part { + assert!( + part < 16, + "Wrong chunk state part number (must be <16) in snapshot creation." 
+ ); - let part_offset = MAX_SNAPSHOT_SUBPARTS / SNAPSHOT_SUBPARTS; - let mut seek_from = vec![0; 32]; - seek_from[0] = (part * part_offset) as u8; - account_iter.seek(&seek_from)?; + let part_offset = MAX_SNAPSHOT_SUBPARTS / SNAPSHOT_SUBPARTS; + let mut seek_from = vec![0; 32]; + seek_from[0] = (part * part_offset) as u8; + account_iter.seek(&seek_from)?; - // Set the upper-bound, except for the last part - if part < SNAPSHOT_SUBPARTS - 1 { - seek_to = Some(((part + 1) * part_offset) as u8) - } - } + // Set the upper-bound, except for the last part + if part < SNAPSHOT_SUBPARTS - 1 { + seek_to = Some(((part + 1) * part_offset) as u8) + } + } - for item in account_iter { - let (account_key, account_data) = item?; - let account_key_hash = H256::from_slice(&account_key); + for item in account_iter { + let (account_key, account_data) = item?; + let account_key_hash = H256::from_slice(&account_key); - if seek_to.map_or(false, |seek_to| account_key[0] >= seek_to) { - break; - } + if seek_to.map_or(false, |seek_to| account_key[0] >= seek_to) { + break; + } - let account = ::rlp::decode(&*account_data)?; - let account_db = AccountDB::from_hash(db, account_key_hash); + let account = ::rlp::decode(&*account_data)?; + let account_db = AccountDB::from_hash(db, account_key_hash); - let fat_rlps = account::to_fat_rlps(&account_key_hash, &account, &account_db, &mut used_code, PREFERRED_CHUNK_SIZE - chunker.chunk_size(), PREFERRED_CHUNK_SIZE, progress)?; - for (i, fat_rlp) in fat_rlps.into_iter().enumerate() { - if i > 0 { - chunker.write_chunk()?; - } - chunker.push(fat_rlp)?; - } - } + let fat_rlps = account::to_fat_rlps( + &account_key_hash, + &account, + &account_db, + &mut used_code, + PREFERRED_CHUNK_SIZE - chunker.chunk_size(), + PREFERRED_CHUNK_SIZE, + progress, + )?; + for (i, fat_rlp) in fat_rlps.into_iter().enumerate() { + if i > 0 { + chunker.write_chunk()?; + } + chunker.push(fat_rlp)?; + } + } - if chunker.cur_size != 0 { - chunker.write_chunk()?; - } + if 
chunker.cur_size != 0 { + chunker.write_chunk()?; + } - Ok(chunker.hashes) + Ok(chunker.hashes) } /// Used to rebuild the state trie piece by piece. pub struct StateRebuilder { - db: Box, - state_root: H256, - known_code: HashMap, // code hashes mapped to first account with this code. - missing_code: HashMap>, // maps code hashes to lists of accounts missing that code. - bloom: Bloom, - known_storage_roots: HashMap, // maps account hashes to last known storage root. Only filled for last account per chunk. + db: Box, + state_root: H256, + known_code: HashMap, // code hashes mapped to first account with this code. + missing_code: HashMap>, // maps code hashes to lists of accounts missing that code. + bloom: Bloom, + known_storage_roots: HashMap, // maps account hashes to last known storage root. Only filled for last account per chunk. } impl StateRebuilder { - /// Create a new state rebuilder to write into the given backing DB. - pub fn new(db: Arc, pruning: Algorithm) -> Self { - StateRebuilder { - db: journaldb::new(db.clone(), pruning, ::db::COL_STATE), - state_root: KECCAK_NULL_RLP, - known_code: HashMap::new(), - missing_code: HashMap::new(), - bloom: StateDB::load_bloom(&*db), - known_storage_roots: HashMap::new(), - } - } + /// Create a new state rebuilder to write into the given backing DB. + pub fn new(db: Arc, pruning: Algorithm) -> Self { + StateRebuilder { + db: journaldb::new(db.clone(), pruning, ::db::COL_STATE), + state_root: KECCAK_NULL_RLP, + known_code: HashMap::new(), + missing_code: HashMap::new(), + bloom: StateDB::load_bloom(&*db), + known_storage_roots: HashMap::new(), + } + } - /// Feed an uncompressed state chunk into the rebuilder. - pub fn feed(&mut self, chunk: &[u8], flag: &AtomicBool) -> Result<(), ::error::Error> { - let rlp = Rlp::new(chunk); - let empty_rlp = StateAccount::new_basic(U256::zero(), U256::zero()).rlp(); - let mut pairs = Vec::with_capacity(rlp.item_count()?); + /// Feed an uncompressed state chunk into the rebuilder. 
+ pub fn feed(&mut self, chunk: &[u8], flag: &AtomicBool) -> Result<(), ::error::Error> { + let rlp = Rlp::new(chunk); + let empty_rlp = StateAccount::new_basic(U256::zero(), U256::zero()).rlp(); + let mut pairs = Vec::with_capacity(rlp.item_count()?); - // initialize the pairs vector with empty values so we have slots to write into. - pairs.resize(rlp.item_count()?, (H256::zero(), Vec::new())); + // initialize the pairs vector with empty values so we have slots to write into. + pairs.resize(rlp.item_count()?, (H256::zero(), Vec::new())); - let status = rebuild_accounts( - self.db.as_hash_db_mut(), - rlp, - &mut pairs, - &self.known_code, - &mut self.known_storage_roots, - flag - )?; + let status = rebuild_accounts( + self.db.as_hash_db_mut(), + rlp, + &mut pairs, + &self.known_code, + &mut self.known_storage_roots, + flag, + )?; - for (addr_hash, code_hash) in status.missing_code { - self.missing_code.entry(code_hash).or_insert_with(Vec::new).push(addr_hash); - } + for (addr_hash, code_hash) in status.missing_code { + self.missing_code + .entry(code_hash) + .or_insert_with(Vec::new) + .push(addr_hash); + } - // patch up all missing code. must be done after collecting all new missing code entries. - for (code_hash, code, first_with) in status.new_code { - for addr_hash in self.missing_code.remove(&code_hash).unwrap_or_else(Vec::new) { - let mut db = AccountDBMut::from_hash(self.db.as_hash_db_mut(), addr_hash); - db.emplace(code_hash, DBValue::from_slice(&code)); - } + // patch up all missing code. must be done after collecting all new missing code entries. 
+ for (code_hash, code, first_with) in status.new_code { + for addr_hash in self + .missing_code + .remove(&code_hash) + .unwrap_or_else(Vec::new) + { + let mut db = AccountDBMut::from_hash(self.db.as_hash_db_mut(), addr_hash); + db.emplace(code_hash, DBValue::from_slice(&code)); + } - self.known_code.insert(code_hash, first_with); - } + self.known_code.insert(code_hash, first_with); + } - let backing = self.db.backing().clone(); + let backing = self.db.backing().clone(); - // batch trie writes - { - let mut account_trie = if self.state_root != KECCAK_NULL_RLP { - TrieDBMut::from_existing(self.db.as_hash_db_mut(), &mut self.state_root)? - } else { - TrieDBMut::new(self.db.as_hash_db_mut(), &mut self.state_root) - }; + // batch trie writes + { + let mut account_trie = if self.state_root != KECCAK_NULL_RLP { + TrieDBMut::from_existing(self.db.as_hash_db_mut(), &mut self.state_root)? + } else { + TrieDBMut::new(self.db.as_hash_db_mut(), &mut self.state_root) + }; - for (hash, thin_rlp) in pairs { - if !flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } + for (hash, thin_rlp) in pairs { + if !flag.load(Ordering::SeqCst) { + return Err(Error::RestorationAborted.into()); + } - if &thin_rlp[..] != &empty_rlp[..] { - self.bloom.set(&*hash); - } - account_trie.insert(&hash, &thin_rlp)?; - } - } + if &thin_rlp[..] != &empty_rlp[..] 
{ + self.bloom.set(&*hash); + } + account_trie.insert(&hash, &thin_rlp)?; + } + } - let bloom_journal = self.bloom.drain_journal(); - let mut batch = backing.transaction(); - StateDB::commit_bloom(&mut batch, bloom_journal)?; - self.db.inject(&mut batch)?; - backing.write_buffered(batch); - trace!(target: "snapshot", "current state root: {:?}", self.state_root); - Ok(()) - } + let bloom_journal = self.bloom.drain_journal(); + let mut batch = backing.transaction(); + StateDB::commit_bloom(&mut batch, bloom_journal)?; + self.db.inject(&mut batch)?; + backing.write_buffered(batch); + trace!(target: "snapshot", "current state root: {:?}", self.state_root); + Ok(()) + } - /// Finalize the restoration. Check for accounts missing code and make a dummy - /// journal entry. - /// Once all chunks have been fed, there should be nothing missing. - pub fn finalize(mut self, era: u64, id: H256) -> Result, ::error::Error> { - let missing = self.missing_code.keys().cloned().collect::>(); - if !missing.is_empty() { return Err(Error::MissingCode(missing).into()) } + /// Finalize the restoration. Check for accounts missing code and make a dummy + /// journal entry. + /// Once all chunks have been fed, there should be nothing missing. + pub fn finalize(mut self, era: u64, id: H256) -> Result, ::error::Error> { + let missing = self.missing_code.keys().cloned().collect::>(); + if !missing.is_empty() { + return Err(Error::MissingCode(missing).into()); + } - let mut batch = self.db.backing().transaction(); - self.db.journal_under(&mut batch, era, &id)?; - self.db.backing().write_buffered(batch); + let mut batch = self.db.backing().transaction(); + self.db.journal_under(&mut batch, era, &id)?; + self.db.backing().write_buffered(batch); - Ok(self.db) - } + Ok(self.db) + } - /// Get the state root of the rebuilder. - pub fn state_root(&self) -> H256 { self.state_root } + /// Get the state root of the rebuilder. 
+ pub fn state_root(&self) -> H256 { + self.state_root + } } #[derive(Default)] struct RebuiltStatus { - // new code that's become available. (code_hash, code, addr_hash) - new_code: Vec<(H256, Bytes, H256)>, - missing_code: Vec<(H256, H256)>, // accounts that are missing code. + // new code that's become available. (code_hash, code, addr_hash) + new_code: Vec<(H256, Bytes, H256)>, + missing_code: Vec<(H256, H256)>, // accounts that are missing code. } // rebuild a set of accounts and their storage. // returns a status detailing newly-loaded code and accounts missing code. fn rebuild_accounts( - db: &mut dyn HashDB, - account_fat_rlps: Rlp, - out_chunk: &mut [(H256, Bytes)], - known_code: &HashMap, - known_storage_roots: &mut HashMap, - abort_flag: &AtomicBool, + db: &mut dyn HashDB, + account_fat_rlps: Rlp, + out_chunk: &mut [(H256, Bytes)], + known_code: &HashMap, + known_storage_roots: &mut HashMap, + abort_flag: &AtomicBool, ) -> Result { - let mut status = RebuiltStatus::default(); - for (account_rlp, out) in account_fat_rlps.into_iter().zip(out_chunk.iter_mut()) { - if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } + let mut status = RebuiltStatus::default(); + for (account_rlp, out) in account_fat_rlps.into_iter().zip(out_chunk.iter_mut()) { + if !abort_flag.load(Ordering::SeqCst) { + return Err(Error::RestorationAborted.into()); + } - let hash: H256 = account_rlp.val_at(0)?; - let fat_rlp = account_rlp.at(1)?; + let hash: H256 = account_rlp.val_at(0)?; + let fat_rlp = account_rlp.at(1)?; - let thin_rlp = { + let thin_rlp = { + // fill out the storage trie and code while decoding. + let (acc, maybe_code) = { + let mut acct_db = AccountDBMut::from_hash(db, hash); + let storage_root = known_storage_roots.get(&hash).cloned().unwrap_or_default(); + account::from_fat_rlp(&mut acct_db, fat_rlp, storage_root)? + }; - // fill out the storage trie and code while decoding. 
- let (acc, maybe_code) = { - let mut acct_db = AccountDBMut::from_hash(db, hash); - let storage_root = known_storage_roots.get(&hash).cloned().unwrap_or_default(); - account::from_fat_rlp(&mut acct_db, fat_rlp, storage_root)? - }; + let code_hash = acc.code_hash.clone(); + match maybe_code { + // new inline code + Some(code) => status.new_code.push((code_hash, code, hash)), + None => { + if code_hash != KECCAK_EMPTY { + // see if this code has already been included inline + match known_code.get(&code_hash) { + Some(&first_with) => { + // if so, load it from the database. + let code = AccountDB::from_hash(db, first_with) + .get(&code_hash) + .ok_or_else(|| Error::MissingCode(vec![first_with]))?; - let code_hash = acc.code_hash.clone(); - match maybe_code { - // new inline code - Some(code) => status.new_code.push((code_hash, code, hash)), - None => { - if code_hash != KECCAK_EMPTY { - // see if this code has already been included inline - match known_code.get(&code_hash) { - Some(&first_with) => { - // if so, load it from the database. 
- let code = AccountDB::from_hash(db, first_with) - .get(&code_hash) - .ok_or_else(|| Error::MissingCode(vec![first_with]))?; + // and write it again under a different mangled key + AccountDBMut::from_hash(db, hash).emplace(code_hash, code); + } + // if not, queue it up to be filled later + None => status.missing_code.push((hash, code_hash)), + } + } + } + } - // and write it again under a different mangled key - AccountDBMut::from_hash(db, hash).emplace(code_hash, code); - } - // if not, queue it up to be filled later - None => status.missing_code.push((hash, code_hash)), - } - } - } - } + ::rlp::encode(&acc) + }; - ::rlp::encode(&acc) - }; - - *out = (hash, thin_rlp); - } - if let Some(&(ref hash, ref rlp)) = out_chunk.iter().last() { - known_storage_roots.insert(*hash, ::rlp::decode::(rlp)?.storage_root); - } - if let Some(&(ref hash, ref rlp)) = out_chunk.iter().next() { - known_storage_roots.insert(*hash, ::rlp::decode::(rlp)?.storage_root); - } - Ok(status) + *out = (hash, thin_rlp); + } + if let Some(&(ref hash, ref rlp)) = out_chunk.iter().last() { + known_storage_roots.insert(*hash, ::rlp::decode::(rlp)?.storage_root); + } + if let Some(&(ref hash, ref rlp)) = out_chunk.iter().next() { + known_storage_roots.insert(*hash, ::rlp::decode::(rlp)?.storage_root); + } + Ok(status) } /// Proportion of blocks which we will verify `PoW` for. @@ -578,16 +620,22 @@ const POW_VERIFY_RATE: f32 = 0.02; /// Verify an old block with the given header, engine, blockchain, body. If `always` is set, it will perform /// the fullest verification possible. If not, it will take a random sample to determine whether it will /// do heavy or light verification. 
-pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &dyn EthEngine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> { - engine.verify_block_basic(header)?; +pub fn verify_old_block( + rng: &mut OsRng, + header: &Header, + engine: &dyn EthEngine, + chain: &BlockChain, + always: bool, +) -> Result<(), ::error::Error> { + engine.verify_block_basic(header)?; - if always || rng.gen::() <= POW_VERIFY_RATE { - engine.verify_block_unordered(header)?; - match chain.block_header_data(header.parent_hash()) { - Some(parent) => engine.verify_block_family(header, &parent.decode()?), - None => Ok(()), - } - } else { - Ok(()) - } + if always || rng.gen::() <= POW_VERIFY_RATE { + engine.verify_block_unordered(header)?; + match chain.block_header_data(header.parent_hash()) { + Some(parent) => engine.verify_block_family(header, &parent.decode()?), + None => Ok(()), + } + } else { + Ok(()) + } } diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index ddae76a00..30c69f909 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -16,190 +16,217 @@ //! Snapshot network service implementation. 
-use std::collections::HashSet; -use std::io::{self, Read, ErrorKind}; -use std::fs::{self, File}; -use std::path::PathBuf; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use std::cmp; +use std::{ + cmp, + collections::HashSet, + fs::{self, File}, + io::{self, ErrorKind, Read}, + path::PathBuf, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, +}; -use super::{ManifestData, StateRebuilder, Rebuilder, RestorationStatus, SnapshotService, MAX_CHUNK_SIZE}; -use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; +use super::{ + io::{LooseReader, LooseWriter, SnapshotReader, SnapshotWriter}, + ManifestData, Rebuilder, RestorationStatus, SnapshotService, StateRebuilder, MAX_CHUNK_SIZE, +}; use blockchain::{BlockChain, BlockChainDB, BlockChainDBHandler}; -use client::{BlockInfo, BlockChainClient, Client, ChainInfo, ClientIoMessage}; +use client::{BlockChainClient, BlockInfo, ChainInfo, Client, ClientIoMessage}; use engines::EthEngine; use error::{Error, ErrorKind as SnapshotErrorKind}; -use snapshot::{Error as SnapshotError}; use hash::keccak; +use snapshot::Error as SnapshotError; use types::ids::BlockId; use io::IoChannel; -use ethereum_types::H256; -use parking_lot::{Mutex, RwLock, RwLockReadGuard}; use bytes::Bytes; +use ethereum_types::H256; use journaldb::Algorithm; use kvdb::DBTransaction; +use parking_lot::{Mutex, RwLock, RwLockReadGuard}; use snappy; /// Helper for removing directories in case of error. 
struct Guard(bool, PathBuf); impl Guard { - fn new(path: PathBuf) -> Self { Guard(true, path) } + fn new(path: PathBuf) -> Self { + Guard(true, path) + } - #[cfg(test)] - fn benign() -> Self { Guard(false, PathBuf::default()) } + #[cfg(test)] + fn benign() -> Self { + Guard(false, PathBuf::default()) + } - fn disarm(mut self) { self.0 = false } + fn disarm(mut self) { + self.0 = false + } } impl Drop for Guard { - fn drop(&mut self) { - if self.0 { - let _ = fs::remove_dir_all(&self.1); - } - } + fn drop(&mut self) { + if self.0 { + let _ = fs::remove_dir_all(&self.1); + } + } } /// External database restoration handler pub trait DatabaseRestore: Send + Sync { - /// Restart with a new backend. Takes ownership of passed database and moves it to a new location. - fn restore_db(&self, new_db: &str) -> Result<(), Error>; + /// Restart with a new backend. Takes ownership of passed database and moves it to a new location. + fn restore_db(&self, new_db: &str) -> Result<(), Error>; } /// State restoration manager. struct Restoration { - manifest: ManifestData, - state_chunks_left: HashSet, - block_chunks_left: HashSet, - state: StateRebuilder, - secondary: Box, - writer: Option, - snappy_buffer: Bytes, - final_state_root: H256, - guard: Guard, - db: Arc, + manifest: ManifestData, + state_chunks_left: HashSet, + block_chunks_left: HashSet, + state: StateRebuilder, + secondary: Box, + writer: Option, + snappy_buffer: Bytes, + final_state_root: H256, + guard: Guard, + db: Arc, } struct RestorationParams<'a> { - manifest: ManifestData, // manifest to base restoration on. - pruning: Algorithm, // pruning algorithm for the database. - db: Arc, // database - writer: Option, // writer for recovered snapshot. - genesis: &'a [u8], // genesis block of the chain. - guard: Guard, // guard for the restoration directory. - engine: &'a EthEngine, + manifest: ManifestData, // manifest to base restoration on. + pruning: Algorithm, // pruning algorithm for the database. 
+ db: Arc, // database + writer: Option, // writer for recovered snapshot. + genesis: &'a [u8], // genesis block of the chain. + guard: Guard, // guard for the restoration directory. + engine: &'a EthEngine, } impl Restoration { - // make a new restoration using the given parameters. - fn new(params: RestorationParams) -> Result { - let manifest = params.manifest; + // make a new restoration using the given parameters. + fn new(params: RestorationParams) -> Result { + let manifest = params.manifest; - let state_chunks = manifest.state_hashes.iter().cloned().collect(); - let block_chunks = manifest.block_hashes.iter().cloned().collect(); + let state_chunks = manifest.state_hashes.iter().cloned().collect(); + let block_chunks = manifest.block_hashes.iter().cloned().collect(); - let raw_db = params.db; + let raw_db = params.db; - let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone()); - let components = params.engine.snapshot_components() - .ok_or_else(|| ::snapshot::Error::SnapshotsUnsupported)?; + let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone()); + let components = params + .engine + .snapshot_components() + .ok_or_else(|| ::snapshot::Error::SnapshotsUnsupported)?; - let secondary = components.rebuilder(chain, raw_db.clone(), &manifest)?; + let secondary = components.rebuilder(chain, raw_db.clone(), &manifest)?; - let root = manifest.state_root.clone(); + let root = manifest.state_root.clone(); - Ok(Restoration { - manifest: manifest, - state_chunks_left: state_chunks, - block_chunks_left: block_chunks, - state: StateRebuilder::new(raw_db.key_value().clone(), params.pruning), - secondary: secondary, - writer: params.writer, - snappy_buffer: Vec::new(), - final_state_root: root, - guard: params.guard, - db: raw_db, - }) - } + Ok(Restoration { + manifest: manifest, + state_chunks_left: state_chunks, + block_chunks_left: block_chunks, + state: StateRebuilder::new(raw_db.key_value().clone(), params.pruning), + 
secondary: secondary, + writer: params.writer, + snappy_buffer: Vec::new(), + final_state_root: root, + guard: params.guard, + db: raw_db, + }) + } - // feeds a state chunk, aborts early if `flag` becomes false. - fn feed_state(&mut self, hash: H256, chunk: &[u8], flag: &AtomicBool) -> Result<(), Error> { - if self.state_chunks_left.contains(&hash) { - let expected_len = snappy::decompressed_len(chunk)?; - if expected_len > MAX_CHUNK_SIZE { - trace!(target: "snapshot", "Discarding large chunk: {} vs {}", expected_len, MAX_CHUNK_SIZE); - return Err(::snapshot::Error::ChunkTooLarge.into()); - } - let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; + // feeds a state chunk, aborts early if `flag` becomes false. + fn feed_state(&mut self, hash: H256, chunk: &[u8], flag: &AtomicBool) -> Result<(), Error> { + if self.state_chunks_left.contains(&hash) { + let expected_len = snappy::decompressed_len(chunk)?; + if expected_len > MAX_CHUNK_SIZE { + trace!(target: "snapshot", "Discarding large chunk: {} vs {}", expected_len, MAX_CHUNK_SIZE); + return Err(::snapshot::Error::ChunkTooLarge.into()); + } + let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; - self.state.feed(&self.snappy_buffer[..len], flag)?; + self.state.feed(&self.snappy_buffer[..len], flag)?; - if let Some(ref mut writer) = self.writer.as_mut() { - writer.write_state_chunk(hash, chunk)?; - } + if let Some(ref mut writer) = self.writer.as_mut() { + writer.write_state_chunk(hash, chunk)?; + } - self.state_chunks_left.remove(&hash); - } + self.state_chunks_left.remove(&hash); + } - Ok(()) - } + Ok(()) + } - // feeds a block chunk - fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &EthEngine, flag: &AtomicBool) -> Result<(), Error> { - if self.block_chunks_left.contains(&hash) { - let expected_len = snappy::decompressed_len(chunk)?; - if expected_len > MAX_CHUNK_SIZE { - trace!(target: "snapshot", "Discarding large chunk: {} vs {}", expected_len, MAX_CHUNK_SIZE); - return 
Err(::snapshot::Error::ChunkTooLarge.into()); - } - let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; + // feeds a block chunk + fn feed_blocks( + &mut self, + hash: H256, + chunk: &[u8], + engine: &EthEngine, + flag: &AtomicBool, + ) -> Result<(), Error> { + if self.block_chunks_left.contains(&hash) { + let expected_len = snappy::decompressed_len(chunk)?; + if expected_len > MAX_CHUNK_SIZE { + trace!(target: "snapshot", "Discarding large chunk: {} vs {}", expected_len, MAX_CHUNK_SIZE); + return Err(::snapshot::Error::ChunkTooLarge.into()); + } + let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; - self.secondary.feed(&self.snappy_buffer[..len], engine, flag)?; - if let Some(ref mut writer) = self.writer.as_mut() { - writer.write_block_chunk(hash, chunk)?; - } + self.secondary + .feed(&self.snappy_buffer[..len], engine, flag)?; + if let Some(ref mut writer) = self.writer.as_mut() { + writer.write_block_chunk(hash, chunk)?; + } - self.block_chunks_left.remove(&hash); - } + self.block_chunks_left.remove(&hash); + } - Ok(()) - } + Ok(()) + } - // finish up restoration. - fn finalize(mut self, engine: &EthEngine) -> Result<(), Error> { - use trie::TrieError; + // finish up restoration. + fn finalize(mut self, engine: &EthEngine) -> Result<(), Error> { + use trie::TrieError; - if !self.is_done() { return Ok(()) } + if !self.is_done() { + return Ok(()); + } - // verify final state root. - let root = self.state.state_root(); - if root != self.final_state_root { - warn!("Final restored state has wrong state root: expected {:?}, got {:?}", self.final_state_root, root); - return Err(TrieError::InvalidStateRoot(root).into()); - } + // verify final state root. + let root = self.state.state_root(); + if root != self.final_state_root { + warn!( + "Final restored state has wrong state root: expected {:?}, got {:?}", + self.final_state_root, root + ); + return Err(TrieError::InvalidStateRoot(root).into()); + } - // check for missing code. 
- self.state.finalize(self.manifest.block_number, self.manifest.block_hash)?; + // check for missing code. + self.state + .finalize(self.manifest.block_number, self.manifest.block_hash)?; - // connect out-of-order chunks and verify chain integrity. - self.secondary.finalize(engine)?; + // connect out-of-order chunks and verify chain integrity. + self.secondary.finalize(engine)?; - if let Some(writer) = self.writer { - writer.finish(self.manifest)?; - } + if let Some(writer) = self.writer { + writer.finish(self.manifest)?; + } - self.guard.disarm(); - Ok(()) - } + self.guard.disarm(); + Ok(()) + } - // is everything done? - fn is_done(&self) -> bool { - self.block_chunks_left.is_empty() && self.state_chunks_left.is_empty() - } + // is everything done? + fn is_done(&self) -> bool { + self.block_chunks_left.is_empty() && self.state_chunks_left.is_empty() + } } /// Type alias for client io channel. @@ -210,758 +237,837 @@ pub trait SnapshotClient: BlockChainClient + BlockInfo + DatabaseRestore {} /// Snapshot service parameters. pub struct ServiceParams { - /// The consensus engine this is built on. - pub engine: Arc, - /// The chain's genesis block. - pub genesis_block: Bytes, - /// State pruning algorithm. - pub pruning: Algorithm, - /// Handler for opening a restoration DB. - pub restoration_db_handler: Box, - /// Async IO channel for sending messages. - pub channel: Channel, - /// The directory to put snapshots in. - /// Usually "/snapshot" - pub snapshot_root: PathBuf, - /// A handle for database restoration. - pub client: Arc, + /// The consensus engine this is built on. + pub engine: Arc, + /// The chain's genesis block. + pub genesis_block: Bytes, + /// State pruning algorithm. + pub pruning: Algorithm, + /// Handler for opening a restoration DB. + pub restoration_db_handler: Box, + /// Async IO channel for sending messages. + pub channel: Channel, + /// The directory to put snapshots in. 
+ /// Usually "/snapshot" + pub snapshot_root: PathBuf, + /// A handle for database restoration. + pub client: Arc, } /// `SnapshotService` implementation. /// This controls taking snapshots and restoring from them. pub struct Service { - restoration: Mutex>, - restoration_db_handler: Box, - snapshot_root: PathBuf, - io_channel: Mutex, - pruning: Algorithm, - status: Mutex, - reader: RwLock>, - engine: Arc, - genesis_block: Bytes, - state_chunks: AtomicUsize, - block_chunks: AtomicUsize, - client: Arc, - progress: super::Progress, - taking_snapshot: AtomicBool, - restoring_snapshot: AtomicBool, + restoration: Mutex>, + restoration_db_handler: Box, + snapshot_root: PathBuf, + io_channel: Mutex, + pruning: Algorithm, + status: Mutex, + reader: RwLock>, + engine: Arc, + genesis_block: Bytes, + state_chunks: AtomicUsize, + block_chunks: AtomicUsize, + client: Arc, + progress: super::Progress, + taking_snapshot: AtomicBool, + restoring_snapshot: AtomicBool, } impl Service { - /// Create a new snapshot service from the given parameters. - pub fn new(params: ServiceParams) -> Result { - let mut service = Service { - restoration: Mutex::new(None), - restoration_db_handler: params.restoration_db_handler, - snapshot_root: params.snapshot_root, - io_channel: Mutex::new(params.channel), - pruning: params.pruning, - status: Mutex::new(RestorationStatus::Inactive), - reader: RwLock::new(None), - engine: params.engine, - genesis_block: params.genesis_block, - state_chunks: AtomicUsize::new(0), - block_chunks: AtomicUsize::new(0), - client: params.client, - progress: Default::default(), - taking_snapshot: AtomicBool::new(false), - restoring_snapshot: AtomicBool::new(false), - }; - - // create the root snapshot dir if it doesn't exist. - if let Err(e) = fs::create_dir_all(&service.snapshot_root) { - if e.kind() != ErrorKind::AlreadyExists { - return Err(e.into()) - } - } - - // delete the temporary restoration DB dir if it does exist. 
- if let Err(e) = fs::remove_dir_all(service.restoration_db()) { - if e.kind() != ErrorKind::NotFound { - return Err(e.into()) - } - } - - // delete the temporary snapshot dir if it does exist. - if let Err(e) = fs::remove_dir_all(service.temp_snapshot_dir()) { - if e.kind() != ErrorKind::NotFound { - return Err(e.into()) - } - } - - let reader = LooseReader::new(service.snapshot_dir()).ok(); - *service.reader.get_mut() = reader; - - Ok(service) - } - - // get the current snapshot dir. - fn snapshot_dir(&self) -> PathBuf { - let mut dir = self.snapshot_root.clone(); - dir.push("current"); - dir - } - - // get the temporary snapshot dir. - fn temp_snapshot_dir(&self) -> PathBuf { - let mut dir = self.snapshot_root.clone(); - dir.push("in_progress"); - dir - } - - // get the restoration directory. - fn restoration_dir(&self) -> PathBuf { - let mut dir = self.snapshot_root.clone(); - dir.push("restoration"); - dir - } - - // restoration db path. - fn restoration_db(&self) -> PathBuf { - let mut dir = self.restoration_dir(); - dir.push("db"); - dir - } - - // temporary snapshot recovery path. - fn temp_recovery_dir(&self) -> PathBuf { - let mut dir = self.restoration_dir(); - dir.push("temp"); - dir - } - - // previous snapshot chunks path. - fn prev_chunks_dir(&self) -> PathBuf { - let mut dir = self.snapshot_root.clone(); - dir.push("prev_chunks"); - dir - } - - // replace one the client's database with our own. 
- fn replace_client_db(&self) -> Result<(), Error> { - let migrated_blocks = self.migrate_blocks()?; - info!(target: "snapshot", "Migrated {} ancient blocks", migrated_blocks); - - let rest_db = self.restoration_db(); - self.client.restore_db(&*rest_db.to_string_lossy())?; - Ok(()) - } - - // Migrate the blocks in the current DB into the new chain - fn migrate_blocks(&self) -> Result { - // Count the number of migrated blocks - let mut count = 0; - let rest_db = self.restoration_db(); - - let cur_chain_info = self.client.chain_info(); - - let next_db = self.restoration_db_handler.open(&rest_db)?; - let next_chain = BlockChain::new(Default::default(), &[], next_db.clone()); - let next_chain_info = next_chain.chain_info(); - - // The old database looks like this: - // [genesis, best_ancient_block] ... [first_block, best_block] - // If we are fully synced neither `best_ancient_block` nor `first_block` is set, and we can assume that the whole range from [genesis, best_block] is imported. - // The new database only contains the tip of the chain ([first_block, best_block]), - // so the useful set of blocks is defined as: - // [0 ... min(new.first_block, best_ancient_block or best_block)] - let find_range = || -> Option<(H256, H256)> { - let next_available_from = next_chain_info.first_block_number?; - let cur_available_to = cur_chain_info.ancient_block_number.unwrap_or(cur_chain_info.best_block_number); - - let highest_block_num = cmp::min(next_available_from.saturating_sub(1), cur_available_to); - - if highest_block_num == 0 { - return None; - } - - trace!(target: "snapshot", "Trying to import ancient blocks until {}", highest_block_num); - - // Here we start from the highest block number and go backward to 0, - // thus starting at `highest_block_num` and targetting `0`. 
- let target_hash = self.client.block_hash(BlockId::Number(0))?; - let start_hash = self.client.block_hash(BlockId::Number(highest_block_num))?; - - Some((start_hash, target_hash)) - }; - - let (start_hash, target_hash) = match find_range() { - Some(x) => x, - None => return Ok(0), - }; - - let mut batch = DBTransaction::new(); - let mut parent_hash = start_hash; - while parent_hash != target_hash { - // Early return if restoration is aborted - if !self.restoring_snapshot.load(Ordering::SeqCst) { - return Ok(count); - } - - let block = self.client.block(BlockId::Hash(parent_hash)).ok_or(::snapshot::error::Error::UnlinkedAncientBlockChain)?; - parent_hash = block.parent_hash(); - - let block_number = block.number(); - let block_receipts = self.client.block_receipts(&block.hash()); - let parent_total_difficulty = self.client.block_total_difficulty(BlockId::Hash(parent_hash)); - - match (block_receipts, parent_total_difficulty) { - (Some(block_receipts), Some(parent_total_difficulty)) => { - let block_receipts = block_receipts.receipts; - - next_chain.insert_unordered_block(&mut batch, block, block_receipts, Some(parent_total_difficulty), false, true); - count += 1; - }, - _ => break, - } - - // Writing changes to DB and logging every now and then - if block_number % 1_000 == 0 { - next_db.key_value().write_buffered(batch); - next_chain.commit(); - next_db.key_value().flush().expect("DB flush failed."); - batch = DBTransaction::new(); - } - - if block_number % 10_000 == 0 { - info!(target: "snapshot", "Block restoration at #{}", block_number); - } - } - - // Final commit to the DB - next_db.key_value().write_buffered(batch); - next_chain.commit(); - next_db.key_value().flush().expect("DB flush failed."); - - // We couldn't reach the targeted hash - if parent_hash != target_hash { - return Err(::snapshot::error::Error::UnlinkedAncientBlockChain.into()); - } - - // Update best ancient block in the Next Chain - next_chain.update_best_ancient_block(&start_hash); - 
Ok(count) - } - - /// Get a reference to the snapshot reader. - pub fn reader(&self) -> RwLockReadGuard> { - self.reader.read() - } - - /// Tick the snapshot service. This will log any active snapshot - /// being taken. - pub fn tick(&self) { - if self.progress.done() || !self.taking_snapshot.load(Ordering::SeqCst) { return } - - let p = &self.progress; - info!("Snapshot: {} accounts {} blocks {} bytes", p.accounts(), p.blocks(), p.size()); - } - - /// Take a snapshot at the block with the given number. - /// calling this while a restoration is in progress or vice versa - /// will lead to a race condition where the first one to finish will - /// have their produced snapshot overwritten. - pub fn take_snapshot(&self, client: &Client, num: u64) -> Result<(), Error> { - if self.taking_snapshot.compare_and_swap(false, true, Ordering::SeqCst) { - info!("Skipping snapshot at #{} as another one is currently in-progress.", num); - return Ok(()); - } - - info!("Taking snapshot at #{}", num); - self.progress.reset(); - - let temp_dir = self.temp_snapshot_dir(); - let snapshot_dir = self.snapshot_dir(); - - let _ = fs::remove_dir_all(&temp_dir); - - let writer = LooseWriter::new(temp_dir.clone())?; - - let guard = Guard::new(temp_dir.clone()); - let res = client.take_snapshot(writer, BlockId::Number(num), &self.progress); - self.taking_snapshot.store(false, Ordering::SeqCst); - if let Err(e) = res { - if client.chain_info().best_block_number >= num + client.pruning_history() { - // The state we were snapshotting was pruned before we could finish. - info!("Periodic snapshot failed: block state pruned. Run with a longer `--pruning-history` or with `--no-periodic-snapshot`"); - return Err(e); - } else { - return Err(e); - } - } - - info!("Finished taking snapshot at #{}", num); - - let mut reader = self.reader.write(); - - // destroy the old snapshot reader. 
- *reader = None; - - if snapshot_dir.exists() { - fs::remove_dir_all(&snapshot_dir)?; - } - - fs::rename(temp_dir, &snapshot_dir)?; - - *reader = Some(LooseReader::new(snapshot_dir)?); - - guard.disarm(); - Ok(()) - } - - /// Initialize the restoration synchronously. - /// The recover flag indicates whether to recover the restored snapshot. - pub fn init_restore(&self, manifest: ManifestData, recover: bool) -> Result<(), Error> { - let mut res = self.restoration.lock(); - - let rest_dir = self.restoration_dir(); - let rest_db = self.restoration_db(); - let recovery_temp = self.temp_recovery_dir(); - let prev_chunks = self.prev_chunks_dir(); - - // delete and restore the restoration dir. - if let Err(e) = fs::remove_dir_all(&prev_chunks) { - match e.kind() { - ErrorKind::NotFound => {}, - _ => return Err(e.into()), - } - } - - // Move the previous recovery temp directory - // to `prev_chunks` to be able to restart restoring - // with previously downloaded blocks - // This step is optional, so don't fail on error - fs::rename(&recovery_temp, &prev_chunks).ok(); - - self.state_chunks.store(0, Ordering::SeqCst); - self.block_chunks.store(0, Ordering::SeqCst); - - // tear down existing restoration. - *res = None; - - // delete and restore the restoration dir. - if let Err(e) = fs::remove_dir_all(&rest_dir) { - match e.kind() { - ErrorKind::NotFound => {}, - _ => return Err(e.into()), - } - } - - *self.status.lock() = RestorationStatus::Initializing { - chunks_done: 0, - }; - - fs::create_dir_all(&rest_dir)?; - - // make new restoration. 
- let writer = match recover { - true => Some(LooseWriter::new(recovery_temp)?), - false => None - }; - - let params = RestorationParams { - manifest: manifest.clone(), - pruning: self.pruning, - db: self.restoration_db_handler.open(&rest_db)?, - writer: writer, - genesis: &self.genesis_block, - guard: Guard::new(rest_db), - engine: &*self.engine, - }; - - let state_chunks = manifest.state_hashes.len(); - let block_chunks = manifest.block_hashes.len(); - - *res = Some(Restoration::new(params)?); - - self.restoring_snapshot.store(true, Ordering::SeqCst); - - // Import previous chunks, continue if it fails - self.import_prev_chunks(&mut res, manifest).ok(); - - // It could be that the restoration failed or completed in the meanwhile - let mut restoration_status = self.status.lock(); - if let RestorationStatus::Initializing { .. } = *restoration_status { - *restoration_status = RestorationStatus::Ongoing { - state_chunks: state_chunks as u32, - block_chunks: block_chunks as u32, - state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32, - block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32, - }; - } - - Ok(()) - } - - /// Import the previous chunks into the current restoration - fn import_prev_chunks(&self, restoration: &mut Option, manifest: ManifestData) -> Result<(), Error> { - let prev_chunks = self.prev_chunks_dir(); - - // Restore previous snapshot chunks - let files = fs::read_dir(prev_chunks.as_path())?; - let mut num_temp_chunks = 0; - - for prev_chunk_file in files { - // Don't go over all the files if the restoration has been aborted - if !self.restoring_snapshot.load(Ordering::SeqCst) { - trace!(target:"snapshot", "Aborting importing previous chunks"); - return Ok(()); - } - // Import the chunk, don't fail and continue if one fails - match self.import_prev_chunk(restoration, &manifest, prev_chunk_file) { - Ok(true) => num_temp_chunks += 1, - Err(e) => trace!(target: "snapshot", "Error importing chunk: {:?}", e), - _ => (), - } - } 
- - trace!(target:"snapshot", "Imported {} previous chunks", num_temp_chunks); - - // Remove the prev temp directory - fs::remove_dir_all(&prev_chunks)?; - - Ok(()) - } - - /// Import a previous chunk at the given path. Returns whether the block was imported or not - fn import_prev_chunk(&self, restoration: &mut Option, manifest: &ManifestData, file: io::Result) -> Result { - let file = file?; - let path = file.path(); - - let mut file = File::open(path.clone())?; - let mut buffer = Vec::new(); - file.read_to_end(&mut buffer)?; - - let hash = keccak(&buffer); - - let is_state = if manifest.block_hashes.contains(&hash) { - false - } else if manifest.state_hashes.contains(&hash) { - true - } else { - return Ok(false); - }; - - self.feed_chunk_with_restoration(restoration, hash, &buffer, is_state)?; - - trace!(target: "snapshot", "Fed chunk {:?}", hash); - - Ok(true) - } - - // finalize the restoration. this accepts an already-locked - // restoration as an argument -- so acquiring it again _will_ - // lead to deadlock. - fn finalize_restoration(&self, rest: &mut Option) -> Result<(), Error> { - trace!(target: "snapshot", "finalizing restoration"); - - let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some()); - - // destroy the restoration before replacing databases and snapshot. - rest.take() - .map(|r| r.finalize(&*self.engine)) - .unwrap_or(Ok(()))?; - - self.replace_client_db()?; - - if recover { - let mut reader = self.reader.write(); - *reader = None; // destroy the old reader if it existed. 
- - let snapshot_dir = self.snapshot_dir(); - - if snapshot_dir.exists() { - trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy()); - fs::remove_dir_all(&snapshot_dir)?; - } - - trace!(target: "snapshot", "copying restored snapshot files over"); - fs::rename(self.temp_recovery_dir(), &snapshot_dir)?; - - *reader = Some(LooseReader::new(snapshot_dir)?); - } - - let _ = fs::remove_dir_all(self.restoration_dir()); - *self.status.lock() = RestorationStatus::Inactive; - - Ok(()) - } - - /// Feed a chunk of either kind (block or state). no-op if no restoration or status is wrong. - fn feed_chunk(&self, hash: H256, chunk: &[u8], is_state: bool) { - // TODO: be able to process block chunks and state chunks at same time? - let mut restoration = self.restoration.lock(); - match self.feed_chunk_with_restoration(&mut restoration, hash, chunk, is_state) { - Ok(()) | - Err(Error(SnapshotErrorKind::Snapshot(SnapshotError::RestorationAborted), _)) => (), - Err(e) => { - warn!("Encountered error during snapshot restoration: {}", e); - *self.restoration.lock() = None; - *self.status.lock() = RestorationStatus::Failed; - let _ = fs::remove_dir_all(self.restoration_dir()); - } - } - } - - /// Feed a chunk with the Restoration - fn feed_chunk_with_restoration(&self, restoration: &mut Option, hash: H256, chunk: &[u8], is_state: bool) -> Result<(), Error> { - let (result, db) = { - match self.status() { - RestorationStatus::Inactive | RestorationStatus::Failed => { - trace!(target: "snapshot", "Tried to restore chunk {:x} while inactive or failed", hash); - return Ok(()); - }, - RestorationStatus::Ongoing { .. } | RestorationStatus::Initializing { .. 
} => { - let (res, db) = { - let rest = match *restoration { - Some(ref mut r) => r, - None => return Ok(()), - }; - - (match is_state { - true => rest.feed_state(hash, chunk, &self.restoring_snapshot), - false => rest.feed_blocks(hash, chunk, &*self.engine, &self.restoring_snapshot), - }.map(|_| rest.is_done()), rest.db.clone()) - }; - - let res = match res { - Ok(is_done) => { - match is_state { - true => self.state_chunks.fetch_add(1, Ordering::SeqCst), - false => self.block_chunks.fetch_add(1, Ordering::SeqCst), - }; - - match is_done { - true => { - db.key_value().flush()?; - drop(db); - return self.finalize_restoration(&mut *restoration); - }, - false => Ok(()) - } - } - other => other.map(drop), - }; - (res, db) - } - } - }; - - result?; - db.key_value().flush()?; - Ok(()) - } - - /// Feed a state chunk to be processed synchronously. - pub fn feed_state_chunk(&self, hash: H256, chunk: &[u8]) { - self.feed_chunk(hash, chunk, true); - } - - /// Feed a block chunk to be processed synchronously. - pub fn feed_block_chunk(&self, hash: H256, chunk: &[u8]) { - self.feed_chunk(hash, chunk, false); - } + /// Create a new snapshot service from the given parameters. + pub fn new(params: ServiceParams) -> Result { + let mut service = Service { + restoration: Mutex::new(None), + restoration_db_handler: params.restoration_db_handler, + snapshot_root: params.snapshot_root, + io_channel: Mutex::new(params.channel), + pruning: params.pruning, + status: Mutex::new(RestorationStatus::Inactive), + reader: RwLock::new(None), + engine: params.engine, + genesis_block: params.genesis_block, + state_chunks: AtomicUsize::new(0), + block_chunks: AtomicUsize::new(0), + client: params.client, + progress: Default::default(), + taking_snapshot: AtomicBool::new(false), + restoring_snapshot: AtomicBool::new(false), + }; + + // create the root snapshot dir if it doesn't exist. 
+ if let Err(e) = fs::create_dir_all(&service.snapshot_root) { + if e.kind() != ErrorKind::AlreadyExists { + return Err(e.into()); + } + } + + // delete the temporary restoration DB dir if it does exist. + if let Err(e) = fs::remove_dir_all(service.restoration_db()) { + if e.kind() != ErrorKind::NotFound { + return Err(e.into()); + } + } + + // delete the temporary snapshot dir if it does exist. + if let Err(e) = fs::remove_dir_all(service.temp_snapshot_dir()) { + if e.kind() != ErrorKind::NotFound { + return Err(e.into()); + } + } + + let reader = LooseReader::new(service.snapshot_dir()).ok(); + *service.reader.get_mut() = reader; + + Ok(service) + } + + // get the current snapshot dir. + fn snapshot_dir(&self) -> PathBuf { + let mut dir = self.snapshot_root.clone(); + dir.push("current"); + dir + } + + // get the temporary snapshot dir. + fn temp_snapshot_dir(&self) -> PathBuf { + let mut dir = self.snapshot_root.clone(); + dir.push("in_progress"); + dir + } + + // get the restoration directory. + fn restoration_dir(&self) -> PathBuf { + let mut dir = self.snapshot_root.clone(); + dir.push("restoration"); + dir + } + + // restoration db path. + fn restoration_db(&self) -> PathBuf { + let mut dir = self.restoration_dir(); + dir.push("db"); + dir + } + + // temporary snapshot recovery path. + fn temp_recovery_dir(&self) -> PathBuf { + let mut dir = self.restoration_dir(); + dir.push("temp"); + dir + } + + // previous snapshot chunks path. + fn prev_chunks_dir(&self) -> PathBuf { + let mut dir = self.snapshot_root.clone(); + dir.push("prev_chunks"); + dir + } + + // replace one of the client's databases with our own. 
+ fn replace_client_db(&self) -> Result<(), Error> { + let migrated_blocks = self.migrate_blocks()?; + info!(target: "snapshot", "Migrated {} ancient blocks", migrated_blocks); + + let rest_db = self.restoration_db(); + self.client.restore_db(&*rest_db.to_string_lossy())?; + Ok(()) + } + + // Migrate the blocks in the current DB into the new chain + fn migrate_blocks(&self) -> Result { + // Count the number of migrated blocks + let mut count = 0; + let rest_db = self.restoration_db(); + + let cur_chain_info = self.client.chain_info(); + + let next_db = self.restoration_db_handler.open(&rest_db)?; + let next_chain = BlockChain::new(Default::default(), &[], next_db.clone()); + let next_chain_info = next_chain.chain_info(); + + // The old database looks like this: + // [genesis, best_ancient_block] ... [first_block, best_block] + // If we are fully synced neither `best_ancient_block` nor `first_block` is set, and we can assume that the whole range from [genesis, best_block] is imported. + // The new database only contains the tip of the chain ([first_block, best_block]), + // so the useful set of blocks is defined as: + // [0 ... min(new.first_block, best_ancient_block or best_block)] + let find_range = || -> Option<(H256, H256)> { + let next_available_from = next_chain_info.first_block_number?; + let cur_available_to = cur_chain_info + .ancient_block_number + .unwrap_or(cur_chain_info.best_block_number); + + let highest_block_num = + cmp::min(next_available_from.saturating_sub(1), cur_available_to); + + if highest_block_num == 0 { + return None; + } + + trace!(target: "snapshot", "Trying to import ancient blocks until {}", highest_block_num); + + // Here we start from the highest block number and go backward to 0, + // thus starting at `highest_block_num` and targeting `0`. 
+ let target_hash = self.client.block_hash(BlockId::Number(0))?; + let start_hash = self.client.block_hash(BlockId::Number(highest_block_num))?; + + Some((start_hash, target_hash)) + }; + + let (start_hash, target_hash) = match find_range() { + Some(x) => x, + None => return Ok(0), + }; + + let mut batch = DBTransaction::new(); + let mut parent_hash = start_hash; + while parent_hash != target_hash { + // Early return if restoration is aborted + if !self.restoring_snapshot.load(Ordering::SeqCst) { + return Ok(count); + } + + let block = self + .client + .block(BlockId::Hash(parent_hash)) + .ok_or(::snapshot::error::Error::UnlinkedAncientBlockChain)?; + parent_hash = block.parent_hash(); + + let block_number = block.number(); + let block_receipts = self.client.block_receipts(&block.hash()); + let parent_total_difficulty = self + .client + .block_total_difficulty(BlockId::Hash(parent_hash)); + + match (block_receipts, parent_total_difficulty) { + (Some(block_receipts), Some(parent_total_difficulty)) => { + let block_receipts = block_receipts.receipts; + + next_chain.insert_unordered_block( + &mut batch, + block, + block_receipts, + Some(parent_total_difficulty), + false, + true, + ); + count += 1; + } + _ => break, + } + + // Writing changes to DB and logging every now and then + if block_number % 1_000 == 0 { + next_db.key_value().write_buffered(batch); + next_chain.commit(); + next_db.key_value().flush().expect("DB flush failed."); + batch = DBTransaction::new(); + } + + if block_number % 10_000 == 0 { + info!(target: "snapshot", "Block restoration at #{}", block_number); + } + } + + // Final commit to the DB + next_db.key_value().write_buffered(batch); + next_chain.commit(); + next_db.key_value().flush().expect("DB flush failed."); + + // We couldn't reach the targeted hash + if parent_hash != target_hash { + return Err(::snapshot::error::Error::UnlinkedAncientBlockChain.into()); + } + + // Update best ancient block in the Next Chain + 
next_chain.update_best_ancient_block(&start_hash); + Ok(count) + } + + /// Get a reference to the snapshot reader. + pub fn reader(&self) -> RwLockReadGuard> { + self.reader.read() + } + + /// Tick the snapshot service. This will log any active snapshot + /// being taken. + pub fn tick(&self) { + if self.progress.done() || !self.taking_snapshot.load(Ordering::SeqCst) { + return; + } + + let p = &self.progress; + info!( + "Snapshot: {} accounts {} blocks {} bytes", + p.accounts(), + p.blocks(), + p.size() + ); + } + + /// Take a snapshot at the block with the given number. + /// calling this while a restoration is in progress or vice versa + /// will lead to a race condition where the first one to finish will + /// have their produced snapshot overwritten. + pub fn take_snapshot(&self, client: &Client, num: u64) -> Result<(), Error> { + if self + .taking_snapshot + .compare_and_swap(false, true, Ordering::SeqCst) + { + info!( + "Skipping snapshot at #{} as another one is currently in-progress.", + num + ); + return Ok(()); + } + + info!("Taking snapshot at #{}", num); + self.progress.reset(); + + let temp_dir = self.temp_snapshot_dir(); + let snapshot_dir = self.snapshot_dir(); + + let _ = fs::remove_dir_all(&temp_dir); + + let writer = LooseWriter::new(temp_dir.clone())?; + + let guard = Guard::new(temp_dir.clone()); + let res = client.take_snapshot(writer, BlockId::Number(num), &self.progress); + self.taking_snapshot.store(false, Ordering::SeqCst); + if let Err(e) = res { + if client.chain_info().best_block_number >= num + client.pruning_history() { + // The state we were snapshotting was pruned before we could finish. + info!("Periodic snapshot failed: block state pruned. Run with a longer `--pruning-history` or with `--no-periodic-snapshot`"); + return Err(e); + } else { + return Err(e); + } + } + + info!("Finished taking snapshot at #{}", num); + + let mut reader = self.reader.write(); + + // destroy the old snapshot reader. 
+ *reader = None; + + if snapshot_dir.exists() { + fs::remove_dir_all(&snapshot_dir)?; + } + + fs::rename(temp_dir, &snapshot_dir)?; + + *reader = Some(LooseReader::new(snapshot_dir)?); + + guard.disarm(); + Ok(()) + } + + /// Initialize the restoration synchronously. + /// The recover flag indicates whether to recover the restored snapshot. + pub fn init_restore(&self, manifest: ManifestData, recover: bool) -> Result<(), Error> { + let mut res = self.restoration.lock(); + + let rest_dir = self.restoration_dir(); + let rest_db = self.restoration_db(); + let recovery_temp = self.temp_recovery_dir(); + let prev_chunks = self.prev_chunks_dir(); + + // delete and restore the restoration dir. + if let Err(e) = fs::remove_dir_all(&prev_chunks) { + match e.kind() { + ErrorKind::NotFound => {} + _ => return Err(e.into()), + } + } + + // Move the previous recovery temp directory + // to `prev_chunks` to be able to restart restoring + // with previously downloaded blocks + // This step is optional, so don't fail on error + fs::rename(&recovery_temp, &prev_chunks).ok(); + + self.state_chunks.store(0, Ordering::SeqCst); + self.block_chunks.store(0, Ordering::SeqCst); + + // tear down existing restoration. + *res = None; + + // delete and restore the restoration dir. + if let Err(e) = fs::remove_dir_all(&rest_dir) { + match e.kind() { + ErrorKind::NotFound => {} + _ => return Err(e.into()), + } + } + + *self.status.lock() = RestorationStatus::Initializing { chunks_done: 0 }; + + fs::create_dir_all(&rest_dir)?; + + // make new restoration. 
+ let writer = match recover { + true => Some(LooseWriter::new(recovery_temp)?), + false => None, + }; + + let params = RestorationParams { + manifest: manifest.clone(), + pruning: self.pruning, + db: self.restoration_db_handler.open(&rest_db)?, + writer: writer, + genesis: &self.genesis_block, + guard: Guard::new(rest_db), + engine: &*self.engine, + }; + + let state_chunks = manifest.state_hashes.len(); + let block_chunks = manifest.block_hashes.len(); + + *res = Some(Restoration::new(params)?); + + self.restoring_snapshot.store(true, Ordering::SeqCst); + + // Import previous chunks, continue if it fails + self.import_prev_chunks(&mut res, manifest).ok(); + + // It could be that the restoration failed or completed in the meanwhile + let mut restoration_status = self.status.lock(); + if let RestorationStatus::Initializing { .. } = *restoration_status { + *restoration_status = RestorationStatus::Ongoing { + state_chunks: state_chunks as u32, + block_chunks: block_chunks as u32, + state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32, + block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32, + }; + } + + Ok(()) + } + + /// Import the previous chunks into the current restoration + fn import_prev_chunks( + &self, + restoration: &mut Option, + manifest: ManifestData, + ) -> Result<(), Error> { + let prev_chunks = self.prev_chunks_dir(); + + // Restore previous snapshot chunks + let files = fs::read_dir(prev_chunks.as_path())?; + let mut num_temp_chunks = 0; + + for prev_chunk_file in files { + // Don't go over all the files if the restoration has been aborted + if !self.restoring_snapshot.load(Ordering::SeqCst) { + trace!(target:"snapshot", "Aborting importing previous chunks"); + return Ok(()); + } + // Import the chunk, don't fail and continue if one fails + match self.import_prev_chunk(restoration, &manifest, prev_chunk_file) { + Ok(true) => num_temp_chunks += 1, + Err(e) => trace!(target: "snapshot", "Error importing chunk: {:?}", e), + _ => 
(), + } + } + + trace!(target:"snapshot", "Imported {} previous chunks", num_temp_chunks); + + // Remove the prev temp directory + fs::remove_dir_all(&prev_chunks)?; + + Ok(()) + } + + /// Import a previous chunk at the given path. Returns whether the block was imported or not + fn import_prev_chunk( + &self, + restoration: &mut Option, + manifest: &ManifestData, + file: io::Result, + ) -> Result { + let file = file?; + let path = file.path(); + + let mut file = File::open(path.clone())?; + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer)?; + + let hash = keccak(&buffer); + + let is_state = if manifest.block_hashes.contains(&hash) { + false + } else if manifest.state_hashes.contains(&hash) { + true + } else { + return Ok(false); + }; + + self.feed_chunk_with_restoration(restoration, hash, &buffer, is_state)?; + + trace!(target: "snapshot", "Fed chunk {:?}", hash); + + Ok(true) + } + + // finalize the restoration. this accepts an already-locked + // restoration as an argument -- so acquiring it again _will_ + // lead to deadlock. + fn finalize_restoration(&self, rest: &mut Option) -> Result<(), Error> { + trace!(target: "snapshot", "finalizing restoration"); + + let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some()); + + // destroy the restoration before replacing databases and snapshot. + rest.take() + .map(|r| r.finalize(&*self.engine)) + .unwrap_or(Ok(()))?; + + self.replace_client_db()?; + + if recover { + let mut reader = self.reader.write(); + *reader = None; // destroy the old reader if it existed. 
+ + let snapshot_dir = self.snapshot_dir(); + + if snapshot_dir.exists() { + trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy()); + fs::remove_dir_all(&snapshot_dir)?; + } + + trace!(target: "snapshot", "copying restored snapshot files over"); + fs::rename(self.temp_recovery_dir(), &snapshot_dir)?; + + *reader = Some(LooseReader::new(snapshot_dir)?); + } + + let _ = fs::remove_dir_all(self.restoration_dir()); + *self.status.lock() = RestorationStatus::Inactive; + + Ok(()) + } + + /// Feed a chunk of either kind (block or state). no-op if no restoration or status is wrong. + fn feed_chunk(&self, hash: H256, chunk: &[u8], is_state: bool) { + // TODO: be able to process block chunks and state chunks at same time? + let mut restoration = self.restoration.lock(); + match self.feed_chunk_with_restoration(&mut restoration, hash, chunk, is_state) { + Ok(()) + | Err(Error(SnapshotErrorKind::Snapshot(SnapshotError::RestorationAborted), _)) => (), + Err(e) => { + warn!("Encountered error during snapshot restoration: {}", e); + *self.restoration.lock() = None; + *self.status.lock() = RestorationStatus::Failed; + let _ = fs::remove_dir_all(self.restoration_dir()); + } + } + } + + /// Feed a chunk with the Restoration + fn feed_chunk_with_restoration( + &self, + restoration: &mut Option, + hash: H256, + chunk: &[u8], + is_state: bool, + ) -> Result<(), Error> { + let (result, db) = { + match self.status() { + RestorationStatus::Inactive | RestorationStatus::Failed => { + trace!(target: "snapshot", "Tried to restore chunk {:x} while inactive or failed", hash); + return Ok(()); + } + RestorationStatus::Ongoing { .. } | RestorationStatus::Initializing { .. 
} => { + let (res, db) = { + let rest = match *restoration { + Some(ref mut r) => r, + None => return Ok(()), + }; + + ( + match is_state { + true => rest.feed_state(hash, chunk, &self.restoring_snapshot), + false => rest.feed_blocks( + hash, + chunk, + &*self.engine, + &self.restoring_snapshot, + ), + } + .map(|_| rest.is_done()), + rest.db.clone(), + ) + }; + + let res = match res { + Ok(is_done) => { + match is_state { + true => self.state_chunks.fetch_add(1, Ordering::SeqCst), + false => self.block_chunks.fetch_add(1, Ordering::SeqCst), + }; + + match is_done { + true => { + db.key_value().flush()?; + drop(db); + return self.finalize_restoration(&mut *restoration); + } + false => Ok(()), + } + } + other => other.map(drop), + }; + (res, db) + } + } + }; + + result?; + db.key_value().flush()?; + Ok(()) + } + + /// Feed a state chunk to be processed synchronously. + pub fn feed_state_chunk(&self, hash: H256, chunk: &[u8]) { + self.feed_chunk(hash, chunk, true); + } + + /// Feed a block chunk to be processed synchronously. 
+ pub fn feed_block_chunk(&self, hash: H256, chunk: &[u8]) { + self.feed_chunk(hash, chunk, false); + } } impl SnapshotService for Service { - fn manifest(&self) -> Option { - self.reader.read().as_ref().map(|r| r.manifest().clone()) - } + fn manifest(&self) -> Option { + self.reader.read().as_ref().map(|r| r.manifest().clone()) + } - fn supported_versions(&self) -> Option<(u64, u64)> { - self.engine.snapshot_components() - .map(|c| (c.min_supported_version(), c.current_version())) - } + fn supported_versions(&self) -> Option<(u64, u64)> { + self.engine + .snapshot_components() + .map(|c| (c.min_supported_version(), c.current_version())) + } - fn chunk(&self, hash: H256) -> Option { - self.reader.read().as_ref().and_then(|r| r.chunk(hash).ok()) - } + fn chunk(&self, hash: H256) -> Option { + self.reader.read().as_ref().and_then(|r| r.chunk(hash).ok()) + } - fn completed_chunks(&self) -> Option> { - let restoration = self.restoration.lock(); + fn completed_chunks(&self) -> Option> { + let restoration = self.restoration.lock(); - match *restoration { - Some(ref restoration) => { - let completed_chunks = restoration.manifest.block_hashes - .iter() - .filter(|h| !restoration.block_chunks_left.contains(h)) - .chain( - restoration.manifest.state_hashes - .iter() - .filter(|h| !restoration.state_chunks_left.contains(h)) - ) - .map(|h| *h) - .collect(); + match *restoration { + Some(ref restoration) => { + let completed_chunks = restoration + .manifest + .block_hashes + .iter() + .filter(|h| !restoration.block_chunks_left.contains(h)) + .chain( + restoration + .manifest + .state_hashes + .iter() + .filter(|h| !restoration.state_chunks_left.contains(h)), + ) + .map(|h| *h) + .collect(); - Some(completed_chunks) - }, - None => None, - } - } + Some(completed_chunks) + } + None => None, + } + } - fn status(&self) -> RestorationStatus { - let mut cur_status = self.status.lock(); + fn status(&self) -> RestorationStatus { + let mut cur_status = self.status.lock(); - match 
*cur_status { - RestorationStatus::Initializing { ref mut chunks_done } => { - *chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32 + - self.block_chunks.load(Ordering::SeqCst) as u32; - } - RestorationStatus::Ongoing { ref mut state_chunks_done, ref mut block_chunks_done, .. } => { - *state_chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32; - *block_chunks_done = self.block_chunks.load(Ordering::SeqCst) as u32; - }, - _ => (), - } + match *cur_status { + RestorationStatus::Initializing { + ref mut chunks_done, + } => { + *chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32 + + self.block_chunks.load(Ordering::SeqCst) as u32; + } + RestorationStatus::Ongoing { + ref mut state_chunks_done, + ref mut block_chunks_done, + .. + } => { + *state_chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32; + *block_chunks_done = self.block_chunks.load(Ordering::SeqCst) as u32; + } + _ => (), + } - cur_status.clone() - } + cur_status.clone() + } - fn begin_restore(&self, manifest: ManifestData) { - if let Err(e) = self.io_channel.lock().send(ClientIoMessage::BeginRestoration(manifest)) { - trace!("Error sending snapshot service message: {:?}", e); - } - } + fn begin_restore(&self, manifest: ManifestData) { + if let Err(e) = self + .io_channel + .lock() + .send(ClientIoMessage::BeginRestoration(manifest)) + { + trace!("Error sending snapshot service message: {:?}", e); + } + } - fn abort_restore(&self) { - trace!(target: "snapshot", "Aborting restore"); - self.restoring_snapshot.store(false, Ordering::SeqCst); - *self.restoration.lock() = None; - *self.status.lock() = RestorationStatus::Inactive; - } + fn abort_restore(&self) { + trace!(target: "snapshot", "Aborting restore"); + self.restoring_snapshot.store(false, Ordering::SeqCst); + *self.restoration.lock() = None; + *self.status.lock() = RestorationStatus::Inactive; + } - fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { - if let Err(e) = 
self.io_channel.lock().send(ClientIoMessage::FeedStateChunk(hash, chunk)) { - trace!("Error sending snapshot service message: {:?}", e); - } - } + fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { + if let Err(e) = self + .io_channel + .lock() + .send(ClientIoMessage::FeedStateChunk(hash, chunk)) + { + trace!("Error sending snapshot service message: {:?}", e); + } + } - fn restore_block_chunk(&self, hash: H256, chunk: Bytes) { - if let Err(e) = self.io_channel.lock().send(ClientIoMessage::FeedBlockChunk(hash, chunk)) { - trace!("Error sending snapshot service message: {:?}", e); - } - } + fn restore_block_chunk(&self, hash: H256, chunk: Bytes) { + if let Err(e) = self + .io_channel + .lock() + .send(ClientIoMessage::FeedBlockChunk(hash, chunk)) + { + trace!("Error sending snapshot service message: {:?}", e); + } + } - fn abort_snapshot(&self) { - if self.taking_snapshot.load(Ordering::SeqCst) { - trace!(target: "snapshot", "Aborting snapshot – Snapshot under way"); - self.progress.abort.store(true, Ordering::SeqCst); - } - } + fn abort_snapshot(&self) { + if self.taking_snapshot.load(Ordering::SeqCst) { + trace!(target: "snapshot", "Aborting snapshot – Snapshot under way"); + self.progress.abort.store(true, Ordering::SeqCst); + } + } - fn shutdown(&self) { - trace!(target: "snapshot", "Shut down SnapshotService"); - self.abort_restore(); - trace!(target: "snapshot", "Shut down SnapshotService - restore aborted"); - self.abort_snapshot(); - trace!(target: "snapshot", "Shut down SnapshotService - snapshot aborted"); - } + fn shutdown(&self) { + trace!(target: "snapshot", "Shut down SnapshotService"); + self.abort_restore(); + trace!(target: "snapshot", "Shut down SnapshotService - restore aborted"); + self.abort_snapshot(); + trace!(target: "snapshot", "Shut down SnapshotService - snapshot aborted"); + } } impl Drop for Service { - fn drop(&mut self) { - trace!(target: "shutdown", "Dropping Service"); - self.abort_restore(); - trace!(target: "shutdown", 
"Dropping Service - restore aborted"); - self.abort_snapshot(); - trace!(target: "shutdown", "Dropping Service - snapshot aborted"); - } + fn drop(&mut self) { + trace!(target: "shutdown", "Dropping Service"); + self.abort_restore(); + trace!(target: "shutdown", "Dropping Service - restore aborted"); + self.abort_snapshot(); + trace!(target: "shutdown", "Dropping Service - snapshot aborted"); + } } #[cfg(test)] mod tests { - use client::ClientIoMessage; - use io::{IoService}; - use spec::Spec; - use journaldb::Algorithm; - use snapshot::{ManifestData, RestorationStatus, SnapshotService}; - use super::*; - use tempdir::TempDir; - use test_helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler}; + use super::*; + use client::ClientIoMessage; + use io::IoService; + use journaldb::Algorithm; + use snapshot::{ManifestData, RestorationStatus, SnapshotService}; + use spec::Spec; + use tempdir::TempDir; + use test_helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler}; - #[test] - fn sends_async_messages() { - let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; - let client = generate_dummy_client_with_spec_and_data(Spec::new_null, 400, 5, &gas_prices); - let service = IoService::::start().unwrap(); - let spec = Spec::new_test(); + #[test] + fn sends_async_messages() { + let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; + let client = generate_dummy_client_with_spec_and_data(Spec::new_null, 400, 5, &gas_prices); + let service = IoService::::start().unwrap(); + let spec = Spec::new_test(); - let tempdir = TempDir::new("").unwrap(); - let dir = tempdir.path().join("snapshot"); + let tempdir = TempDir::new("").unwrap(); + let dir = tempdir.path().join("snapshot"); - let snapshot_params = ServiceParams { - engine: spec.engine.clone(), - genesis_block: spec.genesis_block(), - restoration_db_handler: restoration_db_handler(Default::default()), - pruning: Algorithm::Archive, - channel: service.channel(), - 
snapshot_root: dir, - client: client, - }; + let snapshot_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + restoration_db_handler: restoration_db_handler(Default::default()), + pruning: Algorithm::Archive, + channel: service.channel(), + snapshot_root: dir, + client: client, + }; - let service = Service::new(snapshot_params).unwrap(); + let service = Service::new(snapshot_params).unwrap(); - assert!(service.manifest().is_none()); - assert!(service.chunk(Default::default()).is_none()); - assert_eq!(service.status(), RestorationStatus::Inactive); + assert!(service.manifest().is_none()); + assert!(service.chunk(Default::default()).is_none()); + assert_eq!(service.status(), RestorationStatus::Inactive); - let manifest = ManifestData { - version: 2, - state_hashes: vec![], - block_hashes: vec![], - state_root: Default::default(), - block_number: 0, - block_hash: Default::default(), - }; + let manifest = ManifestData { + version: 2, + state_hashes: vec![], + block_hashes: vec![], + state_root: Default::default(), + block_number: 0, + block_hash: Default::default(), + }; - service.begin_restore(manifest); - service.abort_restore(); - service.restore_state_chunk(Default::default(), vec![]); - service.restore_block_chunk(Default::default(), vec![]); - } + service.begin_restore(manifest); + service.abort_restore(); + service.restore_state_chunk(Default::default(), vec![]); + service.restore_block_chunk(Default::default(), vec![]); + } - #[test] - fn cannot_finish_with_invalid_chunks() { - use ethereum_types::H256; - use kvdb_rocksdb::DatabaseConfig; + #[test] + fn cannot_finish_with_invalid_chunks() { + use ethereum_types::H256; + use kvdb_rocksdb::DatabaseConfig; - let spec = Spec::new_test(); - let tempdir = TempDir::new("").unwrap(); + let spec = Spec::new_test(); + let tempdir = TempDir::new("").unwrap(); - let state_hashes: Vec<_> = (0..5).map(|_| H256::random()).collect(); - let block_hashes: Vec<_> = (0..5).map(|_| 
H256::random()).collect(); - let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - let gb = spec.genesis_block(); - let flag = ::std::sync::atomic::AtomicBool::new(true); + let state_hashes: Vec<_> = (0..5).map(|_| H256::random()).collect(); + let block_hashes: Vec<_> = (0..5).map(|_| H256::random()).collect(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let gb = spec.genesis_block(); + let flag = ::std::sync::atomic::AtomicBool::new(true); - let params = RestorationParams { - manifest: ManifestData { - version: 2, - state_hashes: state_hashes.clone(), - block_hashes: block_hashes.clone(), - state_root: H256::default(), - block_number: 100000, - block_hash: H256::default(), - }, - pruning: Algorithm::Archive, - db: restoration_db_handler(db_config).open(&tempdir.path().to_owned()).unwrap(), - writer: None, - genesis: &gb, - guard: Guard::benign(), - engine: &*spec.engine.clone(), - }; + let params = RestorationParams { + manifest: ManifestData { + version: 2, + state_hashes: state_hashes.clone(), + block_hashes: block_hashes.clone(), + state_root: H256::default(), + block_number: 100000, + block_hash: H256::default(), + }, + pruning: Algorithm::Archive, + db: restoration_db_handler(db_config) + .open(&tempdir.path().to_owned()) + .unwrap(), + writer: None, + genesis: &gb, + guard: Guard::benign(), + engine: &*spec.engine.clone(), + }; - let mut restoration = Restoration::new(params).unwrap(); - let definitely_bad_chunk = [1, 2, 3, 4, 5]; + let mut restoration = Restoration::new(params).unwrap(); + let definitely_bad_chunk = [1, 2, 3, 4, 5]; - for hash in state_hashes { - assert!(restoration.feed_state(hash, &definitely_bad_chunk, &flag).is_err()); - assert!(!restoration.is_done()); - } + for hash in state_hashes { + assert!(restoration + .feed_state(hash, &definitely_bad_chunk, &flag) + .is_err()); + assert!(!restoration.is_done()); + } - for hash in block_hashes { - assert!(restoration.feed_blocks(hash, &definitely_bad_chunk, 
&*spec.engine, &flag).is_err()); - assert!(!restoration.is_done()); - } - } + for hash in block_hashes { + assert!(restoration + .feed_blocks(hash, &definitely_bad_chunk, &*spec.engine, &flag) + .is_err()); + assert!(!restoration.is_done()); + } + } } diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index 817e02499..7e117ad77 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -19,174 +19,181 @@ extern crate trie_standardmap; +use hash::KECCAK_NULL_RLP; use std::sync::Arc; -use hash::{KECCAK_NULL_RLP}; use account_db::AccountDBMut; -use types::basic_account::BasicAccount; use blockchain::{BlockChain, BlockChainDB}; -use client::{Client, ChainInfo}; +use client::{ChainInfo, Client}; use engines::EthEngine; -use snapshot::{StateRebuilder}; -use snapshot::io::{SnapshotReader, PackedWriter, PackedReader}; +use snapshot::{ + io::{PackedReader, PackedWriter, SnapshotReader}, + StateRebuilder, +}; +use types::basic_account::BasicAccount; -use tempdir::TempDir; use rand::Rng; +use tempdir::TempDir; -use kvdb::DBValue; -use ethereum_types::H256; -use hash_db::HashDB; -use keccak_hasher::KeccakHasher; -use journaldb; -use trie::{TrieMut, Trie}; -use ethtrie::{SecTrieDBMut, TrieDB, TrieDBMut}; use self::trie_standardmap::{Alphabet, StandardMap, ValueMode}; +use ethereum_types::H256; +use ethtrie::{SecTrieDBMut, TrieDB, TrieDBMut}; +use hash_db::HashDB; +use journaldb; +use keccak_hasher::KeccakHasher; +use kvdb::DBValue; +use trie::{Trie, TrieMut}; // the proportion of accounts we will alter each tick. const ACCOUNT_CHURN: f32 = 0.01; /// This structure will incrementally alter a state given an rng. pub struct StateProducer { - state_root: H256, - storage_seed: H256, + state_root: H256, + storage_seed: H256, } impl StateProducer { - /// Create a new `StateProducer`. 
- pub fn new() -> Self { - StateProducer { - state_root: KECCAK_NULL_RLP, - storage_seed: H256::zero(), - } - } + /// Create a new `StateProducer`. + pub fn new() -> Self { + StateProducer { + state_root: KECCAK_NULL_RLP, + storage_seed: H256::zero(), + } + } - /// Tick the state producer. This alters the state, writing new data into - /// the database. - pub fn tick(&mut self, rng: &mut R, db: &mut HashDB) { - // modify existing accounts. - let mut accounts_to_modify: Vec<_> = { - let trie = TrieDB::new(&db, &self.state_root).unwrap(); - let temp = trie.iter().unwrap() // binding required due to complicated lifetime stuff - .filter(|_| rng.gen::() < ACCOUNT_CHURN) - .map(Result::unwrap) - .map(|(k, v)| (H256::from_slice(&k), v.to_owned())) - .collect(); + /// Tick the state producer. This alters the state, writing new data into + /// the database. + pub fn tick(&mut self, rng: &mut R, db: &mut HashDB) { + // modify existing accounts. + let mut accounts_to_modify: Vec<_> = { + let trie = TrieDB::new(&db, &self.state_root).unwrap(); + let temp = trie + .iter() + .unwrap() // binding required due to complicated lifetime stuff + .filter(|_| rng.gen::() < ACCOUNT_CHURN) + .map(Result::unwrap) + .map(|(k, v)| (H256::from_slice(&k), v.to_owned())) + .collect(); - temp - }; + temp + }; - // sweep once to alter storage tries. - for &mut (ref mut address_hash, ref mut account_data) in &mut accounts_to_modify { - let mut account: BasicAccount = ::rlp::decode(&*account_data).expect("error decoding basic account"); - let acct_db = AccountDBMut::from_hash(db, *address_hash); - fill_storage(acct_db, &mut account.storage_root, &mut self.storage_seed); - *account_data = DBValue::from_vec(::rlp::encode(&account)); - } + // sweep once to alter storage tries. 
+ for &mut (ref mut address_hash, ref mut account_data) in &mut accounts_to_modify { + let mut account: BasicAccount = + ::rlp::decode(&*account_data).expect("error decoding basic account"); + let acct_db = AccountDBMut::from_hash(db, *address_hash); + fill_storage(acct_db, &mut account.storage_root, &mut self.storage_seed); + *account_data = DBValue::from_vec(::rlp::encode(&account)); + } - // sweep again to alter account trie. - let mut trie = TrieDBMut::from_existing(db, &mut self.state_root).unwrap(); + // sweep again to alter account trie. + let mut trie = TrieDBMut::from_existing(db, &mut self.state_root).unwrap(); - for (address_hash, account_data) in accounts_to_modify { - trie.insert(&address_hash[..], &account_data).unwrap(); - } + for (address_hash, account_data) in accounts_to_modify { + trie.insert(&address_hash[..], &account_data).unwrap(); + } - // add between 0 and 5 new accounts each tick. - let new_accs = rng.gen::() % 5; + // add between 0 and 5 new accounts each tick. + let new_accs = rng.gen::() % 5; - for _ in 0..new_accs { - let address_hash = H256(rng.gen()); - let balance: usize = rng.gen(); - let nonce: usize = rng.gen(); - let acc = ::state::Account::new_basic(balance.into(), nonce.into()).rlp(); - trie.insert(&address_hash[..], &acc).unwrap(); - } - } + for _ in 0..new_accs { + let address_hash = H256(rng.gen()); + let balance: usize = rng.gen(); + let nonce: usize = rng.gen(); + let acc = ::state::Account::new_basic(balance.into(), nonce.into()).rlp(); + trie.insert(&address_hash[..], &acc).unwrap(); + } + } - /// Get the current state root. - pub fn state_root(&self) -> H256 { - self.state_root - } + /// Get the current state root. + pub fn state_root(&self) -> H256 { + self.state_root + } } /// Fill the storage of an account. 
pub fn fill_storage(mut db: AccountDBMut, root: &mut H256, seed: &mut H256) { - let map = StandardMap { - alphabet: Alphabet::All, - min_key: 6, - journal_key: 6, - value_mode: ValueMode::Random, - count: 100, - }; - { - let mut trie = if *root == KECCAK_NULL_RLP { - SecTrieDBMut::new(&mut db, root) - } else { - SecTrieDBMut::from_existing(&mut db, root).unwrap() - }; + let map = StandardMap { + alphabet: Alphabet::All, + min_key: 6, + journal_key: 6, + value_mode: ValueMode::Random, + count: 100, + }; + { + let mut trie = if *root == KECCAK_NULL_RLP { + SecTrieDBMut::new(&mut db, root) + } else { + SecTrieDBMut::from_existing(&mut db, root).unwrap() + }; - for (k, v) in map.make_with(seed) { - trie.insert(&k, &v).unwrap(); - } - } + for (k, v) in map.make_with(seed) { + trie.insert(&k, &v).unwrap(); + } + } } /// Take a snapshot from the given client into a temporary file. /// Return a snapshot reader for it. pub fn snap(client: &Client) -> (Box, TempDir) { - use types::ids::BlockId; + use types::ids::BlockId; - let tempdir = TempDir::new("").unwrap(); - let path = tempdir.path().join("file"); - let writer = PackedWriter::new(&path).unwrap(); - let progress = Default::default(); + let tempdir = TempDir::new("").unwrap(); + let path = tempdir.path().join("file"); + let writer = PackedWriter::new(&path).unwrap(); + let progress = Default::default(); - let hash = client.chain_info().best_block_hash; - client.take_snapshot(writer, BlockId::Hash(hash), &progress).unwrap(); + let hash = client.chain_info().best_block_hash; + client + .take_snapshot(writer, BlockId::Hash(hash), &progress) + .unwrap(); - let reader = PackedReader::new(&path).unwrap().unwrap(); + let reader = PackedReader::new(&path).unwrap().unwrap(); - (Box::new(reader), tempdir) + (Box::new(reader), tempdir) } /// Restore a snapshot into a given database. This will read chunks from the given reader /// write into the given database. 
pub fn restore( - db: Arc, - engine: &EthEngine, - reader: &SnapshotReader, - genesis: &[u8], + db: Arc, + engine: &EthEngine, + reader: &SnapshotReader, + genesis: &[u8], ) -> Result<(), ::error::Error> { - use std::sync::atomic::AtomicBool; - use snappy; + use snappy; + use std::sync::atomic::AtomicBool; - let flag = AtomicBool::new(true); - let components = engine.snapshot_components().unwrap(); - let manifest = reader.manifest(); + let flag = AtomicBool::new(true); + let components = engine.snapshot_components().unwrap(); + let manifest = reader.manifest(); - let mut state = StateRebuilder::new(db.key_value().clone(), journaldb::Algorithm::Archive); - let mut secondary = { - let chain = BlockChain::new(Default::default(), genesis, db.clone()); - components.rebuilder(chain, db, manifest).unwrap() - }; + let mut state = StateRebuilder::new(db.key_value().clone(), journaldb::Algorithm::Archive); + let mut secondary = { + let chain = BlockChain::new(Default::default(), genesis, db.clone()); + components.rebuilder(chain, db, manifest).unwrap() + }; - let mut snappy_buffer = Vec::new(); + let mut snappy_buffer = Vec::new(); - trace!(target: "snapshot", "restoring state"); - for state_chunk_hash in manifest.state_hashes.iter() { - trace!(target: "snapshot", "state chunk hash: {}", state_chunk_hash); - let chunk = reader.chunk(*state_chunk_hash).unwrap(); - let len = snappy::decompress_into(&chunk, &mut snappy_buffer).unwrap(); - state.feed(&snappy_buffer[..len], &flag)?; - } + trace!(target: "snapshot", "restoring state"); + for state_chunk_hash in manifest.state_hashes.iter() { + trace!(target: "snapshot", "state chunk hash: {}", state_chunk_hash); + let chunk = reader.chunk(*state_chunk_hash).unwrap(); + let len = snappy::decompress_into(&chunk, &mut snappy_buffer).unwrap(); + state.feed(&snappy_buffer[..len], &flag)?; + } - trace!(target: "snapshot", "restoring secondary"); - for chunk_hash in manifest.block_hashes.iter() { - let chunk = 
reader.chunk(*chunk_hash).unwrap(); - let len = snappy::decompress_into(&chunk, &mut snappy_buffer).unwrap(); - secondary.feed(&snappy_buffer[..len], engine, &flag)?; - } + trace!(target: "snapshot", "restoring secondary"); + for chunk_hash in manifest.block_hashes.iter() { + let chunk = reader.chunk(*chunk_hash).unwrap(); + let len = snappy::decompress_into(&chunk, &mut snappy_buffer).unwrap(); + secondary.feed(&snappy_buffer[..len], engine, &flag)?; + } - trace!(target: "snapshot", "finalizing"); - state.finalize(manifest.block_number, manifest.block_hash)?; - secondary.finalize(engine) + trace!(target: "snapshot", "finalizing"); + state.finalize(manifest.block_number, manifest.block_hash)?; + secondary.finalize(engine) } diff --git a/ethcore/src/snapshot/tests/mod.rs b/ethcore/src/snapshot/tests/mod.rs index f25fd03b2..89014170a 100644 --- a/ethcore/src/snapshot/tests/mod.rs +++ b/ethcore/src/snapshot/tests/mod.rs @@ -16,10 +16,10 @@ //! Snapshot tests. -mod proof_of_work; mod proof_of_authority; -mod state; +mod proof_of_work; mod service; +mod state; pub mod helpers; @@ -27,14 +27,14 @@ use super::ManifestData; #[test] fn manifest_rlp() { - let manifest = ManifestData { - version: 2, - block_hashes: Vec::new(), - state_hashes: Vec::new(), - block_number: 1234567, - state_root: Default::default(), - block_hash: Default::default(), - }; - let raw = manifest.clone().into_rlp(); - assert_eq!(ManifestData::from_rlp(&raw).unwrap(), manifest); + let manifest = ManifestData { + version: 2, + block_hashes: Vec::new(), + state_hashes: Vec::new(), + block_number: 1234567, + state_root: Default::default(), + block_hash: Default::default(), + }; + let raw = manifest.clone().into_rlp(); + assert_eq!(ManifestData::from_rlp(&raw).unwrap(), manifest); } diff --git a/ethcore/src/snapshot/tests/proof_of_authority.rs b/ethcore/src/snapshot/tests/proof_of_authority.rs index f1610e6cc..4d4b0d5cd 100644 --- a/ethcore/src/snapshot/tests/proof_of_authority.rs +++ 
b/ethcore/src/snapshot/tests/proof_of_authority.rs @@ -16,18 +16,16 @@ //! PoA block chunker and rebuilder tests. -use std::cell::RefCell; -use std::sync::Arc; -use std::str::FromStr; +use std::{cell::RefCell, str::FromStr, sync::Arc}; use accounts::AccountProvider; -use client::{Client, BlockChainClient, ChainInfo}; +use client::{BlockChainClient, ChainInfo, Client}; use ethkey::Secret; use snapshot::tests::helpers as snapshot_helpers; use spec::Spec; -use test_helpers::generate_dummy_client_with_spec; -use types::transaction::{Transaction, Action, SignedTransaction}; use tempdir::TempDir; +use test_helpers::generate_dummy_client_with_spec; +use types::transaction::{Action, SignedTransaction, Transaction}; use ethereum_types::Address; use test_helpers; @@ -39,17 +37,19 @@ const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes acti const TRANSITION_BLOCK_2: usize = 10; // block at which the second contract activates. macro_rules! secret { - ($e: expr) => { Secret::from($crate::hash::keccak($e).0) } + ($e: expr) => { + Secret::from($crate::hash::keccak($e).0) + }; } lazy_static! { - // contract addresses. - static ref CONTRACT_ADDR_1: Address = Address::from_str("0000000000000000000000000000000000000005").unwrap(); - static ref CONTRACT_ADDR_2: Address = Address::from_str("0000000000000000000000000000000000000006").unwrap(); - // secret: `keccak(1)`, and initial validator. - static ref RICH_ADDR: Address = Address::from_str("7d577a597b2742b498cb5cf0c26cdcd726d39e6e").unwrap(); - // rich address' secret. - static ref RICH_SECRET: Secret = secret!("1"); + // contract addresses. + static ref CONTRACT_ADDR_1: Address = Address::from_str("0000000000000000000000000000000000000005").unwrap(); + static ref CONTRACT_ADDR_2: Address = Address::from_str("0000000000000000000000000000000000000006").unwrap(); + // secret: `keccak(1)`, and initial validator. 
+ static ref RICH_ADDR: Address = Address::from_str("7d577a597b2742b498cb5cf0c26cdcd726d39e6e").unwrap(); + // rich address' secret. + static ref RICH_SECRET: Secret = secret!("1"); } /// Contract code used here: https://gist.github.com/anonymous/2a43783647e0f0dfcc359bd6fd81d6d9 @@ -58,207 +58,234 @@ lazy_static! { /// Create a new Spec with AuthorityRound which uses a contract at address 5 to determine the current validators using `getValidators`. /// `test_validator_set::ValidatorSet` provides a native wrapper for the ABi. fn spec_fixed_to_contract() -> Spec { - let data = include_bytes!("test_validator_contract.json"); - let tempdir = TempDir::new("").unwrap(); - Spec::load(&tempdir.path(), &data[..]).unwrap() + let data = include_bytes!("test_validator_contract.json"); + let tempdir = TempDir::new("").unwrap(); + Spec::load(&tempdir.path(), &data[..]).unwrap() } // creates an account provider, filling it with accounts from all the given // secrets and password `PASS`. // returns addresses corresponding to secrets. fn make_accounts(secrets: &[Secret]) -> (Arc, Vec
) { - let provider = AccountProvider::transient_provider(); + let provider = AccountProvider::transient_provider(); - let addrs = secrets.iter() - .cloned() - .map(|s| provider.insert_account(s, &PASS.into()).unwrap()) - .collect(); + let addrs = secrets + .iter() + .cloned() + .map(|s| provider.insert_account(s, &PASS.into()).unwrap()) + .collect(); - (Arc::new(provider), addrs) + (Arc::new(provider), addrs) } // validator transition. block number and new validators. must be after `TRANSITION_BLOCK`. // all addresses in the set must be in the account provider. enum Transition { - // manual transition via transaction - Manual(usize, Vec
), - // implicit transition via multi-set - Implicit(usize, Vec
), + // manual transition via transaction + Manual(usize, Vec
), + // implicit transition via multi-set + Implicit(usize, Vec
), } // create a chain with the given transitions and some blocks beyond that transition. -fn make_chain(accounts: Arc, blocks_beyond: usize, transitions: Vec) -> Arc { - let client = generate_dummy_client_with_spec(spec_fixed_to_contract); +fn make_chain( + accounts: Arc, + blocks_beyond: usize, + transitions: Vec, +) -> Arc { + let client = generate_dummy_client_with_spec(spec_fixed_to_contract); - let mut cur_signers = vec![*RICH_ADDR]; - { - let engine = client.engine(); - engine.register_client(Arc::downgrade(&client) as _); - } + let mut cur_signers = vec![*RICH_ADDR]; + { + let engine = client.engine(); + engine.register_client(Arc::downgrade(&client) as _); + } - { - // push a block with given number, signed by one of the signers, with given transactions. - let push_block = |signers: &[Address], n, txs: Vec| { - use miner::{self, MinerService}; + { + // push a block with given number, signed by one of the signers, with given transactions. + let push_block = |signers: &[Address], n, txs: Vec| { + use miner::{self, MinerService}; - let idx = n as usize % signers.len(); - trace!(target: "snapshot", "Pushing block #{}, {} txs, author={}", + let idx = n as usize % signers.len(); + trace!(target: "snapshot", "Pushing block #{}, {} txs, author={}", n, txs.len(), signers[idx]); - let signer = Box::new((accounts.clone(), signers[idx], PASS.into())); - client.miner().set_author(miner::Author::Sealer(signer)); - client.miner().import_external_transactions(&*client, - txs.into_iter().map(Into::into).collect()); + let signer = Box::new((accounts.clone(), signers[idx], PASS.into())); + client.miner().set_author(miner::Author::Sealer(signer)); + client + .miner() + .import_external_transactions(&*client, txs.into_iter().map(Into::into).collect()); - client.engine().step(); + client.engine().step(); - assert_eq!(client.chain_info().best_block_number, n); - }; + assert_eq!(client.chain_info().best_block_number, n); + }; - // execution callback for native contract: push 
transaction to be sealed. - let nonce = RefCell::new(client.engine().account_start_nonce(0)); + // execution callback for native contract: push transaction to be sealed. + let nonce = RefCell::new(client.engine().account_start_nonce(0)); - // create useless transactions vector so we don't have to dig in - // and force sealing. - let make_useless_transactions = || { - let mut nonce = nonce.borrow_mut(); - let transaction = Transaction { - nonce: *nonce, - gas_price: 1.into(), - gas: 21_000.into(), - action: Action::Call(Address::new()), - value: 1.into(), - data: Vec::new(), - }.sign(&*RICH_SECRET, client.signing_chain_id()); + // create useless transactions vector so we don't have to dig in + // and force sealing. + let make_useless_transactions = || { + let mut nonce = nonce.borrow_mut(); + let transaction = Transaction { + nonce: *nonce, + gas_price: 1.into(), + gas: 21_000.into(), + action: Action::Call(Address::new()), + value: 1.into(), + data: Vec::new(), + } + .sign(&*RICH_SECRET, client.signing_chain_id()); - *nonce = *nonce + 1; - vec![transaction] - }; + *nonce = *nonce + 1; + vec![transaction] + }; - // apply all transitions. - for transition in transitions { - let (num, manual, new_set) = match transition { - Transition::Manual(num, new_set) => (num, true, new_set), - Transition::Implicit(num, new_set) => (num, false, new_set), - }; + // apply all transitions. 
+ for transition in transitions { + let (num, manual, new_set) = match transition { + Transition::Manual(num, new_set) => (num, true, new_set), + Transition::Implicit(num, new_set) => (num, false, new_set), + }; - if num < TRANSITION_BLOCK_1 { - panic!("Bad test: issued epoch change before transition to contract."); - } + if num < TRANSITION_BLOCK_1 { + panic!("Bad test: issued epoch change before transition to contract."); + } - if (num as u64) < client.chain_info().best_block_number { - panic!("Bad test: issued epoch change before previous transition finalized."); - } + if (num as u64) < client.chain_info().best_block_number { + panic!("Bad test: issued epoch change before previous transition finalized."); + } - for number in client.chain_info().best_block_number + 1 .. num as u64 { - push_block(&cur_signers, number, make_useless_transactions()); - } + for number in client.chain_info().best_block_number + 1..num as u64 { + push_block(&cur_signers, number, make_useless_transactions()); + } - let pending = if manual { - trace!(target: "snapshot", "applying set transition at block #{}", num); - let address = match num >= TRANSITION_BLOCK_2 { - true => &CONTRACT_ADDR_2 as &Address, - false => &CONTRACT_ADDR_1 as &Address, - }; + let pending = if manual { + trace!(target: "snapshot", "applying set transition at block #{}", num); + let address = match num >= TRANSITION_BLOCK_2 { + true => &CONTRACT_ADDR_2 as &Address, + false => &CONTRACT_ADDR_1 as &Address, + }; - let data = test_validator_set::functions::set_validators::encode_input(new_set.clone()); - let mut nonce = nonce.borrow_mut(); - let transaction = Transaction { - nonce: *nonce, - gas_price: 0.into(), - gas: 1_000_000.into(), - action: Action::Call(*address), - value: 0.into(), - data, - }.sign(&*RICH_SECRET, client.signing_chain_id()); + let data = + test_validator_set::functions::set_validators::encode_input(new_set.clone()); + let mut nonce = nonce.borrow_mut(); + let transaction = Transaction { + nonce: 
*nonce, + gas_price: 0.into(), + gas: 1_000_000.into(), + action: Action::Call(*address), + value: 0.into(), + data, + } + .sign(&*RICH_SECRET, client.signing_chain_id()); - *nonce = *nonce + 1; - vec![transaction] - } else { - make_useless_transactions() - }; + *nonce = *nonce + 1; + vec![transaction] + } else { + make_useless_transactions() + }; - // push transition block. - push_block(&cur_signers, num as u64, pending); + // push transition block. + push_block(&cur_signers, num as u64, pending); - // push blocks to finalize transition - for finalization_count in 1.. { - if finalization_count * 2 > cur_signers.len() { break } - push_block(&cur_signers, (num + finalization_count) as u64, make_useless_transactions()); - } + // push blocks to finalize transition + for finalization_count in 1.. { + if finalization_count * 2 > cur_signers.len() { + break; + } + push_block( + &cur_signers, + (num + finalization_count) as u64, + make_useless_transactions(), + ); + } - cur_signers = new_set; - } + cur_signers = new_set; + } - // make blocks beyond. - for number in (client.chain_info().best_block_number..).take(blocks_beyond) { - push_block(&cur_signers, number + 1, make_useless_transactions()); - } - } + // make blocks beyond. 
+ for number in (client.chain_info().best_block_number..).take(blocks_beyond) { + push_block(&cur_signers, number + 1, make_useless_transactions()); + } + } - client + client } #[test] fn fixed_to_contract_only() { - let (provider, addrs) = make_accounts(&[ - RICH_SECRET.clone(), - secret!("foo"), - secret!("bar"), - secret!("test"), - secret!("signer"), - secret!("crypto"), - secret!("wizard"), - secret!("dog42"), - ]); + let (provider, addrs) = make_accounts(&[ + RICH_SECRET.clone(), + secret!("foo"), + secret!("bar"), + secret!("test"), + secret!("signer"), + secret!("crypto"), + secret!("wizard"), + secret!("dog42"), + ]); - assert!(provider.has_account(*RICH_ADDR)); + assert!(provider.has_account(*RICH_ADDR)); - let client = make_chain(provider, 3, vec![ - Transition::Manual(3, vec![addrs[2], addrs[3], addrs[5], addrs[7]]), - Transition::Manual(6, vec![addrs[0], addrs[1], addrs[4], addrs[6]]), - ]); + let client = make_chain( + provider, + 3, + vec![ + Transition::Manual(3, vec![addrs[2], addrs[3], addrs[5], addrs[7]]), + Transition::Manual(6, vec![addrs[0], addrs[1], addrs[4], addrs[6]]), + ], + ); - // 6, 7, 8 prove finality for transition at 6. - // 3 beyond gets us to 11. - assert_eq!(client.chain_info().best_block_number, 11); - let (reader, _tempdir) = snapshot_helpers::snap(&*client); + // 6, 7, 8 prove finality for transition at 6. + // 3 beyond gets us to 11. + assert_eq!(client.chain_info().best_block_number, 11); + let (reader, _tempdir) = snapshot_helpers::snap(&*client); - let new_db = test_helpers::new_db(); - let spec = spec_fixed_to_contract(); + let new_db = test_helpers::new_db(); + let spec = spec_fixed_to_contract(); - // ensure fresh engine's step matches. - for _ in 0..11 { spec.engine.step() } - snapshot_helpers::restore(new_db, &*spec.engine, &*reader, &spec.genesis_block()).unwrap(); + // ensure fresh engine's step matches. 
+ for _ in 0..11 { + spec.engine.step() + } + snapshot_helpers::restore(new_db, &*spec.engine, &*reader, &spec.genesis_block()).unwrap(); } #[test] fn fixed_to_contract_to_contract() { - let (provider, addrs) = make_accounts(&[ - RICH_SECRET.clone(), - secret!("foo"), - secret!("bar"), - secret!("test"), - secret!("signer"), - secret!("crypto"), - secret!("wizard"), - secret!("dog42"), - ]); + let (provider, addrs) = make_accounts(&[ + RICH_SECRET.clone(), + secret!("foo"), + secret!("bar"), + secret!("test"), + secret!("signer"), + secret!("crypto"), + secret!("wizard"), + secret!("dog42"), + ]); - assert!(provider.has_account(*RICH_ADDR)); + assert!(provider.has_account(*RICH_ADDR)); - let client = make_chain(provider, 3, vec![ - Transition::Manual(3, vec![addrs[2], addrs[3], addrs[5], addrs[7]]), - Transition::Manual(6, vec![addrs[0], addrs[1], addrs[4], addrs[6]]), - Transition::Implicit(10, vec![addrs[0]]), - Transition::Manual(13, vec![addrs[2], addrs[4], addrs[6], addrs[7]]), - ]); + let client = make_chain( + provider, + 3, + vec![ + Transition::Manual(3, vec![addrs[2], addrs[3], addrs[5], addrs[7]]), + Transition::Manual(6, vec![addrs[0], addrs[1], addrs[4], addrs[6]]), + Transition::Implicit(10, vec![addrs[0]]), + Transition::Manual(13, vec![addrs[2], addrs[4], addrs[6], addrs[7]]), + ], + ); - assert_eq!(client.chain_info().best_block_number, 16); - let (reader, _tempdir) = snapshot_helpers::snap(&*client); - let new_db = test_helpers::new_db(); - let spec = spec_fixed_to_contract(); + assert_eq!(client.chain_info().best_block_number, 16); + let (reader, _tempdir) = snapshot_helpers::snap(&*client); + let new_db = test_helpers::new_db(); + let spec = spec_fixed_to_contract(); - for _ in 0..16 { spec.engine.step() } - snapshot_helpers::restore(new_db, &*spec.engine, &*reader, &spec.genesis_block()).unwrap(); + for _ in 0..16 { + spec.engine.step() + } + snapshot_helpers::restore(new_db, &*spec.engine, &*reader, &spec.genesis_block()).unwrap(); } diff 
--git a/ethcore/src/snapshot/tests/proof_of_work.rs b/ethcore/src/snapshot/tests/proof_of_work.rs index d970da406..8a048d4cd 100644 --- a/ethcore/src/snapshot/tests/proof_of_work.rs +++ b/ethcore/src/snapshot/tests/proof_of_work.rs @@ -16,134 +16,157 @@ //! PoW block chunker and rebuilder tests. +use error::{Error, ErrorKind}; use std::sync::atomic::AtomicBool; use tempdir::TempDir; -use error::{Error, ErrorKind}; -use blockchain::generator::{BlockGenerator, BlockBuilder}; -use blockchain::{BlockChain, ExtrasInsert}; -use snapshot::{chunk_secondary, Error as SnapshotError, Progress, SnapshotComponents}; -use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}; +use blockchain::{ + generator::{BlockBuilder, BlockGenerator}, + BlockChain, ExtrasInsert, +}; +use snapshot::{ + chunk_secondary, + io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}, + Error as SnapshotError, Progress, SnapshotComponents, +}; +use kvdb::DBTransaction; use parking_lot::Mutex; use snappy; -use kvdb::DBTransaction; use test_helpers; -const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { blocks: 30000, max_restore_blocks: 30000 }; +const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { + blocks: 30000, + max_restore_blocks: 30000, +}; fn chunk_and_restore(amount: u64) { - let genesis = BlockBuilder::genesis(); - let rest = genesis.add_blocks(amount as usize); - let generator = BlockGenerator::new(vec![rest]); - let genesis = genesis.last(); + let genesis = BlockBuilder::genesis(); + let rest = genesis.add_blocks(amount as usize); + let generator = BlockGenerator::new(vec![rest]); + let genesis = genesis.last(); - let engine = ::spec::Spec::new_test().engine; - let tempdir = TempDir::new("").unwrap(); - let snapshot_path = tempdir.path().join("SNAP"); + let engine = ::spec::Spec::new_test().engine; + let tempdir = TempDir::new("").unwrap(); + let snapshot_path = tempdir.path().join("SNAP"); - let old_db = 
test_helpers::new_db(); - let bc = BlockChain::new(Default::default(), genesis.encoded().raw(), old_db.clone()); + let old_db = test_helpers::new_db(); + let bc = BlockChain::new(Default::default(), genesis.encoded().raw(), old_db.clone()); - // build the blockchain. - let mut batch = DBTransaction::new(); - for block in generator { - bc.insert_block(&mut batch, block.encoded(), vec![], ExtrasInsert { - fork_choice: ::engines::ForkChoice::New, - is_finalized: false, - }); - bc.commit(); - } + // build the blockchain. + let mut batch = DBTransaction::new(); + for block in generator { + bc.insert_block( + &mut batch, + block.encoded(), + vec![], + ExtrasInsert { + fork_choice: ::engines::ForkChoice::New, + is_finalized: false, + }, + ); + bc.commit(); + } - old_db.key_value().write(batch).unwrap(); + old_db.key_value().write(batch).unwrap(); - let best_hash = bc.best_block_hash(); + let best_hash = bc.best_block_hash(); - // snapshot it. - let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap()); - let block_hashes = chunk_secondary( - Box::new(SNAPSHOT_MODE), - &bc, - best_hash, - &writer, - &Progress::default() - ).unwrap(); + // snapshot it. + let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap()); + let block_hashes = chunk_secondary( + Box::new(SNAPSHOT_MODE), + &bc, + best_hash, + &writer, + &Progress::default(), + ) + .unwrap(); - let manifest = ::snapshot::ManifestData { - version: 2, - state_hashes: Vec::new(), - block_hashes: block_hashes, - state_root: ::hash::KECCAK_NULL_RLP, - block_number: amount, - block_hash: best_hash, - }; + let manifest = ::snapshot::ManifestData { + version: 2, + state_hashes: Vec::new(), + block_hashes: block_hashes, + state_root: ::hash::KECCAK_NULL_RLP, + block_number: amount, + block_hash: best_hash, + }; - writer.into_inner().finish(manifest.clone()).unwrap(); + writer.into_inner().finish(manifest.clone()).unwrap(); - // restore it. 
- let new_db = test_helpers::new_db(); - let new_chain = BlockChain::new(Default::default(), genesis.encoded().raw(), new_db.clone()); - let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap(); + // restore it. + let new_db = test_helpers::new_db(); + let new_chain = BlockChain::new(Default::default(), genesis.encoded().raw(), new_db.clone()); + let mut rebuilder = SNAPSHOT_MODE + .rebuilder(new_chain, new_db.clone(), &manifest) + .unwrap(); - let reader = PackedReader::new(&snapshot_path).unwrap().unwrap(); - let flag = AtomicBool::new(true); - for chunk_hash in &reader.manifest().block_hashes { - let compressed = reader.chunk(*chunk_hash).unwrap(); - let chunk = snappy::decompress(&compressed).unwrap(); - rebuilder.feed(&chunk, engine.as_ref(), &flag).unwrap(); - } + let reader = PackedReader::new(&snapshot_path).unwrap().unwrap(); + let flag = AtomicBool::new(true); + for chunk_hash in &reader.manifest().block_hashes { + let compressed = reader.chunk(*chunk_hash).unwrap(); + let chunk = snappy::decompress(&compressed).unwrap(); + rebuilder.feed(&chunk, engine.as_ref(), &flag).unwrap(); + } - rebuilder.finalize(engine.as_ref()).unwrap(); - drop(rebuilder); + rebuilder.finalize(engine.as_ref()).unwrap(); + drop(rebuilder); - // and test it. - let new_chain = BlockChain::new(Default::default(), genesis.encoded().raw(), new_db); - assert_eq!(new_chain.best_block_hash(), best_hash); + // and test it. 
+ let new_chain = BlockChain::new(Default::default(), genesis.encoded().raw(), new_db); + assert_eq!(new_chain.best_block_hash(), best_hash); } #[test] fn chunk_and_restore_500() { - chunk_and_restore(500) + chunk_and_restore(500) } #[test] fn chunk_and_restore_4k() { - chunk_and_restore(4000) + chunk_and_restore(4000) } #[test] fn checks_flag() { - use rlp::RlpStream; - use ethereum_types::H256; + use ethereum_types::H256; + use rlp::RlpStream; - let mut stream = RlpStream::new_list(5); + let mut stream = RlpStream::new_list(5); - stream.append(&100u64) - .append(&H256::default()) - .append(&(!0u64)); + stream + .append(&100u64) + .append(&H256::default()) + .append(&(!0u64)); - stream.append_empty_data().append_empty_data(); + stream.append_empty_data().append_empty_data(); - let genesis = BlockBuilder::genesis(); - let chunk = stream.out(); + let genesis = BlockBuilder::genesis(); + let chunk = stream.out(); - let db = test_helpers::new_db(); - let engine = ::spec::Spec::new_test().engine; - let chain = BlockChain::new(Default::default(), genesis.last().encoded().raw(), db.clone()); + let db = test_helpers::new_db(); + let engine = ::spec::Spec::new_test().engine; + let chain = BlockChain::new( + Default::default(), + genesis.last().encoded().raw(), + db.clone(), + ); - let manifest = ::snapshot::ManifestData { - version: 2, - state_hashes: Vec::new(), - block_hashes: Vec::new(), - state_root: ::hash::KECCAK_NULL_RLP, - block_number: 102, - block_hash: H256::default(), - }; + let manifest = ::snapshot::ManifestData { + version: 2, + state_hashes: Vec::new(), + block_hashes: Vec::new(), + state_root: ::hash::KECCAK_NULL_RLP, + block_number: 102, + block_hash: H256::default(), + }; - let mut rebuilder = SNAPSHOT_MODE.rebuilder(chain, db.clone(), &manifest).unwrap(); + let mut rebuilder = SNAPSHOT_MODE + .rebuilder(chain, db.clone(), &manifest) + .unwrap(); - match rebuilder.feed(&chunk, engine.as_ref(), &AtomicBool::new(false)) { - 
Err(Error(ErrorKind::Snapshot(SnapshotError::RestorationAborted), _)) => {} - _ => panic!("Wrong result on abort flag set") - } + match rebuilder.feed(&chunk, engine.as_ref(), &AtomicBool::new(false)) { + Err(Error(ErrorKind::Snapshot(SnapshotError::RestorationAborted), _)) => {} + _ => panic!("Wrong result on abort flag set"), + } } diff --git a/ethcore/src/snapshot/tests/service.rs b/ethcore/src/snapshot/tests/service.rs index 515e5992f..b56f76ec1 100644 --- a/ethcore/src/snapshot/tests/service.rs +++ b/ethcore/src/snapshot/tests/service.rs @@ -16,87 +16,93 @@ //! Tests for the snapshot service. -use std::fs; -use std::sync::Arc; +use std::{fs, sync::Arc}; -use tempdir::TempDir; use blockchain::BlockProvider; -use client::{Client, ClientConfig, ImportBlock, BlockInfo}; -use types::ids::BlockId; -use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}; -use snapshot::service::{Service, ServiceParams}; -use snapshot::{chunk_state, chunk_secondary, ManifestData, Progress, SnapshotService, RestorationStatus}; +use client::{BlockInfo, Client, ClientConfig, ImportBlock}; +use snapshot::{ + chunk_secondary, chunk_state, + io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}, + service::{Service, ServiceParams}, + ManifestData, Progress, RestorationStatus, SnapshotService, +}; use spec::Spec; -use test_helpers::{new_db, new_temp_db, generate_dummy_client_with_spec_and_data, restoration_db_handler}; +use tempdir::TempDir; +use test_helpers::{ + generate_dummy_client_with_spec_and_data, new_db, new_temp_db, restoration_db_handler, +}; +use types::ids::BlockId; -use parking_lot::Mutex; use io::IoChannel; use kvdb_rocksdb::DatabaseConfig; +use parking_lot::Mutex; use verification::queue::kind::blocks::Unverified; #[test] fn restored_is_equivalent() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - const NUM_BLOCKS: u32 = 400; - const TX_PER: usize = 5; + const NUM_BLOCKS: u32 = 400; + const TX_PER: usize = 5; - let 
gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; - let client = generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, TX_PER, &gas_prices); + let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; + let client = + generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, TX_PER, &gas_prices); - let tempdir = TempDir::new("").unwrap(); - let client_db = tempdir.path().join("client_db"); - let path = tempdir.path().join("snapshot"); + let tempdir = TempDir::new("").unwrap(); + let client_db = tempdir.path().join("client_db"); + let path = tempdir.path().join("snapshot"); - let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - let restoration = restoration_db_handler(db_config); - let blockchain_db = restoration.open(&client_db).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let restoration = restoration_db_handler(db_config); + let blockchain_db = restoration.open(&client_db).unwrap(); - let spec = Spec::new_null(); - let client2 = Client::new( - Default::default(), - &spec, - blockchain_db, - Arc::new(::miner::Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); + let spec = Spec::new_null(); + let client2 = Client::new( + Default::default(), + &spec, + blockchain_db, + Arc::new(::miner::Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); - let service_params = ServiceParams { - engine: spec.engine.clone(), - genesis_block: spec.genesis_block(), - restoration_db_handler: restoration, - pruning: ::journaldb::Algorithm::Archive, - channel: IoChannel::disconnected(), - snapshot_root: path, - client: client2.clone(), - }; + let service_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + restoration_db_handler: restoration, + pruning: ::journaldb::Algorithm::Archive, + channel: IoChannel::disconnected(), + snapshot_root: path, + client: client2.clone(), + }; - let service = 
Service::new(service_params).unwrap(); - service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap(); + let service = Service::new(service_params).unwrap(); + service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap(); - let manifest = service.manifest().unwrap(); + let manifest = service.manifest().unwrap(); - service.init_restore(manifest.clone(), true).unwrap(); - assert!(service.init_restore(manifest.clone(), true).is_ok()); + service.init_restore(manifest.clone(), true).unwrap(); + assert!(service.init_restore(manifest.clone(), true).is_ok()); - for hash in manifest.state_hashes { - let chunk = service.chunk(hash).unwrap(); - service.feed_state_chunk(hash, &chunk); - } + for hash in manifest.state_hashes { + let chunk = service.chunk(hash).unwrap(); + service.feed_state_chunk(hash, &chunk); + } - for hash in manifest.block_hashes { - let chunk = service.chunk(hash).unwrap(); - service.feed_block_chunk(hash, &chunk); - } + for hash in manifest.block_hashes { + let chunk = service.chunk(hash).unwrap(); + service.feed_block_chunk(hash, &chunk); + } - assert_eq!(service.status(), RestorationStatus::Inactive); + assert_eq!(service.status(), RestorationStatus::Inactive); - for x in 0..NUM_BLOCKS { - let block1 = client.block(BlockId::Number(x as u64)).unwrap(); - let block2 = client2.block(BlockId::Number(x as u64)).unwrap(); + for x in 0..NUM_BLOCKS { + let block1 = client.block(BlockId::Number(x as u64)).unwrap(); + let block2 = client2.block(BlockId::Number(x as u64)).unwrap(); - assert_eq!(block1, block2); - } + assert_eq!(block1, block2); + } } // on windows the guards deletion (remove_dir_all) @@ -105,245 +111,275 @@ fn restored_is_equivalent() { #[cfg(not(target_os = "windows"))] #[test] fn guards_delete_folders() { - let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; - let client = generate_dummy_client_with_spec_and_data(Spec::new_null, 400, 5, &gas_prices); + let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; + let client = 
generate_dummy_client_with_spec_and_data(Spec::new_null, 400, 5, &gas_prices); - let spec = Spec::new_null(); - let tempdir = TempDir::new("").unwrap(); - let service_params = ServiceParams { - engine: spec.engine.clone(), - genesis_block: spec.genesis_block(), - restoration_db_handler: restoration_db_handler(DatabaseConfig::with_columns(::db::NUM_COLUMNS)), - pruning: ::journaldb::Algorithm::Archive, - channel: IoChannel::disconnected(), - snapshot_root: tempdir.path().to_owned(), - client: client, - }; + let spec = Spec::new_null(); + let tempdir = TempDir::new("").unwrap(); + let service_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + restoration_db_handler: restoration_db_handler(DatabaseConfig::with_columns( + ::db::NUM_COLUMNS, + )), + pruning: ::journaldb::Algorithm::Archive, + channel: IoChannel::disconnected(), + snapshot_root: tempdir.path().to_owned(), + client: client, + }; - let service = Service::new(service_params).unwrap(); - let path = tempdir.path().join("restoration"); + let service = Service::new(service_params).unwrap(); + let path = tempdir.path().join("restoration"); - let manifest = ManifestData { - version: 2, - state_hashes: vec![], - block_hashes: vec![], - block_number: 0, - block_hash: Default::default(), - state_root: Default::default(), - }; + let manifest = ManifestData { + version: 2, + state_hashes: vec![], + block_hashes: vec![], + block_number: 0, + block_hash: Default::default(), + state_root: Default::default(), + }; - service.init_restore(manifest.clone(), true).unwrap(); - assert!(path.exists()); + service.init_restore(manifest.clone(), true).unwrap(); + assert!(path.exists()); - // The `db` folder should have been deleted, - // while the `temp` one kept - service.abort_restore(); - assert!(!path.join("db").exists()); - assert!(path.join("temp").exists()); + // The `db` folder should have been deleted, + // while the `temp` one kept + service.abort_restore(); + 
assert!(!path.join("db").exists()); + assert!(path.join("temp").exists()); - service.init_restore(manifest.clone(), true).unwrap(); - assert!(path.exists()); + service.init_restore(manifest.clone(), true).unwrap(); + assert!(path.exists()); - drop(service); - assert!(!path.join("db").exists()); - assert!(path.join("temp").exists()); + drop(service); + assert!(!path.join("db").exists()); + assert!(path.join("temp").exists()); } #[test] fn keep_ancient_blocks() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - // Test variables - const NUM_BLOCKS: u64 = 500; - const NUM_SNAPSHOT_BLOCKS: u64 = 300; - const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { blocks: NUM_SNAPSHOT_BLOCKS, max_restore_blocks: NUM_SNAPSHOT_BLOCKS }; + // Test variables + const NUM_BLOCKS: u64 = 500; + const NUM_SNAPSHOT_BLOCKS: u64 = 300; + const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { + blocks: NUM_SNAPSHOT_BLOCKS, + max_restore_blocks: NUM_SNAPSHOT_BLOCKS, + }; - // Temporary folders - let tempdir = TempDir::new("").unwrap(); - let snapshot_path = tempdir.path().join("SNAP"); + // Temporary folders + let tempdir = TempDir::new("").unwrap(); + let snapshot_path = tempdir.path().join("SNAP"); - // Generate blocks - let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; - let spec_f = Spec::new_null; - let spec = spec_f(); - let client = generate_dummy_client_with_spec_and_data(spec_f, NUM_BLOCKS as u32, 5, &gas_prices); + // Generate blocks + let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; + let spec_f = Spec::new_null; + let spec = spec_f(); + let client = + generate_dummy_client_with_spec_and_data(spec_f, NUM_BLOCKS as u32, 5, &gas_prices); - let bc = client.chain(); + let bc = client.chain(); - // Create the Snapshot - let best_hash = bc.best_block_hash(); - let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap()); - let block_hashes = chunk_secondary( - Box::new(SNAPSHOT_MODE), - 
&bc, - best_hash, - &writer, - &Progress::default() - ).unwrap(); - let state_db = client.state_db().journal_db().boxed_clone(); - let start_header = bc.block_header_data(&best_hash).unwrap(); - let state_root = start_header.state_root(); - let state_hashes = chunk_state( - state_db.as_hash_db(), - &state_root, - &writer, - &Progress::default(), - None, - 0 - ).unwrap(); + // Create the Snapshot + let best_hash = bc.best_block_hash(); + let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap()); + let block_hashes = chunk_secondary( + Box::new(SNAPSHOT_MODE), + &bc, + best_hash, + &writer, + &Progress::default(), + ) + .unwrap(); + let state_db = client.state_db().journal_db().boxed_clone(); + let start_header = bc.block_header_data(&best_hash).unwrap(); + let state_root = start_header.state_root(); + let state_hashes = chunk_state( + state_db.as_hash_db(), + &state_root, + &writer, + &Progress::default(), + None, + 0, + ) + .unwrap(); - let manifest = ::snapshot::ManifestData { - version: 2, - state_hashes, - state_root, - block_hashes, - block_number: NUM_BLOCKS, - block_hash: best_hash, - }; + let manifest = ::snapshot::ManifestData { + version: 2, + state_hashes, + state_root, + block_hashes, + block_number: NUM_BLOCKS, + block_hash: best_hash, + }; - writer.into_inner().finish(manifest.clone()).unwrap(); + writer.into_inner().finish(manifest.clone()).unwrap(); - // Initialize the Client - let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - let client_db = new_temp_db(&tempdir.path()); - let client2 = Client::new( - ClientConfig::default(), - &spec, - client_db, - Arc::new(::miner::Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); + // Initialize the Client + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let client_db = new_temp_db(&tempdir.path()); + let client2 = Client::new( + ClientConfig::default(), + &spec, + client_db, + Arc::new(::miner::Miner::new_for_tests(&spec, None)), + 
IoChannel::disconnected(), + ) + .unwrap(); - // Add some ancient blocks - for block_number in 1..50 { - let block_hash = bc.block_hash(block_number).unwrap(); - let block = bc.block(&block_hash).unwrap(); - client2.import_block(Unverified::from_rlp(block.into_inner()).unwrap()).unwrap(); - } + // Add some ancient blocks + for block_number in 1..50 { + let block_hash = bc.block_hash(block_number).unwrap(); + let block = bc.block(&block_hash).unwrap(); + client2 + .import_block(Unverified::from_rlp(block.into_inner()).unwrap()) + .unwrap(); + } - client2.import_verified_blocks(); - client2.flush_queue(); + client2.import_verified_blocks(); + client2.flush_queue(); - // Restore the Snapshot - let reader = PackedReader::new(&snapshot_path).unwrap().unwrap(); - let service_params = ServiceParams { - engine: spec.engine.clone(), - genesis_block: spec.genesis_block(), - restoration_db_handler: restoration_db_handler(db_config), - pruning: ::journaldb::Algorithm::Archive, - channel: IoChannel::disconnected(), - snapshot_root: tempdir.path().to_owned(), - client: client2.clone(), - }; - let service = Service::new(service_params).unwrap(); - service.init_restore(manifest.clone(), false).unwrap(); + // Restore the Snapshot + let reader = PackedReader::new(&snapshot_path).unwrap().unwrap(); + let service_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + restoration_db_handler: restoration_db_handler(db_config), + pruning: ::journaldb::Algorithm::Archive, + channel: IoChannel::disconnected(), + snapshot_root: tempdir.path().to_owned(), + client: client2.clone(), + }; + let service = Service::new(service_params).unwrap(); + service.init_restore(manifest.clone(), false).unwrap(); - for hash in &manifest.block_hashes { - let chunk = reader.chunk(*hash).unwrap(); - service.feed_block_chunk(*hash, &chunk); - } + for hash in &manifest.block_hashes { + let chunk = reader.chunk(*hash).unwrap(); + service.feed_block_chunk(*hash, &chunk); + 
} - for hash in &manifest.state_hashes { - let chunk = reader.chunk(*hash).unwrap(); - service.feed_state_chunk(*hash, &chunk); - } + for hash in &manifest.state_hashes { + let chunk = reader.chunk(*hash).unwrap(); + service.feed_state_chunk(*hash, &chunk); + } - match service.status() { - RestorationStatus::Inactive => (), - RestorationStatus::Failed => panic!("Snapshot Restoration has failed."), - RestorationStatus::Ongoing { .. } => panic!("Snapshot Restoration should be done."), - _ => panic!("Invalid Snapshot Service status."), - } + match service.status() { + RestorationStatus::Inactive => (), + RestorationStatus::Failed => panic!("Snapshot Restoration has failed."), + RestorationStatus::Ongoing { .. } => panic!("Snapshot Restoration should be done."), + _ => panic!("Invalid Snapshot Service status."), + } - // Check that the latest block number is the right one - assert_eq!(client2.block(BlockId::Latest).unwrap().number(), NUM_BLOCKS as u64); + // Check that the latest block number is the right one + assert_eq!( + client2.block(BlockId::Latest).unwrap().number(), + NUM_BLOCKS as u64 + ); - // Check that we have blocks in [NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1 ; NUM_BLOCKS] - // but none before - assert!(client2.block(BlockId::Number(NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1)).is_some()); - assert!(client2.block(BlockId::Number(100)).is_none()); + // Check that we have blocks in [NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1 ; NUM_BLOCKS] + // but none before + assert!(client2 + .block(BlockId::Number(NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1)) + .is_some()); + assert!(client2.block(BlockId::Number(100)).is_none()); - // Check that the first 50 blocks have been migrated - for block_number in 1..49 { - assert!(client2.block(BlockId::Number(block_number)).is_some()); - } + // Check that the first 50 blocks have been migrated + for block_number in 1..49 { + assert!(client2.block(BlockId::Number(block_number)).is_some()); + } } #[test] fn recover_aborted_recovery() { - let _ = 
::env_logger::try_init(); + let _ = ::env_logger::try_init(); - const NUM_BLOCKS: u32 = 400; - let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; - let client = generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, 5, &gas_prices); + const NUM_BLOCKS: u32 = 400; + let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; + let client = + generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, 5, &gas_prices); - let spec = Spec::new_null(); - let tempdir = TempDir::new("").unwrap(); - let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - let client_db = new_db(); - let client2 = Client::new( - Default::default(), - &spec, - client_db, - Arc::new(::miner::Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); - let service_params = ServiceParams { - engine: spec.engine.clone(), - genesis_block: spec.genesis_block(), - restoration_db_handler: restoration_db_handler(db_config), - pruning: ::journaldb::Algorithm::Archive, - channel: IoChannel::disconnected(), - snapshot_root: tempdir.path().to_owned(), - client: client2.clone(), - }; + let spec = Spec::new_null(); + let tempdir = TempDir::new("").unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let client_db = new_db(); + let client2 = Client::new( + Default::default(), + &spec, + client_db, + Arc::new(::miner::Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + let service_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + restoration_db_handler: restoration_db_handler(db_config), + pruning: ::journaldb::Algorithm::Archive, + channel: IoChannel::disconnected(), + snapshot_root: tempdir.path().to_owned(), + client: client2.clone(), + }; - let service = Service::new(service_params).unwrap(); - service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap(); + let service = Service::new(service_params).unwrap(); + 
service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap(); - let manifest = service.manifest().unwrap(); - service.init_restore(manifest.clone(), true).unwrap(); + let manifest = service.manifest().unwrap(); + service.init_restore(manifest.clone(), true).unwrap(); - // Restore only the state chunks - for hash in &manifest.state_hashes { - let chunk = service.chunk(*hash).unwrap(); - service.feed_state_chunk(*hash, &chunk); - } + // Restore only the state chunks + for hash in &manifest.state_hashes { + let chunk = service.chunk(*hash).unwrap(); + service.feed_state_chunk(*hash, &chunk); + } - match service.status() { - RestorationStatus::Ongoing { block_chunks_done, state_chunks_done, .. } => { - assert_eq!(state_chunks_done, manifest.state_hashes.len() as u32); - assert_eq!(block_chunks_done, 0); - }, - e => panic!("Snapshot restoration must be ongoing ; {:?}", e), - } + match service.status() { + RestorationStatus::Ongoing { + block_chunks_done, + state_chunks_done, + .. + } => { + assert_eq!(state_chunks_done, manifest.state_hashes.len() as u32); + assert_eq!(block_chunks_done, 0); + } + e => panic!("Snapshot restoration must be ongoing ; {:?}", e), + } - // Abort the restore... - service.abort_restore(); + // Abort the restore... + service.abort_restore(); - // And try again! - service.init_restore(manifest.clone(), true).unwrap(); + // And try again! + service.init_restore(manifest.clone(), true).unwrap(); - match service.status() { - RestorationStatus::Ongoing { block_chunks_done, state_chunks_done, .. } => { - assert_eq!(state_chunks_done, manifest.state_hashes.len() as u32); - assert_eq!(block_chunks_done, 0); - }, - e => panic!("Snapshot restoration must be ongoing ; {:?}", e), - } + match service.status() { + RestorationStatus::Ongoing { + block_chunks_done, + state_chunks_done, + .. 
+ } => { + assert_eq!(state_chunks_done, manifest.state_hashes.len() as u32); + assert_eq!(block_chunks_done, 0); + } + e => panic!("Snapshot restoration must be ongoing ; {:?}", e), + } - // Remove the snapshot directory, and restart the restoration - // It shouldn't have restored any previous blocks - fs::remove_dir_all(tempdir.path()).unwrap(); + // Remove the snapshot directory, and restart the restoration + // It shouldn't have restored any previous blocks + fs::remove_dir_all(tempdir.path()).unwrap(); - // And try again! - service.init_restore(manifest.clone(), true).unwrap(); + // And try again! + service.init_restore(manifest.clone(), true).unwrap(); - match service.status() { - RestorationStatus::Ongoing { block_chunks_done, state_chunks_done, .. } => { - assert_eq!(block_chunks_done, 0); - assert_eq!(state_chunks_done, 0); - }, - _ => panic!("Snapshot restoration must be ongoing"), - } + match service.status() { + RestorationStatus::Ongoing { + block_chunks_done, + state_chunks_done, + .. + } => { + assert_eq!(block_chunks_done, 0); + assert_eq!(state_chunks_done, 0); + } + _ => panic!("Snapshot restoration must be ongoing"), + } } diff --git a/ethcore/src/snapshot/tests/state.rs b/ethcore/src/snapshot/tests/state.rs index 0d9760332..8c8ce8d6c 100644 --- a/ethcore/src/snapshot/tests/state.rs +++ b/ethcore/src/snapshot/tests/state.rs @@ -16,189 +16,218 @@ //! State snapshotting tests. 
-use std::sync::Arc; -use std::sync::atomic::AtomicBool; -use hash::{KECCAK_NULL_RLP, keccak}; +use hash::{keccak, KECCAK_NULL_RLP}; +use std::sync::{atomic::AtomicBool, Arc}; -use types::basic_account::BasicAccount; -use snapshot::account; -use snapshot::{chunk_state, Error as SnapshotError, Progress, StateRebuilder, SNAPSHOT_SUBPARTS}; -use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}; use super::helpers::StateProducer; +use snapshot::{ + account, chunk_state, + io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}, + Error as SnapshotError, Progress, StateRebuilder, SNAPSHOT_SUBPARTS, +}; +use types::basic_account::BasicAccount; use error::{Error, ErrorKind}; -use rand::{XorShiftRng, SeedableRng}; use ethereum_types::H256; use journaldb::{self, Algorithm}; use kvdb_rocksdb::{Database, DatabaseConfig}; use parking_lot::Mutex; +use rand::{SeedableRng, XorShiftRng}; use tempdir::TempDir; #[test] fn snap_and_restore() { - use hash_db::HashDB; - let mut producer = StateProducer::new(); - let mut rng = XorShiftRng::from_seed([1, 2, 3, 4]); - let mut old_db = journaldb::new_memory_db(); - let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + use hash_db::HashDB; + let mut producer = StateProducer::new(); + let mut rng = XorShiftRng::from_seed([1, 2, 3, 4]); + let mut old_db = journaldb::new_memory_db(); + let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - for _ in 0..150 { - producer.tick(&mut rng, &mut old_db); - } + for _ in 0..150 { + producer.tick(&mut rng, &mut old_db); + } - let tempdir = TempDir::new("").unwrap(); - let snap_file = tempdir.path().join("SNAP"); + let tempdir = TempDir::new("").unwrap(); + let snap_file = tempdir.path().join("SNAP"); - let state_root = producer.state_root(); - let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap()); + let state_root = producer.state_root(); + let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap()); - let mut state_hashes = Vec::new(); 
- for part in 0..SNAPSHOT_SUBPARTS { - let mut hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default(), Some(part), 0).unwrap(); - state_hashes.append(&mut hashes); - } + let mut state_hashes = Vec::new(); + for part in 0..SNAPSHOT_SUBPARTS { + let mut hashes = chunk_state( + &old_db, + &state_root, + &writer, + &Progress::default(), + Some(part), + 0, + ) + .unwrap(); + state_hashes.append(&mut hashes); + } - writer.into_inner().finish(::snapshot::ManifestData { - version: 2, - state_hashes: state_hashes, - block_hashes: Vec::new(), - state_root: state_root, - block_number: 1000, - block_hash: H256::default(), - }).unwrap(); + writer + .into_inner() + .finish(::snapshot::ManifestData { + version: 2, + state_hashes: state_hashes, + block_hashes: Vec::new(), + state_root: state_root, + block_number: 1000, + block_hash: H256::default(), + }) + .unwrap(); - let db_path = tempdir.path().join("db"); - let db = { - let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap()); - let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent); - let reader = PackedReader::new(&snap_file).unwrap().unwrap(); + let db_path = tempdir.path().join("db"); + let db = { + let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap()); + let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent); + let reader = PackedReader::new(&snap_file).unwrap().unwrap(); - let flag = AtomicBool::new(true); + let flag = AtomicBool::new(true); - for chunk_hash in &reader.manifest().state_hashes { - let raw = reader.chunk(*chunk_hash).unwrap(); - let chunk = ::snappy::decompress(&raw).unwrap(); + for chunk_hash in &reader.manifest().state_hashes { + let raw = reader.chunk(*chunk_hash).unwrap(); + let chunk = ::snappy::decompress(&raw).unwrap(); - rebuilder.feed(&chunk, &flag).unwrap(); - } + rebuilder.feed(&chunk, &flag).unwrap(); + } - assert_eq!(rebuilder.state_root(), state_root); - 
rebuilder.finalize(1000, H256::default()).unwrap(); + assert_eq!(rebuilder.state_root(), state_root); + rebuilder.finalize(1000, H256::default()).unwrap(); - new_db - }; + new_db + }; - let new_db = journaldb::new(db, Algorithm::OverlayRecent, ::db::COL_STATE); - assert_eq!(new_db.earliest_era(), Some(1000)); - let keys = old_db.keys(); + let new_db = journaldb::new(db, Algorithm::OverlayRecent, ::db::COL_STATE); + assert_eq!(new_db.earliest_era(), Some(1000)); + let keys = old_db.keys(); - for key in keys.keys() { - assert_eq!(old_db.get(&key).unwrap(), new_db.as_hash_db().get(&key).unwrap()); - } + for key in keys.keys() { + assert_eq!( + old_db.get(&key).unwrap(), + new_db.as_hash_db().get(&key).unwrap() + ); + } } #[test] fn get_code_from_prev_chunk() { - use std::collections::HashSet; - use rlp::RlpStream; - use ethereum_types::{H256, U256}; - use hash_db::HashDB; + use ethereum_types::{H256, U256}; + use hash_db::HashDB; + use rlp::RlpStream; + use std::collections::HashSet; - use account_db::{AccountDBMut, AccountDB}; + use account_db::{AccountDB, AccountDBMut}; - let code = b"this is definitely code"; - let mut used_code = HashSet::new(); - let mut acc_stream = RlpStream::new_list(4); - acc_stream.append(&U256::default()) - .append(&U256::default()) - .append(&KECCAK_NULL_RLP) - .append(&keccak(code)); + let code = b"this is definitely code"; + let mut used_code = HashSet::new(); + let mut acc_stream = RlpStream::new_list(4); + acc_stream + .append(&U256::default()) + .append(&U256::default()) + .append(&KECCAK_NULL_RLP) + .append(&keccak(code)); - let (h1, h2) = (H256::random(), H256::random()); + let (h1, h2) = (H256::random(), H256::random()); - // two accounts with the same code, one per chunk. - // first one will have code inlined, - // second will just have its hash. - let thin_rlp = acc_stream.out(); - let acc: BasicAccount = ::rlp::decode(&thin_rlp).expect("error decoding basic account"); + // two accounts with the same code, one per chunk. 
+ // first one will have code inlined, + // second will just have its hash. + let thin_rlp = acc_stream.out(); + let acc: BasicAccount = ::rlp::decode(&thin_rlp).expect("error decoding basic account"); - let mut make_chunk = |acc, hash| { - let mut db = journaldb::new_memory_db(); - AccountDBMut::from_hash(&mut db, hash).insert(&code[..]); - let p = Progress::default(); - let fat_rlp = account::to_fat_rlps(&hash, &acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value(), usize::max_value(), &p).unwrap(); - let mut stream = RlpStream::new_list(1); - stream.append_raw(&fat_rlp[0], 1); - stream.out() - }; + let mut make_chunk = |acc, hash| { + let mut db = journaldb::new_memory_db(); + AccountDBMut::from_hash(&mut db, hash).insert(&code[..]); + let p = Progress::default(); + let fat_rlp = account::to_fat_rlps( + &hash, + &acc, + &AccountDB::from_hash(&db, hash), + &mut used_code, + usize::max_value(), + usize::max_value(), + &p, + ) + .unwrap(); + let mut stream = RlpStream::new_list(1); + stream.append_raw(&fat_rlp[0], 1); + stream.out() + }; - let chunk1 = make_chunk(acc.clone(), h1); - let chunk2 = make_chunk(acc, h2); + let chunk1 = make_chunk(acc.clone(), h1); + let chunk2 = make_chunk(acc, h2); - let tempdir = TempDir::new("").unwrap(); - let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - let new_db = Arc::new(Database::open(&db_cfg, tempdir.path().to_str().unwrap()).unwrap()); + let tempdir = TempDir::new("").unwrap(); + let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let new_db = Arc::new(Database::open(&db_cfg, tempdir.path().to_str().unwrap()).unwrap()); - { - let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent); - let flag = AtomicBool::new(true); + { + let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent); + let flag = AtomicBool::new(true); - rebuilder.feed(&chunk1, &flag).unwrap(); - rebuilder.feed(&chunk2, &flag).unwrap(); + rebuilder.feed(&chunk1, 
&flag).unwrap(); + rebuilder.feed(&chunk2, &flag).unwrap(); - rebuilder.finalize(1000, H256::random()).unwrap(); - } + rebuilder.finalize(1000, H256::random()).unwrap(); + } - let state_db = journaldb::new(new_db, Algorithm::OverlayRecent, ::db::COL_STATE); - assert_eq!(state_db.earliest_era(), Some(1000)); + let state_db = journaldb::new(new_db, Algorithm::OverlayRecent, ::db::COL_STATE); + assert_eq!(state_db.earliest_era(), Some(1000)); } #[test] fn checks_flag() { - let mut producer = StateProducer::new(); - let mut rng = XorShiftRng::from_seed([5, 6, 7, 8]); - let mut old_db = journaldb::new_memory_db(); - let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let mut producer = StateProducer::new(); + let mut rng = XorShiftRng::from_seed([5, 6, 7, 8]); + let mut old_db = journaldb::new_memory_db(); + let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - for _ in 0..10 { - producer.tick(&mut rng, &mut old_db); - } + for _ in 0..10 { + producer.tick(&mut rng, &mut old_db); + } - let tempdir = TempDir::new("").unwrap(); - let snap_file = tempdir.path().join("SNAP"); + let tempdir = TempDir::new("").unwrap(); + let snap_file = tempdir.path().join("SNAP"); - let state_root = producer.state_root(); - let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap()); + let state_root = producer.state_root(); + let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap()); - let state_hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default(), None, 0).unwrap(); + let state_hashes = + chunk_state(&old_db, &state_root, &writer, &Progress::default(), None, 0).unwrap(); - writer.into_inner().finish(::snapshot::ManifestData { - version: 2, - state_hashes, - block_hashes: Vec::new(), - state_root, - block_number: 0, - block_hash: H256::default(), - }).unwrap(); + writer + .into_inner() + .finish(::snapshot::ManifestData { + version: 2, + state_hashes, + block_hashes: Vec::new(), + state_root, + block_number: 0, + block_hash: 
H256::default(), + }) + .unwrap(); - let tempdir = TempDir::new("").unwrap(); - let db_path = tempdir.path().join("db"); - { - let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap()); - let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent); - let reader = PackedReader::new(&snap_file).unwrap().unwrap(); + let tempdir = TempDir::new("").unwrap(); + let db_path = tempdir.path().join("db"); + { + let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap()); + let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent); + let reader = PackedReader::new(&snap_file).unwrap().unwrap(); - let flag = AtomicBool::new(false); + let flag = AtomicBool::new(false); - for chunk_hash in &reader.manifest().state_hashes { - let raw = reader.chunk(*chunk_hash).unwrap(); - let chunk = ::snappy::decompress(&raw).unwrap(); + for chunk_hash in &reader.manifest().state_hashes { + let raw = reader.chunk(*chunk_hash).unwrap(); + let chunk = ::snappy::decompress(&raw).unwrap(); - match rebuilder.feed(&chunk, &flag) { - Err(Error(ErrorKind::Snapshot(SnapshotError::RestorationAborted), _)) => {}, - _ => panic!("unexpected result when feeding with flag off"), - } - } - } + match rebuilder.feed(&chunk, &flag) { + Err(Error(ErrorKind::Snapshot(SnapshotError::RestorationAborted), _)) => {} + _ => panic!("unexpected result when feeding with flag off"), + } + } + } } diff --git a/ethcore/src/snapshot/traits.rs b/ethcore/src/snapshot/traits.rs index aa61b595b..d2d6ca665 100644 --- a/ethcore/src/snapshot/traits.rs +++ b/ethcore/src/snapshot/traits.rs @@ -15,49 +15,49 @@ // along with Parity Ethereum. If not, see . use super::{ManifestData, RestorationStatus}; -use ethereum_types::H256; use bytes::Bytes; +use ethereum_types::H256; /// The interface for a snapshot network service. /// This handles: /// - restoration of snapshots to temporary databases. 
/// - responding to queries for snapshot manifests and chunks -pub trait SnapshotService : Sync + Send { - /// Query the most recent manifest data. - fn manifest(&self) -> Option; +pub trait SnapshotService: Sync + Send { + /// Query the most recent manifest data. + fn manifest(&self) -> Option; - /// Get the supported range of snapshot version numbers. - /// `None` indicates warp sync isn't supported by the consensus engine. - fn supported_versions(&self) -> Option<(u64, u64)>; + /// Get the supported range of snapshot version numbers. + /// `None` indicates warp sync isn't supported by the consensus engine. + fn supported_versions(&self) -> Option<(u64, u64)>; - /// Returns a list of the completed chunks - fn completed_chunks(&self) -> Option>; + /// Returns a list of the completed chunks + fn completed_chunks(&self) -> Option>; - /// Get raw chunk for a given hash. - fn chunk(&self, hash: H256) -> Option; + /// Get raw chunk for a given hash. + fn chunk(&self, hash: H256) -> Option; - /// Ask the snapshot service for the restoration status. - fn status(&self) -> RestorationStatus; + /// Ask the snapshot service for the restoration status. + fn status(&self) -> RestorationStatus; - /// Begin snapshot restoration. - /// If restoration in-progress, this will reset it. - /// From this point on, any previous snapshot may become unavailable. - fn begin_restore(&self, manifest: ManifestData); + /// Begin snapshot restoration. + /// If restoration in-progress, this will reset it. + /// From this point on, any previous snapshot may become unavailable. + fn begin_restore(&self, manifest: ManifestData); - /// Abort an in-progress restoration if there is one. - fn abort_restore(&self); + /// Abort an in-progress restoration if there is one. + fn abort_restore(&self); - /// Feed a raw state chunk to the service to be processed asynchronously. - /// no-op if not currently restoring. 
- fn restore_state_chunk(&self, hash: H256, chunk: Bytes); + /// Feed a raw state chunk to the service to be processed asynchronously. + /// no-op if not currently restoring. + fn restore_state_chunk(&self, hash: H256, chunk: Bytes); - /// Feed a raw block chunk to the service to be processed asynchronously. - /// no-op if currently restoring. - fn restore_block_chunk(&self, hash: H256, chunk: Bytes); + /// Feed a raw block chunk to the service to be processed asynchronously. + /// no-op if currently restoring. + fn restore_block_chunk(&self, hash: H256, chunk: Bytes); - /// Abort in-progress snapshotting if there is one. - fn abort_snapshot(&self); + /// Abort in-progress snapshotting if there is one. + fn abort_snapshot(&self); - /// Shutdown the Snapshot Service by aborting any ongoing restore - fn shutdown(&self); + /// Shutdown the Snapshot Service by aborting any ongoing restore + fn shutdown(&self); } diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs index 5c4712cff..e65be7eaf 100644 --- a/ethcore/src/snapshot/watcher.rs +++ b/ethcore/src/snapshot/watcher.rs @@ -16,181 +16,206 @@ //! Watcher for snapshot-related chain events. +use client::{BlockInfo, ChainNotify, Client, ClientIoMessage, NewBlocks}; use parking_lot::Mutex; -use client::{BlockInfo, Client, ChainNotify, NewBlocks, ClientIoMessage}; use types::ids::BlockId; -use io::IoChannel; use ethereum_types::H256; +use io::IoChannel; use std::sync::Arc; // helper trait for transforming hashes to numbers and checking if syncing. 
trait Oracle: Send + Sync { - fn to_number(&self, hash: H256) -> Option; + fn to_number(&self, hash: H256) -> Option; - fn is_major_importing(&self) -> bool; + fn is_major_importing(&self) -> bool; } -struct StandardOracle where F: 'static + Send + Sync + Fn() -> bool { - client: Arc, - sync_status: F, +struct StandardOracle +where + F: 'static + Send + Sync + Fn() -> bool, +{ + client: Arc, + sync_status: F, } impl Oracle for StandardOracle - where F: Send + Sync + Fn() -> bool +where + F: Send + Sync + Fn() -> bool, { - fn to_number(&self, hash: H256) -> Option { - self.client.block_header(BlockId::Hash(hash)).map(|h| h.number()) - } + fn to_number(&self, hash: H256) -> Option { + self.client + .block_header(BlockId::Hash(hash)) + .map(|h| h.number()) + } - fn is_major_importing(&self) -> bool { - (self.sync_status)() - } + fn is_major_importing(&self) -> bool { + (self.sync_status)() + } } // helper trait for broadcasting a block to take a snapshot at. trait Broadcast: Send + Sync { - fn take_at(&self, num: Option); + fn take_at(&self, num: Option); } impl Broadcast for Mutex> { - fn take_at(&self, num: Option) { - let num = match num { - Some(n) => n, - None => return, - }; + fn take_at(&self, num: Option) { + let num = match num { + Some(n) => n, + None => return, + }; - trace!(target: "snapshot_watcher", "broadcast: {}", num); + trace!(target: "snapshot_watcher", "broadcast: {}", num); - if let Err(e) = self.lock().send(ClientIoMessage::TakeSnapshot(num)) { - warn!("Snapshot watcher disconnected from IoService: {}", e); - } - } + if let Err(e) = self.lock().send(ClientIoMessage::TakeSnapshot(num)) { + warn!("Snapshot watcher disconnected from IoService: {}", e); + } + } } /// A `ChainNotify` implementation which will trigger a snapshot event /// at certain block numbers. 
pub struct Watcher { - oracle: Box, - broadcast: Box, - period: u64, - history: u64, + oracle: Box, + broadcast: Box, + period: u64, + history: u64, } impl Watcher { - /// Create a new `Watcher` which will trigger a snapshot event - /// once every `period` blocks, but only after that block is - /// `history` blocks old. - pub fn new(client: Arc, sync_status: F, channel: IoChannel, period: u64, history: u64) -> Self - where F: 'static + Send + Sync + Fn() -> bool - { - Watcher { - oracle: Box::new(StandardOracle { - client: client, - sync_status: sync_status, - }), - broadcast: Box::new(Mutex::new(channel)), - period: period, - history: history, - } - } + /// Create a new `Watcher` which will trigger a snapshot event + /// once every `period` blocks, but only after that block is + /// `history` blocks old. + pub fn new( + client: Arc, + sync_status: F, + channel: IoChannel, + period: u64, + history: u64, + ) -> Self + where + F: 'static + Send + Sync + Fn() -> bool, + { + Watcher { + oracle: Box::new(StandardOracle { + client: client, + sync_status: sync_status, + }), + broadcast: Box::new(Mutex::new(channel)), + period: period, + history: history, + } + } } impl ChainNotify for Watcher { - fn new_blocks(&self, new_blocks: NewBlocks) { - if self.oracle.is_major_importing() || new_blocks.has_more_blocks_to_import { return } + fn new_blocks(&self, new_blocks: NewBlocks) { + if self.oracle.is_major_importing() || new_blocks.has_more_blocks_to_import { + return; + } - trace!(target: "snapshot_watcher", "{} imported", new_blocks.imported.len()); + trace!(target: "snapshot_watcher", "{} imported", new_blocks.imported.len()); - let highest = new_blocks.imported.into_iter() - .filter_map(|h| self.oracle.to_number(h)) - .filter(|&num| num >= self.period + self.history) - .map(|num| num - self.history) - .filter(|num| num % self.period == 0) - .fold(0, ::std::cmp::max); + let highest = new_blocks + .imported + .into_iter() + .filter_map(|h| self.oracle.to_number(h)) + 
.filter(|&num| num >= self.period + self.history) + .map(|num| num - self.history) + .filter(|num| num % self.period == 0) + .fold(0, ::std::cmp::max); - match highest { - 0 => self.broadcast.take_at(None), - _ => self.broadcast.take_at(Some(highest)), - } - } + match highest { + 0 => self.broadcast.take_at(None), + _ => self.broadcast.take_at(Some(highest)), + } + } } #[cfg(test)] mod tests { - use super::{Broadcast, Oracle, Watcher}; + use super::{Broadcast, Oracle, Watcher}; - use client::{ChainNotify, NewBlocks, ChainRoute}; + use client::{ChainNotify, ChainRoute, NewBlocks}; - use ethereum_types::{H256, U256}; + use ethereum_types::{H256, U256}; - use std::collections::HashMap; - use std::time::Duration; + use std::{collections::HashMap, time::Duration}; - struct TestOracle(HashMap); + struct TestOracle(HashMap); - impl Oracle for TestOracle { - fn to_number(&self, hash: H256) -> Option { - self.0.get(&hash).cloned() - } + impl Oracle for TestOracle { + fn to_number(&self, hash: H256) -> Option { + self.0.get(&hash).cloned() + } - fn is_major_importing(&self) -> bool { false } - } + fn is_major_importing(&self) -> bool { + false + } + } - struct TestBroadcast(Option); - impl Broadcast for TestBroadcast { - fn take_at(&self, num: Option) { - if num != self.0 { - panic!("Watcher broadcast wrong number. Expected {:?}, found {:?}", self.0, num); - } - } - } + struct TestBroadcast(Option); + impl Broadcast for TestBroadcast { + fn take_at(&self, num: Option) { + if num != self.0 { + panic!( + "Watcher broadcast wrong number. Expected {:?}, found {:?}", + self.0, num + ); + } + } + } - // helper harness for tests which expect a notification. - fn harness(numbers: Vec, period: u64, history: u64, expected: Option) { - const DURATION_ZERO: Duration = Duration::from_millis(0); + // helper harness for tests which expect a notification. 
+ fn harness(numbers: Vec, period: u64, history: u64, expected: Option) { + const DURATION_ZERO: Duration = Duration::from_millis(0); - let hashes: Vec<_> = numbers.clone().into_iter().map(|x| H256::from(U256::from(x))).collect(); - let map = hashes.clone().into_iter().zip(numbers).collect(); + let hashes: Vec<_> = numbers + .clone() + .into_iter() + .map(|x| H256::from(U256::from(x))) + .collect(); + let map = hashes.clone().into_iter().zip(numbers).collect(); - let watcher = Watcher { - oracle: Box::new(TestOracle(map)), - broadcast: Box::new(TestBroadcast(expected)), - period: period, - history: history, - }; + let watcher = Watcher { + oracle: Box::new(TestOracle(map)), + broadcast: Box::new(TestBroadcast(expected)), + period: period, + history: history, + }; - watcher.new_blocks(NewBlocks::new( - hashes, - vec![], - ChainRoute::default(), - vec![], - vec![], - DURATION_ZERO, - false - )); - } + watcher.new_blocks(NewBlocks::new( + hashes, + vec![], + ChainRoute::default(), + vec![], + vec![], + DURATION_ZERO, + false, + )); + } - // helper + // helper - #[test] - fn should_not_fire() { - harness(vec![0], 5, 0, None); - } + #[test] + fn should_not_fire() { + harness(vec![0], 5, 0, None); + } - #[test] - fn fires_once_for_two() { - harness(vec![14, 15], 10, 5, Some(10)); - } + #[test] + fn fires_once_for_two() { + harness(vec![14, 15], 10, 5, Some(10)); + } - #[test] - fn finds_highest() { - harness(vec![15, 25], 10, 5, Some(20)); - } + #[test] + fn finds_highest() { + harness(vec![15, 25], 10, 5, Some(20)); + } - #[test] - fn doesnt_fire_before_history() { - harness(vec![10, 11], 10, 5, None); - } + #[test] + fn doesnt_fire_before_history() { + harness(vec![10, 11], 10, 5, None); + } } diff --git a/ethcore/src/spec/genesis.rs b/ethcore/src/spec/genesis.rs index 96a42178d..1992fba7b 100644 --- a/ethcore/src/spec/genesis.rs +++ b/ethcore/src/spec/genesis.rs @@ -14,51 +14,55 @@ // You should have received a copy of the GNU General Public License // along with 
Parity Ethereum. If not, see . -use ethereum_types::{H256, U256, Address}; +use ethereum_types::{Address, H256, U256}; use ethjson; use hash::KECCAK_NULL_RLP; use spec::seal::Seal; /// Genesis components. pub struct Genesis { - /// Seal. - pub seal: Seal, - /// Difficulty. - pub difficulty: U256, - /// Author. - pub author: Address, - /// Timestamp. - pub timestamp: u64, - /// Parent hash. - pub parent_hash: H256, - /// Gas limit. - pub gas_limit: U256, - /// Transactions root. - pub transactions_root: H256, - /// Receipts root. - pub receipts_root: H256, - /// State root. - pub state_root: Option, - /// Gas used. - pub gas_used: U256, - /// Extra data. - pub extra_data: Vec, + /// Seal. + pub seal: Seal, + /// Difficulty. + pub difficulty: U256, + /// Author. + pub author: Address, + /// Timestamp. + pub timestamp: u64, + /// Parent hash. + pub parent_hash: H256, + /// Gas limit. + pub gas_limit: U256, + /// Transactions root. + pub transactions_root: H256, + /// Receipts root. + pub receipts_root: H256, + /// State root. + pub state_root: Option, + /// Gas used. + pub gas_used: U256, + /// Extra data. 
+ pub extra_data: Vec, } impl From for Genesis { - fn from(g: ethjson::spec::Genesis) -> Self { - Genesis { - seal: From::from(g.seal), - difficulty: g.difficulty.into(), - author: g.author.map_or_else(Address::zero, Into::into), - timestamp: g.timestamp.map_or(0, Into::into), - parent_hash: g.parent_hash.map_or_else(H256::zero, Into::into), - gas_limit: g.gas_limit.into(), - transactions_root: g.transactions_root.map_or_else(|| KECCAK_NULL_RLP.clone(), Into::into), - receipts_root: g.receipts_root.map_or_else(|| KECCAK_NULL_RLP.clone(), Into::into), - state_root: g.state_root.map(Into::into), - gas_used: g.gas_used.map_or_else(U256::zero, Into::into), - extra_data: g.extra_data.map_or_else(Vec::new, Into::into), - } - } + fn from(g: ethjson::spec::Genesis) -> Self { + Genesis { + seal: From::from(g.seal), + difficulty: g.difficulty.into(), + author: g.author.map_or_else(Address::zero, Into::into), + timestamp: g.timestamp.map_or(0, Into::into), + parent_hash: g.parent_hash.map_or_else(H256::zero, Into::into), + gas_limit: g.gas_limit.into(), + transactions_root: g + .transactions_root + .map_or_else(|| KECCAK_NULL_RLP.clone(), Into::into), + receipts_root: g + .receipts_root + .map_or_else(|| KECCAK_NULL_RLP.clone(), Into::into), + state_root: g.state_root.map(Into::into), + gas_used: g.gas_used.map_or_else(U256::zero, Into::into), + extra_data: g.extra_data.map_or_else(Vec::new, Into::into), + } + } } diff --git a/ethcore/src/spec/mod.rs b/ethcore/src/spec/mod.rs index 5d90b5fbf..85b4e75cd 100644 --- a/ethcore/src/spec/mod.rs +++ b/ethcore/src/spec/mod.rs @@ -20,5 +20,7 @@ mod genesis; mod seal; mod spec; -pub use self::genesis::Genesis; -pub use self::spec::{Spec, SpecHardcodedSync, SpecParams, CommonParams, OptimizeFor}; +pub use self::{ + genesis::Genesis, + spec::{CommonParams, OptimizeFor, Spec, SpecHardcodedSync, SpecParams}, +}; diff --git a/ethcore/src/spec/seal.rs b/ethcore/src/spec/seal.rs index ed70ac8b5..26b8e2c89 100644 --- a/ethcore/src/spec/seal.rs 
+++ b/ethcore/src/spec/seal.rs @@ -16,105 +16,105 @@ //! Spec seal. -use rlp::RlpStream; -use ethereum_types::{H64, H256, H520}; +use ethereum_types::{H256, H520, H64}; use ethjson; +use rlp::RlpStream; /// Classic ethereum seal. pub struct Ethereum { - /// Seal nonce. - pub nonce: H64, - /// Seal mix hash. - pub mix_hash: H256, + /// Seal nonce. + pub nonce: H64, + /// Seal mix hash. + pub mix_hash: H256, } impl Into for Ethereum { - fn into(self) -> Generic { - let mut s = RlpStream::new_list(2); - s.append(&self.mix_hash).append(&self.nonce); - Generic(s.out()) - } + fn into(self) -> Generic { + let mut s = RlpStream::new_list(2); + s.append(&self.mix_hash).append(&self.nonce); + Generic(s.out()) + } } /// AuthorityRound seal. pub struct AuthorityRound { - /// Seal step. - pub step: usize, - /// Seal signature. - pub signature: H520, + /// Seal step. + pub step: usize, + /// Seal signature. + pub signature: H520, } /// Tendermint seal. pub struct Tendermint { - /// Seal round. - pub round: usize, - /// Proposal seal signature. - pub proposal: H520, - /// Precommit seal signatures. - pub precommits: Vec, + /// Seal round. + pub round: usize, + /// Proposal seal signature. + pub proposal: H520, + /// Precommit seal signatures. 
+ pub precommits: Vec, } impl Into for AuthorityRound { - fn into(self) -> Generic { - let mut s = RlpStream::new_list(2); - s.append(&self.step).append(&self.signature); - Generic(s.out()) - } + fn into(self) -> Generic { + let mut s = RlpStream::new_list(2); + s.append(&self.step).append(&self.signature); + Generic(s.out()) + } } impl Into for Tendermint { - fn into(self) -> Generic { - let mut stream = RlpStream::new_list(3); - stream - .append(&self.round) - .append(&self.proposal) - .append_list(&self.precommits); - Generic(stream.out()) - } + fn into(self) -> Generic { + let mut stream = RlpStream::new_list(3); + stream + .append(&self.round) + .append(&self.proposal) + .append_list(&self.precommits); + Generic(stream.out()) + } } pub struct Generic(pub Vec); /// Genesis seal type. pub enum Seal { - /// Classic ethereum seal. - Ethereum(Ethereum), - /// AuthorityRound seal. - AuthorityRound(AuthorityRound), - /// Tendermint seal. - Tendermint(Tendermint), - /// Generic RLP seal. - Generic(Generic), + /// Classic ethereum seal. + Ethereum(Ethereum), + /// AuthorityRound seal. + AuthorityRound(AuthorityRound), + /// Tendermint seal. + Tendermint(Tendermint), + /// Generic RLP seal. 
+ Generic(Generic), } impl From for Seal { - fn from(s: ethjson::spec::Seal) -> Self { - match s { - ethjson::spec::Seal::Ethereum(eth) => Seal::Ethereum(Ethereum { - nonce: eth.nonce.into(), - mix_hash: eth.mix_hash.into() - }), - ethjson::spec::Seal::AuthorityRound(ar) => Seal::AuthorityRound(AuthorityRound { - step: ar.step.into(), - signature: ar.signature.into() - }), - ethjson::spec::Seal::Tendermint(tender) => Seal::Tendermint(Tendermint { - round: tender.round.into(), - proposal: tender.proposal.into(), - precommits: tender.precommits.into_iter().map(Into::into).collect() - }), - ethjson::spec::Seal::Generic(g) => Seal::Generic(Generic(g.into())), - } - } + fn from(s: ethjson::spec::Seal) -> Self { + match s { + ethjson::spec::Seal::Ethereum(eth) => Seal::Ethereum(Ethereum { + nonce: eth.nonce.into(), + mix_hash: eth.mix_hash.into(), + }), + ethjson::spec::Seal::AuthorityRound(ar) => Seal::AuthorityRound(AuthorityRound { + step: ar.step.into(), + signature: ar.signature.into(), + }), + ethjson::spec::Seal::Tendermint(tender) => Seal::Tendermint(Tendermint { + round: tender.round.into(), + proposal: tender.proposal.into(), + precommits: tender.precommits.into_iter().map(Into::into).collect(), + }), + ethjson::spec::Seal::Generic(g) => Seal::Generic(Generic(g.into())), + } + } } impl Into for Seal { - fn into(self) -> Generic { - match self { - Seal::Generic(generic) => generic, - Seal::Ethereum(eth) => eth.into(), - Seal::AuthorityRound(ar) => ar.into(), - Seal::Tendermint(tender) => tender.into(), - } - } + fn into(self) -> Generic { + match self { + Seal::Generic(generic) => generic, + Seal::Ethereum(eth) => eth.into(), + Seal::AuthorityRound(ar) => ar.into(), + Seal::Tendermint(tender) => tender.into(), + } + } } diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 37305e455..d56d7d7b0 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -16,38 +16,30 @@ //! Parameters for a block chain. 
-use std::collections::BTreeMap; -use std::convert::TryFrom; -use std::io::Read; -use std::path::Path; -use std::sync::Arc; +use std::{collections::BTreeMap, convert::TryFrom, io::Read, path::Path, sync::Arc}; use bytes::Bytes; -use ethereum_types::{H256, Bloom, U256, Address}; +use ethereum_types::{Address, Bloom, H256, U256}; use ethjson; -use hash::{KECCAK_NULL_RLP, keccak}; +use hash::{keccak, KECCAK_NULL_RLP}; use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; use rustc_hex::{FromHex, ToHex}; -use types::BlockNumber; -use types::encoded; -use types::header::Header; -use vm::{EnvInfo, CallType, ActionValue, ActionParams, ParamsType}; +use types::{encoded, header::Header, BlockNumber}; +use vm::{ActionParams, ActionValue, CallType, EnvInfo, ParamsType}; use builtin::Builtin; use engines::{ - EthEngine, NullEngine, InstantSeal, InstantSealParams, BasicAuthority, Clique, - AuthorityRound, DEFAULT_BLOCKHASH_CONTRACT + AuthorityRound, BasicAuthority, Clique, EthEngine, InstantSeal, InstantSealParams, NullEngine, + DEFAULT_BLOCKHASH_CONTRACT, }; use error::Error; use executive::Executive; use factory::Factories; use machine::EthereumMachine; use pod_state::PodState; -use spec::Genesis; -use spec::seal::Generic as GenericSeal; -use state::backend::Basic as BasicBackend; -use state::{Backend, State, Substate}; +use spec::{seal::Generic as GenericSeal, Genesis}; +use state::{backend::Basic as BasicBackend, Backend, State, Substate}; use trace::{NoopTracer, NoopVMTracer}; pub use ethash::OptimizeFor; @@ -56,7 +48,7 @@ const MAX_TRANSACTION_SIZE: usize = 300 * 1024; // helper for formatting errors. fn fmt_err(f: F) -> String { - format!("Spec json is invalid: {}", f) + format!("Spec json is invalid: {}", f) } /// Parameters common to ethereum-like blockchains. @@ -68,1033 +60,1048 @@ fn fmt_err(f: F) -> String { #[derive(Debug, PartialEq, Default)] #[cfg_attr(any(test, feature = "test-helpers"), derive(Clone))] pub struct CommonParams { - /// Account start nonce. 
- pub account_start_nonce: U256, - /// Maximum size of extra data. - pub maximum_extra_data_size: usize, - /// Network id. - pub network_id: u64, - /// Chain id. - pub chain_id: u64, - /// Main subprotocol name. - pub subprotocol_name: String, - /// Minimum gas limit. - pub min_gas_limit: U256, - /// Fork block to check. - pub fork_block: Option<(BlockNumber, H256)>, - /// EIP150 transition block number. - pub eip150_transition: BlockNumber, - /// Number of first block where EIP-160 rules begin. - pub eip160_transition: BlockNumber, - /// Number of first block where EIP-161.abc begin. - pub eip161abc_transition: BlockNumber, - /// Number of first block where EIP-161.d begins. - pub eip161d_transition: BlockNumber, - /// Number of first block where EIP-98 rules begin. - pub eip98_transition: BlockNumber, - /// Number of first block where EIP-658 rules begin. - pub eip658_transition: BlockNumber, - /// Number of first block where EIP-155 rules begin. - pub eip155_transition: BlockNumber, - /// Validate block receipts root. - pub validate_receipts_transition: BlockNumber, - /// Validate transaction chain id. - pub validate_chain_id_transition: BlockNumber, - /// Number of first block where EIP-140 rules begin. - pub eip140_transition: BlockNumber, - /// Number of first block where EIP-210 rules begin. - pub eip210_transition: BlockNumber, - /// EIP-210 Blockhash contract address. - pub eip210_contract_address: Address, - /// EIP-210 Blockhash contract code. - pub eip210_contract_code: Bytes, - /// Gas allocated for EIP-210 blockhash update. - pub eip210_contract_gas: U256, - /// Number of first block where EIP-211 rules begin. - pub eip211_transition: BlockNumber, - /// Number of first block where EIP-214 rules begin. - pub eip214_transition: BlockNumber, - /// Number of first block where EIP-145 rules begin. - pub eip145_transition: BlockNumber, - /// Number of first block where EIP-1052 rules begin. 
- pub eip1052_transition: BlockNumber, - /// Number of first block where EIP-1283 rules begin. - pub eip1283_transition: BlockNumber, - /// Number of first block where EIP-1283 rules end. - pub eip1283_disable_transition: BlockNumber, - /// Number of first block where EIP-1283 rules re-enabled. - pub eip1283_reenable_transition: BlockNumber, - /// Number of first block where EIP-1014 rules begin. - pub eip1014_transition: BlockNumber, - /// Number of first block where EIP-1706 rules begin. - pub eip1706_transition: BlockNumber, - /// Number of first block where EIP-1344 rules begin: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1344.md - pub eip1344_transition: BlockNumber, - /// Number of first block where EIP-1884 rules begin:https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1884.md - pub eip1884_transition: BlockNumber, - /// Number of first block where EIP-2028 rules begin. - pub eip2028_transition: BlockNumber, - /// Number of first block where dust cleanup rules (EIP-168 and EIP169) begin. - pub dust_protection_transition: BlockNumber, - /// Nonce cap increase per block. Nonce cap is only checked if dust protection is enabled. - pub nonce_cap_increment: u64, - /// Enable dust cleanup for contracts. - pub remove_dust_contracts: bool, - /// Wasm activation blocknumber, if any disabled initially. - pub wasm_activation_transition: BlockNumber, - /// Number of first block where KIP-4 rules begin. Only has effect if Wasm is activated. - pub kip4_transition: BlockNumber, - /// Number of first block where KIP-6 rules begin. Only has effect if Wasm is activated. - pub kip6_transition: BlockNumber, - /// Gas limit bound divisor (how much gas limit can change per block) - pub gas_limit_bound_divisor: U256, - /// Registrar contract address. - pub registrar: Address, - /// Node permission managing contract address. - pub node_permission_contract: Option
, - /// Maximum contract code size that can be deployed. - pub max_code_size: u64, - /// Number of first block where max code size limit is active. - pub max_code_size_transition: BlockNumber, - /// Transaction permission managing contract address. - pub transaction_permission_contract: Option
, - /// Block at which the transaction permission contract should start being used. - pub transaction_permission_contract_transition: BlockNumber, - /// Maximum size of transaction's RLP payload - pub max_transaction_size: usize, + /// Account start nonce. + pub account_start_nonce: U256, + /// Maximum size of extra data. + pub maximum_extra_data_size: usize, + /// Network id. + pub network_id: u64, + /// Chain id. + pub chain_id: u64, + /// Main subprotocol name. + pub subprotocol_name: String, + /// Minimum gas limit. + pub min_gas_limit: U256, + /// Fork block to check. + pub fork_block: Option<(BlockNumber, H256)>, + /// EIP150 transition block number. + pub eip150_transition: BlockNumber, + /// Number of first block where EIP-160 rules begin. + pub eip160_transition: BlockNumber, + /// Number of first block where EIP-161.abc begin. + pub eip161abc_transition: BlockNumber, + /// Number of first block where EIP-161.d begins. + pub eip161d_transition: BlockNumber, + /// Number of first block where EIP-98 rules begin. + pub eip98_transition: BlockNumber, + /// Number of first block where EIP-658 rules begin. + pub eip658_transition: BlockNumber, + /// Number of first block where EIP-155 rules begin. + pub eip155_transition: BlockNumber, + /// Validate block receipts root. + pub validate_receipts_transition: BlockNumber, + /// Validate transaction chain id. + pub validate_chain_id_transition: BlockNumber, + /// Number of first block where EIP-140 rules begin. + pub eip140_transition: BlockNumber, + /// Number of first block where EIP-210 rules begin. + pub eip210_transition: BlockNumber, + /// EIP-210 Blockhash contract address. + pub eip210_contract_address: Address, + /// EIP-210 Blockhash contract code. + pub eip210_contract_code: Bytes, + /// Gas allocated for EIP-210 blockhash update. + pub eip210_contract_gas: U256, + /// Number of first block where EIP-211 rules begin. 
+ pub eip211_transition: BlockNumber, + /// Number of first block where EIP-214 rules begin. + pub eip214_transition: BlockNumber, + /// Number of first block where EIP-145 rules begin. + pub eip145_transition: BlockNumber, + /// Number of first block where EIP-1052 rules begin. + pub eip1052_transition: BlockNumber, + /// Number of first block where EIP-1283 rules begin. + pub eip1283_transition: BlockNumber, + /// Number of first block where EIP-1283 rules end. + pub eip1283_disable_transition: BlockNumber, + /// Number of first block where EIP-1283 rules re-enabled. + pub eip1283_reenable_transition: BlockNumber, + /// Number of first block where EIP-1014 rules begin. + pub eip1014_transition: BlockNumber, + /// Number of first block where EIP-1706 rules begin. + pub eip1706_transition: BlockNumber, + /// Number of first block where EIP-1344 rules begin: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1344.md + pub eip1344_transition: BlockNumber, + /// Number of first block where EIP-1884 rules begin:https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1884.md + pub eip1884_transition: BlockNumber, + /// Number of first block where EIP-2028 rules begin. + pub eip2028_transition: BlockNumber, + /// Number of first block where dust cleanup rules (EIP-168 and EIP169) begin. + pub dust_protection_transition: BlockNumber, + /// Nonce cap increase per block. Nonce cap is only checked if dust protection is enabled. + pub nonce_cap_increment: u64, + /// Enable dust cleanup for contracts. + pub remove_dust_contracts: bool, + /// Wasm activation blocknumber, if any disabled initially. + pub wasm_activation_transition: BlockNumber, + /// Number of first block where KIP-4 rules begin. Only has effect if Wasm is activated. + pub kip4_transition: BlockNumber, + /// Number of first block where KIP-6 rules begin. Only has effect if Wasm is activated. 
+ pub kip6_transition: BlockNumber, + /// Gas limit bound divisor (how much gas limit can change per block) + pub gas_limit_bound_divisor: U256, + /// Registrar contract address. + pub registrar: Address, + /// Node permission managing contract address. + pub node_permission_contract: Option
, + /// Maximum contract code size that can be deployed. + pub max_code_size: u64, + /// Number of first block where max code size limit is active. + pub max_code_size_transition: BlockNumber, + /// Transaction permission managing contract address. + pub transaction_permission_contract: Option
, + /// Block at which the transaction permission contract should start being used. + pub transaction_permission_contract_transition: BlockNumber, + /// Maximum size of transaction's RLP payload + pub max_transaction_size: usize, } impl CommonParams { - /// Schedule for an EVM in the post-EIP-150-era of the Ethereum main net. - pub fn schedule(&self, block_number: u64) -> ::vm::Schedule { - if block_number < self.eip150_transition { - ::vm::Schedule::new_homestead() - } else { - let max_code_size = self.max_code_size(block_number); - let mut schedule = ::vm::Schedule::new_post_eip150( - max_code_size as _, - block_number >= self.eip160_transition, - block_number >= self.eip161abc_transition, - block_number >= self.eip161d_transition - ); + /// Schedule for an EVM in the post-EIP-150-era of the Ethereum main net. + pub fn schedule(&self, block_number: u64) -> ::vm::Schedule { + if block_number < self.eip150_transition { + ::vm::Schedule::new_homestead() + } else { + let max_code_size = self.max_code_size(block_number); + let mut schedule = ::vm::Schedule::new_post_eip150( + max_code_size as _, + block_number >= self.eip160_transition, + block_number >= self.eip161abc_transition, + block_number >= self.eip161d_transition, + ); - self.update_schedule(block_number, &mut schedule); - schedule - } - } + self.update_schedule(block_number, &mut schedule); + schedule + } + } - /// Returns max code size at given block. - pub fn max_code_size(&self, block_number: u64) -> u64 { - if block_number >= self.max_code_size_transition { - self.max_code_size - } else { - u64::max_value() - } - } + /// Returns max code size at given block. + pub fn max_code_size(&self, block_number: u64) -> u64 { + if block_number >= self.max_code_size_transition { + self.max_code_size + } else { + u64::max_value() + } + } - /// Apply common spec config parameters to the schedule. 
- pub fn update_schedule(&self, block_number: u64, schedule: &mut ::vm::Schedule) { - schedule.have_create2 = block_number >= self.eip1014_transition; - schedule.have_revert = block_number >= self.eip140_transition; - schedule.have_static_call = block_number >= self.eip214_transition; - schedule.have_return_data = block_number >= self.eip211_transition; - schedule.have_bitwise_shifting = block_number >= self.eip145_transition; - schedule.have_extcodehash = block_number >= self.eip1052_transition; - schedule.have_chain_id = block_number >= self.eip1344_transition; - schedule.eip1283 = - (block_number >= self.eip1283_transition && - !(block_number >= self.eip1283_disable_transition)) || - block_number >= self.eip1283_reenable_transition; - schedule.eip1706 = block_number >= self.eip1706_transition; + /// Apply common spec config parameters to the schedule. + pub fn update_schedule(&self, block_number: u64, schedule: &mut ::vm::Schedule) { + schedule.have_create2 = block_number >= self.eip1014_transition; + schedule.have_revert = block_number >= self.eip140_transition; + schedule.have_static_call = block_number >= self.eip214_transition; + schedule.have_return_data = block_number >= self.eip211_transition; + schedule.have_bitwise_shifting = block_number >= self.eip145_transition; + schedule.have_extcodehash = block_number >= self.eip1052_transition; + schedule.have_chain_id = block_number >= self.eip1344_transition; + schedule.eip1283 = (block_number >= self.eip1283_transition + && !(block_number >= self.eip1283_disable_transition)) + || block_number >= self.eip1283_reenable_transition; + schedule.eip1706 = block_number >= self.eip1706_transition; - if block_number >= self.eip1884_transition { - schedule.have_selfbalance = true; - schedule.sload_gas = 800; - schedule.balance_gas = 700; - schedule.extcodehash_gas = 700; - } - if block_number >= self.eip2028_transition { - schedule.tx_data_non_zero_gas = 16; - } - if block_number >= self.eip210_transition { - 
schedule.blockhash_gas = 800; - } - if block_number >= self.dust_protection_transition { - schedule.kill_dust = match self.remove_dust_contracts { - true => ::vm::CleanDustMode::WithCodeAndStorage, - false => ::vm::CleanDustMode::BasicOnly, - }; - } - if block_number >= self.wasm_activation_transition { - let mut wasm = ::vm::WasmCosts::default(); - if block_number >= self.kip4_transition { - wasm.have_create2 = true; - } - if block_number >= self.kip6_transition { - wasm.have_gasleft = true; - } - schedule.wasm = Some(wasm); - } - } + if block_number >= self.eip1884_transition { + schedule.have_selfbalance = true; + schedule.sload_gas = 800; + schedule.balance_gas = 700; + schedule.extcodehash_gas = 700; + } + if block_number >= self.eip2028_transition { + schedule.tx_data_non_zero_gas = 16; + } + if block_number >= self.eip210_transition { + schedule.blockhash_gas = 800; + } + if block_number >= self.dust_protection_transition { + schedule.kill_dust = match self.remove_dust_contracts { + true => ::vm::CleanDustMode::WithCodeAndStorage, + false => ::vm::CleanDustMode::BasicOnly, + }; + } + if block_number >= self.wasm_activation_transition { + let mut wasm = ::vm::WasmCosts::default(); + if block_number >= self.kip4_transition { + wasm.have_create2 = true; + } + if block_number >= self.kip6_transition { + wasm.have_gasleft = true; + } + schedule.wasm = Some(wasm); + } + } - /// Return Some if the current parameters contain a bugfix hard fork not on block 0. - pub fn nonzero_bugfix_hard_fork(&self) -> Option<&str> { - if self.eip155_transition != 0 { - return Some("eip155Transition"); - } + /// Return Some if the current parameters contain a bugfix hard fork not on block 0. 
+ pub fn nonzero_bugfix_hard_fork(&self) -> Option<&str> { + if self.eip155_transition != 0 { + return Some("eip155Transition"); + } - if self.validate_receipts_transition != 0 { - return Some("validateReceiptsTransition"); - } + if self.validate_receipts_transition != 0 { + return Some("validateReceiptsTransition"); + } - if self.validate_chain_id_transition != 0 { - return Some("validateChainIdTransition"); - } + if self.validate_chain_id_transition != 0 { + return Some("validateChainIdTransition"); + } - None - } + None + } } impl From for CommonParams { - fn from(p: ethjson::spec::Params) -> Self { - CommonParams { - account_start_nonce: p.account_start_nonce.map_or_else(U256::zero, Into::into), - maximum_extra_data_size: p.maximum_extra_data_size.into(), - network_id: p.network_id.into(), - chain_id: if let Some(n) = p.chain_id { - n.into() - } else { - p.network_id.into() - }, - subprotocol_name: p.subprotocol_name.unwrap_or_else(|| "eth".to_owned()), - min_gas_limit: p.min_gas_limit.into(), - fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { - Some((n.into(), h.into())) - } else { - None - }, - eip150_transition: p.eip150_transition.map_or(0, Into::into), - eip160_transition: p.eip160_transition.map_or(0, Into::into), - eip161abc_transition: p.eip161abc_transition.map_or(0, Into::into), - eip161d_transition: p.eip161d_transition.map_or(0, Into::into), - eip98_transition: p.eip98_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip155_transition: p.eip155_transition.map_or(0, Into::into), - validate_receipts_transition: p.validate_receipts_transition.map_or(0, Into::into), - validate_chain_id_transition: p.validate_chain_id_transition.map_or(0, Into::into), - eip140_transition: p.eip140_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip210_transition: p.eip210_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip210_contract_address: 
p.eip210_contract_address.map_or(0xf0.into(), Into::into), - eip210_contract_code: p.eip210_contract_code.map_or_else( - || { - DEFAULT_BLOCKHASH_CONTRACT.from_hex().expect( - "Default BLOCKHASH contract is valid", - ) - }, - Into::into, - ), - eip210_contract_gas: p.eip210_contract_gas.map_or(1000000.into(), Into::into), - eip211_transition: p.eip211_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip145_transition: p.eip145_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip214_transition: p.eip214_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip658_transition: p.eip658_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip1052_transition: p.eip1052_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip1283_transition: p.eip1283_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip1283_disable_transition: p.eip1283_disable_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip1283_reenable_transition: p.eip1283_reenable_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip1706_transition: p.eip1706_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip1014_transition: p.eip1014_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip1344_transition: p.eip1344_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip1884_transition: p.eip1884_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - eip2028_transition: p.eip2028_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - dust_protection_transition: p.dust_protection_transition.map_or_else( - BlockNumber::max_value, - Into::into, - ), - nonce_cap_increment: p.nonce_cap_increment.map_or(64, Into::into), - remove_dust_contracts: p.remove_dust_contracts.unwrap_or(false), - gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(), - registrar: 
p.registrar.map_or_else(Address::new, Into::into), - node_permission_contract: p.node_permission_contract.map(Into::into), - max_code_size: p.max_code_size.map_or(u64::max_value(), Into::into), - max_transaction_size: p.max_transaction_size.map_or(MAX_TRANSACTION_SIZE, Into::into), - max_code_size_transition: p.max_code_size_transition.map_or(0, Into::into), - transaction_permission_contract: p.transaction_permission_contract.map(Into::into), - transaction_permission_contract_transition: - p.transaction_permission_contract_transition.map_or(0, Into::into), - wasm_activation_transition: p.wasm_activation_transition.map_or_else( - BlockNumber::max_value, - Into::into - ), - kip4_transition: p.kip4_transition.map_or_else( - BlockNumber::max_value, - Into::into - ), - kip6_transition: p.kip6_transition.map_or_else( - BlockNumber::max_value, - Into::into - ), - } - } + fn from(p: ethjson::spec::Params) -> Self { + CommonParams { + account_start_nonce: p.account_start_nonce.map_or_else(U256::zero, Into::into), + maximum_extra_data_size: p.maximum_extra_data_size.into(), + network_id: p.network_id.into(), + chain_id: if let Some(n) = p.chain_id { + n.into() + } else { + p.network_id.into() + }, + subprotocol_name: p.subprotocol_name.unwrap_or_else(|| "eth".to_owned()), + min_gas_limit: p.min_gas_limit.into(), + fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { + Some((n.into(), h.into())) + } else { + None + }, + eip150_transition: p.eip150_transition.map_or(0, Into::into), + eip160_transition: p.eip160_transition.map_or(0, Into::into), + eip161abc_transition: p.eip161abc_transition.map_or(0, Into::into), + eip161d_transition: p.eip161d_transition.map_or(0, Into::into), + eip98_transition: p + .eip98_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip155_transition: p.eip155_transition.map_or(0, Into::into), + validate_receipts_transition: p.validate_receipts_transition.map_or(0, Into::into), + validate_chain_id_transition: 
p.validate_chain_id_transition.map_or(0, Into::into), + eip140_transition: p + .eip140_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip210_transition: p + .eip210_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip210_contract_address: p.eip210_contract_address.map_or(0xf0.into(), Into::into), + eip210_contract_code: p.eip210_contract_code.map_or_else( + || { + DEFAULT_BLOCKHASH_CONTRACT + .from_hex() + .expect("Default BLOCKHASH contract is valid") + }, + Into::into, + ), + eip210_contract_gas: p.eip210_contract_gas.map_or(1000000.into(), Into::into), + eip211_transition: p + .eip211_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip145_transition: p + .eip145_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip214_transition: p + .eip214_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip658_transition: p + .eip658_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip1052_transition: p + .eip1052_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip1283_transition: p + .eip1283_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip1283_disable_transition: p + .eip1283_disable_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip1283_reenable_transition: p + .eip1283_reenable_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip1706_transition: p + .eip1706_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip1014_transition: p + .eip1014_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip1344_transition: p + .eip1344_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip1884_transition: p + .eip1884_transition + .map_or_else(BlockNumber::max_value, Into::into), + eip2028_transition: p + .eip2028_transition + .map_or_else(BlockNumber::max_value, Into::into), + dust_protection_transition: p + .dust_protection_transition + .map_or_else(BlockNumber::max_value, 
Into::into), + nonce_cap_increment: p.nonce_cap_increment.map_or(64, Into::into), + remove_dust_contracts: p.remove_dust_contracts.unwrap_or(false), + gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(), + registrar: p.registrar.map_or_else(Address::new, Into::into), + node_permission_contract: p.node_permission_contract.map(Into::into), + max_code_size: p.max_code_size.map_or(u64::max_value(), Into::into), + max_transaction_size: p + .max_transaction_size + .map_or(MAX_TRANSACTION_SIZE, Into::into), + max_code_size_transition: p.max_code_size_transition.map_or(0, Into::into), + transaction_permission_contract: p.transaction_permission_contract.map(Into::into), + transaction_permission_contract_transition: p + .transaction_permission_contract_transition + .map_or(0, Into::into), + wasm_activation_transition: p + .wasm_activation_transition + .map_or_else(BlockNumber::max_value, Into::into), + kip4_transition: p + .kip4_transition + .map_or_else(BlockNumber::max_value, Into::into), + kip6_transition: p + .kip6_transition + .map_or_else(BlockNumber::max_value, Into::into), + } + } } /// Runtime parameters for the spec that are related to how the software should run the chain, /// rather than integral properties of the chain itself. #[derive(Debug, Clone, Copy)] pub struct SpecParams<'a> { - /// The path to the folder used to cache nodes. This is typically /tmp/ on Unix-like systems - pub cache_dir: &'a Path, - /// Whether to run slower at the expense of better memory usage, or run faster while using - /// more - /// memory. This may get more fine-grained in the future but for now is simply a binary - /// option. - pub optimization_setting: Option, + /// The path to the folder used to cache nodes. This is typically /tmp/ on Unix-like systems + pub cache_dir: &'a Path, + /// Whether to run slower at the expense of better memory usage, or run faster while using + /// more + /// memory. 
This may get more fine-grained in the future but for now is simply a binary + /// option. + pub optimization_setting: Option, } impl<'a> SpecParams<'a> { - /// Create from a cache path, with null values for the other fields - pub fn from_path(path: &'a Path) -> Self { - SpecParams { - cache_dir: path, - optimization_setting: None, - } - } + /// Create from a cache path, with null values for the other fields + pub fn from_path(path: &'a Path) -> Self { + SpecParams { + cache_dir: path, + optimization_setting: None, + } + } - /// Create from a cache path and an optimization setting - pub fn new(path: &'a Path, optimization: OptimizeFor) -> Self { - SpecParams { - cache_dir: path, - optimization_setting: Some(optimization), - } - } + /// Create from a cache path and an optimization setting + pub fn new(path: &'a Path, optimization: OptimizeFor) -> Self { + SpecParams { + cache_dir: path, + optimization_setting: Some(optimization), + } + } } impl<'a, T: AsRef> From<&'a T> for SpecParams<'a> { - fn from(path: &'a T) -> Self { - Self::from_path(path.as_ref()) - } + fn from(path: &'a T) -> Self { + Self::from_path(path.as_ref()) + } } /// Parameters for a block chain; includes both those intrinsic to the design of the /// chain and those to be interpreted by the active chain engine. pub struct Spec { - /// User friendly spec name - pub name: String, - /// What engine are we using for this? - pub engine: Arc, - /// Name of the subdir inside the main data dir to use for chain data and settings. - pub data_dir: String, + /// User friendly spec name + pub name: String, + /// What engine are we using for this? + pub engine: Arc, + /// Name of the subdir inside the main data dir to use for chain data and settings. + pub data_dir: String, - /// Known nodes on the network in enode format. - pub nodes: Vec, + /// Known nodes on the network in enode format. + pub nodes: Vec, - /// The genesis block's parent hash field. 
- pub parent_hash: H256, - /// The genesis block's author field. - pub author: Address, - /// The genesis block's difficulty field. - pub difficulty: U256, - /// The genesis block's gas limit field. - pub gas_limit: U256, - /// The genesis block's gas used field. - pub gas_used: U256, - /// The genesis block's timestamp field. - pub timestamp: u64, - /// Transactions root of the genesis block. Should be KECCAK_NULL_RLP. - pub transactions_root: H256, - /// Receipts root of the genesis block. Should be KECCAK_NULL_RLP. - pub receipts_root: H256, - /// The genesis block's extra data field. - pub extra_data: Bytes, - /// Each seal field, expressed as RLP, concatenated. - pub seal_rlp: Bytes, + /// The genesis block's parent hash field. + pub parent_hash: H256, + /// The genesis block's author field. + pub author: Address, + /// The genesis block's difficulty field. + pub difficulty: U256, + /// The genesis block's gas limit field. + pub gas_limit: U256, + /// The genesis block's gas used field. + pub gas_used: U256, + /// The genesis block's timestamp field. + pub timestamp: u64, + /// Transactions root of the genesis block. Should be KECCAK_NULL_RLP. + pub transactions_root: H256, + /// Receipts root of the genesis block. Should be KECCAK_NULL_RLP. + pub receipts_root: H256, + /// The genesis block's extra data field. + pub extra_data: Bytes, + /// Each seal field, expressed as RLP, concatenated. + pub seal_rlp: Bytes, - /// Hardcoded synchronization. Allows the light client to immediately jump to a specific block. - pub hardcoded_sync: Option, + /// Hardcoded synchronization. Allows the light client to immediately jump to a specific block. + pub hardcoded_sync: Option, - /// Contract constructors to be executed on genesis. - constructors: Vec<(Address, Bytes)>, + /// Contract constructors to be executed on genesis. + constructors: Vec<(Address, Bytes)>, - /// May be prepopulated if we know this in advance. 
- state_root_memo: RwLock, + /// May be prepopulated if we know this in advance. + state_root_memo: RwLock, - /// Genesis state as plain old data. - genesis_state: PodState, + /// Genesis state as plain old data. + genesis_state: PodState, } #[cfg(test)] impl Clone for Spec { - fn clone(&self) -> Spec { - Spec { - name: self.name.clone(), - engine: self.engine.clone(), - data_dir: self.data_dir.clone(), - nodes: self.nodes.clone(), - parent_hash: self.parent_hash.clone(), - transactions_root: self.transactions_root.clone(), - receipts_root: self.receipts_root.clone(), - author: self.author.clone(), - difficulty: self.difficulty.clone(), - gas_limit: self.gas_limit.clone(), - gas_used: self.gas_used.clone(), - timestamp: self.timestamp.clone(), - extra_data: self.extra_data.clone(), - seal_rlp: self.seal_rlp.clone(), - hardcoded_sync: self.hardcoded_sync.clone(), - constructors: self.constructors.clone(), - state_root_memo: RwLock::new(*self.state_root_memo.read()), - genesis_state: self.genesis_state.clone(), - } - } + fn clone(&self) -> Spec { + Spec { + name: self.name.clone(), + engine: self.engine.clone(), + data_dir: self.data_dir.clone(), + nodes: self.nodes.clone(), + parent_hash: self.parent_hash.clone(), + transactions_root: self.transactions_root.clone(), + receipts_root: self.receipts_root.clone(), + author: self.author.clone(), + difficulty: self.difficulty.clone(), + gas_limit: self.gas_limit.clone(), + gas_used: self.gas_used.clone(), + timestamp: self.timestamp.clone(), + extra_data: self.extra_data.clone(), + seal_rlp: self.seal_rlp.clone(), + hardcoded_sync: self.hardcoded_sync.clone(), + constructors: self.constructors.clone(), + state_root_memo: RwLock::new(*self.state_root_memo.read()), + genesis_state: self.genesis_state.clone(), + } + } } /// Part of `Spec`. Describes the hardcoded synchronization parameters. pub struct SpecHardcodedSync { - /// Header of the block to jump to for hardcoded sync, and total difficulty. 
- pub header: encoded::Header, - /// Total difficulty of the block to jump to. - pub total_difficulty: U256, - /// List of hardcoded CHTs, in order. If `hardcoded_sync` is set, the CHTs should include the - /// header of `hardcoded_sync`. - pub chts: Vec, + /// Header of the block to jump to for hardcoded sync, and total difficulty. + pub header: encoded::Header, + /// Total difficulty of the block to jump to. + pub total_difficulty: U256, + /// List of hardcoded CHTs, in order. If `hardcoded_sync` is set, the CHTs should include the + /// header of `hardcoded_sync`. + pub chts: Vec, } impl SpecHardcodedSync { - /// Turns this specifications back into JSON. Useful for pretty printing. - pub fn to_json(self) -> ethjson::spec::HardcodedSync { - self.into() - } + /// Turns this specifications back into JSON. Useful for pretty printing. + pub fn to_json(self) -> ethjson::spec::HardcodedSync { + self.into() + } } #[cfg(test)] impl Clone for SpecHardcodedSync { - fn clone(&self) -> SpecHardcodedSync { - SpecHardcodedSync { - header: self.header.clone(), - total_difficulty: self.total_difficulty.clone(), - chts: self.chts.clone(), - } - } + fn clone(&self) -> SpecHardcodedSync { + SpecHardcodedSync { + header: self.header.clone(), + total_difficulty: self.total_difficulty.clone(), + chts: self.chts.clone(), + } + } } impl From for ethjson::spec::HardcodedSync { - fn from(sync: SpecHardcodedSync) -> ethjson::spec::HardcodedSync { - ethjson::spec::HardcodedSync { - header: sync.header.into_inner().to_hex(), - total_difficulty: ethjson::uint::Uint(sync.total_difficulty), - chts: sync.chts.into_iter().map(Into::into).collect(), - } - } + fn from(sync: SpecHardcodedSync) -> ethjson::spec::HardcodedSync { + ethjson::spec::HardcodedSync { + header: sync.header.into_inner().to_hex(), + total_difficulty: ethjson::uint::Uint(sync.total_difficulty), + chts: sync.chts.into_iter().map(Into::into).collect(), + } + } } fn load_machine_from(s: ethjson::spec::Spec) -> EthereumMachine { - 
let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), Builtin::try_from(p.1).expect("chain spec is invalid"))).collect(); - let params = CommonParams::from(s.params); + let builtins = s + .accounts + .builtins() + .into_iter() + .map(|p| { + ( + p.0.into(), + Builtin::try_from(p.1).expect("chain spec is invalid"), + ) + }) + .collect(); + let params = CommonParams::from(s.params); - Spec::machine(&s.engine, params, builtins) + Spec::machine(&s.engine, params, builtins) } fn convert_json_to_spec( - (address, builtin): (ethjson::hash::Address, ethjson::spec::builtin::Builtin), + (address, builtin): (ethjson::hash::Address, ethjson::spec::builtin::Builtin), ) -> Result<(Address, Builtin), Error> { - let builtin = Builtin::try_from(builtin)?; - Ok((address.into(), builtin)) + let builtin = Builtin::try_from(builtin)?; + Ok((address.into(), builtin)) } /// Load from JSON object. fn load_from(spec_params: SpecParams, s: ethjson::spec::Spec) -> Result { - let builtins: Result, _> = s - .accounts - .builtins() - .into_iter() - .map(convert_json_to_spec) - .collect(); - let builtins = builtins?; - let g = Genesis::from(s.genesis); - let GenericSeal(seal_rlp) = g.seal.into(); - let params = CommonParams::from(s.params); + let builtins: Result, _> = s + .accounts + .builtins() + .into_iter() + .map(convert_json_to_spec) + .collect(); + let builtins = builtins?; + let g = Genesis::from(s.genesis); + let GenericSeal(seal_rlp) = g.seal.into(); + let params = CommonParams::from(s.params); - let hardcoded_sync = if let Some(ref hs) = s.hardcoded_sync { - if let Ok(header) = hs.header.from_hex() { - Some(SpecHardcodedSync { - header: encoded::Header::new(header), - total_difficulty: hs.total_difficulty.into(), - chts: s.hardcoded_sync - .as_ref() - .map(|s| s.chts.iter().map(|c| c.clone().into()).collect()) - .unwrap_or_default() - }) - } else { - None - } - } else { - None - }; + let hardcoded_sync = if let Some(ref hs) = s.hardcoded_sync { + if let Ok(header) = 
hs.header.from_hex() { + Some(SpecHardcodedSync { + header: encoded::Header::new(header), + total_difficulty: hs.total_difficulty.into(), + chts: s + .hardcoded_sync + .as_ref() + .map(|s| s.chts.iter().map(|c| c.clone().into()).collect()) + .unwrap_or_default(), + }) + } else { + None + } + } else { + None + }; - let mut s = Spec { - name: s.name.clone().into(), - engine: Spec::engine(spec_params, s.engine, params, builtins), - data_dir: s.data_dir.unwrap_or(s.name).into(), - nodes: s.nodes.unwrap_or_else(Vec::new), - parent_hash: g.parent_hash, - transactions_root: g.transactions_root, - receipts_root: g.receipts_root, - author: g.author, - difficulty: g.difficulty, - gas_limit: g.gas_limit, - gas_used: g.gas_used, - timestamp: g.timestamp, - extra_data: g.extra_data, - seal_rlp: seal_rlp, - hardcoded_sync: hardcoded_sync, - constructors: s.accounts - .constructors() - .into_iter() - .map(|(a, c)| (a.into(), c.into())) - .collect(), - state_root_memo: RwLock::new(Default::default()), // will be overwritten right after. - genesis_state: s.accounts.into(), - }; + let mut s = Spec { + name: s.name.clone().into(), + engine: Spec::engine(spec_params, s.engine, params, builtins), + data_dir: s.data_dir.unwrap_or(s.name).into(), + nodes: s.nodes.unwrap_or_else(Vec::new), + parent_hash: g.parent_hash, + transactions_root: g.transactions_root, + receipts_root: g.receipts_root, + author: g.author, + difficulty: g.difficulty, + gas_limit: g.gas_limit, + gas_used: g.gas_used, + timestamp: g.timestamp, + extra_data: g.extra_data, + seal_rlp: seal_rlp, + hardcoded_sync: hardcoded_sync, + constructors: s + .accounts + .constructors() + .into_iter() + .map(|(a, c)| (a.into(), c.into())) + .collect(), + state_root_memo: RwLock::new(Default::default()), // will be overwritten right after. + genesis_state: s.accounts.into(), + }; - // use memoized state root if provided. 
- match g.state_root { - Some(root) => *s.state_root_memo.get_mut() = root, - None => { - let _ = s.run_constructors( - &Default::default(), - BasicBackend(journaldb::new_memory_db()), - )?; - } - } + // use memoized state root if provided. + match g.state_root { + Some(root) => *s.state_root_memo.get_mut() = root, + None => { + let _ = s.run_constructors( + &Default::default(), + BasicBackend(journaldb::new_memory_db()), + )?; + } + } - Ok(s) + Ok(s) } macro_rules! load_bundled { - ($e:expr) => { - Spec::load( - &::std::env::temp_dir(), - include_bytes!(concat!("../../res/", $e, ".json")) as &[u8] - ).expect(concat!("Chain spec ", $e, " is invalid.")) - }; + ($e:expr) => { + Spec::load( + &::std::env::temp_dir(), + include_bytes!(concat!("../../res/", $e, ".json")) as &[u8], + ) + .expect(concat!("Chain spec ", $e, " is invalid.")) + }; } #[cfg(any(test, feature = "test-helpers"))] macro_rules! load_machine_bundled { - ($e:expr) => { - Spec::load_machine( - include_bytes!(concat!("../../res/", $e, ".json")) as &[u8] - ).expect(concat!("Chain spec ", $e, " is invalid.")) - }; + ($e:expr) => { + Spec::load_machine(include_bytes!(concat!("../../res/", $e, ".json")) as &[u8]) + .expect(concat!("Chain spec ", $e, " is invalid.")) + }; } impl Spec { - // create an instance of an Ethereum state machine, minus consensus logic. - fn machine( - engine_spec: ðjson::spec::Engine, - params: CommonParams, - builtins: BTreeMap, - ) -> EthereumMachine { - if let ethjson::spec::Engine::Ethash(ref ethash) = *engine_spec { - EthereumMachine::with_ethash_extensions(params, builtins, ethash.params.clone().into()) - } else { - EthereumMachine::regular(params, builtins) - } - } + // create an instance of an Ethereum state machine, minus consensus logic. 
+ fn machine( + engine_spec: ðjson::spec::Engine, + params: CommonParams, + builtins: BTreeMap, + ) -> EthereumMachine { + if let ethjson::spec::Engine::Ethash(ref ethash) = *engine_spec { + EthereumMachine::with_ethash_extensions(params, builtins, ethash.params.clone().into()) + } else { + EthereumMachine::regular(params, builtins) + } + } - /// Convert engine spec into a arc'd Engine of the right underlying type. - /// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. - fn engine( - spec_params: SpecParams, - engine_spec: ethjson::spec::Engine, - params: CommonParams, - builtins: BTreeMap, - ) -> Arc { - let machine = Self::machine(&engine_spec, params, builtins); + /// Convert engine spec into a arc'd Engine of the right underlying type. + /// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. + fn engine( + spec_params: SpecParams, + engine_spec: ethjson::spec::Engine, + params: CommonParams, + builtins: BTreeMap, + ) -> Arc { + let machine = Self::machine(&engine_spec, params, builtins); - match engine_spec { - ethjson::spec::Engine::Null(null) => Arc::new(NullEngine::new(null.params.into(), machine)), - ethjson::spec::Engine::Ethash(ethash) => Arc::new(::ethereum::Ethash::new(spec_params.cache_dir, ethash.params.into(), machine, spec_params.optimization_setting)), - ethjson::spec::Engine::InstantSeal(Some(instant_seal)) => Arc::new(InstantSeal::new(instant_seal.params.into(), machine)), - ethjson::spec::Engine::InstantSeal(None) => Arc::new(InstantSeal::new(InstantSealParams::default(), machine)), - ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(basic_authority.params.into(), machine)), - ethjson::spec::Engine::Clique(clique) => Clique::new(clique.params.into(), machine) - .expect("Failed to start Clique consensus engine."), - ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(authority_round.params.into(), machine) - 
.expect("Failed to start AuthorityRound consensus engine."), - } - } + match engine_spec { + ethjson::spec::Engine::Null(null) => { + Arc::new(NullEngine::new(null.params.into(), machine)) + } + ethjson::spec::Engine::Ethash(ethash) => Arc::new(::ethereum::Ethash::new( + spec_params.cache_dir, + ethash.params.into(), + machine, + spec_params.optimization_setting, + )), + ethjson::spec::Engine::InstantSeal(Some(instant_seal)) => { + Arc::new(InstantSeal::new(instant_seal.params.into(), machine)) + } + ethjson::spec::Engine::InstantSeal(None) => { + Arc::new(InstantSeal::new(InstantSealParams::default(), machine)) + } + ethjson::spec::Engine::BasicAuthority(basic_authority) => { + Arc::new(BasicAuthority::new(basic_authority.params.into(), machine)) + } + ethjson::spec::Engine::Clique(clique) => Clique::new(clique.params.into(), machine) + .expect("Failed to start Clique consensus engine."), + ethjson::spec::Engine::AuthorityRound(authority_round) => { + AuthorityRound::new(authority_round.params.into(), machine) + .expect("Failed to start AuthorityRound consensus engine.") + } + } + } - // given a pre-constructor state, run all the given constructors and produce a new state and - // state root. - fn run_constructors(&self, factories: &Factories, mut db: T) -> Result { - let mut root = KECCAK_NULL_RLP; + // given a pre-constructor state, run all the given constructors and produce a new state and + // state root. + fn run_constructors(&self, factories: &Factories, mut db: T) -> Result { + let mut root = KECCAK_NULL_RLP; - // basic accounts in spec. - { - let mut t = factories.trie.create(db.as_hash_db_mut(), &mut root); + // basic accounts in spec. 
+ { + let mut t = factories.trie.create(db.as_hash_db_mut(), &mut root); - for (address, account) in self.genesis_state.get().iter() { - t.insert(&**address, &account.rlp())?; - } - } + for (address, account) in self.genesis_state.get().iter() { + t.insert(&**address, &account.rlp())?; + } + } - for (address, account) in self.genesis_state.get().iter() { - db.note_non_null_account(address); - account.insert_additional( - &mut *factories.accountdb.create( - db.as_hash_db_mut(), - keccak(address), - ), - &factories.trie, - ); - } + for (address, account) in self.genesis_state.get().iter() { + db.note_non_null_account(address); + account.insert_additional( + &mut *factories + .accountdb + .create(db.as_hash_db_mut(), keccak(address)), + &factories.trie, + ); + } - let start_nonce = self.engine.account_start_nonce(0); + let start_nonce = self.engine.account_start_nonce(0); - let (root, db) = { - let mut state = State::from_existing(db, root, start_nonce, factories.clone())?; + let (root, db) = { + let mut state = State::from_existing(db, root, start_nonce, factories.clone())?; - // Execute contract constructors. - let env_info = EnvInfo { - number: 0, - author: self.author, - timestamp: self.timestamp, - difficulty: self.difficulty, - last_hashes: Default::default(), - gas_used: U256::zero(), - gas_limit: U256::max_value(), - }; + // Execute contract constructors. + let env_info = EnvInfo { + number: 0, + author: self.author, + timestamp: self.timestamp, + difficulty: self.difficulty, + last_hashes: Default::default(), + gas_used: U256::zero(), + gas_limit: U256::max_value(), + }; - let from = Address::default(); - for &(ref address, ref constructor) in self.constructors.iter() { - trace!(target: "spec", "run_constructors: Creating a contract at {}.", address); - trace!(target: "spec", " .. 
root before = {}", state.root()); - let params = ActionParams { - code_address: address.clone(), - code_hash: Some(keccak(constructor)), - address: address.clone(), - sender: from.clone(), - origin: from.clone(), - gas: U256::max_value(), - gas_price: Default::default(), - value: ActionValue::Transfer(Default::default()), - code: Some(Arc::new(constructor.clone())), - data: None, - call_type: CallType::None, - params_type: ParamsType::Embedded, - }; + let from = Address::default(); + for &(ref address, ref constructor) in self.constructors.iter() { + trace!(target: "spec", "run_constructors: Creating a contract at {}.", address); + trace!(target: "spec", " .. root before = {}", state.root()); + let params = ActionParams { + code_address: address.clone(), + code_hash: Some(keccak(constructor)), + address: address.clone(), + sender: from.clone(), + origin: from.clone(), + gas: U256::max_value(), + gas_price: Default::default(), + value: ActionValue::Transfer(Default::default()), + code: Some(Arc::new(constructor.clone())), + data: None, + call_type: CallType::None, + params_type: ParamsType::Embedded, + }; - let mut substate = Substate::new(); + let mut substate = Substate::new(); - { - let machine = self.engine.machine(); - let schedule = machine.schedule(env_info.number); - let mut exec = Executive::new(&mut state, &env_info, &machine, &schedule); - if let Err(e) = exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { - warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e); - } - } + { + let machine = self.engine.machine(); + let schedule = machine.schedule(env_info.number); + let mut exec = Executive::new(&mut state, &env_info, &machine, &schedule); + if let Err(e) = + exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + { + warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e); + } + } - if let Err(e) = state.commit() { - warn!(target: "spec", "Genesis constructor 
trie commit at {} failed: {}.", address, e); - } + if let Err(e) = state.commit() { + warn!(target: "spec", "Genesis constructor trie commit at {} failed: {}.", address, e); + } - trace!(target: "spec", " .. root after = {}", state.root()); - } + trace!(target: "spec", " .. root after = {}", state.root()); + } - state.drop() - }; + state.drop() + }; - *self.state_root_memo.write() = root; - Ok(db) - } + *self.state_root_memo.write() = root; + Ok(db) + } - /// Return the state root for the genesis state, memoising accordingly. - pub fn state_root(&self) -> H256 { - self.state_root_memo.read().clone() - } + /// Return the state root for the genesis state, memoising accordingly. + pub fn state_root(&self) -> H256 { + self.state_root_memo.read().clone() + } - /// Get common blockchain parameters. - pub fn params(&self) -> &CommonParams { - &self.engine.params() - } + /// Get common blockchain parameters. + pub fn params(&self) -> &CommonParams { + &self.engine.params() + } - /// Get the known knodes of the network in enode format. - pub fn nodes(&self) -> &[String] { - &self.nodes - } + /// Get the known knodes of the network in enode format. + pub fn nodes(&self) -> &[String] { + &self.nodes + } - /// Get the configured Network ID. - pub fn network_id(&self) -> u64 { - self.params().network_id - } + /// Get the configured Network ID. + pub fn network_id(&self) -> u64 { + self.params().network_id + } - /// Get the chain ID used for signing. - pub fn chain_id(&self) -> u64 { - self.params().chain_id - } + /// Get the chain ID used for signing. + pub fn chain_id(&self) -> u64 { + self.params().chain_id + } - /// Get the configured subprotocol name. - pub fn subprotocol_name(&self) -> String { - self.params().subprotocol_name.clone() - } + /// Get the configured subprotocol name. + pub fn subprotocol_name(&self) -> String { + self.params().subprotocol_name.clone() + } - /// Get the configured network fork block. 
- pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { - self.params().fork_block - } + /// Get the configured network fork block. + pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { + self.params().fork_block + } - /// Get the header of the genesis block. - pub fn genesis_header(&self) -> Header { - let mut header: Header = Default::default(); - header.set_parent_hash(self.parent_hash.clone()); - header.set_timestamp(self.timestamp); - header.set_number(0); - header.set_author(self.author.clone()); - header.set_transactions_root(self.transactions_root.clone()); - header.set_uncles_hash(keccak(RlpStream::new_list(0).out())); - header.set_extra_data(self.extra_data.clone()); - header.set_state_root(self.state_root()); - header.set_receipts_root(self.receipts_root.clone()); - header.set_log_bloom(Bloom::default()); - header.set_gas_used(self.gas_used.clone()); - header.set_gas_limit(self.gas_limit.clone()); - header.set_difficulty(self.difficulty.clone()); - header.set_seal({ - let r = Rlp::new(&self.seal_rlp); - r.iter().map(|f| f.as_raw().to_vec()).collect() - }); - trace!(target: "spec", "Header hash is {}", header.hash()); - header - } + /// Get the header of the genesis block. 
+ pub fn genesis_header(&self) -> Header { + let mut header: Header = Default::default(); + header.set_parent_hash(self.parent_hash.clone()); + header.set_timestamp(self.timestamp); + header.set_number(0); + header.set_author(self.author.clone()); + header.set_transactions_root(self.transactions_root.clone()); + header.set_uncles_hash(keccak(RlpStream::new_list(0).out())); + header.set_extra_data(self.extra_data.clone()); + header.set_state_root(self.state_root()); + header.set_receipts_root(self.receipts_root.clone()); + header.set_log_bloom(Bloom::default()); + header.set_gas_used(self.gas_used.clone()); + header.set_gas_limit(self.gas_limit.clone()); + header.set_difficulty(self.difficulty.clone()); + header.set_seal({ + let r = Rlp::new(&self.seal_rlp); + r.iter().map(|f| f.as_raw().to_vec()).collect() + }); + trace!(target: "spec", "Header hash is {}", header.hash()); + header + } - /// Compose the genesis block for this chain. - pub fn genesis_block(&self) -> Bytes { - let empty_list = RlpStream::new_list(0).out(); - let header = self.genesis_header(); - let mut ret = RlpStream::new_list(3); - ret.append(&header); - ret.append_raw(&empty_list, 1); - ret.append_raw(&empty_list, 1); - ret.out() - } + /// Compose the genesis block for this chain. + pub fn genesis_block(&self) -> Bytes { + let empty_list = RlpStream::new_list(0).out(); + let header = self.genesis_header(); + let mut ret = RlpStream::new_list(3); + ret.append(&header); + ret.append_raw(&empty_list, 1); + ret.append_raw(&empty_list, 1); + ret.out() + } - /// Overwrite the genesis components. 
- pub fn overwrite_genesis_params(&mut self, g: Genesis) { - let GenericSeal(seal_rlp) = g.seal.into(); - self.parent_hash = g.parent_hash; - self.transactions_root = g.transactions_root; - self.receipts_root = g.receipts_root; - self.author = g.author; - self.difficulty = g.difficulty; - self.gas_limit = g.gas_limit; - self.gas_used = g.gas_used; - self.timestamp = g.timestamp; - self.extra_data = g.extra_data; - self.seal_rlp = seal_rlp; - } + /// Overwrite the genesis components. + pub fn overwrite_genesis_params(&mut self, g: Genesis) { + let GenericSeal(seal_rlp) = g.seal.into(); + self.parent_hash = g.parent_hash; + self.transactions_root = g.transactions_root; + self.receipts_root = g.receipts_root; + self.author = g.author; + self.difficulty = g.difficulty; + self.gas_limit = g.gas_limit; + self.gas_used = g.gas_used; + self.timestamp = g.timestamp; + self.extra_data = g.extra_data; + self.seal_rlp = seal_rlp; + } - /// Alter the value of the genesis state. - pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> { - self.genesis_state = s; - let _ = self.run_constructors( - &Default::default(), - BasicBackend(journaldb::new_memory_db()), - )?; + /// Alter the value of the genesis state. + pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> { + self.genesis_state = s; + let _ = self.run_constructors( + &Default::default(), + BasicBackend(journaldb::new_memory_db()), + )?; - Ok(()) - } + Ok(()) + } - /// Return genesis state as Plain old data. - pub fn genesis_state(&self) -> &PodState { - &self.genesis_state - } + /// Return genesis state as Plain old data. + pub fn genesis_state(&self) -> &PodState { + &self.genesis_state + } - /// Returns `false` if the memoized state root is invalid. `true` otherwise. - pub fn is_state_root_valid(&self) -> bool { - // TODO: get rid of this function and ensure state root always is valid. - // we're mostly there, but `self.genesis_state.root()` doesn't encompass - // post-constructor state. 
- *self.state_root_memo.read() == self.genesis_state.root() - } + /// Returns `false` if the memoized state root is invalid. `true` otherwise. + pub fn is_state_root_valid(&self) -> bool { + // TODO: get rid of this function and ensure state root always is valid. + // we're mostly there, but `self.genesis_state.root()` doesn't encompass + // post-constructor state. + *self.state_root_memo.read() == self.genesis_state.root() + } - /// Ensure that the given state DB has the trie nodes in for the genesis state. - pub fn ensure_db_good(&self, db: T, factories: &Factories) -> Result { - if db.as_hash_db().contains(&self.state_root()) { - return Ok(db); - } + /// Ensure that the given state DB has the trie nodes in for the genesis state. + pub fn ensure_db_good(&self, db: T, factories: &Factories) -> Result { + if db.as_hash_db().contains(&self.state_root()) { + return Ok(db); + } - // TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever - // called anyway. - let db = self.run_constructors(factories, db)?; - Ok(db) - } + // TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever + // called anyway. + let db = self.run_constructors(factories, db)?; + Ok(db) + } - /// Loads just the state machine from a json file. - pub fn load_machine(reader: R) -> Result { - ethjson::spec::Spec::load(reader) - .map_err(fmt_err) - .map(load_machine_from) - } + /// Loads just the state machine from a json file. + pub fn load_machine(reader: R) -> Result { + ethjson::spec::Spec::load(reader) + .map_err(fmt_err) + .map(load_machine_from) + } - /// Loads spec from json file. Provide factories for executing contracts and ensuring - /// storage goes to the right place. - pub fn load<'a, T: Into>, R>(params: T, reader: R) -> Result - where - R: Read, - { - ethjson::spec::Spec::load(reader).map_err(fmt_err).and_then( - |x| { - load_from(params.into(), x).map_err(fmt_err) - }, - ) - } + /// Loads spec from json file. 
Provide factories for executing contracts and ensuring + /// storage goes to the right place. + pub fn load<'a, T: Into>, R>(params: T, reader: R) -> Result + where + R: Read, + { + ethjson::spec::Spec::load(reader) + .map_err(fmt_err) + .and_then(|x| load_from(params.into(), x).map_err(fmt_err)) + } - /// initialize genesis epoch data, using in-memory database for - /// constructor. - pub fn genesis_epoch_data(&self) -> Result, String> { - use types::transaction::{Action, Transaction}; - use journaldb; - use kvdb_memorydb; + /// initialize genesis epoch data, using in-memory database for + /// constructor. + pub fn genesis_epoch_data(&self) -> Result, String> { + use journaldb; + use kvdb_memorydb; + use types::transaction::{Action, Transaction}; - let genesis = self.genesis_header(); + let genesis = self.genesis_header(); - let factories = Default::default(); - let mut db = journaldb::new( - Arc::new(kvdb_memorydb::create(0)), - journaldb::Algorithm::Archive, - None, - ); + let factories = Default::default(); + let mut db = journaldb::new( + Arc::new(kvdb_memorydb::create(0)), + journaldb::Algorithm::Archive, + None, + ); - self.ensure_db_good(BasicBackend(db.as_hash_db_mut()), &factories) - .map_err(|e| format!("Unable to initialize genesis state: {}", e))?; + self.ensure_db_good(BasicBackend(db.as_hash_db_mut()), &factories) + .map_err(|e| format!("Unable to initialize genesis state: {}", e))?; - let call = |a, d| { - let mut db = db.boxed_clone(); - let env_info = ::evm::EnvInfo { - number: 0, - author: *genesis.author(), - timestamp: genesis.timestamp(), - difficulty: *genesis.difficulty(), - gas_limit: U256::max_value(), - last_hashes: Arc::new(Vec::new()), - gas_used: 0.into(), - }; + let call = |a, d| { + let mut db = db.boxed_clone(); + let env_info = ::evm::EnvInfo { + number: 0, + author: *genesis.author(), + timestamp: genesis.timestamp(), + difficulty: *genesis.difficulty(), + gas_limit: U256::max_value(), + last_hashes: Arc::new(Vec::new()), + 
gas_used: 0.into(), + }; - let from = Address::default(); - let tx = Transaction { - nonce: self.engine.account_start_nonce(0), - action: Action::Call(a), - gas: U256::max_value(), - gas_price: U256::default(), - value: U256::default(), - data: d, - }.fake_sign(from); + let from = Address::default(); + let tx = Transaction { + nonce: self.engine.account_start_nonce(0), + action: Action::Call(a), + gas: U256::max_value(), + gas_price: U256::default(), + value: U256::default(), + data: d, + } + .fake_sign(from); - let res = ::state::prove_transaction_virtual( - db.as_hash_db_mut(), - *genesis.state_root(), - &tx, - self.engine.machine(), - &env_info, - factories.clone(), - ); + let res = ::state::prove_transaction_virtual( + db.as_hash_db_mut(), + *genesis.state_root(), + &tx, + self.engine.machine(), + &env_info, + factories.clone(), + ); - res.map(|(out, proof)| { - (out, proof.into_iter().map(|x| x.into_vec()).collect()) - }).ok_or_else(|| "Failed to prove call: insufficient state".into()) - }; + res.map(|(out, proof)| (out, proof.into_iter().map(|x| x.into_vec()).collect())) + .ok_or_else(|| "Failed to prove call: insufficient state".into()) + }; - self.engine.genesis_epoch_data(&genesis, &call) - } + self.engine.genesis_epoch_data(&genesis, &call) + } - /// Create a new Spec with InstantSeal consensus which does internal sealing (not requiring - /// work). - pub fn new_instant() -> Spec { - load_bundled!("instant_seal") - } + /// Create a new Spec with InstantSeal consensus which does internal sealing (not requiring + /// work). + pub fn new_instant() -> Spec { + load_bundled!("instant_seal") + } - /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a - /// NullEngine consensus. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test() -> Spec { - load_bundled!("null_morden") - } + /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a + /// NullEngine consensus. 
+ #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test() -> Spec { + load_bundled!("null_morden") + } - /// Create the EthereumMachine corresponding to Spec::new_test. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_machine() -> EthereumMachine { load_machine_bundled!("null_morden") } + /// Create the EthereumMachine corresponding to Spec::new_test. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_machine() -> EthereumMachine { + load_machine_bundled!("null_morden") + } - /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus with applying reward on block close. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_reward() -> Spec { load_bundled!("null_morden_with_reward") } + /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus with applying reward on block close. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_with_reward() -> Spec { + load_bundled!("null_morden_with_reward") + } - /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus with finality. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_finality() -> Spec { load_bundled!("null_morden_with_finality") } + /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus with finality. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_with_finality() -> Spec { + load_bundled!("null_morden_with_finality") + } - /// Create a new Spec which is a NullEngine consensus with a premine of address whose - /// secret is keccak(''). - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_null() -> Spec { - load_bundled!("null") - } + /// Create a new Spec which is a NullEngine consensus with a premine of address whose + /// secret is keccak(''). 
+ #[cfg(any(test, feature = "test-helpers"))] + pub fn new_null() -> Spec { + load_bundled!("null") + } - /// Create a new Spec which constructs a contract at address 5 with storage at 0 equal to 1. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_constructor() -> Spec { - load_bundled!("constructor") - } + /// Create a new Spec which constructs a contract at address 5 with storage at 0 equal to 1. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_constructor() -> Spec { + load_bundled!("constructor") + } - /// Create a new Spec with AuthorityRound consensus which does internal sealing (not - /// requiring work). - /// Accounts with secrets keccak("0") and keccak("1") are the validators. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_round() -> Self { - load_bundled!("authority_round") - } + /// Create a new Spec with AuthorityRound consensus which does internal sealing (not + /// requiring work). + /// Accounts with secrets keccak("0") and keccak("1") are the validators. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_round() -> Self { + load_bundled!("authority_round") + } - /// Create a new Spec with AuthorityRound consensus which does internal sealing (not - /// requiring work) with empty step messages enabled. - /// Accounts with secrets keccak("0") and keccak("1") are the validators. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_round_empty_steps() -> Self { - load_bundled!("authority_round_empty_steps") - } + /// Create a new Spec with AuthorityRound consensus which does internal sealing (not + /// requiring work) with empty step messages enabled. + /// Accounts with secrets keccak("0") and keccak("1") are the validators. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_round_empty_steps() -> Self { + load_bundled!("authority_round_empty_steps") + } - /// Create a new Spec with AuthorityRound consensus (with empty steps) using a block reward - /// contract. 
The contract source code can be found at: - /// https://github.com/parity-contracts/block-reward/blob/daf7d44383b6cdb11cb6b953b018648e2b027cfb/contracts/ExampleBlockReward.sol - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_round_block_reward_contract() -> Self { - load_bundled!("authority_round_block_reward_contract") - } + /// Create a new Spec with AuthorityRound consensus (with empty steps) using a block reward + /// contract. The contract source code can be found at: + /// https://github.com/parity-contracts/block-reward/blob/daf7d44383b6cdb11cb6b953b018648e2b027cfb/contracts/ExampleBlockReward.sol + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_round_block_reward_contract() -> Self { + load_bundled!("authority_round_block_reward_contract") + } - /// TestList.sol used in both specs: https://github.com/paritytech/contracts/pull/30/files - /// Accounts with secrets keccak("0") and keccak("1") are initially the validators. - /// Create a new Spec with BasicAuthority which uses a contract at address 5 to determine - /// the current validators using `getValidators`. - /// Second validator can be removed with - /// "0xbfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1" and added - /// back in using - /// "0x4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1". - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_validator_safe_contract() -> Self { - load_bundled!("validator_safe_contract") - } + /// TestList.sol used in both specs: https://github.com/paritytech/contracts/pull/30/files + /// Accounts with secrets keccak("0") and keccak("1") are initially the validators. + /// Create a new Spec with BasicAuthority which uses a contract at address 5 to determine + /// the current validators using `getValidators`. 
+ /// Second validator can be removed with + /// "0xbfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1" and added + /// back in using + /// "0x4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1". + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_validator_safe_contract() -> Self { + load_bundled!("validator_safe_contract") + } - /// The same as the `safeContract`, but allows reporting and uses AuthorityRound. - /// Account is marked with `reportBenign` it can be checked as disliked with "0xd8f2e0bf". - /// Validator can be removed with `reportMalicious`. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_validator_contract() -> Self { - load_bundled!("validator_contract") - } + /// The same as the `safeContract`, but allows reporting and uses AuthorityRound. + /// Account is marked with `reportBenign` it can be checked as disliked with "0xd8f2e0bf". + /// Validator can be removed with `reportMalicious`. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_validator_contract() -> Self { + load_bundled!("validator_contract") + } - /// Create a new Spec with BasicAuthority which uses multiple validator sets changing with - /// height. - /// Account with secrets keccak("0") is the validator for block 1 and with keccak("1") - /// onwards. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_validator_multi() -> Self { - load_bundled!("validator_multi") - } + /// Create a new Spec with BasicAuthority which uses multiple validator sets changing with + /// height. + /// Account with secrets keccak("0") is the validator for block 1 and with keccak("1") + /// onwards. 
+ #[cfg(any(test, feature = "test-helpers"))] + pub fn new_validator_multi() -> Self { + load_bundled!("validator_multi") + } } #[cfg(test)] mod tests { - use super::*; - use state::State; - use test_helpers::get_temp_state_db; - use tempdir::TempDir; - use types::view; - use types::views::BlockView; + use super::*; + use state::State; + use tempdir::TempDir; + use test_helpers::get_temp_state_db; + use types::{view, views::BlockView}; - #[test] - fn test_load_empty() { - let tempdir = TempDir::new("").unwrap(); - assert!(Spec::load(&tempdir.path(), &[] as &[u8]).is_err()); - } + #[test] + fn test_load_empty() { + let tempdir = TempDir::new("").unwrap(); + assert!(Spec::load(&tempdir.path(), &[] as &[u8]).is_err()); + } - #[test] - fn test_chain() { - let test_spec = Spec::new_test(); + #[test] + fn test_chain() { + let test_spec = Spec::new_test(); - assert_eq!( - test_spec.state_root(), - "f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9".into() - ); - let genesis = test_spec.genesis_block(); - assert_eq!( - view!(BlockView, &genesis).header_view().hash(), - "0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303".into() - ); - } + assert_eq!( + test_spec.state_root(), + "f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9".into() + ); + let genesis = test_spec.genesis_block(); + assert_eq!( + view!(BlockView, &genesis).header_view().hash(), + "0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303".into() + ); + } - #[test] - fn genesis_constructor() { - let _ = ::env_logger::try_init(); - let spec = Spec::new_test_constructor(); - let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()) - .unwrap(); - let state = State::from_existing( - db.boxed_clone(), - spec.state_root(), - spec.engine.account_start_nonce(0), - Default::default(), - ).unwrap(); - let expected = "0000000000000000000000000000000000000000000000000000000000000001".into(); - let address = 
"0000000000000000000000000000000000001337".into(); + #[test] + fn genesis_constructor() { + let _ = ::env_logger::try_init(); + let spec = Spec::new_test_constructor(); + let db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let state = State::from_existing( + db.boxed_clone(), + spec.state_root(), + spec.engine.account_start_nonce(0), + Default::default(), + ) + .unwrap(); + let expected = "0000000000000000000000000000000000000000000000000000000000000001".into(); + let address = "0000000000000000000000000000000000001337".into(); - assert_eq!(state.storage_at(&address, &H256::zero()).unwrap(), expected); - assert_eq!(state.balance(&address).unwrap(), 1.into()); - } + assert_eq!(state.storage_at(&address, &H256::zero()).unwrap(), expected); + assert_eq!(state.balance(&address).unwrap(), 1.into()); + } } diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index fea9444b1..581d7305d 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -16,729 +16,826 @@ //! Single account in the system. 
-use std::fmt; -use std::sync::Arc; -use std::collections::{HashMap, BTreeMap}; -use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak}; -use ethereum_types::{H256, U256, Address}; +use bytes::{Bytes, ToPretty}; use error::Error; +use ethereum_types::{Address, H256, U256}; +use ethtrie::{Result as TrieResult, SecTrieDB, TrieDB, TrieFactory}; +use hash::{keccak, KECCAK_EMPTY, KECCAK_NULL_RLP}; use hash_db::HashDB; use keccak_hasher::KeccakHasher; use kvdb::DBValue; -use bytes::{Bytes, ToPretty}; -use trie::{Trie, Recorder}; -use ethtrie::{TrieFactory, TrieDB, SecTrieDB, Result as TrieResult}; -use pod_account::*; -use rlp::{RlpStream, encode}; use lru_cache::LruCache; +use pod_account::*; +use rlp::{encode, RlpStream}; +use std::{ + collections::{BTreeMap, HashMap}, + fmt, + sync::Arc, +}; +use trie::{Recorder, Trie}; use types::basic_account::BasicAccount; -use std::cell::{RefCell, Cell}; +use std::cell::{Cell, RefCell}; const STORAGE_CACHE_ITEMS: usize = 8192; /// Boolean type for clean/dirty status. #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub enum Filth { - /// Data has not been changed. - Clean, - /// Data has been changed. - Dirty, + /// Data has not been changed. + Clean, + /// Data has been changed. + Dirty, } /// Single account in the system. /// Keeps track of changes to the code and storage. /// The changes are applied in `commit_storage` and `commit_code` pub struct Account { - // Balance of the account. - balance: U256, - // Nonce of the account. - nonce: U256, - // Trie-backed storage. - storage_root: H256, - // LRU Cache of the trie-backed storage. - // This is limited to `STORAGE_CACHE_ITEMS` recent queries - storage_cache: RefCell>, - // LRU Cache of the trie-backed storage for original value. - // This is only used when the initial storage root is different compared to - // what is in the database. That is, it is only used for new contracts. - original_storage_cache: Option<(H256, RefCell>)>, - // Modified storage. 
Accumulates changes to storage made in `set_storage` - // Takes precedence over `storage_cache`. - storage_changes: HashMap, - // Code hash of the account. - code_hash: H256, - // Size of the account code. - code_size: Option, - // Code cache of the account. - code_cache: Arc, - // Account code new or has been modified. - code_filth: Filth, - // Cached address hash. - address_hash: Cell>, + // Balance of the account. + balance: U256, + // Nonce of the account. + nonce: U256, + // Trie-backed storage. + storage_root: H256, + // LRU Cache of the trie-backed storage. + // This is limited to `STORAGE_CACHE_ITEMS` recent queries + storage_cache: RefCell>, + // LRU Cache of the trie-backed storage for original value. + // This is only used when the initial storage root is different compared to + // what is in the database. That is, it is only used for new contracts. + original_storage_cache: Option<(H256, RefCell>)>, + // Modified storage. Accumulates changes to storage made in `set_storage` + // Takes precedence over `storage_cache`. + storage_changes: HashMap, + // Code hash of the account. + code_hash: H256, + // Size of the account code. + code_size: Option, + // Code cache of the account. + code_cache: Arc, + // Account code new or has been modified. + code_filth: Filth, + // Cached address hash. 
+ address_hash: Cell>, } impl From for Account { - fn from(basic: BasicAccount) -> Self { - Account { - balance: basic.balance, - nonce: basic.nonce, - storage_root: basic.storage_root, - storage_cache: Self::empty_storage_cache(), - original_storage_cache: None, - storage_changes: HashMap::new(), - code_hash: basic.code_hash, - code_size: None, - code_cache: Arc::new(vec![]), - code_filth: Filth::Clean, - address_hash: Cell::new(None), - } - } + fn from(basic: BasicAccount) -> Self { + Account { + balance: basic.balance, + nonce: basic.nonce, + storage_root: basic.storage_root, + storage_cache: Self::empty_storage_cache(), + original_storage_cache: None, + storage_changes: HashMap::new(), + code_hash: basic.code_hash, + code_size: None, + code_cache: Arc::new(vec![]), + code_filth: Filth::Clean, + address_hash: Cell::new(None), + } + } } impl Account { - #[cfg(test)] - /// General constructor. - pub fn new(balance: U256, nonce: U256, storage: HashMap, code: Bytes) -> Account { - Account { - balance: balance, - nonce: nonce, - storage_root: KECCAK_NULL_RLP, - storage_cache: Self::empty_storage_cache(), - original_storage_cache: None, - storage_changes: storage, - code_hash: keccak(&code), - code_size: Some(code.len()), - code_cache: Arc::new(code), - code_filth: Filth::Dirty, - address_hash: Cell::new(None), - } - } + #[cfg(test)] + /// General constructor. 
+ pub fn new(balance: U256, nonce: U256, storage: HashMap, code: Bytes) -> Account { + Account { + balance: balance, + nonce: nonce, + storage_root: KECCAK_NULL_RLP, + storage_cache: Self::empty_storage_cache(), + original_storage_cache: None, + storage_changes: storage, + code_hash: keccak(&code), + code_size: Some(code.len()), + code_cache: Arc::new(code), + code_filth: Filth::Dirty, + address_hash: Cell::new(None), + } + } - fn empty_storage_cache() -> RefCell> { - RefCell::new(LruCache::new(STORAGE_CACHE_ITEMS)) - } + fn empty_storage_cache() -> RefCell> { + RefCell::new(LruCache::new(STORAGE_CACHE_ITEMS)) + } - /// General constructor. - pub fn from_pod(pod: PodAccount) -> Account { - Account { - balance: pod.balance, - nonce: pod.nonce, - storage_root: KECCAK_NULL_RLP, - storage_cache: Self::empty_storage_cache(), - original_storage_cache: None, - storage_changes: pod.storage.into_iter().collect(), - code_hash: pod.code.as_ref().map_or(KECCAK_EMPTY, |c| keccak(c)), - code_filth: Filth::Dirty, - code_size: Some(pod.code.as_ref().map_or(0, |c| c.len())), - code_cache: Arc::new(pod.code.map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c)), - address_hash: Cell::new(None), - } - } + /// General constructor. + pub fn from_pod(pod: PodAccount) -> Account { + Account { + balance: pod.balance, + nonce: pod.nonce, + storage_root: KECCAK_NULL_RLP, + storage_cache: Self::empty_storage_cache(), + original_storage_cache: None, + storage_changes: pod.storage.into_iter().collect(), + code_hash: pod.code.as_ref().map_or(KECCAK_EMPTY, |c| keccak(c)), + code_filth: Filth::Dirty, + code_size: Some(pod.code.as_ref().map_or(0, |c| c.len())), + code_cache: Arc::new(pod.code.map_or_else( + || { + warn!("POD account with unknown code is being created! Assuming no code."); + vec![] + }, + |c| c, + )), + address_hash: Cell::new(None), + } + } - /// Create a new account with the given balance. 
- pub fn new_basic(balance: U256, nonce: U256) -> Account { - Account { - balance: balance, - nonce: nonce, - storage_root: KECCAK_NULL_RLP, - storage_cache: Self::empty_storage_cache(), - original_storage_cache: None, - storage_changes: HashMap::new(), - code_hash: KECCAK_EMPTY, - code_cache: Arc::new(vec![]), - code_size: Some(0), - code_filth: Filth::Clean, - address_hash: Cell::new(None), - } - } + /// Create a new account with the given balance. + pub fn new_basic(balance: U256, nonce: U256) -> Account { + Account { + balance: balance, + nonce: nonce, + storage_root: KECCAK_NULL_RLP, + storage_cache: Self::empty_storage_cache(), + original_storage_cache: None, + storage_changes: HashMap::new(), + code_hash: KECCAK_EMPTY, + code_cache: Arc::new(vec![]), + code_size: Some(0), + code_filth: Filth::Clean, + address_hash: Cell::new(None), + } + } - /// Create a new account from RLP. - pub fn from_rlp(rlp: &[u8]) -> Result { - ::rlp::decode::(rlp) - .map(|ba| ba.into()) - .map_err(|e| e.into()) - } + /// Create a new account from RLP. + pub fn from_rlp(rlp: &[u8]) -> Result { + ::rlp::decode::(rlp) + .map(|ba| ba.into()) + .map_err(|e| e.into()) + } - /// Create a new contract account. - /// NOTE: make sure you use `init_code` on this before `commit`ing. - pub fn new_contract(balance: U256, nonce: U256, original_storage_root: H256) -> Account { - Account { - balance: balance, - nonce: nonce, - storage_root: KECCAK_NULL_RLP, - storage_cache: Self::empty_storage_cache(), - original_storage_cache: if original_storage_root == KECCAK_NULL_RLP { - None - } else { - Some((original_storage_root, Self::empty_storage_cache())) - }, - storage_changes: HashMap::new(), - code_hash: KECCAK_EMPTY, - code_cache: Arc::new(vec![]), - code_size: None, - code_filth: Filth::Clean, - address_hash: Cell::new(None), - } - } + /// Create a new contract account. + /// NOTE: make sure you use `init_code` on this before `commit`ing. 
+ pub fn new_contract(balance: U256, nonce: U256, original_storage_root: H256) -> Account { + Account { + balance: balance, + nonce: nonce, + storage_root: KECCAK_NULL_RLP, + storage_cache: Self::empty_storage_cache(), + original_storage_cache: if original_storage_root == KECCAK_NULL_RLP { + None + } else { + Some((original_storage_root, Self::empty_storage_cache())) + }, + storage_changes: HashMap::new(), + code_hash: KECCAK_EMPTY, + code_cache: Arc::new(vec![]), + code_size: None, + code_filth: Filth::Clean, + address_hash: Cell::new(None), + } + } - /// Set this account's code to the given code. - /// NOTE: Account should have been created with `new_contract()` - pub fn init_code(&mut self, code: Bytes) { - self.code_hash = keccak(&code); - self.code_cache = Arc::new(code); - self.code_size = Some(self.code_cache.len()); - self.code_filth = Filth::Dirty; - } + /// Set this account's code to the given code. + /// NOTE: Account should have been created with `new_contract()` + pub fn init_code(&mut self, code: Bytes) { + self.code_hash = keccak(&code); + self.code_cache = Arc::new(code); + self.code_size = Some(self.code_cache.len()); + self.code_filth = Filth::Dirty; + } - /// Reset this account's code to the given code. - pub fn reset_code(&mut self, code: Bytes) { - self.init_code(code); - } + /// Reset this account's code to the given code. + pub fn reset_code(&mut self, code: Bytes) { + self.init_code(code); + } - /// Reset this account's code and storage to given values. 
- pub fn reset_code_and_storage(&mut self, code: Arc, storage: HashMap) { - self.code_hash = keccak(&*code); - self.code_cache = code; - self.code_size = Some(self.code_cache.len()); - self.code_filth = Filth::Dirty; - self.storage_cache = Self::empty_storage_cache(); - self.storage_changes = storage; - if self.storage_root != KECCAK_NULL_RLP { - self.original_storage_cache = Some((self.storage_root, Self::empty_storage_cache())); - } - self.storage_root = KECCAK_NULL_RLP; - } + /// Reset this account's code and storage to given values. + pub fn reset_code_and_storage(&mut self, code: Arc, storage: HashMap) { + self.code_hash = keccak(&*code); + self.code_cache = code; + self.code_size = Some(self.code_cache.len()); + self.code_filth = Filth::Dirty; + self.storage_cache = Self::empty_storage_cache(); + self.storage_changes = storage; + if self.storage_root != KECCAK_NULL_RLP { + self.original_storage_cache = Some((self.storage_root, Self::empty_storage_cache())); + } + self.storage_root = KECCAK_NULL_RLP; + } - /// Set (and cache) the contents of the trie's storage at `key` to `value`. - pub fn set_storage(&mut self, key: H256, value: H256) { - self.storage_changes.insert(key, value); - } + /// Set (and cache) the contents of the trie's storage at `key` to `value`. + pub fn set_storage(&mut self, key: H256, value: H256) { + self.storage_changes.insert(key, value); + } - /// Get (and cache) the contents of the trie's storage at `key`. - /// Takes modified storage into account. - pub fn storage_at(&self, db: &HashDB, key: &H256) -> TrieResult { - if let Some(value) = self.cached_storage_at(key) { - return Ok(value); - } - Self::get_and_cache_storage( - &self.storage_root, - &mut self.storage_cache.borrow_mut(), - db, - key) - } + /// Get (and cache) the contents of the trie's storage at `key`. + /// Takes modified storage into account. 
+ pub fn storage_at(&self, db: &HashDB, key: &H256) -> TrieResult { + if let Some(value) = self.cached_storage_at(key) { + return Ok(value); + } + Self::get_and_cache_storage( + &self.storage_root, + &mut self.storage_cache.borrow_mut(), + db, + key, + ) + } - /// Get (and cache) the contents of the trie's storage at `key`. - /// Does not take modified storage into account. - pub fn original_storage_at(&self, db: &HashDB, key: &H256) -> TrieResult { - if let Some(value) = self.cached_original_storage_at(key) { - return Ok(value); - } - match &self.original_storage_cache { - Some((ref original_storage_root, ref original_storage_cache)) => - Self::get_and_cache_storage( - original_storage_root, - &mut original_storage_cache.borrow_mut(), - db, - key - ), - None => - Self::get_and_cache_storage( - &self.storage_root, - &mut self.storage_cache.borrow_mut(), - db, - key - ), - } - } + /// Get (and cache) the contents of the trie's storage at `key`. + /// Does not take modified storage into account. 
+ pub fn original_storage_at( + &self, + db: &HashDB, + key: &H256, + ) -> TrieResult { + if let Some(value) = self.cached_original_storage_at(key) { + return Ok(value); + } + match &self.original_storage_cache { + Some((ref original_storage_root, ref original_storage_cache)) => { + Self::get_and_cache_storage( + original_storage_root, + &mut original_storage_cache.borrow_mut(), + db, + key, + ) + } + None => Self::get_and_cache_storage( + &self.storage_root, + &mut self.storage_cache.borrow_mut(), + db, + key, + ), + } + } - fn get_and_cache_storage(storage_root: &H256, storage_cache: &mut LruCache, db: &HashDB, key: &H256) -> TrieResult { - let db = SecTrieDB::new(&db, storage_root)?; - let panicky_decoder = |bytes:&[u8]| ::rlp::decode(&bytes).expect("decoding db value failed"); - let item: U256 = db.get_with(key, panicky_decoder)?.unwrap_or_else(U256::zero); - let value: H256 = item.into(); - storage_cache.insert(key.clone(), value.clone()); - Ok(value) - } + fn get_and_cache_storage( + storage_root: &H256, + storage_cache: &mut LruCache, + db: &HashDB, + key: &H256, + ) -> TrieResult { + let db = SecTrieDB::new(&db, storage_root)?; + let panicky_decoder = + |bytes: &[u8]| ::rlp::decode(&bytes).expect("decoding db value failed"); + let item: U256 = db + .get_with(key, panicky_decoder)? + .unwrap_or_else(U256::zero); + let value: H256 = item.into(); + storage_cache.insert(key.clone(), value.clone()); + Ok(value) + } - /// Get cached storage value if any. Returns `None` if the - /// key is not in the cache. - pub fn cached_storage_at(&self, key: &H256) -> Option { - if let Some(value) = self.storage_changes.get(key) { - return Some(value.clone()) - } - self.cached_moved_original_storage_at(key) - } + /// Get cached storage value if any. Returns `None` if the + /// key is not in the cache. 
+ pub fn cached_storage_at(&self, key: &H256) -> Option { + if let Some(value) = self.storage_changes.get(key) { + return Some(value.clone()); + } + self.cached_moved_original_storage_at(key) + } - /// Get cached original storage value after last state commitment. Returns `None` if the key is not in the cache. - pub fn cached_original_storage_at(&self, key: &H256) -> Option { - match &self.original_storage_cache { - Some((_, ref original_storage_cache)) => { - if let Some(value) = original_storage_cache.borrow_mut().get_mut(key) { - Some(value.clone()) - } else { - None - } - }, - None => { - self.cached_moved_original_storage_at(key) - }, - } - } + /// Get cached original storage value after last state commitment. Returns `None` if the key is not in the cache. + pub fn cached_original_storage_at(&self, key: &H256) -> Option { + match &self.original_storage_cache { + Some((_, ref original_storage_cache)) => { + if let Some(value) = original_storage_cache.borrow_mut().get_mut(key) { + Some(value.clone()) + } else { + None + } + } + None => self.cached_moved_original_storage_at(key), + } + } - /// Get cached original storage value since last contract creation on this address. Returns `None` if the key is not in the cache. - fn cached_moved_original_storage_at(&self, key: &H256) -> Option { - // If storage root is empty RLP, then early return zero value. Practically, this makes it so that if - // `original_storage_cache` is used, then `storage_cache` will always remain empty. - if self.storage_root == KECCAK_NULL_RLP { - return Some(H256::new()); - } + /// Get cached original storage value since last contract creation on this address. Returns `None` if the key is not in the cache. + fn cached_moved_original_storage_at(&self, key: &H256) -> Option { + // If storage root is empty RLP, then early return zero value. Practically, this makes it so that if + // `original_storage_cache` is used, then `storage_cache` will always remain empty. 
+ if self.storage_root == KECCAK_NULL_RLP { + return Some(H256::new()); + } - if let Some(value) = self.storage_cache.borrow_mut().get_mut(key) { - Some(value.clone()) - } else { - None - } - } + if let Some(value) = self.storage_cache.borrow_mut().get_mut(key) { + Some(value.clone()) + } else { + None + } + } - /// return the balance associated with this account. - pub fn balance(&self) -> &U256 { &self.balance } + /// return the balance associated with this account. + pub fn balance(&self) -> &U256 { + &self.balance + } - /// return the nonce associated with this account. - pub fn nonce(&self) -> &U256 { &self.nonce } + /// return the nonce associated with this account. + pub fn nonce(&self) -> &U256 { + &self.nonce + } - /// return the code hash associated with this account. - pub fn code_hash(&self) -> H256 { - self.code_hash.clone() - } + /// return the code hash associated with this account. + pub fn code_hash(&self) -> H256 { + self.code_hash.clone() + } - /// return and cache `keccak(address)`, `address` must be the address of this - /// account. - pub fn address_hash(&self, address: &Address) -> H256 { - let hash = self.address_hash.get(); - hash.unwrap_or_else(|| { - let hash = keccak(address); - self.address_hash.set(Some(hash.clone())); - hash - }) - } + /// return and cache `keccak(address)`, `address` must be the address of this + /// account. + pub fn address_hash(&self, address: &Address) -> H256 { + let hash = self.address_hash.get(); + hash.unwrap_or_else(|| { + let hash = keccak(address); + self.address_hash.set(Some(hash.clone())); + hash + }) + } - /// returns the account's code. If `None` then the code cache isn't available - - /// get someone who knows to call `note_code`. - pub fn code(&self) -> Option> { - if self.code_hash != KECCAK_EMPTY && self.code_cache.is_empty() { - return None; - } - Some(self.code_cache.clone()) - } + /// returns the account's code. 
If `None` then the code cache isn't available - + /// get someone who knows to call `note_code`. + pub fn code(&self) -> Option> { + if self.code_hash != KECCAK_EMPTY && self.code_cache.is_empty() { + return None; + } + Some(self.code_cache.clone()) + } - /// returns the account's code size. If `None` then the code cache or code size cache isn't available - - /// get someone who knows to call `note_code`. - pub fn code_size(&self) -> Option { - self.code_size.clone() - } + /// returns the account's code size. If `None` then the code cache or code size cache isn't available - + /// get someone who knows to call `note_code`. + pub fn code_size(&self) -> Option { + self.code_size.clone() + } - #[cfg(test)] - /// Provide a byte array which hashes to the `code_hash`. returns the hash as a result. - pub fn note_code(&mut self, code: Bytes) -> Result<(), H256> { - let h = keccak(&code); - if self.code_hash == h { - self.code_cache = Arc::new(code); - self.code_size = Some(self.code_cache.len()); - Ok(()) - } else { - Err(h) - } - } + #[cfg(test)] + /// Provide a byte array which hashes to the `code_hash`. returns the hash as a result. + pub fn note_code(&mut self, code: Bytes) -> Result<(), H256> { + let h = keccak(&code); + if self.code_hash == h { + self.code_cache = Arc::new(code); + self.code_size = Some(self.code_cache.len()); + Ok(()) + } else { + Err(h) + } + } - /// Is `code_cache` valid; such that code is going to return Some? - pub fn is_cached(&self) -> bool { - !self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == KECCAK_EMPTY) - } + /// Is `code_cache` valid; such that code is going to return Some? + pub fn is_cached(&self) -> bool { + !self.code_cache.is_empty() + || (self.code_cache.is_empty() && self.code_hash == KECCAK_EMPTY) + } - /// Provide a database to get `code_hash`. Should not be called if it is a contract without code. Returns the cached code, if successful. 
- #[must_use] - pub fn cache_code(&mut self, db: &HashDB) -> Option> { - // TODO: fill out self.code_cache; - trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); + /// Provide a database to get `code_hash`. Should not be called if it is a contract without code. Returns the cached code, if successful. + #[must_use] + pub fn cache_code(&mut self, db: &HashDB) -> Option> { + // TODO: fill out self.code_cache; + trace!( + "Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", + self.is_cached(), + self.code_hash, + self.code_cache.pretty() + ); - if self.is_cached() { return Some(self.code_cache.clone()); } + if self.is_cached() { + return Some(self.code_cache.clone()); + } - match db.get(&self.code_hash) { - Some(x) => { - self.code_size = Some(x.len()); - self.code_cache = Arc::new(x.into_vec()); - Some(self.code_cache.clone()) - }, - _ => { - warn!("Failed reverse get of {}", self.code_hash); - None - }, - } - } + match db.get(&self.code_hash) { + Some(x) => { + self.code_size = Some(x.len()); + self.code_cache = Arc::new(x.into_vec()); + Some(self.code_cache.clone()) + } + _ => { + warn!("Failed reverse get of {}", self.code_hash); + None + } + } + } - /// Provide code to cache. For correctness, should be the correct code for the account. - pub fn cache_given_code(&mut self, code: Arc) { - trace!("Account::cache_given_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); + /// Provide code to cache. For correctness, should be the correct code for the account. 
+ pub fn cache_given_code(&mut self, code: Arc) { + trace!( + "Account::cache_given_code: ic={}; self.code_hash={:?}, self.code_cache={}", + self.is_cached(), + self.code_hash, + self.code_cache.pretty() + ); - self.code_size = Some(code.len()); - self.code_cache = code; - } + self.code_size = Some(code.len()); + self.code_cache = code; + } - /// Provide a database to get `code_size`. Should not be called if it is a contract without code. Returns whether - /// the cache succeeds. - #[must_use] - pub fn cache_code_size(&mut self, db: &HashDB) -> bool { - // TODO: fill out self.code_cache; - trace!("Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); - self.code_size.is_some() || - if self.code_hash != KECCAK_EMPTY { - match db.get(&self.code_hash) { - Some(x) => { - self.code_size = Some(x.len()); - true - }, - _ => { - warn!("Failed reverse get of {}", self.code_hash); - false - }, - } - } else { - // If the code hash is empty hash, then the code size is zero. - self.code_size = Some(0); - true - } - } + /// Provide a database to get `code_size`. Should not be called if it is a contract without code. Returns whether + /// the cache succeeds. + #[must_use] + pub fn cache_code_size(&mut self, db: &HashDB) -> bool { + // TODO: fill out self.code_cache; + trace!( + "Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", + self.is_cached(), + self.code_hash, + self.code_cache.pretty() + ); + self.code_size.is_some() + || if self.code_hash != KECCAK_EMPTY { + match db.get(&self.code_hash) { + Some(x) => { + self.code_size = Some(x.len()); + true + } + _ => { + warn!("Failed reverse get of {}", self.code_hash); + false + } + } + } else { + // If the code hash is empty hash, then the code size is zero. + self.code_size = Some(0); + true + } + } - /// Determine whether there are any un-`commit()`-ed storage-setting operations. 
- pub fn storage_is_clean(&self) -> bool { self.storage_changes.is_empty() } + /// Determine whether there are any un-`commit()`-ed storage-setting operations. + pub fn storage_is_clean(&self) -> bool { + self.storage_changes.is_empty() + } - /// Check if account has zero nonce, balance, no code and no storage. - /// - /// NOTE: Will panic if `!self.storage_is_clean()` - pub fn is_empty(&self) -> bool { - assert!(self.storage_is_clean(), "Account::is_empty() may only legally be called when storage is clean."); - self.is_null() && self.storage_root == KECCAK_NULL_RLP - } + /// Check if account has zero nonce, balance, no code and no storage. + /// + /// NOTE: Will panic if `!self.storage_is_clean()` + pub fn is_empty(&self) -> bool { + assert!( + self.storage_is_clean(), + "Account::is_empty() may only legally be called when storage is clean." + ); + self.is_null() && self.storage_root == KECCAK_NULL_RLP + } - /// Check if account has zero nonce, balance, no code. - pub fn is_null(&self) -> bool { - self.balance.is_zero() && - self.nonce.is_zero() && - self.code_hash == KECCAK_EMPTY - } + /// Check if account has zero nonce, balance, no code. + pub fn is_null(&self) -> bool { + self.balance.is_zero() && self.nonce.is_zero() && self.code_hash == KECCAK_EMPTY + } - /// Check if account is basic (Has no code). - pub fn is_basic(&self) -> bool { - self.code_hash == KECCAK_EMPTY - } + /// Check if account is basic (Has no code). + pub fn is_basic(&self) -> bool { + self.code_hash == KECCAK_EMPTY + } - /// Return the storage root associated with this account or None if it has been altered via the overlay. - pub fn storage_root(&self) -> Option { - if self.storage_is_clean() { - Some(self.storage_root) - } else { - None - } - } + /// Return the storage root associated with this account or None if it has been altered via the overlay. 
+ pub fn storage_root(&self) -> Option { + if self.storage_is_clean() { + Some(self.storage_root) + } else { + None + } + } - /// Return the original storage root of this account. - pub fn original_storage_root(&self) -> H256 { - if let Some((original_storage_root, _)) = self.original_storage_cache { - original_storage_root - } else { - self.storage_root - } - } + /// Return the original storage root of this account. + pub fn original_storage_root(&self) -> H256 { + if let Some((original_storage_root, _)) = self.original_storage_cache { + original_storage_root + } else { + self.storage_root + } + } - /// Whether the base storage root of this account is unchanged. - pub fn is_base_storage_root_unchanged(&self) -> bool { - self.original_storage_cache.is_none() - } + /// Whether the base storage root of this account is unchanged. + pub fn is_base_storage_root_unchanged(&self) -> bool { + self.original_storage_cache.is_none() + } - /// Storage root where the account changes are based upon. - pub fn base_storage_root(&self) -> H256 { - self.storage_root - } + /// Storage root where the account changes are based upon. + pub fn base_storage_root(&self) -> H256 { + self.storage_root + } - /// Return the storage overlay. - pub fn storage_changes(&self) -> &HashMap { &self.storage_changes } + /// Return the storage overlay. + pub fn storage_changes(&self) -> &HashMap { + &self.storage_changes + } - /// Increment the nonce of the account by one. - pub fn inc_nonce(&mut self) { - self.nonce = self.nonce.saturating_add(U256::from(1u8)); - } + /// Increment the nonce of the account by one. + pub fn inc_nonce(&mut self) { + self.nonce = self.nonce.saturating_add(U256::from(1u8)); + } - /// Increase account balance. - pub fn add_balance(&mut self, x: &U256) { - self.balance = self.balance.saturating_add(*x); - } + /// Increase account balance. + pub fn add_balance(&mut self, x: &U256) { + self.balance = self.balance.saturating_add(*x); + } - /// Decrease account balance. 
- /// Panics if balance is less than `x` - pub fn sub_balance(&mut self, x: &U256) { - assert!(self.balance >= *x); - self.balance = self.balance - *x; - } + /// Decrease account balance. + /// Panics if balance is less than `x` + pub fn sub_balance(&mut self, x: &U256) { + assert!(self.balance >= *x); + self.balance = self.balance - *x; + } - /// Commit the `storage_changes` to the backing DB and update `storage_root`. - pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) -> TrieResult<()> { - let mut t = trie_factory.from_existing(db, &mut self.storage_root)?; - for (k, v) in self.storage_changes.drain() { - // cast key and value to trait type, - // so we can call overloaded `to_bytes` method - match v.is_zero() { - true => t.remove(&k)?, - false => t.insert(&k, &encode(&U256::from(&*v)))?, - }; + /// Commit the `storage_changes` to the backing DB and update `storage_root`. + pub fn commit_storage( + &mut self, + trie_factory: &TrieFactory, + db: &mut HashDB, + ) -> TrieResult<()> { + let mut t = trie_factory.from_existing(db, &mut self.storage_root)?; + for (k, v) in self.storage_changes.drain() { + // cast key and value to trait type, + // so we can call overloaded `to_bytes` method + match v.is_zero() { + true => t.remove(&k)?, + false => t.insert(&k, &encode(&U256::from(&*v)))?, + }; - self.storage_cache.borrow_mut().insert(k, v); - } - self.original_storage_cache = None; - Ok(()) - } + self.storage_cache.borrow_mut().insert(k, v); + } + self.original_storage_cache = None; + Ok(()) + } - /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. 
- pub fn commit_code(&mut self, db: &mut HashDB) { - trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_filth == Filth::Dirty, self.code_cache.is_empty()); - match (self.code_filth == Filth::Dirty, self.code_cache.is_empty()) { - (true, true) => { - self.code_size = Some(0); - self.code_filth = Filth::Clean; - }, - (true, false) => { - db.emplace(self.code_hash.clone(), DBValue::from_slice(&*self.code_cache)); - self.code_size = Some(self.code_cache.len()); - self.code_filth = Filth::Clean; - }, - (false, _) => {}, - } - } + /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. + pub fn commit_code(&mut self, db: &mut HashDB) { + trace!( + "Commiting code of {:?} - {:?}, {:?}", + self, + self.code_filth == Filth::Dirty, + self.code_cache.is_empty() + ); + match (self.code_filth == Filth::Dirty, self.code_cache.is_empty()) { + (true, true) => { + self.code_size = Some(0); + self.code_filth = Filth::Clean; + } + (true, false) => { + db.emplace( + self.code_hash.clone(), + DBValue::from_slice(&*self.code_cache), + ); + self.code_size = Some(self.code_cache.len()); + self.code_filth = Filth::Clean; + } + (false, _) => {} + } + } - /// Export to RLP. - pub fn rlp(&self) -> Bytes { - let mut stream = RlpStream::new_list(4); - stream.append(&self.nonce); - stream.append(&self.balance); - stream.append(&self.storage_root); - stream.append(&self.code_hash); - stream.out() - } + /// Export to RLP. 
+ pub fn rlp(&self) -> Bytes { + let mut stream = RlpStream::new_list(4); + stream.append(&self.nonce); + stream.append(&self.balance); + stream.append(&self.storage_root); + stream.append(&self.code_hash); + stream.out() + } - /// Clone basic account data - pub fn clone_basic(&self) -> Account { - Account { - balance: self.balance.clone(), - nonce: self.nonce.clone(), - storage_root: self.storage_root.clone(), - storage_cache: Self::empty_storage_cache(), - original_storage_cache: self.original_storage_cache.as_ref().map(|(r, _)| (*r, Self::empty_storage_cache())), - storage_changes: HashMap::new(), - code_hash: self.code_hash.clone(), - code_size: self.code_size.clone(), - code_cache: self.code_cache.clone(), - code_filth: self.code_filth, - address_hash: self.address_hash.clone(), - } - } + /// Clone basic account data + pub fn clone_basic(&self) -> Account { + Account { + balance: self.balance.clone(), + nonce: self.nonce.clone(), + storage_root: self.storage_root.clone(), + storage_cache: Self::empty_storage_cache(), + original_storage_cache: self + .original_storage_cache + .as_ref() + .map(|(r, _)| (*r, Self::empty_storage_cache())), + storage_changes: HashMap::new(), + code_hash: self.code_hash.clone(), + code_size: self.code_size.clone(), + code_cache: self.code_cache.clone(), + code_filth: self.code_filth, + address_hash: self.address_hash.clone(), + } + } - /// Clone account data and dirty storage keys - pub fn clone_dirty(&self) -> Account { - let mut account = self.clone_basic(); - account.storage_changes = self.storage_changes.clone(); - account - } + /// Clone account data and dirty storage keys + pub fn clone_dirty(&self) -> Account { + let mut account = self.clone_basic(); + account.storage_changes = self.storage_changes.clone(); + account + } - /// Clone account data, dirty storage keys and cached storage keys. 
- pub fn clone_all(&self) -> Account { - let mut account = self.clone_dirty(); - account.storage_cache = self.storage_cache.clone(); - account.original_storage_cache = self.original_storage_cache.clone(); - account - } + /// Clone account data, dirty storage keys and cached storage keys. + pub fn clone_all(&self) -> Account { + let mut account = self.clone_dirty(); + account.storage_cache = self.storage_cache.clone(); + account.original_storage_cache = self.original_storage_cache.clone(); + account + } - /// Replace self with the data from other account merging storage cache. - /// Basic account data and all modifications are overwritten - /// with new values. - pub fn overwrite_with(&mut self, other: Account) { - self.balance = other.balance; - self.nonce = other.nonce; - self.code_hash = other.code_hash; - self.code_filth = other.code_filth; - self.code_cache = other.code_cache; - self.code_size = other.code_size; - self.address_hash = other.address_hash; - if self.storage_root == other.storage_root { - let mut cache = self.storage_cache.borrow_mut(); - for (k, v) in other.storage_cache.into_inner() { - cache.insert(k, v); - } - } else { - self.storage_cache = other.storage_cache; - } - self.original_storage_cache = other.original_storage_cache; - self.storage_root = other.storage_root; - self.storage_changes = other.storage_changes; - } + /// Replace self with the data from other account merging storage cache. + /// Basic account data and all modifications are overwritten + /// with new values. 
+ pub fn overwrite_with(&mut self, other: Account) { + self.balance = other.balance; + self.nonce = other.nonce; + self.code_hash = other.code_hash; + self.code_filth = other.code_filth; + self.code_cache = other.code_cache; + self.code_size = other.code_size; + self.address_hash = other.address_hash; + if self.storage_root == other.storage_root { + let mut cache = self.storage_cache.borrow_mut(); + for (k, v) in other.storage_cache.into_inner() { + cache.insert(k, v); + } + } else { + self.storage_cache = other.storage_cache; + } + self.original_storage_cache = other.original_storage_cache; + self.storage_root = other.storage_root; + self.storage_changes = other.storage_changes; + } } // light client storage proof. impl Account { - /// Prove a storage key's existence or nonexistence in the account's storage - /// trie. - /// `storage_key` is the hash of the desired storage key, meaning - /// this will only work correctly under a secure trie. - pub fn prove_storage(&self, db: &HashDB, storage_key: H256) -> TrieResult<(Vec, H256)> { - let mut recorder = Recorder::new(); + /// Prove a storage key's existence or nonexistence in the account's storage + /// trie. + /// `storage_key` is the hash of the desired storage key, meaning + /// this will only work correctly under a secure trie. + pub fn prove_storage( + &self, + db: &HashDB, + storage_key: H256, + ) -> TrieResult<(Vec, H256)> { + let mut recorder = Recorder::new(); - let trie = TrieDB::new(&db, &self.storage_root)?; - let item: U256 = { - let panicky_decoder = |bytes:&[u8]| ::rlp::decode(bytes).expect("decoding db value failed"); - let query = (&mut recorder, panicky_decoder); - trie.get_with(&storage_key, query)?.unwrap_or_else(U256::zero) - }; + let trie = TrieDB::new(&db, &self.storage_root)?; + let item: U256 = { + let panicky_decoder = + |bytes: &[u8]| ::rlp::decode(bytes).expect("decoding db value failed"); + let query = (&mut recorder, panicky_decoder); + trie.get_with(&storage_key, query)? 
+ .unwrap_or_else(U256::zero) + }; - Ok((recorder.drain().into_iter().map(|r| r.data).collect(), item.into())) - } + Ok(( + recorder.drain().into_iter().map(|r| r.data).collect(), + item.into(), + )) + } } impl fmt::Debug for Account { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Account") - .field("balance", &self.balance) - .field("nonce", &self.nonce) - .field("code", &self.code()) - .field("storage", &self.storage_changes.iter().collect::>()) - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Account") + .field("balance", &self.balance) + .field("nonce", &self.nonce) + .field("code", &self.code()) + .field( + "storage", + &self.storage_changes.iter().collect::>(), + ) + .finish() + } } #[cfg(test)] mod tests { - use rlp_compress::{compress, decompress, snapshot_swapper}; - use ethereum_types::{H256, Address}; - use journaldb::new_memory_db; - use bytes::Bytes; - use super::*; - use account_db::*; + use super::*; + use account_db::*; + use bytes::Bytes; + use ethereum_types::{Address, H256}; + use journaldb::new_memory_db; + use rlp_compress::{compress, decompress, snapshot_swapper}; - #[test] - fn account_compress() { - let raw = Account::new_basic(2.into(), 4.into()).rlp(); - let compact_vec = compress(&raw, snapshot_swapper()); - assert!(raw.len() > compact_vec.len()); - let again_raw = decompress(&compact_vec, snapshot_swapper()); - assert_eq!(raw, again_raw.into_vec()); - } + #[test] + fn account_compress() { + let raw = Account::new_basic(2.into(), 4.into()).rlp(); + let compact_vec = compress(&raw, snapshot_swapper()); + assert!(raw.len() > compact_vec.len()); + let again_raw = decompress(&compact_vec, snapshot_swapper()); + assert_eq!(raw, again_raw.into_vec()); + } - #[test] - fn storage_at() { - let mut db = new_memory_db(); - let mut db = AccountDBMut::new(&mut db, &Address::new()); - let rlp = { - let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); - 
a.set_storage(0x00u64.into(), 0x1234u64.into()); - a.commit_storage(&Default::default(), &mut db).unwrap(); - a.init_code(vec![]); - a.commit_code(&mut db); - a.rlp() - }; + #[test] + fn storage_at() { + let mut db = new_memory_db(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); + let rlp = { + let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); + a.set_storage(0x00u64.into(), 0x1234u64.into()); + a.commit_storage(&Default::default(), &mut db).unwrap(); + a.init_code(vec![]); + a.commit_code(&mut db); + a.rlp() + }; - let a = Account::from_rlp(&rlp).expect("decoding db value failed"); - assert_eq!(a.storage_root().unwrap(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into()); - assert_eq!(a.storage_at(&db.immutable(), &0x00u64.into()).unwrap(), 0x1234u64.into()); - assert_eq!(a.storage_at(&db.immutable(), &0x01u64.into()).unwrap(), H256::default()); - } + let a = Account::from_rlp(&rlp).expect("decoding db value failed"); + assert_eq!( + a.storage_root().unwrap(), + "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into() + ); + assert_eq!( + a.storage_at(&db.immutable(), &0x00u64.into()).unwrap(), + 0x1234u64.into() + ); + assert_eq!( + a.storage_at(&db.immutable(), &0x01u64.into()).unwrap(), + H256::default() + ); + } - #[test] - fn note_code() { - let mut db = new_memory_db(); - let mut db = AccountDBMut::new(&mut db, &Address::new()); + #[test] + fn note_code() { + let mut db = new_memory_db(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); - let rlp = { - let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); - a.init_code(vec![0x55, 0x44, 0xffu8]); - a.commit_code(&mut db); - a.rlp() - }; + let rlp = { + let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); + a.init_code(vec![0x55, 0x44, 0xffu8]); + a.commit_code(&mut db); + a.rlp() + }; - let mut a = Account::from_rlp(&rlp).expect("decoding db value failed"); - 
assert!(a.cache_code(&db.immutable()).is_some()); + let mut a = Account::from_rlp(&rlp).expect("decoding db value failed"); + assert!(a.cache_code(&db.immutable()).is_some()); - let mut a = Account::from_rlp(&rlp).expect("decoding db value failed"); - assert_eq!(a.note_code(vec![0x55, 0x44, 0xffu8]), Ok(())); - } + let mut a = Account::from_rlp(&rlp).expect("decoding db value failed"); + assert_eq!(a.note_code(vec![0x55, 0x44, 0xffu8]), Ok(())); + } - #[test] - fn commit_storage() { - let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); - let mut db = new_memory_db(); - let mut db = AccountDBMut::new(&mut db, &Address::new()); - a.set_storage(0.into(), 0x1234.into()); - assert_eq!(a.storage_root(), None); - a.commit_storage(&Default::default(), &mut db).unwrap(); - assert_eq!(a.storage_root().unwrap(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into()); - } + #[test] + fn commit_storage() { + let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); + let mut db = new_memory_db(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); + a.set_storage(0.into(), 0x1234.into()); + assert_eq!(a.storage_root(), None); + a.commit_storage(&Default::default(), &mut db).unwrap(); + assert_eq!( + a.storage_root().unwrap(), + "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into() + ); + } - #[test] - fn commit_remove_commit_storage() { - let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); - let mut db = new_memory_db(); - let mut db = AccountDBMut::new(&mut db, &Address::new()); - a.set_storage(0.into(), 0x1234.into()); - a.commit_storage(&Default::default(), &mut db).unwrap(); - a.set_storage(1.into(), 0x1234.into()); - a.commit_storage(&Default::default(), &mut db).unwrap(); - a.set_storage(1.into(), 0.into()); - a.commit_storage(&Default::default(), &mut db).unwrap(); - assert_eq!(a.storage_root().unwrap(), 
"c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into()); - } + #[test] + fn commit_remove_commit_storage() { + let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); + let mut db = new_memory_db(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); + a.set_storage(0.into(), 0x1234.into()); + a.commit_storage(&Default::default(), &mut db).unwrap(); + a.set_storage(1.into(), 0x1234.into()); + a.commit_storage(&Default::default(), &mut db).unwrap(); + a.set_storage(1.into(), 0.into()); + a.commit_storage(&Default::default(), &mut db).unwrap(); + assert_eq!( + a.storage_root().unwrap(), + "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into() + ); + } - #[test] - fn commit_code() { - let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); - let mut db = new_memory_db(); - let mut db = AccountDBMut::new(&mut db, &Address::new()); - a.init_code(vec![0x55, 0x44, 0xffu8]); - assert_eq!(a.code_filth, Filth::Dirty); - assert_eq!(a.code_size(), Some(3)); - a.commit_code(&mut db); - assert_eq!(a.code_hash(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb".into()); - } + #[test] + fn commit_code() { + let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); + let mut db = new_memory_db(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); + a.init_code(vec![0x55, 0x44, 0xffu8]); + assert_eq!(a.code_filth, Filth::Dirty); + assert_eq!(a.code_size(), Some(3)); + a.commit_code(&mut db); + assert_eq!( + a.code_hash(), + "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb".into() + ); + } - #[test] - fn reset_code() { - let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); - let mut db = new_memory_db(); - let mut db = AccountDBMut::new(&mut db, &Address::new()); - a.init_code(vec![0x55, 0x44, 0xffu8]); - assert_eq!(a.code_filth, Filth::Dirty); - a.commit_code(&mut db); - assert_eq!(a.code_filth, Filth::Clean); - 
assert_eq!(a.code_hash(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb".into()); - a.reset_code(vec![0x55]); - assert_eq!(a.code_filth, Filth::Dirty); - a.commit_code(&mut db); - assert_eq!(a.code_hash(), "37bf2238b11b68cdc8382cece82651b59d3c3988873b6e0f33d79694aa45f1be".into()); - } + #[test] + fn reset_code() { + let mut a = Account::new_contract(69.into(), 0.into(), KECCAK_NULL_RLP); + let mut db = new_memory_db(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); + a.init_code(vec![0x55, 0x44, 0xffu8]); + assert_eq!(a.code_filth, Filth::Dirty); + a.commit_code(&mut db); + assert_eq!(a.code_filth, Filth::Clean); + assert_eq!( + a.code_hash(), + "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb".into() + ); + a.reset_code(vec![0x55]); + assert_eq!(a.code_filth, Filth::Dirty); + a.commit_code(&mut db); + assert_eq!( + a.code_hash(), + "37bf2238b11b68cdc8382cece82651b59d3c3988873b6e0f33d79694aa45f1be".into() + ); + } - #[test] - fn rlpio() { - let a = Account::new(69u8.into(), 0u8.into(), HashMap::new(), Bytes::new()); - let b = Account::from_rlp(&a.rlp()).unwrap(); - assert_eq!(a.balance(), b.balance()); - assert_eq!(a.nonce(), b.nonce()); - assert_eq!(a.code_hash(), b.code_hash()); - assert_eq!(a.storage_root(), b.storage_root()); - } + #[test] + fn rlpio() { + let a = Account::new(69u8.into(), 0u8.into(), HashMap::new(), Bytes::new()); + let b = Account::from_rlp(&a.rlp()).unwrap(); + assert_eq!(a.balance(), b.balance()); + assert_eq!(a.nonce(), b.nonce()); + assert_eq!(a.code_hash(), b.code_hash()); + assert_eq!(a.storage_root(), b.storage_root()); + } - #[test] - fn new_account() { - let a = Account::new(69u8.into(), 0u8.into(), HashMap::new(), Bytes::new()); - assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); - assert_eq!(*a.balance(), 69u8.into()); - assert_eq!(*a.nonce(), 0u8.into()); - 
assert_eq!(a.code_hash(), KECCAK_EMPTY); - assert_eq!(a.storage_root().unwrap(), KECCAK_NULL_RLP); - } + #[test] + fn new_account() { + let a = Account::new(69u8.into(), 0u8.into(), HashMap::new(), Bytes::new()); + assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); + assert_eq!(*a.balance(), 69u8.into()); + assert_eq!(*a.nonce(), 0u8.into()); + assert_eq!(a.code_hash(), KECCAK_EMPTY); + assert_eq!(a.storage_root().unwrap(), KECCAK_NULL_RLP); + } } diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index 11e73edb3..e2d9c4d70 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -21,54 +21,57 @@ //! should become general over time to the point where not even a //! merkle trie is strictly necessary. -use std::collections::{HashSet, HashMap}; -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; -use state::Account; -use parking_lot::Mutex; use ethereum_types::{Address, H256}; -use memory_db::MemoryDB; use hash_db::{AsHashDB, HashDB}; -use kvdb::DBValue; -use keccak_hasher::KeccakHasher; use journaldb::AsKeyedHashDB; +use keccak_hasher::KeccakHasher; +use kvdb::DBValue; +use memory_db::MemoryDB; +use parking_lot::Mutex; +use state::Account; /// State backend. See module docs for more details. pub trait Backend: Send { - /// Treat the backend as a read-only hashdb. - fn as_hash_db(&self) -> &HashDB; + /// Treat the backend as a read-only hashdb. + fn as_hash_db(&self) -> &HashDB; - /// Treat the backend as a writeable hashdb. - fn as_hash_db_mut(&mut self) -> &mut HashDB; + /// Treat the backend as a writeable hashdb. + fn as_hash_db_mut(&mut self) -> &mut HashDB; - /// Add an account entry to the cache. - fn add_to_account_cache(&mut self, addr: Address, data: Option, modified: bool); + /// Add an account entry to the cache. 
+ fn add_to_account_cache(&mut self, addr: Address, data: Option, modified: bool); - /// Add a global code cache entry. This doesn't need to worry about canonicality because - /// it simply maps hashes to raw code and will always be correct in the absence of - /// hash collisions. - fn cache_code(&self, hash: H256, code: Arc>); + /// Add a global code cache entry. This doesn't need to worry about canonicality because + /// it simply maps hashes to raw code and will always be correct in the absence of + /// hash collisions. + fn cache_code(&self, hash: H256, code: Arc>); - /// Get basic copy of the cached account. Not required to include storage. - /// Returns 'None' if cache is disabled or if the account is not cached. - fn get_cached_account(&self, addr: &Address) -> Option>; + /// Get basic copy of the cached account. Not required to include storage. + /// Returns 'None' if cache is disabled or if the account is not cached. + fn get_cached_account(&self, addr: &Address) -> Option>; - /// Get value from a cached account. - /// `None` is passed to the closure if the account entry cached - /// is known not to exist. - /// `None` is returned if the entry is not cached. - fn get_cached(&self, a: &Address, f: F) -> Option - where F: FnOnce(Option<&mut Account>) -> U; + /// Get value from a cached account. + /// `None` is passed to the closure if the account entry cached + /// is known not to exist. + /// `None` is returned if the entry is not cached. + fn get_cached(&self, a: &Address, f: F) -> Option + where + F: FnOnce(Option<&mut Account>) -> U; - /// Get cached code based on hash. - fn get_cached_code(&self, hash: &H256) -> Option>>; + /// Get cached code based on hash. + fn get_cached_code(&self, hash: &H256) -> Option>>; - /// Note that an account with the given address is non-null. - fn note_non_null_account(&self, address: &Address); + /// Note that an account with the given address is non-null. 
+ fn note_non_null_account(&self, address: &Address); - /// Check whether an account is known to be empty. Returns true if known to be - /// empty, false otherwise. - fn is_known_null(&self, address: &Address) -> bool; + /// Check whether an account is known to be empty. Returns true if known to be + /// empty, false otherwise. + fn is_known_null(&self, address: &Address) -> bool; } /// A raw backend used to check proofs of execution. @@ -81,57 +84,76 @@ pub trait Backend: Send { pub struct ProofCheck(MemoryDB); impl ProofCheck { - /// Create a new `ProofCheck` backend from the given state items. - pub fn new(proof: &[DBValue]) -> Self { - let mut db = journaldb::new_memory_db(); - for item in proof { db.insert(item); } - ProofCheck(db) - } + /// Create a new `ProofCheck` backend from the given state items. + pub fn new(proof: &[DBValue]) -> Self { + let mut db = journaldb::new_memory_db(); + for item in proof { + db.insert(item); + } + ProofCheck(db) + } } impl journaldb::KeyedHashDB for ProofCheck { - fn keys(&self) -> HashMap { self.0.keys() } + fn keys(&self) -> HashMap { + self.0.keys() + } } impl HashDB for ProofCheck { - fn get(&self, key: &H256) -> Option { - self.0.get(key) - } + fn get(&self, key: &H256) -> Option { + self.0.get(key) + } - fn contains(&self, key: &H256) -> bool { - self.0.contains(key) - } + fn contains(&self, key: &H256) -> bool { + self.0.contains(key) + } - fn insert(&mut self, value: &[u8]) -> H256 { - self.0.insert(value) - } + fn insert(&mut self, value: &[u8]) -> H256 { + self.0.insert(value) + } - fn emplace(&mut self, key: H256, value: DBValue) { - self.0.emplace(key, value) - } + fn emplace(&mut self, key: H256, value: DBValue) { + self.0.emplace(key, value) + } - fn remove(&mut self, _key: &H256) { } + fn remove(&mut self, _key: &H256) {} } impl AsHashDB for ProofCheck { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn 
as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } impl Backend for ProofCheck { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } - fn add_to_account_cache(&mut self, _addr: Address, _data: Option, _modified: bool) {} - fn cache_code(&self, _hash: H256, _code: Arc>) {} - fn get_cached_account(&self, _addr: &Address) -> Option> { None } - fn get_cached(&self, _a: &Address, _f: F) -> Option - where F: FnOnce(Option<&mut Account>) -> U - { - None - } - fn get_cached_code(&self, _hash: &H256) -> Option>> { None } - fn note_non_null_account(&self, _address: &Address) {} - fn is_known_null(&self, _address: &Address) -> bool { false } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } + fn add_to_account_cache(&mut self, _addr: Address, _data: Option, _modified: bool) {} + fn cache_code(&self, _hash: H256, _code: Arc>) {} + fn get_cached_account(&self, _addr: &Address) -> Option> { + None + } + fn get_cached(&self, _a: &Address, _f: F) -> Option + where + F: FnOnce(Option<&mut Account>) -> U, + { + None + } + fn get_cached_code(&self, _hash: &H256) -> Option>> { + None + } + fn note_non_null_account(&self, _address: &Address) {} + fn is_known_null(&self, _address: &Address) -> bool { + false + } } /// Proving state backend. @@ -140,107 +162,128 @@ impl Backend for ProofCheck { /// /// This doesn't cache anything or rely on the canonical state caches. pub struct Proving { - base: H, // state we're proving values from. - changed: MemoryDB, // changed state via insertions. - proof: Mutex>, + base: H, // state we're proving values from. + changed: MemoryDB, // changed state via insertions. 
+ proof: Mutex>, } impl AsKeyedHashDB for Proving { - fn as_keyed_hash_db(&self) -> &journaldb::KeyedHashDB { self } + fn as_keyed_hash_db(&self) -> &journaldb::KeyedHashDB { + self + } } -impl + Send + Sync> AsHashDB for Proving { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } +impl + Send + Sync> AsHashDB + for Proving +{ + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } impl journaldb::KeyedHashDB for Proving { - fn keys(&self) -> HashMap { - let mut keys = self.base.as_keyed_hash_db().keys(); - keys.extend(self.changed.keys()); - keys - } + fn keys(&self) -> HashMap { + let mut keys = self.base.as_keyed_hash_db().keys(); + keys.extend(self.changed.keys()); + keys + } } -impl + Send + Sync> HashDB for Proving { - fn get(&self, key: &H256) -> Option { - match self.base.as_hash_db().get(key) { - Some(val) => { - self.proof.lock().insert(val.clone()); - Some(val) - } - None => self.changed.get(key) - } - } +impl + Send + Sync> HashDB + for Proving +{ + fn get(&self, key: &H256) -> Option { + match self.base.as_hash_db().get(key) { + Some(val) => { + self.proof.lock().insert(val.clone()); + Some(val) + } + None => self.changed.get(key), + } + } - fn contains(&self, key: &H256) -> bool { - self.get(key).is_some() - } + fn contains(&self, key: &H256) -> bool { + self.get(key).is_some() + } - fn insert(&mut self, value: &[u8]) -> H256 { - self.changed.insert(value) - } + fn insert(&mut self, value: &[u8]) -> H256 { + self.changed.insert(value) + } - fn emplace(&mut self, key: H256, value: DBValue) { - self.changed.emplace(key, value) - } + fn emplace(&mut self, key: H256, value: DBValue) { + self.changed.emplace(key, value) + } - fn remove(&mut self, key: &H256) { - // only remove from `changed` - if self.changed.contains(key) { - self.changed.remove(key) - } - } + fn remove(&mut self, key: &H256) { + // only remove from `changed` + if self.changed.contains(key) 
{ + self.changed.remove(key) + } + } } impl + Send + Sync> Backend for Proving { - fn as_hash_db(&self) -> &HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } - fn add_to_account_cache(&mut self, _: Address, _: Option, _: bool) { } + fn add_to_account_cache(&mut self, _: Address, _: Option, _: bool) {} - fn cache_code(&self, _: H256, _: Arc>) { } + fn cache_code(&self, _: H256, _: Arc>) {} - fn get_cached_account(&self, _: &Address) -> Option> { None } + fn get_cached_account(&self, _: &Address) -> Option> { + None + } - fn get_cached(&self, _: &Address, _: F) -> Option - where F: FnOnce(Option<&mut Account>) -> U - { - None - } + fn get_cached(&self, _: &Address, _: F) -> Option + where + F: FnOnce(Option<&mut Account>) -> U, + { + None + } - fn get_cached_code(&self, _: &H256) -> Option>> { None } - fn note_non_null_account(&self, _: &Address) { } - fn is_known_null(&self, _: &Address) -> bool { false } + fn get_cached_code(&self, _: &H256) -> Option>> { + None + } + fn note_non_null_account(&self, _: &Address) {} + fn is_known_null(&self, _: &Address) -> bool { + false + } } impl> Proving { - /// Create a new `Proving` over a base database. - /// This will store all values ever fetched from that base. - pub fn new(base: H) -> Self { - Proving { - base: base, - changed: journaldb::new_memory_db(), - proof: Mutex::new(HashSet::new()), - } - } + /// Create a new `Proving` over a base database. + /// This will store all values ever fetched from that base. + pub fn new(base: H) -> Self { + Proving { + base: base, + changed: journaldb::new_memory_db(), + proof: Mutex::new(HashSet::new()), + } + } - /// Consume the backend, extracting the gathered proof in lexicographical order - /// by value. 
- pub fn extract_proof(self) -> Vec { - self.proof.into_inner().into_iter().collect() - } + /// Consume the backend, extracting the gathered proof in lexicographical order + /// by value. + pub fn extract_proof(self) -> Vec { + self.proof.into_inner().into_iter().collect() + } } impl + Clone> Clone for Proving { - fn clone(&self) -> Self { - Proving { - base: self.base.clone(), - changed: self.changed.clone(), - proof: Mutex::new(self.proof.lock().clone()), - } - } + fn clone(&self) -> Self { + Proving { + base: self.base.clone(), + changed: self.changed.clone(), + proof: Mutex::new(self.proof.lock().clone()), + } + } } /// A basic backend. Just wraps the given database, directly inserting into and deleting from @@ -248,27 +291,34 @@ impl + Clone> Clone for Proving { pub struct Basic(pub H); impl + Send + Sync> Backend for Basic { - fn as_hash_db(&self) -> &HashDB { - self.0.as_hash_db() - } + fn as_hash_db(&self) -> &HashDB { + self.0.as_hash_db() + } - fn as_hash_db_mut(&mut self) -> &mut HashDB { - self.0.as_hash_db_mut() - } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self.0.as_hash_db_mut() + } - fn add_to_account_cache(&mut self, _: Address, _: Option, _: bool) { } + fn add_to_account_cache(&mut self, _: Address, _: Option, _: bool) {} - fn cache_code(&self, _: H256, _: Arc>) { } + fn cache_code(&self, _: H256, _: Arc>) {} - fn get_cached_account(&self, _: &Address) -> Option> { None } + fn get_cached_account(&self, _: &Address) -> Option> { + None + } - fn get_cached(&self, _: &Address, _: F) -> Option - where F: FnOnce(Option<&mut Account>) -> U - { - None - } + fn get_cached(&self, _: &Address, _: F) -> Option + where + F: FnOnce(Option<&mut Account>) -> U, + { + None + } - fn get_cached_code(&self, _: &H256) -> Option>> { None } - fn note_non_null_account(&self, _: &Address) { } - fn is_known_null(&self, _: &Address) -> bool { false } + fn get_cached_code(&self, _: &H256) -> Option>> { + None + } + fn note_non_null_account(&self, _: &Address) {} + 
fn is_known_null(&self, _: &Address) -> bool { + false + } } diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index ddad10c40..283099496 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -19,57 +19,57 @@ //! Unconfirmed sub-states are managed with `checkpoint`s which may be canonicalized //! or rolled back. -use std::cell::{RefCell, RefMut}; -use std::collections::hash_map::Entry; -use std::collections::{HashMap, BTreeMap, BTreeSet, HashSet}; -use std::fmt; -use std::sync::Arc; -use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY}; +use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP}; +use std::{ + cell::{RefCell, RefMut}, + collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, + fmt, + sync::Arc, +}; -use types::receipt::{Receipt, TransactionOutcome}; -use machine::EthereumMachine as Machine; -use vm::EnvInfo; use error::Error; +use executed::{Executed, ExecutionError}; use executive::{Executive, TransactOptions}; -use factory::Factories; -use trace::{self, FlatTrace, VMTrace}; +use factory::{Factories, VmFactory}; +use machine::EthereumMachine as Machine; use pod_account::*; use pod_state::{self, PodState}; -use types::basic_account::BasicAccount; -use executed::{Executed, ExecutionError}; -use types::state_diff::StateDiff; -use types::transaction::SignedTransaction; use state_db::StateDB; -use factory::VmFactory; +use trace::{self, FlatTrace, VMTrace}; +use types::{ + basic_account::BasicAccount, + receipt::{Receipt, TransactionOutcome}, + state_diff::StateDiff, + transaction::SignedTransaction, +}; +use vm::EnvInfo; -use ethereum_types::{H256, U256, Address}; -use hash_db::{HashDB, AsHashDB}; +use bytes::Bytes; +use ethereum_types::{Address, H256, U256}; +use hash_db::{AsHashDB, HashDB}; use keccak_hasher::KeccakHasher; use kvdb::DBValue; -use bytes::Bytes; -use trie::{Trie, TrieError, Recorder}; -use ethtrie::{TrieDB, Result as TrieResult}; +use ethtrie::{Result as TrieResult, TrieDB}; +use trie::{Recorder, Trie, TrieError}; 
mod account; mod substate; pub mod backend; -pub use self::account::Account; -pub use self::backend::Backend; -pub use self::substate::Substate; +pub use self::{account::Account, backend::Backend, substate::Substate}; /// Used to return information about an `State::apply` operation. pub struct ApplyOutcome { - /// The receipt for the applied transaction. - pub receipt: Receipt, - /// The output of the applied transaction. - pub output: Bytes, - /// The trace for the applied transaction, empty if tracing was not produced. - pub trace: Vec, - /// The VM trace for the applied transaction, None if tracing was not produced. - pub vm_trace: Option + /// The receipt for the applied transaction. + pub receipt: Receipt, + /// The output of the applied transaction. + pub output: Bytes, + /// The trace for the applied transaction, empty if tracing was not produced. + pub trace: Vec, + /// The VM trace for the applied transaction, None if tracing was not produced. + pub vm_trace: Option, } /// Result type for the execution ("application") of a transaction. @@ -78,28 +78,28 @@ pub type ApplyResult = Result, Error>; /// Return type of proof validity check. #[derive(Debug, Clone)] pub enum ProvedExecution { - /// Proof wasn't enough to complete execution. - BadProof, - /// The transaction failed, but not due to a bad proof. - Failed(ExecutionError), - /// The transaction successfully completed with the given proof. - Complete(Box), + /// Proof wasn't enough to complete execution. + BadProof, + /// The transaction failed, but not due to a bad proof. + Failed(ExecutionError), + /// The transaction successfully completed with the given proof. + Complete(Box), } #[derive(Eq, PartialEq, Clone, Copy, Debug)] /// Account modification state. Used to check if the account was /// Modified in between commits and overall. enum AccountState { - /// Account was loaded from disk and never modified in this state object. 
- CleanFresh, - /// Account was loaded from the global cache and never modified. - CleanCached, - /// Account has been modified and is not committed to the trie yet. - /// This is set if any of the account data is changed, including - /// storage and code. - Dirty, - /// Account was modified and committed to the trie. - Committed, + /// Account was loaded from disk and never modified in this state object. + CleanFresh, + /// Account was loaded from the global cache and never modified. + CleanCached, + /// Account has been modified and is not committed to the trie yet. + /// This is set if any of the account data is changed, including + /// storage and code. + Dirty, + /// Account was modified and committed to the trie. + Committed, } #[derive(Debug)] @@ -108,157 +108,159 @@ enum AccountState { /// Account entry can contain existing (`Some`) or non-existing /// account (`None`) struct AccountEntry { - /// Account entry. `None` if account known to be non-existant. - account: Option, - /// Unmodified account balance. - old_balance: Option, - /// Entry state. - state: AccountState, + /// Account entry. `None` if account known to be non-existant. + account: Option, + /// Unmodified account balance. + old_balance: Option, + /// Entry state. + state: AccountState, } // Account cache item. Contains account data and // modification state impl AccountEntry { - fn is_dirty(&self) -> bool { - self.state == AccountState::Dirty - } + fn is_dirty(&self) -> bool { + self.state == AccountState::Dirty + } - fn exists_and_is_null(&self) -> bool { - self.account.as_ref().map_or(false, |a| a.is_null()) - } + fn exists_and_is_null(&self) -> bool { + self.account.as_ref().map_or(false, |a| a.is_null()) + } - /// Clone dirty data into new `AccountEntry`. This includes - /// basic account data and modified storage keys. - /// Returns None if clean. 
- fn clone_if_dirty(&self) -> Option { - match self.is_dirty() { - true => Some(self.clone_dirty()), - false => None, - } - } + /// Clone dirty data into new `AccountEntry`. This includes + /// basic account data and modified storage keys. + /// Returns None if clean. + fn clone_if_dirty(&self) -> Option { + match self.is_dirty() { + true => Some(self.clone_dirty()), + false => None, + } + } - /// Clone dirty data into new `AccountEntry`. This includes - /// basic account data and modified storage keys. - fn clone_dirty(&self) -> AccountEntry { - AccountEntry { - old_balance: self.old_balance, - account: self.account.as_ref().map(Account::clone_dirty), - state: self.state, - } - } + /// Clone dirty data into new `AccountEntry`. This includes + /// basic account data and modified storage keys. + fn clone_dirty(&self) -> AccountEntry { + AccountEntry { + old_balance: self.old_balance, + account: self.account.as_ref().map(Account::clone_dirty), + state: self.state, + } + } - // Create a new account entry and mark it as dirty. - fn new_dirty(account: Option) -> AccountEntry { - AccountEntry { - old_balance: account.as_ref().map(|a| a.balance().clone()), - account: account, - state: AccountState::Dirty, - } - } + // Create a new account entry and mark it as dirty. + fn new_dirty(account: Option) -> AccountEntry { + AccountEntry { + old_balance: account.as_ref().map(|a| a.balance().clone()), + account: account, + state: AccountState::Dirty, + } + } - // Create a new account entry and mark it as clean. - fn new_clean(account: Option) -> AccountEntry { - AccountEntry { - old_balance: account.as_ref().map(|a| a.balance().clone()), - account: account, - state: AccountState::CleanFresh, - } - } + // Create a new account entry and mark it as clean. 
+ fn new_clean(account: Option) -> AccountEntry { + AccountEntry { + old_balance: account.as_ref().map(|a| a.balance().clone()), + account: account, + state: AccountState::CleanFresh, + } + } - // Create a new account entry and mark it as clean and cached. - fn new_clean_cached(account: Option) -> AccountEntry { - AccountEntry { - old_balance: account.as_ref().map(|a| a.balance().clone()), - account: account, - state: AccountState::CleanCached, - } - } + // Create a new account entry and mark it as clean and cached. + fn new_clean_cached(account: Option) -> AccountEntry { + AccountEntry { + old_balance: account.as_ref().map(|a| a.balance().clone()), + account: account, + state: AccountState::CleanCached, + } + } - // Replace data with another entry but preserve storage cache. - fn overwrite_with(&mut self, other: AccountEntry) { - self.state = other.state; - match other.account { - Some(acc) => { - if let Some(ref mut ours) = self.account { - ours.overwrite_with(acc); - } else { - self.account = Some(acc); - } - }, - None => self.account = None, - } - } + // Replace data with another entry but preserve storage cache. + fn overwrite_with(&mut self, other: AccountEntry) { + self.state = other.state; + match other.account { + Some(acc) => { + if let Some(ref mut ours) = self.account { + ours.overwrite_with(acc); + } else { + self.account = Some(acc); + } + } + None => self.account = None, + } + } } /// Check the given proof of execution. /// `Err(ExecutionError::Internal)` indicates failure, everything else indicates /// a successful proof (as the transaction itself may be poorly chosen). 
pub fn check_proof( - proof: &[DBValue], - root: H256, - transaction: &SignedTransaction, - machine: &Machine, - env_info: &EnvInfo, + proof: &[DBValue], + root: H256, + transaction: &SignedTransaction, + machine: &Machine, + env_info: &EnvInfo, ) -> ProvedExecution { - let backend = self::backend::ProofCheck::new(proof); - let mut factories = Factories::default(); - factories.accountdb = ::account_db::Factory::Plain; + let backend = self::backend::ProofCheck::new(proof); + let mut factories = Factories::default(); + factories.accountdb = ::account_db::Factory::Plain; - let res = State::from_existing( - backend, - root, - machine.account_start_nonce(env_info.number), - factories - ); + let res = State::from_existing( + backend, + root, + machine.account_start_nonce(env_info.number), + factories, + ); - let mut state = match res { - Ok(state) => state, - Err(_) => return ProvedExecution::BadProof, - }; + let mut state = match res { + Ok(state) => state, + Err(_) => return ProvedExecution::BadProof, + }; - let options = TransactOptions::with_no_tracing().save_output_from_contract(); - match state.execute(env_info, machine, transaction, options, true) { - Ok(executed) => ProvedExecution::Complete(Box::new(executed)), - Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof, - Err(e) => ProvedExecution::Failed(e), - } + let options = TransactOptions::with_no_tracing().save_output_from_contract(); + match state.execute(env_info, machine, transaction, options, true) { + Ok(executed) => ProvedExecution::Complete(Box::new(executed)), + Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof, + Err(e) => ProvedExecution::Failed(e), + } } /// Prove a `virtual` transaction on the given state. /// Returns `None` when the transacion could not be proved, /// and a proof otherwise. 
pub fn prove_transaction_virtual + Send + Sync>( - db: H, - root: H256, - transaction: &SignedTransaction, - machine: &Machine, - env_info: &EnvInfo, - factories: Factories, + db: H, + root: H256, + transaction: &SignedTransaction, + machine: &Machine, + env_info: &EnvInfo, + factories: Factories, ) -> Option<(Bytes, Vec)> { - use self::backend::Proving; + use self::backend::Proving; - let backend = Proving::new(db); - let res = State::from_existing( - backend, - root, - machine.account_start_nonce(env_info.number), - factories, - ); + let backend = Proving::new(db); + let res = State::from_existing( + backend, + root, + machine.account_start_nonce(env_info.number), + factories, + ); - let mut state = match res { - Ok(state) => state, - Err(_) => return None, - }; + let mut state = match res { + Ok(state) => state, + Err(_) => return None, + }; - let options = TransactOptions::with_no_tracing().dont_check_nonce().save_output_from_contract(); - match state.execute(env_info, machine, transaction, options, true) { - Err(ExecutionError::Internal(_)) => None, - Err(e) => { - trace!(target: "state", "Proved call failed: {}", e); - Some((Vec::new(), state.drop().1.extract_proof())) - } - Ok(res) => Some((res.output, state.drop().1.extract_proof())), - } + let options = TransactOptions::with_no_tracing() + .dont_check_nonce() + .save_output_from_contract(); + match state.execute(env_info, machine, transaction, options, true) { + Err(ExecutionError::Internal(_)) => None, + Err(e) => { + trace!(target: "state", "Proved call failed: {}", e); + Some((Vec::new(), state.drop().1.extract_proof())) + } + Ok(res) => Some((res.output, state.drop().1.extract_proof())), + } } /// Representation of the entire state of all accounts in the system. @@ -307,634 +309,773 @@ pub fn prove_transaction_virtual + Send + Syn /// backed-up values are moved into a parent checkpoint (if any). 
/// pub struct State { - db: B, - root: H256, - cache: RefCell>, - // The original account is preserved in - checkpoints: RefCell>>>, - account_start_nonce: U256, - factories: Factories, + db: B, + root: H256, + cache: RefCell>, + // The original account is preserved in + checkpoints: RefCell>>>, + account_start_nonce: U256, + factories: Factories, } #[derive(Copy, Clone)] enum RequireCache { - None, - CodeSize, - Code, + None, + CodeSize, + Code, } /// Mode of dealing with null accounts. #[derive(PartialEq)] pub enum CleanupMode<'a> { - /// Create accounts which would be null. - ForceCreate, - /// Don't delete null accounts upon touching, but also don't create them. - NoEmpty, - /// Mark all touched accounts. - TrackTouched(&'a mut HashSet
), + /// Create accounts which would be null. + ForceCreate, + /// Don't delete null accounts upon touching, but also don't create them. + NoEmpty, + /// Mark all touched accounts. + TrackTouched(&'a mut HashSet
), } /// Provides subset of `State` methods to query state information pub trait StateInfo { - /// Get the nonce of account `a`. - fn nonce(&self, a: &Address) -> TrieResult; + /// Get the nonce of account `a`. + fn nonce(&self, a: &Address) -> TrieResult; - /// Get the balance of account `a`. - fn balance(&self, a: &Address) -> TrieResult; + /// Get the balance of account `a`. + fn balance(&self, a: &Address) -> TrieResult; - /// Mutate storage of account `address` so that it is `value` for `key`. - fn storage_at(&self, address: &Address, key: &H256) -> TrieResult; + /// Mutate storage of account `address` so that it is `value` for `key`. + fn storage_at(&self, address: &Address, key: &H256) -> TrieResult; - /// Get accounts' code. - fn code(&self, a: &Address) -> TrieResult>>; + /// Get accounts' code. + fn code(&self, a: &Address) -> TrieResult>>; } impl StateInfo for State { - fn nonce(&self, a: &Address) -> TrieResult { State::nonce(self, a) } - fn balance(&self, a: &Address) -> TrieResult { State::balance(self, a) } - fn storage_at(&self, address: &Address, key: &H256) -> TrieResult { State::storage_at(self, address, key) } - fn code(&self, address: &Address) -> TrieResult>> { State::code(self, address) } + fn nonce(&self, a: &Address) -> TrieResult { + State::nonce(self, a) + } + fn balance(&self, a: &Address) -> TrieResult { + State::balance(self, a) + } + fn storage_at(&self, address: &Address, key: &H256) -> TrieResult { + State::storage_at(self, address, key) + } + fn code(&self, address: &Address) -> TrieResult>> { + State::code(self, address) + } } const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with valid root. Creating a SecTrieDB with a valid root will not fail. \ Therefore creating a SecTrieDB with this state's root will not fail."; impl State { - /// Creates new state with empty state root - /// Used for tests. 
- pub fn new(mut db: B, account_start_nonce: U256, factories: Factories) -> State { - let mut root = H256::new(); - { - // init trie and reset root to null - let _ = factories.trie.create(db.as_hash_db_mut(), &mut root); - } + /// Creates new state with empty state root + /// Used for tests. + pub fn new(mut db: B, account_start_nonce: U256, factories: Factories) -> State { + let mut root = H256::new(); + { + // init trie and reset root to null + let _ = factories.trie.create(db.as_hash_db_mut(), &mut root); + } - State { - db: db, - root: root, - cache: RefCell::new(HashMap::new()), - checkpoints: RefCell::new(Vec::new()), - account_start_nonce: account_start_nonce, - factories: factories, - } - } + State { + db: db, + root: root, + cache: RefCell::new(HashMap::new()), + checkpoints: RefCell::new(Vec::new()), + account_start_nonce: account_start_nonce, + factories: factories, + } + } - /// Creates new state with existing state root - pub fn from_existing(db: B, root: H256, account_start_nonce: U256, factories: Factories) -> TrieResult> { - if !db.as_hash_db().contains(&root) { - return Err(Box::new(TrieError::InvalidStateRoot(root))); - } + /// Creates new state with existing state root + pub fn from_existing( + db: B, + root: H256, + account_start_nonce: U256, + factories: Factories, + ) -> TrieResult> { + if !db.as_hash_db().contains(&root) { + return Err(Box::new(TrieError::InvalidStateRoot(root))); + } - let state = State { - db: db, - root: root, - cache: RefCell::new(HashMap::new()), - checkpoints: RefCell::new(Vec::new()), - account_start_nonce: account_start_nonce, - factories: factories - }; + let state = State { + db: db, + root: root, + cache: RefCell::new(HashMap::new()), + checkpoints: RefCell::new(Vec::new()), + account_start_nonce: account_start_nonce, + factories: factories, + }; - Ok(state) - } + Ok(state) + } - /// Get a VM factory that can execute on this state. 
- pub fn vm_factory(&self) -> VmFactory { - self.factories.vm.clone() - } + /// Get a VM factory that can execute on this state. + pub fn vm_factory(&self) -> VmFactory { + self.factories.vm.clone() + } - /// Create a recoverable checkpoint of this state. Return the checkpoint index. - pub fn checkpoint(&mut self) -> usize { - let checkpoints = self.checkpoints.get_mut(); - let index = checkpoints.len(); - checkpoints.push(HashMap::new()); - index - } + /// Create a recoverable checkpoint of this state. Return the checkpoint index. + pub fn checkpoint(&mut self) -> usize { + let checkpoints = self.checkpoints.get_mut(); + let index = checkpoints.len(); + checkpoints.push(HashMap::new()); + index + } - /// Merge last checkpoint with previous. - pub fn discard_checkpoint(&mut self) { - // merge with previous checkpoint - let last = self.checkpoints.get_mut().pop(); - if let Some(mut checkpoint) = last { - if let Some(ref mut prev) = self.checkpoints.get_mut().last_mut() { - if prev.is_empty() { - **prev = checkpoint; - } else { - for (k, v) in checkpoint.drain() { - prev.entry(k).or_insert(v); - } - } - } - } - } + /// Merge last checkpoint with previous. + pub fn discard_checkpoint(&mut self) { + // merge with previous checkpoint + let last = self.checkpoints.get_mut().pop(); + if let Some(mut checkpoint) = last { + if let Some(ref mut prev) = self.checkpoints.get_mut().last_mut() { + if prev.is_empty() { + **prev = checkpoint; + } else { + for (k, v) in checkpoint.drain() { + prev.entry(k).or_insert(v); + } + } + } + } + } - /// Revert to the last checkpoint and discard it. - pub fn revert_to_checkpoint(&mut self) { - if let Some(mut checkpoint) = self.checkpoints.get_mut().pop() { - for (k, v) in checkpoint.drain() { - match v { - Some(v) => { - match self.cache.get_mut().entry(k) { - Entry::Occupied(mut e) => { - // Merge checkpointed changes back into the main account - // storage preserving the cache. 
- e.get_mut().overwrite_with(v); - }, - Entry::Vacant(e) => { - e.insert(v); - } - } - }, - None => { - if let Entry::Occupied(e) = self.cache.get_mut().entry(k) { - if e.get().is_dirty() { - e.remove(); - } - } - } - } - } - } - } + /// Revert to the last checkpoint and discard it. + pub fn revert_to_checkpoint(&mut self) { + if let Some(mut checkpoint) = self.checkpoints.get_mut().pop() { + for (k, v) in checkpoint.drain() { + match v { + Some(v) => { + match self.cache.get_mut().entry(k) { + Entry::Occupied(mut e) => { + // Merge checkpointed changes back into the main account + // storage preserving the cache. + e.get_mut().overwrite_with(v); + } + Entry::Vacant(e) => { + e.insert(v); + } + } + } + None => { + if let Entry::Occupied(e) = self.cache.get_mut().entry(k) { + if e.get().is_dirty() { + e.remove(); + } + } + } + } + } + } + } - fn insert_cache(&self, address: &Address, account: AccountEntry) { - // Dirty account which is not in the cache means this is a new account. - // It goes directly into the checkpoint as there's nothing to rever to. - // - // In all other cases account is read as clean first, and after that made - // dirty in and added to the checkpoint with `note_cache`. - let is_dirty = account.is_dirty(); - let old_value = self.cache.borrow_mut().insert(*address, account); - if is_dirty { - if let Some(ref mut checkpoint) = self.checkpoints.borrow_mut().last_mut() { - checkpoint.entry(*address).or_insert(old_value); - } - } - } + fn insert_cache(&self, address: &Address, account: AccountEntry) { + // Dirty account which is not in the cache means this is a new account. + // It goes directly into the checkpoint as there's nothing to rever to. + // + // In all other cases account is read as clean first, and after that made + // dirty in and added to the checkpoint with `note_cache`. 
+ let is_dirty = account.is_dirty(); + let old_value = self.cache.borrow_mut().insert(*address, account); + if is_dirty { + if let Some(ref mut checkpoint) = self.checkpoints.borrow_mut().last_mut() { + checkpoint.entry(*address).or_insert(old_value); + } + } + } - fn note_cache(&self, address: &Address) { - if let Some(ref mut checkpoint) = self.checkpoints.borrow_mut().last_mut() { - checkpoint.entry(*address) - .or_insert_with(|| self.cache.borrow().get(address).map(AccountEntry::clone_dirty)); - } - } + fn note_cache(&self, address: &Address) { + if let Some(ref mut checkpoint) = self.checkpoints.borrow_mut().last_mut() { + checkpoint.entry(*address).or_insert_with(|| { + self.cache + .borrow() + .get(address) + .map(AccountEntry::clone_dirty) + }); + } + } - /// Destroy the current object and return root and database. - pub fn drop(mut self) -> (H256, B) { - self.propagate_to_global_cache(); - (self.root, self.db) - } + /// Destroy the current object and return root and database. + pub fn drop(mut self) -> (H256, B) { + self.propagate_to_global_cache(); + (self.root, self.db) + } - /// Destroy the current object and return single account data. - pub fn into_account(self, account: &Address) -> TrieResult<(Option>, HashMap)> { - // TODO: deconstruct without cloning. - let account = self.require(account, true)?; - Ok((account.code().clone(), account.storage_changes().clone())) - } + /// Destroy the current object and return single account data. + pub fn into_account( + self, + account: &Address, + ) -> TrieResult<(Option>, HashMap)> { + // TODO: deconstruct without cloning. + let account = self.require(account, true)?; + Ok((account.code().clone(), account.storage_changes().clone())) + } - /// Return reference to root - pub fn root(&self) -> &H256 { - &self.root - } + /// Return reference to root + pub fn root(&self) -> &H256 { + &self.root + } - /// Create a new contract at address `contract`. 
If there is already an account at the address - /// it will have its code reset, ready for `init_code()`. - pub fn new_contract(&mut self, contract: &Address, balance: U256, nonce_offset: U256) -> TrieResult<()> { - let original_storage_root = self.original_storage_root(contract)?; - let (nonce, overflow) = self.account_start_nonce.overflowing_add(nonce_offset); - if overflow { - return Err(Box::new(TrieError::DecoderError(H256::from(contract), - rlp::DecoderError::Custom("Nonce overflow".into())))); - } - self.insert_cache(contract, AccountEntry::new_dirty(Some(Account::new_contract(balance, nonce, original_storage_root)))); - Ok(()) - } + /// Create a new contract at address `contract`. If there is already an account at the address + /// it will have its code reset, ready for `init_code()`. + pub fn new_contract( + &mut self, + contract: &Address, + balance: U256, + nonce_offset: U256, + ) -> TrieResult<()> { + let original_storage_root = self.original_storage_root(contract)?; + let (nonce, overflow) = self.account_start_nonce.overflowing_add(nonce_offset); + if overflow { + return Err(Box::new(TrieError::DecoderError( + H256::from(contract), + rlp::DecoderError::Custom("Nonce overflow".into()), + ))); + } + self.insert_cache( + contract, + AccountEntry::new_dirty(Some(Account::new_contract( + balance, + nonce, + original_storage_root, + ))), + ); + Ok(()) + } - /// Remove an existing account. - pub fn kill_account(&mut self, account: &Address) { - self.insert_cache(account, AccountEntry::new_dirty(None)); - } + /// Remove an existing account. + pub fn kill_account(&mut self, account: &Address) { + self.insert_cache(account, AccountEntry::new_dirty(None)); + } - /// Determine whether an account exists. - pub fn exists(&self, a: &Address) -> TrieResult { - // Bloom filter does not contain empty accounts, so it is important here to - // check if account exists in the database directly before EIP-161 is in effect. 
- self.ensure_cached(a, RequireCache::None, false, |a| a.is_some()) - } + /// Determine whether an account exists. + pub fn exists(&self, a: &Address) -> TrieResult { + // Bloom filter does not contain empty accounts, so it is important here to + // check if account exists in the database directly before EIP-161 is in effect. + self.ensure_cached(a, RequireCache::None, false, |a| a.is_some()) + } - /// Determine whether an account exists and if not empty. - pub fn exists_and_not_null(&self, a: &Address) -> TrieResult { - self.ensure_cached(a, RequireCache::None, false, |a| a.map_or(false, |a| !a.is_null())) - } + /// Determine whether an account exists and if not empty. + pub fn exists_and_not_null(&self, a: &Address) -> TrieResult { + self.ensure_cached(a, RequireCache::None, false, |a| { + a.map_or(false, |a| !a.is_null()) + }) + } - /// Determine whether an account exists and has code or non-zero nonce. - pub fn exists_and_has_code_or_nonce(&self, a: &Address) -> TrieResult { - self.ensure_cached(a, RequireCache::CodeSize, false, - |a| a.map_or(false, |a| a.code_hash() != KECCAK_EMPTY || *a.nonce() != self.account_start_nonce)) - } + /// Determine whether an account exists and has code or non-zero nonce. + pub fn exists_and_has_code_or_nonce(&self, a: &Address) -> TrieResult { + self.ensure_cached(a, RequireCache::CodeSize, false, |a| { + a.map_or(false, |a| { + a.code_hash() != KECCAK_EMPTY || *a.nonce() != self.account_start_nonce + }) + }) + } - /// Get the balance of account `a`. - pub fn balance(&self, a: &Address) -> TrieResult { - self.ensure_cached(a, RequireCache::None, true, - |a| a.as_ref().map_or(U256::zero(), |account| *account.balance())) - } + /// Get the balance of account `a`. + pub fn balance(&self, a: &Address) -> TrieResult { + self.ensure_cached(a, RequireCache::None, true, |a| { + a.as_ref() + .map_or(U256::zero(), |account| *account.balance()) + }) + } - /// Get the nonce of account `a`. 
- pub fn nonce(&self, a: &Address) -> TrieResult { - self.ensure_cached(a, RequireCache::None, true, - |a| a.as_ref().map_or(self.account_start_nonce, |account| *account.nonce())) - } + /// Get the nonce of account `a`. + pub fn nonce(&self, a: &Address) -> TrieResult { + self.ensure_cached(a, RequireCache::None, true, |a| { + a.as_ref() + .map_or(self.account_start_nonce, |account| *account.nonce()) + }) + } - /// Whether the base storage root of an account remains unchanged. - pub fn is_base_storage_root_unchanged(&self, a: &Address) -> TrieResult { - Ok(self.ensure_cached(a, RequireCache::None, true, - |a| a.as_ref().map(|account| account.is_base_storage_root_unchanged()))? - .unwrap_or(true)) - } + /// Whether the base storage root of an account remains unchanged. + pub fn is_base_storage_root_unchanged(&self, a: &Address) -> TrieResult { + Ok(self + .ensure_cached(a, RequireCache::None, true, |a| { + a.as_ref() + .map(|account| account.is_base_storage_root_unchanged()) + })? + .unwrap_or(true)) + } - /// Get the storage root of account `a`. - pub fn storage_root(&self, a: &Address) -> TrieResult> { - self.ensure_cached(a, RequireCache::None, true, - |a| a.as_ref().and_then(|account| account.storage_root())) - } + /// Get the storage root of account `a`. + pub fn storage_root(&self, a: &Address) -> TrieResult> { + self.ensure_cached(a, RequireCache::None, true, |a| { + a.as_ref().and_then(|account| account.storage_root()) + }) + } - /// Get the original storage root since last commit of account `a`. - pub fn original_storage_root(&self, a: &Address) -> TrieResult { - Ok(self.ensure_cached(a, RequireCache::None, true, - |a| a.as_ref().map(|account| account.original_storage_root()))? - .unwrap_or(KECCAK_NULL_RLP)) - } + /// Get the original storage root since last commit of account `a`. 
+ pub fn original_storage_root(&self, a: &Address) -> TrieResult { + Ok(self + .ensure_cached(a, RequireCache::None, true, |a| { + a.as_ref().map(|account| account.original_storage_root()) + })? + .unwrap_or(KECCAK_NULL_RLP)) + } - /// Get the value of storage at a specific checkpoint. - pub fn checkpoint_storage_at(&self, start_checkpoint_index: usize, address: &Address, key: &H256) -> TrieResult> { - #[must_use] - enum ReturnKind { - /// Use original storage at value at this address. - OriginalAt, - /// The checkpoint storage value is the same as the checkpoint storage value at the next checkpoint. - SameAsNext, - } + /// Get the value of storage at a specific checkpoint. + pub fn checkpoint_storage_at( + &self, + start_checkpoint_index: usize, + address: &Address, + key: &H256, + ) -> TrieResult> { + #[must_use] + enum ReturnKind { + /// Use original storage at value at this address. + OriginalAt, + /// The checkpoint storage value is the same as the checkpoint storage value at the next checkpoint. + SameAsNext, + } - let kind = { - let checkpoints = self.checkpoints.borrow(); + let kind = { + let checkpoints = self.checkpoints.borrow(); - if start_checkpoint_index >= checkpoints.len() { - // The checkpoint was not found. Return None. - return Ok(None); - } + if start_checkpoint_index >= checkpoints.len() { + // The checkpoint was not found. Return None. + return Ok(None); + } - let mut kind = None; + let mut kind = None; - for checkpoint in checkpoints.iter().skip(start_checkpoint_index) { - match checkpoint.get(address) { - // The account exists at this checkpoint. - Some(Some(AccountEntry { account: Some(ref account), .. })) => { - if let Some(value) = account.cached_storage_at(key) { - return Ok(Some(value)); - } else { - // This account has checkpoint entry, but the key is not in the entry's cache. We can use - // original_storage_at if current account's original storage root is the same as checkpoint - // account's original storage root. 
Otherwise, the account must be a newly created contract. - if account.base_storage_root() == self.original_storage_root(address)? { - kind = Some(ReturnKind::OriginalAt); - break - } else { - // If account base storage root is different from the original storage root since last - // commit, then it can only be created from a new contract, where the base storage root - // would always be empty. Note that this branch is actually never called, because - // `cached_storage_at` handled this case. - warn!(target: "state", "Trying to get an account's cached storage value, but base storage root does not equal to original storage root! Assuming the value is empty."); - return Ok(Some(H256::new())); - } - } - }, - // The account didn't exist at that point. Return empty value. - Some(Some(AccountEntry { account: None, .. })) => return Ok(Some(H256::new())), - // The value was not cached at that checkpoint, meaning it was not modified at all. - Some(None) => { - kind = Some(ReturnKind::OriginalAt); - break - }, - // This key does not have a checkpoint entry. - None => { - kind = Some(ReturnKind::SameAsNext); - }, - } - } + for checkpoint in checkpoints.iter().skip(start_checkpoint_index) { + match checkpoint.get(address) { + // The account exists at this checkpoint. + Some(Some(AccountEntry { + account: Some(ref account), + .. + })) => { + if let Some(value) = account.cached_storage_at(key) { + return Ok(Some(value)); + } else { + // This account has checkpoint entry, but the key is not in the entry's cache. We can use + // original_storage_at if current account's original storage root is the same as checkpoint + // account's original storage root. Otherwise, the account must be a newly created contract. + if account.base_storage_root() == self.original_storage_root(address)? 
{ + kind = Some(ReturnKind::OriginalAt); + break; + } else { + // If account base storage root is different from the original storage root since last + // commit, then it can only be created from a new contract, where the base storage root + // would always be empty. Note that this branch is actually never called, because + // `cached_storage_at` handled this case. + warn!(target: "state", "Trying to get an account's cached storage value, but base storage root does not equal to original storage root! Assuming the value is empty."); + return Ok(Some(H256::new())); + } + } + } + // The account didn't exist at that point. Return empty value. + Some(Some(AccountEntry { account: None, .. })) => return Ok(Some(H256::new())), + // The value was not cached at that checkpoint, meaning it was not modified at all. + Some(None) => { + kind = Some(ReturnKind::OriginalAt); + break; + } + // This key does not have a checkpoint entry. + None => { + kind = Some(ReturnKind::SameAsNext); + } + } + } - kind.expect("start_checkpoint_index is checked to be below checkpoints_len; for loop above must have been executed at least once; it will either early return, or set the kind value to Some; qed") - }; + kind.expect("start_checkpoint_index is checked to be below checkpoints_len; for loop above must have been executed at least once; it will either early return, or set the kind value to Some; qed") + }; - match kind { - ReturnKind::SameAsNext => { - // If we reached here, all previous SameAsNext failed to early return. It means that the value we want - // to fetch is the same as current. - Ok(Some(self.storage_at(address, key)?)) - }, - ReturnKind::OriginalAt => Ok(Some(self.original_storage_at(address, key)?)), - } - } + match kind { + ReturnKind::SameAsNext => { + // If we reached here, all previous SameAsNext failed to early return. It means that the value we want + // to fetch is the same as current. 
+ Ok(Some(self.storage_at(address, key)?)) + } + ReturnKind::OriginalAt => Ok(Some(self.original_storage_at(address, key)?)), + } + } - fn storage_at_inner( - &self, address: &Address, key: &H256, f_cached_at: FCachedStorageAt, f_at: FStorageAt, - ) -> TrieResult where - FCachedStorageAt: Fn(&Account, &H256) -> Option, - FStorageAt: Fn(&Account, &HashDB, &H256) -> TrieResult - { - // Storage key search and update works like this: - // 1. If there's an entry for the account in the local cache check for the key and return it if found. - // 2. If there's an entry for the account in the global cache check for the key or load it into that account. - // 3. If account is missing in the global cache load it into the local cache and cache the key there. + fn storage_at_inner( + &self, + address: &Address, + key: &H256, + f_cached_at: FCachedStorageAt, + f_at: FStorageAt, + ) -> TrieResult + where + FCachedStorageAt: Fn(&Account, &H256) -> Option, + FStorageAt: Fn(&Account, &HashDB, &H256) -> TrieResult, + { + // Storage key search and update works like this: + // 1. If there's an entry for the account in the local cache check for the key and return it if found. + // 2. If there's an entry for the account in the global cache check for the key or load it into that account. + // 3. If account is missing in the global cache load it into the local cache and cache the key there. 
- { - // check local cache first without updating - let local_cache = self.cache.borrow_mut(); - let mut local_account = None; - if let Some(maybe_acc) = local_cache.get(address) { - match maybe_acc.account { - Some(ref account) => { - if let Some(value) = f_cached_at(account, key) { - return Ok(value); - } else { - local_account = Some(maybe_acc); - } - }, - _ => return Ok(H256::new()), - } - } - // check the global cache and and cache storage key there if found, - let trie_res = self.db.get_cached(address, |acc| match acc { - None => Ok(H256::new()), - Some(a) => { - let account_db = self.factories.accountdb.readonly(self.db.as_hash_db(), a.address_hash(address)); - f_at(a, account_db.as_hash_db(), key) - } - }); + { + // check local cache first without updating + let local_cache = self.cache.borrow_mut(); + let mut local_account = None; + if let Some(maybe_acc) = local_cache.get(address) { + match maybe_acc.account { + Some(ref account) => { + if let Some(value) = f_cached_at(account, key) { + return Ok(value); + } else { + local_account = Some(maybe_acc); + } + } + _ => return Ok(H256::new()), + } + } + // check the global cache and and cache storage key there if found, + let trie_res = self.db.get_cached(address, |acc| match acc { + None => Ok(H256::new()), + Some(a) => { + let account_db = self + .factories + .accountdb + .readonly(self.db.as_hash_db(), a.address_hash(address)); + f_at(a, account_db.as_hash_db(), key) + } + }); - if let Some(res) = trie_res { - return res; - } + if let Some(res) = trie_res { + return res; + } - // otherwise cache the account localy and cache storage key there. - if let Some(ref mut acc) = local_account { - if let Some(ref account) = acc.account { - let account_db = self.factories.accountdb.readonly(self.db.as_hash_db(), account.address_hash(address)); - return f_at(account, account_db.as_hash_db(), key) - } else { - return Ok(H256::new()) - } - } - } + // otherwise cache the account localy and cache storage key there. 
+ if let Some(ref mut acc) = local_account { + if let Some(ref account) = acc.account { + let account_db = self + .factories + .accountdb + .readonly(self.db.as_hash_db(), account.address_hash(address)); + return f_at(account, account_db.as_hash_db(), key); + } else { + return Ok(H256::new()); + } + } + } - // check if the account could exist before any requests to trie - if self.db.is_known_null(address) { return Ok(H256::zero()) } + // check if the account could exist before any requests to trie + if self.db.is_known_null(address) { + return Ok(H256::zero()); + } - // account is not found in the global cache, get from the DB and insert into local - let db = &self.db.as_hash_db(); - let db = self.factories.trie.readonly(db, &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - let from_rlp = |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); - let maybe_acc = db.get_with(address, from_rlp)?; - let r = maybe_acc.as_ref().map_or(Ok(H256::new()), |a| { - let account_db = self.factories.accountdb.readonly(self.db.as_hash_db(), a.address_hash(address)); - f_at(a, account_db.as_hash_db(), key) - }); - self.insert_cache(address, AccountEntry::new_clean(maybe_acc)); - r - } + // account is not found in the global cache, get from the DB and insert into local + let db = &self.db.as_hash_db(); + let db = self + .factories + .trie + .readonly(db, &self.root) + .expect(SEC_TRIE_DB_UNWRAP_STR); + let from_rlp = |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); + let maybe_acc = db.get_with(address, from_rlp)?; + let r = maybe_acc.as_ref().map_or(Ok(H256::new()), |a| { + let account_db = self + .factories + .accountdb + .readonly(self.db.as_hash_db(), a.address_hash(address)); + f_at(a, account_db.as_hash_db(), key) + }); + self.insert_cache(address, AccountEntry::new_clean(maybe_acc)); + r + } - /// Mutate storage of account `address` so that it is `value` for `key`. 
- pub fn storage_at(&self, address: &Address, key: &H256) -> TrieResult { - self.storage_at_inner( - address, - key, - |account, key| { account.cached_storage_at(key) }, - |account, db, key| { account.storage_at(db, key) }, - ) - } + /// Mutate storage of account `address` so that it is `value` for `key`. + pub fn storage_at(&self, address: &Address, key: &H256) -> TrieResult { + self.storage_at_inner( + address, + key, + |account, key| account.cached_storage_at(key), + |account, db, key| account.storage_at(db, key), + ) + } - /// Get the value of storage after last state commitment. - pub fn original_storage_at(&self, address: &Address, key: &H256) -> TrieResult { - self.storage_at_inner( - address, - key, - |account, key| { account.cached_original_storage_at(key) }, - |account, db, key| { account.original_storage_at(db, key) }, - ) - } + /// Get the value of storage after last state commitment. + pub fn original_storage_at(&self, address: &Address, key: &H256) -> TrieResult { + self.storage_at_inner( + address, + key, + |account, key| account.cached_original_storage_at(key), + |account, db, key| account.original_storage_at(db, key), + ) + } - /// Get accounts' code. - pub fn code(&self, a: &Address) -> TrieResult>> { - self.ensure_cached(a, RequireCache::Code, true, - |a| a.as_ref().map_or(None, |a| a.code().clone())) - } + /// Get accounts' code. + pub fn code(&self, a: &Address) -> TrieResult>> { + self.ensure_cached(a, RequireCache::Code, true, |a| { + a.as_ref().map_or(None, |a| a.code().clone()) + }) + } - /// Get an account's code hash. - pub fn code_hash(&self, a: &Address) -> TrieResult> { - self.ensure_cached(a, RequireCache::None, true, - |a| a.as_ref().map(|a| a.code_hash())) - } + /// Get an account's code hash. + pub fn code_hash(&self, a: &Address) -> TrieResult> { + self.ensure_cached(a, RequireCache::None, true, |a| { + a.as_ref().map(|a| a.code_hash()) + }) + } - /// Get accounts' code size. 
- pub fn code_size(&self, a: &Address) -> TrieResult> { - self.ensure_cached(a, RequireCache::CodeSize, true, - |a| a.as_ref().and_then(|a| a.code_size())) - } + /// Get accounts' code size. + pub fn code_size(&self, a: &Address) -> TrieResult> { + self.ensure_cached(a, RequireCache::CodeSize, true, |a| { + a.as_ref().and_then(|a| a.code_size()) + }) + } - /// Add `incr` to the balance of account `a`. - pub fn add_balance(&mut self, a: &Address, incr: &U256, cleanup_mode: CleanupMode) -> TrieResult<()> { - trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)?); - let is_value_transfer = !incr.is_zero(); - if is_value_transfer || (cleanup_mode == CleanupMode::ForceCreate && !self.exists(a)?) { - self.require(a, false)?.add_balance(incr); - } else if let CleanupMode::TrackTouched(set) = cleanup_mode { - if self.exists(a)? { - set.insert(*a); - self.touch(a)?; - } - } - Ok(()) - } + /// Add `incr` to the balance of account `a`. + pub fn add_balance( + &mut self, + a: &Address, + incr: &U256, + cleanup_mode: CleanupMode, + ) -> TrieResult<()> { + trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)?); + let is_value_transfer = !incr.is_zero(); + if is_value_transfer || (cleanup_mode == CleanupMode::ForceCreate && !self.exists(a)?) { + self.require(a, false)?.add_balance(incr); + } else if let CleanupMode::TrackTouched(set) = cleanup_mode { + if self.exists(a)? { + set.insert(*a); + self.touch(a)?; + } + } + Ok(()) + } - /// Subtract `decr` from the balance of account `a`. - pub fn sub_balance(&mut self, a: &Address, decr: &U256, cleanup_mode: &mut CleanupMode) -> TrieResult<()> { - trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a)?); - if !decr.is_zero() || !self.exists(a)? { - self.require(a, false)?.sub_balance(decr); - } - if let CleanupMode::TrackTouched(ref mut set) = *cleanup_mode { - set.insert(*a); - } - Ok(()) - } + /// Subtract `decr` from the balance of account `a`. 
+ pub fn sub_balance( + &mut self, + a: &Address, + decr: &U256, + cleanup_mode: &mut CleanupMode, + ) -> TrieResult<()> { + trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a)?); + if !decr.is_zero() || !self.exists(a)? { + self.require(a, false)?.sub_balance(decr); + } + if let CleanupMode::TrackTouched(ref mut set) = *cleanup_mode { + set.insert(*a); + } + Ok(()) + } - /// Subtracts `by` from the balance of `from` and adds it to that of `to`. - pub fn transfer_balance(&mut self, from: &Address, to: &Address, by: &U256, mut cleanup_mode: CleanupMode) -> TrieResult<()> { - self.sub_balance(from, by, &mut cleanup_mode)?; - self.add_balance(to, by, cleanup_mode)?; - Ok(()) - } + /// Subtracts `by` from the balance of `from` and adds it to that of `to`. + pub fn transfer_balance( + &mut self, + from: &Address, + to: &Address, + by: &U256, + mut cleanup_mode: CleanupMode, + ) -> TrieResult<()> { + self.sub_balance(from, by, &mut cleanup_mode)?; + self.add_balance(to, by, cleanup_mode)?; + Ok(()) + } - /// Increment the nonce of account `a` by 1. - pub fn inc_nonce(&mut self, a: &Address) -> TrieResult<()> { - self.require(a, false).map(|mut x| x.inc_nonce()) - } + /// Increment the nonce of account `a` by 1. + pub fn inc_nonce(&mut self, a: &Address) -> TrieResult<()> { + self.require(a, false).map(|mut x| x.inc_nonce()) + } - /// Mutate storage of account `a` so that it is `value` for `key`. - pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) -> TrieResult<()> { - trace!(target: "state", "set_storage({}:{:x} to {:x})", a, key, value); - if self.storage_at(a, &key)? != value { - self.require(a, false)?.set_storage(key, value) - } + /// Mutate storage of account `a` so that it is `value` for `key`. + pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) -> TrieResult<()> { + trace!(target: "state", "set_storage({}:{:x} to {:x})", a, key, value); + if self.storage_at(a, &key)? 
!= value { + self.require(a, false)?.set_storage(key, value) + } - Ok(()) - } + Ok(()) + } - /// Initialise the code of account `a` so that it is `code`. - /// NOTE: Account should have been created with `new_contract`. - pub fn init_code(&mut self, a: &Address, code: Bytes) -> TrieResult<()> { - self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce, KECCAK_NULL_RLP), |_| {})?.init_code(code); - Ok(()) - } + /// Initialise the code of account `a` so that it is `code`. + /// NOTE: Account should have been created with `new_contract`. + pub fn init_code(&mut self, a: &Address, code: Bytes) -> TrieResult<()> { + self.require_or_from( + a, + true, + || Account::new_contract(0.into(), self.account_start_nonce, KECCAK_NULL_RLP), + |_| {}, + )? + .init_code(code); + Ok(()) + } - /// Reset the code of account `a` so that it is `code`. - pub fn reset_code(&mut self, a: &Address, code: Bytes) -> TrieResult<()> { - self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce, KECCAK_NULL_RLP), |_| {})?.reset_code(code); - Ok(()) - } + /// Reset the code of account `a` so that it is `code`. + pub fn reset_code(&mut self, a: &Address, code: Bytes) -> TrieResult<()> { + self.require_or_from( + a, + true, + || Account::new_contract(0.into(), self.account_start_nonce, KECCAK_NULL_RLP), + |_| {}, + )? + .reset_code(code); + Ok(()) + } - /// Execute a given transaction, producing a receipt and an optional trace. - /// This will change the state accordingly. 
- pub fn apply(&mut self, env_info: &EnvInfo, machine: &Machine, t: &SignedTransaction, tracing: bool) -> ApplyResult { - if tracing { - let options = TransactOptions::with_tracing(); - self.apply_with_tracing(env_info, machine, t, options.tracer, options.vm_tracer) - } else { - let options = TransactOptions::with_no_tracing(); - self.apply_with_tracing(env_info, machine, t, options.tracer, options.vm_tracer) - } - } + /// Execute a given transaction, producing a receipt and an optional trace. + /// This will change the state accordingly. + pub fn apply( + &mut self, + env_info: &EnvInfo, + machine: &Machine, + t: &SignedTransaction, + tracing: bool, + ) -> ApplyResult { + if tracing { + let options = TransactOptions::with_tracing(); + self.apply_with_tracing(env_info, machine, t, options.tracer, options.vm_tracer) + } else { + let options = TransactOptions::with_no_tracing(); + self.apply_with_tracing(env_info, machine, t, options.tracer, options.vm_tracer) + } + } - /// Execute a given transaction with given tracer and VM tracer producing a receipt and an optional trace. - /// This will change the state accordingly. - pub fn apply_with_tracing( - &mut self, - env_info: &EnvInfo, - machine: &Machine, - t: &SignedTransaction, - tracer: T, - vm_tracer: V, - ) -> ApplyResult where - T: trace::Tracer, - V: trace::VMTracer, - { - let options = TransactOptions::new(tracer, vm_tracer); - let e = self.execute(env_info, machine, t, options, false)?; - let params = machine.params(); + /// Execute a given transaction with given tracer and VM tracer producing a receipt and an optional trace. + /// This will change the state accordingly. 
+ pub fn apply_with_tracing( + &mut self, + env_info: &EnvInfo, + machine: &Machine, + t: &SignedTransaction, + tracer: T, + vm_tracer: V, + ) -> ApplyResult + where + T: trace::Tracer, + V: trace::VMTracer, + { + let options = TransactOptions::new(tracer, vm_tracer); + let e = self.execute(env_info, machine, t, options, false)?; + let params = machine.params(); - let eip658 = env_info.number >= params.eip658_transition; - let no_intermediate_commits = - eip658 || - (env_info.number >= params.eip98_transition && env_info.number >= params.validate_receipts_transition); + let eip658 = env_info.number >= params.eip658_transition; + let no_intermediate_commits = eip658 + || (env_info.number >= params.eip98_transition + && env_info.number >= params.validate_receipts_transition); - let outcome = if no_intermediate_commits { - if eip658 { - TransactionOutcome::StatusCode(if e.exception.is_some() { 0 } else { 1 }) - } else { - TransactionOutcome::Unknown - } - } else { - self.commit()?; - TransactionOutcome::StateRoot(self.root().clone()) - }; + let outcome = if no_intermediate_commits { + if eip658 { + TransactionOutcome::StatusCode(if e.exception.is_some() { 0 } else { 1 }) + } else { + TransactionOutcome::Unknown + } + } else { + self.commit()?; + TransactionOutcome::StateRoot(self.root().clone()) + }; - let output = e.output; - let receipt = Receipt::new(outcome, e.cumulative_gas_used, e.logs); - trace!(target: "state", "Transaction receipt: {:?}", receipt); + let output = e.output; + let receipt = Receipt::new(outcome, e.cumulative_gas_used, e.logs); + trace!(target: "state", "Transaction receipt: {:?}", receipt); - Ok(ApplyOutcome { - receipt, - output, - trace: e.trace, - vm_trace: e.vm_trace, - }) - } + Ok(ApplyOutcome { + receipt, + output, + trace: e.trace, + vm_trace: e.vm_trace, + }) + } - // Execute a given transaction without committing changes. 
- // - // `virt` signals that we are executing outside of a block set and restrictions like - // gas limits and gas costs should be lifted. - fn execute(&mut self, env_info: &EnvInfo, machine: &Machine, t: &SignedTransaction, options: TransactOptions, virt: bool) - -> Result, ExecutionError> where T: trace::Tracer, V: trace::VMTracer, - { - let schedule = machine.schedule(env_info.number); - let mut e = Executive::new(self, env_info, machine, &schedule); + // Execute a given transaction without committing changes. + // + // `virt` signals that we are executing outside of a block set and restrictions like + // gas limits and gas costs should be lifted. + fn execute( + &mut self, + env_info: &EnvInfo, + machine: &Machine, + t: &SignedTransaction, + options: TransactOptions, + virt: bool, + ) -> Result, ExecutionError> + where + T: trace::Tracer, + V: trace::VMTracer, + { + let schedule = machine.schedule(env_info.number); + let mut e = Executive::new(self, env_info, machine, &schedule); - match virt { - true => e.transact_virtual(t, options), - false => e.transact(t, options), - } - } + match virt { + true => e.transact_virtual(t, options), + false => e.transact(t, options), + } + } - fn touch(&mut self, a: &Address) -> TrieResult<()> { - self.require(a, false)?; - Ok(()) - } + fn touch(&mut self, a: &Address) -> TrieResult<()> { + self.require(a, false)?; + Ok(()) + } - /// Commits our cached account changes into the trie. - pub fn commit(&mut self) -> Result<(), Error> { - assert!(self.checkpoints.borrow().is_empty()); - // first, commit the sub trees. 
- let mut accounts = self.cache.borrow_mut(); - for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) { - if let Some(ref mut account) = a.account { - let addr_hash = account.address_hash(address); - { - let mut account_db = self.factories.accountdb.create(self.db.as_hash_db_mut(), addr_hash); - account.commit_storage(&self.factories.trie, account_db.as_hash_db_mut())?; - account.commit_code(account_db.as_hash_db_mut()); - } - if !account.is_empty() { - self.db.note_non_null_account(address); - } - } - } + /// Commits our cached account changes into the trie. + pub fn commit(&mut self) -> Result<(), Error> { + assert!(self.checkpoints.borrow().is_empty()); + // first, commit the sub trees. + let mut accounts = self.cache.borrow_mut(); + for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) { + if let Some(ref mut account) = a.account { + let addr_hash = account.address_hash(address); + { + let mut account_db = self + .factories + .accountdb + .create(self.db.as_hash_db_mut(), addr_hash); + account.commit_storage(&self.factories.trie, account_db.as_hash_db_mut())?; + account.commit_code(account_db.as_hash_db_mut()); + } + if !account.is_empty() { + self.db.note_non_null_account(address); + } + } + } - { - let mut trie = self.factories.trie.from_existing(self.db.as_hash_db_mut(), &mut self.root)?; - for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) { - a.state = AccountState::Committed; - match a.account { - Some(ref mut account) => { - trie.insert(address, &account.rlp())?; - }, - None => { - trie.remove(address)?; - }, - }; - } - } + { + let mut trie = self + .factories + .trie + .from_existing(self.db.as_hash_db_mut(), &mut self.root)?; + for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) { + a.state = AccountState::Committed; + match a.account { + Some(ref mut account) => { + trie.insert(address, &account.rlp())?; + } + None => { + 
trie.remove(address)?; + } + }; + } + } - Ok(()) - } + Ok(()) + } - /// Propagate local cache into shared canonical state cache. - fn propagate_to_global_cache(&mut self) { - let mut addresses = self.cache.borrow_mut(); - trace!("Committing cache {:?} entries", addresses.len()); - for (address, a) in addresses.drain().filter(|&(_, ref a)| a.state == AccountState::Committed || a.state == AccountState::CleanFresh) { - self.db.add_to_account_cache(address, a.account, a.state == AccountState::Committed); - } - } + /// Propagate local cache into shared canonical state cache. + fn propagate_to_global_cache(&mut self) { + let mut addresses = self.cache.borrow_mut(); + trace!("Committing cache {:?} entries", addresses.len()); + for (address, a) in addresses.drain().filter(|&(_, ref a)| { + a.state == AccountState::Committed || a.state == AccountState::CleanFresh + }) { + self.db + .add_to_account_cache(address, a.account, a.state == AccountState::Committed); + } + } - /// Clear state cache - pub fn clear(&mut self) { - assert!(self.checkpoints.borrow().is_empty()); - self.cache.borrow_mut().clear(); - } + /// Clear state cache + pub fn clear(&mut self) { + assert!(self.checkpoints.borrow().is_empty()); + self.cache.borrow_mut().clear(); + } - /// Remove any touched empty or dust accounts. - pub fn kill_garbage(&mut self, touched: &HashSet
, remove_empty_touched: bool, min_balance: &Option, kill_contracts: bool) -> TrieResult<()> { - let to_kill: HashSet<_> = { - self.cache.borrow().iter().filter_map(|(address, ref entry)| + /// Remove any touched empty or dust accounts. + pub fn kill_garbage( + &mut self, + touched: &HashSet
, + remove_empty_touched: bool, + min_balance: &Option, + kill_contracts: bool, + ) -> TrieResult<()> { + let to_kill: HashSet<_> = { + self.cache.borrow().iter().filter_map(|(address, ref entry)| if touched.contains(address) && // Check all touched accounts ((remove_empty_touched && entry.exists_and_is_null()) // Remove all empty touched accounts. || min_balance.map_or(false, |ref balance| entry.account.as_ref().map_or(false, |account| @@ -943,1825 +1084,2403 @@ impl State { Some(address.clone()) } else { None }).collect() - }; - for address in to_kill { - self.kill_account(&address); - } - Ok(()) - } + }; + for address in to_kill { + self.kill_account(&address); + } + Ok(()) + } - /// Populate the state from `accounts`. - /// Used for tests. - pub fn populate_from(&mut self, accounts: PodState) { - assert!(self.checkpoints.borrow().is_empty()); - for (add, acc) in accounts.drain().into_iter() { - self.cache.borrow_mut().insert(add, AccountEntry::new_dirty(Some(Account::from_pod(acc)))); - } - } + /// Populate the state from `accounts`. + /// Used for tests. + pub fn populate_from(&mut self, accounts: PodState) { + assert!(self.checkpoints.borrow().is_empty()); + for (add, acc) in accounts.drain().into_iter() { + self.cache + .borrow_mut() + .insert(add, AccountEntry::new_dirty(Some(Account::from_pod(acc)))); + } + } - /// Populate a PodAccount map from this state. - fn to_pod_cache(&self) -> PodState { - assert!(self.checkpoints.borrow().is_empty()); - PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| { - if let Some(ref acc) = opt.account { - m.insert(*add, PodAccount::from_account(acc)); - } - m - })) - } + /// Populate a PodAccount map from this state. 
+ fn to_pod_cache(&self) -> PodState { + assert!(self.checkpoints.borrow().is_empty()); + PodState::from( + self.cache + .borrow() + .iter() + .fold(BTreeMap::new(), |mut m, (add, opt)| { + if let Some(ref acc) = opt.account { + m.insert(*add, PodAccount::from_account(acc)); + } + m + }), + ) + } - #[cfg(feature="to-pod-full")] - /// Populate a PodAccount map from this state. - /// Warning this is not for real time use. - /// Use of this method requires FatDB mode to be able - /// to iterate on accounts. - pub fn to_pod_full(&self) -> Result { + #[cfg(feature = "to-pod-full")] + /// Populate a PodAccount map from this state. + /// Warning this is not for real time use. + /// Use of this method requires FatDB mode to be able + /// to iterate on accounts. + pub fn to_pod_full(&self) -> Result { + assert!(self.checkpoints.borrow().is_empty()); + assert!(self.factories.trie.is_fat()); - assert!(self.checkpoints.borrow().is_empty()); - assert!(self.factories.trie.is_fat()); + let mut result = BTreeMap::new(); - let mut result = BTreeMap::new(); + let db = &self.db.as_hash_db(); + let trie = self.factories.trie.readonly(db, &self.root)?; - let db = &self.db.as_hash_db(); - let trie = self.factories.trie.readonly(db, &self.root)?; + // put trie in cache + for item in trie.iter()? { + if let Ok((addr, _dbval)) = item { + let address = Address::from_slice(&addr); + let _ = self.require(&address, true); + } + } - // put trie in cache - for item in trie.iter()? 
{ - if let Ok((addr, _dbval)) = item { - let address = Address::from_slice(&addr); - let _ = self.require(&address, true); - } - } + // Resolve missing part + for (add, opt) in self.cache.borrow().iter() { + if let Some(ref acc) = opt.account { + let pod_account = self.account_to_pod_account(acc, add)?; + result.insert(add.clone(), pod_account); + } + } - // Resolve missing part - for (add, opt) in self.cache.borrow().iter() { - if let Some(ref acc) = opt.account { - let pod_account = self.account_to_pod_account(acc, add)?; - result.insert(add.clone(), pod_account); - } - } + Ok(PodState::from(result)) + } - Ok(PodState::from(result)) - } + /// Create a PodAccount from an account. + /// Differs from existing method by including all storage + /// values of the account to the PodAccount. + /// This function is only intended for use in small tests or with fresh accounts. + /// It requires FatDB. + #[cfg(feature = "to-pod-full")] + fn account_to_pod_account( + &self, + account: &Account, + address: &Address, + ) -> Result { + let mut pod_storage = BTreeMap::new(); + let addr_hash = account.address_hash(address); + let accountdb = self + .factories + .accountdb + .readonly(self.db.as_hash_db(), addr_hash); + let root = account.base_storage_root(); - /// Create a PodAccount from an account. - /// Differs from existing method by including all storage - /// values of the account to the PodAccount. - /// This function is only intended for use in small tests or with fresh accounts. - /// It requires FatDB. - #[cfg(feature="to-pod-full")] - fn account_to_pod_account(&self, account: &Account, address: &Address) -> Result { - let mut pod_storage = BTreeMap::new(); - let addr_hash = account.address_hash(address); - let accountdb = self.factories.accountdb.readonly(self.db.as_hash_db(), addr_hash); - let root = account.base_storage_root(); + let accountdb = &accountdb.as_hash_db(); + let trie = self.factories.trie.readonly(accountdb, &root)?; + for o_kv in trie.iter()? 
{ + if let Ok((key, val)) = o_kv { + pod_storage.insert( + key[..].into(), + rlp::decode::(&val[..]) + .expect("Decoded from trie which was encoded from the same type; qed") + .into(), + ); + } + } - let accountdb = &accountdb.as_hash_db(); - let trie = self.factories.trie.readonly(accountdb, &root)?; - for o_kv in trie.iter()? { - if let Ok((key, val)) = o_kv { - pod_storage.insert(key[..].into(), rlp::decode::(&val[..]).expect("Decoded from trie which was encoded from the same type; qed").into()); - } - } + let mut pod_account = PodAccount::from_account(&account); + // cached one first + pod_storage.append(&mut pod_account.storage); + pod_account.storage = pod_storage; + Ok(pod_account) + } - let mut pod_account = PodAccount::from_account(&account); - // cached one first - pod_storage.append(&mut pod_account.storage); - pod_account.storage = pod_storage; - Ok(pod_account) - } + /// Populate a PodAccount map from this state, with another state as the account and storage query. + fn to_pod_diff(&mut self, query: &State) -> TrieResult { + assert!(self.checkpoints.borrow().is_empty()); - /// Populate a PodAccount map from this state, with another state as the account and storage query. - fn to_pod_diff(&mut self, query: &State) -> TrieResult { - assert!(self.checkpoints.borrow().is_empty()); + // Merge PodAccount::to_pod for cache of self and `query`. + let all_addresses = self + .cache + .borrow() + .keys() + .cloned() + .chain(query.cache.borrow().keys().cloned()) + .collect::>(); - // Merge PodAccount::to_pod for cache of self and `query`. 
- let all_addresses = self.cache.borrow().keys().cloned() - .chain(query.cache.borrow().keys().cloned()) - .collect::>(); + Ok(PodState::from(all_addresses.into_iter().fold( + Ok(BTreeMap::new()), + |m: TrieResult<_>, address| { + let mut m = m?; - Ok(PodState::from(all_addresses.into_iter().fold(Ok(BTreeMap::new()), |m: TrieResult<_>, address| { - let mut m = m?; + let account = self.ensure_cached(&address, RequireCache::Code, true, |acc| { + acc.map(|acc| { + // Merge all modified storage keys. + let all_keys = { + let self_keys = acc + .storage_changes() + .keys() + .cloned() + .collect::>(); - let account = self.ensure_cached(&address, RequireCache::Code, true, |acc| { - acc.map(|acc| { - // Merge all modified storage keys. - let all_keys = { - let self_keys = acc.storage_changes().keys().cloned() - .collect::>(); + if let Some(ref query_storage) = + query.cache.borrow().get(&address).and_then(|opt| { + Some( + opt.account + .as_ref()? + .storage_changes() + .keys() + .cloned() + .collect::>(), + ) + }) + { + self_keys.union(&query_storage).cloned().collect::>() + } else { + self_keys.into_iter().collect::>() + } + }; - if let Some(ref query_storage) = query.cache.borrow().get(&address) - .and_then(|opt| { - Some(opt.account.as_ref()?.storage_changes().keys().cloned() - .collect::>()) - }) - { - self_keys.union(&query_storage).cloned().collect::>() - } else { - self_keys.into_iter().collect::>() - } - }; + // Storage must be fetched after ensure_cached to avoid borrow problem. + ( + *acc.balance(), + *acc.nonce(), + all_keys, + acc.code().map(|x| x.to_vec()), + ) + }) + })?; - // Storage must be fetched after ensure_cached to avoid borrow problem. 
- (*acc.balance(), *acc.nonce(), all_keys, acc.code().map(|x| x.to_vec())) - }) - })?; + if let Some((balance, nonce, storage_keys, code)) = account { + let storage = storage_keys.into_iter().fold( + Ok(BTreeMap::new()), + |s: TrieResult<_>, key| { + let mut s = s?; - if let Some((balance, nonce, storage_keys, code)) = account { - let storage = storage_keys.into_iter().fold(Ok(BTreeMap::new()), |s: TrieResult<_>, key| { - let mut s = s?; + s.insert(key, self.storage_at(&address, &key)?); + Ok(s) + }, + )?; - s.insert(key, self.storage_at(&address, &key)?); - Ok(s) - })?; + m.insert( + address, + PodAccount { + balance, + nonce, + storage, + code, + }, + ); + } - m.insert(address, PodAccount { - balance, nonce, storage, code - }); - } + Ok(m) + }, + )?)) + } - Ok(m) - })?)) - } + /// Returns a `StateDiff` describing the difference from `orig` to `self`. + /// Consumes self. + pub fn diff_from(&self, mut orig: State) -> TrieResult { + let pod_state_post = self.to_pod_cache(); + let pod_state_pre = orig.to_pod_diff(self)?; + Ok(pod_state::diff_pod(&pod_state_pre, &pod_state_post)) + } - /// Returns a `StateDiff` describing the difference from `orig` to `self`. - /// Consumes self. - pub fn diff_from(&self, mut orig: State) -> TrieResult { - let pod_state_post = self.to_pod_cache(); - let pod_state_pre = orig.to_pod_diff(self)?; - Ok(pod_state::diff_pod(&pod_state_pre, &pod_state_post)) - } + /// Load required account data from the databases. Returns whether the cache succeeds. + #[must_use] + fn update_account_cache( + require: RequireCache, + account: &mut Account, + state_db: &B, + db: &HashDB, + ) -> bool { + if let RequireCache::None = require { + return true; + } - /// Load required account data from the databases. Returns whether the cache succeeds. 
- #[must_use] - fn update_account_cache(require: RequireCache, account: &mut Account, state_db: &B, db: &HashDB) -> bool { - if let RequireCache::None = require { - return true; - } + if account.is_cached() { + return true; + } - if account.is_cached() { - return true; - } + // if there's already code in the global cache, always cache it localy + let hash = account.code_hash(); + match state_db.get_cached_code(&hash) { + Some(code) => { + account.cache_given_code(code); + true + } + None => match require { + RequireCache::None => true, + RequireCache::Code => { + if let Some(code) = account.cache_code(db) { + // propagate code loaded from the database to + // the global code cache. + state_db.cache_code(hash, code); + true + } else { + false + } + } + RequireCache::CodeSize => account.cache_code_size(db), + }, + } + } - // if there's already code in the global cache, always cache it localy - let hash = account.code_hash(); - match state_db.get_cached_code(&hash) { - Some(code) => { - account.cache_given_code(code); - true - }, - None => match require { - RequireCache::None => true, - RequireCache::Code => { - if let Some(code) = account.cache_code(db) { - // propagate code loaded from the database to - // the global code cache. - state_db.cache_code(hash, code); - true - } else { - false - } - }, - RequireCache::CodeSize => { - account.cache_code_size(db) - } - } - } - } + /// Check caches for required data + /// First searches for account in the local, then the shared cache. + /// Populates local cache if nothing found. 
+ fn ensure_cached( + &self, + a: &Address, + require: RequireCache, + check_null: bool, + f: F, + ) -> TrieResult + where + F: Fn(Option<&Account>) -> U, + { + // check local cache first + if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) { + if let Some(ref mut account) = maybe_acc.account { + let accountdb = self + .factories + .accountdb + .readonly(self.db.as_hash_db(), account.address_hash(a)); + if Self::update_account_cache(require, account, &self.db, accountdb.as_hash_db()) { + return Ok(f(Some(account))); + } else { + return Err(Box::new(TrieError::IncompleteDatabase(H256::from(a)))); + } + } + return Ok(f(None)); + } + // check global cache + let result = self.db.get_cached(a, |mut acc| { + if let Some(ref mut account) = acc { + let accountdb = self + .factories + .accountdb + .readonly(self.db.as_hash_db(), account.address_hash(a)); + if !Self::update_account_cache(require, account, &self.db, accountdb.as_hash_db()) { + return Err(Box::new(TrieError::IncompleteDatabase(H256::from(a)))); + } + } + Ok(f(acc.map(|a| &*a))) + }); + match result { + Some(r) => Ok(r?), + None => { + // first check if it is not in database for sure + if check_null && self.db.is_known_null(a) { + return Ok(f(None)); + } - /// Check caches for required data - /// First searches for account in the local, then the shared cache. - /// Populates local cache if nothing found. 
- fn ensure_cached(&self, a: &Address, require: RequireCache, check_null: bool, f: F) -> TrieResult - where F: Fn(Option<&Account>) -> U { - // check local cache first - if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) { - if let Some(ref mut account) = maybe_acc.account { - let accountdb = self.factories.accountdb.readonly(self.db.as_hash_db(), account.address_hash(a)); - if Self::update_account_cache(require, account, &self.db, accountdb.as_hash_db()) { - return Ok(f(Some(account))); - } else { - return Err(Box::new(TrieError::IncompleteDatabase(H256::from(a)))); - } - } - return Ok(f(None)); - } - // check global cache - let result = self.db.get_cached(a, |mut acc| { - if let Some(ref mut account) = acc { - let accountdb = self.factories.accountdb.readonly(self.db.as_hash_db(), account.address_hash(a)); - if !Self::update_account_cache(require, account, &self.db, accountdb.as_hash_db()) { - return Err(Box::new(TrieError::IncompleteDatabase(H256::from(a)))); - } - } - Ok(f(acc.map(|a| &*a))) - }); - match result { - Some(r) => Ok(r?), - None => { - // first check if it is not in database for sure - if check_null && self.db.is_known_null(a) { return Ok(f(None)); } + // not found in the global cache, get from the DB and insert into local + let db = &self.db.as_hash_db(); + let db = self.factories.trie.readonly(db, &self.root)?; + let from_rlp = |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); + let mut maybe_acc = db.get_with(a, from_rlp)?; + if let Some(ref mut account) = maybe_acc.as_mut() { + let accountdb = self + .factories + .accountdb + .readonly(self.db.as_hash_db(), account.address_hash(a)); + if !Self::update_account_cache( + require, + account, + &self.db, + accountdb.as_hash_db(), + ) { + return Err(Box::new(TrieError::IncompleteDatabase(H256::from(a)))); + } + } + let r = f(maybe_acc.as_ref()); + self.insert_cache(a, AccountEntry::new_clean(maybe_acc)); + Ok(r) + } + } + } - // not found in the global cache, get 
from the DB and insert into local - let db = &self.db.as_hash_db(); - let db = self.factories.trie.readonly(db, &self.root)?; - let from_rlp = |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); - let mut maybe_acc = db.get_with(a, from_rlp)?; - if let Some(ref mut account) = maybe_acc.as_mut() { - let accountdb = self.factories.accountdb.readonly(self.db.as_hash_db(), account.address_hash(a)); - if !Self::update_account_cache(require, account, &self.db, accountdb.as_hash_db()) { - return Err(Box::new(TrieError::IncompleteDatabase(H256::from(a)))); - } - } - let r = f(maybe_acc.as_ref()); - self.insert_cache(a, AccountEntry::new_clean(maybe_acc)); - Ok(r) - } - } - } + /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. + fn require<'a>(&'a self, a: &Address, require_code: bool) -> TrieResult> { + self.require_or_from( + a, + require_code, + || Account::new_basic(0u8.into(), self.account_start_nonce), + |_| {}, + ) + } - /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. - fn require<'a>(&'a self, a: &Address, require_code: bool) -> TrieResult> { - self.require_or_from(a, require_code, || Account::new_basic(0u8.into(), self.account_start_nonce), |_| {}) - } + /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. + /// If it doesn't exist, make account equal the evaluation of `default`. 
+ fn require_or_from<'a, F, G>( + &'a self, + a: &Address, + require_code: bool, + default: F, + not_default: G, + ) -> TrieResult> + where + F: FnOnce() -> Account, + G: FnOnce(&mut Account), + { + let contains_key = self.cache.borrow().contains_key(a); + if !contains_key { + match self.db.get_cached_account(a) { + Some(acc) => self.insert_cache(a, AccountEntry::new_clean_cached(acc)), + None => { + let maybe_acc = if !self.db.is_known_null(a) { + let db = &self.db.as_hash_db(); + let db = self.factories.trie.readonly(db, &self.root)?; + let from_rlp = + |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); + AccountEntry::new_clean(db.get_with(a, from_rlp)?) + } else { + AccountEntry::new_clean(None) + }; + self.insert_cache(a, maybe_acc); + } + } + } + self.note_cache(a); - /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. - /// If it doesn't exist, make account equal the evaluation of `default`. - fn require_or_from<'a, F, G>(&'a self, a: &Address, require_code: bool, default: F, not_default: G) -> TrieResult> - where F: FnOnce() -> Account, G: FnOnce(&mut Account), - { - let contains_key = self.cache.borrow().contains_key(a); - if !contains_key { - match self.db.get_cached_account(a) { - Some(acc) => self.insert_cache(a, AccountEntry::new_clean_cached(acc)), - None => { - let maybe_acc = if !self.db.is_known_null(a) { - let db = &self.db.as_hash_db(); - let db = self.factories.trie.readonly(db, &self.root)?; - let from_rlp = |b:&[u8]| { Account::from_rlp(b).expect("decoding db value failed") }; - AccountEntry::new_clean(db.get_with(a, from_rlp)?) - } else { - AccountEntry::new_clean(None) - }; - self.insert_cache(a, maybe_acc); - } - } - } - self.note_cache(a); + // at this point the entry is guaranteed to be in the cache. 
+ let mut account = RefMut::map(self.cache.borrow_mut(), |c| { + let entry = c + .get_mut(a) + .expect("entry known to exist in the cache; qed"); - // at this point the entry is guaranteed to be in the cache. - let mut account = RefMut::map(self.cache.borrow_mut(), |c| { - let entry = c.get_mut(a).expect("entry known to exist in the cache; qed"); + match &mut entry.account { + &mut Some(ref mut acc) => not_default(acc), + slot => *slot = Some(default()), + } - match &mut entry.account { - &mut Some(ref mut acc) => not_default(acc), - slot => *slot = Some(default()), - } + // set the dirty flag after changing account data. + entry.state = AccountState::Dirty; + entry + .account + .as_mut() + .expect("Required account must always exist; qed") + }); - // set the dirty flag after changing account data. - entry.state = AccountState::Dirty; - entry.account.as_mut().expect("Required account must always exist; qed") - }); + if require_code { + let addr_hash = account.address_hash(a); + let accountdb = self + .factories + .accountdb + .readonly(self.db.as_hash_db(), addr_hash); - if require_code { - let addr_hash = account.address_hash(a); - let accountdb = self.factories.accountdb.readonly(self.db.as_hash_db(), addr_hash); + if !Self::update_account_cache( + RequireCache::Code, + &mut account, + &self.db, + accountdb.as_hash_db(), + ) { + return Err(Box::new(TrieError::IncompleteDatabase(H256::from(a)))); + } + } - if !Self::update_account_cache(RequireCache::Code, &mut account, &self.db, accountdb.as_hash_db()) { - return Err(Box::new(TrieError::IncompleteDatabase(H256::from(a)))) - } - } + Ok(account) + } - Ok(account) - } - - /// Replace account code and storage. Creates account if it does not exist. - pub fn patch_account(&self, a: &Address, code: Arc, storage: HashMap) -> TrieResult<()> { - Ok(self.require(a, false)?.reset_code_and_storage(code, storage)) - } + /// Replace account code and storage. Creates account if it does not exist. 
+ pub fn patch_account( + &self, + a: &Address, + code: Arc, + storage: HashMap, + ) -> TrieResult<()> { + Ok(self + .require(a, false)? + .reset_code_and_storage(code, storage)) + } } // State proof implementations; useful for light client protocols. impl State { - /// Prove an account's existence or nonexistence in the state trie. - /// Returns a merkle proof of the account's trie node omitted or an encountered trie error. - /// If the account doesn't exist in the trie, prove that and return defaults. - /// Requires a secure trie to be used for accurate results. - /// `account_key` == keccak(address) - pub fn prove_account(&self, account_key: H256) -> TrieResult<(Vec, BasicAccount)> { - let mut recorder = Recorder::new(); - let db = &self.db.as_hash_db(); - let trie = TrieDB::new(db, &self.root)?; - let maybe_account: Option = { - let panicky_decoder = |bytes: &[u8]| { - ::rlp::decode(bytes).unwrap_or_else(|_| panic!("prove_account, could not query trie for account key={}", &account_key)) - }; - let query = (&mut recorder, panicky_decoder); - trie.get_with(&account_key, query)? - }; - let account = maybe_account.unwrap_or_else(|| BasicAccount { - balance: 0.into(), - nonce: self.account_start_nonce, - code_hash: KECCAK_EMPTY, - storage_root: KECCAK_NULL_RLP, - }); + /// Prove an account's existence or nonexistence in the state trie. + /// Returns a merkle proof of the account's trie node omitted or an encountered trie error. + /// If the account doesn't exist in the trie, prove that and return defaults. + /// Requires a secure trie to be used for accurate results. 
+ /// `account_key` == keccak(address) + pub fn prove_account(&self, account_key: H256) -> TrieResult<(Vec, BasicAccount)> { + let mut recorder = Recorder::new(); + let db = &self.db.as_hash_db(); + let trie = TrieDB::new(db, &self.root)?; + let maybe_account: Option = { + let panicky_decoder = |bytes: &[u8]| { + ::rlp::decode(bytes).unwrap_or_else(|_| { + panic!( + "prove_account, could not query trie for account key={}", + &account_key + ) + }) + }; + let query = (&mut recorder, panicky_decoder); + trie.get_with(&account_key, query)? + }; + let account = maybe_account.unwrap_or_else(|| BasicAccount { + balance: 0.into(), + nonce: self.account_start_nonce, + code_hash: KECCAK_EMPTY, + storage_root: KECCAK_NULL_RLP, + }); - Ok((recorder.drain().into_iter().map(|r| r.data).collect(), account)) - } + Ok(( + recorder.drain().into_iter().map(|r| r.data).collect(), + account, + )) + } - /// Prove an account's storage key's existence or nonexistence in the state. - /// Returns a merkle proof of the account's storage trie. - /// Requires a secure trie to be used for correctness. - /// `account_key` == keccak(address) - /// `storage_key` == keccak(key) - pub fn prove_storage(&self, account_key: H256, storage_key: H256) -> TrieResult<(Vec, H256)> { - // TODO: probably could look into cache somehow but it's keyed by - // address, not keccak(address). - let db = &self.db.as_hash_db(); - let trie = TrieDB::new(db, &self.root)?; - let from_rlp = |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); - let acc = match trie.get_with(&account_key, from_rlp)? { - Some(acc) => acc, - None => return Ok((Vec::new(), H256::new())), - }; + /// Prove an account's storage key's existence or nonexistence in the state. + /// Returns a merkle proof of the account's storage trie. + /// Requires a secure trie to be used for correctness. 
+ /// `account_key` == keccak(address) + /// `storage_key` == keccak(key) + pub fn prove_storage( + &self, + account_key: H256, + storage_key: H256, + ) -> TrieResult<(Vec, H256)> { + // TODO: probably could look into cache somehow but it's keyed by + // address, not keccak(address). + let db = &self.db.as_hash_db(); + let trie = TrieDB::new(db, &self.root)?; + let from_rlp = |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); + let acc = match trie.get_with(&account_key, from_rlp)? { + Some(acc) => acc, + None => return Ok((Vec::new(), H256::new())), + }; - let account_db = self.factories.accountdb.readonly(self.db.as_hash_db(), account_key); - acc.prove_storage(account_db.as_hash_db(), storage_key) - } + let account_db = self + .factories + .accountdb + .readonly(self.db.as_hash_db(), account_key); + acc.prove_storage(account_db.as_hash_db(), storage_key) + } } impl fmt::Debug for State { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.cache.borrow()) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.cache.borrow()) + } } impl State { - /// Get a reference to the underlying state DB. - pub fn db(&self) -> &StateDB { - &self.db - } + /// Get a reference to the underlying state DB. + pub fn db(&self) -> &StateDB { + &self.db + } } // TODO: cloning for `State` shouldn't be possible in general; Remove this and use // checkpoints where possible. 
impl Clone for State { - fn clone(&self) -> State { - let cache = { - let mut cache: HashMap = HashMap::new(); - for (key, val) in self.cache.borrow().iter() { - if let Some(entry) = val.clone_if_dirty() { - cache.insert(key.clone(), entry); - } - } - cache - }; + fn clone(&self) -> State { + let cache = { + let mut cache: HashMap = HashMap::new(); + for (key, val) in self.cache.borrow().iter() { + if let Some(entry) = val.clone_if_dirty() { + cache.insert(key.clone(), entry); + } + } + cache + }; - State { - db: self.db.boxed_clone(), - root: self.root.clone(), - cache: RefCell::new(cache), - checkpoints: RefCell::new(Vec::new()), - account_start_nonce: self.account_start_nonce.clone(), - factories: self.factories.clone(), - } - } + State { + db: self.db.boxed_clone(), + root: self.root.clone(), + cache: RefCell::new(cache), + checkpoints: RefCell::new(Vec::new()), + account_start_nonce: self.account_start_nonce.clone(), + factories: self.factories.clone(), + } + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::str::FromStr; - use rustc_hex::FromHex; - use hash::{keccak, KECCAK_NULL_RLP}; - use super::*; - use ethkey::Secret; - use ethereum_types::{H256, U256, Address}; - use test_helpers::{get_temp_state, get_temp_state_db}; - use machine::EthereumMachine; - use vm::EnvInfo; - use spec::*; - use types::transaction::*; - use trace::{FlatTrace, TraceError, trace}; - use evm::CallType; - - fn secret() -> Secret { - keccak("").into() - } - - fn make_frontier_machine(max_depth: usize) -> EthereumMachine { - let mut machine = ::ethereum::new_frontier_test_machine(); - machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); - machine - } - - #[test] - fn should_apply_create_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - 
gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 100.into(), - data: FromHex::from_hex("601080600c6000396000f3006000355415600957005b60203560003555").unwrap(), - }.sign(&secret(), None); - - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 0, - action: trace::Action::Create(trace::Create { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - value: 100.into(), - gas: 77412.into(), - init: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85], - }), - result: trace::Res::Create(trace::CreateResult { - gas_used: U256::from(3224), - address: Address::from_str("8988167e088c87cd314df6d3c2b83da5acb93ace").unwrap(), - code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] - }), - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_work_when_cloned() { - let _ = env_logger::try_init(); - - let a = Address::zero(); - - let mut state = { - let mut state = get_temp_state(); - assert_eq!(state.exists(&a).unwrap(), false); - state.inc_nonce(&a).unwrap(); - state.commit().unwrap(); - state.clone() - }; - - state.inc_nonce(&a).unwrap(); - state.commit().unwrap(); - } - - #[test] - fn should_trace_failed_create_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Create, - value: 100.into(), - data: FromHex::from_hex("5b600056").unwrap(), - }.sign(&secret(), None); - - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, 
true).unwrap(); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - action: trace::Action::Create(trace::Create { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - value: 100.into(), - gas: 78792.into(), - init: vec![91, 96, 0, 86], - }), - result: trace::Res::FailedCreate(TraceError::OutOfGas), - subtraces: 0 - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_call_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("6000").unwrap()).unwrap(); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(3), - output: vec![] - }), - subtraces: 0, - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_basic_call_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![], - }.sign(&secret(), None); - - state.add_balance(&t.sender(), 
&(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(0), - output: vec![] - }), - subtraces: 0, - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_call_transaction_to_builtin() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = Spec::new_test_machine(); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0x1.into()), - value: 0.into(), - data: vec![], - }.sign(&secret(), None); - - let result = state.apply(&info, &machine, &t, true).unwrap(); - - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: "0000000000000000000000000000000000000001".into(), - value: 0.into(), - gas: 79_000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(3000), - output: vec![] - }), - subtraces: 0, - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_not_trace_subcall_transaction_to_builtin() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = Spec::new_test_machine(); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 0.into(), - data: 
vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060006001610be0f1").unwrap()).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 0.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(3_721), // in post-eip150 - output: vec![] - }), - subtraces: 0, - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_callcode_properly() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = Spec::new_test_machine(); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 0.into(), - data: vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b611000f2").unwrap()).unwrap(); - state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 0.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: 724.into(), // in post-eip150 - output: vec![] - }), - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 4096.into(), - input: 
vec![], - call_type: CallType::CallCode, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: 3.into(), - output: vec![], - }), - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_delegatecall_properly() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - info.number = 0x789b0; - let machine = Spec::new_test_machine(); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 0.into(), - data: vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()).unwrap(); - state.init_code(&0xb.into(), FromHex::from_hex("60056000526001601ff3").unwrap()).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 0.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(736), // in post-eip150 - output: vec![] - }), - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 32768.into(), - input: vec![], - call_type: CallType::DelegateCall, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: 18.into(), - output: vec![5], - }), - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_failed_call_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = 
make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap()).unwrap(); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::FailedCall(TraceError::OutOfGas), - subtraces: 0, - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_call_with_subcall_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); - state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - 
gas_used: U256::from(69), - output: vec![] - }), - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 78934.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(3), - output: vec![] - }), - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_call_with_basic_subcall_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006045600b6000f1").unwrap()).unwrap(); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(31761), - output: vec![] - }), - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 69.into(), - gas: 2300.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult::default()), - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn 
should_not_trace_call_with_invalid_basic_subcall_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()).unwrap(); // not enough funds. - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 0, - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(31761), - output: vec![] - }), - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_failed_subcall_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![],//600480600b6000396000f35b600056 - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); - state.init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap()).unwrap(); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - let 
expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(79_000), - output: vec![] - }), - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 78934.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::FailedCall(TraceError::OutOfGas), - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_call_with_subcall_with_subcall_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); - state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap()).unwrap(); - state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap(); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: 
trace::Res::Call(trace::CallResult { - gas_used: U256::from(135), - output: vec![] - }), - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 78934.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(69), - output: vec![] - }), - }, FlatTrace { - trace_address: vec![0, 0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Call(trace::Call { - from: 0xb.into(), - to: 0xc.into(), - value: 0.into(), - gas: 78868.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(3), - output: vec![] - }), - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_failed_subcall_with_subcall_transaction() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![],//600480600b6000396000f35b600056 - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); - state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap()).unwrap(); - state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap(); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 
0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(79_000), - output: vec![] - }) - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 78934.into(), - input: vec![], - call_type: CallType::Call, - }), - result: trace::Res::FailedCall(TraceError::OutOfGas), - }, FlatTrace { - trace_address: vec![0, 0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Call(trace::Call { - from: 0xb.into(), - to: 0xc.into(), - value: 0.into(), - gas: 78868.into(), - call_type: CallType::Call, - input: vec![], - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(3), - output: vec![] - }), - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn should_trace_suicide() { - let _ = env_logger::try_init(); - - let mut state = get_temp_state(); - - let mut info = EnvInfo::default(); - info.gas_limit = 1_000_000.into(); - let machine = make_frontier_machine(5); - - let t = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 100_000.into(), - action: Action::Call(0xa.into()), - value: 100.into(), - data: vec![], - }.sign(&secret(), None); - - state.init_code(&0xa.into(), FromHex::from_hex("73000000000000000000000000000000000000000bff").unwrap()).unwrap(); - state.add_balance(&0xa.into(), &50.into(), CleanupMode::NoEmpty).unwrap(); - state.add_balance(&t.sender(), &100.into(), CleanupMode::NoEmpty).unwrap(); - let result = state.apply(&info, &machine, &t, true).unwrap(); - let expected_trace = vec![FlatTrace { - trace_address: Default::default(), - subtraces: 1, - action: trace::Action::Call(trace::Call { - from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), - to: 0xa.into(), - value: 100.into(), - gas: 79000.into(), - input: vec![], - call_type: CallType::Call, - }), - 
result: trace::Res::Call(trace::CallResult { - gas_used: 3.into(), - output: vec![] - }), - }, FlatTrace { - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - action: trace::Action::Suicide(trace::Suicide { - address: 0xa.into(), - refund_address: 0xb.into(), - balance: 150.into(), - }), - result: trace::Res::None, - }]; - - assert_eq!(result.trace, expected_trace); - } - - #[test] - fn code_from_database() { - let a = Address::zero(); - let (root, db) = { - let mut state = get_temp_state(); - state.require_or_from(&a, false, || Account::new_contract(42.into(), 0.into(), KECCAK_NULL_RLP), |_|{}).unwrap(); - state.init_code(&a, vec![1, 2, 3]).unwrap(); - assert_eq!(state.code(&a).unwrap(), Some(Arc::new(vec![1u8, 2, 3]))); - state.commit().unwrap(); - assert_eq!(state.code(&a).unwrap(), Some(Arc::new(vec![1u8, 2, 3]))); - state.drop() - }; - - let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.code(&a).unwrap(), Some(Arc::new(vec![1u8, 2, 3]))); - } - - #[test] - fn storage_at_from_database() { - let a = Address::zero(); - let (root, db) = { - let mut state = get_temp_state(); - state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64))).unwrap(); - state.commit().unwrap(); - state.drop() - }; - - let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(s.storage_at(&a, &H256::from(&U256::from(1u64))).unwrap(), H256::from(&U256::from(69u64))); - } - - #[test] - fn get_from_database() { - let a = Address::zero(); - let (root, db) = { - let mut state = get_temp_state(); - state.inc_nonce(&a).unwrap(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); - state.commit().unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); - state.drop() - }; - - let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.balance(&a).unwrap(), 
U256::from(69u64)); - assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); - } - - #[test] - fn remove() { - let a = Address::zero(); - let mut state = get_temp_state(); - assert_eq!(state.exists(&a).unwrap(), false); - assert_eq!(state.exists_and_not_null(&a).unwrap(), false); - state.inc_nonce(&a).unwrap(); - assert_eq!(state.exists(&a).unwrap(), true); - assert_eq!(state.exists_and_not_null(&a).unwrap(), true); - assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); - state.kill_account(&a); - assert_eq!(state.exists(&a).unwrap(), false); - assert_eq!(state.exists_and_not_null(&a).unwrap(), false); - assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); - } - - #[test] - fn empty_account_is_not_created() { - let a = Address::zero(); - let db = get_temp_state_db(); - let (root, db) = { - let mut state = State::new(db, U256::from(0), Default::default()); - state.add_balance(&a, &U256::default(), CleanupMode::NoEmpty).unwrap(); // create an empty account - state.commit().unwrap(); - state.drop() - }; - let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert!(!state.exists(&a).unwrap()); - assert!(!state.exists_and_not_null(&a).unwrap()); - } - - #[test] - fn empty_account_exists_when_creation_forced() { - let a = Address::zero(); - let db = get_temp_state_db(); - let (root, db) = { - let mut state = State::new(db, U256::from(0), Default::default()); - state.add_balance(&a, &U256::default(), CleanupMode::ForceCreate).unwrap(); // create an empty account - state.commit().unwrap(); - state.drop() - }; - let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert!(state.exists(&a).unwrap()); - assert!(!state.exists_and_not_null(&a).unwrap()); - } - - #[test] - fn remove_from_database() { - let a = Address::zero(); - let (root, db) = { - let mut state = get_temp_state(); - state.inc_nonce(&a).unwrap(); - state.commit().unwrap(); - assert_eq!(state.exists(&a).unwrap(), true); - 
assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); - state.drop() - }; - - let (root, db) = { - let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.exists(&a).unwrap(), true); - assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); - state.kill_account(&a); - state.commit().unwrap(); - assert_eq!(state.exists(&a).unwrap(), false); - assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); - state.drop() - }; - - let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.exists(&a).unwrap(), false); - assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); - } - - #[test] - fn alter_balance() { - let mut state = get_temp_state(); - let a = Address::zero(); - let b = 1u64.into(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); - state.commit().unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); - state.sub_balance(&a, &U256::from(42u64), &mut CleanupMode::NoEmpty).unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(27u64)); - state.commit().unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(27u64)); - state.transfer_balance(&a, &b, &U256::from(18u64), CleanupMode::NoEmpty).unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(9u64)); - assert_eq!(state.balance(&b).unwrap(), U256::from(18u64)); - state.commit().unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(9u64)); - assert_eq!(state.balance(&b).unwrap(), U256::from(18u64)); - } - - #[test] - fn alter_nonce() { - let mut state = get_temp_state(); - let a = Address::zero(); - state.inc_nonce(&a).unwrap(); - assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); - state.inc_nonce(&a).unwrap(); - assert_eq!(state.nonce(&a).unwrap(), U256::from(2u64)); - state.commit().unwrap(); - assert_eq!(state.nonce(&a).unwrap(), U256::from(2u64)); - 
state.inc_nonce(&a).unwrap(); - assert_eq!(state.nonce(&a).unwrap(), U256::from(3u64)); - state.commit().unwrap(); - assert_eq!(state.nonce(&a).unwrap(), U256::from(3u64)); - } - - #[test] - fn balance_nonce() { - let mut state = get_temp_state(); - let a = Address::zero(); - assert_eq!(state.balance(&a).unwrap(), U256::from(0u64)); - assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); - state.commit().unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(0u64)); - assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); - } - - #[test] - fn ensure_cached() { - let mut state = get_temp_state(); - let a = Address::zero(); - state.require(&a, false).unwrap(); - state.commit().unwrap(); - assert_eq!(*state.root(), "0ce23f3c809de377b008a4a3ee94a0834aac8bec1f86e28ffe4fdb5a15b0c785".into()); - } - - #[test] - fn checkpoint_basic() { - let mut state = get_temp_state(); - let a = Address::zero(); - state.checkpoint(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); - state.discard_checkpoint(); - assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); - state.checkpoint(); - state.add_balance(&a, &U256::from(1u64), CleanupMode::NoEmpty).unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(70u64)); - state.revert_to_checkpoint(); - assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); - } - - #[test] - fn checkpoint_nested() { - let mut state = get_temp_state(); - let a = Address::zero(); - state.checkpoint(); - state.checkpoint(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); - assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); - state.discard_checkpoint(); - assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); - state.revert_to_checkpoint(); - assert_eq!(state.balance(&a).unwrap(), U256::from(0)); - } - - #[test] - fn checkpoint_revert_to_get_storage_at() { - let mut state = get_temp_state(); - let a = 
Address::zero(); - let k = H256::from(U256::from(0)); - - let c0 = state.checkpoint(); - let c1 = state.checkpoint(); - state.set_storage(&a, k, H256::from(U256::from(1))).unwrap(); - - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(1))); - - state.revert_to_checkpoint(); // Revert to c1. - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0))); - } - - #[test] - fn checkpoint_from_empty_get_storage_at() { - let mut state = get_temp_state(); - let a = Address::zero(); - let k = H256::from(U256::from(0)); - let k2 = H256::from(U256::from(1)); - - assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0))); - state.clear(); - - let c0 = state.checkpoint(); - state.new_contract(&a, U256::zero(), U256::zero()).unwrap(); - let c1 = state.checkpoint(); - state.set_storage(&a, k, H256::from(U256::from(1))).unwrap(); - let c2 = state.checkpoint(); - let c3 = state.checkpoint(); - state.set_storage(&a, k2, H256::from(U256::from(3))).unwrap(); - state.set_storage(&a, k, H256::from(U256::from(3))).unwrap(); - let c4 = state.checkpoint(); - state.set_storage(&a, k, H256::from(U256::from(4))).unwrap(); - let c5 = state.checkpoint(); - - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c2, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c3, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c4, &a, &k).unwrap(), Some(H256::from(U256::from(3)))); - 
assert_eq!(state.checkpoint_storage_at(c5, &a, &k).unwrap(), Some(H256::from(U256::from(4)))); - - state.discard_checkpoint(); // Commit/discard c5. - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c2, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c3, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c4, &a, &k).unwrap(), Some(H256::from(U256::from(3)))); - - state.revert_to_checkpoint(); // Revert to c4. - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c2, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c3, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - - state.discard_checkpoint(); // Commit/discard c3. - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c2, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - - state.revert_to_checkpoint(); // Revert to c2. - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - - state.discard_checkpoint(); // Commit/discard c1. 
- assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - } - - #[test] - fn checkpoint_get_storage_at() { - let mut state = get_temp_state(); - let a = Address::zero(); - let k = H256::from(U256::from(0)); - let k2 = H256::from(U256::from(1)); - - state.set_storage(&a, k, H256::from(U256::from(0xffff))).unwrap(); - state.commit().unwrap(); - state.clear(); - - assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0xffff))); - state.clear(); - - let cm1 = state.checkpoint(); - let c0 = state.checkpoint(); - state.new_contract(&a, U256::zero(), U256::zero()).unwrap(); - let c1 = state.checkpoint(); - state.set_storage(&a, k, H256::from(U256::from(1))).unwrap(); - let c2 = state.checkpoint(); - let c3 = state.checkpoint(); - state.set_storage(&a, k2, H256::from(U256::from(3))).unwrap(); - state.set_storage(&a, k, H256::from(U256::from(3))).unwrap(); - let c4 = state.checkpoint(); - state.set_storage(&a, k, H256::from(U256::from(4))).unwrap(); - let c5 = state.checkpoint(); - - assert_eq!(state.checkpoint_storage_at(cm1, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c2, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c3, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c4, &a, &k).unwrap(), Some(H256::from(U256::from(3)))); - assert_eq!(state.checkpoint_storage_at(c5, &a, &k).unwrap(), Some(H256::from(U256::from(4)))); - - state.discard_checkpoint(); // Commit/discard c5. 
- assert_eq!(state.checkpoint_storage_at(cm1, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c2, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c3, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c4, &a, &k).unwrap(), Some(H256::from(U256::from(3)))); - - state.revert_to_checkpoint(); // Revert to c4. - assert_eq!(state.checkpoint_storage_at(cm1, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c2, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - assert_eq!(state.checkpoint_storage_at(c3, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - - state.discard_checkpoint(); // Commit/discard c3. - assert_eq!(state.checkpoint_storage_at(cm1, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - assert_eq!(state.checkpoint_storage_at(c2, &a, &k).unwrap(), Some(H256::from(U256::from(1)))); - - state.revert_to_checkpoint(); // Revert to c2. - assert_eq!(state.checkpoint_storage_at(cm1, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c1, &a, &k).unwrap(), Some(H256::from(U256::from(0)))); - - state.discard_checkpoint(); // Commit/discard c1. 
- assert_eq!(state.checkpoint_storage_at(cm1, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - assert_eq!(state.checkpoint_storage_at(c0, &a, &k).unwrap(), Some(H256::from(U256::from(0xffff)))); - } - - #[test] - fn kill_account_with_checkpoints() { - let mut state = get_temp_state(); - let a = Address::zero(); - let k = H256::from(U256::from(0)); - state.checkpoint(); - state.set_storage(&a, k, H256::from(U256::from(1))).unwrap(); - state.checkpoint(); - state.kill_account(&a); - - assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0))); - state.revert_to_checkpoint(); - assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(1))); - } - - #[test] - fn create_contract_fail() { - let mut state = get_temp_state(); - let orig_root = state.root().clone(); - let a: Address = 1000.into(); - - state.checkpoint(); // c1 - state.new_contract(&a, U256::zero(), U256::zero()).unwrap(); - state.add_balance(&a, &U256::from(1), CleanupMode::ForceCreate).unwrap(); - state.checkpoint(); // c2 - state.add_balance(&a, &U256::from(1), CleanupMode::ForceCreate).unwrap(); - state.discard_checkpoint(); // discard c2 - state.revert_to_checkpoint(); // revert to c1 - assert_eq!(state.exists(&a).unwrap(), false); - - state.commit().unwrap(); - assert_eq!(orig_root, state.root().clone()); - } - - #[test] - fn create_contract_fail_previous_storage() { - let mut state = get_temp_state(); - let a: Address = 1000.into(); - let k = H256::from(U256::from(0)); - - state.set_storage(&a, k, H256::from(U256::from(0xffff))).unwrap(); - state.commit().unwrap(); - state.clear(); - - let orig_root = state.root().clone(); - assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0xffff))); - state.clear(); - - state.checkpoint(); // c1 - state.new_contract(&a, U256::zero(), U256::zero()).unwrap(); - state.checkpoint(); // c2 - state.set_storage(&a, k, H256::from(U256::from(2))).unwrap(); - state.revert_to_checkpoint(); // revert to c2 - 
assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0))); - state.revert_to_checkpoint(); // revert to c1 - assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0xffff))); - - state.commit().unwrap(); - assert_eq!(orig_root, state.root().clone()); - } - - #[test] - fn create_empty() { - let mut state = get_temp_state(); - state.commit().unwrap(); - assert_eq!(*state.root(), "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421".into()); - } - - #[test] - fn should_not_panic_on_state_diff_with_storage() { - let mut state = get_temp_state(); - - let a: Address = 0xa.into(); - state.init_code(&a, b"abcdefg".to_vec()).unwrap();; - state.add_balance(&a, &256.into(), CleanupMode::NoEmpty).unwrap(); - state.set_storage(&a, 0xb.into(), 0xc.into()).unwrap(); - - let mut new_state = state.clone(); - new_state.set_storage(&a, 0xb.into(), 0xd.into()).unwrap(); - - new_state.diff_from(state).unwrap(); - } - - #[test] - fn should_kill_garbage() { - let a = 10.into(); - let b = 20.into(); - let c = 30.into(); - let d = 40.into(); - let e = 50.into(); - let x = 0.into(); - let db = get_temp_state_db(); - let (root, db) = { - let mut state = State::new(db, U256::from(0), Default::default()); - state.add_balance(&a, &U256::default(), CleanupMode::ForceCreate).unwrap(); // create an empty account - state.add_balance(&b, &100.into(), CleanupMode::ForceCreate).unwrap(); // create a dust account - state.add_balance(&c, &101.into(), CleanupMode::ForceCreate).unwrap(); // create a normal account - state.add_balance(&d, &99.into(), CleanupMode::ForceCreate).unwrap(); // create another dust account - state.new_contract(&e, 100.into(), 1.into()).unwrap(); // create a contract account - state.init_code(&e, vec![0x00]).unwrap(); - state.commit().unwrap(); - state.drop() - }; - - let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - let mut touched = HashSet::new(); - state.add_balance(&a, &U256::default(), 
CleanupMode::TrackTouched(&mut touched)).unwrap(); // touch an account - state.transfer_balance(&b, &x, &1.into(), CleanupMode::TrackTouched(&mut touched)).unwrap(); // touch an account decreasing its balance - state.transfer_balance(&c, &x, &1.into(), CleanupMode::TrackTouched(&mut touched)).unwrap(); // touch an account decreasing its balance - state.transfer_balance(&e, &x, &1.into(), CleanupMode::TrackTouched(&mut touched)).unwrap(); // touch an account decreasing its balance - state.kill_garbage(&touched, true, &None, false).unwrap(); - assert!(!state.exists(&a).unwrap()); - assert!(state.exists(&b).unwrap()); - state.kill_garbage(&touched, true, &Some(100.into()), false).unwrap(); - assert!(!state.exists(&b).unwrap()); - assert!(state.exists(&c).unwrap()); - assert!(state.exists(&d).unwrap()); - assert!(state.exists(&e).unwrap()); - state.kill_garbage(&touched, true, &Some(100.into()), true).unwrap(); - assert!(state.exists(&c).unwrap()); - assert!(state.exists(&d).unwrap()); - assert!(!state.exists(&e).unwrap()); - } - - #[test] - fn should_trace_diff_suicided_accounts() { - use pod_account; - - let a = 10.into(); - let db = get_temp_state_db(); - let (root, db) = { - let mut state = State::new(db, U256::from(0), Default::default()); - state.add_balance(&a, &100.into(), CleanupMode::ForceCreate).unwrap(); - state.commit().unwrap(); - state.drop() - }; - - let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - let original = state.clone(); - state.kill_account(&a); - - let diff = state.diff_from(original).unwrap(); - let diff_map = diff.get(); - assert_eq!(diff_map.len(), 1); - assert!(diff_map.get(&a).is_some()); - assert_eq!(diff_map.get(&a), - pod_account::diff_pod(Some(&PodAccount { - balance: U256::from(100), - nonce: U256::zero(), - code: Some(Default::default()), - storage: Default::default() - }), None).as_ref()); - } - - #[test] - fn should_trace_diff_unmodified_storage() { - use pod_account; - - let a = 
10.into(); - let db = get_temp_state_db(); - - let (root, db) = { - let mut state = State::new(db, U256::from(0), Default::default()); - state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(20u64))).unwrap(); - state.commit().unwrap(); - state.drop() - }; - - let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - let original = state.clone(); - state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(100u64))).unwrap(); - - let diff = state.diff_from(original).unwrap(); - let diff_map = diff.get(); - assert_eq!(diff_map.len(), 1); - assert!(diff_map.get(&a).is_some()); - assert_eq!(diff_map.get(&a), - pod_account::diff_pod(Some(&PodAccount { - balance: U256::zero(), - nonce: U256::zero(), - code: Some(Default::default()), - storage: vec![(H256::from(&U256::from(1u64)), H256::from(&U256::from(20u64)))] - .into_iter().collect(), - }), Some(&PodAccount { - balance: U256::zero(), - nonce: U256::zero(), - code: Some(Default::default()), - storage: vec![(H256::from(&U256::from(1u64)), H256::from(&U256::from(100u64)))] - .into_iter().collect(), - })).as_ref()); - } - - #[cfg(feature="to-pod-full")] - #[test] - fn should_get_full_pod_storage_values() { - use trie::{TrieFactory, TrieSpec}; - - let a = 10.into(); - let db = get_temp_state_db(); - - let factories = Factories { - vm: Default::default(), - trie: TrieFactory::new(TrieSpec::Fat), - accountdb: Default::default(), - }; - - let get_pod_state_val = |pod_state : &PodState, ak, k| { - pod_state.get().get(ak).unwrap().storage.get(&k).unwrap().clone() - }; - - let storage_address = H256::from(&U256::from(1u64)); - - let (root, db) = { - let mut state = State::new(db, U256::from(0), factories.clone()); - state.set_storage(&a, storage_address.clone(), H256::from(&U256::from(20u64))).unwrap(); - let dump = state.to_pod_full().unwrap(); - assert_eq!(get_pod_state_val(&dump, &a, storage_address.clone()), H256::from(&U256::from(20u64))); - 
state.commit().unwrap(); - let dump = state.to_pod_full().unwrap(); - assert_eq!(get_pod_state_val(&dump, &a, storage_address.clone()), H256::from(&U256::from(20u64))); - state.drop() - }; - - let mut state = State::from_existing(db, root, U256::from(0u8), factories).unwrap(); - let dump = state.to_pod_full().unwrap(); - assert_eq!(get_pod_state_val(&dump, &a, storage_address.clone()), H256::from(&U256::from(20u64))); - state.set_storage(&a, storage_address.clone(), H256::from(&U256::from(21u64))).unwrap(); - let dump = state.to_pod_full().unwrap(); - assert_eq!(get_pod_state_val(&dump, &a, storage_address.clone()), H256::from(&U256::from(21u64))); - state.commit().unwrap(); - state.set_storage(&a, storage_address.clone(), H256::from(&U256::from(0u64))).unwrap(); - let dump = state.to_pod_full().unwrap(); - assert_eq!(get_pod_state_val(&dump, &a, storage_address.clone()), H256::from(&U256::from(0u64))); - - } - + use super::*; + use ethereum_types::{Address, H256, U256}; + use ethkey::Secret; + use evm::CallType; + use hash::{keccak, KECCAK_NULL_RLP}; + use machine::EthereumMachine; + use rustc_hex::FromHex; + use spec::*; + use std::{str::FromStr, sync::Arc}; + use test_helpers::{get_temp_state, get_temp_state_db}; + use trace::{trace, FlatTrace, TraceError}; + use types::transaction::*; + use vm::EnvInfo; + + fn secret() -> Secret { + keccak("").into() + } + + fn make_frontier_machine(max_depth: usize) -> EthereumMachine { + let mut machine = ::ethereum::new_frontier_test_machine(); + machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); + machine + } + + #[test] + fn should_apply_create_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data: 
FromHex::from_hex("601080600c6000396000f3006000355415600957005b60203560003555") + .unwrap(), + } + .sign(&secret(), None); + + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 0, + action: trace::Action::Create(trace::Create { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + value: 100.into(), + gas: 77412.into(), + init: vec![ + 96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, + 91, 96, 32, 53, 96, 0, 53, 85, + ], + }), + result: trace::Res::Create(trace::CreateResult { + gas_used: U256::from(3224), + address: Address::from_str("8988167e088c87cd314df6d3c2b83da5acb93ace").unwrap(), + code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53], + }), + }]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_work_when_cloned() { + let _ = env_logger::try_init(); + + let a = Address::zero(); + + let mut state = { + let mut state = get_temp_state(); + assert_eq!(state.exists(&a).unwrap(), false); + state.inc_nonce(&a).unwrap(); + state.commit().unwrap(); + state.clone() + }; + + state.inc_nonce(&a).unwrap(); + state.commit().unwrap(); + } + + #[test] + fn should_trace_failed_create_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data: FromHex::from_hex("5b600056").unwrap(), + } + .sign(&secret(), None); + + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + let expected_trace = vec![FlatTrace { + trace_address: 
Default::default(), + action: trace::Action::Create(trace::Create { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + value: 100.into(), + gas: 78792.into(), + init: vec![91, 96, 0, 86], + }), + result: trace::Res::FailedCreate(TraceError::OutOfGas), + subtraces: 0, + }]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_call_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .init_code(&0xa.into(), FromHex::from_hex("6000").unwrap()) + .unwrap(); + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(3), + output: vec![], + }), + subtraces: 0, + }]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_basic_call_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + 
let result = state.apply(&info, &machine, &t, true).unwrap(); + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(0), + output: vec![], + }), + subtraces: 0, + }]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_call_transaction_to_builtin() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = Spec::new_test_machine(); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0x1.into()), + value: 0.into(), + data: vec![], + } + .sign(&secret(), None); + + let result = state.apply(&info, &machine, &t, true).unwrap(); + + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: "0000000000000000000000000000000000000001".into(), + value: 0.into(), + gas: 79_000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(3000), + output: vec![], + }), + subtraces: 0, + }]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_not_trace_subcall_transaction_to_builtin() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = Spec::new_test_machine(); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 0.into(), + data: vec![], + } + .sign(&secret(), None); + + state 
+ .init_code( + &0xa.into(), + FromHex::from_hex("600060006000600060006001610be0f1").unwrap(), + ) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 0.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(3_721), // in post-eip150 + output: vec![], + }), + subtraces: 0, + }]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_callcode_properly() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = Spec::new_test_machine(); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 0.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .init_code( + &0xa.into(), + FromHex::from_hex("60006000600060006000600b611000f2").unwrap(), + ) + .unwrap(); + state + .init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + + let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 0.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: 724.into(), // in post-eip150 + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 4096.into(), + input: 
vec![], + call_type: CallType::CallCode, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: 3.into(), + output: vec![], + }), + }, + ]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_delegatecall_properly() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + info.number = 0x789b0; + let machine = Spec::new_test_machine(); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 0.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .init_code( + &0xa.into(), + FromHex::from_hex("6000600060006000600b618000f4").unwrap(), + ) + .unwrap(); + state + .init_code( + &0xb.into(), + FromHex::from_hex("60056000526001601ff3").unwrap(), + ) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + + let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 0.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(736), // in post-eip150 + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 32768.into(), + input: vec![], + call_type: CallType::DelegateCall, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: 18.into(), + output: vec![5], + }), + }, + ]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_failed_call_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 
1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap()) + .unwrap(); + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::FailedCall(TraceError::OutOfGas), + subtraces: 0, + }]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_call_with_subcall_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .init_code( + &0xa.into(), + FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap(), + ) + .unwrap(); + state + .init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()) + .unwrap(); + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + + let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + 
call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(69), + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 78934.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(3), + output: vec![], + }), + }, + ]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_call_with_basic_subcall_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .init_code( + &0xa.into(), + FromHex::from_hex("60006000600060006045600b6000f1").unwrap(), + ) + .unwrap(); + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(31761), + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 69.into(), + gas: 2300.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult::default()), 
+ }, + ]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_not_trace_call_with_invalid_basic_subcall_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .init_code( + &0xa.into(), + FromHex::from_hex("600060006000600060ff600b6000f1").unwrap(), + ) + .unwrap(); // not enough funds. + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(31761), + output: vec![], + }), + }]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_failed_subcall_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], //600480600b6000396000f35b600056 + } + .sign(&secret(), None); + + state + .init_code( + &0xa.into(), + FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap(), + ) + .unwrap(); + state + .init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap()) + .unwrap(); + state + 
.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(79_000), + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 78934.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::FailedCall(TraceError::OutOfGas), + }, + ]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_call_with_subcall_with_subcall_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .init_code( + &0xa.into(), + FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap(), + ) + .unwrap(); + state + .init_code( + &0xb.into(), + FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap(), + ) + .unwrap(); + state + .init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()) + .unwrap(); + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); + let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: 
trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(135), + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 78934.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(69), + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0, 0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xb.into(), + to: 0xc.into(), + value: 0.into(), + gas: 78868.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(3), + output: vec![], + }), + }, + ]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_failed_subcall_with_subcall_transaction() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], //600480600b6000396000f35b600056 + } + .sign(&secret(), None); + + state + .init_code( + &0xa.into(), + FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap(), + ) + .unwrap(); + state + .init_code( + &0xb.into(), + FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap(), + ) + .unwrap(); + state + .init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()) + .unwrap(); + state + .add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty) + .unwrap(); + 
let result = state.apply(&info, &machine, &t, true).unwrap(); + + let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(79_000), + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 78934.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::FailedCall(TraceError::OutOfGas), + }, + FlatTrace { + trace_address: vec![0, 0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xb.into(), + to: 0xc.into(), + value: 0.into(), + gas: 78868.into(), + call_type: CallType::Call, + input: vec![], + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(3), + output: vec![], + }), + }, + ]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn should_trace_suicide() { + let _ = env_logger::try_init(); + + let mut state = get_temp_state(); + + let mut info = EnvInfo::default(); + info.gas_limit = 1_000_000.into(); + let machine = make_frontier_machine(5); + + let t = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Call(0xa.into()), + value: 100.into(), + data: vec![], + } + .sign(&secret(), None); + + state + .init_code( + &0xa.into(), + FromHex::from_hex("73000000000000000000000000000000000000000bff").unwrap(), + ) + .unwrap(); + state + .add_balance(&0xa.into(), &50.into(), CleanupMode::NoEmpty) + .unwrap(); + state + .add_balance(&t.sender(), &100.into(), CleanupMode::NoEmpty) + .unwrap(); + let result = state.apply(&info, &machine, &t, true).unwrap(); 
+ let expected_trace = vec![ + FlatTrace { + trace_address: Default::default(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 100.into(), + gas: 79000.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: 3.into(), + output: vec![], + }), + }, + FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Suicide(trace::Suicide { + address: 0xa.into(), + refund_address: 0xb.into(), + balance: 150.into(), + }), + result: trace::Res::None, + }, + ]; + + assert_eq!(result.trace, expected_trace); + } + + #[test] + fn code_from_database() { + let a = Address::zero(); + let (root, db) = { + let mut state = get_temp_state(); + state + .require_or_from( + &a, + false, + || Account::new_contract(42.into(), 0.into(), KECCAK_NULL_RLP), + |_| {}, + ) + .unwrap(); + state.init_code(&a, vec![1, 2, 3]).unwrap(); + assert_eq!(state.code(&a).unwrap(), Some(Arc::new(vec![1u8, 2, 3]))); + state.commit().unwrap(); + assert_eq!(state.code(&a).unwrap(), Some(Arc::new(vec![1u8, 2, 3]))); + state.drop() + }; + + let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); + assert_eq!(state.code(&a).unwrap(), Some(Arc::new(vec![1u8, 2, 3]))); + } + + #[test] + fn storage_at_from_database() { + let a = Address::zero(); + let (root, db) = { + let mut state = get_temp_state(); + state + .set_storage( + &a, + H256::from(&U256::from(1u64)), + H256::from(&U256::from(69u64)), + ) + .unwrap(); + state.commit().unwrap(); + state.drop() + }; + + let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); + assert_eq!( + s.storage_at(&a, &H256::from(&U256::from(1u64))).unwrap(), + H256::from(&U256::from(69u64)) + ); + } + + #[test] + fn get_from_database() { + let a = Address::zero(); + let (root, db) = { + let mut state = get_temp_state(); + 
state.inc_nonce(&a).unwrap(); + state + .add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty) + .unwrap(); + state.commit().unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + state.drop() + }; + + let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); + } + + #[test] + fn remove() { + let a = Address::zero(); + let mut state = get_temp_state(); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.exists_and_not_null(&a).unwrap(), false); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.exists(&a).unwrap(), true); + assert_eq!(state.exists_and_not_null(&a).unwrap(), true); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); + state.kill_account(&a); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.exists_and_not_null(&a).unwrap(), false); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); + } + + #[test] + fn empty_account_is_not_created() { + let a = Address::zero(); + let db = get_temp_state_db(); + let (root, db) = { + let mut state = State::new(db, U256::from(0), Default::default()); + state + .add_balance(&a, &U256::default(), CleanupMode::NoEmpty) + .unwrap(); // create an empty account + state.commit().unwrap(); + state.drop() + }; + let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); + assert!(!state.exists(&a).unwrap()); + assert!(!state.exists_and_not_null(&a).unwrap()); + } + + #[test] + fn empty_account_exists_when_creation_forced() { + let a = Address::zero(); + let db = get_temp_state_db(); + let (root, db) = { + let mut state = State::new(db, U256::from(0), Default::default()); + state + .add_balance(&a, &U256::default(), CleanupMode::ForceCreate) + .unwrap(); // create an empty account + state.commit().unwrap(); + state.drop() + }; + let state = State::from_existing(db, root, 
U256::from(0u8), Default::default()).unwrap(); + assert!(state.exists(&a).unwrap()); + assert!(!state.exists_and_not_null(&a).unwrap()); + } + + #[test] + fn remove_from_database() { + let a = Address::zero(); + let (root, db) = { + let mut state = get_temp_state(); + state.inc_nonce(&a).unwrap(); + state.commit().unwrap(); + assert_eq!(state.exists(&a).unwrap(), true); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); + state.drop() + }; + + let (root, db) = { + let mut state = + State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); + assert_eq!(state.exists(&a).unwrap(), true); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); + state.kill_account(&a); + state.commit().unwrap(); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); + state.drop() + }; + + let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); + } + + #[test] + fn alter_balance() { + let mut state = get_temp_state(); + let a = Address::zero(); + let b = 1u64.into(); + state + .add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty) + .unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + state.commit().unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + state + .sub_balance(&a, &U256::from(42u64), &mut CleanupMode::NoEmpty) + .unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(27u64)); + state.commit().unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(27u64)); + state + .transfer_balance(&a, &b, &U256::from(18u64), CleanupMode::NoEmpty) + .unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(9u64)); + assert_eq!(state.balance(&b).unwrap(), U256::from(18u64)); + state.commit().unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(9u64)); + assert_eq!(state.balance(&b).unwrap(), 
U256::from(18u64)); + } + + #[test] + fn alter_nonce() { + let mut state = get_temp_state(); + let a = Address::zero(); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(2u64)); + state.commit().unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(2u64)); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(3u64)); + state.commit().unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(3u64)); + } + + #[test] + fn balance_nonce() { + let mut state = get_temp_state(); + let a = Address::zero(); + assert_eq!(state.balance(&a).unwrap(), U256::from(0u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); + state.commit().unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(0u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); + } + + #[test] + fn ensure_cached() { + let mut state = get_temp_state(); + let a = Address::zero(); + state.require(&a, false).unwrap(); + state.commit().unwrap(); + assert_eq!( + *state.root(), + "0ce23f3c809de377b008a4a3ee94a0834aac8bec1f86e28ffe4fdb5a15b0c785".into() + ); + } + + #[test] + fn checkpoint_basic() { + let mut state = get_temp_state(); + let a = Address::zero(); + state.checkpoint(); + state + .add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty) + .unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + state.discard_checkpoint(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + state.checkpoint(); + state + .add_balance(&a, &U256::from(1u64), CleanupMode::NoEmpty) + .unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(70u64)); + state.revert_to_checkpoint(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + } + + #[test] + fn checkpoint_nested() { + let mut state = get_temp_state(); + let a = Address::zero(); + state.checkpoint(); + state.checkpoint(); + state + .add_balance(&a, 
&U256::from(69u64), CleanupMode::NoEmpty) + .unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + state.discard_checkpoint(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + state.revert_to_checkpoint(); + assert_eq!(state.balance(&a).unwrap(), U256::from(0)); + } + + #[test] + fn checkpoint_revert_to_get_storage_at() { + let mut state = get_temp_state(); + let a = Address::zero(); + let k = H256::from(U256::from(0)); + + let c0 = state.checkpoint(); + let c1 = state.checkpoint(); + state.set_storage(&a, k, H256::from(U256::from(1))).unwrap(); + + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(1))); + + state.revert_to_checkpoint(); // Revert to c1. + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0))); + } + + #[test] + fn checkpoint_from_empty_get_storage_at() { + let mut state = get_temp_state(); + let a = Address::zero(); + let k = H256::from(U256::from(0)); + let k2 = H256::from(U256::from(1)); + + assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0))); + state.clear(); + + let c0 = state.checkpoint(); + state.new_contract(&a, U256::zero(), U256::zero()).unwrap(); + let c1 = state.checkpoint(); + state.set_storage(&a, k, H256::from(U256::from(1))).unwrap(); + let c2 = state.checkpoint(); + let c3 = state.checkpoint(); + state + .set_storage(&a, k2, H256::from(U256::from(3))) + .unwrap(); + state.set_storage(&a, k, H256::from(U256::from(3))).unwrap(); + let c4 = state.checkpoint(); + state.set_storage(&a, k, H256::from(U256::from(4))).unwrap(); + let c5 = state.checkpoint(); + + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + 
Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c2, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c3, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c4, &a, &k).unwrap(), + Some(H256::from(U256::from(3))) + ); + assert_eq!( + state.checkpoint_storage_at(c5, &a, &k).unwrap(), + Some(H256::from(U256::from(4))) + ); + + state.discard_checkpoint(); // Commit/discard c5. + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c2, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c3, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c4, &a, &k).unwrap(), + Some(H256::from(U256::from(3))) + ); + + state.revert_to_checkpoint(); // Revert to c4. + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c2, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c3, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + + state.discard_checkpoint(); // Commit/discard c3. 
+ assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c2, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + + state.revert_to_checkpoint(); // Revert to c2. + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + + state.discard_checkpoint(); // Commit/discard c1. + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + } + + #[test] + fn checkpoint_get_storage_at() { + let mut state = get_temp_state(); + let a = Address::zero(); + let k = H256::from(U256::from(0)); + let k2 = H256::from(U256::from(1)); + + state + .set_storage(&a, k, H256::from(U256::from(0xffff))) + .unwrap(); + state.commit().unwrap(); + state.clear(); + + assert_eq!( + state.storage_at(&a, &k).unwrap(), + H256::from(U256::from(0xffff)) + ); + state.clear(); + + let cm1 = state.checkpoint(); + let c0 = state.checkpoint(); + state.new_contract(&a, U256::zero(), U256::zero()).unwrap(); + let c1 = state.checkpoint(); + state.set_storage(&a, k, H256::from(U256::from(1))).unwrap(); + let c2 = state.checkpoint(); + let c3 = state.checkpoint(); + state + .set_storage(&a, k2, H256::from(U256::from(3))) + .unwrap(); + state.set_storage(&a, k, H256::from(U256::from(3))).unwrap(); + let c4 = state.checkpoint(); + state.set_storage(&a, k, H256::from(U256::from(4))).unwrap(); + let c5 = state.checkpoint(); + + assert_eq!( + state.checkpoint_storage_at(cm1, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + 
Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c2, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c3, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c4, &a, &k).unwrap(), + Some(H256::from(U256::from(3))) + ); + assert_eq!( + state.checkpoint_storage_at(c5, &a, &k).unwrap(), + Some(H256::from(U256::from(4))) + ); + + state.discard_checkpoint(); // Commit/discard c5. + assert_eq!( + state.checkpoint_storage_at(cm1, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c2, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c3, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c4, &a, &k).unwrap(), + Some(H256::from(U256::from(3))) + ); + + state.revert_to_checkpoint(); // Revert to c4. + assert_eq!( + state.checkpoint_storage_at(cm1, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c2, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + assert_eq!( + state.checkpoint_storage_at(c3, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + + state.discard_checkpoint(); // Commit/discard c3. 
+ assert_eq!( + state.checkpoint_storage_at(cm1, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + assert_eq!( + state.checkpoint_storage_at(c2, &a, &k).unwrap(), + Some(H256::from(U256::from(1))) + ); + + state.revert_to_checkpoint(); // Revert to c2. + assert_eq!( + state.checkpoint_storage_at(cm1, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c1, &a, &k).unwrap(), + Some(H256::from(U256::from(0))) + ); + + state.discard_checkpoint(); // Commit/discard c1. + assert_eq!( + state.checkpoint_storage_at(cm1, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + assert_eq!( + state.checkpoint_storage_at(c0, &a, &k).unwrap(), + Some(H256::from(U256::from(0xffff))) + ); + } + + #[test] + fn kill_account_with_checkpoints() { + let mut state = get_temp_state(); + let a = Address::zero(); + let k = H256::from(U256::from(0)); + state.checkpoint(); + state.set_storage(&a, k, H256::from(U256::from(1))).unwrap(); + state.checkpoint(); + state.kill_account(&a); + + assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0))); + state.revert_to_checkpoint(); + assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(1))); + } + + #[test] + fn create_contract_fail() { + let mut state = get_temp_state(); + let orig_root = state.root().clone(); + let a: Address = 1000.into(); + + state.checkpoint(); // c1 + state.new_contract(&a, U256::zero(), U256::zero()).unwrap(); + state + .add_balance(&a, &U256::from(1), CleanupMode::ForceCreate) + .unwrap(); + state.checkpoint(); // c2 + state + .add_balance(&a, &U256::from(1), CleanupMode::ForceCreate) + .unwrap(); + 
state.discard_checkpoint(); // discard c2 + state.revert_to_checkpoint(); // revert to c1 + assert_eq!(state.exists(&a).unwrap(), false); + + state.commit().unwrap(); + assert_eq!(orig_root, state.root().clone()); + } + + #[test] + fn create_contract_fail_previous_storage() { + let mut state = get_temp_state(); + let a: Address = 1000.into(); + let k = H256::from(U256::from(0)); + + state + .set_storage(&a, k, H256::from(U256::from(0xffff))) + .unwrap(); + state.commit().unwrap(); + state.clear(); + + let orig_root = state.root().clone(); + assert_eq!( + state.storage_at(&a, &k).unwrap(), + H256::from(U256::from(0xffff)) + ); + state.clear(); + + state.checkpoint(); // c1 + state.new_contract(&a, U256::zero(), U256::zero()).unwrap(); + state.checkpoint(); // c2 + state.set_storage(&a, k, H256::from(U256::from(2))).unwrap(); + state.revert_to_checkpoint(); // revert to c2 + assert_eq!(state.storage_at(&a, &k).unwrap(), H256::from(U256::from(0))); + state.revert_to_checkpoint(); // revert to c1 + assert_eq!( + state.storage_at(&a, &k).unwrap(), + H256::from(U256::from(0xffff)) + ); + + state.commit().unwrap(); + assert_eq!(orig_root, state.root().clone()); + } + + #[test] + fn create_empty() { + let mut state = get_temp_state(); + state.commit().unwrap(); + assert_eq!( + *state.root(), + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421".into() + ); + } + + #[test] + fn should_not_panic_on_state_diff_with_storage() { + let mut state = get_temp_state(); + + let a: Address = 0xa.into(); + state.init_code(&a, b"abcdefg".to_vec()).unwrap(); + state + .add_balance(&a, &256.into(), CleanupMode::NoEmpty) + .unwrap(); + state.set_storage(&a, 0xb.into(), 0xc.into()).unwrap(); + + let mut new_state = state.clone(); + new_state.set_storage(&a, 0xb.into(), 0xd.into()).unwrap(); + + new_state.diff_from(state).unwrap(); + } + + #[test] + fn should_kill_garbage() { + let a = 10.into(); + let b = 20.into(); + let c = 30.into(); + let d = 40.into(); + let e = 
50.into(); + let x = 0.into(); + let db = get_temp_state_db(); + let (root, db) = { + let mut state = State::new(db, U256::from(0), Default::default()); + state + .add_balance(&a, &U256::default(), CleanupMode::ForceCreate) + .unwrap(); // create an empty account + state + .add_balance(&b, &100.into(), CleanupMode::ForceCreate) + .unwrap(); // create a dust account + state + .add_balance(&c, &101.into(), CleanupMode::ForceCreate) + .unwrap(); // create a normal account + state + .add_balance(&d, &99.into(), CleanupMode::ForceCreate) + .unwrap(); // create another dust account + state.new_contract(&e, 100.into(), 1.into()).unwrap(); // create a contract account + state.init_code(&e, vec![0x00]).unwrap(); + state.commit().unwrap(); + state.drop() + }; + + let mut state = + State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); + let mut touched = HashSet::new(); + state + .add_balance( + &a, + &U256::default(), + CleanupMode::TrackTouched(&mut touched), + ) + .unwrap(); // touch an account + state + .transfer_balance(&b, &x, &1.into(), CleanupMode::TrackTouched(&mut touched)) + .unwrap(); // touch an account decreasing its balance + state + .transfer_balance(&c, &x, &1.into(), CleanupMode::TrackTouched(&mut touched)) + .unwrap(); // touch an account decreasing its balance + state + .transfer_balance(&e, &x, &1.into(), CleanupMode::TrackTouched(&mut touched)) + .unwrap(); // touch an account decreasing its balance + state.kill_garbage(&touched, true, &None, false).unwrap(); + assert!(!state.exists(&a).unwrap()); + assert!(state.exists(&b).unwrap()); + state + .kill_garbage(&touched, true, &Some(100.into()), false) + .unwrap(); + assert!(!state.exists(&b).unwrap()); + assert!(state.exists(&c).unwrap()); + assert!(state.exists(&d).unwrap()); + assert!(state.exists(&e).unwrap()); + state + .kill_garbage(&touched, true, &Some(100.into()), true) + .unwrap(); + assert!(state.exists(&c).unwrap()); + assert!(state.exists(&d).unwrap()); + 
assert!(!state.exists(&e).unwrap()); + } + + #[test] + fn should_trace_diff_suicided_accounts() { + use pod_account; + + let a = 10.into(); + let db = get_temp_state_db(); + let (root, db) = { + let mut state = State::new(db, U256::from(0), Default::default()); + state + .add_balance(&a, &100.into(), CleanupMode::ForceCreate) + .unwrap(); + state.commit().unwrap(); + state.drop() + }; + + let mut state = + State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); + let original = state.clone(); + state.kill_account(&a); + + let diff = state.diff_from(original).unwrap(); + let diff_map = diff.get(); + assert_eq!(diff_map.len(), 1); + assert!(diff_map.get(&a).is_some()); + assert_eq!( + diff_map.get(&a), + pod_account::diff_pod( + Some(&PodAccount { + balance: U256::from(100), + nonce: U256::zero(), + code: Some(Default::default()), + storage: Default::default() + }), + None + ) + .as_ref() + ); + } + + #[test] + fn should_trace_diff_unmodified_storage() { + use pod_account; + + let a = 10.into(); + let db = get_temp_state_db(); + + let (root, db) = { + let mut state = State::new(db, U256::from(0), Default::default()); + state + .set_storage( + &a, + H256::from(&U256::from(1u64)), + H256::from(&U256::from(20u64)), + ) + .unwrap(); + state.commit().unwrap(); + state.drop() + }; + + let mut state = + State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); + let original = state.clone(); + state + .set_storage( + &a, + H256::from(&U256::from(1u64)), + H256::from(&U256::from(100u64)), + ) + .unwrap(); + + let diff = state.diff_from(original).unwrap(); + let diff_map = diff.get(); + assert_eq!(diff_map.len(), 1); + assert!(diff_map.get(&a).is_some()); + assert_eq!( + diff_map.get(&a), + pod_account::diff_pod( + Some(&PodAccount { + balance: U256::zero(), + nonce: U256::zero(), + code: Some(Default::default()), + storage: vec![( + H256::from(&U256::from(1u64)), + H256::from(&U256::from(20u64)) + )] + .into_iter() + .collect(), + 
}), + Some(&PodAccount { + balance: U256::zero(), + nonce: U256::zero(), + code: Some(Default::default()), + storage: vec![( + H256::from(&U256::from(1u64)), + H256::from(&U256::from(100u64)) + )] + .into_iter() + .collect(), + }) + ) + .as_ref() + ); + } + + #[cfg(feature = "to-pod-full")] + #[test] + fn should_get_full_pod_storage_values() { + use trie::{TrieFactory, TrieSpec}; + + let a = 10.into(); + let db = get_temp_state_db(); + + let factories = Factories { + vm: Default::default(), + trie: TrieFactory::new(TrieSpec::Fat), + accountdb: Default::default(), + }; + + let get_pod_state_val = |pod_state: &PodState, ak, k| { + pod_state + .get() + .get(ak) + .unwrap() + .storage + .get(&k) + .unwrap() + .clone() + }; + + let storage_address = H256::from(&U256::from(1u64)); + + let (root, db) = { + let mut state = State::new(db, U256::from(0), factories.clone()); + state + .set_storage(&a, storage_address.clone(), H256::from(&U256::from(20u64))) + .unwrap(); + let dump = state.to_pod_full().unwrap(); + assert_eq!( + get_pod_state_val(&dump, &a, storage_address.clone()), + H256::from(&U256::from(20u64)) + ); + state.commit().unwrap(); + let dump = state.to_pod_full().unwrap(); + assert_eq!( + get_pod_state_val(&dump, &a, storage_address.clone()), + H256::from(&U256::from(20u64)) + ); + state.drop() + }; + + let mut state = State::from_existing(db, root, U256::from(0u8), factories).unwrap(); + let dump = state.to_pod_full().unwrap(); + assert_eq!( + get_pod_state_val(&dump, &a, storage_address.clone()), + H256::from(&U256::from(20u64)) + ); + state + .set_storage(&a, storage_address.clone(), H256::from(&U256::from(21u64))) + .unwrap(); + let dump = state.to_pod_full().unwrap(); + assert_eq!( + get_pod_state_val(&dump, &a, storage_address.clone()), + H256::from(&U256::from(21u64)) + ); + state.commit().unwrap(); + state + .set_storage(&a, storage_address.clone(), H256::from(&U256::from(0u64))) + .unwrap(); + let dump = state.to_pod_full().unwrap(); + assert_eq!( + 
get_pod_state_val(&dump, &a, storage_address.clone()), + H256::from(&U256::from(0u64)) + ); + } } diff --git a/ethcore/src/state/substate.rs b/ethcore/src/state/substate.rs index 86f6e37f8..9009b6b88 100644 --- a/ethcore/src/state/substate.rs +++ b/ethcore/src/state/substate.rs @@ -15,92 +15,96 @@ // along with Parity Ethereum. If not, see . //! Execution environment substate. -use std::collections::HashSet; -use ethereum_types::Address; -use types::log_entry::LogEntry; -use evm::{Schedule, CleanDustMode}; use super::CleanupMode; +use ethereum_types::Address; +use evm::{CleanDustMode, Schedule}; +use std::collections::HashSet; +use types::log_entry::LogEntry; /// State changes which should be applied in finalize, /// after transaction is fully executed. #[derive(Debug, Default)] pub struct Substate { - /// Any accounts that have suicided. - pub suicides: HashSet
, + /// Any accounts that have suicided. + pub suicides: HashSet
, - /// Any accounts that are touched. - pub touched: HashSet
, + /// Any accounts that are touched. + pub touched: HashSet
, - /// Any logs. - pub logs: Vec, + /// Any logs. + pub logs: Vec, - /// Refund counter of SSTORE. - pub sstore_clears_refund: i128, + /// Refund counter of SSTORE. + pub sstore_clears_refund: i128, - /// Created contracts. - pub contracts_created: Vec
, + /// Created contracts. + pub contracts_created: Vec
, } impl Substate { - /// Creates new substate. - pub fn new() -> Self { - Substate::default() - } + /// Creates new substate. + pub fn new() -> Self { + Substate::default() + } - /// Merge secondary substate `s` into self, accruing each element correspondingly. - pub fn accrue(&mut self, s: Substate) { - self.suicides.extend(s.suicides); - self.touched.extend(s.touched); - self.logs.extend(s.logs); - self.sstore_clears_refund += s.sstore_clears_refund; - self.contracts_created.extend(s.contracts_created); - } + /// Merge secondary substate `s` into self, accruing each element correspondingly. + pub fn accrue(&mut self, s: Substate) { + self.suicides.extend(s.suicides); + self.touched.extend(s.touched); + self.logs.extend(s.logs); + self.sstore_clears_refund += s.sstore_clears_refund; + self.contracts_created.extend(s.contracts_created); + } - /// Get the cleanup mode object from this. - pub fn to_cleanup_mode(&mut self, schedule: &Schedule) -> CleanupMode { - match (schedule.kill_dust != CleanDustMode::Off, schedule.no_empty, schedule.kill_empty) { - (false, false, _) => CleanupMode::ForceCreate, - (false, true, false) => CleanupMode::NoEmpty, - (false, true, true) | (true, _, _,) => CleanupMode::TrackTouched(&mut self.touched), - } - } + /// Get the cleanup mode object from this. 
+ pub fn to_cleanup_mode(&mut self, schedule: &Schedule) -> CleanupMode { + match ( + schedule.kill_dust != CleanDustMode::Off, + schedule.no_empty, + schedule.kill_empty, + ) { + (false, false, _) => CleanupMode::ForceCreate, + (false, true, false) => CleanupMode::NoEmpty, + (false, true, true) | (true, _, _) => CleanupMode::TrackTouched(&mut self.touched), + } + } } #[cfg(test)] mod tests { - use super::Substate; - use types::log_entry::LogEntry; + use super::Substate; + use types::log_entry::LogEntry; - #[test] - fn created() { - let sub_state = Substate::new(); - assert_eq!(sub_state.suicides.len(), 0); - } + #[test] + fn created() { + let sub_state = Substate::new(); + assert_eq!(sub_state.suicides.len(), 0); + } - #[test] - fn accrue() { - let mut sub_state = Substate::new(); - sub_state.contracts_created.push(1u64.into()); - sub_state.logs.push(LogEntry { - address: 1u64.into(), - topics: vec![], - data: vec![] - }); - sub_state.sstore_clears_refund = (15000 * 5).into(); - sub_state.suicides.insert(10u64.into()); + #[test] + fn accrue() { + let mut sub_state = Substate::new(); + sub_state.contracts_created.push(1u64.into()); + sub_state.logs.push(LogEntry { + address: 1u64.into(), + topics: vec![], + data: vec![], + }); + sub_state.sstore_clears_refund = (15000 * 5).into(); + sub_state.suicides.insert(10u64.into()); - let mut sub_state_2 = Substate::new(); - sub_state_2.contracts_created.push(2u64.into()); - sub_state_2.logs.push(LogEntry { - address: 1u64.into(), - topics: vec![], - data: vec![] - }); - sub_state_2.sstore_clears_refund = (15000 * 7).into(); + let mut sub_state_2 = Substate::new(); + sub_state_2.contracts_created.push(2u64.into()); + sub_state_2.logs.push(LogEntry { + address: 1u64.into(), + topics: vec![], + data: vec![], + }); + sub_state_2.sstore_clears_refund = (15000 * 7).into(); - sub_state.accrue(sub_state_2); - assert_eq!(sub_state.contracts_created.len(), 2); - assert_eq!(sub_state.sstore_clears_refund, (15000 * 12).into()); - 
assert_eq!(sub_state.suicides.len(), 1); - } + sub_state.accrue(sub_state_2); + assert_eq!(sub_state.contracts_created.len(), 2); + assert_eq!(sub_state.sstore_clears_refund, (15000 * 12).into()); + assert_eq!(sub_state.suicides.len(), 1); + } } diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs index 066a4f616..1103eb01c 100644 --- a/ethcore/src/state_db.rs +++ b/ethcore/src/state_db.rs @@ -16,19 +16,21 @@ //! State database abstraction. For more info, see the doc for `StateDB` -use std::collections::{VecDeque, HashSet}; -use std::io; -use std::sync::Arc; +use std::{ + collections::{HashSet, VecDeque}, + io, + sync::Arc, +}; use bloom_journal::{Bloom, BloomJournal}; -use byteorder::{LittleEndian, ByteOrder}; +use byteorder::{ByteOrder, LittleEndian}; use db::COL_ACCOUNT_BLOOM; -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; use hash::keccak; use hash_db::HashDB; use journaldb::JournalDB; use keccak_hasher::KeccakHasher; -use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use kvdb::{DBTransaction, DBValue, KeyValueDB}; use lru_cache::LruCache; use memory_cache::MemoryLruCache; use parking_lot::Mutex; @@ -56,39 +58,39 @@ const ACCOUNT_CACHE_RATIO: usize = 90; /// Shared canonical state cache. struct AccountCache { - /// DB Account cache. `None` indicates that account is known to be missing. - // When changing the type of the values here, be sure to update `mem_used` and - // `new`. - accounts: LruCache>, - /// Information on the modifications in recently committed blocks; specifically which addresses - /// changed in which block. Ordered by block number. - modifications: VecDeque, + /// DB Account cache. `None` indicates that account is known to be missing. + // When changing the type of the values here, be sure to update `mem_used` and + // `new`. + accounts: LruCache>, + /// Information on the modifications in recently committed blocks; specifically which addresses + /// changed in which block. Ordered by block number. 
+ modifications: VecDeque, } /// Buffered account cache item. struct CacheQueueItem { - /// Account address. - address: Address, - /// Acccount data or `None` if account does not exist. - account: SyncAccount, - /// Indicates that the account was modified before being - /// added to the cache. - modified: bool, + /// Account address. + address: Address, + /// Acccount data or `None` if account does not exist. + account: SyncAccount, + /// Indicates that the account was modified before being + /// added to the cache. + modified: bool, } #[derive(Debug)] /// Accumulates a list of accounts changed in a block. struct BlockChanges { - /// Block number. - number: BlockNumber, - /// Block hash. - hash: H256, - /// Parent block hash. - parent: H256, - /// A set of modified account addresses. - accounts: HashSet
, - /// Block is part of the canonical chain. - is_canon: bool, + /// Block number. + number: BlockNumber, + /// Block hash. + hash: H256, + /// Parent block hash. + parent: H256, + /// A set of modified account addresses. + accounts: HashSet
, + /// Block is part of the canonical chain. + is_canon: bool, } /// State database abstraction. @@ -106,367 +108,417 @@ struct BlockChanges { /// Then, after the block has been added to the chain the local cache in the /// `StateDB` is propagated into the global cache. pub struct StateDB { - /// Backing database. - db: Box, - /// Shared canonical state cache. - account_cache: Arc>, - /// DB Code cache. Maps code hashes to shared bytes. - code_cache: Arc>>>>, - /// Local dirty cache. - local_cache: Vec, - /// Shared account bloom. Does not handle chain reorganizations. - account_bloom: Arc>, - cache_size: usize, - /// Hash of the block on top of which this instance was created or - /// `None` if cache is disabled - parent_hash: Option, - /// Hash of the committing block or `None` if not committed yet. - commit_hash: Option, - /// Number of the committing block or `None` if not committed yet. - commit_number: Option, + /// Backing database. + db: Box, + /// Shared canonical state cache. + account_cache: Arc>, + /// DB Code cache. Maps code hashes to shared bytes. + code_cache: Arc>>>>, + /// Local dirty cache. + local_cache: Vec, + /// Shared account bloom. Does not handle chain reorganizations. + account_bloom: Arc>, + cache_size: usize, + /// Hash of the block on top of which this instance was created or + /// `None` if cache is disabled + parent_hash: Option, + /// Hash of the committing block or `None` if not committed yet. + commit_hash: Option, + /// Number of the committing block or `None` if not committed yet. + commit_number: Option, } impl StateDB { + /// Create a new instance wrapping `JournalDB` and the maximum allowed size + /// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping. + // TODO: make the cache size actually accurate by moving the account storage cache + // into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`. 
+ pub fn new(db: Box, cache_size: usize) -> StateDB { + let bloom = Self::load_bloom(&**db.backing()); + let acc_cache_size = cache_size * ACCOUNT_CACHE_RATIO / 100; + let code_cache_size = cache_size - acc_cache_size; + let cache_items = acc_cache_size / ::std::mem::size_of::>(); - /// Create a new instance wrapping `JournalDB` and the maximum allowed size - /// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping. - // TODO: make the cache size actually accurate by moving the account storage cache - // into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`. - pub fn new(db: Box, cache_size: usize) -> StateDB { - let bloom = Self::load_bloom(&**db.backing()); - let acc_cache_size = cache_size * ACCOUNT_CACHE_RATIO / 100; - let code_cache_size = cache_size - acc_cache_size; - let cache_items = acc_cache_size / ::std::mem::size_of::>(); + StateDB { + db: db, + account_cache: Arc::new(Mutex::new(AccountCache { + accounts: LruCache::new(cache_items), + modifications: VecDeque::new(), + })), + code_cache: Arc::new(Mutex::new(MemoryLruCache::new(code_cache_size))), + local_cache: Vec::new(), + account_bloom: Arc::new(Mutex::new(bloom)), + cache_size: cache_size, + parent_hash: None, + commit_hash: None, + commit_number: None, + } + } - StateDB { - db: db, - account_cache: Arc::new(Mutex::new(AccountCache { - accounts: LruCache::new(cache_items), - modifications: VecDeque::new(), - })), - code_cache: Arc::new(Mutex::new(MemoryLruCache::new(code_cache_size))), - local_cache: Vec::new(), - account_bloom: Arc::new(Mutex::new(bloom)), - cache_size: cache_size, - parent_hash: None, - commit_hash: None, - commit_number: None, - } - } + /// Loads accounts bloom from the database + /// This bloom is used to handle request for the non-existant account fast + pub fn load_bloom(db: &KeyValueDB) -> Bloom { + let hash_count_entry = db + .get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY) + .expect("Low-level database 
error"); - /// Loads accounts bloom from the database - /// This bloom is used to handle request for the non-existant account fast - pub fn load_bloom(db: &KeyValueDB) -> Bloom { - let hash_count_entry = db.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY) - .expect("Low-level database error"); + let hash_count_bytes = match hash_count_entry { + Some(bytes) => bytes, + None => return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET), + }; - let hash_count_bytes = match hash_count_entry { - Some(bytes) => bytes, - None => return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET), - }; + assert_eq!(hash_count_bytes.len(), 1); + let hash_count = hash_count_bytes[0]; - assert_eq!(hash_count_bytes.len(), 1); - let hash_count = hash_count_bytes[0]; + let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8]; + let mut key = [0u8; 8]; + for i in 0..ACCOUNT_BLOOM_SPACE / 8 { + LittleEndian::write_u64(&mut key, i as u64); + bloom_parts[i] = db + .get(COL_ACCOUNT_BLOOM, &key) + .expect("low-level database error") + .and_then(|val| Some(LittleEndian::read_u64(&val[..]))) + .unwrap_or(0u64); + } - let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8]; - let mut key = [0u8; 8]; - for i in 0..ACCOUNT_BLOOM_SPACE / 8 { - LittleEndian::write_u64(&mut key, i as u64); - bloom_parts[i] = db.get(COL_ACCOUNT_BLOOM, &key).expect("low-level database error") - .and_then(|val| Some(LittleEndian::read_u64(&val[..]))) - .unwrap_or(0u64); - } + let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32); + trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.saturation(), hash_count); + bloom + } - let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32); - trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.saturation(), hash_count); - bloom - } + /// Commit blooms journal to the database transaction + pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> io::Result<()> { + 
assert!(journal.hash_functions <= 255); + batch.put( + COL_ACCOUNT_BLOOM, + ACCOUNT_BLOOM_HASHCOUNT_KEY, + &[journal.hash_functions as u8], + ); + let mut key = [0u8; 8]; + let mut val = [0u8; 8]; - /// Commit blooms journal to the database transaction - pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> io::Result<()> { - assert!(journal.hash_functions <= 255); - batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &[journal.hash_functions as u8]); - let mut key = [0u8; 8]; - let mut val = [0u8; 8]; + for (bloom_part_index, bloom_part_value) in journal.entries { + LittleEndian::write_u64(&mut key, bloom_part_index as u64); + LittleEndian::write_u64(&mut val, bloom_part_value); + batch.put(COL_ACCOUNT_BLOOM, &key, &val); + } + Ok(()) + } - for (bloom_part_index, bloom_part_value) in journal.entries { - LittleEndian::write_u64(&mut key, bloom_part_index as u64); - LittleEndian::write_u64(&mut val, bloom_part_value); - batch.put(COL_ACCOUNT_BLOOM, &key, &val); - } - Ok(()) - } + /// Journal all recent operations under the given era and ID. + pub fn journal_under( + &mut self, + batch: &mut DBTransaction, + now: u64, + id: &H256, + ) -> io::Result { + { + let mut bloom_lock = self.account_bloom.lock(); + Self::commit_bloom(batch, bloom_lock.drain_journal())?; + } + let records = self.db.journal_under(batch, now, id)?; + self.commit_hash = Some(id.clone()); + self.commit_number = Some(now); + Ok(records) + } - /// Journal all recent operations under the given era and ID. 
- pub fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result { - { - let mut bloom_lock = self.account_bloom.lock(); - Self::commit_bloom(batch, bloom_lock.drain_journal())?; - } - let records = self.db.journal_under(batch, now, id)?; - self.commit_hash = Some(id.clone()); - self.commit_number = Some(now); - Ok(records) - } + /// Mark a given candidate from an ancient era as canonical, enacting its removals from the + /// backing database and reverting any non-canonical historical commit's insertions. + pub fn mark_canonical( + &mut self, + batch: &mut DBTransaction, + end_era: u64, + canon_id: &H256, + ) -> io::Result { + self.db.mark_canonical(batch, end_era, canon_id) + } - /// Mark a given candidate from an ancient era as canonical, enacting its removals from the - /// backing database and reverting any non-canonical historical commit's insertions. - pub fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result { - self.db.mark_canonical(batch, end_era, canon_id) - } + /// Propagate local cache into the global cache and synchonize + /// the global cache with the best block state. + /// This function updates the global cache by removing entries + /// that are invalidated by chain reorganization. `sync_cache` + /// should be called after the block has been committed and the + /// blockchain route has ben calculated. + pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) { + trace!( + "sync_cache id = (#{:?}, {:?}), parent={:?}, best={}", + self.commit_number, + self.commit_hash, + self.parent_hash, + is_best + ); + let mut cache = self.account_cache.lock(); + let cache = &mut *cache; - /// Propagate local cache into the global cache and synchonize - /// the global cache with the best block state. - /// This function updates the global cache by removing entries - /// that are invalidated by chain reorganization. 
`sync_cache` - /// should be called after the block has been committed and the - /// blockchain route has ben calculated. - pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) { - trace!("sync_cache id = (#{:?}, {:?}), parent={:?}, best={}", self.commit_number, self.commit_hash, self.parent_hash, is_best); - let mut cache = self.account_cache.lock(); - let cache = &mut *cache; + // Purge changes from re-enacted and retracted blocks. + // Filter out commiting block if any. + let mut clear = false; + for block in enacted + .iter() + .filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h != p)) + { + clear = clear || { + if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { + trace!("Reverting enacted block {:?}", block); + m.is_canon = true; + for a in &m.accounts { + trace!("Reverting enacted address {:?}", a); + cache.accounts.remove(a); + } + false + } else { + true + } + }; + } - // Purge changes from re-enacted and retracted blocks. - // Filter out commiting block if any. 
- let mut clear = false; - for block in enacted.iter().filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h != p)) { - clear = clear || { - if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { - trace!("Reverting enacted block {:?}", block); - m.is_canon = true; - for a in &m.accounts { - trace!("Reverting enacted address {:?}", a); - cache.accounts.remove(a); - } - false - } else { - true - } - }; - } + for block in retracted { + clear = clear || { + if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { + trace!("Retracting block {:?}", block); + m.is_canon = false; + for a in &m.accounts { + trace!("Retracted address {:?}", a); + cache.accounts.remove(a); + } + false + } else { + true + } + }; + } + if clear { + // We don't know anything about the block; clear everything + trace!("Wiping cache"); + cache.accounts.clear(); + cache.modifications.clear(); + } - for block in retracted { - clear = clear || { - if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { - trace!("Retracting block {:?}", block); - m.is_canon = false; - for a in &m.accounts { - trace!("Retracted address {:?}", a); - cache.accounts.remove(a); - } - false - } else { - true - } - }; - } - if clear { - // We don't know anything about the block; clear everything - trace!("Wiping cache"); - cache.accounts.clear(); - cache.modifications.clear(); - } + // Propagate cache only if committing on top of the latest canonical state + // blocks are ordered by number and only one block with a given number is marked as canonical + // (contributed to canonical state cache) + if let (Some(ref number), Some(ref hash), Some(ref parent)) = + (self.commit_number, self.commit_hash, self.parent_hash) + { + if cache.modifications.len() == STATE_CACHE_BLOCKS { + cache.modifications.pop_back(); + } + let mut modifications = HashSet::new(); + trace!("committing {} cache entries", self.local_cache.len()); + for account in 
self.local_cache.drain(..) { + if account.modified { + modifications.insert(account.address.clone()); + } + if is_best { + let acc = account.account.0; + if let Some(&mut Some(ref mut existing)) = + cache.accounts.get_mut(&account.address) + { + if let Some(new) = acc { + if account.modified { + existing.overwrite_with(new); + } + continue; + } + } + cache.accounts.insert(account.address, acc); + } + } - // Propagate cache only if committing on top of the latest canonical state - // blocks are ordered by number and only one block with a given number is marked as canonical - // (contributed to canonical state cache) - if let (Some(ref number), Some(ref hash), Some(ref parent)) = (self.commit_number, self.commit_hash, self.parent_hash) { - if cache.modifications.len() == STATE_CACHE_BLOCKS { - cache.modifications.pop_back(); - } - let mut modifications = HashSet::new(); - trace!("committing {} cache entries", self.local_cache.len()); - for account in self.local_cache.drain(..) { - if account.modified { - modifications.insert(account.address.clone()); - } - if is_best { - let acc = account.account.0; - if let Some(&mut Some(ref mut existing)) = cache.accounts.get_mut(&account.address) { - if let Some(new) = acc { - if account.modified { - existing.overwrite_with(new); - } - continue; - } - } - cache.accounts.insert(account.address, acc); - } - } + // Save modified accounts. These are ordered by the block number. + let block_changes = BlockChanges { + accounts: modifications, + number: *number, + hash: hash.clone(), + is_canon: is_best, + parent: parent.clone(), + }; + let insert_at = cache + .modifications + .iter() + .enumerate() + .find(|&(_, m)| m.number < *number) + .map(|(i, _)| i); + trace!("inserting modifications at {:?}", insert_at); + if let Some(insert_at) = insert_at { + cache.modifications.insert(insert_at, block_changes); + } else { + cache.modifications.push_back(block_changes); + } + } + } - // Save modified accounts. 
These are ordered by the block number. - let block_changes = BlockChanges { - accounts: modifications, - number: *number, - hash: hash.clone(), - is_canon: is_best, - parent: parent.clone(), - }; - let insert_at = cache.modifications.iter().enumerate().find(|&(_, m)| m.number < *number).map(|(i, _)| i); - trace!("inserting modifications at {:?}", insert_at); - if let Some(insert_at) = insert_at { - cache.modifications.insert(insert_at, block_changes); - } else { - cache.modifications.push_back(block_changes); - } - } - } + /// Conversion method to interpret self as `HashDB` reference + pub fn as_hash_db(&self) -> &HashDB { + self.db.as_hash_db() + } - /// Conversion method to interpret self as `HashDB` reference - pub fn as_hash_db(&self) -> &HashDB { - self.db.as_hash_db() - } + /// Conversion method to interpret self as mutable `HashDB` reference + pub fn as_hash_db_mut(&mut self) -> &mut HashDB { + self.db.as_hash_db_mut() + } - /// Conversion method to interpret self as mutable `HashDB` reference - pub fn as_hash_db_mut(&mut self) -> &mut HashDB { - self.db.as_hash_db_mut() - } + /// Clone the database. + pub fn boxed_clone(&self) -> StateDB { + StateDB { + db: self.db.boxed_clone(), + account_cache: self.account_cache.clone(), + code_cache: self.code_cache.clone(), + local_cache: Vec::new(), + account_bloom: self.account_bloom.clone(), + cache_size: self.cache_size, + parent_hash: None, + commit_hash: None, + commit_number: None, + } + } - /// Clone the database. - pub fn boxed_clone(&self) -> StateDB { - StateDB { - db: self.db.boxed_clone(), - account_cache: self.account_cache.clone(), - code_cache: self.code_cache.clone(), - local_cache: Vec::new(), - account_bloom: self.account_bloom.clone(), - cache_size: self.cache_size, - parent_hash: None, - commit_hash: None, - commit_number: None, - } - } + /// Clone the database for a canonical state. 
+ pub fn boxed_clone_canon(&self, parent: &H256) -> StateDB { + StateDB { + db: self.db.boxed_clone(), + account_cache: self.account_cache.clone(), + code_cache: self.code_cache.clone(), + local_cache: Vec::new(), + account_bloom: self.account_bloom.clone(), + cache_size: self.cache_size, + parent_hash: Some(parent.clone()), + commit_hash: None, + commit_number: None, + } + } - /// Clone the database for a canonical state. - pub fn boxed_clone_canon(&self, parent: &H256) -> StateDB { - StateDB { - db: self.db.boxed_clone(), - account_cache: self.account_cache.clone(), - code_cache: self.code_cache.clone(), - local_cache: Vec::new(), - account_bloom: self.account_bloom.clone(), - cache_size: self.cache_size, - parent_hash: Some(parent.clone()), - commit_hash: None, - commit_number: None, - } - } + /// Check if pruning is enabled on the database. + pub fn is_pruned(&self) -> bool { + self.db.is_pruned() + } - /// Check if pruning is enabled on the database. - pub fn is_pruned(&self) -> bool { - self.db.is_pruned() - } + /// Heap size used. + pub fn mem_used(&self) -> usize { + // TODO: account for LRU-cache overhead; this is a close approximation. + self.db.mem_used() + { + let accounts = self.account_cache.lock().accounts.len(); + let code_size = self.code_cache.lock().current_size(); + code_size + accounts * ::std::mem::size_of::>() + } + } - /// Heap size used. - pub fn mem_used(&self) -> usize { - // TODO: account for LRU-cache overhead; this is a close approximation. - self.db.mem_used() + { - let accounts = self.account_cache.lock().accounts.len(); - let code_size = self.code_cache.lock().current_size(); - code_size + accounts * ::std::mem::size_of::>() - } - } + /// Returns underlying `JournalDB`. + pub fn journal_db(&self) -> &JournalDB { + &*self.db + } - /// Returns underlying `JournalDB`. - pub fn journal_db(&self) -> &JournalDB { - &*self.db - } + /// Query how much memory is set aside for the accounts cache (in bytes). 
+ pub fn cache_size(&self) -> usize { + self.cache_size + } - /// Query how much memory is set aside for the accounts cache (in bytes). - pub fn cache_size(&self) -> usize { - self.cache_size - } - - /// Check if the account can be returned from cache by matching current block parent hash against canonical - /// state and filtering out account modified in later blocks. - fn is_allowed(addr: &Address, parent_hash: &H256, modifications: &VecDeque) -> bool { - if modifications.is_empty() { - return true; - } - // Ignore all accounts modified in later blocks - // Modifications contains block ordered by the number - // We search for our parent in that list first and then for - // all its parent until we hit the canonical block, - // checking against all the intermediate modifications. - let mut parent = parent_hash; - for m in modifications { - if &m.hash == parent { - if m.is_canon { - return true; - } - parent = &m.parent; - } - if m.accounts.contains(addr) { - trace!("Cache lookup skipped for {:?}: modified in a later block", addr); - return false; - } - } - trace!("Cache lookup skipped for {:?}: parent hash is unknown", addr); - false - } + /// Check if the account can be returned from cache by matching current block parent hash against canonical + /// state and filtering out account modified in later blocks. + fn is_allowed( + addr: &Address, + parent_hash: &H256, + modifications: &VecDeque, + ) -> bool { + if modifications.is_empty() { + return true; + } + // Ignore all accounts modified in later blocks + // Modifications contains block ordered by the number + // We search for our parent in that list first and then for + // all its parent until we hit the canonical block, + // checking against all the intermediate modifications. 
+ let mut parent = parent_hash; + for m in modifications { + if &m.hash == parent { + if m.is_canon { + return true; + } + parent = &m.parent; + } + if m.accounts.contains(addr) { + trace!( + "Cache lookup skipped for {:?}: modified in a later block", + addr + ); + return false; + } + } + trace!( + "Cache lookup skipped for {:?}: parent hash is unknown", + addr + ); + false + } } impl state::Backend for StateDB { - fn as_hash_db(&self) -> &HashDB { self.db.as_hash_db() } + fn as_hash_db(&self) -> &HashDB { + self.db.as_hash_db() + } - fn as_hash_db_mut(&mut self) -> &mut HashDB { - self.db.as_hash_db_mut() - } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self.db.as_hash_db_mut() + } - fn add_to_account_cache(&mut self, addr: Address, data: Option, modified: bool) { - self.local_cache.push(CacheQueueItem { - address: addr, - account: SyncAccount(data), - modified: modified, - }) - } + fn add_to_account_cache(&mut self, addr: Address, data: Option, modified: bool) { + self.local_cache.push(CacheQueueItem { + address: addr, + account: SyncAccount(data), + modified: modified, + }) + } - fn cache_code(&self, hash: H256, code: Arc>) { - let mut cache = self.code_cache.lock(); + fn cache_code(&self, hash: H256, code: Arc>) { + let mut cache = self.code_cache.lock(); - cache.insert(hash, code); - } + cache.insert(hash, code); + } - fn get_cached_account(&self, addr: &Address) -> Option> { - self.parent_hash.as_ref().and_then(|parent_hash| { - let mut cache = self.account_cache.lock(); - if !Self::is_allowed(addr, parent_hash, &cache.modifications) { - return None; - } - cache.accounts.get_mut(addr).map(|a| a.as_ref().map(|a| a.clone_basic())) - }) - } + fn get_cached_account(&self, addr: &Address) -> Option> { + self.parent_hash.as_ref().and_then(|parent_hash| { + let mut cache = self.account_cache.lock(); + if !Self::is_allowed(addr, parent_hash, &cache.modifications) { + return None; + } + cache + .accounts + .get_mut(addr) + .map(|a| a.as_ref().map(|a| 
a.clone_basic())) + }) + } - fn get_cached(&self, a: &Address, f: F) -> Option - where F: FnOnce(Option<&mut Account>) -> U - { - self.parent_hash.as_ref().and_then(|parent_hash| { - let mut cache = self.account_cache.lock(); - if !Self::is_allowed(a, parent_hash, &cache.modifications) { - return None; - } - cache.accounts.get_mut(a).map(|c| f(c.as_mut())) - }) - } + fn get_cached(&self, a: &Address, f: F) -> Option + where + F: FnOnce(Option<&mut Account>) -> U, + { + self.parent_hash.as_ref().and_then(|parent_hash| { + let mut cache = self.account_cache.lock(); + if !Self::is_allowed(a, parent_hash, &cache.modifications) { + return None; + } + cache.accounts.get_mut(a).map(|c| f(c.as_mut())) + }) + } - fn get_cached_code(&self, hash: &H256) -> Option>> { - let mut cache = self.code_cache.lock(); + fn get_cached_code(&self, hash: &H256) -> Option>> { + let mut cache = self.code_cache.lock(); - cache.get_mut(hash).map(|code| code.clone()) - } + cache.get_mut(hash).map(|code| code.clone()) + } - fn note_non_null_account(&self, address: &Address) { - trace!(target: "account_bloom", "Note account bloom: {:?}", address); - let mut bloom = self.account_bloom.lock(); - bloom.set(&*keccak(address)); - } + fn note_non_null_account(&self, address: &Address) { + trace!(target: "account_bloom", "Note account bloom: {:?}", address); + let mut bloom = self.account_bloom.lock(); + bloom.set(&*keccak(address)); + } - fn is_known_null(&self, address: &Address) -> bool { - trace!(target: "account_bloom", "Check account bloom: {:?}", address); - let bloom = self.account_bloom.lock(); - let is_null = !bloom.check(&*keccak(address)); - is_null - } + fn is_known_null(&self, address: &Address) -> bool { + trace!(target: "account_bloom", "Check account bloom: {:?}", address); + let bloom = self.account_bloom.lock(); + let is_null = !bloom.check(&*keccak(address)); + is_null + } } /// Sync wrapper for the account. 
@@ -478,75 +530,82 @@ unsafe impl Sync for SyncAccount {} #[cfg(test)] mod tests { - use ethereum_types::{H256, U256, Address}; - use kvdb::DBTransaction; - use test_helpers::get_temp_state_db; - use state::{Account, Backend}; + use ethereum_types::{Address, H256, U256}; + use kvdb::DBTransaction; + use state::{Account, Backend}; + use test_helpers::get_temp_state_db; - #[test] - fn state_db_smoke() { - let _ = ::env_logger::try_init(); + #[test] + fn state_db_smoke() { + let _ = ::env_logger::try_init(); - let state_db = get_temp_state_db(); - let root_parent = H256::random(); - let address = Address::random(); - let h0 = H256::random(); - let h1a = H256::random(); - let h1b = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - let mut batch = DBTransaction::new(); + let state_db = get_temp_state_db(); + let root_parent = H256::random(); + let address = Address::random(); + let h0 = H256::random(); + let h1a = H256::random(); + let h1b = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + let h3b = H256::random(); + let mut batch = DBTransaction::new(); - // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] - // balance [ 5 5 4 3 2 2 ] - let mut s = state_db.boxed_clone_canon(&root_parent); - s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false); - s.journal_under(&mut batch, 0, &h0).unwrap(); - s.sync_cache(&[], &[], true); + // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] + // balance [ 5 5 4 3 2 2 ] + let mut s = state_db.boxed_clone_canon(&root_parent); + s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false); + s.journal_under(&mut batch, 0, &h0).unwrap(); + s.sync_cache(&[], &[], true); - let mut s = state_db.boxed_clone_canon(&h0); - s.journal_under(&mut batch, 1, &h1a).unwrap(); - s.sync_cache(&[], &[], true); + let mut s = state_db.boxed_clone_canon(&h0); + s.journal_under(&mut batch, 1, 
&h1a).unwrap(); + s.sync_cache(&[], &[], true); - let mut s = state_db.boxed_clone_canon(&h0); - s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true); - s.journal_under(&mut batch, 1, &h1b).unwrap(); - s.sync_cache(&[], &[], false); + let mut s = state_db.boxed_clone_canon(&h0); + s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true); + s.journal_under(&mut batch, 1, &h1b).unwrap(); + s.sync_cache(&[], &[], false); - let mut s = state_db.boxed_clone_canon(&h1b); - s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true); - s.journal_under(&mut batch, 2, &h2b).unwrap(); - s.sync_cache(&[], &[], false); + let mut s = state_db.boxed_clone_canon(&h1b); + s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true); + s.journal_under(&mut batch, 2, &h2b).unwrap(); + s.sync_cache(&[], &[], false); - let mut s = state_db.boxed_clone_canon(&h1a); - s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true); - s.journal_under(&mut batch, 2, &h2a).unwrap(); - s.sync_cache(&[], &[], true); + let mut s = state_db.boxed_clone_canon(&h1a); + s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true); + s.journal_under(&mut batch, 2, &h2a).unwrap(); + s.sync_cache(&[], &[], true); - let mut s = state_db.boxed_clone_canon(&h2a); - s.journal_under(&mut batch, 3, &h3a).unwrap(); - s.sync_cache(&[], &[], true); + let mut s = state_db.boxed_clone_canon(&h2a); + s.journal_under(&mut batch, 3, &h3a).unwrap(); + s.sync_cache(&[], &[], true); - let s = state_db.boxed_clone_canon(&h3a); - assert_eq!(s.get_cached_account(&address).unwrap().unwrap().balance(), &U256::from(5)); + let s = state_db.boxed_clone_canon(&h3a); + assert_eq!( + s.get_cached_account(&address).unwrap().unwrap().balance(), + &U256::from(5) + ); - let s = state_db.boxed_clone_canon(&h1a); - assert!(s.get_cached_account(&address).is_none()); + let s = 
state_db.boxed_clone_canon(&h1a); + assert!(s.get_cached_account(&address).is_none()); - let s = state_db.boxed_clone_canon(&h2b); - assert!(s.get_cached_account(&address).is_none()); + let s = state_db.boxed_clone_canon(&h2b); + assert!(s.get_cached_account(&address).is_none()); - let s = state_db.boxed_clone_canon(&h1b); - assert!(s.get_cached_account(&address).is_none()); + let s = state_db.boxed_clone_canon(&h1b); + assert!(s.get_cached_account(&address).is_none()); - // reorg to 3b - // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] - let mut s = state_db.boxed_clone_canon(&h2b); - s.journal_under(&mut batch, 3, &h3b).unwrap(); - s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], true); - let s = state_db.boxed_clone_canon(&h3a); - assert!(s.get_cached_account(&address).is_none()); - } + // reorg to 3b + // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] + let mut s = state_db.boxed_clone_canon(&h2b); + s.journal_under(&mut batch, 3, &h3b).unwrap(); + s.sync_cache( + &[h1b.clone(), h2b.clone(), h3b.clone()], + &[h1a.clone(), h2a.clone(), h3a.clone()], + true, + ); + let s = state_db.boxed_clone_canon(&h3a); + assert!(s.get_cached_account(&address).is_none()); + } } diff --git a/ethcore/src/test_helpers.rs b/ethcore/src/test_helpers.rs index b5575f36c..d3a58096b 100644 --- a/ethcore/src/test_helpers.rs +++ b/ethcore/src/test_helpers.rs @@ -16,14 +16,14 @@ //! 
Set of different helpers for client tests -use std::path::Path; -use std::sync::Arc; -use std::{fs, io}; +use std::{fs, io, path::Path, sync::Arc}; -use blockchain::{BlockChain, BlockChainDB, BlockChainDBHandler, Config as BlockChainConfig, ExtrasInsert}; +use blockchain::{ + BlockChain, BlockChainDB, BlockChainDBHandler, Config as BlockChainConfig, ExtrasInsert, +}; use blooms_db; use bytes::Bytes; -use ethereum_types::{H256, U256, Address}; +use ethereum_types::{Address, H256, U256}; use ethkey::KeyPair; use evm::Factory as EvmFactory; use hash::keccak; @@ -33,14 +33,18 @@ use kvdb_rocksdb::{self, Database, DatabaseConfig}; use parking_lot::RwLock; use rlp::{self, RlpStream}; use tempdir::TempDir; -use types::transaction::{Action, Transaction, SignedTransaction}; -use types::encoded; -use types::header::Header; -use types::view; -use types::views::BlockView; +use types::{ + encoded, + header::Header, + transaction::{Action, SignedTransaction, Transaction}, + view, + views::BlockView, +}; -use block::{OpenBlock, Drain}; -use client::{Client, ClientConfig, ChainInfo, ImportBlock, ChainNotify, ChainMessageType, PrepareOpenBlock}; +use block::{Drain, OpenBlock}; +use client::{ + ChainInfo, ChainMessageType, ChainNotify, Client, ClientConfig, ImportBlock, PrepareOpenBlock, +}; use factory::Factories; use miner::Miner; use spec::Spec; @@ -50,468 +54,567 @@ use verification::queue::kind::blocks::Unverified; /// Creates test block with corresponding header pub fn create_test_block(header: &Header) -> Bytes { - let mut rlp = RlpStream::new_list(3); - rlp.append(header); - rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); - rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); - rlp.out() + let mut rlp = RlpStream::new_list(3); + rlp.append(header); + rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); + rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); + rlp.out() } fn create_unverifiable_block_header(order: u32, parent_hash: H256) -> Header { - let mut header = Header::new(); - header.set_gas_limit(0.into()); - 
header.set_difficulty((order * 100).into()); - header.set_timestamp((order * 10) as u64); - header.set_number(order as u64); - header.set_parent_hash(parent_hash); - header.set_state_root(H256::zero()); + let mut header = Header::new(); + header.set_gas_limit(0.into()); + header.set_difficulty((order * 100).into()); + header.set_timestamp((order * 10) as u64); + header.set_number(order as u64); + header.set_parent_hash(parent_hash); + header.set_state_root(H256::zero()); - header + header } -fn create_unverifiable_block_with_extra(order: u32, parent_hash: H256, extra: Option) -> Bytes { - let mut header = create_unverifiable_block_header(order, parent_hash); - header.set_extra_data(match extra { - Some(extra_data) => extra_data, - None => { - let base = (order & 0x000000ff) as u8; - let generated: Vec = vec![base + 1, base + 2, base + 3]; - generated - } - }); - create_test_block(&header) +fn create_unverifiable_block_with_extra( + order: u32, + parent_hash: H256, + extra: Option, +) -> Bytes { + let mut header = create_unverifiable_block_header(order, parent_hash); + header.set_extra_data(match extra { + Some(extra_data) => extra_data, + None => { + let base = (order & 0x000000ff) as u8; + let generated: Vec = vec![base + 1, base + 2, base + 3]; + generated + } + }); + create_test_block(&header) } fn create_unverifiable_block(order: u32, parent_hash: H256) -> Bytes { - create_test_block(&create_unverifiable_block_header(order, parent_hash)) + create_test_block(&create_unverifiable_block_header(order, parent_hash)) } /// Creates test block with corresponding header and data -pub fn create_test_block_with_data(header: &Header, transactions: &[SignedTransaction], uncles: &[Header]) -> Bytes { - let mut rlp = RlpStream::new_list(3); - rlp.append(header); - rlp.begin_list(transactions.len()); - for t in transactions { - rlp.append_raw(&rlp::encode(t), 1); - } - rlp.append_list(&uncles); - rlp.out() +pub fn create_test_block_with_data( + header: &Header, + transactions: 
&[SignedTransaction], + uncles: &[Header], +) -> Bytes { + let mut rlp = RlpStream::new_list(3); + rlp.append(header); + rlp.begin_list(transactions.len()); + for t in transactions { + rlp.append_raw(&rlp::encode(t), 1); + } + rlp.append_list(&uncles); + rlp.out() } /// Generates dummy client (not test client) with corresponding amount of blocks pub fn generate_dummy_client(block_number: u32) -> Arc { - generate_dummy_client_with_spec_and_data(Spec::new_test, block_number, 0, &[]) + generate_dummy_client_with_spec_and_data(Spec::new_test, block_number, 0, &[]) } /// Generates dummy client (not test client) with corresponding amount of blocks and txs per every block -pub fn generate_dummy_client_with_data(block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc { - generate_dummy_client_with_spec_and_data(Spec::new_null, block_number, txs_per_block, tx_gas_prices) +pub fn generate_dummy_client_with_data( + block_number: u32, + txs_per_block: usize, + tx_gas_prices: &[U256], +) -> Arc { + generate_dummy_client_with_spec_and_data( + Spec::new_null, + block_number, + txs_per_block, + tx_gas_prices, + ) } /// Generates dummy client (not test client) with corresponding spec and accounts -pub fn generate_dummy_client_with_spec(test_spec: F) -> Arc where F: Fn()->Spec { - generate_dummy_client_with_spec_and_data(test_spec, 0, 0, &[]) +pub fn generate_dummy_client_with_spec(test_spec: F) -> Arc +where + F: Fn() -> Spec, +{ + generate_dummy_client_with_spec_and_data(test_spec, 0, 0, &[]) } /// Generates dummy client (not test client) with corresponding amount of blocks, txs per block and spec -pub fn generate_dummy_client_with_spec_and_data(test_spec: F, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc where - F: Fn() -> Spec +pub fn generate_dummy_client_with_spec_and_data( + test_spec: F, + block_number: u32, + txs_per_block: usize, + tx_gas_prices: &[U256], +) -> Arc +where + F: Fn() -> Spec, { - let test_spec = test_spec(); - let 
client_db = new_db(); + let test_spec = test_spec(); + let client_db = new_db(); - let client = Client::new( - ClientConfig::default(), - &test_spec, - client_db, - Arc::new(Miner::new_for_tests(&test_spec, None)), - IoChannel::disconnected(), - ).unwrap(); - let test_engine = &*test_spec.engine; + let client = Client::new( + ClientConfig::default(), + &test_spec, + client_db, + Arc::new(Miner::new_for_tests(&test_spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + let test_engine = &*test_spec.engine; - let mut db = test_spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let genesis_header = test_spec.genesis_header(); + let mut db = test_spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let genesis_header = test_spec.genesis_header(); - let mut rolling_timestamp = 40; - let mut last_hashes = vec![]; - let mut last_header = genesis_header.clone(); + let mut rolling_timestamp = 40; + let mut last_hashes = vec![]; + let mut last_header = genesis_header.clone(); - let kp = KeyPair::from_secret_slice(&keccak("")).unwrap(); - let author = kp.address(); + let kp = KeyPair::from_secret_slice(&keccak("")).unwrap(); + let author = kp.address(); - let mut n = 0; - for _ in 0..block_number { - last_hashes.push(last_header.hash()); + let mut n = 0; + for _ in 0..block_number { + last_hashes.push(last_header.hash()); - // forge block. - let mut b = OpenBlock::new( - test_engine, - Default::default(), - false, - db, - &last_header, - Arc::new(last_hashes.clone()), - author.clone(), - (3141562.into(), 31415620.into()), - vec![], - false, - None, - ).unwrap(); - rolling_timestamp += 10; - b.set_timestamp(rolling_timestamp); + // forge block. 
+ let mut b = OpenBlock::new( + test_engine, + Default::default(), + false, + db, + &last_header, + Arc::new(last_hashes.clone()), + author.clone(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + rolling_timestamp += 10; + b.set_timestamp(rolling_timestamp); - // first block we don't have any balance, so can't send any transactions. - for _ in 0..txs_per_block { - b.push_transaction(Transaction { - nonce: n.into(), - gas_price: tx_gas_prices[n % tx_gas_prices.len()], - gas: 100000.into(), - action: Action::Create, - data: vec![], - value: U256::zero(), - }.sign(kp.secret(), Some(test_spec.chain_id())), None).unwrap(); - n += 1; - } + // first block we don't have any balance, so can't send any transactions. + for _ in 0..txs_per_block { + b.push_transaction( + Transaction { + nonce: n.into(), + gas_price: tx_gas_prices[n % tx_gas_prices.len()], + gas: 100000.into(), + action: Action::Create, + data: vec![], + value: U256::zero(), + } + .sign(kp.secret(), Some(test_spec.chain_id())), + None, + ) + .unwrap(); + n += 1; + } - let b = b.close_and_lock().unwrap().seal(test_engine, vec![]).unwrap(); + let b = b + .close_and_lock() + .unwrap() + .seal(test_engine, vec![]) + .unwrap(); - if let Err(e) = client.import_block(Unverified::from_rlp(b.rlp_bytes()).unwrap()) { - panic!("error importing block which is valid by definition: {:?}", e); - } + if let Err(e) = client.import_block(Unverified::from_rlp(b.rlp_bytes()).unwrap()) { + panic!( + "error importing block which is valid by definition: {:?}", + e + ); + } - last_header = view!(BlockView, &b.rlp_bytes()).header(); - db = b.drain().state.drop().1; - } - client.flush_queue(); - client.import_verified_blocks(); - client + last_header = view!(BlockView, &b.rlp_bytes()).header(); + db = b.drain().state.drop().1; + } + client.flush_queue(); + client.import_verified_blocks(); + client } /// Adds blocks to the client -pub fn push_blocks_to_client(client: &Arc, timestamp_salt: u64, 
starting_number: usize, block_number: usize) { - let test_spec = Spec::new_test(); - let state_root = test_spec.genesis_header().state_root().clone(); - let genesis_gas = test_spec.genesis_header().gas_limit().clone(); +pub fn push_blocks_to_client( + client: &Arc, + timestamp_salt: u64, + starting_number: usize, + block_number: usize, +) { + let test_spec = Spec::new_test(); + let state_root = test_spec.genesis_header().state_root().clone(); + let genesis_gas = test_spec.genesis_header().gas_limit().clone(); - let mut rolling_hash = client.chain_info().best_block_hash; - let mut rolling_block_number = starting_number as u64; - let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10; + let mut rolling_hash = client.chain_info().best_block_hash; + let mut rolling_block_number = starting_number as u64; + let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10; - for _ in 0..block_number { - let mut header = Header::new(); + for _ in 0..block_number { + let mut header = Header::new(); - header.set_gas_limit(genesis_gas); - header.set_difficulty(U256::from(0x20000)); - header.set_timestamp(rolling_timestamp); - header.set_number(rolling_block_number); - header.set_parent_hash(rolling_hash); - header.set_state_root(state_root); + header.set_gas_limit(genesis_gas); + header.set_difficulty(U256::from(0x20000)); + header.set_timestamp(rolling_timestamp); + header.set_number(rolling_block_number); + header.set_parent_hash(rolling_hash); + header.set_state_root(state_root); - rolling_hash = header.hash(); - rolling_block_number = rolling_block_number + 1; - rolling_timestamp = rolling_timestamp + 10; + rolling_hash = header.hash(); + rolling_block_number = rolling_block_number + 1; + rolling_timestamp = rolling_timestamp + 10; - if let Err(e) = client.import_block(Unverified::from_rlp(create_test_block(&header)).unwrap()) { - panic!("error importing block which is valid by definition: {:?}", e); - } - } + if let Err(e) = + 
client.import_block(Unverified::from_rlp(create_test_block(&header)).unwrap()) + { + panic!( + "error importing block which is valid by definition: {:?}", + e + ); + } + } } /// Adds one block with transactions pub fn push_block_with_transactions(client: &Arc, transactions: &[SignedTransaction]) { - let test_spec = Spec::new_test(); - let test_engine = &*test_spec.engine; - let block_number = client.chain_info().best_block_number as u64 + 1; + let test_spec = Spec::new_test(); + let test_engine = &*test_spec.engine; + let block_number = client.chain_info().best_block_number as u64 + 1; - let mut b = client.prepare_open_block(Address::default(), (0.into(), 5000000.into()), Bytes::new()).unwrap(); - b.set_timestamp(block_number * 10); + let mut b = client + .prepare_open_block(Address::default(), (0.into(), 5000000.into()), Bytes::new()) + .unwrap(); + b.set_timestamp(block_number * 10); - for t in transactions { - b.push_transaction(t.clone(), None).unwrap(); - } - let b = b.close_and_lock().unwrap().seal(test_engine, vec![]).unwrap(); + for t in transactions { + b.push_transaction(t.clone(), None).unwrap(); + } + let b = b + .close_and_lock() + .unwrap() + .seal(test_engine, vec![]) + .unwrap(); - if let Err(e) = client.import_block(Unverified::from_rlp(b.rlp_bytes()).unwrap()) { - panic!("error importing block which is valid by definition: {:?}", e); - } + if let Err(e) = client.import_block(Unverified::from_rlp(b.rlp_bytes()).unwrap()) { + panic!( + "error importing block which is valid by definition: {:?}", + e + ); + } - client.flush_queue(); - client.import_verified_blocks(); + client.flush_queue(); + client.import_verified_blocks(); } /// Creates dummy client (not test client) with corresponding blocks pub fn get_test_client_with_blocks(blocks: Vec) -> Arc { - let test_spec = Spec::new_test(); - let client_db = new_db(); + let test_spec = Spec::new_test(); + let client_db = new_db(); - let client = Client::new( - ClientConfig::default(), - &test_spec, - 
client_db, - Arc::new(Miner::new_for_tests(&test_spec, None)), - IoChannel::disconnected(), - ).unwrap(); + let client = Client::new( + ClientConfig::default(), + &test_spec, + client_db, + Arc::new(Miner::new_for_tests(&test_spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); - for block in blocks { - if let Err(e) = client.import_block(Unverified::from_rlp(block).unwrap()) { - panic!("error importing block which is well-formed: {:?}", e); - } - } - client.flush_queue(); - client.import_verified_blocks(); - client + for block in blocks { + if let Err(e) = client.import_block(Unverified::from_rlp(block).unwrap()) { + panic!("error importing block which is well-formed: {:?}", e); + } + } + client.flush_queue(); + client.import_verified_blocks(); + client } struct TestBlockChainDB { - _blooms_dir: TempDir, - _trace_blooms_dir: TempDir, - blooms: blooms_db::Database, - trace_blooms: blooms_db::Database, - key_value: Arc, + _blooms_dir: TempDir, + _trace_blooms_dir: TempDir, + blooms: blooms_db::Database, + trace_blooms: blooms_db::Database, + key_value: Arc, } impl BlockChainDB for TestBlockChainDB { - fn key_value(&self) -> &Arc { - &self.key_value - } + fn key_value(&self) -> &Arc { + &self.key_value + } - fn blooms(&self) -> &blooms_db::Database { - &self.blooms - } + fn blooms(&self) -> &blooms_db::Database { + &self.blooms + } - fn trace_blooms(&self) -> &blooms_db::Database { - &self.trace_blooms - } + fn trace_blooms(&self) -> &blooms_db::Database { + &self.trace_blooms + } } /// Creates new test instance of `BlockChainDB` pub fn new_db() -> Arc { - let blooms_dir = TempDir::new("").unwrap(); - let trace_blooms_dir = TempDir::new("").unwrap(); + let blooms_dir = TempDir::new("").unwrap(); + let trace_blooms_dir = TempDir::new("").unwrap(); - let db = TestBlockChainDB { - blooms: blooms_db::Database::open(blooms_dir.path()).unwrap(), - trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(), - _blooms_dir: blooms_dir, - 
_trace_blooms_dir: trace_blooms_dir, - key_value: Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap())) - }; + let db = TestBlockChainDB { + blooms: blooms_db::Database::open(blooms_dir.path()).unwrap(), + trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(), + _blooms_dir: blooms_dir, + _trace_blooms_dir: trace_blooms_dir, + key_value: Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap())), + }; - Arc::new(db) + Arc::new(db) } /// Creates a new temporary `BlockChainDB` on FS pub fn new_temp_db(tempdir: &Path) -> Arc { - let blooms_dir = TempDir::new("").unwrap(); - let trace_blooms_dir = TempDir::new("").unwrap(); - let key_value_dir = tempdir.join("key_value"); + let blooms_dir = TempDir::new("").unwrap(); + let trace_blooms_dir = TempDir::new("").unwrap(); + let key_value_dir = tempdir.join("key_value"); - let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - let key_value_db = Database::open(&db_config, key_value_dir.to_str().unwrap()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let key_value_db = Database::open(&db_config, key_value_dir.to_str().unwrap()).unwrap(); - let db = TestBlockChainDB { - blooms: blooms_db::Database::open(blooms_dir.path()).unwrap(), - trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(), - _blooms_dir: blooms_dir, - _trace_blooms_dir: trace_blooms_dir, - key_value: Arc::new(key_value_db) - }; + let db = TestBlockChainDB { + blooms: blooms_db::Database::open(blooms_dir.path()).unwrap(), + trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(), + _blooms_dir: blooms_dir, + _trace_blooms_dir: trace_blooms_dir, + key_value: Arc::new(key_value_db), + }; - Arc::new(db) + Arc::new(db) } /// Creates new instance of KeyValueDBHandler pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box { - struct RestorationDBHandler { - config: kvdb_rocksdb::DatabaseConfig, - } + struct RestorationDBHandler { + 
config: kvdb_rocksdb::DatabaseConfig, + } - struct RestorationDB { - blooms: blooms_db::Database, - trace_blooms: blooms_db::Database, - key_value: Arc, - } + struct RestorationDB { + blooms: blooms_db::Database, + trace_blooms: blooms_db::Database, + key_value: Arc, + } - impl BlockChainDB for RestorationDB { - fn key_value(&self) -> &Arc { - &self.key_value - } + impl BlockChainDB for RestorationDB { + fn key_value(&self) -> &Arc { + &self.key_value + } - fn blooms(&self) -> &blooms_db::Database { - &self.blooms - } + fn blooms(&self) -> &blooms_db::Database { + &self.blooms + } - fn trace_blooms(&self) -> &blooms_db::Database { - &self.trace_blooms - } - } + fn trace_blooms(&self) -> &blooms_db::Database { + &self.trace_blooms + } + } - impl BlockChainDBHandler for RestorationDBHandler { - fn open(&self, db_path: &Path) -> io::Result> { - let key_value = Arc::new(kvdb_rocksdb::Database::open(&self.config, &db_path.to_string_lossy())?); - let blooms_path = db_path.join("blooms"); - let trace_blooms_path = db_path.join("trace_blooms"); - fs::create_dir_all(&blooms_path)?; - fs::create_dir_all(&trace_blooms_path)?; - let blooms = blooms_db::Database::open(blooms_path).unwrap(); - let trace_blooms = blooms_db::Database::open(trace_blooms_path).unwrap(); - let db = RestorationDB { - blooms, - trace_blooms, - key_value, - }; - Ok(Arc::new(db)) - } - } + impl BlockChainDBHandler for RestorationDBHandler { + fn open(&self, db_path: &Path) -> io::Result> { + let key_value = Arc::new(kvdb_rocksdb::Database::open( + &self.config, + &db_path.to_string_lossy(), + )?); + let blooms_path = db_path.join("blooms"); + let trace_blooms_path = db_path.join("trace_blooms"); + fs::create_dir_all(&blooms_path)?; + fs::create_dir_all(&trace_blooms_path)?; + let blooms = blooms_db::Database::open(blooms_path).unwrap(); + let trace_blooms = blooms_db::Database::open(trace_blooms_path).unwrap(); + let db = RestorationDB { + blooms, + trace_blooms, + key_value, + }; + Ok(Arc::new(db)) + } 
+ } - Box::new(RestorationDBHandler { config }) + Box::new(RestorationDBHandler { config }) } /// Generates dummy blockchain with corresponding amount of blocks pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain { - let db = new_db(); - let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); + let db = new_db(); + let bc = BlockChain::new( + BlockChainConfig::default(), + &create_unverifiable_block(0, H256::zero()), + db.clone(), + ); - let mut batch = db.key_value().transaction(); - for block_order in 1..block_number { - // Total difficulty is always 0 here. - bc.insert_block(&mut batch, encoded::Block::new(create_unverifiable_block(block_order, bc.best_block_hash())), vec![], ExtrasInsert { - fork_choice: ::engines::ForkChoice::New, - is_finalized: false, - }); - bc.commit(); - } - db.key_value().write(batch).unwrap(); - bc + let mut batch = db.key_value().transaction(); + for block_order in 1..block_number { + // Total difficulty is always 0 here. + bc.insert_block( + &mut batch, + encoded::Block::new(create_unverifiable_block(block_order, bc.best_block_hash())), + vec![], + ExtrasInsert { + fork_choice: ::engines::ForkChoice::New, + is_finalized: false, + }, + ); + bc.commit(); + } + db.key_value().write(batch).unwrap(); + bc } /// Generates dummy blockchain with corresponding amount of blocks (using creation with extra method for blocks creation) pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> BlockChain { - let db = new_db(); - let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); + let db = new_db(); + let bc = BlockChain::new( + BlockChainConfig::default(), + &create_unverifiable_block(0, H256::zero()), + db.clone(), + ); - let mut batch = db.key_value().transaction(); - for block_order in 1..block_number { - // Total difficulty is always 0 here. 
- bc.insert_block(&mut batch, encoded::Block::new(create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None)), vec![], ExtrasInsert { - fork_choice: ::engines::ForkChoice::New, - is_finalized: false, - }); - bc.commit(); - } - db.key_value().write(batch).unwrap(); - bc + let mut batch = db.key_value().transaction(); + for block_order in 1..block_number { + // Total difficulty is always 0 here. + bc.insert_block( + &mut batch, + encoded::Block::new(create_unverifiable_block_with_extra( + block_order, + bc.best_block_hash(), + None, + )), + vec![], + ExtrasInsert { + fork_choice: ::engines::ForkChoice::New, + is_finalized: false, + }, + ); + bc.commit(); + } + db.key_value().write(batch).unwrap(); + bc } /// Returns empty dummy blockchain pub fn generate_dummy_empty_blockchain() -> BlockChain { - let db = new_db(); - let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); - bc + let db = new_db(); + let bc = BlockChain::new( + BlockChainConfig::default(), + &create_unverifiable_block(0, H256::zero()), + db.clone(), + ); + bc } /// Returns temp state pub fn get_temp_state() -> State<::state_db::StateDB> { - let journal_db = get_temp_state_db(); - State::new(journal_db, U256::from(0), Default::default()) + let journal_db = get_temp_state_db(); + State::new(journal_db, U256::from(0), Default::default()) } /// Returns temp state using coresponding factory pub fn get_temp_state_with_factory(factory: EvmFactory) -> State<::state_db::StateDB> { - let journal_db = get_temp_state_db(); - let mut factories = Factories::default(); - factories.vm = factory.into(); - State::new(journal_db, U256::from(0), factories) + let journal_db = get_temp_state_db(); + let mut factories = Factories::default(); + factories.vm = factory.into(); + State::new(journal_db, U256::from(0), factories) } /// Returns temp state db pub fn get_temp_state_db() -> StateDB { - let db = new_db(); - let journal_db = 
::journaldb::new(db.key_value().clone(), ::journaldb::Algorithm::EarlyMerge, ::db::COL_STATE); - StateDB::new(journal_db, 5 * 1024 * 1024) + let db = new_db(); + let journal_db = ::journaldb::new( + db.key_value().clone(), + ::journaldb::Algorithm::EarlyMerge, + ::db::COL_STATE, + ); + StateDB::new(journal_db, 5 * 1024 * 1024) } /// Returns sequence of hashes of the dummy blocks pub fn get_good_dummy_block_seq(count: usize) -> Vec { - let test_spec = Spec::new_test(); - get_good_dummy_block_fork_seq(1, count, &test_spec.genesis_header().hash()) + let test_spec = Spec::new_test(); + get_good_dummy_block_fork_seq(1, count, &test_spec.genesis_header().hash()) } /// Returns sequence of hashes of the dummy blocks beginning from corresponding parent -pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_hash: &H256) -> Vec { - let test_spec = Spec::new_test(); - let genesis_gas = test_spec.genesis_header().gas_limit().clone(); - let mut rolling_timestamp = start_number as u64 * 10; - let mut parent = *parent_hash; - let mut r = Vec::new(); - for i in start_number .. 
start_number + count + 1 { - let mut block_header = Header::new(); - block_header.set_gas_limit(genesis_gas); - block_header.set_difficulty(U256::from(i) * U256([0, 1, 0, 0])); - block_header.set_timestamp(rolling_timestamp); - block_header.set_number(i as u64); - block_header.set_parent_hash(parent); - block_header.set_state_root(test_spec.genesis_header().state_root().clone()); +pub fn get_good_dummy_block_fork_seq( + start_number: usize, + count: usize, + parent_hash: &H256, +) -> Vec { + let test_spec = Spec::new_test(); + let genesis_gas = test_spec.genesis_header().gas_limit().clone(); + let mut rolling_timestamp = start_number as u64 * 10; + let mut parent = *parent_hash; + let mut r = Vec::new(); + for i in start_number..start_number + count + 1 { + let mut block_header = Header::new(); + block_header.set_gas_limit(genesis_gas); + block_header.set_difficulty(U256::from(i) * U256([0, 1, 0, 0])); + block_header.set_timestamp(rolling_timestamp); + block_header.set_number(i as u64); + block_header.set_parent_hash(parent); + block_header.set_state_root(test_spec.genesis_header().state_root().clone()); - parent = block_header.hash(); - rolling_timestamp = rolling_timestamp + 10; + parent = block_header.hash(); + rolling_timestamp = rolling_timestamp + 10; - r.push(create_test_block(&block_header)); - } - r + r.push(create_test_block(&block_header)); + } + r } /// Returns hash and header of the correct dummy block pub fn get_good_dummy_block_hash() -> (H256, Bytes) { - let mut block_header = Header::new(); - let test_spec = Spec::new_test(); - let genesis_gas = test_spec.genesis_header().gas_limit().clone(); - block_header.set_gas_limit(genesis_gas); - block_header.set_difficulty(U256::from(0x20000)); - block_header.set_timestamp(40); - block_header.set_number(1); - block_header.set_parent_hash(test_spec.genesis_header().hash()); - block_header.set_state_root(test_spec.genesis_header().state_root().clone()); + let mut block_header = Header::new(); + let test_spec 
= Spec::new_test(); + let genesis_gas = test_spec.genesis_header().gas_limit().clone(); + block_header.set_gas_limit(genesis_gas); + block_header.set_difficulty(U256::from(0x20000)); + block_header.set_timestamp(40); + block_header.set_number(1); + block_header.set_parent_hash(test_spec.genesis_header().hash()); + block_header.set_state_root(test_spec.genesis_header().state_root().clone()); - (block_header.hash(), create_test_block(&block_header)) + (block_header.hash(), create_test_block(&block_header)) } /// Returns hash of the correct dummy block pub fn get_good_dummy_block() -> Bytes { - let (_, bytes) = get_good_dummy_block_hash(); - bytes + let (_, bytes) = get_good_dummy_block_hash(); + bytes } /// Returns hash of the dummy block with incorrect state root pub fn get_bad_state_dummy_block() -> Bytes { - let mut block_header = Header::new(); - let test_spec = Spec::new_test(); - let genesis_gas = test_spec.genesis_header().gas_limit().clone(); + let mut block_header = Header::new(); + let test_spec = Spec::new_test(); + let genesis_gas = test_spec.genesis_header().gas_limit().clone(); - block_header.set_gas_limit(genesis_gas); - block_header.set_difficulty(U256::from(0x20000)); - block_header.set_timestamp(40); - block_header.set_number(1); - block_header.set_parent_hash(test_spec.genesis_header().hash()); - block_header.set_state_root(0xbad.into()); + block_header.set_gas_limit(genesis_gas); + block_header.set_difficulty(U256::from(0x20000)); + block_header.set_timestamp(40); + block_header.set_number(1); + block_header.set_parent_hash(test_spec.genesis_header().hash()); + block_header.set_state_root(0xbad.into()); - create_test_block(&block_header) + create_test_block(&block_header) } /// Test actor for chain events #[derive(Default)] pub struct TestNotify { - /// Messages store - pub messages: RwLock>, + /// Messages store + pub messages: RwLock>, } impl ChainNotify for TestNotify { - fn broadcast(&self, message: ChainMessageType) { - let data = match 
message { - ChainMessageType::Consensus(data) => data, - ChainMessageType::SignedPrivateTransaction(_, data) => data, - ChainMessageType::PrivateTransaction(_, data) => data, - }; - self.messages.write().push(data); - } + fn broadcast(&self, message: ChainMessageType) { + let data = match message { + ChainMessageType::Consensus(data) => data, + ChainMessageType::SignedPrivateTransaction(_, data) => data, + ChainMessageType::PrivateTransaction(_, data) => data, + }; + self.messages.write().push(data); + } } diff --git a/ethcore/src/tests/blockchain.rs b/ethcore/src/tests/blockchain.rs index 9f2cdbdb9..a97430c0e 100644 --- a/ethcore/src/tests/blockchain.rs +++ b/ethcore/src/tests/blockchain.rs @@ -17,44 +17,43 @@ use blockchain::BlockProvider; use test_helpers::{ - generate_dummy_blockchain, - generate_dummy_blockchain_with_extra, - generate_dummy_empty_blockchain, + generate_dummy_blockchain, generate_dummy_blockchain_with_extra, + generate_dummy_empty_blockchain, }; #[test] fn can_contain_arbitrary_block_sequence() { - let bc = generate_dummy_blockchain(50); - assert_eq!(bc.best_block_number(), 49); + let bc = generate_dummy_blockchain(50); + assert_eq!(bc.best_block_number(), 49); } #[test] fn can_collect_garbage() { - let bc = generate_dummy_blockchain(3000); + let bc = generate_dummy_blockchain(3000); - assert_eq!(bc.best_block_number(), 2999); - let best_hash = bc.best_block_hash(); - let mut block_header = bc.block_header_data(&best_hash); + assert_eq!(bc.best_block_number(), 2999); + let best_hash = bc.best_block_hash(); + let mut block_header = bc.block_header_data(&best_hash); - while !block_header.is_none() { - block_header = bc.block_header_data(&block_header.unwrap().parent_hash()); - } - assert!(bc.cache_size().blocks > 1024 * 1024); + while !block_header.is_none() { + block_header = bc.block_header_data(&block_header.unwrap().parent_hash()); + } + assert!(bc.cache_size().blocks > 1024 * 1024); - for _ in 0..2 { - bc.collect_garbage(); - } - 
assert!(bc.cache_size().blocks < 1024 * 1024); + for _ in 0..2 { + bc.collect_garbage(); + } + assert!(bc.cache_size().blocks < 1024 * 1024); } #[test] fn can_contain_arbitrary_block_sequence_with_extra() { - let bc = generate_dummy_blockchain_with_extra(25); - assert_eq!(bc.best_block_number(), 24); + let bc = generate_dummy_blockchain_with_extra(25); + assert_eq!(bc.best_block_number(), 24); } #[test] fn can_contain_only_genesis_block() { - let bc = generate_dummy_empty_blockchain(); - assert_eq!(bc.best_block_number(), 0); + let bc = generate_dummy_empty_blockchain(); + assert_eq!(bc.best_block_number(), 0); } diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 1a836e42e..54d28542c 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -14,460 +14,580 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::str::{FromStr, from_utf8}; -use std::sync::Arc; +use std::{ + str::{from_utf8, FromStr}, + sync::Arc, +}; -use ethereum_types::{U256, Address}; +use client::{ + traits::{ + BlockChainClient, BlockChainReset, BlockInfo, ChainInfo, ImportBlock, ImportExportBlocks, + }, + Client, ClientConfig, ImportSealedBlock, PrepareOpenBlock, +}; +use ethereum; +use ethereum_types::{Address, U256}; use ethkey::KeyPair; +use executive::{Executive, TransactOptions}; use hash::keccak; use io::IoChannel; +use miner::{Miner, MinerService, PendingOrdering}; +use rustc_hex::ToHex; +use spec::{self, Spec}; +use state::{self, CleanupMode, State, StateInfo}; use tempdir::TempDir; +use test_helpers::{ + self, generate_dummy_client, generate_dummy_client_with_data, get_bad_state_dummy_block, + get_good_dummy_block, get_good_dummy_block_seq, get_test_client_with_blocks, + push_blocks_to_client, +}; use types::{ - data_format::DataFormat, - ids::BlockId, - transaction::{PendingTransaction, Transaction, Action, Condition}, - filter::Filter, - view, - views::BlockView, + 
data_format::DataFormat, + filter::Filter, + ids::BlockId, + transaction::{Action, Condition, PendingTransaction, Transaction}, + view, + views::BlockView, }; use verification::queue::kind::blocks::Unverified; -use client::{Client, ClientConfig, PrepareOpenBlock, ImportSealedBlock}; -use client::traits::{ - BlockInfo, BlockChainClient, BlockChainReset, ChainInfo, - ImportExportBlocks, ImportBlock -}; -use spec; -use ethereum; -use executive::{Executive, TransactOptions}; -use miner::{Miner, PendingOrdering, MinerService}; -use spec::Spec; -use state::{self, State, CleanupMode, StateInfo}; -use test_helpers::{ - self, - generate_dummy_client, push_blocks_to_client, get_test_client_with_blocks, get_good_dummy_block_seq, - generate_dummy_client_with_data, get_good_dummy_block, get_bad_state_dummy_block -}; -use rustc_hex::ToHex; #[test] fn imports_from_empty() { - let db = test_helpers::new_db(); - let spec = Spec::new_test(); + let db = test_helpers::new_db(); + let spec = Spec::new_test(); - let client = Client::new( - ClientConfig::default(), - &spec, - db, - Arc::new(Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); - client.import_verified_blocks(); - client.flush_queue(); + let client = Client::new( + ClientConfig::default(), + &spec, + db, + Arc::new(Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + client.import_verified_blocks(); + client.flush_queue(); } #[test] fn should_return_registrar() { - let db = test_helpers::new_db(); - let tempdir = TempDir::new("").unwrap(); - let spec = ethereum::new_morden(&tempdir.path().to_owned()); + let db = test_helpers::new_db(); + let tempdir = TempDir::new("").unwrap(); + let spec = ethereum::new_morden(&tempdir.path().to_owned()); - let client = Client::new( - ClientConfig::default(), - &spec, - db, - Arc::new(Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); - let params = client.additional_params(); - let address = 
¶ms["registrar"]; + let client = Client::new( + ClientConfig::default(), + &spec, + db, + Arc::new(Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + let params = client.additional_params(); + let address = ¶ms["registrar"]; - assert_eq!(address.len(), 40); - assert!(U256::from_str(address).is_ok()); + assert_eq!(address.len(), 40); + assert!(U256::from_str(address).is_ok()); } #[test] fn returns_state_root_basic() { - let client = generate_dummy_client(6); - let test_spec = Spec::new_test(); - let genesis_header = test_spec.genesis_header(); + let client = generate_dummy_client(6); + let test_spec = Spec::new_test(); + let genesis_header = test_spec.genesis_header(); - assert!(client.state_data(genesis_header.state_root()).is_some()); + assert!(client.state_data(genesis_header.state_root()).is_some()); } #[test] fn imports_good_block() { - let db = test_helpers::new_db(); - let spec = Spec::new_test(); + let db = test_helpers::new_db(); + let spec = Spec::new_test(); - let client = Client::new( - ClientConfig::default(), - &spec, - db, - Arc::new(Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); - let good_block = get_good_dummy_block(); - if client.import_block(Unverified::from_rlp(good_block).unwrap()).is_err() { - panic!("error importing block being good by definition"); - } - client.flush_queue(); - client.import_verified_blocks(); + let client = Client::new( + ClientConfig::default(), + &spec, + db, + Arc::new(Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + let good_block = get_good_dummy_block(); + if client + .import_block(Unverified::from_rlp(good_block).unwrap()) + .is_err() + { + panic!("error importing block being good by definition"); + } + client.flush_queue(); + client.import_verified_blocks(); - let block = client.block_header(BlockId::Number(1)).unwrap(); - assert!(!block.into_inner().is_empty()); + let block = 
client.block_header(BlockId::Number(1)).unwrap(); + assert!(!block.into_inner().is_empty()); } #[test] fn query_none_block() { - let db = test_helpers::new_db(); - let spec = Spec::new_test(); + let db = test_helpers::new_db(); + let spec = Spec::new_test(); - let client = Client::new( - ClientConfig::default(), - &spec, - db, - Arc::new(Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); - let non_existant = client.block_header(BlockId::Number(188)); - assert!(non_existant.is_none()); + let client = Client::new( + ClientConfig::default(), + &spec, + db, + Arc::new(Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + let non_existant = client.block_header(BlockId::Number(188)); + assert!(non_existant.is_none()); } #[test] fn query_bad_block() { - let client = get_test_client_with_blocks(vec![get_bad_state_dummy_block()]); - let bad_block: Option<_> = client.block_header(BlockId::Number(1)); + let client = get_test_client_with_blocks(vec![get_bad_state_dummy_block()]); + let bad_block: Option<_> = client.block_header(BlockId::Number(1)); - assert!(bad_block.is_none()); + assert!(bad_block.is_none()); } #[test] fn returns_chain_info() { - let dummy_block = get_good_dummy_block(); - let client = get_test_client_with_blocks(vec![dummy_block.clone()]); - let block = view!(BlockView, &dummy_block); - let info = client.chain_info(); - assert_eq!(info.best_block_hash, block.header().hash()); + let dummy_block = get_good_dummy_block(); + let client = get_test_client_with_blocks(vec![dummy_block.clone()]); + let block = view!(BlockView, &dummy_block); + let info = client.chain_info(); + assert_eq!(info.best_block_hash, block.header().hash()); } #[test] fn returns_logs() { - let dummy_block = get_good_dummy_block(); - let client = get_test_client_with_blocks(vec![dummy_block.clone()]); - let logs = client.logs(Filter { - from_block: BlockId::Earliest, - to_block: BlockId::Latest, - address: None, - topics: vec![], - 
limit: None, - }).unwrap(); - assert_eq!(logs.len(), 0); + let dummy_block = get_good_dummy_block(); + let client = get_test_client_with_blocks(vec![dummy_block.clone()]); + let logs = client + .logs(Filter { + from_block: BlockId::Earliest, + to_block: BlockId::Latest, + address: None, + topics: vec![], + limit: None, + }) + .unwrap(); + assert_eq!(logs.len(), 0); } #[test] fn returns_logs_with_limit() { - let dummy_block = get_good_dummy_block(); - let client = get_test_client_with_blocks(vec![dummy_block.clone()]); - let logs = client.logs(Filter { - from_block: BlockId::Earliest, - to_block: BlockId::Latest, - address: None, - topics: vec![], - limit: None, - }).unwrap(); - assert_eq!(logs.len(), 0); + let dummy_block = get_good_dummy_block(); + let client = get_test_client_with_blocks(vec![dummy_block.clone()]); + let logs = client + .logs(Filter { + from_block: BlockId::Earliest, + to_block: BlockId::Latest, + address: None, + topics: vec![], + limit: None, + }) + .unwrap(); + assert_eq!(logs.len(), 0); } #[test] fn returns_block_body() { - let dummy_block = get_good_dummy_block(); - let client = get_test_client_with_blocks(vec![dummy_block.clone()]); - let block = view!(BlockView, &dummy_block); - let body = client.block_body(BlockId::Hash(block.header().hash())).unwrap(); - let body = body.rlp(); - assert_eq!(body.item_count().unwrap(), 2); - assert_eq!(body.at(0).unwrap().as_raw()[..], block.rlp().at(1).as_raw()[..]); - assert_eq!(body.at(1).unwrap().as_raw()[..], block.rlp().at(2).as_raw()[..]); + let dummy_block = get_good_dummy_block(); + let client = get_test_client_with_blocks(vec![dummy_block.clone()]); + let block = view!(BlockView, &dummy_block); + let body = client + .block_body(BlockId::Hash(block.header().hash())) + .unwrap(); + let body = body.rlp(); + assert_eq!(body.item_count().unwrap(), 2); + assert_eq!( + body.at(0).unwrap().as_raw()[..], + block.rlp().at(1).as_raw()[..] 
+ ); + assert_eq!( + body.at(1).unwrap().as_raw()[..], + block.rlp().at(2).as_raw()[..] + ); } #[test] fn imports_block_sequence() { - let client = generate_dummy_client(6); - let block = client.block_header(BlockId::Number(5)).unwrap(); + let client = generate_dummy_client(6); + let block = client.block_header(BlockId::Number(5)).unwrap(); - assert!(!block.into_inner().is_empty()); + assert!(!block.into_inner().is_empty()); } #[test] fn can_collect_garbage() { - let client = generate_dummy_client(100); - client.tick(true); - assert!(client.blockchain_cache_info().blocks < 100 * 1024); + let client = generate_dummy_client(100); + client.tick(true); + assert!(client.blockchain_cache_info().blocks < 100 * 1024); } #[test] fn can_generate_gas_price_median() { - let client = generate_dummy_client_with_data(3, 1, slice_into![1, 2, 3]); - assert_eq!(Some(&U256::from(2)), client.gas_price_corpus(3).median()); + let client = generate_dummy_client_with_data(3, 1, slice_into![1, 2, 3]); + assert_eq!(Some(&U256::from(2)), client.gas_price_corpus(3).median()); - let client = generate_dummy_client_with_data(4, 1, slice_into![1, 4, 3, 2]); - assert_eq!(Some(&U256::from(3)), client.gas_price_corpus(3).median()); + let client = generate_dummy_client_with_data(4, 1, slice_into![1, 4, 3, 2]); + assert_eq!(Some(&U256::from(3)), client.gas_price_corpus(3).median()); } #[test] fn can_generate_gas_price_histogram() { - let client = generate_dummy_client_with_data(20, 1, slice_into![6354,8593,6065,4842,7845,7002,689,4958,4250,6098,5804,4320,643,8895,2296,8589,7145,2000,2512,1408]); + let client = generate_dummy_client_with_data( + 20, + 1, + slice_into![ + 6354, 8593, 6065, 4842, 7845, 7002, 689, 4958, 4250, 6098, 5804, 4320, 643, 8895, 2296, + 8589, 7145, 2000, 2512, 1408 + ], + ); - let hist = client.gas_price_corpus(20).histogram(5).unwrap(); - let correct_hist = ::stats::Histogram { bucket_bounds: vec_into![643, 2294, 3945, 5596, 7247, 8898], counts: vec![4,2,4,6,4] }; - 
assert_eq!(hist, correct_hist); + let hist = client.gas_price_corpus(20).histogram(5).unwrap(); + let correct_hist = ::stats::Histogram { + bucket_bounds: vec_into![643, 2294, 3945, 5596, 7247, 8898], + counts: vec![4, 2, 4, 6, 4], + }; + assert_eq!(hist, correct_hist); } #[test] fn empty_gas_price_histogram() { - let client = generate_dummy_client_with_data(20, 0, slice_into![]); + let client = generate_dummy_client_with_data(20, 0, slice_into![]); - assert!(client.gas_price_corpus(20).histogram(5).is_none()); + assert!(client.gas_price_corpus(20).histogram(5).is_none()); } #[test] fn corpus_is_sorted() { - let client = generate_dummy_client_with_data(2, 1, slice_into![U256::from_str("11426908979").unwrap(), U256::from_str("50426908979").unwrap()]); - let corpus = client.gas_price_corpus(20); - assert!(corpus[0] < corpus[1]); + let client = generate_dummy_client_with_data( + 2, + 1, + slice_into![ + U256::from_str("11426908979").unwrap(), + U256::from_str("50426908979").unwrap() + ], + ); + let corpus = client.gas_price_corpus(20); + assert!(corpus[0] < corpus[1]); } #[test] fn can_handle_long_fork() { - let client = generate_dummy_client(1200); - for _ in 0..20 { - client.import_verified_blocks(); - } - assert_eq!(1200, client.chain_info().best_block_number); + let client = generate_dummy_client(1200); + for _ in 0..20 { + client.import_verified_blocks(); + } + assert_eq!(1200, client.chain_info().best_block_number); - push_blocks_to_client(&client, 45, 1201, 800); - push_blocks_to_client(&client, 49, 1201, 800); - push_blocks_to_client(&client, 53, 1201, 600); + push_blocks_to_client(&client, 45, 1201, 800); + push_blocks_to_client(&client, 49, 1201, 800); + push_blocks_to_client(&client, 53, 1201, 600); - for _ in 0..400 { - client.import_verified_blocks(); - } - assert_eq!(2000, client.chain_info().best_block_number); + for _ in 0..400 { + client.import_verified_blocks(); + } + assert_eq!(2000, client.chain_info().best_block_number); } #[test] fn can_mine() { 
- let dummy_blocks = get_good_dummy_block_seq(2); - let client = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]); + let dummy_blocks = get_good_dummy_block_seq(2); + let client = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]); - let b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]).unwrap().close().unwrap(); + let b = client + .prepare_open_block( + Address::default(), + (3141562.into(), 31415620.into()), + vec![], + ) + .unwrap() + .close() + .unwrap(); - assert_eq!(*b.header.parent_hash(), view!(BlockView, &dummy_blocks[0]).header_view().hash()); + assert_eq!( + *b.header.parent_hash(), + view!(BlockView, &dummy_blocks[0]).header_view().hash() + ); } #[test] fn change_history_size() { - let db = test_helpers::new_db(); - let test_spec = Spec::new_null(); - let mut config = ClientConfig::default(); + let db = test_helpers::new_db(); + let test_spec = Spec::new_null(); + let mut config = ClientConfig::default(); - config.history = 2; - let address = Address::random(); - { - let client = Client::new( - ClientConfig::default(), - &test_spec, - db.clone(), - Arc::new(Miner::new_for_tests(&test_spec, None)), - IoChannel::disconnected() - ).unwrap(); + config.history = 2; + let address = Address::random(); + { + let client = Client::new( + ClientConfig::default(), + &test_spec, + db.clone(), + Arc::new(Miner::new_for_tests(&test_spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); - for _ in 0..20 { - let mut b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]).unwrap(); - b.block_mut().state_mut().add_balance(&address, &5.into(), CleanupMode::NoEmpty).unwrap(); - b.block_mut().state_mut().commit().unwrap(); - let b = b.close_and_lock().unwrap().seal(&*test_spec.engine, vec![]).unwrap(); - client.import_sealed_block(b).unwrap(); // account change is in the journal overlay - } - } - let mut config = ClientConfig::default(); - config.history = 10; - let client 
= Client::new( - config, - &test_spec, - db, - Arc::new(Miner::new_for_tests(&test_spec, None)), - IoChannel::disconnected(), - ).unwrap(); - assert_eq!(client.state().balance(&address).unwrap(), 100.into()); + for _ in 0..20 { + let mut b = client + .prepare_open_block( + Address::default(), + (3141562.into(), 31415620.into()), + vec![], + ) + .unwrap(); + b.block_mut() + .state_mut() + .add_balance(&address, &5.into(), CleanupMode::NoEmpty) + .unwrap(); + b.block_mut().state_mut().commit().unwrap(); + let b = b + .close_and_lock() + .unwrap() + .seal(&*test_spec.engine, vec![]) + .unwrap(); + client.import_sealed_block(b).unwrap(); // account change is in the journal overlay + } + } + let mut config = ClientConfig::default(); + config.history = 10; + let client = Client::new( + config, + &test_spec, + db, + Arc::new(Miner::new_for_tests(&test_spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + assert_eq!(client.state().balance(&address).unwrap(), 100.into()); } #[test] fn does_not_propagate_delayed_transactions() { - let key = KeyPair::from_secret(keccak("test").into()).unwrap(); - let secret = key.secret(); - let tx0 = PendingTransaction::new(Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 21000.into(), - action: Action::Call(Address::default()), - value: 0.into(), - data: Vec::new(), - }.sign(secret, None), Some(Condition::Number(2))); - let tx1 = PendingTransaction::new(Transaction { - nonce: 1.into(), - gas_price: 0.into(), - gas: 21000.into(), - action: Action::Call(Address::default()), - value: 0.into(), - data: Vec::new(), - }.sign(secret, None), None); - let client = generate_dummy_client(1); + let key = KeyPair::from_secret(keccak("test").into()).unwrap(); + let secret = key.secret(); + let tx0 = PendingTransaction::new( + Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 21000.into(), + action: Action::Call(Address::default()), + value: 0.into(), + data: Vec::new(), + } + .sign(secret, None), + 
Some(Condition::Number(2)), + ); + let tx1 = PendingTransaction::new( + Transaction { + nonce: 1.into(), + gas_price: 0.into(), + gas: 21000.into(), + action: Action::Call(Address::default()), + value: 0.into(), + data: Vec::new(), + } + .sign(secret, None), + None, + ); + let client = generate_dummy_client(1); - client.miner().import_own_transaction(&*client, tx0).unwrap(); - client.miner().import_own_transaction(&*client, tx1).unwrap(); - assert_eq!(0, client.transactions_to_propagate().len()); - assert_eq!(0, client.miner().ready_transactions(&*client, 10, PendingOrdering::Priority).len()); - push_blocks_to_client(&client, 53, 2, 2); - client.flush_queue(); - assert_eq!(2, client.transactions_to_propagate().len()); - assert_eq!(2, client.miner().ready_transactions(&*client, 10, PendingOrdering::Priority).len()); + client + .miner() + .import_own_transaction(&*client, tx0) + .unwrap(); + client + .miner() + .import_own_transaction(&*client, tx1) + .unwrap(); + assert_eq!(0, client.transactions_to_propagate().len()); + assert_eq!( + 0, + client + .miner() + .ready_transactions(&*client, 10, PendingOrdering::Priority) + .len() + ); + push_blocks_to_client(&client, 53, 2, 2); + client.flush_queue(); + assert_eq!(2, client.transactions_to_propagate().len()); + assert_eq!( + 2, + client + .miner() + .ready_transactions(&*client, 10, PendingOrdering::Priority) + .len() + ); } #[test] fn transaction_proof() { - use ::client::ProvingBlockChainClient; + use client::ProvingBlockChainClient; - let client = generate_dummy_client(0); - let address = Address::random(); - let test_spec = Spec::new_test(); - for _ in 0..20 { - let mut b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]).unwrap(); - b.block_mut().state_mut().add_balance(&address, &5.into(), CleanupMode::NoEmpty).unwrap(); - b.block_mut().state_mut().commit().unwrap(); - let b = b.close_and_lock().unwrap().seal(&*test_spec.engine, vec![]).unwrap(); - 
client.import_sealed_block(b).unwrap(); // account change is in the journal overlay - } + let client = generate_dummy_client(0); + let address = Address::random(); + let test_spec = Spec::new_test(); + for _ in 0..20 { + let mut b = client + .prepare_open_block( + Address::default(), + (3141562.into(), 31415620.into()), + vec![], + ) + .unwrap(); + b.block_mut() + .state_mut() + .add_balance(&address, &5.into(), CleanupMode::NoEmpty) + .unwrap(); + b.block_mut().state_mut().commit().unwrap(); + let b = b + .close_and_lock() + .unwrap() + .seal(&*test_spec.engine, vec![]) + .unwrap(); + client.import_sealed_block(b).unwrap(); // account change is in the journal overlay + } - let transaction = Transaction { - nonce: 0.into(), - gas_price: 0.into(), - gas: 21000.into(), - action: Action::Call(Address::default()), - value: 5.into(), - data: Vec::new(), - }.fake_sign(address); + let transaction = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 21000.into(), + action: Action::Call(Address::default()), + value: 5.into(), + data: Vec::new(), + } + .fake_sign(address); - let proof = client.prove_transaction(transaction.clone(), BlockId::Latest).unwrap().1; - let backend = state::backend::ProofCheck::new(&proof); + let proof = client + .prove_transaction(transaction.clone(), BlockId::Latest) + .unwrap() + .1; + let backend = state::backend::ProofCheck::new(&proof); - let mut factories = ::factory::Factories::default(); - factories.accountdb = ::account_db::Factory::Plain; // raw state values, no mangled keys. - let root = *client.best_block_header().state_root(); + let mut factories = ::factory::Factories::default(); + factories.accountdb = ::account_db::Factory::Plain; // raw state values, no mangled keys. 
+ let root = *client.best_block_header().state_root(); - let machine = test_spec.engine.machine(); - let env_info = client.latest_env_info(); - let schedule = machine.schedule(env_info.number); - let mut state = State::from_existing(backend, root, 0.into(), factories.clone()).unwrap(); - Executive::new(&mut state, &env_info, &machine, &schedule) - .transact(&transaction, TransactOptions::with_no_tracing().dont_check_nonce()).unwrap(); + let machine = test_spec.engine.machine(); + let env_info = client.latest_env_info(); + let schedule = machine.schedule(env_info.number); + let mut state = State::from_existing(backend, root, 0.into(), factories.clone()).unwrap(); + Executive::new(&mut state, &env_info, &machine, &schedule) + .transact( + &transaction, + TransactOptions::with_no_tracing().dont_check_nonce(), + ) + .unwrap(); - assert_eq!(state.balance(&Address::default()).unwrap(), 5.into()); - assert_eq!(state.balance(&address).unwrap(), 95.into()); + assert_eq!(state.balance(&Address::default()).unwrap(), 5.into()); + assert_eq!(state.balance(&address).unwrap(), 95.into()); } #[test] fn reset_blockchain() { - let client = get_test_client_with_blocks(get_good_dummy_block_seq(19)); - // 19 + genesis block - assert!(client.block_header(BlockId::Number(20)).is_some()); - assert_eq!(client.block_header(BlockId::Number(20)).unwrap().hash(), client.best_block_header().hash()); + let client = get_test_client_with_blocks(get_good_dummy_block_seq(19)); + // 19 + genesis block + assert!(client.block_header(BlockId::Number(20)).is_some()); + assert_eq!( + client.block_header(BlockId::Number(20)).unwrap().hash(), + client.best_block_header().hash() + ); - assert!(client.reset(5).is_ok()); + assert!(client.reset(5).is_ok()); - client.chain().clear_cache(); + client.chain().clear_cache(); - assert!(client.block_header(BlockId::Number(20)).is_none()); - assert!(client.block_header(BlockId::Number(19)).is_none()); - assert!(client.block_header(BlockId::Number(18)).is_none()); - 
assert!(client.block_header(BlockId::Number(17)).is_none()); - assert!(client.block_header(BlockId::Number(16)).is_none()); + assert!(client.block_header(BlockId::Number(20)).is_none()); + assert!(client.block_header(BlockId::Number(19)).is_none()); + assert!(client.block_header(BlockId::Number(18)).is_none()); + assert!(client.block_header(BlockId::Number(17)).is_none()); + assert!(client.block_header(BlockId::Number(16)).is_none()); - assert!(client.block_header(BlockId::Number(15)).is_some()); + assert!(client.block_header(BlockId::Number(15)).is_some()); } #[test] fn import_export_hex() { - let client = get_test_client_with_blocks(get_good_dummy_block_seq(19)); - let block_rlps = (15..20) - .filter_map(|num| client.block(BlockId::Number(num))) - .map(|header| { - header.raw().to_hex() - }) - .collect::>(); + let client = get_test_client_with_blocks(get_good_dummy_block_seq(19)); + let block_rlps = (15..20) + .filter_map(|num| client.block(BlockId::Number(num))) + .map(|header| header.raw().to_hex()) + .collect::>(); - let mut out = Vec::new(); + let mut out = Vec::new(); - client.export_blocks( - Box::new(&mut out), - BlockId::Number(15), - BlockId::Number(20), - Some(DataFormat::Hex) - ).unwrap(); + client + .export_blocks( + Box::new(&mut out), + BlockId::Number(15), + BlockId::Number(20), + Some(DataFormat::Hex), + ) + .unwrap(); - let written = from_utf8(&out) - .unwrap() - .split("\n") - // last line is empty, ignore it. - .take(5) - .collect::>(); - assert_eq!(block_rlps, written); + let written = from_utf8(&out) + .unwrap() + .split("\n") + // last line is empty, ignore it. 
+ .take(5) + .collect::>(); + assert_eq!(block_rlps, written); - assert!(client.reset(5).is_ok()); - client.chain().clear_cache(); + assert!(client.reset(5).is_ok()); + client.chain().clear_cache(); - assert!(client.block_header(BlockId::Number(20)).is_none()); - assert!(client.block_header(BlockId::Number(19)).is_none()); - assert!(client.block_header(BlockId::Number(18)).is_none()); - assert!(client.block_header(BlockId::Number(17)).is_none()); - assert!(client.block_header(BlockId::Number(16)).is_none()); + assert!(client.block_header(BlockId::Number(20)).is_none()); + assert!(client.block_header(BlockId::Number(19)).is_none()); + assert!(client.block_header(BlockId::Number(18)).is_none()); + assert!(client.block_header(BlockId::Number(17)).is_none()); + assert!(client.block_header(BlockId::Number(16)).is_none()); - client.import_blocks(Box::new(&*out), Some(DataFormat::Hex)).unwrap(); + client + .import_blocks(Box::new(&*out), Some(DataFormat::Hex)) + .unwrap(); - assert!(client.block_header(BlockId::Number(20)).is_some()); - assert!(client.block_header(BlockId::Number(19)).is_some()); - assert!(client.block_header(BlockId::Number(18)).is_some()); - assert!(client.block_header(BlockId::Number(17)).is_some()); - assert!(client.block_header(BlockId::Number(16)).is_some()); + assert!(client.block_header(BlockId::Number(20)).is_some()); + assert!(client.block_header(BlockId::Number(19)).is_some()); + assert!(client.block_header(BlockId::Number(18)).is_some()); + assert!(client.block_header(BlockId::Number(17)).is_some()); + assert!(client.block_header(BlockId::Number(16)).is_some()); } #[test] fn import_export_binary() { - let client = get_test_client_with_blocks(get_good_dummy_block_seq(19)); + let client = get_test_client_with_blocks(get_good_dummy_block_seq(19)); - let mut out = Vec::new(); + let mut out = Vec::new(); - client.export_blocks( - Box::new(&mut out), - BlockId::Number(15), - BlockId::Number(20), - Some(DataFormat::Binary) - ).unwrap(); + client + 
.export_blocks( + Box::new(&mut out), + BlockId::Number(15), + BlockId::Number(20), + Some(DataFormat::Binary), + ) + .unwrap(); - assert!(client.reset(5).is_ok()); - client.chain().clear_cache(); + assert!(client.reset(5).is_ok()); + client.chain().clear_cache(); - assert!(client.block_header(BlockId::Number(20)).is_none()); - assert!(client.block_header(BlockId::Number(19)).is_none()); - assert!(client.block_header(BlockId::Number(18)).is_none()); - assert!(client.block_header(BlockId::Number(17)).is_none()); - assert!(client.block_header(BlockId::Number(16)).is_none()); + assert!(client.block_header(BlockId::Number(20)).is_none()); + assert!(client.block_header(BlockId::Number(19)).is_none()); + assert!(client.block_header(BlockId::Number(18)).is_none()); + assert!(client.block_header(BlockId::Number(17)).is_none()); + assert!(client.block_header(BlockId::Number(16)).is_none()); - client.import_blocks(Box::new(&*out), Some(DataFormat::Binary)).unwrap(); + client + .import_blocks(Box::new(&*out), Some(DataFormat::Binary)) + .unwrap(); - assert!(client.block_header(BlockId::Number(19)).is_some()); - assert!(client.block_header(BlockId::Number(18)).is_some()); - assert!(client.block_header(BlockId::Number(20)).is_some()); - assert!(client.block_header(BlockId::Number(17)).is_some()); - assert!(client.block_header(BlockId::Number(16)).is_some()); + assert!(client.block_header(BlockId::Number(19)).is_some()); + assert!(client.block_header(BlockId::Number(18)).is_some()); + assert!(client.block_header(BlockId::Number(20)).is_some()); + assert!(client.block_header(BlockId::Number(17)).is_some()); + assert!(client.block_header(BlockId::Number(16)).is_some()); } diff --git a/ethcore/src/tests/evm.rs b/ethcore/src/tests/evm.rs index ec0b1dd8e..c00e99877 100644 --- a/ethcore/src/tests/evm.rs +++ b/ethcore/src/tests/evm.rs @@ -16,83 +16,85 @@ //! Tests of EVM integration with transaction execution. 
-use std::sync::Arc; -use hash::keccak; -use vm::{EnvInfo, ActionParams, ActionValue, CallType, ParamsType}; use evm::{Factory, VMType}; use executive::Executive; +use hash::keccak; use state::Substate; +use std::sync::Arc; use test_helpers::get_temp_state_with_factory; -use trace::{NoopVMTracer, NoopTracer}; +use trace::{NoopTracer, NoopVMTracer}; use types::transaction::SYSTEM_ADDRESS; +use vm::{ActionParams, ActionValue, CallType, EnvInfo, ParamsType}; use rustc_hex::FromHex; -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; -evm_test!{test_blockhash_eip210: test_blockhash_eip210_int} +evm_test! {test_blockhash_eip210: test_blockhash_eip210_int} fn test_blockhash_eip210(factory: Factory) { - let get_prev_hash_code = Arc::new("600143034060205260206020f3".from_hex().unwrap()); // this returns previous block hash - let get_prev_hash_code_hash = keccak(get_prev_hash_code.as_ref()); - // This is same as DEFAULT_BLOCKHASH_CONTRACT except for metropolis transition block check removed. 
- let test_blockhash_contract = "73fffffffffffffffffffffffffffffffffffffffe33141561007a57600143036020526000356101006020510755600061010060205107141561005057600035610100610100602051050761010001555b6000620100006020510714156100755760003561010062010000602051050761020001555b61014a565b4360003512151561009057600060405260206040f35b610100600035430312156100b357610100600035075460605260206060f3610149565b62010000600035430312156100d157600061010060003507146100d4565b60005b156100f6576101006101006000350507610100015460805260206080f3610148565b630100000060003543031215610116576000620100006000350714610119565b60005b1561013c57610100620100006000350507610200015460a052602060a0f3610147565b600060c052602060c0f35b5b5b5b5b"; - let blockhash_contract_code = Arc::new(test_blockhash_contract.from_hex().unwrap()); - let blockhash_contract_code_hash = keccak(blockhash_contract_code.as_ref()); - let machine = ::ethereum::new_eip210_test_machine(); - let mut env_info = EnvInfo::default(); + let get_prev_hash_code = Arc::new("600143034060205260206020f3".from_hex().unwrap()); // this returns previous block hash + let get_prev_hash_code_hash = keccak(get_prev_hash_code.as_ref()); + // This is same as DEFAULT_BLOCKHASH_CONTRACT except for metropolis transition block check removed. 
+ let test_blockhash_contract = "73fffffffffffffffffffffffffffffffffffffffe33141561007a57600143036020526000356101006020510755600061010060205107141561005057600035610100610100602051050761010001555b6000620100006020510714156100755760003561010062010000602051050761020001555b61014a565b4360003512151561009057600060405260206040f35b610100600035430312156100b357610100600035075460605260206060f3610149565b62010000600035430312156100d157600061010060003507146100d4565b60005b156100f6576101006101006000350507610100015460805260206080f3610148565b630100000060003543031215610116576000620100006000350714610119565b60005b1561013c57610100620100006000350507610200015460a052602060a0f3610147565b600060c052602060c0f35b5b5b5b5b"; + let blockhash_contract_code = Arc::new(test_blockhash_contract.from_hex().unwrap()); + let blockhash_contract_code_hash = keccak(blockhash_contract_code.as_ref()); + let machine = ::ethereum::new_eip210_test_machine(); + let mut env_info = EnvInfo::default(); - // populate state with 256 last hashes - let mut state = get_temp_state_with_factory(factory); - let contract_address: Address = 0xf0.into(); - state.init_code(&contract_address, (*blockhash_contract_code).clone()).unwrap(); - for i in 1 .. 
257 { - env_info.number = i.into(); - let params = ActionParams { - code_address: contract_address.clone(), - address: contract_address, - sender: SYSTEM_ADDRESS.clone(), - origin: SYSTEM_ADDRESS.clone(), - gas: 100000.into(), - gas_price: 0.into(), - value: ActionValue::Transfer(0.into()), - code: Some(blockhash_contract_code.clone()), - code_hash: Some(blockhash_contract_code_hash), - data: Some(H256::from(i - 1).to_vec()), - call_type: CallType::Call, - params_type: ParamsType::Separate, - }; - let schedule = machine.schedule(env_info.number); - let mut ex = Executive::new(&mut state, &env_info, &machine, &schedule); - let mut substate = Substate::new(); - if let Err(e) = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { - panic!("Encountered error on updating last hashes: {}", e); - } - } + // populate state with 256 last hashes + let mut state = get_temp_state_with_factory(factory); + let contract_address: Address = 0xf0.into(); + state + .init_code(&contract_address, (*blockhash_contract_code).clone()) + .unwrap(); + for i in 1..257 { + env_info.number = i.into(); + let params = ActionParams { + code_address: contract_address.clone(), + address: contract_address, + sender: SYSTEM_ADDRESS.clone(), + origin: SYSTEM_ADDRESS.clone(), + gas: 100000.into(), + gas_price: 0.into(), + value: ActionValue::Transfer(0.into()), + code: Some(blockhash_contract_code.clone()), + code_hash: Some(blockhash_contract_code_hash), + data: Some(H256::from(i - 1).to_vec()), + call_type: CallType::Call, + params_type: ParamsType::Separate, + }; + let schedule = machine.schedule(env_info.number); + let mut ex = Executive::new(&mut state, &env_info, &machine, &schedule); + let mut substate = Substate::new(); + if let Err(e) = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { + panic!("Encountered error on updating last hashes: {}", e); + } + } - env_info.number = 256; - let params = ActionParams { - code_address: Address::new(), - address: 
Address::new(), - sender: Address::new(), - origin: Address::new(), - gas: 100000.into(), - gas_price: 0.into(), - value: ActionValue::Transfer(0.into()), - code: Some(get_prev_hash_code), - code_hash: Some(get_prev_hash_code_hash), - data: None, - call_type: CallType::Call, - params_type: ParamsType::Separate, - }; - let schedule = machine.schedule(env_info.number); - let mut ex = Executive::new(&mut state, &env_info, &machine, &schedule); - let mut substate = Substate::new(); - let res = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer); - let output = match res { - Ok(res) => H256::from(&res.return_data[..32]), - Err(e) => { - panic!("Encountered error on getting last hash: {}", e); - }, - }; - assert_eq!(output, 255.into()); + env_info.number = 256; + let params = ActionParams { + code_address: Address::new(), + address: Address::new(), + sender: Address::new(), + origin: Address::new(), + gas: 100000.into(), + gas_price: 0.into(), + value: ActionValue::Transfer(0.into()), + code: Some(get_prev_hash_code), + code_hash: Some(get_prev_hash_code_hash), + data: None, + call_type: CallType::Call, + params_type: ParamsType::Separate, + }; + let schedule = machine.schedule(env_info.number); + let mut ex = Executive::new(&mut state, &env_info, &machine, &schedule); + let mut substate = Substate::new(); + let res = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer); + let output = match res { + Ok(res) => H256::from(&res.return_data[..32]), + Err(e) => { + panic!("Encountered error on getting last hash: {}", e); + } + }; + assert_eq!(output, 255.into()); } diff --git a/ethcore/src/tests/mod.rs b/ethcore/src/tests/mod.rs index ee45c7385..d0f86efff 100644 --- a/ethcore/src/tests/mod.rs +++ b/ethcore/src/tests/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-mod client; mod blockchain; +mod client; mod evm; mod trace; diff --git a/ethcore/src/tests/trace.rs b/ethcore/src/tests/trace.rs index c14f13cf5..f5282ce5e 100644 --- a/ethcore/src/tests/trace.rs +++ b/ethcore/src/tests/trace.rs @@ -16,195 +16,232 @@ //! Client tests of tracing +use block::*; +use client::{BlockChainClient, Client, ClientConfig, *}; +use ethereum_types::{Address, U256}; use ethkey::KeyPair; use hash::keccak; -use block::*; -use ethereum_types::{U256, Address}; use io::*; -use spec::*; -use client::*; -use test_helpers::get_temp_state_db; -use client::{BlockChainClient, Client, ClientConfig}; -use std::sync::Arc; use miner::Miner; -use types::transaction::{Action, Transaction}; -use trace::{RewardType, LocalizedTrace}; -use trace::trace::Action::Reward; -use test_helpers; +use spec::*; +use std::sync::Arc; +use test_helpers::{self, get_temp_state_db}; +use trace::{trace::Action::Reward, LocalizedTrace, RewardType}; +use types::{ + header::Header, + transaction::{Action, Transaction}, + view, + views::BlockView, +}; use verification::queue::kind::blocks::Unverified; -use types::header::Header; -use types::view; -use types::views::BlockView; #[test] fn can_trace_block_and_uncle_reward() { - let db = test_helpers::new_db(); - let spec = Spec::new_test_with_reward(); - let engine = &*spec.engine; + let db = test_helpers::new_db(); + let spec = Spec::new_test_with_reward(); + let engine = &*spec.engine; - // Create client - let mut client_config = ClientConfig::default(); - client_config.tracing.enabled = true; - let client = Client::new( - client_config, - &spec, - db, - Arc::new(Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); + // Create client + let mut client_config = ClientConfig::default(); + client_config.tracing.enabled = true; + let client = Client::new( + client_config, + &spec, + db, + Arc::new(Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); - // Create test data: - // genesis 
- // | - // root_block - // | - // parent_block - // | - // block with transaction and uncle + // Create test data: + // genesis + // | + // root_block + // | + // parent_block + // | + // block with transaction and uncle - let genesis_header = spec.genesis_header(); - let genesis_gas = genesis_header.gas_limit().clone(); + let genesis_header = spec.genesis_header(); + let genesis_gas = genesis_header.gas_limit().clone(); - let mut db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let mut rolling_timestamp = 40; - let mut last_hashes = vec![]; - let mut last_header = genesis_header.clone(); - last_hashes.push(last_header.hash()); + let mut db = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + let mut rolling_timestamp = 40; + let mut last_hashes = vec![]; + let mut last_header = genesis_header.clone(); + last_hashes.push(last_header.hash()); - let kp = KeyPair::from_secret_slice(&keccak("")).unwrap(); - let author = kp.address(); + let kp = KeyPair::from_secret_slice(&keccak("")).unwrap(); + let author = kp.address(); - // Add root block first - let mut root_block = OpenBlock::new( - engine, - Default::default(), - false, - db, - &last_header, - Arc::new(last_hashes.clone()), - author.clone(), - (3141562.into(), 31415620.into()), - vec![], - false, - None, - ).unwrap(); - rolling_timestamp += 10; - root_block.set_timestamp(rolling_timestamp); + // Add root block first + let mut root_block = OpenBlock::new( + engine, + Default::default(), + false, + db, + &last_header, + Arc::new(last_hashes.clone()), + author.clone(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + rolling_timestamp += 10; + root_block.set_timestamp(rolling_timestamp); - let root_block = root_block.close_and_lock().unwrap().seal(engine, vec![]).unwrap(); + let root_block = root_block + .close_and_lock() + .unwrap() + .seal(engine, vec![]) + .unwrap(); - if let Err(e) = 
client.import_block(Unverified::from_rlp(root_block.rlp_bytes()).unwrap()) { - panic!("error importing block which is valid by definition: {:?}", e); - } + if let Err(e) = client.import_block(Unverified::from_rlp(root_block.rlp_bytes()).unwrap()) { + panic!( + "error importing block which is valid by definition: {:?}", + e + ); + } - last_header = view!(BlockView, &root_block.rlp_bytes()).header(); - let root_header = last_header.clone(); - db = root_block.drain().state.drop().1; + last_header = view!(BlockView, &root_block.rlp_bytes()).header(); + let root_header = last_header.clone(); + db = root_block.drain().state.drop().1; - last_hashes.push(last_header.hash()); + last_hashes.push(last_header.hash()); - // Add parent block - let mut parent_block = OpenBlock::new( - engine, - Default::default(), - false, - db, - &last_header, - Arc::new(last_hashes.clone()), - author.clone(), - (3141562.into(), 31415620.into()), - vec![], - false, - None, - ).unwrap(); - rolling_timestamp += 10; - parent_block.set_timestamp(rolling_timestamp); + // Add parent block + let mut parent_block = OpenBlock::new( + engine, + Default::default(), + false, + db, + &last_header, + Arc::new(last_hashes.clone()), + author.clone(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + rolling_timestamp += 10; + parent_block.set_timestamp(rolling_timestamp); - let parent_block = parent_block.close_and_lock().unwrap().seal(engine, vec![]).unwrap(); + let parent_block = parent_block + .close_and_lock() + .unwrap() + .seal(engine, vec![]) + .unwrap(); - if let Err(e) = client.import_block(Unverified::from_rlp(parent_block.rlp_bytes()).unwrap()) { - panic!("error importing block which is valid by definition: {:?}", e); - } + if let Err(e) = client.import_block(Unverified::from_rlp(parent_block.rlp_bytes()).unwrap()) { + panic!( + "error importing block which is valid by definition: {:?}", + e + ); + } - last_header = view!(BlockView,&parent_block.rlp_bytes()).header(); 
- db = parent_block.drain().state.drop().1; + last_header = view!(BlockView, &parent_block.rlp_bytes()).header(); + db = parent_block.drain().state.drop().1; - last_hashes.push(last_header.hash()); + last_hashes.push(last_header.hash()); - // Add testing block with transaction and uncle - let mut block = OpenBlock::new( - engine, - Default::default(), - true, - db, - &last_header, - Arc::new(last_hashes.clone()), - author.clone(), - (3141562.into(), 31415620.into()), - vec![], - false, - None, - ).unwrap(); - rolling_timestamp += 10; - block.set_timestamp(rolling_timestamp); + // Add testing block with transaction and uncle + let mut block = OpenBlock::new( + engine, + Default::default(), + true, + db, + &last_header, + Arc::new(last_hashes.clone()), + author.clone(), + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + rolling_timestamp += 10; + block.set_timestamp(rolling_timestamp); - let mut n = 0; - for _ in 0..1 { - block.push_transaction(Transaction { - nonce: n.into(), - gas_price: 10000.into(), - gas: 100000.into(), - action: Action::Create, - data: vec![], - value: U256::zero(), - }.sign(kp.secret(), Some(spec.network_id())), None).unwrap(); - n += 1; - } + let mut n = 0; + for _ in 0..1 { + block + .push_transaction( + Transaction { + nonce: n.into(), + gas_price: 10000.into(), + gas: 100000.into(), + action: Action::Create, + data: vec![], + value: U256::zero(), + } + .sign(kp.secret(), Some(spec.network_id())), + None, + ) + .unwrap(); + n += 1; + } - let mut uncle = Header::new(); - let uncle_author: Address = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into(); - uncle.set_author(uncle_author); - uncle.set_parent_hash(root_header.hash()); - uncle.set_gas_limit(genesis_gas); - uncle.set_number(root_header.number() + 1); - uncle.set_timestamp(rolling_timestamp); - block.push_uncle(uncle).unwrap(); + let mut uncle = Header::new(); + let uncle_author: Address = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into(); + 
uncle.set_author(uncle_author); + uncle.set_parent_hash(root_header.hash()); + uncle.set_gas_limit(genesis_gas); + uncle.set_number(root_header.number() + 1); + uncle.set_timestamp(rolling_timestamp); + block.push_uncle(uncle).unwrap(); - let block = block.close_and_lock().unwrap().seal(engine, vec![]).unwrap(); + let block = block + .close_and_lock() + .unwrap() + .seal(engine, vec![]) + .unwrap(); - let res = client.import_block(Unverified::from_rlp(block.rlp_bytes()).unwrap()); - if res.is_err() { - panic!("error importing block: {:#?}", res.err().unwrap()); - } + let res = client.import_block(Unverified::from_rlp(block.rlp_bytes()).unwrap()); + if res.is_err() { + panic!("error importing block: {:#?}", res.err().unwrap()); + } - block.drain(); - client.flush_queue(); - client.import_verified_blocks(); + block.drain(); + client.flush_queue(); + client.import_verified_blocks(); - // Test0. Check overall filter - let filter = TraceFilter { - range: (BlockId::Number(1)..BlockId::Number(3)), - from_address: vec![], - to_address: vec![], - after: None, - count: None, - }; + // Test0. 
Check overall filter + let filter = TraceFilter { + range: (BlockId::Number(1)..BlockId::Number(3)), + from_address: vec![], + to_address: vec![], + after: None, + count: None, + }; - let traces = client.filter_traces(filter); - assert!(traces.is_some(), "Filtered traces should be present"); - let traces_vec = traces.unwrap(); - let block_reward_traces: Vec = traces_vec.clone().into_iter().filter(|trace| match (trace).action { - Reward(ref a) => a.reward_type == RewardType::Block, - _ => false, - }).collect(); - assert_eq!(block_reward_traces.len(), 3); - let uncle_reward_traces: Vec = traces_vec.clone().into_iter().filter(|trace| match (trace).action { - Reward(ref a) => a.reward_type == RewardType::Uncle, - _ => false, - }).collect(); - assert_eq!(uncle_reward_traces.len(), 1); + let traces = client.filter_traces(filter); + assert!(traces.is_some(), "Filtered traces should be present"); + let traces_vec = traces.unwrap(); + let block_reward_traces: Vec = traces_vec + .clone() + .into_iter() + .filter(|trace| match (trace).action { + Reward(ref a) => a.reward_type == RewardType::Block, + _ => false, + }) + .collect(); + assert_eq!(block_reward_traces.len(), 3); + let uncle_reward_traces: Vec = traces_vec + .clone() + .into_iter() + .filter(|trace| match (trace).action { + Reward(ref a) => a.reward_type == RewardType::Uncle, + _ => false, + }) + .collect(); + assert_eq!(uncle_reward_traces.len(), 1); - // Test1. Check block filter - let traces = client.block_traces(BlockId::Number(3)); - assert_eq!(traces.unwrap().len(), 3); + // Test1. Check block filter + let traces = client.block_traces(BlockId::Number(3)); + assert_eq!(traces.unwrap().len(), 3); } diff --git a/ethcore/src/trace/config.rs b/ethcore/src/trace/config.rs index 72fc17655..085127caf 100644 --- a/ethcore/src/trace/config.rs +++ b/ethcore/src/trace/config.rs @@ -19,21 +19,21 @@ /// Traces config. #[derive(Debug, PartialEq, Clone)] pub struct Config { - /// Indicates if tracing should be enabled or not. 
- /// If it's None, it will be automatically configured. - pub enabled: bool, - /// Preferef cache-size. - pub pref_cache_size: usize, - /// Max cache-size. - pub max_cache_size: usize, + /// Indicates if tracing should be enabled or not. + /// If it's None, it will be automatically configured. + pub enabled: bool, + /// Preferef cache-size. + pub pref_cache_size: usize, + /// Max cache-size. + pub max_cache_size: usize, } impl Default for Config { - fn default() -> Self { - Config { - enabled: false, - pref_cache_size: 15 * 1024 * 1024, - max_cache_size: 20 * 1024 * 1024, - } - } + fn default() -> Self { + Config { + enabled: false, + pref_cache_size: 15 * 1024 * 1024, + max_cache_size: 20 * 1024 * 1024, + } + } } diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index 8dbd38449..f5325e668 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -15,38 +15,38 @@ // along with Parity Ethereum. If not, see . //! Trace database. -use std::collections::HashMap; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use blockchain::BlockChainDB; -use db::cache_manager::CacheManager; -use db::{self, Key, Writable, Readable, CacheUpdatePolicy}; +use db::{self, cache_manager::CacheManager, CacheUpdatePolicy, Key, Readable, Writable}; use ethereum_types::{H256, H264}; use heapsize::HeapSizeOf; -use kvdb::{DBTransaction}; +use kvdb::DBTransaction; use parking_lot::RwLock; use types::BlockNumber; -use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras}; -use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; +use trace::{ + flat::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}, + Config, Database as TraceDatabase, DatabaseExtras, Filter, ImportRequest, LocalizedTrace, +}; const TRACE_DB_VER: &'static [u8] = b"1.0"; #[derive(Debug, Copy, Clone)] enum TraceDBIndex { - /// Block traces index. - BlockTraces = 0, + /// Block traces index. 
+ BlockTraces = 0, } impl Key for H256 { - type Target = H264; + type Target = H264; - fn key(&self) -> H264 { - let mut result = H264::default(); - result[0] = TraceDBIndex::BlockTraces as u8; - result[1..33].copy_from_slice(self); - result - } + fn key(&self) -> H264 { + let mut result = H264::default(); + result[0] = TraceDBIndex::BlockTraces as u8; + result[1..33].copy_from_slice(self); + result + } } /// Database to store transaction execution trace. @@ -55,583 +55,704 @@ impl Key for H256 { /// in trace database. Each trace has information, which contracts have been /// touched, which have been created during the execution of transaction, and /// which calls failed. -pub struct TraceDB where T: DatabaseExtras { - /// cache - traces: RwLock>, - /// hashes of cached traces - cache_manager: RwLock>, - /// db - db: Arc, - /// tracing enabled - enabled: bool, - /// extras - extras: Arc, +pub struct TraceDB +where + T: DatabaseExtras, +{ + /// cache + traces: RwLock>, + /// hashes of cached traces + cache_manager: RwLock>, + /// db + db: Arc, + /// tracing enabled + enabled: bool, + /// extras + extras: Arc, } -impl TraceDB where T: DatabaseExtras { - /// Creates new instance of `TraceDB`. - pub fn new(config: Config, db: Arc, extras: Arc) -> Self { - let mut batch = DBTransaction::new(); - let genesis = extras.block_hash(0) - .expect("Genesis block is always inserted upon extras db creation qed"); - batch.write(db::COL_TRACE, &genesis, &FlatBlockTraces::default()); - batch.put(db::COL_TRACE, b"version", TRACE_DB_VER); - db.key_value().write(batch).expect("failed to update version"); +impl TraceDB +where + T: DatabaseExtras, +{ + /// Creates new instance of `TraceDB`. 
+ pub fn new(config: Config, db: Arc, extras: Arc) -> Self { + let mut batch = DBTransaction::new(); + let genesis = extras + .block_hash(0) + .expect("Genesis block is always inserted upon extras db creation qed"); + batch.write(db::COL_TRACE, &genesis, &FlatBlockTraces::default()); + batch.put(db::COL_TRACE, b"version", TRACE_DB_VER); + db.key_value() + .write(batch) + .expect("failed to update version"); - TraceDB { - traces: RwLock::new(HashMap::new()), - cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)), - db, - enabled: config.enabled, - extras: extras, - } - } + TraceDB { + traces: RwLock::new(HashMap::new()), + cache_manager: RwLock::new(CacheManager::new( + config.pref_cache_size, + config.max_cache_size, + 10 * 1024, + )), + db, + enabled: config.enabled, + extras: extras, + } + } - fn cache_size(&self) -> usize { - self.traces.read().heap_size_of_children() - } + fn cache_size(&self) -> usize { + self.traces.read().heap_size_of_children() + } - /// Let the cache system know that a cacheable item has been used. - fn note_trace_used(&self, trace_id: H256) { - let mut cache_manager = self.cache_manager.write(); - cache_manager.note_used(trace_id); - } + /// Let the cache system know that a cacheable item has been used. + fn note_trace_used(&self, trace_id: H256) { + let mut cache_manager = self.cache_manager.write(); + cache_manager.note_used(trace_id); + } - /// Ticks our cache system and throws out any old data. - pub fn collect_garbage(&self) { - let current_size = self.cache_size(); + /// Ticks our cache system and throws out any old data. 
+ pub fn collect_garbage(&self) { + let current_size = self.cache_size(); - let mut traces = self.traces.write(); - let mut cache_manager = self.cache_manager.write(); + let mut traces = self.traces.write(); + let mut cache_manager = self.cache_manager.write(); - cache_manager.collect_garbage(current_size, | ids | { - for id in &ids { - traces.remove(id); - } - traces.shrink_to_fit(); + cache_manager.collect_garbage(current_size, |ids| { + for id in &ids { + traces.remove(id); + } + traces.shrink_to_fit(); - traces.heap_size_of_children() - }); - } + traces.heap_size_of_children() + }); + } - /// Returns traces for block with hash. - fn traces(&self, block_hash: &H256) -> Option { - let result = self.db.key_value().read_with_cache(db::COL_TRACE, &self.traces, block_hash); - self.note_trace_used(*block_hash); - result - } + /// Returns traces for block with hash. + fn traces(&self, block_hash: &H256) -> Option { + let result = self + .db + .key_value() + .read_with_cache(db::COL_TRACE, &self.traces, block_hash); + self.note_trace_used(*block_hash); + result + } - /// Returns vector of transaction traces for given block. - fn transactions_traces(&self, block_hash: &H256) -> Option> { - self.traces(block_hash).map(Into::into) - } + /// Returns vector of transaction traces for given block. 
+ fn transactions_traces(&self, block_hash: &H256) -> Option> { + self.traces(block_hash).map(Into::into) + } - fn matching_block_traces( - &self, - filter: &Filter, - traces: FlatBlockTraces, - block_hash: H256, - block_number: BlockNumber - ) -> Vec { - let tx_traces: Vec = traces.into(); - tx_traces.into_iter() - .enumerate() - .flat_map(|(tx_number, tx_trace)| { - self.matching_transaction_traces(filter, tx_trace, block_hash.clone(), block_number, tx_number) - }) - .collect() - } + fn matching_block_traces( + &self, + filter: &Filter, + traces: FlatBlockTraces, + block_hash: H256, + block_number: BlockNumber, + ) -> Vec { + let tx_traces: Vec = traces.into(); + tx_traces + .into_iter() + .enumerate() + .flat_map(|(tx_number, tx_trace)| { + self.matching_transaction_traces( + filter, + tx_trace, + block_hash.clone(), + block_number, + tx_number, + ) + }) + .collect() + } - fn matching_transaction_traces( - &self, - filter: &Filter, - traces: FlatTransactionTraces, - block_hash: H256, - block_number: BlockNumber, - tx_number: usize - ) -> Vec { - let (trace_tx_number, trace_tx_hash) = match self.extras.transaction_hash(block_number, tx_number) { - Some(hash) => (Some(tx_number), Some(hash.clone())), - //None means trace without transaction (reward) - None => (None, None), - }; + fn matching_transaction_traces( + &self, + filter: &Filter, + traces: FlatTransactionTraces, + block_hash: H256, + block_number: BlockNumber, + tx_number: usize, + ) -> Vec { + let (trace_tx_number, trace_tx_hash) = + match self.extras.transaction_hash(block_number, tx_number) { + Some(hash) => (Some(tx_number), Some(hash.clone())), + //None means trace without transaction (reward) + None => (None, None), + }; - let flat_traces: Vec = traces.into(); - flat_traces.into_iter() - .filter_map(|trace| { - match filter.matches(&trace) { - true => Some(LocalizedTrace { - action: trace.action, - result: trace.result, - subtraces: trace.subtraces, - trace_address: 
trace.trace_address.into_iter().collect(), - transaction_number: trace_tx_number, - transaction_hash: trace_tx_hash, - block_number: block_number, - block_hash: block_hash - }), - false => None - } - }) - .collect() - } + let flat_traces: Vec = traces.into(); + flat_traces + .into_iter() + .filter_map(|trace| match filter.matches(&trace) { + true => Some(LocalizedTrace { + action: trace.action, + result: trace.result, + subtraces: trace.subtraces, + trace_address: trace.trace_address.into_iter().collect(), + transaction_number: trace_tx_number, + transaction_hash: trace_tx_hash, + block_number: block_number, + block_hash: block_hash, + }), + false => None, + }) + .collect() + } } -impl TraceDatabase for TraceDB where T: DatabaseExtras { - fn tracing_enabled(&self) -> bool { - self.enabled - } +impl TraceDatabase for TraceDB +where + T: DatabaseExtras, +{ + fn tracing_enabled(&self) -> bool { + self.enabled + } - /// Traces of import request's enacted blocks are expected to be already in database - /// or to be the currently inserted trace. - fn import(&self, batch: &mut DBTransaction, request: ImportRequest) { - // valid (canon): retracted 0, enacted 1 => false, true, - // valid (branch): retracted 0, enacted 0 => false, false, - // valid (bbcc): retracted 1, enacted 1 => true, true, - // invalid: retracted 1, enacted 0 => true, false, - let ret = request.retracted != 0; - let ena = !request.enacted.is_empty(); - assert!(!(ret && !ena)); - // fast return if tracing is disabled - if !self.tracing_enabled() { - return; - } + /// Traces of import request's enacted blocks are expected to be already in database + /// or to be the currently inserted trace. 
+ fn import(&self, batch: &mut DBTransaction, request: ImportRequest) { + // valid (canon): retracted 0, enacted 1 => false, true, + // valid (branch): retracted 0, enacted 0 => false, false, + // valid (bbcc): retracted 1, enacted 1 => true, true, + // invalid: retracted 1, enacted 0 => true, false, + let ret = request.retracted != 0; + let ena = !request.enacted.is_empty(); + assert!(!(ret && !ena)); + // fast return if tracing is disabled + if !self.tracing_enabled() { + return; + } - // now let's rebuild the blooms - if !request.enacted.is_empty() { - let range_start = request.block_number + 1 - request.enacted.len() as u64; - let enacted_blooms: Vec<_> = request.enacted - .iter() - // all traces are expected to be found here. That's why `expect` has been used - // instead of `filter_map`. If some traces haven't been found, it meens that - // traces database is corrupted or incomplete. - .map(|block_hash| if block_hash == &request.block_hash { - request.traces.bloom() - } else { - self.traces(block_hash).expect("Traces database is incomplete.").bloom() - }) - .collect(); + // now let's rebuild the blooms + if !request.enacted.is_empty() { + let range_start = request.block_number + 1 - request.enacted.len() as u64; + let enacted_blooms: Vec<_> = request + .enacted + .iter() + // all traces are expected to be found here. That's why `expect` has been used + // instead of `filter_map`. If some traces haven't been found, it meens that + // traces database is corrupted or incomplete. + .map(|block_hash| { + if block_hash == &request.block_hash { + request.traces.bloom() + } else { + self.traces(block_hash) + .expect("Traces database is incomplete.") + .bloom() + } + }) + .collect(); - self.db.trace_blooms() - .insert_blooms(range_start, enacted_blooms.iter()) - .expect("Low level database error. Some issue with disk?"); - } + self.db + .trace_blooms() + .insert_blooms(range_start, enacted_blooms.iter()) + .expect("Low level database error. 
Some issue with disk?"); + } - // insert new block traces into the cache and the database - { - let mut traces = self.traces.write(); - // it's important to use overwrite here, - // cause this value might be queried by hash later - batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite); - // note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection - self.note_trace_used(request.block_hash); - } - } + // insert new block traces into the cache and the database + { + let mut traces = self.traces.write(); + // it's important to use overwrite here, + // cause this value might be queried by hash later + batch.write_with_cache( + db::COL_TRACE, + &mut *traces, + request.block_hash, + request.traces, + CacheUpdatePolicy::Overwrite, + ); + // note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection + self.note_trace_used(request.block_hash); + } + } - fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec) -> Option { - self.extras.block_hash(block_number) - .and_then(|block_hash| self.transactions_traces(&block_hash) - .and_then(|traces| traces.into_iter().nth(tx_position)) - .map(Into::>::into) - // this may and should be optimized - .and_then(|traces| traces.into_iter().find(|trace| trace.trace_address == trace_position)) - .map(|trace| { - let tx_hash = self.extras.transaction_hash(block_number, tx_position) - .expect("Expected to find transaction hash. 
Database is probably corrupted"); + fn trace( + &self, + block_number: BlockNumber, + tx_position: usize, + trace_position: Vec, + ) -> Option { + self.extras.block_hash(block_number).and_then(|block_hash| { + self.transactions_traces(&block_hash) + .and_then(|traces| traces.into_iter().nth(tx_position)) + .map(Into::>::into) + // this may and should be optimized + .and_then(|traces| { + traces + .into_iter() + .find(|trace| trace.trace_address == trace_position) + }) + .map(|trace| { + let tx_hash = self + .extras + .transaction_hash(block_number, tx_position) + .expect( + "Expected to find transaction hash. Database is probably corrupted", + ); - LocalizedTrace { - action: trace.action, - result: trace.result, - subtraces: trace.subtraces, - trace_address: trace.trace_address.into_iter().collect(), - transaction_number: Some(tx_position), - transaction_hash: Some(tx_hash), - block_number: block_number, - block_hash: block_hash, - } - }) - ) - } + LocalizedTrace { + action: trace.action, + result: trace.result, + subtraces: trace.subtraces, + trace_address: trace.trace_address.into_iter().collect(), + transaction_number: Some(tx_position), + transaction_hash: Some(tx_hash), + block_number: block_number, + block_hash: block_hash, + } + }) + }) + } - fn transaction_traces(&self, block_number: BlockNumber, tx_position: usize) -> Option> { - self.extras.block_hash(block_number) - .and_then(|block_hash| self.transactions_traces(&block_hash) - .and_then(|traces| traces.into_iter().nth(tx_position)) - .map(Into::>::into) - .map(|traces| { - let tx_hash = self.extras.transaction_hash(block_number, tx_position) - .expect("Expected to find transaction hash. 
Database is probably corrupted"); + fn transaction_traces( + &self, + block_number: BlockNumber, + tx_position: usize, + ) -> Option> { + self.extras.block_hash(block_number).and_then(|block_hash| { + self.transactions_traces(&block_hash) + .and_then(|traces| traces.into_iter().nth(tx_position)) + .map(Into::>::into) + .map(|traces| { + let tx_hash = self + .extras + .transaction_hash(block_number, tx_position) + .expect( + "Expected to find transaction hash. Database is probably corrupted", + ); - traces.into_iter() - .map(|trace| LocalizedTrace { - action: trace.action, - result: trace.result, - subtraces: trace.subtraces, - trace_address: trace.trace_address.into_iter().collect(), - transaction_number: Some(tx_position), - transaction_hash: Some(tx_hash.clone()), - block_number: block_number, - block_hash: block_hash - }) - .collect() - }) - ) - } + traces + .into_iter() + .map(|trace| LocalizedTrace { + action: trace.action, + result: trace.result, + subtraces: trace.subtraces, + trace_address: trace.trace_address.into_iter().collect(), + transaction_number: Some(tx_position), + transaction_hash: Some(tx_hash.clone()), + block_number: block_number, + block_hash: block_hash, + }) + .collect() + }) + }) + } - fn block_traces(&self, block_number: BlockNumber) -> Option> { - self.extras.block_hash(block_number) - .and_then(|block_hash| self.transactions_traces(&block_hash) - .map(|traces| { - traces.into_iter() - .map(Into::>::into) - .enumerate() - .flat_map(|(tx_position, traces)| { - let (trace_tx_number, trace_tx_hash) = match self.extras.transaction_hash(block_number, tx_position) { - Some(hash) => (Some(tx_position), Some(hash.clone())), - //None means trace without transaction (reward) - None => (None, None), - }; + fn block_traces(&self, block_number: BlockNumber) -> Option> { + self.extras.block_hash(block_number).and_then(|block_hash| { + self.transactions_traces(&block_hash).map(|traces| { + traces + .into_iter() + .map(Into::>::into) + .enumerate() + 
.flat_map(|(tx_position, traces)| { + let (trace_tx_number, trace_tx_hash) = + match self.extras.transaction_hash(block_number, tx_position) { + Some(hash) => (Some(tx_position), Some(hash.clone())), + //None means trace without transaction (reward) + None => (None, None), + }; - traces.into_iter() - .map(|trace| LocalizedTrace { - action: trace.action, - result: trace.result, - subtraces: trace.subtraces, - trace_address: trace.trace_address.into_iter().collect(), - transaction_number: trace_tx_number, - transaction_hash: trace_tx_hash, - block_number: block_number, - block_hash: block_hash, - }) - .collect::>() - }) - .collect::>() - }) - ) - } + traces + .into_iter() + .map(|trace| LocalizedTrace { + action: trace.action, + result: trace.result, + subtraces: trace.subtraces, + trace_address: trace.trace_address.into_iter().collect(), + transaction_number: trace_tx_number, + transaction_hash: trace_tx_hash, + block_number: block_number, + block_hash: block_hash, + }) + .collect::>() + }) + .collect::>() + }) + }) + } - fn filter(&self, filter: &Filter) -> Vec { - let possibilities = filter.bloom_possibilities(); - let numbers = self.db.trace_blooms() - .filter(filter.range.start as u64, filter.range.end as u64, &possibilities) - .expect("Low level database error. Some issue with disk?"); + fn filter(&self, filter: &Filter) -> Vec { + let possibilities = filter.bloom_possibilities(); + let numbers = self + .db + .trace_blooms() + .filter( + filter.range.start as u64, + filter.range.end as u64, + &possibilities, + ) + .expect("Low level database error. Some issue with disk?"); - numbers.into_iter() - .flat_map(|n| { - let number = n as BlockNumber; - let hash = self.extras.block_hash(number) - .expect("Expected to find block hash. Extras db is probably corrupted"); - let traces = self.traces(&hash) - .expect("Expected to find a trace. 
Db is probably corrupted."); - self.matching_block_traces(filter, traces, hash, number) - }) - .collect() - } + numbers + .into_iter() + .flat_map(|n| { + let number = n as BlockNumber; + let hash = self + .extras + .block_hash(number) + .expect("Expected to find block hash. Extras db is probably corrupted"); + let traces = self + .traces(&hash) + .expect("Expected to find a trace. Db is probably corrupted."); + self.matching_block_traces(filter, traces, hash, number) + }) + .collect() + } } #[cfg(test)] mod tests { - use std::collections::HashMap; - use std::sync::Arc; - use ethereum_types::{H256, U256, Address}; - use kvdb::{DBTransaction}; - use types::BlockNumber; - use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest}; - use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError}; - use trace::trace::{Call, Action, Res}; - use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; - use evm::CallType; - use test_helpers::new_db; + use ethereum_types::{Address, H256, U256}; + use evm::CallType; + use kvdb::DBTransaction; + use std::{collections::HashMap, sync::Arc}; + use test_helpers::new_db; + use trace::{ + flat::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}, + trace::{Action, Call, Res}, + AddressesFilter, Config, Database as TraceDatabase, DatabaseExtras, Filter, ImportRequest, + LocalizedTrace, TraceDB, TraceError, + }; + use types::BlockNumber; - struct NoopExtras; + struct NoopExtras; - impl DatabaseExtras for NoopExtras { - fn block_hash(&self, block_number: BlockNumber) -> Option { - if block_number == 0 { - Some(H256::default()) - } else { - unimplemented!() - } - } + impl DatabaseExtras for NoopExtras { + fn block_hash(&self, block_number: BlockNumber) -> Option { + if block_number == 0 { + Some(H256::default()) + } else { + unimplemented!() + } + } - fn transaction_hash(&self, _block_number: BlockNumber, _tx_position: usize) -> Option { - unimplemented!(); - } - } + fn transaction_hash( + 
&self, + _block_number: BlockNumber, + _tx_position: usize, + ) -> Option { + unimplemented!(); + } + } - #[derive(Clone)] - struct Extras { - block_hashes: HashMap, - transaction_hashes: HashMap>, - } + #[derive(Clone)] + struct Extras { + block_hashes: HashMap, + transaction_hashes: HashMap>, + } - impl Default for Extras { - fn default() -> Self { - Extras { - block_hashes: HashMap::new(), - transaction_hashes: HashMap::new(), - } - } - } + impl Default for Extras { + fn default() -> Self { + Extras { + block_hashes: HashMap::new(), + transaction_hashes: HashMap::new(), + } + } + } - impl DatabaseExtras for Extras { - fn block_hash(&self, block_number: BlockNumber) -> Option { - self.block_hashes.get(&block_number).cloned() - } + impl DatabaseExtras for Extras { + fn block_hash(&self, block_number: BlockNumber) -> Option { + self.block_hashes.get(&block_number).cloned() + } - fn transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option { - self.transaction_hashes.get(&block_number) - .and_then(|hashes| hashes.iter().cloned().nth(tx_position)) - } - } + fn transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option { + self.transaction_hashes + .get(&block_number) + .and_then(|hashes| hashes.iter().cloned().nth(tx_position)) + } + } - #[test] - fn test_reopening_db_with_tracing_off() { - let db = new_db(); - let mut config = Config::default(); + #[test] + fn test_reopening_db_with_tracing_off() { + let db = new_db(); + let mut config = Config::default(); - // set autotracing - config.enabled = false; + // set autotracing + config.enabled = false; - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)); - assert_eq!(tracedb.tracing_enabled(), false); - } - } + { + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)); + assert_eq!(tracedb.tracing_enabled(), false); + } + } - #[test] - fn test_reopening_db_with_tracing_on() { - let db = new_db(); - let mut config = 
Config::default(); + #[test] + fn test_reopening_db_with_tracing_on() { + let db = new_db(); + let mut config = Config::default(); - // set tracing on - config.enabled = true; + // set tracing on + config.enabled = true; - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)); - assert_eq!(tracedb.tracing_enabled(), true); - } - } + { + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)); + assert_eq!(tracedb.tracing_enabled(), true); + } + } - fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest { - ImportRequest { - traces: FlatBlockTraces::from(vec![FlatTransactionTraces::from(vec![FlatTrace { - trace_address: Default::default(), - subtraces: 0, - action: Action::Call(Call { - from: 1.into(), - to: 2.into(), - value: 3.into(), - gas: 4.into(), - input: vec![], - call_type: CallType::Call, - }), - result: Res::FailedCall(TraceError::OutOfGas), - }])]), - block_hash: block_hash.clone(), - block_number: block_number, - enacted: vec![block_hash], - retracted: 0, - } - } + fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest { + ImportRequest { + traces: FlatBlockTraces::from(vec![FlatTransactionTraces::from(vec![FlatTrace { + trace_address: Default::default(), + subtraces: 0, + action: Action::Call(Call { + from: 1.into(), + to: 2.into(), + value: 3.into(), + gas: 4.into(), + input: vec![], + call_type: CallType::Call, + }), + result: Res::FailedCall(TraceError::OutOfGas), + }])]), + block_hash: block_hash.clone(), + block_number: block_number, + enacted: vec![block_hash], + retracted: 0, + } + } - fn create_noncanon_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest { - ImportRequest { - traces: FlatBlockTraces::from(vec![FlatTransactionTraces::from(vec![FlatTrace { - trace_address: Default::default(), - subtraces: 0, - action: Action::Call(Call { - from: 1.into(), - to: 2.into(), - value: 3.into(), - gas: 
4.into(), - input: vec![], - call_type: CallType::Call, - }), - result: Res::FailedCall(TraceError::OutOfGas), - }])]), - block_hash: block_hash.clone(), - block_number: block_number, - enacted: vec![], - retracted: 0, - } - } + fn create_noncanon_import_request( + block_number: BlockNumber, + block_hash: H256, + ) -> ImportRequest { + ImportRequest { + traces: FlatBlockTraces::from(vec![FlatTransactionTraces::from(vec![FlatTrace { + trace_address: Default::default(), + subtraces: 0, + action: Action::Call(Call { + from: 1.into(), + to: 2.into(), + value: 3.into(), + gas: 4.into(), + input: vec![], + call_type: CallType::Call, + }), + result: Res::FailedCall(TraceError::OutOfGas), + }])]), + block_hash: block_hash.clone(), + block_number: block_number, + enacted: vec![], + retracted: 0, + } + } - fn create_simple_localized_trace(block_number: BlockNumber, block_hash: H256, tx_hash: H256) -> LocalizedTrace { - LocalizedTrace { - action: Action::Call(Call { - from: Address::from(1), - to: Address::from(2), - value: U256::from(3), - gas: U256::from(4), - input: vec![], - call_type: CallType::Call, - }), - result: Res::FailedCall(TraceError::OutOfGas), - trace_address: vec![], - subtraces: 0, - transaction_number: Some(0), - transaction_hash: Some(tx_hash), - block_number: block_number, - block_hash: block_hash, - } - } + fn create_simple_localized_trace( + block_number: BlockNumber, + block_hash: H256, + tx_hash: H256, + ) -> LocalizedTrace { + LocalizedTrace { + action: Action::Call(Call { + from: Address::from(1), + to: Address::from(2), + value: U256::from(3), + gas: U256::from(4), + input: vec![], + call_type: CallType::Call, + }), + result: Res::FailedCall(TraceError::OutOfGas), + trace_address: vec![], + subtraces: 0, + transaction_number: Some(0), + transaction_hash: Some(tx_hash), + block_number: block_number, + block_hash: block_hash, + } + } - #[test] - fn test_import_non_canon_traces() { - let db = new_db(); - let mut config = Config::default(); - 
config.enabled = true; - let block_0 = H256::from(0xa1); - let block_1 = H256::from(0xa2); - let tx_0 = H256::from(0xff); - let tx_1 = H256::from(0xaf); + #[test] + fn test_import_non_canon_traces() { + let db = new_db(); + let mut config = Config::default(); + config.enabled = true; + let block_0 = H256::from(0xa1); + let block_1 = H256::from(0xa2); + let tx_0 = H256::from(0xff); + let tx_1 = H256::from(0xaf); - let mut extras = Extras::default(); - extras.block_hashes.insert(0, block_0.clone()); - extras.block_hashes.insert(1, block_1.clone()); - extras.transaction_hashes.insert(0, vec![tx_0.clone()]); - extras.transaction_hashes.insert(1, vec![tx_1.clone()]); + let mut extras = Extras::default(); + extras.block_hashes.insert(0, block_0.clone()); + extras.block_hashes.insert(1, block_1.clone()); + extras.transaction_hashes.insert(0, vec![tx_0.clone()]); + extras.transaction_hashes.insert(1, vec![tx_1.clone()]); - let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)); + let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)); - // import block 0 - let request = create_noncanon_import_request(0, block_0.clone()); - let mut batch = DBTransaction::new(); - tracedb.import(&mut batch, request); - db.key_value().write(batch).unwrap(); + // import block 0 + let request = create_noncanon_import_request(0, block_0.clone()); + let mut batch = DBTransaction::new(); + tracedb.import(&mut batch, request); + db.key_value().write(batch).unwrap(); - assert!(tracedb.traces(&block_0).is_some(), "Traces should be available even if block is non-canon."); - } + assert!( + tracedb.traces(&block_0).is_some(), + "Traces should be available even if block is non-canon." 
+ ); + } - #[test] - fn test_import() { - let db = new_db(); - let mut config = Config::default(); - config.enabled = true; - let block_1 = H256::from(0xa1); - let block_2 = H256::from(0xa2); - let tx_1 = H256::from(0xff); - let tx_2 = H256::from(0xaf); + #[test] + fn test_import() { + let db = new_db(); + let mut config = Config::default(); + config.enabled = true; + let block_1 = H256::from(0xa1); + let block_2 = H256::from(0xa2); + let tx_1 = H256::from(0xff); + let tx_2 = H256::from(0xaf); - let mut extras = Extras::default(); - extras.block_hashes.insert(0, H256::default()); + let mut extras = Extras::default(); + extras.block_hashes.insert(0, H256::default()); - extras.block_hashes.insert(1, block_1.clone()); - extras.block_hashes.insert(2, block_2.clone()); - extras.transaction_hashes.insert(1, vec![tx_1.clone()]); - extras.transaction_hashes.insert(2, vec![tx_2.clone()]); + extras.block_hashes.insert(1, block_1.clone()); + extras.block_hashes.insert(2, block_2.clone()); + extras.transaction_hashes.insert(1, vec![tx_1.clone()]); + extras.transaction_hashes.insert(2, vec![tx_2.clone()]); - let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)); + let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)); - // import block 1 - let request = create_simple_import_request(1, block_1.clone()); - let mut batch = DBTransaction::new(); - tracedb.import(&mut batch, request); - db.key_value().write(batch).unwrap(); + // import block 1 + let request = create_simple_import_request(1, block_1.clone()); + let mut batch = DBTransaction::new(); + tracedb.import(&mut batch, request); + db.key_value().write(batch).unwrap(); - let filter = Filter { - range: (1..1), - from_address: AddressesFilter::from(vec![Address::from(1)]), - to_address: AddressesFilter::from(vec![]), - }; + let filter = Filter { + range: (1..1), + from_address: AddressesFilter::from(vec![Address::from(1)]), + to_address: AddressesFilter::from(vec![]), + }; - let traces = 
tracedb.filter(&filter); - assert_eq!(traces.len(), 1); - assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone())); + let traces = tracedb.filter(&filter); + assert_eq!(traces.len(), 1); + assert_eq!( + traces[0], + create_simple_localized_trace(1, block_1.clone(), tx_1.clone()) + ); - // import block 2 - let request = create_simple_import_request(2, block_2.clone()); - let mut batch = DBTransaction::new(); - tracedb.import(&mut batch, request); - db.key_value().write(batch).unwrap(); + // import block 2 + let request = create_simple_import_request(2, block_2.clone()); + let mut batch = DBTransaction::new(); + tracedb.import(&mut batch, request); + db.key_value().write(batch).unwrap(); - let filter = Filter { - range: (1..2), - from_address: AddressesFilter::from(vec![Address::from(1)]), - to_address: AddressesFilter::from(vec![]), - }; + let filter = Filter { + range: (1..2), + from_address: AddressesFilter::from(vec![Address::from(1)]), + to_address: AddressesFilter::from(vec![]), + }; - let traces = tracedb.filter(&filter); - assert_eq!(traces.len(), 2); - assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone())); - assert_eq!(traces[1], create_simple_localized_trace(2, block_2.clone(), tx_2.clone())); + let traces = tracedb.filter(&filter); + assert_eq!(traces.len(), 2); + assert_eq!( + traces[0], + create_simple_localized_trace(1, block_1.clone(), tx_1.clone()) + ); + assert_eq!( + traces[1], + create_simple_localized_trace(2, block_2.clone(), tx_2.clone()) + ); - assert!(tracedb.block_traces(0).is_some(), "Genesis trace should be always present."); + assert!( + tracedb.block_traces(0).is_some(), + "Genesis trace should be always present." 
+ ); - let traces = tracedb.block_traces(1).unwrap(); - assert_eq!(traces.len(), 1); - assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone())); + let traces = tracedb.block_traces(1).unwrap(); + assert_eq!(traces.len(), 1); + assert_eq!( + traces[0], + create_simple_localized_trace(1, block_1.clone(), tx_1.clone()) + ); - let traces = tracedb.block_traces(2).unwrap(); - assert_eq!(traces.len(), 1); - assert_eq!(traces[0], create_simple_localized_trace(2, block_2.clone(), tx_2.clone())); + let traces = tracedb.block_traces(2).unwrap(); + assert_eq!(traces.len(), 1); + assert_eq!( + traces[0], + create_simple_localized_trace(2, block_2.clone(), tx_2.clone()) + ); - assert_eq!(None, tracedb.block_traces(3)); + assert_eq!(None, tracedb.block_traces(3)); - let traces = tracedb.transaction_traces(1, 0).unwrap(); - assert_eq!(traces.len(), 1); - assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone())); + let traces = tracedb.transaction_traces(1, 0).unwrap(); + assert_eq!(traces.len(), 1); + assert_eq!( + traces[0], + create_simple_localized_trace(1, block_1.clone(), tx_1.clone()) + ); - let traces = tracedb.transaction_traces(2, 0).unwrap(); - assert_eq!(traces.len(), 1); - assert_eq!(traces[0], create_simple_localized_trace(2, block_2.clone(), tx_2.clone())); + let traces = tracedb.transaction_traces(2, 0).unwrap(); + assert_eq!(traces.len(), 1); + assert_eq!( + traces[0], + create_simple_localized_trace(2, block_2.clone(), tx_2.clone()) + ); - assert_eq!(None, tracedb.transaction_traces(2, 1)); + assert_eq!(None, tracedb.transaction_traces(2, 1)); - assert_eq!(tracedb.trace(1, 0, vec![]).unwrap(), create_simple_localized_trace(1, block_1.clone(), tx_1.clone())); - assert_eq!(tracedb.trace(2, 0, vec![]).unwrap(), create_simple_localized_trace(2, block_2.clone(), tx_2.clone())); - } + assert_eq!( + tracedb.trace(1, 0, vec![]).unwrap(), + create_simple_localized_trace(1, block_1.clone(), tx_1.clone()) + ); + 
assert_eq!( + tracedb.trace(2, 0, vec![]).unwrap(), + create_simple_localized_trace(2, block_2.clone(), tx_2.clone()) + ); + } - #[test] - fn query_trace_after_reopen() { - let db = new_db(); - let mut config = Config::default(); - let mut extras = Extras::default(); - let block_0 = H256::from(0xa1); - let tx_0 = H256::from(0xff); + #[test] + fn query_trace_after_reopen() { + let db = new_db(); + let mut config = Config::default(); + let mut extras = Extras::default(); + let block_0 = H256::from(0xa1); + let tx_0 = H256::from(0xff); - extras.block_hashes.insert(0, H256::default()); - extras.transaction_hashes.insert(0, vec![]); - extras.block_hashes.insert(1, block_0.clone()); - extras.transaction_hashes.insert(1, vec![tx_0.clone()]); + extras.block_hashes.insert(0, H256::default()); + extras.transaction_hashes.insert(0, vec![]); + extras.block_hashes.insert(1, block_0.clone()); + extras.transaction_hashes.insert(1, vec![tx_0.clone()]); - // set tracing on - config.enabled = true; + // set tracing on + config.enabled = true; - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())); + { + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())); - // import block 1 - let request = create_simple_import_request(1, block_0.clone()); - let mut batch = DBTransaction::new(); - tracedb.import(&mut batch, request); - db.key_value().write(batch).unwrap(); - } + // import block 1 + let request = create_simple_import_request(1, block_0.clone()); + let mut batch = DBTransaction::new(); + tracedb.import(&mut batch, request); + db.key_value().write(batch).unwrap(); + } - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)); - let traces = tracedb.transaction_traces(1, 0); - assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(1, block_0, tx_0)]); - } - } + { + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)); + let traces = tracedb.transaction_traces(1, 0); + 
assert_eq!( + traces.unwrap(), + vec![create_simple_localized_trace(1, block_0, tx_0)] + ); + } + } - #[test] - fn query_genesis() { - let db = new_db(); - let mut config = Config::default(); - let mut extras = Extras::default(); - let block_0 = H256::from(0xa1); + #[test] + fn query_genesis() { + let db = new_db(); + let mut config = Config::default(); + let mut extras = Extras::default(); + let block_0 = H256::from(0xa1); - extras.block_hashes.insert(0, block_0.clone()); - extras.transaction_hashes.insert(0, vec![]); + extras.block_hashes.insert(0, block_0.clone()); + extras.transaction_hashes.insert(0, vec![]); - // set tracing on - config.enabled = true; + // set tracing on + config.enabled = true; - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())); - let traces = tracedb.block_traces(0).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())); + let traces = tracedb.block_traces(0).unwrap(); - assert_eq!(traces.len(), 0); - } + assert_eq!(traces.len(), 0); + } } diff --git a/ethcore/src/trace/executive_tracer.rs b/ethcore/src/trace/executive_tracer.rs index 1653011c3..af0468ae6 100644 --- a/ethcore/src/trace/executive_tracer.rs +++ b/ethcore/src/trace/executive_tracer.rs @@ -16,306 +16,349 @@ //! Simple executive tracer. 
-use std::cmp::min; -use ethereum_types::{U256, Address}; -use vm::{Error as VmError, ActionParams}; +use ethereum_types::{Address, U256}; use log::{debug, warn}; -use trace::trace::{Call, Create, Action, Res, CreateResult, CallResult, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, Suicide, Reward, RewardType}; -use trace::{Tracer, VMTracer, FlatTrace}; +use std::cmp::min; +use trace::{ + trace::{ + Action, Call, CallResult, Create, CreateResult, MemoryDiff, Res, Reward, RewardType, + StorageDiff, Suicide, VMExecutedOperation, VMOperation, VMTrace, + }, + FlatTrace, Tracer, VMTracer, +}; +use vm::{ActionParams, Error as VmError}; /// Simple executive tracer. Traces all calls and creates. Ignores delegatecalls. #[derive(Default)] pub struct ExecutiveTracer { - traces: Vec, - index_stack: Vec, - vecindex_stack: Vec, - sublen_stack: Vec, - skip_one: bool, + traces: Vec, + index_stack: Vec, + vecindex_stack: Vec, + sublen_stack: Vec, + skip_one: bool, } impl Tracer for ExecutiveTracer { - type Output = FlatTrace; + type Output = FlatTrace; - fn prepare_trace_call(&mut self, params: &ActionParams, depth: usize, is_builtin: bool) { - assert!(!self.skip_one, "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_call it cannot be true; qed"); + fn prepare_trace_call(&mut self, params: &ActionParams, depth: usize, is_builtin: bool) { + assert!(!self.skip_one, "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_call it cannot be true; qed"); - if depth != 0 && is_builtin && params.value.value() == U256::zero() { - self.skip_one = true; - return; - } + if depth != 0 && is_builtin && params.value.value() == U256::zero() { + self.skip_one = true; + return; + } - if let Some(parentlen) = self.sublen_stack.last_mut() { - *parentlen += 1; - } + if let Some(parentlen) = self.sublen_stack.last_mut() { + *parentlen += 1; + } - let trace = FlatTrace { - trace_address: 
self.index_stack.clone(), - subtraces: self.sublen_stack.last().cloned().unwrap_or(0), - action: Action::Call(Call::from(params.clone())), - result: Res::Call(CallResult { - gas_used: U256::zero(), - output: Vec::new() - }), - }; - self.vecindex_stack.push(self.traces.len()); - self.traces.push(trace); - self.index_stack.push(0); - self.sublen_stack.push(0); - } + let trace = FlatTrace { + trace_address: self.index_stack.clone(), + subtraces: self.sublen_stack.last().cloned().unwrap_or(0), + action: Action::Call(Call::from(params.clone())), + result: Res::Call(CallResult { + gas_used: U256::zero(), + output: Vec::new(), + }), + }; + self.vecindex_stack.push(self.traces.len()); + self.traces.push(trace); + self.index_stack.push(0); + self.sublen_stack.push(0); + } - fn prepare_trace_create(&mut self, params: &ActionParams) { - assert!(!self.skip_one, "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_create it cannot be true; qed"); + fn prepare_trace_create(&mut self, params: &ActionParams) { + assert!(!self.skip_one, "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_create it cannot be true; qed"); - if let Some(parentlen) = self.sublen_stack.last_mut() { - *parentlen += 1; - } + if let Some(parentlen) = self.sublen_stack.last_mut() { + *parentlen += 1; + } - let trace = FlatTrace { - trace_address: self.index_stack.clone(), - subtraces: self.sublen_stack.last().cloned().unwrap_or(0), - action: Action::Create(Create::from(params.clone())), - result: Res::Create(CreateResult { - gas_used: U256::zero(), - code: Vec::new(), - address: Address::default(), - }), - }; - self.vecindex_stack.push(self.traces.len()); - self.traces.push(trace); - self.index_stack.push(0); - self.sublen_stack.push(0); - } + let trace = FlatTrace { + trace_address: self.index_stack.clone(), + subtraces: self.sublen_stack.last().cloned().unwrap_or(0), + action: 
Action::Create(Create::from(params.clone())), + result: Res::Create(CreateResult { + gas_used: U256::zero(), + code: Vec::new(), + address: Address::default(), + }), + }; + self.vecindex_stack.push(self.traces.len()); + self.traces.push(trace); + self.index_stack.push(0); + self.sublen_stack.push(0); + } - fn done_trace_call(&mut self, gas_used: U256, output: &[u8]) { - if self.skip_one { - self.skip_one = false; - return; - } + fn done_trace_call(&mut self, gas_used: U256, output: &[u8]) { + if self.skip_one { + self.skip_one = false; + return; + } - let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_call before this function; vecindex_stack is never empty; qed"); - let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_call before this function; sublen_stack is never empty; qed"); - self.index_stack.pop(); + let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_call before this function; vecindex_stack is never empty; qed"); + let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_call before this function; sublen_stack is never empty; qed"); + self.index_stack.pop(); - self.traces[vecindex].result = Res::Call(CallResult { - gas_used, - output: output.into(), - }); - self.traces[vecindex].subtraces = sublen; + self.traces[vecindex].result = Res::Call(CallResult { + gas_used, + output: output.into(), + }); + self.traces[vecindex].subtraces = sublen; - if let Some(index) = self.index_stack.last_mut() { - *index += 1; - } - } + if let Some(index) = self.index_stack.last_mut() { + *index += 1; + } + } - fn done_trace_create(&mut self, gas_used: U256, code: &[u8], address: Address) { - assert!(!self.skip_one, "skip_one is only set with prepare_trace_call for builtin contracts with no subsequent calls; skip_one cannot be true after the same level prepare_trace_create; qed"); + fn done_trace_create(&mut self, gas_used: U256, code: &[u8], address: Address) { + 
assert!(!self.skip_one, "skip_one is only set with prepare_trace_call for builtin contracts with no subsequent calls; skip_one cannot be true after the same level prepare_trace_create; qed"); - let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_create before this function; vecindex_stack is never empty; qed"); - let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_create before this function; sublen_stack is never empty; qed"); - self.index_stack.pop(); + let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_create before this function; vecindex_stack is never empty; qed"); + let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_create before this function; sublen_stack is never empty; qed"); + self.index_stack.pop(); - self.traces[vecindex].result = Res::Create(CreateResult { - gas_used, address, - code: code.into(), - }); - self.traces[vecindex].subtraces = sublen; + self.traces[vecindex].result = Res::Create(CreateResult { + gas_used, + address, + code: code.into(), + }); + self.traces[vecindex].subtraces = sublen; - if let Some(index) = self.index_stack.last_mut() { - *index += 1; - } - } + if let Some(index) = self.index_stack.last_mut() { + *index += 1; + } + } - fn done_trace_failed(&mut self, error: &VmError) { - if self.skip_one { - self.skip_one = false; - return; - } + fn done_trace_failed(&mut self, error: &VmError) { + if self.skip_one { + self.skip_one = false; + return; + } - let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_create/call before this function; vecindex_stack is never empty; qed"); - let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_create/call before this function; vecindex_stack is never empty; qed"); - self.index_stack.pop(); + let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_create/call before this function; vecindex_stack is never empty; 
qed"); + let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_create/call before this function; vecindex_stack is never empty; qed"); + self.index_stack.pop(); - let is_create = match self.traces[vecindex].action { - Action::Create(_) => true, - _ => false, - }; + let is_create = match self.traces[vecindex].action { + Action::Create(_) => true, + _ => false, + }; - if is_create { - self.traces[vecindex].result = Res::FailedCreate(error.into()); - } else { - self.traces[vecindex].result = Res::FailedCall(error.into()); - } - self.traces[vecindex].subtraces = sublen; + if is_create { + self.traces[vecindex].result = Res::FailedCreate(error.into()); + } else { + self.traces[vecindex].result = Res::FailedCall(error.into()); + } + self.traces[vecindex].subtraces = sublen; - if let Some(index) = self.index_stack.last_mut() { - *index += 1; - } - } + if let Some(index) = self.index_stack.last_mut() { + *index += 1; + } + } - fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address) { - if let Some(parentlen) = self.sublen_stack.last_mut() { - *parentlen += 1; - } + fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address) { + if let Some(parentlen) = self.sublen_stack.last_mut() { + *parentlen += 1; + } - let trace = FlatTrace { - subtraces: 0, - action: Action::Suicide(Suicide { address, refund_address, balance } ), - result: Res::None, - trace_address: self.index_stack.clone(), - }; - debug!(target: "trace", "Traced suicide {:?}", trace); - self.traces.push(trace); + let trace = FlatTrace { + subtraces: 0, + action: Action::Suicide(Suicide { + address, + refund_address, + balance, + }), + result: Res::None, + trace_address: self.index_stack.clone(), + }; + debug!(target: "trace", "Traced suicide {:?}", trace); + self.traces.push(trace); - if let Some(index) = self.index_stack.last_mut() { - *index += 1; - } - } + if let Some(index) = self.index_stack.last_mut() { + *index += 1; + } + } - fn 
trace_reward(&mut self, author: Address, value: U256, reward_type: RewardType) { - if let Some(parentlen) = self.sublen_stack.last_mut() { - *parentlen += 1; - } + fn trace_reward(&mut self, author: Address, value: U256, reward_type: RewardType) { + if let Some(parentlen) = self.sublen_stack.last_mut() { + *parentlen += 1; + } - let trace = FlatTrace { - subtraces: 0, - action: Action::Reward(Reward { author, value, reward_type } ), - result: Res::None, - trace_address: self.index_stack.clone(), - }; - debug!(target: "trace", "Traced reward {:?}", trace); - self.traces.push(trace); + let trace = FlatTrace { + subtraces: 0, + action: Action::Reward(Reward { + author, + value, + reward_type, + }), + result: Res::None, + trace_address: self.index_stack.clone(), + }; + debug!(target: "trace", "Traced reward {:?}", trace); + self.traces.push(trace); - if let Some(index) = self.index_stack.last_mut() { - *index += 1; - } - } + if let Some(index) = self.index_stack.last_mut() { + *index += 1; + } + } - fn drain(self) -> Vec { - self.traces - } + fn drain(self) -> Vec { + self.traces + } } struct TraceData { - mem_written: Option<(usize, usize)>, - store_written: Option<(U256, U256)>, + mem_written: Option<(usize, usize)>, + store_written: Option<(U256, U256)>, } /// Simple VM tracer. Traces all operations. pub struct ExecutiveVMTracer { - data: VMTrace, - depth: usize, - trace_stack: Vec, + data: VMTrace, + depth: usize, + trace_stack: Vec, } impl ExecutiveVMTracer { - /// Create a new top-level instance. - pub fn toplevel() -> Self { - ExecutiveVMTracer { - data: VMTrace { - parent_step: 0, - code: vec![], - operations: vec![Default::default()], // prefill with a single entry so that prepare_subtrace can get the parent_step - subs: vec![], - }, - depth: 0, - trace_stack: vec![], - } - } + /// Create a new top-level instance. 
+ pub fn toplevel() -> Self { + ExecutiveVMTracer { + data: VMTrace { + parent_step: 0, + code: vec![], + operations: vec![Default::default()], // prefill with a single entry so that prepare_subtrace can get the parent_step + subs: vec![], + }, + depth: 0, + trace_stack: vec![], + } + } - fn with_trace_in_depth(trace: &mut VMTrace, depth: usize, f: F) { - if depth == 0 { - f(trace); - } else { - Self::with_trace_in_depth(trace.subs.last_mut().expect("self.depth is incremented with prepare_subtrace; a subtrace is always pushed; self.depth cannot be greater than subtrace stack; qed"), depth - 1, f); - } - } + fn with_trace_in_depth(trace: &mut VMTrace, depth: usize, f: F) { + if depth == 0 { + f(trace); + } else { + Self::with_trace_in_depth(trace.subs.last_mut().expect("self.depth is incremented with prepare_subtrace; a subtrace is always pushed; self.depth cannot be greater than subtrace stack; qed"), depth - 1, f); + } + } } impl VMTracer for ExecutiveVMTracer { - type Output = VMTrace; + type Output = VMTrace; - fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8, _current_gas: U256) -> bool { true } + fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8, _current_gas: U256) -> bool { + true + } - fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: U256, mem_written: Option<(usize, usize)>, store_written: Option<(U256, U256)>) { - Self::with_trace_in_depth(&mut self.data, self.depth, move |trace| { - trace.operations.push(VMOperation { - pc: pc, - instruction: instruction, - gas_cost: gas_cost, - executed: None, - }); - }); - self.trace_stack.push(TraceData { mem_written, store_written }); - } + fn trace_prepare_execute( + &mut self, + pc: usize, + instruction: u8, + gas_cost: U256, + mem_written: Option<(usize, usize)>, + store_written: Option<(U256, U256)>, + ) { + Self::with_trace_in_depth(&mut self.data, self.depth, move |trace| { + trace.operations.push(VMOperation { + pc: pc, + instruction: instruction, + 
gas_cost: gas_cost, + executed: None, + }); + }); + self.trace_stack.push(TraceData { + mem_written, + store_written, + }); + } - fn trace_failed(&mut self) { - let _ = self.trace_stack.pop().expect("pushed in trace_prepare_execute; qed"); - } + fn trace_failed(&mut self) { + let _ = self + .trace_stack + .pop() + .expect("pushed in trace_prepare_execute; qed"); + } - fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem: &[u8]) { - let TraceData { mem_written, store_written } = self.trace_stack.pop().expect("pushed in trace_prepare_execute; qed"); - let mem_diff = mem_written.map(|(o, s)| { - if o + s > mem.len() { - warn!(target: "trace", "mem_written is out of bounds"); - } - (o, &mem[min(mem.len(), o)..min(o + s, mem.len())]) - }); - let store_diff = store_written; - Self::with_trace_in_depth(&mut self.data, self.depth, move |trace| { - let ex = VMExecutedOperation { - gas_used: gas_used, - stack_push: stack_push.to_vec(), - mem_diff: mem_diff.map(|(s, r)| MemoryDiff { offset: s, data: r.to_vec() }), - store_diff: store_diff.map(|(l, v)| StorageDiff { location: l, value: v }), - }; - trace.operations.last_mut().expect("trace_executed is always called after a trace_prepare_execute; trace.operations cannot be empty; qed").executed = Some(ex); - }); - } + fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem: &[u8]) { + let TraceData { + mem_written, + store_written, + } = self + .trace_stack + .pop() + .expect("pushed in trace_prepare_execute; qed"); + let mem_diff = mem_written.map(|(o, s)| { + if o + s > mem.len() { + warn!(target: "trace", "mem_written is out of bounds"); + } + (o, &mem[min(mem.len(), o)..min(o + s, mem.len())]) + }); + let store_diff = store_written; + Self::with_trace_in_depth(&mut self.data, self.depth, move |trace| { + let ex = VMExecutedOperation { + gas_used: gas_used, + stack_push: stack_push.to_vec(), + mem_diff: mem_diff.map(|(s, r)| MemoryDiff { + offset: s, + data: r.to_vec(), + }), + store_diff: 
store_diff.map(|(l, v)| StorageDiff { + location: l, + value: v, + }), + }; + trace.operations.last_mut().expect("trace_executed is always called after a trace_prepare_execute; trace.operations cannot be empty; qed").executed = Some(ex); + }); + } - fn prepare_subtrace(&mut self, code: &[u8]) { - Self::with_trace_in_depth(&mut self.data, self.depth, move |trace| { - let parent_step = trace.operations.len() - 1; // won't overflow since we must already have pushed an operation in trace_prepare_execute. - trace.subs.push(VMTrace { - parent_step, - code: code.to_vec(), - operations: vec![], - subs: vec![], - }); - }); - self.depth += 1; - } + fn prepare_subtrace(&mut self, code: &[u8]) { + Self::with_trace_in_depth(&mut self.data, self.depth, move |trace| { + let parent_step = trace.operations.len() - 1; // won't overflow since we must already have pushed an operation in trace_prepare_execute. + trace.subs.push(VMTrace { + parent_step, + code: code.to_vec(), + operations: vec![], + subs: vec![], + }); + }); + self.depth += 1; + } - fn done_subtrace(&mut self) { - self.depth -= 1; - } + fn done_subtrace(&mut self) { + self.depth -= 1; + } - fn drain(mut self) -> Option { self.data.subs.pop() } + fn drain(mut self) -> Option { + self.data.subs.pop() + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn should_prefix_address_properly() { - let mut tracer = ExecutiveTracer::default(); + #[test] + fn should_prefix_address_properly() { + let mut tracer = ExecutiveTracer::default(); - tracer.prepare_trace_call(&ActionParams::default(), 0, false); - tracer.prepare_trace_call(&ActionParams::default(), 1, false); - tracer.prepare_trace_call(&ActionParams::default(), 2, false); - tracer.done_trace_call(U256::zero(), &[]); - tracer.prepare_trace_call(&ActionParams::default(), 2, false); - tracer.done_trace_call(U256::zero(), &[]); - tracer.prepare_trace_call(&ActionParams::default(), 2, false); - tracer.done_trace_call(U256::zero(), &[]); - 
tracer.done_trace_call(U256::zero(), &[]); - tracer.done_trace_call(U256::zero(), &[]); + tracer.prepare_trace_call(&ActionParams::default(), 0, false); + tracer.prepare_trace_call(&ActionParams::default(), 1, false); + tracer.prepare_trace_call(&ActionParams::default(), 2, false); + tracer.done_trace_call(U256::zero(), &[]); + tracer.prepare_trace_call(&ActionParams::default(), 2, false); + tracer.done_trace_call(U256::zero(), &[]); + tracer.prepare_trace_call(&ActionParams::default(), 2, false); + tracer.done_trace_call(U256::zero(), &[]); + tracer.done_trace_call(U256::zero(), &[]); + tracer.done_trace_call(U256::zero(), &[]); - let drained = tracer.drain(); - assert!(drained[0].trace_address.len() == 0); - assert_eq!(&drained[1].trace_address, &[0]); - assert_eq!(&drained[2].trace_address, &[0, 0]); - assert_eq!(&drained[3].trace_address, &[0, 1]); - assert_eq!(&drained[4].trace_address, &[0, 2]); - } + let drained = tracer.drain(); + assert!(drained[0].trace_address.len() == 0); + assert_eq!(&drained[1].trace_address, &[0]); + assert_eq!(&drained[2].trace_address, &[0, 0]); + assert_eq!(&drained[3].trace_address, &[0, 1]); + assert_eq!(&drained[4].trace_address, &[0, 2]); + } } diff --git a/ethcore/src/trace/import.rs b/ethcore/src/trace/import.rs index e9ec9c77b..0e0c531b4 100644 --- a/ethcore/src/trace/import.rs +++ b/ethcore/src/trace/import.rs @@ -22,16 +22,16 @@ use trace::FlatBlockTraces; /// Traces import request. pub struct ImportRequest { - /// Traces to import. - pub traces: FlatBlockTraces, - /// Hash of traces block. - pub block_hash: H256, - /// Number of traces block. - pub block_number: BlockNumber, - /// Blocks enacted by this import. - /// - /// They should be ordered from oldest to newest. - pub enacted: Vec, - /// Number of blocks retracted by this import. - pub retracted: usize, + /// Traces to import. + pub traces: FlatBlockTraces, + /// Hash of traces block. + pub block_hash: H256, + /// Number of traces block. 
+ pub block_number: BlockNumber, + /// Blocks enacted by this import. + /// + /// They should be ordered from oldest to newest. + pub enacted: Vec, + /// Number of blocks retracted by this import. + pub retracted: usize, } diff --git a/ethcore/src/trace/mod.rs b/ethcore/src/trace/mod.rs index 98521dbb0..3eacf0644 100644 --- a/ethcore/src/trace/mod.rs +++ b/ethcore/src/trace/mod.rs @@ -23,112 +23,136 @@ mod import; mod noop_tracer; mod types; -pub use self::config::Config; -pub use self::db::TraceDB; -pub use self::noop_tracer::{NoopTracer, NoopVMTracer}; -pub use self::executive_tracer::{ExecutiveTracer, ExecutiveVMTracer}; -pub use self::import::ImportRequest; -pub use self::localized::LocalizedTrace; +pub use self::{ + config::Config, + db::TraceDB, + executive_tracer::{ExecutiveTracer, ExecutiveVMTracer}, + import::ImportRequest, + localized::LocalizedTrace, + noop_tracer::{NoopTracer, NoopVMTracer}, +}; -pub use self::types::{filter, flat, localized, trace, Tracing}; -pub use self::types::error::Error as TraceError; -pub use self::types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, RewardType}; -pub use self::types::flat::{FlatTrace, FlatTransactionTraces, FlatBlockTraces}; -pub use self::types::filter::{Filter, AddressesFilter}; +pub use self::types::{ + error::Error as TraceError, + filter, + filter::{AddressesFilter, Filter}, + flat, + flat::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}, + localized, trace, + trace::{MemoryDiff, RewardType, StorageDiff, VMExecutedOperation, VMOperation, VMTrace}, + Tracing, +}; -use ethereum_types::{H256, U256, Address}; +use ethereum_types::{Address, H256, U256}; use kvdb::DBTransaction; -use vm::{Error as VmError, ActionParams}; use types::BlockNumber; +use vm::{ActionParams, Error as VmError}; /// This trait is used by executive to build traces. pub trait Tracer: Send { - /// Data returned when draining the Tracer. - type Output; + /// Data returned when draining the Tracer. 
+ type Output; - /// Prepares call trace for given params. Would panic if prepare/done_trace are not balanced. - fn prepare_trace_call(&mut self, params: &ActionParams, depth: usize, is_builtin: bool); + /// Prepares call trace for given params. Would panic if prepare/done_trace are not balanced. + fn prepare_trace_call(&mut self, params: &ActionParams, depth: usize, is_builtin: bool); - /// Prepares create trace for given params. Would panic if prepare/done_trace are not balanced. - fn prepare_trace_create(&mut self, params: &ActionParams); + /// Prepares create trace for given params. Would panic if prepare/done_trace are not balanced. + fn prepare_trace_create(&mut self, params: &ActionParams); - /// Finishes a successful call trace. Would panic if prepare/done_trace are not balanced. - fn done_trace_call(&mut self, gas_used: U256, output: &[u8]); + /// Finishes a successful call trace. Would panic if prepare/done_trace are not balanced. + fn done_trace_call(&mut self, gas_used: U256, output: &[u8]); - /// Finishes a successful create trace. Would panic if prepare/done_trace are not balanced. - fn done_trace_create(&mut self, gas_used: U256, code: &[u8], address: Address); + /// Finishes a successful create trace. Would panic if prepare/done_trace are not balanced. + fn done_trace_create(&mut self, gas_used: U256, code: &[u8], address: Address); - /// Finishes a failed trace. Would panic if prepare/done_trace are not balanced. - fn done_trace_failed(&mut self, error: &VmError); + /// Finishes a failed trace. Would panic if prepare/done_trace are not balanced. + fn done_trace_failed(&mut self, error: &VmError); - /// Stores suicide info. - fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address); + /// Stores suicide info. + fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address); - /// Stores reward info. 
- fn trace_reward(&mut self, author: Address, value: U256, reward_type: RewardType); + /// Stores reward info. + fn trace_reward(&mut self, author: Address, value: U256, reward_type: RewardType); - /// Consumes self and returns all traces. - fn drain(self) -> Vec; + /// Consumes self and returns all traces. + fn drain(self) -> Vec; } /// Used by executive to build VM traces. pub trait VMTracer: Send { + /// Data returned when draining the VMTracer. + type Output; - /// Data returned when draining the VMTracer. - type Output; + /// Trace the progression of interpreter to next instruction. + /// If tracer returns `false` it won't be called again. + /// @returns true if `trace_prepare_execute` and `trace_executed` should be called. + fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8, _current_gas: U256) -> bool { + false + } - /// Trace the progression of interpreter to next instruction. - /// If tracer returns `false` it won't be called again. - /// @returns true if `trace_prepare_execute` and `trace_executed` should be called. - fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8, _current_gas: U256) -> bool { false } + /// Trace the preparation to execute a single valid instruction. + fn trace_prepare_execute( + &mut self, + _pc: usize, + _instruction: u8, + _gas_cost: U256, + _mem_written: Option<(usize, usize)>, + _store_written: Option<(U256, U256)>, + ) { + } - /// Trace the preparation to execute a single valid instruction. - fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: U256, _mem_written: Option<(usize, usize)>, _store_written: Option<(U256, U256)>) {} + /// Trace the execution failure of a single instruction. + fn trace_failed(&mut self) {} - /// Trace the execution failure of a single instruction. - fn trace_failed(&mut self) {} + /// Trace the finalised execution of a single valid instruction. 
+ fn trace_executed(&mut self, _gas_used: U256, _stack_push: &[U256], _mem: &[u8]) {} - /// Trace the finalised execution of a single valid instruction. - fn trace_executed(&mut self, _gas_used: U256, _stack_push: &[U256], _mem: &[u8]) {} + /// Spawn subtracer which will be used to trace deeper levels of execution. + fn prepare_subtrace(&mut self, _code: &[u8]) {} - /// Spawn subtracer which will be used to trace deeper levels of execution. - fn prepare_subtrace(&mut self, _code: &[u8]) {} - - /// Finalize subtracer. - fn done_subtrace(&mut self) {} - - /// Consumes self and returns the VM trace. - fn drain(self) -> Option; + /// Finalize subtracer. + fn done_subtrace(&mut self) {} + /// Consumes self and returns the VM trace. + fn drain(self) -> Option; } /// `DbExtras` provides an interface to query extra data which is not stored in tracesdb, /// but necessary to work correctly. pub trait DatabaseExtras { - /// Returns hash of given block number. - fn block_hash(&self, block_number: BlockNumber) -> Option; + /// Returns hash of given block number. + fn block_hash(&self, block_number: BlockNumber) -> Option; - /// Returns hash of transaction at given position. - fn transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option; + /// Returns hash of transaction at given position. + fn transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option; } /// Db provides an interface to query tracesdb. pub trait Database { - /// Returns true if tracing is enabled. Otherwise false. - fn tracing_enabled(&self) -> bool; + /// Returns true if tracing is enabled. Otherwise false. + fn tracing_enabled(&self) -> bool; - /// Imports new block traces. - fn import(&self, batch: &mut DBTransaction, request: ImportRequest); + /// Imports new block traces. + fn import(&self, batch: &mut DBTransaction, request: ImportRequest); - /// Returns localized trace at given position. 
- fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec) -> Option; + /// Returns localized trace at given position. + fn trace( + &self, + block_number: BlockNumber, + tx_position: usize, + trace_position: Vec, + ) -> Option; - /// Returns localized traces created by a single transaction. - fn transaction_traces(&self, block_number: BlockNumber, tx_position: usize) -> Option>; + /// Returns localized traces created by a single transaction. + fn transaction_traces( + &self, + block_number: BlockNumber, + tx_position: usize, + ) -> Option>; - /// Returns localized traces created in given block. - fn block_traces(&self, block_number: BlockNumber) -> Option>; + /// Returns localized traces created in given block. + fn block_traces(&self, block_number: BlockNumber) -> Option>; - /// Filter traces matching given filter. - fn filter(&self, filter: &Filter) -> Vec; + /// Filter traces matching given filter. + fn filter(&self, filter: &Filter) -> Vec; } diff --git a/ethcore/src/trace/noop_tracer.rs b/ethcore/src/trace/noop_tracer.rs index 62cce8e01..87f814536 100644 --- a/ethcore/src/trace/noop_tracer.rs +++ b/ethcore/src/trace/noop_tracer.rs @@ -16,32 +16,38 @@ //! Nonoperative tracer. -use ethereum_types::{U256, Address}; -use vm::{Error as VmError, ActionParams}; -use trace::{Tracer, VMTracer, FlatTrace}; -use trace::trace::{VMTrace, RewardType}; +use ethereum_types::{Address, U256}; +use trace::{ + trace::{RewardType, VMTrace}, + FlatTrace, Tracer, VMTracer, +}; +use vm::{ActionParams, Error as VmError}; /// Nonoperative tracer. Does not trace anything. 
pub struct NoopTracer; impl Tracer for NoopTracer { - type Output = FlatTrace; + type Output = FlatTrace; - fn prepare_trace_call(&mut self, _: &ActionParams, _: usize, _: bool) { } - fn prepare_trace_create(&mut self, _: &ActionParams) { } - fn done_trace_call(&mut self, _: U256, _: &[u8]) { } - fn done_trace_create(&mut self, _: U256, _: &[u8], _: Address) { } - fn done_trace_failed(&mut self, _: &VmError) { } - fn trace_suicide(&mut self, _: Address, _: U256, _: Address) { } - fn trace_reward(&mut self, _: Address, _: U256, _: RewardType) { } - fn drain(self) -> Vec { vec![] } + fn prepare_trace_call(&mut self, _: &ActionParams, _: usize, _: bool) {} + fn prepare_trace_create(&mut self, _: &ActionParams) {} + fn done_trace_call(&mut self, _: U256, _: &[u8]) {} + fn done_trace_create(&mut self, _: U256, _: &[u8], _: Address) {} + fn done_trace_failed(&mut self, _: &VmError) {} + fn trace_suicide(&mut self, _: Address, _: U256, _: Address) {} + fn trace_reward(&mut self, _: Address, _: U256, _: RewardType) {} + fn drain(self) -> Vec { + vec![] + } } /// Nonoperative VM tracer. Does not trace anything. pub struct NoopVMTracer; impl VMTracer for NoopVMTracer { - type Output = VMTrace; + type Output = VMTrace; - fn drain(self) -> Option { None } + fn drain(self) -> Option { + None + } } diff --git a/ethcore/src/trace/types/error.rs b/ethcore/src/trace/types/error.rs index 5c775dcb6..9704b8082 100644 --- a/ethcore/src/trace/types/error.rs +++ b/ethcore/src/trace/types/error.rs @@ -16,139 +16,139 @@ //! Trace errors. +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; use std::fmt; -use rlp::{Encodable, RlpStream, Decodable, DecoderError, Rlp}; use vm::Error as VmError; /// Trace evm errors. #[derive(Debug, PartialEq, Clone)] pub enum Error { - /// `OutOfGas` is returned when transaction execution runs out of gas. 
- OutOfGas, - /// `BadJumpDestination` is returned when execution tried to move - /// to position that wasn't marked with JUMPDEST instruction - BadJumpDestination, - /// `BadInstructions` is returned when given instruction is not supported - BadInstruction, - /// `StackUnderflow` when there is not enough stack elements to execute instruction - StackUnderflow, - /// When execution would exceed defined Stack Limit - OutOfStack, - /// When builtin contract failed on input data - BuiltIn, - /// Returned on evm internal error. Should never be ignored during development. - /// Likely to cause consensus issues. - Internal, - /// When execution tries to modify the state in static context - MutableCallInStaticContext, - /// Wasm error - Wasm, - /// Contract tried to access past the return data buffer. - OutOfBounds, - /// Execution has been reverted with REVERT instruction. - Reverted, + /// `OutOfGas` is returned when transaction execution runs out of gas. + OutOfGas, + /// `BadJumpDestination` is returned when execution tried to move + /// to position that wasn't marked with JUMPDEST instruction + BadJumpDestination, + /// `BadInstructions` is returned when given instruction is not supported + BadInstruction, + /// `StackUnderflow` when there is not enough stack elements to execute instruction + StackUnderflow, + /// When execution would exceed defined Stack Limit + OutOfStack, + /// When builtin contract failed on input data + BuiltIn, + /// Returned on evm internal error. Should never be ignored during development. + /// Likely to cause consensus issues. + Internal, + /// When execution tries to modify the state in static context + MutableCallInStaticContext, + /// Wasm error + Wasm, + /// Contract tried to access past the return data buffer. + OutOfBounds, + /// Execution has been reverted with REVERT instruction. 
+ Reverted, } impl<'a> From<&'a VmError> for Error { - fn from(e: &'a VmError) -> Self { - match *e { - VmError::OutOfGas => Error::OutOfGas, - VmError::BadJumpDestination { .. } => Error::BadJumpDestination, - VmError::BadInstruction { .. } => Error::BadInstruction, - VmError::StackUnderflow { .. } => Error::StackUnderflow, - VmError::OutOfStack { .. } => Error::OutOfStack, - VmError::BuiltIn { .. } => Error::BuiltIn, - VmError::Wasm { .. } => Error::Wasm, - VmError::Internal(_) => Error::Internal, - VmError::MutableCallInStaticContext => Error::MutableCallInStaticContext, - VmError::OutOfBounds => Error::OutOfBounds, - VmError::Reverted => Error::Reverted, - } - } + fn from(e: &'a VmError) -> Self { + match *e { + VmError::OutOfGas => Error::OutOfGas, + VmError::BadJumpDestination { .. } => Error::BadJumpDestination, + VmError::BadInstruction { .. } => Error::BadInstruction, + VmError::StackUnderflow { .. } => Error::StackUnderflow, + VmError::OutOfStack { .. } => Error::OutOfStack, + VmError::BuiltIn { .. } => Error::BuiltIn, + VmError::Wasm { .. 
} => Error::Wasm, + VmError::Internal(_) => Error::Internal, + VmError::MutableCallInStaticContext => Error::MutableCallInStaticContext, + VmError::OutOfBounds => Error::OutOfBounds, + VmError::Reverted => Error::Reverted, + } + } } impl From for Error { - fn from(e: VmError) -> Self { - Error::from(&e) - } + fn from(e: VmError) -> Self { + Error::from(&e) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::Error::*; - let message = match *self { - OutOfGas => "Out of gas", - BadJumpDestination => "Bad jump destination", - BadInstruction => "Bad instruction", - StackUnderflow => "Stack underflow", - OutOfStack => "Out of stack", - BuiltIn => "Built-in failed", - Wasm => "Wasm runtime error", - Internal => "Internal error", - MutableCallInStaticContext => "Mutable Call In Static Context", - OutOfBounds => "Out of bounds", - Reverted => "Reverted", - }; - message.fmt(f) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::Error::*; + let message = match *self { + OutOfGas => "Out of gas", + BadJumpDestination => "Bad jump destination", + BadInstruction => "Bad instruction", + StackUnderflow => "Stack underflow", + OutOfStack => "Out of stack", + BuiltIn => "Built-in failed", + Wasm => "Wasm runtime error", + Internal => "Internal error", + MutableCallInStaticContext => "Mutable Call In Static Context", + OutOfBounds => "Out of bounds", + Reverted => "Reverted", + }; + message.fmt(f) + } } impl Encodable for Error { - fn rlp_append(&self, s: &mut RlpStream) { - use self::Error::*; - let value = match *self { - OutOfGas => 0u8, - BadJumpDestination => 1, - BadInstruction => 2, - StackUnderflow => 3, - OutOfStack => 4, - Internal => 5, - BuiltIn => 6, - MutableCallInStaticContext => 7, - Wasm => 8, - OutOfBounds => 9, - Reverted => 10, - }; + fn rlp_append(&self, s: &mut RlpStream) { + use self::Error::*; + let value = match *self { + OutOfGas => 0u8, + BadJumpDestination => 1, + BadInstruction => 
2, + StackUnderflow => 3, + OutOfStack => 4, + Internal => 5, + BuiltIn => 6, + MutableCallInStaticContext => 7, + Wasm => 8, + OutOfBounds => 9, + Reverted => 10, + }; - s.append_internal(&value); - } + s.append_internal(&value); + } } impl Decodable for Error { - fn decode(rlp: &Rlp) -> Result { - use self::Error::*; - let value: u8 = rlp.as_val()?; - match value { - 0 => Ok(OutOfGas), - 1 => Ok(BadJumpDestination), - 2 => Ok(BadInstruction), - 3 => Ok(StackUnderflow), - 4 => Ok(OutOfStack), - 5 => Ok(Internal), - 6 => Ok(BuiltIn), - 7 => Ok(MutableCallInStaticContext), - 8 => Ok(Wasm), - 9 => Ok(OutOfBounds), - 10 => Ok(Reverted), - _ => Err(DecoderError::Custom("Invalid error type")), - } - } + fn decode(rlp: &Rlp) -> Result { + use self::Error::*; + let value: u8 = rlp.as_val()?; + match value { + 0 => Ok(OutOfGas), + 1 => Ok(BadJumpDestination), + 2 => Ok(BadInstruction), + 3 => Ok(StackUnderflow), + 4 => Ok(OutOfStack), + 5 => Ok(Internal), + 6 => Ok(BuiltIn), + 7 => Ok(MutableCallInStaticContext), + 8 => Ok(Wasm), + 9 => Ok(OutOfBounds), + 10 => Ok(Reverted), + _ => Err(DecoderError::Custom("Invalid error type")), + } + } } #[cfg(test)] mod tests { - use rlp::*; - use super::Error; + use super::Error; + use rlp::*; - #[test] - fn encode_error() { - let err = Error::BadJumpDestination; + #[test] + fn encode_error() { + let err = Error::BadJumpDestination; - let mut s = RlpStream::new_list(2); - s.append(&err); - assert!(!s.is_finished(), "List shouldn't finished yet"); - s.append(&err); - assert!(s.is_finished(), "List should be finished now"); - s.out(); - } + let mut s = RlpStream::new_list(2); + s.append(&err); + assert!(!s.is_finished(), "List shouldn't finished yet"); + s.append(&err); + assert!(s.is_finished(), "List should be finished now"); + s.out(); + } } diff --git a/ethcore/src/trace/types/filter.rs b/ethcore/src/trace/types/filter.rs index d7b8fcc18..4c3fbfe05 100644 --- a/ethcore/src/trace/types/filter.rs +++ b/ethcore/src/trace/types/filter.rs 
@@ -16,413 +16,419 @@ //! Trace filters type definitions -use std::ops::Range; -use ethereum_types::{Address, Bloom, BloomInput}; -use trace::flat::FlatTrace; use super::trace::{Action, Res}; +use ethereum_types::{Address, Bloom, BloomInput}; +use std::ops::Range; +use trace::flat::FlatTrace; /// Addresses filter. /// /// Used to create bloom possibilities and match filters. #[derive(Debug)] pub struct AddressesFilter { - list: Vec
+ list: Vec
, } impl From> for AddressesFilter { - fn from(addresses: Vec
) -> Self { - AddressesFilter { list: addresses } - } + fn from(addresses: Vec
) -> Self { + AddressesFilter { list: addresses } + } } impl AddressesFilter { - /// Returns true if address matches one of the searched addresses. - pub fn matches(&self, address: &Address) -> bool { - self.matches_all() || self.list.contains(address) - } + /// Returns true if address matches one of the searched addresses. + pub fn matches(&self, address: &Address) -> bool { + self.matches_all() || self.list.contains(address) + } - /// Returns true if this address filter matches everything. - pub fn matches_all(&self) -> bool { - self.list.is_empty() - } + /// Returns true if this address filter matches everything. + pub fn matches_all(&self) -> bool { + self.list.is_empty() + } - /// Returns blooms of this addresses filter. - pub fn blooms(&self) -> Vec { - match self.list.is_empty() { - true => vec![Bloom::default()], - false => self.list.iter() - .map(|address| Bloom::from(BloomInput::Raw(address))) - .collect(), - } - } + /// Returns blooms of this addresses filter. + pub fn blooms(&self) -> Vec { + match self.list.is_empty() { + true => vec![Bloom::default()], + false => self + .list + .iter() + .map(|address| Bloom::from(BloomInput::Raw(address))) + .collect(), + } + } - /// Returns vector of blooms zipped with blooms of this addresses filter. - pub fn with_blooms(&self, blooms: Vec) -> Vec { - match self.list.is_empty() { - true => blooms, - false => blooms - .into_iter() - .flat_map(|bloom| self.list.iter() - .map(|address| { - let mut bloom = bloom.clone(); - bloom.accrue(BloomInput::Raw(address)); - bloom - }) - .collect::>()) - .collect(), - } - } + /// Returns vector of blooms zipped with blooms of this addresses filter. 
+ pub fn with_blooms(&self, blooms: Vec) -> Vec { + match self.list.is_empty() { + true => blooms, + false => blooms + .into_iter() + .flat_map(|bloom| { + self.list + .iter() + .map(|address| { + let mut bloom = bloom.clone(); + bloom.accrue(BloomInput::Raw(address)); + bloom + }) + .collect::>() + }) + .collect(), + } + } } #[derive(Debug)] /// Traces filter. pub struct Filter { - /// Block range. - pub range: Range, + /// Block range. + pub range: Range, - /// From address filter. - pub from_address: AddressesFilter, + /// From address filter. + pub from_address: AddressesFilter, - /// To address filter. - pub to_address: AddressesFilter, + /// To address filter. + pub to_address: AddressesFilter, } impl Filter { - /// Returns combinations of each address. - pub fn bloom_possibilities(&self) -> Vec { - self.to_address.with_blooms(self.from_address.blooms()) - } + /// Returns combinations of each address. + pub fn bloom_possibilities(&self) -> Vec { + self.to_address.with_blooms(self.from_address.blooms()) + } - /// Returns true if given trace matches the filter. - pub fn matches(&self, trace: &FlatTrace) -> bool { - match trace.action { - Action::Call(ref call) => { - let from_matches = self.from_address.matches(&call.from); - let to_matches = self.to_address.matches(&call.to); - from_matches && to_matches - }, - Action::Create(ref create) => { - let from_matches = self.from_address.matches(&create.from); + /// Returns true if given trace matches the filter. 
+ pub fn matches(&self, trace: &FlatTrace) -> bool { + match trace.action { + Action::Call(ref call) => { + let from_matches = self.from_address.matches(&call.from); + let to_matches = self.to_address.matches(&call.to); + from_matches && to_matches + } + Action::Create(ref create) => { + let from_matches = self.from_address.matches(&create.from); - let to_matches = match trace.result { - Res::Create(ref create_result) => self.to_address.matches(&create_result.address), - _ => self.to_address.matches_all(), - }; + let to_matches = match trace.result { + Res::Create(ref create_result) => { + self.to_address.matches(&create_result.address) + } + _ => self.to_address.matches_all(), + }; - from_matches && to_matches - }, - Action::Suicide(ref suicide) => { - let from_matches = self.from_address.matches(&suicide.address); - let to_matches = self.to_address.matches(&suicide.refund_address); - from_matches && to_matches - }, - Action::Reward(ref reward) => { - self.from_address.matches_all() && self.to_address.matches(&reward.author) - }, - } - } + from_matches && to_matches + } + Action::Suicide(ref suicide) => { + let from_matches = self.from_address.matches(&suicide.address); + let to_matches = self.to_address.matches(&suicide.refund_address); + from_matches && to_matches + } + Action::Reward(ref reward) => { + self.from_address.matches_all() && self.to_address.matches(&reward.author) + } + } + } } #[cfg(test)] mod tests { - use ethereum_types::{Address, Bloom, BloomInput}; - use trace::trace::{Action, Call, Res, Create, CreateResult, Suicide, Reward}; - use trace::flat::FlatTrace; - use trace::{Filter, AddressesFilter, TraceError, RewardType}; - use evm::CallType; + use ethereum_types::{Address, Bloom, BloomInput}; + use evm::CallType; + use trace::{ + flat::FlatTrace, + trace::{Action, Call, Create, CreateResult, Res, Reward, Suicide}, + AddressesFilter, Filter, RewardType, TraceError, + }; - #[test] - fn empty_trace_filter_bloom_possibilities() { - let filter = 
Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![]), - to_address: AddressesFilter::from(vec![]), - }; + #[test] + fn empty_trace_filter_bloom_possibilities() { + let filter = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![]), + to_address: AddressesFilter::from(vec![]), + }; - let blooms = filter.bloom_possibilities(); - assert_eq!(blooms, vec![Bloom::default()]); - } + let blooms = filter.bloom_possibilities(); + assert_eq!(blooms, vec![Bloom::default()]); + } - #[test] - fn single_trace_filter_bloom_possibility() { - let filter = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![Address::from(1)]), - to_address: AddressesFilter::from(vec![Address::from(2)]), - }; + #[test] + fn single_trace_filter_bloom_possibility() { + let filter = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![Address::from(1)]), + to_address: AddressesFilter::from(vec![Address::from(2)]), + }; - let blooms = filter.bloom_possibilities(); - assert_eq!(blooms.len(), 1); + let blooms = filter.bloom_possibilities(); + assert_eq!(blooms.len(), 1); - assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(1)))); - assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(2)))); - assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(3)))); - } + assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(1)))); + assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(2)))); + assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(3)))); + } - #[test] - fn only_from_trace_filter_bloom_possibility() { - let filter = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![Address::from(1)]), - to_address: AddressesFilter::from(vec![]), - }; + #[test] + fn only_from_trace_filter_bloom_possibility() { + let filter = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![Address::from(1)]), + to_address: AddressesFilter::from(vec![]), + }; - let 
blooms = filter.bloom_possibilities(); - assert_eq!(blooms.len(), 1); + let blooms = filter.bloom_possibilities(); + assert_eq!(blooms.len(), 1); - assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(1)))); - assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(2)))); - } + assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(1)))); + assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(2)))); + } - #[test] - fn only_to_trace_filter_bloom_possibility() { - let filter = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![]), - to_address: AddressesFilter::from(vec![Address::from(1)]), - }; + #[test] + fn only_to_trace_filter_bloom_possibility() { + let filter = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![]), + to_address: AddressesFilter::from(vec![Address::from(1)]), + }; - let blooms = filter.bloom_possibilities(); - assert_eq!(blooms.len(), 1); + let blooms = filter.bloom_possibilities(); + assert_eq!(blooms.len(), 1); - assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(1)))); - assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(2)))); - } + assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(1)))); + assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(2)))); + } - #[test] - fn multiple_trace_filter_bloom_possibility() { - let filter = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![Address::from(1), Address::from(3)]), - to_address: AddressesFilter::from(vec![Address::from(2), Address::from(4)]), - }; + #[test] + fn multiple_trace_filter_bloom_possibility() { + let filter = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![Address::from(1), Address::from(3)]), + to_address: AddressesFilter::from(vec![Address::from(2), Address::from(4)]), + }; - let blooms = filter.bloom_possibilities(); - assert_eq!(blooms.len(), 4); + let blooms = filter.bloom_possibilities(); + 
assert_eq!(blooms.len(), 4); - assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(1)))); - assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(2)))); - assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(3)))); - assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(4)))); + assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(1)))); + assert!(blooms[0].contains_input(BloomInput::Raw(&Address::from(2)))); + assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(3)))); + assert!(!blooms[0].contains_input(BloomInput::Raw(&Address::from(4)))); - assert!(blooms[1].contains_input(BloomInput::Raw(&Address::from(1)))); - assert!(blooms[1].contains_input(BloomInput::Raw(&Address::from(4)))); - assert!(!blooms[1].contains_input(BloomInput::Raw(&Address::from(2)))); - assert!(!blooms[1].contains_input(BloomInput::Raw(&Address::from(3)))); + assert!(blooms[1].contains_input(BloomInput::Raw(&Address::from(1)))); + assert!(blooms[1].contains_input(BloomInput::Raw(&Address::from(4)))); + assert!(!blooms[1].contains_input(BloomInput::Raw(&Address::from(2)))); + assert!(!blooms[1].contains_input(BloomInput::Raw(&Address::from(3)))); - assert!(blooms[2].contains_input(BloomInput::Raw(&Address::from(2)))); - assert!(blooms[2].contains_input(BloomInput::Raw(&Address::from(3)))); - assert!(!blooms[2].contains_input(BloomInput::Raw(&Address::from(1)))); - assert!(!blooms[2].contains_input(BloomInput::Raw(&Address::from(4)))); + assert!(blooms[2].contains_input(BloomInput::Raw(&Address::from(2)))); + assert!(blooms[2].contains_input(BloomInput::Raw(&Address::from(3)))); + assert!(!blooms[2].contains_input(BloomInput::Raw(&Address::from(1)))); + assert!(!blooms[2].contains_input(BloomInput::Raw(&Address::from(4)))); - assert!(blooms[3].contains_input(BloomInput::Raw(&Address::from(3)))); - assert!(blooms[3].contains_input(BloomInput::Raw(&Address::from(4)))); - 
assert!(!blooms[3].contains_input(BloomInput::Raw(&Address::from(1)))); - assert!(!blooms[3].contains_input(BloomInput::Raw(&Address::from(2)))); - } + assert!(blooms[3].contains_input(BloomInput::Raw(&Address::from(3)))); + assert!(blooms[3].contains_input(BloomInput::Raw(&Address::from(4)))); + assert!(!blooms[3].contains_input(BloomInput::Raw(&Address::from(1)))); + assert!(!blooms[3].contains_input(BloomInput::Raw(&Address::from(2)))); + } - #[test] - fn filter_matches() { - let f0 = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![Address::from(1)]), - to_address: AddressesFilter::from(vec![]), - }; + #[test] + fn filter_matches() { + let f0 = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![Address::from(1)]), + to_address: AddressesFilter::from(vec![]), + }; - let f1 = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![Address::from(3), Address::from(1)]), - to_address: AddressesFilter::from(vec![]), - }; + let f1 = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![Address::from(3), Address::from(1)]), + to_address: AddressesFilter::from(vec![]), + }; - let f2 = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![]), - to_address: AddressesFilter::from(vec![]), - }; + let f2 = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![]), + to_address: AddressesFilter::from(vec![]), + }; - let f3 = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![]), - to_address: AddressesFilter::from(vec![Address::from(2)]), - }; + let f3 = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![]), + to_address: AddressesFilter::from(vec![Address::from(2)]), + }; - let f4 = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![]), - to_address: AddressesFilter::from(vec![Address::from(2), Address::from(3)]), - }; + let f4 = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![]), + to_address: 
AddressesFilter::from(vec![Address::from(2), Address::from(3)]), + }; - let f5 = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![Address::from(1)]), - to_address: AddressesFilter::from(vec![Address::from(2), Address::from(3)]), - }; + let f5 = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![Address::from(1)]), + to_address: AddressesFilter::from(vec![Address::from(2), Address::from(3)]), + }; - let f6 = Filter { - range: (0..0), - from_address: AddressesFilter::from(vec![Address::from(1)]), - to_address: AddressesFilter::from(vec![Address::from(4)]), - }; + let f6 = Filter { + range: (0..0), + from_address: AddressesFilter::from(vec![Address::from(1)]), + to_address: AddressesFilter::from(vec![Address::from(4)]), + }; - let trace = FlatTrace { - action: Action::Call(Call { - from: 1.into(), - to: 2.into(), - value: 3.into(), - gas: 4.into(), - input: vec![0x5], - call_type: CallType::Call, - }), - result: Res::FailedCall(TraceError::OutOfGas), - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - }; + let trace = FlatTrace { + action: Action::Call(Call { + from: 1.into(), + to: 2.into(), + value: 3.into(), + gas: 4.into(), + input: vec![0x5], + call_type: CallType::Call, + }), + result: Res::FailedCall(TraceError::OutOfGas), + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + }; - assert!(f0.matches(&trace)); - assert!(f1.matches(&trace)); - assert!(f2.matches(&trace)); - assert!(f3.matches(&trace)); - assert!(f4.matches(&trace)); - assert!(f5.matches(&trace)); - assert!(!f6.matches(&trace)); + assert!(f0.matches(&trace)); + assert!(f1.matches(&trace)); + assert!(f2.matches(&trace)); + assert!(f3.matches(&trace)); + assert!(f4.matches(&trace)); + assert!(f5.matches(&trace)); + assert!(!f6.matches(&trace)); - let trace = FlatTrace { - action: Action::Create(Create { - from: 1.into(), - value: 3.into(), - gas: 4.into(), - init: vec![0x5], - }), - result: Res::Create(CreateResult { - gas_used: 
10.into(), - code: vec![], - address: 2.into(), - }), - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - }; + let trace = FlatTrace { + action: Action::Create(Create { + from: 1.into(), + value: 3.into(), + gas: 4.into(), + init: vec![0x5], + }), + result: Res::Create(CreateResult { + gas_used: 10.into(), + code: vec![], + address: 2.into(), + }), + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + }; - assert!(f0.matches(&trace)); - assert!(f1.matches(&trace)); - assert!(f2.matches(&trace)); - assert!(f3.matches(&trace)); - assert!(f4.matches(&trace)); - assert!(f5.matches(&trace)); - assert!(!f6.matches(&trace)); + assert!(f0.matches(&trace)); + assert!(f1.matches(&trace)); + assert!(f2.matches(&trace)); + assert!(f3.matches(&trace)); + assert!(f4.matches(&trace)); + assert!(f5.matches(&trace)); + assert!(!f6.matches(&trace)); - let trace = FlatTrace { - action: Action::Suicide(Suicide { - address: 1.into(), - refund_address: 2.into(), - balance: 3.into(), - }), - result: Res::None, - trace_address: vec![].into_iter().collect(), - subtraces: 0 - }; + let trace = FlatTrace { + action: Action::Suicide(Suicide { + address: 1.into(), + refund_address: 2.into(), + balance: 3.into(), + }), + result: Res::None, + trace_address: vec![].into_iter().collect(), + subtraces: 0, + }; - assert!(f0.matches(&trace)); - assert!(f1.matches(&trace)); - assert!(f2.matches(&trace)); - assert!(f3.matches(&trace)); - assert!(f4.matches(&trace)); - assert!(f5.matches(&trace)); - assert!(!f6.matches(&trace)); + assert!(f0.matches(&trace)); + assert!(f1.matches(&trace)); + assert!(f2.matches(&trace)); + assert!(f3.matches(&trace)); + assert!(f4.matches(&trace)); + assert!(f5.matches(&trace)); + assert!(!f6.matches(&trace)); - let trace = FlatTrace { - action: Action::Reward(Reward { - author: 2.into(), - value: 100.into(), - reward_type: RewardType::Block, - }), - result: Res::None, - trace_address: vec![].into_iter().collect(), - subtraces: 0 - }; + let trace 
= FlatTrace { + action: Action::Reward(Reward { + author: 2.into(), + value: 100.into(), + reward_type: RewardType::Block, + }), + result: Res::None, + trace_address: vec![].into_iter().collect(), + subtraces: 0, + }; - assert!(!f0.matches(&trace)); - assert!(!f1.matches(&trace)); - assert!(f2.matches(&trace)); - assert!(f3.matches(&trace)); - assert!(f4.matches(&trace)); - assert!(!f5.matches(&trace)); - assert!(!f6.matches(&trace)); - } + assert!(!f0.matches(&trace)); + assert!(!f1.matches(&trace)); + assert!(f2.matches(&trace)); + assert!(f3.matches(&trace)); + assert!(f4.matches(&trace)); + assert!(!f5.matches(&trace)); + assert!(!f6.matches(&trace)); + } - #[test] - fn filter_match_block_reward_fix_8070() { - let f0 = Filter { - range: (0..0), - from_address: vec![1.into()].into(), - to_address: vec![].into(), - }; + #[test] + fn filter_match_block_reward_fix_8070() { + let f0 = Filter { + range: (0..0), + from_address: vec![1.into()].into(), + to_address: vec![].into(), + }; - let f1 = Filter { - range: (0..0), - from_address: vec![].into(), - to_address: vec![].into(), - }; + let f1 = Filter { + range: (0..0), + from_address: vec![].into(), + to_address: vec![].into(), + }; - let f2 = Filter { - range: (0..0), - from_address: vec![].into(), - to_address: vec![2.into()].into(), - }; + let f2 = Filter { + range: (0..0), + from_address: vec![].into(), + to_address: vec![2.into()].into(), + }; - let trace = FlatTrace { - action: Action::Reward(Reward { - author: 2.into(), - value: 10.into(), - reward_type: RewardType::Block, - }), - result: Res::None, - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - }; + let trace = FlatTrace { + action: Action::Reward(Reward { + author: 2.into(), + value: 10.into(), + reward_type: RewardType::Block, + }), + result: Res::None, + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + }; - assert!(!f0.matches(&trace)); - assert!(f1.matches(&trace)); - assert!(f2.matches(&trace)); - } + 
assert!(!f0.matches(&trace)); + assert!(f1.matches(&trace)); + assert!(f2.matches(&trace)); + } - #[test] - fn filter_match_failed_contract_creation_fix_9822() { + #[test] + fn filter_match_failed_contract_creation_fix_9822() { + let f0 = Filter { + range: (0..0), + from_address: vec![1.into()].into(), + to_address: vec![].into(), + }; - let f0 = Filter { - range: (0..0), - from_address: vec![1.into()].into(), - to_address: vec![].into(), - }; + let f1 = Filter { + range: (0..0), + from_address: vec![].into(), + to_address: vec![].into(), + }; - let f1 = Filter { - range: (0..0), - from_address: vec![].into(), - to_address: vec![].into(), - }; + let f2 = Filter { + range: (0..0), + from_address: vec![].into(), + to_address: vec![2.into()].into(), + }; - let f2 = Filter { - range: (0..0), - from_address: vec![].into(), - to_address: vec![2.into()].into(), - }; - - let trace = FlatTrace { - action: Action::Create(Create { - from: 1.into(), - gas: 4.into(), - init: vec![0x5], - value: 3.into(), - }), - result: Res::FailedCall(TraceError::BadInstruction), - trace_address: vec![].into_iter().collect(), - subtraces: 0 - }; - - assert!(f0.matches(&trace)); - assert!(f1.matches(&trace)); - assert!(!f2.matches(&trace)); - } + let trace = FlatTrace { + action: Action::Create(Create { + from: 1.into(), + gas: 4.into(), + init: vec![0x5], + value: 3.into(), + }), + result: Res::FailedCall(TraceError::BadInstruction), + trace_address: vec![].into_iter().collect(), + subtraces: 0, + }; + assert!(f0.matches(&trace)); + assert!(f1.matches(&trace)); + assert!(!f2.matches(&trace)); + } } - diff --git a/ethcore/src/trace/types/flat.rs b/ethcore/src/trace/types/flat.rs index cb3e1229b..4810c42e9 100644 --- a/ethcore/src/trace/types/flat.rs +++ b/ethcore/src/trace/types/flat.rs @@ -16,63 +16,63 @@ //! 
Flat trace module -use rlp::{Rlp, RlpStream, Decodable, Encodable, DecoderError}; -use heapsize::HeapSizeOf; -use ethereum_types::Bloom; use super::trace::{Action, Res}; +use ethereum_types::Bloom; +use heapsize::HeapSizeOf; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; /// Trace localized in vector of traces produced by a single transaction. /// /// Parent and children indexes refer to positions in this vector. #[derive(Debug, PartialEq, Clone)] pub struct FlatTrace { - /// Type of action performed by a transaction. - pub action: Action, - /// Result of this action. - pub result: Res, - /// Number of subtraces. - pub subtraces: usize, - /// Exact location of trace. - /// - /// [index in root, index in first CALL, index in second CALL, ...] - pub trace_address: Vec, + /// Type of action performed by a transaction. + pub action: Action, + /// Result of this action. + pub result: Res, + /// Number of subtraces. + pub subtraces: usize, + /// Exact location of trace. + /// + /// [index in root, index in first CALL, index in second CALL, ...] + pub trace_address: Vec, } impl FlatTrace { - /// Returns bloom of the trace. - pub fn bloom(&self) -> Bloom { - self.action.bloom() | self.result.bloom() - } + /// Returns bloom of the trace. 
+ pub fn bloom(&self) -> Bloom { + self.action.bloom() | self.result.bloom() + } } impl HeapSizeOf for FlatTrace { - fn heap_size_of_children(&self) -> usize { - self.trace_address.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.trace_address.heap_size_of_children() + } } impl Encodable for FlatTrace { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(4); - s.append(&self.action); - s.append(&self.result); - s.append(&self.subtraces); - s.append_list::(&self.trace_address.iter().collect::>()); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(4); + s.append(&self.action); + s.append(&self.result); + s.append(&self.subtraces); + s.append_list::(&self.trace_address.iter().collect::>()); + } } impl Decodable for FlatTrace { - fn decode(d: &Rlp) -> Result { - let v: Vec = d.list_at(3)?; - let res = FlatTrace { - action: d.val_at(0)?, - result: d.val_at(1)?, - subtraces: d.val_at(2)?, - trace_address: v.into_iter().collect(), - }; + fn decode(d: &Rlp) -> Result { + let v: Vec = d.list_at(3)?; + let res = FlatTrace { + action: d.val_at(0)?, + result: d.val_at(1)?, + subtraces: d.val_at(2)?, + trace_address: v.into_iter().collect(), + }; - Ok(res) - } + Ok(res) + } } /// Represents all traces produced by a single transaction. @@ -80,28 +80,30 @@ impl Decodable for FlatTrace { pub struct FlatTransactionTraces(Vec); impl From> for FlatTransactionTraces { - fn from(v: Vec) -> Self { - FlatTransactionTraces(v) - } + fn from(v: Vec) -> Self { + FlatTransactionTraces(v) + } } impl HeapSizeOf for FlatTransactionTraces { - fn heap_size_of_children(&self) -> usize { - self.0.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.0.heap_size_of_children() + } } impl FlatTransactionTraces { - /// Returns bloom of all traces in the collection. 
- pub fn bloom(&self) -> Bloom { - self.0.iter().fold(Default::default(), | bloom, trace | bloom | trace.bloom()) - } + /// Returns bloom of all traces in the collection. + pub fn bloom(&self) -> Bloom { + self.0 + .iter() + .fold(Default::default(), |bloom, trace| bloom | trace.bloom()) + } } impl Into> for FlatTransactionTraces { - fn into(self) -> Vec { - self.0 - } + fn into(self) -> Vec { + self.0 + } } /// Represents all traces produced by transactions in a single block. @@ -109,141 +111,145 @@ impl Into> for FlatTransactionTraces { pub struct FlatBlockTraces(Vec); impl HeapSizeOf for FlatBlockTraces { - fn heap_size_of_children(&self) -> usize { - self.0.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.0.heap_size_of_children() + } } impl From> for FlatBlockTraces { - fn from(v: Vec) -> Self { - FlatBlockTraces(v) - } + fn from(v: Vec) -> Self { + FlatBlockTraces(v) + } } impl FlatBlockTraces { - /// Returns bloom of all traces in the block. - pub fn bloom(&self) -> Bloom { - self.0.iter().fold(Default::default(), | bloom, tx_traces | bloom | tx_traces.bloom()) - } + /// Returns bloom of all traces in the block. 
+ pub fn bloom(&self) -> Bloom { + self.0.iter().fold(Default::default(), |bloom, tx_traces| { + bloom | tx_traces.bloom() + }) + } } impl Into> for FlatBlockTraces { - fn into(self) -> Vec { - self.0 - } + fn into(self) -> Vec { + self.0 + } } #[cfg(test)] mod tests { - use rlp::*; - use super::{FlatBlockTraces, FlatTransactionTraces, FlatTrace}; - use trace::trace::{Action, Res, CallResult, Call, Suicide, Reward}; - use evm::CallType; - use trace::RewardType; + use super::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}; + use evm::CallType; + use rlp::*; + use trace::{ + trace::{Action, Call, CallResult, Res, Reward, Suicide}, + RewardType, + }; - #[test] - fn encode_flat_transaction_traces() { - let ftt = FlatTransactionTraces::from(Vec::new()); + #[test] + fn encode_flat_transaction_traces() { + let ftt = FlatTransactionTraces::from(Vec::new()); - let mut s = RlpStream::new_list(2); - s.append(&ftt); - assert!(!s.is_finished(), "List shouldn't finished yet"); - s.append(&ftt); - assert!(s.is_finished(), "List should be finished now"); - s.out(); - } + let mut s = RlpStream::new_list(2); + s.append(&ftt); + assert!(!s.is_finished(), "List shouldn't finished yet"); + s.append(&ftt); + assert!(s.is_finished(), "List should be finished now"); + s.out(); + } - #[test] - fn encode_flat_block_traces() { - let fbt = FlatBlockTraces::from(Vec::new()); + #[test] + fn encode_flat_block_traces() { + let fbt = FlatBlockTraces::from(Vec::new()); - let mut s = RlpStream::new_list(2); - s.append(&fbt); - assert!(!s.is_finished(), "List shouldn't finished yet"); - s.append(&fbt); - assert!(s.is_finished(), "List should be finished now"); - s.out(); - } + let mut s = RlpStream::new_list(2); + s.append(&fbt); + assert!(!s.is_finished(), "List shouldn't finished yet"); + s.append(&fbt); + assert!(s.is_finished(), "List should be finished now"); + s.out(); + } - #[test] - fn test_trace_serialization() { - // block #51921 + #[test] + fn test_trace_serialization() { + // block 
#51921 - let flat_trace = FlatTrace { - action: Action::Call(Call { - from: "8dda5e016e674683241bf671cced51e7239ea2bc".parse().unwrap(), - to: "37a5e19cc2d49f244805d5c268c0e6f321965ab9".parse().unwrap(), - value: "3627e8f712373c0000".parse().unwrap(), - gas: 0x03e8.into(), - input: vec![], - call_type: CallType::Call, - }), - result: Res::Call(CallResult { - gas_used: 0.into(), - output: vec![], - }), - trace_address: Default::default(), - subtraces: 0, - }; + let flat_trace = FlatTrace { + action: Action::Call(Call { + from: "8dda5e016e674683241bf671cced51e7239ea2bc".parse().unwrap(), + to: "37a5e19cc2d49f244805d5c268c0e6f321965ab9".parse().unwrap(), + value: "3627e8f712373c0000".parse().unwrap(), + gas: 0x03e8.into(), + input: vec![], + call_type: CallType::Call, + }), + result: Res::Call(CallResult { + gas_used: 0.into(), + output: vec![], + }), + trace_address: Default::default(), + subtraces: 0, + }; - let flat_trace1 = FlatTrace { - action: Action::Call(Call { - from: "3d0768da09ce77d25e2d998e6a7b6ed4b9116c2d".parse().unwrap(), - to: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(), - value: 0.into(), - gas: 0x010c78.into(), - input: vec![0x41, 0xc0, 0xe1, 0xb5], - call_type: CallType::Call, - }), - result: Res::Call(CallResult { - gas_used: 0x0127.into(), - output: vec![], - }), - trace_address: Default::default(), - subtraces: 1, - }; + let flat_trace1 = FlatTrace { + action: Action::Call(Call { + from: "3d0768da09ce77d25e2d998e6a7b6ed4b9116c2d".parse().unwrap(), + to: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(), + value: 0.into(), + gas: 0x010c78.into(), + input: vec![0x41, 0xc0, 0xe1, 0xb5], + call_type: CallType::Call, + }), + result: Res::Call(CallResult { + gas_used: 0x0127.into(), + output: vec![], + }), + trace_address: Default::default(), + subtraces: 1, + }; - let flat_trace2 = FlatTrace { - action: Action::Suicide(Suicide { - address: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(), - balance: 0.into(), - 
refund_address: "3d0768da09ce77d25e2d998e6a7b6ed4b9116c2d".parse().unwrap(), - }), - result: Res::None, - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - }; + let flat_trace2 = FlatTrace { + action: Action::Suicide(Suicide { + address: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(), + balance: 0.into(), + refund_address: "3d0768da09ce77d25e2d998e6a7b6ed4b9116c2d".parse().unwrap(), + }), + result: Res::None, + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + }; - let flat_trace3 = FlatTrace { - action: Action::Reward(Reward { - author: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(), - value: 10.into(), - reward_type: RewardType::Uncle, - }), - result: Res::None, - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - }; + let flat_trace3 = FlatTrace { + action: Action::Reward(Reward { + author: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(), + value: 10.into(), + reward_type: RewardType::Uncle, + }), + result: Res::None, + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + }; - let flat_trace4 = FlatTrace { - action: Action::Reward(Reward { - author: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(), - value: 10.into(), - reward_type: RewardType::Block, - }), - result: Res::None, - trace_address: vec![0].into_iter().collect(), - subtraces: 0, - }; + let flat_trace4 = FlatTrace { + action: Action::Reward(Reward { + author: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(), + value: 10.into(), + reward_type: RewardType::Block, + }), + result: Res::None, + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + }; - let block_traces = FlatBlockTraces(vec![ - FlatTransactionTraces(vec![flat_trace]), - FlatTransactionTraces(vec![flat_trace1, flat_trace2]), - FlatTransactionTraces(vec![flat_trace3, flat_trace4]) - ]); + let block_traces = FlatBlockTraces(vec![ + FlatTransactionTraces(vec![flat_trace]), + FlatTransactionTraces(vec![flat_trace1, 
flat_trace2]), + FlatTransactionTraces(vec![flat_trace3, flat_trace4]), + ]); - let encoded = ::rlp::encode(&block_traces); - let decoded = ::rlp::decode(&encoded).expect("error decoding block traces"); - assert_eq!(block_traces, decoded); - } + let encoded = ::rlp::encode(&block_traces); + let decoded = ::rlp::decode(&encoded).expect("error decoding block traces"); + assert_eq!(block_traces, decoded); + } } diff --git a/ethcore/src/trace/types/localized.rs b/ethcore/src/trace/types/localized.rs index 330d23a72..bbcd69702 100644 --- a/ethcore/src/trace/types/localized.rs +++ b/ethcore/src/trace/types/localized.rs @@ -16,29 +16,29 @@ //! Localized traces type definitions -use ethereum_types::H256; use super::trace::{Action, Res}; +use ethereum_types::H256; use types::BlockNumber; /// Localized trace. #[derive(Debug, PartialEq, Clone)] pub struct LocalizedTrace { - /// Type of action performed by a transaction. - pub action: Action, - /// Result of this action. - pub result: Res, - /// Number of subtraces. - pub subtraces: usize, - /// Exact location of trace. - /// - /// [index in root, index in first CALL, index in second CALL, ...] - pub trace_address: Vec, - /// Transaction number within the block. - pub transaction_number: Option, - /// Signed transaction hash. - pub transaction_hash: Option, - /// Block number. - pub block_number: BlockNumber, - /// Block hash. - pub block_hash: H256, + /// Type of action performed by a transaction. + pub action: Action, + /// Result of this action. + pub result: Res, + /// Number of subtraces. + pub subtraces: usize, + /// Exact location of trace. + /// + /// [index in root, index in first CALL, index in second CALL, ...] + pub trace_address: Vec, + /// Transaction number within the block. + pub transaction_number: Option, + /// Signed transaction hash. + pub transaction_hash: Option, + /// Block number. + pub block_number: BlockNumber, + /// Block hash. 
+ pub block_hash: H256, } diff --git a/ethcore/src/trace/types/mod.rs b/ethcore/src/trace/types/mod.rs index c1ef3ac1a..ac4888b55 100644 --- a/ethcore/src/trace/types/mod.rs +++ b/ethcore/src/trace/types/mod.rs @@ -19,39 +19,39 @@ pub mod error; pub mod filter; pub mod flat; -pub mod trace; pub mod localized; +pub mod trace; use self::flat::FlatTransactionTraces; /// Container for block traces. #[derive(Clone)] pub enum Tracing { - /// This variant should be used when tracing is enabled. - Enabled(Vec), - /// Tracing is disabled. - Disabled, + /// This variant should be used when tracing is enabled. + Enabled(Vec), + /// Tracing is disabled. + Disabled, } impl Tracing { - /// Creates new instance of enabled tracing object. - pub fn enabled() -> Self { - Tracing::Enabled(Default::default()) - } + /// Creates new instance of enabled tracing object. + pub fn enabled() -> Self { + Tracing::Enabled(Default::default()) + } - /// Returns true if tracing is enabled. - pub fn is_enabled(&self) -> bool { - match *self { - Tracing::Enabled(_) => true, - Tracing::Disabled => false, - } - } + /// Returns true if tracing is enabled. + pub fn is_enabled(&self) -> bool { + match *self { + Tracing::Enabled(_) => true, + Tracing::Disabled => false, + } + } - /// Drain all traces. - pub fn drain(self) -> Vec { - match self { - Tracing::Enabled(traces) => traces, - Tracing::Disabled => vec![], - } - } + /// Drain all traces. + pub fn drain(self) -> Vec { + match self { + Tracing::Enabled(traces) => traces, + Tracing::Disabled => vec![], + } + } } diff --git a/ethcore/src/trace/types/trace.rs b/ethcore/src/trace/types/trace.rs index 16084e94c..93ea25b80 100644 --- a/ethcore/src/trace/types/trace.rs +++ b/ethcore/src/trace/types/trace.rs @@ -16,416 +16,419 @@ //! Tracing datatypes. 
-use ethereum_types::{U256, Address, Bloom, BloomInput}; use bytes::Bytes; -use rlp::{Rlp, RlpStream, Encodable, DecoderError, Decodable}; +use ethereum_types::{Address, Bloom, BloomInput, U256}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; -use vm::ActionParams; -use evm::CallType; use super::error::Error; +use evm::CallType; +use vm::ActionParams; /// `Call` result. #[derive(Debug, Clone, PartialEq, Default, RlpEncodable, RlpDecodable)] pub struct CallResult { - /// Gas used by call. - pub gas_used: U256, - /// Call Output. - pub output: Bytes, + /// Gas used by call. + pub gas_used: U256, + /// Call Output. + pub output: Bytes, } /// `Create` result. #[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)] pub struct CreateResult { - /// Gas used by create. - pub gas_used: U256, - /// Code of the newly created contract. - pub code: Bytes, - /// Address of the newly created contract. - pub address: Address, + /// Gas used by create. + pub gas_used: U256, + /// Code of the newly created contract. + pub code: Bytes, + /// Address of the newly created contract. + pub address: Address, } impl CreateResult { - /// Returns bloom. - pub fn bloom(&self) -> Bloom { - BloomInput::Raw(&self.address).into() - } + /// Returns bloom. + pub fn bloom(&self) -> Bloom { + BloomInput::Raw(&self.address).into() + } } /// Description of a _call_ action, either a `CALL` operation or a message transction. #[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)] pub struct Call { - /// The sending account. - pub from: Address, - /// The destination account. - pub to: Address, - /// The value transferred to the destination account. - pub value: U256, - /// The gas available for executing the call. - pub gas: U256, - /// The input data provided to the call. - pub input: Bytes, - /// The type of the call. - pub call_type: CallType, + /// The sending account. + pub from: Address, + /// The destination account. 
+ pub to: Address, + /// The value transferred to the destination account. + pub value: U256, + /// The gas available for executing the call. + pub gas: U256, + /// The input data provided to the call. + pub input: Bytes, + /// The type of the call. + pub call_type: CallType, } impl From for Call { - fn from(p: ActionParams) -> Self { - match p.call_type { - CallType::DelegateCall | CallType::CallCode => Call { - from: p.address, - to: p.code_address, - value: p.value.value(), - gas: p.gas, - input: p.data.unwrap_or_else(Vec::new), - call_type: p.call_type, - }, - _ => Call { - from: p.sender, - to: p.address, - value: p.value.value(), - gas: p.gas, - input: p.data.unwrap_or_else(Vec::new), - call_type: p.call_type, - }, - } - } + fn from(p: ActionParams) -> Self { + match p.call_type { + CallType::DelegateCall | CallType::CallCode => Call { + from: p.address, + to: p.code_address, + value: p.value.value(), + gas: p.gas, + input: p.data.unwrap_or_else(Vec::new), + call_type: p.call_type, + }, + _ => Call { + from: p.sender, + to: p.address, + value: p.value.value(), + gas: p.gas, + input: p.data.unwrap_or_else(Vec::new), + call_type: p.call_type, + }, + } + } } impl Call { - /// Returns call action bloom. - /// The bloom contains from and to addresses. - pub fn bloom(&self) -> Bloom { - let mut bloom = Bloom::default(); - bloom.accrue(BloomInput::Raw(&self.from)); - bloom.accrue(BloomInput::Raw(&self.to)); - bloom - } + /// Returns call action bloom. + /// The bloom contains from and to addresses. + pub fn bloom(&self) -> Bloom { + let mut bloom = Bloom::default(); + bloom.accrue(BloomInput::Raw(&self.from)); + bloom.accrue(BloomInput::Raw(&self.to)); + bloom + } } /// Description of a _create_ action, either a `CREATE` operation or a create transction. #[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)] pub struct Create { - /// The address of the creator. - pub from: Address, - /// The value with which the new account is endowed. 
- pub value: U256, - /// The gas available for the creation init code. - pub gas: U256, - /// The init code. - pub init: Bytes, + /// The address of the creator. + pub from: Address, + /// The value with which the new account is endowed. + pub value: U256, + /// The gas available for the creation init code. + pub gas: U256, + /// The init code. + pub init: Bytes, } impl From for Create { - fn from(p: ActionParams) -> Self { - Create { - from: p.sender, - value: p.value.value(), - gas: p.gas, - init: p.code.map_or_else(Vec::new, |c| (*c).clone()), - } - } + fn from(p: ActionParams) -> Self { + Create { + from: p.sender, + value: p.value.value(), + gas: p.gas, + init: p.code.map_or_else(Vec::new, |c| (*c).clone()), + } + } } impl Create { - /// Returns bloom create action bloom. - /// The bloom contains only from address. - pub fn bloom(&self) -> Bloom { - BloomInput::Raw(&self.from).into() - } + /// Returns bloom create action bloom. + /// The bloom contains only from address. + pub fn bloom(&self) -> Bloom { + BloomInput::Raw(&self.from).into() + } } /// Reward type. #[derive(Debug, PartialEq, Clone, Copy)] pub enum RewardType { - /// Block - Block, - /// Uncle - Uncle, - /// Empty step (AuthorityRound) - EmptyStep, - /// A reward directly attributed by an external protocol (e.g. block reward contract) - External, + /// Block + Block, + /// Uncle + Uncle, + /// Empty step (AuthorityRound) + EmptyStep, + /// A reward directly attributed by an external protocol (e.g. 
block reward contract) + External, } impl Encodable for RewardType { - fn rlp_append(&self, s: &mut RlpStream) { - let v = match *self { - RewardType::Block => 0u32, - RewardType::Uncle => 1, - RewardType::EmptyStep => 2, - RewardType::External => 3, - }; - Encodable::rlp_append(&v, s); - } + fn rlp_append(&self, s: &mut RlpStream) { + let v = match *self { + RewardType::Block => 0u32, + RewardType::Uncle => 1, + RewardType::EmptyStep => 2, + RewardType::External => 3, + }; + Encodable::rlp_append(&v, s); + } } impl Decodable for RewardType { - fn decode(rlp: &Rlp) -> Result { - rlp.as_val().and_then(|v| Ok(match v { - 0u32 => RewardType::Block, - 1 => RewardType::Uncle, - 2 => RewardType::EmptyStep, - 3 => RewardType::External, - _ => return Err(DecoderError::Custom("Invalid value of RewardType item")), - })) - } + fn decode(rlp: &Rlp) -> Result { + rlp.as_val().and_then(|v| { + Ok(match v { + 0u32 => RewardType::Block, + 1 => RewardType::Uncle, + 2 => RewardType::EmptyStep, + 3 => RewardType::External, + _ => return Err(DecoderError::Custom("Invalid value of RewardType item")), + }) + }) + } } /// Reward action #[derive(Debug, Clone, PartialEq)] pub struct Reward { - /// Author's address. - pub author: Address, - /// Reward amount. - pub value: U256, - /// Reward type. - pub reward_type: RewardType, + /// Author's address. + pub author: Address, + /// Reward amount. + pub value: U256, + /// Reward type. + pub reward_type: RewardType, } impl Reward { - /// Return reward action bloom. - pub fn bloom(&self) -> Bloom { - BloomInput::Raw(&self.author).into() - } + /// Return reward action bloom. 
+ pub fn bloom(&self) -> Bloom { + BloomInput::Raw(&self.author).into() + } } impl Encodable for Reward { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3); - s.append(&self.author); - s.append(&self.value); - s.append(&self.reward_type); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3); + s.append(&self.author); + s.append(&self.value); + s.append(&self.reward_type); + } } impl Decodable for Reward { - fn decode(rlp: &Rlp) -> Result { - let res = Reward { - author: rlp.val_at(0)?, - value: rlp.val_at(1)?, - reward_type: rlp.val_at(2)?, - }; + fn decode(rlp: &Rlp) -> Result { + let res = Reward { + author: rlp.val_at(0)?, + value: rlp.val_at(1)?, + reward_type: rlp.val_at(2)?, + }; - Ok(res) - } + Ok(res) + } } /// Suicide action. #[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)] pub struct Suicide { - /// Suicided address. - pub address: Address, - /// Suicided contract heir. - pub refund_address: Address, - /// Balance of the contract just before suicide. - pub balance: U256, + /// Suicided address. + pub address: Address, + /// Suicided contract heir. + pub refund_address: Address, + /// Balance of the contract just before suicide. + pub balance: U256, } impl Suicide { - /// Return suicide action bloom. - pub fn bloom(&self) -> Bloom { - let mut bloom = Bloom::default(); - bloom.accrue(BloomInput::Raw(&self.address)); - bloom.accrue(BloomInput::Raw(&self.refund_address)); - bloom - } + /// Return suicide action bloom. + pub fn bloom(&self) -> Bloom { + let mut bloom = Bloom::default(); + bloom.accrue(BloomInput::Raw(&self.address)); + bloom.accrue(BloomInput::Raw(&self.refund_address)); + bloom + } } /// Description of an action that we trace; will be either a call or a create. #[derive(Debug, Clone, PartialEq)] pub enum Action { - /// It's a call action. - Call(Call), - /// It's a create action. - Create(Create), - /// Suicide. - Suicide(Suicide), - /// Reward - Reward(Reward), + /// It's a call action. 
+ Call(Call), + /// It's a create action. + Create(Create), + /// Suicide. + Suicide(Suicide), + /// Reward + Reward(Reward), } impl Encodable for Action { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2); - match *self { - Action::Call(ref call) => { - s.append(&0u8); - s.append(call); - }, - Action::Create(ref create) => { - s.append(&1u8); - s.append(create); - }, - Action::Suicide(ref suicide) => { - s.append(&2u8); - s.append(suicide); - }, - Action::Reward(ref reward) => { - s.append(&3u8); - s.append(reward); - } - - } - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + match *self { + Action::Call(ref call) => { + s.append(&0u8); + s.append(call); + } + Action::Create(ref create) => { + s.append(&1u8); + s.append(create); + } + Action::Suicide(ref suicide) => { + s.append(&2u8); + s.append(suicide); + } + Action::Reward(ref reward) => { + s.append(&3u8); + s.append(reward); + } + } + } } impl Decodable for Action { - fn decode(rlp: &Rlp) -> Result { - let action_type: u8 = rlp.val_at(0)?; - match action_type { - 0 => rlp.val_at(1).map(Action::Call), - 1 => rlp.val_at(1).map(Action::Create), - 2 => rlp.val_at(1).map(Action::Suicide), - 3 => rlp.val_at(1).map(Action::Reward), - _ => Err(DecoderError::Custom("Invalid action type.")), - } - } + fn decode(rlp: &Rlp) -> Result { + let action_type: u8 = rlp.val_at(0)?; + match action_type { + 0 => rlp.val_at(1).map(Action::Call), + 1 => rlp.val_at(1).map(Action::Create), + 2 => rlp.val_at(1).map(Action::Suicide), + 3 => rlp.val_at(1).map(Action::Reward), + _ => Err(DecoderError::Custom("Invalid action type.")), + } + } } impl Action { - /// Returns action bloom. - pub fn bloom(&self) -> Bloom { - match *self { - Action::Call(ref call) => call.bloom(), - Action::Create(ref create) => create.bloom(), - Action::Suicide(ref suicide) => suicide.bloom(), - Action::Reward(ref reward) => reward.bloom(), - } - } + /// Returns action bloom. 
+ pub fn bloom(&self) -> Bloom { + match *self { + Action::Call(ref call) => call.bloom(), + Action::Create(ref create) => create.bloom(), + Action::Suicide(ref suicide) => suicide.bloom(), + Action::Reward(ref reward) => reward.bloom(), + } + } } /// The result of the performed action. #[derive(Debug, Clone, PartialEq)] pub enum Res { - /// Successful call action result. - Call(CallResult), - /// Successful create action result. - Create(CreateResult), - /// Failed call. - FailedCall(Error), - /// Failed create. - FailedCreate(Error), - /// None - None, + /// Successful call action result. + Call(CallResult), + /// Successful create action result. + Create(CreateResult), + /// Failed call. + FailedCall(Error), + /// Failed create. + FailedCreate(Error), + /// None + None, } impl Encodable for Res { - fn rlp_append(&self, s: &mut RlpStream) { - match *self { - Res::Call(ref call) => { - s.begin_list(2); - s.append(&0u8); - s.append(call); - }, - Res::Create(ref create) => { - s.begin_list(2); - s.append(&1u8); - s.append(create); - }, - Res::FailedCall(ref err) => { - s.begin_list(2); - s.append(&2u8); - s.append(err); - }, - Res::FailedCreate(ref err) => { - s.begin_list(2); - s.append(&3u8); - s.append(err); - }, - Res::None => { - s.begin_list(1); - s.append(&4u8); - } - } - } + fn rlp_append(&self, s: &mut RlpStream) { + match *self { + Res::Call(ref call) => { + s.begin_list(2); + s.append(&0u8); + s.append(call); + } + Res::Create(ref create) => { + s.begin_list(2); + s.append(&1u8); + s.append(create); + } + Res::FailedCall(ref err) => { + s.begin_list(2); + s.append(&2u8); + s.append(err); + } + Res::FailedCreate(ref err) => { + s.begin_list(2); + s.append(&3u8); + s.append(err); + } + Res::None => { + s.begin_list(1); + s.append(&4u8); + } + } + } } impl Decodable for Res { - fn decode(rlp: &Rlp) -> Result { - let action_type: u8 = rlp.val_at(0)?; - match action_type { - 0 => rlp.val_at(1).map(Res::Call), - 1 => rlp.val_at(1).map(Res::Create), - 2 => 
rlp.val_at(1).map(Res::FailedCall), - 3 => rlp.val_at(1).map(Res::FailedCreate), - 4 => Ok(Res::None), - _ => Err(DecoderError::Custom("Invalid result type.")), - } - } + fn decode(rlp: &Rlp) -> Result { + let action_type: u8 = rlp.val_at(0)?; + match action_type { + 0 => rlp.val_at(1).map(Res::Call), + 1 => rlp.val_at(1).map(Res::Create), + 2 => rlp.val_at(1).map(Res::FailedCall), + 3 => rlp.val_at(1).map(Res::FailedCreate), + 4 => Ok(Res::None), + _ => Err(DecoderError::Custom("Invalid result type.")), + } + } } impl Res { - /// Returns result bloom. - pub fn bloom(&self) -> Bloom { - match *self { - Res::Create(ref create) => create.bloom(), - Res::Call(_) | Res::FailedCall(_) | Res::FailedCreate(_) | Res::None => Default::default(), - } - } + /// Returns result bloom. + pub fn bloom(&self) -> Bloom { + match *self { + Res::Create(ref create) => create.bloom(), + Res::Call(_) | Res::FailedCall(_) | Res::FailedCreate(_) | Res::None => { + Default::default() + } + } + } - /// Did this call fail? - pub fn succeeded(&self) -> bool { - match *self { - Res::Call(_) | Res::Create(_) => true, - _ => false, - } - } + /// Did this call fail? + pub fn succeeded(&self) -> bool { + match *self { + Res::Call(_) | Res::Create(_) => true, + _ => false, + } + } } #[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)] /// A diff of some chunk of memory. pub struct MemoryDiff { - /// Offset into memory the change begins. - pub offset: usize, - /// The changed data. - pub data: Bytes, + /// Offset into memory the change begins. + pub offset: usize, + /// The changed data. + pub data: Bytes, } #[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)] /// A diff of some storage value. pub struct StorageDiff { - /// Which key in storage is changed. - pub location: U256, - /// What the value has been changed to. - pub value: U256, + /// Which key in storage is changed. + pub location: U256, + /// What the value has been changed to. 
+ pub value: U256, } #[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)] /// A record of an executed VM operation. pub struct VMExecutedOperation { - /// The total gas used. - pub gas_used: U256, - /// The stack item placed, if any. - pub stack_push: Vec, - /// If altered, the memory delta. - pub mem_diff: Option, - /// The altered storage value, if any. - pub store_diff: Option, + /// The total gas used. + pub gas_used: U256, + /// The stack item placed, if any. + pub stack_push: Vec, + /// If altered, the memory delta. + pub mem_diff: Option, + /// The altered storage value, if any. + pub store_diff: Option, } #[derive(Debug, Clone, PartialEq, Default, RlpEncodable, RlpDecodable)] /// A record of the execution of a single VM operation. pub struct VMOperation { - /// The program counter. - pub pc: usize, - /// The instruction executed. - pub instruction: u8, - /// The gas cost for this instruction. - pub gas_cost: U256, - /// Information concerning the execution of the operation. - pub executed: Option, + /// The program counter. + pub pc: usize, + /// The instruction executed. + pub instruction: u8, + /// The gas cost for this instruction. + pub gas_cost: U256, + /// Information concerning the execution of the operation. + pub executed: Option, } #[derive(Debug, Clone, PartialEq, Default, RlpEncodable, RlpDecodable)] /// A record of a full VM trace for a CALL/CREATE. pub struct VMTrace { - /// The step (i.e. index into operations) at which this trace corresponds. - pub parent_step: usize, - /// The code to be executed. - pub code: Bytes, - /// The operations executed. - pub operations: Vec, - /// The sub traces for each interior action performed as part of this call/create. - /// Thre is a 1:1 correspondance between these and a CALL/CREATE/CALLCODE/DELEGATECALL instruction. - pub subs: Vec, + /// The step (i.e. index into operations) at which this trace corresponds. + pub parent_step: usize, + /// The code to be executed. 
+ pub code: Bytes, + /// The operations executed. + pub operations: Vec, + /// The sub traces for each interior action performed as part of this call/create. + /// Thre is a 1:1 correspondance between these and a CALL/CREATE/CALLCODE/DELEGATECALL instruction. + pub subs: Vec, } diff --git a/ethcore/src/transaction_ext.rs b/ethcore/src/transaction_ext.rs index fefcd91a3..8e14af84e 100644 --- a/ethcore/src/transaction_ext.rs +++ b/ethcore/src/transaction_ext.rs @@ -21,23 +21,36 @@ use types::transaction::{self, Action}; /// Extends transaction with gas verification method. pub trait Transaction { - /// Get the transaction cost in gas for this transaction. - fn gas_required(&self, schedule: &Schedule) -> u64; + /// Get the transaction cost in gas for this transaction. + fn gas_required(&self, schedule: &Schedule) -> u64; } impl Transaction for transaction::Transaction { - fn gas_required(&self, schedule: &Schedule) -> u64 { - gas_required_for(match self.action { - Action::Create => true, - Action::Call(_) => false - }, &self.data, schedule) - } + fn gas_required(&self, schedule: &Schedule) -> u64 { + gas_required_for( + match self.action { + Action::Create => true, + Action::Call(_) => false, + }, + &self.data, + schedule, + ) + } } /// Get the transaction cost in gas for the given params. 
fn gas_required_for(is_create: bool, data: &[u8], schedule: &Schedule) -> u64 { - data.iter().fold( - (if is_create {schedule.tx_create_gas} else {schedule.tx_gas}) as u64, - |g, b| g + (match *b { 0 => schedule.tx_data_zero_gas, _ => schedule.tx_data_non_zero_gas }) as u64 - ) + data.iter().fold( + (if is_create { + schedule.tx_create_gas + } else { + schedule.tx_gas + }) as u64, + |g, b| { + g + (match *b { + 0 => schedule.tx_data_zero_gas, + _ => schedule.tx_data_non_zero_gas, + }) as u64 + }, + ) } diff --git a/ethcore/src/tx_filter.rs b/ethcore/src/tx_filter.rs index 3f32ab365..1e3c45ec4 100644 --- a/ethcore/src/tx_filter.rs +++ b/ethcore/src/tx_filter.rs @@ -16,272 +16,531 @@ //! Smart contract based transaction filter. -use ethereum_types::{H256, U256, Address}; -use lru_cache::LruCache; use ethabi::FunctionOutputDecoder; +use ethereum_types::{Address, H256, U256}; +use lru_cache::LruCache; use call_contract::CallContract; -use client::{BlockInfo, BlockId}; +use client::{BlockId, BlockInfo}; +use hash::KECCAK_EMPTY; use parking_lot::Mutex; use spec::CommonParams; -use types::transaction::{Action, SignedTransaction}; -use types::BlockNumber; -use hash::KECCAK_EMPTY; +use types::{ + transaction::{Action, SignedTransaction}, + BlockNumber, +}; -use_contract!(transact_acl_deprecated, "res/contracts/tx_acl_deprecated.json"); +use_contract!( + transact_acl_deprecated, + "res/contracts/tx_acl_deprecated.json" +); use_contract!(transact_acl, "res/contracts/tx_acl.json"); const MAX_CACHE_SIZE: usize = 4096; mod tx_permissions { - pub const _ALL: u32 = 0xffffffff; - pub const NONE: u32 = 0x0; - pub const BASIC: u32 = 0b00000001; - pub const CALL: u32 = 0b00000010; - pub const CREATE: u32 = 0b00000100; - pub const _PRIVATE: u32 = 0b00001000; + pub const _ALL: u32 = 0xffffffff; + pub const NONE: u32 = 0x0; + pub const BASIC: u32 = 0b00000001; + pub const CALL: u32 = 0b00000010; + pub const CREATE: u32 = 0b00000100; + pub const _PRIVATE: u32 = 0b00001000; } /// 
Connection filter that uses a contract to manage permissions. pub struct TransactionFilter { - contract_address: Address, - transition_block: BlockNumber, - permission_cache: Mutex>, - contract_version_cache: Mutex>> + contract_address: Address, + transition_block: BlockNumber, + permission_cache: Mutex>, + contract_version_cache: Mutex>>, } impl TransactionFilter { - /// Create a new instance if address is specified in params. - pub fn from_params(params: &CommonParams) -> Option { - params.transaction_permission_contract.map(|address| - TransactionFilter { - contract_address: address, - transition_block: params.transaction_permission_contract_transition, - permission_cache: Mutex::new(LruCache::new(MAX_CACHE_SIZE)), - contract_version_cache: Mutex::new(LruCache::new(MAX_CACHE_SIZE)), - } - ) - } + /// Create a new instance if address is specified in params. + pub fn from_params(params: &CommonParams) -> Option { + params + .transaction_permission_contract + .map(|address| TransactionFilter { + contract_address: address, + transition_block: params.transaction_permission_contract_transition, + permission_cache: Mutex::new(LruCache::new(MAX_CACHE_SIZE)), + contract_version_cache: Mutex::new(LruCache::new(MAX_CACHE_SIZE)), + }) + } - /// Check if transaction is allowed at given block. - pub fn transaction_allowed(&self, parent_hash: &H256, block_number: BlockNumber, transaction: &SignedTransaction, client: &C) -> bool { - if block_number < self.transition_block { return true; } + /// Check if transaction is allowed at given block. 
+ pub fn transaction_allowed( + &self, + parent_hash: &H256, + block_number: BlockNumber, + transaction: &SignedTransaction, + client: &C, + ) -> bool { + if block_number < self.transition_block { + return true; + } - let mut permission_cache = self.permission_cache.lock(); - let mut contract_version_cache = self.contract_version_cache.lock(); + let mut permission_cache = self.permission_cache.lock(); + let mut contract_version_cache = self.contract_version_cache.lock(); - let (tx_type, to) = match transaction.action { - Action::Create => (tx_permissions::CREATE, Address::new()), - Action::Call(address) => if client.code_hash(&address, BlockId::Hash(*parent_hash)).map_or(false, |c| c != KECCAK_EMPTY) { - (tx_permissions::CALL, address) - } else { - (tx_permissions::BASIC, address) - } - }; + let (tx_type, to) = match transaction.action { + Action::Create => (tx_permissions::CREATE, Address::new()), + Action::Call(address) => { + if client + .code_hash(&address, BlockId::Hash(*parent_hash)) + .map_or(false, |c| c != KECCAK_EMPTY) + { + (tx_permissions::CALL, address) + } else { + (tx_permissions::BASIC, address) + } + } + }; - let sender = transaction.sender(); - let value = transaction.value; - let key = (*parent_hash, sender); + let sender = transaction.sender(); + let value = transaction.value; + let key = (*parent_hash, sender); - if let Some(permissions) = permission_cache.get_mut(&key) { - return *permissions & tx_type != 0; - } + if let Some(permissions) = permission_cache.get_mut(&key) { + return *permissions & tx_type != 0; + } - let contract_address = self.contract_address; - let contract_version = contract_version_cache.get_mut(parent_hash).and_then(|v| *v).or_else(|| { - let (data, decoder) = transact_acl::functions::contract_version::call(); - decoder.decode(&client.call_contract(BlockId::Hash(*parent_hash), contract_address, data).ok()?).ok() - }); - contract_version_cache.insert(*parent_hash, contract_version); + let contract_address = 
self.contract_address; + let contract_version = contract_version_cache + .get_mut(parent_hash) + .and_then(|v| *v) + .or_else(|| { + let (data, decoder) = transact_acl::functions::contract_version::call(); + decoder + .decode( + &client + .call_contract(BlockId::Hash(*parent_hash), contract_address, data) + .ok()?, + ) + .ok() + }); + contract_version_cache.insert(*parent_hash, contract_version); - // Check permissions in smart contract based on its version - let (permissions, filter_only_sender) = match contract_version { - Some(version) => { - let version_u64 = version.low_u64(); - trace!(target: "tx_filter", "Version of tx permission contract: {}", version); - match version_u64 { - 2 => { - let (data, decoder) = transact_acl::functions::allowed_tx_types::call(sender, to, value); - client.call_contract(BlockId::Hash(*parent_hash), contract_address, data) + // Check permissions in smart contract based on its version + let (permissions, filter_only_sender) = match contract_version { + Some(version) => { + let version_u64 = version.low_u64(); + trace!(target: "tx_filter", "Version of tx permission contract: {}", version); + match version_u64 { + 2 => { + let (data, decoder) = + transact_acl::functions::allowed_tx_types::call(sender, to, value); + client.call_contract(BlockId::Hash(*parent_hash), contract_address, data) .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string())) .map(|(p, f)| (p.low_u32(), f)) .unwrap_or_else(|e| { error!(target: "tx_filter", "Error calling tx permissions contract: {:?}", e); (tx_permissions::NONE, true) }) - }, - _ => { - error!(target: "tx_filter", "Unknown version of tx permissions contract is used"); - (tx_permissions::NONE, true) - } - } - }, - None => { - trace!(target: "tx_filter", "Fallback to the deprecated version of tx permission contract"); - let (data, decoder) = transact_acl_deprecated::functions::allowed_tx_types::call(sender); - (client.call_contract(BlockId::Hash(*parent_hash), contract_address, data) + } + 
_ => { + error!(target: "tx_filter", "Unknown version of tx permissions contract is used"); + (tx_permissions::NONE, true) + } + } + } + None => { + trace!(target: "tx_filter", "Fallback to the deprecated version of tx permission contract"); + let (data, decoder) = + transact_acl_deprecated::functions::allowed_tx_types::call(sender); + (client.call_contract(BlockId::Hash(*parent_hash), contract_address, data) .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string())) .map(|p| p.low_u32()) .unwrap_or_else(|e| { error!(target: "tx_filter", "Error calling tx permissions contract: {:?}", e); tx_permissions::NONE }), true) - } - }; + } + }; - if filter_only_sender { - permission_cache.insert((*parent_hash, sender), permissions); - } - trace!(target: "tx_filter", - "Given transaction data: sender: {:?} to: {:?} value: {}. Permissions required: {:X}, got: {:X}", - sender, to, value, tx_type, permissions - ); - permissions & tx_type != 0 - } + if filter_only_sender { + permission_cache.insert((*parent_hash, sender), permissions); + } + trace!(target: "tx_filter", + "Given transaction data: sender: {:?} to: {:?} value: {}. 
Permissions required: {:X}, got: {:X}", + sender, to, value, tx_type, permissions + ); + permissions & tx_type != 0 + } } #[cfg(test)] mod test { - use std::sync::Arc; - use spec::Spec; - use client::{BlockChainClient, Client, ClientConfig, BlockId}; - use miner::Miner; - use ethereum_types::{U256, Address}; - use io::IoChannel; - use ethkey::{Secret, KeyPair}; - use super::TransactionFilter; - use types::transaction::{Transaction, Action}; - use tempdir::TempDir; - use test_helpers; + use super::TransactionFilter; + use client::{BlockChainClient, BlockId, Client, ClientConfig}; + use ethereum_types::{Address, U256}; + use ethkey::{KeyPair, Secret}; + use io::IoChannel; + use miner::Miner; + use spec::Spec; + use std::sync::Arc; + use tempdir::TempDir; + use test_helpers; + use types::transaction::{Action, Transaction}; - /// Contract code: https://gist.github.com/VladLupashevskyi/84f18eabb1e4afadf572cf92af3e7e7f - #[test] - fn transaction_filter() { - let spec_data = include_str!("../res/tx_permission_tests/contract_ver_2_genesis.json"); + /// Contract code: https://gist.github.com/VladLupashevskyi/84f18eabb1e4afadf572cf92af3e7e7f + #[test] + fn transaction_filter() { + let spec_data = include_str!("../res/tx_permission_tests/contract_ver_2_genesis.json"); - let db = test_helpers::new_db(); - let tempdir = TempDir::new("").unwrap(); - let spec = Spec::load(&tempdir.path(), spec_data.as_bytes()).unwrap(); + let db = test_helpers::new_db(); + let tempdir = TempDir::new("").unwrap(); + let spec = Spec::load(&tempdir.path(), spec_data.as_bytes()).unwrap(); - let client = Client::new( - ClientConfig::default(), - &spec, - db, - Arc::new(Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); - let key1 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000001")).unwrap(); - let key2 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000002")).unwrap(); - let 
key3 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000003")).unwrap(); - let key4 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000004")).unwrap(); - let key5 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000005")).unwrap(); - let key6 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000006")).unwrap(); - let key7 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000007")).unwrap(); + let client = Client::new( + ClientConfig::default(), + &spec, + db, + Arc::new(Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + let key1 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000001", + )) + .unwrap(); + let key2 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000002", + )) + .unwrap(); + let key3 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000003", + )) + .unwrap(); + let key4 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000004", + )) + .unwrap(); + let key5 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000005", + )) + .unwrap(); + let key6 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000006", + )) + .unwrap(); + let key7 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000007", + )) + .unwrap(); - let filter = TransactionFilter::from_params(spec.params()).unwrap(); - let mut basic_tx = Transaction::default(); - basic_tx.action = Action::Call(Address::from("d41c057fd1c78805aac12b0a94a405c0461a6fbb")); - let create_tx = Transaction::default(); - let mut 
call_tx = Transaction::default(); - call_tx.action = Action::Call(Address::from("0000000000000000000000000000000000000005")); + let filter = TransactionFilter::from_params(spec.params()).unwrap(); + let mut basic_tx = Transaction::default(); + basic_tx.action = Action::Call(Address::from("d41c057fd1c78805aac12b0a94a405c0461a6fbb")); + let create_tx = Transaction::default(); + let mut call_tx = Transaction::default(); + call_tx.action = Action::Call(Address::from("0000000000000000000000000000000000000005")); - let mut basic_tx_with_ether_and_to_key7 = Transaction::default(); - basic_tx_with_ether_and_to_key7.action = Action::Call(Address::from("d41c057fd1c78805aac12b0a94a405c0461a6fbb")); - basic_tx_with_ether_and_to_key7.value = U256::from(123123); - let mut call_tx_with_ether = Transaction::default(); - call_tx_with_ether.action = Action::Call(Address::from("0000000000000000000000000000000000000005")); - call_tx_with_ether.value = U256::from(123123); + let mut basic_tx_with_ether_and_to_key7 = Transaction::default(); + basic_tx_with_ether_and_to_key7.action = + Action::Call(Address::from("d41c057fd1c78805aac12b0a94a405c0461a6fbb")); + basic_tx_with_ether_and_to_key7.value = U256::from(123123); + let mut call_tx_with_ether = Transaction::default(); + call_tx_with_ether.action = + Action::Call(Address::from("0000000000000000000000000000000000000005")); + call_tx_with_ether.value = U256::from(123123); - let mut basic_tx_to_key6 = Transaction::default(); - basic_tx_to_key6.action = Action::Call(Address::from("e57bfe9f44b819898f47bf37e5af72a0783e1141")); - let mut basic_tx_with_ether_and_to_key6 = Transaction::default(); - basic_tx_with_ether_and_to_key6.action = Action::Call(Address::from("e57bfe9f44b819898f47bf37e5af72a0783e1141")); - basic_tx_with_ether_and_to_key6.value = U256::from(123123); + let mut basic_tx_to_key6 = Transaction::default(); + basic_tx_to_key6.action = + Action::Call(Address::from("e57bfe9f44b819898f47bf37e5af72a0783e1141")); + let mut 
basic_tx_with_ether_and_to_key6 = Transaction::default(); + basic_tx_with_ether_and_to_key6.action = + Action::Call(Address::from("e57bfe9f44b819898f47bf37e5af72a0783e1141")); + basic_tx_with_ether_and_to_key6.value = U256::from(123123); - let genesis = client.block_hash(BlockId::Latest).unwrap(); - let block_number = 1; + let genesis = client.block_hash(BlockId::Latest).unwrap(); + let block_number = 1; - assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key2.secret(), None), &*client)); - // same tx but request is allowed because the contract only enables at block #1 - assert!(filter.transaction_allowed(&genesis, 0, &create_tx.clone().sign(key2.secret(), None), &*client)); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key2.secret(), None), + &*client + )); + // same tx but request is allowed because the contract only enables at block #1 + assert!(filter.transaction_allowed( + &genesis, + 0, + &create_tx.clone().sign(key2.secret(), None), + &*client + )); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key1.secret(), None), &*client)); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &basic_tx.clone().sign(key1.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key1.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &call_tx.clone().sign(key1.secret(), None), + &*client + )); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key2.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, 
&create_tx.clone().sign(key2.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key2.secret(), None), &*client)); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &basic_tx.clone().sign(key2.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key2.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &call_tx.clone().sign(key2.secret(), None), + &*client + )); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key3.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key3.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key3.secret(), None), &*client)); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &basic_tx.clone().sign(key3.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key3.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &call_tx.clone().sign(key3.secret(), None), + &*client + )); - assert!(!filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key4.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key4.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key4.secret(), None), &*client)); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &basic_tx.clone().sign(key4.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key4.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + 
block_number, + &call_tx.clone().sign(key4.secret(), None), + &*client + )); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key1.secret(), None), &*client)); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &basic_tx.clone().sign(key1.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key1.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &call_tx.clone().sign(key1.secret(), None), + &*client + )); - assert!(!filter.transaction_allowed(&genesis, block_number, &basic_tx_with_ether_and_to_key7.clone().sign(key5.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx_with_ether.clone().sign(key5.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key6.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx_with_ether_and_to_key7.clone().sign(key6.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx_to_key6.clone().sign(key7.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &basic_tx_with_ether_and_to_key6.clone().sign(key7.secret(), None), &*client)); - } + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &basic_tx_with_ether_and_to_key7 + .clone() + .sign(key5.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &call_tx_with_ether.clone().sign(key5.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + 
&basic_tx.clone().sign(key6.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &basic_tx_with_ether_and_to_key7 + .clone() + .sign(key6.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &basic_tx_to_key6.clone().sign(key7.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &basic_tx_with_ether_and_to_key6 + .clone() + .sign(key7.secret(), None), + &*client + )); + } - /// Contract code: https://gist.github.com/arkpar/38a87cb50165b7e683585eec71acb05a - #[test] - fn transaction_filter_deprecated() { - let spec_data = include_str!("../res/tx_permission_tests/deprecated_contract_genesis.json"); + /// Contract code: https://gist.github.com/arkpar/38a87cb50165b7e683585eec71acb05a + #[test] + fn transaction_filter_deprecated() { + let spec_data = include_str!("../res/tx_permission_tests/deprecated_contract_genesis.json"); - let db = test_helpers::new_db(); - let tempdir = TempDir::new("").unwrap(); - let spec = Spec::load(&tempdir.path(), spec_data.as_bytes()).unwrap(); + let db = test_helpers::new_db(); + let tempdir = TempDir::new("").unwrap(); + let spec = Spec::load(&tempdir.path(), spec_data.as_bytes()).unwrap(); - let client = Client::new( - ClientConfig::default(), - &spec, - db, - Arc::new(Miner::new_for_tests(&spec, None)), - IoChannel::disconnected(), - ).unwrap(); - let key1 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000001")).unwrap(); - let key2 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000002")).unwrap(); - let key3 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000003")).unwrap(); - let key4 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000004")).unwrap(); + let client = Client::new( + 
ClientConfig::default(), + &spec, + db, + Arc::new(Miner::new_for_tests(&spec, None)), + IoChannel::disconnected(), + ) + .unwrap(); + let key1 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000001", + )) + .unwrap(); + let key2 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000002", + )) + .unwrap(); + let key3 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000003", + )) + .unwrap(); + let key4 = KeyPair::from_secret(Secret::from( + "0000000000000000000000000000000000000000000000000000000000000004", + )) + .unwrap(); - let filter = TransactionFilter::from_params(spec.params()).unwrap(); - let mut basic_tx = Transaction::default(); - basic_tx.action = Action::Call(Address::from("000000000000000000000000000000000000032")); - let create_tx = Transaction::default(); - let mut call_tx = Transaction::default(); - call_tx.action = Action::Call(Address::from("0000000000000000000000000000000000000005")); + let filter = TransactionFilter::from_params(spec.params()).unwrap(); + let mut basic_tx = Transaction::default(); + basic_tx.action = Action::Call(Address::from("000000000000000000000000000000000000032")); + let create_tx = Transaction::default(); + let mut call_tx = Transaction::default(); + call_tx.action = Action::Call(Address::from("0000000000000000000000000000000000000005")); - let genesis = client.block_hash(BlockId::Latest).unwrap(); - let block_number = 1; + let genesis = client.block_hash(BlockId::Latest).unwrap(); + let block_number = 1; - assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key2.secret(), None), &*client)); - // same tx but request is allowed because the contract only enables at block #1 - assert!(filter.transaction_allowed(&genesis, 0, &create_tx.clone().sign(key2.secret(), None), &*client)); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + 
&create_tx.clone().sign(key2.secret(), None), + &*client + )); + // same tx but request is allowed because the contract only enables at block #1 + assert!(filter.transaction_allowed( + &genesis, + 0, + &create_tx.clone().sign(key2.secret(), None), + &*client + )); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key1.secret(), None), &*client)); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &basic_tx.clone().sign(key1.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key1.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &call_tx.clone().sign(key1.secret(), None), + &*client + )); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key2.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key2.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key2.secret(), None), &*client)); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &basic_tx.clone().sign(key2.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key2.secret(), None), + &*client + )); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &call_tx.clone().sign(key2.secret(), None), + &*client + )); - assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key3.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key3.secret(), None), &*client)); - 
assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key3.secret(), None), &*client)); + assert!(filter.transaction_allowed( + &genesis, + block_number, + &basic_tx.clone().sign(key3.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key3.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &call_tx.clone().sign(key3.secret(), None), + &*client + )); - assert!(!filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key4.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key4.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key4.secret(), None), &*client)); - } + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &basic_tx.clone().sign(key4.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &create_tx.clone().sign(key4.secret(), None), + &*client + )); + assert!(!filter.transaction_allowed( + &genesis, + block_number, + &call_tx.clone().sign(key4.secret(), None), + &*client + )); + } } diff --git a/ethcore/src/verification/canon_verifier.rs b/ethcore/src/verification/canon_verifier.rs index 03a1c7155..de7407d37 100644 --- a/ethcore/src/verification/canon_verifier.rs +++ b/ethcore/src/verification/canon_verifier.rs @@ -16,33 +16,32 @@ //! Canonical verifier. +use super::{verification, Verifier}; use call_contract::CallContract; use client::BlockInfo; use engines::EthEngine; use error::Error; use types::header::Header; -use super::Verifier; -use super::verification; /// A canonial verifier -- this does full verification. 
pub struct CanonVerifier; impl Verifier for CanonVerifier { - fn verify_block_family( - &self, - header: &Header, - parent: &Header, - engine: &EthEngine, - do_full: Option>, - ) -> Result<(), Error> { - verification::verify_block_family(header, parent, engine, do_full) - } + fn verify_block_family( + &self, + header: &Header, + parent: &Header, + engine: &EthEngine, + do_full: Option>, + ) -> Result<(), Error> { + verification::verify_block_family(header, parent, engine, do_full) + } - fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error> { - verification::verify_block_final(expected, got) - } + fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error> { + verification::verify_block_final(expected, got) + } - fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error> { - engine.verify_block_external(header) - } + fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error> { + engine.verify_block_external(header) + } } diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 5546bd60c..8022c3a4b 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -16,17 +16,19 @@ //! Block verification utilities. -mod verification; -mod verifier; -pub mod queue; mod canon_verifier; mod noop_verifier; +pub mod queue; +mod verification; +mod verifier; -pub use self::verification::*; -pub use self::verifier::Verifier; -pub use self::canon_verifier::CanonVerifier; -pub use self::noop_verifier::NoopVerifier; -pub use self::queue::{BlockQueue, Config as QueueConfig, VerificationQueue, QueueInfo}; +pub use self::{ + canon_verifier::CanonVerifier, + noop_verifier::NoopVerifier, + queue::{BlockQueue, Config as QueueConfig, QueueInfo, VerificationQueue}, + verification::*, + verifier::Verifier, +}; use call_contract::CallContract; use client::BlockInfo; @@ -34,29 +36,29 @@ use client::BlockInfo; /// Verifier type. 
#[derive(Debug, PartialEq, Clone)] pub enum VerifierType { - /// Verifies block normally. - Canon, - /// Verifies block normallly, but skips seal verification. - CanonNoSeal, - /// Does not verify block at all. - /// Used in tests. - Noop, + /// Verifies block normally. + Canon, + /// Verifies block normallly, but skips seal verification. + CanonNoSeal, + /// Does not verify block at all. + /// Used in tests. + Noop, } /// Create a new verifier based on type. pub fn new(v: VerifierType) -> Box> { - match v { - VerifierType::Canon | VerifierType::CanonNoSeal => Box::new(CanonVerifier), - VerifierType::Noop => Box::new(NoopVerifier), - } + match v { + VerifierType::Canon | VerifierType::CanonNoSeal => Box::new(CanonVerifier), + VerifierType::Noop => Box::new(NoopVerifier), + } } impl VerifierType { - /// Check if seal verification is enabled for this verifier type. - pub fn verifying_seal(&self) -> bool { - match *self { - VerifierType::Canon => true, - VerifierType::Noop | VerifierType::CanonNoSeal => false, - } - } + /// Check if seal verification is enabled for this verifier type. + pub fn verifying_seal(&self) -> bool { + match *self { + VerifierType::Canon => true, + VerifierType::Noop | VerifierType::CanonNoSeal => false, + } + } } diff --git a/ethcore/src/verification/noop_verifier.rs b/ethcore/src/verification/noop_verifier.rs index d68f1eb88..7da110094 100644 --- a/ethcore/src/verification/noop_verifier.rs +++ b/ethcore/src/verification/noop_verifier.rs @@ -16,33 +16,33 @@ //! No-op verifier. +use super::{verification, Verifier}; use call_contract::CallContract; use client::BlockInfo; use engines::EthEngine; use error::Error; use types::header::Header; -use super::{verification, Verifier}; /// A no-op verifier -- this will verify everything it's given immediately. 
#[allow(dead_code)] pub struct NoopVerifier; impl Verifier for NoopVerifier { - fn verify_block_family( - &self, - _: &Header, - _t: &Header, - _: &EthEngine, - _: Option> - ) -> Result<(), Error> { - Ok(()) - } + fn verify_block_family( + &self, + _: &Header, + _t: &Header, + _: &EthEngine, + _: Option>, + ) -> Result<(), Error> { + Ok(()) + } - fn verify_block_final(&self, _expected: &Header, _got: &Header) -> Result<(), Error> { - Ok(()) - } + fn verify_block_final(&self, _expected: &Header, _got: &Header) -> Result<(), Error> { + Ok(()) + } - fn verify_block_external(&self, _header: &Header, _engine: &EthEngine) -> Result<(), Error> { - Ok(()) - } + fn verify_block_external(&self, _header: &Header, _engine: &EthEngine) -> Result<(), Error> { + Ok(()) + } } diff --git a/ethcore/src/verification/queue/kind.rs b/ethcore/src/verification/queue/kind.rs index f36076230..c57543760 100644 --- a/ethcore/src/verification/queue/kind.rs +++ b/ethcore/src/verification/queue/kind.rs @@ -19,25 +19,24 @@ use engines::EthEngine; use error::Error; -use heapsize::HeapSizeOf; use ethereum_types::{H256, U256}; +use heapsize::HeapSizeOf; -pub use self::blocks::Blocks; -pub use self::headers::Headers; +pub use self::{blocks::Blocks, headers::Headers}; /// Something which can produce a hash and a parent hash. pub trait BlockLike { - /// Get the hash of this item - i.e. the header hash. - fn hash(&self) -> H256; + /// Get the hash of this item - i.e. the header hash. + fn hash(&self) -> H256; - /// Get a raw hash of this item - i.e. the hash of the RLP representation. - fn raw_hash(&self) -> H256; + /// Get a raw hash of this item - i.e. the hash of the RLP representation. + fn raw_hash(&self) -> H256; - /// Get the hash of this item's parent. - fn parent_hash(&self) -> H256; + /// Get the hash of this item's parent. + fn parent_hash(&self) -> H256; - /// Get the difficulty of this item. - fn difficulty(&self) -> U256; + /// Get the difficulty of this item. 
+ fn difficulty(&self) -> U256; } /// Defines transitions between stages of verification. @@ -51,188 +50,221 @@ pub trait BlockLike { /// For correctness, the hashes produced by each stage of the pipeline should be /// consistent. pub trait Kind: 'static + Sized + Send + Sync { - /// The first stage: completely unverified. - type Input: Sized + Send + BlockLike + HeapSizeOf; + /// The first stage: completely unverified. + type Input: Sized + Send + BlockLike + HeapSizeOf; - /// The second stage: partially verified. - type Unverified: Sized + Send + BlockLike + HeapSizeOf; + /// The second stage: partially verified. + type Unverified: Sized + Send + BlockLike + HeapSizeOf; - /// The third stage: completely verified. - type Verified: Sized + Send + BlockLike + HeapSizeOf; + /// The third stage: completely verified. + type Verified: Sized + Send + BlockLike + HeapSizeOf; - /// Attempt to create the `Unverified` item from the input. - fn create(input: Self::Input, engine: &EthEngine, check_seal: bool) -> Result; + /// Attempt to create the `Unverified` item from the input. + fn create( + input: Self::Input, + engine: &EthEngine, + check_seal: bool, + ) -> Result; - /// Attempt to verify the `Unverified` item using the given engine. - fn verify(unverified: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result; + /// Attempt to verify the `Unverified` item using the given engine. + fn verify( + unverified: Self::Unverified, + engine: &EthEngine, + check_seal: bool, + ) -> Result; } /// The blocks verification module. 
pub mod blocks { - use super::{Kind, BlockLike}; + use super::{BlockLike, Kind}; - use engines::EthEngine; - use error::{Error, ErrorKind, BlockError}; - use types::header::Header; - use verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered}; - use types::transaction::UnverifiedTransaction; + use engines::EthEngine; + use error::{BlockError, Error, ErrorKind}; + use types::{header::Header, transaction::UnverifiedTransaction}; + use verification::{verify_block_basic, verify_block_unordered, PreverifiedBlock}; - use heapsize::HeapSizeOf; - use ethereum_types::{H256, U256}; - use bytes::Bytes; + use bytes::Bytes; + use ethereum_types::{H256, U256}; + use heapsize::HeapSizeOf; - /// A mode for verifying blocks. - pub struct Blocks; + /// A mode for verifying blocks. + pub struct Blocks; - impl Kind for Blocks { - type Input = Unverified; - type Unverified = Unverified; - type Verified = PreverifiedBlock; + impl Kind for Blocks { + type Input = Unverified; + type Unverified = Unverified; + type Verified = PreverifiedBlock; - fn create(input: Self::Input, engine: &EthEngine, check_seal: bool) -> Result { - match verify_block_basic(&input, engine, check_seal) { - Ok(()) => Ok(input), - Err(Error(ErrorKind::Block(BlockError::TemporarilyInvalid(oob)), _)) => { - debug!(target: "client", "Block received too early {}: {:?}", input.hash(), oob); - Err((input, BlockError::TemporarilyInvalid(oob).into())) - }, - Err(e) => { - warn!(target: "client", "Stage 1 block verification failed for {}: {:?}", input.hash(), e); - Err((input, e)) - } - } - } + fn create( + input: Self::Input, + engine: &EthEngine, + check_seal: bool, + ) -> Result { + match verify_block_basic(&input, engine, check_seal) { + Ok(()) => Ok(input), + Err(Error(ErrorKind::Block(BlockError::TemporarilyInvalid(oob)), _)) => { + debug!(target: "client", "Block received too early {}: {:?}", input.hash(), oob); + Err((input, BlockError::TemporarilyInvalid(oob).into())) + } + Err(e) => { + 
warn!(target: "client", "Stage 1 block verification failed for {}: {:?}", input.hash(), e); + Err((input, e)) + } + } + } - fn verify(un: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result { - let hash = un.hash(); - match verify_block_unordered(un, engine, check_seal) { - Ok(verified) => Ok(verified), - Err(e) => { - warn!(target: "client", "Stage 2 block verification failed for {}: {:?}", hash, e); - Err(e) - } - } - } - } + fn verify( + un: Self::Unverified, + engine: &EthEngine, + check_seal: bool, + ) -> Result { + let hash = un.hash(); + match verify_block_unordered(un, engine, check_seal) { + Ok(verified) => Ok(verified), + Err(e) => { + warn!(target: "client", "Stage 2 block verification failed for {}: {:?}", hash, e); + Err(e) + } + } + } + } - /// An unverified block. - #[derive(PartialEq, Debug)] - pub struct Unverified { - /// Unverified block header. - pub header: Header, - /// Unverified block transactions. - pub transactions: Vec, - /// Unverified block uncles. - pub uncles: Vec
, - /// Raw block bytes. - pub bytes: Bytes, - } + /// An unverified block. + #[derive(PartialEq, Debug)] + pub struct Unverified { + /// Unverified block header. + pub header: Header, + /// Unverified block transactions. + pub transactions: Vec, + /// Unverified block uncles. + pub uncles: Vec
, + /// Raw block bytes. + pub bytes: Bytes, + } - impl Unverified { - /// Create an `Unverified` from raw bytes. - pub fn from_rlp(bytes: Bytes) -> Result { - use rlp::Rlp; - let (header, transactions, uncles) = { - let rlp = Rlp::new(&bytes); - let header = rlp.val_at(0)?; - let transactions = rlp.list_at(1)?; - let uncles = rlp.list_at(2)?; - (header, transactions, uncles) - }; + impl Unverified { + /// Create an `Unverified` from raw bytes. + pub fn from_rlp(bytes: Bytes) -> Result { + use rlp::Rlp; + let (header, transactions, uncles) = { + let rlp = Rlp::new(&bytes); + let header = rlp.val_at(0)?; + let transactions = rlp.list_at(1)?; + let uncles = rlp.list_at(2)?; + (header, transactions, uncles) + }; - Ok(Unverified { - header, - transactions, - uncles, - bytes, - }) - } - } + Ok(Unverified { + header, + transactions, + uncles, + bytes, + }) + } + } - impl HeapSizeOf for Unverified { - fn heap_size_of_children(&self) -> usize { - self.header.heap_size_of_children() - + self.transactions.heap_size_of_children() - + self.uncles.heap_size_of_children() - + self.bytes.heap_size_of_children() - } - } + impl HeapSizeOf for Unverified { + fn heap_size_of_children(&self) -> usize { + self.header.heap_size_of_children() + + self.transactions.heap_size_of_children() + + self.uncles.heap_size_of_children() + + self.bytes.heap_size_of_children() + } + } - impl BlockLike for Unverified { - fn hash(&self) -> H256 { - self.header.hash() - } + impl BlockLike for Unverified { + fn hash(&self) -> H256 { + self.header.hash() + } - fn raw_hash(&self) -> H256 { - hash::keccak(&self.bytes) - } + fn raw_hash(&self) -> H256 { + hash::keccak(&self.bytes) + } - fn parent_hash(&self) -> H256 { - self.header.parent_hash().clone() - } + fn parent_hash(&self) -> H256 { + self.header.parent_hash().clone() + } - fn difficulty(&self) -> U256 { - self.header.difficulty().clone() - } - } + fn difficulty(&self) -> U256 { + self.header.difficulty().clone() + } + } - impl BlockLike for 
PreverifiedBlock { - fn hash(&self) -> H256 { - self.header.hash() - } + impl BlockLike for PreverifiedBlock { + fn hash(&self) -> H256 { + self.header.hash() + } - fn raw_hash(&self) -> H256 { - hash::keccak(&self.bytes) - } + fn raw_hash(&self) -> H256 { + hash::keccak(&self.bytes) + } - fn parent_hash(&self) -> H256 { - self.header.parent_hash().clone() - } + fn parent_hash(&self) -> H256 { + self.header.parent_hash().clone() + } - fn difficulty(&self) -> U256 { - self.header.difficulty().clone() - } - } + fn difficulty(&self) -> U256 { + self.header.difficulty().clone() + } + } } /// Verification for headers. pub mod headers { - use super::{Kind, BlockLike}; + use super::{BlockLike, Kind}; - use engines::EthEngine; - use error::Error; - use types::header::Header; - use verification::verify_header_params; + use engines::EthEngine; + use error::Error; + use types::header::Header; + use verification::verify_header_params; - use ethereum_types::{H256, U256}; + use ethereum_types::{H256, U256}; - impl BlockLike for Header { - fn hash(&self) -> H256 { self.hash() } - fn raw_hash(&self) -> H256 { self.hash() } - fn parent_hash(&self) -> H256 { self.parent_hash().clone() } - fn difficulty(&self) -> U256 { self.difficulty().clone() } - } + impl BlockLike for Header { + fn hash(&self) -> H256 { + self.hash() + } + fn raw_hash(&self) -> H256 { + self.hash() + } + fn parent_hash(&self) -> H256 { + self.parent_hash().clone() + } + fn difficulty(&self) -> U256 { + self.difficulty().clone() + } + } - /// A mode for verifying headers. - pub struct Headers; + /// A mode for verifying headers. 
+ pub struct Headers; - impl Kind for Headers { - type Input = Header; - type Unverified = Header; - type Verified = Header; + impl Kind for Headers { + type Input = Header; + type Unverified = Header; + type Verified = Header; - fn create(input: Self::Input, engine: &EthEngine, check_seal: bool) -> Result { - match verify_header_params(&input, engine, true, check_seal) { - Ok(_) => Ok(input), - Err(err) => Err((input, err)) - } - } + fn create( + input: Self::Input, + engine: &EthEngine, + check_seal: bool, + ) -> Result { + match verify_header_params(&input, engine, true, check_seal) { + Ok(_) => Ok(input), + Err(err) => Err((input, err)), + } + } - fn verify(unverified: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result { - match check_seal { - true => engine.verify_block_unordered(&unverified,).map(|_| unverified), - false => Ok(unverified), - } - } - } + fn verify( + unverified: Self::Unverified, + engine: &EthEngine, + check_seal: bool, + ) -> Result { + match check_seal { + true => engine + .verify_block_unordered(&unverified) + .map(|_| unverified), + false => Ok(unverified), + } + } + } } diff --git a/ethcore/src/verification/queue/mod.rs b/ethcore/src/verification/queue/mod.rs index 4c8c71763..6cc52cadb 100644 --- a/ethcore/src/verification/queue/mod.rs +++ b/ethcore/src/verification/queue/mod.rs @@ -17,19 +17,23 @@ //! A queue of blocks. Sits between network or other I/O and the `BlockChain`. //! Sorts them ready for blockchain insertion. 
-use std::thread::{self, JoinHandle}; -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering}; -use std::sync::Arc; -use std::cmp; -use std::collections::{VecDeque, HashSet, HashMap}; -use heapsize::HeapSizeOf; -use ethereum_types::{H256, U256}; -use parking_lot::{Condvar, Mutex, RwLock}; -use io::*; -use error::{BlockError, ImportErrorKind, ErrorKind, Error}; -use engines::EthEngine; use client::ClientIoMessage; +use engines::EthEngine; +use error::{BlockError, Error, ErrorKind, ImportErrorKind}; +use ethereum_types::{H256, U256}; +use heapsize::HeapSizeOf; +use io::*; use len_caching_lock::LenCachingMutex; +use parking_lot::{Condvar, Mutex, RwLock}; +use std::{ + cmp, + collections::{HashMap, HashSet, VecDeque}, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering}, + Arc, + }, + thread::{self, JoinHandle}, +}; use self::kind::{BlockLike, Kind}; @@ -49,911 +53,980 @@ pub type HeaderQueue = VerificationQueue; /// Verification queue configuration #[derive(Debug, PartialEq, Clone)] pub struct Config { - /// Maximum number of items to keep in unverified queue. - /// When the limit is reached, is_full returns true. - pub max_queue_size: usize, - /// Maximum heap memory to use. - /// When the limit is reached, is_full returns true. - pub max_mem_use: usize, - /// Settings for the number of verifiers and adaptation strategy. - pub verifier_settings: VerifierSettings, + /// Maximum number of items to keep in unverified queue. + /// When the limit is reached, is_full returns true. + pub max_queue_size: usize, + /// Maximum heap memory to use. + /// When the limit is reached, is_full returns true. + pub max_mem_use: usize, + /// Settings for the number of verifiers and adaptation strategy. 
+ pub verifier_settings: VerifierSettings, } impl Default for Config { - fn default() -> Self { - Config { - max_queue_size: 30000, - max_mem_use: 50 * 1024 * 1024, - verifier_settings: VerifierSettings::default(), - } - } + fn default() -> Self { + Config { + max_queue_size: 30000, + max_mem_use: 50 * 1024 * 1024, + verifier_settings: VerifierSettings::default(), + } + } } /// Verifier settings. #[derive(Debug, PartialEq, Clone)] pub struct VerifierSettings { - /// Whether to scale amount of verifiers according to load. - // Todo: replace w/ strategy enum? - pub scale_verifiers: bool, - /// Beginning amount of verifiers. - pub num_verifiers: usize, + /// Whether to scale amount of verifiers according to load. + // Todo: replace w/ strategy enum? + pub scale_verifiers: bool, + /// Beginning amount of verifiers. + pub num_verifiers: usize, } impl Default for VerifierSettings { - fn default() -> Self { - VerifierSettings { - scale_verifiers: false, - num_verifiers: ::num_cpus::get(), - } - } + fn default() -> Self { + VerifierSettings { + scale_verifiers: false, + num_verifiers: ::num_cpus::get(), + } + } } // pool states enum State { - // all threads with id < inner value are to work. - Work(usize), - Exit, + // all threads with id < inner value are to work. + Work(usize), + Exit, } /// An item which is in the process of being verified. pub struct Verifying { - hash: H256, - output: Option, + hash: H256, + output: Option, } impl HeapSizeOf for Verifying { - fn heap_size_of_children(&self) -> usize { - self.output.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.output.heap_size_of_children() + } } /// Status of items in the queue. pub enum Status { - /// Currently queued. - Queued, - /// Known to be bad. - Bad, - /// Unknown. - Unknown, + /// Currently queued. + Queued, + /// Known to be bad. + Bad, + /// Unknown. 
+ Unknown, } impl Into<::types::block_status::BlockStatus> for Status { - fn into(self) -> ::types::block_status::BlockStatus { - use ::types::block_status::BlockStatus; - match self { - Status::Queued => BlockStatus::Queued, - Status::Bad => BlockStatus::Bad, - Status::Unknown => BlockStatus::Unknown, - } - } + fn into(self) -> ::types::block_status::BlockStatus { + use types::block_status::BlockStatus; + match self { + Status::Queued => BlockStatus::Queued, + Status::Bad => BlockStatus::Bad, + Status::Unknown => BlockStatus::Unknown, + } + } } // the internal queue sizes. struct Sizes { - unverified: AtomicUsize, - verifying: AtomicUsize, - verified: AtomicUsize, + unverified: AtomicUsize, + verifying: AtomicUsize, + verified: AtomicUsize, } /// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`. /// Keeps them in the same order as inserted, minus invalid items. pub struct VerificationQueue { - engine: Arc, - more_to_verify: Arc, - verification: Arc>, - deleting: Arc, - ready_signal: Arc, - empty: Arc, - processing: RwLock>, // hash to difficulty - ticks_since_adjustment: AtomicUsize, - max_queue_size: usize, - max_mem_use: usize, - scale_verifiers: bool, - verifier_handles: Vec>, - state: Arc<(Mutex, Condvar)>, - total_difficulty: RwLock, + engine: Arc, + more_to_verify: Arc, + verification: Arc>, + deleting: Arc, + ready_signal: Arc, + empty: Arc, + processing: RwLock>, // hash to difficulty + ticks_since_adjustment: AtomicUsize, + max_queue_size: usize, + max_mem_use: usize, + scale_verifiers: bool, + verifier_handles: Vec>, + state: Arc<(Mutex, Condvar)>, + total_difficulty: RwLock, } struct QueueSignal { - deleting: Arc, - signalled: AtomicBool, - message_channel: Mutex>, + deleting: Arc, + signalled: AtomicBool, + message_channel: Mutex>, } impl QueueSignal { - fn set_sync(&self) { - // Do not signal when we are about to close - if self.deleting.load(AtomicOrdering::Relaxed) { - return; - } + fn set_sync(&self) { + // Do 
not signal when we are about to close + if self.deleting.load(AtomicOrdering::Relaxed) { + return; + } - if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false { - let channel = self.message_channel.lock().clone(); - if let Err(e) = channel.send_sync(ClientIoMessage::BlockVerified) { - debug!("Error sending BlockVerified message: {:?}", e); - } - } - } + if self + .signalled + .compare_and_swap(false, true, AtomicOrdering::Relaxed) + == false + { + let channel = self.message_channel.lock().clone(); + if let Err(e) = channel.send_sync(ClientIoMessage::BlockVerified) { + debug!("Error sending BlockVerified message: {:?}", e); + } + } + } - fn set_async(&self) { - // Do not signal when we are about to close - if self.deleting.load(AtomicOrdering::Relaxed) { - return; - } + fn set_async(&self) { + // Do not signal when we are about to close + if self.deleting.load(AtomicOrdering::Relaxed) { + return; + } - if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false { - let channel = self.message_channel.lock().clone(); - if let Err(e) = channel.send(ClientIoMessage::BlockVerified) { - debug!("Error sending BlockVerified message: {:?}", e); - } - } - } + if self + .signalled + .compare_and_swap(false, true, AtomicOrdering::Relaxed) + == false + { + let channel = self.message_channel.lock().clone(); + if let Err(e) = channel.send(ClientIoMessage::BlockVerified) { + debug!("Error sending BlockVerified message: {:?}", e); + } + } + } - fn reset(&self) { - self.signalled.store(false, AtomicOrdering::Relaxed); - } + fn reset(&self) { + self.signalled.store(false, AtomicOrdering::Relaxed); + } } struct Verification { - // All locks must be captured in the order declared here. - unverified: LenCachingMutex>, - verifying: LenCachingMutex>>, - verified: LenCachingMutex>, - bad: Mutex>, - sizes: Sizes, - check_seal: bool, + // All locks must be captured in the order declared here. 
+ unverified: LenCachingMutex>, + verifying: LenCachingMutex>>, + verified: LenCachingMutex>, + bad: Mutex>, + sizes: Sizes, + check_seal: bool, } impl VerificationQueue { - /// Creates a new queue instance. - pub fn new(config: Config, engine: Arc, message_channel: IoChannel, check_seal: bool) -> Self { - let verification = Arc::new(Verification { - unverified: LenCachingMutex::new(VecDeque::new()), - verifying: LenCachingMutex::new(VecDeque::new()), - verified: LenCachingMutex::new(VecDeque::new()), - bad: Mutex::new(HashSet::new()), - sizes: Sizes { - unverified: AtomicUsize::new(0), - verifying: AtomicUsize::new(0), - verified: AtomicUsize::new(0), - }, - check_seal: check_seal, - }); - let more_to_verify = Arc::new(Condvar::new()); - let deleting = Arc::new(AtomicBool::new(false)); - let ready_signal = Arc::new(QueueSignal { - deleting: deleting.clone(), - signalled: AtomicBool::new(false), - message_channel: Mutex::new(message_channel), - }); - let empty = Arc::new(Condvar::new()); - let scale_verifiers = config.verifier_settings.scale_verifiers; - - let max_verifiers = ::num_cpus::get(); - let default_amount = cmp::max(1, cmp::min(max_verifiers, config.verifier_settings.num_verifiers)); - - // if `auto-scaling` is enabled spawn up extra threads as they might be needed - // otherwise just spawn the number of threads specified by the config - let number_of_threads = if scale_verifiers { - max_verifiers - } else { - cmp::min(default_amount, max_verifiers) - }; - - let state = Arc::new((Mutex::new(State::Work(default_amount)), Condvar::new())); - let mut verifier_handles = Vec::with_capacity(number_of_threads); - - debug!(target: "verification", "Allocating {} verifiers, {} initially active", number_of_threads, default_amount); - debug!(target: "verification", "Verifier auto-scaling {}", if scale_verifiers { "enabled" } else { "disabled" }); - - for i in 0..number_of_threads { - debug!(target: "verification", "Adding verification thread #{}", i); - - let 
verification = verification.clone(); - let engine = engine.clone(); - let wait = more_to_verify.clone(); - let ready = ready_signal.clone(); - let empty = empty.clone(); - let state = state.clone(); - - let handle = thread::Builder::new() - .name(format!("Verifier #{}", i)) - .spawn(move || { - VerificationQueue::verify( - verification, - engine, - wait, - ready, - empty, - state, - i, - ) - }) - .expect("Failed to create verifier thread."); - verifier_handles.push(handle); - } - - VerificationQueue { - engine: engine, - ready_signal: ready_signal, - more_to_verify: more_to_verify, - verification: verification, - deleting: deleting, - processing: RwLock::new(HashMap::new()), - empty: empty, - ticks_since_adjustment: AtomicUsize::new(0), - max_queue_size: cmp::max(config.max_queue_size, MIN_QUEUE_LIMIT), - max_mem_use: cmp::max(config.max_mem_use, MIN_MEM_LIMIT), - scale_verifiers: scale_verifiers, - verifier_handles: verifier_handles, - state: state, - total_difficulty: RwLock::new(0.into()), - } - } - - fn verify( - verification: Arc>, - engine: Arc, - wait: Arc, - ready: Arc, - empty: Arc, - state: Arc<(Mutex, Condvar)>, - id: usize, - ) { - loop { - // check current state. - { - let mut cur_state = state.0.lock(); - while let State::Work(x) = *cur_state { - // sleep until this thread is required. - if id < x { break } - - debug!(target: "verification", "verifier {} sleeping", id); - state.1.wait(&mut cur_state); - debug!(target: "verification", "verifier {} waking up", id); - } - - if let State::Exit = *cur_state { - debug!(target: "verification", "verifier {} exiting", id); - break; - } - } - - // wait for work if empty. 
- { - let mut unverified = verification.unverified.lock(); - - if unverified.is_empty() && verification.verifying.lock().is_empty() { - empty.notify_all(); - } - - while unverified.is_empty() { - if let State::Exit = *state.0.lock() { - debug!(target: "verification", "verifier {} exiting", id); - return; - } - - wait.wait(unverified.inner_mut()); - } - - if let State::Exit = *state.0.lock() { - debug!(target: "verification", "verifier {} exiting", id); - return; - } - } - - // do work. - let item = { - // acquire these locks before getting the item to verify. - let mut unverified = verification.unverified.lock(); - let mut verifying = verification.verifying.lock(); - - let item = match unverified.pop_front() { - Some(item) => item, - None => continue, - }; - - verification.sizes.unverified.fetch_sub(item.heap_size_of_children(), AtomicOrdering::SeqCst); - verifying.push_back(Verifying { hash: item.hash(), output: None }); - item - }; - - let hash = item.hash(); - let is_ready = match K::verify(item, &*engine, verification.check_seal) { - Ok(verified) => { - let mut verifying = verification.verifying.lock(); - let mut idx = None; - for (i, e) in verifying.iter_mut().enumerate() { - if e.hash == hash { - idx = Some(i); - - verification.sizes.verifying.fetch_add(verified.heap_size_of_children(), AtomicOrdering::SeqCst); - e.output = Some(verified); - break; - } - } - - if idx == Some(0) { - // we're next! 
- let mut verified = verification.verified.lock(); - let mut bad = verification.bad.lock(); - VerificationQueue::drain_verifying(&mut verifying, &mut verified, &mut bad, &verification.sizes); - true - } else { - false - } - }, - Err(_) => { - let mut verifying = verification.verifying.lock(); - let mut verified = verification.verified.lock(); - let mut bad = verification.bad.lock(); - - bad.insert(hash.clone()); - verifying.retain(|e| e.hash != hash); - - if verifying.front().map_or(false, |x| x.output.is_some()) { - VerificationQueue::drain_verifying(&mut verifying, &mut verified, &mut bad, &verification.sizes); - true - } else { - false - } - } - }; - if is_ready { - // Import the block immediately - ready.set_sync(); - } - } - } - - fn drain_verifying( - verifying: &mut VecDeque>, - verified: &mut VecDeque, - bad: &mut HashSet, - sizes: &Sizes, - ) { - let mut removed_size = 0; - let mut inserted_size = 0; - - while let Some(output) = verifying.front_mut().and_then(|x| x.output.take()) { - assert!(verifying.pop_front().is_some()); - let size = output.heap_size_of_children(); - removed_size += size; - - if bad.contains(&output.parent_hash()) { - bad.insert(output.hash()); - } else { - inserted_size += size; - verified.push_back(output); - } - } - - sizes.verifying.fetch_sub(removed_size, AtomicOrdering::SeqCst); - sizes.verified.fetch_add(inserted_size, AtomicOrdering::SeqCst); - } - - /// Clear the queue and stop verification activity. 
- pub fn clear(&self) { - let mut unverified = self.verification.unverified.lock(); - let mut verifying = self.verification.verifying.lock(); - let mut verified = self.verification.verified.lock(); - unverified.clear(); - verifying.clear(); - verified.clear(); - - let sizes = &self.verification.sizes; - sizes.unverified.store(0, AtomicOrdering::Release); - sizes.verifying.store(0, AtomicOrdering::Release); - sizes.verified.store(0, AtomicOrdering::Release); - *self.total_difficulty.write() = 0.into(); - - self.processing.write().clear(); - } - - /// Wait for unverified queue to be empty - pub fn flush(&self) { - let mut unverified = self.verification.unverified.lock(); - while !unverified.is_empty() || !self.verification.verifying.lock().is_empty() { - self.empty.wait(unverified.inner_mut()); - } - } - - /// Check if the item is currently in the queue - pub fn status(&self, hash: &H256) -> Status { - if self.processing.read().contains_key(hash) { - return Status::Queued; - } - if self.verification.bad.lock().contains(hash) { - return Status::Bad; - } - Status::Unknown - } - - /// Add a block to the queue. 
- pub fn import(&self, input: K::Input) -> Result { - let hash = input.hash(); - let raw_hash = input.raw_hash(); - { - if self.processing.read().contains_key(&hash) { - bail!((input, ErrorKind::Import(ImportErrorKind::AlreadyQueued).into())); - } - - let mut bad = self.verification.bad.lock(); - if bad.contains(&hash) || bad.contains(&raw_hash) { - bail!((input, ErrorKind::Import(ImportErrorKind::KnownBad).into())); - } - - if bad.contains(&input.parent_hash()) { - bad.insert(hash); - bail!((input, ErrorKind::Import(ImportErrorKind::KnownBad).into())); - } - } - - match K::create(input, &*self.engine, self.verification.check_seal) { - Ok(item) => { - self.verification.sizes.unverified.fetch_add(item.heap_size_of_children(), AtomicOrdering::SeqCst); - - self.processing.write().insert(hash, item.difficulty()); - { - let mut td = self.total_difficulty.write(); - *td = *td + item.difficulty(); - } - self.verification.unverified.lock().push_back(item); - self.more_to_verify.notify_all(); - Ok(hash) - }, - Err((input, err)) => { - match err { - // Don't mark future blocks as bad. - Error(ErrorKind::Block(BlockError::TemporarilyInvalid(_)), _) => {}, - // If the transaction root or uncles hash is invalid, it doesn't necessarily mean - // that the header is invalid. We might have just received a malformed block body, - // so we shouldn't put the header hash to `bad`. - // - // We still put the entire `Item` hash to bad, so that we can early reject - // the items that are malformed. - Error(ErrorKind::Block(BlockError::InvalidTransactionsRoot(_)), _) | - Error(ErrorKind::Block(BlockError::InvalidUnclesHash(_)), _) => { - self.verification.bad.lock().insert(raw_hash); - }, - _ => { - self.verification.bad.lock().insert(hash); - } - } - Err((input, err)) - } - } - } - - /// Mark given item and all its children as bad. pauses verification - /// until complete. 
- pub fn mark_as_bad(&self, hashes: &[H256]) { - if hashes.is_empty() { - return; - } - let mut verified_lock = self.verification.verified.lock(); - let verified = &mut *verified_lock; - let mut bad = self.verification.bad.lock(); - let mut processing = self.processing.write(); - bad.reserve(hashes.len()); - for hash in hashes { - bad.insert(hash.clone()); - if let Some(difficulty) = processing.remove(hash) { - let mut td = self.total_difficulty.write(); - *td = *td - difficulty; - } - } - - let mut new_verified = VecDeque::new(); - let mut removed_size = 0; - for output in verified.drain(..) { - if bad.contains(&output.parent_hash()) { - removed_size += output.heap_size_of_children(); - bad.insert(output.hash()); - if let Some(difficulty) = processing.remove(&output.hash()) { - let mut td = self.total_difficulty.write(); - *td = *td - difficulty; - } - } else { - new_verified.push_back(output); - } - } - - self.verification.sizes.verified.fetch_sub(removed_size, AtomicOrdering::SeqCst); - *verified = new_verified; - } - - /// Mark given item as processed. - /// Returns true if the queue becomes empty. 
- pub fn mark_as_good(&self, hashes: &[H256]) -> bool { - if hashes.is_empty() { - return self.processing.read().is_empty(); - } - let mut processing = self.processing.write(); - for hash in hashes { - if let Some(difficulty) = processing.remove(hash) { - let mut td = self.total_difficulty.write(); - *td = *td - difficulty; - } - } - processing.is_empty() - } - - /// Removes up to `max` verified items from the queue - pub fn drain(&self, max: usize) -> Vec { - let mut verified = self.verification.verified.lock(); - let count = cmp::min(max, verified.len()); - let result = verified.drain(..count).collect::>(); - - let drained_size = result.iter().map(HeapSizeOf::heap_size_of_children).fold(0, |a, c| a + c); - self.verification.sizes.verified.fetch_sub(drained_size, AtomicOrdering::SeqCst); - - self.ready_signal.reset(); - if !verified.is_empty() { - self.ready_signal.set_async(); - } - result - } - - /// Returns true if there is nothing currently in the queue. - pub fn is_empty(&self) -> bool { - let v = &self.verification; - - v.unverified.load_len() == 0 - && v.verifying.load_len() == 0 - && v.verified.load_len() == 0 - } - - /// Get queue status. 
- pub fn queue_info(&self) -> QueueInfo { - use std::mem::size_of; - - let (unverified_len, unverified_bytes) = { - let len = self.verification.unverified.load_len(); - let size = self.verification.sizes.unverified.load(AtomicOrdering::Acquire); - - (len, size + len * size_of::()) - }; - let (verifying_len, verifying_bytes) = { - let len = self.verification.verifying.load_len(); - let size = self.verification.sizes.verifying.load(AtomicOrdering::Acquire); - (len, size + len * size_of::>()) - }; - let (verified_len, verified_bytes) = { - let len = self.verification.verified.load_len(); - let size = self.verification.sizes.verified.load(AtomicOrdering::Acquire); - (len, size + len * size_of::()) - }; - - QueueInfo { - unverified_queue_size: unverified_len, - verifying_queue_size: verifying_len, - verified_queue_size: verified_len, - max_queue_size: self.max_queue_size, - max_mem_use: self.max_mem_use, - mem_used: unverified_bytes - + verifying_bytes - + verified_bytes - } - } - - /// Get the total difficulty of all the blocks in the queue. - pub fn total_difficulty(&self) -> U256 { - self.total_difficulty.read().clone() - } - - /// Get the current number of working verifiers. - pub fn num_verifiers(&self) -> usize { - match *self.state.0.lock() { - State::Work(x) => x, - State::Exit => panic!("state only set to exit on drop; queue live now; qed"), - } - } - - /// Optimise memory footprint of the heap fields, and adjust the number of threads - /// to better suit the workload. - pub fn collect_garbage(&self) { - // number of ticks to average queue stats over - // when deciding whether to change the number of verifiers. 
- #[cfg(not(test))] - const READJUSTMENT_PERIOD: usize = 12; - - #[cfg(test)] - const READJUSTMENT_PERIOD: usize = 1; - - let (u_len, v_len) = { - let u_len = { - let mut q = self.verification.unverified.lock(); - q.shrink_to_fit(); - q.len() - }; - self.verification.verifying.lock().shrink_to_fit(); - - let v_len = { - let mut q = self.verification.verified.lock(); - q.shrink_to_fit(); - q.len() - }; - - (u_len as isize, v_len as isize) - }; - - self.processing.write().shrink_to_fit(); - - if !self.scale_verifiers { return } - - if self.ticks_since_adjustment.fetch_add(1, AtomicOrdering::SeqCst) + 1 >= READJUSTMENT_PERIOD { - self.ticks_since_adjustment.store(0, AtomicOrdering::SeqCst); - } else { - return; - } - - let current = self.num_verifiers(); - - let diff = (v_len - u_len).abs(); - let total = v_len + u_len; - - self.scale_verifiers( - if u_len < 20 { - 1 - } else if diff <= total / 10 { - current - } else if v_len > u_len { - current - 1 - } else { - current + 1 - } - ); - } - - // wake up or sleep verifiers to get as close to the target as - // possible, never going over the amount of initially allocated threads - // or below 1. - fn scale_verifiers(&self, target: usize) { - let current = self.num_verifiers(); - let target = cmp::min(self.verifier_handles.len(), target); - let target = cmp::max(1, target); - - debug!(target: "verification", "Scaling from {} to {} verifiers", current, target); - - *self.state.0.lock() = State::Work(target); - self.state.1.notify_all(); - } + /// Creates a new queue instance. 
+ pub fn new( + config: Config, + engine: Arc, + message_channel: IoChannel, + check_seal: bool, + ) -> Self { + let verification = Arc::new(Verification { + unverified: LenCachingMutex::new(VecDeque::new()), + verifying: LenCachingMutex::new(VecDeque::new()), + verified: LenCachingMutex::new(VecDeque::new()), + bad: Mutex::new(HashSet::new()), + sizes: Sizes { + unverified: AtomicUsize::new(0), + verifying: AtomicUsize::new(0), + verified: AtomicUsize::new(0), + }, + check_seal: check_seal, + }); + let more_to_verify = Arc::new(Condvar::new()); + let deleting = Arc::new(AtomicBool::new(false)); + let ready_signal = Arc::new(QueueSignal { + deleting: deleting.clone(), + signalled: AtomicBool::new(false), + message_channel: Mutex::new(message_channel), + }); + let empty = Arc::new(Condvar::new()); + let scale_verifiers = config.verifier_settings.scale_verifiers; + + let max_verifiers = ::num_cpus::get(); + let default_amount = cmp::max( + 1, + cmp::min(max_verifiers, config.verifier_settings.num_verifiers), + ); + + // if `auto-scaling` is enabled spawn up extra threads as they might be needed + // otherwise just spawn the number of threads specified by the config + let number_of_threads = if scale_verifiers { + max_verifiers + } else { + cmp::min(default_amount, max_verifiers) + }; + + let state = Arc::new((Mutex::new(State::Work(default_amount)), Condvar::new())); + let mut verifier_handles = Vec::with_capacity(number_of_threads); + + debug!(target: "verification", "Allocating {} verifiers, {} initially active", number_of_threads, default_amount); + debug!(target: "verification", "Verifier auto-scaling {}", if scale_verifiers { "enabled" } else { "disabled" }); + + for i in 0..number_of_threads { + debug!(target: "verification", "Adding verification thread #{}", i); + + let verification = verification.clone(); + let engine = engine.clone(); + let wait = more_to_verify.clone(); + let ready = ready_signal.clone(); + let empty = empty.clone(); + let state = 
state.clone(); + + let handle = thread::Builder::new() + .name(format!("Verifier #{}", i)) + .spawn(move || { + VerificationQueue::verify(verification, engine, wait, ready, empty, state, i) + }) + .expect("Failed to create verifier thread."); + verifier_handles.push(handle); + } + + VerificationQueue { + engine: engine, + ready_signal: ready_signal, + more_to_verify: more_to_verify, + verification: verification, + deleting: deleting, + processing: RwLock::new(HashMap::new()), + empty: empty, + ticks_since_adjustment: AtomicUsize::new(0), + max_queue_size: cmp::max(config.max_queue_size, MIN_QUEUE_LIMIT), + max_mem_use: cmp::max(config.max_mem_use, MIN_MEM_LIMIT), + scale_verifiers: scale_verifiers, + verifier_handles: verifier_handles, + state: state, + total_difficulty: RwLock::new(0.into()), + } + } + + fn verify( + verification: Arc>, + engine: Arc, + wait: Arc, + ready: Arc, + empty: Arc, + state: Arc<(Mutex, Condvar)>, + id: usize, + ) { + loop { + // check current state. + { + let mut cur_state = state.0.lock(); + while let State::Work(x) = *cur_state { + // sleep until this thread is required. + if id < x { + break; + } + + debug!(target: "verification", "verifier {} sleeping", id); + state.1.wait(&mut cur_state); + debug!(target: "verification", "verifier {} waking up", id); + } + + if let State::Exit = *cur_state { + debug!(target: "verification", "verifier {} exiting", id); + break; + } + } + + // wait for work if empty. + { + let mut unverified = verification.unverified.lock(); + + if unverified.is_empty() && verification.verifying.lock().is_empty() { + empty.notify_all(); + } + + while unverified.is_empty() { + if let State::Exit = *state.0.lock() { + debug!(target: "verification", "verifier {} exiting", id); + return; + } + + wait.wait(unverified.inner_mut()); + } + + if let State::Exit = *state.0.lock() { + debug!(target: "verification", "verifier {} exiting", id); + return; + } + } + + // do work. 
+ let item = { + // acquire these locks before getting the item to verify. + let mut unverified = verification.unverified.lock(); + let mut verifying = verification.verifying.lock(); + + let item = match unverified.pop_front() { + Some(item) => item, + None => continue, + }; + + verification + .sizes + .unverified + .fetch_sub(item.heap_size_of_children(), AtomicOrdering::SeqCst); + verifying.push_back(Verifying { + hash: item.hash(), + output: None, + }); + item + }; + + let hash = item.hash(); + let is_ready = match K::verify(item, &*engine, verification.check_seal) { + Ok(verified) => { + let mut verifying = verification.verifying.lock(); + let mut idx = None; + for (i, e) in verifying.iter_mut().enumerate() { + if e.hash == hash { + idx = Some(i); + + verification.sizes.verifying.fetch_add( + verified.heap_size_of_children(), + AtomicOrdering::SeqCst, + ); + e.output = Some(verified); + break; + } + } + + if idx == Some(0) { + // we're next! + let mut verified = verification.verified.lock(); + let mut bad = verification.bad.lock(); + VerificationQueue::drain_verifying( + &mut verifying, + &mut verified, + &mut bad, + &verification.sizes, + ); + true + } else { + false + } + } + Err(_) => { + let mut verifying = verification.verifying.lock(); + let mut verified = verification.verified.lock(); + let mut bad = verification.bad.lock(); + + bad.insert(hash.clone()); + verifying.retain(|e| e.hash != hash); + + if verifying.front().map_or(false, |x| x.output.is_some()) { + VerificationQueue::drain_verifying( + &mut verifying, + &mut verified, + &mut bad, + &verification.sizes, + ); + true + } else { + false + } + } + }; + if is_ready { + // Import the block immediately + ready.set_sync(); + } + } + } + + fn drain_verifying( + verifying: &mut VecDeque>, + verified: &mut VecDeque, + bad: &mut HashSet, + sizes: &Sizes, + ) { + let mut removed_size = 0; + let mut inserted_size = 0; + + while let Some(output) = verifying.front_mut().and_then(|x| x.output.take()) { + 
assert!(verifying.pop_front().is_some()); + let size = output.heap_size_of_children(); + removed_size += size; + + if bad.contains(&output.parent_hash()) { + bad.insert(output.hash()); + } else { + inserted_size += size; + verified.push_back(output); + } + } + + sizes + .verifying + .fetch_sub(removed_size, AtomicOrdering::SeqCst); + sizes + .verified + .fetch_add(inserted_size, AtomicOrdering::SeqCst); + } + + /// Clear the queue and stop verification activity. + pub fn clear(&self) { + let mut unverified = self.verification.unverified.lock(); + let mut verifying = self.verification.verifying.lock(); + let mut verified = self.verification.verified.lock(); + unverified.clear(); + verifying.clear(); + verified.clear(); + + let sizes = &self.verification.sizes; + sizes.unverified.store(0, AtomicOrdering::Release); + sizes.verifying.store(0, AtomicOrdering::Release); + sizes.verified.store(0, AtomicOrdering::Release); + *self.total_difficulty.write() = 0.into(); + + self.processing.write().clear(); + } + + /// Wait for unverified queue to be empty + pub fn flush(&self) { + let mut unverified = self.verification.unverified.lock(); + while !unverified.is_empty() || !self.verification.verifying.lock().is_empty() { + self.empty.wait(unverified.inner_mut()); + } + } + + /// Check if the item is currently in the queue + pub fn status(&self, hash: &H256) -> Status { + if self.processing.read().contains_key(hash) { + return Status::Queued; + } + if self.verification.bad.lock().contains(hash) { + return Status::Bad; + } + Status::Unknown + } + + /// Add a block to the queue. 
+ pub fn import(&self, input: K::Input) -> Result { + let hash = input.hash(); + let raw_hash = input.raw_hash(); + { + if self.processing.read().contains_key(&hash) { + bail!(( + input, + ErrorKind::Import(ImportErrorKind::AlreadyQueued).into() + )); + } + + let mut bad = self.verification.bad.lock(); + if bad.contains(&hash) || bad.contains(&raw_hash) { + bail!((input, ErrorKind::Import(ImportErrorKind::KnownBad).into())); + } + + if bad.contains(&input.parent_hash()) { + bad.insert(hash); + bail!((input, ErrorKind::Import(ImportErrorKind::KnownBad).into())); + } + } + + match K::create(input, &*self.engine, self.verification.check_seal) { + Ok(item) => { + self.verification + .sizes + .unverified + .fetch_add(item.heap_size_of_children(), AtomicOrdering::SeqCst); + + self.processing.write().insert(hash, item.difficulty()); + { + let mut td = self.total_difficulty.write(); + *td = *td + item.difficulty(); + } + self.verification.unverified.lock().push_back(item); + self.more_to_verify.notify_all(); + Ok(hash) + } + Err((input, err)) => { + match err { + // Don't mark future blocks as bad. + Error(ErrorKind::Block(BlockError::TemporarilyInvalid(_)), _) => {} + // If the transaction root or uncles hash is invalid, it doesn't necessarily mean + // that the header is invalid. We might have just received a malformed block body, + // so we shouldn't put the header hash to `bad`. + // + // We still put the entire `Item` hash to bad, so that we can early reject + // the items that are malformed. + Error(ErrorKind::Block(BlockError::InvalidTransactionsRoot(_)), _) + | Error(ErrorKind::Block(BlockError::InvalidUnclesHash(_)), _) => { + self.verification.bad.lock().insert(raw_hash); + } + _ => { + self.verification.bad.lock().insert(hash); + } + } + Err((input, err)) + } + } + } + + /// Mark given item and all its children as bad. pauses verification + /// until complete. 
+ pub fn mark_as_bad(&self, hashes: &[H256]) { + if hashes.is_empty() { + return; + } + let mut verified_lock = self.verification.verified.lock(); + let verified = &mut *verified_lock; + let mut bad = self.verification.bad.lock(); + let mut processing = self.processing.write(); + bad.reserve(hashes.len()); + for hash in hashes { + bad.insert(hash.clone()); + if let Some(difficulty) = processing.remove(hash) { + let mut td = self.total_difficulty.write(); + *td = *td - difficulty; + } + } + + let mut new_verified = VecDeque::new(); + let mut removed_size = 0; + for output in verified.drain(..) { + if bad.contains(&output.parent_hash()) { + removed_size += output.heap_size_of_children(); + bad.insert(output.hash()); + if let Some(difficulty) = processing.remove(&output.hash()) { + let mut td = self.total_difficulty.write(); + *td = *td - difficulty; + } + } else { + new_verified.push_back(output); + } + } + + self.verification + .sizes + .verified + .fetch_sub(removed_size, AtomicOrdering::SeqCst); + *verified = new_verified; + } + + /// Mark given item as processed. + /// Returns true if the queue becomes empty. 
+ pub fn mark_as_good(&self, hashes: &[H256]) -> bool { + if hashes.is_empty() { + return self.processing.read().is_empty(); + } + let mut processing = self.processing.write(); + for hash in hashes { + if let Some(difficulty) = processing.remove(hash) { + let mut td = self.total_difficulty.write(); + *td = *td - difficulty; + } + } + processing.is_empty() + } + + /// Removes up to `max` verified items from the queue + pub fn drain(&self, max: usize) -> Vec { + let mut verified = self.verification.verified.lock(); + let count = cmp::min(max, verified.len()); + let result = verified.drain(..count).collect::>(); + + let drained_size = result + .iter() + .map(HeapSizeOf::heap_size_of_children) + .fold(0, |a, c| a + c); + self.verification + .sizes + .verified + .fetch_sub(drained_size, AtomicOrdering::SeqCst); + + self.ready_signal.reset(); + if !verified.is_empty() { + self.ready_signal.set_async(); + } + result + } + + /// Returns true if there is nothing currently in the queue. + pub fn is_empty(&self) -> bool { + let v = &self.verification; + + v.unverified.load_len() == 0 && v.verifying.load_len() == 0 && v.verified.load_len() == 0 + } + + /// Get queue status. 
+ pub fn queue_info(&self) -> QueueInfo { + use std::mem::size_of; + + let (unverified_len, unverified_bytes) = { + let len = self.verification.unverified.load_len(); + let size = self + .verification + .sizes + .unverified + .load(AtomicOrdering::Acquire); + + (len, size + len * size_of::()) + }; + let (verifying_len, verifying_bytes) = { + let len = self.verification.verifying.load_len(); + let size = self + .verification + .sizes + .verifying + .load(AtomicOrdering::Acquire); + (len, size + len * size_of::>()) + }; + let (verified_len, verified_bytes) = { + let len = self.verification.verified.load_len(); + let size = self + .verification + .sizes + .verified + .load(AtomicOrdering::Acquire); + (len, size + len * size_of::()) + }; + + QueueInfo { + unverified_queue_size: unverified_len, + verifying_queue_size: verifying_len, + verified_queue_size: verified_len, + max_queue_size: self.max_queue_size, + max_mem_use: self.max_mem_use, + mem_used: unverified_bytes + verifying_bytes + verified_bytes, + } + } + + /// Get the total difficulty of all the blocks in the queue. + pub fn total_difficulty(&self) -> U256 { + self.total_difficulty.read().clone() + } + + /// Get the current number of working verifiers. + pub fn num_verifiers(&self) -> usize { + match *self.state.0.lock() { + State::Work(x) => x, + State::Exit => panic!("state only set to exit on drop; queue live now; qed"), + } + } + + /// Optimise memory footprint of the heap fields, and adjust the number of threads + /// to better suit the workload. + pub fn collect_garbage(&self) { + // number of ticks to average queue stats over + // when deciding whether to change the number of verifiers. 
+ #[cfg(not(test))] + const READJUSTMENT_PERIOD: usize = 12; + + #[cfg(test)] + const READJUSTMENT_PERIOD: usize = 1; + + let (u_len, v_len) = { + let u_len = { + let mut q = self.verification.unverified.lock(); + q.shrink_to_fit(); + q.len() + }; + self.verification.verifying.lock().shrink_to_fit(); + + let v_len = { + let mut q = self.verification.verified.lock(); + q.shrink_to_fit(); + q.len() + }; + + (u_len as isize, v_len as isize) + }; + + self.processing.write().shrink_to_fit(); + + if !self.scale_verifiers { + return; + } + + if self + .ticks_since_adjustment + .fetch_add(1, AtomicOrdering::SeqCst) + + 1 + >= READJUSTMENT_PERIOD + { + self.ticks_since_adjustment.store(0, AtomicOrdering::SeqCst); + } else { + return; + } + + let current = self.num_verifiers(); + + let diff = (v_len - u_len).abs(); + let total = v_len + u_len; + + self.scale_verifiers(if u_len < 20 { + 1 + } else if diff <= total / 10 { + current + } else if v_len > u_len { + current - 1 + } else { + current + 1 + }); + } + + // wake up or sleep verifiers to get as close to the target as + // possible, never going over the amount of initially allocated threads + // or below 1. + fn scale_verifiers(&self, target: usize) { + let current = self.num_verifiers(); + let target = cmp::min(self.verifier_handles.len(), target); + let target = cmp::max(1, target); + + debug!(target: "verification", "Scaling from {} to {} verifiers", current, target); + + *self.state.0.lock() = State::Work(target); + self.state.1.notify_all(); + } } impl Drop for VerificationQueue { - fn drop(&mut self) { - trace!(target: "shutdown", "[VerificationQueue] Closing..."); - self.clear(); - self.deleting.store(true, AtomicOrdering::SeqCst); + fn drop(&mut self) { + trace!(target: "shutdown", "[VerificationQueue] Closing..."); + self.clear(); + self.deleting.store(true, AtomicOrdering::SeqCst); - // set exit state; should be done before `more_to_verify` notification. 
- *self.state.0.lock() = State::Exit; - self.state.1.notify_all(); + // set exit state; should be done before `more_to_verify` notification. + *self.state.0.lock() = State::Exit; + self.state.1.notify_all(); - // acquire this lock to force threads to reach the waiting point - // if they're in-between the exit check and the more_to_verify wait. - { - let _unverified = self.verification.unverified.lock(); - self.more_to_verify.notify_all(); - } + // acquire this lock to force threads to reach the waiting point + // if they're in-between the exit check and the more_to_verify wait. + { + let _unverified = self.verification.unverified.lock(); + self.more_to_verify.notify_all(); + } - // wait for all verifier threads to join. - for thread in self.verifier_handles.drain(..) { - thread.join().expect("Propagating verifier thread panic on shutdown"); - } + // wait for all verifier threads to join. + for thread in self.verifier_handles.drain(..) { + thread + .join() + .expect("Propagating verifier thread panic on shutdown"); + } - trace!(target: "shutdown", "[VerificationQueue] Closed."); - } + trace!(target: "shutdown", "[VerificationQueue] Closed."); + } } #[cfg(test)] mod tests { - use io::*; - use spec::Spec; - use super::{BlockQueue, Config, State}; - use super::kind::blocks::Unverified; - use test_helpers::{get_good_dummy_block_seq, get_good_dummy_block}; - use error::*; - use bytes::Bytes; - use types::view; - use types::views::BlockView; + use super::{kind::blocks::Unverified, BlockQueue, Config, State}; + use bytes::Bytes; + use error::*; + use io::*; + use spec::Spec; + use test_helpers::{get_good_dummy_block, get_good_dummy_block_seq}; + use types::{view, views::BlockView}; - // create a test block queue. - // auto_scaling enables verifier adjustment. - fn get_test_queue(auto_scale: bool) -> BlockQueue { - let spec = Spec::new_test(); - let engine = spec.engine; + // create a test block queue. + // auto_scaling enables verifier adjustment. 
+ fn get_test_queue(auto_scale: bool) -> BlockQueue { + let spec = Spec::new_test(); + let engine = spec.engine; - let mut config = Config::default(); - config.verifier_settings.scale_verifiers = auto_scale; - BlockQueue::new(config, engine, IoChannel::disconnected(), true) - } + let mut config = Config::default(); + config.verifier_settings.scale_verifiers = auto_scale; + BlockQueue::new(config, engine, IoChannel::disconnected(), true) + } - fn get_test_config(num_verifiers: usize, is_auto_scale: bool) -> Config { - let mut config = Config::default(); - config.verifier_settings.num_verifiers = num_verifiers; - config.verifier_settings.scale_verifiers = is_auto_scale; - config - } + fn get_test_config(num_verifiers: usize, is_auto_scale: bool) -> Config { + let mut config = Config::default(); + config.verifier_settings.num_verifiers = num_verifiers; + config.verifier_settings.scale_verifiers = is_auto_scale; + config + } - fn new_unverified(bytes: Bytes) -> Unverified { - Unverified::from_rlp(bytes).expect("Should be valid rlp") - } + fn new_unverified(bytes: Bytes) -> Unverified { + Unverified::from_rlp(bytes).expect("Should be valid rlp") + } - #[test] - fn can_be_created() { - // TODO better test - let spec = Spec::new_test(); - let engine = spec.engine; - let _ = BlockQueue::new(Config::default(), engine, IoChannel::disconnected(), true); - } + #[test] + fn can_be_created() { + // TODO better test + let spec = Spec::new_test(); + let engine = spec.engine; + let _ = BlockQueue::new(Config::default(), engine, IoChannel::disconnected(), true); + } - #[test] - fn can_import_blocks() { - let queue = get_test_queue(false); - if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) { - panic!("error importing block that is valid by definition({:?})", e); - } - } + #[test] + fn can_import_blocks() { + let queue = get_test_queue(false); + if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) { + panic!("error importing block that is valid by 
definition({:?})", e); + } + } - #[test] - fn returns_error_for_duplicates() { - let queue = get_test_queue(false); - if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) { - panic!("error importing block that is valid by definition({:?})", e); - } + #[test] + fn returns_error_for_duplicates() { + let queue = get_test_queue(false); + if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) { + panic!("error importing block that is valid by definition({:?})", e); + } - let duplicate_import = queue.import(new_unverified(get_good_dummy_block())); - match duplicate_import { - Err((_, e)) => { - match e { - Error(ErrorKind::Import(ImportErrorKind::AlreadyQueued), _) => {}, - _ => { panic!("must return AlreadyQueued error"); } - } - } - Ok(_) => { panic!("must produce error"); } - } - } + let duplicate_import = queue.import(new_unverified(get_good_dummy_block())); + match duplicate_import { + Err((_, e)) => match e { + Error(ErrorKind::Import(ImportErrorKind::AlreadyQueued), _) => {} + _ => { + panic!("must return AlreadyQueued error"); + } + }, + Ok(_) => { + panic!("must produce error"); + } + } + } - #[test] - fn returns_total_difficulty() { - let queue = get_test_queue(false); - let block = get_good_dummy_block(); - let hash = view!(BlockView, &block).header().hash().clone(); - if let Err(e) = queue.import(new_unverified(block)) { - panic!("error importing block that is valid by definition({:?})", e); - } - queue.flush(); - assert_eq!(queue.total_difficulty(), 131072.into()); - queue.drain(10); - assert_eq!(queue.total_difficulty(), 131072.into()); - queue.mark_as_good(&[ hash ]); - assert_eq!(queue.total_difficulty(), 0.into()); - } + #[test] + fn returns_total_difficulty() { + let queue = get_test_queue(false); + let block = get_good_dummy_block(); + let hash = view!(BlockView, &block).header().hash().clone(); + if let Err(e) = queue.import(new_unverified(block)) { + panic!("error importing block that is valid by definition({:?})", e); + 
} + queue.flush(); + assert_eq!(queue.total_difficulty(), 131072.into()); + queue.drain(10); + assert_eq!(queue.total_difficulty(), 131072.into()); + queue.mark_as_good(&[hash]); + assert_eq!(queue.total_difficulty(), 0.into()); + } - #[test] - fn returns_ok_for_drained_duplicates() { - let queue = get_test_queue(false); - let block = get_good_dummy_block(); - let hash = view!(BlockView, &block).header().hash().clone(); - if let Err(e) = queue.import(new_unverified(block)) { - panic!("error importing block that is valid by definition({:?})", e); - } - queue.flush(); - queue.drain(10); - queue.mark_as_good(&[ hash ]); + #[test] + fn returns_ok_for_drained_duplicates() { + let queue = get_test_queue(false); + let block = get_good_dummy_block(); + let hash = view!(BlockView, &block).header().hash().clone(); + if let Err(e) = queue.import(new_unverified(block)) { + panic!("error importing block that is valid by definition({:?})", e); + } + queue.flush(); + queue.drain(10); + queue.mark_as_good(&[hash]); - if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) { - panic!("error importing block that has already been drained ({:?})", e); - } - } + if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) { + panic!( + "error importing block that has already been drained ({:?})", + e + ); + } + } - #[test] - fn returns_empty_once_finished() { - let queue = get_test_queue(false); - queue.import(new_unverified(get_good_dummy_block())) - .expect("error importing block that is valid by definition"); - queue.flush(); - queue.drain(1); + #[test] + fn returns_empty_once_finished() { + let queue = get_test_queue(false); + queue + .import(new_unverified(get_good_dummy_block())) + .expect("error importing block that is valid by definition"); + queue.flush(); + queue.drain(1); - assert!(queue.queue_info().is_empty()); - } + assert!(queue.queue_info().is_empty()); + } - #[test] - fn test_mem_limit() { - let spec = Spec::new_test(); - let engine = spec.engine; 
- let mut config = Config::default(); - config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000 - let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); - assert!(!queue.queue_info().is_full()); - let mut blocks = get_good_dummy_block_seq(50); - for b in blocks.drain(..) { - queue.import(new_unverified(b)).unwrap(); - } - assert!(queue.queue_info().is_full()); - } + #[test] + fn test_mem_limit() { + let spec = Spec::new_test(); + let engine = spec.engine; + let mut config = Config::default(); + config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000 + let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); + assert!(!queue.queue_info().is_full()); + let mut blocks = get_good_dummy_block_seq(50); + for b in blocks.drain(..) { + queue.import(new_unverified(b)).unwrap(); + } + assert!(queue.queue_info().is_full()); + } - #[test] - fn scaling_limits() { - let max_verifiers = ::num_cpus::get(); - let queue = get_test_queue(true); - queue.scale_verifiers(max_verifiers + 1); + #[test] + fn scaling_limits() { + let max_verifiers = ::num_cpus::get(); + let queue = get_test_queue(true); + queue.scale_verifiers(max_verifiers + 1); - assert!(queue.num_verifiers() < max_verifiers + 1); + assert!(queue.num_verifiers() < max_verifiers + 1); - queue.scale_verifiers(0); + queue.scale_verifiers(0); - assert!(queue.num_verifiers() == 1); - } + assert!(queue.num_verifiers() == 1); + } - #[test] - fn readjust_verifiers() { - let queue = get_test_queue(true); + #[test] + fn readjust_verifiers() { + let queue = get_test_queue(true); - // put all the verifiers to sleep to ensure - // the test isn't timing sensitive. - *queue.state.0.lock() = State::Work(0); + // put all the verifiers to sleep to ensure + // the test isn't timing sensitive. 
+ *queue.state.0.lock() = State::Work(0); - for block in get_good_dummy_block_seq(5000) { - queue.import(new_unverified(block)).expect("Block good by definition; qed"); - } + for block in get_good_dummy_block_seq(5000) { + queue + .import(new_unverified(block)) + .expect("Block good by definition; qed"); + } - // almost all unverified == bump verifier count. - queue.collect_garbage(); - assert_eq!(queue.num_verifiers(), 1); + // almost all unverified == bump verifier count. + queue.collect_garbage(); + assert_eq!(queue.num_verifiers(), 1); - queue.flush(); + queue.flush(); - // nothing to verify == use minimum number of verifiers. - queue.collect_garbage(); - assert_eq!(queue.num_verifiers(), 1); - } + // nothing to verify == use minimum number of verifiers. + queue.collect_garbage(); + assert_eq!(queue.num_verifiers(), 1); + } - #[test] - fn worker_threads_honor_specified_number_without_scaling() { - let spec = Spec::new_test(); - let engine = spec.engine; - let config = get_test_config(1, false); - let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); + #[test] + fn worker_threads_honor_specified_number_without_scaling() { + let spec = Spec::new_test(); + let engine = spec.engine; + let config = get_test_config(1, false); + let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); - assert_eq!(queue.num_verifiers(), 1); - } + assert_eq!(queue.num_verifiers(), 1); + } - #[test] - fn worker_threads_specified_to_zero_should_set_to_one() { - let spec = Spec::new_test(); - let engine = spec.engine; - let config = get_test_config(0, false); - let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); + #[test] + fn worker_threads_specified_to_zero_should_set_to_one() { + let spec = Spec::new_test(); + let engine = spec.engine; + let config = get_test_config(0, false); + let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); - assert_eq!(queue.num_verifiers(), 1); - } + 
assert_eq!(queue.num_verifiers(), 1); + } - #[test] - fn worker_threads_should_only_accept_max_number_cpus() { - let spec = Spec::new_test(); - let engine = spec.engine; - let config = get_test_config(10_000, false); - let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); - let num_cpus = ::num_cpus::get(); + #[test] + fn worker_threads_should_only_accept_max_number_cpus() { + let spec = Spec::new_test(); + let engine = spec.engine; + let config = get_test_config(10_000, false); + let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); + let num_cpus = ::num_cpus::get(); - assert_eq!(queue.num_verifiers(), num_cpus); - } + assert_eq!(queue.num_verifiers(), num_cpus); + } - #[test] - fn worker_threads_scaling_with_specifed_num_of_workers() { - let num_cpus = ::num_cpus::get(); - // only run the test with at least 2 CPUs - if num_cpus > 1 { - let spec = Spec::new_test(); - let engine = spec.engine; - let config = get_test_config(num_cpus - 1, true); - let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); - queue.scale_verifiers(num_cpus); + #[test] + fn worker_threads_scaling_with_specifed_num_of_workers() { + let num_cpus = ::num_cpus::get(); + // only run the test with at least 2 CPUs + if num_cpus > 1 { + let spec = Spec::new_test(); + let engine = spec.engine; + let config = get_test_config(num_cpus - 1, true); + let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true); + queue.scale_verifiers(num_cpus); - assert_eq!(queue.num_verifiers(), num_cpus); - } - } + assert_eq!(queue.num_verifiers(), num_cpus); + } + } } diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index 61d711924..4ff9d211f 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -21,8 +21,10 @@ //! 2. Signatures verification done in the queue. //! 3. Final verification against the blockchain done before enactment. 
-use std::collections::HashSet; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::{ + collections::HashSet, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; use bytes::Bytes; use hash::keccak; @@ -36,791 +38,1091 @@ use call_contract::CallContract; use client::BlockInfo; use engines::{EthEngine, MAX_UNCLE_AGE}; use error::{BlockError, Error}; -use types::{BlockNumber, header::Header}; -use types::transaction::SignedTransaction; +use types::{header::Header, transaction::SignedTransaction, BlockNumber}; use verification::queue::kind::blocks::Unverified; use time_utils::CheckedSystemTime; /// Preprocessed block data gathered in `verify_block_unordered` call pub struct PreverifiedBlock { - /// Populated block header - pub header: Header, - /// Populated block transactions - pub transactions: Vec, - /// Populated block uncles - pub uncles: Vec
, - /// Block bytes - pub bytes: Bytes, + /// Populated block header + pub header: Header, + /// Populated block transactions + pub transactions: Vec, + /// Populated block uncles + pub uncles: Vec
, + /// Block bytes + pub bytes: Bytes, } impl HeapSizeOf for PreverifiedBlock { - fn heap_size_of_children(&self) -> usize { - self.header.heap_size_of_children() - + self.transactions.heap_size_of_children() - + self.bytes.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.header.heap_size_of_children() + + self.transactions.heap_size_of_children() + + self.bytes.heap_size_of_children() + } } /// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block -pub fn verify_block_basic(block: &Unverified, engine: &EthEngine, check_seal: bool) -> Result<(), Error> { - verify_header_params(&block.header, engine, true, check_seal)?; - verify_block_integrity(block)?; +pub fn verify_block_basic( + block: &Unverified, + engine: &EthEngine, + check_seal: bool, +) -> Result<(), Error> { + verify_header_params(&block.header, engine, true, check_seal)?; + verify_block_integrity(block)?; - if check_seal { - engine.verify_block_basic(&block.header)?; - } + if check_seal { + engine.verify_block_basic(&block.header)?; + } - for uncle in &block.uncles { - verify_header_params(uncle, engine, false, check_seal)?; - if check_seal { - engine.verify_block_basic(uncle)?; - } - } + for uncle in &block.uncles { + verify_header_params(uncle, engine, false, check_seal)?; + if check_seal { + engine.verify_block_basic(uncle)?; + } + } - for t in &block.transactions { - engine.verify_transaction_basic(t, &block.header)?; - } + for t in &block.transactions { + engine.verify_transaction_basic(t, &block.header)?; + } - Ok(()) + Ok(()) } /// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash. 
/// Still operates on a individual block /// Returns a `PreverifiedBlock` structure populated with transactions -pub fn verify_block_unordered(block: Unverified, engine: &EthEngine, check_seal: bool) -> Result { - let header = block.header; - if check_seal { - engine.verify_block_unordered(&header)?; - for uncle in &block.uncles { - engine.verify_block_unordered(uncle)?; - } - } - // Verify transactions. - let nonce_cap = if header.number() >= engine.params().dust_protection_transition { - Some((engine.params().nonce_cap_increment * header.number()).into()) - } else { - None - }; +pub fn verify_block_unordered( + block: Unverified, + engine: &EthEngine, + check_seal: bool, +) -> Result { + let header = block.header; + if check_seal { + engine.verify_block_unordered(&header)?; + for uncle in &block.uncles { + engine.verify_block_unordered(uncle)?; + } + } + // Verify transactions. + let nonce_cap = if header.number() >= engine.params().dust_protection_transition { + Some((engine.params().nonce_cap_increment * header.number()).into()) + } else { + None + }; - let transactions = block.transactions - .into_iter() - .map(|t| { - let t = engine.verify_transaction_unordered(t, &header)?; - if let Some(max_nonce) = nonce_cap { - if t.nonce >= max_nonce { - return Err(BlockError::TooManyTransactions(t.sender()).into()); - } - } - Ok(t) - }) - .collect::, Error>>()?; + let transactions = block + .transactions + .into_iter() + .map(|t| { + let t = engine.verify_transaction_unordered(t, &header)?; + if let Some(max_nonce) = nonce_cap { + if t.nonce >= max_nonce { + return Err(BlockError::TooManyTransactions(t.sender()).into()); + } + } + Ok(t) + }) + .collect::, Error>>()?; - Ok(PreverifiedBlock { - header, - transactions, - uncles: block.uncles, - bytes: block.bytes, - }) + Ok(PreverifiedBlock { + header, + transactions, + uncles: block.uncles, + bytes: block.bytes, + }) } /// Parameters for full verification of block family pub struct FullFamilyParams<'a, C: BlockInfo + 
CallContract + 'a> { - /// Preverified block - pub block: &'a PreverifiedBlock, + /// Preverified block + pub block: &'a PreverifiedBlock, - /// Block provider to use during verification - pub block_provider: &'a BlockProvider, + /// Block provider to use during verification + pub block_provider: &'a BlockProvider, - /// Engine client to use during verification - pub client: &'a C, + /// Engine client to use during verification + pub client: &'a C, } /// Phase 3 verification. Check block information against parent and uncles. -pub fn verify_block_family(header: &Header, parent: &Header, engine: &EthEngine, do_full: Option>) -> Result<(), Error> { - // TODO: verify timestamp - verify_parent(&header, &parent, engine)?; - engine.verify_block_family(&header, &parent)?; +pub fn verify_block_family( + header: &Header, + parent: &Header, + engine: &EthEngine, + do_full: Option>, +) -> Result<(), Error> { + // TODO: verify timestamp + verify_parent(&header, &parent, engine)?; + engine.verify_block_family(&header, &parent)?; - let params = match do_full { - Some(x) => x, - None => return Ok(()), - }; + let params = match do_full { + Some(x) => x, + None => return Ok(()), + }; - verify_uncles(params.block, params.block_provider, engine)?; + verify_uncles(params.block, params.block_provider, engine)?; - for tx in ¶ms.block.transactions { - // transactions are verified against the parent header since the current - // state wasn't available when the tx was created - engine.machine().verify_transaction(tx, parent, params.client)?; - } + for tx in ¶ms.block.transactions { + // transactions are verified against the parent header since the current + // state wasn't available when the tx was created + engine + .machine() + .verify_transaction(tx, parent, params.client)?; + } - Ok(()) + Ok(()) } -fn verify_uncles(block: &PreverifiedBlock, bc: &BlockProvider, engine: &EthEngine) -> Result<(), Error> { - let header = &block.header; - let num_uncles = block.uncles.len(); - let 
max_uncles = engine.maximum_uncle_count(header.number()); - if num_uncles != 0 { - if num_uncles > max_uncles { - return Err(From::from(BlockError::TooManyUncles(OutOfBounds { - min: None, - max: Some(max_uncles), - found: num_uncles, - }))); - } +fn verify_uncles( + block: &PreverifiedBlock, + bc: &BlockProvider, + engine: &EthEngine, +) -> Result<(), Error> { + let header = &block.header; + let num_uncles = block.uncles.len(); + let max_uncles = engine.maximum_uncle_count(header.number()); + if num_uncles != 0 { + if num_uncles > max_uncles { + return Err(From::from(BlockError::TooManyUncles(OutOfBounds { + min: None, + max: Some(max_uncles), + found: num_uncles, + }))); + } - let mut excluded = HashSet::new(); - excluded.insert(header.hash()); - let mut hash = header.parent_hash().clone(); - excluded.insert(hash.clone()); - for _ in 0..MAX_UNCLE_AGE { - match bc.block_details(&hash) { - Some(details) => { - excluded.insert(details.parent); - let b = bc.block(&hash) - .expect("parent already known to be stored; qed"); - excluded.extend(b.uncle_hashes()); - hash = details.parent; - } - None => break - } - } + let mut excluded = HashSet::new(); + excluded.insert(header.hash()); + let mut hash = header.parent_hash().clone(); + excluded.insert(hash.clone()); + for _ in 0..MAX_UNCLE_AGE { + match bc.block_details(&hash) { + Some(details) => { + excluded.insert(details.parent); + let b = bc + .block(&hash) + .expect("parent already known to be stored; qed"); + excluded.extend(b.uncle_hashes()); + hash = details.parent; + } + None => break, + } + } - let mut verified = HashSet::new(); - for uncle in &block.uncles { - if excluded.contains(&uncle.hash()) { - return Err(From::from(BlockError::UncleInChain(uncle.hash()))) - } + let mut verified = HashSet::new(); + for uncle in &block.uncles { + if excluded.contains(&uncle.hash()) { + return Err(From::from(BlockError::UncleInChain(uncle.hash()))); + } - if verified.contains(&uncle.hash()) { - return 
Err(From::from(BlockError::DuplicateUncle(uncle.hash()))) - } + if verified.contains(&uncle.hash()) { + return Err(From::from(BlockError::DuplicateUncle(uncle.hash()))); + } - // m_currentBlock.number() - uncle.number() m_cB.n - uP.n() - // 1 2 - // 2 - // 3 - // 4 - // 5 - // 6 7 - // (8 Invalid) + // m_currentBlock.number() - uncle.number() m_cB.n - uP.n() + // 1 2 + // 2 + // 3 + // 4 + // 5 + // 6 7 + // (8 Invalid) - let depth = if header.number() > uncle.number() { header.number() - uncle.number() } else { 0 }; - if depth > MAX_UNCLE_AGE as u64 { - return Err(From::from(BlockError::UncleTooOld(OutOfBounds { min: Some(header.number() - depth), max: Some(header.number() - 1), found: uncle.number() }))); - } - else if depth < 1 { - return Err(From::from(BlockError::UncleIsBrother(OutOfBounds { min: Some(header.number() - depth), max: Some(header.number() - 1), found: uncle.number() }))); - } + let depth = if header.number() > uncle.number() { + header.number() - uncle.number() + } else { + 0 + }; + if depth > MAX_UNCLE_AGE as u64 { + return Err(From::from(BlockError::UncleTooOld(OutOfBounds { + min: Some(header.number() - depth), + max: Some(header.number() - 1), + found: uncle.number(), + }))); + } else if depth < 1 { + return Err(From::from(BlockError::UncleIsBrother(OutOfBounds { + min: Some(header.number() - depth), + max: Some(header.number() - 1), + found: uncle.number(), + }))); + } - // cB - // cB.p^1 1 depth, valid uncle - // cB.p^2 ---/ 2 - // cB.p^3 -----/ 3 - // cB.p^4 -------/ 4 - // cB.p^5 ---------/ 5 - // cB.p^6 -----------/ 6 - // cB.p^7 -------------/ - // cB.p^8 - let mut expected_uncle_parent = header.parent_hash().clone(); - let uncle_parent = bc.block_header_data(&uncle.parent_hash()).ok_or_else(|| Error::from(BlockError::UnknownUncleParent(uncle.parent_hash().clone())))?; - for _ in 0..depth { - match bc.block_details(&expected_uncle_parent) { - Some(details) => { - expected_uncle_parent = details.parent; - }, - None => break - } - } - if 
expected_uncle_parent != uncle_parent.hash() { - return Err(From::from(BlockError::UncleParentNotInChain(uncle_parent.hash()))); - } + // cB + // cB.p^1 1 depth, valid uncle + // cB.p^2 ---/ 2 + // cB.p^3 -----/ 3 + // cB.p^4 -------/ 4 + // cB.p^5 ---------/ 5 + // cB.p^6 -----------/ 6 + // cB.p^7 -------------/ + // cB.p^8 + let mut expected_uncle_parent = header.parent_hash().clone(); + let uncle_parent = bc.block_header_data(&uncle.parent_hash()).ok_or_else(|| { + Error::from(BlockError::UnknownUncleParent(uncle.parent_hash().clone())) + })?; + for _ in 0..depth { + match bc.block_details(&expected_uncle_parent) { + Some(details) => { + expected_uncle_parent = details.parent; + } + None => break, + } + } + if expected_uncle_parent != uncle_parent.hash() { + return Err(From::from(BlockError::UncleParentNotInChain( + uncle_parent.hash(), + ))); + } - let uncle_parent = uncle_parent.decode()?; - verify_parent(&uncle, &uncle_parent, engine)?; - engine.verify_block_family(&uncle, &uncle_parent)?; - verified.insert(uncle.hash()); - } - } + let uncle_parent = uncle_parent.decode()?; + verify_parent(&uncle, &uncle_parent, engine)?; + engine.verify_block_family(&uncle, &uncle_parent)?; + verified.insert(uncle.hash()); + } + } - Ok(()) + Ok(()) } /// Phase 4 verification. 
Check block information against transaction enactment results, pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error> { - if expected.state_root() != got.state_root() { - return Err(From::from(BlockError::InvalidStateRoot(Mismatch { expected: *expected.state_root(), found: *got.state_root() }))) - } - if expected.gas_used() != got.gas_used() { - return Err(From::from(BlockError::InvalidGasUsed(Mismatch { expected: *expected.gas_used(), found: *got.gas_used() }))) - } - if expected.log_bloom() != got.log_bloom() { - return Err(From::from(BlockError::InvalidLogBloom(Box::new(Mismatch { expected: *expected.log_bloom(), found: *got.log_bloom() })))) - } - if expected.receipts_root() != got.receipts_root() { - return Err(From::from(BlockError::InvalidReceiptsRoot(Mismatch { expected: *expected.receipts_root(), found: *got.receipts_root() }))) - } - Ok(()) + if expected.state_root() != got.state_root() { + return Err(From::from(BlockError::InvalidStateRoot(Mismatch { + expected: *expected.state_root(), + found: *got.state_root(), + }))); + } + if expected.gas_used() != got.gas_used() { + return Err(From::from(BlockError::InvalidGasUsed(Mismatch { + expected: *expected.gas_used(), + found: *got.gas_used(), + }))); + } + if expected.log_bloom() != got.log_bloom() { + return Err(From::from(BlockError::InvalidLogBloom(Box::new( + Mismatch { + expected: *expected.log_bloom(), + found: *got.log_bloom(), + }, + )))); + } + if expected.receipts_root() != got.receipts_root() { + return Err(From::from(BlockError::InvalidReceiptsRoot(Mismatch { + expected: *expected.receipts_root(), + found: *got.receipts_root(), + }))); + } + Ok(()) } /// Check basic header parameters. 
-pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool, check_seal: bool) -> Result<(), Error> { - if check_seal { - let expected_seal_fields = engine.seal_fields(header); - if header.seal().len() != expected_seal_fields { - return Err(From::from(BlockError::InvalidSealArity( - Mismatch { expected: expected_seal_fields, found: header.seal().len() } - ))); - } - } +pub fn verify_header_params( + header: &Header, + engine: &EthEngine, + is_full: bool, + check_seal: bool, +) -> Result<(), Error> { + if check_seal { + let expected_seal_fields = engine.seal_fields(header); + if header.seal().len() != expected_seal_fields { + return Err(From::from(BlockError::InvalidSealArity(Mismatch { + expected: expected_seal_fields, + found: header.seal().len(), + }))); + } + } - if header.number() >= From::from(BlockNumber::max_value()) { - return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number() }))) - } - if header.gas_used() > header.gas_limit() { - return Err(From::from(BlockError::TooMuchGasUsed(OutOfBounds { max: Some(*header.gas_limit()), min: None, found: *header.gas_used() }))); - } - let min_gas_limit = engine.params().min_gas_limit; - if header.gas_limit() < &min_gas_limit { - return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas_limit), max: None, found: *header.gas_limit() }))); - } - if let Some(limit) = engine.maximum_gas_limit() { - if header.gas_limit() > &limit { - return Err(From::from(::error::BlockError::InvalidGasLimit(OutOfBounds { min: None, max: Some(limit), found: *header.gas_limit() }))); - } - } - let maximum_extra_data_size = engine.maximum_extra_data_size(); - if header.number() != 0 && header.extra_data().len() > maximum_extra_data_size { - return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: Some(maximum_extra_data_size), found: header.extra_data().len() }))); - } + if 
header.number() >= From::from(BlockNumber::max_value()) { + return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { + max: Some(From::from(BlockNumber::max_value())), + min: None, + found: header.number(), + }))); + } + if header.gas_used() > header.gas_limit() { + return Err(From::from(BlockError::TooMuchGasUsed(OutOfBounds { + max: Some(*header.gas_limit()), + min: None, + found: *header.gas_used(), + }))); + } + let min_gas_limit = engine.params().min_gas_limit; + if header.gas_limit() < &min_gas_limit { + return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { + min: Some(min_gas_limit), + max: None, + found: *header.gas_limit(), + }))); + } + if let Some(limit) = engine.maximum_gas_limit() { + if header.gas_limit() > &limit { + return Err(From::from(::error::BlockError::InvalidGasLimit( + OutOfBounds { + min: None, + max: Some(limit), + found: *header.gas_limit(), + }, + ))); + } + } + let maximum_extra_data_size = engine.maximum_extra_data_size(); + if header.number() != 0 && header.extra_data().len() > maximum_extra_data_size { + return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { + min: None, + max: Some(maximum_extra_data_size), + found: header.extra_data().len(), + }))); + } - if let Some(ref ext) = engine.machine().ethash_extensions() { - if header.number() >= ext.dao_hardfork_transition && - header.number() <= ext.dao_hardfork_transition + 9 && - header.extra_data()[..] != b"dao-hard-fork"[..] { - return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: None, found: 0 }))); - } - } + if let Some(ref ext) = engine.machine().ethash_extensions() { + if header.number() >= ext.dao_hardfork_transition + && header.number() <= ext.dao_hardfork_transition + 9 + && header.extra_data()[..] != b"dao-hard-fork"[..] 
+ { + return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { + min: None, + max: None, + found: 0, + }))); + } + } - if is_full { - const ACCEPTABLE_DRIFT: Duration = Duration::from_secs(15); - // this will resist overflow until `year 2037` - let max_time = SystemTime::now() + ACCEPTABLE_DRIFT; - let invalid_threshold = max_time + ACCEPTABLE_DRIFT * 9; - let timestamp = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(header.timestamp())) - .ok_or(BlockError::TimestampOverflow)?; + if is_full { + const ACCEPTABLE_DRIFT: Duration = Duration::from_secs(15); + // this will resist overflow until `year 2037` + let max_time = SystemTime::now() + ACCEPTABLE_DRIFT; + let invalid_threshold = max_time + ACCEPTABLE_DRIFT * 9; + let timestamp = + CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(header.timestamp())) + .ok_or(BlockError::TimestampOverflow)?; - if timestamp > invalid_threshold { - return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: Some(max_time), min: None, found: timestamp }))) - } + if timestamp > invalid_threshold { + return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { + max: Some(max_time), + min: None, + found: timestamp, + }))); + } - if timestamp > max_time { - return Err(From::from(BlockError::TemporarilyInvalid(OutOfBounds { max: Some(max_time), min: None, found: timestamp }))) - } - } + if timestamp > max_time { + return Err(From::from(BlockError::TemporarilyInvalid(OutOfBounds { + max: Some(max_time), + min: None, + found: timestamp, + }))); + } + } - Ok(()) + Ok(()) } /// Check header parameters agains parent header. 
fn verify_parent(header: &Header, parent: &Header, engine: &EthEngine) -> Result<(), Error> { - assert!(header.parent_hash().is_zero() || &parent.hash() == header.parent_hash(), - "Parent hash should already have been verified; qed"); + assert!( + header.parent_hash().is_zero() || &parent.hash() == header.parent_hash(), + "Parent hash should already have been verified; qed" + ); - let gas_limit_divisor = engine.params().gas_limit_bound_divisor; + let gas_limit_divisor = engine.params().gas_limit_bound_divisor; - if !engine.is_timestamp_valid(header.timestamp(), parent.timestamp()) { - let now = SystemTime::now(); - let min = CheckedSystemTime::checked_add(now, Duration::from_secs(parent.timestamp().saturating_add(1))) - .ok_or(BlockError::TimestampOverflow)?; - let found = CheckedSystemTime::checked_add(now, Duration::from_secs(header.timestamp())) - .ok_or(BlockError::TimestampOverflow)?; - return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: None, min: Some(min), found }))) - } - if header.number() != parent.number() + 1 { - return Err(From::from(BlockError::InvalidNumber(Mismatch { expected: parent.number() + 1, found: header.number() }))); - } + if !engine.is_timestamp_valid(header.timestamp(), parent.timestamp()) { + let now = SystemTime::now(); + let min = CheckedSystemTime::checked_add( + now, + Duration::from_secs(parent.timestamp().saturating_add(1)), + ) + .ok_or(BlockError::TimestampOverflow)?; + let found = CheckedSystemTime::checked_add(now, Duration::from_secs(header.timestamp())) + .ok_or(BlockError::TimestampOverflow)?; + return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { + max: None, + min: Some(min), + found, + }))); + } + if header.number() != parent.number() + 1 { + return Err(From::from(BlockError::InvalidNumber(Mismatch { + expected: parent.number() + 1, + found: header.number(), + }))); + } - if header.number() == 0 { - return Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: 
header.number() }).into()); - } + if header.number() == 0 { + return Err(BlockError::RidiculousNumber(OutOfBounds { + min: Some(1), + max: None, + found: header.number(), + }) + .into()); + } - let parent_gas_limit = *parent.gas_limit(); - let min_gas = parent_gas_limit - parent_gas_limit / gas_limit_divisor; - let max_gas = parent_gas_limit + parent_gas_limit / gas_limit_divisor; - if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas { - return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: *header.gas_limit() }))); - } + let parent_gas_limit = *parent.gas_limit(); + let min_gas = parent_gas_limit - parent_gas_limit / gas_limit_divisor; + let max_gas = parent_gas_limit + parent_gas_limit / gas_limit_divisor; + if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas { + return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { + min: Some(min_gas), + max: Some(max_gas), + found: *header.gas_limit(), + }))); + } - Ok(()) + Ok(()) } /// Verify block data against header: transactions root and uncles hash. 
fn verify_block_integrity(block: &Unverified) -> Result<(), Error> { - let block_rlp = Rlp::new(&block.bytes); - let tx = block_rlp.at(1)?; - let expected_root = ordered_trie_root(tx.iter().map(|r| r.as_raw())); - if &expected_root != block.header.transactions_root() { - bail!(BlockError::InvalidTransactionsRoot(Mismatch { - expected: expected_root, - found: *block.header.transactions_root(), - })); - } - let expected_uncles = keccak(block_rlp.at(2)?.as_raw()); - if &expected_uncles != block.header.uncles_hash(){ - bail!(BlockError::InvalidUnclesHash(Mismatch { - expected: expected_uncles, - found: *block.header.uncles_hash(), - })); - } - Ok(()) + let block_rlp = Rlp::new(&block.bytes); + let tx = block_rlp.at(1)?; + let expected_root = ordered_trie_root(tx.iter().map(|r| r.as_raw())); + if &expected_root != block.header.transactions_root() { + bail!(BlockError::InvalidTransactionsRoot(Mismatch { + expected: expected_root, + found: *block.header.transactions_root(), + })); + } + let expected_uncles = keccak(block_rlp.at(2)?.as_raw()); + if &expected_uncles != block.header.uncles_hash() { + bail!(BlockError::InvalidUnclesHash(Mismatch { + expected: expected_uncles, + found: *block.header.uncles_hash(), + })); + } + Ok(()) } #[cfg(test)] mod tests { - use super::*; + use super::*; - use std::collections::{BTreeMap, HashMap}; - use std::time::{SystemTime, UNIX_EPOCH}; - use ethereum_types::{H256, BloomRef, U256}; - use blockchain::{BlockDetails, TransactionAddress, BlockReceipts}; - use types::encoded; - use hash::keccak; - use engines::EthEngine; - use error::BlockError::*; - use error::ErrorKind; - use ethkey::{Random, Generator}; - use spec::{CommonParams, Spec}; - use test_helpers::{create_test_block_with_data, create_test_block}; - use types::transaction::{SignedTransaction, Transaction, UnverifiedTransaction, Action}; - use types::log_entry::{LogEntry, LocalizedLogEntry}; - use rlp; - use triehash::ordered_trie_root; + use blockchain::{BlockDetails, 
BlockReceipts, TransactionAddress}; + use engines::EthEngine; + use error::{BlockError::*, ErrorKind}; + use ethereum_types::{BloomRef, H256, U256}; + use ethkey::{Generator, Random}; + use hash::keccak; + use rlp; + use spec::{CommonParams, Spec}; + use std::{ + collections::{BTreeMap, HashMap}, + time::{SystemTime, UNIX_EPOCH}, + }; + use test_helpers::{create_test_block, create_test_block_with_data}; + use triehash::ordered_trie_root; + use types::{ + encoded, + log_entry::{LocalizedLogEntry, LogEntry}, + transaction::{Action, SignedTransaction, Transaction, UnverifiedTransaction}, + }; - fn check_ok(result: Result<(), Error>) { - result.unwrap_or_else(|e| panic!("Block verification failed: {:?}", e)); - } + fn check_ok(result: Result<(), Error>) { + result.unwrap_or_else(|e| panic!("Block verification failed: {:?}", e)); + } - fn check_fail(result: Result<(), Error>, e: BlockError) { - match result { - Err(Error(ErrorKind::Block(ref error), _)) if *error == e => (), - Err(other) => panic!("Block verification failed.\nExpected: {:?}\nGot: {:?}", e, other), - Ok(_) => panic!("Block verification failed.\nExpected: {:?}\nGot: Ok", e), - } - } + fn check_fail(result: Result<(), Error>, e: BlockError) { + match result { + Err(Error(ErrorKind::Block(ref error), _)) if *error == e => (), + Err(other) => panic!( + "Block verification failed.\nExpected: {:?}\nGot: {:?}", + e, other + ), + Ok(_) => panic!("Block verification failed.\nExpected: {:?}\nGot: Ok", e), + } + } - fn check_fail_timestamp(result: Result<(), Error>, temp: bool) { - let name = if temp { "TemporarilyInvalid" } else { "InvalidTimestamp" }; - match result { - Err(Error(ErrorKind::Block(BlockError::InvalidTimestamp(_)), _)) if !temp => (), - Err(Error(ErrorKind::Block(BlockError::TemporarilyInvalid(_)), _)) if temp => (), - Err(other) => panic!("Block verification failed.\nExpected: {}\nGot: {:?}", name, other), - Ok(_) => panic!("Block verification failed.\nExpected: {}\nGot: Ok", name), - } - } + fn 
check_fail_timestamp(result: Result<(), Error>, temp: bool) { + let name = if temp { + "TemporarilyInvalid" + } else { + "InvalidTimestamp" + }; + match result { + Err(Error(ErrorKind::Block(BlockError::InvalidTimestamp(_)), _)) if !temp => (), + Err(Error(ErrorKind::Block(BlockError::TemporarilyInvalid(_)), _)) if temp => (), + Err(other) => panic!( + "Block verification failed.\nExpected: {}\nGot: {:?}", + name, other + ), + Ok(_) => panic!("Block verification failed.\nExpected: {}\nGot: Ok", name), + } + } - struct TestBlockChain { - blocks: HashMap, - numbers: HashMap, - } + struct TestBlockChain { + blocks: HashMap, + numbers: HashMap, + } - impl Default for TestBlockChain { - fn default() -> Self { - TestBlockChain::new() - } - } + impl Default for TestBlockChain { + fn default() -> Self { + TestBlockChain::new() + } + } - impl TestBlockChain { - pub fn new() -> Self { - TestBlockChain { - blocks: HashMap::new(), - numbers: HashMap::new(), - } - } + impl TestBlockChain { + pub fn new() -> Self { + TestBlockChain { + blocks: HashMap::new(), + numbers: HashMap::new(), + } + } - pub fn insert(&mut self, bytes: Bytes) { - let header = Unverified::from_rlp(bytes.clone()).unwrap().header; - let hash = header.hash(); - self.blocks.insert(hash, bytes); - self.numbers.insert(header.number(), hash); - } - } + pub fn insert(&mut self, bytes: Bytes) { + let header = Unverified::from_rlp(bytes.clone()).unwrap().header; + let hash = header.hash(); + self.blocks.insert(hash, bytes); + self.numbers.insert(header.number(), hash); + } + } - impl BlockProvider for TestBlockChain { - fn is_known(&self, hash: &H256) -> bool { - self.blocks.contains_key(hash) - } + impl BlockProvider for TestBlockChain { + fn is_known(&self, hash: &H256) -> bool { + self.blocks.contains_key(hash) + } - fn first_block(&self) -> Option { - unimplemented!() - } + fn first_block(&self) -> Option { + unimplemented!() + } - /// Get raw block data - fn block(&self, hash: &H256) -> Option { - 
self.blocks.get(hash).cloned().map(encoded::Block::new) - } + /// Get raw block data + fn block(&self, hash: &H256) -> Option { + self.blocks.get(hash).cloned().map(encoded::Block::new) + } - fn block_header_data(&self, hash: &H256) -> Option { - self.block(hash) - .map(|b| b.header_view().rlp().as_raw().to_vec()) - .map(encoded::Header::new) - } + fn block_header_data(&self, hash: &H256) -> Option { + self.block(hash) + .map(|b| b.header_view().rlp().as_raw().to_vec()) + .map(encoded::Header::new) + } - fn block_body(&self, hash: &H256) -> Option { - self.block(hash) - .map(|b| BlockChain::block_to_body(&b.into_inner())) - .map(encoded::Body::new) - } + fn block_body(&self, hash: &H256) -> Option { + self.block(hash) + .map(|b| BlockChain::block_to_body(&b.into_inner())) + .map(encoded::Body::new) + } - fn best_ancient_block(&self) -> Option { - None - } + fn best_ancient_block(&self) -> Option { + None + } - /// Get the familial details concerning a block. - fn block_details(&self, hash: &H256) -> Option { - self.blocks.get(hash).map(|bytes| { - let header = Unverified::from_rlp(bytes.to_vec()).unwrap().header; - BlockDetails { - number: header.number(), - total_difficulty: *header.difficulty(), - parent: *header.parent_hash(), - children: Vec::new(), - is_finalized: false, - } - }) - } + /// Get the familial details concerning a block. + fn block_details(&self, hash: &H256) -> Option { + self.blocks.get(hash).map(|bytes| { + let header = Unverified::from_rlp(bytes.to_vec()).unwrap().header; + BlockDetails { + number: header.number(), + total_difficulty: *header.difficulty(), + parent: *header.parent_hash(), + children: Vec::new(), + is_finalized: false, + } + }) + } - fn transaction_address(&self, _hash: &H256) -> Option { - unimplemented!() - } + fn transaction_address(&self, _hash: &H256) -> Option { + unimplemented!() + } - /// Get the hash of given block's number. 
- fn block_hash(&self, index: BlockNumber) -> Option { - self.numbers.get(&index).cloned() - } + /// Get the hash of given block's number. + fn block_hash(&self, index: BlockNumber) -> Option { + self.numbers.get(&index).cloned() + } - fn block_receipts(&self, _hash: &H256) -> Option { - unimplemented!() - } + fn block_receipts(&self, _hash: &H256) -> Option { + unimplemented!() + } - fn blocks_with_bloom<'a, B, I, II>(&self, _blooms: II, _from_block: BlockNumber, _to_block: BlockNumber) -> Vec - where BloomRef<'a>: From, II: IntoIterator + Copy, I: Iterator, Self: Sized { - unimplemented!() - } + fn blocks_with_bloom<'a, B, I, II>( + &self, + _blooms: II, + _from_block: BlockNumber, + _to_block: BlockNumber, + ) -> Vec + where + BloomRef<'a>: From, + II: IntoIterator + Copy, + I: Iterator, + Self: Sized, + { + unimplemented!() + } - fn logs(&self, _blocks: Vec, _matches: F, _limit: Option) -> Vec - where F: Fn(&LogEntry) -> bool, Self: Sized { - unimplemented!() - } - } + fn logs( + &self, + _blocks: Vec, + _matches: F, + _limit: Option, + ) -> Vec + where + F: Fn(&LogEntry) -> bool, + Self: Sized, + { + unimplemented!() + } + } - fn basic_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> { - let unverified = Unverified::from_rlp(bytes.to_vec())?; - verify_block_basic(&unverified, engine, true) - } + fn basic_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> { + let unverified = Unverified::from_rlp(bytes.to_vec())?; + verify_block_basic(&unverified, engine, true) + } - fn family_test(bytes: &[u8], engine: &EthEngine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { - let block = Unverified::from_rlp(bytes.to_vec()).unwrap(); - let header = block.header; - let transactions: Vec<_> = block.transactions - .into_iter() - .map(SignedTransaction::new) - .collect::>()?; + fn family_test(bytes: &[u8], engine: &EthEngine, bc: &BC) -> Result<(), Error> + where + BC: BlockProvider, + { + let block = Unverified::from_rlp(bytes.to_vec()).unwrap(); + 
let header = block.header; + let transactions: Vec<_> = block + .transactions + .into_iter() + .map(SignedTransaction::new) + .collect::>()?; - // TODO: client is really meant to be used for state query here by machine - // additions that need access to state (tx filter in specific) - // no existing tests need access to test, so having this not function - // is fine. - let client = ::client::TestBlockChainClient::default(); - let parent = bc.block_header_data(header.parent_hash()) - .ok_or(BlockError::UnknownParent(*header.parent_hash()))? - .decode()?; + // TODO: client is really meant to be used for state query here by machine + // additions that need access to state (tx filter in specific) + // no existing tests need access to test, so having this not function + // is fine. + let client = ::client::TestBlockChainClient::default(); + let parent = bc + .block_header_data(header.parent_hash()) + .ok_or(BlockError::UnknownParent(*header.parent_hash()))? + .decode()?; - let block = PreverifiedBlock { - header, - transactions, - uncles: block.uncles, - bytes: bytes.to_vec(), - }; + let block = PreverifiedBlock { + header, + transactions, + uncles: block.uncles, + bytes: bytes.to_vec(), + }; - let full_params = FullFamilyParams { - block: &block, - block_provider: bc as &BlockProvider, - client: &client, - }; - verify_block_family(&block.header, &parent, engine, Some(full_params)) - } + let full_params = FullFamilyParams { + block: &block, + block_provider: bc as &BlockProvider, + client: &client, + }; + verify_block_family(&block.header, &parent, engine, Some(full_params)) + } - fn unordered_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> { - let un = Unverified::from_rlp(bytes.to_vec())?; - verify_block_unordered(un, engine, false)?; - Ok(()) - } + fn unordered_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> { + let un = Unverified::from_rlp(bytes.to_vec())?; + verify_block_unordered(un, engine, false)?; + Ok(()) + } - #[test] - fn 
test_verify_block_basic_with_invalid_transactions() { - let spec = Spec::new_test(); - let engine = &*spec.engine; + #[test] + fn test_verify_block_basic_with_invalid_transactions() { + let spec = Spec::new_test(); + let engine = &*spec.engine; - let block = { - let mut rlp = rlp::RlpStream::new_list(3); - let mut header = Header::default(); - // that's an invalid transaction list rlp - let invalid_transactions = vec![vec![0u8]]; - header.set_transactions_root(ordered_trie_root(&invalid_transactions)); - header.set_gas_limit(engine.params().min_gas_limit); - rlp.append(&header); - rlp.append_list::, _>(&invalid_transactions); - rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); - rlp.out() - }; + let block = { + let mut rlp = rlp::RlpStream::new_list(3); + let mut header = Header::default(); + // that's an invalid transaction list rlp + let invalid_transactions = vec![vec![0u8]]; + header.set_transactions_root(ordered_trie_root(&invalid_transactions)); + header.set_gas_limit(engine.params().min_gas_limit); + rlp.append(&header); + rlp.append_list::, _>(&invalid_transactions); + rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); + rlp.out() + }; - assert!(basic_test(&block, engine).is_err()); - } + assert!(basic_test(&block, engine).is_err()); + } - #[test] - fn test_verify_block() { - use rlp::RlpStream; + #[test] + fn test_verify_block() { + use rlp::RlpStream; - // Test against morden - let mut good = Header::new(); - let spec = Spec::new_test(); - let engine = &*spec.engine; + // Test against morden + let mut good = Header::new(); + let spec = Spec::new_test(); + let engine = &*spec.engine; - let min_gas_limit = engine.params().min_gas_limit; - good.set_gas_limit(min_gas_limit); - good.set_timestamp(40); - good.set_number(10); + let min_gas_limit = engine.params().min_gas_limit; + good.set_gas_limit(min_gas_limit); + good.set_timestamp(40); + good.set_number(10); - let keypair = Random.generate().unwrap(); + let keypair = Random.generate().unwrap(); - let tr1 = Transaction { - 
action: Action::Create, - value: U256::from(0), - data: Bytes::new(), - gas: U256::from(30_000), - gas_price: U256::from(40_000), - nonce: U256::one() - }.sign(keypair.secret(), None); + let tr1 = Transaction { + action: Action::Create, + value: U256::from(0), + data: Bytes::new(), + gas: U256::from(30_000), + gas_price: U256::from(40_000), + nonce: U256::one(), + } + .sign(keypair.secret(), None); - let tr2 = Transaction { - action: Action::Create, - value: U256::from(0), - data: Bytes::new(), - gas: U256::from(30_000), - gas_price: U256::from(40_000), - nonce: U256::from(2) - }.sign(keypair.secret(), None); + let tr2 = Transaction { + action: Action::Create, + value: U256::from(0), + data: Bytes::new(), + gas: U256::from(30_000), + gas_price: U256::from(40_000), + nonce: U256::from(2), + } + .sign(keypair.secret(), None); - let tr3 = Transaction { - action: Action::Call(0x0.into()), - value: U256::from(0), - data: Bytes::new(), - gas: U256::from(30_000), - gas_price: U256::from(0), - nonce: U256::zero(), - }.null_sign(0); + let tr3 = Transaction { + action: Action::Call(0x0.into()), + value: U256::from(0), + data: Bytes::new(), + gas: U256::from(30_000), + gas_price: U256::from(0), + nonce: U256::zero(), + } + .null_sign(0); - let good_transactions = [ tr1.clone(), tr2.clone() ]; - let eip86_transactions = [ tr3.clone() ]; + let good_transactions = [tr1.clone(), tr2.clone()]; + let eip86_transactions = [tr3.clone()]; - let diff_inc = U256::from(0x40); + let diff_inc = U256::from(0x40); - let mut parent6 = good.clone(); - parent6.set_number(6); - let mut parent7 = good.clone(); - parent7.set_number(7); - parent7.set_parent_hash(parent6.hash()); - parent7.set_difficulty(parent6.difficulty().clone() + diff_inc); - parent7.set_timestamp(parent6.timestamp() + 10); - let mut parent8 = good.clone(); - parent8.set_number(8); - parent8.set_parent_hash(parent7.hash()); - parent8.set_difficulty(parent7.difficulty().clone() + diff_inc); - 
parent8.set_timestamp(parent7.timestamp() + 10); + let mut parent6 = good.clone(); + parent6.set_number(6); + let mut parent7 = good.clone(); + parent7.set_number(7); + parent7.set_parent_hash(parent6.hash()); + parent7.set_difficulty(parent6.difficulty().clone() + diff_inc); + parent7.set_timestamp(parent6.timestamp() + 10); + let mut parent8 = good.clone(); + parent8.set_number(8); + parent8.set_parent_hash(parent7.hash()); + parent8.set_difficulty(parent7.difficulty().clone() + diff_inc); + parent8.set_timestamp(parent7.timestamp() + 10); - let mut good_uncle1 = good.clone(); - good_uncle1.set_number(9); - good_uncle1.set_parent_hash(parent8.hash()); - good_uncle1.set_difficulty(parent8.difficulty().clone() + diff_inc); - good_uncle1.set_timestamp(parent8.timestamp() + 10); - let mut ex = good_uncle1.extra_data().to_vec(); - ex.push(1u8); - good_uncle1.set_extra_data(ex); + let mut good_uncle1 = good.clone(); + good_uncle1.set_number(9); + good_uncle1.set_parent_hash(parent8.hash()); + good_uncle1.set_difficulty(parent8.difficulty().clone() + diff_inc); + good_uncle1.set_timestamp(parent8.timestamp() + 10); + let mut ex = good_uncle1.extra_data().to_vec(); + ex.push(1u8); + good_uncle1.set_extra_data(ex); - let mut good_uncle2 = good.clone(); - good_uncle2.set_number(8); - good_uncle2.set_parent_hash(parent7.hash()); - good_uncle2.set_difficulty(parent7.difficulty().clone() + diff_inc); - good_uncle2.set_timestamp(parent7.timestamp() + 10); - let mut ex = good_uncle2.extra_data().to_vec(); - ex.push(2u8); - good_uncle2.set_extra_data(ex); + let mut good_uncle2 = good.clone(); + good_uncle2.set_number(8); + good_uncle2.set_parent_hash(parent7.hash()); + good_uncle2.set_difficulty(parent7.difficulty().clone() + diff_inc); + good_uncle2.set_timestamp(parent7.timestamp() + 10); + let mut ex = good_uncle2.extra_data().to_vec(); + ex.push(2u8); + good_uncle2.set_extra_data(ex); - let good_uncles = vec![ good_uncle1.clone(), good_uncle2.clone() ]; - let mut uncles_rlp 
= RlpStream::new(); - uncles_rlp.append_list(&good_uncles); - let good_uncles_hash = keccak(uncles_rlp.as_raw()); - let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::(t))); - let eip86_transactions_root = ordered_trie_root(eip86_transactions.iter().map(|t| ::rlp::encode::(t))); + let good_uncles = vec![good_uncle1.clone(), good_uncle2.clone()]; + let mut uncles_rlp = RlpStream::new(); + uncles_rlp.append_list(&good_uncles); + let good_uncles_hash = keccak(uncles_rlp.as_raw()); + let good_transactions_root = ordered_trie_root( + good_transactions + .iter() + .map(|t| ::rlp::encode::(t)), + ); + let eip86_transactions_root = ordered_trie_root( + eip86_transactions + .iter() + .map(|t| ::rlp::encode::(t)), + ); - let mut parent = good.clone(); - parent.set_number(9); - parent.set_timestamp(parent8.timestamp() + 10); - parent.set_parent_hash(parent8.hash()); - parent.set_difficulty(parent8.difficulty().clone() + diff_inc); + let mut parent = good.clone(); + parent.set_number(9); + parent.set_timestamp(parent8.timestamp() + 10); + parent.set_parent_hash(parent8.hash()); + parent.set_difficulty(parent8.difficulty().clone() + diff_inc); - good.set_parent_hash(parent.hash()); - good.set_difficulty(parent.difficulty().clone() + diff_inc); - good.set_timestamp(parent.timestamp() + 10); + good.set_parent_hash(parent.hash()); + good.set_difficulty(parent.difficulty().clone() + diff_inc); + good.set_timestamp(parent.timestamp() + 10); - let mut bc = TestBlockChain::new(); - bc.insert(create_test_block(&good)); - bc.insert(create_test_block(&parent)); - bc.insert(create_test_block(&parent6)); - bc.insert(create_test_block(&parent7)); - bc.insert(create_test_block(&parent8)); + let mut bc = TestBlockChain::new(); + bc.insert(create_test_block(&good)); + bc.insert(create_test_block(&parent)); + bc.insert(create_test_block(&parent6)); + bc.insert(create_test_block(&parent7)); + bc.insert(create_test_block(&parent8)); - 
check_ok(basic_test(&create_test_block(&good), engine)); + check_ok(basic_test(&create_test_block(&good), engine)); - let mut bad_header = good.clone(); - bad_header.set_transactions_root(eip86_transactions_root.clone()); - bad_header.set_uncles_hash(good_uncles_hash.clone()); - match basic_test(&create_test_block_with_data(&bad_header, &eip86_transactions, &good_uncles), engine) { + let mut bad_header = good.clone(); + bad_header.set_transactions_root(eip86_transactions_root.clone()); + bad_header.set_uncles_hash(good_uncles_hash.clone()); + match basic_test(&create_test_block_with_data(&bad_header, &eip86_transactions, &good_uncles), engine) { Err(Error(ErrorKind::Transaction(ref e), _)) if e == &::ethkey::Error::InvalidSignature.into() => (), e => panic!("Block verification failed.\nExpected: Transaction Error (Invalid Signature)\nGot: {:?}", e), } - let mut header = good.clone(); - header.set_transactions_root(good_transactions_root.clone()); - header.set_uncles_hash(good_uncles_hash.clone()); - check_ok(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine)); + let mut header = good.clone(); + header.set_transactions_root(good_transactions_root.clone()); + header.set_uncles_hash(good_uncles_hash.clone()); + check_ok(basic_test( + &create_test_block_with_data(&header, &good_transactions, &good_uncles), + engine, + )); - header.set_gas_limit(min_gas_limit - 1); - check_fail(basic_test(&create_test_block(&header), engine), - InvalidGasLimit(OutOfBounds { min: Some(min_gas_limit), max: None, found: header.gas_limit().clone() })); + header.set_gas_limit(min_gas_limit - 1); + check_fail( + basic_test(&create_test_block(&header), engine), + InvalidGasLimit(OutOfBounds { + min: Some(min_gas_limit), + max: None, + found: header.gas_limit().clone(), + }), + ); - header = good.clone(); - header.set_number(BlockNumber::max_value()); - check_fail(basic_test(&create_test_block(&header), engine), - RidiculousNumber(OutOfBounds { max: 
Some(BlockNumber::max_value()), min: None, found: header.number() })); + header = good.clone(); + header.set_number(BlockNumber::max_value()); + check_fail( + basic_test(&create_test_block(&header), engine), + RidiculousNumber(OutOfBounds { + max: Some(BlockNumber::max_value()), + min: None, + found: header.number(), + }), + ); - header = good.clone(); - let gas_used = header.gas_limit().clone() + 1; - header.set_gas_used(gas_used); - check_fail(basic_test(&create_test_block(&header), engine), - TooMuchGasUsed(OutOfBounds { max: Some(header.gas_limit().clone()), min: None, found: header.gas_used().clone() })); + header = good.clone(); + let gas_used = header.gas_limit().clone() + 1; + header.set_gas_used(gas_used); + check_fail( + basic_test(&create_test_block(&header), engine), + TooMuchGasUsed(OutOfBounds { + max: Some(header.gas_limit().clone()), + min: None, + found: header.gas_used().clone(), + }), + ); - header = good.clone(); - let mut ex = header.extra_data().to_vec(); - ex.resize(engine.maximum_extra_data_size() + 1, 0u8); - header.set_extra_data(ex); - check_fail(basic_test(&create_test_block(&header), engine), - ExtraDataOutOfBounds(OutOfBounds { max: Some(engine.maximum_extra_data_size()), min: None, found: header.extra_data().len() })); + header = good.clone(); + let mut ex = header.extra_data().to_vec(); + ex.resize(engine.maximum_extra_data_size() + 1, 0u8); + header.set_extra_data(ex); + check_fail( + basic_test(&create_test_block(&header), engine), + ExtraDataOutOfBounds(OutOfBounds { + max: Some(engine.maximum_extra_data_size()), + min: None, + found: header.extra_data().len(), + }), + ); - header = good.clone(); - let mut ex = header.extra_data().to_vec(); - ex.resize(engine.maximum_extra_data_size() + 1, 0u8); - header.set_extra_data(ex); - check_fail(basic_test(&create_test_block(&header), engine), - ExtraDataOutOfBounds(OutOfBounds { max: Some(engine.maximum_extra_data_size()), min: None, found: header.extra_data().len() })); + header = 
good.clone(); + let mut ex = header.extra_data().to_vec(); + ex.resize(engine.maximum_extra_data_size() + 1, 0u8); + header.set_extra_data(ex); + check_fail( + basic_test(&create_test_block(&header), engine), + ExtraDataOutOfBounds(OutOfBounds { + max: Some(engine.maximum_extra_data_size()), + min: None, + found: header.extra_data().len(), + }), + ); - header = good.clone(); - header.set_uncles_hash(good_uncles_hash.clone()); - check_fail(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), - InvalidTransactionsRoot(Mismatch { expected: good_transactions_root.clone(), found: header.transactions_root().clone() })); + header = good.clone(); + header.set_uncles_hash(good_uncles_hash.clone()); + check_fail( + basic_test( + &create_test_block_with_data(&header, &good_transactions, &good_uncles), + engine, + ), + InvalidTransactionsRoot(Mismatch { + expected: good_transactions_root.clone(), + found: header.transactions_root().clone(), + }), + ); - header = good.clone(); - header.set_transactions_root(good_transactions_root.clone()); - check_fail(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), - InvalidUnclesHash(Mismatch { expected: good_uncles_hash.clone(), found: header.uncles_hash().clone() })); + header = good.clone(); + header.set_transactions_root(good_transactions_root.clone()); + check_fail( + basic_test( + &create_test_block_with_data(&header, &good_transactions, &good_uncles), + engine, + ), + InvalidUnclesHash(Mismatch { + expected: good_uncles_hash.clone(), + found: header.uncles_hash().clone(), + }), + ); - check_ok(family_test(&create_test_block(&good), engine, &bc)); - check_ok(family_test(&create_test_block_with_data(&good, &good_transactions, &good_uncles), engine, &bc)); + check_ok(family_test(&create_test_block(&good), engine, &bc)); + check_ok(family_test( + &create_test_block_with_data(&good, &good_transactions, &good_uncles), + engine, + &bc, + )); - header = 
good.clone(); - header.set_parent_hash(H256::random()); - check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine, &bc), - UnknownParent(header.parent_hash().clone())); + header = good.clone(); + header.set_parent_hash(H256::random()); + check_fail( + family_test( + &create_test_block_with_data(&header, &good_transactions, &good_uncles), + engine, + &bc, + ), + UnknownParent(header.parent_hash().clone()), + ); - header = good.clone(); - header.set_timestamp(10); - check_fail_timestamp(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine, &bc), false); + header = good.clone(); + header.set_timestamp(10); + check_fail_timestamp( + family_test( + &create_test_block_with_data(&header, &good_transactions, &good_uncles), + engine, + &bc, + ), + false, + ); - header = good.clone(); - // will return `BlockError::TimestampOverflow` when timestamp > `i32::max_value()` - header.set_timestamp(i32::max_value() as u64); - check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), false); + header = good.clone(); + // will return `BlockError::TimestampOverflow` when timestamp > `i32::max_value()` + header.set_timestamp(i32::max_value() as u64); + check_fail_timestamp( + basic_test( + &create_test_block_with_data(&header, &good_transactions, &good_uncles), + engine, + ), + false, + ); - header = good.clone(); - header.set_timestamp(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() + 20); - check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), true); + header = good.clone(); + header.set_timestamp( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() + + 20, + ); + check_fail_timestamp( + basic_test( + &create_test_block_with_data(&header, &good_transactions, &good_uncles), + engine, + ), + true, + ); - header = good.clone(); - 
header.set_timestamp(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() + 10); - header.set_uncles_hash(good_uncles_hash.clone()); - header.set_transactions_root(good_transactions_root.clone()); - check_ok(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine)); + header = good.clone(); + header.set_timestamp( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() + + 10, + ); + header.set_uncles_hash(good_uncles_hash.clone()); + header.set_transactions_root(good_transactions_root.clone()); + check_ok(basic_test( + &create_test_block_with_data(&header, &good_transactions, &good_uncles), + engine, + )); - header = good.clone(); - header.set_number(9); - check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine, &bc), - InvalidNumber(Mismatch { expected: parent.number() + 1, found: header.number() })); + header = good.clone(); + header.set_number(9); + check_fail( + family_test( + &create_test_block_with_data(&header, &good_transactions, &good_uncles), + engine, + &bc, + ), + InvalidNumber(Mismatch { + expected: parent.number() + 1, + found: header.number(), + }), + ); - header = good.clone(); - let mut bad_uncles = good_uncles.clone(); - bad_uncles.push(good_uncle1.clone()); - check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &bad_uncles), engine, &bc), - TooManyUncles(OutOfBounds { max: Some(engine.maximum_uncle_count(header.number())), min: None, found: bad_uncles.len() })); + header = good.clone(); + let mut bad_uncles = good_uncles.clone(); + bad_uncles.push(good_uncle1.clone()); + check_fail( + family_test( + &create_test_block_with_data(&header, &good_transactions, &bad_uncles), + engine, + &bc, + ), + TooManyUncles(OutOfBounds { + max: Some(engine.maximum_uncle_count(header.number())), + min: None, + found: bad_uncles.len(), + }), + ); - header = good.clone(); - bad_uncles = vec![ good_uncle1.clone(), 
good_uncle1.clone() ]; - check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &bad_uncles), engine, &bc), - DuplicateUncle(good_uncle1.hash())); + header = good.clone(); + bad_uncles = vec![good_uncle1.clone(), good_uncle1.clone()]; + check_fail( + family_test( + &create_test_block_with_data(&header, &good_transactions, &bad_uncles), + engine, + &bc, + ), + DuplicateUncle(good_uncle1.hash()), + ); - header = good.clone(); - header.set_gas_limit(0.into()); - header.set_difficulty("0000000000000000000000000000000000000000000000000000000000020000".parse::().unwrap()); - match family_test(&create_test_block(&header), engine, &bc) { - Err(Error(ErrorKind::Block(InvalidGasLimit(_)), _)) => {}, - Err(_) => { panic!("should be invalid difficulty fail"); }, - _ => { panic!("Should be error, got Ok"); }, - } + header = good.clone(); + header.set_gas_limit(0.into()); + header.set_difficulty( + "0000000000000000000000000000000000000000000000000000000000020000" + .parse::() + .unwrap(), + ); + match family_test(&create_test_block(&header), engine, &bc) { + Err(Error(ErrorKind::Block(InvalidGasLimit(_)), _)) => {} + Err(_) => { + panic!("should be invalid difficulty fail"); + } + _ => { + panic!("Should be error, got Ok"); + } + } - // TODO: some additional uncle checks - } + // TODO: some additional uncle checks + } - #[test] - fn dust_protection() { - use ethkey::{Generator, Random}; - use types::transaction::{Transaction, Action}; - use machine::EthereumMachine; - use engines::NullEngine; + #[test] + fn dust_protection() { + use engines::NullEngine; + use ethkey::{Generator, Random}; + use machine::EthereumMachine; + use types::transaction::{Action, Transaction}; - let mut params = CommonParams::default(); - params.dust_protection_transition = 0; - params.nonce_cap_increment = 2; + let mut params = CommonParams::default(); + params.dust_protection_transition = 0; + params.nonce_cap_increment = 2; - let mut header = Header::default(); - 
header.set_number(1); + let mut header = Header::default(); + header.set_number(1); - let keypair = Random.generate().unwrap(); - let bad_transactions: Vec<_> = (0..3).map(|i| Transaction { - action: Action::Create, - value: U256::zero(), - data: Vec::new(), - gas: 0.into(), - gas_price: U256::zero(), - nonce: i.into(), - }.sign(keypair.secret(), None)).collect(); + let keypair = Random.generate().unwrap(); + let bad_transactions: Vec<_> = (0..3) + .map(|i| { + Transaction { + action: Action::Create, + value: U256::zero(), + data: Vec::new(), + gas: 0.into(), + gas_price: U256::zero(), + nonce: i.into(), + } + .sign(keypair.secret(), None) + }) + .collect(); - let good_transactions = [bad_transactions[0].clone(), bad_transactions[1].clone()]; + let good_transactions = [bad_transactions[0].clone(), bad_transactions[1].clone()]; - let machine = EthereumMachine::regular(params, BTreeMap::new()); - let engine = NullEngine::new(Default::default(), machine); - check_fail(unordered_test(&create_test_block_with_data(&header, &bad_transactions, &[]), &engine), TooManyTransactions(keypair.address())); - unordered_test(&create_test_block_with_data(&header, &good_transactions, &[]), &engine).unwrap(); - } + let machine = EthereumMachine::regular(params, BTreeMap::new()); + let engine = NullEngine::new(Default::default(), machine); + check_fail( + unordered_test( + &create_test_block_with_data(&header, &bad_transactions, &[]), + &engine, + ), + TooManyTransactions(keypair.address()), + ); + unordered_test( + &create_test_block_with_data(&header, &good_transactions, &[]), + &engine, + ) + .unwrap(); + } } diff --git a/ethcore/src/verification/verifier.rs b/ethcore/src/verification/verifier.rs index 76eb60b9a..8f5cdd7d4 100644 --- a/ethcore/src/verification/verifier.rs +++ b/ethcore/src/verification/verifier.rs @@ -16,28 +16,29 @@ //! A generic verifier trait. 
+use super::verification; use call_contract::CallContract; use client::BlockInfo; use engines::EthEngine; use error::Error; use types::header::Header; -use super::verification; /// Should be used to verify blocks. pub trait Verifier: Send + Sync - where C: BlockInfo + CallContract +where + C: BlockInfo + CallContract, { - /// Verify a block relative to its parent and uncles. - fn verify_block_family( - &self, - header: &Header, - parent: &Header, - engine: &EthEngine, - do_full: Option> - ) -> Result<(), Error>; + /// Verify a block relative to its parent and uncles. + fn verify_block_family( + &self, + header: &Header, + parent: &Header, + engine: &EthEngine, + do_full: Option>, + ) -> Result<(), Error>; - /// Do a final verification check for an enacted header vs its expected counterpart. - fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>; - /// Verify a block, inspecing external state. - fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error>; + /// Do a final verification check for an enacted header vs its expected counterpart. + fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>; + /// Verify a block, inspecing external state. + fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error>; } diff --git a/ethcore/sync/src/api.rs b/ethcore/sync/src/api.rs index 4a66f468d..69c5dfaaa 100644 --- a/ethcore/sync/src/api.rs +++ b/ethcore/sync/src/api.rs @@ -14,42 +14,50 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::{Arc, mpsc, atomic}; -use std::collections::{HashMap, BTreeMap}; -use std::io; -use std::ops::RangeInclusive; -use std::time::Duration; use bytes::Bytes; use devp2p::NetworkService; -use network::{NetworkProtocolHandler, NetworkContext, PeerId, ProtocolId, - NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, Error, ErrorKind, - ConnectionFilter}; -use network::client_version::ClientVersion; +use network::{ + client_version::ClientVersion, ConnectionFilter, Error, ErrorKind, + NetworkConfiguration as BasicNetworkConfiguration, NetworkContext, NetworkProtocolHandler, + NonReservedPeerMode, PeerId, ProtocolId, +}; +use std::{ + collections::{BTreeMap, HashMap}, + io, + ops::RangeInclusive, + sync::{atomic, mpsc, Arc}, + time::Duration, +}; -use types::pruning_info::PruningInfo; +use chain::{ + sync_packet::SyncPacket::{PrivateTransactionPacket, SignedPrivateTransactionPacket}, + ChainSyncApi, SyncStatus as EthSyncStatus, ETH_PROTOCOL_VERSION_62, ETH_PROTOCOL_VERSION_63, + PAR_PROTOCOL_VERSION_1, PAR_PROTOCOL_VERSION_2, PAR_PROTOCOL_VERSION_3, +}; +use ethcore::{ + client::{BlockChainClient, ChainMessageType, ChainNotify, NewBlocks}, + snapshot::SnapshotService, +}; use ethereum_types::{H256, H512, U256}; -use io::{TimerToken}; use ethkey::Secret; -use ethcore::client::{BlockChainClient, ChainNotify, NewBlocks, ChainMessageType}; -use ethcore::snapshot::SnapshotService; -use types::BlockNumber; -use sync_io::NetSyncIo; -use chain::{ChainSyncApi, SyncStatus as EthSyncStatus}; -use std::net::{SocketAddr, AddrParseError}; -use std::str::FromStr; -use parking_lot::{RwLock, Mutex}; -use chain::{ETH_PROTOCOL_VERSION_63, ETH_PROTOCOL_VERSION_62, - PAR_PROTOCOL_VERSION_1, PAR_PROTOCOL_VERSION_2, PAR_PROTOCOL_VERSION_3}; -use chain::sync_packet::SyncPacket::{PrivateTransactionPacket, SignedPrivateTransactionPacket}; -use light::client::AsLightClient; -use light::Provider; -use light::net::{ - self as light_net, LightProtocol, Params as 
LightParams, - Capabilities, Handler as LightHandler, EventContext, SampleStore, +use io::TimerToken; +use light::{ + client::AsLightClient, + net::{ + self as light_net, Capabilities, EventContext, Handler as LightHandler, LightProtocol, + Params as LightParams, SampleStore, + }, + Provider, }; use network::IpFilter; +use parking_lot::{Mutex, RwLock}; use private_tx::PrivateTxHandler; -use types::transaction::UnverifiedTransaction; +use std::{ + net::{AddrParseError, SocketAddr}, + str::FromStr, +}; +use sync_io::NetSyncIo; +use types::{pruning_info::PruningInfo, transaction::UnverifiedTransaction, BlockNumber}; use super::light_sync::SyncInfo; @@ -63,172 +71,168 @@ pub const LIGHT_PROTOCOL: ProtocolId = *b"pip"; /// Determine warp sync status. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum WarpSync { - /// Warp sync is enabled. - Enabled, - /// Warp sync is disabled. - Disabled, - /// Only warp sync is allowed (no regular sync) and only after given block number. - OnlyAndAfter(BlockNumber), + /// Warp sync is enabled. + Enabled, + /// Warp sync is disabled. + Disabled, + /// Only warp sync is allowed (no regular sync) and only after given block number. + OnlyAndAfter(BlockNumber), } impl WarpSync { - /// Returns true if warp sync is enabled. - pub fn is_enabled(&self) -> bool { - match *self { - WarpSync::Enabled => true, - WarpSync::OnlyAndAfter(_) => true, - WarpSync::Disabled => false, - } - } + /// Returns true if warp sync is enabled. + pub fn is_enabled(&self) -> bool { + match *self { + WarpSync::Enabled => true, + WarpSync::OnlyAndAfter(_) => true, + WarpSync::Disabled => false, + } + } - /// Returns `true` if we are in warp-only mode. - /// - /// i.e. we will never fall back to regular sync - /// until given block number is reached by - /// successfuly finding and restoring from a snapshot. 
- pub fn is_warp_only(&self) -> bool { - if let WarpSync::OnlyAndAfter(_) = *self { - true - } else { - false - } - } + /// Returns `true` if we are in warp-only mode. + /// + /// i.e. we will never fall back to regular sync + /// until given block number is reached by + /// successfuly finding and restoring from a snapshot. + pub fn is_warp_only(&self) -> bool { + if let WarpSync::OnlyAndAfter(_) = *self { + true + } else { + false + } + } } /// Sync configuration #[derive(Debug, Clone, Copy)] pub struct SyncConfig { - /// Max blocks to download ahead - pub max_download_ahead_blocks: usize, - /// Enable ancient block download. - pub download_old_blocks: bool, - /// Network ID - pub network_id: u64, - /// Main "eth" subprotocol name. - pub subprotocol_name: [u8; 3], - /// Light subprotocol name. - pub light_subprotocol_name: [u8; 3], - /// Fork block to check - pub fork_block: Option<(BlockNumber, H256)>, - /// Enable snapshot sync - pub warp_sync: WarpSync, - /// Enable light client server. - pub serve_light: bool, + /// Max blocks to download ahead + pub max_download_ahead_blocks: usize, + /// Enable ancient block download. + pub download_old_blocks: bool, + /// Network ID + pub network_id: u64, + /// Main "eth" subprotocol name. + pub subprotocol_name: [u8; 3], + /// Light subprotocol name. + pub light_subprotocol_name: [u8; 3], + /// Fork block to check + pub fork_block: Option<(BlockNumber, H256)>, + /// Enable snapshot sync + pub warp_sync: WarpSync, + /// Enable light client server. 
+ pub serve_light: bool, } impl Default for SyncConfig { - fn default() -> SyncConfig { - SyncConfig { - max_download_ahead_blocks: 20000, - download_old_blocks: true, - network_id: 1, - subprotocol_name: ETH_PROTOCOL, - light_subprotocol_name: LIGHT_PROTOCOL, - fork_block: None, - warp_sync: WarpSync::Disabled, - serve_light: false, - } - } + fn default() -> SyncConfig { + SyncConfig { + max_download_ahead_blocks: 20000, + download_old_blocks: true, + network_id: 1, + subprotocol_name: ETH_PROTOCOL, + light_subprotocol_name: LIGHT_PROTOCOL, + fork_block: None, + warp_sync: WarpSync::Disabled, + serve_light: false, + } + } } /// Current sync status pub trait SyncProvider: Send + Sync { - /// Get sync status - fn status(&self) -> EthSyncStatus; + /// Get sync status + fn status(&self) -> EthSyncStatus; - /// Get peers information - fn peers(&self) -> Vec; + /// Get peers information + fn peers(&self) -> Vec; - /// Get the enode if available. - fn enode(&self) -> Option; + /// Get the enode if available. + fn enode(&self) -> Option; - /// Returns propagation count for pending transactions. - fn transactions_stats(&self) -> BTreeMap; + /// Returns propagation count for pending transactions. + fn transactions_stats(&self) -> BTreeMap; } /// Transaction stats #[derive(Debug)] pub struct TransactionStats { - /// Block number where this TX was first seen. - pub first_seen: u64, - /// Peers it was propagated to. - pub propagated_to: BTreeMap, + /// Block number where this TX was first seen. + pub first_seen: u64, + /// Peers it was propagated to. + pub propagated_to: BTreeMap, } /// Peer connection information #[derive(Debug)] pub struct PeerInfo { - /// Public node id - pub id: Option, - /// Node client ID - pub client_version: ClientVersion, - /// Capabilities - pub capabilities: Vec, - /// Remote endpoint address - pub remote_address: String, - /// Local endpoint address - pub local_address: String, - /// Eth protocol info. 
- pub eth_info: Option, - /// Light protocol info. - pub pip_info: Option, + /// Public node id + pub id: Option, + /// Node client ID + pub client_version: ClientVersion, + /// Capabilities + pub capabilities: Vec, + /// Remote endpoint address + pub remote_address: String, + /// Local endpoint address + pub local_address: String, + /// Eth protocol info. + pub eth_info: Option, + /// Light protocol info. + pub pip_info: Option, } /// Ethereum protocol info. #[derive(Debug)] pub struct EthProtocolInfo { - /// Protocol version - pub version: u32, - /// SHA3 of peer best block hash - pub head: H256, - /// Peer total difficulty if known - pub difficulty: Option, + /// Protocol version + pub version: u32, + /// SHA3 of peer best block hash + pub head: H256, + /// Peer total difficulty if known + pub difficulty: Option, } /// PIP protocol info. #[derive(Debug)] pub struct PipProtocolInfo { - /// Protocol version - pub version: u32, - /// SHA3 of peer best block hash - pub head: H256, - /// Peer total difficulty if known - pub difficulty: U256, + /// Protocol version + pub version: u32, + /// SHA3 of peer best block hash + pub head: H256, + /// Peer total difficulty if known + pub difficulty: U256, } impl From for PipProtocolInfo { - fn from(status: light_net::Status) -> Self { - PipProtocolInfo { - version: status.protocol_version, - head: status.head_hash, - difficulty: status.head_td, - } - } + fn from(status: light_net::Status) -> Self { + PipProtocolInfo { + version: status.protocol_version, + head: status.head_hash, + difficulty: status.head_td, + } + } } /// Configuration to attach alternate protocol handlers. /// Only works when IPC is disabled. pub struct AttachedProtocol { - /// The protocol handler in question. - pub handler: Arc, - /// 3-character ID for the protocol. - pub protocol_id: ProtocolId, - /// Supported versions and their packet counts. - pub versions: &'static [(u8, u8)], + /// The protocol handler in question. 
+ pub handler: Arc, + /// 3-character ID for the protocol. + pub protocol_id: ProtocolId, + /// Supported versions and their packet counts. + pub versions: &'static [(u8, u8)], } impl AttachedProtocol { - fn register(&self, network: &NetworkService) { - let res = network.register_protocol( - self.handler.clone(), - self.protocol_id, - self.versions - ); + fn register(&self, network: &NetworkService) { + let res = network.register_protocol(self.handler.clone(), self.protocol_id, self.versions); - if let Err(e) = res { - warn!(target: "sync", "Error attaching protocol {:?}: {:?}", self.protocol_id, e); - } - } + if let Err(e) = res { + warn!(target: "sync", "Error attaching protocol {:?}: {:?}", self.protocol_id, e); + } + } } /// A prioritized tasks run in a specialised timer. @@ -238,188 +242,216 @@ impl AttachedProtocol { /// that happens here should work even if the task is cancelled. #[derive(Debug)] pub enum PriorityTask { - /// Propagate given block - PropagateBlock { - /// When the task was initiated - started: ::std::time::Instant, - /// Raw block RLP to propagate - block: Bytes, - /// Block hash - hash: H256, - /// Blocks difficulty - difficulty: U256, - }, - /// Propagate a list of transactions - PropagateTransactions(::std::time::Instant, Arc), + /// Propagate given block + PropagateBlock { + /// When the task was initiated + started: ::std::time::Instant, + /// Raw block RLP to propagate + block: Bytes, + /// Block hash + hash: H256, + /// Blocks difficulty + difficulty: U256, + }, + /// Propagate a list of transactions + PropagateTransactions(::std::time::Instant, Arc), } impl PriorityTask { - /// Mark the task as being processed, right after it's retrieved from the queue. - pub fn starting(&self) { - match *self { - PriorityTask::PropagateTransactions(_, ref is_ready) => is_ready.store(true, atomic::Ordering::SeqCst), - _ => {}, - } - } + /// Mark the task as being processed, right after it's retrieved from the queue. 
+ pub fn starting(&self) { + match *self { + PriorityTask::PropagateTransactions(_, ref is_ready) => { + is_ready.store(true, atomic::Ordering::SeqCst) + } + _ => {} + } + } } /// EthSync initialization parameters. pub struct Params { - /// Configuration. - pub config: SyncConfig, - /// Blockchain client. - pub chain: Arc, - /// Snapshot service. - pub snapshot_service: Arc, - /// Private tx service. - pub private_tx_handler: Option>, - /// Light data provider. - pub provider: Arc<::light::Provider>, - /// Network layer configuration. - pub network_config: NetworkConfiguration, - /// Other protocols to attach. - pub attached_protos: Vec, + /// Configuration. + pub config: SyncConfig, + /// Blockchain client. + pub chain: Arc, + /// Snapshot service. + pub snapshot_service: Arc, + /// Private tx service. + pub private_tx_handler: Option>, + /// Light data provider. + pub provider: Arc<::light::Provider>, + /// Network layer configuration. + pub network_config: NetworkConfiguration, + /// Other protocols to attach. + pub attached_protos: Vec, } /// Ethereum network protocol handler pub struct EthSync { - /// Network service - network: NetworkService, - /// Main (eth/par) protocol handler - eth_handler: Arc, - /// Light (pip) protocol handler - light_proto: Option>, - /// Other protocols to attach. - attached_protos: Vec, - /// The main subprotocol name - subprotocol_name: [u8; 3], - /// Light subprotocol name. - light_subprotocol_name: [u8; 3], - /// Priority tasks notification channel - priority_tasks: Mutex>, + /// Network service + network: NetworkService, + /// Main (eth/par) protocol handler + eth_handler: Arc, + /// Light (pip) protocol handler + light_proto: Option>, + /// Other protocols to attach. + attached_protos: Vec, + /// The main subprotocol name + subprotocol_name: [u8; 3], + /// Light subprotocol name. 
+ light_subprotocol_name: [u8; 3], + /// Priority tasks notification channel + priority_tasks: Mutex>, } fn light_params( - network_id: u64, - median_peers: f64, - pruning_info: PruningInfo, - sample_store: Option>, + network_id: u64, + median_peers: f64, + pruning_info: PruningInfo, + sample_store: Option>, ) -> LightParams { - let mut light_params = LightParams { - network_id: network_id, - config: Default::default(), - capabilities: Capabilities { - serve_headers: true, - serve_chain_since: Some(pruning_info.earliest_chain), - serve_state_since: Some(pruning_info.earliest_state), - tx_relay: true, - }, - sample_store: sample_store, - }; + let mut light_params = LightParams { + network_id: network_id, + config: Default::default(), + capabilities: Capabilities { + serve_headers: true, + serve_chain_since: Some(pruning_info.earliest_chain), + serve_state_since: Some(pruning_info.earliest_state), + tx_relay: true, + }, + sample_store: sample_store, + }; - light_params.config.median_peers = median_peers; - light_params + light_params.config.median_peers = median_peers; + light_params } impl EthSync { - /// Creates and register protocol with the network service - pub fn new(params: Params, connection_filter: Option>) -> Result, Error> { - let pruning_info = params.chain.pruning_info(); - let light_proto = match params.config.serve_light { - false => None, - true => Some({ - let sample_store = params.network_config.net_config_path - .clone() - .map(::std::path::PathBuf::from) - .map(|mut p| { p.push("request_timings"); light_net::FileStore(p) }) - .map(|store| Box::new(store) as Box<_>); + /// Creates and register protocol with the network service + pub fn new( + params: Params, + connection_filter: Option>, + ) -> Result, Error> { + let pruning_info = params.chain.pruning_info(); + let light_proto = match params.config.serve_light { + false => None, + true => Some({ + let sample_store = params + .network_config + .net_config_path + .clone() + 
.map(::std::path::PathBuf::from) + .map(|mut p| { + p.push("request_timings"); + light_net::FileStore(p) + }) + .map(|store| Box::new(store) as Box<_>); - let median_peers = (params.network_config.min_peers + params.network_config.max_peers) as f64 / 2.0; - let light_params = light_params( - params.config.network_id, - median_peers, - pruning_info, - sample_store, - ); + let median_peers = (params.network_config.min_peers + + params.network_config.max_peers) as f64 + / 2.0; + let light_params = light_params( + params.config.network_id, + median_peers, + pruning_info, + sample_store, + ); - let mut light_proto = LightProtocol::new(params.provider, light_params); - light_proto.add_handler(Arc::new(TxRelay(params.chain.clone()))); + let mut light_proto = LightProtocol::new(params.provider, light_params); + light_proto.add_handler(Arc::new(TxRelay(params.chain.clone()))); - Arc::new(light_proto) - }) - }; + Arc::new(light_proto) + }), + }; - let (priority_tasks_tx, priority_tasks_rx) = mpsc::channel(); - let sync = ChainSyncApi::new( - params.config, - &*params.chain, - params.private_tx_handler.as_ref().cloned(), - priority_tasks_rx, - ); - let service = NetworkService::new(params.network_config.clone().into_basic()?, connection_filter)?; + let (priority_tasks_tx, priority_tasks_rx) = mpsc::channel(); + let sync = ChainSyncApi::new( + params.config, + &*params.chain, + params.private_tx_handler.as_ref().cloned(), + priority_tasks_rx, + ); + let service = NetworkService::new( + params.network_config.clone().into_basic()?, + connection_filter, + )?; - let sync = Arc::new(EthSync { - network: service, - eth_handler: Arc::new(SyncProtocolHandler { - sync, - chain: params.chain, - snapshot_service: params.snapshot_service, - overlay: RwLock::new(HashMap::new()), - }), - light_proto: light_proto, - subprotocol_name: params.config.subprotocol_name, - light_subprotocol_name: params.config.light_subprotocol_name, - attached_protos: params.attached_protos, - priority_tasks: 
Mutex::new(priority_tasks_tx), - }); + let sync = Arc::new(EthSync { + network: service, + eth_handler: Arc::new(SyncProtocolHandler { + sync, + chain: params.chain, + snapshot_service: params.snapshot_service, + overlay: RwLock::new(HashMap::new()), + }), + light_proto: light_proto, + subprotocol_name: params.config.subprotocol_name, + light_subprotocol_name: params.config.light_subprotocol_name, + attached_protos: params.attached_protos, + priority_tasks: Mutex::new(priority_tasks_tx), + }); - Ok(sync) - } + Ok(sync) + } - /// Priority tasks producer - pub fn priority_tasks(&self) -> mpsc::Sender { - self.priority_tasks.lock().clone() - } + /// Priority tasks producer + pub fn priority_tasks(&self) -> mpsc::Sender { + self.priority_tasks.lock().clone() + } } impl SyncProvider for EthSync { - /// Get sync status - fn status(&self) -> EthSyncStatus { - self.eth_handler.sync.status() - } + /// Get sync status + fn status(&self) -> EthSyncStatus { + self.eth_handler.sync.status() + } - /// Get sync peers - fn peers(&self) -> Vec { - self.network.with_context_eval(self.subprotocol_name, |ctx| { - let peer_ids = self.network.connected_peers(); - let light_proto = self.light_proto.as_ref(); + /// Get sync peers + fn peers(&self) -> Vec { + self.network + .with_context_eval(self.subprotocol_name, |ctx| { + let peer_ids = self.network.connected_peers(); + let light_proto = self.light_proto.as_ref(); - let peer_info = self.eth_handler.sync.peer_info(&peer_ids); - peer_ids.into_iter().zip(peer_info).filter_map(|(peer_id, peer_info)| { - let session_info = match ctx.session_info(peer_id) { - None => return None, - Some(info) => info, - }; + let peer_info = self.eth_handler.sync.peer_info(&peer_ids); + peer_ids + .into_iter() + .zip(peer_info) + .filter_map(|(peer_id, peer_info)| { + let session_info = match ctx.session_info(peer_id) { + None => return None, + Some(info) => info, + }; - Some(PeerInfo { - id: session_info.id.map(|id| format!("{:x}", id)), - client_version: 
session_info.client_version, - capabilities: session_info.peer_capabilities.into_iter().map(|c| c.to_string()).collect(), - remote_address: session_info.remote_address, - local_address: session_info.local_address, - eth_info: peer_info, - pip_info: light_proto.as_ref().and_then(|lp| lp.peer_status(peer_id)).map(Into::into), - }) - }).collect() - }).unwrap_or_else(Vec::new) - } + Some(PeerInfo { + id: session_info.id.map(|id| format!("{:x}", id)), + client_version: session_info.client_version, + capabilities: session_info + .peer_capabilities + .into_iter() + .map(|c| c.to_string()) + .collect(), + remote_address: session_info.remote_address, + local_address: session_info.local_address, + eth_info: peer_info, + pip_info: light_proto + .as_ref() + .and_then(|lp| lp.peer_status(peer_id)) + .map(Into::into), + }) + }) + .collect() + }) + .unwrap_or_else(Vec::new) + } - fn enode(&self) -> Option { - self.network.external_url() - } + fn enode(&self) -> Option { + self.network.external_url() + } - fn transactions_stats(&self) -> BTreeMap { - self.eth_handler.sync.transactions_stats() - } + fn transactions_stats(&self) -> BTreeMap { + self.eth_handler.sync.transactions_stats() + } } const PEERS_TIMER: TimerToken = 0; @@ -431,116 +463,145 @@ const PRIORITY_TIMER: TimerToken = 4; pub(crate) const PRIORITY_TIMER_INTERVAL: Duration = Duration::from_millis(250); struct SyncProtocolHandler { - /// Shared blockchain client. - chain: Arc, - /// Shared snapshot service. - snapshot_service: Arc, - /// Sync strategy - sync: ChainSyncApi, - /// Chain overlay used to cache data such as fork block. - overlay: RwLock>, + /// Shared blockchain client. + chain: Arc, + /// Shared snapshot service. + snapshot_service: Arc, + /// Sync strategy + sync: ChainSyncApi, + /// Chain overlay used to cache data such as fork block. 
+ overlay: RwLock>, } impl NetworkProtocolHandler for SyncProtocolHandler { - fn initialize(&self, io: &NetworkContext) { - if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID { - io.register_timer(PEERS_TIMER, Duration::from_millis(700)).expect("Error registering peers timer"); - io.register_timer(MAINTAIN_SYNC_TIMER, Duration::from_millis(1100)).expect("Error registering sync timer"); - io.register_timer(CONTINUE_SYNC_TIMER, Duration::from_millis(2500)).expect("Error registering sync timer"); - io.register_timer(TX_TIMER, Duration::from_millis(1300)).expect("Error registering transactions timer"); + fn initialize(&self, io: &NetworkContext) { + if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID { + io.register_timer(PEERS_TIMER, Duration::from_millis(700)) + .expect("Error registering peers timer"); + io.register_timer(MAINTAIN_SYNC_TIMER, Duration::from_millis(1100)) + .expect("Error registering sync timer"); + io.register_timer(CONTINUE_SYNC_TIMER, Duration::from_millis(2500)) + .expect("Error registering sync timer"); + io.register_timer(TX_TIMER, Duration::from_millis(1300)) + .expect("Error registering transactions timer"); - io.register_timer(PRIORITY_TIMER, PRIORITY_TIMER_INTERVAL).expect("Error registering peers timer"); - } - } + io.register_timer(PRIORITY_TIMER, PRIORITY_TIMER_INTERVAL) + .expect("Error registering peers timer"); + } + } - fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - self.sync.dispatch_packet(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer, packet_id, data); - } + fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + self.sync.dispatch_packet( + &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), + *peer, + packet_id, + data, + ); + } - fn connected(&self, io: &NetworkContext, peer: &PeerId) { - trace_time!("sync::connected"); - // If warp protocol is supported only allow warp handshake - let warp_protocol = 
io.protocol_version(WARP_SYNC_PROTOCOL_ID, *peer).unwrap_or(0) != 0; - let warp_context = io.subprotocol_name() == WARP_SYNC_PROTOCOL_ID; - if warp_protocol == warp_context { - self.sync.write().on_peer_connected(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer); - } - } + fn connected(&self, io: &NetworkContext, peer: &PeerId) { + trace_time!("sync::connected"); + // If warp protocol is supported only allow warp handshake + let warp_protocol = io + .protocol_version(WARP_SYNC_PROTOCOL_ID, *peer) + .unwrap_or(0) + != 0; + let warp_context = io.subprotocol_name() == WARP_SYNC_PROTOCOL_ID; + if warp_protocol == warp_context { + self.sync.write().on_peer_connected( + &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), + *peer, + ); + } + } - fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { - trace_time!("sync::disconnected"); - if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID { - self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer); - } - } + fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { + trace_time!("sync::disconnected"); + if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID { + self.sync.write().on_peer_aborting( + &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), + *peer, + ); + } + } - fn timeout(&self, io: &NetworkContext, timer: TimerToken) { - trace_time!("sync::timeout"); - let mut io = NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay); - match timer { - PEERS_TIMER => self.sync.write().maintain_peers(&mut io), - MAINTAIN_SYNC_TIMER => self.sync.write().maintain_sync(&mut io), - CONTINUE_SYNC_TIMER => self.sync.write().continue_sync(&mut io), - TX_TIMER => self.sync.write().propagate_new_transactions(&mut io), - PRIORITY_TIMER => self.sync.process_priority_queue(&mut io), - _ => warn!("Unknown timer {} triggered.", timer), - } - } + fn timeout(&self, io: 
&NetworkContext, timer: TimerToken) { + trace_time!("sync::timeout"); + let mut io = NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay); + match timer { + PEERS_TIMER => self.sync.write().maintain_peers(&mut io), + MAINTAIN_SYNC_TIMER => self.sync.write().maintain_sync(&mut io), + CONTINUE_SYNC_TIMER => self.sync.write().continue_sync(&mut io), + TX_TIMER => self.sync.write().propagate_new_transactions(&mut io), + PRIORITY_TIMER => self.sync.process_priority_queue(&mut io), + _ => warn!("Unknown timer {} triggered.", timer), + } + } } impl ChainNotify for EthSync { - fn block_pre_import(&self, bytes: &Bytes, hash: &H256, difficulty: &U256) { - let task = PriorityTask::PropagateBlock { - started: ::std::time::Instant::now(), - block: bytes.clone(), - hash: *hash, - difficulty: *difficulty, - }; - if let Err(e) = self.priority_tasks.lock().send(task) { - warn!(target: "sync", "Unexpected error during priority block propagation: {:?}", e); - } - } + fn block_pre_import(&self, bytes: &Bytes, hash: &H256, difficulty: &U256) { + let task = PriorityTask::PropagateBlock { + started: ::std::time::Instant::now(), + block: bytes.clone(), + hash: *hash, + difficulty: *difficulty, + }; + if let Err(e) = self.priority_tasks.lock().send(task) { + warn!(target: "sync", "Unexpected error during priority block propagation: {:?}", e); + } + } - fn new_blocks(&self, new_blocks: NewBlocks) - { - if new_blocks.has_more_blocks_to_import { return } - use light::net::Announcement; + fn new_blocks(&self, new_blocks: NewBlocks) { + if new_blocks.has_more_blocks_to_import { + return; + } + use light::net::Announcement; - self.network.with_context(self.subprotocol_name, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, - &self.eth_handler.overlay); - self.eth_handler.sync.write().chain_new_blocks( - &mut sync_io, - &new_blocks.imported, - &new_blocks.invalid, - new_blocks.route.enacted(), - 
new_blocks.route.retracted(), - &new_blocks.sealed, - &new_blocks.proposed); - }); + self.network.with_context(self.subprotocol_name, |context| { + let mut sync_io = NetSyncIo::new( + context, + &*self.eth_handler.chain, + &*self.eth_handler.snapshot_service, + &self.eth_handler.overlay, + ); + self.eth_handler.sync.write().chain_new_blocks( + &mut sync_io, + &new_blocks.imported, + &new_blocks.invalid, + new_blocks.route.enacted(), + new_blocks.route.retracted(), + &new_blocks.sealed, + &new_blocks.proposed, + ); + }); - self.network.with_context(self.light_subprotocol_name, |context| { - let light_proto = match self.light_proto.as_ref() { - Some(lp) => lp, - None => return, - }; + self.network + .with_context(self.light_subprotocol_name, |context| { + let light_proto = match self.light_proto.as_ref() { + Some(lp) => lp, + None => return, + }; - let chain_info = self.eth_handler.chain.chain_info(); - light_proto.make_announcement(&context, Announcement { - head_hash: chain_info.best_block_hash, - head_num: chain_info.best_block_number, - head_td: chain_info.total_difficulty, - reorg_depth: 0, // recalculated on a per-peer basis. - serve_headers: false, // these fields consist of _changes_ in capability. - serve_state_since: None, - serve_chain_since: None, - tx_relay: false, - }) - }) - } + let chain_info = self.eth_handler.chain.chain_info(); + light_proto.make_announcement( + &context, + Announcement { + head_hash: chain_info.best_block_hash, + head_num: chain_info.best_block_number, + head_td: chain_info.total_difficulty, + reorg_depth: 0, // recalculated on a per-peer basis. + serve_headers: false, // these fields consist of _changes_ in capability. 
+ serve_state_since: None, + serve_chain_since: None, + tx_relay: false, + }, + ) + }) + } - fn start(&self) { - match self.network.start() { + fn start(&self) { + match self.network.start() { Err((err, listen_address)) => { match err.into() { ErrorKind::Io(ref e) if e.kind() == io::ErrorKind::AddrInUse => { @@ -552,44 +613,86 @@ impl ChainNotify for EthSync { _ => {}, } - self.network.register_protocol(self.eth_handler.clone(), self.subprotocol_name, &[ETH_PROTOCOL_VERSION_62, ETH_PROTOCOL_VERSION_63]) - .unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e)); - // register the warp sync subprotocol - self.network.register_protocol(self.eth_handler.clone(), WARP_SYNC_PROTOCOL_ID, &[PAR_PROTOCOL_VERSION_1, PAR_PROTOCOL_VERSION_2, PAR_PROTOCOL_VERSION_3]) - .unwrap_or_else(|e| warn!("Error registering snapshot sync protocol: {:?}", e)); + self.network + .register_protocol( + self.eth_handler.clone(), + self.subprotocol_name, + &[ETH_PROTOCOL_VERSION_62, ETH_PROTOCOL_VERSION_63], + ) + .unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e)); + // register the warp sync subprotocol + self.network + .register_protocol( + self.eth_handler.clone(), + WARP_SYNC_PROTOCOL_ID, + &[ + PAR_PROTOCOL_VERSION_1, + PAR_PROTOCOL_VERSION_2, + PAR_PROTOCOL_VERSION_3, + ], + ) + .unwrap_or_else(|e| warn!("Error registering snapshot sync protocol: {:?}", e)); - // register the light protocol. - if let Some(light_proto) = self.light_proto.as_ref().map(|x| x.clone()) { - self.network.register_protocol(light_proto, self.light_subprotocol_name, ::light::net::PROTOCOL_VERSIONS) - .unwrap_or_else(|e| warn!("Error registering light client protocol: {:?}", e)); - } + // register the light protocol. 
+ if let Some(light_proto) = self.light_proto.as_ref().map(|x| x.clone()) { + self.network + .register_protocol( + light_proto, + self.light_subprotocol_name, + ::light::net::PROTOCOL_VERSIONS, + ) + .unwrap_or_else(|e| warn!("Error registering light client protocol: {:?}", e)); + } - // register any attached protocols. - for proto in &self.attached_protos { proto.register(&self.network) } - } + // register any attached protocols. + for proto in &self.attached_protos { + proto.register(&self.network) + } + } - fn stop(&self) { - self.eth_handler.snapshot_service.abort_restore(); - self.network.stop(); - } + fn stop(&self) { + self.eth_handler.snapshot_service.abort_restore(); + self.network.stop(); + } - fn broadcast(&self, message_type: ChainMessageType) { - self.network.with_context(WARP_SYNC_PROTOCOL_ID, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay); - match message_type { - ChainMessageType::Consensus(message) => self.eth_handler.sync.write().propagate_consensus_packet(&mut sync_io, message), - ChainMessageType::PrivateTransaction(transaction_hash, message) => - self.eth_handler.sync.write().propagate_private_transaction(&mut sync_io, transaction_hash, PrivateTransactionPacket, message), - ChainMessageType::SignedPrivateTransaction(transaction_hash, message) => - self.eth_handler.sync.write().propagate_private_transaction(&mut sync_io, transaction_hash, SignedPrivateTransactionPacket, message), - } - }); - } + fn broadcast(&self, message_type: ChainMessageType) { + self.network.with_context(WARP_SYNC_PROTOCOL_ID, |context| { + let mut sync_io = NetSyncIo::new( + context, + &*self.eth_handler.chain, + &*self.eth_handler.snapshot_service, + &self.eth_handler.overlay, + ); + match message_type { + ChainMessageType::Consensus(message) => self + .eth_handler + .sync + .write() + .propagate_consensus_packet(&mut sync_io, message), + 
ChainMessageType::PrivateTransaction(transaction_hash, message) => { + self.eth_handler.sync.write().propagate_private_transaction( + &mut sync_io, + transaction_hash, + PrivateTransactionPacket, + message, + ) + } + ChainMessageType::SignedPrivateTransaction(transaction_hash, message) => { + self.eth_handler.sync.write().propagate_private_transaction( + &mut sync_io, + transaction_hash, + SignedPrivateTransactionPacket, + message, + ) + } + } + }); + } - fn transactions_received(&self, txs: &[UnverifiedTransaction], peer_id: PeerId) { - let mut sync = self.eth_handler.sync.write(); - sync.transactions_received(txs, peer_id); - } + fn transactions_received(&self, txs: &[UnverifiedTransaction], peer_id: PeerId) { + let mut sync = self.eth_handler.sync.write(); + sync.transactions_received(txs, peer_id); + } } /// PIP event handler. @@ -597,340 +700,390 @@ impl ChainNotify for EthSync { struct TxRelay(Arc); impl LightHandler for TxRelay { - fn on_transactions(&self, ctx: &EventContext, relay: &[::types::transaction::UnverifiedTransaction]) { - trace!(target: "pip", "Relaying {} transactions from peer {}", relay.len(), ctx.peer()); - self.0.queue_transactions(relay.iter().map(|tx| ::rlp::encode(tx)).collect(), ctx.peer()) - } + fn on_transactions( + &self, + ctx: &EventContext, + relay: &[::types::transaction::UnverifiedTransaction], + ) { + trace!(target: "pip", "Relaying {} transactions from peer {}", relay.len(), ctx.peer()); + self.0.queue_transactions( + relay.iter().map(|tx| ::rlp::encode(tx)).collect(), + ctx.peer(), + ) + } } /// Trait for managing network -pub trait ManageNetwork : Send + Sync { - /// Set to allow unreserved peers to connect - fn accept_unreserved_peers(&self); - /// Set to deny unreserved peers to connect - fn deny_unreserved_peers(&self); - /// Remove reservation for the peer - fn remove_reserved_peer(&self, peer: String) -> Result<(), String>; - /// Add reserved peer - fn add_reserved_peer(&self, peer: String) -> Result<(), String>; - /// 
Start network - fn start_network(&self); - /// Stop network - fn stop_network(&self); - /// Returns the minimum and maximum peers. - fn num_peers_range(&self) -> RangeInclusive; - /// Get network context for protocol. - fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)); +pub trait ManageNetwork: Send + Sync { + /// Set to allow unreserved peers to connect + fn accept_unreserved_peers(&self); + /// Set to deny unreserved peers to connect + fn deny_unreserved_peers(&self); + /// Remove reservation for the peer + fn remove_reserved_peer(&self, peer: String) -> Result<(), String>; + /// Add reserved peer + fn add_reserved_peer(&self, peer: String) -> Result<(), String>; + /// Start network + fn start_network(&self); + /// Stop network + fn stop_network(&self); + /// Returns the minimum and maximum peers. + fn num_peers_range(&self) -> RangeInclusive; + /// Get network context for protocol. + fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)); } impl ManageNetwork for EthSync { - fn accept_unreserved_peers(&self) { - self.network.set_non_reserved_mode(NonReservedPeerMode::Accept); - } + fn accept_unreserved_peers(&self) { + self.network + .set_non_reserved_mode(NonReservedPeerMode::Accept); + } - fn deny_unreserved_peers(&self) { - self.network.set_non_reserved_mode(NonReservedPeerMode::Deny); - } + fn deny_unreserved_peers(&self) { + self.network + .set_non_reserved_mode(NonReservedPeerMode::Deny); + } - fn remove_reserved_peer(&self, peer: String) -> Result<(), String> { - self.network.remove_reserved_peer(&peer).map_err(|e| format!("{:?}", e)) - } + fn remove_reserved_peer(&self, peer: String) -> Result<(), String> { + self.network + .remove_reserved_peer(&peer) + .map_err(|e| format!("{:?}", e)) + } - fn add_reserved_peer(&self, peer: String) -> Result<(), String> { - self.network.add_reserved_peer(&peer).map_err(|e| format!("{:?}", e)) - } + fn add_reserved_peer(&self, peer: String) -> Result<(), String> { 
+ self.network + .add_reserved_peer(&peer) + .map_err(|e| format!("{:?}", e)) + } - fn start_network(&self) { - self.start(); - } + fn start_network(&self) { + self.start(); + } - fn stop_network(&self) { - self.network.with_context(self.subprotocol_name, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay); - self.eth_handler.sync.write().abort(&mut sync_io); - }); + fn stop_network(&self) { + self.network.with_context(self.subprotocol_name, |context| { + let mut sync_io = NetSyncIo::new( + context, + &*self.eth_handler.chain, + &*self.eth_handler.snapshot_service, + &self.eth_handler.overlay, + ); + self.eth_handler.sync.write().abort(&mut sync_io); + }); - if let Some(light_proto) = self.light_proto.as_ref() { - light_proto.abort(); - } + if let Some(light_proto) = self.light_proto.as_ref() { + light_proto.abort(); + } - self.stop(); - } + self.stop(); + } - fn num_peers_range(&self) -> RangeInclusive { - self.network.num_peers_range() - } + fn num_peers_range(&self) -> RangeInclusive { + self.network.num_peers_range() + } - fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) { - self.network.with_context_eval(proto, f); - } + fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) { + self.network.with_context_eval(proto, f); + } } #[derive(Debug, Clone, PartialEq, Eq)] /// Network service configuration pub struct NetworkConfiguration { - /// Directory path to store general network configuration. None means nothing will be saved - pub config_path: Option, - /// Directory path to store network-specific configuration. None means nothing will be saved - pub net_config_path: Option, - /// IP address to listen for incoming connections. Listen to all connections by default - pub listen_address: Option, - /// IP address to advertise. Detected automatically if none. 
- pub public_address: Option, - /// Port for UDP connections, same as TCP by default - pub udp_port: Option, - /// Enable NAT configuration - pub nat_enabled: bool, - /// Enable discovery - pub discovery_enabled: bool, - /// List of initial node addresses - pub boot_nodes: Vec, - /// Use provided node key instead of default - pub use_secret: Option, - /// Max number of connected peers to maintain - pub max_peers: u32, - /// Min number of connected peers to maintain - pub min_peers: u32, - /// Max pending peers. - pub max_pending_peers: u32, - /// Reserved snapshot sync peers. - pub snapshot_peers: u32, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// The non-reserved peer mode. - pub allow_non_reserved: bool, - /// IP Filtering - pub ip_filter: IpFilter, - /// Client version string - pub client_version: String, + /// Directory path to store general network configuration. None means nothing will be saved + pub config_path: Option, + /// Directory path to store network-specific configuration. None means nothing will be saved + pub net_config_path: Option, + /// IP address to listen for incoming connections. Listen to all connections by default + pub listen_address: Option, + /// IP address to advertise. Detected automatically if none. + pub public_address: Option, + /// Port for UDP connections, same as TCP by default + pub udp_port: Option, + /// Enable NAT configuration + pub nat_enabled: bool, + /// Enable discovery + pub discovery_enabled: bool, + /// List of initial node addresses + pub boot_nodes: Vec, + /// Use provided node key instead of default + pub use_secret: Option, + /// Max number of connected peers to maintain + pub max_peers: u32, + /// Min number of connected peers to maintain + pub min_peers: u32, + /// Max pending peers. + pub max_pending_peers: u32, + /// Reserved snapshot sync peers. + pub snapshot_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// The non-reserved peer mode. 
+ pub allow_non_reserved: bool, + /// IP Filtering + pub ip_filter: IpFilter, + /// Client version string + pub client_version: String, } impl NetworkConfiguration { - /// Create a new default config. - pub fn new() -> Self { - From::from(BasicNetworkConfiguration::new()) - } + /// Create a new default config. + pub fn new() -> Self { + From::from(BasicNetworkConfiguration::new()) + } - /// Create a new local config. - pub fn new_local() -> Self { - From::from(BasicNetworkConfiguration::new_local()) - } + /// Create a new local config. + pub fn new_local() -> Self { + From::from(BasicNetworkConfiguration::new_local()) + } - /// Attempt to convert this config into a BasicNetworkConfiguration. - pub fn into_basic(self) -> Result { - Ok(BasicNetworkConfiguration { - config_path: self.config_path, - net_config_path: self.net_config_path, - listen_address: match self.listen_address { None => None, Some(addr) => Some(SocketAddr::from_str(&addr)?) }, - public_address: match self.public_address { None => None, Some(addr) => Some(SocketAddr::from_str(&addr)?) }, - udp_port: self.udp_port, - nat_enabled: self.nat_enabled, - discovery_enabled: self.discovery_enabled, - boot_nodes: self.boot_nodes, - use_secret: self.use_secret, - max_peers: self.max_peers, - min_peers: self.min_peers, - max_handshakes: self.max_pending_peers, - reserved_protocols: hash_map![WARP_SYNC_PROTOCOL_ID => self.snapshot_peers], - reserved_nodes: self.reserved_nodes, - ip_filter: self.ip_filter, - non_reserved_mode: if self.allow_non_reserved { NonReservedPeerMode::Accept } else { NonReservedPeerMode::Deny }, - client_version: self.client_version, - }) - } + /// Attempt to convert this config into a BasicNetworkConfiguration. 
+ pub fn into_basic(self) -> Result { + Ok(BasicNetworkConfiguration { + config_path: self.config_path, + net_config_path: self.net_config_path, + listen_address: match self.listen_address { + None => None, + Some(addr) => Some(SocketAddr::from_str(&addr)?), + }, + public_address: match self.public_address { + None => None, + Some(addr) => Some(SocketAddr::from_str(&addr)?), + }, + udp_port: self.udp_port, + nat_enabled: self.nat_enabled, + discovery_enabled: self.discovery_enabled, + boot_nodes: self.boot_nodes, + use_secret: self.use_secret, + max_peers: self.max_peers, + min_peers: self.min_peers, + max_handshakes: self.max_pending_peers, + reserved_protocols: hash_map![WARP_SYNC_PROTOCOL_ID => self.snapshot_peers], + reserved_nodes: self.reserved_nodes, + ip_filter: self.ip_filter, + non_reserved_mode: if self.allow_non_reserved { + NonReservedPeerMode::Accept + } else { + NonReservedPeerMode::Deny + }, + client_version: self.client_version, + }) + } } impl From for NetworkConfiguration { - fn from(other: BasicNetworkConfiguration) -> Self { - NetworkConfiguration { - config_path: other.config_path, - net_config_path: other.net_config_path, - listen_address: other.listen_address.and_then(|addr| Some(format!("{}", addr))), - public_address: other.public_address.and_then(|addr| Some(format!("{}", addr))), - udp_port: other.udp_port, - nat_enabled: other.nat_enabled, - discovery_enabled: other.discovery_enabled, - boot_nodes: other.boot_nodes, - use_secret: other.use_secret, - max_peers: other.max_peers, - min_peers: other.min_peers, - max_pending_peers: other.max_handshakes, - snapshot_peers: *other.reserved_protocols.get(&WARP_SYNC_PROTOCOL_ID).unwrap_or(&0), - reserved_nodes: other.reserved_nodes, - ip_filter: other.ip_filter, - allow_non_reserved: match other.non_reserved_mode { NonReservedPeerMode::Accept => true, _ => false } , - client_version: other.client_version, - } - } + fn from(other: BasicNetworkConfiguration) -> Self { + NetworkConfiguration { + 
config_path: other.config_path, + net_config_path: other.net_config_path, + listen_address: other + .listen_address + .and_then(|addr| Some(format!("{}", addr))), + public_address: other + .public_address + .and_then(|addr| Some(format!("{}", addr))), + udp_port: other.udp_port, + nat_enabled: other.nat_enabled, + discovery_enabled: other.discovery_enabled, + boot_nodes: other.boot_nodes, + use_secret: other.use_secret, + max_peers: other.max_peers, + min_peers: other.min_peers, + max_pending_peers: other.max_handshakes, + snapshot_peers: *other + .reserved_protocols + .get(&WARP_SYNC_PROTOCOL_ID) + .unwrap_or(&0), + reserved_nodes: other.reserved_nodes, + ip_filter: other.ip_filter, + allow_non_reserved: match other.non_reserved_mode { + NonReservedPeerMode::Accept => true, + _ => false, + }, + client_version: other.client_version, + } + } } /// Configuration for IPC service. #[derive(Debug, Clone)] pub struct ServiceConfiguration { - /// Sync config. - pub sync: SyncConfig, - /// Network configuration. - pub net: NetworkConfiguration, - /// IPC path. - pub io_path: String, + /// Sync config. + pub sync: SyncConfig, + /// Network configuration. + pub net: NetworkConfiguration, + /// IPC path. + pub io_path: String, } /// Numbers of peers (max, min, active). #[derive(Debug, Clone)] pub struct PeerNumbers { - /// Number of connected peers. - pub connected: usize, - /// Number of active peers. - pub active: usize, - /// Max peers. - pub max: usize, - /// Min peers. - pub min: usize, + /// Number of connected peers. + pub connected: usize, + /// Number of active peers. + pub active: usize, + /// Max peers. + pub max: usize, + /// Min peers. + pub min: usize, } /// Light synchronization. pub trait LightSyncProvider { - /// Get peer numbers. - fn peer_numbers(&self) -> PeerNumbers; + /// Get peer numbers. 
+ fn peer_numbers(&self) -> PeerNumbers; - /// Get peers information - fn peers(&self) -> Vec; + /// Get peers information + fn peers(&self) -> Vec; - /// Get network id. - fn network_id(&self) -> u64; + /// Get network id. + fn network_id(&self) -> u64; - /// Get the enode if available. - fn enode(&self) -> Option; + /// Get the enode if available. + fn enode(&self) -> Option; - /// Returns propagation count for pending transactions. - fn transactions_stats(&self) -> BTreeMap; + /// Returns propagation count for pending transactions. + fn transactions_stats(&self) -> BTreeMap; } /// Wrapper around `light_sync::SyncInfo` to expose those methods without the concrete type `LightSync` pub trait LightSyncInfo: Send + Sync { - /// Get the highest block advertised on the network. - fn highest_block(&self) -> Option; + /// Get the highest block advertised on the network. + fn highest_block(&self) -> Option; - /// Get the block number at the time of sync start. - fn start_block(&self) -> u64; + /// Get the block number at the time of sync start. + fn start_block(&self) -> u64; - /// Whether major sync is underway. - fn is_major_importing(&self) -> bool; + /// Whether major sync is underway. + fn is_major_importing(&self) -> bool; } /// Execute a closure with a protocol context. pub trait LightNetworkDispatcher { - /// Execute a closure with a protocol context. - fn with_context(&self, f: F) -> Option where F: FnOnce(&::light::net::BasicContext) -> T; + /// Execute a closure with a protocol context. + fn with_context(&self, f: F) -> Option + where + F: FnOnce(&::light::net::BasicContext) -> T; } /// Configuration for the light sync. pub struct LightSyncParams { - /// Network configuration. - pub network_config: BasicNetworkConfiguration, - /// Light client to sync to. - pub client: Arc, - /// Network ID. - pub network_id: u64, - /// Subprotocol name. - pub subprotocol_name: [u8; 3], - /// Other handlers to attach. - pub handlers: Vec>, - /// Other subprotocols to run. 
- pub attached_protos: Vec, + /// Network configuration. + pub network_config: BasicNetworkConfiguration, + /// Light client to sync to. + pub client: Arc, + /// Network ID. + pub network_id: u64, + /// Subprotocol name. + pub subprotocol_name: [u8; 3], + /// Other handlers to attach. + pub handlers: Vec>, + /// Other subprotocols to run. + pub attached_protos: Vec, } /// Service for light synchronization. pub struct LightSync { - proto: Arc, - sync: Arc, - attached_protos: Vec, - network: NetworkService, - subprotocol_name: [u8; 3], - network_id: u64, + proto: Arc, + sync: Arc, + attached_protos: Vec, + network: NetworkService, + subprotocol_name: [u8; 3], + network_id: u64, } impl LightSync { - /// Create a new light sync service. - pub fn new(params: LightSyncParams) -> Result - where L: AsLightClient + Provider + Sync + Send + 'static - { - use light_sync::LightSync as SyncHandler; + /// Create a new light sync service. + pub fn new(params: LightSyncParams) -> Result + where + L: AsLightClient + Provider + Sync + Send + 'static, + { + use light_sync::LightSync as SyncHandler; - // initialize light protocol handler and attach sync module. - let (sync, light_proto) = { - let light_params = LightParams { - network_id: params.network_id, - config: Default::default(), - capabilities: Capabilities { - serve_headers: false, - serve_chain_since: None, - serve_state_since: None, - tx_relay: false, - }, - sample_store: None, - }; + // initialize light protocol handler and attach sync module. 
+ let (sync, light_proto) = { + let light_params = LightParams { + network_id: params.network_id, + config: Default::default(), + capabilities: Capabilities { + serve_headers: false, + serve_chain_since: None, + serve_state_since: None, + tx_relay: false, + }, + sample_store: None, + }; - let mut light_proto = LightProtocol::new(params.client.clone(), light_params); - let sync_handler = Arc::new(SyncHandler::new(params.client.clone())?); - light_proto.add_handler(sync_handler.clone()); + let mut light_proto = LightProtocol::new(params.client.clone(), light_params); + let sync_handler = Arc::new(SyncHandler::new(params.client.clone())?); + light_proto.add_handler(sync_handler.clone()); - for handler in params.handlers { - light_proto.add_handler(handler); - } + for handler in params.handlers { + light_proto.add_handler(handler); + } - (sync_handler, Arc::new(light_proto)) - }; + (sync_handler, Arc::new(light_proto)) + }; - let service = NetworkService::new(params.network_config, None)?; - - Ok(LightSync { - proto: light_proto, - sync: sync, - attached_protos: params.attached_protos, - network: service, - subprotocol_name: params.subprotocol_name, - network_id: params.network_id, - }) - } + let service = NetworkService::new(params.network_config, None)?; + Ok(LightSync { + proto: light_proto, + sync: sync, + attached_protos: params.attached_protos, + network: service, + subprotocol_name: params.subprotocol_name, + network_id: params.network_id, + }) + } } impl ::std::ops::Deref for LightSync { - type Target = ::light_sync::SyncInfo; + type Target = ::light_sync::SyncInfo; - fn deref(&self) -> &Self::Target { &*self.sync } + fn deref(&self) -> &Self::Target { + &*self.sync + } } - impl LightNetworkDispatcher for LightSync { - fn with_context(&self, f: F) -> Option where F: FnOnce(&::light::net::BasicContext) -> T { - self.network.with_context_eval( - self.subprotocol_name, - move |ctx| self.proto.with_context(&ctx, f), - ) - } + fn with_context(&self, f: F) -> Option 
+ where + F: FnOnce(&::light::net::BasicContext) -> T, + { + self.network + .with_context_eval(self.subprotocol_name, move |ctx| { + self.proto.with_context(&ctx, f) + }) + } } impl ManageNetwork for LightSync { - fn accept_unreserved_peers(&self) { - self.network.set_non_reserved_mode(NonReservedPeerMode::Accept); - } + fn accept_unreserved_peers(&self) { + self.network + .set_non_reserved_mode(NonReservedPeerMode::Accept); + } - fn deny_unreserved_peers(&self) { - self.network.set_non_reserved_mode(NonReservedPeerMode::Deny); - } + fn deny_unreserved_peers(&self) { + self.network + .set_non_reserved_mode(NonReservedPeerMode::Deny); + } - fn remove_reserved_peer(&self, peer: String) -> Result<(), String> { - self.network.remove_reserved_peer(&peer).map_err(|e| format!("{:?}", e)) - } + fn remove_reserved_peer(&self, peer: String) -> Result<(), String> { + self.network + .remove_reserved_peer(&peer) + .map_err(|e| format!("{:?}", e)) + } - fn add_reserved_peer(&self, peer: String) -> Result<(), String> { - self.network.add_reserved_peer(&peer).map_err(|e| format!("{:?}", e)) - } + fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + self.network + .add_reserved_peer(&peer) + .map_err(|e| format!("{:?}", e)) + } - fn start_network(&self) { - match self.network.start() { + fn start_network(&self) { + match self.network.start() { Err((err, listen_address)) => { match err.into() { ErrorKind::Io(ref e) if e.kind() == io::ErrorKind::AddrInUse => { @@ -942,87 +1095,103 @@ impl ManageNetwork for LightSync { _ => {}, } - let light_proto = self.proto.clone(); + let light_proto = self.proto.clone(); - self.network.register_protocol(light_proto, self.subprotocol_name, ::light::net::PROTOCOL_VERSIONS) - .unwrap_or_else(|e| warn!("Error registering light client protocol: {:?}", e)); + self.network + .register_protocol( + light_proto, + self.subprotocol_name, + ::light::net::PROTOCOL_VERSIONS, + ) + .unwrap_or_else(|e| warn!("Error registering light client 
protocol: {:?}", e)); - for proto in &self.attached_protos { proto.register(&self.network) } - } + for proto in &self.attached_protos { + proto.register(&self.network) + } + } - fn stop_network(&self) { - self.proto.abort(); - self.network.stop(); - } + fn stop_network(&self) { + self.proto.abort(); + self.network.stop(); + } - fn num_peers_range(&self) -> RangeInclusive { - self.network.num_peers_range() - } + fn num_peers_range(&self) -> RangeInclusive { + self.network.num_peers_range() + } - fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) { - self.network.with_context_eval(proto, f); - } + fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) { + self.network.with_context_eval(proto, f); + } } impl LightSyncProvider for LightSync { - fn peer_numbers(&self) -> PeerNumbers { - let (connected, active) = self.proto.peer_count(); - let peers_range = self.num_peers_range(); - debug_assert!(peers_range.end() >= peers_range.start()); - PeerNumbers { - connected: connected, - active: active, - max: *peers_range.end() as usize, - min: *peers_range.start() as usize, - } - } + fn peer_numbers(&self) -> PeerNumbers { + let (connected, active) = self.proto.peer_count(); + let peers_range = self.num_peers_range(); + debug_assert!(peers_range.end() >= peers_range.start()); + PeerNumbers { + connected: connected, + active: active, + max: *peers_range.end() as usize, + min: *peers_range.start() as usize, + } + } - fn peers(&self) -> Vec { - self.network.with_context_eval(self.subprotocol_name, |ctx| { - let peer_ids = self.network.connected_peers(); + fn peers(&self) -> Vec { + self.network + .with_context_eval(self.subprotocol_name, |ctx| { + let peer_ids = self.network.connected_peers(); - peer_ids.into_iter().filter_map(|peer_id| { - let session_info = match ctx.session_info(peer_id) { - None => return None, - Some(info) => info, - }; + peer_ids + .into_iter() + .filter_map(|peer_id| { + let session_info = match 
ctx.session_info(peer_id) { + None => return None, + Some(info) => info, + }; - Some(PeerInfo { - id: session_info.id.map(|id| format!("{:x}", id)), - client_version: session_info.client_version, - capabilities: session_info.peer_capabilities.into_iter().map(|c| c.to_string()).collect(), - remote_address: session_info.remote_address, - local_address: session_info.local_address, - eth_info: None, - pip_info: self.proto.peer_status(peer_id).map(Into::into), - }) - }).collect() - }).unwrap_or_else(Vec::new) - } + Some(PeerInfo { + id: session_info.id.map(|id| format!("{:x}", id)), + client_version: session_info.client_version, + capabilities: session_info + .peer_capabilities + .into_iter() + .map(|c| c.to_string()) + .collect(), + remote_address: session_info.remote_address, + local_address: session_info.local_address, + eth_info: None, + pip_info: self.proto.peer_status(peer_id).map(Into::into), + }) + }) + .collect() + }) + .unwrap_or_else(Vec::new) + } - fn enode(&self) -> Option { - self.network.external_url() - } + fn enode(&self) -> Option { + self.network.external_url() + } - fn network_id(&self) -> u64 { - self.network_id - } + fn network_id(&self) -> u64 { + self.network_id + } - fn transactions_stats(&self) -> BTreeMap { - Default::default() // TODO - } + fn transactions_stats(&self) -> BTreeMap { + Default::default() // TODO + } } impl LightSyncInfo for LightSync { - fn highest_block(&self) -> Option { - (*self.sync).highest_block() - } + fn highest_block(&self) -> Option { + (*self.sync).highest_block() + } - fn start_block(&self) -> u64 { - (*self.sync).start_block() - } + fn start_block(&self) -> u64 { + (*self.sync).start_block() + } - fn is_major_importing(&self) -> bool { - (*self.sync).is_major_importing() - } + fn is_major_importing(&self) -> bool { + (*self.sync).is_major_importing() + } } diff --git a/ethcore/sync/src/block_sync.rs b/ethcore/sync/src/block_sync.rs index 04ffa5f18..58bd00819 100644 --- a/ethcore/sync/src/block_sync.rs +++ 
b/ethcore/sync/src/block_sync.rs @@ -14,23 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use blocks::{BlockCollection, SyncBody, SyncHeader}; +use chain::BlockSet; +use ethcore::{ + client::{BlockId, BlockStatus}, + error::{ + BlockError, Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind, + QueueErrorKind, + }, +}; +use ethereum_types::H256; +use heapsize::HeapSizeOf; +use network::{client_version::ClientCapabilities, PeerId}; +use rlp::{self, Rlp}; +use std::cmp; /// /// Blockchain downloader /// - use std::collections::{HashSet, VecDeque}; -use std::cmp; -use heapsize::HeapSizeOf; -use ethereum_types::H256; -use rlp::{self, Rlp}; -use types::BlockNumber; -use ethcore::client::{BlockStatus, BlockId}; -use ethcore::error::{ImportErrorKind, QueueErrorKind, BlockError, Error as EthcoreError, ErrorKind as EthcoreErrorKind}; use sync_io::SyncIo; -use blocks::{BlockCollection, SyncBody, SyncHeader}; -use chain::BlockSet; -use network::PeerId; -use network::client_version::ClientCapabilities; +use types::BlockNumber; const MAX_HEADERS_TO_REQUEST: usize = 128; const MAX_BODIES_TO_REQUEST_LARGE: usize = 128; @@ -63,942 +66,1045 @@ macro_rules! debug_sync { #[derive(Copy, Clone, Eq, PartialEq, Debug)] /// Downloader state pub enum State { - /// No active downloads. - Idle, - /// Downloading subchain heads - ChainHead, - /// Downloading blocks - Blocks, - /// Download is complete - Complete, + /// No active downloads. + Idle, + /// Downloading subchain heads + ChainHead, + /// Downloading blocks + Blocks, + /// Download is complete + Complete, } /// Data that needs to be requested from a peer. 
pub enum BlockRequest { - Headers { - start: H256, - count: u64, - skip: u64, - }, - Bodies { - hashes: Vec, - }, - Receipts { - hashes: Vec, - }, + Headers { start: H256, count: u64, skip: u64 }, + Bodies { hashes: Vec }, + Receipts { hashes: Vec }, } /// Indicates sync action #[derive(Eq, PartialEq, Debug)] pub enum DownloadAction { - /// Do nothing - None, - /// Reset downloads for all peers - Reset + /// Do nothing + None, + /// Reset downloads for all peers + Reset, } #[derive(Eq, PartialEq, Debug)] pub enum BlockDownloaderImportError { - /// Imported data is rejected as invalid. Peer should be dropped. - Invalid, - /// Imported data is valid but rejected cause the downloader does not need it. - Useless, + /// Imported data is rejected as invalid. Peer should be dropped. + Invalid, + /// Imported data is valid but rejected cause the downloader does not need it. + Useless, } impl From for BlockDownloaderImportError { - fn from(_: rlp::DecoderError) -> BlockDownloaderImportError { - BlockDownloaderImportError::Invalid - } + fn from(_: rlp::DecoderError) -> BlockDownloaderImportError { + BlockDownloaderImportError::Invalid + } } /// Block downloader strategy. /// Manages state and block data for a block download process. pub struct BlockDownloader { - /// Which set of blocks to download - block_set: BlockSet, - /// Downloader state - state: State, - /// Highest block number seen - highest_block: Option, - /// Downloaded blocks, holds `H`, `B` and `S` - blocks: BlockCollection, - /// Last imported block number - last_imported_block: BlockNumber, - /// Last imported block hash - last_imported_hash: H256, - /// Number of blocks imported this round - imported_this_round: Option, - /// Block number the last round started with. - last_round_start: BlockNumber, - last_round_start_hash: H256, - /// Block parents imported this round (hash, parent) - round_parents: VecDeque<(H256, H256)>, - /// Do we need to download block recetips. 
- download_receipts: bool, - /// Sync up to the block with this hash. - target_hash: Option, - /// Probing range for seeking common best block. - retract_step: u64, - /// consecutive useless headers this round - useless_headers_count: usize, + /// Which set of blocks to download + block_set: BlockSet, + /// Downloader state + state: State, + /// Highest block number seen + highest_block: Option, + /// Downloaded blocks, holds `H`, `B` and `S` + blocks: BlockCollection, + /// Last imported block number + last_imported_block: BlockNumber, + /// Last imported block hash + last_imported_hash: H256, + /// Number of blocks imported this round + imported_this_round: Option, + /// Block number the last round started with. + last_round_start: BlockNumber, + last_round_start_hash: H256, + /// Block parents imported this round (hash, parent) + round_parents: VecDeque<(H256, H256)>, + /// Do we need to download block recetips. + download_receipts: bool, + /// Sync up to the block with this hash. + target_hash: Option, + /// Probing range for seeking common best block. + retract_step: u64, + /// consecutive useless headers this round + useless_headers_count: usize, } impl BlockDownloader { - /// Create a new instance of syncing strategy. - /// For BlockSet::NewBlocks this won't reorganize to before the last kept state. 
- pub fn new(block_set: BlockSet, start_hash: &H256, start_number: BlockNumber) -> Self { - let sync_receipts = match block_set { - BlockSet::NewBlocks => false, - BlockSet::OldBlocks => true - }; - BlockDownloader { - block_set, - state: State::Idle, - highest_block: None, - last_imported_block: start_number, - last_imported_hash: start_hash.clone(), - last_round_start: start_number, - last_round_start_hash: start_hash.clone(), - blocks: BlockCollection::new(sync_receipts), - imported_this_round: None, - round_parents: VecDeque::new(), - download_receipts: sync_receipts, - target_hash: None, - retract_step: 1, - useless_headers_count: 0, - } - } + /// Create a new instance of syncing strategy. + /// For BlockSet::NewBlocks this won't reorganize to before the last kept state. + pub fn new(block_set: BlockSet, start_hash: &H256, start_number: BlockNumber) -> Self { + let sync_receipts = match block_set { + BlockSet::NewBlocks => false, + BlockSet::OldBlocks => true, + }; + BlockDownloader { + block_set, + state: State::Idle, + highest_block: None, + last_imported_block: start_number, + last_imported_hash: start_hash.clone(), + last_round_start: start_number, + last_round_start_hash: start_hash.clone(), + blocks: BlockCollection::new(sync_receipts), + imported_this_round: None, + round_parents: VecDeque::new(), + download_receipts: sync_receipts, + target_hash: None, + retract_step: 1, + useless_headers_count: 0, + } + } - /// Reset sync. Clear all local downloaded data. - pub fn reset(&mut self) { - self.blocks.clear(); - self.useless_headers_count = 0; - self.state = State::Idle; - } + /// Reset sync. Clear all local downloaded data. 
+ pub fn reset(&mut self) { + self.blocks.clear(); + self.useless_headers_count = 0; + self.state = State::Idle; + } - /// Mark a block as known in the chain - pub fn mark_as_known(&mut self, hash: &H256, number: BlockNumber) { - if number >= self.last_imported_block + 1 { - self.last_imported_block = number; - self.last_imported_hash = hash.clone(); - self.imported_this_round = Some(self.imported_this_round.unwrap_or(0) + 1); - self.last_round_start = number; - self.last_round_start_hash = hash.clone(); - } - } + /// Mark a block as known in the chain + pub fn mark_as_known(&mut self, hash: &H256, number: BlockNumber) { + if number >= self.last_imported_block + 1 { + self.last_imported_block = number; + self.last_imported_hash = hash.clone(); + self.imported_this_round = Some(self.imported_this_round.unwrap_or(0) + 1); + self.last_round_start = number; + self.last_round_start_hash = hash.clone(); + } + } - /// Check if download is complete - pub fn is_complete(&self) -> bool { - self.state == State::Complete - } + /// Check if download is complete + pub fn is_complete(&self) -> bool { + self.state == State::Complete + } - /// Check if particular block hash is being downloaded - pub fn is_downloading(&self, hash: &H256) -> bool { - self.blocks.is_downloading(hash) - } + /// Check if particular block hash is being downloaded + pub fn is_downloading(&self, hash: &H256) -> bool { + self.blocks.is_downloading(hash) + } - /// Set starting sync block - pub fn set_target(&mut self, hash: &H256) { - self.target_hash = Some(hash.clone()); - } + /// Set starting sync block + pub fn set_target(&mut self, hash: &H256) { + self.target_hash = Some(hash.clone()); + } - /// Unmark header as being downloaded. - pub fn clear_header_download(&mut self, hash: &H256) { - self.blocks.clear_header_download(hash) - } + /// Unmark header as being downloaded. 
+ pub fn clear_header_download(&mut self, hash: &H256) { + self.blocks.clear_header_download(hash) + } - /// Unmark block body as being downloaded. - pub fn clear_body_download(&mut self, hashes: &[H256]) { - self.blocks.clear_body_download(hashes) - } + /// Unmark block body as being downloaded. + pub fn clear_body_download(&mut self, hashes: &[H256]) { + self.blocks.clear_body_download(hashes) + } - /// Unmark block receipt as being downloaded. - pub fn clear_receipt_download(&mut self, hashes: &[H256]) { - self.blocks.clear_receipt_download(hashes) - } - /// Reset collection for a new sync round with given subchain block hashes. - pub fn reset_to(&mut self, hashes: Vec) { - self.reset(); - self.blocks.reset_to(hashes); - self.state = State::Blocks; - } + /// Unmark block receipt as being downloaded. + pub fn clear_receipt_download(&mut self, hashes: &[H256]) { + self.blocks.clear_receipt_download(hashes) + } + /// Reset collection for a new sync round with given subchain block hashes. + pub fn reset_to(&mut self, hashes: Vec) { + self.reset(); + self.blocks.reset_to(hashes); + self.state = State::Blocks; + } - /// Returns used heap memory size. - pub fn heap_size(&self) -> usize { - self.blocks.heap_size() + self.round_parents.heap_size_of_children() - } + /// Returns used heap memory size. + pub fn heap_size(&self) -> usize { + self.blocks.heap_size() + self.round_parents.heap_size_of_children() + } - /// Returns best imported block number. - pub fn last_imported_block_number(&self) -> BlockNumber { - self.last_imported_block - } + /// Returns best imported block number. + pub fn last_imported_block_number(&self) -> BlockNumber { + self.last_imported_block + } - /// Add new block headers. 
- pub fn import_headers(&mut self, io: &mut SyncIo, r: &Rlp, expected_hash: H256) -> Result { - let item_count = r.item_count().unwrap_or(0); - if self.state == State::Idle { - trace_sync!(self, "Ignored unexpected block headers"); - return Ok(DownloadAction::None) - } - if item_count == 0 && (self.state == State::Blocks) { - return Err(BlockDownloaderImportError::Invalid); - } + /// Add new block headers. + pub fn import_headers( + &mut self, + io: &mut SyncIo, + r: &Rlp, + expected_hash: H256, + ) -> Result { + let item_count = r.item_count().unwrap_or(0); + if self.state == State::Idle { + trace_sync!(self, "Ignored unexpected block headers"); + return Ok(DownloadAction::None); + } + if item_count == 0 && (self.state == State::Blocks) { + return Err(BlockDownloaderImportError::Invalid); + } - // The request is generated in ::request_blocks. - let (max_count, skip) = if self.state == State::ChainHead { - (SUBCHAIN_SIZE as usize, (MAX_HEADERS_TO_REQUEST - 2) as u64) - } else { - (MAX_HEADERS_TO_REQUEST, 0) - }; + // The request is generated in ::request_blocks. 
+ let (max_count, skip) = if self.state == State::ChainHead { + (SUBCHAIN_SIZE as usize, (MAX_HEADERS_TO_REQUEST - 2) as u64) + } else { + (MAX_HEADERS_TO_REQUEST, 0) + }; - if item_count > max_count { - debug!(target: "sync", "Headers response is larger than expected"); - return Err(BlockDownloaderImportError::Invalid); - } + if item_count > max_count { + debug!(target: "sync", "Headers response is larger than expected"); + return Err(BlockDownloaderImportError::Invalid); + } - let mut headers = Vec::new(); - let mut hashes = Vec::new(); - let mut last_header = None; - for i in 0..item_count { - let info = SyncHeader::from_rlp(r.at(i)?.as_raw().to_vec())?; - let number = BlockNumber::from(info.header.number()); - let hash = info.header.hash(); + let mut headers = Vec::new(); + let mut hashes = Vec::new(); + let mut last_header = None; + for i in 0..item_count { + let info = SyncHeader::from_rlp(r.at(i)?.as_raw().to_vec())?; + let number = BlockNumber::from(info.header.number()); + let hash = info.header.hash(); - let valid_response = match last_header { - // First header must match expected hash. - None => expected_hash == hash, - Some((last_number, last_hash)) => { - // Subsequent headers must be spaced by skip interval. - let skip_valid = number == last_number + skip + 1; - // Consecutive headers must be linked by parent hash. - let parent_valid = (number != last_number + 1) || *info.header.parent_hash() == last_hash; - skip_valid && parent_valid - } - }; + let valid_response = match last_header { + // First header must match expected hash. + None => expected_hash == hash, + Some((last_number, last_hash)) => { + // Subsequent headers must be spaced by skip interval. + let skip_valid = number == last_number + skip + 1; + // Consecutive headers must be linked by parent hash. 
+ let parent_valid = + (number != last_number + 1) || *info.header.parent_hash() == last_hash; + skip_valid && parent_valid + } + }; - // Disable the peer for this syncing round if it gives invalid chain - if !valid_response { - debug!(target: "sync", "Invalid headers response"); - return Err(BlockDownloaderImportError::Invalid); - } + // Disable the peer for this syncing round if it gives invalid chain + if !valid_response { + debug!(target: "sync", "Invalid headers response"); + return Err(BlockDownloaderImportError::Invalid); + } - last_header = Some((number, hash)); - if self.blocks.contains(&hash) { - trace_sync!(self, "Skipping existing block header {} ({:?})", number, hash); - continue; - } + last_header = Some((number, hash)); + if self.blocks.contains(&hash) { + trace_sync!( + self, + "Skipping existing block header {} ({:?})", + number, + hash + ); + continue; + } - match io.chain().block_status(BlockId::Hash(hash.clone())) { - BlockStatus::InChain | BlockStatus::Queued => { - match self.state { - State::Blocks => trace_sync!(self, "Header already in chain {} ({})", number, hash), - _ => trace_sync!(self, "Header already in chain {} ({}), state = {:?}", number, hash, self.state), - } - headers.push(info); - hashes.push(hash); - }, - BlockStatus::Bad => { - return Err(BlockDownloaderImportError::Invalid); - }, - BlockStatus::Unknown => { - headers.push(info); - hashes.push(hash); - } - } - } + match io.chain().block_status(BlockId::Hash(hash.clone())) { + BlockStatus::InChain | BlockStatus::Queued => { + match self.state { + State::Blocks => { + trace_sync!(self, "Header already in chain {} ({})", number, hash) + } + _ => trace_sync!( + self, + "Header already in chain {} ({}), state = {:?}", + number, + hash, + self.state + ), + } + headers.push(info); + hashes.push(hash); + } + BlockStatus::Bad => { + return Err(BlockDownloaderImportError::Invalid); + } + BlockStatus::Unknown => { + headers.push(info); + hashes.push(hash); + } + } + } - if let 
Some((number, _)) = last_header { - if self.highest_block.as_ref().map_or(true, |n| number > *n) { - self.highest_block = Some(number); - } - } + if let Some((number, _)) = last_header { + if self.highest_block.as_ref().map_or(true, |n| number > *n) { + self.highest_block = Some(number); + } + } - match self.state { - State::ChainHead => { - if !headers.is_empty() { - trace_sync!(self, "Received {} subchain heads, proceeding to download", headers.len()); - self.blocks.reset_to(hashes); - self.state = State::Blocks; - return Ok(DownloadAction::Reset); - } else { - trace_sync!(self, "No useful subchain heads received, expected hash {:?}", expected_hash); - let best = io.chain().chain_info().best_block_number; - let oldest_reorg = io.chain().pruning_info().earliest_state; - let last = self.last_imported_block; - match self.block_set { - BlockSet::NewBlocks if best > last && (last == 0 || last < oldest_reorg) => { - trace_sync!(self, "No common block, disabling peer"); - return Err(BlockDownloaderImportError::Invalid) - }, - BlockSet::OldBlocks => { - trace_sync!(self, "Expected some useful headers for downloading OldBlocks. Try a different peer"); - return Err(BlockDownloaderImportError::Useless) - }, - _ => (), - } - } - }, - State::Blocks => { - let count = headers.len(); - // At least one of the headers must advance the subchain. Otherwise they are all useless. - if count == 0 { - self.useless_headers_count += 1; - trace_sync!(self, "No useful headers ({:?} this round), expected hash {:?}", self.useless_headers_count, expected_hash); - // only reset download if we have multiple subchain heads, to avoid unnecessary resets - // when we are at the head of the chain when we may legitimately receive no useful headers - if self.blocks.heads_len() > 1 && self.useless_headers_count >= MAX_USELESS_HEADERS_PER_ROUND { - trace_sync!(self, "Received {:?} useless responses this round. 
Resetting sync", MAX_USELESS_HEADERS_PER_ROUND); - self.reset(); - } - return Err(BlockDownloaderImportError::Useless); - } - self.blocks.insert_headers(headers); - trace_sync!(self, "Inserted {} headers", count); - }, - _ => trace_sync!(self, "Unexpected headers({})", headers.len()), - } + match self.state { + State::ChainHead => { + if !headers.is_empty() { + trace_sync!( + self, + "Received {} subchain heads, proceeding to download", + headers.len() + ); + self.blocks.reset_to(hashes); + self.state = State::Blocks; + return Ok(DownloadAction::Reset); + } else { + trace_sync!( + self, + "No useful subchain heads received, expected hash {:?}", + expected_hash + ); + let best = io.chain().chain_info().best_block_number; + let oldest_reorg = io.chain().pruning_info().earliest_state; + let last = self.last_imported_block; + match self.block_set { + BlockSet::NewBlocks + if best > last && (last == 0 || last < oldest_reorg) => + { + trace_sync!(self, "No common block, disabling peer"); + return Err(BlockDownloaderImportError::Invalid); + } + BlockSet::OldBlocks => { + trace_sync!(self, "Expected some useful headers for downloading OldBlocks. Try a different peer"); + return Err(BlockDownloaderImportError::Useless); + } + _ => (), + } + } + } + State::Blocks => { + let count = headers.len(); + // At least one of the headers must advance the subchain. Otherwise they are all useless. + if count == 0 { + self.useless_headers_count += 1; + trace_sync!( + self, + "No useful headers ({:?} this round), expected hash {:?}", + self.useless_headers_count, + expected_hash + ); + // only reset download if we have multiple subchain heads, to avoid unnecessary resets + // when we are at the head of the chain when we may legitimately receive no useful headers + if self.blocks.heads_len() > 1 + && self.useless_headers_count >= MAX_USELESS_HEADERS_PER_ROUND + { + trace_sync!( + self, + "Received {:?} useless responses this round. 
Resetting sync", + MAX_USELESS_HEADERS_PER_ROUND + ); + self.reset(); + } + return Err(BlockDownloaderImportError::Useless); + } + self.blocks.insert_headers(headers); + trace_sync!(self, "Inserted {} headers", count); + } + _ => trace_sync!(self, "Unexpected headers({})", headers.len()), + } - Ok(DownloadAction::None) - } + Ok(DownloadAction::None) + } - /// Called by peer once it has new block bodies - pub fn import_bodies(&mut self, r: &Rlp, expected_hashes: &[H256]) -> Result<(), BlockDownloaderImportError> { - let item_count = r.item_count().unwrap_or(0); - if item_count == 0 { - return Err(BlockDownloaderImportError::Useless); - } else if self.state != State::Blocks { - trace_sync!(self, "Ignored unexpected block bodies"); - } else { - let mut bodies = Vec::with_capacity(item_count); - for i in 0..item_count { - let body = SyncBody::from_rlp(r.at(i)?.as_raw())?; - bodies.push(body); - } + /// Called by peer once it has new block bodies + pub fn import_bodies( + &mut self, + r: &Rlp, + expected_hashes: &[H256], + ) -> Result<(), BlockDownloaderImportError> { + let item_count = r.item_count().unwrap_or(0); + if item_count == 0 { + return Err(BlockDownloaderImportError::Useless); + } else if self.state != State::Blocks { + trace_sync!(self, "Ignored unexpected block bodies"); + } else { + let mut bodies = Vec::with_capacity(item_count); + for i in 0..item_count { + let body = SyncBody::from_rlp(r.at(i)?.as_raw())?; + bodies.push(body); + } - let hashes = self.blocks.insert_bodies(bodies); - if hashes.len() != item_count { - trace_sync!(self, "Deactivating peer for giving invalid block bodies"); - return Err(BlockDownloaderImportError::Invalid); - } - if !all_expected(hashes.as_slice(), expected_hashes, |&a, &b| a == b) { - trace_sync!(self, "Deactivating peer for giving unexpected block bodies"); - return Err(BlockDownloaderImportError::Invalid); - } - } - Ok(()) - } + let hashes = self.blocks.insert_bodies(bodies); + if hashes.len() != item_count { + 
trace_sync!(self, "Deactivating peer for giving invalid block bodies"); + return Err(BlockDownloaderImportError::Invalid); + } + if !all_expected(hashes.as_slice(), expected_hashes, |&a, &b| a == b) { + trace_sync!(self, "Deactivating peer for giving unexpected block bodies"); + return Err(BlockDownloaderImportError::Invalid); + } + } + Ok(()) + } - /// Called by peer once it has new block bodies - pub fn import_receipts(&mut self, r: &Rlp, expected_hashes: &[H256]) -> Result<(), BlockDownloaderImportError> { - let item_count = r.item_count().unwrap_or(0); - if item_count == 0 { - return Err(BlockDownloaderImportError::Useless); - } - else if self.state != State::Blocks { - trace_sync!(self, "Ignored unexpected block receipts"); - } - else { - let mut receipts = Vec::with_capacity(item_count); - for i in 0..item_count { - let receipt = r.at(i).map_err(|e| { - trace_sync!(self, "Error decoding block receipts RLP: {:?}", e); - BlockDownloaderImportError::Invalid - })?; - receipts.push(receipt.as_raw().to_vec()); - } - let hashes = self.blocks.insert_receipts(receipts); - if hashes.len() != item_count { - trace_sync!(self, "Deactivating peer for giving invalid block receipts"); - return Err(BlockDownloaderImportError::Invalid); - } - if !all_expected(hashes.as_slice(), expected_hashes, |a, b| a.contains(b)) { - trace_sync!(self, "Deactivating peer for giving unexpected block receipts"); - return Err(BlockDownloaderImportError::Invalid); - } - } - Ok(()) - } + /// Called by peer once it has new block bodies + pub fn import_receipts( + &mut self, + r: &Rlp, + expected_hashes: &[H256], + ) -> Result<(), BlockDownloaderImportError> { + let item_count = r.item_count().unwrap_or(0); + if item_count == 0 { + return Err(BlockDownloaderImportError::Useless); + } else if self.state != State::Blocks { + trace_sync!(self, "Ignored unexpected block receipts"); + } else { + let mut receipts = Vec::with_capacity(item_count); + for i in 0..item_count { + let receipt = 
r.at(i).map_err(|e| { + trace_sync!(self, "Error decoding block receipts RLP: {:?}", e); + BlockDownloaderImportError::Invalid + })?; + receipts.push(receipt.as_raw().to_vec()); + } + let hashes = self.blocks.insert_receipts(receipts); + if hashes.len() != item_count { + trace_sync!(self, "Deactivating peer for giving invalid block receipts"); + return Err(BlockDownloaderImportError::Invalid); + } + if !all_expected(hashes.as_slice(), expected_hashes, |a, b| a.contains(b)) { + trace_sync!( + self, + "Deactivating peer for giving unexpected block receipts" + ); + return Err(BlockDownloaderImportError::Invalid); + } + } + Ok(()) + } - fn start_sync_round(&mut self, io: &mut SyncIo) { - self.state = State::ChainHead; - trace_sync!(self, "Starting round (last imported count = {:?}, last started = {}, block = {:?}", self.imported_this_round, self.last_round_start, self.last_imported_block); - // Check if need to retract to find the common block. The problem is that the peers still return headers by hash even - // from the non-canonical part of the tree. So we also retract if nothing has been imported last round. 
- let start = self.last_round_start; - let start_hash = self.last_round_start_hash; - match self.imported_this_round { - Some(n) if n == 0 && start > 0 => { - // nothing was imported last round, step back to a previous block - // search parent in last round known parents first - if let Some(&(_, p)) = self.round_parents.iter().find(|&&(h, _)| h == start_hash) { - self.last_imported_block = start - 1; - self.last_imported_hash = p.clone(); - trace_sync!(self, "Searching common header from the last round {} ({})", self.last_imported_block, self.last_imported_hash); - } else { - let best = io.chain().chain_info().best_block_number; - let oldest_reorg = io.chain().pruning_info().earliest_state; - if self.block_set == BlockSet::NewBlocks && best > start && start < oldest_reorg { - debug_sync!(self, "Could not revert to previous ancient block, last: {} ({})", start, start_hash); - self.reset(); - } else { - let n = start - cmp::min(self.retract_step, start); - self.retract_step *= 2; - match io.chain().block_hash(BlockId::Number(n)) { - Some(h) => { - self.last_imported_block = n; - self.last_imported_hash = h; - trace_sync!(self, "Searching common header in the blockchain {} ({})", start, self.last_imported_hash); - } - None => { - debug_sync!(self, "Could not revert to previous block, last: {} ({})", start, self.last_imported_hash); - self.reset(); - } - } - } - } - }, - _ => { - self.retract_step = 1; - }, - } - self.last_round_start = self.last_imported_block; - self.last_round_start_hash = self.last_imported_hash; - self.imported_this_round = None; - } + fn start_sync_round(&mut self, io: &mut SyncIo) { + self.state = State::ChainHead; + trace_sync!( + self, + "Starting round (last imported count = {:?}, last started = {}, block = {:?}", + self.imported_this_round, + self.last_round_start, + self.last_imported_block + ); + // Check if need to retract to find the common block. 
The problem is that the peers still return headers by hash even + // from the non-canonical part of the tree. So we also retract if nothing has been imported last round. + let start = self.last_round_start; + let start_hash = self.last_round_start_hash; + match self.imported_this_round { + Some(n) if n == 0 && start > 0 => { + // nothing was imported last round, step back to a previous block + // search parent in last round known parents first + if let Some(&(_, p)) = self.round_parents.iter().find(|&&(h, _)| h == start_hash) { + self.last_imported_block = start - 1; + self.last_imported_hash = p.clone(); + trace_sync!( + self, + "Searching common header from the last round {} ({})", + self.last_imported_block, + self.last_imported_hash + ); + } else { + let best = io.chain().chain_info().best_block_number; + let oldest_reorg = io.chain().pruning_info().earliest_state; + if self.block_set == BlockSet::NewBlocks && best > start && start < oldest_reorg + { + debug_sync!( + self, + "Could not revert to previous ancient block, last: {} ({})", + start, + start_hash + ); + self.reset(); + } else { + let n = start - cmp::min(self.retract_step, start); + self.retract_step *= 2; + match io.chain().block_hash(BlockId::Number(n)) { + Some(h) => { + self.last_imported_block = n; + self.last_imported_hash = h; + trace_sync!( + self, + "Searching common header in the blockchain {} ({})", + start, + self.last_imported_hash + ); + } + None => { + debug_sync!( + self, + "Could not revert to previous block, last: {} ({})", + start, + self.last_imported_hash + ); + self.reset(); + } + } + } + } + } + _ => { + self.retract_step = 1; + } + } + self.last_round_start = self.last_imported_block; + self.last_round_start_hash = self.last_imported_hash; + self.imported_this_round = None; + } - /// Find some headers or blocks to download for a peer. 
- pub fn request_blocks(&mut self, peer_id: PeerId, io: &mut SyncIo, num_active_peers: usize) -> Option { - match self.state { - State::Idle => { - self.start_sync_round(io); - if self.state == State::ChainHead { - return self.request_blocks(peer_id, io, num_active_peers); - } - }, - State::ChainHead => { - if num_active_peers < MAX_PARALLEL_SUBCHAIN_DOWNLOAD { - // Request subchain headers - trace_sync!(self, "Starting sync with better chain"); - // Request MAX_HEADERS_TO_REQUEST - 2 headers apart so that - // MAX_HEADERS_TO_REQUEST would include headers for neighbouring subchains - return Some(BlockRequest::Headers { - start: self.last_imported_hash.clone(), - count: SUBCHAIN_SIZE, - skip: (MAX_HEADERS_TO_REQUEST - 2) as u64, - }); - } - }, - State::Blocks => { - // check to see if we need to download any block bodies first - let client_version = io.peer_version(peer_id); + /// Find some headers or blocks to download for a peer. + pub fn request_blocks( + &mut self, + peer_id: PeerId, + io: &mut SyncIo, + num_active_peers: usize, + ) -> Option { + match self.state { + State::Idle => { + self.start_sync_round(io); + if self.state == State::ChainHead { + return self.request_blocks(peer_id, io, num_active_peers); + } + } + State::ChainHead => { + if num_active_peers < MAX_PARALLEL_SUBCHAIN_DOWNLOAD { + // Request subchain headers + trace_sync!(self, "Starting sync with better chain"); + // Request MAX_HEADERS_TO_REQUEST - 2 headers apart so that + // MAX_HEADERS_TO_REQUEST would include headers for neighbouring subchains + return Some(BlockRequest::Headers { + start: self.last_imported_hash.clone(), + count: SUBCHAIN_SIZE, + skip: (MAX_HEADERS_TO_REQUEST - 2) as u64, + }); + } + } + State::Blocks => { + // check to see if we need to download any block bodies first + let client_version = io.peer_version(peer_id); - let number_of_bodies_to_request = if client_version.can_handle_large_requests() { - MAX_BODIES_TO_REQUEST_LARGE - } else { - MAX_BODIES_TO_REQUEST_SMALL - 
}; + let number_of_bodies_to_request = if client_version.can_handle_large_requests() { + MAX_BODIES_TO_REQUEST_LARGE + } else { + MAX_BODIES_TO_REQUEST_SMALL + }; - let needed_bodies = self.blocks.needed_bodies(number_of_bodies_to_request, false); - if !needed_bodies.is_empty() { - return Some(BlockRequest::Bodies { - hashes: needed_bodies, - }); - } + let needed_bodies = self + .blocks + .needed_bodies(number_of_bodies_to_request, false); + if !needed_bodies.is_empty() { + return Some(BlockRequest::Bodies { + hashes: needed_bodies, + }); + } - if self.download_receipts { - let needed_receipts = self.blocks.needed_receipts(MAX_RECEPITS_TO_REQUEST, false); - if !needed_receipts.is_empty() { - return Some(BlockRequest::Receipts { - hashes: needed_receipts, - }); - } - } + if self.download_receipts { + let needed_receipts = + self.blocks.needed_receipts(MAX_RECEPITS_TO_REQUEST, false); + if !needed_receipts.is_empty() { + return Some(BlockRequest::Receipts { + hashes: needed_receipts, + }); + } + } - // find subchain to download - if let Some((h, count)) = self.blocks.needed_headers(MAX_HEADERS_TO_REQUEST, false) { - return Some(BlockRequest::Headers { - start: h, - count: count as u64, - skip: 0, - }); - } - }, - State::Complete => (), - } - None - } + // find subchain to download + if let Some((h, count)) = self.blocks.needed_headers(MAX_HEADERS_TO_REQUEST, false) + { + return Some(BlockRequest::Headers { + start: h, + count: count as u64, + skip: 0, + }); + } + } + State::Complete => (), + } + None + } - /// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import. 
- /// Returns DownloadAction::Reset if it is imported all the the blocks it can and all downloading peers should be reset - pub fn collect_blocks(&mut self, io: &mut SyncIo, allow_out_of_order: bool) -> DownloadAction { - let mut download_action = DownloadAction::None; - let mut imported = HashSet::new(); - let blocks = self.blocks.drain(); - let count = blocks.len(); - for block_and_receipts in blocks { - let block = block_and_receipts.block; - let receipts = block_and_receipts.receipts; + /// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import. + /// Returns DownloadAction::Reset if it is imported all the the blocks it can and all downloading peers should be reset + pub fn collect_blocks(&mut self, io: &mut SyncIo, allow_out_of_order: bool) -> DownloadAction { + let mut download_action = DownloadAction::None; + let mut imported = HashSet::new(); + let blocks = self.blocks.drain(); + let count = blocks.len(); + for block_and_receipts in blocks { + let block = block_and_receipts.block; + let receipts = block_and_receipts.receipts; - let h = block.header.hash(); - let number = block.header.number(); - let parent = *block.header.parent_hash(); + let h = block.header.hash(); + let number = block.header.number(); + let parent = *block.header.parent_hash(); - if self.target_hash.as_ref().map_or(false, |t| t == &h) { - self.state = State::Complete; - trace_sync!(self, "Sync target reached"); - return download_action; - } + if self.target_hash.as_ref().map_or(false, |t| t == &h) { + self.state = State::Complete; + trace_sync!(self, "Sync target reached"); + return download_action; + } - let result = if let Some(receipts) = receipts { - io.chain().queue_ancient_block(block, receipts) - } else { - io.chain().import_block(block) - }; + let result = if let Some(receipts) = receipts { + io.chain().queue_ancient_block(block, receipts) + } else { + io.chain().import_block(block) + }; - match result { - 
Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { - trace_sync!(self, "Block already in chain {:?}", h); - self.block_imported(&h, number, &parent); - }, - Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyQueued), _)) => { - trace_sync!(self, "Block already queued {:?}", h); - self.block_imported(&h, number, &parent); - }, - Ok(_) => { - trace_sync!(self, "Block queued {:?}", h); - imported.insert(h.clone()); - self.block_imported(&h, number, &parent); - }, - Err(EthcoreError(EthcoreErrorKind::Block(BlockError::UnknownParent(_)), _)) if allow_out_of_order => { - break; - }, - Err(EthcoreError(EthcoreErrorKind::Block(BlockError::UnknownParent(_)), _)) => { - trace_sync!(self, "Unknown new block parent, restarting sync"); - break; - }, - Err(EthcoreError(EthcoreErrorKind::Block(BlockError::TemporarilyInvalid(_)), _)) => { - debug_sync!(self, "Block temporarily invalid: {:?}, restarting sync", h); - break; - }, - Err(EthcoreError(EthcoreErrorKind::Queue(QueueErrorKind::Full(limit)), _)) => { - debug_sync!(self, "Block import queue full ({}), restarting sync", limit); - download_action = DownloadAction::Reset; - break; - }, - Err(e) => { - debug_sync!(self, "Bad block {:?} : {:?}", h, e); - download_action = DownloadAction::Reset; - break; - } - } - } - trace_sync!(self, "Imported {} of {}", imported.len(), count); - self.imported_this_round = Some(self.imported_this_round.unwrap_or(0) + imported.len()); + match result { + Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { + trace_sync!(self, "Block already in chain {:?}", h); + self.block_imported(&h, number, &parent); + } + Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyQueued), _)) => { + trace_sync!(self, "Block already queued {:?}", h); + self.block_imported(&h, number, &parent); + } + Ok(_) => { + trace_sync!(self, "Block queued {:?}", h); + imported.insert(h.clone()); + self.block_imported(&h, number, 
&parent); + } + Err(EthcoreError(EthcoreErrorKind::Block(BlockError::UnknownParent(_)), _)) + if allow_out_of_order => + { + break; + } + Err(EthcoreError(EthcoreErrorKind::Block(BlockError::UnknownParent(_)), _)) => { + trace_sync!(self, "Unknown new block parent, restarting sync"); + break; + } + Err(EthcoreError( + EthcoreErrorKind::Block(BlockError::TemporarilyInvalid(_)), + _, + )) => { + debug_sync!(self, "Block temporarily invalid: {:?}, restarting sync", h); + break; + } + Err(EthcoreError(EthcoreErrorKind::Queue(QueueErrorKind::Full(limit)), _)) => { + debug_sync!(self, "Block import queue full ({}), restarting sync", limit); + download_action = DownloadAction::Reset; + break; + } + Err(e) => { + debug_sync!(self, "Bad block {:?} : {:?}", h, e); + download_action = DownloadAction::Reset; + break; + } + } + } + trace_sync!(self, "Imported {} of {}", imported.len(), count); + self.imported_this_round = Some(self.imported_this_round.unwrap_or(0) + imported.len()); - if self.blocks.is_empty() { - // complete sync round - trace_sync!(self, "Sync round complete"); - download_action = DownloadAction::Reset; - } - download_action - } + if self.blocks.is_empty() { + // complete sync round + trace_sync!(self, "Sync round complete"); + download_action = DownloadAction::Reset; + } + download_action + } - fn block_imported(&mut self, hash: &H256, number: BlockNumber, parent: &H256) { - self.last_imported_block = number; - self.last_imported_hash = hash.clone(); - self.round_parents.push_back((hash.clone(), parent.clone())); - if self.round_parents.len() > MAX_ROUND_PARENTS { - self.round_parents.pop_front(); - } - } + fn block_imported(&mut self, hash: &H256, number: BlockNumber, parent: &H256) { + self.last_imported_block = number; + self.last_imported_hash = hash.clone(); + self.round_parents.push_back((hash.clone(), parent.clone())); + if self.round_parents.len() > MAX_ROUND_PARENTS { + self.round_parents.pop_front(); + } + } } // Determines if the first argument 
matches an ordered subset of the second, according to some predicate. fn all_expected(values: &[A], expected_values: &[B], is_expected: F) -> bool - where F: Fn(&A, &B) -> bool +where + F: Fn(&A, &B) -> bool, { - let mut expected_iter = expected_values.iter(); - values.iter().all(|val1| { - while let Some(val2) = expected_iter.next() { - if is_expected(val1, val2) { - return true; - } - } - false - }) + let mut expected_iter = expected_values.iter(); + values.iter().all(|val1| { + while let Some(val2) = expected_iter.next() { + if is_expected(val1, val2) { + return true; + } + } + false + }) } #[cfg(test)] mod tests { - use super::*; - use ethcore::client::TestBlockChainClient; - use ethcore::spec::Spec; - use ethkey::{Generator,Random}; - use hash::keccak; - use parking_lot::RwLock; - use rlp::{encode_list,RlpStream}; - use tests::helpers::TestIo; - use tests::snapshot::TestSnapshotService; - use types::transaction::{Transaction,SignedTransaction}; - use triehash_ethereum::ordered_trie_root; - use types::header::Header as BlockHeader; - - fn dummy_header(number: u64, parent_hash: H256) -> BlockHeader { - let mut header = BlockHeader::new(); - header.set_gas_limit(0.into()); - header.set_difficulty((number * 100).into()); - header.set_timestamp(number * 10); - header.set_number(number); - header.set_parent_hash(parent_hash); - header.set_state_root(H256::zero()); - header - } - - fn dummy_signed_tx() -> SignedTransaction { - let keypair = Random.generate().unwrap(); - Transaction::default().sign(keypair.secret(), None) - } - - fn import_headers(headers: &[BlockHeader], downloader: &mut BlockDownloader, io: &mut SyncIo) -> Result { - let mut stream = RlpStream::new(); - stream.append_list(headers); - let bytes = stream.out(); - let rlp = Rlp::new(&bytes); - let expected_hash = headers.first().unwrap().hash(); - downloader.import_headers(io, &rlp, expected_hash) - } - - fn import_headers_ok(headers: &[BlockHeader], downloader: &mut BlockDownloader, io: &mut SyncIo) { 
- let res = import_headers(headers, downloader, io); - assert!(res.is_ok()); - } - - #[test] - fn import_headers_in_chain_head_state() { - ::env_logger::try_init().ok(); - - let spec = Spec::new_test(); - let genesis_hash = spec.genesis_header().hash(); - - let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0); - downloader.state = State::ChainHead; - - let mut chain = TestBlockChainClient::new(); - let snapshot_service = TestSnapshotService::new(); - let queue = RwLock::new(VecDeque::new()); - let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); - - // Valid headers sequence. - let valid_headers = [ - spec.genesis_header(), - dummy_header(127, H256::random()), - dummy_header(254, H256::random()), - ]; - let rlp_data = encode_list(&valid_headers); - let valid_rlp = Rlp::new(&rlp_data); - - match downloader.import_headers(&mut io, &valid_rlp, genesis_hash) { - Ok(DownloadAction::Reset) => assert_eq!(downloader.state, State::Blocks), - _ => panic!("expected transition to Blocks state"), - }; - - // Headers are rejected because the expected hash does not match. - let invalid_start_block_headers = [ - dummy_header(0, H256::random()), - dummy_header(127, H256::random()), - dummy_header(254, H256::random()), - ]; - let rlp_data = encode_list(&invalid_start_block_headers); - let invalid_start_block_rlp = Rlp::new(&rlp_data); - - match downloader.import_headers(&mut io, &invalid_start_block_rlp, genesis_hash) { - Err(BlockDownloaderImportError::Invalid) => (), - _ => panic!("expected BlockDownloaderImportError"), - }; - - // Headers are rejected because they are not spaced as expected. 
- let invalid_skip_headers = [ - spec.genesis_header(), - dummy_header(128, H256::random()), - dummy_header(256, H256::random()), - ]; - let rlp_data = encode_list(&invalid_skip_headers); - let invalid_skip_rlp = Rlp::new(&rlp_data); - - match downloader.import_headers(&mut io, &invalid_skip_rlp, genesis_hash) { - Err(BlockDownloaderImportError::Invalid) => (), - _ => panic!("expected BlockDownloaderImportError"), - }; - - // Invalid because the packet size is too large. - let mut too_many_headers = Vec::with_capacity((SUBCHAIN_SIZE + 1) as usize); - too_many_headers.push(spec.genesis_header()); - for i in 1..(SUBCHAIN_SIZE + 1) { - too_many_headers.push(dummy_header((MAX_HEADERS_TO_REQUEST as u64 - 1) * i, H256::random())); - } - let rlp_data = encode_list(&too_many_headers); - - let too_many_rlp = Rlp::new(&rlp_data); - match downloader.import_headers(&mut io, &too_many_rlp, genesis_hash) { - Err(BlockDownloaderImportError::Invalid) => (), - _ => panic!("expected BlockDownloaderImportError"), - }; - } - - #[test] - fn import_headers_in_blocks_state() { - ::env_logger::try_init().ok(); - - let mut chain = TestBlockChainClient::new(); - let snapshot_service = TestSnapshotService::new(); - let queue = RwLock::new(VecDeque::new()); - let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); - - let mut headers = Vec::with_capacity(3); - let parent_hash = H256::random(); - headers.push(dummy_header(127, parent_hash)); - let parent_hash = headers[0].hash(); - headers.push(dummy_header(128, parent_hash)); - let parent_hash = headers[1].hash(); - headers.push(dummy_header(129, parent_hash)); - - let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &H256::random(), 0); - downloader.state = State::Blocks; - downloader.blocks.reset_to(vec![headers[0].hash()]); - - let rlp_data = encode_list(&headers); - let headers_rlp = Rlp::new(&rlp_data); - - match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) { - Ok(DownloadAction::None) => 
(), - _ => panic!("expected successful import"), - }; - - // Invalidate parent_hash link. - headers[2] = dummy_header(129, H256::random()); - let rlp_data = encode_list(&headers); - let headers_rlp = Rlp::new(&rlp_data); - - match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) { - Err(BlockDownloaderImportError::Invalid) => (), - _ => panic!("expected BlockDownloaderImportError"), - }; - - // Invalidate header sequence by skipping a header. - headers[2] = dummy_header(130, headers[1].hash()); - let rlp_data = encode_list(&headers); - let headers_rlp = Rlp::new(&rlp_data); - - match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) { - Err(BlockDownloaderImportError::Invalid) => (), - _ => panic!("expected BlockDownloaderImportError"), - }; - } - - #[test] - fn import_bodies() { - ::env_logger::try_init().ok(); - - let mut chain = TestBlockChainClient::new(); - let snapshot_service = TestSnapshotService::new(); - let queue = RwLock::new(VecDeque::new()); - let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); - - // Import block headers. 
- let mut headers = Vec::with_capacity(4); - let mut bodies = Vec::with_capacity(4); - let mut parent_hash = H256::zero(); - for i in 0..4 { - // Construct the block body - let mut uncles = if i > 0 { - encode_list(&[dummy_header(i - 1, H256::random())]) - } else { - ::rlp::EMPTY_LIST_RLP.to_vec() - }; - - let mut txs = encode_list(&[dummy_signed_tx()]); - let tx_root = ordered_trie_root(Rlp::new(&txs).iter().map(|r| r.as_raw())); - - let mut rlp = RlpStream::new_list(2); - rlp.append_raw(&txs, 1); - rlp.append_raw(&uncles, 1); - bodies.push(rlp.out()); - - // Construct the block header - let mut header = dummy_header(i, parent_hash); - header.set_transactions_root(tx_root); - header.set_uncles_hash(keccak(&uncles)); - parent_hash = header.hash(); - headers.push(header); - } - - let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &headers[0].hash(), 0); - downloader.state = State::Blocks; - downloader.blocks.reset_to(vec![headers[0].hash()]); - - // Only import the first three block headers. - let rlp_data = encode_list(&headers[0..3]); - let headers_rlp = Rlp::new(&rlp_data); - assert!(downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()).is_ok()); - - // Import first body successfully. - let mut rlp_data = RlpStream::new_list(1); - rlp_data.append_raw(&bodies[0], 1); - let bodies_rlp = Rlp::new(rlp_data.as_raw()); - assert!(downloader.import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]).is_ok()); - - // Import second body successfully. - let mut rlp_data = RlpStream::new_list(1); - rlp_data.append_raw(&bodies[1], 1); - let bodies_rlp = Rlp::new(rlp_data.as_raw()); - assert!(downloader.import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]).is_ok()); - - // Import unexpected third body. 
- let mut rlp_data = RlpStream::new_list(1); - rlp_data.append_raw(&bodies[2], 1); - let bodies_rlp = Rlp::new(rlp_data.as_raw()); - match downloader.import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]) { - Err(BlockDownloaderImportError::Invalid) => (), - _ => panic!("expected BlockDownloaderImportError"), - }; - } - - #[test] - fn import_receipts() { - ::env_logger::try_init().ok(); - - let mut chain = TestBlockChainClient::new(); - let snapshot_service = TestSnapshotService::new(); - let queue = RwLock::new(VecDeque::new()); - let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); - - // Import block headers. - let mut headers = Vec::with_capacity(4); - let mut receipts = Vec::with_capacity(4); - let mut parent_hash = H256::zero(); - for i in 0..4 { - // Construct the receipts. Receipt root for the first two blocks is the same. - // - // The RLP-encoded integers are clearly not receipts, but the BlockDownloader treats - // all receipts as byte blobs, so it does not matter. - let mut receipts_rlp = if i < 2 { - encode_list(&[0u32]) - } else { - encode_list(&[i as u32]) - }; - let receipts_root = ordered_trie_root(Rlp::new(&receipts_rlp).iter().map(|r| r.as_raw())); - receipts.push(receipts_rlp); - - // Construct the block header. - let mut header = dummy_header(i, parent_hash); - header.set_receipts_root(receipts_root); - parent_hash = header.hash(); - headers.push(header); - } - - let mut downloader = BlockDownloader::new(BlockSet::OldBlocks, &headers[0].hash(), 0); - downloader.state = State::Blocks; - downloader.blocks.reset_to(vec![headers[0].hash()]); - - // Only import the first three block headers. - let rlp_data = encode_list(&headers[0..3]); - let headers_rlp = Rlp::new(&rlp_data); - assert!(downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()).is_ok()); - - // Import second and third receipts successfully. 
- let mut rlp_data = RlpStream::new_list(2); - rlp_data.append_raw(&receipts[1], 1); - rlp_data.append_raw(&receipts[2], 1); - let receipts_rlp = Rlp::new(rlp_data.as_raw()); - assert!(downloader.import_receipts(&receipts_rlp, &[headers[1].hash(), headers[2].hash()]).is_ok()); - - // Import unexpected fourth receipt. - let mut rlp_data = RlpStream::new_list(1); - rlp_data.append_raw(&receipts[3], 1); - let bodies_rlp = Rlp::new(rlp_data.as_raw()); - match downloader.import_bodies(&bodies_rlp, &[headers[1].hash(), headers[2].hash()]) { - Err(BlockDownloaderImportError::Invalid) => (), - _ => panic!("expected BlockDownloaderImportError"), - }; - } - - #[test] - fn reset_after_multiple_sets_of_useless_headers() { - ::env_logger::try_init().ok(); - - let spec = Spec::new_test(); - let genesis_hash = spec.genesis_header().hash(); - - let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0); - downloader.state = State::ChainHead; - - let mut chain = TestBlockChainClient::new(); - let snapshot_service = TestSnapshotService::new(); - let queue = RwLock::new(VecDeque::new()); - let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); - - let heads = [ - spec.genesis_header(), - dummy_header(127, H256::random()), - dummy_header(254, H256::random()), - ]; - - let short_subchain = [dummy_header(1, genesis_hash)]; - - import_headers_ok(&heads, &mut downloader, &mut io); - import_headers_ok(&short_subchain, &mut downloader, &mut io); - - assert_eq!(downloader.state, State::Blocks); - assert!(!downloader.blocks.is_empty()); - - // simulate receiving useless headers - let head = vec![short_subchain.last().unwrap().clone()]; - for _ in 0..MAX_USELESS_HEADERS_PER_ROUND { - let res = import_headers(&head, &mut downloader, &mut io); - assert!(res.is_err()); - } - - assert_eq!(downloader.state, State::Idle); - assert!(downloader.blocks.is_empty()); - } - - #[test] - fn dont_reset_after_multiple_sets_of_useless_headers_for_chain_head() { - 
::env_logger::try_init().ok(); - - let spec = Spec::new_test(); - let genesis_hash = spec.genesis_header().hash(); - - let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0); - downloader.state = State::ChainHead; - - let mut chain = TestBlockChainClient::new(); - let snapshot_service = TestSnapshotService::new(); - let queue = RwLock::new(VecDeque::new()); - let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); - - let heads = [ - spec.genesis_header() - ]; - - let short_subchain = [dummy_header(1, genesis_hash)]; - - import_headers_ok(&heads, &mut downloader, &mut io); - import_headers_ok(&short_subchain, &mut downloader, &mut io); - - assert_eq!(downloader.state, State::Blocks); - assert!(!downloader.blocks.is_empty()); - - // simulate receiving useless headers - let head = vec![short_subchain.last().unwrap().clone()]; - for _ in 0..MAX_USELESS_HEADERS_PER_ROUND { - let res = import_headers(&head, &mut downloader, &mut io); - assert!(res.is_err()); - } - - // download shouldn't be reset since this is the chain head for a single subchain. - // this state usually occurs for NewBlocks when it has reached the chain head. 
- assert_eq!(downloader.state, State::Blocks); - assert!(!downloader.blocks.is_empty()); - } + use super::*; + use ethcore::{client::TestBlockChainClient, spec::Spec}; + use ethkey::{Generator, Random}; + use hash::keccak; + use parking_lot::RwLock; + use rlp::{encode_list, RlpStream}; + use tests::{helpers::TestIo, snapshot::TestSnapshotService}; + use triehash_ethereum::ordered_trie_root; + use types::{ + header::Header as BlockHeader, + transaction::{SignedTransaction, Transaction}, + }; + + fn dummy_header(number: u64, parent_hash: H256) -> BlockHeader { + let mut header = BlockHeader::new(); + header.set_gas_limit(0.into()); + header.set_difficulty((number * 100).into()); + header.set_timestamp(number * 10); + header.set_number(number); + header.set_parent_hash(parent_hash); + header.set_state_root(H256::zero()); + header + } + + fn dummy_signed_tx() -> SignedTransaction { + let keypair = Random.generate().unwrap(); + Transaction::default().sign(keypair.secret(), None) + } + + fn import_headers( + headers: &[BlockHeader], + downloader: &mut BlockDownloader, + io: &mut SyncIo, + ) -> Result { + let mut stream = RlpStream::new(); + stream.append_list(headers); + let bytes = stream.out(); + let rlp = Rlp::new(&bytes); + let expected_hash = headers.first().unwrap().hash(); + downloader.import_headers(io, &rlp, expected_hash) + } + + fn import_headers_ok( + headers: &[BlockHeader], + downloader: &mut BlockDownloader, + io: &mut SyncIo, + ) { + let res = import_headers(headers, downloader, io); + assert!(res.is_ok()); + } + + #[test] + fn import_headers_in_chain_head_state() { + ::env_logger::try_init().ok(); + + let spec = Spec::new_test(); + let genesis_hash = spec.genesis_header().hash(); + + let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0); + downloader.state = State::ChainHead; + + let mut chain = TestBlockChainClient::new(); + let snapshot_service = TestSnapshotService::new(); + let queue = RwLock::new(VecDeque::new()); + let 
mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); + + // Valid headers sequence. + let valid_headers = [ + spec.genesis_header(), + dummy_header(127, H256::random()), + dummy_header(254, H256::random()), + ]; + let rlp_data = encode_list(&valid_headers); + let valid_rlp = Rlp::new(&rlp_data); + + match downloader.import_headers(&mut io, &valid_rlp, genesis_hash) { + Ok(DownloadAction::Reset) => assert_eq!(downloader.state, State::Blocks), + _ => panic!("expected transition to Blocks state"), + }; + + // Headers are rejected because the expected hash does not match. + let invalid_start_block_headers = [ + dummy_header(0, H256::random()), + dummy_header(127, H256::random()), + dummy_header(254, H256::random()), + ]; + let rlp_data = encode_list(&invalid_start_block_headers); + let invalid_start_block_rlp = Rlp::new(&rlp_data); + + match downloader.import_headers(&mut io, &invalid_start_block_rlp, genesis_hash) { + Err(BlockDownloaderImportError::Invalid) => (), + _ => panic!("expected BlockDownloaderImportError"), + }; + + // Headers are rejected because they are not spaced as expected. + let invalid_skip_headers = [ + spec.genesis_header(), + dummy_header(128, H256::random()), + dummy_header(256, H256::random()), + ]; + let rlp_data = encode_list(&invalid_skip_headers); + let invalid_skip_rlp = Rlp::new(&rlp_data); + + match downloader.import_headers(&mut io, &invalid_skip_rlp, genesis_hash) { + Err(BlockDownloaderImportError::Invalid) => (), + _ => panic!("expected BlockDownloaderImportError"), + }; + + // Invalid because the packet size is too large. 
+ let mut too_many_headers = Vec::with_capacity((SUBCHAIN_SIZE + 1) as usize); + too_many_headers.push(spec.genesis_header()); + for i in 1..(SUBCHAIN_SIZE + 1) { + too_many_headers.push(dummy_header( + (MAX_HEADERS_TO_REQUEST as u64 - 1) * i, + H256::random(), + )); + } + let rlp_data = encode_list(&too_many_headers); + + let too_many_rlp = Rlp::new(&rlp_data); + match downloader.import_headers(&mut io, &too_many_rlp, genesis_hash) { + Err(BlockDownloaderImportError::Invalid) => (), + _ => panic!("expected BlockDownloaderImportError"), + }; + } + + #[test] + fn import_headers_in_blocks_state() { + ::env_logger::try_init().ok(); + + let mut chain = TestBlockChainClient::new(); + let snapshot_service = TestSnapshotService::new(); + let queue = RwLock::new(VecDeque::new()); + let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); + + let mut headers = Vec::with_capacity(3); + let parent_hash = H256::random(); + headers.push(dummy_header(127, parent_hash)); + let parent_hash = headers[0].hash(); + headers.push(dummy_header(128, parent_hash)); + let parent_hash = headers[1].hash(); + headers.push(dummy_header(129, parent_hash)); + + let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &H256::random(), 0); + downloader.state = State::Blocks; + downloader.blocks.reset_to(vec![headers[0].hash()]); + + let rlp_data = encode_list(&headers); + let headers_rlp = Rlp::new(&rlp_data); + + match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) { + Ok(DownloadAction::None) => (), + _ => panic!("expected successful import"), + }; + + // Invalidate parent_hash link. + headers[2] = dummy_header(129, H256::random()); + let rlp_data = encode_list(&headers); + let headers_rlp = Rlp::new(&rlp_data); + + match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) { + Err(BlockDownloaderImportError::Invalid) => (), + _ => panic!("expected BlockDownloaderImportError"), + }; + + // Invalidate header sequence by skipping a header. 
+ headers[2] = dummy_header(130, headers[1].hash()); + let rlp_data = encode_list(&headers); + let headers_rlp = Rlp::new(&rlp_data); + + match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) { + Err(BlockDownloaderImportError::Invalid) => (), + _ => panic!("expected BlockDownloaderImportError"), + }; + } + + #[test] + fn import_bodies() { + ::env_logger::try_init().ok(); + + let mut chain = TestBlockChainClient::new(); + let snapshot_service = TestSnapshotService::new(); + let queue = RwLock::new(VecDeque::new()); + let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); + + // Import block headers. + let mut headers = Vec::with_capacity(4); + let mut bodies = Vec::with_capacity(4); + let mut parent_hash = H256::zero(); + for i in 0..4 { + // Construct the block body + let mut uncles = if i > 0 { + encode_list(&[dummy_header(i - 1, H256::random())]) + } else { + ::rlp::EMPTY_LIST_RLP.to_vec() + }; + + let mut txs = encode_list(&[dummy_signed_tx()]); + let tx_root = ordered_trie_root(Rlp::new(&txs).iter().map(|r| r.as_raw())); + + let mut rlp = RlpStream::new_list(2); + rlp.append_raw(&txs, 1); + rlp.append_raw(&uncles, 1); + bodies.push(rlp.out()); + + // Construct the block header + let mut header = dummy_header(i, parent_hash); + header.set_transactions_root(tx_root); + header.set_uncles_hash(keccak(&uncles)); + parent_hash = header.hash(); + headers.push(header); + } + + let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &headers[0].hash(), 0); + downloader.state = State::Blocks; + downloader.blocks.reset_to(vec![headers[0].hash()]); + + // Only import the first three block headers. + let rlp_data = encode_list(&headers[0..3]); + let headers_rlp = Rlp::new(&rlp_data); + assert!(downloader + .import_headers(&mut io, &headers_rlp, headers[0].hash()) + .is_ok()); + + // Import first body successfully. 
+ let mut rlp_data = RlpStream::new_list(1); + rlp_data.append_raw(&bodies[0], 1); + let bodies_rlp = Rlp::new(rlp_data.as_raw()); + assert!(downloader + .import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]) + .is_ok()); + + // Import second body successfully. + let mut rlp_data = RlpStream::new_list(1); + rlp_data.append_raw(&bodies[1], 1); + let bodies_rlp = Rlp::new(rlp_data.as_raw()); + assert!(downloader + .import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]) + .is_ok()); + + // Import unexpected third body. + let mut rlp_data = RlpStream::new_list(1); + rlp_data.append_raw(&bodies[2], 1); + let bodies_rlp = Rlp::new(rlp_data.as_raw()); + match downloader.import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]) { + Err(BlockDownloaderImportError::Invalid) => (), + _ => panic!("expected BlockDownloaderImportError"), + }; + } + + #[test] + fn import_receipts() { + ::env_logger::try_init().ok(); + + let mut chain = TestBlockChainClient::new(); + let snapshot_service = TestSnapshotService::new(); + let queue = RwLock::new(VecDeque::new()); + let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); + + // Import block headers. + let mut headers = Vec::with_capacity(4); + let mut receipts = Vec::with_capacity(4); + let mut parent_hash = H256::zero(); + for i in 0..4 { + // Construct the receipts. Receipt root for the first two blocks is the same. + // + // The RLP-encoded integers are clearly not receipts, but the BlockDownloader treats + // all receipts as byte blobs, so it does not matter. + let mut receipts_rlp = if i < 2 { + encode_list(&[0u32]) + } else { + encode_list(&[i as u32]) + }; + let receipts_root = + ordered_trie_root(Rlp::new(&receipts_rlp).iter().map(|r| r.as_raw())); + receipts.push(receipts_rlp); + + // Construct the block header. 
+ let mut header = dummy_header(i, parent_hash); + header.set_receipts_root(receipts_root); + parent_hash = header.hash(); + headers.push(header); + } + + let mut downloader = BlockDownloader::new(BlockSet::OldBlocks, &headers[0].hash(), 0); + downloader.state = State::Blocks; + downloader.blocks.reset_to(vec![headers[0].hash()]); + + // Only import the first three block headers. + let rlp_data = encode_list(&headers[0..3]); + let headers_rlp = Rlp::new(&rlp_data); + assert!(downloader + .import_headers(&mut io, &headers_rlp, headers[0].hash()) + .is_ok()); + + // Import second and third receipts successfully. + let mut rlp_data = RlpStream::new_list(2); + rlp_data.append_raw(&receipts[1], 1); + rlp_data.append_raw(&receipts[2], 1); + let receipts_rlp = Rlp::new(rlp_data.as_raw()); + assert!(downloader + .import_receipts(&receipts_rlp, &[headers[1].hash(), headers[2].hash()]) + .is_ok()); + + // Import unexpected fourth receipt. + let mut rlp_data = RlpStream::new_list(1); + rlp_data.append_raw(&receipts[3], 1); + let bodies_rlp = Rlp::new(rlp_data.as_raw()); + match downloader.import_bodies(&bodies_rlp, &[headers[1].hash(), headers[2].hash()]) { + Err(BlockDownloaderImportError::Invalid) => (), + _ => panic!("expected BlockDownloaderImportError"), + }; + } + + #[test] + fn reset_after_multiple_sets_of_useless_headers() { + ::env_logger::try_init().ok(); + + let spec = Spec::new_test(); + let genesis_hash = spec.genesis_header().hash(); + + let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0); + downloader.state = State::ChainHead; + + let mut chain = TestBlockChainClient::new(); + let snapshot_service = TestSnapshotService::new(); + let queue = RwLock::new(VecDeque::new()); + let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); + + let heads = [ + spec.genesis_header(), + dummy_header(127, H256::random()), + dummy_header(254, H256::random()), + ]; + + let short_subchain = [dummy_header(1, genesis_hash)]; + + 
import_headers_ok(&heads, &mut downloader, &mut io); + import_headers_ok(&short_subchain, &mut downloader, &mut io); + + assert_eq!(downloader.state, State::Blocks); + assert!(!downloader.blocks.is_empty()); + + // simulate receiving useless headers + let head = vec![short_subchain.last().unwrap().clone()]; + for _ in 0..MAX_USELESS_HEADERS_PER_ROUND { + let res = import_headers(&head, &mut downloader, &mut io); + assert!(res.is_err()); + } + + assert_eq!(downloader.state, State::Idle); + assert!(downloader.blocks.is_empty()); + } + + #[test] + fn dont_reset_after_multiple_sets_of_useless_headers_for_chain_head() { + ::env_logger::try_init().ok(); + + let spec = Spec::new_test(); + let genesis_hash = spec.genesis_header().hash(); + + let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0); + downloader.state = State::ChainHead; + + let mut chain = TestBlockChainClient::new(); + let snapshot_service = TestSnapshotService::new(); + let queue = RwLock::new(VecDeque::new()); + let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None); + + let heads = [spec.genesis_header()]; + + let short_subchain = [dummy_header(1, genesis_hash)]; + + import_headers_ok(&heads, &mut downloader, &mut io); + import_headers_ok(&short_subchain, &mut downloader, &mut io); + + assert_eq!(downloader.state, State::Blocks); + assert!(!downloader.blocks.is_empty()); + + // simulate receiving useless headers + let head = vec![short_subchain.last().unwrap().clone()]; + for _ in 0..MAX_USELESS_HEADERS_PER_ROUND { + let res = import_headers(&head, &mut downloader, &mut io); + assert!(res.is_err()); + } + + // download shouldn't be reset since this is the chain head for a single subchain. + // this state usually occurs for NewBlocks when it has reached the chain head. 
+ assert_eq!(downloader.state, State::Blocks); + assert!(!downloader.blocks.is_empty()); + } } diff --git a/ethcore/sync/src/blocks.rs b/ethcore/sync/src/blocks.rs index 125c8d0b9..b0ece9250 100644 --- a/ethcore/sync/src/blocks.rs +++ b/ethcore/sync/src/blocks.rs @@ -14,128 +14,126 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::{HashSet, HashMap, hash_map}; -use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; -use heapsize::HeapSizeOf; -use ethereum_types::H256; -use triehash_ethereum::ordered_trie_root; use bytes::Bytes; -use rlp::{Rlp, RlpStream, DecoderError}; -use network; use ethcore::verification::queue::kind::blocks::Unverified; -use types::transaction::UnverifiedTransaction; -use types::header::Header as BlockHeader; +use ethereum_types::H256; +use hash::{keccak, KECCAK_EMPTY_LIST_RLP, KECCAK_NULL_RLP}; +use heapsize::HeapSizeOf; +use network; +use rlp::{DecoderError, Rlp, RlpStream}; +use std::collections::{hash_map, HashMap, HashSet}; +use triehash_ethereum::ordered_trie_root; +use types::{header::Header as BlockHeader, transaction::UnverifiedTransaction}; known_heap_size!(0, HeaderId); #[derive(PartialEq, Debug, Clone)] pub struct SyncHeader { - pub bytes: Bytes, - pub header: BlockHeader, + pub bytes: Bytes, + pub header: BlockHeader, } impl HeapSizeOf for SyncHeader { - fn heap_size_of_children(&self) -> usize { - self.bytes.heap_size_of_children() - + self.header.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.bytes.heap_size_of_children() + self.header.heap_size_of_children() + } } impl SyncHeader { - pub fn from_rlp(bytes: Bytes) -> Result { - let result = SyncHeader { - header: ::rlp::decode(&bytes)?, - bytes, - }; + pub fn from_rlp(bytes: Bytes) -> Result { + let result = SyncHeader { + header: ::rlp::decode(&bytes)?, + bytes, + }; - Ok(result) - } + Ok(result) + } } pub struct SyncBody { - pub transactions_bytes: 
Bytes, - pub transactions: Vec, - pub uncles_bytes: Bytes, - pub uncles: Vec, + pub transactions_bytes: Bytes, + pub transactions: Vec, + pub uncles_bytes: Bytes, + pub uncles: Vec, } impl SyncBody { - pub fn from_rlp(bytes: &[u8]) -> Result { - let rlp = Rlp::new(bytes); - let transactions_rlp = rlp.at(0)?; - let uncles_rlp = rlp.at(1)?; + pub fn from_rlp(bytes: &[u8]) -> Result { + let rlp = Rlp::new(bytes); + let transactions_rlp = rlp.at(0)?; + let uncles_rlp = rlp.at(1)?; - let result = SyncBody { - transactions_bytes: transactions_rlp.as_raw().to_vec(), - transactions: transactions_rlp.as_list()?, - uncles_bytes: uncles_rlp.as_raw().to_vec(), - uncles: uncles_rlp.as_list()?, - }; + let result = SyncBody { + transactions_bytes: transactions_rlp.as_raw().to_vec(), + transactions: transactions_rlp.as_list()?, + uncles_bytes: uncles_rlp.as_raw().to_vec(), + uncles: uncles_rlp.as_list()?, + }; - Ok(result) - } + Ok(result) + } - fn empty_body() -> Self { - SyncBody { - transactions_bytes: ::rlp::EMPTY_LIST_RLP.to_vec(), - transactions: Vec::with_capacity(0), - uncles_bytes: ::rlp::EMPTY_LIST_RLP.to_vec(), - uncles: Vec::with_capacity(0), - } - } + fn empty_body() -> Self { + SyncBody { + transactions_bytes: ::rlp::EMPTY_LIST_RLP.to_vec(), + transactions: Vec::with_capacity(0), + uncles_bytes: ::rlp::EMPTY_LIST_RLP.to_vec(), + uncles: Vec::with_capacity(0), + } + } } impl HeapSizeOf for SyncBody { - fn heap_size_of_children(&self) -> usize { - self.transactions_bytes.heap_size_of_children() - + self.transactions.heap_size_of_children() - + self.uncles_bytes.heap_size_of_children() - + self.uncles.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.transactions_bytes.heap_size_of_children() + + self.transactions.heap_size_of_children() + + self.uncles_bytes.heap_size_of_children() + + self.uncles.heap_size_of_children() + } } /// Block data with optional body. 
struct SyncBlock { - header: SyncHeader, - body: Option, - receipts: Option, - receipts_root: H256, + header: SyncHeader, + body: Option, + receipts: Option, + receipts_root: H256, } impl HeapSizeOf for SyncBlock { - fn heap_size_of_children(&self) -> usize { - self.header.heap_size_of_children() + self.body.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.header.heap_size_of_children() + self.body.heap_size_of_children() + } } fn unverified_from_sync(header: SyncHeader, body: Option) -> Unverified { - let mut stream = RlpStream::new_list(3); - stream.append_raw(&header.bytes, 1); - let body = body.unwrap_or_else(SyncBody::empty_body); - stream.append_raw(&body.transactions_bytes, 1); - stream.append_raw(&body.uncles_bytes, 1); + let mut stream = RlpStream::new_list(3); + stream.append_raw(&header.bytes, 1); + let body = body.unwrap_or_else(SyncBody::empty_body); + stream.append_raw(&body.transactions_bytes, 1); + stream.append_raw(&body.uncles_bytes, 1); - Unverified { - header: header.header, - transactions: body.transactions, - uncles: body.uncles, - bytes: stream.out().to_vec(), - } + Unverified { + header: header.header, + transactions: body.transactions, + uncles: body.uncles, + bytes: stream.out().to_vec(), + } } /// Block with optional receipt pub struct BlockAndReceipts { - /// Block data. - pub block: Unverified, - /// Block receipts RLP list. - pub receipts: Option, + /// Block data. + pub block: Unverified, + /// Block receipts RLP list. + pub receipts: Option, } /// Used to identify header by transactions and uncles hashes #[derive(Eq, PartialEq, Hash)] struct HeaderId { - transactions_root: H256, - uncles: H256 + transactions_root: H256, + uncles: H256, } /// A collection of blocks and subchain pointers being downloaded. This keeps track of @@ -143,565 +141,641 @@ struct HeaderId { /// the downloaded blocks. #[derive(Default)] pub struct BlockCollection { - /// Does this collection need block receipts. 
- need_receipts: bool, - /// Heads of subchains to download - heads: Vec, - /// Downloaded blocks. - blocks: HashMap, - /// Downloaded blocks by parent. - parents: HashMap, - /// Used to map body to header. - header_ids: HashMap, - /// Used to map receipts root to headers. - receipt_ids: HashMap>, - /// First block in `blocks`. - head: Option, - /// Set of block header hashes being downloaded - downloading_headers: HashSet, - /// Set of block bodies being downloaded identified by block hash. - downloading_bodies: HashSet, - /// Set of block receipts being downloaded identified by receipt root. - downloading_receipts: HashSet, + /// Does this collection need block receipts. + need_receipts: bool, + /// Heads of subchains to download + heads: Vec, + /// Downloaded blocks. + blocks: HashMap, + /// Downloaded blocks by parent. + parents: HashMap, + /// Used to map body to header. + header_ids: HashMap, + /// Used to map receipts root to headers. + receipt_ids: HashMap>, + /// First block in `blocks`. + head: Option, + /// Set of block header hashes being downloaded + downloading_headers: HashSet, + /// Set of block bodies being downloaded identified by block hash. + downloading_bodies: HashSet, + /// Set of block receipts being downloaded identified by receipt root. + downloading_receipts: HashSet, } impl BlockCollection { - /// Create a new instance. - pub fn new(download_receipts: bool) -> BlockCollection { - BlockCollection { - need_receipts: download_receipts, - blocks: HashMap::new(), - header_ids: HashMap::new(), - receipt_ids: HashMap::new(), - heads: Vec::new(), - parents: HashMap::new(), - head: None, - downloading_headers: HashSet::new(), - downloading_bodies: HashSet::new(), - downloading_receipts: HashSet::new(), - } - } + /// Create a new instance. 
+ pub fn new(download_receipts: bool) -> BlockCollection { + BlockCollection { + need_receipts: download_receipts, + blocks: HashMap::new(), + header_ids: HashMap::new(), + receipt_ids: HashMap::new(), + heads: Vec::new(), + parents: HashMap::new(), + head: None, + downloading_headers: HashSet::new(), + downloading_bodies: HashSet::new(), + downloading_receipts: HashSet::new(), + } + } - /// Clear everything. - pub fn clear(&mut self) { - self.blocks.clear(); - self.parents.clear(); - self.header_ids.clear(); - self.receipt_ids.clear(); - self.heads.clear(); - self.head = None; - self.downloading_headers.clear(); - self.downloading_bodies.clear(); - self.downloading_receipts.clear(); - } + /// Clear everything. + pub fn clear(&mut self) { + self.blocks.clear(); + self.parents.clear(); + self.header_ids.clear(); + self.receipt_ids.clear(); + self.heads.clear(); + self.head = None; + self.downloading_headers.clear(); + self.downloading_bodies.clear(); + self.downloading_receipts.clear(); + } - /// Reset collection for a new sync round with given subchain block hashes. - pub fn reset_to(&mut self, hashes: Vec) { - self.clear(); - self.heads = hashes; - } + /// Reset collection for a new sync round with given subchain block hashes. + pub fn reset_to(&mut self, hashes: Vec) { + self.clear(); + self.heads = hashes; + } - /// Insert a set of headers into collection and advance subchain head pointers. - pub fn insert_headers(&mut self, headers: Vec) { - for h in headers { - if let Err(e) = self.insert_header(h) { - trace!(target: "sync", "Ignored invalid header: {:?}", e); - } - } - self.update_heads(); - } + /// Insert a set of headers into collection and advance subchain head pointers. + pub fn insert_headers(&mut self, headers: Vec) { + for h in headers { + if let Err(e) = self.insert_header(h) { + trace!(target: "sync", "Ignored invalid header: {:?}", e); + } + } + self.update_heads(); + } - /// Insert a collection of block bodies for previously downloaded headers. 
- pub fn insert_bodies(&mut self, bodies: Vec) -> Vec { - bodies.into_iter() - .filter_map(|b| { - self.insert_body(b) - .map_err(|e| trace!(target: "sync", "Ignored invalid body: {:?}", e)) - .ok() - }) - .collect() - } + /// Insert a collection of block bodies for previously downloaded headers. + pub fn insert_bodies(&mut self, bodies: Vec) -> Vec { + bodies + .into_iter() + .filter_map(|b| { + self.insert_body(b) + .map_err(|e| trace!(target: "sync", "Ignored invalid body: {:?}", e)) + .ok() + }) + .collect() + } - /// Insert a collection of block receipts for previously downloaded headers. - pub fn insert_receipts(&mut self, receipts: Vec) -> Vec> { - if !self.need_receipts { - return Vec::new(); - } - receipts.into_iter() - .filter_map(|r| { - self.insert_receipt(r) - .map_err(|e| trace!(target: "sync", "Ignored invalid receipt: {:?}", e)) - .ok() - }) - .collect() - } + /// Insert a collection of block receipts for previously downloaded headers. + pub fn insert_receipts(&mut self, receipts: Vec) -> Vec> { + if !self.need_receipts { + return Vec::new(); + } + receipts + .into_iter() + .filter_map(|r| { + self.insert_receipt(r) + .map_err(|e| trace!(target: "sync", "Ignored invalid receipt: {:?}", e)) + .ok() + }) + .collect() + } - /// Returns a set of block hashes that require a body download. The returned set is marked as being downloaded. 
- pub fn needed_bodies(&mut self, count: usize, _ignore_downloading: bool) -> Vec { - if self.head.is_none() { - return Vec::new(); - } - let mut needed_bodies: Vec = Vec::new(); - let mut head = self.head; - while head.is_some() && needed_bodies.len() < count { - head = self.parents.get(&head.unwrap()).cloned(); - if let Some(head) = head { - match self.blocks.get(&head) { - Some(block) if block.body.is_none() && !self.downloading_bodies.contains(&head) => { - self.downloading_bodies.insert(head.clone()); - needed_bodies.push(head.clone()); - } - _ => (), - } - } - } - for h in self.header_ids.values() { - if needed_bodies.len() >= count { - break; - } - if !self.downloading_bodies.contains(h) { - needed_bodies.push(h.clone()); - self.downloading_bodies.insert(h.clone()); - } - } - needed_bodies - } + /// Returns a set of block hashes that require a body download. The returned set is marked as being downloaded. + pub fn needed_bodies(&mut self, count: usize, _ignore_downloading: bool) -> Vec { + if self.head.is_none() { + return Vec::new(); + } + let mut needed_bodies: Vec = Vec::new(); + let mut head = self.head; + while head.is_some() && needed_bodies.len() < count { + head = self.parents.get(&head.unwrap()).cloned(); + if let Some(head) = head { + match self.blocks.get(&head) { + Some(block) + if block.body.is_none() && !self.downloading_bodies.contains(&head) => + { + self.downloading_bodies.insert(head.clone()); + needed_bodies.push(head.clone()); + } + _ => (), + } + } + } + for h in self.header_ids.values() { + if needed_bodies.len() >= count { + break; + } + if !self.downloading_bodies.contains(h) { + needed_bodies.push(h.clone()); + self.downloading_bodies.insert(h.clone()); + } + } + needed_bodies + } - /// Returns a set of block hashes that require a receipt download. The returned set is marked as being downloaded. 
- pub fn needed_receipts(&mut self, count: usize, _ignore_downloading: bool) -> Vec { - if self.head.is_none() || !self.need_receipts { - return Vec::new(); - } - let mut needed_receipts: Vec = Vec::new(); - let mut head = self.head; - while head.is_some() && needed_receipts.len() < count { - head = self.parents.get(&head.unwrap()).cloned(); - if let Some(head) = head { - match self.blocks.get(&head) { - Some(block) => { - if block.receipts.is_none() && !self.downloading_receipts.contains(&block.receipts_root) { - self.downloading_receipts.insert(block.receipts_root); - needed_receipts.push(head.clone()); - } - } - _ => (), - } - } - } - // If there are multiple blocks per receipt, only request one of them. - for (root, h) in self.receipt_ids.iter().map(|(root, hashes)| (root, hashes[0])) { - if needed_receipts.len() >= count { - break; - } - if !self.downloading_receipts.contains(root) { - needed_receipts.push(h.clone()); - self.downloading_receipts.insert(*root); - } - } - needed_receipts - } + /// Returns a set of block hashes that require a receipt download. The returned set is marked as being downloaded. + pub fn needed_receipts(&mut self, count: usize, _ignore_downloading: bool) -> Vec { + if self.head.is_none() || !self.need_receipts { + return Vec::new(); + } + let mut needed_receipts: Vec = Vec::new(); + let mut head = self.head; + while head.is_some() && needed_receipts.len() < count { + head = self.parents.get(&head.unwrap()).cloned(); + if let Some(head) = head { + match self.blocks.get(&head) { + Some(block) => { + if block.receipts.is_none() + && !self.downloading_receipts.contains(&block.receipts_root) + { + self.downloading_receipts.insert(block.receipts_root); + needed_receipts.push(head.clone()); + } + } + _ => (), + } + } + } + // If there are multiple blocks per receipt, only request one of them. 
+ for (root, h) in self + .receipt_ids + .iter() + .map(|(root, hashes)| (root, hashes[0])) + { + if needed_receipts.len() >= count { + break; + } + if !self.downloading_receipts.contains(root) { + needed_receipts.push(h.clone()); + self.downloading_receipts.insert(*root); + } + } + needed_receipts + } - /// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded. - pub fn needed_headers(&mut self, count: usize, ignore_downloading: bool) -> Option<(H256, usize)> { - // find subchain to download - let mut download = None; - { - for h in &self.heads { - if ignore_downloading || !self.downloading_headers.contains(h) { - self.downloading_headers.insert(h.clone()); - download = Some(h.clone()); - break; - } - } - } - download.map(|h| (h, count)) - } + /// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded. + pub fn needed_headers( + &mut self, + count: usize, + ignore_downloading: bool, + ) -> Option<(H256, usize)> { + // find subchain to download + let mut download = None; + { + for h in &self.heads { + if ignore_downloading || !self.downloading_headers.contains(h) { + self.downloading_headers.insert(h.clone()); + download = Some(h.clone()); + break; + } + } + } + download.map(|h| (h, count)) + } - /// Unmark header as being downloaded. - pub fn clear_header_download(&mut self, hash: &H256) { - self.downloading_headers.remove(hash); - } + /// Unmark header as being downloaded. + pub fn clear_header_download(&mut self, hash: &H256) { + self.downloading_headers.remove(hash); + } - /// Unmark block body as being downloaded. - pub fn clear_body_download(&mut self, hashes: &[H256]) { - for h in hashes { - self.downloading_bodies.remove(h); - } - } + /// Unmark block body as being downloaded. + pub fn clear_body_download(&mut self, hashes: &[H256]) { + for h in hashes { + self.downloading_bodies.remove(h); + } + } - /// Unmark block receipt as being downloaded. 
- pub fn clear_receipt_download(&mut self, hashes: &[H256]) { - for h in hashes { - if let Some(ref block) = self.blocks.get(h) { - self.downloading_receipts.remove(&block.receipts_root); - } - } - } + /// Unmark block receipt as being downloaded. + pub fn clear_receipt_download(&mut self, hashes: &[H256]) { + for h in hashes { + if let Some(ref block) = self.blocks.get(h) { + self.downloading_receipts.remove(&block.receipts_root); + } + } + } - /// Get a valid chain of blocks ordered in ascending order and ready for importing into blockchain. - pub fn drain(&mut self) -> Vec { - if self.blocks.is_empty() || self.head.is_none() { - return Vec::new(); - } + /// Get a valid chain of blocks ordered in ascending order and ready for importing into blockchain. + pub fn drain(&mut self) -> Vec { + if self.blocks.is_empty() || self.head.is_none() { + return Vec::new(); + } - let mut drained = Vec::new(); - let mut hashes = Vec::new(); - { - let mut blocks = Vec::new(); - let mut head = self.head; - while let Some(h) = head { - head = self.parents.get(&h).cloned(); - if let Some(head) = head { - match self.blocks.remove(&head) { - Some(block) => { - if block.body.is_some() && (!self.need_receipts || block.receipts.is_some()) { - blocks.push(block); - hashes.push(head); - self.head = Some(head); - } else { - self.blocks.insert(head, block); - break; - } - }, - _ => { - break; - }, - } - } - } + let mut drained = Vec::new(); + let mut hashes = Vec::new(); + { + let mut blocks = Vec::new(); + let mut head = self.head; + while let Some(h) = head { + head = self.parents.get(&h).cloned(); + if let Some(head) = head { + match self.blocks.remove(&head) { + Some(block) => { + if block.body.is_some() + && (!self.need_receipts || block.receipts.is_some()) + { + blocks.push(block); + hashes.push(head); + self.head = Some(head); + } else { + self.blocks.insert(head, block); + break; + } + } + _ => { + break; + } + } + } + } - for block in blocks.into_iter() { - let unverified = 
unverified_from_sync(block.header, block.body); - drained.push(BlockAndReceipts { - block: unverified, - receipts: block.receipts.clone(), - }); - } - } + for block in blocks.into_iter() { + let unverified = unverified_from_sync(block.header, block.body); + drained.push(BlockAndReceipts { + block: unverified, + receipts: block.receipts.clone(), + }); + } + } - trace!(target: "sync", "Drained {} blocks, new head :{:?}", drained.len(), self.head); - drained - } + trace!(target: "sync", "Drained {} blocks, new head :{:?}", drained.len(), self.head); + drained + } - /// Check if the collection is empty. We consider the syncing round complete once - /// there is no block data left and only a single or none head pointer remains. - pub fn is_empty(&self) -> bool { - self.heads.len() == 0 || (self.heads.len() == 1 && self.head.map_or(false, |h| h == self.heads[0])) - } + /// Check if the collection is empty. We consider the syncing round complete once + /// there is no block data left and only a single or none head pointer remains. + pub fn is_empty(&self) -> bool { + self.heads.len() == 0 + || (self.heads.len() == 1 && self.head.map_or(false, |h| h == self.heads[0])) + } - /// Check if collection contains a block header. - pub fn contains(&self, hash: &H256) -> bool { - self.blocks.contains_key(hash) - } + /// Check if collection contains a block header. + pub fn contains(&self, hash: &H256) -> bool { + self.blocks.contains_key(hash) + } - /// Check the number of heads - pub fn heads_len(&self) -> usize { - self.heads.len() - } + /// Check the number of heads + pub fn heads_len(&self) -> usize { + self.heads.len() + } - /// Return used heap size. 
- pub fn heap_size(&self) -> usize { - self.heads.heap_size_of_children() - + self.blocks.heap_size_of_children() - + self.parents.heap_size_of_children() - + self.header_ids.heap_size_of_children() - + self.downloading_headers.heap_size_of_children() - + self.downloading_bodies.heap_size_of_children() - } + /// Return used heap size. + pub fn heap_size(&self) -> usize { + self.heads.heap_size_of_children() + + self.blocks.heap_size_of_children() + + self.parents.heap_size_of_children() + + self.header_ids.heap_size_of_children() + + self.downloading_headers.heap_size_of_children() + + self.downloading_bodies.heap_size_of_children() + } - /// Check if given block hash is marked as being downloaded. - pub fn is_downloading(&self, hash: &H256) -> bool { - self.downloading_headers.contains(hash) || self.downloading_bodies.contains(hash) - } + /// Check if given block hash is marked as being downloaded. + pub fn is_downloading(&self, hash: &H256) -> bool { + self.downloading_headers.contains(hash) || self.downloading_bodies.contains(hash) + } - fn insert_body(&mut self, body: SyncBody) -> Result { - let header_id = { - let tx_root = ordered_trie_root(Rlp::new(&body.transactions_bytes).iter().map(|r| r.as_raw())); - let uncles = keccak(&body.uncles_bytes); - HeaderId { - transactions_root: tx_root, - uncles: uncles - } - }; + fn insert_body(&mut self, body: SyncBody) -> Result { + let header_id = { + let tx_root = ordered_trie_root( + Rlp::new(&body.transactions_bytes) + .iter() + .map(|r| r.as_raw()), + ); + let uncles = keccak(&body.uncles_bytes); + HeaderId { + transactions_root: tx_root, + uncles: uncles, + } + }; - match self.header_ids.remove(&header_id) { - Some(h) => { - self.downloading_bodies.remove(&h); - match self.blocks.get_mut(&h) { - Some(ref mut block) => { - trace!(target: "sync", "Got body {}", h); - block.body = Some(body); - Ok(h) - }, - None => { - warn!("Got body with no header {}", h); - Err(network::ErrorKind::BadProtocol.into()) - } - } - } - 
None => { - trace!(target: "sync", "Ignored unknown/stale block body. tx_root = {:?}, uncles = {:?}", header_id.transactions_root, header_id.uncles); - Err(network::ErrorKind::BadProtocol.into()) - } - } - } + match self.header_ids.remove(&header_id) { + Some(h) => { + self.downloading_bodies.remove(&h); + match self.blocks.get_mut(&h) { + Some(ref mut block) => { + trace!(target: "sync", "Got body {}", h); + block.body = Some(body); + Ok(h) + } + None => { + warn!("Got body with no header {}", h); + Err(network::ErrorKind::BadProtocol.into()) + } + } + } + None => { + trace!(target: "sync", "Ignored unknown/stale block body. tx_root = {:?}, uncles = {:?}", header_id.transactions_root, header_id.uncles); + Err(network::ErrorKind::BadProtocol.into()) + } + } + } - fn insert_receipt(&mut self, r: Bytes) -> Result, network::Error> { - let receipt_root = { - let receipts = Rlp::new(&r); - ordered_trie_root(receipts.iter().map(|r| r.as_raw())) - }; - self.downloading_receipts.remove(&receipt_root); - match self.receipt_ids.entry(receipt_root) { - hash_map::Entry::Occupied(entry) => { - let block_hashes = entry.remove(); - for h in block_hashes.iter() { - match self.blocks.get_mut(&h) { - Some(ref mut block) => { - trace!(target: "sync", "Got receipt {}", h); - block.receipts = Some(r.clone()); - }, - None => { - warn!("Got receipt with no header {}", h); - return Err(network::ErrorKind::BadProtocol.into()) - } - } - } - Ok(block_hashes) - }, - hash_map::Entry::Vacant(_) => { - trace!(target: "sync", "Ignored unknown/stale block receipt {:?}", receipt_root); - Err(network::ErrorKind::BadProtocol.into()) - } - } - } + fn insert_receipt(&mut self, r: Bytes) -> Result, network::Error> { + let receipt_root = { + let receipts = Rlp::new(&r); + ordered_trie_root(receipts.iter().map(|r| r.as_raw())) + }; + self.downloading_receipts.remove(&receipt_root); + match self.receipt_ids.entry(receipt_root) { + hash_map::Entry::Occupied(entry) => { + let block_hashes = entry.remove(); + 
for h in block_hashes.iter() { + match self.blocks.get_mut(&h) { + Some(ref mut block) => { + trace!(target: "sync", "Got receipt {}", h); + block.receipts = Some(r.clone()); + } + None => { + warn!("Got receipt with no header {}", h); + return Err(network::ErrorKind::BadProtocol.into()); + } + } + } + Ok(block_hashes) + } + hash_map::Entry::Vacant(_) => { + trace!(target: "sync", "Ignored unknown/stale block receipt {:?}", receipt_root); + Err(network::ErrorKind::BadProtocol.into()) + } + } + } - fn insert_header(&mut self, info: SyncHeader) -> Result { - let hash = info.header.hash(); - if self.blocks.contains_key(&hash) { - return Ok(hash); - } + fn insert_header(&mut self, info: SyncHeader) -> Result { + let hash = info.header.hash(); + if self.blocks.contains_key(&hash) { + return Ok(hash); + } - match self.head { - None if hash == self.heads[0] => { - trace!(target: "sync", "New head {}", hash); - self.head = Some(info.header.parent_hash().clone()); - }, - _ => () - } + match self.head { + None if hash == self.heads[0] => { + trace!(target: "sync", "New head {}", hash); + self.head = Some(info.header.parent_hash().clone()); + } + _ => (), + } - let header_id = HeaderId { - transactions_root: *info.header.transactions_root(), - uncles: *info.header.uncles_hash(), - }; + let header_id = HeaderId { + transactions_root: *info.header.transactions_root(), + uncles: *info.header.uncles_hash(), + }; - let body = if header_id.transactions_root == KECCAK_NULL_RLP && header_id.uncles == KECCAK_EMPTY_LIST_RLP { - // empty body, just mark as downloaded - Some(SyncBody::empty_body()) - } else { - trace!( - "Queueing body tx_root = {:?}, uncles = {:?}, block = {:?}, number = {}", - header_id.transactions_root, - header_id.uncles, - hash, - info.header.number() - ); - self.header_ids.insert(header_id, hash); - None - }; + let body = if header_id.transactions_root == KECCAK_NULL_RLP + && header_id.uncles == KECCAK_EMPTY_LIST_RLP + { + // empty body, just mark as downloaded + 
Some(SyncBody::empty_body()) + } else { + trace!( + "Queueing body tx_root = {:?}, uncles = {:?}, block = {:?}, number = {}", + header_id.transactions_root, + header_id.uncles, + hash, + info.header.number() + ); + self.header_ids.insert(header_id, hash); + None + }; - let (receipts, receipts_root) = if self.need_receipts { - let receipt_root = *info.header.receipts_root(); - if receipt_root == KECCAK_NULL_RLP { - let receipts_stream = RlpStream::new_list(0); - (Some(receipts_stream.out()), receipt_root) - } else { - self.receipt_ids.entry(receipt_root).or_insert_with(Vec::new).push(hash); - (None, receipt_root) - } - } else { - (None, H256::new()) - }; + let (receipts, receipts_root) = if self.need_receipts { + let receipt_root = *info.header.receipts_root(); + if receipt_root == KECCAK_NULL_RLP { + let receipts_stream = RlpStream::new_list(0); + (Some(receipts_stream.out()), receipt_root) + } else { + self.receipt_ids + .entry(receipt_root) + .or_insert_with(Vec::new) + .push(hash); + (None, receipt_root) + } + } else { + (None, H256::new()) + }; - self.parents.insert(*info.header.parent_hash(), hash); + self.parents.insert(*info.header.parent_hash(), hash); - let block = SyncBlock { - header: info, - body, - receipts, - receipts_root, - }; + let block = SyncBlock { + header: info, + body, + receipts, + receipts_root, + }; - self.blocks.insert(hash, block); - trace!(target: "sync", "New header: {:x}", hash); - Ok(hash) - } + self.blocks.insert(hash, block); + trace!(target: "sync", "New header: {:x}", hash); + Ok(hash) + } - // update subchain headers - fn update_heads(&mut self) { - let mut new_heads = Vec::new(); - let old_subchains: HashSet<_> = { self.heads.iter().cloned().collect() }; - for s in self.heads.drain(..) 
{ - let mut h = s.clone(); - if !self.blocks.contains_key(&h) { - new_heads.push(h); - continue; - } - loop { - match self.parents.get(&h) { - Some(next) => { - h = next.clone(); - if old_subchains.contains(&h) { - trace!(target: "sync", "Completed subchain {:?}", s); - break; // reached head of the other subchain, merge by not adding - } - }, - _ => { - new_heads.push(h); - break; - } - } - } - } - self.heads = new_heads; - } + // update subchain headers + fn update_heads(&mut self) { + let mut new_heads = Vec::new(); + let old_subchains: HashSet<_> = { self.heads.iter().cloned().collect() }; + for s in self.heads.drain(..) { + let mut h = s.clone(); + if !self.blocks.contains_key(&h) { + new_heads.push(h); + continue; + } + loop { + match self.parents.get(&h) { + Some(next) => { + h = next.clone(); + if old_subchains.contains(&h) { + trace!(target: "sync", "Completed subchain {:?}", s); + break; // reached head of the other subchain, merge by not adding + } + } + _ => { + new_heads.push(h); + break; + } + } + } + } + self.heads = new_heads; + } } #[cfg(test)] mod test { - use super::{BlockCollection, SyncHeader}; - use ethcore::client::{TestBlockChainClient, EachBlockWith, BlockId, BlockChainClient}; - use types::BlockNumber; - use ethcore::verification::queue::kind::blocks::Unverified; - use rlp::*; + use super::{BlockCollection, SyncHeader}; + use ethcore::{ + client::{BlockChainClient, BlockId, EachBlockWith, TestBlockChainClient}, + verification::queue::kind::blocks::Unverified, + }; + use rlp::*; + use types::BlockNumber; - fn is_empty(bc: &BlockCollection) -> bool { - bc.heads.is_empty() && - bc.blocks.is_empty() && - bc.parents.is_empty() && - bc.header_ids.is_empty() && - bc.head.is_none() && - bc.downloading_headers.is_empty() && - bc.downloading_bodies.is_empty() - } + fn is_empty(bc: &BlockCollection) -> bool { + bc.heads.is_empty() + && bc.blocks.is_empty() + && bc.parents.is_empty() + && bc.header_ids.is_empty() + && bc.head.is_none() + && 
bc.downloading_headers.is_empty() + && bc.downloading_bodies.is_empty() + } - #[test] - fn create_clear() { - let mut bc = BlockCollection::new(false); - assert!(is_empty(&bc)); - let client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Nothing); - let hashes = (0 .. 100).map(|i| (&client as &BlockChainClient).block_hash(BlockId::Number(i)).unwrap()).collect(); - bc.reset_to(hashes); - assert!(!is_empty(&bc)); - bc.clear(); - assert!(is_empty(&bc)); - } + #[test] + fn create_clear() { + let mut bc = BlockCollection::new(false); + assert!(is_empty(&bc)); + let client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Nothing); + let hashes = (0..100) + .map(|i| { + (&client as &BlockChainClient) + .block_hash(BlockId::Number(i)) + .unwrap() + }) + .collect(); + bc.reset_to(hashes); + assert!(!is_empty(&bc)); + bc.clear(); + assert!(is_empty(&bc)); + } - #[test] - fn insert_headers() { - let mut bc = BlockCollection::new(false); - assert!(is_empty(&bc)); - let client = TestBlockChainClient::new(); - let nblocks = 200; - client.add_blocks(nblocks, EachBlockWith::Nothing); - let blocks: Vec<_> = (0..nblocks) - .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) - .collect(); - let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); - let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }).collect(); - bc.reset_to(heads); - assert!(!bc.is_empty()); - assert_eq!(hashes[0], bc.heads[0]); - assert!(bc.needed_bodies(1, false).is_empty()); - assert!(!bc.contains(&hashes[0])); - assert!(!bc.is_downloading(&hashes[0])); + #[test] + fn insert_headers() { + let mut bc = BlockCollection::new(false); + assert!(is_empty(&bc)); + let client = TestBlockChainClient::new(); + let nblocks = 
200; + client.add_blocks(nblocks, EachBlockWith::Nothing); + let blocks: Vec<_> = (0..nblocks) + .map(|i| { + (&client as &BlockChainClient) + .block(BlockId::Number(i as BlockNumber)) + .unwrap() + .into_inner() + }) + .collect(); + let headers: Vec<_> = blocks + .iter() + .map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()) + .collect(); + let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); + let heads: Vec<_> = hashes + .iter() + .enumerate() + .filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }) + .collect(); + bc.reset_to(heads); + assert!(!bc.is_empty()); + assert_eq!(hashes[0], bc.heads[0]); + assert!(bc.needed_bodies(1, false).is_empty()); + assert!(!bc.contains(&hashes[0])); + assert!(!bc.is_downloading(&hashes[0])); - let (h, n) = bc.needed_headers(6, false).unwrap(); - assert!(bc.is_downloading(&hashes[0])); - assert_eq!(hashes[0], h); - assert_eq!(n, 6); - assert_eq!(bc.downloading_headers.len(), 1); - assert!(bc.drain().is_empty()); + let (h, n) = bc.needed_headers(6, false).unwrap(); + assert!(bc.is_downloading(&hashes[0])); + assert_eq!(hashes[0], h); + assert_eq!(n, 6); + assert_eq!(bc.downloading_headers.len(), 1); + assert!(bc.drain().is_empty()); - bc.insert_headers(headers[0..6].into_iter().map(Clone::clone).collect()); - assert_eq!(hashes[5], bc.heads[0]); - for h in &hashes[0..6] { - bc.clear_header_download(h) - } - assert_eq!(bc.downloading_headers.len(), 0); - assert!(!bc.is_downloading(&hashes[0])); - assert!(bc.contains(&hashes[0])); + bc.insert_headers(headers[0..6].into_iter().map(Clone::clone).collect()); + assert_eq!(hashes[5], bc.heads[0]); + for h in &hashes[0..6] { + bc.clear_header_download(h) + } + assert_eq!(bc.downloading_headers.len(), 0); + assert!(!bc.is_downloading(&hashes[0])); + assert!(bc.contains(&hashes[0])); - assert_eq!( - bc.drain().into_iter().map(|b| b.block).collect::>(), - blocks[0..6].iter().map(|b| 
Unverified::from_rlp(b.to_vec()).unwrap()).collect::>() - ); - assert!(!bc.contains(&hashes[0])); - assert_eq!(hashes[5], bc.head.unwrap()); + assert_eq!( + bc.drain().into_iter().map(|b| b.block).collect::>(), + blocks[0..6] + .iter() + .map(|b| Unverified::from_rlp(b.to_vec()).unwrap()) + .collect::>() + ); + assert!(!bc.contains(&hashes[0])); + assert_eq!(hashes[5], bc.head.unwrap()); - let (h, _) = bc.needed_headers(6, false).unwrap(); - assert_eq!(hashes[5], h); - let (h, _) = bc.needed_headers(6, false).unwrap(); - assert_eq!(hashes[20], h); - bc.insert_headers(headers[10..16].into_iter().map(Clone::clone).collect()); - assert!(bc.drain().is_empty()); - bc.insert_headers(headers[5..10].into_iter().map(Clone::clone).collect()); - assert_eq!( - bc.drain().into_iter().map(|b| b.block).collect::>(), - blocks[6..16].iter().map(|b| Unverified::from_rlp(b.to_vec()).unwrap()).collect::>() - ); + let (h, _) = bc.needed_headers(6, false).unwrap(); + assert_eq!(hashes[5], h); + let (h, _) = bc.needed_headers(6, false).unwrap(); + assert_eq!(hashes[20], h); + bc.insert_headers(headers[10..16].into_iter().map(Clone::clone).collect()); + assert!(bc.drain().is_empty()); + bc.insert_headers(headers[5..10].into_iter().map(Clone::clone).collect()); + assert_eq!( + bc.drain().into_iter().map(|b| b.block).collect::>(), + blocks[6..16] + .iter() + .map(|b| Unverified::from_rlp(b.to_vec()).unwrap()) + .collect::>() + ); - assert_eq!(hashes[15], bc.heads[0]); + assert_eq!(hashes[15], bc.heads[0]); - bc.insert_headers(headers[15..].into_iter().map(Clone::clone).collect()); - bc.drain(); - assert!(bc.is_empty()); - } + bc.insert_headers(headers[15..].into_iter().map(Clone::clone).collect()); + bc.drain(); + assert!(bc.is_empty()); + } - #[test] - fn insert_headers_with_gap() { - let mut bc = BlockCollection::new(false); - assert!(is_empty(&bc)); - let client = TestBlockChainClient::new(); - let nblocks = 200; - client.add_blocks(nblocks, EachBlockWith::Nothing); - let blocks: Vec<_> 
= (0..nblocks) - .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) - .collect(); - let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); - let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }).collect(); - bc.reset_to(heads); + #[test] + fn insert_headers_with_gap() { + let mut bc = BlockCollection::new(false); + assert!(is_empty(&bc)); + let client = TestBlockChainClient::new(); + let nblocks = 200; + client.add_blocks(nblocks, EachBlockWith::Nothing); + let blocks: Vec<_> = (0..nblocks) + .map(|i| { + (&client as &BlockChainClient) + .block(BlockId::Number(i as BlockNumber)) + .unwrap() + .into_inner() + }) + .collect(); + let headers: Vec<_> = blocks + .iter() + .map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()) + .collect(); + let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); + let heads: Vec<_> = hashes + .iter() + .enumerate() + .filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }) + .collect(); + bc.reset_to(heads); - bc.insert_headers(headers[2..22].into_iter().map(Clone::clone).collect()); - assert_eq!(hashes[0], bc.heads[0]); - assert_eq!(hashes[21], bc.heads[1]); - assert!(bc.head.is_none()); - bc.insert_headers(headers[0..2].into_iter().map(Clone::clone).collect()); - assert!(bc.head.is_some()); - assert_eq!(hashes[21], bc.heads[0]); - } + bc.insert_headers(headers[2..22].into_iter().map(Clone::clone).collect()); + assert_eq!(hashes[0], bc.heads[0]); + assert_eq!(hashes[21], bc.heads[1]); + assert!(bc.head.is_none()); + bc.insert_headers(headers[0..2].into_iter().map(Clone::clone).collect()); + assert!(bc.head.is_some()); + assert_eq!(hashes[21], bc.heads[0]); + } - #[test] - fn insert_headers_no_gap() { - let mut bc = 
BlockCollection::new(false); - assert!(is_empty(&bc)); - let client = TestBlockChainClient::new(); - let nblocks = 200; - client.add_blocks(nblocks, EachBlockWith::Nothing); - let blocks: Vec<_> = (0..nblocks) - .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) - .collect(); - let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); - let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }).collect(); - bc.reset_to(heads); + #[test] + fn insert_headers_no_gap() { + let mut bc = BlockCollection::new(false); + assert!(is_empty(&bc)); + let client = TestBlockChainClient::new(); + let nblocks = 200; + client.add_blocks(nblocks, EachBlockWith::Nothing); + let blocks: Vec<_> = (0..nblocks) + .map(|i| { + (&client as &BlockChainClient) + .block(BlockId::Number(i as BlockNumber)) + .unwrap() + .into_inner() + }) + .collect(); + let headers: Vec<_> = blocks + .iter() + .map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()) + .collect(); + let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); + let heads: Vec<_> = hashes + .iter() + .enumerate() + .filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }) + .collect(); + bc.reset_to(heads); - bc.insert_headers(headers[1..2].into_iter().map(Clone::clone).collect()); - assert!(bc.drain().is_empty()); - bc.insert_headers(headers[0..1].into_iter().map(Clone::clone).collect()); - assert_eq!(bc.drain().len(), 2); - } + bc.insert_headers(headers[1..2].into_iter().map(Clone::clone).collect()); + assert!(bc.drain().is_empty()); + bc.insert_headers(headers[0..1].into_iter().map(Clone::clone).collect()); + assert_eq!(bc.drain().len(), 2); + } } diff --git a/ethcore/sync/src/chain/handler.rs b/ethcore/sync/src/chain/handler.rs index 
63ab89161..5f230bf0d 100644 --- a/ethcore/sync/src/chain/handler.rs +++ b/ethcore/sync/src/chain/handler.rs @@ -18,824 +18,949 @@ use api::WARP_SYNC_PROTOCOL_ID; use block_sync::{BlockDownloaderImportError as DownloaderImportError, DownloadAction}; use bytes::Bytes; use enum_primitive::FromPrimitive; -use ethcore::error::{Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind, BlockError}; -use ethcore::snapshot::{ManifestData, RestorationStatus}; -use ethcore::verification::queue::kind::blocks::Unverified; +use ethcore::{ + error::{BlockError, Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind}, + snapshot::{ManifestData, RestorationStatus}, + verification::queue::kind::blocks::Unverified, +}; use ethereum_types::{H256, U256}; use hash::keccak; -use network::PeerId; -use network::client_version::ClientVersion; +use network::{client_version::ClientVersion, PeerId}; use rlp::Rlp; use snapshot::ChunkType; -use std::time::Instant; -use std::{mem, cmp}; +use std::{cmp, mem, time::Instant}; use sync_io::SyncIo; -use types::BlockNumber; -use types::block_status::BlockStatus; -use types::ids::BlockId; +use types::{block_status::BlockStatus, ids::BlockId, BlockNumber}; -use super::sync_packet::{PacketInfo, SyncPacket}; -use super::sync_packet::SyncPacket::{ - StatusPacket, - NewBlockHashesPacket, - BlockHeadersPacket, - BlockBodiesPacket, - NewBlockPacket, - ReceiptsPacket, - SnapshotManifestPacket, - SnapshotDataPacket, - PrivateTransactionPacket, - SignedPrivateTransactionPacket, +use super::sync_packet::{ + PacketInfo, SyncPacket, + SyncPacket::{ + BlockBodiesPacket, BlockHeadersPacket, NewBlockHashesPacket, NewBlockPacket, + PrivateTransactionPacket, ReceiptsPacket, SignedPrivateTransactionPacket, + SnapshotDataPacket, SnapshotManifestPacket, StatusPacket, + }, }; use super::{ - BlockSet, - ChainSync, - ForkConfirmation, - PacketDecodeError, - PeerAsking, - PeerInfo, - SyncRequester, - SyncState, - ETH_PROTOCOL_VERSION_62, - 
ETH_PROTOCOL_VERSION_63, - MAX_NEW_BLOCK_AGE, - MAX_NEW_HASHES, - PAR_PROTOCOL_VERSION_1, - PAR_PROTOCOL_VERSION_3, + BlockSet, ChainSync, ForkConfirmation, PacketDecodeError, PeerAsking, PeerInfo, SyncRequester, + SyncState, ETH_PROTOCOL_VERSION_62, ETH_PROTOCOL_VERSION_63, MAX_NEW_BLOCK_AGE, MAX_NEW_HASHES, + PAR_PROTOCOL_VERSION_1, PAR_PROTOCOL_VERSION_3, }; /// The Chain Sync Handler: handles responses from peers pub struct SyncHandler; impl SyncHandler { - /// Handle incoming packet from peer - pub fn on_packet(sync: &mut ChainSync, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { - let rlp = Rlp::new(data); - if let Some(packet_id) = SyncPacket::from_u8(packet_id) { - let result = match packet_id { - StatusPacket => SyncHandler::on_peer_status(sync, io, peer, &rlp), - BlockHeadersPacket => SyncHandler::on_peer_block_headers(sync, io, peer, &rlp), - BlockBodiesPacket => SyncHandler::on_peer_block_bodies(sync, io, peer, &rlp), - ReceiptsPacket => SyncHandler::on_peer_block_receipts(sync, io, peer, &rlp), - NewBlockPacket => SyncHandler::on_peer_new_block(sync, io, peer, &rlp), - NewBlockHashesPacket => SyncHandler::on_peer_new_hashes(sync, io, peer, &rlp), - SnapshotManifestPacket => SyncHandler::on_snapshot_manifest(sync, io, peer, &rlp), - SnapshotDataPacket => SyncHandler::on_snapshot_data(sync, io, peer, &rlp), - PrivateTransactionPacket => SyncHandler::on_private_transaction(sync, io, peer, &rlp), - SignedPrivateTransactionPacket => SyncHandler::on_signed_private_transaction(sync, io, peer, &rlp), - _ => { - debug!(target: "sync", "{}: Unknown packet {}", peer, packet_id.id()); - Ok(()) - } - }; + /// Handle incoming packet from peer + pub fn on_packet( + sync: &mut ChainSync, + io: &mut SyncIo, + peer: PeerId, + packet_id: u8, + data: &[u8], + ) { + let rlp = Rlp::new(data); + if let Some(packet_id) = SyncPacket::from_u8(packet_id) { + let result = match packet_id { + StatusPacket => SyncHandler::on_peer_status(sync, io, peer, &rlp), + 
BlockHeadersPacket => SyncHandler::on_peer_block_headers(sync, io, peer, &rlp), + BlockBodiesPacket => SyncHandler::on_peer_block_bodies(sync, io, peer, &rlp), + ReceiptsPacket => SyncHandler::on_peer_block_receipts(sync, io, peer, &rlp), + NewBlockPacket => SyncHandler::on_peer_new_block(sync, io, peer, &rlp), + NewBlockHashesPacket => SyncHandler::on_peer_new_hashes(sync, io, peer, &rlp), + SnapshotManifestPacket => SyncHandler::on_snapshot_manifest(sync, io, peer, &rlp), + SnapshotDataPacket => SyncHandler::on_snapshot_data(sync, io, peer, &rlp), + PrivateTransactionPacket => { + SyncHandler::on_private_transaction(sync, io, peer, &rlp) + } + SignedPrivateTransactionPacket => { + SyncHandler::on_signed_private_transaction(sync, io, peer, &rlp) + } + _ => { + debug!(target: "sync", "{}: Unknown packet {}", peer, packet_id.id()); + Ok(()) + } + }; - match result { - Err(DownloaderImportError::Invalid) => { - debug!(target:"sync", "{} -> Invalid packet {}", peer, packet_id.id()); - io.disable_peer(peer); - sync.deactivate_peer(io, peer); - }, - Err(DownloaderImportError::Useless) => { - sync.deactivate_peer(io, peer); - }, - Ok(()) => { - // give a task to the same peer first - sync.sync_peer(io, peer, false); - }, - } - } else { - debug!(target: "sync", "{}: Unknown packet {}", peer, packet_id); - } - } + match result { + Err(DownloaderImportError::Invalid) => { + debug!(target:"sync", "{} -> Invalid packet {}", peer, packet_id.id()); + io.disable_peer(peer); + sync.deactivate_peer(io, peer); + } + Err(DownloaderImportError::Useless) => { + sync.deactivate_peer(io, peer); + } + Ok(()) => { + // give a task to the same peer first + sync.sync_peer(io, peer, false); + } + } + } else { + debug!(target: "sync", "{}: Unknown packet {}", peer, packet_id); + } + } - /// Called when peer sends us new consensus packet - pub fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &Rlp) { - trace!(target: "sync", "Received consensus packet from {:?}", peer_id); - 
io.chain().queue_consensus_message(r.as_raw().to_vec()); - } + /// Called when peer sends us new consensus packet + pub fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &Rlp) { + trace!(target: "sync", "Received consensus packet from {:?}", peer_id); + io.chain().queue_consensus_message(r.as_raw().to_vec()); + } - /// Called by peer when it is disconnecting - pub fn on_peer_aborting(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) { - trace!(target: "sync", "== Disconnecting {}: {}", peer_id, io.peer_version(peer_id)); - sync.handshaking_peers.remove(&peer_id); - if sync.peers.contains_key(&peer_id) { - debug!(target: "sync", "Disconnected {}", peer_id); - sync.clear_peer_download(peer_id); - sync.peers.remove(&peer_id); - sync.active_peers.remove(&peer_id); + /// Called by peer when it is disconnecting + pub fn on_peer_aborting(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) { + trace!(target: "sync", "== Disconnecting {}: {}", peer_id, io.peer_version(peer_id)); + sync.handshaking_peers.remove(&peer_id); + if sync.peers.contains_key(&peer_id) { + debug!(target: "sync", "Disconnected {}", peer_id); + sync.clear_peer_download(peer_id); + sync.peers.remove(&peer_id); + sync.active_peers.remove(&peer_id); - if sync.state == SyncState::SnapshotManifest { - // Check if we are asking other peers for - // the snapshot manifest as well. - // If not, return to initial state - let still_asking_manifest = sync.peers.iter() - .filter(|&(id, p)| sync.active_peers.contains(id) && p.asking == PeerAsking::SnapshotManifest) - .next().is_none(); + if sync.state == SyncState::SnapshotManifest { + // Check if we are asking other peers for + // the snapshot manifest as well. 
+ // If not, return to initial state + let still_asking_manifest = sync + .peers + .iter() + .filter(|&(id, p)| { + sync.active_peers.contains(id) && p.asking == PeerAsking::SnapshotManifest + }) + .next() + .is_none(); - if still_asking_manifest { - sync.state = ChainSync::get_init_state(sync.warp_sync, io.chain()); - } - } - sync.continue_sync(io); - } - } + if still_asking_manifest { + sync.state = ChainSync::get_init_state(sync.warp_sync, io.chain()); + } + } + sync.continue_sync(io); + } + } - /// Called when a new peer is connected - pub fn on_peer_connected(sync: &mut ChainSync, io: &mut SyncIo, peer: PeerId) { - trace!(target: "sync", "== Connected {}: {}", peer, io.peer_version(peer)); - if let Err(e) = sync.send_status(io, peer) { - debug!(target:"sync", "Error sending status request: {:?}", e); - io.disconnect_peer(peer); - } else { - sync.handshaking_peers.insert(peer, Instant::now()); - } - } + /// Called when a new peer is connected + pub fn on_peer_connected(sync: &mut ChainSync, io: &mut SyncIo, peer: PeerId) { + trace!(target: "sync", "== Connected {}: {}", peer, io.peer_version(peer)); + if let Err(e) = sync.send_status(io, peer) { + debug!(target:"sync", "Error sending status request: {:?}", e); + io.disconnect_peer(peer); + } else { + sync.handshaking_peers.insert(peer, Instant::now()); + } + } - /// Called by peer once it has new block bodies - pub fn on_peer_new_block(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id); - return Ok(()); - } - let difficulty: U256 = r.val_at(1)?; - if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { - if peer.difficulty.map_or(true, |pd| difficulty > pd) { - peer.difficulty = Some(difficulty); - } - } - let block = Unverified::from_rlp(r.at(0)?.as_raw().to_vec())?; - let hash = block.header.hash(); - let 
number = block.header.number(); - trace!(target: "sync", "{} -> NewBlock ({})", peer_id, hash); - if number > sync.highest_block.unwrap_or(0) { - sync.highest_block = Some(number); - } - let mut unknown = false; + /// Called by peer once it has new block bodies + pub fn on_peer_new_block( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id); + return Ok(()); + } + let difficulty: U256 = r.val_at(1)?; + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + if peer.difficulty.map_or(true, |pd| difficulty > pd) { + peer.difficulty = Some(difficulty); + } + } + let block = Unverified::from_rlp(r.at(0)?.as_raw().to_vec())?; + let hash = block.header.hash(); + let number = block.header.number(); + trace!(target: "sync", "{} -> NewBlock ({})", peer_id, hash); + if number > sync.highest_block.unwrap_or(0) { + sync.highest_block = Some(number); + } + let mut unknown = false; - if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { - peer.latest_hash = hash; - } + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + peer.latest_hash = hash; + } - let last_imported_number = sync.new_blocks.last_imported_block_number(); - if last_imported_number > number && last_imported_number - number > MAX_NEW_BLOCK_AGE { - trace!(target: "sync", "Ignored ancient new block {:?}", hash); - return Err(DownloaderImportError::Invalid); - } - match io.chain().import_block(block) { - Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { - trace!(target: "sync", "New block already in chain {:?}", hash); - }, - Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyQueued), _)) => { - trace!(target: "sync", "New block already queued {:?}", hash); - }, - Ok(_) => { - // abort current download of the same block - 
sync.complete_sync(io); - sync.new_blocks.mark_as_known(&hash, number); - trace!(target: "sync", "New block queued {:?} ({})", hash, number); - }, - Err(EthcoreError(EthcoreErrorKind::Block(BlockError::UnknownParent(p)), _)) => { - unknown = true; - trace!(target: "sync", "New block with unknown parent ({:?}) {:?}", p, hash); - }, - Err(e) => { - debug!(target: "sync", "Bad new block {:?} : {:?}", hash, e); - return Err(DownloaderImportError::Invalid); - } - }; - if unknown { - if sync.state != SyncState::Idle { - trace!(target: "sync", "NewBlock ignored while seeking"); - } else { - trace!(target: "sync", "New unknown block {:?}", hash); - //TODO: handle too many unknown blocks - sync.sync_peer(io, peer_id, true); - } - } - Ok(()) - } + let last_imported_number = sync.new_blocks.last_imported_block_number(); + if last_imported_number > number && last_imported_number - number > MAX_NEW_BLOCK_AGE { + trace!(target: "sync", "Ignored ancient new block {:?}", hash); + return Err(DownloaderImportError::Invalid); + } + match io.chain().import_block(block) { + Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { + trace!(target: "sync", "New block already in chain {:?}", hash); + } + Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyQueued), _)) => { + trace!(target: "sync", "New block already queued {:?}", hash); + } + Ok(_) => { + // abort current download of the same block + sync.complete_sync(io); + sync.new_blocks.mark_as_known(&hash, number); + trace!(target: "sync", "New block queued {:?} ({})", hash, number); + } + Err(EthcoreError(EthcoreErrorKind::Block(BlockError::UnknownParent(p)), _)) => { + unknown = true; + trace!(target: "sync", "New block with unknown parent ({:?}) {:?}", p, hash); + } + Err(e) => { + debug!(target: "sync", "Bad new block {:?} : {:?}", hash, e); + return Err(DownloaderImportError::Invalid); + } + }; + if unknown { + if sync.state != SyncState::Idle { + trace!(target: "sync", "NewBlock 
ignored while seeking"); + } else { + trace!(target: "sync", "New unknown block {:?}", hash); + //TODO: handle too many unknown blocks + sync.sync_peer(io, peer_id, true); + } + } + Ok(()) + } - /// Handles `NewHashes` packet. Initiates headers download for any unknown hashes. - pub fn on_peer_new_hashes(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "Ignoring new hashes from unconfirmed peer {}", peer_id); - return Ok(()); - } - let hashes: Vec<_> = r.iter().take(MAX_NEW_HASHES).map(|item| (item.val_at::(0), item.val_at::(1))).collect(); - if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { - // Peer has new blocks with unknown difficulty - peer.difficulty = None; - if let Some(&(Ok(ref h), _)) = hashes.last() { - peer.latest_hash = h.clone(); - } - } - if sync.state != SyncState::Idle { - trace!(target: "sync", "Ignoring new hashes since we're already downloading."); - let max = r.iter().take(MAX_NEW_HASHES).map(|item| item.val_at::(1).unwrap_or(0)).fold(0u64, cmp::max); - if max > sync.highest_block.unwrap_or(0) { - sync.highest_block = Some(max); - } - return Ok(()); - } - trace!(target: "sync", "{} -> NewHashes ({} entries)", peer_id, r.item_count()?); - let mut max_height: BlockNumber = 0; - let mut new_hashes = Vec::new(); - let last_imported_number = sync.new_blocks.last_imported_block_number(); - for (rh, rn) in hashes { - let hash = rh?; - let number = rn?; - if number > sync.highest_block.unwrap_or(0) { - sync.highest_block = Some(number); - } - if sync.new_blocks.is_downloading(&hash) { - continue; - } - if last_imported_number > number && last_imported_number - number > MAX_NEW_BLOCK_AGE { - trace!(target: "sync", "Ignored ancient new block hash {:?}", hash); - return Err(DownloaderImportError::Invalid); - } - match io.chain().block_status(BlockId::Hash(hash.clone())) { - BlockStatus::InChain => { - 
trace!(target: "sync", "New block hash already in chain {:?}", hash); - }, - BlockStatus::Queued => { - trace!(target: "sync", "New hash block already queued {:?}", hash); - }, - BlockStatus::Unknown => { - new_hashes.push(hash.clone()); - if number > max_height { - trace!(target: "sync", "New unknown block hash {:?}", hash); - if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { - peer.latest_hash = hash.clone(); - } - max_height = number; - } - }, - BlockStatus::Bad => { - debug!(target: "sync", "Bad new block hash {:?}", hash); - return Err(DownloaderImportError::Invalid); - } - } - }; - if max_height != 0 { - trace!(target: "sync", "Downloading blocks for new hashes"); - sync.new_blocks.reset_to(new_hashes); - sync.state = SyncState::NewBlocks; - sync.sync_peer(io, peer_id, true); - } - Ok(()) - } + /// Handles `NewHashes` packet. Initiates headers download for any unknown hashes. + pub fn on_peer_new_hashes( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring new hashes from unconfirmed peer {}", peer_id); + return Ok(()); + } + let hashes: Vec<_> = r + .iter() + .take(MAX_NEW_HASHES) + .map(|item| (item.val_at::(0), item.val_at::(1))) + .collect(); + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + // Peer has new blocks with unknown difficulty + peer.difficulty = None; + if let Some(&(Ok(ref h), _)) = hashes.last() { + peer.latest_hash = h.clone(); + } + } + if sync.state != SyncState::Idle { + trace!(target: "sync", "Ignoring new hashes since we're already downloading."); + let max = r + .iter() + .take(MAX_NEW_HASHES) + .map(|item| item.val_at::(1).unwrap_or(0)) + .fold(0u64, cmp::max); + if max > sync.highest_block.unwrap_or(0) { + sync.highest_block = Some(max); + } + return Ok(()); + } + trace!(target: "sync", "{} -> NewHashes ({} entries)", peer_id, r.item_count()?); + let 
mut max_height: BlockNumber = 0; + let mut new_hashes = Vec::new(); + let last_imported_number = sync.new_blocks.last_imported_block_number(); + for (rh, rn) in hashes { + let hash = rh?; + let number = rn?; + if number > sync.highest_block.unwrap_or(0) { + sync.highest_block = Some(number); + } + if sync.new_blocks.is_downloading(&hash) { + continue; + } + if last_imported_number > number && last_imported_number - number > MAX_NEW_BLOCK_AGE { + trace!(target: "sync", "Ignored ancient new block hash {:?}", hash); + return Err(DownloaderImportError::Invalid); + } + match io.chain().block_status(BlockId::Hash(hash.clone())) { + BlockStatus::InChain => { + trace!(target: "sync", "New block hash already in chain {:?}", hash); + } + BlockStatus::Queued => { + trace!(target: "sync", "New hash block already queued {:?}", hash); + } + BlockStatus::Unknown => { + new_hashes.push(hash.clone()); + if number > max_height { + trace!(target: "sync", "New unknown block hash {:?}", hash); + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + peer.latest_hash = hash.clone(); + } + max_height = number; + } + } + BlockStatus::Bad => { + debug!(target: "sync", "Bad new block hash {:?}", hash); + return Err(DownloaderImportError::Invalid); + } + } + } + if max_height != 0 { + trace!(target: "sync", "Downloading blocks for new hashes"); + sync.new_blocks.reset_to(new_hashes); + sync.state = SyncState::NewBlocks; + sync.sync_peer(io, peer_id, true); + } + Ok(()) + } - /// Called by peer once it has new block bodies - fn on_peer_block_bodies(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - sync.clear_peer_download(peer_id); - let block_set = sync.peers.get(&peer_id) - .and_then(|p| p.block_set) - .unwrap_or(BlockSet::NewBlocks); - let allowed = sync.peers.get(&peer_id).map(|p| p.is_allowed()).unwrap_or(false); + /// Called by peer once it has new block bodies + fn on_peer_block_bodies( + sync: &mut ChainSync, + io: &mut 
SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + sync.clear_peer_download(peer_id); + let block_set = sync + .peers + .get(&peer_id) + .and_then(|p| p.block_set) + .unwrap_or(BlockSet::NewBlocks); + let allowed = sync + .peers + .get(&peer_id) + .map(|p| p.is_allowed()) + .unwrap_or(false); - if !sync.reset_peer_asking(peer_id, PeerAsking::BlockBodies) || !allowed { - trace!(target: "sync", "{}: Ignored unexpected bodies", peer_id); - return Ok(()); - } - let expected_blocks = match sync.peers.get_mut(&peer_id) { - Some(peer) => mem::replace(&mut peer.asking_blocks, Vec::new()), - None => { - trace!(target: "sync", "{}: Ignored unexpected bodies (peer not found)", peer_id); - return Ok(()); - } - }; - let item_count = r.item_count()?; - trace!(target: "sync", "{} -> BlockBodies ({} entries), set = {:?}", peer_id, item_count, block_set); - if item_count == 0 { - Err(DownloaderImportError::Useless) - } else if sync.state == SyncState::Waiting { - trace!(target: "sync", "Ignored block bodies while waiting"); - Ok(()) - } else { - { - let downloader = match block_set { - BlockSet::NewBlocks => &mut sync.new_blocks, - BlockSet::OldBlocks => match sync.old_blocks { - None => { - trace!(target: "sync", "Ignored block headers while block download is inactive"); - return Ok(()); - }, - Some(ref mut blocks) => blocks, - } - }; - downloader.import_bodies(r, expected_blocks.as_slice())?; - } - sync.collect_blocks(io, block_set); - Ok(()) - } - } + if !sync.reset_peer_asking(peer_id, PeerAsking::BlockBodies) || !allowed { + trace!(target: "sync", "{}: Ignored unexpected bodies", peer_id); + return Ok(()); + } + let expected_blocks = match sync.peers.get_mut(&peer_id) { + Some(peer) => mem::replace(&mut peer.asking_blocks, Vec::new()), + None => { + trace!(target: "sync", "{}: Ignored unexpected bodies (peer not found)", peer_id); + return Ok(()); + } + }; + let item_count = r.item_count()?; + trace!(target: "sync", "{} -> BlockBodies ({} 
entries), set = {:?}", peer_id, item_count, block_set); + if item_count == 0 { + Err(DownloaderImportError::Useless) + } else if sync.state == SyncState::Waiting { + trace!(target: "sync", "Ignored block bodies while waiting"); + Ok(()) + } else { + { + let downloader = match block_set { + BlockSet::NewBlocks => &mut sync.new_blocks, + BlockSet::OldBlocks => match sync.old_blocks { + None => { + trace!(target: "sync", "Ignored block headers while block download is inactive"); + return Ok(()); + } + Some(ref mut blocks) => blocks, + }, + }; + downloader.import_bodies(r, expected_blocks.as_slice())?; + } + sync.collect_blocks(io, block_set); + Ok(()) + } + } - fn on_peer_fork_header(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - { - let peer = sync.peers.get_mut(&peer_id).expect("Is only called when peer is present in peers"); - peer.asking = PeerAsking::Nothing; - let item_count = r.item_count()?; - let (fork_number, fork_hash) = sync.fork_block.expect("ForkHeader request is sent only fork block is Some; qed").clone(); + fn on_peer_fork_header( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + { + let peer = sync + .peers + .get_mut(&peer_id) + .expect("Is only called when peer is present in peers"); + peer.asking = PeerAsking::Nothing; + let item_count = r.item_count()?; + let (fork_number, fork_hash) = sync + .fork_block + .expect("ForkHeader request is sent only fork block is Some; qed") + .clone(); - if item_count == 0 || item_count != 1 { - trace!(target: "sync", "{}: Chain is too short to confirm the block", peer_id); - peer.confirmation = ForkConfirmation::TooShort; + if item_count == 0 || item_count != 1 { + trace!(target: "sync", "{}: Chain is too short to confirm the block", peer_id); + peer.confirmation = ForkConfirmation::TooShort; + } else { + let header = r.at(0)?.as_raw(); + if keccak(&header) != fork_hash { + trace!(target: 
"sync", "{}: Fork mismatch", peer_id); + return Err(DownloaderImportError::Invalid); + } - } else { - let header = r.at(0)?.as_raw(); - if keccak(&header) != fork_hash { - trace!(target: "sync", "{}: Fork mismatch", peer_id); - return Err(DownloaderImportError::Invalid); - } + trace!(target: "sync", "{}: Confirmed peer", peer_id); + peer.confirmation = ForkConfirmation::Confirmed; - trace!(target: "sync", "{}: Confirmed peer", peer_id); - peer.confirmation = ForkConfirmation::Confirmed; + if !io.chain_overlay().read().contains_key(&fork_number) { + trace!(target: "sync", "Inserting (fork) block {} header", fork_number); + io.chain_overlay() + .write() + .insert(fork_number, header.to_vec()); + } + } + } - if !io.chain_overlay().read().contains_key(&fork_number) { - trace!(target: "sync", "Inserting (fork) block {} header", fork_number); - io.chain_overlay().write().insert(fork_number, header.to_vec()); - } - } - } + return Ok(()); + } - return Ok(()); - } + /// Called by peer once it has new block headers during sync + fn on_peer_block_headers( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + let is_fork_header_request = match sync.peers.get(&peer_id) { + Some(peer) if peer.asking == PeerAsking::ForkHeader => true, + _ => false, + }; - /// Called by peer once it has new block headers during sync - fn on_peer_block_headers(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - let is_fork_header_request = match sync.peers.get(&peer_id) { - Some(peer) if peer.asking == PeerAsking::ForkHeader => true, - _ => false, - }; + if is_fork_header_request { + return SyncHandler::on_peer_fork_header(sync, io, peer_id, r); + } - if is_fork_header_request { - return SyncHandler::on_peer_fork_header(sync, io, peer_id, r); - } + sync.clear_peer_download(peer_id); + let expected_hash = sync.peers.get(&peer_id).and_then(|p| p.asking_hash); + let allowed = sync + 
.peers + .get(&peer_id) + .map(|p| p.is_allowed()) + .unwrap_or(false); + let block_set = sync + .peers + .get(&peer_id) + .and_then(|p| p.block_set) + .unwrap_or(BlockSet::NewBlocks); - sync.clear_peer_download(peer_id); - let expected_hash = sync.peers.get(&peer_id).and_then(|p| p.asking_hash); - let allowed = sync.peers.get(&peer_id).map(|p| p.is_allowed()).unwrap_or(false); - let block_set = sync.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks); + if !sync.reset_peer_asking(peer_id, PeerAsking::BlockHeaders) { + debug!(target: "sync", "{}: Ignored unexpected headers", peer_id); + return Ok(()); + } + let expected_hash = match expected_hash { + Some(hash) => hash, + None => { + debug!(target: "sync", "{}: Ignored unexpected headers (expected_hash is None)", peer_id); + return Ok(()); + } + }; + if !allowed { + debug!(target: "sync", "{}: Ignored unexpected headers (peer not allowed)", peer_id); + return Ok(()); + } - if !sync.reset_peer_asking(peer_id, PeerAsking::BlockHeaders) { - debug!(target: "sync", "{}: Ignored unexpected headers", peer_id); - return Ok(()); - } - let expected_hash = match expected_hash { - Some(hash) => hash, - None => { - debug!(target: "sync", "{}: Ignored unexpected headers (expected_hash is None)", peer_id); - return Ok(()); - } - }; - if !allowed { - debug!(target: "sync", "{}: Ignored unexpected headers (peer not allowed)", peer_id); - return Ok(()); - } + let item_count = r.item_count()?; + trace!(target: "sync", "{} -> BlockHeaders ({} entries), state = {:?}, set = {:?}", peer_id, item_count, sync.state, block_set); + if (sync.state == SyncState::Idle || sync.state == SyncState::WaitingPeers) + && sync.old_blocks.is_none() + { + trace!(target: "sync", "Ignored unexpected block headers"); + return Ok(()); + } + if sync.state == SyncState::Waiting { + trace!(target: "sync", "Ignored block headers while waiting"); + return Ok(()); + } - let item_count = r.item_count()?; - trace!(target: "sync", "{} -> 
BlockHeaders ({} entries), state = {:?}, set = {:?}", peer_id, item_count, sync.state, block_set); - if (sync.state == SyncState::Idle || sync.state == SyncState::WaitingPeers) && sync.old_blocks.is_none() { - trace!(target: "sync", "Ignored unexpected block headers"); - return Ok(()); - } - if sync.state == SyncState::Waiting { - trace!(target: "sync", "Ignored block headers while waiting"); - return Ok(()); - } + let result = { + let downloader = match block_set { + BlockSet::NewBlocks => &mut sync.new_blocks, + BlockSet::OldBlocks => match sync.old_blocks { + None => { + trace!(target: "sync", "Ignored block headers while block download is inactive"); + return Ok(()); + } + Some(ref mut blocks) => blocks, + }, + }; + downloader.import_headers(io, r, expected_hash)? + }; - let result = { - let downloader = match block_set { - BlockSet::NewBlocks => &mut sync.new_blocks, - BlockSet::OldBlocks => { - match sync.old_blocks { - None => { - trace!(target: "sync", "Ignored block headers while block download is inactive"); - return Ok(()); - }, - Some(ref mut blocks) => blocks, - } - } - }; - downloader.import_headers(io, r, expected_hash)? 
- }; + if result == DownloadAction::Reset { + sync.reset_downloads(block_set); + } - if result == DownloadAction::Reset { - sync.reset_downloads(block_set); - } + sync.collect_blocks(io, block_set); + Ok(()) + } - sync.collect_blocks(io, block_set); - Ok(()) - } + /// Called by peer once it has new block receipts + fn on_peer_block_receipts( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + sync.clear_peer_download(peer_id); + let block_set = sync + .peers + .get(&peer_id) + .and_then(|p| p.block_set) + .unwrap_or(BlockSet::NewBlocks); + let allowed = sync + .peers + .get(&peer_id) + .map(|p| p.is_allowed()) + .unwrap_or(false); + if !sync.reset_peer_asking(peer_id, PeerAsking::BlockReceipts) || !allowed { + trace!(target: "sync", "{}: Ignored unexpected receipts", peer_id); + return Ok(()); + } + let expected_blocks = match sync.peers.get_mut(&peer_id) { + Some(peer) => mem::replace(&mut peer.asking_blocks, Vec::new()), + None => { + trace!(target: "sync", "{}: Ignored unexpected bodies (peer not found)", peer_id); + return Ok(()); + } + }; + let item_count = r.item_count()?; + trace!(target: "sync", "{} -> BlockReceipts ({} entries)", peer_id, item_count); + if item_count == 0 { + Err(DownloaderImportError::Useless) + } else if sync.state == SyncState::Waiting { + trace!(target: "sync", "Ignored block receipts while waiting"); + Ok(()) + } else { + { + let downloader = match block_set { + BlockSet::NewBlocks => &mut sync.new_blocks, + BlockSet::OldBlocks => match sync.old_blocks { + None => { + trace!(target: "sync", "Ignored block headers while block download is inactive"); + return Ok(()); + } + Some(ref mut blocks) => blocks, + }, + }; + downloader.import_receipts(r, expected_blocks.as_slice())?; + } + sync.collect_blocks(io, block_set); + Ok(()) + } + } - /// Called by peer once it has new block receipts - fn on_peer_block_receipts(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: 
&Rlp) -> Result<(), DownloaderImportError> { - sync.clear_peer_download(peer_id); - let block_set = sync.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks); - let allowed = sync.peers.get(&peer_id).map(|p| p.is_allowed()).unwrap_or(false); - if !sync.reset_peer_asking(peer_id, PeerAsking::BlockReceipts) || !allowed { - trace!(target: "sync", "{}: Ignored unexpected receipts", peer_id); - return Ok(()); - } - let expected_blocks = match sync.peers.get_mut(&peer_id) { - Some(peer) => mem::replace(&mut peer.asking_blocks, Vec::new()), - None => { - trace!(target: "sync", "{}: Ignored unexpected bodies (peer not found)", peer_id); - return Ok(()); - } - }; - let item_count = r.item_count()?; - trace!(target: "sync", "{} -> BlockReceipts ({} entries)", peer_id, item_count); - if item_count == 0 { - Err(DownloaderImportError::Useless) - } else if sync.state == SyncState::Waiting { - trace!(target: "sync", "Ignored block receipts while waiting"); - Ok(()) - } else { - { - let downloader = match block_set { - BlockSet::NewBlocks => &mut sync.new_blocks, - BlockSet::OldBlocks => match sync.old_blocks { - None => { - trace!(target: "sync", "Ignored block headers while block download is inactive"); - return Ok(()); - }, - Some(ref mut blocks) => blocks, - } - }; - downloader.import_receipts(r, expected_blocks.as_slice())?; - } - sync.collect_blocks(io, block_set); - Ok(()) - } - } + /// Called when snapshot manifest is downloaded from a peer. 
+ fn on_snapshot_manifest( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id); + return Ok(()); + } + sync.clear_peer_download(peer_id); + if !sync.reset_peer_asking(peer_id, PeerAsking::SnapshotManifest) + || sync.state != SyncState::SnapshotManifest + { + trace!(target: "sync", "{}: Ignored unexpected/expired manifest", peer_id); + return Ok(()); + } - /// Called when snapshot manifest is downloaded from a peer. - fn on_snapshot_manifest(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id); - return Ok(()); - } - sync.clear_peer_download(peer_id); - if !sync.reset_peer_asking(peer_id, PeerAsking::SnapshotManifest) || sync.state != SyncState::SnapshotManifest { - trace!(target: "sync", "{}: Ignored unexpected/expired manifest", peer_id); - return Ok(()); - } + let manifest_rlp = r.at(0)?; + let manifest = ManifestData::from_rlp(manifest_rlp.as_raw())?; - let manifest_rlp = r.at(0)?; - let manifest = ManifestData::from_rlp(manifest_rlp.as_raw())?; + let is_supported_version = io + .snapshot_service() + .supported_versions() + .map_or(false, |(l, h)| { + manifest.version >= l && manifest.version <= h + }); - let is_supported_version = io.snapshot_service().supported_versions() - .map_or(false, |(l, h)| manifest.version >= l && manifest.version <= h); + if !is_supported_version { + trace!(target: "sync", "{}: Snapshot manifest version not supported: {}", peer_id, manifest.version); + return Err(DownloaderImportError::Invalid); + } + sync.snapshot + .reset_to(&manifest, &keccak(manifest_rlp.as_raw())); + io.snapshot_service().begin_restore(manifest); + 
sync.state = SyncState::SnapshotData; - if !is_supported_version { - trace!(target: "sync", "{}: Snapshot manifest version not supported: {}", peer_id, manifest.version); - return Err(DownloaderImportError::Invalid); - } - sync.snapshot.reset_to(&manifest, &keccak(manifest_rlp.as_raw())); - io.snapshot_service().begin_restore(manifest); - sync.state = SyncState::SnapshotData; + Ok(()) + } - Ok(()) - } + /// Called when snapshot data is downloaded from a peer. + fn on_snapshot_data( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id); + return Ok(()); + } + sync.clear_peer_download(peer_id); + if !sync.reset_peer_asking(peer_id, PeerAsking::SnapshotData) + || (sync.state != SyncState::SnapshotData && sync.state != SyncState::SnapshotWaiting) + { + trace!(target: "sync", "{}: Ignored unexpected snapshot data", peer_id); + return Ok(()); + } - /// Called when snapshot data is downloaded from a peer. 
- fn on_snapshot_data(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id); - return Ok(()); - } - sync.clear_peer_download(peer_id); - if !sync.reset_peer_asking(peer_id, PeerAsking::SnapshotData) || (sync.state != SyncState::SnapshotData && sync.state != SyncState::SnapshotWaiting) { - trace!(target: "sync", "{}: Ignored unexpected snapshot data", peer_id); - return Ok(()); - } + // check service status + let status = io.snapshot_service().status(); + match status { + RestorationStatus::Inactive | RestorationStatus::Failed => { + trace!(target: "sync", "{}: Snapshot restoration aborted", peer_id); + sync.state = SyncState::WaitingPeers; - // check service status - let status = io.snapshot_service().status(); - match status { - RestorationStatus::Inactive | RestorationStatus::Failed => { - trace!(target: "sync", "{}: Snapshot restoration aborted", peer_id); - sync.state = SyncState::WaitingPeers; + // only note bad if restoration failed. + if let (Some(hash), RestorationStatus::Failed) = + (sync.snapshot.snapshot_hash(), status) + { + trace!(target: "sync", "Noting snapshot hash {} as bad", hash); + sync.snapshot.note_bad(hash); + } - // only note bad if restoration failed. - if let (Some(hash), RestorationStatus::Failed) = (sync.snapshot.snapshot_hash(), status) { - trace!(target: "sync", "Noting snapshot hash {} as bad", hash); - sync.snapshot.note_bad(hash); - } + sync.snapshot.clear(); + return Ok(()); + } + RestorationStatus::Initializing { .. } => { + trace!(target: "warp", "{}: Snapshot restoration is initializing", peer_id); + return Ok(()); + } + RestorationStatus::Ongoing { .. } => { + trace!(target: "sync", "{}: Snapshot restoration is ongoing", peer_id); + } + } - sync.snapshot.clear(); - return Ok(()); - }, - RestorationStatus::Initializing { .. 
} => { - trace!(target: "warp", "{}: Snapshot restoration is initializing", peer_id); - return Ok(()); - } - RestorationStatus::Ongoing { .. } => { - trace!(target: "sync", "{}: Snapshot restoration is ongoing", peer_id); - }, - } + let snapshot_data: Bytes = r.val_at(0)?; + match sync.snapshot.validate_chunk(&snapshot_data) { + Ok(ChunkType::Block(hash)) => { + trace!(target: "sync", "{}: Processing block chunk", peer_id); + io.snapshot_service() + .restore_block_chunk(hash, snapshot_data); + } + Ok(ChunkType::State(hash)) => { + trace!(target: "sync", "{}: Processing state chunk", peer_id); + io.snapshot_service() + .restore_state_chunk(hash, snapshot_data); + } + Err(()) => { + trace!(target: "sync", "{}: Got bad snapshot chunk", peer_id); + io.disconnect_peer(peer_id); + return Ok(()); + } + } - let snapshot_data: Bytes = r.val_at(0)?; - match sync.snapshot.validate_chunk(&snapshot_data) { - Ok(ChunkType::Block(hash)) => { - trace!(target: "sync", "{}: Processing block chunk", peer_id); - io.snapshot_service().restore_block_chunk(hash, snapshot_data); - } - Ok(ChunkType::State(hash)) => { - trace!(target: "sync", "{}: Processing state chunk", peer_id); - io.snapshot_service().restore_state_chunk(hash, snapshot_data); - } - Err(()) => { - trace!(target: "sync", "{}: Got bad snapshot chunk", peer_id); - io.disconnect_peer(peer_id); - return Ok(()); - } - } + if sync.snapshot.is_complete() { + // wait for snapshot restoration process to complete + sync.state = SyncState::SnapshotWaiting; + } - if sync.snapshot.is_complete() { - // wait for snapshot restoration process to complete - sync.state = SyncState::SnapshotWaiting; - } + Ok(()) + } - Ok(()) - } + /// Called by peer to report status + fn on_peer_status( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + sync.handshaking_peers.remove(&peer_id); + let protocol_version: u8 = r.val_at(0)?; + let warp_protocol_version = 
io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer_id); + let warp_protocol = warp_protocol_version != 0; + let private_tx_protocol = warp_protocol_version >= PAR_PROTOCOL_VERSION_3.0; + let peer = PeerInfo { + protocol_version: protocol_version, + network_id: r.val_at(1)?, + difficulty: Some(r.val_at(2)?), + latest_hash: r.val_at(3)?, + genesis: r.val_at(4)?, + asking: PeerAsking::Nothing, + asking_blocks: Vec::new(), + asking_hash: None, + ask_time: Instant::now(), + last_sent_transactions: Default::default(), + last_sent_private_transactions: Default::default(), + expired: false, + confirmation: if sync.fork_block.is_none() { + ForkConfirmation::Confirmed + } else { + ForkConfirmation::Unconfirmed + }, + asking_snapshot_data: None, + snapshot_hash: if warp_protocol { + Some(r.val_at(5)?) + } else { + None + }, + snapshot_number: if warp_protocol { + Some(r.val_at(6)?) + } else { + None + }, + block_set: None, + private_tx_enabled: if private_tx_protocol { + r.val_at(7).unwrap_or(false) + } else { + false + }, + client_version: ClientVersion::from(io.peer_version(peer_id)), + }; - /// Called by peer to report status - fn on_peer_status(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - sync.handshaking_peers.remove(&peer_id); - let protocol_version: u8 = r.val_at(0)?; - let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer_id); - let warp_protocol = warp_protocol_version != 0; - let private_tx_protocol = warp_protocol_version >= PAR_PROTOCOL_VERSION_3.0; - let peer = PeerInfo { - protocol_version: protocol_version, - network_id: r.val_at(1)?, - difficulty: Some(r.val_at(2)?), - latest_hash: r.val_at(3)?, - genesis: r.val_at(4)?, - asking: PeerAsking::Nothing, - asking_blocks: Vec::new(), - asking_hash: None, - ask_time: Instant::now(), - last_sent_transactions: Default::default(), - last_sent_private_transactions: Default::default(), - expired: false, - confirmation: if 
sync.fork_block.is_none() { ForkConfirmation::Confirmed } else { ForkConfirmation::Unconfirmed }, - asking_snapshot_data: None, - snapshot_hash: if warp_protocol { Some(r.val_at(5)?) } else { None }, - snapshot_number: if warp_protocol { Some(r.val_at(6)?) } else { None }, - block_set: None, - private_tx_enabled: if private_tx_protocol { r.val_at(7).unwrap_or(false) } else { false }, - client_version: ClientVersion::from(io.peer_version(peer_id)), - }; + trace!(target: "sync", "New peer {} (\ + protocol: {}, \ + network: {:?}, \ + difficulty: {:?}, \ + latest:{}, \ + genesis:{}, \ + snapshot:{:?}, \ + private_tx_enabled:{})", + peer_id, + peer.protocol_version, + peer.network_id, + peer.difficulty, + peer.latest_hash, + peer.genesis, + peer.snapshot_number, + peer.private_tx_enabled + ); + if io.is_expired() { + trace!(target: "sync", "Status packet from expired session {}:{}", peer_id, io.peer_version(peer_id)); + return Ok(()); + } - trace!(target: "sync", "New peer {} (\ - protocol: {}, \ - network: {:?}, \ - difficulty: {:?}, \ - latest:{}, \ - genesis:{}, \ - snapshot:{:?}, \ - private_tx_enabled:{})", - peer_id, - peer.protocol_version, - peer.network_id, - peer.difficulty, - peer.latest_hash, - peer.genesis, - peer.snapshot_number, - peer.private_tx_enabled - ); - if io.is_expired() { - trace!(target: "sync", "Status packet from expired session {}:{}", peer_id, io.peer_version(peer_id)); - return Ok(()); - } + if sync.peers.contains_key(&peer_id) { + debug!(target: "sync", "Unexpected status packet from {}:{}", peer_id, io.peer_version(peer_id)); + return Ok(()); + } + let chain_info = io.chain().chain_info(); + if peer.genesis != chain_info.genesis_hash { + trace!(target: "sync", "Peer {} genesis hash mismatch (ours: {}, theirs: {})", peer_id, chain_info.genesis_hash, peer.genesis); + return Err(DownloaderImportError::Invalid); + } + if peer.network_id != sync.network_id { + trace!(target: "sync", "Peer {} network id mismatch (ours: {}, theirs: {})", 
peer_id, sync.network_id, peer.network_id); + return Err(DownloaderImportError::Invalid); + } - if sync.peers.contains_key(&peer_id) { - debug!(target: "sync", "Unexpected status packet from {}:{}", peer_id, io.peer_version(peer_id)); - return Ok(()); - } - let chain_info = io.chain().chain_info(); - if peer.genesis != chain_info.genesis_hash { - trace!(target: "sync", "Peer {} genesis hash mismatch (ours: {}, theirs: {})", peer_id, chain_info.genesis_hash, peer.genesis); - return Err(DownloaderImportError::Invalid); - } - if peer.network_id != sync.network_id { - trace!(target: "sync", "Peer {} network id mismatch (ours: {}, theirs: {})", peer_id, sync.network_id, peer.network_id); - return Err(DownloaderImportError::Invalid); - } + if false + || (warp_protocol + && (peer.protocol_version < PAR_PROTOCOL_VERSION_1.0 + || peer.protocol_version > PAR_PROTOCOL_VERSION_3.0)) + || (!warp_protocol + && (peer.protocol_version < ETH_PROTOCOL_VERSION_62.0 + || peer.protocol_version > ETH_PROTOCOL_VERSION_63.0)) + { + trace!(target: "sync", "Peer {} unsupported eth protocol ({})", peer_id, peer.protocol_version); + return Err(DownloaderImportError::Invalid); + } - if false - || (warp_protocol && (peer.protocol_version < PAR_PROTOCOL_VERSION_1.0 || peer.protocol_version > PAR_PROTOCOL_VERSION_3.0)) - || (!warp_protocol && (peer.protocol_version < ETH_PROTOCOL_VERSION_62.0 || peer.protocol_version > ETH_PROTOCOL_VERSION_63.0)) - { - trace!(target: "sync", "Peer {} unsupported eth protocol ({})", peer_id, peer.protocol_version); - return Err(DownloaderImportError::Invalid); - } + if sync.sync_start_time.is_none() { + sync.sync_start_time = Some(Instant::now()); + } - if sync.sync_start_time.is_none() { - sync.sync_start_time = Some(Instant::now()); - } + sync.peers.insert(peer_id.clone(), peer); + // Don't activate peer immediatelly when searching for common block. + // Let the current sync round complete first. 
+ sync.active_peers.insert(peer_id.clone()); + debug!(target: "sync", "Connected {}:{}", peer_id, io.peer_version(peer_id)); - sync.peers.insert(peer_id.clone(), peer); - // Don't activate peer immediatelly when searching for common block. - // Let the current sync round complete first. - sync.active_peers.insert(peer_id.clone()); - debug!(target: "sync", "Connected {}:{}", peer_id, io.peer_version(peer_id)); + if let Some((fork_block, _)) = sync.fork_block { + SyncRequester::request_fork_header(sync, io, peer_id, fork_block); + } - if let Some((fork_block, _)) = sync.fork_block { - SyncRequester::request_fork_header(sync, io, peer_id, fork_block); - } + Ok(()) + } - Ok(()) - } + /// Called when peer sends us new transactions + pub fn on_peer_transactions( + sync: &ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), PacketDecodeError> { + // Accept transactions only when fully synced + if !io.is_chain_queue_empty() + || (sync.state != SyncState::Idle && sync.state != SyncState::NewBlocks) + { + trace!(target: "sync", "{} Ignoring transactions while syncing", peer_id); + return Ok(()); + } + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "{} Ignoring transactions from unconfirmed/unknown peer", peer_id); + return Ok(()); + } - /// Called when peer sends us new transactions - pub fn on_peer_transactions(sync: &ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - // Accept transactions only when fully synced - if !io.is_chain_queue_empty() || (sync.state != SyncState::Idle && sync.state != SyncState::NewBlocks) { - trace!(target: "sync", "{} Ignoring transactions while syncing", peer_id); - return Ok(()); - } - if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "{} Ignoring transactions from unconfirmed/unknown peer", peer_id); - return Ok(()); - } + let item_count = r.item_count()?; + trace!(target: "sync", "{:02} -> 
Transactions ({} entries)", peer_id, item_count); + let mut transactions = Vec::with_capacity(item_count); + for i in 0..item_count { + let rlp = r.at(i)?; + let tx = rlp.as_raw().to_vec(); + transactions.push(tx); + } + io.chain().queue_transactions(transactions, peer_id); + Ok(()) + } - let item_count = r.item_count()?; - trace!(target: "sync", "{:02} -> Transactions ({} entries)", peer_id, item_count); - let mut transactions = Vec::with_capacity(item_count); - for i in 0 .. item_count { - let rlp = r.at(i)?; - let tx = rlp.as_raw().to_vec(); - transactions.push(tx); - } - io.chain().queue_transactions(transactions, peer_id); - Ok(()) - } + /// Called when peer sends us signed private transaction packet + fn on_signed_private_transaction( + sync: &mut ChainSync, + _io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); + return Ok(()); + } + let private_handler = match sync.private_tx_handler { + Some(ref handler) => handler, + None => { + trace!(target: "sync", "{} Ignoring private tx packet from peer", peer_id); + return Ok(()); + } + }; + trace!(target: "sync", "Received signed private transaction packet from {:?}", peer_id); + match private_handler.import_signed_private_transaction(r.as_raw()) { + Ok(transaction_hash) => { + //don't send the packet back + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + peer.last_sent_private_transactions.insert(transaction_hash); + } + } + Err(e) => { + trace!(target: "sync", "Ignoring the message, error queueing: {}", e); + } + } + Ok(()) + } - /// Called when peer sends us signed private transaction packet - fn on_signed_private_transaction(sync: &mut ChainSync, _io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: 
"sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); - return Ok(()); - } - let private_handler = match sync.private_tx_handler { - Some(ref handler) => handler, - None => { - trace!(target: "sync", "{} Ignoring private tx packet from peer", peer_id); - return Ok(()); - } - }; - trace!(target: "sync", "Received signed private transaction packet from {:?}", peer_id); - match private_handler.import_signed_private_transaction(r.as_raw()) { - Ok(transaction_hash) => { - //don't send the packet back - if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { - peer.last_sent_private_transactions.insert(transaction_hash); - } - }, - Err(e) => { - trace!(target: "sync", "Ignoring the message, error queueing: {}", e); - } - } - Ok(()) - } - - /// Called when peer sends us new private transaction packet - fn on_private_transaction(sync: &mut ChainSync, _io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { - if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); - return Ok(()); - } - let private_handler = match sync.private_tx_handler { - Some(ref handler) => handler, - None => { - trace!(target: "sync", "{} Ignoring private tx packet from peer", peer_id); - return Ok(()); - } - }; - trace!(target: "sync", "Received private transaction packet from {:?}", peer_id); - match private_handler.import_private_transaction(r.as_raw()) { - Ok(transaction_hash) => { - //don't send the packet back - if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { - peer.last_sent_private_transactions.insert(transaction_hash); - } - }, - Err(e) => { - trace!(target: "sync", "Ignoring the message, error queueing: {}", e); - } - } - Ok(()) - } + /// Called when peer sends us new private transaction packet + fn on_private_transaction( + sync: &mut ChainSync, + _io: &mut SyncIo, + peer_id: PeerId, + r: &Rlp, + ) -> Result<(), DownloaderImportError> { + if 
!sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); + return Ok(()); + } + let private_handler = match sync.private_tx_handler { + Some(ref handler) => handler, + None => { + trace!(target: "sync", "{} Ignoring private tx packet from peer", peer_id); + return Ok(()); + } + }; + trace!(target: "sync", "Received private transaction packet from {:?}", peer_id); + match private_handler.import_private_transaction(r.as_raw()) { + Ok(transaction_hash) => { + //don't send the packet back + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + peer.last_sent_private_transactions.insert(transaction_hash); + } + } + Err(e) => { + trace!(target: "sync", "Ignoring the message, error queueing: {}", e); + } + } + Ok(()) + } } #[cfg(test)] mod tests { - use ethcore::client::{ChainInfo, EachBlockWith, TestBlockChainClient}; - use parking_lot::RwLock; - use rlp::{Rlp}; - use std::collections::{VecDeque}; - use tests::helpers::{TestIo}; - use tests::snapshot::TestSnapshotService; + use ethcore::client::{ChainInfo, EachBlockWith, TestBlockChainClient}; + use parking_lot::RwLock; + use rlp::Rlp; + use std::collections::VecDeque; + use tests::{helpers::TestIo, snapshot::TestSnapshotService}; - use super::*; - use super::super::tests::{ - dummy_sync_with_peer, - get_dummy_block, - get_dummy_blocks, - get_dummy_hashes, - }; + use super::{ + super::tests::{dummy_sync_with_peer, get_dummy_block, get_dummy_blocks, get_dummy_hashes}, + *, + }; - #[test] - fn handles_peer_new_hashes() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + #[test] + fn handles_peer_new_hashes() { + let mut client = TestBlockChainClient::new(); + 
client.add_blocks(10, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let hashes_data = get_dummy_hashes(); - let hashes_rlp = Rlp::new(&hashes_data); + let hashes_data = get_dummy_hashes(); + let hashes_rlp = Rlp::new(&hashes_data); - let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &hashes_rlp); + let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &hashes_rlp); - assert!(result.is_ok()); - } + assert!(result.is_ok()); + } - #[test] - fn handles_peer_new_block_malformed() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); + #[test] + fn handles_peer_new_block_malformed() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Uncle); - let block_data = get_dummy_block(11, client.chain_info().best_block_hash); + let block_data = get_dummy_block(11, client.chain_info().best_block_hash); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - //sync.have_common_block = true; - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + //sync.have_common_block = true; + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let block = Rlp::new(&block_data); + let block = Rlp::new(&block_data); - let result = SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &block); + let result = SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &block); - assert!(result.is_err()); - } + assert!(result.is_err()); + } - #[test] - fn handles_peer_new_block() { - let mut client = 
TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); + #[test] + fn handles_peer_new_block() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Uncle); - let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash); + let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let block = Rlp::new(&block_data); + let block = Rlp::new(&block_data); - SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &block).expect("result to be ok"); - } + SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &block).expect("result to be ok"); + } - #[test] - fn handles_peer_new_block_empty() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + #[test] + fn handles_peer_new_block_empty() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let empty_data = vec![]; - let block = Rlp::new(&empty_data); + let empty_data = vec![]; + let block = Rlp::new(&empty_data); - let result = SyncHandler::on_peer_new_block(&mut 
sync, &mut io, 0, &block); + let result = SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &block); - assert!(result.is_err()); - } + assert!(result.is_err()); + } - #[test] - fn handles_peer_new_hashes_empty() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + #[test] + fn handles_peer_new_hashes_empty() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let empty_hashes_data = vec![]; - let hashes_rlp = Rlp::new(&empty_hashes_data); + let empty_hashes_data = vec![]; + let hashes_rlp = Rlp::new(&empty_hashes_data); - let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &hashes_rlp); + let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &hashes_rlp); - assert!(result.is_ok()); - } + assert!(result.is_ok()); + } } diff --git a/ethcore/sync/src/chain/mod.rs b/ethcore/sync/src/chain/mod.rs index 58359f062..fc114b51a 100644 --- a/ethcore/sync/src/chain/mod.rs +++ b/ethcore/sync/src/chain/mod.rs @@ -88,47 +88,49 @@ //! All other messages are ignored. 
mod handler; -pub mod sync_packet; mod propagator; mod requester; mod supplier; +pub mod sync_packet; -use std::sync::{Arc, mpsc}; -use std::collections::{HashSet, HashMap, BTreeMap}; -use std::cmp; -use std::time::{Duration, Instant}; -use hash::keccak; -use heapsize::HeapSizeOf; +use super::{SyncConfig, WarpSync}; +use api::{EthProtocolInfo as PeerInfoDigest, PriorityTask, WARP_SYNC_PROTOCOL_ID}; +use block_sync::{BlockDownloader, DownloadAction}; +use bytes::Bytes; +use ethcore::{ + client::{BlockChainClient, BlockChainInfo, BlockId, BlockQueueInfo, BlockStatus}, + snapshot::RestorationStatus, +}; use ethereum_types::{H256, U256}; use fastmap::{H256FastMap, H256FastSet}; +use hash::keccak; +use heapsize::HeapSizeOf; +use network::{self, client_version::ClientVersion, PacketId, PeerId}; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; -use bytes::Bytes; -use rlp::{RlpStream, DecoderError}; -use network::{self, PeerId, PacketId}; -use network::client_version::ClientVersion; -use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo, BlockQueueInfo}; -use ethcore::snapshot::{RestorationStatus}; -use sync_io::SyncIo; -use super::{WarpSync, SyncConfig}; -use block_sync::{BlockDownloader, DownloadAction}; -use rand::Rng; -use snapshot::{Snapshot}; -use api::{EthProtocolInfo as PeerInfoDigest, WARP_SYNC_PROTOCOL_ID, PriorityTask}; use private_tx::PrivateTxHandler; -use transactions_stats::{TransactionsStats, Stats as TransactionStats}; -use types::transaction::UnverifiedTransaction; -use types::BlockNumber; +use rand::Rng; +use rlp::{DecoderError, RlpStream}; +use snapshot::Snapshot; +use std::{ + cmp, + collections::{BTreeMap, HashMap, HashSet}, + sync::{mpsc, Arc}, + time::{Duration, Instant}, +}; +use sync_io::SyncIo; +use transactions_stats::{Stats as TransactionStats, TransactionsStats}; +use types::{transaction::UnverifiedTransaction, BlockNumber}; -use self::handler::SyncHandler; -use self::sync_packet::{PacketInfo, SyncPacket}; -use 
self::sync_packet::SyncPacket::{ - NewBlockPacket, - StatusPacket, +use self::{ + handler::SyncHandler, + sync_packet::{ + PacketInfo, SyncPacket, + SyncPacket::{NewBlockPacket, StatusPacket}, + }, }; -use self::propagator::SyncPropagator; -use self::requester::SyncRequester; pub(crate) use self::supplier::SyncSupplier; +use self::{propagator::SyncPropagator, requester::SyncRequester}; known_heap_size!(0, PeerInfo); @@ -180,190 +182,197 @@ const PRIORITY_TASK_DEADLINE: Duration = Duration::from_millis(100); #[derive(Copy, Clone, Eq, PartialEq, Debug)] /// Sync state pub enum SyncState { - /// Collecting enough peers to start syncing. - WaitingPeers, - /// Waiting for snapshot manifest download - SnapshotManifest, - /// Downloading snapshot data - SnapshotData, - /// Waiting for snapshot restoration progress. - SnapshotWaiting, - /// Downloading new blocks - Blocks, - /// Initial chain sync complete. Waiting for new packets - Idle, - /// Block downloading paused. Waiting for block queue to process blocks and free some space - Waiting, - /// Downloading blocks learned from `NewHashes` packet - NewBlocks, + /// Collecting enough peers to start syncing. + WaitingPeers, + /// Waiting for snapshot manifest download + SnapshotManifest, + /// Downloading snapshot data + SnapshotData, + /// Waiting for snapshot restoration progress. + SnapshotWaiting, + /// Downloading new blocks + Blocks, + /// Initial chain sync complete. Waiting for new packets + Idle, + /// Block downloading paused. Waiting for block queue to process blocks and free some space + Waiting, + /// Downloading blocks learned from `NewHashes` packet + NewBlocks, } /// Syncing status and statistics #[derive(Clone, Copy)] pub struct SyncStatus { - /// State - pub state: SyncState, - /// Syncing protocol version. That's the maximum protocol version we connect to. - pub protocol_version: u8, - /// The underlying p2p network version. - pub network_id: u64, - /// `BlockChain` height for the moment the sync started. 
- pub start_block_number: BlockNumber, - /// Last fully downloaded and imported block number (if any). - pub last_imported_block_number: Option, - /// Highest block number in the download queue (if any). - pub highest_block_number: Option, - /// Total number of blocks for the sync process. - pub blocks_total: BlockNumber, - /// Number of blocks downloaded so far. - pub blocks_received: BlockNumber, - /// Total number of connected peers - pub num_peers: usize, - /// Total number of active peers. - pub num_active_peers: usize, - /// Heap memory used in bytes. - pub mem_used: usize, - /// Snapshot chunks - pub num_snapshot_chunks: usize, - /// Snapshot chunks downloaded - pub snapshot_chunks_done: usize, - /// Last fully downloaded and imported ancient block number (if any). - pub last_imported_old_block_number: Option, + /// State + pub state: SyncState, + /// Syncing protocol version. That's the maximum protocol version we connect to. + pub protocol_version: u8, + /// The underlying p2p network version. + pub network_id: u64, + /// `BlockChain` height for the moment the sync started. + pub start_block_number: BlockNumber, + /// Last fully downloaded and imported block number (if any). + pub last_imported_block_number: Option, + /// Highest block number in the download queue (if any). + pub highest_block_number: Option, + /// Total number of blocks for the sync process. + pub blocks_total: BlockNumber, + /// Number of blocks downloaded so far. + pub blocks_received: BlockNumber, + /// Total number of connected peers + pub num_peers: usize, + /// Total number of active peers. + pub num_active_peers: usize, + /// Heap memory used in bytes. + pub mem_used: usize, + /// Snapshot chunks + pub num_snapshot_chunks: usize, + /// Snapshot chunks downloaded + pub snapshot_chunks_done: usize, + /// Last fully downloaded and imported ancient block number (if any). 
+ pub last_imported_old_block_number: Option, } impl SyncStatus { - /// Indicates if snapshot download is in progress - pub fn is_snapshot_syncing(&self) -> bool { - match self.state { - SyncState::SnapshotManifest | - SyncState::SnapshotData | - SyncState::SnapshotWaiting => true, - _ => false, - } - } + /// Indicates if snapshot download is in progress + pub fn is_snapshot_syncing(&self) -> bool { + match self.state { + SyncState::SnapshotManifest | SyncState::SnapshotData | SyncState::SnapshotWaiting => { + true + } + _ => false, + } + } - /// Returns max no of peers to display in informants - pub fn current_max_peers(&self, min_peers: u32, max_peers: u32) -> u32 { - if self.num_peers as u32 > min_peers { - max_peers - } else { - min_peers - } - } + /// Returns max no of peers to display in informants + pub fn current_max_peers(&self, min_peers: u32, max_peers: u32) -> u32 { + if self.num_peers as u32 > min_peers { + max_peers + } else { + min_peers + } + } - /// Is it doing a major sync? - pub fn is_syncing(&self, queue_info: BlockQueueInfo) -> bool { - let is_syncing_state = match self.state { SyncState::Idle | SyncState::NewBlocks => false, _ => true }; - let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3; - is_verifying || is_syncing_state - } + /// Is it doing a major sync? 
+ pub fn is_syncing(&self, queue_info: BlockQueueInfo) -> bool { + let is_syncing_state = match self.state { + SyncState::Idle | SyncState::NewBlocks => false, + _ => true, + }; + let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3; + is_verifying || is_syncing_state + } } #[derive(PartialEq, Eq, Debug, Clone)] /// Peer data type requested pub enum PeerAsking { - Nothing, - ForkHeader, - BlockHeaders, - BlockBodies, - BlockReceipts, - SnapshotManifest, - SnapshotData, + Nothing, + ForkHeader, + BlockHeaders, + BlockBodies, + BlockReceipts, + SnapshotManifest, + SnapshotData, } #[derive(PartialEq, Eq, Debug, Clone, Copy)] /// Block downloader channel. pub enum BlockSet { - /// New blocks better than out best blocks - NewBlocks, - /// Missing old blocks - OldBlocks, + /// New blocks better than out best blocks + NewBlocks, + /// Missing old blocks + OldBlocks, } #[derive(Clone, Eq, PartialEq)] pub enum ForkConfirmation { - /// Fork block confirmation pending. - Unconfirmed, - /// Peer's chain is too short to confirm the fork. - TooShort, - /// Fork is confirmed. - Confirmed, + /// Fork block confirmation pending. + Unconfirmed, + /// Peer's chain is too short to confirm the fork. + TooShort, + /// Fork is confirmed. + Confirmed, } #[derive(Clone)] /// Syncing peer information pub struct PeerInfo { - /// eth protocol version - protocol_version: u8, - /// Peer chain genesis hash - genesis: H256, - /// Peer network id - network_id: u64, - /// Peer best block hash - latest_hash: H256, - /// Peer total difficulty if known - difficulty: Option, - /// Type of data currenty being requested from peer. - asking: PeerAsking, - /// A set of block numbers being requested - asking_blocks: Vec, - /// Holds requested header hash if currently requesting block header by hash - asking_hash: Option, - /// Holds requested snapshot chunk hash if any. 
- asking_snapshot_data: Option, - /// Request timestamp - ask_time: Instant, - /// Holds a set of transactions recently sent to this peer to avoid spamming. - last_sent_transactions: H256FastSet, - /// Holds a set of private transactions and their signatures recently sent to this peer to avoid spamming. - last_sent_private_transactions: H256FastSet, - /// Pending request is expired and result should be ignored - expired: bool, - /// Private transactions enabled - private_tx_enabled: bool, - /// Peer fork confirmation status - confirmation: ForkConfirmation, - /// Best snapshot hash - snapshot_hash: Option, - /// Best snapshot block number - snapshot_number: Option, - /// Block set requested - block_set: Option, - /// Version of the software the peer is running - client_version: ClientVersion, + /// eth protocol version + protocol_version: u8, + /// Peer chain genesis hash + genesis: H256, + /// Peer network id + network_id: u64, + /// Peer best block hash + latest_hash: H256, + /// Peer total difficulty if known + difficulty: Option, + /// Type of data currenty being requested from peer. + asking: PeerAsking, + /// A set of block numbers being requested + asking_blocks: Vec, + /// Holds requested header hash if currently requesting block header by hash + asking_hash: Option, + /// Holds requested snapshot chunk hash if any. + asking_snapshot_data: Option, + /// Request timestamp + ask_time: Instant, + /// Holds a set of transactions recently sent to this peer to avoid spamming. + last_sent_transactions: H256FastSet, + /// Holds a set of private transactions and their signatures recently sent to this peer to avoid spamming. 
+ last_sent_private_transactions: H256FastSet, + /// Pending request is expired and result should be ignored + expired: bool, + /// Private transactions enabled + private_tx_enabled: bool, + /// Peer fork confirmation status + confirmation: ForkConfirmation, + /// Best snapshot hash + snapshot_hash: Option, + /// Best snapshot block number + snapshot_number: Option, + /// Block set requested + block_set: Option, + /// Version of the software the peer is running + client_version: ClientVersion, } impl PeerInfo { - fn can_sync(&self) -> bool { - self.confirmation == ForkConfirmation::Confirmed && !self.expired - } + fn can_sync(&self) -> bool { + self.confirmation == ForkConfirmation::Confirmed && !self.expired + } - fn is_allowed(&self) -> bool { - self.confirmation != ForkConfirmation::Unconfirmed && !self.expired - } + fn is_allowed(&self) -> bool { + self.confirmation != ForkConfirmation::Unconfirmed && !self.expired + } - fn reset_asking(&mut self) { - self.asking_blocks.clear(); - self.asking_hash = None; - // mark any pending requests as expired - if self.asking != PeerAsking::Nothing && self.is_allowed() { - self.expired = true; - } - } + fn reset_asking(&mut self) { + self.asking_blocks.clear(); + self.asking_hash = None; + // mark any pending requests as expired + if self.asking != PeerAsking::Nothing && self.is_allowed() { + self.expired = true; + } + } - fn reset_private_stats(&mut self) { - self.last_sent_private_transactions.clear(); - } + fn reset_private_stats(&mut self) { + self.last_sent_private_transactions.clear(); + } } #[cfg(not(test))] pub mod random { - use rand; - pub fn new() -> rand::ThreadRng { rand::thread_rng() } + use rand; + pub fn new() -> rand::ThreadRng { + rand::thread_rng() + } } #[cfg(test)] pub mod random { - use rand::{self, SeedableRng}; - pub fn new() -> rand::XorShiftRng { rand::XorShiftRng::from_seed([0, 1, 2, 3]) } + use rand::{self, SeedableRng}; + pub fn new() -> rand::XorShiftRng { + rand::XorShiftRng::from_seed([0, 1, 
2, 3]) + } } pub type RlpResponseResult = Result, PacketDecodeError>; @@ -373,401 +382,441 @@ pub type Peers = HashMap; /// /// NOTE always lock in order of fields declaration pub struct ChainSyncApi { - /// Priority tasks queue - priority_tasks: Mutex>, - /// The rest of sync data - sync: RwLock, + /// Priority tasks queue + priority_tasks: Mutex>, + /// The rest of sync data + sync: RwLock, } impl ChainSyncApi { - /// Creates new `ChainSyncApi` - pub fn new( - config: SyncConfig, - chain: &BlockChainClient, - private_tx_handler: Option>, - priority_tasks: mpsc::Receiver, - ) -> Self { - ChainSyncApi { - sync: RwLock::new(ChainSync::new(config, chain, private_tx_handler)), - priority_tasks: Mutex::new(priority_tasks), - } - } + /// Creates new `ChainSyncApi` + pub fn new( + config: SyncConfig, + chain: &BlockChainClient, + private_tx_handler: Option>, + priority_tasks: mpsc::Receiver, + ) -> Self { + ChainSyncApi { + sync: RwLock::new(ChainSync::new(config, chain, private_tx_handler)), + priority_tasks: Mutex::new(priority_tasks), + } + } - /// Gives `write` access to underlying `ChainSync` - pub fn write(&self) -> RwLockWriteGuard { - self.sync.write() - } + /// Gives `write` access to underlying `ChainSync` + pub fn write(&self) -> RwLockWriteGuard { + self.sync.write() + } - /// Returns info about given list of peers - pub fn peer_info(&self, ids: &[PeerId]) -> Vec> { - let sync = self.sync.read(); - ids.iter().map(|id| sync.peer_info(id)).collect() - } + /// Returns info about given list of peers + pub fn peer_info(&self, ids: &[PeerId]) -> Vec> { + let sync = self.sync.read(); + ids.iter().map(|id| sync.peer_info(id)).collect() + } - /// Returns synchonization status - pub fn status(&self) -> SyncStatus { - self.sync.read().status() - } + /// Returns synchonization status + pub fn status(&self) -> SyncStatus { + self.sync.read().status() + } - /// Returns transactions propagation statistics - pub fn transactions_stats(&self) -> BTreeMap { - 
self.sync.read().transactions_stats() - .iter() - .map(|(hash, stats)| (*hash, stats.into())) - .collect() - } + /// Returns transactions propagation statistics + pub fn transactions_stats(&self) -> BTreeMap { + self.sync + .read() + .transactions_stats() + .iter() + .map(|(hash, stats)| (*hash, stats.into())) + .collect() + } - /// Dispatch incoming requests and responses - pub fn dispatch_packet(&self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { - SyncSupplier::dispatch_packet(&self.sync, io, peer, packet_id, data) - } + /// Dispatch incoming requests and responses + pub fn dispatch_packet(&self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { + SyncSupplier::dispatch_packet(&self.sync, io, peer, packet_id, data) + } - /// Process a priority propagation queue. - /// This task is run from a timer and should be time constrained. - /// Hence we set up a deadline for the execution and cancel the task if the deadline is exceeded. - /// - /// NOTE This method should only handle stuff that can be canceled and would reach other peers - /// by other means. - pub fn process_priority_queue(&self, io: &mut SyncIo) { - fn check_deadline(deadline: Instant) -> Option { - let now = Instant::now(); - if now > deadline { - None - } else { - Some(deadline - now) - } - } + /// Process a priority propagation queue. + /// This task is run from a timer and should be time constrained. + /// Hence we set up a deadline for the execution and cancel the task if the deadline is exceeded. + /// + /// NOTE This method should only handle stuff that can be canceled and would reach other peers + /// by other means. 
+ pub fn process_priority_queue(&self, io: &mut SyncIo) { + fn check_deadline(deadline: Instant) -> Option { + let now = Instant::now(); + if now > deadline { + None + } else { + Some(deadline - now) + } + } - // deadline to get the task from the queue - let deadline = Instant::now() + ::api::PRIORITY_TIMER_INTERVAL; - let mut work = || { - let task = { - let tasks = self.priority_tasks.try_lock_until(deadline)?; - let left = check_deadline(deadline)?; - tasks.recv_timeout(left).ok()? - }; - task.starting(); - // wait for the sync lock until deadline, - // note we might drop the task here if we won't manage to acquire the lock. - let mut sync = self.sync.try_write_until(deadline)?; - // since we already have everything let's use a different deadline - // to do the rest of the job now, so that previous work is not wasted. - let deadline = Instant::now() + PRIORITY_TASK_DEADLINE; - let as_ms = move |prev| { - let dur: Duration = Instant::now() - prev; - dur.as_secs() * 1_000 + dur.subsec_millis() as u64 - }; - match task { - // NOTE We can't simply use existing methods, - // cause the block is not in the DB yet. - PriorityTask::PropagateBlock { started, block, hash, difficulty } => { - // try to send to peers that are on the same block as us - // (they will most likely accept the new block). 
- let chain_info = io.chain().chain_info(); - let total_difficulty = chain_info.total_difficulty + difficulty; - let rlp = ChainSync::create_block_rlp(&block, total_difficulty); - for peers in sync.get_peers(&chain_info, PeerState::SameBlock).chunks(10) { - check_deadline(deadline)?; - for peer in peers { - SyncPropagator::send_packet(io, *peer, NewBlockPacket, rlp.clone()); - if let Some(ref mut peer) = sync.peers.get_mut(peer) { - peer.latest_hash = hash; - } - } - } - debug!(target: "sync", "Finished block propagation, took {}ms", as_ms(started)); - }, - PriorityTask::PropagateTransactions(time, _) => { - SyncPropagator::propagate_new_transactions(&mut sync, io, || { - check_deadline(deadline).is_some() - }); - debug!(target: "sync", "Finished transaction propagation, took {}ms", as_ms(time)); - }, - } + // deadline to get the task from the queue + let deadline = Instant::now() + ::api::PRIORITY_TIMER_INTERVAL; + let mut work = || { + let task = { + let tasks = self.priority_tasks.try_lock_until(deadline)?; + let left = check_deadline(deadline)?; + tasks.recv_timeout(left).ok()? + }; + task.starting(); + // wait for the sync lock until deadline, + // note we might drop the task here if we won't manage to acquire the lock. + let mut sync = self.sync.try_write_until(deadline)?; + // since we already have everything let's use a different deadline + // to do the rest of the job now, so that previous work is not wasted. + let deadline = Instant::now() + PRIORITY_TASK_DEADLINE; + let as_ms = move |prev| { + let dur: Duration = Instant::now() - prev; + dur.as_secs() * 1_000 + dur.subsec_millis() as u64 + }; + match task { + // NOTE We can't simply use existing methods, + // cause the block is not in the DB yet. + PriorityTask::PropagateBlock { + started, + block, + hash, + difficulty, + } => { + // try to send to peers that are on the same block as us + // (they will most likely accept the new block). 
+ let chain_info = io.chain().chain_info(); + let total_difficulty = chain_info.total_difficulty + difficulty; + let rlp = ChainSync::create_block_rlp(&block, total_difficulty); + for peers in sync.get_peers(&chain_info, PeerState::SameBlock).chunks(10) { + check_deadline(deadline)?; + for peer in peers { + SyncPropagator::send_packet(io, *peer, NewBlockPacket, rlp.clone()); + if let Some(ref mut peer) = sync.peers.get_mut(peer) { + peer.latest_hash = hash; + } + } + } + debug!(target: "sync", "Finished block propagation, took {}ms", as_ms(started)); + } + PriorityTask::PropagateTransactions(time, _) => { + SyncPropagator::propagate_new_transactions(&mut sync, io, || { + check_deadline(deadline).is_some() + }); + debug!(target: "sync", "Finished transaction propagation, took {}ms", as_ms(time)); + } + } - Some(()) - }; + Some(()) + }; - // Process as many items as we can until the deadline is reached. - loop { - if work().is_none() { - return; - } - } - } + // Process as many items as we can until the deadline is reached. 
+ loop { + if work().is_none() { + return; + } + } + } } // Static methods impl ChainSync { - /// creates rlp to send for the tree defined by 'from' and 'to' hashes - fn create_new_hashes_rlp(chain: &BlockChainClient, from: &H256, to: &H256) -> Option { - match chain.tree_route(from, to) { - Some(route) => { - let uncles = chain.find_uncles(from).unwrap_or_else(Vec::new); - match route.blocks.len() { - 0 => None, - _ => { - let mut blocks = route.blocks; - blocks.extend(uncles); - let mut rlp_stream = RlpStream::new_list(blocks.len()); - for block_hash in blocks { - let mut hash_rlp = RlpStream::new_list(2); - let number = chain.block_header(BlockId::Hash(block_hash.clone())) + /// creates rlp to send for the tree defined by 'from' and 'to' hashes + fn create_new_hashes_rlp(chain: &BlockChainClient, from: &H256, to: &H256) -> Option { + match chain.tree_route(from, to) { + Some(route) => { + let uncles = chain.find_uncles(from).unwrap_or_else(Vec::new); + match route.blocks.len() { + 0 => None, + _ => { + let mut blocks = route.blocks; + blocks.extend(uncles); + let mut rlp_stream = RlpStream::new_list(blocks.len()); + for block_hash in blocks { + let mut hash_rlp = RlpStream::new_list(2); + let number = chain.block_header(BlockId::Hash(block_hash.clone())) .expect("chain.tree_route and chain.find_uncles only return hahses of blocks that are in the blockchain. 
qed.").number(); - hash_rlp.append(&block_hash); - hash_rlp.append(&number); - rlp_stream.append_raw(hash_rlp.as_raw(), 1); - } - Some(rlp_stream.out()) - } - } - }, - None => None - } - } + hash_rlp.append(&block_hash); + hash_rlp.append(&number); + rlp_stream.append_raw(hash_rlp.as_raw(), 1); + } + Some(rlp_stream.out()) + } + } + } + None => None, + } + } - /// creates rlp from block bytes and total difficulty - fn create_block_rlp(bytes: &Bytes, total_difficulty: U256) -> Bytes { - let mut rlp_stream = RlpStream::new_list(2); - rlp_stream.append_raw(bytes, 1); - rlp_stream.append(&total_difficulty); - rlp_stream.out() - } + /// creates rlp from block bytes and total difficulty + fn create_block_rlp(bytes: &Bytes, total_difficulty: U256) -> Bytes { + let mut rlp_stream = RlpStream::new_list(2); + rlp_stream.append_raw(bytes, 1); + rlp_stream.append(&total_difficulty); + rlp_stream.out() + } - /// creates latest block rlp for the given client - fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes { - Self::create_block_rlp( - &chain.block(BlockId::Hash(chain.chain_info().best_block_hash)) - .expect("Best block always exists").into_inner(), - chain.chain_info().total_difficulty - ) - } + /// creates latest block rlp for the given client + fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes { + Self::create_block_rlp( + &chain + .block(BlockId::Hash(chain.chain_info().best_block_hash)) + .expect("Best block always exists") + .into_inner(), + chain.chain_info().total_difficulty, + ) + } - /// creates given hash block rlp for the given client - fn create_new_block_rlp(chain: &BlockChainClient, hash: &H256) -> Bytes { - Self::create_block_rlp( - &chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed").into_inner(), - chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.") - ) - } + /// creates given hash block rlp for the given client + fn create_new_block_rlp(chain: 
&BlockChainClient, hash: &H256) -> Bytes { + Self::create_block_rlp( + &chain + .block(BlockId::Hash(hash.clone())) + .expect("Block has just been sealed; qed") + .into_inner(), + chain + .block_total_difficulty(BlockId::Hash(hash.clone())) + .expect("Block has just been sealed; qed."), + ) + } - fn select_random_peers(peers: &[PeerId]) -> Vec { - // take sqrt(x) peers - let mut peers = peers.to_vec(); - let mut count = (peers.len() as f64).powf(0.5).round() as usize; - count = cmp::min(count, MAX_PEERS_PROPAGATION); - count = cmp::max(count, MIN_PEERS_PROPAGATION); - random::new().shuffle(&mut peers); - peers.truncate(count); - peers - } + fn select_random_peers(peers: &[PeerId]) -> Vec { + // take sqrt(x) peers + let mut peers = peers.to_vec(); + let mut count = (peers.len() as f64).powf(0.5).round() as usize; + count = cmp::min(count, MAX_PEERS_PROPAGATION); + count = cmp::max(count, MIN_PEERS_PROPAGATION); + random::new().shuffle(&mut peers); + peers.truncate(count); + peers + } - fn get_init_state(warp_sync: WarpSync, chain: &BlockChainClient) -> SyncState { - let best_block = chain.chain_info().best_block_number; - match warp_sync { - WarpSync::Enabled => SyncState::WaitingPeers, - WarpSync::OnlyAndAfter(block) if block > best_block => SyncState::WaitingPeers, - _ => SyncState::Idle, - } - } + fn get_init_state(warp_sync: WarpSync, chain: &BlockChainClient) -> SyncState { + let best_block = chain.chain_info().best_block_number; + match warp_sync { + WarpSync::Enabled => SyncState::WaitingPeers, + WarpSync::OnlyAndAfter(block) if block > best_block => SyncState::WaitingPeers, + _ => SyncState::Idle, + } + } } /// A peer query method for getting a list of peers enum PeerState { - /// Peer is on different hash than us - Lagging, - /// Peer is on the same block as us - SameBlock + /// Peer is on different hash than us + Lagging, + /// Peer is on the same block as us + SameBlock, } /// Blockchain sync handler. /// See module documentation for more details. 
pub struct ChainSync { - /// Sync state - state: SyncState, - /// Last block number for the start of sync - starting_block: BlockNumber, - /// Highest block number seen - highest_block: Option, - /// All connected peers - peers: Peers, - /// Peers active for current sync round - active_peers: HashSet, - /// Block download process for new blocks - new_blocks: BlockDownloader, - /// Block download process for ancient blocks - old_blocks: Option, - /// Last propagated block number - last_sent_block_number: BlockNumber, - /// Network ID - network_id: u64, - /// Optional fork block to check - fork_block: Option<(BlockNumber, H256)>, - /// Snapshot downloader. - snapshot: Snapshot, - /// Connected peers pending Status message. - /// Value is request timestamp. - handshaking_peers: HashMap, - /// Sync start timestamp. Measured when first peer is connected - sync_start_time: Option, - /// Transactions propagation statistics - transactions_stats: TransactionsStats, - /// Enable ancient block downloading - download_old_blocks: bool, - /// Shared private tx service. - private_tx_handler: Option>, - /// Enable warp sync. - warp_sync: WarpSync, + /// Sync state + state: SyncState, + /// Last block number for the start of sync + starting_block: BlockNumber, + /// Highest block number seen + highest_block: Option, + /// All connected peers + peers: Peers, + /// Peers active for current sync round + active_peers: HashSet, + /// Block download process for new blocks + new_blocks: BlockDownloader, + /// Block download process for ancient blocks + old_blocks: Option, + /// Last propagated block number + last_sent_block_number: BlockNumber, + /// Network ID + network_id: u64, + /// Optional fork block to check + fork_block: Option<(BlockNumber, H256)>, + /// Snapshot downloader. + snapshot: Snapshot, + /// Connected peers pending Status message. + /// Value is request timestamp. + handshaking_peers: HashMap, + /// Sync start timestamp. 
Measured when first peer is connected + sync_start_time: Option, + /// Transactions propagation statistics + transactions_stats: TransactionsStats, + /// Enable ancient block downloading + download_old_blocks: bool, + /// Shared private tx service. + private_tx_handler: Option>, + /// Enable warp sync. + warp_sync: WarpSync, } impl ChainSync { - /// Create a new instance of syncing strategy. - pub fn new( - config: SyncConfig, - chain: &BlockChainClient, - private_tx_handler: Option>, - ) -> Self { - let chain_info = chain.chain_info(); - let best_block = chain.chain_info().best_block_number; - let state = Self::get_init_state(config.warp_sync, chain); + /// Create a new instance of syncing strategy. + pub fn new( + config: SyncConfig, + chain: &BlockChainClient, + private_tx_handler: Option>, + ) -> Self { + let chain_info = chain.chain_info(); + let best_block = chain.chain_info().best_block_number; + let state = Self::get_init_state(config.warp_sync, chain); - let mut sync = ChainSync { - state, - starting_block: best_block, - highest_block: None, - peers: HashMap::new(), - handshaking_peers: HashMap::new(), - active_peers: HashSet::new(), - new_blocks: BlockDownloader::new(BlockSet::NewBlocks, &chain_info.best_block_hash, chain_info.best_block_number), - old_blocks: None, - last_sent_block_number: 0, - network_id: config.network_id, - fork_block: config.fork_block, - download_old_blocks: config.download_old_blocks, - snapshot: Snapshot::new(), - sync_start_time: None, - transactions_stats: TransactionsStats::default(), - private_tx_handler, - warp_sync: config.warp_sync, - }; - sync.update_targets(chain); - sync - } + let mut sync = ChainSync { + state, + starting_block: best_block, + highest_block: None, + peers: HashMap::new(), + handshaking_peers: HashMap::new(), + active_peers: HashSet::new(), + new_blocks: BlockDownloader::new( + BlockSet::NewBlocks, + &chain_info.best_block_hash, + chain_info.best_block_number, + ), + old_blocks: None, + 
last_sent_block_number: 0, + network_id: config.network_id, + fork_block: config.fork_block, + download_old_blocks: config.download_old_blocks, + snapshot: Snapshot::new(), + sync_start_time: None, + transactions_stats: TransactionsStats::default(), + private_tx_handler, + warp_sync: config.warp_sync, + }; + sync.update_targets(chain); + sync + } - /// Returns synchonization status - pub fn status(&self) -> SyncStatus { - let last_imported_number = self.new_blocks.last_imported_block_number(); - SyncStatus { - state: self.state.clone(), - protocol_version: ETH_PROTOCOL_VERSION_63.0, - network_id: self.network_id, - start_block_number: self.starting_block, - last_imported_block_number: Some(last_imported_number), - last_imported_old_block_number: self.old_blocks.as_ref().map(|d| d.last_imported_block_number()), - highest_block_number: self.highest_block.map(|n| cmp::max(n, last_imported_number)), - blocks_received: if last_imported_number > self.starting_block { last_imported_number - self.starting_block } else { 0 }, - blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 }, - num_peers: self.peers.values().filter(|p| p.is_allowed()).count(), - num_active_peers: self.peers.values().filter(|p| p.is_allowed() && p.asking != PeerAsking::Nothing).count(), - num_snapshot_chunks: self.snapshot.total_chunks(), - snapshot_chunks_done: self.snapshot.done_chunks(), - mem_used: - self.new_blocks.heap_size() - + self.old_blocks.as_ref().map_or(0, |d| d.heap_size()) - + self.peers.heap_size_of_children(), - } - } + /// Returns synchonization status + pub fn status(&self) -> SyncStatus { + let last_imported_number = self.new_blocks.last_imported_block_number(); + SyncStatus { + state: self.state.clone(), + protocol_version: ETH_PROTOCOL_VERSION_63.0, + network_id: self.network_id, + start_block_number: self.starting_block, + last_imported_block_number: Some(last_imported_number), + last_imported_old_block_number: self + 
.old_blocks + .as_ref() + .map(|d| d.last_imported_block_number()), + highest_block_number: self + .highest_block + .map(|n| cmp::max(n, last_imported_number)), + blocks_received: if last_imported_number > self.starting_block { + last_imported_number - self.starting_block + } else { + 0 + }, + blocks_total: match self.highest_block { + Some(x) if x > self.starting_block => x - self.starting_block, + _ => 0, + }, + num_peers: self.peers.values().filter(|p| p.is_allowed()).count(), + num_active_peers: self + .peers + .values() + .filter(|p| p.is_allowed() && p.asking != PeerAsking::Nothing) + .count(), + num_snapshot_chunks: self.snapshot.total_chunks(), + snapshot_chunks_done: self.snapshot.done_chunks(), + mem_used: self.new_blocks.heap_size() + + self.old_blocks.as_ref().map_or(0, |d| d.heap_size()) + + self.peers.heap_size_of_children(), + } + } - /// Returns information on peers connections - pub fn peer_info(&self, peer_id: &PeerId) -> Option { - self.peers.get(peer_id).map(|peer_data| { - PeerInfoDigest { - version: peer_data.protocol_version as u32, - difficulty: peer_data.difficulty, - head: peer_data.latest_hash, - } - }) - } + /// Returns information on peers connections + pub fn peer_info(&self, peer_id: &PeerId) -> Option { + self.peers.get(peer_id).map(|peer_data| PeerInfoDigest { + version: peer_data.protocol_version as u32, + difficulty: peer_data.difficulty, + head: peer_data.latest_hash, + }) + } - /// Returns transactions propagation statistics - pub fn transactions_stats(&self) -> &H256FastMap { - self.transactions_stats.stats() - } + /// Returns transactions propagation statistics + pub fn transactions_stats(&self) -> &H256FastMap { + self.transactions_stats.stats() + } - /// Updates transactions were received by a peer - pub fn transactions_received(&mut self, txs: &[UnverifiedTransaction], peer_id: PeerId) { - if let Some(peer_info) = self.peers.get_mut(&peer_id) { - peer_info.last_sent_transactions.extend(txs.iter().map(|tx| tx.hash())); - } - 
} + /// Updates transactions were received by a peer + pub fn transactions_received(&mut self, txs: &[UnverifiedTransaction], peer_id: PeerId) { + if let Some(peer_info) = self.peers.get_mut(&peer_id) { + peer_info + .last_sent_transactions + .extend(txs.iter().map(|tx| tx.hash())); + } + } - /// Abort all sync activity - pub fn abort(&mut self, io: &mut SyncIo) { - self.reset_and_continue(io); - self.peers.clear(); - } + /// Abort all sync activity + pub fn abort(&mut self, io: &mut SyncIo) { + self.reset_and_continue(io); + self.peers.clear(); + } - /// Reset sync. Clear all downloaded data but keep the queue. - /// Set sync state to the given state or to the initial state if `None` is provided. - fn reset(&mut self, io: &mut SyncIo, state: Option) { - self.new_blocks.reset(); - let chain_info = io.chain().chain_info(); - for (_, ref mut p) in &mut self.peers { - if p.block_set != Some(BlockSet::OldBlocks) { - p.reset_asking(); - if p.difficulty.is_none() { - // assume peer has up to date difficulty - p.difficulty = Some(chain_info.pending_total_difficulty); - } - } - } - self.state = state.unwrap_or_else(|| Self::get_init_state(self.warp_sync, io.chain())); - // Reactivate peers only if some progress has been made - // since the last sync round of if starting fresh. - self.active_peers = self.peers.keys().cloned().collect(); - } + /// Reset sync. Clear all downloaded data but keep the queue. + /// Set sync state to the given state or to the initial state if `None` is provided. 
+ fn reset(&mut self, io: &mut SyncIo, state: Option) { + self.new_blocks.reset(); + let chain_info = io.chain().chain_info(); + for (_, ref mut p) in &mut self.peers { + if p.block_set != Some(BlockSet::OldBlocks) { + p.reset_asking(); + if p.difficulty.is_none() { + // assume peer has up to date difficulty + p.difficulty = Some(chain_info.pending_total_difficulty); + } + } + } + self.state = state.unwrap_or_else(|| Self::get_init_state(self.warp_sync, io.chain())); + // Reactivate peers only if some progress has been made + // since the last sync round of if starting fresh. + self.active_peers = self.peers.keys().cloned().collect(); + } - /// Restart sync - pub fn reset_and_continue(&mut self, io: &mut SyncIo) { - trace!(target: "sync", "Restarting"); - if self.state == SyncState::SnapshotData { - debug!(target:"sync", "Aborting snapshot restore"); - io.snapshot_service().abort_restore(); - } - self.snapshot.clear(); - self.reset(io, None); - self.continue_sync(io); - } + /// Restart sync + pub fn reset_and_continue(&mut self, io: &mut SyncIo) { + trace!(target: "sync", "Restarting"); + if self.state == SyncState::SnapshotData { + debug!(target:"sync", "Aborting snapshot restore"); + io.snapshot_service().abort_restore(); + } + self.snapshot.clear(); + self.reset(io, None); + self.continue_sync(io); + } - /// Remove peer from active peer set. Peer will be reactivated on the next sync - /// round. - fn deactivate_peer(&mut self, _io: &mut SyncIo, peer_id: PeerId) { - trace!(target: "sync", "Deactivating peer {}", peer_id); - self.active_peers.remove(&peer_id); - } + /// Remove peer from active peer set. Peer will be reactivated on the next sync + /// round. 
+ fn deactivate_peer(&mut self, _io: &mut SyncIo, peer_id: PeerId) { + trace!(target: "sync", "Deactivating peer {}", peer_id); + self.active_peers.remove(&peer_id); + } - fn maybe_start_snapshot_sync(&mut self, io: &mut SyncIo) { - if !self.warp_sync.is_enabled() || io.snapshot_service().supported_versions().is_none() { - trace!(target: "sync", "Skipping warp sync. Disabled or not supported."); - return; - } - if self.state != SyncState::WaitingPeers && self.state != SyncState::Blocks && self.state != SyncState::Waiting { - trace!(target: "sync", "Skipping warp sync. State: {:?}", self.state); - return; - } - // Make sure the snapshot block is not too far away from best block and network best block and - // that it is higher than fork detection block - let our_best_block = io.chain().chain_info().best_block_number; - let fork_block = self.fork_block.map_or(0, |(n, _)| n); + fn maybe_start_snapshot_sync(&mut self, io: &mut SyncIo) { + if !self.warp_sync.is_enabled() || io.snapshot_service().supported_versions().is_none() { + trace!(target: "sync", "Skipping warp sync. Disabled or not supported."); + return; + } + if self.state != SyncState::WaitingPeers + && self.state != SyncState::Blocks + && self.state != SyncState::Waiting + { + trace!(target: "sync", "Skipping warp sync. 
State: {:?}", self.state); + return; + } + // Make sure the snapshot block is not too far away from best block and network best block and + // that it is higher than fork detection block + let our_best_block = io.chain().chain_info().best_block_number; + let fork_block = self.fork_block.map_or(0, |(n, _)| n); - let (best_hash, max_peers, snapshot_peers) = { - let expected_warp_block = match self.warp_sync { - WarpSync::OnlyAndAfter(block) => block, - _ => 0, - }; - //collect snapshot infos from peers - let snapshots = self.peers.iter() - .filter(|&(_, p)| p.is_allowed() && p.snapshot_number.map_or(false, |sn| + let (best_hash, max_peers, snapshot_peers) = { + let expected_warp_block = match self.warp_sync { + WarpSync::OnlyAndAfter(block) => block, + _ => 0, + }; + //collect snapshot infos from peers + let snapshots = self + .peers + .iter() + .filter(|&(_, p)| { + p.is_allowed() + && p.snapshot_number.map_or(false, |sn| // Snapshot must be old enough that it's usefull to sync with it our_best_block < sn && (sn - our_best_block) > SNAPSHOT_RESTORE_THRESHOLD && // Snapshot must have been taken after the Fork @@ -777,160 +826,198 @@ impl ChainSync { // If we know a highest block, snapshot must be recent enough self.highest_block.map_or(true, |highest| { highest < sn || (highest - sn) <= SNAPSHOT_RESTORE_THRESHOLD - }) - )) - .filter_map(|(p, peer)| peer.snapshot_hash.map(|hash| (p, hash.clone()))) - .filter(|&(_, ref hash)| !self.snapshot.is_known_bad(hash)); + })) + }) + .filter_map(|(p, peer)| peer.snapshot_hash.map(|hash| (p, hash.clone()))) + .filter(|&(_, ref hash)| !self.snapshot.is_known_bad(hash)); - let mut snapshot_peers = HashMap::new(); - let mut max_peers: usize = 0; - let mut best_hash = None; - for (p, hash) in snapshots { - let peers = snapshot_peers.entry(hash).or_insert_with(Vec::new); - peers.push(*p); - if peers.len() > max_peers { - max_peers = peers.len(); - best_hash = Some(hash); - } - } - (best_hash, max_peers, snapshot_peers) - }; + let mut 
snapshot_peers = HashMap::new(); + let mut max_peers: usize = 0; + let mut best_hash = None; + for (p, hash) in snapshots { + let peers = snapshot_peers.entry(hash).or_insert_with(Vec::new); + peers.push(*p); + if peers.len() > max_peers { + max_peers = peers.len(); + best_hash = Some(hash); + } + } + (best_hash, max_peers, snapshot_peers) + }; - let timeout = (self.state == SyncState::WaitingPeers) && self.sync_start_time.map_or(false, |t| t.elapsed() > WAIT_PEERS_TIMEOUT); + let timeout = (self.state == SyncState::WaitingPeers) + && self + .sync_start_time + .map_or(false, |t| t.elapsed() > WAIT_PEERS_TIMEOUT); - if let (Some(hash), Some(peers)) = (best_hash, best_hash.map_or(None, |h| snapshot_peers.get(&h))) { - if max_peers >= SNAPSHOT_MIN_PEERS { - trace!(target: "sync", "Starting confirmed snapshot sync {:?} with {:?}", hash, peers); - self.start_snapshot_sync(io, peers); - } else if timeout { - trace!(target: "sync", "Starting unconfirmed snapshot sync {:?} with {:?}", hash, peers); - self.start_snapshot_sync(io, peers); - } - } else if timeout && !self.warp_sync.is_warp_only() { - trace!(target: "sync", "No snapshots found, starting full sync"); - self.state = SyncState::Idle; - self.continue_sync(io); - } - } + if let (Some(hash), Some(peers)) = ( + best_hash, + best_hash.map_or(None, |h| snapshot_peers.get(&h)), + ) { + if max_peers >= SNAPSHOT_MIN_PEERS { + trace!(target: "sync", "Starting confirmed snapshot sync {:?} with {:?}", hash, peers); + self.start_snapshot_sync(io, peers); + } else if timeout { + trace!(target: "sync", "Starting unconfirmed snapshot sync {:?} with {:?}", hash, peers); + self.start_snapshot_sync(io, peers); + } + } else if timeout && !self.warp_sync.is_warp_only() { + trace!(target: "sync", "No snapshots found, starting full sync"); + self.state = SyncState::Idle; + self.continue_sync(io); + } + } - fn start_snapshot_sync(&mut self, io: &mut SyncIo, peers: &[PeerId]) { - if !self.snapshot.have_manifest() { - for p in peers { - 
if self.peers.get(p).map_or(false, |p| p.asking == PeerAsking::Nothing) { - SyncRequester::request_snapshot_manifest(self, io, *p); - } - } - self.state = SyncState::SnapshotManifest; - trace!(target: "sync", "New snapshot sync with {:?}", peers); - } else { - self.state = SyncState::SnapshotData; - trace!(target: "sync", "Resumed snapshot sync with {:?}", peers); - } - } + fn start_snapshot_sync(&mut self, io: &mut SyncIo, peers: &[PeerId]) { + if !self.snapshot.have_manifest() { + for p in peers { + if self + .peers + .get(p) + .map_or(false, |p| p.asking == PeerAsking::Nothing) + { + SyncRequester::request_snapshot_manifest(self, io, *p); + } + } + self.state = SyncState::SnapshotManifest; + trace!(target: "sync", "New snapshot sync with {:?}", peers); + } else { + self.state = SyncState::SnapshotData; + trace!(target: "sync", "Resumed snapshot sync with {:?}", peers); + } + } - /// Restart sync disregarding the block queue status. May end up re-downloading up to QUEUE_SIZE blocks - pub fn restart(&mut self, io: &mut SyncIo) { - self.update_targets(io.chain()); - self.reset_and_continue(io); - } + /// Restart sync disregarding the block queue status. May end up re-downloading up to QUEUE_SIZE blocks + pub fn restart(&mut self, io: &mut SyncIo) { + self.update_targets(io.chain()); + self.reset_and_continue(io); + } - /// Update sync after the blockchain has been changed externally. - pub fn update_targets(&mut self, chain: &BlockChainClient) { - // Do not assume that the block queue/chain still has our last_imported_block - let chain = chain.chain_info(); - self.new_blocks = BlockDownloader::new(BlockSet::NewBlocks, &chain.best_block_hash, chain.best_block_number); - self.old_blocks = None; - if self.download_old_blocks { - if let (Some(ancient_block_hash), Some(ancient_block_number)) = (chain.ancient_block_hash, chain.ancient_block_number) { + /// Update sync after the blockchain has been changed externally. 
+ pub fn update_targets(&mut self, chain: &BlockChainClient) { + // Do not assume that the block queue/chain still has our last_imported_block + let chain = chain.chain_info(); + self.new_blocks = BlockDownloader::new( + BlockSet::NewBlocks, + &chain.best_block_hash, + chain.best_block_number, + ); + self.old_blocks = None; + if self.download_old_blocks { + if let (Some(ancient_block_hash), Some(ancient_block_number)) = + (chain.ancient_block_hash, chain.ancient_block_number) + { + trace!(target: "sync", "Downloading old blocks from {:?} (#{}) till {:?} (#{:?})", ancient_block_hash, ancient_block_number, chain.first_block_hash, chain.first_block_number); + let mut downloader = BlockDownloader::new( + BlockSet::OldBlocks, + &ancient_block_hash, + ancient_block_number, + ); + if let Some(hash) = chain.first_block_hash { + trace!(target: "sync", "Downloader target set to {:?}", hash); + downloader.set_target(&hash); + } + self.old_blocks = Some(downloader); + } + } + } - trace!(target: "sync", "Downloading old blocks from {:?} (#{}) till {:?} (#{:?})", ancient_block_hash, ancient_block_number, chain.first_block_hash, chain.first_block_number); - let mut downloader = BlockDownloader::new(BlockSet::OldBlocks, &ancient_block_hash, ancient_block_number); - if let Some(hash) = chain.first_block_hash { - trace!(target: "sync", "Downloader target set to {:?}", hash); - downloader.set_target(&hash); - } - self.old_blocks = Some(downloader); - } - } - } + /// Resume downloading + pub fn continue_sync(&mut self, io: &mut SyncIo) { + if self.state == SyncState::Waiting { + trace!(target: "sync", "Waiting for the block queue"); + } else if self.state == SyncState::SnapshotWaiting { + trace!(target: "sync", "Waiting for the snapshot restoration"); + } else { + // Collect active peers that can sync + let mut peers: Vec<(PeerId, u8)> = self + .peers + .iter() + .filter_map(|(peer_id, peer)| { + if peer.can_sync() + && peer.asking == PeerAsking::Nothing + && 
self.active_peers.contains(&peer_id) + { + Some((*peer_id, peer.protocol_version)) + } else { + None + } + }) + .collect(); - /// Resume downloading - pub fn continue_sync(&mut self, io: &mut SyncIo) { - if self.state == SyncState::Waiting { - trace!(target: "sync", "Waiting for the block queue"); - } else if self.state == SyncState::SnapshotWaiting { - trace!(target: "sync", "Waiting for the snapshot restoration"); - } else { - // Collect active peers that can sync - let mut peers: Vec<(PeerId, u8)> = self.peers.iter().filter_map(|(peer_id, peer)| - if peer.can_sync() && peer.asking == PeerAsking::Nothing && self.active_peers.contains(&peer_id) { - Some((*peer_id, peer.protocol_version)) - } else { - None - } - ).collect(); + if peers.len() > 0 { + trace!( + target: "sync", + "Syncing with peers: {} active, {} available, {} total", + self.active_peers.len(), peers.len(), self.peers.len() + ); - if peers.len() > 0 { - trace!( - target: "sync", - "Syncing with peers: {} active, {} available, {} total", - self.active_peers.len(), peers.len(), self.peers.len() - ); + random::new().shuffle(&mut peers); // TODO (#646): sort by rating + // prefer peers with higher protocol version + peers.sort_by(|&(_, ref v1), &(_, ref v2)| v1.cmp(v2)); - random::new().shuffle(&mut peers); // TODO (#646): sort by rating - // prefer peers with higher protocol version - peers.sort_by(|&(_, ref v1), &(_, ref v2)| v1.cmp(v2)); + for (peer_id, _) in peers { + self.sync_peer(io, peer_id, false); + } + } + } - for (peer_id, _) in peers { - self.sync_peer(io, peer_id, false); - } - } - } + if (self.state == SyncState::Blocks || self.state == SyncState::NewBlocks) + && !self.peers.values().any(|p| { + p.asking != PeerAsking::Nothing + && p.block_set != Some(BlockSet::OldBlocks) + && p.can_sync() + }) + { + self.complete_sync(io); + } + } - if - (self.state == SyncState::Blocks || self.state == SyncState::NewBlocks) && - !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.block_set 
!= Some(BlockSet::OldBlocks) && p.can_sync()) - { - self.complete_sync(io); - } - } + /// Called after all blocks have been downloaded + fn complete_sync(&mut self, io: &mut SyncIo) { + trace!(target: "sync", "Sync complete"); + self.reset(io, Some(SyncState::Idle)); + } - /// Called after all blocks have been downloaded - fn complete_sync(&mut self, io: &mut SyncIo) { - trace!(target: "sync", "Sync complete"); - self.reset(io, Some(SyncState::Idle)); - } + /// Enter waiting state + fn pause_sync(&mut self) { + trace!(target: "sync", "Block queue full, pausing sync"); + self.state = SyncState::Waiting; + } - /// Enter waiting state - fn pause_sync(&mut self) { - trace!(target: "sync", "Block queue full, pausing sync"); - self.state = SyncState::Waiting; - } + /// Find something to do for a peer. Called for a new peer or when a peer is done with its task. + fn sync_peer(&mut self, io: &mut SyncIo, peer_id: PeerId, force: bool) { + if !self.active_peers.contains(&peer_id) { + trace!(target: "sync", "Skipping deactivated peer {}", peer_id); + return; + } + let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if peer.asking != PeerAsking::Nothing || !peer.can_sync() { + trace!(target: "sync", "Skipping busy peer {}", peer_id); + return; + } + ( + peer.latest_hash.clone(), + peer.difficulty.clone(), + peer.snapshot_number.as_ref().cloned().unwrap_or(0), + peer.snapshot_hash.as_ref().cloned(), + ) + } else { + return; + } + }; + let chain_info = io.chain().chain_info(); + let syncing_difficulty = chain_info.pending_total_difficulty; + let num_active_peers = self + .peers + .values() + .filter(|p| p.asking != PeerAsking::Nothing) + .count(); - /// Find something to do for a peer. Called for a new peer or when a peer is done with its task. 
- fn sync_peer(&mut self, io: &mut SyncIo, peer_id: PeerId, force: bool) { - if !self.active_peers.contains(&peer_id) { - trace!(target: "sync", "Skipping deactivated peer {}", peer_id); - return; - } - let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = { - if let Some(peer) = self.peers.get_mut(&peer_id) { - if peer.asking != PeerAsking::Nothing || !peer.can_sync() { - trace!(target: "sync", "Skipping busy peer {}", peer_id); - return; - } - (peer.latest_hash.clone(), peer.difficulty.clone(), peer.snapshot_number.as_ref().cloned().unwrap_or(0), peer.snapshot_hash.as_ref().cloned()) - } else { - return; - } - }; - let chain_info = io.chain().chain_info(); - let syncing_difficulty = chain_info.pending_total_difficulty; - let num_active_peers = self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(); - - let higher_difficulty = peer_difficulty.map_or(true, |pd| pd > syncing_difficulty); - if force || higher_difficulty || self.old_blocks.is_some() { - match self.state { + let higher_difficulty = peer_difficulty.map_or(true, |pd| pd > syncing_difficulty); + if force || higher_difficulty || self.old_blocks.is_some() { + match self.state { SyncState::WaitingPeers => { trace!( target: "sync", @@ -1009,214 +1096,232 @@ impl ChainSync { SyncState::Waiting | SyncState::SnapshotWaiting => () } - } else { - trace!(target: "sync", "Skipping peer {}, force={}, td={:?}, our td={}, state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, self.state); - } - } + } else { + trace!(target: "sync", "Skipping peer {}, force={}, td={:?}, our td={}, state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, self.state); + } + } - /// Clear all blocks/headers marked as being downloaded by a peer. 
- fn clear_peer_download(&mut self, peer_id: PeerId) { - if let Some(ref peer) = self.peers.get(&peer_id) { - match peer.asking { - PeerAsking::BlockHeaders => { - if let Some(ref hash) = peer.asking_hash { - self.new_blocks.clear_header_download(hash); - if let Some(ref mut old) = self.old_blocks { - old.clear_header_download(hash); - } - } - }, - PeerAsking::BlockBodies => { - self.new_blocks.clear_body_download(&peer.asking_blocks); - if let Some(ref mut old) = self.old_blocks { - old.clear_body_download(&peer.asking_blocks); - } - }, - PeerAsking::BlockReceipts => { - self.new_blocks.clear_receipt_download(&peer.asking_blocks); - if let Some(ref mut old) = self.old_blocks { - old.clear_receipt_download(&peer.asking_blocks); - } - }, - PeerAsking::SnapshotData => { - if let Some(hash) = peer.asking_snapshot_data { - self.snapshot.clear_chunk_download(&hash); - } - }, - _ => (), - } - } - } + /// Clear all blocks/headers marked as being downloaded by a peer. + fn clear_peer_download(&mut self, peer_id: PeerId) { + if let Some(ref peer) = self.peers.get(&peer_id) { + match peer.asking { + PeerAsking::BlockHeaders => { + if let Some(ref hash) = peer.asking_hash { + self.new_blocks.clear_header_download(hash); + if let Some(ref mut old) = self.old_blocks { + old.clear_header_download(hash); + } + } + } + PeerAsking::BlockBodies => { + self.new_blocks.clear_body_download(&peer.asking_blocks); + if let Some(ref mut old) = self.old_blocks { + old.clear_body_download(&peer.asking_blocks); + } + } + PeerAsking::BlockReceipts => { + self.new_blocks.clear_receipt_download(&peer.asking_blocks); + if let Some(ref mut old) = self.old_blocks { + old.clear_receipt_download(&peer.asking_blocks); + } + } + PeerAsking::SnapshotData => { + if let Some(hash) = peer.asking_snapshot_data { + self.snapshot.clear_chunk_download(&hash); + } + } + _ => (), + } + } + } - /// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import. 
- fn collect_blocks(&mut self, io: &mut SyncIo, block_set: BlockSet) { - match block_set { - BlockSet::NewBlocks => { - if self.new_blocks.collect_blocks(io, self.state == SyncState::NewBlocks) == DownloadAction::Reset { - self.reset_downloads(block_set); - self.new_blocks.reset(); - } - }, - BlockSet::OldBlocks => { - let mut is_complete = false; - let mut download_action = DownloadAction::None; - if let Some(downloader) = self.old_blocks.as_mut() { - download_action = downloader.collect_blocks(io, false); - is_complete = downloader.is_complete(); - } + /// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import. + fn collect_blocks(&mut self, io: &mut SyncIo, block_set: BlockSet) { + match block_set { + BlockSet::NewBlocks => { + if self + .new_blocks + .collect_blocks(io, self.state == SyncState::NewBlocks) + == DownloadAction::Reset + { + self.reset_downloads(block_set); + self.new_blocks.reset(); + } + } + BlockSet::OldBlocks => { + let mut is_complete = false; + let mut download_action = DownloadAction::None; + if let Some(downloader) = self.old_blocks.as_mut() { + download_action = downloader.collect_blocks(io, false); + is_complete = downloader.is_complete(); + } - if download_action == DownloadAction::Reset { - self.reset_downloads(block_set); - if let Some(downloader) = self.old_blocks.as_mut() { - downloader.reset(); - } - } + if download_action == DownloadAction::Reset { + self.reset_downloads(block_set); + if let Some(downloader) = self.old_blocks.as_mut() { + downloader.reset(); + } + } - if is_complete { - trace!(target: "sync", "Background block download is complete"); - self.old_blocks = None; - } - } - }; - } + if is_complete { + trace!(target: "sync", "Background block download is complete"); + self.old_blocks = None; + } + } + }; + } - /// Mark all outstanding requests as expired - fn reset_downloads(&mut self, block_set: BlockSet) { - trace!(target: "sync", "Resetting downloads for {:?}", 
block_set); - for (_, ref mut p) in self.peers.iter_mut().filter(|&(_, ref p)| p.block_set == Some(block_set)) { - p.reset_asking(); - } - } + /// Mark all outstanding requests as expired + fn reset_downloads(&mut self, block_set: BlockSet) { + trace!(target: "sync", "Resetting downloads for {:?}", block_set); + for (_, ref mut p) in self + .peers + .iter_mut() + .filter(|&(_, ref p)| p.block_set == Some(block_set)) + { + p.reset_asking(); + } + } - /// Reset peer status after request is complete. - fn reset_peer_asking(&mut self, peer_id: PeerId, asking: PeerAsking) -> bool { - if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { - peer.expired = false; - peer.block_set = None; - if peer.asking != asking { - trace!(target:"sync", "Asking {:?} while expected {:?}", peer.asking, asking); - peer.asking = PeerAsking::Nothing; - return false; - } else { - peer.asking = PeerAsking::Nothing; - return true; - } - } - false - } + /// Reset peer status after request is complete. + fn reset_peer_asking(&mut self, peer_id: PeerId, asking: PeerAsking) -> bool { + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.expired = false; + peer.block_set = None; + if peer.asking != asking { + trace!(target:"sync", "Asking {:?} while expected {:?}", peer.asking, asking); + peer.asking = PeerAsking::Nothing; + return false; + } else { + peer.asking = PeerAsking::Nothing; + return true; + } + } + false + } - /// Send Status message - fn send_status(&mut self, io: &mut SyncIo, peer: PeerId) -> Result<(), network::Error> { - let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer); - let warp_protocol = warp_protocol_version != 0; - let private_tx_protocol = warp_protocol_version >= PAR_PROTOCOL_VERSION_3.0; - let protocol = if warp_protocol { warp_protocol_version } else { ETH_PROTOCOL_VERSION_63.0 }; - trace!(target: "sync", "Sending status to {}, protocol version {}", peer, protocol); - let mut packet = RlpStream::new(); - 
packet.begin_unbounded_list(); - let chain = io.chain().chain_info(); - packet.append(&(protocol as u32)); - packet.append(&self.network_id); - packet.append(&chain.total_difficulty); - packet.append(&chain.best_block_hash); - packet.append(&chain.genesis_hash); - if warp_protocol { - let manifest = io.snapshot_service().manifest(); - let block_number = manifest.as_ref().map_or(0, |m| m.block_number); - let manifest_hash = manifest.map_or(H256::new(), |m| keccak(m.into_rlp())); - packet.append(&manifest_hash); - packet.append(&block_number); - if private_tx_protocol { - packet.append(&self.private_tx_handler.is_some()); - } - } - packet.complete_unbounded_list(); - io.respond(StatusPacket.id(), packet.out()) - } + /// Send Status message + fn send_status(&mut self, io: &mut SyncIo, peer: PeerId) -> Result<(), network::Error> { + let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer); + let warp_protocol = warp_protocol_version != 0; + let private_tx_protocol = warp_protocol_version >= PAR_PROTOCOL_VERSION_3.0; + let protocol = if warp_protocol { + warp_protocol_version + } else { + ETH_PROTOCOL_VERSION_63.0 + }; + trace!(target: "sync", "Sending status to {}, protocol version {}", peer, protocol); + let mut packet = RlpStream::new(); + packet.begin_unbounded_list(); + let chain = io.chain().chain_info(); + packet.append(&(protocol as u32)); + packet.append(&self.network_id); + packet.append(&chain.total_difficulty); + packet.append(&chain.best_block_hash); + packet.append(&chain.genesis_hash); + if warp_protocol { + let manifest = io.snapshot_service().manifest(); + let block_number = manifest.as_ref().map_or(0, |m| m.block_number); + let manifest_hash = manifest.map_or(H256::new(), |m| keccak(m.into_rlp())); + packet.append(&manifest_hash); + packet.append(&block_number); + if private_tx_protocol { + packet.append(&self.private_tx_handler.is_some()); + } + } + packet.complete_unbounded_list(); + io.respond(StatusPacket.id(), packet.out()) + 
} - pub fn maintain_peers(&mut self, io: &mut SyncIo) { - let tick = Instant::now(); - let mut aborting = Vec::new(); - for (peer_id, peer) in &self.peers { - let elapsed = tick - peer.ask_time; - let timeout = match peer.asking { - PeerAsking::BlockHeaders => elapsed > HEADERS_TIMEOUT, - PeerAsking::BlockBodies => elapsed > BODIES_TIMEOUT, - PeerAsking::BlockReceipts => elapsed > RECEIPTS_TIMEOUT, - PeerAsking::Nothing => false, - PeerAsking::ForkHeader => elapsed > FORK_HEADER_TIMEOUT, - PeerAsking::SnapshotManifest => elapsed > SNAPSHOT_MANIFEST_TIMEOUT, - PeerAsking::SnapshotData => elapsed > SNAPSHOT_DATA_TIMEOUT, - }; - if timeout { - debug!(target:"sync", "Timeout {}", peer_id); - io.disconnect_peer(*peer_id); - aborting.push(*peer_id); - } - } - for p in aborting { - SyncHandler::on_peer_aborting(self, io, p); - } + pub fn maintain_peers(&mut self, io: &mut SyncIo) { + let tick = Instant::now(); + let mut aborting = Vec::new(); + for (peer_id, peer) in &self.peers { + let elapsed = tick - peer.ask_time; + let timeout = match peer.asking { + PeerAsking::BlockHeaders => elapsed > HEADERS_TIMEOUT, + PeerAsking::BlockBodies => elapsed > BODIES_TIMEOUT, + PeerAsking::BlockReceipts => elapsed > RECEIPTS_TIMEOUT, + PeerAsking::Nothing => false, + PeerAsking::ForkHeader => elapsed > FORK_HEADER_TIMEOUT, + PeerAsking::SnapshotManifest => elapsed > SNAPSHOT_MANIFEST_TIMEOUT, + PeerAsking::SnapshotData => elapsed > SNAPSHOT_DATA_TIMEOUT, + }; + if timeout { + debug!(target:"sync", "Timeout {}", peer_id); + io.disconnect_peer(*peer_id); + aborting.push(*peer_id); + } + } + for p in aborting { + SyncHandler::on_peer_aborting(self, io, p); + } - // Check for handshake timeouts - for (peer, &ask_time) in &self.handshaking_peers { - let elapsed = (tick - ask_time) / 1_000_000_000; - if elapsed > STATUS_TIMEOUT { - trace!(target:"sync", "Status timeout {}", peer); - io.disconnect_peer(*peer); - } - } - } + // Check for handshake timeouts + for (peer, &ask_time) in 
&self.handshaking_peers { + let elapsed = (tick - ask_time) / 1_000_000_000; + if elapsed > STATUS_TIMEOUT { + trace!(target:"sync", "Status timeout {}", peer); + io.disconnect_peer(*peer); + } + } + } - fn check_resume(&mut self, io: &mut SyncIo) { - match self.state { - SyncState::Waiting if !io.chain().queue_info().is_full() => { - self.state = SyncState::Blocks; - self.continue_sync(io); - }, - SyncState::SnapshotData => match io.snapshot_service().status() { - RestorationStatus::Inactive | RestorationStatus::Failed => { - self.state = SyncState::SnapshotWaiting; - }, - RestorationStatus::Initializing { .. } | RestorationStatus::Ongoing { .. } => (), - }, - SyncState::SnapshotWaiting => { - match io.snapshot_service().status() { - RestorationStatus::Inactive => { - trace!(target:"sync", "Snapshot restoration is complete"); - self.restart(io); - }, - RestorationStatus::Initializing { .. } => { - trace!(target:"sync", "Snapshot restoration is initializing"); - }, - RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } => { - if !self.snapshot.is_complete() && self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize <= MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD { - trace!(target:"sync", "Resuming snapshot sync"); - self.state = SyncState::SnapshotData; - self.continue_sync(io); - } - }, - RestorationStatus::Failed => { - trace!(target: "sync", "Snapshot restoration aborted"); - self.state = SyncState::WaitingPeers; - self.snapshot.clear(); - self.continue_sync(io); - }, - } - }, - _ => (), - } - } + fn check_resume(&mut self, io: &mut SyncIo) { + match self.state { + SyncState::Waiting if !io.chain().queue_info().is_full() => { + self.state = SyncState::Blocks; + self.continue_sync(io); + } + SyncState::SnapshotData => match io.snapshot_service().status() { + RestorationStatus::Inactive | RestorationStatus::Failed => { + self.state = SyncState::SnapshotWaiting; + } + RestorationStatus::Initializing { .. 
} | RestorationStatus::Ongoing { .. } => (), + }, + SyncState::SnapshotWaiting => match io.snapshot_service().status() { + RestorationStatus::Inactive => { + trace!(target:"sync", "Snapshot restoration is complete"); + self.restart(io); + } + RestorationStatus::Initializing { .. } => { + trace!(target:"sync", "Snapshot restoration is initializing"); + } + RestorationStatus::Ongoing { + state_chunks_done, + block_chunks_done, + .. + } => { + if !self.snapshot.is_complete() + && self.snapshot.done_chunks() + - (state_chunks_done + block_chunks_done) as usize + <= MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD + { + trace!(target:"sync", "Resuming snapshot sync"); + self.state = SyncState::SnapshotData; + self.continue_sync(io); + } + } + RestorationStatus::Failed => { + trace!(target: "sync", "Snapshot restoration aborted"); + self.state = SyncState::WaitingPeers; + self.snapshot.clear(); + self.continue_sync(io); + } + }, + _ => (), + } + } - /// returns peer ids that have different block than our chain - fn get_lagging_peers(&self, chain_info: &BlockChainInfo) -> Vec { - self.get_peers(chain_info, PeerState::Lagging) - } + /// returns peer ids that have different block than our chain + fn get_lagging_peers(&self, chain_info: &BlockChainInfo) -> Vec { + self.get_peers(chain_info, PeerState::Lagging) + } - /// returns peer ids that have different or the same blocks than our chain - fn get_peers(&self, chain_info: &BlockChainInfo, peers: PeerState) -> Vec { - let latest_hash = chain_info.best_block_hash; - self + /// returns peer ids that have different or the same blocks than our chain + fn get_peers(&self, chain_info: &BlockChainInfo, peers: PeerState) -> Vec { + let latest_hash = chain_info.best_block_hash; + self .peers .iter() .filter_map(|(&id, ref mut peer_info)| { @@ -1232,367 +1337,422 @@ impl ChainSync { } }) .collect::>() - } + } - fn get_consensus_peers(&self) -> Vec { - self.peers.iter().filter_map(|(id, p)| if p.protocol_version >= PAR_PROTOCOL_VERSION_2.0 { 
Some(*id) } else { None }).collect() - } + fn get_consensus_peers(&self) -> Vec { + self.peers + .iter() + .filter_map(|(id, p)| { + if p.protocol_version >= PAR_PROTOCOL_VERSION_2.0 { + Some(*id) + } else { + None + } + }) + .collect() + } - fn get_private_transaction_peers(&self, transaction_hash: &H256) -> Vec { - self.peers.iter().filter_map( - |(id, p)| if p.protocol_version >= PAR_PROTOCOL_VERSION_3.0 - && !p.last_sent_private_transactions.contains(transaction_hash) - && p.private_tx_enabled { - Some(*id) - } else { - None - } - ).collect() - } + fn get_private_transaction_peers(&self, transaction_hash: &H256) -> Vec { + self.peers + .iter() + .filter_map(|(id, p)| { + if p.protocol_version >= PAR_PROTOCOL_VERSION_3.0 + && !p.last_sent_private_transactions.contains(transaction_hash) + && p.private_tx_enabled + { + Some(*id) + } else { + None + } + }) + .collect() + } - /// Maintain other peers. Send out any new blocks and transactions - pub fn maintain_sync(&mut self, io: &mut SyncIo) { - self.maybe_start_snapshot_sync(io); - self.check_resume(io); - } + /// Maintain other peers. 
Send out any new blocks and transactions + pub fn maintain_sync(&mut self, io: &mut SyncIo) { + self.maybe_start_snapshot_sync(io); + self.check_resume(io); + } - /// called when block is imported to chain - propagates the blocks and updates transactions sent to peers - pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], enacted: &[H256], _retracted: &[H256], sealed: &[H256], proposed: &[Bytes]) { - let queue_info = io.chain().queue_info(); - let is_syncing = self.status().is_syncing(queue_info); + /// called when block is imported to chain - propagates the blocks and updates transactions sent to peers + pub fn chain_new_blocks( + &mut self, + io: &mut SyncIo, + _imported: &[H256], + invalid: &[H256], + enacted: &[H256], + _retracted: &[H256], + sealed: &[H256], + proposed: &[Bytes], + ) { + let queue_info = io.chain().queue_info(); + let is_syncing = self.status().is_syncing(queue_info); - if !is_syncing || !sealed.is_empty() || !proposed.is_empty() { - trace!(target: "sync", "Propagating blocks, state={:?}", self.state); - SyncPropagator::propagate_latest_blocks(self, io, sealed); - SyncPropagator::propagate_proposed_blocks(self, io, proposed); - } - if !invalid.is_empty() { - trace!(target: "sync", "Bad blocks in the queue, restarting"); - self.restart(io); - } + if !is_syncing || !sealed.is_empty() || !proposed.is_empty() { + trace!(target: "sync", "Propagating blocks, state={:?}", self.state); + SyncPropagator::propagate_latest_blocks(self, io, sealed); + SyncPropagator::propagate_proposed_blocks(self, io, proposed); + } + if !invalid.is_empty() { + trace!(target: "sync", "Bad blocks in the queue, restarting"); + self.restart(io); + } - if !is_syncing && !enacted.is_empty() && !self.peers.is_empty() { - // Select random peer to re-broadcast transactions to. 
- let peer = random::new().gen_range(0, self.peers.len()); - trace!(target: "sync", "Re-broadcasting transactions to a random peer."); - self.peers.values_mut().nth(peer).map(|peer_info| { - peer_info.last_sent_transactions.clear(); - peer_info.reset_private_stats() - } - ); - } - } + if !is_syncing && !enacted.is_empty() && !self.peers.is_empty() { + // Select random peer to re-broadcast transactions to. + let peer = random::new().gen_range(0, self.peers.len()); + trace!(target: "sync", "Re-broadcasting transactions to a random peer."); + self.peers.values_mut().nth(peer).map(|peer_info| { + peer_info.last_sent_transactions.clear(); + peer_info.reset_private_stats() + }); + } + } - pub fn on_packet(&mut self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { - SyncHandler::on_packet(self, io, peer, packet_id, data); - } + pub fn on_packet(&mut self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { + SyncHandler::on_packet(self, io, peer, packet_id, data); + } - /// Called by peer when it is disconnecting - pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) { - SyncHandler::on_peer_aborting(self, io, peer); - } + /// Called by peer when it is disconnecting + pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) { + SyncHandler::on_peer_aborting(self, io, peer); + } - /// Called when a new peer is connected - pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { - SyncHandler::on_peer_connected(self, io, peer); - } + /// Called when a new peer is connected + pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { + SyncHandler::on_peer_connected(self, io, peer); + } - /// propagates new transactions to all peers - pub fn propagate_new_transactions(&mut self, io: &mut SyncIo) { - let deadline = Instant::now() + Duration::from_millis(500); - SyncPropagator::propagate_new_transactions(self, io, || { - if deadline > Instant::now() { - true - } else { - debug!(target: "sync", "Wasn't able to 
finish transaction propagation within a deadline."); - false - } - }); - } + /// propagates new transactions to all peers + pub fn propagate_new_transactions(&mut self, io: &mut SyncIo) { + let deadline = Instant::now() + Duration::from_millis(500); + SyncPropagator::propagate_new_transactions(self, io, || { + if deadline > Instant::now() { + true + } else { + debug!(target: "sync", "Wasn't able to finish transaction propagation within a deadline."); + false + } + }); + } - /// Broadcast consensus message to peers. - pub fn propagate_consensus_packet(&mut self, io: &mut SyncIo, packet: Bytes) { - SyncPropagator::propagate_consensus_packet(self, io, packet); - } + /// Broadcast consensus message to peers. + pub fn propagate_consensus_packet(&mut self, io: &mut SyncIo, packet: Bytes) { + SyncPropagator::propagate_consensus_packet(self, io, packet); + } - /// Broadcast private transaction message to peers. - pub fn propagate_private_transaction(&mut self, io: &mut SyncIo, transaction_hash: H256, packet_id: SyncPacket, packet: Bytes) { - SyncPropagator::propagate_private_transaction(self, io, transaction_hash, packet_id, packet); - } + /// Broadcast private transaction message to peers. 
+ pub fn propagate_private_transaction( + &mut self, + io: &mut SyncIo, + transaction_hash: H256, + packet_id: SyncPacket, + packet: Bytes, + ) { + SyncPropagator::propagate_private_transaction( + self, + io, + transaction_hash, + packet_id, + packet, + ); + } } #[cfg(test)] pub mod tests { - use std::collections::{VecDeque}; - use ethkey; - use network::PeerId; - use tests::helpers::{TestIo}; - use tests::snapshot::TestSnapshotService; - use ethereum_types::{H256, U256, Address}; - use parking_lot::RwLock; - use bytes::Bytes; - use rlp::{Rlp, RlpStream}; - use super::*; - use ::SyncConfig; - use super::{PeerInfo, PeerAsking}; - use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient, ChainInfo, BlockInfo}; - use ethcore::miner::{MinerService, PendingOrdering}; - use types::header::Header; + use super::{PeerAsking, PeerInfo, *}; + use bytes::Bytes; + use ethcore::{ + client::{BlockChainClient, BlockInfo, ChainInfo, EachBlockWith, TestBlockChainClient}, + miner::{MinerService, PendingOrdering}, + }; + use ethereum_types::{Address, H256, U256}; + use ethkey; + use network::PeerId; + use parking_lot::RwLock; + use rlp::{Rlp, RlpStream}; + use std::collections::VecDeque; + use tests::{helpers::TestIo, snapshot::TestSnapshotService}; + use types::header::Header; + use SyncConfig; - pub fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { - let mut header = Header::new(); - header.set_gas_limit(0.into()); - header.set_difficulty((order * 100).into()); - header.set_timestamp((order * 10) as u64); - header.set_number(order as u64); - header.set_parent_hash(parent_hash); - header.set_state_root(H256::zero()); + pub fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { + let mut header = Header::new(); + header.set_gas_limit(0.into()); + header.set_difficulty((order * 100).into()); + header.set_timestamp((order * 10) as u64); + header.set_number(order as u64); + header.set_parent_hash(parent_hash); + header.set_state_root(H256::zero()); - 
let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&::rlp::EMPTY_LIST_RLP, 1); - rlp.append_raw(&::rlp::EMPTY_LIST_RLP, 1); - rlp.out() - } + let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&::rlp::EMPTY_LIST_RLP, 1); + rlp.append_raw(&::rlp::EMPTY_LIST_RLP, 1); + rlp.out() + } - pub fn get_dummy_blocks(order: u32, parent_hash: H256) -> Bytes { - let mut rlp = RlpStream::new_list(2); - rlp.append_raw(&get_dummy_block(order, parent_hash), 1); - let difficulty: U256 = (100 * order).into(); - rlp.append(&difficulty); - rlp.out() - } + pub fn get_dummy_blocks(order: u32, parent_hash: H256) -> Bytes { + let mut rlp = RlpStream::new_list(2); + rlp.append_raw(&get_dummy_block(order, parent_hash), 1); + let difficulty: U256 = (100 * order).into(); + rlp.append(&difficulty); + rlp.out() + } - pub fn get_dummy_hashes() -> Bytes { - let mut rlp = RlpStream::new_list(5); - for _ in 0..5 { - let mut hash_d_rlp = RlpStream::new_list(2); - let hash: H256 = H256::from(0u64); - let diff: U256 = U256::from(1u64); - hash_d_rlp.append(&hash); - hash_d_rlp.append(&diff); + pub fn get_dummy_hashes() -> Bytes { + let mut rlp = RlpStream::new_list(5); + for _ in 0..5 { + let mut hash_d_rlp = RlpStream::new_list(2); + let hash: H256 = H256::from(0u64); + let diff: U256 = U256::from(1u64); + hash_d_rlp.append(&hash); + hash_d_rlp.append(&diff); - rlp.append_raw(&hash_d_rlp.out(), 1); - } + rlp.append_raw(&hash_d_rlp.out(), 1); + } - rlp.out() - } + rlp.out() + } - fn queue_info(unverified: usize, verified: usize) -> BlockQueueInfo { - BlockQueueInfo { - unverified_queue_size: unverified, - verified_queue_size: verified, - verifying_queue_size: 0, - max_queue_size: 1000, - max_mem_use: 1000, - mem_used: 500 - } - } + fn queue_info(unverified: usize, verified: usize) -> BlockQueueInfo { + BlockQueueInfo { + unverified_queue_size: unverified, + verified_queue_size: verified, + verifying_queue_size: 0, + max_queue_size: 1000, + 
max_mem_use: 1000, + mem_used: 500, + } + } - fn sync_status(state: SyncState) -> SyncStatus { - SyncStatus { - state: state, - protocol_version: 0, - network_id: 0, - start_block_number: 0, - last_imported_block_number: None, - highest_block_number: None, - blocks_total: 0, - blocks_received: 0, - num_peers: 0, - num_active_peers: 0, - mem_used: 0, - num_snapshot_chunks: 0, - snapshot_chunks_done: 0, - last_imported_old_block_number: None, - } - } + fn sync_status(state: SyncState) -> SyncStatus { + SyncStatus { + state: state, + protocol_version: 0, + network_id: 0, + start_block_number: 0, + last_imported_block_number: None, + highest_block_number: None, + blocks_total: 0, + blocks_received: 0, + num_peers: 0, + num_active_peers: 0, + mem_used: 0, + num_snapshot_chunks: 0, + snapshot_chunks_done: 0, + last_imported_old_block_number: None, + } + } - #[test] - fn is_still_verifying() { - assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(2, 1))); - assert!(sync_status(SyncState::Idle).is_syncing(queue_info(2, 2))); - } + #[test] + fn is_still_verifying() { + assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(2, 1))); + assert!(sync_status(SyncState::Idle).is_syncing(queue_info(2, 2))); + } - #[test] - fn is_synced_state() { - assert!(sync_status(SyncState::Blocks).is_syncing(queue_info(0, 0))); - assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(0, 0))); - } + #[test] + fn is_synced_state() { + assert!(sync_status(SyncState::Blocks).is_syncing(queue_info(0, 0))); + assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(0, 0))); + } - pub fn dummy_sync_with_peer(peer_latest_hash: H256, client: &BlockChainClient) -> ChainSync { - let mut sync = ChainSync::new(SyncConfig::default(), client, None); - insert_dummy_peer(&mut sync, 0, peer_latest_hash); - sync - } + pub fn dummy_sync_with_peer(peer_latest_hash: H256, client: &BlockChainClient) -> ChainSync { + let mut sync = ChainSync::new(SyncConfig::default(), client, None); + 
insert_dummy_peer(&mut sync, 0, peer_latest_hash); + sync + } - pub fn insert_dummy_peer(sync: &mut ChainSync, peer_id: PeerId, peer_latest_hash: H256) { - sync.peers.insert(peer_id, - PeerInfo { - protocol_version: 0, - genesis: H256::zero(), - network_id: 0, - latest_hash: peer_latest_hash, - difficulty: None, - asking: PeerAsking::Nothing, - asking_blocks: Vec::new(), - asking_hash: None, - ask_time: Instant::now(), - last_sent_transactions: Default::default(), - last_sent_private_transactions: Default::default(), - expired: false, - private_tx_enabled: false, - confirmation: super::ForkConfirmation::Confirmed, - snapshot_number: None, - snapshot_hash: None, - asking_snapshot_data: None, - block_set: None, - client_version: ClientVersion::from(""), - }); + pub fn insert_dummy_peer(sync: &mut ChainSync, peer_id: PeerId, peer_latest_hash: H256) { + sync.peers.insert( + peer_id, + PeerInfo { + protocol_version: 0, + genesis: H256::zero(), + network_id: 0, + latest_hash: peer_latest_hash, + difficulty: None, + asking: PeerAsking::Nothing, + asking_blocks: Vec::new(), + asking_hash: None, + ask_time: Instant::now(), + last_sent_transactions: Default::default(), + last_sent_private_transactions: Default::default(), + expired: false, + private_tx_enabled: false, + confirmation: super::ForkConfirmation::Confirmed, + snapshot_number: None, + snapshot_hash: None, + asking_snapshot_data: None, + block_set: None, + client_version: ClientVersion::from(""), + }, + ); + } - } + #[test] + fn finds_lagging_peers() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + let sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client); + let chain_info = client.chain_info(); - #[test] - fn finds_lagging_peers() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client); - let chain_info = client.chain_info(); + 
let lagging_peers = sync.get_lagging_peers(&chain_info); - let lagging_peers = sync.get_lagging_peers(&chain_info); + assert_eq!(1, lagging_peers.len()); + } - assert_eq!(1, lagging_peers.len()); - } + #[test] + fn calculates_tree_for_lagging_peer() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(15, EachBlockWith::Uncle); - #[test] - fn calculates_tree_for_lagging_peer() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(15, EachBlockWith::Uncle); + let start = client.block_hash_delta_minus(4); + let end = client.block_hash_delta_minus(2); - let start = client.block_hash_delta_minus(4); - let end = client.block_hash_delta_minus(2); + // wrong way end -> start, should be None + let rlp = ChainSync::create_new_hashes_rlp(&client, &end, &start); + assert!(rlp.is_none()); - // wrong way end -> start, should be None - let rlp = ChainSync::create_new_hashes_rlp(&client, &end, &start); - assert!(rlp.is_none()); + let rlp = ChainSync::create_new_hashes_rlp(&client, &start, &end).unwrap(); + // size of three rlp encoded hash-difficulty + assert_eq!(107, rlp.len()); + } + // idea is that what we produce when propagading latest hashes should be accepted in + // on_peer_new_hashes in our code as well + #[test] + fn hashes_rlp_mutually_acceptable() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let chain_info = client.chain_info(); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let rlp = ChainSync::create_new_hashes_rlp(&client, &start, &end).unwrap(); - // size of three rlp encoded hash-difficulty - assert_eq!(107, rlp.len()); - } - // idea is that what we produce when propagading latest hashes should be accepted in - // on_peer_new_hashes in our code as well - #[test] - fn hashes_rlp_mutually_acceptable() 
{ - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + let peers = sync.get_lagging_peers(&chain_info); + SyncPropagator::propagate_new_hashes(&mut sync, &chain_info, &mut io, &peers); - let peers = sync.get_lagging_peers(&chain_info); - SyncPropagator::propagate_new_hashes(&mut sync, &chain_info, &mut io, &peers); + let data = &io.packets[0].data.clone(); + let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &Rlp::new(data)); + assert!(result.is_ok()); + } - let data = &io.packets[0].data.clone(); - let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &Rlp::new(data)); - assert!(result.is_ok()); - } + // idea is that what we produce when propagading latest block should be accepted in + // on_peer_new_block in our code as well + #[test] + fn block_rlp_mutually_acceptable() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let chain_info = client.chain_info(); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - // idea is that what we produce when propagading latest block should be accepted in - // on_peer_new_block in our code as well - #[test] - fn block_rlp_mutually_acceptable() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut 
client, &ss, &queue, None); + let peers = sync.get_lagging_peers(&chain_info); + SyncPropagator::propagate_blocks(&mut sync, &chain_info, &mut io, &[], &peers); - let peers = sync.get_lagging_peers(&chain_info); - SyncPropagator::propagate_blocks(&mut sync, &chain_info, &mut io, &[], &peers); + let data = &io.packets[0].data.clone(); + let result = SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &Rlp::new(data)); + assert!(result.is_ok()); + } - let data = &io.packets[0].data.clone(); - let result = SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &Rlp::new(data)); - assert!(result.is_ok()); - } + #[test] + fn should_add_transactions_to_queue() { + fn sender(tx: &UnverifiedTransaction) -> Address { + ethkey::public_to_address(&tx.recover_public().unwrap()) + } - #[test] - fn should_add_transactions_to_queue() { - fn sender(tx: &UnverifiedTransaction) -> Address { - ethkey::public_to_address(&tx.recover_public().unwrap()) - } + // given + let mut client = TestBlockChainClient::new(); + client.add_blocks(98, EachBlockWith::Uncle); + client.add_blocks(1, EachBlockWith::UncleAndTransaction); + client.add_blocks(1, EachBlockWith::Transaction); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - // given - let mut client = TestBlockChainClient::new(); - client.add_blocks(98, EachBlockWith::Uncle); - client.add_blocks(1, EachBlockWith::UncleAndTransaction); - client.add_blocks(1, EachBlockWith::Transaction); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let good_blocks = vec![client.block_hash_delta_minus(2)]; + let retracted_blocks = vec![client.block_hash_delta_minus(1)]; - let good_blocks = vec![client.block_hash_delta_minus(2)]; - let retracted_blocks = vec![client.block_hash_delta_minus(1)]; + // Add some balance to clients and reset nonces + for h in &[good_blocks[0], retracted_blocks[0]] { + let block = client.block(BlockId::Hash(*h)).unwrap(); + let sender = 
sender(&block.transactions()[0]); + client.set_balance(sender, U256::from(10_000_000_000_000_000_000u64)); + client.set_nonce(sender, U256::from(0)); + } - // Add some balance to clients and reset nonces - for h in &[good_blocks[0], retracted_blocks[0]] { - let block = client.block(BlockId::Hash(*h)).unwrap(); - let sender = sender(&block.transactions()[0]);; - client.set_balance(sender, U256::from(10_000_000_000_000_000_000u64)); - client.set_nonce(sender, U256::from(0)); - } + // when + { + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + io.chain + .miner + .chain_new_blocks(io.chain, &[], &[], &[], &good_blocks, false); + sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]); + assert_eq!( + io.chain + .miner + .ready_transactions(io.chain, 10, PendingOrdering::Priority) + .len(), + 1 + ); + } + // We need to update nonce status (because we say that the block has been imported) + for h in &[good_blocks[0]] { + let block = client.block(BlockId::Hash(*h)).unwrap(); + client.set_nonce(sender(&block.transactions()[0]), U256::from(1)); + } + { + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&client, &ss, &queue, None); + io.chain.miner.chain_new_blocks( + io.chain, + &[], + &[], + &good_blocks, + &retracted_blocks, + false, + ); + sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]); + } - // when - { - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks, false); - sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]); - assert_eq!(io.chain.miner.ready_transactions(io.chain, 10, PendingOrdering::Priority).len(), 1); - } - // We need to update nonce status (because we say that the block has 
been imported) - for h in &[good_blocks[0]] { - let block = client.block(BlockId::Hash(*h)).unwrap(); - client.set_nonce(sender(&block.transactions()[0]), U256::from(1)); - } - { - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&client, &ss, &queue, None); - io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks, false); - sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]); - } + // then + assert_eq!( + client + .miner + .ready_transactions(&client, 10, PendingOrdering::Priority) + .len(), + 1 + ); + } - // then - assert_eq!(client.miner.ready_transactions(&client, 10, PendingOrdering::Priority).len(), 1); - } + #[test] + fn should_not_add_transactions_to_queue_if_not_synced() { + // given + let mut client = TestBlockChainClient::new(); + client.add_blocks(98, EachBlockWith::Uncle); + client.add_blocks(1, EachBlockWith::UncleAndTransaction); + client.add_blocks(1, EachBlockWith::Transaction); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - #[test] - fn should_not_add_transactions_to_queue_if_not_synced() { - // given - let mut client = TestBlockChainClient::new(); - client.add_blocks(98, EachBlockWith::Uncle); - client.add_blocks(1, EachBlockWith::UncleAndTransaction); - client.add_blocks(1, EachBlockWith::Transaction); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let good_blocks = vec![client.block_hash_delta_minus(2)]; + let retracted_blocks = vec![client.block_hash_delta_minus(1)]; - let good_blocks = vec![client.block_hash_delta_minus(2)]; - let retracted_blocks = vec![client.block_hash_delta_minus(1)]; + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, 
&ss, &queue, None); + // when + sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]); + assert_eq!(io.chain.miner.queue_status().status.transaction_count, 0); + sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]); - // when - sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]); - assert_eq!(io.chain.miner.queue_status().status.transaction_count, 0); - sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]); - - // then - let status = io.chain.miner.queue_status(); - assert_eq!(status.status.transaction_count, 0); - } + // then + let status = io.chain.miner.queue_status(); + assert_eq!(status.status.transaction_count, 0); + } } diff --git a/ethcore/sync/src/chain/propagator.rs b/ethcore/sync/src/chain/propagator.rs index c3654553f..c893c6f90 100644 --- a/ethcore/sync/src/chain/propagator.rs +++ b/ethcore/sync/src/chain/propagator.rs @@ -14,626 +14,699 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::cmp; -use std::collections::HashSet; +use std::{cmp, collections::HashSet}; use bytes::Bytes; use ethereum_types::H256; use fastmap::H256FastSet; -use network::client_version::ClientCapabilities; -use network::PeerId; +use network::{client_version::ClientCapabilities, PeerId}; use rand::Rng; use rlp::{Encodable, RlpStream}; use sync_io::SyncIo; -use types::transaction::SignedTransaction; -use types::BlockNumber; -use types::blockchain_info::BlockChainInfo; +use types::{blockchain_info::BlockChainInfo, transaction::SignedTransaction, BlockNumber}; -use super::sync_packet::SyncPacket; -use super::sync_packet::SyncPacket::{ - NewBlockHashesPacket, - TransactionsPacket, - NewBlockPacket, - ConsensusDataPacket, +use super::sync_packet::{ + SyncPacket, + SyncPacket::{ConsensusDataPacket, NewBlockHashesPacket, NewBlockPacket, TransactionsPacket}, }; use super::{ - random, - ChainSync, - MAX_TRANSACTION_PACKET_SIZE, - MAX_PEER_LAG_PROPAGATION, - MAX_PEERS_PROPAGATION, - MIN_PEERS_PROPAGATION, + random, ChainSync, MAX_PEERS_PROPAGATION, MAX_PEER_LAG_PROPAGATION, + MAX_TRANSACTION_PACKET_SIZE, MIN_PEERS_PROPAGATION, }; /// The Chain Sync Propagator: propagates data to peers pub struct SyncPropagator; impl SyncPropagator { - /// propagates latest block to a set of peers - pub fn propagate_blocks(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut SyncIo, blocks: &[H256], peers: &[PeerId]) -> usize { - trace!(target: "sync", "Sending NewBlocks to {:?}", peers); - let sent = peers.len(); - let mut send_packet = |io: &mut SyncIo, rlp: Bytes| { - for peer_id in peers { - SyncPropagator::send_packet(io, *peer_id, NewBlockPacket, rlp.clone()); + /// propagates latest block to a set of peers + pub fn propagate_blocks( + sync: &mut ChainSync, + chain_info: &BlockChainInfo, + io: &mut SyncIo, + blocks: &[H256], + peers: &[PeerId], + ) -> usize { + trace!(target: "sync", "Sending NewBlocks to {:?}", peers); + let sent = peers.len(); + let mut send_packet = |io: &mut 
SyncIo, rlp: Bytes| { + for peer_id in peers { + SyncPropagator::send_packet(io, *peer_id, NewBlockPacket, rlp.clone()); - if let Some(ref mut peer) = sync.peers.get_mut(peer_id) { - peer.latest_hash = chain_info.best_block_hash.clone(); - } - } - }; + if let Some(ref mut peer) = sync.peers.get_mut(peer_id) { + peer.latest_hash = chain_info.best_block_hash.clone(); + } + } + }; - if blocks.is_empty() { - let rlp = ChainSync::create_latest_block_rlp(io.chain()); - send_packet(io, rlp); - } else { - for h in blocks { - let rlp = ChainSync::create_new_block_rlp(io.chain(), h); - send_packet(io, rlp); - } - } + if blocks.is_empty() { + let rlp = ChainSync::create_latest_block_rlp(io.chain()); + send_packet(io, rlp); + } else { + for h in blocks { + let rlp = ChainSync::create_new_block_rlp(io.chain(), h); + send_packet(io, rlp); + } + } - sent - } + sent + } - /// propagates new known hashes to all peers - pub fn propagate_new_hashes(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[PeerId]) -> usize { - trace!(target: "sync", "Sending NewHashes to {:?}", peers); - let last_parent = *io.chain().best_block_header().parent_hash(); - let best_block_hash = chain_info.best_block_hash; - let rlp = match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &best_block_hash) { - Some(rlp) => rlp, - None => return 0 - }; + /// propagates new known hashes to all peers + pub fn propagate_new_hashes( + sync: &mut ChainSync, + chain_info: &BlockChainInfo, + io: &mut SyncIo, + peers: &[PeerId], + ) -> usize { + trace!(target: "sync", "Sending NewHashes to {:?}", peers); + let last_parent = *io.chain().best_block_header().parent_hash(); + let best_block_hash = chain_info.best_block_hash; + let rlp = match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &best_block_hash) + { + Some(rlp) => rlp, + None => return 0, + }; - let sent = peers.len(); - for peer_id in peers { - if let Some(ref mut peer) = sync.peers.get_mut(peer_id) { - 
peer.latest_hash = best_block_hash; - } - SyncPropagator::send_packet(io, *peer_id, NewBlockHashesPacket, rlp.clone()); - } - sent - } + let sent = peers.len(); + for peer_id in peers { + if let Some(ref mut peer) = sync.peers.get_mut(peer_id) { + peer.latest_hash = best_block_hash; + } + SyncPropagator::send_packet(io, *peer_id, NewBlockHashesPacket, rlp.clone()); + } + sent + } - /// propagates new transactions to all peers - pub fn propagate_new_transactions bool>(sync: &mut ChainSync, io: &mut SyncIo, mut should_continue: F) -> usize { - // Early out if nobody to send to. - if sync.peers.is_empty() { - return 0; - } + /// propagates new transactions to all peers + pub fn propagate_new_transactions bool>( + sync: &mut ChainSync, + io: &mut SyncIo, + mut should_continue: F, + ) -> usize { + // Early out if nobody to send to. + if sync.peers.is_empty() { + return 0; + } - let transactions = io.chain().transactions_to_propagate(); - if transactions.is_empty() { - return 0; - } + let transactions = io.chain().transactions_to_propagate(); + if transactions.is_empty() { + return 0; + } - if !should_continue() { - return 0; - } + if !should_continue() { + return 0; + } - let (transactions, service_transactions): (Vec<_>, Vec<_>) = transactions.iter() - .map(|tx| tx.signed()) - .partition(|tx| !tx.gas_price.is_zero()); + let (transactions, service_transactions): (Vec<_>, Vec<_>) = transactions + .iter() + .map(|tx| tx.signed()) + .partition(|tx| !tx.gas_price.is_zero()); - // usual transactions could be propagated to all peers - let mut affected_peers = HashSet::new(); - if !transactions.is_empty() { - let peers = SyncPropagator::select_peers_for_transactions(sync, |_| true); - affected_peers = SyncPropagator::propagate_transactions_to_peers( - sync, io, peers, transactions, &mut should_continue, - ); - } + // usual transactions could be propagated to all peers + let mut affected_peers = HashSet::new(); + if !transactions.is_empty() { + let peers = 
SyncPropagator::select_peers_for_transactions(sync, |_| true); + affected_peers = SyncPropagator::propagate_transactions_to_peers( + sync, + io, + peers, + transactions, + &mut should_continue, + ); + } - // most of times service_transactions will be empty - // => there's no need to merge packets - if !service_transactions.is_empty() { - let service_transactions_peers = SyncPropagator::select_peers_for_transactions(sync, |peer_id| io.peer_version(*peer_id).accepts_service_transaction()); - let service_transactions_affected_peers = SyncPropagator::propagate_transactions_to_peers( - sync, io, service_transactions_peers, service_transactions, &mut should_continue - ); - affected_peers.extend(&service_transactions_affected_peers); - } + // most of times service_transactions will be empty + // => there's no need to merge packets + if !service_transactions.is_empty() { + let service_transactions_peers = + SyncPropagator::select_peers_for_transactions(sync, |peer_id| { + io.peer_version(*peer_id).accepts_service_transaction() + }); + let service_transactions_affected_peers = + SyncPropagator::propagate_transactions_to_peers( + sync, + io, + service_transactions_peers, + service_transactions, + &mut should_continue, + ); + affected_peers.extend(&service_transactions_affected_peers); + } - affected_peers.len() - } + affected_peers.len() + } - fn propagate_transactions_to_peers bool>( - sync: &mut ChainSync, - io: &mut SyncIo, - peers: Vec, - transactions: Vec<&SignedTransaction>, - mut should_continue: F, - ) -> HashSet { - let all_transactions_hashes = transactions.iter() - .map(|tx| tx.hash()) - .collect::(); - let all_transactions_rlp = { - let mut packet = RlpStream::new_list(transactions.len()); - for tx in &transactions { packet.append(&**tx); } - packet.out() - }; + fn propagate_transactions_to_peers bool>( + sync: &mut ChainSync, + io: &mut SyncIo, + peers: Vec, + transactions: Vec<&SignedTransaction>, + mut should_continue: F, + ) -> HashSet { + let 
all_transactions_hashes = transactions + .iter() + .map(|tx| tx.hash()) + .collect::(); + let all_transactions_rlp = { + let mut packet = RlpStream::new_list(transactions.len()); + for tx in &transactions { + packet.append(&**tx); + } + packet.out() + }; - // Clear old transactions from stats - sync.transactions_stats.retain(&all_transactions_hashes); + // Clear old transactions from stats + sync.transactions_stats.retain(&all_transactions_hashes); - let send_packet = |io: &mut SyncIo, peer_id: PeerId, sent: usize, rlp: Bytes| { - let size = rlp.len(); - SyncPropagator::send_packet(io, peer_id, TransactionsPacket, rlp); - trace!(target: "sync", "{:02} <- Transactions ({} entries; {} bytes)", peer_id, sent, size); - }; + let send_packet = |io: &mut SyncIo, peer_id: PeerId, sent: usize, rlp: Bytes| { + let size = rlp.len(); + SyncPropagator::send_packet(io, peer_id, TransactionsPacket, rlp); + trace!(target: "sync", "{:02} <- Transactions ({} entries; {} bytes)", peer_id, sent, size); + }; - let block_number = io.chain().chain_info().best_block_number; - let mut sent_to_peers = HashSet::new(); - let mut max_sent = 0; + let block_number = io.chain().chain_info().best_block_number; + let mut sent_to_peers = HashSet::new(); + let mut max_sent = 0; - // for every peer construct and send transactions packet - for peer_id in peers { - if !should_continue() { - debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, sent_to_peers.len()); - return sent_to_peers; - } + // for every peer construct and send transactions packet + for peer_id in peers { + if !should_continue() { + debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, sent_to_peers.len()); + return sent_to_peers; + } - let stats = &mut sync.transactions_stats; - let peer_info = sync.peers.get_mut(&peer_id) + let stats = &mut sync.transactions_stats; + let peer_info = sync.peers.get_mut(&peer_id) .expect("peer_id is form peers; peers is result of 
select_peers_for_transactions; select_peers_for_transactions selects peers from self.peers; qed"); - // Send all transactions, if the peer doesn't know about anything - if peer_info.last_sent_transactions.is_empty() { - // update stats - for hash in &all_transactions_hashes { - let id = io.peer_session_info(peer_id).and_then(|info| info.id); - stats.propagated(hash, id, block_number); - } - peer_info.last_sent_transactions = all_transactions_hashes.clone(); + // Send all transactions, if the peer doesn't know about anything + if peer_info.last_sent_transactions.is_empty() { + // update stats + for hash in &all_transactions_hashes { + let id = io.peer_session_info(peer_id).and_then(|info| info.id); + stats.propagated(hash, id, block_number); + } + peer_info.last_sent_transactions = all_transactions_hashes.clone(); - send_packet(io, peer_id, all_transactions_hashes.len(), all_transactions_rlp.clone()); - sent_to_peers.insert(peer_id); - max_sent = cmp::max(max_sent, all_transactions_hashes.len()); - continue; - } + send_packet( + io, + peer_id, + all_transactions_hashes.len(), + all_transactions_rlp.clone(), + ); + sent_to_peers.insert(peer_id); + max_sent = cmp::max(max_sent, all_transactions_hashes.len()); + continue; + } - // Get hashes of all transactions to send to this peer - let to_send = all_transactions_hashes.difference(&peer_info.last_sent_transactions) - .cloned() - .collect::>(); - if to_send.is_empty() { - continue; - } + // Get hashes of all transactions to send to this peer + let to_send = all_transactions_hashes + .difference(&peer_info.last_sent_transactions) + .cloned() + .collect::>(); + if to_send.is_empty() { + continue; + } - // Construct RLP - let (packet, to_send) = { - let mut to_send = to_send; - let mut packet = RlpStream::new(); - packet.begin_unbounded_list(); - let mut pushed = 0; - for tx in &transactions { - let hash = tx.hash(); - if to_send.contains(&hash) { - let mut transaction = RlpStream::new(); - tx.rlp_append(&mut 
transaction); - let appended = packet.append_raw_checked(&transaction.drain(), 1, MAX_TRANSACTION_PACKET_SIZE); - if !appended { - // Maximal packet size reached just proceed with sending - debug!(target: "sync", "Transaction packet size limit reached. Sending incomplete set of {}/{} transactions.", pushed, to_send.len()); - to_send = to_send.into_iter().take(pushed).collect(); - break; - } - pushed += 1; - } - } - packet.complete_unbounded_list(); - (packet, to_send) - }; + // Construct RLP + let (packet, to_send) = { + let mut to_send = to_send; + let mut packet = RlpStream::new(); + packet.begin_unbounded_list(); + let mut pushed = 0; + for tx in &transactions { + let hash = tx.hash(); + if to_send.contains(&hash) { + let mut transaction = RlpStream::new(); + tx.rlp_append(&mut transaction); + let appended = packet.append_raw_checked( + &transaction.drain(), + 1, + MAX_TRANSACTION_PACKET_SIZE, + ); + if !appended { + // Maximal packet size reached just proceed with sending + debug!(target: "sync", "Transaction packet size limit reached. 
Sending incomplete set of {}/{} transactions.", pushed, to_send.len()); + to_send = to_send.into_iter().take(pushed).collect(); + break; + } + pushed += 1; + } + } + packet.complete_unbounded_list(); + (packet, to_send) + }; - // Update stats - let id = io.peer_session_info(peer_id).and_then(|info| info.id); - for hash in &to_send { - // update stats - stats.propagated(hash, id, block_number); - } + // Update stats + let id = io.peer_session_info(peer_id).and_then(|info| info.id); + for hash in &to_send { + // update stats + stats.propagated(hash, id, block_number); + } - peer_info.last_sent_transactions = all_transactions_hashes - .intersection(&peer_info.last_sent_transactions) - .chain(&to_send) - .cloned() - .collect(); - send_packet(io, peer_id, to_send.len(), packet.out()); - sent_to_peers.insert(peer_id); - max_sent = cmp::max(max_sent, to_send.len()); + peer_info.last_sent_transactions = all_transactions_hashes + .intersection(&peer_info.last_sent_transactions) + .chain(&to_send) + .cloned() + .collect(); + send_packet(io, peer_id, to_send.len(), packet.out()); + sent_to_peers.insert(peer_id); + max_sent = cmp::max(max_sent, to_send.len()); + } - } + debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, sent_to_peers.len()); + sent_to_peers + } - debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, sent_to_peers.len()); - sent_to_peers - } + pub fn propagate_latest_blocks(sync: &mut ChainSync, io: &mut SyncIo, sealed: &[H256]) { + let chain_info = io.chain().chain_info(); + if (((chain_info.best_block_number as i64) - (sync.last_sent_block_number as i64)).abs() + as BlockNumber) + < MAX_PEER_LAG_PROPAGATION + { + let peers = sync.get_lagging_peers(&chain_info); + if sealed.is_empty() { + let hashes = SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers); + let peers = ChainSync::select_random_peers(&peers); + let blocks = + SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers); + 
if blocks != 0 || hashes != 0 { + trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); + } + } else { + SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers); + SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers); + trace!(target: "sync", "Sent sealed block to all peers"); + }; + } + sync.last_sent_block_number = chain_info.best_block_number; + } - pub fn propagate_latest_blocks(sync: &mut ChainSync, io: &mut SyncIo, sealed: &[H256]) { - let chain_info = io.chain().chain_info(); - if (((chain_info.best_block_number as i64) - (sync.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { - let peers = sync.get_lagging_peers(&chain_info); - if sealed.is_empty() { - let hashes = SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers); - let peers = ChainSync::select_random_peers(&peers); - let blocks = SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers); - if blocks != 0 || hashes != 0 { - trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); - } - } else { - SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers); - SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers); - trace!(target: "sync", "Sent sealed block to all peers"); - }; - } - sync.last_sent_block_number = chain_info.best_block_number; - } + /// Distribute valid proposed blocks to subset of current peers. + pub fn propagate_proposed_blocks(sync: &mut ChainSync, io: &mut SyncIo, proposed: &[Bytes]) { + let peers = sync.get_consensus_peers(); + trace!(target: "sync", "Sending proposed blocks to {:?}", peers); + for block in proposed { + let rlp = ChainSync::create_block_rlp(block, io.chain().chain_info().total_difficulty); + for peer_id in &peers { + SyncPropagator::send_packet(io, *peer_id, NewBlockPacket, rlp.clone()); + } + } + } - /// Distribute valid proposed blocks to subset of current peers. 
- pub fn propagate_proposed_blocks(sync: &mut ChainSync, io: &mut SyncIo, proposed: &[Bytes]) { - let peers = sync.get_consensus_peers(); - trace!(target: "sync", "Sending proposed blocks to {:?}", peers); - for block in proposed { - let rlp = ChainSync::create_block_rlp( - block, - io.chain().chain_info().total_difficulty - ); - for peer_id in &peers { - SyncPropagator::send_packet(io, *peer_id, NewBlockPacket, rlp.clone()); - } - } - } + /// Broadcast consensus message to peers. + pub fn propagate_consensus_packet(sync: &mut ChainSync, io: &mut SyncIo, packet: Bytes) { + let lucky_peers = ChainSync::select_random_peers(&sync.get_consensus_peers()); + trace!(target: "sync", "Sending consensus packet to {:?}", lucky_peers); + for peer_id in lucky_peers { + SyncPropagator::send_packet(io, peer_id, ConsensusDataPacket, packet.clone()); + } + } - /// Broadcast consensus message to peers. - pub fn propagate_consensus_packet(sync: &mut ChainSync, io: &mut SyncIo, packet: Bytes) { - let lucky_peers = ChainSync::select_random_peers(&sync.get_consensus_peers()); - trace!(target: "sync", "Sending consensus packet to {:?}", lucky_peers); - for peer_id in lucky_peers { - SyncPropagator::send_packet(io, peer_id, ConsensusDataPacket, packet.clone()); - } - } + /// Broadcast private transaction message to peers. 
+ pub fn propagate_private_transaction( + sync: &mut ChainSync, + io: &mut SyncIo, + transaction_hash: H256, + packet_id: SyncPacket, + packet: Bytes, + ) { + let lucky_peers = + ChainSync::select_random_peers(&sync.get_private_transaction_peers(&transaction_hash)); + if lucky_peers.is_empty() { + error!(target: "privatetx", "Cannot propagate the packet, no peers with private tx enabled connected"); + } else { + trace!(target: "privatetx", "Sending private transaction packet to {:?}", lucky_peers); + for peer_id in lucky_peers { + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + peer.last_sent_private_transactions.insert(transaction_hash); + } + SyncPropagator::send_packet(io, peer_id, packet_id, packet.clone()); + } + } + } - /// Broadcast private transaction message to peers. - pub fn propagate_private_transaction(sync: &mut ChainSync, io: &mut SyncIo, transaction_hash: H256, packet_id: SyncPacket, packet: Bytes) { - let lucky_peers = ChainSync::select_random_peers(&sync.get_private_transaction_peers(&transaction_hash)); - if lucky_peers.is_empty() { - error!(target: "privatetx", "Cannot propagate the packet, no peers with private tx enabled connected"); - } else { - trace!(target: "privatetx", "Sending private transaction packet to {:?}", lucky_peers); - for peer_id in lucky_peers { - if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { - peer.last_sent_private_transactions.insert(transaction_hash); - } - SyncPropagator::send_packet(io, peer_id, packet_id, packet.clone()); - } - } - } + fn select_peers_for_transactions(sync: &ChainSync, filter: F) -> Vec + where + F: Fn(&PeerId) -> bool, + { + // sqrt(x)/x scaled to max u32 + let fraction = + ((sync.peers.len() as f64).powf(-0.5) * (u32::max_value() as f64).round()) as u32; + let small = sync.peers.len() < MIN_PEERS_PROPAGATION; - fn select_peers_for_transactions(sync: &ChainSync, filter: F) -> Vec - where F: Fn(&PeerId) -> bool { - // sqrt(x)/x scaled to max u32 - let fraction = 
((sync.peers.len() as f64).powf(-0.5) * (u32::max_value() as f64).round()) as u32; - let small = sync.peers.len() < MIN_PEERS_PROPAGATION; + let mut random = random::new(); + sync.peers + .keys() + .cloned() + .filter(filter) + .filter(|_| small || random.next_u32() < fraction) + .take(MAX_PEERS_PROPAGATION) + .collect() + } - let mut random = random::new(); - sync.peers.keys() - .cloned() - .filter(filter) - .filter(|_| small || random.next_u32() < fraction) - .take(MAX_PEERS_PROPAGATION) - .collect() - } - - /// Generic packet sender - pub fn send_packet(sync: &mut SyncIo, peer_id: PeerId, packet_id: SyncPacket, packet: Bytes) { - if let Err(e) = sync.send(peer_id, packet_id, packet) { - debug!(target:"sync", "Error sending packet: {:?}", e); - sync.disconnect_peer(peer_id); - } - } + /// Generic packet sender + pub fn send_packet(sync: &mut SyncIo, peer_id: PeerId, packet_id: SyncPacket, packet: Bytes) { + if let Err(e) = sync.send(peer_id, packet_id, packet) { + debug!(target:"sync", "Error sending packet: {:?}", e); + sync.disconnect_peer(peer_id); + } + } } #[cfg(test)] mod tests { - use ethcore::client::{BlockInfo, ChainInfo, EachBlockWith, TestBlockChainClient}; - use parking_lot::RwLock; - use rlp::{Rlp}; - use std::collections::{VecDeque}; - use tests::helpers::{TestIo}; - use tests::snapshot::TestSnapshotService; + use ethcore::client::{BlockInfo, ChainInfo, EachBlockWith, TestBlockChainClient}; + use parking_lot::RwLock; + use rlp::Rlp; + use std::collections::VecDeque; + use tests::{helpers::TestIo, snapshot::TestSnapshotService}; - use super::{*, super::{*, tests::*}}; + use super::{ + super::{tests::*, *}, + *, + }; - #[test] - fn sends_new_hashes_to_lagging_peer() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = 
TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + #[test] + fn sends_new_hashes_to_lagging_peer() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let chain_info = client.chain_info(); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peers = sync.get_lagging_peers(&chain_info); - let peer_count = SyncPropagator::propagate_new_hashes(&mut sync, &chain_info, &mut io, &peers); + let peers = sync.get_lagging_peers(&chain_info); + let peer_count = + SyncPropagator::propagate_new_hashes(&mut sync, &chain_info, &mut io, &peers); - // 1 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should be updated - assert_eq!(1, peer_count); - // NEW_BLOCK_HASHES_PACKET - assert_eq!(0x01, io.packets[0].packet_id); - } + // 1 message should be send + assert_eq!(1, io.packets.len()); + // 1 peer should be updated + assert_eq!(1, peer_count); + // NEW_BLOCK_HASHES_PACKET + assert_eq!(0x01, io.packets[0].packet_id); + } - #[test] - fn sends_latest_block_to_lagging_peer() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peers = sync.get_lagging_peers(&chain_info); - let peer_count = SyncPropagator::propagate_blocks(&mut sync, &chain_info, &mut io, &[], &peers); + #[test] + fn sends_latest_block_to_lagging_peer() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = 
dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let chain_info = client.chain_info(); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + let peers = sync.get_lagging_peers(&chain_info); + let peer_count = + SyncPropagator::propagate_blocks(&mut sync, &chain_info, &mut io, &[], &peers); - // 1 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should be updated - assert_eq!(1, peer_count); - // NEW_BLOCK_PACKET - assert_eq!(0x07, io.packets[0].packet_id); - } + // 1 message should be send + assert_eq!(1, io.packets.len()); + // 1 peer should be updated + assert_eq!(1, peer_count); + // NEW_BLOCK_PACKET + assert_eq!(0x07, io.packets[0].packet_id); + } - #[test] - fn sends_sealed_block() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let hash = client.block_hash(BlockId::Number(99)).unwrap(); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peers = sync.get_lagging_peers(&chain_info); - let peer_count = SyncPropagator::propagate_blocks(&mut sync ,&chain_info, &mut io, &[hash.clone()], &peers); + #[test] + fn sends_sealed_block() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let hash = client.block_hash(BlockId::Number(99)).unwrap(); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let chain_info = client.chain_info(); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + let peers = sync.get_lagging_peers(&chain_info); + let peer_count = SyncPropagator::propagate_blocks( + &mut sync, + &chain_info, + &mut io, + &[hash.clone()], + 
&peers, + ); - // 1 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should be updated - assert_eq!(1, peer_count); - // NEW_BLOCK_PACKET - assert_eq!(0x07, io.packets[0].packet_id); - } + // 1 message should be send + assert_eq!(1, io.packets.len()); + // 1 peer should be updated + assert_eq!(1, peer_count); + // NEW_BLOCK_PACKET + assert_eq!(0x07, io.packets[0].packet_id); + } - #[test] - fn sends_proposed_block() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(2, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let block = client.block(BlockId::Latest).unwrap().into_inner(); - let mut sync = ChainSync::new(SyncConfig::default(), &client, None); - sync.peers.insert(0, - PeerInfo { - // Messaging protocol - protocol_version: 2, - genesis: H256::zero(), - network_id: 0, - latest_hash: client.block_hash_delta_minus(1), - difficulty: None, - asking: PeerAsking::Nothing, - asking_blocks: Vec::new(), - asking_hash: None, - ask_time: Instant::now(), - last_sent_transactions: Default::default(), - last_sent_private_transactions: Default::default(), - expired: false, - private_tx_enabled: false, - confirmation: ForkConfirmation::Confirmed, - snapshot_number: None, - snapshot_hash: None, - asking_snapshot_data: None, - block_set: None, - client_version: ClientVersion::from(""), - }); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - SyncPropagator::propagate_proposed_blocks(&mut sync, &mut io, &[block]); + #[test] + fn sends_proposed_block() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(2, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let block = client.block(BlockId::Latest).unwrap().into_inner(); + let mut sync = ChainSync::new(SyncConfig::default(), &client, None); + sync.peers.insert( + 0, + PeerInfo { + // Messaging protocol + protocol_version: 2, + genesis: H256::zero(), + network_id: 0, + latest_hash: 
client.block_hash_delta_minus(1), + difficulty: None, + asking: PeerAsking::Nothing, + asking_blocks: Vec::new(), + asking_hash: None, + ask_time: Instant::now(), + last_sent_transactions: Default::default(), + last_sent_private_transactions: Default::default(), + expired: false, + private_tx_enabled: false, + confirmation: ForkConfirmation::Confirmed, + snapshot_number: None, + snapshot_hash: None, + asking_snapshot_data: None, + block_set: None, + client_version: ClientVersion::from(""), + }, + ); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + SyncPropagator::propagate_proposed_blocks(&mut sync, &mut io, &[block]); - // 1 message should be sent - assert_eq!(1, io.packets.len()); - // NEW_BLOCK_PACKET - assert_eq!(0x07, io.packets[0].packet_id); - } + // 1 message should be sent + assert_eq!(1, io.packets.len()); + // NEW_BLOCK_PACKET + assert_eq!(0x07, io.packets[0].packet_id); + } - #[test] - fn propagates_transactions() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - // Try to propagate same transactions for the second time - let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - // Even after new block transactions should not be propagated twice - sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); - // Try to propagate same transactions for the third time - let peer_count3 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + #[test] + fn propagates_transactions() { + let mut client = TestBlockChainClient::new(); + 
client.add_blocks(100, EachBlockWith::Uncle); + client.insert_transaction_to_queue(); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + // Try to propagate same transactions for the second time + let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + // Even after new block transactions should not be propagated twice + sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); + // Try to propagate same transactions for the third time + let peer_count3 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - // 1 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should be updated but only once - assert_eq!(1, peer_count); - assert_eq!(0, peer_count2); - assert_eq!(0, peer_count3); - // TRANSACTIONS_PACKET - assert_eq!(0x02, io.packets[0].packet_id); - } + // 1 message should be send + assert_eq!(1, io.packets.len()); + // 1 peer should be updated but only once + assert_eq!(1, peer_count); + assert_eq!(0, peer_count2); + assert_eq!(0, peer_count3); + // TRANSACTIONS_PACKET + assert_eq!(0x02, io.packets[0].packet_id); + } - #[test] - fn does_not_propagate_new_transactions_after_new_block() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - io.chain.insert_transaction_to_queue(); - // New block import should not trigger 
propagation. - // (we only propagate on timeout) - sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); + #[test] + fn does_not_propagate_new_transactions_after_new_block() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + client.insert_transaction_to_queue(); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + io.chain.insert_transaction_to_queue(); + // New block import should not trigger propagation. + // (we only propagate on timeout) + sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); - // 2 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should receive the message - assert_eq!(1, peer_count); - // TRANSACTIONS_PACKET - assert_eq!(0x02, io.packets[0].packet_id); - } + // 2 message should be send + assert_eq!(1, io.packets.len()); + // 1 peer should receive the message + assert_eq!(1, peer_count); + // TRANSACTIONS_PACKET + assert_eq!(0x02, io.packets[0].packet_id); + } - #[test] - fn does_not_fail_for_no_peers() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - // Sync with no peers - let mut sync = ChainSync::new(SyncConfig::default(), &client, None); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); - // Try to propagate same transactions for the second time - let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + #[test] + fn 
does_not_fail_for_no_peers() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + client.insert_transaction_to_queue(); + // Sync with no peers + let mut sync = ChainSync::new(SyncConfig::default(), &client, None); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); + // Try to propagate same transactions for the second time + let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - assert_eq!(0, io.packets.len()); - assert_eq!(0, peer_count); - assert_eq!(0, peer_count2); - } + assert_eq!(0, io.packets.len()); + assert_eq!(0, peer_count); + assert_eq!(0, peer_count2); + } - #[test] - fn propagates_transactions_without_alternating() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - // should sent some - { - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - assert_eq!(1, io.packets.len()); - assert_eq!(1, peer_count); - } - // Insert some more - client.insert_transaction_to_queue(); - let (peer_count2, peer_count3) = { - let mut io = TestIo::new(&mut client, &ss, &queue, None); - // Propagate new transactions - let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - // And now the peer should have all transactions - let peer_count3 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - (peer_count2, peer_count3) - }; + #[test] + fn 
propagates_transactions_without_alternating() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + client.insert_transaction_to_queue(); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + // should sent some + { + let mut io = TestIo::new(&mut client, &ss, &queue, None); + let peer_count = + SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + assert_eq!(1, io.packets.len()); + assert_eq!(1, peer_count); + } + // Insert some more + client.insert_transaction_to_queue(); + let (peer_count2, peer_count3) = { + let mut io = TestIo::new(&mut client, &ss, &queue, None); + // Propagate new transactions + let peer_count2 = + SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + // And now the peer should have all transactions + let peer_count3 = + SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + (peer_count2, peer_count3) + }; - // 2 message should be send (in total) - assert_eq!(2, queue.read().len()); - // 1 peer should be updated but only once after inserting new transaction - assert_eq!(1, peer_count2); - assert_eq!(0, peer_count3); - // TRANSACTIONS_PACKET - assert_eq!(0x02, queue.read()[0].packet_id); - assert_eq!(0x02, queue.read()[1].packet_id); - } + // 2 message should be send (in total) + assert_eq!(2, queue.read().len()); + // 1 peer should be updated but only once after inserting new transaction + assert_eq!(1, peer_count2); + assert_eq!(0, peer_count3); + // TRANSACTIONS_PACKET + assert_eq!(0x02, queue.read()[0].packet_id); + assert_eq!(0x02, queue.read()[1].packet_id); + } - #[test] - fn should_maintain_transations_propagation_stats() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - let mut sync = 
dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + #[test] + fn should_maintain_transations_propagation_stats() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Uncle); + client.insert_transaction_to_queue(); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - let stats = sync.transactions_stats(); - assert_eq!(stats.len(), 1, "Should maintain stats for single transaction.") - } + let stats = sync.transactions_stats(); + assert_eq!( + stats.len(), + 1, + "Should maintain stats for single transaction." 
+ ) + } - #[test] - fn should_propagate_service_transaction_to_selected_peers_only() { - let mut client = TestBlockChainClient::new(); - client.insert_transaction_with_gas_price_to_queue(U256::zero()); - let block_hash = client.block_hash_delta_minus(1); - let mut sync = ChainSync::new(SyncConfig::default(), &client, None); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + #[test] + fn should_propagate_service_transaction_to_selected_peers_only() { + let mut client = TestBlockChainClient::new(); + client.insert_transaction_with_gas_price_to_queue(U256::zero()); + let block_hash = client.block_hash_delta_minus(1); + let mut sync = ChainSync::new(SyncConfig::default(), &client, None); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - // when peer#1 is Geth - insert_dummy_peer(&mut sync, 1, block_hash); - io.peers_info.insert(1, "Geth".to_owned()); - // and peer#2 is Parity, accepting service transactions - insert_dummy_peer(&mut sync, 2, block_hash); - io.peers_info.insert(2, "Parity-Ethereum/v2.6.0/linux/rustc".to_owned()); - // and peer#3 is Parity, accepting service transactions - insert_dummy_peer(&mut sync, 3, block_hash); - io.peers_info.insert(3, "Parity-Ethereum/ABCDEFGH/v2.7.3/linux/rustc".to_owned()); + // when peer#1 is Geth + insert_dummy_peer(&mut sync, 1, block_hash); + io.peers_info.insert(1, "Geth".to_owned()); + // and peer#2 is Parity, accepting service transactions + insert_dummy_peer(&mut sync, 2, block_hash); + io.peers_info + .insert(2, "Parity-Ethereum/v2.6.0/linux/rustc".to_owned()); + // and peer#3 is Parity, accepting service transactions + insert_dummy_peer(&mut sync, 3, block_hash); + io.peers_info + .insert(3, "Parity-Ethereum/ABCDEFGH/v2.7.3/linux/rustc".to_owned()); - // and new service transaction is propagated to peers - 
SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + // and new service transaction is propagated to peers + SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - // peer#2 && peer#3 are receiving service transaction - assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 2)); // TRANSACTIONS_PACKET - assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 3)); // TRANSACTIONS_PACKET - assert_eq!(io.packets.len(), 2); - } + // peer#2 && peer#3 are receiving service transaction + assert!(io + .packets + .iter() + .any(|p| p.packet_id == 0x02 && p.recipient == 2)); // TRANSACTIONS_PACKET + assert!(io + .packets + .iter() + .any(|p| p.packet_id == 0x02 && p.recipient == 3)); // TRANSACTIONS_PACKET + assert_eq!(io.packets.len(), 2); + } - #[test] - fn should_propagate_service_transaction_is_sent_as_separate_message() { - let mut client = TestBlockChainClient::new(); - let tx1_hash = client.insert_transaction_to_queue(); - let tx2_hash = client.insert_transaction_with_gas_price_to_queue(U256::zero()); - let block_hash = client.block_hash_delta_minus(1); - let mut sync = ChainSync::new(SyncConfig::default(), &client, None); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + #[test] + fn should_propagate_service_transaction_is_sent_as_separate_message() { + let mut client = TestBlockChainClient::new(); + let tx1_hash = client.insert_transaction_to_queue(); + let tx2_hash = client.insert_transaction_with_gas_price_to_queue(U256::zero()); + let block_hash = client.block_hash_delta_minus(1); + let mut sync = ChainSync::new(SyncConfig::default(), &client, None); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - // when peer#1 is Parity, accepting service transactions - insert_dummy_peer(&mut sync, 1, block_hash); - 
io.peers_info.insert(1, "Parity-Ethereum/v2.6.0/linux/rustc".to_owned()); + // when peer#1 is Parity, accepting service transactions + insert_dummy_peer(&mut sync, 1, block_hash); + io.peers_info + .insert(1, "Parity-Ethereum/v2.6.0/linux/rustc".to_owned()); - // and service + non-service transactions are propagated to peers - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); + // and service + non-service transactions are propagated to peers + SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true); - // two separate packets for peer are queued: - // 1) with non-service-transaction - // 2) with service transaction - let sent_transactions: Vec = io.packets.iter() - .filter_map(|p| { - if p.packet_id != 0x02 || p.recipient != 1 { // TRANSACTIONS_PACKET - return None; - } + // two separate packets for peer are queued: + // 1) with non-service-transaction + // 2) with service transaction + let sent_transactions: Vec = io + .packets + .iter() + .filter_map(|p| { + if p.packet_id != 0x02 || p.recipient != 1 { + // TRANSACTIONS_PACKET + return None; + } - let rlp = Rlp::new(&*p.data); - let item_count = rlp.item_count().unwrap_or(0); - if item_count != 1 { - return None; - } + let rlp = Rlp::new(&*p.data); + let item_count = rlp.item_count().unwrap_or(0); + if item_count != 1 { + return None; + } - rlp.at(0).ok().and_then(|r| r.as_val().ok()) - }) - .collect(); - assert_eq!(sent_transactions.len(), 2); - assert!(sent_transactions.iter().any(|tx| tx.hash() == tx1_hash)); - assert!(sent_transactions.iter().any(|tx| tx.hash() == tx2_hash)); - } + rlp.at(0).ok().and_then(|r| r.as_val().ok()) + }) + .collect(); + assert_eq!(sent_transactions.len(), 2); + assert!(sent_transactions.iter().any(|tx| tx.hash() == tx1_hash)); + assert!(sent_transactions.iter().any(|tx| tx.hash() == tx2_hash)); + } } diff --git a/ethcore/sync/src/chain/requester.rs b/ethcore/sync/src/chain/requester.rs index 31d3ce590..287ff61b5 100644 --- 
a/ethcore/sync/src/chain/requester.rs +++ b/ethcore/sync/src/chain/requester.rs @@ -17,138 +17,221 @@ use block_sync::BlockRequest; use bytes::Bytes; use ethereum_types::H256; -use network::{PeerId}; +use network::PeerId; use rlp::RlpStream; use std::time::Instant; use sync_io::SyncIo; use types::BlockNumber; -use super::sync_packet::SyncPacket; -use super::sync_packet::SyncPacket::{ - GetBlockHeadersPacket, - GetBlockBodiesPacket, - GetReceiptsPacket, - GetSnapshotManifestPacket, - GetSnapshotDataPacket, +use super::sync_packet::{ + SyncPacket, + SyncPacket::{ + GetBlockBodiesPacket, GetBlockHeadersPacket, GetReceiptsPacket, GetSnapshotDataPacket, + GetSnapshotManifestPacket, + }, }; -use super::{ - BlockSet, - ChainSync, - PeerAsking, -}; +use super::{BlockSet, ChainSync, PeerAsking}; /// The Chain Sync Requester: requesting data to other peers pub struct SyncRequester; impl SyncRequester { - /// Perform block download request` - pub fn request_blocks(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, request: BlockRequest, block_set: BlockSet) { - match request { - BlockRequest::Headers { start, count, skip } => { - SyncRequester::request_headers_by_hash(sync, io, peer_id, &start, count, skip, false, block_set); - }, - BlockRequest::Bodies { hashes } => { - SyncRequester::request_bodies(sync, io, peer_id, hashes, block_set); - }, - BlockRequest::Receipts { hashes } => { - SyncRequester::request_receipts(sync, io, peer_id, hashes, block_set); - }, - } - } + /// Perform block download request` + pub fn request_blocks( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + request: BlockRequest, + block_set: BlockSet, + ) { + match request { + BlockRequest::Headers { start, count, skip } => { + SyncRequester::request_headers_by_hash( + sync, io, peer_id, &start, count, skip, false, block_set, + ); + } + BlockRequest::Bodies { hashes } => { + SyncRequester::request_bodies(sync, io, peer_id, hashes, block_set); + } + BlockRequest::Receipts { hashes } => 
{ + SyncRequester::request_receipts(sync, io, peer_id, hashes, block_set); + } + } + } - /// Request block bodies from a peer - fn request_bodies(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, hashes: Vec, set: BlockSet) { - let mut rlp = RlpStream::new_list(hashes.len()); - trace!(target: "sync", "{} <- GetBlockBodies: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set); - for h in &hashes { - rlp.append(&h.clone()); - } - SyncRequester::send_request(sync, io, peer_id, PeerAsking::BlockBodies, GetBlockBodiesPacket, rlp.out()); - let peer = sync.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. qed"); - peer.asking_blocks = hashes; - peer.block_set = Some(set); - } + /// Request block bodies from a peer + fn request_bodies( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + hashes: Vec, + set: BlockSet, + ) { + let mut rlp = RlpStream::new_list(hashes.len()); + trace!(target: "sync", "{} <- GetBlockBodies: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set); + for h in &hashes { + rlp.append(&h.clone()); + } + SyncRequester::send_request( + sync, + io, + peer_id, + PeerAsking::BlockBodies, + GetBlockBodiesPacket, + rlp.out(), + ); + let peer = sync.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. 
qed"); + peer.asking_blocks = hashes; + peer.block_set = Some(set); + } - /// Request headers from a peer by block number - pub fn request_fork_header(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, n: BlockNumber) { - trace!(target: "sync", "{} <- GetForkHeader: at {}", peer_id, n); - let mut rlp = RlpStream::new_list(4); - rlp.append(&n); - rlp.append(&1u32); - rlp.append(&0u32); - rlp.append(&0u32); - SyncRequester::send_request(sync, io, peer_id, PeerAsking::ForkHeader, GetBlockHeadersPacket, rlp.out()); - } + /// Request headers from a peer by block number + pub fn request_fork_header( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + n: BlockNumber, + ) { + trace!(target: "sync", "{} <- GetForkHeader: at {}", peer_id, n); + let mut rlp = RlpStream::new_list(4); + rlp.append(&n); + rlp.append(&1u32); + rlp.append(&0u32); + rlp.append(&0u32); + SyncRequester::send_request( + sync, + io, + peer_id, + PeerAsking::ForkHeader, + GetBlockHeadersPacket, + rlp.out(), + ); + } - /// Find some headers or blocks to download for a peer. - pub fn request_snapshot_data(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) { - // find chunk data to download - if let Some(hash) = sync.snapshot.needed_chunk() { - if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { - peer.asking_snapshot_data = Some(hash.clone()); - } - SyncRequester::request_snapshot_chunk(sync, io, peer_id, &hash); - } - } + /// Find some headers or blocks to download for a peer. + pub fn request_snapshot_data(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) { + // find chunk data to download + if let Some(hash) = sync.snapshot.needed_chunk() { + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + peer.asking_snapshot_data = Some(hash.clone()); + } + SyncRequester::request_snapshot_chunk(sync, io, peer_id, &hash); + } + } - /// Request snapshot manifest from a peer. 
- pub fn request_snapshot_manifest(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) { - trace!(target: "sync", "{} <- GetSnapshotManifest", peer_id); - let rlp = RlpStream::new_list(0); - SyncRequester::send_request(sync, io, peer_id, PeerAsking::SnapshotManifest, GetSnapshotManifestPacket, rlp.out()); - } + /// Request snapshot manifest from a peer. + pub fn request_snapshot_manifest(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) { + trace!(target: "sync", "{} <- GetSnapshotManifest", peer_id); + let rlp = RlpStream::new_list(0); + SyncRequester::send_request( + sync, + io, + peer_id, + PeerAsking::SnapshotManifest, + GetSnapshotManifestPacket, + rlp.out(), + ); + } - /// Request headers from a peer by block hash - fn request_headers_by_hash(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, h: &H256, count: u64, skip: u64, reverse: bool, set: BlockSet) { - trace!(target: "sync", "{} <- GetBlockHeaders: {} entries starting from {}, set = {:?}", peer_id, count, h, set); - let mut rlp = RlpStream::new_list(4); - rlp.append(h); - rlp.append(&count); - rlp.append(&skip); - rlp.append(&if reverse {1u32} else {0u32}); - SyncRequester::send_request(sync, io, peer_id, PeerAsking::BlockHeaders, GetBlockHeadersPacket, rlp.out()); - let peer = sync.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. 
qed"); - peer.asking_hash = Some(h.clone()); - peer.block_set = Some(set); - } + /// Request headers from a peer by block hash + fn request_headers_by_hash( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + h: &H256, + count: u64, + skip: u64, + reverse: bool, + set: BlockSet, + ) { + trace!(target: "sync", "{} <- GetBlockHeaders: {} entries starting from {}, set = {:?}", peer_id, count, h, set); + let mut rlp = RlpStream::new_list(4); + rlp.append(h); + rlp.append(&count); + rlp.append(&skip); + rlp.append(&if reverse { 1u32 } else { 0u32 }); + SyncRequester::send_request( + sync, + io, + peer_id, + PeerAsking::BlockHeaders, + GetBlockHeadersPacket, + rlp.out(), + ); + let peer = sync.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. qed"); + peer.asking_hash = Some(h.clone()); + peer.block_set = Some(set); + } - /// Request block receipts from a peer - fn request_receipts(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, hashes: Vec, set: BlockSet) { - let mut rlp = RlpStream::new_list(hashes.len()); - trace!(target: "sync", "{} <- GetBlockReceipts: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set); - for h in &hashes { - rlp.append(&h.clone()); - } - SyncRequester::send_request(sync, io, peer_id, PeerAsking::BlockReceipts, GetReceiptsPacket, rlp.out()); - let peer = sync.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. 
qed"); - peer.asking_blocks = hashes; - peer.block_set = Some(set); - } + /// Request block receipts from a peer + fn request_receipts( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + hashes: Vec, + set: BlockSet, + ) { + let mut rlp = RlpStream::new_list(hashes.len()); + trace!(target: "sync", "{} <- GetBlockReceipts: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set); + for h in &hashes { + rlp.append(&h.clone()); + } + SyncRequester::send_request( + sync, + io, + peer_id, + PeerAsking::BlockReceipts, + GetReceiptsPacket, + rlp.out(), + ); + let peer = sync.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. qed"); + peer.asking_blocks = hashes; + peer.block_set = Some(set); + } - /// Request snapshot chunk from a peer. - fn request_snapshot_chunk(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, chunk: &H256) { - trace!(target: "sync", "{} <- GetSnapshotData {:?}", peer_id, chunk); - let mut rlp = RlpStream::new_list(1); - rlp.append(chunk); - SyncRequester::send_request(sync, io, peer_id, PeerAsking::SnapshotData, GetSnapshotDataPacket, rlp.out()); - } + /// Request snapshot chunk from a peer. 
+ fn request_snapshot_chunk( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + chunk: &H256, + ) { + trace!(target: "sync", "{} <- GetSnapshotData {:?}", peer_id, chunk); + let mut rlp = RlpStream::new_list(1); + rlp.append(chunk); + SyncRequester::send_request( + sync, + io, + peer_id, + PeerAsking::SnapshotData, + GetSnapshotDataPacket, + rlp.out(), + ); + } - /// Generic request sender - fn send_request(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, asking: PeerAsking, packet_id: SyncPacket, packet: Bytes) { - if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { - if peer.asking != PeerAsking::Nothing { - warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking); - } - peer.asking = asking; - peer.ask_time = Instant::now(); + /// Generic request sender + fn send_request( + sync: &mut ChainSync, + io: &mut SyncIo, + peer_id: PeerId, + asking: PeerAsking, + packet_id: SyncPacket, + packet: Bytes, + ) { + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + if peer.asking != PeerAsking::Nothing { + warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking); + } + peer.asking = asking; + peer.ask_time = Instant::now(); - let result = io.send(peer_id, packet_id, packet); + let result = io.send(peer_id, packet_id, packet); - if let Err(e) = result { - debug!(target:"sync", "Error sending request: {:?}", e); - io.disconnect_peer(peer_id); - } - } - } + if let Err(e) = result { + debug!(target:"sync", "Error sending request: {:?}", e); + io.disconnect_peer(peer_id); + } + } + } } diff --git a/ethcore/sync/src/chain/supplier.rs b/ethcore/sync/src/chain/supplier.rs index 7e71e6aee..a97678268 100644 --- a/ethcore/sync/src/chain/supplier.rs +++ b/ethcore/sync/src/chain/supplier.rs @@ -21,535 +21,646 @@ use network::{self, PeerId}; use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; use std::cmp; -use types::BlockNumber; -use types::ids::BlockId; +use types::{ids::BlockId, BlockNumber}; use 
sync_io::SyncIo; -use super::sync_packet::{PacketInfo, SyncPacket}; -use super::sync_packet::SyncPacket::{ - StatusPacket, - TransactionsPacket, - GetBlockHeadersPacket, - BlockHeadersPacket, - GetBlockBodiesPacket, - BlockBodiesPacket, - GetNodeDataPacket, - NodeDataPacket, - GetReceiptsPacket, - ReceiptsPacket, - GetSnapshotManifestPacket, - SnapshotManifestPacket, - GetSnapshotDataPacket, - SnapshotDataPacket, - ConsensusDataPacket, +use super::sync_packet::{ + PacketInfo, SyncPacket, + SyncPacket::{ + BlockBodiesPacket, BlockHeadersPacket, ConsensusDataPacket, GetBlockBodiesPacket, + GetBlockHeadersPacket, GetNodeDataPacket, GetReceiptsPacket, GetSnapshotDataPacket, + GetSnapshotManifestPacket, NodeDataPacket, ReceiptsPacket, SnapshotDataPacket, + SnapshotManifestPacket, StatusPacket, TransactionsPacket, + }, }; use super::{ - ChainSync, - SyncHandler, - RlpResponseResult, - PacketDecodeError, - MAX_BODIES_TO_SEND, - MAX_HEADERS_TO_SEND, - MAX_NODE_DATA_TO_SEND, - MAX_RECEIPTS_HEADERS_TO_SEND, + ChainSync, PacketDecodeError, RlpResponseResult, SyncHandler, MAX_BODIES_TO_SEND, + MAX_HEADERS_TO_SEND, MAX_NODE_DATA_TO_SEND, MAX_RECEIPTS_HEADERS_TO_SEND, }; /// The Chain Sync Supplier: answers requests from peers with available data pub struct SyncSupplier; impl SyncSupplier { - /// Dispatch incoming requests and responses - // Take a u8 and not a SyncPacketId because this is the entry point - // to chain sync from the outside world. - pub fn dispatch_packet(sync: &RwLock, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { - let rlp = Rlp::new(data); + /// Dispatch incoming requests and responses + // Take a u8 and not a SyncPacketId because this is the entry point + // to chain sync from the outside world. 
+ pub fn dispatch_packet( + sync: &RwLock, + io: &mut SyncIo, + peer: PeerId, + packet_id: u8, + data: &[u8], + ) { + let rlp = Rlp::new(data); - if let Some(id) = SyncPacket::from_u8(packet_id) { - let result = match id { - GetBlockBodiesPacket => SyncSupplier::return_rlp( - io, &rlp, peer, - SyncSupplier::return_block_bodies, - |e| format!("Error sending block bodies: {:?}", e)), + if let Some(id) = SyncPacket::from_u8(packet_id) { + let result = match id { + GetBlockBodiesPacket => SyncSupplier::return_rlp( + io, + &rlp, + peer, + SyncSupplier::return_block_bodies, + |e| format!("Error sending block bodies: {:?}", e), + ), - GetBlockHeadersPacket => SyncSupplier::return_rlp( - io, &rlp, peer, - SyncSupplier::return_block_headers, - |e| format!("Error sending block headers: {:?}", e)), + GetBlockHeadersPacket => SyncSupplier::return_rlp( + io, + &rlp, + peer, + SyncSupplier::return_block_headers, + |e| format!("Error sending block headers: {:?}", e), + ), - GetReceiptsPacket => SyncSupplier::return_rlp( - io, &rlp, peer, - SyncSupplier::return_receipts, - |e| format!("Error sending receipts: {:?}", e)), + GetReceiptsPacket => { + SyncSupplier::return_rlp(io, &rlp, peer, SyncSupplier::return_receipts, |e| { + format!("Error sending receipts: {:?}", e) + }) + } - GetNodeDataPacket => SyncSupplier::return_rlp( - io, &rlp, peer, - SyncSupplier::return_node_data, - |e| format!("Error sending nodes: {:?}", e)), + GetNodeDataPacket => { + SyncSupplier::return_rlp(io, &rlp, peer, SyncSupplier::return_node_data, |e| { + format!("Error sending nodes: {:?}", e) + }) + } - GetSnapshotManifestPacket => SyncSupplier::return_rlp( - io, &rlp, peer, - SyncSupplier::return_snapshot_manifest, - |e| format!("Error sending snapshot manifest: {:?}", e)), + GetSnapshotManifestPacket => SyncSupplier::return_rlp( + io, + &rlp, + peer, + SyncSupplier::return_snapshot_manifest, + |e| format!("Error sending snapshot manifest: {:?}", e), + ), - GetSnapshotDataPacket => 
SyncSupplier::return_rlp( - io, &rlp, peer, - SyncSupplier::return_snapshot_data, - |e| format!("Error sending snapshot data: {:?}", e)), + GetSnapshotDataPacket => SyncSupplier::return_rlp( + io, + &rlp, + peer, + SyncSupplier::return_snapshot_data, + |e| format!("Error sending snapshot data: {:?}", e), + ), - StatusPacket => { - sync.write().on_packet(io, peer, packet_id, data); - Ok(()) - }, - // Packets that require the peer to be confirmed - _ => { - if !sync.read().peers.contains_key(&peer) { - debug!(target:"sync", "Unexpected packet {} from unregistered peer: {}:{}", packet_id, peer, io.peer_version(peer)); - return; - } - debug!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id); + StatusPacket => { + sync.write().on_packet(io, peer, packet_id, data); + Ok(()) + } + // Packets that require the peer to be confirmed + _ => { + if !sync.read().peers.contains_key(&peer) { + debug!(target:"sync", "Unexpected packet {} from unregistered peer: {}:{}", packet_id, peer, io.peer_version(peer)); + return; + } + debug!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id); - match id { - ConsensusDataPacket => { - SyncHandler::on_consensus_packet(io, peer, &rlp) - }, - TransactionsPacket => { - let res = { - let sync_ro = sync.read(); - SyncHandler::on_peer_transactions(&*sync_ro, io, peer, &rlp) - }; - if res.is_err() { - // peer sent invalid data, disconnect. - io.disable_peer(peer); - sync.write().deactivate_peer(io, peer); - } - }, - _ => { - sync.write().on_packet(io, peer, packet_id, data); - } - } + match id { + ConsensusDataPacket => SyncHandler::on_consensus_packet(io, peer, &rlp), + TransactionsPacket => { + let res = { + let sync_ro = sync.read(); + SyncHandler::on_peer_transactions(&*sync_ro, io, peer, &rlp) + }; + if res.is_err() { + // peer sent invalid data, disconnect. 
+ io.disable_peer(peer); + sync.write().deactivate_peer(io, peer); + } + } + _ => { + sync.write().on_packet(io, peer, packet_id, data); + } + } - Ok(()) - } - }; + Ok(()) + } + }; - result.unwrap_or_else(|e| { - debug!(target:"sync", "{} -> Malformed packet {} : {}", peer, packet_id, e); - }) - } - } + result.unwrap_or_else(|e| { + debug!(target:"sync", "{} -> Malformed packet {} : {}", peer, packet_id, e); + }) + } + } - /// Respond to GetBlockHeaders request - fn return_block_headers(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let payload_soft_limit = io.payload_soft_limit(); - // Packet layout: - // [ block: { P , B_32 }, maxHeaders: P, skip: P, reverse: P in { 0 , 1 } ] - let max_headers: usize = r.val_at(1)?; - let skip: usize = r.val_at(2)?; - let reverse: bool = r.val_at(3)?; - let last = io.chain().chain_info().best_block_number; - let number = if r.at(0)?.size() == 32 { - // id is a hash - let hash: H256 = r.val_at(0)?; - trace!(target: "sync", "{} -> GetBlockHeaders (hash: {}, max: {}, skip: {}, reverse:{})", peer_id, hash, max_headers, skip, reverse); - match io.chain().block_header(BlockId::Hash(hash)) { - Some(hdr) => { - let number = hdr.number().into(); - debug_assert_eq!(hdr.hash(), hash); + /// Respond to GetBlockHeaders request + fn return_block_headers(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { + let payload_soft_limit = io.payload_soft_limit(); + // Packet layout: + // [ block: { P , B_32 }, maxHeaders: P, skip: P, reverse: P in { 0 , 1 } ] + let max_headers: usize = r.val_at(1)?; + let skip: usize = r.val_at(2)?; + let reverse: bool = r.val_at(3)?; + let last = io.chain().chain_info().best_block_number; + let number = if r.at(0)?.size() == 32 { + // id is a hash + let hash: H256 = r.val_at(0)?; + trace!(target: "sync", "{} -> GetBlockHeaders (hash: {}, max: {}, skip: {}, reverse:{})", peer_id, hash, max_headers, skip, reverse); + match io.chain().block_header(BlockId::Hash(hash)) { + Some(hdr) => { + let 
number = hdr.number().into(); + debug_assert_eq!(hdr.hash(), hash); - if max_headers == 1 || io.chain().block_hash(BlockId::Number(number)) != Some(hash) { - // Non canonical header or single header requested - // TODO: handle single-step reverse hashchains of non-canon hashes - trace!(target:"sync", "Returning single header: {:?}", hash); - let mut rlp = RlpStream::new_list(1); - rlp.append_raw(&hdr.into_inner(), 1); - return Ok(Some((BlockHeadersPacket.id(), rlp))); - } - number - } - None => return Ok(Some((BlockHeadersPacket.id(), RlpStream::new_list(0)))) //no such header, return nothing - } - } else { - let number = r.val_at::(0)?; - trace!(target: "sync", "{} -> GetBlockHeaders (number: {}, max: {}, skip: {}, reverse:{})", peer_id, number, max_headers, skip, reverse); - number - }; + if max_headers == 1 + || io.chain().block_hash(BlockId::Number(number)) != Some(hash) + { + // Non canonical header or single header requested + // TODO: handle single-step reverse hashchains of non-canon hashes + trace!(target:"sync", "Returning single header: {:?}", hash); + let mut rlp = RlpStream::new_list(1); + rlp.append_raw(&hdr.into_inner(), 1); + return Ok(Some((BlockHeadersPacket.id(), rlp))); + } + number + } + None => return Ok(Some((BlockHeadersPacket.id(), RlpStream::new_list(0)))), //no such header, return nothing + } + } else { + let number = r.val_at::(0)?; + trace!(target: "sync", "{} -> GetBlockHeaders (number: {}, max: {}, skip: {}, reverse:{})", peer_id, number, max_headers, skip, reverse); + number + }; - let mut number = if reverse { - cmp::min(last, number) - } else { - cmp::max(0, number) - }; - let max_count = cmp::min(MAX_HEADERS_TO_SEND, max_headers); - let mut count = 0; - let mut data = Bytes::new(); - let inc = skip.saturating_add(1) as BlockNumber; - let overlay = io.chain_overlay().read(); + let mut number = if reverse { + cmp::min(last, number) + } else { + cmp::max(0, number) + }; + let max_count = cmp::min(MAX_HEADERS_TO_SEND, max_headers); + 
let mut count = 0; + let mut data = Bytes::new(); + let inc = skip.saturating_add(1) as BlockNumber; + let overlay = io.chain_overlay().read(); - // We are checking the `overlay` as well since it's where the ForkBlock - // header is cached : so peers can confirm we are on the right fork, - // even if we are not synced until the fork block - while (number <= last || overlay.contains_key(&number)) && count < max_count { - if let Some(hdr) = overlay.get(&number) { - trace!(target: "sync", "{}: Returning cached fork header", peer_id); - data.extend_from_slice(hdr); - count += 1; - } else if let Some(hdr) = io.chain().block_header(BlockId::Number(number)) { - data.append(&mut hdr.into_inner()); - count += 1; - // Check that the packet won't be oversized - if data.len() > payload_soft_limit { - break; - } - } else { - // No required block. - break; - } - if reverse { - if number <= inc || number == 0 { - break; - } - number = number.saturating_sub(inc); - } else { - number = number.saturating_add(inc); - } - } - let mut rlp = RlpStream::new_list(count as usize); - rlp.append_raw(&data, count as usize); - trace!(target: "sync", "{} -> GetBlockHeaders: returned {} entries", peer_id, count); - Ok(Some((BlockHeadersPacket.id(), rlp))) - } + // We are checking the `overlay` as well since it's where the ForkBlock + // header is cached : so peers can confirm we are on the right fork, + // even if we are not synced until the fork block + while (number <= last || overlay.contains_key(&number)) && count < max_count { + if let Some(hdr) = overlay.get(&number) { + trace!(target: "sync", "{}: Returning cached fork header", peer_id); + data.extend_from_slice(hdr); + count += 1; + } else if let Some(hdr) = io.chain().block_header(BlockId::Number(number)) { + data.append(&mut hdr.into_inner()); + count += 1; + // Check that the packet won't be oversized + if data.len() > payload_soft_limit { + break; + } + } else { + // No required block. 
+ break; + } + if reverse { + if number <= inc || number == 0 { + break; + } + number = number.saturating_sub(inc); + } else { + number = number.saturating_add(inc); + } + } + let mut rlp = RlpStream::new_list(count as usize); + rlp.append_raw(&data, count as usize); + trace!(target: "sync", "{} -> GetBlockHeaders: returned {} entries", peer_id, count); + Ok(Some((BlockHeadersPacket.id(), rlp))) + } - /// Respond to GetBlockBodies request - fn return_block_bodies(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let payload_soft_limit = io.payload_soft_limit(); - let mut count = r.item_count().unwrap_or(0); - if count == 0 { - debug!(target: "sync", "Empty GetBlockBodies request, ignoring."); - return Ok(None); - } - count = cmp::min(count, MAX_BODIES_TO_SEND); - let mut added = 0usize; - let mut data = Bytes::new(); - for i in 0..count { - if let Some(body) = io.chain().block_body(BlockId::Hash(r.val_at::(i)?)) { - data.append(&mut body.into_inner()); - added += 1; - // Check that the packet won't be oversized - if data.len() > payload_soft_limit { - break; - } - } - } - let mut rlp = RlpStream::new_list(added); - rlp.append_raw(&data, added); - trace!(target: "sync", "{} -> GetBlockBodies: returned {} entries", peer_id, added); - Ok(Some((BlockBodiesPacket.id(), rlp))) - } + /// Respond to GetBlockBodies request + fn return_block_bodies(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { + let payload_soft_limit = io.payload_soft_limit(); + let mut count = r.item_count().unwrap_or(0); + if count == 0 { + debug!(target: "sync", "Empty GetBlockBodies request, ignoring."); + return Ok(None); + } + count = cmp::min(count, MAX_BODIES_TO_SEND); + let mut added = 0usize; + let mut data = Bytes::new(); + for i in 0..count { + if let Some(body) = io.chain().block_body(BlockId::Hash(r.val_at::(i)?)) { + data.append(&mut body.into_inner()); + added += 1; + // Check that the packet won't be oversized + if data.len() > payload_soft_limit { + break; + } 
+ } + } + let mut rlp = RlpStream::new_list(added); + rlp.append_raw(&data, added); + trace!(target: "sync", "{} -> GetBlockBodies: returned {} entries", peer_id, added); + Ok(Some((BlockBodiesPacket.id(), rlp))) + } - /// Respond to GetNodeData request - fn return_node_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let payload_soft_limit = io.payload_soft_limit(); - let mut count = r.item_count().unwrap_or(0); - trace!(target: "sync", "{} -> GetNodeData: {} entries", peer_id, count); - if count == 0 { - debug!(target: "sync", "Empty GetNodeData request, ignoring."); - return Ok(None); - } - count = cmp::min(count, MAX_NODE_DATA_TO_SEND); - let mut added = 0usize; - let mut data = Vec::new(); - let mut total_bytes = 0; - for i in 0..count { - if let Some(node) = io.chain().state_data(&r.val_at::(i)?) { - total_bytes += node.len(); - // Check that the packet won't be oversized - if total_bytes > payload_soft_limit { - break; - } - data.push(node); - added += 1; - } - } - trace!(target: "sync", "{} -> GetNodeData: return {} entries", peer_id, added); - let mut rlp = RlpStream::new_list(added); - for d in data { - rlp.append(&d); - } - Ok(Some((NodeDataPacket.id(), rlp))) - } + /// Respond to GetNodeData request + fn return_node_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { + let payload_soft_limit = io.payload_soft_limit(); + let mut count = r.item_count().unwrap_or(0); + trace!(target: "sync", "{} -> GetNodeData: {} entries", peer_id, count); + if count == 0 { + debug!(target: "sync", "Empty GetNodeData request, ignoring."); + return Ok(None); + } + count = cmp::min(count, MAX_NODE_DATA_TO_SEND); + let mut added = 0usize; + let mut data = Vec::new(); + let mut total_bytes = 0; + for i in 0..count { + if let Some(node) = io.chain().state_data(&r.val_at::(i)?) 
{ + total_bytes += node.len(); + // Check that the packet won't be oversized + if total_bytes > payload_soft_limit { + break; + } + data.push(node); + added += 1; + } + } + trace!(target: "sync", "{} -> GetNodeData: return {} entries", peer_id, added); + let mut rlp = RlpStream::new_list(added); + for d in data { + rlp.append(&d); + } + Ok(Some((NodeDataPacket.id(), rlp))) + } - fn return_receipts(io: &SyncIo, rlp: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let payload_soft_limit = io.payload_soft_limit(); - let mut count = rlp.item_count().unwrap_or(0); - trace!(target: "sync", "{} -> GetReceipts: {} entries", peer_id, count); - if count == 0 { - debug!(target: "sync", "Empty GetReceipts request, ignoring."); - return Ok(None); - } - count = cmp::min(count, MAX_RECEIPTS_HEADERS_TO_SEND); - let mut added_headers = 0usize; - let mut data = Bytes::new(); - let mut total_bytes = 0; - for i in 0..count { - if let Some(receipts) = io.chain().block_receipts(&rlp.val_at::(i)?) { - let mut receipts_bytes = ::rlp::encode(&receipts); - total_bytes += receipts_bytes.len(); - if total_bytes > payload_soft_limit { break; } - data.append(&mut receipts_bytes); - added_headers += 1; - } - } - let mut rlp_result = RlpStream::new_list(added_headers); - rlp_result.append_raw(&data, added_headers); - Ok(Some((ReceiptsPacket.id(), rlp_result))) - } + fn return_receipts(io: &SyncIo, rlp: &Rlp, peer_id: PeerId) -> RlpResponseResult { + let payload_soft_limit = io.payload_soft_limit(); + let mut count = rlp.item_count().unwrap_or(0); + trace!(target: "sync", "{} -> GetReceipts: {} entries", peer_id, count); + if count == 0 { + debug!(target: "sync", "Empty GetReceipts request, ignoring."); + return Ok(None); + } + count = cmp::min(count, MAX_RECEIPTS_HEADERS_TO_SEND); + let mut added_headers = 0usize; + let mut data = Bytes::new(); + let mut total_bytes = 0; + for i in 0..count { + if let Some(receipts) = io.chain().block_receipts(&rlp.val_at::(i)?) 
{ + let mut receipts_bytes = ::rlp::encode(&receipts); + total_bytes += receipts_bytes.len(); + if total_bytes > payload_soft_limit { + break; + } + data.append(&mut receipts_bytes); + added_headers += 1; + } + } + let mut rlp_result = RlpStream::new_list(added_headers); + rlp_result.append_raw(&data, added_headers); + Ok(Some((ReceiptsPacket.id(), rlp_result))) + } - /// Respond to GetSnapshotManifest request - fn return_snapshot_manifest(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let count = r.item_count().unwrap_or(0); - trace!(target: "warp", "{} -> GetSnapshotManifest", peer_id); - if count != 0 { - debug!(target: "warp", "Invalid GetSnapshotManifest request, ignoring."); - return Ok(None); - } - let rlp = match io.snapshot_service().manifest() { - Some(manifest) => { - trace!(target: "warp", "{} <- SnapshotManifest", peer_id); - let mut rlp = RlpStream::new_list(1); - rlp.append_raw(&manifest.into_rlp(), 1); - rlp - }, - None => { - trace!(target: "warp", "{}: No snapshot manifest to return", peer_id); - RlpStream::new_list(0) - } - }; - Ok(Some((SnapshotManifestPacket.id(), rlp))) - } + /// Respond to GetSnapshotManifest request + fn return_snapshot_manifest(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { + let count = r.item_count().unwrap_or(0); + trace!(target: "warp", "{} -> GetSnapshotManifest", peer_id); + if count != 0 { + debug!(target: "warp", "Invalid GetSnapshotManifest request, ignoring."); + return Ok(None); + } + let rlp = match io.snapshot_service().manifest() { + Some(manifest) => { + trace!(target: "warp", "{} <- SnapshotManifest", peer_id); + let mut rlp = RlpStream::new_list(1); + rlp.append_raw(&manifest.into_rlp(), 1); + rlp + } + None => { + trace!(target: "warp", "{}: No snapshot manifest to return", peer_id); + RlpStream::new_list(0) + } + }; + Ok(Some((SnapshotManifestPacket.id(), rlp))) + } - /// Respond to GetSnapshotData request - fn return_snapshot_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> 
RlpResponseResult { - let hash: H256 = r.val_at(0)?; - trace!(target: "warp", "{} -> GetSnapshotData {:?}", peer_id, hash); - let rlp = match io.snapshot_service().chunk(hash) { - Some(data) => { - let mut rlp = RlpStream::new_list(1); - trace!(target: "warp", "{} <- SnapshotData", peer_id); - rlp.append(&data); - rlp - }, - None => { - trace!(target: "warp", "{}: No snapshot data to return", peer_id); - RlpStream::new_list(0) - } - }; - Ok(Some((SnapshotDataPacket.id(), rlp))) - } + /// Respond to GetSnapshotData request + fn return_snapshot_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { + let hash: H256 = r.val_at(0)?; + trace!(target: "warp", "{} -> GetSnapshotData {:?}", peer_id, hash); + let rlp = match io.snapshot_service().chunk(hash) { + Some(data) => { + let mut rlp = RlpStream::new_list(1); + trace!(target: "warp", "{} <- SnapshotData", peer_id); + rlp.append(&data); + rlp + } + None => { + trace!(target: "warp", "{}: No snapshot data to return", peer_id); + RlpStream::new_list(0) + } + }; + Ok(Some((SnapshotDataPacket.id(), rlp))) + } - fn return_rlp(io: &mut SyncIo, rlp: &Rlp, peer: PeerId, rlp_func: FRlp, error_func: FError) -> Result<(), PacketDecodeError> - where FRlp : Fn(&SyncIo, &Rlp, PeerId) -> RlpResponseResult, - FError : FnOnce(network::Error) -> String - { - let response = rlp_func(io, rlp, peer); - match response { - Err(e) => Err(e), - Ok(Some((packet_id, rlp_stream))) => { - io.respond(packet_id, rlp_stream.out()).unwrap_or_else( - |e| debug!(target: "sync", "{:?}", error_func(e))); - Ok(()) - } - _ => Ok(()) - } - } + fn return_rlp( + io: &mut SyncIo, + rlp: &Rlp, + peer: PeerId, + rlp_func: FRlp, + error_func: FError, + ) -> Result<(), PacketDecodeError> + where + FRlp: Fn(&SyncIo, &Rlp, PeerId) -> RlpResponseResult, + FError: FnOnce(network::Error) -> String, + { + let response = rlp_func(io, rlp, peer); + match response { + Err(e) => Err(e), + Ok(Some((packet_id, rlp_stream))) => { + io.respond(packet_id, 
rlp_stream.out()) + .unwrap_or_else(|e| debug!(target: "sync", "{:?}", error_func(e))); + Ok(()) + } + _ => Ok(()), + } + } } #[cfg(test)] mod test { - use std::collections::{VecDeque}; - use tests::helpers::{TestIo}; - use tests::snapshot::TestSnapshotService; - use ethereum_types::{H256}; - use parking_lot::RwLock; - use bytes::Bytes; - use rlp::{Rlp, RlpStream}; - use super::{*, super::tests::*}; - use blocks::SyncHeader; - use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient}; + use super::{super::tests::*, *}; + use blocks::SyncHeader; + use bytes::Bytes; + use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient}; + use ethereum_types::H256; + use parking_lot::RwLock; + use rlp::{Rlp, RlpStream}; + use std::collections::VecDeque; + use tests::{helpers::TestIo, snapshot::TestSnapshotService}; - #[test] - fn return_block_headers() { - fn make_hash_req(h: &H256, count: usize, skip: usize, reverse: bool) -> Bytes { - let mut rlp = RlpStream::new_list(4); - rlp.append(h); - rlp.append(&count); - rlp.append(&skip); - rlp.append(&if reverse {1u32} else {0u32}); - rlp.out() - } + #[test] + fn return_block_headers() { + fn make_hash_req(h: &H256, count: usize, skip: usize, reverse: bool) -> Bytes { + let mut rlp = RlpStream::new_list(4); + rlp.append(h); + rlp.append(&count); + rlp.append(&skip); + rlp.append(&if reverse { 1u32 } else { 0u32 }); + rlp.out() + } - fn make_num_req(n: usize, count: usize, skip: usize, reverse: bool) -> Bytes { - let mut rlp = RlpStream::new_list(4); - rlp.append(&n); - rlp.append(&count); - rlp.append(&skip); - rlp.append(&if reverse {1u32} else {0u32}); - rlp.out() - } - fn to_header_vec(rlp: ::chain::RlpResponseResult) -> Vec { - Rlp::new(&rlp.unwrap().unwrap().1.out()).iter().map(|r| SyncHeader::from_rlp(r.as_raw().to_vec()).unwrap()).collect() - } + fn make_num_req(n: usize, count: usize, skip: usize, reverse: bool) -> Bytes { + let mut rlp = RlpStream::new_list(4); + rlp.append(&n); + 
rlp.append(&count); + rlp.append(&skip); + rlp.append(&if reverse { 1u32 } else { 0u32 }); + rlp.out() + } + fn to_header_vec(rlp: ::chain::RlpResponseResult) -> Vec { + Rlp::new(&rlp.unwrap().unwrap().1.out()) + .iter() + .map(|r| SyncHeader::from_rlp(r.as_raw().to_vec()).unwrap()) + .collect() + } - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Nothing); - let blocks: Vec<_> = (0 .. 100) - .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).map(|b| b.into_inner()).unwrap()).collect(); - let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); + let mut client = TestBlockChainClient::new(); + client.add_blocks(100, EachBlockWith::Nothing); + let blocks: Vec<_> = (0..100) + .map(|i| { + (&client as &BlockChainClient) + .block(BlockId::Number(i as BlockNumber)) + .map(|b| b.into_inner()) + .unwrap() + }) + .collect(); + let headers: Vec<_> = blocks + .iter() + .map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()) + .collect(); + let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &ss, &queue, None); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &ss, &queue, None); - let unknown: H256 = H256::new(); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&unknown, 1, 0, false)), 0); - assert!(to_header_vec(result).is_empty()); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&unknown, 1, 0, true)), 0); - assert!(to_header_vec(result).is_empty()); + let unknown: H256 = H256::new(); + let result = SyncSupplier::return_block_headers( + &io, + 
&Rlp::new(&make_hash_req(&unknown, 1, 0, false)), + 0, + ); + assert!(to_header_vec(result).is_empty()); + let result = SyncSupplier::return_block_headers( + &io, + &Rlp::new(&make_hash_req(&unknown, 1, 0, true)), + 0, + ); + assert!(to_header_vec(result).is_empty()); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[2], 1, 0, true)), 0); - assert_eq!(to_header_vec(result), vec![headers[2].clone()]); + let result = SyncSupplier::return_block_headers( + &io, + &Rlp::new(&make_hash_req(&hashes[2], 1, 0, true)), + 0, + ); + assert_eq!(to_header_vec(result), vec![headers[2].clone()]); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[2], 1, 0, false)), 0); - assert_eq!(to_header_vec(result), vec![headers[2].clone()]); + let result = SyncSupplier::return_block_headers( + &io, + &Rlp::new(&make_hash_req(&hashes[2], 1, 0, false)), + 0, + ); + assert_eq!(to_header_vec(result), vec![headers[2].clone()]); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[50], 3, 5, false)), 0); - assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[56].clone(), headers[62].clone()]); + let result = SyncSupplier::return_block_headers( + &io, + &Rlp::new(&make_hash_req(&hashes[50], 3, 5, false)), + 0, + ); + assert_eq!( + to_header_vec(result), + vec![ + headers[50].clone(), + headers[56].clone(), + headers[62].clone() + ] + ); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[50], 3, 5, true)), 0); - assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[44].clone(), headers[38].clone()]); + let result = SyncSupplier::return_block_headers( + &io, + &Rlp::new(&make_hash_req(&hashes[50], 3, 5, true)), + 0, + ); + assert_eq!( + to_header_vec(result), + vec![ + headers[50].clone(), + headers[44].clone(), + headers[38].clone() + ] + ); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 
0, true)), 0); - assert_eq!(to_header_vec(result), vec![headers[2].clone()]); + let result = + SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 0, true)), 0); + assert_eq!(to_header_vec(result), vec![headers[2].clone()]); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 0, false)), 0); - assert_eq!(to_header_vec(result), vec![headers[2].clone()]); + let result = + SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 0, false)), 0); + assert_eq!(to_header_vec(result), vec![headers[2].clone()]); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, false)), 0); - assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[56].clone(), headers[62].clone()]); + let result = + SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, false)), 0); + assert_eq!( + to_header_vec(result), + vec![ + headers[50].clone(), + headers[56].clone(), + headers[62].clone() + ] + ); - let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, true)), 0); - assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[44].clone(), headers[38].clone()]); - } + let result = + SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, true)), 0); + assert_eq!( + to_header_vec(result), + vec![ + headers[50].clone(), + headers[44].clone(), + headers[38].clone() + ] + ); + } - #[test] - fn respect_packet_limit() { - let small_num_blocks = 10; - let large_num_blocks = 50; - let tx_per_block = 100; + #[test] + fn respect_packet_limit() { + let small_num_blocks = 10; + let large_num_blocks = 50; + let tx_per_block = 100; - let mut client = TestBlockChainClient::new(); - client.add_blocks(large_num_blocks, EachBlockWith::Transactions(tx_per_block)); + let mut client = TestBlockChainClient::new(); + client.add_blocks(large_num_blocks, EachBlockWith::Transactions(tx_per_block)); - let mut small_rlp_request = 
RlpStream::new_list(small_num_blocks); - let mut large_rlp_request = RlpStream::new_list(large_num_blocks); + let mut small_rlp_request = RlpStream::new_list(small_num_blocks); + let mut large_rlp_request = RlpStream::new_list(large_num_blocks); - for i in 0..small_num_blocks { - let hash: H256 = client.block_hash(BlockId::Number(i as u64)).unwrap(); - small_rlp_request.append(&hash); - large_rlp_request.append(&hash); - } + for i in 0..small_num_blocks { + let hash: H256 = client.block_hash(BlockId::Number(i as u64)).unwrap(); + small_rlp_request.append(&hash); + large_rlp_request.append(&hash); + } - for i in small_num_blocks..large_num_blocks { - let hash: H256 = client.block_hash(BlockId::Number(i as u64)).unwrap(); - large_rlp_request.append(&hash); - } + for i in small_num_blocks..large_num_blocks { + let hash: H256 = client.block_hash(BlockId::Number(i as u64)).unwrap(); + large_rlp_request.append(&hash); + } - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &ss, &queue, None); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &ss, &queue, None); - let small_result = SyncSupplier::return_block_bodies(&io, &Rlp::new(&small_rlp_request.out()), 0); - let small_result = small_result.unwrap().unwrap().1; - assert_eq!(Rlp::new(&small_result.out()).item_count().unwrap(), small_num_blocks); + let small_result = + SyncSupplier::return_block_bodies(&io, &Rlp::new(&small_rlp_request.out()), 0); + let small_result = small_result.unwrap().unwrap().1; + assert_eq!( + Rlp::new(&small_result.out()).item_count().unwrap(), + small_num_blocks + ); - let large_result = SyncSupplier::return_block_bodies(&io, &Rlp::new(&large_rlp_request.out()), 0); - let large_result = large_result.unwrap().unwrap().1; - assert!(Rlp::new(&large_result.out()).item_count().unwrap() < large_num_blocks); - } + let large_result = + 
SyncSupplier::return_block_bodies(&io, &Rlp::new(&large_rlp_request.out()), 0); + let large_result = large_result.unwrap().unwrap().1; + assert!(Rlp::new(&large_result.out()).item_count().unwrap() < large_num_blocks); + } - #[test] - fn return_nodes() { - let mut client = TestBlockChainClient::new(); - let queue = RwLock::new(VecDeque::new()); - let sync = dummy_sync_with_peer(H256::new(), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + #[test] + fn return_nodes() { + let mut client = TestBlockChainClient::new(); + let queue = RwLock::new(VecDeque::new()); + let sync = dummy_sync_with_peer(H256::new(), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let mut node_list = RlpStream::new_list(3); - node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); - node_list.append(&H256::from("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa")); - node_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000")); + let mut node_list = RlpStream::new_list(3); + node_list.append(&H256::from( + "0000000000000000000000000000000000000000000000005555555555555555", + )); + node_list.append(&H256::from( + "ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa", + )); + node_list.append(&H256::from( + "aff0000000000000000000000000000000000000000000000000000000000000", + )); - let node_request = node_list.out(); - // it returns rlp ONLY for hashes started with "f" - let result = SyncSupplier::return_node_data(&io, &Rlp::new(&node_request.clone()), 0); + let node_request = node_list.out(); + // it returns rlp ONLY for hashes started with "f" + let result = SyncSupplier::return_node_data(&io, &Rlp::new(&node_request.clone()), 0); - assert!(result.is_ok()); - let rlp_result = result.unwrap(); - assert!(rlp_result.is_some()); + assert!(result.is_ok()); + let 
rlp_result = result.unwrap(); + assert!(rlp_result.is_some()); - // the length of one rlp-encoded hashe - let rlp = rlp_result.unwrap().1.out(); - let rlp = Rlp::new(&rlp); - assert_eq!(Ok(1), rlp.item_count()); + // the length of one rlp-encoded hashe + let rlp = rlp_result.unwrap().1.out(); + let rlp = Rlp::new(&rlp); + assert_eq!(Ok(1), rlp.item_count()); - io.sender = Some(2usize); + io.sender = Some(2usize); - SyncSupplier::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, GetNodeDataPacket.id(), &node_request); - assert_eq!(1, io.packets.len()); - } + SyncSupplier::dispatch_packet( + &RwLock::new(sync), + &mut io, + 0usize, + GetNodeDataPacket.id(), + &node_request, + ); + assert_eq!(1, io.packets.len()); + } - #[test] - fn return_receipts_empty() { - let mut client = TestBlockChainClient::new(); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &ss, &queue, None); + #[test] + fn return_receipts_empty() { + let mut client = TestBlockChainClient::new(); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &ss, &queue, None); - let result = SyncSupplier::return_receipts(&io, &Rlp::new(&[0xc0]), 0); + let result = SyncSupplier::return_receipts(&io, &Rlp::new(&[0xc0]), 0); - assert!(result.is_ok()); - } + assert!(result.is_ok()); + } - #[test] - fn return_receipts() { - let mut client = TestBlockChainClient::new(); - let queue = RwLock::new(VecDeque::new()); - let sync = dummy_sync_with_peer(H256::new(), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); + #[test] + fn return_receipts() { + let mut client = TestBlockChainClient::new(); + let queue = RwLock::new(VecDeque::new()); + let sync = dummy_sync_with_peer(H256::new(), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let mut receipt_list = 
RlpStream::new_list(4); - receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); - receipt_list.append(&H256::from("ff00000000000000000000000000000000000000000000000000000000000000")); - receipt_list.append(&H256::from("fff0000000000000000000000000000000000000000000000000000000000000")); - receipt_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000")); + let mut receipt_list = RlpStream::new_list(4); + receipt_list.append(&H256::from( + "0000000000000000000000000000000000000000000000005555555555555555", + )); + receipt_list.append(&H256::from( + "ff00000000000000000000000000000000000000000000000000000000000000", + )); + receipt_list.append(&H256::from( + "fff0000000000000000000000000000000000000000000000000000000000000", + )); + receipt_list.append(&H256::from( + "aff0000000000000000000000000000000000000000000000000000000000000", + )); - let receipts_request = receipt_list.out(); - // it returns rlp ONLY for hashes started with "f" - let result = SyncSupplier::return_receipts(&io, &Rlp::new(&receipts_request.clone()), 0); + let receipts_request = receipt_list.out(); + // it returns rlp ONLY for hashes started with "f" + let result = SyncSupplier::return_receipts(&io, &Rlp::new(&receipts_request.clone()), 0); - assert!(result.is_ok()); - let rlp_result = result.unwrap(); - assert!(rlp_result.is_some()); + assert!(result.is_ok()); + let rlp_result = result.unwrap(); + assert!(rlp_result.is_some()); - // the length of two rlp-encoded receipts - assert_eq!(603, rlp_result.unwrap().1.out().len()); + // the length of two rlp-encoded receipts + assert_eq!(603, rlp_result.unwrap().1.out().len()); - io.sender = Some(2usize); - SyncSupplier::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, GetReceiptsPacket.id(), &receipts_request); - assert_eq!(1, io.packets.len()); - } + io.sender = Some(2usize); + SyncSupplier::dispatch_packet( + &RwLock::new(sync), + &mut io, + 0usize, + 
GetReceiptsPacket.id(), + &receipts_request, + ); + assert_eq!(1, io.packets.len()); + } } diff --git a/ethcore/sync/src/chain/sync_packet.rs b/ethcore/sync/src/chain/sync_packet.rs index 3891090f6..930d6ad70 100644 --- a/ethcore/sync/src/chain/sync_packet.rs +++ b/ethcore/sync/src/chain/sync_packet.rs @@ -34,27 +34,27 @@ use network::{PacketId, ProtocolId}; enum_from_primitive! { #[derive(Clone, Copy, Debug, PartialEq)] pub enum SyncPacket { - StatusPacket = 0x00, - NewBlockHashesPacket = 0x01, - TransactionsPacket = 0x02, - GetBlockHeadersPacket = 0x03, - BlockHeadersPacket = 0x04, - GetBlockBodiesPacket = 0x05, - BlockBodiesPacket = 0x06, - NewBlockPacket = 0x07, + StatusPacket = 0x00, + NewBlockHashesPacket = 0x01, + TransactionsPacket = 0x02, + GetBlockHeadersPacket = 0x03, + BlockHeadersPacket = 0x04, + GetBlockBodiesPacket = 0x05, + BlockBodiesPacket = 0x06, + NewBlockPacket = 0x07, - GetNodeDataPacket = 0x0d, - NodeDataPacket = 0x0e, - GetReceiptsPacket = 0x0f, - ReceiptsPacket = 0x10, + GetNodeDataPacket = 0x0d, + NodeDataPacket = 0x0e, + GetReceiptsPacket = 0x0f, + ReceiptsPacket = 0x10, - GetSnapshotManifestPacket = 0x11, - SnapshotManifestPacket = 0x12, - GetSnapshotDataPacket = 0x13, - SnapshotDataPacket = 0x14, - ConsensusDataPacket = 0x15, - PrivateTransactionPacket = 0x16, - SignedPrivateTransactionPacket = 0x17, + GetSnapshotManifestPacket = 0x11, + SnapshotManifestPacket = 0x12, + GetSnapshotDataPacket = 0x13, + SnapshotDataPacket = 0x14, + ConsensusDataPacket = 0x15, + PrivateTransactionPacket = 0x16, + SignedPrivateTransactionPacket = 0x17, } } @@ -63,79 +63,73 @@ use self::SyncPacket::*; /// Provide both subprotocol and packet id information within the /// same object. pub trait PacketInfo { - fn id(&self) -> PacketId; - fn protocol(&self) -> ProtocolId; + fn id(&self) -> PacketId; + fn protocol(&self) -> ProtocolId; } // The mechanism to match packet ids and protocol may be improved // through some macro magic, but for now this works. 
impl PacketInfo for SyncPacket { - fn protocol(&self) -> ProtocolId { - match self { - StatusPacket | - NewBlockHashesPacket | - TransactionsPacket | - GetBlockHeadersPacket | - BlockHeadersPacket | - GetBlockBodiesPacket | - BlockBodiesPacket | - NewBlockPacket | + fn protocol(&self) -> ProtocolId { + match self { + StatusPacket + | NewBlockHashesPacket + | TransactionsPacket + | GetBlockHeadersPacket + | BlockHeadersPacket + | GetBlockBodiesPacket + | BlockBodiesPacket + | NewBlockPacket + | GetNodeDataPacket + | NodeDataPacket + | GetReceiptsPacket + | ReceiptsPacket => ETH_PROTOCOL, - GetNodeDataPacket| - NodeDataPacket | - GetReceiptsPacket | - ReceiptsPacket + GetSnapshotManifestPacket + | SnapshotManifestPacket + | GetSnapshotDataPacket + | SnapshotDataPacket + | ConsensusDataPacket + | PrivateTransactionPacket + | SignedPrivateTransactionPacket => WARP_SYNC_PROTOCOL_ID, + } + } - => ETH_PROTOCOL, - - GetSnapshotManifestPacket| - SnapshotManifestPacket | - GetSnapshotDataPacket | - SnapshotDataPacket | - ConsensusDataPacket | - PrivateTransactionPacket | - SignedPrivateTransactionPacket - - => WARP_SYNC_PROTOCOL_ID, - } - } - - fn id(&self) -> PacketId { - (*self) as PacketId - } + fn id(&self) -> PacketId { + (*self) as PacketId + } } - #[cfg(test)] mod tests { - use super::*; + use super::*; - use enum_primitive::FromPrimitive; + use enum_primitive::FromPrimitive; - #[test] - fn packet_ids_from_u8_when_from_primitive_zero_then_equals_status_packet() { - assert_eq!(SyncPacket::from_u8(0x00), Some(StatusPacket)); - } + #[test] + fn packet_ids_from_u8_when_from_primitive_zero_then_equals_status_packet() { + assert_eq!(SyncPacket::from_u8(0x00), Some(StatusPacket)); + } - #[test] - fn packet_ids_from_u8_when_from_primitive_eleven_then_equals_get_snapshot_manifest_packet() { - assert_eq!(SyncPacket::from_u8(0x11), Some(GetSnapshotManifestPacket)); - } + #[test] + fn packet_ids_from_u8_when_from_primitive_eleven_then_equals_get_snapshot_manifest_packet() { + 
assert_eq!(SyncPacket::from_u8(0x11), Some(GetSnapshotManifestPacket)); + } - #[test] - fn packet_ids_from_u8_when_invalid_packet_id_then_none() { - assert!(SyncPacket::from_u8(0x99).is_none()); - } + #[test] + fn packet_ids_from_u8_when_invalid_packet_id_then_none() { + assert!(SyncPacket::from_u8(0x99).is_none()); + } - #[test] - fn when_status_packet_then_id_and_protocol_match() { - assert_eq!(StatusPacket.id(), StatusPacket as PacketId); - assert_eq!(StatusPacket.protocol(), ETH_PROTOCOL); - } + #[test] + fn when_status_packet_then_id_and_protocol_match() { + assert_eq!(StatusPacket.id(), StatusPacket as PacketId); + assert_eq!(StatusPacket.protocol(), ETH_PROTOCOL); + } - #[test] - fn when_consensus_data_packet_then_id_and_protocol_match() { - assert_eq!(ConsensusDataPacket.id(), ConsensusDataPacket as PacketId); - assert_eq!(ConsensusDataPacket.protocol(), WARP_SYNC_PROTOCOL_ID); - } + #[test] + fn when_consensus_data_packet_then_id_and_protocol_match() { + assert_eq!(ConsensusDataPacket.id(), ConsensusDataPacket as PacketId); + assert_eq!(ConsensusDataPacket.protocol(), WARP_SYNC_PROTOCOL_ID); + } } diff --git a/ethcore/sync/src/lib.rs b/ethcore/sync/src/lib.rs index 8a1e19569..0469cc0d8 100644 --- a/ethcore/sync/src/lib.rs +++ b/ethcore/sync/src/lib.rs @@ -39,10 +39,14 @@ extern crate triehash_ethereum; extern crate ethcore_light as light; -#[cfg(test)] extern crate env_logger; -#[cfg(test)] extern crate ethcore_private_tx; -#[cfg(test)] extern crate kvdb_memorydb; -#[cfg(test)] extern crate rustc_hex; +#[cfg(test)] +extern crate env_logger; +#[cfg(test)] +extern crate ethcore_private_tx; +#[cfg(test)] +extern crate kvdb_memorydb; +#[cfg(test)] +extern crate rustc_hex; #[macro_use] extern crate enum_primitive; @@ -55,12 +59,12 @@ extern crate heapsize; #[macro_use] extern crate trace_time; -mod chain; -mod blocks; mod block_sync; -mod sync_io; +mod blocks; +mod chain; mod private_tx; mod snapshot; +mod sync_io; mod transactions_stats; pub mod light_sync; @@ 
-71,7 +75,7 @@ mod tests; mod api; pub use api::*; -pub use chain::{SyncStatus, SyncState}; +pub use chain::{SyncState, SyncStatus}; pub use devp2p::validate_node_url; -pub use network::{NonReservedPeerMode, Error, ErrorKind, ConnectionFilter, ConnectionDirection}; -pub use private_tx::{PrivateTxHandler, NoopPrivateTxHandler, SimplePrivateTxHandler}; +pub use network::{ConnectionDirection, ConnectionFilter, Error, ErrorKind, NonReservedPeerMode}; +pub use private_tx::{NoopPrivateTxHandler, PrivateTxHandler, SimplePrivateTxHandler}; diff --git a/ethcore/sync/src/light_sync/mod.rs b/ethcore/sync/src/light_sync/mod.rs index dae05c318..00cb0f0d4 100644 --- a/ethcore/sync/src/light_sync/mod.rs +++ b/ethcore/sync/src/light_sync/mod.rs @@ -32,26 +32,29 @@ //! announced blocks. //! - On bad block/response, punish peer and reset. -use std::collections::{HashMap, HashSet}; -use std::mem; -use std::ops::Deref; -use std::sync::Arc; -use std::time::{Instant, Duration}; - -use types::encoded; -use light::client::{AsLightClient, LightChainClient}; -use light::net::{ - PeerStatus, Announcement, Handler, BasicContext, - EventContext, Capabilities, ReqId, Status, - Error as NetError, +use std::{ + collections::{HashMap, HashSet}, + mem, + ops::Deref, + sync::Arc, + time::{Duration, Instant}, }; -use light::request::{self, CompleteHeadersRequest as HeadersRequest}; -use network::PeerId; -use ethereum_types::{H256, U256}; -use parking_lot::{Mutex, RwLock}; -use rand::{Rng, OsRng}; -use self::sync_round::{AbortReason, SyncRound, ResponseContext}; +use ethereum_types::{H256, U256}; +use light::{ + client::{AsLightClient, LightChainClient}, + net::{ + Announcement, BasicContext, Capabilities, Error as NetError, EventContext, Handler, + PeerStatus, ReqId, Status, + }, + request::{self, CompleteHeadersRequest as HeadersRequest}, +}; +use network::PeerId; +use parking_lot::{Mutex, RwLock}; +use rand::{OsRng, Rng}; +use types::encoded; + +use self::sync_round::{AbortReason, ResponseContext, 
SyncRound}; mod response; mod sync_round; @@ -69,655 +72,717 @@ const REQ_TIMEOUT_PER_HEADER: Duration = Duration::from_millis(10); /// Peer chain info. #[derive(Debug, Clone, PartialEq, Eq)] struct ChainInfo { - head_td: U256, - head_hash: H256, - head_num: u64, + head_td: U256, + head_hash: H256, + head_num: u64, } impl PartialOrd for ChainInfo { - fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> { - self.head_td.partial_cmp(&other.head_td) - } + fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> { + self.head_td.partial_cmp(&other.head_td) + } } impl Ord for ChainInfo { - fn cmp(&self, other: &Self) -> ::std::cmp::Ordering { - self.head_td.cmp(&other.head_td) - } + fn cmp(&self, other: &Self) -> ::std::cmp::Ordering { + self.head_td.cmp(&other.head_td) + } } struct Peer { - status: ChainInfo, + status: ChainInfo, } impl Peer { - // Create a new peer. - fn new(chain_info: ChainInfo) -> Self { - Peer { - status: chain_info, - } - } + // Create a new peer. + fn new(chain_info: ChainInfo) -> Self { + Peer { status: chain_info } + } } // search for a common ancestor with the best chain. #[derive(Debug)] enum AncestorSearch { - Queued(u64), // queued to search for blocks starting from here. - Awaiting(ReqId, u64, HeadersRequest), // awaiting response for this request. - Prehistoric, // prehistoric block found. TODO: start to roll back CHTs. - FoundCommon(u64, H256), // common block found. - Genesis, // common ancestor is the genesis. + Queued(u64), // queued to search for blocks starting from here. + Awaiting(ReqId, u64, HeadersRequest), // awaiting response for this request. + Prehistoric, // prehistoric block found. TODO: start to roll back CHTs. + FoundCommon(u64, H256), // common block found. + Genesis, // common ancestor is the genesis. 
} impl AncestorSearch { - fn begin(best_num: u64) -> Self { - match best_num { - 0 => AncestorSearch::Genesis, - _ => AncestorSearch::Queued(best_num), - } - } + fn begin(best_num: u64) -> Self { + match best_num { + 0 => AncestorSearch::Genesis, + _ => AncestorSearch::Queued(best_num), + } + } - fn process_response(self, ctx: &ResponseContext, client: &L) -> AncestorSearch - where L: AsLightClient - { - let client = client.as_light_client(); - let first_num = client.chain_info().first_block_number.unwrap_or(0); - match self { - AncestorSearch::Awaiting(id, start, req) => { - if &id == ctx.req_id() { - match response::verify(ctx.data(), &req) { - Ok(headers) => { - for header in &headers { - if client.is_known(&header.hash()) { - debug!(target: "sync", "Found common ancestor with best chain"); - return AncestorSearch::FoundCommon(header.number(), header.hash()); - } + fn process_response(self, ctx: &ResponseContext, client: &L) -> AncestorSearch + where + L: AsLightClient, + { + let client = client.as_light_client(); + let first_num = client.chain_info().first_block_number.unwrap_or(0); + match self { + AncestorSearch::Awaiting(id, start, req) => { + if &id == ctx.req_id() { + match response::verify(ctx.data(), &req) { + Ok(headers) => { + for header in &headers { + if client.is_known(&header.hash()) { + debug!(target: "sync", "Found common ancestor with best chain"); + return AncestorSearch::FoundCommon( + header.number(), + header.hash(), + ); + } - if header.number() < first_num { - debug!(target: "sync", "Prehistoric common ancestor with best chain."); - return AncestorSearch::Prehistoric; - } - } + if header.number() < first_num { + debug!(target: "sync", "Prehistoric common ancestor with best chain."); + return AncestorSearch::Prehistoric; + } + } - let probe = start - headers.len() as u64; - if probe == 0 { - AncestorSearch::Genesis - } else { - AncestorSearch::Queued(probe) - } - } - Err(e) => { - trace!(target: "sync", "Bad headers response from {}: {}", 
ctx.responder(), e); + let probe = start - headers.len() as u64; + if probe == 0 { + AncestorSearch::Genesis + } else { + AncestorSearch::Queued(probe) + } + } + Err(e) => { + trace!(target: "sync", "Bad headers response from {}: {}", ctx.responder(), e); - ctx.punish_responder(); - AncestorSearch::Queued(start) - } - } - } else { - AncestorSearch::Awaiting(id, start, req) - } - } - other => other, - } - } + ctx.punish_responder(); + AncestorSearch::Queued(start) + } + } + } else { + AncestorSearch::Awaiting(id, start, req) + } + } + other => other, + } + } - fn requests_abandoned(self, req_ids: &[ReqId]) -> AncestorSearch { - match self { - AncestorSearch::Awaiting(id, start, req) => { - if req_ids.iter().find(|&x| x == &id).is_some() { - AncestorSearch::Queued(start) - } else { - AncestorSearch::Awaiting(id, start, req) - } - } - other => other, - } - } + fn requests_abandoned(self, req_ids: &[ReqId]) -> AncestorSearch { + match self { + AncestorSearch::Awaiting(id, start, req) => { + if req_ids.iter().find(|&x| x == &id).is_some() { + AncestorSearch::Queued(start) + } else { + AncestorSearch::Awaiting(id, start, req) + } + } + other => other, + } + } - fn dispatch_request(self, mut dispatcher: F) -> AncestorSearch - where F: FnMut(HeadersRequest) -> Option - { - const BATCH_SIZE: u64 = 64; + fn dispatch_request(self, mut dispatcher: F) -> AncestorSearch + where + F: FnMut(HeadersRequest) -> Option, + { + const BATCH_SIZE: u64 = 64; - match self { - AncestorSearch::Queued(start) => { - let batch_size = ::std::cmp::min(start, BATCH_SIZE); - trace!(target: "sync", "Requesting {} reverse headers from {} to find common ancestor", + match self { + AncestorSearch::Queued(start) => { + let batch_size = ::std::cmp::min(start, BATCH_SIZE); + trace!(target: "sync", "Requesting {} reverse headers from {} to find common ancestor", batch_size, start); - let req = HeadersRequest { - start: start.into(), - max: batch_size, - skip: 0, - reverse: true, - }; + let req = 
HeadersRequest { + start: start.into(), + max: batch_size, + skip: 0, + reverse: true, + }; - match dispatcher(req.clone()) { - Some(req_id) => AncestorSearch::Awaiting(req_id, start, req), - None => AncestorSearch::Queued(start), - } - } - other => other, - } - } + match dispatcher(req.clone()) { + Some(req_id) => AncestorSearch::Awaiting(req_id, start, req), + None => AncestorSearch::Queued(start), + } + } + other => other, + } + } } // synchronization state machine. #[derive(Debug)] enum SyncState { - // Idle (waiting for peers) or at chain head. - Idle, - // searching for common ancestor with best chain. - // queue should be cleared at this phase. - AncestorSearch(AncestorSearch), - // Doing sync rounds. - Rounds(SyncRound), + // Idle (waiting for peers) or at chain head. + Idle, + // searching for common ancestor with best chain. + // queue should be cleared at this phase. + AncestorSearch(AncestorSearch), + // Doing sync rounds. + Rounds(SyncRound), } /// A wrapper around the SyncState that makes sure to /// update the giving reference to `is_idle` #[derive(Debug)] struct SyncStateWrapper { - state: SyncState, + state: SyncState, } impl SyncStateWrapper { - /// Create a new wrapper for SyncState::Idle - pub fn idle() -> Self { - SyncStateWrapper { - state: SyncState::Idle, - } - } + /// Create a new wrapper for SyncState::Idle + pub fn idle() -> Self { + SyncStateWrapper { + state: SyncState::Idle, + } + } - /// Set the new state's value, making sure `is_idle` gets updated - pub fn set(&mut self, state: SyncState, is_idle_handle: &mut bool) { - *is_idle_handle = match state { - SyncState::Idle => true, - _ => false, - }; - self.state = state; - } + /// Set the new state's value, making sure `is_idle` gets updated + pub fn set(&mut self, state: SyncState, is_idle_handle: &mut bool) { + *is_idle_handle = match state { + SyncState::Idle => true, + _ => false, + }; + self.state = state; + } - /// Returns the internal state's value - pub fn into_inner(self) -> 
SyncState { - self.state - } + /// Returns the internal state's value + pub fn into_inner(self) -> SyncState { + self.state + } } impl Deref for SyncStateWrapper { - type Target = SyncState; + type Target = SyncState; - fn deref(&self) -> &SyncState { - &self.state - } + fn deref(&self) -> &SyncState { + &self.state + } } struct ResponseCtx<'a> { - peer: PeerId, - req_id: ReqId, - ctx: &'a BasicContext, - data: &'a [encoded::Header], + peer: PeerId, + req_id: ReqId, + ctx: &'a BasicContext, + data: &'a [encoded::Header], } impl<'a> ResponseContext for ResponseCtx<'a> { - fn responder(&self) -> PeerId { self.peer } - fn req_id(&self) -> &ReqId { &self.req_id } - fn data(&self) -> &[encoded::Header] { self.data } - fn punish_responder(&self) { self.ctx.disable_peer(self.peer) } + fn responder(&self) -> PeerId { + self.peer + } + fn req_id(&self) -> &ReqId { + &self.req_id + } + fn data(&self) -> &[encoded::Header] { + self.data + } + fn punish_responder(&self) { + self.ctx.disable_peer(self.peer) + } } /// Light client synchronization manager. See module docs for more details. pub struct LightSync { - start_block_number: u64, - best_seen: Mutex>, // best seen block on the network. - peers: RwLock>>, // peers which are relevant to synchronization. - pending_reqs: Mutex>, // requests from this handler - client: Arc, - rng: Mutex, - state: Mutex, - // We duplicate this state tracking to avoid deadlocks in `is_major_importing`. - is_idle: Mutex, + start_block_number: u64, + best_seen: Mutex>, // best seen block on the network. + peers: RwLock>>, // peers which are relevant to synchronization. + pending_reqs: Mutex>, // requests from this handler + client: Arc, + rng: Mutex, + state: Mutex, + // We duplicate this state tracking to avoid deadlocks in `is_major_importing`. 
+ is_idle: Mutex, } #[derive(Debug, Clone)] struct PendingReq { - started: Instant, - timeout: Duration, + started: Instant, + timeout: Duration, } impl Handler for LightSync { - fn on_connect( - &self, - ctx: &EventContext, - status: &Status, - capabilities: &Capabilities - ) -> PeerStatus { - use std::cmp; + fn on_connect( + &self, + ctx: &EventContext, + status: &Status, + capabilities: &Capabilities, + ) -> PeerStatus { + use std::cmp; - if capabilities.serve_headers { - let chain_info = ChainInfo { - head_td: status.head_td, - head_hash: status.head_hash, - head_num: status.head_num, - }; + if capabilities.serve_headers { + let chain_info = ChainInfo { + head_td: status.head_td, + head_hash: status.head_hash, + head_num: status.head_num, + }; - { - let mut best = self.best_seen.lock(); - *best = cmp::max(best.clone(), Some(chain_info.clone())); - } + { + let mut best = self.best_seen.lock(); + *best = cmp::max(best.clone(), Some(chain_info.clone())); + } - self.peers.write().insert(ctx.peer(), Mutex::new(Peer::new(chain_info))); - self.maintain_sync(ctx.as_basic()); + self.peers + .write() + .insert(ctx.peer(), Mutex::new(Peer::new(chain_info))); + self.maintain_sync(ctx.as_basic()); - PeerStatus::Kept - } else { - PeerStatus::Unkept - } - } + PeerStatus::Kept + } else { + PeerStatus::Unkept + } + } - fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) { - let peer_id = ctx.peer(); + fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) { + let peer_id = ctx.peer(); - let peer = match self.peers.write().remove(&peer_id).map(|p| p.into_inner()) { - Some(peer) => peer, - None => return, - }; + let peer = match self.peers.write().remove(&peer_id).map(|p| p.into_inner()) { + Some(peer) => peer, + None => return, + }; - trace!(target: "sync", "peer {} disconnecting", peer_id); + trace!(target: "sync", "peer {} disconnecting", peer_id); - let new_best = { - let mut best = self.best_seen.lock(); + let new_best = { + let mut best = 
self.best_seen.lock(); - if best.as_ref().map_or(false, |b| b == &peer.status) { - // search for next-best block. - let next_best: Option = self.peers.read().values() - .map(|p| p.lock().status.clone()) - .map(Some) - .fold(None, ::std::cmp::max); + if best.as_ref().map_or(false, |b| b == &peer.status) { + // search for next-best block. + let next_best: Option = self + .peers + .read() + .values() + .map(|p| p.lock().status.clone()) + .map(Some) + .fold(None, ::std::cmp::max); - *best = next_best; - } + *best = next_best; + } - best.clone() - }; + best.clone() + }; - { - let mut pending_reqs = self.pending_reqs.lock(); - for unfulfilled in unfulfilled { - pending_reqs.remove(&unfulfilled); - } - } + { + let mut pending_reqs = self.pending_reqs.lock(); + for unfulfilled in unfulfilled { + pending_reqs.remove(&unfulfilled); + } + } - if new_best.is_none() { - debug!(target: "sync", "No peers remain. Reverting to idle"); - self.set_state(&mut self.state.lock(), SyncState::Idle); - } else { - let mut state = self.state.lock(); + if new_best.is_none() { + debug!(target: "sync", "No peers remain. 
Reverting to idle"); + self.set_state(&mut self.state.lock(), SyncState::Idle); + } else { + let mut state = self.state.lock(); - let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() { - SyncState::Idle => SyncState::Idle, - SyncState::AncestorSearch(search) => - SyncState::AncestorSearch(search.requests_abandoned(unfulfilled)), - SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(unfulfilled)), - }; - self.set_state(&mut state, next_state); - } + let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() + { + SyncState::Idle => SyncState::Idle, + SyncState::AncestorSearch(search) => { + SyncState::AncestorSearch(search.requests_abandoned(unfulfilled)) + } + SyncState::Rounds(round) => { + SyncState::Rounds(round.requests_abandoned(unfulfilled)) + } + }; + self.set_state(&mut state, next_state); + } - self.maintain_sync(ctx.as_basic()); - } + self.maintain_sync(ctx.as_basic()); + } - fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { - let (last_td, chain_info) = { - let peers = self.peers.read(); - match peers.get(&ctx.peer()) { - None => return, - Some(peer) => { - let mut peer = peer.lock(); - let last_td = peer.status.head_td; - peer.status = ChainInfo { - head_td: announcement.head_td, - head_hash: announcement.head_hash, - head_num: announcement.head_num, - }; - (last_td, peer.status.clone()) - } - } - }; + fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { + let (last_td, chain_info) = { + let peers = self.peers.read(); + match peers.get(&ctx.peer()) { + None => return, + Some(peer) => { + let mut peer = peer.lock(); + let last_td = peer.status.head_td; + peer.status = ChainInfo { + head_td: announcement.head_td, + head_hash: announcement.head_hash, + head_num: announcement.head_num, + }; + (last_td, peer.status.clone()) + } + } + }; - trace!(target: "sync", "Announcement from peer {}: new chain head {:?}, reorg depth {}", 
+ trace!(target: "sync", "Announcement from peer {}: new chain head {:?}, reorg depth {}", ctx.peer(), (announcement.head_hash, announcement.head_num), announcement.reorg_depth); - if last_td > announcement.head_td { - trace!(target: "sync", "Peer {} moved backwards.", ctx.peer()); - self.peers.write().remove(&ctx.peer()); - ctx.disconnect_peer(ctx.peer()); - return - } + if last_td > announcement.head_td { + trace!(target: "sync", "Peer {} moved backwards.", ctx.peer()); + self.peers.write().remove(&ctx.peer()); + ctx.disconnect_peer(ctx.peer()); + return; + } - { - let mut best = self.best_seen.lock(); - *best = ::std::cmp::max(best.clone(), Some(chain_info)); - } + { + let mut best = self.best_seen.lock(); + *best = ::std::cmp::max(best.clone(), Some(chain_info)); + } - self.maintain_sync(ctx.as_basic()); - } + self.maintain_sync(ctx.as_basic()); + } - fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[request::Response]) { - let peer = ctx.peer(); - if !self.peers.read().contains_key(&peer) { - return - } + fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[request::Response]) { + let peer = ctx.peer(); + if !self.peers.read().contains_key(&peer) { + return; + } - if self.pending_reqs.lock().remove(&req_id).is_none() { - return - } + if self.pending_reqs.lock().remove(&req_id).is_none() { + return; + } - let headers = match responses.get(0) { - Some(&request::Response::Headers(ref response)) => &response.headers[..], - Some(_) => { - trace!("Disabling peer {} for wrong response type.", peer); - ctx.disable_peer(peer); - &[] - } - None => &[], - }; + let headers = match responses.get(0) { + Some(&request::Response::Headers(ref response)) => &response.headers[..], + Some(_) => { + trace!("Disabling peer {} for wrong response type.", peer); + ctx.disable_peer(peer); + &[] + } + None => &[], + }; - { - let mut state = self.state.lock(); + { + let mut state = self.state.lock(); - let ctx = ResponseCtx { - peer: ctx.peer(), - 
req_id: req_id, - ctx: ctx.as_basic(), - data: headers, - }; + let ctx = ResponseCtx { + peer: ctx.peer(), + req_id: req_id, + ctx: ctx.as_basic(), + data: headers, + }; - let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() { - SyncState::Idle => SyncState::Idle, - SyncState::AncestorSearch(search) => - SyncState::AncestorSearch(search.process_response(&ctx, &*self.client)), - SyncState::Rounds(round) => SyncState::Rounds(round.process_response(&ctx)), - }; - self.set_state(&mut state, next_state); - } + let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() + { + SyncState::Idle => SyncState::Idle, + SyncState::AncestorSearch(search) => { + SyncState::AncestorSearch(search.process_response(&ctx, &*self.client)) + } + SyncState::Rounds(round) => SyncState::Rounds(round.process_response(&ctx)), + }; + self.set_state(&mut state, next_state); + } - self.maintain_sync(ctx.as_basic()); - } + self.maintain_sync(ctx.as_basic()); + } - fn tick(&self, ctx: &BasicContext) { - self.maintain_sync(ctx); - } + fn tick(&self, ctx: &BasicContext) { + self.maintain_sync(ctx); + } } // private helpers impl LightSync { - /// Sets the LightSync's state, and update - /// `is_idle` - fn set_state(&self, state: &mut SyncStateWrapper, next_state: SyncState) { - state.set(next_state, &mut self.is_idle.lock()); - } + /// Sets the LightSync's state, and update + /// `is_idle` + fn set_state(&self, state: &mut SyncStateWrapper, next_state: SyncState) { + state.set(next_state, &mut self.is_idle.lock()); + } - // Begins a search for the common ancestor and our best block. - // does not lock state, instead has a mutable reference to it passed. - fn begin_search(&self, state: &mut SyncStateWrapper) { - if let None = *self.best_seen.lock() { - // no peers. - self.set_state(state, SyncState::Idle); - return; - } + // Begins a search for the common ancestor and our best block. 
+ // does not lock state, instead has a mutable reference to it passed. + fn begin_search(&self, state: &mut SyncStateWrapper) { + if let None = *self.best_seen.lock() { + // no peers. + self.set_state(state, SyncState::Idle); + return; + } - self.client.as_light_client().flush_queue(); - let chain_info = self.client.as_light_client().chain_info(); + self.client.as_light_client().flush_queue(); + let chain_info = self.client.as_light_client().chain_info(); - trace!(target: "sync", "Beginning search for common ancestor from {:?}", + trace!(target: "sync", "Beginning search for common ancestor from {:?}", (chain_info.best_block_number, chain_info.best_block_hash)); - let next_state = SyncState::AncestorSearch(AncestorSearch::begin(chain_info.best_block_number)); - self.set_state(state, next_state); - } + let next_state = + SyncState::AncestorSearch(AncestorSearch::begin(chain_info.best_block_number)); + self.set_state(state, next_state); + } - // handles request dispatch, block import, state machine transitions, and timeouts. - fn maintain_sync(&self, ctx: &BasicContext) { - use ethcore::error::{Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind}; + // handles request dispatch, block import, state machine transitions, and timeouts. + fn maintain_sync(&self, ctx: &BasicContext) { + use ethcore::error::{ + Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind, + }; - const DRAIN_AMOUNT: usize = 128; + const DRAIN_AMOUNT: usize = 128; - let client = self.client.as_light_client(); - let chain_info = client.chain_info(); + let client = self.client.as_light_client(); + let chain_info = client.chain_info(); - let mut state = self.state.lock(); - debug!(target: "sync", "Maintaining sync ({:?})", **state); + let mut state = self.state.lock(); + debug!(target: "sync", "Maintaining sync ({:?})", **state); - // drain any pending blocks into the queue. 
- { - let mut sink = Vec::with_capacity(DRAIN_AMOUNT); + // drain any pending blocks into the queue. + { + let mut sink = Vec::with_capacity(DRAIN_AMOUNT); - 'a: - loop { - if client.queue_info().is_full() { break } + 'a: loop { + if client.queue_info().is_full() { + break; + } - let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() { - SyncState::Rounds(round) - => SyncState::Rounds(round.drain(&mut sink, Some(DRAIN_AMOUNT))), - other => other, - }; - self.set_state(&mut state, next_state); + let next_state = + match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() { + SyncState::Rounds(round) => { + SyncState::Rounds(round.drain(&mut sink, Some(DRAIN_AMOUNT))) + } + other => other, + }; + self.set_state(&mut state, next_state); - if sink.is_empty() { break } - trace!(target: "sync", "Drained {} headers to import", sink.len()); + if sink.is_empty() { + break; + } + trace!(target: "sync", "Drained {} headers to import", sink.len()); - for header in sink.drain(..) { - match client.queue_header(header) { - Ok(_) => {} - Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { - trace!(target: "sync", "Block already in chain. Continuing."); - }, - Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyQueued), _)) => { - trace!(target: "sync", "Block already queued. Continuing."); - }, - Err(e) => { - debug!(target: "sync", "Found bad header ({:?}). Reset to search state.", e); + for header in sink.drain(..) { + match client.queue_header(header) { + Ok(_) => {} + Err(EthcoreError( + EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), + _, + )) => { + trace!(target: "sync", "Block already in chain. Continuing."); + } + Err(EthcoreError( + EthcoreErrorKind::Import(ImportErrorKind::AlreadyQueued), + _, + )) => { + trace!(target: "sync", "Block already queued. Continuing."); + } + Err(e) => { + debug!(target: "sync", "Found bad header ({:?}). 
Reset to search state.", e); - self.begin_search(&mut state); - break 'a; - } - } - } - } - } + self.begin_search(&mut state); + break 'a; + } + } + } + } + } - // handle state transitions. - { - let best_td = chain_info.pending_total_difficulty; - let sync_target = match *self.best_seen.lock() { - Some(ref target) if target.head_td > best_td => (target.head_num, target.head_hash), - ref other => { - let network_score = other.as_ref().map(|target| target.head_td); - trace!(target: "sync", "No target to sync to. Network score: {:?}, Local score: {:?}", + // handle state transitions. + { + let best_td = chain_info.pending_total_difficulty; + let sync_target = match *self.best_seen.lock() { + Some(ref target) if target.head_td > best_td => (target.head_num, target.head_hash), + ref other => { + let network_score = other.as_ref().map(|target| target.head_td); + trace!(target: "sync", "No target to sync to. Network score: {:?}, Local score: {:?}", network_score, best_td); - self.set_state(&mut state, SyncState::Idle); - return; - } - }; + self.set_state(&mut state, SyncState::Idle); + return; + } + }; - match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() { - SyncState::Rounds(SyncRound::Abort(reason, remaining)) => { - if remaining.len() > 0 { - self.set_state(&mut state, SyncState::Rounds(SyncRound::Abort(reason, remaining))); - return; - } + match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() { + SyncState::Rounds(SyncRound::Abort(reason, remaining)) => { + if remaining.len() > 0 { + self.set_state( + &mut state, + SyncState::Rounds(SyncRound::Abort(reason, remaining)), + ); + return; + } - match reason { - AbortReason::BadScaffold(bad_peers) => { - debug!(target: "sync", "Disabling peers responsible for bad scaffold"); - for peer in bad_peers { - ctx.disable_peer(peer); - } - } - AbortReason::NoResponses => {} - AbortReason::TargetReached => { - debug!(target: "sync", "Sync target reached. 
Going idle"); - self.set_state(&mut state, SyncState::Idle); - return; - } - } + match reason { + AbortReason::BadScaffold(bad_peers) => { + debug!(target: "sync", "Disabling peers responsible for bad scaffold"); + for peer in bad_peers { + ctx.disable_peer(peer); + } + } + AbortReason::NoResponses => {} + AbortReason::TargetReached => { + debug!(target: "sync", "Sync target reached. Going idle"); + self.set_state(&mut state, SyncState::Idle); + return; + } + } - debug!(target: "sync", "Beginning search after aborted sync round"); - self.begin_search(&mut state); - } - SyncState::AncestorSearch(AncestorSearch::FoundCommon(num, hash)) => { - self.set_state(&mut state, SyncState::Rounds(SyncRound::begin((num, hash), sync_target))); - } - SyncState::AncestorSearch(AncestorSearch::Genesis) => { - // Same here. - let g_hash = chain_info.genesis_hash; - self.set_state(&mut state, SyncState::Rounds(SyncRound::begin((0, g_hash), sync_target))); - } - SyncState::Idle => self.begin_search(&mut state), - other => self.set_state(&mut state, other), // restore displaced state. - } - } + debug!(target: "sync", "Beginning search after aborted sync round"); + self.begin_search(&mut state); + } + SyncState::AncestorSearch(AncestorSearch::FoundCommon(num, hash)) => { + self.set_state( + &mut state, + SyncState::Rounds(SyncRound::begin((num, hash), sync_target)), + ); + } + SyncState::AncestorSearch(AncestorSearch::Genesis) => { + // Same here. + let g_hash = chain_info.genesis_hash; + self.set_state( + &mut state, + SyncState::Rounds(SyncRound::begin((0, g_hash), sync_target)), + ); + } + SyncState::Idle => self.begin_search(&mut state), + other => self.set_state(&mut state, other), // restore displaced state. 
+ } + } - // handle requests timeouts - { - let mut pending_reqs = self.pending_reqs.lock(); - let mut unfulfilled = Vec::new(); - for (req_id, info) in pending_reqs.iter() { - if info.started.elapsed() >= info.timeout { - debug!(target: "sync", "{} timed out", req_id); - unfulfilled.push(req_id.clone()); - } - } + // handle requests timeouts + { + let mut pending_reqs = self.pending_reqs.lock(); + let mut unfulfilled = Vec::new(); + for (req_id, info) in pending_reqs.iter() { + if info.started.elapsed() >= info.timeout { + debug!(target: "sync", "{} timed out", req_id); + unfulfilled.push(req_id.clone()); + } + } - if !unfulfilled.is_empty() { - for unfulfilled in unfulfilled.iter() { - pending_reqs.remove(unfulfilled); - } - drop(pending_reqs); + if !unfulfilled.is_empty() { + for unfulfilled in unfulfilled.iter() { + pending_reqs.remove(unfulfilled); + } + drop(pending_reqs); - let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() { - SyncState::Idle => SyncState::Idle, - SyncState::AncestorSearch(search) => - SyncState::AncestorSearch(search.requests_abandoned(&unfulfilled)), - SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(&unfulfilled)), - }; - self.set_state(&mut state, next_state); - } - } + let next_state = + match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() { + SyncState::Idle => SyncState::Idle, + SyncState::AncestorSearch(search) => { + SyncState::AncestorSearch(search.requests_abandoned(&unfulfilled)) + } + SyncState::Rounds(round) => { + SyncState::Rounds(round.requests_abandoned(&unfulfilled)) + } + }; + self.set_state(&mut state, next_state); + } + } - // allow dispatching of requests. - { - let peers = self.peers.read(); - let mut peer_ids: Vec<_> = peers.iter().filter_map(|(id, p)| { - if p.lock().status.head_td > chain_info.pending_total_difficulty { - Some(*id) - } else { - None - } - }).collect(); + // allow dispatching of requests. 
+ { + let peers = self.peers.read(); + let mut peer_ids: Vec<_> = peers + .iter() + .filter_map(|(id, p)| { + if p.lock().status.head_td > chain_info.pending_total_difficulty { + Some(*id) + } else { + None + } + }) + .collect(); - let mut rng = self.rng.lock(); - let mut requested_from = HashSet::new(); + let mut rng = self.rng.lock(); + let mut requested_from = HashSet::new(); - // naive request dispatcher: just give to any peer which says it will - // give us responses. but only one request per peer per state transition. - let dispatcher = move |req: HeadersRequest| { - rng.shuffle(&mut peer_ids); + // naive request dispatcher: just give to any peer which says it will + // give us responses. but only one request per peer per state transition. + let dispatcher = move |req: HeadersRequest| { + rng.shuffle(&mut peer_ids); - let request = { - let mut builder = request::Builder::default(); - builder.push(request::Request::Headers(request::IncompleteHeadersRequest { + let request = { + let mut builder = request::Builder::default(); + builder.push(request::Request::Headers(request::IncompleteHeadersRequest { start: req.start.into(), skip: req.skip, max: req.max, reverse: req.reverse, })).expect("request provided fully complete with no unresolved back-references; qed"); - builder.build() - }; - for peer in &peer_ids { - if requested_from.contains(peer) { continue } - match ctx.request_from(*peer, request.clone()) { - Ok(id) => { - assert!(req.max <= u32::max_value() as u64, - "requesting more than 2^32 headers at a time would overflow"); - let timeout = REQ_TIMEOUT_BASE + REQ_TIMEOUT_PER_HEADER * req.max as u32; - self.pending_reqs.lock().insert(id.clone(), PendingReq { - started: Instant::now(), - timeout, - }); - requested_from.insert(peer.clone()); + builder.build() + }; + for peer in &peer_ids { + if requested_from.contains(peer) { + continue; + } + match ctx.request_from(*peer, request.clone()) { + Ok(id) => { + assert!( + req.max <= u32::max_value() as u64, + 
"requesting more than 2^32 headers at a time would overflow" + ); + let timeout = + REQ_TIMEOUT_BASE + REQ_TIMEOUT_PER_HEADER * req.max as u32; + self.pending_reqs.lock().insert( + id.clone(), + PendingReq { + started: Instant::now(), + timeout, + }, + ); + requested_from.insert(peer.clone()); - return Some(id) - } - Err(NetError::NoCredits) => {} - Err(e) => - trace!(target: "sync", "Error requesting headers from viable peer: {}", e), - } - } + return Some(id); + } + Err(NetError::NoCredits) => {} + Err(e) => { + trace!(target: "sync", "Error requesting headers from viable peer: {}", e) + } + } + } - None - }; + None + }; - let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() { - SyncState::Rounds(round) => - SyncState::Rounds(round.dispatch_requests(dispatcher)), - SyncState::AncestorSearch(search) => - SyncState::AncestorSearch(search.dispatch_request(dispatcher)), - other => other, - }; - self.set_state(&mut state, next_state); - } - } + let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() + { + SyncState::Rounds(round) => SyncState::Rounds(round.dispatch_requests(dispatcher)), + SyncState::AncestorSearch(search) => { + SyncState::AncestorSearch(search.dispatch_request(dispatcher)) + } + other => other, + }; + self.set_state(&mut state, next_state); + } + } } // public API impl LightSync { - /// Create a new instance of `LightSync`. - /// - /// This won't do anything until registered as a handler - /// so it can act on events. - pub fn new(client: Arc) -> Result { - Ok(LightSync { - start_block_number: client.as_light_client().chain_info().best_block_number, - best_seen: Mutex::new(None), - peers: RwLock::new(HashMap::new()), - pending_reqs: Mutex::new(HashMap::new()), - client: client, - rng: Mutex::new(OsRng::new()?), - state: Mutex::new(SyncStateWrapper::idle()), - is_idle: Mutex::new(true), - }) - } + /// Create a new instance of `LightSync`. 
+ /// + /// This won't do anything until registered as a handler + /// so it can act on events. + pub fn new(client: Arc) -> Result { + Ok(LightSync { + start_block_number: client.as_light_client().chain_info().best_block_number, + best_seen: Mutex::new(None), + peers: RwLock::new(HashMap::new()), + pending_reqs: Mutex::new(HashMap::new()), + client: client, + rng: Mutex::new(OsRng::new()?), + state: Mutex::new(SyncStateWrapper::idle()), + is_idle: Mutex::new(true), + }) + } } /// Trait for erasing the type of a light sync object and exposing read-only methods. pub trait SyncInfo { - /// Get the highest block advertised on the network. - fn highest_block(&self) -> Option; + /// Get the highest block advertised on the network. + fn highest_block(&self) -> Option; - /// Get the block number at the time of sync start. - fn start_block(&self) -> u64; + /// Get the block number at the time of sync start. + fn start_block(&self) -> u64; - /// Whether major sync is underway. - fn is_major_importing(&self) -> bool; + /// Whether major sync is underway. 
+ fn is_major_importing(&self) -> bool; } impl SyncInfo for LightSync { - fn highest_block(&self) -> Option { - self.best_seen.lock().as_ref().map(|x| x.head_num) - } + fn highest_block(&self) -> Option { + self.best_seen.lock().as_ref().map(|x| x.head_num) + } - fn start_block(&self) -> u64 { - self.start_block_number - } + fn start_block(&self) -> u64 { + self.start_block_number + } - fn is_major_importing(&self) -> bool { - const EMPTY_QUEUE: usize = 3; + fn is_major_importing(&self) -> bool { + const EMPTY_QUEUE: usize = 3; - let queue_info = self.client.as_light_client().queue_info(); - let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > EMPTY_QUEUE; - let is_syncing = !*self.is_idle.lock(); - - is_verifying || is_syncing - } + let queue_info = self.client.as_light_client().queue_info(); + let is_verifying = + queue_info.unverified_queue_size + queue_info.verified_queue_size > EMPTY_QUEUE; + let is_syncing = !*self.is_idle.lock(); + is_verifying || is_syncing + } } diff --git a/ethcore/sync/src/light_sync/response.rs b/ethcore/sync/src/light_sync/response.rs index 96d2a8822..ae735f3f3 100644 --- a/ethcore/sync/src/light_sync/response.rs +++ b/ethcore/sync/src/light_sync/response.rs @@ -16,79 +16,85 @@ //! Helpers for decoding and verifying responses for headers. -use types::{encoded, header::Header}; use ethereum_types::H256; -use light::request::{HashOrNumber, CompleteHeadersRequest as HeadersRequest}; +use light::request::{CompleteHeadersRequest as HeadersRequest, HashOrNumber}; use rlp::DecoderError; use std::fmt; +use types::{encoded, header::Header}; /// Errors found when decoding headers and verifying with basic constraints. #[derive(Debug, PartialEq)] pub enum BasicError { - /// Wrong skip value: expected, found (if any). - WrongSkip(u64, Option), - /// Wrong start number. - WrongStartNumber(u64, u64), - /// Wrong start hash. - WrongStartHash(H256, H256), - /// Too many headers. 
- TooManyHeaders(usize, usize), - /// Decoder error. - Decoder(DecoderError), + /// Wrong skip value: expected, found (if any). + WrongSkip(u64, Option), + /// Wrong start number. + WrongStartNumber(u64, u64), + /// Wrong start hash. + WrongStartHash(H256, H256), + /// Too many headers. + TooManyHeaders(usize, usize), + /// Decoder error. + Decoder(DecoderError), } impl From for BasicError { - fn from(err: DecoderError) -> Self { - BasicError::Decoder(err) - } + fn from(err: DecoderError) -> Self { + BasicError::Decoder(err) + } } impl fmt::Display for BasicError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Header response verification error: ")?; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Header response verification error: ")?; - match *self { - BasicError::WrongSkip(ref exp, ref got) - => write!(f, "wrong skip (expected {}, got {:?})", exp, got), - BasicError::WrongStartNumber(ref exp, ref got) - => write!(f, "wrong start number (expected {}, got {})", exp, got), - BasicError::WrongStartHash(ref exp, ref got) - => write!(f, "wrong start hash (expected {}, got {})", exp, got), - BasicError::TooManyHeaders(ref max, ref got) - => write!(f, "too many headers (max {}, got {})", max, got), - BasicError::Decoder(ref err) - => write!(f, "{}", err), - } - } + match *self { + BasicError::WrongSkip(ref exp, ref got) => { + write!(f, "wrong skip (expected {}, got {:?})", exp, got) + } + BasicError::WrongStartNumber(ref exp, ref got) => { + write!(f, "wrong start number (expected {}, got {})", exp, got) + } + BasicError::WrongStartHash(ref exp, ref got) => { + write!(f, "wrong start hash (expected {}, got {})", exp, got) + } + BasicError::TooManyHeaders(ref max, ref got) => { + write!(f, "too many headers (max {}, got {})", max, got) + } + BasicError::Decoder(ref err) => write!(f, "{}", err), + } + } } /// Request verification constraint. 
pub trait Constraint { - type Error; + type Error; - /// Verify headers against this. - fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>; + /// Verify headers against this. + fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>; } /// Do basic verification of provided headers against a request. -pub fn verify(headers: &[encoded::Header], request: &HeadersRequest) -> Result, BasicError> { - let headers: Result, _> = headers.iter().map(|h| h.decode() ).collect(); - match headers { - Ok(headers) => { - let reverse = request.reverse; +pub fn verify( + headers: &[encoded::Header], + request: &HeadersRequest, +) -> Result, BasicError> { + let headers: Result, _> = headers.iter().map(|h| h.decode()).collect(); + match headers { + Ok(headers) => { + let reverse = request.reverse; - Max(request.max as usize).verify(&headers, reverse)?; - match request.start { - HashOrNumber::Number(ref num) => StartsAtNumber(*num).verify(&headers, reverse)?, - HashOrNumber::Hash(ref hash) => StartsAtHash(*hash).verify(&headers, reverse)?, - } + Max(request.max as usize).verify(&headers, reverse)?; + match request.start { + HashOrNumber::Number(ref num) => StartsAtNumber(*num).verify(&headers, reverse)?, + HashOrNumber::Hash(ref hash) => StartsAtHash(*hash).verify(&headers, reverse)?, + } - SkipsBetween(request.skip).verify(&headers, reverse)?; + SkipsBetween(request.skip).verify(&headers, reverse)?; - Ok(headers) - }, - Err(e) => Err(e.into()) - } + Ok(headers) + } + Err(e) => Err(e.into()), + } } struct StartsAtNumber(u64); @@ -97,162 +103,189 @@ struct SkipsBetween(u64); struct Max(usize); impl Constraint for StartsAtNumber { - type Error = BasicError; + type Error = BasicError; - fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> { - headers.first().map_or(Ok(()), |h| { - if h.number() == self.0 { - Ok(()) - } else { - Err(BasicError::WrongStartNumber(self.0, h.number())) - } - }) - } + fn verify(&self, 
headers: &[Header], _reverse: bool) -> Result<(), BasicError> { + headers.first().map_or(Ok(()), |h| { + if h.number() == self.0 { + Ok(()) + } else { + Err(BasicError::WrongStartNumber(self.0, h.number())) + } + }) + } } impl Constraint for StartsAtHash { - type Error = BasicError; + type Error = BasicError; - fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> { - headers.first().map_or(Ok(()), |h| { - if h.hash() == self.0 { - Ok(()) - } else { - Err(BasicError::WrongStartHash(self.0, h.hash())) - } - }) - } + fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> { + headers.first().map_or(Ok(()), |h| { + if h.hash() == self.0 { + Ok(()) + } else { + Err(BasicError::WrongStartHash(self.0, h.hash())) + } + }) + } } impl Constraint for SkipsBetween { - type Error = BasicError; + type Error = BasicError; - fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> { - for pair in headers.windows(2) { - let (low, high) = if reverse { (&pair[1], &pair[0]) } else { (&pair[0], &pair[1]) }; - if low.number() >= high.number() { return Err(BasicError::WrongSkip(self.0, None)) } + fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> { + for pair in headers.windows(2) { + let (low, high) = if reverse { + (&pair[1], &pair[0]) + } else { + (&pair[0], &pair[1]) + }; + if low.number() >= high.number() { + return Err(BasicError::WrongSkip(self.0, None)); + } - let skip = (high.number() - low.number()) - 1; - if skip != self.0 { return Err(BasicError::WrongSkip(self.0, Some(skip))) } - } + let skip = (high.number() - low.number()) - 1; + if skip != self.0 { + return Err(BasicError::WrongSkip(self.0, Some(skip))); + } + } - Ok(()) - } + Ok(()) + } } impl Constraint for Max { - type Error = BasicError; + type Error = BasicError; - fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> { - match headers.len() > self.0 { - true => 
Err(BasicError::TooManyHeaders(self.0, headers.len())), - false => Ok(()) - } - } + fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> { + match headers.len() > self.0 { + true => Err(BasicError::TooManyHeaders(self.0, headers.len())), + false => Ok(()), + } + } } #[cfg(test)] mod tests { - use types::encoded; - use types::header::Header; - use light::request::CompleteHeadersRequest as HeadersRequest; + use light::request::CompleteHeadersRequest as HeadersRequest; + use types::{encoded, header::Header}; - use super::*; + use super::*; - #[test] - fn sequential_forward() { - let request = HeadersRequest { - start: 10.into(), - max: 30, - skip: 0, - reverse: false, - }; + #[test] + fn sequential_forward() { + let request = HeadersRequest { + start: 10.into(), + max: 30, + skip: 0, + reverse: false, + }; - let mut parent_hash = None; - let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| { - let mut header = Header::default(); - header.set_number(x); + let mut parent_hash = None; + let headers: Vec<_> = (0..25) + .map(|x| x + 10) + .map(|x| { + let mut header = Header::default(); + header.set_number(x); - if let Some(parent_hash) = parent_hash { - header.set_parent_hash(parent_hash); - } + if let Some(parent_hash) = parent_hash { + header.set_parent_hash(parent_hash); + } - parent_hash = Some(header.hash()); + parent_hash = Some(header.hash()); - encoded::Header::new(::rlp::encode(&header)) - }).collect(); + encoded::Header::new(::rlp::encode(&header)) + }) + .collect(); - assert!(verify(&headers, &request).is_ok()); - } + assert!(verify(&headers, &request).is_ok()); + } - #[test] - fn sequential_backward() { - let request = HeadersRequest { - start: 34.into(), - max: 30, - skip: 0, - reverse: true, - }; + #[test] + fn sequential_backward() { + let request = HeadersRequest { + start: 34.into(), + max: 30, + skip: 0, + reverse: true, + }; - let mut parent_hash = None; - let headers: Vec<_> = (0..25).map(|x| x + 10).rev().map(|x| { - let mut 
header = Header::default(); - header.set_number(x); + let mut parent_hash = None; + let headers: Vec<_> = (0..25) + .map(|x| x + 10) + .rev() + .map(|x| { + let mut header = Header::default(); + header.set_number(x); - if let Some(parent_hash) = parent_hash { - header.set_parent_hash(parent_hash); - } + if let Some(parent_hash) = parent_hash { + header.set_parent_hash(parent_hash); + } - parent_hash = Some(header.hash()); + parent_hash = Some(header.hash()); - encoded::Header::new(::rlp::encode(&header)) - }).collect(); + encoded::Header::new(::rlp::encode(&header)) + }) + .collect(); - assert!(verify(&headers, &request).is_ok()); - } + assert!(verify(&headers, &request).is_ok()); + } - #[test] - fn too_many() { - let request = HeadersRequest { - start: 10.into(), - max: 20, - skip: 0, - reverse: false, - }; + #[test] + fn too_many() { + let request = HeadersRequest { + start: 10.into(), + max: 20, + skip: 0, + reverse: false, + }; - let mut parent_hash = None; - let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| { - let mut header = Header::default(); - header.set_number(x); + let mut parent_hash = None; + let headers: Vec<_> = (0..25) + .map(|x| x + 10) + .map(|x| { + let mut header = Header::default(); + header.set_number(x); - if let Some(parent_hash) = parent_hash { - header.set_parent_hash(parent_hash); - } + if let Some(parent_hash) = parent_hash { + header.set_parent_hash(parent_hash); + } - parent_hash = Some(header.hash()); + parent_hash = Some(header.hash()); - encoded::Header::new(::rlp::encode(&header)) - }).collect(); + encoded::Header::new(::rlp::encode(&header)) + }) + .collect(); - assert_eq!(verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25))); - } + assert_eq!( + verify(&headers, &request), + Err(BasicError::TooManyHeaders(20, 25)) + ); + } - #[test] - fn wrong_skip() { - let request = HeadersRequest { - start: 10.into(), - max: 30, - skip: 5, - reverse: false, - }; + #[test] + fn wrong_skip() { + let request = HeadersRequest { 
+ start: 10.into(), + max: 30, + skip: 5, + reverse: false, + }; - let headers: Vec<_> = (0..25).map(|x| x * 3).map(|x| x + 10).map(|x| { - let mut header = Header::default(); - header.set_number(x); + let headers: Vec<_> = (0..25) + .map(|x| x * 3) + .map(|x| x + 10) + .map(|x| { + let mut header = Header::default(); + header.set_number(x); - encoded::Header::new(::rlp::encode(&header)) - }).collect(); + encoded::Header::new(::rlp::encode(&header)) + }) + .collect(); - assert_eq!(verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2)))); - } + assert_eq!( + verify(&headers, &request), + Err(BasicError::WrongSkip(5, Some(2))) + ); + } } diff --git a/ethcore/sync/src/light_sync/sync_round.rs b/ethcore/sync/src/light_sync/sync_round.rs index 7c2a2bc01..a099aba7a 100644 --- a/ethcore/sync/src/light_sync/sync_round.rs +++ b/ethcore/sync/src/light_sync/sync_round.rs @@ -16,18 +16,18 @@ //! Header download state machine. -use std::cmp::Ordering; -use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; -use std::fmt; +use std::{ + cmp::Ordering, + collections::{BinaryHeap, HashMap, HashSet, VecDeque}, + fmt, +}; -use types::encoded; -use types::header::Header; +use types::{encoded, header::Header}; -use light::net::ReqId; -use light::request::CompleteHeadersRequest as HeadersRequest; +use light::{net::ReqId, request::CompleteHeadersRequest as HeadersRequest}; -use network::PeerId; use ethereum_types::H256; +use network::PeerId; use super::response; @@ -36,277 +36,295 @@ const SCAFFOLD_ATTEMPTS: usize = 3; /// Context for a headers response. pub trait ResponseContext { - /// Get the peer who sent this response. - fn responder(&self) -> PeerId; - /// Get the request ID this response corresponds to. - fn req_id(&self) -> &ReqId; - /// Get the (unverified) response data. - fn data(&self) -> &[encoded::Header]; - /// Punish the responder. - fn punish_responder(&self); + /// Get the peer who sent this response. 
+ fn responder(&self) -> PeerId; + /// Get the request ID this response corresponds to. + fn req_id(&self) -> &ReqId; + /// Get the (unverified) response data. + fn data(&self) -> &[encoded::Header]; + /// Punish the responder. + fn punish_responder(&self); } /// Reasons for sync round abort. #[derive(Debug, Clone)] pub enum AbortReason { - /// Bad sparse header chain along with a list of peers who contributed to it. - BadScaffold(Vec), - /// No incoming data. - NoResponses, - /// Sync rounds completed. - TargetReached, + /// Bad sparse header chain along with a list of peers who contributed to it. + BadScaffold(Vec), + /// No incoming data. + NoResponses, + /// Sync rounds completed. + TargetReached, } // A request for headers with a known starting header hash. // and a known parent hash for the last block. #[derive(PartialEq, Eq)] struct SubchainRequest { - subchain_parent: (u64, H256), - headers_request: HeadersRequest, - subchain_end: (u64, H256), - downloaded: VecDeque
<Header>,
+    subchain_parent: (u64, H256),
+    headers_request: HeadersRequest,
+    subchain_end: (u64, H256),
+    downloaded: VecDeque
<Header>,
}

// ordered by subchain parent number so pending requests towards the
// front of the round are dispatched first.
impl PartialOrd for SubchainRequest {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        self.subchain_parent.0
-            .partial_cmp(&other.subchain_parent.0)
-            .map(Ordering::reverse)
-    }
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        self.subchain_parent
+            .0
+            .partial_cmp(&other.subchain_parent.0)
+            .map(Ordering::reverse)
+    }
}

impl Ord for SubchainRequest {
-    fn cmp(&self, other: &Self) -> Ordering {
-        self.subchain_parent.0.cmp(&other.subchain_parent.0).reverse()
-    }
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.subchain_parent
+            .0
+            .cmp(&other.subchain_parent.0)
+            .reverse()
+    }
}

/// Manages downloading of interior blocks of a sparse header chain.
pub struct Fetcher {
-    sparse: VecDeque
<Header>, // sparse header chain.
-    requests: BinaryHeap<SubchainRequest>,
-    complete_requests: HashMap<H256, SubchainRequest>,
-    pending: HashMap<ReqId, SubchainRequest>,
-    scaffold_contributors: Vec<PeerId>,
-    ready: VecDeque
<Header>,
-    end: (u64, H256),
-    target: (u64, H256),
+    sparse: VecDeque
<Header>, // sparse header chain.
+    requests: BinaryHeap<SubchainRequest>,
+    complete_requests: HashMap<H256, SubchainRequest>,
+    pending: HashMap<ReqId, SubchainRequest>,
+    scaffold_contributors: Vec<PeerId>,
+    ready: VecDeque
<Header>,
+    end: (u64, H256),
+    target: (u64, H256),
}

impl Fetcher {
-    // Produce a new fetcher given a sparse headerchain, in ascending order along
-    // with a list of peers who helped produce the chain.
-    // The headers must be valid RLP at this point and must have a consistent
-    // non-zero gap between them. Will abort the round if found wrong.
-    fn new(sparse_headers: Vec
<Header>, contributors: Vec<PeerId>, target: (u64, H256)) -> SyncRound {
-        let mut requests = BinaryHeap::with_capacity(sparse_headers.len() - 1);
+    // Produce a new fetcher given a sparse headerchain, in ascending order along
+    // with a list of peers who helped produce the chain.
+    // The headers must be valid RLP at this point and must have a consistent
+    // non-zero gap between them. Will abort the round if found wrong.
+    fn new(
+        sparse_headers: Vec<Header>
, + contributors: Vec, + target: (u64, H256), + ) -> SyncRound { + let mut requests = BinaryHeap::with_capacity(sparse_headers.len() - 1); - for pair in sparse_headers.windows(2) { - let low_rung = &pair[0]; - let high_rung = &pair[1]; + for pair in sparse_headers.windows(2) { + let low_rung = &pair[0]; + let high_rung = &pair[1]; - let diff = high_rung.number() - low_rung.number(); + let diff = high_rung.number() - low_rung.number(); - // should never happen as long as we verify the gaps - // gotten from SyncRound::Start - if diff < 2 { continue } + // should never happen as long as we verify the gaps + // gotten from SyncRound::Start + if diff < 2 { + continue; + } - let needed_headers = HeadersRequest { - start: high_rung.parent_hash().clone().into(), - max: diff - 1, - skip: 0, - reverse: true, - }; + let needed_headers = HeadersRequest { + start: high_rung.parent_hash().clone().into(), + max: diff - 1, + skip: 0, + reverse: true, + }; - requests.push(SubchainRequest { - headers_request: needed_headers, - subchain_end: (high_rung.number() - 1, *high_rung.parent_hash()), - downloaded: VecDeque::new(), - subchain_parent: (low_rung.number(), low_rung.hash()), - }); - } + requests.push(SubchainRequest { + headers_request: needed_headers, + subchain_end: (high_rung.number() - 1, *high_rung.parent_hash()), + downloaded: VecDeque::new(), + subchain_parent: (low_rung.number(), low_rung.hash()), + }); + } - let end = match sparse_headers.last().map(|h| (h.number(), h.hash())) { - Some(end) => end, - None => return SyncRound::abort(AbortReason::BadScaffold(contributors), VecDeque::new()), - }; + let end = match sparse_headers.last().map(|h| (h.number(), h.hash())) { + Some(end) => end, + None => { + return SyncRound::abort(AbortReason::BadScaffold(contributors), VecDeque::new()) + } + }; - SyncRound::Fetch(Fetcher { - sparse: sparse_headers.into(), - requests: requests, - complete_requests: HashMap::new(), - pending: HashMap::new(), - scaffold_contributors: contributors, 
- ready: VecDeque::new(), - end: end, - target: target, - }) - } + SyncRound::Fetch(Fetcher { + sparse: sparse_headers.into(), + requests: requests, + complete_requests: HashMap::new(), + pending: HashMap::new(), + scaffold_contributors: contributors, + ready: VecDeque::new(), + end: end, + target: target, + }) + } - // collect complete requests and their subchain from the sparse header chain - // into the ready set in order. - fn collect_ready(&mut self) { - loop { - let start_hash = match self.sparse.front() { - Some(first) => first.hash(), - None => break, - }; + // collect complete requests and their subchain from the sparse header chain + // into the ready set in order. + fn collect_ready(&mut self) { + loop { + let start_hash = match self.sparse.front() { + Some(first) => first.hash(), + None => break, + }; - match self.complete_requests.remove(&start_hash) { - None => break, - Some(complete_req) => { - self.ready.push_back(self.sparse.pop_front().expect("first known to exist; qed")); - self.ready.extend(complete_req.downloaded); - } - } - } + match self.complete_requests.remove(&start_hash) { + None => break, + Some(complete_req) => { + self.ready + .push_back(self.sparse.pop_front().expect("first known to exist; qed")); + self.ready.extend(complete_req.downloaded); + } + } + } - // frames are between two sparse headers and keyed by subchain parent, so the last - // remaining will be the last header. - if self.sparse.len() == 1 { - self.ready.push_back(self.sparse.pop_back().expect("sparse known to have one entry; qed")) - } + // frames are between two sparse headers and keyed by subchain parent, so the last + // remaining will be the last header. 
+ if self.sparse.len() == 1 { + self.ready.push_back( + self.sparse + .pop_back() + .expect("sparse known to have one entry; qed"), + ) + } - trace!(target: "sync", "{} headers ready to drain", self.ready.len()); - } + trace!(target: "sync", "{} headers ready to drain", self.ready.len()); + } - fn process_response(mut self, ctx: &R) -> SyncRound { - let mut request = match self.pending.remove(ctx.req_id()) { - Some(request) => request, - None => return SyncRound::Fetch(self), - }; + fn process_response(mut self, ctx: &R) -> SyncRound { + let mut request = match self.pending.remove(ctx.req_id()) { + Some(request) => request, + None => return SyncRound::Fetch(self), + }; - trace!(target: "sync", "Received response for subchain ({} -> {})", + trace!(target: "sync", "Received response for subchain ({} -> {})", request.subchain_parent.0, request.subchain_end.0); - let headers = ctx.data(); + let headers = ctx.data(); - if headers.is_empty() { - trace!(target: "sync", "Punishing peer {} for empty response", ctx.responder()); - ctx.punish_responder(); + if headers.is_empty() { + trace!(target: "sync", "Punishing peer {} for empty response", ctx.responder()); + ctx.punish_responder(); - self.requests.push(request); - return SyncRound::Fetch(self); - } + self.requests.push(request); + return SyncRound::Fetch(self); + } - match response::verify(headers, &request.headers_request) { - Err(e) => { - trace!(target: "sync", "Punishing peer {} for invalid response ({})", ctx.responder(), e); - ctx.punish_responder(); + match response::verify(headers, &request.headers_request) { + Err(e) => { + trace!(target: "sync", "Punishing peer {} for invalid response ({})", ctx.responder(), e); + ctx.punish_responder(); - // TODO: track number of attempts per request, - // abort if failure rate too high. 
- self.requests.push(request); - SyncRound::Fetch(self) - } - Ok(headers) => { - let mut parent_hash = None; - for header in headers { - if let Some(hash) = parent_hash.as_ref() { - if *hash != header.hash() { - trace!(target: "sync", "Punishing peer {} for parent mismatch", ctx.responder()); - ctx.punish_responder(); - self.requests.push(request); - return SyncRound::Fetch(self); - } - } - // incrementally update the frame request as we go so we can - // return at any time in the loop. - parent_hash = Some(*header.parent_hash()); - request.headers_request.start = header.parent_hash().clone().into(); - request.headers_request.max -= 1; - request.downloaded.push_front(header); - } + // TODO: track number of attempts per request, + // abort if failure rate too high. + self.requests.push(request); + SyncRound::Fetch(self) + } + Ok(headers) => { + let mut parent_hash = None; + for header in headers { + if let Some(hash) = parent_hash.as_ref() { + if *hash != header.hash() { + trace!(target: "sync", "Punishing peer {} for parent mismatch", ctx.responder()); + ctx.punish_responder(); + self.requests.push(request); + return SyncRound::Fetch(self); + } + } + // incrementally update the frame request as we go so we can + // return at any time in the loop. + parent_hash = Some(*header.parent_hash()); + request.headers_request.start = header.parent_hash().clone().into(); + request.headers_request.max -= 1; + request.downloaded.push_front(header); + } - let subchain_parent = request.subchain_parent.1; + let subchain_parent = request.subchain_parent.1; - // check if the subchain portion has been completely filled. - if request.headers_request.max == 0 { - if parent_hash.map_or(true, |hash| hash != subchain_parent) { - let abort = AbortReason::BadScaffold(self.scaffold_contributors); - return SyncRound::abort(abort, self.ready); - } + // check if the subchain portion has been completely filled. 
+ if request.headers_request.max == 0 { + if parent_hash.map_or(true, |hash| hash != subchain_parent) { + let abort = AbortReason::BadScaffold(self.scaffold_contributors); + return SyncRound::abort(abort, self.ready); + } - self.complete_requests.insert(subchain_parent, request); - self.collect_ready(); - } + self.complete_requests.insert(subchain_parent, request); + self.collect_ready(); + } - // state transition not triggered until drain is finished. - (SyncRound::Fetch(self)) - } - } - } + // state transition not triggered until drain is finished. + (SyncRound::Fetch(self)) + } + } + } - fn requests_abandoned(mut self, abandoned: &[ReqId]) -> SyncRound { - trace!(target: "sync", "Abandonned requests {:?}", abandoned); + fn requests_abandoned(mut self, abandoned: &[ReqId]) -> SyncRound { + trace!(target: "sync", "Abandonned requests {:?}", abandoned); - for abandoned in abandoned { - match self.pending.remove(abandoned) { - None => {}, - Some(req) => self.requests.push(req), - } - } + for abandoned in abandoned { + match self.pending.remove(abandoned) { + None => {} + Some(req) => self.requests.push(req), + } + } - // TODO: track failure rate and potentially abort. - SyncRound::Fetch(self) - } + // TODO: track failure rate and potentially abort. 
+ SyncRound::Fetch(self) + } - fn dispatch_requests(mut self, mut dispatcher: D) -> SyncRound - where D: FnMut(HeadersRequest) -> Option - { - while let Some(pending_req) = self.requests.pop() { - match dispatcher(pending_req.headers_request.clone()) { - Some(req_id) => { - trace!(target: "sync", "Assigned request {} for subchain ({} -> {})", + fn dispatch_requests(mut self, mut dispatcher: D) -> SyncRound + where + D: FnMut(HeadersRequest) -> Option, + { + while let Some(pending_req) = self.requests.pop() { + match dispatcher(pending_req.headers_request.clone()) { + Some(req_id) => { + trace!(target: "sync", "Assigned request {} for subchain ({} -> {})", req_id, pending_req.subchain_parent.0, pending_req.subchain_end.0); - self.pending.insert(req_id, pending_req); - } - None => { - trace!(target: "sync", "Failed to assign request for subchain ({} -> {})", + self.pending.insert(req_id, pending_req); + } + None => { + trace!(target: "sync", "Failed to assign request for subchain ({} -> {})", pending_req.subchain_parent.0, pending_req.subchain_end.0); - self.requests.push(pending_req); - break; - } - } - } + self.requests.push(pending_req); + break; + } + } + } - SyncRound::Fetch(self) - } + SyncRound::Fetch(self) + } - fn drain(mut self, headers: &mut Vec
<Header>, max: Option<usize>) -> SyncRound {
-        let max = ::std::cmp::min(max.unwrap_or(usize::max_value()), self.ready.len());
-        headers.extend(self.ready.drain(0..max));
+    fn drain(mut self, headers: &mut Vec<Header>
, max: Option) -> SyncRound { + let max = ::std::cmp::min(max.unwrap_or(usize::max_value()), self.ready.len()); + headers.extend(self.ready.drain(0..max)); - if self.sparse.is_empty() && self.ready.is_empty() { - trace!(target: "sync", "sync round complete. Starting anew from {:?}", self.end); - SyncRound::begin(self.end, self.target) - } else { - SyncRound::Fetch(self) - } - } + if self.sparse.is_empty() && self.ready.is_empty() { + trace!(target: "sync", "sync round complete. Starting anew from {:?}", self.end); + SyncRound::begin(self.end, self.target) + } else { + SyncRound::Fetch(self) + } + } } // Compute scaffold parameters from non-zero distance between start and target block: (skip, pivots). fn scaffold_params(diff: u64) -> (u64, u64) { - // default parameters. - // amount of blocks between each scaffold pivot. - const ROUND_SKIP: u64 = 255; - // amount of scaffold pivots: these are the Xs in "X___X___X" - const ROUND_PIVOTS: u64 = 256; + // default parameters. + // amount of blocks between each scaffold pivot. + const ROUND_SKIP: u64 = 255; + // amount of scaffold pivots: these are the Xs in "X___X___X" + const ROUND_PIVOTS: u64 = 256; - let rem = diff % (ROUND_SKIP + 1); - if diff <= ROUND_SKIP { - // just request headers from the start to the target. - (0, rem) - } else { - // the number of pivots necessary to exactly hit or overshoot the target. - let pivots_to_target = (diff / (ROUND_SKIP + 1)) + if rem == 0 { 0 } else { 1 }; - let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS); - (ROUND_SKIP, num_pivots) - } + let rem = diff % (ROUND_SKIP + 1); + if diff <= ROUND_SKIP { + // just request headers from the start to the target. + (0, rem) + } else { + // the number of pivots necessary to exactly hit or overshoot the target. 
+ let pivots_to_target = (diff / (ROUND_SKIP + 1)) + if rem == 0 { 0 } else { 1 }; + let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS); + (ROUND_SKIP, num_pivots) + } } /// Round started: get stepped header chain. @@ -316,242 +334,252 @@ fn scaffold_params(diff: u64) -> (u64, u64) { /// only those blocks. If the sync target is within (ROUND_SKIP + 1) * (ROUND_PIVOTS - 1) of /// the start, we reduce the number of pivots so the target is outside it. pub struct RoundStart { - start_block: (u64, H256), - target: (u64, H256), - pending_req: Option<(ReqId, HeadersRequest)>, - sparse_headers: Vec
<Header>,
-    contributors: HashSet<PeerId>,
-    attempt: usize,
-    skip: u64,
-    pivots: u64,
+    start_block: (u64, H256),
+    target: (u64, H256),
+    pending_req: Option<(ReqId, HeadersRequest)>,
+    sparse_headers: Vec<Header>
, + contributors: HashSet, + attempt: usize, + skip: u64, + pivots: u64, } impl RoundStart { - fn new(start: (u64, H256), target: (u64, H256)) -> Self { - let (skip, pivots) = scaffold_params(target.0 - start.0); + fn new(start: (u64, H256), target: (u64, H256)) -> Self { + let (skip, pivots) = scaffold_params(target.0 - start.0); - trace!(target: "sync", "Beginning sync round: {} pivots and {} skip from block {}", + trace!(target: "sync", "Beginning sync round: {} pivots and {} skip from block {}", pivots, skip, start.0); - RoundStart { - start_block: start, - target: target, - pending_req: None, - sparse_headers: Vec::new(), - contributors: HashSet::new(), - attempt: 0, - skip: skip, - pivots: pivots, - } - } + RoundStart { + start_block: start, + target: target, + pending_req: None, + sparse_headers: Vec::new(), + contributors: HashSet::new(), + attempt: 0, + skip: skip, + pivots: pivots, + } + } - // called on failed attempt. may trigger a transition after a number of attempts. - // a failed attempt is defined as any time a peer returns invalid or incomplete response - fn failed_attempt(mut self) -> SyncRound { - self.attempt += 1; + // called on failed attempt. may trigger a transition after a number of attempts. 
+ // a failed attempt is defined as any time a peer returns invalid or incomplete response + fn failed_attempt(mut self) -> SyncRound { + self.attempt += 1; - if self.attempt >= SCAFFOLD_ATTEMPTS { - return if self.sparse_headers.len() > 1 { - Fetcher::new(self.sparse_headers, self.contributors.into_iter().collect(), self.target) - } else { - let fetched_headers = if self.skip == 0 { - self.sparse_headers.into() - } else { - VecDeque::new() - }; + if self.attempt >= SCAFFOLD_ATTEMPTS { + return if self.sparse_headers.len() > 1 { + Fetcher::new( + self.sparse_headers, + self.contributors.into_iter().collect(), + self.target, + ) + } else { + let fetched_headers = if self.skip == 0 { + self.sparse_headers.into() + } else { + VecDeque::new() + }; - SyncRound::abort(AbortReason::NoResponses, fetched_headers) - } - } else { - SyncRound::Start(self) - } - } + SyncRound::abort(AbortReason::NoResponses, fetched_headers) + }; + } else { + SyncRound::Start(self) + } + } - fn process_response(mut self, ctx: &R) -> SyncRound { - let req = match self.pending_req.take() { - Some((id, ref req)) if ctx.req_id() == &id => { req.clone() } - other => { - self.pending_req = other; - return SyncRound::Start(self); - } - }; + fn process_response(mut self, ctx: &R) -> SyncRound { + let req = match self.pending_req.take() { + Some((id, ref req)) if ctx.req_id() == &id => req.clone(), + other => { + self.pending_req = other; + return SyncRound::Start(self); + } + }; - match response::verify(ctx.data(), &req) { - Ok(headers) => { - if self.sparse_headers.is_empty() - && headers.get(0).map_or(false, |x| x.parent_hash() != &self.start_block.1) { - trace!(target: "sync", "Wrong parent for first header in round"); - ctx.punish_responder(); // or should we reset? 
- } + match response::verify(ctx.data(), &req) { + Ok(headers) => { + if self.sparse_headers.is_empty() + && headers + .get(0) + .map_or(false, |x| x.parent_hash() != &self.start_block.1) + { + trace!(target: "sync", "Wrong parent for first header in round"); + ctx.punish_responder(); // or should we reset? + } - self.contributors.insert(ctx.responder()); - self.sparse_headers.extend(headers); + self.contributors.insert(ctx.responder()); + self.sparse_headers.extend(headers); - if self.sparse_headers.len() as u64 == self.pivots { - return if self.skip == 0 { - SyncRound::abort(AbortReason::TargetReached, self.sparse_headers.into()) - } else { - trace!(target: "sync", "Beginning fetch of blocks between {} sparse headers", + if self.sparse_headers.len() as u64 == self.pivots { + return if self.skip == 0 { + SyncRound::abort(AbortReason::TargetReached, self.sparse_headers.into()) + } else { + trace!(target: "sync", "Beginning fetch of blocks between {} sparse headers", self.sparse_headers.len()); - Fetcher::new( - self.sparse_headers, - self.contributors.into_iter().collect(), - self.target - ) - } - } - } - Err(e) => { - trace!(target: "sync", "Punishing peer {} for malformed response ({})", ctx.responder(), e); - ctx.punish_responder(); - } - }; + Fetcher::new( + self.sparse_headers, + self.contributors.into_iter().collect(), + self.target, + ) + }; + } + } + Err(e) => { + trace!(target: "sync", "Punishing peer {} for malformed response ({})", ctx.responder(), e); + ctx.punish_responder(); + } + }; - self.failed_attempt() - } + self.failed_attempt() + } - fn requests_abandoned(mut self, abandoned: &[ReqId]) -> SyncRound { - match self.pending_req.take() { - Some((id, req)) => { - if abandoned.iter().any(|r| r == &id) { - self.pending_req = None; - self.failed_attempt() - } else { - self.pending_req = Some((id, req)); - SyncRound::Start(self) - } - } - None => SyncRound::Start(self), - } - } + fn requests_abandoned(mut self, abandoned: &[ReqId]) -> SyncRound { + 
match self.pending_req.take() { + Some((id, req)) => { + if abandoned.iter().any(|r| r == &id) { + self.pending_req = None; + self.failed_attempt() + } else { + self.pending_req = Some((id, req)); + SyncRound::Start(self) + } + } + None => SyncRound::Start(self), + } + } - fn dispatch_requests(mut self, mut dispatcher: D) -> SyncRound - where D: FnMut(HeadersRequest) -> Option - { - if self.pending_req.is_none() { - // beginning offset + first block expected after last header we have. - let start = (self.start_block.0 + 1) - + self.sparse_headers.len() as u64 * (self.skip + 1); + fn dispatch_requests(mut self, mut dispatcher: D) -> SyncRound + where + D: FnMut(HeadersRequest) -> Option, + { + if self.pending_req.is_none() { + // beginning offset + first block expected after last header we have. + let start = + (self.start_block.0 + 1) + self.sparse_headers.len() as u64 * (self.skip + 1); - let max = self.pivots - self.sparse_headers.len() as u64; + let max = self.pivots - self.sparse_headers.len() as u64; - let headers_request = HeadersRequest { - start: start.into(), - max: max, - skip: self.skip, - reverse: false, - }; + let headers_request = HeadersRequest { + start: start.into(), + max: max, + skip: self.skip, + reverse: false, + }; - if let Some(req_id) = dispatcher(headers_request.clone()) { - trace!(target: "sync", "Requesting scaffold: {} headers forward from {}, skip={}", + if let Some(req_id) = dispatcher(headers_request.clone()) { + trace!(target: "sync", "Requesting scaffold: {} headers forward from {}, skip={}", max, start, self.skip); - self.pending_req = Some((req_id, headers_request)); - } - } + self.pending_req = Some((req_id, headers_request)); + } + } - SyncRound::Start(self) - } + SyncRound::Start(self) + } } /// Sync round state machine. pub enum SyncRound { - /// Beginning a sync round. - Start(RoundStart), - /// Fetching intermediate blocks during a sync round. - Fetch(Fetcher), - /// Aborted + Sequential headers - Abort(AbortReason, VecDeque
<Header>),
+    /// Beginning a sync round.
+    Start(RoundStart),
+    /// Fetching intermediate blocks during a sync round.
+    Fetch(Fetcher),
+    /// Aborted + Sequential headers
+    Abort(AbortReason, VecDeque
<Header>),
}

impl SyncRound {
-    fn abort(reason: AbortReason, remaining: VecDeque
<Header>) -> Self {
-        trace!(target: "sync", "Aborting sync round: {:?}. To drain: {}", reason, remaining.len());
+    fn abort(reason: AbortReason, remaining: VecDeque<Header>
) -> Self { + trace!(target: "sync", "Aborting sync round: {:?}. To drain: {}", reason, remaining.len()); - SyncRound::Abort(reason, remaining) - } + SyncRound::Abort(reason, remaining) + } - /// Begin sync rounds from a starting block, but not to go past a given target - pub fn begin(start: (u64, H256), target: (u64, H256)) -> Self { - if target.0 <= start.0 { - SyncRound::abort(AbortReason::TargetReached, VecDeque::new()) - } else { - SyncRound::Start(RoundStart::new(start, target)) - } - } + /// Begin sync rounds from a starting block, but not to go past a given target + pub fn begin(start: (u64, H256), target: (u64, H256)) -> Self { + if target.0 <= start.0 { + SyncRound::abort(AbortReason::TargetReached, VecDeque::new()) + } else { + SyncRound::Start(RoundStart::new(start, target)) + } + } - /// Process an answer to a request. Unknown requests will be ignored. - pub fn process_response(self, ctx: &R) -> Self { - match self { - SyncRound::Start(round_start) => round_start.process_response(ctx), - SyncRound::Fetch(fetcher) => fetcher.process_response(ctx), - other => other, - } - } + /// Process an answer to a request. Unknown requests will be ignored. + pub fn process_response(self, ctx: &R) -> Self { + match self { + SyncRound::Start(round_start) => round_start.process_response(ctx), + SyncRound::Fetch(fetcher) => fetcher.process_response(ctx), + other => other, + } + } - /// Return unfulfilled requests from disconnected peer. Unknown requests will be ignored. - pub fn requests_abandoned(self, abandoned: &[ReqId]) -> Self { - match self { - SyncRound::Start(round_start) => round_start.requests_abandoned(abandoned), - SyncRound::Fetch(fetcher) => fetcher.requests_abandoned(abandoned), - other => other, - } - } + /// Return unfulfilled requests from disconnected peer. Unknown requests will be ignored. 
+ pub fn requests_abandoned(self, abandoned: &[ReqId]) -> Self { + match self { + SyncRound::Start(round_start) => round_start.requests_abandoned(abandoned), + SyncRound::Fetch(fetcher) => fetcher.requests_abandoned(abandoned), + other => other, + } + } - /// Dispatch pending requests. The dispatcher provided will attempt to - /// find a suitable peer to serve the request. - // TODO: have dispatcher take capabilities argument? and return an error as - // to why no suitable peer can be found? (no buffer, no chain heads that high, etc) - pub fn dispatch_requests(self, dispatcher: D) -> Self - where D: FnMut(HeadersRequest) -> Option - { - match self { - SyncRound::Start(round_start) => round_start.dispatch_requests(dispatcher), - SyncRound::Fetch(fetcher) => fetcher.dispatch_requests(dispatcher), - other => other, - } - } + /// Dispatch pending requests. The dispatcher provided will attempt to + /// find a suitable peer to serve the request. + // TODO: have dispatcher take capabilities argument? and return an error as + // to why no suitable peer can be found? (no buffer, no chain heads that high, etc) + pub fn dispatch_requests(self, dispatcher: D) -> Self + where + D: FnMut(HeadersRequest) -> Option, + { + match self { + SyncRound::Start(round_start) => round_start.dispatch_requests(dispatcher), + SyncRound::Fetch(fetcher) => fetcher.dispatch_requests(dispatcher), + other => other, + } + } - /// Drain up to a maximum number (None -> all) of headers (continuous, starting with a child of - /// the round start block) from the round, starting a new one once finished. - pub fn drain(self, v: &mut Vec
, max: Option) -> Self { - match self { - SyncRound::Fetch(fetcher) => fetcher.drain(v, max), - SyncRound::Abort(reason, mut remaining) => { - let len = ::std::cmp::min(max.unwrap_or(usize::max_value()), remaining.len()); - v.extend(remaining.drain(..len)); - SyncRound::Abort(reason, remaining) - } - other => other, - } - } + /// Drain up to a maximum number (None -> all) of headers (continuous, starting with a child of + /// the round start block) from the round, starting a new one once finished. + pub fn drain(self, v: &mut Vec
, max: Option) -> Self { + match self { + SyncRound::Fetch(fetcher) => fetcher.drain(v, max), + SyncRound::Abort(reason, mut remaining) => { + let len = ::std::cmp::min(max.unwrap_or(usize::max_value()), remaining.len()); + v.extend(remaining.drain(..len)); + SyncRound::Abort(reason, remaining) + } + other => other, + } + } } impl fmt::Debug for SyncRound { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - SyncRound::Start(ref state) => write!(f, "Scaffolding from {:?}", state.start_block), - SyncRound::Fetch(ref fetcher) => write!(f, "Filling scaffold up to {:?}", fetcher.end), - SyncRound::Abort(ref reason, ref remaining) => - write!(f, "Aborted: {:?}, {} remain", reason, remaining.len()), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + SyncRound::Start(ref state) => write!(f, "Scaffolding from {:?}", state.start_block), + SyncRound::Fetch(ref fetcher) => write!(f, "Filling scaffold up to {:?}", fetcher.end), + SyncRound::Abort(ref reason, ref remaining) => { + write!(f, "Aborted: {:?}, {} remain", reason, remaining.len()) + } + } + } } #[cfg(test)] mod tests { - use super::scaffold_params; + use super::scaffold_params; - #[test] - fn scaffold_config() { - // within a certain distance of the head, we download - // sequentially. - assert_eq!(scaffold_params(1), (0, 1)); - assert_eq!(scaffold_params(6), (0, 6)); + #[test] + fn scaffold_config() { + // within a certain distance of the head, we download + // sequentially. + assert_eq!(scaffold_params(1), (0, 1)); + assert_eq!(scaffold_params(6), (0, 6)); - // when scaffolds are useful, download enough frames to get - // within a close distance of the goal. - assert_eq!(scaffold_params(1000), (255, 4)); - assert_eq!(scaffold_params(1024), (255, 4)); - } + // when scaffolds are useful, download enough frames to get + // within a close distance of the goal. 
+ assert_eq!(scaffold_params(1000), (255, 4)); + assert_eq!(scaffold_params(1024), (255, 4)); + } } diff --git a/ethcore/sync/src/light_sync/tests/mod.rs b/ethcore/sync/src/light_sync/tests/mod.rs index 9bfb99ed0..8abfa7d5d 100644 --- a/ethcore/sync/src/light_sync/tests/mod.rs +++ b/ethcore/sync/src/light_sync/tests/mod.rs @@ -16,48 +16,61 @@ use tests::helpers::TestNet; -use ethcore::client::{BlockInfo, BlockId, EachBlockWith}; +use ethcore::client::{BlockId, BlockInfo, EachBlockWith}; mod test_net; #[test] fn basic_sync() { - let mut net = TestNet::light(1, 2); - net.peer(1).chain().add_blocks(5000, EachBlockWith::Nothing); - net.peer(2).chain().add_blocks(6000, EachBlockWith::Nothing); + let mut net = TestNet::light(1, 2); + net.peer(1).chain().add_blocks(5000, EachBlockWith::Nothing); + net.peer(2).chain().add_blocks(6000, EachBlockWith::Nothing); - net.sync(); + net.sync(); - assert!(net.peer(0).light_chain().block_header(BlockId::Number(6000)).is_some()); + assert!(net + .peer(0) + .light_chain() + .block_header(BlockId::Number(6000)) + .is_some()); } #[test] fn fork_post_cht() { - const CHAIN_LENGTH: u64 = 50; // shouldn't be longer than ::light::cht::size(); + const CHAIN_LENGTH: u64 = 50; // shouldn't be longer than ::light::cht::size(); - let mut net = TestNet::light(1, 2); + let mut net = TestNet::light(1, 2); - // peer 2 is on a higher TD chain. - net.peer(1).chain().add_blocks(CHAIN_LENGTH as usize, EachBlockWith::Nothing); - net.peer(2).chain().add_blocks(CHAIN_LENGTH as usize + 1, EachBlockWith::Uncle); + // peer 2 is on a higher TD chain. + net.peer(1) + .chain() + .add_blocks(CHAIN_LENGTH as usize, EachBlockWith::Nothing); + net.peer(2) + .chain() + .add_blocks(CHAIN_LENGTH as usize + 1, EachBlockWith::Uncle); - // get the light peer on peer 1's chain. 
- for id in (0..CHAIN_LENGTH).map(|x| x + 1).map(BlockId::Number) { - let (light_peer, full_peer) = (net.peer(0), net.peer(1)); - let light_chain = light_peer.light_chain(); - let header = full_peer.chain().block_header(id).unwrap().decode().expect("decoding failure"); - let _ = light_chain.import_header(header); - light_chain.flush_queue(); - light_chain.import_verified(); - assert!(light_chain.block_header(id).is_some()); - } + // get the light peer on peer 1's chain. + for id in (0..CHAIN_LENGTH).map(|x| x + 1).map(BlockId::Number) { + let (light_peer, full_peer) = (net.peer(0), net.peer(1)); + let light_chain = light_peer.light_chain(); + let header = full_peer + .chain() + .block_header(id) + .unwrap() + .decode() + .expect("decoding failure"); + let _ = light_chain.import_header(header); + light_chain.flush_queue(); + light_chain.import_verified(); + assert!(light_chain.block_header(id).is_some()); + } - net.sync(); + net.sync(); - for id in (0..CHAIN_LENGTH).map(|x| x + 1).map(BlockId::Number) { - assert_eq!( - net.peer(0).light_chain().block_header(id).unwrap(), - net.peer(2).chain().block_header(id).unwrap() - ); - } + for id in (0..CHAIN_LENGTH).map(|x| x + 1).map(BlockId::Number) { + assert_eq!( + net.peer(0).light_chain().block_header(id).unwrap(), + net.peer(2).chain().block_header(id).unwrap() + ); + } } diff --git a/ethcore/sync/src/light_sync/tests/test_net.rs b/ethcore/sync/src/light_sync/tests/test_net.rs index 74567c119..2f6dee611 100644 --- a/ethcore/sync/src/light_sync/tests/test_net.rs +++ b/ethcore/sync/src/light_sync/tests/test_net.rs @@ -16,236 +16,255 @@ //! TestNet peer definition. 
-use std::collections::{HashSet, VecDeque}; -use std::sync::Arc; +use std::{ + collections::{HashSet, VecDeque}, + sync::Arc, +}; use light_sync::*; -use tests::helpers::{TestNet, Peer as PeerLike, TestPacket}; +use tests::helpers::{Peer as PeerLike, TestNet, TestPacket}; -use ethcore::client::TestBlockChainClient; -use ethcore::spec::Spec; +use ethcore::{client::TestBlockChainClient, spec::Spec}; use io::IoChannel; use kvdb_memorydb; -use light::client::fetch::{self, Unavailable}; -use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; -use light::provider::LightProvider; +use light::{ + client::fetch::{self, Unavailable}, + net::{Capabilities, IoContext, LightProtocol, Params as LightParams}, + provider::LightProvider, +}; use network::{NodeId, PeerId}; use parking_lot::RwLock; -use std::time::Duration; use light::cache::Cache; +use std::time::Duration; const NETWORK_ID: u64 = 0xcafebabe; pub type LightClient = ::light::client::Client; struct TestIoContext<'a> { - queue: &'a RwLock>, - sender: Option, - to_disconnect: RwLock>, + queue: &'a RwLock>, + sender: Option, + to_disconnect: RwLock>, } impl<'a> IoContext for TestIoContext<'a> { - fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { - self.queue.write().push_back(TestPacket { - data: packet_body, - packet_id: packet_id, - recipient: peer, - }) - } + fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { + self.queue.write().push_back(TestPacket { + data: packet_body, + packet_id: packet_id, + recipient: peer, + }) + } - fn respond(&self, packet_id: u8, packet_body: Vec) { - if let Some(sender) = self.sender { - self.send(sender, packet_id, packet_body); - } - } + fn respond(&self, packet_id: u8, packet_body: Vec) { + if let Some(sender) = self.sender { + self.send(sender, packet_id, packet_body); + } + } - fn disconnect_peer(&self, peer: PeerId) { - self.to_disconnect.write().insert(peer); - } + fn disconnect_peer(&self, peer: PeerId) { + 
self.to_disconnect.write().insert(peer); + } - fn disable_peer(&self, peer: PeerId) { self.disconnect_peer(peer) } - fn protocol_version(&self, _peer: PeerId) -> Option { Some(::light::net::MAX_PROTOCOL_VERSION) } + fn disable_peer(&self, peer: PeerId) { + self.disconnect_peer(peer) + } + fn protocol_version(&self, _peer: PeerId) -> Option { + Some(::light::net::MAX_PROTOCOL_VERSION) + } - fn persistent_peer_id(&self, _peer: PeerId) -> Option { unimplemented!() } - fn is_reserved_peer(&self, _peer: PeerId) -> bool { false } + fn persistent_peer_id(&self, _peer: PeerId) -> Option { + unimplemented!() + } + fn is_reserved_peer(&self, _peer: PeerId) -> bool { + false + } } // peer-specific data. enum PeerData { - Light(Arc>, Arc), - Full(Arc) + Light(Arc>, Arc), + Full(Arc), } // test peer type. // Either a full peer or a light peer. pub struct Peer { - proto: LightProtocol, - queue: RwLock>, - data: PeerData, + proto: LightProtocol, + queue: RwLock>, + data: PeerData, } impl Peer { - // create a new full-client peer for light client peers to sync to. - // buffer flow is made negligible. - pub fn new_full(chain: Arc) -> Self { - let params = LightParams { - network_id: NETWORK_ID, - config: Default::default(), - capabilities: Capabilities { - serve_headers: true, - serve_chain_since: None, - serve_state_since: None, - tx_relay: true, - }, - sample_store: None, - }; + // create a new full-client peer for light client peers to sync to. + // buffer flow is made negligible. 
+ pub fn new_full(chain: Arc) -> Self { + let params = LightParams { + network_id: NETWORK_ID, + config: Default::default(), + capabilities: Capabilities { + serve_headers: true, + serve_chain_since: None, + serve_state_since: None, + tx_relay: true, + }, + sample_store: None, + }; - let proto = LightProtocol::new(chain.clone(), params); - Peer { - proto: proto, - queue: RwLock::new(VecDeque::new()), - data: PeerData::Full(chain), - } - } + let proto = LightProtocol::new(chain.clone(), params); + Peer { + proto: proto, + queue: RwLock::new(VecDeque::new()), + data: PeerData::Full(chain), + } + } - // create a new light-client peer to sync to full peers. - pub fn new_light(chain: Arc) -> Self { - let sync = Arc::new(LightSync::new(chain.clone()).unwrap()); - let params = LightParams { - network_id: NETWORK_ID, - config: Default::default(), - capabilities: Capabilities { - serve_headers: false, - serve_chain_since: None, - serve_state_since: None, - tx_relay: false, - }, - sample_store: None, - }; + // create a new light-client peer to sync to full peers. 
+ pub fn new_light(chain: Arc) -> Self { + let sync = Arc::new(LightSync::new(chain.clone()).unwrap()); + let params = LightParams { + network_id: NETWORK_ID, + config: Default::default(), + capabilities: Capabilities { + serve_headers: false, + serve_chain_since: None, + serve_state_since: None, + tx_relay: false, + }, + sample_store: None, + }; - let provider = LightProvider::new(chain.clone(), Arc::new(RwLock::new(Default::default()))); - let mut proto = LightProtocol::new(Arc::new(provider), params); - proto.add_handler(sync.clone()); - Peer { - proto: proto, - queue: RwLock::new(VecDeque::new()), - data: PeerData::Light(sync, chain), - } - } + let provider = LightProvider::new(chain.clone(), Arc::new(RwLock::new(Default::default()))); + let mut proto = LightProtocol::new(Arc::new(provider), params); + proto.add_handler(sync.clone()); + Peer { + proto: proto, + queue: RwLock::new(VecDeque::new()), + data: PeerData::Light(sync, chain), + } + } - // get the chain from the client, asserting that it is a full node. - pub fn chain(&self) -> &TestBlockChainClient { - match self.data { - PeerData::Full(ref chain) => &*chain, - _ => panic!("Attempted to access full chain on light peer."), - } - } + // get the chain from the client, asserting that it is a full node. + pub fn chain(&self) -> &TestBlockChainClient { + match self.data { + PeerData::Full(ref chain) => &*chain, + _ => panic!("Attempted to access full chain on light peer."), + } + } - // get the light chain from the peer, asserting that it is a light node. - pub fn light_chain(&self) -> &LightClient { - match self.data { - PeerData::Light(_, ref chain) => &*chain, - _ => panic!("Attempted to access light chain on full peer."), - } - } + // get the light chain from the peer, asserting that it is a light node. 
+ pub fn light_chain(&self) -> &LightClient { + match self.data { + PeerData::Light(_, ref chain) => &*chain, + _ => panic!("Attempted to access light chain on full peer."), + } + } - // get a test Io context based on - fn io(&self, sender: Option) -> TestIoContext { - TestIoContext { - queue: &self.queue, - sender: sender, - to_disconnect: RwLock::new(HashSet::new()), - } - } + // get a test Io context based on + fn io(&self, sender: Option) -> TestIoContext { + TestIoContext { + queue: &self.queue, + sender: sender, + to_disconnect: RwLock::new(HashSet::new()), + } + } } impl PeerLike for Peer { - type Message = TestPacket; + type Message = TestPacket; - fn on_connect(&self, other: PeerId) { - let io = self.io(Some(other)); - self.proto.on_connect(other, &io); - } + fn on_connect(&self, other: PeerId) { + let io = self.io(Some(other)); + self.proto.on_connect(other, &io); + } - fn on_disconnect(&self, other: PeerId){ - let io = self.io(Some(other)); - self.proto.on_disconnect(other, &io); - } + fn on_disconnect(&self, other: PeerId) { + let io = self.io(Some(other)); + self.proto.on_disconnect(other, &io); + } - fn receive_message(&self, from: PeerId, msg: TestPacket) -> HashSet { - let io = self.io(Some(from)); - self.proto.handle_packet(&io, from, msg.packet_id, &msg.data); - io.to_disconnect.into_inner() - } + fn receive_message(&self, from: PeerId, msg: TestPacket) -> HashSet { + let io = self.io(Some(from)); + self.proto + .handle_packet(&io, from, msg.packet_id, &msg.data); + io.to_disconnect.into_inner() + } - fn pending_message(&self) -> Option { - self.queue.write().pop_front() - } + fn pending_message(&self) -> Option { + self.queue.write().pop_front() + } - fn is_done(&self) -> bool { - self.queue.read().is_empty() && match self.data { - PeerData::Light(_, ref client) => { - // should create a test light client which just imports - // headers directly and doesn't have a queue to drain. 
- client.import_verified(); - client.queue_info().is_empty() - } - _ => true, - } - } + fn is_done(&self) -> bool { + self.queue.read().is_empty() + && match self.data { + PeerData::Light(_, ref client) => { + // should create a test light client which just imports + // headers directly and doesn't have a queue to drain. + client.import_verified(); + client.queue_info().is_empty() + } + _ => true, + } + } - fn sync_step(&self) { - if let PeerData::Light(_, ref client) = self.data { - client.flush_queue(); + fn sync_step(&self) { + if let PeerData::Light(_, ref client) = self.data { + client.flush_queue(); - while !client.queue_info().is_empty() { - client.import_verified() - } - } - } + while !client.queue_info().is_empty() { + client.import_verified() + } + } + } - fn restart_sync(&self) { } + fn restart_sync(&self) {} - fn process_all_io_messages(&self) { } + fn process_all_io_messages(&self) {} - fn process_all_new_block_messages(&self) { } + fn process_all_new_block_messages(&self) {} } impl TestNet { - /// Create a new `TestNet` for testing light synchronization. - /// The first parameter is the number of light nodes, - /// the second is the number of full nodes. - pub fn light(n_light: usize, n_full: usize) -> Self { - let mut peers = Vec::with_capacity(n_light + n_full); - for _ in 0..n_light { - let mut config = ::light::client::Config::default(); + /// Create a new `TestNet` for testing light synchronization. + /// The first parameter is the number of light nodes, + /// the second is the number of full nodes. + pub fn light(n_light: usize, n_full: usize) -> Self { + let mut peers = Vec::with_capacity(n_light + n_full); + for _ in 0..n_light { + let mut config = ::light::client::Config::default(); - // skip full verification because the blocks are bad. 
- config.verify_full = false; - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - let db = kvdb_memorydb::create(0); - let client = LightClient::new( - config, - Arc::new(db), - None, - &Spec::new_test(), - fetch::unavailable(), // TODO: allow fetch from full nodes. - IoChannel::disconnected(), - cache - ).expect("New DB creation infallible; qed"); + // skip full verification because the blocks are bad. + config.verify_full = false; + let cache = Arc::new(Mutex::new(Cache::new( + Default::default(), + Duration::from_secs(6 * 3600), + ))); + let db = kvdb_memorydb::create(0); + let client = LightClient::new( + config, + Arc::new(db), + None, + &Spec::new_test(), + fetch::unavailable(), // TODO: allow fetch from full nodes. + IoChannel::disconnected(), + cache, + ) + .expect("New DB creation infallible; qed"); - peers.push(Arc::new(Peer::new_light(Arc::new(client)))) - } + peers.push(Arc::new(Peer::new_light(Arc::new(client)))) + } - for _ in 0..n_full { - peers.push(Arc::new(Peer::new_full(Arc::new(TestBlockChainClient::new())))) - } + for _ in 0..n_full { + peers.push(Arc::new(Peer::new_full(Arc::new( + TestBlockChainClient::new(), + )))) + } - TestNet { - peers: peers, - started: false, - disconnect_events: Vec::new(), - } - } + TestNet { + peers: peers, + started: false, + disconnect_events: Vec::new(), + } + } } diff --git a/ethcore/sync/src/private_tx.rs b/ethcore/sync/src/private_tx.rs index c9396af5b..d086ef738 100644 --- a/ethcore/sync/src/private_tx.rs +++ b/ethcore/sync/src/private_tx.rs @@ -14,50 +14,50 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use parking_lot::Mutex; use ethereum_types::H256; +use parking_lot::Mutex; /// Trait which should be implemented by a private transaction handler. pub trait PrivateTxHandler: Send + Sync + 'static { - /// Function called on new private transaction received. 
- /// Returns the hash of the imported transaction - fn import_private_transaction(&self, rlp: &[u8]) -> Result; + /// Function called on new private transaction received. + /// Returns the hash of the imported transaction + fn import_private_transaction(&self, rlp: &[u8]) -> Result; - /// Function called on new signed private transaction received. - /// Returns the hash of the imported transaction - fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result; + /// Function called on new signed private transaction received. + /// Returns the hash of the imported transaction + fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result; } /// Nonoperative private transaction handler. pub struct NoopPrivateTxHandler; impl PrivateTxHandler for NoopPrivateTxHandler { - fn import_private_transaction(&self, _rlp: &[u8]) -> Result { - Ok(H256::default()) - } + fn import_private_transaction(&self, _rlp: &[u8]) -> Result { + Ok(H256::default()) + } - fn import_signed_private_transaction(&self, _rlp: &[u8]) -> Result { - Ok(H256::default()) - } + fn import_signed_private_transaction(&self, _rlp: &[u8]) -> Result { + Ok(H256::default()) + } } /// Simple private transaction handler. Used for tests. 
#[derive(Default)] pub struct SimplePrivateTxHandler { - /// imported private transactions - pub txs: Mutex>>, - /// imported signed private transactions - pub signed_txs: Mutex>>, + /// imported private transactions + pub txs: Mutex>>, + /// imported signed private transactions + pub signed_txs: Mutex>>, } impl PrivateTxHandler for SimplePrivateTxHandler { - fn import_private_transaction(&self, rlp: &[u8]) -> Result { - self.txs.lock().push(rlp.to_vec()); - Ok(H256::default()) - } + fn import_private_transaction(&self, rlp: &[u8]) -> Result { + self.txs.lock().push(rlp.to_vec()); + Ok(H256::default()) + } - fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result { - self.signed_txs.lock().push(rlp.to_vec()); - Ok(H256::default()) - } + fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result { + self.signed_txs.lock().push(rlp.to_vec()); + Ok(H256::default()) + } } diff --git a/ethcore/sync/src/snapshot.rs b/ethcore/sync/src/snapshot.rs index 64e463c7b..2643ab615 100644 --- a/ethcore/sync/src/snapshot.rs +++ b/ethcore/sync/src/snapshot.rs @@ -18,255 +18,268 @@ use ethcore::snapshot::{ManifestData, SnapshotService}; use ethereum_types::H256; use hash::keccak; -use std::collections::HashSet; -use std::iter::FromIterator; +use std::{collections::HashSet, iter::FromIterator}; #[derive(PartialEq, Eq, Debug)] pub enum ChunkType { - State(H256), - Block(H256), + State(H256), + Block(H256), } pub struct Snapshot { - pending_state_chunks: Vec, - pending_block_chunks: Vec, - downloading_chunks: HashSet, - completed_chunks: HashSet, - snapshot_hash: Option, - bad_hashes: HashSet, - initialized: bool, + pending_state_chunks: Vec, + pending_block_chunks: Vec, + downloading_chunks: HashSet, + completed_chunks: HashSet, + snapshot_hash: Option, + bad_hashes: HashSet, + initialized: bool, } impl Snapshot { - /// Create a new instance. 
- pub fn new() -> Snapshot { - Snapshot { - pending_state_chunks: Vec::new(), - pending_block_chunks: Vec::new(), - downloading_chunks: HashSet::new(), - completed_chunks: HashSet::new(), - snapshot_hash: None, - bad_hashes: HashSet::new(), - initialized: false, - } - } + /// Create a new instance. + pub fn new() -> Snapshot { + Snapshot { + pending_state_chunks: Vec::new(), + pending_block_chunks: Vec::new(), + downloading_chunks: HashSet::new(), + completed_chunks: HashSet::new(), + snapshot_hash: None, + bad_hashes: HashSet::new(), + initialized: false, + } + } - /// Sync the Snapshot completed chunks with the Snapshot Service - pub fn initialize(&mut self, snapshot_service: &SnapshotService) { - if self.initialized { - return; - } + /// Sync the Snapshot completed chunks with the Snapshot Service + pub fn initialize(&mut self, snapshot_service: &SnapshotService) { + if self.initialized { + return; + } - if let Some(completed_chunks) = snapshot_service.completed_chunks() { - self.completed_chunks = HashSet::from_iter(completed_chunks); - } + if let Some(completed_chunks) = snapshot_service.completed_chunks() { + self.completed_chunks = HashSet::from_iter(completed_chunks); + } - trace!( - target: "snapshot", - "Snapshot is now initialized with {} completed chunks.", - self.completed_chunks.len(), - ); + trace!( + target: "snapshot", + "Snapshot is now initialized with {} completed chunks.", + self.completed_chunks.len(), + ); - self.initialized = true; - } + self.initialized = true; + } - /// Clear everything. - pub fn clear(&mut self) { - self.pending_state_chunks.clear(); - self.pending_block_chunks.clear(); - self.downloading_chunks.clear(); - self.completed_chunks.clear(); - self.snapshot_hash = None; - self.initialized = false; - } + /// Clear everything. 
+ pub fn clear(&mut self) { + self.pending_state_chunks.clear(); + self.pending_block_chunks.clear(); + self.downloading_chunks.clear(); + self.completed_chunks.clear(); + self.snapshot_hash = None; + self.initialized = false; + } - /// Check if currently downloading a snapshot. - pub fn have_manifest(&self) -> bool { - self.snapshot_hash.is_some() - } + /// Check if currently downloading a snapshot. + pub fn have_manifest(&self) -> bool { + self.snapshot_hash.is_some() + } - /// Reset collection for a manifest RLP - pub fn reset_to(&mut self, manifest: &ManifestData, hash: &H256) { - self.clear(); - self.pending_state_chunks = manifest.state_hashes.clone(); - self.pending_block_chunks = manifest.block_hashes.clone(); - self.snapshot_hash = Some(hash.clone()); - } + /// Reset collection for a manifest RLP + pub fn reset_to(&mut self, manifest: &ManifestData, hash: &H256) { + self.clear(); + self.pending_state_chunks = manifest.state_hashes.clone(); + self.pending_block_chunks = manifest.block_hashes.clone(); + self.snapshot_hash = Some(hash.clone()); + } - /// Validate chunk and mark it as downloaded - pub fn validate_chunk(&mut self, chunk: &[u8]) -> Result { - let hash = keccak(chunk); - if self.completed_chunks.contains(&hash) { - trace!(target: "sync", "Ignored proccessed chunk: {:x}", hash); - return Err(()); - } - self.downloading_chunks.remove(&hash); - if self.pending_block_chunks.iter().any(|h| h == &hash) { - self.completed_chunks.insert(hash.clone()); - return Ok(ChunkType::Block(hash)); - } - if self.pending_state_chunks.iter().any(|h| h == &hash) { - self.completed_chunks.insert(hash.clone()); - return Ok(ChunkType::State(hash)); - } - trace!(target: "sync", "Ignored unknown chunk: {:x}", hash); - Err(()) - } + /// Validate chunk and mark it as downloaded + pub fn validate_chunk(&mut self, chunk: &[u8]) -> Result { + let hash = keccak(chunk); + if self.completed_chunks.contains(&hash) { + trace!(target: "sync", "Ignored proccessed chunk: {:x}", hash); 
+ return Err(()); + } + self.downloading_chunks.remove(&hash); + if self.pending_block_chunks.iter().any(|h| h == &hash) { + self.completed_chunks.insert(hash.clone()); + return Ok(ChunkType::Block(hash)); + } + if self.pending_state_chunks.iter().any(|h| h == &hash) { + self.completed_chunks.insert(hash.clone()); + return Ok(ChunkType::State(hash)); + } + trace!(target: "sync", "Ignored unknown chunk: {:x}", hash); + Err(()) + } - /// Find a chunk to download - pub fn needed_chunk(&mut self) -> Option { - // Find next needed chunk: first block, then state chunks - let chunk = { - let chunk_filter = |h| !self.downloading_chunks.contains(h) && !self.completed_chunks.contains(h); + /// Find a chunk to download + pub fn needed_chunk(&mut self) -> Option { + // Find next needed chunk: first block, then state chunks + let chunk = { + let chunk_filter = + |h| !self.downloading_chunks.contains(h) && !self.completed_chunks.contains(h); - let needed_block_chunk = self.pending_block_chunks.iter() - .filter(|&h| chunk_filter(h)) - .map(|h| *h) - .next(); + let needed_block_chunk = self + .pending_block_chunks + .iter() + .filter(|&h| chunk_filter(h)) + .map(|h| *h) + .next(); - // If no block chunks to download, get the state chunks - if needed_block_chunk.is_none() { - self.pending_state_chunks.iter() - .filter(|&h| chunk_filter(h)) - .map(|h| *h) - .next() - } else { - needed_block_chunk - } - }; + // If no block chunks to download, get the state chunks + if needed_block_chunk.is_none() { + self.pending_state_chunks + .iter() + .filter(|&h| chunk_filter(h)) + .map(|h| *h) + .next() + } else { + needed_block_chunk + } + }; - if let Some(hash) = chunk { - self.downloading_chunks.insert(hash.clone()); - } - chunk - } + if let Some(hash) = chunk { + self.downloading_chunks.insert(hash.clone()); + } + chunk + } - pub fn clear_chunk_download(&mut self, hash: &H256) { - self.downloading_chunks.remove(hash); - } + pub fn clear_chunk_download(&mut self, hash: &H256) { + 
self.downloading_chunks.remove(hash); + } - // note snapshot hash as bad. - pub fn note_bad(&mut self, hash: H256) { - self.bad_hashes.insert(hash); - } + // note snapshot hash as bad. + pub fn note_bad(&mut self, hash: H256) { + self.bad_hashes.insert(hash); + } - // whether snapshot hash is known to be bad. - pub fn is_known_bad(&self, hash: &H256) -> bool { - self.bad_hashes.contains(hash) - } + // whether snapshot hash is known to be bad. + pub fn is_known_bad(&self, hash: &H256) -> bool { + self.bad_hashes.contains(hash) + } - pub fn snapshot_hash(&self) -> Option { - self.snapshot_hash - } + pub fn snapshot_hash(&self) -> Option { + self.snapshot_hash + } - pub fn total_chunks(&self) -> usize { - self.pending_block_chunks.len() + self.pending_state_chunks.len() - } + pub fn total_chunks(&self) -> usize { + self.pending_block_chunks.len() + self.pending_state_chunks.len() + } - pub fn done_chunks(&self) -> usize { - self.completed_chunks.len() - } + pub fn done_chunks(&self) -> usize { + self.completed_chunks.len() + } - pub fn is_complete(&self) -> bool { - self.total_chunks() == self.completed_chunks.len() - } + pub fn is_complete(&self) -> bool { + self.total_chunks() == self.completed_chunks.len() + } } #[cfg(test)] mod test { - use hash::keccak; - use bytes::Bytes; - use super::*; - use ethcore::snapshot::ManifestData; + use super::*; + use bytes::Bytes; + use ethcore::snapshot::ManifestData; + use hash::keccak; - fn is_empty(snapshot: &Snapshot) -> bool { - snapshot.pending_block_chunks.is_empty() && - snapshot.pending_state_chunks.is_empty() && - snapshot.completed_chunks.is_empty() && - snapshot.downloading_chunks.is_empty() && - snapshot.snapshot_hash.is_none() - } + fn is_empty(snapshot: &Snapshot) -> bool { + snapshot.pending_block_chunks.is_empty() + && snapshot.pending_state_chunks.is_empty() + && snapshot.completed_chunks.is_empty() + && snapshot.downloading_chunks.is_empty() + && snapshot.snapshot_hash.is_none() + } - fn test_manifest() -> 
(ManifestData, H256, Vec, Vec) { - let state_chunks: Vec = (0..20).map(|_| H256::random().to_vec()).collect(); - let block_chunks: Vec = (0..20).map(|_| H256::random().to_vec()).collect(); - let manifest = ManifestData { - version: 2, - state_hashes: state_chunks.iter().map(|data| keccak(data)).collect(), - block_hashes: block_chunks.iter().map(|data| keccak(data)).collect(), - state_root: H256::new(), - block_number: 42, - block_hash: H256::new(), - }; - let mhash = keccak(manifest.clone().into_rlp()); - (manifest, mhash, state_chunks, block_chunks) - } + fn test_manifest() -> (ManifestData, H256, Vec, Vec) { + let state_chunks: Vec = (0..20).map(|_| H256::random().to_vec()).collect(); + let block_chunks: Vec = (0..20).map(|_| H256::random().to_vec()).collect(); + let manifest = ManifestData { + version: 2, + state_hashes: state_chunks.iter().map(|data| keccak(data)).collect(), + block_hashes: block_chunks.iter().map(|data| keccak(data)).collect(), + state_root: H256::new(), + block_number: 42, + block_hash: H256::new(), + }; + let mhash = keccak(manifest.clone().into_rlp()); + (manifest, mhash, state_chunks, block_chunks) + } - #[test] - fn create_clear() { - let mut snapshot = Snapshot::new(); - assert!(is_empty(&snapshot)); - let (manifest, mhash, _, _,) = test_manifest(); - snapshot.reset_to(&manifest, &mhash); - assert!(!is_empty(&snapshot)); - snapshot.clear(); - assert!(is_empty(&snapshot)); - } + #[test] + fn create_clear() { + let mut snapshot = Snapshot::new(); + assert!(is_empty(&snapshot)); + let (manifest, mhash, _, _) = test_manifest(); + snapshot.reset_to(&manifest, &mhash); + assert!(!is_empty(&snapshot)); + snapshot.clear(); + assert!(is_empty(&snapshot)); + } - #[test] - fn validate_chunks() { - let mut snapshot = Snapshot::new(); - let (manifest, mhash, state_chunks, block_chunks) = test_manifest(); - snapshot.reset_to(&manifest, &mhash); - assert_eq!(snapshot.done_chunks(), 0); - 
assert!(snapshot.validate_chunk(&H256::random().to_vec()).is_err()); + #[test] + fn validate_chunks() { + let mut snapshot = Snapshot::new(); + let (manifest, mhash, state_chunks, block_chunks) = test_manifest(); + snapshot.reset_to(&manifest, &mhash); + assert_eq!(snapshot.done_chunks(), 0); + assert!(snapshot.validate_chunk(&H256::random().to_vec()).is_err()); - let requested: Vec = (0..40).map(|_| snapshot.needed_chunk().unwrap()).collect(); - assert!(snapshot.needed_chunk().is_none()); + let requested: Vec = (0..40).map(|_| snapshot.needed_chunk().unwrap()).collect(); + assert!(snapshot.needed_chunk().is_none()); - let requested_all_block_chunks = manifest.block_hashes.iter() - .all(|h| requested.iter().any(|rh| rh == h)); - assert!(requested_all_block_chunks); + let requested_all_block_chunks = manifest + .block_hashes + .iter() + .all(|h| requested.iter().any(|rh| rh == h)); + assert!(requested_all_block_chunks); - let requested_all_state_chunks = manifest.state_hashes.iter() - .all(|h| requested.iter().any(|rh| rh == h)); - assert!(requested_all_state_chunks); + let requested_all_state_chunks = manifest + .state_hashes + .iter() + .all(|h| requested.iter().any(|rh| rh == h)); + assert!(requested_all_state_chunks); - assert_eq!(snapshot.downloading_chunks.len(), 40); + assert_eq!(snapshot.downloading_chunks.len(), 40); - assert_eq!(snapshot.validate_chunk(&state_chunks[4]), Ok(ChunkType::State(manifest.state_hashes[4].clone()))); - assert_eq!(snapshot.completed_chunks.len(), 1); - assert_eq!(snapshot.downloading_chunks.len(), 39); + assert_eq!( + snapshot.validate_chunk(&state_chunks[4]), + Ok(ChunkType::State(manifest.state_hashes[4].clone())) + ); + assert_eq!(snapshot.completed_chunks.len(), 1); + assert_eq!(snapshot.downloading_chunks.len(), 39); - assert_eq!(snapshot.validate_chunk(&block_chunks[10]), Ok(ChunkType::Block(manifest.block_hashes[10].clone()))); - assert_eq!(snapshot.completed_chunks.len(), 2); - assert_eq!(snapshot.downloading_chunks.len(), 
38); + assert_eq!( + snapshot.validate_chunk(&block_chunks[10]), + Ok(ChunkType::Block(manifest.block_hashes[10].clone())) + ); + assert_eq!(snapshot.completed_chunks.len(), 2); + assert_eq!(snapshot.downloading_chunks.len(), 38); - for (i, data) in state_chunks.iter().enumerate() { - if i != 4 { - assert!(snapshot.validate_chunk(data).is_ok()); - } - } + for (i, data) in state_chunks.iter().enumerate() { + if i != 4 { + assert!(snapshot.validate_chunk(data).is_ok()); + } + } - for (i, data) in block_chunks.iter().enumerate() { - if i != 10 { - assert!(snapshot.validate_chunk(data).is_ok()); - } - } + for (i, data) in block_chunks.iter().enumerate() { + if i != 10 { + assert!(snapshot.validate_chunk(data).is_ok()); + } + } - assert!(snapshot.is_complete()); - assert_eq!(snapshot.done_chunks(), 40); - assert_eq!(snapshot.done_chunks(), snapshot.total_chunks()); - assert_eq!(snapshot.snapshot_hash(), Some(keccak(manifest.into_rlp()))); - } + assert!(snapshot.is_complete()); + assert_eq!(snapshot.done_chunks(), 40); + assert_eq!(snapshot.done_chunks(), snapshot.total_chunks()); + assert_eq!(snapshot.snapshot_hash(), Some(keccak(manifest.into_rlp()))); + } - #[test] - fn tracks_known_bad() { - let mut snapshot = Snapshot::new(); - let hash = H256::random(); + #[test] + fn tracks_known_bad() { + let mut snapshot = Snapshot::new(); + let hash = H256::random(); - assert_eq!(snapshot.is_known_bad(&hash), false); - snapshot.note_bad(hash); - assert_eq!(snapshot.is_known_bad(&hash), true); - } + assert_eq!(snapshot.is_known_bad(&hash), false); + snapshot.note_bad(hash); + assert_eq!(snapshot.is_known_bad(&hash), true); + } } diff --git a/ethcore/sync/src/sync_io.rs b/ethcore/sync/src/sync_io.rs index 56bf98ab2..3ee153694 100644 --- a/ethcore/sync/src/sync_io.rs +++ b/ethcore/sync/src/sync_io.rs @@ -14,127 +14,134 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::HashMap; -use chain::sync_packet::{PacketInfo, SyncPacket}; -use network::{NetworkContext, PeerId, PacketId, Error, SessionInfo, ProtocolId}; -use network::client_version::ClientVersion; use bytes::Bytes; -use ethcore::client::BlockChainClient; -use types::BlockNumber; -use ethcore::snapshot::SnapshotService; +use chain::sync_packet::{PacketInfo, SyncPacket}; +use ethcore::{client::BlockChainClient, snapshot::SnapshotService}; +use network::{ + client_version::ClientVersion, Error, NetworkContext, PacketId, PeerId, ProtocolId, SessionInfo, +}; use parking_lot::RwLock; +use std::collections::HashMap; +use types::BlockNumber; /// IO interface for the syncing handler. /// Provides peer connection management and an interface to the blockchain client. // TODO: ratings pub trait SyncIo { - /// Disable a peer - fn disable_peer(&mut self, peer_id: PeerId); - /// Disconnect peer - fn disconnect_peer(&mut self, peer_id: PeerId); - /// Respond to current request with a packet. Can be called from an IO handler for incoming packet. - fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), Error>; - /// Send a packet to a peer using specified protocol. - fn send(&mut self, peer_id: PeerId, packet_id: SyncPacket, data: Vec) -> Result<(), Error>; - /// Get the blockchain - fn chain(&self) -> &BlockChainClient; - /// Get the snapshot service. - fn snapshot_service(&self) -> &SnapshotService; - /// Returns peer version identifier - fn peer_version(&self, peer_id: PeerId) -> ClientVersion { - ClientVersion::from(peer_id.to_string()) - } - /// Returns information on p2p session - fn peer_session_info(&self, peer_id: PeerId) -> Option; - /// Maximum mutually supported ETH protocol version - fn eth_protocol_version(&self, peer_id: PeerId) -> u8; - /// Maximum mutually supported version of a gien protocol. 
- fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8; - /// Returns if the chain block queue empty - fn is_chain_queue_empty(&self) -> bool { - self.chain().is_queue_empty() - } - /// Check if the session is expired - fn is_expired(&self) -> bool; - /// Return sync overlay - fn chain_overlay(&self) -> &RwLock>; - /// Returns the size the payload shouldn't exceed - fn payload_soft_limit(&self) -> usize; + /// Disable a peer + fn disable_peer(&mut self, peer_id: PeerId); + /// Disconnect peer + fn disconnect_peer(&mut self, peer_id: PeerId); + /// Respond to current request with a packet. Can be called from an IO handler for incoming packet. + fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), Error>; + /// Send a packet to a peer using specified protocol. + fn send(&mut self, peer_id: PeerId, packet_id: SyncPacket, data: Vec) -> Result<(), Error>; + /// Get the blockchain + fn chain(&self) -> &BlockChainClient; + /// Get the snapshot service. + fn snapshot_service(&self) -> &SnapshotService; + /// Returns peer version identifier + fn peer_version(&self, peer_id: PeerId) -> ClientVersion { + ClientVersion::from(peer_id.to_string()) + } + /// Returns information on p2p session + fn peer_session_info(&self, peer_id: PeerId) -> Option; + /// Maximum mutually supported ETH protocol version + fn eth_protocol_version(&self, peer_id: PeerId) -> u8; + /// Maximum mutually supported version of a gien protocol. 
+ fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8; + /// Returns if the chain block queue empty + fn is_chain_queue_empty(&self) -> bool { + self.chain().is_queue_empty() + } + /// Check if the session is expired + fn is_expired(&self) -> bool; + /// Return sync overlay + fn chain_overlay(&self) -> &RwLock>; + /// Returns the size the payload shouldn't exceed + fn payload_soft_limit(&self) -> usize; } /// Wraps `NetworkContext` and the blockchain client pub struct NetSyncIo<'s> { - network: &'s NetworkContext, - chain: &'s BlockChainClient, - snapshot_service: &'s SnapshotService, - chain_overlay: &'s RwLock>, + network: &'s NetworkContext, + chain: &'s BlockChainClient, + snapshot_service: &'s SnapshotService, + chain_overlay: &'s RwLock>, } impl<'s> NetSyncIo<'s> { - /// Creates a new instance from the `NetworkContext` and the blockchain client reference. - pub fn new(network: &'s NetworkContext, - chain: &'s BlockChainClient, - snapshot_service: &'s SnapshotService, - chain_overlay: &'s RwLock>) -> NetSyncIo<'s> { - NetSyncIo { - network: network, - chain: chain, - snapshot_service: snapshot_service, - chain_overlay: chain_overlay, - } - } + /// Creates a new instance from the `NetworkContext` and the blockchain client reference. 
+ pub fn new( + network: &'s NetworkContext, + chain: &'s BlockChainClient, + snapshot_service: &'s SnapshotService, + chain_overlay: &'s RwLock>, + ) -> NetSyncIo<'s> { + NetSyncIo { + network: network, + chain: chain, + snapshot_service: snapshot_service, + chain_overlay: chain_overlay, + } + } } impl<'s> SyncIo for NetSyncIo<'s> { - fn disable_peer(&mut self, peer_id: PeerId) { - self.network.disable_peer(peer_id); - } + fn disable_peer(&mut self, peer_id: PeerId) { + self.network.disable_peer(peer_id); + } - fn disconnect_peer(&mut self, peer_id: PeerId) { - self.network.disconnect_peer(peer_id); - } + fn disconnect_peer(&mut self, peer_id: PeerId) { + self.network.disconnect_peer(peer_id); + } - fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), Error>{ - self.network.respond(packet_id, data) - } + fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), Error> { + self.network.respond(packet_id, data) + } - fn send(&mut self, peer_id: PeerId, packet_id: SyncPacket, data: Vec) -> Result<(), Error>{ - self.network.send_protocol(packet_id.protocol(), peer_id, packet_id.id(), data) - } + fn send(&mut self, peer_id: PeerId, packet_id: SyncPacket, data: Vec) -> Result<(), Error> { + self.network + .send_protocol(packet_id.protocol(), peer_id, packet_id.id(), data) + } - fn chain(&self) -> &BlockChainClient { - self.chain - } + fn chain(&self) -> &BlockChainClient { + self.chain + } - fn chain_overlay(&self) -> &RwLock> { - self.chain_overlay - } + fn chain_overlay(&self) -> &RwLock> { + self.chain_overlay + } - fn snapshot_service(&self) -> &SnapshotService { - self.snapshot_service - } + fn snapshot_service(&self) -> &SnapshotService { + self.snapshot_service + } - fn peer_session_info(&self, peer_id: PeerId) -> Option { - self.network.session_info(peer_id) - } + fn peer_session_info(&self, peer_id: PeerId) -> Option { + self.network.session_info(peer_id) + } - fn is_expired(&self) -> bool { - self.network.is_expired() - } + fn 
is_expired(&self) -> bool { + self.network.is_expired() + } - fn eth_protocol_version(&self, peer_id: PeerId) -> u8 { - self.network.protocol_version(self.network.subprotocol_name(), peer_id).unwrap_or(0) - } + fn eth_protocol_version(&self, peer_id: PeerId) -> u8 { + self.network + .protocol_version(self.network.subprotocol_name(), peer_id) + .unwrap_or(0) + } - fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8 { - self.network.protocol_version(*protocol, peer_id).unwrap_or(0) - } + fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8 { + self.network + .protocol_version(*protocol, peer_id) + .unwrap_or(0) + } - fn peer_version(&self, peer_id: PeerId) -> ClientVersion { - self.network.peer_client_version(peer_id) - } + fn peer_version(&self, peer_id: PeerId) -> ClientVersion { + self.network.peer_client_version(peer_id) + } - fn payload_soft_limit(&self) -> usize { - self.network.payload_soft_limit() - } + fn payload_soft_limit(&self) -> usize { + self.network.payload_soft_limit() + } } diff --git a/ethcore/sync/src/tests/chain.rs b/ethcore/sync/src/tests/chain.rs index d81a876d7..4cc03bb18 100644 --- a/ethcore/sync/src/tests/chain.rs +++ b/ethcore/sync/src/tests/chain.rs @@ -14,251 +14,288 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use ethcore::client::{TestBlockChainClient, BlockChainClient, BlockId, EachBlockWith, ChainInfo, BlockInfo}; -use chain::{SyncState}; use super::helpers::*; -use {SyncConfig, WarpSync}; +use chain::SyncState; +use ethcore::client::{ + BlockChainClient, BlockId, BlockInfo, ChainInfo, EachBlockWith, TestBlockChainClient, +}; +use std::sync::Arc; +use SyncConfig; +use WarpSync; #[test] fn two_peers() { - ::env_logger::try_init().ok(); - let mut net = TestNet::new(3); - net.peer(1).chain.add_blocks(1000, EachBlockWith::Uncle); - net.peer(2).chain.add_blocks(1000, EachBlockWith::Uncle); - net.sync(); - assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); - assert_eq!(*net.peer(0).chain.blocks.read(), *net.peer(1).chain.blocks.read()); + ::env_logger::try_init().ok(); + let mut net = TestNet::new(3); + net.peer(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer(2).chain.add_blocks(1000, EachBlockWith::Uncle); + net.sync(); + assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); + assert_eq!( + *net.peer(0).chain.blocks.read(), + *net.peer(1).chain.blocks.read() + ); } #[test] fn long_chain() { - ::env_logger::try_init().ok(); - let mut net = TestNet::new(2); - net.peer(1).chain.add_blocks(50000, EachBlockWith::Nothing); - net.sync(); - assert!(net.peer(0).chain.block(BlockId::Number(50000)).is_some()); - assert_eq!(*net.peer(0).chain.blocks.read(), *net.peer(1).chain.blocks.read()); + ::env_logger::try_init().ok(); + let mut net = TestNet::new(2); + net.peer(1).chain.add_blocks(50000, EachBlockWith::Nothing); + net.sync(); + assert!(net.peer(0).chain.block(BlockId::Number(50000)).is_some()); + assert_eq!( + *net.peer(0).chain.blocks.read(), + *net.peer(1).chain.blocks.read() + ); } #[test] fn status_after_sync() { - ::env_logger::try_init().ok(); - let mut net = TestNet::new(3); - net.peer(1).chain.add_blocks(1000, EachBlockWith::Uncle); - net.peer(2).chain.add_blocks(1000, EachBlockWith::Uncle); - net.sync(); - 
let status = net.peer(0).sync.read().status(); - assert_eq!(status.state, SyncState::Idle); + ::env_logger::try_init().ok(); + let mut net = TestNet::new(3); + net.peer(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer(2).chain.add_blocks(1000, EachBlockWith::Uncle); + net.sync(); + let status = net.peer(0).sync.read().status(); + assert_eq!(status.state, SyncState::Idle); } #[test] fn takes_few_steps() { - let mut net = TestNet::new(3); - net.peer(1).chain.add_blocks(100, EachBlockWith::Uncle); - net.peer(2).chain.add_blocks(100, EachBlockWith::Uncle); - let total_steps = net.sync(); - assert!(total_steps < 20); + let mut net = TestNet::new(3); + net.peer(1).chain.add_blocks(100, EachBlockWith::Uncle); + net.peer(2).chain.add_blocks(100, EachBlockWith::Uncle); + let total_steps = net.sync(); + assert!(total_steps < 20); } #[test] fn empty_blocks() { - ::env_logger::try_init().ok(); - let mut net = TestNet::new(3); - for n in 0..200 { - let with = if n % 2 == 0 { EachBlockWith::Nothing } else { EachBlockWith::Uncle }; - net.peer(1).chain.add_blocks(5, with.clone()); - net.peer(2).chain.add_blocks(5, with); - } - net.sync(); - assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); - assert_eq!(*net.peer(0).chain.blocks.read(), *net.peer(1).chain.blocks.read()); + ::env_logger::try_init().ok(); + let mut net = TestNet::new(3); + for n in 0..200 { + let with = if n % 2 == 0 { + EachBlockWith::Nothing + } else { + EachBlockWith::Uncle + }; + net.peer(1).chain.add_blocks(5, with.clone()); + net.peer(2).chain.add_blocks(5, with); + } + net.sync(); + assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); + assert_eq!( + *net.peer(0).chain.blocks.read(), + *net.peer(1).chain.blocks.read() + ); } #[test] fn forked() { - ::env_logger::try_init().ok(); - let mut net = TestNet::new(3); - net.peer(0).chain.add_blocks(30, EachBlockWith::Uncle); - net.peer(1).chain.add_blocks(30, EachBlockWith::Uncle); - net.peer(2).chain.add_blocks(30, 
EachBlockWith::Uncle); - net.peer(0).chain.add_blocks(10, EachBlockWith::Nothing); //fork - net.peer(1).chain.add_blocks(20, EachBlockWith::Uncle); - net.peer(2).chain.add_blocks(20, EachBlockWith::Uncle); - net.peer(1).chain.add_blocks(10, EachBlockWith::Uncle); //fork between 1 and 2 - net.peer(2).chain.add_blocks(1, EachBlockWith::Nothing); - // peer 1 has the best chain of 601 blocks - let peer1_chain = net.peer(1).chain.numbers.read().clone(); - net.sync(); - assert_eq!(*net.peer(0).chain.difficulty.read(), *net.peer(1).chain.difficulty.read()); - assert_eq!(&*net.peer(0).chain.numbers.read(), &peer1_chain); - assert_eq!(&*net.peer(1).chain.numbers.read(), &peer1_chain); - assert_eq!(&*net.peer(2).chain.numbers.read(), &peer1_chain); + ::env_logger::try_init().ok(); + let mut net = TestNet::new(3); + net.peer(0).chain.add_blocks(30, EachBlockWith::Uncle); + net.peer(1).chain.add_blocks(30, EachBlockWith::Uncle); + net.peer(2).chain.add_blocks(30, EachBlockWith::Uncle); + net.peer(0).chain.add_blocks(10, EachBlockWith::Nothing); //fork + net.peer(1).chain.add_blocks(20, EachBlockWith::Uncle); + net.peer(2).chain.add_blocks(20, EachBlockWith::Uncle); + net.peer(1).chain.add_blocks(10, EachBlockWith::Uncle); //fork between 1 and 2 + net.peer(2).chain.add_blocks(1, EachBlockWith::Nothing); + // peer 1 has the best chain of 601 blocks + let peer1_chain = net.peer(1).chain.numbers.read().clone(); + net.sync(); + assert_eq!( + *net.peer(0).chain.difficulty.read(), + *net.peer(1).chain.difficulty.read() + ); + assert_eq!(&*net.peer(0).chain.numbers.read(), &peer1_chain); + assert_eq!(&*net.peer(1).chain.numbers.read(), &peer1_chain); + assert_eq!(&*net.peer(2).chain.numbers.read(), &peer1_chain); } #[test] fn forked_with_misbehaving_peer() { - ::env_logger::try_init().ok(); - let mut net = TestNet::new(3); + ::env_logger::try_init().ok(); + let mut net = TestNet::new(3); - let mut alt_spec = ::ethcore::spec::Spec::new_test(); - alt_spec.extra_data = b"fork".to_vec(); 
- // peer 0 is on a totally different chain with higher total difficulty - net.peer_mut(0).chain = Arc::new(TestBlockChainClient::new_with_spec(alt_spec)); - net.peer(0).chain.add_blocks(50, EachBlockWith::Nothing); - net.peer(1).chain.add_blocks(10, EachBlockWith::Nothing); - net.peer(2).chain.add_blocks(10, EachBlockWith::Nothing); + let mut alt_spec = ::ethcore::spec::Spec::new_test(); + alt_spec.extra_data = b"fork".to_vec(); + // peer 0 is on a totally different chain with higher total difficulty + net.peer_mut(0).chain = Arc::new(TestBlockChainClient::new_with_spec(alt_spec)); + net.peer(0).chain.add_blocks(50, EachBlockWith::Nothing); + net.peer(1).chain.add_blocks(10, EachBlockWith::Nothing); + net.peer(2).chain.add_blocks(10, EachBlockWith::Nothing); - net.peer(1).chain.add_blocks(10, EachBlockWith::Nothing); - net.peer(2).chain.add_blocks(20, EachBlockWith::Uncle); - // peer 1 should sync to peer 2, others should not change - let peer0_chain = net.peer(0).chain.numbers.read().clone(); - let peer2_chain = net.peer(2).chain.numbers.read().clone(); - net.sync(); - assert_eq!(&*net.peer(0).chain.numbers.read(), &peer0_chain); - assert_eq!(&*net.peer(1).chain.numbers.read(), &peer2_chain); - assert_eq!(&*net.peer(2).chain.numbers.read(), &peer2_chain); + net.peer(1).chain.add_blocks(10, EachBlockWith::Nothing); + net.peer(2).chain.add_blocks(20, EachBlockWith::Uncle); + // peer 1 should sync to peer 2, others should not change + let peer0_chain = net.peer(0).chain.numbers.read().clone(); + let peer2_chain = net.peer(2).chain.numbers.read().clone(); + net.sync(); + assert_eq!(&*net.peer(0).chain.numbers.read(), &peer0_chain); + assert_eq!(&*net.peer(1).chain.numbers.read(), &peer2_chain); + assert_eq!(&*net.peer(2).chain.numbers.read(), &peer2_chain); } #[test] fn net_hard_fork() { - ::env_logger::try_init().ok(); - let ref_client = TestBlockChainClient::new(); - ref_client.add_blocks(50, EachBlockWith::Uncle); - { - let mut net = TestNet::new_with_fork(2, 
Some((50, ref_client.block_hash(BlockId::Number(50)).unwrap()))); - net.peer(0).chain.add_blocks(100, EachBlockWith::Uncle); - net.sync(); - assert_eq!(net.peer(1).chain.chain_info().best_block_number, 100); - } - { - let mut net = TestNet::new_with_fork(2, Some((50, ref_client.block_hash(BlockId::Number(50)).unwrap()))); - net.peer(0).chain.add_blocks(100, EachBlockWith::Nothing); - net.sync(); - assert_eq!(net.peer(1).chain.chain_info().best_block_number, 0); - } + ::env_logger::try_init().ok(); + let ref_client = TestBlockChainClient::new(); + ref_client.add_blocks(50, EachBlockWith::Uncle); + { + let mut net = TestNet::new_with_fork( + 2, + Some((50, ref_client.block_hash(BlockId::Number(50)).unwrap())), + ); + net.peer(0).chain.add_blocks(100, EachBlockWith::Uncle); + net.sync(); + assert_eq!(net.peer(1).chain.chain_info().best_block_number, 100); + } + { + let mut net = TestNet::new_with_fork( + 2, + Some((50, ref_client.block_hash(BlockId::Number(50)).unwrap())), + ); + net.peer(0).chain.add_blocks(100, EachBlockWith::Nothing); + net.sync(); + assert_eq!(net.peer(1).chain.chain_info().best_block_number, 0); + } } #[test] fn restart() { - ::env_logger::try_init().ok(); - let mut net = TestNet::new(3); - net.peer(1).chain.add_blocks(1000, EachBlockWith::Uncle); - net.peer(2).chain.add_blocks(1000, EachBlockWith::Uncle); + ::env_logger::try_init().ok(); + let mut net = TestNet::new(3); + net.peer(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer(2).chain.add_blocks(1000, EachBlockWith::Uncle); - net.sync(); + net.sync(); - // make sure that sync has actually happened - assert!(net.peer(0).chain.chain_info().best_block_number > 100); - net.restart_peer(0); + // make sure that sync has actually happened + assert!(net.peer(0).chain.chain_info().best_block_number > 100); + net.restart_peer(0); - let status = net.peer(0).sync.read().status(); - assert_eq!(status.state, SyncState::Idle); + let status = net.peer(0).sync.read().status(); + 
assert_eq!(status.state, SyncState::Idle); } #[test] fn status_empty() { - let net = TestNet::new(2); - assert_eq!(net.peer(0).sync.read().status().state, SyncState::Idle); - let mut config = SyncConfig::default(); - config.warp_sync = WarpSync::Enabled; - let net = TestNet::new_with_config(2, config); - assert_eq!(net.peer(0).sync.read().status().state, SyncState::WaitingPeers); + let net = TestNet::new(2); + assert_eq!(net.peer(0).sync.read().status().state, SyncState::Idle); + let mut config = SyncConfig::default(); + config.warp_sync = WarpSync::Enabled; + let net = TestNet::new_with_config(2, config); + assert_eq!( + net.peer(0).sync.read().status().state, + SyncState::WaitingPeers + ); } #[test] fn status_packet() { - let mut net = TestNet::new(2); - net.peer(0).chain.add_blocks(100, EachBlockWith::Uncle); - net.peer(1).chain.add_blocks(1, EachBlockWith::Uncle); + let mut net = TestNet::new(2); + net.peer(0).chain.add_blocks(100, EachBlockWith::Uncle); + net.peer(1).chain.add_blocks(1, EachBlockWith::Uncle); - net.start(); + net.start(); - net.sync_step_peer(0); + net.sync_step_peer(0); - assert_eq!(1, net.peer(0).queue.read().len()); - assert_eq!(0x00, net.peer(0).queue.read()[0].packet_id); + assert_eq!(1, net.peer(0).queue.read().len()); + assert_eq!(0x00, net.peer(0).queue.read()[0].packet_id); } #[test] fn propagate_hashes() { - let mut net = TestNet::new(6); - net.peer(1).chain.add_blocks(10, EachBlockWith::Uncle); - net.sync(); + let mut net = TestNet::new(6); + net.peer(1).chain.add_blocks(10, EachBlockWith::Uncle); + net.sync(); - net.peer(0).chain.add_blocks(10, EachBlockWith::Uncle); - net.sync(); - net.trigger_chain_new_blocks(0); //first event just sets the marker - net.trigger_chain_new_blocks(0); + net.peer(0).chain.add_blocks(10, EachBlockWith::Uncle); + net.sync(); + net.trigger_chain_new_blocks(0); //first event just sets the marker + net.trigger_chain_new_blocks(0); - // 5 peers with NewHahses, 4 with blocks - assert_eq!(9, 
net.peer(0).queue.read().len()); - let mut hashes = 0; - let mut blocks = 0; - for i in 0..net.peer(0).queue.read().len() { - if net.peer(0).queue.read()[i].packet_id == 0x1 { - hashes += 1; - } - if net.peer(0).queue.read()[i].packet_id == 0x7 { - blocks += 1; - } - } - assert_eq!(blocks, 4); - assert_eq!(hashes, 5); + // 5 peers with NewHahses, 4 with blocks + assert_eq!(9, net.peer(0).queue.read().len()); + let mut hashes = 0; + let mut blocks = 0; + for i in 0..net.peer(0).queue.read().len() { + if net.peer(0).queue.read()[i].packet_id == 0x1 { + hashes += 1; + } + if net.peer(0).queue.read()[i].packet_id == 0x7 { + blocks += 1; + } + } + assert_eq!(blocks, 4); + assert_eq!(hashes, 5); } #[test] fn propagate_blocks() { - let mut net = TestNet::new(20); - net.peer(1).chain.add_blocks(10, EachBlockWith::Uncle); - net.sync(); + let mut net = TestNet::new(20); + net.peer(1).chain.add_blocks(10, EachBlockWith::Uncle); + net.sync(); - net.peer(0).chain.add_blocks(10, EachBlockWith::Uncle); - net.trigger_chain_new_blocks(0); //first event just sets the marker - net.trigger_chain_new_blocks(0); + net.peer(0).chain.add_blocks(10, EachBlockWith::Uncle); + net.trigger_chain_new_blocks(0); //first event just sets the marker + net.trigger_chain_new_blocks(0); - assert!(!net.peer(0).queue.read().is_empty()); - // NEW_BLOCK_PACKET - let blocks = net.peer(0).queue.read().iter().filter(|p| p.packet_id == 0x7).count(); - assert!(blocks > 0); + assert!(!net.peer(0).queue.read().is_empty()); + // NEW_BLOCK_PACKET + let blocks = net + .peer(0) + .queue + .read() + .iter() + .filter(|p| p.packet_id == 0x7) + .count(); + assert!(blocks > 0); } #[test] fn restart_on_malformed_block() { - ::env_logger::try_init().ok(); - let mut net = TestNet::new(2); - net.peer(1).chain.add_blocks(5, EachBlockWith::Nothing); - net.peer(1).chain.add_block(EachBlockWith::Nothing, |mut header| { - header.set_extra_data(b"This extra data is way too long to be considered valid".to_vec()); - header - }); - 
net.sync_steps(20); + ::env_logger::try_init().ok(); + let mut net = TestNet::new(2); + net.peer(1).chain.add_blocks(5, EachBlockWith::Nothing); + net.peer(1) + .chain + .add_block(EachBlockWith::Nothing, |mut header| { + header + .set_extra_data(b"This extra data is way too long to be considered valid".to_vec()); + header + }); + net.sync_steps(20); - // This gets accepted just fine since the TestBlockChainClient performs no validation. - // Probably remove this test? - assert_eq!(net.peer(0).chain.chain_info().best_block_number, 6); + // This gets accepted just fine since the TestBlockChainClient performs no validation. + // Probably remove this test? + assert_eq!(net.peer(0).chain.chain_info().best_block_number, 6); } #[test] fn reject_on_broken_chain() { - let mut net = TestNet::new(2); - net.peer(1).chain.add_blocks(10, EachBlockWith::Nothing); - net.peer(1).chain.corrupt_block_parent(6); - net.sync_steps(20); + let mut net = TestNet::new(2); + net.peer(1).chain.add_blocks(10, EachBlockWith::Nothing); + net.peer(1).chain.corrupt_block_parent(6); + net.sync_steps(20); - assert_eq!(net.peer(0).chain.chain_info().best_block_number, 0); + assert_eq!(net.peer(0).chain.chain_info().best_block_number, 0); } #[test] fn disconnect_on_unrelated_chain() { - ::env_logger::try_init().ok(); - let mut net = TestNet::new(2); - net.peer(0).chain.set_history(Some(20)); - net.peer(1).chain.set_history(Some(20)); - net.restart_peer(0); - net.restart_peer(1); - net.peer(0).chain.add_blocks(500, EachBlockWith::Uncle); - net.peer(1).chain.add_blocks(300, EachBlockWith::Nothing); - net.sync(); - assert_eq!(net.disconnect_events, vec![(0, 0)]); + ::env_logger::try_init().ok(); + let mut net = TestNet::new(2); + net.peer(0).chain.set_history(Some(20)); + net.peer(1).chain.set_history(Some(20)); + net.restart_peer(0); + net.restart_peer(1); + net.peer(0).chain.add_blocks(500, EachBlockWith::Uncle); + net.peer(1).chain.add_blocks(300, EachBlockWith::Nothing); + net.sync(); + 
assert_eq!(net.disconnect_events, vec![(0, 0)]); } diff --git a/ethcore/sync/src/tests/consensus.rs b/ethcore/sync/src/tests/consensus.rs index df0936633..51f66b0e2 100644 --- a/ethcore/sync/src/tests/consensus.rs +++ b/ethcore/sync/src/tests/consensus.rs @@ -14,111 +14,161 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use hash::keccak; -use ethereum_types::{U256, Address}; -use io::{IoHandler, IoChannel}; -use ethcore::client::{ChainInfo, ClientIoMessage}; -use ethcore::engines; -use ethcore::spec::Spec; -use ethcore::miner::{self, MinerService}; -use ethkey::{KeyPair, Secret}; -use types::transaction::{Action, PendingTransaction, Transaction}; use super::helpers::*; +use ethcore::{ + client::{ChainInfo, ClientIoMessage}, + engines, + miner::{self, MinerService}, + spec::Spec, +}; +use ethereum_types::{Address, U256}; +use ethkey::{KeyPair, Secret}; +use hash::keccak; +use io::{IoChannel, IoHandler}; +use std::sync::Arc; +use types::transaction::{Action, PendingTransaction, Transaction}; use SyncConfig; fn new_tx(secret: &Secret, nonce: U256, chain_id: u64) -> PendingTransaction { - let signed = Transaction { - nonce: nonce.into(), - gas_price: 0.into(), - gas: 21000.into(), - action: Action::Call(Address::default()), - value: 0.into(), - data: Vec::new(), - }.sign(secret, Some(chain_id)); - PendingTransaction::new(signed, None) + let signed = Transaction { + nonce: nonce.into(), + gas_price: 0.into(), + gas: 21000.into(), + action: Action::Call(Address::default()), + value: 0.into(), + data: Vec::new(), + } + .sign(secret, Some(chain_id)); + PendingTransaction::new(signed, None) } #[test] fn authority_round() { - let s0 = KeyPair::from_secret_slice(&keccak("1")).unwrap(); - let s1 = KeyPair::from_secret_slice(&keccak("0")).unwrap(); + let s0 = KeyPair::from_secret_slice(&keccak("1")).unwrap(); + let s1 = KeyPair::from_secret_slice(&keccak("0")).unwrap(); - let chain_id = 
Spec::new_test_round().chain_id(); - let mut net = TestNet::with_spec(2, SyncConfig::default(), Spec::new_test_round); - let io_handler0: Arc> = Arc::new(TestIoHandler::new(net.peer(0).chain.clone())); - let io_handler1: Arc> = Arc::new(TestIoHandler::new(net.peer(1).chain.clone())); - // Push transaction to both clients. Only one of them gets lucky to produce a block. - net.peer(0).miner.set_author(miner::Author::Sealer(engines::signer::from_keypair(s0.clone()))); - net.peer(1).miner.set_author(miner::Author::Sealer(engines::signer::from_keypair(s1.clone()))); - net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain) as _); - net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain) as _); - net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1))); - net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0))); - // exchange statuses - net.sync(); - // Trigger block proposal - net.peer(0).miner.import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 0.into(), chain_id)).unwrap(); - net.peer(1).miner.import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 0.into(), chain_id)).unwrap(); - // Sync a block - net.sync(); - assert_eq!(net.peer(0).chain.chain_info().best_block_number, 1); - assert_eq!(net.peer(1).chain.chain_info().best_block_number, 1); + let chain_id = Spec::new_test_round().chain_id(); + let mut net = TestNet::with_spec(2, SyncConfig::default(), Spec::new_test_round); + let io_handler0: Arc> = + Arc::new(TestIoHandler::new(net.peer(0).chain.clone())); + let io_handler1: Arc> = + Arc::new(TestIoHandler::new(net.peer(1).chain.clone())); + // Push transaction to both clients. Only one of them gets lucky to produce a block. 
+ net.peer(0) + .miner + .set_author(miner::Author::Sealer(engines::signer::from_keypair( + s0.clone(), + ))); + net.peer(1) + .miner + .set_author(miner::Author::Sealer(engines::signer::from_keypair( + s1.clone(), + ))); + net.peer(0) + .chain + .engine() + .register_client(Arc::downgrade(&net.peer(0).chain) as _); + net.peer(1) + .chain + .engine() + .register_client(Arc::downgrade(&net.peer(1).chain) as _); + net.peer(0) + .chain + .set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1))); + net.peer(1) + .chain + .set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0))); + // exchange statuses + net.sync(); + // Trigger block proposal + net.peer(0) + .miner + .import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 0.into(), chain_id)) + .unwrap(); + net.peer(1) + .miner + .import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 0.into(), chain_id)) + .unwrap(); + // Sync a block + net.sync(); + assert_eq!(net.peer(0).chain.chain_info().best_block_number, 1); + assert_eq!(net.peer(1).chain.chain_info().best_block_number, 1); - net.peer(0).miner.import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 1.into(), chain_id)).unwrap(); - net.peer(1).miner.import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 1.into(), chain_id)).unwrap(); - // Move to next proposer step. - net.peer(0).chain.engine().step(); - net.peer(1).chain.engine().step(); - net.sync(); - assert_eq!(net.peer(0).chain.chain_info().best_block_number, 2); - assert_eq!(net.peer(1).chain.chain_info().best_block_number, 2); + net.peer(0) + .miner + .import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 1.into(), chain_id)) + .unwrap(); + net.peer(1) + .miner + .import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 1.into(), chain_id)) + .unwrap(); + // Move to next proposer step. 
+ net.peer(0).chain.engine().step(); + net.peer(1).chain.engine().step(); + net.sync(); + assert_eq!(net.peer(0).chain.chain_info().best_block_number, 2); + assert_eq!(net.peer(1).chain.chain_info().best_block_number, 2); - // Fork the network with equal height. - net.peer(0).miner.import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 2.into(), chain_id)).unwrap(); - net.peer(1).miner.import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 2.into(), chain_id)).unwrap(); - // Let both nodes build one block. - net.peer(0).chain.engine().step(); - let early_hash = net.peer(0).chain.chain_info().best_block_hash; - net.peer(1).chain.engine().step(); - net.peer(0).chain.engine().step(); - net.peer(1).chain.engine().step(); - let ci0 = net.peer(0).chain.chain_info(); - let ci1 = net.peer(1).chain.chain_info(); - assert_eq!(ci0.best_block_number, 3); - assert_eq!(ci1.best_block_number, 3); - assert!(ci0.best_block_hash != ci1.best_block_hash); - // Reorg to the chain with earlier view. - net.sync(); - let ci0 = net.peer(0).chain.chain_info(); - let ci1 = net.peer(1).chain.chain_info(); - assert_eq!(ci0.best_block_number, 3); - assert_eq!(ci1.best_block_number, 3); - assert_eq!(ci0.best_block_hash, ci1.best_block_hash); - assert_eq!(ci1.best_block_hash, early_hash); + // Fork the network with equal height. + net.peer(0) + .miner + .import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 2.into(), chain_id)) + .unwrap(); + net.peer(1) + .miner + .import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 2.into(), chain_id)) + .unwrap(); + // Let both nodes build one block. 
+ net.peer(0).chain.engine().step(); + let early_hash = net.peer(0).chain.chain_info().best_block_hash; + net.peer(1).chain.engine().step(); + net.peer(0).chain.engine().step(); + net.peer(1).chain.engine().step(); + let ci0 = net.peer(0).chain.chain_info(); + let ci1 = net.peer(1).chain.chain_info(); + assert_eq!(ci0.best_block_number, 3); + assert_eq!(ci1.best_block_number, 3); + assert!(ci0.best_block_hash != ci1.best_block_hash); + // Reorg to the chain with earlier view. + net.sync(); + let ci0 = net.peer(0).chain.chain_info(); + let ci1 = net.peer(1).chain.chain_info(); + assert_eq!(ci0.best_block_number, 3); + assert_eq!(ci1.best_block_number, 3); + assert_eq!(ci0.best_block_hash, ci1.best_block_hash); + assert_eq!(ci1.best_block_hash, early_hash); - // Selfish miner - net.peer(0).miner.import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 3.into(), chain_id)).unwrap(); - net.peer(1).miner.import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 3.into(), chain_id)).unwrap(); - // Node 0 is an earlier primary. - net.peer(0).chain.engine().step(); - assert_eq!(net.peer(0).chain.chain_info().best_block_number, 4); - net.peer(0).chain.engine().step(); - net.peer(0).chain.engine().step(); - net.peer(0).chain.engine().step(); - assert_eq!(net.peer(0).chain.chain_info().best_block_number, 4); - // Node 1 makes 2 blocks, but is a later primary on the first one. - net.peer(1).chain.engine().step(); - net.peer(1).chain.engine().step(); - net.peer(1).miner.import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 4.into(), chain_id)).unwrap(); - net.peer(1).chain.engine().step(); - net.peer(1).chain.engine().step(); - assert_eq!(net.peer(1).chain.chain_info().best_block_number, 5); - // Reorg to the longest chain one not ealier view one. 
- net.sync(); - let ci0 = net.peer(0).chain.chain_info(); - let ci1 = net.peer(1).chain.chain_info(); - assert_eq!(ci0.best_block_number, 5); - assert_eq!(ci1.best_block_number, 5); - assert_eq!(ci0.best_block_hash, ci1.best_block_hash); + // Selfish miner + net.peer(0) + .miner + .import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 3.into(), chain_id)) + .unwrap(); + net.peer(1) + .miner + .import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 3.into(), chain_id)) + .unwrap(); + // Node 0 is an earlier primary. + net.peer(0).chain.engine().step(); + assert_eq!(net.peer(0).chain.chain_info().best_block_number, 4); + net.peer(0).chain.engine().step(); + net.peer(0).chain.engine().step(); + net.peer(0).chain.engine().step(); + assert_eq!(net.peer(0).chain.chain_info().best_block_number, 4); + // Node 1 makes 2 blocks, but is a later primary on the first one. + net.peer(1).chain.engine().step(); + net.peer(1).chain.engine().step(); + net.peer(1) + .miner + .import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 4.into(), chain_id)) + .unwrap(); + net.peer(1).chain.engine().step(); + net.peer(1).chain.engine().step(); + assert_eq!(net.peer(1).chain.chain_info().best_block_number, 5); + // Reorg to the longest chain one not ealier view one. + net.sync(); + let ci0 = net.peer(0).chain.chain_info(); + let ci1 = net.peer(1).chain.chain_info(); + assert_eq!(ci0.best_block_number, 5); + assert_eq!(ci1.best_block_number, 5); + assert_eq!(ci0.best_block_hash, ci1.best_block_hash); } diff --git a/ethcore/sync/src/tests/helpers.rs b/ethcore/sync/src/tests/helpers.rs index 8bc4b542e..952a2c6c0 100644 --- a/ethcore/sync/src/tests/helpers.rs +++ b/ethcore/sync/src/tests/helpers.rs @@ -14,550 +14,642 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::{VecDeque, HashSet, HashMap}; -use std::sync::Arc; -use ethereum_types::H256; -use parking_lot::{RwLock, Mutex}; -use bytes::Bytes; -use network::{self, PeerId, ProtocolId, PacketId, SessionInfo}; -use network::client_version::ClientVersion; -use tests::snapshot::*; -use ethcore::client::{TestBlockChainClient, BlockChainClient, Client as EthcoreClient, - ClientConfig, ChainNotify, NewBlocks, ChainMessageType, ClientIoMessage}; -use ethcore::snapshot::SnapshotService; -use ethcore::spec::Spec; -use ethcore::miner::Miner; -use ethcore::test_helpers; -use sync_io::SyncIo; -use io::{IoChannel, IoContext, IoHandler}; use api::WARP_SYNC_PROTOCOL_ID; -use chain::{ChainSync, SyncSupplier, ETH_PROTOCOL_VERSION_63, PAR_PROTOCOL_VERSION_3}; -use chain::sync_packet::{PacketInfo, SyncPacket}; -use chain::sync_packet::SyncPacket::{PrivateTransactionPacket, SignedPrivateTransactionPacket}; +use bytes::Bytes; +use chain::{ + sync_packet::{ + PacketInfo, SyncPacket, + SyncPacket::{PrivateTransactionPacket, SignedPrivateTransactionPacket}, + }, + ChainSync, SyncSupplier, ETH_PROTOCOL_VERSION_63, PAR_PROTOCOL_VERSION_3, +}; +use ethcore::{ + client::{ + BlockChainClient, ChainMessageType, ChainNotify, Client as EthcoreClient, ClientConfig, + ClientIoMessage, NewBlocks, TestBlockChainClient, + }, + miner::Miner, + snapshot::SnapshotService, + spec::Spec, + test_helpers, +}; +use ethereum_types::H256; +use io::{IoChannel, IoContext, IoHandler}; +use network::{self, client_version::ClientVersion, PacketId, PeerId, ProtocolId, SessionInfo}; +use parking_lot::{Mutex, RwLock}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + sync::Arc, +}; +use sync_io::SyncIo; +use tests::snapshot::*; -use SyncConfig; use private_tx::SimplePrivateTxHandler; use types::BlockNumber; +use SyncConfig; pub trait FlushingBlockChainClient: BlockChainClient { - fn flush(&self) {} + fn flush(&self) {} } impl FlushingBlockChainClient for EthcoreClient { - fn flush(&self) { - 
self.flush_queue(); - } + fn flush(&self) { + self.flush_queue(); + } } impl FlushingBlockChainClient for TestBlockChainClient {} -pub struct TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p { - pub chain: &'p C, - pub snapshot_service: &'p TestSnapshotService, - pub queue: &'p RwLock>, - pub sender: Option, - pub to_disconnect: HashSet, - pub packets: Vec, - pub peers_info: HashMap, - overlay: RwLock>, +pub struct TestIo<'p, C> +where + C: FlushingBlockChainClient, + C: 'p, +{ + pub chain: &'p C, + pub snapshot_service: &'p TestSnapshotService, + pub queue: &'p RwLock>, + pub sender: Option, + pub to_disconnect: HashSet, + pub packets: Vec, + pub peers_info: HashMap, + overlay: RwLock>, } -impl<'p, C> TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p { - pub fn new(chain: &'p C, ss: &'p TestSnapshotService, queue: &'p RwLock>, sender: Option) -> TestIo<'p, C> { - TestIo { - chain: chain, - snapshot_service: ss, - queue: queue, - sender: sender, - to_disconnect: HashSet::new(), - overlay: RwLock::new(HashMap::new()), - packets: Vec::new(), - peers_info: HashMap::new(), - } - } +impl<'p, C> TestIo<'p, C> +where + C: FlushingBlockChainClient, + C: 'p, +{ + pub fn new( + chain: &'p C, + ss: &'p TestSnapshotService, + queue: &'p RwLock>, + sender: Option, + ) -> TestIo<'p, C> { + TestIo { + chain: chain, + snapshot_service: ss, + queue: queue, + sender: sender, + to_disconnect: HashSet::new(), + overlay: RwLock::new(HashMap::new()), + packets: Vec::new(), + peers_info: HashMap::new(), + } + } } -impl<'p, C> Drop for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p { - fn drop(&mut self) { - self.queue.write().extend(self.packets.drain(..)); - } +impl<'p, C> Drop for TestIo<'p, C> +where + C: FlushingBlockChainClient, + C: 'p, +{ + fn drop(&mut self) { + self.queue.write().extend(self.packets.drain(..)); + } } -impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p { - fn disable_peer(&mut self, peer_id: PeerId) { - 
self.disconnect_peer(peer_id); - } +impl<'p, C> SyncIo for TestIo<'p, C> +where + C: FlushingBlockChainClient, + C: 'p, +{ + fn disable_peer(&mut self, peer_id: PeerId) { + self.disconnect_peer(peer_id); + } - fn disconnect_peer(&mut self, peer_id: PeerId) { - self.to_disconnect.insert(peer_id); - } + fn disconnect_peer(&mut self, peer_id: PeerId) { + self.to_disconnect.insert(peer_id); + } - fn is_expired(&self) -> bool { - false - } + fn is_expired(&self) -> bool { + false + } - fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), network::Error> { - self.packets.push(TestPacket { - data: data, - packet_id: packet_id, - recipient: self.sender.unwrap() - }); - Ok(()) - } + fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), network::Error> { + self.packets.push(TestPacket { + data: data, + packet_id: packet_id, + recipient: self.sender.unwrap(), + }); + Ok(()) + } - fn send(&mut self,peer_id: PeerId, packet_id: SyncPacket, data: Vec) -> Result<(), network::Error> { - self.packets.push(TestPacket { - data: data, - packet_id: packet_id.id(), - recipient: peer_id, - }); - Ok(()) - } + fn send( + &mut self, + peer_id: PeerId, + packet_id: SyncPacket, + data: Vec, + ) -> Result<(), network::Error> { + self.packets.push(TestPacket { + data: data, + packet_id: packet_id.id(), + recipient: peer_id, + }); + Ok(()) + } - fn chain(&self) -> &BlockChainClient { - &*self.chain - } + fn chain(&self) -> &BlockChainClient { + &*self.chain + } - fn peer_version(&self, peer_id: PeerId) -> ClientVersion { - let client_id = self.peers_info.get(&peer_id) - .cloned() - .unwrap_or_else(|| peer_id.to_string()); + fn peer_version(&self, peer_id: PeerId) -> ClientVersion { + let client_id = self + .peers_info + .get(&peer_id) + .cloned() + .unwrap_or_else(|| peer_id.to_string()); - ClientVersion::from(client_id) - } + ClientVersion::from(client_id) + } - fn snapshot_service(&self) -> &SnapshotService { - self.snapshot_service - } + fn snapshot_service(&self) 
-> &SnapshotService { + self.snapshot_service + } - fn peer_session_info(&self, _peer_id: PeerId) -> Option { - None - } + fn peer_session_info(&self, _peer_id: PeerId) -> Option { + None + } - fn eth_protocol_version(&self, _peer: PeerId) -> u8 { - ETH_PROTOCOL_VERSION_63.0 - } + fn eth_protocol_version(&self, _peer: PeerId) -> u8 { + ETH_PROTOCOL_VERSION_63.0 + } - fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8 { - if protocol == &WARP_SYNC_PROTOCOL_ID { PAR_PROTOCOL_VERSION_3.0 } else { self.eth_protocol_version(peer_id) } - } + fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8 { + if protocol == &WARP_SYNC_PROTOCOL_ID { + PAR_PROTOCOL_VERSION_3.0 + } else { + self.eth_protocol_version(peer_id) + } + } - fn chain_overlay(&self) -> &RwLock> { - &self.overlay - } + fn chain_overlay(&self) -> &RwLock> { + &self.overlay + } - fn payload_soft_limit(&self) -> usize { - 100_000 - } + fn payload_soft_limit(&self) -> usize { + 100_000 + } } /// Mock for emulution of async run of new blocks struct NewBlockMessage { - imported: Vec, - invalid: Vec, - enacted: Vec, - retracted: Vec, - sealed: Vec, - proposed: Vec, + imported: Vec, + invalid: Vec, + enacted: Vec, + retracted: Vec, + sealed: Vec, + proposed: Vec, } /// Abstract messages between peers. pub trait Message { - /// The intended recipient of this message. - fn recipient(&self) -> PeerId; + /// The intended recipient of this message. + fn recipient(&self) -> PeerId; } /// Mock subprotocol packet pub struct TestPacket { - pub data: Bytes, - pub packet_id: PacketId, - pub recipient: PeerId, + pub data: Bytes, + pub packet_id: PacketId, + pub recipient: PeerId, } impl Message for TestPacket { - fn recipient(&self) -> PeerId { self.recipient } + fn recipient(&self) -> PeerId { + self.recipient + } } /// A peer which can be a member of the `TestNet`. pub trait Peer { - type Message: Message; + type Message: Message; - /// Called on connection to other indicated peer. 
- fn on_connect(&self, other: PeerId); + /// Called on connection to other indicated peer. + fn on_connect(&self, other: PeerId); - /// Called on disconnect from other indicated peer. - fn on_disconnect(&self, other: PeerId); + /// Called on disconnect from other indicated peer. + fn on_disconnect(&self, other: PeerId); - /// Receive a message from another peer. Return a set of peers to disconnect. - fn receive_message(&self, from: PeerId, msg: Self::Message) -> HashSet; + /// Receive a message from another peer. Return a set of peers to disconnect. + fn receive_message(&self, from: PeerId, msg: Self::Message) -> HashSet; - /// Produce the next pending message to send to another peer. - fn pending_message(&self) -> Option; + /// Produce the next pending message to send to another peer. + fn pending_message(&self) -> Option; - /// Whether this peer is done syncing (has no messages to send). - fn is_done(&self) -> bool; + /// Whether this peer is done syncing (has no messages to send). + fn is_done(&self) -> bool; - /// Execute a "sync step". This is called for each peer after it sends a packet. - fn sync_step(&self); + /// Execute a "sync step". This is called for each peer after it sends a packet. + fn sync_step(&self); - /// Restart sync for a peer. - fn restart_sync(&self); + /// Restart sync for a peer. 
+ fn restart_sync(&self); - /// Process the queue of pending io messages - fn process_all_io_messages(&self); + /// Process the queue of pending io messages + fn process_all_io_messages(&self); - /// Process the queue of new block messages - fn process_all_new_block_messages(&self); + /// Process the queue of new block messages + fn process_all_new_block_messages(&self); } -pub struct EthPeer where C: FlushingBlockChainClient { - pub chain: Arc, - pub miner: Arc, - pub snapshot_service: Arc, - pub sync: RwLock, - pub queue: RwLock>, - pub private_tx_handler: Arc, - pub io_queue: RwLock>, - new_blocks_queue: RwLock>, +pub struct EthPeer +where + C: FlushingBlockChainClient, +{ + pub chain: Arc, + pub miner: Arc, + pub snapshot_service: Arc, + pub sync: RwLock, + pub queue: RwLock>, + pub private_tx_handler: Arc, + pub io_queue: RwLock>, + new_blocks_queue: RwLock>, } -impl EthPeer where C: FlushingBlockChainClient { - fn is_io_queue_empty(&self) -> bool { - self.io_queue.read().is_empty() - } +impl EthPeer +where + C: FlushingBlockChainClient, +{ + fn is_io_queue_empty(&self) -> bool { + self.io_queue.read().is_empty() + } - fn is_new_blocks_queue_empty(&self) -> bool { - self.new_blocks_queue.read().is_empty() - } + fn is_new_blocks_queue_empty(&self) -> bool { + self.new_blocks_queue.read().is_empty() + } - fn process_io_message(&self, message: ChainMessageType) { - let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None); - match message { - ChainMessageType::Consensus(data) => self.sync.write().propagate_consensus_packet(&mut io, data), - ChainMessageType::PrivateTransaction(transaction_hash, data) => - self.sync.write().propagate_private_transaction(&mut io, transaction_hash, PrivateTransactionPacket, data), - ChainMessageType::SignedPrivateTransaction(transaction_hash, data) => - self.sync.write().propagate_private_transaction(&mut io, transaction_hash, SignedPrivateTransactionPacket, data), - } - } + fn process_io_message(&self, 
message: ChainMessageType) { + let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None); + match message { + ChainMessageType::Consensus(data) => { + self.sync.write().propagate_consensus_packet(&mut io, data) + } + ChainMessageType::PrivateTransaction(transaction_hash, data) => { + self.sync.write().propagate_private_transaction( + &mut io, + transaction_hash, + PrivateTransactionPacket, + data, + ) + } + ChainMessageType::SignedPrivateTransaction(transaction_hash, data) => { + self.sync.write().propagate_private_transaction( + &mut io, + transaction_hash, + SignedPrivateTransactionPacket, + data, + ) + } + } + } - fn process_new_block_message(&self, message: NewBlockMessage) { - let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None); - self.sync.write().chain_new_blocks( - &mut io, - &message.imported, - &message.invalid, - &message.enacted, - &message.retracted, - &message.sealed, - &message.proposed - ); - } + fn process_new_block_message(&self, message: NewBlockMessage) { + let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None); + self.sync.write().chain_new_blocks( + &mut io, + &message.imported, + &message.invalid, + &message.enacted, + &message.retracted, + &message.sealed, + &message.proposed, + ); + } } impl Peer for EthPeer { - type Message = TestPacket; + type Message = TestPacket; - fn on_connect(&self, other: PeerId) { - self.sync.write().update_targets(&*self.chain); - self.sync.write().on_peer_connected(&mut TestIo::new( - &*self.chain, - &self.snapshot_service, - &self.queue, - Some(other)), - other); - } + fn on_connect(&self, other: PeerId) { + self.sync.write().update_targets(&*self.chain); + self.sync.write().on_peer_connected( + &mut TestIo::new( + &*self.chain, + &self.snapshot_service, + &self.queue, + Some(other), + ), + other, + ); + } - fn on_disconnect(&self, other: PeerId) { - let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, 
Some(other)); - self.sync.write().on_peer_aborting(&mut io, other); - } + fn on_disconnect(&self, other: PeerId) { + let mut io = TestIo::new( + &*self.chain, + &self.snapshot_service, + &self.queue, + Some(other), + ); + self.sync.write().on_peer_aborting(&mut io, other); + } - fn receive_message(&self, from: PeerId, msg: TestPacket) -> HashSet { - let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, Some(from)); - SyncSupplier::dispatch_packet(&self.sync, &mut io, from, msg.packet_id, &msg.data); - self.chain.flush(); - io.to_disconnect.clone() - } + fn receive_message(&self, from: PeerId, msg: TestPacket) -> HashSet { + let mut io = TestIo::new( + &*self.chain, + &self.snapshot_service, + &self.queue, + Some(from), + ); + SyncSupplier::dispatch_packet(&self.sync, &mut io, from, msg.packet_id, &msg.data); + self.chain.flush(); + io.to_disconnect.clone() + } - fn pending_message(&self) -> Option { - self.chain.flush(); - self.queue.write().pop_front() - } + fn pending_message(&self) -> Option { + self.chain.flush(); + self.queue.write().pop_front() + } - fn is_done(&self) -> bool { - self.queue.read().is_empty() && self.is_io_queue_empty() && self.is_new_blocks_queue_empty() - } + fn is_done(&self) -> bool { + self.queue.read().is_empty() && self.is_io_queue_empty() && self.is_new_blocks_queue_empty() + } - fn sync_step(&self) { - let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None); - self.chain.flush(); - self.sync.write().maintain_peers(&mut io); - self.sync.write().maintain_sync(&mut io); - self.sync.write().continue_sync(&mut io); - self.sync.write().propagate_new_transactions(&mut io); - } + fn sync_step(&self) { + let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None); + self.chain.flush(); + self.sync.write().maintain_peers(&mut io); + self.sync.write().maintain_sync(&mut io); + self.sync.write().continue_sync(&mut io); + self.sync.write().propagate_new_transactions(&mut io); + 
} - fn restart_sync(&self) { - self.sync.write().restart(&mut TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None)); - } + fn restart_sync(&self) { + self.sync.write().restart(&mut TestIo::new( + &*self.chain, + &self.snapshot_service, + &self.queue, + None, + )); + } - fn process_all_io_messages(&self) { - if !self.is_io_queue_empty() { - while let Some(message) = self.io_queue.write().pop_front() { - self.process_io_message(message); - } - } - } + fn process_all_io_messages(&self) { + if !self.is_io_queue_empty() { + while let Some(message) = self.io_queue.write().pop_front() { + self.process_io_message(message); + } + } + } - fn process_all_new_block_messages(&self) { - if !self.is_new_blocks_queue_empty() { - while let Some(message) = self.new_blocks_queue.write().pop_front() { - self.process_new_block_message(message); - } - } - } + fn process_all_new_block_messages(&self) { + if !self.is_new_blocks_queue_empty() { + while let Some(message) = self.new_blocks_queue.write().pop_front() { + self.process_new_block_message(message); + } + } + } } pub struct TestNet

{ - pub peers: Vec>, - pub started: bool, - pub disconnect_events: Vec<(PeerId, PeerId)>, //disconnected (initiated by, to) + pub peers: Vec>, + pub started: bool, + pub disconnect_events: Vec<(PeerId, PeerId)>, //disconnected (initiated by, to) } impl TestNet> { - pub fn new(n: usize) -> Self { - Self::new_with_config(n, SyncConfig::default()) - } + pub fn new(n: usize) -> Self { + Self::new_with_config(n, SyncConfig::default()) + } - pub fn new_with_fork(n: usize, fork: Option<(BlockNumber, H256)>) -> Self { - let mut config = SyncConfig::default(); - config.fork_block = fork; - Self::new_with_config(n, config) - } + pub fn new_with_fork(n: usize, fork: Option<(BlockNumber, H256)>) -> Self { + let mut config = SyncConfig::default(); + config.fork_block = fork; + Self::new_with_config(n, config) + } - pub fn new_with_config(n: usize, config: SyncConfig) -> Self { - let mut net = TestNet { - peers: Vec::new(), - started: false, - disconnect_events: Vec::new(), - }; - for _ in 0..n { - let chain = TestBlockChainClient::new(); - let ss = Arc::new(TestSnapshotService::new()); - let private_tx_handler = Arc::new(SimplePrivateTxHandler::default()); - let sync = ChainSync::new(config.clone(), &chain, Some(private_tx_handler.clone())); - net.peers.push(Arc::new(EthPeer { - sync: RwLock::new(sync), - snapshot_service: ss, - chain: Arc::new(chain), - miner: Arc::new(Miner::new_for_tests(&Spec::new_test(), None)), - queue: RwLock::new(VecDeque::new()), - private_tx_handler, - io_queue: RwLock::new(VecDeque::new()), - new_blocks_queue: RwLock::new(VecDeque::new()), - })); - } - net - } + pub fn new_with_config(n: usize, config: SyncConfig) -> Self { + let mut net = TestNet { + peers: Vec::new(), + started: false, + disconnect_events: Vec::new(), + }; + for _ in 0..n { + let chain = TestBlockChainClient::new(); + let ss = Arc::new(TestSnapshotService::new()); + let private_tx_handler = Arc::new(SimplePrivateTxHandler::default()); + let sync = ChainSync::new(config.clone(), 
&chain, Some(private_tx_handler.clone())); + net.peers.push(Arc::new(EthPeer { + sync: RwLock::new(sync), + snapshot_service: ss, + chain: Arc::new(chain), + miner: Arc::new(Miner::new_for_tests(&Spec::new_test(), None)), + queue: RwLock::new(VecDeque::new()), + private_tx_handler, + io_queue: RwLock::new(VecDeque::new()), + new_blocks_queue: RwLock::new(VecDeque::new()), + })); + } + net + } - // relies on Arc uniqueness, which is only true when we haven't registered a ChainNotify. - pub fn peer_mut(&mut self, i: usize) -> &mut EthPeer { - Arc::get_mut(&mut self.peers[i]).expect("Arc never exposed externally") - } + // relies on Arc uniqueness, which is only true when we haven't registered a ChainNotify. + pub fn peer_mut(&mut self, i: usize) -> &mut EthPeer { + Arc::get_mut(&mut self.peers[i]).expect("Arc never exposed externally") + } } impl TestNet> { - pub fn with_spec( - n: usize, - config: SyncConfig, - spec_factory: F, - ) -> Self - where F: Fn() -> Spec - { - let mut net = TestNet { - peers: Vec::new(), - started: false, - disconnect_events: Vec::new(), - }; - for _ in 0..n { - net.add_peer_with_private_config(config.clone(), spec_factory()); - } - net - } + pub fn with_spec(n: usize, config: SyncConfig, spec_factory: F) -> Self + where + F: Fn() -> Spec, + { + let mut net = TestNet { + peers: Vec::new(), + started: false, + disconnect_events: Vec::new(), + }; + for _ in 0..n { + net.add_peer_with_private_config(config.clone(), spec_factory()); + } + net + } - pub fn add_peer_with_private_config(&mut self, config: SyncConfig, spec: Spec) { - let channel = IoChannel::disconnected(); - let miner = Arc::new(Miner::new_for_tests(&spec, None)); - let client = EthcoreClient::new( - ClientConfig::default(), - &spec, - test_helpers::new_db(), - miner.clone(), - channel.clone() - ).unwrap(); + pub fn add_peer_with_private_config(&mut self, config: SyncConfig, spec: Spec) { + let channel = IoChannel::disconnected(); + let miner = Arc::new(Miner::new_for_tests(&spec, 
None)); + let client = EthcoreClient::new( + ClientConfig::default(), + &spec, + test_helpers::new_db(), + miner.clone(), + channel.clone(), + ) + .unwrap(); - let private_tx_handler = Arc::new(SimplePrivateTxHandler::default()); - let ss = Arc::new(TestSnapshotService::new()); - let sync = ChainSync::new(config, &*client, Some(private_tx_handler.clone())); - let peer = Arc::new(EthPeer { - sync: RwLock::new(sync), - snapshot_service: ss, - chain: client, - miner, - queue: RwLock::new(VecDeque::new()), - private_tx_handler, - io_queue: RwLock::new(VecDeque::new()), - new_blocks_queue: RwLock::new(VecDeque::new()), - }); - peer.chain.add_notify(peer.clone()); - //private_provider.add_notify(peer.clone()); - self.peers.push(peer); - } + let private_tx_handler = Arc::new(SimplePrivateTxHandler::default()); + let ss = Arc::new(TestSnapshotService::new()); + let sync = ChainSync::new(config, &*client, Some(private_tx_handler.clone())); + let peer = Arc::new(EthPeer { + sync: RwLock::new(sync), + snapshot_service: ss, + chain: client, + miner, + queue: RwLock::new(VecDeque::new()), + private_tx_handler, + io_queue: RwLock::new(VecDeque::new()), + new_blocks_queue: RwLock::new(VecDeque::new()), + }); + peer.chain.add_notify(peer.clone()); + //private_provider.add_notify(peer.clone()); + self.peers.push(peer); + } } -impl

TestNet

where P: Peer { - pub fn peer(&self, i: usize) -> &P { - &self.peers[i] - } +impl

TestNet

+where + P: Peer, +{ + pub fn peer(&self, i: usize) -> &P { + &self.peers[i] + } - pub fn start(&mut self) { - if self.started { - return; - } - for peer in 0..self.peers.len() { - for client in 0..self.peers.len() { - if peer != client { - self.peers[peer].on_connect(client as PeerId); - } - } - } - self.started = true; - } + pub fn start(&mut self) { + if self.started { + return; + } + for peer in 0..self.peers.len() { + for client in 0..self.peers.len() { + if peer != client { + self.peers[peer].on_connect(client as PeerId); + } + } + } + self.started = true; + } - pub fn sync_step(&mut self) { - for peer in 0..self.peers.len() { - let packet = self.peers[peer].pending_message(); - if let Some(packet) = packet { - let disconnecting = { - let recipient = packet.recipient(); - trace!("--- {} -> {} ---", peer, recipient); - let to_disconnect = self.peers[recipient].receive_message(peer as PeerId, packet); - for d in &to_disconnect { - // notify this that disconnecting peers are disconnecting - self.peers[recipient].on_disconnect(*d as PeerId); - self.disconnect_events.push((peer, *d)); - } - to_disconnect - }; - for d in &disconnecting { - // notify other peers that this peer is disconnecting - self.peers[*d].on_disconnect(peer as PeerId); - } - } + pub fn sync_step(&mut self) { + for peer in 0..self.peers.len() { + let packet = self.peers[peer].pending_message(); + if let Some(packet) = packet { + let disconnecting = { + let recipient = packet.recipient(); + trace!("--- {} -> {} ---", peer, recipient); + let to_disconnect = + self.peers[recipient].receive_message(peer as PeerId, packet); + for d in &to_disconnect { + // notify this that disconnecting peers are disconnecting + self.peers[recipient].on_disconnect(*d as PeerId); + self.disconnect_events.push((peer, *d)); + } + to_disconnect + }; + for d in &disconnecting { + // notify other peers that this peer is disconnecting + self.peers[*d].on_disconnect(peer as PeerId); + } + } - self.sync_step_peer(peer); - } - 
} + self.sync_step_peer(peer); + } + } - pub fn sync_step_peer(&mut self, peer_num: usize) { - self.peers[peer_num].sync_step(); - } + pub fn sync_step_peer(&mut self, peer_num: usize) { + self.peers[peer_num].sync_step(); + } - pub fn restart_peer(&mut self, i: usize) { - self.peers[i].restart_sync(); - } + pub fn restart_peer(&mut self, i: usize) { + self.peers[i].restart_sync(); + } - pub fn sync(&mut self) -> u32 { - self.start(); - let mut total_steps = 0; - while !self.done() { - self.sync_step(); - self.deliver_io_messages(); - self.deliver_new_block_messages(); - total_steps += 1; - } - total_steps - } + pub fn sync(&mut self) -> u32 { + self.start(); + let mut total_steps = 0; + while !self.done() { + self.sync_step(); + self.deliver_io_messages(); + self.deliver_new_block_messages(); + total_steps += 1; + } + total_steps + } - pub fn sync_steps(&mut self, count: usize) { - self.start(); - for _ in 0..count { - self.sync_step(); - } - } + pub fn sync_steps(&mut self, count: usize) { + self.start(); + for _ in 0..count { + self.sync_step(); + } + } - pub fn deliver_io_messages(&mut self) { - for peer in self.peers.iter() { - peer.process_all_io_messages(); - } - } + pub fn deliver_io_messages(&mut self) { + for peer in self.peers.iter() { + peer.process_all_io_messages(); + } + } - pub fn deliver_new_block_messages(&mut self) { - for peer in self.peers.iter() { - peer.process_all_new_block_messages(); - } - } + pub fn deliver_new_block_messages(&mut self) { + for peer in self.peers.iter() { + peer.process_all_new_block_messages(); + } + } - pub fn done(&self) -> bool { - self.peers.iter().all(|p| p.is_done()) - } + pub fn done(&self) -> bool { + self.peers.iter().all(|p| p.is_done()) + } } impl TestNet> { - pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) { - let peer = &mut self.peers[peer_id]; - peer.sync.write().chain_new_blocks(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None), &[], &[], &[], &[], &[], &[]); - } + pub 
fn trigger_chain_new_blocks(&mut self, peer_id: usize) { + let peer = &mut self.peers[peer_id]; + peer.sync.write().chain_new_blocks( + &mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None), + &[], + &[], + &[], + &[], + &[], + &[], + ); + } } pub struct TestIoHandler { - pub client: Arc, - pub private_tx_queued: Mutex, + pub client: Arc, + pub private_tx_queued: Mutex, } impl TestIoHandler { - pub fn new(client: Arc) -> Self { - TestIoHandler { - client, - private_tx_queued: Mutex::default(), - } - } + pub fn new(client: Arc) -> Self { + TestIoHandler { + client, + private_tx_queued: Mutex::default(), + } + } } impl IoHandler for TestIoHandler { - fn message(&self, _io: &IoContext, net_message: &ClientIoMessage) { - match *net_message { - ClientIoMessage::Execute(ref exec) => { - *self.private_tx_queued.lock() += 1; - (*exec.0)(&self.client); - }, - _ => {} // ignore other messages - } - } + fn message(&self, _io: &IoContext, net_message: &ClientIoMessage) { + match *net_message { + ClientIoMessage::Execute(ref exec) => { + *self.private_tx_queued.lock() += 1; + (*exec.0)(&self.client); + } + _ => {} // ignore other messages + } + } } impl ChainNotify for EthPeer { - fn new_blocks(&self, new_blocks: NewBlocks) - { - if new_blocks.has_more_blocks_to_import { return } - let (enacted, retracted) = new_blocks.route.into_enacted_retracted(); + fn new_blocks(&self, new_blocks: NewBlocks) { + if new_blocks.has_more_blocks_to_import { + return; + } + let (enacted, retracted) = new_blocks.route.into_enacted_retracted(); - self.new_blocks_queue.write().push_back(NewBlockMessage { - imported: new_blocks.imported, - invalid: new_blocks.invalid, - enacted, - retracted, - sealed: new_blocks.sealed, - proposed: new_blocks.proposed, - }); - } + self.new_blocks_queue.write().push_back(NewBlockMessage { + imported: new_blocks.imported, + invalid: new_blocks.invalid, + enacted, + retracted, + sealed: new_blocks.sealed, + proposed: new_blocks.proposed, + }); + } - 
fn start(&self) {} + fn start(&self) {} - fn stop(&self) {} + fn stop(&self) {} - fn broadcast(&self, message_type: ChainMessageType) { - self.io_queue.write().push_back(message_type) - } + fn broadcast(&self, message_type: ChainMessageType) { + self.io_queue.write().push_back(message_type) + } } diff --git a/ethcore/sync/src/tests/mod.rs b/ethcore/sync/src/tests/mod.rs index 34e04d196..16370ce3e 100644 --- a/ethcore/sync/src/tests/mod.rs +++ b/ethcore/sync/src/tests/mod.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -pub mod helpers; -pub mod snapshot; mod chain; mod consensus; +pub mod helpers; mod private; +pub mod snapshot; #[cfg(feature = "ipc")] mod rpc; diff --git a/ethcore/sync/src/tests/private.rs b/ethcore/sync/src/tests/private.rs index 24de14d93..790c78dbf 100644 --- a/ethcore/sync/src/tests/private.rs +++ b/ethcore/sync/src/tests/private.rs @@ -14,141 +14,187 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use hash::keccak; -use io::{IoHandler, IoChannel}; -use types::transaction::{Transaction, Action}; -use types::ids::BlockId; -use ethcore::CreateContractAddress; -use ethcore::client::{ClientIoMessage, BlockChainClient}; -use ethcore::executive::{contract_address}; -use ethcore::engines; -use ethcore::miner::{self, MinerService}; -use ethcore::spec::Spec; -use ethcore::test_helpers::{push_block_with_transactions}; -use ethcore_private_tx::{Provider, ProviderConfig, NoopEncryptor, Importer, SignedPrivateTransaction, StoringKeyProvider}; +use ethcore::{ + client::{BlockChainClient, ClientIoMessage}, + engines, + executive::contract_address, + miner::{self, MinerService}, + spec::Spec, + test_helpers::push_block_with_transactions, + CreateContractAddress, +}; +use ethcore_private_tx::{ + Importer, NoopEncryptor, Provider, ProviderConfig, SignedPrivateTransaction, StoringKeyProvider, +}; use ethkey::KeyPair; -use tests::helpers::{TestNet, TestIoHandler}; -use rustc_hex::FromHex; +use hash::keccak; +use io::{IoChannel, IoHandler}; use rlp::Rlp; +use rustc_hex::FromHex; +use std::sync::Arc; +use tests::helpers::{TestIoHandler, TestNet}; +use types::{ + ids::BlockId, + transaction::{Action, Transaction}, +}; use SyncConfig; fn seal_spec() -> Spec { - let spec_data = include_str!("../res/private_spec.json"); - Spec::load(&::std::env::temp_dir(), spec_data.as_bytes()).unwrap() + let spec_data = include_str!("../res/private_spec.json"); + Spec::load(&::std::env::temp_dir(), spec_data.as_bytes()).unwrap() } #[test] fn send_private_transaction() { - // Setup two clients - let s0 = KeyPair::from_secret_slice(&keccak("1")).unwrap(); - let s1 = KeyPair::from_secret_slice(&keccak("0")).unwrap(); + // Setup two clients + let s0 = KeyPair::from_secret_slice(&keccak("1")).unwrap(); + let s1 = KeyPair::from_secret_slice(&keccak("0")).unwrap(); - let signer = Arc::new(ethcore_private_tx::KeyPairSigner(vec![s0.clone(), s1.clone()])); + let signer = 
Arc::new(ethcore_private_tx::KeyPairSigner(vec![ + s0.clone(), + s1.clone(), + ])); - let mut net = TestNet::with_spec(2, SyncConfig::default(), seal_spec); - let client0 = net.peer(0).chain.clone(); - let client1 = net.peer(1).chain.clone(); - let io_handler0: Arc> = Arc::new(TestIoHandler::new(net.peer(0).chain.clone())); - let io_handler1: Arc> = Arc::new(TestIoHandler::new(net.peer(1).chain.clone())); + let mut net = TestNet::with_spec(2, SyncConfig::default(), seal_spec); + let client0 = net.peer(0).chain.clone(); + let client1 = net.peer(1).chain.clone(); + let io_handler0: Arc> = + Arc::new(TestIoHandler::new(net.peer(0).chain.clone())); + let io_handler1: Arc> = + Arc::new(TestIoHandler::new(net.peer(1).chain.clone())); - net.peer(0).miner.set_author(miner::Author::Sealer(engines::signer::from_keypair(s0.clone()))); - net.peer(1).miner.set_author(miner::Author::Sealer(engines::signer::from_keypair(s1.clone()))); - net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain) as _); - net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain) as _); - net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0))); - net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1))); + net.peer(0) + .miner + .set_author(miner::Author::Sealer(engines::signer::from_keypair( + s0.clone(), + ))); + net.peer(1) + .miner + .set_author(miner::Author::Sealer(engines::signer::from_keypair( + s1.clone(), + ))); + net.peer(0) + .chain + .engine() + .register_client(Arc::downgrade(&net.peer(0).chain) as _); + net.peer(1) + .chain + .engine() + .register_client(Arc::downgrade(&net.peer(1).chain) as _); + net.peer(0) + .chain + .set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0))); + net.peer(1) + .chain + .set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1))); - let (address, _) = contract_address(CreateContractAddress::FromSenderAndNonce, &s0.address(), &0.into(), 
&[]); - let chain_id = client0.signing_chain_id(); + let (address, _) = contract_address( + CreateContractAddress::FromSenderAndNonce, + &s0.address(), + &0.into(), + &[], + ); + let chain_id = client0.signing_chain_id(); - // Exhange statuses - net.sync(); + // Exhange statuses + net.sync(); - // Setup private providers - let validator_config = ProviderConfig{ - validator_accounts: vec![s1.address()], - signer_account: None, - }; + // Setup private providers + let validator_config = ProviderConfig { + validator_accounts: vec![s1.address()], + signer_account: None, + }; - let signer_config = ProviderConfig{ - validator_accounts: Vec::new(), - signer_account: Some(s0.address()), - }; + let signer_config = ProviderConfig { + validator_accounts: Vec::new(), + signer_account: Some(s0.address()), + }; - let private_keys = Arc::new(StoringKeyProvider::default()); + let private_keys = Arc::new(StoringKeyProvider::default()); - let pm0 = Arc::new(Provider::new( - client0.clone(), - net.peer(0).miner.clone(), - signer.clone(), - Box::new(NoopEncryptor::default()), - signer_config, - IoChannel::to_handler(Arc::downgrade(&io_handler0)), - private_keys.clone(), - )); - pm0.add_notify(net.peers[0].clone()); + let pm0 = Arc::new(Provider::new( + client0.clone(), + net.peer(0).miner.clone(), + signer.clone(), + Box::new(NoopEncryptor::default()), + signer_config, + IoChannel::to_handler(Arc::downgrade(&io_handler0)), + private_keys.clone(), + )); + pm0.add_notify(net.peers[0].clone()); - let pm1 = Arc::new(Provider::new( - client1.clone(), - net.peer(1).miner.clone(), - signer.clone(), - Box::new(NoopEncryptor::default()), - validator_config, - IoChannel::to_handler(Arc::downgrade(&io_handler1)), - private_keys.clone(), - )); - pm1.add_notify(net.peers[1].clone()); + let pm1 = Arc::new(Provider::new( + client1.clone(), + net.peer(1).miner.clone(), + signer.clone(), + Box::new(NoopEncryptor::default()), + validator_config, + IoChannel::to_handler(Arc::downgrade(&io_handler1)), + 
private_keys.clone(), + )); + pm1.add_notify(net.peers[1].clone()); - // Create and deploy contract - let private_contract_test = "6060604052341561000f57600080fd5b60d88061001d6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630c55699c146046578063bc64b76d14607457600080fd5b3415605057600080fd5b60566098565b60405180826000191660001916815260200191505060405180910390f35b3415607e57600080fd5b6096600480803560001916906020019091905050609e565b005b60005481565b8060008160001916905550505600a165627a7a723058206acbdf4b15ca4c2d43e1b1879b830451a34f1e9d02ff1f2f394d8d857e79d2080029".from_hex().unwrap(); - let mut private_create_tx = Transaction::default(); - private_create_tx.action = Action::Create; - private_create_tx.data = private_contract_test; - private_create_tx.gas = 200000.into(); - let private_create_tx_signed = private_create_tx.sign(&s0.secret(), None); - let validators = vec![s1.address()]; - let (public_tx, _) = pm0.public_creation_transaction(BlockId::Latest, &private_create_tx_signed, &validators, 0.into()).unwrap(); - let public_tx = public_tx.sign(&s0.secret(), chain_id); + // Create and deploy contract + let private_contract_test = "6060604052341561000f57600080fd5b60d88061001d6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630c55699c146046578063bc64b76d14607457600080fd5b3415605057600080fd5b60566098565b60405180826000191660001916815260200191505060405180910390f35b3415607e57600080fd5b6096600480803560001916906020019091905050609e565b005b60005481565b8060008160001916905550505600a165627a7a723058206acbdf4b15ca4c2d43e1b1879b830451a34f1e9d02ff1f2f394d8d857e79d2080029".from_hex().unwrap(); + let mut private_create_tx = Transaction::default(); + private_create_tx.action = Action::Create; + private_create_tx.data = private_contract_test; + private_create_tx.gas = 200000.into(); + let private_create_tx_signed = private_create_tx.sign(&s0.secret(), None); + let validators 
= vec![s1.address()]; + let (public_tx, _) = pm0 + .public_creation_transaction( + BlockId::Latest, + &private_create_tx_signed, + &validators, + 0.into(), + ) + .unwrap(); + let public_tx = public_tx.sign(&s0.secret(), chain_id); - let public_tx_copy = public_tx.clone(); - push_block_with_transactions(&client0, &[public_tx]); - push_block_with_transactions(&client1, &[public_tx_copy]); + let public_tx_copy = public_tx.clone(); + push_block_with_transactions(&client0, &[public_tx]); + push_block_with_transactions(&client1, &[public_tx_copy]); - net.sync(); + net.sync(); - //Create private transaction for modifying state - let mut private_tx = Transaction::default(); - private_tx.action = Action::Call(address.clone()); - private_tx.data = "bc64b76d2a00000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap(); //setX(42) - private_tx.gas = 120000.into(); - private_tx.nonce = 1.into(); - let private_tx = private_tx.sign(&s0.secret(), None); - assert!(pm0.create_private_transaction(private_tx).is_ok()); + //Create private transaction for modifying state + let mut private_tx = Transaction::default(); + private_tx.action = Action::Call(address.clone()); + private_tx.data = "bc64b76d2a00000000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap(); //setX(42) + private_tx.gas = 120000.into(); + private_tx.nonce = 1.into(); + let private_tx = private_tx.sign(&s0.secret(), None); + assert!(pm0.create_private_transaction(private_tx).is_ok()); - //send private transaction message to validator - net.sync(); + //send private transaction message to validator + net.sync(); - let validator_handler = net.peer(1).private_tx_handler.clone(); - let received_private_transactions = validator_handler.txs.lock().clone(); - assert_eq!(received_private_transactions.len(), 1); + let validator_handler = net.peer(1).private_tx_handler.clone(); + let received_private_transactions = validator_handler.txs.lock().clone(); + 
assert_eq!(received_private_transactions.len(), 1); - //process received private transaction message - let private_transaction = received_private_transactions[0].clone(); - assert!(pm1.import_private_transaction(&private_transaction).is_ok()); + //process received private transaction message + let private_transaction = received_private_transactions[0].clone(); + assert!(pm1.import_private_transaction(&private_transaction).is_ok()); - //send signed response - net.sync(); + //send signed response + net.sync(); - let sender_handler = net.peer(0).private_tx_handler.clone(); - let received_signed_private_transactions = sender_handler.signed_txs.lock().clone(); - assert_eq!(received_signed_private_transactions.len(), 1); + let sender_handler = net.peer(0).private_tx_handler.clone(); + let received_signed_private_transactions = sender_handler.signed_txs.lock().clone(); + assert_eq!(received_signed_private_transactions.len(), 1); - //process signed response - let signed_private_transaction = received_signed_private_transactions[0].clone(); - assert!(pm0.import_signed_private_transaction(&signed_private_transaction).is_ok()); - let signature: SignedPrivateTransaction = Rlp::new(&signed_private_transaction).as_val().unwrap(); - assert!(pm0.process_signature(&signature).is_ok()); - let local_transactions = net.peer(0).miner.local_transactions(); - assert_eq!(local_transactions.len(), 1); + //process signed response + let signed_private_transaction = received_signed_private_transactions[0].clone(); + assert!(pm0 + .import_signed_private_transaction(&signed_private_transaction) + .is_ok()); + let signature: SignedPrivateTransaction = + Rlp::new(&signed_private_transaction).as_val().unwrap(); + assert!(pm0.process_signature(&signature).is_ok()); + let local_transactions = net.peer(0).miner.local_transactions(); + assert_eq!(local_transactions.len(), 1); } diff --git a/ethcore/sync/src/tests/rpc.rs b/ethcore/sync/src/tests/rpc.rs index 3e0523931..66b69f075 100644 --- 
a/ethcore/sync/src/tests/rpc.rs +++ b/ethcore/sync/src/tests/rpc.rs @@ -15,15 +15,15 @@ // along with Parity Ethereum. If not, see . use super::super::NetworkConfiguration; +use ipc::binary::{deserialize, serialize}; use network::NetworkConfiguration as BasicNetworkConfiguration; use std::convert::From; -use ipc::binary::{serialize, deserialize}; #[test] fn network_settings_serialize() { - let net_cfg = NetworkConfiguration::from(BasicNetworkConfiguration::new_local()); - let serialized = serialize(&net_cfg).unwrap(); - let deserialized = deserialize::(&serialized).unwrap(); + let net_cfg = NetworkConfiguration::from(BasicNetworkConfiguration::new_local()); + let serialized = serialize(&net_cfg).unwrap(); + let deserialized = deserialize::(&serialized).unwrap(); - assert_eq!(net_cfg.udp_port, deserialized.udp_port); + assert_eq!(net_cfg.udp_port, deserialized.udp_port); } diff --git a/ethcore/sync/src/tests/snapshot.rs b/ethcore/sync/src/tests/snapshot.rs index d865adc2a..1d4dac49e 100644 --- a/ethcore/sync/src/tests/snapshot.rs +++ b/ethcore/sync/src/tests/snapshot.rs @@ -14,145 +14,203 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::HashMap; -use std::sync::Arc; -use hash::keccak; -use ethereum_types::H256; -use parking_lot::Mutex; -use bytes::Bytes; -use ethcore::snapshot::{SnapshotService, ManifestData, RestorationStatus}; -use ethcore::client::EachBlockWith; -use types::BlockNumber; use super::helpers::*; -use {SyncConfig, WarpSync}; +use bytes::Bytes; +use ethcore::{ + client::EachBlockWith, + snapshot::{ManifestData, RestorationStatus, SnapshotService}, +}; +use ethereum_types::H256; +use hash::keccak; +use parking_lot::Mutex; +use std::{collections::HashMap, sync::Arc}; +use types::BlockNumber; +use SyncConfig; +use WarpSync; pub struct TestSnapshotService { - manifest: Option, - chunks: HashMap, + manifest: Option, + chunks: HashMap, - restoration_manifest: Mutex>, - state_restoration_chunks: Mutex>, - block_restoration_chunks: Mutex>, + restoration_manifest: Mutex>, + state_restoration_chunks: Mutex>, + block_restoration_chunks: Mutex>, } impl TestSnapshotService { - pub fn new() -> TestSnapshotService { - TestSnapshotService { - manifest: None, - chunks: HashMap::new(), - restoration_manifest: Mutex::new(None), - state_restoration_chunks: Mutex::new(HashMap::new()), - block_restoration_chunks: Mutex::new(HashMap::new()), - } - } + pub fn new() -> TestSnapshotService { + TestSnapshotService { + manifest: None, + chunks: HashMap::new(), + restoration_manifest: Mutex::new(None), + state_restoration_chunks: Mutex::new(HashMap::new()), + block_restoration_chunks: Mutex::new(HashMap::new()), + } + } - pub fn new_with_snapshot(num_chunks: usize, block_hash: H256, block_number: BlockNumber) -> TestSnapshotService { - let num_state_chunks = num_chunks / 2; - let num_block_chunks = num_chunks - num_state_chunks; - let state_chunks: Vec = (0..num_state_chunks).map(|_| H256::random().to_vec()).collect(); - let block_chunks: Vec = (0..num_block_chunks).map(|_| H256::random().to_vec()).collect(); - let manifest = ManifestData { - version: 2, - state_hashes: 
state_chunks.iter().map(|data| keccak(data)).collect(), - block_hashes: block_chunks.iter().map(|data| keccak(data)).collect(), - state_root: H256::new(), - block_number: block_number, - block_hash: block_hash, - }; - let mut chunks: HashMap = state_chunks.into_iter().map(|data| (keccak(&data), data)).collect(); - chunks.extend(block_chunks.into_iter().map(|data| (keccak(&data), data))); - TestSnapshotService { - manifest: Some(manifest), - chunks: chunks, - restoration_manifest: Mutex::new(None), - state_restoration_chunks: Mutex::new(HashMap::new()), - block_restoration_chunks: Mutex::new(HashMap::new()), - } - } + pub fn new_with_snapshot( + num_chunks: usize, + block_hash: H256, + block_number: BlockNumber, + ) -> TestSnapshotService { + let num_state_chunks = num_chunks / 2; + let num_block_chunks = num_chunks - num_state_chunks; + let state_chunks: Vec = (0..num_state_chunks) + .map(|_| H256::random().to_vec()) + .collect(); + let block_chunks: Vec = (0..num_block_chunks) + .map(|_| H256::random().to_vec()) + .collect(); + let manifest = ManifestData { + version: 2, + state_hashes: state_chunks.iter().map(|data| keccak(data)).collect(), + block_hashes: block_chunks.iter().map(|data| keccak(data)).collect(), + state_root: H256::new(), + block_number: block_number, + block_hash: block_hash, + }; + let mut chunks: HashMap = state_chunks + .into_iter() + .map(|data| (keccak(&data), data)) + .collect(); + chunks.extend(block_chunks.into_iter().map(|data| (keccak(&data), data))); + TestSnapshotService { + manifest: Some(manifest), + chunks: chunks, + restoration_manifest: Mutex::new(None), + state_restoration_chunks: Mutex::new(HashMap::new()), + block_restoration_chunks: Mutex::new(HashMap::new()), + } + } } impl SnapshotService for TestSnapshotService { - fn manifest(&self) -> Option { - self.manifest.as_ref().cloned() - } + fn manifest(&self) -> Option { + self.manifest.as_ref().cloned() + } - fn supported_versions(&self) -> Option<(u64, u64)> { - Some((1, 2)) - 
} + fn supported_versions(&self) -> Option<(u64, u64)> { + Some((1, 2)) + } - fn completed_chunks(&self) -> Option> { - Some(vec![]) - } + fn completed_chunks(&self) -> Option> { + Some(vec![]) + } - fn chunk(&self, hash: H256) -> Option { - self.chunks.get(&hash).cloned() - } + fn chunk(&self, hash: H256) -> Option { + self.chunks.get(&hash).cloned() + } - fn status(&self) -> RestorationStatus { - match *self.restoration_manifest.lock() { - Some(ref manifest) if self.state_restoration_chunks.lock().len() == manifest.state_hashes.len() && - self.block_restoration_chunks.lock().len() == manifest.block_hashes.len() => RestorationStatus::Inactive, - Some(ref manifest) => RestorationStatus::Ongoing { - state_chunks: manifest.state_hashes.len() as u32, - block_chunks: manifest.block_hashes.len() as u32, - state_chunks_done: self.state_restoration_chunks.lock().len() as u32, - block_chunks_done: self.block_restoration_chunks.lock().len() as u32, - }, - None => RestorationStatus::Inactive, - } - } + fn status(&self) -> RestorationStatus { + match *self.restoration_manifest.lock() { + Some(ref manifest) + if self.state_restoration_chunks.lock().len() == manifest.state_hashes.len() + && self.block_restoration_chunks.lock().len() + == manifest.block_hashes.len() => + { + RestorationStatus::Inactive + } + Some(ref manifest) => RestorationStatus::Ongoing { + state_chunks: manifest.state_hashes.len() as u32, + block_chunks: manifest.block_hashes.len() as u32, + state_chunks_done: self.state_restoration_chunks.lock().len() as u32, + block_chunks_done: self.block_restoration_chunks.lock().len() as u32, + }, + None => RestorationStatus::Inactive, + } + } - fn begin_restore(&self, manifest: ManifestData) { - let mut restoration_manifest = self.restoration_manifest.lock(); + fn begin_restore(&self, manifest: ManifestData) { + let mut restoration_manifest = self.restoration_manifest.lock(); - if let Some(ref c_manifest) = *restoration_manifest { - if c_manifest.state_root == 
manifest.state_root { - return; - } - } + if let Some(ref c_manifest) = *restoration_manifest { + if c_manifest.state_root == manifest.state_root { + return; + } + } - *restoration_manifest = Some(manifest); - self.state_restoration_chunks.lock().clear(); - self.block_restoration_chunks.lock().clear(); - } + *restoration_manifest = Some(manifest); + self.state_restoration_chunks.lock().clear(); + self.block_restoration_chunks.lock().clear(); + } - fn abort_restore(&self) { - *self.restoration_manifest.lock() = None; - self.state_restoration_chunks.lock().clear(); - self.block_restoration_chunks.lock().clear(); - } + fn abort_restore(&self) { + *self.restoration_manifest.lock() = None; + self.state_restoration_chunks.lock().clear(); + self.block_restoration_chunks.lock().clear(); + } - fn abort_snapshot(&self) {} + fn abort_snapshot(&self) {} - fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { - if self.restoration_manifest.lock().as_ref().map_or(false, |m| m.state_hashes.iter().any(|h| h == &hash)) { - self.state_restoration_chunks.lock().insert(hash, chunk); - } - } + fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { + if self + .restoration_manifest + .lock() + .as_ref() + .map_or(false, |m| m.state_hashes.iter().any(|h| h == &hash)) + { + self.state_restoration_chunks.lock().insert(hash, chunk); + } + } - fn restore_block_chunk(&self, hash: H256, chunk: Bytes) { - if self.restoration_manifest.lock().as_ref().map_or(false, |m| m.block_hashes.iter().any(|h| h == &hash)) { - self.block_restoration_chunks.lock().insert(hash, chunk); - } - } + fn restore_block_chunk(&self, hash: H256, chunk: Bytes) { + if self + .restoration_manifest + .lock() + .as_ref() + .map_or(false, |m| m.block_hashes.iter().any(|h| h == &hash)) + { + self.block_restoration_chunks.lock().insert(hash, chunk); + } + } - fn shutdown(&self) { - self.abort_restore(); - } + fn shutdown(&self) { + self.abort_restore(); + } } #[test] fn snapshot_sync() { - ::env_logger::try_init().ok(); 
- let mut config = SyncConfig::default(); - config.warp_sync = WarpSync::Enabled; - let mut net = TestNet::new_with_config(5, config); - let snapshot_service = Arc::new(TestSnapshotService::new_with_snapshot(16, H256::new(), 500000)); - for i in 0..4 { - net.peer_mut(i).snapshot_service = snapshot_service.clone(); - net.peer(i).chain.add_blocks(1, EachBlockWith::Nothing); - } - net.sync_steps(50); - assert_eq!(net.peer(4).snapshot_service.state_restoration_chunks.lock().len(), net.peer(0).snapshot_service.manifest.as_ref().unwrap().state_hashes.len()); - assert_eq!(net.peer(4).snapshot_service.block_restoration_chunks.lock().len(), net.peer(0).snapshot_service.manifest.as_ref().unwrap().block_hashes.len()); + ::env_logger::try_init().ok(); + let mut config = SyncConfig::default(); + config.warp_sync = WarpSync::Enabled; + let mut net = TestNet::new_with_config(5, config); + let snapshot_service = Arc::new(TestSnapshotService::new_with_snapshot( + 16, + H256::new(), + 500000, + )); + for i in 0..4 { + net.peer_mut(i).snapshot_service = snapshot_service.clone(); + net.peer(i).chain.add_blocks(1, EachBlockWith::Nothing); + } + net.sync_steps(50); + assert_eq!( + net.peer(4) + .snapshot_service + .state_restoration_chunks + .lock() + .len(), + net.peer(0) + .snapshot_service + .manifest + .as_ref() + .unwrap() + .state_hashes + .len() + ); + assert_eq!( + net.peer(4) + .snapshot_service + .block_restoration_chunks + .lock() + .len(), + net.peer(0) + .snapshot_service + .manifest + .as_ref() + .unwrap() + .block_hashes + .len() + ); } diff --git a/ethcore/sync/src/transactions_stats.rs b/ethcore/sync/src/transactions_stats.rs index 91094fa5f..0ee2773a0 100644 --- a/ethcore/sync/src/transactions_stats.rs +++ b/ethcore/sync/src/transactions_stats.rs @@ -15,121 +15,137 @@ // along with Parity Ethereum. If not, see . 
use api::TransactionStats; -use std::hash::BuildHasher; -use std::collections::{HashSet, HashMap}; use ethereum_types::{H256, H512}; use fastmap::H256FastMap; +use std::{ + collections::{HashMap, HashSet}, + hash::BuildHasher, +}; use types::BlockNumber; type NodeId = H512; #[derive(Debug, PartialEq, Clone)] pub struct Stats { - first_seen: BlockNumber, - propagated_to: HashMap, + first_seen: BlockNumber, + propagated_to: HashMap, } impl Stats { - pub fn new(number: BlockNumber) -> Self { - Stats { - first_seen: number, - propagated_to: Default::default(), - } - } + pub fn new(number: BlockNumber) -> Self { + Stats { + first_seen: number, + propagated_to: Default::default(), + } + } } impl<'a> From<&'a Stats> for TransactionStats { - fn from(other: &'a Stats) -> Self { - TransactionStats { - first_seen: other.first_seen, - propagated_to: other.propagated_to - .iter() - .map(|(hash, size)| (*hash, *size)) - .collect(), - } - } + fn from(other: &'a Stats) -> Self { + TransactionStats { + first_seen: other.first_seen, + propagated_to: other + .propagated_to + .iter() + .map(|(hash, size)| (*hash, *size)) + .collect(), + } + } } #[derive(Debug, Default)] pub struct TransactionsStats { - pending_transactions: H256FastMap, + pending_transactions: H256FastMap, } impl TransactionsStats { - /// Increases number of propagations to given `enodeid`. - pub fn propagated(&mut self, hash: &H256, enode_id: Option, current_block_num: BlockNumber) { - let enode_id = enode_id.unwrap_or_default(); - let stats = self.pending_transactions.entry(*hash).or_insert_with(|| Stats::new(current_block_num)); - let count = stats.propagated_to.entry(enode_id).or_insert(0); - *count = count.saturating_add(1); - } + /// Increases number of propagations to given `enodeid`. 
+ pub fn propagated( + &mut self, + hash: &H256, + enode_id: Option, + current_block_num: BlockNumber, + ) { + let enode_id = enode_id.unwrap_or_default(); + let stats = self + .pending_transactions + .entry(*hash) + .or_insert_with(|| Stats::new(current_block_num)); + let count = stats.propagated_to.entry(enode_id).or_insert(0); + *count = count.saturating_add(1); + } - /// Returns propagation stats for given hash or `None` if hash is not known. - #[cfg(test)] - pub fn get(&self, hash: &H256) -> Option<&Stats> { - self.pending_transactions.get(hash) - } + /// Returns propagation stats for given hash or `None` if hash is not known. + #[cfg(test)] + pub fn get(&self, hash: &H256) -> Option<&Stats> { + self.pending_transactions.get(hash) + } - pub fn stats(&self) -> &H256FastMap { - &self.pending_transactions - } + pub fn stats(&self) -> &H256FastMap { + &self.pending_transactions + } - /// Retains only transactions present in given `HashSet`. - pub fn retain(&mut self, hashes: &HashSet) { - let to_remove = self.pending_transactions.keys() - .filter(|hash| !hashes.contains(hash)) - .cloned() - .collect::>(); + /// Retains only transactions present in given `HashSet`. 
+ pub fn retain(&mut self, hashes: &HashSet) { + let to_remove = self + .pending_transactions + .keys() + .filter(|hash| !hashes.contains(hash)) + .cloned() + .collect::>(); - for hash in to_remove { - self.pending_transactions.remove(&hash); - } - } + for hash in to_remove { + self.pending_transactions.remove(&hash); + } + } } #[cfg(test)] mod tests { - use std::collections::{HashMap, HashSet}; - use super::{Stats, TransactionsStats}; + use super::{Stats, TransactionsStats}; + use std::collections::{HashMap, HashSet}; - #[test] - fn should_keep_track_of_propagations() { - // given - let mut stats = TransactionsStats::default(); - let hash = 5.into(); - let enodeid1 = 2.into(); - let enodeid2 = 5.into(); + #[test] + fn should_keep_track_of_propagations() { + // given + let mut stats = TransactionsStats::default(); + let hash = 5.into(); + let enodeid1 = 2.into(); + let enodeid2 = 5.into(); - // when - stats.propagated(&hash, Some(enodeid1), 5); - stats.propagated(&hash, Some(enodeid1), 10); - stats.propagated(&hash, Some(enodeid2), 15); + // when + stats.propagated(&hash, Some(enodeid1), 5); + stats.propagated(&hash, Some(enodeid1), 10); + stats.propagated(&hash, Some(enodeid2), 15); - // then - let stats = stats.get(&hash); - assert_eq!(stats, Some(&Stats { - first_seen: 5, - propagated_to: hash_map![ - enodeid1 => 2, - enodeid2 => 1 - ], - })); - } + // then + let stats = stats.get(&hash); + assert_eq!( + stats, + Some(&Stats { + first_seen: 5, + propagated_to: hash_map![ + enodeid1 => 2, + enodeid2 => 1 + ], + }) + ); + } - #[test] - fn should_remove_hash_from_tracking() { - // given - let mut stats = TransactionsStats::default(); - let hash = 5.into(); - let enodeid1 = 5.into(); - stats.propagated(&hash, Some(enodeid1), 10); + #[test] + fn should_remove_hash_from_tracking() { + // given + let mut stats = TransactionsStats::default(); + let hash = 5.into(); + let enodeid1 = 5.into(); + stats.propagated(&hash, Some(enodeid1), 10); - // when - 
stats.retain(&HashSet::new()); + // when + stats.retain(&HashSet::new()); - // then - let stats = stats.get(&hash); - assert_eq!(stats, None); - } + // then + let stats = stats.get(&hash); + assert_eq!(stats, None); + } } diff --git a/ethcore/types/src/account_diff.rs b/ethcore/types/src/account_diff.rs index 09751ba45..e22d5989f 100644 --- a/ethcore/types/src/account_diff.rs +++ b/ethcore/types/src/account_diff.rs @@ -16,131 +16,180 @@ //! Diff between two accounts. -use std::cmp::*; -use std::fmt; -use std::collections::BTreeMap; -use ethereum_types::{H256, U256}; use bytes::Bytes; +use ethereum_types::{H256, U256}; +use std::{cmp::*, collections::BTreeMap, fmt}; #[derive(Debug, PartialEq, Eq, Clone)] /// Diff type for specifying a change (or not). pub enum Diff { - /// Both sides are the same. - Same, - /// Left (pre, source) side doesn't include value, right side (post, destination) does. - Born(T), - /// Both sides include data; it chaged value between them. - Changed(T, T), - /// Left (pre, source) side does include value, right side (post, destination) does not. - Died(T), + /// Both sides are the same. + Same, + /// Left (pre, source) side doesn't include value, right side (post, destination) does. + Born(T), + /// Both sides include data; it chaged value between them. + Changed(T, T), + /// Left (pre, source) side does include value, right side (post, destination) does not. + Died(T), } impl Diff { - /// Construct new object with given `pre` and `post`. - pub fn new(pre: T, post: T) -> Self where T: Eq { - if pre == post { - Diff::Same - } else { - Diff::Changed(pre, post) - } - } + /// Construct new object with given `pre` and `post`. + pub fn new(pre: T, post: T) -> Self + where + T: Eq, + { + if pre == post { + Diff::Same + } else { + Diff::Changed(pre, post) + } + } - /// Get the before value, if there is one. 
- pub fn pre(&self) -> Option<&T> { match *self { Diff::Died(ref x) | Diff::Changed(ref x, _) => Some(x), _ => None } } + /// Get the before value, if there is one. + pub fn pre(&self) -> Option<&T> { + match *self { + Diff::Died(ref x) | Diff::Changed(ref x, _) => Some(x), + _ => None, + } + } - /// Get the after value, if there is one. - pub fn post(&self) -> Option<&T> { match *self { Diff::Born(ref x) | Diff::Changed(_, ref x) => Some(x), _ => None } } + /// Get the after value, if there is one. + pub fn post(&self) -> Option<&T> { + match *self { + Diff::Born(ref x) | Diff::Changed(_, ref x) => Some(x), + _ => None, + } + } - /// Determine whether there was a change or not. - pub fn is_same(&self) -> bool { match *self { Diff::Same => true, _ => false }} + /// Determine whether there was a change or not. + pub fn is_same(&self) -> bool { + match *self { + Diff::Same => true, + _ => false, + } + } } #[derive(Debug, PartialEq, Eq, Clone)] /// Account diff. pub struct AccountDiff { - /// Change in balance, allowed to be `Diff::Same`. - pub balance: Diff, - /// Change in nonce, allowed to be `Diff::Same`. - pub nonce: Diff, // Allowed to be Same - /// Change in code, allowed to be `Diff::Same`. - pub code: Diff, // Allowed to be Same - /// Change in storage, values are not allowed to be `Diff::Same`. - pub storage: BTreeMap>, + /// Change in balance, allowed to be `Diff::Same`. + pub balance: Diff, + /// Change in nonce, allowed to be `Diff::Same`. + pub nonce: Diff, // Allowed to be Same + /// Change in code, allowed to be `Diff::Same`. + pub code: Diff, // Allowed to be Same + /// Change in storage, values are not allowed to be `Diff::Same`. + pub storage: BTreeMap>, } #[derive(Debug, PartialEq, Eq, Clone)] /// Change in existance type. // TODO: include other types of change. pub enum Existance { - /// Item came into existance. - Born, - /// Item stayed in existance. - Alive, - /// Item went out of existance. - Died, + /// Item came into existance. 
+ Born, + /// Item stayed in existance. + Alive, + /// Item went out of existance. + Died, } impl fmt::Display for Existance { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Existance::Born => write!(f, "+++")?, - Existance::Alive => write!(f, "***")?, - Existance::Died => write!(f, "XXX")?, - } - Ok(()) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Existance::Born => write!(f, "+++")?, + Existance::Alive => write!(f, "***")?, + Existance::Died => write!(f, "XXX")?, + } + Ok(()) + } } impl AccountDiff { - /// Get `Existance` projection. - pub fn existance(&self) -> Existance { - match self.balance { - Diff::Born(_) => Existance::Born, - Diff::Died(_) => Existance::Died, - _ => Existance::Alive, - } - } + /// Get `Existance` projection. + pub fn existance(&self) -> Existance { + match self.balance { + Diff::Born(_) => Existance::Born, + Diff::Died(_) => Existance::Died, + _ => Existance::Alive, + } + } } // TODO: refactor into something nicer. 
fn interpreted_hash(u: &H256) -> String { - if u <= &H256::from(0xffffffff) { - format!("{} = 0x{:x}", U256::from(&**u).low_u32(), U256::from(&**u).low_u32()) - } else if u <= &H256::from(u64::max_value()) { - format!("{} = 0x{:x}", U256::from(&**u).low_u64(), U256::from(&**u).low_u64()) -// } else if u <= &H256::from("0xffffffffffffffffffffffffffffffffffffffff") { -// format!("@{}", Address::from(u)) - } else { - format!("#{}", u) - } + if u <= &H256::from(0xffffffff) { + format!( + "{} = 0x{:x}", + U256::from(&**u).low_u32(), + U256::from(&**u).low_u32() + ) + } else if u <= &H256::from(u64::max_value()) { + format!( + "{} = 0x{:x}", + U256::from(&**u).low_u64(), + U256::from(&**u).low_u64() + ) + // } else if u <= &H256::from("0xffffffffffffffffffffffffffffffffffffffff") { + // format!("@{}", Address::from(u)) + } else { + format!("#{}", u) + } } impl fmt::Display for AccountDiff { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use bytes::ToPretty; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use bytes::ToPretty; - match self.nonce { - Diff::Born(ref x) => write!(f, " non {}", x)?, - Diff::Changed(ref pre, ref post) => write!(f, "#{} ({} {} {})", post, pre, if pre > post {"-"} else {"+"}, *max(pre, post) - * min(pre, post))?, - _ => {}, - } - match self.balance { - Diff::Born(ref x) => write!(f, " bal {}", x)?, - Diff::Changed(ref pre, ref post) => write!(f, "${} ({} {} {})", post, pre, if pre > post {"-"} else {"+"}, *max(pre, post) - *min(pre, post))?, - _ => {}, - } - if let Diff::Born(ref x) = self.code { - write!(f, " code {}", x.pretty())?; - } - write!(f, "\n")?; - for (k, dv) in &self.storage { - match *dv { - Diff::Born(ref v) => write!(f, " + {} => {}\n", interpreted_hash(k), interpreted_hash(v))?, - Diff::Changed(ref pre, ref post) => write!(f, " * {} => {} (was {})\n", interpreted_hash(k), interpreted_hash(post), interpreted_hash(pre))?, - Diff::Died(_) => write!(f, " X {}\n", interpreted_hash(k))?, - _ => {}, - } - } - 
Ok(()) - } + match self.nonce { + Diff::Born(ref x) => write!(f, " non {}", x)?, + Diff::Changed(ref pre, ref post) => write!( + f, + "#{} ({} {} {})", + post, + pre, + if pre > post { "-" } else { "+" }, + *max(pre, post) - *min(pre, post) + )?, + _ => {} + } + match self.balance { + Diff::Born(ref x) => write!(f, " bal {}", x)?, + Diff::Changed(ref pre, ref post) => write!( + f, + "${} ({} {} {})", + post, + pre, + if pre > post { "-" } else { "+" }, + *max(pre, post) - *min(pre, post) + )?, + _ => {} + } + if let Diff::Born(ref x) = self.code { + write!(f, " code {}", x.pretty())?; + } + write!(f, "\n")?; + for (k, dv) in &self.storage { + match *dv { + Diff::Born(ref v) => write!( + f, + " + {} => {}\n", + interpreted_hash(k), + interpreted_hash(v) + )?, + Diff::Changed(ref pre, ref post) => write!( + f, + " * {} => {} (was {})\n", + interpreted_hash(k), + interpreted_hash(post), + interpreted_hash(pre) + )?, + Diff::Died(_) => write!(f, " X {}\n", interpreted_hash(k))?, + _ => {} + } + } + Ok(()) + } } diff --git a/ethcore/types/src/ancestry_action.rs b/ethcore/types/src/ancestry_action.rs index 39b73ef99..f41163af2 100644 --- a/ethcore/types/src/ancestry_action.rs +++ b/ethcore/types/src/ancestry_action.rs @@ -22,6 +22,6 @@ use ethereum_types::H256; /// Actions on a live block's parent block. Only committed when the live block is committed. Those actions here must /// respect the normal blockchain reorganization rules. pub enum AncestryAction { - /// Mark an ancestry block as finalized. - MarkFinalized(H256), + /// Mark an ancestry block as finalized. + MarkFinalized(H256), } diff --git a/ethcore/types/src/basic_account.rs b/ethcore/types/src/basic_account.rs index 039dbac94..14d414e6d 100644 --- a/ethcore/types/src/basic_account.rs +++ b/ethcore/types/src/basic_account.rs @@ -16,17 +16,17 @@ //! Basic account type -- the decoded RLP from the state trie. -use ethereum_types::{U256, H256}; +use ethereum_types::{H256, U256}; /// Basic account type. 
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] pub struct BasicAccount { - /// Nonce of the account. - pub nonce: U256, - /// Balance of the account. - pub balance: U256, - /// Storage root of the account. - pub storage_root: H256, - /// Code hash of the account. - pub code_hash: H256, + /// Nonce of the account. + pub nonce: U256, + /// Balance of the account. + pub balance: U256, + /// Storage root of the account. + pub storage_root: H256, + /// Code hash of the account. + pub code_hash: H256, } diff --git a/ethcore/types/src/block.rs b/ethcore/types/src/block.rs index a423fb1ed..bcff1c748 100644 --- a/ethcore/types/src/block.rs +++ b/ethcore/types/src/block.rs @@ -34,43 +34,43 @@ use bytes::Bytes; use header::Header; -use rlp::{Rlp, RlpStream, Decodable, DecoderError}; +use rlp::{Decodable, DecoderError, Rlp, RlpStream}; use transaction::UnverifiedTransaction; /// A block, encoded as it is on the block chain. #[derive(Default, Debug, Clone, PartialEq)] pub struct Block { - /// The header of this block. - pub header: Header, - /// The transactions in this block. - pub transactions: Vec, - /// The uncles of this block. - pub uncles: Vec

, + /// The header of this block. + pub header: Header, + /// The transactions in this block. + pub transactions: Vec, + /// The uncles of this block. + pub uncles: Vec
, } impl Block { - /// Get the RLP-encoding of the block with the seal. - pub fn rlp_bytes(&self) -> Bytes { - let mut block_rlp = RlpStream::new_list(3); - block_rlp.append(&self.header); - block_rlp.append_list(&self.transactions); - block_rlp.append_list(&self.uncles); - block_rlp.out() - } + /// Get the RLP-encoding of the block with the seal. + pub fn rlp_bytes(&self) -> Bytes { + let mut block_rlp = RlpStream::new_list(3); + block_rlp.append(&self.header); + block_rlp.append_list(&self.transactions); + block_rlp.append_list(&self.uncles); + block_rlp.out() + } } impl Decodable for Block { - fn decode(rlp: &Rlp) -> Result { - if rlp.as_raw().len() != rlp.payload_info()?.total() { - return Err(DecoderError::RlpIsTooBig); - } - if rlp.item_count()? != 3 { - return Err(DecoderError::RlpIncorrectListLen); - } - Ok(Block { - header: rlp.val_at(0)?, - transactions: rlp.list_at(1)?, - uncles: rlp.list_at(2)?, - }) - } + fn decode(rlp: &Rlp) -> Result { + if rlp.as_raw().len() != rlp.payload_info()?.total() { + return Err(DecoderError::RlpIsTooBig); + } + if rlp.item_count()? != 3 { + return Err(DecoderError::RlpIncorrectListLen); + } + Ok(Block { + header: rlp.val_at(0)?, + transactions: rlp.list_at(1)?, + uncles: rlp.list_at(2)?, + }) + } } diff --git a/ethcore/types/src/block_status.rs b/ethcore/types/src/block_status.rs index 0460fcbe6..65f2b35ec 100644 --- a/ethcore/types/src/block_status.rs +++ b/ethcore/types/src/block_status.rs @@ -19,12 +19,12 @@ /// General block status #[derive(Debug, Eq, PartialEq)] pub enum BlockStatus { - /// Part of the blockchain. - InChain, - /// Queued for import. - Queued, - /// Known as bad. - Bad, - /// Unknown. - Unknown, + /// Part of the blockchain. + InChain, + /// Queued for import. + Queued, + /// Known as bad. + Bad, + /// Unknown. 
+ Unknown, } diff --git a/ethcore/types/src/blockchain_info.rs b/ethcore/types/src/blockchain_info.rs index 42158638d..93d74b7a4 100644 --- a/ethcore/types/src/blockchain_info.rs +++ b/ethcore/types/src/blockchain_info.rs @@ -18,49 +18,54 @@ use std::fmt; -use ethereum_types::{U256, H256}; +use ethereum_types::{H256, U256}; use security_level::SecurityLevel; -use {BlockNumber}; +use BlockNumber; /// Information about the blockchain gathered together. #[derive(Clone, Debug)] pub struct BlockChainInfo { - /// Blockchain difficulty. - pub total_difficulty: U256, - /// Block queue difficulty. - pub pending_total_difficulty: U256, - /// Genesis block hash. - pub genesis_hash: H256, - /// Best blockchain block hash. - pub best_block_hash: H256, - /// Best blockchain block number. - pub best_block_number: BlockNumber, - /// Best blockchain block timestamp. - pub best_block_timestamp: u64, - /// Best ancient block hash. - pub ancient_block_hash: Option, - /// Best ancient block number. - pub ancient_block_number: Option, - /// First block on the best sequence. - pub first_block_hash: Option, - /// Number of the first block on the best sequence. - pub first_block_number: Option, + /// Blockchain difficulty. + pub total_difficulty: U256, + /// Block queue difficulty. + pub pending_total_difficulty: U256, + /// Genesis block hash. + pub genesis_hash: H256, + /// Best blockchain block hash. + pub best_block_hash: H256, + /// Best blockchain block number. + pub best_block_number: BlockNumber, + /// Best blockchain block timestamp. + pub best_block_timestamp: u64, + /// Best ancient block hash. + pub ancient_block_hash: Option, + /// Best ancient block number. + pub ancient_block_number: Option, + /// First block on the best sequence. + pub first_block_hash: Option, + /// Number of the first block on the best sequence. + pub first_block_number: Option, } impl BlockChainInfo { - /// Determine the security model for the current state. 
- pub fn security_level(&self) -> SecurityLevel { - // TODO: Detect SecurityLevel::FullState : https://github.com/paritytech/parity-ethereum/issues/3834 - if self.ancient_block_number.is_none() || self.first_block_number.is_none() { - SecurityLevel::FullProofOfWork - } else { - SecurityLevel::PartialProofOfWork(self.best_block_number - self.first_block_number.expect("Guard condition means this is not none")) - } - } + /// Determine the security model for the current state. + pub fn security_level(&self) -> SecurityLevel { + // TODO: Detect SecurityLevel::FullState : https://github.com/paritytech/parity-ethereum/issues/3834 + if self.ancient_block_number.is_none() || self.first_block_number.is_none() { + SecurityLevel::FullProofOfWork + } else { + SecurityLevel::PartialProofOfWork( + self.best_block_number + - self + .first_block_number + .expect("Guard condition means this is not none"), + ) + } + } } impl fmt::Display for BlockChainInfo { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "#{}.{}", self.best_block_number, self.best_block_hash) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "#{}.{}", self.best_block_number, self.best_block_hash) + } } diff --git a/ethcore/types/src/call_analytics.rs b/ethcore/types/src/call_analytics.rs index 902c75e56..23fce3a6c 100644 --- a/ethcore/types/src/call_analytics.rs +++ b/ethcore/types/src/call_analytics.rs @@ -19,10 +19,10 @@ /// Options concerning what analytics we run on the call. #[derive(Eq, PartialEq, Default, Clone, Copy, Debug)] pub struct CallAnalytics { - /// Make a transaction trace. - pub transaction_tracing: bool, - /// Make a VM trace. - pub vm_tracing: bool, - /// Make a diff. - pub state_diffing: bool, + /// Make a transaction trace. + pub transaction_tracing: bool, + /// Make a VM trace. + pub vm_tracing: bool, + /// Make a diff. 
+ pub state_diffing: bool, } diff --git a/ethcore/types/src/data_format.rs b/ethcore/types/src/data_format.rs index 8cfe5cf2b..712d67308 100644 --- a/ethcore/types/src/data_format.rs +++ b/ethcore/types/src/data_format.rs @@ -37,7 +37,7 @@ impl FromStr for DataFormat { match s { "binary" | "bin" => Ok(DataFormat::Binary), "hex" => Ok(DataFormat::Hex), - x => Err(format!("Invalid format: {}", x)) + x => Err(format!("Invalid format: {}", x)), } } } diff --git a/ethcore/types/src/encoded.rs b/ethcore/types/src/encoded.rs index 4680f95af..c7a7f97e5 100644 --- a/ethcore/types/src/encoded.rs +++ b/ethcore/types/src/encoded.rs @@ -24,13 +24,13 @@ //! decoded object where parts like the hash can be saved. use block::Block as FullBlock; -use ethereum_types::{H256, Bloom, U256, Address}; +use ethereum_types::{Address, Bloom, H256, U256}; use hash::keccak; -use header::{Header as FullHeader}; +use header::Header as FullHeader; use heapsize::HeapSizeOf; use rlp::{self, Rlp, RlpStream}; use transaction::UnverifiedTransaction; -use views::{self, BlockView, HeaderView, BodyView}; +use views::{self, BlockView, BodyView, HeaderView}; use BlockNumber; /// Owning header view. @@ -38,78 +38,118 @@ use BlockNumber; pub struct Header(Vec); impl HeapSizeOf for Header { - fn heap_size_of_children(&self) -> usize { self.0.heap_size_of_children() } + fn heap_size_of_children(&self) -> usize { + self.0.heap_size_of_children() + } } impl Header { - /// Create a new owning header view. - /// Expects the data to be an RLP-encoded header -- any other case will likely lead to - /// panics further down the line. - pub fn new(encoded: Vec) -> Self { Header(encoded) } + /// Create a new owning header view. + /// Expects the data to be an RLP-encoded header -- any other case will likely lead to + /// panics further down the line. + pub fn new(encoded: Vec) -> Self { + Header(encoded) + } - /// Upgrade this encoded view to a fully owned `Header` object. 
- pub fn decode(&self) -> Result { - rlp::decode(&self.0) - } + /// Upgrade this encoded view to a fully owned `Header` object. + pub fn decode(&self) -> Result { + rlp::decode(&self.0) + } - /// Get a borrowed header view onto the data. - #[inline] - pub fn view(&self) -> HeaderView { view!(HeaderView, &self.0) } + /// Get a borrowed header view onto the data. + #[inline] + pub fn view(&self) -> HeaderView { + view!(HeaderView, &self.0) + } - /// Get the rlp of the header. - #[inline] - pub fn rlp(&self) -> Rlp { Rlp::new(&self.0) } + /// Get the rlp of the header. + #[inline] + pub fn rlp(&self) -> Rlp { + Rlp::new(&self.0) + } - /// Consume the view and return the raw bytes. - pub fn into_inner(self) -> Vec { self.0 } + /// Consume the view and return the raw bytes. + pub fn into_inner(self) -> Vec { + self.0 + } } // forwarders to borrowed view. impl Header { - /// Returns the header hash. - pub fn hash(&self) -> H256 { keccak(&self.0) } + /// Returns the header hash. + pub fn hash(&self) -> H256 { + keccak(&self.0) + } - /// Returns the parent hash. - pub fn parent_hash(&self) -> H256 { self.view().parent_hash() } + /// Returns the parent hash. + pub fn parent_hash(&self) -> H256 { + self.view().parent_hash() + } - /// Returns the uncles hash. - pub fn uncles_hash(&self) -> H256 { self.view().uncles_hash() } + /// Returns the uncles hash. + pub fn uncles_hash(&self) -> H256 { + self.view().uncles_hash() + } - /// Returns the author. - pub fn author(&self) -> Address { self.view().author() } + /// Returns the author. + pub fn author(&self) -> Address { + self.view().author() + } - /// Returns the state root. - pub fn state_root(&self) -> H256 { self.view().state_root() } + /// Returns the state root. + pub fn state_root(&self) -> H256 { + self.view().state_root() + } - /// Returns the transaction trie root. - pub fn transactions_root(&self) -> H256 { self.view().transactions_root() } + /// Returns the transaction trie root. 
+ pub fn transactions_root(&self) -> H256 { + self.view().transactions_root() + } - /// Returns the receipts trie root - pub fn receipts_root(&self) -> H256 { self.view().receipts_root() } + /// Returns the receipts trie root + pub fn receipts_root(&self) -> H256 { + self.view().receipts_root() + } - /// Returns the block log bloom - pub fn log_bloom(&self) -> Bloom { self.view().log_bloom() } + /// Returns the block log bloom + pub fn log_bloom(&self) -> Bloom { + self.view().log_bloom() + } - /// Difficulty of this block - pub fn difficulty(&self) -> U256 { self.view().difficulty() } + /// Difficulty of this block + pub fn difficulty(&self) -> U256 { + self.view().difficulty() + } - /// Number of this block. - pub fn number(&self) -> BlockNumber { self.view().number() } + /// Number of this block. + pub fn number(&self) -> BlockNumber { + self.view().number() + } - /// Time this block was produced. - pub fn timestamp(&self) -> u64 { self.view().timestamp() } + /// Time this block was produced. + pub fn timestamp(&self) -> u64 { + self.view().timestamp() + } - /// Gas limit of this block. - pub fn gas_limit(&self) -> U256 { self.view().gas_limit() } + /// Gas limit of this block. + pub fn gas_limit(&self) -> U256 { + self.view().gas_limit() + } - /// Total gas used in this block. - pub fn gas_used(&self) -> U256 { self.view().gas_used() } + /// Total gas used in this block. + pub fn gas_used(&self) -> U256 { + self.view().gas_used() + } - /// Block extra data. - pub fn extra_data(&self) -> Vec { self.view().extra_data() } + /// Block extra data. + pub fn extra_data(&self) -> Vec { + self.view().extra_data() + } - /// Engine-specific seal fields. - pub fn seal(&self) -> Vec> { self.view().seal() } + /// Engine-specific seal fields. + pub fn seal(&self) -> Vec> { + self.view().seal() + } } /// Owning block body view. 
@@ -117,64 +157,92 @@ impl Header { pub struct Body(Vec); impl HeapSizeOf for Body { - fn heap_size_of_children(&self) -> usize { self.0.heap_size_of_children() } + fn heap_size_of_children(&self) -> usize { + self.0.heap_size_of_children() + } } impl Body { - /// Create a new owning block body view. The raw bytes passed in must be an rlp-encoded block - /// body. - pub fn new(raw: Vec) -> Self { Body(raw) } + /// Create a new owning block body view. The raw bytes passed in must be an rlp-encoded block + /// body. + pub fn new(raw: Vec) -> Self { + Body(raw) + } - /// Get a borrowed view of the data within. - #[inline] - pub fn view(&self) -> BodyView { view!(BodyView, &self.0) } + /// Get a borrowed view of the data within. + #[inline] + pub fn view(&self) -> BodyView { + view!(BodyView, &self.0) + } - /// Fully decode this block body. - pub fn decode(&self) -> (Vec, Vec) { - (self.view().transactions(), self.view().uncles()) - } + /// Fully decode this block body. + pub fn decode(&self) -> (Vec, Vec) { + (self.view().transactions(), self.view().uncles()) + } - /// Get the RLP of this block body. - #[inline] - pub fn rlp(&self) -> Rlp { - Rlp::new(&self.0) - } + /// Get the RLP of this block body. + #[inline] + pub fn rlp(&self) -> Rlp { + Rlp::new(&self.0) + } - /// Consume the view and return the raw bytes. - pub fn into_inner(self) -> Vec { self.0 } + /// Consume the view and return the raw bytes. + pub fn into_inner(self) -> Vec { + self.0 + } } // forwarders to borrowed view. impl Body { - /// Get raw rlp of transactions - pub fn transactions_rlp(&self) -> Rlp { self.view().transactions_rlp().rlp } + /// Get raw rlp of transactions + pub fn transactions_rlp(&self) -> Rlp { + self.view().transactions_rlp().rlp + } - /// Get a vector of all transactions. - pub fn transactions(&self) -> Vec { self.view().transactions() } + /// Get a vector of all transactions. 
+ pub fn transactions(&self) -> Vec { + self.view().transactions() + } - /// Number of transactions in the block. - pub fn transactions_count(&self) -> usize { self.view().transactions_count() } + /// Number of transactions in the block. + pub fn transactions_count(&self) -> usize { + self.view().transactions_count() + } - /// A view over each transaction in the block. - pub fn transaction_views(&self) -> Vec { self.view().transaction_views() } + /// A view over each transaction in the block. + pub fn transaction_views(&self) -> Vec { + self.view().transaction_views() + } - /// The hash of each transaction in the block. - pub fn transaction_hashes(&self) -> Vec { self.view().transaction_hashes() } + /// The hash of each transaction in the block. + pub fn transaction_hashes(&self) -> Vec { + self.view().transaction_hashes() + } - /// Get raw rlp of uncle headers - pub fn uncles_rlp(&self) -> Rlp { self.view().uncles_rlp().rlp } + /// Get raw rlp of uncle headers + pub fn uncles_rlp(&self) -> Rlp { + self.view().uncles_rlp().rlp + } - /// Decode uncle headers. - pub fn uncles(&self) -> Vec { self.view().uncles() } + /// Decode uncle headers. + pub fn uncles(&self) -> Vec { + self.view().uncles() + } - /// Number of uncles. - pub fn uncles_count(&self) -> usize { self.view().uncles_count() } + /// Number of uncles. + pub fn uncles_count(&self) -> usize { + self.view().uncles_count() + } - /// Borrowed view over each uncle. - pub fn uncle_views(&self) -> Vec { self.view().uncle_views() } + /// Borrowed view over each uncle. + pub fn uncle_views(&self) -> Vec { + self.view().uncle_views() + } - /// Hash of each uncle. - pub fn uncle_hashes(&self) -> Vec { self.view().uncle_hashes() } + /// Hash of each uncle. + pub fn uncle_hashes(&self) -> Vec { + self.view().uncle_hashes() + } } /// Owning block view. 
@@ -182,125 +250,187 @@ impl Body { pub struct Block(Vec); impl HeapSizeOf for Block { - fn heap_size_of_children(&self) -> usize { self.0.heap_size_of_children() } + fn heap_size_of_children(&self) -> usize { + self.0.heap_size_of_children() + } } impl Block { - /// Create a new owning block view. The raw bytes passed in must be an rlp-encoded block. - pub fn new(raw: Vec) -> Self { Block(raw) } + /// Create a new owning block view. The raw bytes passed in must be an rlp-encoded block. + pub fn new(raw: Vec) -> Self { + Block(raw) + } - /// Create a new owning block view by concatenating the encoded header and body - pub fn new_from_header_and_body(header: &views::HeaderView, body: &views::BodyView) -> Self { - let mut stream = RlpStream::new_list(3); - stream.append_raw(header.rlp().as_raw(), 1); - stream.append_raw(body.transactions_rlp().as_raw(), 1); - stream.append_raw(body.uncles_rlp().as_raw(), 1); - Block::new(stream.out()) - } + /// Create a new owning block view by concatenating the encoded header and body + pub fn new_from_header_and_body(header: &views::HeaderView, body: &views::BodyView) -> Self { + let mut stream = RlpStream::new_list(3); + stream.append_raw(header.rlp().as_raw(), 1); + stream.append_raw(body.transactions_rlp().as_raw(), 1); + stream.append_raw(body.uncles_rlp().as_raw(), 1); + Block::new(stream.out()) + } - /// Get a borrowed view of the whole block. - #[inline] - pub fn view(&self) -> BlockView { view!(BlockView, &self.0) } + /// Get a borrowed view of the whole block. + #[inline] + pub fn view(&self) -> BlockView { + view!(BlockView, &self.0) + } - /// Get a borrowed view of the block header. - #[inline] - pub fn header_view(&self) -> HeaderView { self.view().header_view() } + /// Get a borrowed view of the block header. + #[inline] + pub fn header_view(&self) -> HeaderView { + self.view().header_view() + } - /// Decode to a full block. - pub fn decode(&self) -> Result { rlp::decode(&self.0) } + /// Decode to a full block. 
+ pub fn decode(&self) -> Result { + rlp::decode(&self.0) + } - /// Decode the header. - pub fn decode_header(&self) -> FullHeader { self.view().rlp().val_at(0) } + /// Decode the header. + pub fn decode_header(&self) -> FullHeader { + self.view().rlp().val_at(0) + } - /// Clone the encoded header. - pub fn header(&self) -> Header { Header(self.view().rlp().at(0).as_raw().to_vec()) } + /// Clone the encoded header. + pub fn header(&self) -> Header { + Header(self.view().rlp().at(0).as_raw().to_vec()) + } - /// Get the rlp of this block. - #[inline] - pub fn rlp(&self) -> Rlp { - Rlp::new(&self.0) - } + /// Get the rlp of this block. + #[inline] + pub fn rlp(&self) -> Rlp { + Rlp::new(&self.0) + } - /// Consume the view and return the raw bytes. - pub fn into_inner(self) -> Vec { self.0 } + /// Consume the view and return the raw bytes. + pub fn into_inner(self) -> Vec { + self.0 + } - /// Returns the reference to slice of bytes - pub fn raw(&self) -> &[u8] { - &self.0 - } + /// Returns the reference to slice of bytes + pub fn raw(&self) -> &[u8] { + &self.0 + } } // forwarders to borrowed header view. impl Block { - /// Returns the header hash. - pub fn hash(&self) -> H256 { self.header_view().hash() } + /// Returns the header hash. + pub fn hash(&self) -> H256 { + self.header_view().hash() + } - /// Returns the parent hash. - pub fn parent_hash(&self) -> H256 { self.header_view().parent_hash() } + /// Returns the parent hash. + pub fn parent_hash(&self) -> H256 { + self.header_view().parent_hash() + } - /// Returns the uncles hash. - pub fn uncles_hash(&self) -> H256 { self.header_view().uncles_hash() } + /// Returns the uncles hash. + pub fn uncles_hash(&self) -> H256 { + self.header_view().uncles_hash() + } - /// Returns the author. - pub fn author(&self) -> Address { self.header_view().author() } + /// Returns the author. + pub fn author(&self) -> Address { + self.header_view().author() + } - /// Returns the state root. 
- pub fn state_root(&self) -> H256 { self.header_view().state_root() } + /// Returns the state root. + pub fn state_root(&self) -> H256 { + self.header_view().state_root() + } - /// Returns the transaction trie root. - pub fn transactions_root(&self) -> H256 { self.header_view().transactions_root() } + /// Returns the transaction trie root. + pub fn transactions_root(&self) -> H256 { + self.header_view().transactions_root() + } - /// Returns the receipts trie root - pub fn receipts_root(&self) -> H256 { self.header_view().receipts_root() } + /// Returns the receipts trie root + pub fn receipts_root(&self) -> H256 { + self.header_view().receipts_root() + } - /// Returns the block log bloom - pub fn log_bloom(&self) -> Bloom { self.header_view().log_bloom() } + /// Returns the block log bloom + pub fn log_bloom(&self) -> Bloom { + self.header_view().log_bloom() + } - /// Difficulty of this block - pub fn difficulty(&self) -> U256 { self.header_view().difficulty() } + /// Difficulty of this block + pub fn difficulty(&self) -> U256 { + self.header_view().difficulty() + } - /// Number of this block. - pub fn number(&self) -> BlockNumber { self.header_view().number() } + /// Number of this block. + pub fn number(&self) -> BlockNumber { + self.header_view().number() + } - /// Time this block was produced. - pub fn timestamp(&self) -> u64 { self.header_view().timestamp() } + /// Time this block was produced. + pub fn timestamp(&self) -> u64 { + self.header_view().timestamp() + } - /// Gas limit of this block. - pub fn gas_limit(&self) -> U256 { self.header_view().gas_limit() } + /// Gas limit of this block. + pub fn gas_limit(&self) -> U256 { + self.header_view().gas_limit() + } - /// Total gas used in this block. - pub fn gas_used(&self) -> U256 { self.header_view().gas_used() } + /// Total gas used in this block. + pub fn gas_used(&self) -> U256 { + self.header_view().gas_used() + } - /// Block extra data. 
- pub fn extra_data(&self) -> Vec { self.header_view().extra_data() } + /// Block extra data. + pub fn extra_data(&self) -> Vec { + self.header_view().extra_data() + } - /// Engine-specific seal fields. - pub fn seal(&self) -> Vec> { self.header_view().seal() } + /// Engine-specific seal fields. + pub fn seal(&self) -> Vec> { + self.header_view().seal() + } } // forwarders to body view. impl Block { - /// Get a vector of all transactions. - pub fn transactions(&self) -> Vec { self.view().transactions() } + /// Get a vector of all transactions. + pub fn transactions(&self) -> Vec { + self.view().transactions() + } - /// Number of transactions in the block. - pub fn transactions_count(&self) -> usize { self.view().transactions_count() } + /// Number of transactions in the block. + pub fn transactions_count(&self) -> usize { + self.view().transactions_count() + } - /// A view over each transaction in the block. - pub fn transaction_views(&self) -> Vec { self.view().transaction_views() } + /// A view over each transaction in the block. + pub fn transaction_views(&self) -> Vec { + self.view().transaction_views() + } - /// The hash of each transaction in the block. - pub fn transaction_hashes(&self) -> Vec { self.view().transaction_hashes() } + /// The hash of each transaction in the block. + pub fn transaction_hashes(&self) -> Vec { + self.view().transaction_hashes() + } - /// Decode uncle headers. - pub fn uncles(&self) -> Vec { self.view().uncles() } + /// Decode uncle headers. + pub fn uncles(&self) -> Vec { + self.view().uncles() + } - /// Number of uncles. - pub fn uncles_count(&self) -> usize { self.view().uncles_count() } + /// Number of uncles. + pub fn uncles_count(&self) -> usize { + self.view().uncles_count() + } - /// Borrowed view over each uncle. - pub fn uncle_views(&self) -> Vec { self.view().uncle_views() } + /// Borrowed view over each uncle. + pub fn uncle_views(&self) -> Vec { + self.view().uncle_views() + } - /// Hash of each uncle. 
- pub fn uncle_hashes(&self) -> Vec { self.view().uncle_hashes() } + /// Hash of each uncle. + pub fn uncle_hashes(&self) -> Vec { + self.view().uncle_hashes() + } } diff --git a/ethcore/types/src/engines/epoch.rs b/ethcore/types/src/engines/epoch.rs index 2a43b4775..0ed200732 100644 --- a/ethcore/types/src/engines/epoch.rs +++ b/ethcore/types/src/engines/epoch.rs @@ -18,56 +18,55 @@ use ethereum_types::H256; -use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; /// A full epoch transition. #[derive(Debug, Clone)] pub struct Transition { - /// Block hash at which the transition occurred. - pub block_hash: H256, - /// Block number at which the transition occurred. - pub block_number: u64, - /// "transition/epoch" proof from the engine combined with a finality proof. - pub proof: Vec, + /// Block hash at which the transition occurred. + pub block_hash: H256, + /// Block number at which the transition occurred. + pub block_number: u64, + /// "transition/epoch" proof from the engine combined with a finality proof. + pub proof: Vec, } impl Encodable for Transition { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3) - .append(&self.block_hash) - .append(&self.block_number) - .append(&self.proof); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3) + .append(&self.block_hash) + .append(&self.block_number) + .append(&self.proof); + } } impl Decodable for Transition { - fn decode(rlp: &Rlp) -> Result { - Ok(Transition { - block_hash: rlp.val_at(0)?, - block_number: rlp.val_at(1)?, - proof: rlp.val_at(2)?, - }) - } + fn decode(rlp: &Rlp) -> Result { + Ok(Transition { + block_hash: rlp.val_at(0)?, + block_number: rlp.val_at(1)?, + proof: rlp.val_at(2)?, + }) + } } /// An epoch transition pending a finality proof. /// Not all transitions need one. pub struct PendingTransition { - /// "transition/epoch" proof from the engine. 
- pub proof: Vec, + /// "transition/epoch" proof from the engine. + pub proof: Vec, } impl Encodable for PendingTransition { - fn rlp_append(&self, s: &mut RlpStream) { - s.append(&self.proof); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.proof); + } } impl Decodable for PendingTransition { - fn decode(rlp: &Rlp) -> Result { - Ok(PendingTransition { - proof: rlp.as_val()?, - }) - } + fn decode(rlp: &Rlp) -> Result { + Ok(PendingTransition { + proof: rlp.as_val()?, + }) + } } - diff --git a/ethcore/types/src/engines/mod.rs b/ethcore/types/src/engines/mod.rs index cc622bbe6..1c6646d6c 100644 --- a/ethcore/types/src/engines/mod.rs +++ b/ethcore/types/src/engines/mod.rs @@ -21,8 +21,8 @@ pub mod epoch; /// Fork choice. #[derive(Debug, PartialEq, Eq)] pub enum ForkChoice { - /// Choose the new block. - New, - /// Choose the current best block. - Old, + /// Choose the new block. + New, + /// Choose the current best block. + Old, } diff --git a/ethcore/types/src/filter.rs b/ethcore/types/src/filter.rs index 71e8d3944..60b896aaa 100644 --- a/ethcore/types/src/filter.rs +++ b/ethcore/types/src/filter.rs @@ -16,233 +16,253 @@ //! Blockchain filter -use ethereum_types::{H256, Address, Bloom, BloomInput}; +use ethereum_types::{Address, Bloom, BloomInput, H256}; use ids::BlockId; use log_entry::LogEntry; /// Blockchain Filter. #[derive(Debug, PartialEq)] pub struct Filter { - /// Blockchain will be searched from this block. - pub from_block: BlockId, + /// Blockchain will be searched from this block. + pub from_block: BlockId, - /// Till this block. - pub to_block: BlockId, + /// Till this block. + pub to_block: BlockId, - /// Search addresses. - /// - /// If None, match all. - /// If specified, log must be produced by one of these addresses. - pub address: Option>, + /// Search addresses. + /// + /// If None, match all. + /// If specified, log must be produced by one of these addresses. + pub address: Option>, - /// Search topics. 
- /// - /// If None, match all. - /// If specified, log must contain one of these topics. - pub topics: Vec>>, + /// Search topics. + /// + /// If None, match all. + /// If specified, log must contain one of these topics. + pub topics: Vec>>, - /// Logs limit - /// - /// If None, return all logs - /// If specified, should only return *last* `n` logs. - pub limit: Option, + /// Logs limit + /// + /// If None, return all logs + /// If specified, should only return *last* `n` logs. + pub limit: Option, } impl Clone for Filter { - fn clone(&self) -> Self { - let mut topics = [ - None, - None, - None, - None, - ]; - for i in 0..4 { - topics[i] = self.topics[i].clone(); - } + fn clone(&self) -> Self { + let mut topics = [None, None, None, None]; + for i in 0..4 { + topics[i] = self.topics[i].clone(); + } - Filter { - from_block: self.from_block.clone(), - to_block: self.to_block.clone(), - address: self.address.clone(), - topics: topics[..].to_vec(), - limit: self.limit, - } - } + Filter { + from_block: self.from_block.clone(), + to_block: self.to_block.clone(), + address: self.address.clone(), + topics: topics[..].to_vec(), + limit: self.limit, + } + } } impl Filter { - /// Returns combinations of each address and topic. - pub fn bloom_possibilities(&self) -> Vec { - let blooms = match self.address { - Some(ref addresses) if !addresses.is_empty() => - addresses.iter() - .map(|ref address| Bloom::from(BloomInput::Raw(address))) - .collect(), - _ => vec![Bloom::default()] - }; + /// Returns combinations of each address and topic. 
+ pub fn bloom_possibilities(&self) -> Vec { + let blooms = match self.address { + Some(ref addresses) if !addresses.is_empty() => addresses + .iter() + .map(|ref address| Bloom::from(BloomInput::Raw(address))) + .collect(), + _ => vec![Bloom::default()], + }; - self.topics.iter().fold(blooms, |bs, topic| match *topic { - None => bs, - Some(ref topics) => bs.into_iter().flat_map(|bloom| { - topics.into_iter().map(|topic| { - let mut b = bloom.clone(); - b.accrue(BloomInput::Raw(topic)); - b - }).collect::>() - }).collect() - }) - } + self.topics.iter().fold(blooms, |bs, topic| match *topic { + None => bs, + Some(ref topics) => bs + .into_iter() + .flat_map(|bloom| { + topics + .into_iter() + .map(|topic| { + let mut b = bloom.clone(); + b.accrue(BloomInput::Raw(topic)); + b + }) + .collect::>() + }) + .collect(), + }) + } - /// Returns true if given log entry matches filter. - pub fn matches(&self, log: &LogEntry) -> bool { - let matches = match self.address { - Some(ref addresses) if !addresses.is_empty() => addresses.iter().any(|address| &log.address == address), - _ => true - }; + /// Returns true if given log entry matches filter. 
+ pub fn matches(&self, log: &LogEntry) -> bool { + let matches = match self.address { + Some(ref addresses) if !addresses.is_empty() => { + addresses.iter().any(|address| &log.address == address) + } + _ => true, + }; - matches && self.topics.iter().enumerate().all(|(i, topic)| match *topic { - Some(ref topics) if !topics.is_empty() => topics.iter().any(|topic| log.topics.get(i) == Some(topic)), - _ => true - }) - } + matches + && self + .topics + .iter() + .enumerate() + .all(|(i, topic)| match *topic { + Some(ref topics) if !topics.is_empty() => { + topics.iter().any(|topic| log.topics.get(i) == Some(topic)) + } + _ => true, + }) + } } #[cfg(test)] mod tests { - use ethereum_types::Bloom; - use filter::Filter; - use ids::BlockId; - use log_entry::LogEntry; + use ethereum_types::Bloom; + use filter::Filter; + use ids::BlockId; + use log_entry::LogEntry; - #[test] - fn test_bloom_possibilities_none() { - let none_filter = Filter { - from_block: BlockId::Earliest, - to_block: BlockId::Latest, - address: None, - topics: vec![None, None, None, None], - limit: None, - }; + #[test] + fn test_bloom_possibilities_none() { + let none_filter = Filter { + from_block: BlockId::Earliest, + to_block: BlockId::Latest, + address: None, + topics: vec![None, None, None, None], + limit: None, + }; - let possibilities = none_filter.bloom_possibilities(); - assert_eq!(possibilities.len(), 1); - assert!(possibilities[0].is_zero()) - } + let possibilities = none_filter.bloom_possibilities(); + assert_eq!(possibilities.len(), 1); + assert!(possibilities[0].is_zero()) + } - // block 399849 - #[test] - fn test_bloom_possibilities_single_address_and_topic() { - let filter = Filter { - from_block: BlockId::Earliest, - to_block: BlockId::Latest, - address: Some(vec!["b372018f3be9e171df0581136b59d2faf73a7d5d".into()]), - topics: vec![ - Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), - None, - None, - None, - ], - limit: None, - }; + // block 399849 + 
#[test] + fn test_bloom_possibilities_single_address_and_topic() { + let filter = Filter { + from_block: BlockId::Earliest, + to_block: BlockId::Latest, + address: Some(vec!["b372018f3be9e171df0581136b59d2faf73a7d5d".into()]), + topics: vec![ + Some(vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + ]), + None, + None, + None, + ], + limit: None, + }; - let possibilities = filter.bloom_possibilities(); - assert_eq!(possibilities, vec!["00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000004000000004000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000".into()] as Vec); - } + let possibilities = filter.bloom_possibilities(); + assert_eq!(possibilities, vec!["00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000004000000004000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000".into()] as Vec); + } - #[test] - fn test_bloom_possibilities_single_address_and_many_topics() { - let filter = Filter { - from_block: BlockId::Earliest, - to_block: BlockId::Latest, - address: Some(vec!["b372018f3be9e171df0581136b59d2faf73a7d5d".into()]), - topics: vec![ - Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), - 
Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), - None, - None, - ], - limit: None, - }; + #[test] + fn test_bloom_possibilities_single_address_and_many_topics() { + let filter = Filter { + from_block: BlockId::Earliest, + to_block: BlockId::Latest, + address: Some(vec!["b372018f3be9e171df0581136b59d2faf73a7d5d".into()]), + topics: vec![ + Some(vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + ]), + Some(vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + ]), + None, + None, + ], + limit: None, + }; - let possibilities = filter.bloom_possibilities(); - assert_eq!(possibilities, vec!["00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000004000000004000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000".into()] as Vec); - } + let possibilities = filter.bloom_possibilities(); + assert_eq!(possibilities, vec!["00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000004000000004000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000".into()] as Vec); + } - #[test] - fn test_bloom_possibilites_multiple_addresses_and_topics() { - let filter = Filter { - from_block: BlockId::Earliest, - to_block: 
BlockId::Latest, - address: Some(vec![ - "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), - "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), - ]), - topics: vec![ - Some(vec![ - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into() - ]), - Some(vec![ - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into() - ]), - Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), - None - ], - limit: None, - }; + #[test] + fn test_bloom_possibilites_multiple_addresses_and_topics() { + let filter = Filter { + from_block: BlockId::Earliest, + to_block: BlockId::Latest, + address: Some(vec![ + "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), + "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), + ]), + topics: vec![ + Some(vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + ]), + Some(vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + ]), + Some(vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + ]), + None, + ], + limit: None, + }; - // number of possibilites should be equal 2 * 2 * 2 * 1 = 8 - let possibilities = filter.bloom_possibilities(); - assert_eq!(possibilities.len(), 8); - assert_eq!(possibilities[0], 
Bloom::from("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000004000000004000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000")); - } + // number of possibilites should be equal 2 * 2 * 2 * 1 = 8 + let possibilities = filter.bloom_possibilities(); + assert_eq!(possibilities.len(), 8); + assert_eq!(possibilities[0], Bloom::from("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000004000000004000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000")); + } - #[test] - fn test_filter_matches() { - let filter = Filter { - from_block: BlockId::Earliest, - to_block: BlockId::Latest, - address: Some(vec!["b372018f3be9e171df0581136b59d2faf73a7d5d".into()]), - topics: vec![ - Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), - Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into()]), - None, - None, - ], - limit: None, - }; + #[test] + fn test_filter_matches() { + let filter = Filter { + from_block: BlockId::Earliest, + to_block: BlockId::Latest, + address: Some(vec!["b372018f3be9e171df0581136b59d2faf73a7d5d".into()]), + topics: vec![ + Some(vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + ]), + 
Some(vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into(), + ]), + None, + None, + ], + limit: None, + }; - let entry0 = LogEntry { - address: "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), - topics: vec![ - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into(), - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), - ], - data: vec![] - }; + let entry0 = LogEntry { + address: "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), + topics: vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into(), + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + ], + data: vec![], + }; - let entry1 = LogEntry { - address: "b372018f3be9e171df0581136b59d2faf73a7d5e".into(), - topics: vec![ - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into(), - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), - ], - data: vec![] - }; + let entry1 = LogEntry { + address: "b372018f3be9e171df0581136b59d2faf73a7d5e".into(), + topics: vec![ + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into(), + "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), + ], + data: vec![], + }; - let entry2 = LogEntry { - address: "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), - topics: vec![ - "ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into(), - ], - data: vec![] - }; + let entry2 = LogEntry { + address: "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), + topics: vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()], + data: vec![], + }; - 
assert_eq!(filter.matches(&entry0), true); - assert_eq!(filter.matches(&entry1), false); - assert_eq!(filter.matches(&entry2), false); - } + assert_eq!(filter.matches(&entry0), true); + assert_eq!(filter.matches(&entry1), false); + assert_eq!(filter.matches(&entry2), false); + } } diff --git a/ethcore/types/src/header.rs b/ethcore/types/src/header.rs index cfe8f5bb6..e2b43df06 100644 --- a/ethcore/types/src/header.rs +++ b/ethcore/types/src/header.rs @@ -16,31 +16,31 @@ //! Block header. -use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP, keccak}; -use heapsize::HeapSizeOf; -use ethereum_types::{H256, U256, Address, Bloom}; use bytes::Bytes; -use rlp::{Rlp, RlpStream, Encodable, DecoderError, Decodable}; +use ethereum_types::{Address, Bloom, H256, U256}; +use hash::{keccak, KECCAK_EMPTY_LIST_RLP, KECCAK_NULL_RLP}; +use heapsize::HeapSizeOf; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; use BlockNumber; /// Semantic boolean for when a seal/signature is included. #[derive(Debug, Clone, Copy)] enum Seal { - /// The seal/signature is included. - With, - /// The seal/signature is not included. - Without, + /// The seal/signature is included. + With, + /// The seal/signature is not included. + Without, } /// Extended block header, wrapping `Header` with finalized and total difficulty information. #[derive(Debug, Clone, PartialEq, Eq)] pub struct ExtendedHeader { - /// The actual header. - pub header: Header, - /// Whether the block underlying this header is considered finalized. - pub is_finalized: bool, - /// The parent block difficulty. - pub parent_total_difficulty: U256, + /// The actual header. + pub header: Header, + /// Whether the block underlying this header is considered finalized. + pub is_finalized: bool, + /// The parent block difficulty. + pub parent_total_difficulty: U256, } /// A block header. @@ -51,375 +51,411 @@ pub struct ExtendedHeader { /// Doesn't do all that much on its own. 
#[derive(Debug, Clone, Eq)] pub struct Header { - /// Parent hash. - parent_hash: H256, - /// Block timestamp. - timestamp: u64, - /// Block number. - number: BlockNumber, - /// Block author. - author: Address, + /// Parent hash. + parent_hash: H256, + /// Block timestamp. + timestamp: u64, + /// Block number. + number: BlockNumber, + /// Block author. + author: Address, - /// Transactions root. - transactions_root: H256, - /// Block uncles hash. - uncles_hash: H256, - /// Block extra data. - extra_data: Bytes, + /// Transactions root. + transactions_root: H256, + /// Block uncles hash. + uncles_hash: H256, + /// Block extra data. + extra_data: Bytes, - /// State root. - state_root: H256, - /// Block receipts root. - receipts_root: H256, - /// Block bloom. - log_bloom: Bloom, - /// Gas used for contracts execution. - gas_used: U256, - /// Block gas limit. - gas_limit: U256, + /// State root. + state_root: H256, + /// Block receipts root. + receipts_root: H256, + /// Block bloom. + log_bloom: Bloom, + /// Gas used for contracts execution. + gas_used: U256, + /// Block gas limit. + gas_limit: U256, - /// Block difficulty. - difficulty: U256, - /// Vector of post-RLP-encoded fields. - seal: Vec, + /// Block difficulty. + difficulty: U256, + /// Vector of post-RLP-encoded fields. + seal: Vec, - /// Memoized hash of that header and the seal. - hash: Option, + /// Memoized hash of that header and the seal. 
+ hash: Option, } impl PartialEq for Header { - fn eq(&self, c: &Header) -> bool { - if let (&Some(ref h1), &Some(ref h2)) = (&self.hash, &c.hash) { - if h1 == h2 { - return true - } - } + fn eq(&self, c: &Header) -> bool { + if let (&Some(ref h1), &Some(ref h2)) = (&self.hash, &c.hash) { + if h1 == h2 { + return true; + } + } - self.parent_hash == c.parent_hash && - self.timestamp == c.timestamp && - self.number == c.number && - self.author == c.author && - self.transactions_root == c.transactions_root && - self.uncles_hash == c.uncles_hash && - self.extra_data == c.extra_data && - self.state_root == c.state_root && - self.receipts_root == c.receipts_root && - self.log_bloom == c.log_bloom && - self.gas_used == c.gas_used && - self.gas_limit == c.gas_limit && - self.difficulty == c.difficulty && - self.seal == c.seal - } + self.parent_hash == c.parent_hash + && self.timestamp == c.timestamp + && self.number == c.number + && self.author == c.author + && self.transactions_root == c.transactions_root + && self.uncles_hash == c.uncles_hash + && self.extra_data == c.extra_data + && self.state_root == c.state_root + && self.receipts_root == c.receipts_root + && self.log_bloom == c.log_bloom + && self.gas_used == c.gas_used + && self.gas_limit == c.gas_limit + && self.difficulty == c.difficulty + && self.seal == c.seal + } } impl Default for Header { - fn default() -> Self { - Header { - parent_hash: H256::default(), - timestamp: 0, - number: 0, - author: Address::default(), + fn default() -> Self { + Header { + parent_hash: H256::default(), + timestamp: 0, + number: 0, + author: Address::default(), - transactions_root: KECCAK_NULL_RLP, - uncles_hash: KECCAK_EMPTY_LIST_RLP, - extra_data: vec![], + transactions_root: KECCAK_NULL_RLP, + uncles_hash: KECCAK_EMPTY_LIST_RLP, + extra_data: vec![], - state_root: KECCAK_NULL_RLP, - receipts_root: KECCAK_NULL_RLP, - log_bloom: Bloom::default(), - gas_used: U256::default(), - gas_limit: U256::default(), + state_root: 
KECCAK_NULL_RLP, + receipts_root: KECCAK_NULL_RLP, + log_bloom: Bloom::default(), + gas_used: U256::default(), + gas_limit: U256::default(), - difficulty: U256::default(), - seal: vec![], - hash: None, - } - } + difficulty: U256::default(), + seal: vec![], + hash: None, + } + } } impl Header { - /// Create a new, default-valued, header. - pub fn new() -> Self { Self::default() } + /// Create a new, default-valued, header. + pub fn new() -> Self { + Self::default() + } - /// Get the parent_hash field of the header. - pub fn parent_hash(&self) -> &H256 { &self.parent_hash } + /// Get the parent_hash field of the header. + pub fn parent_hash(&self) -> &H256 { + &self.parent_hash + } - /// Get the timestamp field of the header. - pub fn timestamp(&self) -> u64 { self.timestamp } + /// Get the timestamp field of the header. + pub fn timestamp(&self) -> u64 { + self.timestamp + } - /// Get the number field of the header. - pub fn number(&self) -> BlockNumber { self.number } + /// Get the number field of the header. + pub fn number(&self) -> BlockNumber { + self.number + } - /// Get the author field of the header. - pub fn author(&self) -> &Address { &self.author } + /// Get the author field of the header. + pub fn author(&self) -> &Address { + &self.author + } - /// Get the extra data field of the header. - pub fn extra_data(&self) -> &Bytes { &self.extra_data } + /// Get the extra data field of the header. + pub fn extra_data(&self) -> &Bytes { + &self.extra_data + } - /// Get the state root field of the header. - pub fn state_root(&self) -> &H256 { &self.state_root } + /// Get the state root field of the header. + pub fn state_root(&self) -> &H256 { + &self.state_root + } - /// Get the receipts root field of the header. - pub fn receipts_root(&self) -> &H256 { &self.receipts_root } + /// Get the receipts root field of the header. + pub fn receipts_root(&self) -> &H256 { + &self.receipts_root + } - /// Get the log bloom field of the header. 
- pub fn log_bloom(&self) -> &Bloom { &self.log_bloom } + /// Get the log bloom field of the header. + pub fn log_bloom(&self) -> &Bloom { + &self.log_bloom + } - /// Get the transactions root field of the header. - pub fn transactions_root(&self) -> &H256 { &self.transactions_root } + /// Get the transactions root field of the header. + pub fn transactions_root(&self) -> &H256 { + &self.transactions_root + } - /// Get the uncles hash field of the header. - pub fn uncles_hash(&self) -> &H256 { &self.uncles_hash } + /// Get the uncles hash field of the header. + pub fn uncles_hash(&self) -> &H256 { + &self.uncles_hash + } - /// Get the gas used field of the header. - pub fn gas_used(&self) -> &U256 { &self.gas_used } + /// Get the gas used field of the header. + pub fn gas_used(&self) -> &U256 { + &self.gas_used + } - /// Get the gas limit field of the header. - pub fn gas_limit(&self) -> &U256 { &self.gas_limit } + /// Get the gas limit field of the header. + pub fn gas_limit(&self) -> &U256 { + &self.gas_limit + } - /// Get the difficulty field of the header. - pub fn difficulty(&self) -> &U256 { &self.difficulty } + /// Get the difficulty field of the header. + pub fn difficulty(&self) -> &U256 { + &self.difficulty + } - /// Get the seal field of the header. - pub fn seal(&self) -> &[Bytes] { &self.seal } + /// Get the seal field of the header. + pub fn seal(&self) -> &[Bytes] { + &self.seal + } - /// Get the seal field with RLP-decoded values as bytes. - pub fn decode_seal<'a, T: ::std::iter::FromIterator<&'a [u8]>>(&'a self) -> Result { - self.seal.iter().map(|rlp| { - Rlp::new(rlp).data() - }).collect() - } + /// Get the seal field with RLP-decoded values as bytes. + pub fn decode_seal<'a, T: ::std::iter::FromIterator<&'a [u8]>>( + &'a self, + ) -> Result { + self.seal.iter().map(|rlp| Rlp::new(rlp).data()).collect() + } - /// Set the number field of the header. 
- pub fn set_parent_hash(&mut self, a: H256) { - change_field(&mut self.hash, &mut self.parent_hash, a); - } + /// Set the number field of the header. + pub fn set_parent_hash(&mut self, a: H256) { + change_field(&mut self.hash, &mut self.parent_hash, a); + } - /// Set the uncles hash field of the header. - pub fn set_uncles_hash(&mut self, a: H256) { - change_field(&mut self.hash, &mut self.uncles_hash, a); + /// Set the uncles hash field of the header. + pub fn set_uncles_hash(&mut self, a: H256) { + change_field(&mut self.hash, &mut self.uncles_hash, a); + } + /// Set the state root field of the header. + pub fn set_state_root(&mut self, a: H256) { + change_field(&mut self.hash, &mut self.state_root, a); + } - } - /// Set the state root field of the header. - pub fn set_state_root(&mut self, a: H256) { - change_field(&mut self.hash, &mut self.state_root, a); - } + /// Set the transactions root field of the header. + pub fn set_transactions_root(&mut self, a: H256) { + change_field(&mut self.hash, &mut self.transactions_root, a); + } - /// Set the transactions root field of the header. - pub fn set_transactions_root(&mut self, a: H256) { - change_field(&mut self.hash, &mut self.transactions_root, a); - } + /// Set the receipts root field of the header. + pub fn set_receipts_root(&mut self, a: H256) { + change_field(&mut self.hash, &mut self.receipts_root, a); + } - /// Set the receipts root field of the header. - pub fn set_receipts_root(&mut self, a: H256) { - change_field(&mut self.hash, &mut self.receipts_root, a); - } + /// Set the log bloom field of the header. + pub fn set_log_bloom(&mut self, a: Bloom) { + change_field(&mut self.hash, &mut self.log_bloom, a); + } - /// Set the log bloom field of the header. - pub fn set_log_bloom(&mut self, a: Bloom) { - change_field(&mut self.hash, &mut self.log_bloom, a); - } + /// Set the timestamp field of the header. 
+ pub fn set_timestamp(&mut self, a: u64) { + change_field(&mut self.hash, &mut self.timestamp, a); + } - /// Set the timestamp field of the header. - pub fn set_timestamp(&mut self, a: u64) { - change_field(&mut self.hash, &mut self.timestamp, a); - } + /// Set the number field of the header. + pub fn set_number(&mut self, a: BlockNumber) { + change_field(&mut self.hash, &mut self.number, a); + } - /// Set the number field of the header. - pub fn set_number(&mut self, a: BlockNumber) { - change_field(&mut self.hash, &mut self.number, a); - } + /// Set the author field of the header. + pub fn set_author(&mut self, a: Address) { + change_field(&mut self.hash, &mut self.author, a); + } - /// Set the author field of the header. - pub fn set_author(&mut self, a: Address) { - change_field(&mut self.hash, &mut self.author, a); - } + /// Set the extra data field of the header. + pub fn set_extra_data(&mut self, a: Bytes) { + change_field(&mut self.hash, &mut self.extra_data, a); + } - /// Set the extra data field of the header. - pub fn set_extra_data(&mut self, a: Bytes) { - change_field(&mut self.hash, &mut self.extra_data, a); - } + /// Set the gas used field of the header. + pub fn set_gas_used(&mut self, a: U256) { + change_field(&mut self.hash, &mut self.gas_used, a); + } - /// Set the gas used field of the header. - pub fn set_gas_used(&mut self, a: U256) { - change_field(&mut self.hash, &mut self.gas_used, a); - } + /// Set the gas limit field of the header. + pub fn set_gas_limit(&mut self, a: U256) { + change_field(&mut self.hash, &mut self.gas_limit, a); + } - /// Set the gas limit field of the header. - pub fn set_gas_limit(&mut self, a: U256) { - change_field(&mut self.hash, &mut self.gas_limit, a); - } + /// Set the difficulty field of the header. + pub fn set_difficulty(&mut self, a: U256) { + change_field(&mut self.hash, &mut self.difficulty, a); + } - /// Set the difficulty field of the header. 
- pub fn set_difficulty(&mut self, a: U256) { - change_field(&mut self.hash, &mut self.difficulty, a); - } + /// Set the seal field of the header. + pub fn set_seal(&mut self, a: Vec) { + change_field(&mut self.hash, &mut self.seal, a) + } - /// Set the seal field of the header. - pub fn set_seal(&mut self, a: Vec) { - change_field(&mut self.hash, &mut self.seal, a) - } + /// Get & memoize the hash of this header (keccak of the RLP with seal). + pub fn compute_hash(&mut self) -> H256 { + let hash = self.hash(); + self.hash = Some(hash); + hash + } - /// Get & memoize the hash of this header (keccak of the RLP with seal). - pub fn compute_hash(&mut self) -> H256 { - let hash = self.hash(); - self.hash = Some(hash); - hash - } + /// Get the hash of this header (keccak of the RLP with seal). + pub fn hash(&self) -> H256 { + self.hash.unwrap_or_else(|| keccak(self.rlp(Seal::With))) + } - /// Get the hash of this header (keccak of the RLP with seal). - pub fn hash(&self) -> H256 { - self.hash.unwrap_or_else(|| keccak(self.rlp(Seal::With))) - } + /// Get the hash of the header excluding the seal + pub fn bare_hash(&self) -> H256 { + keccak(self.rlp(Seal::Without)) + } - /// Get the hash of the header excluding the seal - pub fn bare_hash(&self) -> H256 { - keccak(self.rlp(Seal::Without)) - } + /// Encode the header, getting a type-safe wrapper around the RLP. + pub fn encoded(&self) -> ::encoded::Header { + ::encoded::Header::new(self.rlp(Seal::With)) + } - /// Encode the header, getting a type-safe wrapper around the RLP. - pub fn encoded(&self) -> ::encoded::Header { - ::encoded::Header::new(self.rlp(Seal::With)) - } + /// Get the RLP representation of this Header. + fn rlp(&self, with_seal: Seal) -> Bytes { + let mut s = RlpStream::new(); + self.stream_rlp(&mut s, with_seal); + s.out() + } - /// Get the RLP representation of this Header. 
- fn rlp(&self, with_seal: Seal) -> Bytes { - let mut s = RlpStream::new(); - self.stream_rlp(&mut s, with_seal); - s.out() - } + /// Place this header into an RLP stream `s`, optionally `with_seal`. + fn stream_rlp(&self, s: &mut RlpStream, with_seal: Seal) { + if let Seal::With = with_seal { + s.begin_list(13 + self.seal.len()); + } else { + s.begin_list(13); + } - /// Place this header into an RLP stream `s`, optionally `with_seal`. - fn stream_rlp(&self, s: &mut RlpStream, with_seal: Seal) { - if let Seal::With = with_seal { - s.begin_list(13 + self.seal.len()); - } else { - s.begin_list(13); - } + s.append(&self.parent_hash); + s.append(&self.uncles_hash); + s.append(&self.author); + s.append(&self.state_root); + s.append(&self.transactions_root); + s.append(&self.receipts_root); + s.append(&self.log_bloom); + s.append(&self.difficulty); + s.append(&self.number); + s.append(&self.gas_limit); + s.append(&self.gas_used); + s.append(&self.timestamp); + s.append(&self.extra_data); - s.append(&self.parent_hash); - s.append(&self.uncles_hash); - s.append(&self.author); - s.append(&self.state_root); - s.append(&self.transactions_root); - s.append(&self.receipts_root); - s.append(&self.log_bloom); - s.append(&self.difficulty); - s.append(&self.number); - s.append(&self.gas_limit); - s.append(&self.gas_used); - s.append(&self.timestamp); - s.append(&self.extra_data); - - if let Seal::With = with_seal { - for b in &self.seal { - s.append_raw(b, 1); - } - } - } + if let Seal::With = with_seal { + for b in &self.seal { + s.append_raw(b, 1); + } + } + } } /// Alter value of given field, reset memoised hash if changed. 
-fn change_field(hash: &mut Option, field: &mut T, value: T) where T: PartialEq { - if field != &value { - *field = value; - *hash = None; - } +fn change_field(hash: &mut Option, field: &mut T, value: T) +where + T: PartialEq, +{ + if field != &value { + *field = value; + *hash = None; + } } impl Decodable for Header { - fn decode(r: &Rlp) -> Result { - let mut blockheader = Header { - parent_hash: r.val_at(0)?, - uncles_hash: r.val_at(1)?, - author: r.val_at(2)?, - state_root: r.val_at(3)?, - transactions_root: r.val_at(4)?, - receipts_root: r.val_at(5)?, - log_bloom: r.val_at(6)?, - difficulty: r.val_at(7)?, - number: r.val_at(8)?, - gas_limit: r.val_at(9)?, - gas_used: r.val_at(10)?, - timestamp: r.val_at(11)?, - extra_data: r.val_at(12)?, - seal: vec![], - hash: keccak(r.as_raw()).into(), - }; + fn decode(r: &Rlp) -> Result { + let mut blockheader = Header { + parent_hash: r.val_at(0)?, + uncles_hash: r.val_at(1)?, + author: r.val_at(2)?, + state_root: r.val_at(3)?, + transactions_root: r.val_at(4)?, + receipts_root: r.val_at(5)?, + log_bloom: r.val_at(6)?, + difficulty: r.val_at(7)?, + number: r.val_at(8)?, + gas_limit: r.val_at(9)?, + gas_used: r.val_at(10)?, + timestamp: r.val_at(11)?, + extra_data: r.val_at(12)?, + seal: vec![], + hash: keccak(r.as_raw()).into(), + }; - for i in 13..r.item_count()? { - blockheader.seal.push(r.at(i)?.as_raw().to_vec()) - } + for i in 13..r.item_count()? 
{ + blockheader.seal.push(r.at(i)?.as_raw().to_vec()) + } - Ok(blockheader) - } + Ok(blockheader) + } } impl Encodable for Header { - fn rlp_append(&self, s: &mut RlpStream) { - self.stream_rlp(s, Seal::With); - } + fn rlp_append(&self, s: &mut RlpStream) { + self.stream_rlp(s, Seal::With); + } } impl HeapSizeOf for Header { - fn heap_size_of_children(&self) -> usize { - self.extra_data.heap_size_of_children() + self.seal.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.extra_data.heap_size_of_children() + self.seal.heap_size_of_children() + } } impl ExtendedHeader { - /// Returns combined difficulty of all ancestors together with the difficulty of this header. - pub fn total_score(&self) -> U256 { - self.parent_total_difficulty + *self.header.difficulty() - } + /// Returns combined difficulty of all ancestors together with the difficulty of this header. + pub fn total_score(&self) -> U256 { + self.parent_total_difficulty + *self.header.difficulty() + } } #[cfg(test)] mod tests { - use rustc_hex::FromHex; - use rlp; - use super::Header; + use super::Header; + use rlp; + use rustc_hex::FromHex; - #[test] - fn test_header_seal_fields() { - // that's rlp of block header created with ethash engine. 
- let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap(); - let mix_hash = "a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd".from_hex().unwrap(); - let mix_hash_decoded = "a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd".from_hex().unwrap(); - let nonce = "88ab4e252a7e8c2a23".from_hex().unwrap(); - let nonce_decoded = "ab4e252a7e8c2a23".from_hex().unwrap(); + #[test] + fn test_header_seal_fields() { + // that's rlp of block header created with ethash engine. 
+ let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap(); + let mix_hash = "a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd" + .from_hex() + .unwrap(); + let mix_hash_decoded = "a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd" + .from_hex() + .unwrap(); + let nonce = "88ab4e252a7e8c2a23".from_hex().unwrap(); + let nonce_decoded = "ab4e252a7e8c2a23".from_hex().unwrap(); - let header: Header = rlp::decode(&header_rlp).expect("error decoding header"); - let seal_fields = header.seal.clone(); - assert_eq!(seal_fields.len(), 2); - assert_eq!(seal_fields[0], mix_hash); - assert_eq!(seal_fields[1], nonce); + let header: Header = rlp::decode(&header_rlp).expect("error decoding header"); + let seal_fields = header.seal.clone(); + assert_eq!(seal_fields.len(), 2); + assert_eq!(seal_fields[0], mix_hash); + assert_eq!(seal_fields[1], nonce); - let decoded_seal = header.decode_seal::>().unwrap(); - assert_eq!(decoded_seal.len(), 2); - 
assert_eq!(decoded_seal[0], &*mix_hash_decoded); - assert_eq!(decoded_seal[1], &*nonce_decoded); - } + let decoded_seal = header.decode_seal::>().unwrap(); + assert_eq!(decoded_seal.len(), 2); + assert_eq!(decoded_seal[0], &*mix_hash_decoded); + assert_eq!(decoded_seal[1], &*nonce_decoded); + } - #[test] - fn decode_and_encode_header() { - // that's rlp of block header created with ethash engine. - let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap(); + #[test] + fn decode_and_encode_header() { + // that's rlp of block header created with ethash engine. 
+ let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap(); - let header: Header = rlp::decode(&header_rlp).expect("error decoding header"); - let encoded_header = rlp::encode(&header); + let header: Header = rlp::decode(&header_rlp).expect("error decoding header"); + let encoded_header = rlp::encode(&header); - assert_eq!(header_rlp, encoded_header); - } + assert_eq!(header_rlp, encoded_header); + } - #[test] - fn reject_header_with_large_timestamp() { - // that's rlp of block header created with ethash engine. 
- // The encoding contains a large timestamp (295147905179352825856) - let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d891000000000000000000080a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap(); + #[test] + fn reject_header_with_large_timestamp() { + // that's rlp of block header created with ethash engine. 
+ // The encoding contains a large timestamp (295147905179352825856) + let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d891000000000000000000080a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap(); - // This should fail decoding timestamp - let header: Result = rlp::decode(&header_rlp); - assert_eq!(header.unwrap_err(), rlp::DecoderError::RlpIsTooBig); - } + // This should fail decoding timestamp + let header: Result = rlp::decode(&header_rlp); + assert_eq!(header.unwrap_err(), rlp::DecoderError::RlpIsTooBig); + } } diff --git a/ethcore/types/src/ids.rs b/ethcore/types/src/ids.rs index 1f099be57..12de70a2d 100644 --- a/ethcore/types/src/ids.rs +++ b/ethcore/types/src/ids.rs @@ -22,40 +22,40 @@ use BlockNumber; /// Uniquely identifies block. #[derive(Debug, PartialEq, Copy, Clone, Hash, Eq)] pub enum BlockId { - /// Block's sha3. - /// Querying by hash is always faster. - Hash(H256), - /// Block number within canon blockchain. - Number(BlockNumber), - /// Earliest block (genesis). - Earliest, - /// Latest mined block. 
- Latest, + /// Block's sha3. + /// Querying by hash is always faster. + Hash(H256), + /// Block number within canon blockchain. + Number(BlockNumber), + /// Earliest block (genesis). + Earliest, + /// Latest mined block. + Latest, } /// Uniquely identifies transaction. #[derive(Debug, PartialEq, Clone, Hash, Eq)] pub enum TransactionId { - /// Transaction's sha3. - Hash(H256), - /// Block id and transaction index within this block. - /// Querying by block position is always faster. - Location(BlockId, usize) + /// Transaction's sha3. + Hash(H256), + /// Block id and transaction index within this block. + /// Querying by block position is always faster. + Location(BlockId, usize), } /// Uniquely identifies Trace. pub struct TraceId { - /// Transaction - pub transaction: TransactionId, - /// Trace address within transaction. - pub address: Vec, + /// Transaction + pub transaction: TransactionId, + /// Trace address within transaction. + pub address: Vec, } /// Uniquely identifies Uncle. #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct UncleId { - /// Block id. - pub block: BlockId, - /// Position in block. - pub position: usize + /// Block id. + pub block: BlockId, + /// Position in block. + pub position: usize, } diff --git a/ethcore/types/src/lib.rs b/ethcore/types/src/lib.rs index d187704fd..36d86ff45 100644 --- a/ethcore/types/src/lib.rs +++ b/ethcore/types/src/lib.rs @@ -58,6 +58,7 @@ pub mod block; pub mod block_status; pub mod blockchain_info; pub mod call_analytics; +pub mod data_format; pub mod encoded; pub mod engines; pub mod filter; @@ -74,7 +75,6 @@ pub mod trace_filter; pub mod transaction; pub mod tree_route; pub mod verification_queue_info; -pub mod data_format; /// Type for block number. pub type BlockNumber = u64; diff --git a/ethcore/types/src/log_entry.rs b/ethcore/types/src/log_entry.rs index a5087b2a0..471dda430 100644 --- a/ethcore/types/src/log_entry.rs +++ b/ethcore/types/src/log_entry.rs @@ -16,92 +16,96 @@ //! 
Log entry type definition. -use std::ops::Deref; -use heapsize::HeapSizeOf; use bytes::Bytes; -use ethereum_types::{H256, Address, Bloom, BloomInput}; +use ethereum_types::{Address, Bloom, BloomInput, H256}; +use heapsize::HeapSizeOf; +use std::ops::Deref; -use {BlockNumber}; use ethjson; +use BlockNumber; /// A record of execution for a `LOG` operation. #[derive(Default, Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] pub struct LogEntry { - /// The address of the contract executing at the point of the `LOG` operation. - pub address: Address, - /// The topics associated with the `LOG` operation. - pub topics: Vec, - /// The data associated with the `LOG` operation. - pub data: Bytes, + /// The address of the contract executing at the point of the `LOG` operation. + pub address: Address, + /// The topics associated with the `LOG` operation. + pub topics: Vec, + /// The data associated with the `LOG` operation. + pub data: Bytes, } impl HeapSizeOf for LogEntry { - fn heap_size_of_children(&self) -> usize { - self.topics.heap_size_of_children() + self.data.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.topics.heap_size_of_children() + self.data.heap_size_of_children() + } } impl LogEntry { - /// Calculates the bloom of this log entry. - pub fn bloom(&self) -> Bloom { - self.topics.iter().fold(Bloom::from(BloomInput::Raw(&self.address)), |mut b, t| { - b.accrue(BloomInput::Raw(t)); - b - }) - } + /// Calculates the bloom of this log entry. 
+ pub fn bloom(&self) -> Bloom { + self.topics + .iter() + .fold(Bloom::from(BloomInput::Raw(&self.address)), |mut b, t| { + b.accrue(BloomInput::Raw(t)); + b + }) + } } impl From for LogEntry { - fn from(l: ethjson::state::Log) -> Self { - LogEntry { - address: l.address.into(), - topics: l.topics.into_iter().map(Into::into).collect(), - data: l.data.into(), - } - } + fn from(l: ethjson::state::Log) -> Self { + LogEntry { + address: l.address.into(), + topics: l.topics.into_iter().map(Into::into).collect(), + data: l.data.into(), + } + } } /// Log localized in a blockchain. #[derive(Default, Debug, PartialEq, Clone)] pub struct LocalizedLogEntry { - /// Plain log entry. - pub entry: LogEntry, - /// Block in which this log was created. - pub block_hash: H256, - /// Block number. - pub block_number: BlockNumber, - /// Hash of transaction in which this log was created. - pub transaction_hash: H256, - /// Index of transaction within block. - pub transaction_index: usize, - /// Log position in the block. - pub log_index: usize, - /// Log position in the transaction. - pub transaction_log_index: usize, + /// Plain log entry. + pub entry: LogEntry, + /// Block in which this log was created. + pub block_hash: H256, + /// Block number. + pub block_number: BlockNumber, + /// Hash of transaction in which this log was created. + pub transaction_hash: H256, + /// Index of transaction within block. + pub transaction_index: usize, + /// Log position in the block. + pub log_index: usize, + /// Log position in the transaction. 
+ pub transaction_log_index: usize, } impl Deref for LocalizedLogEntry { - type Target = LogEntry; + type Target = LogEntry; - fn deref(&self) -> &Self::Target { - &self.entry - } + fn deref(&self) -> &Self::Target { + &self.entry + } } #[cfg(test)] mod tests { - use ethereum_types::{Bloom, Address}; - use super::LogEntry; + use super::LogEntry; + use ethereum_types::{Address, Bloom}; - #[test] - fn test_empty_log_bloom() { - let bloom = "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".parse::().unwrap(); - let address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse::
().unwrap(); - let log = LogEntry { - address: address, - topics: vec![], - data: vec![] - }; - assert_eq!(log.bloom(), bloom); - } + #[test] + fn test_empty_log_bloom() { + let bloom = "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".parse::().unwrap(); + let address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6" + .parse::
() + .unwrap(); + let log = LogEntry { + address: address, + topics: vec![], + data: vec![], + }; + assert_eq!(log.bloom(), bloom); + } } diff --git a/ethcore/types/src/pruning_info.rs b/ethcore/types/src/pruning_info.rs index 76f775cb7..c9be07e15 100644 --- a/ethcore/types/src/pruning_info.rs +++ b/ethcore/types/src/pruning_info.rs @@ -23,8 +23,8 @@ /// Client pruning info. See module-level docs for more details. #[derive(Debug, Clone)] pub struct PruningInfo { - /// The first block which everything can be served after. - pub earliest_chain: u64, - /// The first block where state requests may be served. - pub earliest_state: u64, + /// The first block which everything can be served after. + pub earliest_chain: u64, + /// The first block where state requests may be served. + pub earliest_state: u64, } diff --git a/ethcore/types/src/receipt.rs b/ethcore/types/src/receipt.rs index 438296681..4500a8239 100644 --- a/ethcore/types/src/receipt.rs +++ b/ethcore/types/src/receipt.rs @@ -16,217 +16,219 @@ //! Receipt -use ethereum_types::{H160, H256, U256, Address, Bloom}; +use ethereum_types::{Address, Bloom, H160, H256, U256}; use heapsize::HeapSizeOf; -use rlp::{Rlp, RlpStream, Encodable, Decodable, DecoderError}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; +use log_entry::{LocalizedLogEntry, LogEntry}; use BlockNumber; -use log_entry::{LogEntry, LocalizedLogEntry}; /// Transaction outcome store in the receipt. #[derive(Debug, Clone, PartialEq, Eq)] pub enum TransactionOutcome { - /// Status and state root are unknown under EIP-98 rules. - Unknown, - /// State root is known. Pre EIP-98 and EIP-658 rules. - StateRoot(H256), - /// Status code is known. EIP-658 rules. - StatusCode(u8), + /// Status and state root are unknown under EIP-98 rules. + Unknown, + /// State root is known. Pre EIP-98 and EIP-658 rules. + StateRoot(H256), + /// Status code is known. EIP-658 rules. + StatusCode(u8), } /// Information describing execution of a transaction. 
#[derive(Debug, Clone, PartialEq, Eq)] pub struct Receipt { - /// The total gas used in the block following execution of the transaction. - pub gas_used: U256, - /// The OR-wide combination of all logs' blooms for this transaction. - pub log_bloom: Bloom, - /// The logs stemming from this transaction. - pub logs: Vec, - /// Transaction outcome. - pub outcome: TransactionOutcome, + /// The total gas used in the block following execution of the transaction. + pub gas_used: U256, + /// The OR-wide combination of all logs' blooms for this transaction. + pub log_bloom: Bloom, + /// The logs stemming from this transaction. + pub logs: Vec, + /// Transaction outcome. + pub outcome: TransactionOutcome, } impl Receipt { - /// Create a new receipt. - pub fn new(outcome: TransactionOutcome, gas_used: U256, logs: Vec) -> Self { - Self { - gas_used, - log_bloom: logs.iter().fold(Bloom::default(), |mut b, l| { - b.accrue_bloom(&l.bloom()); - b - }), - logs, - outcome, - } - } + /// Create a new receipt. 
+ pub fn new(outcome: TransactionOutcome, gas_used: U256, logs: Vec) -> Self { + Self { + gas_used, + log_bloom: logs.iter().fold(Bloom::default(), |mut b, l| { + b.accrue_bloom(&l.bloom()); + b + }), + logs, + outcome, + } + } } impl Encodable for Receipt { - fn rlp_append(&self, s: &mut RlpStream) { - match self.outcome { - TransactionOutcome::Unknown => { - s.begin_list(3); - }, - TransactionOutcome::StateRoot(ref root) => { - s.begin_list(4); - s.append(root); - }, - TransactionOutcome::StatusCode(ref status_code) => { - s.begin_list(4); - s.append(status_code); - }, - } - s.append(&self.gas_used); - s.append(&self.log_bloom); - s.append_list(&self.logs); - } + fn rlp_append(&self, s: &mut RlpStream) { + match self.outcome { + TransactionOutcome::Unknown => { + s.begin_list(3); + } + TransactionOutcome::StateRoot(ref root) => { + s.begin_list(4); + s.append(root); + } + TransactionOutcome::StatusCode(ref status_code) => { + s.begin_list(4); + s.append(status_code); + } + } + s.append(&self.gas_used); + s.append(&self.log_bloom); + s.append_list(&self.logs); + } } impl Decodable for Receipt { - fn decode(rlp: &Rlp) -> Result { - if rlp.item_count()? == 3 { - Ok(Receipt { - outcome: TransactionOutcome::Unknown, - gas_used: rlp.val_at(0)?, - log_bloom: rlp.val_at(1)?, - logs: rlp.list_at(2)?, - }) - } else { - Ok(Receipt { - gas_used: rlp.val_at(1)?, - log_bloom: rlp.val_at(2)?, - logs: rlp.list_at(3)?, - outcome: { - let first = rlp.at(0)?; - if first.is_data() && first.data()?.len() <= 1 { - TransactionOutcome::StatusCode(first.as_val()?) - } else { - TransactionOutcome::StateRoot(first.as_val()?) - } - } - }) - } - } + fn decode(rlp: &Rlp) -> Result { + if rlp.item_count()? 
== 3 { + Ok(Receipt { + outcome: TransactionOutcome::Unknown, + gas_used: rlp.val_at(0)?, + log_bloom: rlp.val_at(1)?, + logs: rlp.list_at(2)?, + }) + } else { + Ok(Receipt { + gas_used: rlp.val_at(1)?, + log_bloom: rlp.val_at(2)?, + logs: rlp.list_at(3)?, + outcome: { + let first = rlp.at(0)?; + if first.is_data() && first.data()?.len() <= 1 { + TransactionOutcome::StatusCode(first.as_val()?) + } else { + TransactionOutcome::StateRoot(first.as_val()?) + } + }, + }) + } + } } impl HeapSizeOf for Receipt { - fn heap_size_of_children(&self) -> usize { - self.logs.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.logs.heap_size_of_children() + } } /// Receipt with additional info. #[derive(Debug, Clone, PartialEq)] pub struct RichReceipt { - /// Transaction hash. - pub transaction_hash: H256, - /// Transaction index. - pub transaction_index: usize, - /// The total gas used in the block following execution of the transaction. - pub cumulative_gas_used: U256, - /// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`. - pub gas_used: U256, - /// Contract address. - /// NOTE: It is an Option because only `Action::Create` transactions has a contract address - pub contract_address: Option
, - /// Logs - pub logs: Vec, - /// Logs bloom - pub log_bloom: Bloom, - /// Transaction outcome. - pub outcome: TransactionOutcome, - /// Receiver address - /// NOTE: It is an Option because only `Action::Call` transactions has a receiver address - pub to: Option, - /// Sender - pub from: H160 + /// Transaction hash. + pub transaction_hash: H256, + /// Transaction index. + pub transaction_index: usize, + /// The total gas used in the block following execution of the transaction. + pub cumulative_gas_used: U256, + /// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`. + pub gas_used: U256, + /// Contract address. + /// NOTE: It is an Option because only `Action::Create` transactions has a contract address + pub contract_address: Option
, + /// Logs + pub logs: Vec, + /// Logs bloom + pub log_bloom: Bloom, + /// Transaction outcome. + pub outcome: TransactionOutcome, + /// Receiver address + /// NOTE: It is an Option because only `Action::Call` transactions has a receiver address + pub to: Option, + /// Sender + pub from: H160, } /// Receipt with additional info. #[derive(Debug, Clone, PartialEq)] pub struct LocalizedReceipt { - /// Transaction hash. - pub transaction_hash: H256, - /// Transaction index. - pub transaction_index: usize, - /// Block hash. - pub block_hash: H256, - /// Block number. - pub block_number: BlockNumber, - /// The total gas used in the block following execution of the transaction. - pub cumulative_gas_used: U256, - /// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`. - pub gas_used: U256, - /// Contract address. - /// NOTE: It is an Option because only `Action::Create` transactions has a contract address - pub contract_address: Option
, - /// Logs - pub logs: Vec, - /// Logs bloom - pub log_bloom: Bloom, - /// Transaction outcome. - pub outcome: TransactionOutcome, - /// Receiver address - /// NOTE: It is an Option because only `Action::Call` transactions has a receiver address - pub to: Option, - /// Sender - pub from: H160 + /// Transaction hash. + pub transaction_hash: H256, + /// Transaction index. + pub transaction_index: usize, + /// Block hash. + pub block_hash: H256, + /// Block number. + pub block_number: BlockNumber, + /// The total gas used in the block following execution of the transaction. + pub cumulative_gas_used: U256, + /// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`. + pub gas_used: U256, + /// Contract address. + /// NOTE: It is an Option because only `Action::Create` transactions has a contract address + pub contract_address: Option
, + /// Logs + pub logs: Vec, + /// Logs bloom + pub log_bloom: Bloom, + /// Transaction outcome. + pub outcome: TransactionOutcome, + /// Receiver address + /// NOTE: It is an Option because only `Action::Call` transactions has a receiver address + pub to: Option, + /// Sender + pub from: H160, } #[cfg(test)] mod tests { - use super::{Receipt, TransactionOutcome}; - use log_entry::LogEntry; + use super::{Receipt, TransactionOutcome}; + use log_entry::LogEntry; - #[test] - fn test_no_state_root() { - let expected = ::rustc_hex::FromHex::from_hex("f9014183040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap(); - let r = Receipt::new( - TransactionOutcome::Unknown, - 0x40cae.into(), - vec![LogEntry { - address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(), - topics: vec![], - data: vec![0u8; 32] - }] - ); - assert_eq!(&::rlp::encode(&r)[..], &expected[..]); - } + #[test] + fn test_no_state_root() { + let expected = 
::rustc_hex::FromHex::from_hex("f9014183040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap(); + let r = Receipt::new( + TransactionOutcome::Unknown, + 0x40cae.into(), + vec![LogEntry { + address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(), + topics: vec![], + data: vec![0u8; 32], + }], + ); + assert_eq!(&::rlp::encode(&r)[..], &expected[..]); + } - #[test] - fn test_basic() { - let expected = ::rustc_hex::FromHex::from_hex("f90162a02f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee83040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap(); - let r = Receipt::new( - TransactionOutcome::StateRoot("2f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee".into()), - 0x40cae.into(), - vec![LogEntry { - address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(), - 
topics: vec![], - data: vec![0u8; 32] - }] - ); - let encoded = ::rlp::encode(&r); - assert_eq!(&encoded[..], &expected[..]); - let decoded: Receipt = ::rlp::decode(&encoded).expect("decoding receipt failed"); - assert_eq!(decoded, r); - } + #[test] + fn test_basic() { + let expected = ::rustc_hex::FromHex::from_hex("f90162a02f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee83040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap(); + let r = Receipt::new( + TransactionOutcome::StateRoot( + "2f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee".into(), + ), + 0x40cae.into(), + vec![LogEntry { + address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(), + topics: vec![], + data: vec![0u8; 32], + }], + ); + let encoded = ::rlp::encode(&r); + assert_eq!(&encoded[..], &expected[..]); + let decoded: Receipt = ::rlp::decode(&encoded).expect("decoding receipt failed"); + assert_eq!(decoded, r); + } - #[test] - fn test_status_code() { - let expected = 
::rustc_hex::FromHex::from_hex("f901428083040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap(); - let r = Receipt::new( - TransactionOutcome::StatusCode(0), - 0x40cae.into(), - vec![LogEntry { - address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(), - topics: vec![], - data: vec![0u8; 32] - }] - ); - let encoded = ::rlp::encode(&r); - assert_eq!(&encoded[..], &expected[..]); - let decoded: Receipt = ::rlp::decode(&encoded).expect("decoding receipt failed"); - assert_eq!(decoded, r); - } + #[test] + fn test_status_code() { + let expected = ::rustc_hex::FromHex::from_hex("f901428083040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap(); + let r = Receipt::new( + TransactionOutcome::StatusCode(0), + 0x40cae.into(), + vec![LogEntry { + address: 
"dcf421d093428b096ca501a7cd1a740855a7976f".into(), + topics: vec![], + data: vec![0u8; 32], + }], + ); + let encoded = ::rlp::encode(&r); + assert_eq!(&encoded[..], &expected[..]); + let decoded: Receipt = ::rlp::decode(&encoded).expect("decoding receipt failed"); + assert_eq!(decoded, r); + } } diff --git a/ethcore/types/src/restoration_status.rs b/ethcore/types/src/restoration_status.rs index b36ec7ef4..4f24e3957 100644 --- a/ethcore/types/src/restoration_status.rs +++ b/ethcore/types/src/restoration_status.rs @@ -19,24 +19,24 @@ /// Statuses for restorations. #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub enum RestorationStatus { - /// No restoration. - Inactive, - /// Restoration is initializing - Initializing { - /// Number of chunks done/imported - chunks_done: u32, - }, - /// Ongoing restoration. - Ongoing { - /// Total number of state chunks. - state_chunks: u32, - /// Total number of block chunks. - block_chunks: u32, - /// Number of state chunks completed. - state_chunks_done: u32, - /// Number of block chunks completed. - block_chunks_done: u32, - }, - /// Failed restoration. - Failed, + /// No restoration. + Inactive, + /// Restoration is initializing + Initializing { + /// Number of chunks done/imported + chunks_done: u32, + }, + /// Ongoing restoration. + Ongoing { + /// Total number of state chunks. + state_chunks: u32, + /// Total number of block chunks. + block_chunks: u32, + /// Number of state chunks completed. + state_chunks_done: u32, + /// Number of block chunks completed. + block_chunks_done: u32, + }, + /// Failed restoration. + Failed, } diff --git a/ethcore/types/src/security_level.rs b/ethcore/types/src/security_level.rs index eb87317e7..d4505e411 100644 --- a/ethcore/types/src/security_level.rs +++ b/ethcore/types/src/security_level.rs @@ -16,25 +16,25 @@ //! Indication of how secure the chain is. -use {BlockNumber}; +use BlockNumber; /// Indication of how secure the chain is. 
#[derive(Debug, PartialEq, Copy, Clone, Hash, Eq)] pub enum SecurityLevel { - /// All blocks from genesis to chain head are known to have valid state transitions and PoW. - FullState, - /// All blocks from genesis to chain head are known to have a valid PoW. - FullProofOfWork, - /// Some recent headers (the argument) are known to have a valid PoW. - PartialProofOfWork(BlockNumber), + /// All blocks from genesis to chain head are known to have valid state transitions and PoW. + FullState, + /// All blocks from genesis to chain head are known to have a valid PoW. + FullProofOfWork, + /// Some recent headers (the argument) are known to have a valid PoW. + PartialProofOfWork(BlockNumber), } impl SecurityLevel { - /// `true` for `FullPoW`/`FullState`. - pub fn is_full(&self) -> bool { - match *self { - SecurityLevel::FullState | SecurityLevel::FullProofOfWork => true, - _ => false, - } - } + /// `true` for `FullPoW`/`FullState`. + pub fn is_full(&self) -> bool { + match *self { + SecurityLevel::FullState | SecurityLevel::FullProofOfWork => true, + _ => false, + } + } } diff --git a/ethcore/types/src/snapshot_manifest.rs b/ethcore/types/src/snapshot_manifest.rs index 8ed19fcfc..09007e5ce 100644 --- a/ethcore/types/src/snapshot_manifest.rs +++ b/ethcore/types/src/snapshot_manifest.rs @@ -16,63 +16,63 @@ //! Snapshot manifest type definition -use ethereum_types::H256; -use rlp::{Rlp, RlpStream, DecoderError}; use bytes::Bytes; +use ethereum_types::H256; +use rlp::{DecoderError, Rlp, RlpStream}; /// Manifest data. #[derive(Debug, Clone, PartialEq, Eq)] pub struct ManifestData { - /// Snapshot format version. - pub version: u64, - /// List of state chunk hashes. - pub state_hashes: Vec, - /// List of block chunk hashes. - pub block_hashes: Vec, - /// The final, expected state root. - pub state_root: H256, - /// Block number this snapshot was taken at. - pub block_number: u64, - /// Block hash this snapshot was taken at. - pub block_hash: H256, + /// Snapshot format version. 
+ pub version: u64, + /// List of state chunk hashes. + pub state_hashes: Vec, + /// List of block chunk hashes. + pub block_hashes: Vec, + /// The final, expected state root. + pub state_root: H256, + /// Block number this snapshot was taken at. + pub block_number: u64, + /// Block hash this snapshot was taken at. + pub block_hash: H256, } impl ManifestData { - /// Encode the manifest data to rlp. - pub fn into_rlp(self) -> Bytes { - let mut stream = RlpStream::new_list(6); - stream.append(&self.version); - stream.append_list(&self.state_hashes); - stream.append_list(&self.block_hashes); - stream.append(&self.state_root); - stream.append(&self.block_number); - stream.append(&self.block_hash); + /// Encode the manifest data to rlp. + pub fn into_rlp(self) -> Bytes { + let mut stream = RlpStream::new_list(6); + stream.append(&self.version); + stream.append_list(&self.state_hashes); + stream.append_list(&self.block_hashes); + stream.append(&self.state_root); + stream.append(&self.block_number); + stream.append(&self.block_hash); - stream.out() - } + stream.out() + } - /// Try to restore manifest data from raw bytes, interpreted as RLP. - pub fn from_rlp(raw: &[u8]) -> Result { - let decoder = Rlp::new(raw); - let (start, version) = if decoder.item_count()? == 5 { - (0, 1) - } else { - (1, decoder.val_at(0)?) - }; + /// Try to restore manifest data from raw bytes, interpreted as RLP. + pub fn from_rlp(raw: &[u8]) -> Result { + let decoder = Rlp::new(raw); + let (start, version) = if decoder.item_count()? == 5 { + (0, 1) + } else { + (1, decoder.val_at(0)?) 
+ }; - let state_hashes: Vec = decoder.list_at(start + 0)?; - let block_hashes: Vec = decoder.list_at(start + 1)?; - let state_root: H256 = decoder.val_at(start + 2)?; - let block_number: u64 = decoder.val_at(start + 3)?; - let block_hash: H256 = decoder.val_at(start + 4)?; + let state_hashes: Vec = decoder.list_at(start + 0)?; + let block_hashes: Vec = decoder.list_at(start + 1)?; + let state_root: H256 = decoder.val_at(start + 2)?; + let block_number: u64 = decoder.val_at(start + 3)?; + let block_hash: H256 = decoder.val_at(start + 4)?; - Ok(ManifestData { - version: version, - state_hashes: state_hashes, - block_hashes: block_hashes, - state_root: state_root, - block_number: block_number, - block_hash: block_hash, - }) - } + Ok(ManifestData { + version: version, + state_hashes: state_hashes, + block_hashes: block_hashes, + state_root: state_root, + block_number: block_number, + block_hash: block_hash, + }) + } } diff --git a/ethcore/types/src/state_diff.rs b/ethcore/types/src/state_diff.rs index 5605719f0..905343a30 100644 --- a/ethcore/types/src/state_diff.rs +++ b/ethcore/types/src/state_diff.rs @@ -16,40 +16,38 @@ //! State diff module. -use std::fmt; -use std::ops::*; -use std::collections::BTreeMap; -use ethereum_types::Address; use account_diff::*; +use ethereum_types::Address; +use std::{collections::BTreeMap, fmt, ops::*}; /// Expression for the delta between two system states. Encoded the /// delta of every altered account. #[derive(Debug, PartialEq, Eq, Clone)] pub struct StateDiff { - /// Raw diff key-value - pub raw: BTreeMap + /// Raw diff key-value + pub raw: BTreeMap, } impl StateDiff { - /// Get the actual data. - pub fn get(&self) -> &BTreeMap { - &self.raw - } + /// Get the actual data. 
+ pub fn get(&self) -> &BTreeMap { + &self.raw + } } impl fmt::Display for StateDiff { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (add, acc) in &self.raw { - write!(f, "{} {}: {}", acc.existance(), add, acc)?; - } - Ok(()) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for (add, acc) in &self.raw { + write!(f, "{} {}: {}", acc.existance(), add, acc)?; + } + Ok(()) + } } impl Deref for StateDiff { - type Target = BTreeMap; + type Target = BTreeMap; - fn deref(&self) -> &Self::Target { - &self.raw - } + fn deref(&self) -> &Self::Target { + &self.raw + } } diff --git a/ethcore/types/src/trace_filter.rs b/ethcore/types/src/trace_filter.rs index 8b1d715b4..ef93c2095 100644 --- a/ethcore/types/src/trace_filter.rs +++ b/ethcore/types/src/trace_filter.rs @@ -16,20 +16,20 @@ //! Trace filter related types -use std::ops::Range; use ethereum_types::Address; use ids::BlockId; +use std::ops::Range; /// Easy to use trace filter. pub struct Filter { - /// Range of filtering. - pub range: Range, - /// From address. - pub from_address: Vec
, - /// To address. - pub to_address: Vec
, - /// Output offset - pub after: Option, - /// Output amount - pub count: Option, + /// Range of filtering. + pub range: Range, + /// From address. + pub from_address: Vec
, + /// To address. + pub to_address: Vec
, + /// Output offset + pub after: Option, + /// Output amount + pub count: Option, } diff --git a/ethcore/types/src/transaction/error.rs b/ethcore/types/src/transaction/error.rs index 68c0b2c0f..8b0074a5e 100644 --- a/ethcore/types/src/transaction/error.rs +++ b/ethcore/types/src/transaction/error.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::{fmt, error}; +use std::{error, fmt}; use ethereum_types::U256; use ethkey; @@ -24,117 +24,123 @@ use unexpected::OutOfBounds; #[derive(Debug, PartialEq, Clone)] /// Errors concerning transaction processing. pub enum Error { - /// Transaction is already imported to the queue - AlreadyImported, - /// Transaction is not valid anymore (state already has higher nonce) - Old, - /// Transaction was not imported to the queue because limit has been reached. - LimitReached, - /// Transaction's gas price is below threshold. - InsufficientGasPrice { - /// Minimal expected gas price - minimal: U256, - /// Transaction gas price - got: U256, - }, - /// Transaction has too low fee - /// (there is already a transaction with the same sender-nonce but higher gas price) - TooCheapToReplace { - /// previous transaction's gas price - prev: Option, - /// new transaction's gas price - new: Option, - }, - /// Transaction's gas is below currently set minimal gas requirement. - InsufficientGas { - /// Minimal expected gas - minimal: U256, - /// Transaction gas - got: U256, - }, - /// Sender doesn't have enough funds to pay for this transaction - InsufficientBalance { - /// Senders balance - balance: U256, - /// Transaction cost - cost: U256, - }, - /// Transactions gas is higher then current gas limit - GasLimitExceeded { - /// Current gas limit - limit: U256, - /// Declared transaction gas - got: U256, - }, - /// Transaction's gas limit (aka gas) is invalid. - InvalidGasLimit(OutOfBounds), - /// Transaction sender is banned. 
- SenderBanned, - /// Transaction receipient is banned. - RecipientBanned, - /// Contract creation code is banned. - CodeBanned, - /// Invalid chain ID given. - InvalidChainId, - /// Not enough permissions given by permission contract. - NotAllowed, - /// Signature error - InvalidSignature(String), - /// Transaction too big - TooBig, - /// Invalid RLP encoding - InvalidRlp(String), + /// Transaction is already imported to the queue + AlreadyImported, + /// Transaction is not valid anymore (state already has higher nonce) + Old, + /// Transaction was not imported to the queue because limit has been reached. + LimitReached, + /// Transaction's gas price is below threshold. + InsufficientGasPrice { + /// Minimal expected gas price + minimal: U256, + /// Transaction gas price + got: U256, + }, + /// Transaction has too low fee + /// (there is already a transaction with the same sender-nonce but higher gas price) + TooCheapToReplace { + /// previous transaction's gas price + prev: Option, + /// new transaction's gas price + new: Option, + }, + /// Transaction's gas is below currently set minimal gas requirement. + InsufficientGas { + /// Minimal expected gas + minimal: U256, + /// Transaction gas + got: U256, + }, + /// Sender doesn't have enough funds to pay for this transaction + InsufficientBalance { + /// Senders balance + balance: U256, + /// Transaction cost + cost: U256, + }, + /// Transactions gas is higher then current gas limit + GasLimitExceeded { + /// Current gas limit + limit: U256, + /// Declared transaction gas + got: U256, + }, + /// Transaction's gas limit (aka gas) is invalid. + InvalidGasLimit(OutOfBounds), + /// Transaction sender is banned. + SenderBanned, + /// Transaction receipient is banned. + RecipientBanned, + /// Contract creation code is banned. + CodeBanned, + /// Invalid chain ID given. + InvalidChainId, + /// Not enough permissions given by permission contract. 
+ NotAllowed, + /// Signature error + InvalidSignature(String), + /// Transaction too big + TooBig, + /// Invalid RLP encoding + InvalidRlp(String), } impl From for Error { - fn from(err: ethkey::Error) -> Self { - Error::InvalidSignature(format!("{}", err)) - } + fn from(err: ethkey::Error) -> Self { + Error::InvalidSignature(format!("{}", err)) + } } impl From for Error { - fn from(err: rlp::DecoderError) -> Self { - Error::InvalidRlp(format!("{}", err)) - } + fn from(err: rlp::DecoderError) -> Self { + Error::InvalidRlp(format!("{}", err)) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::Error::*; - let msg = match *self { - AlreadyImported => "Already imported".into(), - Old => "No longer valid".into(), - TooCheapToReplace { prev, new } => - format!("Gas price too low to replace, previous tx gas: {:?}, new tx gas: {:?}", - prev, new - ), - LimitReached => "Transaction limit reached".into(), - InsufficientGasPrice { minimal, got } => - format!("Insufficient gas price. Min={}, Given={}", minimal, got), - InsufficientGas { minimal, got } => - format!("Insufficient gas. Min={}, Given={}", minimal, got), - InsufficientBalance { balance, cost } => - format!("Insufficient balance for transaction. Balance={}, Cost={}", - balance, cost), - GasLimitExceeded { limit, got } => - format!("Gas limit exceeded. Limit={}, Given={}", limit, got), - InvalidGasLimit(ref err) => format!("Invalid gas limit. 
{}", err), - SenderBanned => "Sender is temporarily banned.".into(), - RecipientBanned => "Recipient is temporarily banned.".into(), - CodeBanned => "Contract code is temporarily banned.".into(), - InvalidChainId => "Transaction of this chain ID is not allowed on this chain.".into(), - InvalidSignature(ref err) => format!("Transaction has invalid signature: {}.", err), - NotAllowed => "Sender does not have permissions to execute this type of transction".into(), - TooBig => "Transaction too big".into(), - InvalidRlp(ref err) => format!("Transaction has invalid RLP structure: {}.", err), - }; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::Error::*; + let msg = match *self { + AlreadyImported => "Already imported".into(), + Old => "No longer valid".into(), + TooCheapToReplace { prev, new } => format!( + "Gas price too low to replace, previous tx gas: {:?}, new tx gas: {:?}", + prev, new + ), + LimitReached => "Transaction limit reached".into(), + InsufficientGasPrice { minimal, got } => { + format!("Insufficient gas price. Min={}, Given={}", minimal, got) + } + InsufficientGas { minimal, got } => { + format!("Insufficient gas. Min={}, Given={}", minimal, got) + } + InsufficientBalance { balance, cost } => format!( + "Insufficient balance for transaction. Balance={}, Cost={}", + balance, cost + ), + GasLimitExceeded { limit, got } => { + format!("Gas limit exceeded. Limit={}, Given={}", limit, got) + } + InvalidGasLimit(ref err) => format!("Invalid gas limit. 
{}", err), + SenderBanned => "Sender is temporarily banned.".into(), + RecipientBanned => "Recipient is temporarily banned.".into(), + CodeBanned => "Contract code is temporarily banned.".into(), + InvalidChainId => "Transaction of this chain ID is not allowed on this chain.".into(), + InvalidSignature(ref err) => format!("Transaction has invalid signature: {}.", err), + NotAllowed => { + "Sender does not have permissions to execute this type of transction".into() + } + TooBig => "Transaction too big".into(), + InvalidRlp(ref err) => format!("Transaction has invalid RLP structure: {}.", err), + }; - f.write_fmt(format_args!("Transaction error ({})", msg)) - } + f.write_fmt(format_args!("Transaction error ({})", msg)) + } } impl error::Error for Error { - fn description(&self) -> &str { - "Transaction error" - } + fn description(&self) -> &str { + "Transaction error" + } } diff --git a/ethcore/types/src/transaction/mod.rs b/ethcore/types/src/transaction/mod.rs index 4b26b7dc1..a576f21fc 100644 --- a/ethcore/types/src/transaction/mod.rs +++ b/ethcore/types/src/transaction/mod.rs @@ -19,5 +19,4 @@ mod error; mod transaction; -pub use self::error::Error; -pub use self::transaction::*; +pub use self::{error::Error, transaction::*}; diff --git a/ethcore/types/src/transaction/transaction.rs b/ethcore/types/src/transaction/transaction.rs index 8153142eb..c0c66434a 100644 --- a/ethcore/types/src/transaction/transaction.rs +++ b/ethcore/types/src/transaction/transaction.rs @@ -18,12 +18,12 @@ use std::ops::Deref; -use ethereum_types::{H256, H160, Address, U256}; +use ethereum_types::{Address, H160, H256, U256}; use ethjson; -use ethkey::{self, Signature, Secret, Public, recover, public_to_address}; +use ethkey::{self, public_to_address, recover, Public, Secret, Signature}; use hash::keccak; use heapsize::HeapSizeOf; -use rlp::{self, RlpStream, Rlp, DecoderError, Encodable}; +use rlp::{self, DecoderError, Encodable, Rlp, RlpStream}; use transaction::error; @@ -34,667 +34,711 
@@ type BlockNumber = u64; pub const UNSIGNED_SENDER: Address = H160([0xff; 20]); /// System sender address for internal state updates. -pub const SYSTEM_ADDRESS: Address = H160([0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,0xff, 0xff, 0xff, 0xff,0xff, 0xff, 0xff, 0xff,0xff, 0xff, 0xff, 0xfe]); +pub const SYSTEM_ADDRESS: Address = H160([ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, +]); /// Transaction action type. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Action { - /// Create creates new contract. - Create, - /// Calls contract at given address. - /// In the case of a transfer, this is the receiver's address.' - Call(Address), + /// Create creates new contract. + Create, + /// Calls contract at given address. + /// In the case of a transfer, this is the receiver's address.' + Call(Address), } impl Default for Action { - fn default() -> Action { Action::Create } + fn default() -> Action { + Action::Create + } } impl rlp::Decodable for Action { - fn decode(rlp: &Rlp) -> Result { - if rlp.is_empty() { - if rlp.is_data() { - Ok(Action::Create) - } else { - Err(DecoderError::RlpExpectedToBeData) - } - } else { - Ok(Action::Call(rlp.as_val()?)) - } - } + fn decode(rlp: &Rlp) -> Result { + if rlp.is_empty() { + if rlp.is_data() { + Ok(Action::Create) + } else { + Err(DecoderError::RlpExpectedToBeData) + } + } else { + Ok(Action::Call(rlp.as_val()?)) + } + } } impl rlp::Encodable for Action { - fn rlp_append(&self, s: &mut RlpStream) { - match *self { - Action::Create => s.append_internal(&""), - Action::Call(ref addr) => s.append_internal(addr), - }; - } + fn rlp_append(&self, s: &mut RlpStream) { + match *self { + Action::Create => s.append_internal(&""), + Action::Call(ref addr) => s.append_internal(addr), + }; + } } /// Transaction activation condition. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Condition { - /// Valid at this block number or later. 
- Number(BlockNumber), - /// Valid at this unix time or later. - Timestamp(u64), + /// Valid at this block number or later. + Number(BlockNumber), + /// Valid at this unix time or later. + Timestamp(u64), } /// Replay protection logic for v part of transaction's signature pub mod signature { - /// Adds chain id into v - pub fn add_chain_replay_protection(v: u64, chain_id: Option) -> u64 { - v + if let Some(n) = chain_id { 35 + n * 2 } else { 27 } - } + /// Adds chain id into v + pub fn add_chain_replay_protection(v: u64, chain_id: Option) -> u64 { + v + if let Some(n) = chain_id { + 35 + n * 2 + } else { + 27 + } + } - /// Returns refined v - /// 0 if `v` would have been 27 under "Electrum" notation, 1 if 28 or 4 if invalid. - pub fn check_replay_protection(v: u64) -> u8 { - match v { - v if v == 27 => 0, - v if v == 28 => 1, - v if v >= 35 => ((v - 1) % 2) as u8, - _ => 4 - } - } + /// Returns refined v + /// 0 if `v` would have been 27 under "Electrum" notation, 1 if 28 or 4 if invalid. + pub fn check_replay_protection(v: u64) -> u8 { + match v { + v if v == 27 => 0, + v if v == 28 => 1, + v if v >= 35 => ((v - 1) % 2) as u8, + _ => 4, + } + } } /// A set of information describing an externally-originating message call /// or contract creation operation. #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct Transaction { - /// Nonce. - pub nonce: U256, - /// Gas price. - pub gas_price: U256, - /// Gas paid up front for transaction execution. - pub gas: U256, - /// Action, can be either call or contract create. - pub action: Action, - /// Transfered value. - pub value: U256, - /// Transaction data. - pub data: Bytes, + /// Nonce. + pub nonce: U256, + /// Gas price. + pub gas_price: U256, + /// Gas paid up front for transaction execution. + pub gas: U256, + /// Action, can be either call or contract create. + pub action: Action, + /// Transfered value. + pub value: U256, + /// Transaction data. 
+ pub data: Bytes, } impl Transaction { - /// Append object with a without signature into RLP stream - pub fn rlp_append_unsigned_transaction(&self, s: &mut RlpStream, chain_id: Option) { - s.begin_list(if chain_id.is_none() { 6 } else { 9 }); - s.append(&self.nonce); - s.append(&self.gas_price); - s.append(&self.gas); - s.append(&self.action); - s.append(&self.value); - s.append(&self.data); - if let Some(n) = chain_id { - s.append(&n); - s.append(&0u8); - s.append(&0u8); - } - } + /// Append object with a without signature into RLP stream + pub fn rlp_append_unsigned_transaction(&self, s: &mut RlpStream, chain_id: Option) { + s.begin_list(if chain_id.is_none() { 6 } else { 9 }); + s.append(&self.nonce); + s.append(&self.gas_price); + s.append(&self.gas); + s.append(&self.action); + s.append(&self.value); + s.append(&self.data); + if let Some(n) = chain_id { + s.append(&n); + s.append(&0u8); + s.append(&0u8); + } + } } impl HeapSizeOf for Transaction { - fn heap_size_of_children(&self) -> usize { - self.data.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.data.heap_size_of_children() + } } #[cfg(any(test, feature = "test-helpers"))] impl From for SignedTransaction { - fn from(t: ethjson::state::Transaction) -> Self { - let to: Option = t.to.into(); - let secret = t.secret.map(|s| Secret::from(s.0)); - let tx = Transaction { - nonce: t.nonce.into(), - gas_price: t.gas_price.into(), - gas: t.gas_limit.into(), - action: match to { - Some(to) => Action::Call(to.into()), - None => Action::Create - }, - value: t.value.into(), - data: t.data.into(), - }; - match secret { - Some(s) => tx.sign(&s, None), - None => tx.null_sign(1), - } - } + fn from(t: ethjson::state::Transaction) -> Self { + let to: Option = t.to.into(); + let secret = t.secret.map(|s| Secret::from(s.0)); + let tx = Transaction { + nonce: t.nonce.into(), + gas_price: t.gas_price.into(), + gas: t.gas_limit.into(), + action: match to { + Some(to) => Action::Call(to.into()), + 
None => Action::Create, + }, + value: t.value.into(), + data: t.data.into(), + }; + match secret { + Some(s) => tx.sign(&s, None), + None => tx.null_sign(1), + } + } } impl From for UnverifiedTransaction { - fn from(t: ethjson::transaction::Transaction) -> Self { - let to: Option = t.to.into(); - UnverifiedTransaction { - unsigned: Transaction { - nonce: t.nonce.into(), - gas_price: t.gas_price.into(), - gas: t.gas_limit.into(), - action: match to { - Some(to) => Action::Call(to.into()), - None => Action::Create - }, - value: t.value.into(), - data: t.data.into(), - }, - r: t.r.into(), - s: t.s.into(), - v: t.v.into(), - hash: 0.into(), - }.compute_hash() - } + fn from(t: ethjson::transaction::Transaction) -> Self { + let to: Option = t.to.into(); + UnverifiedTransaction { + unsigned: Transaction { + nonce: t.nonce.into(), + gas_price: t.gas_price.into(), + gas: t.gas_limit.into(), + action: match to { + Some(to) => Action::Call(to.into()), + None => Action::Create, + }, + value: t.value.into(), + data: t.data.into(), + }, + r: t.r.into(), + s: t.s.into(), + v: t.v.into(), + hash: 0.into(), + } + .compute_hash() + } } impl Transaction { - /// The message hash of the transaction. - pub fn hash(&self, chain_id: Option) -> H256 { - let mut stream = RlpStream::new(); - self.rlp_append_unsigned_transaction(&mut stream, chain_id); - keccak(stream.as_raw()) - } + /// The message hash of the transaction. + pub fn hash(&self, chain_id: Option) -> H256 { + let mut stream = RlpStream::new(); + self.rlp_append_unsigned_transaction(&mut stream, chain_id); + keccak(stream.as_raw()) + } - /// Signs the transaction as coming from `sender`. 
- pub fn sign(self, secret: &Secret, chain_id: Option) -> SignedTransaction { - let sig = ::ethkey::sign(secret, &self.hash(chain_id)) - .expect("data is valid and context has signing capabilities; qed"); - SignedTransaction::new(self.with_signature(sig, chain_id)) - .expect("secret is valid so it's recoverable") - } + /// Signs the transaction as coming from `sender`. + pub fn sign(self, secret: &Secret, chain_id: Option) -> SignedTransaction { + let sig = ::ethkey::sign(secret, &self.hash(chain_id)) + .expect("data is valid and context has signing capabilities; qed"); + SignedTransaction::new(self.with_signature(sig, chain_id)) + .expect("secret is valid so it's recoverable") + } - /// Signs the transaction with signature. - pub fn with_signature(self, sig: Signature, chain_id: Option) -> UnverifiedTransaction { - UnverifiedTransaction { - unsigned: self, - r: sig.r().into(), - s: sig.s().into(), - v: signature::add_chain_replay_protection(sig.v() as u64, chain_id), - hash: 0.into(), - }.compute_hash() - } + /// Signs the transaction with signature. + pub fn with_signature(self, sig: Signature, chain_id: Option) -> UnverifiedTransaction { + UnverifiedTransaction { + unsigned: self, + r: sig.r().into(), + s: sig.s().into(), + v: signature::add_chain_replay_protection(sig.v() as u64, chain_id), + hash: 0.into(), + } + .compute_hash() + } - /// Useful for test incorrectly signed transactions. - #[cfg(test)] - pub fn invalid_sign(self) -> UnverifiedTransaction { - UnverifiedTransaction { - unsigned: self, - r: U256::one(), - s: U256::one(), - v: 0, - hash: 0.into(), - }.compute_hash() - } + /// Useful for test incorrectly signed transactions. + #[cfg(test)] + pub fn invalid_sign(self) -> UnverifiedTransaction { + UnverifiedTransaction { + unsigned: self, + r: U256::one(), + s: U256::one(), + v: 0, + hash: 0.into(), + } + .compute_hash() + } - /// Specify the sender; this won't survive the serialize/deserialize process, but can be cloned. 
- pub fn fake_sign(self, from: Address) -> SignedTransaction { - SignedTransaction { - transaction: UnverifiedTransaction { - unsigned: self, - r: U256::one(), - s: U256::one(), - v: 0, - hash: 0.into(), - }.compute_hash(), - sender: from, - public: None, - } - } + /// Specify the sender; this won't survive the serialize/deserialize process, but can be cloned. + pub fn fake_sign(self, from: Address) -> SignedTransaction { + SignedTransaction { + transaction: UnverifiedTransaction { + unsigned: self, + r: U256::one(), + s: U256::one(), + v: 0, + hash: 0.into(), + } + .compute_hash(), + sender: from, + public: None, + } + } - /// Legacy EIP-86 compatible empty signature. - /// This method is used in json tests as well as - /// signature verification tests. - #[cfg(any(test, feature = "test-helpers"))] - pub fn null_sign(self, chain_id: u64) -> SignedTransaction { - SignedTransaction { - transaction: UnverifiedTransaction { - unsigned: self, - r: U256::zero(), - s: U256::zero(), - v: chain_id, - hash: 0.into(), - }.compute_hash(), - sender: UNSIGNED_SENDER, - public: None, - } - } + /// Legacy EIP-86 compatible empty signature. + /// This method is used in json tests as well as + /// signature verification tests. + #[cfg(any(test, feature = "test-helpers"))] + pub fn null_sign(self, chain_id: u64) -> SignedTransaction { + SignedTransaction { + transaction: UnverifiedTransaction { + unsigned: self, + r: U256::zero(), + s: U256::zero(), + v: chain_id, + hash: 0.into(), + } + .compute_hash(), + sender: UNSIGNED_SENDER, + public: None, + } + } } /// Signed transaction information without verified signature. #[derive(Debug, Clone, Eq, PartialEq)] pub struct UnverifiedTransaction { - /// Plain Transaction. - unsigned: Transaction, - /// The V field of the signature; the LS bit described which half of the curve our point falls - /// in. The MS bits describe which chain this transaction is for. If 27/28, its for all chains. 
- v: u64, - /// The R field of the signature; helps describe the point on the curve. - r: U256, - /// The S field of the signature; helps describe the point on the curve. - s: U256, - /// Hash of the transaction - hash: H256, + /// Plain Transaction. + unsigned: Transaction, + /// The V field of the signature; the LS bit described which half of the curve our point falls + /// in. The MS bits describe which chain this transaction is for. If 27/28, its for all chains. + v: u64, + /// The R field of the signature; helps describe the point on the curve. + r: U256, + /// The S field of the signature; helps describe the point on the curve. + s: U256, + /// Hash of the transaction + hash: H256, } impl HeapSizeOf for UnverifiedTransaction { - fn heap_size_of_children(&self) -> usize { - self.unsigned.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.unsigned.heap_size_of_children() + } } impl Deref for UnverifiedTransaction { - type Target = Transaction; + type Target = Transaction; - fn deref(&self) -> &Self::Target { - &self.unsigned - } + fn deref(&self) -> &Self::Target { + &self.unsigned + } } impl rlp::Decodable for UnverifiedTransaction { - fn decode(d: &Rlp) -> Result { - if d.item_count()? != 9 { - return Err(DecoderError::RlpIncorrectListLen); - } - let hash = keccak(d.as_raw()); - Ok(UnverifiedTransaction { - unsigned: Transaction { - nonce: d.val_at(0)?, - gas_price: d.val_at(1)?, - gas: d.val_at(2)?, - action: d.val_at(3)?, - value: d.val_at(4)?, - data: d.val_at(5)?, - }, - v: d.val_at(6)?, - r: d.val_at(7)?, - s: d.val_at(8)?, - hash, - }) - } + fn decode(d: &Rlp) -> Result { + if d.item_count()? 
!= 9 { + return Err(DecoderError::RlpIncorrectListLen); + } + let hash = keccak(d.as_raw()); + Ok(UnverifiedTransaction { + unsigned: Transaction { + nonce: d.val_at(0)?, + gas_price: d.val_at(1)?, + gas: d.val_at(2)?, + action: d.val_at(3)?, + value: d.val_at(4)?, + data: d.val_at(5)?, + }, + v: d.val_at(6)?, + r: d.val_at(7)?, + s: d.val_at(8)?, + hash, + }) + } } impl rlp::Encodable for UnverifiedTransaction { - fn rlp_append(&self, s: &mut RlpStream) { self.rlp_append_sealed_transaction(s) } + fn rlp_append(&self, s: &mut RlpStream) { + self.rlp_append_sealed_transaction(s) + } } impl UnverifiedTransaction { - /// Used to compute hash of created transactions - fn compute_hash(mut self) -> UnverifiedTransaction { - let hash = keccak(&*self.rlp_bytes()); - self.hash = hash; - self - } + /// Used to compute hash of created transactions + fn compute_hash(mut self) -> UnverifiedTransaction { + let hash = keccak(&*self.rlp_bytes()); + self.hash = hash; + self + } - /// Checks if the signature is empty. - pub fn is_unsigned(&self) -> bool { - self.r.is_zero() && self.s.is_zero() - } + /// Checks if the signature is empty. + pub fn is_unsigned(&self) -> bool { + self.r.is_zero() && self.s.is_zero() + } - /// Append object with a signature into RLP stream - fn rlp_append_sealed_transaction(&self, s: &mut RlpStream) { - s.begin_list(9); - s.append(&self.nonce); - s.append(&self.gas_price); - s.append(&self.gas); - s.append(&self.action); - s.append(&self.value); - s.append(&self.data); - s.append(&self.v); - s.append(&self.r); - s.append(&self.s); - } + /// Append object with a signature into RLP stream + fn rlp_append_sealed_transaction(&self, s: &mut RlpStream) { + s.begin_list(9); + s.append(&self.nonce); + s.append(&self.gas_price); + s.append(&self.gas); + s.append(&self.action); + s.append(&self.value); + s.append(&self.data); + s.append(&self.v); + s.append(&self.r); + s.append(&self.s); + } - /// Reference to unsigned part of this transaction. 
- pub fn as_unsigned(&self) -> &Transaction { - &self.unsigned - } + /// Reference to unsigned part of this transaction. + pub fn as_unsigned(&self) -> &Transaction { + &self.unsigned + } - /// Returns standardized `v` value (0, 1 or 4 (invalid)) - pub fn standard_v(&self) -> u8 { signature::check_replay_protection(self.v) } + /// Returns standardized `v` value (0, 1 or 4 (invalid)) + pub fn standard_v(&self) -> u8 { + signature::check_replay_protection(self.v) + } - /// The `v` value that appears in the RLP. - pub fn original_v(&self) -> u64 { self.v } + /// The `v` value that appears in the RLP. + pub fn original_v(&self) -> u64 { + self.v + } - /// The chain ID, or `None` if this is a global transaction. - pub fn chain_id(&self) -> Option { - match self.v { - v if self.is_unsigned() => Some(v), - v if v >= 35 => Some((v - 35) / 2), - _ => None, - } - } + /// The chain ID, or `None` if this is a global transaction. + pub fn chain_id(&self) -> Option { + match self.v { + v if self.is_unsigned() => Some(v), + v if v >= 35 => Some((v - 35) / 2), + _ => None, + } + } - /// Construct a signature object from the sig. - pub fn signature(&self) -> Signature { - Signature::from_rsv(&self.r.into(), &self.s.into(), self.standard_v()) - } + /// Construct a signature object from the sig. + pub fn signature(&self) -> Signature { + Signature::from_rsv(&self.r.into(), &self.s.into(), self.standard_v()) + } - /// Checks whether the signature has a low 's' value. - pub fn check_low_s(&self) -> Result<(), ethkey::Error> { - if !self.signature().is_low_s() { - Err(ethkey::Error::InvalidSignature.into()) - } else { - Ok(()) - } - } + /// Checks whether the signature has a low 's' value. + pub fn check_low_s(&self) -> Result<(), ethkey::Error> { + if !self.signature().is_low_s() { + Err(ethkey::Error::InvalidSignature.into()) + } else { + Ok(()) + } + } - /// Get the hash of this transaction (keccak of the RLP). 
- pub fn hash(&self) -> H256 { - self.hash - } + /// Get the hash of this transaction (keccak of the RLP). + pub fn hash(&self) -> H256 { + self.hash + } - /// Recovers the public key of the sender. - pub fn recover_public(&self) -> Result { - Ok(recover(&self.signature(), &self.unsigned.hash(self.chain_id()))?) - } + /// Recovers the public key of the sender. + pub fn recover_public(&self) -> Result { + Ok(recover( + &self.signature(), + &self.unsigned.hash(self.chain_id()), + )?) + } - /// Verify basic signature params. Does not attempt sender recovery. - pub fn verify_basic(&self, check_low_s: bool, chain_id: Option) -> Result<(), error::Error> { - if self.is_unsigned() { - return Err(ethkey::Error::InvalidSignature.into()); - } - if check_low_s { - self.check_low_s()?; - } - match (self.chain_id(), chain_id) { - (None, _) => {}, - (Some(n), Some(m)) if n == m => {}, - _ => return Err(error::Error::InvalidChainId), - }; - Ok(()) - } + /// Verify basic signature params. Does not attempt sender recovery. + pub fn verify_basic( + &self, + check_low_s: bool, + chain_id: Option, + ) -> Result<(), error::Error> { + if self.is_unsigned() { + return Err(ethkey::Error::InvalidSignature.into()); + } + if check_low_s { + self.check_low_s()?; + } + match (self.chain_id(), chain_id) { + (None, _) => {} + (Some(n), Some(m)) if n == m => {} + _ => return Err(error::Error::InvalidChainId), + }; + Ok(()) + } } /// A `UnverifiedTransaction` with successfully recovered `sender`. 
#[derive(Debug, Clone, Eq, PartialEq)] pub struct SignedTransaction { - transaction: UnverifiedTransaction, - sender: Address, - public: Option, + transaction: UnverifiedTransaction, + sender: Address, + public: Option, } impl HeapSizeOf for SignedTransaction { - fn heap_size_of_children(&self) -> usize { - self.transaction.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.transaction.heap_size_of_children() + } } impl rlp::Encodable for SignedTransaction { - fn rlp_append(&self, s: &mut RlpStream) { self.transaction.rlp_append_sealed_transaction(s) } + fn rlp_append(&self, s: &mut RlpStream) { + self.transaction.rlp_append_sealed_transaction(s) + } } impl Deref for SignedTransaction { - type Target = UnverifiedTransaction; - fn deref(&self) -> &Self::Target { - &self.transaction - } + type Target = UnverifiedTransaction; + fn deref(&self) -> &Self::Target { + &self.transaction + } } impl From for UnverifiedTransaction { - fn from(tx: SignedTransaction) -> Self { - tx.transaction - } + fn from(tx: SignedTransaction) -> Self { + tx.transaction + } } impl SignedTransaction { - /// Try to verify transaction and recover sender. - pub fn new(transaction: UnverifiedTransaction) -> Result { - if transaction.is_unsigned() { - return Err(ethkey::Error::InvalidSignature); - } - let public = transaction.recover_public()?; - let sender = public_to_address(&public); - Ok(SignedTransaction { - transaction, - sender, - public: Some(public), - }) - } + /// Try to verify transaction and recover sender. + pub fn new(transaction: UnverifiedTransaction) -> Result { + if transaction.is_unsigned() { + return Err(ethkey::Error::InvalidSignature); + } + let public = transaction.recover_public()?; + let sender = public_to_address(&public); + Ok(SignedTransaction { + transaction, + sender, + public: Some(public), + }) + } - /// Returns transaction sender. - pub fn sender(&self) -> Address { - self.sender - } + /// Returns transaction sender. 
+ pub fn sender(&self) -> Address { + self.sender + } - /// Returns a public key of the sender. - pub fn public_key(&self) -> Option { - self.public - } + /// Returns a public key of the sender. + pub fn public_key(&self) -> Option { + self.public + } - /// Checks is signature is empty. - pub fn is_unsigned(&self) -> bool { - self.transaction.is_unsigned() - } + /// Checks is signature is empty. + pub fn is_unsigned(&self) -> bool { + self.transaction.is_unsigned() + } - /// Deconstructs this transaction back into `UnverifiedTransaction` - pub fn deconstruct(self) -> (UnverifiedTransaction, Address, Option) { - (self.transaction, self.sender, self.public) - } + /// Deconstructs this transaction back into `UnverifiedTransaction` + pub fn deconstruct(self) -> (UnverifiedTransaction, Address, Option) { + (self.transaction, self.sender, self.public) + } } /// Signed Transaction that is a part of canon blockchain. #[derive(Debug, Clone, PartialEq, Eq)] pub struct LocalizedTransaction { - /// Signed part. - pub signed: UnverifiedTransaction, - /// Block number. - pub block_number: BlockNumber, - /// Block hash. - pub block_hash: H256, - /// Transaction index within block. - pub transaction_index: usize, - /// Cached sender - pub cached_sender: Option
, + /// Signed part. + pub signed: UnverifiedTransaction, + /// Block number. + pub block_number: BlockNumber, + /// Block hash. + pub block_hash: H256, + /// Transaction index within block. + pub transaction_index: usize, + /// Cached sender + pub cached_sender: Option
, } impl LocalizedTransaction { - /// Returns transaction sender. - /// Panics if `LocalizedTransaction` is constructed using invalid `UnverifiedTransaction`. - pub fn sender(&mut self) -> Address { - if let Some(sender) = self.cached_sender { - return sender; - } - if self.is_unsigned() { - return UNSIGNED_SENDER.clone(); - } - let sender = public_to_address(&self.recover_public() + /// Returns transaction sender. + /// Panics if `LocalizedTransaction` is constructed using invalid `UnverifiedTransaction`. + pub fn sender(&mut self) -> Address { + if let Some(sender) = self.cached_sender { + return sender; + } + if self.is_unsigned() { + return UNSIGNED_SENDER.clone(); + } + let sender = public_to_address(&self.recover_public() .expect("LocalizedTransaction is always constructed from transaction from blockchain; Blockchain only stores verified transactions; qed")); - self.cached_sender = Some(sender); - sender - } + self.cached_sender = Some(sender); + sender + } } impl Deref for LocalizedTransaction { - type Target = UnverifiedTransaction; + type Target = UnverifiedTransaction; - fn deref(&self) -> &Self::Target { - &self.signed - } + fn deref(&self) -> &Self::Target { + &self.signed + } } /// Queued transaction with additional information. #[derive(Debug, Clone, PartialEq, Eq)] pub struct PendingTransaction { - /// Signed transaction data. - pub transaction: SignedTransaction, - /// To be activated at this condition. `None` for immediately. - pub condition: Option, + /// Signed transaction data. + pub transaction: SignedTransaction, + /// To be activated at this condition. `None` for immediately. + pub condition: Option, } impl PendingTransaction { - /// Create a new pending transaction from signed transaction. - pub fn new(signed: SignedTransaction, condition: Option) -> Self { - PendingTransaction { - transaction: signed, - condition: condition, - } - } + /// Create a new pending transaction from signed transaction. 
+ pub fn new(signed: SignedTransaction, condition: Option) -> Self { + PendingTransaction { + transaction: signed, + condition: condition, + } + } } impl Deref for PendingTransaction { - type Target = SignedTransaction; + type Target = SignedTransaction; - fn deref(&self) -> &SignedTransaction { &self.transaction } + fn deref(&self) -> &SignedTransaction { + &self.transaction + } } impl From for PendingTransaction { - fn from(t: SignedTransaction) -> Self { - PendingTransaction { - transaction: t, - condition: None, - } - } + fn from(t: SignedTransaction) -> Self { + PendingTransaction { + transaction: t, + condition: None, + } + } } #[cfg(test)] mod tests { - use super::*; - use ethereum_types::U256; - use hash::keccak; + use super::*; + use ethereum_types::U256; + use hash::keccak; - #[test] - fn sender_test() { - let bytes = ::rustc_hex::FromHex::from_hex("f85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804").unwrap(); - let t: UnverifiedTransaction = rlp::decode(&bytes).expect("decoding UnverifiedTransaction failed"); - assert_eq!(t.data, b""); - assert_eq!(t.gas, U256::from(0x5208u64)); - assert_eq!(t.gas_price, U256::from(0x01u64)); - assert_eq!(t.nonce, U256::from(0x00u64)); - if let Action::Call(ref to) = t.action { - assert_eq!(*to, "095e7baea6a6c7c4c2dfeb977efac326af552d87".into()); - } else { panic!(); } - assert_eq!(t.value, U256::from(0x0au64)); - assert_eq!(public_to_address(&t.recover_public().unwrap()), "0f65fe9276bc9a24ae7083ae28e2660ef72df99e".into()); - assert_eq!(t.chain_id(), None); - } + #[test] + fn sender_test() { + let bytes = ::rustc_hex::FromHex::from_hex("f85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804").unwrap(); + let t: UnverifiedTransaction = + 
rlp::decode(&bytes).expect("decoding UnverifiedTransaction failed"); + assert_eq!(t.data, b""); + assert_eq!(t.gas, U256::from(0x5208u64)); + assert_eq!(t.gas_price, U256::from(0x01u64)); + assert_eq!(t.nonce, U256::from(0x00u64)); + if let Action::Call(ref to) = t.action { + assert_eq!(*to, "095e7baea6a6c7c4c2dfeb977efac326af552d87".into()); + } else { + panic!(); + } + assert_eq!(t.value, U256::from(0x0au64)); + assert_eq!( + public_to_address(&t.recover_public().unwrap()), + "0f65fe9276bc9a24ae7083ae28e2660ef72df99e".into() + ); + assert_eq!(t.chain_id(), None); + } - #[test] - fn empty_atom_as_create_action() { - let empty_atom = [0x80]; - let action: Action = rlp::decode(&empty_atom).unwrap(); - assert_eq!(action, Action::Create); - } + #[test] + fn empty_atom_as_create_action() { + let empty_atom = [0x80]; + let action: Action = rlp::decode(&empty_atom).unwrap(); + assert_eq!(action, Action::Create); + } - #[test] - fn empty_list_as_create_action_rejected() { - let empty_list = [0xc0]; - let action: Result = rlp::decode(&empty_list); - assert_eq!(action, Err(DecoderError::RlpExpectedToBeData)); - } + #[test] + fn empty_list_as_create_action_rejected() { + let empty_list = [0xc0]; + let action: Result = rlp::decode(&empty_list); + assert_eq!(action, Err(DecoderError::RlpExpectedToBeData)); + } - #[test] - fn signing_eip155_zero_chainid() { - use ethkey::{Random, Generator}; + #[test] + fn signing_eip155_zero_chainid() { + use ethkey::{Generator, Random}; - let key = Random.generate().unwrap(); - let t = Transaction { - action: Action::Create, - nonce: U256::from(42), - gas_price: U256::from(3000), - gas: U256::from(50_000), - value: U256::from(1), - data: b"Hello!".to_vec() - }; + let key = Random.generate().unwrap(); + let t = Transaction { + action: Action::Create, + nonce: U256::from(42), + gas_price: U256::from(3000), + gas: U256::from(50_000), + value: U256::from(1), + data: b"Hello!".to_vec(), + }; - let hash = t.hash(Some(0)); - let sig = 
::ethkey::sign(&key.secret(), &hash).unwrap(); - let u = t.with_signature(sig, Some(0)); + let hash = t.hash(Some(0)); + let sig = ::ethkey::sign(&key.secret(), &hash).unwrap(); + let u = t.with_signature(sig, Some(0)); - assert!(SignedTransaction::new(u).is_ok()); - } + assert!(SignedTransaction::new(u).is_ok()); + } - #[test] - fn signing() { - use ethkey::{Random, Generator}; + #[test] + fn signing() { + use ethkey::{Generator, Random}; - let key = Random.generate().unwrap(); - let t = Transaction { - action: Action::Create, - nonce: U256::from(42), - gas_price: U256::from(3000), - gas: U256::from(50_000), - value: U256::from(1), - data: b"Hello!".to_vec() - }.sign(&key.secret(), None); - assert_eq!(Address::from(keccak(key.public())), t.sender()); - assert_eq!(t.chain_id(), None); - } + let key = Random.generate().unwrap(); + let t = Transaction { + action: Action::Create, + nonce: U256::from(42), + gas_price: U256::from(3000), + gas: U256::from(50_000), + value: U256::from(1), + data: b"Hello!".to_vec(), + } + .sign(&key.secret(), None); + assert_eq!(Address::from(keccak(key.public())), t.sender()); + assert_eq!(t.chain_id(), None); + } - #[test] - fn fake_signing() { - let t = Transaction { - action: Action::Create, - nonce: U256::from(42), - gas_price: U256::from(3000), - gas: U256::from(50_000), - value: U256::from(1), - data: b"Hello!".to_vec() - }.fake_sign(Address::from(0x69)); - assert_eq!(Address::from(0x69), t.sender()); - assert_eq!(t.chain_id(), None); + #[test] + fn fake_signing() { + let t = Transaction { + action: Action::Create, + nonce: U256::from(42), + gas_price: U256::from(3000), + gas: U256::from(50_000), + value: U256::from(1), + data: b"Hello!".to_vec(), + } + .fake_sign(Address::from(0x69)); + assert_eq!(Address::from(0x69), t.sender()); + assert_eq!(t.chain_id(), None); - let t = t.clone(); - assert_eq!(Address::from(0x69), t.sender()); - assert_eq!(t.chain_id(), None); - } + let t = t.clone(); + assert_eq!(Address::from(0x69), 
t.sender()); + assert_eq!(t.chain_id(), None); + } - #[test] - fn should_reject_null_signature() { - use std::str::FromStr; - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(10000000000u64), - gas: U256::from(21000), - action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - value: U256::from(1), - data: vec![] - }.null_sign(1); + #[test] + fn should_reject_null_signature() { + use std::str::FromStr; + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(10000000000u64), + gas: U256::from(21000), + action: Action::Call( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + ), + value: U256::from(1), + data: vec![], + } + .null_sign(1); - let res = SignedTransaction::new(t.transaction); - match res { - Err(ethkey::Error::InvalidSignature) => {} - _ => panic!("null signature should be rejected"), - } - } + let res = SignedTransaction::new(t.transaction); + match res { + Err(ethkey::Error::InvalidSignature) => {} + _ => panic!("null signature should be rejected"), + } + } - #[test] - fn should_recover_from_chain_specific_signing() { - use ethkey::{Random, Generator}; - let key = Random.generate().unwrap(); - let t = Transaction { - action: Action::Create, - nonce: U256::from(42), - gas_price: U256::from(3000), - gas: U256::from(50_000), - value: U256::from(1), - data: b"Hello!".to_vec() - }.sign(&key.secret(), Some(69)); - assert_eq!(Address::from(keccak(key.public())), t.sender()); - assert_eq!(t.chain_id(), Some(69)); - } + #[test] + fn should_recover_from_chain_specific_signing() { + use ethkey::{Generator, Random}; + let key = Random.generate().unwrap(); + let t = Transaction { + action: Action::Create, + nonce: U256::from(42), + gas_price: U256::from(3000), + gas: U256::from(50_000), + value: U256::from(1), + data: b"Hello!".to_vec(), + } + .sign(&key.secret(), Some(69)); + assert_eq!(Address::from(keccak(key.public())), t.sender()); + assert_eq!(t.chain_id(), 
Some(69)); + } - #[test] - fn should_agree_with_vitalik() { - use rustc_hex::FromHex; + #[test] + fn should_agree_with_vitalik() { + use rustc_hex::FromHex; - let test_vector = |tx_data: &str, address: &'static str| { - let signed = rlp::decode(&FromHex::from_hex(tx_data).unwrap()).expect("decoding tx data failed"); - let signed = SignedTransaction::new(signed).unwrap(); - assert_eq!(signed.sender(), address.into()); - println!("chainid: {:?}", signed.chain_id()); - }; + let test_vector = |tx_data: &str, address: &'static str| { + let signed = + rlp::decode(&FromHex::from_hex(tx_data).unwrap()).expect("decoding tx data failed"); + let signed = SignedTransaction::new(signed).unwrap(); + assert_eq!(signed.sender(), address.into()); + println!("chainid: {:?}", signed.chain_id()); + }; - test_vector("f864808504a817c800825208943535353535353535353535353535353535353535808025a0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116da0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116d", "0xf0f6f18bca1b28cd68e4357452947e021241e9ce"); - test_vector("f864018504a817c80182a410943535353535353535353535353535353535353535018025a0489efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bcaa0489efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc6", "0x23ef145a395ea3fa3deb533b8a9e1b4c6c25d112"); - test_vector("f864028504a817c80282f618943535353535353535353535353535353535353535088025a02d7c5bef027816a800da1736444fb58a807ef4c9603b7848673f7e3a68eb14a5a02d7c5bef027816a800da1736444fb58a807ef4c9603b7848673f7e3a68eb14a5", "0x2e485e0c23b4c3c542628a5f672eeab0ad4888be"); - test_vector("f865038504a817c803830148209435353535353535353535353535353535353535351b8025a02a80e1ef1d7842f27f2e6be0972bb708b9a135c38860dbe73c27c3486c34f4e0a02a80e1ef1d7842f27f2e6be0972bb708b9a135c38860dbe73c27c3486c34f4de", "0x82a88539669a3fd524d669e858935de5e5410cf0"); - 
test_vector("f865048504a817c80483019a28943535353535353535353535353535353535353535408025a013600b294191fc92924bb3ce4b969c1e7e2bab8f4c93c3fc6d0a51733df3c063a013600b294191fc92924bb3ce4b969c1e7e2bab8f4c93c3fc6d0a51733df3c060", "0xf9358f2538fd5ccfeb848b64a96b743fcc930554"); - test_vector("f865058504a817c8058301ec309435353535353535353535353535353535353535357d8025a04eebf77a833b30520287ddd9478ff51abbdffa30aa90a8d655dba0e8a79ce0c1a04eebf77a833b30520287ddd9478ff51abbdffa30aa90a8d655dba0e8a79ce0c1", "0xa8f7aba377317440bc5b26198a363ad22af1f3a4"); - test_vector("f866068504a817c80683023e3894353535353535353535353535353535353535353581d88025a06455bf8ea6e7463a1046a0b52804526e119b4bf5136279614e0b1e8e296a4e2fa06455bf8ea6e7463a1046a0b52804526e119b4bf5136279614e0b1e8e296a4e2d", "0xf1f571dc362a0e5b2696b8e775f8491d3e50de35"); - test_vector("f867078504a817c807830290409435353535353535353535353535353535353535358201578025a052f1a9b320cab38e5da8a8f97989383aab0a49165fc91c737310e4f7e9821021a052f1a9b320cab38e5da8a8f97989383aab0a49165fc91c737310e4f7e9821021", "0xd37922162ab7cea97c97a87551ed02c9a38b7332"); - test_vector("f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", "0x9bddad43f934d313c2b79ca28a432dd2b7281029"); - test_vector("f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb", "0x3c24d7329e92f84f08556ceb6df1cdb0104ca49f"); - } + test_vector("f864808504a817c800825208943535353535353535353535353535353535353535808025a0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116da0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116d", "0xf0f6f18bca1b28cd68e4357452947e021241e9ce"); + 
test_vector("f864018504a817c80182a410943535353535353535353535353535353535353535018025a0489efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bcaa0489efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc6", "0x23ef145a395ea3fa3deb533b8a9e1b4c6c25d112"); + test_vector("f864028504a817c80282f618943535353535353535353535353535353535353535088025a02d7c5bef027816a800da1736444fb58a807ef4c9603b7848673f7e3a68eb14a5a02d7c5bef027816a800da1736444fb58a807ef4c9603b7848673f7e3a68eb14a5", "0x2e485e0c23b4c3c542628a5f672eeab0ad4888be"); + test_vector("f865038504a817c803830148209435353535353535353535353535353535353535351b8025a02a80e1ef1d7842f27f2e6be0972bb708b9a135c38860dbe73c27c3486c34f4e0a02a80e1ef1d7842f27f2e6be0972bb708b9a135c38860dbe73c27c3486c34f4de", "0x82a88539669a3fd524d669e858935de5e5410cf0"); + test_vector("f865048504a817c80483019a28943535353535353535353535353535353535353535408025a013600b294191fc92924bb3ce4b969c1e7e2bab8f4c93c3fc6d0a51733df3c063a013600b294191fc92924bb3ce4b969c1e7e2bab8f4c93c3fc6d0a51733df3c060", "0xf9358f2538fd5ccfeb848b64a96b743fcc930554"); + test_vector("f865058504a817c8058301ec309435353535353535353535353535353535353535357d8025a04eebf77a833b30520287ddd9478ff51abbdffa30aa90a8d655dba0e8a79ce0c1a04eebf77a833b30520287ddd9478ff51abbdffa30aa90a8d655dba0e8a79ce0c1", "0xa8f7aba377317440bc5b26198a363ad22af1f3a4"); + test_vector("f866068504a817c80683023e3894353535353535353535353535353535353535353581d88025a06455bf8ea6e7463a1046a0b52804526e119b4bf5136279614e0b1e8e296a4e2fa06455bf8ea6e7463a1046a0b52804526e119b4bf5136279614e0b1e8e296a4e2d", "0xf1f571dc362a0e5b2696b8e775f8491d3e50de35"); + test_vector("f867078504a817c807830290409435353535353535353535353535353535353535358201578025a052f1a9b320cab38e5da8a8f97989383aab0a49165fc91c737310e4f7e9821021a052f1a9b320cab38e5da8a8f97989383aab0a49165fc91c737310e4f7e9821021", "0xd37922162ab7cea97c97a87551ed02c9a38b7332"); + 
test_vector("f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", "0x9bddad43f934d313c2b79ca28a432dd2b7281029"); + test_vector("f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb", "0x3c24d7329e92f84f08556ceb6df1cdb0104ca49f"); + } } diff --git a/ethcore/types/src/tree_route.rs b/ethcore/types/src/tree_route.rs index 0386472b8..08a78465d 100644 --- a/ethcore/types/src/tree_route.rs +++ b/ethcore/types/src/tree_route.rs @@ -21,12 +21,12 @@ use ethereum_types::H256; /// Represents a tree route between `from` block and `to` block: #[derive(Debug)] pub struct TreeRoute { - /// A vector of hashes of all blocks, ordered from `from` to `to`. - pub blocks: Vec, - /// Best common ancestor of these blocks. - pub ancestor: H256, - /// An index where best common ancestor would be. - pub index: usize, - /// Whether it has finalized blocks from `from` (inclusive) to `ancestor` (exclusive). - pub is_from_route_finalized: bool, + /// A vector of hashes of all blocks, ordered from `from` to `to`. + pub blocks: Vec, + /// Best common ancestor of these blocks. + pub ancestor: H256, + /// An index where best common ancestor would be. + pub index: usize, + /// Whether it has finalized blocks from `from` (inclusive) to `ancestor` (exclusive). 
+ pub is_from_route_finalized: bool, } diff --git a/ethcore/types/src/verification_queue_info.rs b/ethcore/types/src/verification_queue_info.rs index a855fee6a..923670dab 100644 --- a/ethcore/types/src/verification_queue_info.rs +++ b/ethcore/types/src/verification_queue_info.rs @@ -19,35 +19,40 @@ /// Verification queue status #[derive(Debug, Clone)] pub struct VerificationQueueInfo { - /// Number of queued items pending verification - pub unverified_queue_size: usize, - /// Number of verified queued items pending import - pub verified_queue_size: usize, - /// Number of items being verified - pub verifying_queue_size: usize, - /// Configured maximum number of items in the queue - pub max_queue_size: usize, - /// Configured maximum number of bytes to use - pub max_mem_use: usize, - /// Heap memory used in bytes - pub mem_used: usize, + /// Number of queued items pending verification + pub unverified_queue_size: usize, + /// Number of verified queued items pending import + pub verified_queue_size: usize, + /// Number of items being verified + pub verifying_queue_size: usize, + /// Configured maximum number of items in the queue + pub max_queue_size: usize, + /// Configured maximum number of bytes to use + pub max_mem_use: usize, + /// Heap memory used in bytes + pub mem_used: usize, } impl VerificationQueueInfo { - /// The total size of the queues. - pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size } + /// The total size of the queues. + pub fn total_queue_size(&self) -> usize { + self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size + } - /// The size of the unverified and verifying queues. - pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size } + /// The size of the unverified and verifying queues. 
+ pub fn incomplete_queue_size(&self) -> usize { + self.unverified_queue_size + self.verifying_queue_size + } - /// Indicates that queue is full - pub fn is_full(&self) -> bool { - self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > self.max_queue_size || - self.mem_used > self.max_mem_use - } + /// Indicates that queue is full + pub fn is_full(&self) -> bool { + self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size + > self.max_queue_size + || self.mem_used > self.max_mem_use + } - /// Indicates that queue is empty - pub fn is_empty(&self) -> bool { - self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size == 0 - } + /// Indicates that queue is empty + pub fn is_empty(&self) -> bool { + self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size == 0 + } } diff --git a/ethcore/types/src/views/block.rs b/ethcore/types/src/views/block.rs index 9ad67ddd6..27cca98e1 100644 --- a/ethcore/types/src/views/block.rs +++ b/ethcore/types/src/views/block.rs @@ -16,177 +16,194 @@ //! View onto block rlp. +use super::ViewRlp; use bytes::Bytes; use ethereum_types::H256; use hash::keccak; use header::Header; -use transaction::{UnverifiedTransaction, LocalizedTransaction}; -use views::{TransactionView, HeaderView}; -use super::ViewRlp; +use transaction::{LocalizedTransaction, UnverifiedTransaction}; +use views::{HeaderView, TransactionView}; /// View onto block rlp. pub struct BlockView<'a> { - rlp: ViewRlp<'a> + rlp: ViewRlp<'a>, } impl<'a> BlockView<'a> { - /// Creates new view onto block from rlp. - /// Use the `view!` macro to create this view in order to capture debugging info. 
- /// - /// # Example - /// - /// ``` - /// #[macro_use] - /// extern crate common_types as types; - /// - /// use types::views::{BlockView}; - /// - /// fn main() { - /// let bytes : &[u8] = &[]; - /// let block_view = view!(BlockView, bytes); - /// } - /// ``` - pub fn new(rlp: ViewRlp<'a>) -> BlockView<'a> { - BlockView { - rlp: rlp - } - } + /// Creates new view onto block from rlp. + /// Use the `view!` macro to create this view in order to capture debugging info. + /// + /// # Example + /// + /// ``` + /// #[macro_use] + /// extern crate common_types as types; + /// + /// use types::views::{BlockView}; + /// + /// fn main() { + /// let bytes : &[u8] = &[]; + /// let block_view = view!(BlockView, bytes); + /// } + /// ``` + pub fn new(rlp: ViewRlp<'a>) -> BlockView<'a> { + BlockView { rlp: rlp } + } - /// Block header hash. - pub fn hash(&self) -> H256 { - self.header_view().hash() - } + /// Block header hash. + pub fn hash(&self) -> H256 { + self.header_view().hash() + } - /// Return reference to underlaying rlp. - pub fn rlp(&self) -> &ViewRlp<'a> { - &self.rlp - } + /// Return reference to underlaying rlp. + pub fn rlp(&self) -> &ViewRlp<'a> { + &self.rlp + } - /// Create new Header object from header rlp. - pub fn header(&self) -> Header { - self.rlp.val_at(0) - } + /// Create new Header object from header rlp. + pub fn header(&self) -> Header { + self.rlp.val_at(0) + } - /// Return header rlp. - pub fn header_rlp(&self) -> ViewRlp<'a> { - self.rlp.at(0) - } + /// Return header rlp. + pub fn header_rlp(&self) -> ViewRlp<'a> { + self.rlp.at(0) + } - /// Create new header view obto block head rlp. - pub fn header_view(&self) -> HeaderView<'a> { - HeaderView::new(self.header_rlp()) - } + /// Create new header view obto block head rlp. + pub fn header_view(&self) -> HeaderView<'a> { + HeaderView::new(self.header_rlp()) + } - /// Return List of transactions in given block. 
- pub fn transactions(&self) -> Vec { - self.rlp.list_at(1) - } + /// Return List of transactions in given block. + pub fn transactions(&self) -> Vec { + self.rlp.list_at(1) + } - /// Return List of transactions with additional localization info. - pub fn localized_transactions(&self) -> Vec { - let header = self.header_view(); - let block_hash = header.hash(); - let block_number = header.number(); - self.transactions() - .into_iter() - .enumerate() - .map(|(i, t)| LocalizedTransaction { - signed: t, - block_hash: block_hash.clone(), - block_number: block_number, - transaction_index: i, - cached_sender: None, - }).collect() - } + /// Return List of transactions with additional localization info. + pub fn localized_transactions(&self) -> Vec { + let header = self.header_view(); + let block_hash = header.hash(); + let block_number = header.number(); + self.transactions() + .into_iter() + .enumerate() + .map(|(i, t)| LocalizedTransaction { + signed: t, + block_hash: block_hash.clone(), + block_number: block_number, + transaction_index: i, + cached_sender: None, + }) + .collect() + } - /// Return the raw rlp for the transactions in the given block. - pub fn transactions_rlp(&self) -> ViewRlp<'a> { - self.rlp.at(1) - } + /// Return the raw rlp for the transactions in the given block. + pub fn transactions_rlp(&self) -> ViewRlp<'a> { + self.rlp.at(1) + } - /// Return number of transactions in given block, without deserializing them. - pub fn transactions_count(&self) -> usize { - self.transactions_rlp().iter().count() - } + /// Return number of transactions in given block, without deserializing them. + pub fn transactions_count(&self) -> usize { + self.transactions_rlp().iter().count() + } - /// Return List of transactions in given block. - pub fn transaction_views(&self) -> Vec> { - self.transactions_rlp().iter().map(TransactionView::new).collect() - } + /// Return List of transactions in given block. 
+ pub fn transaction_views(&self) -> Vec> { + self.transactions_rlp() + .iter() + .map(TransactionView::new) + .collect() + } - /// Return transaction hashes. - pub fn transaction_hashes(&self) -> Vec { - self.transactions_rlp().iter().map(|rlp| keccak(rlp.as_raw())).collect() - } + /// Return transaction hashes. + pub fn transaction_hashes(&self) -> Vec { + self.transactions_rlp() + .iter() + .map(|rlp| keccak(rlp.as_raw())) + .collect() + } - /// Returns transaction at given index without deserializing unnecessary data. - pub fn transaction_at(&self, index: usize) -> Option { - self.transactions_rlp().iter().nth(index).map(|rlp| rlp.as_val()) - } + /// Returns transaction at given index without deserializing unnecessary data. + pub fn transaction_at(&self, index: usize) -> Option { + self.transactions_rlp() + .iter() + .nth(index) + .map(|rlp| rlp.as_val()) + } - /// Returns localized transaction at given index. - pub fn localized_transaction_at(&self, index: usize) -> Option { - let header = self.header_view(); - let block_hash = header.hash(); - let block_number = header.number(); - self.transaction_at(index).map(|t| LocalizedTransaction { - signed: t, - block_hash: block_hash, - block_number: block_number, - transaction_index: index, - cached_sender: None, - }) - } + /// Returns localized transaction at given index. + pub fn localized_transaction_at(&self, index: usize) -> Option { + let header = self.header_view(); + let block_hash = header.hash(); + let block_number = header.number(); + self.transaction_at(index).map(|t| LocalizedTransaction { + signed: t, + block_hash: block_hash, + block_number: block_number, + transaction_index: index, + cached_sender: None, + }) + } - /// Returns raw rlp for the uncles in the given block - pub fn uncles_rlp(&self) -> ViewRlp<'a> { - self.rlp.at(2) - } + /// Returns raw rlp for the uncles in the given block + pub fn uncles_rlp(&self) -> ViewRlp<'a> { + self.rlp.at(2) + } - /// Return list of uncles of given block. 
- pub fn uncles(&self) -> Vec
{ - self.rlp.list_at(2) - } + /// Return list of uncles of given block. + pub fn uncles(&self) -> Vec
{ + self.rlp.list_at(2) + } - /// Return number of uncles in given block, without deserializing them. - pub fn uncles_count(&self) -> usize { - self.uncles_rlp().iter().count() - } + /// Return number of uncles in given block, without deserializing them. + pub fn uncles_count(&self) -> usize { + self.uncles_rlp().iter().count() + } - /// Return List of transactions in given block. - pub fn uncle_views(&self) -> Vec> { - self.uncles_rlp().iter().map(HeaderView::new).collect() - } + /// Return List of transactions in given block. + pub fn uncle_views(&self) -> Vec> { + self.uncles_rlp().iter().map(HeaderView::new).collect() + } - /// Return list of uncle hashes of given block. - pub fn uncle_hashes(&self) -> Vec { - self.uncles_rlp().iter().map(|rlp| keccak(rlp.as_raw())).collect() - } + /// Return list of uncle hashes of given block. + pub fn uncle_hashes(&self) -> Vec { + self.uncles_rlp() + .iter() + .map(|rlp| keccak(rlp.as_raw())) + .collect() + } - /// Return nth uncle. - pub fn uncle_at(&self, index: usize) -> Option
{ - self.uncles_rlp().iter().nth(index).map(|rlp| rlp.as_val()) - } + /// Return nth uncle. + pub fn uncle_at(&self, index: usize) -> Option
{ + self.uncles_rlp().iter().nth(index).map(|rlp| rlp.as_val()) + } - /// Return nth uncle rlp. - pub fn uncle_rlp_at(&self, index: usize) -> Option { - self.uncles_rlp().iter().nth(index).map(|rlp| rlp.as_raw().to_vec()) - } + /// Return nth uncle rlp. + pub fn uncle_rlp_at(&self, index: usize) -> Option { + self.uncles_rlp() + .iter() + .nth(index) + .map(|rlp| rlp.as_raw().to_vec()) + } } #[cfg(test)] mod tests { - use rustc_hex::FromHex; - use super::BlockView; + use super::BlockView; + use rustc_hex::FromHex; - #[test] - fn test_block_view() { - // that's rlp of block created with ethash engine. - let rlp = "f90261f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba03a347e72953c860f32b1eb2c78a680d8734b2ea08085d949d729479796f218d5a047ea6239d9e31ccac8af3366f5ca37184d26e7646e3191a3aeb81c4cf74de500c0".from_hex().unwrap(); + #[test] + fn test_block_view() { + // that's rlp of block created with ethash engine. 
+ let rlp = "f90261f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba03a347e72953c860f32b1eb2c78a680d8734b2ea08085d949d729479796f218d5a047ea6239d9e31ccac8af3366f5ca37184d26e7646e3191a3aeb81c4cf74de500c0".from_hex().unwrap(); - let view = view!(BlockView, &rlp); - assert_eq!(view.hash(), "2c9747e804293bd3f1a986484343f23bc88fd5be75dfe9d5c2860aff61e6f259".into()); - assert_eq!(view.transactions_count(), 1); - assert_eq!(view.uncles_count(), 0); - } + let view = view!(BlockView, &rlp); + assert_eq!( + view.hash(), + "2c9747e804293bd3f1a986484343f23bc88fd5be75dfe9d5c2860aff61e6f259".into() + ); + assert_eq!(view.transactions_count(), 1); + assert_eq!(view.uncles_count(), 0); + } } diff --git a/ethcore/types/src/views/body.rs b/ethcore/types/src/views/body.rs index 1ea4999b8..882fe7712 100644 --- a/ethcore/types/src/views/body.rs +++ b/ethcore/types/src/views/body.rs @@ -16,161 +16,184 @@ //! View onto block body rlp. 
+use super::ViewRlp; use bytes::Bytes; use ethereum_types::H256; use hash::keccak; use header::Header; use transaction::{LocalizedTransaction, UnverifiedTransaction}; -use views::{TransactionView, HeaderView}; -use super::ViewRlp; +use views::{HeaderView, TransactionView}; use BlockNumber; /// View onto block rlp. pub struct BodyView<'a> { - rlp: ViewRlp<'a> + rlp: ViewRlp<'a>, } impl<'a> BodyView<'a> { - /// Creates new view onto block body from rlp. - /// Use the `view!` macro to create this view in order to capture debugging info. - /// - /// # Example - /// - /// ``` - /// #[macro_use] - /// extern crate common_types as types; - /// - /// use types::views::{BodyView}; - /// - /// fn main() { - /// let bytes : &[u8] = &[]; - /// let body_view = view!(BodyView, bytes); - /// } - /// ``` - pub fn new(rlp: ViewRlp<'a>) -> BodyView<'a> { - BodyView { - rlp: rlp - } - } + /// Creates new view onto block body from rlp. + /// Use the `view!` macro to create this view in order to capture debugging info. + /// + /// # Example + /// + /// ``` + /// #[macro_use] + /// extern crate common_types as types; + /// + /// use types::views::{BodyView}; + /// + /// fn main() { + /// let bytes : &[u8] = &[]; + /// let body_view = view!(BodyView, bytes); + /// } + /// ``` + pub fn new(rlp: ViewRlp<'a>) -> BodyView<'a> { + BodyView { rlp: rlp } + } - /// Return reference to underlaying rlp. - pub fn rlp(&self) -> &ViewRlp<'a> { - &self.rlp - } + /// Return reference to underlaying rlp. + pub fn rlp(&self) -> &ViewRlp<'a> { + &self.rlp + } - /// Return List of transactions in given block. - pub fn transactions(&self) -> Vec { - self.rlp.list_at(0) - } + /// Return List of transactions in given block. + pub fn transactions(&self) -> Vec { + self.rlp.list_at(0) + } - /// Return List of transactions with additional localization info. 
- pub fn localized_transactions(&self, block_hash: &H256, block_number: BlockNumber) -> Vec { - self.transactions() - .into_iter() - .enumerate() - .map(|(i, t)| LocalizedTransaction { - signed: t, - block_hash: block_hash.clone(), - block_number: block_number, - transaction_index: i, - cached_sender: None, - }).collect() - } + /// Return List of transactions with additional localization info. + pub fn localized_transactions( + &self, + block_hash: &H256, + block_number: BlockNumber, + ) -> Vec { + self.transactions() + .into_iter() + .enumerate() + .map(|(i, t)| LocalizedTransaction { + signed: t, + block_hash: block_hash.clone(), + block_number: block_number, + transaction_index: i, + cached_sender: None, + }) + .collect() + } - /// Return the raw rlp for the transactions in the given block. - pub fn transactions_rlp(&self) -> ViewRlp<'a> { - self.rlp.at(0) - } + /// Return the raw rlp for the transactions in the given block. + pub fn transactions_rlp(&self) -> ViewRlp<'a> { + self.rlp.at(0) + } - /// Return number of transactions in given block, without deserializing them. - pub fn transactions_count(&self) -> usize { - self.transactions_rlp().item_count() - } - /// Return List of transactions in given block. - pub fn transaction_views(&self) -> Vec> { - self.transactions_rlp().iter().map(TransactionView::new).collect() - } + /// Return number of transactions in given block, without deserializing them. + pub fn transactions_count(&self) -> usize { + self.transactions_rlp().item_count() + } + /// Return List of transactions in given block. + pub fn transaction_views(&self) -> Vec> { + self.transactions_rlp() + .iter() + .map(TransactionView::new) + .collect() + } - /// Return transaction hashes. - pub fn transaction_hashes(&self) -> Vec { - self.transactions_rlp().iter().map(|rlp| keccak(rlp.as_raw())).collect() - } + /// Return transaction hashes. 
+ pub fn transaction_hashes(&self) -> Vec { + self.transactions_rlp() + .iter() + .map(|rlp| keccak(rlp.as_raw())) + .collect() + } - /// Returns transaction at given index without deserializing unnecessary data. - pub fn transaction_at(&self, index: usize) -> Option { - self.transactions_rlp().iter().nth(index).map(|rlp| rlp.as_val()) - } + /// Returns transaction at given index without deserializing unnecessary data. + pub fn transaction_at(&self, index: usize) -> Option { + self.transactions_rlp() + .iter() + .nth(index) + .map(|rlp| rlp.as_val()) + } - /// Returns localized transaction at given index. - pub fn localized_transaction_at(&self, block_hash: &H256, block_number: BlockNumber, index: usize) -> Option { - self.transaction_at(index).map(|t| LocalizedTransaction { - signed: t, - block_hash: block_hash.clone(), - block_number: block_number, - transaction_index: index, - cached_sender: None, - }) - } + /// Returns localized transaction at given index. + pub fn localized_transaction_at( + &self, + block_hash: &H256, + block_number: BlockNumber, + index: usize, + ) -> Option { + self.transaction_at(index).map(|t| LocalizedTransaction { + signed: t, + block_hash: block_hash.clone(), + block_number: block_number, + transaction_index: index, + cached_sender: None, + }) + } - /// Returns raw rlp for the uncles in the given block - pub fn uncles_rlp(&self) -> ViewRlp<'a> { - self.rlp.at(1) - } + /// Returns raw rlp for the uncles in the given block + pub fn uncles_rlp(&self) -> ViewRlp<'a> { + self.rlp.at(1) + } - /// Return list of uncles of given block. - pub fn uncles(&self) -> Vec
{ - self.rlp.list_at(1) - } + /// Return list of uncles of given block. + pub fn uncles(&self) -> Vec
{ + self.rlp.list_at(1) + } - /// Return number of uncles in given block, without deserializing them. - pub fn uncles_count(&self) -> usize { - self.uncles_rlp().item_count() - } + /// Return number of uncles in given block, without deserializing them. + pub fn uncles_count(&self) -> usize { + self.uncles_rlp().item_count() + } - /// Return List of transactions in given block. - pub fn uncle_views(&self) -> Vec> { - self.uncles_rlp().iter().map(HeaderView::new).collect() - } + /// Return List of transactions in given block. + pub fn uncle_views(&self) -> Vec> { + self.uncles_rlp().iter().map(HeaderView::new).collect() + } - /// Return list of uncle hashes of given block. - pub fn uncle_hashes(&self) -> Vec { - self.uncles_rlp().iter().map(|rlp| keccak(rlp.as_raw())).collect() - } + /// Return list of uncle hashes of given block. + pub fn uncle_hashes(&self) -> Vec { + self.uncles_rlp() + .iter() + .map(|rlp| keccak(rlp.as_raw())) + .collect() + } - /// Return nth uncle. - pub fn uncle_at(&self, index: usize) -> Option
{ - self.uncles_rlp().iter().nth(index).map(|rlp| rlp.as_val()) - } + /// Return nth uncle. + pub fn uncle_at(&self, index: usize) -> Option
{ + self.uncles_rlp().iter().nth(index).map(|rlp| rlp.as_val()) + } - /// Return nth uncle rlp. - pub fn uncle_rlp_at(&self, index: usize) -> Option { - self.uncles_rlp().iter().nth(index).map(|rlp| rlp.as_raw().to_vec()) - } + /// Return nth uncle rlp. + pub fn uncle_rlp_at(&self, index: usize) -> Option { + self.uncles_rlp() + .iter() + .nth(index) + .map(|rlp| rlp.as_raw().to_vec()) + } } #[cfg(test)] mod tests { - use bytes::Bytes; - use rlp::RlpStream; - use rustc_hex::FromHex; - use super::BodyView; - use views::BlockView; + use super::BodyView; + use bytes::Bytes; + use rlp::RlpStream; + use rustc_hex::FromHex; + use views::BlockView; - fn block_to_body(block: &[u8]) -> Bytes { - let mut body = RlpStream::new_list(2); - let block_view = view!(BlockView, block); - body.append_raw(block_view.transactions_rlp().as_raw(), 1); - body.append_raw(block_view.uncles_rlp().as_raw(), 1); - body.out() - } + fn block_to_body(block: &[u8]) -> Bytes { + let mut body = RlpStream::new_list(2); + let block_view = view!(BlockView, block); + body.append_raw(block_view.transactions_rlp().as_raw(), 1); + body.append_raw(block_view.uncles_rlp().as_raw(), 1); + body.out() + } - #[test] - fn test_block_view() { - // that's rlp of block created with ethash engine. 
- let rlp = "f90261f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba03a347e72953c860f32b1eb2c78a680d8734b2ea08085d949d729479796f218d5a047ea6239d9e31ccac8af3366f5ca37184d26e7646e3191a3aeb81c4cf74de500c0".from_hex().unwrap(); - let body = block_to_body(&rlp); - let view = view!(BodyView, &body); - assert_eq!(view.transactions_count(), 1); - assert_eq!(view.uncles_count(), 0); - } + #[test] + fn test_block_view() { + // that's rlp of block created with ethash engine. 
+ let rlp = "f90261f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba03a347e72953c860f32b1eb2c78a680d8734b2ea08085d949d729479796f218d5a047ea6239d9e31ccac8af3366f5ca37184d26e7646e3191a3aeb81c4cf74de500c0".from_hex().unwrap(); + let body = block_to_body(&rlp); + let view = view!(BodyView, &body); + assert_eq!(view.transactions_count(), 1); + assert_eq!(view.uncles_count(), 0); + } } diff --git a/ethcore/types/src/views/header.rs b/ethcore/types/src/views/header.rs index 400989291..ffa6e04ab 100644 --- a/ethcore/types/src/views/header.rs +++ b/ethcore/types/src/views/header.rs @@ -16,135 +16,183 @@ //! View onto block header rlp +use super::ViewRlp; use bytes::Bytes; -use ethereum_types::{H256, Bloom, U256, Address}; +use ethereum_types::{Address, Bloom, H256, U256}; use hash::keccak; use rlp::{self}; -use super::ViewRlp; use BlockNumber; /// View onto block header rlp. 
pub struct HeaderView<'a> { - rlp: ViewRlp<'a> + rlp: ViewRlp<'a>, } impl<'a> HeaderView<'a> { - /// Creates a new Header view from valid ViewRlp - /// Use the `view!` macro to create this view in order to capture debugging info. - /// - /// # Example - /// - /// ``` - /// #[macro_use] - /// extern crate common_types as types; - /// - /// use types::views::{HeaderView}; - /// - /// fn main() { - /// let bytes : &[u8] = &[]; - /// let tx_view = view!(HeaderView, bytes); - /// } - /// ``` - pub fn new(rlp: ViewRlp<'a>) -> HeaderView<'a> { - HeaderView { - rlp - } - } + /// Creates a new Header view from valid ViewRlp + /// Use the `view!` macro to create this view in order to capture debugging info. + /// + /// # Example + /// + /// ``` + /// #[macro_use] + /// extern crate common_types as types; + /// + /// use types::views::{HeaderView}; + /// + /// fn main() { + /// let bytes : &[u8] = &[]; + /// let tx_view = view!(HeaderView, bytes); + /// } + /// ``` + pub fn new(rlp: ViewRlp<'a>) -> HeaderView<'a> { + HeaderView { rlp } + } - /// Returns header hash. - pub fn hash(&self) -> H256 { - keccak(self.rlp.rlp.as_raw()) - } + /// Returns header hash. + pub fn hash(&self) -> H256 { + keccak(self.rlp.rlp.as_raw()) + } - /// Returns raw rlp. - pub fn rlp(&self) -> &ViewRlp<'a> { &self.rlp } + /// Returns raw rlp. + pub fn rlp(&self) -> &ViewRlp<'a> { + &self.rlp + } - /// Returns parent hash. - pub fn parent_hash(&self) -> H256 { self.rlp.val_at(0) } + /// Returns parent hash. + pub fn parent_hash(&self) -> H256 { + self.rlp.val_at(0) + } - /// Returns uncles hash. - pub fn uncles_hash(&self) -> H256 { self.rlp.val_at(1) } + /// Returns uncles hash. + pub fn uncles_hash(&self) -> H256 { + self.rlp.val_at(1) + } - /// Returns author. - pub fn author(&self) -> Address { self.rlp.val_at(2) } + /// Returns author. + pub fn author(&self) -> Address { + self.rlp.val_at(2) + } - /// Returns state root. 
- pub fn state_root(&self) -> H256 { self.rlp.val_at(3) } + /// Returns state root. + pub fn state_root(&self) -> H256 { + self.rlp.val_at(3) + } - /// Returns transactions root. - pub fn transactions_root(&self) -> H256 { self.rlp.val_at(4) } + /// Returns transactions root. + pub fn transactions_root(&self) -> H256 { + self.rlp.val_at(4) + } - /// Returns block receipts root. - pub fn receipts_root(&self) -> H256 { self.rlp.val_at(5) } + /// Returns block receipts root. + pub fn receipts_root(&self) -> H256 { + self.rlp.val_at(5) + } - /// Returns block log bloom. - pub fn log_bloom(&self) -> Bloom { self.rlp.val_at(6) } + /// Returns block log bloom. + pub fn log_bloom(&self) -> Bloom { + self.rlp.val_at(6) + } - /// Returns block difficulty. - pub fn difficulty(&self) -> U256 { self.rlp.val_at(7) } + /// Returns block difficulty. + pub fn difficulty(&self) -> U256 { + self.rlp.val_at(7) + } - /// Returns block number. - pub fn number(&self) -> BlockNumber { self.rlp.val_at(8) } + /// Returns block number. + pub fn number(&self) -> BlockNumber { + self.rlp.val_at(8) + } - /// Returns block gas limit. - pub fn gas_limit(&self) -> U256 { self.rlp.val_at(9) } + /// Returns block gas limit. + pub fn gas_limit(&self) -> U256 { + self.rlp.val_at(9) + } - /// Returns block gas used. - pub fn gas_used(&self) -> U256 { self.rlp.val_at(10) } + /// Returns block gas used. + pub fn gas_used(&self) -> U256 { + self.rlp.val_at(10) + } - /// Returns timestamp. - pub fn timestamp(&self) -> u64 { self.rlp.val_at(11) } + /// Returns timestamp. + pub fn timestamp(&self) -> u64 { + self.rlp.val_at(11) + } - /// Returns block extra data. - pub fn extra_data(&self) -> Bytes { self.rlp.val_at(12) } + /// Returns block extra data. + pub fn extra_data(&self) -> Bytes { + self.rlp.val_at(12) + } - /// Returns a vector of post-RLP-encoded seal fields. 
- pub fn seal(&self) -> Vec { - let mut seal = vec![]; - for i in 13..self.rlp.item_count() { - seal.push(self.rlp.at(i).as_raw().to_vec()); - } - seal - } - - /// Returns a vector of seal fields (RLP-decoded). - pub fn decode_seal(&self) -> Result, rlp::DecoderError> { - let seal = self.seal(); - seal.into_iter() - .map(|s| rlp::Rlp::new(&s).data().map(|x| x.to_vec())) - .collect() - } + /// Returns a vector of post-RLP-encoded seal fields. + pub fn seal(&self) -> Vec { + let mut seal = vec![]; + for i in 13..self.rlp.item_count() { + seal.push(self.rlp.at(i).as_raw().to_vec()); + } + seal + } + /// Returns a vector of seal fields (RLP-decoded). + pub fn decode_seal(&self) -> Result, rlp::DecoderError> { + let seal = self.seal(); + seal.into_iter() + .map(|s| rlp::Rlp::new(&s).data().map(|x| x.to_vec())) + .collect() + } } #[cfg(test)] mod tests { - use rustc_hex::FromHex; - use ethereum_types::Bloom; - use super::HeaderView; + use super::HeaderView; + use ethereum_types::Bloom; + use rustc_hex::FromHex; - #[test] - fn test_header_view() { - // that's rlp of block header created with ethash engine. 
- let rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap(); - let mix_hash = "a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd".from_hex().unwrap(); - let nonce = "88ab4e252a7e8c2a23".from_hex().unwrap(); + #[test] + fn test_header_view() { + // that's rlp of block header created with ethash engine. 
+ let rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap(); + let mix_hash = "a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd" + .from_hex() + .unwrap(); + let nonce = "88ab4e252a7e8c2a23".from_hex().unwrap(); - let view = view!(HeaderView, &rlp); - assert_eq!(view.hash(), "2c9747e804293bd3f1a986484343f23bc88fd5be75dfe9d5c2860aff61e6f259".into()); - assert_eq!(view.parent_hash(), "d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7".into()); - assert_eq!(view.uncles_hash(), "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347".into()); - assert_eq!(view.author(), "8888f1f195afa192cfee860698584c030f4c9db1".into()); - assert_eq!(view.state_root(), "5fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25".into()); - assert_eq!(view.transactions_root(), "88d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158".into()); - assert_eq!(view.receipts_root(), "07c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1".into()); - 
assert_eq!(view.log_bloom(), Bloom::default()); - assert_eq!(view.difficulty(), 0x020080.into()); - assert_eq!(view.number(), 3); - assert_eq!(view.gas_limit(), 0x2fefba.into()); - assert_eq!(view.gas_used(), 0x524d.into()); - assert_eq!(view.timestamp(), 0x56_8e_93_2a); - assert_eq!(view.extra_data(), vec![] as Vec); - assert_eq!(view.seal(), vec![mix_hash, nonce]); - } + let view = view!(HeaderView, &rlp); + assert_eq!( + view.hash(), + "2c9747e804293bd3f1a986484343f23bc88fd5be75dfe9d5c2860aff61e6f259".into() + ); + assert_eq!( + view.parent_hash(), + "d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7".into() + ); + assert_eq!( + view.uncles_hash(), + "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347".into() + ); + assert_eq!( + view.author(), + "8888f1f195afa192cfee860698584c030f4c9db1".into() + ); + assert_eq!( + view.state_root(), + "5fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25".into() + ); + assert_eq!( + view.transactions_root(), + "88d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158".into() + ); + assert_eq!( + view.receipts_root(), + "07c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1".into() + ); + assert_eq!(view.log_bloom(), Bloom::default()); + assert_eq!(view.difficulty(), 0x020080.into()); + assert_eq!(view.number(), 3); + assert_eq!(view.gas_limit(), 0x2fefba.into()); + assert_eq!(view.gas_used(), 0x524d.into()); + assert_eq!(view.timestamp(), 0x56_8e_93_2a); + assert_eq!(view.extra_data(), vec![] as Vec); + assert_eq!(view.seal(), vec![mix_hash, nonce]); + } } diff --git a/ethcore/types/src/views/mod.rs b/ethcore/types/src/views/mod.rs index f5c2eab94..ac3ef5e18 100644 --- a/ethcore/types/src/views/mod.rs +++ b/ethcore/types/src/views/mod.rs @@ -23,19 +23,18 @@ mod body; mod header; mod transaction; -pub use self::view_rlp::ViewRlp; -pub use self::block::BlockView; -pub use self::body::BodyView; -pub use self::header::HeaderView; -pub use 
self::transaction::TransactionView; +pub use self::{ + block::BlockView, body::BodyView, header::HeaderView, transaction::TransactionView, + view_rlp::ViewRlp, +}; #[cfg(test)] mod tests { - use super::HeaderView; + use super::HeaderView; - #[test] - #[should_panic] - fn should_include_file_line_number_in_panic_for_invalid_rlp() { - let _ = view!(HeaderView, &[]).parent_hash(); - } + #[test] + #[should_panic] + fn should_include_file_line_number_in_panic_for_invalid_rlp() { + let _ = view!(HeaderView, &[]).parent_hash(); + } } diff --git a/ethcore/types/src/views/transaction.rs b/ethcore/types/src/views/transaction.rs index b7d412f6c..661807b7e 100644 --- a/ethcore/types/src/views/transaction.rs +++ b/ethcore/types/src/views/transaction.rs @@ -15,91 +15,117 @@ // along with Parity Ethereum. If not, see . //! View onto transaction rlp +use super::ViewRlp; use bytes::Bytes; use ethereum_types::{H256, U256}; use hash::keccak; -use super::ViewRlp; /// View onto transaction rlp. pub struct TransactionView<'a> { - rlp: ViewRlp<'a> + rlp: ViewRlp<'a>, } impl<'a> TransactionView<'a> { - /// Creates new view onto valid transaction rlp. - /// Use the `view!` macro to create this view in order to capture debugging info. - /// - /// # Example - /// - /// ``` - /// #[macro_use] - /// extern crate common_types as types; - /// - /// use types::views::{TransactionView}; - /// - /// fn main() { - /// let bytes : &[u8] = &[]; - /// let tx_view = view!(TransactionView, bytes); - /// } - /// ``` - pub fn new(rlp: ViewRlp<'a>) -> TransactionView<'a> { - TransactionView { - rlp: rlp - } - } + /// Creates new view onto valid transaction rlp. + /// Use the `view!` macro to create this view in order to capture debugging info. 
+ /// + /// # Example + /// + /// ``` + /// #[macro_use] + /// extern crate common_types as types; + /// + /// use types::views::{TransactionView}; + /// + /// fn main() { + /// let bytes : &[u8] = &[]; + /// let tx_view = view!(TransactionView, bytes); + /// } + /// ``` + pub fn new(rlp: ViewRlp<'a>) -> TransactionView<'a> { + TransactionView { rlp: rlp } + } - /// Return reference to underlaying rlp. - pub fn rlp(&self) -> &ViewRlp<'a> { - &self.rlp - } + /// Return reference to underlaying rlp. + pub fn rlp(&self) -> &ViewRlp<'a> { + &self.rlp + } - /// Returns transaction hash. - pub fn hash(&self) -> H256 { - keccak(self.rlp.as_raw()) - } + /// Returns transaction hash. + pub fn hash(&self) -> H256 { + keccak(self.rlp.as_raw()) + } - /// Get the nonce field of the transaction. - pub fn nonce(&self) -> U256 { self.rlp.val_at(0) } + /// Get the nonce field of the transaction. + pub fn nonce(&self) -> U256 { + self.rlp.val_at(0) + } - /// Get the gas_price field of the transaction. - pub fn gas_price(&self) -> U256 { self.rlp.val_at(1) } + /// Get the gas_price field of the transaction. + pub fn gas_price(&self) -> U256 { + self.rlp.val_at(1) + } - /// Get the gas field of the transaction. - pub fn gas(&self) -> U256 { self.rlp.val_at(2) } + /// Get the gas field of the transaction. + pub fn gas(&self) -> U256 { + self.rlp.val_at(2) + } - /// Get the value field of the transaction. - pub fn value(&self) -> U256 { self.rlp.val_at(4) } + /// Get the value field of the transaction. + pub fn value(&self) -> U256 { + self.rlp.val_at(4) + } - /// Get the data field of the transaction. - pub fn data(&self) -> Bytes { self.rlp.val_at(5) } + /// Get the data field of the transaction. + pub fn data(&self) -> Bytes { + self.rlp.val_at(5) + } - /// Get the v field of the transaction. - pub fn v(&self) -> u8 { let r: u16 = self.rlp.val_at(6); r as u8 } + /// Get the v field of the transaction. 
+ pub fn v(&self) -> u8 { + let r: u16 = self.rlp.val_at(6); + r as u8 + } - /// Get the r field of the transaction. - pub fn r(&self) -> U256 { self.rlp.val_at(7) } + /// Get the r field of the transaction. + pub fn r(&self) -> U256 { + self.rlp.val_at(7) + } - /// Get the s field of the transaction. - pub fn s(&self) -> U256 { self.rlp.val_at(8) } + /// Get the s field of the transaction. + pub fn s(&self) -> U256 { + self.rlp.val_at(8) + } } #[cfg(test)] mod tests { - use rustc_hex::FromHex; - use super::TransactionView; + use super::TransactionView; + use rustc_hex::FromHex; - #[test] - fn test_transaction_view() { - let rlp = "f87c80018261a894095e7baea6a6c7c4c2dfeb977efac326af552d870a9d00000000000000000000000000000000000000000000000000000000001ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804".from_hex().unwrap(); + #[test] + fn test_transaction_view() { + let rlp = "f87c80018261a894095e7baea6a6c7c4c2dfeb977efac326af552d870a9d00000000000000000000000000000000000000000000000000000000001ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804".from_hex().unwrap(); - let view = view!(TransactionView, &rlp); - assert_eq!(view.nonce(), 0.into()); - assert_eq!(view.gas_price(), 1.into()); - assert_eq!(view.gas(), 0x61a8.into()); - assert_eq!(view.value(), 0xa.into()); - assert_eq!(view.data(), "0000000000000000000000000000000000000000000000000000000000".from_hex().unwrap()); - assert_eq!(view.r(), "48b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353".into()); - assert_eq!(view.s(), "efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804".into()); - assert_eq!(view.v(), 0x1b); - } + let view = view!(TransactionView, &rlp); + assert_eq!(view.nonce(), 0.into()); + assert_eq!(view.gas_price(), 1.into()); + assert_eq!(view.gas(), 0x61a8.into()); + assert_eq!(view.value(), 0xa.into()); + 
assert_eq!( + view.data(), + "0000000000000000000000000000000000000000000000000000000000" + .from_hex() + .unwrap() + ); + assert_eq!( + view.r(), + "48b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353".into() + ); + assert_eq!( + view.s(), + "efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804".into() + ); + assert_eq!(view.v(), 0x1b); + } } diff --git a/ethcore/types/src/views/view_rlp.rs b/ethcore/types/src/views/view_rlp.rs index a6c789de9..0d5aa5ba5 100644 --- a/ethcore/types/src/views/view_rlp.rs +++ b/ethcore/types/src/views/view_rlp.rs @@ -16,120 +16,136 @@ //! Wrapper for view rlp expected to be valid with debug info -use rlp::{Rlp, Decodable, DecoderError}; +use rlp::{Decodable, DecoderError, Rlp}; /// Wrapper for trusted rlp, which is expected to be valid, for use in views /// When created with view!, records the file and line where it was created for debugging pub struct ViewRlp<'a> { - /// Wrapped Rlp, expected to be valid - pub rlp: Rlp<'a>, - file: &'a str, - line: u32, + /// Wrapped Rlp, expected to be valid + pub rlp: Rlp<'a>, + file: &'a str, + line: u32, } -impl<'a, 'view> ViewRlp<'a> where 'a : 'view { - #[doc(hidden)] - pub fn new(bytes: &'a [u8], file: &'a str, line: u32) -> Self { - ViewRlp { - rlp: Rlp::new(bytes), - file, - line - } - } +impl<'a, 'view> ViewRlp<'a> +where + 'a: 'view, +{ + #[doc(hidden)] + pub fn new(bytes: &'a [u8], file: &'a str, line: u32) -> Self { + ViewRlp { + rlp: Rlp::new(bytes), + file, + line, + } + } - /// Returns a new instance replacing existing rlp with new rlp, maintaining debug info - fn new_from_rlp(&self, rlp: Rlp<'a>) -> Self { - ViewRlp { - rlp, - file: self.file, - line: self.line - } - } + /// Returns a new instance replacing existing rlp with new rlp, maintaining debug info + fn new_from_rlp(&self, rlp: Rlp<'a>) -> Self { + ViewRlp { + rlp, + file: self.file, + line: self.line, + } + } - fn maybe_at(&self, index: usize) -> Option> { - self.rlp.at(index) - .map(|rlp| 
self.new_from_rlp(rlp)) - .ok() - } + fn maybe_at(&self, index: usize) -> Option> { + self.rlp.at(index).map(|rlp| self.new_from_rlp(rlp)).ok() + } - fn expect_valid_rlp(&self, r: Result) -> T { - r.unwrap_or_else(|e| panic!( - "View rlp is trusted and should be valid. Constructed in {} on line {}: {}", - self.file, - self.line, - e - )) - } + fn expect_valid_rlp(&self, r: Result) -> T { + r.unwrap_or_else(|e| { + panic!( + "View rlp is trusted and should be valid. Constructed in {} on line {}: {}", + self.file, self.line, e + ) + }) + } - /// Returns rlp at the given index, panics if no rlp at that index - pub fn at(&self, index: usize) -> ViewRlp<'a> { - let rlp = self.expect_valid_rlp(self.rlp.at(index)); - self.new_from_rlp(rlp) - } + /// Returns rlp at the given index, panics if no rlp at that index + pub fn at(&self, index: usize) -> ViewRlp<'a> { + let rlp = self.expect_valid_rlp(self.rlp.at(index)); + self.new_from_rlp(rlp) + } - /// Returns an iterator over all rlp values - pub fn iter(&'view self) -> ViewRlpIterator<'a, 'view> { - self.into_iter() - } + /// Returns an iterator over all rlp values + pub fn iter(&'view self) -> ViewRlpIterator<'a, 'view> { + self.into_iter() + } - /// Returns decoded value of this rlp, panics if rlp not valid - pub fn as_val(&self) -> T where T: Decodable { - self.expect_valid_rlp(self.rlp.as_val()) - } + /// Returns decoded value of this rlp, panics if rlp not valid + pub fn as_val(&self) -> T + where + T: Decodable, + { + self.expect_valid_rlp(self.rlp.as_val()) + } - /// Returns decoded value at the given index, panics not present or valid at that index - pub fn val_at(&self, index: usize) -> T where T : Decodable { - self.expect_valid_rlp(self.rlp.val_at(index)) - } + /// Returns decoded value at the given index, panics not present or valid at that index + pub fn val_at(&self, index: usize) -> T + where + T: Decodable, + { + self.expect_valid_rlp(self.rlp.val_at(index)) + } - /// Returns decoded list of values, panics 
if rlp is invalid - pub fn list_at(&self, index: usize) -> Vec where T: Decodable { - self.expect_valid_rlp(self.rlp.list_at(index)) - } + /// Returns decoded list of values, panics if rlp is invalid + pub fn list_at(&self, index: usize) -> Vec + where + T: Decodable, + { + self.expect_valid_rlp(self.rlp.list_at(index)) + } - /// Returns the number of items in the rlp, panics if it is not a list of rlp values - pub fn item_count(&self) -> usize { - self.expect_valid_rlp(self.rlp.item_count()) - } + /// Returns the number of items in the rlp, panics if it is not a list of rlp values + pub fn item_count(&self) -> usize { + self.expect_valid_rlp(self.rlp.item_count()) + } - /// Returns raw rlp bytes - pub fn as_raw(&'view self) -> &'a [u8] { - self.rlp.as_raw() - } + /// Returns raw rlp bytes + pub fn as_raw(&'view self) -> &'a [u8] { + self.rlp.as_raw() + } } /// Iterator over rlp-slice list elements. -pub struct ViewRlpIterator<'a, 'view> where 'a: 'view { - rlp: &'view ViewRlp<'a>, - index: usize, +pub struct ViewRlpIterator<'a, 'view> +where + 'a: 'view, +{ + rlp: &'view ViewRlp<'a>, + index: usize, } -impl<'a, 'view> IntoIterator for &'view ViewRlp<'a> where 'a: 'view { - type Item = ViewRlp<'a>; - type IntoIter = ViewRlpIterator<'a, 'view>; +impl<'a, 'view> IntoIterator for &'view ViewRlp<'a> +where + 'a: 'view, +{ + type Item = ViewRlp<'a>; + type IntoIter = ViewRlpIterator<'a, 'view>; - fn into_iter(self) -> Self::IntoIter { - ViewRlpIterator { - rlp: self, - index: 0, - } - } + fn into_iter(self) -> Self::IntoIter { + ViewRlpIterator { + rlp: self, + index: 0, + } + } } impl<'a, 'view> Iterator for ViewRlpIterator<'a, 'view> { - type Item = ViewRlp<'a>; + type Item = ViewRlp<'a>; - fn next(&mut self) -> Option> { - let index = self.index; - let result = self.rlp.maybe_at(index); - self.index += 1; - result - } + fn next(&mut self) -> Option> { + let index = self.index; + let result = self.rlp.maybe_at(index); + self.index += 1; + result + } } #[macro_export] 
macro_rules! view { - ($view: ident, $bytes: expr) => { - $view::new($crate::views::ViewRlp::new($bytes, file!(), line!())) - }; + ($view: ident, $bytes: expr) => { + $view::new($crate::views::ViewRlp::new($bytes, file!(), line!())) + }; } diff --git a/ethcore/vm/src/action_params.rs b/ethcore/vm/src/action_params.rs index 0d4959c18..2da9de944 100644 --- a/ethcore/vm/src/action_params.rs +++ b/ethcore/vm/src/action_params.rs @@ -15,10 +15,10 @@ // along with Parity Ethereum. If not, see . //! Evm input params. -use ethereum_types::{U256, H256, Address}; use bytes::Bytes; -use hash::{keccak, KECCAK_EMPTY}; +use ethereum_types::{Address, H256, U256}; use ethjson; +use hash::{keccak, KECCAK_EMPTY}; use call_type::CallType; @@ -27,107 +27,110 @@ use std::sync::Arc; /// Transaction value #[derive(Clone, Debug)] pub enum ActionValue { - /// Value that should be transfered - Transfer(U256), - /// Apparent value for transaction (not transfered) - Apparent(U256) + /// Value that should be transfered + Transfer(U256), + /// Apparent value for transaction (not transfered) + Apparent(U256), } /// Type of the way parameters encoded #[derive(Clone, Debug)] pub enum ParamsType { - /// Parameters are included in code - Embedded, - /// Parameters are passed in data section - Separate, + /// Parameters are included in code + Embedded, + /// Parameters are passed in data section + Separate, } impl ActionValue { - /// Returns action value as U256. - pub fn value(&self) -> U256 { - match *self { - ActionValue::Transfer(x) | ActionValue::Apparent(x) => x - } - } + /// Returns action value as U256. 
+ pub fn value(&self) -> U256 { + match *self { + ActionValue::Transfer(x) | ActionValue::Apparent(x) => x, + } + } - /// Returns the transfer action value of the U256-convertable raw value - pub fn transfer>(transfer_value: T) -> ActionValue { - ActionValue::Transfer(transfer_value.into()) - } + /// Returns the transfer action value of the U256-convertable raw value + pub fn transfer>(transfer_value: T) -> ActionValue { + ActionValue::Transfer(transfer_value.into()) + } - /// Returns the apparent action value of the U256-convertable raw value - pub fn apparent>(apparent_value: T) -> ActionValue { - ActionValue::Apparent(apparent_value.into()) - } + /// Returns the apparent action value of the U256-convertable raw value + pub fn apparent>(apparent_value: T) -> ActionValue { + ActionValue::Apparent(apparent_value.into()) + } } // TODO: should be a trait, possible to avoid cloning everything from a Transaction(/View). /// Action (call/create) input params. Everything else should be specified in Externalities. #[derive(Clone, Debug)] pub struct ActionParams { - /// Address of currently executed code. - pub code_address: Address, - /// Hash of currently executed code. - pub code_hash: Option, - /// Receive address. Usually equal to code_address, - /// except when called using CALLCODE. - pub address: Address, - /// Sender of current part of the transaction. - pub sender: Address, - /// Transaction initiator. - pub origin: Address, - /// Gas paid up front for transaction execution - pub gas: U256, - /// Gas price. - pub gas_price: U256, - /// Transaction value. - pub value: ActionValue, - /// Code being executed. - pub code: Option>, - /// Input data. - pub data: Option, - /// Type of call - pub call_type: CallType, - /// Param types encoding - pub params_type: ParamsType, + /// Address of currently executed code. + pub code_address: Address, + /// Hash of currently executed code. + pub code_hash: Option, + /// Receive address. 
Usually equal to code_address, + /// except when called using CALLCODE. + pub address: Address, + /// Sender of current part of the transaction. + pub sender: Address, + /// Transaction initiator. + pub origin: Address, + /// Gas paid up front for transaction execution + pub gas: U256, + /// Gas price. + pub gas_price: U256, + /// Transaction value. + pub value: ActionValue, + /// Code being executed. + pub code: Option>, + /// Input data. + pub data: Option, + /// Type of call + pub call_type: CallType, + /// Param types encoding + pub params_type: ParamsType, } impl Default for ActionParams { - /// Returns default ActionParams initialized with zeros - fn default() -> ActionParams { - ActionParams { - code_address: Address::new(), - code_hash: Some(KECCAK_EMPTY), - address: Address::new(), - sender: Address::new(), - origin: Address::new(), - gas: U256::zero(), - gas_price: U256::zero(), - value: ActionValue::Transfer(U256::zero()), - code: None, - data: None, - call_type: CallType::None, - params_type: ParamsType::Separate, - } - } + /// Returns default ActionParams initialized with zeros + fn default() -> ActionParams { + ActionParams { + code_address: Address::new(), + code_hash: Some(KECCAK_EMPTY), + address: Address::new(), + sender: Address::new(), + origin: Address::new(), + gas: U256::zero(), + gas_price: U256::zero(), + value: ActionValue::Transfer(U256::zero()), + code: None, + data: None, + call_type: CallType::None, + params_type: ParamsType::Separate, + } + } } impl From for ActionParams { - fn from(t: ethjson::vm::Transaction) -> Self { - let address: Address = t.address.into(); - ActionParams { - code_address: Address::new(), - code_hash: Some(keccak(&*t.code)), - address: address, - sender: t.sender.into(), - origin: t.origin.into(), - code: Some(Arc::new(t.code.into())), - data: Some(t.data.into()), - gas: t.gas.into(), - gas_price: t.gas_price.into(), - value: ActionValue::Transfer(t.value.into()), - call_type: match address.is_zero() { true => 
CallType::None, false => CallType::Call }, // TODO @debris is this correct? - params_type: ParamsType::Separate, - } - } + fn from(t: ethjson::vm::Transaction) -> Self { + let address: Address = t.address.into(); + ActionParams { + code_address: Address::new(), + code_hash: Some(keccak(&*t.code)), + address: address, + sender: t.sender.into(), + origin: t.origin.into(), + code: Some(Arc::new(t.code.into())), + data: Some(t.data.into()), + gas: t.gas.into(), + gas_price: t.gas_price.into(), + value: ActionValue::Transfer(t.value.into()), + call_type: match address.is_zero() { + true => CallType::None, + false => CallType::Call, + }, // TODO @debris is this correct? + params_type: ParamsType::Separate, + } + } } diff --git a/ethcore/vm/src/call_type.rs b/ethcore/vm/src/call_type.rs index e5245c8c1..86dfbc94a 100644 --- a/ethcore/vm/src/call_type.rs +++ b/ethcore/vm/src/call_type.rs @@ -16,71 +16,73 @@ //! EVM call types. -use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; /// The type of the call-like instruction. #[derive(Debug, PartialEq, Clone)] pub enum CallType { - /// Not a CALL. - None, - /// CALL. - Call, - /// CALLCODE. - CallCode, - /// DELEGATECALL. - DelegateCall, - /// STATICCALL - StaticCall, + /// Not a CALL. + None, + /// CALL. + Call, + /// CALLCODE. + CallCode, + /// DELEGATECALL. 
+ DelegateCall, + /// STATICCALL + StaticCall, } impl Encodable for CallType { - fn rlp_append(&self, s: &mut RlpStream) { - let v = match *self { - CallType::None => 0u32, - CallType::Call => 1, - CallType::CallCode => 2, - CallType::DelegateCall => 3, - CallType::StaticCall => 4, - }; - Encodable::rlp_append(&v, s); - } + fn rlp_append(&self, s: &mut RlpStream) { + let v = match *self { + CallType::None => 0u32, + CallType::Call => 1, + CallType::CallCode => 2, + CallType::DelegateCall => 3, + CallType::StaticCall => 4, + }; + Encodable::rlp_append(&v, s); + } } impl Decodable for CallType { - fn decode(rlp: &Rlp) -> Result { - rlp.as_val().and_then(|v| Ok(match v { - 0u32 => CallType::None, - 1 => CallType::Call, - 2 => CallType::CallCode, - 3 => CallType::DelegateCall, - 4 => CallType::StaticCall, - _ => return Err(DecoderError::Custom("Invalid value of CallType item")), - })) - } + fn decode(rlp: &Rlp) -> Result { + rlp.as_val().and_then(|v| { + Ok(match v { + 0u32 => CallType::None, + 1 => CallType::Call, + 2 => CallType::CallCode, + 3 => CallType::DelegateCall, + 4 => CallType::StaticCall, + _ => return Err(DecoderError::Custom("Invalid value of CallType item")), + }) + }) + } } #[cfg(test)] mod tests { - use rlp::*; - use super::CallType; + use super::CallType; + use rlp::*; - #[test] - fn encode_call_type() { - let ct = CallType::Call; + #[test] + fn encode_call_type() { + let ct = CallType::Call; - let mut s = RlpStream::new_list(2); - s.append(&ct); - assert!(!s.is_finished(), "List shouldn't finished yet"); - s.append(&ct); - assert!(s.is_finished(), "List should be finished now"); - s.out(); - } + let mut s = RlpStream::new_list(2); + s.append(&ct); + assert!(!s.is_finished(), "List shouldn't finished yet"); + s.append(&ct); + assert!(s.is_finished(), "List should be finished now"); + s.out(); + } - #[test] - fn should_encode_and_decode_call_type() { - let original = CallType::Call; - let encoded = encode(&original); - let decoded = 
decode(&encoded).expect("failure decoding CallType"); - assert_eq!(original, decoded); - } + #[test] + fn should_encode_and_decode_call_type() { + let original = CallType::Call; + let encoded = encode(&original); + let decoded = decode(&encoded).expect("failure decoding CallType"); + assert_eq!(original, decoded); + } } diff --git a/ethcore/vm/src/env_info.rs b/ethcore/vm/src/env_info.rs index addfa6289..be2e7376f 100644 --- a/ethcore/vm/src/env_info.rs +++ b/ethcore/vm/src/env_info.rs @@ -16,11 +16,10 @@ //! Environment information for transaction execution. -use std::cmp; -use std::sync::Arc; -use hash::keccak; -use ethereum_types::{U256, H256, Address}; +use ethereum_types::{Address, H256, U256}; use ethjson; +use hash::keccak; +use std::{cmp, sync::Arc}; type BlockNumber = u64; @@ -31,79 +30,88 @@ pub type LastHashes = Vec; /// Information concerning the execution environment for a message-call/contract-creation. #[derive(Debug, Clone)] pub struct EnvInfo { - /// The block number. - pub number: BlockNumber, - /// The block author. - pub author: Address, - /// The block timestamp. - pub timestamp: u64, - /// The block difficulty. - pub difficulty: U256, - /// The block gas limit. - pub gas_limit: U256, - /// The last 256 block hashes. - pub last_hashes: Arc, - /// The gas used. - pub gas_used: U256, + /// The block number. + pub number: BlockNumber, + /// The block author. + pub author: Address, + /// The block timestamp. + pub timestamp: u64, + /// The block difficulty. + pub difficulty: U256, + /// The block gas limit. + pub gas_limit: U256, + /// The last 256 block hashes. + pub last_hashes: Arc, + /// The gas used. 
+ pub gas_used: U256, } impl Default for EnvInfo { - fn default() -> Self { - EnvInfo { - number: 0, - author: Address::default(), - timestamp: 0, - difficulty: 0.into(), - gas_limit: 0.into(), - last_hashes: Arc::new(vec![]), - gas_used: 0.into(), - } - } + fn default() -> Self { + EnvInfo { + number: 0, + author: Address::default(), + timestamp: 0, + difficulty: 0.into(), + gas_limit: 0.into(), + last_hashes: Arc::new(vec![]), + gas_used: 0.into(), + } + } } impl From for EnvInfo { - fn from(e: ethjson::vm::Env) -> Self { - let number = e.number.into(); - EnvInfo { - number, - author: e.author.into(), - difficulty: e.difficulty.into(), - gas_limit: e.gas_limit.into(), - timestamp: e.timestamp.into(), - last_hashes: Arc::new((1..cmp::min(number + 1, 257)).map(|i| keccak(format!("{}", number - i).as_bytes())).collect()), - gas_used: U256::default(), - } - } + fn from(e: ethjson::vm::Env) -> Self { + let number = e.number.into(); + EnvInfo { + number, + author: e.author.into(), + difficulty: e.difficulty.into(), + gas_limit: e.gas_limit.into(), + timestamp: e.timestamp.into(), + last_hashes: Arc::new( + (1..cmp::min(number + 1, 257)) + .map(|i| keccak(format!("{}", number - i).as_bytes())) + .collect(), + ), + gas_used: U256::default(), + } + } } #[cfg(test)] mod tests { - use std::str::FromStr; - use super::*; - use ethereum_types::{U256, Address}; - use ethjson; + use super::*; + use ethereum_types::{Address, U256}; + use ethjson; + use std::str::FromStr; - #[test] - fn it_serializes_from_json() { - let env_info = EnvInfo::from(ethjson::vm::Env { - author: ethjson::hash::Address(Address::from_str("000000f00000000f000000000000f00000000f00").unwrap()), - number: ethjson::uint::Uint(U256::from(1_112_339)), - difficulty: ethjson::uint::Uint(U256::from(50_000)), - gas_limit: ethjson::uint::Uint(U256::from(40_000)), - timestamp: ethjson::uint::Uint(U256::from(1_100)) - }); + #[test] + fn it_serializes_from_json() { + let env_info = EnvInfo::from(ethjson::vm::Env { + 
author: ethjson::hash::Address( + Address::from_str("000000f00000000f000000000000f00000000f00").unwrap(), + ), + number: ethjson::uint::Uint(U256::from(1_112_339)), + difficulty: ethjson::uint::Uint(U256::from(50_000)), + gas_limit: ethjson::uint::Uint(U256::from(40_000)), + timestamp: ethjson::uint::Uint(U256::from(1_100)), + }); - assert_eq!(env_info.number, 1112339); - assert_eq!(env_info.author, Address::from_str("000000f00000000f000000000000f00000000f00").unwrap()); - assert_eq!(env_info.gas_limit, 40000.into()); - assert_eq!(env_info.difficulty, 50000.into()); - assert_eq!(env_info.gas_used, 0.into()); - } + assert_eq!(env_info.number, 1112339); + assert_eq!( + env_info.author, + Address::from_str("000000f00000000f000000000000f00000000f00").unwrap() + ); + assert_eq!(env_info.gas_limit, 40000.into()); + assert_eq!(env_info.difficulty, 50000.into()); + assert_eq!(env_info.gas_used, 0.into()); + } - #[test] - fn it_can_be_created_as_default() { - let default_env_info = EnvInfo::default(); + #[test] + fn it_can_be_created_as_default() { + let default_env_info = EnvInfo::default(); - assert_eq!(default_env_info.difficulty, 0.into()); - } + assert_eq!(default_env_info.difficulty, 0.into()); + } } diff --git a/ethcore/vm/src/error.rs b/ethcore/vm/src/error.rs index 334005633..552d38288 100644 --- a/ethcore/vm/src/error.rs +++ b/ethcore/vm/src/error.rs @@ -16,104 +16,115 @@ //! VM errors module -use ::{ResumeCall, ResumeCreate}; -use ethereum_types::Address; use action_params::ActionParams; -use std::fmt; +use ethereum_types::Address; use ethtrie; +use std::fmt; +use ResumeCall; +use ResumeCreate; #[derive(Debug)] pub enum TrapKind { - Call(ActionParams), - Create(ActionParams, Address), + Call(ActionParams), + Create(ActionParams, Address), } pub enum TrapError { - Call(ActionParams, Call), - Create(ActionParams, Address, Create), + Call(ActionParams, Call), + Create(ActionParams, Address, Create), } /// VM errors. 
#[derive(Debug, Clone, PartialEq)] pub enum Error { - /// `OutOfGas` is returned when transaction execution runs out of gas. - /// The state should be reverted to the state from before the - /// transaction execution. But it does not mean that transaction - /// was invalid. Balance still should be transfered and nonce - /// should be increased. - OutOfGas, - /// `BadJumpDestination` is returned when execution tried to move - /// to position that wasn't marked with JUMPDEST instruction - BadJumpDestination { - /// Position the code tried to jump to. - destination: usize - }, - /// `BadInstructions` is returned when given instruction is not supported - BadInstruction { - /// Unrecognized opcode - instruction: u8, - }, - /// `StackUnderflow` when there is not enough stack elements to execute instruction - StackUnderflow { - /// Invoked instruction - instruction: &'static str, - /// How many stack elements was requested by instruction - wanted: usize, - /// How many elements were on stack - on_stack: usize - }, - /// When execution would exceed defined Stack Limit - OutOfStack { - /// Invoked instruction - instruction: &'static str, - /// How many stack elements instruction wanted to push - wanted: usize, - /// What was the stack limit - limit: usize - }, - /// Built-in contract failed on given input - BuiltIn(&'static str), - /// When execution tries to modify the state in static context - MutableCallInStaticContext, - /// Likely to cause consensus issues. - Internal(String), - /// Wasm runtime error - Wasm(String), - /// Out of bounds access in RETURNDATACOPY. - OutOfBounds, - /// Execution has been reverted with REVERT. - Reverted, + /// `OutOfGas` is returned when transaction execution runs out of gas. + /// The state should be reverted to the state from before the + /// transaction execution. But it does not mean that transaction + /// was invalid. Balance still should be transfered and nonce + /// should be increased. 
+ OutOfGas, + /// `BadJumpDestination` is returned when execution tried to move + /// to position that wasn't marked with JUMPDEST instruction + BadJumpDestination { + /// Position the code tried to jump to. + destination: usize, + }, + /// `BadInstructions` is returned when given instruction is not supported + BadInstruction { + /// Unrecognized opcode + instruction: u8, + }, + /// `StackUnderflow` when there is not enough stack elements to execute instruction + StackUnderflow { + /// Invoked instruction + instruction: &'static str, + /// How many stack elements was requested by instruction + wanted: usize, + /// How many elements were on stack + on_stack: usize, + }, + /// When execution would exceed defined Stack Limit + OutOfStack { + /// Invoked instruction + instruction: &'static str, + /// How many stack elements instruction wanted to push + wanted: usize, + /// What was the stack limit + limit: usize, + }, + /// Built-in contract failed on given input + BuiltIn(&'static str), + /// When execution tries to modify the state in static context + MutableCallInStaticContext, + /// Likely to cause consensus issues. + Internal(String), + /// Wasm runtime error + Wasm(String), + /// Out of bounds access in RETURNDATACOPY. + OutOfBounds, + /// Execution has been reverted with REVERT. 
+ Reverted, } impl From> for Error { - fn from(err: Box) -> Self { - Error::Internal(format!("Internal error: {}", err)) - } + fn from(err: Box) -> Self { + Error::Internal(format!("Internal error: {}", err)) + } } impl From for Error { - fn from(err: ethtrie::TrieError) -> Self { - Error::Internal(format!("Internal error: {}", err)) - } + fn from(err: ethtrie::TrieError) -> Self { + Error::Internal(format!("Internal error: {}", err)) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::Error::*; - match *self { - OutOfGas => write!(f, "Out of gas"), - BadJumpDestination { destination } => write!(f, "Bad jump destination {:x}", destination), - BadInstruction { instruction } => write!(f, "Bad instruction {:x}", instruction), - StackUnderflow { instruction, wanted, on_stack } => write!(f, "Stack underflow {} {}/{}", instruction, wanted, on_stack), - OutOfStack { instruction, wanted, limit } => write!(f, "Out of stack {} {}/{}", instruction, wanted, limit), - BuiltIn(name) => write!(f, "Built-in failed: {}", name), - Internal(ref msg) => write!(f, "Internal error: {}", msg), - MutableCallInStaticContext => write!(f, "Mutable call in static context"), - Wasm(ref msg) => write!(f, "Internal error: {}", msg), - OutOfBounds => write!(f, "Out of bounds"), - Reverted => write!(f, "Reverted"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::Error::*; + match *self { + OutOfGas => write!(f, "Out of gas"), + BadJumpDestination { destination } => { + write!(f, "Bad jump destination {:x}", destination) + } + BadInstruction { instruction } => write!(f, "Bad instruction {:x}", instruction), + StackUnderflow { + instruction, + wanted, + on_stack, + } => write!(f, "Stack underflow {} {}/{}", instruction, wanted, on_stack), + OutOfStack { + instruction, + wanted, + limit, + } => write!(f, "Out of stack {} {}/{}", instruction, wanted, limit), + BuiltIn(name) => write!(f, "Built-in failed: {}", name), + 
Internal(ref msg) => write!(f, "Internal error: {}", msg), + MutableCallInStaticContext => write!(f, "Mutable call in static context"), + Wasm(ref msg) => write!(f, "Internal error: {}", msg), + OutOfBounds => write!(f, "Out of bounds"), + Reverted => write!(f, "Reverted"), + } + } } pub type Result = ::std::result::Result; diff --git a/ethcore/vm/src/ext.rs b/ethcore/vm/src/ext.rs index 247758c1e..bd300c975 100644 --- a/ethcore/vm/src/ext.rs +++ b/ethcore/vm/src/ext.rs @@ -16,162 +16,172 @@ //! Interface for Evm externalities. -use std::sync::Arc; -use ethereum_types::{U256, H256, Address}; use bytes::Bytes; use call_type::CallType; use env_info::EnvInfo; -use schedule::Schedule; -use return_data::ReturnData; use error::{Result, TrapKind}; +use ethereum_types::{Address, H256, U256}; +use return_data::ReturnData; +use schedule::Schedule; +use std::sync::Arc; #[derive(Debug)] /// Result of externalities create function. pub enum ContractCreateResult { - /// Returned when creation was successfull. - /// Contains an address of newly created contract and gas left. - Created(Address, U256), - /// Returned when contract creation failed. - /// VM doesn't have to know the reason. - Failed, - /// Reverted with REVERT. - Reverted(U256, ReturnData), + /// Returned when creation was successfull. + /// Contains an address of newly created contract and gas left. + Created(Address, U256), + /// Returned when contract creation failed. + /// VM doesn't have to know the reason. + Failed, + /// Reverted with REVERT. + Reverted(U256, ReturnData), } #[derive(Debug)] /// Result of externalities call function. pub enum MessageCallResult { - /// Returned when message call was successfull. - /// Contains gas left and output data. - Success(U256, ReturnData), - /// Returned when message call failed. - /// VM doesn't have to know the reason. - Failed, - /// Returned when message call was reverted. - /// Contains gas left and output data. 
- Reverted(U256, ReturnData), + /// Returned when message call was successfull. + /// Contains gas left and output data. + Success(U256, ReturnData), + /// Returned when message call failed. + /// VM doesn't have to know the reason. + Failed, + /// Returned when message call was reverted. + /// Contains gas left and output data. + Reverted(U256, ReturnData), } /// Specifies how an address is calculated for a new contract. #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] pub enum CreateContractAddress { - /// Address is calculated from sender and nonce. pWASM `create` scheme. - FromSenderAndNonce, - /// Address is calculated from sender, salt and code hash. pWASM `create2` scheme and EIP-1014 CREATE2 scheme. - FromSenderSaltAndCodeHash(H256), - /// Address is calculated from code hash and sender. Used by pwasm create ext. - FromSenderAndCodeHash, + /// Address is calculated from sender and nonce. pWASM `create` scheme. + FromSenderAndNonce, + /// Address is calculated from sender, salt and code hash. pWASM `create2` scheme and EIP-1014 CREATE2 scheme. + FromSenderSaltAndCodeHash(H256), + /// Address is calculated from code hash and sender. Used by pwasm create ext. + FromSenderAndCodeHash, } /// Externalities interface for EVMs pub trait Ext { - /// Returns the storage value for a given key if reversion happens on the current transaction. - fn initial_storage_at(&self, key: &H256) -> Result; + /// Returns the storage value for a given key if reversion happens on the current transaction. + fn initial_storage_at(&self, key: &H256) -> Result; - /// Returns a value for given key. - fn storage_at(&self, key: &H256) -> Result; + /// Returns a value for given key. + fn storage_at(&self, key: &H256) -> Result; - /// Stores a value for given key. - fn set_storage(&mut self, key: H256, value: H256) -> Result<()>; + /// Stores a value for given key. + fn set_storage(&mut self, key: H256, value: H256) -> Result<()>; - /// Determine whether an account exists. 
- fn exists(&self, address: &Address) -> Result; + /// Determine whether an account exists. + fn exists(&self, address: &Address) -> Result; - /// Determine whether an account exists and is not null (zero balance/nonce, no code). - fn exists_and_not_null(&self, address: &Address) -> Result; + /// Determine whether an account exists and is not null (zero balance/nonce, no code). + fn exists_and_not_null(&self, address: &Address) -> Result; - /// Balance of the origin account. - fn origin_balance(&self) -> Result; + /// Balance of the origin account. + fn origin_balance(&self) -> Result; - /// Returns address balance. - fn balance(&self, address: &Address) -> Result; + /// Returns address balance. + fn balance(&self, address: &Address) -> Result; - /// Returns the hash of one of the 256 most recent complete blocks. - fn blockhash(&mut self, number: &U256) -> H256; + /// Returns the hash of one of the 256 most recent complete blocks. + fn blockhash(&mut self, number: &U256) -> H256; - /// Creates new contract. - /// - /// Returns gas_left and contract address if contract creation was successful. - fn create( - &mut self, - gas: &U256, - value: &U256, - code: &[u8], - address: CreateContractAddress, - trap: bool, - ) -> ::std::result::Result; + /// Creates new contract. + /// + /// Returns gas_left and contract address if contract creation was successful. + fn create( + &mut self, + gas: &U256, + value: &U256, + code: &[u8], + address: CreateContractAddress, + trap: bool, + ) -> ::std::result::Result; - /// Message call. - /// - /// Returns Err, if we run out of gas. - /// Otherwise returns call_result which contains gas left - /// and true if subcall was successfull. - fn call( - &mut self, - gas: &U256, - sender_address: &Address, - receive_address: &Address, - value: Option, - data: &[u8], - code_address: &Address, - call_type: CallType, - trap: bool - ) -> ::std::result::Result; + /// Message call. + /// + /// Returns Err, if we run out of gas. 
+ /// Otherwise returns call_result which contains gas left + /// and true if subcall was successfull. + fn call( + &mut self, + gas: &U256, + sender_address: &Address, + receive_address: &Address, + value: Option, + data: &[u8], + code_address: &Address, + call_type: CallType, + trap: bool, + ) -> ::std::result::Result; - /// Returns code at given address - fn extcode(&self, address: &Address) -> Result>>; + /// Returns code at given address + fn extcode(&self, address: &Address) -> Result>>; - /// Returns code hash at given address - fn extcodehash(&self, address: &Address) -> Result>; + /// Returns code hash at given address + fn extcodehash(&self, address: &Address) -> Result>; - /// Returns code size at given address - fn extcodesize(&self, address: &Address) -> Result>; + /// Returns code size at given address + fn extcodesize(&self, address: &Address) -> Result>; - /// Creates log entry with given topics and data - fn log(&mut self, topics: Vec, data: &[u8]) -> Result<()>; + /// Creates log entry with given topics and data + fn log(&mut self, topics: Vec, data: &[u8]) -> Result<()>; - /// Should be called when transaction calls `RETURN` opcode. - /// Returns gas_left if cost of returning the data is not too high. - fn ret(self, gas: &U256, data: &ReturnData, apply_state: bool) -> Result; + /// Should be called when transaction calls `RETURN` opcode. + /// Returns gas_left if cost of returning the data is not too high. + fn ret(self, gas: &U256, data: &ReturnData, apply_state: bool) -> Result; - /// Should be called when contract commits suicide. - /// Address to which funds should be refunded. - fn suicide(&mut self, refund_address: &Address) -> Result<()> ; + /// Should be called when contract commits suicide. + /// Address to which funds should be refunded. + fn suicide(&mut self, refund_address: &Address) -> Result<()>; - /// Returns schedule. - fn schedule(&self) -> &Schedule; + /// Returns schedule. 
+ fn schedule(&self) -> &Schedule; - /// Returns environment info. - fn env_info(&self) -> &EnvInfo; + /// Returns environment info. + fn env_info(&self) -> &EnvInfo; - /// Returns the chain ID of the blockchain - fn chain_id(&self) -> u64; + /// Returns the chain ID of the blockchain + fn chain_id(&self) -> u64; - /// Returns current depth of execution. - /// - /// If contract A calls contract B, and contract B calls C, - /// then A depth is 0, B is 1, C is 2 and so on. - fn depth(&self) -> usize; + /// Returns current depth of execution. + /// + /// If contract A calls contract B, and contract B calls C, + /// then A depth is 0, B is 1, C is 2 and so on. + fn depth(&self) -> usize; - /// Increments sstore refunds counter. - fn add_sstore_refund(&mut self, value: usize); + /// Increments sstore refunds counter. + fn add_sstore_refund(&mut self, value: usize); - /// Decrements sstore refunds counter. - fn sub_sstore_refund(&mut self, value: usize); + /// Decrements sstore refunds counter. + fn sub_sstore_refund(&mut self, value: usize); - /// Decide if any more operations should be traced. Passthrough for the VM trace. - fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8, _current_gas: U256) -> bool { false } + /// Decide if any more operations should be traced. Passthrough for the VM trace. + fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8, _current_gas: U256) -> bool { + false + } - /// Prepare to trace an operation. Passthrough for the VM trace. - /// For each call of `trace_prepare_execute` either `trace_failed` or `trace_executed` MUST be called. - fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: U256, _mem_written: Option<(usize, usize)>, _store_written: Option<(U256, U256)>) {} + /// Prepare to trace an operation. Passthrough for the VM trace. + /// For each call of `trace_prepare_execute` either `trace_failed` or `trace_executed` MUST be called. 
+ fn trace_prepare_execute( + &mut self, + _pc: usize, + _instruction: u8, + _gas_cost: U256, + _mem_written: Option<(usize, usize)>, + _store_written: Option<(U256, U256)>, + ) { + } - /// Trace the execution failure of a single instruction. - fn trace_failed(&mut self) {} + /// Trace the execution failure of a single instruction. + fn trace_failed(&mut self) {} - /// Trace the finalised execution of a single instruction. - fn trace_executed(&mut self, _gas_used: U256, _stack_push: &[U256], _mem: &[u8]) {} + /// Trace the finalised execution of a single instruction. + fn trace_executed(&mut self, _gas_used: U256, _stack_push: &[U256], _mem: &[u8]) {} - /// Check if running in static context. - fn is_static(&self) -> bool; + /// Check if running in static context. + fn is_static(&self) -> bool; } diff --git a/ethcore/vm/src/lib.rs b/ethcore/vm/src/lib.rs index 4204ff92d..f0e13def5 100644 --- a/ethcore/vm/src/lib.rs +++ b/ethcore/vm/src/lib.rs @@ -17,46 +17,46 @@ //! Virtual machines support library extern crate ethereum_types; -extern crate parity_bytes as bytes; extern crate ethjson; -extern crate rlp; extern crate keccak_hash as hash; +extern crate parity_bytes as bytes; extern crate patricia_trie_ethereum as ethtrie; +extern crate rlp; mod action_params; mod call_type; mod env_info; -mod schedule; +mod error; mod ext; mod return_data; -mod error; +mod schedule; pub mod tests; pub use action_params::{ActionParams, ActionValue, ParamsType}; pub use call_type::CallType; pub use env_info::{EnvInfo, LastHashes}; -pub use schedule::{Schedule, CleanDustMode, WasmCosts}; -pub use ext::{Ext, MessageCallResult, ContractCreateResult, CreateContractAddress}; -pub use return_data::{ReturnData, GasLeft}; -pub use error::{Error, Result, TrapResult, TrapError, TrapKind, ExecTrapResult, ExecTrapError}; +pub use error::{Error, ExecTrapError, ExecTrapResult, Result, TrapError, TrapKind, TrapResult}; +pub use ext::{ContractCreateResult, CreateContractAddress, Ext, 
MessageCallResult}; +pub use return_data::{GasLeft, ReturnData}; +pub use schedule::{CleanDustMode, Schedule, WasmCosts}; /// Virtual Machine interface pub trait Exec: Send { - /// This function should be used to execute transaction. - /// It returns either an error, a known amount of gas left, or parameters to be used - /// to compute the final gas left. - fn exec(self: Box, ext: &mut Ext) -> ExecTrapResult; + /// This function should be used to execute transaction. + /// It returns either an error, a known amount of gas left, or parameters to be used + /// to compute the final gas left. + fn exec(self: Box, ext: &mut Ext) -> ExecTrapResult; } /// Resume call interface pub trait ResumeCall: Send { - /// Resume an execution for call, returns back the Vm interface. - fn resume_call(self: Box, result: MessageCallResult) -> Box; + /// Resume an execution for call, returns back the Vm interface. + fn resume_call(self: Box, result: MessageCallResult) -> Box; } /// Resume create interface pub trait ResumeCreate: Send { - /// Resume an execution from create, returns back the Vm interface. - fn resume_create(self: Box, result: ContractCreateResult) -> Box; + /// Resume an execution from create, returns back the Vm interface. + fn resume_create(self: Box, result: ContractCreateResult) -> Box; } diff --git a/ethcore/vm/src/return_data.rs b/ethcore/vm/src/return_data.rs index 38ac23ffd..23bdb9278 100644 --- a/ethcore/vm/src/return_data.rs +++ b/ethcore/vm/src/return_data.rs @@ -21,46 +21,46 @@ use ethereum_types::U256; /// Return data buffer. Holds memory from a previous call and a slice into that memory. 
#[derive(Debug)] pub struct ReturnData { - mem: Vec, - offset: usize, - size: usize, + mem: Vec, + offset: usize, + size: usize, } impl ::std::ops::Deref for ReturnData { - type Target = [u8]; - fn deref(&self) -> &[u8] { - &self.mem[self.offset..self.offset + self.size] - } + type Target = [u8]; + fn deref(&self) -> &[u8] { + &self.mem[self.offset..self.offset + self.size] + } } impl ReturnData { - /// Create empty `ReturnData`. - pub fn empty() -> Self { - ReturnData { - mem: Vec::new(), - offset: 0, - size: 0, - } - } - /// Create `ReturnData` from give buffer and slice. - pub fn new(mem: Vec, offset: usize, size: usize) -> Self { - ReturnData { mem, offset, size } - } + /// Create empty `ReturnData`. + pub fn empty() -> Self { + ReturnData { + mem: Vec::new(), + offset: 0, + size: 0, + } + } + /// Create `ReturnData` from give buffer and slice. + pub fn new(mem: Vec, offset: usize, size: usize) -> Self { + ReturnData { mem, offset, size } + } } /// Gas Left: either it is a known value, or it needs to be computed by processing /// a return instruction. #[derive(Debug)] pub enum GasLeft { - /// Known gas left - Known(U256), - /// Return or Revert instruction must be processed. - NeedsReturn { - /// Amount of gas left. - gas_left: U256, - /// Return data buffer. - data: ReturnData, - /// Apply or revert state changes on revert. - apply_state: bool - }, + /// Known gas left + Known(U256), + /// Return or Revert instruction must be processed. + NeedsReturn { + /// Amount of gas left. + gas_left: U256, + /// Return data buffer. + data: ReturnData, + /// Apply or revert state changes on revert. + apply_state: bool, + }, } diff --git a/ethcore/vm/src/schedule.rs b/ethcore/vm/src/schedule.rs index 6fad8351e..925a943d1 100644 --- a/ethcore/vm/src/schedule.rs +++ b/ethcore/vm/src/schedule.rs @@ -15,379 +15,386 @@ // along with Parity Ethereum. If not, see . //! Cost schedule and other parameterisations for the EVM. 
-use std::collections::HashMap; use ethereum_types::U256; +use std::collections::HashMap; /// Definition of schedules that can be applied to a version. #[derive(Debug)] pub enum VersionedSchedule { - PWasm, + PWasm, } /// Definition of the cost schedule and other parameterisations for the EVM. #[derive(Debug)] pub struct Schedule { - /// Does it support exceptional failed code deposit - pub exceptional_failed_code_deposit: bool, - /// Does it have a delegate cal - pub have_delegate_call: bool, - /// Does it have a CREATE2 instruction - pub have_create2: bool, - /// Does it have a REVERT instruction - pub have_revert: bool, - /// Does it have a EXTCODEHASH instruction - pub have_extcodehash: bool, - /// VM stack limit - pub stack_limit: usize, - /// Max number of nested calls/creates - pub max_depth: usize, - /// Gas prices for instructions in all tiers - pub tier_step_gas: [usize; 8], - /// Gas price for `EXP` opcode - pub exp_gas: usize, - /// Additional gas for `EXP` opcode for each byte of exponent - pub exp_byte_gas: usize, - /// Gas price for `SHA3` opcode - pub sha3_gas: usize, - /// Additional gas for `SHA3` opcode for each word of hashed memory - pub sha3_word_gas: usize, - /// Gas price for loading from storage - pub sload_gas: usize, - /// Gas price for setting new value to storage (`storage==0`, `new!=0`) - pub sstore_set_gas: usize, - /// Gas price for altering value in storage - pub sstore_reset_gas: usize, - /// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`) - pub sstore_refund_gas: usize, - /// Gas price for `JUMPDEST` opcode - pub jumpdest_gas: usize, - /// Gas price for `LOG*` - pub log_gas: usize, - /// Additional gas for data in `LOG*` - pub log_data_gas: usize, - /// Additional gas for each topic in `LOG*` - pub log_topic_gas: usize, - /// Gas price for `CREATE` opcode - pub create_gas: usize, - /// Gas price for `*CALL*` opcodes - pub call_gas: usize, - /// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0` - pub 
call_stipend: usize, - /// Additional gas required for value transfer (`CALL|CALLCODE`) - pub call_value_transfer_gas: usize, - /// Additional gas for creating new account (`CALL|CALLCODE`) - pub call_new_account_gas: usize, - /// Refund for SUICIDE - pub suicide_refund_gas: usize, - /// Gas for used memory - pub memory_gas: usize, - /// Coefficient used to convert memory size to gas price for memory - pub quad_coeff_div: usize, - /// Cost for contract length when executing `CREATE` - pub create_data_gas: usize, - /// Maximum code size when creating a contract. - pub create_data_limit: usize, - /// Transaction cost - pub tx_gas: usize, - /// `CREATE` transaction cost - pub tx_create_gas: usize, - /// Additional cost for empty data transaction - pub tx_data_zero_gas: usize, - /// Additional cost for non-empty data transaction - pub tx_data_non_zero_gas: usize, - /// Gas price for copying memory - pub copy_gas: usize, - /// Price of EXTCODESIZE - pub extcodesize_gas: usize, - /// Base price of EXTCODECOPY - pub extcodecopy_base_gas: usize, - /// Price of BALANCE - pub balance_gas: usize, - /// Price of EXTCODEHASH - pub extcodehash_gas: usize, - /// Price of SUICIDE - pub suicide_gas: usize, - /// Amount of additional gas to pay when SUICIDE credits a non-existant account - pub suicide_to_new_account_cost: usize, - /// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit. - /// If None: let CALL's gas = (requested > GAS ? [OOG] : GAS). let CREATE's gas = GAS - pub sub_gas_cap_divisor: Option, - /// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value. - pub no_empty: bool, - /// Kill empty accounts if touched. - pub kill_empty: bool, - /// Blockhash instruction gas cost. - pub blockhash_gas: usize, - /// Static Call opcode enabled. - pub have_static_call: bool, - /// RETURNDATA and RETURNDATASIZE opcodes enabled. 
- pub have_return_data: bool, - /// SHL, SHR, SAR opcodes enabled. - pub have_bitwise_shifting: bool, - /// CHAINID opcode enabled. - pub have_chain_id: bool, - /// SELFBALANCE opcode enabled. - pub have_selfbalance: bool, - /// Kill basic accounts below this balance if touched. - pub kill_dust: CleanDustMode, - /// Enable EIP-1283 rules - pub eip1283: bool, - /// Enable EIP-1706 rules - pub eip1706: bool, - /// VM execution does not increase null signed address nonce if this field is true. - pub keep_unsigned_nonce: bool, - /// Wasm extra schedule settings, if wasm activated - pub wasm: Option, + /// Does it support exceptional failed code deposit + pub exceptional_failed_code_deposit: bool, + /// Does it have a delegate cal + pub have_delegate_call: bool, + /// Does it have a CREATE2 instruction + pub have_create2: bool, + /// Does it have a REVERT instruction + pub have_revert: bool, + /// Does it have a EXTCODEHASH instruction + pub have_extcodehash: bool, + /// VM stack limit + pub stack_limit: usize, + /// Max number of nested calls/creates + pub max_depth: usize, + /// Gas prices for instructions in all tiers + pub tier_step_gas: [usize; 8], + /// Gas price for `EXP` opcode + pub exp_gas: usize, + /// Additional gas for `EXP` opcode for each byte of exponent + pub exp_byte_gas: usize, + /// Gas price for `SHA3` opcode + pub sha3_gas: usize, + /// Additional gas for `SHA3` opcode for each word of hashed memory + pub sha3_word_gas: usize, + /// Gas price for loading from storage + pub sload_gas: usize, + /// Gas price for setting new value to storage (`storage==0`, `new!=0`) + pub sstore_set_gas: usize, + /// Gas price for altering value in storage + pub sstore_reset_gas: usize, + /// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`) + pub sstore_refund_gas: usize, + /// Gas price for `JUMPDEST` opcode + pub jumpdest_gas: usize, + /// Gas price for `LOG*` + pub log_gas: usize, + /// Additional gas for data in `LOG*` + pub log_data_gas: usize, + 
/// Additional gas for each topic in `LOG*` + pub log_topic_gas: usize, + /// Gas price for `CREATE` opcode + pub create_gas: usize, + /// Gas price for `*CALL*` opcodes + pub call_gas: usize, + /// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0` + pub call_stipend: usize, + /// Additional gas required for value transfer (`CALL|CALLCODE`) + pub call_value_transfer_gas: usize, + /// Additional gas for creating new account (`CALL|CALLCODE`) + pub call_new_account_gas: usize, + /// Refund for SUICIDE + pub suicide_refund_gas: usize, + /// Gas for used memory + pub memory_gas: usize, + /// Coefficient used to convert memory size to gas price for memory + pub quad_coeff_div: usize, + /// Cost for contract length when executing `CREATE` + pub create_data_gas: usize, + /// Maximum code size when creating a contract. + pub create_data_limit: usize, + /// Transaction cost + pub tx_gas: usize, + /// `CREATE` transaction cost + pub tx_create_gas: usize, + /// Additional cost for empty data transaction + pub tx_data_zero_gas: usize, + /// Additional cost for non-empty data transaction + pub tx_data_non_zero_gas: usize, + /// Gas price for copying memory + pub copy_gas: usize, + /// Price of EXTCODESIZE + pub extcodesize_gas: usize, + /// Base price of EXTCODECOPY + pub extcodecopy_base_gas: usize, + /// Price of BALANCE + pub balance_gas: usize, + /// Price of EXTCODEHASH + pub extcodehash_gas: usize, + /// Price of SUICIDE + pub suicide_gas: usize, + /// Amount of additional gas to pay when SUICIDE credits a non-existant account + pub suicide_to_new_account_cost: usize, + /// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit. + /// If None: let CALL's gas = (requested > GAS ? [OOG] : GAS). let CREATE's gas = GAS + pub sub_gas_cap_divisor: Option, + /// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value. 
+ pub no_empty: bool, + /// Kill empty accounts if touched. + pub kill_empty: bool, + /// Blockhash instruction gas cost. + pub blockhash_gas: usize, + /// Static Call opcode enabled. + pub have_static_call: bool, + /// RETURNDATA and RETURNDATASIZE opcodes enabled. + pub have_return_data: bool, + /// SHL, SHR, SAR opcodes enabled. + pub have_bitwise_shifting: bool, + /// CHAINID opcode enabled. + pub have_chain_id: bool, + /// SELFBALANCE opcode enabled. + pub have_selfbalance: bool, + /// Kill basic accounts below this balance if touched. + pub kill_dust: CleanDustMode, + /// Enable EIP-1283 rules + pub eip1283: bool, + /// Enable EIP-1706 rules + pub eip1706: bool, + /// VM execution does not increase null signed address nonce if this field is true. + pub keep_unsigned_nonce: bool, + /// Wasm extra schedule settings, if wasm activated + pub wasm: Option, } /// Wasm cost table #[derive(Debug)] pub struct WasmCosts { - /// Default opcode cost - pub regular: u32, - /// Div operations multiplier. - pub div: u32, - /// Div operations multiplier. - pub mul: u32, - /// Memory (load/store) operations multiplier. - pub mem: u32, - /// General static query of U256 value from env-info - pub static_u256: u32, - /// General static query of Address value from env-info - pub static_address: u32, - /// Memory stipend. Amount of free memory (in 64kb pages) each contract can use for stack. - pub initial_mem: u32, - /// Grow memory cost, per page (64kb) - pub grow_mem: u32, - /// Memory copy cost, per byte - pub memcpy: u32, - /// Max stack height (native WebAssembly stack limiter) - pub max_stack_height: u32, - /// Cost of wasm opcode is calculated as TABLE_ENTRY_COST * `opcodes_mul` / `opcodes_div` - pub opcodes_mul: u32, - /// Cost of wasm opcode is calculated as TABLE_ENTRY_COST * `opcodes_mul` / `opcodes_div` - pub opcodes_div: u32, - /// Whether create2 extern function is activated. - pub have_create2: bool, - /// Whether gasleft extern function is activated. 
- pub have_gasleft: bool, + /// Default opcode cost + pub regular: u32, + /// Div operations multiplier. + pub div: u32, + /// Div operations multiplier. + pub mul: u32, + /// Memory (load/store) operations multiplier. + pub mem: u32, + /// General static query of U256 value from env-info + pub static_u256: u32, + /// General static query of Address value from env-info + pub static_address: u32, + /// Memory stipend. Amount of free memory (in 64kb pages) each contract can use for stack. + pub initial_mem: u32, + /// Grow memory cost, per page (64kb) + pub grow_mem: u32, + /// Memory copy cost, per byte + pub memcpy: u32, + /// Max stack height (native WebAssembly stack limiter) + pub max_stack_height: u32, + /// Cost of wasm opcode is calculated as TABLE_ENTRY_COST * `opcodes_mul` / `opcodes_div` + pub opcodes_mul: u32, + /// Cost of wasm opcode is calculated as TABLE_ENTRY_COST * `opcodes_mul` / `opcodes_div` + pub opcodes_div: u32, + /// Whether create2 extern function is activated. + pub have_create2: bool, + /// Whether gasleft extern function is activated. + pub have_gasleft: bool, } impl Default for WasmCosts { - fn default() -> Self { - WasmCosts { - regular: 1, - div: 16, - mul: 4, - mem: 2, - static_u256: 64, - static_address: 40, - initial_mem: 4096, - grow_mem: 8192, - memcpy: 1, - max_stack_height: 64*1024, - opcodes_mul: 3, - opcodes_div: 8, - have_create2: false, - have_gasleft: false, - } - } + fn default() -> Self { + WasmCosts { + regular: 1, + div: 16, + mul: 4, + mem: 2, + static_u256: 64, + static_address: 40, + initial_mem: 4096, + grow_mem: 8192, + memcpy: 1, + max_stack_height: 64 * 1024, + opcodes_mul: 3, + opcodes_div: 8, + have_create2: false, + have_gasleft: false, + } + } } /// Dust accounts cleanup mode. #[derive(Debug, PartialEq, Eq)] pub enum CleanDustMode { - /// Dust cleanup is disabled. - Off, - /// Basic dust accounts will be removed. - BasicOnly, - /// Basic and contract dust accounts will be removed. 
- WithCodeAndStorage, + /// Dust cleanup is disabled. + Off, + /// Basic dust accounts will be removed. + BasicOnly, + /// Basic and contract dust accounts will be removed. + WithCodeAndStorage, } impl Schedule { - /// Schedule for the Frontier-era of the Ethereum main net. - pub fn new_frontier() -> Schedule { - Self::new(false, false, 21000) - } + /// Schedule for the Frontier-era of the Ethereum main net. + pub fn new_frontier() -> Schedule { + Self::new(false, false, 21000) + } - /// Schedule for the Homestead-era of the Ethereum main net. - pub fn new_homestead() -> Schedule { - Self::new(true, true, 53000) - } + /// Schedule for the Homestead-era of the Ethereum main net. + pub fn new_homestead() -> Schedule { + Self::new(true, true, 53000) + } - /// Schedule for the post-EIP-150-era of the Ethereum main net. - pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule { - Schedule { - exceptional_failed_code_deposit: true, - have_delegate_call: true, - have_create2: false, - have_revert: false, - have_return_data: false, - have_bitwise_shifting: false, - have_chain_id: false, - have_selfbalance: false, - have_extcodehash: false, - stack_limit: 1024, - max_depth: 1024, - tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0], - exp_gas: 10, - exp_byte_gas: if fix_exp {50} else {10}, - sha3_gas: 30, - sha3_word_gas: 6, - sload_gas: 200, - sstore_set_gas: 20000, - sstore_reset_gas: 5000, - sstore_refund_gas: 15000, - jumpdest_gas: 1, - log_gas: 375, - log_data_gas: 8, - log_topic_gas: 375, - create_gas: 32000, - call_gas: 700, - call_stipend: 2300, - call_value_transfer_gas: 9000, - call_new_account_gas: 25000, - suicide_refund_gas: 24000, - memory_gas: 3, - quad_coeff_div: 512, - create_data_gas: 200, - create_data_limit: max_code_size, - tx_gas: 21000, - tx_create_gas: 53000, - tx_data_zero_gas: 4, - tx_data_non_zero_gas: 68, - copy_gas: 3, - extcodesize_gas: 700, - extcodecopy_base_gas: 700, - extcodehash_gas: 400, - 
balance_gas: 400, - suicide_gas: 5000, - suicide_to_new_account_cost: 25000, - sub_gas_cap_divisor: Some(64), - no_empty: no_empty, - kill_empty: kill_empty, - blockhash_gas: 20, - have_static_call: false, - kill_dust: CleanDustMode::Off, - eip1283: false, - eip1706: false, - keep_unsigned_nonce: false, - wasm: None, - } - } + /// Schedule for the post-EIP-150-era of the Ethereum main net. + pub fn new_post_eip150( + max_code_size: usize, + fix_exp: bool, + no_empty: bool, + kill_empty: bool, + ) -> Schedule { + Schedule { + exceptional_failed_code_deposit: true, + have_delegate_call: true, + have_create2: false, + have_revert: false, + have_return_data: false, + have_bitwise_shifting: false, + have_chain_id: false, + have_selfbalance: false, + have_extcodehash: false, + stack_limit: 1024, + max_depth: 1024, + tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0], + exp_gas: 10, + exp_byte_gas: if fix_exp { 50 } else { 10 }, + sha3_gas: 30, + sha3_word_gas: 6, + sload_gas: 200, + sstore_set_gas: 20000, + sstore_reset_gas: 5000, + sstore_refund_gas: 15000, + jumpdest_gas: 1, + log_gas: 375, + log_data_gas: 8, + log_topic_gas: 375, + create_gas: 32000, + call_gas: 700, + call_stipend: 2300, + call_value_transfer_gas: 9000, + call_new_account_gas: 25000, + suicide_refund_gas: 24000, + memory_gas: 3, + quad_coeff_div: 512, + create_data_gas: 200, + create_data_limit: max_code_size, + tx_gas: 21000, + tx_create_gas: 53000, + tx_data_zero_gas: 4, + tx_data_non_zero_gas: 68, + copy_gas: 3, + extcodesize_gas: 700, + extcodecopy_base_gas: 700, + extcodehash_gas: 400, + balance_gas: 400, + suicide_gas: 5000, + suicide_to_new_account_cost: 25000, + sub_gas_cap_divisor: Some(64), + no_empty: no_empty, + kill_empty: kill_empty, + blockhash_gas: 20, + have_static_call: false, + kill_dust: CleanDustMode::Off, + eip1283: false, + eip1706: false, + keep_unsigned_nonce: false, + wasm: None, + } + } - /// Schedule for the Byzantium fork of the Ethereum main net. 
- pub fn new_byzantium() -> Schedule { - let mut schedule = Self::new_post_eip150(24576, true, true, true); - schedule.have_create2 = true; - schedule.have_revert = true; - schedule.have_static_call = true; - schedule.have_return_data = true; - schedule - } + /// Schedule for the Byzantium fork of the Ethereum main net. + pub fn new_byzantium() -> Schedule { + let mut schedule = Self::new_post_eip150(24576, true, true, true); + schedule.have_create2 = true; + schedule.have_revert = true; + schedule.have_static_call = true; + schedule.have_return_data = true; + schedule + } - /// Schedule for the Constantinople fork of the Ethereum main net. - pub fn new_constantinople() -> Schedule { - let mut schedule = Self::new_byzantium(); - schedule.have_bitwise_shifting = true; - schedule - } + /// Schedule for the Constantinople fork of the Ethereum main net. + pub fn new_constantinople() -> Schedule { + let mut schedule = Self::new_byzantium(); + schedule.have_bitwise_shifting = true; + schedule + } - /// Schedule for the Istanbul fork of the Ethereum main net. - pub fn new_istanbul() -> Schedule { - let mut schedule = Self::new_constantinople(); - schedule.have_chain_id = true; // EIP 1344 - schedule.tx_data_non_zero_gas = 16; // EIP 2028 - schedule.sload_gas = 800; // EIP 1884 - schedule.balance_gas = 700; // EIP 1884 - schedule.extcodehash_gas = 700; // EIP 1884 - schedule.have_selfbalance = true; // EIP 1884 - schedule - } + /// Schedule for the Istanbul fork of the Ethereum main net. 
+ pub fn new_istanbul() -> Schedule { + let mut schedule = Self::new_constantinople(); + schedule.have_chain_id = true; // EIP 1344 + schedule.tx_data_non_zero_gas = 16; // EIP 2028 + schedule.sload_gas = 800; // EIP 1884 + schedule.balance_gas = 700; // EIP 1884 + schedule.extcodehash_gas = 700; // EIP 1884 + schedule.have_selfbalance = true; // EIP 1884 + schedule + } - fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule { - Schedule { - exceptional_failed_code_deposit: efcd, - have_delegate_call: hdc, - have_create2: false, - have_revert: false, - have_return_data: false, - have_bitwise_shifting: false, - have_chain_id: false, - have_selfbalance: false, - have_extcodehash: false, - stack_limit: 1024, - max_depth: 1024, - tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0], - exp_gas: 10, - exp_byte_gas: 10, - sha3_gas: 30, - sha3_word_gas: 6, - sload_gas: 50, - sstore_set_gas: 20000, - sstore_reset_gas: 5000, - sstore_refund_gas: 15000, - jumpdest_gas: 1, - log_gas: 375, - log_data_gas: 8, - log_topic_gas: 375, - create_gas: 32000, - call_gas: 40, - call_stipend: 2300, - call_value_transfer_gas: 9000, - call_new_account_gas: 25000, - suicide_refund_gas: 24000, - memory_gas: 3, - quad_coeff_div: 512, - create_data_gas: 200, - create_data_limit: usize::max_value(), - tx_gas: 21000, - tx_create_gas: tcg, - tx_data_zero_gas: 4, - tx_data_non_zero_gas: 68, - copy_gas: 3, - extcodesize_gas: 20, - extcodecopy_base_gas: 20, - extcodehash_gas: 400, - balance_gas: 20, - suicide_gas: 0, - suicide_to_new_account_cost: 0, - sub_gas_cap_divisor: None, - no_empty: false, - kill_empty: false, - blockhash_gas: 20, - have_static_call: false, - kill_dust: CleanDustMode::Off, - eip1283: false, - eip1706: false, - keep_unsigned_nonce: false, - wasm: None, - } - } + fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule { + Schedule { + exceptional_failed_code_deposit: efcd, + have_delegate_call: hdc, + have_create2: false, + have_revert: false, + have_return_data: false, + 
have_bitwise_shifting: false, + have_chain_id: false, + have_selfbalance: false, + have_extcodehash: false, + stack_limit: 1024, + max_depth: 1024, + tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0], + exp_gas: 10, + exp_byte_gas: 10, + sha3_gas: 30, + sha3_word_gas: 6, + sload_gas: 50, + sstore_set_gas: 20000, + sstore_reset_gas: 5000, + sstore_refund_gas: 15000, + jumpdest_gas: 1, + log_gas: 375, + log_data_gas: 8, + log_topic_gas: 375, + create_gas: 32000, + call_gas: 40, + call_stipend: 2300, + call_value_transfer_gas: 9000, + call_new_account_gas: 25000, + suicide_refund_gas: 24000, + memory_gas: 3, + quad_coeff_div: 512, + create_data_gas: 200, + create_data_limit: usize::max_value(), + tx_gas: 21000, + tx_create_gas: tcg, + tx_data_zero_gas: 4, + tx_data_non_zero_gas: 68, + copy_gas: 3, + extcodesize_gas: 20, + extcodecopy_base_gas: 20, + extcodehash_gas: 400, + balance_gas: 20, + suicide_gas: 0, + suicide_to_new_account_cost: 0, + sub_gas_cap_divisor: None, + no_empty: false, + kill_empty: false, + blockhash_gas: 20, + have_static_call: false, + kill_dust: CleanDustMode::Off, + eip1283: false, + eip1706: false, + keep_unsigned_nonce: false, + wasm: None, + } + } - /// Returns wasm schedule - /// - /// May panic if there is no wasm schedule - pub fn wasm(&self) -> &WasmCosts { - // *** Prefer PANIC here instead of silently breaking consensus! *** - self.wasm.as_ref().expect("Wasm schedule expected to exist while checking wasm contract. Misconfigured client?") - } + /// Returns wasm schedule + /// + /// May panic if there is no wasm schedule + pub fn wasm(&self) -> &WasmCosts { + // *** Prefer PANIC here instead of silently breaking consensus! *** + self.wasm.as_ref().expect( + "Wasm schedule expected to exist while checking wasm contract. 
Misconfigured client?", + ) + } } impl Default for Schedule { - fn default() -> Self { - Schedule::new_frontier() - } + fn default() -> Self { + Schedule::new_frontier() + } } #[test] #[cfg(test)] fn schedule_evm_assumptions() { - let s1 = Schedule::new_frontier(); - let s2 = Schedule::new_homestead(); + let s1 = Schedule::new_frontier(); + let s2 = Schedule::new_homestead(); - // To optimize division we assume 2**9 for quad_coeff_div - assert_eq!(s1.quad_coeff_div, 512); - assert_eq!(s2.quad_coeff_div, 512); + // To optimize division we assume 2**9 for quad_coeff_div + assert_eq!(s1.quad_coeff_div, 512); + assert_eq!(s2.quad_coeff_div, 512); } diff --git a/ethcore/vm/src/tests.rs b/ethcore/vm/src/tests.rs index 5684a234a..261473dac 100644 --- a/ethcore/vm/src/tests.rs +++ b/ethcore/vm/src/tests.rs @@ -14,39 +14,47 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::collections::{HashMap, HashSet}; - -use ethereum_types::{U256, H256, Address}; -use bytes::Bytes; -use { - CallType, Schedule, EnvInfo, - ReturnData, Ext, ContractCreateResult, MessageCallResult, - CreateContractAddress, Result, GasLeft, +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, }; -use hash::keccak; + +use bytes::Bytes; use error::TrapKind; +use ethereum_types::{Address, H256, U256}; +use hash::keccak; +use CallType; +use ContractCreateResult; +use CreateContractAddress; +use EnvInfo; +use Ext; +use GasLeft; +use MessageCallResult; +use Result; +use ReturnData; +use Schedule; pub struct FakeLogEntry { - pub topics: Vec, - pub data: Bytes + pub topics: Vec, + pub data: Bytes, } #[derive(PartialEq, Eq, Hash, Debug)] pub enum FakeCallType { - Call, Create + Call, + Create, } #[derive(PartialEq, Eq, Hash, Debug)] pub struct FakeCall { - pub call_type: FakeCallType, - pub create_scheme: Option, - pub gas: U256, - pub sender_address: Option
, - pub receive_address: Option
, - pub value: Option, - pub data: Bytes, - pub code_address: Option
, + pub call_type: FakeCallType, + pub create_scheme: Option, + pub gas: U256, + pub sender_address: Option
, + pub receive_address: Option
, + pub value: Option, + pub data: Bytes, + pub code_address: Option
, } /// Fake externalities test structure. @@ -54,211 +62,211 @@ pub struct FakeCall { /// Can't do recursive calls. #[derive(Default)] pub struct FakeExt { - pub store: HashMap, - pub suicides: HashSet
, - pub calls: HashSet, - pub sstore_clears: i128, - pub depth: usize, - pub blockhashes: HashMap, - pub codes: HashMap>, - pub logs: Vec, - pub info: EnvInfo, - pub schedule: Schedule, - pub balances: HashMap, - pub tracing: bool, - pub is_static: bool, + pub store: HashMap, + pub suicides: HashSet
, + pub calls: HashSet, + pub sstore_clears: i128, + pub depth: usize, + pub blockhashes: HashMap, + pub codes: HashMap>, + pub logs: Vec, + pub info: EnvInfo, + pub schedule: Schedule, + pub balances: HashMap, + pub tracing: bool, + pub is_static: bool, - chain_id: u64, + chain_id: u64, } // similar to the normal `finalize` function, but ignoring NeedsReturn. pub fn test_finalize(res: Result) -> Result { - match res { - Ok(GasLeft::Known(gas)) => Ok(gas), - Ok(GasLeft::NeedsReturn{..}) => unimplemented!(), // since ret is unimplemented. - Err(e) => Err(e), - } + match res { + Ok(GasLeft::Known(gas)) => Ok(gas), + Ok(GasLeft::NeedsReturn { .. }) => unimplemented!(), // since ret is unimplemented. + Err(e) => Err(e), + } } impl FakeExt { - /// New fake externalities - pub fn new() -> Self { - FakeExt::default() - } + /// New fake externalities + pub fn new() -> Self { + FakeExt::default() + } - /// New fake externalities with byzantium schedule rules - pub fn new_byzantium() -> Self { - let mut ext = FakeExt::default(); - ext.schedule = Schedule::new_byzantium(); - ext - } + /// New fake externalities with byzantium schedule rules + pub fn new_byzantium() -> Self { + let mut ext = FakeExt::default(); + ext.schedule = Schedule::new_byzantium(); + ext + } - /// New fake externalities with constantinople schedule rules - pub fn new_constantinople() -> Self { - let mut ext = FakeExt::default(); - ext.schedule = Schedule::new_constantinople(); - ext - } + /// New fake externalities with constantinople schedule rules + pub fn new_constantinople() -> Self { + let mut ext = FakeExt::default(); + ext.schedule = Schedule::new_constantinople(); + ext + } - /// New fake externalities with Istanbul schedule rules - pub fn new_istanbul() -> Self { - let mut ext = FakeExt::default(); - ext.schedule = Schedule::new_istanbul(); - ext - } + /// New fake externalities with Istanbul schedule rules + pub fn new_istanbul() -> Self { + let mut ext = FakeExt::default(); + ext.schedule = 
Schedule::new_istanbul(); + ext + } - /// Alter fake externalities to allow wasm - pub fn with_wasm(mut self) -> Self { - self.schedule.wasm = Some(Default::default()); - self - } + /// Alter fake externalities to allow wasm + pub fn with_wasm(mut self) -> Self { + self.schedule.wasm = Some(Default::default()); + self + } - /// Set chain ID - pub fn with_chain_id(mut self, chain_id: u64) -> Self { - self.chain_id = chain_id; - self - } + /// Set chain ID + pub fn with_chain_id(mut self, chain_id: u64) -> Self { + self.chain_id = chain_id; + self + } } impl Ext for FakeExt { - fn initial_storage_at(&self, _key: &H256) -> Result { - Ok(H256::new()) - } + fn initial_storage_at(&self, _key: &H256) -> Result { + Ok(H256::new()) + } - fn storage_at(&self, key: &H256) -> Result { - Ok(self.store.get(key).unwrap_or(&H256::new()).clone()) - } + fn storage_at(&self, key: &H256) -> Result { + Ok(self.store.get(key).unwrap_or(&H256::new()).clone()) + } - fn set_storage(&mut self, key: H256, value: H256) -> Result<()> { - self.store.insert(key, value); - Ok(()) - } + fn set_storage(&mut self, key: H256, value: H256) -> Result<()> { + self.store.insert(key, value); + Ok(()) + } - fn exists(&self, address: &Address) -> Result { - Ok(self.balances.contains_key(address)) - } + fn exists(&self, address: &Address) -> Result { + Ok(self.balances.contains_key(address)) + } - fn exists_and_not_null(&self, address: &Address) -> Result { - Ok(self.balances.get(address).map_or(false, |b| !b.is_zero())) - } + fn exists_and_not_null(&self, address: &Address) -> Result { + Ok(self.balances.get(address).map_or(false, |b| !b.is_zero())) + } - fn origin_balance(&self) -> Result { - unimplemented!() - } + fn origin_balance(&self) -> Result { + unimplemented!() + } - fn balance(&self, address: &Address) -> Result { - Ok(self.balances[address]) - } + fn balance(&self, address: &Address) -> Result { + Ok(self.balances[address]) + } - fn blockhash(&mut self, number: &U256) -> H256 { - 
self.blockhashes.get(number).unwrap_or(&H256::new()).clone() - } + fn blockhash(&mut self, number: &U256) -> H256 { + self.blockhashes.get(number).unwrap_or(&H256::new()).clone() + } - fn create( - &mut self, - gas: &U256, - value: &U256, - code: &[u8], - address: CreateContractAddress, - _trap: bool, - ) -> ::std::result::Result { - self.calls.insert(FakeCall { - call_type: FakeCallType::Create, - create_scheme: Some(address), - gas: *gas, - sender_address: None, - receive_address: None, - value: Some(*value), - data: code.to_vec(), - code_address: None - }); - // TODO: support traps in testing. - Ok(ContractCreateResult::Failed) - } + fn create( + &mut self, + gas: &U256, + value: &U256, + code: &[u8], + address: CreateContractAddress, + _trap: bool, + ) -> ::std::result::Result { + self.calls.insert(FakeCall { + call_type: FakeCallType::Create, + create_scheme: Some(address), + gas: *gas, + sender_address: None, + receive_address: None, + value: Some(*value), + data: code.to_vec(), + code_address: None, + }); + // TODO: support traps in testing. + Ok(ContractCreateResult::Failed) + } - fn call( - &mut self, - gas: &U256, - sender_address: &Address, - receive_address: &Address, - value: Option, - data: &[u8], - code_address: &Address, - _call_type: CallType, - _trap: bool, - ) -> ::std::result::Result { - self.calls.insert(FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: *gas, - sender_address: Some(sender_address.clone()), - receive_address: Some(receive_address.clone()), - value: value, - data: data.to_vec(), - code_address: Some(code_address.clone()) - }); - // TODO: support traps in testing. 
- Ok(MessageCallResult::Success(*gas, ReturnData::empty())) - } + fn call( + &mut self, + gas: &U256, + sender_address: &Address, + receive_address: &Address, + value: Option, + data: &[u8], + code_address: &Address, + _call_type: CallType, + _trap: bool, + ) -> ::std::result::Result { + self.calls.insert(FakeCall { + call_type: FakeCallType::Call, + create_scheme: None, + gas: *gas, + sender_address: Some(sender_address.clone()), + receive_address: Some(receive_address.clone()), + value: value, + data: data.to_vec(), + code_address: Some(code_address.clone()), + }); + // TODO: support traps in testing. + Ok(MessageCallResult::Success(*gas, ReturnData::empty())) + } - fn extcode(&self, address: &Address) -> Result>> { - Ok(self.codes.get(address).cloned()) - } + fn extcode(&self, address: &Address) -> Result>> { + Ok(self.codes.get(address).cloned()) + } - fn extcodesize(&self, address: &Address) -> Result> { - Ok(self.codes.get(address).map(|c| c.len())) - } + fn extcodesize(&self, address: &Address) -> Result> { + Ok(self.codes.get(address).map(|c| c.len())) + } - fn extcodehash(&self, address: &Address) -> Result> { - Ok(self.codes.get(address).map(|c| keccak(c.as_ref()))) - } + fn extcodehash(&self, address: &Address) -> Result> { + Ok(self.codes.get(address).map(|c| keccak(c.as_ref()))) + } - fn log(&mut self, topics: Vec, data: &[u8]) -> Result<()> { - self.logs.push(FakeLogEntry { - topics, - data: data.to_vec() - }); - Ok(()) - } + fn log(&mut self, topics: Vec, data: &[u8]) -> Result<()> { + self.logs.push(FakeLogEntry { + topics, + data: data.to_vec(), + }); + Ok(()) + } - fn ret(self, _gas: &U256, _data: &ReturnData, _apply_state: bool) -> Result { - unimplemented!(); - } + fn ret(self, _gas: &U256, _data: &ReturnData, _apply_state: bool) -> Result { + unimplemented!(); + } - fn suicide(&mut self, refund_address: &Address) -> Result<()> { - self.suicides.insert(refund_address.clone()); - Ok(()) - } + fn suicide(&mut self, refund_address: &Address) -> 
Result<()> { + self.suicides.insert(refund_address.clone()); + Ok(()) + } - fn schedule(&self) -> &Schedule { - &self.schedule - } + fn schedule(&self) -> &Schedule { + &self.schedule + } - fn env_info(&self) -> &EnvInfo { - &self.info - } + fn env_info(&self) -> &EnvInfo { + &self.info + } - fn chain_id(&self) -> u64 { - self.chain_id - } + fn chain_id(&self) -> u64 { + self.chain_id + } - fn depth(&self) -> usize { - self.depth - } + fn depth(&self) -> usize { + self.depth + } - fn is_static(&self) -> bool { - self.is_static - } + fn is_static(&self) -> bool { + self.is_static + } - fn add_sstore_refund(&mut self, value: usize) { - self.sstore_clears += value as i128; - } + fn add_sstore_refund(&mut self, value: usize) { + self.sstore_clears += value as i128; + } - fn sub_sstore_refund(&mut self, value: usize) { - self.sstore_clears -= value as i128; - } + fn sub_sstore_refund(&mut self, value: usize) { + self.sstore_clears -= value as i128; + } - fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8, _gas: U256) -> bool { - self.tracing - } + fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8, _gas: U256) -> bool { + self.tracing + } } diff --git a/ethcore/wasm/run/src/fixture.rs b/ethcore/wasm/run/src/fixture.rs index 42117d6df..abc36dbf6 100644 --- a/ethcore/wasm/run/src/fixture.rs +++ b/ethcore/wasm/run/src/fixture.rs @@ -14,73 +14,75 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use ethjson::{ + bytes::Bytes, + hash::{Address, H256}, + uint::Uint, +}; use std::borrow::Cow; -use ethjson::uint::Uint; -use ethjson::hash::{Address, H256}; -use ethjson::bytes::Bytes; #[derive(Deserialize)] #[serde(untagged)] pub enum Source { - Raw(Cow<'static, String>), - Constructor { - #[serde(rename = "constructor")] - source: Cow<'static, String>, - arguments: Bytes, - sender: Address, - at: Address, - }, + Raw(Cow<'static, String>), + Constructor { + #[serde(rename = "constructor")] + source: Cow<'static, String>, + arguments: Bytes, + sender: Address, + at: Address, + }, } impl Source { - pub fn as_ref(&self) -> &str { - match *self { - Source::Raw(ref r) => r.as_ref(), - Source::Constructor { ref source, .. } => source.as_ref(), - } - } + pub fn as_ref(&self) -> &str { + match *self { + Source::Raw(ref r) => r.as_ref(), + Source::Constructor { ref source, .. } => source.as_ref(), + } + } } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct Fixture { - pub caption: Cow<'static, String>, - pub source: Source, - pub address: Option
, - pub sender: Option
, - pub value: Option, - pub gas_limit: Option, - pub payload: Option, - pub storage: Option>, - pub asserts: Vec, + pub caption: Cow<'static, String>, + pub source: Source, + pub address: Option
, + pub sender: Option
, + pub value: Option, + pub gas_limit: Option, + pub payload: Option, + pub storage: Option>, + pub asserts: Vec, } #[derive(Deserialize, Debug)] pub struct StorageEntry { - pub key: Uint, - pub value: Uint, + pub key: Uint, + pub value: Uint, } #[derive(Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] pub struct CallLocator { - pub sender: Option
, - pub receiver: Option
, - pub value: Option, - pub data: Option, - pub code_address: Option
, + pub sender: Option
, + pub receiver: Option
, + pub value: Option, + pub data: Option, + pub code_address: Option
, } #[derive(Deserialize, Debug)] pub struct StorageAssert { - pub key: H256, - pub value: H256, + pub key: H256, + pub value: H256, } #[derive(Deserialize, Debug)] pub enum Assert { - HasCall(CallLocator), - HasStorage(StorageAssert), - UsedGas(u64), - Return(Bytes), + HasCall(CallLocator), + HasStorage(StorageAssert), + UsedGas(u64), + Return(Bytes), } diff --git a/ethcore/wasm/run/src/main.rs b/ethcore/wasm/run/src/main.rs index 0773f9b42..997c964dc 100644 --- a/ethcore/wasm/run/src/main.rs +++ b/ethcore/wasm/run/src/main.rs @@ -16,47 +16,56 @@ extern crate serde; extern crate serde_json; -#[macro_use] extern crate serde_derive; +#[macro_use] +extern crate serde_derive; +extern crate clap; +extern crate env_logger; extern crate ethereum_types; extern crate ethjson; -extern crate wasm; -extern crate vm; -extern crate clap; extern crate rustc_hex; -extern crate env_logger; +extern crate vm; +extern crate wasm; mod fixture; mod runner; -use fixture::Fixture; use clap::{App, Arg}; +use fixture::Fixture; use std::fs; fn main() { - ::env_logger::init(); + ::env_logger::init(); - let matches = App::new("pwasm-run-test") - .arg(Arg::with_name("target") - .index(1) - .required(true) - .multiple(true) - .help("JSON fixture")) - .get_matches(); + let matches = App::new("pwasm-run-test") + .arg( + Arg::with_name("target") + .index(1) + .required(true) + .multiple(true) + .help("JSON fixture"), + ) + .get_matches(); - let mut exit_code = 0; + let mut exit_code = 0; - for target in matches.values_of("target").expect("No target parameter") { - let mut f = fs::File::open(target).expect("Failed to open file"); - let fixtures: Vec = serde_json::from_reader(&mut f).expect("Failed to deserialize json"); + for target in matches.values_of("target").expect("No target parameter") { + let mut f = fs::File::open(target).expect("Failed to open file"); + let fixtures: Vec = + serde_json::from_reader(&mut f).expect("Failed to deserialize json"); - for fixture in fixtures.into_iter() { - let 
fails = runner::run_fixture(&fixture); - for fail in fails.iter() { - exit_code = 1; - println!("Failed assert in test \"{}\" ('{}'): {}", fixture.caption.as_ref(), target, fail); - } - } - } + for fixture in fixtures.into_iter() { + let fails = runner::run_fixture(&fixture); + for fail in fails.iter() { + exit_code = 1; + println!( + "Failed assert in test \"{}\" ('{}'): {}", + fixture.caption.as_ref(), + target, + fail + ); + } + } + } - std::process::exit(exit_code); + std::process::exit(exit_code); } diff --git a/ethcore/wasm/run/src/runner.rs b/ethcore/wasm/run/src/runner.rs index a69fe6bf6..edab0732c 100644 --- a/ethcore/wasm/run/src/runner.rs +++ b/ethcore/wasm/run/src/runner.rs @@ -14,264 +14,332 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use fixture::{Fixture, Assert, CallLocator, Source}; -use wasm::WasmInterpreter; -use vm::{self, Exec, GasLeft, ActionParams, ActionValue, ParamsType}; -use vm::tests::FakeExt; -use std::io::{self, Read}; -use std::{fs, path, fmt}; -use std::sync::Arc; -use ethereum_types::{U256, H256, H160}; +use ethereum_types::{H160, H256, U256}; +use fixture::{Assert, CallLocator, Fixture, Source}; use rustc_hex::ToHex; +use std::{ + fmt, fs, + io::{self, Read}, + path, + sync::Arc, +}; +use vm::{self, tests::FakeExt, ActionParams, ActionValue, Exec, GasLeft, ParamsType}; +use wasm::WasmInterpreter; fn load_code>(p: P) -> io::Result> { - let mut result = Vec::new(); - let mut f = fs::File::open(p)?; - f.read_to_end(&mut result)?; - Ok(result) + let mut result = Vec::new(); + let mut f = fs::File::open(p)?; + f.read_to_end(&mut result)?; + Ok(result) } fn wasm_interpreter(params: ActionParams) -> Box { - Box::new(WasmInterpreter::new(params)) + Box::new(WasmInterpreter::new(params)) } #[derive(Debug)] pub enum SpecNonconformity { - Address, + Address, } #[derive(Debug)] pub enum Fail { - Return { expected: Vec, actual: Vec }, - UsedGas { expected: u64, actual: 
u64 }, - Runtime(String), - Load(io::Error), - NoCall(CallLocator), - StorageMismatch { key: H256, expected: H256, actual: Option }, - Nonconformity(SpecNonconformity) + Return { + expected: Vec, + actual: Vec, + }, + UsedGas { + expected: u64, + actual: u64, + }, + Runtime(String), + Load(io::Error), + NoCall(CallLocator), + StorageMismatch { + key: H256, + expected: H256, + actual: Option, + }, + Nonconformity(SpecNonconformity), } impl Fail { - fn runtime(err: vm::Error) -> Vec { - vec![Fail::Runtime(format!("{}", err))] - } + fn runtime(err: vm::Error) -> Vec { + vec![Fail::Runtime(format!("{}", err))] + } - fn load(err: io::Error) -> Vec { - vec![Fail::Load(err)] - } + fn load(err: io::Error) -> Vec { + vec![Fail::Load(err)] + } - fn nononformity(kind: SpecNonconformity) -> Vec { - vec![Fail::Nonconformity(kind)] - } + fn nononformity(kind: SpecNonconformity) -> Vec { + vec![Fail::Nonconformity(kind)] + } } impl fmt::Display for Fail { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::Fail::*; - match *self { - Return { ref expected, ref actual } => - write!( - f, - "Expected to return result: 0x{} ({} bytes), but got 0x{} ({} bytes)", - expected.to_hex(), - expected.len(), - actual.to_hex(), - actual.len() - ), + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::Fail::*; + match *self { + Return { + ref expected, + ref actual, + } => write!( + f, + "Expected to return result: 0x{} ({} bytes), but got 0x{} ({} bytes)", + expected.to_hex(), + expected.len(), + actual.to_hex(), + actual.len() + ), - UsedGas { expected, actual } => - write!(f, "Expected to use gas: {}, but got actual gas used: {}", expected, actual), + UsedGas { expected, actual } => write!( + f, + "Expected to use gas: {}, but got actual gas used: {}", + expected, actual + ), - Runtime(ref s) => - write!(f, "WASM Runtime error: {}", s), + Runtime(ref s) => write!(f, "WASM Runtime error: {}", s), - Load(ref e) => - write!(f, "Load i/o error: {}", e), + Load(ref 
e) => write!(f, "Load i/o error: {}", e), - NoCall(ref call) => - write!(f, "Call not found: {:?}", call), + NoCall(ref call) => write!(f, "Call not found: {:?}", call), - StorageMismatch { ref key, ref expected, actual: Some(ref actual)} => - write!( - f, - "Storage key {} value mismatch, expected {}, got: {}", - key.to_vec().to_hex(), - expected.to_vec().to_hex(), - actual.to_vec().to_hex(), - ), + StorageMismatch { + ref key, + ref expected, + actual: Some(ref actual), + } => write!( + f, + "Storage key {} value mismatch, expected {}, got: {}", + key.to_vec().to_hex(), + expected.to_vec().to_hex(), + actual.to_vec().to_hex(), + ), - StorageMismatch { ref key, ref expected, actual: None} => - write!( - f, - "No expected storage value for key {} found, expected {}", - key.to_vec().to_hex(), - expected.to_vec().to_hex(), - ), + StorageMismatch { + ref key, + ref expected, + actual: None, + } => write!( + f, + "No expected storage value for key {} found, expected {}", + key.to_vec().to_hex(), + expected.to_vec().to_hex(), + ), - Nonconformity(SpecNonconformity::Address) => - write!(f, "Cannot use address when constructor is specified!"), - } - } + Nonconformity(SpecNonconformity::Address) => { + write!(f, "Cannot use address when constructor is specified!") + } + } + } } pub fn construct( - ext: &mut vm::Ext, - source: Vec, - arguments: Vec, - sender: H160, - at: H160, + ext: &mut vm::Ext, + source: Vec, + arguments: Vec, + sender: H160, + at: H160, ) -> Result, vm::Error> { + let mut params = ActionParams::default(); + params.sender = sender; + params.address = at; + params.gas = U256::from(100_000_000); + params.data = Some(arguments); + params.code = Some(Arc::new(source)); + params.params_type = ParamsType::Separate; - let mut params = ActionParams::default(); - params.sender = sender; - params.address = at; - params.gas = U256::from(100_000_000); - params.data = Some(arguments); - params.code = Some(Arc::new(source)); - params.params_type = 
ParamsType::Separate; - - Ok( - match wasm_interpreter(params).exec(ext).ok().expect("Wasm interpreter always calls with trap=false; trap never happens; qed")? { - GasLeft::Known(_) => Vec::new(), - GasLeft::NeedsReturn { data, .. } => data.to_vec(), - } - ) + Ok( + match wasm_interpreter(params) + .exec(ext) + .ok() + .expect("Wasm interpreter always calls with trap=false; trap never happens; qed")? + { + GasLeft::Known(_) => Vec::new(), + GasLeft::NeedsReturn { data, .. } => data.to_vec(), + }, + ) } pub fn run_fixture(fixture: &Fixture) -> Vec { - let mut params = ActionParams::default(); + let mut params = ActionParams::default(); - let source = match load_code(fixture.source.as_ref()) { - Ok(code) => code, - Err(e) => { return Fail::load(e); }, - }; + let source = match load_code(fixture.source.as_ref()) { + Ok(code) => code, + Err(e) => { + return Fail::load(e); + } + }; - let mut ext = FakeExt::new().with_wasm(); - params.code = Some(Arc::new( - if let Source::Constructor { ref arguments, ref sender, ref at, .. } = fixture.source { - match construct(&mut ext, source, arguments.clone().into(), sender.clone().into(), at.clone().into()) { - Ok(code) => code, - Err(e) => { return Fail::runtime(e); } - } - } else { - source - } - )); + let mut ext = FakeExt::new().with_wasm(); + params.code = Some(Arc::new( + if let Source::Constructor { + ref arguments, + ref sender, + ref at, + .. + } = fixture.source + { + match construct( + &mut ext, + source, + arguments.clone().into(), + sender.clone().into(), + at.clone().into(), + ) { + Ok(code) => code, + Err(e) => { + return Fail::runtime(e); + } + } + } else { + source + }, + )); - if let Some(ref sender) = fixture.sender { - params.sender = sender.clone().into(); - } + if let Some(ref sender) = fixture.sender { + params.sender = sender.clone().into(); + } - if let Some(ref address) = fixture.address { - if let Source::Constructor { .. 
} = fixture.source { - return Fail::nononformity(SpecNonconformity::Address); - } + if let Some(ref address) = fixture.address { + if let Source::Constructor { .. } = fixture.source { + return Fail::nononformity(SpecNonconformity::Address); + } - params.address = address.clone().into(); - } else if let Source::Constructor { ref at, .. } = fixture.source { - params.address = at.clone().into(); - } + params.address = address.clone().into(); + } else if let Source::Constructor { ref at, .. } = fixture.source { + params.address = at.clone().into(); + } - if let Some(gas_limit) = fixture.gas_limit { - params.gas = U256::from(gas_limit); - } + if let Some(gas_limit) = fixture.gas_limit { + params.gas = U256::from(gas_limit); + } - if let Some(ref data) = fixture.payload { - params.data = Some(data.clone().into()) - } + if let Some(ref data) = fixture.payload { + params.data = Some(data.clone().into()) + } - if let Some(value) = fixture.value { - params.value = ActionValue::Transfer(value.clone().into()) - } + if let Some(value) = fixture.value { + params.value = ActionValue::Transfer(value.clone().into()) + } - if let Some(ref storage) = fixture.storage { - for storage_entry in storage.iter() { - let key: U256 = storage_entry.key.into(); - let val: U256 = storage_entry.value.into(); - ext.store.insert(key.into(), val.into()); - } - } + if let Some(ref storage) = fixture.storage { + for storage_entry in storage.iter() { + let key: U256 = storage_entry.key.into(); + let val: U256 = storage_entry.value.into(); + ext.store.insert(key.into(), val.into()); + } + } - let interpreter = wasm_interpreter(params); + let interpreter = wasm_interpreter(params); - let interpreter_return = match interpreter.exec(&mut ext).ok().expect("Wasm interpreter always calls with trap=false; trap never happens; qed") { - Ok(ret) => ret, - Err(e) => { return Fail::runtime(e); } - }; - let (gas_left, result) = match interpreter_return { - GasLeft::Known(gas) => { (gas, Vec::new()) }, - 
GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - }; + let interpreter_return = match interpreter + .exec(&mut ext) + .ok() + .expect("Wasm interpreter always calls with trap=false; trap never happens; qed") + { + Ok(ret) => ret, + Err(e) => { + return Fail::runtime(e); + } + }; + let (gas_left, result) = match interpreter_return { + GasLeft::Known(gas) => (gas, Vec::new()), + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + }; - let mut fails = Vec::new(); + let mut fails = Vec::new(); - for assert in fixture.asserts.iter() { - match *assert { - Assert::Return(ref data) => { - if &data[..] != &result[..] { - fails.push(Fail::Return { expected: (&data[..]).to_vec(), actual: (&result[..]).to_vec() }) - } - }, - Assert::UsedGas(gas) => { - let used_gas = fixture.gas_limit.unwrap_or(0) - gas_left.low_u64(); - if gas != used_gas { - fails.push(Fail::UsedGas { expected: gas, actual: used_gas }); - } - }, - Assert::HasCall(ref locator) => { - let mut found = false; + for assert in fixture.asserts.iter() { + match *assert { + Assert::Return(ref data) => { + if &data[..] != &result[..] { + fails.push(Fail::Return { + expected: (&data[..]).to_vec(), + actual: (&result[..]).to_vec(), + }) + } + } + Assert::UsedGas(gas) => { + let used_gas = fixture.gas_limit.unwrap_or(0) - gas_left.low_u64(); + if gas != used_gas { + fails.push(Fail::UsedGas { + expected: gas, + actual: used_gas, + }); + } + } + Assert::HasCall(ref locator) => { + let mut found = false; - for fake_call in ext.calls.iter() { - let mut match_ = true; - if let Some(ref data) = locator.data { - if data.as_ref() != &fake_call.data[..] { match_ = false; } - } + for fake_call in ext.calls.iter() { + let mut match_ = true; + if let Some(ref data) = locator.data { + if data.as_ref() != &fake_call.data[..] 
{ + match_ = false; + } + } - if let Some(ref code_addr) = locator.code_address { - if fake_call.code_address.unwrap_or(H160::zero()) != code_addr.clone().into() { match_ = false } - } + if let Some(ref code_addr) = locator.code_address { + if fake_call.code_address.unwrap_or(H160::zero()) + != code_addr.clone().into() + { + match_ = false + } + } - if let Some(ref sender) = locator.sender { - if fake_call.sender_address.unwrap_or(H160::zero()) != sender.clone().into() { match_ = false } - } + if let Some(ref sender) = locator.sender { + if fake_call.sender_address.unwrap_or(H160::zero()) != sender.clone().into() + { + match_ = false + } + } - if let Some(ref receiver) = locator.receiver { - if fake_call.receive_address.unwrap_or(H160::zero()) != receiver.clone().into() { match_ = false } - } + if let Some(ref receiver) = locator.receiver { + if fake_call.receive_address.unwrap_or(H160::zero()) + != receiver.clone().into() + { + match_ = false + } + } - if match_ { - found = true; - break; - } - } + if match_ { + found = true; + break; + } + } - if !found { - fails.push(Fail::NoCall(locator.clone())) - } - }, - Assert::HasStorage(ref storage_entry) => { - let expected_storage_key: H256 = storage_entry.key.clone().into(); - let expected_storage_value: H256 = storage_entry.value.clone().into(); - let val = ext.store.get(&expected_storage_key); + if !found { + fails.push(Fail::NoCall(locator.clone())) + } + } + Assert::HasStorage(ref storage_entry) => { + let expected_storage_key: H256 = storage_entry.key.clone().into(); + let expected_storage_value: H256 = storage_entry.value.clone().into(); + let val = ext.store.get(&expected_storage_key); - if let Some(val) = val { - if val != &expected_storage_value { - fails.push(Fail::StorageMismatch { - key: expected_storage_key, - expected: expected_storage_value, - actual: Some(val.clone()) - }) - } - } else { - fails.push(Fail::StorageMismatch { - key: expected_storage_key, - expected: expected_storage_value, - actual: None, 
- }) - } - - }, - } - } - fails + if let Some(val) = val { + if val != &expected_storage_value { + fails.push(Fail::StorageMismatch { + key: expected_storage_key, + expected: expected_storage_value, + actual: Some(val.clone()), + }) + } + } else { + fails.push(Fail::StorageMismatch { + key: expected_storage_key, + expected: expected_storage_value, + actual: None, + }) + } + } + } + } + fails } diff --git a/ethcore/wasm/src/env.rs b/ethcore/wasm/src/env.rs index b996ea000..64eb2e3f1 100644 --- a/ethcore/wasm/src/env.rs +++ b/ethcore/wasm/src/env.rs @@ -19,189 +19,111 @@ use std::cell::RefCell; use vm::WasmCosts; use wasmi::{ - self, Signature, Error, FuncRef, FuncInstance, MemoryDescriptor, - MemoryRef, MemoryInstance, memory_units, + self, memory_units, Error, FuncInstance, FuncRef, MemoryDescriptor, MemoryInstance, MemoryRef, + Signature, }; /// Internal ids all functions runtime supports. This is just a glue for wasmi interpreter /// that lacks high-level api and later will be factored out pub mod ids { - pub const STORAGE_WRITE_FUNC: usize = 0; - pub const STORAGE_READ_FUNC: usize = 10; - pub const RET_FUNC: usize = 20; - pub const GAS_FUNC: usize = 30; - pub const FETCH_INPUT_FUNC: usize = 40; - pub const INPUT_LENGTH_FUNC: usize = 50; - pub const CCALL_FUNC: usize = 60; - pub const SCALL_FUNC: usize = 70; - pub const DCALL_FUNC: usize = 80; - pub const VALUE_FUNC: usize = 90; - pub const CREATE_FUNC: usize = 100; - pub const SUICIDE_FUNC: usize = 110; - pub const BLOCKHASH_FUNC: usize = 120; - pub const BLOCKNUMBER_FUNC: usize = 130; - pub const COINBASE_FUNC: usize = 140; - pub const DIFFICULTY_FUNC: usize = 150; - pub const GASLIMIT_FUNC: usize = 160; - pub const TIMESTAMP_FUNC: usize = 170; - pub const ADDRESS_FUNC: usize = 180; - pub const SENDER_FUNC: usize = 190; - pub const ORIGIN_FUNC: usize = 200; - pub const ELOG_FUNC: usize = 210; - pub const CREATE2_FUNC: usize = 220; - pub const GASLEFT_FUNC: usize = 230; + pub const STORAGE_WRITE_FUNC: usize = 0; 
+ pub const STORAGE_READ_FUNC: usize = 10; + pub const RET_FUNC: usize = 20; + pub const GAS_FUNC: usize = 30; + pub const FETCH_INPUT_FUNC: usize = 40; + pub const INPUT_LENGTH_FUNC: usize = 50; + pub const CCALL_FUNC: usize = 60; + pub const SCALL_FUNC: usize = 70; + pub const DCALL_FUNC: usize = 80; + pub const VALUE_FUNC: usize = 90; + pub const CREATE_FUNC: usize = 100; + pub const SUICIDE_FUNC: usize = 110; + pub const BLOCKHASH_FUNC: usize = 120; + pub const BLOCKNUMBER_FUNC: usize = 130; + pub const COINBASE_FUNC: usize = 140; + pub const DIFFICULTY_FUNC: usize = 150; + pub const GASLIMIT_FUNC: usize = 160; + pub const TIMESTAMP_FUNC: usize = 170; + pub const ADDRESS_FUNC: usize = 180; + pub const SENDER_FUNC: usize = 190; + pub const ORIGIN_FUNC: usize = 200; + pub const ELOG_FUNC: usize = 210; + pub const CREATE2_FUNC: usize = 220; + pub const GASLEFT_FUNC: usize = 230; - pub const PANIC_FUNC: usize = 1000; - pub const DEBUG_FUNC: usize = 1010; + pub const PANIC_FUNC: usize = 1000; + pub const DEBUG_FUNC: usize = 1010; } /// Signatures of all functions runtime supports. The actual dispatch happens at /// impl runtime::Runtime methods. 
pub mod signatures { - use wasmi::{self, ValueType}; - use wasmi::ValueType::*; + use wasmi::{self, ValueType, ValueType::*}; - pub struct StaticSignature(pub &'static [ValueType], pub Option); + pub struct StaticSignature(pub &'static [ValueType], pub Option); - pub const STORAGE_READ: StaticSignature = StaticSignature( - &[I32, I32], - None, - ); + pub const STORAGE_READ: StaticSignature = StaticSignature(&[I32, I32], None); - pub const STORAGE_WRITE: StaticSignature = StaticSignature( - &[I32, I32], - None, - ); + pub const STORAGE_WRITE: StaticSignature = StaticSignature(&[I32, I32], None); - pub const RET: StaticSignature = StaticSignature( - &[I32, I32], - None, - ); + pub const RET: StaticSignature = StaticSignature(&[I32, I32], None); - pub const GAS: StaticSignature = StaticSignature( - &[I32], - None, - ); + pub const GAS: StaticSignature = StaticSignature(&[I32], None); - pub const FETCH_INPUT: StaticSignature = StaticSignature( - &[I32], - None, - ); + pub const FETCH_INPUT: StaticSignature = StaticSignature(&[I32], None); - pub const INPUT_LENGTH: StaticSignature = StaticSignature( - &[], - Some(I32), - ); + pub const INPUT_LENGTH: StaticSignature = StaticSignature(&[], Some(I32)); - pub const CCALL: StaticSignature = StaticSignature( - &[I64, I32, I32, I32, I32, I32, I32], - Some(I32), - ); + pub const CCALL: StaticSignature = + StaticSignature(&[I64, I32, I32, I32, I32, I32, I32], Some(I32)); - pub const DCALL: StaticSignature = StaticSignature( - &[I64, I32, I32, I32, I32, I32], - Some(I32), - ); + pub const DCALL: StaticSignature = StaticSignature(&[I64, I32, I32, I32, I32, I32], Some(I32)); - pub const SCALL: StaticSignature = StaticSignature( - &[I64, I32, I32, I32, I32, I32], - Some(I32), - ); + pub const SCALL: StaticSignature = StaticSignature(&[I64, I32, I32, I32, I32, I32], Some(I32)); - pub const PANIC: StaticSignature = StaticSignature( - &[I32, I32], - None, - ); + pub const PANIC: StaticSignature = StaticSignature(&[I32, I32], None); - 
pub const DEBUG: StaticSignature = StaticSignature( - &[I32, I32], - None, - ); + pub const DEBUG: StaticSignature = StaticSignature(&[I32, I32], None); - pub const VALUE: StaticSignature = StaticSignature( - &[I32], - None, - ); + pub const VALUE: StaticSignature = StaticSignature(&[I32], None); - pub const CREATE: StaticSignature = StaticSignature( - &[I32, I32, I32, I32], - Some(I32), - ); + pub const CREATE: StaticSignature = StaticSignature(&[I32, I32, I32, I32], Some(I32)); - pub const CREATE2: StaticSignature = StaticSignature( - &[I32, I32, I32, I32, I32], - Some(I32), - ); + pub const CREATE2: StaticSignature = StaticSignature(&[I32, I32, I32, I32, I32], Some(I32)); - pub const SUICIDE: StaticSignature = StaticSignature( - &[I32], - None, - ); + pub const SUICIDE: StaticSignature = StaticSignature(&[I32], None); - pub const BLOCKHASH: StaticSignature = StaticSignature( - &[I64, I32], - None, - ); + pub const BLOCKHASH: StaticSignature = StaticSignature(&[I64, I32], None); - pub const BLOCKNUMBER: StaticSignature = StaticSignature( - &[], - Some(I64), - ); + pub const BLOCKNUMBER: StaticSignature = StaticSignature(&[], Some(I64)); - pub const COINBASE: StaticSignature = StaticSignature( - &[I32], - None, - ); + pub const COINBASE: StaticSignature = StaticSignature(&[I32], None); - pub const DIFFICULTY: StaticSignature = StaticSignature( - &[I32], - None, - ); + pub const DIFFICULTY: StaticSignature = StaticSignature(&[I32], None); - pub const GASLEFT: StaticSignature = StaticSignature( - &[], - Some(I64), - ); + pub const GASLEFT: StaticSignature = StaticSignature(&[], Some(I64)); - pub const GASLIMIT: StaticSignature = StaticSignature( - &[I32], - None, - ); + pub const GASLIMIT: StaticSignature = StaticSignature(&[I32], None); - pub const TIMESTAMP: StaticSignature = StaticSignature( - &[], - Some(I64), - ); + pub const TIMESTAMP: StaticSignature = StaticSignature(&[], Some(I64)); - pub const ADDRESS: StaticSignature = StaticSignature( - &[I32], - None, - 
); + pub const ADDRESS: StaticSignature = StaticSignature(&[I32], None); - pub const SENDER: StaticSignature = StaticSignature( - &[I32], - None, - ); + pub const SENDER: StaticSignature = StaticSignature(&[I32], None); - pub const ORIGIN: StaticSignature = StaticSignature( - &[I32], - None, - ); + pub const ORIGIN: StaticSignature = StaticSignature(&[I32], None); - pub const ELOG: StaticSignature = StaticSignature( - &[I32, I32, I32, I32], - None, - ); + pub const ELOG: StaticSignature = StaticSignature(&[I32, I32, I32, I32], None); - impl Into for StaticSignature { - fn into(self) -> wasmi::Signature { - wasmi::Signature::new(self.0, self.1) - } - } + impl Into for StaticSignature { + fn into(self) -> wasmi::Signature { + wasmi::Signature::new(self.0, self.1) + } + } } fn host(signature: signatures::StaticSignature, idx: usize) -> FuncRef { - FuncInstance::alloc_host(signature.into(), idx) + FuncInstance::alloc_host(signature.into(), idx) } /// Import resolver for wasmi @@ -209,110 +131,117 @@ fn host(signature: signatures::StaticSignature, idx: usize) -> FuncRef { /// entries. /// Also manages initial memory request from the runtime. 
pub struct ImportResolver { - max_memory: u32, - memory: RefCell>, + max_memory: u32, + memory: RefCell>, - have_create2: bool, - have_gasleft: bool, + have_create2: bool, + have_gasleft: bool, } impl ImportResolver { - /// New import resolver with specifed maximum amount of inital memory (in wasm pages = 64kb) - pub fn with_limit(max_memory: u32, schedule: &WasmCosts) -> ImportResolver { - ImportResolver { - max_memory: max_memory, - memory: RefCell::new(None), + /// New import resolver with specifed maximum amount of inital memory (in wasm pages = 64kb) + pub fn with_limit(max_memory: u32, schedule: &WasmCosts) -> ImportResolver { + ImportResolver { + max_memory: max_memory, + memory: RefCell::new(None), - have_create2: schedule.have_create2, - have_gasleft: schedule.have_gasleft, - } - } + have_create2: schedule.have_create2, + have_gasleft: schedule.have_gasleft, + } + } - /// Returns memory that was instantiated during the contract module - /// start. If contract does not use memory at all, the dummy memory of length (0, 0) - /// will be created instead. So this method always returns memory instance - /// unless errored. - pub fn memory_ref(&self) -> MemoryRef { - { - let mut mem_ref = self.memory.borrow_mut(); - if mem_ref.is_none() { - *mem_ref = Some( - MemoryInstance::alloc( - memory_units::Pages(0), - Some(memory_units::Pages(0)), - ).expect("Memory allocation (0, 0) should not fail; qed") - ); - } - } + /// Returns memory that was instantiated during the contract module + /// start. If contract does not use memory at all, the dummy memory of length (0, 0) + /// will be created instead. So this method always returns memory instance + /// unless errored. 
+ pub fn memory_ref(&self) -> MemoryRef { + { + let mut mem_ref = self.memory.borrow_mut(); + if mem_ref.is_none() { + *mem_ref = Some( + MemoryInstance::alloc(memory_units::Pages(0), Some(memory_units::Pages(0))) + .expect("Memory allocation (0, 0) should not fail; qed"), + ); + } + } - self.memory.borrow().clone().expect("it is either existed or was created as (0, 0) above; qed") - } + self.memory + .borrow() + .clone() + .expect("it is either existed or was created as (0, 0) above; qed") + } - /// Returns memory size module initially requested - pub fn memory_size(&self) -> Result { - Ok(self.memory_ref().current_size().0 as u32) - } + /// Returns memory size module initially requested + pub fn memory_size(&self) -> Result { + Ok(self.memory_ref().current_size().0 as u32) + } } impl wasmi::ModuleImportResolver for ImportResolver { - fn resolve_func(&self, field_name: &str, _signature: &Signature) -> Result { - let func_ref = match field_name { - "storage_read" => host(signatures::STORAGE_READ, ids::STORAGE_READ_FUNC), - "storage_write" => host(signatures::STORAGE_WRITE, ids::STORAGE_WRITE_FUNC), - "ret" => host(signatures::RET, ids::RET_FUNC), - "gas" => host(signatures::GAS, ids::GAS_FUNC), - "input_length" => host(signatures::INPUT_LENGTH, ids::INPUT_LENGTH_FUNC), - "fetch_input" => host(signatures::FETCH_INPUT, ids::FETCH_INPUT_FUNC), - "panic" => host(signatures::PANIC, ids::PANIC_FUNC), - "debug" => host(signatures::DEBUG, ids::DEBUG_FUNC), - "ccall" => host(signatures::CCALL, ids::CCALL_FUNC), - "dcall" => host(signatures::DCALL, ids::DCALL_FUNC), - "scall" => host(signatures::SCALL, ids::SCALL_FUNC), - "value" => host(signatures::VALUE, ids::VALUE_FUNC), - "create" => host(signatures::CREATE, ids::CREATE_FUNC), - "suicide" => host(signatures::SUICIDE, ids::SUICIDE_FUNC), - "blockhash" => host(signatures::BLOCKHASH, ids::BLOCKHASH_FUNC), - "blocknumber" => host(signatures::BLOCKNUMBER, ids::BLOCKNUMBER_FUNC), - "coinbase" => host(signatures::COINBASE, 
ids::COINBASE_FUNC), - "difficulty" => host(signatures::DIFFICULTY, ids::DIFFICULTY_FUNC), - "gaslimit" => host(signatures::GASLIMIT, ids::GASLIMIT_FUNC), - "timestamp" => host(signatures::TIMESTAMP, ids::TIMESTAMP_FUNC), - "address" => host(signatures::ADDRESS, ids::ADDRESS_FUNC), - "sender" => host(signatures::SENDER, ids::SENDER_FUNC), - "origin" => host(signatures::ORIGIN, ids::ORIGIN_FUNC), - "elog" => host(signatures::ELOG, ids::ELOG_FUNC), - "create2" if self.have_create2 => host(signatures::CREATE2, ids::CREATE2_FUNC), - "gasleft" if self.have_gasleft => host(signatures::GASLEFT, ids::GASLEFT_FUNC), - _ => { - return Err(wasmi::Error::Instantiation( - format!("Export {} not found", field_name), - )) - } - }; + fn resolve_func(&self, field_name: &str, _signature: &Signature) -> Result { + let func_ref = match field_name { + "storage_read" => host(signatures::STORAGE_READ, ids::STORAGE_READ_FUNC), + "storage_write" => host(signatures::STORAGE_WRITE, ids::STORAGE_WRITE_FUNC), + "ret" => host(signatures::RET, ids::RET_FUNC), + "gas" => host(signatures::GAS, ids::GAS_FUNC), + "input_length" => host(signatures::INPUT_LENGTH, ids::INPUT_LENGTH_FUNC), + "fetch_input" => host(signatures::FETCH_INPUT, ids::FETCH_INPUT_FUNC), + "panic" => host(signatures::PANIC, ids::PANIC_FUNC), + "debug" => host(signatures::DEBUG, ids::DEBUG_FUNC), + "ccall" => host(signatures::CCALL, ids::CCALL_FUNC), + "dcall" => host(signatures::DCALL, ids::DCALL_FUNC), + "scall" => host(signatures::SCALL, ids::SCALL_FUNC), + "value" => host(signatures::VALUE, ids::VALUE_FUNC), + "create" => host(signatures::CREATE, ids::CREATE_FUNC), + "suicide" => host(signatures::SUICIDE, ids::SUICIDE_FUNC), + "blockhash" => host(signatures::BLOCKHASH, ids::BLOCKHASH_FUNC), + "blocknumber" => host(signatures::BLOCKNUMBER, ids::BLOCKNUMBER_FUNC), + "coinbase" => host(signatures::COINBASE, ids::COINBASE_FUNC), + "difficulty" => host(signatures::DIFFICULTY, ids::DIFFICULTY_FUNC), + "gaslimit" => 
host(signatures::GASLIMIT, ids::GASLIMIT_FUNC), + "timestamp" => host(signatures::TIMESTAMP, ids::TIMESTAMP_FUNC), + "address" => host(signatures::ADDRESS, ids::ADDRESS_FUNC), + "sender" => host(signatures::SENDER, ids::SENDER_FUNC), + "origin" => host(signatures::ORIGIN, ids::ORIGIN_FUNC), + "elog" => host(signatures::ELOG, ids::ELOG_FUNC), + "create2" if self.have_create2 => host(signatures::CREATE2, ids::CREATE2_FUNC), + "gasleft" if self.have_gasleft => host(signatures::GASLEFT, ids::GASLEFT_FUNC), + _ => { + return Err(wasmi::Error::Instantiation(format!( + "Export {} not found", + field_name + ))) + } + }; - Ok(func_ref) - } + Ok(func_ref) + } - fn resolve_memory( - &self, - field_name: &str, - descriptor: &MemoryDescriptor, - ) -> Result { - if field_name == "memory" { - let effective_max = descriptor.maximum().unwrap_or(self.max_memory + 1); - if descriptor.initial() > self.max_memory || effective_max > self.max_memory - { - Err(Error::Instantiation("Module requested too much memory".to_owned())) - } else { - let mem = MemoryInstance::alloc( - memory_units::Pages(descriptor.initial() as usize), - descriptor.maximum().map(|x| memory_units::Pages(x as usize)), - )?; - *self.memory.borrow_mut() = Some(mem.clone()); - Ok(mem) - } - } else { - Err(Error::Instantiation("Memory imported under unknown name".to_owned())) - } - } + fn resolve_memory( + &self, + field_name: &str, + descriptor: &MemoryDescriptor, + ) -> Result { + if field_name == "memory" { + let effective_max = descriptor.maximum().unwrap_or(self.max_memory + 1); + if descriptor.initial() > self.max_memory || effective_max > self.max_memory { + Err(Error::Instantiation( + "Module requested too much memory".to_owned(), + )) + } else { + let mem = MemoryInstance::alloc( + memory_units::Pages(descriptor.initial() as usize), + descriptor + .maximum() + .map(|x| memory_units::Pages(x as usize)), + )?; + *self.memory.borrow_mut() = Some(mem.clone()); + Ok(mem) + } + } else { + Err(Error::Instantiation( + 
"Memory imported under unknown name".to_owned(), + )) + } + } } diff --git a/ethcore/wasm/src/lib.rs b/ethcore/wasm/src/lib.rs index 1e6129ba1..9efb16c96 100644 --- a/ethcore/wasm/src/lib.rs +++ b/ethcore/wasm/src/lib.rs @@ -18,11 +18,12 @@ extern crate byteorder; extern crate ethereum_types; -#[macro_use] extern crate log; +#[macro_use] +extern crate log; extern crate libc; extern crate parity_wasm; -extern crate vm; extern crate pwasm_utils as wasm_utils; +extern crate vm; extern crate wasmi; #[cfg(test)] @@ -36,8 +37,7 @@ mod runtime; #[cfg(test)] mod tests; - -use vm::{GasLeft, ReturnData, ActionParams}; +use vm::{ActionParams, GasLeft, ReturnData}; use wasmi::{Error as InterpreterError, Trap}; use runtime::{Runtime, RuntimeContext}; @@ -47,155 +47,164 @@ use ethereum_types::U256; /// Wrapped interpreter error #[derive(Debug)] pub enum Error { - Interpreter(InterpreterError), - Trap(Trap), + Interpreter(InterpreterError), + Trap(Trap), } impl From for Error { - fn from(e: InterpreterError) -> Self { - Error::Interpreter(e) - } + fn from(e: InterpreterError) -> Self { + Error::Interpreter(e) + } } impl From for Error { - fn from(e: Trap) -> Self { - Error::Trap(e) - } + fn from(e: Trap) -> Self { + Error::Trap(e) + } } impl From for vm::Error { - fn from(e: Error) -> Self { - match e { - Error::Interpreter(e) => vm::Error::Wasm(format!("Wasm runtime error: {:?}", e)), - Error::Trap(e) => vm::Error::Wasm(format!("Wasm contract trap: {:?}", e)), - } - } + fn from(e: Error) -> Self { + match e { + Error::Interpreter(e) => vm::Error::Wasm(format!("Wasm runtime error: {:?}", e)), + Error::Trap(e) => vm::Error::Wasm(format!("Wasm contract trap: {:?}", e)), + } + } } /// Wasm interpreter instance pub struct WasmInterpreter { - params: ActionParams, + params: ActionParams, } impl WasmInterpreter { - pub fn new(params: ActionParams) -> Self { - WasmInterpreter { params } - } + pub fn new(params: ActionParams) -> Self { + WasmInterpreter { params } + } } impl From for 
vm::Error { - fn from(e: runtime::Error) -> Self { - vm::Error::Wasm(format!("Wasm runtime error: {:?}", e)) - } + fn from(e: runtime::Error) -> Self { + vm::Error::Wasm(format!("Wasm runtime error: {:?}", e)) + } } enum ExecutionOutcome { - Suicide, - Return, - NotSpecial, + Suicide, + Return, + NotSpecial, } impl WasmInterpreter { - pub fn run(self: Box, ext: &mut vm::Ext) -> vm::Result { - let (module, data) = parser::payload(&self.params, ext.schedule().wasm())?; + pub fn run(self: Box, ext: &mut vm::Ext) -> vm::Result { + let (module, data) = parser::payload(&self.params, ext.schedule().wasm())?; - let loaded_module = wasmi::Module::from_parity_wasm_module(module).map_err(Error::Interpreter)?; + let loaded_module = + wasmi::Module::from_parity_wasm_module(module).map_err(Error::Interpreter)?; - let instantiation_resolver = env::ImportResolver::with_limit(16, ext.schedule().wasm()); + let instantiation_resolver = env::ImportResolver::with_limit(16, ext.schedule().wasm()); - let module_instance = wasmi::ModuleInstance::new( - &loaded_module, - &wasmi::ImportsBuilder::new().with_resolver("env", &instantiation_resolver) - ).map_err(Error::Interpreter)?; + let module_instance = wasmi::ModuleInstance::new( + &loaded_module, + &wasmi::ImportsBuilder::new().with_resolver("env", &instantiation_resolver), + ) + .map_err(Error::Interpreter)?; - let adjusted_gas = self.params.gas * U256::from(ext.schedule().wasm().opcodes_div) / - U256::from(ext.schedule().wasm().opcodes_mul); + let adjusted_gas = self.params.gas * U256::from(ext.schedule().wasm().opcodes_div) + / U256::from(ext.schedule().wasm().opcodes_mul); - if adjusted_gas > ::std::u64::MAX.into() - { - return Err(vm::Error::Wasm("Wasm interpreter cannot run contracts with gas (wasm adjusted) >= 2^64".to_owned())); - } + if adjusted_gas > ::std::u64::MAX.into() { + return Err(vm::Error::Wasm( + "Wasm interpreter cannot run contracts with gas (wasm adjusted) >= 2^64".to_owned(), + )); + } - let initial_memory = 
instantiation_resolver.memory_size().map_err(Error::Interpreter)?; - trace!(target: "wasm", "Contract requested {:?} pages of initial memory", initial_memory); + let initial_memory = instantiation_resolver + .memory_size() + .map_err(Error::Interpreter)?; + trace!(target: "wasm", "Contract requested {:?} pages of initial memory", initial_memory); - let (gas_left, result) = { - let mut runtime = Runtime::with_params( - ext, - instantiation_resolver.memory_ref(), - // cannot overflow, checked above - adjusted_gas.low_u64(), - data.to_vec(), - RuntimeContext { - address: self.params.address, - sender: self.params.sender, - origin: self.params.origin, - code_address: self.params.code_address, - value: self.params.value.value(), - }, - ); + let (gas_left, result) = { + let mut runtime = Runtime::with_params( + ext, + instantiation_resolver.memory_ref(), + // cannot overflow, checked above + adjusted_gas.low_u64(), + data.to_vec(), + RuntimeContext { + address: self.params.address, + sender: self.params.sender, + origin: self.params.origin, + code_address: self.params.code_address, + value: self.params.value.value(), + }, + ); - // cannot overflow if static_region < 2^16, - // initial_memory ∈ [0..2^32) - // total_charge <- static_region * 2^32 * 2^16 - // total_charge ∈ [0..2^64) if static_region ∈ [0..2^16) - // qed - assert!(runtime.schedule().wasm().initial_mem < 1 << 16); - runtime.charge(|s| initial_memory as u64 * s.wasm().initial_mem as u64)?; + // cannot overflow if static_region < 2^16, + // initial_memory ∈ [0..2^32) + // total_charge <- static_region * 2^32 * 2^16 + // total_charge ∈ [0..2^64) if static_region ∈ [0..2^16) + // qed + assert!(runtime.schedule().wasm().initial_mem < 1 << 16); + runtime.charge(|s| initial_memory as u64 * s.wasm().initial_mem as u64)?; - let module_instance = module_instance.run_start(&mut runtime).map_err(Error::Trap)?; + let module_instance = module_instance + .run_start(&mut runtime) + .map_err(Error::Trap)?; - let 
invoke_result = module_instance.invoke_export("call", &[], &mut runtime); + let invoke_result = module_instance.invoke_export("call", &[], &mut runtime); - let mut execution_outcome = ExecutionOutcome::NotSpecial; - if let Err(InterpreterError::Trap(ref trap)) = invoke_result { - if let wasmi::TrapKind::Host(ref boxed) = *trap.kind() { - let ref runtime_err = boxed.downcast_ref::() - .expect("Host errors other than runtime::Error never produced; qed"); + let mut execution_outcome = ExecutionOutcome::NotSpecial; + if let Err(InterpreterError::Trap(ref trap)) = invoke_result { + if let wasmi::TrapKind::Host(ref boxed) = *trap.kind() { + let ref runtime_err = boxed + .downcast_ref::() + .expect("Host errors other than runtime::Error never produced; qed"); - match **runtime_err { - runtime::Error::Suicide => { execution_outcome = ExecutionOutcome::Suicide; }, - runtime::Error::Return => { execution_outcome = ExecutionOutcome::Return; }, - _ => {} - } - } - } + match **runtime_err { + runtime::Error::Suicide => { + execution_outcome = ExecutionOutcome::Suicide; + } + runtime::Error::Return => { + execution_outcome = ExecutionOutcome::Return; + } + _ => {} + } + } + } - if let (ExecutionOutcome::NotSpecial, Err(e)) = (execution_outcome, invoke_result) { - trace!(target: "wasm", "Error executing contract: {:?}", e); - return Err(vm::Error::from(Error::from(e))); - } + if let (ExecutionOutcome::NotSpecial, Err(e)) = (execution_outcome, invoke_result) { + trace!(target: "wasm", "Error executing contract: {:?}", e); + return Err(vm::Error::from(Error::from(e))); + } - ( - runtime.gas_left().expect("Cannot fail since it was not updated since last charge"), - runtime.into_result(), - ) - }; + ( + runtime + .gas_left() + .expect("Cannot fail since it was not updated since last charge"), + runtime.into_result(), + ) + }; - let gas_left = - U256::from(gas_left) * U256::from(ext.schedule().wasm().opcodes_mul) - / U256::from(ext.schedule().wasm().opcodes_div); + let gas_left = 
U256::from(gas_left) * U256::from(ext.schedule().wasm().opcodes_mul) + / U256::from(ext.schedule().wasm().opcodes_div); - if result.is_empty() { - trace!(target: "wasm", "Contract execution result is empty."); - Ok(GasLeft::Known(gas_left)) - } else { - let len = result.len(); - Ok(GasLeft::NeedsReturn { - gas_left: gas_left, - data: ReturnData::new( - result, - 0, - len, - ), - apply_state: true, - }) - } - } + if result.is_empty() { + trace!(target: "wasm", "Contract execution result is empty."); + Ok(GasLeft::Known(gas_left)) + } else { + let len = result.len(); + Ok(GasLeft::NeedsReturn { + gas_left: gas_left, + data: ReturnData::new(result, 0, len), + apply_state: true, + }) + } + } } impl vm::Exec for WasmInterpreter { - fn exec(self: Box, ext: &mut vm::Ext) -> vm::ExecTrapResult { - Ok(self.run(ext)) - } + fn exec(self: Box, ext: &mut vm::Ext) -> vm::ExecTrapResult { + Ok(self.run(ext)) + } } diff --git a/ethcore/wasm/src/panic_payload.rs b/ethcore/wasm/src/panic_payload.rs index a484daf71..d962aea24 100644 --- a/ethcore/wasm/src/panic_payload.rs +++ b/ethcore/wasm/src/panic_payload.rs @@ -19,150 +19,150 @@ use std::io::{self, Read}; #[derive(Debug, PartialEq, Eq)] pub struct PanicPayload { - pub msg: Option, - pub file: Option, - pub line: Option, - pub col: Option, + pub msg: Option, + pub file: Option, + pub line: Option, + pub col: Option, } fn read_string(rdr: &mut io::Cursor<&[u8]>) -> io::Result> { - let string_len = rdr.read_u32::()?; - let string = if string_len == 0 { - None - } else { - let mut content = vec![0; string_len as usize]; - rdr.read_exact(&mut content)?; - Some(String::from_utf8_lossy(&content).into_owned()) - }; - Ok(string) + let string_len = rdr.read_u32::()?; + let string = if string_len == 0 { + None + } else { + let mut content = vec![0; string_len as usize]; + rdr.read_exact(&mut content)?; + Some(String::from_utf8_lossy(&content).into_owned()) + }; + Ok(string) } pub fn decode(raw: &[u8]) -> PanicPayload { - let mut rdr = 
io::Cursor::new(raw); - let msg = read_string(&mut rdr).ok().and_then(|x| x); - let file = read_string(&mut rdr).ok().and_then(|x| x); - let line = rdr.read_u32::().ok(); - let col = rdr.read_u32::().ok(); - PanicPayload { - msg: msg, - file: file, - line: line, - col: col, - } + let mut rdr = io::Cursor::new(raw); + let msg = read_string(&mut rdr).ok().and_then(|x| x); + let file = read_string(&mut rdr).ok().and_then(|x| x); + let line = rdr.read_u32::().ok(); + let col = rdr.read_u32::().ok(); + PanicPayload { + msg: msg, + file: file, + line: line, + col: col, + } } #[cfg(test)] mod tests { - use super::*; - use byteorder::WriteBytesExt; + use super::*; + use byteorder::WriteBytesExt; - fn write_u32(payload: &mut Vec, val: u32) { - payload.write_u32::(val).unwrap(); - } + fn write_u32(payload: &mut Vec, val: u32) { + payload.write_u32::(val).unwrap(); + } - fn write_bytes(payload: &mut Vec, bytes: &[u8]) { - write_u32(payload, bytes.len() as u32); - payload.extend(bytes); - } + fn write_bytes(payload: &mut Vec, bytes: &[u8]) { + write_u32(payload, bytes.len() as u32); + payload.extend(bytes); + } - #[test] - fn it_works() { - let mut raw = Vec::new(); - write_bytes(&mut raw, b"msg"); - write_bytes(&mut raw, b"file"); - write_u32(&mut raw, 1); - write_u32(&mut raw, 2); + #[test] + fn it_works() { + let mut raw = Vec::new(); + write_bytes(&mut raw, b"msg"); + write_bytes(&mut raw, b"file"); + write_u32(&mut raw, 1); + write_u32(&mut raw, 2); - let payload = decode(&raw); + let payload = decode(&raw); - assert_eq!( - payload, - PanicPayload { - msg: Some("msg".to_string()), - file: Some("file".to_string()), - line: Some(1), - col: Some(2), - } - ); - } + assert_eq!( + payload, + PanicPayload { + msg: Some("msg".to_string()), + file: Some("file".to_string()), + line: Some(1), + col: Some(2), + } + ); + } - #[test] - fn only_msg() { - let mut raw = Vec::new(); - write_bytes(&mut raw, b"msg"); + #[test] + fn only_msg() { + let mut raw = Vec::new(); + write_bytes(&mut 
raw, b"msg"); - let payload = decode(&raw); + let payload = decode(&raw); - assert_eq!( - payload, - PanicPayload { - msg: Some("msg".to_string()), - file: None, - line: None, - col: None, - } - ); - } + assert_eq!( + payload, + PanicPayload { + msg: Some("msg".to_string()), + file: None, + line: None, + col: None, + } + ); + } - #[test] - fn invalid_utf8() { - let mut raw = Vec::new(); - write_bytes(&mut raw, b"\xF0\x90\x80msg"); - write_bytes(&mut raw, b"file"); - write_u32(&mut raw, 1); - write_u32(&mut raw, 2); + #[test] + fn invalid_utf8() { + let mut raw = Vec::new(); + write_bytes(&mut raw, b"\xF0\x90\x80msg"); + write_bytes(&mut raw, b"file"); + write_u32(&mut raw, 1); + write_u32(&mut raw, 2); - let payload = decode(&raw); + let payload = decode(&raw); - assert_eq!( - payload, - PanicPayload { - msg: Some("�msg".to_string()), - file: Some("file".to_string()), - line: Some(1), - col: Some(2), - } - ); - } + assert_eq!( + payload, + PanicPayload { + msg: Some("�msg".to_string()), + file: Some("file".to_string()), + line: Some(1), + col: Some(2), + } + ); + } - #[test] - fn trailing_data() { - let mut raw = Vec::new(); - write_bytes(&mut raw, b"msg"); - write_bytes(&mut raw, b"file"); - write_u32(&mut raw, 1); - write_u32(&mut raw, 2); - write_u32(&mut raw, 0xdeadbeef); + #[test] + fn trailing_data() { + let mut raw = Vec::new(); + write_bytes(&mut raw, b"msg"); + write_bytes(&mut raw, b"file"); + write_u32(&mut raw, 1); + write_u32(&mut raw, 2); + write_u32(&mut raw, 0xdeadbeef); - let payload = decode(&raw); + let payload = decode(&raw); - assert_eq!( - payload, - PanicPayload { - msg: Some("msg".to_string()), - file: Some("file".to_string()), - line: Some(1), - col: Some(2), - } - ); - } + assert_eq!( + payload, + PanicPayload { + msg: Some("msg".to_string()), + file: Some("file".to_string()), + line: Some(1), + col: Some(2), + } + ); + } - #[test] - fn empty_str_is_none() { - let mut raw = Vec::new(); - write_bytes(&mut raw, b"msg"); - write_bytes(&mut 
raw, b""); + #[test] + fn empty_str_is_none() { + let mut raw = Vec::new(); + write_bytes(&mut raw, b"msg"); + write_bytes(&mut raw, b""); - let payload = decode(&raw); + let payload = decode(&raw); - assert_eq!( - payload, - PanicPayload { - msg: Some("msg".to_string()), - file: None, - line: None, - col: None, - } - ); - } + assert_eq!( + payload, + PanicPayload { + msg: Some("msg".to_string()), + file: None, + line: None, + col: None, + } + ); + } } diff --git a/ethcore/wasm/src/parser.rs b/ethcore/wasm/src/parser.rs index ca730e718..c816cafda 100644 --- a/ethcore/wasm/src/parser.rs +++ b/ethcore/wasm/src/parser.rs @@ -16,83 +16,95 @@ //! ActionParams parser for wasm +use parity_wasm::{ + elements::{self, Deserialize}, + peek_size, +}; use vm; use wasm_utils::{self, rules}; -use parity_wasm::elements::{self, Deserialize}; -use parity_wasm::peek_size; fn gas_rules(wasm_costs: &vm::WasmCosts) -> rules::Set { - rules::Set::new( - wasm_costs.regular, - { - let mut vals = ::std::collections::BTreeMap::new(); - vals.insert(rules::InstructionType::Load, rules::Metering::Fixed(wasm_costs.mem as u32)); - vals.insert(rules::InstructionType::Store, rules::Metering::Fixed(wasm_costs.mem as u32)); - vals.insert(rules::InstructionType::Div, rules::Metering::Fixed(wasm_costs.div as u32)); - vals.insert(rules::InstructionType::Mul, rules::Metering::Fixed(wasm_costs.mul as u32)); - vals - }) - .with_grow_cost(wasm_costs.grow_mem) - .with_forbidden_floats() + rules::Set::new(wasm_costs.regular, { + let mut vals = ::std::collections::BTreeMap::new(); + vals.insert( + rules::InstructionType::Load, + rules::Metering::Fixed(wasm_costs.mem as u32), + ); + vals.insert( + rules::InstructionType::Store, + rules::Metering::Fixed(wasm_costs.mem as u32), + ); + vals.insert( + rules::InstructionType::Div, + rules::Metering::Fixed(wasm_costs.div as u32), + ); + vals.insert( + rules::InstructionType::Mul, + rules::Metering::Fixed(wasm_costs.mul as u32), + ); + vals + }) + 
.with_grow_cost(wasm_costs.grow_mem) + .with_forbidden_floats() } /// Splits payload to code and data according to params.params_type, also /// loads the module instance from payload and injects gas counter according /// to schedule. -pub fn payload<'a>(params: &'a vm::ActionParams, wasm_costs: &vm::WasmCosts) - -> Result<(elements::Module, &'a [u8]), vm::Error> -{ - let code = match params.code { - Some(ref code) => &code[..], - None => { return Err(vm::Error::Wasm("Invalid wasm call".to_owned())); } - }; +pub fn payload<'a>( + params: &'a vm::ActionParams, + wasm_costs: &vm::WasmCosts, +) -> Result<(elements::Module, &'a [u8]), vm::Error> { + let code = match params.code { + Some(ref code) => &code[..], + None => { + return Err(vm::Error::Wasm("Invalid wasm call".to_owned())); + } + }; - let (mut cursor, data_position) = match params.params_type { - vm::ParamsType::Embedded => { - let module_size = peek_size(&*code); - ( - ::std::io::Cursor::new(&code[..module_size]), - module_size - ) - }, - vm::ParamsType::Separate => { - (::std::io::Cursor::new(&code[..]), 0) - }, - }; + let (mut cursor, data_position) = match params.params_type { + vm::ParamsType::Embedded => { + let module_size = peek_size(&*code); + (::std::io::Cursor::new(&code[..module_size]), module_size) + } + vm::ParamsType::Separate => (::std::io::Cursor::new(&code[..]), 0), + }; - let deserialized_module = elements::Module::deserialize( - &mut cursor - ).map_err(|err| { - vm::Error::Wasm(format!("Error deserializing contract code ({:?})", err)) - })?; + let deserialized_module = elements::Module::deserialize(&mut cursor) + .map_err(|err| vm::Error::Wasm(format!("Error deserializing contract code ({:?})", err)))?; - if deserialized_module.memory_section().map_or(false, |ms| ms.entries().len() > 0) { - // According to WebAssembly spec, internal memory is hidden from embedder and should not - // be interacted with. So we disable this kind of modules at decoding level. 
- return Err(vm::Error::Wasm(format!("Malformed wasm module: internal memory"))); - } + if deserialized_module + .memory_section() + .map_or(false, |ms| ms.entries().len() > 0) + { + // According to WebAssembly spec, internal memory is hidden from embedder and should not + // be interacted with. So we disable this kind of modules at decoding level. + return Err(vm::Error::Wasm(format!( + "Malformed wasm module: internal memory" + ))); + } - let contract_module = wasm_utils::inject_gas_counter( - deserialized_module, - &gas_rules(wasm_costs), - ).map_err(|_| vm::Error::Wasm(format!("Wasm contract error: bytecode invalid")))?; + let contract_module = + wasm_utils::inject_gas_counter(deserialized_module, &gas_rules(wasm_costs)) + .map_err(|_| vm::Error::Wasm(format!("Wasm contract error: bytecode invalid")))?; - let contract_module = wasm_utils::stack_height::inject_limiter( - contract_module, - wasm_costs.max_stack_height, - ).map_err(|_| vm::Error::Wasm(format!("Wasm contract error: stack limiter failure")))?; + let contract_module = + wasm_utils::stack_height::inject_limiter(contract_module, wasm_costs.max_stack_height) + .map_err(|_| vm::Error::Wasm(format!("Wasm contract error: stack limiter failure")))?; - let data = match params.params_type { - vm::ParamsType::Embedded => { - if data_position < code.len() { &code[data_position..] } else { &[] } - }, - vm::ParamsType::Separate => { - match params.data { - Some(ref s) => &s[..], - None => &[] - } - } - }; + let data = match params.params_type { + vm::ParamsType::Embedded => { + if data_position < code.len() { + &code[data_position..] 
+ } else { + &[] + } + } + vm::ParamsType::Separate => match params.data { + Some(ref s) => &s[..], + None => &[], + }, + }; - Ok((contract_module, data)) + Ok((contract_module, data)) } diff --git a/ethcore/wasm/src/runtime.rs b/ethcore/wasm/src/runtime.rs index 8466c3b8d..1ea7f6eee 100644 --- a/ethcore/wasm/src/runtime.rs +++ b/ethcore/wasm/src/runtime.rs @@ -14,788 +14,848 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::cmp; -use ethereum_types::{U256, H256, Address}; -use vm::{self, CallType}; -use wasmi::{self, MemoryRef, RuntimeArgs, RuntimeValue, Error as InterpreterError, Trap, TrapKind}; use super::panic_payload; +use ethereum_types::{Address, H256, U256}; +use std::cmp; +use vm::{self, CallType}; +use wasmi::{ + self, Error as InterpreterError, MemoryRef, RuntimeArgs, RuntimeValue, Trap, TrapKind, +}; pub struct RuntimeContext { - pub address: Address, - pub sender: Address, - pub origin: Address, - pub code_address: Address, - pub value: U256, + pub address: Address, + pub sender: Address, + pub origin: Address, + pub code_address: Address, + pub value: U256, } pub struct Runtime<'a> { - gas_counter: u64, - gas_limit: u64, - ext: &'a mut vm::Ext, - context: RuntimeContext, - memory: MemoryRef, - args: Vec, - result: Vec, + gas_counter: u64, + gas_limit: u64, + ext: &'a mut vm::Ext, + context: RuntimeContext, + memory: MemoryRef, + args: Vec, + result: Vec, } /// User trap in native code #[derive(Debug, Clone, PartialEq)] pub enum Error { - /// Storage read error - StorageReadError, - /// Storage update error - StorageUpdateError, - /// Memory access violation - MemoryAccessViolation, - /// Native code resulted in suicide - Suicide, - /// Native code requested execution to finish - Return, - /// Suicide was requested but coudn't complete - SuicideAbort, - /// Invalid gas state inside interpreter - InvalidGasState, - /// Query of the balance resulted in an error - 
BalanceQueryError, - /// Failed allocation - AllocationFailed, - /// Gas limit reached - GasLimit, - /// Unknown runtime function - Unknown, - /// Passed string had invalid utf-8 encoding - BadUtf8, - /// Log event error - Log, - /// Other error in native code - Other, - /// Syscall signature mismatch - InvalidSyscall, - /// Unreachable instruction encountered - Unreachable, - /// Invalid virtual call - InvalidVirtualCall, - /// Division by zero - DivisionByZero, - /// Invalid conversion to integer - InvalidConversionToInt, - /// Stack overflow - StackOverflow, - /// Panic with message - Panic(String), + /// Storage read error + StorageReadError, + /// Storage update error + StorageUpdateError, + /// Memory access violation + MemoryAccessViolation, + /// Native code resulted in suicide + Suicide, + /// Native code requested execution to finish + Return, + /// Suicide was requested but coudn't complete + SuicideAbort, + /// Invalid gas state inside interpreter + InvalidGasState, + /// Query of the balance resulted in an error + BalanceQueryError, + /// Failed allocation + AllocationFailed, + /// Gas limit reached + GasLimit, + /// Unknown runtime function + Unknown, + /// Passed string had invalid utf-8 encoding + BadUtf8, + /// Log event error + Log, + /// Other error in native code + Other, + /// Syscall signature mismatch + InvalidSyscall, + /// Unreachable instruction encountered + Unreachable, + /// Invalid virtual call + InvalidVirtualCall, + /// Division by zero + DivisionByZero, + /// Invalid conversion to integer + InvalidConversionToInt, + /// Stack overflow + StackOverflow, + /// Panic with message + Panic(String), } -impl wasmi::HostError for Error { } +impl wasmi::HostError for Error {} impl From for Error { - fn from(trap: Trap) -> Self { - match *trap.kind() { - TrapKind::Unreachable => Error::Unreachable, - TrapKind::MemoryAccessOutOfBounds => Error::MemoryAccessViolation, - TrapKind::TableAccessOutOfBounds | TrapKind::ElemUninitialized => 
Error::InvalidVirtualCall, - TrapKind::DivisionByZero => Error::DivisionByZero, - TrapKind::InvalidConversionToInt => Error::InvalidConversionToInt, - TrapKind::UnexpectedSignature => Error::InvalidVirtualCall, - TrapKind::StackOverflow => Error::StackOverflow, - TrapKind::Host(_) => Error::Other, - } - } + fn from(trap: Trap) -> Self { + match *trap.kind() { + TrapKind::Unreachable => Error::Unreachable, + TrapKind::MemoryAccessOutOfBounds => Error::MemoryAccessViolation, + TrapKind::TableAccessOutOfBounds | TrapKind::ElemUninitialized => { + Error::InvalidVirtualCall + } + TrapKind::DivisionByZero => Error::DivisionByZero, + TrapKind::InvalidConversionToInt => Error::InvalidConversionToInt, + TrapKind::UnexpectedSignature => Error::InvalidVirtualCall, + TrapKind::StackOverflow => Error::StackOverflow, + TrapKind::Host(_) => Error::Other, + } + } } impl From for Error { - fn from(err: InterpreterError) -> Self { - match err { - InterpreterError::Value(_) => Error::InvalidSyscall, - InterpreterError::Memory(_) => Error::MemoryAccessViolation, - _ => Error::Other, - } - } + fn from(err: InterpreterError) -> Self { + match err { + InterpreterError::Value(_) => Error::InvalidSyscall, + InterpreterError::Memory(_) => Error::MemoryAccessViolation, + _ => Error::Other, + } + } } impl ::std::fmt::Display for Error { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::result::Result<(), ::std::fmt::Error> { - match *self { - Error::StorageReadError => write!(f, "Storage read error"), - Error::StorageUpdateError => write!(f, "Storage update error"), - Error::MemoryAccessViolation => write!(f, "Memory access violation"), - Error::SuicideAbort => write!(f, "Attempt to suicide resulted in an error"), - Error::InvalidGasState => write!(f, "Invalid gas state"), - Error::BalanceQueryError => write!(f, "Balance query resulted in an error"), - Error::Suicide => write!(f, "Suicide result"), - Error::Return => write!(f, "Return result"), - Error::Unknown => write!(f, "Unknown 
runtime function invoked"), - Error::AllocationFailed => write!(f, "Memory allocation failed (OOM)"), - Error::BadUtf8 => write!(f, "String encoding is bad utf-8 sequence"), - Error::GasLimit => write!(f, "Invocation resulted in gas limit violated"), - Error::Log => write!(f, "Error occured while logging an event"), - Error::InvalidSyscall => write!(f, "Invalid syscall signature encountered at runtime"), - Error::Other => write!(f, "Other unspecified error"), - Error::Unreachable => write!(f, "Unreachable instruction encountered"), - Error::InvalidVirtualCall => write!(f, "Invalid virtual call"), - Error::DivisionByZero => write!(f, "Division by zero"), - Error::StackOverflow => write!(f, "Stack overflow"), - Error::InvalidConversionToInt => write!(f, "Invalid conversion to integer"), - Error::Panic(ref msg) => write!(f, "Panic: {}", msg), - } - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::result::Result<(), ::std::fmt::Error> { + match *self { + Error::StorageReadError => write!(f, "Storage read error"), + Error::StorageUpdateError => write!(f, "Storage update error"), + Error::MemoryAccessViolation => write!(f, "Memory access violation"), + Error::SuicideAbort => write!(f, "Attempt to suicide resulted in an error"), + Error::InvalidGasState => write!(f, "Invalid gas state"), + Error::BalanceQueryError => write!(f, "Balance query resulted in an error"), + Error::Suicide => write!(f, "Suicide result"), + Error::Return => write!(f, "Return result"), + Error::Unknown => write!(f, "Unknown runtime function invoked"), + Error::AllocationFailed => write!(f, "Memory allocation failed (OOM)"), + Error::BadUtf8 => write!(f, "String encoding is bad utf-8 sequence"), + Error::GasLimit => write!(f, "Invocation resulted in gas limit violated"), + Error::Log => write!(f, "Error occured while logging an event"), + Error::InvalidSyscall => write!(f, "Invalid syscall signature encountered at runtime"), + Error::Other => write!(f, "Other unspecified error"), + 
Error::Unreachable => write!(f, "Unreachable instruction encountered"), + Error::InvalidVirtualCall => write!(f, "Invalid virtual call"), + Error::DivisionByZero => write!(f, "Division by zero"), + Error::StackOverflow => write!(f, "Stack overflow"), + Error::InvalidConversionToInt => write!(f, "Invalid conversion to integer"), + Error::Panic(ref msg) => write!(f, "Panic: {}", msg), + } + } } type Result = ::std::result::Result; impl<'a> Runtime<'a> { - - /// New runtime for wasm contract with specified params - pub fn with_params( - ext: &mut vm::Ext, - memory: MemoryRef, - gas_limit: u64, - args: Vec, - context: RuntimeContext, - ) -> Runtime { - Runtime { - gas_counter: 0, - gas_limit: gas_limit, - memory: memory, - ext: ext, - context: context, - args: args, - result: Vec::new(), - } - } - - /// Loads 256-bit hash from the specifed sandboxed memory pointer - fn h256_at(&self, ptr: u32) -> Result { - let mut buf = [0u8; 32]; - self.memory.get_into(ptr, &mut buf[..])?; - - Ok(H256::from(&buf[..])) - } - - /// Loads 160-bit hash (Ethereum address) from the specified sandboxed memory pointer - fn address_at(&self, ptr: u32) -> Result
{ - let mut buf = [0u8; 20]; - self.memory.get_into(ptr, &mut buf[..])?; - - Ok(Address::from(&buf[..])) - } - - /// Loads 256-bit integer represented with bigendian from the specified sandboxed memory pointer - fn u256_at(&self, ptr: u32) -> Result { - let mut buf = [0u8; 32]; - self.memory.get_into(ptr, &mut buf[..])?; - - Ok(U256::from_big_endian(&buf[..])) - } - - /// Charge specified amount of gas - /// - /// Returns false if gas limit exceeded and true if not. - /// Intuition about the return value sense is to aswer the question 'are we allowed to continue?' - fn charge_gas(&mut self, amount: u64) -> bool { - let prev = self.gas_counter; - match prev.checked_add(amount) { - // gas charge overflow protection - None => false, - Some(val) if val > self.gas_limit => false, - Some(_) => { - self.gas_counter = prev + amount; - true - } - } - } - - /// Charge gas according to closure - pub fn charge(&mut self, f: F) -> Result<()> - where F: FnOnce(&vm::Schedule) -> u64 - { - let amount = f(self.ext.schedule()); - if !self.charge_gas(amount as u64) { - Err(Error::GasLimit) - } else { - Ok(()) - } - } - - /// Adjusted charge of gas which scales actual charge according to the wasm opcode counting coefficient - pub fn adjusted_charge(&mut self, f: F) -> Result<()> - where F: FnOnce(&vm::Schedule) -> u64 - { - self.charge(|schedule| f(schedule) * schedule.wasm().opcodes_div as u64 / schedule.wasm().opcodes_mul as u64) - } - - /// Charge gas provided by the closure - /// - /// Closure also can return overflowing flag as None in gas cost. 
- pub fn overflow_charge(&mut self, f: F) -> Result<()> - where F: FnOnce(&vm::Schedule) -> Option - { - let amount = match f(self.ext.schedule()) { - Some(amount) => amount, - None => { return Err(Error::GasLimit.into()); } - }; - - if !self.charge_gas(amount as u64) { - Err(Error::GasLimit.into()) - } else { - Ok(()) - } - } - - /// Same as overflow_charge, but with amount adjusted by wasm opcodes coeff - pub fn adjusted_overflow_charge(&mut self, f: F) -> Result<()> - where F: FnOnce(&vm::Schedule) -> Option - { - self.overflow_charge(|schedule| - f(schedule) - .and_then(|x| x.checked_mul(schedule.wasm().opcodes_div as u64)) - .map(|x| x / schedule.wasm().opcodes_mul as u64) - ) - } - - /// Read from the storage to wasm memory - pub fn storage_read(&mut self, args: RuntimeArgs) -> Result<()> - { - let key = self.h256_at(args.nth_checked(0)?)?; - let val_ptr: u32 = args.nth_checked(1)?; - - let val = self.ext.storage_at(&key).map_err(|_| Error::StorageReadError)?; - - self.adjusted_charge(|schedule| schedule.sload_gas as u64)?; - - self.memory.set(val_ptr as u32, &*val)?; - - Ok(()) - } - - /// Write to storage from wasm memory - pub fn storage_write(&mut self, args: RuntimeArgs) -> Result<()> - { - let key = self.h256_at(args.nth_checked(0)?)?; - let val_ptr: u32 = args.nth_checked(1)?; - - let val = self.h256_at(val_ptr)?; - let former_val = self.ext.storage_at(&key).map_err(|_| Error::StorageUpdateError)?; - - if former_val == H256::zero() && val != H256::zero() { - self.adjusted_charge(|schedule| schedule.sstore_set_gas as u64)?; - } else { - self.adjusted_charge(|schedule| schedule.sstore_reset_gas as u64)?; - } - - self.ext.set_storage(key, val).map_err(|_| Error::StorageUpdateError)?; - - if former_val != H256::zero() && val == H256::zero() { - let sstore_clears_schedule = self.schedule().sstore_refund_gas; - self.ext.add_sstore_refund(sstore_clears_schedule); - } - - Ok(()) - } - - /// Return currently used schedule - pub fn schedule(&self) -> 
&vm::Schedule { - self.ext.schedule() - } - - /// Sets a return value for the call - /// - /// Syscall takes 2 arguments: - /// * pointer in sandboxed memory where result is - /// * the length of the result - pub fn ret(&mut self, args: RuntimeArgs) -> Result<()> { - let ptr: u32 = args.nth_checked(0)?; - let len: u32 = args.nth_checked(1)?; - - trace!(target: "wasm", "Contract ret: {} bytes @ {}", len, ptr); - - self.result = self.memory.get(ptr, len as usize)?; - - Err(Error::Return) - } - - /// Destroy the runtime, returning currently recorded result of the execution - pub fn into_result(self) -> Vec { - self.result - } - - /// Query current gas left for execution - pub fn gas_left(&self) -> Result { - if self.gas_counter > self.gas_limit { return Err(Error::InvalidGasState); } - Ok(self.gas_limit - self.gas_counter) - } - - /// General gas charging extern. - fn gas(&mut self, args: RuntimeArgs) -> Result<()> { - let amount: u32 = args.nth_checked(0)?; - if self.charge_gas(amount as u64) { - Ok(()) - } else { - Err(Error::GasLimit.into()) - } - } - - /// Query the length of the input bytes - fn input_legnth(&mut self) -> RuntimeValue { - RuntimeValue::I32(self.args.len() as i32) - } - - /// Write input bytes to the memory location using the passed pointer - fn fetch_input(&mut self, args: RuntimeArgs) -> Result<()> { - let ptr: u32 = args.nth_checked(0)?; - - let args_len = self.args.len() as u64; - self.charge(|s| args_len * s.wasm().memcpy as u64)?; - - self.memory.set(ptr, &self.args[..])?; - Ok(()) - } - - /// User panic - /// - /// Contract can invoke this when he encounters unrecoverable error. 
- fn panic(&mut self, args: RuntimeArgs) -> Result<()> - { - let payload_ptr: u32 = args.nth_checked(0)?; - let payload_len: u32 = args.nth_checked(1)?; - - let raw_payload = self.memory.get(payload_ptr, payload_len as usize)?; - let payload = panic_payload::decode(&raw_payload); - let msg = format!( - "{msg}, {file}:{line}:{col}", - msg = payload - .msg - .as_ref() - .map(String::as_ref) - .unwrap_or(""), - file = payload - .file - .as_ref() - .map(String::as_ref) - .unwrap_or(""), - line = payload.line.unwrap_or(0), - col = payload.col.unwrap_or(0) - ); - trace!(target: "wasm", "Contract custom panic message: {}", msg); - - Err(Error::Panic(msg).into()) - } - - fn do_call( - &mut self, - use_val: bool, - call_type: CallType, - args: RuntimeArgs, - ) - -> Result - { - trace!(target: "wasm", "runtime: CALL({:?})", call_type); - - let gas: u64 = args.nth_checked(0)?; - trace!(target: "wasm", " gas: {:?}", gas); - - let address = self.address_at(args.nth_checked(1)?)?; - trace!(target: "wasm", " address: {:?}", address); - - let vofs = if use_val { 1 } else { 0 }; - let val = if use_val { Some(self.u256_at(args.nth_checked(2)?)?) 
} else { None }; - trace!(target: "wasm", " val: {:?}", val); - - let input_ptr: u32 = args.nth_checked(2 + vofs)?; - trace!(target: "wasm", " input_ptr: {:?}", input_ptr); - - let input_len: u32 = args.nth_checked(3 + vofs)?; - trace!(target: "wasm", " input_len: {:?}", input_len); - - let result_ptr: u32 = args.nth_checked(4 + vofs)?; - trace!(target: "wasm", " result_ptr: {:?}", result_ptr); - - let result_alloc_len: u32 = args.nth_checked(5 + vofs)?; - trace!(target: "wasm", " result_len: {:?}", result_alloc_len); - - if let Some(ref val) = val { - let address_balance = self.ext.balance(&self.context.address) - .map_err(|_| Error::BalanceQueryError)?; - - if &address_balance < val { - trace!(target: "wasm", "runtime: call failed due to balance check"); - return Ok((-1i32).into()); - } - } - - self.adjusted_charge(|schedule| schedule.call_gas as u64)?; - - let mut result = Vec::with_capacity(result_alloc_len as usize); - result.resize(result_alloc_len as usize, 0); - - // todo: optimize to use memory views once it's in - let payload = self.memory.get(input_ptr, input_len as usize)?; - - let adjusted_gas = match gas.checked_mul(self.ext.schedule().wasm().opcodes_div as u64) - .map(|x| x / self.ext.schedule().wasm().opcodes_mul as u64) - { - Some(x) => x, - None => { - trace!("CALL overflowed gas, call aborted with error returned"); - return Ok(RuntimeValue::I32(-1)) - }, - }; - - self.charge(|_| adjusted_gas)?; - - let call_result = self.ext.call( - &gas.into(), - match call_type { CallType::DelegateCall => &self.context.sender, _ => &self.context.address }, - match call_type { CallType::Call | CallType::StaticCall => &address, _ => &self.context.address }, - val, - &payload, - &address, - call_type, - false - ).ok().expect("Trap is false; trap error will not happen; qed"); - - match call_result { - vm::MessageCallResult::Success(gas_left, data) => { - let len = cmp::min(result.len(), data.len()); - (&mut result[..len]).copy_from_slice(&data[..len]); - - // 
cannot overflow, before making call gas_counter was incremented with gas, and gas_left < gas - self.gas_counter = self.gas_counter - - gas_left.low_u64() * self.ext.schedule().wasm().opcodes_div as u64 - / self.ext.schedule().wasm().opcodes_mul as u64; - - self.memory.set(result_ptr, &result)?; - Ok(0i32.into()) - }, - vm::MessageCallResult::Reverted(gas_left, data) => { - let len = cmp::min(result.len(), data.len()); - (&mut result[..len]).copy_from_slice(&data[..len]); - - // cannot overflow, before making call gas_counter was incremented with gas, and gas_left < gas - self.gas_counter = self.gas_counter - - gas_left.low_u64() * self.ext.schedule().wasm().opcodes_div as u64 - / self.ext.schedule().wasm().opcodes_mul as u64; - - self.memory.set(result_ptr, &result)?; - Ok((-1i32).into()) - }, - vm::MessageCallResult::Failed => { - Ok((-1i32).into()) - } - } - } - - /// Message call - fn ccall(&mut self, args: RuntimeArgs) -> Result { - self.do_call(true, CallType::Call, args) - } - - /// Delegate call - fn dcall(&mut self, args: RuntimeArgs) -> Result { - self.do_call(false, CallType::DelegateCall, args) - } - - /// Static call - fn scall(&mut self, args: RuntimeArgs) -> Result { - self.do_call(false, CallType::StaticCall, args) - } - - fn return_address_ptr(&mut self, ptr: u32, val: Address) -> Result<()> - { - self.charge(|schedule| schedule.wasm().static_address as u64)?; - self.memory.set(ptr, &*val)?; - Ok(()) - } - - fn return_u256_ptr(&mut self, ptr: u32, val: U256) -> Result<()> { - let value: H256 = val.into(); - self.charge(|schedule| schedule.wasm().static_u256 as u64)?; - self.memory.set(ptr, &*value)?; - Ok(()) - } - - /// Returns value (in Wei) passed to contract - pub fn value(&mut self, args: RuntimeArgs) -> Result<()> { - let val = self.context.value; - self.return_u256_ptr(args.nth_checked(0)?, val) - } - - fn do_create(&mut self, endowment: U256, code_ptr: u32, code_len: u32, result_ptr: u32, scheme: vm::CreateContractAddress) -> Result { - let 
code = self.memory.get(code_ptr, code_len as usize)?; - - self.adjusted_charge(|schedule| schedule.create_gas as u64)?; - self.adjusted_charge(|schedule| schedule.create_data_gas as u64 * code.len() as u64)?; - - let gas_left: U256 = U256::from(self.gas_left()?) - * U256::from(self.ext.schedule().wasm().opcodes_mul) - / U256::from(self.ext.schedule().wasm().opcodes_div); - - match self.ext.create(&gas_left, &endowment, &code, scheme, false).ok().expect("Trap is false; trap error will not happen; qed") { - vm::ContractCreateResult::Created(address, gas_left) => { - self.memory.set(result_ptr, &*address)?; - self.gas_counter = self.gas_limit - + /// New runtime for wasm contract with specified params + pub fn with_params( + ext: &mut vm::Ext, + memory: MemoryRef, + gas_limit: u64, + args: Vec, + context: RuntimeContext, + ) -> Runtime { + Runtime { + gas_counter: 0, + gas_limit: gas_limit, + memory: memory, + ext: ext, + context: context, + args: args, + result: Vec::new(), + } + } + + /// Loads 256-bit hash from the specifed sandboxed memory pointer + fn h256_at(&self, ptr: u32) -> Result { + let mut buf = [0u8; 32]; + self.memory.get_into(ptr, &mut buf[..])?; + + Ok(H256::from(&buf[..])) + } + + /// Loads 160-bit hash (Ethereum address) from the specified sandboxed memory pointer + fn address_at(&self, ptr: u32) -> Result
{ + let mut buf = [0u8; 20]; + self.memory.get_into(ptr, &mut buf[..])?; + + Ok(Address::from(&buf[..])) + } + + /// Loads 256-bit integer represented with bigendian from the specified sandboxed memory pointer + fn u256_at(&self, ptr: u32) -> Result { + let mut buf = [0u8; 32]; + self.memory.get_into(ptr, &mut buf[..])?; + + Ok(U256::from_big_endian(&buf[..])) + } + + /// Charge specified amount of gas + /// + /// Returns false if gas limit exceeded and true if not. + /// Intuition about the return value sense is to aswer the question 'are we allowed to continue?' + fn charge_gas(&mut self, amount: u64) -> bool { + let prev = self.gas_counter; + match prev.checked_add(amount) { + // gas charge overflow protection + None => false, + Some(val) if val > self.gas_limit => false, + Some(_) => { + self.gas_counter = prev + amount; + true + } + } + } + + /// Charge gas according to closure + pub fn charge(&mut self, f: F) -> Result<()> + where + F: FnOnce(&vm::Schedule) -> u64, + { + let amount = f(self.ext.schedule()); + if !self.charge_gas(amount as u64) { + Err(Error::GasLimit) + } else { + Ok(()) + } + } + + /// Adjusted charge of gas which scales actual charge according to the wasm opcode counting coefficient + pub fn adjusted_charge(&mut self, f: F) -> Result<()> + where + F: FnOnce(&vm::Schedule) -> u64, + { + self.charge(|schedule| { + f(schedule) * schedule.wasm().opcodes_div as u64 / schedule.wasm().opcodes_mul as u64 + }) + } + + /// Charge gas provided by the closure + /// + /// Closure also can return overflowing flag as None in gas cost. 
+ pub fn overflow_charge(&mut self, f: F) -> Result<()> + where + F: FnOnce(&vm::Schedule) -> Option, + { + let amount = match f(self.ext.schedule()) { + Some(amount) => amount, + None => { + return Err(Error::GasLimit.into()); + } + }; + + if !self.charge_gas(amount as u64) { + Err(Error::GasLimit.into()) + } else { + Ok(()) + } + } + + /// Same as overflow_charge, but with amount adjusted by wasm opcodes coeff + pub fn adjusted_overflow_charge(&mut self, f: F) -> Result<()> + where + F: FnOnce(&vm::Schedule) -> Option, + { + self.overflow_charge(|schedule| { + f(schedule) + .and_then(|x| x.checked_mul(schedule.wasm().opcodes_div as u64)) + .map(|x| x / schedule.wasm().opcodes_mul as u64) + }) + } + + /// Read from the storage to wasm memory + pub fn storage_read(&mut self, args: RuntimeArgs) -> Result<()> { + let key = self.h256_at(args.nth_checked(0)?)?; + let val_ptr: u32 = args.nth_checked(1)?; + + let val = self + .ext + .storage_at(&key) + .map_err(|_| Error::StorageReadError)?; + + self.adjusted_charge(|schedule| schedule.sload_gas as u64)?; + + self.memory.set(val_ptr as u32, &*val)?; + + Ok(()) + } + + /// Write to storage from wasm memory + pub fn storage_write(&mut self, args: RuntimeArgs) -> Result<()> { + let key = self.h256_at(args.nth_checked(0)?)?; + let val_ptr: u32 = args.nth_checked(1)?; + + let val = self.h256_at(val_ptr)?; + let former_val = self + .ext + .storage_at(&key) + .map_err(|_| Error::StorageUpdateError)?; + + if former_val == H256::zero() && val != H256::zero() { + self.adjusted_charge(|schedule| schedule.sstore_set_gas as u64)?; + } else { + self.adjusted_charge(|schedule| schedule.sstore_reset_gas as u64)?; + } + + self.ext + .set_storage(key, val) + .map_err(|_| Error::StorageUpdateError)?; + + if former_val != H256::zero() && val == H256::zero() { + let sstore_clears_schedule = self.schedule().sstore_refund_gas; + self.ext.add_sstore_refund(sstore_clears_schedule); + } + + Ok(()) + } + + /// Return currently used schedule + pub 
fn schedule(&self) -> &vm::Schedule { + self.ext.schedule() + } + + /// Sets a return value for the call + /// + /// Syscall takes 2 arguments: + /// * pointer in sandboxed memory where result is + /// * the length of the result + pub fn ret(&mut self, args: RuntimeArgs) -> Result<()> { + let ptr: u32 = args.nth_checked(0)?; + let len: u32 = args.nth_checked(1)?; + + trace!(target: "wasm", "Contract ret: {} bytes @ {}", len, ptr); + + self.result = self.memory.get(ptr, len as usize)?; + + Err(Error::Return) + } + + /// Destroy the runtime, returning currently recorded result of the execution + pub fn into_result(self) -> Vec { + self.result + } + + /// Query current gas left for execution + pub fn gas_left(&self) -> Result { + if self.gas_counter > self.gas_limit { + return Err(Error::InvalidGasState); + } + Ok(self.gas_limit - self.gas_counter) + } + + /// General gas charging extern. + fn gas(&mut self, args: RuntimeArgs) -> Result<()> { + let amount: u32 = args.nth_checked(0)?; + if self.charge_gas(amount as u64) { + Ok(()) + } else { + Err(Error::GasLimit.into()) + } + } + + /// Query the length of the input bytes + fn input_legnth(&mut self) -> RuntimeValue { + RuntimeValue::I32(self.args.len() as i32) + } + + /// Write input bytes to the memory location using the passed pointer + fn fetch_input(&mut self, args: RuntimeArgs) -> Result<()> { + let ptr: u32 = args.nth_checked(0)?; + + let args_len = self.args.len() as u64; + self.charge(|s| args_len * s.wasm().memcpy as u64)?; + + self.memory.set(ptr, &self.args[..])?; + Ok(()) + } + + /// User panic + /// + /// Contract can invoke this when he encounters unrecoverable error. 
+ fn panic(&mut self, args: RuntimeArgs) -> Result<()> { + let payload_ptr: u32 = args.nth_checked(0)?; + let payload_len: u32 = args.nth_checked(1)?; + + let raw_payload = self.memory.get(payload_ptr, payload_len as usize)?; + let payload = panic_payload::decode(&raw_payload); + let msg = format!( + "{msg}, {file}:{line}:{col}", + msg = payload + .msg + .as_ref() + .map(String::as_ref) + .unwrap_or(""), + file = payload + .file + .as_ref() + .map(String::as_ref) + .unwrap_or(""), + line = payload.line.unwrap_or(0), + col = payload.col.unwrap_or(0) + ); + trace!(target: "wasm", "Contract custom panic message: {}", msg); + + Err(Error::Panic(msg).into()) + } + + fn do_call( + &mut self, + use_val: bool, + call_type: CallType, + args: RuntimeArgs, + ) -> Result { + trace!(target: "wasm", "runtime: CALL({:?})", call_type); + + let gas: u64 = args.nth_checked(0)?; + trace!(target: "wasm", " gas: {:?}", gas); + + let address = self.address_at(args.nth_checked(1)?)?; + trace!(target: "wasm", " address: {:?}", address); + + let vofs = if use_val { 1 } else { 0 }; + let val = if use_val { + Some(self.u256_at(args.nth_checked(2)?)?) 
+ } else { + None + }; + trace!(target: "wasm", " val: {:?}", val); + + let input_ptr: u32 = args.nth_checked(2 + vofs)?; + trace!(target: "wasm", " input_ptr: {:?}", input_ptr); + + let input_len: u32 = args.nth_checked(3 + vofs)?; + trace!(target: "wasm", " input_len: {:?}", input_len); + + let result_ptr: u32 = args.nth_checked(4 + vofs)?; + trace!(target: "wasm", " result_ptr: {:?}", result_ptr); + + let result_alloc_len: u32 = args.nth_checked(5 + vofs)?; + trace!(target: "wasm", " result_len: {:?}", result_alloc_len); + + if let Some(ref val) = val { + let address_balance = self + .ext + .balance(&self.context.address) + .map_err(|_| Error::BalanceQueryError)?; + + if &address_balance < val { + trace!(target: "wasm", "runtime: call failed due to balance check"); + return Ok((-1i32).into()); + } + } + + self.adjusted_charge(|schedule| schedule.call_gas as u64)?; + + let mut result = Vec::with_capacity(result_alloc_len as usize); + result.resize(result_alloc_len as usize, 0); + + // todo: optimize to use memory views once it's in + let payload = self.memory.get(input_ptr, input_len as usize)?; + + let adjusted_gas = match gas + .checked_mul(self.ext.schedule().wasm().opcodes_div as u64) + .map(|x| x / self.ext.schedule().wasm().opcodes_mul as u64) + { + Some(x) => x, + None => { + trace!("CALL overflowed gas, call aborted with error returned"); + return Ok(RuntimeValue::I32(-1)); + } + }; + + self.charge(|_| adjusted_gas)?; + + let call_result = self + .ext + .call( + &gas.into(), + match call_type { + CallType::DelegateCall => &self.context.sender, + _ => &self.context.address, + }, + match call_type { + CallType::Call | CallType::StaticCall => &address, + _ => &self.context.address, + }, + val, + &payload, + &address, + call_type, + false, + ) + .ok() + .expect("Trap is false; trap error will not happen; qed"); + + match call_result { + vm::MessageCallResult::Success(gas_left, data) => { + let len = cmp::min(result.len(), data.len()); + (&mut 
result[..len]).copy_from_slice(&data[..len]); + + // cannot overflow, before making call gas_counter was incremented with gas, and gas_left < gas + self.gas_counter = self.gas_counter + - gas_left.low_u64() * self.ext.schedule().wasm().opcodes_div as u64 + / self.ext.schedule().wasm().opcodes_mul as u64; + + self.memory.set(result_ptr, &result)?; + Ok(0i32.into()) + } + vm::MessageCallResult::Reverted(gas_left, data) => { + let len = cmp::min(result.len(), data.len()); + (&mut result[..len]).copy_from_slice(&data[..len]); + + // cannot overflow, before making call gas_counter was incremented with gas, and gas_left < gas + self.gas_counter = self.gas_counter + - gas_left.low_u64() * self.ext.schedule().wasm().opcodes_div as u64 + / self.ext.schedule().wasm().opcodes_mul as u64; + + self.memory.set(result_ptr, &result)?; + Ok((-1i32).into()) + } + vm::MessageCallResult::Failed => Ok((-1i32).into()), + } + } + + /// Message call + fn ccall(&mut self, args: RuntimeArgs) -> Result { + self.do_call(true, CallType::Call, args) + } + + /// Delegate call + fn dcall(&mut self, args: RuntimeArgs) -> Result { + self.do_call(false, CallType::DelegateCall, args) + } + + /// Static call + fn scall(&mut self, args: RuntimeArgs) -> Result { + self.do_call(false, CallType::StaticCall, args) + } + + fn return_address_ptr(&mut self, ptr: u32, val: Address) -> Result<()> { + self.charge(|schedule| schedule.wasm().static_address as u64)?; + self.memory.set(ptr, &*val)?; + Ok(()) + } + + fn return_u256_ptr(&mut self, ptr: u32, val: U256) -> Result<()> { + let value: H256 = val.into(); + self.charge(|schedule| schedule.wasm().static_u256 as u64)?; + self.memory.set(ptr, &*value)?; + Ok(()) + } + + /// Returns value (in Wei) passed to contract + pub fn value(&mut self, args: RuntimeArgs) -> Result<()> { + let val = self.context.value; + self.return_u256_ptr(args.nth_checked(0)?, val) + } + + fn do_create( + &mut self, + endowment: U256, + code_ptr: u32, + code_len: u32, + result_ptr: u32, 
+ scheme: vm::CreateContractAddress, + ) -> Result { + let code = self.memory.get(code_ptr, code_len as usize)?; + + self.adjusted_charge(|schedule| schedule.create_gas as u64)?; + self.adjusted_charge(|schedule| schedule.create_data_gas as u64 * code.len() as u64)?; + + let gas_left: U256 = U256::from(self.gas_left()?) + * U256::from(self.ext.schedule().wasm().opcodes_mul) + / U256::from(self.ext.schedule().wasm().opcodes_div); + + match self + .ext + .create(&gas_left, &endowment, &code, scheme, false) + .ok() + .expect("Trap is false; trap error will not happen; qed") + { + vm::ContractCreateResult::Created(address, gas_left) => { + self.memory.set(result_ptr, &*address)?; + self.gas_counter = self.gas_limit - // this cannot overflow, since initial gas is in [0..u64::max) range, // and gas_left cannot be bigger gas_left.low_u64() * self.ext.schedule().wasm().opcodes_div as u64 / self.ext.schedule().wasm().opcodes_mul as u64; - trace!(target: "wasm", "runtime: create contract success (@{:?})", address); - Ok(0i32.into()) - }, - vm::ContractCreateResult::Failed => { - trace!(target: "wasm", "runtime: create contract fail"); - Ok((-1i32).into()) - }, - vm::ContractCreateResult::Reverted(gas_left, _) => { - trace!(target: "wasm", "runtime: create contract reverted"); - self.gas_counter = self.gas_limit - + trace!(target: "wasm", "runtime: create contract success (@{:?})", address); + Ok(0i32.into()) + } + vm::ContractCreateResult::Failed => { + trace!(target: "wasm", "runtime: create contract fail"); + Ok((-1i32).into()) + } + vm::ContractCreateResult::Reverted(gas_left, _) => { + trace!(target: "wasm", "runtime: create contract reverted"); + self.gas_counter = self.gas_limit - // this cannot overflow, since initial gas is in [0..u64::max) range, // and gas_left cannot be bigger gas_left.low_u64() * self.ext.schedule().wasm().opcodes_div as u64 / self.ext.schedule().wasm().opcodes_mul as u64; - Ok((-1i32).into()) - }, - } - } + Ok((-1i32).into()) + } + } + } - /// 
Creates a new contract - /// - /// Arguments: - /// * endowment - how much value (in Wei) transfer to the newly created contract - /// * code_ptr - pointer to the code data - /// * code_len - lenght of the code data - /// * result_ptr - pointer to write an address of the newly created contract - pub fn create(&mut self, args: RuntimeArgs) -> Result { - // - // method signature: - // fn create(endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8) -> i32; - // - trace!(target: "wasm", "runtime: CREATE"); - let endowment = self.u256_at(args.nth_checked(0)?)?; - trace!(target: "wasm", " val: {:?}", endowment); - let code_ptr: u32 = args.nth_checked(1)?; - trace!(target: "wasm", " code_ptr: {:?}", code_ptr); - let code_len: u32 = args.nth_checked(2)?; - trace!(target: "wasm", " code_len: {:?}", code_len); - let result_ptr: u32 = args.nth_checked(3)?; - trace!(target: "wasm", "result_ptr: {:?}", result_ptr); + /// Creates a new contract + /// + /// Arguments: + /// * endowment - how much value (in Wei) transfer to the newly created contract + /// * code_ptr - pointer to the code data + /// * code_len - lenght of the code data + /// * result_ptr - pointer to write an address of the newly created contract + pub fn create(&mut self, args: RuntimeArgs) -> Result { + // + // method signature: + // fn create(endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8) -> i32; + // + trace!(target: "wasm", "runtime: CREATE"); + let endowment = self.u256_at(args.nth_checked(0)?)?; + trace!(target: "wasm", " val: {:?}", endowment); + let code_ptr: u32 = args.nth_checked(1)?; + trace!(target: "wasm", " code_ptr: {:?}", code_ptr); + let code_len: u32 = args.nth_checked(2)?; + trace!(target: "wasm", " code_len: {:?}", code_len); + let result_ptr: u32 = args.nth_checked(3)?; + trace!(target: "wasm", "result_ptr: {:?}", result_ptr); - self.do_create(endowment, code_ptr, code_len, result_ptr, vm::CreateContractAddress::FromSenderAndCodeHash) - 
} + self.do_create( + endowment, + code_ptr, + code_len, + result_ptr, + vm::CreateContractAddress::FromSenderAndCodeHash, + ) + } - /// Creates a new contract using FromSenderSaltAndCodeHash scheme - /// - /// Arguments: - /// * endowment - how much value (in Wei) transfer to the newly created contract - /// * salt - salt to be used in contract creation address - /// * code_ptr - pointer to the code data - /// * code_len - lenght of the code data - /// * result_ptr - pointer to write an address of the newly created contract - pub fn create2(&mut self, args: RuntimeArgs) -> Result { - // - // method signature: - // fn create2(endowment: *const u8, salt: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8) -> i32; - // - trace!(target: "wasm", "runtime: CREATE2"); - let endowment = self.u256_at(args.nth_checked(0)?)?; - trace!(target: "wasm", " val: {:?}", endowment); - let salt: H256 = self.u256_at(args.nth_checked(1)?)?.into(); - trace!(target: "wasm", " salt: {:?}", salt); - let code_ptr: u32 = args.nth_checked(2)?; - trace!(target: "wasm", " code_ptr: {:?}", code_ptr); - let code_len: u32 = args.nth_checked(3)?; - trace!(target: "wasm", " code_len: {:?}", code_len); - let result_ptr: u32 = args.nth_checked(4)?; - trace!(target: "wasm", "result_ptr: {:?}", result_ptr); + /// Creates a new contract using FromSenderSaltAndCodeHash scheme + /// + /// Arguments: + /// * endowment - how much value (in Wei) transfer to the newly created contract + /// * salt - salt to be used in contract creation address + /// * code_ptr - pointer to the code data + /// * code_len - lenght of the code data + /// * result_ptr - pointer to write an address of the newly created contract + pub fn create2(&mut self, args: RuntimeArgs) -> Result { + // + // method signature: + // fn create2(endowment: *const u8, salt: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8) -> i32; + // + trace!(target: "wasm", "runtime: CREATE2"); + let endowment = 
self.u256_at(args.nth_checked(0)?)?; + trace!(target: "wasm", " val: {:?}", endowment); + let salt: H256 = self.u256_at(args.nth_checked(1)?)?.into(); + trace!(target: "wasm", " salt: {:?}", salt); + let code_ptr: u32 = args.nth_checked(2)?; + trace!(target: "wasm", " code_ptr: {:?}", code_ptr); + let code_len: u32 = args.nth_checked(3)?; + trace!(target: "wasm", " code_len: {:?}", code_len); + let result_ptr: u32 = args.nth_checked(4)?; + trace!(target: "wasm", "result_ptr: {:?}", result_ptr); - self.do_create(endowment, code_ptr, code_len, result_ptr, vm::CreateContractAddress::FromSenderSaltAndCodeHash(salt)) - } + self.do_create( + endowment, + code_ptr, + code_len, + result_ptr, + vm::CreateContractAddress::FromSenderSaltAndCodeHash(salt), + ) + } - fn debug(&mut self, args: RuntimeArgs) -> Result<()> - { - trace!(target: "wasm", "Contract debug message: {}", { - let msg_ptr: u32 = args.nth_checked(0)?; - let msg_len: u32 = args.nth_checked(1)?; + fn debug(&mut self, args: RuntimeArgs) -> Result<()> { + trace!(target: "wasm", "Contract debug message: {}", { + let msg_ptr: u32 = args.nth_checked(0)?; + let msg_len: u32 = args.nth_checked(1)?; - String::from_utf8(self.memory.get(msg_ptr, msg_len as usize)?) - .map_err(|_| Error::BadUtf8)? - }); + String::from_utf8(self.memory.get(msg_ptr, msg_len as usize)?) + .map_err(|_| Error::BadUtf8)? + }); - Ok(()) - } + Ok(()) + } - /// Pass suicide to state runtime - pub fn suicide(&mut self, args: RuntimeArgs) -> Result<()> - { - let refund_address = self.address_at(args.nth_checked(0)?)?; + /// Pass suicide to state runtime + pub fn suicide(&mut self, args: RuntimeArgs) -> Result<()> { + let refund_address = self.address_at(args.nth_checked(0)?)?; - if self.ext.exists(&refund_address).map_err(|_| Error::SuicideAbort)? 
{ - trace!(target: "wasm", "Suicide: refund to existing address {}", refund_address); - self.adjusted_charge(|schedule| schedule.suicide_gas as u64)?; - } else { - trace!(target: "wasm", "Suicide: refund to new address {}", refund_address); - self.adjusted_charge(|schedule| schedule.suicide_to_new_account_cost as u64)?; - } + if self + .ext + .exists(&refund_address) + .map_err(|_| Error::SuicideAbort)? + { + trace!(target: "wasm", "Suicide: refund to existing address {}", refund_address); + self.adjusted_charge(|schedule| schedule.suicide_gas as u64)?; + } else { + trace!(target: "wasm", "Suicide: refund to new address {}", refund_address); + self.adjusted_charge(|schedule| schedule.suicide_to_new_account_cost as u64)?; + } - self.ext.suicide(&refund_address).map_err(|_| Error::SuicideAbort)?; + self.ext + .suicide(&refund_address) + .map_err(|_| Error::SuicideAbort)?; - // We send trap to interpreter so it should abort further execution - Err(Error::Suicide.into()) - } + // We send trap to interpreter so it should abort further execution + Err(Error::Suicide.into()) + } - /// Signature: `fn blockhash(number: i64, dest: *mut u8)` - pub fn blockhash(&mut self, args: RuntimeArgs) -> Result<()> { - self.adjusted_charge(|schedule| schedule.blockhash_gas as u64)?; - let hash = self.ext.blockhash(&U256::from(args.nth_checked::(0)?)); - self.memory.set(args.nth_checked(1)?, &*hash)?; + /// Signature: `fn blockhash(number: i64, dest: *mut u8)` + pub fn blockhash(&mut self, args: RuntimeArgs) -> Result<()> { + self.adjusted_charge(|schedule| schedule.blockhash_gas as u64)?; + let hash = self.ext.blockhash(&U256::from(args.nth_checked::(0)?)); + self.memory.set(args.nth_checked(1)?, &*hash)?; - Ok(()) - } + Ok(()) + } - /// Signature: `fn blocknumber() -> i64` - pub fn blocknumber(&mut self) -> Result { - Ok(RuntimeValue::from(self.ext.env_info().number)) - } + /// Signature: `fn blocknumber() -> i64` + pub fn blocknumber(&mut self) -> Result { + 
Ok(RuntimeValue::from(self.ext.env_info().number)) + } - /// Signature: `fn coinbase(dest: *mut u8)` - pub fn coinbase(&mut self, args: RuntimeArgs) -> Result<()> { - let coinbase = self.ext.env_info().author; - self.return_address_ptr(args.nth_checked(0)?, coinbase) - } + /// Signature: `fn coinbase(dest: *mut u8)` + pub fn coinbase(&mut self, args: RuntimeArgs) -> Result<()> { + let coinbase = self.ext.env_info().author; + self.return_address_ptr(args.nth_checked(0)?, coinbase) + } - /// Signature: `fn difficulty(dest: *mut u8)` - pub fn difficulty(&mut self, args: RuntimeArgs) -> Result<()> { - let difficulty = self.ext.env_info().difficulty; - self.return_u256_ptr(args.nth_checked(0)?, difficulty) - } + /// Signature: `fn difficulty(dest: *mut u8)` + pub fn difficulty(&mut self, args: RuntimeArgs) -> Result<()> { + let difficulty = self.ext.env_info().difficulty; + self.return_u256_ptr(args.nth_checked(0)?, difficulty) + } - /// Signature: `fn gasleft() -> i64` - pub fn gasleft(&mut self) -> Result { - Ok(RuntimeValue::from( - self.gas_left()? * self.ext.schedule().wasm().opcodes_mul as u64 - / self.ext.schedule().wasm().opcodes_div as u64 - ) - ) - } + /// Signature: `fn gasleft() -> i64` + pub fn gasleft(&mut self) -> Result { + Ok(RuntimeValue::from( + self.gas_left()? 
* self.ext.schedule().wasm().opcodes_mul as u64 + / self.ext.schedule().wasm().opcodes_div as u64, + )) + } - /// Signature: `fn gaslimit(dest: *mut u8)` - pub fn gaslimit(&mut self, args: RuntimeArgs) -> Result<()> { - let gas_limit = self.ext.env_info().gas_limit; - self.return_u256_ptr(args.nth_checked(0)?, gas_limit) - } + /// Signature: `fn gaslimit(dest: *mut u8)` + pub fn gaslimit(&mut self, args: RuntimeArgs) -> Result<()> { + let gas_limit = self.ext.env_info().gas_limit; + self.return_u256_ptr(args.nth_checked(0)?, gas_limit) + } - /// Signature: `fn address(dest: *mut u8)` - pub fn address(&mut self, args: RuntimeArgs) -> Result<()> { - let address = self.context.address; - self.return_address_ptr(args.nth_checked(0)?, address) - } + /// Signature: `fn address(dest: *mut u8)` + pub fn address(&mut self, args: RuntimeArgs) -> Result<()> { + let address = self.context.address; + self.return_address_ptr(args.nth_checked(0)?, address) + } - /// Signature: `sender(dest: *mut u8)` - pub fn sender(&mut self, args: RuntimeArgs) -> Result<()> { - let sender = self.context.sender; - self.return_address_ptr(args.nth_checked(0)?, sender) - } + /// Signature: `sender(dest: *mut u8)` + pub fn sender(&mut self, args: RuntimeArgs) -> Result<()> { + let sender = self.context.sender; + self.return_address_ptr(args.nth_checked(0)?, sender) + } - /// Signature: `origin(dest: *mut u8)` - pub fn origin(&mut self, args: RuntimeArgs) -> Result<()> { - let origin = self.context.origin; - self.return_address_ptr(args.nth_checked(0)?, origin) - } + /// Signature: `origin(dest: *mut u8)` + pub fn origin(&mut self, args: RuntimeArgs) -> Result<()> { + let origin = self.context.origin; + self.return_address_ptr(args.nth_checked(0)?, origin) + } - /// Signature: `timestamp() -> i64` - pub fn timestamp(&mut self) -> Result { - let timestamp = self.ext.env_info().timestamp; - Ok(RuntimeValue::from(timestamp)) - } + /// Signature: `timestamp() -> i64` + pub fn timestamp(&mut self) -> 
Result { + let timestamp = self.ext.env_info().timestamp; + Ok(RuntimeValue::from(timestamp)) + } - /// Signature: `fn elog(topic_ptr: *const u8, topic_count: u32, data_ptr: *const u8, data_len: u32)` - pub fn elog(&mut self, args: RuntimeArgs) -> Result<()> - { - let topic_ptr: u32 = args.nth_checked(0)?; - let topic_count: u32 = args.nth_checked(1)?; - let data_ptr: u32 = args.nth_checked(2)?; - let data_len: u32 = args.nth_checked(3)?; + /// Signature: `fn elog(topic_ptr: *const u8, topic_count: u32, data_ptr: *const u8, data_len: u32)` + pub fn elog(&mut self, args: RuntimeArgs) -> Result<()> { + let topic_ptr: u32 = args.nth_checked(0)?; + let topic_count: u32 = args.nth_checked(1)?; + let data_ptr: u32 = args.nth_checked(2)?; + let data_len: u32 = args.nth_checked(3)?; - if topic_count > 4 { - return Err(Error::Log.into()); - } + if topic_count > 4 { + return Err(Error::Log.into()); + } - self.adjusted_overflow_charge(|schedule| - { - let topics_gas = schedule.log_gas as u64 + schedule.log_topic_gas as u64 * topic_count as u64; - (schedule.log_data_gas as u64) - .checked_mul(schedule.log_data_gas as u64) - .and_then(|data_gas| data_gas.checked_add(topics_gas)) - } - )?; + self.adjusted_overflow_charge(|schedule| { + let topics_gas = + schedule.log_gas as u64 + schedule.log_topic_gas as u64 * topic_count as u64; + (schedule.log_data_gas as u64) + .checked_mul(schedule.log_data_gas as u64) + .and_then(|data_gas| data_gas.checked_add(topics_gas)) + })?; - let mut topics: Vec = Vec::with_capacity(topic_count as usize); - topics.resize(topic_count as usize, H256::zero()); - for i in 0..topic_count { - let offset = i.checked_mul(32).ok_or(Error::MemoryAccessViolation)? - .checked_add(topic_ptr).ok_or(Error::MemoryAccessViolation)?; + let mut topics: Vec = Vec::with_capacity(topic_count as usize); + topics.resize(topic_count as usize, H256::zero()); + for i in 0..topic_count { + let offset = i + .checked_mul(32) + .ok_or(Error::MemoryAccessViolation)? 
+ .checked_add(topic_ptr) + .ok_or(Error::MemoryAccessViolation)?; - *topics.get_mut(i as usize) + *topics.get_mut(i as usize) .expect("topics is resized to `topic_count`, i is in 0..topic count iterator, get_mut uses i as an indexer, get_mut cannot fail; qed") = H256::from(&self.memory.get(offset, 32)?[..]); - } - self.ext.log(topics, &self.memory.get(data_ptr, data_len as usize)?).map_err(|_| Error::Log)?; + } + self.ext + .log(topics, &self.memory.get(data_ptr, data_len as usize)?) + .map_err(|_| Error::Log)?; - Ok(()) - } + Ok(()) + } } mod ext_impl { - use wasmi::{Externals, RuntimeArgs, RuntimeValue, Trap}; - use env::ids::*; + use env::ids::*; + use wasmi::{Externals, RuntimeArgs, RuntimeValue, Trap}; - macro_rules! void { + macro_rules! void { { $e: expr } => { { $e?; Ok(None) } } } - macro_rules! some { + macro_rules! some { { $e: expr } => { { Ok(Some($e?)) } } } - macro_rules! cast { + macro_rules! cast { { $e: expr } => { { Ok(Some($e)) } } } - impl<'a> Externals for super::Runtime<'a> { - fn invoke_index( - &mut self, - index: usize, - args: RuntimeArgs, - ) -> Result, Trap> { - match index { - STORAGE_WRITE_FUNC => void!(self.storage_write(args)), - STORAGE_READ_FUNC => void!(self.storage_read(args)), - RET_FUNC => void!(self.ret(args)), - GAS_FUNC => void!(self.gas(args)), - INPUT_LENGTH_FUNC => cast!(self.input_legnth()), - FETCH_INPUT_FUNC => void!(self.fetch_input(args)), - PANIC_FUNC => void!(self.panic(args)), - DEBUG_FUNC => void!(self.debug(args)), - CCALL_FUNC => some!(self.ccall(args)), - DCALL_FUNC => some!(self.dcall(args)), - SCALL_FUNC => some!(self.scall(args)), - VALUE_FUNC => void!(self.value(args)), - CREATE_FUNC => some!(self.create(args)), - SUICIDE_FUNC => void!(self.suicide(args)), - BLOCKHASH_FUNC => void!(self.blockhash(args)), - BLOCKNUMBER_FUNC => some!(self.blocknumber()), - COINBASE_FUNC => void!(self.coinbase(args)), - DIFFICULTY_FUNC => void!(self.difficulty(args)), - GASLIMIT_FUNC => void!(self.gaslimit(args)), - 
TIMESTAMP_FUNC => some!(self.timestamp()), - ADDRESS_FUNC => void!(self.address(args)), - SENDER_FUNC => void!(self.sender(args)), - ORIGIN_FUNC => void!(self.origin(args)), - ELOG_FUNC => void!(self.elog(args)), - CREATE2_FUNC => some!(self.create2(args)), - GASLEFT_FUNC => some!(self.gasleft()), - _ => panic!("env module doesn't provide function at index {}", index), - } - } - } + impl<'a> Externals for super::Runtime<'a> { + fn invoke_index( + &mut self, + index: usize, + args: RuntimeArgs, + ) -> Result, Trap> { + match index { + STORAGE_WRITE_FUNC => void!(self.storage_write(args)), + STORAGE_READ_FUNC => void!(self.storage_read(args)), + RET_FUNC => void!(self.ret(args)), + GAS_FUNC => void!(self.gas(args)), + INPUT_LENGTH_FUNC => cast!(self.input_legnth()), + FETCH_INPUT_FUNC => void!(self.fetch_input(args)), + PANIC_FUNC => void!(self.panic(args)), + DEBUG_FUNC => void!(self.debug(args)), + CCALL_FUNC => some!(self.ccall(args)), + DCALL_FUNC => some!(self.dcall(args)), + SCALL_FUNC => some!(self.scall(args)), + VALUE_FUNC => void!(self.value(args)), + CREATE_FUNC => some!(self.create(args)), + SUICIDE_FUNC => void!(self.suicide(args)), + BLOCKHASH_FUNC => void!(self.blockhash(args)), + BLOCKNUMBER_FUNC => some!(self.blocknumber()), + COINBASE_FUNC => void!(self.coinbase(args)), + DIFFICULTY_FUNC => void!(self.difficulty(args)), + GASLIMIT_FUNC => void!(self.gaslimit(args)), + TIMESTAMP_FUNC => some!(self.timestamp()), + ADDRESS_FUNC => void!(self.address(args)), + SENDER_FUNC => void!(self.sender(args)), + ORIGIN_FUNC => void!(self.origin(args)), + ELOG_FUNC => void!(self.elog(args)), + CREATE2_FUNC => some!(self.create2(args)), + GASLEFT_FUNC => some!(self.gasleft()), + _ => panic!("env module doesn't provide function at index {}", index), + } + } + } } diff --git a/ethcore/wasm/src/tests.rs b/ethcore/wasm/src/tests.rs index 9ed7053da..a79491be6 100644 --- a/ethcore/wasm/src/tests.rs +++ b/ethcore/wasm/src/tests.rs @@ -14,79 +14,88 @@ // You should have 
received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::collections::HashMap; -use byteorder::{LittleEndian, ByteOrder}; -use ethereum_types::{H256, U256, Address}; +use byteorder::{ByteOrder, LittleEndian}; +use ethereum_types::{Address, H256, U256}; +use std::{collections::HashMap, sync::Arc}; use super::WasmInterpreter; -use vm::{self, Exec, GasLeft, ActionParams, ActionValue, CreateContractAddress}; -use vm::tests::{FakeCall, FakeExt, FakeCallType}; +use vm::{ + self, + tests::{FakeCall, FakeCallType, FakeExt}, + ActionParams, ActionValue, CreateContractAddress, Exec, GasLeft, +}; macro_rules! load_sample { - ($name: expr) => { - include_bytes!(concat!("../../res/wasm-tests/compiled/", $name)).to_vec() - } + ($name: expr) => { + include_bytes!(concat!("../../res/wasm-tests/compiled/", $name)).to_vec() + }; } macro_rules! reqrep_test { - ($name: expr, $input: expr) => { - reqrep_test!($name, $input, vm::EnvInfo::default(), HashMap::new()) - }; - ($name: expr, $input: expr, $info: expr, $block_hashes: expr) => { - { - let _ = ::env_logger::try_init(); - let code = load_sample!($name); + ($name: expr, $input: expr) => { + reqrep_test!($name, $input, vm::EnvInfo::default(), HashMap::new()) + }; + ($name: expr, $input: expr, $info: expr, $block_hashes: expr) => {{ + let _ = ::env_logger::try_init(); + let code = load_sample!($name); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some($input); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.data = Some($input); - let mut fake_ext = FakeExt::new().with_wasm(); - fake_ext.info = $info; - fake_ext.blockhashes = $block_hashes; + let mut fake_ext = FakeExt::new().with_wasm(); + fake_ext.info = $info; + fake_ext.blockhashes = $block_hashes; - let mut interpreter = wasm_interpreter(params); - 
interpreter.exec(&mut fake_ext).ok().unwrap() - .map(|result| match result { - GasLeft::Known(_) => { panic!("Test is expected to return payload to check"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - }) - } - }; + let mut interpreter = wasm_interpreter(params); + interpreter + .exec(&mut fake_ext) + .ok() + .unwrap() + .map(|result| match result { + GasLeft::Known(_) => { + panic!("Test is expected to return payload to check"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + }) + }}; } fn test_finalize(res: Result) -> Result { - match res { - Ok(GasLeft::Known(gas)) => Ok(gas), - Ok(GasLeft::NeedsReturn{..}) => unimplemented!(), // since ret is unimplemented. - Err(e) => Err(e), - } + match res { + Ok(GasLeft::Known(gas)) => Ok(gas), + Ok(GasLeft::NeedsReturn { .. }) => unimplemented!(), // since ret is unimplemented. + Err(e) => Err(e), + } } fn wasm_interpreter(params: ActionParams) -> Box { - Box::new(WasmInterpreter::new(params)) + Box::new(WasmInterpreter::new(params)) } /// Empty contract does almost nothing except producing 1 (one) local node debug log message #[test] fn empty() { - let code = load_sample!("empty.wasm"); - let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + let code = load_sample!("empty.wasm"); + let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new().with_wasm(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new().with_wasm(); - let gas_left = { - let mut interpreter = wasm_interpreter(params); - test_finalize(interpreter.exec(&mut 
ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut interpreter = wasm_interpreter(params); + test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() + }; - assert_eq!(gas_left, U256::from(96_926)); + assert_eq!(gas_left, U256::from(96_926)); } // This test checks if the contract deserializes payload header properly. @@ -94,51 +103,77 @@ fn empty() { // logger.wasm writes all these provided fixed header fields to some arbitrary storage keys. #[test] fn logger() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let code = load_sample!("logger.wasm"); - let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let sender: Address = "0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d".parse().unwrap(); - let origin: Address = "0102030405060708090a0b0c0d0e0f1011121314".parse().unwrap(); + let code = load_sample!("logger.wasm"); + let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + let sender: Address = "0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d".parse().unwrap(); + let origin: Address = "0102030405060708090a0b0c0d0e0f1011121314".parse().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.origin = origin.clone(); - params.gas = U256::from(100_000); - params.value = ActionValue::transfer(1_000_000_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new().with_wasm(); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.sender = sender.clone(); + params.origin = origin.clone(); + params.gas = U256::from(100_000); + params.value = ActionValue::transfer(1_000_000_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new().with_wasm(); - let gas_left = { - let mut interpreter = wasm_interpreter(params); - test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut interpreter = 
wasm_interpreter(params); + test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() + }; - let address_val: H256 = address.into(); - assert_eq!( - ext.store.get(&"0100000000000000000000000000000000000000000000000000000000000000".parse().unwrap()).expect("storage key to exist"), - &address_val, - "Logger sets 0x01 key to the provided address" - ); - let sender_val: H256 = sender.into(); - assert_eq!( - ext.store.get(&"0200000000000000000000000000000000000000000000000000000000000000".parse().unwrap()).expect("storage key to exist"), - &sender_val, - "Logger sets 0x02 key to the provided sender" - ); - let origin_val: H256 = origin.into(); - assert_eq!( - ext.store.get(&"0300000000000000000000000000000000000000000000000000000000000000".parse().unwrap()).expect("storage key to exist"), - &origin_val, - "Logger sets 0x03 key to the provided origin" - ); - assert_eq!( - U256::from(ext.store.get(&"0400000000000000000000000000000000000000000000000000000000000000".parse().unwrap()).expect("storage key to exist")), - U256::from(1_000_000_000), - "Logger sets 0x04 key to the trasferred value" - ); - assert_eq!(gas_left, U256::from(17_716)); + let address_val: H256 = address.into(); + assert_eq!( + ext.store + .get( + &"0100000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ) + .expect("storage key to exist"), + &address_val, + "Logger sets 0x01 key to the provided address" + ); + let sender_val: H256 = sender.into(); + assert_eq!( + ext.store + .get( + &"0200000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ) + .expect("storage key to exist"), + &sender_val, + "Logger sets 0x02 key to the provided sender" + ); + let origin_val: H256 = origin.into(); + assert_eq!( + ext.store + .get( + &"0300000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ) + .expect("storage key to exist"), + &origin_val, + "Logger sets 0x03 key to the provided origin" + ); + assert_eq!( + 
U256::from( + ext.store + .get( + &"0400000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ) + .expect("storage key to exist") + ), + U256::from(1_000_000_000), + "Logger sets 0x04 key to the trasferred value" + ); + assert_eq!(gas_left, U256::from(17_716)); } // This test checks if the contract can allocate memory and pass pointer to the result stream properly. @@ -148,32 +183,42 @@ fn logger() { // if it has any result. #[test] fn identity() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let code = load_sample!("identity.wasm"); - let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); + let code = load_sample!("identity.wasm"); + let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new().with_wasm(); + let mut params = ActionParams::default(); + params.sender = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("Identity contract should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("Identity contract should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, 
result.to_vec()), + } + }; - assert_eq!( - Address::from_slice(&result), - sender, - "Idenity test contract does not return the sender passed" - ); - assert_eq!(gas_left, U256::from(98_419)); + assert_eq!( + Address::from_slice(&result), + sender, + "Idenity test contract does not return the sender passed" + ); + assert_eq!(gas_left, U256::from(98_419)); } // Dispersion test sends byte array and expect the contract to 'disperse' the original elements with @@ -182,435 +227,537 @@ fn identity() { // This also tests byte-perfect memory allocation and in/out ptr lifecycle. #[test] fn dispersion() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let code = load_sample!("dispersion.wasm"); + let code = load_sample!("dispersion.wasm"); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(vec![ - 0u8, 125, 197, 255, 19 - ]); - let mut ext = FakeExt::new().with_wasm(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.data = Some(vec![0u8, 125, 197, 255, 19]); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("Dispersion routine should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("Dispersion routine should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, 
result.to_vec()), + } + }; - assert_eq!( - result, - vec![0u8, 0, 125, 11, 197, 7, 255, 8, 19, 0] - ); - assert_eq!(gas_left, U256::from(92_377)); + assert_eq!(result, vec![0u8, 0, 125, 11, 197, 7, 255, 8, 19, 0]); + assert_eq!(gas_left, U256::from(92_377)); } #[test] fn suicide_not() { - let code = load_sample!("suicidal.wasm"); + let code = load_sample!("suicidal.wasm"); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(vec![ - 0u8 - ]); - let mut ext = FakeExt::new().with_wasm(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.data = Some(vec![0u8]); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("Suicidal contract should return payload when had not actualy killed himself"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!( + "Suicidal contract should return payload when had not actualy killed himself" + ); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + } + }; - assert_eq!( - result, - vec![0u8] - ); - assert_eq!(gas_left, U256::from(93_378)); + assert_eq!(result, vec![0u8]); + assert_eq!(gas_left, U256::from(93_378)); } #[test] fn suicide() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let code = load_sample!("suicidal.wasm"); + let code = 
load_sample!("suicidal.wasm"); - let refund: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); + let refund: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); - let mut args = vec![127u8]; - args.extend(refund.to_vec()); - params.data = Some(args); + let mut args = vec![127u8]; + args.extend(refund.to_vec()); + params.data = Some(args); - let mut ext = FakeExt::new().with_wasm(); + let mut ext = FakeExt::new().with_wasm(); - let gas_left = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(gas) => gas, - GasLeft::NeedsReturn { .. } => { - panic!("Suicidal contract should not return anything when had killed itself"); - }, - } - }; + let gas_left = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(gas) => gas, + GasLeft::NeedsReturn { .. 
} => { + panic!("Suicidal contract should not return anything when had killed itself"); + } + } + }; - assert!(ext.suicides.contains(&refund)); - assert_eq!(gas_left, U256::from(93_346)); + assert!(ext.suicides.contains(&refund)); + assert_eq!(gas_left, U256::from(93_346)); } #[test] fn create() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("creator.wasm"))); - params.data = Some(vec![0u8, 2, 4, 8, 16, 32, 64, 128]); - params.value = ActionValue::transfer(1_000_000_000); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("creator.wasm"))); + params.data = Some(vec![0u8, 2, 4, 8, 16, 32, 64, 128]); + params.value = ActionValue::transfer(1_000_000_000); - let mut ext = FakeExt::new().with_wasm(); - ext.schedule.wasm.as_mut().unwrap().have_create2 = true; + let mut ext = FakeExt::new().with_wasm(); + ext.schedule.wasm.as_mut().unwrap().have_create2 = true; - let gas_left = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("Create contract always return 40 bytes of the creation address, or in the case where it fails, return 40 bytes of zero."); - }, - GasLeft::NeedsReturn { gas_left, data, apply_state } => { - assert!(apply_state); - assert_eq!(data.as_ref(), [0u8; 40].as_ref()); // FakeExt never succeeds in create. 
- gas_left - }, - } - }; + let gas_left = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("Create contract always return 40 bytes of the creation address, or in the case where it fails, return 40 bytes of zero."); + } + GasLeft::NeedsReturn { + gas_left, + data, + apply_state, + } => { + assert!(apply_state); + assert_eq!(data.as_ref(), [0u8; 40].as_ref()); // FakeExt never succeeds in create. + gas_left + } + } + }; - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains( - &FakeCall { - call_type: FakeCallType::Create, - create_scheme: Some(CreateContractAddress::FromSenderAndCodeHash), - gas: U256::from(49_674), - sender_address: None, - receive_address: None, - value: Some((1_000_000_000 / 2).into()), - data: vec![0u8, 2, 4, 8, 16, 32, 64, 128], - code_address: None, - } - )); - assert!(ext.calls.contains( - &FakeCall { - call_type: FakeCallType::Create, - create_scheme: Some(CreateContractAddress::FromSenderSaltAndCodeHash(H256::from([5u8].as_ref()))), - gas: U256::from(6039), - sender_address: None, - receive_address: None, - value: Some((1_000_000_000 / 2).into()), - data: vec![0u8, 2, 4, 8, 16, 32, 64, 128], - code_address: None, - } - )); - assert_eq!(gas_left, U256::from(5974)); + trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); + assert!(ext.calls.contains(&FakeCall { + call_type: FakeCallType::Create, + create_scheme: Some(CreateContractAddress::FromSenderAndCodeHash), + gas: U256::from(49_674), + sender_address: None, + receive_address: None, + value: Some((1_000_000_000 / 2).into()), + data: vec![0u8, 2, 4, 8, 16, 32, 64, 128], + code_address: None, + })); + assert!(ext.calls.contains(&FakeCall { + call_type: FakeCallType::Create, + create_scheme: Some(CreateContractAddress::FromSenderSaltAndCodeHash( + H256::from([5u8].as_ref()) + )), + 
gas: U256::from(6039), + sender_address: None, + receive_address: None, + value: Some((1_000_000_000 / 2).into()), + data: vec![0u8, 2, 4, 8, 16, 32, 64, 128], + code_address: None, + })); + assert_eq!(gas_left, U256::from(5974)); } #[test] fn call_msg() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); + let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); + let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.address = receiver.clone(); - params.code_address = contract_address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("call.wasm"))); - params.data = Some(Vec::new()); + let mut params = ActionParams::default(); + params.sender = sender.clone(); + params.address = receiver.clone(); + params.code_address = contract_address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("call.wasm"))); + params.data = Some(Vec::new()); - let mut ext = FakeExt::new().with_wasm(); - ext.balances.insert(receiver.clone(), U256::from(10000000000u64)); + let mut ext = FakeExt::new().with_wasm(); + ext.balances + .insert(receiver.clone(), U256::from(10000000000u64)); - let gas_left = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(gas_left) => gas_left, - GasLeft::NeedsReturn { .. 
} => { panic!("Call test should not return payload"); }, - } - }; + let gas_left = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(gas_left) => gas_left, + GasLeft::NeedsReturn { .. } => { + panic!("Call test should not return payload"); + } + } + }; - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains( - &FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: U256::from(33_000), - sender_address: Some(receiver), - receive_address: Some(Address::from([99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0])), - value: Some(1000000000.into()), - data: vec![129u8, 123, 113, 107, 101, 97], - code_address: Some(Address::from([99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0])), - } - )); + trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); + assert!(ext.calls.contains(&FakeCall { + call_type: FakeCallType::Call, + create_scheme: None, + gas: U256::from(33_000), + sender_address: Some(receiver), + receive_address: Some(Address::from([ + 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0 + ])), + value: Some(1000000000.into()), + data: vec![129u8, 123, 113, 107, 101, 97], + code_address: Some(Address::from([ + 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0 + ])), + })); - assert_eq!(gas_left, U256::from(91_672)); + assert_eq!(gas_left, U256::from(91_672)); } // The same as `call_msg`, but send a `pwasm_ethereum::gasleft` // value as `gas` argument to the inner pwasm_ethereum::call #[test] fn call_msg_gasleft() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - 
let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); + let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); + let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.address = receiver.clone(); - params.code_address = contract_address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("call_gasleft.wasm"))); - params.data = Some(Vec::new()); + let mut params = ActionParams::default(); + params.sender = sender.clone(); + params.address = receiver.clone(); + params.code_address = contract_address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("call_gasleft.wasm"))); + params.data = Some(Vec::new()); - let mut ext = FakeExt::new().with_wasm(); - ext.schedule.wasm.as_mut().unwrap().have_gasleft = true; - ext.balances.insert(receiver.clone(), U256::from(10000000000u64)); + let mut ext = FakeExt::new().with_wasm(); + ext.schedule.wasm.as_mut().unwrap().have_gasleft = true; + ext.balances + .insert(receiver.clone(), U256::from(10000000000u64)); - let gas_left = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(gas_left) => gas_left, - GasLeft::NeedsReturn { .. } => { panic!("Call test should not return payload"); }, - } - }; + let gas_left = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(gas_left) => gas_left, + GasLeft::NeedsReturn { .. 
} => { + panic!("Call test should not return payload"); + } + } + }; - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains( - &FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: U256::from(91_165), - sender_address: Some(receiver), - receive_address: Some(Address::from([99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0])), - value: Some(1000000000.into()), - data: vec![129u8, 123, 113, 107, 101, 97], - code_address: Some(Address::from([99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0])), - } - )); + trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); + assert!(ext.calls.contains(&FakeCall { + call_type: FakeCallType::Call, + create_scheme: None, + gas: U256::from(91_165), + sender_address: Some(receiver), + receive_address: Some(Address::from([ + 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0 + ])), + value: Some(1000000000.into()), + data: vec![129u8, 123, 113, 107, 101, 97], + code_address: Some(Address::from([ + 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0 + ])), + })); - assert_eq!(gas_left, U256::from(91_671)); + assert_eq!(gas_left, U256::from(91_671)); } #[test] fn call_code() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); + let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.address = receiver.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("call_code.wasm"))); - params.data = Some(Vec::new()); - params.value = ActionValue::transfer(1_000_000_000); 
+ let mut params = ActionParams::default(); + params.sender = sender.clone(); + params.address = receiver.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("call_code.wasm"))); + params.data = Some(Vec::new()); + params.value = ActionValue::transfer(1_000_000_000); - let mut ext = FakeExt::new().with_wasm(); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("Call test should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("Call test should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + } + }; - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains( - &FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: U256::from(20_000), - sender_address: Some(sender), - receive_address: Some(receiver), - value: None, - data: vec![1u8, 2, 3, 5, 7, 11], - code_address: Some("0d13710000000000000000000000000000000000".parse().unwrap()), - } - )); + trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); + assert!(ext.calls.contains(&FakeCall { + call_type: FakeCallType::Call, + create_scheme: None, + gas: U256::from(20_000), + sender_address: Some(sender), + receive_address: Some(receiver), + value: None, + data: vec![1u8, 2, 3, 5, 7, 11], + code_address: Some("0d13710000000000000000000000000000000000".parse().unwrap()), + })); - // siphash result - 
let res = LittleEndian::read_u32(&result[..]); - assert_eq!(res, 4198595614); - assert_eq!(gas_left, U256::from(90_037)); + // siphash result + let res = LittleEndian::read_u32(&result[..]); + assert_eq!(res, 4198595614); + assert_eq!(gas_left, U256::from(90_037)); } #[test] fn call_static() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let sender: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let receiver: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); + let sender: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + let receiver: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); + let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.address = receiver.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("call_static.wasm"))); - params.data = Some(Vec::new()); - params.value = ActionValue::transfer(1_000_000_000); - params.code_address = contract_address.clone(); + let mut params = ActionParams::default(); + params.sender = sender.clone(); + params.address = receiver.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("call_static.wasm"))); + params.data = Some(Vec::new()); + params.value = ActionValue::transfer(1_000_000_000); + params.code_address = contract_address.clone(); - let mut ext = FakeExt::new().with_wasm(); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("Static call test should return payload"); }, - 
GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("Static call test should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + } + }; - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains( - &FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: U256::from(20_000), - sender_address: Some(receiver), - receive_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()), - value: None, - data: vec![1u8, 2, 3, 5, 7, 11], - code_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()), - } - )); + trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); + assert!(ext.calls.contains(&FakeCall { + call_type: FakeCallType::Call, + create_scheme: None, + gas: U256::from(20_000), + sender_address: Some(receiver), + receive_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()), + value: None, + data: vec![1u8, 2, 3, 5, 7, 11], + code_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()), + })); - // siphash result - let res = LittleEndian::read_u32(&result[..]); - assert_eq!(res, 317632590); + // siphash result + let res = LittleEndian::read_u32(&result[..]); + assert_eq!(res, 317632590); - assert_eq!(gas_left, U256::from(90_042)); + assert_eq!(gas_left, U256::from(90_042)); } // Realloc test #[test] fn realloc() { - let code = load_sample!("realloc.wasm"); + let code = load_sample!("realloc.wasm"); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(vec![0u8]); - let mut ext = 
FakeExt::new().with_wasm(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.data = Some(vec![0u8]); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("Realloc should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; - assert_eq!(result, vec![0u8; 2]); - assert_eq!(gas_left, U256::from(92_848)); + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("Realloc should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + } + }; + assert_eq!(result, vec![0u8; 2]); + assert_eq!(gas_left, U256::from(92_848)); } #[test] fn alloc() { - let code = load_sample!("alloc.wasm"); + let code = load_sample!("alloc.wasm"); - let mut params = ActionParams::default(); - params.gas = U256::from(10_000_000); - params.code = Some(Arc::new(code)); - params.data = Some(vec![0u8]); - let mut ext = FakeExt::new().with_wasm(); + let mut params = ActionParams::default(); + params.gas = U256::from(10_000_000); + params.code = Some(Arc::new(code)); + params.data = Some(vec![0u8]); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("alloc test should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, 
data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; - assert_eq!(result, vec![5u8; 1024*400]); - assert_eq!(gas_left, U256::from(6_893_881)); + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("alloc test should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + } + }; + assert_eq!(result, vec![5u8; 1024 * 400]); + assert_eq!(gas_left, U256::from(6_893_881)); } // Tests that contract's ability to read from a storage // Test prepopulates address into storage, than executes a contract which read that address from storage and write this address into result #[test] fn storage_read() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let code = load_sample!("storage_read.wasm"); - let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + let code = load_sample!("storage_read.wasm"); + let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new().with_wasm(); - ext.store.insert("0100000000000000000000000000000000000000000000000000000000000000".into(), address.into()); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new().with_wasm(); + ext.store.insert( + "0100000000000000000000000000000000000000000000000000000000000000".into(), + address.into(), + ); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - 
GasLeft::Known(_) => { panic!("storage_read should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("storage_read should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + } + }; - assert_eq!(Address::from(&result[12..32]), address); - assert_eq!(gas_left, U256::from(98_369)); + assert_eq!(Address::from(&result[12..32]), address); + assert_eq!(gas_left, U256::from(98_369)); } // Tests keccak calculation // keccak.wasm runs wasm-std::keccak function on data param and returns hash #[test] fn keccak() { - let _ = ::env_logger::try_init(); - let code = load_sample!("keccak.wasm"); + let _ = ::env_logger::try_init(); + let code = load_sample!("keccak.wasm"); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(b"something".to_vec()); - let mut ext = FakeExt::new().with_wasm(); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.data = Some(b"something".to_vec()); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("keccak should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut 
ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("keccak should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + } + }; - assert_eq!(H256::from_slice(&result), H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87")); - assert_eq!(gas_left, U256::from(85_949)); + assert_eq!( + H256::from_slice(&result), + H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87") + ); + assert_eq!(gas_left, U256::from(85_949)); } // math_* tests check the ability of wasm contract to perform big integer operations @@ -622,312 +769,324 @@ fn keccak() { // addition #[test] fn math_add() { + let (gas_left, result) = reqrep_test!("math.wasm", { + let mut args = [0u8; 65]; + let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap(); + let arg_b = U256::from_dec_str("888888888888888888888888888888").unwrap(); + arg_a.to_big_endian(&mut args[1..33]); + arg_b.to_big_endian(&mut args[33..65]); + args.to_vec() + }) + .expect("Interpreter to execute without any errors"); - let (gas_left, result) = reqrep_test!( - "math.wasm", - { - let mut args = [0u8; 65]; - let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap(); - let arg_b = U256::from_dec_str("888888888888888888888888888888").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - } - ).expect("Interpreter to execute without any errors"); - - assert_eq!( - U256::from_dec_str("1888888888888888888888888888887").unwrap(), - (&result[..]).into() - ); - assert_eq!(gas_left, U256::from(92_072)); + assert_eq!( + U256::from_dec_str("1888888888888888888888888888887").unwrap(), + (&result[..]).into() + ); + assert_eq!(gas_left, U256::from(92_072)); } // multiplication #[test] fn math_mul() { - let (gas_left, result) = reqrep_test!( - "math.wasm", - { - 
let mut args = [1u8; 65]; - let arg_a = U256::from_dec_str("888888888888888888888888888888").unwrap(); - let arg_b = U256::from_dec_str("999999999999999999999999999999").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - } - ).expect("Interpreter to execute without any errors"); + let (gas_left, result) = reqrep_test!("math.wasm", { + let mut args = [1u8; 65]; + let arg_a = U256::from_dec_str("888888888888888888888888888888").unwrap(); + let arg_b = U256::from_dec_str("999999999999999999999999999999").unwrap(); + arg_a.to_big_endian(&mut args[1..33]); + arg_b.to_big_endian(&mut args[33..65]); + args.to_vec() + }) + .expect("Interpreter to execute without any errors"); - assert_eq!( - U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(), - (&result[..]).into() - ); - assert_eq!(gas_left, U256::from(91_400)); + assert_eq!( + U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(), + (&result[..]).into() + ); + assert_eq!(gas_left, U256::from(91_400)); } // subtraction #[test] fn math_sub() { - let (gas_left, result) = reqrep_test!( - "math.wasm", - { - let mut args = [2u8; 65]; - let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap(); - let arg_b = U256::from_dec_str("888888888888888888888888888888").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - } - ).expect("Interpreter to execute without any errors"); + let (gas_left, result) = reqrep_test!("math.wasm", { + let mut args = [2u8; 65]; + let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap(); + let arg_b = U256::from_dec_str("888888888888888888888888888888").unwrap(); + arg_a.to_big_endian(&mut args[1..33]); + arg_b.to_big_endian(&mut args[33..65]); + args.to_vec() + }) + .expect("Interpreter to execute without any errors"); - assert_eq!( - 
U256::from_dec_str("111111111111111111111111111111").unwrap(), - (&result[..]).into() - ); - assert_eq!(gas_left, U256::from(92_072)); + assert_eq!( + U256::from_dec_str("111111111111111111111111111111").unwrap(), + (&result[..]).into() + ); + assert_eq!(gas_left, U256::from(92_072)); } // subtraction with overflow #[test] fn math_sub_with_overflow() { - let result = reqrep_test!( - "math.wasm", - { - let mut args = [2u8; 65]; - let arg_a = U256::from_dec_str("888888888888888888888888888888").unwrap(); - let arg_b = U256::from_dec_str("999999999999999999999999999999").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - } - ); + let result = reqrep_test!("math.wasm", { + let mut args = [2u8; 65]; + let arg_a = U256::from_dec_str("888888888888888888888888888888").unwrap(); + let arg_b = U256::from_dec_str("999999999999999999999999999999").unwrap(); + arg_a.to_big_endian(&mut args[1..33]); + arg_b.to_big_endian(&mut args[33..65]); + args.to_vec() + }); - match result { - Err(vm::Error::Wasm(_)) => {}, - _ => panic!("Unexpected result {:?}", result), - } + match result { + Err(vm::Error::Wasm(_)) => {} + _ => panic!("Unexpected result {:?}", result), + } } #[test] fn math_div() { - let (gas_left, result) = reqrep_test!( - "math.wasm", - { - let mut args = [3u8; 65]; - let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap(); - let arg_b = U256::from_dec_str("888888888888888888888888").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - } - ).expect("Interpreter to execute without any errors"); + let (gas_left, result) = reqrep_test!("math.wasm", { + let mut args = [3u8; 65]; + let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap(); + let arg_b = U256::from_dec_str("888888888888888888888888").unwrap(); + arg_a.to_big_endian(&mut args[1..33]); + arg_b.to_big_endian(&mut args[33..65]); + args.to_vec() + }) + 
.expect("Interpreter to execute without any errors"); - assert_eq!( - U256::from_dec_str("1125000").unwrap(), - (&result[..]).into() - ); - assert_eq!(gas_left, U256::from(85_700)); + assert_eq!(U256::from_dec_str("1125000").unwrap(), (&result[..]).into()); + assert_eq!(gas_left, U256::from(85_700)); } #[test] fn storage_metering() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - // #1 - let mut ext = FakeExt::new().with_wasm(); + // #1 + let mut ext = FakeExt::new().with_wasm(); - let code = Arc::new(load_sample!("setter.wasm")); - let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + let code = Arc::new(load_sample!("setter.wasm")); + let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(code.clone()); - params.data = Some(vec![ - 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, - 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - ]); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(code.clone()); + params.data = Some(vec![ + 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, + 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, + 0x9d, 0x9d, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, + ]); - let gas_left = { - let mut interpreter = 
wasm_interpreter(params); - test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut interpreter = wasm_interpreter(params); + test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() + }; - // 0 -> not 0 - assert_eq!(gas_left, U256::from(72_164)); + // 0 -> not 0 + assert_eq!(gas_left, U256::from(72_164)); - // #2 + // #2 - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(code.clone()); - params.data = Some(vec![ - 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, - 0x6b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - ]); + let mut params = ActionParams::default(); + params.address = address.clone(); + params.gas = U256::from(100_000); + params.code = Some(code.clone()); + params.data = Some(vec![ + 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, + 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, + 0x9d, 0x9d, 0x6b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, + ]); - let gas_left = { - let mut interpreter = wasm_interpreter(params); - test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() - }; + let gas_left = { + let mut interpreter = wasm_interpreter(params); + test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() + }; - // not 0 -> not 0 - assert_eq!(gas_left, U256::from(87_164)); + // not 0 -> not 0 + assert_eq!(gas_left, U256::from(87_164)); } // This test checks the ability of wasm contract to invoke 
// varios blockchain runtime methods #[test] fn externs() { - let (gas_left, result) = reqrep_test!( - "externs.wasm", - Vec::new(), - vm::EnvInfo { - number: 0x9999999999u64.into(), - author: "efefefefefefefefefefefefefefefefefefefef".parse().unwrap(), - timestamp: 0x8888888888u64.into(), - difficulty: H256::from("0f1f2f3f4f5f6f7f8f9fafbfcfdfefff0d1d2d3d4d5d6d7d8d9dadbdcdddedfd").into(), - gas_limit: 0x777777777777u64.into(), - last_hashes: Default::default(), - gas_used: 0.into(), - }, - { - let mut hashes = HashMap::new(); - hashes.insert( - U256::from(0), - H256::from("9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d") - ); - hashes.insert( - U256::from(1), - H256::from("7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b") - ); - hashes - } - ).expect("Interpreter to execute without any errors"); + let (gas_left, result) = reqrep_test!( + "externs.wasm", + Vec::new(), + vm::EnvInfo { + number: 0x9999999999u64.into(), + author: "efefefefefefefefefefefefefefefefefefefef".parse().unwrap(), + timestamp: 0x8888888888u64.into(), + difficulty: H256::from( + "0f1f2f3f4f5f6f7f8f9fafbfcfdfefff0d1d2d3d4d5d6d7d8d9dadbdcdddedfd" + ) + .into(), + gas_limit: 0x777777777777u64.into(), + last_hashes: Default::default(), + gas_used: 0.into(), + }, + { + let mut hashes = HashMap::new(); + hashes.insert( + U256::from(0), + H256::from("9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d"), + ); + hashes.insert( + U256::from(1), + H256::from("7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b"), + ); + hashes + } + ) + .expect("Interpreter to execute without any errors"); - assert_eq!( - &result[0..64].to_vec(), - &vec![ - 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, - 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 
0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - ], - "Block hashes requested and returned do not match" - ); + assert_eq!( + &result[0..64].to_vec(), + &vec![ + 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, + 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, + 0x9d, 0x9d, 0x9d, 0x9d, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + ], + "Block hashes requested and returned do not match" + ); - assert_eq!( - &result[64..84].to_vec(), - &vec![ - 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, - ], - "Coinbase requested and returned does not match" - ); + assert_eq!( + &result[64..84].to_vec(), + &vec![ + 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, + 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, + ], + "Coinbase requested and returned does not match" + ); - assert_eq!( - &result[84..92].to_vec(), - &vec![ - 0x88, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00, 0x00 - ], - "Timestamp requested and returned does not match" - ); + assert_eq!( + &result[84..92].to_vec(), + &vec![0x88, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00, 0x00], + "Timestamp requested and returned does not match" + ); - assert_eq!( - &result[92..100].to_vec(), - &vec![ - 0x99, 0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00 - ], - "Block number requested and returned does not match" - ); + assert_eq!( + &result[92..100].to_vec(), + &vec![0x99, 0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00], + "Block number requested and returned does not match" + ); - assert_eq!( - &result[100..132].to_vec(), - &vec![ - 0x0f, 0x1f, 0x2f, 0x3f, 0x4f, 0x5f, 0x6f, 0x7f, - 0x8f, 0x9f, 0xaf, 0xbf, 0xcf, 0xdf, 0xef, 0xff, - 0x0d, 0x1d, 0x2d, 0x3d, 0x4d, 0x5d, 0x6d, 0x7d, - 0x8d, 0x9d, 0xad, 0xbd, 0xcd, 
0xdd, 0xed, 0xfd, - ], - "Difficulty requested and returned does not match" - ); + assert_eq!( + &result[100..132].to_vec(), + &vec![ + 0x0f, 0x1f, 0x2f, 0x3f, 0x4f, 0x5f, 0x6f, 0x7f, 0x8f, 0x9f, 0xaf, 0xbf, 0xcf, 0xdf, + 0xef, 0xff, 0x0d, 0x1d, 0x2d, 0x3d, 0x4d, 0x5d, 0x6d, 0x7d, 0x8d, 0x9d, 0xad, 0xbd, + 0xcd, 0xdd, 0xed, 0xfd, + ], + "Difficulty requested and returned does not match" + ); - assert_eq!( - &result[132..164].to_vec(), - &vec![ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, - ], - "Gas limit requested and returned does not match" - ); + assert_eq!( + &result[132..164].to_vec(), + &vec![ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, + ], + "Gas limit requested and returned does not match" + ); - assert_eq!(gas_left, U256::from(90_428)); + assert_eq!(gas_left, U256::from(90_428)); } // This test checks the ability of wasm contract to invoke gasleft #[test] fn gasleft() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("gasleft.wasm"))); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("gasleft.wasm"))); - let mut ext = FakeExt::new().with_wasm(); - ext.schedule.wasm.as_mut().unwrap().have_gasleft = true; + let mut ext = FakeExt::new().with_wasm(); + ext.schedule.wasm.as_mut().unwrap().have_gasleft = true; - let interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => {}, - GasLeft::NeedsReturn { gas_left, 
data, .. } => { - let gas = LittleEndian::read_u64(data.as_ref()); - assert_eq!(gas, 93_423); - assert_eq!(gas_left, U256::from(93_349)); - }, - } + let interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => {} + GasLeft::NeedsReturn { gas_left, data, .. } => { + let gas = LittleEndian::read_u64(data.as_ref()); + assert_eq!(gas, 93_423); + assert_eq!(gas_left, U256::from(93_349)); + } + } } // This test should fail because // ext.schedule.wasm.as_mut().unwrap().have_gasleft = false; #[test] fn gasleft_fail() { - let _ = ::env_logger::try_init(); + let _ = ::env_logger::try_init(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("gasleft.wasm"))); - let mut ext = FakeExt::new().with_wasm(); - let interpreter = wasm_interpreter(params); - match interpreter.exec(&mut ext).ok().unwrap() { - Err(_) => {}, - Ok(_) => panic!("interpreter.exec should return Err if ext.schedule.wasm.have_gasleft = false") - } + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("gasleft.wasm"))); + let mut ext = FakeExt::new().with_wasm(); + let interpreter = wasm_interpreter(params); + match interpreter.exec(&mut ext).ok().unwrap() { + Err(_) => {} + Ok(_) => { + panic!("interpreter.exec should return Err if ext.schedule.wasm.have_gasleft = false") + } + } } #[test] fn embedded_keccak() { - let _ = ::env_logger::try_init(); - let mut code = load_sample!("keccak.wasm"); - code.extend_from_slice(b"something"); + let _ = ::env_logger::try_init(); + let mut code = load_sample!("keccak.wasm"); + code.extend_from_slice(b"something"); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.params_type = vm::ParamsType::Embedded; + let mut 
params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.params_type = vm::ParamsType::Embedded; - let mut ext = FakeExt::new().with_wasm(); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("keccak should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("keccak should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + } + }; - assert_eq!(H256::from_slice(&result), H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87")); - assert_eq!(gas_left, U256::from(85_949)); + assert_eq!( + H256::from_slice(&result), + H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87") + ); + assert_eq!(gas_left, U256::from(85_949)); } /// This test checks the correctness of log extern @@ -935,72 +1094,88 @@ fn embedded_keccak() { /// and reversed input as a data #[test] fn events() { - let _ = ::env_logger::try_init(); - let code = load_sample!("events.wasm"); + let _ = ::env_logger::try_init(); + let code = load_sample!("events.wasm"); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(b"something".to_vec()); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.data = 
Some(b"something".to_vec()); - let mut ext = FakeExt::new().with_wasm(); + let mut ext = FakeExt::new().with_wasm(); - let (gas_left, result) = { - let mut interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("events should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } - }; + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter + .exec(&mut ext) + .ok() + .unwrap() + .expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { + panic!("events should return payload"); + } + GasLeft::NeedsReturn { + gas_left: gas, + data: result, + apply_state: _apply, + } => (gas, result.to_vec()), + } + }; - assert_eq!(ext.logs.len(), 1); - let log_entry = &ext.logs[0]; - assert_eq!(log_entry.topics.len(), 2); - assert_eq!(&log_entry.topics[0], &H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87")); - assert_eq!(&log_entry.topics[1], &H256::from("871d5ea37430753faab7dff7a7187783517d83bd822c02e28a164c887e1d3768")); - assert_eq!(&log_entry.data, b"gnihtemos"); + assert_eq!(ext.logs.len(), 1); + let log_entry = &ext.logs[0]; + assert_eq!(log_entry.topics.len(), 2); + assert_eq!( + &log_entry.topics[0], + &H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87") + ); + assert_eq!( + &log_entry.topics[1], + &H256::from("871d5ea37430753faab7dff7a7187783517d83bd822c02e28a164c887e1d3768") + ); + assert_eq!(&log_entry.data, b"gnihtemos"); - assert_eq!(&result, b"gnihtemos"); - assert_eq!(gas_left, U256::from(83_161)); + assert_eq!(&result, b"gnihtemos"); + assert_eq!(gas_left, U256::from(83_161)); } #[test] fn recursive() { - let _ = ::env_logger::try_init(); - let code = load_sample!("recursive.wasm"); + let _ = 
::env_logger::try_init(); + let code = load_sample!("recursive.wasm"); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000_000); - params.code = Some(Arc::new(code)); - params.data = Some({ - // `recursive` expects only one 32-bit word in LE that - // represents an iteration count. - // - // We pick a relative big number to definitely hit stack overflow. - use byteorder::WriteBytesExt; - let mut data = vec![]; - data.write_u32::(100000).unwrap(); - data - }); + let mut params = ActionParams::default(); + params.gas = U256::from(100_000_000); + params.code = Some(Arc::new(code)); + params.data = Some({ + // `recursive` expects only one 32-bit word in LE that + // represents an iteration count. + // + // We pick a relative big number to definitely hit stack overflow. + use byteorder::WriteBytesExt; + let mut data = vec![]; + data.write_u32::(100000).unwrap(); + data + }); - let mut ext = FakeExt::new().with_wasm(); + let mut ext = FakeExt::new().with_wasm(); - let interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap(); + let interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).ok().unwrap(); - // We expect that stack overflow will occur and it should be generated by - // deterministic stack metering. Exceeding deterministic stack height limit - // always ends with a trap generated by `unreachable` instruction. - match result { - Err(trap) => { - let err_description = trap.to_string(); - assert!( - err_description.contains("Unreachable"), - "err_description: {} should contain 'Unreachable'", - err_description - ); - }, - _ => panic!("this test should trap"), - } + // We expect that stack overflow will occur and it should be generated by + // deterministic stack metering. Exceeding deterministic stack height limit + // always ends with a trap generated by `unreachable` instruction. 
+ match result { + Err(trap) => { + let err_description = trap.to_string(); + assert!( + err_description.contains("Unreachable"), + "err_description: {} should contain 'Unreachable'", + err_description + ); + } + _ => panic!("this test should trap"), + } } diff --git a/evmbin/benches/mod.rs b/evmbin/benches/mod.rs index 0f38fc0dc..d5fc08de1 100644 --- a/evmbin/benches/mod.rs +++ b/evmbin/benches/mod.rs @@ -23,77 +23,76 @@ #[macro_use] extern crate criterion; extern crate ethcore; -extern crate evm; extern crate ethereum_types; +extern crate evm; extern crate rustc_hex; extern crate vm; +use criterion::{black_box, Criterion}; use std::sync::Arc; -use criterion::{Criterion, black_box}; use ethereum_types::U256; use evm::Factory; use rustc_hex::FromHex; -use vm::tests::FakeExt; -use vm::{ActionParams, Ext}; +use vm::{tests::FakeExt, ActionParams, Ext}; criterion_group!( - evmbin, - bench_simple_loop_usize, - bench_simple_loop_u256, - bench_rng_usize, - bench_rng_u256 + evmbin, + bench_simple_loop_usize, + bench_simple_loop_u256, + bench_rng_usize, + bench_rng_u256 ); criterion_main!(evmbin); fn bench_simple_loop_usize(c: &mut Criterion) { - simple_loop(U256::from(::std::usize::MAX), c, "simple_loop_usize") + simple_loop(U256::from(::std::usize::MAX), c, "simple_loop_usize") } fn bench_simple_loop_u256(c: &mut Criterion) { - simple_loop(!U256::zero(), c, "simple_loop_u256") + simple_loop(!U256::zero(), c, "simple_loop_u256") } fn simple_loop(gas: U256, c: &mut Criterion, bench_id: &str) { - let code = black_box( + let code = black_box( "606060405260005b620042408112156019575b6001016007565b600081905550600680602b6000396000f3606060405200".from_hex().unwrap() ); - c.bench_function(bench_id, move |b| { - b.iter(|| { - let mut params = ActionParams::default(); - params.gas = gas; - params.code = Some(Arc::new(code.clone())); + c.bench_function(bench_id, move |b| { + b.iter(|| { + let mut params = ActionParams::default(); + params.gas = gas; + params.code = 
Some(Arc::new(code.clone())); - let mut ext = FakeExt::new(); - let evm = Factory::default().create(params, ext.schedule(), ext.depth()); - let _ = evm.exec(&mut ext); - }) - }); + let mut ext = FakeExt::new(); + let evm = Factory::default().create(params, ext.schedule(), ext.depth()); + let _ = evm.exec(&mut ext); + }) + }); } fn bench_rng_usize(c: &mut Criterion) { - rng(U256::from(::std::usize::MAX), c, "rng_usize") + rng(U256::from(::std::usize::MAX), c, "rng_usize") } fn bench_rng_u256(c: &mut Criterion) { - rng(!U256::zero(), c, "rng_u256") + rng(!U256::zero(), c, "rng_u256") } fn rng(gas: U256, c: &mut Criterion, bench_id: &str) { - let code = black_box( + let code = black_box( "6060604052600360056007600b60005b62004240811215607f5767ffe7649d5eca84179490940267f47ed85c4b9a6379019367f8e5dd9a5c994bba9390930267f91d87e4b8b74e55019267ff97f6f3b29cda529290920267f393ada8dd75c938019167fe8d437c45bb3735830267f47d9a7b5428ffec019150600101600f565b838518831882186000555050505050600680609a6000396000f3606060405200".from_hex().unwrap() ); - c.bench_function(bench_id, move |b| { - b.iter(|| { - let mut params = ActionParams::default(); - params.gas = gas; - params.code = Some(Arc::new(code.clone())); + c.bench_function(bench_id, move |b| { + b.iter(|| { + let mut params = ActionParams::default(); + params.gas = gas; + params.code = Some(Arc::new(code.clone())); - let mut ext = FakeExt::new(); - let evm = Factory::default().create(params, ext.schedule(), ext.depth()); - let _ = evm.exec(&mut ext); - }) - }); + let mut ext = FakeExt::new(); + let evm = Factory::default().create(params, ext.schedule(), ext.depth()); + let _ = evm.exec(&mut ext); + }) + }); } diff --git a/evmbin/src/display/json.rs b/evmbin/src/display/json.rs index eec5131c7..62f0f63d4 100644 --- a/evmbin/src/display/json.rs +++ b/evmbin/src/display/json.rs @@ -16,12 +16,11 @@ //! JSON VM output. 
-use std::collections::HashMap; -use std::mem; +use std::{collections::HashMap, mem}; -use ethereum_types::{U256, H256}; use bytes::ToPretty; use ethcore::trace; +use ethereum_types::{H256, U256}; use display; use info as vm; @@ -29,292 +28,321 @@ use info as vm; /// JSON formatting informant. #[derive(Default)] pub struct Informant { - code: Vec, - depth: usize, - pc: usize, - instruction: u8, - gas_cost: U256, - gas_used: U256, - mem_written: Option<(usize, usize)>, - store_written: Option<(U256, U256)>, - stack: Vec, - memory: Vec, - storage: HashMap, - traces: Vec, - subtraces: Vec, - subinfos: Vec, - subdepth: usize, - unmatched: bool, + code: Vec, + depth: usize, + pc: usize, + instruction: u8, + gas_cost: U256, + gas_used: U256, + mem_written: Option<(usize, usize)>, + store_written: Option<(U256, U256)>, + stack: Vec, + memory: Vec, + storage: HashMap, + traces: Vec, + subtraces: Vec, + subinfos: Vec, + subdepth: usize, + unmatched: bool, } impl Informant { - fn with_informant_in_depth(informant: &mut Informant, depth: usize, f: F) { - if depth == 0 { - f(informant); - } else { - Self::with_informant_in_depth(informant.subinfos.last_mut().expect("prepare/done_trace are not balanced"), depth - 1, f); - } - } + fn with_informant_in_depth( + informant: &mut Informant, + depth: usize, + f: F, + ) { + if depth == 0 { + f(informant); + } else { + Self::with_informant_in_depth( + informant + .subinfos + .last_mut() + .expect("prepare/done_trace are not balanced"), + depth - 1, + f, + ); + } + } - fn informant_trace(informant: &Informant, gas_used: U256) -> String { - let info = ::evm::Instruction::from_u8(informant.instruction).map(|i| i.info()); + fn informant_trace(informant: &Informant, gas_used: U256) -> String { + let info = ::evm::Instruction::from_u8(informant.instruction).map(|i| i.info()); - json!({ - "pc": informant.pc, - "op": informant.instruction, - "opName": info.map(|i| i.name).unwrap_or(""), - "gas": format!("{:#x}", 
gas_used.saturating_add(informant.gas_cost)), - "gasCost": format!("{:#x}", informant.gas_cost), - "memory": format!("0x{}", informant.memory.to_hex()), - "stack": informant.stack, - "storage": informant.storage, - "depth": informant.depth, - }).to_string() - } + json!({ + "pc": informant.pc, + "op": informant.instruction, + "opName": info.map(|i| i.name).unwrap_or(""), + "gas": format!("{:#x}", gas_used.saturating_add(informant.gas_cost)), + "gasCost": format!("{:#x}", informant.gas_cost), + "memory": format!("0x{}", informant.memory.to_hex()), + "stack": informant.stack, + "storage": informant.storage, + "depth": informant.depth, + }) + .to_string() + } } impl vm::Informant for Informant { - type Sink = (); + type Sink = (); - fn before_test(&mut self, name: &str, action: &str) { - println!("{}", json!({"action": action, "test": name})); - } + fn before_test(&mut self, name: &str, action: &str) { + println!("{}", json!({"action": action, "test": name})); + } - fn set_gas(&mut self, gas: U256) { - self.gas_used = gas; - } + fn set_gas(&mut self, gas: U256) { + self.gas_used = gas; + } - fn clone_sink(&self) -> Self::Sink { () } + fn clone_sink(&self) -> Self::Sink { + () + } - fn finish(result: vm::RunResult, _sink: &mut Self::Sink) { - match result { - Ok(success) => { - for trace in success.traces.unwrap_or_else(Vec::new) { - println!("{}", trace); - } + fn finish(result: vm::RunResult, _sink: &mut Self::Sink) { + match result { + Ok(success) => { + for trace in success.traces.unwrap_or_else(Vec::new) { + println!("{}", trace); + } - let success_msg = json!({ - "output": format!("0x{}", success.output.to_hex()), - "gasUsed": format!("{:#x}", success.gas_used), - "time": display::as_micros(&success.time), - }); + let success_msg = json!({ + "output": format!("0x{}", success.output.to_hex()), + "gasUsed": format!("{:#x}", success.gas_used), + "time": display::as_micros(&success.time), + }); - println!("{}", success_msg) - }, - Err(failure) => { - for trace in 
failure.traces.unwrap_or_else(Vec::new) { - println!("{}", trace); - } + println!("{}", success_msg) + } + Err(failure) => { + for trace in failure.traces.unwrap_or_else(Vec::new) { + println!("{}", trace); + } - let failure_msg = json!({ - "error": &failure.error.to_string(), - "gasUsed": format!("{:#x}", failure.gas_used), - "time": display::as_micros(&failure.time), - }); + let failure_msg = json!({ + "error": &failure.error.to_string(), + "gasUsed": format!("{:#x}", failure.gas_used), + "time": display::as_micros(&failure.time), + }); - println!("{}", failure_msg) - }, - } - } + println!("{}", failure_msg) + } + } + } } impl trace::VMTracer for Informant { - type Output = Vec; + type Output = Vec; - fn trace_next_instruction(&mut self, pc: usize, instruction: u8, _current_gas: U256) -> bool { - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - informant.pc = pc; - informant.instruction = instruction; - informant.unmatched = true; - }); - true - } + fn trace_next_instruction(&mut self, pc: usize, instruction: u8, _current_gas: U256) -> bool { + let subdepth = self.subdepth; + Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + informant.pc = pc; + informant.instruction = instruction; + informant.unmatched = true; + }); + true + } - fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: U256, mem_written: Option<(usize, usize)>, store_written: Option<(U256, U256)>) { - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - informant.pc = pc; - informant.instruction = instruction; - informant.gas_cost = gas_cost; - informant.mem_written = mem_written; - informant.store_written = store_written; - }); - } + fn trace_prepare_execute( + &mut self, + pc: usize, + instruction: u8, + gas_cost: U256, + mem_written: Option<(usize, usize)>, + store_written: Option<(U256, U256)>, + ) { + let subdepth = self.subdepth; 
+ Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + informant.pc = pc; + informant.instruction = instruction; + informant.gas_cost = gas_cost; + informant.mem_written = mem_written; + informant.store_written = store_written; + }); + } - fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem: &[u8]) { - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - let store_diff = informant.store_written.clone(); - let info = ::evm::Instruction::from_u8(informant.instruction).map(|i| i.info()); + fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem: &[u8]) { + let subdepth = self.subdepth; + Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + let store_diff = informant.store_written.clone(); + let info = ::evm::Instruction::from_u8(informant.instruction).map(|i| i.info()); - let trace = Self::informant_trace(informant, gas_used); - informant.traces.push(trace); + let trace = Self::informant_trace(informant, gas_used); + informant.traces.push(trace); - informant.unmatched = false; - informant.gas_used = gas_used; + informant.unmatched = false; + informant.gas_used = gas_used; - let len = informant.stack.len(); - let info_args = info.map(|i| i.args).unwrap_or(0); - informant.stack.truncate(if len > info_args { len - info_args } else { 0 }); - informant.stack.extend_from_slice(stack_push); + let len = informant.stack.len(); + let info_args = info.map(|i| i.args).unwrap_or(0); + informant + .stack + .truncate(if len > info_args { len - info_args } else { 0 }); + informant.stack.extend_from_slice(stack_push); - // TODO [ToDr] Align memory? - if let Some((pos, size)) = informant.mem_written.clone() { - if informant.memory.len() < (pos + size) { - informant.memory.resize(pos + size, 0); - } - informant.memory[pos..(pos + size)].copy_from_slice(&mem[pos..(pos + size)]); - } + // TODO [ToDr] Align memory? 
+ if let Some((pos, size)) = informant.mem_written.clone() { + if informant.memory.len() < (pos + size) { + informant.memory.resize(pos + size, 0); + } + informant.memory[pos..(pos + size)].copy_from_slice(&mem[pos..(pos + size)]); + } - if let Some((pos, val)) = store_diff { - informant.storage.insert(pos.into(), val.into()); - } + if let Some((pos, val)) = store_diff { + informant.storage.insert(pos.into(), val.into()); + } - if !informant.subtraces.is_empty() { - informant.traces.extend(mem::replace(&mut informant.subtraces, vec![])); - } - }); - } + if !informant.subtraces.is_empty() { + informant + .traces + .extend(mem::replace(&mut informant.subtraces, vec![])); + } + }); + } - fn prepare_subtrace(&mut self, code: &[u8]) { - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - let mut vm = Informant::default(); - vm.depth = informant.depth + 1; - vm.code = code.to_vec(); - vm.gas_used = informant.gas_used; - informant.subinfos.push(vm); - }); - self.subdepth += 1; - } + fn prepare_subtrace(&mut self, code: &[u8]) { + let subdepth = self.subdepth; + Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + let mut vm = Informant::default(); + vm.depth = informant.depth + 1; + vm.code = code.to_vec(); + vm.gas_used = informant.gas_used; + informant.subinfos.push(vm); + }); + self.subdepth += 1; + } - fn done_subtrace(&mut self) { - self.subdepth -= 1; - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - if let Some(subtraces) = informant.subinfos.pop().expect("prepare/done_subtrace are not balanced").drain() { - informant.subtraces.extend(subtraces); - } - }); - } + fn done_subtrace(&mut self) { + self.subdepth -= 1; + let subdepth = self.subdepth; + Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + if let Some(subtraces) = informant + .subinfos + .pop() + .expect("prepare/done_subtrace are not 
balanced") + .drain() + { + informant.subtraces.extend(subtraces); + } + }); + } - fn drain(mut self) -> Option { - if self.unmatched { - // print last line with final state: - self.gas_cost = 0.into(); - let gas_used = self.gas_used; - let subdepth = self.subdepth; + fn drain(mut self) -> Option { + if self.unmatched { + // print last line with final state: + self.gas_cost = 0.into(); + let gas_used = self.gas_used; + let subdepth = self.subdepth; - Self::with_informant_in_depth(&mut self, subdepth, |informant: &mut Informant| { - let trace = Self::informant_trace(informant, gas_used); - informant.traces.push(trace); - }); - } else if !self.subtraces.is_empty() { - self.traces.extend(mem::replace(&mut self.subtraces, vec![])); - } - Some(self.traces) - } + Self::with_informant_in_depth(&mut self, subdepth, |informant: &mut Informant| { + let trace = Self::informant_trace(informant, gas_used); + informant.traces.push(trace); + }); + } else if !self.subtraces.is_empty() { + self.traces + .extend(mem::replace(&mut self.subtraces, vec![])); + } + Some(self.traces) + } } #[cfg(test)] mod tests { - use super::*; - use info::tests::run_test; - use serde_json; + use super::*; + use info::tests::run_test; + use serde_json; - #[derive(Serialize, Deserialize, Debug, PartialEq)] - #[serde(rename_all = "camelCase")] - struct TestTrace { - pc: usize, - #[serde(rename = "op")] - instruction: u8, - op_name: String, - #[serde(rename = "gas")] - gas_used: U256, - gas_cost: U256, - memory: String, - stack: Vec, - storage: HashMap, - depth: usize, - } + #[derive(Serialize, Deserialize, Debug, PartialEq)] + #[serde(rename_all = "camelCase")] + struct TestTrace { + pc: usize, + #[serde(rename = "op")] + instruction: u8, + op_name: String, + #[serde(rename = "gas")] + gas_used: U256, + gas_cost: U256, + memory: String, + stack: Vec, + storage: HashMap, + depth: usize, + } - fn assert_traces_eq( - a: &[String], - b: &[String], - ) { - let mut ita = a.iter(); - let mut itb = b.iter(); + 
fn assert_traces_eq(a: &[String], b: &[String]) { + let mut ita = a.iter(); + let mut itb = b.iter(); - loop { - match (ita.next(), itb.next()) { - (Some(a), Some(b)) => { - // Compare both without worrying about the order of the fields - let actual: TestTrace = serde_json::from_str(a).unwrap(); - let expected: TestTrace = serde_json::from_str(b).unwrap(); - assert_eq!(actual, expected); - println!("{}", a); - }, - (None, None) => return, - e => { - panic!("Traces mismatch: {:?}", e); - } - } - } - } + loop { + match (ita.next(), itb.next()) { + (Some(a), Some(b)) => { + // Compare both without worrying about the order of the fields + let actual: TestTrace = serde_json::from_str(a).unwrap(); + let expected: TestTrace = serde_json::from_str(b).unwrap(); + assert_eq!(actual, expected); + println!("{}", a); + } + (None, None) => return, + e => { + panic!("Traces mismatch: {:?}", e); + } + } + } + } - fn compare_json(traces: Option>, expected: &str) { - let expected = expected.split("\n") - .map(|x| x.trim()) - .map(|x| x.to_owned()) - .filter(|x| !x.is_empty()) - .collect::>(); - assert_traces_eq(&traces.unwrap(), &expected); - } + fn compare_json(traces: Option>, expected: &str) { + let expected = expected + .split("\n") + .map(|x| x.trim()) + .map(|x| x.to_owned()) + .filter(|x| !x.is_empty()) + .collect::>(); + assert_traces_eq(&traces.unwrap(), &expected); + } - #[test] - fn should_trace_failure() { - run_test( - Informant::default(), - &compare_json, - "60F8d6", - 0xffff, - r#" + #[test] + fn should_trace_failure() { + run_test( + Informant::default(), + &compare_json, + "60F8d6", + 0xffff, + r#" {"pc":0,"op":96,"opName":"PUSH1","gas":"0xffff","gasCost":"0x3","memory":"0x","stack":[],"storage":{},"depth":1} {"pc":2,"op":214,"opName":"","gas":"0xfffc","gasCost":"0x0","memory":"0x","stack":["0xf8"],"storage":{},"depth":1} "#, - ); + ); - run_test( - Informant::default(), - &compare_json, - "F8d6", - 0xffff, - r#" + run_test( + Informant::default(), + &compare_json, 
+ "F8d6", + 0xffff, + r#" {"pc":0,"op":248,"opName":"","gas":"0xffff","gasCost":"0x0","memory":"0x","stack":[],"storage":{},"depth":1} "#, - ); + ); - run_test( - Informant::default(), - &compare_json, - "5A51", - 0xfffff, - r#" + run_test( + Informant::default(), + &compare_json, + "5A51", + 0xfffff, + r#" {"depth":1,"gas":"0xfffff","gasCost":"0x2","memory":"0x","op":90,"opName":"GAS","pc":0,"stack":[],"storage":{}} {"depth":1,"gas":"0xffffd","gasCost":"0x0","memory":"0x","op":81,"opName":"MLOAD","pc":1,"stack":["0xffffd"],"storage":{}} "#, - ); - } + ); + } - #[test] - fn should_trace_create_correctly() { - run_test( - Informant::default(), - &compare_json, - "32343434345830f138343438323439f0", - 0xffff, - r#" + #[test] + fn should_trace_create_correctly() { + run_test( + Informant::default(), + &compare_json, + "32343434345830f138343438323439f0", + 0xffff, + r#" {"pc":0,"op":50,"opName":"ORIGIN","gas":"0xffff","gasCost":"0x2","memory":"0x","stack":[],"storage":{},"depth":1} {"pc":1,"op":52,"opName":"CALLVALUE","gas":"0xfffd","gasCost":"0x2","memory":"0x","stack":["0x0"],"storage":{},"depth":1} {"pc":2,"op":52,"opName":"CALLVALUE","gas":"0xfffb","gasCost":"0x2","memory":"0x","stack":["0x0","0x0"],"storage":{},"depth":1} @@ -340,19 +368,19 @@ mod tests { {"pc":6,"op":48,"opName":"ADDRESS","gas":"0x2100","gasCost":"0x2","memory":"0x","stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{},"depth":2} {"pc":7,"op":241,"opName":"CALL","gas":"0x20fe","gasCost":"0x0","memory":"0x","stack":["0x0","0x0","0x0","0x0","0x0","0x5","0xbd770416a3345f91e4b34576cb804a576fa48eb1"],"storage":{},"depth":2} "#, - ); + ); - run_test( - Informant::default(), - &compare_json, - "3260D85554", - 0xffff, - r#" + run_test( + Informant::default(), + &compare_json, + "3260D85554", + 0xffff, + r#" {"pc":0,"op":50,"opName":"ORIGIN","gas":"0xffff","gasCost":"0x2","memory":"0x","stack":[],"storage":{},"depth":1} 
{"pc":1,"op":96,"opName":"PUSH1","gas":"0xfffd","gasCost":"0x3","memory":"0x","stack":["0x0"],"storage":{},"depth":1} {"pc":3,"op":85,"opName":"SSTORE","gas":"0xfffa","gasCost":"0x1388","memory":"0x","stack":["0x0","0xd8"],"storage":{},"depth":1} {"pc":4,"op":84,"opName":"SLOAD","gas":"0xec72","gasCost":"0x0","memory":"0x","stack":[],"storage":{"0x00000000000000000000000000000000000000000000000000000000000000d8":"0x0000000000000000000000000000000000000000000000000000000000000000"},"depth":1} "#, - ) - } + ) + } } diff --git a/evmbin/src/display/mod.rs b/evmbin/src/display/mod.rs index 32b45c569..251545ad6 100644 --- a/evmbin/src/display/mod.rs +++ b/evmbin/src/display/mod.rs @@ -19,15 +19,15 @@ use std::time::Duration; pub mod json; -pub mod std_json; pub mod simple; +pub mod std_json; /// Formats duration into human readable format. pub fn format_time(time: &Duration) -> String { - format!("{}.{:.9}s", time.as_secs(), time.subsec_nanos()) + format!("{}.{:.9}s", time.as_secs(), time.subsec_nanos()) } /// Formats the time as microseconds. pub fn as_micros(time: &Duration) -> u64 { - time.as_secs() * 1_000_000 + time.subsec_nanos() as u64 / 1_000 + time.as_secs() * 1_000_000 + time.subsec_nanos() as u64 / 1_000 } diff --git a/evmbin/src/display/simple.rs b/evmbin/src/display/simple.rs index 58d4a7045..eb64c220a 100644 --- a/evmbin/src/display/simple.rs +++ b/evmbin/src/display/simple.rs @@ -16,8 +16,8 @@ //! Simple VM output. 
-use ethcore::trace; use bytes::ToPretty; +use ethcore::trace; use display; use info as vm; @@ -27,34 +27,39 @@ use info as vm; pub struct Informant; impl vm::Informant for Informant { + type Sink = (); - type Sink = (); + fn before_test(&mut self, name: &str, action: &str) { + println!("Test: {} ({})", name, action); + } - fn before_test(&mut self, name: &str, action: &str) { - println!("Test: {} ({})", name, action); - } + fn clone_sink(&self) -> Self::Sink { + () + } - fn clone_sink(&self) -> Self::Sink { () } - - fn finish(result: vm::RunResult, _sink: &mut Self::Sink) { - match result { - Ok(success) => { - println!("Output: 0x{}", success.output.to_hex()); - println!("Gas used: {:x}", success.gas_used); - println!("Time: {}", display::format_time(&success.time)); - }, - Err(failure) => { - println!("Error: {}", failure.error); - println!("Time: {}", display::format_time(&failure.time)); - }, - } - } + fn finish(result: vm::RunResult, _sink: &mut Self::Sink) { + match result { + Ok(success) => { + println!("Output: 0x{}", success.output.to_hex()); + println!("Gas used: {:x}", success.gas_used); + println!("Time: {}", display::format_time(&success.time)); + } + Err(failure) => { + println!("Error: {}", failure.error); + println!("Time: {}", display::format_time(&failure.time)); + } + } + } } impl trace::VMTracer for Informant { - type Output = (); + type Output = (); - fn prepare_subtrace(&mut self, _code: &[u8]) { Default::default() } - fn done_subtrace(&mut self) {} - fn drain(self) -> Option<()> { None } + fn prepare_subtrace(&mut self, _code: &[u8]) { + Default::default() + } + fn done_subtrace(&mut self) {} + fn drain(self) -> Option<()> { + None + } } diff --git a/evmbin/src/display/std_json.rs b/evmbin/src/display/std_json.rs index a0071b174..f035649b4 100644 --- a/evmbin/src/display/std_json.rs +++ b/evmbin/src/display/std_json.rs @@ -16,300 +16,328 @@ //! Standardized JSON VM output. 
-use std::collections::HashMap; -use std::io; +use std::{collections::HashMap, io}; -use ethereum_types::{H256, U256}; use bytes::ToPretty; -use ethcore::{trace, pod_state}; +use ethcore::{pod_state, trace}; +use ethereum_types::{H256, U256}; use display; use info as vm; pub trait Writer: io::Write + Send + Sized { - fn clone(&self) -> Self; - fn default() -> Self; + fn clone(&self) -> Self; + fn default() -> Self; } impl Writer for io::Stdout { - fn clone(&self) -> Self { - io::stdout() - } + fn clone(&self) -> Self { + io::stdout() + } - fn default() -> Self { - io::stdout() - } + fn default() -> Self { + io::stdout() + } } impl Writer for io::Stderr { - fn clone(&self) -> Self { - io::stderr() - } + fn clone(&self) -> Self { + io::stderr() + } - fn default() -> Self { - io::stderr() - } + fn default() -> Self { + io::stderr() + } } /// JSON formatting informant. pub struct Informant { - code: Vec, - instruction: u8, - depth: usize, - stack: Vec, - storage: HashMap, - subinfos: Vec>, - subdepth: usize, - trace_sink: Trace, - out_sink: Out, + code: Vec, + instruction: u8, + depth: usize, + stack: Vec, + storage: HashMap, + subinfos: Vec>, + subdepth: usize, + trace_sink: Trace, + out_sink: Out, } impl Default for Informant { - fn default() -> Self { - Self::new(io::stderr(), io::stdout()) - } + fn default() -> Self { + Self::new(io::stderr(), io::stdout()) + } } impl Informant { - /// std json informant using out only. - pub fn out_only() -> Self { - Self::new(io::stdout(), io::stdout()) - } + /// std json informant using out only. + pub fn out_only() -> Self { + Self::new(io::stdout(), io::stdout()) + } } impl Informant { - /// std json informant using err only. - pub fn err_only() -> Self { - Self::new(io::stderr(), io::stderr()) - } + /// std json informant using err only. 
+ pub fn err_only() -> Self { + Self::new(io::stderr(), io::stderr()) + } } impl Informant { + pub fn new(trace_sink: Trace, out_sink: Out) -> Self { + Informant { + code: Default::default(), + instruction: Default::default(), + depth: Default::default(), + stack: Default::default(), + storage: Default::default(), + subinfos: Default::default(), + subdepth: 0, + trace_sink, + out_sink, + } + } - pub fn new(trace_sink: Trace, out_sink: Out) -> Self { - Informant { - code: Default::default(), - instruction: Default::default(), - depth: Default::default(), - stack: Default::default(), - storage: Default::default(), - subinfos: Default::default(), - subdepth: 0, - trace_sink, out_sink - } - } - - fn with_informant_in_depth)>(informant: &mut Informant, depth: usize, f: F) { - if depth == 0 { - f(informant); - } else { - Self::with_informant_in_depth(informant.subinfos.last_mut().expect("prepare/done_trace are not balanced"), depth - 1, f); - } - } - - fn dump_state_into(trace_sink: &mut Trace, root: H256, end_state: &Option) { - if let Some(ref end_state) = end_state { - let dump_data = json!({ - "root": root, - "accounts": end_state, - }); - writeln!(trace_sink, "{}", dump_data).expect("The sink must be writeable."); - } - } + fn with_informant_in_depth)>( + informant: &mut Informant, + depth: usize, + f: F, + ) { + if depth == 0 { + f(informant); + } else { + Self::with_informant_in_depth( + informant + .subinfos + .last_mut() + .expect("prepare/done_trace are not balanced"), + depth - 1, + f, + ); + } + } + fn dump_state_into( + trace_sink: &mut Trace, + root: H256, + end_state: &Option, + ) { + if let Some(ref end_state) = end_state { + let dump_data = json!({ + "root": root, + "accounts": end_state, + }); + writeln!(trace_sink, "{}", dump_data).expect("The sink must be writeable."); + } + } } impl vm::Informant for Informant { + type Sink = (Trace, Out); - type Sink = (Trace, Out); + fn before_test(&mut self, name: &str, action: &str) { + let out_data = json!({ + 
"action": action, + "test": name, + }); - fn before_test(&mut self, name: &str, action: &str) { - let out_data = json!({ - "action": action, - "test": name, - }); + writeln!(&mut self.out_sink, "{}", out_data).expect("The sink must be writeable."); + } - writeln!(&mut self.out_sink, "{}", out_data).expect("The sink must be writeable."); - } + fn set_gas(&mut self, _gas: U256) {} - fn set_gas(&mut self, _gas: U256) {} + fn clone_sink(&self) -> Self::Sink { + (self.trace_sink.clone(), self.out_sink.clone()) + } + fn finish( + result: vm::RunResult<::Output>, + (ref mut trace_sink, ref mut out_sink): &mut Self::Sink, + ) { + match result { + Ok(success) => { + let trace_data = json!({"stateRoot": success.state_root}); + writeln!(trace_sink, "{}", trace_data).expect("The sink must be writeable."); - fn clone_sink(&self) -> Self::Sink { - (self.trace_sink.clone(), self.out_sink.clone()) - } - fn finish(result: vm::RunResult<::Output>, (ref mut trace_sink, ref mut out_sink): &mut Self::Sink) { + Self::dump_state_into(trace_sink, success.state_root, &success.end_state); - match result { - Ok(success) => { - let trace_data = json!({"stateRoot": success.state_root}); - writeln!(trace_sink, "{}", trace_data) - .expect("The sink must be writeable."); + let out_data = json!({ + "output": format!("0x{}", success.output.to_hex()), + "gasUsed": format!("{:#x}", success.gas_used), + "time": display::as_micros(&success.time), + }); - Self::dump_state_into(trace_sink, success.state_root, &success.end_state); + writeln!(out_sink, "{}", out_data).expect("The sink must be writeable."); + } + Err(failure) => { + let out_data = json!({ + "error": &failure.error.to_string(), + "gasUsed": format!("{:#x}", failure.gas_used), + "time": display::as_micros(&failure.time), + }); - let out_data = json!({ - "output": format!("0x{}", success.output.to_hex()), - "gasUsed": format!("{:#x}", success.gas_used), - "time": display::as_micros(&success.time), - }); + Self::dump_state_into(trace_sink, 
failure.state_root, &failure.end_state); - writeln!(out_sink, "{}", out_data).expect("The sink must be writeable."); - }, - Err(failure) => { - let out_data = json!({ - "error": &failure.error.to_string(), - "gasUsed": format!("{:#x}", failure.gas_used), - "time": display::as_micros(&failure.time), - }); - - Self::dump_state_into(trace_sink, failure.state_root, &failure.end_state); - - writeln!(out_sink, "{}", out_data).expect("The sink must be writeable."); - }, - } - } + writeln!(out_sink, "{}", out_data).expect("The sink must be writeable."); + } + } + } } impl trace::VMTracer for Informant { - type Output = (); + type Output = (); - fn trace_next_instruction(&mut self, pc: usize, instruction: u8, current_gas: U256) -> bool { - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - let info = ::evm::Instruction::from_u8(instruction).map(|i| i.info()); - informant.instruction = instruction; - let trace_data = json!({ - "pc": pc, - "op": instruction, - "opName": info.map(|i| i.name).unwrap_or(""), - "gas": format!("{:#x}", current_gas), - "stack": informant.stack, - "storage": informant.storage, - "depth": informant.depth, - }); + fn trace_next_instruction(&mut self, pc: usize, instruction: u8, current_gas: U256) -> bool { + let subdepth = self.subdepth; + Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + let info = ::evm::Instruction::from_u8(instruction).map(|i| i.info()); + informant.instruction = instruction; + let trace_data = json!({ + "pc": pc, + "op": instruction, + "opName": info.map(|i| i.name).unwrap_or(""), + "gas": format!("{:#x}", current_gas), + "stack": informant.stack, + "storage": informant.storage, + "depth": informant.depth, + }); - writeln!(&mut informant.trace_sink, "{}", trace_data).expect("The sink must be writeable."); - }); - true - } + writeln!(&mut informant.trace_sink, "{}", trace_data) + .expect("The sink must be writeable."); + }); + true + } - fn 
trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: U256, _mem_written: Option<(usize, usize)>, store_written: Option<(U256, U256)>) { - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - if let Some((pos, val)) = store_written { - informant.storage.insert(pos.into(), val.into()); - } - }); - } + fn trace_prepare_execute( + &mut self, + _pc: usize, + _instruction: u8, + _gas_cost: U256, + _mem_written: Option<(usize, usize)>, + store_written: Option<(U256, U256)>, + ) { + let subdepth = self.subdepth; + Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + if let Some((pos, val)) = store_written { + informant.storage.insert(pos.into(), val.into()); + } + }); + } - fn trace_executed(&mut self, _gas_used: U256, stack_push: &[U256], _mem: &[u8]) { - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - let info = ::evm::Instruction::from_u8(informant.instruction).map(|i| i.info()); + fn trace_executed(&mut self, _gas_used: U256, stack_push: &[U256], _mem: &[u8]) { + let subdepth = self.subdepth; + Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + let info = ::evm::Instruction::from_u8(informant.instruction).map(|i| i.info()); - let len = informant.stack.len(); - let info_args = info.map(|i| i.args).unwrap_or(0); - informant.stack.truncate(if len > info_args { len - info_args } else { 0 }); - informant.stack.extend_from_slice(stack_push); - }); - } + let len = informant.stack.len(); + let info_args = info.map(|i| i.args).unwrap_or(0); + informant + .stack + .truncate(if len > info_args { len - info_args } else { 0 }); + informant.stack.extend_from_slice(stack_push); + }); + } - fn prepare_subtrace(&mut self, code: &[u8]) { - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - let mut vm = 
Informant::new(informant.trace_sink.clone(), informant.out_sink.clone()); - vm.depth = informant.depth + 1; - vm.code = code.to_vec(); - informant.subinfos.push(vm); - }); - self.subdepth += 1; - } + fn prepare_subtrace(&mut self, code: &[u8]) { + let subdepth = self.subdepth; + Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + let mut vm = Informant::new(informant.trace_sink.clone(), informant.out_sink.clone()); + vm.depth = informant.depth + 1; + vm.code = code.to_vec(); + informant.subinfos.push(vm); + }); + self.subdepth += 1; + } - fn done_subtrace(&mut self) { - self.subdepth -= 1; - let subdepth = self.subdepth; - Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { - informant.subinfos.pop(); - }); - } - - fn drain(self) -> Option { None } + fn done_subtrace(&mut self) { + self.subdepth -= 1; + let subdepth = self.subdepth; + Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { + informant.subinfos.pop(); + }); + } + fn drain(self) -> Option { + None + } } #[cfg(test)] pub mod tests { - use std::sync::{Arc, Mutex}; - use super::*; - use info::tests::run_test; + use super::*; + use info::tests::run_test; + use std::sync::{Arc, Mutex}; - #[derive(Debug, Clone, Default)] - pub struct TestWriter(pub Arc>>); + #[derive(Debug, Clone, Default)] + pub struct TestWriter(pub Arc>>); - impl Writer for TestWriter { - fn clone(&self) -> Self { Clone::clone(self) } - fn default() -> Self { Default::default() } - } + impl Writer for TestWriter { + fn clone(&self) -> Self { + Clone::clone(self) + } + fn default() -> Self { + Default::default() + } + } - impl io::Write for TestWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.lock().unwrap().write(buf) - } + impl io::Write for TestWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.0.lock().unwrap().write(buf) + } - fn flush(&mut self) -> io::Result<()> { - self.0.lock().unwrap().flush() - } - } + fn flush(&mut self) -> 
io::Result<()> { + self.0.lock().unwrap().flush() + } + } - pub fn informant() -> (Informant, Arc>>) { - let trace_writer: TestWriter = Default::default(); - let out_writer: TestWriter = Default::default(); - let res = trace_writer.0.clone(); - (Informant::new(trace_writer, out_writer), res) - } + pub fn informant() -> (Informant, Arc>>) { + let trace_writer: TestWriter = Default::default(); + let out_writer: TestWriter = Default::default(); + let res = trace_writer.0.clone(); + (Informant::new(trace_writer, out_writer), res) + } - #[test] - fn should_trace_failure() { - let (inf, res) = informant(); - run_test( - inf, - move |_, expected| { - let bytes = res.lock().unwrap(); - assert_eq!(expected, &String::from_utf8_lossy(&**bytes)) - }, - "60F8d6", - 0xffff, - r#"{"depth":1,"gas":"0xffff","op":96,"opName":"PUSH1","pc":0,"stack":[],"storage":{}} + #[test] + fn should_trace_failure() { + let (inf, res) = informant(); + run_test( + inf, + move |_, expected| { + let bytes = res.lock().unwrap(); + assert_eq!(expected, &String::from_utf8_lossy(&**bytes)) + }, + "60F8d6", + 0xffff, + r#"{"depth":1,"gas":"0xffff","op":96,"opName":"PUSH1","pc":0,"stack":[],"storage":{}} {"depth":1,"gas":"0xfffc","op":214,"opName":"","pc":2,"stack":["0xf8"],"storage":{}} "#, - ); + ); - let (inf, res) = informant(); - run_test( - inf, - move |_, expected| { - let bytes = res.lock().unwrap(); - assert_eq!(expected, &String::from_utf8_lossy(&**bytes)) - }, - "F8d6", - 0xffff, - r#"{"depth":1,"gas":"0xffff","op":248,"opName":"","pc":0,"stack":[],"storage":{}} + let (inf, res) = informant(); + run_test( + inf, + move |_, expected| { + let bytes = res.lock().unwrap(); + assert_eq!(expected, &String::from_utf8_lossy(&**bytes)) + }, + "F8d6", + 0xffff, + r#"{"depth":1,"gas":"0xffff","op":248,"opName":"","pc":0,"stack":[],"storage":{}} "#, - ); - } + ); + } - #[test] - fn should_trace_create_correctly() { - let (informant, res) = informant(); - run_test( - informant, - move |_, expected| { - let 
bytes = res.lock().unwrap(); - assert_eq!(expected, &String::from_utf8_lossy(&**bytes)) - }, - "32343434345830f138343438323439f0", - 0xffff, - r#"{"depth":1,"gas":"0xffff","op":50,"opName":"ORIGIN","pc":0,"stack":[],"storage":{}} + #[test] + fn should_trace_create_correctly() { + let (informant, res) = informant(); + run_test( + informant, + move |_, expected| { + let bytes = res.lock().unwrap(); + assert_eq!(expected, &String::from_utf8_lossy(&**bytes)) + }, + "32343434345830f138343438323439f0", + 0xffff, + r#"{"depth":1,"gas":"0xffff","op":50,"opName":"ORIGIN","pc":0,"stack":[],"storage":{}} {"depth":1,"gas":"0xfffd","op":52,"opName":"CALLVALUE","pc":1,"stack":["0x0"],"storage":{}} {"depth":1,"gas":"0xfffb","op":52,"opName":"CALLVALUE","pc":2,"stack":["0x0","0x0"],"storage":{}} {"depth":1,"gas":"0xfff9","op":52,"opName":"CALLVALUE","pc":3,"stack":["0x0","0x0","0x0"],"storage":{}} @@ -334,6 +362,6 @@ pub mod tests { {"depth":2,"gas":"0x2100","op":48,"opName":"ADDRESS","pc":6,"stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{}} {"depth":2,"gas":"0x20fe","op":241,"opName":"CALL","pc":7,"stack":["0x0","0x0","0x0","0x0","0x0","0x5","0xbd770416a3345f91e4b34576cb804a576fa48eb1"],"storage":{}} "#, - ) - } + ) + } } diff --git a/evmbin/src/info.rs b/evmbin/src/info.rs index 74ea3175a..9e4d14fa3 100644 --- a/evmbin/src/info.rs +++ b/evmbin/src/info.rs @@ -16,60 +16,62 @@ //! VM runner. 
-use std::time::{Instant, Duration}; +use ethcore::{ + client::{self, EvmTestClient, EvmTestError, TransactErr, TransactSuccess}, + pod_state, spec, state, state_db, trace, TrieSpec, +}; use ethereum_types::{H256, U256}; -use ethcore::client::{self, EvmTestClient, EvmTestError, TransactErr, TransactSuccess}; -use ethcore::{state, state_db, trace, spec, pod_state, TrieSpec}; use ethjson; +use std::time::{Duration, Instant}; use types::transaction; use vm::ActionParams; /// VM execution informant pub trait Informant: trace::VMTracer { - /// Sink to use with finish - type Sink; - /// Display a single run init message - fn before_test(&mut self, test: &str, action: &str); - /// Set initial gas. - fn set_gas(&mut self, _gas: U256) {} - /// Clone sink. - fn clone_sink(&self) -> Self::Sink; - /// Display final result. - fn finish(result: RunResult, &mut Self::Sink); + /// Sink to use with finish + type Sink; + /// Display a single run init message + fn before_test(&mut self, test: &str, action: &str); + /// Set initial gas. + fn set_gas(&mut self, _gas: U256) {} + /// Clone sink. + fn clone_sink(&self) -> Self::Sink; + /// Display final result. 
+ fn finish(result: RunResult, &mut Self::Sink); } /// Execution finished correctly #[derive(Debug)] pub struct Success { - /// State root - pub state_root: H256, - /// Used gas - pub gas_used: U256, - /// Output as bytes - pub output: Vec, - /// Time Taken - pub time: Duration, - /// Traces - pub traces: Option, - /// Optional end state dump - pub end_state: Option, + /// State root + pub state_root: H256, + /// Used gas + pub gas_used: U256, + /// Output as bytes + pub output: Vec, + /// Time Taken + pub time: Duration, + /// Traces + pub traces: Option, + /// Optional end state dump + pub end_state: Option, } /// Execution failed #[derive(Debug)] pub struct Failure { - /// State root - pub state_root: H256, - /// Used gas - pub gas_used: U256, - /// Internal error - pub error: EvmTestError, - /// Duration - pub time: Duration, - /// Traces - pub traces: Option, - /// Optional end state dump - pub end_state: Option, + /// State root + pub state_root: H256, + /// Used gas + pub gas_used: U256, + /// Internal error + pub error: EvmTestError, + /// Duration + pub time: Duration, + /// Traces + pub traces: Option, + /// Optional end state dump + pub end_state: Option, } /// EVM Execution result @@ -77,185 +79,227 @@ pub type RunResult = Result, Failure>; /// Execute given `ActionParams` and return the result. pub fn run_action( - spec: &spec::Spec, - mut params: ActionParams, - mut informant: T, - trie_spec: TrieSpec, + spec: &spec::Spec, + mut params: ActionParams, + mut informant: T, + trie_spec: TrieSpec, ) -> RunResult { - informant.set_gas(params.gas); + informant.set_gas(params.gas); - // if the code is not overwritten from CLI, use code from spec file. 
- if params.code.is_none() { - if let Some(acc) = spec.genesis_state().get().get(¶ms.code_address) { - params.code = acc.code.clone().map(::std::sync::Arc::new); - params.code_hash = None; - } - } - run(spec, trie_spec, params.gas, spec.genesis_state(), |mut client| { - let result = match client.call(params, &mut trace::NoopTracer, &mut informant) { - Ok(r) => (Ok(r.return_data.to_vec()), Some(r.gas_left)), - Err(err) => (Err(err), None), - }; - (result.0, 0.into(), None, result.1, informant.drain()) - }) + // if the code is not overwritten from CLI, use code from spec file. + if params.code.is_none() { + if let Some(acc) = spec.genesis_state().get().get(¶ms.code_address) { + params.code = acc.code.clone().map(::std::sync::Arc::new); + params.code_hash = None; + } + } + run( + spec, + trie_spec, + params.gas, + spec.genesis_state(), + |mut client| { + let result = match client.call(params, &mut trace::NoopTracer, &mut informant) { + Ok(r) => (Ok(r.return_data.to_vec()), Some(r.gas_left)), + Err(err) => (Err(err), None), + }; + (result.0, 0.into(), None, result.1, informant.drain()) + }, + ) } /// Execute given Transaction and verify resulting state root. 
pub fn run_transaction( - name: &str, - idx: usize, - spec: ðjson::spec::ForkSpec, - pre_state: &pod_state::PodState, - post_root: H256, - env_info: &client::EnvInfo, - transaction: transaction::SignedTransaction, - mut informant: T, - trie_spec: TrieSpec, + name: &str, + idx: usize, + spec: ðjson::spec::ForkSpec, + pre_state: &pod_state::PodState, + post_root: H256, + env_info: &client::EnvInfo, + transaction: transaction::SignedTransaction, + mut informant: T, + trie_spec: TrieSpec, ) { - let spec_name = format!("{:?}", spec).to_lowercase(); - let spec = match EvmTestClient::spec_from_json(spec) { - Some(spec) => { - informant.before_test(&format!("{}:{}:{}", name, spec_name, idx), "starting"); - spec - }, - None => { - informant.before_test(&format!("{}:{}:{}", name, spec_name, idx), "skipping because of missing spec"); - return; - }, - }; + let spec_name = format!("{:?}", spec).to_lowercase(); + let spec = match EvmTestClient::spec_from_json(spec) { + Some(spec) => { + informant.before_test(&format!("{}:{}:{}", name, spec_name, idx), "starting"); + spec + } + None => { + informant.before_test( + &format!("{}:{}:{}", name, spec_name, idx), + "skipping because of missing spec", + ); + return; + } + }; - informant.set_gas(env_info.gas_limit); + informant.set_gas(env_info.gas_limit); - let mut sink = informant.clone_sink(); - let result = run(&spec, trie_spec, transaction.gas, pre_state, |mut client| { - let result = client.transact(env_info, transaction, trace::NoopTracer, informant); - match result { - Ok(TransactSuccess { state_root, gas_left, output, vm_trace, end_state, .. 
}) => { - if state_root != post_root { - (Err(EvmTestError::PostCondition(format!( - "State root mismatch (got: {:#x}, expected: {:#x})", - state_root, - post_root, - ))), state_root, end_state, Some(gas_left), None) - } else { - (Ok(output), state_root, end_state, Some(gas_left), vm_trace) - } - }, - Err(TransactErr { state_root, error, end_state }) => { - (Err(EvmTestError::PostCondition(format!( - "Unexpected execution error: {:?}", error - ))), state_root, end_state, None, None) - }, - } - }); + let mut sink = informant.clone_sink(); + let result = run( + &spec, + trie_spec, + transaction.gas, + pre_state, + |mut client| { + let result = client.transact(env_info, transaction, trace::NoopTracer, informant); + match result { + Ok(TransactSuccess { + state_root, + gas_left, + output, + vm_trace, + end_state, + .. + }) => { + if state_root != post_root { + ( + Err(EvmTestError::PostCondition(format!( + "State root mismatch (got: {:#x}, expected: {:#x})", + state_root, post_root, + ))), + state_root, + end_state, + Some(gas_left), + None, + ) + } else { + (Ok(output), state_root, end_state, Some(gas_left), vm_trace) + } + } + Err(TransactErr { + state_root, + error, + end_state, + }) => ( + Err(EvmTestError::PostCondition(format!( + "Unexpected execution error: {:?}", + error + ))), + state_root, + end_state, + None, + None, + ), + } + }, + ); - T::finish(result, &mut sink) + T::finish(result, &mut sink) } fn dump_state(state: &state::State) -> Option { - state.to_pod_full().ok() + state.to_pod_full().ok() } /// Execute VM with given `ActionParams` pub fn run<'a, F, X>( - spec: &'a spec::Spec, - trie_spec: TrieSpec, - initial_gas: U256, - pre_state: &'a pod_state::PodState, - run: F, -) -> RunResult where - F: FnOnce(EvmTestClient) -> (Result, EvmTestError>, H256, Option, Option, Option), + spec: &'a spec::Spec, + trie_spec: TrieSpec, + initial_gas: U256, + pre_state: &'a pod_state::PodState, + run: F, +) -> RunResult +where + F: FnOnce( + EvmTestClient, + ) -> ( + 
Result, EvmTestError>, + H256, + Option, + Option, + Option, + ), { - let do_dump = trie_spec == TrieSpec::Fat; + let do_dump = trie_spec == TrieSpec::Fat; - let mut test_client = EvmTestClient::from_pod_state_with_trie(spec, pre_state.clone(), trie_spec) - .map_err(|error| Failure { - gas_used: 0.into(), - error, - time: Duration::from_secs(0), - traces: None, - state_root: H256::default(), - end_state: None, - })?; + let mut test_client = + EvmTestClient::from_pod_state_with_trie(spec, pre_state.clone(), trie_spec).map_err( + |error| Failure { + gas_used: 0.into(), + error, + time: Duration::from_secs(0), + traces: None, + state_root: H256::default(), + end_state: None, + }, + )?; - if do_dump { - test_client.set_dump_state_fn(dump_state); - } + if do_dump { + test_client.set_dump_state_fn(dump_state); + } - let start = Instant::now(); - let result = run(test_client); - let time = start.elapsed(); + let start = Instant::now(); + let result = run(test_client); + let time = start.elapsed(); - match result { - (Ok(output), state_root, end_state, gas_left, traces) => Ok(Success { - state_root, - gas_used: gas_left.map(|gas_left| initial_gas - gas_left).unwrap_or(initial_gas), - output, - time, - traces, - end_state, - }), - (Err(error), state_root, end_state, gas_left, traces) => Err(Failure { - gas_used: gas_left.map(|gas_left| initial_gas - gas_left).unwrap_or(initial_gas), - error, - time, - traces, - state_root, - end_state, - }), - } + match result { + (Ok(output), state_root, end_state, gas_left, traces) => Ok(Success { + state_root, + gas_used: gas_left + .map(|gas_left| initial_gas - gas_left) + .unwrap_or(initial_gas), + output, + time, + traces, + end_state, + }), + (Err(error), state_root, end_state, gas_left, traces) => Err(Failure { + gas_used: gas_left + .map(|gas_left| initial_gas - gas_left) + .unwrap_or(initial_gas), + error, + time, + traces, + state_root, + end_state, + }), + } } #[cfg(test)] pub mod tests { - use std::sync::Arc; - use 
rustc_hex::FromHex; - use super::*; - use tempdir::TempDir; + use super::*; + use rustc_hex::FromHex; + use std::sync::Arc; + use tempdir::TempDir; - pub fn run_test( - informant: I, - compare: F, - code: &str, - gas: T, - expected: &str, - ) where - T: Into, - I: Informant, - F: FnOnce(Option, &str), - { - let mut params = ActionParams::default(); - params.code = Some(Arc::new(code.from_hex().unwrap())); - params.gas = gas.into(); + pub fn run_test(informant: I, compare: F, code: &str, gas: T, expected: &str) + where + T: Into, + I: Informant, + F: FnOnce(Option, &str), + { + let mut params = ActionParams::default(); + params.code = Some(Arc::new(code.from_hex().unwrap())); + params.gas = gas.into(); - let tempdir = TempDir::new("").unwrap(); - let spec = ::ethcore::ethereum::new_foundation(&tempdir.path()); - let result = run_action(&spec, params, informant, TrieSpec::Secure); - match result { - Ok(Success { traces, .. }) => { - compare(traces, expected) - }, - Err(Failure { traces, .. }) => { - compare(traces, expected) - }, - } - } + let tempdir = TempDir::new("").unwrap(); + let spec = ::ethcore::ethereum::new_foundation(&tempdir.path()); + let result = run_action(&spec, params, informant, TrieSpec::Secure); + match result { + Ok(Success { traces, .. }) => compare(traces, expected), + Err(Failure { traces, .. 
}) => compare(traces, expected), + } + } - #[test] - fn should_call_account_from_spec() { - use display::std_json::tests::informant; + #[test] + fn should_call_account_from_spec() { + use display::std_json::tests::informant; - let (inf, res) = informant(); - let mut params = ActionParams::default(); - params.code_address = 0x20.into(); - params.gas = 0xffff.into(); + let (inf, res) = informant(); + let mut params = ActionParams::default(); + params.code_address = 0x20.into(); + params.gas = 0xffff.into(); - let spec = ::ethcore::ethereum::load(None, include_bytes!("../res/testchain.json")); - let _result = run_action(&spec, params, inf, TrieSpec::Secure); + let spec = ::ethcore::ethereum::load(None, include_bytes!("../res/testchain.json")); + let _result = run_action(&spec, params, inf, TrieSpec::Secure); - assert_eq!( - &String::from_utf8_lossy(&**res.lock().unwrap()), -r#"{"depth":1,"gas":"0xffff","op":98,"opName":"PUSH3","pc":0,"stack":[],"storage":{}} + assert_eq!( + &String::from_utf8_lossy(&**res.lock().unwrap()), + r#"{"depth":1,"gas":"0xffff","op":98,"opName":"PUSH3","pc":0,"stack":[],"storage":{}} {"depth":1,"gas":"0xfffc","op":96,"opName":"PUSH1","pc":4,"stack":["0xaaaaaa"],"storage":{}} {"depth":1,"gas":"0xfff9","op":96,"opName":"PUSH1","pc":6,"stack":["0xaaaaaa","0xaa"],"storage":{}} {"depth":1,"gas":"0xfff6","op":80,"opName":"POP","pc":8,"stack":["0xaaaaaa","0xaa","0xaa"],"storage":{}} @@ -265,6 +309,7 @@ r#"{"depth":1,"gas":"0xffff","op":98,"opName":"PUSH3","pc":0,"stack":[],"storage {"depth":1,"gas":"0xffeb","op":96,"opName":"PUSH1","pc":15,"stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa"],"storage":{}} {"depth":1,"gas":"0xffe8","op":96,"opName":"PUSH1","pc":17,"stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{}} {"depth":1,"gas":"0xffe5","op":96,"opName":"PUSH1","pc":19,"stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{}} -"#); - } +"# + ); + } } diff --git a/evmbin/src/main.rs b/evmbin/src/main.rs index 
48c1e8581..d03544d0a 100644 --- a/evmbin/src/main.rs +++ b/evmbin/src/main.rs @@ -28,12 +28,12 @@ extern crate serde_derive; #[macro_use] extern crate serde_json; extern crate docopt; -extern crate parity_bytes as bytes; +extern crate env_logger; extern crate ethereum_types; -extern crate vm; extern crate evm; extern crate panic_hook; -extern crate env_logger; +extern crate parity_bytes as bytes; +extern crate vm; #[cfg(test)] #[macro_use] @@ -42,18 +42,16 @@ extern crate pretty_assertions; #[cfg(test)] extern crate tempdir; -use std::sync::Arc; -use std::{fmt, fs}; -use std::path::PathBuf; -use docopt::Docopt; -use rustc_hex::FromHex; -use ethereum_types::{U256, Address}; use bytes::Bytes; -use ethcore::{spec, json_tests, TrieSpec}; +use docopt::Docopt; +use ethcore::{json_tests, spec, TrieSpec}; +use ethereum_types::{Address, U256}; +use rustc_hex::FromHex; +use std::{fmt, fs, path::PathBuf, sync::Arc}; use vm::{ActionParams, CallType}; -mod info; mod display; +mod info; use info::Informant; @@ -102,301 +100,377 @@ Display result state dump in standardized JSON format. 
"#; fn main() { - panic_hook::set_abort(); - env_logger::init(); + panic_hook::set_abort(); + env_logger::init(); - let args: Args = Docopt::new(USAGE).and_then(|d| d.deserialize()).unwrap_or_else(|e| e.exit()); + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.deserialize()) + .unwrap_or_else(|e| e.exit()); - if args.cmd_state_test { - run_state_test(args) - } else if args.cmd_stats_jsontests_vm { - run_stats_jsontests_vm(args) - } else if args.flag_json { - run_call(args, display::json::Informant::default()) - } else if args.flag_std_dump_json || args.flag_std_json { - if args.flag_std_err_only { - run_call(args, display::std_json::Informant::err_only()) - } else if args.flag_std_out_only { - run_call(args, display::std_json::Informant::out_only()) - } else { - run_call(args, display::std_json::Informant::default()) - }; - } else { - run_call(args, display::simple::Informant::default()) - } + if args.cmd_state_test { + run_state_test(args) + } else if args.cmd_stats_jsontests_vm { + run_stats_jsontests_vm(args) + } else if args.flag_json { + run_call(args, display::json::Informant::default()) + } else if args.flag_std_dump_json || args.flag_std_json { + if args.flag_std_err_only { + run_call(args, display::std_json::Informant::err_only()) + } else if args.flag_std_out_only { + run_call(args, display::std_json::Informant::out_only()) + } else { + run_call(args, display::std_json::Informant::default()) + }; + } else { + run_call(args, display::simple::Informant::default()) + } } fn run_stats_jsontests_vm(args: Args) { - use json_tests::HookType; - use std::collections::HashMap; - use std::time::{Instant, Duration}; + use json_tests::HookType; + use std::{ + collections::HashMap, + time::{Duration, Instant}, + }; - let file = args.arg_file.expect("FILE (or PATH) is required"); + let file = args.arg_file.expect("FILE (or PATH) is required"); - let mut timings: HashMap)> = HashMap::new(); + let mut timings: HashMap)> = HashMap::new(); - { - let mut record_time = 
|name: &str, typ: HookType| { - match typ { - HookType::OnStart => { - timings.insert(name.to_string(), (Instant::now(), None)); - }, - HookType::OnStop => { - timings.entry(name.to_string()).and_modify(|v| { - v.1 = Some(v.0.elapsed()); - }); - }, - } - }; - if !file.is_file() { - json_tests::run_executive_test_path(&file, &[], &mut record_time); - } else { - json_tests::run_executive_test_file(&file, &mut record_time); - } - } + { + let mut record_time = |name: &str, typ: HookType| match typ { + HookType::OnStart => { + timings.insert(name.to_string(), (Instant::now(), None)); + } + HookType::OnStop => { + timings.entry(name.to_string()).and_modify(|v| { + v.1 = Some(v.0.elapsed()); + }); + } + }; + if !file.is_file() { + json_tests::run_executive_test_path(&file, &[], &mut record_time); + } else { + json_tests::run_executive_test_file(&file, &mut record_time); + } + } - for (name, v) in timings { - println!("{}\t{}", name, display::as_micros(&v.1.expect("All hooks are called with OnStop; qed"))); - } + for (name, v) in timings { + println!( + "{}\t{}", + name, + display::as_micros(&v.1.expect("All hooks are called with OnStop; qed")) + ); + } } fn run_state_test(args: Args) { - use ethjson::state::test::Test; + use ethjson::state::test::Test; - let file = args.arg_file.expect("FILE is required"); - let mut file = match fs::File::open(&file) { - Err(err) => die(format!("Unable to open: {:?}: {}", file, err)), - Ok(file) => file, - }; - let state_test = match Test::load(&mut file) { - Err(err) => die(format!("Unable to load the test file: {}", err)), - Ok(test) => test, - }; - let only_test = args.flag_only.map(|s| s.to_lowercase()); - let only_chain = args.flag_chain.map(|s| s.to_lowercase()); + let file = args.arg_file.expect("FILE is required"); + let mut file = match fs::File::open(&file) { + Err(err) => die(format!("Unable to open: {:?}: {}", file, err)), + Ok(file) => file, + }; + let state_test = match Test::load(&mut file) { + Err(err) => 
die(format!("Unable to load the test file: {}", err)), + Ok(test) => test, + }; + let only_test = args.flag_only.map(|s| s.to_lowercase()); + let only_chain = args.flag_chain.map(|s| s.to_lowercase()); - for (name, test) in state_test { - if let Some(false) = only_test.as_ref().map(|only_test| &name.to_lowercase() == only_test) { - continue; - } + for (name, test) in state_test { + if let Some(false) = only_test + .as_ref() + .map(|only_test| &name.to_lowercase() == only_test) + { + continue; + } - let multitransaction = test.transaction; - let env_info = test.env.into(); - let pre = test.pre_state.into(); + let multitransaction = test.transaction; + let env_info = test.env.into(); + let pre = test.pre_state.into(); - for (spec, states) in test.post_states { - if let Some(false) = only_chain.as_ref().map(|only_chain| &format!("{:?}", spec).to_lowercase() == only_chain) { - continue; - } + for (spec, states) in test.post_states { + if let Some(false) = only_chain + .as_ref() + .map(|only_chain| &format!("{:?}", spec).to_lowercase() == only_chain) + { + continue; + } - for (idx, state) in states.into_iter().enumerate() { - let post_root = state.hash.into(); - let transaction = multitransaction.select(&state.indexes).into(); + for (idx, state) in states.into_iter().enumerate() { + let post_root = state.hash.into(); + let transaction = multitransaction.select(&state.indexes).into(); - let trie_spec = if args.flag_std_dump_json { - TrieSpec::Fat - } else { - TrieSpec::Secure - }; - if args.flag_json { - info::run_transaction(&name, idx, &spec, &pre, post_root, &env_info, transaction, display::json::Informant::default(), trie_spec) - } else if args.flag_std_dump_json || args.flag_std_json { - if args.flag_std_err_only { - info::run_transaction(&name, idx, &spec, &pre, post_root, &env_info, transaction, display::std_json::Informant::err_only(), trie_spec) - } else if args.flag_std_out_only { - info::run_transaction(&name, idx, &spec, &pre, post_root, &env_info, 
transaction, display::std_json::Informant::out_only(), trie_spec) - } else { - info::run_transaction(&name, idx, &spec, &pre, post_root, &env_info, transaction, display::std_json::Informant::default(), trie_spec) - } - } else { - info::run_transaction(&name, idx, &spec, &pre, post_root, &env_info, transaction, display::simple::Informant::default(), trie_spec) - } - } - } - } + let trie_spec = if args.flag_std_dump_json { + TrieSpec::Fat + } else { + TrieSpec::Secure + }; + if args.flag_json { + info::run_transaction( + &name, + idx, + &spec, + &pre, + post_root, + &env_info, + transaction, + display::json::Informant::default(), + trie_spec, + ) + } else if args.flag_std_dump_json || args.flag_std_json { + if args.flag_std_err_only { + info::run_transaction( + &name, + idx, + &spec, + &pre, + post_root, + &env_info, + transaction, + display::std_json::Informant::err_only(), + trie_spec, + ) + } else if args.flag_std_out_only { + info::run_transaction( + &name, + idx, + &spec, + &pre, + post_root, + &env_info, + transaction, + display::std_json::Informant::out_only(), + trie_spec, + ) + } else { + info::run_transaction( + &name, + idx, + &spec, + &pre, + post_root, + &env_info, + transaction, + display::std_json::Informant::default(), + trie_spec, + ) + } + } else { + info::run_transaction( + &name, + idx, + &spec, + &pre, + post_root, + &env_info, + transaction, + display::simple::Informant::default(), + trie_spec, + ) + } + } + } + } } fn run_call(args: Args, informant: T) { - let from = arg(args.from(), "--from"); - let to = arg(args.to(), "--to"); - let code = arg(args.code(), "--code"); - let spec = arg(args.spec(), "--chain"); - let gas = arg(args.gas(), "--gas"); - let gas_price = arg(args.gas_price(), "--gas-price"); - let data = arg(args.data(), "--input"); + let from = arg(args.from(), "--from"); + let to = arg(args.to(), "--to"); + let code = arg(args.code(), "--code"); + let spec = arg(args.spec(), "--chain"); + let gas = arg(args.gas(), "--gas"); + let 
gas_price = arg(args.gas_price(), "--gas-price"); + let data = arg(args.data(), "--input"); - if code.is_none() && to == Address::default() { - die("Either --code or --to is required."); - } + if code.is_none() && to == Address::default() { + die("Either --code or --to is required."); + } - let mut params = ActionParams::default(); - params.call_type = if code.is_none() { CallType::Call } else { CallType::None }; - params.code_address = to; - params.address = to; - params.sender = from; - params.origin = from; - params.gas = gas; - params.gas_price = gas_price; - params.code = code.map(Arc::new); - params.data = data; + let mut params = ActionParams::default(); + params.call_type = if code.is_none() { + CallType::Call + } else { + CallType::None + }; + params.code_address = to; + params.address = to; + params.sender = from; + params.origin = from; + params.gas = gas; + params.gas_price = gas_price; + params.code = code.map(Arc::new); + params.data = data; - let mut sink = informant.clone_sink(); - let result = if args.flag_std_dump_json { - info::run_action(&spec, params, informant, TrieSpec::Fat) - } else { - info::run_action(&spec, params, informant, TrieSpec::Secure) - }; - T::finish(result, &mut sink); + let mut sink = informant.clone_sink(); + let result = if args.flag_std_dump_json { + info::run_action(&spec, params, informant, TrieSpec::Fat) + } else { + info::run_action(&spec, params, informant, TrieSpec::Secure) + }; + T::finish(result, &mut sink); } #[derive(Debug, Deserialize)] struct Args { - cmd_stats: bool, - cmd_state_test: bool, - cmd_stats_jsontests_vm: bool, - arg_file: Option, - flag_only: Option, - flag_from: Option, - flag_to: Option, - flag_code: Option, - flag_gas: Option, - flag_gas_price: Option, - flag_input: Option, - flag_chain: Option, - flag_json: bool, - flag_std_json: bool, - flag_std_dump_json: bool, - flag_std_err_only: bool, - flag_std_out_only: bool, + cmd_stats: bool, + cmd_state_test: bool, + cmd_stats_jsontests_vm: bool, + 
arg_file: Option, + flag_only: Option, + flag_from: Option, + flag_to: Option, + flag_code: Option, + flag_gas: Option, + flag_gas_price: Option, + flag_input: Option, + flag_chain: Option, + flag_json: bool, + flag_std_json: bool, + flag_std_dump_json: bool, + flag_std_err_only: bool, + flag_std_out_only: bool, } impl Args { - pub fn gas(&self) -> Result { - match self.flag_gas { - Some(ref gas) => gas.parse().map_err(to_string), - None => Ok(U256::from(u64::max_value())), - } - } + pub fn gas(&self) -> Result { + match self.flag_gas { + Some(ref gas) => gas.parse().map_err(to_string), + None => Ok(U256::from(u64::max_value())), + } + } - pub fn gas_price(&self) -> Result { - match self.flag_gas_price { - Some(ref gas_price) => gas_price.parse().map_err(to_string), - None => Ok(U256::zero()), - } - } + pub fn gas_price(&self) -> Result { + match self.flag_gas_price { + Some(ref gas_price) => gas_price.parse().map_err(to_string), + None => Ok(U256::zero()), + } + } - pub fn from(&self) -> Result { - match self.flag_from { - Some(ref from) => from.parse().map_err(to_string), - None => Ok(Address::default()), - } - } + pub fn from(&self) -> Result { + match self.flag_from { + Some(ref from) => from.parse().map_err(to_string), + None => Ok(Address::default()), + } + } - pub fn to(&self) -> Result { - match self.flag_to { - Some(ref to) => to.parse().map_err(to_string), - None => Ok(Address::default()), - } - } + pub fn to(&self) -> Result { + match self.flag_to { + Some(ref to) => to.parse().map_err(to_string), + None => Ok(Address::default()), + } + } - pub fn code(&self) -> Result, String> { - match self.flag_code { - Some(ref code) => code.from_hex().map(Some).map_err(to_string), - None => Ok(None), - } - } + pub fn code(&self) -> Result, String> { + match self.flag_code { + Some(ref code) => code.from_hex().map(Some).map_err(to_string), + None => Ok(None), + } + } - pub fn data(&self) -> Result, String> { - match self.flag_input { - Some(ref input) => 
input.from_hex().map_err(to_string).map(Some), - None => Ok(None), - } - } + pub fn data(&self) -> Result, String> { + match self.flag_input { + Some(ref input) => input.from_hex().map_err(to_string).map(Some), + None => Ok(None), + } + } - pub fn spec(&self) -> Result { - Ok(match self.flag_chain { - Some(ref filename) => { - let file = fs::File::open(filename).map_err(|e| format!("{}", e))?; - spec::Spec::load(&::std::env::temp_dir(), file)? - }, - None => { - ethcore::ethereum::new_foundation(&::std::env::temp_dir()) - }, - }) - } + pub fn spec(&self) -> Result { + Ok(match self.flag_chain { + Some(ref filename) => { + let file = fs::File::open(filename).map_err(|e| format!("{}", e))?; + spec::Spec::load(&::std::env::temp_dir(), file)? + } + None => ethcore::ethereum::new_foundation(&::std::env::temp_dir()), + }) + } } fn arg(v: Result, param: &str) -> T { - v.unwrap_or_else(|e| die(format!("Invalid {}: {}", param, e))) + v.unwrap_or_else(|e| die(format!("Invalid {}: {}", param, e))) } fn to_string(msg: T) -> String { - format!("{}", msg) + format!("{}", msg) } fn die(msg: T) -> ! 
{ - println!("{}", msg); - ::std::process::exit(-1) + println!("{}", msg); + ::std::process::exit(-1) } #[cfg(test)] mod tests { - use docopt::Docopt; - use super::{Args, USAGE}; + use super::{Args, USAGE}; + use docopt::Docopt; - fn run>(args: &[T]) -> Args { - Docopt::new(USAGE).and_then(|d| d.argv(args.into_iter()).deserialize()).unwrap() - } + fn run>(args: &[T]) -> Args { + Docopt::new(USAGE) + .and_then(|d| d.argv(args.into_iter()).deserialize()) + .unwrap() + } - #[test] - fn should_parse_all_the_options() { - let args = run(&[ - "parity-evm", - "--json", - "--std-json", - "--std-dump-json", - "--gas", "1", - "--gas-price", "2", - "--from", "0000000000000000000000000000000000000003", - "--to", "0000000000000000000000000000000000000004", - "--code", "05", - "--input", "06", - "--chain", "./testfile", "--std-err-only", "--std-out-only" - ]); + #[test] + fn should_parse_all_the_options() { + let args = run(&[ + "parity-evm", + "--json", + "--std-json", + "--std-dump-json", + "--gas", + "1", + "--gas-price", + "2", + "--from", + "0000000000000000000000000000000000000003", + "--to", + "0000000000000000000000000000000000000004", + "--code", + "05", + "--input", + "06", + "--chain", + "./testfile", + "--std-err-only", + "--std-out-only", + ]); - assert_eq!(args.flag_json, true); - assert_eq!(args.flag_std_json, true); - assert_eq!(args.flag_std_dump_json, true); - assert_eq!(args.flag_std_err_only, true); - assert_eq!(args.flag_std_out_only, true); - assert_eq!(args.gas(), Ok(1.into())); - assert_eq!(args.gas_price(), Ok(2.into())); - assert_eq!(args.from(), Ok(3.into())); - assert_eq!(args.to(), Ok(4.into())); - assert_eq!(args.code(), Ok(Some(vec![05]))); - assert_eq!(args.data(), Ok(Some(vec![06]))); - assert_eq!(args.flag_chain, Some("./testfile".to_owned())); - } + assert_eq!(args.flag_json, true); + assert_eq!(args.flag_std_json, true); + assert_eq!(args.flag_std_dump_json, true); + assert_eq!(args.flag_std_err_only, true); + 
assert_eq!(args.flag_std_out_only, true); + assert_eq!(args.gas(), Ok(1.into())); + assert_eq!(args.gas_price(), Ok(2.into())); + assert_eq!(args.from(), Ok(3.into())); + assert_eq!(args.to(), Ok(4.into())); + assert_eq!(args.code(), Ok(Some(vec![05]))); + assert_eq!(args.data(), Ok(Some(vec![06]))); + assert_eq!(args.flag_chain, Some("./testfile".to_owned())); + } - #[test] - fn should_parse_state_test_command() { - let args = run(&[ - "parity-evm", - "state-test", - "./file.json", - "--chain", "homestead", - "--only=add11", - "--json", - "--std-json", - "--std-dump-json" - ]); + #[test] + fn should_parse_state_test_command() { + let args = run(&[ + "parity-evm", + "state-test", + "./file.json", + "--chain", + "homestead", + "--only=add11", + "--json", + "--std-json", + "--std-dump-json", + ]); - assert_eq!(args.cmd_state_test, true); - assert!(args.arg_file.is_some()); - assert_eq!(args.flag_json, true); - assert_eq!(args.flag_std_json, true); - assert_eq!(args.flag_std_dump_json, true); - assert_eq!(args.flag_chain, Some("homestead".to_owned())); - assert_eq!(args.flag_only, Some("add11".to_owned())); - } + assert_eq!(args.cmd_state_test, true); + assert!(args.arg_file.is_some()); + assert_eq!(args.flag_json, true); + assert_eq!(args.flag_std_json, true); + assert_eq!(args.flag_std_dump_json, true); + assert_eq!(args.flag_chain, Some("homestead".to_owned())); + assert_eq!(args.flag_only, Some("add11".to_owned())); + } } diff --git a/ipfs/src/error.rs b/ipfs/src/error.rs index a9a584985..9af718f46 100644 --- a/ipfs/src/error.rs +++ b/ipfs/src/error.rs @@ -14,7 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use {multihash, cid, http}; +use cid; +use http; +use multihash; use route::Out; pub type Result = ::std::result::Result; @@ -22,43 +24,43 @@ pub type Result = ::std::result::Result; /// IPFS server error #[derive(Debug)] pub enum ServerError { - /// Wrapped `std::io::Error` - IoError(::std::io::Error), - /// Other `hyper` error - Other(http::hyper::error::Error), - /// Invalid --ipfs-api-interface - InvalidInterface + /// Wrapped `std::io::Error` + IoError(::std::io::Error), + /// Other `hyper` error + Other(http::hyper::error::Error), + /// Invalid --ipfs-api-interface + InvalidInterface, } /// Handle IO errors (ports taken when starting the server). impl From<::std::io::Error> for ServerError { - fn from(err: ::std::io::Error) -> ServerError { - ServerError::IoError(err) - } + fn from(err: ::std::io::Error) -> ServerError { + ServerError::IoError(err) + } } impl From for ServerError { - fn from(err: http::hyper::error::Error) -> ServerError { - ServerError::Other(err) - } + fn from(err: http::hyper::error::Error) -> ServerError { + ServerError::Other(err) + } } impl From for String { - fn from(err: ServerError) -> String { - match err { - ServerError::IoError(err) => err.to_string(), - ServerError::Other(err) => err.to_string(), - ServerError::InvalidInterface => "Invalid --ipfs-api-interface parameter".into(), - } - } + fn from(err: ServerError) -> String { + match err { + ServerError::IoError(err) => err.to_string(), + ServerError::Other(err) => err.to_string(), + ServerError::InvalidInterface => "Invalid --ipfs-api-interface parameter".into(), + } + } } impl ::std::fmt::Display for ServerError { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { match self { - ServerError::IoError(err) => write!(f, "Io Error: {}", err), - ServerError::Other(err) => write!(f, "Other error: {}", err), - ServerError::InvalidInterface => write!(f, "Invalid interface"), + 
ServerError::IoError(err) => write!(f, "Io Error: {}", err), + ServerError::Other(err) => write!(f, "Other error: {}", err), + ServerError::InvalidInterface => write!(f, "Invalid interface"), } } } @@ -67,43 +69,43 @@ impl ::std::error::Error for ServerError {} #[derive(Debug, PartialEq)] pub enum Error { - CidParsingFailed, - UnsupportedHash, - UnsupportedCid, - BlockNotFound, - TransactionNotFound, - StateRootNotFound, - ContractNotFound, + CidParsingFailed, + UnsupportedHash, + UnsupportedCid, + BlockNotFound, + TransactionNotFound, + StateRootNotFound, + ContractNotFound, } /// Convert Error into Out, handy when switching from Rust's Result-based /// error handling to Hyper's request handling. impl From for Out { - fn from(err: Error) -> Out { - use self::Error::*; + fn from(err: Error) -> Out { + use self::Error::*; - match err { - UnsupportedHash => Out::Bad("Hash must be Keccak-256"), - UnsupportedCid => Out::Bad("CID codec not supported"), - CidParsingFailed => Out::Bad("CID parsing failed"), - BlockNotFound => Out::NotFound("Block not found"), - TransactionNotFound => Out::NotFound("Transaction not found"), - StateRootNotFound => Out::NotFound("State root not found"), - ContractNotFound => Out::NotFound("Contract not found"), - } - } + match err { + UnsupportedHash => Out::Bad("Hash must be Keccak-256"), + UnsupportedCid => Out::Bad("CID codec not supported"), + CidParsingFailed => Out::Bad("CID parsing failed"), + BlockNotFound => Out::NotFound("Block not found"), + TransactionNotFound => Out::NotFound("Transaction not found"), + StateRootNotFound => Out::NotFound("State root not found"), + ContractNotFound => Out::NotFound("Contract not found"), + } + } } /// Convert Content ID errors. impl From for Error { - fn from(_: cid::Error) -> Error { - Error::CidParsingFailed - } + fn from(_: cid::Error) -> Error { + Error::CidParsingFailed + } } /// Convert multihash errors (multihash being part of CID). 
impl From for Error { - fn from(_: multihash::Error) -> Error { - Error::CidParsingFailed - } + fn from(_: multihash::Error) -> Error { + Error::CidParsingFailed + } } diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index 0a3d83432..386da9f27 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -14,188 +14,213 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -extern crate multihash; extern crate cid; +extern crate multihash; extern crate unicase; -extern crate rlp; extern crate ethcore; -extern crate parity_bytes as bytes; extern crate ethereum_types; extern crate jsonrpc_core; extern crate jsonrpc_http_server as http; +extern crate parity_bytes as bytes; +extern crate rlp; pub mod error; mod route; -use std::thread; -use std::sync::{mpsc, Arc}; -use std::net::{SocketAddr, IpAddr}; +use std::{ + net::{IpAddr, SocketAddr}, + sync::{mpsc, Arc}, + thread, +}; -use jsonrpc_core::futures::future::{self, FutureResult}; -use jsonrpc_core::futures::{self, Future}; use ethcore::client::BlockChainClient; -use http::hyper::{self, server, Method, StatusCode, Body, - header::{self, HeaderValue}, +use http::hyper::{ + self, + header::{self, HeaderValue}, + server, Body, Method, StatusCode, +}; +use jsonrpc_core::futures::{ + self, + future::{self, FutureResult}, + Future, }; use error::ServerError; use route::Out; -pub use http::{AccessControlAllowOrigin, Host, DomainsValidation}; +pub use http::{AccessControlAllowOrigin, DomainsValidation, Host}; /// Request/response handler pub struct IpfsHandler { - /// Allowed CORS domains - cors_domains: Option>, - /// Hostnames allowed in the `Host` request header - allowed_hosts: Option>, - /// Reference to the Blockchain Client - client: Arc, + /// Allowed CORS domains + cors_domains: Option>, + /// Hostnames allowed in the `Host` request header + allowed_hosts: Option>, + /// Reference to the Blockchain Client + client: Arc, } impl IpfsHandler { - pub fn client(&self) 
-> &dyn BlockChainClient { - &*self.client - } + pub fn client(&self) -> &dyn BlockChainClient { + &*self.client + } - pub fn new(cors: DomainsValidation, hosts: DomainsValidation, client: Arc) -> Self { - IpfsHandler { - cors_domains: cors.into(), - allowed_hosts: hosts.into(), - client, - } - } - pub fn on_request(&self, req: hyper::Request) -> (Option, Out) { - match *req.method() { - Method::GET | Method::POST => {}, - _ => return (None, Out::Bad("Invalid Request")), - } + pub fn new( + cors: DomainsValidation, + hosts: DomainsValidation, + client: Arc, + ) -> Self { + IpfsHandler { + cors_domains: cors.into(), + allowed_hosts: hosts.into(), + client, + } + } + pub fn on_request(&self, req: hyper::Request) -> (Option, Out) { + match *req.method() { + Method::GET | Method::POST => {} + _ => return (None, Out::Bad("Invalid Request")), + } - if !http::is_host_allowed(&req, &self.allowed_hosts) { - return (None, Out::Bad("Disallowed Host header")); - } + if !http::is_host_allowed(&req, &self.allowed_hosts) { + return (None, Out::Bad("Disallowed Host header")); + } - let cors_header = http::cors_allow_origin(&req, &self.cors_domains); - if cors_header == http::AllowCors::Invalid { - return (None, Out::Bad("Disallowed Origin header")); - } + let cors_header = http::cors_allow_origin(&req, &self.cors_domains); + if cors_header == http::AllowCors::Invalid { + return (None, Out::Bad("Disallowed Origin header")); + } - let path = req.uri().path(); - let query = req.uri().query(); - return (cors_header.into(), self.route(path, query)); - } + let path = req.uri().path(); + let query = req.uri().query(); + return (cors_header.into(), self.route(path, query)); + } } impl hyper::service::Service for IpfsHandler { - type ReqBody = Body; - type ResBody = Body; - type Error = hyper::Error; - type Future = FutureResult, Self::Error>; + type ReqBody = Body; + type ResBody = Body; + type Error = hyper::Error; + type Future = FutureResult, Self::Error>; - fn call(&mut self, request: 
hyper::Request) -> Self::Future { - let (cors_header, out) = self.on_request(request); + fn call(&mut self, request: hyper::Request) -> Self::Future { + let (cors_header, out) = self.on_request(request); - let mut res = match out { - Out::OctetStream(bytes) => { - hyper::Response::builder() - .status(StatusCode::OK) - .header("content-type", HeaderValue::from_static("application/octet-stream")) - .body(bytes.into()) - }, - Out::NotFound(reason) => { - hyper::Response::builder() - .status(StatusCode::NOT_FOUND) - .header("content-type", HeaderValue::from_static("text/plain; charset=utf-8")) - .body(reason.into()) - }, - Out::Bad(reason) => { - hyper::Response::builder() - .status(StatusCode::BAD_REQUEST) - .header("content-type", HeaderValue::from_static("text/plain; charset=utf-8")) - .body(reason.into()) - } - }.expect("Response builder: Parsing 'content-type' header name will not fail; qed"); + let mut res = match out { + Out::OctetStream(bytes) => hyper::Response::builder() + .status(StatusCode::OK) + .header( + "content-type", + HeaderValue::from_static("application/octet-stream"), + ) + .body(bytes.into()), + Out::NotFound(reason) => hyper::Response::builder() + .status(StatusCode::NOT_FOUND) + .header( + "content-type", + HeaderValue::from_static("text/plain; charset=utf-8"), + ) + .body(reason.into()), + Out::Bad(reason) => hyper::Response::builder() + .status(StatusCode::BAD_REQUEST) + .header( + "content-type", + HeaderValue::from_static("text/plain; charset=utf-8"), + ) + .body(reason.into()), + } + .expect("Response builder: Parsing 'content-type' header name will not fail; qed"); - if let Some(cors_header) = cors_header { - res.headers_mut().append(header::ACCESS_CONTROL_ALLOW_ORIGIN, cors_header); - res.headers_mut().append(header::VARY, HeaderValue::from_static("origin")); - } + if let Some(cors_header) = cors_header { + res.headers_mut() + .append(header::ACCESS_CONTROL_ALLOW_ORIGIN, cors_header); + res.headers_mut() + .append(header::VARY, 
HeaderValue::from_static("origin")); + } - future::ok(res) - } + future::ok(res) + } } /// Add current interface (default: "127.0.0.1:5001") to list of allowed hosts fn include_current_interface(mut hosts: Vec, interface: String, port: u16) -> Vec { - hosts.push(match port { - 80 => interface, - _ => format!("{}:{}", interface, port), - }.into()); + hosts.push( + match port { + 80 => interface, + _ => format!("{}:{}", interface, port), + } + .into(), + ); - hosts + hosts } #[derive(Debug)] pub struct Listening { - close: Option>, - thread: Option>, + close: Option>, + thread: Option>, } impl Drop for Listening { - fn drop(&mut self) { - self.close.take().unwrap().send(()).unwrap(); - let _ = self.thread.take().unwrap().join(); - } + fn drop(&mut self) { + self.close.take().unwrap().send(()).unwrap(); + let _ = self.thread.take().unwrap().join(); + } } pub fn start_server( - port: u16, - interface: String, - cors: DomainsValidation, - hosts: DomainsValidation, - client: Arc + port: u16, + interface: String, + cors: DomainsValidation, + hosts: DomainsValidation, + client: Arc, ) -> Result { + let ip: IpAddr = interface + .parse() + .map_err(|_| ServerError::InvalidInterface)?; + let addr = SocketAddr::new(ip, port); + let hosts: Option> = hosts.into(); + let hosts: DomainsValidation<_> = hosts + .map(move |hosts| include_current_interface(hosts, interface, port)) + .into(); - let ip: IpAddr = interface.parse().map_err(|_| ServerError::InvalidInterface)?; - let addr = SocketAddr::new(ip, port); - let hosts: Option> = hosts.into(); - let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into(); + let (close, shutdown_signal) = futures::sync::oneshot::channel::<()>(); + let (tx, rx) = mpsc::sync_channel::>(1); + let thread = thread::spawn(move || { + let send = |res| tx.send(res).expect("rx end is never dropped; qed"); - let (close, shutdown_signal) = futures::sync::oneshot::channel::<()>(); - let (tx, rx) = 
mpsc::sync_channel::>(1); - let thread = thread::spawn(move || { - let send = |res| tx.send(res).expect("rx end is never dropped; qed"); + let server_bldr = match server::Server::try_bind(&addr) { + Ok(s) => s, + Err(err) => { + send(Err(ServerError::from(err))); + return; + } + }; - let server_bldr = match server::Server::try_bind(&addr) { - Ok(s) => s, - Err(err) => { - send(Err(ServerError::from(err))); - return; - } - }; + let new_service = move || { + Ok::<_, ServerError>(IpfsHandler::new( + cors.clone(), + hosts.clone(), + client.clone(), + )) + }; - let new_service = move || { - Ok::<_, ServerError>( - IpfsHandler::new(cors.clone(), hosts.clone(), client.clone()) - ) - }; + let server = server_bldr + .serve(new_service) + .map_err(|_| ()) + .select(shutdown_signal.map_err(|_| ())) + .then(|_| Ok(())); - let server = server_bldr - .serve(new_service) - .map_err(|_| ()) - .select(shutdown_signal.map_err(|_| ())) - .then(|_| Ok(())); + hyper::rt::run(server); + send(Ok(())); + }); - hyper::rt::run(server); - send(Ok(())); - }); + // Wait for server to start successfuly. + rx.recv().expect("tx end is never dropped; qed")?; - // Wait for server to start successfuly. - rx.recv().expect("tx end is never dropped; qed")?; - - Ok(Listening { - close: close.into(), - thread: thread.into(), - }) + Ok(Listening { + close: close.into(), + thread: thread.into(), + }) } diff --git a/ipfs/src/route.rs b/ipfs/src/route.rs index f4a730338..981f2de5a 100644 --- a/ipfs/src/route.rs +++ b/ipfs/src/route.rs @@ -14,233 +14,266 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use {rlp, multihash, IpfsHandler}; +use cid::{Codec, ToCid}; use error::{Error, Result}; -use cid::{ToCid, Codec}; +use multihash; +use rlp; +use IpfsHandler; -use multihash::Hash; -use ethereum_types::H256; use bytes::Bytes; use ethcore::client::{BlockId, TransactionId}; +use ethereum_types::H256; +use multihash::Hash; type Reason = &'static str; /// Keeps the state of the response to send out #[derive(Debug, PartialEq)] pub enum Out { - OctetStream(Bytes), - NotFound(Reason), - Bad(Reason), + OctetStream(Bytes), + NotFound(Reason), + Bad(Reason), } impl IpfsHandler { - /// Route path + query string to a specialized method - pub fn route(&self, path: &str, query: Option<&str>) -> Out { - match path { - "/api/v0/block/get" => { - let arg = query.and_then(|q| get_param(q, "arg")).unwrap_or(""); + /// Route path + query string to a specialized method + pub fn route(&self, path: &str, query: Option<&str>) -> Out { + match path { + "/api/v0/block/get" => { + let arg = query.and_then(|q| get_param(q, "arg")).unwrap_or(""); - self.route_cid(arg).unwrap_or_else(Into::into) - }, + self.route_cid(arg).unwrap_or_else(Into::into) + } - _ => Out::NotFound("Route not found") - } - } + _ => Out::NotFound("Route not found"), + } + } - /// Attempt to read Content ID from `arg` query parameter, get a hash and - /// route further by the CID's codec. - fn route_cid(&self, cid: &str) -> Result { - let cid = cid.to_cid()?; + /// Attempt to read Content ID from `arg` query parameter, get a hash and + /// route further by the CID's codec. 
+ fn route_cid(&self, cid: &str) -> Result { + let cid = cid.to_cid()?; - let mh = multihash::decode(&cid.hash)?; + let mh = multihash::decode(&cid.hash)?; - if mh.alg != Hash::Keccak256 { return Err(Error::UnsupportedHash); } + if mh.alg != Hash::Keccak256 { + return Err(Error::UnsupportedHash); + } - let hash: H256 = mh.digest.into(); + let hash: H256 = mh.digest.into(); - match cid.codec { - Codec::EthereumBlock => self.block(hash), - Codec::EthereumBlockList => self.block_list(hash), - Codec::EthereumTx => self.transaction(hash), - Codec::EthereumStateTrie => self.state_trie(hash), - Codec::Raw => self.contract_code(hash), - _ => return Err(Error::UnsupportedCid), - } - } + match cid.codec { + Codec::EthereumBlock => self.block(hash), + Codec::EthereumBlockList => self.block_list(hash), + Codec::EthereumTx => self.transaction(hash), + Codec::EthereumStateTrie => self.state_trie(hash), + Codec::Raw => self.contract_code(hash), + _ => return Err(Error::UnsupportedCid), + } + } - /// Get block header by hash as raw binary. - fn block(&self, hash: H256) -> Result { - let block_id = BlockId::Hash(hash); - let block = self.client().block_header(block_id).ok_or(Error::BlockNotFound)?; + /// Get block header by hash as raw binary. + fn block(&self, hash: H256) -> Result { + let block_id = BlockId::Hash(hash); + let block = self + .client() + .block_header(block_id) + .ok_or(Error::BlockNotFound)?; - Ok(Out::OctetStream(block.into_inner())) - } + Ok(Out::OctetStream(block.into_inner())) + } - /// Get list of block ommers by hash as raw binary. - fn block_list(&self, hash: H256) -> Result { - let uncles = self.client().find_uncles(&hash).ok_or(Error::BlockNotFound)?; + /// Get list of block ommers by hash as raw binary. 
+ fn block_list(&self, hash: H256) -> Result { + let uncles = self + .client() + .find_uncles(&hash) + .ok_or(Error::BlockNotFound)?; - Ok(Out::OctetStream(rlp::encode_list(&uncles))) - } + Ok(Out::OctetStream(rlp::encode_list(&uncles))) + } - /// Get transaction by hash and return as raw binary. - fn transaction(&self, hash: H256) -> Result { - let tx_id = TransactionId::Hash(hash); - let tx = self.client().transaction(tx_id).ok_or(Error::TransactionNotFound)?; + /// Get transaction by hash and return as raw binary. + fn transaction(&self, hash: H256) -> Result { + let tx_id = TransactionId::Hash(hash); + let tx = self + .client() + .transaction(tx_id) + .ok_or(Error::TransactionNotFound)?; - Ok(Out::OctetStream(rlp::encode(&*tx))) - } + Ok(Out::OctetStream(rlp::encode(&*tx))) + } - /// Get state trie node by hash and return as raw binary. - fn state_trie(&self, hash: H256) -> Result { - let data = self.client().state_data(&hash).ok_or(Error::StateRootNotFound)?; + /// Get state trie node by hash and return as raw binary. + fn state_trie(&self, hash: H256) -> Result { + let data = self + .client() + .state_data(&hash) + .ok_or(Error::StateRootNotFound)?; - Ok(Out::OctetStream(data)) - } + Ok(Out::OctetStream(data)) + } - /// Get state trie node by hash and return as raw binary. - fn contract_code(&self, hash: H256) -> Result { - let data = self.client().state_data(&hash).ok_or(Error::ContractNotFound)?; + /// Get state trie node by hash and return as raw binary. + fn contract_code(&self, hash: H256) -> Result { + let data = self + .client() + .state_data(&hash) + .ok_or(Error::ContractNotFound)?; - Ok(Out::OctetStream(data)) - } + Ok(Out::OctetStream(data)) + } } /// Get a query parameter's value by name. 
fn get_param<'a>(query: &'a str, name: &str) -> Option<&'a str> { - query.split('&') - .find(|part| part.starts_with(name) && part[name.len()..].starts_with("=")) - .map(|part| &part[name.len() + 1..]) + query + .split('&') + .find(|part| part.starts_with(name) && part[name.len()..].starts_with("=")) + .map(|part| &part[name.len() + 1..]) } #[cfg(test)] mod tests { - use std::sync::Arc; - use super::*; - use ethcore::client::TestBlockChainClient; + use super::*; + use ethcore::client::TestBlockChainClient; + use std::sync::Arc; - fn get_mocked_handler() -> IpfsHandler { - IpfsHandler::new(None.into(), None.into(), Arc::new(TestBlockChainClient::new())) - } + fn get_mocked_handler() -> IpfsHandler { + IpfsHandler::new( + None.into(), + None.into(), + Arc::new(TestBlockChainClient::new()), + ) + } - #[test] - fn test_get_param() { - let query = "foo=100&bar=200&qux=300"; + #[test] + fn test_get_param() { + let query = "foo=100&bar=200&qux=300"; - assert_eq!(get_param(query, "foo"), Some("100")); - assert_eq!(get_param(query, "bar"), Some("200")); - assert_eq!(get_param(query, "qux"), Some("300")); - assert_eq!(get_param(query, "bar="), None); - assert_eq!(get_param(query, "200"), None); - assert_eq!(get_param("", "foo"), None); - assert_eq!(get_param("foo", "foo"), None); - assert_eq!(get_param("foo&bar", "foo"), None); - assert_eq!(get_param("bar&foo", "foo"), None); - } + assert_eq!(get_param(query, "foo"), Some("100")); + assert_eq!(get_param(query, "bar"), Some("200")); + assert_eq!(get_param(query, "qux"), Some("300")); + assert_eq!(get_param(query, "bar="), None); + assert_eq!(get_param(query, "200"), None); + assert_eq!(get_param("", "foo"), None); + assert_eq!(get_param("foo", "foo"), None); + assert_eq!(get_param("foo&bar", "foo"), None); + assert_eq!(get_param("bar&foo", "foo"), None); + } - #[test] - fn cid_route_block() { - let handler = get_mocked_handler(); + #[test] + fn cid_route_block() { + let handler = get_mocked_handler(); - // `eth-block` with 
Keccak-256 - let cid = "z43AaGF5tmkT9SEX6urrhwpEW5ZSaACY73Vw357ZXTsur2fR8BM"; + // `eth-block` with Keccak-256 + let cid = "z43AaGF5tmkT9SEX6urrhwpEW5ZSaACY73Vw357ZXTsur2fR8BM"; - assert_eq!(Err(Error::BlockNotFound), handler.route_cid(cid)); - } + assert_eq!(Err(Error::BlockNotFound), handler.route_cid(cid)); + } - #[test] - fn cid_route_block_list() { - let handler = get_mocked_handler(); + #[test] + fn cid_route_block_list() { + let handler = get_mocked_handler(); - // `eth-block-list` with Keccak-256 - let cid = "z43c7o7FsNxqdLJW8Ucj19tuCALtnmUb2EkDptj4W6xSkFVTqWs"; + // `eth-block-list` with Keccak-256 + let cid = "z43c7o7FsNxqdLJW8Ucj19tuCALtnmUb2EkDptj4W6xSkFVTqWs"; - assert_eq!(Err(Error::BlockNotFound), handler.route_cid(cid)); - } + assert_eq!(Err(Error::BlockNotFound), handler.route_cid(cid)); + } - #[test] - fn cid_route_tx() { - let handler = get_mocked_handler(); + #[test] + fn cid_route_tx() { + let handler = get_mocked_handler(); - // `eth-tx` with Keccak-256 - let cid = "z44VCrqbpbPcb8SUBc8Tba4EaKuoDz2grdEoQXx4TP7WYh9ZGBu"; + // `eth-tx` with Keccak-256 + let cid = "z44VCrqbpbPcb8SUBc8Tba4EaKuoDz2grdEoQXx4TP7WYh9ZGBu"; - assert_eq!(Err(Error::TransactionNotFound), handler.route_cid(cid)); - } + assert_eq!(Err(Error::TransactionNotFound), handler.route_cid(cid)); + } - #[test] - fn cid_route_state_trie() { - let handler = get_mocked_handler(); + #[test] + fn cid_route_state_trie() { + let handler = get_mocked_handler(); - // `eth-state-trie` with Keccak-256 - let cid = "z45oqTS7kR2n2peRGJQ4VCJEeaG9sorqcCyfmznZPJM7FMdhQCT"; + // `eth-state-trie` with Keccak-256 + let cid = "z45oqTS7kR2n2peRGJQ4VCJEeaG9sorqcCyfmznZPJM7FMdhQCT"; - assert_eq!(Err(Error::StateRootNotFound), handler.route_cid(&cid)); - } + assert_eq!(Err(Error::StateRootNotFound), handler.route_cid(&cid)); + } - #[test] - fn cid_route_contract_code() { - let handler = get_mocked_handler(); + #[test] + fn cid_route_contract_code() { + let handler = get_mocked_handler(); - // `raw` with 
Keccak-256 - let cid = "zb34WAp1Q5fhtLGZ3w3jhnTWaNbVV5ZZvGq4vuJQzERj6Pu3H"; + // `raw` with Keccak-256 + let cid = "zb34WAp1Q5fhtLGZ3w3jhnTWaNbVV5ZZvGq4vuJQzERj6Pu3H"; - assert_eq!(Err(Error::ContractNotFound), handler.route_cid(&cid)); - } + assert_eq!(Err(Error::ContractNotFound), handler.route_cid(&cid)); + } - #[test] - fn cid_route_invalid_hash() { - let handler = get_mocked_handler(); + #[test] + fn cid_route_invalid_hash() { + let handler = get_mocked_handler(); - // `eth-block` with SHA3-256 hash - let cid = "z43Aa9gr1MM7TENJh4Em9d9Ttr7p3UcfyMpNei6WLVeCmSEPu8F"; + // `eth-block` with SHA3-256 hash + let cid = "z43Aa9gr1MM7TENJh4Em9d9Ttr7p3UcfyMpNei6WLVeCmSEPu8F"; - assert_eq!(Err(Error::UnsupportedHash), handler.route_cid(cid)); - } + assert_eq!(Err(Error::UnsupportedHash), handler.route_cid(cid)); + } - #[test] - fn cid_route_invalid_codec() { - let handler = get_mocked_handler(); + #[test] + fn cid_route_invalid_codec() { + let handler = get_mocked_handler(); - // `bitcoin-block` with Keccak-256 - let cid = "z4HFyHvb8CarYARyxz4cCcPaciduXd49TFPCKLhYmvNxf7Auvwu"; + // `bitcoin-block` with Keccak-256 + let cid = "z4HFyHvb8CarYARyxz4cCcPaciduXd49TFPCKLhYmvNxf7Auvwu"; - assert_eq!(Err(Error::UnsupportedCid), handler.route_cid(&cid)); - } + assert_eq!(Err(Error::UnsupportedCid), handler.route_cid(&cid)); + } - #[test] - fn route_block() { - let handler = get_mocked_handler(); + #[test] + fn route_block() { + let handler = get_mocked_handler(); - let out = handler.route("/api/v0/block/get", Some("arg=z43AaGF5tmkT9SEX6urrhwpEW5ZSaACY73Vw357ZXTsur2fR8BM")); + let out = handler.route( + "/api/v0/block/get", + Some("arg=z43AaGF5tmkT9SEX6urrhwpEW5ZSaACY73Vw357ZXTsur2fR8BM"), + ); - assert_eq!(out, Out::NotFound("Block not found")); - } + assert_eq!(out, Out::NotFound("Block not found")); + } - #[test] - fn route_block_missing_query() { - let handler = get_mocked_handler(); + #[test] + fn route_block_missing_query() { + let handler = get_mocked_handler(); - let out = 
handler.route("/api/v0/block/get", None); + let out = handler.route("/api/v0/block/get", None); - assert_eq!(out, Out::Bad("CID parsing failed")); - } + assert_eq!(out, Out::Bad("CID parsing failed")); + } - #[test] - fn route_block_invalid_query() { - let handler = get_mocked_handler(); + #[test] + fn route_block_invalid_query() { + let handler = get_mocked_handler(); - let out = handler.route("/api/v0/block/get", Some("arg=foobarz43AaGF5tmkT9SEX6urrhwpEW5ZSaACY73Vw357ZXTsur2fR8BM")); + let out = handler.route( + "/api/v0/block/get", + Some("arg=foobarz43AaGF5tmkT9SEX6urrhwpEW5ZSaACY73Vw357ZXTsur2fR8BM"), + ); - assert_eq!(out, Out::Bad("CID parsing failed")); - } + assert_eq!(out, Out::Bad("CID parsing failed")); + } - #[test] - fn route_invalid_route() { - let handler = get_mocked_handler(); + #[test] + fn route_invalid_route() { + let handler = get_mocked_handler(); - let out = handler.route("/foo/bar/baz", Some("arg=z43AaGF5tmkT9SEX6urrhwpEW5ZSaACY73Vw357ZXTsur2fR8BM")); + let out = handler.route( + "/foo/bar/baz", + Some("arg=z43AaGF5tmkT9SEX6urrhwpEW5ZSaACY73Vw357ZXTsur2fR8BM"), + ); - assert_eq!(out, Out::NotFound("Route not found")); - } + assert_eq!(out, Out::NotFound("Route not found")); + } } diff --git a/json/src/blockchain/account.rs b/json/src/blockchain/account.rs index aa6f6f8bf..12fac563f 100644 --- a/json/src/blockchain/account.rs +++ b/json/src/blockchain/account.rs @@ -16,31 +16,31 @@ //! Blockchain test account deserializer. +use bytes::Bytes; use std::collections::BTreeMap; use uint::Uint; -use bytes::Bytes; /// Blockchain test account deserializer. #[derive(Debug, PartialEq, Deserialize, Clone)] pub struct Account { - /// Balance. - pub balance: Uint, - /// Code. - pub code: Bytes, - /// Nonce. - pub nonce: Uint, - /// Storage. - pub storage: BTreeMap, + /// Balance. + pub balance: Uint, + /// Code. + pub code: Bytes, + /// Nonce. + pub nonce: Uint, + /// Storage. 
+ pub storage: BTreeMap, } #[cfg(test)] mod tests { - use serde_json; - use blockchain::account::Account; + use blockchain::account::Account; + use serde_json; - #[test] - fn account_deserialization() { - let s = r#"{ + #[test] + fn account_deserialization() { + let s = r#"{ "balance" : "0x09184e72a078", "code" : "0x600140600155", "nonce" : "0x00", @@ -48,7 +48,7 @@ mod tests { "0x01" : "0x9a10c2b5bb8f3c602e674006d9b21f09167df57c87a78a5ce96d4159ecb76520" } }"#; - let _deserialized: Account = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: Account = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/blockchain/block.rs b/json/src/blockchain/block.rs index 23ba5300d..d879104d5 100644 --- a/json/src/blockchain/block.rs +++ b/json/src/blockchain/block.rs @@ -16,36 +16,35 @@ //! Blockchain test block deserializer. +use blockchain::{header::Header, transaction::Transaction}; use bytes::Bytes; -use blockchain::header::Header; -use blockchain::transaction::Transaction; /// Blockchain test block deserializer. #[derive(Debug, PartialEq, Deserialize)] pub struct Block { - #[serde(rename = "blockHeader")] - header: Option
, - rlp: Bytes, - transactions: Option>, - #[serde(rename = "uncleHeaders")] - uncles: Option>, + #[serde(rename = "blockHeader")] + header: Option
, + rlp: Bytes, + transactions: Option>, + #[serde(rename = "uncleHeaders")] + uncles: Option>, } impl Block { - /// Returns block rlp. - pub fn rlp(&self) -> Vec { - self.rlp.clone().into() - } + /// Returns block rlp. + pub fn rlp(&self) -> Vec { + self.rlp.clone().into() + } } #[cfg(test)] mod tests { - use serde_json; - use blockchain::block::Block; + use blockchain::block::Block; + use serde_json; - #[test] - fn block_deserialization() { - let s = r#"{ + #[test] + fn block_deserialization() { + let s = r#"{ "blockHeader" : { "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1", @@ -69,7 +68,7 @@ mod tests { "transactions" : [], "uncleHeaders" : [] }"#; - let _deserialized: Block = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: Block = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/blockchain/blockchain.rs b/json/src/blockchain/blockchain.rs index b92336f79..8329a9321 100644 --- a/json/src/blockchain/blockchain.rs +++ b/json/src/blockchain/blockchain.rs @@ -16,91 +16,89 @@ //! Blockchain deserialization. +use blockchain::{block::Block, header::Header, state::State}; use bytes::Bytes; use hash::H256; -use blockchain::state::State; -use blockchain::header::Header; -use blockchain::block::Block; -use spec::{ForkSpec, Genesis, Seal, Ethereum}; +use spec::{Ethereum, ForkSpec, Genesis, Seal}; /// Json Block test possible engine kind. 
#[derive(Debug, PartialEq, Deserialize)] pub enum Engine { - /// Default (old) behaviour. - Ethash, - /// No check of block's difficulty and nonce for tests. - NoProof, + /// Default (old) behaviour. + Ethash, + /// No check of block's difficulty and nonce for tests. + NoProof, } impl Default for Engine { - fn default() -> Self { - Engine::Ethash - } + fn default() -> Self { + Engine::Ethash + } } /// Blockchain deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct BlockChain { - /// Genesis block header. - #[serde(rename = "genesisBlockHeader")] - pub genesis_block: Header, - /// Genesis block rlp. - #[serde(rename = "genesisRLP")] - pub genesis_rlp: Option, - /// Blocks. - pub blocks: Vec, - /// Post state. - pub post_state: State, - /// Pre state. - #[serde(rename = "pre")] - pub pre_state: State, - /// Hash of best block. - #[serde(rename = "lastblockhash")] - pub best_block: H256, - /// Network. - pub network: ForkSpec, - #[serde(default)] - #[serde(rename="sealEngine")] - /// Engine - pub engine: Engine, + /// Genesis block header. + #[serde(rename = "genesisBlockHeader")] + pub genesis_block: Header, + /// Genesis block rlp. + #[serde(rename = "genesisRLP")] + pub genesis_rlp: Option, + /// Blocks. + pub blocks: Vec, + /// Post state. + pub post_state: State, + /// Pre state. + #[serde(rename = "pre")] + pub pre_state: State, + /// Hash of best block. + #[serde(rename = "lastblockhash")] + pub best_block: H256, + /// Network. + pub network: ForkSpec, + #[serde(default)] + #[serde(rename = "sealEngine")] + /// Engine + pub engine: Engine, } impl BlockChain { - /// Returns blocks rlp. - pub fn blocks_rlp(&self) -> Vec> { - self.blocks.iter().map(|block| block.rlp()).collect() - } + /// Returns blocks rlp. + pub fn blocks_rlp(&self) -> Vec> { + self.blocks.iter().map(|block| block.rlp()).collect() + } - /// Returns spec compatible genesis struct. 
- pub fn genesis(&self) -> Genesis { - Genesis { - seal: Seal::Ethereum(Ethereum { - nonce: self.genesis_block.nonce.clone(), - mix_hash: self.genesis_block.mix_hash.clone(), - }), - difficulty: self.genesis_block.difficulty, - author: Some(self.genesis_block.author.clone()), - timestamp: Some(self.genesis_block.timestamp), - parent_hash: Some(self.genesis_block.parent_hash.clone()), - gas_limit: self.genesis_block.gas_limit, - transactions_root: Some(self.genesis_block.transactions_root.clone()), - receipts_root: Some(self.genesis_block.receipts_root.clone()), - state_root: Some(self.genesis_block.state_root.clone()), - gas_used: Some(self.genesis_block.gas_used), - extra_data: Some(self.genesis_block.extra_data.clone()), - } - } + /// Returns spec compatible genesis struct. + pub fn genesis(&self) -> Genesis { + Genesis { + seal: Seal::Ethereum(Ethereum { + nonce: self.genesis_block.nonce.clone(), + mix_hash: self.genesis_block.mix_hash.clone(), + }), + difficulty: self.genesis_block.difficulty, + author: Some(self.genesis_block.author.clone()), + timestamp: Some(self.genesis_block.timestamp), + parent_hash: Some(self.genesis_block.parent_hash.clone()), + gas_limit: self.genesis_block.gas_limit, + transactions_root: Some(self.genesis_block.transactions_root.clone()), + receipts_root: Some(self.genesis_block.receipts_root.clone()), + state_root: Some(self.genesis_block.state_root.clone()), + gas_used: Some(self.genesis_block.gas_used), + extra_data: Some(self.genesis_block.extra_data.clone()), + } + } } #[cfg(test)] mod tests { - use serde_json; - use blockchain::blockchain::BlockChain; + use blockchain::blockchain::BlockChain; + use serde_json; - #[test] - fn blockchain_deserialization() { - let s = r#"{ + #[test] + fn blockchain_deserialization() { + let s = r#"{ "blocks" : [{ "blockHeader" : { "bloom" : 
"00000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000040000000000000000000000000000000000000000000000000000000", @@ -198,7 +196,7 @@ mod tests { } } }"#; - let _deserialized: BlockChain = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: BlockChain = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/blockchain/header.rs b/json/src/blockchain/header.rs index 8de5b16ed..e23d6ee06 100644 --- a/json/src/blockchain/header.rs +++ b/json/src/blockchain/header.rs @@ -16,60 +16,60 @@ //! Blockchain test header deserializer. -use hash::{H64, Address, H256, Bloom}; -use uint::Uint; use bytes::Bytes; +use hash::{Address, Bloom, H256, H64}; +use uint::Uint; /// Blockchain test header deserializer. #[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Header { - /// Blocks bloom. - pub bloom: Bloom, - /// Blocks author. - #[serde(rename = "coinbase")] - pub author: Address, - /// Difficulty. - pub difficulty: Uint, - /// Extra data. - pub extra_data: Bytes, - /// Gas limit. - pub gas_limit: Uint, - /// Gas used. - pub gas_used: Uint, - /// Hash. - pub hash: H256, - /// Mix hash. - pub mix_hash: H256, - /// Seal nonce. - pub nonce: H64, - /// Block number. - pub number: Uint, - /// Parent hash. - pub parent_hash: H256, - /// Receipt root. - #[serde(rename = "receiptTrie")] - pub receipts_root: H256, - /// State root. - pub state_root: H256, - /// Timestamp. - pub timestamp: Uint, - /// Transactions root. 
- #[serde(rename = "transactionsTrie")] - pub transactions_root: H256, - /// Uncles hash. - #[serde(rename = "uncleHash")] - pub uncles_hash: H256, + /// Blocks bloom. + pub bloom: Bloom, + /// Blocks author. + #[serde(rename = "coinbase")] + pub author: Address, + /// Difficulty. + pub difficulty: Uint, + /// Extra data. + pub extra_data: Bytes, + /// Gas limit. + pub gas_limit: Uint, + /// Gas used. + pub gas_used: Uint, + /// Hash. + pub hash: H256, + /// Mix hash. + pub mix_hash: H256, + /// Seal nonce. + pub nonce: H64, + /// Block number. + pub number: Uint, + /// Parent hash. + pub parent_hash: H256, + /// Receipt root. + #[serde(rename = "receiptTrie")] + pub receipts_root: H256, + /// State root. + pub state_root: H256, + /// Timestamp. + pub timestamp: Uint, + /// Transactions root. + #[serde(rename = "transactionsTrie")] + pub transactions_root: H256, + /// Uncles hash. + #[serde(rename = "uncleHash")] + pub uncles_hash: H256, } #[cfg(test)] mod tests { - use serde_json; - use blockchain::header::Header; + use blockchain::header::Header; + use serde_json; - #[test] - fn header_deserialization() { - let s = r#"{ + #[test] + fn header_deserialization() { + let s = r#"{ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1", "difficulty" : "0x020000", @@ -87,7 +87,7 @@ mod tests { "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "uncleHash" : 
"1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347" }"#; - let _deserialized: Header = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: Header = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/blockchain/mod.rs b/json/src/blockchain/mod.rs index 0a3b162e9..d532c3208 100644 --- a/json/src/blockchain/mod.rs +++ b/json/src/blockchain/mod.rs @@ -21,14 +21,15 @@ pub mod block; pub mod blockchain; pub mod header; pub mod state; -pub mod transaction; pub mod test; +pub mod transaction; -pub use self::account::Account; -pub use self::block::Block; -pub use self::blockchain::BlockChain; -pub use self::blockchain::Engine; -pub use self::header::Header; -pub use self::state::State; -pub use self::test::Test; -pub use self::transaction::Transaction; +pub use self::{ + account::Account, + block::Block, + blockchain::{BlockChain, Engine}, + header::Header, + state::State, + test::Test, + transaction::Transaction, +}; diff --git a/json/src/blockchain/state.rs b/json/src/blockchain/state.rs index e108c937f..1431f012c 100644 --- a/json/src/blockchain/state.rs +++ b/json/src/blockchain/state.rs @@ -16,19 +16,19 @@ //! Blockchain test state deserializer. -use std::collections::BTreeMap; -use hash::Address; use blockchain::account::Account; +use hash::Address; +use std::collections::BTreeMap; /// Blockchain test state deserializer. 
#[derive(Debug, PartialEq, Deserialize, Clone)] pub struct State(BTreeMap); impl IntoIterator for State { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; + type Item = as IntoIterator>::Item; + type IntoIter = as IntoIterator>::IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } } diff --git a/json/src/blockchain/test.rs b/json/src/blockchain/test.rs index d773aa3b5..0666d09d1 100644 --- a/json/src/blockchain/test.rs +++ b/json/src/blockchain/test.rs @@ -16,28 +16,29 @@ //! Blockchain test deserializer. -use std::collections::BTreeMap; -use std::io::Read; -use serde_json; -use serde_json::Error; use blockchain::blockchain::BlockChain; +use serde_json::{self, Error}; +use std::{collections::BTreeMap, io::Read}; /// Blockchain test deserializer. #[derive(Debug, PartialEq, Deserialize)] pub struct Test(BTreeMap); impl IntoIterator for Test { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; + type Item = as IntoIterator>::Item; + type IntoIter = as IntoIterator>::IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } } impl Test { - /// Loads test from json. - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + /// Loads test from json. + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } } diff --git a/json/src/blockchain/transaction.rs b/json/src/blockchain/transaction.rs index 4e519f394..f01ddb9d9 100644 --- a/json/src/blockchain/transaction.rs +++ b/json/src/blockchain/transaction.rs @@ -16,19 +16,19 @@ //! Blockchain test transaction deserialization. -use uint::Uint; use bytes::Bytes; +use uint::Uint; /// Blockchain test transaction deserialization. 
#[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Transaction { - data: Bytes, - gas_limit: Uint, - gas_price: Uint, - nonce: Uint, - r: Uint, - s: Uint, - v: Uint, - value: Uint + data: Bytes, + gas_limit: Uint, + gas_price: Uint, + nonce: Uint, + r: Uint, + s: Uint, + v: Uint, + value: Uint, } diff --git a/json/src/bytes.rs b/json/src/bytes.rs index 3fbdee238..919a81a1a 100644 --- a/json/src/bytes.rs +++ b/json/src/bytes.rs @@ -16,104 +16,115 @@ //! Lenient bytes json deserialization for test json files. -use std::fmt; -use std::str::FromStr; -use std::ops::Deref; use rustc_hex::FromHex; -use serde::{Deserialize, Deserializer}; -use serde::de::{Error, Visitor}; +use serde::{ + de::{Error, Visitor}, + Deserialize, Deserializer, +}; +use std::{fmt, ops::Deref, str::FromStr}; /// Lenient bytes json deserialization for test json files. #[derive(Default, Debug, PartialEq, Eq, Clone, PartialOrd, Ord)] pub struct Bytes(Vec); impl Bytes { - /// Creates bytes struct. - pub fn new(v: Vec) -> Self { - Bytes(v) - } + /// Creates bytes struct. 
+ pub fn new(v: Vec) -> Self { + Bytes(v) + } } impl Into> for Bytes { - fn into(self) -> Vec { - self.0 - } + fn into(self) -> Vec { + self.0 + } } impl Deref for Bytes { - type Target = [u8]; + type Target = [u8]; - fn deref(&self) -> &[u8] { - &self.0 - } + fn deref(&self) -> &[u8] { + &self.0 + } } impl FromStr for Bytes { - type Err = String; + type Err = String; - fn from_str(value: &str) -> Result { - let v = match value.len() { - 0 => vec![], - 2 if value.starts_with("0x") => vec![], - _ if value.starts_with("0x") && value.len() % 2 == 1 => { - let v = "0".to_owned() + &value[2..]; - FromHex::from_hex(v.as_str()).unwrap_or(vec![]) - }, - _ if value.starts_with("0x") => FromHex::from_hex(&value[2..]).unwrap_or(vec![]), - _ => FromHex::from_hex(value).unwrap_or(vec![]), - }; + fn from_str(value: &str) -> Result { + let v = match value.len() { + 0 => vec![], + 2 if value.starts_with("0x") => vec![], + _ if value.starts_with("0x") && value.len() % 2 == 1 => { + let v = "0".to_owned() + &value[2..]; + FromHex::from_hex(v.as_str()).unwrap_or(vec![]) + } + _ if value.starts_with("0x") => FromHex::from_hex(&value[2..]).unwrap_or(vec![]), + _ => FromHex::from_hex(value).unwrap_or(vec![]), + }; - Ok(Bytes(v)) - } + Ok(Bytes(v)) + } } impl<'a> Deserialize<'a> for Bytes { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(BytesVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(BytesVisitor) + } } struct BytesVisitor; impl<'a> Visitor<'a> for BytesVisitor { - type Value = Bytes; + type Value = Bytes; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a hex encoded string of bytes") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex encoded string of bytes") + } - fn visit_str(self, value: &str) -> Result where E: Error { - 
Bytes::from_str(value).map_err(Error::custom) - } + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + Bytes::from_str(value).map_err(Error::custom) + } - fn visit_string(self, value: String) -> Result where E: Error { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: Error, + { + self.visit_str(value.as_ref()) + } } #[cfg(test)] mod test { - use serde_json; - use bytes::Bytes; + use bytes::Bytes; + use serde_json; - #[test] - fn bytes_deserialization() { - let s = r#"["", "0x", "0x12", "1234", "0x001"]"#; - let deserialized: Vec = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, vec![ - Bytes(vec![]), - Bytes(vec![]), - Bytes(vec![0x12]), - Bytes(vec![0x12, 0x34]), - Bytes(vec![0, 1]) - ]); - } + #[test] + fn bytes_deserialization() { + let s = r#"["", "0x", "0x12", "1234", "0x001"]"#; + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + vec![ + Bytes(vec![]), + Bytes(vec![]), + Bytes(vec![0x12]), + Bytes(vec![0x12, 0x34]), + Bytes(vec![0, 1]) + ] + ); + } - #[test] - fn bytes_into() { - let bytes = Bytes(vec![0xff, 0x11]); - let v: Vec = bytes.into(); - assert_eq!(vec![0xff, 0x11], v); - } + #[test] + fn bytes_into() { + let bytes = Bytes(vec![0xff, 0x11]); + let v: Vec = bytes.into(); + assert_eq!(vec![0xff, 0x11], v); + } } diff --git a/json/src/hash.rs b/json/src/hash.rs index a025dc454..59a0c9e66 100644 --- a/json/src/hash.rs +++ b/json/src/hash.rs @@ -16,76 +16,95 @@ //! Lenient hash json deserialization for test json files. 
-use std::str::FromStr; -use std::fmt; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde::de::{Error, Visitor}; +use ethereum_types::{ + Bloom as Hash2048, H160 as Hash160, H256 as Hash256, H520 as Hash520, H64 as Hash64, +}; use rustc_hex::ToHex; -use ethereum_types::{H64 as Hash64, H160 as Hash160, H256 as Hash256, H520 as Hash520, Bloom as Hash2048}; +use serde::{ + de::{Error, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::{fmt, str::FromStr}; macro_rules! impl_hash { - ($name: ident, $inner: ident) => { - /// Lenient hash json deserialization for test json files. - #[derive(Default, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Clone)] - pub struct $name(pub $inner); + ($name: ident, $inner: ident) => { + /// Lenient hash json deserialization for test json files. + #[derive(Default, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Clone)] + pub struct $name(pub $inner); - impl From<$name> for $inner { - fn from(other: $name) -> $inner { - other.0 - } - } + impl From<$name> for $inner { + fn from(other: $name) -> $inner { + other.0 + } + } - impl From<$inner> for $name { - fn from(i: $inner) -> Self { - $name(i) - } - } + impl From<$inner> for $name { + fn from(i: $inner) -> Self { + $name(i) + } + } - impl<'a> Deserialize<'a> for $name { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { + impl<'a> Deserialize<'a> for $name { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + struct HashVisitor; - struct HashVisitor; + impl<'b> Visitor<'b> for HashVisitor { + type Value = $name; - impl<'b> Visitor<'b> for HashVisitor { - type Value = $name; + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a 0x-prefixed hex-encoded hash") + } - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed hex-encoded hash") - } + fn visit_str(self, value: &str) -> Result + where + E: Error, + { 
+ let value = match value.len() { + 0 => $inner::from(0), + 2 if value == "0x" => $inner::from(0), + _ if value.starts_with("0x") => { + $inner::from_str(&value[2..]).map_err(|e| { + Error::custom( + format!("Invalid hex value {}: {}", value, e).as_str(), + ) + })? + } + _ => $inner::from_str(value).map_err(|e| { + Error::custom( + format!("Invalid hex value {}: {}", value, e).as_str(), + ) + })?, + }; - fn visit_str(self, value: &str) -> Result where E: Error { - let value = match value.len() { - 0 => $inner::from(0), - 2 if value == "0x" => $inner::from(0), - _ if value.starts_with("0x") => $inner::from_str(&value[2..]).map_err(|e| { - Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) - })?, - _ => $inner::from_str(value).map_err(|e| { - Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) - })?, - }; + Ok($name(value)) + } - Ok($name(value)) - } + fn visit_string(self, value: String) -> Result + where + E: Error, + { + self.visit_str(value.as_ref()) + } + } - fn visit_string(self, value: String) -> Result where E: Error { - self.visit_str(value.as_ref()) - } - } + deserializer.deserialize_any(HashVisitor) + } + } - deserializer.deserialize_any(HashVisitor) - } - } - - impl Serialize for $name { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - let mut hex = "0x".to_owned(); - hex.push_str(&self.0.to_hex()); - serializer.serialize_str(&hex) - } - } - } + impl Serialize for $name { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut hex = "0x".to_owned(); + hex.push_str(&self.0.to_hex()); + serializer.serialize_str(&hex) + } + } + }; } impl_hash!(H64, Hash64); @@ -96,23 +115,34 @@ impl_hash!(Bloom, Hash2048); #[cfg(test)] mod test { - use std::str::FromStr; - use serde_json; - use ethereum_types; - use hash::H256; + use ethereum_types; + use hash::H256; + use serde_json; + use std::str::FromStr; - #[test] - fn hash_deserialization() { - let s = r#"["", 
"5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae"]"#; - let deserialized: Vec = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, vec![ - H256(ethereum_types::H256::from(0)), - H256(ethereum_types::H256::from_str("5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae").unwrap()) - ]); - } + #[test] + fn hash_deserialization() { + let s = r#"["", "5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae"]"#; + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + vec![ + H256(ethereum_types::H256::from(0)), + H256( + ethereum_types::H256::from_str( + "5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae" + ) + .unwrap() + ) + ] + ); + } - #[test] - fn hash_into() { - assert_eq!(ethereum_types::H256::from(0), H256(ethereum_types::H256::from(0)).into()); - } + #[test] + fn hash_into() { + assert_eq!( + ethereum_types::H256::from(0), + H256(ethereum_types::H256::from(0)).into() + ); + } } diff --git a/json/src/lib.rs b/json/src/lib.rs index d6857d198..e31fbe86b 100644 --- a/json/src/lib.rs +++ b/json/src/lib.rs @@ -14,23 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +extern crate ethereum_types; extern crate rustc_hex; extern crate serde; extern crate serde_json; -extern crate ethereum_types; -#[macro_use] extern crate serde_derive; +#[macro_use] +extern crate serde_derive; #[cfg(test)] extern crate macros; -pub mod hash; -pub mod uint; -pub mod bytes; pub mod blockchain; -pub mod spec; -pub mod trie; -pub mod vm; +pub mod bytes; +pub mod hash; pub mod maybe; +pub mod spec; pub mod state; -pub mod transaction; pub mod test; +pub mod transaction; +pub mod trie; +pub mod uint; +pub mod vm; diff --git a/json/src/maybe.rs b/json/src/maybe.rs index 4fd2ca60e..9922118f5 100644 --- a/json/src/maybe.rs +++ b/json/src/maybe.rs @@ -16,85 +16,105 @@ //! Deserializer of empty string values into optionals. 
-use std::fmt; -use std::marker::PhantomData; -use serde::{Deserialize, Deserializer}; -use serde::de::{Error, Visitor, IntoDeserializer}; +use serde::{ + de::{Error, IntoDeserializer, Visitor}, + Deserialize, Deserializer, +}; +use std::{fmt, marker::PhantomData}; /// Deserializer of empty string values into optionals. #[derive(Debug, PartialEq, Clone)] pub enum MaybeEmpty { - /// Some. - Some(T), - /// None. - None, + /// Some. + Some(T), + /// None. + None, } -impl<'a, T> Deserialize<'a> for MaybeEmpty where T: Deserialize<'a> { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(MaybeEmptyVisitor::new()) - } +impl<'a, T> Deserialize<'a> for MaybeEmpty +where + T: Deserialize<'a>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(MaybeEmptyVisitor::new()) + } } struct MaybeEmptyVisitor { - _phantom: PhantomData + _phantom: PhantomData, } impl MaybeEmptyVisitor { - fn new() -> Self { - MaybeEmptyVisitor { - _phantom: PhantomData - } - } + fn new() -> Self { + MaybeEmptyVisitor { + _phantom: PhantomData, + } + } } -impl<'a, T> Visitor<'a> for MaybeEmptyVisitor where T: Deserialize<'a> { - type Value = MaybeEmpty; +impl<'a, T> Visitor<'a> for MaybeEmptyVisitor +where + T: Deserialize<'a>, +{ + type Value = MaybeEmpty; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "an empty string or string-encoded type") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "an empty string or string-encoded type") + } - fn visit_str(self, value: &str) -> Result where E: Error { - self.visit_string(value.to_owned()) - } + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + self.visit_string(value.to_owned()) + } - fn visit_string(self, value: String) -> Result where E: Error { - match value.is_empty() { - true => Ok(MaybeEmpty::None), - false => { - 
T::deserialize(value.into_deserializer()).map(MaybeEmpty::Some) - } - } - } + fn visit_string(self, value: String) -> Result + where + E: Error, + { + match value.is_empty() { + true => Ok(MaybeEmpty::None), + false => T::deserialize(value.into_deserializer()).map(MaybeEmpty::Some), + } + } } impl Into> for MaybeEmpty { - fn into(self) -> Option { - match self { - MaybeEmpty::Some(s) => Some(s), - MaybeEmpty::None => None - } - } + fn into(self) -> Option { + match self { + MaybeEmpty::Some(s) => Some(s), + MaybeEmpty::None => None, + } + } } #[cfg(test)] mod tests { - use std::str::FromStr; - use serde_json; - use ethereum_types; - use hash::H256; - use maybe::MaybeEmpty; - - #[test] - fn maybe_deserialization() { - let s = r#"["", "5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae"]"#; - let deserialized: Vec> = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, vec![ - MaybeEmpty::None, - MaybeEmpty::Some(H256(ethereum_types::H256::from_str("5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae").unwrap())) - ]); - } + use ethereum_types; + use hash::H256; + use maybe::MaybeEmpty; + use serde_json; + use std::str::FromStr; + #[test] + fn maybe_deserialization() { + let s = r#"["", "5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae"]"#; + let deserialized: Vec> = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + vec![ + MaybeEmpty::None, + MaybeEmpty::Some(H256( + ethereum_types::H256::from_str( + "5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae" + ) + .unwrap() + )) + ] + ); + } } diff --git a/json/src/spec/account.rs b/json/src/spec/account.rs index 0b32a44a4..c91bb8eec 100644 --- a/json/src/spec/account.rs +++ b/json/src/spec/account.rs @@ -18,92 +18,95 @@ use std::collections::BTreeMap; -use uint::Uint; use bytes::Bytes; use spec::builtin::BuiltinCompat; +use uint::Uint; /// Spec account. 
#[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct Account { - /// Builtin contract. - pub builtin: Option, - /// Balance. - pub balance: Option, - /// Nonce. - pub nonce: Option, - /// Code. - pub code: Option, - /// Storage. - pub storage: Option>, - /// Constructor. - pub constructor: Option, + /// Builtin contract. + pub builtin: Option, + /// Balance. + pub balance: Option, + /// Nonce. + pub nonce: Option, + /// Code. + pub code: Option, + /// Storage. + pub storage: Option>, + /// Constructor. + pub constructor: Option, } impl Account { - /// Returns true if account does not have nonce, balance, code and storage. - pub fn is_empty(&self) -> bool { - self.balance.is_none() && self.nonce.is_none() && self.code.is_none() && self.storage.is_none() - } + /// Returns true if account does not have nonce, balance, code and storage. + pub fn is_empty(&self) -> bool { + self.balance.is_none() + && self.nonce.is_none() + && self.code.is_none() + && self.storage.is_none() + } } #[cfg(test)] mod tests { - use std::collections::BTreeMap; - use serde_json; - use spec::account::Account; - use ethereum_types::U256; - use uint::Uint; - use bytes::Bytes; + use bytes::Bytes; + use ethereum_types::U256; + use serde_json; + use spec::account::Account; + use std::collections::BTreeMap; + use uint::Uint; - #[test] - fn account_balance_missing_not_empty() { - let s = r#"{ + #[test] + fn account_balance_missing_not_empty() { + let s = r#"{ "nonce": "0", "code": "1234", "storage": { "0x7fffffffffffffff7fffffffffffffff": "0x1" } }"#; - let deserialized: Account = serde_json::from_str(s).unwrap(); - assert!(!deserialized.is_empty()); - } + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + } - #[test] - fn account_nonce_missing_not_empty() { - let s = r#"{ + #[test] + fn account_nonce_missing_not_empty() { + let s = r#"{ "balance": "1", "code": "1234", "storage": { 
"0x7fffffffffffffff7fffffffffffffff": "0x1" } }"#; - let deserialized: Account = serde_json::from_str(s).unwrap(); - assert!(!deserialized.is_empty()); - } + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + } - #[test] - fn account_code_missing_not_empty() { - let s = r#"{ + #[test] + fn account_code_missing_not_empty() { + let s = r#"{ "balance": "1", "nonce": "0", "storage": { "0x7fffffffffffffff7fffffffffffffff": "0x1" } }"#; - let deserialized: Account = serde_json::from_str(s).unwrap(); - assert!(!deserialized.is_empty()); - } + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + } - #[test] - fn account_storage_missing_not_empty() { - let s = r#"{ + #[test] + fn account_storage_missing_not_empty() { + let s = r#"{ "balance": "1", "nonce": "0", "code": "1234" }"#; - let deserialized: Account = serde_json::from_str(s).unwrap(); - assert!(!deserialized.is_empty()); - } + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + } - #[test] - fn account_empty() { - let s = r#"{ + #[test] + fn account_empty() { + let s = r#"{ "builtin": { "name": "ecrecover", "pricing": { @@ -114,13 +117,13 @@ mod tests { } } }"#; - let deserialized: Account = serde_json::from_str(s).unwrap(); - assert!(deserialized.is_empty()); - } + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(deserialized.is_empty()); + } - #[test] - fn account_deserialization() { - let s = r#"{ + #[test] + fn account_deserialization() { + let s = r#"{ "balance": "1", "nonce": "0", "code": "1234", @@ -134,29 +137,32 @@ mod tests { } } }"#; - let deserialized: Account = serde_json::from_str(s).unwrap(); - assert!(!deserialized.is_empty()); - assert_eq!(deserialized.balance.unwrap(), Uint(U256::from(1))); - assert_eq!(deserialized.nonce.unwrap(), Uint(U256::from(0))); - assert_eq!(deserialized.code.unwrap(), Bytes::new(vec![0x12, 0x34])); - 
assert!(deserialized.builtin.is_some()); // Further tested in builtin.rs - } + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + assert_eq!(deserialized.balance.unwrap(), Uint(U256::from(1))); + assert_eq!(deserialized.nonce.unwrap(), Uint(U256::from(0))); + assert_eq!(deserialized.code.unwrap(), Bytes::new(vec![0x12, 0x34])); + assert!(deserialized.builtin.is_some()); // Further tested in builtin.rs + } - #[test] - fn account_storage_deserialization() { - let s = r#"{ + #[test] + fn account_storage_deserialization() { + let s = r#"{ "balance": "1", "nonce": "0", "code": "1234", "storage": { "0x7fffffffffffffff7fffffffffffffff": "0x1" } }"#; - let deserialized: Account = serde_json::from_str(s).unwrap(); - assert!(!deserialized.is_empty()); - assert_eq!(deserialized.balance.unwrap(), Uint(U256::from(1))); - assert_eq!(deserialized.nonce.unwrap(), Uint(U256::from(0))); - assert_eq!(deserialized.code.unwrap(), Bytes::new(vec![0x12, 0x34])); - let mut storage = BTreeMap::new(); - storage.insert(Uint(U256::from("7fffffffffffffff7fffffffffffffff")), Uint(U256::from(1))); - assert_eq!(deserialized.storage.unwrap(), storage); - } + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + assert_eq!(deserialized.balance.unwrap(), Uint(U256::from(1))); + assert_eq!(deserialized.nonce.unwrap(), Uint(U256::from(0))); + assert_eq!(deserialized.code.unwrap(), Bytes::new(vec![0x12, 0x34])); + let mut storage = BTreeMap::new(); + storage.insert( + Uint(U256::from("7fffffffffffffff7fffffffffffffff")), + Uint(U256::from(1)), + ); + assert_eq!(deserialized.storage.unwrap(), storage); + } } diff --git a/json/src/spec/authority_round.rs b/json/src/spec/authority_round.rs index 61936a5f9..102273ae1 100644 --- a/json/src/spec/authority_round.rs +++ b/json/src/spec/authority_round.rs @@ -16,70 +16,69 @@ //! Authority params deserialization. 
+use super::ValidatorSet; +use bytes::Bytes; use hash::Address; use uint::Uint; -use bytes::Bytes; -use super::ValidatorSet; /// Authority params deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct AuthorityRoundParams { - /// Block duration, in seconds. - pub step_duration: Uint, - /// Valid authorities - pub validators: ValidatorSet, - /// Starting step. Determined automatically if not specified. - /// To be used for testing only. - pub start_step: Option, - /// Block at which score validation should start. - pub validate_score_transition: Option, - /// Block from which monotonic steps start. - pub validate_step_transition: Option, - /// Whether transitions should be immediate. - pub immediate_transitions: Option, - /// Reward per block in wei. - pub block_reward: Option, - /// Block at which the block reward contract should start being used. - pub block_reward_contract_transition: Option, - /// Block reward contract address (setting the block reward contract - /// overrides the static block reward definition). - pub block_reward_contract_address: Option
, - /// Block reward code. This overrides the block reward contract address. - pub block_reward_contract_code: Option, - /// Block at which maximum uncle count should be considered. - pub maximum_uncle_count_transition: Option, - /// Maximum number of accepted uncles. - pub maximum_uncle_count: Option, - /// Block at which empty step messages should start. - pub empty_steps_transition: Option, - /// Maximum number of accepted empty steps. - pub maximum_empty_steps: Option, - /// Strict validation of empty steps transition block. - pub strict_empty_steps_transition: Option, + /// Block duration, in seconds. + pub step_duration: Uint, + /// Valid authorities + pub validators: ValidatorSet, + /// Starting step. Determined automatically if not specified. + /// To be used for testing only. + pub start_step: Option, + /// Block at which score validation should start. + pub validate_score_transition: Option, + /// Block from which monotonic steps start. + pub validate_step_transition: Option, + /// Whether transitions should be immediate. + pub immediate_transitions: Option, + /// Reward per block in wei. + pub block_reward: Option, + /// Block at which the block reward contract should start being used. + pub block_reward_contract_transition: Option, + /// Block reward contract address (setting the block reward contract + /// overrides the static block reward definition). + pub block_reward_contract_address: Option
, + /// Block reward code. This overrides the block reward contract address. + pub block_reward_contract_code: Option, + /// Block at which maximum uncle count should be considered. + pub maximum_uncle_count_transition: Option, + /// Maximum number of accepted uncles. + pub maximum_uncle_count: Option, + /// Block at which empty step messages should start. + pub empty_steps_transition: Option, + /// Maximum number of accepted empty steps. + pub maximum_empty_steps: Option, + /// Strict validation of empty steps transition block. + pub strict_empty_steps_transition: Option, } /// Authority engine deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub struct AuthorityRound { - /// Ethash params. - pub params: AuthorityRoundParams, + /// Ethash params. + pub params: AuthorityRoundParams, } #[cfg(test)] mod tests { - use ethereum_types::{U256, H160}; - use uint::Uint; - use serde_json; - use hash::Address; - use spec::validator_set::ValidatorSet; - use spec::authority_round::AuthorityRound; + use ethereum_types::{H160, U256}; + use hash::Address; + use serde_json; + use spec::{authority_round::AuthorityRound, validator_set::ValidatorSet}; + use uint::Uint; - #[test] - fn authority_round_deserialization() { - let s = r#"{ + #[test] + fn authority_round_deserialization() { + let s = r#"{ "params": { "stepDuration": "0x02", "validators": { @@ -93,13 +92,23 @@ mod tests { } }"#; - let deserialized: AuthorityRound = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized.params.step_duration, Uint(U256::from(0x02))); - assert_eq!(deserialized.params.validators, ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))])); - assert_eq!(deserialized.params.start_step, Some(Uint(U256::from(24)))); - assert_eq!(deserialized.params.immediate_transitions, None); - assert_eq!(deserialized.params.maximum_uncle_count_transition, Some(Uint(10_000_000.into()))); - 
assert_eq!(deserialized.params.maximum_uncle_count, Some(Uint(5.into()))); - - } + let deserialized: AuthorityRound = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.params.step_duration, Uint(U256::from(0x02))); + assert_eq!( + deserialized.params.validators, + ValidatorSet::List(vec![Address(H160::from( + "0xc6d9d2cd449a754c494264e1809c50e34d64562b" + ))]) + ); + assert_eq!(deserialized.params.start_step, Some(Uint(U256::from(24)))); + assert_eq!(deserialized.params.immediate_transitions, None); + assert_eq!( + deserialized.params.maximum_uncle_count_transition, + Some(Uint(10_000_000.into())) + ); + assert_eq!( + deserialized.params.maximum_uncle_count, + Some(Uint(5.into())) + ); + } } diff --git a/json/src/spec/basic_authority.rs b/json/src/spec/basic_authority.rs index 195b89beb..74b35f4ae 100644 --- a/json/src/spec/basic_authority.rs +++ b/json/src/spec/basic_authority.rs @@ -16,40 +16,39 @@ //! Authority params deserialization. -use uint::Uint; use super::ValidatorSet; +use uint::Uint; /// Authority params deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct BasicAuthorityParams { - /// Block duration. - pub duration_limit: Uint, - /// Valid authorities - pub validators: ValidatorSet, + /// Block duration. + pub duration_limit: Uint, + /// Valid authorities + pub validators: ValidatorSet, } /// Authority engine deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub struct BasicAuthority { - /// Ethash params. - pub params: BasicAuthorityParams, + /// Ethash params. 
+ pub params: BasicAuthorityParams, } #[cfg(test)] mod tests { - use serde_json; - use uint::Uint; - use ethereum_types::{U256, H160}; - use hash::Address; - use spec::basic_authority::BasicAuthority; - use spec::validator_set::ValidatorSet; + use ethereum_types::{H160, U256}; + use hash::Address; + use serde_json; + use spec::{basic_authority::BasicAuthority, validator_set::ValidatorSet}; + use uint::Uint; - #[test] - fn basic_authority_deserialization() { - let s = r#"{ + #[test] + fn basic_authority_deserialization() { + let s = r#"{ "params": { "durationLimit": "0x0d", "validators" : { @@ -58,10 +57,12 @@ mod tests { } }"#; - let deserialized: BasicAuthority = serde_json::from_str(s).unwrap(); + let deserialized: BasicAuthority = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized.params.duration_limit, Uint(U256::from(0x0d))); - let vs = ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))]); - assert_eq!(deserialized.params.validators, vs); - } + assert_eq!(deserialized.params.duration_limit, Uint(U256::from(0x0d))); + let vs = ValidatorSet::List(vec![Address(H160::from( + "0xc6d9d2cd449a754c494264e1809c50e34d64562b", + ))]); + assert_eq!(deserialized.params.validators, vs); + } } diff --git a/json/src/spec/builtin.rs b/json/src/spec/builtin.rs index 77b5bcce6..24edd4863 100644 --- a/json/src/spec/builtin.rs +++ b/json/src/spec/builtin.rs @@ -16,43 +16,43 @@ //! Spec builtin deserialization. -use uint::Uint; use std::collections::BTreeMap; +use uint::Uint; /// Linear pricing. #[derive(Debug, PartialEq, Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct Linear { - /// Base price. - pub base: u64, - /// Price for word. - pub word: u64, + /// Base price. + pub base: u64, + /// Price for word. + pub word: u64, } /// Pricing for modular exponentiation. #[derive(Debug, PartialEq, Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct Modexp { - /// Price divisor. - pub divisor: u64, + /// Price divisor. 
+ pub divisor: u64, } /// Pricing for constant alt_bn128 operations (ECADD and ECMUL) #[derive(Debug, PartialEq, Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct AltBn128ConstOperations { - /// price - pub price: u64, + /// price + pub price: u64, } /// Pricing for alt_bn128_pairing. #[derive(Debug, PartialEq, Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct AltBn128Pairing { - /// Base price. - pub base: u64, - /// Price per point pair. - pub pair: u64, + /// Base price. + pub base: u64, + /// Price per point pair. + pub pair: u64, } /// Pricing variants. @@ -60,57 +60,66 @@ pub struct AltBn128Pairing { #[serde(deny_unknown_fields)] #[serde(rename_all = "snake_case")] pub enum Pricing { - /// Pricing for Blake2 compression function: each call costs the same amount per round. - Blake2F { - /// Price per round of Blake2 compression function. - gas_per_round: u64, - }, - /// Linear pricing. - Linear(Linear), - /// Pricing for modular exponentiation. - Modexp(Modexp), - /// Pricing for alt_bn128_pairing exponentiation. - AltBn128Pairing(AltBn128Pairing), - /// Pricing for constant alt_bn128 operations - AltBn128ConstOperations(AltBn128ConstOperations), + /// Pricing for Blake2 compression function: each call costs the same amount per round. + Blake2F { + /// Price per round of Blake2 compression function. + gas_per_round: u64, + }, + /// Linear pricing. + Linear(Linear), + /// Pricing for modular exponentiation. + Modexp(Modexp), + /// Pricing for alt_bn128_pairing exponentiation. + AltBn128Pairing(AltBn128Pairing), + /// Pricing for constant alt_bn128 operations + AltBn128ConstOperations(AltBn128ConstOperations), } /// Builtin compability layer #[derive(Debug, PartialEq, Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct BuiltinCompat { - /// Builtin name. - name: String, - /// Builtin pricing. - pricing: PricingCompat, - /// Activation block. - activate_at: Option, + /// Builtin name. + name: String, + /// Builtin pricing. 
+ pricing: PricingCompat, + /// Activation block. + activate_at: Option, } /// Spec builtin. #[derive(Debug, PartialEq, Clone)] pub struct Builtin { - /// Builtin name. - pub name: String, - /// Builtin pricing. - pub pricing: BTreeMap, + /// Builtin name. + pub name: String, + /// Builtin pricing. + pub pricing: BTreeMap, } impl From for Builtin { - fn from(legacy: BuiltinCompat) -> Self { - let pricing = match legacy.pricing { - PricingCompat::Single(pricing) => { - let mut map = BTreeMap::new(); - let activate_at: u64 = legacy.activate_at.map_or(0, Into::into); - map.insert(activate_at, PricingAt { info: None, price: pricing }); - map - } - PricingCompat::Multi(pricings) => { - pricings.into_iter().map(|(a, p)| (a.into(), p)).collect() - } - }; - Self { name: legacy.name, pricing } - } + fn from(legacy: BuiltinCompat) -> Self { + let pricing = match legacy.pricing { + PricingCompat::Single(pricing) => { + let mut map = BTreeMap::new(); + let activate_at: u64 = legacy.activate_at.map_or(0, Into::into); + map.insert( + activate_at, + PricingAt { + info: None, + price: pricing, + }, + ); + map + } + PricingCompat::Multi(pricings) => { + pricings.into_iter().map(|(a, p)| (a.into(), p)).collect() + } + }; + Self { + name: legacy.name, + pricing, + } + } } /// Compability layer for different pricings @@ -119,48 +128,54 @@ impl From for Builtin { #[serde(deny_unknown_fields)] #[serde(untagged)] enum PricingCompat { - /// Single builtin - Single(Pricing), - /// Multiple builtins - Multi(BTreeMap), + /// Single builtin + Single(Pricing), + /// Multiple builtins + Multi(BTreeMap), } /// Price for a builtin, with the block number to activate it on #[derive(Debug, PartialEq, Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct PricingAt { - /// Description of the activation, e.g. "PunyPony HF, March 12, 2025". - pub info: Option, - /// Builtin pricing. - pub price: Pricing, + /// Description of the activation, e.g. "PunyPony HF, March 12, 2025". 
+ pub info: Option, + /// Builtin pricing. + pub price: Pricing, } #[cfg(test)] mod tests { - use serde_json; - use uint::Uint; - use super::{Builtin, BuiltinCompat, BTreeMap, Pricing, PricingAt, Linear, Modexp, AltBn128ConstOperations}; - use macros::map; + use super::{ + AltBn128ConstOperations, BTreeMap, Builtin, BuiltinCompat, Linear, Modexp, Pricing, + PricingAt, + }; + use macros::map; + use serde_json; + use uint::Uint; - #[test] - fn builtin_deserialization() { - let s = r#"{ + #[test] + fn builtin_deserialization() { + let s = r#"{ "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } }"#; - let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); - assert_eq!(builtin.name, "ecrecover"); - assert_eq!(builtin.pricing, map![ - 0 => PricingAt { - info: None, - price: Pricing::Linear(Linear { base: 3000, word: 0 }) - } - ]); - } + let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); + assert_eq!(builtin.name, "ecrecover"); + assert_eq!( + builtin.pricing, + map![ + 0 => PricingAt { + info: None, + price: Pricing::Linear(Linear { base: 3000, word: 0 }) + } + ] + ); + } - #[test] - fn deserialize_multiple_pricings() { - let s = r#"{ + #[test] + fn deserialize_multiple_pricings() { + let s = r#"{ "name": "ecrecover", "pricing": { "0": { @@ -172,40 +187,46 @@ mod tests { } } }"#; - let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); - assert_eq!(builtin.name, "ecrecover"); - assert_eq!(builtin.pricing, map![ - 0 => PricingAt { - info: None, - price: Pricing::Linear(Linear { base: 3000, word: 0 }) - }, - 500 => PricingAt { - info: Some(String::from("enable fake EIP at block 500")), - price: Pricing::Linear(Linear { base: 10, word: 0 }) - } - ]); - } + let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); + assert_eq!(builtin.name, "ecrecover"); + assert_eq!( + builtin.pricing, + map![ + 0 => PricingAt { + info: None, + price: Pricing::Linear(Linear { base: 3000, word: 0 }) + }, + 500 => PricingAt { 
+ info: Some(String::from("enable fake EIP at block 500")), + price: Pricing::Linear(Linear { base: 10, word: 0 }) + } + ] + ); + } - #[test] - fn deserialization_blake2_f_builtin() { - let s = r#"{ + #[test] + fn deserialization_blake2_f_builtin() { + let s = r#"{ "name": "blake2_f", "activate_at": "0xffffff", "pricing": { "blake2_f": { "gas_per_round": 123 } } }"#; - let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); - assert_eq!(builtin.name, "blake2_f"); - assert_eq!(builtin.pricing, map![ - 0xffffff => PricingAt { - info: None, - price: Pricing::Blake2F { gas_per_round: 123 } - } - ]); - } + let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); + assert_eq!(builtin.name, "blake2_f"); + assert_eq!( + builtin.pricing, + map![ + 0xffffff => PricingAt { + info: None, + price: Pricing::Blake2F { gas_per_round: 123 } + } + ] + ); + } - #[test] - fn deserialization_alt_bn128_const_operations() { - let s = r#"{ + #[test] + fn deserialization_alt_bn128_const_operations() { + let s = r#"{ "name": "alt_bn128_mul", "pricing": { "100500": { @@ -213,33 +234,39 @@ mod tests { } } }"#; - let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); - assert_eq!(builtin.name, "alt_bn128_mul"); - assert_eq!(builtin.pricing, map![ - 100500 => PricingAt { - info: None, - price: Pricing::AltBn128ConstOperations(AltBn128ConstOperations { - price: 123, - }), - } - ]); - } + let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); + assert_eq!(builtin.name, "alt_bn128_mul"); + assert_eq!( + builtin.pricing, + map![ + 100500 => PricingAt { + info: None, + price: Pricing::AltBn128ConstOperations(AltBn128ConstOperations { + price: 123, + }), + } + ] + ); + } - #[test] - fn activate_at() { - let s = r#"{ + #[test] + fn activate_at() { + let s = r#"{ "name": "late_start", "activate_at": 100000, "pricing": { "modexp": { "divisor": 5 } } }"#; - let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); - assert_eq!(builtin.name, "late_start"); 
- assert_eq!(builtin.pricing, map![ - 100_000 => PricingAt { - info: None, - price: Pricing::Modexp(Modexp { divisor: 5 }) - } - ]); - } + let builtin: Builtin = serde_json::from_str::(s).unwrap().into(); + assert_eq!(builtin.name, "late_start"); + assert_eq!( + builtin.pricing, + map![ + 100_000 => PricingAt { + info: None, + price: Pricing::Modexp(Modexp { divisor: 5 }) + } + ] + ); + } } diff --git a/json/src/spec/clique.rs b/json/src/spec/clique.rs index 64be9c569..e1bb34bb6 100644 --- a/json/src/spec/clique.rs +++ b/json/src/spec/clique.rs @@ -21,37 +21,37 @@ use std::num::NonZeroU64; /// Clique params deserialization. #[derive(Debug, PartialEq, Deserialize)] pub struct CliqueParams { - /// period as defined in EIP - pub period: Option, - /// epoch length as defined in EIP - pub epoch: Option + /// period as defined in EIP + pub period: Option, + /// epoch length as defined in EIP + pub epoch: Option, } /// Clique engine deserialization. #[derive(Debug, PartialEq, Deserialize)] pub struct Clique { - /// CliqueEngine params - pub params: CliqueParams, + /// CliqueEngine params + pub params: CliqueParams, } #[cfg(test)] mod tests { - use serde_json; - use uint::Uint; - use ethereum_types::U256; - use super::*; + use super::*; + use ethereum_types::U256; + use serde_json; + use uint::Uint; - #[test] - fn clique_deserialization() { - let s = r#"{ + #[test] + fn clique_deserialization() { + let s = r#"{ "params": { "period": 5, "epoch": 30000 } }"#; - let deserialized: Clique = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized.params.period, Some(5u64)); - assert_eq!(deserialized.params.epoch, NonZeroU64::new(30000)); - } + let deserialized: Clique = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.params.period, Some(5u64)); + assert_eq!(deserialized.params.epoch, NonZeroU64::new(30000)); + } } diff --git a/json/src/spec/engine.rs b/json/src/spec/engine.rs index cfa1d8caf..f1ba63207 100644 --- a/json/src/spec/engine.rs +++ 
b/json/src/spec/engine.rs @@ -16,36 +16,36 @@ //! Engine deserialization. -use super::{Ethash, BasicAuthority, AuthorityRound, NullEngine, InstantSeal, Clique}; +use super::{AuthorityRound, BasicAuthority, Clique, Ethash, InstantSeal, NullEngine}; /// Engine deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub enum Engine { - /// Null engine. - Null(NullEngine), - /// Instantly sealing engine. - InstantSeal(Option), - /// Ethash engine. - #[serde(rename = "Ethash")] - Ethash(Ethash), - /// BasicAuthority engine. - BasicAuthority(BasicAuthority), - /// AuthorityRound engine. - AuthorityRound(AuthorityRound), - /// Clique engine. - Clique(Clique) + /// Null engine. + Null(NullEngine), + /// Instantly sealing engine. + InstantSeal(Option), + /// Ethash engine. + #[serde(rename = "Ethash")] + Ethash(Ethash), + /// BasicAuthority engine. + BasicAuthority(BasicAuthority), + /// AuthorityRound engine. + AuthorityRound(AuthorityRound), + /// Clique engine. + Clique(Clique), } #[cfg(test)] mod tests { - use serde_json; - use spec::Engine; + use serde_json; + use spec::Engine; - #[test] - fn engine_deserialization() { - let s = r#"{ + #[test] + fn engine_deserialization() { + let s = r#"{ "null": { "params": { "blockReward": "0x0d" @@ -53,33 +53,33 @@ mod tests { } }"#; - let deserialized: Engine = serde_json::from_str(s).unwrap(); - match deserialized { - Engine::Null(_) => {}, // unit test in its own file. - _ => panic!(), - } + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::Null(_) => {} // unit test in its own file. + _ => panic!(), + } - let s = r#"{ + let s = r#"{ "instantSeal": {"params": {}} }"#; - let deserialized: Engine = serde_json::from_str(s).unwrap(); - match deserialized { - Engine::InstantSeal(_) => {}, // instant seal is unit tested in its own file. 
- _ => panic!(), - }; + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::InstantSeal(_) => {} // instant seal is unit tested in its own file. + _ => panic!(), + }; - let s = r#"{ + let s = r#"{ "instantSeal": null }"#; - let deserialized: Engine = serde_json::from_str(s).unwrap(); - match deserialized { - Engine::InstantSeal(_) => {}, // instant seal is unit tested in its own file. - _ => panic!(), - }; + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::InstantSeal(_) => {} // instant seal is unit tested in its own file. + _ => panic!(), + }; - let s = r#"{ + let s = r#"{ "Ethash": { "params": { "minimumDifficulty": "0x020000", @@ -93,13 +93,13 @@ mod tests { } }"#; - let deserialized: Engine = serde_json::from_str(s).unwrap(); - match deserialized { - Engine::Ethash(_) => {}, // ethash is unit tested in its own file. - _ => panic!(), - }; + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::Ethash(_) => {} // ethash is unit tested in its own file. + _ => panic!(), + }; - let s = r#"{ + let s = r#"{ "basicAuthority": { "params": { "durationLimit": "0x0d", @@ -109,13 +109,13 @@ mod tests { } } }"#; - let deserialized: Engine = serde_json::from_str(s).unwrap(); - match deserialized { - Engine::BasicAuthority(_) => {}, // basicAuthority is unit tested in its own file. - _ => panic!(), - }; + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::BasicAuthority(_) => {} // basicAuthority is unit tested in its own file. + _ => panic!(), + }; - let s = r#"{ + let s = r#"{ "authorityRound": { "params": { "stepDuration": "0x02", @@ -127,13 +127,13 @@ mod tests { } } }"#; - let deserialized: Engine = serde_json::from_str(s).unwrap(); - match deserialized { - Engine::AuthorityRound(_) => {}, // AuthorityRound is unit tested in its own file. 
- _ => panic!(), - }; + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::AuthorityRound(_) => {} // AuthorityRound is unit tested in its own file. + _ => panic!(), + }; - let s = r#"{ + let s = r#"{ "clique": { "params": { "period": 15, @@ -141,10 +141,10 @@ mod tests { } } }"#; - let deserialized: Engine = serde_json::from_str(s).unwrap(); - match deserialized { - Engine::Clique(_) => {}, // Clique is unit tested in its own file. - _ => panic!(), - }; - } + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::Clique(_) => {} // Clique is unit tested in its own file. + _ => panic!(), + }; + } } diff --git a/json/src/spec/ethash.rs b/json/src/spec/ethash.rs index 6051ac90d..6fe97f027 100644 --- a/json/src/spec/ethash.rs +++ b/json/src/spec/ethash.rs @@ -16,18 +16,18 @@ //! Ethash params deserialization. -use std::collections::BTreeMap; -use uint::{self, Uint}; use bytes::Bytes; use hash::Address; +use std::collections::BTreeMap; +use uint::{self, Uint}; /// Deserializable doppelganger of block rewards for EthashParams #[derive(Clone, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(untagged)] pub enum BlockReward { - Single(Uint), - Multi(BTreeMap), + Single(Uint), + Multi(BTreeMap), } /// Deserializable doppelganger of EthashParams. @@ -35,90 +35,90 @@ pub enum BlockReward { #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct EthashParams { - /// See main EthashParams docs. - #[serde(deserialize_with="uint::validate_non_zero")] - pub minimum_difficulty: Uint, - /// See main EthashParams docs. - #[serde(deserialize_with="uint::validate_non_zero")] - pub difficulty_bound_divisor: Uint, - /// See main EthashParams docs. - #[serde(default, deserialize_with="uint::validate_optional_non_zero")] - pub difficulty_increment_divisor: Option, - /// See main EthashParams docs. 
- #[serde(default, deserialize_with="uint::validate_optional_non_zero")] - pub metropolis_difficulty_increment_divisor: Option, - /// See main EthashParams docs. - pub duration_limit: Option, + /// See main EthashParams docs. + #[serde(deserialize_with = "uint::validate_non_zero")] + pub minimum_difficulty: Uint, + /// See main EthashParams docs. + #[serde(deserialize_with = "uint::validate_non_zero")] + pub difficulty_bound_divisor: Uint, + /// See main EthashParams docs. + #[serde(default, deserialize_with = "uint::validate_optional_non_zero")] + pub difficulty_increment_divisor: Option, + /// See main EthashParams docs. + #[serde(default, deserialize_with = "uint::validate_optional_non_zero")] + pub metropolis_difficulty_increment_divisor: Option, + /// See main EthashParams docs. + pub duration_limit: Option, - /// See main EthashParams docs. - pub homestead_transition: Option, - /// Reward per block in wei. - pub block_reward: Option, - /// Block at which the block reward contract should start being used. - pub block_reward_contract_transition: Option, - /// Block reward contract address (setting the block reward contract - /// overrides all other block reward parameters). - pub block_reward_contract_address: Option
, - /// Block reward code. This overrides the block reward contract address. - pub block_reward_contract_code: Option, + /// See main EthashParams docs. + pub homestead_transition: Option, + /// Reward per block in wei. + pub block_reward: Option, + /// Block at which the block reward contract should start being used. + pub block_reward_contract_transition: Option, + /// Block reward contract address (setting the block reward contract + /// overrides all other block reward parameters). + pub block_reward_contract_address: Option
, + /// Block reward code. This overrides the block reward contract address. + pub block_reward_contract_code: Option, - /// See main EthashParams docs. - pub dao_hardfork_transition: Option, - /// See main EthashParams docs. - pub dao_hardfork_beneficiary: Option
, - /// See main EthashParams docs. - pub dao_hardfork_accounts: Option>, + /// See main EthashParams docs. + pub dao_hardfork_transition: Option, + /// See main EthashParams docs. + pub dao_hardfork_beneficiary: Option
, + /// See main EthashParams docs. + pub dao_hardfork_accounts: Option>, - /// See main EthashParams docs. - pub difficulty_hardfork_transition: Option, - /// See main EthashParams docs. - #[serde(default, deserialize_with="uint::validate_optional_non_zero")] - pub difficulty_hardfork_bound_divisor: Option, - /// See main EthashParams docs. - pub bomb_defuse_transition: Option, + /// See main EthashParams docs. + pub difficulty_hardfork_transition: Option, + /// See main EthashParams docs. + #[serde(default, deserialize_with = "uint::validate_optional_non_zero")] + pub difficulty_hardfork_bound_divisor: Option, + /// See main EthashParams docs. + pub bomb_defuse_transition: Option, - /// See main EthashParams docs. - pub eip100b_transition: Option, + /// See main EthashParams docs. + pub eip100b_transition: Option, - /// See main EthashParams docs. - pub ecip1010_pause_transition: Option, - /// See main EthashParams docs. - pub ecip1010_continue_transition: Option, + /// See main EthashParams docs. + pub ecip1010_pause_transition: Option, + /// See main EthashParams docs. + pub ecip1010_continue_transition: Option, - /// See main EthashParams docs. - pub ecip1017_era_rounds: Option, + /// See main EthashParams docs. + pub ecip1017_era_rounds: Option, - /// Delays of difficulty bombs. - pub difficulty_bomb_delays: Option>, + /// Delays of difficulty bombs. + pub difficulty_bomb_delays: Option>, - /// EXPIP-2 block height - pub expip2_transition: Option, - /// EXPIP-2 duration limit - pub expip2_duration_limit: Option, - /// Block to transition to progpow - #[serde(rename="progpowTransition")] - pub progpow_transition: Option, + /// EXPIP-2 block height + pub expip2_transition: Option, + /// EXPIP-2 duration limit + pub expip2_duration_limit: Option, + /// Block to transition to progpow + #[serde(rename = "progpowTransition")] + pub progpow_transition: Option, } /// Ethash engine deserialization. 
#[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub struct Ethash { - /// Ethash params. - pub params: EthashParams, + /// Ethash params. + pub params: EthashParams, } #[cfg(test)] mod tests { - use serde_json; - use uint::Uint; - use ethereum_types::{H160, U256}; - use hash::Address; - use spec::ethash::{Ethash, EthashParams, BlockReward}; + use ethereum_types::{H160, U256}; + use hash::Address; + use serde_json; + use spec::ethash::{BlockReward, Ethash, EthashParams}; + use uint::Uint; - #[test] - fn ethash_deserialization() { - let s = r#"{ + #[test] + fn ethash_deserialization() { + let s = r#"{ "params": { "minimumDifficulty": "0x020000", "difficultyBoundDivisor": "0x0800", @@ -156,109 +156,117 @@ mod tests { } }"#; - let deserialized: Ethash = serde_json::from_str(s).unwrap(); + let deserialized: Ethash = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, Ethash { - params: EthashParams { - minimum_difficulty: Uint(U256::from(0x020000)), - difficulty_bound_divisor: Uint(U256::from(0x0800)), - difficulty_increment_divisor: None, - metropolis_difficulty_increment_divisor: None, - duration_limit: Some(Uint(U256::from(0x0d))), - homestead_transition: Some(Uint(U256::from(0x42))), - block_reward: Some(BlockReward::Single(Uint(U256::from(0x100)))), - block_reward_contract_address: None, - block_reward_contract_code: None, - block_reward_contract_transition: None, - dao_hardfork_transition: Some(Uint(U256::from(0x08))), - dao_hardfork_beneficiary: Some(Address(H160::from("0xabcabcabcabcabcabcabcabcabcabcabcabcabca"))), - dao_hardfork_accounts: Some(vec![ - Address(H160::from("0x304a554a310c7e546dfe434669c62820b7d83490")), - Address(H160::from("0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79")), - Address(H160::from("0xfe24cdd8648121a43a7c86d289be4dd2951ed49f")), - Address(H160::from("0x17802f43a0137c506ba92291391a8a8f207f487d")), - Address(H160::from("0xb136707642a4ea12fb4bae820f03d2562ebff487")), - 
Address(H160::from("0xdbe9b615a3ae8709af8b93336ce9b477e4ac0940")), - Address(H160::from("0xf14c14075d6c4ed84b86798af0956deef67365b5")), - Address(H160::from("0xca544e5c4687d109611d0f8f928b53a25af72448")), - Address(H160::from("0xaeeb8ff27288bdabc0fa5ebb731b6f409507516c")), - Address(H160::from("0xcbb9d3703e651b0d496cdefb8b92c25aeb2171f7")), - Address(H160::from("0xaccc230e8a6e5be9160b8cdf2864dd2a001c28b6")), - Address(H160::from("0x2b3455ec7fedf16e646268bf88846bd7a2319bb2")), - Address(H160::from("0x4613f3bca5c44ea06337a9e439fbc6d42e501d0a")), - Address(H160::from("0xd343b217de44030afaa275f54d31a9317c7f441e")), - Address(H160::from("0x84ef4b2357079cd7a7c69fd7a37cd0609a679106")), - Address(H160::from("0xda2fef9e4a3230988ff17df2165440f37e8b1708")), - Address(H160::from("0xf4c64518ea10f995918a454158c6b61407ea345c")), - Address(H160::from("0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97")), - Address(H160::from("0xbb9bc244d798123fde783fcc1c72d3bb8c189413")), - Address(H160::from("0x807640a13483f8ac783c557fcdf27be11ea4ac7a")), - ]), - difficulty_hardfork_transition: Some(Uint(U256::from(0x59d9))), - difficulty_hardfork_bound_divisor: Some(Uint(U256::from(0x0200))), - bomb_defuse_transition: Some(Uint(U256::from(0x41))), - eip100b_transition: Some(Uint(U256::from(0x42))), - ecip1010_pause_transition: None, - ecip1010_continue_transition: None, - ecip1017_era_rounds: None, - expip2_transition: None, - expip2_duration_limit: None, - progpow_transition: None, - difficulty_bomb_delays: None, - } - }); - } + assert_eq!( + deserialized, + Ethash { + params: EthashParams { + minimum_difficulty: Uint(U256::from(0x020000)), + difficulty_bound_divisor: Uint(U256::from(0x0800)), + difficulty_increment_divisor: None, + metropolis_difficulty_increment_divisor: None, + duration_limit: Some(Uint(U256::from(0x0d))), + homestead_transition: Some(Uint(U256::from(0x42))), + block_reward: Some(BlockReward::Single(Uint(U256::from(0x100)))), + block_reward_contract_address: None, + 
block_reward_contract_code: None, + block_reward_contract_transition: None, + dao_hardfork_transition: Some(Uint(U256::from(0x08))), + dao_hardfork_beneficiary: Some(Address(H160::from( + "0xabcabcabcabcabcabcabcabcabcabcabcabcabca" + ))), + dao_hardfork_accounts: Some(vec![ + Address(H160::from("0x304a554a310c7e546dfe434669c62820b7d83490")), + Address(H160::from("0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79")), + Address(H160::from("0xfe24cdd8648121a43a7c86d289be4dd2951ed49f")), + Address(H160::from("0x17802f43a0137c506ba92291391a8a8f207f487d")), + Address(H160::from("0xb136707642a4ea12fb4bae820f03d2562ebff487")), + Address(H160::from("0xdbe9b615a3ae8709af8b93336ce9b477e4ac0940")), + Address(H160::from("0xf14c14075d6c4ed84b86798af0956deef67365b5")), + Address(H160::from("0xca544e5c4687d109611d0f8f928b53a25af72448")), + Address(H160::from("0xaeeb8ff27288bdabc0fa5ebb731b6f409507516c")), + Address(H160::from("0xcbb9d3703e651b0d496cdefb8b92c25aeb2171f7")), + Address(H160::from("0xaccc230e8a6e5be9160b8cdf2864dd2a001c28b6")), + Address(H160::from("0x2b3455ec7fedf16e646268bf88846bd7a2319bb2")), + Address(H160::from("0x4613f3bca5c44ea06337a9e439fbc6d42e501d0a")), + Address(H160::from("0xd343b217de44030afaa275f54d31a9317c7f441e")), + Address(H160::from("0x84ef4b2357079cd7a7c69fd7a37cd0609a679106")), + Address(H160::from("0xda2fef9e4a3230988ff17df2165440f37e8b1708")), + Address(H160::from("0xf4c64518ea10f995918a454158c6b61407ea345c")), + Address(H160::from("0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97")), + Address(H160::from("0xbb9bc244d798123fde783fcc1c72d3bb8c189413")), + Address(H160::from("0x807640a13483f8ac783c557fcdf27be11ea4ac7a")), + ]), + difficulty_hardfork_transition: Some(Uint(U256::from(0x59d9))), + difficulty_hardfork_bound_divisor: Some(Uint(U256::from(0x0200))), + bomb_defuse_transition: Some(Uint(U256::from(0x41))), + eip100b_transition: Some(Uint(U256::from(0x42))), + ecip1010_pause_transition: None, + ecip1010_continue_transition: None, + 
ecip1017_era_rounds: None, + expip2_transition: None, + expip2_duration_limit: None, + progpow_transition: None, + difficulty_bomb_delays: None, + } + } + ); + } - #[test] - fn ethash_deserialization_missing_optionals() { - let s = r#"{ + #[test] + fn ethash_deserialization_missing_optionals() { + let s = r#"{ "params": { "difficultyBoundDivisor": "0x0800", "minimumDifficulty": "0x020000" } }"#; - let deserialized: Ethash = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, Ethash { - params: EthashParams { - minimum_difficulty: Uint(U256::from(0x020000)), - difficulty_bound_divisor: Uint(U256::from(0x0800)), - difficulty_increment_divisor: None, - metropolis_difficulty_increment_divisor: None, - duration_limit: None, - homestead_transition: None, - block_reward: None, - block_reward_contract_address: None, - block_reward_contract_code: None, - block_reward_contract_transition: None, - dao_hardfork_transition: None, - dao_hardfork_beneficiary: None, - dao_hardfork_accounts: None, - difficulty_hardfork_transition: None, - difficulty_hardfork_bound_divisor: None, - bomb_defuse_transition: None, - eip100b_transition: None, - ecip1010_pause_transition: None, - ecip1010_continue_transition: None, - ecip1017_era_rounds: None, - expip2_transition: None, - expip2_duration_limit: None, - progpow_transition: None, - difficulty_bomb_delays: None, - } - }); - } + let deserialized: Ethash = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + Ethash { + params: EthashParams { + minimum_difficulty: Uint(U256::from(0x020000)), + difficulty_bound_divisor: Uint(U256::from(0x0800)), + difficulty_increment_divisor: None, + metropolis_difficulty_increment_divisor: None, + duration_limit: None, + homestead_transition: None, + block_reward: None, + block_reward_contract_address: None, + block_reward_contract_code: None, + block_reward_contract_transition: None, + dao_hardfork_transition: None, + dao_hardfork_beneficiary: None, + dao_hardfork_accounts: None, + 
difficulty_hardfork_transition: None, + difficulty_hardfork_bound_divisor: None, + bomb_defuse_transition: None, + eip100b_transition: None, + ecip1010_pause_transition: None, + ecip1010_continue_transition: None, + ecip1017_era_rounds: None, + expip2_transition: None, + expip2_duration_limit: None, + progpow_transition: None, + difficulty_bomb_delays: None, + } + } + ); + } - #[test] - #[should_panic(expected = "a non-zero value")] - fn test_zero_value_divisor() { - let s = r#"{ + #[test] + #[should_panic(expected = "a non-zero value")] + fn test_zero_value_divisor() { + let s = r#"{ "params": { "difficultyBoundDivisor": "0x0", "minimumDifficulty": "0x020000" } }"#; - let _deserialized: Ethash = serde_json::from_str(s).unwrap(); - } + let _deserialized: Ethash = serde_json::from_str(s).unwrap(); + } } diff --git a/json/src/spec/genesis.rs b/json/src/spec/genesis.rs index 1452bea0c..5c768cc75 100644 --- a/json/src/spec/genesis.rs +++ b/json/src/spec/genesis.rs @@ -16,55 +16,54 @@ //! Spec genesis deserialization. -use uint::{Uint, self}; -use hash::{Address, H256}; use bytes::Bytes; +use hash::{Address, H256}; use spec::Seal; +use uint::{self, Uint}; /// Spec genesis. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct Genesis { - /// Seal. - pub seal: Seal, - /// Difficulty. - pub difficulty: Uint, - /// Block author, defaults to 0. - pub author: Option
, - /// Block timestamp, defaults to 0. - pub timestamp: Option, - /// Parent hash, defaults to 0. - pub parent_hash: Option, - /// Gas limit. - #[serde(deserialize_with="uint::validate_non_zero")] - pub gas_limit: Uint, - /// Transactions root. - pub transactions_root: Option, - /// Receipts root. - pub receipts_root: Option, - /// State root. - pub state_root: Option, - /// Gas used. - pub gas_used: Option, - /// Extra data. - pub extra_data: Option, + /// Seal. + pub seal: Seal, + /// Difficulty. + pub difficulty: Uint, + /// Block author, defaults to 0. + pub author: Option
, + /// Block timestamp, defaults to 0. + pub timestamp: Option, + /// Parent hash, defaults to 0. + pub parent_hash: Option, + /// Gas limit. + #[serde(deserialize_with = "uint::validate_non_zero")] + pub gas_limit: Uint, + /// Transactions root. + pub transactions_root: Option, + /// Receipts root. + pub receipts_root: Option, + /// State root. + pub state_root: Option, + /// Gas used. + pub gas_used: Option, + /// Extra data. + pub extra_data: Option, } #[cfg(test)] mod tests { - use serde_json; - use bytes::Bytes; - use uint::Uint; - use ethereum_types::{U256, H160, H64 as Eth64, H256 as Eth256}; - use hash::{H64, H256, Address}; - use spec::genesis::Genesis; - use spec::{Ethereum, Seal}; - use std::str::FromStr; + use bytes::Bytes; + use ethereum_types::{H160, H256 as Eth256, H64 as Eth64, U256}; + use hash::{Address, H256, H64}; + use serde_json; + use spec::{genesis::Genesis, Ethereum, Seal}; + use std::str::FromStr; + use uint::Uint; - #[test] - fn genesis_deserialization() { - let s = r#"{ + #[test] + fn genesis_deserialization() { + let s = r#"{ "difficulty": "0x400000000", "seal": { "ethereum": { @@ -79,22 +78,38 @@ mod tests { "gasLimit": "0x1388", "stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544" }"#; - let deserialized: Genesis = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, Genesis { - seal: Seal::Ethereum(Ethereum { - nonce: H64(Eth64::from("0x00006d6f7264656e")), - mix_hash: H256(Eth256::from("0x0000000000000000000000000000000000000000000000000000000000000000")) - }), - difficulty: Uint(U256::from(0x400000000u64)), - author: Some(Address(H160::from("0x1000000000000000000000000000000000000001"))), - timestamp: Some(Uint(U256::from(0x07))), - parent_hash: Some(H256(Eth256::from("0x9000000000000000000000000000000000000000000000000000000000000000"))), - gas_limit: Uint(U256::from(0x1388)), - transactions_root: None, - receipts_root: None, - state_root: 
Some(H256(Eth256::from("0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"))), - gas_used: None, - extra_data: Some(Bytes::from_str("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa").unwrap()), - }); - } + let deserialized: Genesis = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + Genesis { + seal: Seal::Ethereum(Ethereum { + nonce: H64(Eth64::from("0x00006d6f7264656e")), + mix_hash: H256(Eth256::from( + "0x0000000000000000000000000000000000000000000000000000000000000000" + )) + }), + difficulty: Uint(U256::from(0x400000000u64)), + author: Some(Address(H160::from( + "0x1000000000000000000000000000000000000001" + ))), + timestamp: Some(Uint(U256::from(0x07))), + parent_hash: Some(H256(Eth256::from( + "0x9000000000000000000000000000000000000000000000000000000000000000" + ))), + gas_limit: Uint(U256::from(0x1388)), + transactions_root: None, + receipts_root: None, + state_root: Some(H256(Eth256::from( + "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544" + ))), + gas_used: None, + extra_data: Some( + Bytes::from_str( + "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa" + ) + .unwrap() + ), + } + ); + } } diff --git a/json/src/spec/hardcoded_sync.rs b/json/src/spec/hardcoded_sync.rs index 381cd1f1a..d7b88c680 100644 --- a/json/src/spec/hardcoded_sync.rs +++ b/json/src/spec/hardcoded_sync.rs @@ -24,26 +24,26 @@ use uint::Uint; #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct HardcodedSync { - /// Hexadecimal of the RLP encoding of the header of the block to start synchronization from. - pub header: String, - /// Total difficulty including the block of `header`. - pub total_difficulty: Uint, - /// Ordered trie roots of blocks before and including `header`. - #[serde(rename = "CHTs")] - pub chts: Vec, + /// Hexadecimal of the RLP encoding of the header of the block to start synchronization from. 
+ pub header: String, + /// Total difficulty including the block of `header`. + pub total_difficulty: Uint, + /// Ordered trie roots of blocks before and including `header`. + #[serde(rename = "CHTs")] + pub chts: Vec, } #[cfg(test)] mod tests { - use serde_json; - use uint::Uint; - use ethereum_types::{U256, H256 as Eth256}; - use hash::H256; - use spec::hardcoded_sync::HardcodedSync; + use ethereum_types::{H256 as Eth256, U256}; + use hash::H256; + use serde_json; + use spec::hardcoded_sync::HardcodedSync; + use uint::Uint; - #[test] - fn hardcoded_sync_deserialization() { - let s = r#"{ + #[test] + fn hardcoded_sync_deserialization() { + let s = r#"{ "header": "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23", "totalDifficulty": "0x400000000", "CHTs": [ @@ -51,8 +51,8 @@ mod tests { "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544" ] }"#; - let deserialized: HardcodedSync = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, HardcodedSync { + let deserialized: HardcodedSync = 
serde_json::from_str(s).unwrap(); + assert_eq!(deserialized, HardcodedSync { header: String::from("f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23"), total_difficulty: Uint(U256::from(0x400000000u64)), chts: vec![ @@ -60,5 +60,5 @@ mod tests { H256(Eth256::from("0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544")), ] }); - } + } } diff --git a/json/src/spec/instant_seal.rs b/json/src/spec/instant_seal.rs index 2f1ad33e9..7642a2454 100644 --- a/json/src/spec/instant_seal.rs +++ b/json/src/spec/instant_seal.rs @@ -21,15 +21,15 @@ #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct InstantSealParams { - /// Whether to enable millisecond timestamp. - #[serde(default)] - pub millisecond_timestamp: bool, + /// Whether to enable millisecond timestamp. + #[serde(default)] + pub millisecond_timestamp: bool, } /// Instant seal engine descriptor. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub struct InstantSeal { - /// Instant seal parameters. 
- pub params: InstantSealParams, + /// Instant seal parameters. + pub params: InstantSealParams, } diff --git a/json/src/spec/mod.rs b/json/src/spec/mod.rs index f1145be2e..329573cf7 100644 --- a/json/src/spec/mod.rs +++ b/json/src/spec/mod.rs @@ -17,35 +17,37 @@ //! Spec deserialization. pub mod account; -pub mod builtin; -pub mod genesis; -pub mod params; -pub mod spec; -pub mod seal; -pub mod engine; -pub mod state; -pub mod ethash; -pub mod validator_set; -pub mod basic_authority; pub mod authority_round; -pub mod null_engine; -pub mod instant_seal; -pub mod hardcoded_sync; +pub mod basic_authority; +pub mod builtin; pub mod clique; +pub mod engine; +pub mod ethash; +pub mod genesis; +pub mod hardcoded_sync; +pub mod instant_seal; +pub mod null_engine; +pub mod params; +pub mod seal; +pub mod spec; +pub mod state; +pub mod validator_set; -pub use self::account::Account; -pub use self::builtin::{Builtin, Pricing, Linear}; -pub use self::genesis::Genesis; -pub use self::params::Params; -pub use self::spec::{Spec, ForkSpec}; -pub use self::seal::{Seal, Ethereum, AuthorityRoundSeal, TendermintSeal}; -pub use self::engine::Engine; -pub use self::state::State; -pub use self::ethash::{Ethash, EthashParams, BlockReward}; -pub use self::validator_set::ValidatorSet; -pub use self::basic_authority::{BasicAuthority, BasicAuthorityParams}; -pub use self::authority_round::{AuthorityRound, AuthorityRoundParams}; -pub use self::clique::{Clique, CliqueParams}; -pub use self::null_engine::{NullEngine, NullEngineParams}; -pub use self::instant_seal::{InstantSeal, InstantSealParams}; -pub use self::hardcoded_sync::HardcodedSync; +pub use self::{ + account::Account, + authority_round::{AuthorityRound, AuthorityRoundParams}, + basic_authority::{BasicAuthority, BasicAuthorityParams}, + builtin::{Builtin, Linear, Pricing}, + clique::{Clique, CliqueParams}, + engine::Engine, + ethash::{BlockReward, Ethash, EthashParams}, + genesis::Genesis, + hardcoded_sync::HardcodedSync, + 
instant_seal::{InstantSeal, InstantSealParams}, + null_engine::{NullEngine, NullEngineParams}, + params::Params, + seal::{AuthorityRoundSeal, Ethereum, Seal, TendermintSeal}, + spec::{ForkSpec, Spec}, + state::State, + validator_set::ValidatorSet, +}; diff --git a/json/src/spec/null_engine.rs b/json/src/spec/null_engine.rs index 37ade4783..515a8cf65 100644 --- a/json/src/spec/null_engine.rs +++ b/json/src/spec/null_engine.rs @@ -23,36 +23,39 @@ use uint::Uint; #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct NullEngineParams { - /// Block reward. - pub block_reward: Option, - /// Immediate finalization. - pub immediate_finalization: Option + /// Block reward. + pub block_reward: Option, + /// Immediate finalization. + pub immediate_finalization: Option, } /// Null engine descriptor #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub struct NullEngine { - /// Ethash params. - pub params: NullEngineParams, + /// Ethash params. + pub params: NullEngineParams, } #[cfg(test)] mod tests { - use serde_json; - use uint::Uint; - use ethereum_types::U256; - use super::*; + use super::*; + use ethereum_types::U256; + use serde_json; + use uint::Uint; - #[test] - fn null_engine_deserialization() { - let s = r#"{ + #[test] + fn null_engine_deserialization() { + let s = r#"{ "params": { "blockReward": "0x0d" } }"#; - let deserialized: NullEngine = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized.params.block_reward, Some(Uint(U256::from(0x0d)))); - } + let deserialized: NullEngine = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized.params.block_reward, + Some(Uint(U256::from(0x0d))) + ); + } } diff --git a/json/src/spec/params.rs b/json/src/spec/params.rs index e8b3ded8a..c3dfef255 100644 --- a/json/src/spec/params.rs +++ b/json/src/spec/params.rs @@ -16,136 +16,136 @@ //! Spec params deserialization. 
-use uint::{self, Uint}; -use hash::{H256, Address}; use bytes::Bytes; +use hash::{Address, H256}; +use uint::{self, Uint}; /// Spec params. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct Params { - /// Account start nonce, defaults to 0. - pub account_start_nonce: Option, - /// Maximum size of extra data. - pub maximum_extra_data_size: Uint, - /// Minimum gas limit. - pub min_gas_limit: Uint, + /// Account start nonce, defaults to 0. + pub account_start_nonce: Option, + /// Maximum size of extra data. + pub maximum_extra_data_size: Uint, + /// Minimum gas limit. + pub min_gas_limit: Uint, - /// Network id. - #[serde(rename = "networkID")] - pub network_id: Uint, - /// Chain id. - #[serde(rename = "chainID")] - pub chain_id: Option, + /// Network id. + #[serde(rename = "networkID")] + pub network_id: Uint, + /// Chain id. + #[serde(rename = "chainID")] + pub chain_id: Option, - /// Name of the main ("eth") subprotocol. - pub subprotocol_name: Option, + /// Name of the main ("eth") subprotocol. + pub subprotocol_name: Option, - /// Option fork block number to check. - pub fork_block: Option, - /// Expected fork block hash. - #[serde(rename = "forkCanonHash")] - pub fork_hash: Option, + /// Option fork block number to check. + pub fork_block: Option, + /// Expected fork block hash. + #[serde(rename = "forkCanonHash")] + pub fork_hash: Option, - /// See main EthashParams docs. - pub eip150_transition: Option, + /// See main EthashParams docs. + pub eip150_transition: Option, - /// See main EthashParams docs. - pub eip160_transition: Option, + /// See main EthashParams docs. + pub eip160_transition: Option, - /// See main EthashParams docs. - pub eip161abc_transition: Option, - /// See main EthashParams docs. - pub eip161d_transition: Option, + /// See main EthashParams docs. + pub eip161abc_transition: Option, + /// See main EthashParams docs. 
+ pub eip161d_transition: Option, - /// See `CommonParams` docs. - pub eip98_transition: Option, - /// See `CommonParams` docs. - pub eip155_transition: Option, - /// See `CommonParams` docs. - pub validate_chain_id_transition: Option, - /// See `CommonParams` docs. - pub validate_receipts_transition: Option, - /// See `CommonParams` docs. - pub eip140_transition: Option, - /// See `CommonParams` docs. - pub eip210_transition: Option, - /// See `CommonParams` docs. - pub eip210_contract_address: Option
, - /// See `CommonParams` docs. - pub eip210_contract_code: Option, - /// See `CommonParams` docs. - pub eip210_contract_gas: Option, - /// See `CommonParams` docs. - pub eip211_transition: Option, - /// See `CommonParams` docs. - pub eip145_transition: Option, - /// See `CommonParams` docs. - pub eip214_transition: Option, - /// See `CommonParams` docs. - pub eip658_transition: Option, - /// See `CommonParams` docs. - pub eip1052_transition: Option, - /// See `CommonParams` docs. - pub eip1283_transition: Option, - /// See `CommonParams` docs. - pub eip1283_disable_transition: Option, - /// See `CommonParams` docs. - pub eip1283_reenable_transition: Option, - /// See `CommonParams` docs. - pub eip1014_transition: Option, - /// See `CommonParams` docs. - pub eip1706_transition: Option, - /// See `CommonParams` docs. - pub eip1344_transition: Option, - /// See `CommonParams` docs. - pub eip1884_transition: Option, - /// See `CommonParams` docs. - pub eip2028_transition: Option, - /// See `CommonParams` docs. - pub dust_protection_transition: Option, - /// See `CommonParams` docs. - pub nonce_cap_increment: Option, - /// See `CommonParams` docs. - pub remove_dust_contracts : Option, - /// See `CommonParams` docs. - #[serde(deserialize_with="uint::validate_non_zero")] - pub gas_limit_bound_divisor: Uint, - /// See `CommonParams` docs. - pub registrar: Option
, - /// Apply reward flag - pub apply_reward: Option, - /// Node permission contract address. - pub node_permission_contract: Option
, - /// See main EthashParams docs. - pub max_code_size: Option, - /// Maximum size of transaction RLP payload. - pub max_transaction_size: Option, - /// See main EthashParams docs. - pub max_code_size_transition: Option, - /// Transaction permission contract address. - pub transaction_permission_contract: Option
, - /// Block at which the transaction permission contract should start being used. - pub transaction_permission_contract_transition: Option, - /// Wasm activation block height, if not activated from start - pub wasm_activation_transition: Option, - /// KIP4 activiation block height. - pub kip4_transition: Option, - /// KIP6 activiation block height. - pub kip6_transition: Option, + /// See `CommonParams` docs. + pub eip98_transition: Option, + /// See `CommonParams` docs. + pub eip155_transition: Option, + /// See `CommonParams` docs. + pub validate_chain_id_transition: Option, + /// See `CommonParams` docs. + pub validate_receipts_transition: Option, + /// See `CommonParams` docs. + pub eip140_transition: Option, + /// See `CommonParams` docs. + pub eip210_transition: Option, + /// See `CommonParams` docs. + pub eip210_contract_address: Option
, + /// See `CommonParams` docs. + pub eip210_contract_code: Option, + /// See `CommonParams` docs. + pub eip210_contract_gas: Option, + /// See `CommonParams` docs. + pub eip211_transition: Option, + /// See `CommonParams` docs. + pub eip145_transition: Option, + /// See `CommonParams` docs. + pub eip214_transition: Option, + /// See `CommonParams` docs. + pub eip658_transition: Option, + /// See `CommonParams` docs. + pub eip1052_transition: Option, + /// See `CommonParams` docs. + pub eip1283_transition: Option, + /// See `CommonParams` docs. + pub eip1283_disable_transition: Option, + /// See `CommonParams` docs. + pub eip1283_reenable_transition: Option, + /// See `CommonParams` docs. + pub eip1014_transition: Option, + /// See `CommonParams` docs. + pub eip1706_transition: Option, + /// See `CommonParams` docs. + pub eip1344_transition: Option, + /// See `CommonParams` docs. + pub eip1884_transition: Option, + /// See `CommonParams` docs. + pub eip2028_transition: Option, + /// See `CommonParams` docs. + pub dust_protection_transition: Option, + /// See `CommonParams` docs. + pub nonce_cap_increment: Option, + /// See `CommonParams` docs. + pub remove_dust_contracts: Option, + /// See `CommonParams` docs. + #[serde(deserialize_with = "uint::validate_non_zero")] + pub gas_limit_bound_divisor: Uint, + /// See `CommonParams` docs. + pub registrar: Option
, + /// Apply reward flag + pub apply_reward: Option, + /// Node permission contract address. + pub node_permission_contract: Option
, + /// See main EthashParams docs. + pub max_code_size: Option, + /// Maximum size of transaction RLP payload. + pub max_transaction_size: Option, + /// See main EthashParams docs. + pub max_code_size_transition: Option, + /// Transaction permission contract address. + pub transaction_permission_contract: Option
, + /// Block at which the transaction permission contract should start being used. + pub transaction_permission_contract_transition: Option, + /// Wasm activation block height, if not activated from start + pub wasm_activation_transition: Option, + /// KIP4 activiation block height. + pub kip4_transition: Option, + /// KIP6 activiation block height. + pub kip6_transition: Option, } #[cfg(test)] mod tests { - use serde_json; - use uint::Uint; - use ethereum_types::U256; - use spec::params::Params; + use ethereum_types::U256; + use serde_json; + use spec::params::Params; + use uint::Uint; - #[test] - fn params_deserialization() { - let s = r#"{ + #[test] + fn params_deserialization() { + let s = r#"{ "maximumExtraDataSize": "0x20", "networkID" : "0x1", "chainID" : "0x15", @@ -157,22 +157,28 @@ mod tests { "wasmActivationTransition": "0x1010" }"#; - let deserialized: Params = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized.maximum_extra_data_size, Uint(U256::from(0x20))); - assert_eq!(deserialized.network_id, Uint(U256::from(0x1))); - assert_eq!(deserialized.chain_id, Some(Uint(U256::from(0x15)))); - assert_eq!(deserialized.subprotocol_name, Some("exp".to_owned())); - assert_eq!(deserialized.min_gas_limit, Uint(U256::from(0x1388))); - assert_eq!(deserialized.account_start_nonce, Some(Uint(U256::from(0x01)))); - assert_eq!(deserialized.gas_limit_bound_divisor, Uint(U256::from(0x20))); - assert_eq!(deserialized.max_code_size, Some(Uint(U256::from(0x1000)))); - assert_eq!(deserialized.wasm_activation_transition, Some(Uint(U256::from(0x1010)))); - } + let deserialized: Params = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.maximum_extra_data_size, Uint(U256::from(0x20))); + assert_eq!(deserialized.network_id, Uint(U256::from(0x1))); + assert_eq!(deserialized.chain_id, Some(Uint(U256::from(0x15)))); + assert_eq!(deserialized.subprotocol_name, Some("exp".to_owned())); + assert_eq!(deserialized.min_gas_limit, Uint(U256::from(0x1388))); + 
assert_eq!( + deserialized.account_start_nonce, + Some(Uint(U256::from(0x01))) + ); + assert_eq!(deserialized.gas_limit_bound_divisor, Uint(U256::from(0x20))); + assert_eq!(deserialized.max_code_size, Some(Uint(U256::from(0x1000)))); + assert_eq!( + deserialized.wasm_activation_transition, + Some(Uint(U256::from(0x1010))) + ); + } - #[test] - #[should_panic(expected = "a non-zero value")] - fn test_zero_value_divisor() { - let s = r#"{ + #[test] + #[should_panic(expected = "a non-zero value")] + fn test_zero_value_divisor() { + let s = r#"{ "maximumExtraDataSize": "0x20", "networkID" : "0x1", "chainID" : "0x15", @@ -183,6 +189,6 @@ mod tests { "maxCodeSize": "0x1000" }"#; - let _deserialized: Params = serde_json::from_str(s).unwrap(); - } + let _deserialized: Params = serde_json::from_str(s).unwrap(); + } } diff --git a/json/src/spec/seal.rs b/json/src/spec/seal.rs index e716a05bc..e9b01c3b8 100644 --- a/json/src/spec/seal.rs +++ b/json/src/spec/seal.rs @@ -16,41 +16,41 @@ //! Spec seal deserialization. +use bytes::Bytes; use hash::*; use uint::Uint; -use bytes::Bytes; /// Ethereum seal. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct Ethereum { - /// Seal nonce. - pub nonce: H64, - /// Seal mix hash. - pub mix_hash: H256, + /// Seal nonce. + pub nonce: H64, + /// Seal mix hash. + pub mix_hash: H256, } /// AuthorityRound seal. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub struct AuthorityRoundSeal { - /// Seal step. - pub step: Uint, - /// Seal signature. - pub signature: H520, + /// Seal step. + pub step: Uint, + /// Seal signature. + pub signature: H520, } /// Tendermint seal. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub struct TendermintSeal { - /// Seal round. - pub round: Uint, - /// Proposal seal signature. - pub proposal: H520, - /// Proposal seal signature. - pub precommits: Vec, + /// Seal round. 
+ pub round: Uint, + /// Proposal seal signature. + pub proposal: H520, + /// Proposal seal signature. + pub precommits: Vec, } /// Seal variants. @@ -58,28 +58,28 @@ pub struct TendermintSeal { #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub enum Seal { - /// Ethereum seal. - Ethereum(Ethereum), - /// AuthorityRound seal. - AuthorityRound(AuthorityRoundSeal), - /// Tendermint seal. - Tendermint(TendermintSeal), - /// Generic seal. - Generic(Bytes), + /// Ethereum seal. + Ethereum(Ethereum), + /// AuthorityRound seal. + AuthorityRound(AuthorityRoundSeal), + /// Tendermint seal. + Tendermint(TendermintSeal), + /// Generic seal. + Generic(Bytes), } #[cfg(test)] mod tests { - use serde_json; - use hash::*; - use bytes::Bytes; - use uint::Uint; - use ethereum_types::{U256, H64 as Eth64, H256 as Eth256, H520 as Eth520}; - use spec::{Ethereum, AuthorityRoundSeal, TendermintSeal, Seal}; + use bytes::Bytes; + use ethereum_types::{H256 as Eth256, H520 as Eth520, H64 as Eth64, U256}; + use hash::*; + use serde_json; + use spec::{AuthorityRoundSeal, Ethereum, Seal, TendermintSeal}; + use uint::Uint; - #[test] - fn seal_deserialization() { - let s = r#"[{ + #[test] + fn seal_deserialization() { + let s = r#"[{ "ethereum": { "nonce": "0x0000000000000042", "mixHash": "0x1000000000000000000000000000000000000000000000000000000000000001" @@ -101,31 +101,41 @@ mod tests { } }]"#; - let deserialized: Vec = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized.len(), 4); + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.len(), 4); - // [0] - assert_eq!(deserialized[0], Seal::Ethereum(Ethereum { - nonce: H64(Eth64::from("0x0000000000000042")), - mix_hash: H256(Eth256::from("0x1000000000000000000000000000000000000000000000000000000000000001")) - })); + // [0] + assert_eq!( + deserialized[0], + Seal::Ethereum(Ethereum { + nonce: H64(Eth64::from("0x0000000000000042")), + mix_hash: H256(Eth256::from( + 
"0x1000000000000000000000000000000000000000000000000000000000000001" + )) + }) + ); - // [1] - assert_eq!(deserialized[1], Seal::Generic(Bytes::new(vec![ - 0xe0, 0x11, 0xbb, 0xe8, 0xdb, 0x4e, 0x34, 0x7b, 0x4e, 0x8c, 0x93, 0x7c, 0x1c, 0x83, 0x70, 0xe4, - 0xb5, 0xed, 0x33, 0xad, 0xb3, 0xdb, 0x69, 0xcb, 0xdb, 0x7a, 0x38, 0xe1, 0xe5, 0x0b, 0x1b, 0x82, 0xfa]))); + // [1] + assert_eq!( + deserialized[1], + Seal::Generic(Bytes::new(vec![ + 0xe0, 0x11, 0xbb, 0xe8, 0xdb, 0x4e, 0x34, 0x7b, 0x4e, 0x8c, 0x93, 0x7c, 0x1c, 0x83, + 0x70, 0xe4, 0xb5, 0xed, 0x33, 0xad, 0xb3, 0xdb, 0x69, 0xcb, 0xdb, 0x7a, 0x38, 0xe1, + 0xe5, 0x0b, 0x1b, 0x82, 0xfa + ])) + ); - // [2] - assert_eq!(deserialized[2], Seal::AuthorityRound(AuthorityRoundSeal { + // [2] + assert_eq!(deserialized[2], Seal::AuthorityRound(AuthorityRoundSeal { step: Uint(U256::from(0x0)), signature: H520(Eth520::from("0x2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002")) })); - // [3] - assert_eq!(deserialized[3], Seal::Tendermint(TendermintSeal { + // [3] + assert_eq!(deserialized[3], Seal::Tendermint(TendermintSeal { round: Uint(U256::from(0x3)), proposal: H520(Eth520::from("0x3000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003")), precommits: vec![H520(Eth520::from("0x4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004"))] })); - } + } } diff --git a/json/src/spec/spec.rs b/json/src/spec/spec.rs index bdd8bdb30..c71574171 100644 --- a/json/src/spec/spec.rs +++ b/json/src/spec/spec.rs @@ -16,26 +16,25 @@ //! Spec deserialization. 
+use serde_json::{self, Error}; +use spec::{Engine, Genesis, HardcodedSync, Params, State}; use std::io::Read; -use serde_json; -use serde_json::Error; -use spec::{Params, Genesis, Engine, State, HardcodedSync}; /// Fork spec definition #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize)] pub enum ForkSpec { - EIP150, - EIP158, - Frontier, - Homestead, - Byzantium, - Constantinople, - ConstantinopleFix, - Istanbul, - EIP158ToByzantiumAt5, - FrontierToHomesteadAt5, - HomesteadToDaoAt5, - HomesteadToEIP150At5, + EIP150, + EIP158, + Frontier, + Homestead, + Byzantium, + Constantinople, + ConstantinopleFix, + Istanbul, + EIP158ToByzantiumAt5, + FrontierToHomesteadAt5, + HomesteadToDaoAt5, + HomesteadToEIP150At5, } /// Spec deserialization. @@ -43,39 +42,42 @@ pub enum ForkSpec { #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct Spec { - /// Spec name. - pub name: String, - /// Special fork name. - pub data_dir: Option, - /// Engine. - pub engine: Engine, - /// Spec params. - pub params: Params, - /// Genesis header. - pub genesis: Genesis, - /// Genesis state. - pub accounts: State, - /// Boot nodes. - pub nodes: Option>, - /// Hardcoded synchronization for the light client. - pub hardcoded_sync: Option, + /// Spec name. + pub name: String, + /// Special fork name. + pub data_dir: Option, + /// Engine. + pub engine: Engine, + /// Spec params. + pub params: Params, + /// Genesis header. + pub genesis: Genesis, + /// Genesis state. + pub accounts: State, + /// Boot nodes. + pub nodes: Option>, + /// Hardcoded synchronization for the light client. + pub hardcoded_sync: Option, } impl Spec { - /// Loads test from json. - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + /// Loads test from json. 
+ pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } } #[cfg(test)] mod tests { - use serde_json; - use spec::spec::Spec; + use serde_json; + use spec::spec::Spec; - #[test] - fn should_error_on_unknown_fields() { - let s = r#"{ + #[test] + fn should_error_on_unknown_fields() { + let s = r#"{ "name": "Morden", "dataDir": "morden", "engine": { @@ -134,13 +136,13 @@ mod tests { ] } }"#; - let result: Result = serde_json::from_str(s); - assert!(result.is_err()); - } + let result: Result = serde_json::from_str(s); + assert!(result.is_err()); + } - #[test] - fn spec_deserialization() { - let s = r#"{ + #[test] + fn spec_deserialization() { + let s = r#"{ "name": "Morden", "dataDir": "morden", "engine": { @@ -246,7 +248,7 @@ mod tests { ] } }"#; - let _deserialized: Spec = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: Spec = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/spec/state.rs b/json/src/spec/state.rs index 95b13fea4..19e3b0be5 100644 --- a/json/src/spec/state.rs +++ b/json/src/spec/state.rs @@ -16,10 +16,10 @@ //! Blockchain test state deserializer. -use std::collections::BTreeMap; -use hash::Address; use bytes::Bytes; +use hash::Address; use spec::{Account, Builtin}; +use std::collections::BTreeMap; /// Blockchain test state deserializer. #[derive(Debug, PartialEq, Deserialize)] @@ -27,28 +27,28 @@ use spec::{Account, Builtin}; pub struct State(BTreeMap); impl State { - /// Returns all builtins. - pub fn builtins(&self) -> BTreeMap { - self.0 - .iter() - .filter_map(|(add, ref acc)| acc.builtin.clone().map(|b| (add.clone(), b.into()))) - .collect() - } + /// Returns all builtins. + pub fn builtins(&self) -> BTreeMap { + self.0 + .iter() + .filter_map(|(add, ref acc)| acc.builtin.clone().map(|b| (add.clone(), b.into()))) + .collect() + } - /// Returns all constructors. 
- pub fn constructors(&self) -> BTreeMap { - self.0 - .iter() - .filter_map(|(add, ref acc)| acc.constructor.clone().map(|b| (add.clone(), b))) - .collect() - } + /// Returns all constructors. + pub fn constructors(&self) -> BTreeMap { + self.0 + .iter() + .filter_map(|(add, ref acc)| acc.constructor.clone().map(|b| (add.clone(), b))) + .collect() + } } impl IntoIterator for State { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; + type Item = as IntoIterator>::Item; + type IntoIter = as IntoIterator>::IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } } diff --git a/json/src/spec/validator_set.rs b/json/src/spec/validator_set.rs index e7e82282c..908719aba 100644 --- a/json/src/spec/validator_set.rs +++ b/json/src/spec/validator_set.rs @@ -16,36 +16,36 @@ //! Validator set deserialization. +use hash::Address; use std::collections::BTreeMap; use uint::Uint; -use hash::Address; /// Different ways of specifying validators. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub enum ValidatorSet { - /// A simple list of authorities. - List(Vec
), - /// Address of a contract that indicates the list of authorities. - SafeContract(Address), - /// Address of a contract that indicates the list of authorities and enables reporting of theor misbehaviour using transactions. - Contract(Address), - /// A map of starting blocks for each validator set. - Multi(BTreeMap), + /// A simple list of authorities. + List(Vec
), + /// Address of a contract that indicates the list of authorities. + SafeContract(Address), + /// Address of a contract that indicates the list of authorities and enables reporting of theor misbehaviour using transactions. + Contract(Address), + /// A map of starting blocks for each validator set. + Multi(BTreeMap), } #[cfg(test)] mod tests { - use serde_json; - use uint::Uint; - use ethereum_types::{H160, U256}; - use hash::Address; - use spec::validator_set::ValidatorSet; + use ethereum_types::{H160, U256}; + use hash::Address; + use serde_json; + use spec::validator_set::ValidatorSet; + use uint::Uint; - #[test] - fn validator_set_deserialization() { - let s = r#"[{ + #[test] + fn validator_set_deserialization() { + let s = r#"[{ "list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"] }, { "safeContract": "0xc6d9d2cd449a754c494264e1809c50e34d64562b" @@ -59,20 +59,35 @@ mod tests { } }]"#; - let deserialized: Vec = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized.len(), 4); + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.len(), 4); - assert_eq!(deserialized[0], ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))])); - assert_eq!(deserialized[1], ValidatorSet::SafeContract(Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b")))); - assert_eq!(deserialized[2], ValidatorSet::Contract(Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b")))); - match deserialized[3] { - ValidatorSet::Multi(ref map) => { - assert_eq!(map.len(), 3); - assert!(map.contains_key(&Uint(U256::from(0)))); - assert!(map.contains_key(&Uint(U256::from(10)))); - assert!(map.contains_key(&Uint(U256::from(20)))); - }, - _ => assert!(false), - } - } + assert_eq!( + deserialized[0], + ValidatorSet::List(vec![Address(H160::from( + "0xc6d9d2cd449a754c494264e1809c50e34d64562b" + ))]) + ); + assert_eq!( + deserialized[1], + ValidatorSet::SafeContract(Address(H160::from( + 
"0xc6d9d2cd449a754c494264e1809c50e34d64562b" + ))) + ); + assert_eq!( + deserialized[2], + ValidatorSet::Contract(Address(H160::from( + "0xc6d9d2cd449a754c494264e1809c50e34d64562b" + ))) + ); + match deserialized[3] { + ValidatorSet::Multi(ref map) => { + assert_eq!(map.len(), 3); + assert!(map.contains_key(&Uint(U256::from(0)))); + assert!(map.contains_key(&Uint(U256::from(10)))); + assert!(map.contains_key(&Uint(U256::from(20)))); + } + _ => assert!(false), + } + } } diff --git a/json/src/state/log.rs b/json/src/state/log.rs index 1a0dda529..d9006b246 100644 --- a/json/src/state/log.rs +++ b/json/src/state/log.rs @@ -15,30 +15,30 @@ // along with Parity Ethereum. If not, see . //! State test log deserialization. -use hash::{Address, H256, Bloom}; use bytes::Bytes; +use hash::{Address, Bloom, H256}; /// State test log deserialization. #[derive(Debug, PartialEq, Deserialize)] pub struct Log { - /// Address. - pub address: Address, - /// Topics. - pub topics: Vec, - /// Data. - pub data: Bytes, - /// Bloom. - pub bloom: Bloom, + /// Address. + pub address: Address, + /// Topics. + pub topics: Vec, + /// Data. + pub data: Bytes, + /// Bloom. 
+ pub bloom: Bloom, } #[cfg(test)] mod tests { - use serde_json; - use state::Log; + use serde_json; + use state::Log; - #[test] - fn log_deserialization() { - let s = r#"{ + #[test] + fn log_deserialization() { + let s = r#"{ "address" : "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6", "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000", "data" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", @@ -46,7 +46,7 @@ mod tests { "0000000000000000000000000000000000000000000000000000000000000000" ] }"#; - let _deserialized: Log = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: Log = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/state/mod.rs b/json/src/state/mod.rs index 8c2ac2875..f746df073 100644 --- a/json/src/state/mod.rs +++ b/json/src/state/mod.rs @@ -16,14 +16,11 @@ //! State test deserialization. 
-pub mod state; -pub mod transaction; -pub mod test; pub mod log; +pub mod state; +pub mod test; +pub mod transaction; -pub use self::state::State; -pub use self::transaction::Transaction; -pub use self::test::Test; -pub use self::log::Log; -pub use vm::Env as Env; +pub use self::{log::Log, state::State, test::Test, transaction::Transaction}; pub use blockchain::State as AccountState; +pub use vm::Env; diff --git a/json/src/state/state.rs b/json/src/state/state.rs index f25dabec7..9a160349a 100644 --- a/json/src/state/state.rs +++ b/json/src/state/state.rs @@ -18,39 +18,39 @@ use bytes::Bytes; use hash::H256; -use state::{Env, AccountState, Transaction, Log}; +use state::{AccountState, Env, Log, Transaction}; /// State test deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct State { - /// Environment. - pub env: Env, - /// Output. - #[serde(rename = "out")] - pub output: Bytes, - /// Pre state. - #[serde(rename = "pre")] - pub pre_state: AccountState, - /// Post state. - #[serde(rename = "post")] - pub post_state: AccountState, - /// Post state root. - pub post_state_root: H256, - /// Transaction. - pub transaction: Transaction, - /// Logs. - pub logs: Vec + /// Environment. + pub env: Env, + /// Output. + #[serde(rename = "out")] + pub output: Bytes, + /// Pre state. + #[serde(rename = "pre")] + pub pre_state: AccountState, + /// Post state. + #[serde(rename = "post")] + pub post_state: AccountState, + /// Post state root. + pub post_state_root: H256, + /// Transaction. + pub transaction: Transaction, + /// Logs. 
+ pub logs: Vec, } #[cfg(test)] mod tests { - use serde_json; - use state::State; + use serde_json; + use state::State; - #[test] - fn state_deserialization() { - let s = r#"{ + #[test] + fn state_deserialization() { + let s = r#"{ "env" : { "currentCoinbase" : "2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", "currentDifficulty" : "0x0100", @@ -150,7 +150,7 @@ mod tests { "value" : "0x00" } }"#; - let _deserialized: State = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: State = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/state/test.rs b/json/src/state/test.rs index 3521c7977..4cbc35e5d 100644 --- a/json/src/state/test.rs +++ b/json/src/state/test.rs @@ -16,115 +16,117 @@ //! General test deserialization. -use std::io::Read; -use std::collections::BTreeMap; -use uint::Uint; use bytes::Bytes; use hash::{Address, H256}; -use spec::ForkSpec; -use state::{Env, AccountState, Transaction}; use maybe::MaybeEmpty; use serde_json::{self, Error}; +use spec::ForkSpec; +use state::{AccountState, Env, Transaction}; +use std::{collections::BTreeMap, io::Read}; +use uint::Uint; /// State test deserializer. #[derive(Debug, PartialEq, Deserialize)] pub struct Test(BTreeMap); impl IntoIterator for Test { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; + type Item = as IntoIterator>::Item; + type IntoIter = as IntoIterator>::IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } } impl Test { - /// Loads test from json. - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + /// Loads test from json. + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } } /// State test deserialization. #[derive(Debug, PartialEq, Deserialize)] pub struct State { - /// Environment. - pub env: Env, - /// Pre state. 
- #[serde(rename = "pre")] - pub pre_state: AccountState, - /// Post state. - #[serde(rename = "post")] - pub post_states: BTreeMap>, - /// Transaction. - pub transaction: MultiTransaction, + /// Environment. + pub env: Env, + /// Pre state. + #[serde(rename = "pre")] + pub pre_state: AccountState, + /// Post state. + #[serde(rename = "post")] + pub post_states: BTreeMap>, + /// Transaction. + pub transaction: MultiTransaction, } /// State test transaction deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct MultiTransaction { - /// Transaction data set. - pub data: Vec, - /// Gas limit set. - pub gas_limit: Vec, - /// Gas price. - pub gas_price: Uint, - /// Nonce. - pub nonce: Uint, - /// Secret key. - #[serde(rename = "secretKey")] - pub secret: Option, - /// To. - pub to: MaybeEmpty
, - /// Value set. - pub value: Vec, + /// Transaction data set. + pub data: Vec, + /// Gas limit set. + pub gas_limit: Vec, + /// Gas price. + pub gas_price: Uint, + /// Nonce. + pub nonce: Uint, + /// Secret key. + #[serde(rename = "secretKey")] + pub secret: Option, + /// To. + pub to: MaybeEmpty
, + /// Value set. + pub value: Vec, } impl MultiTransaction { - /// Build transaction with given indexes. - pub fn select(&self, indexes: &PostStateIndexes) -> Transaction { - Transaction { - data: self.data[indexes.data as usize].clone(), - gas_limit: self.gas_limit[indexes.gas as usize].clone(), - gas_price: self.gas_price.clone(), - nonce: self.nonce.clone(), - secret: self.secret.clone(), - to: self.to.clone(), - value: self.value[indexes.value as usize].clone(), - } - } + /// Build transaction with given indexes. + pub fn select(&self, indexes: &PostStateIndexes) -> Transaction { + Transaction { + data: self.data[indexes.data as usize].clone(), + gas_limit: self.gas_limit[indexes.gas as usize].clone(), + gas_price: self.gas_price.clone(), + nonce: self.nonce.clone(), + secret: self.secret.clone(), + to: self.to.clone(), + value: self.value[indexes.value as usize].clone(), + } + } } /// State test indexes deserialization. #[derive(Debug, PartialEq, Deserialize)] pub struct PostStateIndexes { - /// Index into transaction data set. - pub data: u64, - /// Index into transaction gas limit set. - pub gas: u64, - /// Index into transaction value set. - pub value: u64, + /// Index into transaction data set. + pub data: u64, + /// Index into transaction gas limit set. + pub gas: u64, + /// Index into transaction value set. + pub value: u64, } /// State test indexed state result deserialization. 
#[derive(Debug, PartialEq, Deserialize)] pub struct PostStateResult { - /// Post state hash - pub hash: H256, - /// Indexes - pub indexes: PostStateIndexes, + /// Post state hash + pub hash: H256, + /// Indexes + pub indexes: PostStateIndexes, } #[cfg(test)] mod tests { - use serde_json; - use super::{MultiTransaction, State}; + use super::{MultiTransaction, State}; + use serde_json; - #[test] - fn multi_transaction_deserialization() { - let s = r#"{ + #[test] + fn multi_transaction_deserialization() { + let s = r#"{ "data" : [ "" ], "gasLimit" : [ "0x2dc6c0", "0x222222" ], "gasPrice" : "0x01", @@ -133,12 +135,12 @@ mod tests { "to" : "1000000000000000000000000000000000000000", "value" : [ "0x00", "0x01", "0x02" ] }"#; - let _deserialized: MultiTransaction = serde_json::from_str(s).unwrap(); - } + let _deserialized: MultiTransaction = serde_json::from_str(s).unwrap(); + } - #[test] - fn state_deserialization() { - let s = r#"{ + #[test] + fn state_deserialization() { + let s = r#"{ "env" : { "currentCoinbase" : "2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", "currentDifficulty" : "0x0100", @@ -209,7 +211,7 @@ mod tests { "value" : [ "10", "0" ] } }"#; - let _deserialized: State = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: State = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/state/transaction.rs b/json/src/state/transaction.rs index 693b97699..f43059cac 100644 --- a/json/src/state/transaction.rs +++ b/json/src/state/transaction.rs @@ -16,40 +16,40 @@ //! State test transaction deserialization. -use uint::Uint; use bytes::Bytes; use hash::{Address, H256}; use maybe::MaybeEmpty; +use uint::Uint; /// State test transaction deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Transaction { - /// Transaction data. - pub data: Bytes, - /// Gas limit. - pub gas_limit: Uint, - /// Gas price. - pub gas_price: Uint, - /// Nonce. 
- pub nonce: Uint, - /// Secret key. - #[serde(rename = "secretKey")] - pub secret: Option, - /// To. - pub to: MaybeEmpty
, - /// Value. - pub value: Uint, + /// Transaction data. + pub data: Bytes, + /// Gas limit. + pub gas_limit: Uint, + /// Gas price. + pub gas_price: Uint, + /// Nonce. + pub nonce: Uint, + /// Secret key. + #[serde(rename = "secretKey")] + pub secret: Option, + /// To. + pub to: MaybeEmpty
, + /// Value. + pub value: Uint, } #[cfg(test)] mod tests { - use serde_json; - use state::Transaction; + use serde_json; + use state::Transaction; - #[test] - fn transaction_deserialization() { - let s = r#"{ + #[test] + fn transaction_deserialization() { + let s = r#"{ "data" : "", "gasLimit" : "0x2dc6c0", "gasPrice" : "0x01", @@ -58,7 +58,7 @@ mod tests { "to" : "1000000000000000000000000000000000000000", "value" : "0x00" }"#; - let _deserialized: Transaction = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: Transaction = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/test/mod.rs b/json/src/test/mod.rs index 355d30d69..78430a0ba 100644 --- a/json/src/test/mod.rs +++ b/json/src/test/mod.rs @@ -16,29 +16,27 @@ //! Additional test structures deserialization. -use std::collections::BTreeMap; -use std::io::Read; -use serde_json; -use serde_json::Error; use hash::H256; +use serde_json::{self, Error}; +use std::{collections::BTreeMap, io::Read}; use uint::Uint; /// Blockchain test header deserializer. #[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct DifficultyTestCase { - /// Parent timestamp. - pub parent_timestamp: Uint, - /// Parent difficulty. - pub parent_difficulty: Uint, - /// Parent uncle hash. - pub parent_uncles: H256, - /// Current timestamp. - pub current_timestamp: Uint, - /// Current difficulty. - pub current_difficulty: Uint, - /// Current block number. - pub current_block_number: Uint, + /// Parent timestamp. + pub parent_timestamp: Uint, + /// Parent difficulty. + pub parent_difficulty: Uint, + /// Parent uncle hash. + pub parent_uncles: H256, + /// Current timestamp. + pub current_timestamp: Uint, + /// Current difficulty. + pub current_difficulty: Uint, + /// Current block number. + pub current_block_number: Uint, } /// Blockchain test deserializer. 
@@ -46,73 +44,78 @@ pub struct DifficultyTestCase { pub struct DifficultyTest(BTreeMap); impl IntoIterator for DifficultyTest { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; + type Item = as IntoIterator>::Item; + type IntoIter = as IntoIterator>::IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } } impl DifficultyTest { - /// Loads test from json. - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + /// Loads test from json. + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } } /// Test to skip (only if issue ongoing) #[derive(Debug, PartialEq, Deserialize)] pub struct SkipStates { - /// Block tests - pub block: Vec, - /// State tests - pub state: Vec, - + /// Block tests + pub block: Vec, + /// State tests + pub state: Vec, } /// Block test to skip. #[derive(Debug, PartialEq, Deserialize)] pub struct BlockSkipStates { - /// Issue reference. - pub reference: String, - /// Test failing name. - pub failing: String, - /// Items failing for the test. - pub subtests: Vec, + /// Issue reference. + pub reference: String, + /// Test failing name. + pub failing: String, + /// Items failing for the test. + pub subtests: Vec, } /// State test to skip. #[derive(Debug, PartialEq, Deserialize)] pub struct StateSkipStates { - /// Issue reference. - pub reference: String, - /// Test failing name. - pub failing: String, - /// Items failing for the test. - pub subtests: BTreeMap + /// Issue reference. + pub reference: String, + /// Test failing name. + pub failing: String, + /// Items failing for the test. + pub subtests: BTreeMap, } /// State subtest to skip. #[derive(Debug, PartialEq, Deserialize)] pub struct StateSkipSubStates { - /// State test number of this item. Or '*' for all state. - pub subnumbers: Vec, - /// Chain for this items. 
- pub chain: String, + /// State test number of this item. Or '*' for all state. + pub subnumbers: Vec, + /// Chain for this items. + pub chain: String, } impl SkipStates { - /// Loads skip states from json. - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + /// Loads skip states from json. + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } - /// Empty skip states. - pub fn empty() -> Self { - SkipStates { - block: Vec::new(), - state: Vec::new(), - } - } + /// Empty skip states. + pub fn empty() -> Self { + SkipStates { + block: Vec::new(), + state: Vec::new(), + } + } } diff --git a/json/src/transaction/mod.rs b/json/src/transaction/mod.rs index f845fe334..52153c695 100644 --- a/json/src/transaction/mod.rs +++ b/json/src/transaction/mod.rs @@ -16,10 +16,8 @@ //! Transaction test deserialization. +mod test; mod transaction; mod txtest; -mod test; -pub use self::transaction::Transaction; -pub use self::txtest::TransactionTest; -pub use self::test::Test; +pub use self::{test::Test, transaction::Transaction, txtest::TransactionTest}; diff --git a/json/src/transaction/test.rs b/json/src/transaction/test.rs index ee9d4110f..8ab839a93 100644 --- a/json/src/transaction/test.rs +++ b/json/src/transaction/test.rs @@ -16,10 +16,8 @@ //! TransactionTest test deserializer. -use std::collections::BTreeMap; -use std::io::Read; -use serde_json; -use serde_json::Error; +use serde_json::{self, Error}; +use std::{collections::BTreeMap, io::Read}; use transaction::TransactionTest; /// TransactionTest test deserializer. 
@@ -27,17 +25,20 @@ use transaction::TransactionTest; pub struct Test(BTreeMap); impl IntoIterator for Test { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; + type Item = as IntoIterator>::Item; + type IntoIter = as IntoIterator>::IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } } impl Test { - /// Loads test from json. - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + /// Loads test from json. + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } } diff --git a/json/src/transaction/transaction.rs b/json/src/transaction/transaction.rs index 718d3080b..39105e0b5 100644 --- a/json/src/transaction/transaction.rs +++ b/json/src/transaction/transaction.rs @@ -16,43 +16,43 @@ //! Transaction test transaction deserialization. -use uint::Uint; use bytes::Bytes; use hash::Address; use maybe::MaybeEmpty; +use uint::Uint; /// Transaction test transaction deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Transaction { - /// Transaction data. - pub data: Bytes, - /// Gas limit. - pub gas_limit: Uint, - /// Gas price. - pub gas_price: Uint, - /// Nonce. - pub nonce: Uint, - /// To. - pub to: MaybeEmpty
, - /// Value. - pub value: Uint, - /// R. - pub r: Uint, - /// S. - pub s: Uint, - /// V. - pub v: Uint, + /// Transaction data. + pub data: Bytes, + /// Gas limit. + pub gas_limit: Uint, + /// Gas price. + pub gas_price: Uint, + /// Nonce. + pub nonce: Uint, + /// To. + pub to: MaybeEmpty
, + /// Value. + pub value: Uint, + /// R. + pub r: Uint, + /// S. + pub s: Uint, + /// V. + pub v: Uint, } #[cfg(test)] mod tests { - use serde_json; - use transaction::Transaction; + use serde_json; + use transaction::Transaction; - #[test] - fn transaction_deserialization() { - let s = r#"{ + #[test] + fn transaction_deserialization() { + let s = r#"{ "data" : "0x", "gasLimit" : "0xf388", "gasPrice" : "0x09184e72a000", @@ -63,7 +63,7 @@ mod tests { "v" : "0x1b", "value" : "0x00" }"#; - let _deserialized: Transaction = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: Transaction = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/transaction/txtest.rs b/json/src/transaction/txtest.rs index e72e5f60b..34a2c0f64 100644 --- a/json/src/transaction/txtest.rs +++ b/json/src/transaction/txtest.rs @@ -16,39 +16,38 @@ //! Transaction test deserialization. -use std::collections::BTreeMap; use bytes::Bytes; -use hash::Address; -use hash::H256; +use hash::{Address, H256}; use spec::ForkSpec; +use std::collections::BTreeMap; /// Transaction test deserialization. #[derive(Debug, Deserialize)] pub struct TransactionTest { - pub rlp: Bytes, - pub _info: ::serde::de::IgnoredAny, - #[serde(flatten)] - pub post_state: BTreeMap, + pub rlp: Bytes, + pub _info: ::serde::de::IgnoredAny, + #[serde(flatten)] + pub post_state: BTreeMap, } /// TransactionTest post state. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub struct PostState { - /// Transaction sender. - pub sender: Option
, - /// Transaction hash. - pub hash: Option, + /// Transaction sender. + pub sender: Option
, + /// Transaction hash. + pub hash: Option, } #[cfg(test)] mod tests { - use serde_json; - use transaction::TransactionTest; + use serde_json; + use transaction::TransactionTest; - #[test] - fn transaction_deserialization() { - let s = r#"{ + #[test] + fn transaction_deserialization() { + let s = r#"{ "Byzantium" : { "hash" : "4782cb5edcaeda1f0aef204b161214f124cefade9e146245183abbb9ca01bca5", "sender" : "2ea991808ba979ba103147edfd72304ebd95c028" @@ -77,7 +76,7 @@ mod tests { "rlp" : "0xf865808698852840a46f82d6d894095e7baea6a6c7c4c2dfeb977efac326af552d87808025a098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa01887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3" }"#; - let _deserialized: TransactionTest = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: TransactionTest = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/trie/input.rs b/json/src/trie/input.rs index 4c56c10d7..acab2544a 100644 --- a/json/src/trie/input.rs +++ b/json/src/trie/input.rs @@ -16,141 +16,164 @@ //! Trie test input deserialization. -use std::fmt; -use std::collections::BTreeMap; -use std::str::FromStr; use bytes::Bytes; -use serde::{Deserialize, Deserializer}; -use serde::de::{Error as ErrorTrait, Visitor, MapAccess, SeqAccess}; +use serde::{ + de::{Error as ErrorTrait, MapAccess, SeqAccess, Visitor}, + Deserialize, Deserializer, +}; +use std::{collections::BTreeMap, fmt, str::FromStr}; /// Trie test input. #[derive(Debug, PartialEq)] pub struct Input { - /// Input params. - pub data: BTreeMap>, + /// Input params. 
+ pub data: BTreeMap>, } impl<'a> Deserialize<'a> for Input { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> - { - deserializer.deserialize_any(InputVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(InputVisitor) + } } struct InputVisitor; impl<'a> Visitor<'a> for InputVisitor { - type Value = Input; + type Value = Input; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a map of bytes into bytes") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a map of bytes into bytes") + } - fn visit_map(self, mut visitor: V) -> Result where V: MapAccess<'a> { - let mut result = BTreeMap::new(); + fn visit_map(self, mut visitor: V) -> Result + where + V: MapAccess<'a>, + { + let mut result = BTreeMap::new(); - loop { - let key_str: Option = visitor.next_key()?; - let key = match key_str { - Some(ref k) if k.starts_with("0x") => Bytes::from_str(k).map_err(V::Error::custom)?, - Some(k) => Bytes::new(k.into_bytes()), - None => { break; } - }; + loop { + let key_str: Option = visitor.next_key()?; + let key = match key_str { + Some(ref k) if k.starts_with("0x") => { + Bytes::from_str(k).map_err(V::Error::custom)? + } + Some(k) => Bytes::new(k.into_bytes()), + None => { + break; + } + }; - let val_str: Option = visitor.next_value()?; - let val = match val_str { - Some(ref v) if v.starts_with("0x") => Some(Bytes::from_str(v).map_err(V::Error::custom)?), - Some(v) => Some(Bytes::new(v.into_bytes())), - None => None, - }; + let val_str: Option = visitor.next_value()?; + let val = match val_str { + Some(ref v) if v.starts_with("0x") => { + Some(Bytes::from_str(v).map_err(V::Error::custom)?) 
+ } + Some(v) => Some(Bytes::new(v.into_bytes())), + None => None, + }; - result.insert(key, val); - } + result.insert(key, val); + } - let input = Input { - data: result - }; + let input = Input { data: result }; - Ok(input) - } + Ok(input) + } - fn visit_seq(self, mut visitor: V) -> Result where V: SeqAccess<'a> { - let mut result = BTreeMap::new(); + fn visit_seq(self, mut visitor: V) -> Result + where + V: SeqAccess<'a>, + { + let mut result = BTreeMap::new(); - loop { - let keyval: Option>> = visitor.next_element()?; - let keyval = match keyval { - Some(k) => k, - _ => { break; }, - }; + loop { + let keyval: Option>> = visitor.next_element()?; + let keyval = match keyval { + Some(k) => k, + _ => { + break; + } + }; - if keyval.len() != 2 { - return Err(V::Error::custom("Invalid key value pair.")); - } + if keyval.len() != 2 { + return Err(V::Error::custom("Invalid key value pair.")); + } - let ref key_str: Option = keyval[0]; - let ref val_str: Option = keyval[1]; + let ref key_str: Option = keyval[0]; + let ref val_str: Option = keyval[1]; - let key = match *key_str { - Some(ref k) if k.starts_with("0x") => Bytes::from_str(k).map_err(V::Error::custom)?, - Some(ref k) => Bytes::new(k.clone().into_bytes()), - None => { break; } - }; + let key = match *key_str { + Some(ref k) if k.starts_with("0x") => { + Bytes::from_str(k).map_err(V::Error::custom)? + } + Some(ref k) => Bytes::new(k.clone().into_bytes()), + None => { + break; + } + }; - let val = match *val_str { - Some(ref v) if v.starts_with("0x") => Some(Bytes::from_str(v).map_err(V::Error::custom)?), - Some(ref v) => Some(Bytes::new(v.clone().into_bytes())), - None => None, - }; + let val = match *val_str { + Some(ref v) if v.starts_with("0x") => { + Some(Bytes::from_str(v).map_err(V::Error::custom)?) 
+ } + Some(ref v) => Some(Bytes::new(v.clone().into_bytes())), + None => None, + }; - result.insert(key, val); - } + result.insert(key, val); + } - let input = Input { - data: result - }; + let input = Input { data: result }; - Ok(input) - } + Ok(input) + } } #[cfg(test)] mod tests { - use std::collections::BTreeMap; - use serde_json; - use bytes::Bytes; - use super::Input; + use super::Input; + use bytes::Bytes; + use serde_json; + use std::collections::BTreeMap; - #[test] - fn input_deserialization_from_map() { - let s = r#"{ + #[test] + fn input_deserialization_from_map() { + let s = r#"{ "0x0045" : "0x0123456789", "be" : "e", "0x0a" : null }"#; - let input: Input = serde_json::from_str(s).unwrap(); - let mut map = BTreeMap::new(); - map.insert(Bytes::new(vec![0, 0x45]), Some(Bytes::new(vec![0x01, 0x23, 0x45, 0x67, 0x89]))); - map.insert(Bytes::new(vec![0x62, 0x65]), Some(Bytes::new(vec![0x65]))); - map.insert(Bytes::new(vec![0x0a]), None); - assert_eq!(input.data, map); - } + let input: Input = serde_json::from_str(s).unwrap(); + let mut map = BTreeMap::new(); + map.insert( + Bytes::new(vec![0, 0x45]), + Some(Bytes::new(vec![0x01, 0x23, 0x45, 0x67, 0x89])), + ); + map.insert(Bytes::new(vec![0x62, 0x65]), Some(Bytes::new(vec![0x65]))); + map.insert(Bytes::new(vec![0x0a]), None); + assert_eq!(input.data, map); + } - #[test] - fn input_deserialization_from_array() { - let s = r#"[ + #[test] + fn input_deserialization_from_array() { + let s = r#"[ ["0x0045", "0x0123456789"], ["be", "e"], ["0x0a", null] ]"#; - let input: Input = serde_json::from_str(s).unwrap(); - let mut map = BTreeMap::new(); - map.insert(Bytes::new(vec![0, 0x45]), Some(Bytes::new(vec![0x01, 0x23, 0x45, 0x67, 0x89]))); - map.insert(Bytes::new(vec![0x62, 0x65]), Some(Bytes::new(vec![0x65]))); - map.insert(Bytes::new(vec![0x0a]), None); - assert_eq!(input.data, map); - } + let input: Input = serde_json::from_str(s).unwrap(); + let mut map = BTreeMap::new(); + map.insert( + Bytes::new(vec![0, 0x45]), 
+ Some(Bytes::new(vec![0x01, 0x23, 0x45, 0x67, 0x89])), + ); + map.insert(Bytes::new(vec![0x62, 0x65]), Some(Bytes::new(vec![0x65]))); + map.insert(Bytes::new(vec![0x0a]), None); + assert_eq!(input.data, map); + } } diff --git a/json/src/trie/mod.rs b/json/src/trie/mod.rs index 68fec0876..d6c57ba54 100644 --- a/json/src/trie/mod.rs +++ b/json/src/trie/mod.rs @@ -17,9 +17,7 @@ //! Trie test deserialization. mod input; -mod trie; mod test; +mod trie; -pub use self::input::Input; -pub use self::trie::Trie; -pub use self::test::Test; +pub use self::{input::Input, test::Test, trie::Trie}; diff --git a/json/src/trie/test.rs b/json/src/trie/test.rs index 39876da0a..407838def 100644 --- a/json/src/trie/test.rs +++ b/json/src/trie/test.rs @@ -16,10 +16,8 @@ //! TransactionTest test deserializer. -use std::collections::BTreeMap; -use std::io::Read; -use serde_json; -use serde_json::Error; +use serde_json::{self, Error}; +use std::{collections::BTreeMap, io::Read}; use trie::Trie; /// TransactionTest test deserializer. @@ -27,17 +25,20 @@ use trie::Trie; pub struct Test(BTreeMap); impl IntoIterator for Test { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; + type Item = as IntoIterator>::Item; + type IntoIter = as IntoIterator>::IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } } impl Test { - /// Loads test from json. - pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + /// Loads test from json. + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } } diff --git a/json/src/trie/trie.rs b/json/src/trie/trie.rs index 756e54f43..d21311137 100644 --- a/json/src/trie/trie.rs +++ b/json/src/trie/trie.rs @@ -22,9 +22,9 @@ use trie::Input; /// Trie test deserialization. #[derive(Debug, Deserialize, PartialEq)] pub struct Trie { - /// Trie test input. 
- #[serde(rename = "in")] - pub input: Input, - /// Trie root hash. - pub root: H256, + /// Trie test input. + #[serde(rename = "in")] + pub input: Input, + /// Trie root hash. + pub root: H256, } diff --git a/json/src/uint.rs b/json/src/uint.rs index 4b9ccec6f..ca387017d 100644 --- a/json/src/uint.rs +++ b/json/src/uint.rs @@ -16,132 +16,161 @@ //! Lenient uint json deserialization for test json files. -use std::fmt; -use std::str::FromStr; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde::de::{Error, Visitor, Unexpected}; use ethereum_types::U256; +use serde::{ + de::{Error, Unexpected, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::{fmt, str::FromStr}; /// Lenient uint json deserialization for test json files. #[derive(Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy)] pub struct Uint(pub U256); impl Into for Uint { - fn into(self) -> U256 { - self.0 - } + fn into(self) -> U256 { + self.0 + } } impl Into for Uint { - fn into(self) -> u64 { - u64::from(self.0) - } + fn into(self) -> u64 { + u64::from(self.0) + } } impl Into for Uint { - fn into(self) -> usize { - // TODO: clean it after util conversions refactored. - u64::from(self.0) as usize - } + fn into(self) -> usize { + // TODO: clean it after util conversions refactored. 
+ u64::from(self.0) as usize + } } impl Into for Uint { - fn into(self) -> u8 { - u64::from(self.0) as u8 - } + fn into(self) -> u8 { + u64::from(self.0) as u8 + } } impl Serialize for Uint { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - self.0.to_string().serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.0.to_string().serialize(serializer) + } } impl<'a> Deserialize<'a> for Uint { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(UintVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(UintVisitor) + } } struct UintVisitor; impl<'a> Visitor<'a> for UintVisitor { - type Value = Uint; + type Value = Uint; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a hex encoded or decimal uint") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex encoded or decimal uint") + } - fn visit_u64(self, value: u64) -> Result where E: Error { - Ok(Uint(U256::from(value))) - } + fn visit_u64(self, value: u64) -> Result + where + E: Error, + { + Ok(Uint(U256::from(value))) + } - fn visit_str(self, value: &str) -> Result where E: Error { - let value = match value.len() { - 0 => U256::from(0), - 2 if value.starts_with("0x") => U256::from(0), - _ if value.starts_with("0x") => U256::from_str(&value[2..]).map_err(|e| { - Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) - })?, - _ => U256::from_dec_str(value).map_err(|e| { - Error::custom(format!("Invalid decimal value {}: {:?}", value, e).as_str()) - })? 
- }; + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + let value = match value.len() { + 0 => U256::from(0), + 2 if value.starts_with("0x") => U256::from(0), + _ if value.starts_with("0x") => U256::from_str(&value[2..]).map_err(|e| { + Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) + })?, + _ => U256::from_dec_str(value).map_err(|e| { + Error::custom(format!("Invalid decimal value {}: {:?}", value, e).as_str()) + })?, + }; - Ok(Uint(value)) - } + Ok(Uint(value)) + } - fn visit_string(self, value: String) -> Result where E: Error { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: Error, + { + self.visit_str(value.as_ref()) + } } /// Deserialize and validate that the value is non-zero -pub fn validate_non_zero<'de, D>(d: D) -> Result where D: Deserializer<'de> { - let value = Uint::deserialize(d)?; +pub fn validate_non_zero<'de, D>(d: D) -> Result +where + D: Deserializer<'de>, +{ + let value = Uint::deserialize(d)?; - if value == Uint(U256::from(0)) { - return Err(Error::invalid_value(Unexpected::Unsigned(value.into()), &"a non-zero value")) - } + if value == Uint(U256::from(0)) { + return Err(Error::invalid_value( + Unexpected::Unsigned(value.into()), + &"a non-zero value", + )); + } - Ok(value) + Ok(value) } /// Deserialize and validate that the value is non-zero -pub fn validate_optional_non_zero<'de, D>(d: D) -> Result, D::Error> where D: Deserializer<'de> { - let value: Option = Option::deserialize(d)?; +pub fn validate_optional_non_zero<'de, D>(d: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let value: Option = Option::deserialize(d)?; - if let Some(value) = value { - if value == Uint(U256::from(0)) { - return Err(Error::invalid_value(Unexpected::Unsigned(value.into()), &"a non-zero value")) - } - } + if let Some(value) = value { + if value == Uint(U256::from(0)) { + return Err(Error::invalid_value( + Unexpected::Unsigned(value.into()), + &"a non-zero 
value", + )); + } + } - Ok(value) + Ok(value) } #[cfg(test)] mod test { - use serde_json; - use ethereum_types::U256; - use uint::Uint; + use ethereum_types::U256; + use serde_json; + use uint::Uint; - #[test] - fn uint_deserialization() { - let s = r#"["0xa", "10", "", "0x", 0]"#; - let deserialized: Vec = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, vec![ - Uint(U256::from(10)), - Uint(U256::from(10)), - Uint(U256::from(0)), - Uint(U256::from(0)), - Uint(U256::from(0)) - ]); - } + #[test] + fn uint_deserialization() { + let s = r#"["0xa", "10", "", "0x", 0]"#; + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + vec![ + Uint(U256::from(10)), + Uint(U256::from(10)), + Uint(U256::from(0)), + Uint(U256::from(0)), + Uint(U256::from(0)) + ] + ); + } - #[test] - fn uint_into() { - assert_eq!(U256::from(10), Uint(U256::from(10)).into()); - } + #[test] + fn uint_into() { + assert_eq!(U256::from(10), Uint(U256::from(10)).into()); + } } diff --git a/json/src/vm/call.rs b/json/src/vm/call.rs index aa75862f0..fafad33d1 100644 --- a/json/src/vm/call.rs +++ b/json/src/vm/call.rs @@ -18,67 +18,76 @@ use bytes::Bytes; use hash::Address; -use uint::Uint; use maybe::MaybeEmpty; +use uint::Uint; /// Vm call deserialization. #[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Call { - /// Call data. - pub data: Bytes, - /// Call destination. - pub destination: MaybeEmpty
, - /// Gas limit. - pub gas_limit: Uint, - /// Call value. - pub value: Uint, + /// Call data. + pub data: Bytes, + /// Call destination. + pub destination: MaybeEmpty
, + /// Gas limit. + pub gas_limit: Uint, + /// Call value. + pub value: Uint, } #[cfg(test)] mod tests { - use serde_json; - use vm::Call; - use ethereum_types::{U256, H160 as Hash160}; - use uint::Uint; - use hash::Address; - use maybe::MaybeEmpty; - use std::str::FromStr; + use ethereum_types::{H160 as Hash160, U256}; + use hash::Address; + use maybe::MaybeEmpty; + use serde_json; + use std::str::FromStr; + use uint::Uint; + use vm::Call; - #[test] - fn call_deserialization_empty_dest() { - let s = r#"{ + #[test] + fn call_deserialization_empty_dest() { + let s = r#"{ "data" : "0x1111222233334444555566667777888899990000aaaabbbbccccddddeeeeffff", "destination" : "", "gasLimit" : "0x1748766aa5", "value" : "0x00" }"#; - let call: Call = serde_json::from_str(s).unwrap(); + let call: Call = serde_json::from_str(s).unwrap(); - assert_eq!(&call.data[..], - &[0x11, 0x11, 0x22, 0x22, 0x33, 0x33, 0x44, 0x44, 0x55, 0x55, 0x66, 0x66, 0x77, 0x77, - 0x88, 0x88, 0x99, 0x99, 0x00, 0x00, 0xaa, 0xaa, 0xbb, 0xbb, 0xcc, 0xcc, 0xdd, 0xdd, - 0xee, 0xee, 0xff, 0xff]); + assert_eq!( + &call.data[..], + &[ + 0x11, 0x11, 0x22, 0x22, 0x33, 0x33, 0x44, 0x44, 0x55, 0x55, 0x66, 0x66, 0x77, 0x77, + 0x88, 0x88, 0x99, 0x99, 0x00, 0x00, 0xaa, 0xaa, 0xbb, 0xbb, 0xcc, 0xcc, 0xdd, 0xdd, + 0xee, 0xee, 0xff, 0xff + ] + ); - assert_eq!(call.destination, MaybeEmpty::None); - assert_eq!(call.gas_limit, Uint(U256::from(0x1748766aa5u64))); - assert_eq!(call.value, Uint(U256::from(0))); - } + assert_eq!(call.destination, MaybeEmpty::None); + assert_eq!(call.gas_limit, Uint(U256::from(0x1748766aa5u64))); + assert_eq!(call.value, Uint(U256::from(0))); + } - #[test] - fn call_deserialization_full_dest() { - let s = r#"{ + #[test] + fn call_deserialization_full_dest() { + let s = r#"{ "data" : "0x1234", "destination" : "5a39ed1020c04d4d84539975b893a4e7c53eab6c", "gasLimit" : "0x1748766aa5", "value" : "0x00" }"#; - let call: Call = serde_json::from_str(s).unwrap(); + let call: Call = 
serde_json::from_str(s).unwrap(); - assert_eq!(&call.data[..], &[0x12, 0x34]); - assert_eq!(call.destination, MaybeEmpty::Some(Address(Hash160::from_str("5a39ed1020c04d4d84539975b893a4e7c53eab6c").unwrap()))); - assert_eq!(call.gas_limit, Uint(U256::from(0x1748766aa5u64))); - assert_eq!(call.value, Uint(U256::from(0))); - } + assert_eq!(&call.data[..], &[0x12, 0x34]); + assert_eq!( + call.destination, + MaybeEmpty::Some(Address( + Hash160::from_str("5a39ed1020c04d4d84539975b893a4e7c53eab6c").unwrap() + )) + ); + assert_eq!(call.gas_limit, Uint(U256::from(0x1748766aa5u64))); + assert_eq!(call.value, Uint(U256::from(0))); + } } diff --git a/json/src/vm/env.rs b/json/src/vm/env.rs index e06812c0a..6dd1e6157 100644 --- a/json/src/vm/env.rs +++ b/json/src/vm/env.rs @@ -21,38 +21,38 @@ use uint::Uint; /// Vm environment. #[derive(Debug, PartialEq, Deserialize)] pub struct Env { - /// Address. - #[serde(rename = "currentCoinbase")] - pub author: Address, - /// Difficulty - #[serde(rename = "currentDifficulty")] - pub difficulty: Uint, - /// Gas limit. - #[serde(rename = "currentGasLimit")] - pub gas_limit: Uint, - /// Number. - #[serde(rename = "currentNumber")] - pub number: Uint, - /// Timestamp. - #[serde(rename = "currentTimestamp")] - pub timestamp: Uint, + /// Address. + #[serde(rename = "currentCoinbase")] + pub author: Address, + /// Difficulty + #[serde(rename = "currentDifficulty")] + pub difficulty: Uint, + /// Gas limit. + #[serde(rename = "currentGasLimit")] + pub gas_limit: Uint, + /// Number. + #[serde(rename = "currentNumber")] + pub number: Uint, + /// Timestamp. 
+ #[serde(rename = "currentTimestamp")] + pub timestamp: Uint, } #[cfg(test)] mod tests { - use serde_json; - use vm::Env; + use serde_json; + use vm::Env; - #[test] - fn env_deserialization() { - let s = r#"{ + #[test] + fn env_deserialization() { + let s = r#"{ "currentCoinbase" : "2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", "currentDifficulty" : "0x0100", "currentGasLimit" : "0x0f4240", "currentNumber" : "0x00", "currentTimestamp" : "0x01" }"#; - let _deserialized: Env = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: Env = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/json/src/vm/mod.rs b/json/src/vm/mod.rs index d8f99e200..f80938bc5 100644 --- a/json/src/vm/mod.rs +++ b/json/src/vm/mod.rs @@ -16,14 +16,10 @@ //! Vm test loader. +pub mod call; pub mod env; +pub mod test; pub mod transaction; pub mod vm; -pub mod call; -pub mod test; -pub use self::env::Env; -pub use self::transaction::Transaction; -pub use self::vm::Vm; -pub use self::call::Call; -pub use self::test::Test; +pub use self::{call::Call, env::Env, test::Test, transaction::Transaction, vm::Vm}; diff --git a/json/src/vm/test.rs b/json/src/vm/test.rs index 9dfe814ae..baa6d2d7d 100644 --- a/json/src/vm/test.rs +++ b/json/src/vm/test.rs @@ -16,10 +16,8 @@ //! Vm test deserializer. -use std::collections::BTreeMap; -use std::io::Read; -use serde_json; -use serde_json::Error; +use serde_json::{self, Error}; +use std::{collections::BTreeMap, io::Read}; use vm::Vm; /// Vm test deserializer. @@ -27,17 +25,20 @@ use vm::Vm; pub struct Test(BTreeMap); impl IntoIterator for Test { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; + type Item = as IntoIterator>::Item; + type IntoIter = as IntoIterator>::IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } } impl Test { - /// Loads test from json. 
- pub fn load(reader: R) -> Result where R: Read { - serde_json::from_reader(reader) - } + /// Loads test from json. + pub fn load(reader: R) -> Result + where + R: Read, + { + serde_json::from_reader(reader) + } } diff --git a/json/src/vm/transaction.rs b/json/src/vm/transaction.rs index 4a9374531..2e28015c9 100644 --- a/json/src/vm/transaction.rs +++ b/json/src/vm/transaction.rs @@ -15,41 +15,41 @@ // along with Parity Ethereum. If not, see . //! Executed transaction. +use bytes::Bytes; use hash::Address; use uint::Uint; -use bytes::Bytes; /// Executed transaction. #[derive(Debug, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Transaction { - /// Contract address. - pub address: Address, - /// Transaction sender. - #[serde(rename = "caller")] - pub sender: Address, - /// Contract code. - pub code: Bytes, - /// Input data. - pub data: Bytes, - /// Gas. - pub gas: Uint, - /// Gas price. - pub gas_price: Uint, - /// Transaction origin. - pub origin: Address, - /// Sent value. - pub value: Uint, + /// Contract address. + pub address: Address, + /// Transaction sender. + #[serde(rename = "caller")] + pub sender: Address, + /// Contract code. + pub code: Bytes, + /// Input data. + pub data: Bytes, + /// Gas. + pub gas: Uint, + /// Gas price. + pub gas_price: Uint, + /// Transaction origin. + pub origin: Address, + /// Sent value. 
+ pub value: Uint, } #[cfg(test)] mod tests { - use serde_json; - use vm::Transaction; + use serde_json; + use vm::Transaction; - #[test] - fn transaction_deserialization() { - let s = r#"{ + #[test] + fn transaction_deserialization() { + let s = r#"{ "address" : "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6", "caller" : "cd1722f2947def4cf144679da39c4c32bdc35681", "code" : "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01600055", @@ -59,6 +59,6 @@ mod tests { "origin" : "cd1722f2947def4cf144679da39c4c32bdc35681", "value" : "0x0de0b6b3a7640000" }"#; - let _deserialized: Transaction = serde_json::from_str(s).unwrap(); - } + let _deserialized: Transaction = serde_json::from_str(s).unwrap(); + } } diff --git a/json/src/vm/vm.rs b/json/src/vm/vm.rs index 1fbb937cb..04c111717 100644 --- a/json/src/vm/vm.rs +++ b/json/src/vm/vm.rs @@ -16,54 +16,54 @@ //! Vm execution env. -use bytes::Bytes; -use uint::Uint; -use hash::H256; use blockchain::State; -use vm::{Transaction, Call, Env}; +use bytes::Bytes; +use hash::H256; +use uint::Uint; +use vm::{Call, Env, Transaction}; /// Represents vm execution environment before and after execution of transaction. #[derive(Debug, PartialEq, Deserialize)] pub struct Vm { - /// Contract calls made internaly by executed transaction. - #[serde(rename = "callcreates")] - pub calls: Option>, - /// Env info. - pub env: Env, - /// Executed transaction - #[serde(rename = "exec")] - pub transaction: Transaction, - /// Gas left after transaction execution. - #[serde(rename = "gas")] - pub gas_left: Option, - /// Hash of logs created during execution of transaction. - pub logs: Option, - /// Transaction output. - #[serde(rename = "out")] - pub output: Option, - /// Post execution vm state. - #[serde(rename = "post")] - pub post_state: Option, - /// Pre execution vm state. 
- #[serde(rename = "pre")] - pub pre_state: State, + /// Contract calls made internaly by executed transaction. + #[serde(rename = "callcreates")] + pub calls: Option>, + /// Env info. + pub env: Env, + /// Executed transaction + #[serde(rename = "exec")] + pub transaction: Transaction, + /// Gas left after transaction execution. + #[serde(rename = "gas")] + pub gas_left: Option, + /// Hash of logs created during execution of transaction. + pub logs: Option, + /// Transaction output. + #[serde(rename = "out")] + pub output: Option, + /// Post execution vm state. + #[serde(rename = "post")] + pub post_state: Option, + /// Pre execution vm state. + #[serde(rename = "pre")] + pub pre_state: State, } impl Vm { - /// Returns true if transaction execution run out of gas. - pub fn out_of_gas(&self) -> bool { - self.calls.is_none() - } + /// Returns true if transaction execution run out of gas. + pub fn out_of_gas(&self) -> bool { + self.calls.is_none() + } } #[cfg(test)] mod tests { - use serde_json; - use vm::Vm; + use serde_json; + use vm::Vm; - #[test] - fn vm_deserialization() { - let s = r#"{ + #[test] + fn vm_deserialization() { + let s = r#"{ "callcreates" : [ ], "env" : { @@ -107,7 +107,7 @@ mod tests { } } }"#; - let _deserialized: Vm = serde_json::from_str(s).unwrap(); - // TODO: validate all fields - } + let _deserialized: Vm = serde_json::from_str(s).unwrap(); + // TODO: validate all fields + } } diff --git a/miner/local-store/src/lib.rs b/miner/local-store/src/lib.rs index 56c573b06..e671cf7d5 100644 --- a/miner/local-store/src/lib.rs +++ b/miner/local-store/src/lib.rs @@ -16,24 +16,21 @@ //! 
Manages local node data: pending local transactions, sync security level -use std::sync::Arc; -use std::fmt; -use std::time::Duration; +use std::{fmt, sync::Arc, time::Duration}; -use types::transaction::{ - SignedTransaction, PendingTransaction, UnverifiedTransaction, - Condition as TransactionCondition -}; use io::IoHandler; -use rlp::Rlp; use kvdb::KeyValueDB; +use rlp::Rlp; +use types::transaction::{ + Condition as TransactionCondition, PendingTransaction, SignedTransaction, UnverifiedTransaction, +}; extern crate common_types as types; extern crate ethcore_io as io; -extern crate rlp; -extern crate serde_json; -extern crate serde; extern crate kvdb; +extern crate rlp; +extern crate serde; +extern crate serde_json; #[macro_use] extern crate serde_derive; @@ -54,95 +51,95 @@ const UPDATE_TIMEOUT: Duration = Duration::from_secs(15 * 60); // once every 15 /// Errors which can occur while using the local data store. #[derive(Debug)] pub enum Error { - /// Io and database errors: these manifest as `String`s. - Io(::std::io::Error), - /// JSON errors. - Json(::serde_json::Error), + /// Io and database errors: these manifest as `String`s. + Io(::std::io::Error), + /// JSON errors. 
+ Json(::serde_json::Error), } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Io(ref val) => write!(f, "{}", val), - Error::Json(ref err) => write!(f, "{}", err), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Io(ref val) => write!(f, "{}", val), + Error::Json(ref err) => write!(f, "{}", err), + } + } } #[derive(Serialize, Deserialize)] enum Condition { - Number(types::BlockNumber), - Timestamp(u64), + Number(types::BlockNumber), + Timestamp(u64), } impl From for Condition { - fn from(cond: TransactionCondition) -> Self { - match cond { - TransactionCondition::Number(num) => Condition::Number(num), - TransactionCondition::Timestamp(tm) => Condition::Timestamp(tm), - } - } + fn from(cond: TransactionCondition) -> Self { + match cond { + TransactionCondition::Number(num) => Condition::Number(num), + TransactionCondition::Timestamp(tm) => Condition::Timestamp(tm), + } + } } impl Into for Condition { - fn into(self) -> TransactionCondition { - match self { - Condition::Number(num) => TransactionCondition::Number(num), - Condition::Timestamp(tm) => TransactionCondition::Timestamp(tm), - } - } + fn into(self) -> TransactionCondition { + match self { + Condition::Number(num) => TransactionCondition::Number(num), + Condition::Timestamp(tm) => TransactionCondition::Timestamp(tm), + } + } } #[derive(Serialize, Deserialize)] struct TransactionEntry { - rlp_bytes: Vec, - condition: Option, + rlp_bytes: Vec, + condition: Option, } impl TransactionEntry { - fn into_pending(self) -> Option { - let tx: UnverifiedTransaction = match Rlp::new(&self.rlp_bytes).as_val() { - Err(e) => { - warn!(target: "local_store", "Invalid persistent transaction stored: {}", e); - return None - } - Ok(tx) => tx, - }; + fn into_pending(self) -> Option { + let tx: UnverifiedTransaction = match Rlp::new(&self.rlp_bytes).as_val() { + Err(e) => { + warn!(target: "local_store", "Invalid persistent 
transaction stored: {}", e); + return None; + } + Ok(tx) => tx, + }; - let hash = tx.hash(); - match SignedTransaction::new(tx) { - Ok(tx) => Some(PendingTransaction::new(tx, self.condition.map(Into::into))), - Err(_) => { - warn!(target: "local_store", "Bad signature on persistent transaction: {}", hash); - return None - } - } - } + let hash = tx.hash(); + match SignedTransaction::new(tx) { + Ok(tx) => Some(PendingTransaction::new(tx, self.condition.map(Into::into))), + Err(_) => { + warn!(target: "local_store", "Bad signature on persistent transaction: {}", hash); + return None; + } + } + } } impl From for TransactionEntry { - fn from(pending: PendingTransaction) -> Self { - TransactionEntry { - rlp_bytes: ::rlp::encode(&pending.transaction), - condition: pending.condition.map(Into::into), - } - } + fn from(pending: PendingTransaction) -> Self { + TransactionEntry { + rlp_bytes: ::rlp::encode(&pending.transaction), + condition: pending.condition.map(Into::into), + } + } } /// Something which can provide information about the local node. pub trait NodeInfo: Send + Sync { - /// Get all pending transactions of local origin. - fn pending_transactions(&self) -> Vec; + /// Get all pending transactions of local origin. + fn pending_transactions(&self) -> Vec; } /// Create a new local data store, given a database, a column to write to, and a node. /// Attempts to read data out of the store, and move it into the node. pub fn create(db: Arc, col: Option, node: T) -> LocalDataStore { - LocalDataStore { - db: db, - col: col, - node: node, - } + LocalDataStore { + db: db, + col: col, + node: node, + } } /// Manages local node data. @@ -150,180 +147,192 @@ pub fn create(db: Arc, col: Option, node: T) -> Lo /// In specific, this will be used to store things like unpropagated local transactions /// and the node security level. 
pub struct LocalDataStore { - db: Arc, - col: Option, - node: T, + db: Arc, + col: Option, + node: T, } impl LocalDataStore { - /// Attempt to read pending transactions out of the local store. - pub fn pending_transactions(&self) -> Result, Error> { - if let Some(val) = self.db.get(self.col, LOCAL_TRANSACTIONS_KEY).map_err(Error::Io)? { - let local_txs: Vec<_> = ::serde_json::from_slice::>(&val) - .map_err(Error::Json)? - .into_iter() - .filter_map(TransactionEntry::into_pending) - .collect(); + /// Attempt to read pending transactions out of the local store. + pub fn pending_transactions(&self) -> Result, Error> { + if let Some(val) = self + .db + .get(self.col, LOCAL_TRANSACTIONS_KEY) + .map_err(Error::Io)? + { + let local_txs: Vec<_> = ::serde_json::from_slice::>(&val) + .map_err(Error::Json)? + .into_iter() + .filter_map(TransactionEntry::into_pending) + .collect(); - Ok(local_txs) - } else { - Ok(Vec::new()) - } - } + Ok(local_txs) + } else { + Ok(Vec::new()) + } + } - /// Update the entries in the database. - pub fn update(&self) -> Result<(), Error> { - trace!(target: "local_store", "Updating local store entries."); + /// Update the entries in the database. + pub fn update(&self) -> Result<(), Error> { + trace!(target: "local_store", "Updating local store entries."); - let local_entries: Vec = self.node.pending_transactions() - .into_iter() - .map(Into::into) - .collect(); + let local_entries: Vec = self + .node + .pending_transactions() + .into_iter() + .map(Into::into) + .collect(); - self.write_txs(&local_entries) - } + self.write_txs(&local_entries) + } - /// Clear data in this column. - pub fn clear(&self) -> Result<(), Error> { - trace!(target: "local_store", "Clearing local store entries."); + /// Clear data in this column. + pub fn clear(&self) -> Result<(), Error> { + trace!(target: "local_store", "Clearing local store entries."); - self.write_txs(&[]) - } + self.write_txs(&[]) + } - // helper for writing a vector of transaction entries to disk. 
- fn write_txs(&self, txs: &[TransactionEntry]) -> Result<(), Error> { - let mut batch = self.db.transaction(); + // helper for writing a vector of transaction entries to disk. + fn write_txs(&self, txs: &[TransactionEntry]) -> Result<(), Error> { + let mut batch = self.db.transaction(); - let local_json = ::serde_json::to_value(txs).map_err(Error::Json)?; - let json_str = format!("{}", local_json); + let local_json = ::serde_json::to_value(txs).map_err(Error::Json)?; + let json_str = format!("{}", local_json); - batch.put_vec(self.col, LOCAL_TRANSACTIONS_KEY, json_str.into_bytes()); - self.db.write(batch).map_err(Error::Io) - } + batch.put_vec(self.col, LOCAL_TRANSACTIONS_KEY, json_str.into_bytes()); + self.db.write(batch).map_err(Error::Io) + } } impl IoHandler for LocalDataStore { - fn initialize(&self, io: &::io::IoContext) { - if let Err(e) = io.register_timer(UPDATE_TIMER, UPDATE_TIMEOUT) { - warn!(target: "local_store", "Error registering local store update timer: {}", e); - } - } + fn initialize(&self, io: &::io::IoContext) { + if let Err(e) = io.register_timer(UPDATE_TIMER, UPDATE_TIMEOUT) { + warn!(target: "local_store", "Error registering local store update timer: {}", e); + } + } - fn timeout(&self, _io: &::io::IoContext, timer: ::io::TimerToken) { - if let UPDATE_TIMER = timer { - if let Err(e) = self.update() { - debug!(target: "local_store", "Error updating local store: {}", e); - } - } - } + fn timeout(&self, _io: &::io::IoContext, timer: ::io::TimerToken) { + if let UPDATE_TIMER = timer { + if let Err(e) = self.update() { + debug!(target: "local_store", "Error updating local store: {}", e); + } + } + } } impl Drop for LocalDataStore { - fn drop(&mut self) { - debug!(target: "local_store", "Updating node data store on shutdown."); + fn drop(&mut self) { + debug!(target: "local_store", "Updating node data store on shutdown."); - let _ = self.update(); - } + let _ = self.update(); + } } #[cfg(test)] mod tests { - use super::NodeInfo; + use 
super::NodeInfo; - use std::sync::Arc; - use types::transaction::{Transaction, Condition, PendingTransaction}; - use ethkey::{Brain, Generator}; + use ethkey::{Brain, Generator}; + use std::sync::Arc; + use types::transaction::{Condition, PendingTransaction, Transaction}; - // we want to test: round-trip of good transactions. - // failure to roundtrip bad transactions (but that it doesn't panic) + // we want to test: round-trip of good transactions. + // failure to roundtrip bad transactions (but that it doesn't panic) - struct Dummy(Vec); - impl NodeInfo for Dummy { - fn pending_transactions(&self) -> Vec { self.0.clone() } - } + struct Dummy(Vec); + impl NodeInfo for Dummy { + fn pending_transactions(&self) -> Vec { + self.0.clone() + } + } - #[test] - fn twice_empty() { - let db = Arc::new(::kvdb_memorydb::create(0)); + #[test] + fn twice_empty() { + let db = Arc::new(::kvdb_memorydb::create(0)); - { - let store = super::create(db.clone(), None, Dummy(vec![])); - assert_eq!(store.pending_transactions().unwrap(), vec![]) - } + { + let store = super::create(db.clone(), None, Dummy(vec![])); + assert_eq!(store.pending_transactions().unwrap(), vec![]) + } - { - let store = super::create(db.clone(), None, Dummy(vec![])); - assert_eq!(store.pending_transactions().unwrap(), vec![]) - } - } + { + let store = super::create(db.clone(), None, Dummy(vec![])); + assert_eq!(store.pending_transactions().unwrap(), vec![]) + } + } - #[test] - fn with_condition() { - let keypair = Brain::new("abcd".into()).generate().unwrap(); - let transactions: Vec<_> = (0..10u64).map(|nonce| { - let mut tx = Transaction::default(); - tx.nonce = nonce.into(); + #[test] + fn with_condition() { + let keypair = Brain::new("abcd".into()).generate().unwrap(); + let transactions: Vec<_> = (0..10u64) + .map(|nonce| { + let mut tx = Transaction::default(); + tx.nonce = nonce.into(); - let signed = tx.sign(keypair.secret(), None); - let condition = match nonce { - 5 => Some(Condition::Number(100_000)), 
- _ => None, - }; + let signed = tx.sign(keypair.secret(), None); + let condition = match nonce { + 5 => Some(Condition::Number(100_000)), + _ => None, + }; - PendingTransaction::new(signed, condition) - }).collect(); + PendingTransaction::new(signed, condition) + }) + .collect(); - let db = Arc::new(::kvdb_memorydb::create(0)); + let db = Arc::new(::kvdb_memorydb::create(0)); - { - // nothing written yet, will write pending. - let store = super::create(db.clone(), None, Dummy(transactions.clone())); - assert_eq!(store.pending_transactions().unwrap(), vec![]) - } - { - // pending written, will write nothing. - let store = super::create(db.clone(), None, Dummy(vec![])); - assert_eq!(store.pending_transactions().unwrap(), transactions) - } - { - // pending removed, will write nothing. - let store = super::create(db.clone(), None, Dummy(vec![])); - assert_eq!(store.pending_transactions().unwrap(), vec![]) - } - } + { + // nothing written yet, will write pending. + let store = super::create(db.clone(), None, Dummy(transactions.clone())); + assert_eq!(store.pending_transactions().unwrap(), vec![]) + } + { + // pending written, will write nothing. + let store = super::create(db.clone(), None, Dummy(vec![])); + assert_eq!(store.pending_transactions().unwrap(), transactions) + } + { + // pending removed, will write nothing. 
+ let store = super::create(db.clone(), None, Dummy(vec![])); + assert_eq!(store.pending_transactions().unwrap(), vec![]) + } + } - #[test] - fn skips_bad_transactions() { - let keypair = Brain::new("abcd".into()).generate().unwrap(); - let mut transactions: Vec<_> = (0..10u64).map(|nonce| { - let mut tx = Transaction::default(); - tx.nonce = nonce.into(); + #[test] + fn skips_bad_transactions() { + let keypair = Brain::new("abcd".into()).generate().unwrap(); + let mut transactions: Vec<_> = (0..10u64) + .map(|nonce| { + let mut tx = Transaction::default(); + tx.nonce = nonce.into(); - let signed = tx.sign(keypair.secret(), None); + let signed = tx.sign(keypair.secret(), None); - PendingTransaction::new(signed, None) - }).collect(); + PendingTransaction::new(signed, None) + }) + .collect(); - transactions.push({ - let mut tx = Transaction::default(); - tx.nonce = 10.into(); + transactions.push({ + let mut tx = Transaction::default(); + tx.nonce = 10.into(); - let signed = tx.fake_sign(Default::default()); - PendingTransaction::new(signed, None) - }); + let signed = tx.fake_sign(Default::default()); + PendingTransaction::new(signed, None) + }); - let db = Arc::new(::kvdb_memorydb::create(0)); - { - // nothing written, will write bad. - let store = super::create(db.clone(), None, Dummy(transactions.clone())); - assert_eq!(store.pending_transactions().unwrap(), vec![]) - } - { - // try to load transactions. The last transaction, which is invalid, will be skipped. - let store = super::create(db.clone(), None, Dummy(vec![])); - let loaded = store.pending_transactions().unwrap(); - transactions.pop(); - assert_eq!(loaded, transactions); - } - } + let db = Arc::new(::kvdb_memorydb::create(0)); + { + // nothing written, will write bad. + let store = super::create(db.clone(), None, Dummy(transactions.clone())); + assert_eq!(store.pending_transactions().unwrap(), vec![]) + } + { + // try to load transactions. The last transaction, which is invalid, will be skipped. 
+ let store = super::create(db.clone(), None, Dummy(vec![])); + let loaded = store.pending_transactions().unwrap(); + transactions.pop(); + assert_eq!(loaded, transactions); + } + } } diff --git a/miner/price-info/src/lib.rs b/miner/price-info/src/lib.rs index 3745ef12e..91c093452 100644 --- a/miner/price-info/src/lib.rs +++ b/miner/price-info/src/lib.rs @@ -19,8 +19,8 @@ //! A simple client to get the current ETH price using an external API. extern crate futures; -extern crate serde_json; extern crate parity_runtime; +extern crate serde_json; #[macro_use] extern crate log; @@ -30,130 +30,151 @@ extern crate fake_fetch; pub extern crate fetch; -use std::cmp; -use std::fmt; -use std::io; -use std::str; +use std::{cmp, fmt, io, str}; use fetch::{Client as FetchClient, Fetch}; -use futures::{Future, Stream}; -use futures::future::{self, Either}; -use serde_json::Value; +use futures::{ + future::{self, Either}, + Future, Stream, +}; use parity_runtime::Executor; +use serde_json::Value; /// Current ETH price information. #[derive(Debug)] pub struct PriceInfo { - /// Current ETH price in USD. - pub ethusd: f32, + /// Current ETH price in USD. + pub ethusd: f32, } /// Price info error. #[derive(Debug)] pub enum Error { - /// The API returned an unexpected status code. - StatusCode(&'static str), - /// The API returned an unexpected status content. - UnexpectedResponse(Option), - /// There was an error when trying to reach the API. - Fetch(fetch::Error), - /// IO error when reading API response. - Io(io::Error), + /// The API returned an unexpected status code. + StatusCode(&'static str), + /// The API returned an unexpected status content. + UnexpectedResponse(Option), + /// There was an error when trying to reach the API. + Fetch(fetch::Error), + /// IO error when reading API response. 
+ Io(io::Error), } impl From for Error { - fn from(err: io::Error) -> Self { Error::Io(err) } + fn from(err: io::Error) -> Self { + Error::Io(err) + } } impl From for Error { - fn from(err: fetch::Error) -> Self { Error::Fetch(err) } + fn from(err: fetch::Error) -> Self { + Error::Fetch(err) + } } /// A client to get the current ETH price using an external API. pub struct Client { - pool: Executor, - api_endpoint: String, - fetch: F, + pool: Executor, + api_endpoint: String, + fetch: F, } impl fmt::Debug for Client { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("price_info::Client") - .field("api_endpoint", &self.api_endpoint) - .finish() - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("price_info::Client") + .field("api_endpoint", &self.api_endpoint) + .finish() + } } impl cmp::PartialEq for Client { - fn eq(&self, other: &Client) -> bool { - self.api_endpoint == other.api_endpoint - } + fn eq(&self, other: &Client) -> bool { + self.api_endpoint == other.api_endpoint + } } impl Client { - /// Creates a new instance of the `Client` given a `fetch::Client`. - pub fn new(fetch: F, pool: Executor, api_endpoint: String) -> Client { - Client { pool, api_endpoint, fetch } - } + /// Creates a new instance of the `Client` given a `fetch::Client`. + pub fn new(fetch: F, pool: Executor, api_endpoint: String) -> Client { + Client { + pool, + api_endpoint, + fetch, + } + } - /// Gets the current ETH price and calls `set_price` with the result. 
- pub fn get(&self, set_price: G) { - let future = self.fetch.get(&self.api_endpoint, fetch::Abort::default()) - .from_err() - .and_then(|response| { - if !response.is_success() { - let s = Error::StatusCode(response.status().canonical_reason().unwrap_or("unknown")); - return Either::A(future::err(s)); - } - Either::B(response.concat2().from_err()) - }) - .and_then(move |body| { - let body_str = str::from_utf8(&body).ok(); - let value: Option = body_str.and_then(|s| serde_json::from_str(s).ok()); + /// Gets the current ETH price and calls `set_price` with the result. + pub fn get(&self, set_price: G) { + let future = self + .fetch + .get(&self.api_endpoint, fetch::Abort::default()) + .from_err() + .and_then(|response| { + if !response.is_success() { + let s = Error::StatusCode( + response.status().canonical_reason().unwrap_or("unknown"), + ); + return Either::A(future::err(s)); + } + Either::B(response.concat2().from_err()) + }) + .and_then(move |body| { + let body_str = str::from_utf8(&body).ok(); + let value: Option = body_str.and_then(|s| serde_json::from_str(s).ok()); - let ethusd = value - .as_ref() - .and_then(|value| value.pointer("/result/ethusd")) - .and_then(|obj| obj.as_str()) - .and_then(|s| s.parse().ok()); + let ethusd = value + .as_ref() + .and_then(|value| value.pointer("/result/ethusd")) + .and_then(|obj| obj.as_str()) + .and_then(|s| s.parse().ok()); - match ethusd { - Some(ethusd) => { - set_price(PriceInfo { ethusd }); - Ok(()) - }, - None => Err(Error::UnexpectedResponse(body_str.map(From::from))), - } - }) - .map_err(|err| { - warn!("Failed to auto-update latest ETH price: {:?}", err); - }); - self.pool.spawn(future) - } + match ethusd { + Some(ethusd) => { + set_price(PriceInfo { ethusd }); + Ok(()) + } + None => Err(Error::UnexpectedResponse(body_str.map(From::from))), + } + }) + .map_err(|err| { + warn!("Failed to auto-update latest ETH price: {:?}", err); + }); + self.pool.spawn(future) + } } #[cfg(test)] mod test { - use std::sync::Arc; - 
use parity_runtime::{Runtime, Executor}; - use Client; - use std::sync::atomic::{AtomicBool, Ordering}; - use fake_fetch::FakeFetch; + use fake_fetch::FakeFetch; + use parity_runtime::{Executor, Runtime}; + use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }; + use Client; - fn price_info_ok(response: &str, executor: Executor) -> Client> { - Client::new(FakeFetch::new(Some(response.to_owned())), executor, "fake_endpoint".to_owned()) - } + fn price_info_ok(response: &str, executor: Executor) -> Client> { + Client::new( + FakeFetch::new(Some(response.to_owned())), + executor, + "fake_endpoint".to_owned(), + ) + } - fn price_info_not_found(executor: Executor) -> Client> { - Client::new(FakeFetch::new(None::), executor, "fake_endpoint".to_owned()) - } + fn price_info_not_found(executor: Executor) -> Client> { + Client::new( + FakeFetch::new(None::), + executor, + "fake_endpoint".to_owned(), + ) + } - #[test] - fn should_get_price_info() { - let runtime = Runtime::with_thread_count(1); + #[test] + fn should_get_price_info() { + let runtime = Runtime::with_thread_count(1); - // given - let response = r#"{ + // given + let response = r#"{ "status": "1", "message": "OK", "result": { @@ -164,51 +185,50 @@ mod test { } }"#; - let price_info = price_info_ok(response, runtime.executor()); + let price_info = price_info_ok(response, runtime.executor()); - // when - price_info.get(|price| { + // when + price_info.get(|price| { + // then + assert_eq!(price.ethusd, 209.55); + }); + } - // then - assert_eq!(price.ethusd, 209.55); - }); - } + #[test] + fn should_not_call_set_price_if_response_is_malformed() { + let runtime = Runtime::with_thread_count(1); - #[test] - fn should_not_call_set_price_if_response_is_malformed() { - let runtime = Runtime::with_thread_count(1); + // given + let response = "{}"; - // given - let response = "{}"; + let price_info = price_info_ok(response, runtime.executor()); + let b = Arc::new(AtomicBool::new(false)); - let price_info = 
price_info_ok(response, runtime.executor()); - let b = Arc::new(AtomicBool::new(false)); + // when + let bb = b.clone(); + price_info.get(move |_| { + bb.store(true, Ordering::Relaxed); + }); - // when - let bb = b.clone(); - price_info.get(move |_| { - bb.store(true, Ordering::Relaxed); - }); + // then + assert_eq!(b.load(Ordering::Relaxed), false); + } - // then - assert_eq!(b.load(Ordering::Relaxed), false); - } + #[test] + fn should_not_call_set_price_if_response_is_invalid() { + let runtime = Runtime::with_thread_count(1); - #[test] - fn should_not_call_set_price_if_response_is_invalid() { - let runtime = Runtime::with_thread_count(1); + // given + let price_info = price_info_not_found(runtime.executor()); + let b = Arc::new(AtomicBool::new(false)); - // given - let price_info = price_info_not_found(runtime.executor()); - let b = Arc::new(AtomicBool::new(false)); + // when + let bb = b.clone(); + price_info.get(move |_| { + bb.store(true, Ordering::Relaxed); + }); - // when - let bb = b.clone(); - price_info.get(move |_| { - bb.store(true, Ordering::Relaxed); - }); - - // then - assert_eq!(b.load(Ordering::Relaxed), false); - } + // then + assert_eq!(b.load(Ordering::Relaxed), false); + } } diff --git a/miner/src/external.rs b/miner/src/external.rs index f68e065ab..3f2f5754f 100644 --- a/miner/src/external.rs +++ b/miner/src/external.rs @@ -16,97 +16,105 @@ //! External Miner hashrate tracker. -use std::collections::HashMap; -use std::sync::Arc; -use std::time::{Instant, Duration}; use ethereum_types::{H256, U256}; use parking_lot::Mutex; +use std::{ + collections::HashMap, + sync::Arc, + time::{Duration, Instant}, +}; /// External miner interface. pub trait ExternalMinerService: Send + Sync { - /// Submit hashrate for given miner. - fn submit_hashrate(&self, hashrate: U256, id: H256); + /// Submit hashrate for given miner. + fn submit_hashrate(&self, hashrate: U256, id: H256); - /// Total hashrate. - fn hashrate(&self) -> U256; + /// Total hashrate. 
+ fn hashrate(&self) -> U256; } /// External Miner. pub struct ExternalMiner { - hashrates: Arc>>, + hashrates: Arc>>, } impl Default for ExternalMiner { - fn default() -> Self { - ExternalMiner { - hashrates: Arc::new(Mutex::new(HashMap::new())), - } - } + fn default() -> Self { + ExternalMiner { + hashrates: Arc::new(Mutex::new(HashMap::new())), + } + } } impl ExternalMiner { - /// Creates new external miner with prefilled hashrates. - pub fn new(hashrates: Arc>>) -> Self { - ExternalMiner { - hashrates: hashrates, - } - } + /// Creates new external miner with prefilled hashrates. + pub fn new(hashrates: Arc>>) -> Self { + ExternalMiner { + hashrates: hashrates, + } + } } const ENTRY_TIMEOUT: Duration = Duration::from_secs(2); impl ExternalMinerService for ExternalMiner { - fn submit_hashrate(&self, hashrate: U256, id: H256) { - self.hashrates.lock().insert(id, (Instant::now() + ENTRY_TIMEOUT, hashrate)); - } + fn submit_hashrate(&self, hashrate: U256, id: H256) { + self.hashrates + .lock() + .insert(id, (Instant::now() + ENTRY_TIMEOUT, hashrate)); + } - fn hashrate(&self) -> U256 { - let mut hashrates = self.hashrates.lock(); - let h = hashrates.drain().filter(|&(_, (t, _))| t > Instant::now()).collect(); - *hashrates = h; - hashrates.iter().fold(U256::from(0), |sum, (_, &(_, v))| sum + v) - } + fn hashrate(&self) -> U256 { + let mut hashrates = self.hashrates.lock(); + let h = hashrates + .drain() + .filter(|&(_, (t, _))| t > Instant::now()) + .collect(); + *hashrates = h; + hashrates + .iter() + .fold(U256::from(0), |sum, (_, &(_, v))| sum + v) + } } #[cfg(test)] mod tests { - use super::*; - use std::thread::sleep; - use std::time::Duration; - use ethereum_types::{H256, U256}; + use super::*; + use ethereum_types::{H256, U256}; + use std::{thread::sleep, time::Duration}; - fn miner() -> ExternalMiner { - ExternalMiner::default() - } + fn miner() -> ExternalMiner { + ExternalMiner::default() + } - #[test] - fn it_should_forget_old_hashrates() { - // given - 
let m = miner(); - assert_eq!(m.hashrate(), U256::from(0)); - m.submit_hashrate(U256::from(10), H256::from(1)); - assert_eq!(m.hashrate(), U256::from(10)); + #[test] + fn it_should_forget_old_hashrates() { + // given + let m = miner(); + assert_eq!(m.hashrate(), U256::from(0)); + m.submit_hashrate(U256::from(10), H256::from(1)); + assert_eq!(m.hashrate(), U256::from(10)); - // when - sleep(Duration::from_secs(3)); + // when + sleep(Duration::from_secs(3)); - // then - assert_eq!(m.hashrate(), U256::from(0)); - } + // then + assert_eq!(m.hashrate(), U256::from(0)); + } - #[test] - fn should_sum_up_hashrate() { - // given - let m = miner(); - assert_eq!(m.hashrate(), U256::from(0)); - m.submit_hashrate(U256::from(10), H256::from(1)); - assert_eq!(m.hashrate(), U256::from(10)); + #[test] + fn should_sum_up_hashrate() { + // given + let m = miner(); + assert_eq!(m.hashrate(), U256::from(0)); + m.submit_hashrate(U256::from(10), H256::from(1)); + assert_eq!(m.hashrate(), U256::from(10)); - // when - m.submit_hashrate(U256::from(15), H256::from(1)); - m.submit_hashrate(U256::from(20), H256::from(2)); + // when + m.submit_hashrate(U256::from(15), H256::from(1)); + m.submit_hashrate(U256::from(20), H256::from(2)); - // then - assert_eq!(m.hashrate(), U256::from(35)); - } + // then + assert_eq!(m.hashrate(), U256::from(35)); + } } diff --git a/miner/src/gas_price_calibrator.rs b/miner/src/gas_price_calibrator.rs index e91b880fa..983f43868 100644 --- a/miner/src/gas_price_calibrator.rs +++ b/miner/src/gas_price_calibrator.rs @@ -16,48 +16,52 @@ //! Auto-updates minimal gas price requirement from a price-info source. 
-use std::time::{Instant, Duration}; +use std::time::{Duration, Instant}; use ansi_term::Colour; use ethereum_types::U256; use parity_runtime::Executor; -use price_info::{Client as PriceInfoClient, PriceInfo}; -use price_info::fetch::Client as FetchClient; +use price_info::{fetch::Client as FetchClient, Client as PriceInfoClient, PriceInfo}; /// Options for the dynamic gas price recalibrator. #[derive(Debug, PartialEq)] pub struct GasPriceCalibratorOptions { - /// Base transaction price to match against. - pub usd_per_tx: f32, - /// How frequently we should recalibrate. - pub recalibration_period: Duration, + /// Base transaction price to match against. + pub usd_per_tx: f32, + /// How frequently we should recalibrate. + pub recalibration_period: Duration, } /// The gas price validator variant for a `GasPricer`. #[derive(Debug, PartialEq)] pub struct GasPriceCalibrator { - options: GasPriceCalibratorOptions, - next_calibration: Instant, - price_info: PriceInfoClient, + options: GasPriceCalibratorOptions, + next_calibration: Instant, + price_info: PriceInfoClient, } impl GasPriceCalibrator { - /// Create a new gas price calibrator. - pub fn new(options: GasPriceCalibratorOptions, fetch: FetchClient, p: Executor, api_endpoint: String) -> GasPriceCalibrator { - GasPriceCalibrator { - options: options, - next_calibration: Instant::now(), - price_info: PriceInfoClient::new(fetch, p, api_endpoint), - } - } + /// Create a new gas price calibrator. 
+ pub fn new( + options: GasPriceCalibratorOptions, + fetch: FetchClient, + p: Executor, + api_endpoint: String, + ) -> GasPriceCalibrator { + GasPriceCalibrator { + options: options, + next_calibration: Instant::now(), + price_info: PriceInfoClient::new(fetch, p, api_endpoint), + } + } - pub(crate) fn recalibrate(&mut self, set_price: F) { - trace!(target: "miner", "Recalibrating {:?} versus {:?}", Instant::now(), self.next_calibration); - if Instant::now() >= self.next_calibration { - let usd_per_tx = self.options.usd_per_tx; - trace!(target: "miner", "Getting price info"); + pub(crate) fn recalibrate(&mut self, set_price: F) { + trace!(target: "miner", "Recalibrating {:?} versus {:?}", Instant::now(), self.next_calibration); + if Instant::now() >= self.next_calibration { + let usd_per_tx = self.options.usd_per_tx; + trace!(target: "miner", "Getting price info"); - self.price_info.get(move |price: PriceInfo| { + self.price_info.get(move |price: PriceInfo| { trace!(target: "miner", "Price info arrived: {:?}", price); let usd_per_eth = price.ethusd; let wei_per_usd: f32 = 1.0e18 / usd_per_eth; @@ -67,7 +71,7 @@ impl GasPriceCalibrator { set_price(U256::from(wei_per_gas as u64)); }); - self.next_calibration = Instant::now() + self.options.recalibration_period; - } - } + self.next_calibration = Instant::now() + self.options.recalibration_period; + } + } } diff --git a/miner/src/gas_pricer.rs b/miner/src/gas_pricer.rs index c4e04442f..85f904552 100644 --- a/miner/src/gas_pricer.rs +++ b/miner/src/gas_pricer.rs @@ -23,31 +23,31 @@ use gas_price_calibrator::GasPriceCalibrator; /// Struct to look after updating the acceptable gas price of a miner. #[derive(Debug, PartialEq)] pub enum GasPricer { - /// A fixed gas price in terms of Wei - always the argument given. - Fixed(U256), - /// Gas price is calibrated according to a fixed amount of USD. 
- #[cfg(feature = "price-info")] - Calibrated(GasPriceCalibrator), + /// A fixed gas price in terms of Wei - always the argument given. + Fixed(U256), + /// Gas price is calibrated according to a fixed amount of USD. + #[cfg(feature = "price-info")] + Calibrated(GasPriceCalibrator), } impl GasPricer { - /// Create a new Calibrated `GasPricer`. - #[cfg(feature = "price-info")] - pub fn new_calibrated(calibrator: GasPriceCalibrator) -> GasPricer { - GasPricer::Calibrated(calibrator) - } + /// Create a new Calibrated `GasPricer`. + #[cfg(feature = "price-info")] + pub fn new_calibrated(calibrator: GasPriceCalibrator) -> GasPricer { + GasPricer::Calibrated(calibrator) + } - /// Create a new Fixed `GasPricer`. - pub fn new_fixed(gas_price: U256) -> GasPricer { - GasPricer::Fixed(gas_price) - } + /// Create a new Fixed `GasPricer`. + pub fn new_fixed(gas_price: U256) -> GasPricer { + GasPricer::Fixed(gas_price) + } - /// Recalibrate current gas price. - pub fn recalibrate(&mut self, set_price: F) { - match *self { - GasPricer::Fixed(ref curr) => set_price(curr.clone()), - #[cfg(feature = "price-info")] - GasPricer::Calibrated(ref mut cal) => cal.recalibrate(set_price), - } - } + /// Recalibrate current gas price. 
+ pub fn recalibrate(&mut self, set_price: F) { + match *self { + GasPricer::Fixed(ref curr) => set_price(curr.clone()), + #[cfg(feature = "price-info")] + GasPricer::Calibrated(ref mut cal) => cal.recalibrate(set_price), + } + } } diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 55091093a..f3f7edebd 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -47,11 +47,11 @@ extern crate log; extern crate trace_time; #[cfg(test)] -extern crate rustc_hex; +extern crate env_logger; #[cfg(test)] extern crate ethkey; #[cfg(test)] -extern crate env_logger; +extern crate rustc_hex; pub mod external; #[cfg(feature = "price-info")] diff --git a/miner/src/local_accounts.rs b/miner/src/local_accounts.rs index 23bcf8144..8c600b8e5 100644 --- a/miner/src/local_accounts.rs +++ b/miner/src/local_accounts.rs @@ -22,22 +22,22 @@ use ethereum_types::Address; /// Local accounts checker pub trait LocalAccounts: Send + Sync { - /// Returns true if given address should be considered local account. - fn is_local(&self, &Address) -> bool; + /// Returns true if given address should be considered local account. + fn is_local(&self, &Address) -> bool; } impl LocalAccounts for HashSet
{ - fn is_local(&self, address: &Address) -> bool { - self.contains(address) - } + fn is_local(&self, address: &Address) -> bool { + self.contains(address) + } } -impl LocalAccounts for (A, B) where - A: LocalAccounts, - B: LocalAccounts, +impl LocalAccounts for (A, B) +where + A: LocalAccounts, + B: LocalAccounts, { - fn is_local(&self, address: &Address) -> bool { - self.0.is_local(address) || self.1.is_local(address) - } + fn is_local(&self, address: &Address) -> bool { + self.0.is_local(address) || self.1.is_local(address) + } } - diff --git a/miner/src/pool/client.rs b/miner/src/pool/client.rs index 1579ba40d..eac206294 100644 --- a/miner/src/pool/client.rs +++ b/miner/src/pool/client.rs @@ -22,63 +22,69 @@ use std::fmt; -use ethereum_types::{U256, H256, H160 as Address}; +use ethereum_types::{H160 as Address, H256, U256}; use types::transaction; /// Account Details #[derive(Debug, Clone)] pub struct AccountDetails { - /// Current account nonce - pub nonce: U256, - /// Current account balance - pub balance: U256, - /// Is this account a local account? - pub is_local: bool, + /// Current account nonce + pub nonce: U256, + /// Current account balance + pub balance: U256, + /// Is this account a local account? + pub is_local: bool, } /// Transaction type #[derive(Debug, PartialEq)] pub enum TransactionType { - /// Regular transaction - Regular, - /// Service transaction (allowed by a contract to have gas_price=0) - Service, + /// Regular transaction + Regular, + /// Service transaction (allowed by a contract to have gas_price=0) + Service, } /// Verification client. pub trait Client: fmt::Debug + Sync { - /// Is transaction with given hash already in the blockchain? - fn transaction_already_included(&self, hash: &H256) -> bool; + /// Is transaction with given hash already in the blockchain? + fn transaction_already_included(&self, hash: &H256) -> bool; - /// Perform basic/cheap transaction verification. 
- /// - /// This should include all cheap checks that can be done before - /// actually checking the signature, like chain-replay protection. - /// - /// This method is currently used only for verifying local transactions. - fn verify_transaction_basic(&self, t: &transaction::UnverifiedTransaction) - -> Result<(), transaction::Error>; + /// Perform basic/cheap transaction verification. + /// + /// This should include all cheap checks that can be done before + /// actually checking the signature, like chain-replay protection. + /// + /// This method is currently used only for verifying local transactions. + fn verify_transaction_basic( + &self, + t: &transaction::UnverifiedTransaction, + ) -> Result<(), transaction::Error>; - /// Structurarily verify given transaction. - fn verify_transaction(&self, tx: transaction::UnverifiedTransaction) - -> Result; + /// Structurarily verify given transaction. + fn verify_transaction( + &self, + tx: transaction::UnverifiedTransaction, + ) -> Result; - /// Estimate minimal gas requirurement for given transaction. - fn required_gas(&self, tx: &transaction::Transaction) -> U256; + /// Estimate minimal gas requirurement for given transaction. + fn required_gas(&self, tx: &transaction::Transaction) -> U256; - /// Fetch account details for given sender. - fn account_details(&self, address: &Address) -> AccountDetails; + /// Fetch account details for given sender. + fn account_details(&self, address: &Address) -> AccountDetails; - /// Classify transaction (check if transaction is filtered by some contracts). - fn transaction_type(&self, tx: &transaction::SignedTransaction) -> TransactionType; + /// Classify transaction (check if transaction is filtered by some contracts). 
+ fn transaction_type(&self, tx: &transaction::SignedTransaction) -> TransactionType; - /// Performs pre-validation of RLP decoded transaction - fn decode_transaction(&self, transaction: &[u8]) - -> Result; + /// Performs pre-validation of RLP decoded transaction + fn decode_transaction( + &self, + transaction: &[u8], + ) -> Result; } /// State nonce client pub trait NonceClient: fmt::Debug + Sync { - /// Fetch only account nonce for given sender. - fn account_nonce(&self, address: &Address) -> U256; + /// Fetch only account nonce for given sender. + fn account_nonce(&self, address: &Address) -> U256; } diff --git a/miner/src/pool/listener.rs b/miner/src/pool/listener.rs index 67034aa52..7d8b9c86b 100644 --- a/miner/src/pool/listener.rs +++ b/miner/src/pool/listener.rs @@ -16,8 +16,7 @@ //! Notifier for new transaction hashes. -use std::fmt; -use std::sync::Arc; +use std::{fmt, sync::Arc}; use ethereum_types::H256; use txpool::{self, VerifiedTransaction}; @@ -29,43 +28,43 @@ type Listener = Box; /// Manages notifications to pending transaction listeners. #[derive(Default)] pub struct Notifier { - listeners: Vec, - pending: Vec, + listeners: Vec, + pending: Vec, } impl fmt::Debug for Notifier { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Notifier") - .field("listeners", &self.listeners.len()) - .field("pending", &self.pending) - .finish() - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Notifier") + .field("listeners", &self.listeners.len()) + .field("pending", &self.pending) + .finish() + } } impl Notifier { - /// Add new listener to receive notifications. - pub fn add(&mut self, f: Listener) { - self.listeners.push(f) - } + /// Add new listener to receive notifications. + pub fn add(&mut self, f: Listener) { + self.listeners.push(f) + } - /// Notify listeners about all currently pending transactions. 
- pub fn notify(&mut self) { - if self.pending.is_empty() { - return; - } + /// Notify listeners about all currently pending transactions. + pub fn notify(&mut self) { + if self.pending.is_empty() { + return; + } - for l in &self.listeners { - (l)(&self.pending); - } + for l in &self.listeners { + (l)(&self.pending); + } - self.pending.clear(); - } + self.pending.clear(); + } } impl txpool::Listener for Notifier { - fn added(&mut self, tx: &Arc, _old: Option<&Arc>) { - self.pending.push(*tx.hash()); - } + fn added(&mut self, tx: &Arc, _old: Option<&Arc>) { + self.pending.push(*tx.hash()); + } } /// Transaction pool logger. @@ -73,91 +72,102 @@ impl txpool::Listener for Notifier { pub struct Logger; impl txpool::Listener for Logger { - fn added(&mut self, tx: &Arc, old: Option<&Arc>) { - debug!(target: "txqueue", "[{:?}] Added to the pool.", tx.hash()); - debug!( - target: "txqueue", - "[{hash:?}] Sender: {sender}, nonce: {nonce}, gasPrice: {gas_price}, gas: {gas}, value: {value}, dataLen: {data}))", - hash = tx.hash(), - sender = tx.sender(), - nonce = tx.signed().nonce, - gas_price = tx.signed().gas_price, - gas = tx.signed().gas, - value = tx.signed().value, - data = tx.signed().data.len(), - ); + fn added(&mut self, tx: &Arc, old: Option<&Arc>) { + debug!(target: "txqueue", "[{:?}] Added to the pool.", tx.hash()); + debug!( + target: "txqueue", + "[{hash:?}] Sender: {sender}, nonce: {nonce}, gasPrice: {gas_price}, gas: {gas}, value: {value}, dataLen: {data}))", + hash = tx.hash(), + sender = tx.sender(), + nonce = tx.signed().nonce, + gas_price = tx.signed().gas_price, + gas = tx.signed().gas, + value = tx.signed().value, + data = tx.signed().data.len(), + ); - if let Some(old) = old { - debug!(target: "txqueue", "[{:?}] Dropped. Replaced by [{:?}]", old.hash(), tx.hash()); - } - } + if let Some(old) = old { + debug!(target: "txqueue", "[{:?}] Dropped. 
Replaced by [{:?}]", old.hash(), tx.hash()); + } + } - fn rejected(&mut self, _tx: &Arc, reason: &txpool::Error) { - trace!(target: "txqueue", "Rejected {}.", reason); - } + fn rejected( + &mut self, + _tx: &Arc, + reason: &txpool::Error, + ) { + trace!(target: "txqueue", "Rejected {}.", reason); + } - fn dropped(&mut self, tx: &Arc, new: Option<&Transaction>) { - match new { - Some(new) => debug!(target: "txqueue", "[{:?}] Pushed out by [{:?}]", tx.hash(), new.hash()), - None => debug!(target: "txqueue", "[{:?}] Dropped.", tx.hash()), - } - } + fn dropped(&mut self, tx: &Arc, new: Option<&Transaction>) { + match new { + Some(new) => { + debug!(target: "txqueue", "[{:?}] Pushed out by [{:?}]", tx.hash(), new.hash()) + } + None => debug!(target: "txqueue", "[{:?}] Dropped.", tx.hash()), + } + } - fn invalid(&mut self, tx: &Arc) { - debug!(target: "txqueue", "[{:?}] Marked as invalid by executor.", tx.hash()); - } + fn invalid(&mut self, tx: &Arc) { + debug!(target: "txqueue", "[{:?}] Marked as invalid by executor.", tx.hash()); + } - fn canceled(&mut self, tx: &Arc) { - debug!(target: "txqueue", "[{:?}] Canceled by the user.", tx.hash()); - } + fn canceled(&mut self, tx: &Arc) { + debug!(target: "txqueue", "[{:?}] Canceled by the user.", tx.hash()); + } - fn culled(&mut self, tx: &Arc) { - debug!(target: "txqueue", "[{:?}] Culled or mined.", tx.hash()); - } + fn culled(&mut self, tx: &Arc) { + debug!(target: "txqueue", "[{:?}] Culled or mined.", tx.hash()); + } } #[cfg(test)] mod tests { - use super::*; - use parking_lot::Mutex; - use types::transaction; - use txpool::Listener; + use super::*; + use parking_lot::Mutex; + use txpool::Listener; + use types::transaction; - #[test] - fn should_notify_listeners() { - // given - let received = Arc::new(Mutex::new(vec![])); - let r = received.clone(); - let listener = Box::new(move |hashes: &[H256]| { - *r.lock() = hashes.iter().map(|x| *x).collect(); - }); + #[test] + fn should_notify_listeners() { + // given + let 
received = Arc::new(Mutex::new(vec![])); + let r = received.clone(); + let listener = Box::new(move |hashes: &[H256]| { + *r.lock() = hashes.iter().map(|x| *x).collect(); + }); - let mut tx_listener = Notifier::default(); - tx_listener.add(listener); + let mut tx_listener = Notifier::default(); + tx_listener.add(listener); - // when - let tx = new_tx(); - tx_listener.added(&tx, None); - assert_eq!(*received.lock(), vec![]); + // when + let tx = new_tx(); + tx_listener.added(&tx, None); + assert_eq!(*received.lock(), vec![]); - // then - tx_listener.notify(); - assert_eq!( - *received.lock(), - vec!["13aff4201ac1dc49daf6a7cf07b558ed956511acbaabf9502bdacc353953766d".parse().unwrap()] - ); - } + // then + tx_listener.notify(); + assert_eq!( + *received.lock(), + vec![ + "13aff4201ac1dc49daf6a7cf07b558ed956511acbaabf9502bdacc353953766d" + .parse() + .unwrap() + ] + ); + } - fn new_tx() -> Arc { - let signed = transaction::Transaction { - action: transaction::Action::Create, - data: vec![1, 2, 3], - nonce: 5.into(), - gas: 21_000.into(), - gas_price: 5.into(), - value: 0.into(), - }.fake_sign(5.into()); + fn new_tx() -> Arc { + let signed = transaction::Transaction { + action: transaction::Action::Create, + data: vec![1, 2, 3], + nonce: 5.into(), + gas: 21_000.into(), + gas_price: 5.into(), + value: 0.into(), + } + .fake_sign(5.into()); - Arc::new(Transaction::from_pending_block_transaction(signed)) - } + Arc::new(Transaction::from_pending_block_transaction(signed)) + } } diff --git a/miner/src/pool/local_transactions.rs b/miner/src/pool/local_transactions.rs index 346877d03..3da607dff 100644 --- a/miner/src/pool/local_transactions.rs +++ b/miner/src/pool/local_transactions.rs @@ -20,7 +20,7 @@ use std::{fmt, sync::Arc}; use ethereum_types::H256; use linked_hash_map::LinkedHashMap; -use pool::{VerifiedTransaction as Transaction, ScoredTransaction}; +use pool::{ScoredTransaction, VerifiedTransaction as Transaction}; use txpool::{self, VerifiedTransaction}; /// Status of 
local transaction. @@ -28,295 +28,331 @@ use txpool::{self, VerifiedTransaction}; /// or gives a reason why the transaction was removed. #[derive(Debug, PartialEq, Clone)] pub enum Status { - /// The transaction is currently in the transaction queue. - Pending(Arc), - /// Transaction is already mined. - Mined(Arc), - /// Transaction didn't get into any block, but some other tx with the same nonce got. - Culled(Arc), - /// Transaction is dropped because of limit - Dropped(Arc), - /// Replaced because of higher gas price of another transaction. - Replaced { - /// Replaced transaction - old: Arc, - /// Transaction that replaced this one. - new: Arc, - }, - /// Transaction was never accepted to the queue. - /// It means that it was too cheap to replace any transaction already in the pool. - Rejected(Arc, String), - /// Transaction is invalid. - Invalid(Arc), - /// Transaction was canceled. - Canceled(Arc), + /// The transaction is currently in the transaction queue. + Pending(Arc), + /// Transaction is already mined. + Mined(Arc), + /// Transaction didn't get into any block, but some other tx with the same nonce got. + Culled(Arc), + /// Transaction is dropped because of limit + Dropped(Arc), + /// Replaced because of higher gas price of another transaction. + Replaced { + /// Replaced transaction + old: Arc, + /// Transaction that replaced this one. + new: Arc, + }, + /// Transaction was never accepted to the queue. + /// It means that it was too cheap to replace any transaction already in the pool. + Rejected(Arc, String), + /// Transaction is invalid. + Invalid(Arc), + /// Transaction was canceled. + Canceled(Arc), } impl Status { - fn is_pending(&self) -> bool { - match *self { - Status::Pending(_) => true, - _ => false, - } - } + fn is_pending(&self) -> bool { + match *self { + Status::Pending(_) => true, + _ => false, + } + } } /// Keeps track of local transactions that are in the queue or were mined/dropped recently. 
pub struct LocalTransactionsList { - max_old: usize, - transactions: LinkedHashMap, - pending: usize, - in_chain: Option bool + Send + Sync>>, + max_old: usize, + transactions: LinkedHashMap, + pending: usize, + in_chain: Option bool + Send + Sync>>, } impl fmt::Debug for LocalTransactionsList { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("LocalTransactionsList") - .field("max_old", &self.max_old) - .field("transactions", &self.transactions) - .field("pending", &self.pending) - .field("in_chain", &self.in_chain.is_some()) - .finish() - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("LocalTransactionsList") + .field("max_old", &self.max_old) + .field("transactions", &self.transactions) + .field("pending", &self.pending) + .field("in_chain", &self.in_chain.is_some()) + .finish() + } } impl Default for LocalTransactionsList { - fn default() -> Self { - Self::new(10) - } + fn default() -> Self { + Self::new(10) + } } impl LocalTransactionsList { - /// Create a new list of local transactions. - pub fn new(max_old: usize) -> Self { - LocalTransactionsList { - max_old, - transactions: Default::default(), - pending: 0, - in_chain: None, - } - } + /// Create a new list of local transactions. + pub fn new(max_old: usize) -> Self { + LocalTransactionsList { + max_old, + transactions: Default::default(), + pending: 0, + in_chain: None, + } + } - /// Set blockchain checker. - /// - /// The function should return true if transaction is included in chain. - pub fn set_in_chain_checker(&mut self, checker: T) where - T: Into>, - F: Fn(&H256) -> bool + Send + Sync + 'static - { - self.in_chain = checker.into().map(|f| Box::new(f) as _); - } + /// Set blockchain checker. + /// + /// The function should return true if transaction is included in chain. 
+ pub fn set_in_chain_checker(&mut self, checker: T) + where + T: Into>, + F: Fn(&H256) -> bool + Send + Sync + 'static, + { + self.in_chain = checker.into().map(|f| Box::new(f) as _); + } - /// Returns true if the transaction is already in local transactions. - pub fn contains(&self, hash: &H256) -> bool { - self.transactions.contains_key(hash) - } + /// Returns true if the transaction is already in local transactions. + pub fn contains(&self, hash: &H256) -> bool { + self.transactions.contains_key(hash) + } - /// Return a map of all currently stored transactions. - pub fn all_transactions(&self) -> &LinkedHashMap { - &self.transactions - } + /// Return a map of all currently stored transactions. + pub fn all_transactions(&self) -> &LinkedHashMap { + &self.transactions + } - /// Returns true if there are pending local transactions. - pub fn has_pending(&self) -> bool { - self.pending > 0 - } + /// Returns true if there are pending local transactions. + pub fn has_pending(&self) -> bool { + self.pending > 0 + } - fn clear_old(&mut self) { - let number_of_old = self.transactions.len() - self.pending; - if self.max_old >= number_of_old { - return; - } + fn clear_old(&mut self) { + let number_of_old = self.transactions.len() - self.pending; + if self.max_old >= number_of_old { + return; + } - let to_remove: Vec<_> = self.transactions - .iter() - .filter(|&(_, status)| !status.is_pending()) - .map(|(hash, _)| *hash) - .take(number_of_old - self.max_old) - .collect(); + let to_remove: Vec<_> = self + .transactions + .iter() + .filter(|&(_, status)| !status.is_pending()) + .map(|(hash, _)| *hash) + .take(number_of_old - self.max_old) + .collect(); - for hash in to_remove { - self.transactions.remove(&hash); - } - } + for hash in to_remove { + self.transactions.remove(&hash); + } + } - fn insert(&mut self, hash: H256, status: Status) { - let result = self.transactions.insert(hash, status); - if let Some(old) = result { - if old.is_pending() { - self.pending -= 1; - } - } 
- } + fn insert(&mut self, hash: H256, status: Status) { + let result = self.transactions.insert(hash, status); + if let Some(old) = result { + if old.is_pending() { + self.pending -= 1; + } + } + } } impl txpool::Listener for LocalTransactionsList { - fn added(&mut self, tx: &Arc, old: Option<&Arc>) { - if !tx.priority().is_local() { - return; - } + fn added(&mut self, tx: &Arc, old: Option<&Arc>) { + if !tx.priority().is_local() { + return; + } - debug!(target: "own_tx", "Imported to the pool (hash {:?})", tx.hash()); - self.clear_old(); - self.insert(*tx.hash(), Status::Pending(tx.clone())); - self.pending += 1; + debug!(target: "own_tx", "Imported to the pool (hash {:?})", tx.hash()); + self.clear_old(); + self.insert(*tx.hash(), Status::Pending(tx.clone())); + self.pending += 1; - if let Some(old) = old { - if self.transactions.contains_key(old.hash()) { - self.insert(*old.hash(), Status::Replaced { - old: old.clone(), - new: tx.clone(), - }); - } - } - } + if let Some(old) = old { + if self.transactions.contains_key(old.hash()) { + self.insert( + *old.hash(), + Status::Replaced { + old: old.clone(), + new: tx.clone(), + }, + ); + } + } + } - fn rejected(&mut self, tx: &Arc, reason: &txpool::Error) { - if !tx.priority().is_local() { - return; - } + fn rejected( + &mut self, + tx: &Arc, + reason: &txpool::Error, + ) { + if !tx.priority().is_local() { + return; + } - debug!(target: "own_tx", "Transaction rejected (hash {:?}). {}", tx.hash(), reason); - self.insert(*tx.hash(), Status::Rejected(tx.clone(), format!("{}", reason))); - self.clear_old(); - } + debug!(target: "own_tx", "Transaction rejected (hash {:?}). 
{}", tx.hash(), reason); + self.insert( + *tx.hash(), + Status::Rejected(tx.clone(), format!("{}", reason)), + ); + self.clear_old(); + } - fn dropped(&mut self, tx: &Arc, new: Option<&Transaction>) { - if !tx.priority().is_local() { - return; - } + fn dropped(&mut self, tx: &Arc, new: Option<&Transaction>) { + if !tx.priority().is_local() { + return; + } - match new { - Some(new) => warn!(target: "own_tx", "Transaction pushed out because of limit (hash {:?}, replacement: {:?})", tx.hash(), new.hash()), - None => warn!(target: "own_tx", "Transaction dropped because of limit (hash: {:?})", tx.hash()), - } - self.insert(*tx.hash(), Status::Dropped(tx.clone())); - self.clear_old(); - } + match new { + Some(new) => { + warn!(target: "own_tx", "Transaction pushed out because of limit (hash {:?}, replacement: {:?})", tx.hash(), new.hash()) + } + None => { + warn!(target: "own_tx", "Transaction dropped because of limit (hash: {:?})", tx.hash()) + } + } + self.insert(*tx.hash(), Status::Dropped(tx.clone())); + self.clear_old(); + } - fn invalid(&mut self, tx: &Arc) { - if !tx.priority().is_local() { - return; - } + fn invalid(&mut self, tx: &Arc) { + if !tx.priority().is_local() { + return; + } - warn!(target: "own_tx", "Transaction marked invalid (hash {:?})", tx.hash()); - self.insert(*tx.hash(), Status::Invalid(tx.clone())); - self.clear_old(); - } + warn!(target: "own_tx", "Transaction marked invalid (hash {:?})", tx.hash()); + self.insert(*tx.hash(), Status::Invalid(tx.clone())); + self.clear_old(); + } - fn canceled(&mut self, tx: &Arc) { - if !tx.priority().is_local() { - return; - } + fn canceled(&mut self, tx: &Arc) { + if !tx.priority().is_local() { + return; + } - warn!(target: "own_tx", "Transaction canceled (hash {:?})", tx.hash()); - self.insert(*tx.hash(), Status::Canceled(tx.clone())); - self.clear_old(); - } + warn!(target: "own_tx", "Transaction canceled (hash {:?})", tx.hash()); + self.insert(*tx.hash(), Status::Canceled(tx.clone())); + self.clear_old(); 
+ } - fn culled(&mut self, tx: &Arc) { - if !tx.priority().is_local() { - return; - } + fn culled(&mut self, tx: &Arc) { + if !tx.priority().is_local() { + return; + } - let is_in_chain = self.in_chain.as_ref().map(|checker| checker(tx.hash())).unwrap_or(false); - if is_in_chain { - info!(target: "own_tx", "Transaction mined (hash {:?})", tx.hash()); - self.insert(*tx.hash(), Status::Mined(tx.clone())); - return; - } + let is_in_chain = self + .in_chain + .as_ref() + .map(|checker| checker(tx.hash())) + .unwrap_or(false); + if is_in_chain { + info!(target: "own_tx", "Transaction mined (hash {:?})", tx.hash()); + self.insert(*tx.hash(), Status::Mined(tx.clone())); + return; + } - info!(target: "own_tx", "Transaction culled (hash {:?})", tx.hash()); - self.insert(*tx.hash(), Status::Culled(tx.clone())); - } + info!(target: "own_tx", "Transaction culled (hash {:?})", tx.hash()); + self.insert(*tx.hash(), Status::Culled(tx.clone())); + } } #[cfg(test)] mod tests { - use super::*; - use ethereum_types::U256; - use ethkey::{Random, Generator}; - use types::transaction; - use txpool::Listener; + use super::*; + use ethereum_types::U256; + use ethkey::{Generator, Random}; + use txpool::Listener; + use types::transaction; - use pool; + use pool; - #[test] - fn should_add_transaction_as_pending() { - // given - let mut list = LocalTransactionsList::default(); - let tx1 = new_tx(10); - let tx2 = new_tx(20); + #[test] + fn should_add_transaction_as_pending() { + // given + let mut list = LocalTransactionsList::default(); + let tx1 = new_tx(10); + let tx2 = new_tx(20); - // when - list.added(&tx1, None); - list.added(&tx2, None); + // when + list.added(&tx1, None); + list.added(&tx2, None); - // then - assert!(list.contains(tx1.hash())); - assert!(list.contains(tx2.hash())); - let statuses = list.all_transactions().values().cloned().collect::>(); - assert_eq!(statuses, vec![Status::Pending(tx1), Status::Pending(tx2)]); - } + // then + assert!(list.contains(tx1.hash())); + 
assert!(list.contains(tx2.hash())); + let statuses = list + .all_transactions() + .values() + .cloned() + .collect::>(); + assert_eq!(statuses, vec![Status::Pending(tx1), Status::Pending(tx2)]); + } - #[test] - fn should_use_in_chain_checker_if_present() { - // given - let mut list = LocalTransactionsList::default(); - let tx1 = new_tx(10); - let tx2 = new_tx(20); - list.culled(&tx1); - list.culled(&tx2); - let statuses = list.all_transactions().values().cloned().collect::>(); - assert_eq!(statuses, vec![Status::Culled(tx1.clone()), Status::Culled(tx2.clone())]); + #[test] + fn should_use_in_chain_checker_if_present() { + // given + let mut list = LocalTransactionsList::default(); + let tx1 = new_tx(10); + let tx2 = new_tx(20); + list.culled(&tx1); + list.culled(&tx2); + let statuses = list + .all_transactions() + .values() + .cloned() + .collect::>(); + assert_eq!( + statuses, + vec![Status::Culled(tx1.clone()), Status::Culled(tx2.clone())] + ); - // when - list.set_in_chain_checker(|_: &_| true); - list.culled(&tx1); + // when + list.set_in_chain_checker(|_: &_| true); + list.culled(&tx1); - // then - let statuses = list.all_transactions().values().cloned().collect::>(); - assert_eq!(statuses, vec![Status::Culled(tx2), Status::Mined(tx1)]); - } + // then + let statuses = list + .all_transactions() + .values() + .cloned() + .collect::>(); + assert_eq!(statuses, vec![Status::Culled(tx2), Status::Mined(tx1)]); + } - #[test] - fn should_clear_old_transactions() { - // given - let mut list = LocalTransactionsList::new(1); - let tx1 = new_tx(10); - let tx2 = new_tx(50); - let tx3 = new_tx(51); + #[test] + fn should_clear_old_transactions() { + // given + let mut list = LocalTransactionsList::new(1); + let tx1 = new_tx(10); + let tx2 = new_tx(50); + let tx3 = new_tx(51); - list.added(&tx1, None); - list.invalid(&tx1); - list.dropped(&tx2, None); - assert!(!list.contains(tx1.hash())); - assert!(list.contains(tx2.hash())); - assert!(!list.contains(tx3.hash())); + 
list.added(&tx1, None); + list.invalid(&tx1); + list.dropped(&tx2, None); + assert!(!list.contains(tx1.hash())); + assert!(list.contains(tx2.hash())); + assert!(!list.contains(tx3.hash())); - // when - list.added(&tx3, Some(&tx1)); + // when + list.added(&tx3, Some(&tx1)); - // then - assert!(!list.contains(tx1.hash())); - assert!(list.contains(tx2.hash())); - assert!(list.contains(tx3.hash())); - } + // then + assert!(!list.contains(tx1.hash())); + assert!(list.contains(tx2.hash())); + assert!(list.contains(tx3.hash())); + } - fn new_tx>(nonce: T) -> Arc { - let keypair = Random.generate().unwrap(); - let signed = transaction::Transaction { - action: transaction::Action::Create, - value: U256::from(100), - data: Default::default(), - gas: U256::from(10), - gas_price: U256::from(1245), - nonce: nonce.into(), - }.sign(keypair.secret(), None); + fn new_tx>(nonce: T) -> Arc { + let keypair = Random.generate().unwrap(); + let signed = transaction::Transaction { + action: transaction::Action::Create, + value: U256::from(100), + data: Default::default(), + gas: U256::from(10), + gas_price: U256::from(1245), + nonce: nonce.into(), + } + .sign(keypair.secret(), None); - let mut tx = Transaction::from_pending_block_transaction(signed); - tx.priority = pool::Priority::Local; + let mut tx = Transaction::from_pending_block_transaction(signed); + tx.priority = pool::Priority::Local; - Arc::new(tx) - } + Arc::new(tx) + } } diff --git a/miner/src/pool/mod.rs b/miner/src/pool/mod.rs index 40a226d9f..5fe1a0233 100644 --- a/miner/src/pool/mod.rs +++ b/miner/src/pool/mod.rs @@ -16,10 +16,10 @@ //! 
Transaction Pool -use ethereum_types::{U256, H256, Address}; +use ethereum_types::{Address, H256, U256}; use heapsize::HeapSizeOf; -use types::transaction; use txpool; +use types::transaction; mod listener; mod queue; @@ -34,168 +34,169 @@ pub mod verifier; #[cfg(test)] mod tests; -pub use self::queue::{TransactionQueue, Status as QueueStatus}; -pub use self::txpool::{VerifiedTransaction as PoolVerifiedTransaction, Options}; +pub use self::{ + queue::{Status as QueueStatus, TransactionQueue}, + txpool::{Options, VerifiedTransaction as PoolVerifiedTransaction}, +}; /// How to prioritize transactions in the pool /// /// TODO [ToDr] Implement more strategies. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum PrioritizationStrategy { - /// Simple gas-price based prioritization. - GasPriceOnly, + /// Simple gas-price based prioritization. + GasPriceOnly, } /// Transaction ordering when requesting pending set. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum PendingOrdering { - /// Get pending transactions ordered by their priority (potentially expensive) - Priority, - /// Get pending transactions without any care of particular ordering (cheaper). - Unordered, + /// Get pending transactions ordered by their priority (potentially expensive) + Priority, + /// Get pending transactions without any care of particular ordering (cheaper). + Unordered, } /// Pending set query settings #[derive(Debug, Clone)] pub struct PendingSettings { - /// Current block number (affects readiness of some transactions). - pub block_number: u64, - /// Current timestamp (affects readiness of some transactions). - pub current_timestamp: u64, - /// Nonce cap (for dust protection; EIP-168) - pub nonce_cap: Option, - /// Maximal number of transactions in pending the set. - pub max_len: usize, - /// Ordering of transactions. - pub ordering: PendingOrdering, + /// Current block number (affects readiness of some transactions). 
+ pub block_number: u64, + /// Current timestamp (affects readiness of some transactions). + pub current_timestamp: u64, + /// Nonce cap (for dust protection; EIP-168) + pub nonce_cap: Option, + /// Maximal number of transactions in pending the set. + pub max_len: usize, + /// Ordering of transactions. + pub ordering: PendingOrdering, } impl PendingSettings { - /// Get all transactions (no cap or len limit) prioritized. - pub fn all_prioritized(block_number: u64, current_timestamp: u64) -> Self { - PendingSettings { - block_number, - current_timestamp, - nonce_cap: None, - max_len: usize::max_value(), - ordering: PendingOrdering::Priority, - } - } + /// Get all transactions (no cap or len limit) prioritized. + pub fn all_prioritized(block_number: u64, current_timestamp: u64) -> Self { + PendingSettings { + block_number, + current_timestamp, + nonce_cap: None, + max_len: usize::max_value(), + ordering: PendingOrdering::Priority, + } + } } /// Transaction priority. -#[derive(Debug, PartialEq, Eq, PartialOrd, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Clone, Copy)] pub enum Priority { - /// Regular transactions received over the network. (no priority boost) - Regular, - /// Transactions from retracted blocks (medium priority) - /// - /// When block becomes non-canonical we re-import the transactions it contains - /// to the queue and boost their priority. - Retracted, - /// Local transactions (high priority) - /// - /// Transactions either from a local account or - /// submitted over local RPC connection via `eth_sendRawTransaction` - Local, + /// Regular transactions received over the network. (no priority boost) + Regular, + /// Transactions from retracted blocks (medium priority) + /// + /// When block becomes non-canonical we re-import the transactions it contains + /// to the queue and boost their priority. 
+ Retracted, + /// Local transactions (high priority) + /// + /// Transactions either from a local account or + /// submitted over local RPC connection via `eth_sendRawTransaction` + Local, } impl Priority { - fn is_local(&self) -> bool { - match *self { - Priority::Local => true, - _ => false, - } - } + fn is_local(&self) -> bool { + match *self { + Priority::Local => true, + _ => false, + } + } } /// Scoring properties for verified transaction. pub trait ScoredTransaction { - /// Gets transaction priority. - fn priority(&self) -> Priority; + /// Gets transaction priority. + fn priority(&self) -> Priority; - /// Gets transaction gas price. - fn gas_price(&self) -> &U256; + /// Gets transaction gas price. + fn gas_price(&self) -> &U256; - /// Gets transaction nonce. - fn nonce(&self) -> U256; + /// Gets transaction nonce. + fn nonce(&self) -> U256; } /// Verified transaction stored in the pool. #[derive(Debug, Clone, PartialEq, Eq)] pub struct VerifiedTransaction { - transaction: transaction::PendingTransaction, - // TODO [ToDr] hash and sender should go directly from the transaction - hash: H256, - sender: Address, - priority: Priority, - insertion_id: usize, + transaction: transaction::PendingTransaction, + // TODO [ToDr] hash and sender should go directly from the transaction + hash: H256, + sender: Address, + priority: Priority, + insertion_id: usize, } impl VerifiedTransaction { - /// Create `VerifiedTransaction` directly from `SignedTransaction`. - /// - /// This method should be used only: - /// 1. for tests - /// 2. In case we are converting pending block transactions that are already in the queue to match the function signature. 
- pub fn from_pending_block_transaction(tx: transaction::SignedTransaction) -> Self { - let hash = tx.hash(); - let sender = tx.sender(); - VerifiedTransaction { - transaction: tx.into(), - hash, - sender, - priority: Priority::Retracted, - insertion_id: 0, - } - } + /// Create `VerifiedTransaction` directly from `SignedTransaction`. + /// + /// This method should be used only: + /// 1. for tests + /// 2. In case we are converting pending block transactions that are already in the queue to match the function signature. + pub fn from_pending_block_transaction(tx: transaction::SignedTransaction) -> Self { + let hash = tx.hash(); + let sender = tx.sender(); + VerifiedTransaction { + transaction: tx.into(), + hash, + sender, + priority: Priority::Retracted, + insertion_id: 0, + } + } - /// Gets transaction insertion id. - pub(crate) fn insertion_id(&self) -> usize { - self.insertion_id - } + /// Gets transaction insertion id. + pub(crate) fn insertion_id(&self) -> usize { + self.insertion_id + } - /// Gets wrapped `SignedTransaction` - pub fn signed(&self) -> &transaction::SignedTransaction { - &self.transaction - } - - /// Gets wrapped `PendingTransaction` - pub fn pending(&self) -> &transaction::PendingTransaction { - &self.transaction - } + /// Gets wrapped `SignedTransaction` + pub fn signed(&self) -> &transaction::SignedTransaction { + &self.transaction + } + /// Gets wrapped `PendingTransaction` + pub fn pending(&self) -> &transaction::PendingTransaction { + &self.transaction + } } impl txpool::VerifiedTransaction for VerifiedTransaction { - type Hash = H256; - type Sender = Address; + type Hash = H256; + type Sender = Address; - fn hash(&self) -> &H256 { - &self.hash - } + fn hash(&self) -> &H256 { + &self.hash + } - fn mem_usage(&self) -> usize { - self.transaction.heap_size_of_children() - } + fn mem_usage(&self) -> usize { + self.transaction.heap_size_of_children() + } - fn sender(&self) -> &Address { - &self.sender - } + fn sender(&self) -> &Address { + 
&self.sender + } } impl ScoredTransaction for VerifiedTransaction { - fn priority(&self) -> Priority { - self.priority - } + fn priority(&self) -> Priority { + self.priority + } - /// Gets transaction gas price. - fn gas_price(&self) -> &U256 { - &self.transaction.gas_price - } + /// Gets transaction gas price. + fn gas_price(&self) -> &U256 { + &self.transaction.gas_price + } - /// Gets transaction nonce. - fn nonce(&self) -> U256 { - self.transaction.nonce - } + /// Gets transaction nonce. + fn nonce(&self) -> U256 { + self.transaction.nonce + } } diff --git a/miner/src/pool/queue.rs b/miner/src/pool/queue.rs index 58966df85..f5b91b796 100644 --- a/miner/src/pool/queue.rs +++ b/miner/src/pool/queue.rs @@ -16,23 +16,30 @@ //! Ethereum Transaction Queue -use std::{cmp, fmt}; -use std::sync::Arc; -use std::sync::atomic::{self, AtomicUsize}; -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::{ + cmp, + collections::{BTreeMap, BTreeSet, HashMap}, + fmt, + sync::{ + atomic::{self, AtomicUsize}, + Arc, + }, +}; -use ethereum_types::{H256, U256, Address}; +use ethereum_types::{Address, H256, U256}; use parking_lot::RwLock; use txpool::{self, Verifier}; use types::transaction; use pool::{ - self, replace, scoring, verifier, client, ready, listener, - PrioritizationStrategy, PendingOrdering, PendingSettings, + self, client, listener, local_transactions::LocalTransactionsList, ready, replace, scoring, + verifier, PendingOrdering, PendingSettings, PrioritizationStrategy, }; -use pool::local_transactions::LocalTransactionsList; -type Listener = (LocalTransactionsList, (listener::Notifier, listener::Logger)); +type Listener = ( + LocalTransactionsList, + (listener::Notifier, listener::Logger), +); type Pool = txpool::Pool; /// Max cache time in milliseconds for pending transactions. @@ -54,17 +61,17 @@ const CULL_SENDERS_CHUNK: usize = 1024; /// Transaction queue status. #[derive(Debug, Clone, PartialEq)] pub struct Status { - /// Verifier options. 
- pub options: verifier::Options, - /// Current status of the transaction pool. - pub status: txpool::LightStatus, - /// Current limits of the transaction pool. - pub limits: txpool::Options, + /// Verifier options. + pub options: verifier::Options, + /// Current status of the transaction pool. + pub status: txpool::LightStatus, + /// Current limits of the transaction pool. + pub limits: txpool::Options, } impl fmt::Display for Status { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - writeln!( + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + writeln!( fmt, "Pool: {current}/{max} ({senders} senders; {mem}/{mem_max} kB) [minGasPrice: {gp} Mwei, maxGas: {max_gas}]", current = self.status.transaction_count, @@ -75,116 +82,116 @@ impl fmt::Display for Status { gp = self.options.minimal_gas_price / 1_000_000, max_gas = cmp::min(self.options.block_gas_limit, self.options.tx_gas_limit), ) - } + } } #[derive(Debug)] struct CachedPending { - block_number: u64, - current_timestamp: u64, - nonce_cap: Option, - has_local_pending: bool, - pending: Option>>, - max_len: usize, + block_number: u64, + current_timestamp: u64, + nonce_cap: Option, + has_local_pending: bool, + pending: Option>>, + max_len: usize, } impl CachedPending { - /// Creates new `CachedPending` without cached set. - pub fn none() -> Self { - CachedPending { - block_number: 0, - current_timestamp: 0, - has_local_pending: false, - pending: None, - nonce_cap: None, - max_len: 0, - } - } + /// Creates new `CachedPending` without cached set. + pub fn none() -> Self { + CachedPending { + block_number: 0, + current_timestamp: 0, + has_local_pending: false, + pending: None, + nonce_cap: None, + max_len: 0, + } + } - /// Remove cached pending set. - pub fn clear(&mut self) { - self.pending = None; - } + /// Remove cached pending set. + pub fn clear(&mut self) { + self.pending = None; + } - /// Returns cached pending set (if any) if it's valid. 
- pub fn pending( - &self, - block_number: u64, - current_timestamp: u64, - nonce_cap: Option<&U256>, - max_len: usize, - ) -> Option>> { - // First check if we have anything in cache. - let pending = self.pending.as_ref()?; + /// Returns cached pending set (if any) if it's valid. + pub fn pending( + &self, + block_number: u64, + current_timestamp: u64, + nonce_cap: Option<&U256>, + max_len: usize, + ) -> Option>> { + // First check if we have anything in cache. + let pending = self.pending.as_ref()?; - if block_number != self.block_number { - return None; - } + if block_number != self.block_number { + return None; + } - // In case we don't have any local pending transactions - // there is no need to invalidate the cache because of timestamp. - // Timestamp only affects local `PendingTransactions` with `Condition::Timestamp`. - if self.has_local_pending && current_timestamp > self.current_timestamp + TIMESTAMP_CACHE { - return None; - } + // In case we don't have any local pending transactions + // there is no need to invalidate the cache because of timestamp. + // Timestamp only affects local `PendingTransactions` with `Condition::Timestamp`. + if self.has_local_pending && current_timestamp > self.current_timestamp + TIMESTAMP_CACHE { + return None; + } - // It's fine to return limited set even if `nonce_cap` is `None`. - // The worst thing that may happen is that some transactions won't get propagated in current round, - // but they are not really valid in current block anyway. We will propagate them in the next round. - // Also there is no way to have both `Some` with different numbers since it depends on the block number - // and a constant parameter in schedule (`nonce_cap_increment`) - if self.nonce_cap.is_none() && nonce_cap.is_some() { - return None; - } + // It's fine to return limited set even if `nonce_cap` is `None`. 
+ // The worst thing that may happen is that some transactions won't get propagated in current round, + // but they are not really valid in current block anyway. We will propagate them in the next round. + // Also there is no way to have both `Some` with different numbers since it depends on the block number + // and a constant parameter in schedule (`nonce_cap_increment`) + if self.nonce_cap.is_none() && nonce_cap.is_some() { + return None; + } - // It's fine to just take a smaller subset, but not other way around. - if max_len > self.max_len { - return None; - } + // It's fine to just take a smaller subset, but not other way around. + if max_len > self.max_len { + return None; + } - Some(pending.iter().take(max_len).cloned().collect()) - } + Some(pending.iter().take(max_len).cloned().collect()) + } } #[derive(Debug)] struct RecentlyRejected { - inner: RwLock>, - limit: usize, + inner: RwLock>, + limit: usize, } impl RecentlyRejected { - fn new(limit: usize) -> Self { - RecentlyRejected { - limit, - inner: RwLock::new(HashMap::with_capacity(MIN_REJECTED_CACHE_SIZE)), - } - } + fn new(limit: usize) -> Self { + RecentlyRejected { + limit, + inner: RwLock::new(HashMap::with_capacity(MIN_REJECTED_CACHE_SIZE)), + } + } - fn clear(&self) { - self.inner.write().clear(); - } + fn clear(&self) { + self.inner.write().clear(); + } - fn get(&self, hash: &H256) -> Option { - self.inner.read().get(hash).cloned() - } + fn get(&self, hash: &H256) -> Option { + self.inner.read().get(hash).cloned() + } - fn insert(&self, hash: H256, err: &transaction::Error) { - if self.inner.read().contains_key(&hash) { - return; - } + fn insert(&self, hash: H256, err: &transaction::Error) { + if self.inner.read().contains_key(&hash) { + return; + } - let mut inner = self.inner.write(); - inner.insert(hash, err.clone()); + let mut inner = self.inner.write(); + inner.insert(hash, err.clone()); - // clean up - if inner.len() > self.limit { - // randomly remove half of the entries - let to_remove: 
Vec<_> = inner.keys().take(self.limit / 2).cloned().collect(); - for key in to_remove { - inner.remove(&key); - } - } - } + // clean up + if inner.len() > self.limit { + // randomly remove half of the entries + let to_remove: Vec<_> = inner.keys().take(self.limit / 2).cloned().collect(); + for key in to_remove { + inner.remove(&key); + } + } + } } /// Minimal size of rejection cache, by default it's equal to queue size. @@ -198,80 +205,90 @@ const MIN_REJECTED_CACHE_SIZE: usize = 2048; /// - returning an iterator for transactions that are ready to be included in block (pending) #[derive(Debug)] pub struct TransactionQueue { - insertion_id: Arc, - pool: RwLock, - options: RwLock, - cached_pending: RwLock, - recently_rejected: RecentlyRejected, + insertion_id: Arc, + pool: RwLock, + options: RwLock, + cached_pending: RwLock, + recently_rejected: RecentlyRejected, } impl TransactionQueue { - /// Create new queue with given pool limits and initial verification options. - pub fn new( - limits: txpool::Options, - verification_options: verifier::Options, - strategy: PrioritizationStrategy, - ) -> Self { - let max_count = limits.max_count; - TransactionQueue { - insertion_id: Default::default(), - pool: RwLock::new(txpool::Pool::new(Default::default(), scoring::NonceAndGasPrice(strategy), limits)), - options: RwLock::new(verification_options), - cached_pending: RwLock::new(CachedPending::none()), - recently_rejected: RecentlyRejected::new(cmp::max(MIN_REJECTED_CACHE_SIZE, max_count / 4)), - } - } + /// Create new queue with given pool limits and initial verification options. 
+ pub fn new( + limits: txpool::Options, + verification_options: verifier::Options, + strategy: PrioritizationStrategy, + ) -> Self { + let max_count = limits.max_count; + TransactionQueue { + insertion_id: Default::default(), + pool: RwLock::new(txpool::Pool::new( + Default::default(), + scoring::NonceAndGasPrice(strategy), + limits, + )), + options: RwLock::new(verification_options), + cached_pending: RwLock::new(CachedPending::none()), + recently_rejected: RecentlyRejected::new(cmp::max( + MIN_REJECTED_CACHE_SIZE, + max_count / 4, + )), + } + } - /// Update verification options - /// - /// Some parameters of verification may vary in time (like block gas limit or minimal gas price). - pub fn set_verifier_options(&self, options: verifier::Options) { - *self.options.write() = options; - } + /// Update verification options + /// + /// Some parameters of verification may vary in time (like block gas limit or minimal gas price). + pub fn set_verifier_options(&self, options: verifier::Options) { + *self.options.write() = options; + } - /// Sets the in-chain transaction checker for pool listener. - pub fn set_in_chain_checker(&self, f: F) where - F: Fn(&H256) -> bool + Send + Sync + 'static - { - self.pool.write().listener_mut().0.set_in_chain_checker(f) - } + /// Sets the in-chain transaction checker for pool listener. + pub fn set_in_chain_checker(&self, f: F) + where + F: Fn(&H256) -> bool + Send + Sync + 'static, + { + self.pool.write().listener_mut().0.set_in_chain_checker(f) + } - /// Import a set of transactions to the pool. - /// - /// Given blockchain and state access (Client) - /// verifies and imports transactions to the pool. - pub fn import( - &self, - client: C, - transactions: Vec, - ) -> Vec> { - // Run verification - trace_time!("pool::verify_and_import"); - let options = self.options.read().clone(); + /// Import a set of transactions to the pool. + /// + /// Given blockchain and state access (Client) + /// verifies and imports transactions to the pool. 
+ pub fn import( + &self, + client: C, + transactions: Vec, + ) -> Vec> { + // Run verification + trace_time!("pool::verify_and_import"); + let options = self.options.read().clone(); - let transaction_to_replace = { - if options.no_early_reject { - None - } else { - let pool = self.pool.read(); - if pool.is_full() { - pool.worst_transaction().map(|worst| (pool.scoring().clone(), worst)) - } else { - None - } - } - }; + let transaction_to_replace = { + if options.no_early_reject { + None + } else { + let pool = self.pool.read(); + if pool.is_full() { + pool.worst_transaction() + .map(|worst| (pool.scoring().clone(), worst)) + } else { + None + } + } + }; - let verifier = verifier::Verifier::new( - client.clone(), - options, - self.insertion_id.clone(), - transaction_to_replace, - ); + let verifier = verifier::Verifier::new( + client.clone(), + options, + self.insertion_id.clone(), + transaction_to_replace, + ); - let mut replace = replace::ReplaceByScoreAndReadiness::new(self.pool.read().scoring().clone(), client); + let mut replace = + replace::ReplaceByScoreAndReadiness::new(self.pool.read().scoring().clone(), client); - let results = transactions + let results = transactions .into_iter() .map(|transaction| { let hash = transaction.hash(); @@ -301,309 +318,348 @@ impl TransactionQueue { }) .collect::>(); - // Notify about imported transactions. - (self.pool.write().listener_mut().1).0.notify(); + // Notify about imported transactions. + (self.pool.write().listener_mut().1).0.notify(); - if results.iter().any(|r| r.is_ok()) { - self.cached_pending.write().clear(); - } + if results.iter().any(|r| r.is_ok()) { + self.cached_pending.write().clear(); + } - results - } + results + } - /// Returns all transactions in the queue without explicit ordering. 
- pub fn all_transactions(&self) -> Vec> { - let ready = |_tx: &pool::VerifiedTransaction| txpool::Readiness::Ready; - self.pool.read().unordered_pending(ready).collect() - } + /// Returns all transactions in the queue without explicit ordering. + pub fn all_transactions(&self) -> Vec> { + let ready = |_tx: &pool::VerifiedTransaction| txpool::Readiness::Ready; + self.pool.read().unordered_pending(ready).collect() + } - /// Returns all transaction hashes in the queue without explicit ordering. - pub fn all_transaction_hashes(&self) -> Vec { - let ready = |_tx: &pool::VerifiedTransaction| txpool::Readiness::Ready; - self.pool.read().unordered_pending(ready).map(|tx| tx.hash).collect() - } + /// Returns all transaction hashes in the queue without explicit ordering. + pub fn all_transaction_hashes(&self) -> Vec { + let ready = |_tx: &pool::VerifiedTransaction| txpool::Readiness::Ready; + self.pool + .read() + .unordered_pending(ready) + .map(|tx| tx.hash) + .collect() + } - /// Computes unordered set of pending hashes. - /// - /// Since strict nonce-checking is not required, you may get some false positive future transactions as well. - pub fn pending_hashes( - &self, - nonce: N, - ) -> BTreeSet where - N: Fn(&Address) -> Option, - { - let ready = ready::OptionalState::new(nonce); - self.pool.read().unordered_pending(ready).map(|tx| tx.hash).collect() - } + /// Computes unordered set of pending hashes. + /// + /// Since strict nonce-checking is not required, you may get some false positive future transactions as well. + pub fn pending_hashes(&self, nonce: N) -> BTreeSet + where + N: Fn(&Address) -> Option, + { + let ready = ready::OptionalState::new(nonce); + self.pool + .read() + .unordered_pending(ready) + .map(|tx| tx.hash) + .collect() + } - /// Returns current pending transactions ordered by priority. - /// - /// NOTE: This may return a cached version of pending transaction set. 
- /// Re-computing the pending set is possible with `#collect_pending` method, - /// but be aware that it's a pretty expensive operation. - pub fn pending( - &self, - client: C, - settings: PendingSettings, - ) -> Vec> where - C: client::NonceClient, - { - let PendingSettings { block_number, current_timestamp, nonce_cap, max_len, ordering } = settings; - if let Some(pending) = self.cached_pending.read().pending(block_number, current_timestamp, nonce_cap.as_ref(), max_len) { - return pending; - } + /// Returns current pending transactions ordered by priority. + /// + /// NOTE: This may return a cached version of pending transaction set. + /// Re-computing the pending set is possible with `#collect_pending` method, + /// but be aware that it's a pretty expensive operation. + pub fn pending( + &self, + client: C, + settings: PendingSettings, + ) -> Vec> + where + C: client::NonceClient, + { + let PendingSettings { + block_number, + current_timestamp, + nonce_cap, + max_len, + ordering, + } = settings; + if let Some(pending) = self.cached_pending.read().pending( + block_number, + current_timestamp, + nonce_cap.as_ref(), + max_len, + ) { + return pending; + } - // Double check after acquiring write lock - let mut cached_pending = self.cached_pending.write(); - if let Some(pending) = cached_pending.pending(block_number, current_timestamp, nonce_cap.as_ref(), max_len) { - return pending; - } + // Double check after acquiring write lock + let mut cached_pending = self.cached_pending.write(); + if let Some(pending) = + cached_pending.pending(block_number, current_timestamp, nonce_cap.as_ref(), max_len) + { + return pending; + } - // In case we don't have a cached set, but we don't care about order - // just return the unordered set. 
- if let PendingOrdering::Unordered = ordering { - let ready = Self::ready(client, block_number, current_timestamp, nonce_cap); - return self.pool.read().unordered_pending(ready).take(max_len).collect(); - } + // In case we don't have a cached set, but we don't care about order + // just return the unordered set. + if let PendingOrdering::Unordered = ordering { + let ready = Self::ready(client, block_number, current_timestamp, nonce_cap); + return self + .pool + .read() + .unordered_pending(ready) + .take(max_len) + .collect(); + } - let pending: Vec<_> = self.collect_pending(client, block_number, current_timestamp, nonce_cap, |i| { - i.take(max_len).collect() - }); + let pending: Vec<_> = + self.collect_pending(client, block_number, current_timestamp, nonce_cap, |i| { + i.take(max_len).collect() + }); - *cached_pending = CachedPending { - block_number, - current_timestamp, - nonce_cap, - has_local_pending: self.has_local_pending_transactions(), - pending: Some(pending.clone()), - max_len, - }; + *cached_pending = CachedPending { + block_number, + current_timestamp, + nonce_cap, + has_local_pending: self.has_local_pending_transactions(), + pending: Some(pending.clone()), + max_len, + }; - pending - } + pending + } - /// Collect pending transactions. - /// - /// NOTE This is re-computing the pending set and it might be expensive to do so. - /// Prefer using cached pending set using `#pending` method. 
- pub fn collect_pending( - &self, - client: C, - block_number: u64, - current_timestamp: u64, - nonce_cap: Option, - collect: F, - ) -> T where - C: client::NonceClient, - F: FnOnce(txpool::PendingIterator< - pool::VerifiedTransaction, - (ready::Condition, ready::State), - scoring::NonceAndGasPrice, - Listener, - >) -> T, - { - debug!(target: "txqueue", "Re-computing pending set for block: {}", block_number); - trace_time!("pool::collect_pending"); - let ready = Self::ready(client, block_number, current_timestamp, nonce_cap); - collect(self.pool.read().pending(ready)) - } + /// Collect pending transactions. + /// + /// NOTE This is re-computing the pending set and it might be expensive to do so. + /// Prefer using cached pending set using `#pending` method. + pub fn collect_pending( + &self, + client: C, + block_number: u64, + current_timestamp: u64, + nonce_cap: Option, + collect: F, + ) -> T + where + C: client::NonceClient, + F: FnOnce( + txpool::PendingIterator< + pool::VerifiedTransaction, + (ready::Condition, ready::State), + scoring::NonceAndGasPrice, + Listener, + >, + ) -> T, + { + debug!(target: "txqueue", "Re-computing pending set for block: {}", block_number); + trace_time!("pool::collect_pending"); + let ready = Self::ready(client, block_number, current_timestamp, nonce_cap); + collect(self.pool.read().pending(ready)) + } - fn ready( - client: C, - block_number: u64, - current_timestamp: u64, - nonce_cap: Option, - ) -> (ready::Condition, ready::State) where - C: client::NonceClient, - { - let pending_readiness = ready::Condition::new(block_number, current_timestamp); - // don't mark any transactions as stale at this point. 
- let stale_id = None; - let state_readiness = ready::State::new(client, stale_id, nonce_cap); + fn ready( + client: C, + block_number: u64, + current_timestamp: u64, + nonce_cap: Option, + ) -> (ready::Condition, ready::State) + where + C: client::NonceClient, + { + let pending_readiness = ready::Condition::new(block_number, current_timestamp); + // don't mark any transactions as stale at this point. + let stale_id = None; + let state_readiness = ready::State::new(client, stale_id, nonce_cap); - (pending_readiness, state_readiness) - } + (pending_readiness, state_readiness) + } - /// Culls all stalled transactions from the pool. - pub fn cull( - &self, - client: C, - ) { - trace_time!("pool::cull"); - // We don't care about future transactions, so nonce_cap is not important. - let nonce_cap = None; - // We want to clear stale transactions from the queue as well. - // (Transactions that are occuping the queue for a long time without being included) - let stale_id = { - let current_id = self.insertion_id.load(atomic::Ordering::Relaxed); - // wait at least for half of the queue to be replaced - let gap = self.pool.read().options().max_count / 2; - // but never less than 100 transactions - let gap = cmp::max(100, gap); + /// Culls all stalled transactions from the pool. + pub fn cull(&self, client: C) { + trace_time!("pool::cull"); + // We don't care about future transactions, so nonce_cap is not important. + let nonce_cap = None; + // We want to clear stale transactions from the queue as well. 
+ // (Transactions that are occuping the queue for a long time without being included) + let stale_id = { + let current_id = self.insertion_id.load(atomic::Ordering::Relaxed); + // wait at least for half of the queue to be replaced + let gap = self.pool.read().options().max_count / 2; + // but never less than 100 transactions + let gap = cmp::max(100, gap); - current_id.checked_sub(gap) - }; + current_id.checked_sub(gap) + }; - self.recently_rejected.clear(); + self.recently_rejected.clear(); - let mut removed = 0; - let senders: Vec<_> = { - let pool = self.pool.read(); - let senders = pool.senders().cloned().collect(); - senders - }; - for chunk in senders.chunks(CULL_SENDERS_CHUNK) { - trace_time!("pool::cull::chunk"); - let state_readiness = ready::State::new(client.clone(), stale_id, nonce_cap); - removed += self.pool.write().cull(Some(chunk), state_readiness); - } - debug!(target: "txqueue", "Removed {} stalled transactions. {}", removed, self.status()); - } + let mut removed = 0; + let senders: Vec<_> = { + let pool = self.pool.read(); + let senders = pool.senders().cloned().collect(); + senders + }; + for chunk in senders.chunks(CULL_SENDERS_CHUNK) { + trace_time!("pool::cull::chunk"); + let state_readiness = ready::State::new(client.clone(), stale_id, nonce_cap); + removed += self.pool.write().cull(Some(chunk), state_readiness); + } + debug!(target: "txqueue", "Removed {} stalled transactions. {}", removed, self.status()); + } - /// Returns next valid nonce for given sender - /// or `None` if there are no pending transactions from that sender. - pub fn next_nonce( - &self, - client: C, - address: &Address, - ) -> Option { - // Do not take nonce_cap into account when determining next nonce. - let nonce_cap = None; - // Also we ignore stale transactions in the queue. - let stale_id = None; + /// Returns next valid nonce for given sender + /// or `None` if there are no pending transactions from that sender. 
+ pub fn next_nonce(&self, client: C, address: &Address) -> Option { + // Do not take nonce_cap into account when determining next nonce. + let nonce_cap = None; + // Also we ignore stale transactions in the queue. + let stale_id = None; - let state_readiness = ready::State::new(client, stale_id, nonce_cap); + let state_readiness = ready::State::new(client, stale_id, nonce_cap); - self.pool.read().pending_from_sender(state_readiness, address) - .last() - .map(|tx| tx.signed().nonce.saturating_add(U256::from(1))) - } + self.pool + .read() + .pending_from_sender(state_readiness, address) + .last() + .map(|tx| tx.signed().nonce.saturating_add(U256::from(1))) + } - /// Retrieve a transaction from the pool. - /// - /// Given transaction hash looks up that transaction in the pool - /// and returns a shared pointer to it or `None` if it's not present. - pub fn find( - &self, - hash: &H256, - ) -> Option> { - self.pool.read().find(hash) - } + /// Retrieve a transaction from the pool. + /// + /// Given transaction hash looks up that transaction in the pool + /// and returns a shared pointer to it or `None` if it's not present. + pub fn find(&self, hash: &H256) -> Option> { + self.pool.read().find(hash) + } - /// Remove a set of transactions from the pool. - /// - /// Given an iterator of transaction hashes - /// removes them from the pool. - /// That method should be used if invalid transactions are detected - /// or you want to cancel a transaction. - pub fn remove<'a, T: IntoIterator>( - &self, - hashes: T, - is_invalid: bool, - ) -> Vec>> { - let results = { - let mut pool = self.pool.write(); + /// Remove a set of transactions from the pool. + /// + /// Given an iterator of transaction hashes + /// removes them from the pool. + /// That method should be used if invalid transactions are detected + /// or you want to cancel a transaction. 
+ pub fn remove<'a, T: IntoIterator>( + &self, + hashes: T, + is_invalid: bool, + ) -> Vec>> { + let results = { + let mut pool = self.pool.write(); - hashes - .into_iter() - .map(|hash| pool.remove(hash, is_invalid)) - .collect::>() - }; + hashes + .into_iter() + .map(|hash| pool.remove(hash, is_invalid)) + .collect::>() + }; - if results.iter().any(Option::is_some) { - self.cached_pending.write().clear(); - } + if results.iter().any(Option::is_some) { + self.cached_pending.write().clear(); + } - results - } + results + } - /// Clear the entire pool. - pub fn clear(&self) { - self.pool.write().clear(); - } + /// Clear the entire pool. + pub fn clear(&self) { + self.pool.write().clear(); + } - /// Penalize given senders. - pub fn penalize<'a, T: IntoIterator>(&self, senders: T) { - let mut pool = self.pool.write(); - for sender in senders { - pool.update_scores(sender, ()); - } - } + /// Penalize given senders. + pub fn penalize<'a, T: IntoIterator>(&self, senders: T) { + let mut pool = self.pool.write(); + for sender in senders { + pool.update_scores(sender, ()); + } + } - /// Returns gas price of currently the worst transaction in the pool. - pub fn current_worst_gas_price(&self) -> U256 { - match self.pool.read().worst_transaction() { - Some(tx) => tx.signed().gas_price, - None => self.options.read().minimal_gas_price, - } - } + /// Returns gas price of currently the worst transaction in the pool. + pub fn current_worst_gas_price(&self) -> U256 { + match self.pool.read().worst_transaction() { + Some(tx) => tx.signed().gas_price, + None => self.options.read().minimal_gas_price, + } + } - /// Returns a status of the queue. - pub fn status(&self) -> Status { - let pool = self.pool.read(); - let status = pool.light_status(); - let limits = pool.options(); - let options = self.options.read().clone(); + /// Returns a status of the queue. 
+ pub fn status(&self) -> Status { + let pool = self.pool.read(); + let status = pool.light_status(); + let limits = pool.options(); + let options = self.options.read().clone(); - Status { - options, - status, - limits, - } - } + Status { + options, + status, + limits, + } + } - /// Check if there are any local transactions in the pool. - /// - /// Returns `true` if there are any transactions in the pool - /// that has been marked as local. - /// - /// Local transactions are the ones from accounts managed by this node - /// and transactions submitted via local RPC (`eth_sendRawTransaction`) - pub fn has_local_pending_transactions(&self) -> bool { - self.pool.read().listener().0.has_pending() - } + /// Check if there are any local transactions in the pool. + /// + /// Returns `true` if there are any transactions in the pool + /// that has been marked as local. + /// + /// Local transactions are the ones from accounts managed by this node + /// and transactions submitted via local RPC (`eth_sendRawTransaction`) + pub fn has_local_pending_transactions(&self) -> bool { + self.pool.read().listener().0.has_pending() + } - /// Returns status of recently seen local transactions. - pub fn local_transactions(&self) -> BTreeMap { - self.pool.read().listener().0.all_transactions().iter().map(|(a, b)| (*a, b.clone())).collect() - } + /// Returns status of recently seen local transactions. + pub fn local_transactions(&self) -> BTreeMap { + self.pool + .read() + .listener() + .0 + .all_transactions() + .iter() + .map(|(a, b)| (*a, b.clone())) + .collect() + } - /// Add a callback to be notified about all transactions entering the pool. - pub fn add_listener(&self, f: Box) { - let mut pool = self.pool.write(); - (pool.listener_mut().1).0.add(f); - } + /// Add a callback to be notified about all transactions entering the pool. + pub fn add_listener(&self, f: Box) { + let mut pool = self.pool.write(); + (pool.listener_mut().1).0.add(f); + } - /// Check if pending set is cached. 
- #[cfg(test)] - pub fn is_pending_cached(&self) -> bool { - self.cached_pending.read().pending.is_some() - } + /// Check if pending set is cached. + #[cfg(test)] + pub fn is_pending_cached(&self) -> bool { + self.cached_pending.read().pending.is_some() + } } fn convert_error(err: txpool::Error) -> transaction::Error { - use self::txpool::Error; + use self::txpool::Error; - match err { - Error::AlreadyImported(..) => transaction::Error::AlreadyImported, - Error::TooCheapToEnter(..) => transaction::Error::LimitReached, - Error::TooCheapToReplace(..) => transaction::Error::TooCheapToReplace { prev: None, new: None } - } + match err { + Error::AlreadyImported(..) => transaction::Error::AlreadyImported, + Error::TooCheapToEnter(..) => transaction::Error::LimitReached, + Error::TooCheapToReplace(..) => transaction::Error::TooCheapToReplace { + prev: None, + new: None, + }, + } } #[cfg(test)] mod tests { - use super::*; - use pool::tests::client::TestClient; + use super::*; + use pool::tests::client::TestClient; - #[test] - fn should_get_pending_transactions() { - let queue = TransactionQueue::new(txpool::Options::default(), verifier::Options::default(), PrioritizationStrategy::GasPriceOnly); + #[test] + fn should_get_pending_transactions() { + let queue = TransactionQueue::new( + txpool::Options::default(), + verifier::Options::default(), + PrioritizationStrategy::GasPriceOnly, + ); - let pending: Vec<_> = queue.pending(TestClient::default(), PendingSettings::all_prioritized(0, 0)); + let pending: Vec<_> = queue.pending( + TestClient::default(), + PendingSettings::all_prioritized(0, 0), + ); - for tx in pending { - assert!(tx.signed().nonce > 0.into()); - } - } + for tx in pending { + assert!(tx.signed().nonce > 0.into()); + } + } } diff --git a/miner/src/pool/ready.rs b/miner/src/pool/ready.rs index 3accba139..57f300533 100644 --- a/miner/src/pool/ready.rs +++ b/miner/src/pool/ready.rs @@ -38,95 +38,90 @@ //! 
First `Readiness::Future` response also causes all subsequent transactions from the same sender //! to be marked as `Future`. -use std::cmp; -use std::collections::HashMap; +use std::{cmp, collections::HashMap}; -use ethereum_types::{U256, H160 as Address}; +use ethereum_types::{H160 as Address, U256}; use txpool::{self, VerifiedTransaction as PoolVerifiedTransaction}; use types::transaction; -use super::client::NonceClient; -use super::VerifiedTransaction; +use super::{client::NonceClient, VerifiedTransaction}; /// Checks readiness of transactions by comparing the nonce to state nonce. #[derive(Debug)] pub struct State { - nonces: HashMap, - state: C, - max_nonce: Option, - stale_id: Option, + nonces: HashMap, + state: C, + max_nonce: Option, + stale_id: Option, } impl State { - /// Create new State checker, given client interface. - pub fn new( - state: C, - stale_id: Option, - max_nonce: Option, - ) -> Self { - State { - nonces: Default::default(), - state, - max_nonce, - stale_id, - } - } + /// Create new State checker, given client interface. 
+ pub fn new(state: C, stale_id: Option, max_nonce: Option) -> Self { + State { + nonces: Default::default(), + state, + max_nonce, + stale_id, + } + } } impl txpool::Ready for State { - fn is_ready(&mut self, tx: &VerifiedTransaction) -> txpool::Readiness { - // Check max nonce - match self.max_nonce { - Some(nonce) if tx.transaction.nonce > nonce => { - return txpool::Readiness::Future; - }, - _ => {}, - } + fn is_ready(&mut self, tx: &VerifiedTransaction) -> txpool::Readiness { + // Check max nonce + match self.max_nonce { + Some(nonce) if tx.transaction.nonce > nonce => { + return txpool::Readiness::Future; + } + _ => {} + } - let sender = tx.sender(); - let state = &self.state; - let state_nonce = || state.account_nonce(sender); - let nonce = self.nonces.entry(*sender).or_insert_with(state_nonce); - match tx.transaction.nonce.cmp(nonce) { - // Before marking as future check for stale ids - cmp::Ordering::Greater => match self.stale_id { - Some(id) if tx.insertion_id() < id => txpool::Readiness::Stale, - _ => txpool::Readiness::Future, - }, - cmp::Ordering::Less => txpool::Readiness::Stale, - cmp::Ordering::Equal => { - *nonce = nonce.saturating_add(U256::from(1)); - txpool::Readiness::Ready - }, - } - } + let sender = tx.sender(); + let state = &self.state; + let state_nonce = || state.account_nonce(sender); + let nonce = self.nonces.entry(*sender).or_insert_with(state_nonce); + match tx.transaction.nonce.cmp(nonce) { + // Before marking as future check for stale ids + cmp::Ordering::Greater => match self.stale_id { + Some(id) if tx.insertion_id() < id => txpool::Readiness::Stale, + _ => txpool::Readiness::Future, + }, + cmp::Ordering::Less => txpool::Readiness::Stale, + cmp::Ordering::Equal => { + *nonce = nonce.saturating_add(U256::from(1)); + txpool::Readiness::Ready + } + } + } } /// Checks readines of Pending transactions by comparing it with current time and block number. 
#[derive(Debug)] pub struct Condition { - block_number: u64, - now: u64, + block_number: u64, + now: u64, } impl Condition { - /// Create a new condition checker given current block number and UTC timestamp. - pub fn new(block_number: u64, now: u64) -> Self { - Condition { - block_number, - now, - } - } + /// Create a new condition checker given current block number and UTC timestamp. + pub fn new(block_number: u64, now: u64) -> Self { + Condition { block_number, now } + } } impl txpool::Ready for Condition { - fn is_ready(&mut self, tx: &VerifiedTransaction) -> txpool::Readiness { - match tx.transaction.condition { - Some(transaction::Condition::Number(block)) if block > self.block_number => txpool::Readiness::Future, - Some(transaction::Condition::Timestamp(time)) if time > self.now => txpool::Readiness::Future, - _ => txpool::Readiness::Ready, - } - } + fn is_ready(&mut self, tx: &VerifiedTransaction) -> txpool::Readiness { + match tx.transaction.condition { + Some(transaction::Condition::Number(block)) if block > self.block_number => { + txpool::Readiness::Future + } + Some(transaction::Condition::Timestamp(time)) if time > self.now => { + txpool::Readiness::Future + } + _ => txpool::Readiness::Ready, + } + } } /// Readiness checker that only relies on nonce cache (does actually go to state). @@ -135,114 +130,144 @@ impl txpool::Ready for Condition { /// isn't found in provided state nonce store, defaults to the tx nonce and updates /// the nonce store. Useful for using with a state nonce cache when false positives are allowed. 
pub struct OptionalState { - nonces: HashMap, - state: C, + nonces: HashMap, + state: C, } impl OptionalState { - pub fn new(state: C) -> Self { - OptionalState { - nonces: Default::default(), - state, - } - } + pub fn new(state: C) -> Self { + OptionalState { + nonces: Default::default(), + state, + } + } } impl Option> txpool::Ready for OptionalState { - fn is_ready(&mut self, tx: &VerifiedTransaction) -> txpool::Readiness { - let sender = tx.sender(); - let state = &self.state; - let nonce = self.nonces.entry(*sender).or_insert_with(|| { - state(sender).unwrap_or_else(|| tx.transaction.nonce) - }); - match tx.transaction.nonce.cmp(nonce) { - cmp::Ordering::Greater => txpool::Readiness::Future, - cmp::Ordering::Less => txpool::Readiness::Stale, - cmp::Ordering::Equal => { - *nonce = nonce.saturating_add(U256::from(1)); - txpool::Readiness::Ready - }, - } - } + fn is_ready(&mut self, tx: &VerifiedTransaction) -> txpool::Readiness { + let sender = tx.sender(); + let state = &self.state; + let nonce = self + .nonces + .entry(*sender) + .or_insert_with(|| state(sender).unwrap_or_else(|| tx.transaction.nonce)); + match tx.transaction.nonce.cmp(nonce) { + cmp::Ordering::Greater => txpool::Readiness::Future, + cmp::Ordering::Less => txpool::Readiness::Stale, + cmp::Ordering::Equal => { + *nonce = nonce.saturating_add(U256::from(1)); + txpool::Readiness::Ready + } + } + } } #[cfg(test)] mod tests { - use super::*; - use txpool::Ready; - use pool::tests::client::TestClient; - use pool::tests::tx::{Tx, TxExt}; + use super::*; + use pool::tests::{ + client::TestClient, + tx::{Tx, TxExt}, + }; + use txpool::Ready; - #[test] - fn should_return_correct_state_readiness() { - // given - let (tx1, tx2, tx3) = Tx::default().signed_triple(); - let (tx1, tx2, tx3) = (tx1.verified(), tx2.verified(), tx3.verified()); + #[test] + fn should_return_correct_state_readiness() { + // given + let (tx1, tx2, tx3) = Tx::default().signed_triple(); + let (tx1, tx2, tx3) = (tx1.verified(), 
tx2.verified(), tx3.verified()); - // when - assert_eq!(State::new(TestClient::new(), None, None).is_ready(&tx3), txpool::Readiness::Future); - assert_eq!(State::new(TestClient::new(), None, None).is_ready(&tx2), txpool::Readiness::Future); + // when + assert_eq!( + State::new(TestClient::new(), None, None).is_ready(&tx3), + txpool::Readiness::Future + ); + assert_eq!( + State::new(TestClient::new(), None, None).is_ready(&tx2), + txpool::Readiness::Future + ); - let mut ready = State::new(TestClient::new(), None, None); + let mut ready = State::new(TestClient::new(), None, None); - // then - assert_eq!(ready.is_ready(&tx1), txpool::Readiness::Ready); - assert_eq!(ready.is_ready(&tx2), txpool::Readiness::Ready); - assert_eq!(ready.is_ready(&tx3), txpool::Readiness::Ready); - } + // then + assert_eq!(ready.is_ready(&tx1), txpool::Readiness::Ready); + assert_eq!(ready.is_ready(&tx2), txpool::Readiness::Ready); + assert_eq!(ready.is_ready(&tx3), txpool::Readiness::Ready); + } - #[test] - fn should_return_future_if_nonce_cap_reached() { - // given - let tx = Tx::default().signed().verified(); + #[test] + fn should_return_future_if_nonce_cap_reached() { + // given + let tx = Tx::default().signed().verified(); - // when - let res1 = State::new(TestClient::new(), None, Some(10.into())).is_ready(&tx); - let res2 = State::new(TestClient::new(), None, Some(124.into())).is_ready(&tx); + // when + let res1 = State::new(TestClient::new(), None, Some(10.into())).is_ready(&tx); + let res2 = State::new(TestClient::new(), None, Some(124.into())).is_ready(&tx); - // then - assert_eq!(res1, txpool::Readiness::Future); - assert_eq!(res2, txpool::Readiness::Ready); - } + // then + assert_eq!(res1, txpool::Readiness::Future); + assert_eq!(res2, txpool::Readiness::Ready); + } - #[test] - fn should_return_stale_if_nonce_does_not_match() { - // given - let tx = Tx::default().signed().verified(); + #[test] + fn should_return_stale_if_nonce_does_not_match() { + // given + let tx = 
Tx::default().signed().verified(); - // when - let res = State::new(TestClient::new().with_nonce(125), None, None).is_ready(&tx); + // when + let res = State::new(TestClient::new().with_nonce(125), None, None).is_ready(&tx); - // then - assert_eq!(res, txpool::Readiness::Stale); - } + // then + assert_eq!(res, txpool::Readiness::Stale); + } - #[test] - fn should_return_stale_for_old_transactions() { - // given - let (_, tx) = Tx::default().signed_pair().verified(); + #[test] + fn should_return_stale_for_old_transactions() { + // given + let (_, tx) = Tx::default().signed_pair().verified(); - // when - let res = State::new(TestClient::new(), Some(1), None).is_ready(&tx); + // when + let res = State::new(TestClient::new(), Some(1), None).is_ready(&tx); - // then - assert_eq!(res, txpool::Readiness::Stale); - } + // then + assert_eq!(res, txpool::Readiness::Stale); + } - #[test] - fn should_check_readiness_of_condition() { - // given - let tx = Tx::default().signed(); - let v = |tx: transaction::PendingTransaction| TestClient::new().verify(tx); - let tx1 = v(transaction::PendingTransaction::new(tx.clone(), transaction::Condition::Number(5).into())); - let tx2 = v(transaction::PendingTransaction::new(tx.clone(), transaction::Condition::Timestamp(3).into())); - let tx3 = v(transaction::PendingTransaction::new(tx.clone(), None)); + #[test] + fn should_check_readiness_of_condition() { + // given + let tx = Tx::default().signed(); + let v = |tx: transaction::PendingTransaction| TestClient::new().verify(tx); + let tx1 = v(transaction::PendingTransaction::new( + tx.clone(), + transaction::Condition::Number(5).into(), + )); + let tx2 = v(transaction::PendingTransaction::new( + tx.clone(), + transaction::Condition::Timestamp(3).into(), + )); + let tx3 = v(transaction::PendingTransaction::new(tx.clone(), None)); - // when/then - assert_eq!(Condition::new(0, 0).is_ready(&tx1), txpool::Readiness::Future); - assert_eq!(Condition::new(0, 0).is_ready(&tx2), 
txpool::Readiness::Future); - assert_eq!(Condition::new(0, 0).is_ready(&tx3), txpool::Readiness::Ready); - assert_eq!(Condition::new(5, 0).is_ready(&tx1), txpool::Readiness::Ready); - assert_eq!(Condition::new(0, 3).is_ready(&tx2), txpool::Readiness::Ready); - } + // when/then + assert_eq!( + Condition::new(0, 0).is_ready(&tx1), + txpool::Readiness::Future + ); + assert_eq!( + Condition::new(0, 0).is_ready(&tx2), + txpool::Readiness::Future + ); + assert_eq!( + Condition::new(0, 0).is_ready(&tx3), + txpool::Readiness::Ready + ); + assert_eq!( + Condition::new(5, 0).is_ready(&tx1), + txpool::Readiness::Ready + ); + assert_eq!( + Condition::new(0, 3).is_ready(&tx2), + txpool::Readiness::Ready + ); + } } diff --git a/miner/src/pool/replace.rs b/miner/src/pool/replace.rs index 0655af599..237524cf9 100644 --- a/miner/src/pool/replace.rs +++ b/miner/src/pool/replace.rs @@ -25,489 +25,658 @@ use std::cmp; -use ethereum_types::{U256, H160 as Address}; -use txpool::{self, scoring::{Choice, Scoring}, ReplaceTransaction}; -use txpool::VerifiedTransaction; use super::{client, ScoredTransaction}; +use ethereum_types::{H160 as Address, U256}; +use txpool::{ + self, + scoring::{Choice, Scoring}, + ReplaceTransaction, VerifiedTransaction, +}; /// Choose whether to replace based on the sender, the score and finally the /// `Readiness` of the transactions being compared. 
#[derive(Debug)] pub struct ReplaceByScoreAndReadiness { - scoring: S, - client: C, + scoring: S, + client: C, } impl ReplaceByScoreAndReadiness { - /// Create a new `ReplaceByScoreAndReadiness` - pub fn new(scoring: S, client: C) -> Self { - ReplaceByScoreAndReadiness { scoring, client } - } + /// Create a new `ReplaceByScoreAndReadiness` + pub fn new(scoring: S, client: C) -> Self { + ReplaceByScoreAndReadiness { scoring, client } + } } impl txpool::ShouldReplace for ReplaceByScoreAndReadiness where - T: VerifiedTransaction + ScoredTransaction + PartialEq, - S: Scoring, - C: client::NonceClient, + T: VerifiedTransaction + ScoredTransaction + PartialEq, + S: Scoring, + C: client::NonceClient, { - fn should_replace( - &self, - old: &ReplaceTransaction, - new: &ReplaceTransaction, - ) -> Choice { - let both_local = old.priority().is_local() && new.priority().is_local(); - if old.sender() == new.sender() { - // prefer earliest transaction - match new.nonce().cmp(&old.nonce()) { - cmp::Ordering::Equal => self.scoring.choose(&old, &new), - _ if both_local => Choice::InsertNew, - cmp::Ordering::Less => Choice::ReplaceOld, - cmp::Ordering::Greater => Choice::RejectNew, - } - } else if both_local { - Choice::InsertNew - } else { - let old_score = (old.priority(), old.gas_price()); - let new_score = (new.priority(), new.gas_price()); - if new_score > old_score { - // Check if this is a replacement transaction. - // - // With replacement transactions we can safely return `InsertNew` here, because - // we don't need to remove `old` (worst transaction in the pool) since `new` will replace - // some other transaction in the pool so we will never go above limit anyway. 
- if let Some(txs) = new.pooled_by_sender { - if let Ok(index) = txs.binary_search_by(|old| self.scoring.compare(old, new)) { - return match self.scoring.choose(&txs[index], new) { - Choice::ReplaceOld => Choice::InsertNew, - choice => choice, - } - } - } + fn should_replace(&self, old: &ReplaceTransaction, new: &ReplaceTransaction) -> Choice { + let both_local = old.priority().is_local() && new.priority().is_local(); + if old.sender() == new.sender() { + // prefer earliest transaction + match new.nonce().cmp(&old.nonce()) { + cmp::Ordering::Equal => self.scoring.choose(&old, &new), + _ if both_local => Choice::InsertNew, + cmp::Ordering::Less => Choice::ReplaceOld, + cmp::Ordering::Greater => Choice::RejectNew, + } + } else if both_local { + Choice::InsertNew + } else { + let old_score = (old.priority(), old.gas_price()); + let new_score = (new.priority(), new.gas_price()); + if new_score > old_score { + // Check if this is a replacement transaction. + // + // With replacement transactions we can safely return `InsertNew` here, because + // we don't need to remove `old` (worst transaction in the pool) since `new` will replace + // some other transaction in the pool so we will never go above limit anyway. 
+ if let Some(txs) = new.pooled_by_sender { + if let Ok(index) = txs.binary_search_by(|old| self.scoring.compare(old, new)) { + return match self.scoring.choose(&txs[index], new) { + Choice::ReplaceOld => Choice::InsertNew, + choice => choice, + }; + } + } - let state = &self.client; - // calculate readiness based on state nonce + pooled txs from same sender - let is_ready = |replace: &ReplaceTransaction| { - let mut nonce = state.account_nonce(replace.sender()); - if let Some(txs) = replace.pooled_by_sender { - for tx in txs.iter() { - if nonce == tx.nonce() && *tx.transaction != ***replace.transaction { - nonce = nonce.saturating_add(U256::from(1)) - } else { - break - } - } - } - nonce == replace.nonce() - }; + let state = &self.client; + // calculate readiness based on state nonce + pooled txs from same sender + let is_ready = |replace: &ReplaceTransaction| { + let mut nonce = state.account_nonce(replace.sender()); + if let Some(txs) = replace.pooled_by_sender { + for tx in txs.iter() { + if nonce == tx.nonce() && *tx.transaction != ***replace.transaction { + nonce = nonce.saturating_add(U256::from(1)) + } else { + break; + } + } + } + nonce == replace.nonce() + }; - if !is_ready(new) && is_ready(old) { - // prevent a ready transaction being replace by a non-ready transaction - Choice::RejectNew - } else { - Choice::ReplaceOld - } - } else { - Choice::RejectNew - } - } - } + if !is_ready(new) && is_ready(old) { + // prevent a ready transaction being replace by a non-ready transaction + Choice::RejectNew + } else { + Choice::ReplaceOld + } + } else { + Choice::RejectNew + } + } + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - use std::sync::Arc; - use ethkey::{Random, Generator, KeyPair}; - use pool::tests::tx::{Tx, TxExt}; - use pool::tests::client::TestClient; - use pool::scoring::*; - use pool::{PrioritizationStrategy, VerifiedTransaction}; - use txpool::scoring::Choice::*; - use txpool::ShouldReplace; + use ethkey::{Generator, KeyPair, 
Random}; + use pool::{ + scoring::*, + tests::{ + client::TestClient, + tx::{Tx, TxExt}, + }, + PrioritizationStrategy, VerifiedTransaction, + }; + use std::sync::Arc; + use txpool::{scoring::Choice::*, ShouldReplace}; - fn local_tx_verified(tx: Tx, keypair: &KeyPair) -> VerifiedTransaction { - let mut verified_tx = tx.unsigned().sign(keypair.secret(), None).verified(); - verified_tx.priority = ::pool::Priority::Local; - verified_tx - } + fn local_tx_verified(tx: Tx, keypair: &KeyPair) -> VerifiedTransaction { + let mut verified_tx = tx.unsigned().sign(keypair.secret(), None).verified(); + verified_tx.priority = ::pool::Priority::Local; + verified_tx + } - fn should_replace(replace: &ShouldReplace, old: VerifiedTransaction, new: VerifiedTransaction) -> Choice { - let old_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(old) }; - let new_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(new) }; - let old = ReplaceTransaction::new(&old_tx, Default::default()); - let new = ReplaceTransaction::new(&new_tx, Default::default()); - replace.should_replace(&old, &new) - } + fn should_replace( + replace: &ShouldReplace, + old: VerifiedTransaction, + new: VerifiedTransaction, + ) -> Choice { + let old_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(old), + }; + let new_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(new), + }; + let old = ReplaceTransaction::new(&old_tx, Default::default()); + let new = ReplaceTransaction::new(&new_tx, Default::default()); + replace.should_replace(&old, &new) + } - #[test] - fn should_always_accept_local_transactions_unless_same_sender_and_nonce() { - let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); - let client = TestClient::new().with_nonce(1); - let replace = ReplaceByScoreAndReadiness::new(scoring, client); + #[test] + fn should_always_accept_local_transactions_unless_same_sender_and_nonce() { + let scoring = 
NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); + let client = TestClient::new().with_nonce(1); + let replace = ReplaceByScoreAndReadiness::new(scoring, client); - // same sender txs - let keypair = Random.generate().unwrap(); + // same sender txs + let keypair = Random.generate().unwrap(); - let same_sender_tx1 = local_tx_verified(Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }, &keypair); + let same_sender_tx1 = local_tx_verified( + Tx { + nonce: 1, + gas_price: 1, + ..Default::default() + }, + &keypair, + ); - let same_sender_tx2 = local_tx_verified(Tx { - nonce: 2, - gas_price: 100, - ..Default::default() - }, &keypair); + let same_sender_tx2 = local_tx_verified( + Tx { + nonce: 2, + gas_price: 100, + ..Default::default() + }, + &keypair, + ); - let same_sender_tx3 = local_tx_verified(Tx { - nonce: 2, - gas_price: 200, - ..Default::default() - }, &keypair); + let same_sender_tx3 = local_tx_verified( + Tx { + nonce: 2, + gas_price: 200, + ..Default::default() + }, + &keypair, + ); - // different sender txs - let sender1 = Random.generate().unwrap(); - let different_sender_tx1 = local_tx_verified(Tx { - nonce: 2, - gas_price: 1, - ..Default::default() - }, &sender1); + // different sender txs + let sender1 = Random.generate().unwrap(); + let different_sender_tx1 = local_tx_verified( + Tx { + nonce: 2, + gas_price: 1, + ..Default::default() + }, + &sender1, + ); - let sender2 = Random.generate().unwrap(); - let different_sender_tx2 = local_tx_verified(Tx { - nonce: 1, - gas_price: 10, - ..Default::default() - }, &sender2); + let sender2 = Random.generate().unwrap(); + let different_sender_tx2 = local_tx_verified( + Tx { + nonce: 1, + gas_price: 10, + ..Default::default() + }, + &sender2, + ); - assert_eq!(should_replace(&replace, same_sender_tx1.clone(), same_sender_tx2.clone()), InsertNew); - assert_eq!(should_replace(&replace, same_sender_tx2.clone(), same_sender_tx1.clone()), InsertNew); + assert_eq!( + should_replace(&replace, 
same_sender_tx1.clone(), same_sender_tx2.clone()), + InsertNew + ); + assert_eq!( + should_replace(&replace, same_sender_tx2.clone(), same_sender_tx1.clone()), + InsertNew + ); - assert_eq!(should_replace(&replace, different_sender_tx1.clone(), different_sender_tx2.clone()), InsertNew); - assert_eq!(should_replace(&replace, different_sender_tx2.clone(), different_sender_tx1.clone()), InsertNew); + assert_eq!( + should_replace( + &replace, + different_sender_tx1.clone(), + different_sender_tx2.clone() + ), + InsertNew + ); + assert_eq!( + should_replace( + &replace, + different_sender_tx2.clone(), + different_sender_tx1.clone() + ), + InsertNew + ); - // txs with same sender and nonce - assert_eq!(should_replace(&replace, same_sender_tx2.clone(), same_sender_tx3.clone()), ReplaceOld); - assert_eq!(should_replace(&replace, same_sender_tx3.clone(), same_sender_tx2.clone()), RejectNew); - } + // txs with same sender and nonce + assert_eq!( + should_replace(&replace, same_sender_tx2.clone(), same_sender_tx3.clone()), + ReplaceOld + ); + assert_eq!( + should_replace(&replace, same_sender_tx3.clone(), same_sender_tx2.clone()), + RejectNew + ); + } - #[test] - fn should_replace_same_sender_by_nonce() { - let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); - let client = TestClient::new().with_nonce(1); - let replace = ReplaceByScoreAndReadiness::new(scoring, client); + #[test] + fn should_replace_same_sender_by_nonce() { + let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); + let client = TestClient::new().with_nonce(1); + let replace = ReplaceByScoreAndReadiness::new(scoring, client); - let tx1 = Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }; - let tx2 = Tx { - nonce: 2, - gas_price: 100, - ..Default::default() - }; - let tx3 = Tx { - nonce: 2, - gas_price: 110, - ..Default::default() - }; - let tx4 = Tx { - nonce: 2, - gas_price: 130, - ..Default::default() - }; + let tx1 = Tx { + nonce: 1, + gas_price: 1, + 
..Default::default() + }; + let tx2 = Tx { + nonce: 2, + gas_price: 100, + ..Default::default() + }; + let tx3 = Tx { + nonce: 2, + gas_price: 110, + ..Default::default() + }; + let tx4 = Tx { + nonce: 2, + gas_price: 130, + ..Default::default() + }; - let keypair = Random.generate().unwrap(); - let txs = vec![tx1, tx2, tx3, tx4].into_iter().map(|tx| { - tx.unsigned().sign(keypair.secret(), None).verified() - }).collect::>(); + let keypair = Random.generate().unwrap(); + let txs = vec![tx1, tx2, tx3, tx4] + .into_iter() + .map(|tx| tx.unsigned().sign(keypair.secret(), None).verified()) + .collect::>(); - assert_eq!(should_replace(&replace, txs[0].clone(), txs[1].clone()), RejectNew); - assert_eq!(should_replace(&replace, txs[1].clone(), txs[0].clone()), ReplaceOld); + assert_eq!( + should_replace(&replace, txs[0].clone(), txs[1].clone()), + RejectNew + ); + assert_eq!( + should_replace(&replace, txs[1].clone(), txs[0].clone()), + ReplaceOld + ); - assert_eq!(should_replace(&replace, txs[1].clone(), txs[2].clone()), RejectNew); - assert_eq!(should_replace(&replace, txs[2].clone(), txs[1].clone()), RejectNew); + assert_eq!( + should_replace(&replace, txs[1].clone(), txs[2].clone()), + RejectNew + ); + assert_eq!( + should_replace(&replace, txs[2].clone(), txs[1].clone()), + RejectNew + ); - assert_eq!(should_replace(&replace, txs[1].clone(), txs[3].clone()), ReplaceOld); - assert_eq!(should_replace(&replace, txs[3].clone(), txs[1].clone()), RejectNew); - } + assert_eq!( + should_replace(&replace, txs[1].clone(), txs[3].clone()), + ReplaceOld + ); + assert_eq!( + should_replace(&replace, txs[3].clone(), txs[1].clone()), + RejectNew + ); + } - #[test] - fn should_replace_different_sender_by_priority_and_gas_price() { - // given - let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); - let client = TestClient::new().with_nonce(0); - let replace = ReplaceByScoreAndReadiness::new(scoring, client); + #[test] + fn 
should_replace_different_sender_by_priority_and_gas_price() { + // given + let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); + let client = TestClient::new().with_nonce(0); + let replace = ReplaceByScoreAndReadiness::new(scoring, client); - let tx_regular_low_gas = { - let tx = Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }; - tx.signed().verified() - }; - let tx_regular_high_gas = { - let tx = Tx { - nonce: 2, - gas_price: 10, - ..Default::default() - }; - tx.signed().verified() - }; - let tx_local_low_gas = { - let tx = Tx { - nonce: 2, - gas_price: 1, - ..Default::default() - }; - let mut verified_tx = tx.signed().verified(); - verified_tx.priority = ::pool::Priority::Local; - verified_tx - }; - let tx_local_high_gas = { - let tx = Tx { - nonce: 1, - gas_price: 10, - ..Default::default() - }; - let mut verified_tx = tx.signed().verified(); - verified_tx.priority = ::pool::Priority::Local; - verified_tx - }; + let tx_regular_low_gas = { + let tx = Tx { + nonce: 1, + gas_price: 1, + ..Default::default() + }; + tx.signed().verified() + }; + let tx_regular_high_gas = { + let tx = Tx { + nonce: 2, + gas_price: 10, + ..Default::default() + }; + tx.signed().verified() + }; + let tx_local_low_gas = { + let tx = Tx { + nonce: 2, + gas_price: 1, + ..Default::default() + }; + let mut verified_tx = tx.signed().verified(); + verified_tx.priority = ::pool::Priority::Local; + verified_tx + }; + let tx_local_high_gas = { + let tx = Tx { + nonce: 1, + gas_price: 10, + ..Default::default() + }; + let mut verified_tx = tx.signed().verified(); + verified_tx.priority = ::pool::Priority::Local; + verified_tx + }; - assert_eq!(should_replace(&replace, tx_regular_low_gas.clone(), tx_regular_high_gas.clone()), ReplaceOld); - assert_eq!(should_replace(&replace, tx_regular_high_gas.clone(), tx_regular_low_gas.clone()), RejectNew); + assert_eq!( + should_replace( + &replace, + tx_regular_low_gas.clone(), + tx_regular_high_gas.clone() + ), + ReplaceOld + ); 
+ assert_eq!( + should_replace( + &replace, + tx_regular_high_gas.clone(), + tx_regular_low_gas.clone() + ), + RejectNew + ); - assert_eq!(should_replace(&replace, tx_regular_high_gas.clone(), tx_local_low_gas.clone()), ReplaceOld); - assert_eq!(should_replace(&replace, tx_local_low_gas.clone(), tx_regular_high_gas.clone()), RejectNew); + assert_eq!( + should_replace( + &replace, + tx_regular_high_gas.clone(), + tx_local_low_gas.clone() + ), + ReplaceOld + ); + assert_eq!( + should_replace( + &replace, + tx_local_low_gas.clone(), + tx_regular_high_gas.clone() + ), + RejectNew + ); - assert_eq!(should_replace(&replace, tx_local_low_gas.clone(), tx_local_high_gas.clone()), InsertNew); - assert_eq!(should_replace(&replace, tx_local_high_gas.clone(), tx_regular_low_gas.clone()), RejectNew); - } + assert_eq!( + should_replace( + &replace, + tx_local_low_gas.clone(), + tx_local_high_gas.clone() + ), + InsertNew + ); + assert_eq!( + should_replace( + &replace, + tx_local_high_gas.clone(), + tx_regular_low_gas.clone() + ), + RejectNew + ); + } - #[test] - fn should_not_replace_ready_transaction_with_future_transaction() { - let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); - let client = TestClient::new().with_nonce(1); - let replace = ReplaceByScoreAndReadiness::new(scoring, client); + #[test] + fn should_not_replace_ready_transaction_with_future_transaction() { + let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); + let client = TestClient::new().with_nonce(1); + let replace = ReplaceByScoreAndReadiness::new(scoring, client); - let tx_ready_low_score = { - let tx = Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }; - tx.signed().verified() - }; - let tx_future_high_score = { - let tx = Tx { - nonce: 3, // future nonce - gas_price: 10, - ..Default::default() - }; - tx.signed().verified() - }; + let tx_ready_low_score = { + let tx = Tx { + nonce: 1, + gas_price: 1, + ..Default::default() + }; + tx.signed().verified() + }; + 
let tx_future_high_score = { + let tx = Tx { + nonce: 3, // future nonce + gas_price: 10, + ..Default::default() + }; + tx.signed().verified() + }; - assert_eq!(should_replace(&replace, tx_ready_low_score, tx_future_high_score), RejectNew); - } + assert_eq!( + should_replace(&replace, tx_ready_low_score, tx_future_high_score), + RejectNew + ); + } - #[test] - fn should_compute_readiness_with_pooled_transactions_from_the_same_sender_as_the_existing_transaction() { - let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); - let client = TestClient::new().with_nonce(1); - let replace = ReplaceByScoreAndReadiness::new(scoring, client); + #[test] + fn should_compute_readiness_with_pooled_transactions_from_the_same_sender_as_the_existing_transaction( + ) { + let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); + let client = TestClient::new().with_nonce(1); + let replace = ReplaceByScoreAndReadiness::new(scoring, client); - let old_sender = Random.generate().unwrap(); - let tx_old_ready_1 = { - let tx = Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }; - tx.unsigned().sign(&old_sender.secret(), None).verified() - }; - let tx_old_ready_2 = { - let tx = Tx { - nonce: 2, - gas_price: 1, - ..Default::default() - }; - tx.unsigned().sign(&old_sender.secret(), None).verified() - }; - let tx_old_ready_3 = { - let tx = Tx { - nonce: 3, - gas_price: 1, - ..Default::default() - }; - tx.unsigned().sign(&old_sender.secret(), None).verified() - }; + let old_sender = Random.generate().unwrap(); + let tx_old_ready_1 = { + let tx = Tx { + nonce: 1, + gas_price: 1, + ..Default::default() + }; + tx.unsigned().sign(&old_sender.secret(), None).verified() + }; + let tx_old_ready_2 = { + let tx = Tx { + nonce: 2, + gas_price: 1, + ..Default::default() + }; + tx.unsigned().sign(&old_sender.secret(), None).verified() + }; + let tx_old_ready_3 = { + let tx = Tx { + nonce: 3, + gas_price: 1, + ..Default::default() + }; + 
tx.unsigned().sign(&old_sender.secret(), None).verified() + }; - let new_tx = { - let tx = Tx { - nonce: 3, // future nonce - gas_price: 10, - ..Default::default() - }; - tx.signed().verified() - }; + let new_tx = { + let tx = Tx { + nonce: 3, // future nonce + gas_price: 10, + ..Default::default() + }; + tx.signed().verified() + }; - let old_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(tx_old_ready_3) }; - let pooled_txs = [ - txpool::Transaction { insertion_id: 0, transaction: Arc::new(tx_old_ready_1) }, - txpool::Transaction { insertion_id: 0, transaction: Arc::new(tx_old_ready_2) }, - ]; + let old_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_old_ready_3), + }; + let pooled_txs = [ + txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_old_ready_1), + }, + txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_old_ready_2), + }, + ]; - let new_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(new_tx) }; + let new_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(new_tx), + }; - let old = ReplaceTransaction::new(&old_tx, Some(&pooled_txs)); - let new = ReplaceTransaction::new(&new_tx, Default::default()); + let old = ReplaceTransaction::new(&old_tx, Some(&pooled_txs)); + let new = ReplaceTransaction::new(&new_tx, Default::default()); - assert_eq!(replace.should_replace(&old, &new), RejectNew); - } + assert_eq!(replace.should_replace(&old, &new), RejectNew); + } - #[test] - fn should_compute_readiness_with_pooled_transactions_from_the_same_sender_as_the_new_transaction() { - let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); - let client = TestClient::new().with_nonce(1); - let replace = ReplaceByScoreAndReadiness::new(scoring, client); + #[test] + fn should_compute_readiness_with_pooled_transactions_from_the_same_sender_as_the_new_transaction( + ) { + let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); + let client 
= TestClient::new().with_nonce(1); + let replace = ReplaceByScoreAndReadiness::new(scoring, client); - // current transaction is ready but has a lower gas price than the new one - let old_tx = { - let tx = Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }; - tx.signed().verified() - }; + // current transaction is ready but has a lower gas price than the new one + let old_tx = { + let tx = Tx { + nonce: 1, + gas_price: 1, + ..Default::default() + }; + tx.signed().verified() + }; - let new_sender = Random.generate().unwrap(); - let tx_new_ready_1 = { - let tx = Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }; - tx.unsigned().sign(&new_sender.secret(), None).verified() - }; - let tx_new_ready_2 = { - let tx = Tx { - nonce: 2, - gas_price: 1, - ..Default::default() - }; - tx.unsigned().sign(&new_sender.secret(), None).verified() - }; - let tx_new_ready_3 = { - let tx = Tx { - nonce: 3, - gas_price: 10, // hi - ..Default::default() - }; - tx.unsigned().sign(&new_sender.secret(), None).verified() - }; + let new_sender = Random.generate().unwrap(); + let tx_new_ready_1 = { + let tx = Tx { + nonce: 1, + gas_price: 1, + ..Default::default() + }; + tx.unsigned().sign(&new_sender.secret(), None).verified() + }; + let tx_new_ready_2 = { + let tx = Tx { + nonce: 2, + gas_price: 1, + ..Default::default() + }; + tx.unsigned().sign(&new_sender.secret(), None).verified() + }; + let tx_new_ready_3 = { + let tx = Tx { + nonce: 3, + gas_price: 10, // hi + ..Default::default() + }; + tx.unsigned().sign(&new_sender.secret(), None).verified() + }; - let old_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(old_tx) }; + let old_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(old_tx), + }; - let new_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(tx_new_ready_3) }; - let pooled_txs = [ - txpool::Transaction { insertion_id: 0, transaction: Arc::new(tx_new_ready_1) }, - txpool::Transaction { insertion_id: 0, 
transaction: Arc::new(tx_new_ready_2) }, - ]; + let new_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_new_ready_3), + }; + let pooled_txs = [ + txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_new_ready_1), + }, + txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_new_ready_2), + }, + ]; - let old = ReplaceTransaction::new(&old_tx, None); - let new = ReplaceTransaction::new(&new_tx, Some(&pooled_txs)); + let old = ReplaceTransaction::new(&old_tx, None); + let new = ReplaceTransaction::new(&new_tx, Some(&pooled_txs)); - assert_eq!(replace.should_replace(&old, &new), ReplaceOld); - } + assert_eq!(replace.should_replace(&old, &new), ReplaceOld); + } - #[test] - fn should_accept_local_tx_with_same_sender_and_nonce_with_better_gas_price() { - let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); - let client = TestClient::new().with_nonce(1); - let replace = ReplaceByScoreAndReadiness::new(scoring, client); + #[test] + fn should_accept_local_tx_with_same_sender_and_nonce_with_better_gas_price() { + let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); + let client = TestClient::new().with_nonce(1); + let replace = ReplaceByScoreAndReadiness::new(scoring, client); - // current transaction is ready - let old_tx = { - let tx = Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }; - tx.signed().verified() - }; + // current transaction is ready + let old_tx = { + let tx = Tx { + nonce: 1, + gas_price: 1, + ..Default::default() + }; + tx.signed().verified() + }; - let new_sender = Random.generate().unwrap(); - let tx_new_ready_1 = local_tx_verified(Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }, &new_sender); + let new_sender = Random.generate().unwrap(); + let tx_new_ready_1 = local_tx_verified( + Tx { + nonce: 1, + gas_price: 1, + ..Default::default() + }, + &new_sender, + ); - let tx_new_ready_2 = local_tx_verified(Tx { - nonce: 1, - gas_price: 2, // same 
nonce, higher gas price - ..Default::default() - }, &new_sender); + let tx_new_ready_2 = local_tx_verified( + Tx { + nonce: 1, + gas_price: 2, // same nonce, higher gas price + ..Default::default() + }, + &new_sender, + ); - let old_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(old_tx) }; + let old_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(old_tx), + }; - let new_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(tx_new_ready_2) }; - let pooled_txs = [ - txpool::Transaction { insertion_id: 0, transaction: Arc::new(tx_new_ready_1) }, - ]; + let new_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_new_ready_2), + }; + let pooled_txs = [txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_new_ready_1), + }]; - let old = ReplaceTransaction::new(&old_tx, None); - let new = ReplaceTransaction::new(&new_tx, Some(&pooled_txs)); + let old = ReplaceTransaction::new(&old_tx, None); + let new = ReplaceTransaction::new(&new_tx, Some(&pooled_txs)); - assert_eq!(replace.should_replace(&old, &new), InsertNew); - } + assert_eq!(replace.should_replace(&old, &new), InsertNew); + } - #[test] - fn should_reject_local_tx_with_same_sender_and_nonce_with_worse_gas_price() { - let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); - let client = TestClient::new().with_nonce(1); - let replace = ReplaceByScoreAndReadiness::new(scoring, client); + #[test] + fn should_reject_local_tx_with_same_sender_and_nonce_with_worse_gas_price() { + let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); + let client = TestClient::new().with_nonce(1); + let replace = ReplaceByScoreAndReadiness::new(scoring, client); - // current transaction is ready - let old_tx = { - let tx = Tx { - nonce: 1, - gas_price: 1, - ..Default::default() - }; - tx.signed().verified() - }; + // current transaction is ready + let old_tx = { + let tx = Tx { + nonce: 1, + gas_price: 1, + 
..Default::default() + }; + tx.signed().verified() + }; - let new_sender = Random.generate().unwrap(); - let tx_new_ready_1 = local_tx_verified(Tx { - nonce: 1, - gas_price: 2, - ..Default::default() - }, &new_sender); + let new_sender = Random.generate().unwrap(); + let tx_new_ready_1 = local_tx_verified( + Tx { + nonce: 1, + gas_price: 2, + ..Default::default() + }, + &new_sender, + ); - let tx_new_ready_2 = local_tx_verified(Tx { - nonce: 1, - gas_price: 1, // same nonce, lower gas price - ..Default::default() - }, &new_sender); + let tx_new_ready_2 = local_tx_verified( + Tx { + nonce: 1, + gas_price: 1, // same nonce, lower gas price + ..Default::default() + }, + &new_sender, + ); - let old_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(old_tx) }; + let old_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(old_tx), + }; - let new_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(tx_new_ready_2) }; - let pooled_txs = [ - txpool::Transaction { insertion_id: 0, transaction: Arc::new(tx_new_ready_1) }, - ]; + let new_tx = txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_new_ready_2), + }; + let pooled_txs = [txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(tx_new_ready_1), + }]; - let old = ReplaceTransaction::new(&old_tx, None); - let new = ReplaceTransaction::new(&new_tx, Some(&pooled_txs)); + let old = ReplaceTransaction::new(&old_tx, None); + let new = ReplaceTransaction::new(&new_tx, Some(&pooled_txs)); - assert_eq!(replace.should_replace(&old, &new), RejectNew); - } + assert_eq!(replace.should_replace(&old, &new), RejectNew); + } } diff --git a/miner/src/pool/scoring.rs b/miner/src/pool/scoring.rs index 0360bec35..0b9e912ac 100644 --- a/miner/src/pool/scoring.rs +++ b/miner/src/pool/scoring.rs @@ -29,9 +29,9 @@ use std::cmp; +use super::{verifier, PrioritizationStrategy, ScoredTransaction, VerifiedTransaction}; use ethereum_types::U256; use txpool::{self, scoring}; -use 
super::{verifier, PrioritizationStrategy, VerifiedTransaction, ScoredTransaction}; /// Transaction with the same (sender, nonce) can be replaced only if /// `new_gas_price > old_gas_price + old_gas_price >> SHIFT` @@ -40,7 +40,7 @@ const GAS_PRICE_BUMP_SHIFT: usize = 3; // 2 = 25%, 3 = 12.5%, 4 = 6.25% /// Calculate minimal gas price requirement. #[inline] fn bump_gas_price(old_gp: U256) -> U256 { - old_gp.saturating_add(old_gp >> GAS_PRICE_BUMP_SHIFT) + old_gp.saturating_add(old_gp >> GAS_PRICE_BUMP_SHIFT) } /// Simple, gas-price based scoring for transactions. @@ -51,140 +51,156 @@ fn bump_gas_price(old_gp: U256) -> U256 { pub struct NonceAndGasPrice(pub PrioritizationStrategy); impl NonceAndGasPrice { - /// Decide if the transaction should even be considered into the pool (if the pool is full). - /// - /// Used by Verifier to quickly reject transactions that don't have any chance to get into the pool later on, - /// and save time on more expensive checks like sender recovery, etc. - /// - /// NOTE The method is never called for zero-gas-price transactions or local transactions - /// (such transactions are always considered to the pool and potentially rejected later on) - pub fn should_reject_early(&self, old: &VerifiedTransaction, new: &verifier::Transaction) -> bool { - if old.priority().is_local() { - return true - } + /// Decide if the transaction should even be considered into the pool (if the pool is full). + /// + /// Used by Verifier to quickly reject transactions that don't have any chance to get into the pool later on, + /// and save time on more expensive checks like sender recovery, etc. 
+ /// + /// NOTE The method is never called for zero-gas-price transactions or local transactions + /// (such transactions are always considered to the pool and potentially rejected later on) + pub fn should_reject_early( + &self, + old: &VerifiedTransaction, + new: &verifier::Transaction, + ) -> bool { + if old.priority().is_local() { + return true; + } - &old.transaction.gas_price > new.gas_price() - } + &old.transaction.gas_price > new.gas_price() + } } -impl

txpool::Scoring

for NonceAndGasPrice where P: ScoredTransaction + txpool::VerifiedTransaction { - type Score = U256; - type Event = (); +impl

txpool::Scoring

for NonceAndGasPrice +where + P: ScoredTransaction + txpool::VerifiedTransaction, +{ + type Score = U256; + type Event = (); - fn compare(&self, old: &P, other: &P) -> cmp::Ordering { - old.nonce().cmp(&other.nonce()) - } + fn compare(&self, old: &P, other: &P) -> cmp::Ordering { + old.nonce().cmp(&other.nonce()) + } - fn choose(&self, old: &P, new: &P) -> scoring::Choice { - if old.nonce() != new.nonce() { - return scoring::Choice::InsertNew - } + fn choose(&self, old: &P, new: &P) -> scoring::Choice { + if old.nonce() != new.nonce() { + return scoring::Choice::InsertNew; + } - let old_gp = old.gas_price(); - let new_gp = new.gas_price(); + let old_gp = old.gas_price(); + let new_gp = new.gas_price(); - let min_required_gp = bump_gas_price(*old_gp); + let min_required_gp = bump_gas_price(*old_gp); - match min_required_gp.cmp(&new_gp) { - cmp::Ordering::Greater => scoring::Choice::RejectNew, - _ => scoring::Choice::ReplaceOld, - } - } + match min_required_gp.cmp(&new_gp) { + cmp::Ordering::Greater => scoring::Choice::RejectNew, + _ => scoring::Choice::ReplaceOld, + } + } - fn update_scores(&self, txs: &[txpool::Transaction

], scores: &mut [U256], change: scoring::Change) { - use self::scoring::Change; + fn update_scores( + &self, + txs: &[txpool::Transaction

], + scores: &mut [U256], + change: scoring::Change, + ) { + use self::scoring::Change; - match change { - Change::Culled(_) => {}, - Change::RemovedAt(_) => {} - Change::InsertedAt(i) | Change::ReplacedAt(i) => { - assert!(i < txs.len()); - assert!(i < scores.len()); + match change { + Change::Culled(_) => {} + Change::RemovedAt(_) => {} + Change::InsertedAt(i) | Change::ReplacedAt(i) => { + assert!(i < txs.len()); + assert!(i < scores.len()); - scores[i] = *txs[i].transaction.gas_price(); - let boost = match txs[i].priority() { - super::Priority::Local => 15, - super::Priority::Retracted => 10, - super::Priority::Regular => 0, - }; - scores[i] = scores[i] << boost; - }, - // We are only sending an event in case of penalization. - // So just lower the priority of all non-local transactions. - Change::Event(_) => { - for (score, tx) in scores.iter_mut().zip(txs) { - // Never penalize local transactions. - if !tx.priority().is_local() { - *score = *score >> 3; - } - } - }, - } - } + scores[i] = *txs[i].transaction.gas_price(); + let boost = match txs[i].priority() { + super::Priority::Local => 15, + super::Priority::Retracted => 10, + super::Priority::Regular => 0, + }; + scores[i] = scores[i] << boost; + } + // We are only sending an event in case of penalization. + // So just lower the priority of all non-local transactions. + Change::Event(_) => { + for (score, tx) in scores.iter_mut().zip(txs) { + // Never penalize local transactions. 
+ if !tx.priority().is_local() { + *score = *score >> 3; + } + } + } + } + } - fn should_ignore_sender_limit(&self, new: &P) -> bool { - new.priority().is_local() - } + fn should_ignore_sender_limit(&self, new: &P) -> bool { + new.priority().is_local() + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - use std::sync::Arc; - use pool::tests::tx::{Tx, TxExt}; - use txpool::Scoring; + use pool::tests::tx::{Tx, TxExt}; + use std::sync::Arc; + use txpool::Scoring; - #[test] - fn should_calculate_score_correctly() { - // given - let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); - let (tx1, tx2, tx3) = Tx::default().signed_triple(); - let transactions = vec![tx1, tx2, tx3].into_iter().enumerate().map(|(i, tx)| { - let mut verified = tx.verified(); - verified.priority = match i { - 0 => ::pool::Priority::Local, - 1 => ::pool::Priority::Retracted, - _ => ::pool::Priority::Regular, - }; - txpool::Transaction { - insertion_id: 0, - transaction: Arc::new(verified), - } - }).collect::>(); - let initial_scores = vec![U256::from(0), 0.into(), 0.into()]; + #[test] + fn should_calculate_score_correctly() { + // given + let scoring = NonceAndGasPrice(PrioritizationStrategy::GasPriceOnly); + let (tx1, tx2, tx3) = Tx::default().signed_triple(); + let transactions = vec![tx1, tx2, tx3] + .into_iter() + .enumerate() + .map(|(i, tx)| { + let mut verified = tx.verified(); + verified.priority = match i { + 0 => ::pool::Priority::Local, + 1 => ::pool::Priority::Retracted, + _ => ::pool::Priority::Regular, + }; + txpool::Transaction { + insertion_id: 0, + transaction: Arc::new(verified), + } + }) + .collect::>(); + let initial_scores = vec![U256::from(0), 0.into(), 0.into()]; - // No update required - let mut scores = initial_scores.clone(); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::Culled(0)); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::Culled(1)); - scoring.update_scores(&transactions, &mut *scores, 
scoring::Change::Culled(2)); - assert_eq!(scores, initial_scores); - let mut scores = initial_scores.clone(); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::RemovedAt(0)); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::RemovedAt(1)); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::RemovedAt(2)); - assert_eq!(scores, initial_scores); + // No update required + let mut scores = initial_scores.clone(); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::Culled(0)); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::Culled(1)); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::Culled(2)); + assert_eq!(scores, initial_scores); + let mut scores = initial_scores.clone(); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::RemovedAt(0)); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::RemovedAt(1)); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::RemovedAt(2)); + assert_eq!(scores, initial_scores); - // Compute score at given index - let mut scores = initial_scores.clone(); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::InsertedAt(0)); - assert_eq!(scores, vec![32768.into(), 0.into(), 0.into()]); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::InsertedAt(1)); - assert_eq!(scores, vec![32768.into(), 1024.into(), 0.into()]); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::InsertedAt(2)); - assert_eq!(scores, vec![32768.into(), 1024.into(), 1.into()]); + // Compute score at given index + let mut scores = initial_scores.clone(); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::InsertedAt(0)); + assert_eq!(scores, vec![32768.into(), 0.into(), 0.into()]); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::InsertedAt(1)); + assert_eq!(scores, vec![32768.into(), 1024.into(), 0.into()]); + 
scoring.update_scores(&transactions, &mut *scores, scoring::Change::InsertedAt(2)); + assert_eq!(scores, vec![32768.into(), 1024.into(), 1.into()]); - let mut scores = initial_scores.clone(); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::ReplacedAt(0)); - assert_eq!(scores, vec![32768.into(), 0.into(), 0.into()]); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::ReplacedAt(1)); - assert_eq!(scores, vec![32768.into(), 1024.into(), 0.into()]); - scoring.update_scores(&transactions, &mut *scores, scoring::Change::ReplacedAt(2)); - assert_eq!(scores, vec![32768.into(), 1024.into(), 1.into()]); + let mut scores = initial_scores.clone(); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::ReplacedAt(0)); + assert_eq!(scores, vec![32768.into(), 0.into(), 0.into()]); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::ReplacedAt(1)); + assert_eq!(scores, vec![32768.into(), 1024.into(), 0.into()]); + scoring.update_scores(&transactions, &mut *scores, scoring::Change::ReplacedAt(2)); + assert_eq!(scores, vec![32768.into(), 1024.into(), 1.into()]); - // Check penalization - scoring.update_scores(&transactions, &mut *scores, scoring::Change::Event(())); - assert_eq!(scores, vec![32768.into(), 128.into(), 0.into()]); - } + // Check penalization + scoring.update_scores(&transactions, &mut *scores, scoring::Change::Event(())); + assert_eq!(scores, vec![32768.into(), 128.into(), 0.into()]); + } } diff --git a/miner/src/pool/tests/client.rs b/miner/src/pool/tests/client.rs index ce927c8e9..69c2b1542 100644 --- a/miner/src/pool/tests/client.rs +++ b/miner/src/pool/tests/client.rs @@ -16,139 +16,147 @@ use std::sync::{atomic, Arc}; -use ethereum_types::{U256, H256, Address}; +use ethereum_types::{Address, H256, U256}; use rlp::Rlp; -use types::transaction::{self, Transaction, SignedTransaction, UnverifiedTransaction}; +use types::transaction::{self, SignedTransaction, Transaction, UnverifiedTransaction}; 
-use pool; -use pool::client::AccountDetails; +use pool::{self, client::AccountDetails}; const MAX_TRANSACTION_SIZE: usize = 15 * 1024; #[derive(Debug, Clone)] pub struct TestClient { - verification_invoked: Arc, - account_details: AccountDetails, - gas_required: U256, - is_service_transaction: bool, - local_address: Address, - max_transaction_size: usize, + verification_invoked: Arc, + account_details: AccountDetails, + gas_required: U256, + is_service_transaction: bool, + local_address: Address, + max_transaction_size: usize, } impl Default for TestClient { - fn default() -> Self { - TestClient { - verification_invoked: Default::default(), - account_details: AccountDetails { - nonce: 123.into(), - balance: 63_100.into(), - is_local: false, - }, - gas_required: 21_000.into(), - is_service_transaction: false, - local_address: Default::default(), - max_transaction_size: MAX_TRANSACTION_SIZE, - } - } + fn default() -> Self { + TestClient { + verification_invoked: Default::default(), + account_details: AccountDetails { + nonce: 123.into(), + balance: 63_100.into(), + is_local: false, + }, + gas_required: 21_000.into(), + is_service_transaction: false, + local_address: Default::default(), + max_transaction_size: MAX_TRANSACTION_SIZE, + } + } } impl TestClient { - pub fn new() -> Self { - TestClient::default() - } + pub fn new() -> Self { + TestClient::default() + } - pub fn with_balance>(mut self, balance: T) -> Self { - self.account_details.balance = balance.into(); - self - } + pub fn with_balance>(mut self, balance: T) -> Self { + self.account_details.balance = balance.into(); + self + } - pub fn with_nonce>(mut self, nonce: T) -> Self { - self.account_details.nonce = nonce.into(); - self - } + pub fn with_nonce>(mut self, nonce: T) -> Self { + self.account_details.nonce = nonce.into(); + self + } - pub fn with_gas_required>(mut self, gas_required: T) -> Self { - self.gas_required = gas_required.into(); - self - } + pub fn with_gas_required>(mut self, gas_required: 
T) -> Self { + self.gas_required = gas_required.into(); + self + } - pub fn with_local(mut self, address: &Address) -> Self { - self.local_address = *address; - self - } + pub fn with_local(mut self, address: &Address) -> Self { + self.local_address = *address; + self + } - pub fn with_service_transaction(mut self) -> Self { - self.is_service_transaction = true; - self - } + pub fn with_service_transaction(mut self) -> Self { + self.is_service_transaction = true; + self + } - pub fn verify>(&self, tx: T) -> pool::VerifiedTransaction { - let tx = tx.into(); - pool::VerifiedTransaction { - hash: tx.hash(), - sender: tx.sender(), - priority: pool::Priority::Regular, - transaction: tx, - insertion_id: 1, - } - } + pub fn verify>( + &self, + tx: T, + ) -> pool::VerifiedTransaction { + let tx = tx.into(); + pool::VerifiedTransaction { + hash: tx.hash(), + sender: tx.sender(), + priority: pool::Priority::Regular, + transaction: tx, + insertion_id: 1, + } + } - pub fn was_verification_triggered(&self) -> bool { - self.verification_invoked.load(atomic::Ordering::SeqCst) - } + pub fn was_verification_triggered(&self) -> bool { + self.verification_invoked.load(atomic::Ordering::SeqCst) + } } impl pool::client::Client for TestClient { - fn transaction_already_included(&self, _hash: &H256) -> bool { - false - } + fn transaction_already_included(&self, _hash: &H256) -> bool { + false + } - fn verify_transaction_basic(&self, _tx: &UnverifiedTransaction) - -> Result<(), transaction::Error> - { - Ok(()) - } + fn verify_transaction_basic( + &self, + _tx: &UnverifiedTransaction, + ) -> Result<(), transaction::Error> { + Ok(()) + } - fn verify_transaction(&self, tx: UnverifiedTransaction) - -> Result - { - self.verification_invoked.store(true, atomic::Ordering::SeqCst); - Ok(SignedTransaction::new(tx)?) 
- } + fn verify_transaction( + &self, + tx: UnverifiedTransaction, + ) -> Result { + self.verification_invoked + .store(true, atomic::Ordering::SeqCst); + Ok(SignedTransaction::new(tx)?) + } - fn account_details(&self, address: &Address) -> AccountDetails { - let mut details = self.account_details.clone(); - if address == &self.local_address { - details.is_local = true; - } + fn account_details(&self, address: &Address) -> AccountDetails { + let mut details = self.account_details.clone(); + if address == &self.local_address { + details.is_local = true; + } - details - } + details + } - fn required_gas(&self, _tx: &Transaction) -> U256 { - self.gas_required - } + fn required_gas(&self, _tx: &Transaction) -> U256 { + self.gas_required + } - fn transaction_type(&self, _tx: &SignedTransaction) -> pool::client::TransactionType { - if self.is_service_transaction { - pool::client::TransactionType::Service - } else { - pool::client::TransactionType::Regular - } - } - - fn decode_transaction(&self, transaction: &[u8]) -> Result { - let rlp = Rlp::new(&transaction); - if rlp.as_raw().len() > self.max_transaction_size { - return Err(transaction::Error::TooBig) - } - rlp.as_val().map_err(|e| transaction::Error::InvalidRlp(e.to_string())) - } + fn transaction_type(&self, _tx: &SignedTransaction) -> pool::client::TransactionType { + if self.is_service_transaction { + pool::client::TransactionType::Service + } else { + pool::client::TransactionType::Regular + } + } + fn decode_transaction( + &self, + transaction: &[u8], + ) -> Result { + let rlp = Rlp::new(&transaction); + if rlp.as_raw().len() > self.max_transaction_size { + return Err(transaction::Error::TooBig); + } + rlp.as_val() + .map_err(|e| transaction::Error::InvalidRlp(e.to_string())) + } } impl pool::client::NonceClient for TestClient { - fn account_nonce(&self, _address: &Address) -> U256 { - self.account_details.nonce - } + fn account_nonce(&self, _address: &Address) -> U256 { + self.account_details.nonce + } } diff 
--git a/miner/src/pool/tests/mod.rs b/miner/src/pool/tests/mod.rs index 1df1be4ce..ea7739a10 100644 --- a/miner/src/pool/tests/mod.rs +++ b/miner/src/pool/tests/mod.rs @@ -15,16 +15,18 @@ // along with Parity Ethereum. If not, see . use ethereum_types::U256; -use types::transaction::{self, PendingTransaction}; use txpool; +use types::transaction::{self, PendingTransaction}; -use pool::{verifier, TransactionQueue, PrioritizationStrategy, PendingSettings, PendingOrdering}; +use pool::{verifier, PendingOrdering, PendingSettings, PrioritizationStrategy, TransactionQueue}; -pub mod tx; pub mod client; +pub mod tx; -use self::tx::{Tx, TxExt, PairExt}; -use self::client::TestClient; +use self::{ + client::TestClient, + tx::{PairExt, Tx, TxExt}, +}; // max mem for 3 transaction, this is relative // to the global use allocator, the value is currently @@ -33,1044 +35,1181 @@ use self::client::TestClient; const TEST_QUEUE_MAX_MEM: usize = 100; fn new_queue() -> TransactionQueue { - TransactionQueue::new( - txpool::Options { - max_count: 3, - max_per_sender: 3, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 1.into(), - block_gas_limit: 1_000_000.into(), - tx_gas_limit: 1_000_000.into(), - no_early_reject: false, - }, - PrioritizationStrategy::GasPriceOnly, - ) + TransactionQueue::new( + txpool::Options { + max_count: 3, + max_per_sender: 3, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 1.into(), + block_gas_limit: 1_000_000.into(), + tx_gas_limit: 1_000_000.into(), + no_early_reject: false, + }, + PrioritizationStrategy::GasPriceOnly, + ) } #[test] fn should_return_correct_nonces_when_dropped_because_of_limit() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 3, - max_per_sender: 1, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 1.into(), - block_gas_limit: 1_000_000.into(), - tx_gas_limit: 1_000_000.into(), - no_early_reject: false, 
- }, - PrioritizationStrategy::GasPriceOnly, - ); - let (tx1, tx2) = Tx::gas_price(2).signed_pair(); - let sender = tx1.sender(); - let nonce = tx1.nonce; + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 3, + max_per_sender: 1, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 1.into(), + block_gas_limit: 1_000_000.into(), + tx_gas_limit: 1_000_000.into(), + no_early_reject: false, + }, + PrioritizationStrategy::GasPriceOnly, + ); + let (tx1, tx2) = Tx::gas_price(2).signed_pair(); + let sender = tx1.sender(); + let nonce = tx1.nonce; - // when - let r1 = txq.import(TestClient::new(), vec![tx1].retracted()); - let r2 = txq.import(TestClient::new(), vec![tx2].retracted()); - assert_eq!(r1, vec![Ok(())]); - assert_eq!(r2, vec![Err(transaction::Error::LimitReached)]); - assert_eq!(txq.status().status.transaction_count, 1); + // when + let r1 = txq.import(TestClient::new(), vec![tx1].retracted()); + let r2 = txq.import(TestClient::new(), vec![tx2].retracted()); + assert_eq!(r1, vec![Ok(())]); + assert_eq!(r2, vec![Err(transaction::Error::LimitReached)]); + assert_eq!(txq.status().status.transaction_count, 1); - // then - assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 1)); + // then + assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 1)); - // when - let tx1 = Tx::gas_price(2).signed(); - let tx2 = Tx::gas_price(2).signed(); - let sender = tx2.sender(); - let tx3 = Tx::gas_price(1).signed(); - let tx4 = Tx::gas_price(3).signed(); - let res = txq.import(TestClient::new(), vec![tx1, tx2].retracted()); - let res2 = txq.import(TestClient::new(), vec![tx3, tx4].retracted()); + // when + let tx1 = Tx::gas_price(2).signed(); + let tx2 = Tx::gas_price(2).signed(); + let sender = tx2.sender(); + let tx3 = Tx::gas_price(1).signed(); + let tx4 = Tx::gas_price(3).signed(); + let res = txq.import(TestClient::new(), vec![tx1, tx2].retracted()); + let res2 = 
txq.import(TestClient::new(), vec![tx3, tx4].retracted()); - // then - assert_eq!(res, vec![Ok(()), Ok(())]); - assert_eq!(res2, vec![ - // The error here indicates reaching the limit - // and minimal effective gas price taken into account. - Err(transaction::Error::TooCheapToReplace { prev: Some(2.into()), new: Some(1.into()) }), - Ok(()) - ]); - assert_eq!(txq.status().status.transaction_count, 3); - // tx2 transaction got dropped because of limit - // tx1 and tx1' are kept, because they have lower insertion_ids so they are preferred. - assert_eq!(txq.next_nonce(TestClient::new(), &sender), None); + // then + assert_eq!(res, vec![Ok(()), Ok(())]); + assert_eq!( + res2, + vec![ + // The error here indicates reaching the limit + // and minimal effective gas price taken into account. + Err(transaction::Error::TooCheapToReplace { + prev: Some(2.into()), + new: Some(1.into()) + }), + Ok(()) + ] + ); + assert_eq!(txq.status().status.transaction_count, 3); + // tx2 transaction got dropped because of limit + // tx1 and tx1' are kept, because they have lower insertion_ids so they are preferred. 
+ assert_eq!(txq.next_nonce(TestClient::new(), &sender), None); } #[test] fn should_never_drop_local_transactions_from_different_senders() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 3, - max_per_sender: 1, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 1.into(), - block_gas_limit: 1_000_000.into(), - tx_gas_limit: 1_000_000.into(), - no_early_reject: false, - }, - PrioritizationStrategy::GasPriceOnly, - ); - let (tx1, tx2) = Tx::gas_price(2).signed_pair(); - let sender = tx1.sender(); - let nonce = tx1.nonce; + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 3, + max_per_sender: 1, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 1.into(), + block_gas_limit: 1_000_000.into(), + tx_gas_limit: 1_000_000.into(), + no_early_reject: false, + }, + PrioritizationStrategy::GasPriceOnly, + ); + let (tx1, tx2) = Tx::gas_price(2).signed_pair(); + let sender = tx1.sender(); + let nonce = tx1.nonce; - // when - let r1 = txq.import(TestClient::new(), vec![tx1].local()); - let r2 = txq.import(TestClient::new(), vec![tx2].local()); - assert_eq!(r1, vec![Ok(())]); - assert_eq!(r2, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 2); + // when + let r1 = txq.import(TestClient::new(), vec![tx1].local()); + let r2 = txq.import(TestClient::new(), vec![tx2].local()); + assert_eq!(r1, vec![Ok(())]); + assert_eq!(r2, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 2); - // then - assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 2)); + // then + assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 2)); - // when - let tx1 = Tx::gas_price(2).signed(); - let tx2 = Tx::gas_price(2).signed(); - let tx3 = Tx::gas_price(1).signed(); - let tx4 = Tx::gas_price(3).signed(); - let res = txq.import(TestClient::new(), vec![tx1, tx2].local()); - let res2 = txq.import(TestClient::new(), vec![tx3, 
tx4].local()); + // when + let tx1 = Tx::gas_price(2).signed(); + let tx2 = Tx::gas_price(2).signed(); + let tx3 = Tx::gas_price(1).signed(); + let tx4 = Tx::gas_price(3).signed(); + let res = txq.import(TestClient::new(), vec![tx1, tx2].local()); + let res2 = txq.import(TestClient::new(), vec![tx3, tx4].local()); - // then - assert_eq!(res, vec![Ok(()), Ok(())]); - assert_eq!(res2, vec![Ok(()), Ok(())]); - assert_eq!(txq.status().status.transaction_count, 6); - assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 2)); + // then + assert_eq!(res, vec![Ok(()), Ok(())]); + assert_eq!(res2, vec![Ok(()), Ok(())]); + assert_eq!(txq.status().status.transaction_count, 6); + assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 2)); } #[test] fn should_handle_same_transaction_imported_twice_with_different_state_nonces() { - // given - let txq = new_queue(); - let (tx, tx2) = Tx::default().signed_replacement(); - let hash = tx2.hash(); - let client = TestClient::new().with_nonce(122); + // given + let txq = new_queue(); + let (tx, tx2) = Tx::default().signed_replacement(); + let hash = tx2.hash(); + let client = TestClient::new().with_nonce(122); - // First insert one transaction to future - let res = txq.import(client.clone(), vec![tx].local()); - assert_eq!(res, vec![Ok(())]); - // next_nonce === None -> transaction is in future - assert_eq!(txq.next_nonce(client.clone(), &tx2.sender()), None); + // First insert one transaction to future + let res = txq.import(client.clone(), vec![tx].local()); + assert_eq!(res, vec![Ok(())]); + // next_nonce === None -> transaction is in future + assert_eq!(txq.next_nonce(client.clone(), &tx2.sender()), None); - // now import second transaction to current - let res = txq.import(TestClient::new(), vec![tx2.local()]); + // now import second transaction to current + let res = txq.import(TestClient::new(), vec![tx2.local()]); - // and then there should be only one transaction in current (the one with higher 
gas_price) - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 1); - let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); - assert_eq!(top[0].hash, hash); + // and then there should be only one transaction in current (the one with higher gas_price) + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 1); + let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + assert_eq!(top[0].hash, hash); } #[test] fn should_move_all_transactions_from_future() { - // given - let txq = new_queue(); - let txs = Tx::default().signed_pair(); - let (hash, hash2) = txs.hash(); - let (tx, tx2) = txs; - let client = TestClient::new().with_nonce(122); + // given + let txq = new_queue(); + let txs = Tx::default().signed_pair(); + let (hash, hash2) = txs.hash(); + let (tx, tx2) = txs; + let client = TestClient::new().with_nonce(122); - // First insert one transaction to future - let res = txq.import(client.clone(), vec![tx.local()]); - assert_eq!(res, vec![Ok(())]); - // next_nonce === None -> transaction is in future - assert_eq!(txq.next_nonce(client.clone(), &tx2.sender()), None); + // First insert one transaction to future + let res = txq.import(client.clone(), vec![tx.local()]); + assert_eq!(res, vec![Ok(())]); + // next_nonce === None -> transaction is in future + assert_eq!(txq.next_nonce(client.clone(), &tx2.sender()), None); - // now import second transaction to current - let res = txq.import(client.clone(), vec![tx2.local()]); + // now import second transaction to current + let res = txq.import(client.clone(), vec![tx2.local()]); - // then - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 2); - let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); - assert_eq!(top[0].hash, hash); - assert_eq!(top[1].hash, hash2); + // then + assert_eq!(res, vec![Ok(())]); + 
assert_eq!(txq.status().status.transaction_count, 2); + let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + assert_eq!(top[0].hash, hash); + assert_eq!(top[1].hash, hash2); } #[test] fn should_drop_transactions_from_senders_without_balance() { - // given - let txq = new_queue(); - let tx = Tx::default().signed(); - let client = TestClient::new().with_balance(1); + // given + let txq = new_queue(); + let tx = Tx::default().signed(); + let client = TestClient::new().with_balance(1); - // when - let res = txq.import(client, vec![tx.local()]); + // when + let res = txq.import(client, vec![tx.local()]); - // then - assert_eq!(res, vec![Err(transaction::Error::InsufficientBalance { - balance: U256::from(1), - cost: U256::from(21_100), - })]); - assert_eq!(txq.status().status.transaction_count, 0); + // then + assert_eq!( + res, + vec![Err(transaction::Error::InsufficientBalance { + balance: U256::from(1), + cost: U256::from(21_100), + })] + ); + assert_eq!(txq.status().status.transaction_count, 0); } #[test] fn should_not_import_transaction_below_min_gas_price_threshold_if_external() { - // given - let txq = new_queue(); - let tx = Tx::default(); - txq.set_verifier_options(verifier::Options { - minimal_gas_price: 3.into(), - ..Default::default() - }); + // given + let txq = new_queue(); + let tx = Tx::default(); + txq.set_verifier_options(verifier::Options { + minimal_gas_price: 3.into(), + ..Default::default() + }); - // when - let res = txq.import(TestClient::new(), vec![tx.signed().unverified()]); + // when + let res = txq.import(TestClient::new(), vec![tx.signed().unverified()]); - // then - assert_eq!(res, vec![Err(transaction::Error::InsufficientGasPrice { - minimal: U256::from(3), - got: U256::from(1), - })]); - assert_eq!(txq.status().status.transaction_count, 0); + // then + assert_eq!( + res, + vec![Err(transaction::Error::InsufficientGasPrice { + minimal: U256::from(3), + got: U256::from(1), + })] + ); + 
assert_eq!(txq.status().status.transaction_count, 0); } #[test] fn should_import_transaction_below_min_gas_price_threshold_if_local() { - // given - let txq = new_queue(); - let tx = Tx::default(); - txq.set_verifier_options(verifier::Options { - minimal_gas_price: 3.into(), - ..Default::default() - }); + // given + let txq = new_queue(); + let tx = Tx::default(); + txq.set_verifier_options(verifier::Options { + minimal_gas_price: 3.into(), + ..Default::default() + }); - // when - let res = txq.import(TestClient::new(), vec![tx.signed().local()]); + // when + let res = txq.import(TestClient::new(), vec![tx.signed().local()]); - // then - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 1); + // then + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 1); } #[test] fn should_import_txs_from_same_sender() { - // given - let txq = new_queue(); + // given + let txq = new_queue(); - let txs = Tx::default().signed_pair(); - let (hash, hash2) = txs.hash(); + let txs = Tx::default().signed_pair(); + let (hash, hash2) = txs.hash(); - // when - txq.import(TestClient::new(), txs.local().into_vec()); + // when + txq.import(TestClient::new(), txs.local().into_vec()); - // then - let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0 ,0)); - assert_eq!(top[0].hash, hash); - assert_eq!(top[1].hash, hash2); - assert_eq!(top.len(), 2); + // then + let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + assert_eq!(top[0].hash, hash); + assert_eq!(top[1].hash, hash2); + assert_eq!(top.len(), 2); } #[test] fn should_prioritize_local_transactions_within_same_nonce_height() { - // given - let txq = new_queue(); - let tx = Tx::default().signed(); - // the second one has same nonce but higher `gas_price` - let tx2 = Tx::gas_price(2).signed(); - let (hash, hash2) = (tx.hash(), tx2.hash()); - let client = TestClient::new().with_local(&tx.sender()); + // given + let txq = 
new_queue(); + let tx = Tx::default().signed(); + // the second one has same nonce but higher `gas_price` + let tx2 = Tx::gas_price(2).signed(); + let (hash, hash2) = (tx.hash(), tx2.hash()); + let client = TestClient::new().with_local(&tx.sender()); - // when - // first insert the one with higher gas price - let res = txq.import(client.clone(), vec![tx.local(), tx2.unverified()]); - assert_eq!(res, vec![Ok(()), Ok(())]); + // when + // first insert the one with higher gas price + let res = txq.import(client.clone(), vec![tx.local(), tx2.unverified()]); + assert_eq!(res, vec![Ok(()), Ok(())]); - // then - let top = txq.pending(client, PendingSettings::all_prioritized(0, 0)); - assert_eq!(top[0].hash, hash); // local should be first - assert_eq!(top[1].hash, hash2); - assert_eq!(top.len(), 2); + // then + let top = txq.pending(client, PendingSettings::all_prioritized(0, 0)); + assert_eq!(top[0].hash, hash); // local should be first + assert_eq!(top[1].hash, hash2); + assert_eq!(top.len(), 2); } #[test] fn should_prioritize_reimported_transactions_within_same_nonce_height() { - // given - let txq = new_queue(); - let tx = Tx::default().signed(); - // the second one has same nonce but higher `gas_price` - let tx2 = Tx::gas_price(2).signed(); - let (hash, hash2) = (tx.hash(), tx2.hash()); + // given + let txq = new_queue(); + let tx = Tx::default().signed(); + // the second one has same nonce but higher `gas_price` + let tx2 = Tx::gas_price(2).signed(); + let (hash, hash2) = (tx.hash(), tx2.hash()); - // when - // first insert local one with higher gas price - // then the one with lower gas price, but from retracted block - let res = txq.import(TestClient::new(), vec![tx2.unverified(), tx.retracted()]); - assert_eq!(res, vec![Ok(()), Ok(())]); + // when + // first insert local one with higher gas price + // then the one with lower gas price, but from retracted block + let res = txq.import(TestClient::new(), vec![tx2.unverified(), tx.retracted()]); + assert_eq!(res, 
vec![Ok(()), Ok(())]); - // then - let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); - assert_eq!(top[0].hash, hash); // retracted should be first - assert_eq!(top[1].hash, hash2); - assert_eq!(top.len(), 2); + // then + let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + assert_eq!(top[0].hash, hash); // retracted should be first + assert_eq!(top[1].hash, hash2); + assert_eq!(top.len(), 2); } #[test] fn should_not_prioritize_local_transactions_with_different_nonce_height() { - // given - let txq = new_queue(); - let txs = Tx::default().signed_pair(); - let (hash, hash2) = txs.hash(); - let (tx, tx2) = txs; + // given + let txq = new_queue(); + let txs = Tx::default().signed_pair(); + let (hash, hash2) = txs.hash(); + let (tx, tx2) = txs; - // when - let res = txq.import(TestClient::new(), vec![tx.unverified(), tx2.local()]); - assert_eq!(res, vec![Ok(()), Ok(())]); + // when + let res = txq.import(TestClient::new(), vec![tx.unverified(), tx2.local()]); + assert_eq!(res, vec![Ok(()), Ok(())]); - // then - let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); - assert_eq!(top[0].hash, hash); - assert_eq!(top[1].hash, hash2); - assert_eq!(top.len(), 2); + // then + let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + assert_eq!(top[0].hash, hash); + assert_eq!(top[1].hash, hash2); + assert_eq!(top.len(), 2); } #[test] fn should_put_transaction_to_futures_if_gap_detected() { - // given - let txq = new_queue(); - let (tx, _, tx2) = Tx::default().signed_triple(); - let hash = tx.hash(); + // given + let txq = new_queue(); + let (tx, _, tx2) = Tx::default().signed_triple(); + let hash = tx.hash(); - // when - let res = txq.import(TestClient::new(), vec![tx, tx2].local()); + // when + let res = txq.import(TestClient::new(), vec![tx, tx2].local()); - // then - assert_eq!(res, vec![Ok(()), Ok(())]); - let top = txq.pending(TestClient::new(), 
PendingSettings::all_prioritized(0, 0)); - assert_eq!(top.len(), 1); - assert_eq!(top[0].hash, hash); + // then + assert_eq!(res, vec![Ok(()), Ok(())]); + let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + assert_eq!(top.len(), 1); + assert_eq!(top[0].hash, hash); } #[test] fn should_handle_min_block() { - // given - let txq = new_queue(); + // given + let txq = new_queue(); - let (tx, tx2) = Tx::default().signed_pair(); + let (tx, tx2) = Tx::default().signed_pair(); - // when - let res = txq.import(TestClient::new(), vec![ - verifier::Transaction::Local(PendingTransaction::new(tx, transaction::Condition::Number(1).into())), - tx2.local() - ]); - assert_eq!(res, vec![Ok(()), Ok(())]); + // when + let res = txq.import( + TestClient::new(), + vec![ + verifier::Transaction::Local(PendingTransaction::new( + tx, + transaction::Condition::Number(1).into(), + )), + tx2.local(), + ], + ); + assert_eq!(res, vec![Ok(()), Ok(())]); - // then - let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); - assert_eq!(top.len(), 0); - let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(1, 0)); - assert_eq!(top.len(), 2); + // then + let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + assert_eq!(top.len(), 0); + let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(1, 0)); + assert_eq!(top.len(), 2); } #[test] fn should_correctly_update_futures_when_removing() { - // given - let txq = new_queue(); - let txs= Tx::default().signed_pair(); + // given + let txq = new_queue(); + let txs = Tx::default().signed_pair(); - let res = txq.import(TestClient::new().with_nonce(121), txs.local().into_vec()); - assert_eq!(res, vec![Ok(()), Ok(())]); - assert_eq!(txq.status().status.transaction_count, 2); + let res = txq.import(TestClient::new().with_nonce(121), txs.local().into_vec()); + assert_eq!(res, vec![Ok(()), Ok(())]); + 
assert_eq!(txq.status().status.transaction_count, 2); - // when - txq.cull(TestClient::new().with_nonce(125)); - // should remove both transactions since they are stalled + // when + txq.cull(TestClient::new().with_nonce(125)); + // should remove both transactions since they are stalled - // then - assert_eq!(txq.status().status.transaction_count, 0); + // then + assert_eq!(txq.status().status.transaction_count, 0); } #[test] fn should_move_transactions_if_gap_filled() { - // given - let txq = new_queue(); - let (tx, tx1, tx2) = Tx::default().signed_triple(); + // given + let txq = new_queue(); + let (tx, tx1, tx2) = Tx::default().signed_triple(); - let res = txq.import(TestClient::new(), vec![tx, tx2].local()); - assert_eq!(res, vec![Ok(()), Ok(())]); - assert_eq!(txq.status().status.transaction_count, 2); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 1); + let res = txq.import(TestClient::new(), vec![tx, tx2].local()); + assert_eq!(res, vec![Ok(()), Ok(())]); + assert_eq!(txq.status().status.transaction_count, 2); + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 1 + ); - // when - let res = txq.import(TestClient::new(), vec![tx1.local()]); - assert_eq!(res, vec![Ok(())]); + // when + let res = txq.import(TestClient::new(), vec![tx1.local()]); + assert_eq!(res, vec![Ok(())]); - // then - assert_eq!(txq.status().status.transaction_count, 3); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 3); + // then + assert_eq!(txq.status().status.transaction_count, 3); + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 3 + ); } #[test] fn should_remove_transaction() { - // given - let txq = new_queue(); - let (tx, _, tx2) = Tx::default().signed_triple(); + // given + let txq = new_queue(); + let (tx, _, tx2) = Tx::default().signed_triple(); - let res = txq.import(TestClient::default(), vec![tx, 
tx2].local()); - assert_eq!(res, vec![Ok(()), Ok(())]); - assert_eq!(txq.status().status.transaction_count, 2); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 1); + let res = txq.import(TestClient::default(), vec![tx, tx2].local()); + assert_eq!(res, vec![Ok(()), Ok(())]); + assert_eq!(txq.status().status.transaction_count, 2); + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 1 + ); - // when - txq.cull(TestClient::new().with_nonce(124)); - assert_eq!(txq.status().status.transaction_count, 1); - assert_eq!(txq.pending(TestClient::new().with_nonce(125), PendingSettings::all_prioritized(0, 0)).len(), 1); - txq.cull(TestClient::new().with_nonce(126)); + // when + txq.cull(TestClient::new().with_nonce(124)); + assert_eq!(txq.status().status.transaction_count, 1); + assert_eq!( + txq.pending( + TestClient::new().with_nonce(125), + PendingSettings::all_prioritized(0, 0) + ) + .len(), + 1 + ); + txq.cull(TestClient::new().with_nonce(126)); - // then - assert_eq!(txq.status().status.transaction_count, 0); + // then + assert_eq!(txq.status().status.transaction_count, 0); } #[test] fn should_move_transactions_to_future_if_gap_introduced() { - // given - let txq = new_queue(); - let (tx, tx2) = Tx::default().signed_pair(); - let hash = tx.hash(); - let tx3 = Tx::default().signed(); + // given + let txq = new_queue(); + let (tx, tx2) = Tx::default().signed_pair(); + let hash = tx.hash(); + let tx3 = Tx::default().signed(); - let res = txq.import(TestClient::new(), vec![tx3, tx2].local()); - assert_eq!(res, vec![Ok(()), Ok(())]); - assert_eq!(txq.status().status.transaction_count, 2); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 1); + let res = txq.import(TestClient::new(), vec![tx3, tx2].local()); + assert_eq!(res, vec![Ok(()), Ok(())]); + assert_eq!(txq.status().status.transaction_count, 2); + assert_eq!( + txq.pending(TestClient::new(), 
PendingSettings::all_prioritized(0, 0)) + .len(), + 1 + ); - let res = txq.import(TestClient::new(), vec![tx].local()); - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 3); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 3); + let res = txq.import(TestClient::new(), vec![tx].local()); + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 3); + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 3 + ); - // when - txq.remove(vec![&hash], true); + // when + txq.remove(vec![&hash], true); - // then - assert_eq!(txq.status().status.transaction_count, 2); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 1); + // then + assert_eq!(txq.status().status.transaction_count, 2); + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 1 + ); } #[test] fn should_clear_queue() { - // given - let txq = new_queue(); - let txs = Tx::default().signed_pair(); + // given + let txq = new_queue(); + let txs = Tx::default().signed_pair(); - // add - txq.import(TestClient::new(), txs.local().into_vec()); - assert_eq!(txq.status().status.transaction_count, 2); + // add + txq.import(TestClient::new(), txs.local().into_vec()); + assert_eq!(txq.status().status.transaction_count, 2); - // when - txq.clear(); + // when + txq.clear(); - // then - assert_eq!(txq.status().status.transaction_count, 0); + // then + assert_eq!(txq.status().status.transaction_count, 0); } #[test] fn should_prefer_current_transactions_when_hitting_the_limit() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 1, - max_per_sender: 2, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 1.into(), - block_gas_limit: 1_000_000.into(), - tx_gas_limit: 1_000_000.into(), - no_early_reject: false, - }, - 
PrioritizationStrategy::GasPriceOnly, - ); - let (tx, tx2) = Tx::default().signed_pair(); - let hash = tx.hash(); - let sender = tx.sender(); + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 1, + max_per_sender: 2, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 1.into(), + block_gas_limit: 1_000_000.into(), + tx_gas_limit: 1_000_000.into(), + no_early_reject: false, + }, + PrioritizationStrategy::GasPriceOnly, + ); + let (tx, tx2) = Tx::default().signed_pair(); + let hash = tx.hash(); + let sender = tx.sender(); - let res = txq.import(TestClient::new(), vec![tx2.unverified()]); - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 1); + let res = txq.import(TestClient::new(), vec![tx2.unverified()]); + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 1); - // when - let res = txq.import(TestClient::new(), vec![tx.unverified()]); + // when + let res = txq.import(TestClient::new(), vec![tx.unverified()]); - // then - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 1); + // then + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 1); - let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); - assert_eq!(top.len(), 1); - assert_eq!(top[0].hash, hash); - assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(124.into())); + let top = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + assert_eq!(top.len(), 1); + assert_eq!(top[0].hash, hash); + assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(124.into())); } #[test] fn should_drop_transactions_with_old_nonces() { - let txq = new_queue(); - let tx = Tx::default().signed(); + let txq = new_queue(); + let tx = Tx::default().signed(); - // when - let res = txq.import(TestClient::new().with_nonce(125), vec![tx.unverified()]); + // when + let res = 
txq.import(TestClient::new().with_nonce(125), vec![tx.unverified()]); - // then - assert_eq!(res, vec![Err(transaction::Error::Old)]); - assert_eq!(txq.status().status.transaction_count, 0); + // then + assert_eq!(res, vec![Err(transaction::Error::Old)]); + assert_eq!(txq.status().status.transaction_count, 0); } #[test] fn should_not_insert_same_transaction_twice() { - // given - let txq = new_queue(); - let (_tx1, tx2) = Tx::default().signed_pair(); - let res = txq.import(TestClient::new(), vec![tx2.clone().local()]); - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 1); + // given + let txq = new_queue(); + let (_tx1, tx2) = Tx::default().signed_pair(); + let res = txq.import(TestClient::new(), vec![tx2.clone().local()]); + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 1); - // when - let res = txq.import(TestClient::new(), vec![tx2.local()]); + // when + let res = txq.import(TestClient::new(), vec![tx2.local()]); - // then - assert_eq!(res, vec![Err(transaction::Error::AlreadyImported)]); - assert_eq!(txq.status().status.transaction_count, 1); + // then + assert_eq!(res, vec![Err(transaction::Error::AlreadyImported)]); + assert_eq!(txq.status().status.transaction_count, 1); } #[test] fn should_accept_same_transaction_twice_if_removed() { - // given - let txq = new_queue(); - let txs = Tx::default().signed_pair(); - let (tx1, _) = txs.clone(); - let (hash, _) = txs.hash(); + // given + let txq = new_queue(); + let txs = Tx::default().signed_pair(); + let (tx1, _) = txs.clone(); + let (hash, _) = txs.hash(); - let res = txq.import(TestClient::new(), txs.local().into_vec()); - assert_eq!(res, vec![Ok(()), Ok(())]); - assert_eq!(txq.status().status.transaction_count, 2); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 2); + let res = txq.import(TestClient::new(), txs.local().into_vec()); + assert_eq!(res, vec![Ok(()), Ok(())]); + 
assert_eq!(txq.status().status.transaction_count, 2); + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 2 + ); - // when - txq.remove(vec![&hash], true); - assert_eq!(txq.status().status.transaction_count, 1); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 0); + // when + txq.remove(vec![&hash], true); + assert_eq!(txq.status().status.transaction_count, 1); + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 0 + ); - let res = txq.import(TestClient::new(), vec![tx1].local()); - assert_eq!(res, vec![Ok(())]); + let res = txq.import(TestClient::new(), vec![tx1].local()); + assert_eq!(res, vec![Ok(())]); - // then - assert_eq!(txq.status().status.transaction_count, 2); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 2); + // then + assert_eq!(txq.status().status.transaction_count, 2); + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 2 + ); } #[test] fn should_not_replace_same_transaction_if_the_fee_is_less_than_minimal_bump() { - // given - let txq = new_queue(); - let (tx, tx2) = Tx::gas_price(20).signed_replacement(); - let (tx3, tx4) = Tx::gas_price(1).signed_replacement(); - let client = TestClient::new().with_balance(1_000_000); + // given + let txq = new_queue(); + let (tx, tx2) = Tx::gas_price(20).signed_replacement(); + let (tx3, tx4) = Tx::gas_price(1).signed_replacement(); + let client = TestClient::new().with_balance(1_000_000); - // when - let res = txq.import(client.clone(), vec![tx, tx3].local()); - assert_eq!(res, vec![Ok(()), Ok(())]); + // when + let res = txq.import(client.clone(), vec![tx, tx3].local()); + assert_eq!(res, vec![Ok(()), Ok(())]); - let res = txq.import(client.clone(), vec![tx2, tx4].local()); + let res = txq.import(client.clone(), vec![tx2, tx4].local()); - // then - assert_eq!(res, 
vec![Err(transaction::Error::TooCheapToReplace { prev: None, new: None }), Ok(())]); - assert_eq!(txq.status().status.transaction_count, 2); - assert_eq!(txq.pending(client.clone(), PendingSettings::all_prioritized(0, 0))[0].signed().gas_price, U256::from(20)); - assert_eq!(txq.pending(client.clone(), PendingSettings::all_prioritized(0, 0))[1].signed().gas_price, U256::from(2)); + // then + assert_eq!( + res, + vec![ + Err(transaction::Error::TooCheapToReplace { + prev: None, + new: None + }), + Ok(()) + ] + ); + assert_eq!(txq.status().status.transaction_count, 2); + assert_eq!( + txq.pending(client.clone(), PendingSettings::all_prioritized(0, 0))[0] + .signed() + .gas_price, + U256::from(20) + ); + assert_eq!( + txq.pending(client.clone(), PendingSettings::all_prioritized(0, 0))[1] + .signed() + .gas_price, + U256::from(2) + ); } #[test] fn should_return_none_when_transaction_from_given_address_does_not_exist() { - // given - let txq = new_queue(); + // given + let txq = new_queue(); - // then - assert_eq!(txq.next_nonce(TestClient::new(), &Default::default()), None); + // then + assert_eq!(txq.next_nonce(TestClient::new(), &Default::default()), None); } #[test] fn should_return_correct_nonce_when_transactions_from_given_address_exist() { - // given - let txq = new_queue(); - let tx = Tx::default().signed(); - let from = tx.sender(); - let nonce = tx.nonce; + // given + let txq = new_queue(); + let tx = Tx::default().signed(); + let from = tx.sender(); + let nonce = tx.nonce; - // when - txq.import(TestClient::new(), vec![tx.local()]); + // when + txq.import(TestClient::new(), vec![tx.local()]); - // then - assert_eq!(txq.next_nonce(TestClient::new(), &from), Some(nonce + 1 )); + // then + assert_eq!(txq.next_nonce(TestClient::new(), &from), Some(nonce + 1)); } #[test] fn should_return_valid_last_nonce_after_cull() { - // given - let txq = new_queue(); - let (tx1, _, tx2) = Tx::default().signed_triple(); - let sender = tx1.sender(); + // given + let txq = 
new_queue(); + let (tx1, _, tx2) = Tx::default().signed_triple(); + let sender = tx1.sender(); - // when - // Second should go to future - let res = txq.import(TestClient::new(), vec![tx1, tx2].local()); - assert_eq!(res, vec![Ok(()), Ok(())]); - // Now block is imported - let client = TestClient::new().with_nonce(124); - txq.cull(client.clone()); - // tx2 should be not be promoted to current - assert_eq!(txq.pending(client.clone(), PendingSettings::all_prioritized(0, 0)).len(), 0); + // when + // Second should go to future + let res = txq.import(TestClient::new(), vec![tx1, tx2].local()); + assert_eq!(res, vec![Ok(()), Ok(())]); + // Now block is imported + let client = TestClient::new().with_nonce(124); + txq.cull(client.clone()); + // tx2 should be not be promoted to current + assert_eq!( + txq.pending(client.clone(), PendingSettings::all_prioritized(0, 0)) + .len(), + 0 + ); - // then - assert_eq!(txq.next_nonce(client.clone(), &sender), None); - assert_eq!(txq.next_nonce(client.with_nonce(125), &sender), Some(126.into())); + // then + assert_eq!(txq.next_nonce(client.clone(), &sender), None); + assert_eq!( + txq.next_nonce(client.with_nonce(125), &sender), + Some(126.into()) + ); } #[test] fn should_return_true_if_there_is_local_transaction_pending() { - // given - let txq = new_queue(); - let (tx1, tx2) = Tx::default().signed_pair(); - assert_eq!(txq.has_local_pending_transactions(), false); - let client = TestClient::new().with_local(&tx1.sender()); + // given + let txq = new_queue(); + let (tx1, tx2) = Tx::default().signed_pair(); + assert_eq!(txq.has_local_pending_transactions(), false); + let client = TestClient::new().with_local(&tx1.sender()); - // when - let res = txq.import(client.clone(), vec![tx1.unverified(), tx2.local()]); - assert_eq!(res, vec![Ok(()), Ok(())]); + // when + let res = txq.import(client.clone(), vec![tx1.unverified(), tx2.local()]); + assert_eq!(res, vec![Ok(()), Ok(())]); - // then - 
assert_eq!(txq.has_local_pending_transactions(), true); + // then + assert_eq!(txq.has_local_pending_transactions(), true); } #[test] fn should_reject_transactions_below_base_gas() { - // given - let txq = new_queue(); - let tx = Tx::default().signed(); + // given + let txq = new_queue(); + let tx = Tx::default().signed(); - // when - let res = txq.import(TestClient::new().with_gas_required(100_001), vec![tx].local()); + // when + let res = txq.import( + TestClient::new().with_gas_required(100_001), + vec![tx].local(), + ); - // then - assert_eq!(res, vec![Err(transaction::Error::InsufficientGas { - minimal: 100_001.into(), - got: 21_000.into(), - })]); + // then + assert_eq!( + res, + vec![Err(transaction::Error::InsufficientGas { + minimal: 100_001.into(), + got: 21_000.into(), + })] + ); } #[test] fn should_remove_out_of_date_transactions_occupying_queue() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 105, - max_per_sender: 3, - max_mem_usage: 5_000_000, - }, - verifier::Options { - minimal_gas_price: 10.into(), - ..Default::default() - }, - PrioritizationStrategy::GasPriceOnly, - ); - // that transaction will be occupying the queue - let (_, tx) = Tx::default().signed_pair(); - let res = txq.import(TestClient::new(), vec![tx.local()]); - assert_eq!(res, vec![Ok(())]); - // This should not clear the transaction (yet) - txq.cull(TestClient::new()); - assert_eq!(txq.status().status.transaction_count, 1); + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 105, + max_per_sender: 3, + max_mem_usage: 5_000_000, + }, + verifier::Options { + minimal_gas_price: 10.into(), + ..Default::default() + }, + PrioritizationStrategy::GasPriceOnly, + ); + // that transaction will be occupying the queue + let (_, tx) = Tx::default().signed_pair(); + let res = txq.import(TestClient::new(), vec![tx.local()]); + assert_eq!(res, vec![Ok(())]); + // This should not clear the transaction (yet) + txq.cull(TestClient::new()); + 
assert_eq!(txq.status().status.transaction_count, 1); - // Now insert at least 100 transactions to have the other one marked as future. - for _ in 0..34 { - let (tx1, tx2, tx3) = Tx::default().signed_triple(); - txq.import(TestClient::new(), vec![tx1, tx2, tx3].local()); - } - assert_eq!(txq.status().status.transaction_count, 103); + // Now insert at least 100 transactions to have the other one marked as future. + for _ in 0..34 { + let (tx1, tx2, tx3) = Tx::default().signed_triple(); + txq.import(TestClient::new(), vec![tx1, tx2, tx3].local()); + } + assert_eq!(txq.status().status.transaction_count, 103); - // when - txq.cull(TestClient::new()); + // when + txq.cull(TestClient::new()); - // then - assert_eq!(txq.status().status.transaction_count, 102); + // then + assert_eq!(txq.status().status.transaction_count, 102); } #[test] fn should_accept_local_transactions_below_min_gas_price() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 3, - max_per_sender: 3, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 10.into(), - ..Default::default() - }, - PrioritizationStrategy::GasPriceOnly, - ); - let tx = Tx::gas_price(1).signed(); + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 3, + max_per_sender: 3, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 10.into(), + ..Default::default() + }, + PrioritizationStrategy::GasPriceOnly, + ); + let tx = Tx::gas_price(1).signed(); - // when - let res = txq.import(TestClient::new(), vec![tx.local()]); - assert_eq!(res, vec![Ok(())]); + // when + let res = txq.import(TestClient::new(), vec![tx.local()]); + assert_eq!(res, vec![Ok(())]); - // then - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 1); + // then + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 1 + ); } #[test] fn 
should_accept_local_service_transaction() { - // given - let txq = new_queue(); - let tx = Tx::gas_price(0).signed(); + // given + let txq = new_queue(); + let tx = Tx::gas_price(0).signed(); - // when - let res = txq.import( - TestClient::new() - .with_local(&tx.sender()), - vec![tx.local()] - ); - assert_eq!(res, vec![Ok(())]); + // when + let res = txq.import(TestClient::new().with_local(&tx.sender()), vec![tx.local()]); + assert_eq!(res, vec![Ok(())]); - // then - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)).len(), 1); + // then + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)) + .len(), + 1 + ); } #[test] fn should_not_accept_external_service_transaction_if_sender_not_certified() { - // given - let txq = new_queue(); - let tx1 = Tx::gas_price(0).signed().unverified(); - let tx2 = Tx::gas_price(0).signed().retracted(); - let tx3 = Tx::gas_price(0).signed().unverified(); + // given + let txq = new_queue(); + let tx1 = Tx::gas_price(0).signed().unverified(); + let tx2 = Tx::gas_price(0).signed().retracted(); + let tx3 = Tx::gas_price(0).signed().unverified(); - // when - let res = txq.import(TestClient::new(), vec![tx1, tx2]); - assert_eq!(res, vec![ - Err(transaction::Error::InsufficientGasPrice { - minimal: 1.into(), - got: 0.into(), - }), - Err(transaction::Error::InsufficientGasPrice { - minimal: 1.into(), - got: 0.into(), - }), - ]); + // when + let res = txq.import(TestClient::new(), vec![tx1, tx2]); + assert_eq!( + res, + vec![ + Err(transaction::Error::InsufficientGasPrice { + minimal: 1.into(), + got: 0.into(), + }), + Err(transaction::Error::InsufficientGasPrice { + minimal: 1.into(), + got: 0.into(), + }), + ] + ); - // then - let res = txq.import(TestClient::new().with_service_transaction(), vec![tx3]); - assert_eq!(res, vec![Ok(())]); + // then + let res = txq.import(TestClient::new().with_service_transaction(), vec![tx3]); + assert_eq!(res, vec![Ok(())]); } #[test] fn 
should_not_return_transactions_over_nonce_cap() { - // given - let txq = new_queue(); - let (tx1, tx2, tx3) = Tx::default().signed_triple(); - let res = txq.import( - TestClient::new(), - vec![tx1, tx2, tx3].local() - ); - assert_eq!(res, vec![Ok(()), Ok(()), Ok(())]); + // given + let txq = new_queue(); + let (tx1, tx2, tx3) = Tx::default().signed_triple(); + let res = txq.import(TestClient::new(), vec![tx1, tx2, tx3].local()); + assert_eq!(res, vec![Ok(()), Ok(()), Ok(())]); - // when - let all = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); - // This should invalidate the cache! - let limited = txq.pending(TestClient::new(), PendingSettings { - block_number: 0, - current_timestamp: 0, - nonce_cap: Some(123.into()), - max_len: usize::max_value(), - ordering: PendingOrdering::Priority, - }); + // when + let all = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + // This should invalidate the cache! + let limited = txq.pending( + TestClient::new(), + PendingSettings { + block_number: 0, + current_timestamp: 0, + nonce_cap: Some(123.into()), + max_len: usize::max_value(), + ordering: PendingOrdering::Priority, + }, + ); - // then - assert_eq!(all.len(), 3); - assert_eq!(limited.len(), 1); + // then + assert_eq!(all.len(), 3); + assert_eq!(limited.len(), 1); } #[test] fn should_return_cached_pending_even_if_unordered_is_requested() { - // given - let txq = new_queue(); - let tx1 = Tx::default().signed(); - let (tx2_1, tx2_2)= Tx::default().signed_pair(); - let tx2_1_hash = tx2_1.hash(); - let res = txq.import(TestClient::new(), vec![tx1].unverified()); - assert_eq!(res, vec![Ok(())]); - let res = txq.import(TestClient::new(), vec![tx2_1, tx2_2].local()); - assert_eq!(res, vec![Ok(()), Ok(())]); + // given + let txq = new_queue(); + let tx1 = Tx::default().signed(); + let (tx2_1, tx2_2) = Tx::default().signed_pair(); + let tx2_1_hash = tx2_1.hash(); + let res = txq.import(TestClient::new(), vec![tx1].unverified()); + 
assert_eq!(res, vec![Ok(())]); + let res = txq.import(TestClient::new(), vec![tx2_1, tx2_2].local()); + assert_eq!(res, vec![Ok(()), Ok(())]); - // when - let all = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); - assert_eq!(all[0].hash, tx2_1_hash); - assert_eq!(all.len(), 3); + // when + let all = txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 0)); + assert_eq!(all[0].hash, tx2_1_hash); + assert_eq!(all.len(), 3); - // This should not invalidate the cache! - let limited = txq.pending(TestClient::new(), PendingSettings { - block_number: 0, - current_timestamp: 0, - nonce_cap: None, - max_len: 3, - ordering: PendingOrdering::Unordered, - }); + // This should not invalidate the cache! + let limited = txq.pending( + TestClient::new(), + PendingSettings { + block_number: 0, + current_timestamp: 0, + nonce_cap: None, + max_len: 3, + ordering: PendingOrdering::Unordered, + }, + ); - // then - assert_eq!(all, limited); + // then + assert_eq!(all, limited); } #[test] fn should_return_unordered_and_not_populate_the_cache() { - // given - let txq = new_queue(); - let tx1 = Tx::default().signed(); - let (tx2_1, tx2_2)= Tx::default().signed_pair(); - let res = txq.import(TestClient::new(), vec![tx1].unverified()); - assert_eq!(res, vec![Ok(())]); - let res = txq.import(TestClient::new(), vec![tx2_1, tx2_2].local()); - assert_eq!(res, vec![Ok(()), Ok(())]); + // given + let txq = new_queue(); + let tx1 = Tx::default().signed(); + let (tx2_1, tx2_2) = Tx::default().signed_pair(); + let res = txq.import(TestClient::new(), vec![tx1].unverified()); + assert_eq!(res, vec![Ok(())]); + let res = txq.import(TestClient::new(), vec![tx2_1, tx2_2].local()); + assert_eq!(res, vec![Ok(()), Ok(())]); - // when - // This should not invalidate the cache! 
- let limited = txq.pending(TestClient::new(), PendingSettings { - block_number: 0, - current_timestamp: 0, - nonce_cap: None, - max_len: usize::max_value(), - ordering: PendingOrdering::Unordered, - }); + // when + // This should not invalidate the cache! + let limited = txq.pending( + TestClient::new(), + PendingSettings { + block_number: 0, + current_timestamp: 0, + nonce_cap: None, + max_len: usize::max_value(), + ordering: PendingOrdering::Unordered, + }, + ); - // then - assert_eq!(limited.len(), 3); - assert!(!txq.is_pending_cached()); + // then + assert_eq!(limited.len(), 3); + assert!(!txq.is_pending_cached()); } #[test] fn should_clear_cache_after_timeout_for_local() { - // given - let txq = new_queue(); - let (tx, tx2) = Tx::default().signed_pair(); - let res = txq.import(TestClient::new(), vec![ - verifier::Transaction::Local(PendingTransaction::new(tx, transaction::Condition::Timestamp(1000).into())), - tx2.local() - ]); - assert_eq!(res, vec![Ok(()), Ok(())]); + // given + let txq = new_queue(); + let (tx, tx2) = Tx::default().signed_pair(); + let res = txq.import( + TestClient::new(), + vec![ + verifier::Transaction::Local(PendingTransaction::new( + tx, + transaction::Condition::Timestamp(1000).into(), + )), + tx2.local(), + ], + ); + assert_eq!(res, vec![Ok(()), Ok(())]); - // This should populate cache and set timestamp to 1 - // when - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 1)).len(), 0); - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 1000)).len(), 0); + // This should populate cache and set timestamp to 1 + // when + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 1)) + .len(), + 0 + ); + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 1000)) + .len(), + 0 + ); - // This should invalidate the cache and trigger transaction ready. 
- // then - assert_eq!(txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 1002)).len(), 2); + // This should invalidate the cache and trigger transaction ready. + // then + assert_eq!( + txq.pending(TestClient::new(), PendingSettings::all_prioritized(0, 1002)) + .len(), + 2 + ); } #[test] fn should_reject_big_transaction() { - let txq = new_queue(); - let big_tx = Tx::default().big_one(); - let res = txq.import(TestClient::new(), vec![ - verifier::Transaction::Local(PendingTransaction::new(big_tx, transaction::Condition::Timestamp(1000).into())) - ]); - assert_eq!(res, vec![Err(transaction::Error::TooBig)]); + let txq = new_queue(); + let big_tx = Tx::default().big_one(); + let res = txq.import( + TestClient::new(), + vec![verifier::Transaction::Local(PendingTransaction::new( + big_tx, + transaction::Condition::Timestamp(1000).into(), + ))], + ); + assert_eq!(res, vec![Err(transaction::Error::TooBig)]); } #[test] fn should_include_local_transaction_to_a_full_pool() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 1, - max_per_sender: 2, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 1.into(), - block_gas_limit: 1_000_000.into(), - tx_gas_limit: 1_000_000.into(), - no_early_reject: false, - }, - PrioritizationStrategy::GasPriceOnly, - ); - let tx1 = Tx::gas_price(10_000).signed().unverified(); - let tx2 = Tx::gas_price(1).signed().local(); + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 1, + max_per_sender: 2, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 1.into(), + block_gas_limit: 1_000_000.into(), + tx_gas_limit: 1_000_000.into(), + no_early_reject: false, + }, + PrioritizationStrategy::GasPriceOnly, + ); + let tx1 = Tx::gas_price(10_000).signed().unverified(); + let tx2 = Tx::gas_price(1).signed().local(); - let res = txq.import(TestClient::new().with_balance(1_000_000_000), vec![tx1]); - assert_eq!(res, 
vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 1); + let res = txq.import(TestClient::new().with_balance(1_000_000_000), vec![tx1]); + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 1); - // when - let res = txq.import(TestClient::new(), vec![tx2]); - assert_eq!(res, vec![Ok(())]); + // when + let res = txq.import(TestClient::new(), vec![tx2]); + assert_eq!(res, vec![Ok(())]); - // then - assert_eq!(txq.status().status.transaction_count, 1); + // then + assert_eq!(txq.status().status.transaction_count, 1); } #[test] fn should_avoid_verifying_transaction_already_in_pool() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 1, - max_per_sender: 2, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 1.into(), - block_gas_limit: 1_000_000.into(), - tx_gas_limit: 1_000_000.into(), - no_early_reject: false, - }, - PrioritizationStrategy::GasPriceOnly, - ); - let client = TestClient::new().with_balance(1_000_000_000); - let tx1 = Tx::gas_price(2).signed().unverified(); + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 1, + max_per_sender: 2, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 1.into(), + block_gas_limit: 1_000_000.into(), + tx_gas_limit: 1_000_000.into(), + no_early_reject: false, + }, + PrioritizationStrategy::GasPriceOnly, + ); + let client = TestClient::new().with_balance(1_000_000_000); + let tx1 = Tx::gas_price(2).signed().unverified(); - let res = txq.import(client.clone(), vec![tx1.clone()]); - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 1); - assert!(client.was_verification_triggered()); + let res = txq.import(client.clone(), vec![tx1.clone()]); + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 1); + assert!(client.was_verification_triggered()); - // when - let client = TestClient::new(); - let res 
= txq.import(client.clone(), vec![tx1]); - assert_eq!(res, vec![Err(transaction::Error::AlreadyImported)]); - assert!(!client.was_verification_triggered()); + // when + let client = TestClient::new(); + let res = txq.import(client.clone(), vec![tx1]); + assert_eq!(res, vec![Err(transaction::Error::AlreadyImported)]); + assert!(!client.was_verification_triggered()); - // then - assert_eq!(txq.status().status.transaction_count, 1); + // then + assert_eq!(txq.status().status.transaction_count, 1); } #[test] fn should_avoid_reverifying_recently_rejected_transactions() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 1, - max_per_sender: 2, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 1.into(), - block_gas_limit: 1_000_000.into(), - tx_gas_limit: 1_000_000.into(), - no_early_reject: false, - }, - PrioritizationStrategy::GasPriceOnly, - ); + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 1, + max_per_sender: 2, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 1.into(), + block_gas_limit: 1_000_000.into(), + tx_gas_limit: 1_000_000.into(), + no_early_reject: false, + }, + PrioritizationStrategy::GasPriceOnly, + ); - let client = TestClient::new(); - let tx1 = Tx::gas_price(10_000).signed().unverified(); + let client = TestClient::new(); + let tx1 = Tx::gas_price(10_000).signed().unverified(); - let res = txq.import(client.clone(), vec![tx1.clone()]); - assert_eq!(res, vec![Err(transaction::Error::InsufficientBalance { - balance: 0xf67c.into(), - cost: 0xc8458e4.into(), - })]); - assert_eq!(txq.status().status.transaction_count, 0); - assert!(client.was_verification_triggered()); + let res = txq.import(client.clone(), vec![tx1.clone()]); + assert_eq!( + res, + vec![Err(transaction::Error::InsufficientBalance { + balance: 0xf67c.into(), + cost: 0xc8458e4.into(), + })] + ); + assert_eq!(txq.status().status.transaction_count, 0); + 
assert!(client.was_verification_triggered()); - // when - let client = TestClient::new(); - let res = txq.import(client.clone(), vec![tx1]); - assert_eq!(res, vec![Err(transaction::Error::InsufficientBalance { - balance: 0xf67c.into(), - cost: 0xc8458e4.into(), - })]); - assert!(!client.was_verification_triggered()); + // when + let client = TestClient::new(); + let res = txq.import(client.clone(), vec![tx1]); + assert_eq!( + res, + vec![Err(transaction::Error::InsufficientBalance { + balance: 0xf67c.into(), + cost: 0xc8458e4.into(), + })] + ); + assert!(!client.was_verification_triggered()); - // then - assert_eq!(txq.status().status.transaction_count, 0); + // then + assert_eq!(txq.status().status.transaction_count, 0); } #[test] fn should_reject_early_in_case_gas_price_is_less_than_min_effective() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 1, - max_per_sender: 2, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 1.into(), - block_gas_limit: 1_000_000.into(), - tx_gas_limit: 1_000_000.into(), - no_early_reject: false, - }, - PrioritizationStrategy::GasPriceOnly, - ); - let client = TestClient::new().with_balance(1_000_000_000); - let tx1 = Tx::gas_price(2).signed().unverified(); + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 1, + max_per_sender: 2, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 1.into(), + block_gas_limit: 1_000_000.into(), + tx_gas_limit: 1_000_000.into(), + no_early_reject: false, + }, + PrioritizationStrategy::GasPriceOnly, + ); + let client = TestClient::new().with_balance(1_000_000_000); + let tx1 = Tx::gas_price(2).signed().unverified(); - let res = txq.import(client.clone(), vec![tx1]); - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 1); - assert!(client.was_verification_triggered()); + let res = txq.import(client.clone(), vec![tx1]); + assert_eq!(res, 
vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 1); + assert!(client.was_verification_triggered()); - // when - let client = TestClient::new(); - let tx1 = Tx::default().signed().unverified(); - let res = txq.import(client.clone(), vec![tx1]); - assert_eq!(res, vec![Err(transaction::Error::TooCheapToReplace { - prev: Some(2.into()), - new: Some(1.into()), - })]); - assert!(!client.was_verification_triggered()); + // when + let client = TestClient::new(); + let tx1 = Tx::default().signed().unverified(); + let res = txq.import(client.clone(), vec![tx1]); + assert_eq!( + res, + vec![Err(transaction::Error::TooCheapToReplace { + prev: Some(2.into()), + new: Some(1.into()), + })] + ); + assert!(!client.was_verification_triggered()); - // then - assert_eq!(txq.status().status.transaction_count, 1); + // then + assert_eq!(txq.status().status.transaction_count, 1); } #[test] fn should_not_reject_early_in_case_gas_price_is_less_than_min_effective() { - // given - let txq = TransactionQueue::new( - txpool::Options { - max_count: 1, - max_per_sender: 2, - max_mem_usage: TEST_QUEUE_MAX_MEM - }, - verifier::Options { - minimal_gas_price: 1.into(), - block_gas_limit: 1_000_000.into(), - tx_gas_limit: 1_000_000.into(), - no_early_reject: true, - }, - PrioritizationStrategy::GasPriceOnly, - ); - // when - let tx1 = Tx::gas_price(2).signed(); - let client = TestClient::new().with_local(&tx1.sender()); - let res = txq.import(client.clone(), vec![tx1.unverified()]); + // given + let txq = TransactionQueue::new( + txpool::Options { + max_count: 1, + max_per_sender: 2, + max_mem_usage: TEST_QUEUE_MAX_MEM, + }, + verifier::Options { + minimal_gas_price: 1.into(), + block_gas_limit: 1_000_000.into(), + tx_gas_limit: 1_000_000.into(), + no_early_reject: true, + }, + PrioritizationStrategy::GasPriceOnly, + ); + // when + let tx1 = Tx::gas_price(2).signed(); + let client = TestClient::new().with_local(&tx1.sender()); + let res = txq.import(client.clone(), 
vec![tx1.unverified()]); - // then - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 1); - assert!(client.was_verification_triggered()); + // then + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 1); + assert!(client.was_verification_triggered()); - // when - let tx1 = Tx::gas_price(1).signed(); - let client = TestClient::new().with_local(&tx1.sender()); - let res = txq.import(client.clone(), vec![tx1.unverified()]); + // when + let tx1 = Tx::gas_price(1).signed(); + let client = TestClient::new().with_local(&tx1.sender()); + let res = txq.import(client.clone(), vec![tx1.unverified()]); - // then - assert_eq!(res, vec![Ok(())]); - assert_eq!(txq.status().status.transaction_count, 2); - assert!(client.was_verification_triggered()); + // then + assert_eq!(res, vec![Ok(())]); + assert_eq!(txq.status().status.transaction_count, 2); + assert!(client.was_verification_triggered()); } diff --git a/miner/src/pool/tests/tx.rs b/miner/src/pool/tests/tx.rs index b8f6dca67..6dd53af4a 100644 --- a/miner/src/pool/tests/tx.rs +++ b/miner/src/pool/tests/tx.rs @@ -14,184 +14,208 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use ethereum_types::{U256, H256}; -use ethkey::{Random, Generator}; +use ethereum_types::{H256, U256}; +use ethkey::{Generator, Random}; use rustc_hex::FromHex; -use types::transaction::{self, Transaction, SignedTransaction, UnverifiedTransaction}; +use types::transaction::{self, SignedTransaction, Transaction, UnverifiedTransaction}; use pool::{verifier, VerifiedTransaction}; #[derive(Clone)] pub struct Tx { - pub nonce: u64, - pub gas: u64, - pub gas_price: u64, + pub nonce: u64, + pub gas: u64, + pub gas_price: u64, } impl Default for Tx { - fn default() -> Self { - Tx { - nonce: 123, - gas: 21_000, - gas_price: 1, - } - } + fn default() -> Self { + Tx { + nonce: 123, + gas: 21_000, + gas_price: 1, + } + } } impl Tx { - pub fn gas_price(gas_price: u64) -> Self { - Tx { - gas_price, - ..Default::default() - } - } + pub fn gas_price(gas_price: u64) -> Self { + Tx { + gas_price, + ..Default::default() + } + } - pub fn signed(self) -> SignedTransaction { - let keypair = Random.generate().unwrap(); - self.unsigned().sign(keypair.secret(), None) - } + pub fn signed(self) -> SignedTransaction { + let keypair = Random.generate().unwrap(); + self.unsigned().sign(keypair.secret(), None) + } - pub fn signed_pair(self) -> (SignedTransaction, SignedTransaction) { - let (tx1, tx2, _) = self.signed_triple(); - (tx1, tx2) - } + pub fn signed_pair(self) -> (SignedTransaction, SignedTransaction) { + let (tx1, tx2, _) = self.signed_triple(); + (tx1, tx2) + } - pub fn signed_triple(mut self) -> (SignedTransaction, SignedTransaction, SignedTransaction) { - let keypair = Random.generate().unwrap(); - let tx1 = self.clone().unsigned().sign(keypair.secret(), None); - self.nonce += 1; - let tx2 = self.clone().unsigned().sign(keypair.secret(), None); - self.nonce += 1; - let tx3 = self.unsigned().sign(keypair.secret(), None); + pub fn signed_triple(mut self) -> (SignedTransaction, SignedTransaction, SignedTransaction) { + let keypair = Random.generate().unwrap(); + let tx1 = 
self.clone().unsigned().sign(keypair.secret(), None); + self.nonce += 1; + let tx2 = self.clone().unsigned().sign(keypair.secret(), None); + self.nonce += 1; + let tx3 = self.unsigned().sign(keypair.secret(), None); - (tx1, tx2, tx3) - } + (tx1, tx2, tx3) + } - pub fn signed_replacement(mut self) -> (SignedTransaction, SignedTransaction) { - let keypair = Random.generate().unwrap(); - let tx1 = self.clone().unsigned().sign(keypair.secret(), None); - self.gas_price += 1; - let tx2 = self.unsigned().sign(keypair.secret(), None); + pub fn signed_replacement(mut self) -> (SignedTransaction, SignedTransaction) { + let keypair = Random.generate().unwrap(); + let tx1 = self.clone().unsigned().sign(keypair.secret(), None); + self.gas_price += 1; + let tx2 = self.unsigned().sign(keypair.secret(), None); - (tx1, tx2) - } + (tx1, tx2) + } - pub fn unsigned(self) -> Transaction { - Transaction { - action: transaction::Action::Create, - value: U256::from(100), - data: "3331600055".from_hex().unwrap(), - gas: self.gas.into(), - gas_price: self.gas_price.into(), - nonce: self.nonce.into() - } - } + pub fn unsigned(self) -> Transaction { + Transaction { + action: transaction::Action::Create, + value: U256::from(100), + data: "3331600055".from_hex().unwrap(), + gas: self.gas.into(), + gas_price: self.gas_price.into(), + nonce: self.nonce.into(), + } + } - pub fn big_one(self) -> SignedTransaction { - let keypair = Random.generate().unwrap(); - let tx = Transaction { - action: transaction::Action::Create, - value: U256::from(100), - data: include_str!("../res/big_transaction.data").from_hex().unwrap(), - gas: self.gas.into(), - gas_price: self.gas_price.into(), - nonce: self.nonce.into() - }; - tx.sign(keypair.secret(), None) - } + pub fn big_one(self) -> SignedTransaction { + let keypair = Random.generate().unwrap(); + let tx = Transaction { + action: transaction::Action::Create, + value: U256::from(100), + data: include_str!("../res/big_transaction.data") + .from_hex() + 
.unwrap(), + gas: self.gas.into(), + gas_price: self.gas_price.into(), + nonce: self.nonce.into(), + }; + tx.sign(keypair.secret(), None) + } } pub trait TxExt: Sized { - type Out; - type Verified; - type Hash; + type Out; + type Verified; + type Hash; - fn hash(&self) -> Self::Hash; + fn hash(&self) -> Self::Hash; - fn local(self) -> Self::Out; + fn local(self) -> Self::Out; - fn retracted(self) -> Self::Out; + fn retracted(self) -> Self::Out; - fn unverified(self) -> Self::Out; + fn unverified(self) -> Self::Out; - fn verified(self) -> Self::Verified; + fn verified(self) -> Self::Verified; } -impl TxExt for (A, B) where - A: TxExt, - B: TxExt, +impl TxExt for (A, B) +where + A: TxExt, + B: TxExt, { - type Out = (O, O); - type Verified = (V, V); - type Hash = (H, H); + type Out = (O, O); + type Verified = (V, V); + type Hash = (H, H); - fn hash(&self) -> Self::Hash { (self.0.hash(), self.1.hash()) } - fn local(self) -> Self::Out { (self.0.local(), self.1.local()) } - fn retracted(self) -> Self::Out { (self.0.retracted(), self.1.retracted()) } - fn unverified(self) -> Self::Out { (self.0.unverified(), self.1.unverified()) } - fn verified(self) -> Self::Verified { (self.0.verified(), self.1.verified()) } + fn hash(&self) -> Self::Hash { + (self.0.hash(), self.1.hash()) + } + fn local(self) -> Self::Out { + (self.0.local(), self.1.local()) + } + fn retracted(self) -> Self::Out { + (self.0.retracted(), self.1.retracted()) + } + fn unverified(self) -> Self::Out { + (self.0.unverified(), self.1.unverified()) + } + fn verified(self) -> Self::Verified { + (self.0.verified(), self.1.verified()) + } } impl TxExt for SignedTransaction { - type Out = verifier::Transaction; - type Verified = VerifiedTransaction; - type Hash = H256; + type Out = verifier::Transaction; + type Verified = VerifiedTransaction; + type Hash = H256; - fn hash(&self) -> Self::Hash { - UnverifiedTransaction::hash(self) - } + fn hash(&self) -> Self::Hash { + UnverifiedTransaction::hash(self) + } - fn 
local(self) -> Self::Out { - verifier::Transaction::Local(self.into()) - } + fn local(self) -> Self::Out { + verifier::Transaction::Local(self.into()) + } - fn retracted(self) -> Self::Out { - verifier::Transaction::Retracted(self.into()) - } + fn retracted(self) -> Self::Out { + verifier::Transaction::Retracted(self.into()) + } - fn unverified(self) -> Self::Out { - verifier::Transaction::Unverified(self.into()) - } + fn unverified(self) -> Self::Out { + verifier::Transaction::Unverified(self.into()) + } - fn verified(self) -> Self::Verified { - VerifiedTransaction::from_pending_block_transaction(self) - } + fn verified(self) -> Self::Verified { + VerifiedTransaction::from_pending_block_transaction(self) + } } impl TxExt for Vec { - type Out = Vec; - type Verified = Vec; - type Hash = Vec; + type Out = Vec; + type Verified = Vec; + type Hash = Vec; - fn hash(&self) -> Self::Hash { - self.iter().map(|tx| tx.hash()).collect() - } + fn hash(&self) -> Self::Hash { + self.iter().map(|tx| tx.hash()).collect() + } - fn local(self) -> Self::Out { - self.into_iter().map(Into::into).map(verifier::Transaction::Local).collect() - } + fn local(self) -> Self::Out { + self.into_iter() + .map(Into::into) + .map(verifier::Transaction::Local) + .collect() + } - fn retracted(self) -> Self::Out { - self.into_iter().map(Into::into).map(verifier::Transaction::Retracted).collect() - } + fn retracted(self) -> Self::Out { + self.into_iter() + .map(Into::into) + .map(verifier::Transaction::Retracted) + .collect() + } - fn unverified(self) -> Self::Out { - self.into_iter().map(Into::into).map(verifier::Transaction::Unverified).collect() - } + fn unverified(self) -> Self::Out { + self.into_iter() + .map(Into::into) + .map(verifier::Transaction::Unverified) + .collect() + } - fn verified(self) -> Self::Verified { - self.into_iter().map(VerifiedTransaction::from_pending_block_transaction).collect() - } + fn verified(self) -> Self::Verified { + self.into_iter() + 
.map(VerifiedTransaction::from_pending_block_transaction) + .collect() + } } pub trait PairExt { - type Type; + type Type; - fn into_vec(self) -> Vec; + fn into_vec(self) -> Vec; } impl PairExt for (A, A) { - type Type = A; - fn into_vec(self) -> Vec { - vec![self.0, self.1] - } + type Type = A; + fn into_vec(self) -> Vec { + vec![self.0, self.1] + } } diff --git a/miner/src/pool/verifier.rs b/miner/src/pool/verifier.rs index bd4b50e73..fc14f595b 100644 --- a/miner/src/pool/verifier.rs +++ b/miner/src/pool/verifier.rs @@ -22,110 +22,116 @@ //! May have some overlap with `Readiness` since we don't want to keep around //! stalled transactions. -use std::cmp; -use std::sync::Arc; -use std::sync::atomic::{self, AtomicUsize}; +use std::{ + cmp, + sync::{ + atomic::{self, AtomicUsize}, + Arc, + }, +}; -use ethereum_types::{U256, H256}; +use ethereum_types::{H256, U256}; use rlp::Encodable; use txpool; use types::transaction; -use super::client::{Client, TransactionType}; -use super::VerifiedTransaction; +use super::{ + client::{Client, TransactionType}, + VerifiedTransaction, +}; /// Verification options. #[derive(Debug, Clone, PartialEq)] pub struct Options { - /// Minimal allowed gas price. - pub minimal_gas_price: U256, - /// Current block gas limit. - pub block_gas_limit: U256, - /// Maximal gas limit for a single transaction. - pub tx_gas_limit: U256, - /// Skip checks for early rejection, to make sure that local transactions are always imported. - pub no_early_reject: bool, + /// Minimal allowed gas price. + pub minimal_gas_price: U256, + /// Current block gas limit. + pub block_gas_limit: U256, + /// Maximal gas limit for a single transaction. + pub tx_gas_limit: U256, + /// Skip checks for early rejection, to make sure that local transactions are always imported. 
+ pub no_early_reject: bool, } #[cfg(test)] impl Default for Options { - fn default() -> Self { - Options { - minimal_gas_price: 0.into(), - block_gas_limit: U256::max_value(), - tx_gas_limit: U256::max_value(), - no_early_reject: false, - } - } + fn default() -> Self { + Options { + minimal_gas_price: 0.into(), + block_gas_limit: U256::max_value(), + tx_gas_limit: U256::max_value(), + no_early_reject: false, + } + } } /// Transaction to verify. #[cfg_attr(test, derive(Clone))] pub enum Transaction { - /// Fresh, never verified transaction. - /// - /// We need to do full verification of such transactions - Unverified(transaction::UnverifiedTransaction), + /// Fresh, never verified transaction. + /// + /// We need to do full verification of such transactions + Unverified(transaction::UnverifiedTransaction), - /// Transaction from retracted block. - /// - /// We could skip some parts of verification of such transactions - Retracted(transaction::UnverifiedTransaction), + /// Transaction from retracted block. + /// + /// We could skip some parts of verification of such transactions + Retracted(transaction::UnverifiedTransaction), - /// Locally signed or retracted transaction. - /// - /// We can skip consistency verifications and just verify readiness. - Local(transaction::PendingTransaction), + /// Locally signed or retracted transaction. + /// + /// We can skip consistency verifications and just verify readiness. 
+ Local(transaction::PendingTransaction), } impl Transaction { - /// Return transaction hash - pub fn hash(&self) -> H256 { - match *self { - Transaction::Unverified(ref tx) => tx.hash(), - Transaction::Retracted(ref tx) => tx.hash(), - Transaction::Local(ref tx) => tx.hash(), - } - } + /// Return transaction hash + pub fn hash(&self) -> H256 { + match *self { + Transaction::Unverified(ref tx) => tx.hash(), + Transaction::Retracted(ref tx) => tx.hash(), + Transaction::Local(ref tx) => tx.hash(), + } + } - /// Return transaction gas price - pub fn gas_price(&self) -> &U256 { - match *self { - Transaction::Unverified(ref tx) => &tx.gas_price, - Transaction::Retracted(ref tx) => &tx.gas_price, - Transaction::Local(ref tx) => &tx.gas_price, - } - } + /// Return transaction gas price + pub fn gas_price(&self) -> &U256 { + match *self { + Transaction::Unverified(ref tx) => &tx.gas_price, + Transaction::Retracted(ref tx) => &tx.gas_price, + Transaction::Local(ref tx) => &tx.gas_price, + } + } - fn gas(&self) -> &U256 { - match *self { - Transaction::Unverified(ref tx) => &tx.gas, - Transaction::Retracted(ref tx) => &tx.gas, - Transaction::Local(ref tx) => &tx.gas, - } - } + fn gas(&self) -> &U256 { + match *self { + Transaction::Unverified(ref tx) => &tx.gas, + Transaction::Retracted(ref tx) => &tx.gas, + Transaction::Local(ref tx) => &tx.gas, + } + } - fn transaction(&self) -> &transaction::Transaction { - match *self { - Transaction::Unverified(ref tx) => &*tx, - Transaction::Retracted(ref tx) => &*tx, - Transaction::Local(ref tx) => &*tx, - } - } + fn transaction(&self) -> &transaction::Transaction { + match *self { + Transaction::Unverified(ref tx) => &*tx, + Transaction::Retracted(ref tx) => &*tx, + Transaction::Local(ref tx) => &*tx, + } + } - fn is_local(&self) -> bool { - match *self { - Transaction::Local(..) => true, - _ => false, - } - } + fn is_local(&self) -> bool { + match *self { + Transaction::Local(..) 
=> true, + _ => false, + } + } - fn is_retracted(&self) -> bool { - match *self { - Transaction::Retracted(..) => true, - _ => false, - } - } + fn is_retracted(&self) -> bool { + match *self { + Transaction::Retracted(..) => true, + _ => false, + } + } } /// Transaction verifier. @@ -133,211 +139,218 @@ impl Transaction { /// Verification can be run in parallel for all incoming transactions. #[derive(Debug)] pub struct Verifier { - client: C, - options: Options, - id: Arc, - transaction_to_replace: Option<(S, Arc)>, + client: C, + options: Options, + id: Arc, + transaction_to_replace: Option<(S, Arc)>, } impl Verifier { - /// Creates new transaction verfier with specified options. - pub fn new( - client: C, - options: Options, - id: Arc, - transaction_to_replace: Option<(S, Arc)>, - ) -> Self { - Verifier { - client, - options, - id, - transaction_to_replace, - } - } + /// Creates new transaction verfier with specified options. + pub fn new( + client: C, + options: Options, + id: Arc, + transaction_to_replace: Option<(S, Arc)>, + ) -> Self { + Verifier { + client, + options, + id, + transaction_to_replace, + } + } } -impl txpool::Verifier for Verifier { - type Error = transaction::Error; - type VerifiedTransaction = VerifiedTransaction; +impl txpool::Verifier + for Verifier +{ + type Error = transaction::Error; + type VerifiedTransaction = VerifiedTransaction; - fn verify_transaction(&self, tx: Transaction) -> Result { - // The checks here should be ordered by cost/complexity. - // Cheap checks should be done as early as possible to discard unneeded transactions early. + fn verify_transaction( + &self, + tx: Transaction, + ) -> Result { + // The checks here should be ordered by cost/complexity. + // Cheap checks should be done as early as possible to discard unneeded transactions early. 
- let hash = tx.hash(); + let hash = tx.hash(); - if self.client.transaction_already_included(&hash) { - trace!(target: "txqueue", "[{:?}] Rejected tx already in the blockchain", hash); - bail!(transaction::Error::AlreadyImported) - } + if self.client.transaction_already_included(&hash) { + trace!(target: "txqueue", "[{:?}] Rejected tx already in the blockchain", hash); + bail!(transaction::Error::AlreadyImported) + } - let gas_limit = cmp::min(self.options.tx_gas_limit, self.options.block_gas_limit); - if tx.gas() > &gas_limit { - debug!( - target: "txqueue", - "[{:?}] Rejected transaction above gas limit: {} > min({}, {})", - hash, - tx.gas(), - self.options.block_gas_limit, - self.options.tx_gas_limit, - ); - bail!(transaction::Error::GasLimitExceeded { - limit: gas_limit, - got: *tx.gas(), - }); - } + let gas_limit = cmp::min(self.options.tx_gas_limit, self.options.block_gas_limit); + if tx.gas() > &gas_limit { + debug!( + target: "txqueue", + "[{:?}] Rejected transaction above gas limit: {} > min({}, {})", + hash, + tx.gas(), + self.options.block_gas_limit, + self.options.tx_gas_limit, + ); + bail!(transaction::Error::GasLimitExceeded { + limit: gas_limit, + got: *tx.gas(), + }); + } - let minimal_gas = self.client.required_gas(tx.transaction()); - if tx.gas() < &minimal_gas { - trace!(target: "txqueue", - "[{:?}] Rejected transaction with insufficient gas: {} < {}", - hash, - tx.gas(), - minimal_gas, - ); + let minimal_gas = self.client.required_gas(tx.transaction()); + if tx.gas() < &minimal_gas { + trace!(target: "txqueue", + "[{:?}] Rejected transaction with insufficient gas: {} < {}", + hash, + tx.gas(), + minimal_gas, + ); - bail!(transaction::Error::InsufficientGas { - minimal: minimal_gas, - got: *tx.gas(), - }) - } + bail!(transaction::Error::InsufficientGas { + minimal: minimal_gas, + got: *tx.gas(), + }) + } - let is_own = tx.is_local(); - // Quick exit for non-service and non-local transactions - // - // We're checking if the transaction is below 
configured minimal gas price - // or the effective minimal gas price in case the pool is full. - if !tx.gas_price().is_zero() && !is_own { - if tx.gas_price() < &self.options.minimal_gas_price { - trace!( - target: "txqueue", - "[{:?}] Rejected tx below minimal gas price threshold: {} < {}", - hash, - tx.gas_price(), - self.options.minimal_gas_price, - ); - bail!(transaction::Error::InsufficientGasPrice { - minimal: self.options.minimal_gas_price, - got: *tx.gas_price(), - }); - } + let is_own = tx.is_local(); + // Quick exit for non-service and non-local transactions + // + // We're checking if the transaction is below configured minimal gas price + // or the effective minimal gas price in case the pool is full. + if !tx.gas_price().is_zero() && !is_own { + if tx.gas_price() < &self.options.minimal_gas_price { + trace!( + target: "txqueue", + "[{:?}] Rejected tx below minimal gas price threshold: {} < {}", + hash, + tx.gas_price(), + self.options.minimal_gas_price, + ); + bail!(transaction::Error::InsufficientGasPrice { + minimal: self.options.minimal_gas_price, + got: *tx.gas_price(), + }); + } - if let Some((ref scoring, ref vtx)) = self.transaction_to_replace { - if scoring.should_reject_early(vtx, &tx) { - trace!( - target: "txqueue", - "[{:?}] Rejected tx early, cause it doesn't have any chance to get to the pool: (gas price: {} < {})", - hash, - tx.gas_price(), - vtx.transaction.gas_price, - ); - return Err(transaction::Error::TooCheapToReplace { - prev: Some(vtx.transaction.gas_price), - new: Some(*tx.gas_price()), - }); - } - } - } + if let Some((ref scoring, ref vtx)) = self.transaction_to_replace { + if scoring.should_reject_early(vtx, &tx) { + trace!( + target: "txqueue", + "[{:?}] Rejected tx early, cause it doesn't have any chance to get to the pool: (gas price: {} < {})", + hash, + tx.gas_price(), + vtx.transaction.gas_price, + ); + return Err(transaction::Error::TooCheapToReplace { + prev: Some(vtx.transaction.gas_price), + new: 
Some(*tx.gas_price()), + }); + } + } + } - // Some more heavy checks below. - // Actually recover sender and verify that transaction - let is_retracted = tx.is_retracted(); - let transaction = match tx { - Transaction::Retracted(tx) | Transaction::Unverified(tx) => match self.client.verify_transaction(tx) { - Ok(signed) => signed.into(), - Err(err) => { - debug!(target: "txqueue", "[{:?}] Rejected tx {:?}", hash, err); - bail!(err) - }, - }, - Transaction::Local(tx) => match self.client.verify_transaction_basic(&**tx) { - Ok(()) => tx, - Err(err) => { - warn!(target: "txqueue", "[{:?}] Rejected local tx {:?}", hash, err); - return Err(err) - } - }, - }; + // Some more heavy checks below. + // Actually recover sender and verify that transaction + let is_retracted = tx.is_retracted(); + let transaction = match tx { + Transaction::Retracted(tx) | Transaction::Unverified(tx) => { + match self.client.verify_transaction(tx) { + Ok(signed) => signed.into(), + Err(err) => { + debug!(target: "txqueue", "[{:?}] Rejected tx {:?}", hash, err); + bail!(err) + } + } + } + Transaction::Local(tx) => match self.client.verify_transaction_basic(&**tx) { + Ok(()) => tx, + Err(err) => { + warn!(target: "txqueue", "[{:?}] Rejected local tx {:?}", hash, err); + return Err(err); + } + }, + }; - // Verify RLP payload - if let Err(err) = self.client.decode_transaction(&transaction.rlp_bytes()) { - debug!(target: "txqueue", "[{:?}] Rejected transaction's rlp payload", err); - bail!(err) - } + // Verify RLP payload + if let Err(err) = self.client.decode_transaction(&transaction.rlp_bytes()) { + debug!(target: "txqueue", "[{:?}] Rejected transaction's rlp payload", err); + bail!(err) + } - let sender = transaction.sender(); - let account_details = self.client.account_details(&sender); + let sender = transaction.sender(); + let account_details = self.client.account_details(&sender); - if transaction.gas_price < self.options.minimal_gas_price { - let transaction_type = 
self.client.transaction_type(&transaction); - if let TransactionType::Service = transaction_type { - debug!(target: "txqueue", "Service tx {:?} below minimal gas price accepted", hash); - } else if is_own || account_details.is_local { - info!(target: "own_tx", "Local tx {:?} below minimal gas price accepted", hash); - } else { - trace!( - target: "txqueue", - "[{:?}] Rejected tx below minimal gas price threshold: {} < {}", - hash, - transaction.gas_price, - self.options.minimal_gas_price, - ); - bail!(transaction::Error::InsufficientGasPrice { - minimal: self.options.minimal_gas_price, - got: transaction.gas_price, - }); - } - } + if transaction.gas_price < self.options.minimal_gas_price { + let transaction_type = self.client.transaction_type(&transaction); + if let TransactionType::Service = transaction_type { + debug!(target: "txqueue", "Service tx {:?} below minimal gas price accepted", hash); + } else if is_own || account_details.is_local { + info!(target: "own_tx", "Local tx {:?} below minimal gas price accepted", hash); + } else { + trace!( + target: "txqueue", + "[{:?}] Rejected tx below minimal gas price threshold: {} < {}", + hash, + transaction.gas_price, + self.options.minimal_gas_price, + ); + bail!(transaction::Error::InsufficientGasPrice { + minimal: self.options.minimal_gas_price, + got: transaction.gas_price, + }); + } + } - let (full_gas_price, overflow_1) = transaction.gas_price.overflowing_mul(transaction.gas); - let (cost, overflow_2) = transaction.value.overflowing_add(full_gas_price); - if overflow_1 || overflow_2 { - trace!( - target: "txqueue", - "[{:?}] Rejected tx, price overflow", - hash - ); - bail!(transaction::Error::InsufficientBalance { - cost: U256::max_value(), - balance: account_details.balance, - }); - } - if account_details.balance < cost { - debug!( - target: "txqueue", - "[{:?}] Rejected tx with not enough balance: {} < {}", - hash, - account_details.balance, - cost, - ); - bail!(transaction::Error::InsufficientBalance { - 
cost: cost, - balance: account_details.balance, - }); - } + let (full_gas_price, overflow_1) = transaction.gas_price.overflowing_mul(transaction.gas); + let (cost, overflow_2) = transaction.value.overflowing_add(full_gas_price); + if overflow_1 || overflow_2 { + trace!( + target: "txqueue", + "[{:?}] Rejected tx, price overflow", + hash + ); + bail!(transaction::Error::InsufficientBalance { + cost: U256::max_value(), + balance: account_details.balance, + }); + } + if account_details.balance < cost { + debug!( + target: "txqueue", + "[{:?}] Rejected tx with not enough balance: {} < {}", + hash, + account_details.balance, + cost, + ); + bail!(transaction::Error::InsufficientBalance { + cost: cost, + balance: account_details.balance, + }); + } - if transaction.nonce < account_details.nonce { - debug!( - target: "txqueue", - "[{:?}] Rejected tx with old nonce ({} < {})", - hash, - transaction.nonce, - account_details.nonce, - ); - bail!(transaction::Error::Old); - } + if transaction.nonce < account_details.nonce { + debug!( + target: "txqueue", + "[{:?}] Rejected tx with old nonce ({} < {})", + hash, + transaction.nonce, + account_details.nonce, + ); + bail!(transaction::Error::Old); + } - let priority = match (is_own || account_details.is_local, is_retracted) { - (true, _) => super::Priority::Local, - (false, false) => super::Priority::Regular, - (false, true) => super::Priority::Retracted, - }; - Ok(VerifiedTransaction { - transaction, - priority, - hash, - sender, - insertion_id: self.id.fetch_add(1, atomic::Ordering::AcqRel), - }) - } + let priority = match (is_own || account_details.is_local, is_retracted) { + (true, _) => super::Priority::Local, + (false, false) => super::Priority::Regular, + (false, true) => super::Priority::Retracted, + }; + Ok(VerifiedTransaction { + transaction, + priority, + hash, + sender, + insertion_id: self.id.fetch_add(1, atomic::Ordering::AcqRel), + }) + } } diff --git a/miner/src/service_transaction_checker.rs 
b/miner/src/service_transaction_checker.rs index 56e65c8b8..1188871b8 100644 --- a/miner/src/service_transaction_checker.rs +++ b/miner/src/service_transaction_checker.rs @@ -16,79 +16,110 @@ //! A service transactions contract checker. -use std::collections::HashMap; -use std::mem; -use std::sync::Arc; -use call_contract::{RegistryInfo, CallContract}; -use types::ids::BlockId; -use types::transaction::SignedTransaction; +use call_contract::{CallContract, RegistryInfo}; use ethabi::FunctionOutputDecoder; use ethereum_types::Address; use parking_lot::RwLock; +use std::{collections::HashMap, mem, sync::Arc}; +use types::{ids::BlockId, transaction::SignedTransaction}; -use_contract!(service_transaction, "res/contracts/service_transaction.json"); +use_contract!( + service_transaction, + "res/contracts/service_transaction.json" +); const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker"; /// Service transactions checker. #[derive(Default, Clone)] pub struct ServiceTransactionChecker { - certified_addresses_cache: Arc>> + certified_addresses_cache: Arc>>, } impl ServiceTransactionChecker { + /// Checks if given address in tx is whitelisted to send service transactions. + pub fn check( + &self, + client: &C, + tx: &SignedTransaction, + ) -> Result { + let sender = tx.sender(); + // Skip checking the contract if the transaction does not have zero gas price + if !tx.gas_price.is_zero() { + return Ok(false); + } - /// Checks if given address in tx is whitelisted to send service transactions. - pub fn check(&self, client: &C, tx: &SignedTransaction) -> Result { - let sender = tx.sender(); - // Skip checking the contract if the transaction does not have zero gas price - if !tx.gas_price.is_zero() { - return Ok(false) - } + self.check_address(client, sender) + } - self.check_address(client, sender) - } + /// Checks if given address is whitelisted to send service transactions. 
+ pub fn check_address( + &self, + client: &C, + sender: Address, + ) -> Result { + trace!(target: "txqueue", "Checking service transaction checker contract from {}", sender); + if let Some(allowed) = self + .certified_addresses_cache + .try_read() + .as_ref() + .and_then(|c| c.get(&sender)) + { + return Ok(*allowed); + } + let contract_address = client + .registry_address( + SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME.to_owned(), + BlockId::Latest, + ) + .ok_or_else(|| "contract is not configured")?; + self.call_contract(client, contract_address, sender) + .and_then(|allowed| { + if let Some(mut cache) = self.certified_addresses_cache.try_write() { + cache.insert(sender, allowed); + }; + Ok(allowed) + }) + } - /// Checks if given address is whitelisted to send service transactions. - pub fn check_address(&self, client: &C, sender: Address) -> Result { - trace!(target: "txqueue", "Checking service transaction checker contract from {}", sender); - if let Some(allowed) = self.certified_addresses_cache.try_read().as_ref().and_then(|c| c.get(&sender)) { - return Ok(*allowed); - } - let contract_address = client.registry_address(SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME.to_owned(), BlockId::Latest) - .ok_or_else(|| "contract is not configured")?; - self.call_contract(client, contract_address, sender).and_then(|allowed| { - if let Some(mut cache) = self.certified_addresses_cache.try_write() { - cache.insert(sender, allowed); - }; - Ok(allowed) - }) - } + /// Refresh certified addresses cache + pub fn refresh_cache( + &self, + client: &C, + ) -> Result { + trace!(target: "txqueue", "Refreshing certified addresses cache"); + // replace the cache with an empty list, + // since it's not recent it won't be used anyway. 
+ let cache = mem::replace( + &mut *self.certified_addresses_cache.write(), + HashMap::default(), + ); - /// Refresh certified addresses cache - pub fn refresh_cache(&self, client: &C) -> Result { - trace!(target: "txqueue", "Refreshing certified addresses cache"); - // replace the cache with an empty list, - // since it's not recent it won't be used anyway. - let cache = mem::replace(&mut *self.certified_addresses_cache.write(), HashMap::default()); + if let Some(contract_address) = client.registry_address( + SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME.to_owned(), + BlockId::Latest, + ) { + let addresses: Vec<_> = cache.keys().collect(); + let mut cache: HashMap = HashMap::default(); + for address in addresses { + let allowed = self.call_contract(client, contract_address, *address)?; + cache.insert(*address, allowed); + } + mem::replace(&mut *self.certified_addresses_cache.write(), cache); + Ok(true) + } else { + Ok(false) + } + } - if let Some(contract_address) = client.registry_address(SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME.to_owned(), BlockId::Latest) { - let addresses: Vec<_> = cache.keys().collect(); - let mut cache: HashMap = HashMap::default(); - for address in addresses { - let allowed = self.call_contract(client, contract_address, *address)?; - cache.insert(*address, allowed); - } - mem::replace(&mut *self.certified_addresses_cache.write(), cache); - Ok(true) - } else { - Ok(false) - } - } - - fn call_contract(&self, client: &C, contract_address: Address, sender: Address) -> Result { - let (data, decoder) = service_transaction::functions::certified::call(sender); - let value = client.call_contract(BlockId::Latest, contract_address, data)?; - decoder.decode(&value).map_err(|e| e.to_string()) - } + fn call_contract( + &self, + client: &C, + contract_address: Address, + sender: Address, + ) -> Result { + let (data, decoder) = service_transaction::functions::certified::call(sender); + let value = client.call_contract(BlockId::Latest, contract_address, 
data)?; + decoder.decode(&value).map_err(|e| e.to_string()) + } } diff --git a/miner/src/work_notify.rs b/miner/src/work_notify.rs index 367990f22..bd9123932 100644 --- a/miner/src/work_notify.rs +++ b/miner/src/work_notify.rs @@ -18,15 +18,17 @@ extern crate ethash; extern crate fetch; +extern crate hyper; extern crate parity_runtime; extern crate url; -extern crate hyper; -use self::fetch::{Fetch, Request, Client as FetchClient, Method}; -use self::parity_runtime::Executor; -use self::ethash::SeedHashCompute; -use self::url::Url; -use self::hyper::header::{self, HeaderValue}; +use self::{ + ethash::SeedHashCompute, + fetch::{Client as FetchClient, Fetch, Method, Request}, + hyper::header::{self, HeaderValue}, + parity_runtime::Executor, + url::Url, +}; use ethereum_types::{H256, U256}; use parking_lot::Mutex; @@ -34,60 +36,70 @@ use parking_lot::Mutex; use futures::Future; /// Trait for notifying about new mining work -pub trait NotifyWork : Send + Sync { - /// Fired when new mining job available - fn notify(&self, pow_hash: H256, difficulty: U256, number: u64); +pub trait NotifyWork: Send + Sync { + /// Fired when new mining job available + fn notify(&self, pow_hash: H256, difficulty: U256, number: u64); } /// POSTs info about new work to given urls. pub struct WorkPoster { - urls: Vec, - client: FetchClient, - executor: Executor, - seed_compute: Mutex, + urls: Vec, + client: FetchClient, + executor: Executor, + seed_compute: Mutex, } impl WorkPoster { - /// Create new `WorkPoster`. - pub fn new(urls: &[String], fetch: FetchClient, executor: Executor) -> Self { - let urls = urls.into_iter().filter_map(|u| { - match Url::parse(u) { - Ok(url) => Some(url), - Err(e) => { - warn!("Error parsing URL {} : {}", u, e); - None - } - } - }).collect(); - WorkPoster { - client: fetch, - executor: executor, - urls: urls, - seed_compute: Mutex::new(SeedHashCompute::default()), - } - } + /// Create new `WorkPoster`. 
+ pub fn new(urls: &[String], fetch: FetchClient, executor: Executor) -> Self { + let urls = urls + .into_iter() + .filter_map(|u| match Url::parse(u) { + Ok(url) => Some(url), + Err(e) => { + warn!("Error parsing URL {} : {}", u, e); + None + } + }) + .collect(); + WorkPoster { + client: fetch, + executor: executor, + urls: urls, + seed_compute: Mutex::new(SeedHashCompute::default()), + } + } } impl NotifyWork for WorkPoster { - fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) { - // TODO: move this to engine - let target = ethash::difficulty_to_boundary(&difficulty); - let seed_hash = &self.seed_compute.lock().hash_block_number(number); - let seed_hash = H256::from_slice(&seed_hash[..]); - let body = format!( - r#"{{ "result": ["0x{:x}","0x{:x}","0x{:x}","0x{:x}"] }}"#, - pow_hash, seed_hash, target, number - ); + fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) { + // TODO: move this to engine + let target = ethash::difficulty_to_boundary(&difficulty); + let seed_hash = &self.seed_compute.lock().hash_block_number(number); + let seed_hash = H256::from_slice(&seed_hash[..]); + let body = format!( + r#"{{ "result": ["0x{:x}","0x{:x}","0x{:x}","0x{:x}"] }}"#, + pow_hash, seed_hash, target, number + ); - for u in &self.urls { - let u = u.clone(); - self.executor.spawn(self.client.fetch( - Request::new(u.clone(), Method::POST) - .with_header(header::CONTENT_TYPE, HeaderValue::from_static("application/json")) - .with_body(body.clone()), Default::default() - ).map_err(move |e| { - warn!("Error sending HTTP notification to {} : {}, retrying", u, e); - }).map(|_| ())); - } - } + for u in &self.urls { + let u = u.clone(); + self.executor.spawn( + self.client + .fetch( + Request::new(u.clone(), Method::POST) + .with_header( + header::CONTENT_TYPE, + HeaderValue::from_static("application/json"), + ) + .with_body(body.clone()), + Default::default(), + ) + .map_err(move |e| { + warn!("Error sending HTTP notification to {} : {}, retrying", u, e); + 
}) + .map(|_| ()), + ); + } + } } diff --git a/miner/stratum/src/lib.rs b/miner/stratum/src/lib.rs index 05a7dfb6d..292c7dfa4 100644 --- a/miner/stratum/src/lib.rs +++ b/miner/stratum/src/lib.rs @@ -16,36 +16,40 @@ //! Stratum protocol implementation for parity ethereum/bitcoin clients -extern crate jsonrpc_tcp_server; -extern crate jsonrpc_core; extern crate ethereum_types; +extern crate jsonrpc_core; +extern crate jsonrpc_tcp_server; extern crate keccak_hash as hash; extern crate parking_lot; -#[macro_use] extern crate log; +#[macro_use] +extern crate log; -#[cfg(test)] extern crate tokio; -#[cfg(test)] extern crate tokio_io; -#[cfg(test)] extern crate env_logger; +#[cfg(test)] +extern crate env_logger; +#[cfg(test)] +extern crate tokio; +#[cfg(test)] +extern crate tokio_io; mod traits; -pub use traits::{ - JobDispatcher, PushWorkHandler, Error, ServiceConfiguration, -}; +pub use traits::{Error, JobDispatcher, PushWorkHandler, ServiceConfiguration}; +use jsonrpc_core::{to_value, Compatibility, IoDelegate, MetaIoHandler, Metadata, Params, Value}; use jsonrpc_tcp_server::{ - Server as JsonRpcServer, ServerBuilder as JsonRpcServerBuilder, - RequestContext, MetaExtractor, Dispatcher, PushMessageError, + Dispatcher, MetaExtractor, PushMessageError, RequestContext, Server as JsonRpcServer, + ServerBuilder as JsonRpcServerBuilder, }; -use jsonrpc_core::{MetaIoHandler, Params, to_value, Value, Metadata, Compatibility, IoDelegate}; use std::sync::Arc; -use std::net::SocketAddr; -use std::collections::{HashSet, HashMap}; -use hash::keccak; use ethereum_types::H256; +use hash::keccak; use parking_lot::RwLock; +use std::{ + collections::{HashMap, HashSet}, + net::SocketAddr, +}; type RpcResult = Result; @@ -53,428 +57,461 @@ const NOTIFY_COUNTER_INITIAL: u32 = 16; /// Container which owns rpc server and stratum implementation pub struct Stratum { - /// RPC server - /// - /// It is an `Option` so it can be easily closed and released during `drop` phase - rpc_server: Option, - 
/// stratum protocol implementation - /// - /// It is owned by a container and rpc server - implementation: Arc, - /// Message dispatcher (tcp/ip service) - /// - /// Used to push messages to peers - tcp_dispatcher: Dispatcher, + /// RPC server + /// + /// It is an `Option` so it can be easily closed and released during `drop` phase + rpc_server: Option, + /// stratum protocol implementation + /// + /// It is owned by a container and rpc server + implementation: Arc, + /// Message dispatcher (tcp/ip service) + /// + /// Used to push messages to peers + tcp_dispatcher: Dispatcher, } impl Stratum { - pub fn start( - addr: &SocketAddr, - dispatcher: Arc, - secret: Option, - ) -> Result, Error> { + pub fn start( + addr: &SocketAddr, + dispatcher: Arc, + secret: Option, + ) -> Result, Error> { + let implementation = Arc::new(StratumImpl { + subscribers: RwLock::default(), + job_queue: RwLock::default(), + dispatcher, + workers: Arc::new(RwLock::default()), + secret, + notify_counter: RwLock::new(NOTIFY_COUNTER_INITIAL), + }); - let implementation = Arc::new(StratumImpl { - subscribers: RwLock::default(), - job_queue: RwLock::default(), - dispatcher, - workers: Arc::new(RwLock::default()), - secret, - notify_counter: RwLock::new(NOTIFY_COUNTER_INITIAL), - }); + let mut delegate = IoDelegate::::new(implementation.clone()); + delegate.add_method_with_meta("mining.subscribe", StratumImpl::subscribe); + delegate.add_method_with_meta("mining.authorize", StratumImpl::authorize); + delegate.add_method_with_meta("mining.submit", StratumImpl::submit); + let mut handler = MetaIoHandler::::with_compatibility(Compatibility::Both); + handler.extend_with(delegate); - let mut delegate = IoDelegate::::new(implementation.clone()); - delegate.add_method_with_meta("mining.subscribe", StratumImpl::subscribe); - delegate.add_method_with_meta("mining.authorize", StratumImpl::authorize); - delegate.add_method_with_meta("mining.submit", StratumImpl::submit); - let mut handler = 
MetaIoHandler::::with_compatibility(Compatibility::Both); - handler.extend_with(delegate); + let server_builder = JsonRpcServerBuilder::new(handler); + let tcp_dispatcher = server_builder.dispatcher(); + let server_builder = + server_builder.session_meta_extractor(PeerMetaExtractor::new(tcp_dispatcher.clone())); + let server = server_builder.start(addr)?; - let server_builder = JsonRpcServerBuilder::new(handler); - let tcp_dispatcher = server_builder.dispatcher(); - let server_builder = server_builder.session_meta_extractor(PeerMetaExtractor::new(tcp_dispatcher.clone())); - let server = server_builder.start(addr)?; + let stratum = Arc::new(Stratum { + rpc_server: Some(server), + implementation, + tcp_dispatcher, + }); - let stratum = Arc::new(Stratum { - rpc_server: Some(server), - implementation, - tcp_dispatcher, - }); - - Ok(stratum) - } + Ok(stratum) + } } impl PushWorkHandler for Stratum { - fn push_work_all(&self, payload: String) { - self.implementation.push_work_all(payload, &self.tcp_dispatcher) - } + fn push_work_all(&self, payload: String) { + self.implementation + .push_work_all(payload, &self.tcp_dispatcher) + } } impl Drop for Stratum { - fn drop(&mut self) { - // shut down rpc server - self.rpc_server.take().map(|server| server.close()); - } + fn drop(&mut self) { + // shut down rpc server + self.rpc_server.take().map(|server| server.close()); + } } struct StratumImpl { - /// Subscribed clients - subscribers: RwLock>, - /// List of workers supposed to receive job update - job_queue: RwLock>, - /// Payload manager - dispatcher: Arc, - /// Authorized workers (socket - worker_id) - workers: Arc>>, - /// Secret if any - secret: Option, - /// Dispatch notify counter - notify_counter: RwLock, + /// Subscribed clients + subscribers: RwLock>, + /// List of workers supposed to receive job update + job_queue: RwLock>, + /// Payload manager + dispatcher: Arc, + /// Authorized workers (socket - worker_id) + workers: Arc>>, + /// Secret if any + secret: Option, + 
/// Dispatch notify counter + notify_counter: RwLock, } impl StratumImpl { - /// rpc method `mining.subscribe` - fn subscribe(&self, _params: Params, meta: SocketMetadata) -> RpcResult { - use std::str::FromStr; + /// rpc method `mining.subscribe` + fn subscribe(&self, _params: Params, meta: SocketMetadata) -> RpcResult { + use std::str::FromStr; - self.subscribers.write().push(meta.addr().clone()); - self.job_queue.write().insert(meta.addr().clone()); - trace!(target: "stratum", "Subscription request from {:?}", meta.addr()); + self.subscribers.write().push(meta.addr().clone()); + self.job_queue.write().insert(meta.addr().clone()); + trace!(target: "stratum", "Subscription request from {:?}", meta.addr()); - Ok(match self.dispatcher.initial() { - Some(initial) => match jsonrpc_core::Value::from_str(&initial) { - Ok(val) => Ok(val), - Err(e) => { - warn!(target: "stratum", "Invalid payload: '{}' ({:?})", &initial, e); - to_value(&[0u8; 0]) - }, - }, - None => to_value(&[0u8; 0]), - }.expect("Empty slices are serializable; qed")) - } + Ok(match self.dispatcher.initial() { + Some(initial) => match jsonrpc_core::Value::from_str(&initial) { + Ok(val) => Ok(val), + Err(e) => { + warn!(target: "stratum", "Invalid payload: '{}' ({:?})", &initial, e); + to_value(&[0u8; 0]) + } + }, + None => to_value(&[0u8; 0]), + } + .expect("Empty slices are serializable; qed")) + } - /// rpc method `mining.authorize` - fn authorize(&self, params: Params, meta: SocketMetadata) -> RpcResult { - params.parse::<(String, String)>().map(|(worker_id, secret)| { - if let Some(valid_secret) = self.secret { - let hash = keccak(secret); - if hash != valid_secret { - return to_value(&false); - } - } - trace!(target: "stratum", "New worker #{} registered", worker_id); - self.workers.write().insert(meta.addr().clone(), worker_id); - to_value(true) - }).map(|v| v.expect("Only true/false is returned and it's always serializable; qed")) - } + /// rpc method `mining.authorize` + fn authorize(&self, 
params: Params, meta: SocketMetadata) -> RpcResult { + params + .parse::<(String, String)>() + .map(|(worker_id, secret)| { + if let Some(valid_secret) = self.secret { + let hash = keccak(secret); + if hash != valid_secret { + return to_value(&false); + } + } + trace!(target: "stratum", "New worker #{} registered", worker_id); + self.workers.write().insert(meta.addr().clone(), worker_id); + to_value(true) + }) + .map(|v| v.expect("Only true/false is returned and it's always serializable; qed")) + } - /// rpc method `mining.submit` - fn submit(&self, params: Params, meta: SocketMetadata) -> RpcResult { - Ok(match params { - Params::Array(vals) => { - // first two elements are service messages (worker_id & job_id) - match self.dispatcher.submit(vals.iter().skip(2) - .filter_map(|val| match *val { - Value::String(ref s) => Some(s.to_owned()), - _ => None - }) - .collect::>()) { - Ok(()) => { - self.update_peers(&meta.tcp_dispatcher.expect("tcp_dispatcher is always initialized; qed")); - to_value(true) - }, - Err(submit_err) => { - warn!("Error while submitting share: {:?}", submit_err); - to_value(false) - } - } - }, - _ => { - trace!(target: "stratum", "Invalid submit work format {:?}", params); - to_value(false) - } - }.expect("Only true/false is returned and it's always serializable; qed")) - } + /// rpc method `mining.submit` + fn submit(&self, params: Params, meta: SocketMetadata) -> RpcResult { + Ok(match params { + Params::Array(vals) => { + // first two elements are service messages (worker_id & job_id) + match self.dispatcher.submit( + vals.iter() + .skip(2) + .filter_map(|val| match *val { + Value::String(ref s) => Some(s.to_owned()), + _ => None, + }) + .collect::>(), + ) { + Ok(()) => { + self.update_peers( + &meta + .tcp_dispatcher + .expect("tcp_dispatcher is always initialized; qed"), + ); + to_value(true) + } + Err(submit_err) => { + warn!("Error while submitting share: {:?}", submit_err); + to_value(false) + } + } + } + _ => { + trace!(target: 
"stratum", "Invalid submit work format {:?}", params); + to_value(false) + } + } + .expect("Only true/false is returned and it's always serializable; qed")) + } - /// Helper method - fn update_peers(&self, tcp_dispatcher: &Dispatcher) { - if let Some(job) = self.dispatcher.job() { - self.push_work_all(job, tcp_dispatcher) - } - } + /// Helper method + fn update_peers(&self, tcp_dispatcher: &Dispatcher) { + if let Some(job) = self.dispatcher.job() { + self.push_work_all(job, tcp_dispatcher) + } + } - fn push_work_all(&self, payload: String, tcp_dispatcher: &Dispatcher) { - let hup_peers = { - let workers = self.workers.read(); - let next_request_id = { - let mut counter = self.notify_counter.write(); - if *counter == ::std::u32::MAX { - *counter = NOTIFY_COUNTER_INITIAL; - } else { - *counter = *counter + 1 - } - *counter - }; + fn push_work_all(&self, payload: String, tcp_dispatcher: &Dispatcher) { + let hup_peers = { + let workers = self.workers.read(); + let next_request_id = { + let mut counter = self.notify_counter.write(); + if *counter == ::std::u32::MAX { + *counter = NOTIFY_COUNTER_INITIAL; + } else { + *counter = *counter + 1 + } + *counter + }; - let mut hup_peers = HashSet::new(); - let workers_msg = format!("{{ \"id\": {}, \"method\": \"mining.notify\", \"params\": {} }}", next_request_id, payload); - trace!(target: "stratum", "pushing work for {} workers (payload: '{}')", workers.len(), &workers_msg); - for (addr, _) in workers.iter() { - trace!(target: "stratum", "pusing work to {}", addr); - match tcp_dispatcher.push_message(addr, workers_msg.clone()) { - Err(PushMessageError::NoSuchPeer) => { - trace!(target: "stratum", "Worker no longer connected: {}", addr); - hup_peers.insert(addr.clone()); - }, - Err(e) => { - warn!(target: "stratum", "Unexpected transport error: {:?}", e); - }, - Ok(_) => {}, - } - } - hup_peers - }; + let mut hup_peers = HashSet::new(); + let workers_msg = format!( + "{{ \"id\": {}, \"method\": \"mining.notify\", \"params\": 
{} }}", + next_request_id, payload + ); + trace!(target: "stratum", "pushing work for {} workers (payload: '{}')", workers.len(), &workers_msg); + for (addr, _) in workers.iter() { + trace!(target: "stratum", "pusing work to {}", addr); + match tcp_dispatcher.push_message(addr, workers_msg.clone()) { + Err(PushMessageError::NoSuchPeer) => { + trace!(target: "stratum", "Worker no longer connected: {}", addr); + hup_peers.insert(addr.clone()); + } + Err(e) => { + warn!(target: "stratum", "Unexpected transport error: {:?}", e); + } + Ok(_) => {} + } + } + hup_peers + }; - if !hup_peers.is_empty() { - let mut workers = self.workers.write(); - for hup_peer in hup_peers { - workers.remove(&hup_peer); - } - } - } + if !hup_peers.is_empty() { + let mut workers = self.workers.write(); + for hup_peer in hup_peers { + workers.remove(&hup_peer); + } + } + } } #[derive(Clone)] pub struct SocketMetadata { - addr: SocketAddr, - // with the new version of jsonrpc-core, SocketMetadata - // won't have to implement default, so this field will not - // have to be an Option - tcp_dispatcher: Option, + addr: SocketAddr, + // with the new version of jsonrpc-core, SocketMetadata + // won't have to implement default, so this field will not + // have to be an Option + tcp_dispatcher: Option, } impl Default for SocketMetadata { - fn default() -> Self { - SocketMetadata { - addr: "0.0.0.0:0".parse().unwrap(), - tcp_dispatcher: None, - } - } + fn default() -> Self { + SocketMetadata { + addr: "0.0.0.0:0".parse().unwrap(), + tcp_dispatcher: None, + } + } } impl SocketMetadata { - pub fn addr(&self) -> &SocketAddr { - &self.addr - } + pub fn addr(&self) -> &SocketAddr { + &self.addr + } } -impl Metadata for SocketMetadata { } +impl Metadata for SocketMetadata {} pub struct PeerMetaExtractor { - tcp_dispatcher: Dispatcher, + tcp_dispatcher: Dispatcher, } impl PeerMetaExtractor { - fn new(tcp_dispatcher: Dispatcher) -> Self { - PeerMetaExtractor { - tcp_dispatcher, - } - } + fn new(tcp_dispatcher: 
Dispatcher) -> Self { + PeerMetaExtractor { tcp_dispatcher } + } } impl MetaExtractor for PeerMetaExtractor { - fn extract(&self, context: &RequestContext) -> SocketMetadata { - SocketMetadata { - addr: context.peer_addr, - tcp_dispatcher: Some(self.tcp_dispatcher.clone()), - } - } + fn extract(&self, context: &RequestContext) -> SocketMetadata { + SocketMetadata { + addr: context.peer_addr, + tcp_dispatcher: Some(self.tcp_dispatcher.clone()), + } + } } #[cfg(test)] mod tests { - use super::*; - use std::net::{SocketAddr, Shutdown}; - use std::sync::Arc; + use super::*; + use std::{ + net::{Shutdown, SocketAddr}, + sync::Arc, + }; - use tokio::{io, runtime::Runtime, timer::timeout::{self, Timeout}, net::TcpStream}; - use jsonrpc_core::futures::{Future, future}; + use jsonrpc_core::futures::{future, Future}; + use tokio::{ + io, + net::TcpStream, + runtime::Runtime, + timer::timeout::{self, Timeout}, + }; - pub struct VoidManager; + pub struct VoidManager; - impl JobDispatcher for VoidManager { - fn submit(&self, _payload: Vec) -> Result<(), Error> { - Ok(()) - } - } + impl JobDispatcher for VoidManager { + fn submit(&self, _payload: Vec) -> Result<(), Error> { + Ok(()) + } + } - fn dummy_request(addr: &SocketAddr, data: &str) -> Vec { - let mut runtime = Runtime::new().expect("Tokio Runtime should be created with no errors"); + fn dummy_request(addr: &SocketAddr, data: &str) -> Vec { + let mut runtime = Runtime::new().expect("Tokio Runtime should be created with no errors"); - let mut data_vec = data.as_bytes().to_vec(); - data_vec.extend(b"\n"); + let mut data_vec = data.as_bytes().to_vec(); + data_vec.extend(b"\n"); - let stream = TcpStream::connect(addr) - .and_then(move |stream| { - io::write_all(stream, data_vec) - }) - .and_then(|(stream, _)| { - stream.shutdown(Shutdown::Write).unwrap(); - io::read_to_end(stream, Vec::with_capacity(2048)) - }) - .and_then(|(_stream, read_buf)| { - future::ok(read_buf) - }); - let result = 
runtime.block_on(stream).expect("Runtime should run with no errors"); + let stream = TcpStream::connect(addr) + .and_then(move |stream| io::write_all(stream, data_vec)) + .and_then(|(stream, _)| { + stream.shutdown(Shutdown::Write).unwrap(); + io::read_to_end(stream, Vec::with_capacity(2048)) + }) + .and_then(|(_stream, read_buf)| future::ok(read_buf)); + let result = runtime + .block_on(stream) + .expect("Runtime should run with no errors"); - result - } + result + } - #[test] - fn can_be_started() { - let stratum = Stratum::start(&"127.0.0.1:19980".parse().unwrap(), Arc::new(VoidManager), None); - assert!(stratum.is_ok()); - } + #[test] + fn can_be_started() { + let stratum = Stratum::start( + &"127.0.0.1:19980".parse().unwrap(), + Arc::new(VoidManager), + None, + ); + assert!(stratum.is_ok()); + } - #[test] - fn records_subscriber() { - let _ = ::env_logger::try_init(); + #[test] + fn records_subscriber() { + let _ = ::env_logger::try_init(); - let addr = "127.0.0.1:19985".parse().unwrap(); - let stratum = Stratum::start(&addr, Arc::new(VoidManager), None).unwrap(); - let request = r#"{"jsonrpc": "2.0", "method": "mining.subscribe", "params": [], "id": 1}"#; - dummy_request(&addr, request); - assert_eq!(1, stratum.implementation.subscribers.read().len()); - } + let addr = "127.0.0.1:19985".parse().unwrap(); + let stratum = Stratum::start(&addr, Arc::new(VoidManager), None).unwrap(); + let request = r#"{"jsonrpc": "2.0", "method": "mining.subscribe", "params": [], "id": 1}"#; + dummy_request(&addr, request); + assert_eq!(1, stratum.implementation.subscribers.read().len()); + } - struct DummyManager { - initial_payload: String - } + struct DummyManager { + initial_payload: String, + } - impl DummyManager { - fn new() -> Arc { - Arc::new(Self::build()) - } + impl DummyManager { + fn new() -> Arc { + Arc::new(Self::build()) + } - fn build() -> DummyManager { - DummyManager { initial_payload: r#"[ "dummy payload" ]"#.to_owned() } - } + fn build() -> DummyManager { + 
DummyManager { + initial_payload: r#"[ "dummy payload" ]"#.to_owned(), + } + } - fn of_initial(mut self, new_initial: &str) -> DummyManager { - self.initial_payload = new_initial.to_owned(); - self - } - } + fn of_initial(mut self, new_initial: &str) -> DummyManager { + self.initial_payload = new_initial.to_owned(); + self + } + } - impl JobDispatcher for DummyManager { - fn initial(&self) -> Option { - Some(self.initial_payload.clone()) - } + impl JobDispatcher for DummyManager { + fn initial(&self) -> Option { + Some(self.initial_payload.clone()) + } - fn submit(&self, _payload: Vec) -> Result<(), Error> { - Ok(()) - } - } + fn submit(&self, _payload: Vec) -> Result<(), Error> { + Ok(()) + } + } - fn terminated_str(origin: &'static str) -> String { - let mut s = String::new(); - s.push_str(origin); - s.push_str("\n"); - s - } + fn terminated_str(origin: &'static str) -> String { + let mut s = String::new(); + s.push_str(origin); + s.push_str("\n"); + s + } - #[test] - fn receives_initial_payload() { - let addr = "127.0.0.1:19975".parse().unwrap(); - let _stratum = Stratum::start(&addr, DummyManager::new(), None).expect("There should be no error starting stratum"); - let request = r#"{"jsonrpc": "2.0", "method": "mining.subscribe", "params": [], "id": 2}"#; + #[test] + fn receives_initial_payload() { + let addr = "127.0.0.1:19975".parse().unwrap(); + let _stratum = Stratum::start(&addr, DummyManager::new(), None) + .expect("There should be no error starting stratum"); + let request = r#"{"jsonrpc": "2.0", "method": "mining.subscribe", "params": [], "id": 2}"#; - let response = String::from_utf8(dummy_request(&addr, request)).unwrap(); + let response = String::from_utf8(dummy_request(&addr, request)).unwrap(); - assert_eq!(terminated_str(r#"{"jsonrpc":"2.0","result":["dummy payload"],"id":2}"#), response); - } + assert_eq!( + terminated_str(r#"{"jsonrpc":"2.0","result":["dummy payload"],"id":2}"#), + response + ); + } - #[test] - fn can_authorize() { - let addr = 
"127.0.0.1:19970".parse().unwrap(); - let stratum = Stratum::start( - &addr, - Arc::new(DummyManager::build().of_initial(r#"["dummy autorize payload"]"#)), - None - ).expect("There should be no error starting stratum"); + #[test] + fn can_authorize() { + let addr = "127.0.0.1:19970".parse().unwrap(); + let stratum = Stratum::start( + &addr, + Arc::new(DummyManager::build().of_initial(r#"["dummy autorize payload"]"#)), + None, + ) + .expect("There should be no error starting stratum"); - let request = r#"{"jsonrpc": "2.0", "method": "mining.authorize", "params": ["miner1", ""], "id": 1}"#; - let response = String::from_utf8(dummy_request(&addr, request)).unwrap(); + let request = r#"{"jsonrpc": "2.0", "method": "mining.authorize", "params": ["miner1", ""], "id": 1}"#; + let response = String::from_utf8(dummy_request(&addr, request)).unwrap(); - assert_eq!(terminated_str(r#"{"jsonrpc":"2.0","result":true,"id":1}"#), response); - assert_eq!(1, stratum.implementation.workers.read().len()); - } + assert_eq!( + terminated_str(r#"{"jsonrpc":"2.0","result":true,"id":1}"#), + response + ); + assert_eq!(1, stratum.implementation.workers.read().len()); + } - #[test] - fn can_push_work() { - let _ = ::env_logger::try_init(); + #[test] + fn can_push_work() { + let _ = ::env_logger::try_init(); - let addr = "127.0.0.1:19995".parse().unwrap(); - let stratum = Stratum::start( - &addr, - Arc::new(DummyManager::build().of_initial(r#"["dummy autorize payload"]"#)), - None - ).expect("There should be no error starting stratum"); + let addr = "127.0.0.1:19995".parse().unwrap(); + let stratum = Stratum::start( + &addr, + Arc::new(DummyManager::build().of_initial(r#"["dummy autorize payload"]"#)), + None, + ) + .expect("There should be no error starting stratum"); - let mut auth_request = + let mut auth_request = r#"{"jsonrpc": "2.0", "method": "mining.authorize", "params": ["miner1", ""], "id": 1}"# .as_bytes() .to_vec(); - auth_request.extend(b"\n"); + auth_request.extend(b"\n"); - let 
auth_response = "{\"jsonrpc\":\"2.0\",\"result\":true,\"id\":1}\n"; + let auth_response = "{\"jsonrpc\":\"2.0\",\"result\":true,\"id\":1}\n"; - let mut runtime = Runtime::new().expect("Tokio Runtime should be created with no errors"); - let read_buf0 = vec![0u8; auth_response.len()]; - let read_buf1 = Vec::with_capacity(2048); - let stream = TcpStream::connect(&addr) - .and_then(move |stream| { - io::write_all(stream, auth_request) - }) - .and_then(|(stream, _)| { - io::read_exact(stream, read_buf0) - }) - .map_err(|err| panic!("{:?}", err)) - .and_then(move |(stream, read_buf0)| { - assert_eq!(String::from_utf8(read_buf0).unwrap(), auth_response); - trace!(target: "stratum", "Received authorization confirmation"); - Timeout::new(future::ok(stream), ::std::time::Duration::from_millis(100)) - }) - .map_err(|err: timeout::Error<()>| panic!("Timeout: {:?}", err)) - .and_then(move |stream| { - trace!(target: "stratum", "Pusing work to peers"); - stratum.push_work_all(r#"{ "00040008", "100500" }"#.to_owned()); - Timeout::new(future::ok(stream), ::std::time::Duration::from_millis(100)) - }) - .map_err(|err: timeout::Error<()>| panic!("Timeout: {:?}", err)) - .and_then(|stream| { - trace!(target: "stratum", "Ready to read work from server"); - stream.shutdown(Shutdown::Write).unwrap(); - io::read_to_end(stream, read_buf1) - }) - .and_then(|(_, read_buf1)| { - trace!(target: "stratum", "Received work from server"); - future::ok(read_buf1) - }); - let response = String::from_utf8( - runtime.block_on(stream).expect("Runtime should run with no errors") - ).expect("Response should be utf-8"); + let mut runtime = Runtime::new().expect("Tokio Runtime should be created with no errors"); + let read_buf0 = vec![0u8; auth_response.len()]; + let read_buf1 = Vec::with_capacity(2048); + let stream = TcpStream::connect(&addr) + .and_then(move |stream| io::write_all(stream, auth_request)) + .and_then(|(stream, _)| io::read_exact(stream, read_buf0)) + .map_err(|err| panic!("{:?}", err)) + 
.and_then(move |(stream, read_buf0)| { + assert_eq!(String::from_utf8(read_buf0).unwrap(), auth_response); + trace!(target: "stratum", "Received authorization confirmation"); + Timeout::new(future::ok(stream), ::std::time::Duration::from_millis(100)) + }) + .map_err(|err: timeout::Error<()>| panic!("Timeout: {:?}", err)) + .and_then(move |stream| { + trace!(target: "stratum", "Pusing work to peers"); + stratum.push_work_all(r#"{ "00040008", "100500" }"#.to_owned()); + Timeout::new(future::ok(stream), ::std::time::Duration::from_millis(100)) + }) + .map_err(|err: timeout::Error<()>| panic!("Timeout: {:?}", err)) + .and_then(|stream| { + trace!(target: "stratum", "Ready to read work from server"); + stream.shutdown(Shutdown::Write).unwrap(); + io::read_to_end(stream, read_buf1) + }) + .and_then(|(_, read_buf1)| { + trace!(target: "stratum", "Received work from server"); + future::ok(read_buf1) + }); + let response = String::from_utf8( + runtime + .block_on(stream) + .expect("Runtime should run with no errors"), + ) + .expect("Response should be utf-8"); - assert_eq!( + assert_eq!( "{ \"id\": 17, \"method\": \"mining.notify\", \"params\": { \"00040008\", \"100500\" } }\n", response); - } + } - #[test] - fn jsonprc_server_is_send_and_sync() { - fn is_send_and_sync() {} + #[test] + fn jsonprc_server_is_send_and_sync() { + fn is_send_and_sync() {} - is_send_and_sync::(); - } + is_send_and_sync::(); + } } diff --git a/miner/stratum/src/traits.rs b/miner/stratum/src/traits.rs index d71af1fee..c1b86fdd4 100644 --- a/miner/stratum/src/traits.rs +++ b/miner/stratum/src/traits.rs @@ -14,53 +14,58 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std; -use std::error::Error as StdError; use ethereum_types::H256; use jsonrpc_tcp_server::PushMessageError; +use std::{self, error::Error as StdError}; #[derive(Debug, Clone)] pub enum Error { - NoWork, - NoWorkers, - Io(String), - Tcp(String), - Dispatch(String), + NoWork, + NoWorkers, + Io(String), + Tcp(String), + Dispatch(String), } impl From for Error { - fn from(err: std::io::Error) -> Self { - Error::Io(err.description().to_owned()) - } + fn from(err: std::io::Error) -> Self { + Error::Io(err.description().to_owned()) + } } impl From for Error { - fn from(err: PushMessageError) -> Self { - Error::Tcp(format!("Push message error: {:?}", err)) - } + fn from(err: PushMessageError) -> Self { + Error::Tcp(format!("Push message error: {:?}", err)) + } } /// Interface that can provide pow/blockchain-specific responses for the clients pub trait JobDispatcher: Send + Sync { - // json for initial client handshake - fn initial(&self) -> Option { None } - // json for difficulty dispatch - fn difficulty(&self) -> Option { None } - // json for job update given worker_id (payload manager should split job!) - fn job(&self) -> Option { None } - // miner job result - fn submit(&self, payload: Vec) -> Result<(), Error>; + // json for initial client handshake + fn initial(&self) -> Option { + None + } + // json for difficulty dispatch + fn difficulty(&self) -> Option { + None + } + // json for job update given worker_id (payload manager should split job!) 
+ fn job(&self) -> Option { + None + } + // miner job result + fn submit(&self, payload: Vec) -> Result<(), Error>; } /// Interface that can handle requests to push job for workers pub trait PushWorkHandler: Send + Sync { - /// push the same work package for all workers (`payload`: json of pow-specific set of work specification) - fn push_work_all(&self, payload: String); + /// push the same work package for all workers (`payload`: json of pow-specific set of work specification) + fn push_work_all(&self, payload: String); } pub struct ServiceConfiguration { - pub io_path: String, - pub listen_addr: String, - pub port: u16, - pub secret: Option, + pub io_path: String, + pub listen_addr: String, + pub port: u16, + pub secret: Option, } diff --git a/miner/using-queue/src/lib.rs b/miner/using-queue/src/lib.rs index 56e99879d..cc48b2ced 100644 --- a/miner/using-queue/src/lib.rs +++ b/miner/using-queue/src/lib.rs @@ -20,257 +20,282 @@ /// usage to avoid items that were queued but never used from making it into /// the queue. pub struct UsingQueue { - /// Not yet being sealed by a miner, but if one asks for work, we'd prefer they do this. - pending: Option, - /// Currently being sealed by miners. - in_use: Vec, - /// The maximum allowable number of items in_use. - max_size: usize, + /// Not yet being sealed by a miner, but if one asks for work, we'd prefer they do this. + pending: Option, + /// Currently being sealed by miners. + in_use: Vec, + /// The maximum allowable number of items in_use. + max_size: usize, } /// Take an item or just clone it? pub enum GetAction { - /// Remove the item, faster but you can't get it back. - Take, - /// Clone the item, slower but you can get it again. - Clone, + /// Remove the item, faster but you can't get it back. + Take, + /// Clone the item, slower but you can get it again. + Clone, } impl UsingQueue { - /// Create a new struct with a maximum size of `max_size`. 
- pub fn new(max_size: usize) -> UsingQueue { - UsingQueue { - pending: None, - in_use: vec![], - max_size: max_size, - } - } + /// Create a new struct with a maximum size of `max_size`. + pub fn new(max_size: usize) -> UsingQueue { + UsingQueue { + pending: None, + in_use: vec![], + max_size: max_size, + } + } - /// Return a reference to the item at the top of the queue (or `None` if the queue is empty); - /// it doesn't constitute noting that the item is used. - pub fn peek_last_ref(&self) -> Option<&T> { - self.pending.as_ref().or(self.in_use.last()) - } + /// Return a reference to the item at the top of the queue (or `None` if the queue is empty); + /// it doesn't constitute noting that the item is used. + pub fn peek_last_ref(&self) -> Option<&T> { + self.pending.as_ref().or(self.in_use.last()) + } - /// Return a reference to the item at the top of the queue (or `None` if the queue is empty); - /// this constitutes using the item and will remain in the queue for at least another - /// `max_size` invocations of `set_pending() + use_last_ref()`. - pub fn use_last_ref(&mut self) -> Option<&T> { - if let Some(x) = self.pending.take() { - self.in_use.push(x); - if self.in_use.len() > self.max_size { - self.in_use.remove(0); - } - } - self.in_use.last() - } + /// Return a reference to the item at the top of the queue (or `None` if the queue is empty); + /// this constitutes using the item and will remain in the queue for at least another + /// `max_size` invocations of `set_pending() + use_last_ref()`. + pub fn use_last_ref(&mut self) -> Option<&T> { + if let Some(x) = self.pending.take() { + self.in_use.push(x); + if self.in_use.len() > self.max_size { + self.in_use.remove(0); + } + } + self.in_use.last() + } - /// Place an item on the end of the queue. The previously pending item will be removed - /// if `use_last_ref()` since it was set. - pub fn set_pending(&mut self, b: T) { - self.pending = Some(b); - } + /// Place an item on the end of the queue. 
The previously pending item will be removed + /// if `use_last_ref()` since it was set. + pub fn set_pending(&mut self, b: T) { + self.pending = Some(b); + } - /// Is there anything in the queue currently? - pub fn is_in_use(&self) -> bool { self.in_use.len() > 0 } + /// Is there anything in the queue currently? + pub fn is_in_use(&self) -> bool { + self.in_use.len() > 0 + } - /// Clears everything; the queue is entirely reset. - pub fn reset(&mut self) { - self.pending = None; - self.in_use.clear(); - } + /// Clears everything; the queue is entirely reset. + pub fn reset(&mut self) { + self.pending = None; + self.in_use.clear(); + } - /// Returns `Some` item which is the first that `f` returns `true` with a reference to it - /// as a parameter or `None` if no such item exists in the queue. - fn take_used_if

(&mut self, predicate: P) -> Option where P: Fn(&T) -> bool { - self.in_use.iter().position(|r| predicate(r)).map(|i| self.in_use.remove(i)) - } + /// Returns `Some` item which is the first that `f` returns `true` with a reference to it + /// as a parameter or `None` if no such item exists in the queue. + fn take_used_if

(&mut self, predicate: P) -> Option + where + P: Fn(&T) -> bool, + { + self.in_use + .iter() + .position(|r| predicate(r)) + .map(|i| self.in_use.remove(i)) + } - /// Returns `Some` item which is the first that `f` returns `true` with a reference to it - /// as a parameter or `None` if no such item exists in the queue. - fn clone_used_if

(&mut self, predicate: P) -> Option where P: Fn(&T) -> bool, T: Clone { - self.in_use.iter().find(|r| predicate(r)).cloned() - } + /// Returns `Some` item which is the first that `f` returns `true` with a reference to it + /// as a parameter or `None` if no such item exists in the queue. + fn clone_used_if

(&mut self, predicate: P) -> Option + where + P: Fn(&T) -> bool, + T: Clone, + { + self.in_use.iter().find(|r| predicate(r)).cloned() + } - /// Fork-function for `take_used_if` and `clone_used_if`. - pub fn get_used_if

(&mut self, action: GetAction, predicate: P) -> Option where P: Fn(&T) -> bool, T: Clone { - match action { - GetAction::Take => self.take_used_if(predicate), - GetAction::Clone => self.clone_used_if(predicate), - } - } + /// Fork-function for `take_used_if` and `clone_used_if`. + pub fn get_used_if

(&mut self, action: GetAction, predicate: P) -> Option + where + P: Fn(&T) -> bool, + T: Clone, + { + match action { + GetAction::Take => self.take_used_if(predicate), + GetAction::Clone => self.clone_used_if(predicate), + } + } - /// Returns a clone of the pending block if `f` returns `true` with a reference to it as - /// a parameter, otherwise `None`. - /// - /// If pending block is not available will clone the first of the used blocks that match the predicate. - pub fn get_pending_if

(&mut self, predicate: P) -> Option where P: Fn(&T) -> bool, T: Clone { - // a bit clumsy - TODO: think about a nicer way of expressing this. - if let Some(ref x) = self.pending { - if predicate(x) { - Some(x.clone()) - } else { - None - } - } else { - self.in_use.last().into_iter().filter(|x| predicate(x)).next().cloned() - } - } + /// Returns a clone of the pending block if `f` returns `true` with a reference to it as + /// a parameter, otherwise `None`. + /// + /// If pending block is not available will clone the first of the used blocks that match the predicate. + pub fn get_pending_if

(&mut self, predicate: P) -> Option + where + P: Fn(&T) -> bool, + T: Clone, + { + // a bit clumsy - TODO: think about a nicer way of expressing this. + if let Some(ref x) = self.pending { + if predicate(x) { + Some(x.clone()) + } else { + None + } + } else { + self.in_use + .last() + .into_iter() + .filter(|x| predicate(x)) + .next() + .cloned() + } + } } #[test] fn should_not_find_when_pushed() { - let mut q = UsingQueue::new(2); - q.set_pending(1); - assert!(q.take_used_if(|i| i == &1).is_none()); + let mut q = UsingQueue::new(2); + q.set_pending(1); + assert!(q.take_used_if(|i| i == &1).is_none()); } #[test] fn should_not_find_when_pushed_with_clone() { - let mut q = UsingQueue::new(2); - q.set_pending(1); - assert!(q.clone_used_if(|i| i == &1).is_none()); + let mut q = UsingQueue::new(2); + q.set_pending(1); + assert!(q.clone_used_if(|i| i == &1).is_none()); } #[test] fn should_find_when_pushed_and_used() { - let mut q = UsingQueue::new(2); - q.set_pending(1); - q.use_last_ref(); - assert!(q.take_used_if(|i| i == &1).unwrap() == 1); + let mut q = UsingQueue::new(2); + q.set_pending(1); + q.use_last_ref(); + assert!(q.take_used_if(|i| i == &1).unwrap() == 1); } #[test] fn should_have_same_semantics_for_get_take_clone() { - let mut q = UsingQueue::new(2); - q.set_pending(1); - assert!(q.get_used_if(GetAction::Clone, |i| i == &1).is_none()); - assert!(q.get_used_if(GetAction::Take, |i| i == &1).is_none()); - q.use_last_ref(); - assert!(q.get_used_if(GetAction::Clone, |i| i == &1).unwrap() == 1); - assert!(q.get_used_if(GetAction::Clone, |i| i == &1).unwrap() == 1); - assert!(q.get_used_if(GetAction::Take, |i| i == &1).unwrap() == 1); - assert!(q.get_used_if(GetAction::Clone, |i| i == &1).is_none()); - assert!(q.get_used_if(GetAction::Take, |i| i == &1).is_none()); + let mut q = UsingQueue::new(2); + q.set_pending(1); + assert!(q.get_used_if(GetAction::Clone, |i| i == &1).is_none()); + assert!(q.get_used_if(GetAction::Take, |i| i == &1).is_none()); + 
q.use_last_ref(); + assert!(q.get_used_if(GetAction::Clone, |i| i == &1).unwrap() == 1); + assert!(q.get_used_if(GetAction::Clone, |i| i == &1).unwrap() == 1); + assert!(q.get_used_if(GetAction::Take, |i| i == &1).unwrap() == 1); + assert!(q.get_used_if(GetAction::Clone, |i| i == &1).is_none()); + assert!(q.get_used_if(GetAction::Take, |i| i == &1).is_none()); } #[test] fn should_find_when_pushed_and_used_with_clone() { - let mut q = UsingQueue::new(2); - q.set_pending(1); - q.use_last_ref(); - assert!(q.clone_used_if(|i| i == &1).unwrap() == 1); + let mut q = UsingQueue::new(2); + q.set_pending(1); + q.use_last_ref(); + assert!(q.clone_used_if(|i| i == &1).unwrap() == 1); } #[test] fn should_not_find_again_when_pushed_and_taken() { - let mut q = UsingQueue::new(2); - q.set_pending(1); - q.use_last_ref(); - assert!(q.take_used_if(|i| i == &1).unwrap() == 1); - assert!(q.clone_used_if(|i| i == &1).is_none()); + let mut q = UsingQueue::new(2); + q.set_pending(1); + q.use_last_ref(); + assert!(q.take_used_if(|i| i == &1).unwrap() == 1); + assert!(q.clone_used_if(|i| i == &1).is_none()); } #[test] fn should_find_again_when_pushed_and_cloned() { - let mut q = UsingQueue::new(2); - q.set_pending(1); - q.use_last_ref(); - assert!(q.clone_used_if(|i| i == &1).unwrap() == 1); - assert!(q.clone_used_if(|i| i == &1).unwrap() == 1); - assert!(q.take_used_if(|i| i == &1).unwrap() == 1); + let mut q = UsingQueue::new(2); + q.set_pending(1); + q.use_last_ref(); + assert!(q.clone_used_if(|i| i == &1).unwrap() == 1); + assert!(q.clone_used_if(|i| i == &1).unwrap() == 1); + assert!(q.take_used_if(|i| i == &1).unwrap() == 1); } #[test] fn should_find_when_others_used() { - let mut q = UsingQueue::new(2); - q.set_pending(1); - q.use_last_ref(); - q.set_pending(2); - q.use_last_ref(); - assert!(q.take_used_if(|i| i == &1).is_some()); + let mut q = UsingQueue::new(2); + q.set_pending(1); + q.use_last_ref(); + q.set_pending(2); + q.use_last_ref(); + assert!(q.take_used_if(|i| i == 
&1).is_some()); } #[test] fn should_not_find_when_too_many_used() { - let mut q = UsingQueue::new(1); - q.set_pending(1); - q.use_last_ref(); - q.set_pending(2); - q.use_last_ref(); - assert!(q.take_used_if(|i| i == &1).is_none()); + let mut q = UsingQueue::new(1); + q.set_pending(1); + q.use_last_ref(); + q.set_pending(2); + q.use_last_ref(); + assert!(q.take_used_if(|i| i == &1).is_none()); } #[test] fn should_not_find_when_not_used_and_then_pushed() { - let mut q = UsingQueue::new(3); - q.set_pending(1); - q.set_pending(2); - q.use_last_ref(); - assert!(q.take_used_if(|i| i == &1).is_none()); + let mut q = UsingQueue::new(3); + q.set_pending(1); + q.set_pending(2); + q.use_last_ref(); + assert!(q.take_used_if(|i| i == &1).is_none()); } #[test] fn should_peek_correctly_after_push() { - let mut q = UsingQueue::new(3); - q.set_pending(1); - assert_eq!(q.peek_last_ref(), Some(&1)); - q.set_pending(2); - assert_eq!(q.peek_last_ref(), Some(&2)); + let mut q = UsingQueue::new(3); + q.set_pending(1); + assert_eq!(q.peek_last_ref(), Some(&1)); + q.set_pending(2); + assert_eq!(q.peek_last_ref(), Some(&2)); } #[test] fn should_inspect_correctly() { - let mut q = UsingQueue::new(3); - q.set_pending(1); - assert_eq!(q.use_last_ref(), Some(&1)); - assert_eq!(q.peek_last_ref(), Some(&1)); - q.set_pending(2); - assert_eq!(q.use_last_ref(), Some(&2)); - assert_eq!(q.peek_last_ref(), Some(&2)); + let mut q = UsingQueue::new(3); + q.set_pending(1); + assert_eq!(q.use_last_ref(), Some(&1)); + assert_eq!(q.peek_last_ref(), Some(&1)); + q.set_pending(2); + assert_eq!(q.use_last_ref(), Some(&2)); + assert_eq!(q.peek_last_ref(), Some(&2)); } #[test] fn should_not_find_when_not_used_peeked_and_then_pushed() { - let mut q = UsingQueue::new(3); - q.set_pending(1); - q.peek_last_ref(); - q.set_pending(2); - q.use_last_ref(); - assert!(q.take_used_if(|i| i == &1).is_none()); + let mut q = UsingQueue::new(3); + q.set_pending(1); + q.peek_last_ref(); + q.set_pending(2); + q.use_last_ref(); + 
assert!(q.take_used_if(|i| i == &1).is_none()); } #[test] fn should_pop_used() { - let mut q = UsingQueue::new(3); - q.set_pending(1); - q.use_last_ref(); - let popped = q.get_pending_if(|i| i == &1); - assert_eq!(popped, Some(1)); + let mut q = UsingQueue::new(3); + q.set_pending(1); + q.use_last_ref(); + let popped = q.get_pending_if(|i| i == &1); + assert_eq!(popped, Some(1)); } #[test] fn should_not_pop_last_pending() { - let mut q = UsingQueue::new(3); - q.set_pending(1); - assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); - assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); + let mut q = UsingQueue::new(3); + q.set_pending(1); + assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); + assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); } #[test] fn should_not_pop_unused_before_used() { - let mut q = UsingQueue::new(3); - q.set_pending(1); - q.set_pending(2); - let popped = q.get_pending_if(|i| i == &1); - assert_eq!(popped, None); + let mut q = UsingQueue::new(3); + q.set_pending(1); + q.set_pending(2); + let popped = q.get_pending_if(|i| i == &1); + assert_eq!(popped, None); } #[test] fn should_not_remove_used_popped() { - let mut q = UsingQueue::new(3); - q.set_pending(1); - q.use_last_ref(); - assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); - assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); + let mut q = UsingQueue::new(3); + q.set_pending(1); + q.use_last_ref(); + assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); + assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); } diff --git a/parity-clib/src/java.rs b/parity-clib/src/java.rs index 98969b1d1..1c8751341 100644 --- a/parity-clib/src/java.rs +++ b/parity-clib/src/java.rs @@ -14,173 +14,217 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::{mem, ptr}; -use std::ffi::c_void; -use std::sync::Arc; +use std::{ffi::c_void, mem, ptr, sync::Arc}; -use {Callback, parity_config_from_cli, parity_destroy, parity_rpc_worker, parity_start, parity_set_logger, - parity_unsubscribe_ws, parity_ws_worker, ParityParams}; +use parity_config_from_cli; +use parity_destroy; +use parity_rpc_worker; +use parity_set_logger; +use parity_start; +use parity_unsubscribe_ws; +use parity_ws_worker; +use Callback; +use ParityParams; -use jni::{JavaVM, JNIEnv}; -use jni::objects::{JClass, JString, JObject, JValue, GlobalRef}; -use jni::sys::{jlong, jobjectArray, va_list}; +use jni::{ + objects::{GlobalRef, JClass, JObject, JString, JValue}, + sys::{jlong, jobjectArray, va_list}, + JNIEnv, JavaVM, +}; use parity_ethereum::RunningClient; type CheckedQuery<'a> = (&'a RunningClient, String, JavaVM, GlobalRef); // Creates a Java callback to a static method named `void callback(Object)` struct JavaCallback<'a> { - jvm: JavaVM, - callback: GlobalRef, - method_name: &'a str, - method_descriptor: &'a str, + jvm: JavaVM, + callback: GlobalRef, + method_name: &'a str, + method_descriptor: &'a str, } impl<'a> JavaCallback<'a> { - fn new(jvm: JavaVM, callback: GlobalRef) -> Self { - Self { - jvm, - callback, - method_name: "callback", - method_descriptor: "(Ljava/lang/Object;)V", - } - } + fn new(jvm: JavaVM, callback: GlobalRef) -> Self { + Self { + jvm, + callback, + method_name: "callback", + method_descriptor: "(Ljava/lang/Object;)V", + } + } } impl<'a> Callback for JavaCallback<'a> { - fn call(&self, msg: &str) { - let env = self.jvm.attach_current_thread().expect("JavaVM should have an environment; qed"); - let java_str = env.new_string(msg.to_string()).expect("Rust String is valid JString; qed"); - let val = &[JValue::Object(JObject::from(java_str))]; - env.call_method(self.callback.as_obj(), self.method_name, self.method_descriptor, val).expect( - "The callback must be an instance method and be named \"void callback(Object)\"; 
qed)"); - } + fn call(&self, msg: &str) { + let env = self + .jvm + .attach_current_thread() + .expect("JavaVM should have an environment; qed"); + let java_str = env + .new_string(msg.to_string()) + .expect("Rust String is valid JString; qed"); + let val = &[JValue::Object(JObject::from(java_str))]; + env.call_method( + self.callback.as_obj(), + self.method_name, + self.method_descriptor, + val, + ) + .expect( + "The callback must be an instance method and be named \"void callback(Object)\"; qed)", + ); + } } #[no_mangle] -pub unsafe extern "system" fn Java_io_parity_ethereum_Parity_configFromCli(env: JNIEnv, _: JClass, cli: jobjectArray) -> jlong { - let cli_len = env.get_array_length(cli).expect("invalid Java bindings") as usize; +pub unsafe extern "system" fn Java_io_parity_ethereum_Parity_configFromCli( + env: JNIEnv, + _: JClass, + cli: jobjectArray, +) -> jlong { + let cli_len = env.get_array_length(cli).expect("invalid Java bindings") as usize; - let mut jni_strings = Vec::with_capacity(cli_len); - let mut opts = Vec::with_capacity(cli_len); - let mut opts_lens = Vec::with_capacity(cli_len); + let mut jni_strings = Vec::with_capacity(cli_len); + let mut opts = Vec::with_capacity(cli_len); + let mut opts_lens = Vec::with_capacity(cli_len); - for n in 0..cli_len as i32 { - let elem = env.get_object_array_element(cli, n).expect("invalid Java bindings"); - let elem_str: JString = elem.into(); - match env.get_string(elem_str) { - Ok(s) => { - opts.push(s.as_ptr()); - opts_lens.push(s.to_bytes().len()); - jni_strings.push(s); - } - Err(err) => { - let _ = env.throw_new("java/lang/Exception", err.to_string()); - return 0 - } - }; - } + for n in 0..cli_len as i32 { + let elem = env + .get_object_array_element(cli, n) + .expect("invalid Java bindings"); + let elem_str: JString = elem.into(); + match env.get_string(elem_str) { + Ok(s) => { + opts.push(s.as_ptr()); + opts_lens.push(s.to_bytes().len()); + jni_strings.push(s); + } + Err(err) => { + let _ = 
env.throw_new("java/lang/Exception", err.to_string()); + return 0; + } + }; + } - let mut out = ptr::null_mut(); - match parity_config_from_cli(opts.as_ptr(), opts_lens.as_ptr(), cli_len, &mut out) { - 0 => out as jlong, - _ => { - let _ = env.throw_new("java/lang/Exception", "failed to create config object"); - 0 - }, - } + let mut out = ptr::null_mut(); + match parity_config_from_cli(opts.as_ptr(), opts_lens.as_ptr(), cli_len, &mut out) { + 0 => out as jlong, + _ => { + let _ = env.throw_new("java/lang/Exception", "failed to create config object"); + 0 + } + } } #[no_mangle] pub unsafe extern "system" fn Java_io_parity_ethereum_Parity_build( - env: JNIEnv, - _: JClass, - config: va_list, - logger_mode: JString, - logger_file: JString + env: JNIEnv, + _: JClass, + config: va_list, + logger_mode: JString, + logger_file: JString, ) -> jlong { - let mut params = ParityParams { - configuration: config, - .. mem::zeroed() - }; + let mut params = ParityParams { + configuration: config, + ..mem::zeroed() + }; - let logger_mode: String = env.get_string(logger_mode).expect("valid JString; qed").into(); - let logger_file: String = env.get_string(logger_file).expect("valid JString; qed").into(); + let logger_mode: String = env + .get_string(logger_mode) + .expect("valid JString; qed") + .into(); + let logger_file: String = env + .get_string(logger_file) + .expect("valid JString; qed") + .into(); - parity_set_logger(logger_mode.as_ptr(), logger_mode.as_bytes().len(), logger_file.as_ptr(), - logger_file.as_bytes().len(), &mut params.logger); + parity_set_logger( + logger_mode.as_ptr(), + logger_mode.as_bytes().len(), + logger_file.as_ptr(), + logger_file.as_bytes().len(), + &mut params.logger, + ); - let mut out = ptr::null_mut(); - match parity_start(¶ms, &mut out) { - 0 => out as jlong, - _ => { - let _ = env.throw_new("java/lang/Exception", "failed to start Parity"); - 0 - } - } + let mut out = ptr::null_mut(); + match parity_start(¶ms, &mut out) { + 0 => out as jlong, + _ 
=> { + let _ = env.throw_new("java/lang/Exception", "failed to start Parity"); + 0 + } + } } #[no_mangle] -pub unsafe extern "system" fn Java_io_parity_ethereum_Parity_destroy(_env: JNIEnv, _: JClass, parity: va_list) { - parity_destroy(parity); +pub unsafe extern "system" fn Java_io_parity_ethereum_Parity_destroy( + _env: JNIEnv, + _: JClass, + parity: va_list, +) { + parity_destroy(parity); } -unsafe fn java_query_checker<'a>(client: va_list, rpc: JString, callback: JObject, env: &JNIEnv<'a>) --> Result, String> { - let query: String = env.get_string(rpc) - .map(Into::into) - .map_err(|e| e.to_string())?; +unsafe fn java_query_checker<'a>( + client: va_list, + rpc: JString, + callback: JObject, + env: &JNIEnv<'a>, +) -> Result, String> { + let query: String = env + .get_string(rpc) + .map(Into::into) + .map_err(|e| e.to_string())?; - let client: &RunningClient = &*(client as *const RunningClient); - let jvm = env.get_java_vm().map_err(|e| e.to_string())?; - let global_ref = env.new_global_ref(callback).map_err(|e| e.to_string())?; - Ok((client, query, jvm, global_ref)) + let client: &RunningClient = &*(client as *const RunningClient); + let jvm = env.get_java_vm().map_err(|e| e.to_string())?; + let global_ref = env.new_global_ref(callback).map_err(|e| e.to_string())?; + Ok((client, query, jvm, global_ref)) } #[no_mangle] pub unsafe extern "system" fn Java_io_parity_ethereum_Parity_rpcQueryNative( - env: JNIEnv, - _: JClass, - parity: va_list, - rpc: JString, - timeout_ms: jlong, - callback: JObject, - ) -{ - let _ = java_query_checker(parity, rpc, callback, &env) - .map(|(client, query, jvm, global_ref)| { - let callback = Arc::new(JavaCallback::new(jvm, global_ref)); - parity_rpc_worker(client, &query, callback, timeout_ms as u64); - }) - .map_err(|e| { - let _ = env.throw_new("java/lang/Exception", e); - }); + env: JNIEnv, + _: JClass, + parity: va_list, + rpc: JString, + timeout_ms: jlong, + callback: JObject, +) { + let _ = java_query_checker(parity, rpc, 
callback, &env) + .map(|(client, query, jvm, global_ref)| { + let callback = Arc::new(JavaCallback::new(jvm, global_ref)); + parity_rpc_worker(client, &query, callback, timeout_ms as u64); + }) + .map_err(|e| { + let _ = env.throw_new("java/lang/Exception", e); + }); } #[no_mangle] pub unsafe extern "system" fn Java_io_parity_ethereum_Parity_subscribeWebSocketNative( - env: JNIEnv, - _: JClass, - parity: va_list, - rpc: JString, - callback: JObject, - ) -> va_list { - - java_query_checker(parity, rpc, callback, &env) - .map(move |(client, query, jvm, global_ref)| { - let callback = Arc::new(JavaCallback::new(jvm, global_ref)); - parity_ws_worker(client, &query, callback) as va_list - }) - .unwrap_or_else(|e| { - let _ = env.throw_new("java/lang/Exception", e); - ptr::null_mut() - }) + env: JNIEnv, + _: JClass, + parity: va_list, + rpc: JString, + callback: JObject, +) -> va_list { + java_query_checker(parity, rpc, callback, &env) + .map(move |(client, query, jvm, global_ref)| { + let callback = Arc::new(JavaCallback::new(jvm, global_ref)); + parity_ws_worker(client, &query, callback) as va_list + }) + .unwrap_or_else(|e| { + let _ = env.throw_new("java/lang/Exception", e); + ptr::null_mut() + }) } #[no_mangle] pub unsafe extern "system" fn Java_io_parity_ethereum_Parity_unsubscribeWebSocketNative( - _: JNIEnv, - _: JClass, - session: va_list) { - parity_unsubscribe_ws(session as *const c_void); + _: JNIEnv, + _: JClass, + session: va_list, +) { + parity_unsubscribe_ws(session as *const c_void); } diff --git a/parity-clib/src/lib.rs b/parity-clib/src/lib.rs index bbb60ec2d..cf014e445 100644 --- a/parity-clib/src/lib.rs +++ b/parity-clib/src/lib.rs @@ -29,14 +29,16 @@ extern crate jni; #[cfg(feature = "jni")] mod java; -use std::ffi::CString; -use std::os::raw::{c_char, c_void, c_int}; -use std::{panic, ptr, slice, str, thread}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + ffi::CString, + os::raw::{c_char, c_int, c_void}, + panic, ptr, slice, str, + 
sync::Arc, + thread, + time::Duration, +}; -use futures::{Future, Stream}; -use futures::sync::mpsc; +use futures::{sync::mpsc, Future, Stream}; use parity_ethereum::{PubSubSession, RunningClient}; use tokio_current_thread::CurrentThread; @@ -44,261 +46,293 @@ type CCallback = Option; type CheckedQuery<'a> = (&'a RunningClient, &'static str); pub mod error { - pub const EMPTY: &str = r#"{"jsonrpc":"2.0","result":"null","id":1}"#; - pub const TIMEOUT: &str = r#"{"jsonrpc":"2.0","result":"timeout","id":1}"#; - pub const SUBSCRIBE: &str = r#"{"jsonrpc":"2.0","result":"subcribe_fail","id":1}"#; + pub const EMPTY: &str = r#"{"jsonrpc":"2.0","result":"null","id":1}"#; + pub const TIMEOUT: &str = r#"{"jsonrpc":"2.0","result":"timeout","id":1}"#; + pub const SUBSCRIBE: &str = r#"{"jsonrpc":"2.0","result":"subcribe_fail","id":1}"#; } #[repr(C)] pub struct ParityParams { - pub configuration: *mut c_void, - pub on_client_restart_cb: CCallback, - pub on_client_restart_cb_custom: *mut c_void, - pub logger: *mut c_void + pub configuration: *mut c_void, + pub on_client_restart_cb: CCallback, + pub on_client_restart_cb_custom: *mut c_void, + pub logger: *mut c_void, } /// Trait representing a callback that passes a string pub(crate) trait Callback: Send + Sync { - fn call(&self, msg: &str); + fn call(&self, msg: &str); } // Internal structure for handling callbacks that get passed a string. 
struct CallbackStr { - user_data: *mut c_void, - function: CCallback, + user_data: *mut c_void, + function: CCallback, } unsafe impl Send for CallbackStr {} unsafe impl Sync for CallbackStr {} impl Callback for CallbackStr { - fn call(&self, msg: &str) { - if let Some(ref cb) = self.function { - let cstr = CString::new(msg).expect("valid string with no nul bytes in the middle; qed").into_raw(); - cb(self.user_data, cstr, msg.len()) - } - } + fn call(&self, msg: &str) { + if let Some(ref cb) = self.function { + let cstr = CString::new(msg) + .expect("valid string with no nul bytes in the middle; qed") + .into_raw(); + cb(self.user_data, cstr, msg.len()) + } + } } #[no_mangle] -pub unsafe extern fn parity_config_from_cli( - args: *const *const c_char, - args_lens: *const usize, - len: usize, - output: *mut *mut c_void +pub unsafe extern "C" fn parity_config_from_cli( + args: *const *const c_char, + args_lens: *const usize, + len: usize, + output: *mut *mut c_void, ) -> c_int { - panic::catch_unwind(|| { - *output = ptr::null_mut(); + panic::catch_unwind(|| { + *output = ptr::null_mut(); - let args = { - let arg_ptrs = slice::from_raw_parts(args, len); - let arg_lens = slice::from_raw_parts(args_lens, len); + let args = { + let arg_ptrs = slice::from_raw_parts(args, len); + let arg_lens = slice::from_raw_parts(args_lens, len); - let mut args = Vec::with_capacity(len + 1); - args.push("parity".to_owned()); + let mut args = Vec::with_capacity(len + 1); + args.push("parity".to_owned()); - for (&arg, &len) in arg_ptrs.iter().zip(arg_lens.iter()) { - let string = slice::from_raw_parts(arg as *const u8, len); - match String::from_utf8(string.to_owned()) { - Ok(a) => args.push(a), - Err(_) => return 1, - }; - } - args - }; + for (&arg, &len) in arg_ptrs.iter().zip(arg_lens.iter()) { + let string = slice::from_raw_parts(arg as *const u8, len); + match String::from_utf8(string.to_owned()) { + Ok(a) => args.push(a), + Err(_) => return 1, + }; + } + args + }; - match 
parity_ethereum::Configuration::parse_cli(&args) { - Ok(mut cfg) => { - // Always disable the auto-updater when used as a library. - cfg.args.arg_auto_update = "none".to_owned(); + match parity_ethereum::Configuration::parse_cli(&args) { + Ok(mut cfg) => { + // Always disable the auto-updater when used as a library. + cfg.args.arg_auto_update = "none".to_owned(); - let cfg = Box::into_raw(Box::new(cfg)); - *output = cfg as *mut _; - 0 - }, - Err(_) => { - 1 - }, - } - }).unwrap_or(1) + let cfg = Box::into_raw(Box::new(cfg)); + *output = cfg as *mut _; + 0 + } + Err(_) => 1, + } + }) + .unwrap_or(1) } #[no_mangle] -pub unsafe extern fn parity_config_destroy(cfg: *mut c_void) { - let _ = panic::catch_unwind(|| { - let _cfg = Box::from_raw(cfg as *mut parity_ethereum::Configuration); - }); +pub unsafe extern "C" fn parity_config_destroy(cfg: *mut c_void) { + let _ = panic::catch_unwind(|| { + let _cfg = Box::from_raw(cfg as *mut parity_ethereum::Configuration); + }); } #[no_mangle] -pub unsafe extern fn parity_start(cfg: *const ParityParams, output: *mut *mut c_void) -> c_int { - panic::catch_unwind(|| { - *output = ptr::null_mut(); - let cfg: &ParityParams = &*cfg; - let logger = Arc::from_raw(cfg.logger as *mut parity_ethereum::RotatingLogger); - let config = Box::from_raw(cfg.configuration as *mut parity_ethereum::Configuration); +pub unsafe extern "C" fn parity_start(cfg: *const ParityParams, output: *mut *mut c_void) -> c_int { + panic::catch_unwind(|| { + *output = ptr::null_mut(); + let cfg: &ParityParams = &*cfg; + let logger = Arc::from_raw(cfg.logger as *mut parity_ethereum::RotatingLogger); + let config = Box::from_raw(cfg.configuration as *mut parity_ethereum::Configuration); - let on_client_restart_cb = { - let cb = CallbackStr { - user_data: cfg.on_client_restart_cb_custom, - function: cfg.on_client_restart_cb, - }; - move |new_chain: String| { cb.call(&new_chain); } - }; + let on_client_restart_cb = { + let cb = CallbackStr { + user_data: 
cfg.on_client_restart_cb_custom, + function: cfg.on_client_restart_cb, + }; + move |new_chain: String| { + cb.call(&new_chain); + } + }; - let action = match parity_ethereum::start(*config, logger, on_client_restart_cb, || {}) { - Ok(action) => action, - Err(_) => return 1, - }; + let action = match parity_ethereum::start(*config, logger, on_client_restart_cb, || {}) { + Ok(action) => action, + Err(_) => return 1, + }; - match action { - parity_ethereum::ExecutionAction::Instant(Some(s)) => { println!("{}", s); 0 }, - parity_ethereum::ExecutionAction::Instant(None) => 0, - parity_ethereum::ExecutionAction::Running(client) => { - *output = Box::into_raw(Box::new(client)) as *mut c_void; - 0 - } - } - }).unwrap_or(1) + match action { + parity_ethereum::ExecutionAction::Instant(Some(s)) => { + println!("{}", s); + 0 + } + parity_ethereum::ExecutionAction::Instant(None) => 0, + parity_ethereum::ExecutionAction::Running(client) => { + *output = Box::into_raw(Box::new(client)) as *mut c_void; + 0 + } + } + }) + .unwrap_or(1) } #[no_mangle] -pub unsafe extern fn parity_destroy(client: *mut c_void) { - let _ = panic::catch_unwind(|| { - let client = Box::from_raw(client as *mut RunningClient); - client.shutdown(); - }); +pub unsafe extern "C" fn parity_destroy(client: *mut c_void) { + let _ = panic::catch_unwind(|| { + let client = Box::from_raw(client as *mut RunningClient); + client.shutdown(); + }); } #[no_mangle] -pub unsafe extern fn parity_rpc( - client: *const c_void, - query: *const c_char, - len: usize, - timeout_ms: usize, - callback: CCallback, - user_data: *mut c_void, +pub unsafe extern "C" fn parity_rpc( + client: *const c_void, + query: *const c_char, + len: usize, + timeout_ms: usize, + callback: CCallback, + user_data: *mut c_void, ) -> c_int { - panic::catch_unwind(|| { - if let Some((client, query)) = parity_rpc_query_checker(client, query, len) { - let callback = Arc::new(CallbackStr {user_data, function: callback} ); - parity_rpc_worker(client, query, 
callback, timeout_ms as u64); - 0 - } else { - 1 - } - }).unwrap_or(1) + panic::catch_unwind(|| { + if let Some((client, query)) = parity_rpc_query_checker(client, query, len) { + let callback = Arc::new(CallbackStr { + user_data, + function: callback, + }); + parity_rpc_worker(client, query, callback, timeout_ms as u64); + 0 + } else { + 1 + } + }) + .unwrap_or(1) } #[no_mangle] -pub unsafe extern fn parity_subscribe_ws( - client: *const c_void, - query: *const c_char, - len: usize, - callback: CCallback, - user_data: *mut c_void, +pub unsafe extern "C" fn parity_subscribe_ws( + client: *const c_void, + query: *const c_char, + len: usize, + callback: CCallback, + user_data: *mut c_void, ) -> *const c_void { - panic::catch_unwind(|| { - if let Some((client, query)) = parity_rpc_query_checker(client, query, len) { - let callback = Arc::new(CallbackStr { user_data, function: callback}); - parity_ws_worker(client, query, callback) - } else { - ptr::null() - } - }) - .unwrap_or(ptr::null()) + panic::catch_unwind(|| { + if let Some((client, query)) = parity_rpc_query_checker(client, query, len) { + let callback = Arc::new(CallbackStr { + user_data, + function: callback, + }); + parity_ws_worker(client, query, callback) + } else { + ptr::null() + } + }) + .unwrap_or(ptr::null()) } #[no_mangle] -pub unsafe extern fn parity_unsubscribe_ws(session: *const c_void) { - let _ = panic::catch_unwind(|| { - let _session = Arc::from_raw(session as *const PubSubSession); - }); +pub unsafe extern "C" fn parity_unsubscribe_ws(session: *const c_void) { + let _ = panic::catch_unwind(|| { + let _session = Arc::from_raw(session as *const PubSubSession); + }); } #[no_mangle] -pub extern fn parity_set_panic_hook(callback: CCallback, param: *mut c_void) { - let cb = CallbackStr {user_data: param, function: callback}; - panic_hook::set_with(move |panic_msg| { - cb.call(panic_msg); - }); +pub extern "C" fn parity_set_panic_hook(callback: CCallback, param: *mut c_void) { + let cb = CallbackStr 
{ + user_data: param, + function: callback, + }; + panic_hook::set_with(move |panic_msg| { + cb.call(panic_msg); + }); } #[no_mangle] -pub unsafe extern fn parity_set_logger( - logger_mode: *const u8, - logger_mode_len: usize, - log_file: *const u8, - log_file_len: usize, - logger: *mut *mut c_void) { +pub unsafe extern "C" fn parity_set_logger( + logger_mode: *const u8, + logger_mode_len: usize, + log_file: *const u8, + log_file_len: usize, + logger: *mut *mut c_void, +) { + let mut logger_cfg = parity_ethereum::LoggerConfig::default(); + logger_cfg.mode = + String::from_utf8(slice::from_raw_parts(logger_mode, logger_mode_len).to_owned()).ok(); - let mut logger_cfg = parity_ethereum::LoggerConfig::default(); - logger_cfg.mode = String::from_utf8(slice::from_raw_parts(logger_mode, logger_mode_len).to_owned()).ok(); + // Make sure an empty string is not constructed as file name (to prevent panic) + if log_file_len != 0 && !log_file.is_null() { + logger_cfg.file = + String::from_utf8(slice::from_raw_parts(log_file, log_file_len).to_owned()).ok(); + } - // Make sure an empty string is not constructed as file name (to prevent panic) - if log_file_len != 0 && !log_file.is_null() { - logger_cfg.file = String::from_utf8(slice::from_raw_parts(log_file, log_file_len).to_owned()).ok(); - } - - *logger = Arc::into_raw(parity_ethereum::setup_log(&logger_cfg).expect("Logger initialized only once; qed")) as *mut _; + *logger = Arc::into_raw( + parity_ethereum::setup_log(&logger_cfg).expect("Logger initialized only once; qed"), + ) as *mut _; } // WebSocket event loop fn parity_ws_worker(client: &RunningClient, query: &str, callback: Arc) -> *const c_void { - let (tx, mut rx) = mpsc::channel(1); - let session = Arc::new(PubSubSession::new(tx)); - let query_future = client.rpc_query(query, Some(session.clone())); - let weak_session = Arc::downgrade(&session); - let _handle = thread::Builder::new() - .name("ws-subscriber".into()) - .spawn(move || { - // Wait for subscription ID - 
// Note this may block forever and be can't destroyed using the session object - // However, this will likely timeout or be catched the RPC layer - if let Ok(Some(response)) = query_future.wait() { - callback.call(&response); - } else { - callback.call(error::SUBSCRIBE); - return; - } + let (tx, mut rx) = mpsc::channel(1); + let session = Arc::new(PubSubSession::new(tx)); + let query_future = client.rpc_query(query, Some(session.clone())); + let weak_session = Arc::downgrade(&session); + let _handle = thread::Builder::new() + .name("ws-subscriber".into()) + .spawn(move || { + // Wait for subscription ID + // Note this may block forever and be can't destroyed using the session object + // However, this will likely timeout or be catched the RPC layer + if let Ok(Some(response)) = query_future.wait() { + callback.call(&response); + } else { + callback.call(error::SUBSCRIBE); + return; + } - while weak_session.upgrade().map_or(0, |session| Arc::strong_count(&session)) > 1 { - for response in rx.by_ref().wait() { - if let Ok(r) = response { - callback.call(&r); - } - } - } - }) - .expect("rpc-subscriber thread shouldn't fail; qed"); - Arc::into_raw(session) as *const c_void + while weak_session + .upgrade() + .map_or(0, |session| Arc::strong_count(&session)) + > 1 + { + for response in rx.by_ref().wait() { + if let Ok(r) = response { + callback.call(&r); + } + } + } + }) + .expect("rpc-subscriber thread shouldn't fail; qed"); + Arc::into_raw(session) as *const c_void } // RPC event loop that runs for at most `timeout_ms` -fn parity_rpc_worker(client: &RunningClient, query: &str, callback: Arc, timeout_ms: u64) { - let cb = callback.clone(); - let query = client.rpc_query(query, None).map(move |response| { - let response = response.unwrap_or_else(|| error::EMPTY.to_string()); - callback.call(&response); - }); +fn parity_rpc_worker( + client: &RunningClient, + query: &str, + callback: Arc, + timeout_ms: u64, +) { + let cb = callback.clone(); + let query = 
client.rpc_query(query, None).map(move |response| { + let response = response.unwrap_or_else(|| error::EMPTY.to_string()); + callback.call(&response); + }); - let _handle = thread::Builder::new() - .name("rpc_query".to_string()) - .spawn(move || { - let mut current_thread = CurrentThread::new(); - current_thread.spawn(query); - let _ = current_thread - .run_timeout(Duration::from_millis(timeout_ms)) - .map_err(|_e| { - cb.call(error::TIMEOUT); - }); - }) - .expect("rpc-query thread shouldn't fail; qed"); + let _handle = thread::Builder::new() + .name("rpc_query".to_string()) + .spawn(move || { + let mut current_thread = CurrentThread::new(); + current_thread.spawn(query); + let _ = current_thread + .run_timeout(Duration::from_millis(timeout_ms)) + .map_err(|_e| { + cb.call(error::TIMEOUT); + }); + }) + .expect("rpc-query thread shouldn't fail; qed"); } -unsafe fn parity_rpc_query_checker<'a>(client: *const c_void, query: *const c_char, len: usize) - -> Option> -{ - let query_str = str::from_utf8(slice::from_raw_parts(query as *const u8, len)).ok()?; - let client: &RunningClient = &*(client as *const RunningClient); - Some((client, query_str)) +unsafe fn parity_rpc_query_checker<'a>( + client: *const c_void, + query: *const c_char, + len: usize, +) -> Option> { + let query_str = str::from_utf8(slice::from_raw_parts(query as *const u8, len)).ok()?; + let client: &RunningClient = &*(client as *const RunningClient); + Some((client, query_str)) } diff --git a/parity/account.rs b/parity/account.rs index 118c06fdd..ba1bbc9d2 100644 --- a/parity/account.rs +++ b/parity/account.rs @@ -14,143 +14,159 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::num::NonZeroU32; use params::SpecType; +use std::num::NonZeroU32; #[derive(Debug, PartialEq)] pub enum AccountCmd { - New(NewAccount), - List(ListAccounts), - Import(ImportAccounts), - ImportFromGeth(ImportFromGethAccounts) + New(NewAccount), + List(ListAccounts), + Import(ImportAccounts), + ImportFromGeth(ImportFromGethAccounts), } #[derive(Debug, PartialEq)] pub struct ListAccounts { - pub path: String, - pub spec: SpecType, + pub path: String, + pub spec: SpecType, } #[derive(Debug, PartialEq)] pub struct NewAccount { - pub iterations: NonZeroU32, - pub path: String, - pub spec: SpecType, - pub password_file: Option, + pub iterations: NonZeroU32, + pub path: String, + pub spec: SpecType, + pub password_file: Option, } #[derive(Debug, PartialEq)] pub struct ImportAccounts { - pub from: Vec, - pub to: String, - pub spec: SpecType, + pub from: Vec, + pub to: String, + pub spec: SpecType, } /// Parameters for geth accounts' import #[derive(Debug, PartialEq)] pub struct ImportFromGethAccounts { - /// import mainnet (false) or testnet (true) accounts - pub testnet: bool, - /// directory to import accounts to - pub to: String, - pub spec: SpecType, + /// import mainnet (false) or testnet (true) accounts + pub testnet: bool, + /// directory to import accounts to + pub to: String, + pub spec: SpecType, } - #[cfg(not(feature = "accounts"))] pub fn execute(_cmd: AccountCmd) -> Result { - Err("Account management is deprecated. Please see #9997 for alternatives:\nhttps://github.com/paritytech/parity-ethereum/issues/9997".into()) + Err("Account management is deprecated. 
Please see #9997 for alternatives:\nhttps://github.com/paritytech/parity-ethereum/issues/9997".into()) } #[cfg(feature = "accounts")] mod command { - use super::*; - use std::path::PathBuf; - use accounts::{AccountProvider, AccountProviderSettings}; - use ethstore::{EthStore, SecretStore, SecretVaultRef, import_account, import_accounts, read_geth_accounts}; - use ethstore::accounts_dir::RootDiskDirectory; - use helpers::{password_prompt, password_from_file}; + use super::*; + use accounts::{AccountProvider, AccountProviderSettings}; + use ethstore::{ + accounts_dir::RootDiskDirectory, import_account, import_accounts, read_geth_accounts, + EthStore, SecretStore, SecretVaultRef, + }; + use helpers::{password_from_file, password_prompt}; + use std::path::PathBuf; - pub fn execute(cmd: AccountCmd) -> Result { - match cmd { - AccountCmd::New(new_cmd) => new(new_cmd), - AccountCmd::List(list_cmd) => list(list_cmd), - AccountCmd::Import(import_cmd) => import(import_cmd), - AccountCmd::ImportFromGeth(import_geth_cmd) => import_geth(import_geth_cmd) - } - } + pub fn execute(cmd: AccountCmd) -> Result { + match cmd { + AccountCmd::New(new_cmd) => new(new_cmd), + AccountCmd::List(list_cmd) => list(list_cmd), + AccountCmd::Import(import_cmd) => import(import_cmd), + AccountCmd::ImportFromGeth(import_geth_cmd) => import_geth(import_geth_cmd), + } + } - fn keys_dir(path: String, spec: SpecType) -> Result { - let spec = spec.spec(&::std::env::temp_dir())?; - let mut path = PathBuf::from(&path); - path.push(spec.data_dir); - RootDiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e)) - } + fn keys_dir(path: String, spec: SpecType) -> Result { + let spec = spec.spec(&::std::env::temp_dir())?; + let mut path = PathBuf::from(&path); + path.push(spec.data_dir); + RootDiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e)) + } - fn secret_store(dir: Box, iterations: Option) -> Result { - match iterations { - Some(i) => 
EthStore::open_with_iterations(dir, i), - _ => EthStore::open(dir) - }.map_err(|e| format!("Could not open keys store: {}", e)) - } + fn secret_store( + dir: Box, + iterations: Option, + ) -> Result { + match iterations { + Some(i) => EthStore::open_with_iterations(dir, i), + _ => EthStore::open(dir), + } + .map_err(|e| format!("Could not open keys store: {}", e)) + } - fn new(n: NewAccount) -> Result { - let password = match n.password_file { - Some(file) => password_from_file(file)?, - None => password_prompt()?, - }; + fn new(n: NewAccount) -> Result { + let password = match n.password_file { + Some(file) => password_from_file(file)?, + None => password_prompt()?, + }; - let dir = Box::new(keys_dir(n.path, n.spec)?); - let secret_store = Box::new(secret_store(dir, Some(n.iterations))?); - let acc_provider = AccountProvider::new(secret_store, AccountProviderSettings::default()); - let new_account = acc_provider.new_account(&password).map_err(|e| format!("Could not create new account: {}", e))?; - Ok(format!("0x{:x}", new_account)) - } + let dir = Box::new(keys_dir(n.path, n.spec)?); + let secret_store = Box::new(secret_store(dir, Some(n.iterations))?); + let acc_provider = AccountProvider::new(secret_store, AccountProviderSettings::default()); + let new_account = acc_provider + .new_account(&password) + .map_err(|e| format!("Could not create new account: {}", e))?; + Ok(format!("0x{:x}", new_account)) + } - fn list(list_cmd: ListAccounts) -> Result { - let dir = Box::new(keys_dir(list_cmd.path, list_cmd.spec)?); - let secret_store = Box::new(secret_store(dir, None)?); - let acc_provider = AccountProvider::new(secret_store, AccountProviderSettings::default()); - let accounts = acc_provider.accounts().map_err(|e| format!("{}", e))?; - let result = accounts.into_iter() - .map(|a| format!("0x{:x}", a)) - .collect::>() - .join("\n"); + fn list(list_cmd: ListAccounts) -> Result { + let dir = Box::new(keys_dir(list_cmd.path, list_cmd.spec)?); + let secret_store = 
Box::new(secret_store(dir, None)?); + let acc_provider = AccountProvider::new(secret_store, AccountProviderSettings::default()); + let accounts = acc_provider.accounts().map_err(|e| format!("{}", e))?; + let result = accounts + .into_iter() + .map(|a| format!("0x{:x}", a)) + .collect::>() + .join("\n"); - Ok(result) - } + Ok(result) + } - fn import(i: ImportAccounts) -> Result { - let to = keys_dir(i.to, i.spec)?; - let mut imported = 0; + fn import(i: ImportAccounts) -> Result { + let to = keys_dir(i.to, i.spec)?; + let mut imported = 0; - for path in &i.from { - let path = PathBuf::from(path); - if path.is_dir() { - let from = RootDiskDirectory::at(&path); - imported += import_accounts(&from, &to).map_err(|e| format!("Importing accounts from {:?} failed: {}", path, e))?.len(); - } else if path.is_file() { - import_account(&path, &to).map_err(|e| format!("Importing account from {:?} failed: {}", path, e))?; - imported += 1; - } - } + for path in &i.from { + let path = PathBuf::from(path); + if path.is_dir() { + let from = RootDiskDirectory::at(&path); + imported += import_accounts(&from, &to) + .map_err(|e| format!("Importing accounts from {:?} failed: {}", path, e))? 
+ .len(); + } else if path.is_file() { + import_account(&path, &to) + .map_err(|e| format!("Importing account from {:?} failed: {}", path, e))?; + imported += 1; + } + } - Ok(format!("{} account(s) imported", imported)) - } + Ok(format!("{} account(s) imported", imported)) + } - fn import_geth(i: ImportFromGethAccounts) -> Result { - use std::io::ErrorKind; - use ethstore::Error; + fn import_geth(i: ImportFromGethAccounts) -> Result { + use ethstore::Error; + use std::io::ErrorKind; - let dir = Box::new(keys_dir(i.to, i.spec)?); - let secret_store = Box::new(secret_store(dir, None)?); - let geth_accounts = read_geth_accounts(i.testnet); - match secret_store.import_geth_accounts(SecretVaultRef::Root, geth_accounts, i.testnet) { - Ok(v) => Ok(format!("Successfully imported {} account(s) from geth.", v.len())), - Err(Error::Io(ref io_err)) if io_err.kind() == ErrorKind::NotFound => Err("Failed to find geth keys folder.".into()), - Err(err) => Err(format!("Import geth accounts failed. {}", err)) - } - } + let dir = Box::new(keys_dir(i.to, i.spec)?); + let secret_store = Box::new(secret_store(dir, None)?); + let geth_accounts = read_geth_accounts(i.testnet); + match secret_store.import_geth_accounts(SecretVaultRef::Root, geth_accounts, i.testnet) { + Ok(v) => Ok(format!( + "Successfully imported {} account(s) from geth.", + v.len() + )), + Err(Error::Io(ref io_err)) if io_err.kind() == ErrorKind::NotFound => { + Err("Failed to find geth keys folder.".into()) + } + Err(err) => Err(format!("Import geth accounts failed. 
{}", err)), + } + } } #[cfg(feature = "accounts")] diff --git a/parity/account_utils.rs b/parity/account_utils.rs index 20285ad83..90bb778d8 100644 --- a/parity/account_utils.rs +++ b/parity/account_utils.rs @@ -20,225 +20,306 @@ use dir::Directories; use ethereum_types::Address; use ethkey::Password; -use params::{SpecType, AccountsConfig}; +use params::{AccountsConfig, SpecType}; #[cfg(not(feature = "accounts"))] mod accounts { - use super::*; + use super::*; - /// Dummy AccountProvider - pub struct AccountProvider; + /// Dummy AccountProvider + pub struct AccountProvider; - impl ::ethcore::miner::LocalAccounts for AccountProvider { - fn is_local(&self, _address: &Address) -> bool { - false - } - } + impl ::ethcore::miner::LocalAccounts for AccountProvider { + fn is_local(&self, _address: &Address) -> bool { + false + } + } - pub fn prepare_account_provider(_spec: &SpecType, _dirs: &Directories, _data_dir: &str, _cfg: AccountsConfig, _passwords: &[Password]) -> Result { - warn!("Note: Your instance of Parity Ethereum is running without account support. Some CLI options are ignored."); - Ok(AccountProvider) - } + pub fn prepare_account_provider( + _spec: &SpecType, + _dirs: &Directories, + _data_dir: &str, + _cfg: AccountsConfig, + _passwords: &[Password], + ) -> Result { + warn!("Note: Your instance of Parity Ethereum is running without account support. 
Some CLI options are ignored."); + Ok(AccountProvider) + } - pub fn miner_local_accounts(_: Arc) -> AccountProvider { - AccountProvider - } + pub fn miner_local_accounts(_: Arc) -> AccountProvider { + AccountProvider + } - pub fn miner_author(_spec: &SpecType, _dirs: &Directories, _account_provider: &Arc, _engine_signer: Address, _passwords: &[Password]) -> Result, String> { - Ok(None) - } + pub fn miner_author( + _spec: &SpecType, + _dirs: &Directories, + _account_provider: &Arc, + _engine_signer: Address, + _passwords: &[Password], + ) -> Result, String> { + Ok(None) + } - pub fn private_tx_signer(_account_provider: Arc, _passwords: &[Password]) -> Result, String> { - Ok(Arc::new(::ethcore_private_tx::DummySigner)) - } + pub fn private_tx_signer( + _account_provider: Arc, + _passwords: &[Password], + ) -> Result, String> { + Ok(Arc::new(::ethcore_private_tx::DummySigner)) + } - pub fn accounts_list(_account_provider: Arc) -> Arc Vec

+ Send + Sync> { - Arc::new(|| vec![]) - } + pub fn accounts_list( + _account_provider: Arc, + ) -> Arc Vec
+ Send + Sync> { + Arc::new(|| vec![]) + } } #[cfg(feature = "accounts")] mod accounts { - use super::*; - use upgrade::upgrade_key_location; + use super::*; + use upgrade::upgrade_key_location; - pub use accounts::AccountProvider; + pub use accounts::AccountProvider; - /// Pops along with error messages when a password is missing or invalid. - const VERIFY_PASSWORD_HINT: &str = "Make sure valid password is present in files passed using `--password` or in the configuration file."; + /// Pops along with error messages when a password is missing or invalid. + const VERIFY_PASSWORD_HINT: &str = "Make sure valid password is present in files passed using `--password` or in the configuration file."; - /// Initialize account provider - pub fn prepare_account_provider(spec: &SpecType, dirs: &Directories, data_dir: &str, cfg: AccountsConfig, passwords: &[Password]) -> Result { - use ethstore::EthStore; - use ethstore::accounts_dir::RootDiskDirectory; - use accounts::AccountProviderSettings; + /// Initialize account provider + pub fn prepare_account_provider( + spec: &SpecType, + dirs: &Directories, + data_dir: &str, + cfg: AccountsConfig, + passwords: &[Password], + ) -> Result { + use accounts::AccountProviderSettings; + use ethstore::{accounts_dir::RootDiskDirectory, EthStore}; - let path = dirs.keys_path(data_dir); - upgrade_key_location(&dirs.legacy_keys_path(cfg.testnet), &path); - let dir = Box::new(RootDiskDirectory::create(&path).map_err(|e| format!("Could not open keys directory: {}", e))?); - let account_settings = AccountProviderSettings { - enable_hardware_wallets: cfg.enable_hardware_wallets, - hardware_wallet_classic_key: spec == &SpecType::Classic, - unlock_keep_secret: cfg.enable_fast_unlock, - blacklisted_accounts: match *spec { - SpecType::Morden | SpecType::Mordor | SpecType::Ropsten | SpecType::Kovan | SpecType::Goerli | SpecType::Kotti | SpecType::Sokol | SpecType::Dev => vec![], - _ => vec![ - "00a329c0648769a73afac7f9381e08fb43dbea72".into() - ], - }, 
- }; + let path = dirs.keys_path(data_dir); + upgrade_key_location(&dirs.legacy_keys_path(cfg.testnet), &path); + let dir = Box::new( + RootDiskDirectory::create(&path) + .map_err(|e| format!("Could not open keys directory: {}", e))?, + ); + let account_settings = AccountProviderSettings { + enable_hardware_wallets: cfg.enable_hardware_wallets, + hardware_wallet_classic_key: spec == &SpecType::Classic, + unlock_keep_secret: cfg.enable_fast_unlock, + blacklisted_accounts: match *spec { + SpecType::Morden + | SpecType::Mordor + | SpecType::Ropsten + | SpecType::Kovan + | SpecType::Goerli + | SpecType::Kotti + | SpecType::Sokol + | SpecType::Dev => vec![], + _ => vec!["00a329c0648769a73afac7f9381e08fb43dbea72".into()], + }, + }; - let ethstore = EthStore::open_with_iterations(dir, cfg.iterations).map_err(|e| format!("Could not open keys directory: {}", e))?; - if cfg.refresh_time > 0 { - ethstore.set_refresh_time(::std::time::Duration::from_secs(cfg.refresh_time)); - } - let account_provider = AccountProvider::new( - Box::new(ethstore), - account_settings, - ); + let ethstore = EthStore::open_with_iterations(dir, cfg.iterations) + .map_err(|e| format!("Could not open keys directory: {}", e))?; + if cfg.refresh_time > 0 { + ethstore.set_refresh_time(::std::time::Duration::from_secs(cfg.refresh_time)); + } + let account_provider = AccountProvider::new(Box::new(ethstore), account_settings); - // Add development account if running dev chain: - if let SpecType::Dev = *spec { - insert_dev_account(&account_provider); - } + // Add development account if running dev chain: + if let SpecType::Dev = *spec { + insert_dev_account(&account_provider); + } - for a in cfg.unlocked_accounts { - // Check if the account exists - if !account_provider.has_account(a) { - return Err(format!("Account {} not found for the current chain. 
{}", a, build_create_account_hint(spec, &dirs.keys))); - } + for a in cfg.unlocked_accounts { + // Check if the account exists + if !account_provider.has_account(a) { + return Err(format!( + "Account {} not found for the current chain. {}", + a, + build_create_account_hint(spec, &dirs.keys) + )); + } - // Check if any passwords have been read from the password file(s) - if passwords.is_empty() { - return Err(format!("No password found to unlock account {}. {}", a, VERIFY_PASSWORD_HINT)); - } + // Check if any passwords have been read from the password file(s) + if passwords.is_empty() { + return Err(format!( + "No password found to unlock account {}. {}", + a, VERIFY_PASSWORD_HINT + )); + } - if !passwords.iter().any(|p| account_provider.unlock_account_permanently(a, (*p).clone()).is_ok()) { - return Err(format!("No valid password to unlock account {}. {}", a, VERIFY_PASSWORD_HINT)); - } - } + if !passwords.iter().any(|p| { + account_provider + .unlock_account_permanently(a, (*p).clone()) + .is_ok() + }) { + return Err(format!( + "No valid password to unlock account {}. 
{}", + a, VERIFY_PASSWORD_HINT + )); + } + } - Ok(account_provider) - } + Ok(account_provider) + } - pub struct LocalAccounts(Arc); - impl ::ethcore::miner::LocalAccounts for LocalAccounts { - fn is_local(&self, address: &Address) -> bool { - self.0.has_account(*address) - } - } + pub struct LocalAccounts(Arc); + impl ::ethcore::miner::LocalAccounts for LocalAccounts { + fn is_local(&self, address: &Address) -> bool { + self.0.has_account(*address) + } + } - pub fn miner_local_accounts(account_provider: Arc) -> LocalAccounts { - LocalAccounts(account_provider) - } + pub fn miner_local_accounts(account_provider: Arc) -> LocalAccounts { + LocalAccounts(account_provider) + } - pub fn miner_author(spec: &SpecType, dirs: &Directories, account_provider: &Arc, engine_signer: Address, passwords: &[Password]) -> Result, String> { - use ethcore::engines::EngineSigner; + pub fn miner_author( + spec: &SpecType, + dirs: &Directories, + account_provider: &Arc, + engine_signer: Address, + passwords: &[Password], + ) -> Result, String> { + use ethcore::engines::EngineSigner; - // Check if engine signer exists - if !account_provider.has_account(engine_signer) { - return Err(format!("Consensus signer account not found for the current chain. {}", build_create_account_hint(spec, &dirs.keys))); - } + // Check if engine signer exists + if !account_provider.has_account(engine_signer) { + return Err(format!( + "Consensus signer account not found for the current chain. {}", + build_create_account_hint(spec, &dirs.keys) + )); + } - // Check if any passwords have been read from the password file(s) - if passwords.is_empty() { - return Err(format!("No password found for the consensus signer {}. {}", engine_signer, VERIFY_PASSWORD_HINT)); - } + // Check if any passwords have been read from the password file(s) + if passwords.is_empty() { + return Err(format!( + "No password found for the consensus signer {}. 
{}", + engine_signer, VERIFY_PASSWORD_HINT + )); + } - let mut author = None; - for password in passwords { - let signer = parity_rpc::signer::EngineSigner::new( - account_provider.clone(), - engine_signer, - password.clone(), - ); - if signer.sign(Default::default()).is_ok() { - author = Some(::ethcore::miner::Author::Sealer(Box::new(signer))); - } - } - if author.is_none() { - return Err(format!("No valid password for the consensus signer {}. {}", engine_signer, VERIFY_PASSWORD_HINT)); - } + let mut author = None; + for password in passwords { + let signer = parity_rpc::signer::EngineSigner::new( + account_provider.clone(), + engine_signer, + password.clone(), + ); + if signer.sign(Default::default()).is_ok() { + author = Some(::ethcore::miner::Author::Sealer(Box::new(signer))); + } + } + if author.is_none() { + return Err(format!( + "No valid password for the consensus signer {}. {}", + engine_signer, VERIFY_PASSWORD_HINT + )); + } - Ok(author) - } + Ok(author) + } + mod private_tx { + use super::*; + use ethcore_private_tx::Error; + use ethkey::{Message, Signature}; - mod private_tx { - use super::*; - use ethkey::{Signature, Message}; - use ethcore_private_tx::{Error}; + pub struct AccountSigner { + pub accounts: Arc, + pub passwords: Vec, + } - pub struct AccountSigner { - pub accounts: Arc, - pub passwords: Vec, - } + impl ::ethcore_private_tx::Signer for AccountSigner { + fn decrypt( + &self, + account: Address, + shared_mac: &[u8], + payload: &[u8], + ) -> Result, Error> { + let password = self.find_account_password(&account); + Ok(self + .accounts + .decrypt(account, password, shared_mac, payload) + .map_err(|e| e.to_string())?) + } - impl ::ethcore_private_tx::Signer for AccountSigner { - fn decrypt(&self, account: Address, shared_mac: &[u8], payload: &[u8]) -> Result, Error> { - let password = self.find_account_password(&account); - Ok(self.accounts.decrypt(account, password, shared_mac, payload).map_err(|e| e.to_string())?) 
- } + fn sign(&self, account: Address, hash: Message) -> Result { + let password = self.find_account_password(&account); + Ok(self + .accounts + .sign(account, password, hash) + .map_err(|e| e.to_string())?) + } + } - fn sign(&self, account: Address, hash: Message) -> Result { - let password = self.find_account_password(&account); - Ok(self.accounts.sign(account, password, hash).map_err(|e| e.to_string())?) - } - } + impl AccountSigner { + /// Try to unlock account using stored password, return found password if any + fn find_account_password(&self, account: &Address) -> Option { + for password in &self.passwords { + if let Ok(true) = self.accounts.test_password(account, password) { + return Some(password.clone()); + } + } + None + } + } + } - impl AccountSigner { - /// Try to unlock account using stored password, return found password if any - fn find_account_password(&self, account: &Address) -> Option { - for password in &self.passwords { - if let Ok(true) = self.accounts.test_password(account, password) { - return Some(password.clone()); - } - } - None - } - } - } + pub fn private_tx_signer( + accounts: Arc, + passwords: &[Password], + ) -> Result, String> { + Ok(Arc::new(self::private_tx::AccountSigner { + accounts, + passwords: passwords.to_vec(), + })) + } - pub fn private_tx_signer(accounts: Arc, passwords: &[Password]) -> Result, String> { - Ok(Arc::new(self::private_tx::AccountSigner { - accounts, - passwords: passwords.to_vec(), - })) - } + pub fn accounts_list( + account_provider: Arc, + ) -> Arc Vec
+ Send + Sync> { + Arc::new(move || account_provider.accounts().unwrap_or_default()) + } - pub fn accounts_list(account_provider: Arc) -> Arc Vec
+ Send + Sync> { - Arc::new(move || account_provider.accounts().unwrap_or_default()) - } + fn insert_dev_account(account_provider: &AccountProvider) { + let secret: ethkey::Secret = + "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7".into(); + let dev_account = ethkey::KeyPair::from_secret(secret.clone()) + .expect("Valid secret produces valid key;qed"); + if !account_provider.has_account(dev_account.address()) { + match account_provider.insert_account(secret, &Password::from(String::new())) { + Err(e) => warn!("Unable to add development account: {}", e), + Ok(address) => { + let _ = account_provider + .set_account_name(address.clone(), "Development Account".into()); + let _ = account_provider.set_account_meta( + address, + ::serde_json::to_string( + &(vec![ + ( + "description", + "Never use this account outside of development chain!", + ), + ("passwordHint", "Password is empty string"), + ] + .into_iter() + .collect::<::std::collections::HashMap<_, _>>()), + ) + .expect("Serialization of hashmap does not fail."), + ); + } + } + } + } - fn insert_dev_account(account_provider: &AccountProvider) { - let secret: ethkey::Secret = "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7".into(); - let dev_account = ethkey::KeyPair::from_secret(secret.clone()).expect("Valid secret produces valid key;qed"); - if !account_provider.has_account(dev_account.address()) { - match account_provider.insert_account(secret, &Password::from(String::new())) { - Err(e) => warn!("Unable to add development account: {}", e), - Ok(address) => { - let _ = account_provider.set_account_name(address.clone(), "Development Account".into()); - let _ = account_provider.set_account_meta(address, ::serde_json::to_string(&(vec![ - ("description", "Never use this account outside of development chain!"), - ("passwordHint","Password is empty string"), - ].into_iter().collect::<::std::collections::HashMap<_,_>>())).expect("Serialization of hashmap does not fail.")); - }, - } - } 
- } - - // Construct an error `String` with an adaptive hint on how to create an account. - fn build_create_account_hint(spec: &SpecType, keys: &str) -> String { - format!("You can create an account via RPC, UI or `parity account new --chain {} --keys-path {}`.", spec, keys) - } + // Construct an error `String` with an adaptive hint on how to create an account. + fn build_create_account_hint(spec: &SpecType, keys: &str) -> String { + format!("You can create an account via RPC, UI or `parity account new --chain {} --keys-path {}`.", spec, keys) + } } pub use self::accounts::{ - AccountProvider, - prepare_account_provider, - miner_local_accounts, - miner_author, - private_tx_signer, - accounts_list, + accounts_list, miner_author, miner_local_accounts, prepare_account_provider, private_tx_signer, + AccountProvider, }; - diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 6fdd92b0a..519a94543 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -14,406 +14,448 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::str::from_utf8; -use std::{io, fs}; -use std::io::{BufReader, BufRead}; -use std::time::{Instant, Duration}; -use std::thread::sleep; -use std::sync::Arc; - -use rustc_hex::FromHex; -use hash::{keccak, KECCAK_NULL_RLP}; -use ethereum_types::{U256, H256, Address}; -use bytes::ToPretty; -use rlp::PayloadInfo; -use ethcore::client::{ - Mode, DatabaseCompactionProfile, VMType, Nonce, Balance, BlockChainClient, BlockId, BlockInfo, ImportBlock, BlockChainReset, ImportExportBlocks +use std::{ + fs, io, + io::{BufRead, BufReader}, + str::from_utf8, + sync::Arc, + thread::sleep, + time::{Duration, Instant}, }; -use types::data_format::DataFormat; -use ethcore::error::{ImportErrorKind, ErrorKind as EthcoreErrorKind, Error as EthcoreError}; -use ethcore::miner::Miner; -use ethcore::verification::queue::VerifierSettings; -use ethcore::verification::queue::kind::blocks::Unverified; -use ethcore_service::ClientService; -use cache::CacheConfig; -use informant::{Informant, FullNodeInformantData, MillisecondDuration}; -use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool}; -use helpers::{to_client_config, execute_upgrades}; -use dir::Directories; -use user_defaults::UserDefaults; -use ethcore_private_tx; -use db; + use ansi_term::Colour; +use bytes::ToPretty; +use cache::CacheConfig; +use db; +use dir::Directories; +use ethcore::{ + client::{ + Balance, BlockChainClient, BlockChainReset, BlockId, BlockInfo, DatabaseCompactionProfile, + ImportBlock, ImportExportBlocks, Mode, Nonce, VMType, + }, + error::{Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind}, + miner::Miner, + verification::queue::{kind::blocks::Unverified, VerifierSettings}, +}; +use ethcore_private_tx; +use ethcore_service::ClientService; +use ethereum_types::{Address, H256, U256}; +use hash::{keccak, KECCAK_NULL_RLP}; +use helpers::{execute_upgrades, to_client_config}; +use informant::{FullNodeInformantData, Informant, MillisecondDuration}; +use 
params::{fatdb_switch_to_bool, tracing_switch_to_bool, Pruning, SpecType, Switch}; +use rlp::PayloadInfo; +use rustc_hex::FromHex; +use types::data_format::DataFormat; +use user_defaults::UserDefaults; #[derive(Debug, PartialEq)] pub enum BlockchainCmd { - Kill(KillBlockchain), - Import(ImportBlockchain), - Export(ExportBlockchain), - ExportState(ExportState), - Reset(ResetBlockchain) + Kill(KillBlockchain), + Import(ImportBlockchain), + Export(ExportBlockchain), + ExportState(ExportState), + Reset(ResetBlockchain), } #[derive(Debug, PartialEq)] pub struct ResetBlockchain { - pub dirs: Directories, - pub spec: SpecType, - pub pruning: Pruning, - pub pruning_history: u64, - pub pruning_memory: usize, - pub tracing: Switch, - pub fat_db: Switch, - pub compaction: DatabaseCompactionProfile, - pub cache_config: CacheConfig, - pub num: u32, + pub dirs: Directories, + pub spec: SpecType, + pub pruning: Pruning, + pub pruning_history: u64, + pub pruning_memory: usize, + pub tracing: Switch, + pub fat_db: Switch, + pub compaction: DatabaseCompactionProfile, + pub cache_config: CacheConfig, + pub num: u32, } #[derive(Debug, PartialEq)] pub struct KillBlockchain { - pub spec: SpecType, - pub dirs: Directories, - pub pruning: Pruning, + pub spec: SpecType, + pub dirs: Directories, + pub pruning: Pruning, } #[derive(Debug, PartialEq)] pub struct ImportBlockchain { - pub spec: SpecType, - pub cache_config: CacheConfig, - pub dirs: Directories, - pub file_path: Option, - pub format: Option, - pub pruning: Pruning, - pub pruning_history: u64, - pub pruning_memory: usize, - pub compaction: DatabaseCompactionProfile, - pub tracing: Switch, - pub fat_db: Switch, - pub vm_type: VMType, - pub check_seal: bool, - pub with_color: bool, - pub verifier_settings: VerifierSettings, - pub light: bool, - pub max_round_blocks_to_import: usize, + pub spec: SpecType, + pub cache_config: CacheConfig, + pub dirs: Directories, + pub file_path: Option, + pub format: Option, + pub pruning: Pruning, + 
pub pruning_history: u64, + pub pruning_memory: usize, + pub compaction: DatabaseCompactionProfile, + pub tracing: Switch, + pub fat_db: Switch, + pub vm_type: VMType, + pub check_seal: bool, + pub with_color: bool, + pub verifier_settings: VerifierSettings, + pub light: bool, + pub max_round_blocks_to_import: usize, } #[derive(Debug, PartialEq)] pub struct ExportBlockchain { - pub spec: SpecType, - pub cache_config: CacheConfig, - pub dirs: Directories, - pub file_path: Option, - pub format: Option, - pub pruning: Pruning, - pub pruning_history: u64, - pub pruning_memory: usize, - pub compaction: DatabaseCompactionProfile, - pub fat_db: Switch, - pub tracing: Switch, - pub from_block: BlockId, - pub to_block: BlockId, - pub check_seal: bool, - pub max_round_blocks_to_import: usize, + pub spec: SpecType, + pub cache_config: CacheConfig, + pub dirs: Directories, + pub file_path: Option, + pub format: Option, + pub pruning: Pruning, + pub pruning_history: u64, + pub pruning_memory: usize, + pub compaction: DatabaseCompactionProfile, + pub fat_db: Switch, + pub tracing: Switch, + pub from_block: BlockId, + pub to_block: BlockId, + pub check_seal: bool, + pub max_round_blocks_to_import: usize, } #[derive(Debug, PartialEq)] pub struct ExportState { - pub spec: SpecType, - pub cache_config: CacheConfig, - pub dirs: Directories, - pub file_path: Option, - pub format: Option, - pub pruning: Pruning, - pub pruning_history: u64, - pub pruning_memory: usize, - pub compaction: DatabaseCompactionProfile, - pub fat_db: Switch, - pub tracing: Switch, - pub at: BlockId, - pub storage: bool, - pub code: bool, - pub min_balance: Option, - pub max_balance: Option, - pub max_round_blocks_to_import: usize, + pub spec: SpecType, + pub cache_config: CacheConfig, + pub dirs: Directories, + pub file_path: Option, + pub format: Option, + pub pruning: Pruning, + pub pruning_history: u64, + pub pruning_memory: usize, + pub compaction: DatabaseCompactionProfile, + pub fat_db: Switch, + pub 
tracing: Switch, + pub at: BlockId, + pub storage: bool, + pub code: bool, + pub min_balance: Option, + pub max_balance: Option, + pub max_round_blocks_to_import: usize, } pub fn execute(cmd: BlockchainCmd) -> Result<(), String> { - match cmd { - BlockchainCmd::Kill(kill_cmd) => kill_db(kill_cmd), - BlockchainCmd::Import(import_cmd) => { - if import_cmd.light { - execute_import_light(import_cmd) - } else { - execute_import(import_cmd) - } - } - BlockchainCmd::Export(export_cmd) => execute_export(export_cmd), - BlockchainCmd::ExportState(export_cmd) => execute_export_state(export_cmd), - BlockchainCmd::Reset(reset_cmd) => execute_reset(reset_cmd), - } + match cmd { + BlockchainCmd::Kill(kill_cmd) => kill_db(kill_cmd), + BlockchainCmd::Import(import_cmd) => { + if import_cmd.light { + execute_import_light(import_cmd) + } else { + execute_import(import_cmd) + } + } + BlockchainCmd::Export(export_cmd) => execute_export(export_cmd), + BlockchainCmd::ExportState(export_cmd) => execute_export_state(export_cmd), + BlockchainCmd::Reset(reset_cmd) => execute_reset(reset_cmd), + } } fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> { - use light::client::{Service as LightClientService, Config as LightClientConfig}; - use light::cache::Cache as LightDataCache; - use parking_lot::Mutex; + use light::{ + cache::Cache as LightDataCache, + client::{Config as LightClientConfig, Service as LightClientService}, + }; + use parking_lot::Mutex; - let timer = Instant::now(); + let timer = Instant::now(); - // load spec file - let spec = cmd.spec.spec(&cmd.dirs.cache)?; + // load spec file + let spec = cmd.spec.spec(&cmd.dirs.cache)?; - // load genesis hash - let genesis_hash = spec.genesis_header().hash(); + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); - // database paths - let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone()); + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, None, 
spec.data_dir.clone()); - // user defaults path - let user_defaults_path = db_dirs.user_defaults_path(); + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); - // load user defaults - let user_defaults = UserDefaults::load(&user_defaults_path)?; + // load user defaults + let user_defaults = UserDefaults::load(&user_defaults_path)?; - // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&user_defaults); - // prepare client and snapshot paths. - let client_path = db_dirs.client_path(algorithm); + // prepare client and snapshot paths. + let client_path = db_dirs.client_path(algorithm); - // execute upgrades - execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; + // execute upgrades + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; - // create dirs used by parity - cmd.dirs.create_dirs(false, false)?; + // create dirs used by parity + cmd.dirs.create_dirs(false, false)?; - let cache = Arc::new(Mutex::new( - LightDataCache::new(Default::default(), Duration::new(0, 0)) - )); + let cache = Arc::new(Mutex::new(LightDataCache::new( + Default::default(), + Duration::new(0, 0), + ))); - let mut config = LightClientConfig { - queue: Default::default(), - chain_column: ethcore_db::COL_LIGHT_CHAIN, - verify_full: true, - check_seal: cmd.check_seal, - no_hardcoded_sync: true, - }; + let mut config = LightClientConfig { + queue: Default::default(), + chain_column: ethcore_db::COL_LIGHT_CHAIN, + verify_full: true, + check_seal: cmd.check_seal, + no_hardcoded_sync: true, + }; - config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; - config.queue.verifier_settings = cmd.verifier_settings; + config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; + config.queue.verifier_settings = cmd.verifier_settings; - // initialize database. 
- let db = db::open_db(&client_path.to_str().expect("DB path could not be converted to string."), - &cmd.cache_config, - &cmd.compaction).map_err(|e| format!("Failed to open database: {:?}", e))?; + // initialize database. + let db = db::open_db( + &client_path + .to_str() + .expect("DB path could not be converted to string."), + &cmd.cache_config, + &cmd.compaction, + ) + .map_err(|e| format!("Failed to open database: {:?}", e))?; - // TODO: could epoch signals be available at the end of the file? - let fetch = ::light::client::fetch::unavailable(); - let service = LightClientService::start(config, &spec, fetch, db, cache) - .map_err(|e| format!("Failed to start client: {}", e))?; + // TODO: could epoch signals be available at the end of the file? + let fetch = ::light::client::fetch::unavailable(); + let service = LightClientService::start(config, &spec, fetch, db, cache) + .map_err(|e| format!("Failed to start client: {}", e))?; - // free up the spec in memory. - drop(spec); + // free up the spec in memory. + drop(spec); - let client = service.client(); + let client = service.client(); - let mut instream: Box = match cmd.file_path { - Some(f) => Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?), - None => Box::new(io::stdin()), - }; + let mut instream: Box = match cmd.file_path { + Some(f) => { + Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?) 
+ } + None => Box::new(io::stdin()), + }; - const READAHEAD_BYTES: usize = 8; + const READAHEAD_BYTES: usize = 8; - let mut first_bytes: Vec = vec![0; READAHEAD_BYTES]; - let mut first_read = 0; + let mut first_bytes: Vec = vec![0; READAHEAD_BYTES]; + let mut first_read = 0; - let format = match cmd.format { - Some(format) => format, - None => { - first_read = instream.read(&mut first_bytes).map_err(|_| "Error reading from the file/stream.")?; - match first_bytes[0] { - 0xf9 => DataFormat::Binary, - _ => DataFormat::Hex, - } - } - }; + let format = match cmd.format { + Some(format) => format, + None => { + first_read = instream + .read(&mut first_bytes) + .map_err(|_| "Error reading from the file/stream.")?; + match first_bytes[0] { + 0xf9 => DataFormat::Binary, + _ => DataFormat::Hex, + } + } + }; - let do_import = |bytes: Vec| { - while client.queue_info().is_full() { sleep(Duration::from_secs(1)); } + let do_import = |bytes: Vec| { + while client.queue_info().is_full() { + sleep(Duration::from_secs(1)); + } - let header: ::types::header::Header = ::rlp::Rlp::new(&bytes).val_at(0) - .map_err(|e| format!("Bad block: {}", e))?; + let header: ::types::header::Header = ::rlp::Rlp::new(&bytes) + .val_at(0) + .map_err(|e| format!("Bad block: {}", e))?; - if client.best_block_header().number() >= header.number() { return Ok(()) } + if client.best_block_header().number() >= header.number() { + return Ok(()); + } - if header.number() % 10000 == 0 { - info!("#{}", header.number()); - } + if header.number() % 10000 == 0 { + info!("#{}", header.number()); + } - match client.import_header(header) { - Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { - trace!("Skipping block already in chain."); - } - Err(e) => { - return Err(format!("Cannot import block: {:?}", e)); - }, - Ok(_) => {}, - } - Ok(()) - }; + match client.import_header(header) { + Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { + 
trace!("Skipping block already in chain."); + } + Err(e) => { + return Err(format!("Cannot import block: {:?}", e)); + } + Ok(_) => {} + } + Ok(()) + }; - match format { - DataFormat::Binary => { - loop { - let mut bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]}; - let n = if first_read > 0 { - first_read - } else { - instream.read(&mut bytes).map_err(|_| "Error reading from the file/stream.")? - }; - if n == 0 { break; } - first_read = 0; - let s = PayloadInfo::from(&bytes).map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))?.total(); - bytes.resize(s, 0); - instream.read_exact(&mut bytes[n..]).map_err(|_| "Error reading from the file/stream.")?; - do_import(bytes)?; - } - } - DataFormat::Hex => { - for line in BufReader::new(instream).lines() { - let s = line.map_err(|_| "Error reading from the file/stream.")?; - let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s}; - first_read = 0; - let bytes = s.from_hex().map_err(|_| "Invalid hex in file/stream.")?; - do_import(bytes)?; - } - } - } - client.flush_queue(); + match format { + DataFormat::Binary => loop { + let mut bytes = if first_read > 0 { + first_bytes.clone() + } else { + vec![0; READAHEAD_BYTES] + }; + let n = if first_read > 0 { + first_read + } else { + instream + .read(&mut bytes) + .map_err(|_| "Error reading from the file/stream.")? + }; + if n == 0 { + break; + } + first_read = 0; + let s = PayloadInfo::from(&bytes) + .map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))? 
+ .total(); + bytes.resize(s, 0); + instream + .read_exact(&mut bytes[n..]) + .map_err(|_| "Error reading from the file/stream.")?; + do_import(bytes)?; + }, + DataFormat::Hex => { + for line in BufReader::new(instream).lines() { + let s = line.map_err(|_| "Error reading from the file/stream.")?; + let s = if first_read > 0 { + from_utf8(&first_bytes).unwrap().to_owned() + &(s[..]) + } else { + s + }; + first_read = 0; + let bytes = s.from_hex().map_err(|_| "Invalid hex in file/stream.")?; + do_import(bytes)?; + } + } + } + client.flush_queue(); - let ms = timer.elapsed().as_milliseconds(); - let report = client.report(); + let ms = timer.elapsed().as_milliseconds(); + let report = client.report(); - info!("Import completed in {} seconds, {} headers, {} hdr/s", - ms / 1000, - report.blocks_imported, - (report.blocks_imported * 1000) as u64 / ms, - ); + info!( + "Import completed in {} seconds, {} headers, {} hdr/s", + ms / 1000, + report.blocks_imported, + (report.blocks_imported * 1000) as u64 / ms, + ); - Ok(()) + Ok(()) } fn execute_import(cmd: ImportBlockchain) -> Result<(), String> { - let timer = Instant::now(); + let timer = Instant::now(); - // load spec file - let spec = cmd.spec.spec(&cmd.dirs.cache)?; + // load spec file + let spec = cmd.spec.spec(&cmd.dirs.cache)?; - // load genesis hash - let genesis_hash = spec.genesis_header().hash(); + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); - // database paths - let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone()); + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone()); - // user defaults path - let user_defaults_path = db_dirs.user_defaults_path(); + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); - // load user defaults - let mut user_defaults = UserDefaults::load(&user_defaults_path)?; + // load user defaults + let mut user_defaults = UserDefaults::load(&user_defaults_path)?; - // select 
pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&user_defaults); - // check if tracing is on - let tracing = tracing_switch_to_bool(cmd.tracing, &user_defaults)?; + // check if tracing is on + let tracing = tracing_switch_to_bool(cmd.tracing, &user_defaults)?; - // check if fatdb is on - let fat_db = fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)?; + // check if fatdb is on + let fat_db = fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)?; - // prepare client and snapshot paths. - let client_path = db_dirs.client_path(algorithm); - let snapshot_path = db_dirs.snapshot_path(); + // prepare client and snapshot paths. + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); - // execute upgrades - execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; + // execute upgrades + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; - // create dirs used by parity - cmd.dirs.create_dirs(false, false)?; + // create dirs used by parity + cmd.dirs.create_dirs(false, false)?; - // prepare client config - let mut client_config = to_client_config( - &cmd.cache_config, - spec.name.to_lowercase(), - Mode::Active, - tracing, - fat_db, - cmd.compaction, - cmd.vm_type, - "".into(), - algorithm, - cmd.pruning_history, - cmd.pruning_memory, - cmd.check_seal, - 12, - ); + // prepare client config + let mut client_config = to_client_config( + &cmd.cache_config, + spec.name.to_lowercase(), + Mode::Active, + tracing, + fat_db, + cmd.compaction, + cmd.vm_type, + "".into(), + algorithm, + cmd.pruning_history, + cmd.pruning_memory, + cmd.check_seal, + 12, + ); - client_config.queue.verifier_settings = cmd.verifier_settings; + client_config.queue.verifier_settings = cmd.verifier_settings; - let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); - let client_db 
= restoration_db_handler.open(&client_path) - .map_err(|e| format!("Failed to open database {:?}", e))?; + let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); + let client_db = restoration_db_handler + .open(&client_path) + .map_err(|e| format!("Failed to open database {:?}", e))?; - // build client - let service = ClientService::start( - client_config, - &spec, - client_db, - &snapshot_path, - restoration_db_handler, - &cmd.dirs.ipc_path(), - // TODO [ToDr] don't use test miner here - // (actually don't require miner at all) - Arc::new(Miner::new_for_tests(&spec, None)), - Arc::new(ethcore_private_tx::DummySigner), - Box::new(ethcore_private_tx::NoopEncryptor), - Default::default(), - Default::default(), - ).map_err(|e| format!("Client service error: {:?}", e))?; + // build client + let service = ClientService::start( + client_config, + &spec, + client_db, + &snapshot_path, + restoration_db_handler, + &cmd.dirs.ipc_path(), + // TODO [ToDr] don't use test miner here + // (actually don't require miner at all) + Arc::new(Miner::new_for_tests(&spec, None)), + Arc::new(ethcore_private_tx::DummySigner), + Box::new(ethcore_private_tx::NoopEncryptor), + Default::default(), + Default::default(), + ) + .map_err(|e| format!("Client service error: {:?}", e))?; - // free up the spec in memory. - drop(spec); + // free up the spec in memory. + drop(spec); - let client = service.client(); + let client = service.client(); - let instream: Box = match cmd.file_path { - Some(f) => Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?), - None => Box::new(io::stdin()), - }; + let instream: Box = match cmd.file_path { + Some(f) => { + Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?) 
+ } + None => Box::new(io::stdin()), + }; - let informant = Arc::new(Informant::new( - FullNodeInformantData { - client: client.clone(), - sync: None, - net: None, - }, - None, - None, - cmd.with_color, - )); + let informant = Arc::new(Informant::new( + FullNodeInformantData { + client: client.clone(), + sync: None, + net: None, + }, + None, + None, + cmd.with_color, + )); - service.register_io_handler(informant).map_err(|_| "Unable to register informant handler".to_owned())?; + service + .register_io_handler(informant) + .map_err(|_| "Unable to register informant handler".to_owned())?; - client.import_blocks(instream, cmd.format)?; + client.import_blocks(instream, cmd.format)?; - // save user defaults - user_defaults.pruning = algorithm; - user_defaults.tracing = tracing; - user_defaults.fat_db = fat_db; - user_defaults.save(&user_defaults_path)?; + // save user defaults + user_defaults.pruning = algorithm; + user_defaults.tracing = tracing; + user_defaults.fat_db = fat_db; + user_defaults.save(&user_defaults_path)?; - let report = client.report(); + let report = client.report(); - let ms = timer.elapsed().as_milliseconds(); - info!("Import completed in {} seconds, {} blocks, {} blk/s, {} transactions, {} tx/s, {} Mgas, {} Mgas/s", + let ms = timer.elapsed().as_milliseconds(); + info!("Import completed in {} seconds, {} blocks, {} blk/s, {} transactions, {} tx/s, {} Mgas, {} Mgas/s", ms / 1000, report.blocks_imported, (report.blocks_imported * 1000) as u64 / ms, @@ -422,260 +464,294 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> { report.gas_processed / 1_000_000, (report.gas_processed / (ms * 1000)).low_u64(), ); - Ok(()) + Ok(()) } fn start_client( - dirs: Directories, - spec: SpecType, - pruning: Pruning, - pruning_history: u64, - pruning_memory: usize, - tracing: Switch, - fat_db: Switch, - compaction: DatabaseCompactionProfile, - cache_config: CacheConfig, - require_fat_db: bool, - max_round_blocks_to_import: usize, + dirs: Directories, + 
spec: SpecType, + pruning: Pruning, + pruning_history: u64, + pruning_memory: usize, + tracing: Switch, + fat_db: Switch, + compaction: DatabaseCompactionProfile, + cache_config: CacheConfig, + require_fat_db: bool, + max_round_blocks_to_import: usize, ) -> Result { + // load spec file + let spec = spec.spec(&dirs.cache)?; - // load spec file - let spec = spec.spec(&dirs.cache)?; + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); - // load genesis hash - let genesis_hash = spec.genesis_header().hash(); + // database paths + let db_dirs = dirs.database(genesis_hash, None, spec.data_dir.clone()); - // database paths - let db_dirs = dirs.database(genesis_hash, None, spec.data_dir.clone()); + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); - // user defaults path - let user_defaults_path = db_dirs.user_defaults_path(); + // load user defaults + let user_defaults = UserDefaults::load(&user_defaults_path)?; - // load user defaults - let user_defaults = UserDefaults::load(&user_defaults_path)?; + // select pruning algorithm + let algorithm = pruning.to_algorithm(&user_defaults); - // select pruning algorithm - let algorithm = pruning.to_algorithm(&user_defaults); + // check if tracing is on + let tracing = tracing_switch_to_bool(tracing, &user_defaults)?; - // check if tracing is on - let tracing = tracing_switch_to_bool(tracing, &user_defaults)?; + // check if fatdb is on + let fat_db = fatdb_switch_to_bool(fat_db, &user_defaults, algorithm)?; + if !fat_db && require_fat_db { + return Err("This command requires Parity to be synced with --fat-db on.".to_owned()); + } - // check if fatdb is on - let fat_db = fatdb_switch_to_bool(fat_db, &user_defaults, algorithm)?; - if !fat_db && require_fat_db { - return Err("This command requires Parity to be synced with --fat-db on.".to_owned()); - } + // prepare client and snapshot paths. 
+ let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); - // prepare client and snapshot paths. - let client_path = db_dirs.client_path(algorithm); - let snapshot_path = db_dirs.snapshot_path(); + // execute upgrades + execute_upgrades(&dirs.base, &db_dirs, algorithm, &compaction)?; - // execute upgrades - execute_upgrades(&dirs.base, &db_dirs, algorithm, &compaction)?; + // create dirs used by parity + dirs.create_dirs(false, false)?; - // create dirs used by parity - dirs.create_dirs(false, false)?; + // prepare client config + let client_config = to_client_config( + &cache_config, + spec.name.to_lowercase(), + Mode::Active, + tracing, + fat_db, + compaction, + VMType::default(), + "".into(), + algorithm, + pruning_history, + pruning_memory, + true, + max_round_blocks_to_import, + ); - // prepare client config - let client_config = to_client_config( - &cache_config, - spec.name.to_lowercase(), - Mode::Active, - tracing, - fat_db, - compaction, - VMType::default(), - "".into(), - algorithm, - pruning_history, - pruning_memory, - true, - max_round_blocks_to_import, - ); + let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); + let client_db = restoration_db_handler + .open(&client_path) + .map_err(|e| format!("Failed to open database {:?}", e))?; - let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); - let client_db = restoration_db_handler.open(&client_path) - .map_err(|e| format!("Failed to open database {:?}", e))?; + let service = ClientService::start( + client_config, + &spec, + client_db, + &snapshot_path, + restoration_db_handler, + &dirs.ipc_path(), + // It's fine to use test version here, + // since we don't care about miner parameters at all + Arc::new(Miner::new_for_tests(&spec, None)), + Arc::new(ethcore_private_tx::DummySigner), + Box::new(ethcore_private_tx::NoopEncryptor), + Default::default(), + Default::default(), + ) + .map_err(|e| 
format!("Client service error: {:?}", e))?; - let service = ClientService::start( - client_config, - &spec, - client_db, - &snapshot_path, - restoration_db_handler, - &dirs.ipc_path(), - // It's fine to use test version here, - // since we don't care about miner parameters at all - Arc::new(Miner::new_for_tests(&spec, None)), - Arc::new(ethcore_private_tx::DummySigner), - Box::new(ethcore_private_tx::NoopEncryptor), - Default::default(), - Default::default(), - ).map_err(|e| format!("Client service error: {:?}", e))?; - - drop(spec); - Ok(service) + drop(spec); + Ok(service) } fn execute_export(cmd: ExportBlockchain) -> Result<(), String> { - let service = start_client( - cmd.dirs, - cmd.spec, - cmd.pruning, - cmd.pruning_history, - cmd.pruning_memory, - cmd.tracing, - cmd.fat_db, - cmd.compaction, - cmd.cache_config, - false, - cmd.max_round_blocks_to_import, - )?; - let client = service.client(); + let service = start_client( + cmd.dirs, + cmd.spec, + cmd.pruning, + cmd.pruning_history, + cmd.pruning_memory, + cmd.tracing, + cmd.fat_db, + cmd.compaction, + cmd.cache_config, + false, + cmd.max_round_blocks_to_import, + )?; + let client = service.client(); - let out: Box = match cmd.file_path { - Some(f) => Box::new(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?), - None => Box::new(io::stdout()), - }; + let out: Box = match cmd.file_path { + Some(f) => Box::new( + fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?, + ), + None => Box::new(io::stdout()), + }; - client.export_blocks(out, cmd.from_block, cmd.to_block, cmd.format)?; + client.export_blocks(out, cmd.from_block, cmd.to_block, cmd.format)?; - info!("Export completed."); - Ok(()) + info!("Export completed."); + Ok(()) } fn execute_export_state(cmd: ExportState) -> Result<(), String> { - let service = start_client( - cmd.dirs, - cmd.spec, - cmd.pruning, - cmd.pruning_history, - cmd.pruning_memory, - cmd.tracing, - cmd.fat_db, - cmd.compaction, - 
cmd.cache_config, - true, - cmd.max_round_blocks_to_import, - )?; + let service = start_client( + cmd.dirs, + cmd.spec, + cmd.pruning, + cmd.pruning_history, + cmd.pruning_memory, + cmd.tracing, + cmd.fat_db, + cmd.compaction, + cmd.cache_config, + true, + cmd.max_round_blocks_to_import, + )?; - let client = service.client(); + let client = service.client(); - let mut out: Box = match cmd.file_path { - Some(f) => Box::new(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?), - None => Box::new(io::stdout()), - }; + let mut out: Box = match cmd.file_path { + Some(f) => Box::new( + fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?, + ), + None => Box::new(io::stdout()), + }; - let mut last: Option
= None; - let at = cmd.at; - let mut i = 0usize; + let mut last: Option
= None; + let at = cmd.at; + let mut i = 0usize; - out.write_fmt(format_args!("{{ \"state\": {{", )).expect("Couldn't write to stream."); - loop { - let accounts = client.list_accounts(at, last.as_ref(), 1000).ok_or("Specified block not found")?; - if accounts.is_empty() { - break; - } + out.write_fmt(format_args!("{{ \"state\": {{",)) + .expect("Couldn't write to stream."); + loop { + let accounts = client + .list_accounts(at, last.as_ref(), 1000) + .ok_or("Specified block not found")?; + if accounts.is_empty() { + break; + } - for account in accounts.into_iter() { - let balance = client.balance(&account, at.into()).unwrap_or_else(U256::zero); - if cmd.min_balance.map_or(false, |m| balance < m) || cmd.max_balance.map_or(false, |m| balance > m) { - last = Some(account); - continue; //filtered out - } + for account in accounts.into_iter() { + let balance = client + .balance(&account, at.into()) + .unwrap_or_else(U256::zero); + if cmd.min_balance.map_or(false, |m| balance < m) + || cmd.max_balance.map_or(false, |m| balance > m) + { + last = Some(account); + continue; //filtered out + } - if i != 0 { - out.write(b",").expect("Write error"); - } - out.write_fmt(format_args!("\n\"0x{:x}\": {{\"balance\": \"{:x}\", \"nonce\": \"{:x}\"", account, balance, client.nonce(&account, at).unwrap_or_else(U256::zero))).expect("Write error"); - let code = client.code(&account, at.into()).unwrap_or(None).unwrap_or_else(Vec::new); - if !code.is_empty() { - out.write_fmt(format_args!(", \"code_hash\": \"0x{:x}\"", keccak(&code))).expect("Write error"); - if cmd.code { - out.write_fmt(format_args!(", \"code\": \"{}\"", code.to_hex())).expect("Write error"); - } - } - let storage_root = client.storage_root(&account, at).unwrap_or(KECCAK_NULL_RLP); - if storage_root != KECCAK_NULL_RLP { - out.write_fmt(format_args!(", \"storage_root\": \"0x{:x}\"", storage_root)).expect("Write error"); - if cmd.storage { - out.write_fmt(format_args!(", \"storage\": {{")).expect("Write error"); - let mut 
last_storage: Option = None; - loop { - let keys = client.list_storage(at, &account, last_storage.as_ref(), 1000).ok_or("Specified block not found")?; - if keys.is_empty() { - break; - } + if i != 0 { + out.write(b",").expect("Write error"); + } + out.write_fmt(format_args!( + "\n\"0x{:x}\": {{\"balance\": \"{:x}\", \"nonce\": \"{:x}\"", + account, + balance, + client.nonce(&account, at).unwrap_or_else(U256::zero) + )) + .expect("Write error"); + let code = client + .code(&account, at.into()) + .unwrap_or(None) + .unwrap_or_else(Vec::new); + if !code.is_empty() { + out.write_fmt(format_args!(", \"code_hash\": \"0x{:x}\"", keccak(&code))) + .expect("Write error"); + if cmd.code { + out.write_fmt(format_args!(", \"code\": \"{}\"", code.to_hex())) + .expect("Write error"); + } + } + let storage_root = client.storage_root(&account, at).unwrap_or(KECCAK_NULL_RLP); + if storage_root != KECCAK_NULL_RLP { + out.write_fmt(format_args!(", \"storage_root\": \"0x{:x}\"", storage_root)) + .expect("Write error"); + if cmd.storage { + out.write_fmt(format_args!(", \"storage\": {{")) + .expect("Write error"); + let mut last_storage: Option = None; + loop { + let keys = client + .list_storage(at, &account, last_storage.as_ref(), 1000) + .ok_or("Specified block not found")?; + if keys.is_empty() { + break; + } - for key in keys.into_iter() { - if last_storage.is_some() { - out.write(b",").expect("Write error"); - } - out.write_fmt(format_args!("\n\t\"0x{:x}\": \"0x{:x}\"", key, client.storage_at(&account, &key, at.into()).unwrap_or_else(Default::default))).expect("Write error"); - last_storage = Some(key); - } - } - out.write(b"\n}").expect("Write error"); - } - } - out.write(b"}").expect("Write error"); - i += 1; - if i % 10000 == 0 { - info!("Account #{}", i); - } - last = Some(account); - } - } - out.write_fmt(format_args!("\n}}}}")).expect("Write error"); - info!("Export completed."); - Ok(()) + for key in keys.into_iter() { + if last_storage.is_some() { + 
out.write(b",").expect("Write error"); + } + out.write_fmt(format_args!( + "\n\t\"0x{:x}\": \"0x{:x}\"", + key, + client + .storage_at(&account, &key, at.into()) + .unwrap_or_else(Default::default) + )) + .expect("Write error"); + last_storage = Some(key); + } + } + out.write(b"\n}").expect("Write error"); + } + } + out.write(b"}").expect("Write error"); + i += 1; + if i % 10000 == 0 { + info!("Account #{}", i); + } + last = Some(account); + } + } + out.write_fmt(format_args!("\n}}}}")).expect("Write error"); + info!("Export completed."); + Ok(()) } fn execute_reset(cmd: ResetBlockchain) -> Result<(), String> { - let service = start_client( - cmd.dirs, - cmd.spec, - cmd.pruning, - cmd.pruning_history, - cmd.pruning_memory, - cmd.tracing, - cmd.fat_db, - cmd.compaction, - cmd.cache_config, - false, - 0, - )?; + let service = start_client( + cmd.dirs, + cmd.spec, + cmd.pruning, + cmd.pruning_history, + cmd.pruning_memory, + cmd.tracing, + cmd.fat_db, + cmd.compaction, + cmd.cache_config, + false, + 0, + )?; - let client = service.client(); - client.reset(cmd.num)?; - info!("{}", Colour::Green.bold().paint("Successfully reset db!")); + let client = service.client(); + client.reset(cmd.num)?; + info!("{}", Colour::Green.bold().paint("Successfully reset db!")); - Ok(()) + Ok(()) } pub fn kill_db(cmd: KillBlockchain) -> Result<(), String> { - let spec = cmd.spec.spec(&cmd.dirs.cache)?; - let genesis_hash = spec.genesis_header().hash(); - let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir); - let user_defaults_path = db_dirs.user_defaults_path(); - let mut user_defaults = UserDefaults::load(&user_defaults_path)?; - let algorithm = cmd.pruning.to_algorithm(&user_defaults); - let dir = db_dirs.db_path(algorithm); - fs::remove_dir_all(&dir).map_err(|e| format!("Error removing database: {:?}", e))?; - user_defaults.is_first_launch = true; - user_defaults.save(&user_defaults_path)?; - info!("Database deleted."); - Ok(()) + let spec = 
cmd.spec.spec(&cmd.dirs.cache)?; + let genesis_hash = spec.genesis_header().hash(); + let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir); + let user_defaults_path = db_dirs.user_defaults_path(); + let mut user_defaults = UserDefaults::load(&user_defaults_path)?; + let algorithm = cmd.pruning.to_algorithm(&user_defaults); + let dir = db_dirs.db_path(algorithm); + fs::remove_dir_all(&dir).map_err(|e| format!("Error removing database: {:?}", e))?; + user_defaults.is_first_launch = true; + user_defaults.save(&user_defaults_path)?; + info!("Database deleted."); + Ok(()) } #[cfg(test)] mod test { - use super::DataFormat; + use super::DataFormat; - #[test] - fn test_data_format_parsing() { - assert_eq!(DataFormat::Binary, "binary".parse().unwrap()); - assert_eq!(DataFormat::Binary, "bin".parse().unwrap()); - assert_eq!(DataFormat::Hex, "hex".parse().unwrap()); - } + #[test] + fn test_data_format_parsing() { + assert_eq!(DataFormat::Binary, "binary".parse().unwrap()); + assert_eq!(DataFormat::Binary, "bin".parse().unwrap()); + assert_eq!(DataFormat::Hex, "hex".parse().unwrap()); + } } diff --git a/parity/cache.rs b/parity/cache.rs index d6487221b..bef13fec6 100644 --- a/parity/cache.rs +++ b/parity/cache.rs @@ -29,110 +29,114 @@ const DEFAULT_STATE_CACHE_SIZE: u32 = 25; /// All values are represented in MB. #[derive(Debug, PartialEq)] pub struct CacheConfig { - /// Size of rocksDB cache. Almost all goes to the state column. - db: u32, - /// Size of blockchain cache. - blockchain: u32, - /// Size of transaction queue cache. - queue: u32, - /// Size of traces cache. - traces: u32, - /// Size of the state cache. - state: u32, + /// Size of rocksDB cache. Almost all goes to the state column. + db: u32, + /// Size of blockchain cache. + blockchain: u32, + /// Size of transaction queue cache. + queue: u32, + /// Size of traces cache. + traces: u32, + /// Size of the state cache. 
+ state: u32, } impl Default for CacheConfig { - fn default() -> Self { - CacheConfig::new( - DEFAULT_DB_CACHE_SIZE, - DEFAULT_BC_CACHE_SIZE, - DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, - DEFAULT_STATE_CACHE_SIZE) - } + fn default() -> Self { + CacheConfig::new( + DEFAULT_DB_CACHE_SIZE, + DEFAULT_BC_CACHE_SIZE, + DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, + DEFAULT_STATE_CACHE_SIZE, + ) + } } impl CacheConfig { - /// Creates new cache config with cumulative size equal `total`. - pub fn new_with_total_cache_size(total: u32) -> Self { - CacheConfig { - db: total * 7 / 10, - blockchain: total / 10, - queue: DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, - traces: DEFAULT_TRACE_CACHE_SIZE, - state: total * 2 / 10, - } - } + /// Creates new cache config with cumulative size equal `total`. + pub fn new_with_total_cache_size(total: u32) -> Self { + CacheConfig { + db: total * 7 / 10, + blockchain: total / 10, + queue: DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, + traces: DEFAULT_TRACE_CACHE_SIZE, + state: total * 2 / 10, + } + } - /// Creates new cache config with gitven details. - pub fn new(db: u32, blockchain: u32, queue: u32, state: u32) -> Self { - CacheConfig { - db: db, - blockchain: blockchain, - queue: queue, - traces: DEFAULT_TRACE_CACHE_SIZE, - state: state, - } - } + /// Creates new cache config with gitven details. + pub fn new(db: u32, blockchain: u32, queue: u32, state: u32) -> Self { + CacheConfig { + db: db, + blockchain: blockchain, + queue: queue, + traces: DEFAULT_TRACE_CACHE_SIZE, + state: state, + } + } - /// Size of db cache. - pub fn db_cache_size(&self) -> u32 { - max(MIN_DB_CACHE_MB, self.db) - } + /// Size of db cache. + pub fn db_cache_size(&self) -> u32 { + max(MIN_DB_CACHE_MB, self.db) + } - /// Size of block queue size limit - pub fn queue(&self) -> u32 { - max(self.queue, MIN_BLOCK_QUEUE_SIZE_LIMIT_MB) - } + /// Size of block queue size limit + pub fn queue(&self) -> u32 { + max(self.queue, MIN_BLOCK_QUEUE_SIZE_LIMIT_MB) + } - /// Size of the blockchain cache. 
- pub fn blockchain(&self) -> u32 { - max(self.blockchain, MIN_BC_CACHE_MB) - } + /// Size of the blockchain cache. + pub fn blockchain(&self) -> u32 { + max(self.blockchain, MIN_BC_CACHE_MB) + } - /// Size of the traces cache. - pub fn traces(&self) -> u32 { - self.traces - } + /// Size of the traces cache. + pub fn traces(&self) -> u32 { + self.traces + } - /// Size of the state cache. - pub fn state(&self) -> u32 { - self.state * 3 / 4 - } + /// Size of the state cache. + pub fn state(&self) -> u32 { + self.state * 3 / 4 + } - /// Size of the jump-tables cache. - pub fn jump_tables(&self) -> u32 { - self.state / 4 - } + /// Size of the jump-tables cache. + pub fn jump_tables(&self) -> u32 { + self.state / 4 + } } #[cfg(test)] mod tests { - use super::CacheConfig; + use super::CacheConfig; - #[test] - fn test_cache_config_constructor() { - let config = CacheConfig::new_with_total_cache_size(200); - assert_eq!(config.db, 140); - assert_eq!(config.blockchain(), 20); - assert_eq!(config.queue(), 40); - assert_eq!(config.state(), 30); - assert_eq!(config.jump_tables(), 10); - } + #[test] + fn test_cache_config_constructor() { + let config = CacheConfig::new_with_total_cache_size(200); + assert_eq!(config.db, 140); + assert_eq!(config.blockchain(), 20); + assert_eq!(config.queue(), 40); + assert_eq!(config.state(), 30); + assert_eq!(config.jump_tables(), 10); + } - #[test] - fn test_cache_config_db_cache_sizes() { - let config = CacheConfig::new_with_total_cache_size(400); - assert_eq!(config.db, 280); - assert_eq!(config.db_cache_size(), 280); - } + #[test] + fn test_cache_config_db_cache_sizes() { + let config = CacheConfig::new_with_total_cache_size(400); + assert_eq!(config.db, 280); + assert_eq!(config.db_cache_size(), 280); + } - #[test] - fn test_cache_config_default() { - assert_eq!(CacheConfig::default(), - CacheConfig::new( - super::DEFAULT_DB_CACHE_SIZE, - super::DEFAULT_BC_CACHE_SIZE, - super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, - 
super::DEFAULT_STATE_CACHE_SIZE)); - } + #[test] + fn test_cache_config_default() { + assert_eq!( + CacheConfig::default(), + CacheConfig::new( + super::DEFAULT_DB_CACHE_SIZE, + super::DEFAULT_BC_CACHE_SIZE, + super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, + super::DEFAULT_STATE_CACHE_SIZE + ) + ); + } } diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 0e8c692cc..f7ecafe55 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -18,2137 +18,2183 @@ mod usage; mod presets; -use std::collections::HashSet; use super::helpers; +use std::collections::HashSet; usage! { - { - // CLI subcommands - // Subcommands must start with cmd_ and have '_' in place of '-' - // Sub-subcommands must start with the name of the subcommand - // Arguments must start with arg_ - // Flags must start with flag_ - - CMD cmd_daemon - { - "Use Parity as a daemon", - - ARG arg_daemon_pid_file: (Option) = None, - "", - "Path to the pid file", - } - - CMD cmd_account - { - "Manage accounts", - - CMD cmd_account_new { - "Create a new account (and its associated key) for the given --chain (default: mainnet)", - } - - CMD cmd_account_list { - "List existing accounts of the given --chain (default: mainnet)", - } - - CMD cmd_account_import - { - "Import accounts from JSON UTC keystore files to the specified --chain (default mainnet)", - - ARG arg_account_import_path : (Option>) = None, - "...", - "Path to the accounts", - } - } - - CMD cmd_wallet - { - "Manage wallet", - - CMD cmd_wallet_import - { - "Import wallet into the given --chain (default: mainnet)", - - ARG arg_wallet_import_path: (Option) = None, - "", - "Path to the wallet", - } - } - - CMD cmd_import - { - "Import blockchain data from a file to the given --chain database (default: mainnet)", - - ARG arg_import_format: (Option) = None, - "--format=[FORMAT]", - "Import in a given format. FORMAT must be either 'hex' or 'binary'. 
(default: auto)", - - ARG arg_import_file: (Option) = None, - "[FILE]", - "Path to the file to import from", - } - - CMD cmd_export - { - "Export blockchain", - - CMD cmd_export_blocks - { - "Export the blockchain blocks from the given --chain database (default: mainnet) into a file. This command requires the chain to be synced with --fat-db on.", - - ARG arg_export_blocks_format: (Option) = None, - "--format=[FORMAT]", - "Export in a given format. FORMAT must be either 'hex' or 'binary'. (default: binary)", - - ARG arg_export_blocks_from: (String) = "1", - "--from=[BLOCK]", - "Export from block BLOCK, which may be an index or hash.", - - ARG arg_export_blocks_to: (String) = "latest", - "--to=[BLOCK]", - "Export to (including) block BLOCK, which may be an index, hash or latest.", - - ARG arg_export_blocks_file: (Option) = None, - "[FILE]", - "Path to the exported file", - } - - CMD cmd_export_state - { - "Export the blockchain state from the given --chain (default: mainnet) into a file. This command requires the chain to be synced with --fat-db on.", - - FLAG flag_export_state_no_storage: (bool) = false, - "--no-storage", - "Don't export account storage.", - - FLAG flag_export_state_no_code: (bool) = false, - "--no-code", - "Don't export account code.", - - ARG arg_export_state_min_balance: (Option) = None, - "--min-balance=[WEI]", - "Don't export accounts with balance less than specified.", - - ARG arg_export_state_max_balance: (Option) = None, - "--max-balance=[WEI]", - "Don't export accounts with balance greater than specified.", - - ARG arg_export_state_at: (String) = "latest", - "--at=[BLOCK]", - "Take a snapshot at the given block, which may be an index, hash, or latest. Note that taking snapshots at non-recent blocks will only work with --pruning archive", - - ARG arg_export_state_format: (Option) = None, - "--format=[FORMAT]", - "Export in a given format. FORMAT must be either 'hex' or 'binary'. 
(default: binary)", - - ARG arg_export_state_file: (Option) = None, - "[FILE]", - "Path to the exported file", - } - } - - CMD cmd_signer - { - "Manage signer", - - CMD cmd_signer_new_token { - "Generate a new signer-authentication token for the given --chain (default: mainnet)", - } - - CMD cmd_signer_list { - "List the signer-authentication tokens from given --chain (default: mainnet)", - } - - CMD cmd_signer_sign - { - "Sign", - - ARG arg_signer_sign_id: (Option) = None, - "[ID]", - "ID", - } - - CMD cmd_signer_reject - { - "Reject", - - ARG arg_signer_reject_id: (Option) = None, - "", - "ID", - } - } - - CMD cmd_snapshot - { - "Make a snapshot of the database of the given --chain (default: mainnet)", - - ARG arg_snapshot_at: (String) = "latest", - "--at=[BLOCK]", - "Take a snapshot at the given block, which may be an index, hash, or latest. Note that taking snapshots at non-recent blocks will only work with --pruning archive", - - ARG arg_snapshot_file: (Option) = None, - "", - "Path to the file to export to", - } - - CMD cmd_restore - { - "Restore the database of the given --chain (default: mainnet) from a snapshot file", - - ARG arg_restore_file: (Option) = None, - "[FILE]", - "Path to the file to restore from", - } - - CMD cmd_tools - { - "Tools", - - CMD cmd_tools_hash - { - "Hash a file using the Keccak-256 algorithm", - - ARG arg_tools_hash_file: (Option) = None, - "", - "File", - } - } - - CMD cmd_db - { - "Manage the database representing the state of the blockchain on this system", - - CMD cmd_db_kill { - "Clean the database of the given --chain (default: mainnet)", - } - - CMD cmd_db_reset { - "Removes NUM latests blocks from the db", - - ARG arg_db_reset_num: (u32) = 10u32, - "", - "Number of blocks to revert", - } - - } - - CMD cmd_export_hardcoded_sync - { - "Print the hashed light clients headers of the given --chain (default: mainnet) in a JSON format. 
To be used as hardcoded headers in a genesis file.", - } - - // CMD removed in 2.0 - - CMD cmd_dapp - { - "Manage dapps", - - ARG arg_dapp_path: (Option) = None, - "", - "Path to the dapps", - } - } - { - // Global flags and arguments - ["Operating Options"] - FLAG flag_no_download: (bool) = false, or |c: &Config| c.parity.as_ref()?.no_download.clone(), - "--no-download", - "Normally new releases will be downloaded ready for updating. This disables it. Not recommended.", - - FLAG flag_no_consensus: (bool) = false, or |c: &Config| c.parity.as_ref()?.no_consensus.clone(), - "--no-consensus", - "Force the binary to run even if there are known issues regarding consensus. Not recommended.", - - FLAG flag_light: (bool) = false, or |c: &Config| c.parity.as_ref()?.light, - "--light", - "Experimental: run in light client mode. Light clients synchronize a bare minimum of data and fetch necessary data on-demand from the network. Much lower in storage, potentially higher in bandwidth. Has no effect with subcommands.", - - FLAG flag_no_hardcoded_sync: (bool) = false, or |c: &Config| c.parity.as_ref()?.no_hardcoded_sync, - "--no-hardcoded-sync", - "By default, if there is no existing database the light client will automatically jump to a block hardcoded in the chain's specifications. This disables this feature.", - - FLAG flag_force_direct: (bool) = false, or |_| None, - "--force-direct", - "Run the originally installed version of Parity, ignoring any updates that have since been installed.", - - ARG arg_mode: (String) = "last", or |c: &Config| c.parity.as_ref()?.mode.clone(), - "--mode=[MODE]", - "Set the operating mode. 
MODE can be one of: last - Uses the last-used mode, active if none; active - Parity continuously syncs the chain; passive - Parity syncs initially, then sleeps and wakes regularly to resync; dark - Parity syncs only when the JSON-RPC is active; offline - Parity doesn't sync.", - - ARG arg_mode_timeout: (u64) = 300u64, or |c: &Config| c.parity.as_ref()?.mode_timeout.clone(), - "--mode-timeout=[SECS]", - "Specify the number of seconds before inactivity timeout occurs when mode is dark or passive", - - ARG arg_mode_alarm: (u64) = 3600u64, or |c: &Config| c.parity.as_ref()?.mode_alarm.clone(), - "--mode-alarm=[SECS]", - "Specify the number of seconds before auto sleep reawake timeout occurs when mode is passive", - - ARG arg_auto_update: (String) = "critical", or |c: &Config| c.parity.as_ref()?.auto_update.clone(), - "--auto-update=[SET]", - "Set a releases set to automatically update and install. SET can be one of: all - All updates in the our release track; critical - Only consensus/security updates; none - No updates will be auto-installed.", - - ARG arg_auto_update_delay: (u16) = 100u16, or |c: &Config| c.parity.as_ref()?.auto_update_delay.clone(), - "--auto-update-delay=[NUM]", - "Specify the maximum number of blocks used for randomly delaying updates.", - - ARG arg_auto_update_check_frequency: (u16) = 20u16, or |c: &Config| c.parity.as_ref()?.auto_update_check_frequency.clone(), - "--auto-update-check-frequency=[NUM]", - "Specify the number of blocks between each auto-update check.", - - ARG arg_release_track: (String) = "current", or |c: &Config| c.parity.as_ref()?.release_track.clone(), - "--release-track=[TRACK]", - "Set which release track we should use for updates. 
TRACK can be one of: stable - Stable releases; beta - Beta releases; nightly - Nightly releases (unstable); testing - Testing releases (do not use); current - Whatever track this executable was released on.", - - ARG arg_chain: (String) = "foundation", or |c: &Config| c.parity.as_ref()?.chain.clone(), - "--chain=[CHAIN]", - "Specify the blockchain type. CHAIN may be either a JSON chain specification file or ethereum, classic, poacore, xdai, volta, ewc, musicoin, ellaism, mix, callisto, morden, mordor, ropsten, kovan, rinkeby, goerli, kotti, poasokol, testnet, or dev.", - - ARG arg_keys_path: (String) = "$BASE/keys", or |c: &Config| c.parity.as_ref()?.keys_path.clone(), - "--keys-path=[PATH]", - "Specify the path for JSON key files to be found", - - ARG arg_identity: (String) = "", or |c: &Config| c.parity.as_ref()?.identity.clone(), - "--identity=[NAME]", - "Specify your node's name.", - - ARG arg_base_path: (Option) = None, or |c: &Config| c.parity.as_ref()?.base_path.clone(), - "-d, --base-path=[PATH]", - "Specify the base data storage path.", - - ARG arg_db_path: (Option) = None, or |c: &Config| c.parity.as_ref()?.db_path.clone(), - "--db-path=[PATH]", - "Specify the database directory path", - - ["Convenience Options"] - FLAG flag_unsafe_expose: (bool) = false, or |c: &Config| c.misc.as_ref()?.unsafe_expose, - "--unsafe-expose", - "All servers will listen on external interfaces and will be remotely accessible. It's equivalent with setting the following: --[ws,jsonrpc,ipfs-api,secretstore,stratum,dapps,secretstore-http]-interface=all --*-hosts=all This option is UNSAFE and should be used with great care!", - - ARG arg_config: (String) = "$BASE/config.toml", or |_| None, - "-c, --config=[CONFIG]", - "Specify a configuration. 
CONFIG may be either a configuration file or a preset: dev, insecure, dev-insecure, mining, or non-standard-ports.", - - ARG arg_ports_shift: (u16) = 0u16, or |c: &Config| c.misc.as_ref()?.ports_shift, - "--ports-shift=[SHIFT]", - "Add SHIFT to all port numbers Parity is listening on. Includes network port and all servers (HTTP JSON-RPC, WebSockets JSON-RPC, IPFS, SecretStore).", - - ["Account Options"] - FLAG flag_no_hardware_wallets: (bool) = false, or |c: &Config| c.account.as_ref()?.disable_hardware.clone(), - "--no-hardware-wallets", - "Disables hardware wallet support.", - - FLAG flag_fast_unlock: (bool) = false, or |c: &Config| c.account.as_ref()?.fast_unlock.clone(), - "--fast-unlock", - "Use drastically faster unlocking mode. This setting causes raw secrets to be stored unprotected in memory, so use with care.", - - ARG arg_keys_iterations: (u32) = 10240u32, or |c: &Config| c.account.as_ref()?.keys_iterations.clone(), - "--keys-iterations=[NUM]", - "Specify the number of iterations to use when deriving key from the password (bigger is more secure)", - - ARG arg_accounts_refresh: (u64) = 5u64, or |c: &Config| c.account.as_ref()?.refresh_time.clone(), - "--accounts-refresh=[TIME]", - "Specify the cache time of accounts read from disk. If you manage thousands of accounts set this to 0 to disable refresh.", - - ARG arg_unlock: (Option) = None, or |c: &Config| c.account.as_ref()?.unlock.as_ref().map(|vec| vec.join(",")), - "--unlock=[ACCOUNTS]", - "Unlock ACCOUNTS for the duration of the execution. ACCOUNTS is a comma-delimited list of addresses.", - - ARG arg_password: (Vec) = Vec::new(), or |c: &Config| c.account.as_ref()?.password.clone(), - "--password=[FILE]...", - "Provide a file containing a password for unlocking an account. 
Leading and trailing whitespace is trimmed.", - - ["Private Transactions Options"] - FLAG flag_private_enabled: (bool) = false, or |c: &Config| c.private_tx.as_ref()?.enabled, - "--private-tx-enabled", - "Enable private transactions.", - - ARG arg_private_signer: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.signer.clone(), - "--private-signer=[ACCOUNT]", - "Specify the account for signing public transaction created upon verified private transaction.", - - ARG arg_private_validators: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.validators.as_ref().map(|vec| vec.join(",")), - "--private-validators=[ACCOUNTS]", - "Specify the accounts for validating private transactions. ACCOUNTS is a comma-delimited list of addresses.", - - ARG arg_private_account: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.account.clone(), - "--private-account=[ACCOUNT]", - "Specify the account for signing requests to secret store.", - - ARG arg_private_sstore_url: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.sstore_url.clone(), - "--private-sstore-url=[URL]", - "Specify secret store URL used for encrypting private transactions.", - - ARG arg_private_sstore_threshold: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.sstore_threshold.clone(), - "--private-sstore-threshold=[NUM]", - "Specify secret store threshold used for encrypting private transactions.", - - ARG arg_private_passwords: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.passwords.clone(), - "--private-passwords=[FILE]...", - "Provide a file containing passwords for unlocking accounts (signer, private account, validators).", - - ["UI Options"] - ARG arg_ui_path: (String) = "$BASE/signer", or |c: &Config| c.ui.as_ref()?.path.clone(), - "--ui-path=[PATH]", - "Specify directory where Trusted UIs tokens should be stored.", - - ["Networking Options"] - FLAG flag_no_warp: (bool) = false, or |c: &Config| c.network.as_ref()?.warp.clone().map(|w| !w), - "--no-warp", - "Disable syncing 
from the snapshot over the network.", - - FLAG flag_no_discovery: (bool) = false, or |c: &Config| c.network.as_ref()?.discovery.map(|d| !d).clone(), - "--no-discovery", - "Disable new peer discovery.", - - FLAG flag_reserved_only: (bool) = false, or |c: &Config| c.network.as_ref()?.reserved_only.clone(), - "--reserved-only", - "Connect only to reserved nodes.", - - FLAG flag_no_ancient_blocks: (bool) = false, or |_| None, - "--no-ancient-blocks", - "Disable downloading old blocks after snapshot restoration or warp sync. Not recommended.", - - FLAG flag_no_serve_light: (bool) = false, or |c: &Config| c.network.as_ref()?.no_serve_light.clone(), - "--no-serve-light", - "Disable serving of light peers.", - - ARG arg_warp_barrier: (Option) = None, or |c: &Config| c.network.as_ref()?.warp_barrier.clone(), - "--warp-barrier=[NUM]", - "When warp enabled never attempt regular sync before warping to block NUM.", - - ARG arg_port: (u16) = 30303u16, or |c: &Config| c.network.as_ref()?.port.clone(), - "--port=[PORT]", - "Override the port on which the node should listen.", - - ARG arg_interface: (String) = "all", or |c: &Config| c.network.as_ref()?.interface.clone(), - "--interface=[IP]", - "Network interfaces. Valid values are 'all', 'local' or the ip of the interface you want parity to listen to.", - - ARG arg_min_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.min_peers.clone(), - "--min-peers=[NUM]", - "Try to maintain at least NUM peers.", - - ARG arg_max_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.max_peers.clone(), - "--max-peers=[NUM]", - "Allow up to NUM peers.", - - ARG arg_snapshot_peers: (u16) = 0u16, or |c: &Config| c.network.as_ref()?.snapshot_peers.clone(), - "--snapshot-peers=[NUM]", - "Allow additional NUM peers for a snapshot sync.", - - ARG arg_nat: (String) = "any", or |c: &Config| c.network.as_ref()?.nat.clone(), - "--nat=[METHOD]", - "Specify method to use for determining public address. 
Must be one of: any, none, upnp, extip:.", - - ARG arg_allow_ips: (String) = "all", or |c: &Config| c.network.as_ref()?.allow_ips.clone(), - "--allow-ips=[FILTER]", - "Filter outbound connections. Must be one of: private - connect to private network IP addresses only; public - connect to public network IP addresses only; all - connect to any IP address.", - - ARG arg_max_pending_peers: (u16) = 64u16, or |c: &Config| c.network.as_ref()?.max_pending_peers.clone(), - "--max-pending-peers=[NUM]", - "Allow up to NUM pending connections.", - - ARG arg_network_id: (Option) = None, or |c: &Config| c.network.as_ref()?.id.clone(), - "--network-id=[INDEX]", - "Override the network identifier from the chain we are on.", - - ARG arg_bootnodes: (Option) = None, or |c: &Config| c.network.as_ref()?.bootnodes.as_ref().map(|vec| vec.join(",")), - "--bootnodes=[NODES]", - "Override the bootnodes from our chain. NODES should be comma-delimited enodes.", - - ARG arg_node_key: (Option) = None, or |c: &Config| c.network.as_ref()?.node_key.clone(), - "--node-key=[KEY]", - "Specify node secret key, either as 64-character hex string or input to SHA3 operation.", - - ARG arg_reserved_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.reserved_peers.clone(), - "--reserved-peers=[FILE]", - "Provide a file containing enodes, one per line. 
These nodes will always have a reserved slot on top of the normal maximum peers.", - - CHECK |args: &Args| { - if let (Some(max_peers), Some(min_peers)) = (args.arg_max_peers, args.arg_min_peers) { - if min_peers > max_peers { - return Err(ArgsError::PeerConfiguration); - } - } - - Ok(()) - }, - - ["API and Console Options – HTTP JSON-RPC"] - FLAG flag_jsonrpc_allow_missing_blocks: (bool) = false, or |c: &Config| c.rpc.as_ref()?.allow_missing_blocks.clone(), - "--jsonrpc-allow-missing-blocks", - "RPC calls will return 'null' instead of an error if ancient block sync is still in progress and the block information requested could not be found", - - FLAG flag_no_jsonrpc: (bool) = false, or |c: &Config| c.rpc.as_ref()?.disable.clone(), - "--no-jsonrpc", - "Disable the HTTP JSON-RPC API server.", - - FLAG flag_jsonrpc_no_keep_alive: (bool) = false, or |c: &Config| c.rpc.as_ref()?.keep_alive, - "--jsonrpc-no-keep-alive", - "Disable HTTP/1.1 keep alive header. Disabling keep alive will prevent re-using the same TCP connection to fire multiple requests, recommended when using one request per connection.", - - FLAG flag_jsonrpc_experimental: (bool) = false, or |c: &Config| c.rpc.as_ref()?.experimental_rpcs.clone(), - "--jsonrpc-experimental", - "Enable experimental RPCs. 
Enable to have access to methods from unfinalised EIPs in all namespaces", - - ARG arg_jsonrpc_port: (u16) = 8545u16, or |c: &Config| c.rpc.as_ref()?.port.clone(), - "--jsonrpc-port=[PORT]", - "Specify the port portion of the HTTP JSON-RPC API server.", - - ARG arg_jsonrpc_interface: (String) = "local", or |c: &Config| c.rpc.as_ref()?.interface.clone(), - "--jsonrpc-interface=[IP]", - "Specify the hostname portion of the HTTP JSON-RPC API server, IP should be an interface's IP address, or all (all interfaces) or local.", - - ARG arg_jsonrpc_apis: (String) = "web3,eth,pubsub,net,parity,private,parity_pubsub,traces,rpc,shh,shh_pubsub", or |c: &Config| c.rpc.as_ref()?.apis.as_ref().map(|vec| vec.join(",")), - "--jsonrpc-apis=[APIS]", - "Specify the APIs available through the HTTP JSON-RPC interface using a comma-delimited list of API names. Possible names are: all, safe, debug, web3, net, eth, pubsub, personal, signer, parity, parity_pubsub, parity_accounts, parity_set, traces, rpc, secretstore, shh, shh_pubsub. You can also disable a specific API by putting '-' in the front, example: all,-personal. 'safe' enables the following APIs: web3, net, eth, pubsub, parity, parity_pubsub, traces, rpc, shh, shh_pubsub", - - ARG arg_jsonrpc_hosts: (String) = "none", or |c: &Config| c.rpc.as_ref()?.hosts.as_ref().map(|vec| vec.join(",")), - "--jsonrpc-hosts=[HOSTS]", - "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\",.", - - ARG arg_jsonrpc_threads: (usize) = 4usize, or |c: &Config| c.rpc.as_ref()?.processing_threads, - "--jsonrpc-threads=[THREADS]", - "Turn on additional processing threads for JSON-RPC servers (all transports). 
Setting this to a non-zero value allows parallel execution of cpu-heavy queries.", - - ARG arg_jsonrpc_cors: (String) = "none", or |c: &Config| c.rpc.as_ref()?.cors.as_ref().map(|vec| vec.join(",")), - "--jsonrpc-cors=[URL]", - "Specify CORS header for HTTP JSON-RPC API responses. Special options: \"all\", \"none\".", - - ARG arg_jsonrpc_server_threads: (Option) = None, or |c: &Config| c.rpc.as_ref()?.server_threads, - "--jsonrpc-server-threads=[NUM]", - "Enables multiple threads handling incoming connections for HTTP JSON-RPC server.", - - ARG arg_jsonrpc_max_payload: (Option) = None, or |c: &Config| c.rpc.as_ref()?.max_payload, - "--jsonrpc-max-payload=[MB]", - "Specify maximum size for HTTP JSON-RPC requests in megabytes.", - - ARG arg_poll_lifetime: (u32) = 60u32, or |c: &Config| c.rpc.as_ref()?.poll_lifetime.clone(), - "--poll-lifetime=[S]", - "Set the RPC filter lifetime to S seconds. The filter has to be polled at least every S seconds , otherwise it is removed.", - - ["API and Console Options – WebSockets"] - FLAG flag_no_ws: (bool) = false, or |c: &Config| c.websockets.as_ref()?.disable.clone(), - "--no-ws", - "Disable the WebSockets JSON-RPC server.", - - ARG arg_ws_port: (u16) = 8546u16, or |c: &Config| c.websockets.as_ref()?.port.clone(), - "--ws-port=[PORT]", - "Specify the port portion of the WebSockets JSON-RPC server.", - - ARG arg_ws_interface: (String) = "local", or |c: &Config| c.websockets.as_ref()?.interface.clone(), - "--ws-interface=[IP]", - "Specify the hostname portion of the WebSockets JSON-RPC server, IP should be an interface's IP address, or all (all interfaces) or local.", - - ARG arg_ws_apis: (String) = "web3,eth,pubsub,net,parity,parity_pubsub,private,traces,rpc,shh,shh_pubsub", or |c: &Config| c.websockets.as_ref()?.apis.as_ref().map(|vec| vec.join(",")), - "--ws-apis=[APIS]", - "Specify the JSON-RPC APIs available through the WebSockets interface using a comma-delimited list of API names. 
Possible names are: all, safe, web3, net, eth, pubsub, personal, signer, parity, parity_pubsub, parity_accounts, parity_set, traces, rpc, secretstore, shh, shh_pubsub. You can also disable a specific API by putting '-' in the front, example: all,-personal. 'safe' enables the following APIs: web3, net, eth, pubsub, parity, parity_pubsub, traces, rpc, shh, shh_pubsub", - - ARG arg_ws_origins: (String) = "parity://*,chrome-extension://*,moz-extension://*", or |c: &Config| c.websockets.as_ref()?.origins.as_ref().map(|vec| vec.join(",")), - "--ws-origins=[URL]", - "Specify Origin header values allowed to connect. Special options: \"all\", \"none\".", - - ARG arg_ws_hosts: (String) = "none", or |c: &Config| c.websockets.as_ref()?.hosts.as_ref().map(|vec| vec.join(",")), - "--ws-hosts=[HOSTS]", - "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\".", - - ARG arg_ws_max_connections: (usize) = 100usize, or |c: &Config| c.websockets.as_ref()?.max_connections, - "--ws-max-connections=[CONN]", - "Maximum number of allowed concurrent WebSockets JSON-RPC connections.", - - ["API and Console Options – IPC"] - FLAG flag_no_ipc: (bool) = false, or |c: &Config| c.ipc.as_ref()?.disable.clone(), - "--no-ipc", - "Disable JSON-RPC over IPC service.", - - ARG arg_ipc_path: (String) = if cfg!(windows) { r"\\.\pipe\jsonrpc.ipc" } else { "$BASE/jsonrpc.ipc" }, or |c: &Config| c.ipc.as_ref()?.path.clone(), - "--ipc-path=[PATH]", - "Specify custom path for JSON-RPC over IPC service.", - - ARG arg_ipc_apis: (String) = "web3,eth,pubsub,net,parity,parity_pubsub,parity_accounts,private,traces,rpc,shh,shh_pubsub", or |c: &Config| c.ipc.as_ref()?.apis.as_ref().map(|vec| vec.join(",")), - "--ipc-apis=[APIS]", - "Specify custom API set available via JSON-RPC over IPC using a comma-delimited list of API names. 
Possible names are: all, safe, web3, net, eth, pubsub, personal, signer, parity, parity_pubsub, parity_accounts, parity_set, traces, rpc, secretstore, shh, shh_pubsub. You can also disable a specific API by putting '-' in the front, example: all,-personal. 'safe' enables the following APIs: web3, net, eth, pubsub, parity, parity_pubsub, traces, rpc, shh, shh_pubsub", - - ["API and Console Options – IPFS"] - FLAG flag_ipfs_api: (bool) = false, or |c: &Config| c.ipfs.as_ref()?.enable.clone(), - "--ipfs-api", - "Enable IPFS-compatible HTTP API.", - - ARG arg_ipfs_api_port: (u16) = 5001u16, or |c: &Config| c.ipfs.as_ref()?.port.clone(), - "--ipfs-api-port=[PORT]", - "Configure on which port the IPFS HTTP API should listen.", - - ARG arg_ipfs_api_interface: (String) = "local", or |c: &Config| c.ipfs.as_ref()?.interface.clone(), - "--ipfs-api-interface=[IP]", - "Specify the hostname portion of the IPFS API server, IP should be an interface's IP address or local.", - - ARG arg_ipfs_api_hosts: (String) = "none", or |c: &Config| c.ipfs.as_ref()?.hosts.as_ref().map(|vec| vec.join(",")), - "--ipfs-api-hosts=[HOSTS]", - "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\".", - - ARG arg_ipfs_api_cors: (String) = "none", or |c: &Config| c.ipfs.as_ref()?.cors.as_ref().map(|vec| vec.join(",")), - "--ipfs-api-cors=[URL]", - "Specify CORS header for IPFS API responses. 
Special options: \"all\", \"none\".", - - ["Light Client Options"] - ARG arg_on_demand_response_time_window: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_response_time_window, - "--on-demand-time-window=[S]", - "Specify the maximum time to wait for a successful response", - - ARG arg_on_demand_request_backoff_start: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_backoff_start, - "--on-demand-start-backoff=[S]", - "Specify light client initial backoff time for a request", - - ARG arg_on_demand_request_backoff_max: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_backoff_max, - "--on-demand-end-backoff=[S]", - "Specify light client maximum backoff time for a request", - - ARG arg_on_demand_request_backoff_rounds_max: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_backoff_rounds_max, - "--on-demand-max-backoff-rounds=[TIMES]", - "Specify light client maximum number of backoff iterations for a request", - - ARG arg_on_demand_request_consecutive_failures: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_consecutive_failures, - "--on-demand-consecutive-failures=[TIMES]", - "Specify light client the number of failures for a request until it gets exponentially backed off", - - ["Secret Store Options"] - FLAG flag_no_secretstore: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable.clone(), - "--no-secretstore", - "Disable Secret Store functionality.", - - FLAG flag_no_secretstore_http: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable_http.clone(), - "--no-secretstore-http", - "Disable Secret Store HTTP API.", - - FLAG flag_no_secretstore_auto_migrate: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable_auto_migrate.clone(), - "--no-secretstore-auto-migrate", - "Do not run servers set change session automatically when servers set changes. 
This option has no effect when servers set is read from configuration file.", - - ARG arg_secretstore_acl_contract: (Option) = Some("registry".into()), or |c: &Config| c.secretstore.as_ref()?.acl_contract.clone(), - "--secretstore-acl-contract=[SOURCE]", - "Secret Store permissioning contract address source: none, registry (contract address is read from 'secretstore_acl_checker' entry in registry) or address.", - - ARG arg_secretstore_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract.clone(), - "--secretstore-contract=[SOURCE]", - "Secret Store Service contract address source: none, registry (contract address is read from 'secretstore_service' entry in registry) or address.", - - ARG arg_secretstore_srv_gen_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract_srv_gen.clone(), - "--secretstore-srv-gen-contract=[SOURCE]", - "Secret Store Service server key generation contract address source: none, registry (contract address is read from 'secretstore_service_srv_gen' entry in registry) or address.", - - ARG arg_secretstore_srv_retr_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract_srv_retr.clone(), - "--secretstore-srv-retr-contract=[SOURCE]", - "Secret Store Service server key retrieval contract address source: none, registry (contract address is read from 'secretstore_service_srv_retr' entry in registry) or address.", - - ARG arg_secretstore_doc_store_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract_doc_store.clone(), - "--secretstore-doc-store-contract=[SOURCE]", - "Secret Store Service document key store contract address source: none, registry (contract address is read from 'secretstore_service_doc_store' entry in registry) or address.", - - ARG arg_secretstore_doc_sretr_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract_doc_sretr.clone(), - "--secretstore-doc-sretr-contract=[SOURCE]", - "Secret 
Store Service document key shadow retrieval contract address source: none, registry (contract address is read from 'secretstore_service_doc_sretr' entry in registry) or address.", - - ARG arg_secretstore_nodes: (String) = "", or |c: &Config| c.secretstore.as_ref()?.nodes.as_ref().map(|vec| vec.join(",")), - "--secretstore-nodes=[NODES]", - "Comma-separated list of other secret store cluster nodes in form NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT.", - - ARG arg_secretstore_server_set_contract: (Option) = Some("registry".into()), or |c: &Config| c.secretstore.as_ref()?.server_set_contract.clone(), - "--secretstore-server-set-contract=[SOURCE]", - "Secret Store server set contract address source: none, registry (contract address is read from 'secretstore_server_set' entry in registry) or address.", - - ARG arg_secretstore_interface: (String) = "local", or |c: &Config| c.secretstore.as_ref()?.interface.clone(), - "--secretstore-interface=[IP]", - "Specify the hostname portion for listening to Secret Store Key Server internal requests, IP should be an interface's IP address, or local.", - - ARG arg_secretstore_port: (u16) = 8083u16, or |c: &Config| c.secretstore.as_ref()?.port.clone(), - "--secretstore-port=[PORT]", - "Specify the port portion for listening to Secret Store Key Server internal requests.", - - ARG arg_secretstore_http_interface: (String) = "local", or |c: &Config| c.secretstore.as_ref()?.http_interface.clone(), - "--secretstore-http-interface=[IP]", - "Specify the hostname portion for listening to Secret Store Key Server HTTP requests, IP should be an interface's IP address, or local.", - - ARG arg_secretstore_http_port: (u16) = 8082u16, or |c: &Config| c.secretstore.as_ref()?.http_port.clone(), - "--secretstore-http-port=[PORT]", - "Specify the port portion for listening to Secret Store Key Server HTTP requests.", - - ARG arg_secretstore_path: (String) = "$BASE/secretstore", or |c: &Config| c.secretstore.as_ref()?.path.clone(), - 
"--secretstore-path=[PATH]", - "Specify directory where Secret Store should save its data.", - - ARG arg_secretstore_secret: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.self_secret.clone(), - "--secretstore-secret=[SECRET]", - "Hex-encoded secret key of this node.", - - ARG arg_secretstore_admin_public: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.admin_public.clone(), - "--secretstore-admin=[PUBLIC]", - "Hex-encoded public key of secret store administrator.", - - ["Sealing/Mining Options"] - FLAG flag_force_sealing: (bool) = false, or |c: &Config| c.mining.as_ref()?.force_sealing.clone(), - "--force-sealing", - "Force the node to author new blocks as if it were always sealing/mining.", + { + // CLI subcommands + // Subcommands must start with cmd_ and have '_' in place of '-' + // Sub-subcommands must start with the name of the subcommand + // Arguments must start with arg_ + // Flags must start with flag_ + + CMD cmd_daemon + { + "Use Parity as a daemon", + + ARG arg_daemon_pid_file: (Option) = None, + "", + "Path to the pid file", + } + + CMD cmd_account + { + "Manage accounts", + + CMD cmd_account_new { + "Create a new account (and its associated key) for the given --chain (default: mainnet)", + } + + CMD cmd_account_list { + "List existing accounts of the given --chain (default: mainnet)", + } + + CMD cmd_account_import + { + "Import accounts from JSON UTC keystore files to the specified --chain (default mainnet)", + + ARG arg_account_import_path : (Option>) = None, + "...", + "Path to the accounts", + } + } + + CMD cmd_wallet + { + "Manage wallet", + + CMD cmd_wallet_import + { + "Import wallet into the given --chain (default: mainnet)", + + ARG arg_wallet_import_path: (Option) = None, + "", + "Path to the wallet", + } + } + + CMD cmd_import + { + "Import blockchain data from a file to the given --chain database (default: mainnet)", + + ARG arg_import_format: (Option) = None, + "--format=[FORMAT]", + "Import in a given format. 
FORMAT must be either 'hex' or 'binary'. (default: auto)", + + ARG arg_import_file: (Option) = None, + "[FILE]", + "Path to the file to import from", + } + + CMD cmd_export + { + "Export blockchain", + + CMD cmd_export_blocks + { + "Export the blockchain blocks from the given --chain database (default: mainnet) into a file. This command requires the chain to be synced with --fat-db on.", + + ARG arg_export_blocks_format: (Option) = None, + "--format=[FORMAT]", + "Export in a given format. FORMAT must be either 'hex' or 'binary'. (default: binary)", + + ARG arg_export_blocks_from: (String) = "1", + "--from=[BLOCK]", + "Export from block BLOCK, which may be an index or hash.", + + ARG arg_export_blocks_to: (String) = "latest", + "--to=[BLOCK]", + "Export to (including) block BLOCK, which may be an index, hash or latest.", + + ARG arg_export_blocks_file: (Option) = None, + "[FILE]", + "Path to the exported file", + } + + CMD cmd_export_state + { + "Export the blockchain state from the given --chain (default: mainnet) into a file. This command requires the chain to be synced with --fat-db on.", + + FLAG flag_export_state_no_storage: (bool) = false, + "--no-storage", + "Don't export account storage.", + + FLAG flag_export_state_no_code: (bool) = false, + "--no-code", + "Don't export account code.", + + ARG arg_export_state_min_balance: (Option) = None, + "--min-balance=[WEI]", + "Don't export accounts with balance less than specified.", + + ARG arg_export_state_max_balance: (Option) = None, + "--max-balance=[WEI]", + "Don't export accounts with balance greater than specified.", + + ARG arg_export_state_at: (String) = "latest", + "--at=[BLOCK]", + "Take a snapshot at the given block, which may be an index, hash, or latest. Note that taking snapshots at non-recent blocks will only work with --pruning archive", + + ARG arg_export_state_format: (Option) = None, + "--format=[FORMAT]", + "Export in a given format. FORMAT must be either 'hex' or 'binary'. 
(default: binary)", + + ARG arg_export_state_file: (Option) = None, + "[FILE]", + "Path to the exported file", + } + } + + CMD cmd_signer + { + "Manage signer", + + CMD cmd_signer_new_token { + "Generate a new signer-authentication token for the given --chain (default: mainnet)", + } + + CMD cmd_signer_list { + "List the signer-authentication tokens from given --chain (default: mainnet)", + } + + CMD cmd_signer_sign + { + "Sign", + + ARG arg_signer_sign_id: (Option) = None, + "[ID]", + "ID", + } + + CMD cmd_signer_reject + { + "Reject", + + ARG arg_signer_reject_id: (Option) = None, + "", + "ID", + } + } + + CMD cmd_snapshot + { + "Make a snapshot of the database of the given --chain (default: mainnet)", + + ARG arg_snapshot_at: (String) = "latest", + "--at=[BLOCK]", + "Take a snapshot at the given block, which may be an index, hash, or latest. Note that taking snapshots at non-recent blocks will only work with --pruning archive", + + ARG arg_snapshot_file: (Option) = None, + "", + "Path to the file to export to", + } + + CMD cmd_restore + { + "Restore the database of the given --chain (default: mainnet) from a snapshot file", + + ARG arg_restore_file: (Option) = None, + "[FILE]", + "Path to the file to restore from", + } + + CMD cmd_tools + { + "Tools", + + CMD cmd_tools_hash + { + "Hash a file using the Keccak-256 algorithm", + + ARG arg_tools_hash_file: (Option) = None, + "", + "File", + } + } + + CMD cmd_db + { + "Manage the database representing the state of the blockchain on this system", + + CMD cmd_db_kill { + "Clean the database of the given --chain (default: mainnet)", + } + + CMD cmd_db_reset { + "Removes NUM latests blocks from the db", + + ARG arg_db_reset_num: (u32) = 10u32, + "", + "Number of blocks to revert", + } + + } + + CMD cmd_export_hardcoded_sync + { + "Print the hashed light clients headers of the given --chain (default: mainnet) in a JSON format. 
To be used as hardcoded headers in a genesis file.", + } + + // CMD removed in 2.0 + + CMD cmd_dapp + { + "Manage dapps", + + ARG arg_dapp_path: (Option) = None, + "", + "Path to the dapps", + } + } + { + // Global flags and arguments + ["Operating Options"] + FLAG flag_no_download: (bool) = false, or |c: &Config| c.parity.as_ref()?.no_download.clone(), + "--no-download", + "Normally new releases will be downloaded ready for updating. This disables it. Not recommended.", + + FLAG flag_no_consensus: (bool) = false, or |c: &Config| c.parity.as_ref()?.no_consensus.clone(), + "--no-consensus", + "Force the binary to run even if there are known issues regarding consensus. Not recommended.", + + FLAG flag_light: (bool) = false, or |c: &Config| c.parity.as_ref()?.light, + "--light", + "Experimental: run in light client mode. Light clients synchronize a bare minimum of data and fetch necessary data on-demand from the network. Much lower in storage, potentially higher in bandwidth. Has no effect with subcommands.", + + FLAG flag_no_hardcoded_sync: (bool) = false, or |c: &Config| c.parity.as_ref()?.no_hardcoded_sync, + "--no-hardcoded-sync", + "By default, if there is no existing database the light client will automatically jump to a block hardcoded in the chain's specifications. This disables this feature.", + + FLAG flag_force_direct: (bool) = false, or |_| None, + "--force-direct", + "Run the originally installed version of Parity, ignoring any updates that have since been installed.", + + ARG arg_mode: (String) = "last", or |c: &Config| c.parity.as_ref()?.mode.clone(), + "--mode=[MODE]", + "Set the operating mode. 
MODE can be one of: last - Uses the last-used mode, active if none; active - Parity continuously syncs the chain; passive - Parity syncs initially, then sleeps and wakes regularly to resync; dark - Parity syncs only when the JSON-RPC is active; offline - Parity doesn't sync.", + + ARG arg_mode_timeout: (u64) = 300u64, or |c: &Config| c.parity.as_ref()?.mode_timeout.clone(), + "--mode-timeout=[SECS]", + "Specify the number of seconds before inactivity timeout occurs when mode is dark or passive", + + ARG arg_mode_alarm: (u64) = 3600u64, or |c: &Config| c.parity.as_ref()?.mode_alarm.clone(), + "--mode-alarm=[SECS]", + "Specify the number of seconds before auto sleep reawake timeout occurs when mode is passive", + + ARG arg_auto_update: (String) = "critical", or |c: &Config| c.parity.as_ref()?.auto_update.clone(), + "--auto-update=[SET]", + "Set a releases set to automatically update and install. SET can be one of: all - All updates in the our release track; critical - Only consensus/security updates; none - No updates will be auto-installed.", + + ARG arg_auto_update_delay: (u16) = 100u16, or |c: &Config| c.parity.as_ref()?.auto_update_delay.clone(), + "--auto-update-delay=[NUM]", + "Specify the maximum number of blocks used for randomly delaying updates.", + + ARG arg_auto_update_check_frequency: (u16) = 20u16, or |c: &Config| c.parity.as_ref()?.auto_update_check_frequency.clone(), + "--auto-update-check-frequency=[NUM]", + "Specify the number of blocks between each auto-update check.", + + ARG arg_release_track: (String) = "current", or |c: &Config| c.parity.as_ref()?.release_track.clone(), + "--release-track=[TRACK]", + "Set which release track we should use for updates. 
TRACK can be one of: stable - Stable releases; beta - Beta releases; nightly - Nightly releases (unstable); testing - Testing releases (do not use); current - Whatever track this executable was released on.", + + ARG arg_chain: (String) = "foundation", or |c: &Config| c.parity.as_ref()?.chain.clone(), + "--chain=[CHAIN]", + "Specify the blockchain type. CHAIN may be either a JSON chain specification file or ethereum, classic, poacore, xdai, volta, ewc, musicoin, ellaism, mix, callisto, morden, mordor, ropsten, kovan, rinkeby, goerli, kotti, poasokol, testnet, or dev.", + + ARG arg_keys_path: (String) = "$BASE/keys", or |c: &Config| c.parity.as_ref()?.keys_path.clone(), + "--keys-path=[PATH]", + "Specify the path for JSON key files to be found", + + ARG arg_identity: (String) = "", or |c: &Config| c.parity.as_ref()?.identity.clone(), + "--identity=[NAME]", + "Specify your node's name.", + + ARG arg_base_path: (Option) = None, or |c: &Config| c.parity.as_ref()?.base_path.clone(), + "-d, --base-path=[PATH]", + "Specify the base data storage path.", + + ARG arg_db_path: (Option) = None, or |c: &Config| c.parity.as_ref()?.db_path.clone(), + "--db-path=[PATH]", + "Specify the database directory path", + + ["Convenience Options"] + FLAG flag_unsafe_expose: (bool) = false, or |c: &Config| c.misc.as_ref()?.unsafe_expose, + "--unsafe-expose", + "All servers will listen on external interfaces and will be remotely accessible. It's equivalent with setting the following: --[ws,jsonrpc,ipfs-api,secretstore,stratum,dapps,secretstore-http]-interface=all --*-hosts=all This option is UNSAFE and should be used with great care!", + + ARG arg_config: (String) = "$BASE/config.toml", or |_| None, + "-c, --config=[CONFIG]", + "Specify a configuration. 
CONFIG may be either a configuration file or a preset: dev, insecure, dev-insecure, mining, or non-standard-ports.", + + ARG arg_ports_shift: (u16) = 0u16, or |c: &Config| c.misc.as_ref()?.ports_shift, + "--ports-shift=[SHIFT]", + "Add SHIFT to all port numbers Parity is listening on. Includes network port and all servers (HTTP JSON-RPC, WebSockets JSON-RPC, IPFS, SecretStore).", + + ["Account Options"] + FLAG flag_no_hardware_wallets: (bool) = false, or |c: &Config| c.account.as_ref()?.disable_hardware.clone(), + "--no-hardware-wallets", + "Disables hardware wallet support.", + + FLAG flag_fast_unlock: (bool) = false, or |c: &Config| c.account.as_ref()?.fast_unlock.clone(), + "--fast-unlock", + "Use drastically faster unlocking mode. This setting causes raw secrets to be stored unprotected in memory, so use with care.", + + ARG arg_keys_iterations: (u32) = 10240u32, or |c: &Config| c.account.as_ref()?.keys_iterations.clone(), + "--keys-iterations=[NUM]", + "Specify the number of iterations to use when deriving key from the password (bigger is more secure)", + + ARG arg_accounts_refresh: (u64) = 5u64, or |c: &Config| c.account.as_ref()?.refresh_time.clone(), + "--accounts-refresh=[TIME]", + "Specify the cache time of accounts read from disk. If you manage thousands of accounts set this to 0 to disable refresh.", + + ARG arg_unlock: (Option) = None, or |c: &Config| c.account.as_ref()?.unlock.as_ref().map(|vec| vec.join(",")), + "--unlock=[ACCOUNTS]", + "Unlock ACCOUNTS for the duration of the execution. ACCOUNTS is a comma-delimited list of addresses.", + + ARG arg_password: (Vec) = Vec::new(), or |c: &Config| c.account.as_ref()?.password.clone(), + "--password=[FILE]...", + "Provide a file containing a password for unlocking an account. 
Leading and trailing whitespace is trimmed.", + + ["Private Transactions Options"] + FLAG flag_private_enabled: (bool) = false, or |c: &Config| c.private_tx.as_ref()?.enabled, + "--private-tx-enabled", + "Enable private transactions.", + + ARG arg_private_signer: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.signer.clone(), + "--private-signer=[ACCOUNT]", + "Specify the account for signing public transaction created upon verified private transaction.", + + ARG arg_private_validators: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.validators.as_ref().map(|vec| vec.join(",")), + "--private-validators=[ACCOUNTS]", + "Specify the accounts for validating private transactions. ACCOUNTS is a comma-delimited list of addresses.", + + ARG arg_private_account: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.account.clone(), + "--private-account=[ACCOUNT]", + "Specify the account for signing requests to secret store.", + + ARG arg_private_sstore_url: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.sstore_url.clone(), + "--private-sstore-url=[URL]", + "Specify secret store URL used for encrypting private transactions.", + + ARG arg_private_sstore_threshold: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.sstore_threshold.clone(), + "--private-sstore-threshold=[NUM]", + "Specify secret store threshold used for encrypting private transactions.", + + ARG arg_private_passwords: (Option) = None, or |c: &Config| c.private_tx.as_ref()?.passwords.clone(), + "--private-passwords=[FILE]...", + "Provide a file containing passwords for unlocking accounts (signer, private account, validators).", + + ["UI Options"] + ARG arg_ui_path: (String) = "$BASE/signer", or |c: &Config| c.ui.as_ref()?.path.clone(), + "--ui-path=[PATH]", + "Specify directory where Trusted UIs tokens should be stored.", + + ["Networking Options"] + FLAG flag_no_warp: (bool) = false, or |c: &Config| c.network.as_ref()?.warp.clone().map(|w| !w), + "--no-warp", + "Disable syncing 
from the snapshot over the network.", + + FLAG flag_no_discovery: (bool) = false, or |c: &Config| c.network.as_ref()?.discovery.map(|d| !d).clone(), + "--no-discovery", + "Disable new peer discovery.", + + FLAG flag_reserved_only: (bool) = false, or |c: &Config| c.network.as_ref()?.reserved_only.clone(), + "--reserved-only", + "Connect only to reserved nodes.", + + FLAG flag_no_ancient_blocks: (bool) = false, or |_| None, + "--no-ancient-blocks", + "Disable downloading old blocks after snapshot restoration or warp sync. Not recommended.", + + FLAG flag_no_serve_light: (bool) = false, or |c: &Config| c.network.as_ref()?.no_serve_light.clone(), + "--no-serve-light", + "Disable serving of light peers.", + + ARG arg_warp_barrier: (Option) = None, or |c: &Config| c.network.as_ref()?.warp_barrier.clone(), + "--warp-barrier=[NUM]", + "When warp enabled never attempt regular sync before warping to block NUM.", + + ARG arg_port: (u16) = 30303u16, or |c: &Config| c.network.as_ref()?.port.clone(), + "--port=[PORT]", + "Override the port on which the node should listen.", + + ARG arg_interface: (String) = "all", or |c: &Config| c.network.as_ref()?.interface.clone(), + "--interface=[IP]", + "Network interfaces. Valid values are 'all', 'local' or the ip of the interface you want parity to listen to.", + + ARG arg_min_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.min_peers.clone(), + "--min-peers=[NUM]", + "Try to maintain at least NUM peers.", + + ARG arg_max_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.max_peers.clone(), + "--max-peers=[NUM]", + "Allow up to NUM peers.", + + ARG arg_snapshot_peers: (u16) = 0u16, or |c: &Config| c.network.as_ref()?.snapshot_peers.clone(), + "--snapshot-peers=[NUM]", + "Allow additional NUM peers for a snapshot sync.", + + ARG arg_nat: (String) = "any", or |c: &Config| c.network.as_ref()?.nat.clone(), + "--nat=[METHOD]", + "Specify method to use for determining public address. 
Must be one of: any, none, upnp, extip:.", + + ARG arg_allow_ips: (String) = "all", or |c: &Config| c.network.as_ref()?.allow_ips.clone(), + "--allow-ips=[FILTER]", + "Filter outbound connections. Must be one of: private - connect to private network IP addresses only; public - connect to public network IP addresses only; all - connect to any IP address.", + + ARG arg_max_pending_peers: (u16) = 64u16, or |c: &Config| c.network.as_ref()?.max_pending_peers.clone(), + "--max-pending-peers=[NUM]", + "Allow up to NUM pending connections.", + + ARG arg_network_id: (Option) = None, or |c: &Config| c.network.as_ref()?.id.clone(), + "--network-id=[INDEX]", + "Override the network identifier from the chain we are on.", + + ARG arg_bootnodes: (Option) = None, or |c: &Config| c.network.as_ref()?.bootnodes.as_ref().map(|vec| vec.join(",")), + "--bootnodes=[NODES]", + "Override the bootnodes from our chain. NODES should be comma-delimited enodes.", + + ARG arg_node_key: (Option) = None, or |c: &Config| c.network.as_ref()?.node_key.clone(), + "--node-key=[KEY]", + "Specify node secret key, either as 64-character hex string or input to SHA3 operation.", + + ARG arg_reserved_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.reserved_peers.clone(), + "--reserved-peers=[FILE]", + "Provide a file containing enodes, one per line. 
These nodes will always have a reserved slot on top of the normal maximum peers.", + + CHECK |args: &Args| { + if let (Some(max_peers), Some(min_peers)) = (args.arg_max_peers, args.arg_min_peers) { + if min_peers > max_peers { + return Err(ArgsError::PeerConfiguration); + } + } + + Ok(()) + }, + + ["API and Console Options – HTTP JSON-RPC"] + FLAG flag_jsonrpc_allow_missing_blocks: (bool) = false, or |c: &Config| c.rpc.as_ref()?.allow_missing_blocks.clone(), + "--jsonrpc-allow-missing-blocks", + "RPC calls will return 'null' instead of an error if ancient block sync is still in progress and the block information requested could not be found", + + FLAG flag_no_jsonrpc: (bool) = false, or |c: &Config| c.rpc.as_ref()?.disable.clone(), + "--no-jsonrpc", + "Disable the HTTP JSON-RPC API server.", + + FLAG flag_jsonrpc_no_keep_alive: (bool) = false, or |c: &Config| c.rpc.as_ref()?.keep_alive, + "--jsonrpc-no-keep-alive", + "Disable HTTP/1.1 keep alive header. Disabling keep alive will prevent re-using the same TCP connection to fire multiple requests, recommended when using one request per connection.", + + FLAG flag_jsonrpc_experimental: (bool) = false, or |c: &Config| c.rpc.as_ref()?.experimental_rpcs.clone(), + "--jsonrpc-experimental", + "Enable experimental RPCs. 
Enable to have access to methods from unfinalised EIPs in all namespaces", + + ARG arg_jsonrpc_port: (u16) = 8545u16, or |c: &Config| c.rpc.as_ref()?.port.clone(), + "--jsonrpc-port=[PORT]", + "Specify the port portion of the HTTP JSON-RPC API server.", + + ARG arg_jsonrpc_interface: (String) = "local", or |c: &Config| c.rpc.as_ref()?.interface.clone(), + "--jsonrpc-interface=[IP]", + "Specify the hostname portion of the HTTP JSON-RPC API server, IP should be an interface's IP address, or all (all interfaces) or local.", + + ARG arg_jsonrpc_apis: (String) = "web3,eth,pubsub,net,parity,private,parity_pubsub,traces,rpc,shh,shh_pubsub", or |c: &Config| c.rpc.as_ref()?.apis.as_ref().map(|vec| vec.join(",")), + "--jsonrpc-apis=[APIS]", + "Specify the APIs available through the HTTP JSON-RPC interface using a comma-delimited list of API names. Possible names are: all, safe, debug, web3, net, eth, pubsub, personal, signer, parity, parity_pubsub, parity_accounts, parity_set, traces, rpc, secretstore, shh, shh_pubsub. You can also disable a specific API by putting '-' in the front, example: all,-personal. 'safe' enables the following APIs: web3, net, eth, pubsub, parity, parity_pubsub, traces, rpc, shh, shh_pubsub", + + ARG arg_jsonrpc_hosts: (String) = "none", or |c: &Config| c.rpc.as_ref()?.hosts.as_ref().map(|vec| vec.join(",")), + "--jsonrpc-hosts=[HOSTS]", + "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\",.", + + ARG arg_jsonrpc_threads: (usize) = 4usize, or |c: &Config| c.rpc.as_ref()?.processing_threads, + "--jsonrpc-threads=[THREADS]", + "Turn on additional processing threads for JSON-RPC servers (all transports). 
Setting this to a non-zero value allows parallel execution of cpu-heavy queries.", + + ARG arg_jsonrpc_cors: (String) = "none", or |c: &Config| c.rpc.as_ref()?.cors.as_ref().map(|vec| vec.join(",")), + "--jsonrpc-cors=[URL]", + "Specify CORS header for HTTP JSON-RPC API responses. Special options: \"all\", \"none\".", + + ARG arg_jsonrpc_server_threads: (Option) = None, or |c: &Config| c.rpc.as_ref()?.server_threads, + "--jsonrpc-server-threads=[NUM]", + "Enables multiple threads handling incoming connections for HTTP JSON-RPC server.", + + ARG arg_jsonrpc_max_payload: (Option) = None, or |c: &Config| c.rpc.as_ref()?.max_payload, + "--jsonrpc-max-payload=[MB]", + "Specify maximum size for HTTP JSON-RPC requests in megabytes.", + + ARG arg_poll_lifetime: (u32) = 60u32, or |c: &Config| c.rpc.as_ref()?.poll_lifetime.clone(), + "--poll-lifetime=[S]", + "Set the RPC filter lifetime to S seconds. The filter has to be polled at least every S seconds , otherwise it is removed.", + + ["API and Console Options – WebSockets"] + FLAG flag_no_ws: (bool) = false, or |c: &Config| c.websockets.as_ref()?.disable.clone(), + "--no-ws", + "Disable the WebSockets JSON-RPC server.", + + ARG arg_ws_port: (u16) = 8546u16, or |c: &Config| c.websockets.as_ref()?.port.clone(), + "--ws-port=[PORT]", + "Specify the port portion of the WebSockets JSON-RPC server.", + + ARG arg_ws_interface: (String) = "local", or |c: &Config| c.websockets.as_ref()?.interface.clone(), + "--ws-interface=[IP]", + "Specify the hostname portion of the WebSockets JSON-RPC server, IP should be an interface's IP address, or all (all interfaces) or local.", + + ARG arg_ws_apis: (String) = "web3,eth,pubsub,net,parity,parity_pubsub,private,traces,rpc,shh,shh_pubsub", or |c: &Config| c.websockets.as_ref()?.apis.as_ref().map(|vec| vec.join(",")), + "--ws-apis=[APIS]", + "Specify the JSON-RPC APIs available through the WebSockets interface using a comma-delimited list of API names. 
Possible names are: all, safe, web3, net, eth, pubsub, personal, signer, parity, parity_pubsub, parity_accounts, parity_set, traces, rpc, secretstore, shh, shh_pubsub. You can also disable a specific API by putting '-' in the front, example: all,-personal. 'safe' enables the following APIs: web3, net, eth, pubsub, parity, parity_pubsub, traces, rpc, shh, shh_pubsub", + + ARG arg_ws_origins: (String) = "parity://*,chrome-extension://*,moz-extension://*", or |c: &Config| c.websockets.as_ref()?.origins.as_ref().map(|vec| vec.join(",")), + "--ws-origins=[URL]", + "Specify Origin header values allowed to connect. Special options: \"all\", \"none\".", + + ARG arg_ws_hosts: (String) = "none", or |c: &Config| c.websockets.as_ref()?.hosts.as_ref().map(|vec| vec.join(",")), + "--ws-hosts=[HOSTS]", + "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\".", + + ARG arg_ws_max_connections: (usize) = 100usize, or |c: &Config| c.websockets.as_ref()?.max_connections, + "--ws-max-connections=[CONN]", + "Maximum number of allowed concurrent WebSockets JSON-RPC connections.", + + ["API and Console Options – IPC"] + FLAG flag_no_ipc: (bool) = false, or |c: &Config| c.ipc.as_ref()?.disable.clone(), + "--no-ipc", + "Disable JSON-RPC over IPC service.", + + ARG arg_ipc_path: (String) = if cfg!(windows) { r"\\.\pipe\jsonrpc.ipc" } else { "$BASE/jsonrpc.ipc" }, or |c: &Config| c.ipc.as_ref()?.path.clone(), + "--ipc-path=[PATH]", + "Specify custom path for JSON-RPC over IPC service.", + + ARG arg_ipc_apis: (String) = "web3,eth,pubsub,net,parity,parity_pubsub,parity_accounts,private,traces,rpc,shh,shh_pubsub", or |c: &Config| c.ipc.as_ref()?.apis.as_ref().map(|vec| vec.join(",")), + "--ipc-apis=[APIS]", + "Specify custom API set available via JSON-RPC over IPC using a comma-delimited list of API names. 
Possible names are: all, safe, web3, net, eth, pubsub, personal, signer, parity, parity_pubsub, parity_accounts, parity_set, traces, rpc, secretstore, shh, shh_pubsub. You can also disable a specific API by putting '-' in the front, example: all,-personal. 'safe' enables the following APIs: web3, net, eth, pubsub, parity, parity_pubsub, traces, rpc, shh, shh_pubsub", + + ["API and Console Options – IPFS"] + FLAG flag_ipfs_api: (bool) = false, or |c: &Config| c.ipfs.as_ref()?.enable.clone(), + "--ipfs-api", + "Enable IPFS-compatible HTTP API.", + + ARG arg_ipfs_api_port: (u16) = 5001u16, or |c: &Config| c.ipfs.as_ref()?.port.clone(), + "--ipfs-api-port=[PORT]", + "Configure on which port the IPFS HTTP API should listen.", + + ARG arg_ipfs_api_interface: (String) = "local", or |c: &Config| c.ipfs.as_ref()?.interface.clone(), + "--ipfs-api-interface=[IP]", + "Specify the hostname portion of the IPFS API server, IP should be an interface's IP address or local.", + + ARG arg_ipfs_api_hosts: (String) = "none", or |c: &Config| c.ipfs.as_ref()?.hosts.as_ref().map(|vec| vec.join(",")), + "--ipfs-api-hosts=[HOSTS]", + "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\".", + + ARG arg_ipfs_api_cors: (String) = "none", or |c: &Config| c.ipfs.as_ref()?.cors.as_ref().map(|vec| vec.join(",")), + "--ipfs-api-cors=[URL]", + "Specify CORS header for IPFS API responses. 
Special options: \"all\", \"none\".", + + ["Light Client Options"] + ARG arg_on_demand_response_time_window: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_response_time_window, + "--on-demand-time-window=[S]", + "Specify the maximum time to wait for a successful response", + + ARG arg_on_demand_request_backoff_start: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_backoff_start, + "--on-demand-start-backoff=[S]", + "Specify light client initial backoff time for a request", + + ARG arg_on_demand_request_backoff_max: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_backoff_max, + "--on-demand-end-backoff=[S]", + "Specify light client maximum backoff time for a request", + + ARG arg_on_demand_request_backoff_rounds_max: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_backoff_rounds_max, + "--on-demand-max-backoff-rounds=[TIMES]", + "Specify light client maximum number of backoff iterations for a request", + + ARG arg_on_demand_request_consecutive_failures: (Option) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_consecutive_failures, + "--on-demand-consecutive-failures=[TIMES]", + "Specify light client the number of failures for a request until it gets exponentially backed off", + + ["Secret Store Options"] + FLAG flag_no_secretstore: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable.clone(), + "--no-secretstore", + "Disable Secret Store functionality.", + + FLAG flag_no_secretstore_http: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable_http.clone(), + "--no-secretstore-http", + "Disable Secret Store HTTP API.", + + FLAG flag_no_secretstore_auto_migrate: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable_auto_migrate.clone(), + "--no-secretstore-auto-migrate", + "Do not run servers set change session automatically when servers set changes. 
This option has no effect when servers set is read from configuration file.", + + ARG arg_secretstore_acl_contract: (Option) = Some("registry".into()), or |c: &Config| c.secretstore.as_ref()?.acl_contract.clone(), + "--secretstore-acl-contract=[SOURCE]", + "Secret Store permissioning contract address source: none, registry (contract address is read from 'secretstore_acl_checker' entry in registry) or address.", + + ARG arg_secretstore_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract.clone(), + "--secretstore-contract=[SOURCE]", + "Secret Store Service contract address source: none, registry (contract address is read from 'secretstore_service' entry in registry) or address.", + + ARG arg_secretstore_srv_gen_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract_srv_gen.clone(), + "--secretstore-srv-gen-contract=[SOURCE]", + "Secret Store Service server key generation contract address source: none, registry (contract address is read from 'secretstore_service_srv_gen' entry in registry) or address.", + + ARG arg_secretstore_srv_retr_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract_srv_retr.clone(), + "--secretstore-srv-retr-contract=[SOURCE]", + "Secret Store Service server key retrieval contract address source: none, registry (contract address is read from 'secretstore_service_srv_retr' entry in registry) or address.", + + ARG arg_secretstore_doc_store_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract_doc_store.clone(), + "--secretstore-doc-store-contract=[SOURCE]", + "Secret Store Service document key store contract address source: none, registry (contract address is read from 'secretstore_service_doc_store' entry in registry) or address.", + + ARG arg_secretstore_doc_sretr_contract: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.service_contract_doc_sretr.clone(), + "--secretstore-doc-sretr-contract=[SOURCE]", + "Secret 
Store Service document key shadow retrieval contract address source: none, registry (contract address is read from 'secretstore_service_doc_sretr' entry in registry) or address.", + + ARG arg_secretstore_nodes: (String) = "", or |c: &Config| c.secretstore.as_ref()?.nodes.as_ref().map(|vec| vec.join(",")), + "--secretstore-nodes=[NODES]", + "Comma-separated list of other secret store cluster nodes in form NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT.", + + ARG arg_secretstore_server_set_contract: (Option) = Some("registry".into()), or |c: &Config| c.secretstore.as_ref()?.server_set_contract.clone(), + "--secretstore-server-set-contract=[SOURCE]", + "Secret Store server set contract address source: none, registry (contract address is read from 'secretstore_server_set' entry in registry) or address.", + + ARG arg_secretstore_interface: (String) = "local", or |c: &Config| c.secretstore.as_ref()?.interface.clone(), + "--secretstore-interface=[IP]", + "Specify the hostname portion for listening to Secret Store Key Server internal requests, IP should be an interface's IP address, or local.", + + ARG arg_secretstore_port: (u16) = 8083u16, or |c: &Config| c.secretstore.as_ref()?.port.clone(), + "--secretstore-port=[PORT]", + "Specify the port portion for listening to Secret Store Key Server internal requests.", + + ARG arg_secretstore_http_interface: (String) = "local", or |c: &Config| c.secretstore.as_ref()?.http_interface.clone(), + "--secretstore-http-interface=[IP]", + "Specify the hostname portion for listening to Secret Store Key Server HTTP requests, IP should be an interface's IP address, or local.", + + ARG arg_secretstore_http_port: (u16) = 8082u16, or |c: &Config| c.secretstore.as_ref()?.http_port.clone(), + "--secretstore-http-port=[PORT]", + "Specify the port portion for listening to Secret Store Key Server HTTP requests.", + + ARG arg_secretstore_path: (String) = "$BASE/secretstore", or |c: &Config| c.secretstore.as_ref()?.path.clone(), + 
"--secretstore-path=[PATH]", + "Specify directory where Secret Store should save its data.", + + ARG arg_secretstore_secret: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.self_secret.clone(), + "--secretstore-secret=[SECRET]", + "Hex-encoded secret key of this node.", + + ARG arg_secretstore_admin_public: (Option) = None, or |c: &Config| c.secretstore.as_ref()?.admin_public.clone(), + "--secretstore-admin=[PUBLIC]", + "Hex-encoded public key of secret store administrator.", + + ["Sealing/Mining Options"] + FLAG flag_force_sealing: (bool) = false, or |c: &Config| c.mining.as_ref()?.force_sealing.clone(), + "--force-sealing", + "Force the node to author new blocks as if it were always sealing/mining.", - FLAG flag_reseal_on_uncle: (bool) = false, or |c: &Config| c.mining.as_ref()?.reseal_on_uncle.clone(), - "--reseal-on-uncle", - "Force the node to author new blocks when a new uncle block is imported.", + FLAG flag_reseal_on_uncle: (bool) = false, or |c: &Config| c.mining.as_ref()?.reseal_on_uncle.clone(), + "--reseal-on-uncle", + "Force the node to author new blocks when a new uncle block is imported.", - FLAG flag_remove_solved: (bool) = false, or |c: &Config| c.mining.as_ref()?.remove_solved.clone(), - "--remove-solved", - "Move solved blocks from the work package queue instead of cloning them. This gives a slightly faster import speed, but means that extra solutions submitted for the same work package will go unused.", + FLAG flag_remove_solved: (bool) = false, or |c: &Config| c.mining.as_ref()?.remove_solved.clone(), + "--remove-solved", + "Move solved blocks from the work package queue instead of cloning them. 
This gives a slightly faster import speed, but means that extra solutions submitted for the same work package will go unused.", - FLAG flag_tx_queue_no_unfamiliar_locals: (bool) = false, or |c: &Config| c.mining.as_ref()?.tx_queue_no_unfamiliar_locals.clone(), - "--tx-queue-no-unfamiliar-locals", - "Local transactions sent through JSON-RPC (HTTP, WebSockets, etc) will be treated as 'external' if the sending account is unknown.", + FLAG flag_tx_queue_no_unfamiliar_locals: (bool) = false, or |c: &Config| c.mining.as_ref()?.tx_queue_no_unfamiliar_locals.clone(), + "--tx-queue-no-unfamiliar-locals", + "Local transactions sent through JSON-RPC (HTTP, WebSockets, etc) will be treated as 'external' if the sending account is unknown.", - FLAG flag_tx_queue_no_early_reject: (bool) = false, or |c: &Config| c.mining.as_ref()?.tx_queue_no_early_reject.clone(), - "--tx-queue-no-early-reject", - "Disables transaction queue optimization to early reject transactions below minimal effective gas price. This allows local transactions to always enter the pool, despite it being full, but requires additional ecrecover on every transaction.", + FLAG flag_tx_queue_no_early_reject: (bool) = false, or |c: &Config| c.mining.as_ref()?.tx_queue_no_early_reject.clone(), + "--tx-queue-no-early-reject", + "Disables transaction queue optimization to early reject transactions below minimal effective gas price. 
This allows local transactions to always enter the pool, despite it being full, but requires additional ecrecover on every transaction.", - FLAG flag_refuse_service_transactions: (bool) = false, or |c: &Config| c.mining.as_ref()?.refuse_service_transactions.clone(), - "--refuse-service-transactions", - "Always refuse service transactions.", + FLAG flag_refuse_service_transactions: (bool) = false, or |c: &Config| c.mining.as_ref()?.refuse_service_transactions.clone(), + "--refuse-service-transactions", + "Always refuse service transactions.", - FLAG flag_infinite_pending_block: (bool) = false, or |c: &Config| c.mining.as_ref()?.infinite_pending_block.clone(), - "--infinite-pending-block", - "Pending block will be created with maximal possible gas limit and will execute all transactions in the queue. Note that such block is invalid and should never be attempted to be mined.", + FLAG flag_infinite_pending_block: (bool) = false, or |c: &Config| c.mining.as_ref()?.infinite_pending_block.clone(), + "--infinite-pending-block", + "Pending block will be created with maximal possible gas limit and will execute all transactions in the queue. 
Note that such block is invalid and should never be attempted to be mined.", - FLAG flag_no_persistent_txqueue: (bool) = false, or |c: &Config| c.parity.as_ref()?.no_persistent_txqueue, - "--no-persistent-txqueue", - "Don't save pending local transactions to disk to be restored whenever the node restarts.", + FLAG flag_no_persistent_txqueue: (bool) = false, or |c: &Config| c.parity.as_ref()?.no_persistent_txqueue, + "--no-persistent-txqueue", + "Don't save pending local transactions to disk to be restored whenever the node restarts.", - FLAG flag_stratum: (bool) = false, or |c: &Config| Some(c.stratum.is_some()), - "--stratum", - "Run Stratum server for miner push notification.", + FLAG flag_stratum: (bool) = false, or |c: &Config| Some(c.stratum.is_some()), + "--stratum", + "Run Stratum server for miner push notification.", - ARG arg_reseal_on_txs: (String) = "own", or |c: &Config| c.mining.as_ref()?.reseal_on_txs.clone(), - "--reseal-on-txs=[SET]", - "Specify which transactions should force the node to reseal a block. SET is one of: none - never reseal on new transactions; own - reseal only on a new local transaction; ext - reseal only on a new external transaction; all - reseal on all new transactions.", + ARG arg_reseal_on_txs: (String) = "own", or |c: &Config| c.mining.as_ref()?.reseal_on_txs.clone(), + "--reseal-on-txs=[SET]", + "Specify which transactions should force the node to reseal a block. SET is one of: none - never reseal on new transactions; own - reseal only on a new local transaction; ext - reseal only on a new external transaction; all - reseal on all new transactions.", - ARG arg_reseal_min_period: (u64) = 2000u64, or |c: &Config| c.mining.as_ref()?.reseal_min_period.clone(), - "--reseal-min-period=[MS]", - "Specify the minimum time between reseals from incoming transactions. 
MS is time measured in milliseconds.", + ARG arg_reseal_min_period: (u64) = 2000u64, or |c: &Config| c.mining.as_ref()?.reseal_min_period.clone(), + "--reseal-min-period=[MS]", + "Specify the minimum time between reseals from incoming transactions. MS is time measured in milliseconds.", - ARG arg_reseal_max_period: (u64) = 120000u64, or |c: &Config| c.mining.as_ref()?.reseal_max_period.clone(), - "--reseal-max-period=[MS]", - "Specify the maximum time since last block to enable force-sealing. MS is time measured in milliseconds.", - - ARG arg_work_queue_size: (usize) = 20usize, or |c: &Config| c.mining.as_ref()?.work_queue_size.clone(), - "--work-queue-size=[ITEMS]", - "Specify the number of historical work packages which are kept cached lest a solution is found for them later. High values take more memory but result in fewer unusable solutions.", - - ARG arg_relay_set: (String) = "cheap", or |c: &Config| c.mining.as_ref()?.relay_set.clone(), - "--relay-set=[SET]", - "Set of transactions to relay. SET may be: cheap - Relay any transaction in the queue (this may include invalid transactions); strict - Relay only executed transactions (this guarantees we don't relay invalid transactions, but means we relay nothing if not mining); lenient - Same as strict when mining, and cheap when not.", - - ARG arg_usd_per_tx: (String) = "0.0001", or |c: &Config| c.mining.as_ref()?.usd_per_tx.clone(), - "--usd-per-tx=[USD]", - "Amount of USD to be paid for a basic transaction. The minimum gas price is set accordingly.", - - ARG arg_usd_per_eth: (String) = "auto", or |c: &Config| c.mining.as_ref()?.usd_per_eth.clone(), - "--usd-per-eth=[SOURCE]", - "USD value of a single ETH. 
SOURCE may be either an amount in USD, a web service or 'auto' to use each web service in turn and fallback on the last known good value.", - - ARG arg_price_update_period: (String) = "hourly", or |c: &Config| c.mining.as_ref()?.price_update_period.clone(), - "--price-update-period=[T]", - "T will be allowed to pass between each gas price update. T may be daily, hourly, a number of seconds, or a time string of the form \"2 days\", \"30 minutes\" etc..", - - ARG arg_gas_floor_target: (String) = "8000000", or |c: &Config| c.mining.as_ref()?.gas_floor_target.clone(), - "--gas-floor-target=[GAS]", - "Amount of gas per block to target when sealing a new block.", - - ARG arg_gas_cap: (String) = "10000000", or |c: &Config| c.mining.as_ref()?.gas_cap.clone(), - "--gas-cap=[GAS]", - "A cap on how large we will raise the gas limit per block due to transaction volume.", - - ARG arg_tx_queue_mem_limit: (u32) = 4u32, or |c: &Config| c.mining.as_ref()?.tx_queue_mem_limit.clone(), - "--tx-queue-mem-limit=[MB]", - "Maximum amount of memory that can be used by the transaction queue. Setting this parameter to 0 disables limiting.", - - ARG arg_tx_queue_size: (usize) = 8_192usize, or |c: &Config| c.mining.as_ref()?.tx_queue_size.clone(), - "--tx-queue-size=[LIMIT]", - "Maximum amount of transactions in the queue (waiting to be included in next block).", - - ARG arg_tx_queue_per_sender: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_queue_per_sender.clone(), - "--tx-queue-per-sender=[LIMIT]", - "Maximum number of transactions per sender in the queue. By default it's 1% of the entire queue, but not less than 16.", - - ARG arg_tx_queue_locals: (Option) = None, or |c: &Config| helpers::join_set(c.mining.as_ref()?.tx_queue_locals.as_ref()), - "--tx-queue-locals=[ACCOUNTS]", - "Specify local accounts for which transactions are prioritized in the queue. 
ACCOUNTS is a comma-delimited list of addresses.", - - ARG arg_tx_queue_strategy: (String) = "gas_price", or |c: &Config| c.mining.as_ref()?.tx_queue_strategy.clone(), - "--tx-queue-strategy=[S]", - "Prioritization strategy used to order transactions in the queue. S may be: gas_price - Prioritize txs with high gas price", - - ARG arg_stratum_interface: (String) = "local", or |c: &Config| c.stratum.as_ref()?.interface.clone(), - "--stratum-interface=[IP]", - "Interface address for Stratum server.", - - ARG arg_stratum_port: (u16) = 8008u16, or |c: &Config| c.stratum.as_ref()?.port.clone(), - "--stratum-port=[PORT]", - "Port for Stratum server to listen on.", + ARG arg_reseal_max_period: (u64) = 120000u64, or |c: &Config| c.mining.as_ref()?.reseal_max_period.clone(), + "--reseal-max-period=[MS]", + "Specify the maximum time since last block to enable force-sealing. MS is time measured in milliseconds.", + + ARG arg_work_queue_size: (usize) = 20usize, or |c: &Config| c.mining.as_ref()?.work_queue_size.clone(), + "--work-queue-size=[ITEMS]", + "Specify the number of historical work packages which are kept cached lest a solution is found for them later. High values take more memory but result in fewer unusable solutions.", + + ARG arg_relay_set: (String) = "cheap", or |c: &Config| c.mining.as_ref()?.relay_set.clone(), + "--relay-set=[SET]", + "Set of transactions to relay. SET may be: cheap - Relay any transaction in the queue (this may include invalid transactions); strict - Relay only executed transactions (this guarantees we don't relay invalid transactions, but means we relay nothing if not mining); lenient - Same as strict when mining, and cheap when not.", + + ARG arg_usd_per_tx: (String) = "0.0001", or |c: &Config| c.mining.as_ref()?.usd_per_tx.clone(), + "--usd-per-tx=[USD]", + "Amount of USD to be paid for a basic transaction. 
The minimum gas price is set accordingly.", + + ARG arg_usd_per_eth: (String) = "auto", or |c: &Config| c.mining.as_ref()?.usd_per_eth.clone(), + "--usd-per-eth=[SOURCE]", + "USD value of a single ETH. SOURCE may be either an amount in USD, a web service or 'auto' to use each web service in turn and fallback on the last known good value.", + + ARG arg_price_update_period: (String) = "hourly", or |c: &Config| c.mining.as_ref()?.price_update_period.clone(), + "--price-update-period=[T]", + "T will be allowed to pass between each gas price update. T may be daily, hourly, a number of seconds, or a time string of the form \"2 days\", \"30 minutes\" etc..", + + ARG arg_gas_floor_target: (String) = "8000000", or |c: &Config| c.mining.as_ref()?.gas_floor_target.clone(), + "--gas-floor-target=[GAS]", + "Amount of gas per block to target when sealing a new block.", + + ARG arg_gas_cap: (String) = "10000000", or |c: &Config| c.mining.as_ref()?.gas_cap.clone(), + "--gas-cap=[GAS]", + "A cap on how large we will raise the gas limit per block due to transaction volume.", + + ARG arg_tx_queue_mem_limit: (u32) = 4u32, or |c: &Config| c.mining.as_ref()?.tx_queue_mem_limit.clone(), + "--tx-queue-mem-limit=[MB]", + "Maximum amount of memory that can be used by the transaction queue. Setting this parameter to 0 disables limiting.", + + ARG arg_tx_queue_size: (usize) = 8_192usize, or |c: &Config| c.mining.as_ref()?.tx_queue_size.clone(), + "--tx-queue-size=[LIMIT]", + "Maximum amount of transactions in the queue (waiting to be included in next block).", + + ARG arg_tx_queue_per_sender: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_queue_per_sender.clone(), + "--tx-queue-per-sender=[LIMIT]", + "Maximum number of transactions per sender in the queue. 
By default it's 1% of the entire queue, but not less than 16.", + + ARG arg_tx_queue_locals: (Option) = None, or |c: &Config| helpers::join_set(c.mining.as_ref()?.tx_queue_locals.as_ref()), + "--tx-queue-locals=[ACCOUNTS]", + "Specify local accounts for which transactions are prioritized in the queue. ACCOUNTS is a comma-delimited list of addresses.", + + ARG arg_tx_queue_strategy: (String) = "gas_price", or |c: &Config| c.mining.as_ref()?.tx_queue_strategy.clone(), + "--tx-queue-strategy=[S]", + "Prioritization strategy used to order transactions in the queue. S may be: gas_price - Prioritize txs with high gas price", + + ARG arg_stratum_interface: (String) = "local", or |c: &Config| c.stratum.as_ref()?.interface.clone(), + "--stratum-interface=[IP]", + "Interface address for Stratum server.", + + ARG arg_stratum_port: (u16) = 8008u16, or |c: &Config| c.stratum.as_ref()?.port.clone(), + "--stratum-port=[PORT]", + "Port for Stratum server to listen on.", - ARG arg_min_gas_price: (Option) = None, or |c: &Config| c.mining.as_ref()?.min_gas_price.clone(), - "--min-gas-price=[STRING]", - "Minimum amount of Wei per GAS to be paid for a transaction to be accepted for mining. Overrides --usd-per-tx.", + ARG arg_min_gas_price: (Option) = None, or |c: &Config| c.mining.as_ref()?.min_gas_price.clone(), + "--min-gas-price=[STRING]", + "Minimum amount of Wei per GAS to be paid for a transaction to be accepted for mining. 
Overrides --usd-per-tx.", - ARG arg_gas_price_percentile: (usize) = 50usize, or |c: &Config| c.mining.as_ref()?.gas_price_percentile, - "--gas-price-percentile=[PCT]", - "Set PCT percentile gas price value from last 100 blocks as default gas price when sending transactions.", + ARG arg_gas_price_percentile: (usize) = 50usize, or |c: &Config| c.mining.as_ref()?.gas_price_percentile, + "--gas-price-percentile=[PCT]", + "Set PCT percentile gas price value from last 100 blocks as default gas price when sending transactions.", - ARG arg_author: (Option) = None, or |c: &Config| c.mining.as_ref()?.author.clone(), - "--author=[ADDRESS]", - "Specify the block author (aka \"coinbase\") address for sending block rewards from sealed blocks. NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION.", // Sealing/Mining Option + ARG arg_author: (Option) = None, or |c: &Config| c.mining.as_ref()?.author.clone(), + "--author=[ADDRESS]", + "Specify the block author (aka \"coinbase\") address for sending block rewards from sealed blocks. NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION.", // Sealing/Mining Option - ARG arg_engine_signer: (Option) = None, or |c: &Config| c.mining.as_ref()?.engine_signer.clone(), - "--engine-signer=[ADDRESS]", - "Specify the address which should be used to sign consensus messages and issue blocks. Relevant only to non-PoW chains.", + ARG arg_engine_signer: (Option) = None, or |c: &Config| c.mining.as_ref()?.engine_signer.clone(), + "--engine-signer=[ADDRESS]", + "Specify the address which should be used to sign consensus messages and issue blocks. 
Relevant only to non-PoW chains.", - ARG arg_tx_gas_limit: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_gas_limit.clone(), - "--tx-gas-limit=[GAS]", - "Apply a limit of GAS as the maximum amount of gas a single transaction may have for it to be mined.", + ARG arg_tx_gas_limit: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_gas_limit.clone(), + "--tx-gas-limit=[GAS]", + "Apply a limit of GAS as the maximum amount of gas a single transaction may have for it to be mined.", - ARG arg_tx_time_limit: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_time_limit.clone(), - "--tx-time-limit=[MS]", - "Maximal time for processing single transaction. If enabled senders of transactions offending the limit will get other transactions penalized.", + ARG arg_tx_time_limit: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_time_limit.clone(), + "--tx-time-limit=[MS]", + "Maximal time for processing single transaction. If enabled senders of transactions offending the limit will get other transactions penalized.", - ARG arg_extra_data: (Option) = None, or |c: &Config| c.mining.as_ref()?.extra_data.clone(), - "--extra-data=[STRING]", - "Specify a custom extra-data for authored blocks, no more than 32 characters.", - - ARG arg_notify_work: (Option) = None, or |c: &Config| c.mining.as_ref()?.notify_work.as_ref().map(|vec| vec.join(",")), - "--notify-work=[URLS]", - "URLs to which work package notifications are pushed. 
URLS should be a comma-delimited list of HTTP URLs.", - - ARG arg_stratum_secret: (Option) = None, or |c: &Config| c.stratum.as_ref()?.secret.clone(), - "--stratum-secret=[STRING]", - "Secret for authorizing Stratum server for peers.", - - ARG arg_max_round_blocks_to_import: (usize) = 12usize, or |c: &Config| c.mining.as_ref()?.max_round_blocks_to_import.clone(), - "--max-round-blocks-to-import=[S]", - "Maximal number of blocks to import for each import round.", - - ["Internal Options"] - FLAG flag_can_restart: (bool) = false, or |_| None, - "--can-restart", - "Executable will auto-restart if exiting with 69", - - ["Miscellaneous Options"] - FLAG flag_no_color: (bool) = false, or |c: &Config| c.misc.as_ref()?.color.map(|c| !c).clone(), - "--no-color", - "Don't use terminal color codes in output.", - - FLAG flag_version: (bool) = false, or |_| None, - "-v, --version", - "Show information about version.", - - FLAG flag_no_config: (bool) = false, or |_| None, - "--no-config", - "Don't load a configuration file.", - - ARG arg_logging: (Option) = None, or |c: &Config| c.misc.as_ref()?.logging.clone(), - "-l, --logging=[LOGGING]", - "Specify the general logging level (error, warn, info, debug or trace). It can also be set for a specific module, example: '-l sync=debug,rpc=trace'", - - ARG arg_log_file: (Option) = None, or |c: &Config| c.misc.as_ref()?.log_file.clone(), - "--log-file=[FILENAME]", - "Specify a filename into which logging should be appended.", - - ["Footprint Options"] - FLAG flag_scale_verifiers: (bool) = false, or |c: &Config| c.footprint.as_ref()?.scale_verifiers.clone(), - "--scale-verifiers", - "Automatically scale amount of verifier threads based on workload. Not guaranteed to be faster.", - - ARG arg_tracing: (String) = "auto", or |c: &Config| c.footprint.as_ref()?.tracing.clone(), - "--tracing=[BOOL]", - "Indicates if full transaction tracing should be enabled. Works only if client had been fully synced with tracing enabled. 
BOOL may be one of auto, on, off. auto uses last used value of this option (off if it does not exist).", // footprint option - - ARG arg_pruning: (String) = "auto", or |c: &Config| c.footprint.as_ref()?.pruning.clone(), - "--pruning=[METHOD]", - "Configure pruning of the state/storage trie. METHOD may be one of auto, archive, fast: archive - keep all state trie data. No pruning. fast - maintain journal overlay. Fast but 50MB used. auto - use the method most recently synced or default to fast if none synced.", - - ARG arg_pruning_history: (u64) = 64u64, or |c: &Config| c.footprint.as_ref()?.pruning_history.clone(), - "--pruning-history=[NUM]", - "Set a minimum number of recent states to keep in memory when pruning is active.", - - ARG arg_pruning_memory: (usize) = 32usize, or |c: &Config| c.footprint.as_ref()?.pruning_memory.clone(), - "--pruning-memory=[MB]", - "The ideal amount of memory in megabytes to use to store recent states. As many states as possible will be kept within this limit, and at least --pruning-history states will always be kept.", - - ARG arg_cache_size_db: (u32) = 128u32, or |c: &Config| c.footprint.as_ref()?.cache_size_db.clone(), - "--cache-size-db=[MB]", - "Override database cache size.", - - ARG arg_cache_size_blocks: (u32) = 8u32, or |c: &Config| c.footprint.as_ref()?.cache_size_blocks.clone(), - "--cache-size-blocks=[MB]", - "Specify the preferred size of the blockchain cache in megabytes.", - - ARG arg_cache_size_queue: (u32) = 40u32, or |c: &Config| c.footprint.as_ref()?.cache_size_queue.clone(), - "--cache-size-queue=[MB]", - "Specify the maximum size of memory to use for block queue.", + ARG arg_extra_data: (Option) = None, or |c: &Config| c.mining.as_ref()?.extra_data.clone(), + "--extra-data=[STRING]", + "Specify a custom extra-data for authored blocks, no more than 32 characters.", + + ARG arg_notify_work: (Option) = None, or |c: &Config| c.mining.as_ref()?.notify_work.as_ref().map(|vec| vec.join(",")), + "--notify-work=[URLS]", + 
"URLs to which work package notifications are pushed. URLS should be a comma-delimited list of HTTP URLs.", + + ARG arg_stratum_secret: (Option) = None, or |c: &Config| c.stratum.as_ref()?.secret.clone(), + "--stratum-secret=[STRING]", + "Secret for authorizing Stratum server for peers.", + + ARG arg_max_round_blocks_to_import: (usize) = 12usize, or |c: &Config| c.mining.as_ref()?.max_round_blocks_to_import.clone(), + "--max-round-blocks-to-import=[S]", + "Maximal number of blocks to import for each import round.", + + ["Internal Options"] + FLAG flag_can_restart: (bool) = false, or |_| None, + "--can-restart", + "Executable will auto-restart if exiting with 69", + + ["Miscellaneous Options"] + FLAG flag_no_color: (bool) = false, or |c: &Config| c.misc.as_ref()?.color.map(|c| !c).clone(), + "--no-color", + "Don't use terminal color codes in output.", + + FLAG flag_version: (bool) = false, or |_| None, + "-v, --version", + "Show information about version.", + + FLAG flag_no_config: (bool) = false, or |_| None, + "--no-config", + "Don't load a configuration file.", + + ARG arg_logging: (Option) = None, or |c: &Config| c.misc.as_ref()?.logging.clone(), + "-l, --logging=[LOGGING]", + "Specify the general logging level (error, warn, info, debug or trace). It can also be set for a specific module, example: '-l sync=debug,rpc=trace'", + + ARG arg_log_file: (Option) = None, or |c: &Config| c.misc.as_ref()?.log_file.clone(), + "--log-file=[FILENAME]", + "Specify a filename into which logging should be appended.", + + ["Footprint Options"] + FLAG flag_scale_verifiers: (bool) = false, or |c: &Config| c.footprint.as_ref()?.scale_verifiers.clone(), + "--scale-verifiers", + "Automatically scale amount of verifier threads based on workload. Not guaranteed to be faster.", + + ARG arg_tracing: (String) = "auto", or |c: &Config| c.footprint.as_ref()?.tracing.clone(), + "--tracing=[BOOL]", + "Indicates if full transaction tracing should be enabled. 
Works only if client had been fully synced with tracing enabled. BOOL may be one of auto, on, off. auto uses last used value of this option (off if it does not exist).", // footprint option + + ARG arg_pruning: (String) = "auto", or |c: &Config| c.footprint.as_ref()?.pruning.clone(), + "--pruning=[METHOD]", + "Configure pruning of the state/storage trie. METHOD may be one of auto, archive, fast: archive - keep all state trie data. No pruning. fast - maintain journal overlay. Fast but 50MB used. auto - use the method most recently synced or default to fast if none synced.", + + ARG arg_pruning_history: (u64) = 64u64, or |c: &Config| c.footprint.as_ref()?.pruning_history.clone(), + "--pruning-history=[NUM]", + "Set a minimum number of recent states to keep in memory when pruning is active.", + + ARG arg_pruning_memory: (usize) = 32usize, or |c: &Config| c.footprint.as_ref()?.pruning_memory.clone(), + "--pruning-memory=[MB]", + "The ideal amount of memory in megabytes to use to store recent states. 
As many states as possible will be kept within this limit, and at least --pruning-history states will always be kept.", + + ARG arg_cache_size_db: (u32) = 128u32, or |c: &Config| c.footprint.as_ref()?.cache_size_db.clone(), + "--cache-size-db=[MB]", + "Override database cache size.", + + ARG arg_cache_size_blocks: (u32) = 8u32, or |c: &Config| c.footprint.as_ref()?.cache_size_blocks.clone(), + "--cache-size-blocks=[MB]", + "Specify the preferred size of the blockchain cache in megabytes.", + + ARG arg_cache_size_queue: (u32) = 40u32, or |c: &Config| c.footprint.as_ref()?.cache_size_queue.clone(), + "--cache-size-queue=[MB]", + "Specify the maximum size of memory to use for block queue.", - ARG arg_cache_size_state: (u32) = 25u32, or |c: &Config| c.footprint.as_ref()?.cache_size_state.clone(), - "--cache-size-state=[MB]", - "Specify the maximum size of memory to use for the state cache.", + ARG arg_cache_size_state: (u32) = 25u32, or |c: &Config| c.footprint.as_ref()?.cache_size_state.clone(), + "--cache-size-state=[MB]", + "Specify the maximum size of memory to use for the state cache.", - ARG arg_db_compaction: (String) = "auto", or |c: &Config| c.footprint.as_ref()?.db_compaction.clone(), - "--db-compaction=[TYPE]", - "Database compaction type. TYPE may be one of: ssd - suitable for SSDs and fast HDDs; hdd - suitable for slow HDDs; auto - determine automatically.", + ARG arg_db_compaction: (String) = "auto", or |c: &Config| c.footprint.as_ref()?.db_compaction.clone(), + "--db-compaction=[TYPE]", + "Database compaction type. TYPE may be one of: ssd - suitable for SSDs and fast HDDs; hdd - suitable for slow HDDs; auto - determine automatically.", - ARG arg_fat_db: (String) = "auto", or |c: &Config| c.footprint.as_ref()?.fat_db.clone(), - "--fat-db=[BOOL]", - "Build appropriate information to allow enumeration of all accounts and storage keys. Doubles the size of the state database. 
BOOL may be one of on, off or auto.", + ARG arg_fat_db: (String) = "auto", or |c: &Config| c.footprint.as_ref()?.fat_db.clone(), + "--fat-db=[BOOL]", + "Build appropriate information to allow enumeration of all accounts and storage keys. Doubles the size of the state database. BOOL may be one of on, off or auto.", - ARG arg_cache_size: (Option) = None, or |c: &Config| c.footprint.as_ref()?.cache_size.clone(), - "--cache-size=[MB]", - "Set total amount of discretionary memory to use for the entire system, overrides other cache and queue options.", + ARG arg_cache_size: (Option) = None, or |c: &Config| c.footprint.as_ref()?.cache_size.clone(), + "--cache-size=[MB]", + "Set total amount of discretionary memory to use for the entire system, overrides other cache and queue options.", - ARG arg_num_verifiers: (Option) = None, or |c: &Config| c.footprint.as_ref()?.num_verifiers.clone(), - "--num-verifiers=[INT]", - "Amount of verifier threads to use or to begin with, if verifier auto-scaling is enabled.", + ARG arg_num_verifiers: (Option) = None, or |c: &Config| c.footprint.as_ref()?.num_verifiers.clone(), + "--num-verifiers=[INT]", + "Amount of verifier threads to use or to begin with, if verifier auto-scaling is enabled.", - ["Import/export Options"] - FLAG flag_no_seal_check: (bool) = false, or |_| None, - "--no-seal-check", - "Skip block seal check.", + ["Import/export Options"] + FLAG flag_no_seal_check: (bool) = false, or |_| None, + "--no-seal-check", + "Skip block seal check.", - ["Snapshot Options"] - FLAG flag_no_periodic_snapshot: (bool) = false, or |c: &Config| c.snapshots.as_ref()?.disable_periodic.clone(), - "--no-periodic-snapshot", - "Disable automated snapshots which usually occur once every 5000 blocks.", + ["Snapshot Options"] + FLAG flag_no_periodic_snapshot: (bool) = false, or |c: &Config| c.snapshots.as_ref()?.disable_periodic.clone(), + "--no-periodic-snapshot", + "Disable automated snapshots which usually occur once every 5000 blocks.", - ARG 
arg_snapshot_threads: (Option) = None, or |c: &Config| c.snapshots.as_ref()?.processing_threads, - "--snapshot-threads=[NUM]", - "Enables multiple threads for snapshots creation.", + ARG arg_snapshot_threads: (Option) = None, or |c: &Config| c.snapshots.as_ref()?.processing_threads, + "--snapshot-threads=[NUM]", + "Enables multiple threads for snapshots creation.", - ["Whisper Options"] - FLAG flag_whisper: (bool) = false, or |c: &Config| c.whisper.as_ref()?.enabled, - "--whisper", - "Enable the Whisper network.", + ["Whisper Options"] + FLAG flag_whisper: (bool) = false, or |c: &Config| c.whisper.as_ref()?.enabled, + "--whisper", + "Enable the Whisper network.", - ARG arg_whisper_pool_size: (usize) = 10usize, or |c: &Config| c.whisper.as_ref()?.pool_size.clone(), - "--whisper-pool-size=[MB]", - "Target size of the whisper message pool in megabytes.", + ARG arg_whisper_pool_size: (usize) = 10usize, or |c: &Config| c.whisper.as_ref()?.pool_size.clone(), + "--whisper-pool-size=[MB]", + "Target size of the whisper message pool in megabytes.", - ["Legacy Options"] - // Options that are hidden from config, but are still unique for its functionality. + ["Legacy Options"] + // Options that are hidden from config, but are still unique for its functionality. - FLAG flag_geth: (bool) = false, or |_| None, - "--geth", - "Run in Geth-compatibility mode. Sets the IPC path to be the same as Geth's. Overrides the --ipc-path and --ipcpath options. Alters RPCs to reflect Geth bugs. Includes the personal_ RPC by default.", + FLAG flag_geth: (bool) = false, or |_| None, + "--geth", + "Run in Geth-compatibility mode. Sets the IPC path to be the same as Geth's. Overrides the --ipc-path and --ipcpath options. Alters RPCs to reflect Geth bugs. 
Includes the personal_ RPC by default.", - FLAG flag_import_geth_keys: (bool) = false, or |_| None, - "--import-geth-keys", - "Attempt to import keys from Geth client.", + FLAG flag_import_geth_keys: (bool) = false, or |_| None, + "--import-geth-keys", + "Attempt to import keys from Geth client.", - // Options that either do nothing, or are replaced by other options. - // FLAG Removed in 1.6 or before. + // Options that either do nothing, or are replaced by other options. + // FLAG Removed in 1.6 or before. - FLAG flag_warp: (bool) = false, or |_| None, - "--warp", - "Does nothing; warp sync is enabled by default. Use --no-warp to disable.", + FLAG flag_warp: (bool) = false, or |_| None, + "--warp", + "Does nothing; warp sync is enabled by default. Use --no-warp to disable.", - FLAG flag_jsonrpc: (bool) = false, or |_| None, - "-j, --jsonrpc", - "Does nothing; HTTP JSON-RPC is on by default now.", + FLAG flag_jsonrpc: (bool) = false, or |_| None, + "-j, --jsonrpc", + "Does nothing; HTTP JSON-RPC is on by default now.", - FLAG flag_rpc: (bool) = false, or |_| None, - "--rpc", - "Does nothing; HTTP JSON-RPC is on by default now.", + FLAG flag_rpc: (bool) = false, or |_| None, + "--rpc", + "Does nothing; HTTP JSON-RPC is on by default now.", - FLAG flag_jsonrpc_off: (bool) = false, or |_| None, - "--jsonrpc-off", - "Equivalent to --no-jsonrpc.", + FLAG flag_jsonrpc_off: (bool) = false, or |_| None, + "--jsonrpc-off", + "Equivalent to --no-jsonrpc.", - FLAG flag_webapp: (bool) = false, or |_| None, - "-w, --webapp", - "Does nothing; dapps server has been removed.", + FLAG flag_webapp: (bool) = false, or |_| None, + "-w, --webapp", + "Does nothing; dapps server has been removed.", - FLAG flag_dapps_off: (bool) = false, or |_| None, - "--dapps-off", - "Equivalent to --no-dapps.", + FLAG flag_dapps_off: (bool) = false, or |_| None, + "--dapps-off", + "Equivalent to --no-dapps.", - FLAG flag_ipcdisable: (bool) = false, or |_| None, - "--ipcdisable", - "Equivalent to 
--no-ipc.", + FLAG flag_ipcdisable: (bool) = false, or |_| None, + "--ipcdisable", + "Equivalent to --no-ipc.", - FLAG flag_ipc_off: (bool) = false, or |_| None, - "--ipc-off", - "Equivalent to --no-ipc.", + FLAG flag_ipc_off: (bool) = false, or |_| None, + "--ipc-off", + "Equivalent to --no-ipc.", - FLAG flag_testnet: (bool) = false, or |_| None, - "--testnet", - "Testnet mode. Equivalent to --chain testnet. Overrides the --keys-path option.", + FLAG flag_testnet: (bool) = false, or |_| None, + "--testnet", + "Testnet mode. Equivalent to --chain testnet. Overrides the --keys-path option.", - FLAG flag_nodiscover: (bool) = false, or |_| None, - "--nodiscover", - "Equivalent to --no-discovery.", + FLAG flag_nodiscover: (bool) = false, or |_| None, + "--nodiscover", + "Equivalent to --no-discovery.", - // FLAG Removed in 1.7. + // FLAG Removed in 1.7. - FLAG flag_dapps_apis_all: (bool) = false, or |_| None, - "--dapps-apis-all", - "Dapps server is merged with HTTP JSON-RPC server. Use --jsonrpc-apis.", + FLAG flag_dapps_apis_all: (bool) = false, or |_| None, + "--dapps-apis-all", + "Dapps server is merged with HTTP JSON-RPC server. Use --jsonrpc-apis.", - // FLAG Removed in 1.11. + // FLAG Removed in 1.11. 
- FLAG flag_public_node: (bool) = false, or |_| None, - "--public-node", - "Does nothing; Public node is removed from Parity.", + FLAG flag_public_node: (bool) = false, or |_| None, + "--public-node", + "Does nothing; Public node is removed from Parity.", - FLAG flag_force_ui: (bool) = false, or |_| None, - "--force-ui", - "Does nothing; UI is now a separate project.", + FLAG flag_force_ui: (bool) = false, or |_| None, + "--force-ui", + "Does nothing; UI is now a separate project.", - FLAG flag_no_ui: (bool) = false, or |_| None, - "--no-ui", - "Does nothing; UI is now a separate project.", + FLAG flag_no_ui: (bool) = false, or |_| None, + "--no-ui", + "Does nothing; UI is now a separate project.", - FLAG flag_ui_no_validation: (bool) = false, or |_| None, - "--ui-no-validation", - "Does nothing; UI is now a separate project.", + FLAG flag_ui_no_validation: (bool) = false, or |_| None, + "--ui-no-validation", + "Does nothing; UI is now a separate project.", - // FLAG Removed in 2.0. + // FLAG Removed in 2.0. - FLAG flag_fast_and_loose: (bool) = false, or |_| None, - "--fast-and-loose", - "Does nothing; DB WAL is always activated.", + FLAG flag_fast_and_loose: (bool) = false, or |_| None, + "--fast-and-loose", + "Does nothing; DB WAL is always activated.", - FLAG flag_no_dapps: (bool) = false, or |c: &Config| c.dapps.as_ref()?._legacy_disable.clone(), - "--no-dapps", - "Disable the Dapps server (e.g. status page).", + FLAG flag_no_dapps: (bool) = false, or |c: &Config| c.dapps.as_ref()?._legacy_disable.clone(), + "--no-dapps", + "Disable the Dapps server (e.g. status page).", - // ARG Removed in 1.6 or before. + // ARG Removed in 1.6 or before. 
- ARG arg_etherbase: (Option) = None, or |_| None, - "--etherbase=[ADDRESS]", - "Equivalent to --author ADDRESS.", + ARG arg_etherbase: (Option) = None, or |_| None, + "--etherbase=[ADDRESS]", + "Equivalent to --author ADDRESS.", - ARG arg_extradata: (Option) = None, or |_| None, - "--extradata=[STRING]", - "Equivalent to --extra-data STRING.", + ARG arg_extradata: (Option) = None, or |_| None, + "--extradata=[STRING]", + "Equivalent to --extra-data STRING.", - ARG arg_datadir: (Option) = None, or |_| None, - "--datadir=[PATH]", - "Equivalent to --base-path PATH.", + ARG arg_datadir: (Option) = None, or |_| None, + "--datadir=[PATH]", + "Equivalent to --base-path PATH.", - ARG arg_networkid: (Option) = None, or |_| None, - "--networkid=[INDEX]", - "Equivalent to --network-id INDEX.", + ARG arg_networkid: (Option) = None, or |_| None, + "--networkid=[INDEX]", + "Equivalent to --network-id INDEX.", - ARG arg_peers: (Option) = None, or |_| None, - "--peers=[NUM]", - "Equivalent to --min-peers NUM.", + ARG arg_peers: (Option) = None, or |_| None, + "--peers=[NUM]", + "Equivalent to --min-peers NUM.", - ARG arg_nodekey: (Option) = None, or |_| None, - "--nodekey=[KEY]", - "Equivalent to --node-key KEY.", + ARG arg_nodekey: (Option) = None, or |_| None, + "--nodekey=[KEY]", + "Equivalent to --node-key KEY.", - ARG arg_rpcaddr: (Option) = None, or |_| None, - "--rpcaddr=[IP]", - "Equivalent to --jsonrpc-interface IP.", + ARG arg_rpcaddr: (Option) = None, or |_| None, + "--rpcaddr=[IP]", + "Equivalent to --jsonrpc-interface IP.", - ARG arg_rpcport: (Option) = None, or |_| None, - "--rpcport=[PORT]", - "Equivalent to --jsonrpc-port PORT.", + ARG arg_rpcport: (Option) = None, or |_| None, + "--rpcport=[PORT]", + "Equivalent to --jsonrpc-port PORT.", - ARG arg_rpcapi: (Option) = None, or |_| None, - "--rpcapi=[APIS]", - "Equivalent to --jsonrpc-apis APIS.", + ARG arg_rpcapi: (Option) = None, or |_| None, + "--rpcapi=[APIS]", + "Equivalent to --jsonrpc-apis APIS.", - ARG 
arg_rpccorsdomain: (Option) = None, or |_| None, - "--rpccorsdomain=[URL]", - "Equivalent to --jsonrpc-cors URL.", + ARG arg_rpccorsdomain: (Option) = None, or |_| None, + "--rpccorsdomain=[URL]", + "Equivalent to --jsonrpc-cors URL.", - ARG arg_ipcapi: (Option) = None, or |_| None, - "--ipcapi=[APIS]", - "Equivalent to --ipc-apis APIS.", + ARG arg_ipcapi: (Option) = None, or |_| None, + "--ipcapi=[APIS]", + "Equivalent to --ipc-apis APIS.", - ARG arg_ipcpath: (Option) = None, or |_| None, - "--ipcpath=[PATH]", - "Equivalent to --ipc-path PATH.", + ARG arg_ipcpath: (Option) = None, or |_| None, + "--ipcpath=[PATH]", + "Equivalent to --ipc-path PATH.", - ARG arg_gasprice: (Option) = None, or |_| None, - "--gasprice=[WEI]", - "Equivalent to --min-gas-price WEI.", + ARG arg_gasprice: (Option) = None, or |_| None, + "--gasprice=[WEI]", + "Equivalent to --min-gas-price WEI.", - ARG arg_cache: (Option) = None, or |_| None, - "--cache=[MB]", - "Equivalent to --cache-size MB.", + ARG arg_cache: (Option) = None, or |_| None, + "--cache=[MB]", + "Equivalent to --cache-size MB.", - // ARG Removed in 1.7. + // ARG Removed in 1.7. 
- ARG arg_dapps_port: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_port.clone(), - "--dapps-port=[PORT]", - "Does nothing; dapps server has been removed.", + ARG arg_dapps_port: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_port.clone(), + "--dapps-port=[PORT]", + "Does nothing; dapps server has been removed.", - ARG arg_dapps_interface: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_interface.clone(), - "--dapps-interface=[IP]", - "Does nothing; dapps server has been removed.", + ARG arg_dapps_interface: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_interface.clone(), + "--dapps-interface=[IP]", + "Does nothing; dapps server has been removed.", - ARG arg_dapps_hosts: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_hosts.as_ref().map(|vec| vec.join(",")), - "--dapps-hosts=[HOSTS]", - "Does nothing; dapps server has been removed.", + ARG arg_dapps_hosts: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_hosts.as_ref().map(|vec| vec.join(",")), + "--dapps-hosts=[HOSTS]", + "Does nothing; dapps server has been removed.", - ARG arg_dapps_cors: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_cors.clone(), - "--dapps-cors=[URL]", - "Does nothing; dapps server has been removed.", + ARG arg_dapps_cors: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_cors.clone(), + "--dapps-cors=[URL]", + "Does nothing; dapps server has been removed.", - ARG arg_dapps_user: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_user.clone(), - "--dapps-user=[USERNAME]", - "Dapps server authentication has been removed.", + ARG arg_dapps_user: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_user.clone(), + "--dapps-user=[USERNAME]", + "Dapps server authentication has been removed.", - ARG arg_dapps_pass: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_pass.clone(), - "--dapps-pass=[PASSWORD]", - "Dapps server authentication has been removed.", + ARG arg_dapps_pass: (Option) 
= None, or |c: &Config| c.dapps.as_ref()?._legacy_pass.clone(), + "--dapps-pass=[PASSWORD]", + "Dapps server authentication has been removed.", - // ARG removed in 1.11. + // ARG removed in 1.11. - ARG arg_ui_interface: (Option) = None, or |_| None, - "--ui-interface=[IP]", - "Does nothing; UI is now a separate project.", + ARG arg_ui_interface: (Option) = None, or |_| None, + "--ui-interface=[IP]", + "Does nothing; UI is now a separate project.", - ARG arg_ui_hosts: (Option) = None, or |_| None, - "--ui-hosts=[HOSTS]", - "Does nothing; UI is now a separate project.", + ARG arg_ui_hosts: (Option) = None, or |_| None, + "--ui-hosts=[HOSTS]", + "Does nothing; UI is now a separate project.", - ARG arg_ui_port: (Option) = None, or |_| None, - "--ui-port=[PORT]", - "Does nothing; UI is now a separate project.", + ARG arg_ui_port: (Option) = None, or |_| None, + "--ui-port=[PORT]", + "Does nothing; UI is now a separate project.", - ARG arg_tx_queue_ban_count: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_queue_ban_count.clone(), - "--tx-queue-ban-count=[C]", - "Not supported.", + ARG arg_tx_queue_ban_count: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_queue_ban_count.clone(), + "--tx-queue-ban-count=[C]", + "Not supported.", - ARG arg_tx_queue_ban_time: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_queue_ban_time.clone(), - "--tx-queue-ban-time=[SEC]", - "Not supported.", + ARG arg_tx_queue_ban_time: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_queue_ban_time.clone(), + "--tx-queue-ban-time=[SEC]", + "Not supported.", - // ARG removed in 2.0. + // ARG removed in 2.0. 
- ARG arg_dapps_path: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_path.clone(), - "--dapps-path=[PATH]", - "Specify directory where dapps should be installed.", + ARG arg_dapps_path: (Option) = None, or |c: &Config| c.dapps.as_ref()?._legacy_path.clone(), + "--dapps-path=[PATH]", + "Specify directory where dapps should be installed.", - ARG arg_ntp_servers: (Option) = None, or |_| None, - "--ntp-servers=[HOSTS]", - "Does nothing; checking if clock is sync with NTP servers is now done on the UI.", - } + ARG arg_ntp_servers: (Option) = None, or |_| None, + "--ntp-servers=[HOSTS]", + "Does nothing; checking if clock is sync with NTP servers is now done on the UI.", + } } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Config { - parity: Option, - account: Option, - ui: Option, - network: Option, - rpc: Option, - websockets: Option, - ipc: Option, - dapps: Option, - secretstore: Option, - private_tx: Option, - ipfs: Option, - mining: Option, - footprint: Option, - snapshots: Option, - misc: Option, - stratum: Option, - whisper: Option, - light: Option, + parity: Option, + account: Option, + ui: Option, + network: Option, + rpc: Option, + websockets: Option, + ipc: Option, + dapps: Option, + secretstore: Option, + private_tx: Option, + ipfs: Option, + mining: Option, + footprint: Option, + snapshots: Option, + misc: Option, + stratum: Option, + whisper: Option, + light: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Operating { - mode: Option, - mode_timeout: Option, - mode_alarm: Option, - auto_update: Option, - auto_update_delay: Option, - auto_update_check_frequency: Option, - release_track: Option, - no_download: Option, - no_consensus: Option, - chain: Option, - base_path: Option, - db_path: Option, - keys_path: Option, - identity: Option, - light: Option, - no_persistent_txqueue: Option, - no_hardcoded_sync: Option, + mode: Option, + mode_timeout: Option, + 
mode_alarm: Option, + auto_update: Option, + auto_update_delay: Option, + auto_update_check_frequency: Option, + release_track: Option, + no_download: Option, + no_consensus: Option, + chain: Option, + base_path: Option, + db_path: Option, + keys_path: Option, + identity: Option, + light: Option, + no_persistent_txqueue: Option, + no_hardcoded_sync: Option, - #[serde(rename = "public_node")] - _legacy_public_node: Option, + #[serde(rename = "public_node")] + _legacy_public_node: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Account { - unlock: Option>, - password: Option>, - keys_iterations: Option, - refresh_time: Option, - disable_hardware: Option, - fast_unlock: Option, + unlock: Option>, + password: Option>, + keys_iterations: Option, + refresh_time: Option, + disable_hardware: Option, + fast_unlock: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct PrivateTransactions { - enabled: Option, - signer: Option, - validators: Option>, - account: Option, - passwords: Option, - sstore_url: Option, - sstore_threshold: Option, + enabled: Option, + signer: Option, + validators: Option>, + account: Option, + passwords: Option, + sstore_url: Option, + sstore_threshold: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Ui { - path: Option, + path: Option, - #[serde(rename = "force")] - _legacy_force: Option, - #[serde(rename = "disable")] - _legacy_disable: Option, - #[serde(rename = "port")] - _legacy_port: Option, - #[serde(rename = "interface")] - _legacy_interface: Option, - #[serde(rename = "hosts")] - _legacy_hosts: Option>, + #[serde(rename = "force")] + _legacy_force: Option, + #[serde(rename = "disable")] + _legacy_disable: Option, + #[serde(rename = "port")] + _legacy_port: Option, + #[serde(rename = "interface")] + _legacy_interface: Option, + #[serde(rename = "hosts")] + _legacy_hosts: Option>, } #[derive(Default, 
Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Network { - warp: Option, - warp_barrier: Option, - port: Option, - interface: Option, - min_peers: Option, - max_peers: Option, - snapshot_peers: Option, - max_pending_peers: Option, - nat: Option, - allow_ips: Option, - id: Option, - bootnodes: Option>, - discovery: Option, - node_key: Option, - reserved_peers: Option, - reserved_only: Option, - no_serve_light: Option, + warp: Option, + warp_barrier: Option, + port: Option, + interface: Option, + min_peers: Option, + max_peers: Option, + snapshot_peers: Option, + max_pending_peers: Option, + nat: Option, + allow_ips: Option, + id: Option, + bootnodes: Option>, + discovery: Option, + node_key: Option, + reserved_peers: Option, + reserved_only: Option, + no_serve_light: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Rpc { - disable: Option, - port: Option, - interface: Option, - cors: Option>, - apis: Option>, - hosts: Option>, - server_threads: Option, - processing_threads: Option, - max_payload: Option, - keep_alive: Option, - experimental_rpcs: Option, - poll_lifetime: Option, - allow_missing_blocks: Option, + disable: Option, + port: Option, + interface: Option, + cors: Option>, + apis: Option>, + hosts: Option>, + server_threads: Option, + processing_threads: Option, + max_payload: Option, + keep_alive: Option, + experimental_rpcs: Option, + poll_lifetime: Option, + allow_missing_blocks: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Ws { - disable: Option, - port: Option, - interface: Option, - apis: Option>, - origins: Option>, - hosts: Option>, - max_connections: Option, + disable: Option, + port: Option, + interface: Option, + apis: Option>, + origins: Option>, + hosts: Option>, + max_connections: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Ipc { - disable: Option, - path: Option, - apis: 
Option>, + disable: Option, + path: Option, + apis: Option>, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Dapps { - #[serde(rename = "disable")] - _legacy_disable: Option, - #[serde(rename = "port")] - _legacy_port: Option, - #[serde(rename = "interface")] - _legacy_interface: Option, - #[serde(rename = "hosts")] - _legacy_hosts: Option>, - #[serde(rename = "cors")] - _legacy_cors: Option, - #[serde(rename = "path")] - _legacy_path: Option, - #[serde(rename = "user")] - _legacy_user: Option, - #[serde(rename = "pass")] - _legacy_pass: Option, + #[serde(rename = "disable")] + _legacy_disable: Option, + #[serde(rename = "port")] + _legacy_port: Option, + #[serde(rename = "interface")] + _legacy_interface: Option, + #[serde(rename = "hosts")] + _legacy_hosts: Option>, + #[serde(rename = "cors")] + _legacy_cors: Option, + #[serde(rename = "path")] + _legacy_path: Option, + #[serde(rename = "user")] + _legacy_user: Option, + #[serde(rename = "pass")] + _legacy_pass: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct SecretStore { - disable: Option, - disable_http: Option, - disable_auto_migrate: Option, - acl_contract: Option, - service_contract: Option, - service_contract_srv_gen: Option, - service_contract_srv_retr: Option, - service_contract_doc_store: Option, - service_contract_doc_sretr: Option, - self_secret: Option, - admin_public: Option, - nodes: Option>, - server_set_contract: Option, - interface: Option, - port: Option, - http_interface: Option, - http_port: Option, - path: Option, + disable: Option, + disable_http: Option, + disable_auto_migrate: Option, + acl_contract: Option, + service_contract: Option, + service_contract_srv_gen: Option, + service_contract_srv_retr: Option, + service_contract_doc_store: Option, + service_contract_doc_sretr: Option, + self_secret: Option, + admin_public: Option, + nodes: Option>, + server_set_contract: Option, + interface: Option, + 
port: Option, + http_interface: Option, + http_port: Option, + path: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Ipfs { - enable: Option, - port: Option, - interface: Option, - cors: Option>, - hosts: Option>, + enable: Option, + port: Option, + interface: Option, + cors: Option>, + hosts: Option>, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Mining { - author: Option, - engine_signer: Option, - force_sealing: Option, - reseal_on_uncle: Option, - reseal_on_txs: Option, - reseal_min_period: Option, - reseal_max_period: Option, - work_queue_size: Option, - tx_gas_limit: Option, - tx_time_limit: Option, - relay_set: Option, - min_gas_price: Option, - gas_price_percentile: Option, - usd_per_tx: Option, - usd_per_eth: Option, - price_update_period: Option, - gas_floor_target: Option, - gas_cap: Option, - extra_data: Option, - tx_queue_size: Option, - tx_queue_per_sender: Option, - tx_queue_mem_limit: Option, - tx_queue_locals: Option>, - tx_queue_strategy: Option, - tx_queue_ban_count: Option, - tx_queue_ban_time: Option, - tx_queue_no_unfamiliar_locals: Option, - tx_queue_no_early_reject: Option, - remove_solved: Option, - notify_work: Option>, - refuse_service_transactions: Option, - infinite_pending_block: Option, - max_round_blocks_to_import: Option, + author: Option, + engine_signer: Option, + force_sealing: Option, + reseal_on_uncle: Option, + reseal_on_txs: Option, + reseal_min_period: Option, + reseal_max_period: Option, + work_queue_size: Option, + tx_gas_limit: Option, + tx_time_limit: Option, + relay_set: Option, + min_gas_price: Option, + gas_price_percentile: Option, + usd_per_tx: Option, + usd_per_eth: Option, + price_update_period: Option, + gas_floor_target: Option, + gas_cap: Option, + extra_data: Option, + tx_queue_size: Option, + tx_queue_per_sender: Option, + tx_queue_mem_limit: Option, + tx_queue_locals: Option>, + tx_queue_strategy: Option, + 
tx_queue_ban_count: Option, + tx_queue_ban_time: Option, + tx_queue_no_unfamiliar_locals: Option, + tx_queue_no_early_reject: Option, + remove_solved: Option, + notify_work: Option>, + refuse_service_transactions: Option, + infinite_pending_block: Option, + max_round_blocks_to_import: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Stratum { - interface: Option, - port: Option, - secret: Option, + interface: Option, + port: Option, + secret: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Footprint { - tracing: Option, - pruning: Option, - pruning_history: Option, - pruning_memory: Option, - fast_and_loose: Option, - cache_size: Option, - cache_size_db: Option, - cache_size_blocks: Option, - cache_size_queue: Option, - cache_size_state: Option, - db_compaction: Option, - fat_db: Option, - scale_verifiers: Option, - num_verifiers: Option, + tracing: Option, + pruning: Option, + pruning_history: Option, + pruning_memory: Option, + fast_and_loose: Option, + cache_size: Option, + cache_size_db: Option, + cache_size_blocks: Option, + cache_size_queue: Option, + cache_size_state: Option, + db_compaction: Option, + fat_db: Option, + scale_verifiers: Option, + num_verifiers: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Snapshots { - disable_periodic: Option, - processing_threads: Option, + disable_periodic: Option, + processing_threads: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Misc { - logging: Option, - log_file: Option, - color: Option, - ports_shift: Option, - unsafe_expose: Option, + logging: Option, + log_file: Option, + color: Option, + ports_shift: Option, + unsafe_expose: Option, } #[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Whisper { - enabled: Option, - pool_size: Option, + enabled: Option, + pool_size: Option, } 
#[derive(Default, Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] struct Light { - on_demand_response_time_window: Option, - on_demand_request_backoff_start: Option, - on_demand_request_backoff_max: Option, - on_demand_request_backoff_rounds_max: Option, - on_demand_request_consecutive_failures: Option, + on_demand_response_time_window: Option, + on_demand_request_backoff_start: Option, + on_demand_request_backoff_max: Option, + on_demand_request_backoff_rounds_max: Option, + on_demand_request_consecutive_failures: Option, } #[cfg(test)] mod tests { - use super::{ - Args, ArgsError, - Config, Operating, Account, Ui, Network, Ws, Rpc, Ipc, Dapps, Ipfs, Mining, Footprint, - Snapshots, Misc, Whisper, SecretStore, Light, - }; - use toml; - use clap::{ErrorKind as ClapErrorKind}; - - #[test] - fn should_accept_any_argument_order() { - let args = Args::parse(&["parity", "--no-warp", "account", "list"]).unwrap(); - assert_eq!(args.flag_no_warp, true); - - let args = Args::parse(&["parity", "account", "list", "--no-warp"]).unwrap(); - assert_eq!(args.flag_no_warp, true); - - let args = Args::parse(&["parity", "--chain=dev", "account", "list"]).unwrap(); - assert_eq!(args.arg_chain, "dev"); - - let args = Args::parse(&["parity", "account", "list", "--chain=dev"]).unwrap(); - assert_eq!(args.arg_chain, "dev"); - } - - #[test] - fn should_not_crash_on_warp() { - let args = Args::parse(&["parity", "--warp"]); - assert!(args.is_ok()); - - let args = Args::parse(&["parity", "account", "list", "--warp"]); - assert!(args.is_ok()); - } - - #[test] - fn should_reject_invalid_values() { - let args = Args::parse(&["parity", "--cache=20"]); - assert!(args.is_ok()); - - let args = Args::parse(&["parity", "--cache=asd"]); - assert!(args.is_err()); - } - - #[test] - fn should_parse_args_and_flags() { - let args = Args::parse(&["parity", "--no-warp"]).unwrap(); - assert_eq!(args.flag_no_warp, true); - - let args = Args::parse(&["parity", "--pruning", "archive"]).unwrap(); - 
assert_eq!(args.arg_pruning, "archive"); - - let args = Args::parse(&["parity", "export", "state", "--no-storage"]).unwrap(); - assert_eq!(args.flag_export_state_no_storage, true); - - let args = Args::parse(&["parity", "export", "state", "--min-balance","123"]).unwrap(); - assert_eq!(args.arg_export_state_min_balance, Some("123".to_string())); - } - - #[test] - fn should_exit_gracefully_on_unknown_argument() { - let result = Args::parse(&["parity", "--please-exit-gracefully"]); - assert!( - match result { - Err(ArgsError::Clap(ref clap_error)) if clap_error.kind == ClapErrorKind::UnknownArgument => true, - _ => false - } - ); - } - - #[test] - fn should_use_subcommand_arg_default() { - let args = Args::parse(&["parity", "export", "state", "--at", "123"]).unwrap(); - assert_eq!(args.arg_export_state_at, "123"); - assert_eq!(args.arg_snapshot_at, "latest"); - - let args = Args::parse(&["parity", "snapshot", "--at", "123", "file.dump"]).unwrap(); - assert_eq!(args.arg_snapshot_at, "123"); - assert_eq!(args.arg_export_state_at, "latest"); - - let args = Args::parse(&["parity", "export", "state"]).unwrap(); - assert_eq!(args.arg_snapshot_at, "latest"); - assert_eq!(args.arg_export_state_at, "latest"); - - let args = Args::parse(&["parity", "snapshot", "file.dump"]).unwrap(); - assert_eq!(args.arg_snapshot_at, "latest"); - assert_eq!(args.arg_export_state_at, "latest"); - } - - #[test] - fn should_parse_multiple_values() { - let args = Args::parse(&["parity", "account", "import", "~/1", "~/2"]).unwrap(); - assert_eq!(args.arg_account_import_path, Some(vec!["~/1".to_owned(), "~/2".to_owned()])); - - let args = Args::parse(&["parity", "account", "import", "~/1,ext"]).unwrap(); - assert_eq!(args.arg_account_import_path, Some(vec!["~/1,ext".to_owned()])); - - let args = Args::parse(&["parity", "--secretstore-nodes", "abc@127.0.0.1:3333,cde@10.10.10.10:4444"]).unwrap(); - assert_eq!(args.arg_secretstore_nodes, "abc@127.0.0.1:3333,cde@10.10.10.10:4444"); - - let args = 
Args::parse(&["parity", "--password", "~/.safe/1", "--password", "~/.safe/2", "--ui-port", "8123"]).unwrap(); - assert_eq!(args.arg_password, vec!["~/.safe/1".to_owned(), "~/.safe/2".to_owned()]); - assert_eq!(args.arg_ui_port, Some(8123)); - - let args = Args::parse(&["parity", "--password", "~/.safe/1,~/.safe/2", "--ui-port", "8123"]).unwrap(); - assert_eq!(args.arg_password, vec!["~/.safe/1".to_owned(), "~/.safe/2".to_owned()]); - assert_eq!(args.arg_ui_port, Some(8123)); - } - - #[test] - fn should_parse_global_args_with_subcommand() { - let args = Args::parse(&["parity", "--chain", "dev", "account", "list"]).unwrap(); - assert_eq!(args.arg_chain, "dev".to_owned()); - } - - #[test] - fn should_parse_args_and_include_config() { - // given - let mut config = Config::default(); - let mut operating = Operating::default(); - operating.chain = Some("mordor".into()); - config.parity = Some(operating); - - // when - let args = Args::parse_with_config(&["parity"], config).unwrap(); - - // then - assert_eq!(args.arg_chain, "mordor".to_owned()); - } - - #[test] - fn should_not_use_config_if_cli_is_provided() { - // given - let mut config = Config::default(); - let mut operating = Operating::default(); - operating.chain = Some("mordor".into()); - config.parity = Some(operating); - - // when - let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap(); - - // then - assert_eq!(args.arg_chain, "xyz".to_owned()); - } - - #[test] - fn should_use_config_if_cli_is_missing() { - let mut config = Config::default(); - let mut footprint = Footprint::default(); - footprint.pruning_history = Some(128); - config.footprint = Some(footprint); - - // when - let args = Args::parse_with_config(&["parity"], config).unwrap(); - - // then - assert_eq!(args.arg_pruning_history, 128); - } - - #[test] - fn should_parse_full_config() { - // given - let config = toml::from_str(include_str!("./tests/config.full.toml")).unwrap(); - - // when - let args = 
Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap(); - - // then - assert_eq!(args, Args { - // Commands - cmd_dapp: false, - cmd_daemon: false, - cmd_account: false, - cmd_account_new: false, - cmd_account_list: false, - cmd_account_import: false, - cmd_wallet: false, - cmd_wallet_import: false, - cmd_import: false, - cmd_export: false, - cmd_export_blocks: false, - cmd_export_state: false, - cmd_signer: false, - cmd_signer_list: false, - cmd_signer_sign: false, - cmd_signer_reject: false, - cmd_signer_new_token: false, - cmd_snapshot: false, - cmd_restore: false, - cmd_tools: false, - cmd_tools_hash: false, - cmd_db: false, - cmd_db_kill: false, - cmd_db_reset: false, - cmd_export_hardcoded_sync: false, - - // Arguments - arg_daemon_pid_file: None, - arg_import_file: None, - arg_import_format: None, - arg_export_blocks_file: None, - arg_export_blocks_format: None, - arg_export_state_file: None, - arg_export_state_format: None, - arg_snapshot_file: None, - arg_restore_file: None, - arg_tools_hash_file: None, - - arg_signer_sign_id: None, - arg_signer_reject_id: None, - arg_dapp_path: None, - arg_account_import_path: None, - arg_wallet_import_path: None, - arg_db_reset_num: 10, - - // -- Operating Options - arg_mode: "last".into(), - arg_mode_timeout: 300u64, - arg_mode_alarm: 3600u64, - arg_auto_update: "none".into(), - arg_auto_update_delay: 200u16, - arg_auto_update_check_frequency: 50u16, - arg_release_track: "current".into(), - flag_public_node: false, - flag_no_download: false, - flag_no_consensus: false, - arg_chain: "xyz".into(), - arg_base_path: Some("$HOME/.parity".into()), - arg_db_path: Some("$HOME/.parity/chains".into()), - arg_keys_path: "$HOME/.parity/keys".into(), - arg_identity: "".into(), - flag_light: false, - flag_no_hardcoded_sync: false, - flag_no_persistent_txqueue: false, - flag_force_direct: false, - - // -- Convenience Options - arg_config: "$BASE/config.toml".into(), - arg_ports_shift: 0, - flag_unsafe_expose: false, - 
- // -- Account Options - arg_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), - arg_password: vec!["~/.safe/password.file".into()], - arg_keys_iterations: 10240u32, - arg_accounts_refresh: 5u64, - flag_no_hardware_wallets: false, - flag_fast_unlock: false, - - // -- Private Transactions Options - flag_private_enabled: true, - arg_private_signer: Some("0xdeadbeefcafe0000000000000000000000000000".into()), - arg_private_validators: Some("0xdeadbeefcafe0000000000000000000000000000".into()), - arg_private_passwords: Some("~/.safe/password.file".into()), - arg_private_account: Some("0xdeadbeefcafe0000000000000000000000000000".into()), - arg_private_sstore_url: Some("http://localhost:8082".into()), - arg_private_sstore_threshold: Some(0), - - flag_force_ui: false, - flag_no_ui: false, - arg_ui_port: None, - arg_ui_interface: None, - arg_ui_hosts: None, - arg_ui_path: "$HOME/.parity/signer".into(), - flag_ui_no_validation: false, - - // -- Networking Options - flag_no_warp: false, - arg_port: 30303u16, - arg_interface: "all".into(), - arg_min_peers: Some(25u16), - arg_max_peers: Some(50u16), - arg_max_pending_peers: 64u16, - arg_snapshot_peers: 0u16, - arg_allow_ips: "all".into(), - arg_nat: "any".into(), - arg_network_id: Some(1), - arg_bootnodes: Some("".into()), - flag_no_discovery: false, - arg_node_key: None, - arg_reserved_peers: Some("./path_to_file".into()), - flag_reserved_only: false, - flag_no_ancient_blocks: false, - flag_no_serve_light: false, - - // -- API and Console Options - // RPC - flag_no_jsonrpc: false, - flag_jsonrpc_no_keep_alive: false, - flag_jsonrpc_experimental: false, - arg_jsonrpc_port: 8545u16, - arg_jsonrpc_interface: "local".into(), - arg_jsonrpc_cors: "null".into(), - arg_jsonrpc_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(), - arg_jsonrpc_hosts: "none".into(), - arg_jsonrpc_server_threads: None, - arg_jsonrpc_threads: 4, - arg_jsonrpc_max_payload: None, - arg_poll_lifetime: 60u32, - 
flag_jsonrpc_allow_missing_blocks: false, - - // WS - flag_no_ws: false, - arg_ws_port: 8546u16, - arg_ws_interface: "local".into(), - arg_ws_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(), - arg_ws_origins: "none".into(), - arg_ws_hosts: "none".into(), - arg_ws_max_connections: 100, - - // IPC - flag_no_ipc: false, - arg_ipc_path: "$HOME/.parity/jsonrpc.ipc".into(), - arg_ipc_apis: "web3,eth,net,parity,parity_accounts,personal,traces,rpc,secretstore".into(), - - // DAPPS - arg_dapps_path: Some("$HOME/.parity/dapps".into()), - flag_no_dapps: false, - - // SECRETSTORE - flag_no_secretstore: false, - flag_no_secretstore_http: false, - flag_no_secretstore_auto_migrate: false, - arg_secretstore_acl_contract: Some("registry".into()), - arg_secretstore_contract: Some("none".into()), - arg_secretstore_srv_gen_contract: Some("none".into()), - arg_secretstore_srv_retr_contract: Some("none".into()), - arg_secretstore_doc_store_contract: Some("none".into()), - arg_secretstore_doc_sretr_contract: Some("none".into()), - arg_secretstore_secret: None, - arg_secretstore_admin_public: None, - arg_secretstore_nodes: "".into(), - arg_secretstore_server_set_contract: Some("registry".into()), - arg_secretstore_interface: "local".into(), - arg_secretstore_port: 8083u16, - arg_secretstore_http_interface: "local".into(), - arg_secretstore_http_port: 8082u16, - arg_secretstore_path: "$HOME/.parity/secretstore".into(), - - // IPFS - flag_ipfs_api: false, - arg_ipfs_api_port: 5001u16, - arg_ipfs_api_interface: "local".into(), - arg_ipfs_api_cors: "null".into(), - arg_ipfs_api_hosts: "none".into(), - - // -- Sealing/Mining Options - arg_author: Some("0xdeadbeefcafe0000000000000000000000000001".into()), - arg_engine_signer: Some("0xdeadbeefcafe0000000000000000000000000001".into()), - flag_force_sealing: true, - arg_reseal_on_txs: "all".into(), - arg_reseal_min_period: 4000u64, - arg_reseal_max_period: 60000u64, - flag_reseal_on_uncle: false, - arg_work_queue_size: 20usize, - 
arg_tx_gas_limit: Some("10000000".into()), - arg_tx_time_limit: Some(100u64), - arg_relay_set: "cheap".into(), - arg_min_gas_price: Some(0u64), - arg_usd_per_tx: "0.0001".into(), - arg_gas_price_percentile: 50usize, - arg_usd_per_eth: "auto".into(), - arg_price_update_period: "hourly".into(), - arg_gas_floor_target: "8000000".into(), - arg_gas_cap: "10000000".into(), - arg_extra_data: Some("Parity".into()), - flag_tx_queue_no_unfamiliar_locals: false, - flag_tx_queue_no_early_reject: false, - arg_tx_queue_size: 8192usize, - arg_tx_queue_per_sender: None, - arg_tx_queue_mem_limit: 4u32, - arg_tx_queue_locals: Some("0xdeadbeefcafe0000000000000000000000000000".into()), - arg_tx_queue_strategy: "gas_factor".into(), - arg_tx_queue_ban_count: Some(1u16), - arg_tx_queue_ban_time: Some(180u16), - flag_remove_solved: false, - arg_notify_work: Some("http://localhost:3001".into()), - flag_refuse_service_transactions: false, - flag_infinite_pending_block: false, - arg_max_round_blocks_to_import: 12usize, - - flag_stratum: false, - arg_stratum_interface: "local".to_owned(), - arg_stratum_port: 8008u16, - arg_stratum_secret: None, - - // -- Footprint Options - arg_tracing: "auto".into(), - arg_pruning: "auto".into(), - arg_pruning_history: 64u64, - arg_pruning_memory: 500usize, - arg_cache_size_db: 64u32, - arg_cache_size_blocks: 8u32, - arg_cache_size_queue: 50u32, - arg_cache_size_state: 25u32, - arg_cache_size: Some(128), - flag_fast_and_loose: false, - arg_db_compaction: "ssd".into(), - arg_fat_db: "auto".into(), - flag_scale_verifiers: true, - arg_num_verifiers: Some(6), - - // -- Import/Export Options - arg_export_blocks_from: "1".into(), - arg_export_blocks_to: "latest".into(), - flag_no_seal_check: false, - flag_export_state_no_code: false, - flag_export_state_no_storage: false, - arg_export_state_min_balance: None, - arg_export_state_max_balance: None, - - // -- Snapshot Optons - arg_export_state_at: "latest".into(), - arg_snapshot_at: "latest".into(), - 
flag_no_periodic_snapshot: false, - arg_snapshot_threads: None, - - // -- Light options. - arg_on_demand_response_time_window: Some(2), - arg_on_demand_request_backoff_start: Some(9), - arg_on_demand_request_backoff_max: Some(15), - arg_on_demand_request_backoff_rounds_max: Some(100), - arg_on_demand_request_consecutive_failures: Some(1), - - // -- Whisper options. - flag_whisper: false, - arg_whisper_pool_size: 20, - - // -- Legacy Options - flag_warp: false, - flag_geth: false, - flag_testnet: false, - flag_import_geth_keys: false, - arg_warp_barrier: None, - arg_datadir: None, - arg_networkid: None, - arg_peers: None, - arg_nodekey: None, - flag_nodiscover: false, - flag_jsonrpc: false, - flag_jsonrpc_off: false, - flag_webapp: false, - flag_dapps_off: false, - flag_rpc: false, - arg_rpcaddr: None, - arg_rpcport: None, - arg_rpcapi: None, - arg_rpccorsdomain: None, - flag_ipcdisable: false, - flag_ipc_off: false, - arg_ipcapi: None, - arg_ipcpath: None, - arg_gasprice: None, - arg_etherbase: None, - arg_extradata: None, - arg_cache: None, - // Legacy-Dapps - arg_dapps_port: Some(8080), - arg_dapps_interface: Some("local".into()), - arg_dapps_hosts: Some("none".into()), - arg_dapps_cors: None, - arg_dapps_user: Some("test_user".into()), - arg_dapps_pass: Some("test_pass".into()), - flag_dapps_apis_all: false, - - // -- Internal Options - flag_can_restart: false, - - // -- Miscellaneous Options - arg_ntp_servers: None, - flag_version: false, - arg_logging: Some("own_tx=trace".into()), - arg_log_file: Some("/var/log/parity.log".into()), - flag_no_color: false, - flag_no_config: false, - }); - } - - #[test] - fn should_parse_config_and_return_errors() { - let config1 = Args::parse_config(include_str!("./tests/config.invalid1.toml")); - let config2 = Args::parse_config(include_str!("./tests/config.invalid2.toml")); - let config3 = Args::parse_config(include_str!("./tests/config.invalid3.toml")); - let config4 = 
Args::parse_config(include_str!("./tests/config.invalid4.toml")); - - match (config1, config2, config3, config4) { - ( - Err(ArgsError::Decode(_)), - Err(ArgsError::Decode(_)), - Err(ArgsError::Decode(_)), - Err(ArgsError::Decode(_)), - ) => {}, - (a, b, c, d) => { - assert!(false, "Got invalid error types: {:?}, {:?}, {:?}, {:?}", a, b, c, d); - } - } - } - - #[test] - fn should_deserialize_toml_file() { - let config: Config = toml::from_str(include_str!("./tests/config.toml")).unwrap(); - - assert_eq!(config, Config { - parity: Some(Operating { - mode: Some("dark".into()), - mode_timeout: Some(15u64), - mode_alarm: Some(10u64), - auto_update: None, - auto_update_delay: None, - auto_update_check_frequency: None, - release_track: None, - no_download: None, - no_consensus: None, - chain: Some("./chain.json".into()), - base_path: None, - db_path: None, - keys_path: None, - identity: None, - light: None, - no_hardcoded_sync: None, - no_persistent_txqueue: None, - _legacy_public_node: None, - }), - account: Some(Account { - unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]), - password: Some(vec!["passwdfile path".into()]), - keys_iterations: None, - refresh_time: None, - disable_hardware: None, - fast_unlock: None, - }), - ui: Some(Ui { - path: None, - _legacy_force: None, - _legacy_disable: Some(true), - _legacy_port: None, - _legacy_interface: None, - _legacy_hosts: None, - }), - network: Some(Network { - warp: Some(false), - warp_barrier: None, - port: None, - interface: None, - min_peers: Some(10), - max_peers: Some(20), - max_pending_peers: Some(30), - snapshot_peers: Some(40), - allow_ips: Some("public".into()), - nat: Some("any".into()), - id: None, - bootnodes: None, - discovery: Some(true), - node_key: None, - reserved_peers: Some("./path/to/reserved_peers".into()), - reserved_only: Some(true), - no_serve_light: None, - }), - websockets: Some(Ws { - disable: Some(true), - port: None, - interface: None, - apis: None, - origins: 
Some(vec!["none".into()]), - hosts: None, - max_connections: None, - }), - rpc: Some(Rpc { - disable: Some(true), - port: Some(8180), - interface: None, - cors: None, - apis: None, - hosts: None, - server_threads: None, - processing_threads: None, - max_payload: None, - keep_alive: None, - experimental_rpcs: None, - poll_lifetime: None, - allow_missing_blocks: None - }), - ipc: Some(Ipc { - disable: None, - path: None, - apis: Some(vec!["rpc".into(), "eth".into()]), - }), - dapps: Some(Dapps { - _legacy_disable: None, - _legacy_port: Some(8080), - _legacy_path: None, - _legacy_interface: None, - _legacy_hosts: None, - _legacy_cors: None, - _legacy_user: Some("username".into()), - _legacy_pass: Some("password".into()) - }), - secretstore: Some(SecretStore { - disable: None, - disable_http: None, - disable_auto_migrate: None, - acl_contract: None, - service_contract: None, - service_contract_srv_gen: None, - service_contract_srv_retr: None, - service_contract_doc_store: None, - service_contract_doc_sretr: None, - self_secret: None, - admin_public: None, - nodes: None, - server_set_contract: None, - interface: None, - port: Some(8083), - http_interface: None, - http_port: Some(8082), - path: None, - }), - private_tx: None, - ipfs: Some(Ipfs { - enable: Some(false), - port: Some(5001), - interface: None, - cors: None, - hosts: None, - }), - mining: Some(Mining { - author: Some("0xdeadbeefcafe0000000000000000000000000001".into()), - engine_signer: Some("0xdeadbeefcafe0000000000000000000000000001".into()), - force_sealing: Some(true), - reseal_on_txs: Some("all".into()), - reseal_on_uncle: None, - reseal_min_period: Some(4000), - reseal_max_period: Some(60000), - work_queue_size: None, - relay_set: None, - min_gas_price: None, - gas_price_percentile: None, - usd_per_tx: None, - usd_per_eth: None, - price_update_period: Some("hourly".into()), - gas_floor_target: None, - gas_cap: None, - tx_queue_size: Some(8192), - tx_queue_per_sender: None, - tx_queue_mem_limit: None, - 
tx_queue_locals: None, - tx_queue_strategy: None, - tx_queue_ban_count: None, - tx_queue_ban_time: None, - tx_queue_no_unfamiliar_locals: None, - tx_queue_no_early_reject: None, - tx_gas_limit: None, - tx_time_limit: None, - extra_data: None, - remove_solved: None, - notify_work: None, - refuse_service_transactions: None, - infinite_pending_block: None, - max_round_blocks_to_import: None, - }), - footprint: Some(Footprint { - tracing: Some("on".into()), - pruning: Some("fast".into()), - pruning_history: Some(64), - pruning_memory: None, - fast_and_loose: None, - cache_size: None, - cache_size_db: Some(256), - cache_size_blocks: Some(16), - cache_size_queue: Some(100), - cache_size_state: Some(25), - db_compaction: Some("ssd".into()), - fat_db: Some("off".into()), - scale_verifiers: Some(false), - num_verifiers: None, - }), - light: Some(Light { - on_demand_response_time_window: Some(2), - on_demand_request_backoff_start: Some(9), - on_demand_request_backoff_max: Some(15), - on_demand_request_backoff_rounds_max: Some(10), - on_demand_request_consecutive_failures: Some(1), - }), - snapshots: Some(Snapshots { - disable_periodic: Some(true), - processing_threads: None, - }), - misc: Some(Misc { - logging: Some("own_tx=trace".into()), - log_file: Some("/var/log/parity.log".into()), - color: Some(true), - ports_shift: Some(0), - unsafe_expose: Some(false), - }), - whisper: Some(Whisper { - enabled: Some(true), - pool_size: Some(50), - }), - stratum: None, - }); - } - - #[test] - fn should_not_accept_min_peers_bigger_than_max_peers() { - match Args::parse(&["parity", "--max-peers=39", "--min-peers=40"]) { - Err(ArgsError::PeerConfiguration) => (), - _ => assert_eq!(false, true), - } - } - - #[test] - fn should_accept_max_peers_equal_or_bigger_than_min_peers() { - Args::parse(&["parity", "--max-peers=40", "--min-peers=40"]).unwrap(); - Args::parse(&["parity", "--max-peers=100", "--min-peers=40"]).unwrap(); - } + use super::{ + Account, Args, ArgsError, Config, Dapps, 
Footprint, Ipc, Ipfs, Light, Mining, Misc, + Network, Operating, Rpc, SecretStore, Snapshots, Ui, Whisper, Ws, + }; + use clap::ErrorKind as ClapErrorKind; + use toml; + + #[test] + fn should_accept_any_argument_order() { + let args = Args::parse(&["parity", "--no-warp", "account", "list"]).unwrap(); + assert_eq!(args.flag_no_warp, true); + + let args = Args::parse(&["parity", "account", "list", "--no-warp"]).unwrap(); + assert_eq!(args.flag_no_warp, true); + + let args = Args::parse(&["parity", "--chain=dev", "account", "list"]).unwrap(); + assert_eq!(args.arg_chain, "dev"); + + let args = Args::parse(&["parity", "account", "list", "--chain=dev"]).unwrap(); + assert_eq!(args.arg_chain, "dev"); + } + + #[test] + fn should_not_crash_on_warp() { + let args = Args::parse(&["parity", "--warp"]); + assert!(args.is_ok()); + + let args = Args::parse(&["parity", "account", "list", "--warp"]); + assert!(args.is_ok()); + } + + #[test] + fn should_reject_invalid_values() { + let args = Args::parse(&["parity", "--cache=20"]); + assert!(args.is_ok()); + + let args = Args::parse(&["parity", "--cache=asd"]); + assert!(args.is_err()); + } + + #[test] + fn should_parse_args_and_flags() { + let args = Args::parse(&["parity", "--no-warp"]).unwrap(); + assert_eq!(args.flag_no_warp, true); + + let args = Args::parse(&["parity", "--pruning", "archive"]).unwrap(); + assert_eq!(args.arg_pruning, "archive"); + + let args = Args::parse(&["parity", "export", "state", "--no-storage"]).unwrap(); + assert_eq!(args.flag_export_state_no_storage, true); + + let args = Args::parse(&["parity", "export", "state", "--min-balance", "123"]).unwrap(); + assert_eq!(args.arg_export_state_min_balance, Some("123".to_string())); + } + + #[test] + fn should_exit_gracefully_on_unknown_argument() { + let result = Args::parse(&["parity", "--please-exit-gracefully"]); + assert!(match result { + Err(ArgsError::Clap(ref clap_error)) + if clap_error.kind == ClapErrorKind::UnknownArgument => + true, + _ => false, + 
}); + } + + #[test] + fn should_use_subcommand_arg_default() { + let args = Args::parse(&["parity", "export", "state", "--at", "123"]).unwrap(); + assert_eq!(args.arg_export_state_at, "123"); + assert_eq!(args.arg_snapshot_at, "latest"); + + let args = Args::parse(&["parity", "snapshot", "--at", "123", "file.dump"]).unwrap(); + assert_eq!(args.arg_snapshot_at, "123"); + assert_eq!(args.arg_export_state_at, "latest"); + + let args = Args::parse(&["parity", "export", "state"]).unwrap(); + assert_eq!(args.arg_snapshot_at, "latest"); + assert_eq!(args.arg_export_state_at, "latest"); + + let args = Args::parse(&["parity", "snapshot", "file.dump"]).unwrap(); + assert_eq!(args.arg_snapshot_at, "latest"); + assert_eq!(args.arg_export_state_at, "latest"); + } + + #[test] + fn should_parse_multiple_values() { + let args = Args::parse(&["parity", "account", "import", "~/1", "~/2"]).unwrap(); + assert_eq!( + args.arg_account_import_path, + Some(vec!["~/1".to_owned(), "~/2".to_owned()]) + ); + + let args = Args::parse(&["parity", "account", "import", "~/1,ext"]).unwrap(); + assert_eq!( + args.arg_account_import_path, + Some(vec!["~/1,ext".to_owned()]) + ); + + let args = Args::parse(&[ + "parity", + "--secretstore-nodes", + "abc@127.0.0.1:3333,cde@10.10.10.10:4444", + ]) + .unwrap(); + assert_eq!( + args.arg_secretstore_nodes, + "abc@127.0.0.1:3333,cde@10.10.10.10:4444" + ); + + let args = Args::parse(&[ + "parity", + "--password", + "~/.safe/1", + "--password", + "~/.safe/2", + "--ui-port", + "8123", + ]) + .unwrap(); + assert_eq!( + args.arg_password, + vec!["~/.safe/1".to_owned(), "~/.safe/2".to_owned()] + ); + assert_eq!(args.arg_ui_port, Some(8123)); + + let args = Args::parse(&[ + "parity", + "--password", + "~/.safe/1,~/.safe/2", + "--ui-port", + "8123", + ]) + .unwrap(); + assert_eq!( + args.arg_password, + vec!["~/.safe/1".to_owned(), "~/.safe/2".to_owned()] + ); + assert_eq!(args.arg_ui_port, Some(8123)); + } + + #[test] + fn should_parse_global_args_with_subcommand() 
{ + let args = Args::parse(&["parity", "--chain", "dev", "account", "list"]).unwrap(); + assert_eq!(args.arg_chain, "dev".to_owned()); + } + + #[test] + fn should_parse_args_and_include_config() { + // given + let mut config = Config::default(); + let mut operating = Operating::default(); + operating.chain = Some("mordor".into()); + config.parity = Some(operating); + + // when + let args = Args::parse_with_config(&["parity"], config).unwrap(); + + // then + assert_eq!(args.arg_chain, "mordor".to_owned()); + } + + #[test] + fn should_not_use_config_if_cli_is_provided() { + // given + let mut config = Config::default(); + let mut operating = Operating::default(); + operating.chain = Some("mordor".into()); + config.parity = Some(operating); + + // when + let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap(); + + // then + assert_eq!(args.arg_chain, "xyz".to_owned()); + } + + #[test] + fn should_use_config_if_cli_is_missing() { + let mut config = Config::default(); + let mut footprint = Footprint::default(); + footprint.pruning_history = Some(128); + config.footprint = Some(footprint); + + // when + let args = Args::parse_with_config(&["parity"], config).unwrap(); + + // then + assert_eq!(args.arg_pruning_history, 128); + } + + #[test] + fn should_parse_full_config() { + // given + let config = toml::from_str(include_str!("./tests/config.full.toml")).unwrap(); + + // when + let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap(); + + // then + assert_eq!( + args, + Args { + // Commands + cmd_dapp: false, + cmd_daemon: false, + cmd_account: false, + cmd_account_new: false, + cmd_account_list: false, + cmd_account_import: false, + cmd_wallet: false, + cmd_wallet_import: false, + cmd_import: false, + cmd_export: false, + cmd_export_blocks: false, + cmd_export_state: false, + cmd_signer: false, + cmd_signer_list: false, + cmd_signer_sign: false, + cmd_signer_reject: false, + cmd_signer_new_token: false, + 
cmd_snapshot: false, + cmd_restore: false, + cmd_tools: false, + cmd_tools_hash: false, + cmd_db: false, + cmd_db_kill: false, + cmd_db_reset: false, + cmd_export_hardcoded_sync: false, + + // Arguments + arg_daemon_pid_file: None, + arg_import_file: None, + arg_import_format: None, + arg_export_blocks_file: None, + arg_export_blocks_format: None, + arg_export_state_file: None, + arg_export_state_format: None, + arg_snapshot_file: None, + arg_restore_file: None, + arg_tools_hash_file: None, + + arg_signer_sign_id: None, + arg_signer_reject_id: None, + arg_dapp_path: None, + arg_account_import_path: None, + arg_wallet_import_path: None, + arg_db_reset_num: 10, + + // -- Operating Options + arg_mode: "last".into(), + arg_mode_timeout: 300u64, + arg_mode_alarm: 3600u64, + arg_auto_update: "none".into(), + arg_auto_update_delay: 200u16, + arg_auto_update_check_frequency: 50u16, + arg_release_track: "current".into(), + flag_public_node: false, + flag_no_download: false, + flag_no_consensus: false, + arg_chain: "xyz".into(), + arg_base_path: Some("$HOME/.parity".into()), + arg_db_path: Some("$HOME/.parity/chains".into()), + arg_keys_path: "$HOME/.parity/keys".into(), + arg_identity: "".into(), + flag_light: false, + flag_no_hardcoded_sync: false, + flag_no_persistent_txqueue: false, + flag_force_direct: false, + + // -- Convenience Options + arg_config: "$BASE/config.toml".into(), + arg_ports_shift: 0, + flag_unsafe_expose: false, + + // -- Account Options + arg_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), + arg_password: vec!["~/.safe/password.file".into()], + arg_keys_iterations: 10240u32, + arg_accounts_refresh: 5u64, + flag_no_hardware_wallets: false, + flag_fast_unlock: false, + + // -- Private Transactions Options + flag_private_enabled: true, + arg_private_signer: Some("0xdeadbeefcafe0000000000000000000000000000".into()), + arg_private_validators: Some("0xdeadbeefcafe0000000000000000000000000000".into()), + arg_private_passwords: 
Some("~/.safe/password.file".into()), + arg_private_account: Some("0xdeadbeefcafe0000000000000000000000000000".into()), + arg_private_sstore_url: Some("http://localhost:8082".into()), + arg_private_sstore_threshold: Some(0), + + flag_force_ui: false, + flag_no_ui: false, + arg_ui_port: None, + arg_ui_interface: None, + arg_ui_hosts: None, + arg_ui_path: "$HOME/.parity/signer".into(), + flag_ui_no_validation: false, + + // -- Networking Options + flag_no_warp: false, + arg_port: 30303u16, + arg_interface: "all".into(), + arg_min_peers: Some(25u16), + arg_max_peers: Some(50u16), + arg_max_pending_peers: 64u16, + arg_snapshot_peers: 0u16, + arg_allow_ips: "all".into(), + arg_nat: "any".into(), + arg_network_id: Some(1), + arg_bootnodes: Some("".into()), + flag_no_discovery: false, + arg_node_key: None, + arg_reserved_peers: Some("./path_to_file".into()), + flag_reserved_only: false, + flag_no_ancient_blocks: false, + flag_no_serve_light: false, + + // -- API and Console Options + // RPC + flag_no_jsonrpc: false, + flag_jsonrpc_no_keep_alive: false, + flag_jsonrpc_experimental: false, + arg_jsonrpc_port: 8545u16, + arg_jsonrpc_interface: "local".into(), + arg_jsonrpc_cors: "null".into(), + arg_jsonrpc_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(), + arg_jsonrpc_hosts: "none".into(), + arg_jsonrpc_server_threads: None, + arg_jsonrpc_threads: 4, + arg_jsonrpc_max_payload: None, + arg_poll_lifetime: 60u32, + flag_jsonrpc_allow_missing_blocks: false, + + // WS + flag_no_ws: false, + arg_ws_port: 8546u16, + arg_ws_interface: "local".into(), + arg_ws_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(), + arg_ws_origins: "none".into(), + arg_ws_hosts: "none".into(), + arg_ws_max_connections: 100, + + // IPC + flag_no_ipc: false, + arg_ipc_path: "$HOME/.parity/jsonrpc.ipc".into(), + arg_ipc_apis: "web3,eth,net,parity,parity_accounts,personal,traces,rpc,secretstore" + .into(), + + // DAPPS + arg_dapps_path: Some("$HOME/.parity/dapps".into()), + flag_no_dapps: 
false, + + // SECRETSTORE + flag_no_secretstore: false, + flag_no_secretstore_http: false, + flag_no_secretstore_auto_migrate: false, + arg_secretstore_acl_contract: Some("registry".into()), + arg_secretstore_contract: Some("none".into()), + arg_secretstore_srv_gen_contract: Some("none".into()), + arg_secretstore_srv_retr_contract: Some("none".into()), + arg_secretstore_doc_store_contract: Some("none".into()), + arg_secretstore_doc_sretr_contract: Some("none".into()), + arg_secretstore_secret: None, + arg_secretstore_admin_public: None, + arg_secretstore_nodes: "".into(), + arg_secretstore_server_set_contract: Some("registry".into()), + arg_secretstore_interface: "local".into(), + arg_secretstore_port: 8083u16, + arg_secretstore_http_interface: "local".into(), + arg_secretstore_http_port: 8082u16, + arg_secretstore_path: "$HOME/.parity/secretstore".into(), + + // IPFS + flag_ipfs_api: false, + arg_ipfs_api_port: 5001u16, + arg_ipfs_api_interface: "local".into(), + arg_ipfs_api_cors: "null".into(), + arg_ipfs_api_hosts: "none".into(), + + // -- Sealing/Mining Options + arg_author: Some("0xdeadbeefcafe0000000000000000000000000001".into()), + arg_engine_signer: Some("0xdeadbeefcafe0000000000000000000000000001".into()), + flag_force_sealing: true, + arg_reseal_on_txs: "all".into(), + arg_reseal_min_period: 4000u64, + arg_reseal_max_period: 60000u64, + flag_reseal_on_uncle: false, + arg_work_queue_size: 20usize, + arg_tx_gas_limit: Some("10000000".into()), + arg_tx_time_limit: Some(100u64), + arg_relay_set: "cheap".into(), + arg_min_gas_price: Some(0u64), + arg_usd_per_tx: "0.0001".into(), + arg_gas_price_percentile: 50usize, + arg_usd_per_eth: "auto".into(), + arg_price_update_period: "hourly".into(), + arg_gas_floor_target: "8000000".into(), + arg_gas_cap: "10000000".into(), + arg_extra_data: Some("Parity".into()), + flag_tx_queue_no_unfamiliar_locals: false, + flag_tx_queue_no_early_reject: false, + arg_tx_queue_size: 8192usize, + arg_tx_queue_per_sender: None, + 
arg_tx_queue_mem_limit: 4u32, + arg_tx_queue_locals: Some("0xdeadbeefcafe0000000000000000000000000000".into()), + arg_tx_queue_strategy: "gas_factor".into(), + arg_tx_queue_ban_count: Some(1u16), + arg_tx_queue_ban_time: Some(180u16), + flag_remove_solved: false, + arg_notify_work: Some("http://localhost:3001".into()), + flag_refuse_service_transactions: false, + flag_infinite_pending_block: false, + arg_max_round_blocks_to_import: 12usize, + + flag_stratum: false, + arg_stratum_interface: "local".to_owned(), + arg_stratum_port: 8008u16, + arg_stratum_secret: None, + + // -- Footprint Options + arg_tracing: "auto".into(), + arg_pruning: "auto".into(), + arg_pruning_history: 64u64, + arg_pruning_memory: 500usize, + arg_cache_size_db: 64u32, + arg_cache_size_blocks: 8u32, + arg_cache_size_queue: 50u32, + arg_cache_size_state: 25u32, + arg_cache_size: Some(128), + flag_fast_and_loose: false, + arg_db_compaction: "ssd".into(), + arg_fat_db: "auto".into(), + flag_scale_verifiers: true, + arg_num_verifiers: Some(6), + + // -- Import/Export Options + arg_export_blocks_from: "1".into(), + arg_export_blocks_to: "latest".into(), + flag_no_seal_check: false, + flag_export_state_no_code: false, + flag_export_state_no_storage: false, + arg_export_state_min_balance: None, + arg_export_state_max_balance: None, + + // -- Snapshot Optons + arg_export_state_at: "latest".into(), + arg_snapshot_at: "latest".into(), + flag_no_periodic_snapshot: false, + arg_snapshot_threads: None, + + // -- Light options. + arg_on_demand_response_time_window: Some(2), + arg_on_demand_request_backoff_start: Some(9), + arg_on_demand_request_backoff_max: Some(15), + arg_on_demand_request_backoff_rounds_max: Some(100), + arg_on_demand_request_consecutive_failures: Some(1), + + // -- Whisper options. 
+ flag_whisper: false, + arg_whisper_pool_size: 20, + + // -- Legacy Options + flag_warp: false, + flag_geth: false, + flag_testnet: false, + flag_import_geth_keys: false, + arg_warp_barrier: None, + arg_datadir: None, + arg_networkid: None, + arg_peers: None, + arg_nodekey: None, + flag_nodiscover: false, + flag_jsonrpc: false, + flag_jsonrpc_off: false, + flag_webapp: false, + flag_dapps_off: false, + flag_rpc: false, + arg_rpcaddr: None, + arg_rpcport: None, + arg_rpcapi: None, + arg_rpccorsdomain: None, + flag_ipcdisable: false, + flag_ipc_off: false, + arg_ipcapi: None, + arg_ipcpath: None, + arg_gasprice: None, + arg_etherbase: None, + arg_extradata: None, + arg_cache: None, + // Legacy-Dapps + arg_dapps_port: Some(8080), + arg_dapps_interface: Some("local".into()), + arg_dapps_hosts: Some("none".into()), + arg_dapps_cors: None, + arg_dapps_user: Some("test_user".into()), + arg_dapps_pass: Some("test_pass".into()), + flag_dapps_apis_all: false, + + // -- Internal Options + flag_can_restart: false, + + // -- Miscellaneous Options + arg_ntp_servers: None, + flag_version: false, + arg_logging: Some("own_tx=trace".into()), + arg_log_file: Some("/var/log/parity.log".into()), + flag_no_color: false, + flag_no_config: false, + } + ); + } + + #[test] + fn should_parse_config_and_return_errors() { + let config1 = Args::parse_config(include_str!("./tests/config.invalid1.toml")); + let config2 = Args::parse_config(include_str!("./tests/config.invalid2.toml")); + let config3 = Args::parse_config(include_str!("./tests/config.invalid3.toml")); + let config4 = Args::parse_config(include_str!("./tests/config.invalid4.toml")); + + match (config1, config2, config3, config4) { + ( + Err(ArgsError::Decode(_)), + Err(ArgsError::Decode(_)), + Err(ArgsError::Decode(_)), + Err(ArgsError::Decode(_)), + ) => {} + (a, b, c, d) => { + assert!( + false, + "Got invalid error types: {:?}, {:?}, {:?}, {:?}", + a, b, c, d + ); + } + } + } + + #[test] + fn should_deserialize_toml_file() { + 
let config: Config = toml::from_str(include_str!("./tests/config.toml")).unwrap(); + + assert_eq!( + config, + Config { + parity: Some(Operating { + mode: Some("dark".into()), + mode_timeout: Some(15u64), + mode_alarm: Some(10u64), + auto_update: None, + auto_update_delay: None, + auto_update_check_frequency: None, + release_track: None, + no_download: None, + no_consensus: None, + chain: Some("./chain.json".into()), + base_path: None, + db_path: None, + keys_path: None, + identity: None, + light: None, + no_hardcoded_sync: None, + no_persistent_txqueue: None, + _legacy_public_node: None, + }), + account: Some(Account { + unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]), + password: Some(vec!["passwdfile path".into()]), + keys_iterations: None, + refresh_time: None, + disable_hardware: None, + fast_unlock: None, + }), + ui: Some(Ui { + path: None, + _legacy_force: None, + _legacy_disable: Some(true), + _legacy_port: None, + _legacy_interface: None, + _legacy_hosts: None, + }), + network: Some(Network { + warp: Some(false), + warp_barrier: None, + port: None, + interface: None, + min_peers: Some(10), + max_peers: Some(20), + max_pending_peers: Some(30), + snapshot_peers: Some(40), + allow_ips: Some("public".into()), + nat: Some("any".into()), + id: None, + bootnodes: None, + discovery: Some(true), + node_key: None, + reserved_peers: Some("./path/to/reserved_peers".into()), + reserved_only: Some(true), + no_serve_light: None, + }), + websockets: Some(Ws { + disable: Some(true), + port: None, + interface: None, + apis: None, + origins: Some(vec!["none".into()]), + hosts: None, + max_connections: None, + }), + rpc: Some(Rpc { + disable: Some(true), + port: Some(8180), + interface: None, + cors: None, + apis: None, + hosts: None, + server_threads: None, + processing_threads: None, + max_payload: None, + keep_alive: None, + experimental_rpcs: None, + poll_lifetime: None, + allow_missing_blocks: None + }), + ipc: Some(Ipc { + disable: None, + path: None, + 
apis: Some(vec!["rpc".into(), "eth".into()]), + }), + dapps: Some(Dapps { + _legacy_disable: None, + _legacy_port: Some(8080), + _legacy_path: None, + _legacy_interface: None, + _legacy_hosts: None, + _legacy_cors: None, + _legacy_user: Some("username".into()), + _legacy_pass: Some("password".into()) + }), + secretstore: Some(SecretStore { + disable: None, + disable_http: None, + disable_auto_migrate: None, + acl_contract: None, + service_contract: None, + service_contract_srv_gen: None, + service_contract_srv_retr: None, + service_contract_doc_store: None, + service_contract_doc_sretr: None, + self_secret: None, + admin_public: None, + nodes: None, + server_set_contract: None, + interface: None, + port: Some(8083), + http_interface: None, + http_port: Some(8082), + path: None, + }), + private_tx: None, + ipfs: Some(Ipfs { + enable: Some(false), + port: Some(5001), + interface: None, + cors: None, + hosts: None, + }), + mining: Some(Mining { + author: Some("0xdeadbeefcafe0000000000000000000000000001".into()), + engine_signer: Some("0xdeadbeefcafe0000000000000000000000000001".into()), + force_sealing: Some(true), + reseal_on_txs: Some("all".into()), + reseal_on_uncle: None, + reseal_min_period: Some(4000), + reseal_max_period: Some(60000), + work_queue_size: None, + relay_set: None, + min_gas_price: None, + gas_price_percentile: None, + usd_per_tx: None, + usd_per_eth: None, + price_update_period: Some("hourly".into()), + gas_floor_target: None, + gas_cap: None, + tx_queue_size: Some(8192), + tx_queue_per_sender: None, + tx_queue_mem_limit: None, + tx_queue_locals: None, + tx_queue_strategy: None, + tx_queue_ban_count: None, + tx_queue_ban_time: None, + tx_queue_no_unfamiliar_locals: None, + tx_queue_no_early_reject: None, + tx_gas_limit: None, + tx_time_limit: None, + extra_data: None, + remove_solved: None, + notify_work: None, + refuse_service_transactions: None, + infinite_pending_block: None, + max_round_blocks_to_import: None, + }), + footprint: Some(Footprint 
{ + tracing: Some("on".into()), + pruning: Some("fast".into()), + pruning_history: Some(64), + pruning_memory: None, + fast_and_loose: None, + cache_size: None, + cache_size_db: Some(256), + cache_size_blocks: Some(16), + cache_size_queue: Some(100), + cache_size_state: Some(25), + db_compaction: Some("ssd".into()), + fat_db: Some("off".into()), + scale_verifiers: Some(false), + num_verifiers: None, + }), + light: Some(Light { + on_demand_response_time_window: Some(2), + on_demand_request_backoff_start: Some(9), + on_demand_request_backoff_max: Some(15), + on_demand_request_backoff_rounds_max: Some(10), + on_demand_request_consecutive_failures: Some(1), + }), + snapshots: Some(Snapshots { + disable_periodic: Some(true), + processing_threads: None, + }), + misc: Some(Misc { + logging: Some("own_tx=trace".into()), + log_file: Some("/var/log/parity.log".into()), + color: Some(true), + ports_shift: Some(0), + unsafe_expose: Some(false), + }), + whisper: Some(Whisper { + enabled: Some(true), + pool_size: Some(50), + }), + stratum: None, + } + ); + } + + #[test] + fn should_not_accept_min_peers_bigger_than_max_peers() { + match Args::parse(&["parity", "--max-peers=39", "--min-peers=40"]) { + Err(ArgsError::PeerConfiguration) => (), + _ => assert_eq!(false, true), + } + } + + #[test] + fn should_accept_max_peers_equal_or_bigger_than_min_peers() { + Args::parse(&["parity", "--max-peers=40", "--min-peers=40"]).unwrap(); + Args::parse(&["parity", "--max-peers=100", "--min-peers=40"]).unwrap(); + } } diff --git a/parity/cli/usage.rs b/parity/cli/usage.rs index 8b06f4f1f..da3118d45 100644 --- a/parity/cli/usage.rs +++ b/parity/cli/usage.rs @@ -15,17 +15,24 @@ // along with Parity Ethereum. If not, see . macro_rules! return_if_parse_error { - ($e:expr) => ( - match $e { - Err(clap_error @ ClapError { kind: ClapErrorKind::ValueValidation, .. 
}) => { - return Err(clap_error); - }, + ($e:expr) => { + match $e { + Err( + clap_error + @ + ClapError { + kind: ClapErrorKind::ValueValidation, + .. + }, + ) => { + return Err(clap_error); + } - // Otherwise, if $e is ClapErrorKind::ArgumentNotFound or Ok(), - // then convert to Option - _ => $e.ok() - } - ) + // Otherwise, if $e is ClapErrorKind::ArgumentNotFound or Ok(), + // then convert to Option + _ => $e.ok(), + } + }; } macro_rules! if_option { @@ -47,46 +54,46 @@ macro_rules! if_vec { } macro_rules! if_option_vec { - (Option>, THEN {$then:expr} ELSE {$otherwise:expr}) => ( - $then - ); - (Option<$type:ty>, THEN {$then:expr} ELSE {$otherwise:expr}) => ( - $otherwise - ); + (Option>, THEN {$then:expr} ELSE {$otherwise:expr}) => { + $then + }; + (Option<$type:ty>, THEN {$then:expr} ELSE {$otherwise:expr}) => { + $otherwise + }; } macro_rules! inner_option_type { - (Option<$type:ty>) => ( - $type - ) + (Option<$type:ty>) => { + $type + }; } macro_rules! inner_vec_type { - (Vec<$type:ty>) => ( - $type - ) + (Vec<$type:ty>) => { + $type + }; } macro_rules! inner_option_vec_type { - (Option>) => ( - String - ) + (Option>) => { + String + }; } macro_rules! usage_with_ident { - ($name:expr, $usage:expr, $help:expr) => ( - if $usage.contains("<") { - format!("<{}> {} '{}'",$name, $usage, $help) - } else { - format!("[{}] {} '{}'",$name, $usage, $help) - } - ); + ($name:expr, $usage:expr, $help:expr) => { + if $usage.contains("<") { + format!("<{}> {} '{}'", $name, $usage, $help) + } else { + format!("[{}] {} '{}'", $name, $usage, $help) + } + }; } macro_rules! underscore_to_hyphen { - ($e:expr) => ( - str::replace($e, "_", "-") - ) + ($e:expr) => { + str::replace($e, "_", "-") + }; } macro_rules! 
usage { diff --git a/parity/configuration.rs b/parity/configuration.rs index 17f6d05a0..de2af80a8 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -14,1070 +14,1256 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::time::Duration; -use std::io::Read; -use std::net::{SocketAddr, ToSocketAddrs}; -use std::num::NonZeroU32; -use std::path::PathBuf; -use std::collections::{HashSet, BTreeMap}; -use std::iter::FromIterator; -use std::cmp; -use cli::{Args, ArgsError}; -use hash::keccak; -use ethereum_types::{U256, H256, Address}; -use parity_version::{version_data, version}; -use bytes::Bytes; use ansi_term::Colour; -use sync::{NetworkConfiguration, validate_node_url, self}; -use ethkey::{Secret, Public}; -use ethcore::client::{VMType}; -use ethcore::miner::{stratum, MinerOptions}; -use ethcore::snapshot::SnapshotConfiguration; -use ethcore::verification::queue::VerifierSettings; +use bytes::Bytes; +use cli::{Args, ArgsError}; +use ethcore::{ + client::VMType, + miner::{stratum, MinerOptions}, + snapshot::SnapshotConfiguration, + verification::queue::VerifierSettings, +}; +use ethereum_types::{Address, H256, U256}; +use ethkey::{Public, Secret}; +use hash::keccak; use miner::pool; use num_cpus; +use parity_version::{version, version_data}; +use std::{ + cmp, + collections::{BTreeMap, HashSet}, + io::Read, + iter::FromIterator, + net::{SocketAddr, ToSocketAddrs}, + num::NonZeroU32, + path::PathBuf, + time::Duration, +}; +use sync::{self, validate_node_url, NetworkConfiguration}; -use rpc::{IpcConfiguration, HttpConfiguration, WsConfiguration}; -use parity_rpc::NetworkSettings; +use account::{AccountCmd, ImportAccounts, ImportFromGethAccounts, ListAccounts, NewAccount}; +use blockchain::{ + BlockchainCmd, ExportBlockchain, ExportState, ImportBlockchain, KillBlockchain, ResetBlockchain, +}; use cache::CacheConfig; -use helpers::{to_duration, to_mode, to_block_id, to_u256, 
to_pending_set, to_price, geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address, to_queue_strategy, to_queue_penalization}; -use dir::helpers::{replace_home, replace_home_and_local}; -use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras, SpecType}; +use dir::{ + self, default_data_path, default_hypervisor_path, default_local_path, + helpers::{replace_home, replace_home_and_local}, + Directories, +}; use ethcore_logger::Config as LogConfig; -use dir::{self, Directories, default_hypervisor_path, default_local_path, default_data_path}; -use ipfs::Configuration as IpfsConfiguration; -use ethcore_private_tx::{ProviderConfig, EncryptorConfig}; -use secretstore::{NodeSecretKey, Configuration as SecretStoreConfiguration, ContractAddress as SecretStoreContractAddress}; -use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack}; -use run::RunCmd; -use types::data_format::DataFormat; -use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, KillBlockchain, ExportState, ResetBlockchain}; +use ethcore_private_tx::{EncryptorConfig, ProviderConfig}; use export_hardcoded_sync::ExportHsyncCmd; +use helpers::{ + geth_ipc_path, parity_ipc_path, to_address, to_addresses, to_block_id, to_bootnodes, + to_duration, to_mode, to_pending_set, to_price, to_queue_penalization, to_queue_strategy, + to_u256, +}; +use ipfs::Configuration as IpfsConfiguration; +use network::IpFilter; +use params::{AccountsConfig, GasPricerConfig, MinerExtras, ResealPolicy, SpecType}; +use parity_rpc::NetworkSettings; use presale::ImportWallet; -use account::{AccountCmd, NewAccount, ListAccounts, ImportAccounts, ImportFromGethAccounts}; +use rpc::{HttpConfiguration, IpcConfiguration, WsConfiguration}; +use run::RunCmd; +use secretstore::{ + Configuration as SecretStoreConfiguration, ContractAddress as SecretStoreContractAddress, + NodeSecretKey, +}; use snapshot::{self, SnapshotCommand}; -use network::{IpFilter}; +use types::data_format::DataFormat; +use 
updater::{ReleaseTrack, UpdateFilter, UpdatePolicy}; const DEFAULT_MAX_PEERS: u16 = 50; const DEFAULT_MIN_PEERS: u16 = 25; -pub const ETHERSCAN_ETH_PRICE_ENDPOINT: &str = "https://api.etherscan.io/api?module=stats&action=ethprice"; +pub const ETHERSCAN_ETH_PRICE_ENDPOINT: &str = + "https://api.etherscan.io/api?module=stats&action=ethprice"; #[derive(Debug, PartialEq)] pub enum Cmd { - Run(RunCmd), - Version, - Account(AccountCmd), - ImportPresaleWallet(ImportWallet), - Blockchain(BlockchainCmd), - SignerToken(WsConfiguration, LogConfig), - SignerSign { - id: Option, - pwfile: Option, - port: u16, - authfile: PathBuf, - }, - SignerList { - port: u16, - authfile: PathBuf - }, - SignerReject { - id: Option, - port: u16, - authfile: PathBuf - }, - Snapshot(SnapshotCommand), - Hash(Option), - ExportHardcodedSync(ExportHsyncCmd), + Run(RunCmd), + Version, + Account(AccountCmd), + ImportPresaleWallet(ImportWallet), + Blockchain(BlockchainCmd), + SignerToken(WsConfiguration, LogConfig), + SignerSign { + id: Option, + pwfile: Option, + port: u16, + authfile: PathBuf, + }, + SignerList { + port: u16, + authfile: PathBuf, + }, + SignerReject { + id: Option, + port: u16, + authfile: PathBuf, + }, + Snapshot(SnapshotCommand), + Hash(Option), + ExportHardcodedSync(ExportHsyncCmd), } pub struct Execute { - pub logger: LogConfig, - pub cmd: Cmd, + pub logger: LogConfig, + pub cmd: Cmd, } /// Configuration for the Parity client. #[derive(Debug, PartialEq)] pub struct Configuration { - /// Arguments to be interpreted. - pub args: Args, + /// Arguments to be interpreted. + pub args: Args, } impl Configuration { - /// Parses a configuration from a list of command line arguments. 
- /// - /// # Example - /// - /// ``` - /// let _cfg = parity_ethereum::Configuration::parse_cli(&["--light", "--chain", "kovan"]).unwrap(); - /// ``` - pub fn parse_cli>(command: &[S]) -> Result { - let config = Configuration { - args: Args::parse(command)?, - }; - - Ok(config) - } - - pub(crate) fn into_command(self) -> Result { - let dirs = self.directories(); - let pruning = self.args.arg_pruning.parse()?; - let pruning_history = self.args.arg_pruning_history; - let vm_type = self.vm_type()?; - let spec = self.chain()?; - let mode = match self.args.arg_mode.as_ref() { - "last" => None, - mode => Some(to_mode(&mode, self.args.arg_mode_timeout, self.args.arg_mode_alarm)?), - }; - let update_policy = self.update_policy()?; - let logger_config = self.logger_config(); - let ws_conf = self.ws_config()?; - let snapshot_conf = self.snapshot_config()?; - let http_conf = self.http_config()?; - let ipc_conf = self.ipc_config()?; - let net_conf = self.net_config()?; - let network_id = self.network_id(); - let cache_config = self.cache_config(); - let tracing = self.args.arg_tracing.parse()?; - let fat_db = self.args.arg_fat_db.parse()?; - let compaction = self.args.arg_db_compaction.parse()?; - let warp_sync = !self.args.flag_no_warp; - let geth_compatibility = self.args.flag_geth; - let experimental_rpcs = self.args.flag_jsonrpc_experimental; - let ipfs_conf = self.ipfs_config(); - let secretstore_conf = self.secretstore_config()?; - let format = self.format()?; - let keys_iterations = NonZeroU32::new(self.args.arg_keys_iterations) - .ok_or_else(|| "--keys-iterations must be non-zero")?; - - let cmd = if self.args.flag_version { - Cmd::Version - } else if self.args.cmd_signer { - let authfile = ::signer::codes_path(&ws_conf.signer_path); - - if self.args.cmd_signer_new_token { - Cmd::SignerToken(ws_conf, logger_config.clone()) - } else if self.args.cmd_signer_sign { - let pwfile = self.accounts_config()?.password_files.first().map(|pwfile| { - PathBuf::from(pwfile) - }); 
- Cmd::SignerSign { - id: self.args.arg_signer_sign_id, - pwfile: pwfile, - port: ws_conf.port, - authfile: authfile, - } - } else if self.args.cmd_signer_reject { - Cmd::SignerReject { - id: self.args.arg_signer_reject_id, - port: ws_conf.port, - authfile: authfile, - } - } else if self.args.cmd_signer_list { - Cmd::SignerList { - port: ws_conf.port, - authfile: authfile, - } - } else { - unreachable!(); - } - } else if self.args.cmd_tools && self.args.cmd_tools_hash { - Cmd::Hash(self.args.arg_tools_hash_file) - } else if self.args.cmd_db && self.args.cmd_db_reset { - Cmd::Blockchain(BlockchainCmd::Reset(ResetBlockchain { - dirs, - spec, - pruning, - pruning_history, - pruning_memory: self.args.arg_pruning_memory, - tracing, - fat_db, - compaction, - cache_config, - num: self.args.arg_db_reset_num, - })) - } else if self.args.cmd_db && self.args.cmd_db_kill { - Cmd::Blockchain(BlockchainCmd::Kill(KillBlockchain { - spec: spec, - dirs: dirs, - pruning: pruning, - })) - } else if self.args.cmd_account { - let account_cmd = if self.args.cmd_account_new { - let new_acc = NewAccount { - iterations: keys_iterations, - path: dirs.keys, - spec: spec, - password_file: self.accounts_config()?.password_files.first().map(|x| x.to_owned()), - }; - AccountCmd::New(new_acc) - } else if self.args.cmd_account_list { - let list_acc = ListAccounts { - path: dirs.keys, - spec: spec, - }; - AccountCmd::List(list_acc) - } else if self.args.cmd_account_import { - let import_acc = ImportAccounts { - from: self.args.arg_account_import_path.expect("CLI argument is required; qed").clone(), - to: dirs.keys, - spec: spec, - }; - AccountCmd::Import(import_acc) - } else { - unreachable!(); - }; - Cmd::Account(account_cmd) - } else if self.args.flag_import_geth_keys { - let account_cmd = AccountCmd::ImportFromGeth( - ImportFromGethAccounts { - spec: spec, - to: dirs.keys, - testnet: self.args.flag_testnet - } - ); - Cmd::Account(account_cmd) - } else if self.args.cmd_wallet { - let presale_cmd 
= ImportWallet { - iterations: keys_iterations, - path: dirs.keys, - spec: spec, - wallet_path: self.args.arg_wallet_import_path.clone().unwrap(), - password_file: self.accounts_config()?.password_files.first().map(|x| x.to_owned()), - }; - Cmd::ImportPresaleWallet(presale_cmd) - } else if self.args.cmd_import { - let import_cmd = ImportBlockchain { - spec: spec, - cache_config: cache_config, - dirs: dirs, - file_path: self.args.arg_import_file.clone(), - format: format, - pruning: pruning, - pruning_history: pruning_history, - pruning_memory: self.args.arg_pruning_memory, - compaction: compaction, - tracing: tracing, - fat_db: fat_db, - vm_type: vm_type, - check_seal: !self.args.flag_no_seal_check, - with_color: logger_config.color, - verifier_settings: self.verifier_settings(), - light: self.args.flag_light, - max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, - }; - Cmd::Blockchain(BlockchainCmd::Import(import_cmd)) - } else if self.args.cmd_export { - if self.args.cmd_export_blocks { - let export_cmd = ExportBlockchain { - spec: spec, - cache_config: cache_config, - dirs: dirs, - file_path: self.args.arg_export_blocks_file.clone(), - format: format, - pruning: pruning, - pruning_history: pruning_history, - pruning_memory: self.args.arg_pruning_memory, - compaction: compaction, - tracing: tracing, - fat_db: fat_db, - from_block: to_block_id(&self.args.arg_export_blocks_from)?, - to_block: to_block_id(&self.args.arg_export_blocks_to)?, - check_seal: !self.args.flag_no_seal_check, - max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, - }; - Cmd::Blockchain(BlockchainCmd::Export(export_cmd)) - } else if self.args.cmd_export_state { - let export_cmd = ExportState { - spec: spec, - cache_config: cache_config, - dirs: dirs, - file_path: self.args.arg_export_state_file.clone(), - format: format, - pruning: pruning, - pruning_history: pruning_history, - pruning_memory: self.args.arg_pruning_memory, - compaction: compaction, - 
tracing: tracing, - fat_db: fat_db, - at: to_block_id(&self.args.arg_export_state_at)?, - storage: !self.args.flag_export_state_no_storage, - code: !self.args.flag_export_state_no_code, - min_balance: self.args.arg_export_state_min_balance.and_then(|s| to_u256(&s).ok()), - max_balance: self.args.arg_export_state_max_balance.and_then(|s| to_u256(&s).ok()), - max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, - }; - Cmd::Blockchain(BlockchainCmd::ExportState(export_cmd)) - } else { - unreachable!(); - } - } else if self.args.cmd_snapshot { - let snapshot_cmd = SnapshotCommand { - cache_config: cache_config, - dirs: dirs, - spec: spec, - pruning: pruning, - pruning_history: pruning_history, - pruning_memory: self.args.arg_pruning_memory, - tracing: tracing, - fat_db: fat_db, - compaction: compaction, - file_path: self.args.arg_snapshot_file.clone(), - kind: snapshot::Kind::Take, - block_at: to_block_id(&self.args.arg_snapshot_at)?, - max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, - snapshot_conf: snapshot_conf, - }; - Cmd::Snapshot(snapshot_cmd) - } else if self.args.cmd_restore { - let restore_cmd = SnapshotCommand { - cache_config: cache_config, - dirs: dirs, - spec: spec, - pruning: pruning, - pruning_history: pruning_history, - pruning_memory: self.args.arg_pruning_memory, - tracing: tracing, - fat_db: fat_db, - compaction: compaction, - file_path: self.args.arg_restore_file.clone(), - kind: snapshot::Kind::Restore, - block_at: to_block_id("latest")?, // unimportant. 
- max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, - snapshot_conf: snapshot_conf, - }; - Cmd::Snapshot(restore_cmd) - } else if self.args.cmd_export_hardcoded_sync { - let export_hs_cmd = ExportHsyncCmd { - cache_config: cache_config, - dirs: dirs, - spec: spec, - pruning: pruning, - compaction: compaction, - }; - Cmd::ExportHardcodedSync(export_hs_cmd) - } else { - let daemon = if self.args.cmd_daemon { - Some(self.args.arg_daemon_pid_file.clone().expect("CLI argument is required; qed")) - } else { - None - }; - - let verifier_settings = self.verifier_settings(); - let whisper_config = self.whisper_config(); - let (private_provider_conf, private_enc_conf, private_tx_enabled) = self.private_provider_config()?; - - let run_cmd = RunCmd { - cache_config: cache_config, - dirs: dirs, - spec: spec, - pruning: pruning, - pruning_history: pruning_history, - pruning_memory: self.args.arg_pruning_memory, - daemon: daemon, - logger_config: logger_config.clone(), - miner_options: self.miner_options()?, - gas_price_percentile: self.args.arg_gas_price_percentile, - poll_lifetime: self.args.arg_poll_lifetime, - ws_conf: ws_conf, - snapshot_conf: snapshot_conf, - http_conf: http_conf, - ipc_conf: ipc_conf, - net_conf: net_conf, - network_id: network_id, - acc_conf: self.accounts_config()?, - gas_pricer_conf: self.gas_pricer_config()?, - miner_extras: self.miner_extras()?, - stratum: self.stratum_options()?, - update_policy: update_policy, - allow_missing_blocks: self.args.flag_jsonrpc_allow_missing_blocks, - mode: mode, - tracing: tracing, - fat_db: fat_db, - compaction: compaction, - vm_type: vm_type, - warp_sync: warp_sync, - warp_barrier: self.args.arg_warp_barrier, - geth_compatibility: geth_compatibility, - experimental_rpcs, - net_settings: self.network_settings()?, - ipfs_conf: ipfs_conf, - secretstore_conf: secretstore_conf, - private_provider_conf: private_provider_conf, - private_encryptor_conf: private_enc_conf, - private_tx_enabled, - name: 
self.args.arg_identity, - custom_bootnodes: self.args.arg_bootnodes.is_some(), - check_seal: !self.args.flag_no_seal_check, - download_old_blocks: !self.args.flag_no_ancient_blocks, - verifier_settings: verifier_settings, - serve_light: !self.args.flag_no_serve_light, - light: self.args.flag_light, - no_persistent_txqueue: self.args.flag_no_persistent_txqueue, - whisper: whisper_config, - no_hardcoded_sync: self.args.flag_no_hardcoded_sync, - max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, - on_demand_response_time_window: self.args.arg_on_demand_response_time_window, - on_demand_request_backoff_start: self.args.arg_on_demand_request_backoff_start, - on_demand_request_backoff_max: self.args.arg_on_demand_request_backoff_max, - on_demand_request_backoff_rounds_max: self.args.arg_on_demand_request_backoff_rounds_max, - on_demand_request_consecutive_failures: self.args.arg_on_demand_request_consecutive_failures, - }; - Cmd::Run(run_cmd) - }; - - Ok(Execute { - logger: logger_config, - cmd: cmd, - }) - } - - fn vm_type(&self) -> Result { - Ok(VMType::Interpreter) - } - - fn miner_extras(&self) -> Result { - let floor = to_u256(&self.args.arg_gas_floor_target)?; - let ceil = to_u256(&self.args.arg_gas_cap)?; - let extras = MinerExtras { - author: self.author()?, - extra_data: self.extra_data()?, - gas_range_target: (floor, ceil), - engine_signer: self.engine_signer()?, - work_notify: self.work_notify(), - local_accounts: HashSet::from_iter(to_addresses(&self.args.arg_tx_queue_locals)?.into_iter()), - }; - - Ok(extras) - } - - fn author(&self) -> Result { - to_address(self.args.arg_etherbase.clone().or(self.args.arg_author.clone())) - } - - fn engine_signer(&self) -> Result { - to_address(self.args.arg_engine_signer.clone()) - } - - fn format(&self) -> Result, String> { - match self.args.arg_import_format.clone() - .or(self.args.arg_export_blocks_format.clone()) - .or(self.args.arg_export_state_format.clone()) { - Some(ref f) => 
Ok(Some(f.parse()?)), - None => Ok(None), - } - } - - fn cache_config(&self) -> CacheConfig { - match self.args.arg_cache_size.or(self.args.arg_cache) { - Some(size) => CacheConfig::new_with_total_cache_size(size), - None => CacheConfig::new( - self.args.arg_cache_size_db, - self.args.arg_cache_size_blocks, - self.args.arg_cache_size_queue, - self.args.arg_cache_size_state, - ), - } - } - - /// returns logger config - pub fn logger_config(&self) -> LogConfig { - LogConfig { - mode: self.args.arg_logging.clone(), - color: !self.args.flag_no_color && !cfg!(windows), - file: self.args.arg_log_file.as_ref().map(|log_file| replace_home(&self.directories().base, log_file)), - } - } - - fn chain(&self) -> Result { - let name = if self.args.flag_testnet { - "testnet".to_owned() - } else { - self.args.arg_chain.clone() - }; - - Ok(name.parse()?) - } - - fn is_dev_chain(&self) -> Result { - Ok(self.chain()? == SpecType::Dev) - } - - fn max_peers(&self) -> u32 { - self.args.arg_max_peers - .or(cmp::max(self.args.arg_min_peers, Some(DEFAULT_MAX_PEERS))) - .unwrap_or(DEFAULT_MAX_PEERS) as u32 - } - - fn ip_filter(&self) -> Result { - match IpFilter::parse(self.args.arg_allow_ips.as_str()) { - Ok(allow_ip) => Ok(allow_ip), - Err(_) => Err("Invalid IP filter value".to_owned()), - } - } - - fn min_peers(&self) -> u32 { - self.args.arg_min_peers - .or(cmp::min(self.args.arg_max_peers, Some(DEFAULT_MIN_PEERS))) - .unwrap_or(DEFAULT_MIN_PEERS) as u32 - } - - fn max_pending_peers(&self) -> u32 { - self.args.arg_max_pending_peers as u32 - } - - fn snapshot_peers(&self) -> u32 { - self.args.arg_snapshot_peers as u32 - } - - fn work_notify(&self) -> Vec { - self.args.arg_notify_work.as_ref().map_or_else(Vec::new, |s| s.split(',').map(|s| s.to_owned()).collect()) - } - - fn accounts_config(&self) -> Result { - let keys_iterations = NonZeroU32::new(self.args.arg_keys_iterations) - .ok_or_else(|| "--keys-iterations must be non-zero")?; - let cfg = AccountsConfig { - iterations: 
keys_iterations, - refresh_time: self.args.arg_accounts_refresh, - testnet: self.args.flag_testnet, - password_files: self.args.arg_password.iter().map(|s| replace_home(&self.directories().base, s)).collect(), - unlocked_accounts: to_addresses(&self.args.arg_unlock)?, - enable_hardware_wallets: !self.args.flag_no_hardware_wallets, - enable_fast_unlock: self.args.flag_fast_unlock, - }; - - Ok(cfg) - } - - fn stratum_options(&self) -> Result, String> { - if self.args.flag_stratum { - Ok(Some(stratum::Options { - io_path: self.directories().db, - listen_addr: self.stratum_interface(), - port: self.args.arg_ports_shift + self.args.arg_stratum_port, - secret: self.args.arg_stratum_secret.as_ref().map(|s| s.parse::().unwrap_or_else(|_| keccak(s))), - })) - } else { Ok(None) } - } - - fn miner_options(&self) -> Result { - let is_dev_chain = self.is_dev_chain()?; - if is_dev_chain && self.args.flag_force_sealing && self.args.arg_reseal_min_period == 0 { - return Err("Force sealing can't be used with reseal_min_period = 0".into()); - } - - let reseal = self.args.arg_reseal_on_txs.parse::()?; - - let options = MinerOptions { - force_sealing: self.args.flag_force_sealing, - reseal_on_external_tx: reseal.external, - reseal_on_own_tx: reseal.own, - reseal_on_uncle: self.args.flag_reseal_on_uncle, - reseal_min_period: Duration::from_millis(self.args.arg_reseal_min_period), - reseal_max_period: Duration::from_millis(self.args.arg_reseal_max_period), - - pending_set: to_pending_set(&self.args.arg_relay_set)?, - work_queue_size: self.args.arg_work_queue_size, - enable_resubmission: !self.args.flag_remove_solved, - infinite_pending_block: self.args.flag_infinite_pending_block, - - tx_queue_penalization: to_queue_penalization(self.args.arg_tx_time_limit)?, - tx_queue_strategy: to_queue_strategy(&self.args.arg_tx_queue_strategy)?, - tx_queue_no_unfamiliar_locals: self.args.flag_tx_queue_no_unfamiliar_locals, - refuse_service_transactions: self.args.flag_refuse_service_transactions, - 
- pool_limits: self.pool_limits()?, - pool_verification_options: self.pool_verification_options()?, - }; - - Ok(options) - } - - fn pool_limits(&self) -> Result { - let max_count = self.args.arg_tx_queue_size; - - Ok(pool::Options { - max_count, - max_per_sender: self.args.arg_tx_queue_per_sender.unwrap_or_else(|| cmp::max(16, max_count / 100)), - max_mem_usage: if self.args.arg_tx_queue_mem_limit > 0 { - self.args.arg_tx_queue_mem_limit as usize * 1024 * 1024 - } else { - usize::max_value() - }, - }) - } - - fn pool_verification_options(&self) -> Result{ - Ok(pool::verifier::Options { - // NOTE min_gas_price and block_gas_limit will be overwritten right after start. - minimal_gas_price: U256::from(20_000_000) * 1_000u32, - block_gas_limit: U256::max_value(), - tx_gas_limit: match self.args.arg_tx_gas_limit { - Some(ref d) => to_u256(d)?, - None => U256::max_value(), - }, - no_early_reject: self.args.flag_tx_queue_no_early_reject, - }) - } - - fn secretstore_config(&self) -> Result { - Ok(SecretStoreConfiguration { - enabled: self.secretstore_enabled(), - http_enabled: self.secretstore_http_enabled(), - auto_migrate_enabled: self.secretstore_auto_migrate_enabled(), - acl_check_contract_address: self.secretstore_acl_check_contract_address()?, - service_contract_address: self.secretstore_service_contract_address()?, - service_contract_srv_gen_address: self.secretstore_service_contract_srv_gen_address()?, - service_contract_srv_retr_address: self.secretstore_service_contract_srv_retr_address()?, - service_contract_doc_store_address: self.secretstore_service_contract_doc_store_address()?, - service_contract_doc_sretr_address: self.secretstore_service_contract_doc_sretr_address()?, - self_secret: self.secretstore_self_secret()?, - nodes: self.secretstore_nodes()?, - key_server_set_contract_address: self.secretstore_key_server_set_contract_address()?, - interface: self.secretstore_interface(), - port: self.args.arg_ports_shift + self.args.arg_secretstore_port, - 
http_interface: self.secretstore_http_interface(), - http_port: self.args.arg_ports_shift + self.args.arg_secretstore_http_port, - data_path: self.directories().secretstore, - admin_public: self.secretstore_admin_public()?, - }) - } - - fn ipfs_config(&self) -> IpfsConfiguration { - IpfsConfiguration { - enabled: self.args.flag_ipfs_api, - port: self.args.arg_ports_shift + self.args.arg_ipfs_api_port, - interface: self.ipfs_interface(), - cors: self.ipfs_cors(), - hosts: self.ipfs_hosts(), - } - } - - fn gas_pricer_config(&self) -> Result { - fn wei_per_gas(usd_per_tx: f32, usd_per_eth: f32) -> U256 { - let wei_per_usd: f32 = 1.0e18 / usd_per_eth; - let gas_per_tx: f32 = 21000.0; - let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx; - U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap() - } - - if let Some(dec) = self.args.arg_gasprice.as_ref() { - return Ok(GasPricerConfig::Fixed(to_u256(dec)?)); - } else if let Some(dec) = self.args.arg_min_gas_price { - return Ok(GasPricerConfig::Fixed(U256::from(dec))); - } else if self.chain()? 
!= SpecType::Foundation { - return Ok(GasPricerConfig::Fixed(U256::zero())); - } - - let usd_per_tx = to_price(&self.args.arg_usd_per_tx)?; - - if "auto" == self.args.arg_usd_per_eth { - Ok(GasPricerConfig::Calibrated { - usd_per_tx: usd_per_tx, - recalibration_period: to_duration(self.args.arg_price_update_period.as_str())?, - api_endpoint: ETHERSCAN_ETH_PRICE_ENDPOINT.to_string(), - }) - } else if let Ok(usd_per_eth_parsed) = to_price(&self.args.arg_usd_per_eth) { - let wei_per_gas = wei_per_gas(usd_per_tx, usd_per_eth_parsed); - - info!( - "Using a fixed conversion rate of Ξ1 = {} ({} wei/gas)", - Colour::White.bold().paint(format!("US${:.2}", usd_per_eth_parsed)), - Colour::Yellow.bold().paint(format!("{}", wei_per_gas)) - ); - - Ok(GasPricerConfig::Fixed(wei_per_gas)) - } else { - Ok(GasPricerConfig::Calibrated { - usd_per_tx: usd_per_tx, - recalibration_period: to_duration(self.args.arg_price_update_period.as_str())?, - api_endpoint: self.args.arg_usd_per_eth.clone(), - }) - } - } - - fn extra_data(&self) -> Result { - match self.args.arg_extradata.as_ref().or(self.args.arg_extra_data.as_ref()) { - Some(x) if x.len() <= 32 => Ok(x.as_bytes().to_owned()), - None => Ok(version_data()), - Some(_) => Err("Extra data must be at most 32 characters".into()), - } - } - - fn init_reserved_nodes(&self) -> Result, String> { - use std::fs::File; - - match self.args.arg_reserved_peers { - Some(ref path) => { - let path = replace_home(&self.directories().base, path); - - let mut buffer = String::new(); - let mut node_file = File::open(&path).map_err(|e| format!("Error opening reserved nodes file: {}", e))?; - node_file.read_to_string(&mut buffer).map_err(|_| "Error reading reserved node file")?; - let lines = buffer.lines().map(|s| s.trim().to_owned()).filter(|s| !s.is_empty() && !s.starts_with("#")).collect::>(); - - for line in &lines { - match validate_node_url(line).map(Into::into) { - None => continue, - Some(sync::ErrorKind::AddressResolve(_)) => return 
Err(format!("Failed to resolve hostname of a boot node: {}", line)), - Some(_) => return Err(format!("Invalid node address format given for a boot node: {}", line)), - } - } - - Ok(lines) - }, - None => Ok(Vec::new()) - } - } - - fn net_addresses(&self) -> Result<(SocketAddr, Option), String> { - let port = self.args.arg_ports_shift + self.args.arg_port; - let listen_address = SocketAddr::new(self.interface(&self.args.arg_interface).parse().unwrap(), port); - let public_address = if self.args.arg_nat.starts_with("extip:") { - let host = self.args.arg_nat[6..].split(':').next().expect("split has at least one part; qed"); - let host = format!("{}:{}", host, port); - match host.to_socket_addrs() { - Ok(mut addr_iter) => { - if let Some(addr) = addr_iter.next() { - Some(addr) - } else { - return Err(format!("Invalid host given with `--nat extip:{}`", &self.args.arg_nat[6..])) - } - }, - Err(_) => return Err(format!("Invalid host given with `--nat extip:{}`", &self.args.arg_nat[6..])) - } - } else { - None - }; - Ok((listen_address, public_address)) - } - - fn net_config(&self) -> Result { - let mut ret = NetworkConfiguration::new(); - ret.nat_enabled = self.args.arg_nat == "any" || self.args.arg_nat == "upnp"; - ret.boot_nodes = to_bootnodes(&self.args.arg_bootnodes)?; - let (listen, public) = self.net_addresses()?; - ret.listen_address = Some(format!("{}", listen)); - ret.public_address = public.map(|p| format!("{}", p)); - ret.use_secret = match self.args.arg_node_key.as_ref() - .map(|s| s.parse::().or_else(|_| Secret::from_unsafe_slice(&keccak(s))).map_err(|e| format!("Invalid key: {:?}", e)) - ) { - None => None, - Some(Ok(key)) => Some(key), - Some(Err(err)) => return Err(err), - }; - ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover; - ret.max_peers = self.max_peers(); - ret.min_peers = self.min_peers(); - ret.snapshot_peers = self.snapshot_peers(); - ret.ip_filter = self.ip_filter()?; - ret.max_pending_peers = 
self.max_pending_peers(); - let mut net_path = PathBuf::from(self.directories().base); - net_path.push("network"); - ret.config_path = Some(net_path.to_str().unwrap().to_owned()); - ret.reserved_nodes = self.init_reserved_nodes()?; - ret.allow_non_reserved = !self.args.flag_reserved_only; - ret.client_version = { - let mut client_version = version(); - if !self.args.arg_identity.is_empty() { - // Insert name after the "Parity-Ethereum/" at the beginning of version string. - let idx = client_version.find('/').unwrap_or(client_version.len()); - client_version.insert_str(idx, &format!("/{}", self.args.arg_identity)); - } - client_version - }; - Ok(ret) - } - - fn network_id(&self) -> Option { - self.args.arg_network_id.or(self.args.arg_networkid) - } - - fn rpc_apis(&self) -> String { - let mut apis: Vec<&str> = self.args.arg_rpcapi - .as_ref() - .unwrap_or(&self.args.arg_jsonrpc_apis) - .split(",") - .collect(); - - if self.args.flag_geth { - apis.insert(0, "personal"); - } - - apis.join(",") - } - - fn cors(cors: &str) -> Option> { - match cors { - "none" => return Some(Vec::new()), - "*" | "all" | "any" => return None, - _ => {}, - } - - Some(cors.split(',').map(Into::into).collect()) - } - - fn rpc_cors(&self) -> Option> { - let cors = self.args.arg_rpccorsdomain.clone().unwrap_or_else(|| self.args.arg_jsonrpc_cors.to_owned()); - Self::cors(&cors) - } - - fn ipfs_cors(&self) -> Option> { - Self::cors(self.args.arg_ipfs_api_cors.as_ref()) - } - - fn hosts(&self, hosts: &str, interface: &str) -> Option> { - if self.args.flag_unsafe_expose { - return None; - } - - if interface == "0.0.0.0" && hosts == "none" { - return None; - } - - Self::parse_hosts(hosts) - } - - fn parse_hosts(hosts: &str) -> Option> { - match hosts { - "none" => return Some(Vec::new()), - "*" | "all" | "any" => return None, - _ => {} - } - let hosts = hosts.split(',').map(Into::into).collect(); - Some(hosts) - } - - fn rpc_hosts(&self) -> Option> { - self.hosts(&self.args.arg_jsonrpc_hosts, 
&self.rpc_interface()) - } - - fn ws_hosts(&self) -> Option> { - self.hosts(&self.args.arg_ws_hosts, &self.ws_interface()) - } - - fn ws_origins(&self) -> Option> { - if self.args.flag_unsafe_expose { - return None; - } - - Self::parse_hosts(&self.args.arg_ws_origins) - } - - fn ipfs_hosts(&self) -> Option> { - self.hosts(&self.args.arg_ipfs_api_hosts, &self.ipfs_interface()) - } - - fn ipc_config(&self) -> Result { - let conf = IpcConfiguration { - enabled: !(self.args.flag_ipcdisable || self.args.flag_ipc_off || self.args.flag_no_ipc), - socket_addr: self.ipc_path(), - apis: { - let mut apis = self.args.arg_ipcapi.clone().unwrap_or(self.args.arg_ipc_apis.clone()); - if self.args.flag_geth { - if !apis.is_empty() { - apis.push_str(","); - } - apis.push_str("personal"); - } - apis.parse()? - }, - }; - - Ok(conf) - } - - fn http_config(&self) -> Result { - let conf = HttpConfiguration { - enabled: self.rpc_enabled(), - interface: self.rpc_interface(), - port: self.args.arg_ports_shift + self.args.arg_rpcport.unwrap_or(self.args.arg_jsonrpc_port), - apis: self.rpc_apis().parse()?, - hosts: self.rpc_hosts(), - cors: self.rpc_cors(), - server_threads: match self.args.arg_jsonrpc_server_threads { - Some(threads) if threads > 0 => threads, - _ => 1, - }, - processing_threads: self.args.arg_jsonrpc_threads, - max_payload: match self.args.arg_jsonrpc_max_payload { - Some(max) if max > 0 => max as usize, - _ => 5usize, - }, - keep_alive: !self.args.flag_jsonrpc_no_keep_alive, - }; - - Ok(conf) - } - - fn ws_config(&self) -> Result { - let support_token_api = + /// Parses a configuration from a list of command line arguments. 
+ /// + /// # Example + /// + /// ``` + /// let _cfg = parity_ethereum::Configuration::parse_cli(&["--light", "--chain", "kovan"]).unwrap(); + /// ``` + pub fn parse_cli>(command: &[S]) -> Result { + let config = Configuration { + args: Args::parse(command)?, + }; + + Ok(config) + } + + pub(crate) fn into_command(self) -> Result { + let dirs = self.directories(); + let pruning = self.args.arg_pruning.parse()?; + let pruning_history = self.args.arg_pruning_history; + let vm_type = self.vm_type()?; + let spec = self.chain()?; + let mode = match self.args.arg_mode.as_ref() { + "last" => None, + mode => Some(to_mode( + &mode, + self.args.arg_mode_timeout, + self.args.arg_mode_alarm, + )?), + }; + let update_policy = self.update_policy()?; + let logger_config = self.logger_config(); + let ws_conf = self.ws_config()?; + let snapshot_conf = self.snapshot_config()?; + let http_conf = self.http_config()?; + let ipc_conf = self.ipc_config()?; + let net_conf = self.net_config()?; + let network_id = self.network_id(); + let cache_config = self.cache_config(); + let tracing = self.args.arg_tracing.parse()?; + let fat_db = self.args.arg_fat_db.parse()?; + let compaction = self.args.arg_db_compaction.parse()?; + let warp_sync = !self.args.flag_no_warp; + let geth_compatibility = self.args.flag_geth; + let experimental_rpcs = self.args.flag_jsonrpc_experimental; + let ipfs_conf = self.ipfs_config(); + let secretstore_conf = self.secretstore_config()?; + let format = self.format()?; + let keys_iterations = NonZeroU32::new(self.args.arg_keys_iterations) + .ok_or_else(|| "--keys-iterations must be non-zero")?; + + let cmd = if self.args.flag_version { + Cmd::Version + } else if self.args.cmd_signer { + let authfile = ::signer::codes_path(&ws_conf.signer_path); + + if self.args.cmd_signer_new_token { + Cmd::SignerToken(ws_conf, logger_config.clone()) + } else if self.args.cmd_signer_sign { + let pwfile = self + .accounts_config()? 
+ .password_files + .first() + .map(|pwfile| PathBuf::from(pwfile)); + Cmd::SignerSign { + id: self.args.arg_signer_sign_id, + pwfile: pwfile, + port: ws_conf.port, + authfile: authfile, + } + } else if self.args.cmd_signer_reject { + Cmd::SignerReject { + id: self.args.arg_signer_reject_id, + port: ws_conf.port, + authfile: authfile, + } + } else if self.args.cmd_signer_list { + Cmd::SignerList { + port: ws_conf.port, + authfile: authfile, + } + } else { + unreachable!(); + } + } else if self.args.cmd_tools && self.args.cmd_tools_hash { + Cmd::Hash(self.args.arg_tools_hash_file) + } else if self.args.cmd_db && self.args.cmd_db_reset { + Cmd::Blockchain(BlockchainCmd::Reset(ResetBlockchain { + dirs, + spec, + pruning, + pruning_history, + pruning_memory: self.args.arg_pruning_memory, + tracing, + fat_db, + compaction, + cache_config, + num: self.args.arg_db_reset_num, + })) + } else if self.args.cmd_db && self.args.cmd_db_kill { + Cmd::Blockchain(BlockchainCmd::Kill(KillBlockchain { + spec: spec, + dirs: dirs, + pruning: pruning, + })) + } else if self.args.cmd_account { + let account_cmd = if self.args.cmd_account_new { + let new_acc = NewAccount { + iterations: keys_iterations, + path: dirs.keys, + spec: spec, + password_file: self + .accounts_config()? 
+ .password_files + .first() + .map(|x| x.to_owned()), + }; + AccountCmd::New(new_acc) + } else if self.args.cmd_account_list { + let list_acc = ListAccounts { + path: dirs.keys, + spec: spec, + }; + AccountCmd::List(list_acc) + } else if self.args.cmd_account_import { + let import_acc = ImportAccounts { + from: self + .args + .arg_account_import_path + .expect("CLI argument is required; qed") + .clone(), + to: dirs.keys, + spec: spec, + }; + AccountCmd::Import(import_acc) + } else { + unreachable!(); + }; + Cmd::Account(account_cmd) + } else if self.args.flag_import_geth_keys { + let account_cmd = AccountCmd::ImportFromGeth(ImportFromGethAccounts { + spec: spec, + to: dirs.keys, + testnet: self.args.flag_testnet, + }); + Cmd::Account(account_cmd) + } else if self.args.cmd_wallet { + let presale_cmd = ImportWallet { + iterations: keys_iterations, + path: dirs.keys, + spec: spec, + wallet_path: self.args.arg_wallet_import_path.clone().unwrap(), + password_file: self + .accounts_config()? 
+ .password_files + .first() + .map(|x| x.to_owned()), + }; + Cmd::ImportPresaleWallet(presale_cmd) + } else if self.args.cmd_import { + let import_cmd = ImportBlockchain { + spec: spec, + cache_config: cache_config, + dirs: dirs, + file_path: self.args.arg_import_file.clone(), + format: format, + pruning: pruning, + pruning_history: pruning_history, + pruning_memory: self.args.arg_pruning_memory, + compaction: compaction, + tracing: tracing, + fat_db: fat_db, + vm_type: vm_type, + check_seal: !self.args.flag_no_seal_check, + with_color: logger_config.color, + verifier_settings: self.verifier_settings(), + light: self.args.flag_light, + max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, + }; + Cmd::Blockchain(BlockchainCmd::Import(import_cmd)) + } else if self.args.cmd_export { + if self.args.cmd_export_blocks { + let export_cmd = ExportBlockchain { + spec: spec, + cache_config: cache_config, + dirs: dirs, + file_path: self.args.arg_export_blocks_file.clone(), + format: format, + pruning: pruning, + pruning_history: pruning_history, + pruning_memory: self.args.arg_pruning_memory, + compaction: compaction, + tracing: tracing, + fat_db: fat_db, + from_block: to_block_id(&self.args.arg_export_blocks_from)?, + to_block: to_block_id(&self.args.arg_export_blocks_to)?, + check_seal: !self.args.flag_no_seal_check, + max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, + }; + Cmd::Blockchain(BlockchainCmd::Export(export_cmd)) + } else if self.args.cmd_export_state { + let export_cmd = ExportState { + spec: spec, + cache_config: cache_config, + dirs: dirs, + file_path: self.args.arg_export_state_file.clone(), + format: format, + pruning: pruning, + pruning_history: pruning_history, + pruning_memory: self.args.arg_pruning_memory, + compaction: compaction, + tracing: tracing, + fat_db: fat_db, + at: to_block_id(&self.args.arg_export_state_at)?, + storage: !self.args.flag_export_state_no_storage, + code: !self.args.flag_export_state_no_code, 
+ min_balance: self + .args + .arg_export_state_min_balance + .and_then(|s| to_u256(&s).ok()), + max_balance: self + .args + .arg_export_state_max_balance + .and_then(|s| to_u256(&s).ok()), + max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, + }; + Cmd::Blockchain(BlockchainCmd::ExportState(export_cmd)) + } else { + unreachable!(); + } + } else if self.args.cmd_snapshot { + let snapshot_cmd = SnapshotCommand { + cache_config: cache_config, + dirs: dirs, + spec: spec, + pruning: pruning, + pruning_history: pruning_history, + pruning_memory: self.args.arg_pruning_memory, + tracing: tracing, + fat_db: fat_db, + compaction: compaction, + file_path: self.args.arg_snapshot_file.clone(), + kind: snapshot::Kind::Take, + block_at: to_block_id(&self.args.arg_snapshot_at)?, + max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, + snapshot_conf: snapshot_conf, + }; + Cmd::Snapshot(snapshot_cmd) + } else if self.args.cmd_restore { + let restore_cmd = SnapshotCommand { + cache_config: cache_config, + dirs: dirs, + spec: spec, + pruning: pruning, + pruning_history: pruning_history, + pruning_memory: self.args.arg_pruning_memory, + tracing: tracing, + fat_db: fat_db, + compaction: compaction, + file_path: self.args.arg_restore_file.clone(), + kind: snapshot::Kind::Restore, + block_at: to_block_id("latest")?, // unimportant. 
+ max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, + snapshot_conf: snapshot_conf, + }; + Cmd::Snapshot(restore_cmd) + } else if self.args.cmd_export_hardcoded_sync { + let export_hs_cmd = ExportHsyncCmd { + cache_config: cache_config, + dirs: dirs, + spec: spec, + pruning: pruning, + compaction: compaction, + }; + Cmd::ExportHardcodedSync(export_hs_cmd) + } else { + let daemon = if self.args.cmd_daemon { + Some( + self.args + .arg_daemon_pid_file + .clone() + .expect("CLI argument is required; qed"), + ) + } else { + None + }; + + let verifier_settings = self.verifier_settings(); + let whisper_config = self.whisper_config(); + let (private_provider_conf, private_enc_conf, private_tx_enabled) = + self.private_provider_config()?; + + let run_cmd = RunCmd { + cache_config: cache_config, + dirs: dirs, + spec: spec, + pruning: pruning, + pruning_history: pruning_history, + pruning_memory: self.args.arg_pruning_memory, + daemon: daemon, + logger_config: logger_config.clone(), + miner_options: self.miner_options()?, + gas_price_percentile: self.args.arg_gas_price_percentile, + poll_lifetime: self.args.arg_poll_lifetime, + ws_conf: ws_conf, + snapshot_conf: snapshot_conf, + http_conf: http_conf, + ipc_conf: ipc_conf, + net_conf: net_conf, + network_id: network_id, + acc_conf: self.accounts_config()?, + gas_pricer_conf: self.gas_pricer_config()?, + miner_extras: self.miner_extras()?, + stratum: self.stratum_options()?, + update_policy: update_policy, + allow_missing_blocks: self.args.flag_jsonrpc_allow_missing_blocks, + mode: mode, + tracing: tracing, + fat_db: fat_db, + compaction: compaction, + vm_type: vm_type, + warp_sync: warp_sync, + warp_barrier: self.args.arg_warp_barrier, + geth_compatibility: geth_compatibility, + experimental_rpcs, + net_settings: self.network_settings()?, + ipfs_conf: ipfs_conf, + secretstore_conf: secretstore_conf, + private_provider_conf: private_provider_conf, + private_encryptor_conf: private_enc_conf, + 
private_tx_enabled, + name: self.args.arg_identity, + custom_bootnodes: self.args.arg_bootnodes.is_some(), + check_seal: !self.args.flag_no_seal_check, + download_old_blocks: !self.args.flag_no_ancient_blocks, + verifier_settings: verifier_settings, + serve_light: !self.args.flag_no_serve_light, + light: self.args.flag_light, + no_persistent_txqueue: self.args.flag_no_persistent_txqueue, + whisper: whisper_config, + no_hardcoded_sync: self.args.flag_no_hardcoded_sync, + max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, + on_demand_response_time_window: self.args.arg_on_demand_response_time_window, + on_demand_request_backoff_start: self.args.arg_on_demand_request_backoff_start, + on_demand_request_backoff_max: self.args.arg_on_demand_request_backoff_max, + on_demand_request_backoff_rounds_max: self + .args + .arg_on_demand_request_backoff_rounds_max, + on_demand_request_consecutive_failures: self + .args + .arg_on_demand_request_consecutive_failures, + }; + Cmd::Run(run_cmd) + }; + + Ok(Execute { + logger: logger_config, + cmd: cmd, + }) + } + + fn vm_type(&self) -> Result { + Ok(VMType::Interpreter) + } + + fn miner_extras(&self) -> Result { + let floor = to_u256(&self.args.arg_gas_floor_target)?; + let ceil = to_u256(&self.args.arg_gas_cap)?; + let extras = MinerExtras { + author: self.author()?, + extra_data: self.extra_data()?, + gas_range_target: (floor, ceil), + engine_signer: self.engine_signer()?, + work_notify: self.work_notify(), + local_accounts: HashSet::from_iter( + to_addresses(&self.args.arg_tx_queue_locals)?.into_iter(), + ), + }; + + Ok(extras) + } + + fn author(&self) -> Result { + to_address( + self.args + .arg_etherbase + .clone() + .or(self.args.arg_author.clone()), + ) + } + + fn engine_signer(&self) -> Result { + to_address(self.args.arg_engine_signer.clone()) + } + + fn format(&self) -> Result, String> { + match self + .args + .arg_import_format + .clone() + .or(self.args.arg_export_blocks_format.clone()) + 
.or(self.args.arg_export_state_format.clone()) + { + Some(ref f) => Ok(Some(f.parse()?)), + None => Ok(None), + } + } + + fn cache_config(&self) -> CacheConfig { + match self.args.arg_cache_size.or(self.args.arg_cache) { + Some(size) => CacheConfig::new_with_total_cache_size(size), + None => CacheConfig::new( + self.args.arg_cache_size_db, + self.args.arg_cache_size_blocks, + self.args.arg_cache_size_queue, + self.args.arg_cache_size_state, + ), + } + } + + /// returns logger config + pub fn logger_config(&self) -> LogConfig { + LogConfig { + mode: self.args.arg_logging.clone(), + color: !self.args.flag_no_color && !cfg!(windows), + file: self + .args + .arg_log_file + .as_ref() + .map(|log_file| replace_home(&self.directories().base, log_file)), + } + } + + fn chain(&self) -> Result { + let name = if self.args.flag_testnet { + "testnet".to_owned() + } else { + self.args.arg_chain.clone() + }; + + Ok(name.parse()?) + } + + fn is_dev_chain(&self) -> Result { + Ok(self.chain()? == SpecType::Dev) + } + + fn max_peers(&self) -> u32 { + self.args + .arg_max_peers + .or(cmp::max(self.args.arg_min_peers, Some(DEFAULT_MAX_PEERS))) + .unwrap_or(DEFAULT_MAX_PEERS) as u32 + } + + fn ip_filter(&self) -> Result { + match IpFilter::parse(self.args.arg_allow_ips.as_str()) { + Ok(allow_ip) => Ok(allow_ip), + Err(_) => Err("Invalid IP filter value".to_owned()), + } + } + + fn min_peers(&self) -> u32 { + self.args + .arg_min_peers + .or(cmp::min(self.args.arg_max_peers, Some(DEFAULT_MIN_PEERS))) + .unwrap_or(DEFAULT_MIN_PEERS) as u32 + } + + fn max_pending_peers(&self) -> u32 { + self.args.arg_max_pending_peers as u32 + } + + fn snapshot_peers(&self) -> u32 { + self.args.arg_snapshot_peers as u32 + } + + fn work_notify(&self) -> Vec { + self.args + .arg_notify_work + .as_ref() + .map_or_else(Vec::new, |s| s.split(',').map(|s| s.to_owned()).collect()) + } + + fn accounts_config(&self) -> Result { + let keys_iterations = NonZeroU32::new(self.args.arg_keys_iterations) + .ok_or_else(|| 
"--keys-iterations must be non-zero")?; + let cfg = AccountsConfig { + iterations: keys_iterations, + refresh_time: self.args.arg_accounts_refresh, + testnet: self.args.flag_testnet, + password_files: self + .args + .arg_password + .iter() + .map(|s| replace_home(&self.directories().base, s)) + .collect(), + unlocked_accounts: to_addresses(&self.args.arg_unlock)?, + enable_hardware_wallets: !self.args.flag_no_hardware_wallets, + enable_fast_unlock: self.args.flag_fast_unlock, + }; + + Ok(cfg) + } + + fn stratum_options(&self) -> Result, String> { + if self.args.flag_stratum { + Ok(Some(stratum::Options { + io_path: self.directories().db, + listen_addr: self.stratum_interface(), + port: self.args.arg_ports_shift + self.args.arg_stratum_port, + secret: self + .args + .arg_stratum_secret + .as_ref() + .map(|s| s.parse::().unwrap_or_else(|_| keccak(s))), + })) + } else { + Ok(None) + } + } + + fn miner_options(&self) -> Result { + let is_dev_chain = self.is_dev_chain()?; + if is_dev_chain && self.args.flag_force_sealing && self.args.arg_reseal_min_period == 0 { + return Err("Force sealing can't be used with reseal_min_period = 0".into()); + } + + let reseal = self.args.arg_reseal_on_txs.parse::()?; + + let options = MinerOptions { + force_sealing: self.args.flag_force_sealing, + reseal_on_external_tx: reseal.external, + reseal_on_own_tx: reseal.own, + reseal_on_uncle: self.args.flag_reseal_on_uncle, + reseal_min_period: Duration::from_millis(self.args.arg_reseal_min_period), + reseal_max_period: Duration::from_millis(self.args.arg_reseal_max_period), + + pending_set: to_pending_set(&self.args.arg_relay_set)?, + work_queue_size: self.args.arg_work_queue_size, + enable_resubmission: !self.args.flag_remove_solved, + infinite_pending_block: self.args.flag_infinite_pending_block, + + tx_queue_penalization: to_queue_penalization(self.args.arg_tx_time_limit)?, + tx_queue_strategy: to_queue_strategy(&self.args.arg_tx_queue_strategy)?, + tx_queue_no_unfamiliar_locals: 
self.args.flag_tx_queue_no_unfamiliar_locals, + refuse_service_transactions: self.args.flag_refuse_service_transactions, + + pool_limits: self.pool_limits()?, + pool_verification_options: self.pool_verification_options()?, + }; + + Ok(options) + } + + fn pool_limits(&self) -> Result { + let max_count = self.args.arg_tx_queue_size; + + Ok(pool::Options { + max_count, + max_per_sender: self + .args + .arg_tx_queue_per_sender + .unwrap_or_else(|| cmp::max(16, max_count / 100)), + max_mem_usage: if self.args.arg_tx_queue_mem_limit > 0 { + self.args.arg_tx_queue_mem_limit as usize * 1024 * 1024 + } else { + usize::max_value() + }, + }) + } + + fn pool_verification_options(&self) -> Result { + Ok(pool::verifier::Options { + // NOTE min_gas_price and block_gas_limit will be overwritten right after start. + minimal_gas_price: U256::from(20_000_000) * 1_000u32, + block_gas_limit: U256::max_value(), + tx_gas_limit: match self.args.arg_tx_gas_limit { + Some(ref d) => to_u256(d)?, + None => U256::max_value(), + }, + no_early_reject: self.args.flag_tx_queue_no_early_reject, + }) + } + + fn secretstore_config(&self) -> Result { + Ok(SecretStoreConfiguration { + enabled: self.secretstore_enabled(), + http_enabled: self.secretstore_http_enabled(), + auto_migrate_enabled: self.secretstore_auto_migrate_enabled(), + acl_check_contract_address: self.secretstore_acl_check_contract_address()?, + service_contract_address: self.secretstore_service_contract_address()?, + service_contract_srv_gen_address: self + .secretstore_service_contract_srv_gen_address()?, + service_contract_srv_retr_address: self + .secretstore_service_contract_srv_retr_address()?, + service_contract_doc_store_address: self + .secretstore_service_contract_doc_store_address()?, + service_contract_doc_sretr_address: self + .secretstore_service_contract_doc_sretr_address()?, + self_secret: self.secretstore_self_secret()?, + nodes: self.secretstore_nodes()?, + key_server_set_contract_address: 
self.secretstore_key_server_set_contract_address()?, + interface: self.secretstore_interface(), + port: self.args.arg_ports_shift + self.args.arg_secretstore_port, + http_interface: self.secretstore_http_interface(), + http_port: self.args.arg_ports_shift + self.args.arg_secretstore_http_port, + data_path: self.directories().secretstore, + admin_public: self.secretstore_admin_public()?, + }) + } + + fn ipfs_config(&self) -> IpfsConfiguration { + IpfsConfiguration { + enabled: self.args.flag_ipfs_api, + port: self.args.arg_ports_shift + self.args.arg_ipfs_api_port, + interface: self.ipfs_interface(), + cors: self.ipfs_cors(), + hosts: self.ipfs_hosts(), + } + } + + fn gas_pricer_config(&self) -> Result { + fn wei_per_gas(usd_per_tx: f32, usd_per_eth: f32) -> U256 { + let wei_per_usd: f32 = 1.0e18 / usd_per_eth; + let gas_per_tx: f32 = 21000.0; + let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx; + U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap() + } + + if let Some(dec) = self.args.arg_gasprice.as_ref() { + return Ok(GasPricerConfig::Fixed(to_u256(dec)?)); + } else if let Some(dec) = self.args.arg_min_gas_price { + return Ok(GasPricerConfig::Fixed(U256::from(dec))); + } else if self.chain()? 
!= SpecType::Foundation { + return Ok(GasPricerConfig::Fixed(U256::zero())); + } + + let usd_per_tx = to_price(&self.args.arg_usd_per_tx)?; + + if "auto" == self.args.arg_usd_per_eth { + Ok(GasPricerConfig::Calibrated { + usd_per_tx: usd_per_tx, + recalibration_period: to_duration(self.args.arg_price_update_period.as_str())?, + api_endpoint: ETHERSCAN_ETH_PRICE_ENDPOINT.to_string(), + }) + } else if let Ok(usd_per_eth_parsed) = to_price(&self.args.arg_usd_per_eth) { + let wei_per_gas = wei_per_gas(usd_per_tx, usd_per_eth_parsed); + + info!( + "Using a fixed conversion rate of Ξ1 = {} ({} wei/gas)", + Colour::White + .bold() + .paint(format!("US${:.2}", usd_per_eth_parsed)), + Colour::Yellow.bold().paint(format!("{}", wei_per_gas)) + ); + + Ok(GasPricerConfig::Fixed(wei_per_gas)) + } else { + Ok(GasPricerConfig::Calibrated { + usd_per_tx: usd_per_tx, + recalibration_period: to_duration(self.args.arg_price_update_period.as_str())?, + api_endpoint: self.args.arg_usd_per_eth.clone(), + }) + } + } + + fn extra_data(&self) -> Result { + match self + .args + .arg_extradata + .as_ref() + .or(self.args.arg_extra_data.as_ref()) + { + Some(x) if x.len() <= 32 => Ok(x.as_bytes().to_owned()), + None => Ok(version_data()), + Some(_) => Err("Extra data must be at most 32 characters".into()), + } + } + + fn init_reserved_nodes(&self) -> Result, String> { + use std::fs::File; + + match self.args.arg_reserved_peers { + Some(ref path) => { + let path = replace_home(&self.directories().base, path); + + let mut buffer = String::new(); + let mut node_file = File::open(&path) + .map_err(|e| format!("Error opening reserved nodes file: {}", e))?; + node_file + .read_to_string(&mut buffer) + .map_err(|_| "Error reading reserved node file")?; + let lines = buffer + .lines() + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty() && !s.starts_with("#")) + .collect::>(); + + for line in &lines { + match validate_node_url(line).map(Into::into) { + None => continue, + 
Some(sync::ErrorKind::AddressResolve(_)) => { + return Err(format!( + "Failed to resolve hostname of a boot node: {}", + line + )) + } + Some(_) => { + return Err(format!( + "Invalid node address format given for a boot node: {}", + line + )) + } + } + } + + Ok(lines) + } + None => Ok(Vec::new()), + } + } + + fn net_addresses(&self) -> Result<(SocketAddr, Option), String> { + let port = self.args.arg_ports_shift + self.args.arg_port; + let listen_address = SocketAddr::new( + self.interface(&self.args.arg_interface).parse().unwrap(), + port, + ); + let public_address = if self.args.arg_nat.starts_with("extip:") { + let host = self.args.arg_nat[6..] + .split(':') + .next() + .expect("split has at least one part; qed"); + let host = format!("{}:{}", host, port); + match host.to_socket_addrs() { + Ok(mut addr_iter) => { + if let Some(addr) = addr_iter.next() { + Some(addr) + } else { + return Err(format!( + "Invalid host given with `--nat extip:{}`", + &self.args.arg_nat[6..] + )); + } + } + Err(_) => { + return Err(format!( + "Invalid host given with `--nat extip:{}`", + &self.args.arg_nat[6..] 
+ )) + } + } + } else { + None + }; + Ok((listen_address, public_address)) + } + + fn net_config(&self) -> Result { + let mut ret = NetworkConfiguration::new(); + ret.nat_enabled = self.args.arg_nat == "any" || self.args.arg_nat == "upnp"; + ret.boot_nodes = to_bootnodes(&self.args.arg_bootnodes)?; + let (listen, public) = self.net_addresses()?; + ret.listen_address = Some(format!("{}", listen)); + ret.public_address = public.map(|p| format!("{}", p)); + ret.use_secret = match self.args.arg_node_key.as_ref().map(|s| { + s.parse::() + .or_else(|_| Secret::from_unsafe_slice(&keccak(s))) + .map_err(|e| format!("Invalid key: {:?}", e)) + }) { + None => None, + Some(Ok(key)) => Some(key), + Some(Err(err)) => return Err(err), + }; + ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover; + ret.max_peers = self.max_peers(); + ret.min_peers = self.min_peers(); + ret.snapshot_peers = self.snapshot_peers(); + ret.ip_filter = self.ip_filter()?; + ret.max_pending_peers = self.max_pending_peers(); + let mut net_path = PathBuf::from(self.directories().base); + net_path.push("network"); + ret.config_path = Some(net_path.to_str().unwrap().to_owned()); + ret.reserved_nodes = self.init_reserved_nodes()?; + ret.allow_non_reserved = !self.args.flag_reserved_only; + ret.client_version = { + let mut client_version = version(); + if !self.args.arg_identity.is_empty() { + // Insert name after the "Parity-Ethereum/" at the beginning of version string. 
+ let idx = client_version.find('/').unwrap_or(client_version.len()); + client_version.insert_str(idx, &format!("/{}", self.args.arg_identity)); + } + client_version + }; + Ok(ret) + } + + fn network_id(&self) -> Option { + self.args.arg_network_id.or(self.args.arg_networkid) + } + + fn rpc_apis(&self) -> String { + let mut apis: Vec<&str> = self + .args + .arg_rpcapi + .as_ref() + .unwrap_or(&self.args.arg_jsonrpc_apis) + .split(",") + .collect(); + + if self.args.flag_geth { + apis.insert(0, "personal"); + } + + apis.join(",") + } + + fn cors(cors: &str) -> Option> { + match cors { + "none" => return Some(Vec::new()), + "*" | "all" | "any" => return None, + _ => {} + } + + Some(cors.split(',').map(Into::into).collect()) + } + + fn rpc_cors(&self) -> Option> { + let cors = self + .args + .arg_rpccorsdomain + .clone() + .unwrap_or_else(|| self.args.arg_jsonrpc_cors.to_owned()); + Self::cors(&cors) + } + + fn ipfs_cors(&self) -> Option> { + Self::cors(self.args.arg_ipfs_api_cors.as_ref()) + } + + fn hosts(&self, hosts: &str, interface: &str) -> Option> { + if self.args.flag_unsafe_expose { + return None; + } + + if interface == "0.0.0.0" && hosts == "none" { + return None; + } + + Self::parse_hosts(hosts) + } + + fn parse_hosts(hosts: &str) -> Option> { + match hosts { + "none" => return Some(Vec::new()), + "*" | "all" | "any" => return None, + _ => {} + } + let hosts = hosts.split(',').map(Into::into).collect(); + Some(hosts) + } + + fn rpc_hosts(&self) -> Option> { + self.hosts(&self.args.arg_jsonrpc_hosts, &self.rpc_interface()) + } + + fn ws_hosts(&self) -> Option> { + self.hosts(&self.args.arg_ws_hosts, &self.ws_interface()) + } + + fn ws_origins(&self) -> Option> { + if self.args.flag_unsafe_expose { + return None; + } + + Self::parse_hosts(&self.args.arg_ws_origins) + } + + fn ipfs_hosts(&self) -> Option> { + self.hosts(&self.args.arg_ipfs_api_hosts, &self.ipfs_interface()) + } + + fn ipc_config(&self) -> Result { + let conf = IpcConfiguration { + enabled: 
!(self.args.flag_ipcdisable + || self.args.flag_ipc_off + || self.args.flag_no_ipc), + socket_addr: self.ipc_path(), + apis: { + let mut apis = self + .args + .arg_ipcapi + .clone() + .unwrap_or(self.args.arg_ipc_apis.clone()); + if self.args.flag_geth { + if !apis.is_empty() { + apis.push_str(","); + } + apis.push_str("personal"); + } + apis.parse()? + }, + }; + + Ok(conf) + } + + fn http_config(&self) -> Result { + let conf = HttpConfiguration { + enabled: self.rpc_enabled(), + interface: self.rpc_interface(), + port: self.args.arg_ports_shift + + self.args.arg_rpcport.unwrap_or(self.args.arg_jsonrpc_port), + apis: self.rpc_apis().parse()?, + hosts: self.rpc_hosts(), + cors: self.rpc_cors(), + server_threads: match self.args.arg_jsonrpc_server_threads { + Some(threads) if threads > 0 => threads, + _ => 1, + }, + processing_threads: self.args.arg_jsonrpc_threads, + max_payload: match self.args.arg_jsonrpc_max_payload { + Some(max) if max > 0 => max as usize, + _ => 5usize, + }, + keep_alive: !self.args.flag_jsonrpc_no_keep_alive, + }; + + Ok(conf) + } + + fn ws_config(&self) -> Result { + let support_token_api = // enabled when not unlocking self.args.arg_unlock.is_none(); - let conf = WsConfiguration { - enabled: self.ws_enabled(), - interface: self.ws_interface(), - port: self.args.arg_ports_shift + self.args.arg_ws_port, - apis: self.args.arg_ws_apis.parse()?, - hosts: self.ws_hosts(), - origins: self.ws_origins(), - signer_path: self.directories().signer.into(), - support_token_api, - max_connections: self.args.arg_ws_max_connections, - }; + let conf = WsConfiguration { + enabled: self.ws_enabled(), + interface: self.ws_interface(), + port: self.args.arg_ports_shift + self.args.arg_ws_port, + apis: self.args.arg_ws_apis.parse()?, + hosts: self.ws_hosts(), + origins: self.ws_origins(), + signer_path: self.directories().signer.into(), + support_token_api, + max_connections: self.args.arg_ws_max_connections, + }; - Ok(conf) - } + Ok(conf) + } - fn 
private_provider_config(&self) -> Result<(ProviderConfig, EncryptorConfig, bool), String> { - let provider_conf = ProviderConfig { - validator_accounts: to_addresses(&self.args.arg_private_validators)?, - signer_account: self.args.arg_private_signer.clone().and_then(|account| to_address(Some(account)).ok()), - }; + fn private_provider_config(&self) -> Result<(ProviderConfig, EncryptorConfig, bool), String> { + let provider_conf = ProviderConfig { + validator_accounts: to_addresses(&self.args.arg_private_validators)?, + signer_account: self + .args + .arg_private_signer + .clone() + .and_then(|account| to_address(Some(account)).ok()), + }; - let encryptor_conf = EncryptorConfig { - base_url: self.args.arg_private_sstore_url.clone(), - threshold: self.args.arg_private_sstore_threshold.unwrap_or(0), - key_server_account: self.args.arg_private_account.clone().and_then(|account| to_address(Some(account)).ok()), - }; + let encryptor_conf = EncryptorConfig { + base_url: self.args.arg_private_sstore_url.clone(), + threshold: self.args.arg_private_sstore_threshold.unwrap_or(0), + key_server_account: self + .args + .arg_private_account + .clone() + .and_then(|account| to_address(Some(account)).ok()), + }; - Ok((provider_conf, encryptor_conf, self.args.flag_private_enabled)) - } + Ok(( + provider_conf, + encryptor_conf, + self.args.flag_private_enabled, + )) + } - fn snapshot_config(&self) -> Result { - let conf = SnapshotConfiguration { - no_periodic: self.args.flag_no_periodic_snapshot, - processing_threads: match self.args.arg_snapshot_threads { - Some(threads) if threads > 0 => threads, - _ => ::std::cmp::max(1, num_cpus::get_physical() / 2), - }, - }; + fn snapshot_config(&self) -> Result { + let conf = SnapshotConfiguration { + no_periodic: self.args.flag_no_periodic_snapshot, + processing_threads: match self.args.arg_snapshot_threads { + Some(threads) if threads > 0 => threads, + _ => ::std::cmp::max(1, num_cpus::get_physical() / 2), + }, + }; - Ok(conf) - } + Ok(conf) 
+ } - fn network_settings(&self) -> Result { - let http_conf = self.http_config()?; - let net_addresses = self.net_addresses()?; - Ok(NetworkSettings { - name: self.args.arg_identity.clone(), - chain: format!("{}", self.chain()?), - is_dev_chain: self.is_dev_chain()?, - network_port: net_addresses.0.port(), - rpc_enabled: http_conf.enabled, - rpc_interface: http_conf.interface, - rpc_port: http_conf.port, - }) - } + fn network_settings(&self) -> Result { + let http_conf = self.http_config()?; + let net_addresses = self.net_addresses()?; + Ok(NetworkSettings { + name: self.args.arg_identity.clone(), + chain: format!("{}", self.chain()?), + is_dev_chain: self.is_dev_chain()?, + network_port: net_addresses.0.port(), + rpc_enabled: http_conf.enabled, + rpc_interface: http_conf.interface, + rpc_port: http_conf.port, + }) + } - fn update_policy(&self) -> Result { - Ok(UpdatePolicy { - enable_downloading: !self.args.flag_no_download, - require_consensus: !self.args.flag_no_consensus, - filter: match self.args.arg_auto_update.as_ref() { - "none" => UpdateFilter::None, - "critical" => UpdateFilter::Critical, - "all" => UpdateFilter::All, - _ => return Err("Invalid value for `--auto-update`. See `--help` for more information.".into()), - }, - track: match self.args.arg_release_track.as_ref() { - "stable" => ReleaseTrack::Stable, - "beta" => ReleaseTrack::Beta, - "nightly" => ReleaseTrack::Nightly, - "testing" => ReleaseTrack::Testing, - "current" => ReleaseTrack::Unknown, - _ => return Err("Invalid value for `--releases-track`. 
See `--help` for more information.".into()), - }, - path: default_hypervisor_path(), - max_size: 128 * 1024 * 1024, - max_delay: self.args.arg_auto_update_delay as u64, - frequency: self.args.arg_auto_update_check_frequency as u64, - }) - } + fn update_policy(&self) -> Result { + Ok(UpdatePolicy { + enable_downloading: !self.args.flag_no_download, + require_consensus: !self.args.flag_no_consensus, + filter: match self.args.arg_auto_update.as_ref() { + "none" => UpdateFilter::None, + "critical" => UpdateFilter::Critical, + "all" => UpdateFilter::All, + _ => { + return Err( + "Invalid value for `--auto-update`. See `--help` for more information." + .into(), + ) + } + }, + track: match self.args.arg_release_track.as_ref() { + "stable" => ReleaseTrack::Stable, + "beta" => ReleaseTrack::Beta, + "nightly" => ReleaseTrack::Nightly, + "testing" => ReleaseTrack::Testing, + "current" => ReleaseTrack::Unknown, + _ => { + return Err( + "Invalid value for `--releases-track`. See `--help` for more information." + .into(), + ) + } + }, + path: default_hypervisor_path(), + max_size: 128 * 1024 * 1024, + max_delay: self.args.arg_auto_update_delay as u64, + frequency: self.args.arg_auto_update_check_frequency as u64, + }) + } - fn directories(&self) -> Directories { - let local_path = default_local_path(); - let base_path = self.args.arg_base_path.as_ref().or_else(|| self.args.arg_datadir.as_ref()).map_or_else(|| default_data_path(), |s| s.clone()); - let data_path = replace_home("", &base_path); - let is_using_base_path = self.args.arg_base_path.is_some(); - // If base_path is set and db_path is not we default to base path subdir instead of LOCAL. 
- let base_db_path = if is_using_base_path && self.args.arg_db_path.is_none() { - if self.args.flag_light { - "$BASE/chains_light" - } else { - "$BASE/chains" - } - } else if self.args.flag_light { - self.args.arg_db_path.as_ref().map_or(dir::CHAINS_PATH_LIGHT, |s| &s) - } else { - self.args.arg_db_path.as_ref().map_or(dir::CHAINS_PATH, |s| &s) - }; - let cache_path = if is_using_base_path { "$BASE/cache" } else { dir::CACHE_PATH }; + fn directories(&self) -> Directories { + let local_path = default_local_path(); + let base_path = self + .args + .arg_base_path + .as_ref() + .or_else(|| self.args.arg_datadir.as_ref()) + .map_or_else(|| default_data_path(), |s| s.clone()); + let data_path = replace_home("", &base_path); + let is_using_base_path = self.args.arg_base_path.is_some(); + // If base_path is set and db_path is not we default to base path subdir instead of LOCAL. + let base_db_path = if is_using_base_path && self.args.arg_db_path.is_none() { + if self.args.flag_light { + "$BASE/chains_light" + } else { + "$BASE/chains" + } + } else if self.args.flag_light { + self.args + .arg_db_path + .as_ref() + .map_or(dir::CHAINS_PATH_LIGHT, |s| &s) + } else { + self.args + .arg_db_path + .as_ref() + .map_or(dir::CHAINS_PATH, |s| &s) + }; + let cache_path = if is_using_base_path { + "$BASE/cache" + } else { + dir::CACHE_PATH + }; - let db_path = replace_home_and_local(&data_path, &local_path, &base_db_path); - let cache_path = replace_home_and_local(&data_path, &local_path, cache_path); - let keys_path = replace_home(&data_path, &self.args.arg_keys_path); - let secretstore_path = replace_home(&data_path, &self.args.arg_secretstore_path); - let ui_path = replace_home(&data_path, &self.args.arg_ui_path); + let db_path = replace_home_and_local(&data_path, &local_path, &base_db_path); + let cache_path = replace_home_and_local(&data_path, &local_path, cache_path); + let keys_path = replace_home(&data_path, &self.args.arg_keys_path); + let secretstore_path = 
replace_home(&data_path, &self.args.arg_secretstore_path); + let ui_path = replace_home(&data_path, &self.args.arg_ui_path); - Directories { - keys: keys_path, - base: data_path, - cache: cache_path, - db: db_path, - signer: ui_path, - secretstore: secretstore_path, - } - } + Directories { + keys: keys_path, + base: data_path, + cache: cache_path, + db: db_path, + signer: ui_path, + secretstore: secretstore_path, + } + } - fn ipc_path(&self) -> String { - if self.args.flag_geth { - geth_ipc_path(self.args.flag_testnet) - } else { - parity_ipc_path( - &self.directories().base, - &self.args.arg_ipcpath.clone().unwrap_or(self.args.arg_ipc_path.clone()), - self.args.arg_ports_shift, - ) - } - } + fn ipc_path(&self) -> String { + if self.args.flag_geth { + geth_ipc_path(self.args.flag_testnet) + } else { + parity_ipc_path( + &self.directories().base, + &self + .args + .arg_ipcpath + .clone() + .unwrap_or(self.args.arg_ipc_path.clone()), + self.args.arg_ports_shift, + ) + } + } - fn interface(&self, interface: &str) -> String { - if self.args.flag_unsafe_expose { - return "0.0.0.0".into(); - } + fn interface(&self, interface: &str) -> String { + if self.args.flag_unsafe_expose { + return "0.0.0.0".into(); + } - match interface { - "all" => "0.0.0.0", - "local" => "127.0.0.1", - x => x, - }.into() - } + match interface { + "all" => "0.0.0.0", + "local" => "127.0.0.1", + x => x, + } + .into() + } - fn rpc_interface(&self) -> String { - let rpc_interface = self.args.arg_rpcaddr.clone().unwrap_or(self.args.arg_jsonrpc_interface.clone()); - self.interface(&rpc_interface) - } + fn rpc_interface(&self) -> String { + let rpc_interface = self + .args + .arg_rpcaddr + .clone() + .unwrap_or(self.args.arg_jsonrpc_interface.clone()); + self.interface(&rpc_interface) + } - fn ws_interface(&self) -> String { - self.interface(&self.args.arg_ws_interface) - } + fn ws_interface(&self) -> String { + self.interface(&self.args.arg_ws_interface) + } - fn ipfs_interface(&self) -> String { - 
self.interface(&self.args.arg_ipfs_api_interface) - } + fn ipfs_interface(&self) -> String { + self.interface(&self.args.arg_ipfs_api_interface) + } - fn secretstore_interface(&self) -> String { - self.interface(&self.args.arg_secretstore_interface) - } + fn secretstore_interface(&self) -> String { + self.interface(&self.args.arg_secretstore_interface) + } - fn secretstore_http_interface(&self) -> String { - self.interface(&self.args.arg_secretstore_http_interface) - } + fn secretstore_http_interface(&self) -> String { + self.interface(&self.args.arg_secretstore_http_interface) + } - fn secretstore_self_secret(&self) -> Result, String> { - match self.args.arg_secretstore_secret { + fn secretstore_self_secret(&self) -> Result, String> { + match self.args.arg_secretstore_secret { Some(ref s) if s.len() == 64 => Ok(Some(NodeSecretKey::Plain(s.parse() .map_err(|e| format!("Invalid secret store secret: {}. Error: {:?}", s, e))?))), #[cfg(feature = "accounts")] @@ -1086,933 +1272,1133 @@ impl Configuration { Some(_) => Err(format!("Invalid secret store secret. 
Must be either existing account address, or hex-encoded private key")), None => Ok(None), } - } + } - fn secretstore_admin_public(&self) -> Result, String> { - match self.args.arg_secretstore_admin_public.as_ref() { - Some(admin_public) => Ok(Some(admin_public.parse().map_err(|e| format!("Invalid secret store admin public: {}", e))?)), - None => Ok(None), - } - } + fn secretstore_admin_public(&self) -> Result, String> { + match self.args.arg_secretstore_admin_public.as_ref() { + Some(admin_public) => { + Ok(Some(admin_public.parse().map_err(|e| { + format!("Invalid secret store admin public: {}", e) + })?)) + } + None => Ok(None), + } + } - fn secretstore_nodes(&self) -> Result, String> { - let mut nodes = BTreeMap::new(); - for node in self.args.arg_secretstore_nodes.split(',').filter(|n| n != &"") { - let public_and_addr: Vec<_> = node.split('@').collect(); - if public_and_addr.len() != 2 { - return Err(format!("Invalid secret store node: {}", node)); - } + fn secretstore_nodes(&self) -> Result, String> { + let mut nodes = BTreeMap::new(); + for node in self + .args + .arg_secretstore_nodes + .split(',') + .filter(|n| n != &"") + { + let public_and_addr: Vec<_> = node.split('@').collect(); + if public_and_addr.len() != 2 { + return Err(format!("Invalid secret store node: {}", node)); + } - let ip_and_port: Vec<_> = public_and_addr[1].split(':').collect(); - if ip_and_port.len() != 2 { - return Err(format!("Invalid secret store node: {}", node)); - } + let ip_and_port: Vec<_> = public_and_addr[1].split(':').collect(); + if ip_and_port.len() != 2 { + return Err(format!("Invalid secret store node: {}", node)); + } - let public = public_and_addr[0].parse() - .map_err(|e| format!("Invalid public key in secret store node: {}. Error: {:?}", public_and_addr[0], e))?; - let port = ip_and_port[1].parse() - .map_err(|e| format!("Invalid port in secret store node: {}. 
Error: {:?}", ip_and_port[1], e))?; + let public = public_and_addr[0].parse().map_err(|e| { + format!( + "Invalid public key in secret store node: {}. Error: {:?}", + public_and_addr[0], e + ) + })?; + let port = ip_and_port[1].parse().map_err(|e| { + format!( + "Invalid port in secret store node: {}. Error: {:?}", + ip_and_port[1], e + ) + })?; - nodes.insert(public, (ip_and_port[0].into(), port)); - } + nodes.insert(public, (ip_and_port[0].into(), port)); + } - Ok(nodes) - } + Ok(nodes) + } - fn stratum_interface(&self) -> String { - self.interface(&self.args.arg_stratum_interface) - } + fn stratum_interface(&self) -> String { + self.interface(&self.args.arg_stratum_interface) + } - fn rpc_enabled(&self) -> bool { - !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc - } + fn rpc_enabled(&self) -> bool { + !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc + } - fn ws_enabled(&self) -> bool { - !self.args.flag_no_ws - } + fn ws_enabled(&self) -> bool { + !self.args.flag_no_ws + } - fn secretstore_enabled(&self) -> bool { - !self.args.flag_no_secretstore && cfg!(feature = "secretstore") - } + fn secretstore_enabled(&self) -> bool { + !self.args.flag_no_secretstore && cfg!(feature = "secretstore") + } - fn secretstore_http_enabled(&self) -> bool { - !self.args.flag_no_secretstore_http && cfg!(feature = "secretstore") - } + fn secretstore_http_enabled(&self) -> bool { + !self.args.flag_no_secretstore_http && cfg!(feature = "secretstore") + } - fn secretstore_auto_migrate_enabled(&self) -> bool { - !self.args.flag_no_secretstore_auto_migrate - } + fn secretstore_auto_migrate_enabled(&self) -> bool { + !self.args.flag_no_secretstore_auto_migrate + } - fn secretstore_acl_check_contract_address(&self) -> Result, String> { - into_secretstore_service_contract_address(self.args.arg_secretstore_acl_contract.as_ref()) - } + fn secretstore_acl_check_contract_address( + &self, + ) -> Result, String> { + 
into_secretstore_service_contract_address(self.args.arg_secretstore_acl_contract.as_ref()) + } - fn secretstore_service_contract_address(&self) -> Result, String> { - into_secretstore_service_contract_address(self.args.arg_secretstore_contract.as_ref()) - } + fn secretstore_service_contract_address( + &self, + ) -> Result, String> { + into_secretstore_service_contract_address(self.args.arg_secretstore_contract.as_ref()) + } - fn secretstore_service_contract_srv_gen_address(&self) -> Result, String> { - into_secretstore_service_contract_address(self.args.arg_secretstore_srv_gen_contract.as_ref()) - } + fn secretstore_service_contract_srv_gen_address( + &self, + ) -> Result, String> { + into_secretstore_service_contract_address( + self.args.arg_secretstore_srv_gen_contract.as_ref(), + ) + } - fn secretstore_service_contract_srv_retr_address(&self) -> Result, String> { - into_secretstore_service_contract_address(self.args.arg_secretstore_srv_retr_contract.as_ref()) - } + fn secretstore_service_contract_srv_retr_address( + &self, + ) -> Result, String> { + into_secretstore_service_contract_address( + self.args.arg_secretstore_srv_retr_contract.as_ref(), + ) + } - fn secretstore_service_contract_doc_store_address(&self) -> Result, String> { - into_secretstore_service_contract_address(self.args.arg_secretstore_doc_store_contract.as_ref()) - } + fn secretstore_service_contract_doc_store_address( + &self, + ) -> Result, String> { + into_secretstore_service_contract_address( + self.args.arg_secretstore_doc_store_contract.as_ref(), + ) + } - fn secretstore_service_contract_doc_sretr_address(&self) -> Result, String> { - into_secretstore_service_contract_address(self.args.arg_secretstore_doc_sretr_contract.as_ref()) - } + fn secretstore_service_contract_doc_sretr_address( + &self, + ) -> Result, String> { + into_secretstore_service_contract_address( + self.args.arg_secretstore_doc_sretr_contract.as_ref(), + ) + } - fn secretstore_key_server_set_contract_address(&self) -> 
Result, String> { - into_secretstore_service_contract_address(self.args.arg_secretstore_server_set_contract.as_ref()) - } + fn secretstore_key_server_set_contract_address( + &self, + ) -> Result, String> { + into_secretstore_service_contract_address( + self.args.arg_secretstore_server_set_contract.as_ref(), + ) + } - fn verifier_settings(&self) -> VerifierSettings { - let mut settings = VerifierSettings::default(); - settings.scale_verifiers = self.args.flag_scale_verifiers; - if let Some(num_verifiers) = self.args.arg_num_verifiers { - settings.num_verifiers = num_verifiers; - } + fn verifier_settings(&self) -> VerifierSettings { + let mut settings = VerifierSettings::default(); + settings.scale_verifiers = self.args.flag_scale_verifiers; + if let Some(num_verifiers) = self.args.arg_num_verifiers { + settings.num_verifiers = num_verifiers; + } - settings - } + settings + } - fn whisper_config(&self) -> ::whisper::Config { - ::whisper::Config { - enabled: self.args.flag_whisper, - target_message_pool_size: self.args.arg_whisper_pool_size * 1024 * 1024, - } - } + fn whisper_config(&self) -> ::whisper::Config { + ::whisper::Config { + enabled: self.args.flag_whisper, + target_message_pool_size: self.args.arg_whisper_pool_size * 1024 * 1024, + } + } } -fn into_secretstore_service_contract_address(s: Option<&String>) -> Result, String> { - match s.map(String::as_str) { - None | Some("none") => Ok(None), - Some("registry") => Ok(Some(SecretStoreContractAddress::Registry)), - Some(a) => Ok(Some(SecretStoreContractAddress::Address(a.parse().map_err(|e| format!("{}", e))?))), - } +fn into_secretstore_service_contract_address( + s: Option<&String>, +) -> Result, String> { + match s.map(String::as_str) { + None | Some("none") => Ok(None), + Some("registry") => Ok(Some(SecretStoreContractAddress::Registry)), + Some(a) => Ok(Some(SecretStoreContractAddress::Address( + a.parse().map_err(|e| format!("{}", e))?, + ))), + } } #[cfg(test)] mod tests { - use std::io::Write; - use 
std::fs::File; - use std::str::FromStr; - - use tempdir::TempDir; - use ethcore::client::VMType; - use ethcore::miner::MinerOptions; - use miner::pool::PrioritizationStrategy; - use parity_rpc::NetworkSettings; - use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack}; - use types::ids::BlockId; - use types::data_format::DataFormat; - use account::{AccountCmd, NewAccount, ImportAccounts, ListAccounts}; - use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, ExportState}; - use cli::Args; - use dir::{Directories, default_hypervisor_path}; - use helpers::{default_network_config}; - use params::SpecType; - use presale::ImportWallet; - use rpc::WsConfiguration; - use rpc_apis::ApiSet; - use run::RunCmd; - - use network::{AllowIP, IpFilter}; - - extern crate ipnetwork; - use self::ipnetwork::IpNetwork; - - use super::*; - - lazy_static! { - static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed"); - } - - #[derive(Debug, PartialEq)] - struct TestPasswordReader(&'static str); - - fn parse(args: &[&str]) -> Configuration { - Configuration { - args: Args::parse_without_config(args).unwrap(), - } - } - - #[test] - fn test_command_version() { - let args = vec!["parity", "--version"]; - let conf = parse(&args); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::Version); - } - - #[test] - fn test_command_account_new() { - let args = vec!["parity", "account", "new"]; - let conf = parse(&args); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::New(NewAccount { - iterations: *ITERATIONS, - path: Directories::default().keys, - password_file: None, - spec: SpecType::default(), - }))); - } - - #[test] - fn test_command_account_list() { - let args = vec!["parity", "account", "list"]; - let conf = parse(&args); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account( - AccountCmd::List(ListAccounts { - path: Directories::default().keys, - spec: SpecType::default(), - }) - )); - } - - #[test] - fn 
test_command_account_import() { - let args = vec!["parity", "account", "import", "my_dir", "another_dir"]; - let conf = parse(&args); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::Import(ImportAccounts { - from: vec!["my_dir".into(), "another_dir".into()], - to: Directories::default().keys, - spec: SpecType::default(), - }))); - } - - #[test] - fn test_command_wallet_import() { - let args = vec!["parity", "wallet", "import", "my_wallet.json", "--password", "pwd"]; - let conf = parse(&args); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::ImportPresaleWallet(ImportWallet { - iterations: *ITERATIONS, - path: Directories::default().keys, - wallet_path: "my_wallet.json".into(), - password_file: Some("pwd".into()), - spec: SpecType::default(), - })); - } - - #[test] - fn test_command_blockchain_import() { - let args = vec!["parity", "import", "blockchain.json"]; - let conf = parse(&args); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::Blockchain(BlockchainCmd::Import(ImportBlockchain { - spec: Default::default(), - cache_config: Default::default(), - dirs: Default::default(), - file_path: Some("blockchain.json".into()), - format: Default::default(), - pruning: Default::default(), - pruning_history: 64, - pruning_memory: 32, - compaction: Default::default(), - tracing: Default::default(), - fat_db: Default::default(), - vm_type: VMType::Interpreter, - check_seal: true, - with_color: !cfg!(windows), - verifier_settings: Default::default(), - light: false, - max_round_blocks_to_import: 12, - }))); - } - - #[test] - fn test_command_blockchain_export() { - let args = vec!["parity", "export", "blocks", "blockchain.json"]; - let conf = parse(&args); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { - spec: Default::default(), - cache_config: Default::default(), - dirs: Default::default(), - file_path: Some("blockchain.json".into()), - pruning: Default::default(), - pruning_history: 64, - 
pruning_memory: 32, - format: Default::default(), - compaction: Default::default(), - tracing: Default::default(), - fat_db: Default::default(), - from_block: BlockId::Number(1), - to_block: BlockId::Latest, - check_seal: true, - max_round_blocks_to_import: 12, - }))); - } - - #[test] - fn test_command_state_export() { - let args = vec!["parity", "export", "state", "state.json"]; - let conf = parse(&args); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::Blockchain(BlockchainCmd::ExportState(ExportState { - spec: Default::default(), - cache_config: Default::default(), - dirs: Default::default(), - file_path: Some("state.json".into()), - pruning: Default::default(), - pruning_history: 64, - pruning_memory: 32, - format: Default::default(), - compaction: Default::default(), - tracing: Default::default(), - fat_db: Default::default(), - at: BlockId::Latest, - storage: true, - code: true, - min_balance: None, - max_balance: None, - max_round_blocks_to_import: 12, - }))); - } - - #[test] - fn test_command_blockchain_export_with_custom_format() { - let args = vec!["parity", "export", "blocks", "--format", "hex", "blockchain.json"]; - let conf = parse(&args); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { - spec: Default::default(), - cache_config: Default::default(), - dirs: Default::default(), - file_path: Some("blockchain.json".into()), - pruning: Default::default(), - pruning_history: 64, - pruning_memory: 32, - format: Some(DataFormat::Hex), - compaction: Default::default(), - tracing: Default::default(), - fat_db: Default::default(), - from_block: BlockId::Number(1), - to_block: BlockId::Latest, - check_seal: true, - max_round_blocks_to_import: 12, - }))); - } - - #[test] - fn test_command_signer_new_token() { - let args = vec!["parity", "signer", "new-token"]; - let conf = parse(&args); - let expected = Directories::default().signer; - assert_eq!(conf.into_command().unwrap().cmd, 
Cmd::SignerToken(WsConfiguration { - enabled: true, - interface: "127.0.0.1".into(), - port: 8546, - apis: ApiSet::UnsafeContext, - origins: Some(vec!["parity://*".into(),"chrome-extension://*".into(), "moz-extension://*".into()]), - hosts: Some(vec![]), - signer_path: expected.into(), - support_token_api: true, - max_connections: 100, - }, LogConfig { - color: !cfg!(windows), - mode: None, - file: None, - } )); - } - - #[test] - fn test_ws_max_connections() { - let args = vec!["parity", "--ws-max-connections", "1"]; - let conf = parse(&args); - - assert_eq!(conf.ws_config().unwrap(), WsConfiguration { - max_connections: 1, - ..Default::default() - }); - } - - #[test] - fn test_run_cmd() { - let args = vec!["parity"]; - let conf = parse(&args); - let mut expected = RunCmd { - allow_missing_blocks: false, - cache_config: Default::default(), - dirs: Default::default(), - spec: Default::default(), - pruning: Default::default(), - pruning_history: 64, - pruning_memory: 32, - daemon: None, - logger_config: Default::default(), - miner_options: Default::default(), - gas_price_percentile: 50, - poll_lifetime: 60, - ws_conf: Default::default(), - http_conf: Default::default(), - ipc_conf: Default::default(), - net_conf: default_network_config(), - network_id: None, - warp_sync: true, - warp_barrier: None, - acc_conf: Default::default(), - gas_pricer_conf: Default::default(), - miner_extras: Default::default(), - update_policy: UpdatePolicy { - enable_downloading: true, - require_consensus: true, - filter: UpdateFilter::Critical, - track: ReleaseTrack::Unknown, - path: default_hypervisor_path(), - max_size: 128 * 1024 * 1024, - max_delay: 100, - frequency: 20, - }, - mode: Default::default(), - tracing: Default::default(), - compaction: Default::default(), - vm_type: Default::default(), - geth_compatibility: false, - experimental_rpcs: false, - net_settings: Default::default(), - ipfs_conf: Default::default(), - secretstore_conf: Default::default(), - private_provider_conf: 
Default::default(), - private_encryptor_conf: Default::default(), - private_tx_enabled: false, - name: "".into(), - custom_bootnodes: false, - fat_db: Default::default(), - snapshot_conf: Default::default(), - stratum: None, - check_seal: true, - download_old_blocks: true, - verifier_settings: Default::default(), - serve_light: true, - light: false, - no_hardcoded_sync: false, - no_persistent_txqueue: false, - whisper: Default::default(), - max_round_blocks_to_import: 12, - on_demand_response_time_window: None, - on_demand_request_backoff_start: None, - on_demand_request_backoff_max: None, - on_demand_request_backoff_rounds_max: None, - on_demand_request_consecutive_failures: None, - }; - expected.secretstore_conf.enabled = cfg!(feature = "secretstore"); - expected.secretstore_conf.http_enabled = cfg!(feature = "secretstore"); - assert_eq!(conf.into_command().unwrap().cmd, Cmd::Run(expected)); - } - - #[test] - fn should_parse_mining_options() { - // given - let mut mining_options = MinerOptions::default(); - - // when - let conf0 = parse(&["parity"]); - let conf2 = parse(&["parity", "--tx-queue-strategy", "gas_price"]); - - // then - assert_eq!(conf0.miner_options().unwrap(), mining_options); - mining_options.tx_queue_strategy = PrioritizationStrategy::GasPriceOnly; - assert_eq!(conf2.miner_options().unwrap(), mining_options); - } - - #[test] - fn should_fail_on_force_reseal_and_reseal_min_period() { - let conf = parse(&["parity", "--chain", "dev", "--force-sealing", "--reseal-min-period", "0"]); - - assert!(conf.miner_options().is_err()); - } - - #[test] - fn should_parse_updater_options() { - // when - let conf0 = parse(&["parity", "--release-track=testing"]); - let conf1 = parse(&["parity", "--auto-update", "all", "--no-consensus", "--auto-update-delay", "300"]); - let conf2 = parse(&["parity", "--no-download", "--auto-update=all", "--release-track=beta", "--auto-update-delay=300", "--auto-update-check-frequency=100"]); - let conf3 = parse(&["parity", 
"--auto-update=xxx"]); - - // then - assert_eq!(conf0.update_policy().unwrap(), UpdatePolicy { - enable_downloading: true, - require_consensus: true, - filter: UpdateFilter::Critical, - track: ReleaseTrack::Testing, - path: default_hypervisor_path(), - max_size: 128 * 1024 * 1024, - max_delay: 100, - frequency: 20, - }); - assert_eq!(conf1.update_policy().unwrap(), UpdatePolicy { - enable_downloading: true, - require_consensus: false, - filter: UpdateFilter::All, - track: ReleaseTrack::Unknown, - path: default_hypervisor_path(), - max_size: 128 * 1024 * 1024, - max_delay: 300, - frequency: 20, - }); - assert_eq!(conf2.update_policy().unwrap(), UpdatePolicy { - enable_downloading: false, - require_consensus: true, - filter: UpdateFilter::All, - track: ReleaseTrack::Beta, - path: default_hypervisor_path(), - max_size: 128 * 1024 * 1024, - max_delay: 300, - frequency: 100, - }); - assert!(conf3.update_policy().is_err()); - } - - #[test] - fn should_parse_network_settings() { - // given - - // when - let conf = parse(&["parity", "--testnet", "--identity", "testname"]); - - // then - assert_eq!(conf.network_settings(), Ok(NetworkSettings { - name: "testname".to_owned(), - chain: "goerli".to_owned(), - is_dev_chain: false, - network_port: 30303, - rpc_enabled: true, - rpc_interface: "127.0.0.1".to_owned(), - rpc_port: 8545, - })); - } - - #[test] - fn should_parse_rpc_settings_with_geth_compatiblity() { - // given - fn assert(conf: Configuration) { - let net = conf.network_settings().unwrap(); - assert_eq!(net.rpc_enabled, true); - assert_eq!(net.rpc_interface, "0.0.0.0".to_owned()); - assert_eq!(net.rpc_port, 8000); - assert_eq!(conf.rpc_cors(), None); - assert_eq!(conf.rpc_apis(), "web3,eth".to_owned()); - } - - // when - let conf1 = parse(&["parity", "-j", - "--jsonrpc-port", "8000", - "--jsonrpc-interface", "all", - "--jsonrpc-cors", "*", - "--jsonrpc-apis", "web3,eth" - ]); - let conf2 = parse(&["parity", "--rpc", - "--rpcport", "8000", - "--rpcaddr", "all", - 
"--rpccorsdomain", "*", - "--rpcapi", "web3,eth" - ]); - - // then - assert(conf1); - assert(conf2); - } - - #[test] - fn should_parse_rpc_hosts() { - // given - - // when - let conf0 = parse(&["parity"]); - let conf1 = parse(&["parity", "--jsonrpc-hosts", "none"]); - let conf2 = parse(&["parity", "--jsonrpc-hosts", "all"]); - let conf3 = parse(&["parity", "--jsonrpc-hosts", "parity.io,something.io"]); - - // then - assert_eq!(conf0.rpc_hosts(), Some(Vec::new())); - assert_eq!(conf1.rpc_hosts(), Some(Vec::new())); - assert_eq!(conf2.rpc_hosts(), None); - assert_eq!(conf3.rpc_hosts(), Some(vec!["parity.io".into(), "something.io".into()])); - } - - #[test] - fn should_parse_ipfs_hosts() { - // given - - // when - let conf0 = parse(&["parity"]); - let conf1 = parse(&["parity", "--ipfs-api-hosts", "none"]); - let conf2 = parse(&["parity", "--ipfs-api-hosts", "all"]); - let conf3 = parse(&["parity", "--ipfs-api-hosts", "parity.io,something.io"]); - - // then - assert_eq!(conf0.ipfs_hosts(), Some(Vec::new())); - assert_eq!(conf1.ipfs_hosts(), Some(Vec::new())); - assert_eq!(conf2.ipfs_hosts(), None); - assert_eq!(conf3.ipfs_hosts(), Some(vec!["parity.io".into(), "something.io".into()])); - } - - #[test] - fn should_parse_ipfs_cors() { - // given - - // when - let conf0 = parse(&["parity"]); - let conf1 = parse(&["parity", "--ipfs-api-cors", "*"]); - let conf2 = parse(&["parity", "--ipfs-api-cors", "http://parity.io,http://something.io"]); - - // then - assert_eq!(conf0.ipfs_cors(), Some(vec![])); - assert_eq!(conf1.ipfs_cors(), None); - assert_eq!(conf2.ipfs_cors(), Some(vec!["http://parity.io".into(),"http://something.io".into()])); - } - - #[test] - fn should_parse_ui_configuration() { - // given - - // when - let conf0 = parse(&["parity", "--ui-path=signer"]); - let conf1 = parse(&["parity", "--ui-path=signer", "--ui-no-validation"]); - let conf2 = parse(&["parity", "--ui-path=signer", "--ui-port", "3123"]); - let conf3 = parse(&["parity", "--ui-path=signer", 
"--ui-interface", "test"]); - let conf4 = parse(&["parity", "--ui-path=signer", "--force-ui"]); - - // then - assert_eq!(conf0.directories().signer, "signer".to_owned()); - - assert!(conf1.ws_config().unwrap().hosts.is_some()); - assert_eq!(conf1.ws_config().unwrap().origins, Some(vec!["parity://*".into(), "chrome-extension://*".into(), "moz-extension://*".into()])); - assert_eq!(conf1.directories().signer, "signer".to_owned()); - - assert!(conf2.ws_config().unwrap().hosts.is_some()); - assert_eq!(conf2.directories().signer, "signer".to_owned()); - - assert!(conf3.ws_config().unwrap().hosts.is_some()); - assert_eq!(conf3.directories().signer, "signer".to_owned()); - - assert!(conf4.ws_config().unwrap().hosts.is_some()); - assert_eq!(conf4.directories().signer, "signer".to_owned()); - } - - #[test] - fn should_not_bail_on_empty_line_in_reserved_peers() { - let tempdir = TempDir::new("").unwrap(); - let filename = tempdir.path().join("peers"); - File::create(&filename).unwrap().write_all(b" \n\t\n").unwrap(); - let args = vec!["parity", "--reserved-peers", filename.to_str().unwrap()]; - let conf = Configuration::parse_cli(&args).unwrap(); - assert!(conf.init_reserved_nodes().is_ok()); - } - - #[test] - fn should_ignore_comments_in_reserved_peers() { - let tempdir = TempDir::new("").unwrap(); - let filename = tempdir.path().join("peers_comments"); - File::create(&filename).unwrap().write_all(b"# Sample comment\nenode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@172.0.0.1:30303\n").unwrap(); - let args = vec!["parity", "--reserved-peers", filename.to_str().unwrap()]; - let conf = Configuration::parse_cli(&args).unwrap(); - let reserved_nodes = conf.init_reserved_nodes(); - assert!(reserved_nodes.is_ok()); - assert_eq!(reserved_nodes.unwrap().len(), 1); - } - - #[test] - fn test_dev_preset() { - let args = vec!["parity", "--config", "dev"]; - let conf = 
Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.net_settings.chain, "dev"); - assert_eq!(c.gas_pricer_conf, GasPricerConfig::Fixed(0.into())); - assert_eq!(c.miner_options.reseal_min_period, Duration::from_millis(0)); - }, - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn test_mining_preset() { - let args = vec!["parity", "--config", "mining"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.net_conf.min_peers, 50); - assert_eq!(c.net_conf.max_peers, 100); - assert_eq!(c.ipc_conf.enabled, false); - assert_eq!(c.miner_options.force_sealing, true); - assert_eq!(c.miner_options.reseal_on_external_tx, true); - assert_eq!(c.miner_options.reseal_on_own_tx, true); - assert_eq!(c.miner_options.reseal_min_period, Duration::from_millis(4000)); - assert_eq!(c.miner_options.pool_limits.max_count, 8192); - assert_eq!(c.cache_config, CacheConfig::new_with_total_cache_size(1024)); - assert_eq!(c.logger_config.mode.unwrap(), "miner=trace,own_tx=trace"); - }, - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn test_non_standard_ports_preset() { - let args = vec!["parity", "--config", "non-standard-ports"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.net_settings.network_port, 30305); - assert_eq!(c.net_settings.rpc_port, 8645); - }, - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn test_insecure_preset() { - let args = vec!["parity", "--config", "insecure"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.update_policy.require_consensus, false); - assert_eq!(c.net_settings.rpc_interface, "0.0.0.0"); - match c.http_conf.apis { - ApiSet::List(set) => assert_eq!(set, ApiSet::All.list_apis()), - _ => panic!("Incorrect rpc apis"), - 
} - // "web3,eth,net,personal,parity,parity_set,traces,rpc,parity_accounts"); - assert_eq!(c.http_conf.hosts, None); - assert_eq!(c.ipfs_conf.hosts, None); - }, - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn test_dev_insecure_preset() { - let args = vec!["parity", "--config", "dev-insecure"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.net_settings.chain, "dev"); - assert_eq!(c.gas_pricer_conf, GasPricerConfig::Fixed(0.into())); - assert_eq!(c.miner_options.reseal_min_period, Duration::from_millis(0)); - assert_eq!(c.update_policy.require_consensus, false); - assert_eq!(c.net_settings.rpc_interface, "0.0.0.0"); - match c.http_conf.apis { - ApiSet::List(set) => assert_eq!(set, ApiSet::All.list_apis()), - _ => panic!("Incorrect rpc apis"), - } - // "web3,eth,net,personal,parity,parity_set,traces,rpc,parity_accounts"); - assert_eq!(c.http_conf.hosts, None); - assert_eq!(c.ipfs_conf.hosts, None); - }, - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn test_override_preset() { - let args = vec!["parity", "--config", "mining", "--min-peers=99"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.net_conf.min_peers, 99); - }, - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn test_identity_arg() { - let args = vec!["parity", "--identity", "Somebody"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.name, "Somebody"); - assert!(c.net_conf.client_version.starts_with("Parity-Ethereum/Somebody/")); - } - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn should_apply_ports_shift() { - // given - - // when - let conf0 = parse(&["parity", "--ports-shift", "1", "--stratum"]); - let conf1 = parse(&["parity", "--ports-shift", "1", "--jsonrpc-port", "8544"]); - - // then - 
assert_eq!(conf0.net_addresses().unwrap().0.port(), 30304); - assert_eq!(conf0.network_settings().unwrap().network_port, 30304); - assert_eq!(conf0.network_settings().unwrap().rpc_port, 8546); - assert_eq!(conf0.http_config().unwrap().port, 8546); - assert_eq!(conf0.ws_config().unwrap().port, 8547); - assert_eq!(conf0.secretstore_config().unwrap().port, 8084); - assert_eq!(conf0.secretstore_config().unwrap().http_port, 8083); - assert_eq!(conf0.ipfs_config().port, 5002); - assert_eq!(conf0.stratum_options().unwrap().unwrap().port, 8009); - - assert_eq!(conf1.net_addresses().unwrap().0.port(), 30304); - assert_eq!(conf1.network_settings().unwrap().network_port, 30304); - assert_eq!(conf1.network_settings().unwrap().rpc_port, 8545); - assert_eq!(conf1.http_config().unwrap().port, 8545); - assert_eq!(conf1.ws_config().unwrap().port, 8547); - assert_eq!(conf1.secretstore_config().unwrap().port, 8084); - assert_eq!(conf1.secretstore_config().unwrap().http_port, 8083); - assert_eq!(conf1.ipfs_config().port, 5002); - } - - #[test] - fn should_resolve_external_nat_hosts() { - // Ip works - let conf = parse(&["parity", "--nat", "extip:1.1.1.1"]); - assert_eq!(conf.net_addresses().unwrap().1.unwrap().ip().to_string(), "1.1.1.1"); - assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); - - // Ip with port works, port is discarded - let conf = parse(&["parity", "--nat", "extip:192.168.1.1:123"]); - assert_eq!(conf.net_addresses().unwrap().1.unwrap().ip().to_string(), "192.168.1.1"); - assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); - - // Hostname works - let conf = parse(&["parity", "--nat", "extip:ethereum.org"]); - assert!(conf.net_addresses().unwrap().1.is_some()); - assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); - - // Hostname works, garbage at the end is discarded - let conf = parse(&["parity", "--nat", "extip:ethereum.org:whatever bla bla 123"]); - assert!(conf.net_addresses().unwrap().1.is_some()); - 
assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); - - // Garbage is error - let conf = parse(&["parity", "--nat", "extip:blabla"]); - assert!(conf.net_addresses().is_err()); - } - - #[test] - fn should_expose_all_servers() { - // given - - // when - let conf0 = parse(&["parity", "--unsafe-expose"]); - - // then - assert_eq!(&conf0.network_settings().unwrap().rpc_interface, "0.0.0.0"); - assert_eq!(&conf0.http_config().unwrap().interface, "0.0.0.0"); - assert_eq!(conf0.http_config().unwrap().hosts, None); - assert_eq!(&conf0.ws_config().unwrap().interface, "0.0.0.0"); - assert_eq!(conf0.ws_config().unwrap().hosts, None); - assert_eq!(conf0.ws_config().unwrap().origins, None); - assert_eq!(&conf0.secretstore_config().unwrap().interface, "0.0.0.0"); - assert_eq!(&conf0.secretstore_config().unwrap().http_interface, "0.0.0.0"); - assert_eq!(&conf0.ipfs_config().interface, "0.0.0.0"); - assert_eq!(conf0.ipfs_config().hosts, None); - } - - #[test] - fn allow_ips() { - let all = parse(&["parity", "--allow-ips", "all"]); - let private = parse(&["parity", "--allow-ips", "private"]); - let block_custom = parse(&["parity", "--allow-ips", "-10.0.0.0/8"]); - let combo = parse(&["parity", "--allow-ips", "public 10.0.0.0/8 -1.0.0.0/8"]); - let ipv6_custom_public = parse(&["parity", "--allow-ips", "public fc00::/7"]); - let ipv6_custom_private = parse(&["parity", "--allow-ips", "private -fc00::/7"]); - - assert_eq!(all.ip_filter().unwrap(), IpFilter { - predefined: AllowIP::All, - custom_allow: vec![], - custom_block: vec![], - }); - - assert_eq!(private.ip_filter().unwrap(), IpFilter { - predefined: AllowIP::Private, - custom_allow: vec![], - custom_block: vec![], - }); - - assert_eq!(block_custom.ip_filter().unwrap(), IpFilter { - predefined: AllowIP::All, - custom_allow: vec![], - custom_block: vec![IpNetwork::from_str("10.0.0.0/8").unwrap()], - }); - - assert_eq!(combo.ip_filter().unwrap(), IpFilter { - predefined: AllowIP::Public, - custom_allow: 
vec![IpNetwork::from_str("10.0.0.0/8").unwrap()], - custom_block: vec![IpNetwork::from_str("1.0.0.0/8").unwrap()], - }); - - assert_eq!(ipv6_custom_public.ip_filter().unwrap(), IpFilter { - predefined: AllowIP::Public, - custom_allow: vec![IpNetwork::from_str("fc00::/7").unwrap()], - custom_block: vec![], - }); - - assert_eq!(ipv6_custom_private.ip_filter().unwrap(), IpFilter { - predefined: AllowIP::Private, - custom_allow: vec![], - custom_block: vec![IpNetwork::from_str("fc00::/7").unwrap()], - }); - } - - #[test] - fn should_use_correct_cache_path_if_base_is_set() { - use std::path; - - let std = parse(&["parity"]); - let base = parse(&["parity", "--base-path", "/test"]); - - let base_path = ::dir::default_data_path(); - let local_path = ::dir::default_local_path(); - assert_eq!(std.directories().cache, dir::helpers::replace_home_and_local(&base_path, &local_path, ::dir::CACHE_PATH)); - assert_eq!(path::Path::new(&base.directories().cache), path::Path::new("/test/cache")); - } - - #[test] - fn should_respect_only_max_peers_and_default() { - let args = vec!["parity", "--max-peers=50"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.net_conf.min_peers, 25); - assert_eq!(c.net_conf.max_peers, 50); - }, - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn should_respect_only_max_peers_less_than_default() { - let args = vec!["parity", "--max-peers=5"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.net_conf.min_peers, 5); - assert_eq!(c.net_conf.max_peers, 5); - }, - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn should_respect_only_min_peers_and_default() { - let args = vec!["parity", "--min-peers=5"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.net_conf.min_peers, 5); - 
assert_eq!(c.net_conf.max_peers, 50); - }, - _ => panic!("Should be Cmd::Run"), - } - } - - #[test] - fn should_respect_only_min_peers_and_greater_than_default() { - let args = vec!["parity", "--min-peers=500"]; - let conf = Configuration::parse_cli(&args).unwrap(); - match conf.into_command().unwrap().cmd { - Cmd::Run(c) => { - assert_eq!(c.net_conf.min_peers, 500); - assert_eq!(c.net_conf.max_peers, 500); - }, - _ => panic!("Should be Cmd::Run"), - } - } + use std::{fs::File, io::Write, str::FromStr}; + + use account::{AccountCmd, ImportAccounts, ListAccounts, NewAccount}; + use blockchain::{BlockchainCmd, ExportBlockchain, ExportState, ImportBlockchain}; + use cli::Args; + use dir::{default_hypervisor_path, Directories}; + use ethcore::{client::VMType, miner::MinerOptions}; + use helpers::default_network_config; + use miner::pool::PrioritizationStrategy; + use params::SpecType; + use parity_rpc::NetworkSettings; + use presale::ImportWallet; + use rpc::WsConfiguration; + use rpc_apis::ApiSet; + use run::RunCmd; + use tempdir::TempDir; + use types::{data_format::DataFormat, ids::BlockId}; + use updater::{ReleaseTrack, UpdateFilter, UpdatePolicy}; + + use network::{AllowIP, IpFilter}; + + extern crate ipnetwork; + use self::ipnetwork::IpNetwork; + + use super::*; + + lazy_static! 
{ + static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed"); + } + + #[derive(Debug, PartialEq)] + struct TestPasswordReader(&'static str); + + fn parse(args: &[&str]) -> Configuration { + Configuration { + args: Args::parse_without_config(args).unwrap(), + } + } + + #[test] + fn test_command_version() { + let args = vec!["parity", "--version"]; + let conf = parse(&args); + assert_eq!(conf.into_command().unwrap().cmd, Cmd::Version); + } + + #[test] + fn test_command_account_new() { + let args = vec!["parity", "account", "new"]; + let conf = parse(&args); + assert_eq!( + conf.into_command().unwrap().cmd, + Cmd::Account(AccountCmd::New(NewAccount { + iterations: *ITERATIONS, + path: Directories::default().keys, + password_file: None, + spec: SpecType::default(), + })) + ); + } + + #[test] + fn test_command_account_list() { + let args = vec!["parity", "account", "list"]; + let conf = parse(&args); + assert_eq!( + conf.into_command().unwrap().cmd, + Cmd::Account(AccountCmd::List(ListAccounts { + path: Directories::default().keys, + spec: SpecType::default(), + })) + ); + } + + #[test] + fn test_command_account_import() { + let args = vec!["parity", "account", "import", "my_dir", "another_dir"]; + let conf = parse(&args); + assert_eq!( + conf.into_command().unwrap().cmd, + Cmd::Account(AccountCmd::Import(ImportAccounts { + from: vec!["my_dir".into(), "another_dir".into()], + to: Directories::default().keys, + spec: SpecType::default(), + })) + ); + } + + #[test] + fn test_command_wallet_import() { + let args = vec![ + "parity", + "wallet", + "import", + "my_wallet.json", + "--password", + "pwd", + ]; + let conf = parse(&args); + assert_eq!( + conf.into_command().unwrap().cmd, + Cmd::ImportPresaleWallet(ImportWallet { + iterations: *ITERATIONS, + path: Directories::default().keys, + wallet_path: "my_wallet.json".into(), + password_file: Some("pwd".into()), + spec: SpecType::default(), + }) + ); + } + + #[test] + fn 
test_command_blockchain_import() { + let args = vec!["parity", "import", "blockchain.json"]; + let conf = parse(&args); + assert_eq!( + conf.into_command().unwrap().cmd, + Cmd::Blockchain(BlockchainCmd::Import(ImportBlockchain { + spec: Default::default(), + cache_config: Default::default(), + dirs: Default::default(), + file_path: Some("blockchain.json".into()), + format: Default::default(), + pruning: Default::default(), + pruning_history: 64, + pruning_memory: 32, + compaction: Default::default(), + tracing: Default::default(), + fat_db: Default::default(), + vm_type: VMType::Interpreter, + check_seal: true, + with_color: !cfg!(windows), + verifier_settings: Default::default(), + light: false, + max_round_blocks_to_import: 12, + })) + ); + } + + #[test] + fn test_command_blockchain_export() { + let args = vec!["parity", "export", "blocks", "blockchain.json"]; + let conf = parse(&args); + assert_eq!( + conf.into_command().unwrap().cmd, + Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { + spec: Default::default(), + cache_config: Default::default(), + dirs: Default::default(), + file_path: Some("blockchain.json".into()), + pruning: Default::default(), + pruning_history: 64, + pruning_memory: 32, + format: Default::default(), + compaction: Default::default(), + tracing: Default::default(), + fat_db: Default::default(), + from_block: BlockId::Number(1), + to_block: BlockId::Latest, + check_seal: true, + max_round_blocks_to_import: 12, + })) + ); + } + + #[test] + fn test_command_state_export() { + let args = vec!["parity", "export", "state", "state.json"]; + let conf = parse(&args); + assert_eq!( + conf.into_command().unwrap().cmd, + Cmd::Blockchain(BlockchainCmd::ExportState(ExportState { + spec: Default::default(), + cache_config: Default::default(), + dirs: Default::default(), + file_path: Some("state.json".into()), + pruning: Default::default(), + pruning_history: 64, + pruning_memory: 32, + format: Default::default(), + compaction: Default::default(), + 
tracing: Default::default(), + fat_db: Default::default(), + at: BlockId::Latest, + storage: true, + code: true, + min_balance: None, + max_balance: None, + max_round_blocks_to_import: 12, + })) + ); + } + + #[test] + fn test_command_blockchain_export_with_custom_format() { + let args = vec![ + "parity", + "export", + "blocks", + "--format", + "hex", + "blockchain.json", + ]; + let conf = parse(&args); + assert_eq!( + conf.into_command().unwrap().cmd, + Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { + spec: Default::default(), + cache_config: Default::default(), + dirs: Default::default(), + file_path: Some("blockchain.json".into()), + pruning: Default::default(), + pruning_history: 64, + pruning_memory: 32, + format: Some(DataFormat::Hex), + compaction: Default::default(), + tracing: Default::default(), + fat_db: Default::default(), + from_block: BlockId::Number(1), + to_block: BlockId::Latest, + check_seal: true, + max_round_blocks_to_import: 12, + })) + ); + } + + #[test] + fn test_command_signer_new_token() { + let args = vec!["parity", "signer", "new-token"]; + let conf = parse(&args); + let expected = Directories::default().signer; + assert_eq!( + conf.into_command().unwrap().cmd, + Cmd::SignerToken( + WsConfiguration { + enabled: true, + interface: "127.0.0.1".into(), + port: 8546, + apis: ApiSet::UnsafeContext, + origins: Some(vec![ + "parity://*".into(), + "chrome-extension://*".into(), + "moz-extension://*".into() + ]), + hosts: Some(vec![]), + signer_path: expected.into(), + support_token_api: true, + max_connections: 100, + }, + LogConfig { + color: !cfg!(windows), + mode: None, + file: None, + } + ) + ); + } + + #[test] + fn test_ws_max_connections() { + let args = vec!["parity", "--ws-max-connections", "1"]; + let conf = parse(&args); + + assert_eq!( + conf.ws_config().unwrap(), + WsConfiguration { + max_connections: 1, + ..Default::default() + } + ); + } + + #[test] + fn test_run_cmd() { + let args = vec!["parity"]; + let conf = 
parse(&args); + let mut expected = RunCmd { + allow_missing_blocks: false, + cache_config: Default::default(), + dirs: Default::default(), + spec: Default::default(), + pruning: Default::default(), + pruning_history: 64, + pruning_memory: 32, + daemon: None, + logger_config: Default::default(), + miner_options: Default::default(), + gas_price_percentile: 50, + poll_lifetime: 60, + ws_conf: Default::default(), + http_conf: Default::default(), + ipc_conf: Default::default(), + net_conf: default_network_config(), + network_id: None, + warp_sync: true, + warp_barrier: None, + acc_conf: Default::default(), + gas_pricer_conf: Default::default(), + miner_extras: Default::default(), + update_policy: UpdatePolicy { + enable_downloading: true, + require_consensus: true, + filter: UpdateFilter::Critical, + track: ReleaseTrack::Unknown, + path: default_hypervisor_path(), + max_size: 128 * 1024 * 1024, + max_delay: 100, + frequency: 20, + }, + mode: Default::default(), + tracing: Default::default(), + compaction: Default::default(), + vm_type: Default::default(), + geth_compatibility: false, + experimental_rpcs: false, + net_settings: Default::default(), + ipfs_conf: Default::default(), + secretstore_conf: Default::default(), + private_provider_conf: Default::default(), + private_encryptor_conf: Default::default(), + private_tx_enabled: false, + name: "".into(), + custom_bootnodes: false, + fat_db: Default::default(), + snapshot_conf: Default::default(), + stratum: None, + check_seal: true, + download_old_blocks: true, + verifier_settings: Default::default(), + serve_light: true, + light: false, + no_hardcoded_sync: false, + no_persistent_txqueue: false, + whisper: Default::default(), + max_round_blocks_to_import: 12, + on_demand_response_time_window: None, + on_demand_request_backoff_start: None, + on_demand_request_backoff_max: None, + on_demand_request_backoff_rounds_max: None, + on_demand_request_consecutive_failures: None, + }; + expected.secretstore_conf.enabled = 
cfg!(feature = "secretstore"); + expected.secretstore_conf.http_enabled = cfg!(feature = "secretstore"); + assert_eq!(conf.into_command().unwrap().cmd, Cmd::Run(expected)); + } + + #[test] + fn should_parse_mining_options() { + // given + let mut mining_options = MinerOptions::default(); + + // when + let conf0 = parse(&["parity"]); + let conf2 = parse(&["parity", "--tx-queue-strategy", "gas_price"]); + + // then + assert_eq!(conf0.miner_options().unwrap(), mining_options); + mining_options.tx_queue_strategy = PrioritizationStrategy::GasPriceOnly; + assert_eq!(conf2.miner_options().unwrap(), mining_options); + } + + #[test] + fn should_fail_on_force_reseal_and_reseal_min_period() { + let conf = parse(&[ + "parity", + "--chain", + "dev", + "--force-sealing", + "--reseal-min-period", + "0", + ]); + + assert!(conf.miner_options().is_err()); + } + + #[test] + fn should_parse_updater_options() { + // when + let conf0 = parse(&["parity", "--release-track=testing"]); + let conf1 = parse(&[ + "parity", + "--auto-update", + "all", + "--no-consensus", + "--auto-update-delay", + "300", + ]); + let conf2 = parse(&[ + "parity", + "--no-download", + "--auto-update=all", + "--release-track=beta", + "--auto-update-delay=300", + "--auto-update-check-frequency=100", + ]); + let conf3 = parse(&["parity", "--auto-update=xxx"]); + + // then + assert_eq!( + conf0.update_policy().unwrap(), + UpdatePolicy { + enable_downloading: true, + require_consensus: true, + filter: UpdateFilter::Critical, + track: ReleaseTrack::Testing, + path: default_hypervisor_path(), + max_size: 128 * 1024 * 1024, + max_delay: 100, + frequency: 20, + } + ); + assert_eq!( + conf1.update_policy().unwrap(), + UpdatePolicy { + enable_downloading: true, + require_consensus: false, + filter: UpdateFilter::All, + track: ReleaseTrack::Unknown, + path: default_hypervisor_path(), + max_size: 128 * 1024 * 1024, + max_delay: 300, + frequency: 20, + } + ); + assert_eq!( + conf2.update_policy().unwrap(), + UpdatePolicy { + 
enable_downloading: false, + require_consensus: true, + filter: UpdateFilter::All, + track: ReleaseTrack::Beta, + path: default_hypervisor_path(), + max_size: 128 * 1024 * 1024, + max_delay: 300, + frequency: 100, + } + ); + assert!(conf3.update_policy().is_err()); + } + + #[test] + fn should_parse_network_settings() { + // given + + // when + let conf = parse(&["parity", "--testnet", "--identity", "testname"]); + + // then + assert_eq!( + conf.network_settings(), + Ok(NetworkSettings { + name: "testname".to_owned(), + chain: "goerli".to_owned(), + is_dev_chain: false, + network_port: 30303, + rpc_enabled: true, + rpc_interface: "127.0.0.1".to_owned(), + rpc_port: 8545, + }) + ); + } + + #[test] + fn should_parse_rpc_settings_with_geth_compatiblity() { + // given + fn assert(conf: Configuration) { + let net = conf.network_settings().unwrap(); + assert_eq!(net.rpc_enabled, true); + assert_eq!(net.rpc_interface, "0.0.0.0".to_owned()); + assert_eq!(net.rpc_port, 8000); + assert_eq!(conf.rpc_cors(), None); + assert_eq!(conf.rpc_apis(), "web3,eth".to_owned()); + } + + // when + let conf1 = parse(&[ + "parity", + "-j", + "--jsonrpc-port", + "8000", + "--jsonrpc-interface", + "all", + "--jsonrpc-cors", + "*", + "--jsonrpc-apis", + "web3,eth", + ]); + let conf2 = parse(&[ + "parity", + "--rpc", + "--rpcport", + "8000", + "--rpcaddr", + "all", + "--rpccorsdomain", + "*", + "--rpcapi", + "web3,eth", + ]); + + // then + assert(conf1); + assert(conf2); + } + + #[test] + fn should_parse_rpc_hosts() { + // given + + // when + let conf0 = parse(&["parity"]); + let conf1 = parse(&["parity", "--jsonrpc-hosts", "none"]); + let conf2 = parse(&["parity", "--jsonrpc-hosts", "all"]); + let conf3 = parse(&["parity", "--jsonrpc-hosts", "parity.io,something.io"]); + + // then + assert_eq!(conf0.rpc_hosts(), Some(Vec::new())); + assert_eq!(conf1.rpc_hosts(), Some(Vec::new())); + assert_eq!(conf2.rpc_hosts(), None); + assert_eq!( + conf3.rpc_hosts(), + Some(vec!["parity.io".into(), 
"something.io".into()]) + ); + } + + #[test] + fn should_parse_ipfs_hosts() { + // given + + // when + let conf0 = parse(&["parity"]); + let conf1 = parse(&["parity", "--ipfs-api-hosts", "none"]); + let conf2 = parse(&["parity", "--ipfs-api-hosts", "all"]); + let conf3 = parse(&["parity", "--ipfs-api-hosts", "parity.io,something.io"]); + + // then + assert_eq!(conf0.ipfs_hosts(), Some(Vec::new())); + assert_eq!(conf1.ipfs_hosts(), Some(Vec::new())); + assert_eq!(conf2.ipfs_hosts(), None); + assert_eq!( + conf3.ipfs_hosts(), + Some(vec!["parity.io".into(), "something.io".into()]) + ); + } + + #[test] + fn should_parse_ipfs_cors() { + // given + + // when + let conf0 = parse(&["parity"]); + let conf1 = parse(&["parity", "--ipfs-api-cors", "*"]); + let conf2 = parse(&[ + "parity", + "--ipfs-api-cors", + "http://parity.io,http://something.io", + ]); + + // then + assert_eq!(conf0.ipfs_cors(), Some(vec![])); + assert_eq!(conf1.ipfs_cors(), None); + assert_eq!( + conf2.ipfs_cors(), + Some(vec![ + "http://parity.io".into(), + "http://something.io".into() + ]) + ); + } + + #[test] + fn should_parse_ui_configuration() { + // given + + // when + let conf0 = parse(&["parity", "--ui-path=signer"]); + let conf1 = parse(&["parity", "--ui-path=signer", "--ui-no-validation"]); + let conf2 = parse(&["parity", "--ui-path=signer", "--ui-port", "3123"]); + let conf3 = parse(&["parity", "--ui-path=signer", "--ui-interface", "test"]); + let conf4 = parse(&["parity", "--ui-path=signer", "--force-ui"]); + + // then + assert_eq!(conf0.directories().signer, "signer".to_owned()); + + assert!(conf1.ws_config().unwrap().hosts.is_some()); + assert_eq!( + conf1.ws_config().unwrap().origins, + Some(vec![ + "parity://*".into(), + "chrome-extension://*".into(), + "moz-extension://*".into() + ]) + ); + assert_eq!(conf1.directories().signer, "signer".to_owned()); + + assert!(conf2.ws_config().unwrap().hosts.is_some()); + assert_eq!(conf2.directories().signer, "signer".to_owned()); + + 
assert!(conf3.ws_config().unwrap().hosts.is_some()); + assert_eq!(conf3.directories().signer, "signer".to_owned()); + + assert!(conf4.ws_config().unwrap().hosts.is_some()); + assert_eq!(conf4.directories().signer, "signer".to_owned()); + } + + #[test] + fn should_not_bail_on_empty_line_in_reserved_peers() { + let tempdir = TempDir::new("").unwrap(); + let filename = tempdir.path().join("peers"); + File::create(&filename) + .unwrap() + .write_all(b" \n\t\n") + .unwrap(); + let args = vec!["parity", "--reserved-peers", filename.to_str().unwrap()]; + let conf = Configuration::parse_cli(&args).unwrap(); + assert!(conf.init_reserved_nodes().is_ok()); + } + + #[test] + fn should_ignore_comments_in_reserved_peers() { + let tempdir = TempDir::new("").unwrap(); + let filename = tempdir.path().join("peers_comments"); + File::create(&filename).unwrap().write_all(b"# Sample comment\nenode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@172.0.0.1:30303\n").unwrap(); + let args = vec!["parity", "--reserved-peers", filename.to_str().unwrap()]; + let conf = Configuration::parse_cli(&args).unwrap(); + let reserved_nodes = conf.init_reserved_nodes(); + assert!(reserved_nodes.is_ok()); + assert_eq!(reserved_nodes.unwrap().len(), 1); + } + + #[test] + fn test_dev_preset() { + let args = vec!["parity", "--config", "dev"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.net_settings.chain, "dev"); + assert_eq!(c.gas_pricer_conf, GasPricerConfig::Fixed(0.into())); + assert_eq!(c.miner_options.reseal_min_period, Duration::from_millis(0)); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn test_mining_preset() { + let args = vec!["parity", "--config", "mining"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.net_conf.min_peers, 
50); + assert_eq!(c.net_conf.max_peers, 100); + assert_eq!(c.ipc_conf.enabled, false); + assert_eq!(c.miner_options.force_sealing, true); + assert_eq!(c.miner_options.reseal_on_external_tx, true); + assert_eq!(c.miner_options.reseal_on_own_tx, true); + assert_eq!( + c.miner_options.reseal_min_period, + Duration::from_millis(4000) + ); + assert_eq!(c.miner_options.pool_limits.max_count, 8192); + assert_eq!(c.cache_config, CacheConfig::new_with_total_cache_size(1024)); + assert_eq!(c.logger_config.mode.unwrap(), "miner=trace,own_tx=trace"); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn test_non_standard_ports_preset() { + let args = vec!["parity", "--config", "non-standard-ports"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.net_settings.network_port, 30305); + assert_eq!(c.net_settings.rpc_port, 8645); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn test_insecure_preset() { + let args = vec!["parity", "--config", "insecure"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.update_policy.require_consensus, false); + assert_eq!(c.net_settings.rpc_interface, "0.0.0.0"); + match c.http_conf.apis { + ApiSet::List(set) => assert_eq!(set, ApiSet::All.list_apis()), + _ => panic!("Incorrect rpc apis"), + } + // "web3,eth,net,personal,parity,parity_set,traces,rpc,parity_accounts"); + assert_eq!(c.http_conf.hosts, None); + assert_eq!(c.ipfs_conf.hosts, None); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn test_dev_insecure_preset() { + let args = vec!["parity", "--config", "dev-insecure"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.net_settings.chain, "dev"); + assert_eq!(c.gas_pricer_conf, GasPricerConfig::Fixed(0.into())); + 
assert_eq!(c.miner_options.reseal_min_period, Duration::from_millis(0)); + assert_eq!(c.update_policy.require_consensus, false); + assert_eq!(c.net_settings.rpc_interface, "0.0.0.0"); + match c.http_conf.apis { + ApiSet::List(set) => assert_eq!(set, ApiSet::All.list_apis()), + _ => panic!("Incorrect rpc apis"), + } + // "web3,eth,net,personal,parity,parity_set,traces,rpc,parity_accounts"); + assert_eq!(c.http_conf.hosts, None); + assert_eq!(c.ipfs_conf.hosts, None); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn test_override_preset() { + let args = vec!["parity", "--config", "mining", "--min-peers=99"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.net_conf.min_peers, 99); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn test_identity_arg() { + let args = vec!["parity", "--identity", "Somebody"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.name, "Somebody"); + assert!(c + .net_conf + .client_version + .starts_with("Parity-Ethereum/Somebody/")); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn should_apply_ports_shift() { + // given + + // when + let conf0 = parse(&["parity", "--ports-shift", "1", "--stratum"]); + let conf1 = parse(&["parity", "--ports-shift", "1", "--jsonrpc-port", "8544"]); + + // then + assert_eq!(conf0.net_addresses().unwrap().0.port(), 30304); + assert_eq!(conf0.network_settings().unwrap().network_port, 30304); + assert_eq!(conf0.network_settings().unwrap().rpc_port, 8546); + assert_eq!(conf0.http_config().unwrap().port, 8546); + assert_eq!(conf0.ws_config().unwrap().port, 8547); + assert_eq!(conf0.secretstore_config().unwrap().port, 8084); + assert_eq!(conf0.secretstore_config().unwrap().http_port, 8083); + assert_eq!(conf0.ipfs_config().port, 5002); + assert_eq!(conf0.stratum_options().unwrap().unwrap().port, 8009); + 
+ assert_eq!(conf1.net_addresses().unwrap().0.port(), 30304); + assert_eq!(conf1.network_settings().unwrap().network_port, 30304); + assert_eq!(conf1.network_settings().unwrap().rpc_port, 8545); + assert_eq!(conf1.http_config().unwrap().port, 8545); + assert_eq!(conf1.ws_config().unwrap().port, 8547); + assert_eq!(conf1.secretstore_config().unwrap().port, 8084); + assert_eq!(conf1.secretstore_config().unwrap().http_port, 8083); + assert_eq!(conf1.ipfs_config().port, 5002); + } + + #[test] + fn should_resolve_external_nat_hosts() { + // Ip works + let conf = parse(&["parity", "--nat", "extip:1.1.1.1"]); + assert_eq!( + conf.net_addresses().unwrap().1.unwrap().ip().to_string(), + "1.1.1.1" + ); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Ip with port works, port is discarded + let conf = parse(&["parity", "--nat", "extip:192.168.1.1:123"]); + assert_eq!( + conf.net_addresses().unwrap().1.unwrap().ip().to_string(), + "192.168.1.1" + ); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Hostname works + let conf = parse(&["parity", "--nat", "extip:ethereum.org"]); + assert!(conf.net_addresses().unwrap().1.is_some()); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Hostname works, garbage at the end is discarded + let conf = parse(&["parity", "--nat", "extip:ethereum.org:whatever bla bla 123"]); + assert!(conf.net_addresses().unwrap().1.is_some()); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Garbage is error + let conf = parse(&["parity", "--nat", "extip:blabla"]); + assert!(conf.net_addresses().is_err()); + } + + #[test] + fn should_expose_all_servers() { + // given + + // when + let conf0 = parse(&["parity", "--unsafe-expose"]); + + // then + assert_eq!(&conf0.network_settings().unwrap().rpc_interface, "0.0.0.0"); + assert_eq!(&conf0.http_config().unwrap().interface, "0.0.0.0"); + assert_eq!(conf0.http_config().unwrap().hosts, None); + 
assert_eq!(&conf0.ws_config().unwrap().interface, "0.0.0.0"); + assert_eq!(conf0.ws_config().unwrap().hosts, None); + assert_eq!(conf0.ws_config().unwrap().origins, None); + assert_eq!(&conf0.secretstore_config().unwrap().interface, "0.0.0.0"); + assert_eq!( + &conf0.secretstore_config().unwrap().http_interface, + "0.0.0.0" + ); + assert_eq!(&conf0.ipfs_config().interface, "0.0.0.0"); + assert_eq!(conf0.ipfs_config().hosts, None); + } + + #[test] + fn allow_ips() { + let all = parse(&["parity", "--allow-ips", "all"]); + let private = parse(&["parity", "--allow-ips", "private"]); + let block_custom = parse(&["parity", "--allow-ips", "-10.0.0.0/8"]); + let combo = parse(&["parity", "--allow-ips", "public 10.0.0.0/8 -1.0.0.0/8"]); + let ipv6_custom_public = parse(&["parity", "--allow-ips", "public fc00::/7"]); + let ipv6_custom_private = parse(&["parity", "--allow-ips", "private -fc00::/7"]); + + assert_eq!( + all.ip_filter().unwrap(), + IpFilter { + predefined: AllowIP::All, + custom_allow: vec![], + custom_block: vec![], + } + ); + + assert_eq!( + private.ip_filter().unwrap(), + IpFilter { + predefined: AllowIP::Private, + custom_allow: vec![], + custom_block: vec![], + } + ); + + assert_eq!( + block_custom.ip_filter().unwrap(), + IpFilter { + predefined: AllowIP::All, + custom_allow: vec![], + custom_block: vec![IpNetwork::from_str("10.0.0.0/8").unwrap()], + } + ); + + assert_eq!( + combo.ip_filter().unwrap(), + IpFilter { + predefined: AllowIP::Public, + custom_allow: vec![IpNetwork::from_str("10.0.0.0/8").unwrap()], + custom_block: vec![IpNetwork::from_str("1.0.0.0/8").unwrap()], + } + ); + + assert_eq!( + ipv6_custom_public.ip_filter().unwrap(), + IpFilter { + predefined: AllowIP::Public, + custom_allow: vec![IpNetwork::from_str("fc00::/7").unwrap()], + custom_block: vec![], + } + ); + + assert_eq!( + ipv6_custom_private.ip_filter().unwrap(), + IpFilter { + predefined: AllowIP::Private, + custom_allow: vec![], + custom_block: 
vec![IpNetwork::from_str("fc00::/7").unwrap()], + } + ); + } + + #[test] + fn should_use_correct_cache_path_if_base_is_set() { + use std::path; + + let std = parse(&["parity"]); + let base = parse(&["parity", "--base-path", "/test"]); + + let base_path = ::dir::default_data_path(); + let local_path = ::dir::default_local_path(); + assert_eq!( + std.directories().cache, + dir::helpers::replace_home_and_local(&base_path, &local_path, ::dir::CACHE_PATH) + ); + assert_eq!( + path::Path::new(&base.directories().cache), + path::Path::new("/test/cache") + ); + } + + #[test] + fn should_respect_only_max_peers_and_default() { + let args = vec!["parity", "--max-peers=50"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.net_conf.min_peers, 25); + assert_eq!(c.net_conf.max_peers, 50); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn should_respect_only_max_peers_less_than_default() { + let args = vec!["parity", "--max-peers=5"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.net_conf.min_peers, 5); + assert_eq!(c.net_conf.max_peers, 5); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn should_respect_only_min_peers_and_default() { + let args = vec!["parity", "--min-peers=5"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.net_conf.min_peers, 5); + assert_eq!(c.net_conf.max_peers, 50); + } + _ => panic!("Should be Cmd::Run"), + } + } + + #[test] + fn should_respect_only_min_peers_and_greater_than_default() { + let args = vec!["parity", "--min-peers=500"]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.net_conf.min_peers, 500); + assert_eq!(c.net_conf.max_peers, 500); + } + _ => panic!("Should be Cmd::Run"), + } 
+ } } diff --git a/parity/db/mod.rs b/parity/db/mod.rs index 9b4662442..7065884dc 100644 --- a/parity/db/mod.rs +++ b/parity/db/mod.rs @@ -16,10 +16,10 @@ //! Database-related operations. -#[path="rocksdb/mod.rs"] +#[path = "rocksdb/mod.rs"] mod impls; -pub use self::impls::{open_db, restoration_db_handler, migrate}; +pub use self::impls::{migrate, open_db, restoration_db_handler}; #[cfg(feature = "secretstore")] pub use self::impls::open_secretstore_db; diff --git a/parity/db/rocksdb/blooms.rs b/parity/db/rocksdb/blooms.rs index eea913bea..a607ebdd4 100644 --- a/parity/db/rocksdb/blooms.rs +++ b/parity/db/rocksdb/blooms.rs @@ -16,73 +16,68 @@ //! Blooms migration from rocksdb to blooms-db -use std::path::Path; -use ethereum_types::Bloom; +use super::{kvdb_rocksdb::DatabaseConfig, open_database}; use ethcore::error::Error; +use ethereum_types::Bloom; use rlp; -use super::kvdb_rocksdb::DatabaseConfig; -use super::open_database; +use std::path::Path; const LOG_BLOOMS_ELEMENTS_PER_INDEX: u64 = 16; pub fn migrate_blooms>(path: P, config: &DatabaseConfig) -> Result<(), Error> { - // init - let db = open_database(&path.as_ref().to_string_lossy(), config)?; + // init + let db = open_database(&path.as_ref().to_string_lossy(), config)?; - // possible optimization: - // pre-allocate space on disk for faster migration + // possible optimization: + // pre-allocate space on disk for faster migration - // iterate over header blooms and insert them in blooms-db - // Some(3) -> COL_EXTRA - // 3u8 -> ExtrasIndex::BlocksBlooms - // 0u8 -> level 0 - let blooms_iterator = db.key_value() - .iter_from_prefix(Some(3), &[3u8, 0u8]) - .filter(|(key, _)| key.len() == 6) - .take_while(|(key, _)| { - key[0] == 3u8 && key[1] == 0u8 - }) - .map(|(key, group)| { - let index = - (key[2] as u64) << 24 | - (key[3] as u64) << 16 | - (key[4] as u64) << 8 | - (key[5] as u64); - let number = index * LOG_BLOOMS_ELEMENTS_PER_INDEX; + // iterate over header blooms and insert them in blooms-db + // Some(3) 
-> COL_EXTRA + // 3u8 -> ExtrasIndex::BlocksBlooms + // 0u8 -> level 0 + let blooms_iterator = db + .key_value() + .iter_from_prefix(Some(3), &[3u8, 0u8]) + .filter(|(key, _)| key.len() == 6) + .take_while(|(key, _)| key[0] == 3u8 && key[1] == 0u8) + .map(|(key, group)| { + let index = (key[2] as u64) << 24 + | (key[3] as u64) << 16 + | (key[4] as u64) << 8 + | (key[5] as u64); + let number = index * LOG_BLOOMS_ELEMENTS_PER_INDEX; - let blooms = rlp::decode_list::(&group); - (number, blooms) - }); + let blooms = rlp::decode_list::(&group); + (number, blooms) + }); - for (number, blooms) in blooms_iterator { - db.blooms().insert_blooms(number, blooms.iter())?; - } + for (number, blooms) in blooms_iterator { + db.blooms().insert_blooms(number, blooms.iter())?; + } - // iterate over trace blooms and insert them in blooms-db - // Some(4) -> COL_TRACE - // 1u8 -> TraceDBIndex::BloomGroups - // 0u8 -> level 0 - let trace_blooms_iterator = db.key_value() - .iter_from_prefix(Some(4), &[1u8, 0u8]) - .filter(|(key, _)| key.len() == 6) - .take_while(|(key, _)| { - key[0] == 1u8 && key[1] == 0u8 - }) - .map(|(key, group)| { - let index = - (key[2] as u64) | - (key[3] as u64) << 8 | - (key[4] as u64) << 16 | - (key[5] as u64) << 24; - let number = index * LOG_BLOOMS_ELEMENTS_PER_INDEX; + // iterate over trace blooms and insert them in blooms-db + // Some(4) -> COL_TRACE + // 1u8 -> TraceDBIndex::BloomGroups + // 0u8 -> level 0 + let trace_blooms_iterator = db + .key_value() + .iter_from_prefix(Some(4), &[1u8, 0u8]) + .filter(|(key, _)| key.len() == 6) + .take_while(|(key, _)| key[0] == 1u8 && key[1] == 0u8) + .map(|(key, group)| { + let index = (key[2] as u64) + | (key[3] as u64) << 8 + | (key[4] as u64) << 16 + | (key[5] as u64) << 24; + let number = index * LOG_BLOOMS_ELEMENTS_PER_INDEX; - let blooms = rlp::decode_list::(&group); - (number, blooms) - }); + let blooms = rlp::decode_list::(&group); + (number, blooms) + }); - for (number, blooms) in trace_blooms_iterator { - 
db.trace_blooms().insert_blooms(number, blooms.iter())?; - } + for (number, blooms) in trace_blooms_iterator { + db.trace_blooms().insert_blooms(number, blooms.iter())?; + } - Ok(()) + Ok(()) } diff --git a/parity/db/rocksdb/helpers.rs b/parity/db/rocksdb/helpers.rs index 9829cb5a6..40c422ef3 100644 --- a/parity/db/rocksdb/helpers.rs +++ b/parity/db/rocksdb/helpers.rs @@ -14,24 +14,27 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::path::Path; -use ethcore_db::NUM_COLUMNS; -use ethcore::client::{ClientConfig, DatabaseCompactionProfile}; use super::kvdb_rocksdb::{CompactionProfile, DatabaseConfig}; +use ethcore::client::{ClientConfig, DatabaseCompactionProfile}; +use ethcore_db::NUM_COLUMNS; +use std::path::Path; -pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -> CompactionProfile { - match profile { - &DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path), - &DatabaseCompactionProfile::SSD => CompactionProfile::ssd(), - &DatabaseCompactionProfile::HDD => CompactionProfile::hdd(), - } +pub fn compaction_profile( + profile: &DatabaseCompactionProfile, + db_path: &Path, +) -> CompactionProfile { + match profile { + &DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path), + &DatabaseCompactionProfile::SSD => CompactionProfile::ssd(), + &DatabaseCompactionProfile::HDD => CompactionProfile::hdd(), + } } pub fn client_db_config(client_path: &Path, client_config: &ClientConfig) -> DatabaseConfig { - let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS); + let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS); - client_db_config.memory_budget = client_config.db_cache_size; - client_db_config.compaction = compaction_profile(&client_config.db_compaction, &client_path); + client_db_config.memory_budget = client_config.db_cache_size; + client_db_config.compaction = compaction_profile(&client_config.db_compaction, 
&client_path); - client_db_config + client_db_config } diff --git a/parity/db/rocksdb/migration.rs b/parity/db/rocksdb/migration.rs index eec43d233..61ce30904 100644 --- a/parity/db/rocksdb/migration.rs +++ b/parity/db/rocksdb/migration.rs @@ -14,32 +14,34 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fs; -use std::io::{Read, Write, Error as IoError, ErrorKind}; -use std::path::{Path, PathBuf}; -use std::fmt::{Display, Formatter, Error as FmtError}; -use super::migration_rocksdb::{Manager as MigrationManager, Config as MigrationConfig, ChangeColumns}; -use super::kvdb_rocksdb::{CompactionProfile, DatabaseConfig}; -use ethcore::client::DatabaseCompactionProfile; -use ethcore; +use super::{ + kvdb_rocksdb::{CompactionProfile, DatabaseConfig}, + migration_rocksdb::{ChangeColumns, Config as MigrationConfig, Manager as MigrationManager}, +}; +use ethcore::{self, client::DatabaseCompactionProfile}; +use std::{ + fmt::{Display, Error as FmtError, Formatter}, + fs, + io::{Error as IoError, ErrorKind, Read, Write}, + path::{Path, PathBuf}, +}; -use super::helpers; -use super::blooms::migrate_blooms; +use super::{blooms::migrate_blooms, helpers}; /// The migration from v10 to v11. /// Adds a column for node info. pub const TO_V11: ChangeColumns = ChangeColumns { - pre_columns: Some(6), - post_columns: Some(7), - version: 11, + pre_columns: Some(6), + post_columns: Some(7), + version: 11, }; /// The migration from v11 to v12. /// Adds a column for light chain storage. pub const TO_V12: ChangeColumns = ChangeColumns { - pre_columns: Some(7), - post_columns: Some(8), - version: 12, + pre_columns: Some(7), + post_columns: Some(8), + version: 12, }; /// Database is assumed to be at default version, when no version file is found. @@ -56,22 +58,22 @@ const VERSION_FILE_NAME: &'static str = "db_version"; /// Migration related erorrs. 
#[derive(Debug)] pub enum Error { - /// Returned when current version cannot be read or guessed. - UnknownDatabaseVersion, - /// Existing DB is newer than the known one. - FutureDBVersion, - /// Migration is not possible. - MigrationImpossible, - /// Blooms-db migration error. - BloomsDB(ethcore::error::Error), - /// Migration was completed succesfully, - /// but there was a problem with io. - Io(IoError), + /// Returned when current version cannot be read or guessed. + UnknownDatabaseVersion, + /// Existing DB is newer than the known one. + FutureDBVersion, + /// Migration is not possible. + MigrationImpossible, + /// Blooms-db migration error. + BloomsDB(ethcore::error::Error), + /// Migration was completed succesfully, + /// but there was a problem with io. + Io(IoError), } impl Display for Error { - fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { - let out = match *self { + fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { + let out = match *self { Error::UnknownDatabaseVersion => "Current database version cannot be read".into(), Error::FutureDBVersion => "Database was created with newer client version. Upgrade your client or delete DB and resync.".into(), Error::MigrationImpossible => format!("Database migration to version {} is not possible.", CURRENT_VERSION), @@ -79,153 +81,173 @@ impl Display for Error { Error::Io(ref err) => format!("Unexpected io error on DB migration: {}.", err), }; - write!(f, "{}", out) - } + write!(f, "{}", out) + } } impl From for Error { - fn from(err: IoError) -> Self { - Error::Io(err) - } + fn from(err: IoError) -> Self { + Error::Io(err) + } } /// Returns the version file path. fn version_file_path(path: &Path) -> PathBuf { - let mut file_path = path.to_owned(); - file_path.push(VERSION_FILE_NAME); - file_path + let mut file_path = path.to_owned(); + file_path.push(VERSION_FILE_NAME); + file_path } /// Reads current database version from the file at given path. 
/// If the file does not exist returns `DEFAULT_VERSION`. fn current_version(path: &Path) -> Result { - match fs::File::open(version_file_path(path)) { - Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(DEFAULT_VERSION), - Err(_) => Err(Error::UnknownDatabaseVersion), - Ok(mut file) => { - let mut s = String::new(); - file.read_to_string(&mut s).map_err(|_| Error::UnknownDatabaseVersion)?; - u32::from_str_radix(&s, 10).map_err(|_| Error::UnknownDatabaseVersion) - }, - } + match fs::File::open(version_file_path(path)) { + Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(DEFAULT_VERSION), + Err(_) => Err(Error::UnknownDatabaseVersion), + Ok(mut file) => { + let mut s = String::new(); + file.read_to_string(&mut s) + .map_err(|_| Error::UnknownDatabaseVersion)?; + u32::from_str_radix(&s, 10).map_err(|_| Error::UnknownDatabaseVersion) + } + } } /// Writes current database version to the file. /// Creates a new file if the version file does not exist yet. fn update_version(path: &Path) -> Result<(), Error> { - fs::create_dir_all(path)?; - let mut file = fs::File::create(version_file_path(path))?; - file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?; - Ok(()) + fs::create_dir_all(path)?; + let mut file = fs::File::create(version_file_path(path))?; + file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?; + Ok(()) } /// Consolidated database path fn consolidated_database_path(path: &Path) -> PathBuf { - let mut state_path = path.to_owned(); - state_path.push("db"); - state_path + let mut state_path = path.to_owned(); + state_path.push("db"); + state_path } /// Database backup fn backup_database_path(path: &Path) -> PathBuf { - let mut backup_path = path.to_owned(); - backup_path.pop(); - backup_path.push("temp_backup"); - backup_path + let mut backup_path = path.to_owned(); + backup_path.pop(); + backup_path.push("temp_backup"); + backup_path } /// Default migration settings. 
pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> MigrationConfig { - MigrationConfig { - batch_size: BATCH_SIZE, - compaction_profile: *compaction_profile, - } + MigrationConfig { + batch_size: BATCH_SIZE, + compaction_profile: *compaction_profile, + } } /// Migrations on the consolidated database. -fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result { - let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); - manager.add_migration(TO_V11).map_err(|_| Error::MigrationImpossible)?; - manager.add_migration(TO_V12).map_err(|_| Error::MigrationImpossible)?; - Ok(manager) +fn consolidated_database_migrations( + compaction_profile: &CompactionProfile, +) -> Result { + let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); + manager + .add_migration(TO_V11) + .map_err(|_| Error::MigrationImpossible)?; + manager + .add_migration(TO_V12) + .map_err(|_| Error::MigrationImpossible)?; + Ok(manager) } /// Migrates database at given position with given migration rules. 
-fn migrate_database(version: u32, db_path: &Path, mut migrations: MigrationManager) -> Result<(), Error> { - // check if migration is needed - if !migrations.is_needed(version) { - return Ok(()) - } +fn migrate_database( + version: u32, + db_path: &Path, + mut migrations: MigrationManager, +) -> Result<(), Error> { + // check if migration is needed + if !migrations.is_needed(version) { + return Ok(()); + } - let backup_path = backup_database_path(&db_path); - // remove the backup dir if it exists - let _ = fs::remove_dir_all(&backup_path); + let backup_path = backup_database_path(&db_path); + // remove the backup dir if it exists + let _ = fs::remove_dir_all(&backup_path); - // migrate old database to the new one - let temp_path = migrations.execute(&db_path, version)?; + // migrate old database to the new one + let temp_path = migrations.execute(&db_path, version)?; - // completely in-place migration leads to the paths being equal. - // in that case, no need to shuffle directories. - if temp_path == db_path { return Ok(()) } + // completely in-place migration leads to the paths being equal. + // in that case, no need to shuffle directories. 
+ if temp_path == db_path { + return Ok(()); + } - // create backup - fs::rename(&db_path, &backup_path)?; + // create backup + fs::rename(&db_path, &backup_path)?; - // replace the old database with the new one - if let Err(err) = fs::rename(&temp_path, &db_path) { - // if something went wrong, bring back backup - fs::rename(&backup_path, &db_path)?; - return Err(err.into()); - } + // replace the old database with the new one + if let Err(err) = fs::rename(&temp_path, &db_path) { + // if something went wrong, bring back backup + fs::rename(&backup_path, &db_path)?; + return Err(err.into()); + } - // remove backup - fs::remove_dir_all(&backup_path).map_err(Into::into) + // remove backup + fs::remove_dir_all(&backup_path).map_err(Into::into) } fn exists(path: &Path) -> bool { - fs::metadata(path).is_ok() + fs::metadata(path).is_ok() } /// Migrates the database. pub fn migrate(path: &Path, compaction_profile: &DatabaseCompactionProfile) -> Result<(), Error> { - let compaction_profile = helpers::compaction_profile(&compaction_profile, path); + let compaction_profile = helpers::compaction_profile(&compaction_profile, path); - // read version file. - let version = current_version(path)?; + // read version file. + let version = current_version(path)?; - // migrate the databases. - // main db directory may already exists, so let's check if we have blocks dir - if version > CURRENT_VERSION { - return Err(Error::FutureDBVersion); - } + // migrate the databases. + // main db directory may already exists, so let's check if we have blocks dir + if version > CURRENT_VERSION { + return Err(Error::FutureDBVersion); + } - // We are in the latest version, yay! - if version == CURRENT_VERSION { - return Ok(()) - } + // We are in the latest version, yay! 
+ if version == CURRENT_VERSION { + return Ok(()); + } - let db_path = consolidated_database_path(path); + let db_path = consolidated_database_path(path); - // Further migrations - if version < CURRENT_VERSION && exists(&db_path) { - println!("Migrating database from version {} to {}", version, CURRENT_VERSION); - migrate_database(version, &db_path, consolidated_database_migrations(&compaction_profile)?)?; + // Further migrations + if version < CURRENT_VERSION && exists(&db_path) { + println!( + "Migrating database from version {} to {}", + version, CURRENT_VERSION + ); + migrate_database( + version, + &db_path, + consolidated_database_migrations(&compaction_profile)?, + )?; - if version < BLOOMS_DB_VERSION { - println!("Migrating blooms to blooms-db..."); - let db_config = DatabaseConfig { - max_open_files: 64, - memory_budget: None, - compaction: compaction_profile, - columns: ethcore_db::NUM_COLUMNS, - }; + if version < BLOOMS_DB_VERSION { + println!("Migrating blooms to blooms-db..."); + let db_config = DatabaseConfig { + max_open_files: 64, + memory_budget: None, + compaction: compaction_profile, + columns: ethcore_db::NUM_COLUMNS, + }; - migrate_blooms(&db_path, &db_config).map_err(Error::BloomsDB)?; - } + migrate_blooms(&db_path, &db_config).map_err(Error::BloomsDB)?; + } - println!("Migration finished"); - } + println!("Migration finished"); + } - // update version file. - update_version(path) + // update version file. + update_version(path) } diff --git a/parity/db/rocksdb/mod.rs b/parity/db/rocksdb/mod.rs index c7aa0a534..05bd1560d 100644 --- a/parity/db/rocksdb/mod.rs +++ b/parity/db/rocksdb/mod.rs @@ -14,104 +14,115 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+extern crate ethcore_blockchain; extern crate kvdb_rocksdb; extern crate migration_rocksdb; -extern crate ethcore_blockchain; -use std::{io, fs}; -use std::sync::Arc; -use std::path::Path; +use self::{ + ethcore_blockchain::{BlockChainDB, BlockChainDBHandler}, + kvdb_rocksdb::{Database, DatabaseConfig}, +}; use blooms_db; -use ethcore_db::NUM_COLUMNS; use ethcore::client::{ClientConfig, DatabaseCompactionProfile}; +use ethcore_db::NUM_COLUMNS; use kvdb::KeyValueDB; -use self::ethcore_blockchain::{BlockChainDBHandler, BlockChainDB}; -use self::kvdb_rocksdb::{Database, DatabaseConfig}; +use std::{fs, io, path::Path, sync::Arc}; use cache::CacheConfig; mod blooms; -mod migration; mod helpers; +mod migration; pub use self::migration::migrate; struct AppDB { - key_value: Arc, - blooms: blooms_db::Database, - trace_blooms: blooms_db::Database, + key_value: Arc, + blooms: blooms_db::Database, + trace_blooms: blooms_db::Database, } impl BlockChainDB for AppDB { - fn key_value(&self) -> &Arc { - &self.key_value - } + fn key_value(&self) -> &Arc { + &self.key_value + } - fn blooms(&self) -> &blooms_db::Database { - &self.blooms - } + fn blooms(&self) -> &blooms_db::Database { + &self.blooms + } - fn trace_blooms(&self) -> &blooms_db::Database { - &self.trace_blooms - } + fn trace_blooms(&self) -> &blooms_db::Database { + &self.trace_blooms + } } /// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path. 
#[cfg(feature = "secretstore")] pub fn open_secretstore_db(data_path: &str) -> Result, String> { - use std::path::PathBuf; + use std::path::PathBuf; - let mut db_path = PathBuf::from(data_path); - db_path.push("db"); - let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?; - Ok(Arc::new(Database::open_default(&db_path).map_err(|e| format!("Error opening database: {:?}", e))?)) + let mut db_path = PathBuf::from(data_path); + db_path.push("db"); + let db_path = db_path + .to_str() + .ok_or_else(|| "Invalid secretstore path".to_string())?; + Ok(Arc::new( + Database::open_default(&db_path).map_err(|e| format!("Error opening database: {:?}", e))?, + )) } /// Create a restoration db handler using the config generated by `client_path` and `client_config`. -pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig) -> Box { - let client_db_config = helpers::client_db_config(client_path, client_config); +pub fn restoration_db_handler( + client_path: &Path, + client_config: &ClientConfig, +) -> Box { + let client_db_config = helpers::client_db_config(client_path, client_config); - struct RestorationDBHandler { - config: DatabaseConfig, - } + struct RestorationDBHandler { + config: DatabaseConfig, + } - impl BlockChainDBHandler for RestorationDBHandler { - fn open(&self, db_path: &Path) -> io::Result> { - open_database(&db_path.to_string_lossy(), &self.config) - } - } + impl BlockChainDBHandler for RestorationDBHandler { + fn open(&self, db_path: &Path) -> io::Result> { + open_database(&db_path.to_string_lossy(), &self.config) + } + } - Box::new(RestorationDBHandler { - config: client_db_config, - }) + Box::new(RestorationDBHandler { + config: client_db_config, + }) } /// Open a new main DB. 
-pub fn open_db(client_path: &str, cache_config: &CacheConfig, compaction: &DatabaseCompactionProfile) -> io::Result> { - let path = Path::new(client_path); +pub fn open_db( + client_path: &str, + cache_config: &CacheConfig, + compaction: &DatabaseCompactionProfile, +) -> io::Result> { + let path = Path::new(client_path); - let db_config = DatabaseConfig { - memory_budget: Some(cache_config.blockchain() as usize * 1024 * 1024), - compaction: helpers::compaction_profile(&compaction, path), - .. DatabaseConfig::with_columns(NUM_COLUMNS) - }; + let db_config = DatabaseConfig { + memory_budget: Some(cache_config.blockchain() as usize * 1024 * 1024), + compaction: helpers::compaction_profile(&compaction, path), + ..DatabaseConfig::with_columns(NUM_COLUMNS) + }; - open_database(client_path, &db_config) + open_database(client_path, &db_config) } pub fn open_database(client_path: &str, config: &DatabaseConfig) -> io::Result> { - let path = Path::new(client_path); + let path = Path::new(client_path); - let blooms_path = path.join("blooms"); - let trace_blooms_path = path.join("trace_blooms"); - fs::create_dir_all(&blooms_path)?; - fs::create_dir_all(&trace_blooms_path)?; + let blooms_path = path.join("blooms"); + let trace_blooms_path = path.join("trace_blooms"); + fs::create_dir_all(&blooms_path)?; + fs::create_dir_all(&trace_blooms_path)?; - let db = AppDB { - key_value: Arc::new(Database::open(&config, client_path)?), - blooms: blooms_db::Database::open(blooms_path)?, - trace_blooms: blooms_db::Database::open(trace_blooms_path)?, - }; + let db = AppDB { + key_value: Arc::new(Database::open(&config, client_path)?), + blooms: blooms_db::Database::open(blooms_path)?, + trace_blooms: blooms_db::Database::open(trace_blooms_path)?, + }; - Ok(Arc::new(db)) + Ok(Arc::new(db)) } diff --git a/parity/deprecated.rs b/parity/deprecated.rs index 49155225a..ab2c1b012 100644 --- a/parity/deprecated.rs +++ b/parity/deprecated.rs @@ -14,274 +14,287 @@ // You should have received a copy of 
the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt; use cli::Args; +use std::fmt; #[derive(Debug, PartialEq)] pub enum Deprecated { - DoesNothing(&'static str), - Replaced(&'static str, &'static str), - Removed(&'static str), + DoesNothing(&'static str), + Replaced(&'static str, &'static str), + Removed(&'static str), } impl fmt::Display for Deprecated { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default.", s), - Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead.", old, new), - Deprecated::Removed(s) => write!(f, "Option '{}' has been removed and is no longer supported.", s) - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Deprecated::DoesNothing(s) => { + write!(f, "Option '{}' does nothing. It's on by default.", s) + } + Deprecated::Replaced(old, new) => write!( + f, + "Option '{}' is deprecated. Please use '{}' instead.", + old, new + ), + Deprecated::Removed(s) => write!( + f, + "Option '{}' has been removed and is no longer supported.", + s + ), + } + } } pub fn find_deprecated(args: &Args) -> Vec { - let mut result = vec![]; + let mut result = vec![]; - // Removed in 1.6 or before. + // Removed in 1.6 or before. 
- if args.flag_warp { - result.push(Deprecated::DoesNothing("--warp")); - } + if args.flag_warp { + result.push(Deprecated::DoesNothing("--warp")); + } - if args.flag_jsonrpc { - result.push(Deprecated::DoesNothing("--jsonrpc")); - } + if args.flag_jsonrpc { + result.push(Deprecated::DoesNothing("--jsonrpc")); + } - if args.flag_rpc { - result.push(Deprecated::DoesNothing("--rpc")); - } + if args.flag_rpc { + result.push(Deprecated::DoesNothing("--rpc")); + } - if args.flag_jsonrpc_off { - result.push(Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc")); - } + if args.flag_jsonrpc_off { + result.push(Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc")); + } - if args.flag_webapp { - result.push(Deprecated::DoesNothing("--webapp")); - } + if args.flag_webapp { + result.push(Deprecated::DoesNothing("--webapp")); + } - if args.flag_dapps_off { - result.push(Deprecated::Replaced("--dapps-off", "--no-dapps")); - } + if args.flag_dapps_off { + result.push(Deprecated::Replaced("--dapps-off", "--no-dapps")); + } - if args.flag_ipcdisable { - result.push(Deprecated::Replaced("--ipcdisable", "--no-ipc")); - } + if args.flag_ipcdisable { + result.push(Deprecated::Replaced("--ipcdisable", "--no-ipc")); + } - if args.flag_ipc_off { - result.push(Deprecated::Replaced("--ipc-off", "--no-ipc")); - } + if args.flag_ipc_off { + result.push(Deprecated::Replaced("--ipc-off", "--no-ipc")); + } - if args.arg_etherbase.is_some() { - result.push(Deprecated::Replaced("--etherbase", "--author")); - } + if args.arg_etherbase.is_some() { + result.push(Deprecated::Replaced("--etherbase", "--author")); + } - if args.arg_extradata.is_some() { - result.push(Deprecated::Replaced("--extradata", "--extra-data")); - } + if args.arg_extradata.is_some() { + result.push(Deprecated::Replaced("--extradata", "--extra-data")); + } - if args.flag_testnet { - result.push(Deprecated::Replaced("--testnet", "--chain testnet")); - } + if args.flag_testnet { + result.push(Deprecated::Replaced("--testnet", 
"--chain testnet")); + } - if args.flag_nodiscover { - result.push(Deprecated::Replaced("--nodiscover", "--no-discovery")); - } + if args.flag_nodiscover { + result.push(Deprecated::Replaced("--nodiscover", "--no-discovery")); + } - if args.arg_datadir.is_some() { - result.push(Deprecated::Replaced("--datadir", "--base-path")); - } + if args.arg_datadir.is_some() { + result.push(Deprecated::Replaced("--datadir", "--base-path")); + } - if args.arg_networkid.is_some() { - result.push(Deprecated::Replaced("--networkid", "--network-id")); - } + if args.arg_networkid.is_some() { + result.push(Deprecated::Replaced("--networkid", "--network-id")); + } - if args.arg_peers.is_some() { - result.push(Deprecated::Replaced("--peers", "--min-peers")); - } + if args.arg_peers.is_some() { + result.push(Deprecated::Replaced("--peers", "--min-peers")); + } - if args.arg_nodekey.is_some() { - result.push(Deprecated::Replaced("--nodekey", "--node-key")); - } + if args.arg_nodekey.is_some() { + result.push(Deprecated::Replaced("--nodekey", "--node-key")); + } - if args.arg_rpcaddr.is_some() { - result.push(Deprecated::Replaced("--rpcaddr", "--jsonrpc-interface")); - } + if args.arg_rpcaddr.is_some() { + result.push(Deprecated::Replaced("--rpcaddr", "--jsonrpc-interface")); + } - if args.arg_rpcport.is_some() { - result.push(Deprecated::Replaced("--rpcport", "--jsonrpc-port")); - } + if args.arg_rpcport.is_some() { + result.push(Deprecated::Replaced("--rpcport", "--jsonrpc-port")); + } - if args.arg_rpcapi.is_some() { - result.push(Deprecated::Replaced("--rpcapi", "--jsonrpc-api")); - } + if args.arg_rpcapi.is_some() { + result.push(Deprecated::Replaced("--rpcapi", "--jsonrpc-api")); + } - if args.arg_rpccorsdomain.is_some() { - result.push(Deprecated::Replaced("--rpccorsdomain", "--jsonrpc-cors")); - } + if args.arg_rpccorsdomain.is_some() { + result.push(Deprecated::Replaced("--rpccorsdomain", "--jsonrpc-cors")); + } - if args.arg_ipcapi.is_some() { - 
result.push(Deprecated::Replaced("--ipcapi", "--ipc-apis")); - } + if args.arg_ipcapi.is_some() { + result.push(Deprecated::Replaced("--ipcapi", "--ipc-apis")); + } - if args.arg_ipcpath.is_some() { - result.push(Deprecated::Replaced("--ipcpath", "--ipc-path")); - } + if args.arg_ipcpath.is_some() { + result.push(Deprecated::Replaced("--ipcpath", "--ipc-path")); + } - if args.arg_gasprice.is_some() { - result.push(Deprecated::Replaced("--gasprice", "--min-gas-price")); - } + if args.arg_gasprice.is_some() { + result.push(Deprecated::Replaced("--gasprice", "--min-gas-price")); + } - if args.arg_cache.is_some() { - result.push(Deprecated::Replaced("--cache", "--cache-size")); - } + if args.arg_cache.is_some() { + result.push(Deprecated::Replaced("--cache", "--cache-size")); + } - // Removed in 1.7. + // Removed in 1.7. - if args.arg_dapps_port.is_some() { - result.push(Deprecated::Removed("--dapps-port")); - } + if args.arg_dapps_port.is_some() { + result.push(Deprecated::Removed("--dapps-port")); + } - if args.arg_dapps_interface.is_some() { - result.push(Deprecated::Removed("--dapps-interface")); - } + if args.arg_dapps_interface.is_some() { + result.push(Deprecated::Removed("--dapps-interface")); + } - if args.arg_dapps_hosts.is_some() { - result.push(Deprecated::Removed("--dapps-hosts")); - } + if args.arg_dapps_hosts.is_some() { + result.push(Deprecated::Removed("--dapps-hosts")); + } - if args.arg_dapps_cors.is_some() { - result.push(Deprecated::Removed("--dapps-cors")); - } + if args.arg_dapps_cors.is_some() { + result.push(Deprecated::Removed("--dapps-cors")); + } - if args.arg_dapps_user.is_some() { - result.push(Deprecated::Removed("--dapps-user")); - } + if args.arg_dapps_user.is_some() { + result.push(Deprecated::Removed("--dapps-user")); + } - if args.arg_dapps_pass.is_some() { - result.push(Deprecated::Removed("--dapps-pass")); - } + if args.arg_dapps_pass.is_some() { + result.push(Deprecated::Removed("--dapps-pass")); + } - if args.flag_dapps_apis_all 
{ - result.push(Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis")); - } + if args.flag_dapps_apis_all { + result.push(Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis")); + } - // Removed in 1.11. + // Removed in 1.11. - if args.flag_public_node { - result.push(Deprecated::Removed("--public-node")); - } + if args.flag_public_node { + result.push(Deprecated::Removed("--public-node")); + } - if args.flag_force_ui { - result.push(Deprecated::Removed("--force-ui")); - } + if args.flag_force_ui { + result.push(Deprecated::Removed("--force-ui")); + } - if args.flag_no_ui { - result.push(Deprecated::Removed("--no-ui")); - } + if args.flag_no_ui { + result.push(Deprecated::Removed("--no-ui")); + } - if args.flag_ui_no_validation { - result.push(Deprecated::Removed("--ui-no-validation")); - } + if args.flag_ui_no_validation { + result.push(Deprecated::Removed("--ui-no-validation")); + } - if args.arg_ui_interface.is_some() { - result.push(Deprecated::Removed("--ui-interface")); - } + if args.arg_ui_interface.is_some() { + result.push(Deprecated::Removed("--ui-interface")); + } - if args.arg_ui_hosts.is_some() { - result.push(Deprecated::Removed("--ui-hosts")); - } + if args.arg_ui_hosts.is_some() { + result.push(Deprecated::Removed("--ui-hosts")); + } - if args.arg_ui_port.is_some() { - result.push(Deprecated::Removed("--ui-port")); - } + if args.arg_ui_port.is_some() { + result.push(Deprecated::Removed("--ui-port")); + } - if args.arg_tx_queue_ban_count.is_some() { - result.push(Deprecated::Removed("--tx-queue-ban-count")); - } + if args.arg_tx_queue_ban_count.is_some() { + result.push(Deprecated::Removed("--tx-queue-ban-count")); + } - if args.arg_tx_queue_ban_time.is_some() { - result.push(Deprecated::Removed("--tx-queue-ban-time")); - } + if args.arg_tx_queue_ban_time.is_some() { + result.push(Deprecated::Removed("--tx-queue-ban-time")); + } - // Removed in 2.0. + // Removed in 2.0. 
- if args.flag_fast_and_loose { - result.push(Deprecated::Removed("--fast-and-loose")); - } + if args.flag_fast_and_loose { + result.push(Deprecated::Removed("--fast-and-loose")); + } - if args.cmd_dapp { - result.push(Deprecated::Removed("parity dapp")); - } + if args.cmd_dapp { + result.push(Deprecated::Removed("parity dapp")); + } - if args.arg_dapp_path.is_some() { - result.push(Deprecated::Removed("--dapp-path")); - } + if args.arg_dapp_path.is_some() { + result.push(Deprecated::Removed("--dapp-path")); + } - if args.flag_no_dapps { - result.push(Deprecated::Removed("--no-dapps")); - } + if args.flag_no_dapps { + result.push(Deprecated::Removed("--no-dapps")); + } - if args.arg_dapps_path.is_some() { - result.push(Deprecated::Removed("--dapps-path")); - } + if args.arg_dapps_path.is_some() { + result.push(Deprecated::Removed("--dapps-path")); + } - if args.arg_ntp_servers.is_some() { - result.push(Deprecated::Removed("--ntp-servers")); - } + if args.arg_ntp_servers.is_some() { + result.push(Deprecated::Removed("--ntp-servers")); + } - result + result } #[cfg(test)] mod tests { - use cli::Args; - use super::{Deprecated, find_deprecated}; + use super::{find_deprecated, Deprecated}; + use cli::Args; - #[test] - fn test_find_deprecated() { - assert_eq!(find_deprecated(&Args::default()), vec![]); - assert_eq!(find_deprecated(&{ - let mut args = Args::default(); - args.flag_warp = true; - args.flag_jsonrpc = true; - args.flag_rpc = true; - args.flag_jsonrpc_off = true; - args.flag_webapp = true; - args.flag_dapps_off = true; - args.flag_ipcdisable = true; - args.flag_ipc_off = true; - args.arg_etherbase = Some(Default::default()); - args.arg_extradata = Some(Default::default()); - args.arg_dapps_port = Some(Default::default()); - args.arg_dapps_interface = Some(Default::default()); - args.arg_dapps_hosts = Some(Default::default()); - args.arg_dapps_cors = Some(Default::default()); - args.arg_dapps_user = Some(Default::default()); - args.arg_dapps_pass = 
Some(Default::default()); - args.flag_dapps_apis_all = true; - args.flag_fast_and_loose = true; - args.arg_ntp_servers = Some(Default::default()); - args - }), vec![ - Deprecated::DoesNothing("--warp"), - Deprecated::DoesNothing("--jsonrpc"), - Deprecated::DoesNothing("--rpc"), - Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc"), - Deprecated::DoesNothing("--webapp"), - Deprecated::Replaced("--dapps-off", "--no-dapps"), - Deprecated::Replaced("--ipcdisable", "--no-ipc"), - Deprecated::Replaced("--ipc-off", "--no-ipc"), - Deprecated::Replaced("--etherbase", "--author"), - Deprecated::Replaced("--extradata", "--extra-data"), - Deprecated::Removed("--dapps-port"), - Deprecated::Removed("--dapps-interface"), - Deprecated::Removed("--dapps-hosts"), - Deprecated::Removed("--dapps-cors"), - Deprecated::Removed("--dapps-user"), - Deprecated::Removed("--dapps-pass"), - Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis"), - Deprecated::Removed("--fast-and-loose"), - Deprecated::Removed("--ntp-servers"), - ]); - } + #[test] + fn test_find_deprecated() { + assert_eq!(find_deprecated(&Args::default()), vec![]); + assert_eq!( + find_deprecated(&{ + let mut args = Args::default(); + args.flag_warp = true; + args.flag_jsonrpc = true; + args.flag_rpc = true; + args.flag_jsonrpc_off = true; + args.flag_webapp = true; + args.flag_dapps_off = true; + args.flag_ipcdisable = true; + args.flag_ipc_off = true; + args.arg_etherbase = Some(Default::default()); + args.arg_extradata = Some(Default::default()); + args.arg_dapps_port = Some(Default::default()); + args.arg_dapps_interface = Some(Default::default()); + args.arg_dapps_hosts = Some(Default::default()); + args.arg_dapps_cors = Some(Default::default()); + args.arg_dapps_user = Some(Default::default()); + args.arg_dapps_pass = Some(Default::default()); + args.flag_dapps_apis_all = true; + args.flag_fast_and_loose = true; + args.arg_ntp_servers = Some(Default::default()); + args + }), + vec![ + 
Deprecated::DoesNothing("--warp"), + Deprecated::DoesNothing("--jsonrpc"), + Deprecated::DoesNothing("--rpc"), + Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc"), + Deprecated::DoesNothing("--webapp"), + Deprecated::Replaced("--dapps-off", "--no-dapps"), + Deprecated::Replaced("--ipcdisable", "--no-ipc"), + Deprecated::Replaced("--ipc-off", "--no-ipc"), + Deprecated::Replaced("--etherbase", "--author"), + Deprecated::Replaced("--extradata", "--extra-data"), + Deprecated::Removed("--dapps-port"), + Deprecated::Removed("--dapps-interface"), + Deprecated::Removed("--dapps-hosts"), + Deprecated::Removed("--dapps-cors"), + Deprecated::Removed("--dapps-user"), + Deprecated::Removed("--dapps-pass"), + Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis"), + Deprecated::Removed("--fast-and-loose"), + Deprecated::Removed("--ntp-servers"), + ] + ); + } } diff --git a/parity/export_hardcoded_sync.rs b/parity/export_hardcoded_sync.rs index 0e527b341..4adb034d1 100644 --- a/parity/export_hardcoded_sync.rs +++ b/parity/export_hardcoded_sync.rs @@ -14,20 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; -use ethcore::client::DatabaseCompactionProfile; -use ethcore::spec::{SpecParams, OptimizeFor}; -use light::client::fetch::Unavailable as UnavailableDataFetcher; -use light::Cache as LightDataCache; +use ethcore::{ + client::DatabaseCompactionProfile, + spec::{OptimizeFor, SpecParams}, +}; +use light::{client::fetch::Unavailable as UnavailableDataFetcher, Cache as LightDataCache}; -use params::{SpecType, Pruning}; -use helpers::execute_upgrades; -use dir::Directories; use cache::CacheConfig; -use user_defaults::UserDefaults; use db; +use dir::Directories; +use helpers::execute_upgrades; +use params::{Pruning, SpecType}; +use user_defaults::UserDefaults; // Number of minutes before a given gas price corpus should expire. // Light client only. @@ -35,69 +35,87 @@ const GAS_CORPUS_EXPIRATION_MINUTES: u64 = 60 * 6; #[derive(Debug, PartialEq)] pub struct ExportHsyncCmd { - pub cache_config: CacheConfig, - pub dirs: Directories, - pub spec: SpecType, - pub pruning: Pruning, - pub compaction: DatabaseCompactionProfile, + pub cache_config: CacheConfig, + pub dirs: Directories, + pub spec: SpecType, + pub pruning: Pruning, + pub compaction: DatabaseCompactionProfile, } pub fn execute(cmd: ExportHsyncCmd) -> Result { - use light::client as light_client; - use parking_lot::Mutex; + use light::client as light_client; + use parking_lot::Mutex; - // load spec - let spec = cmd.spec.spec(SpecParams::new(cmd.dirs.cache.as_ref(), OptimizeFor::Memory))?; + // load spec + let spec = cmd.spec.spec(SpecParams::new( + cmd.dirs.cache.as_ref(), + OptimizeFor::Memory, + ))?; - // load genesis hash - let genesis_hash = spec.genesis_header().hash(); + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); - // database paths - let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone()); + // database paths + let db_dirs = cmd.dirs.database( + 
genesis_hash, + cmd.spec.legacy_fork_name(), + spec.data_dir.clone(), + ); - // user defaults path - let user_defaults_path = db_dirs.user_defaults_path(); + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); - // load user defaults - let user_defaults = UserDefaults::load(&user_defaults_path)?; + // load user defaults + let user_defaults = UserDefaults::load(&user_defaults_path)?; - // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&user_defaults); - // execute upgrades - execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; + // execute upgrades + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; - // create dirs used by parity - cmd.dirs.create_dirs(false, false)?; + // create dirs used by parity + cmd.dirs.create_dirs(false, false)?; - // TODO: configurable cache size. - let cache = LightDataCache::new(Default::default(), Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES)); - let cache = Arc::new(Mutex::new(cache)); + // TODO: configurable cache size. + let cache = LightDataCache::new( + Default::default(), + Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES), + ); + let cache = Arc::new(Mutex::new(cache)); - // start client and create transaction queue. - let mut config = light_client::Config { - queue: Default::default(), - chain_column: ::ethcore_db::COL_LIGHT_CHAIN, - verify_full: true, - check_seal: true, - no_hardcoded_sync: true, - }; + // start client and create transaction queue. + let mut config = light_client::Config { + queue: Default::default(), + chain_column: ::ethcore_db::COL_LIGHT_CHAIN, + verify_full: true, + check_seal: true, + no_hardcoded_sync: true, + }; - config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; + config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; - // initialize database. 
- let db = db::open_db(&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."), - &cmd.cache_config, - &cmd.compaction).map_err(|e| format!("Failed to open database {:?}", e))?; + // initialize database. + let db = db::open_db( + &db_dirs + .client_path(algorithm) + .to_str() + .expect("DB path could not be converted to string."), + &cmd.cache_config, + &cmd.compaction, + ) + .map_err(|e| format!("Failed to open database {:?}", e))?; - let service = light_client::Service::start(config, &spec, UnavailableDataFetcher, db, cache) - .map_err(|e| format!("Error starting light client: {}", e))?; + let service = light_client::Service::start(config, &spec, UnavailableDataFetcher, db, cache) + .map_err(|e| format!("Error starting light client: {}", e))?; - let hs = service.client().read_hardcoded_sync() - .map_err(|e| format!("Error reading hardcoded sync: {}", e))?; - if let Some(hs) = hs { - Ok(::serde_json::to_string_pretty(&hs.to_json()).expect("generated JSON is always valid")) - } else { - Err("Error: cannot generate hardcoded sync because the database is empty.".into()) - } + let hs = service + .client() + .read_hardcoded_sync() + .map_err(|e| format!("Error reading hardcoded sync: {}", e))?; + if let Some(hs) = hs { + Ok(::serde_json::to_string_pretty(&hs.to_json()).expect("generated JSON is always valid")) + } else { + Err("Error: cannot generate hardcoded sync because the database is empty.".into()) + } } diff --git a/parity/helpers.rs b/parity/helpers.rs index 6ebfc991b..26363a9cd 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -14,311 +14,369 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::io; -use std::io::{Write, BufReader, BufRead}; -use std::time::Duration; -use std::fs::File; -use std::collections::HashSet; -use ethereum_types::{U256, clean_0x, Address}; -use journaldb::Algorithm; -use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType}; -use ethcore::miner::{PendingSet, Penalization}; -use miner::pool::PrioritizationStrategy; use cache::CacheConfig; -use dir::DatabaseDirectories; -use dir::helpers::replace_home; -use upgrade::{upgrade, upgrade_data_paths}; -use sync::{validate_node_url, self}; use db::migrate; -use path; +use dir::{helpers::replace_home, DatabaseDirectories}; +use ethcore::{ + client::{BlockId, ClientConfig, DatabaseCompactionProfile, Mode, VMType, VerifierType}, + miner::{Penalization, PendingSet}, +}; +use ethereum_types::{clean_0x, Address, U256}; use ethkey::Password; +use journaldb::Algorithm; +use miner::pool::PrioritizationStrategy; +use path; +use std::{ + collections::HashSet, + fs::File, + io, + io::{BufRead, BufReader, Write}, + time::Duration, +}; +use sync::{self, validate_node_url}; +use upgrade::{upgrade, upgrade_data_paths}; pub fn to_duration(s: &str) -> Result { - to_seconds(s).map(Duration::from_secs) + to_seconds(s).map(Duration::from_secs) } fn to_seconds(s: &str) -> Result { - let bad = |_| { - format!("{}: Invalid duration given. See parity --help for more information.", s) - }; + let bad = |_| { + format!( + "{}: Invalid duration given. 
See parity --help for more information.", + s + ) + }; - match s { - "twice-daily" => Ok(12 * 60 * 60), - "half-hourly" => Ok(30 * 60), - "1second" | "1 second" | "second" => Ok(1), - "1minute" | "1 minute" | "minute" => Ok(60), - "hourly" | "1hour" | "1 hour" | "hour" => Ok(60 * 60), - "daily" | "1day" | "1 day" | "day" => Ok(24 * 60 * 60), - x if x.ends_with("seconds") => x[0..x.len() - 7].trim().parse().map_err(bad), - x if x.ends_with("minutes") => x[0..x.len() - 7].trim().parse::().map_err(bad).map(|x| x * 60), - x if x.ends_with("hours") => x[0..x.len() - 5].trim().parse::().map_err(bad).map(|x| x * 60 * 60), - x if x.ends_with("days") => x[0..x.len() - 4].trim().parse::().map_err(bad).map(|x| x * 24 * 60 * 60), - x => x.trim().parse().map_err(bad), - } + match s { + "twice-daily" => Ok(12 * 60 * 60), + "half-hourly" => Ok(30 * 60), + "1second" | "1 second" | "second" => Ok(1), + "1minute" | "1 minute" | "minute" => Ok(60), + "hourly" | "1hour" | "1 hour" | "hour" => Ok(60 * 60), + "daily" | "1day" | "1 day" | "day" => Ok(24 * 60 * 60), + x if x.ends_with("seconds") => x[0..x.len() - 7].trim().parse().map_err(bad), + x if x.ends_with("minutes") => x[0..x.len() - 7] + .trim() + .parse::() + .map_err(bad) + .map(|x| x * 60), + x if x.ends_with("hours") => x[0..x.len() - 5] + .trim() + .parse::() + .map_err(bad) + .map(|x| x * 60 * 60), + x if x.ends_with("days") => x[0..x.len() - 4] + .trim() + .parse::() + .map_err(bad) + .map(|x| x * 24 * 60 * 60), + x => x.trim().parse().map_err(bad), + } } pub fn to_mode(s: &str, timeout: u64, alarm: u64) -> Result { - match s { - "active" => Ok(Mode::Active), - "passive" => Ok(Mode::Passive(Duration::from_secs(timeout), Duration::from_secs(alarm))), - "dark" => Ok(Mode::Dark(Duration::from_secs(timeout))), - "offline" => Ok(Mode::Off), - _ => Err(format!("{}: Invalid value for --mode. 
Must be one of active, passive, dark or offline.", s)), - } + match s { + "active" => Ok(Mode::Active), + "passive" => Ok(Mode::Passive( + Duration::from_secs(timeout), + Duration::from_secs(alarm), + )), + "dark" => Ok(Mode::Dark(Duration::from_secs(timeout))), + "offline" => Ok(Mode::Off), + _ => Err(format!( + "{}: Invalid value for --mode. Must be one of active, passive, dark or offline.", + s + )), + } } pub fn to_block_id(s: &str) -> Result { - if s == "latest" { - Ok(BlockId::Latest) - } else if let Ok(num) = s.parse() { - Ok(BlockId::Number(num)) - } else if let Ok(hash) = s.parse() { - Ok(BlockId::Hash(hash)) - } else { - Err("Invalid block.".into()) - } + if s == "latest" { + Ok(BlockId::Latest) + } else if let Ok(num) = s.parse() { + Ok(BlockId::Number(num)) + } else if let Ok(hash) = s.parse() { + Ok(BlockId::Hash(hash)) + } else { + Err("Invalid block.".into()) + } } pub fn to_u256(s: &str) -> Result { - if let Ok(decimal) = U256::from_dec_str(s) { - Ok(decimal) - } else if let Ok(hex) = clean_0x(s).parse() { - Ok(hex) - } else { - Err(format!("Invalid numeric value: {}", s)) - } + if let Ok(decimal) = U256::from_dec_str(s) { + Ok(decimal) + } else if let Ok(hex) = clean_0x(s).parse() { + Ok(hex) + } else { + Err(format!("Invalid numeric value: {}", s)) + } } pub fn to_pending_set(s: &str) -> Result { - match s { - "cheap" => Ok(PendingSet::AlwaysQueue), - "strict" => Ok(PendingSet::AlwaysSealing), - "lenient" => Ok(PendingSet::SealingOrElseQueue), - other => Err(format!("Invalid pending set value: {:?}", other)), - } + match s { + "cheap" => Ok(PendingSet::AlwaysQueue), + "strict" => Ok(PendingSet::AlwaysSealing), + "lenient" => Ok(PendingSet::SealingOrElseQueue), + other => Err(format!("Invalid pending set value: {:?}", other)), + } } pub fn to_queue_strategy(s: &str) -> Result { - match s { - "gas_price" => Ok(PrioritizationStrategy::GasPriceOnly), - other => Err(format!("Invalid queue strategy: {}", other)), - } + match s { + "gas_price" => 
Ok(PrioritizationStrategy::GasPriceOnly), + other => Err(format!("Invalid queue strategy: {}", other)), + } } pub fn to_queue_penalization(time: Option) -> Result { - Ok(match time { - Some(threshold_ms) => Penalization::Enabled { - offend_threshold: Duration::from_millis(threshold_ms), - }, - None => Penalization::Disabled, - }) + Ok(match time { + Some(threshold_ms) => Penalization::Enabled { + offend_threshold: Duration::from_millis(threshold_ms), + }, + None => Penalization::Disabled, + }) } pub fn to_address(s: Option) -> Result { - match s { - Some(ref a) => clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)), - None => Ok(Address::default()) - } + match s { + Some(ref a) => clean_0x(a) + .parse() + .map_err(|_| format!("Invalid address: {:?}", a)), + None => Ok(Address::default()), + } } pub fn to_addresses(s: &Option) -> Result, String> { - match *s { - Some(ref adds) if !adds.is_empty() => adds.split(',') - .map(|a| clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a))) - .collect(), - _ => Ok(Vec::new()), - } + match *s { + Some(ref adds) if !adds.is_empty() => adds + .split(',') + .map(|a| { + clean_0x(a) + .parse() + .map_err(|_| format!("Invalid address: {:?}", a)) + }) + .collect(), + _ => Ok(Vec::new()), + } } /// Tries to parse string as a price. pub fn to_price(s: &str) -> Result { - s.parse::().map_err(|_| format!("Invalid transaction price {:?} given. Must be a decimal number.", s)) + s.parse::().map_err(|_| { + format!( + "Invalid transaction price {:?} given. Must be a decimal number.", + s + ) + }) } pub fn join_set(set: Option<&HashSet>) -> Option { - match set { - Some(s) => Some(s.iter().map(|s| s.as_str()).collect::>().join(",")), - None => None - } + match set { + Some(s) => Some( + s.iter() + .map(|s| s.as_str()) + .collect::>() + .join(","), + ), + None => None, + } } /// Flush output buffer. 
pub fn flush_stdout() { - io::stdout().flush().expect("stdout is flushable; qed"); + io::stdout().flush().expect("stdout is flushable; qed"); } /// Returns default geth ipc path. pub fn geth_ipc_path(testnet: bool) -> String { - // Windows path should not be hardcoded here. - // Instead it should be a part of path::ethereum - if cfg!(windows) { - return r"\\.\pipe\geth.ipc".to_owned(); - } + // Windows path should not be hardcoded here. + // Instead it should be a part of path::ethereum + if cfg!(windows) { + return r"\\.\pipe\geth.ipc".to_owned(); + } - if testnet { - path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned() - } else { - path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned() - } + if testnet { + path::ethereum::with_testnet("geth.ipc") + .to_str() + .unwrap() + .to_owned() + } else { + path::ethereum::with_default("geth.ipc") + .to_str() + .unwrap() + .to_owned() + } } /// Formats and returns parity ipc path. pub fn parity_ipc_path(base: &str, path: &str, shift: u16) -> String { - let mut path = path.to_owned(); - if shift != 0 { - path = path.replace("jsonrpc.ipc", &format!("jsonrpc-{}.ipc", shift)); - } - replace_home(base, &path) + let mut path = path.to_owned(); + if shift != 0 { + path = path.replace("jsonrpc.ipc", &format!("jsonrpc-{}.ipc", shift)); + } + replace_home(base, &path) } /// Validates and formats bootnodes option. 
pub fn to_bootnodes(bootnodes: &Option) -> Result, String> { - match *bootnodes { - Some(ref x) if !x.is_empty() => x.split(',').map(|s| { - match validate_node_url(s).map(Into::into) { - None => Ok(s.to_owned()), - Some(sync::ErrorKind::AddressResolve(_)) => Err(format!("Failed to resolve hostname of a boot node: {}", s)), - Some(_) => Err(format!("Invalid node address format given for a boot node: {}", s)), - } - }).collect(), - Some(_) => Ok(vec![]), - None => Ok(vec![]) - } + match *bootnodes { + Some(ref x) if !x.is_empty() => x + .split(',') + .map(|s| match validate_node_url(s).map(Into::into) { + None => Ok(s.to_owned()), + Some(sync::ErrorKind::AddressResolve(_)) => { + Err(format!("Failed to resolve hostname of a boot node: {}", s)) + } + Some(_) => Err(format!( + "Invalid node address format given for a boot node: {}", + s + )), + }) + .collect(), + Some(_) => Ok(vec![]), + None => Ok(vec![]), + } } #[cfg(test)] pub fn default_network_config() -> ::sync::NetworkConfiguration { - use sync::{NetworkConfiguration}; - use super::network::IpFilter; - NetworkConfiguration { - config_path: Some(replace_home(&::dir::default_data_path(), "$BASE/network")), - net_config_path: None, - listen_address: Some("0.0.0.0:30303".into()), - public_address: None, - udp_port: None, - nat_enabled: true, - discovery_enabled: true, - boot_nodes: Vec::new(), - use_secret: None, - max_peers: 50, - min_peers: 25, - snapshot_peers: 0, - max_pending_peers: 64, - ip_filter: IpFilter::default(), - reserved_nodes: Vec::new(), - allow_non_reserved: true, - client_version: ::parity_version::version(), - } + use super::network::IpFilter; + use sync::NetworkConfiguration; + NetworkConfiguration { + config_path: Some(replace_home(&::dir::default_data_path(), "$BASE/network")), + net_config_path: None, + listen_address: Some("0.0.0.0:30303".into()), + public_address: None, + udp_port: None, + nat_enabled: true, + discovery_enabled: true, + boot_nodes: Vec::new(), + use_secret: None, + 
max_peers: 50, + min_peers: 25, + snapshot_peers: 0, + max_pending_peers: 64, + ip_filter: IpFilter::default(), + reserved_nodes: Vec::new(), + allow_non_reserved: true, + client_version: ::parity_version::version(), + } } pub fn to_client_config( - cache_config: &CacheConfig, - spec_name: String, - mode: Mode, - tracing: bool, - fat_db: bool, - compaction: DatabaseCompactionProfile, - vm_type: VMType, - name: String, - pruning: Algorithm, - pruning_history: u64, - pruning_memory: usize, - check_seal: bool, - max_round_blocks_to_import: usize, + cache_config: &CacheConfig, + spec_name: String, + mode: Mode, + tracing: bool, + fat_db: bool, + compaction: DatabaseCompactionProfile, + vm_type: VMType, + name: String, + pruning: Algorithm, + pruning_history: u64, + pruning_memory: usize, + check_seal: bool, + max_round_blocks_to_import: usize, ) -> ClientConfig { - let mut client_config = ClientConfig::default(); + let mut client_config = ClientConfig::default(); - let mb = 1024 * 1024; - // in bytes - client_config.blockchain.max_cache_size = cache_config.blockchain() as usize * mb; - // in bytes - client_config.blockchain.pref_cache_size = cache_config.blockchain() as usize * 3 / 4 * mb; - // db cache size, in megabytes - client_config.db_cache_size = Some(cache_config.db_cache_size() as usize); - // db queue cache size, in bytes - client_config.queue.max_mem_use = cache_config.queue() as usize * mb; - // in bytes - client_config.tracing.max_cache_size = cache_config.traces() as usize * mb; - // in bytes - client_config.tracing.pref_cache_size = cache_config.traces() as usize * 3 / 4 * mb; - // in bytes - client_config.state_cache_size = cache_config.state() as usize * mb; - // in bytes - client_config.jump_table_size = cache_config.jump_tables() as usize * mb; - // in bytes - client_config.history_mem = pruning_memory * mb; + let mb = 1024 * 1024; + // in bytes + client_config.blockchain.max_cache_size = cache_config.blockchain() as usize * mb; + // in bytes + 
client_config.blockchain.pref_cache_size = cache_config.blockchain() as usize * 3 / 4 * mb; + // db cache size, in megabytes + client_config.db_cache_size = Some(cache_config.db_cache_size() as usize); + // db queue cache size, in bytes + client_config.queue.max_mem_use = cache_config.queue() as usize * mb; + // in bytes + client_config.tracing.max_cache_size = cache_config.traces() as usize * mb; + // in bytes + client_config.tracing.pref_cache_size = cache_config.traces() as usize * 3 / 4 * mb; + // in bytes + client_config.state_cache_size = cache_config.state() as usize * mb; + // in bytes + client_config.jump_table_size = cache_config.jump_tables() as usize * mb; + // in bytes + client_config.history_mem = pruning_memory * mb; - client_config.mode = mode; - client_config.tracing.enabled = tracing; - client_config.fat_db = fat_db; - client_config.pruning = pruning; - client_config.history = pruning_history; - client_config.db_compaction = compaction; - client_config.vm_type = vm_type; - client_config.name = name; - client_config.verifier_type = if check_seal { VerifierType::Canon } else { VerifierType::CanonNoSeal }; - client_config.spec_name = spec_name; - client_config.max_round_blocks_to_import = max_round_blocks_to_import; - client_config + client_config.mode = mode; + client_config.tracing.enabled = tracing; + client_config.fat_db = fat_db; + client_config.pruning = pruning; + client_config.history = pruning_history; + client_config.db_compaction = compaction; + client_config.vm_type = vm_type; + client_config.name = name; + client_config.verifier_type = if check_seal { + VerifierType::Canon + } else { + VerifierType::CanonNoSeal + }; + client_config.spec_name = spec_name; + client_config.max_round_blocks_to_import = max_round_blocks_to_import; + client_config } pub fn execute_upgrades( - base_path: &str, - dirs: &DatabaseDirectories, - pruning: Algorithm, - compaction_profile: &DatabaseCompactionProfile + base_path: &str, + dirs: &DatabaseDirectories, + 
pruning: Algorithm, + compaction_profile: &DatabaseCompactionProfile, ) -> Result<(), String> { + upgrade_data_paths(base_path, dirs, pruning); - upgrade_data_paths(base_path, dirs, pruning); + match upgrade(&dirs.path) { + Ok(upgrades_applied) if upgrades_applied > 0 => { + debug!("Executed {} upgrade scripts - ok", upgrades_applied); + } + Err(e) => { + return Err(format!("Error upgrading parity data: {:?}", e)); + } + _ => {} + } - match upgrade(&dirs.path) { - Ok(upgrades_applied) if upgrades_applied > 0 => { - debug!("Executed {} upgrade scripts - ok", upgrades_applied); - }, - Err(e) => { - return Err(format!("Error upgrading parity data: {:?}", e)); - }, - _ => {}, - } - - let client_path = dirs.db_path(pruning); - migrate(&client_path, compaction_profile).map_err(|e| format!("{}", e)) + let client_path = dirs.db_path(pruning); + migrate(&client_path, compaction_profile).map_err(|e| format!("{}", e)) } /// Prompts user asking for password. pub fn password_prompt() -> Result { - use rpassword::read_password; - const STDIN_ERROR: &'static str = "Unable to ask for password on non-interactive terminal."; + use rpassword::read_password; + const STDIN_ERROR: &'static str = "Unable to ask for password on non-interactive terminal."; - println!("Please note that password is NOT RECOVERABLE."); - print!("Type password: "); - flush_stdout(); + println!("Please note that password is NOT RECOVERABLE."); + print!("Type password: "); + flush_stdout(); - let password = read_password().map_err(|_| STDIN_ERROR.to_owned())?.into(); + let password = read_password().map_err(|_| STDIN_ERROR.to_owned())?.into(); - print!("Repeat password: "); - flush_stdout(); + print!("Repeat password: "); + flush_stdout(); - let password_repeat = read_password().map_err(|_| STDIN_ERROR.to_owned())?.into(); + let password_repeat = read_password().map_err(|_| STDIN_ERROR.to_owned())?.into(); - if password != password_repeat { - return Err("Passwords do not match!".into()); - } + if password != 
password_repeat { + return Err("Passwords do not match!".into()); + } - Ok(password) + Ok(password) } /// Read a password from password file. pub fn password_from_file(path: String) -> Result { - let passwords = passwords_from_files(&[path])?; - // use only first password from the file - passwords.get(0).map(Password::clone) - .ok_or_else(|| "Password file seems to be empty.".to_owned()) + let passwords = passwords_from_files(&[path])?; + // use only first password from the file + passwords + .get(0) + .map(Password::clone) + .ok_or_else(|| "Password file seems to be empty.".to_owned()) } /// Reads passwords from files. Treats each line as a separate password. pub fn passwords_from_files(files: &[String]) -> Result, String> { - let passwords = files.iter().map(|filename| { + let passwords = files.iter().map(|filename| { let file = File::open(filename).map_err(|_| format!("{} Unable to read password file. Ensure it exists and permissions are correct.", filename))?; let reader = BufReader::new(&file); let lines = reader.lines() @@ -327,174 +385,258 @@ pub fn passwords_from_files(files: &[String]) -> Result, String> { .collect::>(); Ok(lines) }).collect::>, String>>(); - Ok(passwords?.into_iter().flat_map(|x| x).collect()) + Ok(passwords?.into_iter().flat_map(|x| x).collect()) } #[cfg(test)] mod tests { - use std::time::Duration; - use std::fs::File; - use std::io::Write; - use std::collections::HashSet; - use tempdir::TempDir; - use ethereum_types::U256; - use ethcore::client::{Mode, BlockId}; - use ethcore::miner::PendingSet; - use ethkey::Password; - use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_addresses, to_price, geth_ipc_path, to_bootnodes, join_set, password_from_file}; + use super::{ + geth_ipc_path, join_set, password_from_file, to_address, to_addresses, to_block_id, + to_bootnodes, to_duration, to_mode, to_pending_set, to_price, to_u256, + }; + use ethcore::{ + client::{BlockId, Mode}, + miner::PendingSet, + }; + use 
ethereum_types::U256; + use ethkey::Password; + use std::{collections::HashSet, fs::File, io::Write, time::Duration}; + use tempdir::TempDir; - #[test] - fn test_to_duration() { - assert_eq!(to_duration("twice-daily").unwrap(), Duration::from_secs(12 * 60 * 60)); - assert_eq!(to_duration("half-hourly").unwrap(), Duration::from_secs(30 * 60)); - assert_eq!(to_duration("1second").unwrap(), Duration::from_secs(1)); - assert_eq!(to_duration("2seconds").unwrap(), Duration::from_secs(2)); - assert_eq!(to_duration("15seconds").unwrap(), Duration::from_secs(15)); - assert_eq!(to_duration("1minute").unwrap(), Duration::from_secs(1 * 60)); - assert_eq!(to_duration("2minutes").unwrap(), Duration::from_secs(2 * 60)); - assert_eq!(to_duration("15minutes").unwrap(), Duration::from_secs(15 * 60)); - assert_eq!(to_duration("hourly").unwrap(), Duration::from_secs(60 * 60)); - assert_eq!(to_duration("daily").unwrap(), Duration::from_secs(24 * 60 * 60)); - assert_eq!(to_duration("1hour").unwrap(), Duration::from_secs(1 * 60 * 60)); - assert_eq!(to_duration("2hours").unwrap(), Duration::from_secs(2 * 60 * 60)); - assert_eq!(to_duration("15hours").unwrap(), Duration::from_secs(15 * 60 * 60)); - assert_eq!(to_duration("1day").unwrap(), Duration::from_secs(1 * 24 * 60 * 60)); - assert_eq!(to_duration("2days").unwrap(), Duration::from_secs(2 * 24 *60 * 60)); - assert_eq!(to_duration("15days").unwrap(), Duration::from_secs(15 * 24 * 60 * 60)); - assert_eq!(to_duration("15 days").unwrap(), Duration::from_secs(15 * 24 * 60 * 60)); - assert_eq!(to_duration("2 seconds").unwrap(), Duration::from_secs(2)); - } + #[test] + fn test_to_duration() { + assert_eq!( + to_duration("twice-daily").unwrap(), + Duration::from_secs(12 * 60 * 60) + ); + assert_eq!( + to_duration("half-hourly").unwrap(), + Duration::from_secs(30 * 60) + ); + assert_eq!(to_duration("1second").unwrap(), Duration::from_secs(1)); + assert_eq!(to_duration("2seconds").unwrap(), Duration::from_secs(2)); + 
assert_eq!(to_duration("15seconds").unwrap(), Duration::from_secs(15)); + assert_eq!(to_duration("1minute").unwrap(), Duration::from_secs(1 * 60)); + assert_eq!( + to_duration("2minutes").unwrap(), + Duration::from_secs(2 * 60) + ); + assert_eq!( + to_duration("15minutes").unwrap(), + Duration::from_secs(15 * 60) + ); + assert_eq!(to_duration("hourly").unwrap(), Duration::from_secs(60 * 60)); + assert_eq!( + to_duration("daily").unwrap(), + Duration::from_secs(24 * 60 * 60) + ); + assert_eq!( + to_duration("1hour").unwrap(), + Duration::from_secs(1 * 60 * 60) + ); + assert_eq!( + to_duration("2hours").unwrap(), + Duration::from_secs(2 * 60 * 60) + ); + assert_eq!( + to_duration("15hours").unwrap(), + Duration::from_secs(15 * 60 * 60) + ); + assert_eq!( + to_duration("1day").unwrap(), + Duration::from_secs(1 * 24 * 60 * 60) + ); + assert_eq!( + to_duration("2days").unwrap(), + Duration::from_secs(2 * 24 * 60 * 60) + ); + assert_eq!( + to_duration("15days").unwrap(), + Duration::from_secs(15 * 24 * 60 * 60) + ); + assert_eq!( + to_duration("15 days").unwrap(), + Duration::from_secs(15 * 24 * 60 * 60) + ); + assert_eq!(to_duration("2 seconds").unwrap(), Duration::from_secs(2)); + } - #[test] - fn test_to_mode() { - assert_eq!(to_mode("active", 0, 0).unwrap(), Mode::Active); - assert_eq!(to_mode("passive", 10, 20).unwrap(), Mode::Passive(Duration::from_secs(10), Duration::from_secs(20))); - assert_eq!(to_mode("dark", 20, 30).unwrap(), Mode::Dark(Duration::from_secs(20))); - assert!(to_mode("other", 20, 30).is_err()); - } + #[test] + fn test_to_mode() { + assert_eq!(to_mode("active", 0, 0).unwrap(), Mode::Active); + assert_eq!( + to_mode("passive", 10, 20).unwrap(), + Mode::Passive(Duration::from_secs(10), Duration::from_secs(20)) + ); + assert_eq!( + to_mode("dark", 20, 30).unwrap(), + Mode::Dark(Duration::from_secs(20)) + ); + assert!(to_mode("other", 20, 30).is_err()); + } - #[test] - fn test_to_block_id() { - assert_eq!(to_block_id("latest").unwrap(), 
BlockId::Latest); - assert_eq!(to_block_id("0").unwrap(), BlockId::Number(0)); - assert_eq!(to_block_id("2").unwrap(), BlockId::Number(2)); - assert_eq!(to_block_id("15").unwrap(), BlockId::Number(15)); - assert_eq!( - to_block_id("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e").unwrap(), - BlockId::Hash("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e".parse().unwrap()) - ); - } + #[test] + fn test_to_block_id() { + assert_eq!(to_block_id("latest").unwrap(), BlockId::Latest); + assert_eq!(to_block_id("0").unwrap(), BlockId::Number(0)); + assert_eq!(to_block_id("2").unwrap(), BlockId::Number(2)); + assert_eq!(to_block_id("15").unwrap(), BlockId::Number(15)); + assert_eq!( + to_block_id("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e") + .unwrap(), + BlockId::Hash( + "9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e" + .parse() + .unwrap() + ) + ); + } - #[test] - fn test_to_u256() { - assert_eq!(to_u256("0").unwrap(), U256::from(0)); - assert_eq!(to_u256("11").unwrap(), U256::from(11)); - assert_eq!(to_u256("0x11").unwrap(), U256::from(17)); - assert!(to_u256("u").is_err()) - } + #[test] + fn test_to_u256() { + assert_eq!(to_u256("0").unwrap(), U256::from(0)); + assert_eq!(to_u256("11").unwrap(), U256::from(11)); + assert_eq!(to_u256("0x11").unwrap(), U256::from(17)); + assert!(to_u256("u").is_err()) + } - #[test] - fn test_pending_set() { - assert_eq!(to_pending_set("cheap").unwrap(), PendingSet::AlwaysQueue); - assert_eq!(to_pending_set("strict").unwrap(), PendingSet::AlwaysSealing); - assert_eq!(to_pending_set("lenient").unwrap(), PendingSet::SealingOrElseQueue); - assert!(to_pending_set("othe").is_err()); - } + #[test] + fn test_pending_set() { + assert_eq!(to_pending_set("cheap").unwrap(), PendingSet::AlwaysQueue); + assert_eq!(to_pending_set("strict").unwrap(), PendingSet::AlwaysSealing); + assert_eq!( + to_pending_set("lenient").unwrap(), + PendingSet::SealingOrElseQueue + ); + 
assert!(to_pending_set("othe").is_err()); + } - #[test] - fn test_to_address() { - assert_eq!( - to_address(Some("0xD9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(), - "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap() - ); - assert_eq!( - to_address(Some("D9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(), - "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap() - ); - assert_eq!(to_address(None).unwrap(), Default::default()); - } + #[test] + fn test_to_address() { + assert_eq!( + to_address(Some("0xD9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(), + "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap() + ); + assert_eq!( + to_address(Some("D9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(), + "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap() + ); + assert_eq!(to_address(None).unwrap(), Default::default()); + } - #[test] - fn test_to_addresses() { - let addresses = to_addresses(&Some("0xD9A111feda3f362f55Ef1744347CDC8Dd9964a41,D9A111feda3f362f55Ef1744347CDC8Dd9964a42".into())).unwrap(); - assert_eq!( - addresses, - vec![ - "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap(), - "D9A111feda3f362f55Ef1744347CDC8Dd9964a42".parse().unwrap(), - ] - ); - } + #[test] + fn test_to_addresses() { + let addresses = to_addresses(&Some( + "0xD9A111feda3f362f55Ef1744347CDC8Dd9964a41,D9A111feda3f362f55Ef1744347CDC8Dd9964a42" + .into(), + )) + .unwrap(); + assert_eq!( + addresses, + vec![ + "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap(), + "D9A111feda3f362f55Ef1744347CDC8Dd9964a42".parse().unwrap(), + ] + ); + } - #[test] - fn test_password() { - let tempdir = TempDir::new("").unwrap(); - let path = tempdir.path().join("file"); - let mut file = File::create(&path).unwrap(); - file.write_all(b"a bc ").unwrap(); - assert_eq!(password_from_file(path.to_str().unwrap().into()).unwrap().as_bytes(), b"a bc"); - } + #[test] + fn test_password() { + let tempdir = TempDir::new("").unwrap(); + let path 
= tempdir.path().join("file"); + let mut file = File::create(&path).unwrap(); + file.write_all(b"a bc ").unwrap(); + assert_eq!( + password_from_file(path.to_str().unwrap().into()) + .unwrap() + .as_bytes(), + b"a bc" + ); + } - #[test] - fn test_password_multiline() { - let tempdir = TempDir::new("").unwrap(); - let path = tempdir.path().join("file"); - let mut file = File::create(path.as_path()).unwrap(); - file.write_all(br#" password with trailing whitespace + #[test] + fn test_password_multiline() { + let tempdir = TempDir::new("").unwrap(); + let path = tempdir.path().join("file"); + let mut file = File::create(path.as_path()).unwrap(); + file.write_all( + br#" password with trailing whitespace those passwords should be ignored but the first password is trimmed -"#).unwrap(); - assert_eq!(password_from_file(path.to_str().unwrap().into()).unwrap(), Password::from("password with trailing whitespace")); - } +"#, + ) + .unwrap(); + assert_eq!( + password_from_file(path.to_str().unwrap().into()).unwrap(), + Password::from("password with trailing whitespace") + ); + } - #[test] - fn test_to_price() { - assert_eq!(to_price("1").unwrap(), 1.0); - assert_eq!(to_price("2.3").unwrap(), 2.3); - assert_eq!(to_price("2.33").unwrap(), 2.33); - } + #[test] + fn test_to_price() { + assert_eq!(to_price("1").unwrap(), 1.0); + assert_eq!(to_price("2.3").unwrap(), 2.3); + assert_eq!(to_price("2.33").unwrap(), 2.33); + } - #[test] - #[cfg(windows)] - fn test_geth_ipc_path() { - assert_eq!(geth_ipc_path(true), r"\\.\pipe\geth.ipc".to_owned()); - assert_eq!(geth_ipc_path(false), r"\\.\pipe\geth.ipc".to_owned()); - } + #[test] + #[cfg(windows)] + fn test_geth_ipc_path() { + assert_eq!(geth_ipc_path(true), r"\\.\pipe\geth.ipc".to_owned()); + assert_eq!(geth_ipc_path(false), r"\\.\pipe\geth.ipc".to_owned()); + } - #[test] - #[cfg(not(windows))] - fn test_geth_ipc_path() { - use path; - assert_eq!(geth_ipc_path(true), 
path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned()); - assert_eq!(geth_ipc_path(false), path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned()); - } + #[test] + #[cfg(not(windows))] + fn test_geth_ipc_path() { + use path; + assert_eq!( + geth_ipc_path(true), + path::ethereum::with_testnet("geth.ipc") + .to_str() + .unwrap() + .to_owned() + ); + assert_eq!( + geth_ipc_path(false), + path::ethereum::with_default("geth.ipc") + .to_str() + .unwrap() + .to_owned() + ); + } - #[test] - fn test_to_bootnodes() { - let one_bootnode = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303"; - let two_bootnodes = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303,enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303"; + #[test] + fn test_to_bootnodes() { + let one_bootnode = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303"; + let two_bootnodes = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303,enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303"; - assert_eq!(to_bootnodes(&Some("".into())), Ok(vec![])); - assert_eq!(to_bootnodes(&None), Ok(vec![])); - assert_eq!(to_bootnodes(&Some(one_bootnode.into())), Ok(vec![one_bootnode.into()])); - assert_eq!(to_bootnodes(&Some(two_bootnodes.into())), Ok(vec![one_bootnode.into(), one_bootnode.into()])); - } + assert_eq!(to_bootnodes(&Some("".into())), Ok(vec![])); + 
assert_eq!(to_bootnodes(&None), Ok(vec![])); + assert_eq!( + to_bootnodes(&Some(one_bootnode.into())), + Ok(vec![one_bootnode.into()]) + ); + assert_eq!( + to_bootnodes(&Some(two_bootnodes.into())), + Ok(vec![one_bootnode.into(), one_bootnode.into()]) + ); + } - #[test] - fn test_join_set() { - let mut test_set = HashSet::new(); - test_set.insert("0x1111111111111111111111111111111111111111".to_string()); - test_set.insert("0x0000000000000000000000000000000000000000".to_string()); + #[test] + fn test_join_set() { + let mut test_set = HashSet::new(); + test_set.insert("0x1111111111111111111111111111111111111111".to_string()); + test_set.insert("0x0000000000000000000000000000000000000000".to_string()); + let res = join_set(Some(&test_set)).unwrap(); - let res = join_set(Some(&test_set)).unwrap(); - - assert!( + assert!( res == "0x1111111111111111111111111111111111111111,0x0000000000000000000000000000000000000000" || res == "0x0000000000000000000000000000000000000000,0x1111111111111111111111111111111111111111" ); - } + } } diff --git a/parity/informant.rs b/parity/informant.rs index 78d055686..7743c9d79 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -15,437 +15,481 @@ // along with Parity Ethereum. If not, see . 
extern crate ansi_term; -use self::ansi_term::Colour::{White, Yellow, Green, Cyan, Blue}; -use self::ansi_term::{Colour, Style}; +use self::ansi_term::{ + Colour, + Colour::{Blue, Cyan, Green, White, Yellow}, + Style, +}; -use std::sync::{Arc}; -use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; -use std::time::{Instant, Duration}; +use std::{ + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering}, + Arc, + }, + time::{Duration, Instant}, +}; use atty; -use ethcore::client::{ - BlockId, BlockChainClient, ChainInfo, BlockInfo, BlockChainInfo, - BlockQueueInfo, ChainNotify, NewBlocks, ClientReport, Client, ClientIoMessage +use ethcore::{ + client::{ + BlockChainClient, BlockChainInfo, BlockId, BlockInfo, BlockQueueInfo, ChainInfo, + ChainNotify, Client, ClientIoMessage, ClientReport, NewBlocks, + }, + snapshot::{service::Service as SnapshotService, RestorationStatus, SnapshotService as SS}, }; -use types::BlockNumber; -use ethcore::snapshot::{RestorationStatus, SnapshotService as SS}; -use ethcore::snapshot::service::Service as SnapshotService; -use sync::{LightSyncProvider, LightSync, SyncProvider, ManageNetwork}; -use io::{TimerToken, IoContext, IoHandler}; -use light::Cache as LightDataCache; -use light::client::{LightChainClient, LightChainNotify}; -use number_prefix::{binary_prefix, Standalone, Prefixed}; -use parity_rpc::is_major_importing_or_waiting; -use parity_rpc::informant::RpcStats; use ethereum_types::H256; -use parking_lot::{RwLock, Mutex}; +use io::{IoContext, IoHandler, TimerToken}; +use light::{ + client::{LightChainClient, LightChainNotify}, + Cache as LightDataCache, +}; +use number_prefix::{binary_prefix, Prefixed, Standalone}; +use parity_rpc::{informant::RpcStats, is_major_importing_or_waiting}; +use parking_lot::{Mutex, RwLock}; +use sync::{LightSync, LightSyncProvider, ManageNetwork, SyncProvider}; +use types::BlockNumber; /// Format byte counts to standard denominations. 
pub fn format_bytes(b: usize) -> String { - match binary_prefix(b as f64) { - Standalone(bytes) => format!("{} bytes", bytes), - Prefixed(prefix, n) => format!("{:.0} {}B", n, prefix), - } + match binary_prefix(b as f64) { + Standalone(bytes) => format!("{} bytes", bytes), + Prefixed(prefix, n) => format!("{:.0} {}B", n, prefix), + } } /// Something that can be converted to milliseconds. pub trait MillisecondDuration { - /// Get the value in milliseconds. - fn as_milliseconds(&self) -> u64; + /// Get the value in milliseconds. + fn as_milliseconds(&self) -> u64; } impl MillisecondDuration for Duration { - fn as_milliseconds(&self) -> u64 { - self.as_secs() * 1000 + self.subsec_nanos() as u64 / 1_000_000 - } + fn as_milliseconds(&self) -> u64 { + self.as_secs() * 1000 + self.subsec_nanos() as u64 / 1_000_000 + } } #[derive(Default)] struct CacheSizes { - sizes: ::std::collections::BTreeMap<&'static str, usize>, + sizes: ::std::collections::BTreeMap<&'static str, usize>, } impl CacheSizes { - fn insert(&mut self, key: &'static str, bytes: usize) { - self.sizes.insert(key, bytes); - } + fn insert(&mut self, key: &'static str, bytes: usize) { + self.sizes.insert(key, bytes); + } - fn display(&self, style: Style, paint: F) -> String - where F: Fn(Style, String) -> String - { - use std::fmt::Write; + fn display(&self, style: Style, paint: F) -> String + where + F: Fn(Style, String) -> String, + { + use std::fmt::Write; - let mut buf = String::new(); - for (name, &size) in &self.sizes { + let mut buf = String::new(); + for (name, &size) in &self.sizes { + write!(buf, " {:>8} {}", paint(style, format_bytes(size)), name) + .expect("writing to string won't fail unless OOM; qed") + } - write!(buf, " {:>8} {}", paint(style, format_bytes(size)), name) - .expect("writing to string won't fail unless OOM; qed") - } - - buf - } + buf + } } pub struct SyncInfo { - last_imported_block_number: BlockNumber, - last_imported_old_block_number: Option, - num_peers: usize, - max_peers: u32, 
- snapshot_sync: bool, + last_imported_block_number: BlockNumber, + last_imported_old_block_number: Option, + num_peers: usize, + max_peers: u32, + snapshot_sync: bool, } pub struct Report { - importing: bool, - chain_info: BlockChainInfo, - client_report: ClientReport, - queue_info: BlockQueueInfo, - cache_sizes: CacheSizes, - sync_info: Option, + importing: bool, + chain_info: BlockChainInfo, + client_report: ClientReport, + queue_info: BlockQueueInfo, + cache_sizes: CacheSizes, + sync_info: Option, } /// Something which can provide data to the informant. pub trait InformantData: Send + Sync { - /// Whether it executes transactions - fn executes_transactions(&self) -> bool; + /// Whether it executes transactions + fn executes_transactions(&self) -> bool; - /// Whether it is currently importing (also included in `Report`) - fn is_major_importing(&self) -> bool; + /// Whether it is currently importing (also included in `Report`) + fn is_major_importing(&self) -> bool; - /// Generate a report of blockchain status, memory usage, and sync info. - fn report(&self) -> Report; + /// Generate a report of blockchain status, memory usage, and sync info. + fn report(&self) -> Report; } /// Informant data for a full node. 
pub struct FullNodeInformantData { - pub client: Arc, - pub sync: Option>, - pub net: Option>, + pub client: Arc, + pub sync: Option>, + pub net: Option>, } impl InformantData for FullNodeInformantData { - fn executes_transactions(&self) -> bool { true } + fn executes_transactions(&self) -> bool { + true + } - fn is_major_importing(&self) -> bool { - let state = self.sync.as_ref().map(|sync| sync.status().state); - is_major_importing_or_waiting(state, self.client.queue_info(), false) - } + fn is_major_importing(&self) -> bool { + let state = self.sync.as_ref().map(|sync| sync.status().state); + is_major_importing_or_waiting(state, self.client.queue_info(), false) + } - fn report(&self) -> Report { - let (client_report, queue_info, blockchain_cache_info) = - (self.client.report(), self.client.queue_info(), self.client.blockchain_cache_info()); + fn report(&self) -> Report { + let (client_report, queue_info, blockchain_cache_info) = ( + self.client.report(), + self.client.queue_info(), + self.client.blockchain_cache_info(), + ); - let chain_info = self.client.chain_info(); + let chain_info = self.client.chain_info(); - let mut cache_sizes = CacheSizes::default(); - cache_sizes.insert("db", client_report.state_db_mem); - cache_sizes.insert("queue", queue_info.mem_used); - cache_sizes.insert("chain", blockchain_cache_info.total()); + let mut cache_sizes = CacheSizes::default(); + cache_sizes.insert("db", client_report.state_db_mem); + cache_sizes.insert("queue", queue_info.mem_used); + cache_sizes.insert("chain", blockchain_cache_info.total()); - let importing = self.is_major_importing(); - let sync_info = match (self.sync.as_ref(), self.net.as_ref()) { - (Some(sync), Some(net)) => { - let status = sync.status(); - let num_peers_range = net.num_peers_range(); - debug_assert!(num_peers_range.end() >= num_peers_range.start()); + let importing = self.is_major_importing(); + let sync_info = match (self.sync.as_ref(), self.net.as_ref()) { + (Some(sync), Some(net)) => { + 
let status = sync.status(); + let num_peers_range = net.num_peers_range(); + debug_assert!(num_peers_range.end() >= num_peers_range.start()); - cache_sizes.insert("sync", status.mem_used); + cache_sizes.insert("sync", status.mem_used); - Some(SyncInfo { - last_imported_block_number: status.last_imported_block_number.unwrap_or(chain_info.best_block_number), - last_imported_old_block_number: status.last_imported_old_block_number, - num_peers: status.num_peers, - max_peers: status.current_max_peers(*num_peers_range.start(), *num_peers_range.end()), - snapshot_sync: status.is_snapshot_syncing(), - }) - } - _ => None - }; + Some(SyncInfo { + last_imported_block_number: status + .last_imported_block_number + .unwrap_or(chain_info.best_block_number), + last_imported_old_block_number: status.last_imported_old_block_number, + num_peers: status.num_peers, + max_peers: status + .current_max_peers(*num_peers_range.start(), *num_peers_range.end()), + snapshot_sync: status.is_snapshot_syncing(), + }) + } + _ => None, + }; - Report { - importing, - chain_info, - client_report, - queue_info, - cache_sizes, - sync_info, - } - } + Report { + importing, + chain_info, + client_report, + queue_info, + cache_sizes, + sync_info, + } + } } /// Informant data for a light node -- note that the network is required. 
pub struct LightNodeInformantData { - pub client: Arc, - pub sync: Arc, - pub cache: Arc>, + pub client: Arc, + pub sync: Arc, + pub cache: Arc>, } impl InformantData for LightNodeInformantData { - fn executes_transactions(&self) -> bool { false } + fn executes_transactions(&self) -> bool { + false + } - fn is_major_importing(&self) -> bool { - self.sync.is_major_importing() - } + fn is_major_importing(&self) -> bool { + self.sync.is_major_importing() + } - fn report(&self) -> Report { - let (client_report, queue_info, chain_info) = - (self.client.report(), self.client.queue_info(), self.client.chain_info()); + fn report(&self) -> Report { + let (client_report, queue_info, chain_info) = ( + self.client.report(), + self.client.queue_info(), + self.client.chain_info(), + ); - let mut cache_sizes = CacheSizes::default(); - cache_sizes.insert("queue", queue_info.mem_used); - cache_sizes.insert("cache", self.cache.lock().mem_used()); + let mut cache_sizes = CacheSizes::default(); + cache_sizes.insert("queue", queue_info.mem_used); + cache_sizes.insert("cache", self.cache.lock().mem_used()); - let peer_numbers = self.sync.peer_numbers(); - let sync_info = Some(SyncInfo { - last_imported_block_number: chain_info.best_block_number, - last_imported_old_block_number: None, - num_peers: peer_numbers.connected, - max_peers: peer_numbers.max as u32, - snapshot_sync: false, - }); + let peer_numbers = self.sync.peer_numbers(); + let sync_info = Some(SyncInfo { + last_imported_block_number: chain_info.best_block_number, + last_imported_old_block_number: None, + num_peers: peer_numbers.connected, + max_peers: peer_numbers.max as u32, + snapshot_sync: false, + }); - Report { - importing: self.sync.is_major_importing(), - chain_info, - client_report, - queue_info, - cache_sizes, - sync_info, - } - } + Report { + importing: self.sync.is_major_importing(), + chain_info, + client_report, + queue_info, + cache_sizes, + sync_info, + } + } } pub struct Informant { - last_tick: RwLock, - 
with_color: bool, - target: T, - snapshot: Option>, - rpc_stats: Option>, - last_import: Mutex, - skipped: AtomicUsize, - skipped_txs: AtomicUsize, - in_shutdown: AtomicBool, - last_report: Mutex, + last_tick: RwLock, + with_color: bool, + target: T, + snapshot: Option>, + rpc_stats: Option>, + last_import: Mutex, + skipped: AtomicUsize, + skipped_txs: AtomicUsize, + in_shutdown: AtomicBool, + last_report: Mutex, } impl Informant { - /// Make a new instance potentially `with_color` output. - pub fn new( - target: T, - snapshot: Option>, - rpc_stats: Option>, - with_color: bool, - ) -> Self { - Informant { - last_tick: RwLock::new(Instant::now()), - with_color: with_color, - target: target, - snapshot: snapshot, - rpc_stats: rpc_stats, - last_import: Mutex::new(Instant::now()), - skipped: AtomicUsize::new(0), - skipped_txs: AtomicUsize::new(0), - in_shutdown: AtomicBool::new(false), - last_report: Mutex::new(Default::default()), - } - } + /// Make a new instance potentially `with_color` output. + pub fn new( + target: T, + snapshot: Option>, + rpc_stats: Option>, + with_color: bool, + ) -> Self { + Informant { + last_tick: RwLock::new(Instant::now()), + with_color: with_color, + target: target, + snapshot: snapshot, + rpc_stats: rpc_stats, + last_import: Mutex::new(Instant::now()), + skipped: AtomicUsize::new(0), + skipped_txs: AtomicUsize::new(0), + in_shutdown: AtomicBool::new(false), + last_report: Mutex::new(Default::default()), + } + } - /// Signal that we're shutting down; no more output necessary. - pub fn shutdown(&self) { - self.in_shutdown.store(true, ::std::sync::atomic::Ordering::SeqCst); - } + /// Signal that we're shutting down; no more output necessary. 
+ pub fn shutdown(&self) { + self.in_shutdown + .store(true, ::std::sync::atomic::Ordering::SeqCst); + } - pub fn tick(&self) { - let now = Instant::now(); - let elapsed = now.duration_since(*self.last_tick.read()); + pub fn tick(&self) { + let now = Instant::now(); + let elapsed = now.duration_since(*self.last_tick.read()); - let (client_report, full_report) = { - let mut last_report = self.last_report.lock(); - let full_report = self.target.report(); - let diffed = full_report.client_report.clone() - &*last_report; - (diffed, full_report) - }; + let (client_report, full_report) = { + let mut last_report = self.last_report.lock(); + let full_report = self.target.report(); + let diffed = full_report.client_report.clone() - &*last_report; + (diffed, full_report) + }; - let Report { - importing, - chain_info, - queue_info, - cache_sizes, - sync_info, - .. - } = full_report; + let Report { + importing, + chain_info, + queue_info, + cache_sizes, + sync_info, + .. + } = full_report; - let rpc_stats = self.rpc_stats.as_ref(); - let snapshot_sync = sync_info.as_ref().map_or(false, |s| s.snapshot_sync) && self.snapshot.as_ref().map_or(false, |s| - match s.status() { - RestorationStatus::Ongoing { .. } | RestorationStatus::Initializing { .. } => true, - _ => false, - } - ); - if !importing && !snapshot_sync && elapsed < Duration::from_secs(30) { - return; - } + let rpc_stats = self.rpc_stats.as_ref(); + let snapshot_sync = sync_info.as_ref().map_or(false, |s| s.snapshot_sync) + && self.snapshot.as_ref().map_or(false, |s| match s.status() { + RestorationStatus::Ongoing { .. } | RestorationStatus::Initializing { .. 
} => true, + _ => false, + }); + if !importing && !snapshot_sync && elapsed < Duration::from_secs(30) { + return; + } - *self.last_tick.write() = now; - *self.last_report.lock() = full_report.client_report.clone(); + *self.last_tick.write() = now; + *self.last_report.lock() = full_report.client_report.clone(); - let paint = |c: Style, t: String| match self.with_color && atty::is(atty::Stream::Stdout) { - true => format!("{}", c.paint(t)), - false => t, - }; + let paint = |c: Style, t: String| match self.with_color && atty::is(atty::Stream::Stdout) { + true => format!("{}", c.paint(t)), + false => t, + }; - info!(target: "import", "{} {} {} {}", - match importing { - true => match snapshot_sync { - false => format!("Syncing {} {} {} {}+{} Qed", - paint(White.bold(), format!("{:>8}", format!("#{}", chain_info.best_block_number))), - paint(White.bold(), format!("{}", chain_info.best_block_hash)), - if self.target.executes_transactions() { - format!("{} blk/s {} tx/s {} Mgas/s", - paint(Yellow.bold(), format!("{:7.2}", (client_report.blocks_imported * 1000) as f64 / elapsed.as_milliseconds() as f64)), - paint(Yellow.bold(), format!("{:6.1}", (client_report.transactions_applied * 1000) as f64 / elapsed.as_milliseconds() as f64)), - paint(Yellow.bold(), format!("{:6.1}", (client_report.gas_processed / 1000).low_u64() as f64 / elapsed.as_milliseconds() as f64)) - ) - } else { - format!("{} hdr/s", - paint(Yellow.bold(), format!("{:6.1}", (client_report.blocks_imported * 1000) as f64 / elapsed.as_milliseconds() as f64)) - ) - }, - paint(Green.bold(), format!("{:5}", queue_info.unverified_queue_size)), - paint(Green.bold(), format!("{:5}", queue_info.verified_queue_size)) - ), - true => { - self.snapshot.as_ref().map_or(String::new(), |s| - match s.status() { - RestorationStatus::Ongoing { state_chunks, block_chunks, state_chunks_done, block_chunks_done } => { - format!("Syncing snapshot {}/{}", state_chunks_done + block_chunks_done, state_chunks + block_chunks) - }, - 
RestorationStatus::Initializing { chunks_done } => { - format!("Snapshot initializing ({} chunks restored)", chunks_done) - }, - _ => String::new(), - } - ) - }, - }, - false => String::new(), - }, - match sync_info.as_ref() { - Some(ref sync_info) => format!("{}{}/{} peers", - match importing { - true => format!("{}", - if self.target.executes_transactions() { - paint(Green.bold(), format!("{:>8} ", format!("#{}", sync_info.last_imported_block_number))) - } else { - String::new() - } - ), - false => match sync_info.last_imported_old_block_number { - Some(number) => format!("{} ", paint(Yellow.bold(), format!("{:>8}", format!("#{}", number)))), - None => String::new(), - } - }, - paint(Cyan.bold(), format!("{:2}", sync_info.num_peers)), - paint(Cyan.bold(), format!("{:2}", sync_info.max_peers)), - ), - _ => String::new(), - }, - cache_sizes.display(Blue.bold(), &paint), - match rpc_stats { - Some(ref rpc_stats) => format!( - "RPC: {} conn, {} req/s, {} µs", - paint(Blue.bold(), format!("{:2}", rpc_stats.sessions())), - paint(Blue.bold(), format!("{:4}", rpc_stats.requests_rate())), - paint(Blue.bold(), format!("{:4}", rpc_stats.approximated_roundtrip())), - ), - _ => String::new(), - }, - ); - } + info!(target: "import", "{} {} {} {}", + match importing { + true => match snapshot_sync { + false => format!("Syncing {} {} {} {}+{} Qed", + paint(White.bold(), format!("{:>8}", format!("#{}", chain_info.best_block_number))), + paint(White.bold(), format!("{}", chain_info.best_block_hash)), + if self.target.executes_transactions() { + format!("{} blk/s {} tx/s {} Mgas/s", + paint(Yellow.bold(), format!("{:7.2}", (client_report.blocks_imported * 1000) as f64 / elapsed.as_milliseconds() as f64)), + paint(Yellow.bold(), format!("{:6.1}", (client_report.transactions_applied * 1000) as f64 / elapsed.as_milliseconds() as f64)), + paint(Yellow.bold(), format!("{:6.1}", (client_report.gas_processed / 1000).low_u64() as f64 / elapsed.as_milliseconds() as f64)) + ) + } else { + 
format!("{} hdr/s", + paint(Yellow.bold(), format!("{:6.1}", (client_report.blocks_imported * 1000) as f64 / elapsed.as_milliseconds() as f64)) + ) + }, + paint(Green.bold(), format!("{:5}", queue_info.unverified_queue_size)), + paint(Green.bold(), format!("{:5}", queue_info.verified_queue_size)) + ), + true => { + self.snapshot.as_ref().map_or(String::new(), |s| + match s.status() { + RestorationStatus::Ongoing { state_chunks, block_chunks, state_chunks_done, block_chunks_done } => { + format!("Syncing snapshot {}/{}", state_chunks_done + block_chunks_done, state_chunks + block_chunks) + }, + RestorationStatus::Initializing { chunks_done } => { + format!("Snapshot initializing ({} chunks restored)", chunks_done) + }, + _ => String::new(), + } + ) + }, + }, + false => String::new(), + }, + match sync_info.as_ref() { + Some(ref sync_info) => format!("{}{}/{} peers", + match importing { + true => format!("{}", + if self.target.executes_transactions() { + paint(Green.bold(), format!("{:>8} ", format!("#{}", sync_info.last_imported_block_number))) + } else { + String::new() + } + ), + false => match sync_info.last_imported_old_block_number { + Some(number) => format!("{} ", paint(Yellow.bold(), format!("{:>8}", format!("#{}", number)))), + None => String::new(), + } + }, + paint(Cyan.bold(), format!("{:2}", sync_info.num_peers)), + paint(Cyan.bold(), format!("{:2}", sync_info.max_peers)), + ), + _ => String::new(), + }, + cache_sizes.display(Blue.bold(), &paint), + match rpc_stats { + Some(ref rpc_stats) => format!( + "RPC: {} conn, {} req/s, {} µs", + paint(Blue.bold(), format!("{:2}", rpc_stats.sessions())), + paint(Blue.bold(), format!("{:4}", rpc_stats.requests_rate())), + paint(Blue.bold(), format!("{:4}", rpc_stats.approximated_roundtrip())), + ), + _ => String::new(), + }, + ); + } } impl ChainNotify for Informant { - fn new_blocks(&self, new_blocks: NewBlocks) { - if new_blocks.has_more_blocks_to_import { return } - let mut last_import = 
self.last_import.lock(); - let client = &self.target.client; + fn new_blocks(&self, new_blocks: NewBlocks) { + if new_blocks.has_more_blocks_to_import { + return; + } + let mut last_import = self.last_import.lock(); + let client = &self.target.client; - let importing = self.target.is_major_importing(); - let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing; - let txs_imported = new_blocks.imported.iter() - .take(new_blocks.imported.len().saturating_sub(if ripe { 1 } else { 0 })) - .filter_map(|h| client.block(BlockId::Hash(*h))) - .map(|b| b.transactions_count()) - .sum(); + let importing = self.target.is_major_importing(); + let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing; + let txs_imported = new_blocks + .imported + .iter() + .take( + new_blocks + .imported + .len() + .saturating_sub(if ripe { 1 } else { 0 }), + ) + .filter_map(|h| client.block(BlockId::Hash(*h))) + .map(|b| b.transactions_count()) + .sum(); - if ripe { - if let Some(block) = new_blocks.imported.last().and_then(|h| client.block(BlockId::Hash(*h))) { - let header_view = block.header_view(); - let size = block.rlp().as_raw().len(); - let (skipped, skipped_txs) = (self.skipped.load(AtomicOrdering::Relaxed) + new_blocks.imported.len() - 1, self.skipped_txs.load(AtomicOrdering::Relaxed) + txs_imported); - info!(target: "import", "Imported {} {} ({} txs, {} Mgas, {} ms, {} KiB){}", - Colour::White.bold().paint(format!("#{}", header_view.number())), - Colour::White.bold().paint(format!("{}", header_view.hash())), - Colour::Yellow.bold().paint(format!("{}", block.transactions_count())), - Colour::Yellow.bold().paint(format!("{:.2}", header_view.gas_used().low_u64() as f32 / 1000000f32)), - Colour::Purple.bold().paint(format!("{}", new_blocks.duration.as_milliseconds())), - Colour::Blue.bold().paint(format!("{:.2}", size as f32 / 1024f32)), - if skipped > 0 { - format!(" + another {} block(s) containing {} tx(s)", - 
Colour::Red.bold().paint(format!("{}", skipped)), - Colour::Red.bold().paint(format!("{}", skipped_txs)) - ) - } else { - String::new() - } - ); - self.skipped.store(0, AtomicOrdering::Relaxed); - self.skipped_txs.store(0, AtomicOrdering::Relaxed); - *last_import = Instant::now(); - } - } else { - self.skipped.fetch_add(new_blocks.imported.len(), AtomicOrdering::Relaxed); - self.skipped_txs.fetch_add(txs_imported, AtomicOrdering::Relaxed); - } - } + if ripe { + if let Some(block) = new_blocks + .imported + .last() + .and_then(|h| client.block(BlockId::Hash(*h))) + { + let header_view = block.header_view(); + let size = block.rlp().as_raw().len(); + let (skipped, skipped_txs) = ( + self.skipped.load(AtomicOrdering::Relaxed) + new_blocks.imported.len() - 1, + self.skipped_txs.load(AtomicOrdering::Relaxed) + txs_imported, + ); + info!(target: "import", "Imported {} {} ({} txs, {} Mgas, {} ms, {} KiB){}", + Colour::White.bold().paint(format!("#{}", header_view.number())), + Colour::White.bold().paint(format!("{}", header_view.hash())), + Colour::Yellow.bold().paint(format!("{}", block.transactions_count())), + Colour::Yellow.bold().paint(format!("{:.2}", header_view.gas_used().low_u64() as f32 / 1000000f32)), + Colour::Purple.bold().paint(format!("{}", new_blocks.duration.as_milliseconds())), + Colour::Blue.bold().paint(format!("{:.2}", size as f32 / 1024f32)), + if skipped > 0 { + format!(" + another {} block(s) containing {} tx(s)", + Colour::Red.bold().paint(format!("{}", skipped)), + Colour::Red.bold().paint(format!("{}", skipped_txs)) + ) + } else { + String::new() + } + ); + self.skipped.store(0, AtomicOrdering::Relaxed); + self.skipped_txs.store(0, AtomicOrdering::Relaxed); + *last_import = Instant::now(); + } + } else { + self.skipped + .fetch_add(new_blocks.imported.len(), AtomicOrdering::Relaxed); + self.skipped_txs + .fetch_add(txs_imported, AtomicOrdering::Relaxed); + } + } } impl LightChainNotify for Informant { - fn new_headers(&self, good: &[H256]) { - 
let mut last_import = self.last_import.lock(); - let client = &self.target.client; + fn new_headers(&self, good: &[H256]) { + let mut last_import = self.last_import.lock(); + let client = &self.target.client; - let importing = self.target.is_major_importing(); - let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing; + let importing = self.target.is_major_importing(); + let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing; - if ripe { - if let Some(header) = good.last().and_then(|h| client.block_header(BlockId::Hash(*h))) { - info!(target: "import", "Imported {} {} ({} Mgas){}", - Colour::White.bold().paint(format!("#{}", header.number())), - Colour::White.bold().paint(format!("{}", header.hash())), - Colour::Yellow.bold().paint(format!("{:.2}", header.gas_used().low_u64() as f32 / 1000000f32)), - if good.len() > 1 { - format!(" + another {} header(s)", - Colour::Red.bold().paint(format!("{}", good.len() - 1))) - } else { - String::new() - } - ); - *last_import = Instant::now(); - } - } - } + if ripe { + if let Some(header) = good + .last() + .and_then(|h| client.block_header(BlockId::Hash(*h))) + { + info!(target: "import", "Imported {} {} ({} Mgas){}", + Colour::White.bold().paint(format!("#{}", header.number())), + Colour::White.bold().paint(format!("{}", header.hash())), + Colour::Yellow.bold().paint(format!("{:.2}", header.gas_used().low_u64() as f32 / 1000000f32)), + if good.len() > 1 { + format!(" + another {} header(s)", + Colour::Red.bold().paint(format!("{}", good.len() - 1))) + } else { + String::new() + } + ); + *last_import = Instant::now(); + } + } + } } const INFO_TIMER: TimerToken = 0; impl IoHandler for Informant { - fn initialize(&self, io: &IoContext) { - io.register_timer(INFO_TIMER, Duration::from_secs(5)).expect("Error registering timer"); - } + fn initialize(&self, io: &IoContext) { + io.register_timer(INFO_TIMER, Duration::from_secs(5)) + .expect("Error registering timer"); + } - fn 
timeout(&self, _io: &IoContext, timer: TimerToken) { - if timer == INFO_TIMER && !self.in_shutdown.load(AtomicOrdering::SeqCst) { - self.tick(); - } - } + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + if timer == INFO_TIMER && !self.in_shutdown.load(AtomicOrdering::SeqCst) { + self.tick(); + } + } } diff --git a/parity/ipfs.rs b/parity/ipfs.rs index 0923a1e7d..c0b17c90f 100644 --- a/parity/ipfs.rs +++ b/parity/ipfs.rs @@ -14,45 +14,48 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use parity_ipfs_api::{self, AccessControlAllowOrigin, Host, Listening}; -use parity_ipfs_api::error::ServerError; use ethcore::client::BlockChainClient; +use parity_ipfs_api::{self, error::ServerError, AccessControlAllowOrigin, Host, Listening}; +use std::sync::Arc; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { - pub enabled: bool, - pub port: u16, - pub interface: String, - pub cors: Option>, - pub hosts: Option>, + pub enabled: bool, + pub port: u16, + pub interface: String, + pub cors: Option>, + pub hosts: Option>, } impl Default for Configuration { - fn default() -> Self { - Configuration { - enabled: false, - port: 5001, - interface: "127.0.0.1".into(), - cors: Some(vec![]), - hosts: Some(vec![]), - } - } + fn default() -> Self { + Configuration { + enabled: false, + port: 5001, + interface: "127.0.0.1".into(), + cors: Some(vec![]), + hosts: Some(vec![]), + } + } } -pub fn start_server(conf: Configuration, client: Arc) -> Result, ServerError> { - if !conf.enabled { - return Ok(None); - } +pub fn start_server( + conf: Configuration, + client: Arc, +) -> Result, ServerError> { + if !conf.enabled { + return Ok(None); + } - let cors = conf.cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect()); - let hosts = conf.hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + let cors = conf.cors.map(|cors| { + cors.into_iter() + 
.map(AccessControlAllowOrigin::from) + .collect() + }); + let hosts = conf + .hosts + .map(|hosts| hosts.into_iter().map(Host::from).collect()); - parity_ipfs_api::start_server( - conf.port, - conf.interface, - cors.into(), - hosts.into(), - client - ).map(Some) + parity_ipfs_api::start_server(conf.port, conf.interface, cors.into(), hosts.into(), client) + .map(Some) } diff --git a/parity/lib.rs b/parity/lib.rs index a88a68947..249e60555 100644 --- a/parity/lib.rs +++ b/parity/lib.rs @@ -21,9 +21,9 @@ extern crate ansi_term; extern crate docopt; #[macro_use] extern crate clap; +extern crate atty; extern crate dir; extern crate futures; -extern crate atty; extern crate jsonrpc_core; extern crate num_cpus; extern crate number_prefix; @@ -98,11 +98,12 @@ mod blockchain; mod cache; mod cli; mod configuration; -mod export_hardcoded_sync; -mod ipfs; +mod db; mod deprecated; +mod export_hardcoded_sync; mod helpers; mod informant; +mod ipfs; mod light_helpers; mod modules; mod params; @@ -116,11 +117,8 @@ mod snapshot; mod upgrade; mod user_defaults; mod whisper; -mod db; -use std::fs::File; -use std::io::BufReader; -use std::sync::Arc; +use std::{fs::File, io::BufReader, sync::Arc}; use cli::Args; use configuration::{Cmd, Execute}; @@ -130,93 +128,121 @@ use hash::keccak_buffer; #[cfg(feature = "memory_profiling")] use std::alloc::System; -pub use self::configuration::Configuration; -pub use self::run::RunningClient; +pub use self::{configuration::Configuration, run::RunningClient}; +pub use ethcore_logger::{setup_log, Config as LoggerConfig, RotatingLogger}; pub use parity_rpc::PubSubSession; -pub use ethcore_logger::{Config as LoggerConfig, setup_log, RotatingLogger}; #[cfg(feature = "memory_profiling")] #[global_allocator] static A: System = System; fn print_hash_of(maybe_file: Option) -> Result { - if let Some(file) = maybe_file { - let mut f = BufReader::new(File::open(&file).map_err(|_| "Unable to open file".to_owned())?); - let hash = keccak_buffer(&mut 
f).map_err(|_| "Unable to read from file".to_owned())?; - Ok(format!("{:x}", hash)) - } else { - Err("Streaming from standard input not yet supported. Specify a file.".to_owned()) - } + if let Some(file) = maybe_file { + let mut f = + BufReader::new(File::open(&file).map_err(|_| "Unable to open file".to_owned())?); + let hash = keccak_buffer(&mut f).map_err(|_| "Unable to read from file".to_owned())?; + Ok(format!("{:x}", hash)) + } else { + Err("Streaming from standard input not yet supported. Specify a file.".to_owned()) + } } #[cfg(feature = "deadlock_detection")] fn run_deadlock_detection_thread() { - use std::thread; - use std::time::Duration; - use parking_lot::deadlock; - use ansi_term::Style; + use ansi_term::Style; + use parking_lot::deadlock; + use std::{thread, time::Duration}; - info!("Starting deadlock detection thread."); - // Create a background thread which checks for deadlocks every 10s - thread::spawn(move || { - loop { - thread::sleep(Duration::from_secs(10)); - let deadlocks = deadlock::check_deadlock(); - if deadlocks.is_empty() { - continue; - } + info!("Starting deadlock detection thread."); + // Create a background thread which checks for deadlocks every 10s + thread::spawn(move || loop { + thread::sleep(Duration::from_secs(10)); + let deadlocks = deadlock::check_deadlock(); + if deadlocks.is_empty() { + continue; + } - warn!("{} {} detected", deadlocks.len(), Style::new().bold().paint("deadlock(s)")); - for (i, threads) in deadlocks.iter().enumerate() { - warn!("{} #{}", Style::new().bold().paint("Deadlock"), i); - for t in threads { - warn!("Thread Id {:#?}", t.thread_id()); - warn!("{:#?}", t.backtrace()); - } - } - } - }); + warn!( + "{} {} detected", + deadlocks.len(), + Style::new().bold().paint("deadlock(s)") + ); + for (i, threads) in deadlocks.iter().enumerate() { + warn!("{} #{}", Style::new().bold().paint("Deadlock"), i); + for t in threads { + warn!("Thread Id {:#?}", t.thread_id()); + warn!("{:#?}", t.backtrace()); + } + } + }); 
} /// Action that Parity performed when running `start`. pub enum ExecutionAction { - /// The execution didn't require starting a node, and thus has finished. - /// Contains the string to print on stdout, if any. - Instant(Option), + /// The execution didn't require starting a node, and thus has finished. + /// Contains the string to print on stdout, if any. + Instant(Option), - /// The client has started running and must be shut down manually by calling `shutdown`. - /// - /// If you don't call `shutdown()`, execution will continue in the background. - Running(RunningClient), + /// The client has started running and must be shut down manually by calling `shutdown`. + /// + /// If you don't call `shutdown()`, execution will continue in the background. + Running(RunningClient), } fn execute( - command: Execute, - logger: Arc, - on_client_rq: Cr, on_updater_rq: Rr) -> Result - where Cr: Fn(String) + 'static + Send, - Rr: Fn() + 'static + Send + command: Execute, + logger: Arc, + on_client_rq: Cr, + on_updater_rq: Rr, +) -> Result +where + Cr: Fn(String) + 'static + Send, + Rr: Fn() + 'static + Send, { - #[cfg(feature = "deadlock_detection")] - run_deadlock_detection_thread(); + #[cfg(feature = "deadlock_detection")] + run_deadlock_detection_thread(); - match command.cmd { - Cmd::Run(run_cmd) => { - let outcome = run::execute(run_cmd, logger, on_client_rq, on_updater_rq)?; - Ok(ExecutionAction::Running(outcome)) - }, - Cmd::Version => Ok(ExecutionAction::Instant(Some(Args::print_version()))), - Cmd::Hash(maybe_file) => print_hash_of(maybe_file).map(|s| ExecutionAction::Instant(Some(s))), - Cmd::Account(account_cmd) => account::execute(account_cmd).map(|s| ExecutionAction::Instant(Some(s))), - Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd).map(|s| ExecutionAction::Instant(Some(s))), - Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd).map(|_| ExecutionAction::Instant(None)), - Cmd::SignerToken(ws_conf, logger_config) => 
signer::execute(ws_conf, logger_config).map(|s| ExecutionAction::Instant(Some(s))), - Cmd::SignerSign { id, pwfile, port, authfile } => cli_signer::signer_sign(id, pwfile, port, authfile).map(|s| ExecutionAction::Instant(Some(s))), - Cmd::SignerList { port, authfile } => cli_signer::signer_list(port, authfile).map(|s| ExecutionAction::Instant(Some(s))), - Cmd::SignerReject { id, port, authfile } => cli_signer::signer_reject(id, port, authfile).map(|s| ExecutionAction::Instant(Some(s))), - Cmd::Snapshot(snapshot_cmd) => snapshot::execute(snapshot_cmd).map(|s| ExecutionAction::Instant(Some(s))), - Cmd::ExportHardcodedSync(export_hs_cmd) => export_hardcoded_sync::execute(export_hs_cmd).map(|s| ExecutionAction::Instant(Some(s))), - } + match command.cmd { + Cmd::Run(run_cmd) => { + let outcome = run::execute(run_cmd, logger, on_client_rq, on_updater_rq)?; + Ok(ExecutionAction::Running(outcome)) + } + Cmd::Version => Ok(ExecutionAction::Instant(Some(Args::print_version()))), + Cmd::Hash(maybe_file) => { + print_hash_of(maybe_file).map(|s| ExecutionAction::Instant(Some(s))) + } + Cmd::Account(account_cmd) => { + account::execute(account_cmd).map(|s| ExecutionAction::Instant(Some(s))) + } + Cmd::ImportPresaleWallet(presale_cmd) => { + presale::execute(presale_cmd).map(|s| ExecutionAction::Instant(Some(s))) + } + Cmd::Blockchain(blockchain_cmd) => { + blockchain::execute(blockchain_cmd).map(|_| ExecutionAction::Instant(None)) + } + Cmd::SignerToken(ws_conf, logger_config) => { + signer::execute(ws_conf, logger_config).map(|s| ExecutionAction::Instant(Some(s))) + } + Cmd::SignerSign { + id, + pwfile, + port, + authfile, + } => cli_signer::signer_sign(id, pwfile, port, authfile) + .map(|s| ExecutionAction::Instant(Some(s))), + Cmd::SignerList { port, authfile } => { + cli_signer::signer_list(port, authfile).map(|s| ExecutionAction::Instant(Some(s))) + } + Cmd::SignerReject { id, port, authfile } => { + cli_signer::signer_reject(id, port, authfile).map(|s| 
ExecutionAction::Instant(Some(s))) + } + Cmd::Snapshot(snapshot_cmd) => { + snapshot::execute(snapshot_cmd).map(|s| ExecutionAction::Instant(Some(s))) + } + Cmd::ExportHardcodedSync(export_hs_cmd) => { + export_hardcoded_sync::execute(export_hs_cmd).map(|s| ExecutionAction::Instant(Some(s))) + } + } } /// Starts the parity client. @@ -232,19 +258,19 @@ fn execute( /// On error, returns what to print on stderr. // FIXME: totally independent logging capability, see https://github.com/paritytech/parity-ethereum/issues/10252 pub fn start( - conf: Configuration, - logger: Arc, - on_client_rq: Cr, - on_updater_rq: Rr + conf: Configuration, + logger: Arc, + on_client_rq: Cr, + on_updater_rq: Rr, ) -> Result - where - Cr: Fn(String) + 'static + Send, - Rr: Fn() + 'static + Send +where + Cr: Fn(String) + 'static + Send, + Rr: Fn() + 'static + Send, { - let deprecated = find_deprecated(&conf.args); - for d in deprecated { - println!("{}", d); - } + let deprecated = find_deprecated(&conf.args); + for d in deprecated { + println!("{}", d); + } - execute(conf.into_command()?, logger, on_client_rq, on_updater_rq) + execute(conf.into_command()?, logger, on_client_rq, on_updater_rq) } diff --git a/parity/light_helpers/epoch_fetch.rs b/parity/light_helpers/epoch_fetch.rs index 9c7fd6a8e..f86d2130f 100644 --- a/parity/light_helpers/epoch_fetch.rs +++ b/parity/light_helpers/epoch_fetch.rs @@ -16,21 +16,22 @@ use std::sync::{Arc, Weak}; -use ethcore::engines::{EthEngine, StateDependentProof}; -use ethcore::machine::EthereumMachine; -use sync::{LightSync, LightNetworkDispatcher}; -use types::encoded; -use types::header::Header; -use types::receipt::Receipt; +use ethcore::{ + engines::{EthEngine, StateDependentProof}, + machine::EthereumMachine, +}; +use sync::{LightNetworkDispatcher, LightSync}; +use types::{encoded, header::Header, receipt::Receipt}; -use futures::{future, Future}; -use futures::future::Either; +use futures::{future, future::Either, Future}; -use 
light::client::fetch::ChainDataFetcher; -use light::on_demand::{request, OnDemand, OnDemandRequester}; +use light::{ + client::fetch::ChainDataFetcher, + on_demand::{request, OnDemand, OnDemandRequester}, +}; -use parking_lot::RwLock; use ethereum_types::H256; +use parking_lot::RwLock; const ALL_VALID_BACKREFS: &str = "no back-references, therefore all back-references valid; qed"; @@ -38,57 +39,62 @@ type BoxFuture = Box>; /// Allows on-demand fetch of data useful for the light client. pub struct EpochFetch { - /// A handle to the sync service. - pub sync: Arc>>, - /// The on-demand request service. - pub on_demand: Arc, + /// A handle to the sync service. + pub sync: Arc>>, + /// The on-demand request service. + pub on_demand: Arc, } impl EpochFetch { - fn request(&self, req: T) -> BoxFuture - where T: Send + request::RequestAdapter + 'static, T::Out: Send + 'static - { - Box::new(match self.sync.read().upgrade() { - Some(sync) => { - let on_demand = &self.on_demand; - let maybe_future = sync.with_context(move |ctx| { - on_demand.request(ctx, req).expect(ALL_VALID_BACKREFS) - }); + fn request(&self, req: T) -> BoxFuture + where + T: Send + request::RequestAdapter + 'static, + T::Out: Send + 'static, + { + Box::new(match self.sync.read().upgrade() { + Some(sync) => { + let on_demand = &self.on_demand; + let maybe_future = sync.with_context(move |ctx| { + on_demand.request(ctx, req).expect(ALL_VALID_BACKREFS) + }); - match maybe_future { - Some(x) => Either::A(x.map_err(|_| "Request canceled")), - None => Either::B(future::err("Unable to access network.")), - } - } - None => Either::B(future::err("Unable to access network")), - }) - } + match maybe_future { + Some(x) => Either::A(x.map_err(|_| "Request canceled")), + None => Either::B(future::err("Unable to access network.")), + } + } + None => Either::B(future::err("Unable to access network")), + }) + } } impl ChainDataFetcher for EpochFetch { - type Error = &'static str; + type Error = &'static str; - type Body = 
BoxFuture; - type Receipts = BoxFuture, &'static str>; - type Transition = BoxFuture, &'static str>; + type Body = BoxFuture; + type Receipts = BoxFuture, &'static str>; + type Transition = BoxFuture, &'static str>; - fn block_body(&self, header: &Header) -> Self::Body { - self.request(request::Body(header.encoded().into())) - } + fn block_body(&self, header: &Header) -> Self::Body { + self.request(request::Body(header.encoded().into())) + } - /// Fetch block receipts. - fn block_receipts(&self, header: &Header) -> Self::Receipts { - self.request(request::BlockReceipts(header.encoded().into())) - } + /// Fetch block receipts. + fn block_receipts(&self, header: &Header) -> Self::Receipts { + self.request(request::BlockReceipts(header.encoded().into())) + } - /// Fetch epoch transition proof at given header. - fn epoch_transition(&self, hash: H256, engine: Arc, checker: Arc>) - -> Self::Transition - { - self.request(request::Signal { - hash: hash, - engine: engine, - proof_check: checker, - }) - } + /// Fetch epoch transition proof at given header. 
+ fn epoch_transition( + &self, + hash: H256, + engine: Arc, + checker: Arc>, + ) -> Self::Transition { + self.request(request::Signal { + hash: hash, + engine: engine, + proof_check: checker, + }) + } } diff --git a/parity/logger/src/lib.rs b/parity/logger/src/lib.rs index a2e3de176..3b00240ec 100644 --- a/parity/logger/src/lib.rs +++ b/parity/logger/src/lib.rs @@ -30,136 +30,161 @@ extern crate lazy_static; mod rotating; -use std::{env, thread, fs}; -use std::sync::{Weak, Arc}; -use std::io::Write; -use env_logger::{Builder as LogBuilder, Formatter}; -use regex::Regex; use ansi_term::Colour; +use env_logger::{Builder as LogBuilder, Formatter}; use parking_lot::Mutex; +use regex::Regex; +use std::{ + env, fs, + io::Write, + sync::{Arc, Weak}, + thread, +}; -pub use rotating::{RotatingLogger, init_log}; +pub use rotating::{init_log, RotatingLogger}; #[derive(Debug, PartialEq, Clone)] pub struct Config { - pub mode: Option, - pub color: bool, - pub file: Option, + pub mode: Option, + pub color: bool, + pub file: Option, } impl Default for Config { - fn default() -> Self { - Config { - mode: None, - color: !cfg!(windows), - file: None, - } - } + fn default() -> Self { + Config { + mode: None, + color: !cfg!(windows), + file: None, + } + } } lazy_static! { - static ref ROTATING_LOGGER : Mutex> = Mutex::new(Default::default()); + static ref ROTATING_LOGGER: Mutex> = Mutex::new(Default::default()); } /// Sets up the logger pub fn setup_log(config: &Config) -> Result, String> { - use rlog::*; + use rlog::*; - let mut levels = String::new(); - let mut builder = LogBuilder::new(); - // Disable info logging by default for some modules: - builder.filter(Some("ws"), LevelFilter::Warn); - builder.filter(Some("hyper"), LevelFilter::Warn); - builder.filter(Some("rustls"), LevelFilter::Error); - // Enable info for others. 
- builder.filter(None, LevelFilter::Info); + let mut levels = String::new(); + let mut builder = LogBuilder::new(); + // Disable info logging by default for some modules: + builder.filter(Some("ws"), LevelFilter::Warn); + builder.filter(Some("hyper"), LevelFilter::Warn); + builder.filter(Some("rustls"), LevelFilter::Error); + // Enable info for others. + builder.filter(None, LevelFilter::Info); - if let Ok(lvl) = env::var("RUST_LOG") { - levels.push_str(&lvl); - levels.push_str(","); - builder.parse(&lvl); - } + if let Ok(lvl) = env::var("RUST_LOG") { + levels.push_str(&lvl); + levels.push_str(","); + builder.parse(&lvl); + } - if let Some(ref s) = config.mode { - levels.push_str(s); - builder.parse(s); - } + if let Some(ref s) = config.mode { + levels.push_str(s); + builder.parse(s); + } - let isatty = atty::is(atty::Stream::Stderr); - let enable_color = config.color && isatty; - let logs = Arc::new(RotatingLogger::new(levels)); - let logger = logs.clone(); - let mut open_options = fs::OpenOptions::new(); + let isatty = atty::is(atty::Stream::Stderr); + let enable_color = config.color && isatty; + let logs = Arc::new(RotatingLogger::new(levels)); + let logger = logs.clone(); + let mut open_options = fs::OpenOptions::new(); - let maybe_file = match config.file.as_ref() { - Some(f) => Some(open_options - .append(true).create(true).open(f) - .map_err(|e| format!("Cannot write to log file given: {}, {}", f, e))?), - None => None, - }; - - let format = move |buf: &mut Formatter, record: &Record| { - let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap(); - - let with_color = if max_level() <= LevelFilter::Info { - format!("{} {}", Colour::Black.bold().paint(timestamp), record.args()) - } else { - let name = thread::current().name().map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x))); - format!("{} {} {} {} {}", Colour::Black.bold().paint(timestamp), name, record.level(), record.target(), record.args()) - }; - - let 
removed_color = kill_color(with_color.as_ref()); - - let ret = match enable_color { - true => with_color, - false => removed_color.clone(), - }; - - if let Some(mut file) = maybe_file.as_ref() { - // ignore errors - there's nothing we can do - let _ = file.write_all(removed_color.as_bytes()); - let _ = file.write_all(b"\n"); - } - logger.append(removed_color); - if !isatty && record.level() <= Level::Info && atty::is(atty::Stream::Stdout) { - // duplicate INFO/WARN output to console - println!("{}", ret); - } - - writeln!(buf, "{}", ret) + let maybe_file = match config.file.as_ref() { + Some(f) => Some( + open_options + .append(true) + .create(true) + .open(f) + .map_err(|e| format!("Cannot write to log file given: {}, {}", f, e))?, + ), + None => None, }; - builder.format(format); - builder.try_init() - .and_then(|_| { - *ROTATING_LOGGER.lock() = Arc::downgrade(&logs); - Ok(logs) - }) - // couldn't create new logger - try to fall back on previous logger. - .or_else(|err| match ROTATING_LOGGER.lock().upgrade() { - Some(l) => Ok(l), - // no previous logger. fatal. 
- None => Err(format!("{:?}", err)), - }) + let format = move |buf: &mut Formatter, record: &Record| { + let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap(); + + let with_color = if max_level() <= LevelFilter::Info { + format!( + "{} {}", + Colour::Black.bold().paint(timestamp), + record.args() + ) + } else { + let name = thread::current().name().map_or_else(Default::default, |x| { + format!("{}", Colour::Blue.bold().paint(x)) + }); + format!( + "{} {} {} {} {}", + Colour::Black.bold().paint(timestamp), + name, + record.level(), + record.target(), + record.args() + ) + }; + + let removed_color = kill_color(with_color.as_ref()); + + let ret = match enable_color { + true => with_color, + false => removed_color.clone(), + }; + + if let Some(mut file) = maybe_file.as_ref() { + // ignore errors - there's nothing we can do + let _ = file.write_all(removed_color.as_bytes()); + let _ = file.write_all(b"\n"); + } + logger.append(removed_color); + if !isatty && record.level() <= Level::Info && atty::is(atty::Stream::Stdout) { + // duplicate INFO/WARN output to console + println!("{}", ret); + } + + writeln!(buf, "{}", ret) + }; + + builder.format(format); + builder + .try_init() + .and_then(|_| { + *ROTATING_LOGGER.lock() = Arc::downgrade(&logs); + Ok(logs) + }) + // couldn't create new logger - try to fall back on previous logger. + .or_else(|err| match ROTATING_LOGGER.lock().upgrade() { + Some(l) => Ok(l), + // no previous logger. fatal. + None => Err(format!("{:?}", err)), + }) } fn kill_color(s: &str) -> String { - lazy_static! { - static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").unwrap(); - } - RE.replace_all(s, "").to_string() + lazy_static! 
{ + static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").unwrap(); + } + RE.replace_all(s, "").to_string() } #[test] fn should_remove_colour() { - let before = "test"; - let after = kill_color(&Colour::Red.bold().paint(before)); - assert_eq!(after, "test"); + let before = "test"; + let after = kill_color(&Colour::Red.bold().paint(before)); + assert_eq!(after, "test"); } #[test] fn should_remove_multiple_colour() { - let t = format!("{} {}", Colour::Red.bold().paint("test"), Colour::White.normal().paint("again")); - let after = kill_color(&t); - assert_eq!(after, "test again"); + let t = format!( + "{} {}", + Colour::Red.bold().paint("test"), + Colour::White.normal().paint("again") + ); + let after = kill_color(&t); + assert_eq!(after, "test again"); } diff --git a/parity/logger/src/rotating.rs b/parity/logger/src/rotating.rs index 2745e95bf..c3c97e463 100644 --- a/parity/logger/src/rotating.rs +++ b/parity/logger/src/rotating.rs @@ -16,108 +16,106 @@ //! Common log helper functions -use std::env; -use rlog::LevelFilter; -use env_logger::Builder as LogBuilder; use arrayvec::ArrayVec; +use env_logger::Builder as LogBuilder; +use rlog::LevelFilter; +use std::env; use parking_lot::{RwLock, RwLockReadGuard}; lazy_static! { - static ref LOG_DUMMY: () = { - let mut builder = LogBuilder::new(); - builder.filter(None, LevelFilter::Info); + static ref LOG_DUMMY: () = { + let mut builder = LogBuilder::new(); + builder.filter(None, LevelFilter::Info); - if let Ok(log) = env::var("RUST_LOG") { - builder.parse(&log); - } + if let Ok(log) = env::var("RUST_LOG") { + builder.parse(&log); + } - if !builder.try_init().is_ok() { - println!("logger initialization failed!"); - } - }; + if !builder.try_init().is_ok() { + println!("logger initialization failed!"); + } + }; } /// Intialize log with default settings pub fn init_log() { - *LOG_DUMMY + *LOG_DUMMY } -const LOG_SIZE : usize = 128; +const LOG_SIZE: usize = 128; /// Logger implementation that keeps up to `LOG_SIZE` log elements. 
pub struct RotatingLogger { - /// Defined logger levels - levels: String, - /// Logs array. Latest log is always at index 0 - logs: RwLock>, + /// Defined logger levels + levels: String, + /// Logs array. Latest log is always at index 0 + logs: RwLock>, } impl RotatingLogger { + /// Creates new `RotatingLogger` with given levels. + /// It does not enforce levels - it's just read only. + pub fn new(levels: String) -> Self { + RotatingLogger { + levels: levels, + logs: RwLock::new(ArrayVec::<[_; LOG_SIZE]>::new()), + } + } - /// Creates new `RotatingLogger` with given levels. - /// It does not enforce levels - it's just read only. - pub fn new(levels: String) -> Self { - RotatingLogger { - levels: levels, - logs: RwLock::new(ArrayVec::<[_; LOG_SIZE]>::new()), - } - } + /// Append new log entry + pub fn append(&self, log: String) { + let mut logs = self.logs.write(); + if logs.is_full() { + logs.pop(); + } + logs.insert(0, log); + } - /// Append new log entry - pub fn append(&self, log: String) { - let mut logs = self.logs.write(); - if logs.is_full() { - logs.pop(); - } - logs.insert(0, log); - } - - /// Return levels - pub fn levels(&self) -> &str { - &self.levels - } - - /// Return logs - pub fn logs(&self) -> RwLockReadGuard> { - self.logs.read() - } + /// Return levels + pub fn levels(&self) -> &str { + &self.levels + } + /// Return logs + pub fn logs(&self) -> RwLockReadGuard> { + self.logs.read() + } } #[cfg(test)] mod test { - use super::RotatingLogger; + use super::RotatingLogger; - fn logger() -> RotatingLogger { - RotatingLogger::new("test".to_owned()) - } + fn logger() -> RotatingLogger { + RotatingLogger::new("test".to_owned()) + } - #[test] - fn should_return_log_levels() { - // given - let logger = logger(); + #[test] + fn should_return_log_levels() { + // given + let logger = logger(); - // when - let levels = logger.levels(); + // when + let levels = logger.levels(); - // then - assert_eq!(levels, "test"); - } + // then + assert_eq!(levels, "test"); + 
} - #[test] - fn should_return_latest_logs() { - // given - let logger = logger(); + #[test] + fn should_return_latest_logs() { + // given + let logger = logger(); - // when - logger.append("a".to_owned()); - logger.append("b".to_owned()); + // when + logger.append("a".to_owned()); + logger.append("b".to_owned()); - // then - let logs = logger.logs(); - assert_eq!(logs[0], "b".to_owned()); - assert_eq!(logs[1], "a".to_owned()); - assert_eq!(logs.len(), 2); - } + // then + let logs = logger.logs(); + assert_eq!(logs[0], "b".to_owned()); + assert_eq!(logs[1], "a".to_owned()); + assert_eq!(logs.len(), 2); + } } diff --git a/parity/main.rs b/parity/main.rs index 60ad8cad6..944402fd5 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -23,30 +23,36 @@ extern crate dir; extern crate fdlimit; #[macro_use] extern crate log; +extern crate ansi_term; extern crate panic_hook; +extern crate parity_daemonize; extern crate parity_ethereum; extern crate parking_lot; -extern crate parity_daemonize; -extern crate ansi_term; -#[cfg(windows)] extern crate winapi; extern crate ethcore_logger; +#[cfg(windows)] +extern crate winapi; -use std::ffi::OsString; -use std::fs::{remove_file, metadata, File, create_dir_all}; -use std::io::{Read, Write}; -use std::path::PathBuf; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::{process, env}; +use std::{ + env, + ffi::OsString, + fs::{create_dir_all, metadata, remove_file, File}, + io::{Read, Write}, + path::PathBuf, + process, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; use ansi_term::Colour; use ctrlc::CtrlC; use dir::default_hypervisor_path; -use fdlimit::raise_fd_limit; use ethcore_logger::setup_log; -use parity_ethereum::{start, ExecutionAction}; +use fdlimit::raise_fd_limit; use parity_daemonize::AsHandle; +use parity_ethereum::{start, ExecutionAction}; use parking_lot::{Condvar, Mutex}; const PLEASE_RESTART_EXIT_CODE: i32 = 69; @@ -54,72 +60,81 @@ const PARITY_EXECUTABLE_NAME: &str = "parity"; 
#[derive(Debug)] enum Error { - BinaryNotFound, - ExitCode(i32), - Restart, - Unknown + BinaryNotFound, + ExitCode(i32), + Restart, + Unknown, } fn update_path(name: &str) -> PathBuf { - let mut dest = default_hypervisor_path(); - dest.push(name); - dest + let mut dest = default_hypervisor_path(); + dest.push(name); + dest } fn latest_exe_path() -> Result { - File::open(update_path("latest")).and_then(|mut f| { - let mut exe_path = String::new(); - trace!(target: "updater", "latest binary path: {:?}", f); - f.read_to_string(&mut exe_path).map(|_| update_path(&exe_path)) - }) - .or(Err(Error::BinaryNotFound)) + File::open(update_path("latest")) + .and_then(|mut f| { + let mut exe_path = String::new(); + trace!(target: "updater", "latest binary path: {:?}", f); + f.read_to_string(&mut exe_path) + .map(|_| update_path(&exe_path)) + }) + .or(Err(Error::BinaryNotFound)) } -fn latest_binary_is_newer(current_binary: &Option, latest_binary: &Option) -> bool { - match ( - current_binary - .as_ref() - .and_then(|p| metadata(p.as_path()).ok()) - .and_then(|m| m.modified().ok()), - latest_binary - .as_ref() - .and_then(|p| metadata(p.as_path()).ok()) - .and_then(|m| m.modified().ok()) - ) { - (Some(latest_exe_time), Some(this_exe_time)) if latest_exe_time > this_exe_time => true, - _ => false, - } +fn latest_binary_is_newer( + current_binary: &Option, + latest_binary: &Option, +) -> bool { + match ( + current_binary + .as_ref() + .and_then(|p| metadata(p.as_path()).ok()) + .and_then(|m| m.modified().ok()), + latest_binary + .as_ref() + .and_then(|p| metadata(p.as_path()).ok()) + .and_then(|m| m.modified().ok()), + ) { + (Some(latest_exe_time), Some(this_exe_time)) if latest_exe_time > this_exe_time => true, + _ => false, + } } fn set_spec_name_override(spec_name: &str) { - if let Err(e) = create_dir_all(default_hypervisor_path()) - .and_then(|_| File::create(update_path("spec_name_override")) - .and_then(|mut f| f.write_all(spec_name.as_bytes()))) - { - warn!("Couldn't 
override chain spec: {} at {:?}", e, update_path("spec_name_override")); - } + if let Err(e) = create_dir_all(default_hypervisor_path()).and_then(|_| { + File::create(update_path("spec_name_override")) + .and_then(|mut f| f.write_all(spec_name.as_bytes())) + }) { + warn!( + "Couldn't override chain spec: {} at {:?}", + e, + update_path("spec_name_override") + ); + } } fn take_spec_name_override() -> Option { - let p = update_path("spec_name_override"); - let r = File::open(p.clone()) - .ok() - .and_then(|mut f| { - let mut spec_name = String::new(); - f.read_to_string(&mut spec_name).ok().map(|_| spec_name) - }); - let _ = remove_file(p); - r + let p = update_path("spec_name_override"); + let r = File::open(p.clone()).ok().and_then(|mut f| { + let mut spec_name = String::new(); + f.read_to_string(&mut spec_name).ok().map(|_| spec_name) + }); + let _ = remove_file(p); + r } #[cfg(windows)] fn global_cleanup() { - // We need to cleanup all sockets before spawning another Parity process. This makes sure everything is cleaned up. - // The loop is required because of internal reference counter for winsock dll. We don't know how many crates we use do - // initialize it. There's at least 2 now. - for _ in 0.. 10 { - unsafe { ::winapi::um::winsock2::WSACleanup(); } - } + // We need to cleanup all sockets before spawning another Parity process. This makes sure everything is cleaned up. + // The loop is required because of internal reference counter for winsock dll. We don't know how many crates we use do + // initialize it. There's at least 2 now. + for _ in 0..10 { + unsafe { + ::winapi::um::winsock2::WSACleanup(); + } + } } #[cfg(not(windows))] @@ -127,12 +142,12 @@ fn global_init() {} #[cfg(windows)] fn global_init() { - // When restarting in the same process this reinits windows sockets. 
- unsafe { - const WS_VERSION: u16 = 0x202; - let mut wsdata: ::winapi::um::winsock2::WSADATA = ::std::mem::zeroed(); - ::winapi::um::winsock2::WSAStartup(WS_VERSION, &mut wsdata); - } + // When restarting in the same process this reinits windows sockets. + unsafe { + const WS_VERSION: u16 = 0x202; + let mut wsdata: ::winapi::um::winsock2::WSADATA = ::std::mem::zeroed(); + ::winapi::um::winsock2::WSAStartup(WS_VERSION, &mut wsdata); + } } #[cfg(not(windows))] @@ -140,228 +155,246 @@ fn global_cleanup() {} // Starts parity binary installed via `parity-updater` and returns the code it exits with. fn run_parity() -> Result<(), Error> { - global_init(); + global_init(); - let prefix = vec![OsString::from("--can-restart"), OsString::from("--force-direct")]; + let prefix = vec![ + OsString::from("--can-restart"), + OsString::from("--force-direct"), + ]; - let res: Result<(), Error> = latest_exe_path() - .and_then(|exe| process::Command::new(exe) - .args(&(env::args_os().skip(1).chain(prefix.into_iter()).collect::>())) - .status() - .ok() - .map_or(Err(Error::Unknown), |es| { - match es.code() { - // Process success - Some(0) => Ok(()), - // Please restart - Some(PLEASE_RESTART_EXIT_CODE) => Err(Error::Restart), - // Process error code `c` - Some(c) => Err(Error::ExitCode(c)), - // Unknown error, couldn't determine error code - _ => Err(Error::Unknown), - } - }) - ); + let res: Result<(), Error> = latest_exe_path().and_then(|exe| { + process::Command::new(exe) + .args( + &(env::args_os() + .skip(1) + .chain(prefix.into_iter()) + .collect::>()), + ) + .status() + .ok() + .map_or(Err(Error::Unknown), |es| { + match es.code() { + // Process success + Some(0) => Ok(()), + // Please restart + Some(PLEASE_RESTART_EXIT_CODE) => Err(Error::Restart), + // Process error code `c` + Some(c) => Err(Error::ExitCode(c)), + // Unknown error, couldn't determine error code + _ => Err(Error::Unknown), + } + }) + }); - global_cleanup(); - res + global_cleanup(); + res } #[derive(Debug)] /// 
Status used to exit or restart the program. struct ExitStatus { - /// Whether the program panicked. - panicking: bool, - /// Whether the program should exit. - should_exit: bool, - /// Whether the program should restart. - should_restart: bool, - /// If a restart happens, whether a new chain spec should be used. - spec_name_override: Option, + /// Whether the program panicked. + panicking: bool, + /// Whether the program should exit. + should_exit: bool, + /// Whether the program should restart. + should_restart: bool, + /// If a restart happens, whether a new chain spec should be used. + spec_name_override: Option, } // Run `locally installed version` of parity (i.e, not installed via `parity-updater`) // Returns the exit error code. fn main_direct(force_can_restart: bool) -> i32 { - global_init(); + global_init(); - let mut conf = { - let args = std::env::args().collect::>(); - parity_ethereum::Configuration::parse_cli(&args).unwrap_or_else(|e| e.exit()) - }; + let mut conf = { + let args = std::env::args().collect::>(); + parity_ethereum::Configuration::parse_cli(&args).unwrap_or_else(|e| e.exit()) + }; - let logger = setup_log(&conf.logger_config()).unwrap_or_else(|e| { - eprintln!("{}", e); - process::exit(2) - }); + let logger = setup_log(&conf.logger_config()).unwrap_or_else(|e| { + eprintln!("{}", e); + process::exit(2) + }); - if let Some(spec_override) = take_spec_name_override() { - conf.args.flag_testnet = false; - conf.args.arg_chain = spec_override; - } + if let Some(spec_override) = take_spec_name_override() { + conf.args.flag_testnet = false; + conf.args.arg_chain = spec_override; + } - // FIXME: `pid_file` shouldn't need to cloned here - // see: `https://github.com/paritytech/parity-daemonize/pull/13` for more info - let handle = if let Some(pid) = conf.args.arg_daemon_pid_file.clone() { - info!("{}", Colour::Blue.paint("starting in daemon mode").to_string()); - let _ = std::io::stdout().flush(); + // FIXME: `pid_file` shouldn't need to cloned here 
+ // see: `https://github.com/paritytech/parity-daemonize/pull/13` for more info + let handle = if let Some(pid) = conf.args.arg_daemon_pid_file.clone() { + info!( + "{}", + Colour::Blue.paint("starting in daemon mode").to_string() + ); + let _ = std::io::stdout().flush(); - match parity_daemonize::daemonize(pid) { - Ok(h) => Some(h), - Err(e) => { - error!( - "{}", - Colour::Red.paint(format!("{}", e)) - ); - return 1; - } - } - } else { - None - }; + match parity_daemonize::daemonize(pid) { + Ok(h) => Some(h), + Err(e) => { + error!("{}", Colour::Red.paint(format!("{}", e))); + return 1; + } + } + } else { + None + }; - let can_restart = force_can_restart || conf.args.flag_can_restart; + let can_restart = force_can_restart || conf.args.flag_can_restart; - // increase max number of open files - raise_fd_limit(); + // increase max number of open files + raise_fd_limit(); - let exit = Arc::new((Mutex::new(ExitStatus { - panicking: false, - should_exit: false, - should_restart: false, - spec_name_override: None - }), Condvar::new())); + let exit = Arc::new(( + Mutex::new(ExitStatus { + panicking: false, + should_exit: false, + should_restart: false, + spec_name_override: None, + }), + Condvar::new(), + )); - // Double panic can happen. So when we lock `ExitStatus` after the main thread is notified, it cannot be locked - // again. - let exiting = Arc::new(AtomicBool::new(false)); + // Double panic can happen. So when we lock `ExitStatus` after the main thread is notified, it cannot be locked + // again. 
+ let exiting = Arc::new(AtomicBool::new(false)); - let exec = if can_restart { - start( - conf, - logger, - { - let e = exit.clone(); - let exiting = exiting.clone(); - move |new_chain: String| { - if !exiting.swap(true, Ordering::SeqCst) { - *e.0.lock() = ExitStatus { - panicking: false, - should_exit: true, - should_restart: true, - spec_name_override: Some(new_chain), - }; - e.1.notify_all(); - } - } - }, - { - let e = exit.clone(); - let exiting = exiting.clone(); - move || { - if !exiting.swap(true, Ordering::SeqCst) { - *e.0.lock() = ExitStatus { - panicking: false, - should_exit: true, - should_restart: true, - spec_name_override: None, - }; - e.1.notify_all(); - } - } - } - ) - } else { - trace!(target: "mode", "Not hypervised: not setting exit handlers."); - start(conf, logger, move |_| {}, move || {}) - }; + let exec = if can_restart { + start( + conf, + logger, + { + let e = exit.clone(); + let exiting = exiting.clone(); + move |new_chain: String| { + if !exiting.swap(true, Ordering::SeqCst) { + *e.0.lock() = ExitStatus { + panicking: false, + should_exit: true, + should_restart: true, + spec_name_override: Some(new_chain), + }; + e.1.notify_all(); + } + } + }, + { + let e = exit.clone(); + let exiting = exiting.clone(); + move || { + if !exiting.swap(true, Ordering::SeqCst) { + *e.0.lock() = ExitStatus { + panicking: false, + should_exit: true, + should_restart: true, + spec_name_override: None, + }; + e.1.notify_all(); + } + } + }, + ) + } else { + trace!(target: "mode", "Not hypervised: not setting exit handlers."); + start(conf, logger, move |_| {}, move || {}) + }; - let res = match exec { - Ok(result) => match result { - ExecutionAction::Instant(Some(s)) => { println!("{}", s); 0 }, - ExecutionAction::Instant(None) => 0, - ExecutionAction::Running(client) => { - panic_hook::set_with({ - let e = exit.clone(); - let exiting = exiting.clone(); - move |panic_msg| { - warn!("Panic occured, see stderr for details"); - eprintln!("{}", panic_msg); - if 
!exiting.swap(true, Ordering::SeqCst) { - *e.0.lock() = ExitStatus { - panicking: true, - should_exit: true, - should_restart: false, - spec_name_override: None, - }; - e.1.notify_all(); - } - } - }); + let res = match exec { + Ok(result) => match result { + ExecutionAction::Instant(Some(s)) => { + println!("{}", s); + 0 + } + ExecutionAction::Instant(None) => 0, + ExecutionAction::Running(client) => { + panic_hook::set_with({ + let e = exit.clone(); + let exiting = exiting.clone(); + move |panic_msg| { + warn!("Panic occured, see stderr for details"); + eprintln!("{}", panic_msg); + if !exiting.swap(true, Ordering::SeqCst) { + *e.0.lock() = ExitStatus { + panicking: true, + should_exit: true, + should_restart: false, + spec_name_override: None, + }; + e.1.notify_all(); + } + } + }); - CtrlC::set_handler({ - let e = exit.clone(); - let exiting = exiting.clone(); - move || { - if !exiting.swap(true, Ordering::SeqCst) { - *e.0.lock() = ExitStatus { - panicking: false, - should_exit: true, - should_restart: false, - spec_name_override: None, - }; - e.1.notify_all(); - } - } - }); + CtrlC::set_handler({ + let e = exit.clone(); + let exiting = exiting.clone(); + move || { + if !exiting.swap(true, Ordering::SeqCst) { + *e.0.lock() = ExitStatus { + panicking: false, + should_exit: true, + should_restart: false, + spec_name_override: None, + }; + e.1.notify_all(); + } + } + }); - // so the client has started successfully - // if this is a daemon, detach from the parent process - if let Some(mut handle) = handle { - handle.detach() - } + // so the client has started successfully + // if this is a daemon, detach from the parent process + if let Some(mut handle) = handle { + handle.detach() + } - // Wait for signal - let mut lock = exit.0.lock(); - if !lock.should_exit { - let _ = exit.1.wait(&mut lock); - } + // Wait for signal + let mut lock = exit.0.lock(); + if !lock.should_exit { + let _ = exit.1.wait(&mut lock); + } - client.shutdown(); + client.shutdown(); - if 
lock.should_restart { - if let Some(ref spec_name) = lock.spec_name_override { - set_spec_name_override(&spec_name.clone()); - } - PLEASE_RESTART_EXIT_CODE - } else { - if lock.panicking { - 1 - } else { - 0 - } - } - }, - }, - Err(err) => { - // error occured during start up - // if this is a daemon, detach from the parent process - if let Some(mut handle) = handle { - handle.detach_with_msg(format!("{}", Colour::Red.paint(&err))) - } - eprintln!("{}", err); - 1 - }, - }; + if lock.should_restart { + if let Some(ref spec_name) = lock.spec_name_override { + set_spec_name_override(&spec_name.clone()); + } + PLEASE_RESTART_EXIT_CODE + } else { + if lock.panicking { + 1 + } else { + 0 + } + } + } + }, + Err(err) => { + // error occured during start up + // if this is a daemon, detach from the parent process + if let Some(mut handle) = handle { + handle.detach_with_msg(format!("{}", Colour::Red.paint(&err))) + } + eprintln!("{}", err); + 1 + } + }; - global_cleanup(); - res + global_cleanup(); + res } fn println_trace_main(s: String) { - if env::var("RUST_LOG").ok().and_then(|s| s.find("main=trace")).is_some() { - println!("{}", s); - } + if env::var("RUST_LOG") + .ok() + .and_then(|s| s.find("main=trace")) + .is_some() + { + println!("{}", s); + } } macro_rules! trace_main { @@ -370,86 +403,101 @@ macro_rules! 
trace_main { } fn main() { - panic_hook::set_abort(); + panic_hook::set_abort(); - // the user has specified to run its originally installed binary (not via `parity-updater`) - let force_direct = std::env::args().any(|arg| arg == "--force-direct"); + // the user has specified to run its originally installed binary (not via `parity-updater`) + let force_direct = std::env::args().any(|arg| arg == "--force-direct"); - // absolute path to the current `binary` - let exe_path = std::env::current_exe().ok(); + // absolute path to the current `binary` + let exe_path = std::env::current_exe().ok(); - // the binary is named `target/xx/yy` - let development = exe_path - .as_ref() - .and_then(|p| { - p.parent() - .and_then(|p| p.parent()) - .and_then(|p| p.file_name()) - .map(|n| n == "target") - }) - .unwrap_or(false); + // the binary is named `target/xx/yy` + let development = exe_path + .as_ref() + .and_then(|p| { + p.parent() + .and_then(|p| p.parent()) + .and_then(|p| p.file_name()) + .map(|n| n == "target") + }) + .unwrap_or(false); - // the binary is named `parity` - let same_name = exe_path - .as_ref() - .map_or(false, |p| { - p.file_stem().map_or(false, |n| n == PARITY_EXECUTABLE_NAME) - }); + // the binary is named `parity` + let same_name = exe_path.as_ref().map_or(false, |p| { + p.file_stem().map_or(false, |n| n == PARITY_EXECUTABLE_NAME) + }); - trace_main!("Starting up {} (force-direct: {}, development: {}, same-name: {})", - std::env::current_exe().ok().map_or_else(|| "".into(), |x| format!("{}", x.display())), - force_direct, - development, - same_name); + trace_main!( + "Starting up {} (force-direct: {}, development: {}, same-name: {})", + std::env::current_exe() + .ok() + .map_or_else(|| "".into(), |x| format!("{}", x.display())), + force_direct, + development, + same_name + ); - if !force_direct && !development && same_name { - // Try to run the latest installed version of `parity`, - // Upon failure it falls back to the locally installed version of `parity` 
- // Everything run inside a loop, so we'll be able to restart from the child into a new version seamlessly. - loop { - // `Path` to the latest downloaded binary - let latest_exe = latest_exe_path().ok(); + if !force_direct && !development && same_name { + // Try to run the latest installed version of `parity`, + // Upon failure it falls back to the locally installed version of `parity` + // Everything run inside a loop, so we'll be able to restart from the child into a new version seamlessly. + loop { + // `Path` to the latest downloaded binary + let latest_exe = latest_exe_path().ok(); - // `Latest´ binary exist - let have_update = latest_exe.as_ref().map_or(false, |p| p.exists()); + // `Latest´ binary exist + let have_update = latest_exe.as_ref().map_or(false, |p| p.exists()); - // Canonicalized path to the current binary is not the same as to latest binary - let canonicalized_path_not_same = exe_path - .as_ref() - .map_or(false, |exe| latest_exe.as_ref() - .map_or(false, |lexe| exe.canonicalize().ok() != lexe.canonicalize().ok())); + // Canonicalized path to the current binary is not the same as to latest binary + let canonicalized_path_not_same = exe_path.as_ref().map_or(false, |exe| { + latest_exe.as_ref().map_or(false, |lexe| { + exe.canonicalize().ok() != lexe.canonicalize().ok() + }) + }); - // Downloaded `binary` is newer - let update_is_newer = latest_binary_is_newer(&latest_exe, &exe_path); - trace_main!("Starting... (have-update: {}, non-updated-current: {}, update-is-newer: {})", have_update, canonicalized_path_not_same, update_is_newer); + // Downloaded `binary` is newer + let update_is_newer = latest_binary_is_newer(&latest_exe, &exe_path); + trace_main!( + "Starting... 
(have-update: {}, non-updated-current: {}, update-is-newer: {})", + have_update, + canonicalized_path_not_same, + update_is_newer + ); - let exit_code = if have_update && canonicalized_path_not_same && update_is_newer { - trace_main!("Attempting to run latest update ({})...", - latest_exe.as_ref().expect("guarded by have_update; latest_exe must exist for have_update; qed").display()); - match run_parity() { - Ok(_) => 0, - // Restart parity - Err(Error::Restart) => PLEASE_RESTART_EXIT_CODE, - // Fall back to local version - Err(e) => { - error!(target: "updater", "Updated binary could not be executed error: {:?}. Falling back to local version", e); - main_direct(true) - } - } - } else { - trace_main!("No latest update. Attempting to direct..."); - main_direct(true) - }; - trace_main!("Latest binary exited with exit code: {}", exit_code); - if exit_code != PLEASE_RESTART_EXIT_CODE { - trace_main!("Quitting..."); - process::exit(exit_code); - } - trace!(target: "updater", "Re-running updater loop"); - } - } else { - trace_main!("Running direct"); - // Otherwise, we're presumably running the version we want. Just run and fall-through. - process::exit(main_direct(false)); - } + let exit_code = if have_update && canonicalized_path_not_same && update_is_newer { + trace_main!( + "Attempting to run latest update ({})...", + latest_exe + .as_ref() + .expect( + "guarded by have_update; latest_exe must exist for have_update; qed" + ) + .display() + ); + match run_parity() { + Ok(_) => 0, + // Restart parity + Err(Error::Restart) => PLEASE_RESTART_EXIT_CODE, + // Fall back to local version + Err(e) => { + error!(target: "updater", "Updated binary could not be executed error: {:?}. Falling back to local version", e); + main_direct(true) + } + } + } else { + trace_main!("No latest update. 
Attempting to direct..."); + main_direct(true) + }; + trace_main!("Latest binary exited with exit code: {}", exit_code); + if exit_code != PLEASE_RESTART_EXIT_CODE { + trace_main!("Quitting..."); + process::exit(exit_code); + } + trace!(target: "updater", "Re-running updater loop"); + } + } else { + trace_main!("Running direct"); + // Otherwise, we're presumably running the version we want. Just run and fall-through. + process::exit(main_direct(false)); + } } diff --git a/parity/modules.rs b/parity/modules.rs index 9f5d25a11..31c324a72 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -14,50 +14,51 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::{Arc, mpsc}; +use std::sync::{mpsc, Arc}; -use ethcore::client::BlockChainClient; -use sync::{self, AttachedProtocol, SyncConfig, NetworkConfiguration, Params, ConnectionFilter}; -use ethcore::snapshot::SnapshotService; +use ethcore::{client::BlockChainClient, snapshot::SnapshotService}; use light::Provider; +use sync::{self, AttachedProtocol, ConnectionFilter, NetworkConfiguration, Params, SyncConfig}; -pub use sync::{EthSync, SyncProvider, ManageNetwork, PrivateTxHandler}; pub use ethcore::client::ChainNotify; use ethcore_logger::Config as LogConfig; +pub use sync::{EthSync, ManageNetwork, PrivateTxHandler, SyncProvider}; pub type SyncModules = ( - Arc, - Arc, - Arc, - mpsc::Sender, + Arc, + Arc, + Arc, + mpsc::Sender, ); pub fn sync( - config: SyncConfig, - network_config: NetworkConfiguration, - chain: Arc, - snapshot_service: Arc, - private_tx_handler: Option>, - provider: Arc, - _log_settings: &LogConfig, - attached_protos: Vec, - connection_filter: Option>, + config: SyncConfig, + network_config: NetworkConfiguration, + chain: Arc, + snapshot_service: Arc, + private_tx_handler: Option>, + provider: Arc, + _log_settings: &LogConfig, + attached_protos: Vec, + connection_filter: Option>, ) -> Result { - let eth_sync = 
EthSync::new(Params { - config, - chain, - provider, - snapshot_service, - private_tx_handler, - network_config, - attached_protos, - }, - connection_filter)?; + let eth_sync = EthSync::new( + Params { + config, + chain, + provider, + snapshot_service, + private_tx_handler, + network_config, + attached_protos, + }, + connection_filter, + )?; - Ok(( - eth_sync.clone() as Arc, - eth_sync.clone() as Arc, - eth_sync.clone() as Arc, - eth_sync.priority_tasks() - )) + Ok(( + eth_sync.clone() as Arc, + eth_sync.clone() as Arc, + eth_sync.clone() as Arc, + eth_sync.priority_tasks(), + )) } diff --git a/parity/params.rs b/parity/params.rs index 7e99adbac..4635f2590 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -14,20 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::HashSet; -use std::time::Duration; -use std::{str, fs, fmt}; -use std::num::NonZeroU32; +use std::{collections::HashSet, fmt, fs, num::NonZeroU32, str, time::Duration}; -use ethcore::client::Mode; -use ethcore::ethereum; -use ethcore::spec::{Spec, SpecParams}; -use ethereum_types::{U256, Address}; -use parity_runtime::Executor; +use ethcore::{ + client::Mode, + ethereum, + spec::{Spec, SpecParams}, +}; +use ethereum_types::{Address, U256}; use hash_fetch::fetch::Client as FetchClient; use journaldb::Algorithm; -use miner::gas_pricer::GasPricer; -use miner::gas_price_calibrator::{GasPriceCalibratorOptions, GasPriceCalibrator}; +use miner::{ + gas_price_calibrator::{GasPriceCalibrator, GasPriceCalibratorOptions}, + gas_pricer::GasPricer, +}; +use parity_runtime::Executor; use parity_version::version_data; use user_defaults::UserDefaults; @@ -35,475 +36,531 @@ use crate::configuration; #[derive(Debug, PartialEq)] pub enum SpecType { - Foundation, - Classic, - Poanet, - Xdai, - Volta, - Ewc, - Expanse, - Musicoin, - Ellaism, - Mix, - Callisto, - Morden, - Mordor, - Ropsten, - Kovan, - Rinkeby, - Goerli, - 
Kotti, - Sokol, - Dev, - Custom(String), + Foundation, + Classic, + Poanet, + Xdai, + Volta, + Ewc, + Expanse, + Musicoin, + Ellaism, + Mix, + Callisto, + Morden, + Mordor, + Ropsten, + Kovan, + Rinkeby, + Goerli, + Kotti, + Sokol, + Dev, + Custom(String), } impl Default for SpecType { - fn default() -> Self { - SpecType::Foundation - } + fn default() -> Self { + SpecType::Foundation + } } impl str::FromStr for SpecType { - type Err = String; + type Err = String; - fn from_str(s: &str) -> Result { - let spec = match s { - "eth" | "ethereum" | "foundation" | "mainnet" => SpecType::Foundation, - "etc" | "classic" => SpecType::Classic, - "poanet" | "poacore" => SpecType::Poanet, - "xdai" => SpecType::Xdai, - "volta" => SpecType::Volta, - "ewc" | "energyweb" => SpecType::Ewc, - "expanse" => SpecType::Expanse, - "musicoin" => SpecType::Musicoin, - "ellaism" => SpecType::Ellaism, - "mix" => SpecType::Mix, - "callisto" => SpecType::Callisto, - "morden" => SpecType::Morden, - "mordor" | "classic-testnet" => SpecType::Mordor, - "ropsten" => SpecType::Ropsten, - "kovan" => SpecType::Kovan, - "rinkeby" => SpecType::Rinkeby, - "goerli" | "görli" | "testnet" => SpecType::Goerli, - "kotti" => SpecType::Kotti, - "sokol" | "poasokol" => SpecType::Sokol, - "dev" => SpecType::Dev, - other => SpecType::Custom(other.into()), - }; - Ok(spec) - } + fn from_str(s: &str) -> Result { + let spec = match s { + "eth" | "ethereum" | "foundation" | "mainnet" => SpecType::Foundation, + "etc" | "classic" => SpecType::Classic, + "poanet" | "poacore" => SpecType::Poanet, + "xdai" => SpecType::Xdai, + "volta" => SpecType::Volta, + "ewc" | "energyweb" => SpecType::Ewc, + "expanse" => SpecType::Expanse, + "musicoin" => SpecType::Musicoin, + "ellaism" => SpecType::Ellaism, + "mix" => SpecType::Mix, + "callisto" => SpecType::Callisto, + "morden" => SpecType::Morden, + "mordor" | "classic-testnet" => SpecType::Mordor, + "ropsten" => SpecType::Ropsten, + "kovan" => SpecType::Kovan, + "rinkeby" => 
SpecType::Rinkeby, + "goerli" | "görli" | "testnet" => SpecType::Goerli, + "kotti" => SpecType::Kotti, + "sokol" | "poasokol" => SpecType::Sokol, + "dev" => SpecType::Dev, + other => SpecType::Custom(other.into()), + }; + Ok(spec) + } } impl fmt::Display for SpecType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match *self { - SpecType::Foundation => "foundation", - SpecType::Classic => "classic", - SpecType::Poanet => "poanet", - SpecType::Xdai => "xdai", - SpecType::Volta => "volta", - SpecType::Ewc => "energyweb", - SpecType::Expanse => "expanse", - SpecType::Musicoin => "musicoin", - SpecType::Ellaism => "ellaism", - SpecType::Mix => "mix", - SpecType::Callisto => "callisto", - SpecType::Morden => "morden", - SpecType::Mordor => "mordor", - SpecType::Ropsten => "ropsten", - SpecType::Kovan => "kovan", - SpecType::Rinkeby => "rinkeby", - SpecType::Goerli => "goerli", - SpecType::Kotti => "kotti", - SpecType::Sokol => "sokol", - SpecType::Dev => "dev", - SpecType::Custom(ref custom) => custom, - }) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + SpecType::Foundation => "foundation", + SpecType::Classic => "classic", + SpecType::Poanet => "poanet", + SpecType::Xdai => "xdai", + SpecType::Volta => "volta", + SpecType::Ewc => "energyweb", + SpecType::Expanse => "expanse", + SpecType::Musicoin => "musicoin", + SpecType::Ellaism => "ellaism", + SpecType::Mix => "mix", + SpecType::Callisto => "callisto", + SpecType::Morden => "morden", + SpecType::Mordor => "mordor", + SpecType::Ropsten => "ropsten", + SpecType::Kovan => "kovan", + SpecType::Rinkeby => "rinkeby", + SpecType::Goerli => "goerli", + SpecType::Kotti => "kotti", + SpecType::Sokol => "sokol", + SpecType::Dev => "dev", + SpecType::Custom(ref custom) => custom, + }) + } } impl SpecType { - pub fn spec<'a, T: Into>>(&self, params: T) -> Result { - let params = params.into(); - match *self { - SpecType::Foundation => 
Ok(ethereum::new_foundation(params)), - SpecType::Classic => Ok(ethereum::new_classic(params)), - SpecType::Poanet => Ok(ethereum::new_poanet(params)), - SpecType::Xdai => Ok(ethereum::new_xdai(params)), - SpecType::Volta => Ok(ethereum::new_volta(params)), - SpecType::Ewc => Ok(ethereum::new_ewc(params)), - SpecType::Expanse => Ok(ethereum::new_expanse(params)), - SpecType::Musicoin => Ok(ethereum::new_musicoin(params)), - SpecType::Ellaism => Ok(ethereum::new_ellaism(params)), - SpecType::Mix => Ok(ethereum::new_mix(params)), - SpecType::Callisto => Ok(ethereum::new_callisto(params)), - SpecType::Morden => Ok(ethereum::new_morden(params)), - SpecType::Mordor => Ok(ethereum::new_mordor(params)), - SpecType::Ropsten => Ok(ethereum::new_ropsten(params)), - SpecType::Kovan => Ok(ethereum::new_kovan(params)), - SpecType::Rinkeby => Ok(ethereum::new_rinkeby(params)), - SpecType::Goerli => Ok(ethereum::new_goerli(params)), - SpecType::Kotti => Ok(ethereum::new_kotti(params)), - SpecType::Sokol => Ok(ethereum::new_sokol(params)), - SpecType::Dev => Ok(Spec::new_instant()), - SpecType::Custom(ref filename) => { - let file = fs::File::open(filename).map_err(|e| format!("Could not load specification file at {}: {}", filename, e))?; - Spec::load(params, file) - } - } - } + pub fn spec<'a, T: Into>>(&self, params: T) -> Result { + let params = params.into(); + match *self { + SpecType::Foundation => Ok(ethereum::new_foundation(params)), + SpecType::Classic => Ok(ethereum::new_classic(params)), + SpecType::Poanet => Ok(ethereum::new_poanet(params)), + SpecType::Xdai => Ok(ethereum::new_xdai(params)), + SpecType::Volta => Ok(ethereum::new_volta(params)), + SpecType::Ewc => Ok(ethereum::new_ewc(params)), + SpecType::Expanse => Ok(ethereum::new_expanse(params)), + SpecType::Musicoin => Ok(ethereum::new_musicoin(params)), + SpecType::Ellaism => Ok(ethereum::new_ellaism(params)), + SpecType::Mix => Ok(ethereum::new_mix(params)), + SpecType::Callisto => 
Ok(ethereum::new_callisto(params)), + SpecType::Morden => Ok(ethereum::new_morden(params)), + SpecType::Mordor => Ok(ethereum::new_mordor(params)), + SpecType::Ropsten => Ok(ethereum::new_ropsten(params)), + SpecType::Kovan => Ok(ethereum::new_kovan(params)), + SpecType::Rinkeby => Ok(ethereum::new_rinkeby(params)), + SpecType::Goerli => Ok(ethereum::new_goerli(params)), + SpecType::Kotti => Ok(ethereum::new_kotti(params)), + SpecType::Sokol => Ok(ethereum::new_sokol(params)), + SpecType::Dev => Ok(Spec::new_instant()), + SpecType::Custom(ref filename) => { + let file = fs::File::open(filename).map_err(|e| { + format!("Could not load specification file at {}: {}", filename, e) + })?; + Spec::load(params, file) + } + } + } - pub fn legacy_fork_name(&self) -> Option { - match *self { - SpecType::Classic => Some("classic".to_owned()), - SpecType::Expanse => Some("expanse".to_owned()), - SpecType::Musicoin => Some("musicoin".to_owned()), - _ => None, - } - } + pub fn legacy_fork_name(&self) -> Option { + match *self { + SpecType::Classic => Some("classic".to_owned()), + SpecType::Expanse => Some("expanse".to_owned()), + SpecType::Musicoin => Some("musicoin".to_owned()), + _ => None, + } + } } #[derive(Debug, PartialEq)] pub enum Pruning { - Specific(Algorithm), - Auto, + Specific(Algorithm), + Auto, } impl Default for Pruning { - fn default() -> Self { - Pruning::Auto - } + fn default() -> Self { + Pruning::Auto + } } impl str::FromStr for Pruning { - type Err = String; + type Err = String; - fn from_str(s: &str) -> Result { - match s { - "auto" => Ok(Pruning::Auto), - other => other.parse().map(Pruning::Specific), - } - } + fn from_str(s: &str) -> Result { + match s { + "auto" => Ok(Pruning::Auto), + other => other.parse().map(Pruning::Specific), + } + } } impl Pruning { - pub fn to_algorithm(&self, user_defaults: &UserDefaults) -> Algorithm { - match *self { - Pruning::Specific(algo) => algo, - Pruning::Auto => user_defaults.pruning, - } - } + pub fn 
to_algorithm(&self, user_defaults: &UserDefaults) -> Algorithm { + match *self { + Pruning::Specific(algo) => algo, + Pruning::Auto => user_defaults.pruning, + } + } } #[derive(Debug, PartialEq)] pub struct ResealPolicy { - pub own: bool, - pub external: bool, + pub own: bool, + pub external: bool, } impl Default for ResealPolicy { - fn default() -> Self { - ResealPolicy { - own: true, - external: true, - } - } + fn default() -> Self { + ResealPolicy { + own: true, + external: true, + } + } } impl str::FromStr for ResealPolicy { - type Err = String; + type Err = String; - fn from_str(s: &str) -> Result { - let (own, external) = match s { - "none" => (false, false), - "own" => (true, false), - "ext" => (false, true), - "all" => (true, true), - x => return Err(format!("Invalid reseal value: {}", x)), - }; + fn from_str(s: &str) -> Result { + let (own, external) = match s { + "none" => (false, false), + "own" => (true, false), + "ext" => (false, true), + "all" => (true, true), + x => return Err(format!("Invalid reseal value: {}", x)), + }; - let reseal = ResealPolicy { - own: own, - external: external, - }; + let reseal = ResealPolicy { + own: own, + external: external, + }; - Ok(reseal) - } + Ok(reseal) + } } #[derive(Debug, PartialEq)] pub struct AccountsConfig { - pub iterations: NonZeroU32, - pub refresh_time: u64, - pub testnet: bool, - pub password_files: Vec, - pub unlocked_accounts: Vec
, - pub enable_hardware_wallets: bool, - pub enable_fast_unlock: bool, + pub iterations: NonZeroU32, + pub refresh_time: u64, + pub testnet: bool, + pub password_files: Vec, + pub unlocked_accounts: Vec
, + pub enable_hardware_wallets: bool, + pub enable_fast_unlock: bool, } impl Default for AccountsConfig { - fn default() -> Self { - AccountsConfig { - iterations: NonZeroU32::new(10240).expect("10240 > 0; qed"), - refresh_time: 5, - testnet: false, - password_files: Vec::new(), - unlocked_accounts: Vec::new(), - enable_hardware_wallets: true, - enable_fast_unlock: false, - } - } + fn default() -> Self { + AccountsConfig { + iterations: NonZeroU32::new(10240).expect("10240 > 0; qed"), + refresh_time: 5, + testnet: false, + password_files: Vec::new(), + unlocked_accounts: Vec::new(), + enable_hardware_wallets: true, + enable_fast_unlock: false, + } + } } #[derive(Debug, PartialEq)] pub enum GasPricerConfig { - Fixed(U256), - Calibrated { - usd_per_tx: f32, - recalibration_period: Duration, - api_endpoint: String - } + Fixed(U256), + Calibrated { + usd_per_tx: f32, + recalibration_period: Duration, + api_endpoint: String, + }, } impl Default for GasPricerConfig { - fn default() -> Self { - GasPricerConfig::Calibrated { - usd_per_tx: 0.0001f32, - recalibration_period: Duration::from_secs(3600), - api_endpoint: configuration::ETHERSCAN_ETH_PRICE_ENDPOINT.to_string(), - } - } + fn default() -> Self { + GasPricerConfig::Calibrated { + usd_per_tx: 0.0001f32, + recalibration_period: Duration::from_secs(3600), + api_endpoint: configuration::ETHERSCAN_ETH_PRICE_ENDPOINT.to_string(), + } + } } impl GasPricerConfig { - pub fn to_gas_pricer(&self, fetch: FetchClient, p: Executor) -> GasPricer { - match *self { - GasPricerConfig::Fixed(u) => GasPricer::Fixed(u), - GasPricerConfig::Calibrated { usd_per_tx, recalibration_period, ref api_endpoint } => { - GasPricer::new_calibrated( - GasPriceCalibrator::new( - GasPriceCalibratorOptions { - usd_per_tx: usd_per_tx, - recalibration_period: recalibration_period, - }, - fetch, - p, - api_endpoint.clone(), - ) - ) - } - } - } + pub fn to_gas_pricer(&self, fetch: FetchClient, p: Executor) -> GasPricer { + match *self { + 
GasPricerConfig::Fixed(u) => GasPricer::Fixed(u), + GasPricerConfig::Calibrated { + usd_per_tx, + recalibration_period, + ref api_endpoint, + } => GasPricer::new_calibrated(GasPriceCalibrator::new( + GasPriceCalibratorOptions { + usd_per_tx: usd_per_tx, + recalibration_period: recalibration_period, + }, + fetch, + p, + api_endpoint.clone(), + )), + } + } } #[derive(Debug, PartialEq)] pub struct MinerExtras { - pub author: Address, - pub engine_signer: Address, - pub extra_data: Vec, - pub gas_range_target: (U256, U256), - pub work_notify: Vec, - pub local_accounts: HashSet
, + pub author: Address, + pub engine_signer: Address, + pub extra_data: Vec, + pub gas_range_target: (U256, U256), + pub work_notify: Vec, + pub local_accounts: HashSet
, } impl Default for MinerExtras { - fn default() -> Self { - MinerExtras { - author: Default::default(), - engine_signer: Default::default(), - extra_data: version_data(), - gas_range_target: (8_000_000.into(), 10_000_000.into()), - work_notify: Default::default(), - local_accounts: Default::default(), - } - } + fn default() -> Self { + MinerExtras { + author: Default::default(), + engine_signer: Default::default(), + extra_data: version_data(), + gas_range_target: (8_000_000.into(), 10_000_000.into()), + work_notify: Default::default(), + local_accounts: Default::default(), + } + } } /// 3-value enum. #[derive(Debug, Clone, Copy, PartialEq)] pub enum Switch { - /// True. - On, - /// False. - Off, - /// Auto. - Auto, + /// True. + On, + /// False. + Off, + /// Auto. + Auto, } impl Default for Switch { - fn default() -> Self { - Switch::Auto - } + fn default() -> Self { + Switch::Auto + } } impl str::FromStr for Switch { - type Err = String; + type Err = String; - fn from_str(s: &str) -> Result { - match s { - "on" => Ok(Switch::On), - "off" => Ok(Switch::Off), - "auto" => Ok(Switch::Auto), - other => Err(format!("Invalid switch value: {}", other)) - } - } + fn from_str(s: &str) -> Result { + match s { + "on" => Ok(Switch::On), + "off" => Ok(Switch::Off), + "auto" => Ok(Switch::Auto), + other => Err(format!("Invalid switch value: {}", other)), + } + } } -pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> Result { - match (user_defaults.is_first_launch, switch, user_defaults.tracing) { - (false, Switch::On, false) => Err("TraceDB resync required".into()), - (_, Switch::On, _) => Ok(true), - (_, Switch::Off, _) => Ok(false), - (_, Switch::Auto, def) => Ok(def), - } +pub fn tracing_switch_to_bool( + switch: Switch, + user_defaults: &UserDefaults, +) -> Result { + match (user_defaults.is_first_launch, switch, user_defaults.tracing) { + (false, Switch::On, false) => Err("TraceDB resync required".into()), + (_, Switch::On, _) => Ok(true), + 
(_, Switch::Off, _) => Ok(false), + (_, Switch::Auto, def) => Ok(def), + } } -pub fn fatdb_switch_to_bool(switch: Switch, user_defaults: &UserDefaults, _algorithm: Algorithm) -> Result { - let result = match (user_defaults.is_first_launch, switch, user_defaults.fat_db) { - (false, Switch::On, false) => Err("FatDB resync required".into()), - (_, Switch::On, _) => Ok(true), - (_, Switch::Off, _) => Ok(false), - (_, Switch::Auto, def) => Ok(def), - }; - result +pub fn fatdb_switch_to_bool( + switch: Switch, + user_defaults: &UserDefaults, + _algorithm: Algorithm, +) -> Result { + let result = match (user_defaults.is_first_launch, switch, user_defaults.fat_db) { + (false, Switch::On, false) => Err("FatDB resync required".into()), + (_, Switch::On, _) => Ok(true), + (_, Switch::Off, _) => Ok(false), + (_, Switch::Auto, def) => Ok(def), + }; + result } -pub fn mode_switch_to_bool(switch: Option, user_defaults: &UserDefaults) -> Result { - Ok(switch.unwrap_or(user_defaults.mode().clone())) +pub fn mode_switch_to_bool( + switch: Option, + user_defaults: &UserDefaults, +) -> Result { + Ok(switch.unwrap_or(user_defaults.mode().clone())) } #[cfg(test)] mod tests { - use journaldb::Algorithm; - use user_defaults::UserDefaults; - use super::{SpecType, Pruning, ResealPolicy, Switch, tracing_switch_to_bool}; + use super::{tracing_switch_to_bool, Pruning, ResealPolicy, SpecType, Switch}; + use journaldb::Algorithm; + use user_defaults::UserDefaults; - #[test] - fn test_spec_type_parsing() { - assert_eq!(SpecType::Foundation, "eth".parse().unwrap()); - assert_eq!(SpecType::Foundation, "ethereum".parse().unwrap()); - assert_eq!(SpecType::Foundation, "foundation".parse().unwrap()); - assert_eq!(SpecType::Foundation, "mainnet".parse().unwrap()); - assert_eq!(SpecType::Classic, "etc".parse().unwrap()); - assert_eq!(SpecType::Classic, "classic".parse().unwrap()); - assert_eq!(SpecType::Poanet, "poanet".parse().unwrap()); - assert_eq!(SpecType::Poanet, "poacore".parse().unwrap()); - 
assert_eq!(SpecType::Xdai, "xdai".parse().unwrap()); - assert_eq!(SpecType::Volta, "volta".parse().unwrap()); - assert_eq!(SpecType::Ewc, "ewc".parse().unwrap()); - assert_eq!(SpecType::Ewc, "energyweb".parse().unwrap()); - assert_eq!(SpecType::Expanse, "expanse".parse().unwrap()); - assert_eq!(SpecType::Musicoin, "musicoin".parse().unwrap()); - assert_eq!(SpecType::Ellaism, "ellaism".parse().unwrap()); - assert_eq!(SpecType::Mix, "mix".parse().unwrap()); - assert_eq!(SpecType::Callisto, "callisto".parse().unwrap()); - assert_eq!(SpecType::Morden, "morden".parse().unwrap()); - assert_eq!(SpecType::Mordor, "mordor".parse().unwrap()); - assert_eq!(SpecType::Mordor, "classic-testnet".parse().unwrap()); - assert_eq!(SpecType::Ropsten, "ropsten".parse().unwrap()); - assert_eq!(SpecType::Kovan, "kovan".parse().unwrap()); - assert_eq!(SpecType::Rinkeby, "rinkeby".parse().unwrap()); - assert_eq!(SpecType::Goerli, "goerli".parse().unwrap()); - assert_eq!(SpecType::Goerli, "görli".parse().unwrap()); - assert_eq!(SpecType::Goerli, "testnet".parse().unwrap()); - assert_eq!(SpecType::Kotti, "kotti".parse().unwrap()); - assert_eq!(SpecType::Sokol, "sokol".parse().unwrap()); - assert_eq!(SpecType::Sokol, "poasokol".parse().unwrap()); - } + #[test] + fn test_spec_type_parsing() { + assert_eq!(SpecType::Foundation, "eth".parse().unwrap()); + assert_eq!(SpecType::Foundation, "ethereum".parse().unwrap()); + assert_eq!(SpecType::Foundation, "foundation".parse().unwrap()); + assert_eq!(SpecType::Foundation, "mainnet".parse().unwrap()); + assert_eq!(SpecType::Classic, "etc".parse().unwrap()); + assert_eq!(SpecType::Classic, "classic".parse().unwrap()); + assert_eq!(SpecType::Poanet, "poanet".parse().unwrap()); + assert_eq!(SpecType::Poanet, "poacore".parse().unwrap()); + assert_eq!(SpecType::Xdai, "xdai".parse().unwrap()); + assert_eq!(SpecType::Volta, "volta".parse().unwrap()); + assert_eq!(SpecType::Ewc, "ewc".parse().unwrap()); + assert_eq!(SpecType::Ewc, 
"energyweb".parse().unwrap()); + assert_eq!(SpecType::Expanse, "expanse".parse().unwrap()); + assert_eq!(SpecType::Musicoin, "musicoin".parse().unwrap()); + assert_eq!(SpecType::Ellaism, "ellaism".parse().unwrap()); + assert_eq!(SpecType::Mix, "mix".parse().unwrap()); + assert_eq!(SpecType::Callisto, "callisto".parse().unwrap()); + assert_eq!(SpecType::Morden, "morden".parse().unwrap()); + assert_eq!(SpecType::Mordor, "mordor".parse().unwrap()); + assert_eq!(SpecType::Mordor, "classic-testnet".parse().unwrap()); + assert_eq!(SpecType::Ropsten, "ropsten".parse().unwrap()); + assert_eq!(SpecType::Kovan, "kovan".parse().unwrap()); + assert_eq!(SpecType::Rinkeby, "rinkeby".parse().unwrap()); + assert_eq!(SpecType::Goerli, "goerli".parse().unwrap()); + assert_eq!(SpecType::Goerli, "görli".parse().unwrap()); + assert_eq!(SpecType::Goerli, "testnet".parse().unwrap()); + assert_eq!(SpecType::Kotti, "kotti".parse().unwrap()); + assert_eq!(SpecType::Sokol, "sokol".parse().unwrap()); + assert_eq!(SpecType::Sokol, "poasokol".parse().unwrap()); + } - #[test] - fn test_spec_type_default() { - assert_eq!(SpecType::Foundation, SpecType::default()); - } + #[test] + fn test_spec_type_default() { + assert_eq!(SpecType::Foundation, SpecType::default()); + } - #[test] - fn test_spec_type_display() { - assert_eq!(format!("{}", SpecType::Foundation), "foundation"); - assert_eq!(format!("{}", SpecType::Classic), "classic"); - assert_eq!(format!("{}", SpecType::Poanet), "poanet"); - assert_eq!(format!("{}", SpecType::Xdai), "xdai"); - assert_eq!(format!("{}", SpecType::Volta), "volta"); - assert_eq!(format!("{}", SpecType::Ewc), "energyweb"); - assert_eq!(format!("{}", SpecType::Expanse), "expanse"); - assert_eq!(format!("{}", SpecType::Musicoin), "musicoin"); - assert_eq!(format!("{}", SpecType::Ellaism), "ellaism"); - assert_eq!(format!("{}", SpecType::Mix), "mix"); - assert_eq!(format!("{}", SpecType::Callisto), "callisto"); - assert_eq!(format!("{}", SpecType::Morden), "morden"); - 
assert_eq!(format!("{}", SpecType::Mordor), "mordor"); - assert_eq!(format!("{}", SpecType::Ropsten), "ropsten"); - assert_eq!(format!("{}", SpecType::Kovan), "kovan"); - assert_eq!(format!("{}", SpecType::Rinkeby), "rinkeby"); - assert_eq!(format!("{}", SpecType::Goerli), "goerli"); - assert_eq!(format!("{}", SpecType::Kotti), "kotti"); - assert_eq!(format!("{}", SpecType::Sokol), "sokol"); - assert_eq!(format!("{}", SpecType::Dev), "dev"); - assert_eq!(format!("{}", SpecType::Custom("foo/bar".into())), "foo/bar"); - } + #[test] + fn test_spec_type_display() { + assert_eq!(format!("{}", SpecType::Foundation), "foundation"); + assert_eq!(format!("{}", SpecType::Classic), "classic"); + assert_eq!(format!("{}", SpecType::Poanet), "poanet"); + assert_eq!(format!("{}", SpecType::Xdai), "xdai"); + assert_eq!(format!("{}", SpecType::Volta), "volta"); + assert_eq!(format!("{}", SpecType::Ewc), "energyweb"); + assert_eq!(format!("{}", SpecType::Expanse), "expanse"); + assert_eq!(format!("{}", SpecType::Musicoin), "musicoin"); + assert_eq!(format!("{}", SpecType::Ellaism), "ellaism"); + assert_eq!(format!("{}", SpecType::Mix), "mix"); + assert_eq!(format!("{}", SpecType::Callisto), "callisto"); + assert_eq!(format!("{}", SpecType::Morden), "morden"); + assert_eq!(format!("{}", SpecType::Mordor), "mordor"); + assert_eq!(format!("{}", SpecType::Ropsten), "ropsten"); + assert_eq!(format!("{}", SpecType::Kovan), "kovan"); + assert_eq!(format!("{}", SpecType::Rinkeby), "rinkeby"); + assert_eq!(format!("{}", SpecType::Goerli), "goerli"); + assert_eq!(format!("{}", SpecType::Kotti), "kotti"); + assert_eq!(format!("{}", SpecType::Sokol), "sokol"); + assert_eq!(format!("{}", SpecType::Dev), "dev"); + assert_eq!(format!("{}", SpecType::Custom("foo/bar".into())), "foo/bar"); + } - #[test] - fn test_pruning_parsing() { - assert_eq!(Pruning::Auto, "auto".parse().unwrap()); - assert_eq!(Pruning::Specific(Algorithm::Archive), "archive".parse().unwrap()); - 
assert_eq!(Pruning::Specific(Algorithm::EarlyMerge), "light".parse().unwrap()); - assert_eq!(Pruning::Specific(Algorithm::OverlayRecent), "fast".parse().unwrap()); - assert_eq!(Pruning::Specific(Algorithm::RefCounted), "basic".parse().unwrap()); - } + #[test] + fn test_pruning_parsing() { + assert_eq!(Pruning::Auto, "auto".parse().unwrap()); + assert_eq!( + Pruning::Specific(Algorithm::Archive), + "archive".parse().unwrap() + ); + assert_eq!( + Pruning::Specific(Algorithm::EarlyMerge), + "light".parse().unwrap() + ); + assert_eq!( + Pruning::Specific(Algorithm::OverlayRecent), + "fast".parse().unwrap() + ); + assert_eq!( + Pruning::Specific(Algorithm::RefCounted), + "basic".parse().unwrap() + ); + } - #[test] - fn test_pruning_default() { - assert_eq!(Pruning::Auto, Pruning::default()); - } + #[test] + fn test_pruning_default() { + assert_eq!(Pruning::Auto, Pruning::default()); + } - #[test] - fn test_reseal_policy_parsing() { - let none = ResealPolicy { own: false, external: false }; - let own = ResealPolicy { own: true, external: false }; - let ext = ResealPolicy { own: false, external: true }; - let all = ResealPolicy { own: true, external: true }; - assert_eq!(none, "none".parse().unwrap()); - assert_eq!(own, "own".parse().unwrap()); - assert_eq!(ext, "ext".parse().unwrap()); - assert_eq!(all, "all".parse().unwrap()); - } + #[test] + fn test_reseal_policy_parsing() { + let none = ResealPolicy { + own: false, + external: false, + }; + let own = ResealPolicy { + own: true, + external: false, + }; + let ext = ResealPolicy { + own: false, + external: true, + }; + let all = ResealPolicy { + own: true, + external: true, + }; + assert_eq!(none, "none".parse().unwrap()); + assert_eq!(own, "own".parse().unwrap()); + assert_eq!(ext, "ext".parse().unwrap()); + assert_eq!(all, "all".parse().unwrap()); + } - #[test] - fn test_reseal_policy_default() { - let all = ResealPolicy { own: true, external: true }; - assert_eq!(all, ResealPolicy::default()); - } + #[test] + fn 
test_reseal_policy_default() { + let all = ResealPolicy { + own: true, + external: true, + }; + assert_eq!(all, ResealPolicy::default()); + } - #[test] - fn test_switch_parsing() { - assert_eq!(Switch::On, "on".parse().unwrap()); - assert_eq!(Switch::Off, "off".parse().unwrap()); - assert_eq!(Switch::Auto, "auto".parse().unwrap()); - } + #[test] + fn test_switch_parsing() { + assert_eq!(Switch::On, "on".parse().unwrap()); + assert_eq!(Switch::Off, "off".parse().unwrap()); + assert_eq!(Switch::Auto, "auto".parse().unwrap()); + } - #[test] - fn test_switch_default() { - assert_eq!(Switch::default(), Switch::Auto); - } + #[test] + fn test_switch_default() { + assert_eq!(Switch::default(), Switch::Auto); + } - fn user_defaults_with_tracing(first_launch: bool, tracing: bool) -> UserDefaults { - let mut ud = UserDefaults::default(); - ud.is_first_launch = first_launch; - ud.tracing = tracing; - ud - } + fn user_defaults_with_tracing(first_launch: bool, tracing: bool) -> UserDefaults { + let mut ud = UserDefaults::default(); + ud.is_first_launch = first_launch; + ud.tracing = tracing; + ud + } - #[test] - fn test_switch_to_bool() { - assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, true)).unwrap()); - assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, false)).unwrap()); - assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, true)).unwrap()); - assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, false)).unwrap()); + #[test] + fn test_switch_to_bool() { + assert!( + !tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, true)).unwrap() + ); + assert!( + !tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, false)).unwrap() + ); + assert!( + !tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, true)).unwrap() + ); + assert!( + !tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, false)) + 
.unwrap() + ); - assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, true)).unwrap()); - assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, false)).unwrap()); - assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, true)).unwrap()); - assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, false)).is_err()); - } + assert!( + tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, true)).unwrap() + ); + assert!( + tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, false)).unwrap() + ); + assert!( + tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, true)).unwrap() + ); + assert!( + tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, false)).is_err() + ); + } } diff --git a/parity/presale.rs b/parity/presale.rs index 162d149b5..82c202ee1 100644 --- a/parity/presale.rs +++ b/parity/presale.rs @@ -14,45 +14,46 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
- use ethkey::Password; use ethstore::PresaleWallet; -use helpers::{password_prompt, password_from_file}; +use helpers::{password_from_file, password_prompt}; use params::SpecType; use std::num::NonZeroU32; #[derive(Debug, PartialEq)] pub struct ImportWallet { - pub iterations: NonZeroU32, - pub path: String, - pub spec: SpecType, - pub wallet_path: String, - pub password_file: Option, + pub iterations: NonZeroU32, + pub path: String, + pub spec: SpecType, + pub wallet_path: String, + pub password_file: Option, } pub fn execute(cmd: ImportWallet) -> Result { - let password = match cmd.password_file.clone() { - Some(file) => password_from_file(file)?, - None => password_prompt()?, - }; + let password = match cmd.password_file.clone() { + Some(file) => password_from_file(file)?, + None => password_prompt()?, + }; - let wallet = PresaleWallet::open(cmd.wallet_path.clone()).map_err(|_| "Unable to open presale wallet.")?; - let kp = wallet.decrypt(&password).map_err(|_| "Invalid password.")?; - let address = kp.address(); - import_account(&cmd, kp, password); - Ok(format!("{:?}", address)) + let wallet = PresaleWallet::open(cmd.wallet_path.clone()) + .map_err(|_| "Unable to open presale wallet.")?; + let kp = wallet.decrypt(&password).map_err(|_| "Invalid password.")?; + let address = kp.address(); + import_account(&cmd, kp, password); + Ok(format!("{:?}", address)) } #[cfg(feature = "accounts")] pub fn import_account(cmd: &ImportWallet, kp: ethkey::KeyPair, password: Password) { - use accounts::{AccountProvider, AccountProviderSettings}; - use ethstore::EthStore; - use ethstore::accounts_dir::RootDiskDirectory; + use accounts::{AccountProvider, AccountProviderSettings}; + use ethstore::{accounts_dir::RootDiskDirectory, EthStore}; - let dir = Box::new(RootDiskDirectory::create(cmd.path.clone()).unwrap()); - let secret_store = Box::new(EthStore::open_with_iterations(dir, cmd.iterations).unwrap()); - let acc_provider = AccountProvider::new(secret_store, 
AccountProviderSettings::default()); - acc_provider.insert_account(kp.secret().clone(), &password).unwrap(); + let dir = Box::new(RootDiskDirectory::create(cmd.path.clone()).unwrap()); + let secret_store = Box::new(EthStore::open_with_iterations(dir, cmd.iterations).unwrap()); + let acc_provider = AccountProvider::new(secret_store, AccountProviderSettings::default()); + acc_provider + .insert_account(kp.secret().clone(), &password) + .unwrap(); } #[cfg(not(feature = "accounts"))] diff --git a/parity/rpc.rs b/parity/rpc.rs index 4a5b0a205..561a3ede2 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -14,186 +14,196 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::io; -use std::sync::Arc; -use std::path::PathBuf; -use std::collections::HashSet; +use std::{collections::HashSet, io, path::PathBuf, sync::Arc}; -use dir::default_data_path; -use dir::helpers::replace_home; +use dir::{default_data_path, helpers::replace_home}; use helpers::parity_ipc_path; use jsonrpc_core::MetaIoHandler; +use parity_rpc::{ + self as rpc, + informant::{Middleware, RpcStats}, + DomainsValidation, Metadata, +}; use parity_runtime::Executor; -use parity_rpc::informant::{RpcStats, Middleware}; -use parity_rpc::{self as rpc, Metadata, DomainsValidation}; use rpc_apis::{self, ApiSet}; -pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware}; +pub use parity_rpc::{HttpServer, IpcServer, RequestMiddleware}; //pub use parity_rpc::ws::Server as WsServer; -pub use parity_rpc::ws::{Server as WsServer, ws}; +pub use parity_rpc::ws::{ws, Server as WsServer}; pub const DAPPS_DOMAIN: &'static str = "web3.site"; #[derive(Debug, Clone, PartialEq)] pub struct HttpConfiguration { - pub enabled: bool, - pub interface: String, - pub port: u16, - pub apis: ApiSet, - pub cors: Option>, - pub hosts: Option>, - pub server_threads: usize, - pub processing_threads: usize, - pub max_payload: usize, - pub keep_alive: bool, + pub 
enabled: bool, + pub interface: String, + pub port: u16, + pub apis: ApiSet, + pub cors: Option>, + pub hosts: Option>, + pub server_threads: usize, + pub processing_threads: usize, + pub max_payload: usize, + pub keep_alive: bool, } impl Default for HttpConfiguration { - fn default() -> Self { - HttpConfiguration { - enabled: true, - interface: "127.0.0.1".into(), - port: 8545, - apis: ApiSet::UnsafeContext, - cors: Some(vec![]), - hosts: Some(vec![]), - server_threads: 1, - processing_threads: 4, - max_payload: 5, - keep_alive: true, - } - } + fn default() -> Self { + HttpConfiguration { + enabled: true, + interface: "127.0.0.1".into(), + port: 8545, + apis: ApiSet::UnsafeContext, + cors: Some(vec![]), + hosts: Some(vec![]), + server_threads: 1, + processing_threads: 4, + max_payload: 5, + keep_alive: true, + } + } } #[derive(Debug, PartialEq)] pub struct IpcConfiguration { - pub enabled: bool, - pub socket_addr: String, - pub apis: ApiSet, + pub enabled: bool, + pub socket_addr: String, + pub apis: ApiSet, } impl Default for IpcConfiguration { - fn default() -> Self { - IpcConfiguration { - enabled: true, - socket_addr: if cfg!(windows) { - r"\\.\pipe\jsonrpc.ipc".into() - } else { - let data_dir = ::dir::default_data_path(); - parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0) - }, - apis: ApiSet::IpcContext, - } - } + fn default() -> Self { + IpcConfiguration { + enabled: true, + socket_addr: if cfg!(windows) { + r"\\.\pipe\jsonrpc.ipc".into() + } else { + let data_dir = ::dir::default_data_path(); + parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0) + }, + apis: ApiSet::IpcContext, + } + } } #[derive(Debug, Clone, PartialEq)] pub struct WsConfiguration { - pub enabled: bool, - pub interface: String, - pub port: u16, - pub apis: ApiSet, - pub max_connections: usize, - pub origins: Option>, - pub hosts: Option>, - pub signer_path: PathBuf, - pub support_token_api: bool, + pub enabled: bool, + pub interface: String, + pub port: u16, + pub apis: ApiSet, + pub 
max_connections: usize, + pub origins: Option>, + pub hosts: Option>, + pub signer_path: PathBuf, + pub support_token_api: bool, } impl Default for WsConfiguration { - fn default() -> Self { - let data_dir = default_data_path(); - WsConfiguration { - enabled: true, - interface: "127.0.0.1".into(), - port: 8546, - apis: ApiSet::UnsafeContext, - max_connections: 100, - origins: Some(vec!["parity://*".into(),"chrome-extension://*".into(), "moz-extension://*".into()]), - hosts: Some(Vec::new()), - signer_path: replace_home(&data_dir, "$BASE/signer").into(), - support_token_api: true, - } - } + fn default() -> Self { + let data_dir = default_data_path(); + WsConfiguration { + enabled: true, + interface: "127.0.0.1".into(), + port: 8546, + apis: ApiSet::UnsafeContext, + max_connections: 100, + origins: Some(vec![ + "parity://*".into(), + "chrome-extension://*".into(), + "moz-extension://*".into(), + ]), + hosts: Some(Vec::new()), + signer_path: replace_home(&data_dir, "$BASE/signer").into(), + support_token_api: true, + } + } } impl WsConfiguration { - pub fn address(&self) -> Option { - address(self.enabled, &self.interface, self.port, &self.hosts) - } + pub fn address(&self) -> Option { + address(self.enabled, &self.interface, self.port, &self.hosts) + } } -fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option>) -> Option { - if !enabled { - return None; - } +fn address( + enabled: bool, + bind_iface: &str, + bind_port: u16, + hosts: &Option>, +) -> Option { + if !enabled { + return None; + } - match *hosts { - Some(ref hosts) if !hosts.is_empty() => Some(hosts[0].clone().into()), - _ => Some(format!("{}:{}", bind_iface, bind_port).into()), - } + match *hosts { + Some(ref hosts) if !hosts.is_empty() => Some(hosts[0].clone().into()), + _ => Some(format!("{}:{}", bind_iface, bind_port).into()), + } } pub struct Dependencies { - pub apis: Arc, - pub executor: Executor, - pub stats: Arc, + pub apis: Arc, + pub executor: Executor, + pub stats: Arc, } 
pub fn new_ws( - conf: WsConfiguration, - deps: &Dependencies, + conf: WsConfiguration, + deps: &Dependencies, ) -> Result, String> { - if !conf.enabled { - return Ok(None); - } + if !conf.enabled { + return Ok(None); + } - let domain = DAPPS_DOMAIN; - let url = format!("{}:{}", conf.interface, conf.port); - let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?; + let domain = DAPPS_DOMAIN; + let url = format!("{}:{}", conf.interface, conf.port); + let addr = url + .parse() + .map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?; - let full_handler = setup_apis(rpc_apis::ApiSet::All, deps); - let handler = { - let mut handler = MetaIoHandler::with_middleware(( - rpc::WsDispatcher::new(full_handler), - Middleware::new(deps.stats.clone(), deps.apis.activity_notifier()) - )); - let apis = conf.apis.list_apis(); - deps.apis.extend_with_set(&mut handler, &apis); + let full_handler = setup_apis(rpc_apis::ApiSet::All, deps); + let handler = { + let mut handler = MetaIoHandler::with_middleware(( + rpc::WsDispatcher::new(full_handler), + Middleware::new(deps.stats.clone(), deps.apis.activity_notifier()), + )); + let apis = conf.apis.list_apis(); + deps.apis.extend_with_set(&mut handler, &apis); - handler - }; + handler + }; - let allowed_origins = into_domains(with_domain(conf.origins, domain, &None)); - let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()))); + let allowed_origins = into_domains(with_domain(conf.origins, domain, &None)); + let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()))); - let signer_path; - let path = match conf.support_token_api { - true => { - signer_path = ::signer::codes_path(&conf.signer_path); - Some(signer_path.as_path()) - }, - false => None - }; - let start_result = rpc::start_ws( - &addr, - handler, - allowed_origins, - allowed_hosts, - conf.max_connections, - rpc::WsExtractor::new(path.clone()), - 
rpc::WsExtractor::new(path.clone()), - rpc::WsStats::new(deps.stats.clone()), - ); + let signer_path; + let path = match conf.support_token_api { + true => { + signer_path = ::signer::codes_path(&conf.signer_path); + Some(signer_path.as_path()) + } + false => None, + }; + let start_result = rpc::start_ws( + &addr, + handler, + allowed_origins, + allowed_hosts, + conf.max_connections, + rpc::WsExtractor::new(path.clone()), + rpc::WsExtractor::new(path.clone()), + rpc::WsStats::new(deps.stats.clone()), + ); -// match start_result { -// Ok(server) => Ok(Some(server)), -// Err(rpc::ws::Error::Io(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err( -// format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url) -// ), -// Err(e) => Err(format!("WebSockets error: {:?}", e)), -// } - match start_result { + // match start_result { + // Ok(server) => Ok(Some(server)), + // Err(rpc::ws::Error::Io(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err( + // format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url) + // ), + // Err(e) => Err(format!("WebSockets error: {:?}", e)), + // } + match start_result { Ok(server) => Ok(Some(server)), Err(rpc::ws::Error::WsError(ws::Error { kind: ws::ErrorKind::Io(ref err), .. 
@@ -205,35 +215,37 @@ pub fn new_ws( } pub fn new_http( - id: &str, - options: &str, - conf: HttpConfiguration, - deps: &Dependencies, + id: &str, + options: &str, + conf: HttpConfiguration, + deps: &Dependencies, ) -> Result, String> { - if !conf.enabled { - return Ok(None); - } + if !conf.enabled { + return Ok(None); + } - let domain = DAPPS_DOMAIN; - let url = format!("{}:{}", conf.interface, conf.port); - let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?; - let handler = setup_apis(conf.apis, deps); + let domain = DAPPS_DOMAIN; + let url = format!("{}:{}", conf.interface, conf.port); + let addr = url + .parse() + .map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?; + let handler = setup_apis(conf.apis, deps); - let cors_domains = into_domains(conf.cors); - let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()))); + let cors_domains = into_domains(conf.cors); + let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()))); - let start_result = rpc::start_http( - &addr, - cors_domains, - allowed_hosts, - handler, - rpc::RpcExtractor, - conf.server_threads, - conf.max_payload, - conf.keep_alive, - ); + let start_result = rpc::start_http( + &addr, + cors_domains, + allowed_hosts, + handler, + rpc::RpcExtractor, + conf.server_threads, + conf.max_payload, + conf.keep_alive, + ); - match start_result { + match start_result { Ok(server) => Ok(Some(server)), Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err( format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options) @@ -243,80 +255,105 @@ pub fn new_http( } pub fn new_ipc( - conf: IpcConfiguration, - dependencies: &Dependencies + conf: IpcConfiguration, + dependencies: &Dependencies, ) -> Result, String> { - if !conf.enabled { - 
return Ok(None); - } + if !conf.enabled { + return Ok(None); + } - let handler = setup_apis(conf.apis, dependencies); - let path = PathBuf::from(&conf.socket_addr); - // Make sure socket file can be created on unix-like OS. - // Windows pipe paths are not on the FS. - if !cfg!(windows) { - if let Some(dir) = path.parent() { - ::std::fs::create_dir_all(&dir) - .map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?; - } - } + let handler = setup_apis(conf.apis, dependencies); + let path = PathBuf::from(&conf.socket_addr); + // Make sure socket file can be created on unix-like OS. + // Windows pipe paths are not on the FS. + if !cfg!(windows) { + if let Some(dir) = path.parent() { + ::std::fs::create_dir_all(&dir).map_err(|err| { + format!( + "Unable to create IPC directory at {}: {}", + dir.display(), + err + ) + })?; + } + } - match rpc::start_ipc(&conf.socket_addr, handler, rpc::RpcExtractor) { - Ok(server) => Ok(Some(server)), - Err(io_error) => Err(format!("IPC error: {}", io_error)), - } + match rpc::start_ipc(&conf.socket_addr, handler, rpc::RpcExtractor) { + Ok(server) => Ok(Some(server)), + Err(io_error) => Err(format!("IPC error: {}", io_error)), + } } fn into_domains>(items: Option>) -> DomainsValidation { - items.map(|vals| vals.into_iter().map(T::from).collect()).into() + items + .map(|vals| vals.into_iter().map(T::from).collect()) + .into() } -fn with_domain(items: Option>, domain: &str, dapps_address: &Option) -> Option> { - fn extract_port(s: &str) -> Option { - s.split(':').nth(1).and_then(|s| s.parse().ok()) - } +fn with_domain( + items: Option>, + domain: &str, + dapps_address: &Option, +) -> Option> { + fn extract_port(s: &str) -> Option { + s.split(':').nth(1).and_then(|s| s.parse().ok()) + } - items.map(move |items| { - let mut items = items.into_iter().collect::>(); - { - let mut add_hosts = |address: &Option| { - if let Some(host) = address.clone() { - items.insert(host.to_string()); - 
items.insert(host.replace("127.0.0.1", "localhost")); - items.insert(format!("http://*.{}", domain)); //proxypac - if let Some(port) = extract_port(&*host) { - items.insert(format!("http://*.{}:{}", domain, port)); - } - } - }; + items.map(move |items| { + let mut items = items.into_iter().collect::>(); + { + let mut add_hosts = |address: &Option| { + if let Some(host) = address.clone() { + items.insert(host.to_string()); + items.insert(host.replace("127.0.0.1", "localhost")); + items.insert(format!("http://*.{}", domain)); //proxypac + if let Some(port) = extract_port(&*host) { + items.insert(format!("http://*.{}:{}", domain, port)); + } + } + }; - add_hosts(dapps_address); - } - items.into_iter().collect() - }) + add_hosts(dapps_address); + } + items.into_iter().collect() + }) } -pub fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler> - where D: rpc_apis::Dependencies +pub fn setup_apis( + apis: ApiSet, + deps: &Dependencies, +) -> MetaIoHandler> +where + D: rpc_apis::Dependencies, { - let mut handler = MetaIoHandler::with_middleware( - Middleware::new(deps.stats.clone(), deps.apis.activity_notifier()) - ); - let apis = apis.list_apis(); - deps.apis.extend_with_set(&mut handler, &apis); + let mut handler = MetaIoHandler::with_middleware(Middleware::new( + deps.stats.clone(), + deps.apis.activity_notifier(), + )); + let apis = apis.list_apis(); + deps.apis.extend_with_set(&mut handler, &apis); - handler + handler } #[cfg(test)] mod tests { - use super::address; + use super::address; - #[test] - fn should_return_proper_address() { - assert_eq!(address(false, "localhost", 8180, &None), None); - assert_eq!(address(true, "localhost", 8180, &None), Some("localhost:8180".into())); - assert_eq!(address(true, "localhost", 8180, &Some(vec!["host:443".into()])), Some("host:443".into())); - assert_eq!(address(true, "localhost", 8180, &Some(vec!["host".into()])), Some("host".into())); - } + #[test] + fn should_return_proper_address() { + 
assert_eq!(address(false, "localhost", 8180, &None), None); + assert_eq!( + address(true, "localhost", 8180, &None), + Some("localhost:8180".into()) + ); + assert_eq!( + address(true, "localhost", 8180, &Some(vec!["host:443".into()])), + Some("host:443".into()) + ); + assert_eq!( + address(true, "localhost", 8180, &Some(vec!["host".into()])), + Some("host".into()) + ); + } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 668206174..ff2ba4a69 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -14,28 +14,31 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::cmp::PartialEq; -use std::collections::{BTreeMap, HashSet}; -use std::str::FromStr; -use std::sync::{Arc, Weak}; +use std::{ + cmp::PartialEq, + collections::{BTreeMap, HashSet}, + str::FromStr, + sync::{Arc, Weak}, +}; pub use parity_rpc::signer::SignerService; use account_utils::{self, AccountProvider}; -use ethcore::client::Client; -use ethcore::miner::Miner; -use ethcore::snapshot::SnapshotService; +use ethcore::{client::Client, miner::Miner, snapshot::SnapshotService}; use ethcore_logger::RotatingLogger; use ethcore_private_tx::Provider as PrivateTransactionManager; use ethcore_service::PrivateTxService; use hash_fetch::fetch::Client as FetchClient; use jsonrpc_core::{self as core, MetaIoHandler}; -use light::client::LightChainClient; -use light::{Cache as LightDataCache, TransactionQueue as LightTransactionQueue}; +use light::{ + client::LightChainClient, Cache as LightDataCache, TransactionQueue as LightTransactionQueue, +}; use miner::external::ExternalMiner; -use parity_rpc::dispatch::{FullDispatcher, LightDispatcher}; -use parity_rpc::informant::{ActivityNotifier, ClientNotifier}; -use parity_rpc::{Host, Metadata, NetworkSettings}; +use parity_rpc::{ + dispatch::{FullDispatcher, LightDispatcher}, + informant::{ActivityNotifier, ClientNotifier}, + Host, Metadata, NetworkSettings, +}; use 
parity_runtime::Executor; use parking_lot::{Mutex, RwLock}; use sync::{LightSync, ManageNetwork, SyncProvider}; @@ -43,868 +46,907 @@ use updater::Updater; #[derive(Debug, PartialEq, Clone, Eq, Hash)] pub enum Api { - /// Web3 (Safe) - Web3, - /// Net (Safe) - Net, - /// Eth (Safe) - Eth, - /// Eth Pub-Sub (Safe) - EthPubSub, - /// Geth-compatible "personal" API (DEPRECATED; only used in `--geth` mode.) - Personal, - /// Signer - Confirm transactions in Signer (UNSAFE: Passwords, List of transactions) - Signer, - /// Parity - Custom extensions (Safe) - Parity, - /// Traces (Safe) - Traces, - /// Rpc (Safe) - Rpc, - /// Private transaction manager (Safe) - Private, - /// Whisper (Safe) - // TODO: _if_ someone guesses someone else's key or filter IDs they can remove - // BUT these are all ephemeral so it seems fine. - Whisper, - /// Whisper Pub-Sub (Safe but same concerns as above). - WhisperPubSub, - /// Parity PubSub - Generic Publish-Subscriber (Safety depends on other APIs exposed). - ParityPubSub, - /// Parity Accounts extensions (UNSAFE: Passwords, Side Effects (new account)) - ParityAccounts, - /// Parity - Set methods (UNSAFE: Side Effects affecting node operation) - ParitySet, - /// SecretStore (UNSAFE: arbitrary hash signing) - SecretStore, - /// Geth-compatible (best-effort) debug API (Potentially UNSAFE) - /// NOTE We don't aim to support all methods, only the ones that are useful. - Debug, + /// Web3 (Safe) + Web3, + /// Net (Safe) + Net, + /// Eth (Safe) + Eth, + /// Eth Pub-Sub (Safe) + EthPubSub, + /// Geth-compatible "personal" API (DEPRECATED; only used in `--geth` mode.) 
+ Personal, + /// Signer - Confirm transactions in Signer (UNSAFE: Passwords, List of transactions) + Signer, + /// Parity - Custom extensions (Safe) + Parity, + /// Traces (Safe) + Traces, + /// Rpc (Safe) + Rpc, + /// Private transaction manager (Safe) + Private, + /// Whisper (Safe) + // TODO: _if_ someone guesses someone else's key or filter IDs they can remove + // BUT these are all ephemeral so it seems fine. + Whisper, + /// Whisper Pub-Sub (Safe but same concerns as above). + WhisperPubSub, + /// Parity PubSub - Generic Publish-Subscriber (Safety depends on other APIs exposed). + ParityPubSub, + /// Parity Accounts extensions (UNSAFE: Passwords, Side Effects (new account)) + ParityAccounts, + /// Parity - Set methods (UNSAFE: Side Effects affecting node operation) + ParitySet, + /// SecretStore (UNSAFE: arbitrary hash signing) + SecretStore, + /// Geth-compatible (best-effort) debug API (Potentially UNSAFE) + /// NOTE We don't aim to support all methods, only the ones that are useful. 
+ Debug, } impl FromStr for Api { - type Err = String; + type Err = String; - fn from_str(s: &str) -> Result { - use self::Api::*; + fn from_str(s: &str) -> Result { + use self::Api::*; - match s { - "debug" => Ok(Debug), - "eth" => Ok(Eth), - "net" => Ok(Net), - "parity" => Ok(Parity), - "parity_accounts" => Ok(ParityAccounts), - "parity_pubsub" => Ok(ParityPubSub), - "parity_set" => Ok(ParitySet), - "personal" => Ok(Personal), - "private" => Ok(Private), - "pubsub" => Ok(EthPubSub), - "rpc" => Ok(Rpc), - "secretstore" => Ok(SecretStore), - "shh" => Ok(Whisper), - "shh_pubsub" => Ok(WhisperPubSub), - "signer" => Ok(Signer), - "traces" => Ok(Traces), - "web3" => Ok(Web3), - api => Err(format!("Unknown api: {}", api)), - } - } + match s { + "debug" => Ok(Debug), + "eth" => Ok(Eth), + "net" => Ok(Net), + "parity" => Ok(Parity), + "parity_accounts" => Ok(ParityAccounts), + "parity_pubsub" => Ok(ParityPubSub), + "parity_set" => Ok(ParitySet), + "personal" => Ok(Personal), + "private" => Ok(Private), + "pubsub" => Ok(EthPubSub), + "rpc" => Ok(Rpc), + "secretstore" => Ok(SecretStore), + "shh" => Ok(Whisper), + "shh_pubsub" => Ok(WhisperPubSub), + "signer" => Ok(Signer), + "traces" => Ok(Traces), + "web3" => Ok(Web3), + api => Err(format!("Unknown api: {}", api)), + } + } } #[derive(Debug, Clone)] pub enum ApiSet { - // Unsafe context (like jsonrpc over http) - UnsafeContext, - // All possible APIs (safe context like token-protected WS interface) - All, - // Local "unsafe" context and accounts access - IpcContext, - // APIs for Parity Generic Pub-Sub - PubSub, - // Fixed list of APis - List(HashSet), + // Unsafe context (like jsonrpc over http) + UnsafeContext, + // All possible APIs (safe context like token-protected WS interface) + All, + // Local "unsafe" context and accounts access + IpcContext, + // APIs for Parity Generic Pub-Sub + PubSub, + // Fixed list of APis + List(HashSet), } impl Default for ApiSet { - fn default() -> Self { - ApiSet::UnsafeContext - } + fn 
default() -> Self { + ApiSet::UnsafeContext + } } impl PartialEq for ApiSet { - fn eq(&self, other: &Self) -> bool { - self.list_apis() == other.list_apis() - } + fn eq(&self, other: &Self) -> bool { + self.list_apis() == other.list_apis() + } } impl FromStr for ApiSet { - type Err = String; + type Err = String; - fn from_str(s: &str) -> Result { - let mut apis = HashSet::new(); + fn from_str(s: &str) -> Result { + let mut apis = HashSet::new(); - for api in s.split(',') { - match api { - "all" => { - apis.extend(ApiSet::All.list_apis()); - } - "safe" => { - // Safe APIs are those that are safe even in UnsafeContext. - apis.extend(ApiSet::UnsafeContext.list_apis()); - } - // Remove the API - api if api.starts_with("-") => { - let api = api[1..].parse()?; - apis.remove(&api); - } - api => { - let api = api.parse()?; - apis.insert(api); - } - } - } + for api in s.split(',') { + match api { + "all" => { + apis.extend(ApiSet::All.list_apis()); + } + "safe" => { + // Safe APIs are those that are safe even in UnsafeContext. 
+ apis.extend(ApiSet::UnsafeContext.list_apis()); + } + // Remove the API + api if api.starts_with("-") => { + let api = api[1..].parse()?; + apis.remove(&api); + } + api => { + let api = api.parse()?; + apis.insert(api); + } + } + } - Ok(ApiSet::List(apis)) - } + Ok(ApiSet::List(apis)) + } } fn to_modules(apis: &HashSet) -> BTreeMap { - let mut modules = BTreeMap::new(); - for api in apis { - let (name, version) = match *api { - Api::Debug => ("debug", "1.0"), - Api::Eth => ("eth", "1.0"), - Api::EthPubSub => ("pubsub", "1.0"), - Api::Net => ("net", "1.0"), - Api::Parity => ("parity", "1.0"), - Api::ParityAccounts => ("parity_accounts", "1.0"), - Api::ParityPubSub => ("parity_pubsub", "1.0"), - Api::ParitySet => ("parity_set", "1.0"), - Api::Personal => ("personal", "1.0"), - Api::Private => ("private", "1.0"), - Api::Rpc => ("rpc", "1.0"), - Api::SecretStore => ("secretstore", "1.0"), - Api::Signer => ("signer", "1.0"), - Api::Traces => ("traces", "1.0"), - Api::Web3 => ("web3", "1.0"), - Api::Whisper => ("shh", "1.0"), - Api::WhisperPubSub => ("shh_pubsub", "1.0"), - }; - modules.insert(name.into(), version.into()); - } - modules + let mut modules = BTreeMap::new(); + for api in apis { + let (name, version) = match *api { + Api::Debug => ("debug", "1.0"), + Api::Eth => ("eth", "1.0"), + Api::EthPubSub => ("pubsub", "1.0"), + Api::Net => ("net", "1.0"), + Api::Parity => ("parity", "1.0"), + Api::ParityAccounts => ("parity_accounts", "1.0"), + Api::ParityPubSub => ("parity_pubsub", "1.0"), + Api::ParitySet => ("parity_set", "1.0"), + Api::Personal => ("personal", "1.0"), + Api::Private => ("private", "1.0"), + Api::Rpc => ("rpc", "1.0"), + Api::SecretStore => ("secretstore", "1.0"), + Api::Signer => ("signer", "1.0"), + Api::Traces => ("traces", "1.0"), + Api::Web3 => ("web3", "1.0"), + Api::Whisper => ("shh", "1.0"), + Api::WhisperPubSub => ("shh_pubsub", "1.0"), + }; + modules.insert(name.into(), version.into()); + } + modules } macro_rules! 
add_signing_methods { - ($namespace:ident, $handler:expr, $deps:expr, $dispatch:expr) => {{ - let deps = &$deps; - let (dispatcher, accounts) = $dispatch; - if deps.signer_service.is_enabled() { - $handler.extend_with($namespace::to_delegate(SigningQueueClient::new( - &deps.signer_service, - dispatcher.clone(), - deps.executor.clone(), - accounts, - ))) - } else { - $handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new( - accounts, - dispatcher.clone(), - ))) - } - }}; + ($namespace:ident, $handler:expr, $deps:expr, $dispatch:expr) => {{ + let deps = &$deps; + let (dispatcher, accounts) = $dispatch; + if deps.signer_service.is_enabled() { + $handler.extend_with($namespace::to_delegate(SigningQueueClient::new( + &deps.signer_service, + dispatcher.clone(), + deps.executor.clone(), + accounts, + ))) + } else { + $handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new( + accounts, + dispatcher.clone(), + ))) + } + }}; } /// RPC dependencies can be used to initialize RPC endpoints from APIs. pub trait Dependencies { - type Notifier: ActivityNotifier; + type Notifier: ActivityNotifier; - /// Create the activity notifier. - fn activity_notifier(&self) -> Self::Notifier; + /// Create the activity notifier. + fn activity_notifier(&self) -> Self::Notifier; - /// Extend the given I/O handler with endpoints for each API. - fn extend_with_set(&self, handler: &mut MetaIoHandler, apis: &HashSet) - where - S: core::Middleware; + /// Extend the given I/O handler with endpoints for each API. + fn extend_with_set(&self, handler: &mut MetaIoHandler, apis: &HashSet) + where + S: core::Middleware; } /// RPC dependencies for a full node. 
pub struct FullDependencies { - pub signer_service: Arc, - pub client: Arc, - pub snapshot: Arc, - pub sync: Arc, - pub net: Arc, - pub accounts: Arc, - pub private_tx_service: Option>, - pub miner: Arc, - pub external_miner: Arc, - pub logger: Arc, - pub settings: Arc, - pub net_service: Arc, - pub updater: Arc, - pub geth_compatibility: bool, - pub experimental_rpcs: bool, - pub ws_address: Option, - pub fetch: FetchClient, - pub executor: Executor, - pub whisper_rpc: Option<::whisper::RpcFactory>, - pub gas_price_percentile: usize, - pub poll_lifetime: u32, - pub allow_missing_blocks: bool, - pub no_ancient_blocks: bool, + pub signer_service: Arc, + pub client: Arc, + pub snapshot: Arc, + pub sync: Arc, + pub net: Arc, + pub accounts: Arc, + pub private_tx_service: Option>, + pub miner: Arc, + pub external_miner: Arc, + pub logger: Arc, + pub settings: Arc, + pub net_service: Arc, + pub updater: Arc, + pub geth_compatibility: bool, + pub experimental_rpcs: bool, + pub ws_address: Option, + pub fetch: FetchClient, + pub executor: Executor, + pub whisper_rpc: Option<::whisper::RpcFactory>, + pub gas_price_percentile: usize, + pub poll_lifetime: u32, + pub allow_missing_blocks: bool, + pub no_ancient_blocks: bool, } impl FullDependencies { - fn extend_api( - &self, - handler: &mut MetaIoHandler, - apis: &HashSet, - for_generic_pubsub: bool, - ) where - S: core::Middleware, - { - use parity_rpc::v1::*; + fn extend_api( + &self, + handler: &mut MetaIoHandler, + apis: &HashSet, + for_generic_pubsub: bool, + ) where + S: core::Middleware, + { + use parity_rpc::v1::*; - let nonces = Arc::new(Mutex::new(dispatch::Reservations::new( - self.executor.clone(), - ))); - let dispatcher = FullDispatcher::new( - self.client.clone(), - self.miner.clone(), - nonces.clone(), - self.gas_price_percentile, - ); - let account_signer = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; - let accounts = account_utils::accounts_list(self.accounts.clone()); + let nonces = 
Arc::new(Mutex::new(dispatch::Reservations::new( + self.executor.clone(), + ))); + let dispatcher = FullDispatcher::new( + self.client.clone(), + self.miner.clone(), + nonces.clone(), + self.gas_price_percentile, + ); + let account_signer = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; + let accounts = account_utils::accounts_list(self.accounts.clone()); - for api in apis { - match *api { - Api::Debug => { - handler.extend_with(DebugClient::new(self.client.clone()).to_delegate()); - } - Api::Web3 => { - handler.extend_with(Web3Client::default().to_delegate()); - } - Api::Net => { - handler.extend_with(NetClient::new(&self.sync).to_delegate()); - } - Api::Eth => { - let client = EthClient::new( - &self.client, - &self.snapshot, - &self.sync, - &accounts, - &self.miner, - &self.external_miner, - EthClientOptions { - pending_nonce_from_queue: self.geth_compatibility, - allow_pending_receipt_query: !self.geth_compatibility, - send_block_number_in_get_work: !self.geth_compatibility, - gas_price_percentile: self.gas_price_percentile, - allow_missing_blocks: self.allow_missing_blocks, - allow_experimental_rpcs: self.experimental_rpcs, - no_ancient_blocks: self.no_ancient_blocks - } - ); - handler.extend_with(client.to_delegate()); + for api in apis { + match *api { + Api::Debug => { + handler.extend_with(DebugClient::new(self.client.clone()).to_delegate()); + } + Api::Web3 => { + handler.extend_with(Web3Client::default().to_delegate()); + } + Api::Net => { + handler.extend_with(NetClient::new(&self.sync).to_delegate()); + } + Api::Eth => { + let client = EthClient::new( + &self.client, + &self.snapshot, + &self.sync, + &accounts, + &self.miner, + &self.external_miner, + EthClientOptions { + pending_nonce_from_queue: self.geth_compatibility, + allow_pending_receipt_query: !self.geth_compatibility, + send_block_number_in_get_work: !self.geth_compatibility, + gas_price_percentile: self.gas_price_percentile, + allow_missing_blocks: self.allow_missing_blocks, + 
allow_experimental_rpcs: self.experimental_rpcs, + no_ancient_blocks: self.no_ancient_blocks, + }, + ); + handler.extend_with(client.to_delegate()); - if !for_generic_pubsub { - let filter_client = EthFilterClient::new( - self.client.clone(), - self.miner.clone(), - self.poll_lifetime, - ); - handler.extend_with(filter_client.to_delegate()); + if !for_generic_pubsub { + let filter_client = EthFilterClient::new( + self.client.clone(), + self.miner.clone(), + self.poll_lifetime, + ); + handler.extend_with(filter_client.to_delegate()); - add_signing_methods!(EthSigning, handler, self, (&dispatcher, &account_signer)); - } - } - Api::EthPubSub => { - if !for_generic_pubsub { - let client = - EthPubSubClient::new(self.client.clone(), self.executor.clone()); - let h = client.handler(); - self.miner - .add_transactions_listener(Box::new(move |hashes| { - if let Some(h) = h.upgrade() { - h.notify_new_transactions(hashes); - } - })); + add_signing_methods!( + EthSigning, + handler, + self, + (&dispatcher, &account_signer) + ); + } + } + Api::EthPubSub => { + if !for_generic_pubsub { + let client = + EthPubSubClient::new(self.client.clone(), self.executor.clone()); + let h = client.handler(); + self.miner + .add_transactions_listener(Box::new(move |hashes| { + if let Some(h) = h.upgrade() { + h.notify_new_transactions(hashes); + } + })); - if let Some(h) = client.handler().upgrade() { - self.client.add_notify(h); - } - handler.extend_with(client.to_delegate()); - } - } - Api::Personal => { - #[cfg(feature = "accounts")] - handler.extend_with( - PersonalClient::new( - &self.accounts, - dispatcher.clone(), - self.geth_compatibility, - self.experimental_rpcs, - ).to_delegate(), - ); - } - Api::Signer => { - handler.extend_with( - SignerClient::new( - account_signer.clone(), - dispatcher.clone(), - &self.signer_service, - self.executor.clone(), - ).to_delegate(), - ); - } - Api::Parity => { - let signer = match self.signer_service.is_enabled() { - true => 
Some(self.signer_service.clone()), - false => None, - }; - handler.extend_with( - ParityClient::new( - self.client.clone(), - self.miner.clone(), - self.sync.clone(), - self.updater.clone(), - self.net_service.clone(), - self.logger.clone(), - self.settings.clone(), - signer, - self.ws_address.clone(), - self.snapshot.clone().into(), - ).to_delegate(), - ); - #[cfg(feature = "accounts")] - handler.extend_with(ParityAccountsInfo::to_delegate(ParityAccountsClient::new(&self.accounts))); + if let Some(h) = client.handler().upgrade() { + self.client.add_notify(h); + } + handler.extend_with(client.to_delegate()); + } + } + Api::Personal => { + #[cfg(feature = "accounts")] + handler.extend_with( + PersonalClient::new( + &self.accounts, + dispatcher.clone(), + self.geth_compatibility, + self.experimental_rpcs, + ) + .to_delegate(), + ); + } + Api::Signer => { + handler.extend_with( + SignerClient::new( + account_signer.clone(), + dispatcher.clone(), + &self.signer_service, + self.executor.clone(), + ) + .to_delegate(), + ); + } + Api::Parity => { + let signer = match self.signer_service.is_enabled() { + true => Some(self.signer_service.clone()), + false => None, + }; + handler.extend_with( + ParityClient::new( + self.client.clone(), + self.miner.clone(), + self.sync.clone(), + self.updater.clone(), + self.net_service.clone(), + self.logger.clone(), + self.settings.clone(), + signer, + self.ws_address.clone(), + self.snapshot.clone().into(), + ) + .to_delegate(), + ); + #[cfg(feature = "accounts")] + handler.extend_with(ParityAccountsInfo::to_delegate( + ParityAccountsClient::new(&self.accounts), + )); - if !for_generic_pubsub { - add_signing_methods!(ParitySigning, handler, self, (&dispatcher, &account_signer)); - } - } - Api::ParityPubSub => { - if !for_generic_pubsub { - let mut rpc = MetaIoHandler::default(); - let apis = ApiSet::List(apis.clone()) - .retain(ApiSet::PubSub) - .list_apis(); - self.extend_api(&mut rpc, &apis, true); - handler.extend_with( - 
PubSubClient::new(rpc, self.executor.clone()).to_delegate(), - ); - } - } - Api::ParityAccounts => { - #[cfg(feature = "accounts")] - handler.extend_with(ParityAccounts::to_delegate(ParityAccountsClient::new(&self.accounts))); - } - Api::ParitySet => { - handler.extend_with( - ParitySetClient::new( - &self.client, - &self.miner, - &self.updater, - &self.net_service, - self.fetch.clone(), - ).to_delegate(), - ); - #[cfg(feature = "accounts")] - handler.extend_with( - ParitySetAccountsClient::new( - &self.accounts, - &self.miner, - ).to_delegate(), - ); - } - Api::Traces => handler.extend_with(TracesClient::new(&self.client).to_delegate()), - Api::Rpc => { - let modules = to_modules(&apis); - handler.extend_with(RpcClient::new(modules).to_delegate()); - } - Api::SecretStore => { - #[cfg(feature = "accounts")] - handler.extend_with(SecretStoreClient::new(&self.accounts).to_delegate()); - } - Api::Whisper => { - if let Some(ref whisper_rpc) = self.whisper_rpc { - let whisper = whisper_rpc.make_handler(self.net.clone()); - handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper)); - } - } - Api::WhisperPubSub => { - if !for_generic_pubsub { - if let Some(ref whisper_rpc) = self.whisper_rpc { - let whisper = whisper_rpc.make_handler(self.net.clone()); - handler.extend_with(::parity_whisper::rpc::WhisperPubSub::to_delegate( - whisper, - )); - } - } - } - Api::Private => { - handler.extend_with( - PrivateClient::new(self.private_tx_service.as_ref().map(|p| p.provider())) - .to_delegate(), - ); - } - } - } - } + if !for_generic_pubsub { + add_signing_methods!( + ParitySigning, + handler, + self, + (&dispatcher, &account_signer) + ); + } + } + Api::ParityPubSub => { + if !for_generic_pubsub { + let mut rpc = MetaIoHandler::default(); + let apis = ApiSet::List(apis.clone()) + .retain(ApiSet::PubSub) + .list_apis(); + self.extend_api(&mut rpc, &apis, true); + handler.extend_with( + PubSubClient::new(rpc, self.executor.clone()).to_delegate(), + ); + } + } + 
Api::ParityAccounts => { + #[cfg(feature = "accounts")] + handler.extend_with(ParityAccounts::to_delegate(ParityAccountsClient::new( + &self.accounts, + ))); + } + Api::ParitySet => { + handler.extend_with( + ParitySetClient::new( + &self.client, + &self.miner, + &self.updater, + &self.net_service, + self.fetch.clone(), + ) + .to_delegate(), + ); + #[cfg(feature = "accounts")] + handler.extend_with( + ParitySetAccountsClient::new(&self.accounts, &self.miner).to_delegate(), + ); + } + Api::Traces => handler.extend_with(TracesClient::new(&self.client).to_delegate()), + Api::Rpc => { + let modules = to_modules(&apis); + handler.extend_with(RpcClient::new(modules).to_delegate()); + } + Api::SecretStore => { + #[cfg(feature = "accounts")] + handler.extend_with(SecretStoreClient::new(&self.accounts).to_delegate()); + } + Api::Whisper => { + if let Some(ref whisper_rpc) = self.whisper_rpc { + let whisper = whisper_rpc.make_handler(self.net.clone()); + handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper)); + } + } + Api::WhisperPubSub => { + if !for_generic_pubsub { + if let Some(ref whisper_rpc) = self.whisper_rpc { + let whisper = whisper_rpc.make_handler(self.net.clone()); + handler.extend_with(::parity_whisper::rpc::WhisperPubSub::to_delegate( + whisper, + )); + } + } + } + Api::Private => { + handler.extend_with( + PrivateClient::new(self.private_tx_service.as_ref().map(|p| p.provider())) + .to_delegate(), + ); + } + } + } + } } impl Dependencies for FullDependencies { - type Notifier = ClientNotifier; + type Notifier = ClientNotifier; - fn activity_notifier(&self) -> ClientNotifier { - ClientNotifier { - client: self.client.clone(), - } - } + fn activity_notifier(&self) -> ClientNotifier { + ClientNotifier { + client: self.client.clone(), + } + } - fn extend_with_set(&self, handler: &mut MetaIoHandler, apis: &HashSet) - where - S: core::Middleware, - { - self.extend_api(handler, apis, false) - } + fn extend_with_set(&self, handler: &mut 
MetaIoHandler, apis: &HashSet) + where + S: core::Middleware, + { + self.extend_api(handler, apis, false) + } } /// Light client notifier. Doesn't do anything yet, but might in the future. pub struct LightClientNotifier; impl ActivityNotifier for LightClientNotifier { - fn active(&self) {} + fn active(&self) {} } /// RPC dependencies for a light client. pub struct LightDependencies { - pub signer_service: Arc, - pub client: Arc, - pub sync: Arc, - pub net: Arc, - pub accounts: Arc, - pub logger: Arc, - pub settings: Arc, - pub on_demand: Arc<::light::on_demand::OnDemand>, - pub cache: Arc>, - pub transaction_queue: Arc>, - pub ws_address: Option, - pub fetch: FetchClient, - pub geth_compatibility: bool, - pub experimental_rpcs: bool, - pub executor: Executor, - pub whisper_rpc: Option<::whisper::RpcFactory>, - pub private_tx_service: Option>, - pub gas_price_percentile: usize, - pub poll_lifetime: u32, + pub signer_service: Arc, + pub client: Arc, + pub sync: Arc, + pub net: Arc, + pub accounts: Arc, + pub logger: Arc, + pub settings: Arc, + pub on_demand: Arc<::light::on_demand::OnDemand>, + pub cache: Arc>, + pub transaction_queue: Arc>, + pub ws_address: Option, + pub fetch: FetchClient, + pub geth_compatibility: bool, + pub experimental_rpcs: bool, + pub executor: Executor, + pub whisper_rpc: Option<::whisper::RpcFactory>, + pub private_tx_service: Option>, + pub gas_price_percentile: usize, + pub poll_lifetime: u32, } impl LightDependencies { - fn extend_api>( - &self, - handler: &mut MetaIoHandler, - apis: &HashSet, - for_generic_pubsub: bool, - ) { - use parity_rpc::v1::*; + fn extend_api>( + &self, + handler: &mut MetaIoHandler, + apis: &HashSet, + for_generic_pubsub: bool, + ) { + use parity_rpc::v1::*; - let dispatcher = LightDispatcher::new( - self.sync.clone(), - self.client.clone(), - self.on_demand.clone(), - self.cache.clone(), - self.transaction_queue.clone(), - Arc::new(Mutex::new(dispatch::Reservations::new( - self.executor.clone(), - ))), - 
self.gas_price_percentile, - ); - let account_signer = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; - let accounts = account_utils::accounts_list(self.accounts.clone()); + let dispatcher = LightDispatcher::new( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + self.transaction_queue.clone(), + Arc::new(Mutex::new(dispatch::Reservations::new( + self.executor.clone(), + ))), + self.gas_price_percentile, + ); + let account_signer = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; + let accounts = account_utils::accounts_list(self.accounts.clone()); - for api in apis { - match *api { - Api::Debug => { - warn!(target: "rpc", "Debug API is not available in light client mode.") - } - Api::Web3 => { - handler.extend_with(Web3Client::default().to_delegate()); - } - Api::Net => { - handler.extend_with(light::NetClient::new(self.sync.clone()).to_delegate()); - } - Api::Eth => { - let client = light::EthClient::new( - self.sync.clone(), - self.client.clone(), - self.on_demand.clone(), - self.transaction_queue.clone(), - accounts.clone(), - self.cache.clone(), - self.gas_price_percentile, - self.poll_lifetime, - ); - handler.extend_with(Eth::to_delegate(client.clone())); + for api in apis { + match *api { + Api::Debug => { + warn!(target: "rpc", "Debug API is not available in light client mode.") + } + Api::Web3 => { + handler.extend_with(Web3Client::default().to_delegate()); + } + Api::Net => { + handler.extend_with(light::NetClient::new(self.sync.clone()).to_delegate()); + } + Api::Eth => { + let client = light::EthClient::new( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.transaction_queue.clone(), + accounts.clone(), + self.cache.clone(), + self.gas_price_percentile, + self.poll_lifetime, + ); + handler.extend_with(Eth::to_delegate(client.clone())); - if !for_generic_pubsub { - handler.extend_with(EthFilter::to_delegate(client)); - add_signing_methods!(EthSigning, handler, 
self, (&dispatcher, &account_signer)); - } - } - Api::EthPubSub => { - let client = EthPubSubClient::light( - self.client.clone(), - self.on_demand.clone(), - self.sync.clone(), - self.cache.clone(), - self.executor.clone(), - self.gas_price_percentile, - ); - self.client.add_listener(client.handler() as Weak<_>); - let h = client.handler(); - self.transaction_queue - .write() - .add_listener(Box::new(move |transactions| { - if let Some(h) = h.upgrade() { - h.notify_new_transactions(transactions); - } - })); - handler.extend_with(EthPubSub::to_delegate(client)); - } - Api::Personal => { - #[cfg(feature = "accounts")] - handler.extend_with( - PersonalClient::new( - &self.accounts, - dispatcher.clone(), - self.geth_compatibility, - self.experimental_rpcs, - ).to_delegate(), - ); - } - Api::Signer => { - handler.extend_with( - SignerClient::new( - account_signer.clone(), - dispatcher.clone(), - &self.signer_service, - self.executor.clone(), - ).to_delegate(), - ); - } - Api::Parity => { - let signer = match self.signer_service.is_enabled() { - true => Some(self.signer_service.clone()), - false => None, - }; - handler.extend_with( - light::ParityClient::new( - Arc::new(dispatcher.clone()), - self.logger.clone(), - self.settings.clone(), - signer, - self.ws_address.clone(), - self.gas_price_percentile, - ).to_delegate(), - ); - #[cfg(feature = "accounts")] - handler.extend_with( - ParityAccountsInfo::to_delegate(ParityAccountsClient::new(&self.accounts)) - ); + if !for_generic_pubsub { + handler.extend_with(EthFilter::to_delegate(client)); + add_signing_methods!( + EthSigning, + handler, + self, + (&dispatcher, &account_signer) + ); + } + } + Api::EthPubSub => { + let client = EthPubSubClient::light( + self.client.clone(), + self.on_demand.clone(), + self.sync.clone(), + self.cache.clone(), + self.executor.clone(), + self.gas_price_percentile, + ); + self.client.add_listener(client.handler() as Weak<_>); + let h = client.handler(); + self.transaction_queue + .write() + 
.add_listener(Box::new(move |transactions| { + if let Some(h) = h.upgrade() { + h.notify_new_transactions(transactions); + } + })); + handler.extend_with(EthPubSub::to_delegate(client)); + } + Api::Personal => { + #[cfg(feature = "accounts")] + handler.extend_with( + PersonalClient::new( + &self.accounts, + dispatcher.clone(), + self.geth_compatibility, + self.experimental_rpcs, + ) + .to_delegate(), + ); + } + Api::Signer => { + handler.extend_with( + SignerClient::new( + account_signer.clone(), + dispatcher.clone(), + &self.signer_service, + self.executor.clone(), + ) + .to_delegate(), + ); + } + Api::Parity => { + let signer = match self.signer_service.is_enabled() { + true => Some(self.signer_service.clone()), + false => None, + }; + handler.extend_with( + light::ParityClient::new( + Arc::new(dispatcher.clone()), + self.logger.clone(), + self.settings.clone(), + signer, + self.ws_address.clone(), + self.gas_price_percentile, + ) + .to_delegate(), + ); + #[cfg(feature = "accounts")] + handler.extend_with(ParityAccountsInfo::to_delegate( + ParityAccountsClient::new(&self.accounts), + )); - if !for_generic_pubsub { - add_signing_methods!(ParitySigning, handler, self, (&dispatcher, &account_signer)); - } - } - Api::ParityPubSub => { - if !for_generic_pubsub { - let mut rpc = MetaIoHandler::default(); - let apis = ApiSet::List(apis.clone()) - .retain(ApiSet::PubSub) - .list_apis(); - self.extend_api(&mut rpc, &apis, true); - handler.extend_with( - PubSubClient::new(rpc, self.executor.clone()).to_delegate(), - ); - } - } - Api::ParityAccounts => { - #[cfg(feature = "accounts")] - handler.extend_with(ParityAccounts::to_delegate(ParityAccountsClient::new(&self.accounts))); - } - Api::ParitySet => handler.extend_with( - light::ParitySetClient::new(self.client.clone(), self.sync.clone(), self.fetch.clone()) - .to_delegate(), - ), - Api::Traces => handler.extend_with(light::TracesClient.to_delegate()), - Api::Rpc => { - let modules = to_modules(&apis); - 
handler.extend_with(RpcClient::new(modules).to_delegate()); - } - Api::SecretStore => { - #[cfg(feature = "accounts")] - handler.extend_with(SecretStoreClient::new(&self.accounts).to_delegate()); - } - Api::Whisper => { - if let Some(ref whisper_rpc) = self.whisper_rpc { - let whisper = whisper_rpc.make_handler(self.net.clone()); - handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper)); - } - } - Api::WhisperPubSub => { - if let Some(ref whisper_rpc) = self.whisper_rpc { - let whisper = whisper_rpc.make_handler(self.net.clone()); - handler.extend_with(::parity_whisper::rpc::WhisperPubSub::to_delegate( - whisper, - )); - } - } - Api::Private => { - if let Some(ref tx_manager) = self.private_tx_service { - let private_tx_service = Some(tx_manager.clone()); - handler.extend_with(PrivateClient::new(private_tx_service).to_delegate()); - } - } - } - } - } + if !for_generic_pubsub { + add_signing_methods!( + ParitySigning, + handler, + self, + (&dispatcher, &account_signer) + ); + } + } + Api::ParityPubSub => { + if !for_generic_pubsub { + let mut rpc = MetaIoHandler::default(); + let apis = ApiSet::List(apis.clone()) + .retain(ApiSet::PubSub) + .list_apis(); + self.extend_api(&mut rpc, &apis, true); + handler.extend_with( + PubSubClient::new(rpc, self.executor.clone()).to_delegate(), + ); + } + } + Api::ParityAccounts => { + #[cfg(feature = "accounts")] + handler.extend_with(ParityAccounts::to_delegate(ParityAccountsClient::new( + &self.accounts, + ))); + } + Api::ParitySet => handler.extend_with( + light::ParitySetClient::new( + self.client.clone(), + self.sync.clone(), + self.fetch.clone(), + ) + .to_delegate(), + ), + Api::Traces => handler.extend_with(light::TracesClient.to_delegate()), + Api::Rpc => { + let modules = to_modules(&apis); + handler.extend_with(RpcClient::new(modules).to_delegate()); + } + Api::SecretStore => { + #[cfg(feature = "accounts")] + handler.extend_with(SecretStoreClient::new(&self.accounts).to_delegate()); + } + 
Api::Whisper => { + if let Some(ref whisper_rpc) = self.whisper_rpc { + let whisper = whisper_rpc.make_handler(self.net.clone()); + handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper)); + } + } + Api::WhisperPubSub => { + if let Some(ref whisper_rpc) = self.whisper_rpc { + let whisper = whisper_rpc.make_handler(self.net.clone()); + handler.extend_with(::parity_whisper::rpc::WhisperPubSub::to_delegate( + whisper, + )); + } + } + Api::Private => { + if let Some(ref tx_manager) = self.private_tx_service { + let private_tx_service = Some(tx_manager.clone()); + handler.extend_with(PrivateClient::new(private_tx_service).to_delegate()); + } + } + } + } + } } impl Dependencies for LightDependencies { - type Notifier = LightClientNotifier; + type Notifier = LightClientNotifier; - fn activity_notifier(&self) -> Self::Notifier { - LightClientNotifier - } + fn activity_notifier(&self) -> Self::Notifier { + LightClientNotifier + } - fn extend_with_set(&self, handler: &mut MetaIoHandler, apis: &HashSet) - where - S: core::Middleware, - { - self.extend_api(handler, apis, false) - } + fn extend_with_set(&self, handler: &mut MetaIoHandler, apis: &HashSet) + where + S: core::Middleware, + { + self.extend_api(handler, apis, false) + } } impl ApiSet { - /// Retains only APIs in given set. - pub fn retain(self, set: Self) -> Self { - ApiSet::List(&self.list_apis() & &set.list_apis()) - } + /// Retains only APIs in given set. 
+ pub fn retain(self, set: Self) -> Self { + ApiSet::List(&self.list_apis() & &set.list_apis()) + } - pub fn list_apis(&self) -> HashSet { - let mut public_list: HashSet = [ - Api::Web3, - Api::Net, - Api::Eth, - Api::EthPubSub, - Api::Parity, - Api::Rpc, - Api::Whisper, - Api::WhisperPubSub, - Api::Private, - ] - .into_iter() - .cloned() - .collect(); + pub fn list_apis(&self) -> HashSet { + let mut public_list: HashSet = [ + Api::Web3, + Api::Net, + Api::Eth, + Api::EthPubSub, + Api::Parity, + Api::Rpc, + Api::Whisper, + Api::WhisperPubSub, + Api::Private, + ] + .into_iter() + .cloned() + .collect(); - match *self { - ApiSet::List(ref apis) => apis.clone(), - ApiSet::UnsafeContext => { - public_list.insert(Api::Traces); - public_list.insert(Api::ParityPubSub); - public_list - } - ApiSet::IpcContext => { - public_list.insert(Api::Traces); - public_list.insert(Api::ParityPubSub); - public_list.insert(Api::ParityAccounts); - public_list - } - ApiSet::All => { - public_list.insert(Api::Debug); - public_list.insert(Api::Traces); - public_list.insert(Api::ParityPubSub); - public_list.insert(Api::ParityAccounts); - public_list.insert(Api::ParitySet); - public_list.insert(Api::Signer); - public_list.insert(Api::Personal); - public_list.insert(Api::SecretStore); - public_list - } - ApiSet::PubSub => [ - Api::Eth, - Api::Parity, - Api::ParityAccounts, - Api::ParitySet, - Api::Traces, - ] - .into_iter() - .cloned() - .collect(), - } - } + match *self { + ApiSet::List(ref apis) => apis.clone(), + ApiSet::UnsafeContext => { + public_list.insert(Api::Traces); + public_list.insert(Api::ParityPubSub); + public_list + } + ApiSet::IpcContext => { + public_list.insert(Api::Traces); + public_list.insert(Api::ParityPubSub); + public_list.insert(Api::ParityAccounts); + public_list + } + ApiSet::All => { + public_list.insert(Api::Debug); + public_list.insert(Api::Traces); + public_list.insert(Api::ParityPubSub); + public_list.insert(Api::ParityAccounts); + 
public_list.insert(Api::ParitySet); + public_list.insert(Api::Signer); + public_list.insert(Api::Personal); + public_list.insert(Api::SecretStore); + public_list + } + ApiSet::PubSub => [ + Api::Eth, + Api::Parity, + Api::ParityAccounts, + Api::ParitySet, + Api::Traces, + ] + .into_iter() + .cloned() + .collect(), + } + } } #[cfg(test)] mod test { - use super::{Api, ApiSet}; + use super::{Api, ApiSet}; - #[test] - fn test_api_parsing() { - assert_eq!(Api::Debug, "debug".parse().unwrap()); - assert_eq!(Api::Web3, "web3".parse().unwrap()); - assert_eq!(Api::Net, "net".parse().unwrap()); - assert_eq!(Api::Eth, "eth".parse().unwrap()); - assert_eq!(Api::EthPubSub, "pubsub".parse().unwrap()); - assert_eq!(Api::Personal, "personal".parse().unwrap()); - assert_eq!(Api::Signer, "signer".parse().unwrap()); - assert_eq!(Api::Parity, "parity".parse().unwrap()); - assert_eq!(Api::ParityAccounts, "parity_accounts".parse().unwrap()); - assert_eq!(Api::ParitySet, "parity_set".parse().unwrap()); - assert_eq!(Api::Traces, "traces".parse().unwrap()); - assert_eq!(Api::Rpc, "rpc".parse().unwrap()); - assert_eq!(Api::SecretStore, "secretstore".parse().unwrap()); - assert_eq!(Api::Private, "private".parse().unwrap()); - assert_eq!(Api::Whisper, "shh".parse().unwrap()); - assert_eq!(Api::WhisperPubSub, "shh_pubsub".parse().unwrap()); - assert!("rp".parse::().is_err()); - } + #[test] + fn test_api_parsing() { + assert_eq!(Api::Debug, "debug".parse().unwrap()); + assert_eq!(Api::Web3, "web3".parse().unwrap()); + assert_eq!(Api::Net, "net".parse().unwrap()); + assert_eq!(Api::Eth, "eth".parse().unwrap()); + assert_eq!(Api::EthPubSub, "pubsub".parse().unwrap()); + assert_eq!(Api::Personal, "personal".parse().unwrap()); + assert_eq!(Api::Signer, "signer".parse().unwrap()); + assert_eq!(Api::Parity, "parity".parse().unwrap()); + assert_eq!(Api::ParityAccounts, "parity_accounts".parse().unwrap()); + assert_eq!(Api::ParitySet, "parity_set".parse().unwrap()); + assert_eq!(Api::Traces, 
"traces".parse().unwrap()); + assert_eq!(Api::Rpc, "rpc".parse().unwrap()); + assert_eq!(Api::SecretStore, "secretstore".parse().unwrap()); + assert_eq!(Api::Private, "private".parse().unwrap()); + assert_eq!(Api::Whisper, "shh".parse().unwrap()); + assert_eq!(Api::WhisperPubSub, "shh_pubsub".parse().unwrap()); + assert!("rp".parse::().is_err()); + } - #[test] - fn test_api_set_default() { - assert_eq!(ApiSet::UnsafeContext, ApiSet::default()); - } + #[test] + fn test_api_set_default() { + assert_eq!(ApiSet::UnsafeContext, ApiSet::default()); + } - #[test] - fn test_api_set_parsing() { - assert_eq!( - ApiSet::List(vec![Api::Web3, Api::Eth].into_iter().collect()), - "web3,eth".parse().unwrap() - ); - } + #[test] + fn test_api_set_parsing() { + assert_eq!( + ApiSet::List(vec![Api::Web3, Api::Eth].into_iter().collect()), + "web3,eth".parse().unwrap() + ); + } - #[test] - fn test_api_set_unsafe_context() { - let expected = vec![ - // make sure this list contains only SAFE methods - Api::Web3, - Api::Net, - Api::Eth, - Api::EthPubSub, - Api::Parity, - Api::ParityPubSub, - Api::Traces, - Api::Rpc, - Api::Whisper, - Api::WhisperPubSub, - Api::Private, - ].into_iter() - .collect(); - assert_eq!(ApiSet::UnsafeContext.list_apis(), expected); - } + #[test] + fn test_api_set_unsafe_context() { + let expected = vec![ + // make sure this list contains only SAFE methods + Api::Web3, + Api::Net, + Api::Eth, + Api::EthPubSub, + Api::Parity, + Api::ParityPubSub, + Api::Traces, + Api::Rpc, + Api::Whisper, + Api::WhisperPubSub, + Api::Private, + ] + .into_iter() + .collect(); + assert_eq!(ApiSet::UnsafeContext.list_apis(), expected); + } - #[test] - fn test_api_set_ipc_context() { - let expected = vec![ - // safe - Api::Web3, - Api::Net, - Api::Eth, - Api::EthPubSub, - Api::Parity, - Api::ParityPubSub, - Api::Traces, - Api::Rpc, - Api::Whisper, - Api::WhisperPubSub, - Api::Private, - // semi-safe - Api::ParityAccounts, - ].into_iter() - .collect(); - 
assert_eq!(ApiSet::IpcContext.list_apis(), expected); - } + #[test] + fn test_api_set_ipc_context() { + let expected = vec![ + // safe + Api::Web3, + Api::Net, + Api::Eth, + Api::EthPubSub, + Api::Parity, + Api::ParityPubSub, + Api::Traces, + Api::Rpc, + Api::Whisper, + Api::WhisperPubSub, + Api::Private, + // semi-safe + Api::ParityAccounts, + ] + .into_iter() + .collect(); + assert_eq!(ApiSet::IpcContext.list_apis(), expected); + } - #[test] - fn test_all_apis() { - assert_eq!( - "all".parse::().unwrap(), - ApiSet::List( - vec![ - Api::Web3, - Api::Net, - Api::Eth, - Api::EthPubSub, - Api::Parity, - Api::ParityPubSub, - Api::Traces, - Api::Rpc, - Api::SecretStore, - Api::Whisper, - Api::WhisperPubSub, - Api::ParityAccounts, - Api::ParitySet, - Api::Signer, - Api::Personal, - Api::Private, - Api::Debug, - ].into_iter() - .collect() - ) - ); - } + #[test] + fn test_all_apis() { + assert_eq!( + "all".parse::().unwrap(), + ApiSet::List( + vec![ + Api::Web3, + Api::Net, + Api::Eth, + Api::EthPubSub, + Api::Parity, + Api::ParityPubSub, + Api::Traces, + Api::Rpc, + Api::SecretStore, + Api::Whisper, + Api::WhisperPubSub, + Api::ParityAccounts, + Api::ParitySet, + Api::Signer, + Api::Personal, + Api::Private, + Api::Debug, + ] + .into_iter() + .collect() + ) + ); + } - #[test] - fn test_all_without_personal_apis() { - assert_eq!( - "personal,all,-personal".parse::().unwrap(), - ApiSet::List( - vec![ - Api::Web3, - Api::Net, - Api::Eth, - Api::EthPubSub, - Api::Parity, - Api::ParityPubSub, - Api::Traces, - Api::Rpc, - Api::SecretStore, - Api::Whisper, - Api::WhisperPubSub, - Api::ParityAccounts, - Api::ParitySet, - Api::Signer, - Api::Private, - Api::Debug, - ].into_iter() - .collect() - ) - ); - } + #[test] + fn test_all_without_personal_apis() { + assert_eq!( + "personal,all,-personal".parse::().unwrap(), + ApiSet::List( + vec![ + Api::Web3, + Api::Net, + Api::Eth, + Api::EthPubSub, + Api::Parity, + Api::ParityPubSub, + Api::Traces, + Api::Rpc, + Api::SecretStore, + 
Api::Whisper, + Api::WhisperPubSub, + Api::ParityAccounts, + Api::ParitySet, + Api::Signer, + Api::Private, + Api::Debug, + ] + .into_iter() + .collect() + ) + ); + } - #[test] - fn test_safe_parsing() { - assert_eq!( - "safe".parse::().unwrap(), - ApiSet::List( - vec![ - Api::Web3, - Api::Net, - Api::Eth, - Api::EthPubSub, - Api::Parity, - Api::ParityPubSub, - Api::Traces, - Api::Rpc, - Api::Whisper, - Api::WhisperPubSub, - Api::Private, - ].into_iter() - .collect() - ) - ); - } + #[test] + fn test_safe_parsing() { + assert_eq!( + "safe".parse::().unwrap(), + ApiSet::List( + vec![ + Api::Web3, + Api::Net, + Api::Eth, + Api::EthPubSub, + Api::Parity, + Api::ParityPubSub, + Api::Traces, + Api::Rpc, + Api::Whisper, + Api::WhisperPubSub, + Api::Private, + ] + .into_iter() + .collect() + ) + ); + } } diff --git a/parity/run.rs b/parity/run.rs index 6ddeadbe0..085efcd19 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -14,56 +14,62 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::any::Any; -use std::sync::{Arc, Weak, atomic}; -use std::time::{Duration, Instant}; -use std::thread; +use std::{ + any::Any, + sync::{atomic, Arc, Weak}, + thread, + time::{Duration, Instant}, +}; +use account_utils; use ansi_term::Colour; use bytes::Bytes; +use cache::CacheConfig; use call_contract::CallContract; -use ethcore::client::{BlockId, Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient, BlockInfo}; -use ethcore::miner::{self, stratum, Miner, MinerService, MinerOptions}; -use ethcore::snapshot::{self, SnapshotConfiguration}; -use ethcore::spec::{SpecParams, OptimizeFor}; -use ethcore::verification::queue::VerifierSettings; +use db; +use dir::{DatabaseDirectories, Directories}; +use ethcore::{ + client::{ + BlockChainClient, BlockId, BlockInfo, Client, DatabaseCompactionProfile, Mode, VMType, + }, + miner::{self, stratum, Miner, MinerOptions, MinerService}, + snapshot::{self, SnapshotConfiguration}, + spec::{OptimizeFor, SpecParams}, + verification::queue::VerifierSettings, +}; use ethcore_logger::{Config as LogConfig, RotatingLogger}; +use ethcore_private_tx::{EncryptorConfig, ProviderConfig, SecretStoreEncryptor}; use ethcore_service::ClientService; use ethereum_types::Address; use futures::IntoFuture; use hash_fetch::{self, fetch}; -use informant::{Informant, LightNodeInformantData, FullNodeInformantData}; -use journaldb::Algorithm; -use light::Cache as LightDataCache; -use miner::external::ExternalMiner; -use miner::work_notify::WorkPoster; -use node_filter::NodeFilter; -use parity_runtime::Runtime; -use sync::{self, SyncConfig, PrivateTxHandler}; -use parity_rpc::{ - Origin, Metadata, NetworkSettings, informant, is_major_importing, PubSubSession, FutureResult, FutureResponse, FutureOutput -}; -use updater::{UpdatePolicy, Updater}; -use parity_version::version; -use ethcore_private_tx::{ProviderConfig, EncryptorConfig, SecretStoreEncryptor}; -use params::{ - SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, 
Switch, - tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool -}; -use account_utils; -use helpers::{to_client_config, execute_upgrades, passwords_from_files}; -use dir::{Directories, DatabaseDirectories}; -use cache::CacheConfig; -use user_defaults::UserDefaults; +use helpers::{execute_upgrades, passwords_from_files, to_client_config}; +use informant::{FullNodeInformantData, Informant, LightNodeInformantData}; use ipfs; +use journaldb::Algorithm; use jsonrpc_core; +use light::Cache as LightDataCache; +use miner::{external::ExternalMiner, work_notify::WorkPoster}; use modules; -use registrar::{RegistrarClient, Asynchronous}; +use node_filter::NodeFilter; +use params::{ + fatdb_switch_to_bool, mode_switch_to_bool, tracing_switch_to_bool, AccountsConfig, + GasPricerConfig, MinerExtras, Pruning, SpecType, Switch, +}; +use parity_rpc::{ + informant, is_major_importing, FutureOutput, FutureResponse, FutureResult, Metadata, + NetworkSettings, Origin, PubSubSession, +}; +use parity_runtime::Runtime; +use parity_version::version; +use registrar::{Asynchronous, RegistrarClient}; use rpc; use rpc_apis; use secretstore; use signer; -use db; +use sync::{self, PrivateTxHandler, SyncConfig}; +use updater::{UpdatePolicy, Updater}; +use user_defaults::UserDefaults; // how often to take periodic snapshots. const SNAPSHOT_PERIOD: u64 = 5000; @@ -83,754 +89,901 @@ const FETCH_LIGHT_NUM_DNS_THREADS: usize = 1; #[derive(Debug, PartialEq)] pub struct RunCmd { - pub cache_config: CacheConfig, - pub dirs: Directories, - pub spec: SpecType, - pub pruning: Pruning, - pub pruning_history: u64, - pub pruning_memory: usize, - /// Some if execution should be daemonized. Contains pid_file path. 
- pub daemon: Option, - pub logger_config: LogConfig, - pub miner_options: MinerOptions, - pub gas_price_percentile: usize, - pub poll_lifetime: u32, - pub ws_conf: rpc::WsConfiguration, - pub http_conf: rpc::HttpConfiguration, - pub ipc_conf: rpc::IpcConfiguration, - pub net_conf: sync::NetworkConfiguration, - pub network_id: Option, - pub warp_sync: bool, - pub warp_barrier: Option, - pub acc_conf: AccountsConfig, - pub gas_pricer_conf: GasPricerConfig, - pub miner_extras: MinerExtras, - pub update_policy: UpdatePolicy, - pub mode: Option, - pub tracing: Switch, - pub fat_db: Switch, - pub compaction: DatabaseCompactionProfile, - pub vm_type: VMType, - pub geth_compatibility: bool, - pub experimental_rpcs: bool, - pub net_settings: NetworkSettings, - pub ipfs_conf: ipfs::Configuration, - pub secretstore_conf: secretstore::Configuration, - pub private_provider_conf: ProviderConfig, - pub private_encryptor_conf: EncryptorConfig, - pub private_tx_enabled: bool, - pub name: String, - pub custom_bootnodes: bool, - pub stratum: Option, - pub snapshot_conf: SnapshotConfiguration, - pub check_seal: bool, - pub allow_missing_blocks: bool, - pub download_old_blocks: bool, - pub verifier_settings: VerifierSettings, - pub serve_light: bool, - pub light: bool, - pub no_persistent_txqueue: bool, - pub whisper: ::whisper::Config, - pub no_hardcoded_sync: bool, - pub max_round_blocks_to_import: usize, - pub on_demand_response_time_window: Option, - pub on_demand_request_backoff_start: Option, - pub on_demand_request_backoff_max: Option, - pub on_demand_request_backoff_rounds_max: Option, - pub on_demand_request_consecutive_failures: Option, + pub cache_config: CacheConfig, + pub dirs: Directories, + pub spec: SpecType, + pub pruning: Pruning, + pub pruning_history: u64, + pub pruning_memory: usize, + /// Some if execution should be daemonized. Contains pid_file path. 
+ pub daemon: Option, + pub logger_config: LogConfig, + pub miner_options: MinerOptions, + pub gas_price_percentile: usize, + pub poll_lifetime: u32, + pub ws_conf: rpc::WsConfiguration, + pub http_conf: rpc::HttpConfiguration, + pub ipc_conf: rpc::IpcConfiguration, + pub net_conf: sync::NetworkConfiguration, + pub network_id: Option, + pub warp_sync: bool, + pub warp_barrier: Option, + pub acc_conf: AccountsConfig, + pub gas_pricer_conf: GasPricerConfig, + pub miner_extras: MinerExtras, + pub update_policy: UpdatePolicy, + pub mode: Option, + pub tracing: Switch, + pub fat_db: Switch, + pub compaction: DatabaseCompactionProfile, + pub vm_type: VMType, + pub geth_compatibility: bool, + pub experimental_rpcs: bool, + pub net_settings: NetworkSettings, + pub ipfs_conf: ipfs::Configuration, + pub secretstore_conf: secretstore::Configuration, + pub private_provider_conf: ProviderConfig, + pub private_encryptor_conf: EncryptorConfig, + pub private_tx_enabled: bool, + pub name: String, + pub custom_bootnodes: bool, + pub stratum: Option, + pub snapshot_conf: SnapshotConfiguration, + pub check_seal: bool, + pub allow_missing_blocks: bool, + pub download_old_blocks: bool, + pub verifier_settings: VerifierSettings, + pub serve_light: bool, + pub light: bool, + pub no_persistent_txqueue: bool, + pub whisper: ::whisper::Config, + pub no_hardcoded_sync: bool, + pub max_round_blocks_to_import: usize, + pub on_demand_response_time_window: Option, + pub on_demand_request_backoff_start: Option, + pub on_demand_request_backoff_max: Option, + pub on_demand_request_backoff_rounds_max: Option, + pub on_demand_request_consecutive_failures: Option, } // node info fetcher for the local store. struct FullNodeInfo { - miner: Option>, // TODO: only TXQ needed, just use that after decoupling. + miner: Option>, // TODO: only TXQ needed, just use that after decoupling. 
} impl ::local_store::NodeInfo for FullNodeInfo { - fn pending_transactions(&self) -> Vec<::types::transaction::PendingTransaction> { - let miner = match self.miner.as_ref() { - Some(m) => m, - None => return Vec::new(), - }; + fn pending_transactions(&self) -> Vec<::types::transaction::PendingTransaction> { + let miner = match self.miner.as_ref() { + Some(m) => m, + None => return Vec::new(), + }; - miner.local_transactions() - .values() - .filter_map(|status| match *status { - ::miner::pool::local_transactions::Status::Pending(ref tx) => Some(tx.pending().clone()), - _ => None, - }) - .collect() - } + miner + .local_transactions() + .values() + .filter_map(|status| match *status { + ::miner::pool::local_transactions::Status::Pending(ref tx) => { + Some(tx.pending().clone()) + } + _ => None, + }) + .collect() + } } type LightClient = ::light::client::Client<::light_helpers::EpochFetch>; // helper for light execution. -fn execute_light_impl(cmd: RunCmd, logger: Arc, on_client_rq: Cr) -> Result - where Cr: Fn(String) + 'static + Send +fn execute_light_impl( + cmd: RunCmd, + logger: Arc, + on_client_rq: Cr, +) -> Result +where + Cr: Fn(String) + 'static + Send, { - use light::client as light_client; - use sync::{LightSyncParams, LightSync, ManageNetwork}; - use parking_lot::{Mutex, RwLock}; + use light::client as light_client; + use parking_lot::{Mutex, RwLock}; + use sync::{LightSync, LightSyncParams, ManageNetwork}; - // load spec - let spec = cmd.spec.spec(SpecParams::new(cmd.dirs.cache.as_ref(), OptimizeFor::Memory))?; + // load spec + let spec = cmd.spec.spec(SpecParams::new( + cmd.dirs.cache.as_ref(), + OptimizeFor::Memory, + ))?; - // load genesis hash - let genesis_hash = spec.genesis_header().hash(); + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); - // database paths - let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone()); + // database paths + let db_dirs = cmd.dirs.database( + 
genesis_hash, + cmd.spec.legacy_fork_name(), + spec.data_dir.clone(), + ); - // user defaults path - let user_defaults_path = db_dirs.user_defaults_path(); + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); - // load user defaults - let user_defaults = UserDefaults::load(&user_defaults_path)?; + // load user defaults + let user_defaults = UserDefaults::load(&user_defaults_path)?; - // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&user_defaults); - // execute upgrades - execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; + // execute upgrades + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; - // create dirs used by parity - cmd.dirs.create_dirs(cmd.acc_conf.unlocked_accounts.len() == 0, cmd.secretstore_conf.enabled)?; + // create dirs used by parity + cmd.dirs.create_dirs( + cmd.acc_conf.unlocked_accounts.len() == 0, + cmd.secretstore_conf.enabled, + )?; - //print out running parity environment - print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs); + //print out running parity environment + print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs); - info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client")); + info!( + "Running in experimental {} mode.", + Colour::Blue.bold().paint("Light Client") + ); - // TODO: configurable cache size. - let cache = LightDataCache::new(Default::default(), Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES)); - let cache = Arc::new(Mutex::new(cache)); + // TODO: configurable cache size. + let cache = LightDataCache::new( + Default::default(), + Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES), + ); + let cache = Arc::new(Mutex::new(cache)); - // start client and create transaction queue. 
- let mut config = light_client::Config { - queue: Default::default(), - chain_column: ::ethcore_db::COL_LIGHT_CHAIN, - verify_full: true, - check_seal: cmd.check_seal, - no_hardcoded_sync: cmd.no_hardcoded_sync, - }; + // start client and create transaction queue. + let mut config = light_client::Config { + queue: Default::default(), + chain_column: ::ethcore_db::COL_LIGHT_CHAIN, + verify_full: true, + check_seal: cmd.check_seal, + no_hardcoded_sync: cmd.no_hardcoded_sync, + }; - config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; - config.queue.verifier_settings = cmd.verifier_settings; + config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; + config.queue.verifier_settings = cmd.verifier_settings; - // start on_demand service. + // start on_demand service. - let response_time_window = cmd.on_demand_response_time_window.map_or( - ::light::on_demand::DEFAULT_RESPONSE_TIME_TO_LIVE, - |s| Duration::from_secs(s) - ); + let response_time_window = cmd + .on_demand_response_time_window + .map_or(::light::on_demand::DEFAULT_RESPONSE_TIME_TO_LIVE, |s| { + Duration::from_secs(s) + }); - let request_backoff_start = cmd.on_demand_request_backoff_start.map_or( - ::light::on_demand::DEFAULT_REQUEST_MIN_BACKOFF_DURATION, - |s| Duration::from_secs(s) - ); + let request_backoff_start = cmd.on_demand_request_backoff_start.map_or( + ::light::on_demand::DEFAULT_REQUEST_MIN_BACKOFF_DURATION, + |s| Duration::from_secs(s), + ); - let request_backoff_max = cmd.on_demand_request_backoff_max.map_or( - ::light::on_demand::DEFAULT_REQUEST_MAX_BACKOFF_DURATION, - |s| Duration::from_secs(s) - ); + let request_backoff_max = cmd.on_demand_request_backoff_max.map_or( + ::light::on_demand::DEFAULT_REQUEST_MAX_BACKOFF_DURATION, + |s| Duration::from_secs(s), + ); - let on_demand = Arc::new({ - ::light::on_demand::OnDemand::new( - cache.clone(), - response_time_window, - request_backoff_start, - request_backoff_max, - 
cmd.on_demand_request_backoff_rounds_max.unwrap_or(::light::on_demand::DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS), - cmd.on_demand_request_consecutive_failures.unwrap_or(::light::on_demand::DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS) - ) - }); + let on_demand = Arc::new({ + ::light::on_demand::OnDemand::new( + cache.clone(), + response_time_window, + request_backoff_start, + request_backoff_max, + cmd.on_demand_request_backoff_rounds_max + .unwrap_or(::light::on_demand::DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS), + cmd.on_demand_request_consecutive_failures + .unwrap_or(::light::on_demand::DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS), + ) + }); - let sync_handle = Arc::new(RwLock::new(Weak::new())); - let fetch = ::light_helpers::EpochFetch { - on_demand: on_demand.clone(), - sync: sync_handle.clone(), - }; + let sync_handle = Arc::new(RwLock::new(Weak::new())); + let fetch = ::light_helpers::EpochFetch { + on_demand: on_demand.clone(), + sync: sync_handle.clone(), + }; - // initialize database. - let db = db::open_db(&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."), - &cmd.cache_config, - &cmd.compaction).map_err(|e| format!("Failed to open database {:?}", e))?; + // initialize database. 
+ let db = db::open_db( + &db_dirs + .client_path(algorithm) + .to_str() + .expect("DB path could not be converted to string."), + &cmd.cache_config, + &cmd.compaction, + ) + .map_err(|e| format!("Failed to open database {:?}", e))?; - let service = light_client::Service::start(config, &spec, fetch, db, cache.clone()) - .map_err(|e| format!("Error starting light client: {}", e))?; - let client = service.client().clone(); - let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default())); - let provider = ::light::provider::LightProvider::new(client.clone(), txq.clone()); + let service = light_client::Service::start(config, &spec, fetch, db, cache.clone()) + .map_err(|e| format!("Error starting light client: {}", e))?; + let client = service.client().clone(); + let txq = Arc::new(RwLock::new( + ::light::transaction_queue::TransactionQueue::default(), + )); + let provider = ::light::provider::LightProvider::new(client.clone(), txq.clone()); - // start network. - // set up bootnodes - let mut net_conf = cmd.net_conf; - if !cmd.custom_bootnodes { - net_conf.boot_nodes = spec.nodes.clone(); - } + // start network. + // set up bootnodes + let mut net_conf = cmd.net_conf; + if !cmd.custom_bootnodes { + net_conf.boot_nodes = spec.nodes.clone(); + } - let mut attached_protos = Vec::new(); - let whisper_factory = if cmd.whisper.enabled { - let whisper_factory = ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos) - .map_err(|e| format!("Failed to initialize whisper: {}", e))?; - whisper_factory - } else { - None - }; + let mut attached_protos = Vec::new(); + let whisper_factory = if cmd.whisper.enabled { + let whisper_factory = + ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos) + .map_err(|e| format!("Failed to initialize whisper: {}", e))?; + whisper_factory + } else { + None + }; - // set network path. 
- net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); - let sync_params = LightSyncParams { - network_config: net_conf.into_basic().map_err(|e| format!("Failed to produce network config: {}", e))?, - client: Arc::new(provider), - network_id: cmd.network_id.unwrap_or(spec.network_id()), - subprotocol_name: sync::LIGHT_PROTOCOL, - handlers: vec![on_demand.clone()], - attached_protos: attached_protos, - }; - let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; - let light_sync = Arc::new(light_sync); - *sync_handle.write() = Arc::downgrade(&light_sync); + // set network path. + net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); + let sync_params = LightSyncParams { + network_config: net_conf + .into_basic() + .map_err(|e| format!("Failed to produce network config: {}", e))?, + client: Arc::new(provider), + network_id: cmd.network_id.unwrap_or(spec.network_id()), + subprotocol_name: sync::LIGHT_PROTOCOL, + handlers: vec![on_demand.clone()], + attached_protos: attached_protos, + }; + let light_sync = + LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; + let light_sync = Arc::new(light_sync); + *sync_handle.write() = Arc::downgrade(&light_sync); - // spin up event loop - let runtime = Runtime::with_default_thread_count(); + // spin up event loop + let runtime = Runtime::with_default_thread_count(); - // start the network. - light_sync.start_network(); + // start the network. 
+ light_sync.start_network(); - // fetch service - let fetch = fetch::Client::new(FETCH_LIGHT_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?; - let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; + // fetch service + let fetch = fetch::Client::new(FETCH_LIGHT_NUM_DNS_THREADS) + .map_err(|e| format!("Error starting fetch client: {:?}", e))?; + let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; - // prepare account provider - let account_provider = Arc::new(account_utils::prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); - let rpc_stats = Arc::new(informant::RpcStats::default()); + // prepare account provider + let account_provider = Arc::new(account_utils::prepare_account_provider( + &cmd.spec, + &cmd.dirs, + &spec.data_dir, + cmd.acc_conf, + &passwords, + )?); + let rpc_stats = Arc::new(informant::RpcStats::default()); - // the dapps server - let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.logger_config)); + // the dapps server + let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.logger_config)); - // start RPCs - let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies { - signer_service: signer_service, - client: client.clone(), - sync: light_sync.clone(), - net: light_sync.clone(), - accounts: account_provider, - logger: logger, - settings: Arc::new(cmd.net_settings), - on_demand: on_demand, - cache: cache.clone(), - transaction_queue: txq, - ws_address: cmd.ws_conf.address(), - fetch: fetch, - geth_compatibility: cmd.geth_compatibility, - experimental_rpcs: cmd.experimental_rpcs, - executor: runtime.executor(), - whisper_rpc: whisper_factory, - private_tx_service: None, //TODO: add this to client. 
- gas_price_percentile: cmd.gas_price_percentile, - poll_lifetime: cmd.poll_lifetime - }); + // start RPCs + let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies { + signer_service: signer_service, + client: client.clone(), + sync: light_sync.clone(), + net: light_sync.clone(), + accounts: account_provider, + logger: logger, + settings: Arc::new(cmd.net_settings), + on_demand: on_demand, + cache: cache.clone(), + transaction_queue: txq, + ws_address: cmd.ws_conf.address(), + fetch: fetch, + geth_compatibility: cmd.geth_compatibility, + experimental_rpcs: cmd.experimental_rpcs, + executor: runtime.executor(), + whisper_rpc: whisper_factory, + private_tx_service: None, //TODO: add this to client. + gas_price_percentile: cmd.gas_price_percentile, + poll_lifetime: cmd.poll_lifetime, + }); - let dependencies = rpc::Dependencies { - apis: deps_for_rpc_apis.clone(), - executor: runtime.executor(), - stats: rpc_stats.clone(), - }; + let dependencies = rpc::Dependencies { + apis: deps_for_rpc_apis.clone(), + executor: runtime.executor(), + stats: rpc_stats.clone(), + }; - // start rpc servers - let rpc_direct = rpc::setup_apis(rpc_apis::ApiSet::All, &dependencies); - let ws_server = rpc::new_ws(cmd.ws_conf, &dependencies)?; - let http_server = rpc::new_http("HTTP JSON-RPC", "jsonrpc", cmd.http_conf.clone(), &dependencies)?; - let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; + // start rpc servers + let rpc_direct = rpc::setup_apis(rpc_apis::ApiSet::All, &dependencies); + let ws_server = rpc::new_ws(cmd.ws_conf, &dependencies)?; + let http_server = rpc::new_http( + "HTTP JSON-RPC", + "jsonrpc", + cmd.http_conf.clone(), + &dependencies, + )?; + let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; - // the informant - let informant = Arc::new(Informant::new( - LightNodeInformantData { - client: client.clone(), - sync: light_sync.clone(), - cache: cache, - }, - None, - Some(rpc_stats), - cmd.logger_config.color, - )); - 
service.add_notify(informant.clone()); - service.register_handler(informant.clone()).map_err(|_| "Unable to register informant handler".to_owned())?; + // the informant + let informant = Arc::new(Informant::new( + LightNodeInformantData { + client: client.clone(), + sync: light_sync.clone(), + cache: cache, + }, + None, + Some(rpc_stats), + cmd.logger_config.color, + )); + service.add_notify(informant.clone()); + service + .register_handler(informant.clone()) + .map_err(|_| "Unable to register informant handler".to_owned())?; - client.set_exit_handler(on_client_rq); + client.set_exit_handler(on_client_rq); - Ok(RunningClient { - inner: RunningClientInner::Light { - rpc: rpc_direct, - informant, - client, - keep_alive: Box::new((service, ws_server, http_server, ipc_server, runtime)), - } - }) + Ok(RunningClient { + inner: RunningClientInner::Light { + rpc: rpc_direct, + informant, + client, + keep_alive: Box::new((service, ws_server, http_server, ipc_server, runtime)), + }, + }) } -fn execute_impl(cmd: RunCmd, logger: Arc, on_client_rq: Cr, - on_updater_rq: Rr) -> Result - where Cr: Fn(String) + 'static + Send, - Rr: Fn() + 'static + Send +fn execute_impl( + cmd: RunCmd, + logger: Arc, + on_client_rq: Cr, + on_updater_rq: Rr, +) -> Result +where + Cr: Fn(String) + 'static + Send, + Rr: Fn() + 'static + Send, { - // load spec - let spec = cmd.spec.spec(&cmd.dirs.cache)?; - - // load genesis hash - let genesis_hash = spec.genesis_header().hash(); - - // database paths - let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone()); - - // user defaults path - let user_defaults_path = db_dirs.user_defaults_path(); - - // load user defaults - let mut user_defaults = UserDefaults::load(&user_defaults_path)?; - - // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&user_defaults); - - // check if tracing is on - let tracing = tracing_switch_to_bool(cmd.tracing, &user_defaults)?; - - // check if fatdb is on - let 
fat_db = fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)?; - - // get the mode - let mode = mode_switch_to_bool(cmd.mode, &user_defaults)?; - trace!(target: "mode", "mode is {:?}", mode); - let network_enabled = match mode { Mode::Dark(_) | Mode::Off => false, _ => true, }; - - // get the update policy - let update_policy = cmd.update_policy; - - // prepare client and snapshot paths. - let client_path = db_dirs.client_path(algorithm); - let snapshot_path = db_dirs.snapshot_path(); - - // execute upgrades - execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; - - // create dirs used by parity - cmd.dirs.create_dirs(cmd.acc_conf.unlocked_accounts.len() == 0, cmd.secretstore_conf.enabled)?; - - //print out running parity environment - print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs); - - // display info about used pruning algorithm - info!("State DB configuration: {}{}{}", - Colour::White.bold().paint(algorithm.as_str()), - match fat_db { - true => Colour::White.bold().paint(" +Fat").to_string(), - false => "".to_owned(), - }, - match tracing { - true => Colour::White.bold().paint(" +Trace").to_string(), - false => "".to_owned(), - } - ); - info!("Operating mode: {}", Colour::White.bold().paint(format!("{}", mode))); - - // display warning about using experimental journaldb algorithm - if !algorithm.is_stable() { - warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable")); - } - - // create sync config - let mut sync_config = SyncConfig::default(); - sync_config.network_id = match cmd.network_id { - Some(id) => id, - None => spec.network_id(), - }; - if spec.subprotocol_name().len() != 3 { - warn!("Your chain specification's subprotocol length is not 3. 
Ignoring."); - } else { - sync_config.subprotocol_name.clone_from_slice(spec.subprotocol_name().as_bytes()); - } - - sync_config.fork_block = spec.fork_block(); - let mut warp_sync = spec.engine.supports_warp() && cmd.warp_sync; - if warp_sync { - // Logging is not initialized yet, so we print directly to stderr - if fat_db { - warn!("Warning: Warp Sync is disabled because Fat DB is turned on."); - warp_sync = false; - } else if tracing { - warn!("Warning: Warp Sync is disabled because tracing is turned on."); - warp_sync = false; - } else if algorithm != Algorithm::OverlayRecent { - warn!("Warning: Warp Sync is disabled because of non-default pruning mode."); - warp_sync = false; - } - } - sync_config.warp_sync = match (warp_sync, cmd.warp_barrier) { - (true, Some(block)) => sync::WarpSync::OnlyAndAfter(block), - (true, _) => sync::WarpSync::Enabled, - _ => sync::WarpSync::Disabled, - }; - sync_config.download_old_blocks = cmd.download_old_blocks; - sync_config.serve_light = cmd.serve_light; - - let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; - - // prepare account provider - let account_provider = Arc::new(account_utils::prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); - - // spin up event loop - let runtime = Runtime::with_default_thread_count(); - - // fetch service - let fetch = fetch::Client::new(FETCH_FULL_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?; - - let txpool_size = cmd.miner_options.pool_limits.max_count; - // create miner - let miner = Arc::new(Miner::new( - cmd.miner_options, - cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), runtime.executor()), - &spec, - ( - cmd.miner_extras.local_accounts, - account_utils::miner_local_accounts(account_provider.clone()), - ) - )); - miner.set_author(miner::Author::External(cmd.miner_extras.author)); - miner.set_gas_range_target(cmd.miner_extras.gas_range_target); - miner.set_extra_data(cmd.miner_extras.extra_data); 
- - if !cmd.miner_extras.work_notify.is_empty() { - miner.add_work_listener(Box::new( - WorkPoster::new(&cmd.miner_extras.work_notify, fetch.clone(), runtime.executor()) - )); - } - - let engine_signer = cmd.miner_extras.engine_signer; - if engine_signer != Default::default() { - if let Some(author) = account_utils::miner_author(&cmd.spec, &cmd.dirs, &account_provider, engine_signer, &passwords)? { - miner.set_author(author); - } - } - - // display warning if using --no-hardcoded-sync - if cmd.no_hardcoded_sync { - warn!("The --no-hardcoded-sync flag has no effect if you don't use --light"); - } - - // create client config - let mut client_config = to_client_config( - &cmd.cache_config, - spec.name.to_lowercase(), - mode.clone(), - tracing, - fat_db, - cmd.compaction, - cmd.vm_type, - cmd.name, - algorithm, - cmd.pruning_history, - cmd.pruning_memory, - cmd.check_seal, - cmd.max_round_blocks_to_import, - ); - - client_config.queue.verifier_settings = cmd.verifier_settings; - client_config.transaction_verification_queue_size = ::std::cmp::max(2048, txpool_size / 4); - client_config.snapshot = cmd.snapshot_conf.clone(); - - // set up bootnodes - let mut net_conf = cmd.net_conf; - if !cmd.custom_bootnodes { - net_conf.boot_nodes = spec.nodes.clone(); - } - - // set network path. - net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); - - let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); - let client_db = restoration_db_handler.open(&client_path) - .map_err(|e| format!("Failed to open database {:?}", e))?; - - let private_tx_signer = account_utils::private_tx_signer(account_provider.clone(), &passwords)?; - - // create client service. 
- let service = ClientService::start( - client_config, - &spec, - client_db, - &snapshot_path, - restoration_db_handler, - &cmd.dirs.ipc_path(), - miner.clone(), - private_tx_signer.clone(), - Box::new(SecretStoreEncryptor::new(cmd.private_encryptor_conf.clone(), fetch.clone(), private_tx_signer).map_err(|e| e.to_string())?), - cmd.private_provider_conf, - cmd.private_encryptor_conf, - ).map_err(|e| format!("Client service error: {:?}", e))?; - - let connection_filter_address = spec.params().node_permission_contract; - // drop the spec to free up genesis state. - drop(spec); - - // take handle to client - let client = service.client(); - // Update miners block gas limit - miner.update_transaction_queue_limits(*client.best_block_header().gas_limit()); - - // take handle to private transactions service - let private_tx_service = service.private_tx_service(); - let private_tx_provider = private_tx_service.provider(); - let connection_filter = connection_filter_address.map(|a| Arc::new(NodeFilter::new(Arc::downgrade(&client) as Weak, a))); - let snapshot_service = service.snapshot_service(); - - // initialize the local node information store. - let store = { - let db = service.db(); - let node_info = FullNodeInfo { - miner: match cmd.no_persistent_txqueue { - true => None, - false => Some(miner.clone()), - } - }; - - let store = ::local_store::create(db.key_value().clone(), ::ethcore_db::COL_NODE_INFO, node_info); - - if cmd.no_persistent_txqueue { - info!("Running without a persistent transaction queue."); - - if let Err(e) = store.clear() { - warn!("Error clearing persistent transaction queue: {}", e); - } - } - - // re-queue pending transactions. 
- match store.pending_transactions() { - Ok(pending) => { - for pending_tx in pending { - if let Err(e) = miner.import_own_transaction(&*client, pending_tx) { - warn!("Error importing saved transaction: {}", e) - } - } - } - Err(e) => warn!("Error loading cached pending transactions from disk: {}", e), - } - - Arc::new(store) - }; - - // register it as an IO service to update periodically. - service.register_io_handler(store).map_err(|_| "Unable to register local store handler".to_owned())?; - - // create external miner - let external_miner = Arc::new(ExternalMiner::default()); - - // start stratum - if let Some(ref stratum_config) = cmd.stratum { - stratum::Stratum::register(stratum_config, miner.clone(), Arc::downgrade(&client)) - .map_err(|e| format!("Stratum start error: {:?}", e))?; - } - - let mut attached_protos = Vec::new(); - - let whisper_factory = if cmd.whisper.enabled { - let whisper_factory = ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos) - .map_err(|e| format!("Failed to initialize whisper: {}", e))?; - - whisper_factory - } else { - None - }; - - let private_tx_sync: Option> = match cmd.private_tx_enabled { - true => Some(private_tx_service.clone() as Arc), - false => None, - }; - - // create sync object - let (sync_provider, manage_network, chain_notify, priority_tasks) = modules::sync( - sync_config, - net_conf.clone().into(), - client.clone(), - snapshot_service.clone(), - private_tx_sync, - client.clone(), - &cmd.logger_config, - attached_protos, - connection_filter.clone().map(|f| f as Arc<::sync::ConnectionFilter + 'static>), - ).map_err(|e| format!("Sync error: {}", e))?; - - service.add_notify(chain_notify.clone()); - - // Propagate transactions as soon as they are imported. - let tx = ::parking_lot::Mutex::new(priority_tasks); - let is_ready = Arc::new(atomic::AtomicBool::new(true)); - miner.add_transactions_listener(Box::new(move |_hashes| { - // we want to have only one PendingTransactions task in the queue. 
- if is_ready.compare_and_swap(true, false, atomic::Ordering::SeqCst) { - let task = ::sync::PriorityTask::PropagateTransactions(Instant::now(), is_ready.clone()); - // we ignore error cause it means that we are closing - let _ = tx.lock().send(task); - } - })); - - // provider not added to a notification center is effectively disabled - // TODO [debris] refactor it later on - if cmd.private_tx_enabled { - service.add_notify(private_tx_provider.clone()); - // TODO [ToDr] PrivateTX should use separate notifications - // re-using ChainNotify for this is a bit abusive. - private_tx_provider.add_notify(chain_notify.clone()); - } - - // start network - if network_enabled { - chain_notify.start(); - } - - let contract_client = { - struct FullRegistrar { client: Arc } - impl RegistrarClient for FullRegistrar { - type Call = Asynchronous; - fn registrar_address(&self) -> Result { - self.client.registrar_address() - .ok_or_else(|| "Registrar not defined.".into()) - } - fn call_contract(&self, address: Address, data: Bytes) -> Self::Call { - Box::new(self.client.call_contract(BlockId::Latest, address, data).into_future()) - } - } - - Arc::new(FullRegistrar { client: client.clone() }) - }; - - // the updater service - let updater_fetch = fetch.clone(); - let updater = Updater::new( - &Arc::downgrade(&(service.client() as Arc)), - &Arc::downgrade(&sync_provider), - update_policy, - hash_fetch::Client::with_fetch(contract_client.clone(), updater_fetch, runtime.executor()) - ); - service.add_notify(updater.clone()); - - // set up dependencies for rpc servers - let rpc_stats = Arc::new(informant::RpcStats::default()); - let secret_store = account_provider.clone(); - let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.logger_config)); - - let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies { - signer_service: signer_service, - snapshot: snapshot_service.clone(), - client: client.clone(), - sync: sync_provider.clone(), - net: manage_network.clone(), - 
accounts: secret_store, - miner: miner.clone(), - external_miner: external_miner.clone(), - logger: logger.clone(), - settings: Arc::new(cmd.net_settings.clone()), - net_service: manage_network.clone(), - updater: updater.clone(), - geth_compatibility: cmd.geth_compatibility, - experimental_rpcs: cmd.experimental_rpcs, - ws_address: cmd.ws_conf.address(), - fetch: fetch.clone(), - executor: runtime.executor(), - whisper_rpc: whisper_factory, - private_tx_service: Some(private_tx_service.clone()), - gas_price_percentile: cmd.gas_price_percentile, - poll_lifetime: cmd.poll_lifetime, - allow_missing_blocks: cmd.allow_missing_blocks, - no_ancient_blocks: !cmd.download_old_blocks, - }); - - let dependencies = rpc::Dependencies { - apis: deps_for_rpc_apis.clone(), - executor: runtime.executor(), - stats: rpc_stats.clone(), - }; - - // start rpc servers - let rpc_direct = rpc::setup_apis(rpc_apis::ApiSet::All, &dependencies); - let ws_server = rpc::new_ws(cmd.ws_conf.clone(), &dependencies)?; - let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; - let http_server = rpc::new_http("HTTP JSON-RPC", "jsonrpc", cmd.http_conf.clone(), &dependencies)?; - - // secret store key server - let secretstore_deps = secretstore::Dependencies { - client: client.clone(), - sync: sync_provider.clone(), - miner: miner.clone(), - account_provider, - accounts_passwords: &passwords, - }; - let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps, runtime.executor())?; - - // the ipfs server - let ipfs_server = ipfs::start_server(cmd.ipfs_conf.clone(), client.clone())?; - - // the informant - let informant = Arc::new(Informant::new( - FullNodeInformantData { - client: service.client(), - sync: Some(sync_provider.clone()), - net: Some(manage_network.clone()), - }, - Some(snapshot_service.clone()), - Some(rpc_stats.clone()), - cmd.logger_config.color, - )); - service.add_notify(informant.clone()); - 
service.register_io_handler(informant.clone()).map_err(|_| "Unable to register informant handler".to_owned())?; - - // save user defaults - user_defaults.is_first_launch = false; - user_defaults.pruning = algorithm; - user_defaults.tracing = tracing; - user_defaults.fat_db = fat_db; - user_defaults.set_mode(mode); - user_defaults.save(&user_defaults_path)?; - - // tell client how to save the default mode if it gets changed. - client.on_user_defaults_change(move |mode: Option| { - if let Some(mode) = mode { - user_defaults.set_mode(mode); - } - let _ = user_defaults.save(&user_defaults_path); // discard failures - there's nothing we can do - }); - - // the watcher must be kept alive. - let watcher = match cmd.snapshot_conf.no_periodic { - true => None, - false => { - let sync = sync_provider.clone(); - let client = client.clone(); - let watcher = Arc::new(snapshot::Watcher::new( - service.client(), - move || is_major_importing(Some(sync.status().state), client.queue_info()), - service.io().channel(), - SNAPSHOT_PERIOD, - SNAPSHOT_HISTORY, - )); - - service.add_notify(watcher.clone()); - Some(watcher) - }, - }; - - client.set_exit_handler(on_client_rq); - updater.set_exit_handler(on_updater_rq); - - Ok(RunningClient { - inner: RunningClientInner::Full { - rpc: rpc_direct, - informant, - client, - client_service: Arc::new(service), - keep_alive: Box::new((watcher, updater, ws_server, http_server, ipc_server, secretstore_key_server, ipfs_server, runtime)), - } - }) + // load spec + let spec = cmd.spec.spec(&cmd.dirs.cache)?; + + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); + + // database paths + let db_dirs = cmd.dirs.database( + genesis_hash, + cmd.spec.legacy_fork_name(), + spec.data_dir.clone(), + ); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let mut user_defaults = UserDefaults::load(&user_defaults_path)?; + + // select pruning algorithm + let algorithm = 
cmd.pruning.to_algorithm(&user_defaults); + + // check if tracing is on + let tracing = tracing_switch_to_bool(cmd.tracing, &user_defaults)?; + + // check if fatdb is on + let fat_db = fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)?; + + // get the mode + let mode = mode_switch_to_bool(cmd.mode, &user_defaults)?; + trace!(target: "mode", "mode is {:?}", mode); + let network_enabled = match mode { + Mode::Dark(_) | Mode::Off => false, + _ => true, + }; + + // get the update policy + let update_policy = cmd.update_policy; + + // prepare client and snapshot paths. + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); + + // execute upgrades + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; + + // create dirs used by parity + cmd.dirs.create_dirs( + cmd.acc_conf.unlocked_accounts.len() == 0, + cmd.secretstore_conf.enabled, + )?; + + //print out running parity environment + print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs); + + // display info about used pruning algorithm + info!( + "State DB configuration: {}{}{}", + Colour::White.bold().paint(algorithm.as_str()), + match fat_db { + true => Colour::White.bold().paint(" +Fat").to_string(), + false => "".to_owned(), + }, + match tracing { + true => Colour::White.bold().paint(" +Trace").to_string(), + false => "".to_owned(), + } + ); + info!( + "Operating mode: {}", + Colour::White.bold().paint(format!("{}", mode)) + ); + + // display warning about using experimental journaldb algorithm + if !algorithm.is_stable() { + warn!( + "Your chosen strategy is {}! You can re-run with --pruning to change.", + Colour::Red.bold().paint("unstable") + ); + } + + // create sync config + let mut sync_config = SyncConfig::default(); + sync_config.network_id = match cmd.network_id { + Some(id) => id, + None => spec.network_id(), + }; + if spec.subprotocol_name().len() != 3 { + warn!("Your chain specification's subprotocol length is not 3. 
Ignoring."); + } else { + sync_config + .subprotocol_name + .clone_from_slice(spec.subprotocol_name().as_bytes()); + } + + sync_config.fork_block = spec.fork_block(); + let mut warp_sync = spec.engine.supports_warp() && cmd.warp_sync; + if warp_sync { + // Logging is not initialized yet, so we print directly to stderr + if fat_db { + warn!("Warning: Warp Sync is disabled because Fat DB is turned on."); + warp_sync = false; + } else if tracing { + warn!("Warning: Warp Sync is disabled because tracing is turned on."); + warp_sync = false; + } else if algorithm != Algorithm::OverlayRecent { + warn!("Warning: Warp Sync is disabled because of non-default pruning mode."); + warp_sync = false; + } + } + sync_config.warp_sync = match (warp_sync, cmd.warp_barrier) { + (true, Some(block)) => sync::WarpSync::OnlyAndAfter(block), + (true, _) => sync::WarpSync::Enabled, + _ => sync::WarpSync::Disabled, + }; + sync_config.download_old_blocks = cmd.download_old_blocks; + sync_config.serve_light = cmd.serve_light; + + let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; + + // prepare account provider + let account_provider = Arc::new(account_utils::prepare_account_provider( + &cmd.spec, + &cmd.dirs, + &spec.data_dir, + cmd.acc_conf, + &passwords, + )?); + + // spin up event loop + let runtime = Runtime::with_default_thread_count(); + + // fetch service + let fetch = fetch::Client::new(FETCH_FULL_NUM_DNS_THREADS) + .map_err(|e| format!("Error starting fetch client: {:?}", e))?; + + let txpool_size = cmd.miner_options.pool_limits.max_count; + // create miner + let miner = Arc::new(Miner::new( + cmd.miner_options, + cmd.gas_pricer_conf + .to_gas_pricer(fetch.clone(), runtime.executor()), + &spec, + ( + cmd.miner_extras.local_accounts, + account_utils::miner_local_accounts(account_provider.clone()), + ), + )); + miner.set_author(miner::Author::External(cmd.miner_extras.author)); + miner.set_gas_range_target(cmd.miner_extras.gas_range_target); + 
miner.set_extra_data(cmd.miner_extras.extra_data); + + if !cmd.miner_extras.work_notify.is_empty() { + miner.add_work_listener(Box::new(WorkPoster::new( + &cmd.miner_extras.work_notify, + fetch.clone(), + runtime.executor(), + ))); + } + + let engine_signer = cmd.miner_extras.engine_signer; + if engine_signer != Default::default() { + if let Some(author) = account_utils::miner_author( + &cmd.spec, + &cmd.dirs, + &account_provider, + engine_signer, + &passwords, + )? { + miner.set_author(author); + } + } + + // display warning if using --no-hardcoded-sync + if cmd.no_hardcoded_sync { + warn!("The --no-hardcoded-sync flag has no effect if you don't use --light"); + } + + // create client config + let mut client_config = to_client_config( + &cmd.cache_config, + spec.name.to_lowercase(), + mode.clone(), + tracing, + fat_db, + cmd.compaction, + cmd.vm_type, + cmd.name, + algorithm, + cmd.pruning_history, + cmd.pruning_memory, + cmd.check_seal, + cmd.max_round_blocks_to_import, + ); + + client_config.queue.verifier_settings = cmd.verifier_settings; + client_config.transaction_verification_queue_size = ::std::cmp::max(2048, txpool_size / 4); + client_config.snapshot = cmd.snapshot_conf.clone(); + + // set up bootnodes + let mut net_conf = cmd.net_conf; + if !cmd.custom_bootnodes { + net_conf.boot_nodes = spec.nodes.clone(); + } + + // set network path. + net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); + + let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); + let client_db = restoration_db_handler + .open(&client_path) + .map_err(|e| format!("Failed to open database {:?}", e))?; + + let private_tx_signer = account_utils::private_tx_signer(account_provider.clone(), &passwords)?; + + // create client service. 
+ let service = ClientService::start( + client_config, + &spec, + client_db, + &snapshot_path, + restoration_db_handler, + &cmd.dirs.ipc_path(), + miner.clone(), + private_tx_signer.clone(), + Box::new( + SecretStoreEncryptor::new( + cmd.private_encryptor_conf.clone(), + fetch.clone(), + private_tx_signer, + ) + .map_err(|e| e.to_string())?, + ), + cmd.private_provider_conf, + cmd.private_encryptor_conf, + ) + .map_err(|e| format!("Client service error: {:?}", e))?; + + let connection_filter_address = spec.params().node_permission_contract; + // drop the spec to free up genesis state. + drop(spec); + + // take handle to client + let client = service.client(); + // Update miners block gas limit + miner.update_transaction_queue_limits(*client.best_block_header().gas_limit()); + + // take handle to private transactions service + let private_tx_service = service.private_tx_service(); + let private_tx_provider = private_tx_service.provider(); + let connection_filter = connection_filter_address.map(|a| { + Arc::new(NodeFilter::new( + Arc::downgrade(&client) as Weak, + a, + )) + }); + let snapshot_service = service.snapshot_service(); + + // initialize the local node information store. + let store = { + let db = service.db(); + let node_info = FullNodeInfo { + miner: match cmd.no_persistent_txqueue { + true => None, + false => Some(miner.clone()), + }, + }; + + let store = ::local_store::create( + db.key_value().clone(), + ::ethcore_db::COL_NODE_INFO, + node_info, + ); + + if cmd.no_persistent_txqueue { + info!("Running without a persistent transaction queue."); + + if let Err(e) = store.clear() { + warn!("Error clearing persistent transaction queue: {}", e); + } + } + + // re-queue pending transactions. 
+ match store.pending_transactions() { + Ok(pending) => { + for pending_tx in pending { + if let Err(e) = miner.import_own_transaction(&*client, pending_tx) { + warn!("Error importing saved transaction: {}", e) + } + } + } + Err(e) => warn!("Error loading cached pending transactions from disk: {}", e), + } + + Arc::new(store) + }; + + // register it as an IO service to update periodically. + service + .register_io_handler(store) + .map_err(|_| "Unable to register local store handler".to_owned())?; + + // create external miner + let external_miner = Arc::new(ExternalMiner::default()); + + // start stratum + if let Some(ref stratum_config) = cmd.stratum { + stratum::Stratum::register(stratum_config, miner.clone(), Arc::downgrade(&client)) + .map_err(|e| format!("Stratum start error: {:?}", e))?; + } + + let mut attached_protos = Vec::new(); + + let whisper_factory = if cmd.whisper.enabled { + let whisper_factory = + ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos) + .map_err(|e| format!("Failed to initialize whisper: {}", e))?; + + whisper_factory + } else { + None + }; + + let private_tx_sync: Option> = match cmd.private_tx_enabled { + true => Some(private_tx_service.clone() as Arc), + false => None, + }; + + // create sync object + let (sync_provider, manage_network, chain_notify, priority_tasks) = modules::sync( + sync_config, + net_conf.clone().into(), + client.clone(), + snapshot_service.clone(), + private_tx_sync, + client.clone(), + &cmd.logger_config, + attached_protos, + connection_filter + .clone() + .map(|f| f as Arc<::sync::ConnectionFilter + 'static>), + ) + .map_err(|e| format!("Sync error: {}", e))?; + + service.add_notify(chain_notify.clone()); + + // Propagate transactions as soon as they are imported. 
+ let tx = ::parking_lot::Mutex::new(priority_tasks); + let is_ready = Arc::new(atomic::AtomicBool::new(true)); + miner.add_transactions_listener(Box::new(move |_hashes| { + // we want to have only one PendingTransactions task in the queue. + if is_ready.compare_and_swap(true, false, atomic::Ordering::SeqCst) { + let task = + ::sync::PriorityTask::PropagateTransactions(Instant::now(), is_ready.clone()); + // we ignore error cause it means that we are closing + let _ = tx.lock().send(task); + } + })); + + // provider not added to a notification center is effectively disabled + // TODO [debris] refactor it later on + if cmd.private_tx_enabled { + service.add_notify(private_tx_provider.clone()); + // TODO [ToDr] PrivateTX should use separate notifications + // re-using ChainNotify for this is a bit abusive. + private_tx_provider.add_notify(chain_notify.clone()); + } + + // start network + if network_enabled { + chain_notify.start(); + } + + let contract_client = { + struct FullRegistrar { + client: Arc, + } + impl RegistrarClient for FullRegistrar { + type Call = Asynchronous; + fn registrar_address(&self) -> Result { + self.client + .registrar_address() + .ok_or_else(|| "Registrar not defined.".into()) + } + fn call_contract(&self, address: Address, data: Bytes) -> Self::Call { + Box::new( + self.client + .call_contract(BlockId::Latest, address, data) + .into_future(), + ) + } + } + + Arc::new(FullRegistrar { + client: client.clone(), + }) + }; + + // the updater service + let updater_fetch = fetch.clone(); + let updater = Updater::new( + &Arc::downgrade(&(service.client() as Arc)), + &Arc::downgrade(&sync_provider), + update_policy, + hash_fetch::Client::with_fetch(contract_client.clone(), updater_fetch, runtime.executor()), + ); + service.add_notify(updater.clone()); + + // set up dependencies for rpc servers + let rpc_stats = Arc::new(informant::RpcStats::default()); + let secret_store = account_provider.clone(); + let signer_service = 
Arc::new(signer::new_service(&cmd.ws_conf, &cmd.logger_config)); + + let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies { + signer_service: signer_service, + snapshot: snapshot_service.clone(), + client: client.clone(), + sync: sync_provider.clone(), + net: manage_network.clone(), + accounts: secret_store, + miner: miner.clone(), + external_miner: external_miner.clone(), + logger: logger.clone(), + settings: Arc::new(cmd.net_settings.clone()), + net_service: manage_network.clone(), + updater: updater.clone(), + geth_compatibility: cmd.geth_compatibility, + experimental_rpcs: cmd.experimental_rpcs, + ws_address: cmd.ws_conf.address(), + fetch: fetch.clone(), + executor: runtime.executor(), + whisper_rpc: whisper_factory, + private_tx_service: Some(private_tx_service.clone()), + gas_price_percentile: cmd.gas_price_percentile, + poll_lifetime: cmd.poll_lifetime, + allow_missing_blocks: cmd.allow_missing_blocks, + no_ancient_blocks: !cmd.download_old_blocks, + }); + + let dependencies = rpc::Dependencies { + apis: deps_for_rpc_apis.clone(), + executor: runtime.executor(), + stats: rpc_stats.clone(), + }; + + // start rpc servers + let rpc_direct = rpc::setup_apis(rpc_apis::ApiSet::All, &dependencies); + let ws_server = rpc::new_ws(cmd.ws_conf.clone(), &dependencies)?; + let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; + let http_server = rpc::new_http( + "HTTP JSON-RPC", + "jsonrpc", + cmd.http_conf.clone(), + &dependencies, + )?; + + // secret store key server + let secretstore_deps = secretstore::Dependencies { + client: client.clone(), + sync: sync_provider.clone(), + miner: miner.clone(), + account_provider, + accounts_passwords: &passwords, + }; + let secretstore_key_server = secretstore::start( + cmd.secretstore_conf.clone(), + secretstore_deps, + runtime.executor(), + )?; + + // the ipfs server + let ipfs_server = ipfs::start_server(cmd.ipfs_conf.clone(), client.clone())?; + + // the informant + let informant = Arc::new(Informant::new( + 
FullNodeInformantData { + client: service.client(), + sync: Some(sync_provider.clone()), + net: Some(manage_network.clone()), + }, + Some(snapshot_service.clone()), + Some(rpc_stats.clone()), + cmd.logger_config.color, + )); + service.add_notify(informant.clone()); + service + .register_io_handler(informant.clone()) + .map_err(|_| "Unable to register informant handler".to_owned())?; + + // save user defaults + user_defaults.is_first_launch = false; + user_defaults.pruning = algorithm; + user_defaults.tracing = tracing; + user_defaults.fat_db = fat_db; + user_defaults.set_mode(mode); + user_defaults.save(&user_defaults_path)?; + + // tell client how to save the default mode if it gets changed. + client.on_user_defaults_change(move |mode: Option| { + if let Some(mode) = mode { + user_defaults.set_mode(mode); + } + let _ = user_defaults.save(&user_defaults_path); // discard failures - there's nothing we can do + }); + + // the watcher must be kept alive. + let watcher = match cmd.snapshot_conf.no_periodic { + true => None, + false => { + let sync = sync_provider.clone(); + let client = client.clone(); + let watcher = Arc::new(snapshot::Watcher::new( + service.client(), + move || is_major_importing(Some(sync.status().state), client.queue_info()), + service.io().channel(), + SNAPSHOT_PERIOD, + SNAPSHOT_HISTORY, + )); + + service.add_notify(watcher.clone()); + Some(watcher) + } + }; + + client.set_exit_handler(on_client_rq); + updater.set_exit_handler(on_updater_rq); + + Ok(RunningClient { + inner: RunningClientInner::Full { + rpc: rpc_direct, + informant, + client, + client_service: Arc::new(service), + keep_alive: Box::new(( + watcher, + updater, + ws_server, + http_server, + ipc_server, + secretstore_key_server, + ipfs_server, + runtime, + )), + }, + }) } /// Parity client currently executing in background threads. 
@@ -838,86 +991,103 @@ fn execute_impl(cmd: RunCmd, logger: Arc, on_client_rq: /// Should be destroyed by calling `shutdown()`, otherwise execution will continue in the /// background. pub struct RunningClient { - inner: RunningClientInner, + inner: RunningClientInner, } enum RunningClientInner { - Light { - rpc: jsonrpc_core::MetaIoHandler>, - informant: Arc>, - client: Arc, - keep_alive: Box, - }, - Full { - rpc: jsonrpc_core::MetaIoHandler>, - informant: Arc>, - client: Arc, - client_service: Arc, - keep_alive: Box, - }, + Light { + rpc: jsonrpc_core::MetaIoHandler< + Metadata, + informant::Middleware, + >, + informant: Arc>, + client: Arc, + keep_alive: Box, + }, + Full { + rpc: + jsonrpc_core::MetaIoHandler>, + informant: Arc>, + client: Arc, + client_service: Arc, + keep_alive: Box, + }, } impl RunningClient { - /// Performs an asynchronous RPC query. - // FIXME: [tomaka] This API should be better, with for example a Future - pub fn rpc_query(&self, request: &str, session: Option>) - -> FutureResult - { - let metadata = Metadata { - origin: Origin::CApi, - session, - }; + /// Performs an asynchronous RPC query. + // FIXME: [tomaka] This API should be better, with for example a Future + pub fn rpc_query( + &self, + request: &str, + session: Option>, + ) -> FutureResult { + let metadata = Metadata { + origin: Origin::CApi, + session, + }; - match self.inner { - RunningClientInner::Light { ref rpc, .. } => rpc.handle_request(request, metadata), - RunningClientInner::Full { ref rpc, .. } => rpc.handle_request(request, metadata), - } - } + match self.inner { + RunningClientInner::Light { ref rpc, .. } => rpc.handle_request(request, metadata), + RunningClientInner::Full { ref rpc, .. } => rpc.handle_request(request, metadata), + } + } - /// Shuts down the client. 
- pub fn shutdown(self) { - match self.inner { - RunningClientInner::Light { rpc, informant, client, keep_alive } => { - // Create a weak reference to the client so that we can wait on shutdown - // until it is dropped - let weak_client = Arc::downgrade(&client); - drop(rpc); - drop(keep_alive); - informant.shutdown(); - drop(informant); - drop(client); - wait_for_drop(weak_client); - }, - RunningClientInner::Full { rpc, informant, client, client_service, keep_alive } => { - info!("Finishing work, please wait..."); - // Create a weak reference to the client so that we can wait on shutdown - // until it is dropped - let weak_client = Arc::downgrade(&client); - // Shutdown and drop the ClientService - client_service.shutdown(); - trace!(target: "shutdown", "ClientService shut down"); - drop(client_service); - trace!(target: "shutdown", "ClientService dropped"); - // drop this stuff as soon as exit detected. - drop(rpc); - trace!(target: "shutdown", "RPC dropped"); - drop(keep_alive); - trace!(target: "shutdown", "KeepAlive dropped"); - // to make sure timer does not spawn requests while shutdown is in progress - informant.shutdown(); - trace!(target: "shutdown", "Informant shut down"); - // just Arc is dropping here, to allow other reference release in its default time - drop(informant); - trace!(target: "shutdown", "Informant dropped"); - drop(client); - trace!(target: "shutdown", "Client dropped"); - // This may help when debugging ref cycles. Requires nightly-only `#![feature(weak_counts)]` - // trace!(target: "shutdown", "Waiting for refs to Client to shutdown, strong_count={:?}, weak_count={:?}", weak_client.strong_count(), weak_client.weak_count()); - trace!(target: "shutdown", "Waiting for refs to Client to shutdown"); - wait_for_drop(weak_client); - } - } - } + /// Shuts down the client. 
+ pub fn shutdown(self) { + match self.inner { + RunningClientInner::Light { + rpc, + informant, + client, + keep_alive, + } => { + // Create a weak reference to the client so that we can wait on shutdown + // until it is dropped + let weak_client = Arc::downgrade(&client); + drop(rpc); + drop(keep_alive); + informant.shutdown(); + drop(informant); + drop(client); + wait_for_drop(weak_client); + } + RunningClientInner::Full { + rpc, + informant, + client, + client_service, + keep_alive, + } => { + info!("Finishing work, please wait..."); + // Create a weak reference to the client so that we can wait on shutdown + // until it is dropped + let weak_client = Arc::downgrade(&client); + // Shutdown and drop the ClientService + client_service.shutdown(); + trace!(target: "shutdown", "ClientService shut down"); + drop(client_service); + trace!(target: "shutdown", "ClientService dropped"); + // drop this stuff as soon as exit detected. + drop(rpc); + trace!(target: "shutdown", "RPC dropped"); + drop(keep_alive); + trace!(target: "shutdown", "KeepAlive dropped"); + // to make sure timer does not spawn requests while shutdown is in progress + informant.shutdown(); + trace!(target: "shutdown", "Informant shut down"); + // just Arc is dropping here, to allow other reference release in its default time + drop(informant); + trace!(target: "shutdown", "Informant dropped"); + drop(client); + trace!(target: "shutdown", "Client dropped"); + // This may help when debugging ref cycles. Requires nightly-only `#![feature(weak_counts)]` + // trace!(target: "shutdown", "Waiting for refs to Client to shutdown, strong_count={:?}, weak_count={:?}", weak_client.strong_count(), weak_client.weak_count()); + trace!(target: "shutdown", "Waiting for refs to Client to shutdown"); + wait_for_drop(weak_client); + } + } + } } /// Executes the given run command. @@ -928,51 +1098,65 @@ impl RunningClient { /// `on_updater_rq` is the action to perform when the updater has a new binary to execute. 
/// /// On error, returns what to print on stderr. -pub fn execute(cmd: RunCmd, logger: Arc, - on_client_rq: Cr, on_updater_rq: Rr) -> Result - where Cr: Fn(String) + 'static + Send, - Rr: Fn() + 'static + Send +pub fn execute( + cmd: RunCmd, + logger: Arc, + on_client_rq: Cr, + on_updater_rq: Rr, +) -> Result +where + Cr: Fn(String) + 'static + Send, + Rr: Fn() + 'static + Send, { - if cmd.light { - execute_light_impl(cmd, logger, on_client_rq) - } else { - execute_impl(cmd, logger, on_client_rq, on_updater_rq) - } + if cmd.light { + execute_light_impl(cmd, logger, on_client_rq) + } else { + execute_impl(cmd, logger, on_client_rq, on_updater_rq) + } } fn print_running_environment(data_dir: &str, dirs: &Directories, db_dirs: &DatabaseDirectories) { - info!("Starting {}", Colour::White.bold().paint(version())); - info!("Keys path {}", Colour::White.bold().paint(dirs.keys_path(data_dir).to_string_lossy().into_owned())); - info!("DB path {}", Colour::White.bold().paint(db_dirs.db_root_path().to_string_lossy().into_owned())); + info!("Starting {}", Colour::White.bold().paint(version())); + info!( + "Keys path {}", + Colour::White + .bold() + .paint(dirs.keys_path(data_dir).to_string_lossy().into_owned()) + ); + info!( + "DB path {}", + Colour::White + .bold() + .paint(db_dirs.db_root_path().to_string_lossy().into_owned()) + ); } fn wait_for_drop(w: Weak) { - const SLEEP_DURATION: Duration = Duration::from_secs(1); - const WARN_TIMEOUT: Duration = Duration::from_secs(60); - const MAX_TIMEOUT: Duration = Duration::from_secs(300); + const SLEEP_DURATION: Duration = Duration::from_secs(1); + const WARN_TIMEOUT: Duration = Duration::from_secs(60); + const MAX_TIMEOUT: Duration = Duration::from_secs(300); - let instant = Instant::now(); - let mut warned = false; + let instant = Instant::now(); + let mut warned = false; - while instant.elapsed() < MAX_TIMEOUT { - if w.upgrade().is_none() { - return; - } + while instant.elapsed() < MAX_TIMEOUT { + if w.upgrade().is_none() { + 
return; + } - if !warned && instant.elapsed() > WARN_TIMEOUT { - warned = true; - warn!("Shutdown is taking longer than expected."); - } + if !warned && instant.elapsed() > WARN_TIMEOUT { + warned = true; + warn!("Shutdown is taking longer than expected."); + } - thread::sleep(SLEEP_DURATION); + thread::sleep(SLEEP_DURATION); - // When debugging shutdown issues on a nightly build it can help to enable this with the - // `#![feature(weak_counts)]` added to lib.rs (TODO: enable when - // https://github.com/rust-lang/rust/issues/57977 is stable) - // trace!(target: "shutdown", "Waiting for client to drop, strong_count={:?}, weak_count={:?}", w.strong_count(), w.weak_count()); - trace!(target: "shutdown", "Waiting for client to drop"); - } + // When debugging shutdown issues on a nightly build it can help to enable this with the + // `#![feature(weak_counts)]` added to lib.rs (TODO: enable when + // https://github.com/rust-lang/rust/issues/57977 is stable) + // trace!(target: "shutdown", "Waiting for client to drop, strong_count={:?}, weak_count={:?}", w.strong_count(), w.weak_count()); + trace!(target: "shutdown", "Waiting for client to drop"); + } - warn!("Shutdown timeout reached, exiting uncleanly."); + warn!("Shutdown timeout reached, exiting uncleanly."); } - diff --git a/parity/secretstore.rs b/parity/secretstore.rs index d9075edec..930430560 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -14,236 +14,319 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::BTreeMap; -use std::sync::Arc; use account_utils::AccountProvider; -use dir::default_data_path; -use dir::helpers::replace_home; -use ethcore::client::Client; -use ethcore::miner::Miner; -use ethkey::{Secret, Public, Password}; -use sync::SyncProvider; +use dir::{default_data_path, helpers::replace_home}; +use ethcore::{client::Client, miner::Miner}; use ethereum_types::Address; +use ethkey::{Password, Public, Secret}; use parity_runtime::Executor; +use std::{collections::BTreeMap, sync::Arc}; +use sync::SyncProvider; /// This node secret key. #[derive(Debug, PartialEq, Clone)] pub enum NodeSecretKey { - /// Stored as plain text in configuration file. - Plain(Secret), - /// Stored as account in key store. - #[cfg(feature = "accounts")] - KeyStore(Address), + /// Stored as plain text in configuration file. + Plain(Secret), + /// Stored as account in key store. + #[cfg(feature = "accounts")] + KeyStore(Address), } /// Secret store service contract address. #[derive(Debug, PartialEq, Clone)] pub enum ContractAddress { - /// Contract address is read from registry. - Registry, - /// Contract address is specified. - Address(Address), + /// Contract address is read from registry. + Registry, + /// Contract address is specified. + Address(Address), } #[derive(Debug, PartialEq, Clone)] /// Secret store configuration pub struct Configuration { - /// Is secret store functionality enabled? - pub enabled: bool, - /// Is HTTP API enabled? - pub http_enabled: bool, - /// Is auto migrate enabled. - pub auto_migrate_enabled: bool, - /// ACL check contract address. - pub acl_check_contract_address: Option, - /// Service contract address. - pub service_contract_address: Option, - /// Server key generation service contract address. - pub service_contract_srv_gen_address: Option, - /// Server key retrieval service contract address. - pub service_contract_srv_retr_address: Option, - /// Document key store service contract address. 
- pub service_contract_doc_store_address: Option, - /// Document key shadow retrieval service contract address. - pub service_contract_doc_sretr_address: Option, - /// This node secret. - pub self_secret: Option, - /// Other nodes IDs + addresses. - pub nodes: BTreeMap, - /// Key Server Set contract address. If None, 'nodes' map is used. - pub key_server_set_contract_address: Option, - /// Interface to listen to - pub interface: String, - /// Port to listen to - pub port: u16, - /// Interface to listen to - pub http_interface: String, - /// Port to listen to - pub http_port: u16, - /// Data directory path for secret store - pub data_path: String, - /// Administrator public key. - pub admin_public: Option, + /// Is secret store functionality enabled? + pub enabled: bool, + /// Is HTTP API enabled? + pub http_enabled: bool, + /// Is auto migrate enabled. + pub auto_migrate_enabled: bool, + /// ACL check contract address. + pub acl_check_contract_address: Option, + /// Service contract address. + pub service_contract_address: Option, + /// Server key generation service contract address. + pub service_contract_srv_gen_address: Option, + /// Server key retrieval service contract address. + pub service_contract_srv_retr_address: Option, + /// Document key store service contract address. + pub service_contract_doc_store_address: Option, + /// Document key shadow retrieval service contract address. + pub service_contract_doc_sretr_address: Option, + /// This node secret. + pub self_secret: Option, + /// Other nodes IDs + addresses. + pub nodes: BTreeMap, + /// Key Server Set contract address. If None, 'nodes' map is used. + pub key_server_set_contract_address: Option, + /// Interface to listen to + pub interface: String, + /// Port to listen to + pub port: u16, + /// Interface to listen to + pub http_interface: String, + /// Port to listen to + pub http_port: u16, + /// Data directory path for secret store + pub data_path: String, + /// Administrator public key. 
+ pub admin_public: Option, } /// Secret store dependencies pub struct Dependencies<'a> { - /// Blockchain client. - pub client: Arc, - /// Sync provider. - pub sync: Arc, - /// Miner service. - pub miner: Arc, - /// Account provider. - pub account_provider: Arc, - /// Passed accounts passwords. - pub accounts_passwords: &'a [Password], + /// Blockchain client. + pub client: Arc, + /// Sync provider. + pub sync: Arc, + /// Miner service. + pub miner: Arc, + /// Account provider. + pub account_provider: Arc, + /// Passed accounts passwords. + pub accounts_passwords: &'a [Password], } #[cfg(not(feature = "secretstore"))] mod server { - use super::{Configuration, Dependencies, Executor}; + use super::{Configuration, Dependencies, Executor}; - /// Noop key server implementation - pub struct KeyServer; + /// Noop key server implementation + pub struct KeyServer; - impl KeyServer { - /// Create new noop key server - pub fn new(_conf: Configuration, _deps: Dependencies, _executor: Executor) -> Result { - Ok(KeyServer) - } - } + impl KeyServer { + /// Create new noop key server + pub fn new( + _conf: Configuration, + _deps: Dependencies, + _executor: Executor, + ) -> Result { + Ok(KeyServer) + } + } } #[cfg(feature = "secretstore")] mod server { - use std::sync::Arc; - use ethcore_secretstore; - use ethkey::KeyPair; - use ansi_term::Colour::{Red, White}; - use db; - use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress, Executor}; + use super::{Configuration, ContractAddress, Dependencies, Executor, NodeSecretKey}; + use ansi_term::Colour::{Red, White}; + use db; + use ethcore_secretstore; + use ethkey::KeyPair; + use std::sync::Arc; - fn into_service_contract_address(address: ContractAddress) -> ethcore_secretstore::ContractAddress { - match address { - ContractAddress::Registry => ethcore_secretstore::ContractAddress::Registry, - ContractAddress::Address(address) => ethcore_secretstore::ContractAddress::Address(address), - } - } + fn 
into_service_contract_address( + address: ContractAddress, + ) -> ethcore_secretstore::ContractAddress { + match address { + ContractAddress::Registry => ethcore_secretstore::ContractAddress::Registry, + ContractAddress::Address(address) => { + ethcore_secretstore::ContractAddress::Address(address) + } + } + } - /// Key server - pub struct KeyServer { - _key_server: Box, - } + /// Key server + pub struct KeyServer { + _key_server: Box, + } - impl KeyServer { - /// Create new key server - pub fn new(mut conf: Configuration, deps: Dependencies, executor: Executor) -> Result { - let self_secret: Arc = match conf.self_secret.take() { - Some(NodeSecretKey::Plain(secret)) => Arc::new(ethcore_secretstore::PlainNodeKeyPair::new( - KeyPair::from_secret(secret).map_err(|e| format!("invalid secret: {}", e))?)), - #[cfg(feature = "accounts")] - Some(NodeSecretKey::KeyStore(account)) => { - // Check if account exists - if !deps.account_provider.has_account(account.clone()) { - return Err(format!("Account {} passed as secret store node key is not found", account)); - } + impl KeyServer { + /// Create new key server + pub fn new( + mut conf: Configuration, + deps: Dependencies, + executor: Executor, + ) -> Result { + let self_secret: Arc = match conf.self_secret.take() { + Some(NodeSecretKey::Plain(secret)) => { + Arc::new(ethcore_secretstore::PlainNodeKeyPair::new( + KeyPair::from_secret(secret) + .map_err(|e| format!("invalid secret: {}", e))?, + )) + } + #[cfg(feature = "accounts")] + Some(NodeSecretKey::KeyStore(account)) => { + // Check if account exists + if !deps.account_provider.has_account(account.clone()) { + return Err(format!( + "Account {} passed as secret store node key is not found", + account + )); + } - // Check if any passwords have been read from the password file(s) - if deps.accounts_passwords.is_empty() { - return Err(format!("No password found for the secret store node account {}", account)); - } + // Check if any passwords have been read from the password 
file(s) + if deps.accounts_passwords.is_empty() { + return Err(format!( + "No password found for the secret store node account {}", + account + )); + } - // Attempt to sign in the engine signer. - let password = deps.accounts_passwords.iter() - .find(|p| deps.account_provider.sign(account.clone(), Some((*p).clone()), Default::default()).is_ok()) - .ok_or_else(|| format!("No valid password for the secret store node account {}", account))?; - Arc::new(ethcore_secretstore::KeyStoreNodeKeyPair::new(deps.account_provider, account, password.clone()) - .map_err(|e| format!("{}", e))?) - }, - None => return Err("self secret is required when using secretstore".into()), - }; + // Attempt to sign in the engine signer. + let password = deps + .accounts_passwords + .iter() + .find(|p| { + deps.account_provider + .sign(account.clone(), Some((*p).clone()), Default::default()) + .is_ok() + }) + .ok_or_else(|| { + format!( + "No valid password for the secret store node account {}", + account + ) + })?; + Arc::new( + ethcore_secretstore::KeyStoreNodeKeyPair::new( + deps.account_provider, + account, + password.clone(), + ) + .map_err(|e| format!("{}", e))?, + ) + } + None => return Err("self secret is required when using secretstore".into()), + }; - info!("Starting SecretStore node: {}", White.bold().paint(format!("{:?}", self_secret.public()))); - if conf.acl_check_contract_address.is_none() { - warn!("Running SecretStore with disabled ACL check: {}", Red.bold().paint("everyone has access to stored keys")); - } + info!( + "Starting SecretStore node: {}", + White.bold().paint(format!("{:?}", self_secret.public())) + ); + if conf.acl_check_contract_address.is_none() { + warn!( + "Running SecretStore with disabled ACL check: {}", + Red.bold().paint("everyone has access to stored keys") + ); + } - let key_server_name = format!("{}:{}", conf.interface, conf.port); - let mut cconf = ethcore_secretstore::ServiceConfiguration { - listener_address: if conf.http_enabled { 
Some(ethcore_secretstore::NodeAddress { - address: conf.http_interface.clone(), - port: conf.http_port, - }) } else { None }, - service_contract_address: conf.service_contract_address.map(into_service_contract_address), - service_contract_srv_gen_address: conf.service_contract_srv_gen_address.map(into_service_contract_address), - service_contract_srv_retr_address: conf.service_contract_srv_retr_address.map(into_service_contract_address), - service_contract_doc_store_address: conf.service_contract_doc_store_address.map(into_service_contract_address), - service_contract_doc_sretr_address: conf.service_contract_doc_sretr_address.map(into_service_contract_address), - acl_check_contract_address: conf.acl_check_contract_address.map(into_service_contract_address), - cluster_config: ethcore_secretstore::ClusterConfiguration { - listener_address: ethcore_secretstore::NodeAddress { - address: conf.interface.clone(), - port: conf.port, - }, - nodes: conf.nodes.into_iter().map(|(p, (ip, port))| (p, ethcore_secretstore::NodeAddress { - address: ip, - port: port, - })).collect(), - key_server_set_contract_address: conf.key_server_set_contract_address.map(into_service_contract_address), - allow_connecting_to_higher_nodes: true, - admin_public: conf.admin_public, - auto_migrate_enabled: conf.auto_migrate_enabled, - }, - }; + let key_server_name = format!("{}:{}", conf.interface, conf.port); + let mut cconf = ethcore_secretstore::ServiceConfiguration { + listener_address: if conf.http_enabled { + Some(ethcore_secretstore::NodeAddress { + address: conf.http_interface.clone(), + port: conf.http_port, + }) + } else { + None + }, + service_contract_address: conf + .service_contract_address + .map(into_service_contract_address), + service_contract_srv_gen_address: conf + .service_contract_srv_gen_address + .map(into_service_contract_address), + service_contract_srv_retr_address: conf + .service_contract_srv_retr_address + .map(into_service_contract_address), + 
service_contract_doc_store_address: conf + .service_contract_doc_store_address + .map(into_service_contract_address), + service_contract_doc_sretr_address: conf + .service_contract_doc_sretr_address + .map(into_service_contract_address), + acl_check_contract_address: conf + .acl_check_contract_address + .map(into_service_contract_address), + cluster_config: ethcore_secretstore::ClusterConfiguration { + listener_address: ethcore_secretstore::NodeAddress { + address: conf.interface.clone(), + port: conf.port, + }, + nodes: conf + .nodes + .into_iter() + .map(|(p, (ip, port))| { + ( + p, + ethcore_secretstore::NodeAddress { + address: ip, + port: port, + }, + ) + }) + .collect(), + key_server_set_contract_address: conf + .key_server_set_contract_address + .map(into_service_contract_address), + allow_connecting_to_higher_nodes: true, + admin_public: conf.admin_public, + auto_migrate_enabled: conf.auto_migrate_enabled, + }, + }; - cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone()); + cconf.cluster_config.nodes.insert( + self_secret.public().clone(), + cconf.cluster_config.listener_address.clone(), + ); - let db = db::open_secretstore_db(&conf.data_path)?; - let key_server = ethcore_secretstore::start(deps.client, deps.sync, deps.miner, self_secret, cconf, db, executor) - .map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?; + let db = db::open_secretstore_db(&conf.data_path)?; + let key_server = ethcore_secretstore::start( + deps.client, + deps.sync, + deps.miner, + self_secret, + cconf, + db, + executor, + ) + .map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?; - Ok(KeyServer { - _key_server: key_server, - }) - } - } + Ok(KeyServer { + _key_server: key_server, + }) + } + } } pub use self::server::KeyServer; impl Default for Configuration { - fn default() -> Self { - let data_dir = default_data_path(); - Configuration { - enabled: true, - http_enabled: true, - 
auto_migrate_enabled: true, - acl_check_contract_address: Some(ContractAddress::Registry), - service_contract_address: None, - service_contract_srv_gen_address: None, - service_contract_srv_retr_address: None, - service_contract_doc_store_address: None, - service_contract_doc_sretr_address: None, - self_secret: None, - admin_public: None, - nodes: BTreeMap::new(), - key_server_set_contract_address: Some(ContractAddress::Registry), - interface: "127.0.0.1".to_owned(), - port: 8083, - http_interface: "127.0.0.1".to_owned(), - http_port: 8082, - data_path: replace_home(&data_dir, "$BASE/secretstore"), - } - } + fn default() -> Self { + let data_dir = default_data_path(); + Configuration { + enabled: true, + http_enabled: true, + auto_migrate_enabled: true, + acl_check_contract_address: Some(ContractAddress::Registry), + service_contract_address: None, + service_contract_srv_gen_address: None, + service_contract_srv_retr_address: None, + service_contract_doc_store_address: None, + service_contract_doc_sretr_address: None, + self_secret: None, + admin_public: None, + nodes: BTreeMap::new(), + key_server_set_contract_address: Some(ContractAddress::Registry), + interface: "127.0.0.1".to_owned(), + port: 8083, + http_interface: "127.0.0.1".to_owned(), + http_port: 8082, + data_path: replace_home(&data_dir, "$BASE/secretstore"), + } + } } /// Start secret store-related functionality -pub fn start(conf: Configuration, deps: Dependencies, executor: Executor) -> Result, String> { - if !conf.enabled { - return Ok(None); - } +pub fn start( + conf: Configuration, + deps: Dependencies, + executor: Executor, +) -> Result, String> { + if !conf.enabled { + return Ok(None); + } - KeyServer::new(conf, deps, executor) - .map(|s| Some(s)) + KeyServer::new(conf, deps, executor).map(|s| Some(s)) } diff --git a/parity/signer.rs b/parity/signer.rs index 98f2b8cc6..df24ed1c9 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -14,72 +14,87 @@ // You should have received a copy of the GNU 
General Public License // along with Parity Ethereum. If not, see . -use std::io; -use std::path::{Path, PathBuf}; +use std::{ + io, + path::{Path, PathBuf}, +}; use ansi_term::Colour::White; use ethcore_logger::Config as LogConfig; -use rpc; -use rpc_apis; use parity_rpc; use path::restrict_permissions_owner; +use rpc; +use rpc_apis; pub const CODES_FILENAME: &'static str = "authcodes"; pub struct NewToken { - pub token: String, - pub message: String, + pub token: String, + pub message: String, } -pub fn new_service(ws_conf: &rpc::WsConfiguration, logger_config: &LogConfig) -> rpc_apis::SignerService { - let logger_config_color = logger_config.color; - let signer_path = ws_conf.signer_path.clone(); - let signer_enabled = ws_conf.support_token_api; +pub fn new_service( + ws_conf: &rpc::WsConfiguration, + logger_config: &LogConfig, +) -> rpc_apis::SignerService { + let logger_config_color = logger_config.color; + let signer_path = ws_conf.signer_path.clone(); + let signer_enabled = ws_conf.support_token_api; - rpc_apis::SignerService::new(move || { - generate_new_token(&signer_path, logger_config_color).map_err(|e| format!("{:?}", e)) - }, signer_enabled) + rpc_apis::SignerService::new( + move || { + generate_new_token(&signer_path, logger_config_color).map_err(|e| format!("{:?}", e)) + }, + signer_enabled, + ) } pub fn codes_path(path: &Path) -> PathBuf { - let mut p = path.to_owned(); - p.push(CODES_FILENAME); - let _ = restrict_permissions_owner(&p, true, false); - p + let mut p = path.to_owned(); + p.push(CODES_FILENAME); + let _ = restrict_permissions_owner(&p, true, false); + p } pub fn execute(ws_conf: rpc::WsConfiguration, logger_config: LogConfig) -> Result { - Ok(generate_token_and_url(&ws_conf, &logger_config)?.message) + Ok(generate_token_and_url(&ws_conf, &logger_config)?.message) } -pub fn generate_token_and_url(ws_conf: &rpc::WsConfiguration, logger_config: &LogConfig) -> Result { - let code = generate_new_token(&ws_conf.signer_path, 
logger_config.color).map_err(|err| format!("Error generating token: {:?}", err))?; - let colored = |s: String| match logger_config.color { - true => format!("{}", White.bold().paint(s)), - false => s, - }; +pub fn generate_token_and_url( + ws_conf: &rpc::WsConfiguration, + logger_config: &LogConfig, +) -> Result { + let code = generate_new_token(&ws_conf.signer_path, logger_config.color) + .map_err(|err| format!("Error generating token: {:?}", err))?; + let colored = |s: String| match logger_config.color { + true => format!("{}", White.bold().paint(s)), + false => s, + }; - Ok(NewToken { - token: code.clone(), - message: format!( - r#" + Ok(NewToken { + token: code.clone(), + message: format!( + r#" Generated token: {} "#, - colored(code) - ), - }) + colored(code) + ), + }) } fn generate_new_token(path: &Path, logger_config_color: bool) -> io::Result { - let path = codes_path(path); - let mut codes = parity_rpc::AuthCodes::from_file(&path)?; - codes.clear_garbage(); - let code = codes.generate_new()?; - codes.to_file(&path)?; - trace!("New key code created: {}", match logger_config_color { - true => format!("{}", White.bold().paint(&code[..])), - false => format!("{}", &code[..]) - }); - Ok(code) + let path = codes_path(path); + let mut codes = parity_rpc::AuthCodes::from_file(&path)?; + codes.clear_garbage(); + let code = codes.generate_new()?; + codes.to_file(&path)?; + trace!( + "New key code created: {}", + match logger_config_color { + true => format!("{}", White.bold().paint(&code[..])), + false => format!("{}", &code[..]), + } + ); + Ok(code) } diff --git a/parity/snapshot.rs b/parity/snapshot.rs index 269965c33..9577c6131 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -16,279 +16,336 @@ //! Snapshot and restoration commands. 
-use std::time::Duration; -use std::path::{Path, PathBuf}; -use std::sync::Arc; +use std::{ + path::{Path, PathBuf}, + sync::Arc, + time::Duration, +}; -use hash::keccak; -use ethcore::snapshot::{Progress, RestorationStatus, SnapshotConfiguration, SnapshotService as SS}; -use ethcore::snapshot::io::{SnapshotReader, PackedReader, PackedWriter}; -use ethcore::snapshot::service::Service as SnapshotService; -use ethcore::client::{Mode, DatabaseCompactionProfile, VMType}; -use ethcore::miner::Miner; +use ethcore::{ + client::{DatabaseCompactionProfile, Mode, VMType}, + miner::Miner, + snapshot::{ + io::{PackedReader, PackedWriter, SnapshotReader}, + service::Service as SnapshotService, + Progress, RestorationStatus, SnapshotConfiguration, SnapshotService as SS, + }, +}; use ethcore_service::ClientService; +use hash::keccak; use types::ids::BlockId; use cache::CacheConfig; -use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool}; -use helpers::{to_client_config, execute_upgrades}; -use dir::Directories; -use user_defaults::UserDefaults; -use ethcore_private_tx; use db; +use dir::Directories; +use ethcore_private_tx; +use helpers::{execute_upgrades, to_client_config}; +use params::{fatdb_switch_to_bool, tracing_switch_to_bool, Pruning, SpecType, Switch}; +use user_defaults::UserDefaults; /// Kinds of snapshot commands. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Kind { - /// Take a snapshot. - Take, - /// Restore a snapshot. - Restore + /// Take a snapshot. + Take, + /// Restore a snapshot. + Restore, } /// Command for snapshot creation or restoration. 
#[derive(Debug, PartialEq)] pub struct SnapshotCommand { - pub cache_config: CacheConfig, - pub dirs: Directories, - pub spec: SpecType, - pub pruning: Pruning, - pub pruning_history: u64, - pub pruning_memory: usize, - pub tracing: Switch, - pub fat_db: Switch, - pub compaction: DatabaseCompactionProfile, - pub file_path: Option, - pub kind: Kind, - pub block_at: BlockId, - pub max_round_blocks_to_import: usize, - pub snapshot_conf: SnapshotConfiguration, + pub cache_config: CacheConfig, + pub dirs: Directories, + pub spec: SpecType, + pub pruning: Pruning, + pub pruning_history: u64, + pub pruning_memory: usize, + pub tracing: Switch, + pub fat_db: Switch, + pub compaction: DatabaseCompactionProfile, + pub file_path: Option, + pub kind: Kind, + pub block_at: BlockId, + pub max_round_blocks_to_import: usize, + pub snapshot_conf: SnapshotConfiguration, } // helper for reading chunks from arbitrary reader and feeding them into the // service. -fn restore_using(snapshot: Arc, reader: &R, recover: bool) -> Result<(), String> { - let manifest = reader.manifest(); +fn restore_using( + snapshot: Arc, + reader: &R, + recover: bool, +) -> Result<(), String> { + let manifest = reader.manifest(); - info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash); + info!( + "Restoring to block #{} (0x{:?})", + manifest.block_number, manifest.block_hash + ); - snapshot.init_restore(manifest.clone(), recover).map_err(|e| { - format!("Failed to begin restoration: {}", e) - })?; + snapshot + .init_restore(manifest.clone(), recover) + .map_err(|e| format!("Failed to begin restoration: {}", e))?; - let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len()); + let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len()); - let informant_handle = snapshot.clone(); - ::std::thread::spawn(move || { - while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. 
} = informant_handle.status() { - info!("Processed {}/{} state chunks and {}/{} block chunks.", - state_chunks_done, num_state, block_chunks_done, num_blocks); - ::std::thread::sleep(Duration::from_secs(5)); - } - }); + let informant_handle = snapshot.clone(); + ::std::thread::spawn(move || { + while let RestorationStatus::Ongoing { + state_chunks_done, + block_chunks_done, + .. + } = informant_handle.status() + { + info!( + "Processed {}/{} state chunks and {}/{} block chunks.", + state_chunks_done, num_state, block_chunks_done, num_blocks + ); + ::std::thread::sleep(Duration::from_secs(5)); + } + }); - info!("Restoring state"); - for &state_hash in &manifest.state_hashes { - if snapshot.status() == RestorationStatus::Failed { - return Err("Restoration failed".into()); - } + info!("Restoring state"); + for &state_hash in &manifest.state_hashes { + if snapshot.status() == RestorationStatus::Failed { + return Err("Restoration failed".into()); + } - let chunk = reader.chunk(state_hash) - .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))?; + let chunk = reader.chunk(state_hash).map_err(|e| { + format!( + "Encountered error while reading chunk {:?}: {}", + state_hash, e + ) + })?; - let hash = keccak(&chunk); - if hash != state_hash { - return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", state_hash, hash)); - } + let hash = keccak(&chunk); + if hash != state_hash { + return Err(format!( + "Mismatched chunk hash. 
Expected {:?}, got {:?}", + state_hash, hash + )); + } - snapshot.feed_state_chunk(state_hash, &chunk); - } + snapshot.feed_state_chunk(state_hash, &chunk); + } - info!("Restoring blocks"); - for &block_hash in &manifest.block_hashes { - if snapshot.status() == RestorationStatus::Failed { - return Err("Restoration failed".into()); - } + info!("Restoring blocks"); + for &block_hash in &manifest.block_hashes { + if snapshot.status() == RestorationStatus::Failed { + return Err("Restoration failed".into()); + } - let chunk = reader.chunk(block_hash) - .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))?; + let chunk = reader.chunk(block_hash).map_err(|e| { + format!( + "Encountered error while reading chunk {:?}: {}", + block_hash, e + ) + })?; - let hash = keccak(&chunk); - if hash != block_hash { - return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", block_hash, hash)); - } - snapshot.feed_block_chunk(block_hash, &chunk); - } + let hash = keccak(&chunk); + if hash != block_hash { + return Err(format!( + "Mismatched chunk hash. Expected {:?}, got {:?}", + block_hash, hash + )); + } + snapshot.feed_block_chunk(block_hash, &chunk); + } - match snapshot.status() { - RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()), - RestorationStatus::Initializing { .. } => Err("Snapshot restoration is still initializing.".into()), - RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), - RestorationStatus::Inactive => { - info!("Restoration complete."); - Ok(()) - } - } + match snapshot.status() { + RestorationStatus::Ongoing { .. } => { + Err("Snapshot file is incomplete and missing chunks.".into()) + } + RestorationStatus::Initializing { .. 
} => { + Err("Snapshot restoration is still initializing.".into()) + } + RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), + RestorationStatus::Inactive => { + info!("Restoration complete."); + Ok(()) + } + } } impl SnapshotCommand { - // shared portion of snapshot commands: start the client service - fn start_service(self) -> Result { - // load spec file - let spec = self.spec.spec(&self.dirs.cache)?; + // shared portion of snapshot commands: start the client service + fn start_service(self) -> Result { + // load spec file + let spec = self.spec.spec(&self.dirs.cache)?; - // load genesis hash - let genesis_hash = spec.genesis_header().hash(); + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); - // database paths - let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone()); + // database paths + let db_dirs = self + .dirs + .database(genesis_hash, None, spec.data_dir.clone()); - // user defaults path - let user_defaults_path = db_dirs.user_defaults_path(); + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); - // load user defaults - let user_defaults = UserDefaults::load(&user_defaults_path)?; + // load user defaults + let user_defaults = UserDefaults::load(&user_defaults_path)?; - // select pruning algorithm - let algorithm = self.pruning.to_algorithm(&user_defaults); + // select pruning algorithm + let algorithm = self.pruning.to_algorithm(&user_defaults); - // check if tracing is on - let tracing = tracing_switch_to_bool(self.tracing, &user_defaults)?; + // check if tracing is on + let tracing = tracing_switch_to_bool(self.tracing, &user_defaults)?; - // check if fatdb is on - let fat_db = fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)?; + // check if fatdb is on + let fat_db = fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)?; - // prepare client and snapshot paths. 
- let client_path = db_dirs.client_path(algorithm); - let snapshot_path = db_dirs.snapshot_path(); + // prepare client and snapshot paths. + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); - // execute upgrades - execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?; + // execute upgrades + execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?; - // prepare client config - let mut client_config = to_client_config( - &self.cache_config, - spec.name.to_lowercase(), - Mode::Active, - tracing, - fat_db, - self.compaction, - VMType::default(), - "".into(), - algorithm, - self.pruning_history, - self.pruning_memory, - true, - self.max_round_blocks_to_import, - ); + // prepare client config + let mut client_config = to_client_config( + &self.cache_config, + spec.name.to_lowercase(), + Mode::Active, + tracing, + fat_db, + self.compaction, + VMType::default(), + "".into(), + algorithm, + self.pruning_history, + self.pruning_memory, + true, + self.max_round_blocks_to_import, + ); - client_config.snapshot = self.snapshot_conf; + client_config.snapshot = self.snapshot_conf; - let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); - let client_db = restoration_db_handler.open(&client_path) - .map_err(|e| format!("Failed to open database {:?}", e))?; + let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); + let client_db = restoration_db_handler + .open(&client_path) + .map_err(|e| format!("Failed to open database {:?}", e))?; - let service = ClientService::start( - client_config, - &spec, - client_db, - &snapshot_path, - restoration_db_handler, - &self.dirs.ipc_path(), - // TODO [ToDr] don't use test miner here - // (actually don't require miner at all) - Arc::new(Miner::new_for_tests(&spec, None)), - Arc::new(ethcore_private_tx::DummySigner), - Box::new(ethcore_private_tx::NoopEncryptor), - Default::default(), - 
Default::default(), - ).map_err(|e| format!("Client service error: {:?}", e))?; + let service = ClientService::start( + client_config, + &spec, + client_db, + &snapshot_path, + restoration_db_handler, + &self.dirs.ipc_path(), + // TODO [ToDr] don't use test miner here + // (actually don't require miner at all) + Arc::new(Miner::new_for_tests(&spec, None)), + Arc::new(ethcore_private_tx::DummySigner), + Box::new(ethcore_private_tx::NoopEncryptor), + Default::default(), + Default::default(), + ) + .map_err(|e| format!("Client service error: {:?}", e))?; - Ok(service) - } - /// restore from a snapshot - pub fn restore(self) -> Result<(), String> { - let file = self.file_path.clone(); - let service = self.start_service()?; + Ok(service) + } + /// restore from a snapshot + pub fn restore(self) -> Result<(), String> { + let file = self.file_path.clone(); + let service = self.start_service()?; - warn!("Snapshot restoration is experimental and the format may be subject to change."); - warn!("On encountering an unexpected error, please ensure that you have a recent snapshot."); + warn!("Snapshot restoration is experimental and the format may be subject to change."); + warn!( + "On encountering an unexpected error, please ensure that you have a recent snapshot." 
+ ); - let snapshot = service.snapshot_service(); + let snapshot = service.snapshot_service(); - if let Some(file) = file { - info!("Attempting to restore from snapshot at '{}'", file); + if let Some(file) = file { + info!("Attempting to restore from snapshot at '{}'", file); - let reader = PackedReader::new(Path::new(&file)) - .map_err(|e| format!("Couldn't open snapshot file: {}", e)) - .and_then(|x| x.ok_or("Snapshot file has invalid format.".into())); + let reader = PackedReader::new(Path::new(&file)) + .map_err(|e| format!("Couldn't open snapshot file: {}", e)) + .and_then(|x| x.ok_or("Snapshot file has invalid format.".into())); - let reader = reader?; - restore_using(snapshot, &reader, true)?; - } else { - info!("Attempting to restore from local snapshot."); + let reader = reader?; + restore_using(snapshot, &reader, true)?; + } else { + info!("Attempting to restore from local snapshot."); - // attempting restoration with recovery will lead to deadlock - // as we currently hold a read lock on the service's reader. - match *snapshot.reader() { - Some(ref reader) => restore_using(snapshot.clone(), reader, false)?, - None => return Err("No local snapshot found.".into()), - } - } + // attempting restoration with recovery will lead to deadlock + // as we currently hold a read lock on the service's reader. + match *snapshot.reader() { + Some(ref reader) => restore_using(snapshot.clone(), reader, false)?, + None => return Err("No local snapshot found.".into()), + } + } - Ok(()) - } + Ok(()) + } - /// Take a snapshot from the head of the chain. - pub fn take_snapshot(self) -> Result<(), String> { - let file_path = self.file_path.clone().ok_or("No file path provided.".to_owned())?; - let file_path: PathBuf = file_path.into(); - let block_at = self.block_at; - let service = self.start_service()?; + /// Take a snapshot from the head of the chain. 
+ pub fn take_snapshot(self) -> Result<(), String> { + let file_path = self + .file_path + .clone() + .ok_or("No file path provided.".to_owned())?; + let file_path: PathBuf = file_path.into(); + let block_at = self.block_at; + let service = self.start_service()?; - warn!("Snapshots are currently experimental. File formats may be subject to change."); + warn!("Snapshots are currently experimental. File formats may be subject to change."); - let writer = PackedWriter::new(&file_path) - .map_err(|e| format!("Failed to open snapshot writer: {}", e))?; + let writer = PackedWriter::new(&file_path) + .map_err(|e| format!("Failed to open snapshot writer: {}", e))?; - let progress = Arc::new(Progress::default()); - let p = progress.clone(); - let informant_handle = ::std::thread::spawn(move || { - ::std::thread::sleep(Duration::from_secs(5)); + let progress = Arc::new(Progress::default()); + let p = progress.clone(); + let informant_handle = ::std::thread::spawn(move || { + ::std::thread::sleep(Duration::from_secs(5)); - let mut last_size = 0; - while !p.done() { - let cur_size = p.size(); - if cur_size != last_size { - last_size = cur_size; - let bytes = ::informant::format_bytes(cur_size as usize); - info!("Snapshot: {} accounts {} blocks {}", p.accounts(), p.blocks(), bytes); - } + let mut last_size = 0; + while !p.done() { + let cur_size = p.size(); + if cur_size != last_size { + last_size = cur_size; + let bytes = ::informant::format_bytes(cur_size as usize); + info!( + "Snapshot: {} accounts {} blocks {}", + p.accounts(), + p.blocks(), + bytes + ); + } - ::std::thread::sleep(Duration::from_secs(5)); - } - }); + ::std::thread::sleep(Duration::from_secs(5)); + } + }); - if let Err(e) = service.client().take_snapshot(writer, block_at, &*progress) { - let _ = ::std::fs::remove_file(&file_path); - return Err(format!("Encountered fatal error while creating snapshot: {}", e)); - } + if let Err(e) = service.client().take_snapshot(writer, block_at, &*progress) { + let _ = 
::std::fs::remove_file(&file_path); + return Err(format!( + "Encountered fatal error while creating snapshot: {}", + e + )); + } - info!("snapshot creation complete"); + info!("snapshot creation complete"); - assert!(progress.done()); - informant_handle.join().map_err(|_| "failed to join logger thread")?; + assert!(progress.done()); + informant_handle + .join() + .map_err(|_| "failed to join logger thread")?; - Ok(()) - } + Ok(()) + } } /// Execute this snapshot command. pub fn execute(cmd: SnapshotCommand) -> Result { - match cmd.kind { - Kind::Take => cmd.take_snapshot()?, - Kind::Restore => cmd.restore()?, - } + match cmd.kind { + Kind::Take => cmd.take_snapshot()?, + Kind::Restore => cmd.restore()?, + } - Ok(String::new()) + Ok(String::new()) } diff --git a/parity/upgrade.rs b/parity/upgrade.rs index ecd9beff1..ce9e94642 100644 --- a/parity/upgrade.rs +++ b/parity/upgrade.rs @@ -16,202 +16,231 @@ //! Parity upgrade logic -use semver::{Version, SemVerError}; -use std::collections::*; -use std::fs::{self, File, create_dir_all}; -use std::io; -use std::io::{Read, Write}; -use std::path::{PathBuf, Path}; -use dir::{DatabaseDirectories, default_data_path, home_dir}; -use dir::helpers::replace_home; +use dir::{default_data_path, helpers::replace_home, home_dir, DatabaseDirectories}; use journaldb::Algorithm; +use semver::{SemVerError, Version}; +use std::{ + collections::*, + fs::{self, create_dir_all, File}, + io, + io::{Read, Write}, + path::{Path, PathBuf}, +}; #[derive(Debug)] pub enum Error { - CannotCreateConfigPath(io::Error), - CannotWriteVersionFile(io::Error), - CannotUpdateVersionFile(io::Error), - SemVer(SemVerError), + CannotCreateConfigPath(io::Error), + CannotWriteVersionFile(io::Error), + CannotUpdateVersionFile(io::Error), + SemVer(SemVerError), } impl From for Error { - fn from(err: SemVerError) -> Self { - Error::SemVer(err) - } + fn from(err: SemVerError) -> Self { + Error::SemVer(err) + } } const CURRENT_VERSION: &'static str = 
env!("CARGO_PKG_VERSION"); #[derive(Hash, PartialEq, Eq)] struct UpgradeKey { - pub old_version: Version, - pub new_version: Version, + pub old_version: Version, + pub new_version: Version, } type UpgradeList = HashMap Result<(), Error>>; impl UpgradeKey { - // given the following config exist - // ver.lock 1.1 (`previous_version`) - // - // current_version 1.4 (`current_version`) - // - // - //upgrades (set of `UpgradeKey`) - // 1.0 -> 1.1 (u1) - // 1.1 -> 1.2 (u2) - // 1.2 -> 1.3 (u3) - // 1.3 -> 1.4 (u4) - // 1.4 -> 1.5 (u5) - // - // then the following upgrades should be applied: - // u2, u3, u4 - fn is_applicable(&self, previous_version: &Version, current_version: &Version) -> bool { - self.old_version >= *previous_version && self.new_version <= *current_version - } + // given the following config exist + // ver.lock 1.1 (`previous_version`) + // + // current_version 1.4 (`current_version`) + // + // + //upgrades (set of `UpgradeKey`) + // 1.0 -> 1.1 (u1) + // 1.1 -> 1.2 (u2) + // 1.2 -> 1.3 (u3) + // 1.3 -> 1.4 (u4) + // 1.4 -> 1.5 (u5) + // + // then the following upgrades should be applied: + // u2, u3, u4 + fn is_applicable(&self, previous_version: &Version, current_version: &Version) -> bool { + self.old_version >= *previous_version && self.new_version <= *current_version + } } // dummy upgrade (remove when the first one is in) fn dummy_upgrade() -> Result<(), Error> { - Ok(()) + Ok(()) } -fn push_upgrades(upgrades: &mut UpgradeList) -{ - // dummy upgrade (remove when the first one is in) - upgrades.insert( - UpgradeKey { old_version: Version::new(0, 9, 0), new_version: Version::new(1, 0, 0)}, - dummy_upgrade); +fn push_upgrades(upgrades: &mut UpgradeList) { + // dummy upgrade (remove when the first one is in) + upgrades.insert( + UpgradeKey { + old_version: Version::new(0, 9, 0), + new_version: Version::new(1, 0, 0), + }, + dummy_upgrade, + ); } fn upgrade_from_version(previous_version: &Version) -> Result { - let mut upgrades = HashMap::new(); - 
push_upgrades(&mut upgrades); + let mut upgrades = HashMap::new(); + push_upgrades(&mut upgrades); - let current_version = Version::parse(CURRENT_VERSION)?; + let current_version = Version::parse(CURRENT_VERSION)?; - let mut count = 0; - for upgrade_key in upgrades.keys() { - if upgrade_key.is_applicable(previous_version, ¤t_version) { - let upgrade_script = upgrades[upgrade_key]; - upgrade_script()?; - count += 1; - } - } - Ok(count) + let mut count = 0; + for upgrade_key in upgrades.keys() { + if upgrade_key.is_applicable(previous_version, ¤t_version) { + let upgrade_script = upgrades[upgrade_key]; + upgrade_script()?; + count += 1; + } + } + Ok(count) } fn with_locked_version(db_path: &str, script: F) -> Result - where F: Fn(&Version) -> Result +where + F: Fn(&Version) -> Result, { - let mut path = PathBuf::from(db_path); - create_dir_all(&path).map_err(Error::CannotCreateConfigPath)?; - path.push("ver.lock"); + let mut path = PathBuf::from(db_path); + create_dir_all(&path).map_err(Error::CannotCreateConfigPath)?; + path.push("ver.lock"); - let version = - File::open(&path).ok().and_then(|ref mut file| - { - let mut version_string = String::new(); - file.read_to_string(&mut version_string) - .ok() - .and_then(|_| Version::parse(&version_string).ok()) - }) - .unwrap_or(Version::new(0, 9, 0)); + let version = File::open(&path) + .ok() + .and_then(|ref mut file| { + let mut version_string = String::new(); + file.read_to_string(&mut version_string) + .ok() + .and_then(|_| Version::parse(&version_string).ok()) + }) + .unwrap_or(Version::new(0, 9, 0)); - let mut lock = File::create(&path).map_err(Error::CannotWriteVersionFile)?; - let result = script(&version); + let mut lock = File::create(&path).map_err(Error::CannotWriteVersionFile)?; + let result = script(&version); - let written_version = Version::parse(CURRENT_VERSION)?; - lock.write_all(written_version.to_string().as_bytes()).map_err(Error::CannotUpdateVersionFile)?; - result + let written_version = 
Version::parse(CURRENT_VERSION)?; + lock.write_all(written_version.to_string().as_bytes()) + .map_err(Error::CannotUpdateVersionFile)?; + result } pub fn upgrade(db_path: &str) -> Result { - with_locked_version(db_path, |ver| { - upgrade_from_version(ver) - }) + with_locked_version(db_path, |ver| upgrade_from_version(ver)) } fn file_exists(path: &Path) -> bool { - match fs::metadata(&path) { - Err(ref e) if e.kind() == io::ErrorKind::NotFound => false, - _ => true, - } + match fs::metadata(&path) { + Err(ref e) if e.kind() == io::ErrorKind::NotFound => false, + _ => true, + } } #[cfg(any(test, feature = "accounts"))] pub fn upgrade_key_location(from: &PathBuf, to: &PathBuf) { - match fs::create_dir_all(&to).and_then(|()| fs::read_dir(from)) { - Ok(entries) => { - let files: Vec<_> = entries.filter_map(|f| f.ok().and_then(|f| if f.file_type().ok().map_or(false, |f| f.is_file()) { f.file_name().to_str().map(|s| s.to_owned()) } else { None })).collect(); - let mut num: usize = 0; - for name in files { - let mut from = from.clone(); - from.push(&name); - let mut to = to.clone(); - to.push(&name); - if !file_exists(&to) { - if let Err(e) = fs::rename(&from, &to) { - debug!("Error upgrading key {:?}: {:?}", from, e); - } else { - num += 1; - } - } else { - debug!("Skipped upgrading key {:?}", from); - } - } - if num > 0 { - info!("Moved {} keys from {} to {}", num, from.to_string_lossy(), to.to_string_lossy()); - } - }, - Err(e) => { - debug!("Error moving keys from {:?} to {:?}: {:?}", from, to, e); - } - } + match fs::create_dir_all(&to).and_then(|()| fs::read_dir(from)) { + Ok(entries) => { + let files: Vec<_> = entries + .filter_map(|f| { + f.ok().and_then(|f| { + if f.file_type().ok().map_or(false, |f| f.is_file()) { + f.file_name().to_str().map(|s| s.to_owned()) + } else { + None + } + }) + }) + .collect(); + let mut num: usize = 0; + for name in files { + let mut from = from.clone(); + from.push(&name); + let mut to = to.clone(); + to.push(&name); + if 
!file_exists(&to) { + if let Err(e) = fs::rename(&from, &to) { + debug!("Error upgrading key {:?}: {:?}", from, e); + } else { + num += 1; + } + } else { + debug!("Skipped upgrading key {:?}", from); + } + } + if num > 0 { + info!( + "Moved {} keys from {} to {}", + num, + from.to_string_lossy(), + to.to_string_lossy() + ); + } + } + Err(e) => { + debug!("Error moving keys from {:?} to {:?}: {:?}", from, to, e); + } + } } fn upgrade_dir_location(source: &PathBuf, dest: &PathBuf) { - if file_exists(&source) { - if !file_exists(&dest) { - let mut parent = dest.clone(); - parent.pop(); - if let Err(e) = fs::create_dir_all(&parent).and_then(|()| fs::rename(&source, &dest)) { - debug!("Skipped path {:?} -> {:?} :{:?}", source, dest, e); - } else { - info!("Moved {} to {}", source.to_string_lossy(), dest.to_string_lossy()); - } - } else { - debug!("Skipped upgrading directory {:?}, Destination already exists at {:?}", source, dest); - } - } + if file_exists(&source) { + if !file_exists(&dest) { + let mut parent = dest.clone(); + parent.pop(); + if let Err(e) = fs::create_dir_all(&parent).and_then(|()| fs::rename(&source, &dest)) { + debug!("Skipped path {:?} -> {:?} :{:?}", source, dest, e); + } else { + info!( + "Moved {} to {}", + source.to_string_lossy(), + dest.to_string_lossy() + ); + } + } else { + debug!( + "Skipped upgrading directory {:?}, Destination already exists at {:?}", + source, dest + ); + } + } } fn upgrade_user_defaults(dirs: &DatabaseDirectories) { - let source = dirs.legacy_user_defaults_path(); - let dest = dirs.user_defaults_path(); - if file_exists(&source) { - if !file_exists(&dest) { - if let Err(e) = fs::rename(&source, &dest) { - debug!("Skipped upgrading user defaults {:?}:{:?}", dest, e); - } - } else { - debug!("Skipped upgrading user defaults {:?}, File exists at {:?}", source, dest); - } - } + let source = dirs.legacy_user_defaults_path(); + let dest = dirs.user_defaults_path(); + if file_exists(&source) { + if !file_exists(&dest) { + if 
let Err(e) = fs::rename(&source, &dest) { + debug!("Skipped upgrading user defaults {:?}:{:?}", dest, e); + } + } else { + debug!( + "Skipped upgrading user defaults {:?}, File exists at {:?}", + source, dest + ); + } + } } pub fn upgrade_data_paths(base_path: &str, dirs: &DatabaseDirectories, pruning: Algorithm) { - if home_dir().is_none() { - return; - } + if home_dir().is_none() { + return; + } - let legacy_root_path = replace_home("", "$HOME/.parity"); - let default_path = default_data_path(); - if legacy_root_path != base_path && base_path == default_path { - upgrade_dir_location(&PathBuf::from(legacy_root_path), &PathBuf::from(&base_path)); - } - upgrade_dir_location(&dirs.legacy_version_path(pruning), &dirs.db_path(pruning)); - upgrade_dir_location(&dirs.legacy_snapshot_path(), &dirs.snapshot_path()); - upgrade_dir_location(&dirs.legacy_network_path(), &dirs.network_path()); - upgrade_user_defaults(&dirs); + let legacy_root_path = replace_home("", "$HOME/.parity"); + let default_path = default_data_path(); + if legacy_root_path != base_path && base_path == default_path { + upgrade_dir_location(&PathBuf::from(legacy_root_path), &PathBuf::from(&base_path)); + } + upgrade_dir_location(&dirs.legacy_version_path(pruning), &dirs.db_path(pruning)); + upgrade_dir_location(&dirs.legacy_snapshot_path(), &dirs.snapshot_path()); + upgrade_dir_location(&dirs.legacy_network_path(), &dirs.network_path()); + upgrade_user_defaults(&dirs); } diff --git a/parity/user_defaults.rs b/parity/user_defaults.rs index bdd5d9efb..e0fb1de4a 100644 --- a/parity/user_defaults.rs +++ b/parity/user_defaults.rs @@ -14,161 +14,175 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::fs::File; -use std::io::Write; -use std::path::Path; -use std::time::Duration; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde_json::de::from_reader; -use serde_json::ser::to_string; +use ethcore::client::Mode as ClientMode; use journaldb::Algorithm; -use ethcore::client::{Mode as ClientMode}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde_json::{de::from_reader, ser::to_string}; +use std::{fs::File, io::Write, path::Path, time::Duration}; #[derive(Clone)] pub struct Seconds(Duration); impl Seconds { - pub fn value(&self) -> u64 { - self.0.as_secs() - } + pub fn value(&self) -> u64 { + self.0.as_secs() + } } impl From for Seconds { - fn from(s: u64) -> Seconds { - Seconds(Duration::from_secs(s)) - } + fn from(s: u64) -> Seconds { + Seconds(Duration::from_secs(s)) + } } impl From for Seconds { - fn from(d: Duration) -> Seconds { - Seconds(d) - } + fn from(d: Duration) -> Seconds { + Seconds(d) + } } impl Into for Seconds { - fn into(self) -> Duration { - self.0 - } + fn into(self) -> Duration { + self.0 + } } impl Serialize for Seconds { - fn serialize(&self, serializer: S) -> Result { - serializer.serialize_u64(self.value()) - } + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_u64(self.value()) + } } impl<'de> Deserialize<'de> for Seconds { - fn deserialize>(deserializer: D) -> Result { - let secs = u64::deserialize(deserializer)?; - Ok(Seconds::from(secs)) - } + fn deserialize>(deserializer: D) -> Result { + let secs = u64::deserialize(deserializer)?; + Ok(Seconds::from(secs)) + } } #[derive(Clone, Serialize, Deserialize)] #[serde(rename_all = "lowercase", tag = "mode")] pub enum Mode { - Active, - Passive { - #[serde(rename = "mode.timeout")] - timeout: Seconds, - #[serde(rename = "mode.alarm")] - alarm: Seconds, - }, - Dark { - #[serde(rename = "mode.timeout")] - timeout: Seconds, - }, - Offline, + Active, + Passive { + #[serde(rename = "mode.timeout")] + timeout: Seconds, + 
#[serde(rename = "mode.alarm")] + alarm: Seconds, + }, + Dark { + #[serde(rename = "mode.timeout")] + timeout: Seconds, + }, + Offline, } impl Into for Mode { - fn into(self) -> ClientMode { - match self { - Mode::Active => ClientMode::Active, - Mode::Passive { timeout, alarm } => ClientMode::Passive(timeout.into(), alarm.into()), - Mode::Dark { timeout } => ClientMode::Dark(timeout.into()), - Mode::Offline => ClientMode::Off, - } - } + fn into(self) -> ClientMode { + match self { + Mode::Active => ClientMode::Active, + Mode::Passive { timeout, alarm } => ClientMode::Passive(timeout.into(), alarm.into()), + Mode::Dark { timeout } => ClientMode::Dark(timeout.into()), + Mode::Offline => ClientMode::Off, + } + } } impl From for Mode { - fn from(mode: ClientMode) -> Mode { - match mode { - ClientMode::Active => Mode::Active, - ClientMode::Passive(timeout, alarm) => Mode::Passive { timeout: timeout.into(), alarm: alarm.into() }, - ClientMode::Dark(timeout) => Mode::Dark { timeout: timeout.into() }, - ClientMode::Off => Mode::Offline, - } - } + fn from(mode: ClientMode) -> Mode { + match mode { + ClientMode::Active => Mode::Active, + ClientMode::Passive(timeout, alarm) => Mode::Passive { + timeout: timeout.into(), + alarm: alarm.into(), + }, + ClientMode::Dark(timeout) => Mode::Dark { + timeout: timeout.into(), + }, + ClientMode::Off => Mode::Offline, + } + } } #[derive(Serialize, Deserialize)] pub struct UserDefaults { - pub is_first_launch: bool, - #[serde(with = "algorithm_serde")] - pub pruning: Algorithm, - pub tracing: bool, - pub fat_db: bool, - #[serde(flatten)] - mode: Mode, + pub is_first_launch: bool, + #[serde(with = "algorithm_serde")] + pub pruning: Algorithm, + pub tracing: bool, + pub fat_db: bool, + #[serde(flatten)] + mode: Mode, } impl UserDefaults { - pub fn mode(&self) -> ClientMode { - self.mode.clone().into() - } + pub fn mode(&self) -> ClientMode { + self.mode.clone().into() + } - pub fn set_mode(&mut self, mode: ClientMode) { - self.mode = 
mode.into(); - } + pub fn set_mode(&mut self, mode: ClientMode) { + self.mode = mode.into(); + } } mod algorithm_serde { - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - use serde::de::Error; - use journaldb::Algorithm; + use journaldb::Algorithm; + use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; - pub fn serialize(algorithm: &Algorithm, serializer: S) -> Result - where S: Serializer { - algorithm.as_str().serialize(serializer) - } + pub fn serialize(algorithm: &Algorithm, serializer: S) -> Result + where + S: Serializer, + { + algorithm.as_str().serialize(serializer) + } - pub fn deserialize<'de, D>(deserializer: D) -> Result - where D: Deserializer<'de> { - let pruning = String::deserialize(deserializer)?; - pruning.parse().map_err(|_| Error::custom("invalid pruning method")) - } + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let pruning = String::deserialize(deserializer)?; + pruning + .parse() + .map_err(|_| Error::custom("invalid pruning method")) + } } impl Default for UserDefaults { - fn default() -> Self { - UserDefaults { - is_first_launch: true, - pruning: Algorithm::OverlayRecent, - tracing: false, - fat_db: false, - mode: Mode::Active, - } - } + fn default() -> Self { + UserDefaults { + is_first_launch: true, + pruning: Algorithm::OverlayRecent, + tracing: false, + fat_db: false, + mode: Mode::Active, + } + } } impl UserDefaults { - pub fn load

(path: P) -> Result where P: AsRef { - match File::open(path) { - Ok(file) => match from_reader(file) { - Ok(defaults) => Ok(defaults), - Err(e) => { - warn!("Error loading user defaults file: {:?}", e); - Ok(UserDefaults::default()) - }, - }, - _ => Ok(UserDefaults::default()), - } - } + pub fn load

(path: P) -> Result + where + P: AsRef, + { + match File::open(path) { + Ok(file) => match from_reader(file) { + Ok(defaults) => Ok(defaults), + Err(e) => { + warn!("Error loading user defaults file: {:?}", e); + Ok(UserDefaults::default()) + } + }, + _ => Ok(UserDefaults::default()), + } + } - pub fn save

(&self, path: P) -> Result<(), String> where P: AsRef { - let mut file: File = File::create(path).map_err(|_| "Cannot create user defaults file".to_owned())?; - file.write_all(to_string(&self).unwrap().as_bytes()).map_err(|_| "Failed to save user defaults".to_owned()) - } + pub fn save

(&self, path: P) -> Result<(), String> + where + P: AsRef, + { + let mut file: File = + File::create(path).map_err(|_| "Cannot create user defaults file".to_owned())?; + file.write_all(to_string(&self).unwrap().as_bytes()) + .map_err(|_| "Failed to save user defaults".to_owned()) + } } diff --git a/parity/whisper.rs b/parity/whisper.rs index e9a744b54..1ee685e1d 100644 --- a/parity/whisper.rs +++ b/parity/whisper.rs @@ -14,101 +14,111 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::io; +use std::{io, sync::Arc}; -use sync::{AttachedProtocol, ManageNetwork}; use parity_rpc::Metadata; -use parity_whisper::message::Message; -use parity_whisper::net::{self as whisper_net, Network as WhisperNetwork}; -use parity_whisper::rpc::{WhisperClient, PoolHandle, FilterManager}; +use parity_whisper::{ + message::Message, + net::{self as whisper_net, Network as WhisperNetwork}, + rpc::{FilterManager, PoolHandle, WhisperClient}, +}; +use sync::{AttachedProtocol, ManageNetwork}; /// Whisper config. #[derive(Debug, PartialEq, Eq)] pub struct Config { - pub enabled: bool, - pub target_message_pool_size: usize, + pub enabled: bool, + pub target_message_pool_size: usize, } impl Default for Config { - fn default() -> Self { - Config { - enabled: false, - target_message_pool_size: 10 * 1024 * 1024, - } - } + fn default() -> Self { + Config { + enabled: false, + target_message_pool_size: 10 * 1024 * 1024, + } + } } /// Standard pool handle. pub struct NetPoolHandle { - /// Pool handle. - handle: Arc>>, - /// Network manager. - net: Arc, + /// Pool handle. + handle: Arc>>, + /// Network manager. 
+ net: Arc, } impl PoolHandle for NetPoolHandle { - fn relay(&self, message: Message) -> bool { - let mut res = false; - let mut message = Some(message); - self.net.with_proto_context(whisper_net::PROTOCOL_ID, &mut |ctx| { - if let Some(message) = message.take() { - res = self.handle.post_message(message, ctx); - } - }); - res - } + fn relay(&self, message: Message) -> bool { + let mut res = false; + let mut message = Some(message); + self.net + .with_proto_context(whisper_net::PROTOCOL_ID, &mut |ctx| { + if let Some(message) = message.take() { + res = self.handle.post_message(message, ctx); + } + }); + res + } - fn pool_status(&self) -> whisper_net::PoolStatus { - self.handle.pool_status() - } + fn pool_status(&self) -> whisper_net::PoolStatus { + self.handle.pool_status() + } } /// Factory for standard whisper RPC. pub struct RpcFactory { - net: Arc>>, - manager: Arc, + net: Arc>>, + manager: Arc, } impl RpcFactory { - pub fn make_handler(&self, net: Arc) -> WhisperClient { - let handle = NetPoolHandle { handle: self.net.clone(), net: net }; - WhisperClient::new(handle, self.manager.clone()) - } + pub fn make_handler(&self, net: Arc) -> WhisperClient { + let handle = NetPoolHandle { + handle: self.net.clone(), + net: net, + }; + WhisperClient::new(handle, self.manager.clone()) + } } /// Sets up whisper protocol and RPC handler. /// /// Will target the given pool size. 
#[cfg(not(feature = "ipc"))] -pub fn setup(target_pool_size: usize, protos: &mut Vec) - -> io::Result> -{ - let manager = Arc::new(FilterManager::new()?); - let net = Arc::new(WhisperNetwork::new(target_pool_size, manager.clone())); +pub fn setup( + target_pool_size: usize, + protos: &mut Vec, +) -> io::Result> { + let manager = Arc::new(FilterManager::new()?); + let net = Arc::new(WhisperNetwork::new(target_pool_size, manager.clone())); - protos.push(AttachedProtocol { - handler: net.clone() as Arc<_>, - versions: whisper_net::SUPPORTED_VERSIONS, - protocol_id: whisper_net::PROTOCOL_ID, - }); + protos.push(AttachedProtocol { + handler: net.clone() as Arc<_>, + versions: whisper_net::SUPPORTED_VERSIONS, + protocol_id: whisper_net::PROTOCOL_ID, + }); - // parity-only extensions to whisper. - protos.push(AttachedProtocol { - handler: Arc::new(whisper_net::ParityExtensions), - versions: whisper_net::SUPPORTED_VERSIONS, - protocol_id: whisper_net::PARITY_PROTOCOL_ID, - }); + // parity-only extensions to whisper. + protos.push(AttachedProtocol { + handler: Arc::new(whisper_net::ParityExtensions), + versions: whisper_net::SUPPORTED_VERSIONS, + protocol_id: whisper_net::PARITY_PROTOCOL_ID, + }); - let factory = RpcFactory { net: net, manager: manager }; + let factory = RpcFactory { + net: net, + manager: manager, + }; - Ok(Some(factory)) + Ok(Some(factory)) } // TODO: make it possible to attach generic protocols in IPC. #[cfg(feature = "ipc")] -pub fn setup(_target_pool_size: usize, _protos: &mut Vec) - -> io::Result> -{ - Ok(None) +pub fn setup( + _target_pool_size: usize, + _protos: &mut Vec, +) -> io::Result> { + Ok(None) } diff --git a/rpc/src/authcodes.rs b/rpc/src/authcodes.rs index b348dfd72..550ec67c1 100644 --- a/rpc/src/authcodes.rs +++ b/rpc/src/authcodes.rs @@ -14,26 +14,29 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::io::{self, Read, Write}; -use std::path::Path; -use std::{fs, time, mem}; +use std::{ + fs, + io::{self, Read, Write}, + mem, + path::Path, + time, +}; -use itertools::Itertools; -use rand::Rng; -use rand::os::OsRng; -use hash::keccak; use ethereum_types::H256; +use hash::keccak; +use itertools::Itertools; +use rand::{os::OsRng, Rng}; /// Providing current time in seconds pub trait TimeProvider { - /// Returns timestamp (in seconds since epoch) - fn now(&self) -> u64; + /// Returns timestamp (in seconds since epoch) + fn now(&self) -> u64; } -impl u64> TimeProvider for F { - fn now(&self) -> u64 { - self() - } +impl u64> TimeProvider for F { + fn now(&self) -> u64 { + self() + } } /// Default implementation of `TimeProvider` using system time. @@ -41,9 +44,12 @@ impl u64> TimeProvider for F { pub struct DefaultTimeProvider; impl TimeProvider for DefaultTimeProvider { - fn now(&self) -> u64 { - time::UNIX_EPOCH.elapsed().expect("Valid time has to be set in your system.").as_secs() - } + fn now(&self) -> u64 { + time::UNIX_EPOCH + .elapsed() + .expect("Valid time has to be set in your system.") + .as_secs() + } } /// No of seconds the hash is valid @@ -56,285 +62,303 @@ const SEPARATOR: &str = ";"; const UNUSED_TOKEN_TIMEOUT: u64 = 3600 * 24; // a day struct Code { - code: String, - /// Duration since unix_epoch - created_at: time::Duration, - /// Duration since unix_epoch - last_used_at: Option, + code: String, + /// Duration since unix_epoch + created_at: time::Duration, + /// Duration since unix_epoch + last_used_at: Option, } fn decode_time(val: &str) -> Option { - let time = val.parse::().ok(); - time.map(time::Duration::from_secs) + let time = val.parse::().ok(); + time.map(time::Duration::from_secs) } fn encode_time(time: time::Duration) -> String { - format!("{}", time.as_secs()) + format!("{}", time.as_secs()) } /// Manages authorization codes for `SignerUIs` pub struct AuthCodes { - codes: Vec, - now: T, + codes: Vec, + now: T, } impl AuthCodes { 
+ /// Reads `AuthCodes` from file and creates new instance using `DefaultTimeProvider`. + pub fn from_file(file: &Path) -> io::Result { + let content = { + if let Ok(mut file) = fs::File::open(file) { + let mut s = String::new(); + let _ = file.read_to_string(&mut s)?; + s + } else { + "".into() + } + }; + let time_provider = DefaultTimeProvider::default(); - /// Reads `AuthCodes` from file and creates new instance using `DefaultTimeProvider`. - pub fn from_file(file: &Path) -> io::Result { - let content = { - if let Ok(mut file) = fs::File::open(file) { - let mut s = String::new(); - let _ = file.read_to_string(&mut s)?; - s - } else { - "".into() - } - }; - let time_provider = DefaultTimeProvider::default(); - - let codes = content.lines() - .filter_map(|line| { - let mut parts = line.split(SEPARATOR); - let token = parts.next(); - let created = parts.next(); - let used = parts.next(); - - match token { - None => None, - Some(token) if token.len() < TOKEN_LENGTH => None, - Some(token) => { - Some(Code { - code: token.into(), - last_used_at: used.and_then(decode_time), - created_at: created.and_then(decode_time) - .unwrap_or_else(|| time::Duration::from_secs(time_provider.now())), - }) - } - } - }) - .collect(); - Ok(AuthCodes { - codes, - now: time_provider, - }) - } + let codes = content + .lines() + .filter_map(|line| { + let mut parts = line.split(SEPARATOR); + let token = parts.next(); + let created = parts.next(); + let used = parts.next(); + match token { + None => None, + Some(token) if token.len() < TOKEN_LENGTH => None, + Some(token) => Some(Code { + code: token.into(), + last_used_at: used.and_then(decode_time), + created_at: created + .and_then(decode_time) + .unwrap_or_else(|| time::Duration::from_secs(time_provider.now())), + }), + } + }) + .collect(); + Ok(AuthCodes { + codes, + now: time_provider, + }) + } } impl AuthCodes { + /// Writes all `AuthCodes` to a disk. 
+ pub fn to_file(&self, file: &Path) -> io::Result<()> { + let mut file = fs::File::create(file)?; + let content = self + .codes + .iter() + .map(|code| { + let mut data = vec![code.code.clone(), encode_time(code.created_at)]; + if let Some(used_at) = code.last_used_at { + data.push(encode_time(used_at)); + } + data.join(SEPARATOR) + }) + .join("\n"); + file.write_all(content.as_bytes()) + } - /// Writes all `AuthCodes` to a disk. - pub fn to_file(&self, file: &Path) -> io::Result<()> { - let mut file = fs::File::create(file)?; - let content = self.codes.iter().map(|code| { - let mut data = vec![code.code.clone(), encode_time(code.created_at)]; - if let Some(used_at) = code.last_used_at { - data.push(encode_time(used_at)); - } - data.join(SEPARATOR) - }).join("\n"); - file.write_all(content.as_bytes()) - } + /// Creates a new `AuthCodes` store with given `TimeProvider`. + pub fn new(codes: Vec, now: T) -> Self { + AuthCodes { + codes: codes + .into_iter() + .map(|code| Code { + code, + created_at: time::Duration::from_secs(now.now()), + last_used_at: None, + }) + .collect(), + now, + } + } - /// Creates a new `AuthCodes` store with given `TimeProvider`. - pub fn new(codes: Vec, now: T) -> Self { - AuthCodes { - codes: codes.into_iter().map(|code| Code { - code, - created_at: time::Duration::from_secs(now.now()), - last_used_at: None, - }).collect(), - now, - } - } + /// Checks if given hash is correct authcode of `SignerUI` + /// Updates this hash last used field in case it's valid. + pub fn is_valid(&mut self, hash: &H256, time: u64) -> bool { + let now = self.now.now(); + // check time + if time >= now + TIME_THRESHOLD || time <= now - TIME_THRESHOLD { + warn!(target: "signer", "Received old authentication request. ({} vs {})", now, time); + return false; + } - /// Checks if given hash is correct authcode of `SignerUI` - /// Updates this hash last used field in case it's valid. 
- pub fn is_valid(&mut self, hash: &H256, time: u64) -> bool { - let now = self.now.now(); - // check time - if time >= now + TIME_THRESHOLD || time <= now - TIME_THRESHOLD { - warn!(target: "signer", "Received old authentication request. ({} vs {})", now, time); - return false; - } + let as_token = |code| keccak(format!("{}:{}", code, time)); - let as_token = |code| keccak(format!("{}:{}", code, time)); + // look for code + for code in &mut self.codes { + if &as_token(&code.code) == hash { + code.last_used_at = Some(time::Duration::from_secs(now)); + return true; + } + } - // look for code - for code in &mut self.codes { - if &as_token(&code.code) == hash { - code.last_used_at = Some(time::Duration::from_secs(now)); - return true; - } - } + false + } - false - } + /// Generates and returns a new code that can be used by `SignerUIs` + pub fn generate_new(&mut self) -> io::Result { + let mut rng = OsRng::new()?; + let code = rng.gen_ascii_chars().take(TOKEN_LENGTH).collect::(); + let readable_code = code + .as_bytes() + .chunks(4) + .filter_map(|f| String::from_utf8(f.to_vec()).ok()) + .collect::>() + .join("-"); + trace!(target: "signer", "New authentication token generated."); + self.codes.push(Code { + code, + created_at: time::Duration::from_secs(self.now.now()), + last_used_at: None, + }); + Ok(readable_code) + } - /// Generates and returns a new code that can be used by `SignerUIs` - pub fn generate_new(&mut self) -> io::Result { - let mut rng = OsRng::new()?; - let code = rng.gen_ascii_chars().take(TOKEN_LENGTH).collect::(); - let readable_code = code.as_bytes() - .chunks(4) - .filter_map(|f| String::from_utf8(f.to_vec()).ok()) - .collect::>() - .join("-"); - trace!(target: "signer", "New authentication token generated."); - self.codes.push(Code { - code, - created_at: time::Duration::from_secs(self.now.now()), - last_used_at: None, - }); - Ok(readable_code) - } + /// Returns true if there are no tokens in this store + pub fn is_empty(&self) -> bool { + 
self.codes.is_empty() + } - /// Returns true if there are no tokens in this store - pub fn is_empty(&self) -> bool { - self.codes.is_empty() - } + /// Removes old tokens that have not been used since creation. + pub fn clear_garbage(&mut self) { + let now = self.now.now(); + let threshold = time::Duration::from_secs(now.saturating_sub(UNUSED_TOKEN_TIMEOUT)); - /// Removes old tokens that have not been used since creation. - pub fn clear_garbage(&mut self) { - let now = self.now.now(); - let threshold = time::Duration::from_secs(now.saturating_sub(UNUSED_TOKEN_TIMEOUT)); - - let codes = mem::replace(&mut self.codes, Vec::new()); - for code in codes { - // Skip codes that are old and were never used. - if code.last_used_at.is_none() && code.created_at <= threshold { - continue; - } - self.codes.push(code); - } - } + let codes = mem::replace(&mut self.codes, Vec::new()); + for code in codes { + // Skip codes that are old and were never used. + if code.last_used_at.is_none() && code.created_at <= threshold { + continue; + } + self.codes.push(code); + } + } } #[cfg(test)] mod tests { - use std::io::{Read, Write}; - use std::{time, fs}; - use std::cell::Cell; - use tempdir::TempDir; - use hash::keccak; + use hash::keccak; + use std::{ + cell::Cell, + fs, + io::{Read, Write}, + time, + }; + use tempdir::TempDir; - use ethereum_types::H256; - use super::*; + use super::*; + use ethereum_types::H256; - fn generate_hash(val: &str, time: u64) -> H256 { - keccak(format!("{}:{}", val, time)) - } + fn generate_hash(val: &str, time: u64) -> H256 { + keccak(format!("{}:{}", val, time)) + } - #[test] - fn should_return_false_even_if_code_is_initial_and_store_is_empty() { - // given - let code = "initial"; - let time = 99; - let mut codes = AuthCodes::new(vec![], || 100); + #[test] + fn should_return_false_even_if_code_is_initial_and_store_is_empty() { + // given + let code = "initial"; + let time = 99; + let mut codes = AuthCodes::new(vec![], || 100); - // when - let res1 = 
codes.is_valid(&generate_hash(code, time), time); - let res2 = codes.is_valid(&generate_hash(code, time), time); + // when + let res1 = codes.is_valid(&generate_hash(code, time), time); + let res2 = codes.is_valid(&generate_hash(code, time), time); - // then - assert_eq!(res1, false); - assert_eq!(res2, false); - } + // then + assert_eq!(res1, false); + assert_eq!(res2, false); + } - #[test] - fn should_return_true_if_hash_is_valid() { - // given - let code = "23521352asdfasdfadf"; - let time = 99; - let mut codes = AuthCodes::new(vec![code.into()], || 100); + #[test] + fn should_return_true_if_hash_is_valid() { + // given + let code = "23521352asdfasdfadf"; + let time = 99; + let mut codes = AuthCodes::new(vec![code.into()], || 100); - // when - let res = codes.is_valid(&generate_hash(code, time), time); + // when + let res = codes.is_valid(&generate_hash(code, time), time); - // then - assert_eq!(res, true); - } + // then + assert_eq!(res, true); + } - #[test] - fn should_return_false_if_code_is_unknown() { - // given - let code = "23521352asdfasdfadf"; - let time = 99; - let mut codes = AuthCodes::new(vec!["1".into()], || 100); + #[test] + fn should_return_false_if_code_is_unknown() { + // given + let code = "23521352asdfasdfadf"; + let time = 99; + let mut codes = AuthCodes::new(vec!["1".into()], || 100); - // when - let res = codes.is_valid(&generate_hash(code, time), time); + // when + let res = codes.is_valid(&generate_hash(code, time), time); - // then - assert_eq!(res, false); - } + // then + assert_eq!(res, false); + } - #[test] - fn should_return_false_if_hash_is_valid_but_time_is_invalid() { - // given - let code = "23521352asdfasdfadf"; - let time = 107; - let time2 = 93; - let mut codes = AuthCodes::new(vec![code.into()], || 100); + #[test] + fn should_return_false_if_hash_is_valid_but_time_is_invalid() { + // given + let code = "23521352asdfasdfadf"; + let time = 107; + let time2 = 93; + let mut codes = AuthCodes::new(vec![code.into()], || 100); - // 
when - let res1 = codes.is_valid(&generate_hash(code, time), time); - let res2 = codes.is_valid(&generate_hash(code, time2), time2); + // when + let res1 = codes.is_valid(&generate_hash(code, time), time); + let res2 = codes.is_valid(&generate_hash(code, time2), time2); - // then - assert_eq!(res1, false); - assert_eq!(res2, false); - } + // then + assert_eq!(res1, false); + assert_eq!(res2, false); + } - #[test] - fn should_read_old_format_from_file() { - // given - let tempdir = TempDir::new("").unwrap(); - let file_path = tempdir.path().join("file"); - let code = "23521352asdfasdfadf"; - { - let mut file = fs::File::create(&file_path).unwrap(); - file.write_all(b"a\n23521352asdfasdfadf\nb\n").unwrap(); - } + #[test] + fn should_read_old_format_from_file() { + // given + let tempdir = TempDir::new("").unwrap(); + let file_path = tempdir.path().join("file"); + let code = "23521352asdfasdfadf"; + { + let mut file = fs::File::create(&file_path).unwrap(); + file.write_all(b"a\n23521352asdfasdfadf\nb\n").unwrap(); + } - // when - let mut authcodes = AuthCodes::from_file(&file_path).unwrap(); - let time = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); + // when + let mut authcodes = AuthCodes::from_file(&file_path).unwrap(); + let time = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); - // then - assert!(authcodes.is_valid(&generate_hash(code, time), time), "Code should be read from file"); - } + // then + assert!( + authcodes.is_valid(&generate_hash(code, time), time), + "Code should be read from file" + ); + } - #[test] - fn should_remove_old_unused_tokens() { - // given - let tempdir = TempDir::new("").unwrap(); - let file_path = tempdir.path().join("file"); - let code1 = "11111111asdfasdf111"; - let code2 = "22222222asdfasdf222"; - let code3 = "33333333asdfasdf333"; + #[test] + fn should_remove_old_unused_tokens() { + // given + let tempdir = TempDir::new("").unwrap(); + let file_path = tempdir.path().join("file"); + let code1 = "11111111asdfasdf111"; + let code2 = 
"22222222asdfasdf222"; + let code3 = "33333333asdfasdf333"; - let time = Cell::new(100); - let mut codes = AuthCodes::new(vec![code1.into(), code2.into(), code3.into()], || time.get()); - // `code2` should not be removed (we never remove tokens that were used) - codes.is_valid(&generate_hash(code2, time.get()), time.get()); + let time = Cell::new(100); + let mut codes = AuthCodes::new(vec![code1.into(), code2.into(), code3.into()], || { + time.get() + }); + // `code2` should not be removed (we never remove tokens that were used) + codes.is_valid(&generate_hash(code2, time.get()), time.get()); - // when - time.set(100 + 10_000_000); - // mark `code1` as used now - codes.is_valid(&generate_hash(code1, time.get()), time.get()); + // when + time.set(100 + 10_000_000); + // mark `code1` as used now + codes.is_valid(&generate_hash(code1, time.get()), time.get()); - let new_code = codes.generate_new().unwrap().replace('-', ""); - codes.clear_garbage(); - codes.to_file(&file_path).unwrap(); + let new_code = codes.generate_new().unwrap().replace('-', ""); + codes.clear_garbage(); + codes.to_file(&file_path).unwrap(); - // then - let mut content = String::new(); - let mut file = fs::File::open(&file_path).unwrap(); - file.read_to_string(&mut content).unwrap(); - - assert_eq!(content, format!("{};100;10000100\n{};100;100\n{};10000100", code1, code2, new_code)); - } + // then + let mut content = String::new(); + let mut file = fs::File::open(&file_path).unwrap(); + file.read_to_string(&mut content).unwrap(); + assert_eq!( + content, + format!( + "{};100;10000100\n{};100;100\n{};10000100", + code1, code2, new_code + ) + ); + } } diff --git a/rpc/src/http_common.rs b/rpc/src/http_common.rs index 99bd392f3..f9e4ac9f5 100644 --- a/rpc/src/http_common.rs +++ b/rpc/src/http_common.rs @@ -16,39 +16,40 @@ //! Transport-specific metadata extractors. 
-use jsonrpc_core; use http; use hyper; +use jsonrpc_core; /// HTTP RPC server impl-independent metadata extractor pub trait HttpMetaExtractor: Send + Sync + 'static { - /// Type of Metadata - type Metadata: jsonrpc_core::Metadata; - /// Extracts metadata from given params. - fn read_metadata(&self, origin: Option, user_agent: Option) -> Self::Metadata; + /// Type of Metadata + type Metadata: jsonrpc_core::Metadata; + /// Extracts metadata from given params. + fn read_metadata(&self, origin: Option, user_agent: Option) -> Self::Metadata; } pub struct MetaExtractor { - extractor: T, + extractor: T, } impl MetaExtractor { - pub fn new(extractor: T) -> Self { - MetaExtractor { extractor } - } + pub fn new(extractor: T) -> Self { + MetaExtractor { extractor } + } } -impl http::MetaExtractor for MetaExtractor where - T: HttpMetaExtractor, - M: jsonrpc_core::Metadata, +impl http::MetaExtractor for MetaExtractor +where + T: HttpMetaExtractor, + M: jsonrpc_core::Metadata, { - fn read_metadata(&self, req: &hyper::Request) -> M { - let as_string = |header: Option<&hyper::header::HeaderValue>| { - header.and_then(|val| val.to_str().ok().map(ToOwned::to_owned)) - }; + fn read_metadata(&self, req: &hyper::Request) -> M { + let as_string = |header: Option<&hyper::header::HeaderValue>| { + header.and_then(|val| val.to_str().ok().map(ToOwned::to_owned)) + }; - let origin = as_string(req.headers().get("origin")); - let user_agent = as_string(req.headers().get("user-agent")); - self.extractor.read_metadata(origin, user_agent) - } + let origin = as_string(req.headers().get("origin")); + let user_agent = as_string(req.headers().get("user-agent")); + self.extractor.read_metadata(origin, user_agent) + } } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 6c0ce2a01..29e57209e 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -61,11 +61,9 @@ extern crate jsonrpc_ipc_server as ipc; extern crate jsonrpc_pubsub; extern crate common_types as types; +extern crate eip_712; extern crate 
ethash; extern crate ethcore; -extern crate fastmap; -extern crate parity_bytes as bytes; -extern crate parity_crypto as crypto; extern crate ethcore_light as light; extern crate ethcore_logger; extern crate ethcore_miner as miner; @@ -75,12 +73,14 @@ extern crate ethcore_sync as sync; extern crate ethereum_types; extern crate ethkey; extern crate ethstore; +extern crate fastmap; extern crate fetch; extern crate keccak_hash as hash; +extern crate parity_bytes as bytes; +extern crate parity_crypto as crypto; extern crate parity_runtime; extern crate parity_updater as updater; extern crate parity_version as version; -extern crate eip_712; extern crate rlp; extern crate stats; extern crate tempdir; @@ -124,20 +124,24 @@ pub mod v1; pub mod tests; -pub use jsonrpc_core::{FutureOutput, FutureResult, FutureResponse, FutureRpcResult}; -pub use jsonrpc_pubsub::Session as PubSubSession; -pub use ipc::{Server as IpcServer, MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext}; pub use http::{ - hyper, - RequestMiddleware, RequestMiddlewareAction, - AccessControlAllowOrigin, Host, DomainsValidation, cors::AccessControlAllowHeaders + cors::AccessControlAllowHeaders, hyper, AccessControlAllowOrigin, DomainsValidation, Host, + RequestMiddleware, RequestMiddlewareAction, }; +pub use ipc::{ + MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext, Server as IpcServer, +}; +pub use jsonrpc_core::{FutureOutput, FutureResponse, FutureResult, FutureRpcResult}; +pub use jsonrpc_pubsub::Session as PubSubSession; -pub use v1::{NetworkSettings, Metadata, Origin, informant, dispatch, signer}; -pub use v1::block_import::{is_major_importing, is_major_importing_or_waiting}; -pub use v1::extractors::{RpcExtractor, WsExtractor, WsStats, WsDispatcher}; pub use authcodes::{AuthCodes, TimeProvider}; pub use http_common::HttpMetaExtractor; +pub use v1::{ + block_import::{is_major_importing, is_major_importing_or_waiting}, + dispatch, + extractors::{RpcExtractor, 
WsDispatcher, WsExtractor, WsStats}, + informant, signer, Metadata, NetworkSettings, Origin, +}; use std::net::SocketAddr; @@ -146,101 +150,100 @@ pub type HttpServer = http::Server; /// Start http server asynchronously and returns result with `Server` handle on success or an error. pub fn start_http( - addr: &SocketAddr, - cors_domains: http::DomainsValidation, - allowed_hosts: http::DomainsValidation, - handler: H, - extractor: T, - threads: usize, - max_payload: usize, - keep_alive: bool, -) -> ::std::io::Result where - M: jsonrpc_core::Metadata, - S: jsonrpc_core::Middleware, - H: Into>, - T: HttpMetaExtractor, + addr: &SocketAddr, + cors_domains: http::DomainsValidation, + allowed_hosts: http::DomainsValidation, + handler: H, + extractor: T, + threads: usize, + max_payload: usize, + keep_alive: bool, +) -> ::std::io::Result +where + M: jsonrpc_core::Metadata, + S: jsonrpc_core::Middleware, + H: Into>, + T: HttpMetaExtractor, { - let extractor = http_common::MetaExtractor::new(extractor); - Ok(http::ServerBuilder::with_meta_extractor(handler, extractor) - .keep_alive(keep_alive) - .threads(threads) - .cors(cors_domains) - .allowed_hosts(allowed_hosts) - .health_api(("/api/health", "parity_nodeStatus")) - .cors_allow_headers(AccessControlAllowHeaders::Any) - .max_request_body_size(max_payload * 1024 * 1024) - .start_http(addr)?) + let extractor = http_common::MetaExtractor::new(extractor); + Ok(http::ServerBuilder::with_meta_extractor(handler, extractor) + .keep_alive(keep_alive) + .threads(threads) + .cors(cors_domains) + .allowed_hosts(allowed_hosts) + .health_api(("/api/health", "parity_nodeStatus")) + .cors_allow_headers(AccessControlAllowHeaders::Any) + .max_request_body_size(max_payload * 1024 * 1024) + .start_http(addr)?) } /// Same as `start_http`, but takes an additional `middleware` parameter that is introduced as a /// hyper middleware. 
pub fn start_http_with_middleware( - addr: &SocketAddr, - cors_domains: http::DomainsValidation, - allowed_hosts: http::DomainsValidation, - handler: H, - extractor: T, - middleware: R, - threads: usize, - max_payload: usize, - keep_alive: bool, -) -> ::std::io::Result where - M: jsonrpc_core::Metadata, - S: jsonrpc_core::Middleware, - H: Into>, - T: HttpMetaExtractor, - R: RequestMiddleware, + addr: &SocketAddr, + cors_domains: http::DomainsValidation, + allowed_hosts: http::DomainsValidation, + handler: H, + extractor: T, + middleware: R, + threads: usize, + max_payload: usize, + keep_alive: bool, +) -> ::std::io::Result +where + M: jsonrpc_core::Metadata, + S: jsonrpc_core::Middleware, + H: Into>, + T: HttpMetaExtractor, + R: RequestMiddleware, { - let extractor = http_common::MetaExtractor::new(extractor); - Ok(http::ServerBuilder::with_meta_extractor(handler, extractor) - .keep_alive(keep_alive) - .threads(threads) - .cors(cors_domains) - .allowed_hosts(allowed_hosts) - .cors_allow_headers(AccessControlAllowHeaders::Any) - .max_request_body_size(max_payload * 1024 * 1024) - .request_middleware(middleware) - .start_http(addr)?) + let extractor = http_common::MetaExtractor::new(extractor); + Ok(http::ServerBuilder::with_meta_extractor(handler, extractor) + .keep_alive(keep_alive) + .threads(threads) + .cors(cors_domains) + .allowed_hosts(allowed_hosts) + .cors_allow_headers(AccessControlAllowHeaders::Any) + .max_request_body_size(max_payload * 1024 * 1024) + .request_middleware(middleware) + .start_http(addr)?) } /// Start ipc server asynchronously and returns result with `Server` handle on success or an error. 
-pub fn start_ipc( - addr: &str, - handler: H, - extractor: T, -) -> ::std::io::Result where - M: jsonrpc_core::Metadata, - S: jsonrpc_core::Middleware, - H: Into>, - T: IpcMetaExtractor, +pub fn start_ipc(addr: &str, handler: H, extractor: T) -> ::std::io::Result +where + M: jsonrpc_core::Metadata, + S: jsonrpc_core::Middleware, + H: Into>, + T: IpcMetaExtractor, { - ipc::ServerBuilder::with_meta_extractor(handler, extractor) - .start(addr) + ipc::ServerBuilder::with_meta_extractor(handler, extractor).start(addr) } /// Start WS server and return `Server` handle. pub fn start_ws( - addr: &SocketAddr, - handler: H, - allowed_origins: ws::DomainsValidation, - allowed_hosts: ws::DomainsValidation, - max_connections: usize, - extractor: T, - middleware: V, - stats: U, -) -> Result where - M: jsonrpc_core::Metadata, - S: jsonrpc_core::Middleware, - H: Into>, - T: ws::MetaExtractor, - U: ws::SessionStats, - V: ws::RequestMiddleware, + addr: &SocketAddr, + handler: H, + allowed_origins: ws::DomainsValidation, + allowed_hosts: ws::DomainsValidation, + max_connections: usize, + extractor: T, + middleware: V, + stats: U, +) -> Result +where + M: jsonrpc_core::Metadata, + S: jsonrpc_core::Middleware, + H: Into>, + T: ws::MetaExtractor, + U: ws::SessionStats, + V: ws::RequestMiddleware, { - ws::ServerBuilder::with_meta_extractor(handler, extractor) - .request_middleware(middleware) - .allowed_origins(allowed_origins) - .allowed_hosts(allowed_hosts) - .max_connections(max_connections) - .session_stats(stats) - .start(addr) + ws::ServerBuilder::with_meta_extractor(handler, extractor) + .request_middleware(middleware) + .allowed_origins(allowed_origins) + .allowed_hosts(allowed_hosts) + .max_connections(max_connections) + .session_stats(stats) + .start(addr) } diff --git a/rpc/src/tests/helpers.rs b/rpc/src/tests/helpers.rs index 301d77e91..ec7779de0 100644 --- a/rpc/src/tests/helpers.rs +++ b/rpc/src/tests/helpers.rs @@ -14,8 +14,10 @@ // You should have received a copy of the 
GNU General Public License // along with Parity Ethereum. If not, see . -use std::ops::{Deref, DerefMut}; -use std::path::PathBuf; +use std::{ + ops::{Deref, DerefMut}, + path::PathBuf, +}; use tempdir::TempDir; use parity_runtime::{Runtime, TaskExecutor}; @@ -24,64 +26,65 @@ use authcodes::AuthCodes; /// Server with event loop pub struct Server { - /// Server - pub server: T, - /// RPC Event Loop - pub event_loop: Runtime, + /// Server + pub server: T, + /// RPC Event Loop + pub event_loop: Runtime, } impl Server { - pub fn new(f: F) -> Server where - F: FnOnce(TaskExecutor) -> T, - { - let event_loop = Runtime::with_thread_count(1); - let remote = event_loop.raw_executor(); + pub fn new(f: F) -> Server + where + F: FnOnce(TaskExecutor) -> T, + { + let event_loop = Runtime::with_thread_count(1); + let remote = event_loop.raw_executor(); - Server { - server: f(remote), - event_loop, - } - } + Server { + server: f(remote), + event_loop, + } + } } impl Deref for Server { - type Target = T; + type Target = T; - fn deref(&self) -> &Self::Target { - &self.server - } + fn deref(&self) -> &Self::Target { + &self.server + } } /// Struct representing authcodes pub struct GuardedAuthCodes { - authcodes: AuthCodes, - _tempdir: TempDir, - /// The path to the mock authcodes - pub path: PathBuf, + authcodes: AuthCodes, + _tempdir: TempDir, + /// The path to the mock authcodes + pub path: PathBuf, } impl Default for GuardedAuthCodes { - fn default() -> Self { - let tempdir = TempDir::new("").unwrap(); - let path = tempdir.path().join("file"); + fn default() -> Self { + let tempdir = TempDir::new("").unwrap(); + let path = tempdir.path().join("file"); - GuardedAuthCodes { - authcodes: AuthCodes::from_file(&path).unwrap(), - _tempdir: tempdir, - path, - } - } + GuardedAuthCodes { + authcodes: AuthCodes::from_file(&path).unwrap(), + _tempdir: tempdir, + path, + } + } } impl Deref for GuardedAuthCodes { - type Target = AuthCodes; - fn deref(&self) -> &Self::Target { - &self.authcodes 
- } + type Target = AuthCodes; + fn deref(&self) -> &Self::Target { + &self.authcodes + } } impl DerefMut for GuardedAuthCodes { - fn deref_mut(&mut self) -> &mut AuthCodes { - &mut self.authcodes - } + fn deref_mut(&mut self) -> &mut AuthCodes { + &mut self.authcodes + } } diff --git a/rpc/src/tests/http_client.rs b/rpc/src/tests/http_client.rs index 0588c791e..c23056bd5 100644 --- a/rpc/src/tests/http_client.rs +++ b/rpc/src/tests/http_client.rs @@ -14,119 +14,134 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::thread; -use std::time::Duration; -use std::io::{self, Read, Write}; -use std::str::{self, Lines}; -use std::net::{TcpStream, SocketAddr}; +use std::{ + io::{self, Read, Write}, + net::{SocketAddr, TcpStream}, + str::{self, Lines}, + thread, + time::Duration, +}; pub struct Response { - pub status: String, - pub headers: Vec, - pub headers_raw: String, - pub body: String, + pub status: String, + pub headers: Vec, + pub headers_raw: String, + pub body: String, } impl Response { - pub fn assert_header(&self, header: &str, value: &str) { - let header = format!("{}: {}", header, value); - assert!(self.headers.iter().any(|h| h == &header), "Couldn't find header {} in {:?}", header, &self.headers) - } + pub fn assert_header(&self, header: &str, value: &str) { + let header = format!("{}: {}", header, value); + assert!( + self.headers.iter().any(|h| h == &header), + "Couldn't find header {} in {:?}", + header, + &self.headers + ) + } - pub fn assert_status(&self, status: &str) { - assert_eq!(self.status, status.to_owned(), "Got unexpected code. Body: {:?}", self.body); - } + pub fn assert_status(&self, status: &str) { + assert_eq!( + self.status, + status.to_owned(), + "Got unexpected code. 
Body: {:?}", + self.body + ); + } - pub fn assert_security_headers_present(&self, port: Option) { - assert_security_headers_present(&self.headers, port) - } + pub fn assert_security_headers_present(&self, port: Option) { + assert_security_headers_present(&self.headers, port) + } } pub fn read_block(lines: &mut Lines, all: bool) -> String { - let mut block = String::new(); - loop { - let line = lines.next(); - match line { - None => break, - Some("") if !all => break, - Some(v) => { - block.push_str(v); - block.push_str("\n"); - }, - } - } - block + let mut block = String::new(); + loop { + let line = lines.next(); + match line { + None => break, + Some("") if !all => break, + Some(v) => { + block.push_str(v); + block.push_str("\n"); + } + } + } + block } fn connect(address: &SocketAddr) -> TcpStream { - let mut retries = 0; - let mut last_error = None; - while retries < 10 { - retries += 1; + let mut retries = 0; + let mut last_error = None; + while retries < 10 { + retries += 1; - let res = TcpStream::connect(address); - match res { - Ok(stream) => { - return stream; - }, - Err(e) => { - last_error = Some(e); - thread::sleep(Duration::from_millis(retries * 10)); - } - } - } - panic!("Unable to connect to the server. Last error: {:?}", last_error); + let res = TcpStream::connect(address); + match res { + Ok(stream) => { + return stream; + } + Err(e) => { + last_error = Some(e); + thread::sleep(Duration::from_millis(retries * 10)); + } + } + } + panic!( + "Unable to connect to the server. 
Last error: {:?}", + last_error + ); } pub fn request(address: &SocketAddr, request: &str) -> Response { - let mut req = connect(address); - req.set_read_timeout(Some(Duration::from_secs(2))).unwrap(); - req.write_all(request.as_bytes()).unwrap(); + let mut req = connect(address); + req.set_read_timeout(Some(Duration::from_secs(2))).unwrap(); + req.write_all(request.as_bytes()).unwrap(); - let mut response = Vec::new(); - loop { - let mut chunk = [0; 32 *1024]; - match req.read(&mut chunk) { - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break, - Err(err) => panic!("Unable to read response: {:?}", err), - Ok(0) => break, - Ok(read) => response.extend_from_slice(&chunk[..read]), - } - } + let mut response = Vec::new(); + loop { + let mut chunk = [0; 32 * 1024]; + match req.read(&mut chunk) { + Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break, + Err(err) => panic!("Unable to read response: {:?}", err), + Ok(0) => break, + Ok(read) => response.extend_from_slice(&chunk[..read]), + } + } - let response = String::from_utf8_lossy(&response).into_owned(); - let mut lines = response.lines(); - let status = lines.next().expect("Expected a response").to_owned(); - let headers_raw = read_block(&mut lines, false); - let headers = headers_raw.split('\n').map(ToOwned::to_owned).collect(); - let body = read_block(&mut lines, true); + let response = String::from_utf8_lossy(&response).into_owned(); + let mut lines = response.lines(); + let status = lines.next().expect("Expected a response").to_owned(); + let headers_raw = read_block(&mut lines, false); + let headers = headers_raw.split('\n').map(ToOwned::to_owned).collect(); + let body = read_block(&mut lines, true); - Response { - status, - headers, - headers_raw, - body, - } + Response { + status, + headers, + headers_raw, + body, + } } /// Check if all required security headers are present pub fn assert_security_headers_present(headers: &[String], port: Option) { - if port.is_none() { - assert!( - 
headers.iter().any(|header| header.as_str() == "X-Frame-Options: SAMEORIGIN") - "X-Frame-Options: SAMEORIGIN missing: {:?}", headers - ); - } - assert!( - headers.iter().any(|header| header.as_str() == "X-XSS-Protection: 1; mode=block") - "X-XSS-Protection missing: {:?}", headers - ); - assert!( - headers.iter().any(|header| header.as_str() == "X-Content-Type-Options: nosniff") - "X-Content-Type-Options missing: {:?}", headers - ); - assert!( - headers.iter().any(|header| header.starts_with("Content-Security-Policy: ")) - "Content-Security-Policy missing: {:?}", headers - ) + if port.is_none() { + assert!( + headers.iter().any(|header| header.as_str() == "X-Frame-Options: SAMEORIGIN") + "X-Frame-Options: SAMEORIGIN missing: {:?}", headers + ); + } + assert!( + headers.iter().any(|header| header.as_str() == "X-XSS-Protection: 1; mode=block") + "X-XSS-Protection missing: {:?}", headers + ); + assert!( + headers.iter().any(|header| header.as_str() == "X-Content-Type-Options: nosniff") + "X-Content-Type-Options missing: {:?}", headers + ); + assert!( + headers.iter().any(|header| header.starts_with("Content-Security-Policy: ")) + "Content-Security-Policy missing: {:?}", headers + ) } diff --git a/rpc/src/tests/mod.rs b/rpc/src/tests/mod.rs index a73d69096..478a7eb20 100644 --- a/rpc/src/tests/mod.rs +++ b/rpc/src/tests/mod.rs @@ -18,5 +18,6 @@ mod helpers; mod http_client; -#[cfg(test)] mod rpc; +#[cfg(test)] +mod rpc; pub mod ws; diff --git a/rpc/src/tests/rpc.rs b/rpc/src/tests/rpc.rs index 99498c3e5..921dad0f2 100644 --- a/rpc/src/tests/rpc.rs +++ b/rpc/src/tests/rpc.rs @@ -14,68 +14,70 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use jsonrpc_core::MetaIoHandler; use http::{self, hyper}; +use jsonrpc_core::MetaIoHandler; -use {HttpServer}; -use tests::helpers::Server; -use tests::http_client; +use tests::{helpers::Server, http_client}; use v1::{extractors, Metadata}; +use HttpServer; fn serve(handler: Option>) -> Server { - let address = "127.0.0.1:0".parse().unwrap(); - let handler = handler.unwrap_or_default(); + let address = "127.0.0.1:0".parse().unwrap(); + let handler = handler.unwrap_or_default(); - Server::new(|_remote| ::start_http_with_middleware( - &address, - http::DomainsValidation::Disabled, - http::DomainsValidation::Disabled, - handler, - extractors::RpcExtractor, - |request: hyper::Request| { - http::RequestMiddlewareAction::Proceed { - should_continue_on_invalid_cors: false, - request, - } - }, - 1, - 5, - false, - ).unwrap()) + Server::new(|_remote| { + ::start_http_with_middleware( + &address, + http::DomainsValidation::Disabled, + http::DomainsValidation::Disabled, + handler, + extractors::RpcExtractor, + |request: hyper::Request| http::RequestMiddlewareAction::Proceed { + should_continue_on_invalid_cors: false, + request, + }, + 1, + 5, + false, + ) + .unwrap() + }) } /// Test a single request to running server fn request(server: Server, request: &str) -> http_client::Response { - http_client::request(server.server.address(), request) + http_client::request(server.server.address(), request) } #[cfg(test)] mod tests { - use jsonrpc_core::{MetaIoHandler, Value}; - use v1::Metadata; - use super::{request, Server}; + use super::{request, Server}; + use jsonrpc_core::{MetaIoHandler, Value}; + use v1::Metadata; - fn serve() -> (Server<::HttpServer>, ::std::net::SocketAddr) { - let mut io = MetaIoHandler::default(); - io.add_method_with_meta("hello", |_, meta: Metadata| { - Ok(Value::String(format!("{}", meta.origin))) - }); - let server = super::serve(Some(io)); - let address = server.server.address().to_owned(); + fn serve() -> (Server<::HttpServer>, ::std::net::SocketAddr) 
{ + let mut io = MetaIoHandler::default(); + io.add_method_with_meta("hello", |_, meta: Metadata| { + Ok(Value::String(format!("{}", meta.origin))) + }); + let server = super::serve(Some(io)); + let address = server.server.address().to_owned(); - (server, address) - } + (server, address) + } - #[test] - fn should_extract_rpc_origin() { - // given - let (server, address) = serve(); + #[test] + fn should_extract_rpc_origin() { + // given + let (server, address) = serve(); - // when - let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; - let expected = "{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / unknown agent via RPC\",\"id\":1}\n"; - let res = request(server, - &format!("\ + // when + let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; + let expected = "{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / unknown agent via RPC\",\"id\":1}\n"; + let res = request( + server, + &format!( + "\ POST / HTTP/1.1\r\n\ Host: {}\r\n\ Content-Type: application/json\r\n\ @@ -83,24 +85,31 @@ mod tests { Connection: close\r\n\ \r\n\ {} - ", address, req.len(), req) - ); + ", + address, + req.len(), + req + ), + ); - // then - res.assert_status("HTTP/1.1 200 OK"); - assert_eq!(res.body, expected); - } + // then + res.assert_status("HTTP/1.1 200 OK"); + assert_eq!(res.body, expected); + } - #[test] - fn should_extract_rpc_origin_with_service() { - // given - let (server, address) = serve(); + #[test] + fn should_extract_rpc_origin_with_service() { + // given + let (server, address) = serve(); - // when - let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; - let expected = "{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / curl/7.16.3 via RPC\",\"id\":1}\n"; - let res = request(server, - &format!("\ + // when + let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; + let expected = + "{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / curl/7.16.3 via RPC\",\"id\":1}\n"; + let res = request( + server, + 
&format!( + "\ POST / HTTP/1.1\r\n\ Host: {}\r\n\ Content-Type: application/json\r\n\ @@ -109,23 +118,29 @@ mod tests { User-Agent: curl/7.16.3\r\n\ \r\n\ {} - ", address, req.len(), req) - ); + ", + address, + req.len(), + req + ), + ); - // then - res.assert_status("HTTP/1.1 200 OK"); - assert_eq!(res.body, expected); - } + // then + res.assert_status("HTTP/1.1 200 OK"); + assert_eq!(res.body, expected); + } - #[test] - fn should_respond_valid_to_any_requested_header() { - // given - let (server, address) = serve(); - let headers = "Something, Anything, Xyz, 123, _?"; + #[test] + fn should_respond_valid_to_any_requested_header() { + // given + let (server, address) = serve(); + let headers = "Something, Anything, Xyz, 123, _?"; - // when - let res = request(server, - &format!("\ + // when + let res = request( + server, + &format!( + "\ OPTIONS / HTTP/1.1\r\n\ Host: {}\r\n\ Origin: http://parity.io\r\n\ @@ -134,13 +149,18 @@ mod tests { Connection: close\r\n\ Access-Control-Request-Headers: {}\r\n\ \r\n\ - ", address, headers) - ); - - // then - assert_eq!(res.status, "HTTP/1.1 200 OK".to_owned()); - let expected = format!("access-control-allow-headers: {}", headers); - assert!(res.headers.contains(&expected), "Headers missing in {:?}", res.headers); - } + ", + address, headers + ), + ); + // then + assert_eq!(res.status, "HTTP/1.1 200 OK".to_owned()); + let expected = format!("access-control-allow-headers: {}", headers); + assert!( + res.headers.contains(&expected), + "Headers missing in {:?}", + res.headers + ); + } } diff --git a/rpc/src/tests/ws.rs b/rpc/src/tests/ws.rs index 3b6078882..952048ecb 100644 --- a/rpc/src/tests/ws.rs +++ b/rpc/src/tests/ws.rs @@ -21,71 +21,82 @@ use std::sync::Arc; use jsonrpc_core::MetaIoHandler; use ws; +use tests::{ + helpers::{GuardedAuthCodes, Server}, + http_client, +}; use v1::{extractors, informant}; -use tests::helpers::{GuardedAuthCodes, Server}; -use tests::http_client; /// Setup a mock signer for tests pub fn serve() -> 
(Server, usize, GuardedAuthCodes) { - let address = "127.0.0.1:0".parse().unwrap(); - let io = MetaIoHandler::default(); - let authcodes = GuardedAuthCodes::default(); - let stats = Arc::new(informant::RpcStats::default()); + let address = "127.0.0.1:0".parse().unwrap(); + let io = MetaIoHandler::default(); + let authcodes = GuardedAuthCodes::default(); + let stats = Arc::new(informant::RpcStats::default()); - let res = Server::new(|_| ::start_ws( - &address, - io, - ws::DomainsValidation::Disabled, - ws::DomainsValidation::Disabled, - 5, - extractors::WsExtractor::new(Some(&authcodes.path)), - extractors::WsExtractor::new(Some(&authcodes.path)), - extractors::WsStats::new(stats), - ).unwrap()); - let port = res.addr().port() as usize; + let res = Server::new(|_| { + ::start_ws( + &address, + io, + ws::DomainsValidation::Disabled, + ws::DomainsValidation::Disabled, + 5, + extractors::WsExtractor::new(Some(&authcodes.path)), + extractors::WsExtractor::new(Some(&authcodes.path)), + extractors::WsStats::new(stats), + ) + .unwrap() + }); + let port = res.addr().port() as usize; - (res, port, authcodes) + (res, port, authcodes) } /// Test a single request to running server pub fn request(server: Server, request: &str) -> http_client::Response { - http_client::request(server.server.addr(), request) + http_client::request(server.server.addr(), request) } #[cfg(test)] mod testing { - use std::time; - use hash::keccak; - use super::{serve, request, http_client}; + use super::{http_client, request, serve}; + use hash::keccak; + use std::time; - #[test] - fn should_not_redirect_to_parity_host() { - // given - let (server, port, _) = serve(); + #[test] + fn should_not_redirect_to_parity_host() { + // given + let (server, port, _) = serve(); - // when - let response = request(server, - &format!("\ + // when + let response = request( + server, + &format!( + "\ GET / HTTP/1.1\r\n\ Host: 127.0.0.1:{}\r\n\ Connection: close\r\n\ \r\n\ {{}} - ", port) - ); + ", + port + ), + ); - // 
then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - } + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + } - #[test] - fn should_block_if_authorization_is_incorrect() { - // given - let (server, port, _) = serve(); + #[test] + fn should_block_if_authorization_is_incorrect() { + // given + let (server, port, _) = serve(); - // when - let response = request(server, - &format!("\ + // when + let response = request( + server, + &format!( + "\ GET / HTTP/1.1\r\n\ Host: 127.0.0.1:{}\r\n\ Connection: Upgrade\r\n\ @@ -94,26 +105,30 @@ mod testing { Sec-WebSocket-Version: 13\r\n\ \r\n\ {{}} - ", port) - ); + ", + port + ), + ); - // then - assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); - http_client::assert_security_headers_present(&response.headers, None); - } + // then + assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); + http_client::assert_security_headers_present(&response.headers, None); + } - #[cfg(not(target_os = "windows"))] - #[test] - fn should_allow_if_authorization_is_correct() { - // given - let (server, port, mut authcodes) = serve(); - let code = authcodes.generate_new().unwrap().replace("-", ""); - authcodes.to_file(&authcodes.path).unwrap(); - let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); + #[cfg(not(target_os = "windows"))] + #[test] + fn should_allow_if_authorization_is_correct() { + // given + let (server, port, mut authcodes) = serve(); + let code = authcodes.generate_new().unwrap().replace("-", ""); + authcodes.to_file(&authcodes.path).unwrap(); + let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); - // when - let response = request(server, - &format!("\ + // when + let response = request( + server, + &format!( + "\ GET / HTTP/1.1\r\n\ Host: 127.0.0.1:{}\r\n\ Connection: Close\r\n\ @@ -123,27 +138,32 @@ mod testing { \r\n\ {{}} ", - port, - keccak(format!("{}:{}", code, timestamp)), - timestamp, - ) - ); + port, + keccak(format!("{}:{}", code, 
timestamp)), + timestamp, + ), + ); - // then - assert_eq!(response.status, "HTTP/1.1 101 Switching Protocols".to_owned()); - } + // then + assert_eq!( + response.status, + "HTTP/1.1 101 Switching Protocols".to_owned() + ); + } - #[test] - fn should_not_allow_initial_connection_even_once() { - // given - let (server, port, authcodes) = serve(); - let code = "initial"; - let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); - assert!(authcodes.is_empty()); + #[test] + fn should_not_allow_initial_connection_even_once() { + // given + let (server, port, authcodes) = serve(); + let code = "initial"; + let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); + assert!(authcodes.is_empty()); - // when - let response1 = http_client::request(server.addr(), - &format!("\ + // when + let response1 = http_client::request( + server.addr(), + &format!( + "\ GET / HTTP/1.1\r\n\ Host: 127.0.0.1:{}\r\n\ Connection: Close\r\n\ @@ -153,14 +173,14 @@ mod testing { \r\n\ {{}} ", - port, - keccak(format!("{}:{}", code, timestamp)), - timestamp, - ) - ); + port, + keccak(format!("{}:{}", code, timestamp)), + timestamp, + ), + ); - // then - assert_eq!(response1.status, "HTTP/1.1 403 Forbidden".to_owned()); - http_client::assert_security_headers_present(&response1.headers, None); - } + // then + assert_eq!(response1.status, "HTTP/1.1 403 Forbidden".to_owned()); + http_client::assert_security_headers_present(&response1.headers, None); + } } diff --git a/rpc/src/v1/extractors.rs b/rpc/src/v1/extractors.rs index d3384c2c1..c549b6cd3 100644 --- a/rpc/src/v1/extractors.rs +++ b/rpc/src/v1/extractors.rs @@ -16,249 +16,267 @@ //! Parity-specific metadata extractors. 
-use std::path::{Path, PathBuf}; -use std::sync::Arc; +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; use authcodes; +use ethereum_types::H256; use http_common::HttpMetaExtractor; use ipc; use jsonrpc_core as core; use jsonrpc_core::futures::future::Either; use jsonrpc_pubsub::Session; use ws; -use ethereum_types::H256; -use v1::{Metadata, Origin}; -use v1::informant::RpcStats; +use v1::{informant::RpcStats, Metadata, Origin}; /// Common HTTP & IPC metadata extractor. pub struct RpcExtractor; impl HttpMetaExtractor for RpcExtractor { - type Metadata = Metadata; + type Metadata = Metadata; - fn read_metadata(&self, origin: Option, user_agent: Option) -> Metadata { - Metadata { - origin: Origin::Rpc( - format!("{} / {}", - origin.unwrap_or_else(|| "unknown origin".to_string()), - user_agent.unwrap_or_else(|| "unknown agent".to_string())) - ), - session: None, - } - } + fn read_metadata(&self, origin: Option, user_agent: Option) -> Metadata { + Metadata { + origin: Origin::Rpc(format!( + "{} / {}", + origin.unwrap_or_else(|| "unknown origin".to_string()), + user_agent.unwrap_or_else(|| "unknown agent".to_string()) + )), + session: None, + } + } } impl ipc::MetaExtractor for RpcExtractor { - fn extract(&self, req: &ipc::RequestContext) -> Metadata { - Metadata { - origin: Origin::Ipc(req.session_id.into()), - session: Some(Arc::new(Session::new(req.sender.clone()))), - } - } + fn extract(&self, req: &ipc::RequestContext) -> Metadata { + Metadata { + origin: Origin::Ipc(req.session_id.into()), + session: Some(Arc::new(Session::new(req.sender.clone()))), + } + } } /// WebSockets server metadata extractor and request middleware. pub struct WsExtractor { - authcodes_path: Option, + authcodes_path: Option, } impl WsExtractor { - /// Creates new `WsExtractor` with given authcodes path. - pub fn new(path: Option<&Path>) -> Self { - WsExtractor { - authcodes_path: path.map(ToOwned::to_owned), - } - } + /// Creates new `WsExtractor` with given authcodes path. 
+ pub fn new(path: Option<&Path>) -> Self { + WsExtractor { + authcodes_path: path.map(ToOwned::to_owned), + } + } } impl ws::MetaExtractor for WsExtractor { - fn extract(&self, req: &ws::RequestContext) -> Metadata { - let id = req.session_id as u64; + fn extract(&self, req: &ws::RequestContext) -> Metadata { + let id = req.session_id as u64; - let origin = match self.authcodes_path { - Some(ref path) => { - let authorization = req.protocols.get(0).and_then(|p| auth_token_hash(&path, p, true)); - match authorization { - Some(id) => Origin::Signer { session: id }, - None => Origin::Ws { session: id.into() }, - } - }, - None => Origin::Ws { session: id.into() }, - }; - let session = Some(Arc::new(Session::new(req.sender()))); - Metadata { - origin, - session, - } - } + let origin = match self.authcodes_path { + Some(ref path) => { + let authorization = req + .protocols + .get(0) + .and_then(|p| auth_token_hash(&path, p, true)); + match authorization { + Some(id) => Origin::Signer { session: id }, + None => Origin::Ws { session: id.into() }, + } + } + None => Origin::Ws { session: id.into() }, + }; + let session = Some(Arc::new(Session::new(req.sender()))); + Metadata { origin, session } + } } impl ws::RequestMiddleware for WsExtractor { - fn process(&self, req: &ws::ws::Request) -> ws::MiddlewareAction { - use self::ws::ws::Response; + fn process(&self, req: &ws::ws::Request) -> ws::MiddlewareAction { + use self::ws::ws::Response; - // Reply with 200 OK to HEAD requests. - if req.method() == "HEAD" { - let mut response = Response::new(200, "OK", vec![]); - add_security_headers(&mut response); - return Some(response).into(); - } + // Reply with 200 OK to HEAD requests. + if req.method() == "HEAD" { + let mut response = Response::new(200, "OK", vec![]); + add_security_headers(&mut response); + return Some(response).into(); + } - // Display WS info. 
- if req.header("sec-websocket-key").is_none() { - let mut response = Response::new(200, "OK", b"WebSocket interface is active. Open WS connection to access RPC.".to_vec()); - add_security_headers(&mut response); - return Some(response).into(); - } + // Display WS info. + if req.header("sec-websocket-key").is_none() { + let mut response = Response::new( + 200, + "OK", + b"WebSocket interface is active. Open WS connection to access RPC.".to_vec(), + ); + add_security_headers(&mut response); + return Some(response).into(); + } - // If protocol is provided it needs to be valid. - let protocols = req.protocols().ok().unwrap_or_else(Vec::new); - if let Some(ref path) = self.authcodes_path { - if protocols.len() == 1 { - let authorization = auth_token_hash(&path, protocols[0], false); - if authorization.is_none() { - warn!( - "Blocked connection from {} using invalid token.", - req.header("origin").and_then(|e| ::std::str::from_utf8(e).ok()).unwrap_or("Unknown Origin") - ); - let mut response = Response::new(403, "Forbidden", vec![]); - add_security_headers(&mut response); - return Some(response).into(); - } - } - } + // If protocol is provided it needs to be valid. + let protocols = req.protocols().ok().unwrap_or_else(Vec::new); + if let Some(ref path) = self.authcodes_path { + if protocols.len() == 1 { + let authorization = auth_token_hash(&path, protocols[0], false); + if authorization.is_none() { + warn!( + "Blocked connection from {} using invalid token.", + req.header("origin") + .and_then(|e| ::std::str::from_utf8(e).ok()) + .unwrap_or("Unknown Origin") + ); + let mut response = Response::new(403, "Forbidden", vec![]); + add_security_headers(&mut response); + return Some(response).into(); + } + } + } - // Otherwise just proceed. - ws::MiddlewareAction::Proceed - } + // Otherwise just proceed. 
+ ws::MiddlewareAction::Proceed + } } fn add_security_headers(res: &mut ws::ws::Response) { - let headers = res.headers_mut(); - headers.push(("X-Frame-Options".into(), b"SAMEORIGIN".to_vec())); - headers.push(("X-XSS-Protection".into(), b"1; mode=block".to_vec())); - headers.push(("X-Content-Type-Options".into(), b"nosniff".to_vec())); - headers.push(("Content-Security-Policy".into(), - b"default-src 'self';form-action 'none';block-all-mixed-content;sandbox allow-scripts;".to_vec() - )); + let headers = res.headers_mut(); + headers.push(("X-Frame-Options".into(), b"SAMEORIGIN".to_vec())); + headers.push(("X-XSS-Protection".into(), b"1; mode=block".to_vec())); + headers.push(("X-Content-Type-Options".into(), b"nosniff".to_vec())); + headers.push(( + "Content-Security-Policy".into(), + b"default-src 'self';form-action 'none';block-all-mixed-content;sandbox allow-scripts;" + .to_vec(), + )); } fn auth_token_hash(codes_path: &Path, protocol: &str, save_file: bool) -> Option { - let mut split = protocol.split('_'); - let auth = split.next().and_then(|v| v.parse().ok()); - let time = split.next().and_then(|v| u64::from_str_radix(v, 10).ok()); + let mut split = protocol.split('_'); + let auth = split.next().and_then(|v| v.parse().ok()); + let time = split.next().and_then(|v| u64::from_str_radix(v, 10).ok()); - if let (Some(auth), Some(time)) = (auth, time) { - // Check if the code is valid - return authcodes::AuthCodes::from_file(codes_path) - .ok() - .and_then(|mut codes| { - // remove old tokens - codes.clear_garbage(); + if let (Some(auth), Some(time)) = (auth, time) { + // Check if the code is valid + return authcodes::AuthCodes::from_file(codes_path) + .ok() + .and_then(|mut codes| { + // remove old tokens + codes.clear_garbage(); - let res = codes.is_valid(&auth, time); + let res = codes.is_valid(&auth, time); - if save_file { - // make sure to save back authcodes - it might have been modified - if codes.to_file(codes_path).is_err() { - warn!(target: "signer", 
"Couldn't save authorization codes to file."); - } - } + if save_file { + // make sure to save back authcodes - it might have been modified + if codes.to_file(codes_path).is_err() { + warn!(target: "signer", "Couldn't save authorization codes to file."); + } + } - if res { - Some(auth) - } else { - None - } - }) - } + if res { + Some(auth) + } else { + None + } + }); + } - None + None } /// WebSockets RPC usage statistics. pub struct WsStats { - stats: Arc, + stats: Arc, } impl WsStats { - /// Creates new WS usage tracker. - pub fn new(stats: Arc) -> Self { - WsStats { - stats, - } - } + /// Creates new WS usage tracker. + pub fn new(stats: Arc) -> Self { + WsStats { stats } + } } impl ws::SessionStats for WsStats { - fn open_session(&self, _id: ws::SessionId) { - self.stats.open_session() - } + fn open_session(&self, _id: ws::SessionId) { + self.stats.open_session() + } - fn close_session(&self, _id: ws::SessionId) { - self.stats.close_session() - } + fn close_session(&self, _id: ws::SessionId) { + self.stats.close_session() + } } /// WebSockets middleware dispatching requests to different handles dependning on metadata. pub struct WsDispatcher> { - full_handler: core::MetaIoHandler, + full_handler: core::MetaIoHandler, } impl> WsDispatcher { - /// Create new `WsDispatcher` with given full handler. - pub fn new(full_handler: core::MetaIoHandler) -> Self { - WsDispatcher { - full_handler, - } - } + /// Create new `WsDispatcher` with given full handler. 
+ pub fn new(full_handler: core::MetaIoHandler) -> Self { + WsDispatcher { full_handler } + } } impl> core::Middleware for WsDispatcher { - type Future = Either< - core::FutureRpcResult, - core::FutureResponse, - >; - type CallFuture = core::middleware::NoopCallFuture; + type Future = Either, core::FutureResponse>; + type CallFuture = core::middleware::NoopCallFuture; - fn on_request(&self, request: core::Request, meta: Metadata, process: F) - -> Either - where - F: FnOnce(core::Request, Metadata) -> X, - X: core::futures::Future, Error=()> + Send + 'static, - { - let use_full = match &meta.origin { - Origin::Signer { .. } => true, - _ => false, - }; + fn on_request( + &self, + request: core::Request, + meta: Metadata, + process: F, + ) -> Either + where + F: FnOnce(core::Request, Metadata) -> X, + X: core::futures::Future, Error = ()> + Send + 'static, + { + let use_full = match &meta.origin { + Origin::Signer { .. } => true, + _ => false, + }; - if use_full { - Either::A(Either::A(self.full_handler.handle_rpc_request(request, meta))) - } else { - Either::B(process(request, meta)) - } - } + if use_full { + Either::A(Either::A( + self.full_handler.handle_rpc_request(request, meta), + )) + } else { + Either::B(process(request, meta)) + } + } } #[cfg(test)] mod tests { - use super::RpcExtractor; - use {HttpMetaExtractor, Origin}; + use super::RpcExtractor; + use HttpMetaExtractor; + use Origin; - #[test] - fn should_extract_rpc_origin() { - // given - let extractor = RpcExtractor; + #[test] + fn should_extract_rpc_origin() { + // given + let extractor = RpcExtractor; - // when - let meta1 = extractor.read_metadata(None, None); - let meta2 = extractor.read_metadata(None, Some("http://parity.io".to_owned())); - let meta3 = extractor.read_metadata(None, Some("http://parity.io".to_owned())); + // when + let meta1 = extractor.read_metadata(None, None); + let meta2 = extractor.read_metadata(None, Some("http://parity.io".to_owned())); + let meta3 = 
extractor.read_metadata(None, Some("http://parity.io".to_owned())); - // then - assert_eq!(meta1.origin, Origin::Rpc("unknown origin / unknown agent".into())); - assert_eq!(meta2.origin, Origin::Rpc("unknown origin / http://parity.io".into())); - assert_eq!(meta3.origin, Origin::Rpc("unknown origin / http://parity.io".into())); - } + // then + assert_eq!( + meta1.origin, + Origin::Rpc("unknown origin / unknown agent".into()) + ); + assert_eq!( + meta2.origin, + Origin::Rpc("unknown origin / http://parity.io".into()) + ); + assert_eq!( + meta3.origin, + Origin::Rpc("unknown origin / http://parity.io".into()) + ); + } } diff --git a/rpc/src/v1/helpers/block_import.rs b/rpc/src/v1/helpers/block_import.rs index 3fd5d9fff..514db46a3 100644 --- a/rpc/src/v1/helpers/block_import.rs +++ b/rpc/src/v1/helpers/block_import.rs @@ -21,47 +21,54 @@ use sync::SyncState; /// Check if client is during major sync or during block import and allows defining whether 'waiting for peers' should /// be considered a syncing state. 
-pub fn is_major_importing_or_waiting(sync_state: Option, queue_info: BlockQueueInfo, waiting_is_syncing_state: bool) -> bool { - let is_syncing_state = sync_state.map_or(false, |s| match s { - SyncState::Idle | SyncState::NewBlocks => false, - SyncState::WaitingPeers if !waiting_is_syncing_state => false, - _ => true, - }); - let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3; - is_verifying || is_syncing_state +pub fn is_major_importing_or_waiting( + sync_state: Option, + queue_info: BlockQueueInfo, + waiting_is_syncing_state: bool, +) -> bool { + let is_syncing_state = sync_state.map_or(false, |s| match s { + SyncState::Idle | SyncState::NewBlocks => false, + SyncState::WaitingPeers if !waiting_is_syncing_state => false, + _ => true, + }); + let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3; + is_verifying || is_syncing_state } /// Check if client is during major sync or during block import. pub fn is_major_importing(sync_state: Option, queue_info: BlockQueueInfo) -> bool { - is_major_importing_or_waiting(sync_state, queue_info, true) + is_major_importing_or_waiting(sync_state, queue_info, true) } #[cfg(test)] mod tests { - use ethcore::client::BlockQueueInfo; - use sync::SyncState; - use super::is_major_importing; + use super::is_major_importing; + use ethcore::client::BlockQueueInfo; + use sync::SyncState; - fn queue_info(unverified: usize, verified: usize) -> BlockQueueInfo { - BlockQueueInfo { - unverified_queue_size: unverified, - verified_queue_size: verified, - verifying_queue_size: 0, - max_queue_size: 1000, - max_mem_use: 1000, - mem_used: 500 - } - } + fn queue_info(unverified: usize, verified: usize) -> BlockQueueInfo { + BlockQueueInfo { + unverified_queue_size: unverified, + verified_queue_size: verified, + verifying_queue_size: 0, + max_queue_size: 1000, + max_mem_use: 1000, + mem_used: 500, + } + } - #[test] - fn is_still_verifying() { - assert!(!is_major_importing(None, 
queue_info(2, 1))); - assert!(is_major_importing(None, queue_info(2, 2))); - } + #[test] + fn is_still_verifying() { + assert!(!is_major_importing(None, queue_info(2, 1))); + assert!(is_major_importing(None, queue_info(2, 2))); + } - #[test] - fn is_synced_state() { - assert!(is_major_importing(Some(SyncState::Blocks), queue_info(0, 0))); - assert!(!is_major_importing(Some(SyncState::Idle), queue_info(0, 0))); - } + #[test] + fn is_synced_state() { + assert!(is_major_importing( + Some(SyncState::Blocks), + queue_info(0, 0) + )); + assert!(!is_major_importing(Some(SyncState::Idle), queue_info(0, 0))); + } } diff --git a/rpc/src/v1/helpers/deprecated.rs b/rpc/src/v1/helpers/deprecated.rs index 49e9d8b07..3e9d25672 100644 --- a/rpc/src/v1/helpers/deprecated.rs +++ b/rpc/src/v1/helpers/deprecated.rs @@ -19,15 +19,16 @@ //! Displays a warning but avoids spamming the log. use std::{ - collections::HashMap, - time::{Duration, Instant}, + collections::HashMap, + time::{Duration, Instant}, }; use parking_lot::RwLock; /// Deprecation messages pub mod msgs { - pub const ACCOUNTS: Option<&str> = Some("Account management is being phased out see #9997 for alternatives."); + pub const ACCOUNTS: Option<&str> = + Some("Account management is being phased out see #9997 for alternatives."); } type MethodName = &'static str; @@ -36,76 +37,84 @@ const PRINT_INTERVAL: Duration = Duration::from_secs(60); /// Displays a deprecation notice without spamming the log. 
pub struct DeprecationNotice Instant> { - now: T, - next_warning_at: RwLock>, - printer: Box) + Send + Sync>, + now: T, + next_warning_at: RwLock>, + printer: Box) + Send + Sync>, } impl Default for DeprecationNotice { - fn default() -> Self { - Self::new(Instant::now, |method, more| { - let more = more.map(|x| format!(": {}", x)).unwrap_or_else(|| ".".into()); - warn!(target: "rpc", "{} is deprecated and will be removed in future versions{}", method, more); - }) - } + fn default() -> Self { + Self::new(Instant::now, |method, more| { + let more = more + .map(|x| format!(": {}", x)) + .unwrap_or_else(|| ".".into()); + warn!(target: "rpc", "{} is deprecated and will be removed in future versions{}", method, more); + }) + } } impl Instant> DeprecationNotice { - /// Create new deprecation notice printer with custom display and interval. - pub fn new(now: N, printer: T) -> Self where - T: Fn(MethodName, Option<&str>) + Send + Sync + 'static, - { - DeprecationNotice { - now, - next_warning_at: Default::default(), - printer: Box::new(printer), - } - } + /// Create new deprecation notice printer with custom display and interval. + pub fn new(now: N, printer: T) -> Self + where + T: Fn(MethodName, Option<&str>) + Send + Sync + 'static, + { + DeprecationNotice { + now, + next_warning_at: Default::default(), + printer: Box::new(printer), + } + } - /// Print deprecation notice for given method and with some additional details (explanations). - pub fn print<'a, T: Into>>(&self, method: MethodName, details: T) { - let now = (self.now)(); - match self.next_warning_at.read().get(method) { - Some(next) if *next > now => return, - _ => {}, - } + /// Print deprecation notice for given method and with some additional details (explanations). 
+ pub fn print<'a, T: Into>>(&self, method: MethodName, details: T) { + let now = (self.now)(); + match self.next_warning_at.read().get(method) { + Some(next) if *next > now => return, + _ => {} + } - self.next_warning_at.write().insert(method.to_owned(), now + PRINT_INTERVAL); - (self.printer)(method, details.into()); - } + self.next_warning_at + .write() + .insert(method.to_owned(), now + PRINT_INTERVAL); + (self.printer)(method, details.into()); + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - use std::sync::Arc; + use std::sync::Arc; - #[test] - fn should_throttle_printing() { - let saved = Arc::new(RwLock::new(None)); - let s = saved.clone(); - let printer = move |method: MethodName, more: Option<&str>| { - *s.write() = Some((method, more.map(|s| s.to_owned()))); - }; + #[test] + fn should_throttle_printing() { + let saved = Arc::new(RwLock::new(None)); + let s = saved.clone(); + let printer = move |method: MethodName, more: Option<&str>| { + *s.write() = Some((method, more.map(|s| s.to_owned()))); + }; - let now = Arc::new(RwLock::new(Instant::now())); - let n = now.clone(); - let get_now = || n.read().clone(); - let notice = DeprecationNotice::new(get_now, printer); + let now = Arc::new(RwLock::new(Instant::now())); + let n = now.clone(); + let get_now = || n.read().clone(); + let notice = DeprecationNotice::new(get_now, printer); - let details = Some("See issue #123456"); - notice.print("eth_test", details.clone()); - // printer shouldn't be called - notice.print("eth_test", None); - assert_eq!(saved.read().clone().unwrap(), ("eth_test", details.as_ref().map(|x| x.to_string()))); - // but calling a different method is fine - notice.print("eth_test2", None); - assert_eq!(saved.read().clone().unwrap(), ("eth_test2", None)); + let details = Some("See issue #123456"); + notice.print("eth_test", details.clone()); + // printer shouldn't be called + notice.print("eth_test", None); + assert_eq!( + saved.read().clone().unwrap(), + ("eth_test", 
details.as_ref().map(|x| x.to_string())) + ); + // but calling a different method is fine + notice.print("eth_test2", None); + assert_eq!(saved.read().clone().unwrap(), ("eth_test2", None)); - // wait and call again - *now.write() = Instant::now() + PRINT_INTERVAL; - notice.print("eth_test", None); - assert_eq!(saved.read().clone().unwrap(), ("eth_test", None)); - } + // wait and call again + *now.write() = Instant::now() + PRINT_INTERVAL; + notice.print("eth_test", None); + assert_eq!(saved.read().clone().unwrap(), ("eth_test", None)); + } } diff --git a/rpc/src/v1/helpers/dispatch/full.rs b/rpc/src/v1/helpers/dispatch/full.rs index d958416cb..166ea2a44 100644 --- a/rpc/src/v1/helpers/dispatch/full.rs +++ b/rpc/src/v1/helpers/dispatch/full.rs @@ -16,136 +16,165 @@ use std::sync::Arc; -use ethcore::client::BlockChainClient; -use ethcore::miner::{self, MinerService}; -use ethereum_types::{H256, U256, Address}; -use types::transaction::{SignedTransaction, PendingTransaction}; +use ethcore::{ + client::BlockChainClient, + miner::{self, MinerService}, +}; +use ethereum_types::{Address, H256, U256}; use parking_lot::Mutex; +use types::transaction::{PendingTransaction, SignedTransaction}; -use jsonrpc_core::{BoxFuture, Result}; -use jsonrpc_core::futures::{future, Future, IntoFuture}; -use v1::helpers::{errors, nonce, TransactionRequest, FilledTransactionRequest}; -use v1::types::{RichRawTransaction as RpcRichRawTransaction}; +use jsonrpc_core::{ + futures::{future, Future, IntoFuture}, + BoxFuture, Result, +}; +use v1::{ + helpers::{errors, nonce, FilledTransactionRequest, TransactionRequest}, + types::RichRawTransaction as RpcRichRawTransaction, +}; -use super::prospective_signer::ProspectiveSigner; -use super::{Dispatcher, Accounts, SignWith, PostSign, default_gas_price}; +use super::{ + default_gas_price, prospective_signer::ProspectiveSigner, Accounts, Dispatcher, PostSign, + SignWith, +}; /// A dispatcher which uses references to a client and miner in order to sign 
/// requests locally. #[derive(Debug)] pub struct FullDispatcher { - client: Arc, - miner: Arc, - nonces: Arc>, - gas_price_percentile: usize, + client: Arc, + miner: Arc, + nonces: Arc>, + gas_price_percentile: usize, } impl FullDispatcher { - /// Create a `FullDispatcher` from Arc references to a client and miner. - pub fn new( - client: Arc, - miner: Arc, - nonces: Arc>, - gas_price_percentile: usize, - ) -> Self { - FullDispatcher { - client, - miner, - nonces, - gas_price_percentile, - } - } + /// Create a `FullDispatcher` from Arc references to a client and miner. + pub fn new( + client: Arc, + miner: Arc, + nonces: Arc>, + gas_price_percentile: usize, + ) -> Self { + FullDispatcher { + client, + miner, + nonces, + gas_price_percentile, + } + } } impl Clone for FullDispatcher { - fn clone(&self) -> Self { - FullDispatcher { - client: self.client.clone(), - miner: self.miner.clone(), - nonces: self.nonces.clone(), - gas_price_percentile: self.gas_price_percentile, - } - } + fn clone(&self) -> Self { + FullDispatcher { + client: self.client.clone(), + miner: self.miner.clone(), + nonces: self.nonces.clone(), + gas_price_percentile: self.gas_price_percentile, + } + } } impl FullDispatcher { - fn state_nonce(&self, from: &Address) -> U256 { - self.miner.next_nonce(&*self.client, from) - } + fn state_nonce(&self, from: &Address) -> U256 { + self.miner.next_nonce(&*self.client, from) + } - /// Post transaction to the network. - /// - /// If transaction is trusted we are more likely to assume it is coming from a local account. - pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: PendingTransaction, trusted: bool) -> Result { - let hash = signed_transaction.transaction.hash(); + /// Post transaction to the network. + /// + /// If transaction is trusted we are more likely to assume it is coming from a local account. 
+ pub fn dispatch_transaction( + client: &C, + miner: &M, + signed_transaction: PendingTransaction, + trusted: bool, + ) -> Result { + let hash = signed_transaction.transaction.hash(); - // use `import_claimed_local_transaction` so we can decide (based on config flags) if we want to treat - // it as local or not. Nodes with public RPC interfaces will want these transactions to be treated like - // external transactions. - miner.import_claimed_local_transaction(client, signed_transaction, trusted) - .map_err(errors::transaction) - .map(|_| hash) - } + // use `import_claimed_local_transaction` so we can decide (based on config flags) if we want to treat + // it as local or not. Nodes with public RPC interfaces will want these transactions to be treated like + // external transactions. + miner + .import_claimed_local_transaction(client, signed_transaction, trusted) + .map_err(errors::transaction) + .map(|_| hash) + } } -impl Dispatcher for FullDispatcher { - fn fill_optional_fields(&self, request: TransactionRequest, default_sender: Address, force_nonce: bool) - -> BoxFuture - { - let request = request; - let from = request.from.unwrap_or(default_sender); - let nonce = if force_nonce { - request.nonce.or_else(|| Some(self.state_nonce(&from))) - } else { - request.nonce - }; +impl Dispatcher + for FullDispatcher +{ + fn fill_optional_fields( + &self, + request: TransactionRequest, + default_sender: Address, + force_nonce: bool, + ) -> BoxFuture { + let request = request; + let from = request.from.unwrap_or(default_sender); + let nonce = if force_nonce { + request.nonce.or_else(|| Some(self.state_nonce(&from))) + } else { + request.nonce + }; - Box::new(future::ok(FilledTransactionRequest { - from, - used_default_from: request.from.is_none(), - to: request.to, - nonce, - gas_price: request.gas_price.unwrap_or_else(|| { - default_gas_price(&*self.client, &*self.miner, self.gas_price_percentile) - }), - gas: request.gas.unwrap_or_else(|| self.miner.sensible_gas_limit()), 
- value: request.value.unwrap_or_else(|| 0.into()), - data: request.data.unwrap_or_else(Vec::new), - condition: request.condition, - })) - } + Box::new(future::ok(FilledTransactionRequest { + from, + used_default_from: request.from.is_none(), + to: request.to, + nonce, + gas_price: request.gas_price.unwrap_or_else(|| { + default_gas_price(&*self.client, &*self.miner, self.gas_price_percentile) + }), + gas: request + .gas + .unwrap_or_else(|| self.miner.sensible_gas_limit()), + value: request.value.unwrap_or_else(|| 0.into()), + data: request.data.unwrap_or_else(Vec::new), + condition: request.condition, + })) + } - fn sign

( - &self, - filled: FilledTransactionRequest, - signer: &Arc, - password: SignWith, - post_sign: P, - ) -> BoxFuture - where - P: PostSign + 'static, - ::Future: Send, - { - let chain_id = self.client.signing_chain_id(); + fn sign

( + &self, + filled: FilledTransactionRequest, + signer: &Arc, + password: SignWith, + post_sign: P, + ) -> BoxFuture + where + P: PostSign + 'static, + ::Future: Send, + { + let chain_id = self.client.signing_chain_id(); - if let Some(nonce) = filled.nonce { - let future = signer.sign_transaction(filled, chain_id, nonce, password) - .into_future() - .and_then(move |signed| post_sign.execute(signed)); - Box::new(future) - } else { - let state = self.state_nonce(&filled.from); - let reserved = self.nonces.lock().reserve(filled.from, state); + if let Some(nonce) = filled.nonce { + let future = signer + .sign_transaction(filled, chain_id, nonce, password) + .into_future() + .and_then(move |signed| post_sign.execute(signed)); + Box::new(future) + } else { + let state = self.state_nonce(&filled.from); + let reserved = self.nonces.lock().reserve(filled.from, state); - Box::new(ProspectiveSigner::new(signer.clone(), filled, chain_id, reserved, password, post_sign)) - } - } + Box::new(ProspectiveSigner::new( + signer.clone(), + filled, + chain_id, + reserved, + password, + post_sign, + )) + } + } - fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction { - RpcRichRawTransaction::from_signed(signed_transaction) - } + fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction { + RpcRichRawTransaction::from_signed(signed_transaction) + } - fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result { - Self::dispatch_transaction(&*self.client, &*self.miner, signed_transaction, true) - } + fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result { + Self::dispatch_transaction(&*self.client, &*self.miner, signed_transaction, true) + } } diff --git a/rpc/src/v1/helpers/dispatch/light.rs b/rpc/src/v1/helpers/dispatch/light.rs index 88f9fafcf..17bacfeaa 100644 --- a/rpc/src/v1/helpers/dispatch/light.rs +++ b/rpc/src/v1/helpers/dispatch/light.rs @@ -16,284 +16,312 @@ use std::sync::Arc; 
-use ethereum_types::{H256, Address, U256}; -use light::TransactionQueue as LightTransactionQueue; -use light::cache::Cache as LightDataCache; -use light::client::LightChainClient; -use light::on_demand::{request, OnDemandRequester}; +use ethereum_types::{Address, H256, U256}; +use light::{ + cache::Cache as LightDataCache, + client::LightChainClient, + on_demand::{request, OnDemandRequester}, + TransactionQueue as LightTransactionQueue, +}; use parking_lot::{Mutex, RwLock}; use stats::Corpus; -use sync::{LightSyncProvider, LightNetworkDispatcher, ManageNetwork}; -use types::basic_account::BasicAccount; -use types::ids::BlockId; -use types::transaction::{SignedTransaction, PendingTransaction, Error as TransactionError}; +use sync::{LightNetworkDispatcher, LightSyncProvider, ManageNetwork}; +use types::{ + basic_account::BasicAccount, + ids::BlockId, + transaction::{Error as TransactionError, PendingTransaction, SignedTransaction}, +}; -use jsonrpc_core::{BoxFuture, Result}; -use jsonrpc_core::futures::{future, Future, IntoFuture}; -use jsonrpc_core::futures::future::Either; -use v1::helpers::{errors, nonce, TransactionRequest, FilledTransactionRequest}; -use v1::types::{RichRawTransaction as RpcRichRawTransaction,}; +use jsonrpc_core::{ + futures::{future, future::Either, Future, IntoFuture}, + BoxFuture, Result, +}; +use v1::{ + helpers::{errors, nonce, FilledTransactionRequest, TransactionRequest}, + types::RichRawTransaction as RpcRichRawTransaction, +}; -use super::{Dispatcher, Accounts, SignWith, PostSign}; +use super::{Accounts, Dispatcher, PostSign, SignWith}; /// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network. pub struct LightDispatcher where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - /// Sync service. 
- pub sync: Arc, - /// Header chain client. - pub client: Arc, - /// On-demand request service. - pub on_demand: Arc, - /// Data cache. - pub cache: Arc>, - /// Transaction queue. - pub transaction_queue: Arc>, - /// Nonce reservations - pub nonces: Arc>, - /// Gas Price percentile value used as default gas price. - pub gas_price_percentile: usize, + /// Sync service. + pub sync: Arc, + /// Header chain client. + pub client: Arc, + /// On-demand request service. + pub on_demand: Arc, + /// Data cache. + pub cache: Arc>, + /// Transaction queue. + pub transaction_queue: Arc>, + /// Nonce reservations + pub nonces: Arc>, + /// Gas Price percentile value used as default gas price. + pub gas_price_percentile: usize, } impl LightDispatcher where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - /// Create a new `LightDispatcher` from its requisite parts. - /// - /// For correct operation, the OnDemand service is assumed to be registered as a network handler, - pub fn new( - sync: Arc, - client: Arc, - on_demand: Arc, - cache: Arc>, - transaction_queue: Arc>, - nonces: Arc>, - gas_price_percentile: usize, - ) -> Self { - LightDispatcher { - sync, - client, - on_demand, - cache, - transaction_queue, - nonces, - gas_price_percentile, - } - } + /// Create a new `LightDispatcher` from its requisite parts. + /// + /// For correct operation, the OnDemand service is assumed to be registered as a network handler, + pub fn new( + sync: Arc, + client: Arc, + on_demand: Arc, + cache: Arc>, + transaction_queue: Arc>, + nonces: Arc>, + gas_price_percentile: usize, + ) -> Self { + LightDispatcher { + sync, + client, + on_demand, + cache, + transaction_queue, + nonces, + gas_price_percentile, + } + } - /// Get a recent gas price corpus. - // TODO: this could be `impl Trait`. 
- pub fn gas_price_corpus(&self) -> BoxFuture> { - fetch_gas_price_corpus( - self.sync.clone(), - self.client.clone(), - self.on_demand.clone(), - self.cache.clone(), - ) - } + /// Get a recent gas price corpus. + // TODO: this could be `impl Trait`. + pub fn gas_price_corpus(&self) -> BoxFuture> { + fetch_gas_price_corpus( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + ) + } - /// Get an account's state - fn account(&self, addr: Address) -> BoxFuture> { - let best_header = self.client.best_block_header(); - let account_future = self.sync.with_context(|ctx| self.on_demand.request(ctx, request::Account { - header: best_header.into(), - address: addr, - }).expect("no back-references; therefore all back-references valid; qed")); + /// Get an account's state + fn account(&self, addr: Address) -> BoxFuture> { + let best_header = self.client.best_block_header(); + let account_future = self.sync.with_context(|ctx| { + self.on_demand + .request( + ctx, + request::Account { + header: best_header.into(), + address: addr, + }, + ) + .expect("no back-references; therefore all back-references valid; qed") + }); - match account_future { - Some(response) => Box::new(response.map_err(|_| errors::no_light_peers())), - None => Box::new(future::err(errors::network_disabled())), - } - } + match account_future { + Some(response) => Box::new(response.map_err(|_| errors::no_light_peers())), + None => Box::new(future::err(errors::network_disabled())), + } + } - /// Get an account's next nonce. - pub fn next_nonce(&self, addr: Address) -> BoxFuture { - let account_start_nonce = self.client.engine().account_start_nonce(self.client.best_block_header().number()); - Box::new(self.account(addr) - .and_then(move |maybe_account| { - future::ok(maybe_account.map_or(account_start_nonce, |account| account.nonce)) - }) - ) - } + /// Get an account's next nonce. 
+ pub fn next_nonce(&self, addr: Address) -> BoxFuture { + let account_start_nonce = self + .client + .engine() + .account_start_nonce(self.client.best_block_header().number()); + Box::new(self.account(addr).and_then(move |maybe_account| { + future::ok(maybe_account.map_or(account_start_nonce, |account| account.nonce)) + })) + } } impl Clone for LightDispatcher where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - fn clone(&self) -> Self { - Self { - sync: self.sync.clone(), - client: self.client.clone(), - on_demand: self.on_demand.clone(), - cache: self.cache.clone(), - transaction_queue: self.transaction_queue.clone(), - nonces: self.nonces.clone(), - gas_price_percentile: self.gas_price_percentile - } - } + fn clone(&self) -> Self { + Self { + sync: self.sync.clone(), + client: self.client.clone(), + on_demand: self.on_demand.clone(), + cache: self.cache.clone(), + transaction_queue: self.transaction_queue.clone(), + nonces: self.nonces.clone(), + gas_price_percentile: self.gas_price_percentile, + } + } } impl Dispatcher for LightDispatcher where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - // Ignore the `force_nonce` flag in order to always query the network when fetching the nonce and - // the account state. 
If the nonce is specified in the transaction use that nonce instead but do the - // network request anyway to the account state (balance) - fn fill_optional_fields(&self, request: TransactionRequest, default_sender: Address, _force_nonce: bool) - -> BoxFuture - { - const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]); + // Ignore the `force_nonce` flag in order to always query the network when fetching the nonce and + // the account state. If the nonce is specified in the transaction use that nonce instead but do the + // network request anyway to the account state (balance) + fn fill_optional_fields( + &self, + request: TransactionRequest, + default_sender: Address, + _force_nonce: bool, + ) -> BoxFuture { + const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]); - let gas_limit = self.client.best_block_header().gas_limit(); - let request_gas_price = request.gas_price; - let from = request.from.unwrap_or(default_sender); + let gas_limit = self.client.best_block_header().gas_limit(); + let request_gas_price = request.gas_price; + let from = request.from.unwrap_or(default_sender); - let with_gas_price = move |gas_price| { - let request = request; - FilledTransactionRequest { - from, - used_default_from: request.from.is_none(), - to: request.to, - nonce: request.nonce, - gas_price, - gas: request.gas.unwrap_or_else(|| gas_limit / 3), - value: request.value.unwrap_or_default(), - data: request.data.unwrap_or_else(Vec::new), - condition: request.condition, - } - }; + let with_gas_price = move |gas_price| { + let request = request; + FilledTransactionRequest { + from, + used_default_from: request.from.is_none(), + to: request.to, + nonce: request.nonce, + gas_price, + gas: request.gas.unwrap_or_else(|| gas_limit / 3), + value: request.value.unwrap_or_default(), + data: request.data.unwrap_or_else(Vec::new), + condition: request.condition, + } + }; - // fast path for known gas price. 
- let gas_price_percentile = self.gas_price_percentile; - let gas_price = match request_gas_price { - Some(gas_price) => Either::A(future::ok(with_gas_price(gas_price))), - None => Either::B(fetch_gas_price_corpus( - self.sync.clone(), - self.client.clone(), - self.on_demand.clone(), - self.cache.clone() - ).and_then(move |corp| match corp.percentile(gas_price_percentile) { - Some(percentile) => Ok(*percentile), - None => Ok(DEFAULT_GAS_PRICE), // fall back to default on error. - }).map(with_gas_price)) - }; + // fast path for known gas price. + let gas_price_percentile = self.gas_price_percentile; + let gas_price = match request_gas_price { + Some(gas_price) => Either::A(future::ok(with_gas_price(gas_price))), + None => Either::B( + fetch_gas_price_corpus( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + ) + .and_then(move |corp| match corp.percentile(gas_price_percentile) { + Some(percentile) => Ok(*percentile), + None => Ok(DEFAULT_GAS_PRICE), // fall back to default on error. 
+ }) + .map(with_gas_price), + ), + }; - let future_account = self.account(from); + let future_account = self.account(from); - Box::new(gas_price.and_then(move |mut filled| { - future_account - .and_then(move |maybe_account| { - let cost = filled.value.saturating_add(filled.gas.saturating_mul(filled.gas_price)); - match maybe_account { - Some(ref account) if cost > account.balance => { - Err(errors::transaction(TransactionError::InsufficientBalance { - balance: account.balance, - cost, - })) - } - Some(account) => { - if filled.nonce.is_none() { - filled.nonce = Some(account.nonce); - } - Ok(filled) - } - None => Err(errors::account("Account not found", "")), - } - }) - })) - } + Box::new(gas_price.and_then(move |mut filled| { + future_account.and_then(move |maybe_account| { + let cost = filled + .value + .saturating_add(filled.gas.saturating_mul(filled.gas_price)); + match maybe_account { + Some(ref account) if cost > account.balance => { + Err(errors::transaction(TransactionError::InsufficientBalance { + balance: account.balance, + cost, + })) + } + Some(account) => { + if filled.nonce.is_none() { + filled.nonce = Some(account.nonce); + } + Ok(filled) + } + None => Err(errors::account("Account not found", "")), + } + }) + })) + } - fn sign

( - &self, - filled: FilledTransactionRequest, - signer: &Arc, - password: SignWith, - post_sign: P - ) -> BoxFuture - where - P: PostSign + 'static, - ::Future: Send, - { - let chain_id = self.client.signing_chain_id(); - let nonce = filled.nonce.expect("nonce is always provided; qed"); - let future = signer.sign_transaction(filled, chain_id, nonce, password) - .into_future() - .and_then(move |signed| post_sign.execute(signed)); - Box::new(future) - } + fn sign

( + &self, + filled: FilledTransactionRequest, + signer: &Arc, + password: SignWith, + post_sign: P, + ) -> BoxFuture + where + P: PostSign + 'static, + ::Future: Send, + { + let chain_id = self.client.signing_chain_id(); + let nonce = filled.nonce.expect("nonce is always provided; qed"); + let future = signer + .sign_transaction(filled, chain_id, nonce, password) + .into_future() + .and_then(move |signed| post_sign.execute(signed)); + Box::new(future) + } - fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction { - RpcRichRawTransaction::from_signed(signed_transaction) - } + fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction { + RpcRichRawTransaction::from_signed(signed_transaction) + } - fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result { - let hash = signed_transaction.transaction.hash(); + fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result { + let hash = signed_transaction.transaction.hash(); - self.transaction_queue.write().import(signed_transaction) - .map_err(errors::transaction) - .map(|_| hash) - } + self.transaction_queue + .write() + .import(signed_transaction) + .map_err(errors::transaction) + .map(|_| hash) + } } /// Get a recent gas price corpus. // TODO: this could be `impl Trait`. 
pub fn fetch_gas_price_corpus( - sync: Arc, - client: Arc, - on_demand: Arc, - cache: Arc>, + sync: Arc, + client: Arc, + on_demand: Arc, + cache: Arc>, ) -> BoxFuture> where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - const GAS_PRICE_SAMPLE_SIZE: usize = 100; + const GAS_PRICE_SAMPLE_SIZE: usize = 100; - if let Some(cached) = { cache.lock().gas_price_corpus() } { - return Box::new(future::ok(cached)) - } + if let Some(cached) = { cache.lock().gas_price_corpus() } { + return Box::new(future::ok(cached)); + } - let cache = cache.clone(); - let eventual_corpus = sync.with_context(|ctx| { - // get some recent headers with gas used, - // and request each of the blocks from the network. - let block_requests = client.ancestry_iter(BlockId::Latest) - .filter(|hdr| hdr.gas_used() != U256::default()) - .take(GAS_PRICE_SAMPLE_SIZE) - .map(|hdr| request::Body(hdr.into())) - .collect::>(); + let cache = cache.clone(); + let eventual_corpus = sync.with_context(|ctx| { + // get some recent headers with gas used, + // and request each of the blocks from the network. + let block_requests = client + .ancestry_iter(BlockId::Latest) + .filter(|hdr| hdr.gas_used() != U256::default()) + .take(GAS_PRICE_SAMPLE_SIZE) + .map(|hdr| request::Body(hdr.into())) + .collect::>(); - // when the blocks come in, collect gas prices into a vector - on_demand.request(ctx, block_requests) - .expect("no back-references; therefore all back-references are valid; qed") - .map(|bodies| { - bodies.into_iter().fold(Vec::new(), |mut v, block| { - for t in block.transaction_views().iter() { - v.push(t.gas_price()) - } - v - }) - }) - .map(move |prices| { - // produce a corpus from the vector and cache it. - // It's later used to get a percentile for default gas price. 
- let corpus: ::stats::Corpus<_> = prices.into(); - cache.lock().set_gas_price_corpus(corpus.clone()); - corpus - }) - }); + // when the blocks come in, collect gas prices into a vector + on_demand + .request(ctx, block_requests) + .expect("no back-references; therefore all back-references are valid; qed") + .map(|bodies| { + bodies.into_iter().fold(Vec::new(), |mut v, block| { + for t in block.transaction_views().iter() { + v.push(t.gas_price()) + } + v + }) + }) + .map(move |prices| { + // produce a corpus from the vector and cache it. + // It's later used to get a percentile for default gas price. + let corpus: ::stats::Corpus<_> = prices.into(); + cache.lock().set_gas_price_corpus(corpus.clone()); + corpus + }) + }); - match eventual_corpus { - Some(corp) => Box::new(corp.map_err(|_| errors::no_light_peers())), - None => Box::new(future::err(errors::network_disabled())), - } + match eventual_corpus { + Some(corp) => Box::new(corp.map_err(|_| errors::no_light_peers())), + None => Box::new(future::err(errors::network_disabled())), + } } diff --git a/rpc/src/v1/helpers/dispatch/mod.rs b/rpc/src/v1/helpers/dispatch/mod.rs index 3f247f0c6..cefdef37d 100644 --- a/rpc/src/v1/helpers/dispatch/mod.rs +++ b/rpc/src/v1/helpers/dispatch/mod.rs @@ -16,83 +16,94 @@ //! Utilities and helpers for transaction dispatch. 
-pub(crate) mod light; mod full; +pub(crate) mod light; mod prospective_signer; #[cfg(any(test, feature = "accounts"))] mod signing; #[cfg(not(any(test, feature = "accounts")))] mod signing { - use super::*; - use v1::helpers::errors; + use super::*; + use v1::helpers::errors; - /// Dummy signer implementation - #[derive(Debug, Clone)] - pub struct Signer; + /// Dummy signer implementation + #[derive(Debug, Clone)] + pub struct Signer; - impl Signer { - /// Create new instance of dummy signer (accept any AccountProvider) - pub fn new(_ap: T) -> Self { - Signer - } - } + impl Signer { + /// Create new instance of dummy signer (accept any AccountProvider) + pub fn new(_ap: T) -> Self { + Signer + } + } - impl super::Accounts for Signer { - fn sign_transaction(&self, _filled: FilledTransactionRequest, _chain_id: Option, _nonce: U256, _password: SignWith) -> Result> { - Err(errors::account("Signing unsupported", "See #9997")) - } + impl super::Accounts for Signer { + fn sign_transaction( + &self, + _filled: FilledTransactionRequest, + _chain_id: Option, + _nonce: U256, + _password: SignWith, + ) -> Result> { + Err(errors::account("Signing unsupported", "See #9997")) + } - fn sign_message(&self, _address: Address, _password: SignWith, _hash: SignMessage) -> Result> { - Err(errors::account("Signing unsupported", "See #9997")) - } + fn sign_message( + &self, + _address: Address, + _password: SignWith, + _hash: SignMessage, + ) -> Result> { + Err(errors::account("Signing unsupported", "See #9997")) + } - fn decrypt(&self, _address: Address, _password: SignWith, _data: Bytes) -> Result> { - Err(errors::account("Signing unsupported", "See #9997")) - } + fn decrypt( + &self, + _address: Address, + _password: SignWith, + _data: Bytes, + ) -> Result> { + Err(errors::account("Signing unsupported", "See #9997")) + } - fn supports_prospective_signing(&self, _address: &Address, _password: &SignWith) -> bool { - false - } + fn supports_prospective_signing(&self, _address: &Address, 
_password: &SignWith) -> bool { + false + } - fn default_account(&self) -> Address { - Default::default() - } + fn default_account(&self) -> Address { + Default::default() + } - fn is_unlocked(&self, _address: &Address) -> bool { - false - } - } + fn is_unlocked(&self, _address: &Address) -> bool { + false + } + } } -pub use self::light::LightDispatcher; -pub use self::full::FullDispatcher; -pub use self::signing::Signer; +pub use self::{full::FullDispatcher, light::LightDispatcher, signing::Signer}; pub use v1::helpers::nonce::Reservations; -use std::fmt::Debug; -use std::ops::Deref; -use std::sync::Arc; +use std::{fmt::Debug, ops::Deref, sync::Arc}; use bytes::Bytes; -use ethcore::client::BlockChainClient; -use ethcore::miner::MinerService; -use ethereum_types::{H520, H256, U256, Address}; +use ethcore::{client::BlockChainClient, miner::MinerService}; +use ethereum_types::{Address, H256, H520, U256}; use ethkey::{Password, Signature}; use hash::keccak; -use types::transaction::{SignedTransaction, PendingTransaction}; +use types::transaction::{PendingTransaction, SignedTransaction}; -use jsonrpc_core::{BoxFuture, Result, Error}; -use jsonrpc_core::futures::{future, Future, IntoFuture}; -use v1::helpers::{TransactionRequest, FilledTransactionRequest, ConfirmationPayload}; -use v1::types::{ - Bytes as RpcBytes, - RichRawTransaction as RpcRichRawTransaction, - ConfirmationPayload as RpcConfirmationPayload, - ConfirmationResponse, - EthSignRequest as RpcEthSignRequest, - EIP191SignRequest as RpcSignRequest, - DecryptRequest as RpcDecryptRequest, +use jsonrpc_core::{ + futures::{future, Future, IntoFuture}, + BoxFuture, Error, Result, +}; +use v1::{ + helpers::{ConfirmationPayload, FilledTransactionRequest, TransactionRequest}, + types::{ + Bytes as RpcBytes, ConfirmationPayload as RpcConfirmationPayload, ConfirmationResponse, + DecryptRequest as RpcDecryptRequest, EIP191SignRequest as RpcSignRequest, + EthSignRequest as RpcEthSignRequest, RichRawTransaction as 
RpcRichRawTransaction, + }, }; /// Has the capability to dispatch, sign, and decrypt. @@ -100,37 +111,42 @@ use v1::types::{ /// Requires a clone implementation, with the implication that it be cheap; /// usually just bumping a reference count or two. pub trait Dispatcher: Send + Sync + Clone { - // TODO: when ATC exist, use zero-cost - // type Out: IntoFuture + // TODO: when ATC exist, use zero-cost + // type Out: IntoFuture - /// Fill optional fields of a transaction request, fetching gas price but not nonce. - fn fill_optional_fields(&self, request: TransactionRequest, default_sender: Address, force_nonce: bool) - -> BoxFuture; + /// Fill optional fields of a transaction request, fetching gas price but not nonce. + fn fill_optional_fields( + &self, + request: TransactionRequest, + default_sender: Address, + force_nonce: bool, + ) -> BoxFuture; - /// Sign the given transaction request without dispatching, fetching appropriate nonce. - fn sign

( - &self, - filled: FilledTransactionRequest, - signer: &Arc, - password: SignWith, - post_sign: P, - ) -> BoxFuture where - P: PostSign + 'static, - ::Future: Send; + /// Sign the given transaction request without dispatching, fetching appropriate nonce. + fn sign

( + &self, + filled: FilledTransactionRequest, + signer: &Arc, + password: SignWith, + post_sign: P, + ) -> BoxFuture + where + P: PostSign + 'static, + ::Future: Send; - /// Converts a `SignedTransaction` into `RichRawTransaction` - fn enrich(&self, SignedTransaction) -> RpcRichRawTransaction; + /// Converts a `SignedTransaction` into `RichRawTransaction` + fn enrich(&self, SignedTransaction) -> RpcRichRawTransaction; - /// "Dispatch" a local transaction. - fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result; + /// "Dispatch" a local transaction. + fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result; } /// Payload to sign pub enum SignMessage { - /// Eth-sign kind data (requires prefixing) - Data(Bytes), - /// Prefixed data hash - Hash(H256), + /// Eth-sign kind data (requires prefixing) + Data(Bytes), + /// Prefixed data hash + Hash(H256), } /// Abstract transaction signer. @@ -138,52 +154,69 @@ pub enum SignMessage { /// NOTE This signer is semi-correct, it's a temporary measure to avoid moving too much code. /// If accounts are ultimately removed all password-dealing endpoints will be wiped out. pub trait Accounts: Send + Sync { - /// Sign given filled transaction request for the specified chain_id. - fn sign_transaction(&self, filled: FilledTransactionRequest, chain_id: Option, nonce: U256, password: SignWith) -> Result>; + /// Sign given filled transaction request for the specified chain_id. + fn sign_transaction( + &self, + filled: FilledTransactionRequest, + chain_id: Option, + nonce: U256, + password: SignWith, + ) -> Result>; - /// Sign given message. - fn sign_message(&self, address: Address, password: SignWith, hash: SignMessage) -> Result>; + /// Sign given message. + fn sign_message( + &self, + address: Address, + password: SignWith, + hash: SignMessage, + ) -> Result>; - /// Decrypt given message. 
- fn decrypt(&self, address: Address, password: SignWith, data: Bytes) -> Result>; + /// Decrypt given message. + fn decrypt( + &self, + address: Address, + password: SignWith, + data: Bytes, + ) -> Result>; - /// Returns `true` if the accounts can sign multiple times. - fn supports_prospective_signing(&self, address: &Address, password: &SignWith) -> bool; + /// Returns `true` if the accounts can sign multiple times. + fn supports_prospective_signing(&self, address: &Address, password: &SignWith) -> bool; - /// Returns default account. - fn default_account(&self) -> Address; + /// Returns default account. + fn default_account(&self) -> Address; - /// Returns true if account is unlocked (i.e. can sign without a password) - fn is_unlocked(&self, address: &Address) -> bool; + /// Returns true if account is unlocked (i.e. can sign without a password) + fn is_unlocked(&self, address: &Address) -> bool; } /// action to execute after signing /// e.g importing a transaction into the chain pub trait PostSign: Send { - /// item that this PostSign returns - type Item: Send; - /// incase you need to perform async PostSign actions - type Out: IntoFuture + Send; - /// perform an action with the signed transaction - fn execute(self, signer: WithToken) -> Self::Out; + /// item that this PostSign returns + type Item: Send; + /// incase you need to perform async PostSign actions + type Out: IntoFuture + Send; + /// perform an action with the signed transaction + fn execute(self, signer: WithToken) -> Self::Out; } impl PostSign for () { - type Item = WithToken; - type Out = Result; - fn execute(self, signed: WithToken) -> Self::Out { - Ok(signed) - } + type Item = WithToken; + type Out = Result; + fn execute(self, signed: WithToken) -> Self::Out { + Ok(signed) + } } impl PostSign for F - where F: FnOnce(WithToken) -> Result +where + F: FnOnce(WithToken) -> Result, { - type Item = T; - type Out = Result; - fn execute(self, signed: WithToken) -> Self::Out { - (self)(signed) - } + type 
Item = T; + type Out = Result; + fn execute(self, signed: WithToken) -> Self::Out { + (self)(signed) + } } /// Single-use account token. @@ -192,189 +225,207 @@ pub type AccountToken = Password; /// Values used to unlock accounts for signing. #[derive(Clone, PartialEq)] pub enum SignWith { - /// Nothing -- implies the account is already unlocked. - Nothing, - /// Unlock with password. - Password(Password), - /// Unlock with single-use token. - Token(AccountToken), + /// Nothing -- implies the account is already unlocked. + Nothing, + /// Unlock with password. + Password(Password), + /// Unlock with single-use token. + Token(AccountToken), } impl SignWith { - #[cfg(any(test, feature = "accounts"))] - fn is_password(&self) -> bool { - if let SignWith::Password(_) = *self { - true - } else { - false - } - } + #[cfg(any(test, feature = "accounts"))] + fn is_password(&self) -> bool { + if let SignWith::Password(_) = *self { + true + } else { + false + } + } } /// A value, potentially accompanied by a signing token. pub enum WithToken { - /// No token. - No(T), - /// With token. - Yes(T, AccountToken), + /// No token. + No(T), + /// With token. + Yes(T, AccountToken), } impl Deref for WithToken { - type Target = T; + type Target = T; - fn deref(&self) -> &Self::Target { - match *self { - WithToken::No(ref v) => v, - WithToken::Yes(ref v, _) => v, - } - } + fn deref(&self) -> &Self::Target { + match *self { + WithToken::No(ref v) => v, + WithToken::Yes(ref v, _) => v, + } + } } impl WithToken { - /// Map the value with the given closure, preserving the token. - pub fn map(self, f: F) -> WithToken where - S: Debug, - F: FnOnce(T) -> S, - { - match self { - WithToken::No(v) => WithToken::No(f(v)), - WithToken::Yes(v, token) => WithToken::Yes(f(v), token), - } - } + /// Map the value with the given closure, preserving the token. 
+ pub fn map(self, f: F) -> WithToken + where + S: Debug, + F: FnOnce(T) -> S, + { + match self { + WithToken::No(v) => WithToken::No(f(v)), + WithToken::Yes(v, token) => WithToken::Yes(f(v), token), + } + } - /// Convert into inner value, ignoring possible token. - pub fn into_value(self) -> T { - match self { - WithToken::No(v) => v, - WithToken::Yes(v, _) => v, - } - } + /// Convert into inner value, ignoring possible token. + pub fn into_value(self) -> T { + match self { + WithToken::No(v) => v, + WithToken::Yes(v, _) => v, + } + } - /// Convert the `WithToken` into a tuple. - pub fn into_tuple(self) -> (T, Option) { - match self { - WithToken::No(v) => (v, None), - WithToken::Yes(v, token) => (v, Some(token)) - } - } + /// Convert the `WithToken` into a tuple. + pub fn into_tuple(self) -> (T, Option) { + match self { + WithToken::No(v) => (v, None), + WithToken::Yes(v, token) => (v, Some(token)), + } + } } impl From<(T, AccountToken)> for WithToken { - fn from(tuple: (T, AccountToken)) -> Self { - WithToken::Yes(tuple.0, tuple.1) - } + fn from(tuple: (T, AccountToken)) -> Self { + WithToken::Yes(tuple.0, tuple.1) + } } impl From<(T, Option)> for WithToken { - fn from(tuple: (T, Option)) -> Self { - match tuple.1 { - Some(token) => WithToken::Yes(tuple.0, token), - None => WithToken::No(tuple.0), - } - } + fn from(tuple: (T, Option)) -> Self { + match tuple.1 { + Some(token) => WithToken::Yes(tuple.0, token), + None => WithToken::No(tuple.0), + } + } } /// Execute a confirmation payload. 
pub fn execute( - dispatcher: D, - signer: &Arc, - payload: ConfirmationPayload, - pass: SignWith + dispatcher: D, + signer: &Arc, + payload: ConfirmationPayload, + pass: SignWith, ) -> BoxFuture> { - match payload { - ConfirmationPayload::SendTransaction(request) => { - let condition = request.condition.clone().map(Into::into); - let cloned_dispatcher = dispatcher.clone(); - let post_sign = move |with_token_signed: WithToken| { - let (signed, token) = with_token_signed.into_tuple(); - let signed_transaction = PendingTransaction::new(signed, condition); - cloned_dispatcher.dispatch_transaction(signed_transaction) - .map(|hash| (hash, token)) - }; + match payload { + ConfirmationPayload::SendTransaction(request) => { + let condition = request.condition.clone().map(Into::into); + let cloned_dispatcher = dispatcher.clone(); + let post_sign = move |with_token_signed: WithToken| { + let (signed, token) = with_token_signed.into_tuple(); + let signed_transaction = PendingTransaction::new(signed, condition); + cloned_dispatcher + .dispatch_transaction(signed_transaction) + .map(|hash| (hash, token)) + }; - Box::new( - dispatcher.sign(request, &signer, pass, post_sign).map(|(hash, token)| { - WithToken::from((ConfirmationResponse::SendTransaction(hash), token)) - }) - ) - }, - ConfirmationPayload::SignTransaction(request) => { - Box::new(dispatcher.sign(request, &signer, pass, ()) - .map(move |result| result - .map(move |tx| dispatcher.enrich(tx)) - .map(ConfirmationResponse::SignTransaction) - )) - }, - ConfirmationPayload::EthSignMessage(address, data) => { - let res = signer.sign_message(address, pass, SignMessage::Data(data)) - .map(|result| result - .map(|s| H520(s.into_electrum())) - .map(ConfirmationResponse::Signature) - ); + Box::new( + dispatcher + .sign(request, &signer, pass, post_sign) + .map(|(hash, token)| { + WithToken::from((ConfirmationResponse::SendTransaction(hash), token)) + }), + ) + } + ConfirmationPayload::SignTransaction(request) => Box::new( + 
dispatcher + .sign(request, &signer, pass, ()) + .map(move |result| { + result + .map(move |tx| dispatcher.enrich(tx)) + .map(ConfirmationResponse::SignTransaction) + }), + ), + ConfirmationPayload::EthSignMessage(address, data) => { + let res = signer + .sign_message(address, pass, SignMessage::Data(data)) + .map(|result| { + result + .map(|s| H520(s.into_electrum())) + .map(ConfirmationResponse::Signature) + }); - Box::new(future::done(res)) - }, - ConfirmationPayload::SignMessage(address, data) => { - let res = signer.sign_message(address, pass, SignMessage::Hash(data)) - .map(|result| result - .map(|rsv| H520(rsv.into_electrum())) - .map(ConfirmationResponse::Signature) - ); + Box::new(future::done(res)) + } + ConfirmationPayload::SignMessage(address, data) => { + let res = signer + .sign_message(address, pass, SignMessage::Hash(data)) + .map(|result| { + result + .map(|rsv| H520(rsv.into_electrum())) + .map(ConfirmationResponse::Signature) + }); - Box::new(future::done(res)) - }, - ConfirmationPayload::Decrypt(address, data) => { - let res = signer.decrypt(address, pass, data) - .map(|result| result - .map(RpcBytes) - .map(ConfirmationResponse::Decrypt) - ); - Box::new(future::done(res)) - }, - } + Box::new(future::done(res)) + } + ConfirmationPayload::Decrypt(address, data) => { + let res = signer + .decrypt(address, pass, data) + .map(|result| result.map(RpcBytes).map(ConfirmationResponse::Decrypt)); + Box::new(future::done(res)) + } + } } /// Returns a eth_sign-compatible hash of data to sign. /// The data is prepended with special message to prevent /// malicious DApps from using the function to sign forged transactions. 
pub fn eth_data_hash(mut data: Bytes) -> H256 { - let mut message_data = - format!("\x19Ethereum Signed Message:\n{}", data.len()) - .into_bytes(); - message_data.append(&mut data); - keccak(message_data) + let mut message_data = format!("\x19Ethereum Signed Message:\n{}", data.len()).into_bytes(); + message_data.append(&mut data); + keccak(message_data) } /// Extract the default gas price from a client and miner. -pub fn default_gas_price(client: &C, miner: &M, percentile: usize) -> U256 where - C: BlockChainClient, - M: MinerService, +pub fn default_gas_price(client: &C, miner: &M, percentile: usize) -> U256 +where + C: BlockChainClient, + M: MinerService, { - client.gas_price_corpus(100).percentile(percentile).cloned().unwrap_or_else(|| miner.sensible_gas_price()) + client + .gas_price_corpus(100) + .percentile(percentile) + .cloned() + .unwrap_or_else(|| miner.sensible_gas_price()) } /// Convert RPC confirmation payload to signer confirmation payload. /// May need to resolve in the future to fetch things like gas price. 
-pub fn from_rpc(payload: RpcConfirmationPayload, default_account: Address, dispatcher: &D) -> BoxFuture - where D: Dispatcher +pub fn from_rpc( + payload: RpcConfirmationPayload, + default_account: Address, + dispatcher: &D, +) -> BoxFuture +where + D: Dispatcher, { - match payload { - RpcConfirmationPayload::SendTransaction(request) => { - Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false) - .map(ConfirmationPayload::SendTransaction)) - }, - RpcConfirmationPayload::SignTransaction(request) => { - Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false) - .map(ConfirmationPayload::SignTransaction)) - }, - RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => { - Box::new(future::ok(ConfirmationPayload::Decrypt(address, msg.into()))) - }, - RpcConfirmationPayload::EthSignMessage(RpcEthSignRequest { address, data }) => { - Box::new(future::ok(ConfirmationPayload::EthSignMessage(address, data.into()))) - }, - RpcConfirmationPayload::EIP191SignMessage(RpcSignRequest { address, data }) => { - Box::new(future::ok(ConfirmationPayload::SignMessage(address, data))) - }, - } + match payload { + RpcConfirmationPayload::SendTransaction(request) => Box::new( + dispatcher + .fill_optional_fields(request.into(), default_account, false) + .map(ConfirmationPayload::SendTransaction), + ), + RpcConfirmationPayload::SignTransaction(request) => Box::new( + dispatcher + .fill_optional_fields(request.into(), default_account, false) + .map(ConfirmationPayload::SignTransaction), + ), + RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => Box::new( + future::ok(ConfirmationPayload::Decrypt(address, msg.into())), + ), + RpcConfirmationPayload::EthSignMessage(RpcEthSignRequest { address, data }) => Box::new( + future::ok(ConfirmationPayload::EthSignMessage(address, data.into())), + ), + RpcConfirmationPayload::EIP191SignMessage(RpcSignRequest { address, data }) => { + 
Box::new(future::ok(ConfirmationPayload::SignMessage(address, data))) + } + } } diff --git a/rpc/src/v1/helpers/dispatch/prospective_signer.rs b/rpc/src/v1/helpers/dispatch/prospective_signer.rs index 034d19dc6..a32def84a 100644 --- a/rpc/src/v1/helpers/dispatch/prospective_signer.rs +++ b/rpc/src/v1/helpers/dispatch/prospective_signer.rs @@ -17,136 +17,142 @@ use std::sync::Arc; use ethereum_types::U256; -use jsonrpc_core::{Result, Error}; -use jsonrpc_core::futures::{Future, Poll, Async, IntoFuture}; +use jsonrpc_core::{ + futures::{Async, Future, IntoFuture, Poll}, + Error, Result, +}; use types::transaction::SignedTransaction; +use super::{Accounts, PostSign, SignWith, WithToken}; use v1::helpers::{errors, nonce, FilledTransactionRequest}; -use super::{Accounts, SignWith, WithToken, PostSign}; #[derive(Debug, Clone, Copy)] enum ProspectiveSignerState { - TryProspectiveSign, - WaitForPostSign, - WaitForNonce, + TryProspectiveSign, + WaitForPostSign, + WaitForNonce, } pub struct ProspectiveSigner { - signer: Arc, - filled: FilledTransactionRequest, - chain_id: Option, - reserved: nonce::Reserved, - password: SignWith, - state: ProspectiveSignerState, - prospective: Option>, - ready: Option, - post_sign: Option

, - post_sign_future: Option<::Future> + signer: Arc, + filled: FilledTransactionRequest, + chain_id: Option, + reserved: nonce::Reserved, + password: SignWith, + state: ProspectiveSignerState, + prospective: Option>, + ready: Option, + post_sign: Option

, + post_sign_future: Option<::Future>, } impl ProspectiveSigner

{ - pub fn new( - signer: Arc, - filled: FilledTransactionRequest, - chain_id: Option, - reserved: nonce::Reserved, - password: SignWith, - post_sign: P - ) -> Self { - let supports_prospective = signer.supports_prospective_signing(&filled.from, &password); + pub fn new( + signer: Arc, + filled: FilledTransactionRequest, + chain_id: Option, + reserved: nonce::Reserved, + password: SignWith, + post_sign: P, + ) -> Self { + let supports_prospective = signer.supports_prospective_signing(&filled.from, &password); - ProspectiveSigner { - signer, - filled, - chain_id, - reserved, - password, - state: if supports_prospective { - ProspectiveSignerState::TryProspectiveSign - } else { - ProspectiveSignerState::WaitForNonce - }, - prospective: None, - ready: None, - post_sign: Some(post_sign), - post_sign_future: None - } - } + ProspectiveSigner { + signer, + filled, + chain_id, + reserved, + password, + state: if supports_prospective { + ProspectiveSignerState::TryProspectiveSign + } else { + ProspectiveSignerState::WaitForNonce + }, + prospective: None, + ready: None, + post_sign: Some(post_sign), + post_sign_future: None, + } + } - fn sign(&self, nonce: &U256) -> Result> { - self.signer.sign_transaction( - self.filled.clone(), - self.chain_id, - *nonce, - self.password.clone() - ) - } + fn sign(&self, nonce: &U256) -> Result> { + self.signer.sign_transaction( + self.filled.clone(), + self.chain_id, + *nonce, + self.password.clone(), + ) + } - fn poll_reserved(&mut self) -> Poll { - self.reserved.poll().map_err(|_| errors::internal("Nonce reservation failure", "")) - } + fn poll_reserved(&mut self) -> Poll { + self.reserved + .poll() + .map_err(|_| errors::internal("Nonce reservation failure", "")) + } } impl Future for ProspectiveSigner

{ - type Item = P::Item; - type Error = Error; + type Item = P::Item; + type Error = Error; - fn poll(&mut self) -> Poll { - use self::ProspectiveSignerState::*; + fn poll(&mut self) -> Poll { + use self::ProspectiveSignerState::*; - loop { - match self.state { - TryProspectiveSign => { - // Try to poll reserved, it might be ready. - match self.poll_reserved()? { - Async::NotReady => { - self.state = WaitForNonce; - self.prospective = Some(self.sign(self.reserved.prospective_value())?); - }, - Async::Ready(nonce) => { - self.state = WaitForPostSign; - self.post_sign_future = Some( - self.post_sign.take() - .expect("post_sign is set on creation; qed") - .execute(self.sign(nonce.value())?) - .into_future() - ); - self.ready = Some(nonce); - }, - } - }, - WaitForNonce => { - let nonce = try_ready!(self.poll_reserved()); - let prospective = match (self.prospective.take(), nonce.matches_prospective()) { - (Some(prospective), true) => prospective, - _ => self.sign(nonce.value())?, - }; - self.ready = Some(nonce); - self.state = WaitForPostSign; - self.post_sign_future = Some(self.post_sign.take() - .expect("post_sign is set on creation; qed") - .execute(prospective) - .into_future()); - }, - WaitForPostSign => { - if let Some(mut fut) = self.post_sign_future.as_mut() { - match fut.poll()? { - Async::Ready(item) => { - let nonce = self.ready - .take() - .expect("nonce is set before state transitions to WaitForPostSign; qed"); - nonce.mark_used(); - return Ok(Async::Ready(item)) - }, - Async::NotReady => { - return Ok(Async::NotReady) - } - } - } else { - panic!("Poll after ready."); - } - } - } - } - } + loop { + match self.state { + TryProspectiveSign => { + // Try to poll reserved, it might be ready. + match self.poll_reserved()? 
{ + Async::NotReady => { + self.state = WaitForNonce; + self.prospective = Some(self.sign(self.reserved.prospective_value())?); + } + Async::Ready(nonce) => { + self.state = WaitForPostSign; + self.post_sign_future = Some( + self.post_sign + .take() + .expect("post_sign is set on creation; qed") + .execute(self.sign(nonce.value())?) + .into_future(), + ); + self.ready = Some(nonce); + } + } + } + WaitForNonce => { + let nonce = try_ready!(self.poll_reserved()); + let prospective = match (self.prospective.take(), nonce.matches_prospective()) { + (Some(prospective), true) => prospective, + _ => self.sign(nonce.value())?, + }; + self.ready = Some(nonce); + self.state = WaitForPostSign; + self.post_sign_future = Some( + self.post_sign + .take() + .expect("post_sign is set on creation; qed") + .execute(prospective) + .into_future(), + ); + } + WaitForPostSign => { + if let Some(mut fut) = self.post_sign_future.as_mut() { + match fut.poll()? { + Async::Ready(item) => { + let nonce = self.ready.take().expect( + "nonce is set before state transitions to WaitForPostSign; qed", + ); + nonce.mark_used(); + return Ok(Async::Ready(item)); + } + Async::NotReady => return Ok(Async::NotReady), + } + } else { + panic!("Poll after ready."); + } + } + } + } + } } diff --git a/rpc/src/v1/helpers/dispatch/signing.rs b/rpc/src/v1/helpers/dispatch/signing.rs index 8243dcbdf..d2fa8ca2f 100644 --- a/rpc/src/v1/helpers/dispatch/signing.rs +++ b/rpc/src/v1/helpers/dispatch/signing.rs @@ -19,138 +19,181 @@ use std::sync::Arc; use accounts::AccountProvider; use bytes::Bytes; use crypto::DEFAULT_MAC; -use ethereum_types::{H256, U256, Address}; -use ethkey::{Signature}; -use types::transaction::{Transaction, Action, SignedTransaction}; +use ethereum_types::{Address, H256, U256}; +use ethkey::Signature; +use types::transaction::{Action, SignedTransaction, Transaction}; use jsonrpc_core::Result; use v1::helpers::{errors, FilledTransactionRequest}; -use super::{eth_data_hash, WithToken, SignWith, 
SignMessage}; +use super::{eth_data_hash, SignMessage, SignWith, WithToken}; /// Account-aware signer pub struct Signer { - accounts: Arc, + accounts: Arc, } impl Signer { - /// Create new instance of signer - pub fn new(accounts: Arc) -> Self { - Signer { accounts } - } + /// Create new instance of signer + pub fn new(accounts: Arc) -> Self { + Signer { accounts } + } } impl super::Accounts for Signer { - fn sign_transaction(&self, filled: FilledTransactionRequest, chain_id: Option, nonce: U256, password: SignWith) -> Result> { - let t = Transaction { - nonce: nonce, - action: filled.to.map_or(Action::Create, Action::Call), - gas: filled.gas, - gas_price: filled.gas_price, - value: filled.value, - data: filled.data, - }; + fn sign_transaction( + &self, + filled: FilledTransactionRequest, + chain_id: Option, + nonce: U256, + password: SignWith, + ) -> Result> { + let t = Transaction { + nonce: nonce, + action: filled.to.map_or(Action::Create, Action::Call), + gas: filled.gas, + gas_price: filled.gas_price, + value: filled.value, + data: filled.data, + }; - if self.accounts.is_hardware_address(&filled.from) { - return hardware_signature(&*self.accounts, filled.from, t, chain_id).map(WithToken::No) - } + if self.accounts.is_hardware_address(&filled.from) { + return hardware_signature(&*self.accounts, filled.from, t, chain_id).map(WithToken::No); + } - let hash = t.hash(chain_id); - let signature = signature(&*self.accounts, filled.from, hash, password)?; + let hash = t.hash(chain_id); + let signature = signature(&*self.accounts, filled.from, hash, password)?; - Ok(signature.map(|sig| { - SignedTransaction::new(t.with_signature(sig, chain_id)) + Ok(signature.map(|sig| { + SignedTransaction::new(t.with_signature(sig, chain_id)) .expect("Transaction was signed by AccountsProvider; it never produces invalid signatures; qed") - })) - } + })) + } - fn sign_message(&self, address: Address, password: SignWith, hash: SignMessage) -> Result> { - if 
self.accounts.is_hardware_address(&address) { - return if let SignMessage::Data(data) = hash { - let signature = self.accounts.sign_message_with_hardware(&address, &data) - // TODO: is this correct? I guess the `token` is the wallet in this context - .map(WithToken::No) - .map_err(|e| errors::account("Error signing message with hardware_wallet", e)); + fn sign_message( + &self, + address: Address, + password: SignWith, + hash: SignMessage, + ) -> Result> { + if self.accounts.is_hardware_address(&address) { + return if let SignMessage::Data(data) = hash { + let signature = self + .accounts + .sign_message_with_hardware(&address, &data) + // TODO: is this correct? I guess the `token` is the wallet in this context + .map(WithToken::No) + .map_err(|e| errors::account("Error signing message with hardware_wallet", e)); - signature - } else { - Err(errors::account("Error signing message with hardware_wallet", "Message signing is unsupported")) - } - } + signature + } else { + Err(errors::account( + "Error signing message with hardware_wallet", + "Message signing is unsupported", + )) + }; + } - match hash { - SignMessage::Data(data) => { - let hash = eth_data_hash(data); - signature(&self.accounts, address, hash, password) - }, - SignMessage::Hash(hash) => { - signature(&self.accounts, address, hash, password) - } - } - } + match hash { + SignMessage::Data(data) => { + let hash = eth_data_hash(data); + signature(&self.accounts, address, hash, password) + } + SignMessage::Hash(hash) => signature(&self.accounts, address, hash, password), + } + } - fn decrypt(&self, address: Address, password: SignWith, data: Bytes) -> Result> { - if self.accounts.is_hardware_address(&address) { - return Err(errors::unsupported("Decrypting via hardware wallets is not supported.", None)); - } + fn decrypt( + &self, + address: Address, + password: SignWith, + data: Bytes, + ) -> Result> { + if self.accounts.is_hardware_address(&address) { + return Err(errors::unsupported( + "Decrypting via 
hardware wallets is not supported.", + None, + )); + } - match password.clone() { - SignWith::Nothing => self.accounts.decrypt(address, None, &DEFAULT_MAC, &data).map(WithToken::No), - SignWith::Password(pass) => self.accounts.decrypt(address, Some(pass), &DEFAULT_MAC, &data).map(WithToken::No), - SignWith::Token(token) => self.accounts.decrypt_with_token(address, token, &DEFAULT_MAC, &data).map(Into::into), - }.map_err(|e| match password { - SignWith::Nothing => errors::signing(e), - _ => errors::password(e), - }) - } + match password.clone() { + SignWith::Nothing => self + .accounts + .decrypt(address, None, &DEFAULT_MAC, &data) + .map(WithToken::No), + SignWith::Password(pass) => self + .accounts + .decrypt(address, Some(pass), &DEFAULT_MAC, &data) + .map(WithToken::No), + SignWith::Token(token) => self + .accounts + .decrypt_with_token(address, token, &DEFAULT_MAC, &data) + .map(Into::into), + } + .map_err(|e| match password { + SignWith::Nothing => errors::signing(e), + _ => errors::password(e), + }) + } - fn supports_prospective_signing(&self, address: &Address, password: &SignWith) -> bool { - // If the account is permanently unlocked we can try to sign - // using prospective nonce. This should speed up sending - // multiple subsequent transactions in multi-threaded RPC environment. - let is_unlocked_permanently = self.accounts.is_unlocked_permanently(address); - let has_password = password.is_password(); + fn supports_prospective_signing(&self, address: &Address, password: &SignWith) -> bool { + // If the account is permanently unlocked we can try to sign + // using prospective nonce. This should speed up sending + // multiple subsequent transactions in multi-threaded RPC environment. 
+ let is_unlocked_permanently = self.accounts.is_unlocked_permanently(address); + let has_password = password.is_password(); - is_unlocked_permanently || has_password - } + is_unlocked_permanently || has_password + } - fn default_account(&self) -> Address { - self.accounts.default_account().ok().unwrap_or_default() - } + fn default_account(&self) -> Address { + self.accounts.default_account().ok().unwrap_or_default() + } - fn is_unlocked(&self, address: &Address) -> bool { - self.accounts.is_unlocked(address) - } + fn is_unlocked(&self, address: &Address) -> bool { + self.accounts.is_unlocked(address) + } } -fn signature(accounts: &AccountProvider, address: Address, hash: H256, password: SignWith) -> Result> { - match password.clone() { - SignWith::Nothing => accounts.sign(address, None, hash).map(WithToken::No), - SignWith::Password(pass) => accounts.sign(address, Some(pass), hash).map(WithToken::No), - SignWith::Token(token) => accounts.sign_with_token(address, token, hash).map(Into::into), - }.map_err(|e| match password { - SignWith::Nothing => errors::signing(e), - _ => errors::password(e), - }) +fn signature( + accounts: &AccountProvider, + address: Address, + hash: H256, + password: SignWith, +) -> Result> { + match password.clone() { + SignWith::Nothing => accounts.sign(address, None, hash).map(WithToken::No), + SignWith::Password(pass) => accounts.sign(address, Some(pass), hash).map(WithToken::No), + SignWith::Token(token) => accounts + .sign_with_token(address, token, hash) + .map(Into::into), + } + .map_err(|e| match password { + SignWith::Nothing => errors::signing(e), + _ => errors::password(e), + }) } // obtain a hardware signature from the given account. 
-fn hardware_signature(accounts: &AccountProvider, address: Address, t: Transaction, chain_id: Option) - -> Result -{ - debug_assert!(accounts.is_hardware_address(&address)); +fn hardware_signature( + accounts: &AccountProvider, + address: Address, + t: Transaction, + chain_id: Option, +) -> Result { + debug_assert!(accounts.is_hardware_address(&address)); - let mut stream = rlp::RlpStream::new(); - t.rlp_append_unsigned_transaction(&mut stream, chain_id); - let signature = accounts.sign_transaction_with_hardware(&address, &t, chain_id, &stream.as_raw()) - .map_err(|e| { - debug!(target: "miner", "Error signing transaction with hardware wallet: {}", e); - errors::account("Error signing transaction with hardware wallet", e) - })?; + let mut stream = rlp::RlpStream::new(); + t.rlp_append_unsigned_transaction(&mut stream, chain_id); + let signature = accounts + .sign_transaction_with_hardware(&address, &t, chain_id, &stream.as_raw()) + .map_err(|e| { + debug!(target: "miner", "Error signing transaction with hardware wallet: {}", e); + errors::account("Error signing transaction with hardware wallet", e) + })?; - SignedTransaction::new(t.with_signature(signature, chain_id)) - .map_err(|e| { - debug!(target: "miner", "Hardware wallet has produced invalid signature: {}", e); - errors::account("Invalid signature generated", e) - }) + SignedTransaction::new(t.with_signature(signature, chain_id)).map_err(|e| { + debug!(target: "miner", "Hardware wallet has produced invalid signature: {}", e); + errors::account("Invalid signature generated", e) + }) } diff --git a/rpc/src/v1/helpers/eip191.rs b/rpc/src/v1/helpers/eip191.rs index 938ab81dc..8da2d06e8 100644 --- a/rpc/src/v1/helpers/eip191.rs +++ b/rpc/src/v1/helpers/eip191.rs @@ -15,47 +15,46 @@ // along with Parity Ethereum. If not, see . //! 
EIP-191 compliant decoding + hashing -use v1::types::{EIP191Version, Bytes, PresignedTransaction}; use eip_712::{hash_structured_data, EIP712}; -use serde_json::{Value, from_value}; -use v1::helpers::errors; -use jsonrpc_core::Error; -use v1::helpers::dispatch::eth_data_hash; -use hash::keccak; -use std::fmt::Display; use ethereum_types::H256; +use hash::keccak; +use jsonrpc_core::Error; +use serde_json::{from_value, Value}; +use std::fmt::Display; +use v1::{ + helpers::{dispatch::eth_data_hash, errors}, + types::{Bytes, EIP191Version, PresignedTransaction}, +}; /// deserializes and hashes the message depending on the version specifier pub fn hash_message(version: EIP191Version, message: Value) -> Result { - let data = match version { - EIP191Version::StructuredData => { - let typed_data = from_value::(message) - .map_err(map_serde_err("StructuredData"))?; + let data = match version { + EIP191Version::StructuredData => { + let typed_data = + from_value::(message).map_err(map_serde_err("StructuredData"))?; - hash_structured_data(typed_data) - .map_err(|err| errors::invalid_call_data(err.kind()))? - } + hash_structured_data(typed_data).map_err(|err| errors::invalid_call_data(err.kind()))? 
+ } - EIP191Version::PresignedTransaction => { - let data = from_value::(message) - .map_err(map_serde_err("WithValidator"))?; - let prefix = b"\x19\x00"; - let data = [&prefix[..], &data.validator.0[..], &data.data.0[..]].concat(); - keccak(data) - } + EIP191Version::PresignedTransaction => { + let data = from_value::(message) + .map_err(map_serde_err("WithValidator"))?; + let prefix = b"\x19\x00"; + let data = [&prefix[..], &data.validator.0[..], &data.data.0[..]].concat(); + keccak(data) + } - EIP191Version::PersonalMessage => { - let bytes = from_value::(message) - .map_err(map_serde_err("Bytes"))?; - eth_data_hash(bytes.0) - } - }; + EIP191Version::PersonalMessage => { + let bytes = from_value::(message).map_err(map_serde_err("Bytes"))?; + eth_data_hash(bytes.0) + } + }; - Ok(data) + Ok(data) } fn map_serde_err(struct_name: &'static str) -> impl Fn(T) -> Error { - move |error: T| { - errors::invalid_call_data(format!("Error deserializing '{}': {}", struct_name, error)) - } + move |error: T| { + errors::invalid_call_data(format!("Error deserializing '{}': {}", struct_name, error)) + } } diff --git a/rpc/src/v1/helpers/engine_signer.rs b/rpc/src/v1/helpers/engine_signer.rs index 56cead696..e0e929850 100644 --- a/rpc/src/v1/helpers/engine_signer.rs +++ b/rpc/src/v1/helpers/engine_signer.rs @@ -21,28 +21,34 @@ use ethkey::{self, Address, Password}; /// An implementation of EngineSigner using internal account management. pub struct EngineSigner { - accounts: Arc, - address: Address, - password: Password, + accounts: Arc, + address: Address, + password: Password, } impl EngineSigner { - /// Creates new `EngineSigner` given account manager and account details. - pub fn new(accounts: Arc, address: Address, password: Password) -> Self { - EngineSigner { accounts, address, password } - } + /// Creates new `EngineSigner` given account manager and account details. 
+ pub fn new(accounts: Arc, address: Address, password: Password) -> Self { + EngineSigner { + accounts, + address, + password, + } + } } impl ethcore::engines::EngineSigner for EngineSigner { - fn sign(&self, message: ethkey::Message) -> Result { - match self.accounts.sign(self.address, Some(self.password.clone()), message) { - Ok(ok) => Ok(ok), - Err(e) => Err(ethkey::Error::InvalidSecret), - } - } + fn sign(&self, message: ethkey::Message) -> Result { + match self + .accounts + .sign(self.address, Some(self.password.clone()), message) + { + Ok(ok) => Ok(ok), + Err(e) => Err(ethkey::Error::InvalidSecret), + } + } - fn address(&self) -> Address { - self.address - } + fn address(&self) -> Address { + self.address + } } - diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 6adb114f3..e60505e03 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -18,117 +18,120 @@ use std::fmt; -use ethcore::error::{Error as EthcoreError, ErrorKind, CallError}; -use ethcore::client::BlockId; -use jsonrpc_core::{futures, Result as RpcResult, Error, ErrorCode, Value}; -use rlp::DecoderError; -use types::transaction::Error as TransactionError; +use ethcore::{ + client::{BlockChainClient, BlockId}, + error::{CallError, Error as EthcoreError, ErrorKind}, +}; use ethcore_private_tx::Error as PrivateTransactionError; -use vm::Error as VMError; +use jsonrpc_core::{futures, Error, ErrorCode, Result as RpcResult, Value}; use light::on_demand::error::{Error as OnDemandError, ErrorKind as OnDemandErrorKind}; -use ethcore::client::BlockChainClient; -use types::blockchain_info::BlockChainInfo; -use v1::types::BlockNumber; -use v1::impls::EthClientOptions; +use rlp::DecoderError; +use types::{blockchain_info::BlockChainInfo, transaction::Error as TransactionError}; +use v1::{impls::EthClientOptions, types::BlockNumber}; +use vm::Error as VMError; mod codes { - // NOTE [ToDr] Codes from [-32099, -32000] - pub const UNSUPPORTED_REQUEST: i64 = 
-32000; - pub const NO_WORK: i64 = -32001; - pub const NO_AUTHOR: i64 = -32002; - pub const NO_NEW_WORK: i64 = -32003; - pub const NO_WORK_REQUIRED: i64 = -32004; - pub const CANNOT_SUBMIT_WORK: i64 = -32005; - pub const UNKNOWN_ERROR: i64 = -32009; - pub const TRANSACTION_ERROR: i64 = -32010; - pub const EXECUTION_ERROR: i64 = -32015; - pub const EXCEPTION_ERROR: i64 = -32016; - pub const DATABASE_ERROR: i64 = -32017; - #[cfg(any(test, feature = "accounts"))] - pub const ACCOUNT_LOCKED: i64 = -32020; - #[cfg(any(test, feature = "accounts"))] - pub const PASSWORD_INVALID: i64 = -32021; - pub const ACCOUNT_ERROR: i64 = -32023; - pub const PRIVATE_ERROR: i64 = -32024; - pub const REQUEST_REJECTED: i64 = -32040; - pub const REQUEST_REJECTED_LIMIT: i64 = -32041; - pub const REQUEST_NOT_FOUND: i64 = -32042; - pub const ENCRYPTION_ERROR: i64 = -32055; - pub const ENCODING_ERROR: i64 = -32058; - pub const FETCH_ERROR: i64 = -32060; - pub const NO_LIGHT_PEERS: i64 = -32065; - pub const NO_PEERS: i64 = -32066; - pub const DEPRECATED: i64 = -32070; - pub const EXPERIMENTAL_RPC: i64 = -32071; - pub const CANNOT_RESTART: i64 = -32080; + // NOTE [ToDr] Codes from [-32099, -32000] + pub const UNSUPPORTED_REQUEST: i64 = -32000; + pub const NO_WORK: i64 = -32001; + pub const NO_AUTHOR: i64 = -32002; + pub const NO_NEW_WORK: i64 = -32003; + pub const NO_WORK_REQUIRED: i64 = -32004; + pub const CANNOT_SUBMIT_WORK: i64 = -32005; + pub const UNKNOWN_ERROR: i64 = -32009; + pub const TRANSACTION_ERROR: i64 = -32010; + pub const EXECUTION_ERROR: i64 = -32015; + pub const EXCEPTION_ERROR: i64 = -32016; + pub const DATABASE_ERROR: i64 = -32017; + #[cfg(any(test, feature = "accounts"))] + pub const ACCOUNT_LOCKED: i64 = -32020; + #[cfg(any(test, feature = "accounts"))] + pub const PASSWORD_INVALID: i64 = -32021; + pub const ACCOUNT_ERROR: i64 = -32023; + pub const PRIVATE_ERROR: i64 = -32024; + pub const REQUEST_REJECTED: i64 = -32040; + pub const REQUEST_REJECTED_LIMIT: i64 = -32041; + pub 
const REQUEST_NOT_FOUND: i64 = -32042; + pub const ENCRYPTION_ERROR: i64 = -32055; + pub const ENCODING_ERROR: i64 = -32058; + pub const FETCH_ERROR: i64 = -32060; + pub const NO_LIGHT_PEERS: i64 = -32065; + pub const NO_PEERS: i64 = -32066; + pub const DEPRECATED: i64 = -32070; + pub const EXPERIMENTAL_RPC: i64 = -32071; + pub const CANNOT_RESTART: i64 = -32080; } pub fn unimplemented(details: Option) -> Error { - Error { - code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), - message: "This request is not implemented yet. Please create an issue on Github repo.".into(), - data: details.map(Value::String), - } + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), + message: "This request is not implemented yet. Please create an issue on Github repo." + .into(), + data: details.map(Value::String), + } } pub fn light_unimplemented(details: Option) -> Error { - Error { - code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), - message: "This request is unsupported for light clients.".into(), - data: details.map(Value::String), - } + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), + message: "This request is unsupported for light clients.".into(), + data: details.map(Value::String), + } } pub fn unsupported>(msg: T, details: Option) -> Error { - Error { - code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), - message: msg.into(), - data: details.map(Into::into).map(Value::String), - } + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), + message: msg.into(), + data: details.map(Into::into).map(Value::String), + } } pub fn request_not_found() -> Error { - Error { - code: ErrorCode::ServerError(codes::REQUEST_NOT_FOUND), - message: "Request not found.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::REQUEST_NOT_FOUND), + message: "Request not found.".into(), + data: None, + } } pub fn request_rejected() -> Error { - Error { - code: 
ErrorCode::ServerError(codes::REQUEST_REJECTED), - message: "Request has been rejected.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::REQUEST_REJECTED), + message: "Request has been rejected.".into(), + data: None, + } } pub fn request_rejected_limit() -> Error { - Error { - code: ErrorCode::ServerError(codes::REQUEST_REJECTED_LIMIT), - message: "Request has been rejected because of queue limit.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::REQUEST_REJECTED_LIMIT), + message: "Request has been rejected because of queue limit.".into(), + data: None, + } } pub fn request_rejected_param_limit(limit: u64, items_desc: &str) -> Error { - Error { - code: ErrorCode::ServerError(codes::REQUEST_REJECTED_LIMIT), - message: format!("Requested data size exceeds limit of {} {}.", limit, items_desc), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::REQUEST_REJECTED_LIMIT), + message: format!( + "Requested data size exceeds limit of {} {}.", + limit, items_desc + ), + data: None, + } } pub fn account(error: &str, details: T) -> Error { - Error { - code: ErrorCode::ServerError(codes::ACCOUNT_ERROR), - message: error.into(), - data: Some(Value::String(format!("{:?}", details))), - } + Error { + code: ErrorCode::ServerError(codes::ACCOUNT_ERROR), + message: error.into(), + data: Some(Value::String(format!("{:?}", details))), + } } pub fn cannot_restart() -> Error { - Error { + Error { code: ErrorCode::ServerError(codes::CANNOT_RESTART), message: "Parity could not be restarted. This feature is disabled in development mode and if the binary name isn't parity.".into(), data: None, @@ -139,31 +142,31 @@ pub fn cannot_restart() -> Error { /// Should not be used when function can just fail /// because of invalid parameters or incomplete node state. 
pub fn internal(error: &str, data: T) -> Error { - Error { - code: ErrorCode::InternalError, - message: format!("Internal error occurred: {}", error), - data: Some(Value::String(format!("{:?}", data))), - } + Error { + code: ErrorCode::InternalError, + message: format!("Internal error occurred: {}", error), + data: Some(Value::String(format!("{:?}", data))), + } } pub fn invalid_params(param: &str, details: T) -> Error { - Error { - code: ErrorCode::InvalidParams, - message: format!("Couldn't parse parameters: {}", param), - data: Some(Value::String(format!("{:?}", details))), - } + Error { + code: ErrorCode::InvalidParams, + message: format!("Couldn't parse parameters: {}", param), + data: Some(Value::String(format!("{:?}", details))), + } } pub fn execution(data: T) -> Error { - Error { - code: ErrorCode::ServerError(codes::EXECUTION_ERROR), - message: "Transaction execution error.".into(), - data: Some(Value::String(format!("{:?}", data))), - } + Error { + code: ErrorCode::ServerError(codes::EXECUTION_ERROR), + message: "Transaction execution error.".into(), + data: Some(Value::String(format!("{:?}", data))), + } } pub fn state_pruned() -> Error { - Error { + Error { code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), message: "This request is not supported because your node is running with state pruning. 
Run with --pruning=archive.".into(), data: None, @@ -171,206 +174,211 @@ pub fn state_pruned() -> Error { } pub fn state_corrupt() -> Error { - internal("State corrupt", "") + internal("State corrupt", "") } pub fn exceptional(data: T) -> Error { - Error { - code: ErrorCode::ServerError(codes::EXCEPTION_ERROR), - message: "The execution failed due to an exception.".into(), - data: Some(Value::String(data.to_string())), - } + Error { + code: ErrorCode::ServerError(codes::EXCEPTION_ERROR), + message: "The execution failed due to an exception.".into(), + data: Some(Value::String(data.to_string())), + } } pub fn no_work() -> Error { - Error { - code: ErrorCode::ServerError(codes::NO_WORK), - message: "Still syncing.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::NO_WORK), + message: "Still syncing.".into(), + data: None, + } } pub fn no_new_work() -> Error { - Error { - code: ErrorCode::ServerError(codes::NO_NEW_WORK), - message: "Work has not changed.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::NO_NEW_WORK), + message: "Work has not changed.".into(), + data: None, + } } pub fn no_author() -> Error { - Error { - code: ErrorCode::ServerError(codes::NO_AUTHOR), - message: "Author not configured. Run Parity with --author to configure.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::NO_AUTHOR), + message: "Author not configured. 
Run Parity with --author to configure.".into(), + data: None, + } } pub fn no_work_required() -> Error { - Error { - code: ErrorCode::ServerError(codes::NO_WORK_REQUIRED), - message: "External work is only required for Proof of Work engines.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::NO_WORK_REQUIRED), + message: "External work is only required for Proof of Work engines.".into(), + data: None, + } } pub fn cannot_submit_work(err: EthcoreError) -> Error { - Error { - code: ErrorCode::ServerError(codes::CANNOT_SUBMIT_WORK), - message: "Cannot submit work.".into(), - data: Some(Value::String(err.to_string())), - } + Error { + code: ErrorCode::ServerError(codes::CANNOT_SUBMIT_WORK), + message: "Cannot submit work.".into(), + data: Some(Value::String(err.to_string())), + } } pub fn unavailable_block(no_ancient_block: bool, by_hash: bool) -> Error { - if no_ancient_block { - Error { + if no_ancient_block { + Error { code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), message: "Looks like you disabled ancient block download, unfortunately the information you're \ trying to fetch doesn't exist in the db and is probably in the ancient blocks.".into(), data: None, } - } else if by_hash { - Error { + } else if by_hash { + Error { code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), message: "Block information is incomplete while ancient block sync is still in progress, before \ it's finished we can't determine the existence of requested item.".into(), data: None, } - } else { - Error { + } else { + Error { code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), message: "Requested block number is in a range that is not available yet, because the ancient block sync is still in progress.".into(), data: None, } - } + } } pub fn check_block_number_existence<'a, T, C>( - client: &'a C, - num: BlockNumber, - options: EthClientOptions, -) -> - impl Fn(Option) -> RpcResult> + 'a - where C: BlockChainClient, + client: &'a C, + num: 
BlockNumber, + options: EthClientOptions, +) -> impl Fn(Option) -> RpcResult> + 'a +where + C: BlockChainClient, { - move |response| { - if response.is_none() { - if let BlockNumber::Num(block_number) = num { - // tried to fetch block number and got nothing even though the block number is - // less than the latest block number - if block_number < client.chain_info().best_block_number && !options.allow_missing_blocks { - return Err(unavailable_block(options.no_ancient_blocks, false)); - } - } - } - Ok(response) - } + move |response| { + if response.is_none() { + if let BlockNumber::Num(block_number) = num { + // tried to fetch block number and got nothing even though the block number is + // less than the latest block number + if block_number < client.chain_info().best_block_number + && !options.allow_missing_blocks + { + return Err(unavailable_block(options.no_ancient_blocks, false)); + } + } + } + Ok(response) + } } pub fn check_block_gap<'a, T, C>( - client: &'a C, - options: EthClientOptions, + client: &'a C, + options: EthClientOptions, ) -> impl Fn(Option) -> RpcResult> + 'a - where C: BlockChainClient, +where + C: BlockChainClient, { - move |response| { - if response.is_none() && !options.allow_missing_blocks { - let BlockChainInfo { ancient_block_hash, .. } = client.chain_info(); - // block information was requested, but unfortunately we couldn't find it and there - // are gaps in the database ethcore/src/blockchain/blockchain.rs - if ancient_block_hash.is_some() { - return Err(unavailable_block(options.no_ancient_blocks, true)) - } - } - Ok(response) - } + move |response| { + if response.is_none() && !options.allow_missing_blocks { + let BlockChainInfo { + ancient_block_hash, .. 
+ } = client.chain_info(); + // block information was requested, but unfortunately we couldn't find it and there + // are gaps in the database ethcore/src/blockchain/blockchain.rs + if ancient_block_hash.is_some() { + return Err(unavailable_block(options.no_ancient_blocks, true)); + } + } + Ok(response) + } } pub fn not_enough_data() -> Error { - Error { - code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), - message: "The node does not have enough data to compute the given statistic.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), + message: "The node does not have enough data to compute the given statistic.".into(), + data: None, + } } pub fn token(e: String) -> Error { - Error { - code: ErrorCode::ServerError(codes::UNKNOWN_ERROR), - message: "There was an error when saving your authorization tokens.".into(), - data: Some(Value::String(e)), - } + Error { + code: ErrorCode::ServerError(codes::UNKNOWN_ERROR), + message: "There was an error when saving your authorization tokens.".into(), + data: Some(Value::String(e)), + } } pub fn signer_disabled() -> Error { - Error { - code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), - message: "Trusted Signer is disabled. This API is not available.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), + message: "Trusted Signer is disabled. This API is not available.".into(), + data: None, + } } pub fn ws_disabled() -> Error { - Error { - code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), - message: "WebSockets Server is disabled. This API is not available.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), + message: "WebSockets Server is disabled. 
This API is not available.".into(), + data: None, + } } pub fn network_disabled() -> Error { - Error { - code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), - message: "Network is disabled or not yet up.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), + message: "Network is disabled or not yet up.".into(), + data: None, + } } pub fn encryption(error: T) -> Error { - Error { - code: ErrorCode::ServerError(codes::ENCRYPTION_ERROR), - message: "Encryption error.".into(), - data: Some(Value::String(format!("{:?}", error))), - } + Error { + code: ErrorCode::ServerError(codes::ENCRYPTION_ERROR), + message: "Encryption error.".into(), + data: Some(Value::String(format!("{:?}", error))), + } } pub fn encoding(error: T) -> Error { - Error { - code: ErrorCode::ServerError(codes::ENCODING_ERROR), - message: "Encoding error.".into(), - data: Some(Value::String(format!("{:?}", error))), - } + Error { + code: ErrorCode::ServerError(codes::ENCODING_ERROR), + message: "Encoding error.".into(), + data: Some(Value::String(format!("{:?}", error))), + } } pub fn database(error: T) -> Error { - Error { - code: ErrorCode::ServerError(codes::DATABASE_ERROR), - message: "Database error.".into(), - data: Some(Value::String(format!("{:?}", error))), - } + Error { + code: ErrorCode::ServerError(codes::DATABASE_ERROR), + message: "Database error.".into(), + data: Some(Value::String(format!("{:?}", error))), + } } pub fn fetch(error: T) -> Error { - Error { - code: ErrorCode::ServerError(codes::FETCH_ERROR), - message: "Error while fetching content.".into(), - data: Some(Value::String(format!("{:?}", error))), - } + Error { + code: ErrorCode::ServerError(codes::FETCH_ERROR), + message: "Error while fetching content.".into(), + data: Some(Value::String(format!("{:?}", error))), + } } #[cfg(any(test, feature = "accounts"))] pub fn invalid_call_data(error: T) -> Error { - Error { - code: ErrorCode::ServerError(codes::ENCODING_ERROR), - 
message: format!("{}", error), - data: None - } + Error { + code: ErrorCode::ServerError(codes::ENCODING_ERROR), + message: format!("{}", error), + data: None, + } } #[cfg(any(test, feature = "accounts"))] pub fn signing(error: ::accounts::SignError) -> Error { - Error { + Error { code: ErrorCode::ServerError(codes::ACCOUNT_LOCKED), message: "Your account is locked. Unlock the account via CLI, personal_unlockAccount or use Trusted Signer.".into(), data: Some(Value::String(format!("{:?}", error))), @@ -379,33 +387,33 @@ pub fn signing(error: ::accounts::SignError) -> Error { #[cfg(any(test, feature = "accounts"))] pub fn password(error: ::accounts::SignError) -> Error { - Error { - code: ErrorCode::ServerError(codes::PASSWORD_INVALID), - message: "Account password is invalid or account does not exist.".into(), - data: Some(Value::String(format!("{:?}", error))), - } + Error { + code: ErrorCode::ServerError(codes::PASSWORD_INVALID), + message: "Account password is invalid or account does not exist.".into(), + data: Some(Value::String(format!("{:?}", error))), + } } pub fn private_message(error: PrivateTransactionError) -> Error { - Error { - code: ErrorCode::ServerError(codes::PRIVATE_ERROR), - message: "Private transactions call failed.".into(), - data: Some(Value::String(format!("{:?}", error))), - } + Error { + code: ErrorCode::ServerError(codes::PRIVATE_ERROR), + message: "Private transactions call failed.".into(), + data: Some(Value::String(format!("{:?}", error))), + } } pub fn private_message_block_id_not_supported() -> Error { - Error { - code: ErrorCode::ServerError(codes::PRIVATE_ERROR), - message: "Pending block id not supported.".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::PRIVATE_ERROR), + message: "Pending block id not supported.".into(), + data: None, + } } pub fn transaction_message(error: &TransactionError) -> String { - use self::TransactionError::*; + use self::TransactionError::*; - match *error { + match *error { 
AlreadyImported => "Transaction with the same hash was already imported.".into(), Old => "Transaction nonce is too low. Try incrementing the nonce.".into(), TooCheapToReplace { prev, new } => { @@ -442,101 +450,104 @@ pub fn transaction_message(error: &TransactionError) -> String { } pub fn transaction>(error: T) -> Error { - let error = error.into(); - if let ErrorKind::Transaction(ref e) = *error.kind() { - Error { - code: ErrorCode::ServerError(codes::TRANSACTION_ERROR), - message: transaction_message(e), - data: None, - } - } else { - Error { - code: ErrorCode::ServerError(codes::UNKNOWN_ERROR), - message: "Unknown error when sending transaction.".into(), - data: Some(Value::String(format!("{:?}", error))), - } - } + let error = error.into(); + if let ErrorKind::Transaction(ref e) = *error.kind() { + Error { + code: ErrorCode::ServerError(codes::TRANSACTION_ERROR), + message: transaction_message(e), + data: None, + } + } else { + Error { + code: ErrorCode::ServerError(codes::UNKNOWN_ERROR), + message: "Unknown error when sending transaction.".into(), + data: Some(Value::String(format!("{:?}", error))), + } + } } pub fn decode>(error: T) -> Error { - let error = error.into(); - match *error.kind() { - ErrorKind::Decoder(ref dec_err) => rlp(dec_err.clone()), - _ => Error { - code: ErrorCode::InternalError, - message: "decoding error".into(), - data: None, - } - } + let error = error.into(); + match *error.kind() { + ErrorKind::Decoder(ref dec_err) => rlp(dec_err.clone()), + _ => Error { + code: ErrorCode::InternalError, + message: "decoding error".into(), + data: None, + }, + } } pub fn rlp(error: DecoderError) -> Error { - Error { - code: ErrorCode::InvalidParams, - message: "Invalid RLP.".into(), - data: Some(Value::String(format!("{:?}", error))), - } + Error { + code: ErrorCode::InvalidParams, + message: "Invalid RLP.".into(), + data: Some(Value::String(format!("{:?}", error))), + } } pub fn call(error: CallError) -> Error { - match error { - 
CallError::StatePruned => state_pruned(), - CallError::StateCorrupt => state_corrupt(), - CallError::Exceptional(e) => exceptional(e), - CallError::Execution(e) => execution(e), - CallError::TransactionNotFound => internal("{}, this should not be the case with eth_call, most likely a bug.", CallError::TransactionNotFound), - } + match error { + CallError::StatePruned => state_pruned(), + CallError::StateCorrupt => state_corrupt(), + CallError::Exceptional(e) => exceptional(e), + CallError::Execution(e) => execution(e), + CallError::TransactionNotFound => internal( + "{}, this should not be the case with eth_call, most likely a bug.", + CallError::TransactionNotFound, + ), + } } pub fn vm(error: &VMError, output: &[u8]) -> Error { - use rustc_hex::ToHex; + use rustc_hex::ToHex; - let data = match error { - &VMError::Reverted => format!("{} 0x{}", VMError::Reverted, output.to_hex()), - error => format!("{}", error), - }; + let data = match error { + &VMError::Reverted => format!("{} 0x{}", VMError::Reverted, output.to_hex()), + error => format!("{}", error), + }; - Error { - code: ErrorCode::ServerError(codes::EXECUTION_ERROR), - message: "VM execution error.".into(), - data: Some(Value::String(data)), - } + Error { + code: ErrorCode::ServerError(codes::EXECUTION_ERROR), + message: "VM execution error.".into(), + data: Some(Value::String(data)), + } } pub fn unknown_block() -> Error { - Error { - code: ErrorCode::InvalidParams, - message: "Unknown block number".into(), - data: None, - } + Error { + code: ErrorCode::InvalidParams, + message: "Unknown block number".into(), + data: None, + } } pub fn no_light_peers() -> Error { - Error { - code: ErrorCode::ServerError(codes::NO_LIGHT_PEERS), - message: "No light peers who can serve data".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::NO_LIGHT_PEERS), + message: "No light peers who can serve data".into(), + data: None, + } } pub fn deprecated, T: Into>>(message: T) -> Error { - Error { - code: 
ErrorCode::ServerError(codes::DEPRECATED), - message: "Method deprecated".into(), - data: message.into().map(Into::into).map(Value::String), - } + Error { + code: ErrorCode::ServerError(codes::DEPRECATED), + message: "Method deprecated".into(), + data: message.into().map(Into::into).map(Value::String), + } } pub fn filter_not_found() -> Error { - Error { - code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), - message: "Filter not found".into(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), + message: "Filter not found".into(), + data: None, + } } pub fn filter_block_not_found(id: BlockId) -> Error { - Error { + Error { code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), // Specified in EIP-234. message: "One of the blocks specified in filter (fromBlock, toBlock or blockHash) cannot be found".into(), data: Some(Value::String(match id { @@ -549,64 +560,64 @@ pub fn filter_block_not_found(id: BlockId) -> Error { } pub fn on_demand_error(err: OnDemandError) -> Error { - match err { - OnDemandError(OnDemandErrorKind::ChannelCanceled(e), _) => on_demand_cancel(e), - OnDemandError(OnDemandErrorKind::RequestLimit, _) => timeout_new_peer(&err), - OnDemandError(OnDemandErrorKind::BadResponse(_), _) => max_attempts_reached(&err), - _ => on_demand_others(&err), - } + match err { + OnDemandError(OnDemandErrorKind::ChannelCanceled(e), _) => on_demand_cancel(e), + OnDemandError(OnDemandErrorKind::RequestLimit, _) => timeout_new_peer(&err), + OnDemandError(OnDemandErrorKind::BadResponse(_), _) => max_attempts_reached(&err), + _ => on_demand_others(&err), + } } // on-demand sender cancelled. 
pub fn on_demand_cancel(_cancel: futures::sync::oneshot::Canceled) -> Error { - internal("on-demand sender cancelled", "") + internal("on-demand sender cancelled", "") } pub fn max_attempts_reached(err: &OnDemandError) -> Error { - Error { - code: ErrorCode::ServerError(codes::REQUEST_NOT_FOUND), - message: err.to_string(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::REQUEST_NOT_FOUND), + message: err.to_string(), + data: None, + } } pub fn timeout_new_peer(err: &OnDemandError) -> Error { - Error { - code: ErrorCode::ServerError(codes::NO_LIGHT_PEERS), - message: err.to_string(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::NO_LIGHT_PEERS), + message: err.to_string(), + data: None, + } } pub fn on_demand_others(err: &OnDemandError) -> Error { - Error { - code: ErrorCode::ServerError(codes::UNKNOWN_ERROR), - message: err.to_string(), - data: None, - } + Error { + code: ErrorCode::ServerError(codes::UNKNOWN_ERROR), + message: err.to_string(), + data: None, + } } pub fn status_error(has_peers: bool) -> Error { - if has_peers { - no_work() - } else { - Error { - code: ErrorCode::ServerError(codes::NO_PEERS), - message: "Node is not connected to any peers.".into(), - data: None, - } - } + if has_peers { + no_work() + } else { + Error { + code: ErrorCode::ServerError(codes::NO_PEERS), + message: "Node is not connected to any peers.".into(), + data: None, + } + } } /// Returns a descriptive error in case experimental RPCs are not enabled. pub fn require_experimental(allow_experimental_rpcs: bool, eip: &str) -> Result<(), Error> { - if allow_experimental_rpcs { - Ok(()) - } else { - Err(Error { + if allow_experimental_rpcs { + Ok(()) + } else { + Err(Error { code: ErrorCode::ServerError(codes::EXPERIMENTAL_RPC), message: format!("This method is not part of the official RPC API yet (EIP-{}). 
Run with `--jsonrpc-experimental` to enable it.", eip), data: Some(Value::String(format!("See EIP: https://eips.ethereum.org/EIPS/eip-{}", eip))), }) - } + } } diff --git a/rpc/src/v1/helpers/external_signer/mod.rs b/rpc/src/v1/helpers/external_signer/mod.rs index 0797929cb..cacd06e24 100644 --- a/rpc/src/v1/helpers/external_signer/mod.rs +++ b/rpc/src/v1/helpers/external_signer/mod.rs @@ -16,59 +16,62 @@ //! An list of requests to be confirmed or signed by an external approver/signer. -use std::sync::Arc; -use std::ops::Deref; +use std::{ops::Deref, sync::Arc}; mod oneshot; mod signing_queue; -pub use self::signing_queue::{SigningQueue, ConfirmationsQueue, ConfirmationReceiver, ConfirmationResult}; #[cfg(test)] pub use self::signing_queue::QueueEvent; +pub use self::signing_queue::{ + ConfirmationReceiver, ConfirmationResult, ConfirmationsQueue, SigningQueue, +}; /// Manages communication with Signer crate pub struct SignerService { - is_enabled: bool, - queue: Arc, - generate_new_token: Box Result + Send + Sync + 'static>, + is_enabled: bool, + queue: Arc, + generate_new_token: Box Result + Send + Sync + 'static>, } impl SignerService { - /// Creates new Signer Service given function to generate new tokens. - pub fn new(new_token: F, is_enabled: bool) -> Self - where F: Fn() -> Result + Send + Sync + 'static { - SignerService { - queue: Arc::new(ConfirmationsQueue::default()), - generate_new_token: Box::new(new_token), - is_enabled, - } - } + /// Creates new Signer Service given function to generate new tokens. + pub fn new(new_token: F, is_enabled: bool) -> Self + where + F: Fn() -> Result + Send + Sync + 'static, + { + SignerService { + queue: Arc::new(ConfirmationsQueue::default()), + generate_new_token: Box::new(new_token), + is_enabled, + } + } - /// Generates new signer authorization token. - pub fn generate_token(&self) -> Result { - (self.generate_new_token)() - } + /// Generates new signer authorization token. 
+ pub fn generate_token(&self) -> Result { + (self.generate_new_token)() + } - /// Returns a reference to `ConfirmationsQueue` - pub fn queue(&self) -> Arc { - self.queue.clone() - } + /// Returns a reference to `ConfirmationsQueue` + pub fn queue(&self) -> Arc { + self.queue.clone() + } - /// Returns true if Signer is enabled. - pub fn is_enabled(&self) -> bool { - self.is_enabled - } + /// Returns true if Signer is enabled. + pub fn is_enabled(&self) -> bool { + self.is_enabled + } - #[cfg(test)] - /// Creates new Signer Service for tests. - pub fn new_test(is_enabled: bool) -> Self { - SignerService::new(|| Ok("new_token".into()), is_enabled) - } + #[cfg(test)] + /// Creates new Signer Service for tests. + pub fn new_test(is_enabled: bool) -> Self { + SignerService::new(|| Ok("new_token".into()), is_enabled) + } } impl Deref for SignerService { - type Target = ConfirmationsQueue; - fn deref(&self) -> &Self::Target { - &self.queue - } + type Target = ConfirmationsQueue; + fn deref(&self) -> &Self::Target { + &self.queue + } } diff --git a/rpc/src/v1/helpers/external_signer/oneshot.rs b/rpc/src/v1/helpers/external_signer/oneshot.rs index eac3dca7f..0df170e6e 100644 --- a/rpc/src/v1/helpers/external_signer/oneshot.rs +++ b/rpc/src/v1/helpers/external_signer/oneshot.rs @@ -14,54 +14,51 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use jsonrpc_core::Error; -use jsonrpc_core::futures::{self, Future}; -use jsonrpc_core::futures::sync::oneshot; +use jsonrpc_core::{ + futures::{self, sync::oneshot, Future}, + Error, +}; use v1::helpers::errors; pub type Res = Result; pub struct Sender { - sender: oneshot::Sender>, + sender: oneshot::Sender>, } impl Sender { - pub fn send(self, data: Res) { - let res = self.sender.send(data); - if res.is_err() { - debug!(target: "rpc", "Responding to a no longer active request."); - } - } + pub fn send(self, data: Res) { + let res = self.sender.send(data); + if res.is_err() { + debug!(target: "rpc", "Responding to a no longer active request."); + } + } } pub struct Receiver { - receiver: oneshot::Receiver>, + receiver: oneshot::Receiver>, } impl Future for Receiver { - type Item = T; - type Error = Error; + type Item = T; + type Error = Error; - fn poll(&mut self) -> futures::Poll { - let res = self.receiver.poll(); - match res { - Ok(futures::Async::NotReady) => Ok(futures::Async::NotReady), - Ok(futures::Async::Ready(Ok(res))) => Ok(futures::Async::Ready(res)), - Ok(futures::Async::Ready(Err(err))) => Err(err), - Err(e) => { - debug!(target: "rpc", "Responding to a canceled request: {:?}", e); - Err(errors::internal("Request was canceled by client.", e)) - }, - } - } + fn poll(&mut self) -> futures::Poll { + let res = self.receiver.poll(); + match res { + Ok(futures::Async::NotReady) => Ok(futures::Async::NotReady), + Ok(futures::Async::Ready(Ok(res))) => Ok(futures::Async::Ready(res)), + Ok(futures::Async::Ready(Err(err))) => Err(err), + Err(e) => { + debug!(target: "rpc", "Responding to a canceled request: {:?}", e); + Err(errors::internal("Request was canceled by client.", e)) + } + } + } } pub fn oneshot() -> (Sender, Receiver) { - let (tx, rx) = futures::oneshot(); + let (tx, rx) = futures::oneshot(); - (Sender { - sender: tx, - }, Receiver { - receiver: rx, - }) + (Sender { sender: tx }, Receiver { receiver: rx }) } diff --git 
a/rpc/src/v1/helpers/external_signer/signing_queue.rs b/rpc/src/v1/helpers/external_signer/signing_queue.rs index 00a459a86..102477cea 100644 --- a/rpc/src/v1/helpers/external_signer/signing_queue.rs +++ b/rpc/src/v1/helpers/external_signer/signing_queue.rs @@ -16,12 +16,16 @@ use std::collections::BTreeMap; +use super::oneshot; use ethereum_types::U256; use parking_lot::{Mutex, RwLock}; -use super::oneshot; -use v1::helpers::errors; -use v1::helpers::requests::{ConfirmationRequest, ConfirmationPayload}; -use v1::types::{ConfirmationResponse, Origin}; +use v1::{ + helpers::{ + errors, + requests::{ConfirmationPayload, ConfirmationRequest}, + }, + types::{ConfirmationResponse, Origin}, +}; use jsonrpc_core::Error; @@ -31,20 +35,20 @@ pub type ConfirmationResult = Result; /// Possible events happening in the queue that can be listened to. #[derive(Debug, PartialEq, Clone)] pub enum QueueEvent { - /// Receiver should stop work upon receiving `Finish` message. - Finish, - /// Informs about new request. - NewRequest(U256), - /// Request rejected. - RequestRejected(U256), - /// Request resolved. - RequestConfirmed(U256), + /// Receiver should stop work upon receiving `Finish` message. + Finish, + /// Informs about new request. + NewRequest(U256), + /// Request rejected. + RequestRejected(U256), + /// Request resolved. + RequestConfirmed(U256), } /// Defines possible errors when inserting to queue #[derive(Debug, PartialEq)] pub enum QueueAddError { - LimitReached, + LimitReached, } // TODO [todr] to consider: timeout instead of limit? @@ -52,241 +56,275 @@ pub const QUEUE_LIMIT: usize = 50; /// A queue of transactions awaiting to be confirmed and signed. pub trait SigningQueue: Send + Sync { - /// Add new request to the queue. - /// Returns a `Result` wrapping `ConfirmationReceiver` together with it's unique id in the queue. - /// `ConfirmationReceiver` is a `Future` awaiting for resolution of the given request. 
- fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<(U256, ConfirmationReceiver), QueueAddError>; + /// Add new request to the queue. + /// Returns a `Result` wrapping `ConfirmationReceiver` together with it's unique id in the queue. + /// `ConfirmationReceiver` is a `Future` awaiting for resolution of the given request. + fn add_request( + &self, + request: ConfirmationPayload, + origin: Origin, + ) -> Result<(U256, ConfirmationReceiver), QueueAddError>; - /// Notifies possible token holders that request was rejected. - fn request_rejected(&self, sender: ConfirmationSender) -> Option; + /// Notifies possible token holders that request was rejected. + fn request_rejected(&self, sender: ConfirmationSender) -> Option; - /// Notifies possible token holders that request was confirmed and given hash was assigned. - fn request_confirmed(&self, sender: ConfirmationSender, result: ConfirmationResult) -> Option; + /// Notifies possible token holders that request was confirmed and given hash was assigned. + fn request_confirmed( + &self, + sender: ConfirmationSender, + result: ConfirmationResult, + ) -> Option; - /// Put a request taken from `SigningQueue::take` back to the queue. - fn request_untouched(&self, sender: ConfirmationSender); + /// Put a request taken from `SigningQueue::take` back to the queue. + fn request_untouched(&self, sender: ConfirmationSender); - /// Returns and removes a request if it is contained in the queue. - fn take(&self, id: &U256) -> Option; + /// Returns and removes a request if it is contained in the queue. + fn take(&self, id: &U256) -> Option; - /// Return copy of all the requests in the queue. - fn requests(&self) -> Vec; + /// Return copy of all the requests in the queue. + fn requests(&self) -> Vec; - /// Returns number of requests awaiting confirmation. - fn len(&self) -> usize; + /// Returns number of requests awaiting confirmation. 
+ fn len(&self) -> usize; - /// Returns true if there are no requests awaiting confirmation. - fn is_empty(&self) -> bool; + /// Returns true if there are no requests awaiting confirmation. + fn is_empty(&self) -> bool; } /// Confirmation request information with result notifier. pub struct ConfirmationSender { - /// Confirmation request information. - pub request: ConfirmationRequest, + /// Confirmation request information. + pub request: ConfirmationRequest, - sender: oneshot::Sender, + sender: oneshot::Sender, } /// Receiving end of the Confirmation channel; can be used as a `Future` to await for `ConfirmationRequest` /// being processed and turned into `ConfirmationOutcome` -pub type ConfirmationReceiver = oneshot::Receiver; +pub type ConfirmationReceiver = oneshot::Receiver; /// Queue for all unconfirmed requests. #[derive(Default)] pub struct ConfirmationsQueue { - id: Mutex, - queue: RwLock>, - on_event: RwLock () + Send + Sync>>>, + id: Mutex, + queue: RwLock>, + on_event: RwLock () + Send + Sync>>>, } impl ConfirmationsQueue { - /// Adds a queue listener. For each event, `listener` callback will be invoked. - pub fn on_event () + Send + Sync + 'static>(&self, listener: F) { - self.on_event.write().push(Box::new(listener)); - } + /// Adds a queue listener. For each event, `listener` callback will be invoked. + pub fn on_event () + Send + Sync + 'static>(&self, listener: F) { + self.on_event.write().push(Box::new(listener)); + } - /// Notifies consumer that the communcation is over. - /// No more events will be sent after this function is invoked. - pub fn finish(&self) { - self.notify_message(QueueEvent::Finish); - self.on_event.write().clear(); - } + /// Notifies consumer that the communcation is over. + /// No more events will be sent after this function is invoked. + pub fn finish(&self) { + self.notify_message(QueueEvent::Finish); + self.on_event.write().clear(); + } - /// Notifies `ConfirmationReceiver` holder about the result given a request. 
- fn notify_result(&self, sender: ConfirmationSender, result: Option) -> Option { - // notify receiver about the event - self.notify_message(result.clone().map_or_else( - || QueueEvent::RequestRejected(sender.request.id), - |_| QueueEvent::RequestConfirmed(sender.request.id) - )); + /// Notifies `ConfirmationReceiver` holder about the result given a request. + fn notify_result( + &self, + sender: ConfirmationSender, + result: Option, + ) -> Option { + // notify receiver about the event + self.notify_message(result.clone().map_or_else( + || QueueEvent::RequestRejected(sender.request.id), + |_| QueueEvent::RequestConfirmed(sender.request.id), + )); - // notify confirmation receiver about resolution - let result = result.ok_or_else(errors::request_rejected); - sender.sender.send(result); + // notify confirmation receiver about resolution + let result = result.ok_or_else(errors::request_rejected); + sender.sender.send(result); - Some(sender.request) - } + Some(sender.request) + } - /// Notifies receiver about the event happening in this queue. - fn notify_message(&self, message: QueueEvent) { - for listener in &*self.on_event.read() { - listener(message.clone()) - } - } + /// Notifies receiver about the event happening in this queue. 
+ fn notify_message(&self, message: QueueEvent) { + for listener in &*self.on_event.read() { + listener(message.clone()) + } + } } impl Drop for ConfirmationsQueue { - fn drop(&mut self) { - self.finish(); - } + fn drop(&mut self) { + self.finish(); + } } impl SigningQueue for ConfirmationsQueue { - fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<(U256, ConfirmationReceiver), QueueAddError> { - if self.len() > QUEUE_LIMIT { - return Err(QueueAddError::LimitReached); - } + fn add_request( + &self, + request: ConfirmationPayload, + origin: Origin, + ) -> Result<(U256, ConfirmationReceiver), QueueAddError> { + if self.len() > QUEUE_LIMIT { + return Err(QueueAddError::LimitReached); + } - // Increment id - let id = { - let mut last_id = self.id.lock(); - *last_id += U256::from(1); - *last_id - }; - // Add request to queue - let res = { - debug!(target: "own_tx", "Signer: New entry ({:?}) in confirmation queue.", id); - trace!(target: "own_tx", "Signer: ({:?}) : {:?}", id, request); + // Increment id + let id = { + let mut last_id = self.id.lock(); + *last_id += U256::from(1); + *last_id + }; + // Add request to queue + let res = { + debug!(target: "own_tx", "Signer: New entry ({:?}) in confirmation queue.", id); + trace!(target: "own_tx", "Signer: ({:?}) : {:?}", id, request); - let mut queue = self.queue.write(); - let (sender, receiver) = oneshot::oneshot::(); + let mut queue = self.queue.write(); + let (sender, receiver) = oneshot::oneshot::(); - queue.insert(id, ConfirmationSender { - sender, - request: ConfirmationRequest { - id, - payload: request, - origin, - }, - }); - (id, receiver) - }; - // Notify listeners - self.notify_message(QueueEvent::NewRequest(id)); - Ok(res) - } + queue.insert( + id, + ConfirmationSender { + sender, + request: ConfirmationRequest { + id, + payload: request, + origin, + }, + }, + ); + (id, receiver) + }; + // Notify listeners + self.notify_message(QueueEvent::NewRequest(id)); + Ok(res) + } - fn 
take(&self, id: &U256) -> Option { - self.queue.write().remove(id) - } + fn take(&self, id: &U256) -> Option { + self.queue.write().remove(id) + } - fn request_rejected(&self, sender: ConfirmationSender) -> Option { - debug!(target: "own_tx", "Signer: Request rejected ({:?}).", sender.request.id); - self.notify_result(sender, None) - } + fn request_rejected(&self, sender: ConfirmationSender) -> Option { + debug!(target: "own_tx", "Signer: Request rejected ({:?}).", sender.request.id); + self.notify_result(sender, None) + } - fn request_confirmed(&self, sender: ConfirmationSender, result: ConfirmationResult) -> Option { - debug!(target: "own_tx", "Signer: Request confirmed ({:?}).", sender.request.id); - self.notify_result(sender, Some(result)) - } + fn request_confirmed( + &self, + sender: ConfirmationSender, + result: ConfirmationResult, + ) -> Option { + debug!(target: "own_tx", "Signer: Request confirmed ({:?}).", sender.request.id); + self.notify_result(sender, Some(result)) + } - fn request_untouched(&self, sender: ConfirmationSender) { - self.queue.write().insert(sender.request.id, sender); - } + fn request_untouched(&self, sender: ConfirmationSender) { + self.queue.write().insert(sender.request.id, sender); + } - fn requests(&self) -> Vec { - let queue = self.queue.read(); - queue.values().map(|sender| sender.request.clone()).collect() - } + fn requests(&self) -> Vec { + let queue = self.queue.read(); + queue + .values() + .map(|sender| sender.request.clone()) + .collect() + } - fn len(&self) -> usize { - let queue = self.queue.read(); - queue.len() - } + fn len(&self) -> usize { + let queue = self.queue.read(); + queue.len() + } - fn is_empty(&self) -> bool { - let queue = self.queue.read(); - queue.is_empty() - } + fn is_empty(&self) -> bool { + let queue = self.queue.read(); + queue.is_empty() + } } #[cfg(test)] mod test { - use std::sync::Arc; - use ethereum_types::{U256, Address}; - use parking_lot::Mutex; - use jsonrpc_core::futures::Future; - use 
v1::helpers::external_signer::{SigningQueue, ConfirmationsQueue, QueueEvent}; - use v1::helpers::{FilledTransactionRequest, ConfirmationPayload}; - use v1::types::ConfirmationResponse; + use ethereum_types::{Address, U256}; + use jsonrpc_core::futures::Future; + use parking_lot::Mutex; + use std::sync::Arc; + use v1::{ + helpers::{ + external_signer::{ConfirmationsQueue, QueueEvent, SigningQueue}, + ConfirmationPayload, FilledTransactionRequest, + }, + types::ConfirmationResponse, + }; - fn request() -> ConfirmationPayload { - ConfirmationPayload::SendTransaction(FilledTransactionRequest { - from: Address::from(1), - used_default_from: false, - to: Some(Address::from(2)), - gas_price: 0.into(), - gas: 10_000.into(), - value: 10_000_000.into(), - data: vec![], - nonce: None, - condition: None, - }) - } + fn request() -> ConfirmationPayload { + ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: Address::from(1), + used_default_from: false, + to: Some(Address::from(2)), + gas_price: 0.into(), + gas: 10_000.into(), + value: 10_000_000.into(), + data: vec![], + nonce: None, + condition: None, + }) + } - #[test] - fn should_wait_for_hash() { - // given - let queue = Arc::new(ConfirmationsQueue::default()); - let request = request(); + #[test] + fn should_wait_for_hash() { + // given + let queue = Arc::new(ConfirmationsQueue::default()); + let request = request(); - // when - let (id, future) = queue.add_request(request, Default::default()).unwrap(); - let sender = queue.take(&id).unwrap(); - queue.request_confirmed(sender, Ok(ConfirmationResponse::SendTransaction(1.into()))); + // when + let (id, future) = queue.add_request(request, Default::default()).unwrap(); + let sender = queue.take(&id).unwrap(); + queue.request_confirmed(sender, Ok(ConfirmationResponse::SendTransaction(1.into()))); - // then - let confirmation = future.wait().unwrap(); - assert_eq!(confirmation, Ok(ConfirmationResponse::SendTransaction(1.into()))); - } + // then + let 
confirmation = future.wait().unwrap(); + assert_eq!( + confirmation, + Ok(ConfirmationResponse::SendTransaction(1.into())) + ); + } - #[test] - fn should_receive_notification() { - // given - let received = Arc::new(Mutex::new(vec![])); - let queue = Arc::new(ConfirmationsQueue::default()); - let request = request(); + #[test] + fn should_receive_notification() { + // given + let received = Arc::new(Mutex::new(vec![])); + let queue = Arc::new(ConfirmationsQueue::default()); + let request = request(); - // when - let r = received.clone(); - queue.on_event(move |notification| { - r.lock().push(notification); - }); - let _future = queue.add_request(request, Default::default()).unwrap(); - queue.finish(); + // when + let r = received.clone(); + queue.on_event(move |notification| { + r.lock().push(notification); + }); + let _future = queue.add_request(request, Default::default()).unwrap(); + queue.finish(); - // then - let r = received.lock(); - assert_eq!(r[0], QueueEvent::NewRequest(U256::from(1))); - assert_eq!(r[1], QueueEvent::Finish); - assert_eq!(r.len(), 2); - } + // then + let r = received.lock(); + assert_eq!(r[0], QueueEvent::NewRequest(U256::from(1))); + assert_eq!(r[1], QueueEvent::Finish); + assert_eq!(r.len(), 2); + } - #[test] - fn should_add_transactions() { - // given - let queue = ConfirmationsQueue::default(); - let request = request(); + #[test] + fn should_add_transactions() { + // given + let queue = ConfirmationsQueue::default(); + let request = request(); - // when - let _future = queue.add_request(request.clone(), Default::default()).unwrap(); - let all = queue.requests(); + // when + let _future = queue + .add_request(request.clone(), Default::default()) + .unwrap(); + let all = queue.requests(); - // then - assert_eq!(all.len(), 1); - let el = all.get(0).unwrap(); - assert_eq!(el.id, U256::from(1)); - assert_eq!(el.payload, request); - } + // then + assert_eq!(all.len(), 1); + let el = all.get(0).unwrap(); + assert_eq!(el.id, U256::from(1)); 
+ assert_eq!(el.payload, request); + } } - diff --git a/rpc/src/v1/helpers/fake_sign.rs b/rpc/src/v1/helpers/fake_sign.rs index d93408b89..5867bc218 100644 --- a/rpc/src/v1/helpers/fake_sign.rs +++ b/rpc/src/v1/helpers/fake_sign.rs @@ -14,24 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use types::transaction::{Transaction, SignedTransaction, Action}; use std::cmp::min; +use types::transaction::{Action, SignedTransaction, Transaction}; use ethereum_types::U256; use jsonrpc_core::Error; use v1::helpers::CallRequest; pub fn sign_call(request: CallRequest) -> Result { - let max_gas = U256::from(500_000_000); - let gas = min(request.gas.unwrap_or(max_gas), max_gas); - let from = request.from.unwrap_or_default(); + let max_gas = U256::from(500_000_000); + let gas = min(request.gas.unwrap_or(max_gas), max_gas); + let from = request.from.unwrap_or_default(); - Ok(Transaction { - nonce: request.nonce.unwrap_or_default(), - action: request.to.map_or(Action::Create, Action::Call), - gas, - gas_price: request.gas_price.unwrap_or_default(), - value: request.value.unwrap_or_default(), - data: request.data.unwrap_or_default(), - }.fake_sign(from)) + Ok(Transaction { + nonce: request.nonce.unwrap_or_default(), + action: request.to.map_or(Action::Create, Action::Call), + gas, + gas_price: request.gas_price.unwrap_or_default(), + value: request.value.unwrap_or_default(), + data: request.data.unwrap_or_default(), + } + .fake_sign(from)) } diff --git a/rpc/src/v1/helpers/ipfs.rs b/rpc/src/v1/helpers/ipfs.rs index 93110dbf3..34a09a04c 100644 --- a/rpc/src/v1/helpers/ipfs.rs +++ b/rpc/src/v1/helpers/ipfs.rs @@ -16,17 +16,17 @@ //! IPFS utility functions -use multihash; +use super::errors; use cid::{Cid, Codec, Version}; use crypto::digest; use jsonrpc_core::Error; +use multihash; use v1::types::Bytes; -use super::errors; /// Compute CIDv0 from protobuf encoded bytes. 
pub fn cid(content: Bytes) -> Result { - let hash = digest::sha256(&content.0); - let mh = multihash::encode(multihash::Hash::SHA2256, &*hash).map_err(errors::encoding)?; - let cid = Cid::new(Codec::DagProtobuf, Version::V0, &mh); - Ok(cid.to_string()) + let hash = digest::sha256(&content.0); + let mh = multihash::encode(multihash::Hash::SHA2256, &*hash).map_err(errors::encoding)?; + let cid = Cid::new(Codec::DagProtobuf, Version::V0, &mh); + Ok(cid.to_string()) } diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 819f56645..29aeda5cb 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -16,593 +16,717 @@ //! Helpers for fetching blockchain data either from the light client or the network. -use std::clone::Clone; -use std::cmp; -use std::collections::BTreeMap; -use std::sync::Arc; +use std::{clone::Clone, cmp, collections::BTreeMap, sync::Arc}; -use types::basic_account::BasicAccount; -use types::encoded; -use types::filter::Filter as EthcoreFilter; -use types::ids::BlockId; -use types::receipt::Receipt; use ethcore::executed::ExecutionError; - -use jsonrpc_core::{Result, Error}; -use jsonrpc_core::futures::{future, Future}; -use jsonrpc_core::futures::future::Either; - -use light::cache::Cache; -use light::client::LightChainClient; -use light::{cht, MAX_HEADERS_PER_REQUEST}; -use light::on_demand::{ - request, OnDemandRequester, HeaderRef, Request as OnDemandRequest, - Response as OnDemandResponse, ExecutionResult, +use types::{ + basic_account::BasicAccount, encoded, filter::Filter as EthcoreFilter, ids::BlockId, + receipt::Receipt, }; -use light::on_demand::error::Error as OnDemandError; -use light::request::Field; -use light::TransactionQueue; -use sync::{LightNetworkDispatcher, ManageNetwork, LightSyncProvider}; +use jsonrpc_core::{ + futures::{future, future::Either, Future}, + Error, Result, +}; + +use light::{ + cache::Cache, + cht, + client::LightChainClient, + on_demand::{ + 
error::Error as OnDemandError, request, ExecutionResult, HeaderRef, OnDemandRequester, + Request as OnDemandRequest, Response as OnDemandResponse, + }, + request::Field, + TransactionQueue, MAX_HEADERS_PER_REQUEST, +}; + +use sync::{LightNetworkDispatcher, LightSyncProvider, ManageNetwork}; use ethereum_types::{Address, U256}; +use fastmap::H256FastMap; use hash::H256; use parking_lot::{Mutex, RwLock}; -use fastmap::H256FastMap; -use types::transaction::{Action, Transaction as EthTransaction, PendingTransaction, SignedTransaction, LocalizedTransaction}; +use types::transaction::{ + Action, LocalizedTransaction, PendingTransaction, SignedTransaction, + Transaction as EthTransaction, +}; -use v1::helpers::{CallRequest as CallRequestHelper, errors, dispatch}; -use v1::types::{BlockNumber, CallRequest, Log, Transaction}; +use v1::{ + helpers::{dispatch, errors, CallRequest as CallRequestHelper}, + types::{BlockNumber, CallRequest, Log, Transaction}, +}; -const NO_INVALID_BACK_REFS_PROOF: &str = "Fails only on invalid back-references; back-references here known to be valid; qed"; -const WRONG_RESPONSE_AMOUNT_TYPE_PROOF: &str = "responses correspond directly with requests in amount and type; qed"; +const NO_INVALID_BACK_REFS_PROOF: &str = + "Fails only on invalid back-references; back-references here known to be valid; qed"; +const WRONG_RESPONSE_AMOUNT_TYPE_PROOF: &str = + "responses correspond directly with requests in amount and type; qed"; const DEFAULT_GAS_PRICE: u64 = 21_000; -pub fn light_all_transactions(dispatch: &Arc>) -> impl Iterator +pub fn light_all_transactions( + dispatch: &Arc>, +) -> impl Iterator where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - let txq = dispatch.transaction_queue.read(); - let chain_info = dispatch.client.chain_info(); + let txq = 
dispatch.transaction_queue.read(); + let chain_info = dispatch.client.chain_info(); - let current = txq.ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp); - let future = txq.future_transactions(chain_info.best_block_number, chain_info.best_block_timestamp); - current.into_iter().chain(future.into_iter()) + let current = txq.ready_transactions( + chain_info.best_block_number, + chain_info.best_block_timestamp, + ); + let future = txq.future_transactions( + chain_info.best_block_number, + chain_info.best_block_timestamp, + ); + current.into_iter().chain(future.into_iter()) } /// Helper for fetching blockchain data either from the light client or the network /// as necessary. pub struct LightFetch where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - /// The light client. - pub client: Arc, - /// The on-demand request service. - pub on_demand: Arc, - /// Handle to the network. - pub sync: Arc, - /// The light data cache. - pub cache: Arc>, - /// Gas Price percentile - pub gas_price_percentile: usize, + /// The light client. + pub client: Arc, + /// The on-demand request service. + pub on_demand: Arc, + /// Handle to the network. + pub sync: Arc, + /// The light data cache. 
+ pub cache: Arc>, + /// Gas Price percentile + pub gas_price_percentile: usize, } impl Clone for LightFetch where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - on_demand: self.on_demand.clone(), - sync: self.sync.clone(), - cache: self.cache.clone(), - gas_price_percentile: self.gas_price_percentile, - } - } + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + on_demand: self.on_demand.clone(), + sync: self.sync.clone(), + cache: self.cache.clone(), + gas_price_percentile: self.gas_price_percentile, + } + } } - /// Extract a transaction at given index. pub fn extract_transaction_at_index(block: encoded::Block, index: usize) -> Option { - block.transactions().into_iter().nth(index) - // Verify if transaction signature is correct. - .and_then(|tx| SignedTransaction::new(tx).ok()) - .map(|signed_tx| { - let (signed, sender, _) = signed_tx.deconstruct(); - let block_hash = block.hash(); - let block_number = block.number(); - let transaction_index = index; - let cached_sender = Some(sender); + block + .transactions() + .into_iter() + .nth(index) + // Verify if transaction signature is correct. 
+ .and_then(|tx| SignedTransaction::new(tx).ok()) + .map(|signed_tx| { + let (signed, sender, _) = signed_tx.deconstruct(); + let block_hash = block.hash(); + let block_number = block.number(); + let transaction_index = index; + let cached_sender = Some(sender); - LocalizedTransaction { - signed, - block_number, - block_hash, - transaction_index, - cached_sender, - } - }) - .map(Transaction::from_localized) + LocalizedTransaction { + signed, + block_number, + block_hash, + transaction_index, + cached_sender, + } + }) + .map(Transaction::from_localized) } // extract the header indicated by the given `HeaderRef` from the given responses. // fails only if they do not correspond. fn extract_header(res: &[OnDemandResponse], header: HeaderRef) -> Option { - match header { - HeaderRef::Stored(hdr) => Some(hdr), - HeaderRef::Unresolved(idx, _) => match res.get(idx) { - Some(&OnDemandResponse::HeaderByHash(ref hdr)) => Some(hdr.clone()), - _ => None, - }, - } + match header { + HeaderRef::Stored(hdr) => Some(hdr), + HeaderRef::Unresolved(idx, _) => match res.get(idx) { + Some(&OnDemandResponse::HeaderByHash(ref hdr)) => Some(hdr.clone()), + _ => None, + }, + } } impl LightFetch where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - // push the necessary requests onto the request chain to get the header by the given ID. - // yield a header reference which other requests can use. - fn make_header_requests(&self, id: BlockId, reqs: &mut Vec) -> Result { - if let Some(h) = self.client.block_header(id) { - return Ok(h.into()); - } + // push the necessary requests onto the request chain to get the header by the given ID. + // yield a header reference which other requests can use. 
+ fn make_header_requests( + &self, + id: BlockId, + reqs: &mut Vec, + ) -> Result { + if let Some(h) = self.client.block_header(id) { + return Ok(h.into()); + } - match id { - BlockId::Number(n) => { - let cht_root = cht::block_to_cht_number(n).and_then(|cn| self.client.cht_root(cn as usize)); - match cht_root { - None => Err(errors::unknown_block()), - Some(root) => { - let req = request::HeaderProof::new(n, root) + match id { + BlockId::Number(n) => { + let cht_root = + cht::block_to_cht_number(n).and_then(|cn| self.client.cht_root(cn as usize)); + match cht_root { + None => Err(errors::unknown_block()), + Some(root) => { + let req = request::HeaderProof::new(n, root) .expect("only fails for 0; client always stores genesis; client already queried; qed"); - let idx = reqs.len(); - let hash_ref = Field::back_ref(idx, 0); - reqs.push(req.into()); - reqs.push(request::HeaderByHash(hash_ref).into()); + let idx = reqs.len(); + let hash_ref = Field::back_ref(idx, 0); + reqs.push(req.into()); + reqs.push(request::HeaderByHash(hash_ref).into()); - Ok(HeaderRef::Unresolved(idx + 1, hash_ref)) - } - } - } - BlockId::Hash(h) => { - let idx = reqs.len(); - reqs.push(request::HeaderByHash(h.into()).into()); - Ok(HeaderRef::Unresolved(idx, h.into())) - } - _ => Err(errors::unknown_block()) // latest, earliest, and pending will have all already returned. - } - } + Ok(HeaderRef::Unresolved(idx + 1, hash_ref)) + } + } + } + BlockId::Hash(h) => { + let idx = reqs.len(); + reqs.push(request::HeaderByHash(h.into()).into()); + Ok(HeaderRef::Unresolved(idx, h.into())) + } + _ => Err(errors::unknown_block()), // latest, earliest, and pending will have all already returned. + } + } - /// Get a block header from the on demand service or client, or error. 
- pub fn header(&self, id: BlockId) -> impl Future + Send { - let mut reqs = Vec::new(); - let header_ref = match self.make_header_requests(id, &mut reqs) { - Ok(r) => r, - Err(e) => return Either::A(future::err(e)), - }; + /// Get a block header from the on demand service or client, or error. + pub fn header(&self, id: BlockId) -> impl Future + Send { + let mut reqs = Vec::new(); + let header_ref = match self.make_header_requests(id, &mut reqs) { + Ok(r) => r, + Err(e) => return Either::A(future::err(e)), + }; - Either::B(self.send_requests(reqs, |res| - extract_header(&res, header_ref) - .expect("these responses correspond to requests that header_ref belongs to \ - therefore it will not fail; qed") - )) - } + Either::B(self.send_requests(reqs, |res| { + extract_header(&res, header_ref).expect( + "these responses correspond to requests that header_ref belongs to \ + therefore it will not fail; qed", + ) + })) + } - /// Helper for getting contract code at a given block. - pub fn code(&self, address: Address, id: BlockId) -> impl Future, Error = Error> + Send { - let mut reqs = Vec::new(); - let header_ref = match self.make_header_requests(id, &mut reqs) { - Ok(r) => r, - Err(e) => return Either::A(future::err(e)), - }; + /// Helper for getting contract code at a given block. 
+ pub fn code( + &self, + address: Address, + id: BlockId, + ) -> impl Future, Error = Error> + Send { + let mut reqs = Vec::new(); + let header_ref = match self.make_header_requests(id, &mut reqs) { + Ok(r) => r, + Err(e) => return Either::A(future::err(e)), + }; - reqs.push(request::Account { header: header_ref.clone(), address }.into()); - let account_idx = reqs.len() - 1; - reqs.push(request::Code { header: header_ref, code_hash: Field::back_ref(account_idx, 0) }.into()); + reqs.push( + request::Account { + header: header_ref.clone(), + address, + } + .into(), + ); + let account_idx = reqs.len() - 1; + reqs.push( + request::Code { + header: header_ref, + code_hash: Field::back_ref(account_idx, 0), + } + .into(), + ); - Either::B(self.send_requests(reqs, |mut res| match res.pop() { - Some(OnDemandResponse::Code(code)) => code, - _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), - })) - } + Either::B(self.send_requests(reqs, |mut res| match res.pop() { + Some(OnDemandResponse::Code(code)) => code, + _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), + })) + } - /// Helper for getting account info at a given block. - /// `None` indicates the account doesn't exist at the given block. - pub fn account( - &self, - address: Address, - id: BlockId, - tx_queue: Arc> - ) -> impl Future, Error = Error> + Send { + /// Helper for getting account info at a given block. + /// `None` indicates the account doesn't exist at the given block. 
+ pub fn account( + &self, + address: Address, + id: BlockId, + tx_queue: Arc>, + ) -> impl Future, Error = Error> + Send { + let mut reqs = Vec::new(); + let header_ref = match self.make_header_requests(id, &mut reqs) { + Ok(r) => r, + Err(e) => return Either::A(future::err(e)), + }; - let mut reqs = Vec::new(); - let header_ref = match self.make_header_requests(id, &mut reqs) { - Ok(r) => r, - Err(e) => return Either::A(future::err(e)), - }; + reqs.push( + request::Account { + header: header_ref, + address, + } + .into(), + ); - reqs.push(request::Account { header: header_ref, address }.into()); + Either::B(self.send_requests(reqs, move |mut res| match res.pop() { + Some(OnDemandResponse::Account(maybe_account)) => { + if let Some(ref acc) = maybe_account { + let mut txq = tx_queue.write(); + txq.cull(address, acc.nonce); + } + maybe_account + } + _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), + })) + } - Either::B(self.send_requests(reqs, move |mut res| match res.pop() { - Some(OnDemandResponse::Account(maybe_account)) => { - if let Some(ref acc) = maybe_account { - let mut txq = tx_queue.write(); - txq.cull(address, acc.nonce); - } - maybe_account - } - _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), - })) - } + /// Helper for getting proved execution. + pub fn proved_read_only_execution( + &self, + req: CallRequest, + num: Option, + txq: Arc>, + ) -> impl Future + Send { + // (21000 G_transaction + 32000 G_create + some marginal to allow a few operations) + const START_GAS: u64 = 60_000; - /// Helper for getting proved execution. 
- pub fn proved_read_only_execution( - &self, - req: CallRequest, - num: Option, - txq: Arc> - ) -> impl Future + Send { + let (sync, on_demand, client) = ( + self.sync.clone(), + self.on_demand.clone(), + self.client.clone(), + ); + let req: CallRequestHelper = req.into(); - // (21000 G_transaction + 32000 G_create + some marginal to allow a few operations) - const START_GAS: u64 = 60_000; + // Note: Here we treat `Pending` as `Latest`. + // Since light clients don't produce pending blocks + // (they don't have state) we can safely fallback to `Latest`. + let id = match num.unwrap_or_default() { + BlockNumber::Num(n) => BlockId::Number(n), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Pending => { + warn!("`Pending` is deprecated and may be removed in future versions. Falling back to `Latest`"); + BlockId::Latest + } + }; - let (sync, on_demand, client) = (self.sync.clone(), self.on_demand.clone(), self.client.clone()); - let req: CallRequestHelper = req.into(); + let from = req.from.unwrap_or_default(); + let nonce_fut = match req.nonce { + Some(nonce) => Either::A(future::ok(Some(nonce))), + None => Either::B(self.account(from, id, txq).map(|acc| acc.map(|a| a.nonce))), + }; - // Note: Here we treat `Pending` as `Latest`. - // Since light clients don't produce pending blocks - // (they don't have state) we can safely fallback to `Latest`. - let id = match num.unwrap_or_default() { - BlockNumber::Num(n) => BlockId::Number(n), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => { - warn!("`Pending` is deprecated and may be removed in future versions. 
Falling back to `Latest`"); - BlockId::Latest - } - }; + let gas_price_fut = match req.gas_price { + Some(price) => Either::A(future::ok(price)), + None => Either::B(self.gas_price()), + }; - let from = req.from.unwrap_or_default(); - let nonce_fut = match req.nonce { - Some(nonce) => Either::A(future::ok(Some(nonce))), - None => Either::B(self.account(from, id, txq).map(|acc| acc.map(|a| a.nonce))), - }; + // if nonce resolves, this should too since it'll be in the LRU-cache. + let header_fut = self.header(id); - let gas_price_fut = match req.gas_price { - Some(price) => Either::A(future::ok(price)), - None => Either::B(self.gas_price()), - }; + // fetch missing transaction fields from the network. + Box::new( + nonce_fut + .join(gas_price_fut) + .and_then(move |(nonce, gas_price)| { + future::done(Ok(( + req.gas.is_some(), + EthTransaction { + nonce: nonce.unwrap_or_default(), + action: req.to.map_or(Action::Create, Action::Call), + gas: req.gas.unwrap_or_else(|| START_GAS.into()), + gas_price, + value: req.value.unwrap_or_default(), + data: req.data.unwrap_or_default(), + }, + ))) + }) + .join(header_fut) + .and_then(move |((gas_known, tx), hdr)| { + // then request proved execution. + // TODO: get last-hashes from network. + let hash = hdr.hash(); + let env_info = match client.env_info(BlockId::Hash(hash)) { + Some(env_info) => env_info, + _ => return Either::A(future::err(errors::unknown_block())), + }; - // if nonce resolves, this should too since it'll be in the LRU-cache. - let header_fut = self.header(id); + Either::B(execute_read_only_tx( + gas_known, + ExecuteParams { + from, + tx, + hdr, + env_info, + engine: client.engine().clone(), + on_demand, + sync, + }, + )) + }), + ) + } - // fetch missing transaction fields from the network. 
- Box::new(nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| { - future::done( - Ok((req.gas.is_some(), EthTransaction { - nonce: nonce.unwrap_or_default(), - action: req.to.map_or(Action::Create, Action::Call), - gas: req.gas.unwrap_or_else(|| START_GAS.into()), - gas_price, - value: req.value.unwrap_or_default(), - data: req.data.unwrap_or_default(), - })) - ) - }).join(header_fut).and_then(move |((gas_known, tx), hdr)| { - // then request proved execution. - // TODO: get last-hashes from network. - let hash = hdr.hash(); - let env_info = match client.env_info(BlockId::Hash(hash)) { - Some(env_info) => env_info, - _ => return Either::A(future::err(errors::unknown_block())), - }; + /// Helper to fetch the corpus gas price from 1) the cache 2) the network then it tries to estimate the percentile + /// using `gas_price_percentile` if the estimated percentile is zero the `DEFAULT_GAS_PRICE` is returned + pub fn gas_price(&self) -> impl Future + Send { + let gas_price_percentile = self.gas_price_percentile; - Either::B(execute_read_only_tx(gas_known, ExecuteParams { - from, - tx, - hdr, - env_info, - engine: client.engine().clone(), - on_demand, - sync, - })) - })) - } + dispatch::light::fetch_gas_price_corpus( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + ) + .map(move |corp| { + corp.percentile(gas_price_percentile) + .map_or_else(|| DEFAULT_GAS_PRICE.into(), |percentile| *percentile) + }) + } - /// Helper to fetch the corpus gas price from 1) the cache 2) the network then it tries to estimate the percentile - /// using `gas_price_percentile` if the estimated percentile is zero the `DEFAULT_GAS_PRICE` is returned - pub fn gas_price(&self) -> impl Future + Send { - let gas_price_percentile = self.gas_price_percentile; + /// Get a block itself. Fails on unknown block ID. 
+ pub fn block(&self, id: BlockId) -> impl Future + Send { + let mut reqs = Vec::new(); + let header_ref = match self.make_header_requests(id, &mut reqs) { + Ok(r) => r, + Err(e) => return Either::A(future::err(e)), + }; - dispatch::light::fetch_gas_price_corpus( - self.sync.clone(), - self.client.clone(), - self.on_demand.clone(), - self.cache.clone(), - ) - .map(move |corp| { - corp.percentile(gas_price_percentile) - .map_or_else(|| DEFAULT_GAS_PRICE.into(), |percentile| *percentile) - }) - } + reqs.push(request::Body(header_ref).into()); - /// Get a block itself. Fails on unknown block ID. - pub fn block(&self, id: BlockId) -> impl Future + Send { - let mut reqs = Vec::new(); - let header_ref = match self.make_header_requests(id, &mut reqs) { - Ok(r) => r, - Err(e) => return Either::A(future::err(e)), - }; + Either::B(self.send_requests(reqs, |mut res| match res.pop() { + Some(OnDemandResponse::Body(b)) => b, + _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), + })) + } - reqs.push(request::Body(header_ref).into()); + /// Get the block receipts. Fails on unknown block ID. + pub fn receipts(&self, id: BlockId) -> impl Future, Error = Error> + Send { + let mut reqs = Vec::new(); + let header_ref = match self.make_header_requests(id, &mut reqs) { + Ok(r) => r, + Err(e) => return Either::A(future::err(e)), + }; - Either::B(self.send_requests(reqs, |mut res| match res.pop() { - Some(OnDemandResponse::Body(b)) => b, - _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), - })) - } + reqs.push(request::BlockReceipts(header_ref).into()); - /// Get the block receipts. Fails on unknown block ID. 
- pub fn receipts(&self, id: BlockId) -> impl Future, Error = Error> + Send { - let mut reqs = Vec::new(); - let header_ref = match self.make_header_requests(id, &mut reqs) { - Ok(r) => r, - Err(e) => return Either::A(future::err(e)), - }; + Either::B(self.send_requests(reqs, |mut res| match res.pop() { + Some(OnDemandResponse::Receipts(b)) => b, + _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), + })) + } - reqs.push(request::BlockReceipts(header_ref).into()); + pub fn logs_no_tx_hash( + &self, + filter: EthcoreFilter, + ) -> impl Future, Error = Error> + Send { + use jsonrpc_core::futures::stream::{self, Stream}; - Either::B(self.send_requests(reqs, |mut res| match res.pop() { - Some(OnDemandResponse::Receipts(b)) => b, - _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), - })) - } + const MAX_BLOCK_RANGE: u64 = 1000; - pub fn logs_no_tx_hash(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { - use jsonrpc_core::futures::stream::{self, Stream}; + let fetcher = self.clone(); + self.headers_range_by_block_id(filter.from_block, filter.to_block, MAX_BLOCK_RANGE) + .and_then(move |mut headers| { + if headers.is_empty() { + return Either::A(future::ok(Vec::new())); + } - const MAX_BLOCK_RANGE: u64 = 1000; + let on_demand = &fetcher.on_demand; - let fetcher = self.clone(); - self.headers_range_by_block_id(filter.from_block, filter.to_block, MAX_BLOCK_RANGE) - .and_then(move |mut headers| { - if headers.is_empty() { - return Either::A(future::ok(Vec::new())); - } + let maybe_future = fetcher.sync.with_context(move |ctx| { + // find all headers which match the filter, and fetch the receipts for each one. + // match them with their numbers for easy sorting later. + let bit_combos = filter.bloom_possibilities(); + let receipts_futures: Vec<_> = headers + .drain(..) 
+ .filter(|ref hdr| { + let hdr_bloom = hdr.log_bloom(); + bit_combos + .iter() + .any(|bloom| hdr_bloom.contains_bloom(bloom)) + }) + .map(|hdr| (hdr.number(), hdr.hash(), request::BlockReceipts(hdr.into()))) + .map(|(num, hash, req)| { + on_demand + .request(ctx, req) + .expect(NO_INVALID_BACK_REFS_PROOF) + .map(move |x| (num, hash, x)) + }) + .collect(); - let on_demand = &fetcher.on_demand; + // as the receipts come in, find logs within them which match the filter. + // insert them into a BTreeMap to maintain order by number and block index. + stream::futures_unordered(receipts_futures) + .fold( + BTreeMap::new(), + move |mut matches, (num, hash, receipts)| { + let mut block_index: usize = 0; + for (transaction_index, receipt) in receipts.into_iter().enumerate() + { + for (transaction_log_index, log) in + receipt.logs.into_iter().enumerate() + { + if filter.matches(&log) { + matches.insert( + (num, block_index), + Log { + address: log.address, + topics: log + .topics + .into_iter() + .map(Into::into) + .collect(), + data: log.data.into(), + block_hash: Some(hash), + block_number: Some(num.into()), + // No way to easily retrieve transaction hash, so let's just skip it. + transaction_hash: None, + transaction_index: Some( + transaction_index.into(), + ), + log_index: Some(block_index.into()), + transaction_log_index: Some( + transaction_log_index.into(), + ), + log_type: "mined".into(), + removed: false, + }, + ); + } + block_index += 1; + } + } + future::ok::<_, OnDemandError>(matches) + }, + ) + .map_err(errors::on_demand_error) + .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) + }); - let maybe_future = fetcher.sync.with_context(move |ctx| { - // find all headers which match the filter, and fetch the receipts for each one. - // match them with their numbers for easy sorting later. - let bit_combos = filter.bloom_possibilities(); - let receipts_futures: Vec<_> = headers.drain(..) 
- .filter(|ref hdr| { - let hdr_bloom = hdr.log_bloom(); - bit_combos.iter().any(|bloom| hdr_bloom.contains_bloom(bloom)) - }) - .map(|hdr| (hdr.number(), hdr.hash(), request::BlockReceipts(hdr.into()))) - .map(|(num, hash, req)| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS_PROOF).map(move |x| (num, hash, x))) - .collect(); + match maybe_future { + Some(fut) => Either::B(Either::A(fut)), + None => Either::B(Either::B(future::err(errors::network_disabled()))), + } + }) + } - // as the receipts come in, find logs within them which match the filter. - // insert them into a BTreeMap to maintain order by number and block index. - stream::futures_unordered(receipts_futures) - .fold(BTreeMap::new(), move |mut matches, (num, hash, receipts)| { - let mut block_index: usize = 0; - for (transaction_index, receipt) in receipts.into_iter().enumerate() { - for (transaction_log_index, log) in receipt.logs.into_iter().enumerate() { - if filter.matches(&log) { - matches.insert((num, block_index), Log { - address: log.address, - topics: log.topics.into_iter().map(Into::into).collect(), - data: log.data.into(), - block_hash: Some(hash), - block_number: Some(num.into()), - // No way to easily retrieve transaction hash, so let's just skip it. - transaction_hash: None, - transaction_index: Some(transaction_index.into()), - log_index: Some(block_index.into()), - transaction_log_index: Some(transaction_log_index.into()), - log_type: "mined".into(), - removed: false, - }); - } - block_index += 1; - } - } - future::ok::<_, OnDemandError>(matches) - }) - .map_err(errors::on_demand_error) - .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) - }); + /// Get transaction logs + pub fn logs( + &self, + filter: EthcoreFilter, + ) -> impl Future, Error = Error> + Send { + use jsonrpc_core::futures::stream::{self, Stream}; + let fetcher_block = self.clone(); + self.logs_no_tx_hash(filter) + // retrieve transaction hash. 
+ .and_then(move |mut result| { + let mut blocks = BTreeMap::new(); + for log in result.iter() { + let block_hash = log + .block_hash + .as_ref() + .expect("Previously initialized with value; qed"); + blocks + .entry(*block_hash) + .or_insert_with(|| fetcher_block.block(BlockId::Hash(*block_hash))); + } + // future get blocks (unordered it) + stream::futures_unordered(blocks.into_iter().map(|(_, v)| v)) + .collect() + .map(move |blocks| { + let transactions_per_block: BTreeMap<_, _> = blocks + .iter() + .map(|block| (block.hash(), block.transactions())) + .collect(); + for log in result.iter_mut() { + let log_index = log + .transaction_index + .expect("Previously initialized with value; qed"); + let block_hash = log + .block_hash + .expect("Previously initialized with value; qed"); + let tx_hash = transactions_per_block + .get(&block_hash) + // transaction index is from an enumerate call in log common so not need to check value + .and_then(|txs| txs.get(log_index.as_usize())) + .map(types::transaction::UnverifiedTransaction::hash); + log.transaction_hash = tx_hash; + } + result + }) + }) + } - match maybe_future { - Some(fut) => Either::B(Either::A(fut)), - None => Either::B(Either::B(future::err(errors::network_disabled()))), - } - }) - } + // Get a transaction by hash. also returns the index in the block. + // Only returns transactions in the canonical chain. + pub fn transaction_by_hash( + &self, + tx_hash: H256, + ) -> impl Future, Error = Error> + Send { + let params = (self.sync.clone(), self.on_demand.clone()); + let fetcher: Self = self.clone(); - /// Get transaction logs - pub fn logs(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { - use jsonrpc_core::futures::stream::{self, Stream}; - let fetcher_block = self.clone(); - self.logs_no_tx_hash(filter) - // retrieve transaction hash. 
- .and_then(move |mut result| { - let mut blocks = BTreeMap::new(); - for log in result.iter() { - let block_hash = log.block_hash.as_ref().expect("Previously initialized with value; qed"); - blocks.entry(*block_hash).or_insert_with(|| { - fetcher_block.block(BlockId::Hash(*block_hash)) - }); - } - // future get blocks (unordered it) - stream::futures_unordered(blocks.into_iter().map(|(_, v)| v)).collect().map(move |blocks| { - let transactions_per_block: BTreeMap<_, _> = blocks.iter() - .map(|block| (block.hash(), block.transactions())).collect(); - for log in result.iter_mut() { - let log_index = log.transaction_index.expect("Previously initialized with value; qed"); - let block_hash = log.block_hash.expect("Previously initialized with value; qed"); - let tx_hash = transactions_per_block.get(&block_hash) - // transaction index is from an enumerate call in log common so not need to check value - .and_then(|txs| txs.get(log_index.as_usize())) - .map(types::transaction::UnverifiedTransaction::hash); - log.transaction_hash = tx_hash; - } - result - }) - }) - } + Box::new(future::loop_fn(params, move |(sync, on_demand)| { + let maybe_future = sync.with_context(|ctx| { + let req = request::TransactionIndex(tx_hash.into()); + on_demand.request(ctx, req) + }); - // Get a transaction by hash. also returns the index in the block. - // Only returns transactions in the canonical chain. 
- pub fn transaction_by_hash(&self, tx_hash: H256) - -> impl Future, Error = Error> + Send - { - let params = (self.sync.clone(), self.on_demand.clone()); - let fetcher: Self = self.clone(); + let eventual_index = match maybe_future { + Some(e) => e + .expect(NO_INVALID_BACK_REFS_PROOF) + .map_err(errors::on_demand_error), + None => return Either::A(future::err(errors::network_disabled())), + }; - Box::new(future::loop_fn(params, move |(sync, on_demand)| { - let maybe_future = sync.with_context(|ctx| { - let req = request::TransactionIndex(tx_hash.into()); - on_demand.request(ctx, req) - }); + let fetcher = fetcher.clone(); + let extract_transaction = eventual_index.and_then(move |index| { + // check that the block is known by number. + // that ensures that it is within the chain that we are aware of. + fetcher + .block(BlockId::Number(index.num)) + .then(move |blk| match blk { + Ok(blk) => { + // if the block is known by number, make sure the + // index from earlier isn't garbage. - let eventual_index = match maybe_future { - Some(e) => e.expect(NO_INVALID_BACK_REFS_PROOF).map_err(errors::on_demand_error), - None => return Either::A(future::err(errors::network_disabled())), - }; + if blk.hash() != index.hash { + // index is on a different chain from us. + return Ok(future::Loop::Continue((sync, on_demand))); + } - let fetcher = fetcher.clone(); - let extract_transaction = eventual_index.and_then(move |index| { - // check that the block is known by number. - // that ensures that it is within the chain that we are aware of. - fetcher.block(BlockId::Number(index.num)).then(move |blk| match blk { - Ok(blk) => { - // if the block is known by number, make sure the - // index from earlier isn't garbage. + let index = index.index as usize; + let transaction = extract_transaction_at_index(blk, index); - if blk.hash() != index.hash { - // index is on a different chain from us. 
- return Ok(future::Loop::Continue((sync, on_demand))) - } + if transaction.as_ref().map_or(true, |tx| tx.hash != tx_hash) { + // index is actively wrong: indicated block has + // fewer transactions than necessary or the transaction + // at that index had a different hash. + // TODO: punish peer/move into OnDemand somehow? + Ok(future::Loop::Continue((sync, on_demand))) + } else { + let transaction = transaction.map(move |tx| (tx, index)); + Ok(future::Loop::Break(transaction)) + } + } + Err(ref e) if e == &errors::unknown_block() => { + // block by number not in the canonical chain. + Ok(future::Loop::Break(None)) + } + Err(e) => Err(e), + }) + }); - let index = index.index as usize; - let transaction = extract_transaction_at_index(blk, index); + Either::B(extract_transaction) + })) + } - if transaction.as_ref().map_or(true, |tx| tx.hash != tx_hash) { - // index is actively wrong: indicated block has - // fewer transactions than necessary or the transaction - // at that index had a different hash. - // TODO: punish peer/move into OnDemand somehow? - Ok(future::Loop::Continue((sync, on_demand))) - } else { - let transaction = transaction.map(move |tx| (tx, index)); - Ok(future::Loop::Break(transaction)) - } - } - Err(ref e) if e == &errors::unknown_block() => { - // block by number not in the canonical chain. 
- Ok(future::Loop::Break(None)) - } - Err(e) => Err(e), - }) - }); + /// Helper to cull the `light` transaction queue of mined transactions + pub fn light_cull( + &self, + txq: Arc>, + ) -> impl Future + Send { + let senders = txq.read().queued_senders(); + if senders.is_empty() { + return Either::B(future::err(errors::internal( + "No pending local transactions", + "", + ))); + } - Either::B(extract_transaction) - })) - } + let sync = self.sync.clone(); + let on_demand = self.on_demand.clone(); + let best_header = self.client.best_block_header(); + let start_nonce = self + .client + .engine() + .account_start_nonce(best_header.number()); - /// Helper to cull the `light` transaction queue of mined transactions - pub fn light_cull(&self, txq: Arc>) -> impl Future + Send { - let senders = txq.read().queued_senders(); - if senders.is_empty() { - return Either::B(future::err(errors::internal("No pending local transactions", ""))); - } + let account_request = sync.with_context(move |ctx| { + // fetch the nonce of each sender in the queue. + let nonce_reqs = senders + .iter() + .map(|&address| request::Account { + header: best_header.clone().into(), + address, + }) + .collect::>(); - let sync = self.sync.clone(); - let on_demand = self.on_demand.clone(); - let best_header = self.client.best_block_header(); - let start_nonce = self.client.engine().account_start_nonce(best_header.number()); + // when they come in, update each sender to the new nonce. + on_demand + .request(ctx, nonce_reqs) + .expect(NO_INVALID_BACK_REFS_PROOF) + .map(move |accs| { + let mut txq = txq.write(); + accs.into_iter() + .map(|maybe_acc| maybe_acc.map_or(start_nonce, |acc| acc.nonce)) + .zip(senders) + .for_each(|(nonce, addr)| { + txq.cull(addr, nonce); + }); + }) + .map_err(errors::on_demand_error) + }); - let account_request = sync.with_context(move |ctx| { - // fetch the nonce of each sender in the queue. 
- let nonce_reqs = senders.iter() - .map(|&address| request::Account { header: best_header.clone().into(), address }) - .collect::>(); + if let Some(fut) = account_request { + Either::A(fut) + } else { + Either::B(future::err(errors::network_disabled())) + } + } - // when they come in, update each sender to the new nonce. - on_demand.request(ctx, nonce_reqs) - .expect(NO_INVALID_BACK_REFS_PROOF) - .map(move |accs| { - let mut txq = txq.write(); - accs.into_iter() - .map(|maybe_acc| maybe_acc.map_or(start_nonce, |acc| acc.nonce)) - .zip(senders) - .for_each(|(nonce, addr)| { - txq.cull(addr, nonce); - }); - }) - .map_err(errors::on_demand_error) - }); + fn send_requests( + &self, + reqs: Vec, + parse_response: F, + ) -> impl Future + Send + where + F: FnOnce(Vec) -> T + Send + 'static, + T: Send + 'static, + { + let maybe_future = self.sync.with_context(move |ctx| { + Box::new( + self.on_demand + .request_raw(ctx, reqs) + .expect(NO_INVALID_BACK_REFS_PROOF) + .map_err(errors::on_demand_cancel) + .and_then(|responses| match responses { + Ok(responses) => Ok(parse_response(responses)), + Err(e) => Err(errors::on_demand_error(e)), + }), + ) + }); - if let Some(fut) = account_request { - Either::A(fut) - } else { - Either::B(future::err(errors::network_disabled())) - } - } + match maybe_future { + Some(recv) => recv, + None => Box::new(future::err(errors::network_disabled())) + as Box + Send>, + } + } - fn send_requests(&self, reqs: Vec, parse_response: F) -> impl Future + Send where - F: FnOnce(Vec) -> T + Send + 'static, - T: Send + 'static, - { - let maybe_future = self.sync.with_context(move |ctx| { - Box::new(self.on_demand.request_raw(ctx, reqs) - .expect(NO_INVALID_BACK_REFS_PROOF) - .map_err(errors::on_demand_cancel) - .and_then(|responses| { - match responses { - Ok(responses) => Ok(parse_response(responses)), - Err(e) => Err(errors::on_demand_error(e)), - } - }) - ) - }); + fn headers_range_by_block_id( + &self, + from_block: BlockId, + to_block: BlockId, + 
max: u64, + ) -> impl Future, Error = Error> { + let fetch_hashes = [from_block, to_block] + .iter() + .filter_map(|block_id| match block_id { + BlockId::Hash(hash) => Some(*hash), + _ => None, + }) + .collect::>(); - match maybe_future { - Some(recv) => recv, - None => Box::new(future::err(errors::network_disabled())) as Box + Send> - } - } + let best_number = self.client.chain_info().best_block_number; - fn headers_range_by_block_id( - &self, - from_block: BlockId, - to_block: BlockId, - max: u64 - ) -> impl Future, Error = Error> { - let fetch_hashes = [from_block, to_block].iter() - .filter_map(|block_id| match block_id { - BlockId::Hash(hash) => Some(*hash), - _ => None, - }) - .collect::>(); - - let best_number = self.client.chain_info().best_block_number; - - let fetcher = self.clone(); - self.headers_by_hash(&fetch_hashes[..]).and_then(move |mut header_map| { + let fetcher = self.clone(); + self.headers_by_hash(&fetch_hashes[..]).and_then(move |mut header_map| { let (from_block_num, to_block_num) = { let block_number = |id| match id { BlockId::Earliest => 0, @@ -636,141 +760,159 @@ where } })) }) - } + } - fn headers_by_hash(&self, hashes: &[H256]) -> impl Future, Error = Error> { - let mut refs = H256FastMap::with_capacity_and_hasher(hashes.len(), Default::default()); - let mut reqs = Vec::with_capacity(hashes.len()); + fn headers_by_hash( + &self, + hashes: &[H256], + ) -> impl Future, Error = Error> { + let mut refs = H256FastMap::with_capacity_and_hasher(hashes.len(), Default::default()); + let mut reqs = Vec::with_capacity(hashes.len()); - for hash in hashes { - refs.entry(*hash).or_insert_with(|| { - self.make_header_requests(BlockId::Hash(*hash), &mut reqs) - .expect("make_header_requests never fails for BlockId::Hash; qed") - }); - } + for hash in hashes { + refs.entry(*hash).or_insert_with(|| { + self.make_header_requests(BlockId::Hash(*hash), &mut reqs) + .expect("make_header_requests never fails for BlockId::Hash; qed") + }); + } - 
self.send_requests(reqs, move |res| { - refs.into_iter().map(|(hash, header_ref)| { - let hdr = extract_header(&res, header_ref) - .expect("these responses correspond to requests that header_ref belongs to; \ - qed"); - (hash, hdr) - }) - .collect() - }) - } + self.send_requests(reqs, move |res| { + refs.into_iter() + .map(|(hash, header_ref)| { + let hdr = extract_header(&res, header_ref).expect( + "these responses correspond to requests that header_ref belongs to; \ + qed", + ); + (hash, hdr) + }) + .collect() + }) + } - fn headers_range( - &self, - from_number: u64, - to_number: u64, - to_header_hint: Option - ) -> impl Future, Error = Error> { - let range_length = (to_number - from_number + 1) as usize; - let mut headers: Vec = Vec::with_capacity(range_length); + fn headers_range( + &self, + from_number: u64, + to_number: u64, + to_header_hint: Option, + ) -> impl Future, Error = Error> { + let range_length = (to_number - from_number + 1) as usize; + let mut headers: Vec = Vec::with_capacity(range_length); - let iter_start = match to_header_hint { - Some(hdr) => { - let block_id = BlockId::Hash(hdr.parent_hash()); - headers.push(hdr); - block_id - } - None => BlockId::Number(to_number), - }; - headers.extend(self.client.ancestry_iter(iter_start) - .take_while(|hdr| hdr.number() >= from_number)); + let iter_start = match to_header_hint { + Some(hdr) => { + let block_id = BlockId::Hash(hdr.parent_hash()); + headers.push(hdr); + block_id + } + None => BlockId::Number(to_number), + }; + headers.extend( + self.client + .ancestry_iter(iter_start) + .take_while(|hdr| hdr.number() >= from_number), + ); - let fetcher = self.clone(); - future::loop_fn(headers, move |mut headers| { - let remaining = range_length - headers.len(); - if remaining == 0 { - return Either::A(future::ok(future::Loop::Break(headers))); - } + let fetcher = self.clone(); + future::loop_fn(headers, move |mut headers| { + let remaining = range_length - headers.len(); + if remaining == 0 { + return 
Either::A(future::ok(future::Loop::Break(headers))); + } - let mut reqs: Vec = Vec::with_capacity(2); + let mut reqs: Vec = Vec::with_capacity(2); - let start_hash = if let Some(hdr) = headers.last() { - hdr.parent_hash().into() - } else { - let cht_root = cht::block_to_cht_number(to_number) - .and_then(|cht_num| fetcher.client.cht_root(cht_num as usize)); + let start_hash = if let Some(hdr) = headers.last() { + hdr.parent_hash().into() + } else { + let cht_root = cht::block_to_cht_number(to_number) + .and_then(|cht_num| fetcher.client.cht_root(cht_num as usize)); - let cht_root = match cht_root { - Some(cht_root) => cht_root, - None => return Either::A(future::err(errors::unknown_block())), - }; + let cht_root = match cht_root { + Some(cht_root) => cht_root, + None => return Either::A(future::err(errors::unknown_block())), + }; - let header_proof = request::HeaderProof::new(to_number, cht_root) - .expect("HeaderProof::new is Some(_) if cht::block_to_cht_number() is Some(_); \ - this would return above if block_to_cht_number returned None; qed"); + let header_proof = request::HeaderProof::new(to_number, cht_root).expect( + "HeaderProof::new is Some(_) if cht::block_to_cht_number() is Some(_); \ + this would return above if block_to_cht_number returned None; qed", + ); - let idx = reqs.len(); - let hash_ref = Field::back_ref(idx, 0); - reqs.push(header_proof.into()); + let idx = reqs.len(); + let hash_ref = Field::back_ref(idx, 0); + reqs.push(header_proof.into()); - hash_ref - }; + hash_ref + }; - let max = cmp::min(remaining as u64, MAX_HEADERS_PER_REQUEST); - reqs.push(request::HeaderWithAncestors { - block_hash: start_hash, - ancestor_count: max - 1, - }.into()); + let max = cmp::min(remaining as u64, MAX_HEADERS_PER_REQUEST); + reqs.push( + request::HeaderWithAncestors { + block_hash: start_hash, + ancestor_count: max - 1, + } + .into(), + ); - Either::B(fetcher.send_requests(reqs, |mut res| { - match res.last_mut() { - Some(&mut 
OnDemandResponse::HeaderWithAncestors(ref mut res_headers)) => - headers.extend(res_headers.drain(..)), - _ => panic!("reqs has at least one entry; each request maps to a response; qed"), - }; - future::Loop::Continue(headers) - })) - }) - } + Either::B(fetcher.send_requests(reqs, |mut res| { + match res.last_mut() { + Some(&mut OnDemandResponse::HeaderWithAncestors(ref mut res_headers)) => { + headers.extend(res_headers.drain(..)) + } + _ => { + panic!("reqs has at least one entry; each request maps to a response; qed") + } + }; + future::Loop::Continue(headers) + })) + }) + } } struct ExecuteParams where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - from: Address, - tx: EthTransaction, - hdr: encoded::Header, - env_info: ::vm::EnvInfo, - engine: Arc<::ethcore::engines::EthEngine>, - on_demand: Arc, - sync: Arc, + from: Address, + tx: EthTransaction, + hdr: encoded::Header, + env_info: ::vm::EnvInfo, + engine: Arc<::ethcore::engines::EthEngine>, + on_demand: Arc, + sync: Arc, } impl Clone for ExecuteParams where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - fn clone(&self) -> Self { - Self { - from: self.from, - tx: self.tx.clone(), - hdr: self.hdr.clone(), - env_info: self.env_info.clone(), - engine: self.engine.clone(), - on_demand: self.on_demand.clone(), - sync: self.sync.clone() - } - } + fn clone(&self) -> Self { + Self { + from: self.from, + tx: self.tx.clone(), + hdr: self.hdr.clone(), + env_info: self.env_info.clone(), + engine: self.engine.clone(), + on_demand: self.on_demand.clone(), + sync: self.sync.clone(), + } + } } // Has a peer execute the transaction with given params. 
If `gas_known` is false, this will set the `gas value` to the // `required gas value` unless it exceeds the block gas limit -fn execute_read_only_tx(gas_known: bool, params: ExecuteParams) -> impl Future + Send +fn execute_read_only_tx( + gas_known: bool, + params: ExecuteParams, +) -> impl Future + Send where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - if !gas_known { - Box::new(future::loop_fn(params, |mut params| { - execute_read_only_tx(true, params.clone()).and_then(move |res| { + if !gas_known { + Box::new(future::loop_fn(params, |mut params| { + execute_read_only_tx(true, params.clone()).and_then(move |res| { match res { Ok(executed) => { // `OutOfGas` exception, try double the gas @@ -803,29 +945,30 @@ where failed => Ok(future::Loop::Break(failed)), } }) - })) as Box + Send> - } else { - trace!(target: "light_fetch", "Placing execution request for {} gas in on_demand", + })) as Box + Send> + } else { + trace!(target: "light_fetch", "Placing execution request for {} gas in on_demand", params.tx.gas); - let request = request::TransactionProof { - tx: params.tx.fake_sign(params.from), - header: params.hdr.into(), - env_info: params.env_info, - engine: params.engine, - }; + let request = request::TransactionProof { + tx: params.tx.fake_sign(params.from), + header: params.hdr.into(), + env_info: params.env_info, + engine: params.engine, + }; - let on_demand = params.on_demand; - let proved_future = params.sync.with_context(move |ctx| { - on_demand - .request(ctx, request) - .expect("no back-references; therefore all back-refs valid; qed") - .map_err(errors::on_demand_error) - }); + let on_demand = params.on_demand; + let proved_future = params.sync.with_context(move |ctx| { + on_demand + .request(ctx, request) + .expect("no back-references; therefore all back-refs valid; qed") + 
.map_err(errors::on_demand_error) + }); - match proved_future { - Some(fut) => Box::new(fut) as Box + Send>, - None => Box::new(future::err(errors::network_disabled())) as Box + Send>, - } - } + match proved_future { + Some(fut) => Box::new(fut) as Box + Send>, + None => Box::new(future::err(errors::network_disabled())) + as Box + Send>, + } + } } diff --git a/rpc/src/v1/helpers/mod.rs b/rpc/src/v1/helpers/mod.rs index 8a25f9305..33fa34c2f 100644 --- a/rpc/src/v1/helpers/mod.rs +++ b/rpc/src/v1/helpers/mod.rs @@ -36,23 +36,26 @@ mod network_settings; mod poll_filter; mod poll_manager; mod requests; +mod signature; mod subscribers; mod subscription_manager; mod work; -mod signature; -pub use self::dispatch::{Dispatcher, FullDispatcher, LightDispatcher}; -pub use self::signature::verify_signature; -pub use self::network_settings::NetworkSettings; -pub use self::poll_manager::PollManager; -pub use self::poll_filter::{PollFilter, SyncPollFilter, limit_logs}; -pub use self::requests::{ - TransactionRequest, FilledTransactionRequest, ConfirmationRequest, ConfirmationPayload, CallRequest, +pub use self::{ + dispatch::{Dispatcher, FullDispatcher, LightDispatcher}, + network_settings::NetworkSettings, + poll_filter::{limit_logs, PollFilter, SyncPollFilter}, + poll_manager::PollManager, + requests::{ + CallRequest, ConfirmationPayload, ConfirmationRequest, FilledTransactionRequest, + TransactionRequest, + }, + signature::verify_signature, + subscribers::Subscribers, + subscription_manager::GenericPollManager, + work::submit_work_detail, }; -pub use self::subscribers::Subscribers; -pub use self::subscription_manager::GenericPollManager; -pub use self::work::submit_work_detail; pub fn to_url(address: &Option<::Host>) -> Option { - address.as_ref().map(|host| (**host).to_owned()) + address.as_ref().map(|host| (**host).to_owned()) } diff --git a/rpc/src/v1/helpers/network_settings.rs b/rpc/src/v1/helpers/network_settings.rs index ed515e471..4427edf83 100644 --- 
a/rpc/src/v1/helpers/network_settings.rs +++ b/rpc/src/v1/helpers/network_settings.rs @@ -19,32 +19,32 @@ /// Networking & RPC settings #[derive(Debug, PartialEq, Clone)] pub struct NetworkSettings { - /// Node name - pub name: String, - /// Name of the chain we are connected to - pub chain: String, - /// Is development chain - pub is_dev_chain: bool, - /// Networking port - pub network_port: u16, - /// Is JSON-RPC server enabled? - pub rpc_enabled: bool, - /// Interface that JSON-RPC listens on - pub rpc_interface: String, - /// Port for JSON-RPC server - pub rpc_port: u16, + /// Node name + pub name: String, + /// Name of the chain we are connected to + pub chain: String, + /// Is development chain + pub is_dev_chain: bool, + /// Networking port + pub network_port: u16, + /// Is JSON-RPC server enabled? + pub rpc_enabled: bool, + /// Interface that JSON-RPC listens on + pub rpc_interface: String, + /// Port for JSON-RPC server + pub rpc_port: u16, } impl Default for NetworkSettings { - fn default() -> Self { - NetworkSettings { - name: "".into(), - chain: "foundation".into(), - is_dev_chain: false, - network_port: 30303, - rpc_enabled: true, - rpc_interface: "127.0.0.1".into(), - rpc_port: 8545 - } - } + fn default() -> Self { + NetworkSettings { + name: "".into(), + chain: "foundation".into(), + is_dev_chain: false, + network_port: 30303, + rpc_enabled: true, + rpc_interface: "127.0.0.1".into(), + rpc_port: 8545, + } + } } diff --git a/rpc/src/v1/helpers/nonce.rs b/rpc/src/v1/helpers/nonce.rs index 25ec89f01..5ea834603 100644 --- a/rpc/src/v1/helpers/nonce.rs +++ b/rpc/src/v1/helpers/nonce.rs @@ -14,142 +14,144 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::{cmp, mem}; -use std::collections::HashMap; -use std::sync::{atomic, Arc}; -use std::sync::atomic::{AtomicBool, AtomicUsize}; +use std::{ + cmp, + collections::HashMap, + mem, + sync::{ + atomic, + atomic::{AtomicBool, AtomicUsize}, + Arc, + }, +}; -use ethereum_types::{U256, Address}; -use futures::{Future, future, Poll, Async}; -use futures::future::Either; -use futures::sync::oneshot; +use ethereum_types::{Address, U256}; +use futures::{future, future::Either, sync::oneshot, Async, Future, Poll}; use parity_runtime::Executor; /// Manages currently reserved and prospective nonces /// for multiple senders. #[derive(Debug)] pub struct Reservations { - nonces: HashMap, - executor: Executor, + nonces: HashMap, + executor: Executor, } impl Reservations { - /// A maximal number of reserved nonces in the hashmap - /// before we start clearing the unused ones. - const CLEAN_AT: usize = 512; + /// A maximal number of reserved nonces in the hashmap + /// before we start clearing the unused ones. + const CLEAN_AT: usize = 512; - /// Create new nonces manager with given executor. - pub fn new(executor: Executor) -> Self { - Reservations { - nonces: Default::default(), - executor, - } - } + /// Create new nonces manager with given executor. + pub fn new(executor: Executor) -> Self { + Reservations { + nonces: Default::default(), + executor, + } + } - /// Reserve a nonce for particular address. - /// - /// The reserved nonce cannot be smaller than the minimal nonce. - pub fn reserve(&mut self, sender: Address, minimal: U256) -> Reserved { - if self.nonces.len() + 1 > Self::CLEAN_AT { - self.nonces.retain(|_, v| !v.is_empty()); - } + /// Reserve a nonce for particular address. + /// + /// The reserved nonce cannot be smaller than the minimal nonce. 
+ pub fn reserve(&mut self, sender: Address, minimal: U256) -> Reserved { + if self.nonces.len() + 1 > Self::CLEAN_AT { + self.nonces.retain(|_, v| !v.is_empty()); + } - let executor = &self.executor; - self.nonces.entry(sender) - .or_insert_with(move || SenderReservations::new(executor.clone())) - .reserve_nonce(minimal) - } + let executor = &self.executor; + self.nonces + .entry(sender) + .or_insert_with(move || SenderReservations::new(executor.clone())) + .reserve_nonce(minimal) + } } /// Manages currently reserved and prospective nonces. #[derive(Debug)] pub struct SenderReservations { - previous: Option>, - previous_ready: Arc, - executor: Executor, - prospective_value: U256, - dropped: Arc, + previous: Option>, + previous_ready: Arc, + executor: Executor, + prospective_value: U256, + dropped: Arc, } impl SenderReservations { - /// Create new nonces manager with given executor. - pub fn new(executor: Executor) -> Self { - SenderReservations { - previous: None, - previous_ready: Arc::new(AtomicBool::new(true)), - executor, - prospective_value: Default::default(), - dropped: Default::default(), - } - } + /// Create new nonces manager with given executor. + pub fn new(executor: Executor) -> Self { + SenderReservations { + previous: None, + previous_ready: Arc::new(AtomicBool::new(true)), + executor, + prospective_value: Default::default(), + dropped: Default::default(), + } + } - /// Reserves a prospective nonce. - /// The caller should provide a minimal nonce that needs to be reserved (taken from state/txqueue). - /// If there were any previous reserved nonces the returned future will be resolved when those are finished - /// (confirmed that the nonce were indeed used). - /// The caller can use `prospective_nonce` and perform some heavy computation anticipating - /// that the `prospective_nonce` will be equal to the one he will get. 
- pub fn reserve_nonce(&mut self, minimal: U256) -> Reserved { - // Update prospective value - let dropped = self.dropped.swap(0, atomic::Ordering::SeqCst); - let prospective_value = cmp::max(minimal, self.prospective_value - dropped); - self.prospective_value = prospective_value + 1; + /// Reserves a prospective nonce. + /// The caller should provide a minimal nonce that needs to be reserved (taken from state/txqueue). + /// If there were any previous reserved nonces the returned future will be resolved when those are finished + /// (confirmed that the nonce were indeed used). + /// The caller can use `prospective_nonce` and perform some heavy computation anticipating + /// that the `prospective_nonce` will be equal to the one he will get. + pub fn reserve_nonce(&mut self, minimal: U256) -> Reserved { + // Update prospective value + let dropped = self.dropped.swap(0, atomic::Ordering::SeqCst); + let prospective_value = cmp::max(minimal, self.prospective_value - dropped); + self.prospective_value = prospective_value + 1; - let (next, rx) = oneshot::channel(); - let next = Some(next); - let next_sent = Arc::new(AtomicBool::default()); - let executor = self.executor.clone(); - let dropped = self.dropped.clone(); - self.previous_ready = next_sent.clone(); - match mem::replace(&mut self.previous, Some(rx)) { - Some(previous) => Reserved { - previous: Either::A(previous), - next, - next_sent, - minimal, - prospective_value, - executor, - dropped, - }, - None => Reserved { - previous: Either::B(future::ok(minimal)), - next, - next_sent, - minimal, - prospective_value, - executor, - dropped, - }, - } - } + let (next, rx) = oneshot::channel(); + let next = Some(next); + let next_sent = Arc::new(AtomicBool::default()); + let executor = self.executor.clone(); + let dropped = self.dropped.clone(); + self.previous_ready = next_sent.clone(); + match mem::replace(&mut self.previous, Some(rx)) { + Some(previous) => Reserved { + previous: Either::A(previous), + next, + next_sent, 
+ minimal, + prospective_value, + executor, + dropped, + }, + None => Reserved { + previous: Either::B(future::ok(minimal)), + next, + next_sent, + minimal, + prospective_value, + executor, + dropped, + }, + } + } - /// Returns true if there are no reserved nonces. - pub fn is_empty(&self) -> bool { - self.previous_ready.load(atomic::Ordering::SeqCst) - } + /// Returns true if there are no reserved nonces. + pub fn is_empty(&self) -> bool { + self.previous_ready.load(atomic::Ordering::SeqCst) + } } /// Represents a future nonce. #[derive(Debug)] pub struct Reserved { - previous: Either< - oneshot::Receiver, - future::FutureResult, - >, - next: Option>, - next_sent: Arc, - minimal: U256, - prospective_value: U256, - executor: Executor, - dropped: Arc, + previous: Either, future::FutureResult>, + next: Option>, + next_sent: Arc, + minimal: U256, + prospective_value: U256, + executor: Executor, + dropped: Arc, } impl Reserved { - /// Returns a prospective value of the nonce. - /// NOTE: This might be different than the one we resolve to. - /// Make sure to check if both nonces match or use the latter one. - pub fn prospective_value(&self) -> &U256 { - &self.prospective_value - } + /// Returns a prospective value of the nonce. + /// NOTE: This might be different than the one we resolve to. + /// Make sure to check if both nonces match or use the latter one. 
+ pub fn prospective_value(&self) -> &U256 { + &self.prospective_value + } } impl Future for Reserved { @@ -157,42 +159,42 @@ impl Future for Reserved { type Error = (); fn poll(&mut self) -> Poll { - let mut value = try_ready!(self.previous.poll().map_err(|e| { - warn!("Unexpected nonce cancellation: {}", e); - })); + let mut value = try_ready!(self.previous.poll().map_err(|e| { + warn!("Unexpected nonce cancellation: {}", e); + })); - if value < self.minimal { - value = self.minimal - } - let matches_prospective = value == self.prospective_value; + if value < self.minimal { + value = self.minimal + } + let matches_prospective = value == self.prospective_value; - Ok(Async::Ready(Ready { - value, - matches_prospective, - next: self.next.take(), - next_sent: self.next_sent.clone(), - dropped: self.dropped.clone(), - })) - } + Ok(Async::Ready(Ready { + value, + matches_prospective, + next: self.next.take(), + next_sent: self.next_sent.clone(), + dropped: self.dropped.clone(), + })) + } } impl Drop for Reserved { - fn drop(&mut self) { - if let Some(next) = self.next.take() { - let next_sent = self.next_sent.clone(); - self.dropped.fetch_add(1, atomic::Ordering::SeqCst); - // If Reserved is dropped just pipe previous and next together. - let previous = mem::replace(&mut self.previous, Either::B(future::ok(U256::default()))); - self.executor.spawn( - previous - .map(move |nonce| { - next_sent.store(true, atomic::Ordering::SeqCst); - next.send(nonce).expect(Ready::RECV_PROOF) - }) - .map_err(|err| error!("Error dropping `Reserved`: {:?}", err)) - ); - } - } + fn drop(&mut self) { + if let Some(next) = self.next.take() { + let next_sent = self.next_sent.clone(); + self.dropped.fetch_add(1, atomic::Ordering::SeqCst); + // If Reserved is dropped just pipe previous and next together. 
+ let previous = mem::replace(&mut self.previous, Either::B(future::ok(U256::default()))); + self.executor.spawn( + previous + .map(move |nonce| { + next_sent.store(true, atomic::Ordering::SeqCst); + next.send(nonce).expect(Ready::RECV_PROOF) + }) + .map_err(|err| error!("Error dropping `Reserved`: {:?}", err)), + ); + } + } } /// Represents a valid reserved nonce. @@ -202,43 +204,46 @@ impl Drop for Reserved { /// using `mark_used` method. #[derive(Debug)] pub struct Ready { - value: U256, - matches_prospective: bool, - next: Option>, - next_sent: Arc, - dropped: Arc, + value: U256, + matches_prospective: bool, + next: Option>, + next_sent: Arc, + dropped: Arc, } impl Ready { - const RECV_PROOF: &'static str = "Receiver never dropped."; + const RECV_PROOF: &'static str = "Receiver never dropped."; - /// Returns a value of the nonce. - pub fn value(&self) -> &U256 { - &self.value - } + /// Returns a value of the nonce. + pub fn value(&self) -> &U256 { + &self.value + } - /// Returns true if current value matches the prospective nonce. - pub fn matches_prospective(&self) -> bool { - self.matches_prospective - } + /// Returns true if current value matches the prospective nonce. + pub fn matches_prospective(&self) -> bool { + self.matches_prospective + } - /// Marks this nonce as used. - /// Make sure to call that method after this nonce has been consumed. - pub fn mark_used(mut self) { - let next = self.next.take().expect("Nonce can be marked as used only once; qed"); - self.next_sent.store(true, atomic::Ordering::SeqCst); - next.send(self.value + 1).expect(Self::RECV_PROOF); - } + /// Marks this nonce as used. + /// Make sure to call that method after this nonce has been consumed. 
+ pub fn mark_used(mut self) { + let next = self + .next + .take() + .expect("Nonce can be marked as used only once; qed"); + self.next_sent.store(true, atomic::Ordering::SeqCst); + next.send(self.value + 1).expect(Self::RECV_PROOF); + } } impl Drop for Ready { - fn drop(&mut self) { - if let Some(next) = self.next.take() { - self.dropped.fetch_add(1, atomic::Ordering::SeqCst); - self.next_sent.store(true, atomic::Ordering::SeqCst); - next.send(self.value).expect(Self::RECV_PROOF); - } - } + fn drop(&mut self) { + if let Some(next) = self.next.take() { + self.dropped.fetch_add(1, atomic::Ordering::SeqCst); + self.next_sent.store(true, atomic::Ordering::SeqCst); + next.send(self.value).expect(Self::RECV_PROOF); + } + } } #[cfg(test)] @@ -246,63 +251,63 @@ mod tests { use super::*; use parity_runtime::Runtime; - #[test] - fn should_reserve_a_set_of_nonces_and_resolve_them() { - let runtime = Runtime::with_thread_count(1); - let mut nonces = SenderReservations::new(runtime.executor()); + #[test] + fn should_reserve_a_set_of_nonces_and_resolve_them() { + let runtime = Runtime::with_thread_count(1); + let mut nonces = SenderReservations::new(runtime.executor()); - assert!(nonces.is_empty()); - let n1 = nonces.reserve_nonce(5.into()); - let n2 = nonces.reserve_nonce(5.into()); - let n3 = nonces.reserve_nonce(5.into()); - let n4 = nonces.reserve_nonce(5.into()); - assert!(!nonces.is_empty()); + assert!(nonces.is_empty()); + let n1 = nonces.reserve_nonce(5.into()); + let n2 = nonces.reserve_nonce(5.into()); + let n3 = nonces.reserve_nonce(5.into()); + let n4 = nonces.reserve_nonce(5.into()); + assert!(!nonces.is_empty()); - // Check first nonce - let r = n1.wait().unwrap(); - assert_eq!(r.value(), &U256::from(5)); - assert!(r.matches_prospective()); - r.mark_used(); + // Check first nonce + let r = n1.wait().unwrap(); + assert_eq!(r.value(), &U256::from(5)); + assert!(r.matches_prospective()); + r.mark_used(); - // Drop second nonce - drop(n2); + // Drop second nonce + 
drop(n2); - // Drop third without marking as used - let r = n3.wait().unwrap(); - drop(r); + // Drop third without marking as used + let r = n3.wait().unwrap(); + drop(r); - // Last nonce should be resolved to 6 - let r = n4.wait().unwrap(); - assert_eq!(r.value(), &U256::from(6)); - assert!(!r.matches_prospective()); - r.mark_used(); + // Last nonce should be resolved to 6 + let r = n4.wait().unwrap(); + assert_eq!(r.value(), &U256::from(6)); + assert!(!r.matches_prospective()); + r.mark_used(); - // Next nonce should be immediately available. - let n5 = nonces.reserve_nonce(5.into()); - let r = n5.wait().unwrap(); - assert_eq!(r.value(), &U256::from(7)); - assert!(r.matches_prospective()); - r.mark_used(); + // Next nonce should be immediately available. + let n5 = nonces.reserve_nonce(5.into()); + let r = n5.wait().unwrap(); + assert_eq!(r.value(), &U256::from(7)); + assert!(r.matches_prospective()); + r.mark_used(); - // Should use start number if it's greater - let n6 = nonces.reserve_nonce(10.into()); - let r = n6.wait().unwrap(); - assert_eq!(r.value(), &U256::from(10)); - assert!(r.matches_prospective()); - r.mark_used(); + // Should use start number if it's greater + let n6 = nonces.reserve_nonce(10.into()); + let r = n6.wait().unwrap(); + assert_eq!(r.value(), &U256::from(10)); + assert!(r.matches_prospective()); + r.mark_used(); - assert!(nonces.is_empty()); - } + assert!(nonces.is_empty()); + } - #[test] - fn should_return_prospective_nonce() { - let runtime = Runtime::with_thread_count(1); - let mut nonces = SenderReservations::new(runtime.executor()); + #[test] + fn should_return_prospective_nonce() { + let runtime = Runtime::with_thread_count(1); + let mut nonces = SenderReservations::new(runtime.executor()); - let n1 = nonces.reserve_nonce(5.into()); - let n2 = nonces.reserve_nonce(5.into()); + let n1 = nonces.reserve_nonce(5.into()); + let n2 = nonces.reserve_nonce(5.into()); - assert_eq!(n1.prospective_value(), &U256::from(5)); - 
assert_eq!(n2.prospective_value(), &U256::from(6)); - } + assert_eq!(n1.prospective_value(), &U256::from(5)); + assert_eq!(n2.prospective_value(), &U256::from(6)); + } } diff --git a/rpc/src/v1/helpers/poll_filter.rs b/rpc/src/v1/helpers/poll_filter.rs index 2d7eb9566..fe4698b74 100644 --- a/rpc/src/v1/helpers/poll_filter.rs +++ b/rpc/src/v1/helpers/poll_filter.rs @@ -16,14 +16,14 @@ //! Helper type with all filter state data. -use std::{ - collections::{BTreeSet, HashSet, VecDeque}, - sync::Arc, -}; use ethereum_types::H256; use parking_lot::Mutex; -use v1::types::Log; +use std::{ + collections::{BTreeSet, HashSet, VecDeque}, + sync::Arc, +}; use types::filter::Filter; +use v1::types::Log; pub type BlockNumber = u64; @@ -32,49 +32,50 @@ pub type BlockNumber = u64; pub struct SyncPollFilter(Arc>); impl SyncPollFilter { - /// New `SyncPollFilter` - pub fn new(f: PollFilter) -> Self { - SyncPollFilter(Arc::new(Mutex::new(f))) - } + /// New `SyncPollFilter` + pub fn new(f: PollFilter) -> Self { + SyncPollFilter(Arc::new(Mutex::new(f))) + } - /// Modify underlying filter - pub fn modify(&self, f: F) -> R where - F: FnOnce(&mut PollFilter) -> R, - { - f(&mut self.0.lock()) - } + /// Modify underlying filter + pub fn modify(&self, f: F) -> R + where + F: FnOnce(&mut PollFilter) -> R, + { + f(&mut self.0.lock()) + } } /// Filter state. #[derive(Clone)] pub enum PollFilter { - /// Number of last block which client was notified about. - Block { - last_block_number: BlockNumber, - #[doc(hidden)] - recent_reported_hashes: VecDeque<(BlockNumber, H256)>, - }, - /// Hashes of all pending transactions the client knows about. - PendingTransaction(BTreeSet), - /// Number of From block number, last seen block hash, pending logs and log filter itself. - Logs { - block_number: BlockNumber, - last_block_hash: Option, - previous_logs: HashSet, - filter: Filter, - include_pending: bool, - } + /// Number of last block which client was notified about. 
+ Block { + last_block_number: BlockNumber, + #[doc(hidden)] + recent_reported_hashes: VecDeque<(BlockNumber, H256)>, + }, + /// Hashes of all pending transactions the client knows about. + PendingTransaction(BTreeSet), + /// Number of From block number, last seen block hash, pending logs and log filter itself. + Logs { + block_number: BlockNumber, + last_block_hash: Option, + previous_logs: HashSet, + filter: Filter, + include_pending: bool, + }, } impl PollFilter { - pub (in v1) const MAX_BLOCK_HISTORY_SIZE: usize = 32; + pub(in v1) const MAX_BLOCK_HISTORY_SIZE: usize = 32; } /// Returns only last `n` logs pub fn limit_logs(mut logs: Vec, limit: Option) -> Vec { - let len = logs.len(); - match limit { - Some(limit) if len >= limit => logs.split_off(len - limit), - _ => logs, - } + let len = logs.len(); + match limit { + Some(limit) if len >= limit => logs.split_off(len - limit), + _ => logs, + } } diff --git a/rpc/src/v1/helpers/poll_manager.rs b/rpc/src/v1/helpers/poll_manager.rs index a0f168439..64f98ba1d 100644 --- a/rpc/src/v1/helpers/poll_manager.rs +++ b/rpc/src/v1/helpers/poll_manager.rs @@ -16,108 +16,110 @@ //! Indexes all rpc poll requests. -use transient_hashmap::{TransientHashMap, Timer, StandardTimer}; +use transient_hashmap::{StandardTimer, Timer, TransientHashMap}; pub type PollId = usize; /// Indexes all poll requests. /// /// Lazily garbage collects unused polls info. 
-pub struct PollManager where T: Timer { - polls: TransientHashMap, - next_available_id: PollId, +pub struct PollManager +where + T: Timer, +{ + polls: TransientHashMap, + next_available_id: PollId, } impl PollManager { - /// Creates new instance of indexer - pub fn new(lifetime: u32) -> Self { - PollManager::new_with_timer(Default::default(), lifetime) - } + /// Creates new instance of indexer + pub fn new(lifetime: u32) -> Self { + PollManager::new_with_timer(Default::default(), lifetime) + } } -impl PollManager where T: Timer { +impl PollManager +where + T: Timer, +{ + pub fn new_with_timer(timer: T, lifetime: u32) -> Self { + PollManager { + polls: TransientHashMap::new_with_timer(lifetime, timer), + next_available_id: 0, + } + } - pub fn new_with_timer(timer: T, lifetime: u32) -> Self { - PollManager { - polls: TransientHashMap::new_with_timer(lifetime, timer), - next_available_id: 0, - } - } + /// Returns id which can be used for new poll. + /// + /// Stores information when last poll happend. + pub fn create_poll(&mut self, filter: F) -> PollId { + self.polls.prune(); - /// Returns id which can be used for new poll. - /// - /// Stores information when last poll happend. 
- pub fn create_poll(&mut self, filter: F) -> PollId { - self.polls.prune(); + let id = self.next_available_id; + self.polls.insert(id, filter); - let id = self.next_available_id; - self.polls.insert(id, filter); + self.next_available_id += 1; + id + } - self.next_available_id += 1; - id - } + // Implementation is always using `poll_mut` + /// Get a reference to stored poll filter + pub fn poll(&mut self, id: &PollId) -> Option<&F> { + self.polls.prune(); + self.polls.get(id) + } - // Implementation is always using `poll_mut` - /// Get a reference to stored poll filter - pub fn poll(&mut self, id: &PollId) -> Option<&F> { - self.polls.prune(); - self.polls.get(id) - } + /// Get a mutable reference to stored poll filter + pub fn poll_mut(&mut self, id: &PollId) -> Option<&mut F> { + self.polls.prune(); + self.polls.get_mut(id) + } - /// Get a mutable reference to stored poll filter - pub fn poll_mut(&mut self, id: &PollId) -> Option<&mut F> { - self.polls.prune(); - self.polls.get_mut(id) - } - - /// Removes poll info. - pub fn remove_poll(&mut self, id: &PollId) -> bool { - self.polls.remove(id).is_some() - } + /// Removes poll info. 
+ pub fn remove_poll(&mut self, id: &PollId) -> bool { + self.polls.remove(id).is_some() + } } #[cfg(test)] mod tests { - use std::cell::Cell; - use transient_hashmap::Timer; - use v1::helpers::PollManager; + use std::cell::Cell; + use transient_hashmap::Timer; + use v1::helpers::PollManager; - struct TestTimer<'a> { - time: &'a Cell, - } + struct TestTimer<'a> { + time: &'a Cell, + } - impl<'a> Timer for TestTimer<'a> { - fn get_time(&self) -> i64 { - self.time.get() - } - } + impl<'a> Timer for TestTimer<'a> { + fn get_time(&self) -> i64 { + self.time.get() + } + } - #[test] - fn test_poll_indexer() { - let time = Cell::new(0); - let timer = TestTimer { - time: &time, - }; + #[test] + fn test_poll_indexer() { + let time = Cell::new(0); + let timer = TestTimer { time: &time }; - let mut indexer = PollManager::new_with_timer(timer,60); - assert_eq!(indexer.create_poll(20), 0); - assert_eq!(indexer.create_poll(20), 1); + let mut indexer = PollManager::new_with_timer(timer, 60); + assert_eq!(indexer.create_poll(20), 0); + assert_eq!(indexer.create_poll(20), 1); - time.set(10); - *indexer.poll_mut(&0).unwrap() = 21; - assert_eq!(*indexer.poll(&0).unwrap(), 21); - assert_eq!(*indexer.poll(&1).unwrap(), 20); + time.set(10); + *indexer.poll_mut(&0).unwrap() = 21; + assert_eq!(*indexer.poll(&0).unwrap(), 21); + assert_eq!(*indexer.poll(&1).unwrap(), 20); - time.set(30); - *indexer.poll_mut(&1).unwrap() = 23; - assert_eq!(*indexer.poll(&1).unwrap(), 23); + time.set(30); + *indexer.poll_mut(&1).unwrap() = 23; + assert_eq!(*indexer.poll(&1).unwrap(), 23); - time.set(75); - assert!(indexer.poll(&0).is_none()); - assert_eq!(*indexer.poll(&1).unwrap(), 23); - - indexer.remove_poll(&1); - assert!(indexer.poll(&1).is_none()); - } + time.set(75); + assert!(indexer.poll(&0).is_none()); + assert_eq!(*indexer.poll(&1).unwrap(), 23); + indexer.remove_poll(&1); + assert!(indexer.poll(&1).is_none()); + } } diff --git a/rpc/src/v1/helpers/requests.rs b/rpc/src/v1/helpers/requests.rs 
index e71d10444..019b07bc7 100644 --- a/rpc/src/v1/helpers/requests.rs +++ b/rpc/src/v1/helpers/requests.rs @@ -14,123 +14,123 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use ethereum_types::{U256, H256, Address}; use bytes::Bytes; +use ethereum_types::{Address, H256, U256}; use v1::types::{Origin, TransactionCondition}; /// Transaction request coming from RPC #[derive(Debug, Clone, Default, Eq, PartialEq, Hash)] pub struct TransactionRequest { - /// Sender - pub from: Option

, - /// Recipient - pub to: Option
, - /// Gas Price - pub gas_price: Option, - /// Gas - pub gas: Option, - /// Value of transaction in wei - pub value: Option, - /// Additional data sent with transaction - pub data: Option, - /// Transaction's nonce - pub nonce: Option, - /// Delay until this condition is met. - pub condition: Option, + /// Sender + pub from: Option
, + /// Recipient + pub to: Option
, + /// Gas Price + pub gas_price: Option, + /// Gas + pub gas: Option, + /// Value of transaction in wei + pub value: Option, + /// Additional data sent with transaction + pub data: Option, + /// Transaction's nonce + pub nonce: Option, + /// Delay until this condition is met. + pub condition: Option, } /// Transaction request coming from RPC with default values filled in. #[derive(Debug, Clone, Default, Eq, PartialEq, Hash)] pub struct FilledTransactionRequest { - /// Sender - pub from: Address, - /// Indicates if the sender was filled by default value. - pub used_default_from: bool, - /// Recipient - pub to: Option
, - /// Gas Price - pub gas_price: U256, - /// Gas - pub gas: U256, - /// Value of transaction in wei - pub value: U256, - /// Additional data sent with transaction - pub data: Bytes, - /// Transaction's nonce - pub nonce: Option, - /// Delay until this condition is met. - pub condition: Option, + /// Sender + pub from: Address, + /// Indicates if the sender was filled by default value. + pub used_default_from: bool, + /// Recipient + pub to: Option
, + /// Gas Price + pub gas_price: U256, + /// Gas + pub gas: U256, + /// Value of transaction in wei + pub value: U256, + /// Additional data sent with transaction + pub data: Bytes, + /// Transaction's nonce + pub nonce: Option, + /// Delay until this condition is met. + pub condition: Option, } impl From for TransactionRequest { - fn from(r: FilledTransactionRequest) -> Self { - TransactionRequest { - from: Some(r.from), - to: r.to, - gas_price: Some(r.gas_price), - gas: Some(r.gas), - value: Some(r.value), - data: Some(r.data), - nonce: r.nonce, - condition: r.condition, - } - } + fn from(r: FilledTransactionRequest) -> Self { + TransactionRequest { + from: Some(r.from), + to: r.to, + gas_price: Some(r.gas_price), + gas: Some(r.gas), + value: Some(r.value), + data: Some(r.data), + nonce: r.nonce, + condition: r.condition, + } + } } /// Call request #[derive(Debug, Default, PartialEq)] pub struct CallRequest { - /// From - pub from: Option
, - /// To - pub to: Option
, - /// Gas Price - pub gas_price: Option, - /// Gas - pub gas: Option, - /// Value - pub value: Option, - /// Data - pub data: Option>, - /// Nonce - pub nonce: Option, + /// From + pub from: Option
, + /// To + pub to: Option
, + /// Gas Price + pub gas_price: Option, + /// Gas + pub gas: Option, + /// Value + pub value: Option, + /// Data + pub data: Option>, + /// Nonce + pub nonce: Option, } /// Confirmation object #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct ConfirmationRequest { - /// Id of this confirmation - pub id: U256, - /// Payload to confirm - pub payload: ConfirmationPayload, - /// Request origin - pub origin: Origin, + /// Id of this confirmation + pub id: U256, + /// Payload to confirm + pub payload: ConfirmationPayload, + /// Request origin + pub origin: Origin, } /// Payload to confirm in Trusted Signer #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub enum ConfirmationPayload { - /// Transaction - SendTransaction(FilledTransactionRequest), - /// Sign Transaction - SignTransaction(FilledTransactionRequest), - /// Sign a message with an Ethereum specific security prefix. - EthSignMessage(Address, Bytes), - /// Sign a message - SignMessage(Address, H256), - /// Decrypt request - Decrypt(Address, Bytes), + /// Transaction + SendTransaction(FilledTransactionRequest), + /// Sign Transaction + SignTransaction(FilledTransactionRequest), + /// Sign a message with an Ethereum specific security prefix. 
+ EthSignMessage(Address, Bytes), + /// Sign a message + SignMessage(Address, H256), + /// Decrypt request + Decrypt(Address, Bytes), } impl ConfirmationPayload { - pub fn sender(&self) -> Address { - match *self { - ConfirmationPayload::SendTransaction(ref request) => request.from, - ConfirmationPayload::SignTransaction(ref request) => request.from, - ConfirmationPayload::EthSignMessage(ref address, _) => *address, - ConfirmationPayload::SignMessage(ref address, _) => *address, - ConfirmationPayload::Decrypt(ref address, _) => *address, - } - } + pub fn sender(&self) -> Address { + match *self { + ConfirmationPayload::SendTransaction(ref request) => request.from, + ConfirmationPayload::SignTransaction(ref request) => request.from, + ConfirmationPayload::EthSignMessage(ref address, _) => *address, + ConfirmationPayload::SignMessage(ref address, _) => *address, + ConfirmationPayload::Decrypt(ref address, _) => *address, + } + } } diff --git a/rpc/src/v1/helpers/secretstore.rs b/rpc/src/v1/helpers/secretstore.rs index 6e1cbca45..5c3d7719f 100644 --- a/rpc/src/v1/helpers/secretstore.rs +++ b/rpc/src/v1/helpers/secretstore.rs @@ -14,174 +14,204 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::BTreeSet; -use rand::{Rng, OsRng}; -use ethereum_types::{H256, H512}; -use ethkey::{self, Public, Secret, Random, Generator, math}; -use crypto; use bytes::Bytes; +use crypto; +use ethereum_types::{H256, H512}; +use ethkey::{self, math, Generator, Public, Random, Secret}; use jsonrpc_core::Error; -use v1::helpers::errors; -use v1::types::EncryptedDocumentKey; +use rand::{OsRng, Rng}; +use std::collections::BTreeSet; use tiny_keccak::Keccak; +use v1::{helpers::errors, types::EncryptedDocumentKey}; /// Initialization vector length. const INIT_VEC_LEN: usize = 16; /// Generate document key to store in secret store. 
-pub fn generate_document_key(account_public: Public, server_key_public: Public) -> Result { - // generate random plain document key - let document_key = Random.generate().map_err(errors::encryption)?; +pub fn generate_document_key( + account_public: Public, + server_key_public: Public, +) -> Result { + // generate random plain document key + let document_key = Random.generate().map_err(errors::encryption)?; - // encrypt document key using server key - let (common_point, encrypted_point) = encrypt_secret(document_key.public(), &server_key_public)?; + // encrypt document key using server key + let (common_point, encrypted_point) = + encrypt_secret(document_key.public(), &server_key_public)?; - // ..and now encrypt document key with account public - let encrypted_key = ethkey::crypto::ecies::encrypt(&account_public, &crypto::DEFAULT_MAC, document_key.public()) - .map_err(errors::encryption)?; + // ..and now encrypt document key with account public + let encrypted_key = ethkey::crypto::ecies::encrypt( + &account_public, + &crypto::DEFAULT_MAC, + document_key.public(), + ) + .map_err(errors::encryption)?; - Ok(EncryptedDocumentKey { - common_point: common_point.into(), - encrypted_point: encrypted_point.into(), - encrypted_key: encrypted_key.into(), - }) + Ok(EncryptedDocumentKey { + common_point: common_point.into(), + encrypted_point: encrypted_point.into(), + encrypted_key: encrypted_key.into(), + }) } /// Encrypt document with distributely generated key. 
pub fn encrypt_document(key: Bytes, document: Bytes) -> Result { - // make document key - let key = into_document_key(key)?; + // make document key + let key = into_document_key(key)?; - // use symmetric encryption to encrypt document - let iv = initialization_vector(); - let mut encrypted_document = vec![0; document.len() + iv.len()]; - { - let (mut encryption_buffer, iv_buffer) = encrypted_document.split_at_mut(document.len()); + // use symmetric encryption to encrypt document + let iv = initialization_vector(); + let mut encrypted_document = vec![0; document.len() + iv.len()]; + { + let (mut encryption_buffer, iv_buffer) = encrypted_document.split_at_mut(document.len()); - crypto::aes::encrypt_128_ctr(&key, &iv, &document, &mut encryption_buffer).map_err(errors::encryption)?; - iv_buffer.copy_from_slice(&iv); - } + crypto::aes::encrypt_128_ctr(&key, &iv, &document, &mut encryption_buffer) + .map_err(errors::encryption)?; + iv_buffer.copy_from_slice(&iv); + } - Ok(encrypted_document) + Ok(encrypted_document) } /// Decrypt document with distributely generated key. 
pub fn decrypt_document(key: Bytes, mut encrypted_document: Bytes) -> Result { - // initialization vector takes INIT_VEC_LEN bytes - let encrypted_document_len = encrypted_document.len(); - if encrypted_document_len < INIT_VEC_LEN { - return Err(errors::invalid_params("encrypted_document", "invalid encrypted data")); - } + // initialization vector takes INIT_VEC_LEN bytes + let encrypted_document_len = encrypted_document.len(); + if encrypted_document_len < INIT_VEC_LEN { + return Err(errors::invalid_params( + "encrypted_document", + "invalid encrypted data", + )); + } - // make document key - let key = into_document_key(key)?; + // make document key + let key = into_document_key(key)?; - // use symmetric decryption to decrypt document - let iv = encrypted_document.split_off(encrypted_document_len - INIT_VEC_LEN); - let mut document = vec![0; encrypted_document_len - INIT_VEC_LEN]; - crypto::aes::decrypt_128_ctr(&key, &iv, &encrypted_document, &mut document).map_err(errors::encryption)?; + // use symmetric decryption to decrypt document + let iv = encrypted_document.split_off(encrypted_document_len - INIT_VEC_LEN); + let mut document = vec![0; encrypted_document_len - INIT_VEC_LEN]; + crypto::aes::decrypt_128_ctr(&key, &iv, &encrypted_document, &mut document) + .map_err(errors::encryption)?; - Ok(document) + Ok(document) } /// Decrypt document given secret shadow. 
-pub fn decrypt_document_with_shadow(decrypted_secret: Public, common_point: Public, shadows: Vec, encrypted_document: Bytes) -> Result { - let key = decrypt_with_shadow_coefficients(decrypted_secret, common_point, shadows)?; - decrypt_document(key.to_vec(), encrypted_document) +pub fn decrypt_document_with_shadow( + decrypted_secret: Public, + common_point: Public, + shadows: Vec, + encrypted_document: Bytes, +) -> Result { + let key = decrypt_with_shadow_coefficients(decrypted_secret, common_point, shadows)?; + decrypt_document(key.to_vec(), encrypted_document) } /// Calculate Keccak(ordered servers set) pub fn ordered_servers_keccak(servers_set: BTreeSet) -> H256 { - let mut servers_set_keccak = Keccak::new_keccak256(); - for server in servers_set { - servers_set_keccak.update(&server.0); - } + let mut servers_set_keccak = Keccak::new_keccak256(); + for server in servers_set { + servers_set_keccak.update(&server.0); + } - let mut servers_set_keccak_value = [0u8; 32]; - servers_set_keccak.finalize(&mut servers_set_keccak_value); + let mut servers_set_keccak_value = [0u8; 32]; + servers_set_keccak.finalize(&mut servers_set_keccak_value); - servers_set_keccak_value.into() + servers_set_keccak_value.into() } fn into_document_key(key: Bytes) -> Result { - // key is a previously distributely generated Public - if key.len() != 64 { - return Err(errors::invalid_params("key", "invalid public key length")); - } + // key is a previously distributely generated Public + if key.len() != 64 { + return Err(errors::invalid_params("key", "invalid public key length")); + } - // use x coordinate of distributely generated point as encryption key - Ok(key[..INIT_VEC_LEN].into()) + // use x coordinate of distributely generated point as encryption key + Ok(key[..INIT_VEC_LEN].into()) } fn initialization_vector() -> [u8; INIT_VEC_LEN] { - let mut result = [0u8; INIT_VEC_LEN]; - let mut rng = OsRng::new().unwrap(); - rng.fill_bytes(&mut result); - result + let mut result = [0u8; 
INIT_VEC_LEN]; + let mut rng = OsRng::new().unwrap(); + rng.fill_bytes(&mut result); + result } -fn decrypt_with_shadow_coefficients(mut decrypted_shadow: Public, mut common_shadow_point: Public, shadow_coefficients: Vec) -> Result { - let mut shadow_coefficients_sum = shadow_coefficients[0].clone(); - for shadow_coefficient in shadow_coefficients.iter().skip(1) { - shadow_coefficients_sum.add(shadow_coefficient) - .map_err(errors::encryption)?; - } +fn decrypt_with_shadow_coefficients( + mut decrypted_shadow: Public, + mut common_shadow_point: Public, + shadow_coefficients: Vec, +) -> Result { + let mut shadow_coefficients_sum = shadow_coefficients[0].clone(); + for shadow_coefficient in shadow_coefficients.iter().skip(1) { + shadow_coefficients_sum + .add(shadow_coefficient) + .map_err(errors::encryption)?; + } - math::public_mul_secret(&mut common_shadow_point, &shadow_coefficients_sum) - .map_err(errors::encryption)?; - math::public_add(&mut decrypted_shadow, &common_shadow_point) - .map_err(errors::encryption)?; - Ok(decrypted_shadow) + math::public_mul_secret(&mut common_shadow_point, &shadow_coefficients_sum) + .map_err(errors::encryption)?; + math::public_add(&mut decrypted_shadow, &common_shadow_point).map_err(errors::encryption)?; + Ok(decrypted_shadow) } fn encrypt_secret(secret: &Public, joint_public: &Public) -> Result<(Public, Public), Error> { - // TODO: it is copypaste of `encrypt_secret` from secret_store/src/key_server_cluster/math.rs - // use shared version from SS math library, when it'll be available + // TODO: it is copypaste of `encrypt_secret` from secret_store/src/key_server_cluster/math.rs + // use shared version from SS math library, when it'll be available - let key_pair = Random.generate() - .map_err(errors::encryption)?; + let key_pair = Random.generate().map_err(errors::encryption)?; - // k * T - let mut common_point = math::generation_point(); - math::public_mul_secret(&mut common_point, key_pair.secret()) - 
.map_err(errors::encryption)?; + // k * T + let mut common_point = math::generation_point(); + math::public_mul_secret(&mut common_point, key_pair.secret()).map_err(errors::encryption)?; - // M + k * y - let mut encrypted_point = joint_public.clone(); - math::public_mul_secret(&mut encrypted_point, key_pair.secret()) - .map_err(errors::encryption)?; - math::public_add(&mut encrypted_point, secret) - .map_err(errors::encryption)?; + // M + k * y + let mut encrypted_point = joint_public.clone(); + math::public_mul_secret(&mut encrypted_point, key_pair.secret()).map_err(errors::encryption)?; + math::public_add(&mut encrypted_point, secret).map_err(errors::encryption)?; - Ok((common_point, encrypted_point)) + Ok((common_point, encrypted_point)) } #[cfg(test)] mod tests { - use bytes::Bytes; - use rustc_hex::FromHex; - use super::{encrypt_document, decrypt_document, decrypt_document_with_shadow}; + use super::{decrypt_document, decrypt_document_with_shadow, encrypt_document}; + use bytes::Bytes; + use rustc_hex::FromHex; - #[test] - fn encrypt_and_decrypt_document() { - let document_key: Bytes = "cac6c205eb06c8308d65156ff6c862c62b000b8ead121a4455a8ddeff7248128d895692136f240d5d1614dc7cc4147b1bd584bd617e30560bb872064d09ea325".from_hex().unwrap(); - let document: Bytes = b"Hello, world!!!"[..].into(); + #[test] + fn encrypt_and_decrypt_document() { + let document_key: Bytes = "cac6c205eb06c8308d65156ff6c862c62b000b8ead121a4455a8ddeff7248128d895692136f240d5d1614dc7cc4147b1bd584bd617e30560bb872064d09ea325".from_hex().unwrap(); + let document: Bytes = b"Hello, world!!!"[..].into(); - let encrypted_document = encrypt_document(document_key.clone(), document.clone()).unwrap(); - assert!(document != encrypted_document); + let encrypted_document = encrypt_document(document_key.clone(), document.clone()).unwrap(); + assert!(document != encrypted_document); - let decrypted_document = decrypt_document(document_key.clone(), encrypted_document).unwrap(); - 
assert_eq!(decrypted_document, document); - } + let decrypted_document = + decrypt_document(document_key.clone(), encrypted_document).unwrap(); + assert_eq!(decrypted_document, document); + } - #[test] - fn encrypt_and_shadow_decrypt_document() { - let document: Bytes = "deadbeef".from_hex().unwrap(); - let encrypted_document = "2ddec1f96229efa2916988d8b2a82a47ef36f71c".from_hex().unwrap(); - let decrypted_secret = "843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91".parse().unwrap(); - let common_point = "07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3".parse().unwrap(); - let shadows = vec!["46f542416216f66a7d7881f5a283d2a1ab7a87b381cbc5f29d0b093c7c89ee31".parse().unwrap()]; - let decrypted_document = decrypt_document_with_shadow(decrypted_secret, common_point, shadows, encrypted_document).unwrap(); - assert_eq!(decrypted_document, document); - } + #[test] + fn encrypt_and_shadow_decrypt_document() { + let document: Bytes = "deadbeef".from_hex().unwrap(); + let encrypted_document = "2ddec1f96229efa2916988d8b2a82a47ef36f71c" + .from_hex() + .unwrap(); + let decrypted_secret = "843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91".parse().unwrap(); + let common_point = "07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3".parse().unwrap(); + let shadows = vec![ + "46f542416216f66a7d7881f5a283d2a1ab7a87b381cbc5f29d0b093c7c89ee31" + .parse() + .unwrap(), + ]; + let decrypted_document = decrypt_document_with_shadow( + decrypted_secret, + common_point, + shadows, + encrypted_document, + ) + .unwrap(); + assert_eq!(decrypted_document, document); + } } diff --git a/rpc/src/v1/helpers/signature.rs b/rpc/src/v1/helpers/signature.rs index b191a3737..6990ef0bc 100644 
--- a/rpc/src/v1/helpers/signature.rs +++ b/rpc/src/v1/helpers/signature.rs @@ -14,169 +14,202 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use ethkey::{recover, public_to_address, Signature}; use ethereum_types::{H256, U64}; -use jsonrpc_core::Result; -use v1::types::{Bytes, RecoveredAccount}; -use v1::helpers::errors; -use v1::helpers::dispatch::eth_data_hash; +use ethkey::{public_to_address, recover, Signature}; use hash::keccak; +use jsonrpc_core::Result; +use v1::{ + helpers::{dispatch::eth_data_hash, errors}, + types::{Bytes, RecoveredAccount}, +}; /// helper method for parity_verifySignature pub fn verify_signature( - is_prefixed: bool, - message: Bytes, - r: H256, - s: H256, - v: U64, - chain_id: Option + is_prefixed: bool, + message: Bytes, + r: H256, + s: H256, + v: U64, + chain_id: Option, ) -> Result { - let hash = if is_prefixed { - eth_data_hash(message.0) - } else { - keccak(message.0) - }; - let v = v.as_u64(); - let is_valid_for_current_chain = match (chain_id, v) { - (None, v) if v == 0 || v == 1 => true, - (Some(chain_id), v) if v >= 35 => (v - 35) / 2 == chain_id, - _ => false, - }; + let hash = if is_prefixed { + eth_data_hash(message.0) + } else { + keccak(message.0) + }; + let v = v.as_u64(); + let is_valid_for_current_chain = match (chain_id, v) { + (None, v) if v == 0 || v == 1 => true, + (Some(chain_id), v) if v >= 35 => (v - 35) / 2 == chain_id, + _ => false, + }; - let v = if v >= 35 { (v - 1) % 2 } else { v }; + let v = if v >= 35 { (v - 1) % 2 } else { v }; - let signature = Signature::from_rsv(&r, &s, v as u8); - let public_key = recover(&signature, &hash).map_err(errors::encryption)?; - let address = public_to_address(&public_key); - Ok(RecoveredAccount { address, public_key, is_valid_for_current_chain }) + let signature = Signature::from_rsv(&r, &s, v as u8); + let public_key = recover(&signature, &hash).map_err(errors::encryption)?; + let address = 
public_to_address(&public_key); + Ok(RecoveredAccount { + address, + public_key, + is_valid_for_current_chain, + }) } #[cfg(test)] mod tests { - use super::*; - use ethkey::Generator; - use ethereum_types::{H160, U64}; + use super::*; + use ethereum_types::{H160, U64}; + use ethkey::Generator; - pub fn add_chain_replay_protection(v: u64, chain_id: Option) -> u64 { - v + if let Some(n) = chain_id { 35 + n * 2 } else { 0 } - } + pub fn add_chain_replay_protection(v: u64, chain_id: Option) -> u64 { + v + if let Some(n) = chain_id { + 35 + n * 2 + } else { + 0 + } + } - struct TestCase { - should_prefix: bool, - signing_chain_id: Option, - rpc_chain_id: Option, - is_valid_for_current_chain: bool, - } + struct TestCase { + should_prefix: bool, + signing_chain_id: Option, + rpc_chain_id: Option, + is_valid_for_current_chain: bool, + } - /// mocked signer - fn sign(should_prefix: bool, data: Vec, signing_chain_id: Option) -> (H160, [u8; 32], [u8; 32], U64) { - let hash = if should_prefix { eth_data_hash(data) } else { keccak(data) }; - let account = ethkey::Random.generate().unwrap(); - let address = account.address(); - let sig = ethkey::sign(account.secret(), &hash).unwrap(); - let (r, s, v) = (sig.r(), sig.s(), sig.v()); - let v = add_chain_replay_protection(v as u64, signing_chain_id); - let (r_buf, s_buf) = { - let (mut r_buf, mut s_buf) = ([0u8; 32], [0u8; 32]); - r_buf.copy_from_slice(r); - s_buf.copy_from_slice(s); - (r_buf, s_buf) - }; - (address.into(), r_buf, s_buf, v.into()) - } + /// mocked signer + fn sign( + should_prefix: bool, + data: Vec, + signing_chain_id: Option, + ) -> (H160, [u8; 32], [u8; 32], U64) { + let hash = if should_prefix { + eth_data_hash(data) + } else { + keccak(data) + }; + let account = ethkey::Random.generate().unwrap(); + let address = account.address(); + let sig = ethkey::sign(account.secret(), &hash).unwrap(); + let (r, s, v) = (sig.r(), sig.s(), sig.v()); + let v = add_chain_replay_protection(v as u64, signing_chain_id); + let 
(r_buf, s_buf) = { + let (mut r_buf, mut s_buf) = ([0u8; 32], [0u8; 32]); + r_buf.copy_from_slice(r); + s_buf.copy_from_slice(s); + (r_buf, s_buf) + }; + (address.into(), r_buf, s_buf, v.into()) + } - fn run_test(test_case: TestCase) { - let TestCase { should_prefix, signing_chain_id, rpc_chain_id, is_valid_for_current_chain } = test_case; - let data = vec![5u8]; + fn run_test(test_case: TestCase) { + let TestCase { + should_prefix, + signing_chain_id, + rpc_chain_id, + is_valid_for_current_chain, + } = test_case; + let data = vec![5u8]; - let (address, r, s, v) = sign(should_prefix, data.clone(), signing_chain_id); - let account = verify_signature(should_prefix, data.into(), r.into(), s.into(), v, rpc_chain_id).unwrap(); + let (address, r, s, v) = sign(should_prefix, data.clone(), signing_chain_id); + let account = verify_signature( + should_prefix, + data.into(), + r.into(), + s.into(), + v, + rpc_chain_id, + ) + .unwrap(); - assert_eq!(account.address, address.into()); - assert_eq!(account.is_valid_for_current_chain, is_valid_for_current_chain) - } + assert_eq!(account.address, address.into()); + assert_eq!( + account.is_valid_for_current_chain, + is_valid_for_current_chain + ) + } - #[test] - fn test_verify_signature_prefixed_mainnet() { - run_test(TestCase { - should_prefix: true, - signing_chain_id: Some(1), - rpc_chain_id: Some(1), - is_valid_for_current_chain: true, - }) - } + #[test] + fn test_verify_signature_prefixed_mainnet() { + run_test(TestCase { + should_prefix: true, + signing_chain_id: Some(1), + rpc_chain_id: Some(1), + is_valid_for_current_chain: true, + }) + } - #[test] - fn test_verify_signature_not_prefixed_mainnet() { - run_test(TestCase { - should_prefix: false, - signing_chain_id: Some(1), - rpc_chain_id: Some(1), - is_valid_for_current_chain: true, - }) - } + #[test] + fn test_verify_signature_not_prefixed_mainnet() { + run_test(TestCase { + should_prefix: false, + signing_chain_id: Some(1), + rpc_chain_id: Some(1), + 
is_valid_for_current_chain: true, + }) + } - #[test] - fn test_verify_signature_incompatible_chain_id() { - run_test(TestCase { - should_prefix: false, - signing_chain_id: Some(65), - rpc_chain_id: Some(1), - is_valid_for_current_chain: false, - }); - run_test(TestCase { - should_prefix: true, - signing_chain_id: Some(65), - rpc_chain_id: Some(1), - is_valid_for_current_chain: false, - }); - } + #[test] + fn test_verify_signature_incompatible_chain_id() { + run_test(TestCase { + should_prefix: false, + signing_chain_id: Some(65), + rpc_chain_id: Some(1), + is_valid_for_current_chain: false, + }); + run_test(TestCase { + should_prefix: true, + signing_chain_id: Some(65), + rpc_chain_id: Some(1), + is_valid_for_current_chain: false, + }); + } - #[test] - fn test_verify_signature_no_signing_chain_id() { - run_test(TestCase { - should_prefix: false, - signing_chain_id: None, - rpc_chain_id: Some(1), - is_valid_for_current_chain: false, - }); - run_test(TestCase { - should_prefix: true, - signing_chain_id: None, - rpc_chain_id: Some(1), - is_valid_for_current_chain: false, - }); - } + #[test] + fn test_verify_signature_no_signing_chain_id() { + run_test(TestCase { + should_prefix: false, + signing_chain_id: None, + rpc_chain_id: Some(1), + is_valid_for_current_chain: false, + }); + run_test(TestCase { + should_prefix: true, + signing_chain_id: None, + rpc_chain_id: Some(1), + is_valid_for_current_chain: false, + }); + } - #[test] - fn test_verify_signature_no_rpc_chain_id() { - run_test(TestCase { - should_prefix: false, - signing_chain_id: Some(1), - rpc_chain_id: None, - is_valid_for_current_chain: false, - }); - run_test(TestCase { - should_prefix: true, - signing_chain_id: Some(1), - rpc_chain_id: None, - is_valid_for_current_chain: false, - }); - } + #[test] + fn test_verify_signature_no_rpc_chain_id() { + run_test(TestCase { + should_prefix: false, + signing_chain_id: Some(1), + rpc_chain_id: None, + is_valid_for_current_chain: false, + }); + run_test(TestCase { + 
should_prefix: true, + signing_chain_id: Some(1), + rpc_chain_id: None, + is_valid_for_current_chain: false, + }); + } - #[test] - fn test_verify_signature_no_chain_replay_protection() { - run_test(TestCase { - should_prefix: false, - signing_chain_id: None, - rpc_chain_id: None, - is_valid_for_current_chain: true, - }); - run_test(TestCase { - should_prefix: true, - signing_chain_id: None, - rpc_chain_id: None, - is_valid_for_current_chain: true, - }); - } + #[test] + fn test_verify_signature_no_chain_replay_protection() { + run_test(TestCase { + should_prefix: false, + signing_chain_id: None, + rpc_chain_id: None, + is_valid_for_current_chain: true, + }); + run_test(TestCase { + should_prefix: true, + signing_chain_id: None, + rpc_chain_id: None, + is_valid_for_current_chain: true, + }); + } } diff --git a/rpc/src/v1/helpers/subscribers.rs b/rpc/src/v1/helpers/subscribers.rs index 9483d8e32..c766422e1 100644 --- a/rpc/src/v1/helpers/subscribers.rs +++ b/rpc/src/v1/helpers/subscribers.rs @@ -16,111 +16,113 @@ //! A map of subscribers. 
-use std::{ops, str}; -use std::collections::HashMap; -use jsonrpc_pubsub::{typed::{Subscriber, Sink}, SubscriptionId}; use ethereum_types::H64; +use jsonrpc_pubsub::{ + typed::{Sink, Subscriber}, + SubscriptionId, +}; use rand::{Rng, StdRng}; +use std::{collections::HashMap, ops, str}; #[derive(Debug, Clone, Hash, Eq, PartialEq)] pub struct Id(H64); impl str::FromStr for Id { - type Err = String; + type Err = String; - fn from_str(s: &str) -> Result { - if s.starts_with("0x") { - Ok(Id(s[2..].parse().map_err(|e| format!("{}", e))?)) - } else { - Err("The id must start with 0x".into()) - } - } + fn from_str(s: &str) -> Result { + if s.starts_with("0x") { + Ok(Id(s[2..].parse().map_err(|e| format!("{}", e))?)) + } else { + Err("The id must start with 0x".into()) + } + } } impl Id { - // TODO: replace `format!` see [#10412](https://github.com/paritytech/parity-ethereum/issues/10412) - pub fn as_string(&self) -> String { - format!("{:?}", self.0) - } + // TODO: replace `format!` see [#10412](https://github.com/paritytech/parity-ethereum/issues/10412) + pub fn as_string(&self) -> String { + format!("{:?}", self.0) + } } #[derive(Clone)] pub struct Subscribers { - rand: StdRng, - subscriptions: HashMap, + rand: StdRng, + subscriptions: HashMap, } impl Default for Subscribers { - fn default() -> Self { - Subscribers { - rand: StdRng::new().expect("Valid random source is required."), - subscriptions: HashMap::new(), - } - } + fn default() -> Self { + Subscribers { + rand: StdRng::new().expect("Valid random source is required."), + subscriptions: HashMap::new(), + } + } } impl Subscribers { - /// Create a new Subscribers with given random source. - #[cfg(test)] - pub fn new_test() -> Self { - Subscribers { - rand: ::rand::SeedableRng::from_seed([0usize].as_ref()), - subscriptions: HashMap::new(), - } - } + /// Create a new Subscribers with given random source. 
+ #[cfg(test)] + pub fn new_test() -> Self { + Subscribers { + rand: ::rand::SeedableRng::from_seed([0usize].as_ref()), + subscriptions: HashMap::new(), + } + } - fn next_id(&mut self) -> Id { - let mut data = H64::default(); - self.rand.fill_bytes(&mut data.0); - Id(data) - } + fn next_id(&mut self) -> Id { + let mut data = H64::default(); + self.rand.fill_bytes(&mut data.0); + Id(data) + } - /// Insert new subscription and return assigned id. - pub fn insert(&mut self, val: T) -> SubscriptionId { - let id = self.next_id(); - debug!(target: "pubsub", "Adding subscription id={:?}", id); - let s = id.as_string(); - self.subscriptions.insert(id, val); - SubscriptionId::String(s) - } + /// Insert new subscription and return assigned id. + pub fn insert(&mut self, val: T) -> SubscriptionId { + let id = self.next_id(); + debug!(target: "pubsub", "Adding subscription id={:?}", id); + let s = id.as_string(); + self.subscriptions.insert(id, val); + SubscriptionId::String(s) + } - /// Removes subscription with given id and returns it (if any). - pub fn remove(&mut self, id: &SubscriptionId) -> Option { - trace!(target: "pubsub", "Removing subscription id={:?}", id); - match *id { - SubscriptionId::String(ref id) => match id.parse() { - Ok(id) => self.subscriptions.remove(&id), - Err(_) => None, - }, - _ => None, - } - } + /// Removes subscription with given id and returns it (if any). + pub fn remove(&mut self, id: &SubscriptionId) -> Option { + trace!(target: "pubsub", "Removing subscription id={:?}", id); + match *id { + SubscriptionId::String(ref id) => match id.parse() { + Ok(id) => self.subscriptions.remove(&id), + Err(_) => None, + }, + _ => None, + } + } } impl Subscribers> { - /// Assigns id and adds a subscriber to the list. 
- pub fn push(&mut self, sub: Subscriber) { - let id = self.next_id(); - if let Ok(sink) = sub.assign_id(SubscriptionId::String(id.as_string())) { - debug!(target: "pubsub", "Adding subscription id={:?}", id); - self.subscriptions.insert(id, sink); - } - } + /// Assigns id and adds a subscriber to the list. + pub fn push(&mut self, sub: Subscriber) { + let id = self.next_id(); + if let Ok(sink) = sub.assign_id(SubscriptionId::String(id.as_string())) { + debug!(target: "pubsub", "Adding subscription id={:?}", id); + self.subscriptions.insert(id, sink); + } + } } impl Subscribers<(Sink, V)> { - /// Assigns id and adds a subscriber to the list. - pub fn push(&mut self, sub: Subscriber, val: V) { - let id = self.next_id(); - if let Ok(sink) = sub.assign_id(SubscriptionId::String(id.as_string())) { - debug!(target: "pubsub", "Adding subscription id={:?}", id); - self.subscriptions.insert(id, (sink, val)); - } - } + /// Assigns id and adds a subscriber to the list. + pub fn push(&mut self, sub: Subscriber, val: V) { + let id = self.next_id(); + if let Ok(sink) = sub.assign_id(SubscriptionId::String(id.as_string())) { + debug!(target: "pubsub", "Adding subscription id={:?}", id); + self.subscriptions.insert(id, (sink, val)); + } + } } impl ops::Deref for Subscribers { - type Target = HashMap; + type Target = HashMap; - fn deref(&self) -> &Self::Target { - &self.subscriptions - } + fn deref(&self) -> &Self::Target { + &self.subscriptions + } } diff --git a/rpc/src/v1/helpers/subscription_manager.rs b/rpc/src/v1/helpers/subscription_manager.rs index d83beb397..2a960b83a 100644 --- a/rpc/src/v1/helpers/subscription_manager.rs +++ b/rpc/src/v1/helpers/subscription_manager.rs @@ -16,171 +16,193 @@ //! Generic poll manager for Pub-Sub. 
-use std::sync::Arc; -use std::sync::atomic::{self, AtomicBool}; use parking_lot::Mutex; +use std::sync::{ + atomic::{self, AtomicBool}, + Arc, +}; -use jsonrpc_core::futures::future::{self, Either}; -use jsonrpc_core::futures::sync::mpsc; -use jsonrpc_core::futures::{Sink, Future}; -use jsonrpc_core::{self as core, MetaIoHandler}; +use jsonrpc_core::{ + self as core, + futures::{ + future::{self, Either}, + sync::mpsc, + Future, Sink, + }, + MetaIoHandler, +}; use jsonrpc_pubsub::SubscriptionId; -use v1::helpers::Subscribers; -use v1::metadata::Metadata; +use v1::{helpers::Subscribers, metadata::Metadata}; #[derive(Debug)] struct Subscription { - metadata: Metadata, - method: String, - params: core::Params, - sink: mpsc::Sender>, - /// a flag if subscription is still active and last returned value - last_result: Arc<(AtomicBool, Mutex>)>, + metadata: Metadata, + method: String, + params: core::Params, + sink: mpsc::Sender>, + /// a flag if subscription is still active and last returned value + last_result: Arc<(AtomicBool, Mutex>)>, } /// A struct managing all subscriptions. /// TODO [ToDr] Depending on the method decide on poll interval. /// For most of the methods it will be enough to poll on new block instead of time-interval. pub struct GenericPollManager> { - subscribers: Subscribers, - rpc: MetaIoHandler, + subscribers: Subscribers, + rpc: MetaIoHandler, } impl> GenericPollManager { - /// Creates new poll manager - pub fn new(rpc: MetaIoHandler) -> Self { - GenericPollManager { - subscribers: Default::default(), - rpc, - } - } + /// Creates new poll manager + pub fn new(rpc: MetaIoHandler) -> Self { + GenericPollManager { + subscribers: Default::default(), + rpc, + } + } - /// Creates new poll manager with deterministic ids. - #[cfg(test)] - pub fn new_test(rpc: MetaIoHandler) -> Self { - let mut manager = Self::new(rpc); - manager.subscribers = Subscribers::new_test(); - manager - } + /// Creates new poll manager with deterministic ids. 
+ #[cfg(test)] + pub fn new_test(rpc: MetaIoHandler) -> Self { + let mut manager = Self::new(rpc); + manager.subscribers = Subscribers::new_test(); + manager + } - /// Subscribes to update from polling given method. - pub fn subscribe(&mut self, metadata: Metadata, method: String, params: core::Params) - -> (SubscriptionId, mpsc::Receiver>) - { - let (sink, stream) = mpsc::channel(1); - let subscription = Subscription { - metadata, - method, - params, - sink, - last_result: Default::default(), - }; - let id = self.subscribers.insert(subscription); - (id, stream) - } + /// Subscribes to update from polling given method. + pub fn subscribe( + &mut self, + metadata: Metadata, + method: String, + params: core::Params, + ) -> ( + SubscriptionId, + mpsc::Receiver>, + ) { + let (sink, stream) = mpsc::channel(1); + let subscription = Subscription { + metadata, + method, + params, + sink, + last_result: Default::default(), + }; + let id = self.subscribers.insert(subscription); + (id, stream) + } - pub fn unsubscribe(&mut self, id: &SubscriptionId) -> bool { - debug!(target: "pubsub", "Removing subscription: {:?}", id); - self.subscribers.remove(id).map(|subscription| { - subscription.last_result.0.store(true, atomic::Ordering::SeqCst); - }).is_some() - } + pub fn unsubscribe(&mut self, id: &SubscriptionId) -> bool { + debug!(target: "pubsub", "Removing subscription: {:?}", id); + self.subscribers + .remove(id) + .map(|subscription| { + subscription + .last_result + .0 + .store(true, atomic::Ordering::SeqCst); + }) + .is_some() + } - pub fn tick(&self) -> Box + Send> { - let mut futures = Vec::new(); - // poll all subscriptions - for (id, subscription) in self.subscribers.iter() { - let call = core::MethodCall { - jsonrpc: Some(core::Version::V2), - id: core::Id::Str(id.as_string()), - method: subscription.method.clone(), - params: subscription.params.clone(), - }; - trace!(target: "pubsub", "Polling method: {:?}", call); - let result = self.rpc.handle_call(call.into(), 
subscription.metadata.clone()); + pub fn tick(&self) -> Box + Send> { + let mut futures = Vec::new(); + // poll all subscriptions + for (id, subscription) in self.subscribers.iter() { + let call = core::MethodCall { + jsonrpc: Some(core::Version::V2), + id: core::Id::Str(id.as_string()), + method: subscription.method.clone(), + params: subscription.params.clone(), + }; + trace!(target: "pubsub", "Polling method: {:?}", call); + let result = self + .rpc + .handle_call(call.into(), subscription.metadata.clone()); - let last_result = subscription.last_result.clone(); - let sender = subscription.sink.clone(); + let last_result = subscription.last_result.clone(); + let sender = subscription.sink.clone(); - let result = result.and_then(move |response| { - // quick check if the subscription is still valid - if last_result.0.load(atomic::Ordering::SeqCst) { - return Either::B(future::ok(())) - } + let result = result.and_then(move |response| { + // quick check if the subscription is still valid + if last_result.0.load(atomic::Ordering::SeqCst) { + return Either::B(future::ok(())); + } - let mut last_result = last_result.1.lock(); - if *last_result != response && response.is_some() { - let output = response.expect("Existence proved by the condition."); - debug!(target: "pubsub", "Got new response, sending: {:?}", output); - *last_result = Some(output.clone()); + let mut last_result = last_result.1.lock(); + if *last_result != response && response.is_some() { + let output = response.expect("Existence proved by the condition."); + debug!(target: "pubsub", "Got new response, sending: {:?}", output); + *last_result = Some(output.clone()); - let send = match output { - core::Output::Success(core::Success { result, .. }) => Ok(result), - core::Output::Failure(core::Failure { error, .. 
}) => Err(error), - }; - Either::A(sender.send(send).map(|_| ()).map_err(|_| ())) - } else { - trace!(target: "pubsub", "Response was not changed: {:?}", response); - Either::B(future::ok(())) - } - }); + let send = match output { + core::Output::Success(core::Success { result, .. }) => Ok(result), + core::Output::Failure(core::Failure { error, .. }) => Err(error), + }; + Either::A(sender.send(send).map(|_| ()).map_err(|_| ())) + } else { + trace!(target: "pubsub", "Response was not changed: {:?}", response); + Either::B(future::ok(())) + } + }); - futures.push(result) - } + futures.push(result) + } - // return a future represeting all the polls - Box::new(future::join_all(futures).map(|_| ())) - } + // return a future represeting all the polls + Box::new(future::join_all(futures).map(|_| ())) + } } #[cfg(test)] mod tests { - use std::sync::atomic::{self, AtomicBool}; + use std::sync::atomic::{self, AtomicBool}; - use jsonrpc_core::{MetaIoHandler, NoopMiddleware, Value, Params}; - use jsonrpc_core::futures::{Future, Stream}; - use jsonrpc_pubsub::SubscriptionId; - use http::tokio::runtime::Runtime; + use http::tokio::runtime::Runtime; + use jsonrpc_core::{ + futures::{Future, Stream}, + MetaIoHandler, NoopMiddleware, Params, Value, + }; + use jsonrpc_pubsub::SubscriptionId; - use super::GenericPollManager; + use super::GenericPollManager; - fn poll_manager() -> GenericPollManager { - let mut io = MetaIoHandler::default(); - let called = AtomicBool::new(false); - io.add_method("hello", move |_| { - if !called.load(atomic::Ordering::SeqCst) { - called.store(true, atomic::Ordering::SeqCst); - Ok(Value::String("hello".into())) - } else { - Ok(Value::String("world".into())) - } - }); - GenericPollManager::new_test(io) - } + fn poll_manager() -> GenericPollManager { + let mut io = MetaIoHandler::default(); + let called = AtomicBool::new(false); + io.add_method("hello", move |_| { + if !called.load(atomic::Ordering::SeqCst) { + called.store(true, 
atomic::Ordering::SeqCst); + Ok(Value::String("hello".into())) + } else { + Ok(Value::String("world".into())) + } + }); + GenericPollManager::new_test(io) + } - #[test] - fn should_poll_subscribed_method() { - // given - let mut el = Runtime::new().unwrap(); - let mut poll_manager = poll_manager(); - let (id, rx) = poll_manager.subscribe(Default::default(), "hello".into(), Params::None); - assert_eq!(id, SubscriptionId::String("0x416d77337e24399d".into())); + #[test] + fn should_poll_subscribed_method() { + // given + let mut el = Runtime::new().unwrap(); + let mut poll_manager = poll_manager(); + let (id, rx) = poll_manager.subscribe(Default::default(), "hello".into(), Params::None); + assert_eq!(id, SubscriptionId::String("0x416d77337e24399d".into())); - // then - poll_manager.tick().wait().unwrap(); - let (res, rx) = el.block_on(rx.into_future()).unwrap(); - assert_eq!(res, Some(Ok(Value::String("hello".into())))); + // then + poll_manager.tick().wait().unwrap(); + let (res, rx) = el.block_on(rx.into_future()).unwrap(); + assert_eq!(res, Some(Ok(Value::String("hello".into())))); - // retrieve second item - poll_manager.tick().wait().unwrap(); - let (res, rx) = el.block_on(rx.into_future()).unwrap(); - assert_eq!(res, Some(Ok(Value::String("world".into())))); + // retrieve second item + poll_manager.tick().wait().unwrap(); + let (res, rx) = el.block_on(rx.into_future()).unwrap(); + assert_eq!(res, Some(Ok(Value::String("world".into())))); - // and no more notifications - poll_manager.tick().wait().unwrap(); - // we need to unsubscribe otherwise the future will never finish. - poll_manager.unsubscribe(&id); - assert_eq!(el.block_on(rx.into_future()).unwrap().0, None); - } + // and no more notifications + poll_manager.tick().wait().unwrap(); + // we need to unsubscribe otherwise the future will never finish. 
+ poll_manager.unsubscribe(&id); + assert_eq!(el.block_on(rx.into_future()).unwrap().0, None); + } } diff --git a/rpc/src/v1/helpers/work.rs b/rpc/src/v1/helpers/work.rs index b52cb70c5..9aa2f0b36 100644 --- a/rpc/src/v1/helpers/work.rs +++ b/rpc/src/v1/helpers/work.rs @@ -18,21 +18,28 @@ use std::sync::Arc; -use rlp; use ethcore::miner::{BlockChainClient, MinerService}; -use ethereum_types::{H64, H256}; +use ethereum_types::{H256, H64}; use jsonrpc_core::Error; +use rlp; use v1::helpers::errors; // Submit a POW work and return the block's hash -pub fn submit_work_detail(client: &Arc, miner: &Arc, nonce: H64, pow_hash: H256, mix_hash: H256) -> Result { - // TODO [ToDr] Should disallow submissions in case of PoA? - trace!(target: "miner", "submit_work_detail: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); - let seal = vec![rlp::encode(&mix_hash), rlp::encode(&nonce)]; - miner.submit_seal(pow_hash, seal) - .and_then(|block| client.import_sealed_block(block)) - .map_err(|e| { - warn!(target: "miner", "Cannot submit work - {:?}.", e); - errors::cannot_submit_work(e) - }) +pub fn submit_work_detail( + client: &Arc, + miner: &Arc, + nonce: H64, + pow_hash: H256, + mix_hash: H256, +) -> Result { + // TODO [ToDr] Should disallow submissions in case of PoA? 
+ trace!(target: "miner", "submit_work_detail: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); + let seal = vec![rlp::encode(&mix_hash), rlp::encode(&nonce)]; + miner + .submit_seal(pow_hash, seal) + .and_then(|block| client.import_sealed_block(block)) + .map_err(|e| { + warn!(target: "miner", "Cannot submit work - {:?}.", e); + errors::cannot_submit_work(e) + }) } diff --git a/rpc/src/v1/impls/debug.rs b/rpc/src/v1/impls/debug.rs index e46dd628d..dbfdd6d1e 100644 --- a/rpc/src/v1/impls/debug.rs +++ b/rpc/src/v1/impls/debug.rs @@ -19,79 +19,96 @@ use std::sync::Arc; use ethcore::client::BlockChainClient; -use types::header::Header; -use types::transaction::LocalizedTransaction; +use types::{header::Header, transaction::LocalizedTransaction}; use jsonrpc_core::Result; -use v1::traits::Debug; -use v1::types::{Block, Bytes, RichBlock, BlockTransactions, Transaction}; +use v1::{ + traits::Debug, + types::{Block, BlockTransactions, Bytes, RichBlock, Transaction}, +}; /// Debug rpc implementation. pub struct DebugClient { - client: Arc, + client: Arc, } impl DebugClient { - /// Creates new debug client. - pub fn new(client: Arc) -> Self { - Self { - client, - } - } + /// Creates new debug client. 
+ pub fn new(client: Arc) -> Self { + Self { client } + } } impl Debug for DebugClient { - fn bad_blocks(&self) -> Result> { - fn cast>(t: &T) -> O { - (*t).into() - } + fn bad_blocks(&self) -> Result> { + fn cast>(t: &T) -> O { + (*t).into() + } - Ok(self.client.bad_blocks().into_iter().map(|(block, reason)| { - let number = block.header.number(); - let hash = block.header.hash(); - RichBlock { - inner: Block { - hash: Some(hash), - size: Some(block.bytes.len().into()), - parent_hash: cast(block.header.parent_hash()), - uncles_hash: cast(block.header.uncles_hash()), - author: cast(block.header.author()), - miner: cast(block.header.author()), - state_root: cast(block.header.state_root()), - receipts_root: cast(block.header.receipts_root()), - number: Some(number.into()), - gas_used: cast(block.header.gas_used()), - gas_limit: cast(block.header.gas_limit()), - logs_bloom: Some(cast(block.header.log_bloom())), - timestamp: block.header.timestamp().into(), - difficulty: cast(block.header.difficulty()), - total_difficulty: None, - seal_fields: block.header.seal().iter().cloned().map(Into::into).collect(), - uncles: block.uncles.iter().map(Header::hash).collect(), - transactions: BlockTransactions::Full(block.transactions - .into_iter() - .enumerate() - .map(|(transaction_index, signed)| Transaction::from_localized(LocalizedTransaction { - block_number: number, - block_hash: hash, - transaction_index, - signed, - cached_sender: None, - })).collect() - ), - transactions_root: cast(block.header.transactions_root()), - extra_data: block.header.extra_data().clone().into(), - }, - extra_info: vec![ - ("reason".to_owned(), reason), - ("rlp".to_owned(), serialize(&Bytes(block.bytes))), - ("hash".to_owned(), format!("{:#x}", hash)), - ].into_iter().collect(), - } - }).collect()) - } + Ok(self + .client + .bad_blocks() + .into_iter() + .map(|(block, reason)| { + let number = block.header.number(); + let hash = block.header.hash(); + RichBlock { + inner: Block { + hash: 
Some(hash), + size: Some(block.bytes.len().into()), + parent_hash: cast(block.header.parent_hash()), + uncles_hash: cast(block.header.uncles_hash()), + author: cast(block.header.author()), + miner: cast(block.header.author()), + state_root: cast(block.header.state_root()), + receipts_root: cast(block.header.receipts_root()), + number: Some(number.into()), + gas_used: cast(block.header.gas_used()), + gas_limit: cast(block.header.gas_limit()), + logs_bloom: Some(cast(block.header.log_bloom())), + timestamp: block.header.timestamp().into(), + difficulty: cast(block.header.difficulty()), + total_difficulty: None, + seal_fields: block + .header + .seal() + .iter() + .cloned() + .map(Into::into) + .collect(), + uncles: block.uncles.iter().map(Header::hash).collect(), + transactions: BlockTransactions::Full( + block + .transactions + .into_iter() + .enumerate() + .map(|(transaction_index, signed)| { + Transaction::from_localized(LocalizedTransaction { + block_number: number, + block_hash: hash, + transaction_index, + signed, + cached_sender: None, + }) + }) + .collect(), + ), + transactions_root: cast(block.header.transactions_root()), + extra_data: block.header.extra_data().clone().into(), + }, + extra_info: vec![ + ("reason".to_owned(), reason), + ("rlp".to_owned(), serialize(&Bytes(block.bytes))), + ("hash".to_owned(), format!("{:#x}", hash)), + ] + .into_iter() + .collect(), + } + }) + .collect()) + } } fn serialize(t: &T) -> String { - ::serde_json::to_string(t).expect("RPC types serialization is non-fallible.") + ::serde_json::to_string(t).expect("RPC types serialization is non-fallible.") } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 9a3e2ee24..55ec72b01 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -16,436 +16,506 @@ //! Eth rpc implementation. 
-use std::thread; -use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH}; -use std::sync::Arc; +use std::{ + sync::Arc, + thread, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; -use rlp::Rlp; -use ethereum_types::{Address, H64, H160, H256, U64, U256}; +use ethereum_types::{Address, H160, H256, H64, U256, U64}; use parking_lot::Mutex; +use rlp::Rlp; use ethash::{self, SeedHashCompute}; -use ethcore::client::{BlockChainClient, BlockId, TransactionId, UncleId, StateOrBlock, StateClient, StateInfo, Call, EngineInfo, ProvingBlockChainClient}; -use ethcore::miner::{self, MinerService}; -use ethcore::snapshot::SnapshotService; +use ethcore::{ + client::{ + BlockChainClient, BlockId, Call, EngineInfo, ProvingBlockChainClient, StateClient, + StateInfo, StateOrBlock, TransactionId, UncleId, + }, + miner::{self, MinerService}, + snapshot::SnapshotService, +}; use hash::keccak; use miner::external::ExternalMinerService; use sync::SyncProvider; -use types::transaction::{SignedTransaction, LocalizedTransaction}; -use types::BlockNumber as EthBlockNumber; -use types::encoded; -use types::filter::Filter as EthcoreFilter; -use types::header::Header; - -use jsonrpc_core::{BoxFuture, Result}; -use jsonrpc_core::futures::future; - -use v1::helpers::{self, errors, limit_logs, fake_sign}; -use v1::helpers::deprecated::{self, DeprecationNotice}; -use v1::helpers::dispatch::{FullDispatcher, default_gas_price}; -use v1::helpers::block_import::is_major_importing; -use v1::traits::Eth; -use v1::types::{ - RichBlock, Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, - Transaction, CallRequest, Index, Filter, Log, Receipt, Work, EthAccount, StorageProof, - block_number_to_id +use types::{ + encoded, + filter::Filter as EthcoreFilter, + header::Header, + transaction::{LocalizedTransaction, SignedTransaction}, + BlockNumber as EthBlockNumber, +}; + +use jsonrpc_core::{futures::future, BoxFuture, Result}; + +use v1::{ + helpers::{ + self, + 
block_import::is_major_importing, + deprecated::{self, DeprecationNotice}, + dispatch::{default_gas_price, FullDispatcher}, + errors, fake_sign, limit_logs, + }, + metadata::Metadata, + traits::Eth, + types::{ + block_number_to_id, Block, BlockNumber, BlockTransactions, Bytes, CallRequest, EthAccount, + Filter, Index, Log, Receipt, RichBlock, StorageProof, SyncInfo, SyncStatus, Transaction, + Work, + }, }; -use v1::metadata::Metadata; const EXTRA_INFO_PROOF: &str = "Object exists in blockchain (fetched earlier), extra_info is always available if object exists; qed"; /// Eth RPC options #[derive(Copy, Clone)] pub struct EthClientOptions { - /// Return nonce from transaction queue when pending block not available. - pub pending_nonce_from_queue: bool, - /// Returns receipt from pending blocks - pub allow_pending_receipt_query: bool, - /// Send additional block number when asking for work - pub send_block_number_in_get_work: bool, - /// Gas Price Percentile used as default gas price. - pub gas_price_percentile: usize, - /// Return 'null' instead of an error if ancient block sync is still in - /// progress and the block information requested could not be found. - pub allow_missing_blocks: bool, - /// Enable Experimental RPC-Calls - pub allow_experimental_rpcs: bool, - /// flag for ancient block sync - pub no_ancient_blocks: bool, + /// Return nonce from transaction queue when pending block not available. + pub pending_nonce_from_queue: bool, + /// Returns receipt from pending blocks + pub allow_pending_receipt_query: bool, + /// Send additional block number when asking for work + pub send_block_number_in_get_work: bool, + /// Gas Price Percentile used as default gas price. + pub gas_price_percentile: usize, + /// Return 'null' instead of an error if ancient block sync is still in + /// progress and the block information requested could not be found. 
+ pub allow_missing_blocks: bool, + /// Enable Experimental RPC-Calls + pub allow_experimental_rpcs: bool, + /// flag for ancient block sync + pub no_ancient_blocks: bool, } impl EthClientOptions { - /// Creates new default `EthClientOptions` and allows alterations - /// by provided function. - pub fn with(fun: F) -> Self { - let mut options = Self::default(); - fun(&mut options); - options - } + /// Creates new default `EthClientOptions` and allows alterations + /// by provided function. + pub fn with(fun: F) -> Self { + let mut options = Self::default(); + fun(&mut options); + options + } } impl Default for EthClientOptions { - fn default() -> Self { - EthClientOptions { - pending_nonce_from_queue: false, - allow_pending_receipt_query: true, - send_block_number_in_get_work: true, - gas_price_percentile: 50, - allow_missing_blocks: false, - allow_experimental_rpcs: false, - no_ancient_blocks: false, - } - } + fn default() -> Self { + EthClientOptions { + pending_nonce_from_queue: false, + allow_pending_receipt_query: true, + send_block_number_in_get_work: true, + gas_price_percentile: 50, + allow_missing_blocks: false, + allow_experimental_rpcs: false, + no_ancient_blocks: false, + } + } } /// Eth rpc implementation. -pub struct EthClient where - C: miner::BlockChainClient + BlockChainClient, - SN: SnapshotService, - S: SyncProvider, - M: MinerService, - EM: ExternalMinerService { - - client: Arc, - snapshot: Arc, - sync: Arc, - accounts: Arc Vec
+ Send + Sync>, - miner: Arc, - external_miner: Arc, - seed_compute: Mutex, - options: EthClientOptions, - deprecation_notice: DeprecationNotice, +pub struct EthClient +where + C: miner::BlockChainClient + BlockChainClient, + SN: SnapshotService, + S: SyncProvider, + M: MinerService, + EM: ExternalMinerService, +{ + client: Arc, + snapshot: Arc, + sync: Arc, + accounts: Arc Vec
+ Send + Sync>, + miner: Arc, + external_miner: Arc, + seed_compute: Mutex, + options: EthClientOptions, + deprecation_notice: DeprecationNotice, } #[derive(Debug)] enum BlockNumberOrId { - Number(BlockNumber), - Id(BlockId), + Number(BlockNumber), + Id(BlockId), } impl From for BlockNumberOrId { - fn from(value: BlockId) -> BlockNumberOrId { - BlockNumberOrId::Id(value) - } + fn from(value: BlockId) -> BlockNumberOrId { + BlockNumberOrId::Id(value) + } } impl From for BlockNumberOrId { - fn from(value: BlockNumber) -> BlockNumberOrId { - BlockNumberOrId::Number(value) - } + fn from(value: BlockNumber) -> BlockNumberOrId { + BlockNumberOrId::Number(value) + } } enum PendingOrBlock { - Block(BlockId), - Pending, + Block(BlockId), + Pending, } struct PendingUncleId { - id: PendingOrBlock, - position: usize, + id: PendingOrBlock, + position: usize, } enum PendingTransactionId { - Hash(H256), - Location(PendingOrBlock, usize) + Hash(H256), + Location(PendingOrBlock, usize), } -pub fn base_logs (client: &C, miner: &M, filter: Filter) -> BoxFuture> where - C: miner::BlockChainClient + BlockChainClient + StateClient + Call, - M: MinerService { - let include_pending = filter.to_block == Some(BlockNumber::Pending); - let filter: EthcoreFilter = match filter.try_into() { - Ok(value) => value, - Err(err) => return Box::new(future::err(err)), - }; - let mut logs = match client.logs(filter.clone()) { - Ok(logs) => logs - .into_iter() - .map(From::from) - .collect::>(), - Err(id) => return Box::new(future::err(errors::filter_block_not_found(id))), - }; +pub fn base_logs( + client: &C, + miner: &M, + filter: Filter, +) -> BoxFuture> +where + C: miner::BlockChainClient + BlockChainClient + StateClient + Call, + M: MinerService, +{ + let include_pending = filter.to_block == Some(BlockNumber::Pending); + let filter: EthcoreFilter = match filter.try_into() { + Ok(value) => value, + Err(err) => return Box::new(future::err(err)), + }; + let mut logs = match client.logs(filter.clone()) 
{ + Ok(logs) => logs.into_iter().map(From::from).collect::>(), + Err(id) => return Box::new(future::err(errors::filter_block_not_found(id))), + }; - if include_pending { - let best_block = client.chain_info().best_block_number; - let pending = pending_logs(&*miner, best_block, &filter); - logs.extend(pending); - } + if include_pending { + let best_block = client.chain_info().best_block_number; + let pending = pending_logs(&*miner, best_block, &filter); + logs.extend(pending); + } - let logs = limit_logs(logs, filter.limit); + let logs = limit_logs(logs, filter.limit); - Box::new(future::ok(logs)) + Box::new(future::ok(logs)) } -impl EthClient where - C: miner::BlockChainClient + BlockChainClient + StateClient + Call + EngineInfo, - SN: SnapshotService, - S: SyncProvider, - M: MinerService, - EM: ExternalMinerService { +impl EthClient +where + C: miner::BlockChainClient + + BlockChainClient + + StateClient + + Call + + EngineInfo, + SN: SnapshotService, + S: SyncProvider, + M: MinerService, + EM: ExternalMinerService, +{ + /// Creates new EthClient. + pub fn new( + client: &Arc, + snapshot: &Arc, + sync: &Arc, + accounts: &Arc Vec
+ Send + Sync>, + miner: &Arc, + em: &Arc, + options: EthClientOptions, + ) -> Self { + EthClient { + client: client.clone(), + snapshot: snapshot.clone(), + sync: sync.clone(), + miner: miner.clone(), + accounts: accounts.clone(), + external_miner: em.clone(), + seed_compute: Mutex::new(SeedHashCompute::default()), + options, + deprecation_notice: Default::default(), + } + } - /// Creates new EthClient. - pub fn new( - client: &Arc, - snapshot: &Arc, - sync: &Arc, - accounts: &Arc Vec
+ Send + Sync>, - miner: &Arc, - em: &Arc, - options: EthClientOptions - ) -> Self { - EthClient { - client: client.clone(), - snapshot: snapshot.clone(), - sync: sync.clone(), - miner: miner.clone(), - accounts: accounts.clone(), - external_miner: em.clone(), - seed_compute: Mutex::new(SeedHashCompute::default()), - options, - deprecation_notice: Default::default(), - } - } + fn rich_block(&self, id: BlockNumberOrId, include_txs: bool) -> Result> { + let client = &self.client; - fn rich_block(&self, id: BlockNumberOrId, include_txs: bool) -> Result> { - let client = &self.client; + let client_query = |id| { + ( + client.block(id), + client.block_total_difficulty(id), + client.block_extra_info(id), + false, + ) + }; - let client_query = |id| (client.block(id), client.block_total_difficulty(id), client.block_extra_info(id), false); + let (block, difficulty, extra, is_pending) = match id { + BlockNumberOrId::Number(BlockNumber::Pending) => { + let info = self.client.chain_info(); + match self.miner.pending_block(info.best_block_number) { + Some(pending_block) => { + warn!("`Pending` is deprecated and may be removed in future versions."); - let (block, difficulty, extra, is_pending) = match id { - BlockNumberOrId::Number(BlockNumber::Pending) => { - let info = self.client.chain_info(); - match self.miner.pending_block(info.best_block_number) { - Some(pending_block) => { - warn!("`Pending` is deprecated and may be removed in future versions."); + let difficulty = { + let latest_difficulty = self + .client + .block_total_difficulty(BlockId::Latest) + .expect("blocks in chain have details; qed"); + let pending_difficulty = self + .miner + .pending_block_header(info.best_block_number) + .map(|header| *header.difficulty()); - let difficulty = { - let latest_difficulty = self.client.block_total_difficulty(BlockId::Latest).expect("blocks in chain have details; qed"); - let pending_difficulty = self.miner.pending_block_header(info.best_block_number).map(|header| 
*header.difficulty()); + if let Some(difficulty) = pending_difficulty { + difficulty + latest_difficulty + } else { + latest_difficulty + } + }; - if let Some(difficulty) = pending_difficulty { - difficulty + latest_difficulty - } else { - latest_difficulty - } - }; + let extra = self.client.engine().extra_info(&pending_block.header); - let extra = self.client.engine().extra_info(&pending_block.header); + ( + Some(encoded::Block::new(pending_block.rlp_bytes())), + Some(difficulty), + Some(extra), + true, + ) + } + None => { + warn!("`Pending` is deprecated and may be removed in future versions. Falling back to `Latest`"); + client_query(BlockId::Latest) + } + } + } - (Some(encoded::Block::new(pending_block.rlp_bytes())), Some(difficulty), Some(extra), true) - }, - None => { - warn!("`Pending` is deprecated and may be removed in future versions. Falling back to `Latest`"); - client_query(BlockId::Latest) - } - } - }, + BlockNumberOrId::Number(num) => { + let id = match num { + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Num(n) => BlockId::Number(n), + BlockNumber::Pending => unreachable!(), // Already covered + }; - BlockNumberOrId::Number(num) => { - let id = match num { - BlockNumber::Latest => BlockId::Latest, - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Num(n) => BlockId::Number(n), - BlockNumber::Pending => unreachable!() // Already covered - }; + client_query(id) + } - client_query(id) - }, + BlockNumberOrId::Id(id) => client_query(id), + }; - BlockNumberOrId::Id(id) => client_query(id), - }; + match (block, difficulty) { + (Some(block), Some(total_difficulty)) => { + let view = block.header_view(); + Ok(Some(RichBlock { + inner: Block { + hash: match is_pending { + true => None, + false => Some(view.hash()), + }, + size: Some(block.rlp().as_raw().len().into()), + parent_hash: view.parent_hash(), + uncles_hash: view.uncles_hash(), + author: view.author(), + miner: view.author(), + 
state_root: view.state_root(), + transactions_root: view.transactions_root(), + receipts_root: view.receipts_root(), + number: match is_pending { + true => None, + false => Some(view.number().into()), + }, + gas_used: view.gas_used(), + gas_limit: view.gas_limit(), + logs_bloom: match is_pending { + true => None, + false => Some(view.log_bloom()), + }, + timestamp: view.timestamp().into(), + difficulty: view.difficulty(), + total_difficulty: Some(total_difficulty), + seal_fields: view.seal().into_iter().map(Into::into).collect(), + uncles: block.uncle_hashes(), + transactions: match include_txs { + true => BlockTransactions::Full( + block + .view() + .localized_transactions() + .into_iter() + .map(Transaction::from_localized) + .collect(), + ), + false => BlockTransactions::Hashes(block.transaction_hashes()), + }, + extra_data: Bytes::new(view.extra_data()), + }, + extra_info: extra.expect(EXTRA_INFO_PROOF), + })) + } + _ => Ok(None), + } + } - match (block, difficulty) { - (Some(block), Some(total_difficulty)) => { - let view = block.header_view(); - Ok(Some(RichBlock { - inner: Block { - hash: match is_pending { - true => None, - false => Some(view.hash()), - }, - size: Some(block.rlp().as_raw().len().into()), - parent_hash: view.parent_hash(), - uncles_hash: view.uncles_hash(), - author: view.author(), - miner: view.author(), - state_root: view.state_root(), - transactions_root: view.transactions_root(), - receipts_root: view.receipts_root(), - number: match is_pending { - true => None, - false => Some(view.number().into()), - }, - gas_used: view.gas_used(), - gas_limit: view.gas_limit(), - logs_bloom: match is_pending { - true => None, - false => Some(view.log_bloom()), - }, - timestamp: view.timestamp().into(), - difficulty: view.difficulty(), - total_difficulty: Some(total_difficulty), - seal_fields: view.seal().into_iter().map(Into::into).collect(), - uncles: block.uncle_hashes(), - transactions: match include_txs { - true => 
BlockTransactions::Full(block.view().localized_transactions().into_iter().map(Transaction::from_localized).collect()), - false => BlockTransactions::Hashes(block.transaction_hashes()), - }, - extra_data: Bytes::new(view.extra_data()), - }, - extra_info: extra.expect(EXTRA_INFO_PROOF), - })) - }, - _ => Ok(None) - } - } + fn transaction(&self, id: PendingTransactionId) -> Result> { + let client_transaction = |id| match self.client.transaction(id) { + Some(t) => Ok(Some(Transaction::from_localized(t))), + None => Ok(None), + }; - fn transaction(&self, id: PendingTransactionId) -> Result> { - let client_transaction = |id| match self.client.transaction(id) { - Some(t) => Ok(Some(Transaction::from_localized(t))), - None => Ok(None), - }; + match id { + PendingTransactionId::Hash(hash) => client_transaction(TransactionId::Hash(hash)), - match id { - PendingTransactionId::Hash(hash) => client_transaction(TransactionId::Hash(hash)), + PendingTransactionId::Location(PendingOrBlock::Block(block), index) => { + client_transaction(TransactionId::Location(block, index)) + } - PendingTransactionId::Location(PendingOrBlock::Block(block), index) => { - client_transaction(TransactionId::Location(block, index)) - }, + PendingTransactionId::Location(PendingOrBlock::Pending, index) => { + let info = self.client.chain_info(); + let pending_block = match self.miner.pending_block(info.best_block_number) { + Some(block) => block, + None => return Ok(None), + }; - PendingTransactionId::Location(PendingOrBlock::Pending, index) => { - let info = self.client.chain_info(); - let pending_block = match self.miner.pending_block(info.best_block_number) { - Some(block) => block, - None => return Ok(None), - }; + // Implementation stolen from `extract_transaction_at_index` + let transaction = pending_block + .transactions + .get(index) + // Verify if transaction signature is correct. 
+ .and_then(|tx| SignedTransaction::new(tx.clone()).ok()) + .map(|signed_tx| { + let (signed, sender, _) = signed_tx.deconstruct(); + let block_hash = pending_block.header.hash(); + let block_number = pending_block.header.number(); + let transaction_index = index; + let cached_sender = Some(sender); - // Implementation stolen from `extract_transaction_at_index` - let transaction = pending_block.transactions.get(index) - // Verify if transaction signature is correct. - .and_then(|tx| SignedTransaction::new(tx.clone()).ok()) - .map(|signed_tx| { - let (signed, sender, _) = signed_tx.deconstruct(); - let block_hash = pending_block.header.hash(); - let block_number = pending_block.header.number(); - let transaction_index = index; - let cached_sender = Some(sender); + LocalizedTransaction { + signed, + block_number, + block_hash, + transaction_index, + cached_sender, + } + }) + .map(Transaction::from_localized); - LocalizedTransaction { - signed, - block_number, - block_hash, - transaction_index, - cached_sender, - } - }) - .map(Transaction::from_localized); + Ok(transaction) + } + } + } - Ok(transaction) - } - } - } + fn uncle(&self, id: PendingUncleId) -> Result> { + let client = &self.client; - fn uncle(&self, id: PendingUncleId) -> Result> { - let client = &self.client; + let (uncle, parent_difficulty, extra) = match id { + PendingUncleId { + id: PendingOrBlock::Pending, + position, + } => { + let info = self.client.chain_info(); - let (uncle, parent_difficulty, extra) = match id { - PendingUncleId { id: PendingOrBlock::Pending, position } => { - let info = self.client.chain_info(); + let pending_block = match self.miner.pending_block(info.best_block_number) { + Some(block) => block, + None => return Ok(None), + }; - let pending_block = match self.miner.pending_block(info.best_block_number) { - Some(block) => block, - None => return Ok(None), - }; + let uncle = match pending_block.uncles.get(position) { + Some(uncle) => uncle.clone(), + None => return Ok(None), + }; 
- let uncle = match pending_block.uncles.get(position) { - Some(uncle) => uncle.clone(), - None => return Ok(None), - }; + let difficulty = { + let latest_difficulty = self + .client + .block_total_difficulty(BlockId::Latest) + .expect("blocks in chain have details; qed"); + let pending_difficulty = self + .miner + .pending_block_header(info.best_block_number) + .map(|header| *header.difficulty()); - let difficulty = { - let latest_difficulty = self.client.block_total_difficulty(BlockId::Latest).expect("blocks in chain have details; qed"); - let pending_difficulty = self.miner.pending_block_header(info.best_block_number).map(|header| *header.difficulty()); + if let Some(difficulty) = pending_difficulty { + difficulty + latest_difficulty + } else { + latest_difficulty + } + }; - if let Some(difficulty) = pending_difficulty { - difficulty + latest_difficulty - } else { - latest_difficulty - } - }; + let extra = self.client.engine().extra_info(&pending_block.header); - let extra = self.client.engine().extra_info(&pending_block.header); + (uncle, difficulty, extra) + } - (uncle, difficulty, extra) - }, + PendingUncleId { + id: PendingOrBlock::Block(block_id), + position, + } => { + let uncle_id = UncleId { + block: block_id, + position, + }; - PendingUncleId { id: PendingOrBlock::Block(block_id), position } => { - let uncle_id = UncleId { block: block_id, position }; + let uncle = match client.uncle(uncle_id) { + Some(hdr) => match hdr.decode() { + Ok(h) => h, + Err(e) => return Err(errors::decode(e)), + }, + None => { + return Ok(None); + } + }; - let uncle = match client.uncle(uncle_id) { - Some(hdr) => match hdr.decode() { - Ok(h) => h, - Err(e) => return Err(errors::decode(e)) - }, - None => { return Ok(None); } - }; + let parent_difficulty = + match client.block_total_difficulty(BlockId::Hash(*uncle.parent_hash())) { + Some(difficulty) => difficulty, + None => { + return Ok(None); + } + }; - let parent_difficulty = match 
client.block_total_difficulty(BlockId::Hash(*uncle.parent_hash())) { - Some(difficulty) => difficulty, - None => { return Ok(None); } - }; + let extra = client.uncle_extra_info(uncle_id).expect(EXTRA_INFO_PROOF); - let extra = client.uncle_extra_info(uncle_id).expect(EXTRA_INFO_PROOF); + (uncle, parent_difficulty, extra) + } + }; - (uncle, parent_difficulty, extra) - } - }; + let size = client + .block(BlockId::Hash(uncle.hash())) + .map(|block| block.into_inner().len()) + .map(U256::from); - let size = client.block(BlockId::Hash(uncle.hash())) - .map(|block| block.into_inner().len()) - .map(U256::from); + let block = RichBlock { + inner: Block { + hash: Some(uncle.hash()), + size, + parent_hash: *uncle.parent_hash(), + uncles_hash: *uncle.uncles_hash(), + author: *uncle.author(), + miner: *uncle.author(), + state_root: *uncle.state_root(), + transactions_root: *uncle.transactions_root(), + number: Some(uncle.number().into()), + gas_used: *uncle.gas_used(), + gas_limit: *uncle.gas_limit(), + logs_bloom: Some(*uncle.log_bloom()), + timestamp: uncle.timestamp().into(), + difficulty: *uncle.difficulty(), + total_difficulty: Some(uncle.difficulty() + parent_difficulty), + receipts_root: *uncle.receipts_root(), + extra_data: uncle.extra_data().clone().into(), + seal_fields: uncle.seal().iter().cloned().map(Into::into).collect(), + uncles: vec![], + transactions: BlockTransactions::Hashes(vec![]), + }, + extra_info: extra, + }; + Ok(Some(block)) + } - let block = RichBlock { - inner: Block { - hash: Some(uncle.hash()), - size, - parent_hash: *uncle.parent_hash(), - uncles_hash: *uncle.uncles_hash(), - author: *uncle.author(), - miner: *uncle.author(), - state_root: *uncle.state_root(), - transactions_root: *uncle.transactions_root(), - number: Some(uncle.number().into()), - gas_used: *uncle.gas_used(), - gas_limit: *uncle.gas_limit(), - logs_bloom: Some(*uncle.log_bloom()), - timestamp: uncle.timestamp().into(), - difficulty: *uncle.difficulty(), - total_difficulty: 
Some(uncle.difficulty() + parent_difficulty), - receipts_root: *uncle.receipts_root(), - extra_data: uncle.extra_data().clone().into(), - seal_fields: uncle.seal().iter().cloned().map(Into::into).collect(), - uncles: vec![], - transactions: BlockTransactions::Hashes(vec![]), - }, - extra_info: extra, - }; - Ok(Some(block)) - } + /// Get state for the given block number. Returns either the State or a block from which state + /// can be retrieved. + /// Note: When passing `BlockNumber::Pending` we fall back to the state of the current best block + /// if no state found for the best pending block. + fn get_state(&self, number: BlockNumber) -> StateOrBlock { + match number { + BlockNumber::Num(num) => BlockId::Number(num).into(), + BlockNumber::Earliest => BlockId::Earliest.into(), + BlockNumber::Latest => BlockId::Latest.into(), + BlockNumber::Pending => { + let info = self.client.chain_info(); - /// Get state for the given block number. Returns either the State or a block from which state - /// can be retrieved. - /// Note: When passing `BlockNumber::Pending` we fall back to the state of the current best block - /// if no state found for the best pending block. - fn get_state(&self, number: BlockNumber) -> StateOrBlock { - match number { - BlockNumber::Num(num) => BlockId::Number(num).into(), - BlockNumber::Earliest => BlockId::Earliest.into(), - BlockNumber::Latest => BlockId::Latest.into(), - BlockNumber::Pending => { - let info = self.client.chain_info(); - - self.miner + self.miner .pending_state(info.best_block_number) .map(|s| Box::new(s) as Box) .unwrap_or_else(|| { @@ -454,564 +524,685 @@ impl EthClient }) .into() - } - } - } + } + } + } - /// Get the state and header of best pending block. On failure, fall back to the best imported - /// blocks state&header. 
- fn pending_state_and_header_with_fallback(&self) -> (T, Header) { - let best_block_number = self.client.chain_info().best_block_number; - let (maybe_state, maybe_header) = - self.miner.pending_state(best_block_number).map_or_else(|| (None, None),|s| { - (Some(s), self.miner.pending_block_header(best_block_number)) - }); + /// Get the state and header of best pending block. On failure, fall back to the best imported + /// blocks state&header. + fn pending_state_and_header_with_fallback(&self) -> (T, Header) { + let best_block_number = self.client.chain_info().best_block_number; + let (maybe_state, maybe_header) = self.miner.pending_state(best_block_number).map_or_else( + || (None, None), + |s| (Some(s), self.miner.pending_block_header(best_block_number)), + ); - match (maybe_state, maybe_header) { - (Some(state), Some(header)) => (state, header), - _ => { - warn!("Falling back to \"Latest\""); - self.client.latest_state_and_header() - } - } - } + match (maybe_state, maybe_header) { + (Some(state), Some(header)) => (state, header), + _ => { + warn!("Falling back to \"Latest\""); + self.client.latest_state_and_header() + } + } + } } -pub fn pending_logs(miner: &M, best_block: EthBlockNumber, filter: &EthcoreFilter) -> Vec where M: MinerService { - let receipts = miner.pending_receipts(best_block).unwrap_or_default(); - - receipts.into_iter() - .flat_map(|r| { - let hash = r.transaction_hash; - r.logs.into_iter().map(move |l| (hash, l)) - }) - .filter(|pair| filter.matches(&pair.1)) - .map(|pair| { - let mut log = Log::from(pair.1); - log.transaction_hash = Some(pair.0); - log - }) - .collect() -} - -fn check_known(client: &C, number: BlockNumber) -> Result<()> where C: BlockChainClient { - use types::block_status::BlockStatus; - - let id = match number { - BlockNumber::Pending => return Ok(()), - BlockNumber::Num(n) => BlockId::Number(n), - BlockNumber::Latest => BlockId::Latest, - BlockNumber::Earliest => BlockId::Earliest, - }; - - match client.block_status(id) { 
- BlockStatus::InChain => Ok(()), - _ => Err(errors::unknown_block()), - } -} - -const MAX_QUEUE_SIZE_TO_MINE_ON: usize = 4; // because uncles go back 6. - -impl Eth for EthClient where - C: miner::BlockChainClient + StateClient + ProvingBlockChainClient + Call + EngineInfo + 'static, - SN: SnapshotService + 'static, - S: SyncProvider + 'static, - M: MinerService + 'static, - EM: ExternalMinerService + 'static, +pub fn pending_logs(miner: &M, best_block: EthBlockNumber, filter: &EthcoreFilter) -> Vec +where + M: MinerService, { - type Metadata = Metadata; - - fn protocol_version(&self) -> Result { - let version = self.sync.status().protocol_version.to_owned(); - Ok(format!("{}", version)) - } - - fn syncing(&self) -> Result { - use ethcore::snapshot::RestorationStatus; - - let status = self.sync.status(); - let client = &self.client; - let snapshot_status = self.snapshot.status(); - - let (warping, warp_chunks_amount, warp_chunks_processed) = match snapshot_status { - RestorationStatus::Ongoing { state_chunks, block_chunks, state_chunks_done, block_chunks_done } => - (true, Some(block_chunks + state_chunks), Some(block_chunks_done + state_chunks_done)), - _ => (false, None, None), - }; - - if warping || is_major_importing(Some(status.state), client.queue_info()) { - let chain_info = client.chain_info(); - let current_block = U256::from(chain_info.best_block_number); - let highest_block = U256::from(status.highest_block_number.unwrap_or(status.start_block_number)); - - let info = SyncInfo { - starting_block: status.start_block_number.into(), - current_block, - highest_block, - warp_chunks_amount: warp_chunks_amount.map(|x| U256::from(x as u64)).map(Into::into), - warp_chunks_processed: warp_chunks_processed.map(|x| U256::from(x as u64)).map(Into::into), - }; - Ok(SyncStatus::Info(info)) - } else { - Ok(SyncStatus::None) - } - } - - fn author(&self) -> Result { - let miner = self.miner.authoring_params().author; - if miner == 0.into() { - (self.accounts)() - .first() 
- .cloned() - .ok_or_else(|| errors::account("No accounts were found", "")) - } else { - Ok(miner) - } - } - - fn is_mining(&self) -> Result { - Ok(self.miner.is_currently_sealing()) - } - - fn chain_id(&self) -> Result> { - Ok(self.client.signing_chain_id().map(U64::from)) - } - - fn hashrate(&self) -> Result { - Ok(self.external_miner.hashrate()) - } - - fn gas_price(&self) -> BoxFuture { - Box::new(future::ok(default_gas_price(&*self.client, &*self.miner, self.options.gas_price_percentile))) - } - - fn accounts(&self) -> Result> { - self.deprecation_notice.print("eth_accounts", deprecated::msgs::ACCOUNTS); - - let accounts = (self.accounts)(); - Ok(accounts) - } - - fn block_number(&self) -> Result { - Ok(U256::from(self.client.chain_info().best_block_number)) - } - - fn balance(&self, address: H160, num: Option) -> BoxFuture { - let num = num.unwrap_or_default(); - - try_bf!(check_known(&*self.client, num.clone())); - let res = match self.client.balance(&address, self.get_state(num)) { - Some(balance) => Ok(balance), - None => Err(errors::state_pruned()), - }; - - Box::new(future::done(res)) - } - - fn proof(&self, address: H160, values: Vec, num: Option) -> BoxFuture { - try_bf!(errors::require_experimental(self.options.allow_experimental_rpcs, "1186")); - - let key1 = keccak(address); - - let num = num.unwrap_or_default(); - let id = match num { - BlockNumber::Num(n) => BlockId::Number(n), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => { - self.deprecation_notice.print("`Pending`", Some("falling back to `Latest`")); - BlockId::Latest - } - }; - - try_bf!(check_known(&*self.client, num.clone())); - let res = match self.client.prove_account(key1, id) { - Some((proof, account)) => Ok(EthAccount { - address, - balance: account.balance, - nonce: account.nonce, - code_hash: account.code_hash, - storage_hash: account.storage_root, - account_proof: proof.into_iter().map(Bytes::new).collect(), - 
storage_proof: values.into_iter().filter_map(|storage_index| { - let key2: H256 = storage_index; - self.client.prove_storage(key1, keccak(key2), id) - .map(|(storage_proof, storage_value)| StorageProof { - key: key2.into(), - value: storage_value.into(), - proof: storage_proof.into_iter().map(Bytes::new).collect() - }) - }) - .collect::>() - }), - None => Err(errors::state_pruned()), - }; - - Box::new(future::done(res)) - } - - fn storage_at(&self, address: H160, position: U256, num: Option) -> BoxFuture { - let num = num.unwrap_or_default(); - - try_bf!(check_known(&*self.client, num.clone())); - let res = match self.client.storage_at(&address, &H256::from(position), self.get_state(num)) { - Some(s) => Ok(s), - None => Err(errors::state_pruned()), - }; - - Box::new(future::done(res)) - } - - fn transaction_count(&self, address: H160, num: Option) -> BoxFuture { - let res = match num.unwrap_or_default() { - BlockNumber::Pending if self.options.pending_nonce_from_queue => { - Ok(self.miner.next_nonce(&*self.client, &address)) - } - BlockNumber::Pending => { - let info = self.client.chain_info(); - let nonce = self.miner - .pending_state(info.best_block_number) - .and_then(|s| s.nonce(&address).ok()) - .or_else(|| { - warn!("Fallback to `BlockId::Latest`"); - self.client.nonce(&address, BlockId::Latest) - }); - - match nonce { - Some(nonce) => Ok(nonce), - None => Err(errors::database("latest nonce missing")) - } - }, - number => { - try_bf!(check_known(&*self.client, number.clone())); - match self.client.nonce(&address, block_number_to_id(number)) { - Some(nonce) => Ok(nonce), - None => Err(errors::state_pruned()), - } - } - }; - - Box::new(future::done(res)) - } - - fn block_transaction_count_by_hash(&self, hash: H256) -> BoxFuture> { - let trx_count = self.client.block(BlockId::Hash(hash)) - .map(|block| block.transactions_count().into()); - let result = Ok(trx_count) - .and_then(errors::check_block_gap(&*self.client, self.options)); - 
Box::new(future::done(result)) - } - - fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture> { - Box::new(future::done(match num { - BlockNumber::Pending => - Ok(Some(self.miner.pending_transaction_hashes(&*self.client).len().into())), - _ => { - let trx_count = self.client.block(block_number_to_id(num.clone())) - .map(|block| block.transactions_count().into()); - Ok(trx_count) - .and_then(errors::check_block_number_existence( - &*self.client, - num, - self.options - )) - } - })) - } - - fn block_uncles_count_by_hash(&self, hash: H256) -> BoxFuture> { - let uncle_count = self.client.block(BlockId::Hash(hash)) - .map(|block| block.uncles_count().into()); - let result = Ok(uncle_count) - .and_then(errors::check_block_gap(&*self.client, self.options)); - Box::new(future::done(result)) - } - - fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture> { - Box::new(future::done(match num { - BlockNumber::Pending => Ok(Some(0.into())), - _ => { - let uncles_count = self.client.block(block_number_to_id(num.clone())) - .map(|block| block.uncles_count().into()); - Ok(uncles_count) - .and_then(errors::check_block_number_existence( - &*self.client, - num, - self.options - )) - } - })) - } - - fn code_at(&self, address: H160, num: Option) -> BoxFuture { - let address: Address = H160::into(address); - - let num = num.unwrap_or_default(); - try_bf!(check_known(&*self.client, num.clone())); - - let res = match self.client.code(&address, self.get_state(num)) { - Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)), - None => Err(errors::state_pruned()), - }; - - Box::new(future::done(res)) - } - - fn block_by_hash(&self, hash: H256, include_txs: bool) -> BoxFuture> { - let result = self.rich_block(BlockId::Hash(hash).into(), include_txs) - .and_then(errors::check_block_gap(&*self.client, self.options)); - Box::new(future::done(result)) - } - - fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture> { - let result = 
self.rich_block(num.clone().into(), include_txs).and_then( - errors::check_block_number_existence(&*self.client, num, self.options)); - Box::new(future::done(result)) - } - - fn transaction_by_hash(&self, hash: H256) -> BoxFuture> { - let tx = try_bf!(self.transaction(PendingTransactionId::Hash(hash))).or_else(|| { - self.miner.transaction(&hash) - .map(|t| Transaction::from_pending(t.pending().clone())) - }); - let result = Ok(tx).and_then( - errors::check_block_gap(&*self.client, self.options)); - Box::new(future::done(result)) - } - - fn transaction_by_block_hash_and_index(&self, hash: H256, index: Index) -> BoxFuture> { - let id = PendingTransactionId::Location(PendingOrBlock::Block(BlockId::Hash(hash)), index.value()); - let result = self.transaction(id).and_then( - errors::check_block_gap(&*self.client, self.options)); - Box::new(future::done(result)) - } - - fn transaction_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> BoxFuture> { - let block_id = match num { - BlockNumber::Latest => PendingOrBlock::Block(BlockId::Latest), - BlockNumber::Earliest => PendingOrBlock::Block(BlockId::Earliest), - BlockNumber::Num(num) => PendingOrBlock::Block(BlockId::Number(num)), - BlockNumber::Pending => PendingOrBlock::Pending, - }; - - let transaction_id = PendingTransactionId::Location(block_id, index.value()); - let result = self.transaction(transaction_id).and_then( - errors::check_block_number_existence(&*self.client, num, self.options)); - Box::new(future::done(result)) - } - - fn transaction_receipt(&self, hash: H256) -> BoxFuture> { - if self.options.allow_pending_receipt_query { - let best_block = self.client.chain_info().best_block_number; - if let Some(receipt) = self.miner.pending_receipt(best_block, &hash) { - return Box::new(future::ok(Some(receipt.into()))); - } - } - - let receipt = self.client.transaction_receipt(TransactionId::Hash(hash)); - let result = Ok(receipt.map(Into::into)) - .and_then(errors::check_block_gap(&*self.client, 
self.options)); - Box::new(future::done(result)) - } - - fn uncle_by_block_hash_and_index(&self, hash: H256, index: Index) -> BoxFuture> { - let result = self.uncle(PendingUncleId { - id: PendingOrBlock::Block(BlockId::Hash(hash)), - position: index.value() - }).and_then(errors::check_block_gap(&*self.client, self.options)); - Box::new(future::done(result)) - } - - fn uncle_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> BoxFuture> { - let id = match num { - BlockNumber::Latest => PendingUncleId { id: PendingOrBlock::Block(BlockId::Latest), position: index.value() }, - BlockNumber::Earliest => PendingUncleId { id: PendingOrBlock::Block(BlockId::Earliest), position: index.value() }, - BlockNumber::Num(num) => PendingUncleId { id: PendingOrBlock::Block(BlockId::Number(num)), position: index.value() }, - - BlockNumber::Pending => PendingUncleId { id: PendingOrBlock::Pending, position: index.value() }, - }; - - let result = self.uncle(id) - .and_then(errors::check_block_number_existence( - &*self.client, - num, - self.options - )); - - Box::new(future::done(result)) - } - - fn compilers(&self) -> Result> { - Err(errors::deprecated("Compilation functionality is deprecated.".to_string())) - } - - fn logs(&self, filter: Filter) -> BoxFuture> { - base_logs(&*self.client, &*self.miner, filter) - } - - fn work(&self, no_new_work_timeout: Option) -> Result { - let no_new_work_timeout = no_new_work_timeout.unwrap_or_default(); - - // check if we're still syncing and return empty strings in that case - { - let sync_status = self.sync.status(); - let queue_info = self.client.queue_info(); - let total_queue_size = queue_info.total_queue_size(); - - if sync_status.is_snapshot_syncing() || total_queue_size > MAX_QUEUE_SIZE_TO_MINE_ON { - trace!(target: "miner", "Syncing. Cannot give any work."); - return Err(errors::no_work()); - } - - // Otherwise spin until our submitted block has been included. 
- let timeout = Instant::now() + Duration::from_millis(1000); - while Instant::now() < timeout && self.client.queue_info().total_queue_size() > 0 { - thread::sleep(Duration::from_millis(1)); - } - } - - if self.miner.authoring_params().author.is_zero() { - warn!(target: "miner", "Cannot give work package - no author is configured. Use --author to configure!"); - return Err(errors::no_author()) - } - - let work = self.miner.work_package(&*self.client).ok_or_else(|| { - warn!(target: "miner", "Cannot give work package - engine seals internally."); - errors::no_work_required() - })?; - - let (pow_hash, number, timestamp, difficulty) = work; - let target = ethash::difficulty_to_boundary(&difficulty); - let seed_hash = self.seed_compute.lock().hash_block_number(number); - - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); - if no_new_work_timeout > 0 && timestamp + no_new_work_timeout < now { - Err(errors::no_new_work()) - } else if self.options.send_block_number_in_get_work { - Ok(Work { - pow_hash, - seed_hash: seed_hash.into(), - target, - number: Some(number), - }) - } else { - Ok(Work { - pow_hash, - seed_hash: seed_hash.into(), - target, - number: None - }) - } - } - - fn submit_work(&self, nonce: H64, pow_hash: H256, mix_hash: H256) -> Result { - match helpers::submit_work_detail(&self.client, &self.miner, nonce, pow_hash, mix_hash) { - Ok(_) => Ok(true), - Err(_) => Ok(false), - } - } - - fn submit_hashrate(&self, rate: U256, id: H256) -> Result { - self.external_miner.submit_hashrate(rate, id); - Ok(true) - } - - fn send_raw_transaction(&self, raw: Bytes) -> Result { - Rlp::new(&raw.into_vec()).as_val() - .map_err(errors::rlp) - .and_then(|tx| SignedTransaction::new(tx).map_err(errors::transaction)) - .and_then(|signed_transaction| { - FullDispatcher::dispatch_transaction( - &*self.client, - &*self.miner, - signed_transaction.into(), - false - ) - }) - .map(Into::into) - } - - fn submit_transaction(&self, raw: Bytes) -> 
Result { - self.send_raw_transaction(raw) - } - - fn call(&self, request: CallRequest, num: Option) -> BoxFuture { - let request = CallRequest::into(request); - let signed = try_bf!(fake_sign::sign_call(request)); - - let num = num.unwrap_or_default(); - - let (mut state, header) = - if num == BlockNumber::Pending { - self.pending_state_and_header_with_fallback() - } else { - let id = match num { - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => unreachable!(), // Already covered - }; - - let state = try_bf!(self.client.state_at(id).ok_or_else(errors::state_pruned)); - let header = try_bf!( - self.client.block_header(id).ok_or_else(errors::state_pruned) - .and_then(|h| h.decode().map_err(errors::decode)) - ); - - (state, header) - }; - - let result = self.client.call(&signed, Default::default(), &mut state, &header); - - Box::new(future::done(result - .map_err(errors::call) - .and_then(|executed| { - match executed.exception { - Some(ref exception) => Err(errors::vm(exception, &executed.output)), - None => Ok(executed) - } - }) - .map(|b| b.output.into()) - )) - } - - fn estimate_gas(&self, request: CallRequest, num: Option) -> BoxFuture { - let request = CallRequest::into(request); - let signed = try_bf!(fake_sign::sign_call(request)); - let num = num.unwrap_or_default(); - - let (state, header) = if num == BlockNumber::Pending { - self.pending_state_and_header_with_fallback() - } else { - let id = match num { - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => unreachable!(), // Already covered - }; - - let state = try_bf!(self.client.state_at(id) - .ok_or_else(errors::state_pruned)); - let header = try_bf!(self.client.block_header(id) - .ok_or_else(errors::state_pruned) - .and_then(|h| h.decode().map_err(errors::decode))); - (state, header) - 
}; - - Box::new(future::done(self.client.estimate_gas(&signed, &state, &header) - .map_err(errors::call) - )) - } - - fn compile_lll(&self, _: String) -> Result { - Err(errors::deprecated("Compilation of LLL via RPC is deprecated".to_string())) - } - - fn compile_serpent(&self, _: String) -> Result { - Err(errors::deprecated("Compilation of Serpent via RPC is deprecated".to_string())) - } - - fn compile_solidity(&self, _: String) -> Result { - Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string())) - } + let receipts = miner.pending_receipts(best_block).unwrap_or_default(); + + receipts + .into_iter() + .flat_map(|r| { + let hash = r.transaction_hash; + r.logs.into_iter().map(move |l| (hash, l)) + }) + .filter(|pair| filter.matches(&pair.1)) + .map(|pair| { + let mut log = Log::from(pair.1); + log.transaction_hash = Some(pair.0); + log + }) + .collect() +} + +fn check_known(client: &C, number: BlockNumber) -> Result<()> +where + C: BlockChainClient, +{ + use types::block_status::BlockStatus; + + let id = match number { + BlockNumber::Pending => return Ok(()), + BlockNumber::Num(n) => BlockId::Number(n), + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Earliest => BlockId::Earliest, + }; + + match client.block_status(id) { + BlockStatus::InChain => Ok(()), + _ => Err(errors::unknown_block()), + } +} + +const MAX_QUEUE_SIZE_TO_MINE_ON: usize = 4; // because uncles go back 6. 
+ +impl Eth for EthClient +where + C: miner::BlockChainClient + + StateClient + + ProvingBlockChainClient + + Call + + EngineInfo + + 'static, + SN: SnapshotService + 'static, + S: SyncProvider + 'static, + M: MinerService + 'static, + EM: ExternalMinerService + 'static, +{ + type Metadata = Metadata; + + fn protocol_version(&self) -> Result { + let version = self.sync.status().protocol_version.to_owned(); + Ok(format!("{}", version)) + } + + fn syncing(&self) -> Result { + use ethcore::snapshot::RestorationStatus; + + let status = self.sync.status(); + let client = &self.client; + let snapshot_status = self.snapshot.status(); + + let (warping, warp_chunks_amount, warp_chunks_processed) = match snapshot_status { + RestorationStatus::Ongoing { + state_chunks, + block_chunks, + state_chunks_done, + block_chunks_done, + } => ( + true, + Some(block_chunks + state_chunks), + Some(block_chunks_done + state_chunks_done), + ), + _ => (false, None, None), + }; + + if warping || is_major_importing(Some(status.state), client.queue_info()) { + let chain_info = client.chain_info(); + let current_block = U256::from(chain_info.best_block_number); + let highest_block = U256::from( + status + .highest_block_number + .unwrap_or(status.start_block_number), + ); + + let info = SyncInfo { + starting_block: status.start_block_number.into(), + current_block, + highest_block, + warp_chunks_amount: warp_chunks_amount + .map(|x| U256::from(x as u64)) + .map(Into::into), + warp_chunks_processed: warp_chunks_processed + .map(|x| U256::from(x as u64)) + .map(Into::into), + }; + Ok(SyncStatus::Info(info)) + } else { + Ok(SyncStatus::None) + } + } + + fn author(&self) -> Result { + let miner = self.miner.authoring_params().author; + if miner == 0.into() { + (self.accounts)() + .first() + .cloned() + .ok_or_else(|| errors::account("No accounts were found", "")) + } else { + Ok(miner) + } + } + + fn is_mining(&self) -> Result { + Ok(self.miner.is_currently_sealing()) + } + + fn chain_id(&self) -> 
Result> { + Ok(self.client.signing_chain_id().map(U64::from)) + } + + fn hashrate(&self) -> Result { + Ok(self.external_miner.hashrate()) + } + + fn gas_price(&self) -> BoxFuture { + Box::new(future::ok(default_gas_price( + &*self.client, + &*self.miner, + self.options.gas_price_percentile, + ))) + } + + fn accounts(&self) -> Result> { + self.deprecation_notice + .print("eth_accounts", deprecated::msgs::ACCOUNTS); + + let accounts = (self.accounts)(); + Ok(accounts) + } + + fn block_number(&self) -> Result { + Ok(U256::from(self.client.chain_info().best_block_number)) + } + + fn balance(&self, address: H160, num: Option) -> BoxFuture { + let num = num.unwrap_or_default(); + + try_bf!(check_known(&*self.client, num.clone())); + let res = match self.client.balance(&address, self.get_state(num)) { + Some(balance) => Ok(balance), + None => Err(errors::state_pruned()), + }; + + Box::new(future::done(res)) + } + + fn proof( + &self, + address: H160, + values: Vec, + num: Option, + ) -> BoxFuture { + try_bf!(errors::require_experimental( + self.options.allow_experimental_rpcs, + "1186" + )); + + let key1 = keccak(address); + + let num = num.unwrap_or_default(); + let id = match num { + BlockNumber::Num(n) => BlockId::Number(n), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Pending => { + self.deprecation_notice + .print("`Pending`", Some("falling back to `Latest`")); + BlockId::Latest + } + }; + + try_bf!(check_known(&*self.client, num.clone())); + let res = match self.client.prove_account(key1, id) { + Some((proof, account)) => Ok(EthAccount { + address, + balance: account.balance, + nonce: account.nonce, + code_hash: account.code_hash, + storage_hash: account.storage_root, + account_proof: proof.into_iter().map(Bytes::new).collect(), + storage_proof: values + .into_iter() + .filter_map(|storage_index| { + let key2: H256 = storage_index; + self.client.prove_storage(key1, keccak(key2), id).map( + |(storage_proof, 
storage_value)| StorageProof { + key: key2.into(), + value: storage_value.into(), + proof: storage_proof.into_iter().map(Bytes::new).collect(), + }, + ) + }) + .collect::>(), + }), + None => Err(errors::state_pruned()), + }; + + Box::new(future::done(res)) + } + + fn storage_at( + &self, + address: H160, + position: U256, + num: Option, + ) -> BoxFuture { + let num = num.unwrap_or_default(); + + try_bf!(check_known(&*self.client, num.clone())); + let res = match self + .client + .storage_at(&address, &H256::from(position), self.get_state(num)) + { + Some(s) => Ok(s), + None => Err(errors::state_pruned()), + }; + + Box::new(future::done(res)) + } + + fn transaction_count(&self, address: H160, num: Option) -> BoxFuture { + let res = match num.unwrap_or_default() { + BlockNumber::Pending if self.options.pending_nonce_from_queue => { + Ok(self.miner.next_nonce(&*self.client, &address)) + } + BlockNumber::Pending => { + let info = self.client.chain_info(); + let nonce = self + .miner + .pending_state(info.best_block_number) + .and_then(|s| s.nonce(&address).ok()) + .or_else(|| { + warn!("Fallback to `BlockId::Latest`"); + self.client.nonce(&address, BlockId::Latest) + }); + + match nonce { + Some(nonce) => Ok(nonce), + None => Err(errors::database("latest nonce missing")), + } + } + number => { + try_bf!(check_known(&*self.client, number.clone())); + match self.client.nonce(&address, block_number_to_id(number)) { + Some(nonce) => Ok(nonce), + None => Err(errors::state_pruned()), + } + } + }; + + Box::new(future::done(res)) + } + + fn block_transaction_count_by_hash(&self, hash: H256) -> BoxFuture> { + let trx_count = self + .client + .block(BlockId::Hash(hash)) + .map(|block| block.transactions_count().into()); + let result = Ok(trx_count).and_then(errors::check_block_gap(&*self.client, self.options)); + Box::new(future::done(result)) + } + + fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture> { + Box::new(future::done(match num { + 
BlockNumber::Pending => Ok(Some( + self.miner + .pending_transaction_hashes(&*self.client) + .len() + .into(), + )), + _ => { + let trx_count = self + .client + .block(block_number_to_id(num.clone())) + .map(|block| block.transactions_count().into()); + Ok(trx_count).and_then(errors::check_block_number_existence( + &*self.client, + num, + self.options, + )) + } + })) + } + + fn block_uncles_count_by_hash(&self, hash: H256) -> BoxFuture> { + let uncle_count = self + .client + .block(BlockId::Hash(hash)) + .map(|block| block.uncles_count().into()); + let result = Ok(uncle_count).and_then(errors::check_block_gap(&*self.client, self.options)); + Box::new(future::done(result)) + } + + fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture> { + Box::new(future::done(match num { + BlockNumber::Pending => Ok(Some(0.into())), + _ => { + let uncles_count = self + .client + .block(block_number_to_id(num.clone())) + .map(|block| block.uncles_count().into()); + Ok(uncles_count).and_then(errors::check_block_number_existence( + &*self.client, + num, + self.options, + )) + } + })) + } + + fn code_at(&self, address: H160, num: Option) -> BoxFuture { + let address: Address = H160::into(address); + + let num = num.unwrap_or_default(); + try_bf!(check_known(&*self.client, num.clone())); + + let res = match self.client.code(&address, self.get_state(num)) { + Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)), + None => Err(errors::state_pruned()), + }; + + Box::new(future::done(res)) + } + + fn block_by_hash(&self, hash: H256, include_txs: bool) -> BoxFuture> { + let result = self + .rich_block(BlockId::Hash(hash).into(), include_txs) + .and_then(errors::check_block_gap(&*self.client, self.options)); + Box::new(future::done(result)) + } + + fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture> { + let result = self.rich_block(num.clone().into(), include_txs).and_then( + errors::check_block_number_existence(&*self.client, num, 
self.options), + ); + Box::new(future::done(result)) + } + + fn transaction_by_hash(&self, hash: H256) -> BoxFuture> { + let tx = try_bf!(self.transaction(PendingTransactionId::Hash(hash))).or_else(|| { + self.miner + .transaction(&hash) + .map(|t| Transaction::from_pending(t.pending().clone())) + }); + let result = Ok(tx).and_then(errors::check_block_gap(&*self.client, self.options)); + Box::new(future::done(result)) + } + + fn transaction_by_block_hash_and_index( + &self, + hash: H256, + index: Index, + ) -> BoxFuture> { + let id = PendingTransactionId::Location( + PendingOrBlock::Block(BlockId::Hash(hash)), + index.value(), + ); + let result = self + .transaction(id) + .and_then(errors::check_block_gap(&*self.client, self.options)); + Box::new(future::done(result)) + } + + fn transaction_by_block_number_and_index( + &self, + num: BlockNumber, + index: Index, + ) -> BoxFuture> { + let block_id = match num { + BlockNumber::Latest => PendingOrBlock::Block(BlockId::Latest), + BlockNumber::Earliest => PendingOrBlock::Block(BlockId::Earliest), + BlockNumber::Num(num) => PendingOrBlock::Block(BlockId::Number(num)), + BlockNumber::Pending => PendingOrBlock::Pending, + }; + + let transaction_id = PendingTransactionId::Location(block_id, index.value()); + let result = + self.transaction(transaction_id) + .and_then(errors::check_block_number_existence( + &*self.client, + num, + self.options, + )); + Box::new(future::done(result)) + } + + fn transaction_receipt(&self, hash: H256) -> BoxFuture> { + if self.options.allow_pending_receipt_query { + let best_block = self.client.chain_info().best_block_number; + if let Some(receipt) = self.miner.pending_receipt(best_block, &hash) { + return Box::new(future::ok(Some(receipt.into()))); + } + } + + let receipt = self.client.transaction_receipt(TransactionId::Hash(hash)); + let result = Ok(receipt.map(Into::into)) + .and_then(errors::check_block_gap(&*self.client, self.options)); + Box::new(future::done(result)) + } + + fn 
uncle_by_block_hash_and_index( + &self, + hash: H256, + index: Index, + ) -> BoxFuture> { + let result = self + .uncle(PendingUncleId { + id: PendingOrBlock::Block(BlockId::Hash(hash)), + position: index.value(), + }) + .and_then(errors::check_block_gap(&*self.client, self.options)); + Box::new(future::done(result)) + } + + fn uncle_by_block_number_and_index( + &self, + num: BlockNumber, + index: Index, + ) -> BoxFuture> { + let id = match num { + BlockNumber::Latest => PendingUncleId { + id: PendingOrBlock::Block(BlockId::Latest), + position: index.value(), + }, + BlockNumber::Earliest => PendingUncleId { + id: PendingOrBlock::Block(BlockId::Earliest), + position: index.value(), + }, + BlockNumber::Num(num) => PendingUncleId { + id: PendingOrBlock::Block(BlockId::Number(num)), + position: index.value(), + }, + + BlockNumber::Pending => PendingUncleId { + id: PendingOrBlock::Pending, + position: index.value(), + }, + }; + + let result = self + .uncle(id) + .and_then(errors::check_block_number_existence( + &*self.client, + num, + self.options, + )); + + Box::new(future::done(result)) + } + + fn compilers(&self) -> Result> { + Err(errors::deprecated( + "Compilation functionality is deprecated.".to_string(), + )) + } + + fn logs(&self, filter: Filter) -> BoxFuture> { + base_logs(&*self.client, &*self.miner, filter) + } + + fn work(&self, no_new_work_timeout: Option) -> Result { + let no_new_work_timeout = no_new_work_timeout.unwrap_or_default(); + + // check if we're still syncing and return empty strings in that case + { + let sync_status = self.sync.status(); + let queue_info = self.client.queue_info(); + let total_queue_size = queue_info.total_queue_size(); + + if sync_status.is_snapshot_syncing() || total_queue_size > MAX_QUEUE_SIZE_TO_MINE_ON { + trace!(target: "miner", "Syncing. Cannot give any work."); + return Err(errors::no_work()); + } + + // Otherwise spin until our submitted block has been included. 
+ let timeout = Instant::now() + Duration::from_millis(1000); + while Instant::now() < timeout && self.client.queue_info().total_queue_size() > 0 { + thread::sleep(Duration::from_millis(1)); + } + } + + if self.miner.authoring_params().author.is_zero() { + warn!(target: "miner", "Cannot give work package - no author is configured. Use --author to configure!"); + return Err(errors::no_author()); + } + + let work = self.miner.work_package(&*self.client).ok_or_else(|| { + warn!(target: "miner", "Cannot give work package - engine seals internally."); + errors::no_work_required() + })?; + + let (pow_hash, number, timestamp, difficulty) = work; + let target = ethash::difficulty_to_boundary(&difficulty); + let seed_hash = self.seed_compute.lock().hash_block_number(number); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + if no_new_work_timeout > 0 && timestamp + no_new_work_timeout < now { + Err(errors::no_new_work()) + } else if self.options.send_block_number_in_get_work { + Ok(Work { + pow_hash, + seed_hash: seed_hash.into(), + target, + number: Some(number), + }) + } else { + Ok(Work { + pow_hash, + seed_hash: seed_hash.into(), + target, + number: None, + }) + } + } + + fn submit_work(&self, nonce: H64, pow_hash: H256, mix_hash: H256) -> Result { + match helpers::submit_work_detail(&self.client, &self.miner, nonce, pow_hash, mix_hash) { + Ok(_) => Ok(true), + Err(_) => Ok(false), + } + } + + fn submit_hashrate(&self, rate: U256, id: H256) -> Result { + self.external_miner.submit_hashrate(rate, id); + Ok(true) + } + + fn send_raw_transaction(&self, raw: Bytes) -> Result { + Rlp::new(&raw.into_vec()) + .as_val() + .map_err(errors::rlp) + .and_then(|tx| SignedTransaction::new(tx).map_err(errors::transaction)) + .and_then(|signed_transaction| { + FullDispatcher::dispatch_transaction( + &*self.client, + &*self.miner, + signed_transaction.into(), + false, + ) + }) + .map(Into::into) + } + + fn submit_transaction(&self, raw: 
Bytes) -> Result { + self.send_raw_transaction(raw) + } + + fn call(&self, request: CallRequest, num: Option) -> BoxFuture { + let request = CallRequest::into(request); + let signed = try_bf!(fake_sign::sign_call(request)); + + let num = num.unwrap_or_default(); + + let (mut state, header) = if num == BlockNumber::Pending { + self.pending_state_and_header_with_fallback() + } else { + let id = match num { + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Pending => unreachable!(), // Already covered + }; + + let state = try_bf!(self.client.state_at(id).ok_or_else(errors::state_pruned)); + let header = try_bf!(self + .client + .block_header(id) + .ok_or_else(errors::state_pruned) + .and_then(|h| h.decode().map_err(errors::decode))); + + (state, header) + }; + + let result = self + .client + .call(&signed, Default::default(), &mut state, &header); + + Box::new(future::done( + result + .map_err(errors::call) + .and_then(|executed| match executed.exception { + Some(ref exception) => Err(errors::vm(exception, &executed.output)), + None => Ok(executed), + }) + .map(|b| b.output.into()), + )) + } + + fn estimate_gas(&self, request: CallRequest, num: Option) -> BoxFuture { + let request = CallRequest::into(request); + let signed = try_bf!(fake_sign::sign_call(request)); + let num = num.unwrap_or_default(); + + let (state, header) = if num == BlockNumber::Pending { + self.pending_state_and_header_with_fallback() + } else { + let id = match num { + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Pending => unreachable!(), // Already covered + }; + + let state = try_bf!(self.client.state_at(id).ok_or_else(errors::state_pruned)); + let header = try_bf!(self + .client + .block_header(id) + .ok_or_else(errors::state_pruned) + .and_then(|h| h.decode().map_err(errors::decode))); + 
(state, header) + }; + + Box::new(future::done( + self.client + .estimate_gas(&signed, &state, &header) + .map_err(errors::call), + )) + } + + fn compile_lll(&self, _: String) -> Result { + Err(errors::deprecated( + "Compilation of LLL via RPC is deprecated".to_string(), + )) + } + + fn compile_serpent(&self, _: String) -> Result { + Err(errors::deprecated( + "Compilation of Serpent via RPC is deprecated".to_string(), + )) + } + + fn compile_solidity(&self, _: String) -> Result { + Err(errors::deprecated( + "Compilation of Solidity via RPC is deprecated".to_string(), + )) + } } diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index c51c85fb6..cce7fd393 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -16,297 +16,357 @@ //! Eth Filter RPC implementation -use std::sync::Arc; -use std::collections::{BTreeSet, VecDeque}; +use std::{ + collections::{BTreeSet, VecDeque}, + sync::Arc, +}; -use ethcore::client::{BlockChainClient, BlockId}; -use ethcore::miner::{self, MinerService}; +use ethcore::{ + client::{BlockChainClient, BlockId}, + miner::{self, MinerService}, +}; use ethereum_types::{H256, U256}; use parking_lot::Mutex; use types::filter::Filter as EthcoreFilter; -use jsonrpc_core::{BoxFuture, Result}; -use jsonrpc_core::futures::{future, Future}; -use jsonrpc_core::futures::future::Either; -use v1::traits::EthFilter; -use v1::types::{BlockNumber, Index, Filter, FilterChanges, Log}; -use v1::helpers::{errors, SyncPollFilter, PollFilter, PollManager, limit_logs}; -use v1::impls::eth::pending_logs; +use jsonrpc_core::{ + futures::{future, future::Either, Future}, + BoxFuture, Result, +}; +use v1::{ + helpers::{errors, limit_logs, PollFilter, PollManager, SyncPollFilter}, + impls::eth::pending_logs, + traits::EthFilter, + types::{BlockNumber, Filter, FilterChanges, Index, Log}, +}; /// Something which provides data that can be filtered over. pub trait Filterable { - /// Current best block number. 
- fn best_block_number(&self) -> u64; + /// Current best block number. + fn best_block_number(&self) -> u64; - /// Get a block hash by block id. - fn block_hash(&self, id: BlockId) -> Option; + /// Get a block hash by block id. + fn block_hash(&self, id: BlockId) -> Option; - /// pending transaction hashes at the given block (unordered). - fn pending_transaction_hashes(&self) -> BTreeSet; + /// pending transaction hashes at the given block (unordered). + fn pending_transaction_hashes(&self) -> BTreeSet; - /// Get logs that match the given filter. - fn logs(&self, filter: EthcoreFilter) -> BoxFuture>; + /// Get logs that match the given filter. + fn logs(&self, filter: EthcoreFilter) -> BoxFuture>; - /// Get logs from the pending block. - fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec; + /// Get logs from the pending block. + fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec; - /// Get a reference to the poll manager. - fn polls(&self) -> &Mutex>; + /// Get a reference to the poll manager. + fn polls(&self) -> &Mutex>; - /// Get removed logs within route from the given block to the nearest canon block, not including the canon block. Also returns how many logs have been traversed. - fn removed_logs(&self, block_hash: H256, filter: &EthcoreFilter) -> (Vec, u64); + /// Get removed logs within route from the given block to the nearest canon block, not including the canon block. Also returns how many logs have been traversed. + fn removed_logs(&self, block_hash: H256, filter: &EthcoreFilter) -> (Vec, u64); } /// Eth filter rpc implementation for a full node. pub struct EthFilterClient { - client: Arc, - miner: Arc, - polls: Mutex>, + client: Arc, + miner: Arc, + polls: Mutex>, } impl EthFilterClient { - /// Creates new Eth filter client. 
- pub fn new(client: Arc, miner: Arc, poll_lifetime: u32) -> Self { - EthFilterClient { - client, - miner, - polls: Mutex::new(PollManager::new(poll_lifetime)), - } - } + /// Creates new Eth filter client. + pub fn new(client: Arc, miner: Arc, poll_lifetime: u32) -> Self { + EthFilterClient { + client, + miner, + polls: Mutex::new(PollManager::new(poll_lifetime)), + } + } } -impl Filterable for EthFilterClient where - C: miner::BlockChainClient + BlockChainClient, - M: MinerService, +impl Filterable for EthFilterClient +where + C: miner::BlockChainClient + BlockChainClient, + M: MinerService, { - fn best_block_number(&self) -> u64 { - self.client.chain_info().best_block_number - } + fn best_block_number(&self) -> u64 { + self.client.chain_info().best_block_number + } - fn block_hash(&self, id: BlockId) -> Option { - self.client.block_hash(id) - } + fn block_hash(&self, id: BlockId) -> Option { + self.client.block_hash(id) + } - fn pending_transaction_hashes(&self) -> BTreeSet { - self.miner.pending_transaction_hashes(&*self.client) - } + fn pending_transaction_hashes(&self) -> BTreeSet { + self.miner.pending_transaction_hashes(&*self.client) + } - fn logs(&self, filter: EthcoreFilter) -> BoxFuture> { - Box::new(future::ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).collect())) - } + fn logs(&self, filter: EthcoreFilter) -> BoxFuture> { + Box::new(future::ok( + self.client + .logs(filter) + .unwrap_or_default() + .into_iter() + .map(Into::into) + .collect(), + )) + } - fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec { - pending_logs(&*self.miner, block_number, filter) - } + fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec { + pending_logs(&*self.miner, block_number, filter) + } - fn polls(&self) -> &Mutex> { &self.polls } + fn polls(&self) -> &Mutex> { + &self.polls + } - fn removed_logs(&self, block_hash: H256, filter: &EthcoreFilter) -> (Vec, u64) { - let inner = || -> Option> { - let 
mut route = Vec::new(); + fn removed_logs(&self, block_hash: H256, filter: &EthcoreFilter) -> (Vec, u64) { + let inner = || -> Option> { + let mut route = Vec::new(); - let mut current_block_hash = block_hash; - let mut current_block_header = self.client.block_header(BlockId::Hash(current_block_hash))?; + let mut current_block_hash = block_hash; + let mut current_block_header = self + .client + .block_header(BlockId::Hash(current_block_hash))?; - while current_block_hash != self.client.block_hash(BlockId::Number(current_block_header.number()))? { - route.push(current_block_hash); + while current_block_hash + != self + .client + .block_hash(BlockId::Number(current_block_header.number()))? + { + route.push(current_block_hash); - current_block_hash = current_block_header.parent_hash(); - current_block_header = self.client.block_header(BlockId::Hash(current_block_hash))?; - } + current_block_hash = current_block_header.parent_hash(); + current_block_header = self + .client + .block_header(BlockId::Hash(current_block_hash))?; + } - Some(route) - }; + Some(route) + }; - let route = inner().unwrap_or_default(); - let route_len = route.len() as u64; - (route.into_iter().flat_map(|block_hash| { - let mut filter = filter.clone(); - filter.from_block = BlockId::Hash(block_hash); - filter.to_block = filter.from_block; + let route = inner().unwrap_or_default(); + let route_len = route.len() as u64; + ( + route + .into_iter() + .flat_map(|block_hash| { + let mut filter = filter.clone(); + filter.from_block = BlockId::Hash(block_hash); + filter.to_block = filter.from_block; - self.client.logs(filter).unwrap_or_default().into_iter().map(|log| { - let mut log: Log = log.into(); - log.log_type = "removed".into(); - log.removed = true; + self.client + .logs(filter) + .unwrap_or_default() + .into_iter() + .map(|log| { + let mut log: Log = log.into(); + log.log_type = "removed".into(); + log.removed = true; - log - }) - }).collect(), route_len) - } + log + }) + }) + .collect(), + 
route_len, + ) + } } impl EthFilter for T { - fn new_filter(&self, filter: Filter) -> Result { - let mut polls = self.polls().lock(); - let block_number = self.best_block_number(); - let include_pending = filter.to_block == Some(BlockNumber::Pending); - let filter = filter.try_into()?; - let id = polls.create_poll(SyncPollFilter::new(PollFilter::Logs { - block_number, filter, include_pending, - last_block_hash: None, - previous_logs: Default::default() - })); - Ok(id.into()) - } + fn new_filter(&self, filter: Filter) -> Result { + let mut polls = self.polls().lock(); + let block_number = self.best_block_number(); + let include_pending = filter.to_block == Some(BlockNumber::Pending); + let filter = filter.try_into()?; + let id = polls.create_poll(SyncPollFilter::new(PollFilter::Logs { + block_number, + filter, + include_pending, + last_block_hash: None, + previous_logs: Default::default(), + })); + Ok(id.into()) + } - fn new_block_filter(&self) -> Result { - let mut polls = self.polls().lock(); - // +1, since we don't want to include the current block - let id = polls.create_poll(SyncPollFilter::new(PollFilter::Block { - last_block_number: self.best_block_number(), - recent_reported_hashes: VecDeque::with_capacity(PollFilter::MAX_BLOCK_HISTORY_SIZE), - })); - Ok(id.into()) - } + fn new_block_filter(&self) -> Result { + let mut polls = self.polls().lock(); + // +1, since we don't want to include the current block + let id = polls.create_poll(SyncPollFilter::new(PollFilter::Block { + last_block_number: self.best_block_number(), + recent_reported_hashes: VecDeque::with_capacity(PollFilter::MAX_BLOCK_HISTORY_SIZE), + })); + Ok(id.into()) + } - fn new_pending_transaction_filter(&self) -> Result { - let mut polls = self.polls().lock(); - let pending_transactions = self.pending_transaction_hashes(); - let id = polls.create_poll(SyncPollFilter::new(PollFilter::PendingTransaction(pending_transactions))); - Ok(id.into()) - } + fn new_pending_transaction_filter(&self) -> 
Result { + let mut polls = self.polls().lock(); + let pending_transactions = self.pending_transaction_hashes(); + let id = polls.create_poll(SyncPollFilter::new(PollFilter::PendingTransaction( + pending_transactions, + ))); + Ok(id.into()) + } - fn filter_changes(&self, index: Index) -> BoxFuture { - let filter = match self.polls().lock().poll_mut(&index.value()) { - Some(filter) => filter.clone(), - None => return Box::new(future::err(errors::filter_not_found())), - }; + fn filter_changes(&self, index: Index) -> BoxFuture { + let filter = match self.polls().lock().poll_mut(&index.value()) { + Some(filter) => filter.clone(), + None => return Box::new(future::err(errors::filter_not_found())), + }; - Box::new(filter.modify(|filter| match *filter { - PollFilter::Block { - ref mut last_block_number, - ref mut recent_reported_hashes, - } => { - // Check validity of recently reported blocks -- in case of re-org, rewind block to last valid - while let Some((num, hash)) = recent_reported_hashes.front().cloned() { - if self.block_hash(BlockId::Number(num)) == Some(hash) { break; } - *last_block_number = num - 1; - recent_reported_hashes.pop_front(); - } - let current_number = self.best_block_number(); - let mut hashes = Vec::new(); - for n in (*last_block_number + 1)..=current_number { - let block_number = BlockId::Number(n); - if let Some(hash) = self.block_hash(block_number) { - *last_block_number = n; - hashes.push(hash); - // Only keep the most recent history - if recent_reported_hashes.len() >= PollFilter::MAX_BLOCK_HISTORY_SIZE { - recent_reported_hashes.pop_back(); - } - recent_reported_hashes.push_front((n, hash)); - } - } + Box::new(filter.modify(|filter| match *filter { + PollFilter::Block { + ref mut last_block_number, + ref mut recent_reported_hashes, + } => { + // Check validity of recently reported blocks -- in case of re-org, rewind block to last valid + while let Some((num, hash)) = recent_reported_hashes.front().cloned() { + if 
self.block_hash(BlockId::Number(num)) == Some(hash) { + break; + } + *last_block_number = num - 1; + recent_reported_hashes.pop_front(); + } + let current_number = self.best_block_number(); + let mut hashes = Vec::new(); + for n in (*last_block_number + 1)..=current_number { + let block_number = BlockId::Number(n); + if let Some(hash) = self.block_hash(block_number) { + *last_block_number = n; + hashes.push(hash); + // Only keep the most recent history + if recent_reported_hashes.len() >= PollFilter::MAX_BLOCK_HISTORY_SIZE { + recent_reported_hashes.pop_back(); + } + recent_reported_hashes.push_front((n, hash)); + } + } - Either::A(future::ok(FilterChanges::Hashes(hashes))) - }, - PollFilter::PendingTransaction(ref mut previous_hashes) => { - // get hashes of pending transactions - let current_hashes = self.pending_transaction_hashes(); + Either::A(future::ok(FilterChanges::Hashes(hashes))) + } + PollFilter::PendingTransaction(ref mut previous_hashes) => { + // get hashes of pending transactions + let current_hashes = self.pending_transaction_hashes(); - let new_hashes = { - // find all new hashes - current_hashes.difference(previous_hashes) - .cloned() - .map(Into::into) - .collect() - }; + let new_hashes = { + // find all new hashes + current_hashes + .difference(previous_hashes) + .cloned() + .map(Into::into) + .collect() + }; - // save all hashes of pending transactions - *previous_hashes = current_hashes; + // save all hashes of pending transactions + *previous_hashes = current_hashes; - // return new hashes - Either::A(future::ok(FilterChanges::Hashes(new_hashes))) - }, - PollFilter::Logs { - ref mut block_number, - ref mut last_block_hash, - ref mut previous_logs, - ref filter, - include_pending, - } => { - // retrive the current block number - let current_number = self.best_block_number(); + // return new hashes + Either::A(future::ok(FilterChanges::Hashes(new_hashes))) + } + PollFilter::Logs { + ref mut block_number, + ref mut last_block_hash, + ref mut 
previous_logs, + ref filter, + include_pending, + } => { + // retrive the current block number + let current_number = self.best_block_number(); - let mut filter = filter.clone(); + let mut filter = filter.clone(); - // retrieve reorg logs - let (mut reorg, reorg_len) = last_block_hash.map_or_else(|| (Vec::new(), 0), |h| self.removed_logs(h, &filter)); - *block_number -= reorg_len as u64; + // retrieve reorg logs + let (mut reorg, reorg_len) = last_block_hash + .map_or_else(|| (Vec::new(), 0), |h| self.removed_logs(h, &filter)); + *block_number -= reorg_len as u64; - filter.from_block = BlockId::Number(*block_number); - filter.to_block = BlockId::Latest; + filter.from_block = BlockId::Number(*block_number); + filter.to_block = BlockId::Latest; - // retrieve pending logs - let pending = if include_pending { - let pending_logs = self.pending_logs(current_number, &filter); + // retrieve pending logs + let pending = if include_pending { + let pending_logs = self.pending_logs(current_number, &filter); - // remove logs about which client was already notified about - let new_pending_logs: Vec<_> = pending_logs.iter() - .filter(|p| !previous_logs.contains(p)) - .cloned() - .collect(); + // remove logs about which client was already notified about + let new_pending_logs: Vec<_> = pending_logs + .iter() + .filter(|p| !previous_logs.contains(p)) + .cloned() + .collect(); - // save all logs retrieved by client - *previous_logs = pending_logs.into_iter().collect(); + // save all logs retrieved by client + *previous_logs = pending_logs.into_iter().collect(); - new_pending_logs - } else { - Vec::new() - }; + new_pending_logs + } else { + Vec::new() + }; - // save the number of the next block as a first block from which - // we want to get logs - *block_number = current_number + 1; + // save the number of the next block as a first block from which + // we want to get logs + *block_number = current_number + 1; - // save the current block hash, which we used to get back to the - // 
canon chain in case of reorg. - *last_block_hash = self.block_hash(BlockId::Number(current_number)); + // save the current block hash, which we used to get back to the + // canon chain in case of reorg. + *last_block_hash = self.block_hash(BlockId::Number(current_number)); - // retrieve logs in range from_block..min(BlockId::Latest..to_block) - let limit = filter.limit; - Either::B(self.logs(filter) - .map(move |logs| { reorg.extend(logs); reorg }) // append reorg logs in the front - .map(move |mut logs| { logs.extend(pending); logs }) // append fetched pending logs - .map(move |logs| limit_logs(logs, limit)) // limit the logs - .map(FilterChanges::Logs)) - } - })) - } + // retrieve logs in range from_block..min(BlockId::Latest..to_block) + let limit = filter.limit; + Either::B( + self.logs(filter) + .map(move |logs| { + reorg.extend(logs); + reorg + }) // append reorg logs in the front + .map(move |mut logs| { + logs.extend(pending); + logs + }) // append fetched pending logs + .map(move |logs| limit_logs(logs, limit)) // limit the logs + .map(FilterChanges::Logs), + ) + } + })) + } - fn filter_logs(&self, index: Index) -> BoxFuture> { - let (filter, include_pending) = { - let mut polls = self.polls().lock(); + fn filter_logs(&self, index: Index) -> BoxFuture> { + let (filter, include_pending) = { + let mut polls = self.polls().lock(); - match polls.poll(&index.value()).and_then(|f| f.modify(|filter| match *filter { - PollFilter::Logs { ref filter, include_pending, .. } => - Some((filter.clone(), include_pending)), - _ => None, - })) { - Some((filter, include_pending)) => (filter, include_pending), - None => return Box::new(future::err(errors::filter_not_found())), - } - }; + match polls.poll(&index.value()).and_then(|f| { + f.modify(|filter| match *filter { + PollFilter::Logs { + ref filter, + include_pending, + .. 
+ } => Some((filter.clone(), include_pending)), + _ => None, + }) + }) { + Some((filter, include_pending)) => (filter, include_pending), + None => return Box::new(future::err(errors::filter_not_found())), + } + }; - // fetch pending logs. - let pending = if include_pending { - let best_block = self.best_block_number(); - self.pending_logs(best_block, &filter) - } else { - Vec::new() - }; + // fetch pending logs. + let pending = if include_pending { + let best_block = self.best_block_number(); + self.pending_logs(best_block, &filter) + } else { + Vec::new() + }; - // retrieve logs asynchronously, appending pending logs. - let limit = filter.limit; - let logs = self.logs(filter); - Box::new(logs - .map(move |mut logs| { logs.extend(pending); logs }) - .map(move |logs| limit_logs(logs, limit)) - ) - } + // retrieve logs asynchronously, appending pending logs. + let limit = filter.limit; + let logs = self.logs(filter); + Box::new( + logs.map(move |mut logs| { + logs.extend(pending); + logs + }) + .map(move |logs| limit_logs(logs, limit)), + ) + } - fn uninstall_filter(&self, index: Index) -> Result { - Ok(self.polls().lock().remove_poll(&index.value())) - } + fn uninstall_filter(&self, index: Index) -> Result { + Ok(self.polls().lock().remove_poll(&index.value())) + } } diff --git a/rpc/src/v1/impls/eth_pubsub.rs b/rpc/src/v1/impls/eth_pubsub.rs index 450728157..0d712a7fa 100644 --- a/rpc/src/v1/impls/eth_pubsub.rs +++ b/rpc/src/v1/impls/eth_pubsub.rs @@ -16,299 +16,332 @@ //! Eth PUB-SUB rpc implementation. 
-use std::sync::{Arc, Weak}; -use std::collections::BTreeMap; +use std::{ + collections::BTreeMap, + sync::{Arc, Weak}, +}; -use jsonrpc_core::{BoxFuture, Result, Error}; -use jsonrpc_core::futures::{self, Future, IntoFuture}; -use jsonrpc_pubsub::{SubscriptionId, typed::{Sink, Subscriber}}; +use jsonrpc_core::{ + futures::{self, Future, IntoFuture}, + BoxFuture, Error, Result, +}; +use jsonrpc_pubsub::{ + typed::{Sink, Subscriber}, + SubscriptionId, +}; -use v1::helpers::{errors, limit_logs, Subscribers}; -use v1::helpers::light_fetch::LightFetch; -use v1::metadata::Metadata; -use v1::traits::EthPubSub; -use v1::types::{pubsub, RichHeader, Log}; +use v1::{ + helpers::{errors, light_fetch::LightFetch, limit_logs, Subscribers}, + metadata::Metadata, + traits::EthPubSub, + types::{pubsub, Log, RichHeader}, +}; -use ethcore::client::{BlockChainClient, ChainNotify, NewBlocks, ChainRouteType, BlockId}; +use ethcore::client::{BlockChainClient, BlockId, ChainNotify, ChainRouteType, NewBlocks}; use ethereum_types::H256; -use light::cache::Cache; -use light::client::{LightChainClient, LightChainNotify}; -use light::on_demand::OnDemandRequester; +use light::{ + cache::Cache, + client::{LightChainClient, LightChainNotify}, + on_demand::OnDemandRequester, +}; use parity_runtime::Executor; -use parking_lot::{RwLock, Mutex}; +use parking_lot::{Mutex, RwLock}; -use sync::{LightSyncProvider, LightNetworkDispatcher, ManageNetwork}; +use sync::{LightNetworkDispatcher, LightSyncProvider, ManageNetwork}; -use types::encoded; -use types::filter::Filter as EthFilter; +use types::{encoded, filter::Filter as EthFilter}; type Client = Sink; /// Eth PubSub implementation. pub struct EthPubSubClient { - handler: Arc>, - heads_subscribers: Arc>>, - logs_subscribers: Arc>>, - transactions_subscribers: Arc>>, + handler: Arc>, + heads_subscribers: Arc>>, + logs_subscribers: Arc>>, + transactions_subscribers: Arc>>, } impl EthPubSubClient { - /// Creates new `EthPubSubClient`. 
- pub fn new(client: Arc, executor: Executor) -> Self { - let heads_subscribers = Arc::new(RwLock::new(Subscribers::default())); - let logs_subscribers = Arc::new(RwLock::new(Subscribers::default())); - let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default())); + /// Creates new `EthPubSubClient`. + pub fn new(client: Arc, executor: Executor) -> Self { + let heads_subscribers = Arc::new(RwLock::new(Subscribers::default())); + let logs_subscribers = Arc::new(RwLock::new(Subscribers::default())); + let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default())); - EthPubSubClient { - handler: Arc::new(ChainNotificationHandler { - client, - executor, - heads_subscribers: heads_subscribers.clone(), - logs_subscribers: logs_subscribers.clone(), - transactions_subscribers: transactions_subscribers.clone(), - }), - heads_subscribers, - logs_subscribers, - transactions_subscribers, - } - } + EthPubSubClient { + handler: Arc::new(ChainNotificationHandler { + client, + executor, + heads_subscribers: heads_subscribers.clone(), + logs_subscribers: logs_subscribers.clone(), + transactions_subscribers: transactions_subscribers.clone(), + }), + heads_subscribers, + logs_subscribers, + transactions_subscribers, + } + } - /// Creates new `EthPubSubCient` with deterministic subscription ids. - #[cfg(test)] - pub fn new_test(client: Arc, executor: Executor) -> Self { - let client = Self::new(client, executor); - *client.heads_subscribers.write() = Subscribers::new_test(); - *client.logs_subscribers.write() = Subscribers::new_test(); - *client.transactions_subscribers.write() = Subscribers::new_test(); - client - } + /// Creates new `EthPubSubCient` with deterministic subscription ids. 
+ #[cfg(test)] + pub fn new_test(client: Arc, executor: Executor) -> Self { + let client = Self::new(client, executor); + *client.heads_subscribers.write() = Subscribers::new_test(); + *client.logs_subscribers.write() = Subscribers::new_test(); + *client.transactions_subscribers.write() = Subscribers::new_test(); + client + } - /// Returns a chain notification handler. - pub fn handler(&self) -> Weak> { - Arc::downgrade(&self.handler) - } + /// Returns a chain notification handler. + pub fn handler(&self) -> Weak> { + Arc::downgrade(&self.handler) + } } impl EthPubSubClient> where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - /// Creates a new `EthPubSubClient` for `LightClient`. - pub fn light( - client: Arc, - on_demand: Arc, - sync: Arc, - cache: Arc>, - executor: Executor, - gas_price_percentile: usize, - ) -> Self { - let fetch = LightFetch { - client, - on_demand, - sync, - cache, - gas_price_percentile, - }; - EthPubSubClient::new(Arc::new(fetch), executor) - } + /// Creates a new `EthPubSubClient` for `LightClient`. + pub fn light( + client: Arc, + on_demand: Arc, + sync: Arc, + cache: Arc>, + executor: Executor, + gas_price_percentile: usize, + ) -> Self { + let fetch = LightFetch { + client, + on_demand, + sync, + cache, + gas_price_percentile, + }; + EthPubSubClient::new(Arc::new(fetch), executor) + } } /// PubSub Notification handler. 
pub struct ChainNotificationHandler { - client: Arc, - executor: Executor, - heads_subscribers: Arc>>, - logs_subscribers: Arc>>, - transactions_subscribers: Arc>>, + client: Arc, + executor: Executor, + heads_subscribers: Arc>>, + logs_subscribers: Arc>>, + transactions_subscribers: Arc>>, } impl ChainNotificationHandler { - fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) { - executor.spawn(subscriber - .notify(Ok(result)) - .map(|_| ()) - .map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e)) - ); - } + fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) { + executor.spawn( + subscriber + .notify(Ok(result)) + .map(|_| ()) + .map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e)), + ); + } - fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap)]) { - for subscriber in self.heads_subscribers.read().values() { - for &(ref header, ref extra_info) in headers { - Self::notify(&self.executor, subscriber, pubsub::Result::Header(Box::new(RichHeader { - inner: header.into(), - extra_info: extra_info.clone(), - }))); - } - } - } + fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap)]) { + for subscriber in self.heads_subscribers.read().values() { + for &(ref header, ref extra_info) in headers { + Self::notify( + &self.executor, + subscriber, + pubsub::Result::Header(Box::new(RichHeader { + inner: header.into(), + extra_info: extra_info.clone(), + })), + ); + } + } + } - fn notify_logs(&self, enacted: &[(H256, Ex)], logs: F) where - F: Fn(EthFilter, &Ex) -> T, - Ex: Send, - T: IntoFuture, Error = Error>, - T::Future: Send + 'static, - { - for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() { - let logs = futures::future::join_all(enacted - .iter() - .map(|&(hash, ref ex)| { - let mut filter = filter.clone(); - filter.from_block = BlockId::Hash(hash); - filter.to_block = filter.from_block; - logs(filter, ex).into_future() - }) - .collect::>() - ); - 
let limit = filter.limit; - let executor = self.executor.clone(); - let subscriber = subscriber.clone(); - self.executor.spawn(logs - .map(move |logs| { - let logs = logs.into_iter().flat_map(|log| log).collect(); + fn notify_logs(&self, enacted: &[(H256, Ex)], logs: F) + where + F: Fn(EthFilter, &Ex) -> T, + Ex: Send, + T: IntoFuture, Error = Error>, + T::Future: Send + 'static, + { + for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() { + let logs = futures::future::join_all( + enacted + .iter() + .map(|&(hash, ref ex)| { + let mut filter = filter.clone(); + filter.from_block = BlockId::Hash(hash); + filter.to_block = filter.from_block; + logs(filter, ex).into_future() + }) + .collect::>(), + ); + let limit = filter.limit; + let executor = self.executor.clone(); + let subscriber = subscriber.clone(); + self.executor.spawn( + logs.map(move |logs| { + let logs = logs.into_iter().flat_map(|log| log).collect(); - for log in limit_logs(logs, limit) { - Self::notify(&executor, &subscriber, pubsub::Result::Log(Box::new(log))) - } - }) - .map_err(|e| warn!("Unable to fetch latest logs: {:?}", e)) - ); - } - } + for log in limit_logs(logs, limit) { + Self::notify(&executor, &subscriber, pubsub::Result::Log(Box::new(log))) + } + }) + .map_err(|e| warn!("Unable to fetch latest logs: {:?}", e)), + ); + } + } - /// Notify all subscribers about new transaction hashes. - pub fn notify_new_transactions(&self, hashes: &[H256]) { - for subscriber in self.transactions_subscribers.read().values() { - for hash in hashes { - Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash(*hash)); - } - } - } + /// Notify all subscribers about new transaction hashes. 
+ pub fn notify_new_transactions(&self, hashes: &[H256]) { + for subscriber in self.transactions_subscribers.read().values() { + for hash in hashes { + Self::notify( + &self.executor, + subscriber, + pubsub::Result::TransactionHash(*hash), + ); + } + } + } } /// A light client wrapper struct. pub trait LightClient: Send + Sync { - /// Get a recent block header. - fn block_header(&self, id: BlockId) -> Option; + /// Get a recent block header. + fn block_header(&self, id: BlockId) -> Option; - /// Fetch logs. - fn logs(&self, filter: EthFilter) -> BoxFuture>; + /// Fetch logs. + fn logs(&self, filter: EthFilter) -> BoxFuture>; } impl LightClient for LightFetch where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - fn block_header(&self, id: BlockId) -> Option { - self.client.block_header(id) - } + fn block_header(&self, id: BlockId) -> Option { + self.client.block_header(id) + } - fn logs(&self, filter: EthFilter) -> BoxFuture> { - Box::new(LightFetch::logs(self, filter)) as BoxFuture<_> - } + fn logs(&self, filter: EthFilter) -> BoxFuture> { + Box::new(LightFetch::logs(self, filter)) as BoxFuture<_> + } } impl LightChainNotify for ChainNotificationHandler { - fn new_headers(&self, enacted: &[H256]) { - let headers = enacted - .iter() - .filter_map(|hash| self.client.block_header(BlockId::Hash(*hash))) - .map(|header| (header, Default::default())) - .collect::>(); + fn new_headers(&self, enacted: &[H256]) { + let headers = enacted + .iter() + .filter_map(|hash| self.client.block_header(BlockId::Hash(*hash))) + .map(|header| (header, Default::default())) + .collect::>(); - self.notify_heads(&headers); - self.notify_logs(&enacted.iter().map(|h| (*h, ())).collect::>(), |filter, _| self.client.logs(filter)) - } + self.notify_heads(&headers); + self.notify_logs( + &enacted.iter().map(|h| (*h, 
())).collect::>(), + |filter, _| self.client.logs(filter), + ) + } } impl ChainNotify for ChainNotificationHandler { - fn new_blocks(&self, new_blocks: NewBlocks) { - if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() { return } - const EXTRA_INFO_PROOF: &str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed"; - let headers = new_blocks.route.route() - .iter() - .filter_map(|&(hash, ref typ)| { - match typ { - ChainRouteType::Retracted => None, - ChainRouteType::Enacted => self.client.block_header(BlockId::Hash(hash)) - } - }) - .map(|header| { - let hash = header.hash(); - (header, self.client.block_extra_info(BlockId::Hash(hash)).expect(EXTRA_INFO_PROOF)) - }) - .collect::>(); + fn new_blocks(&self, new_blocks: NewBlocks) { + if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() { + return; + } + const EXTRA_INFO_PROOF: &str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed"; + let headers = new_blocks + .route + .route() + .iter() + .filter_map(|&(hash, ref typ)| match typ { + ChainRouteType::Retracted => None, + ChainRouteType::Enacted => self.client.block_header(BlockId::Hash(hash)), + }) + .map(|header| { + let hash = header.hash(); + ( + header, + self.client + .block_extra_info(BlockId::Hash(hash)) + .expect(EXTRA_INFO_PROOF), + ) + }) + .collect::>(); - // Headers - self.notify_heads(&headers); + // Headers + self.notify_heads(&headers); - // We notify logs enacting and retracting as the order in route. 
- self.notify_logs(new_blocks.route.route(), |filter, ex| { - match ex { - ChainRouteType::Enacted => - Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).collect()), - ChainRouteType::Retracted => - Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).map(|mut log: Log| { - log.log_type = "removed".into(); - log.removed = true; - log - }).collect()), - } - }); - } + // We notify logs enacting and retracting as the order in route. + self.notify_logs(new_blocks.route.route(), |filter, ex| match ex { + ChainRouteType::Enacted => Ok(self + .client + .logs(filter) + .unwrap_or_default() + .into_iter() + .map(Into::into) + .collect()), + ChainRouteType::Retracted => Ok(self + .client + .logs(filter) + .unwrap_or_default() + .into_iter() + .map(Into::into) + .map(|mut log: Log| { + log.log_type = "removed".into(); + log.removed = true; + log + }) + .collect()), + }); + } } impl EthPubSub for EthPubSubClient { - type Metadata = Metadata; + type Metadata = Metadata; - fn subscribe( - &self, - _meta: Metadata, - subscriber: Subscriber, - kind: pubsub::Kind, - params: Option, - ) { - let error = match (kind, params) { - (pubsub::Kind::NewHeads, None) => { - self.heads_subscribers.write().push(subscriber); - return; - }, - (pubsub::Kind::NewHeads, _) => { - errors::invalid_params("newHeads", "Expected no parameters.") - }, - (pubsub::Kind::Logs, Some(pubsub::Params::Logs(filter))) => { - match filter.try_into() { - Ok(filter) => { - self.logs_subscribers.write().push(subscriber, filter); - return; - }, - Err(err) => err, - } - }, - (pubsub::Kind::Logs, _) => { - errors::invalid_params("logs", "Expected a filter object.") - }, - (pubsub::Kind::NewPendingTransactions, None) => { - self.transactions_subscribers.write().push(subscriber); - return; - }, - (pubsub::Kind::NewPendingTransactions, _) => { - errors::invalid_params("newPendingTransactions", "Expected no parameters.") - }, - _ => { - errors::unimplemented(None) - }, - }; + 
fn subscribe( + &self, + _meta: Metadata, + subscriber: Subscriber, + kind: pubsub::Kind, + params: Option, + ) { + let error = match (kind, params) { + (pubsub::Kind::NewHeads, None) => { + self.heads_subscribers.write().push(subscriber); + return; + } + (pubsub::Kind::NewHeads, _) => { + errors::invalid_params("newHeads", "Expected no parameters.") + } + (pubsub::Kind::Logs, Some(pubsub::Params::Logs(filter))) => match filter.try_into() { + Ok(filter) => { + self.logs_subscribers.write().push(subscriber, filter); + return; + } + Err(err) => err, + }, + (pubsub::Kind::Logs, _) => errors::invalid_params("logs", "Expected a filter object."), + (pubsub::Kind::NewPendingTransactions, None) => { + self.transactions_subscribers.write().push(subscriber); + return; + } + (pubsub::Kind::NewPendingTransactions, _) => { + errors::invalid_params("newPendingTransactions", "Expected no parameters.") + } + _ => errors::unimplemented(None), + }; - let _ = subscriber.reject(error); - } + let _ = subscriber.reject(error); + } - fn unsubscribe(&self, _: Option, id: SubscriptionId) -> Result { - let res = self.heads_subscribers.write().remove(&id).is_some(); - let res2 = self.logs_subscribers.write().remove(&id).is_some(); - let res3 = self.transactions_subscribers.write().remove(&id).is_some(); + fn unsubscribe(&self, _: Option, id: SubscriptionId) -> Result { + let res = self.heads_subscribers.write().remove(&id).is_some(); + let res2 = self.logs_subscribers.write().remove(&id).is_some(); + let res3 = self.transactions_subscribers.write().remove(&id).is_some(); - Ok(res || res2 || res3) - } + Ok(res || res2 || res3) + } } diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 6467bfbc7..24163be38 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -16,592 +16,729 @@ //! Eth RPC interface for the light client. 
-use std::collections::BTreeSet; -use std::sync::Arc; +use std::{collections::BTreeSet, sync::Arc}; -use jsonrpc_core::{Result, BoxFuture}; -use jsonrpc_core::futures::{future, Future}; -use jsonrpc_core::futures::future::Either; - -use light::cache::Cache as LightDataCache; -use light::client::LightChainClient; -use light::{cht, TransactionQueue}; -use light::on_demand::{request, OnDemandRequester}; - -use ethereum_types::{Address, H64, H160, H256, U64, U256}; -use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; -use parking_lot::{RwLock, Mutex}; -use rlp::Rlp; -use types::transaction::SignedTransaction; -use types::encoded; -use types::filter::Filter as EthcoreFilter; -use types::ids::BlockId; - -use v1::impls::eth_filter::Filterable; -use v1::helpers::{errors, limit_logs, SyncPollFilter, PollManager}; -use v1::helpers::deprecated::{self, DeprecationNotice}; -use v1::helpers::light_fetch::{self, LightFetch}; -use v1::traits::Eth; -use v1::types::{ - RichBlock, Block, BlockTransactions, BlockNumber, LightBlockNumber, Bytes, SyncStatus as RpcSyncStatus, - SyncInfo as RpcSyncInfo, Transaction, CallRequest, Index, Filter, Log, Receipt, Work, EthAccount +use jsonrpc_core::{ + futures::{future, future::Either, Future}, + BoxFuture, Result, }; -use v1::metadata::Metadata; -use sync::{LightSyncInfo, LightSyncProvider, LightNetworkDispatcher, ManageNetwork}; +use light::{ + cache::Cache as LightDataCache, + cht, + client::LightChainClient, + on_demand::{request, OnDemandRequester}, + TransactionQueue, +}; -const NO_INVALID_BACK_REFS: &str = "Fails only on invalid back-references; back-references here known to be valid; qed"; +use ethereum_types::{Address, H160, H256, H64, U256, U64}; +use hash::{KECCAK_EMPTY_LIST_RLP, KECCAK_NULL_RLP}; +use parking_lot::{Mutex, RwLock}; +use rlp::Rlp; +use types::{ + encoded, filter::Filter as EthcoreFilter, ids::BlockId, transaction::SignedTransaction, +}; + +use v1::{ + helpers::{ + deprecated::{self, DeprecationNotice}, + errors, + 
light_fetch::{self, LightFetch}, + limit_logs, PollManager, SyncPollFilter, + }, + impls::eth_filter::Filterable, + metadata::Metadata, + traits::Eth, + types::{ + Block, BlockNumber, BlockTransactions, Bytes, CallRequest, EthAccount, Filter, Index, + LightBlockNumber, Log, Receipt, RichBlock, SyncInfo as RpcSyncInfo, + SyncStatus as RpcSyncStatus, Transaction, Work, + }, +}; + +use sync::{LightNetworkDispatcher, LightSyncInfo, LightSyncProvider, ManageNetwork}; + +const NO_INVALID_BACK_REFS: &str = + "Fails only on invalid back-references; back-references here known to be valid; qed"; /// Light client `ETH` (and filter) RPC. -pub struct EthClient { - sync: Arc, - client: Arc, - on_demand: Arc, - transaction_queue: Arc>, - accounts: Arc Vec
+ Send + Sync>, - cache: Arc>, - polls: Mutex>, - poll_lifetime: u32, - gas_price_percentile: usize, - deprecation_notice: DeprecationNotice, +pub struct EthClient< + C, + S: LightSyncProvider + LightNetworkDispatcher + 'static, + OD: OnDemandRequester + 'static, +> { + sync: Arc, + client: Arc, + on_demand: Arc, + transaction_queue: Arc>, + accounts: Arc Vec
+ Send + Sync>, + cache: Arc>, + polls: Mutex>, + poll_lifetime: u32, + gas_price_percentile: usize, + deprecation_notice: DeprecationNotice, } impl Clone for EthClient where - S: LightSyncProvider + LightNetworkDispatcher + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + 'static, + OD: OnDemandRequester + 'static, { - fn clone(&self) -> Self { - // each instance should have its own poll manager. - EthClient { - sync: self.sync.clone(), - client: self.client.clone(), - on_demand: self.on_demand.clone(), - transaction_queue: self.transaction_queue.clone(), - accounts: self.accounts.clone(), - cache: self.cache.clone(), - polls: Mutex::new(PollManager::new(self.poll_lifetime)), - poll_lifetime: self.poll_lifetime, - gas_price_percentile: self.gas_price_percentile, - deprecation_notice: Default::default(), - } - } + fn clone(&self) -> Self { + // each instance should have its own poll manager. + EthClient { + sync: self.sync.clone(), + client: self.client.clone(), + on_demand: self.on_demand.clone(), + transaction_queue: self.transaction_queue.clone(), + accounts: self.accounts.clone(), + cache: self.cache.clone(), + polls: Mutex::new(PollManager::new(self.poll_lifetime)), + poll_lifetime: self.poll_lifetime, + gas_price_percentile: self.gas_price_percentile, + deprecation_notice: Default::default(), + } + } } impl EthClient where - C: LightChainClient + 'static, - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + C: LightChainClient + 'static, + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - /// Create a new `EthClient` with a handle to the light sync instance, client, - /// and on-demand request service, which is assumed to be attached as a handler. - pub fn new( - sync: Arc, - client: Arc, - on_demand: Arc, - transaction_queue: Arc>, - accounts: Arc Vec
+ Send + Sync>, - cache: Arc>, - gas_price_percentile: usize, - poll_lifetime: u32 - ) -> Self { - EthClient { - sync, - client, - on_demand, - transaction_queue, - accounts, - cache, - polls: Mutex::new(PollManager::new(poll_lifetime)), - poll_lifetime, - gas_price_percentile, - deprecation_notice: Default::default(), - } - } + /// Create a new `EthClient` with a handle to the light sync instance, client, + /// and on-demand request service, which is assumed to be attached as a handler. + pub fn new( + sync: Arc, + client: Arc, + on_demand: Arc, + transaction_queue: Arc>, + accounts: Arc Vec
+ Send + Sync>, + cache: Arc>, + gas_price_percentile: usize, + poll_lifetime: u32, + ) -> Self { + EthClient { + sync, + client, + on_demand, + transaction_queue, + accounts, + cache, + polls: Mutex::new(PollManager::new(poll_lifetime)), + poll_lifetime, + gas_price_percentile, + deprecation_notice: Default::default(), + } + } - /// Create a light data fetcher instance. - fn fetcher(&self) -> LightFetch - { - LightFetch { - client: self.client.clone(), - on_demand: self.on_demand.clone(), - sync: self.sync.clone(), - cache: self.cache.clone(), - gas_price_percentile: self.gas_price_percentile, - } - } + /// Create a light data fetcher instance. + fn fetcher(&self) -> LightFetch { + LightFetch { + client: self.client.clone(), + on_demand: self.on_demand.clone(), + sync: self.sync.clone(), + cache: self.cache.clone(), + gas_price_percentile: self.gas_price_percentile, + } + } - // get a "rich" block structure. Fails on unknown block. - fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture { - let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone()); - let (client, engine) = (self.client.clone(), self.client.engine().clone()); + // get a "rich" block structure. Fails on unknown block. + fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture { + let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone()); + let (client, engine) = (self.client.clone(), self.client.engine().clone()); - // helper for filling out a rich block once we've got a block and a score. 
- let fill_rich = move |block: encoded::Block, score: Option| { - let header = block.decode_header(); - let extra_info = engine.extra_info(&header); - RichBlock { - inner: Block { - hash: Some(header.hash()), - size: Some(block.rlp().as_raw().len().into()), - parent_hash: *header.parent_hash(), - uncles_hash: *header.uncles_hash(), - author: *header.author(), - miner: *header.author(), - state_root: *header.state_root(), - transactions_root: *header.transactions_root(), - receipts_root: *header.receipts_root(), - number: Some(header.number().into()), - gas_used: *header.gas_used(), - gas_limit: *header.gas_limit(), - logs_bloom: Some(*header.log_bloom()), - timestamp: header.timestamp().into(), - difficulty: *header.difficulty(), - total_difficulty: score.map(Into::into), - seal_fields: header.seal().iter().cloned().map(Into::into).collect(), - uncles: block.uncle_hashes().into_iter().map(Into::into).collect(), - transactions: match include_txs { - true => BlockTransactions::Full(block.view().localized_transactions().into_iter().map(Transaction::from_localized).collect()), - _ => BlockTransactions::Hashes(block.transaction_hashes().into_iter().map(Into::into).collect()), - }, - extra_data: Bytes::new(header.extra_data().clone()), - }, - extra_info, - } - }; + // helper for filling out a rich block once we've got a block and a score. 
+ let fill_rich = move |block: encoded::Block, score: Option| { + let header = block.decode_header(); + let extra_info = engine.extra_info(&header); + RichBlock { + inner: Block { + hash: Some(header.hash()), + size: Some(block.rlp().as_raw().len().into()), + parent_hash: *header.parent_hash(), + uncles_hash: *header.uncles_hash(), + author: *header.author(), + miner: *header.author(), + state_root: *header.state_root(), + transactions_root: *header.transactions_root(), + receipts_root: *header.receipts_root(), + number: Some(header.number().into()), + gas_used: *header.gas_used(), + gas_limit: *header.gas_limit(), + logs_bloom: Some(*header.log_bloom()), + timestamp: header.timestamp().into(), + difficulty: *header.difficulty(), + total_difficulty: score.map(Into::into), + seal_fields: header.seal().iter().cloned().map(Into::into).collect(), + uncles: block.uncle_hashes().into_iter().map(Into::into).collect(), + transactions: match include_txs { + true => BlockTransactions::Full( + block + .view() + .localized_transactions() + .into_iter() + .map(Transaction::from_localized) + .collect(), + ), + _ => BlockTransactions::Hashes( + block + .transaction_hashes() + .into_iter() + .map(Into::into) + .collect(), + ), + }, + extra_data: Bytes::new(header.extra_data().clone()), + }, + extra_info, + } + }; - // get the block itself. - Box::new(self.fetcher().block(id).and_then(move |block| { - // then fetch the total difficulty (this is much easier after getting the block). - match client.score(id) { - Some(score) => Either::A(future::ok(fill_rich(block, Some(score)))), - None => { - // make a CHT request to fetch the chain score. - let req = cht::block_to_cht_number(block.number()) - .and_then(|num| client.cht_root(num as usize)) - .and_then(|root| request::HeaderProof::new(block.number(), root)); + // get the block itself. + Box::new(self.fetcher().block(id).and_then(move |block| { + // then fetch the total difficulty (this is much easier after getting the block). 
+ match client.score(id) { + Some(score) => Either::A(future::ok(fill_rich(block, Some(score)))), + None => { + // make a CHT request to fetch the chain score. + let req = cht::block_to_cht_number(block.number()) + .and_then(|num| client.cht_root(num as usize)) + .and_then(|root| request::HeaderProof::new(block.number(), root)); - let req = match req { - Some(req) => req, - None => { - // somehow the genesis block slipped past other checks. - // return it now. - let score = client.block_header(BlockId::Number(0)) - .expect("genesis always stored; qed") - .difficulty(); + let req = match req { + Some(req) => req, + None => { + // somehow the genesis block slipped past other checks. + // return it now. + let score = client + .block_header(BlockId::Number(0)) + .expect("genesis always stored; qed") + .difficulty(); - return Either::A(future::ok(fill_rich(block, Some(score)))) - } - }; + return Either::A(future::ok(fill_rich(block, Some(score)))); + } + }; - // three possible outcomes: - // - network is down. - // - we get a score, but our hash is non-canonical. - // - we get a score, and our hash is canonical. - let maybe_fut = sync.with_context(move |ctx| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS)); - match maybe_fut { - Some(fut) => Either::B(fut - .map(move |(hash, score)| { - let score = if hash == block.hash() { - Some(score) - } else { - None - }; + // three possible outcomes: + // - network is down. + // - we get a score, but our hash is non-canonical. + // - we get a score, and our hash is canonical. 
+ let maybe_fut = sync.with_context(move |ctx| { + on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS) + }); + match maybe_fut { + Some(fut) => Either::B( + fut.map(move |(hash, score)| { + let score = if hash == block.hash() { + Some(score) + } else { + None + }; - fill_rich(block, score) - }).map_err(errors::on_demand_error)), - None => Either::A(future::err(errors::network_disabled())), - } - } - } - })) - } + fill_rich(block, score) + }) + .map_err(errors::on_demand_error), + ), + None => Either::A(future::err(errors::network_disabled())), + } + } + } + })) + } } impl Eth for EthClient where - C: LightChainClient + 'static, - S: LightSyncInfo + LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + C: LightChainClient + 'static, + S: LightSyncInfo + LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - type Metadata = Metadata; + type Metadata = Metadata; - fn protocol_version(&self) -> Result { - Ok(format!("{}", ::light::net::MAX_PROTOCOL_VERSION)) - } + fn protocol_version(&self) -> Result { + Ok(format!("{}", ::light::net::MAX_PROTOCOL_VERSION)) + } - fn syncing(&self) -> Result { - if self.sync.is_major_importing() { - let chain_info = self.client.chain_info(); - let current_block = U256::from(chain_info.best_block_number); - let highest_block = self.sync.highest_block().map(U256::from) - .unwrap_or_else(|| current_block); + fn syncing(&self) -> Result { + if self.sync.is_major_importing() { + let chain_info = self.client.chain_info(); + let current_block = U256::from(chain_info.best_block_number); + let highest_block = self + .sync + .highest_block() + .map(U256::from) + .unwrap_or_else(|| current_block); - Ok(RpcSyncStatus::Info(RpcSyncInfo { - starting_block: U256::from(self.sync.start_block()), - current_block, - highest_block, - warp_chunks_amount: None, - warp_chunks_processed: None, - })) - } else { - Ok(RpcSyncStatus::None) - } - } + 
Ok(RpcSyncStatus::Info(RpcSyncInfo { + starting_block: U256::from(self.sync.start_block()), + current_block, + highest_block, + warp_chunks_amount: None, + warp_chunks_processed: None, + })) + } else { + Ok(RpcSyncStatus::None) + } + } - fn author(&self) -> Result { - (self.accounts)() - .first() - .cloned() - .map(From::from) - .ok_or_else(|| errors::account("No accounts were found", "")) - } + fn author(&self) -> Result { + (self.accounts)() + .first() + .cloned() + .map(From::from) + .ok_or_else(|| errors::account("No accounts were found", "")) + } - fn is_mining(&self) -> Result { - Ok(false) - } + fn is_mining(&self) -> Result { + Ok(false) + } - fn chain_id(&self) -> Result> { - Ok(self.client.signing_chain_id().map(U64::from)) - } + fn chain_id(&self) -> Result> { + Ok(self.client.signing_chain_id().map(U64::from)) + } - fn hashrate(&self) -> Result { - Ok(Default::default()) - } + fn hashrate(&self) -> Result { + Ok(Default::default()) + } - fn gas_price(&self) -> BoxFuture { - Box::new(self.fetcher().gas_price()) - } + fn gas_price(&self) -> BoxFuture { + Box::new(self.fetcher().gas_price()) + } - fn accounts(&self) -> Result> { - self.deprecation_notice.print("eth_accounts", deprecated::msgs::ACCOUNTS); + fn accounts(&self) -> Result> { + self.deprecation_notice + .print("eth_accounts", deprecated::msgs::ACCOUNTS); - Ok((self.accounts)() - .into_iter() - .map(Into::into) - .collect()) - } + Ok((self.accounts)().into_iter().map(Into::into).collect()) + } - fn block_number(&self) -> Result { - Ok(self.client.chain_info().best_block_number.into()) - } + fn block_number(&self) -> Result { + Ok(self.client.chain_info().best_block_number.into()) + } - fn balance(&self, address: H160, num: Option) -> BoxFuture { - Box::new(self.fetcher().account(address, num.unwrap_or_default().to_block_id(), self.transaction_queue.clone()) - .map(|acc| acc.map_or(0.into(), |a| a.balance))) - } + fn balance(&self, address: H160, num: Option) -> BoxFuture { + Box::new( + 
self.fetcher() + .account( + address, + num.unwrap_or_default().to_block_id(), + self.transaction_queue.clone(), + ) + .map(|acc| acc.map_or(0.into(), |a| a.balance)), + ) + } - fn storage_at(&self, _address: H160, _key: U256, _num: Option) -> BoxFuture { - Box::new(future::err(errors::unimplemented(None))) - } + fn storage_at(&self, _address: H160, _key: U256, _num: Option) -> BoxFuture { + Box::new(future::err(errors::unimplemented(None))) + } - fn block_by_hash(&self, hash: H256, include_txs: bool) -> BoxFuture> { - Box::new(self.rich_block(BlockId::Hash(hash), include_txs).map(Some)) - } + fn block_by_hash(&self, hash: H256, include_txs: bool) -> BoxFuture> { + Box::new(self.rich_block(BlockId::Hash(hash), include_txs).map(Some)) + } - fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture> { - Box::new(self.rich_block(num.to_block_id(), include_txs).map(Some)) - } + fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture> { + Box::new(self.rich_block(num.to_block_id(), include_txs).map(Some)) + } - fn transaction_count(&self, address: H160, num: Option) -> BoxFuture { - Box::new(self.fetcher().account(address, num.unwrap_or_default().to_block_id(), self.transaction_queue.clone()) - .map(|acc| acc.map_or(0.into(), |a| a.nonce))) - } + fn transaction_count(&self, address: H160, num: Option) -> BoxFuture { + Box::new( + self.fetcher() + .account( + address, + num.unwrap_or_default().to_block_id(), + self.transaction_queue.clone(), + ) + .map(|acc| acc.map_or(0.into(), |a| a.nonce)), + ) + } - fn block_transaction_count_by_hash(&self, hash: H256) -> BoxFuture> { - let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + fn block_transaction_count_by_hash(&self, hash: H256) -> BoxFuture> { + let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); - Box::new(self.fetcher().header(BlockId::Hash(hash)).and_then(move |hdr| { - if hdr.transactions_root() == KECCAK_NULL_RLP { - 
Either::A(future::ok(Some(U256::from(0)))) - } else { - sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) - .map(|x| x.expect(NO_INVALID_BACK_REFS)) - .map(|x| x.map(|b| Some(U256::from(b.transactions_count())))) - .map(|x| Either::B(x.map_err(errors::on_demand_error))) - .unwrap_or_else(|| Either::A(future::err(errors::network_disabled()))) - } - })) - } + Box::new( + self.fetcher() + .header(BlockId::Hash(hash)) + .and_then(move |hdr| { + if hdr.transactions_root() == KECCAK_NULL_RLP { + Either::A(future::ok(Some(U256::from(0)))) + } else { + sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) + .map(|x| x.expect(NO_INVALID_BACK_REFS)) + .map(|x| x.map(|b| Some(U256::from(b.transactions_count())))) + .map(|x| Either::B(x.map_err(errors::on_demand_error))) + .unwrap_or_else(|| Either::A(future::err(errors::network_disabled()))) + } + }), + ) + } - fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture> { - let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture> { + let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); - Box::new(self.fetcher().header(num.to_block_id()).and_then(move |hdr| { - if hdr.transactions_root() == KECCAK_NULL_RLP { - Either::A(future::ok(Some(U256::from(0)))) - } else { - sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) - .map(|x| x.expect(NO_INVALID_BACK_REFS)) - .map(|x| x.map(|b| Some(U256::from(b.transactions_count())))) - .map(|x| Either::B(x.map_err(errors::on_demand_error))) - .unwrap_or_else(|| Either::A(future::err(errors::network_disabled()))) - } - })) - } + Box::new( + self.fetcher() + .header(num.to_block_id()) + .and_then(move |hdr| { + if hdr.transactions_root() == KECCAK_NULL_RLP { + Either::A(future::ok(Some(U256::from(0)))) + } else { + sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) + .map(|x| 
x.expect(NO_INVALID_BACK_REFS)) + .map(|x| x.map(|b| Some(U256::from(b.transactions_count())))) + .map(|x| Either::B(x.map_err(errors::on_demand_error))) + .unwrap_or_else(|| Either::A(future::err(errors::network_disabled()))) + } + }), + ) + } - fn block_uncles_count_by_hash(&self, hash: H256) -> BoxFuture> { - let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + fn block_uncles_count_by_hash(&self, hash: H256) -> BoxFuture> { + let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); - Box::new(self.fetcher().header(BlockId::Hash(hash)).and_then(move |hdr| { - if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { - Either::A(future::ok(Some(U256::from(0)))) - } else { - sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) - .map(|x| x.expect(NO_INVALID_BACK_REFS)) - .map(|x| x.map(|b| Some(U256::from(b.uncles_count())))) - .map(|x| Either::B(x.map_err(errors::on_demand_error))) - .unwrap_or_else(|| Either::A(future::err(errors::network_disabled()))) - } - })) - } + Box::new( + self.fetcher() + .header(BlockId::Hash(hash)) + .and_then(move |hdr| { + if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { + Either::A(future::ok(Some(U256::from(0)))) + } else { + sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) + .map(|x| x.expect(NO_INVALID_BACK_REFS)) + .map(|x| x.map(|b| Some(U256::from(b.uncles_count())))) + .map(|x| Either::B(x.map_err(errors::on_demand_error))) + .unwrap_or_else(|| Either::A(future::err(errors::network_disabled()))) + } + }), + ) + } - fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture> { - let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture> { + let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); - Box::new(self.fetcher().header(num.to_block_id()).and_then(move |hdr| { - if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { - 
Either::B(future::ok(Some(U256::from(0)))) - } else { - sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) - .map(|x| x.expect(NO_INVALID_BACK_REFS)) - .map(|x| x.map(|b| Some(U256::from(b.uncles_count())))) - .map(|x| Either::A(x.map_err(errors::on_demand_error))) - .unwrap_or_else(|| Either::B(future::err(errors::network_disabled()))) - } - })) - } + Box::new( + self.fetcher() + .header(num.to_block_id()) + .and_then(move |hdr| { + if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { + Either::B(future::ok(Some(U256::from(0)))) + } else { + sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) + .map(|x| x.expect(NO_INVALID_BACK_REFS)) + .map(|x| x.map(|b| Some(U256::from(b.uncles_count())))) + .map(|x| Either::A(x.map_err(errors::on_demand_error))) + .unwrap_or_else(|| Either::B(future::err(errors::network_disabled()))) + } + }), + ) + } - fn code_at(&self, address: H160, num: Option) -> BoxFuture { - Box::new(self.fetcher().code(address, num.unwrap_or_default().to_block_id()).map(Into::into)) - } + fn code_at(&self, address: H160, num: Option) -> BoxFuture { + Box::new( + self.fetcher() + .code(address, num.unwrap_or_default().to_block_id()) + .map(Into::into), + ) + } - fn send_raw_transaction(&self, raw: Bytes) -> Result { - let best_header = self.client.best_block_header().decode().map_err(errors::decode)?; + fn send_raw_transaction(&self, raw: Bytes) -> Result { + let best_header = self + .client + .best_block_header() + .decode() + .map_err(errors::decode)?; - Rlp::new(&raw.into_vec()).as_val() - .map_err(errors::rlp) - .and_then(|tx| { - self.client.engine().verify_transaction_basic(&tx, &best_header) - .map_err(errors::transaction)?; + Rlp::new(&raw.into_vec()) + .as_val() + .map_err(errors::rlp) + .and_then(|tx| { + self.client + .engine() + .verify_transaction_basic(&tx, &best_header) + .map_err(errors::transaction)?; - let signed = SignedTransaction::new(tx).map_err(errors::transaction)?; - let hash = 
signed.hash(); + let signed = SignedTransaction::new(tx).map_err(errors::transaction)?; + let hash = signed.hash(); - self.transaction_queue.write().import(signed.into()) - .map(|_| hash) - .map_err(errors::transaction) - }) - .map(Into::into) - } + self.transaction_queue + .write() + .import(signed.into()) + .map(|_| hash) + .map_err(errors::transaction) + }) + .map(Into::into) + } - fn submit_transaction(&self, raw: Bytes) -> Result { - self.send_raw_transaction(raw) - } + fn submit_transaction(&self, raw: Bytes) -> Result { + self.send_raw_transaction(raw) + } - fn call(&self, req: CallRequest, num: Option) -> BoxFuture { - Box::new(self.fetcher().proved_read_only_execution(req, num, self.transaction_queue.clone()).and_then(|res| { - match res { - Ok(exec) => Ok(exec.output.into()), - Err(e) => Err(errors::execution(e)), - } - })) - } + fn call(&self, req: CallRequest, num: Option) -> BoxFuture { + Box::new( + self.fetcher() + .proved_read_only_execution(req, num, self.transaction_queue.clone()) + .and_then(|res| match res { + Ok(exec) => Ok(exec.output.into()), + Err(e) => Err(errors::execution(e)), + }), + ) + } - fn estimate_gas(&self, req: CallRequest, num: Option) -> BoxFuture { - // TODO: binary chop for more accurate estimates. - Box::new(self.fetcher().proved_read_only_execution(req, num, self.transaction_queue.clone()).and_then(|res| { - match res { - Ok(exec) => Ok(exec.refunded + exec.gas_used), - Err(e) => Err(errors::execution(e)), - } - })) - } + fn estimate_gas(&self, req: CallRequest, num: Option) -> BoxFuture { + // TODO: binary chop for more accurate estimates. 
+ Box::new( + self.fetcher() + .proved_read_only_execution(req, num, self.transaction_queue.clone()) + .and_then(|res| match res { + Ok(exec) => Ok(exec.refunded + exec.gas_used), + Err(e) => Err(errors::execution(e)), + }), + ) + } - fn transaction_by_hash(&self, hash: H256) -> BoxFuture> { - let in_txqueue = self.transaction_queue.read().get(&hash).is_some(); + fn transaction_by_hash(&self, hash: H256) -> BoxFuture> { + let in_txqueue = self.transaction_queue.read().get(&hash).is_some(); - // The transaction is in the `local txqueue` then fetch the latest state from the network and attempt - // to cull the transaction queue. - if in_txqueue { - // Note, this will block (relies on HTTP timeout) to make sure `cull` will finish to avoid having to call - // `eth_getTransactionByHash` more than once to ensure the `txqueue` is up to `date` when it is called - if let Err(e) = self.fetcher().light_cull(self.transaction_queue.clone()).wait() { - debug!(target: "cull", "failed because of: {:?}", e); - } - if let Some(tx) = self.transaction_queue.read().get(&hash) { - return Box::new(future::ok(Some(Transaction::from_pending( - tx.clone(), - )))); - } - } - Box::new(self.fetcher().transaction_by_hash(hash).map(|x| x.map(|(tx, _)| tx))) - } + // The transaction is in the `local txqueue` then fetch the latest state from the network and attempt + // to cull the transaction queue. 
+ if in_txqueue { + // Note, this will block (relies on HTTP timeout) to make sure `cull` will finish to avoid having to call + // `eth_getTransactionByHash` more than once to ensure the `txqueue` is up to `date` when it is called + if let Err(e) = self + .fetcher() + .light_cull(self.transaction_queue.clone()) + .wait() + { + debug!(target: "cull", "failed because of: {:?}", e); + } + if let Some(tx) = self.transaction_queue.read().get(&hash) { + return Box::new(future::ok(Some(Transaction::from_pending(tx.clone())))); + } + } + Box::new( + self.fetcher() + .transaction_by_hash(hash) + .map(|x| x.map(|(tx, _)| tx)), + ) + } - fn transaction_by_block_hash_and_index(&self, hash: H256, idx: Index) -> BoxFuture> { - Box::new(self.fetcher().block(BlockId::Hash(hash)).map(move |block| { - light_fetch::extract_transaction_at_index(block, idx.value()) - })) - } + fn transaction_by_block_hash_and_index( + &self, + hash: H256, + idx: Index, + ) -> BoxFuture> { + Box::new( + self.fetcher() + .block(BlockId::Hash(hash)) + .map(move |block| light_fetch::extract_transaction_at_index(block, idx.value())), + ) + } - fn transaction_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> BoxFuture> { - Box::new(self.fetcher().block(num.to_block_id()).map(move |block| { - light_fetch::extract_transaction_at_index(block, idx.value()) - })) - } + fn transaction_by_block_number_and_index( + &self, + num: BlockNumber, + idx: Index, + ) -> BoxFuture> { + Box::new( + self.fetcher() + .block(num.to_block_id()) + .map(move |block| light_fetch::extract_transaction_at_index(block, idx.value())), + ) + } - fn transaction_receipt(&self, hash: H256) -> BoxFuture> { - let fetcher = self.fetcher(); - Box::new(fetcher.transaction_by_hash(hash).and_then(move |tx| { - // the block hash included in the transaction object here has - // already been checked for canonicality and whether it contains - // the transaction. 
- match tx { - Some((tx, index)) => match tx.block_hash { - Some(block_hash) => { - let extract_receipt = fetcher.receipts(BlockId::Hash(block_hash)) - .and_then(move |mut receipts| future::ok(receipts.swap_remove(index))) - .map(Receipt::from) - .map(move |mut receipt| { - receipt.transaction_hash = Some(hash); - receipt.transaction_index = Some(index.into()); - receipt.block_hash = Some(block_hash); - receipt.block_number = tx.block_number; - receipt - }) - .map(Some); + fn transaction_receipt(&self, hash: H256) -> BoxFuture> { + let fetcher = self.fetcher(); + Box::new(fetcher.transaction_by_hash(hash).and_then(move |tx| { + // the block hash included in the transaction object here has + // already been checked for canonicality and whether it contains + // the transaction. + match tx { + Some((tx, index)) => match tx.block_hash { + Some(block_hash) => { + let extract_receipt = fetcher + .receipts(BlockId::Hash(block_hash)) + .and_then(move |mut receipts| future::ok(receipts.swap_remove(index))) + .map(Receipt::from) + .map(move |mut receipt| { + receipt.transaction_hash = Some(hash); + receipt.transaction_index = Some(index.into()); + receipt.block_hash = Some(block_hash); + receipt.block_number = tx.block_number; + receipt + }) + .map(Some); - Either::B(extract_receipt) - } - None => Either::A(future::err(errors::unknown_block())), - }, - None => Either::A(future::ok(None)), - } - })) - } + Either::B(extract_receipt) + } + None => Either::A(future::err(errors::unknown_block())), + }, + None => Either::A(future::ok(None)), + } + })) + } - fn uncle_by_block_hash_and_index(&self, hash: H256, idx: Index) -> BoxFuture> { - let client = self.client.clone(); - Box::new(self.fetcher().block(BlockId::Hash(hash)).map(move |block| { - extract_uncle_at_index(block, idx, client) - })) - } + fn uncle_by_block_hash_and_index( + &self, + hash: H256, + idx: Index, + ) -> BoxFuture> { + let client = self.client.clone(); + Box::new( + self.fetcher() + .block(BlockId::Hash(hash)) 
+ .map(move |block| extract_uncle_at_index(block, idx, client)), + ) + } - fn uncle_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> BoxFuture> { - let client = self.client.clone(); - Box::new(self.fetcher().block(num.to_block_id()).map(move |block| { - extract_uncle_at_index(block, idx, client) - })) - } + fn uncle_by_block_number_and_index( + &self, + num: BlockNumber, + idx: Index, + ) -> BoxFuture> { + let client = self.client.clone(); + Box::new( + self.fetcher() + .block(num.to_block_id()) + .map(move |block| extract_uncle_at_index(block, idx, client)), + ) + } - fn proof(&self, _address: H160, _values:Vec, _num: Option) -> BoxFuture { - Box::new(future::err(errors::unimplemented(None))) - } + fn proof( + &self, + _address: H160, + _values: Vec, + _num: Option, + ) -> BoxFuture { + Box::new(future::err(errors::unimplemented(None))) + } - fn compilers(&self) -> Result> { - Err(errors::deprecated("Compilation functionality is deprecated.".to_string())) - } + fn compilers(&self) -> Result> { + Err(errors::deprecated( + "Compilation functionality is deprecated.".to_string(), + )) + } - fn compile_lll(&self, _: String) -> Result { - Err(errors::deprecated("Compilation of LLL via RPC is deprecated".to_string())) - } + fn compile_lll(&self, _: String) -> Result { + Err(errors::deprecated( + "Compilation of LLL via RPC is deprecated".to_string(), + )) + } - fn compile_serpent(&self, _: String) -> Result { - Err(errors::deprecated("Compilation of Serpent via RPC is deprecated".to_string())) - } + fn compile_serpent(&self, _: String) -> Result { + Err(errors::deprecated( + "Compilation of Serpent via RPC is deprecated".to_string(), + )) + } - fn compile_solidity(&self, _: String) -> Result { - Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string())) - } + fn compile_solidity(&self, _: String) -> Result { + Err(errors::deprecated( + "Compilation of Solidity via RPC is deprecated".to_string(), + )) + } - fn logs(&self, filter: 
Filter) -> BoxFuture> { - let limit = filter.limit; + fn logs(&self, filter: Filter) -> BoxFuture> { + let limit = filter.limit; - Box::new( - Filterable::logs(self, match filter.try_into() { - Ok(value) => value, - Err(err) => return Box::new(future::err(err)), - }).map(move |logs| limit_logs(logs, limit))) - } + Box::new( + Filterable::logs( + self, + match filter.try_into() { + Ok(value) => value, + Err(err) => return Box::new(future::err(err)), + }, + ) + .map(move |logs| limit_logs(logs, limit)), + ) + } - fn work(&self, _timeout: Option) -> Result { - Err(errors::light_unimplemented(None)) - } + fn work(&self, _timeout: Option) -> Result { + Err(errors::light_unimplemented(None)) + } - fn submit_work(&self, _nonce: H64, _pow_hash: H256, _mix_hash: H256) -> Result { - Err(errors::light_unimplemented(None)) - } + fn submit_work(&self, _nonce: H64, _pow_hash: H256, _mix_hash: H256) -> Result { + Err(errors::light_unimplemented(None)) + } - fn submit_hashrate(&self, _rate: U256, _id: H256) -> Result { - Err(errors::light_unimplemented(None)) - } + fn submit_hashrate(&self, _rate: U256, _id: H256) -> Result { + Err(errors::light_unimplemented(None)) + } } // This trait implementation triggers a blanked impl of `EthFilter`. 
impl Filterable for EthClient where - C: LightChainClient + 'static, - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + C: LightChainClient + 'static, + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - fn best_block_number(&self) -> u64 { self.client.chain_info().best_block_number } + fn best_block_number(&self) -> u64 { + self.client.chain_info().best_block_number + } - fn block_hash(&self, id: BlockId) -> Option { - self.client.block_hash(id) - } + fn block_hash(&self, id: BlockId) -> Option { + self.client.block_hash(id) + } - fn pending_transaction_hashes(&self) -> BTreeSet { - BTreeSet::new() - } + fn pending_transaction_hashes(&self) -> BTreeSet { + BTreeSet::new() + } - fn logs(&self, filter: EthcoreFilter) -> BoxFuture> { - Box::new(self.fetcher().logs(filter)) as BoxFuture<_> - } + fn logs(&self, filter: EthcoreFilter) -> BoxFuture> { + Box::new(self.fetcher().logs(filter)) as BoxFuture<_> + } - fn pending_logs(&self, _block_number: u64, _filter: &EthcoreFilter) -> Vec { - Vec::new() // light clients don't mine. - } + fn pending_logs(&self, _block_number: u64, _filter: &EthcoreFilter) -> Vec { + Vec::new() // light clients don't mine. 
+ } - fn polls(&self) -> &Mutex> { - &self.polls - } + fn polls(&self) -> &Mutex> { + &self.polls + } - fn removed_logs(&self, _block_hash: ::ethereum_types::H256, _filter: &EthcoreFilter) -> (Vec, u64) { - (Default::default(), 0) - } + fn removed_logs( + &self, + _block_hash: ::ethereum_types::H256, + _filter: &EthcoreFilter, + ) -> (Vec, u64) { + (Default::default(), 0) + } } -fn extract_uncle_at_index(block: encoded::Block, index: Index, client: Arc) -> Option { - let uncle = match block.uncles().into_iter().nth(index.value()) { - Some(u) => u, - None => return None, - }; +fn extract_uncle_at_index( + block: encoded::Block, + index: Index, + client: Arc, +) -> Option { + let uncle = match block.uncles().into_iter().nth(index.value()) { + Some(u) => u, + None => return None, + }; - let extra_info = client.engine().extra_info(&uncle); - Some(RichBlock { - inner: Block { - hash: Some(uncle.hash()), - size: None, - parent_hash: *uncle.parent_hash(), - uncles_hash: *uncle.uncles_hash(), - author: *uncle.author(), - miner: *uncle.author(), - state_root: *uncle.state_root(), - transactions_root: *uncle.transactions_root(), - number: Some(uncle.number().into()), - gas_used: *uncle.gas_used(), - gas_limit: *uncle.gas_limit(), - logs_bloom: Some(*uncle.log_bloom()), - timestamp: uncle.timestamp().into(), - difficulty: *uncle.difficulty(), - total_difficulty: None, - receipts_root: *uncle.receipts_root(), - extra_data: uncle.extra_data().clone().into(), - seal_fields: uncle.seal().iter().cloned().map(Into::into).collect(), - uncles: vec![], - transactions: BlockTransactions::Hashes(vec![]), - }, - extra_info, - }) + let extra_info = client.engine().extra_info(&uncle); + Some(RichBlock { + inner: Block { + hash: Some(uncle.hash()), + size: None, + parent_hash: *uncle.parent_hash(), + uncles_hash: *uncle.uncles_hash(), + author: *uncle.author(), + miner: *uncle.author(), + state_root: *uncle.state_root(), + transactions_root: *uncle.transactions_root(), + number: 
Some(uncle.number().into()), + gas_used: *uncle.gas_used(), + gas_limit: *uncle.gas_limit(), + logs_bloom: Some(*uncle.log_bloom()), + timestamp: uncle.timestamp().into(), + difficulty: *uncle.difficulty(), + total_difficulty: None, + receipts_root: *uncle.receipts_root(), + extra_data: uncle.extra_data().clone().into(), + seal_fields: uncle.seal().iter().cloned().map(Into::into).collect(), + uncles: vec![], + transactions: BlockTransactions::Hashes(vec![]), + }, + extra_info, + }) } diff --git a/rpc/src/v1/impls/light/mod.rs b/rpc/src/v1/impls/light/mod.rs index c15951458..c40d49f94 100644 --- a/rpc/src/v1/impls/light/mod.rs +++ b/rpc/src/v1/impls/light/mod.rs @@ -20,13 +20,12 @@ //! significantly generic to be reused. pub mod eth; +pub mod net; pub mod parity; pub mod parity_set; pub mod trace; -pub mod net; -pub use self::eth::EthClient; -pub use self::parity::ParityClient; -pub use self::parity_set::ParitySetClient; -pub use self::net::NetClient; -pub use self::trace::TracesClient; +pub use self::{ + eth::EthClient, net::NetClient, parity::ParityClient, parity_set::ParitySetClient, + trace::TracesClient, +}; diff --git a/rpc/src/v1/impls/light/net.rs b/rpc/src/v1/impls/light/net.rs index a9ab012e5..311f5c78c 100644 --- a/rpc/src/v1/impls/light/net.rs +++ b/rpc/src/v1/impls/light/net.rs @@ -15,35 +15,39 @@ // along with Parity Ethereum. If not, see . //! Net rpc implementation. -use std::sync::Arc; use jsonrpc_core::Result; +use std::sync::Arc; use sync::LightSyncProvider; use v1::traits::Net; /// Net rpc implementation. pub struct NetClient { - sync: Arc + sync: Arc, } -impl NetClient where S: LightSyncProvider { - /// Creates new NetClient. - pub fn new(sync: Arc) -> Self { - NetClient { - sync, - } - } +impl NetClient +where + S: LightSyncProvider, +{ + /// Creates new NetClient. 
+ pub fn new(sync: Arc) -> Self { + NetClient { sync } + } } -impl Net for NetClient where S: LightSyncProvider { - fn version(&self) -> Result { - Ok(format!("{}", self.sync.network_id()).to_owned()) - } +impl Net for NetClient +where + S: LightSyncProvider, +{ + fn version(&self) -> Result { + Ok(format!("{}", self.sync.network_id()).to_owned()) + } - fn peer_count(&self) -> Result { - Ok(format!("0x{:x}", self.sync.peer_numbers().connected as u64).to_owned()) - } + fn peer_count(&self) -> Result { + Ok(format!("0x{:x}", self.sync.peer_numbers().connected as u64).to_owned()) + } - fn is_listening(&self) -> Result { - Ok(true) - } + fn is_listening(&self) -> Result { + Ok(true) + } } diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs index 0486366de..fecd879f7 100644 --- a/rpc/src/v1/impls/light/parity.rs +++ b/rpc/src/v1/impls/light/parity.rs @@ -15,396 +15,450 @@ // along with Parity Ethereum. If not, see . //! Parity-specific rpc implementation. -use std::sync::Arc; -use std::collections::BTreeMap; +use std::{collections::BTreeMap, sync::Arc}; use version::version_data; use crypto::DEFAULT_MAC; +use ethcore_logger::RotatingLogger; +use ethereum_types::{H160, H256, H512, H64, U256, U64}; use ethkey::{crypto::ecies, Brain, Generator}; use ethstore::random_phrase; -use sync::{LightSyncInfo, LightSyncProvider, LightNetworkDispatcher, ManageNetwork}; +use sync::{LightNetworkDispatcher, LightSyncInfo, LightSyncProvider, ManageNetwork}; use updater::VersionInfo as UpdaterVersionInfo; -use ethereum_types::{H64, H160, H256, H512, U64, U256}; -use ethcore_logger::RotatingLogger; -use jsonrpc_core::{Result, BoxFuture}; -use jsonrpc_core::futures::{future, Future}; +use jsonrpc_core::{ + futures::{future, Future}, + BoxFuture, Result, +}; use light::on_demand::OnDemandRequester; -use v1::helpers::{self, errors, ipfs, NetworkSettings, verify_signature}; -use v1::helpers::external_signer::{SignerService, SigningQueue}; -use 
v1::helpers::dispatch::LightDispatcher; -use v1::helpers::light_fetch::{LightFetch, light_all_transactions}; -use v1::metadata::Metadata; -use v1::traits::Parity; -use v1::types::{ - Bytes, CallRequest, - Peers, Transaction, RpcSettings, Histogram, - TransactionStats, LocalTransactionStatus, - LightBlockNumber, ChainStatus, Receipt, - BlockNumber, ConsensusCapability, VersionInfo, - OperationsInfo, Header, RichHeader, RecoveredAccount, - Log, Filter, +use v1::{ + helpers::{ + self, + dispatch::LightDispatcher, + errors, + external_signer::{SignerService, SigningQueue}, + ipfs, + light_fetch::{light_all_transactions, LightFetch}, + verify_signature, NetworkSettings, + }, + metadata::Metadata, + traits::Parity, + types::{ + BlockNumber, Bytes, CallRequest, ChainStatus, ConsensusCapability, Filter, Header, + Histogram, LightBlockNumber, LocalTransactionStatus, Log, OperationsInfo, Peers, Receipt, + RecoveredAccount, RichHeader, RpcSettings, Transaction, TransactionStats, VersionInfo, + }, }; use Host; /// Parity implementation for light client. pub struct ParityClient where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - light_dispatch: Arc>, - logger: Arc, - settings: Arc, - signer: Option>, - ws_address: Option, - gas_price_percentile: usize, + light_dispatch: Arc>, + logger: Arc, + settings: Arc, + signer: Option>, + ws_address: Option, + gas_price_percentile: usize, } impl ParityClient where - S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - /// Creates new `ParityClient`. 
- pub fn new( - light_dispatch: Arc>, - logger: Arc, - settings: Arc, - signer: Option>, - ws_address: Option, - gas_price_percentile: usize, - ) -> Self { - ParityClient { - light_dispatch, - logger, - settings, - signer, - ws_address, - gas_price_percentile, - } - } + /// Creates new `ParityClient`. + pub fn new( + light_dispatch: Arc>, + logger: Arc, + settings: Arc, + signer: Option>, + ws_address: Option, + gas_price_percentile: usize, + ) -> Self { + ParityClient { + light_dispatch, + logger, + settings, + signer, + ws_address, + gas_price_percentile, + } + } - /// Create a light blockchain data fetcher. - fn fetcher(&self) -> LightFetch - { - LightFetch { - client: self.light_dispatch.client.clone(), - on_demand: self.light_dispatch.on_demand.clone(), - sync: self.light_dispatch.sync.clone(), - cache: self.light_dispatch.cache.clone(), - gas_price_percentile: self.gas_price_percentile, - } - } + /// Create a light blockchain data fetcher. + fn fetcher(&self) -> LightFetch { + LightFetch { + client: self.light_dispatch.client.clone(), + on_demand: self.light_dispatch.on_demand.clone(), + sync: self.light_dispatch.sync.clone(), + cache: self.light_dispatch.cache.clone(), + gas_price_percentile: self.gas_price_percentile, + } + } } impl Parity for ParityClient where - S: LightSyncInfo + LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, - OD: OnDemandRequester + 'static + S: LightSyncInfo + LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, + OD: OnDemandRequester + 'static, { - type Metadata = Metadata; + type Metadata = Metadata; - fn transactions_limit(&self) -> Result { - Ok(usize::max_value()) - } + fn transactions_limit(&self) -> Result { + Ok(usize::max_value()) + } - fn min_gas_price(&self) -> Result { - Ok(U256::default()) - } + fn min_gas_price(&self) -> Result { + Ok(U256::default()) + } - fn extra_data(&self) -> Result { - Ok(Bytes::default()) - } + fn extra_data(&self) -> Result { + Ok(Bytes::default()) + } - 
fn gas_floor_target(&self) -> Result { - Ok(U256::default()) - } + fn gas_floor_target(&self) -> Result { + Ok(U256::default()) + } - fn gas_ceil_target(&self) -> Result { - Ok(U256::default()) - } + fn gas_ceil_target(&self) -> Result { + Ok(U256::default()) + } - fn dev_logs(&self) -> Result> { - let logs = self.logger.logs(); - Ok(logs.as_slice().to_owned()) - } + fn dev_logs(&self) -> Result> { + let logs = self.logger.logs(); + Ok(logs.as_slice().to_owned()) + } - fn dev_logs_levels(&self) -> Result { - Ok(self.logger.levels().to_owned()) - } + fn dev_logs_levels(&self) -> Result { + Ok(self.logger.levels().to_owned()) + } - fn net_chain(&self) -> Result { - Ok(self.settings.chain.clone()) - } + fn net_chain(&self) -> Result { + Ok(self.settings.chain.clone()) + } - fn net_peers(&self) -> Result { - let peers = self.light_dispatch.sync.peers().into_iter().map(Into::into).collect(); - let peer_numbers = self.light_dispatch.sync.peer_numbers(); + fn net_peers(&self) -> Result { + let peers = self + .light_dispatch + .sync + .peers() + .into_iter() + .map(Into::into) + .collect(); + let peer_numbers = self.light_dispatch.sync.peer_numbers(); - Ok(Peers { - active: peer_numbers.active, - connected: peer_numbers.connected, - max: peer_numbers.max as u32, - peers, - }) - } + Ok(Peers { + active: peer_numbers.active, + connected: peer_numbers.connected, + max: peer_numbers.max as u32, + peers, + }) + } - fn net_port(&self) -> Result { - Ok(self.settings.network_port) - } + fn net_port(&self) -> Result { + Ok(self.settings.network_port) + } - fn node_name(&self) -> Result { - Ok(self.settings.name.clone()) - } + fn node_name(&self) -> Result { + Ok(self.settings.name.clone()) + } - fn registry_address(&self) -> Result> { - let reg = self.light_dispatch.client.engine().params().registrar; - if reg == Default::default() { - Ok(None) - } else { - Ok(Some(reg)) - } - } + fn registry_address(&self) -> Result> { + let reg = 
self.light_dispatch.client.engine().params().registrar; + if reg == Default::default() { + Ok(None) + } else { + Ok(Some(reg)) + } + } - fn rpc_settings(&self) -> Result { - Ok(RpcSettings { - enabled: self.settings.rpc_enabled, - interface: self.settings.rpc_interface.clone(), - port: self.settings.rpc_port as u64, - }) - } + fn rpc_settings(&self) -> Result { + Ok(RpcSettings { + enabled: self.settings.rpc_enabled, + interface: self.settings.rpc_interface.clone(), + port: self.settings.rpc_port as u64, + }) + } - fn default_extra_data(&self) -> Result { - Ok(Bytes::new(version_data())) - } + fn default_extra_data(&self) -> Result { + Ok(Bytes::new(version_data())) + } - fn gas_price_histogram(&self) -> BoxFuture { - Box::new(self.light_dispatch.gas_price_corpus() - .and_then(|corpus| corpus.histogram(10).ok_or_else(errors::not_enough_data)) - .map(Into::into)) - } + fn gas_price_histogram(&self) -> BoxFuture { + Box::new( + self.light_dispatch + .gas_price_corpus() + .and_then(|corpus| corpus.histogram(10).ok_or_else(errors::not_enough_data)) + .map(Into::into), + ) + } - fn unsigned_transactions_count(&self) -> Result { - match self.signer { - None => Err(errors::signer_disabled()), - Some(ref signer) => Ok(signer.len()), - } - } + fn unsigned_transactions_count(&self) -> Result { + match self.signer { + None => Err(errors::signer_disabled()), + Some(ref signer) => Ok(signer.len()), + } + } - fn generate_secret_phrase(&self) -> Result { - Ok(random_phrase(12)) - } + fn generate_secret_phrase(&self) -> Result { + Ok(random_phrase(12)) + } - fn phrase_to_address(&self, phrase: String) -> Result { - Ok(Brain::new(phrase).generate().expect("Brain::generate always returns Ok; qed").address()) - } + fn phrase_to_address(&self, phrase: String) -> Result { + Ok(Brain::new(phrase) + .generate() + .expect("Brain::generate always returns Ok; qed") + .address()) + } - fn list_accounts(&self, _: u64, _: Option, _: Option) -> Result>> { - 
Err(errors::light_unimplemented(None)) - } + fn list_accounts( + &self, + _: u64, + _: Option, + _: Option, + ) -> Result>> { + Err(errors::light_unimplemented(None)) + } - fn list_storage_keys(&self, _: H160, _: u64, _: Option, _: Option) -> Result>> { - Err(errors::light_unimplemented(None)) - } + fn list_storage_keys( + &self, + _: H160, + _: u64, + _: Option, + _: Option, + ) -> Result>> { + Err(errors::light_unimplemented(None)) + } - fn encrypt_message(&self, key: H512, phrase: Bytes) -> Result { - ecies::encrypt(&key, &DEFAULT_MAC, &phrase.0) - .map_err(errors::encryption) - .map(Into::into) - } + fn encrypt_message(&self, key: H512, phrase: Bytes) -> Result { + ecies::encrypt(&key, &DEFAULT_MAC, &phrase.0) + .map_err(errors::encryption) + .map(Into::into) + } - fn pending_transactions(&self, limit: Option) -> Result> { - let txq = self.light_dispatch.transaction_queue.read(); - let chain_info = self.light_dispatch.client.chain_info(); - Ok( - txq.ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp) - .into_iter() - .take(limit.unwrap_or_else(usize::max_value)) - .map(Transaction::from_pending) - .collect::>() - ) - } + fn pending_transactions(&self, limit: Option) -> Result> { + let txq = self.light_dispatch.transaction_queue.read(); + let chain_info = self.light_dispatch.client.chain_info(); + Ok(txq + .ready_transactions( + chain_info.best_block_number, + chain_info.best_block_timestamp, + ) + .into_iter() + .take(limit.unwrap_or_else(usize::max_value)) + .map(Transaction::from_pending) + .collect::>()) + } - fn all_transactions(&self) -> Result> { - Ok( - light_all_transactions(&self.light_dispatch) - .map(Transaction::from_pending) - .collect() - ) - } + fn all_transactions(&self) -> Result> { + Ok(light_all_transactions(&self.light_dispatch) + .map(Transaction::from_pending) + .collect()) + } - fn all_transaction_hashes(&self) -> Result> { - Ok( - light_all_transactions(&self.light_dispatch) - .map(|tx| 
tx.transaction.hash()) - .collect() - ) - } + fn all_transaction_hashes(&self) -> Result> { + Ok(light_all_transactions(&self.light_dispatch) + .map(|tx| tx.transaction.hash()) + .collect()) + } - fn future_transactions(&self) -> Result> { - let txq = self.light_dispatch.transaction_queue.read(); - let chain_info = self.light_dispatch.client.chain_info(); - Ok( - txq.future_transactions(chain_info.best_block_number, chain_info.best_block_timestamp) - .into_iter() - .map(Transaction::from_pending) - .collect::>() - ) - } + fn future_transactions(&self) -> Result> { + let txq = self.light_dispatch.transaction_queue.read(); + let chain_info = self.light_dispatch.client.chain_info(); + Ok(txq + .future_transactions( + chain_info.best_block_number, + chain_info.best_block_timestamp, + ) + .into_iter() + .map(Transaction::from_pending) + .collect::>()) + } - fn pending_transactions_stats(&self) -> Result> { - let stats = self.light_dispatch.sync.transactions_stats(); - Ok(stats.into_iter() - .map(|(hash, stats)| (hash, stats.into())) - .collect() - ) - } + fn pending_transactions_stats(&self) -> Result> { + let stats = self.light_dispatch.sync.transactions_stats(); + Ok(stats + .into_iter() + .map(|(hash, stats)| (hash, stats.into())) + .collect()) + } - fn local_transactions(&self) -> Result> { - let mut map = BTreeMap::new(); - let chain_info = self.light_dispatch.client.chain_info(); - let (best_num, best_tm) = (chain_info.best_block_number, chain_info.best_block_timestamp); - let txq = self.light_dispatch.transaction_queue.read(); + fn local_transactions(&self) -> Result> { + let mut map = BTreeMap::new(); + let chain_info = self.light_dispatch.client.chain_info(); + let (best_num, best_tm) = ( + chain_info.best_block_number, + chain_info.best_block_timestamp, + ); + let txq = self.light_dispatch.transaction_queue.read(); - for pending in txq.ready_transactions(best_num, best_tm) { - map.insert(pending.hash(), LocalTransactionStatus::Pending); - } + for pending in 
txq.ready_transactions(best_num, best_tm) { + map.insert(pending.hash(), LocalTransactionStatus::Pending); + } - for future in txq.future_transactions(best_num, best_tm) { - map.insert(future.hash(), LocalTransactionStatus::Future); - } + for future in txq.future_transactions(best_num, best_tm) { + map.insert(future.hash(), LocalTransactionStatus::Future); + } - // TODO: other types? + // TODO: other types? - Ok(map) - } + Ok(map) + } - fn ws_url(&self) -> Result { - helpers::to_url(&self.ws_address) - .ok_or_else(errors::ws_disabled) - } + fn ws_url(&self) -> Result { + helpers::to_url(&self.ws_address).ok_or_else(errors::ws_disabled) + } - fn next_nonce(&self, address: H160) -> BoxFuture { - Box::new(self.light_dispatch.next_nonce(address)) - } + fn next_nonce(&self, address: H160) -> BoxFuture { + Box::new(self.light_dispatch.next_nonce(address)) + } - fn mode(&self) -> Result { - Err(errors::light_unimplemented(None)) - } + fn mode(&self) -> Result { + Err(errors::light_unimplemented(None)) + } - fn chain(&self) -> Result { - Ok(self.settings.chain.clone()) - } + fn chain(&self) -> Result { + Ok(self.settings.chain.clone()) + } - fn enode(&self) -> Result { - self.light_dispatch.sync.enode().ok_or_else(errors::network_disabled) - } + fn enode(&self) -> Result { + self.light_dispatch + .sync + .enode() + .ok_or_else(errors::network_disabled) + } - fn consensus_capability(&self) -> Result { - Err(errors::light_unimplemented(None)) - } + fn consensus_capability(&self) -> Result { + Err(errors::light_unimplemented(None)) + } - fn version_info(&self) -> Result { - Ok(UpdaterVersionInfo::this().into()) - } + fn version_info(&self) -> Result { + Ok(UpdaterVersionInfo::this().into()) + } - fn releases_info(&self) -> Result> { - Err(errors::light_unimplemented(None)) - } + fn releases_info(&self) -> Result> { + Err(errors::light_unimplemented(None)) + } - fn chain_status(&self) -> Result { - let chain_info = self.light_dispatch.client.chain_info(); + fn 
chain_status(&self) -> Result { + let chain_info = self.light_dispatch.client.chain_info(); - let gap = chain_info.ancient_block_number.map(|x| U256::from(x + 1)) - .and_then(|first| chain_info.first_block_number.map(|last| (first, U256::from(last)))); + let gap = chain_info + .ancient_block_number + .map(|x| U256::from(x + 1)) + .and_then(|first| { + chain_info + .first_block_number + .map(|last| (first, U256::from(last))) + }); - Ok(ChainStatus { - block_gap: gap, - }) - } + Ok(ChainStatus { block_gap: gap }) + } - fn node_kind(&self) -> Result<::v1::types::NodeKind> { - use ::v1::types::{NodeKind, Availability, Capability}; + fn node_kind(&self) -> Result<::v1::types::NodeKind> { + use v1::types::{Availability, Capability, NodeKind}; - Ok(NodeKind { - availability: Availability::Personal, - capability: Capability::Light, - }) - } + Ok(NodeKind { + availability: Availability::Personal, + capability: Capability::Light, + }) + } - fn block_header(&self, number: Option) -> BoxFuture { - use types::encoded; + fn block_header(&self, number: Option) -> BoxFuture { + use types::encoded; - let engine = self.light_dispatch.client.engine().clone(); - let from_encoded = move |encoded: encoded::Header| { - let header = encoded.decode().map_err(errors::decode)?; - let extra_info = engine.extra_info(&header); - Ok(RichHeader { - inner: Header { - hash: Some(header.hash()), - size: Some(encoded.rlp().as_raw().len().into()), - parent_hash: *header.parent_hash(), - uncles_hash: *header.uncles_hash(), - author: *header.author(), - miner: *header.author(), - state_root: *header.state_root(), - transactions_root: *header.transactions_root(), - receipts_root: *header.receipts_root(), - number: Some(header.number().into()), - gas_used: *header.gas_used(), - gas_limit: *header.gas_limit(), - logs_bloom: *header.log_bloom(), - timestamp: header.timestamp().into(), - difficulty: *header.difficulty(), - seal_fields: header.seal().iter().cloned().map(Into::into).collect(), - extra_data: 
Bytes::new(header.extra_data().clone()), - }, - extra_info, - }) - }; - let id = number.unwrap_or_default().to_block_id(); - Box::new(self.fetcher().header(id).and_then(from_encoded)) - } + let engine = self.light_dispatch.client.engine().clone(); + let from_encoded = move |encoded: encoded::Header| { + let header = encoded.decode().map_err(errors::decode)?; + let extra_info = engine.extra_info(&header); + Ok(RichHeader { + inner: Header { + hash: Some(header.hash()), + size: Some(encoded.rlp().as_raw().len().into()), + parent_hash: *header.parent_hash(), + uncles_hash: *header.uncles_hash(), + author: *header.author(), + miner: *header.author(), + state_root: *header.state_root(), + transactions_root: *header.transactions_root(), + receipts_root: *header.receipts_root(), + number: Some(header.number().into()), + gas_used: *header.gas_used(), + gas_limit: *header.gas_limit(), + logs_bloom: *header.log_bloom(), + timestamp: header.timestamp().into(), + difficulty: *header.difficulty(), + seal_fields: header.seal().iter().cloned().map(Into::into).collect(), + extra_data: Bytes::new(header.extra_data().clone()), + }, + extra_info, + }) + }; + let id = number.unwrap_or_default().to_block_id(); + Box::new(self.fetcher().header(id).and_then(from_encoded)) + } - fn block_receipts(&self, number: Option) -> BoxFuture> { - let id = number.unwrap_or_default().to_block_id(); - Box::new(self.fetcher().receipts(id).and_then(|receipts| Ok(receipts.into_iter().map(Into::into).collect()))) - } + fn block_receipts(&self, number: Option) -> BoxFuture> { + let id = number.unwrap_or_default().to_block_id(); + Box::new( + self.fetcher() + .receipts(id) + .and_then(|receipts| Ok(receipts.into_iter().map(Into::into).collect())), + ) + } - fn ipfs_cid(&self, content: Bytes) -> Result { - ipfs::cid(content) - } + fn ipfs_cid(&self, content: Bytes) -> Result { + ipfs::cid(content) + } - fn call(&self, _requests: Vec, _block: Option) -> Result> { - Err(errors::light_unimplemented(None)) - } + 
fn call(&self, _requests: Vec, _block: Option) -> Result> { + Err(errors::light_unimplemented(None)) + } - fn submit_work_detail(&self, _nonce: H64, _pow_hash: H256, _mix_hash: H256) -> Result { - Err(errors::light_unimplemented(None)) - } + fn submit_work_detail(&self, _nonce: H64, _pow_hash: H256, _mix_hash: H256) -> Result { + Err(errors::light_unimplemented(None)) + } - fn status(&self) -> Result<()> { - let has_peers = self.settings.is_dev_chain || self.light_dispatch.sync.peer_numbers().connected > 0; - let is_importing = (*self.light_dispatch.sync).is_major_importing(); + fn status(&self) -> Result<()> { + let has_peers = + self.settings.is_dev_chain || self.light_dispatch.sync.peer_numbers().connected > 0; + let is_importing = (*self.light_dispatch.sync).is_major_importing(); - if has_peers && !is_importing { - Ok(()) - } else { - Err(errors::status_error(has_peers)) - } - } + if has_peers && !is_importing { + Ok(()) + } else { + Err(errors::status_error(has_peers)) + } + } - fn logs_no_tx_hash(&self, filter: Filter) -> BoxFuture> { - let filter = match filter.try_into() { - Ok(value) => value, - Err(err) => return Box::new(future::err(err)), - }; - Box::new(self.fetcher().logs_no_tx_hash(filter)) as BoxFuture<_> - } + fn logs_no_tx_hash(&self, filter: Filter) -> BoxFuture> { + let filter = match filter.try_into() { + Ok(value) => value, + Err(err) => return Box::new(future::err(err)), + }; + Box::new(self.fetcher().logs_no_tx_hash(filter)) as BoxFuture<_> + } - fn verify_signature(&self, is_prefixed: bool, message: Bytes, r: H256, s: H256, v: U64) -> Result { - verify_signature(is_prefixed, message, r, s, v, self.light_dispatch.client.signing_chain_id()) - } + fn verify_signature( + &self, + is_prefixed: bool, + message: Bytes, + r: H256, + s: H256, + v: U64, + ) -> Result { + verify_signature( + is_prefixed, + message, + r, + s, + v, + self.light_dispatch.client.signing_chain_id(), + ) + } } diff --git a/rpc/src/v1/impls/light/parity_set.rs 
b/rpc/src/v1/impls/light/parity_set.rs index 68fc212b2..bdf13ead1 100644 --- a/rpc/src/v1/impls/light/parity_set.rs +++ b/rpc/src/v1/impls/light/parity_set.rs @@ -17,8 +17,7 @@ //! Parity-specific rpc interface for operations altering the settings. //! Implementation for light client. -use std::io; -use std::sync::Arc; +use std::{io, sync::Arc}; use ethereum_types::{H160, H256, U256}; use fetch::{self, Fetch}; @@ -26,127 +25,130 @@ use hash::keccak_buffer; use light::client::LightChainClient; use sync::ManageNetwork; -use jsonrpc_core::{Result, BoxFuture}; -use jsonrpc_core::futures::Future; -use v1::helpers::errors; -use v1::traits::ParitySet; -use v1::types::{Bytes, ReleaseInfo, Transaction}; +use jsonrpc_core::{futures::Future, BoxFuture, Result}; +use v1::{ + helpers::errors, + traits::ParitySet, + types::{Bytes, ReleaseInfo, Transaction}, +}; /// Parity-specific rpc interface for operations altering the settings. pub struct ParitySetClient { - client: Arc, - net: Arc, - fetch: F, + client: Arc, + net: Arc, + fetch: F, } impl ParitySetClient { - /// Creates new `ParitySetClient` with given `Fetch`. - pub fn new(client: Arc, net: Arc, fetch: F) -> Self { - ParitySetClient { - client, - net, - fetch, - } - } + /// Creates new `ParitySetClient` with given `Fetch`. 
+ pub fn new(client: Arc, net: Arc, fetch: F) -> Self { + ParitySetClient { client, net, fetch } + } } impl ParitySet for ParitySetClient { - fn set_min_gas_price(&self, _gas_price: U256) -> Result { - Err(errors::light_unimplemented(None)) - } + fn set_min_gas_price(&self, _gas_price: U256) -> Result { + Err(errors::light_unimplemented(None)) + } - fn set_gas_floor_target(&self, _target: U256) -> Result { - Err(errors::light_unimplemented(None)) - } + fn set_gas_floor_target(&self, _target: U256) -> Result { + Err(errors::light_unimplemented(None)) + } - fn set_gas_ceil_target(&self, _target: U256) -> Result { - Err(errors::light_unimplemented(None)) - } + fn set_gas_ceil_target(&self, _target: U256) -> Result { + Err(errors::light_unimplemented(None)) + } - fn set_extra_data(&self, _extra_data: Bytes) -> Result { - Err(errors::light_unimplemented(None)) - } + fn set_extra_data(&self, _extra_data: Bytes) -> Result { + Err(errors::light_unimplemented(None)) + } - fn set_author(&self, _author: H160) -> Result { - Err(errors::light_unimplemented(None)) - } + fn set_author(&self, _author: H160) -> Result { + Err(errors::light_unimplemented(None)) + } - fn set_engine_signer_secret(&self, _secret: H256) -> Result { - Err(errors::light_unimplemented(None)) - } + fn set_engine_signer_secret(&self, _secret: H256) -> Result { + Err(errors::light_unimplemented(None)) + } - fn set_transactions_limit(&self, _limit: usize) -> Result { - Err(errors::light_unimplemented(None)) - } + fn set_transactions_limit(&self, _limit: usize) -> Result { + Err(errors::light_unimplemented(None)) + } - fn set_tx_gas_limit(&self, _limit: U256) -> Result { - Err(errors::light_unimplemented(None)) - } + fn set_tx_gas_limit(&self, _limit: U256) -> Result { + Err(errors::light_unimplemented(None)) + } - fn add_reserved_peer(&self, peer: String) -> Result { - match self.net.add_reserved_peer(peer) { - Ok(()) => Ok(true), - Err(e) => Err(errors::invalid_params("Peer address", e)), - } - } + fn 
add_reserved_peer(&self, peer: String) -> Result { + match self.net.add_reserved_peer(peer) { + Ok(()) => Ok(true), + Err(e) => Err(errors::invalid_params("Peer address", e)), + } + } - fn remove_reserved_peer(&self, peer: String) -> Result { - match self.net.remove_reserved_peer(peer) { - Ok(()) => Ok(true), - Err(e) => Err(errors::invalid_params("Peer address", e)), - } - } + fn remove_reserved_peer(&self, peer: String) -> Result { + match self.net.remove_reserved_peer(peer) { + Ok(()) => Ok(true), + Err(e) => Err(errors::invalid_params("Peer address", e)), + } + } - fn drop_non_reserved_peers(&self) -> Result { - self.net.deny_unreserved_peers(); - Ok(true) - } + fn drop_non_reserved_peers(&self) -> Result { + self.net.deny_unreserved_peers(); + Ok(true) + } - fn accept_non_reserved_peers(&self) -> Result { - self.net.accept_unreserved_peers(); - Ok(true) - } + fn accept_non_reserved_peers(&self) -> Result { + self.net.accept_unreserved_peers(); + Ok(true) + } - fn start_network(&self) -> Result { - self.net.start_network(); - Ok(true) - } + fn start_network(&self) -> Result { + self.net.start_network(); + Ok(true) + } - fn stop_network(&self) -> Result { - self.net.stop_network(); - Ok(true) - } + fn stop_network(&self) -> Result { + self.net.stop_network(); + Ok(true) + } - fn set_mode(&self, _mode: String) -> Result { - Err(errors::light_unimplemented(None)) - } + fn set_mode(&self, _mode: String) -> Result { + Err(errors::light_unimplemented(None)) + } - fn set_spec_name(&self, spec_name: String) -> Result { - self.client.set_spec_name(spec_name).map(|_| true).map_err(|()| errors::cannot_restart()) - } + fn set_spec_name(&self, spec_name: String) -> Result { + self.client + .set_spec_name(spec_name) + .map(|_| true) + .map_err(|()| errors::cannot_restart()) + } - fn hash_content(&self, url: String) -> BoxFuture { - let future = self.fetch.get(&url, Default::default()).then(move |result| { - result - .map_err(errors::fetch) - .and_then(move |response| { - let 
mut reader = io::BufReader::new(fetch::BodyReader::new(response)); - keccak_buffer(&mut reader).map_err(errors::fetch) - }) - .map(Into::into) - }); - Box::new(future) - } + fn hash_content(&self, url: String) -> BoxFuture { + let future = self + .fetch + .get(&url, Default::default()) + .then(move |result| { + result + .map_err(errors::fetch) + .and_then(move |response| { + let mut reader = io::BufReader::new(fetch::BodyReader::new(response)); + keccak_buffer(&mut reader).map_err(errors::fetch) + }) + .map(Into::into) + }); + Box::new(future) + } - fn upgrade_ready(&self) -> Result> { - Err(errors::light_unimplemented(None)) - } + fn upgrade_ready(&self) -> Result> { + Err(errors::light_unimplemented(None)) + } - fn execute_upgrade(&self) -> Result { - Err(errors::light_unimplemented(None)) - } + fn execute_upgrade(&self) -> Result { + Err(errors::light_unimplemented(None)) + } - fn remove_transaction(&self, _hash: H256) -> Result> { - Err(errors::light_unimplemented(None)) - } + fn remove_transaction(&self, _hash: H256) -> Result> { + Err(errors::light_unimplemented(None)) + } } diff --git a/rpc/src/v1/impls/light/trace.rs b/rpc/src/v1/impls/light/trace.rs index a560f980e..072827881 100644 --- a/rpc/src/v1/impls/light/trace.rs +++ b/rpc/src/v1/impls/light/trace.rs @@ -18,52 +18,82 @@ use ethereum_types::H256; use jsonrpc_core::Result; -use v1::Metadata; -use v1::traits::Traces; -use v1::helpers::errors; -use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, - TraceResultsWithTransactionHash, TraceOptions}; +use v1::{ + helpers::errors, + traits::Traces, + types::{ + BlockNumber, Bytes, CallRequest, Index, LocalizedTrace, TraceFilter, TraceOptions, + TraceResults, TraceResultsWithTransactionHash, + }, + Metadata, +}; /// Traces api implementation. // TODO: all calling APIs should be possible w. proved remote TX execution. 
pub struct TracesClient; impl Traces for TracesClient { - type Metadata = Metadata; + type Metadata = Metadata; - fn filter(&self, _filter: TraceFilter) -> Result>> { - Err(errors::light_unimplemented(None)) - } + fn filter(&self, _filter: TraceFilter) -> Result>> { + Err(errors::light_unimplemented(None)) + } - fn block_traces(&self, _block_number: BlockNumber) -> Result>> { - Err(errors::light_unimplemented(None)) - } + fn block_traces(&self, _block_number: BlockNumber) -> Result>> { + Err(errors::light_unimplemented(None)) + } - fn transaction_traces(&self, _transaction_hash: H256) -> Result>> { - Err(errors::light_unimplemented(None)) - } + fn transaction_traces(&self, _transaction_hash: H256) -> Result>> { + Err(errors::light_unimplemented(None)) + } - fn trace(&self, _transaction_hash: H256, _address: Vec) -> Result> { - Err(errors::light_unimplemented(None)) - } + fn trace( + &self, + _transaction_hash: H256, + _address: Vec, + ) -> Result> { + Err(errors::light_unimplemented(None)) + } - fn call(&self, _request: CallRequest, _flags: TraceOptions, _block: Option) -> Result { - Err(errors::light_unimplemented(None)) - } + fn call( + &self, + _request: CallRequest, + _flags: TraceOptions, + _block: Option, + ) -> Result { + Err(errors::light_unimplemented(None)) + } - fn call_many(&self, _request: Vec<(CallRequest, TraceOptions)>, _block: Option) -> Result> { - Err(errors::light_unimplemented(None)) - } + fn call_many( + &self, + _request: Vec<(CallRequest, TraceOptions)>, + _block: Option, + ) -> Result> { + Err(errors::light_unimplemented(None)) + } - fn raw_transaction(&self, _raw_transaction: Bytes, _flags: TraceOptions, _block: Option) -> Result { - Err(errors::light_unimplemented(None)) - } + fn raw_transaction( + &self, + _raw_transaction: Bytes, + _flags: TraceOptions, + _block: Option, + ) -> Result { + Err(errors::light_unimplemented(None)) + } - fn replay_transaction(&self, _transaction_hash: H256, _flags: TraceOptions) -> Result { - 
Err(errors::light_unimplemented(None)) - } + fn replay_transaction( + &self, + _transaction_hash: H256, + _flags: TraceOptions, + ) -> Result { + Err(errors::light_unimplemented(None)) + } - fn replay_block_transactions(&self, _block_number: BlockNumber, _flags: TraceOptions) -> Result> { - Err(errors::light_unimplemented(None)) - } + fn replay_block_transactions( + &self, + _block_number: BlockNumber, + _flags: TraceOptions, + ) -> Result> { + Err(errors::light_unimplemented(None)) + } } diff --git a/rpc/src/v1/impls/mod.rs b/rpc/src/v1/impls/mod.rs index ba1cc100e..09c481a17 100644 --- a/rpc/src/v1/impls/mod.rs +++ b/rpc/src/v1/impls/mod.rs @@ -40,26 +40,28 @@ mod web3; pub mod light; -pub use self::debug::DebugClient; -pub use self::eth::{EthClient, EthClientOptions}; -pub use self::eth_filter::EthFilterClient; -pub use self::eth_pubsub::EthPubSubClient; -pub use self::net::NetClient; -pub use self::parity::ParityClient; #[cfg(any(test, feature = "accounts"))] pub use self::parity_accounts::ParityAccountsClient; -pub use self::parity_set::ParitySetClient; #[cfg(any(test, feature = "accounts"))] pub use self::parity_set::accounts::ParitySetAccountsClient; #[cfg(any(test, feature = "accounts"))] pub use self::personal::PersonalClient; -pub use self::private::PrivateClient; -pub use self::pubsub::PubSubClient; -pub use self::rpc::RpcClient; #[cfg(any(test, feature = "accounts"))] pub use self::secretstore::SecretStoreClient; -pub use self::signer::SignerClient; -pub use self::signing::SigningQueueClient; -pub use self::signing_unsafe::SigningUnsafeClient; -pub use self::traces::TracesClient; -pub use self::web3::Web3Client; +pub use self::{ + debug::DebugClient, + eth::{EthClient, EthClientOptions}, + eth_filter::EthFilterClient, + eth_pubsub::EthPubSubClient, + net::NetClient, + parity::ParityClient, + parity_set::ParitySetClient, + private::PrivateClient, + pubsub::PubSubClient, + rpc::RpcClient, + signer::SignerClient, + signing::SigningQueueClient, + 
signing_unsafe::SigningUnsafeClient, + traces::TracesClient, + web3::Web3Client, +}; diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 188d67cd5..3307ac9c9 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -15,45 +15,50 @@ // along with Parity Ethereum. If not, see . //! Net rpc implementation. -use std::sync::Arc; use jsonrpc_core::Result; +use std::sync::Arc; use sync::SyncProvider; use v1::traits::Net; /// Net rpc implementation. pub struct NetClient { - sync: Arc, - /// Cached `network_id`. - /// - /// We cache it to avoid redundant aquire of sync read lock. - /// https://github.com/paritytech/parity-ethereum/issues/8746 - network_id: u64, + sync: Arc, + /// Cached `network_id`. + /// + /// We cache it to avoid redundant aquire of sync read lock. + /// https://github.com/paritytech/parity-ethereum/issues/8746 + network_id: u64, } -impl NetClient where S: SyncProvider { - /// Creates new NetClient. - pub fn new(sync: &Arc) -> Self { - NetClient { - sync: sync.clone(), - network_id: sync.status().network_id, - } - } +impl NetClient +where + S: SyncProvider, +{ + /// Creates new NetClient. 
+ pub fn new(sync: &Arc) -> Self { + NetClient { + sync: sync.clone(), + network_id: sync.status().network_id, + } + } } -impl Net for NetClient where S: SyncProvider + 'static { - fn version(&self) -> Result { - Ok(format!("{}", self.network_id)) - } +impl Net for NetClient +where + S: SyncProvider + 'static, +{ + fn version(&self) -> Result { + Ok(format!("{}", self.network_id)) + } - fn peer_count(&self) -> Result { - Ok(format!("{:#x}", self.sync.status().num_peers as u64)) - } - - fn is_listening(&self) -> Result { - // right now (11 march 2016), we are always listening for incoming connections - // - // (this may not be true now -- 26 september 2016) - Ok(true) - } + fn peer_count(&self) -> Result { + Ok(format!("{:#x}", self.sync.status().num_peers as u64)) + } + fn is_listening(&self) -> Result { + // right now (11 march 2016), we are always listening for incoming connections + // + // (this may not be true now -- 26 september 2016) + Ok(true) + } } diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 27a703795..48f3c8ad9 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -15,453 +15,505 @@ // along with Parity Ethereum. If not, see . //! Parity-specific rpc implementation. 
-use std::sync::Arc; -use std::str::FromStr; -use std::collections::BTreeMap; +use std::{collections::BTreeMap, str::FromStr, sync::Arc}; use crypto::DEFAULT_MAC; -use ethereum_types::{Address, H64, H160, H256, H512, U64, U256}; -use ethcore::client::{BlockChainClient, StateClient, Call}; -use ethcore::miner::{self, MinerService}; -use ethcore::snapshot::{SnapshotService, RestorationStatus}; -use ethcore::state::StateInfo; +use ethcore::{ + client::{BlockChainClient, Call, StateClient}, + miner::{self, MinerService}, + snapshot::{RestorationStatus, SnapshotService}, + state::StateInfo, +}; use ethcore_logger::RotatingLogger; +use ethereum_types::{Address, H160, H256, H512, H64, U256, U64}; use ethkey::{crypto::ecies, Brain, Generator}; use ethstore::random_phrase; -use jsonrpc_core::futures::future; -use jsonrpc_core::{BoxFuture, Result}; -use sync::{SyncProvider, ManageNetwork}; +use jsonrpc_core::{futures::future, BoxFuture, Result}; +use sync::{ManageNetwork, SyncProvider}; use types::ids::BlockId; -use updater::{Service as UpdateService}; +use updater::Service as UpdateService; use version::version_data; -use v1::helpers::block_import::is_major_importing; -use v1::helpers::{self, errors, fake_sign, ipfs, NetworkSettings, verify_signature}; -use v1::helpers::external_signer::{SigningQueue, SignerService}; -use v1::metadata::Metadata; -use v1::traits::Parity; -use v1::types::{ - Bytes, CallRequest, - Peers, Transaction, RpcSettings, Histogram, - TransactionStats, LocalTransactionStatus, - BlockNumber, ConsensusCapability, VersionInfo, - OperationsInfo, ChainStatus, Log, Filter, - RichHeader, Receipt, RecoveredAccount, - block_number_to_id +use v1::{ + helpers::{ + self, + block_import::is_major_importing, + errors, + external_signer::{SignerService, SigningQueue}, + fake_sign, ipfs, verify_signature, NetworkSettings, + }, + metadata::Metadata, + traits::Parity, + types::{ + block_number_to_id, BlockNumber, Bytes, CallRequest, ChainStatus, ConsensusCapability, + 
Filter, Histogram, LocalTransactionStatus, Log, OperationsInfo, Peers, Receipt, + RecoveredAccount, RichHeader, RpcSettings, Transaction, TransactionStats, VersionInfo, + }, }; use Host; /// Parity implementation. pub struct ParityClient { - client: Arc, - miner: Arc, - updater: Arc, - sync: Arc, - net: Arc, - logger: Arc, - settings: Arc, - signer: Option>, - ws_address: Option, - snapshot: Option>, + client: Arc, + miner: Arc, + updater: Arc, + sync: Arc, + net: Arc, + logger: Arc, + settings: Arc, + signer: Option>, + ws_address: Option, + snapshot: Option>, } -impl ParityClient where - C: BlockChainClient, +impl ParityClient +where + C: BlockChainClient, { - /// Creates new `ParityClient`. - pub fn new( - client: Arc, - miner: Arc, - sync: Arc, - updater: Arc, - net: Arc, - logger: Arc, - settings: Arc, - signer: Option>, - ws_address: Option, - snapshot: Option>, - ) -> Self { - ParityClient { - client, - miner, - sync, - updater, - net, - logger, - settings, - signer, - ws_address, - snapshot, - } - } + /// Creates new `ParityClient`. 
+ pub fn new( + client: Arc, + miner: Arc, + sync: Arc, + updater: Arc, + net: Arc, + logger: Arc, + settings: Arc, + signer: Option>, + ws_address: Option, + snapshot: Option>, + ) -> Self { + ParityClient { + client, + miner, + sync, + updater, + net, + logger, + settings, + signer, + ws_address, + snapshot, + } + } } -impl Parity for ParityClient where - S: StateInfo + 'static, - C: miner::BlockChainClient + BlockChainClient + StateClient + Call + 'static, - M: MinerService + 'static, - U: UpdateService + 'static, +impl Parity for ParityClient +where + S: StateInfo + 'static, + C: miner::BlockChainClient + + BlockChainClient + + StateClient + + Call + + 'static, + M: MinerService + 'static, + U: UpdateService + 'static, { - type Metadata = Metadata; - - fn transactions_limit(&self) -> Result { - Ok(self.miner.queue_status().limits.max_count) - } - - fn min_gas_price(&self) -> Result { - Ok(self.miner.queue_status().options.minimal_gas_price) - } - - fn extra_data(&self) -> Result { - Ok(Bytes::new(self.miner.authoring_params().extra_data)) - } - - fn gas_floor_target(&self) -> Result { - Ok(self.miner.authoring_params().gas_range_target.0) - } - - fn gas_ceil_target(&self) -> Result { - Ok(self.miner.authoring_params().gas_range_target.1) - } - - fn dev_logs(&self) -> Result> { - warn!("This method is deprecated and will be removed in future. 
See PR #10102"); - let logs = self.logger.logs(); - Ok(logs.as_slice().to_owned()) - } - - fn dev_logs_levels(&self) -> Result { - Ok(self.logger.levels().to_owned()) - } - - fn net_chain(&self) -> Result { - Ok(self.settings.chain.clone()) - } - - fn chain(&self) -> Result { - Ok(self.client.spec_name()) - } - - fn net_peers(&self) -> Result { - let sync_status = self.sync.status(); - let num_peers_range = self.net.num_peers_range(); - debug_assert!(num_peers_range.end() >= num_peers_range.start()); - let peers = self.sync.peers().into_iter().map(Into::into).collect(); - - Ok(Peers { - active: sync_status.num_active_peers, - connected: sync_status.num_peers, - max: sync_status.current_max_peers(*num_peers_range.start(), *num_peers_range.end()), - peers, - }) - } - - fn net_port(&self) -> Result { - Ok(self.settings.network_port) - } - - fn node_name(&self) -> Result { - Ok(self.settings.name.clone()) - } - - fn registry_address(&self) -> Result> { - Ok( - self.client - .additional_params() - .get("registrar") - .and_then(|s| Address::from_str(s).ok()) - ) - } - - fn rpc_settings(&self) -> Result { - Ok(RpcSettings { - enabled: self.settings.rpc_enabled, - interface: self.settings.rpc_interface.clone(), - port: self.settings.rpc_port as u64, - }) - } - - fn default_extra_data(&self) -> Result { - Ok(Bytes::new(version_data())) - } - - fn gas_price_histogram(&self) -> BoxFuture { - Box::new(future::done(self.client - .gas_price_corpus(100) - .histogram(10) - .ok_or_else(errors::not_enough_data) - .map(Into::into) - )) - } - - fn unsigned_transactions_count(&self) -> Result { - match self.signer { - None => Err(errors::signer_disabled()), - Some(ref signer) => Ok(signer.len()), - } - } - - fn generate_secret_phrase(&self) -> Result { - Ok(random_phrase(12)) - } - - fn phrase_to_address(&self, phrase: String) -> Result { - Ok(Brain::new(phrase).generate().expect("Brain::generate always returns Ok; qed").address()) - } - - fn list_accounts(&self, count: u64, after: 
Option, block_number: Option) -> Result>> { - let number = match block_number.unwrap_or_default() { - BlockNumber::Pending => { - warn!("BlockNumber::Pending is unsupported"); - return Ok(None); - }, - - num => block_number_to_id(num) - }; - - Ok(self.client - .list_accounts(number, after.map(Into::into).as_ref(), count) - .map(|a| a.into_iter().map(Into::into).collect())) - } - - fn list_storage_keys(&self, address: H160, count: u64, after: Option, block_number: Option) -> Result>> { - let number = match block_number.unwrap_or_default() { - BlockNumber::Pending => { - warn!("BlockNumber::Pending is unsupported"); - return Ok(None); - }, - - num => block_number_to_id(num) - }; - - Ok(self.client - .list_storage(number, &address, after.map(Into::into).as_ref(), count) - .map(|a| a.into_iter().map(Into::into).collect())) - } - - fn encrypt_message(&self, key: H512, phrase: Bytes) -> Result { - ecies::encrypt(&key, &DEFAULT_MAC, &phrase.0) - .map_err(errors::encryption) - .map(Into::into) - } - - fn pending_transactions(&self, limit: Option) -> Result> { - let ready_transactions = self.miner.ready_transactions( - &*self.client, - limit.unwrap_or_else(usize::max_value), - miner::PendingOrdering::Priority, - ); - - Ok(ready_transactions - .into_iter() - .map(|t| Transaction::from_pending(t.pending().clone())) - .collect() - ) - } - - fn all_transactions(&self) -> Result> { - let all_transactions = self.miner.queued_transactions(); - - Ok(all_transactions - .into_iter() - .map(|t| Transaction::from_pending(t.pending().clone())) - .collect() - ) - } - - fn all_transaction_hashes(&self) -> Result> { - Ok(self.miner.queued_transaction_hashes()) - } - - fn future_transactions(&self) -> Result> { - Err(errors::deprecated("Use `parity_allTransaction` instead.")) - } - - fn pending_transactions_stats(&self) -> Result> { - let stats = self.sync.transactions_stats(); - Ok(stats.into_iter() - .map(|(hash, stats)| (hash, stats.into())) - .collect() - ) - } - - fn 
local_transactions(&self) -> Result> { - let transactions = self.miner.local_transactions(); - Ok(transactions - .into_iter() - .map(|(hash, status)| (hash, LocalTransactionStatus::from(status))) - .collect() - ) - } - - fn ws_url(&self) -> Result { - helpers::to_url(&self.ws_address) - .ok_or_else(errors::ws_disabled) - } - - fn next_nonce(&self, address: H160) -> BoxFuture { - Box::new(future::ok(self.miner.next_nonce(&*self.client, &address))) - } - - fn mode(&self) -> Result { - Ok(self.client.mode().to_string()) - } - - fn enode(&self) -> Result { - self.sync.enode().ok_or_else(errors::network_disabled) - } - - fn consensus_capability(&self) -> Result { - Ok(self.updater.capability().into()) - } - - fn version_info(&self) -> Result { - Ok(self.updater.version_info().into()) - } - - fn releases_info(&self) -> Result> { - Ok(self.updater.info().map(Into::into)) - } - - fn chain_status(&self) -> Result { - let chain_info = self.client.chain_info(); - - let gap = chain_info.ancient_block_number.map(|x| U256::from(x + 1)) - .and_then(|first| chain_info.first_block_number.map(|last| (first, U256::from(last)))); - - Ok(ChainStatus { - block_gap: gap, - }) - } - - fn node_kind(&self) -> Result<::v1::types::NodeKind> { - use ::v1::types::{NodeKind, Availability, Capability}; - - Ok(NodeKind { - availability: Availability::Personal, - capability: Capability::Full, - }) - } - - fn block_header(&self, number: Option) -> BoxFuture { - const EXTRA_INFO_PROOF: &str = "Object exists in blockchain (fetched earlier), extra_info is always available if object exists; qed"; - let number = number.unwrap_or_default(); - - let (header, extra) = if number == BlockNumber::Pending { - let info = self.client.chain_info(); - let header = - try_bf!(self.miner.pending_block_header(info.best_block_number).ok_or_else(errors::unknown_block)); - - (header.encoded(), None) - } else { - let id = match number { - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => 
BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => unreachable!(), // Already covered - }; - - let header = try_bf!(self.client.block_header(id).ok_or_else(errors::unknown_block)); - let info = self.client.block_extra_info(id).expect(EXTRA_INFO_PROOF); - - (header, Some(info)) - }; - - Box::new(future::ok(RichHeader { - inner: header.into(), - extra_info: extra.unwrap_or_default(), - })) - } - - fn block_receipts(&self, number: Option) -> BoxFuture> { - let number = number.unwrap_or_default(); - - let id = match number { - BlockNumber::Pending => { - let info = self.client.chain_info(); - let receipts = try_bf!(self.miner.pending_receipts(info.best_block_number).ok_or_else(errors::unknown_block)); - return Box::new(future::ok(receipts - .into_iter() - .map(Into::into) - .collect() - )) - }, - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, - }; - let receipts = try_bf!(self.client.localized_block_receipts(id).ok_or_else(errors::unknown_block)); - Box::new(future::ok(receipts.into_iter().map(Into::into).collect())) - } - - fn ipfs_cid(&self, content: Bytes) -> Result { - ipfs::cid(content) - } - - fn call(&self, requests: Vec, num: Option) -> Result> { - let requests = requests - .into_iter() - .map(|request| Ok(( - fake_sign::sign_call(request.into())?, - Default::default() - ))) - .collect::>>()?; - - let num = num.unwrap_or_default(); - - let (mut state, header) = if num == BlockNumber::Pending { - let info = self.client.chain_info(); - let state = self.miner.pending_state(info.best_block_number).ok_or_else(errors::state_pruned)?; - let header = self.miner.pending_block_header(info.best_block_number).ok_or_else(errors::state_pruned)?; - - (state, header) - } else { - let id = match num { - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, - 
BlockNumber::Pending => unreachable!(), // Already covered - }; - - let state = self.client.state_at(id).ok_or_else(errors::state_pruned)?; - let header = self.client.block_header(id).ok_or_else(errors::state_pruned)?.decode().map_err(errors::decode)?; - - (state, header) - }; - - self.client.call_many(&requests, &mut state, &header) - .map(|res| res.into_iter().map(|res| res.output.into()).collect()) - .map_err(errors::call) - } - - fn submit_work_detail(&self, nonce: H64, pow_hash: H256, mix_hash: H256) -> Result { - helpers::submit_work_detail(&self.client, &self.miner, nonce, pow_hash, mix_hash) - } - - fn status(&self) -> Result<()> { - let has_peers = self.settings.is_dev_chain || self.sync.status().num_peers > 0; - let is_warping = match self.snapshot.as_ref().map(|s| s.status()) { - Some(RestorationStatus::Ongoing { .. }) => true, - _ => false, - }; - let is_not_syncing = - !is_warping && - !is_major_importing(Some(self.sync.status().state), self.client.queue_info()); - - if has_peers && is_not_syncing { - Ok(()) - } else { - Err(errors::status_error(has_peers)) - } - } - - fn logs_no_tx_hash(&self, filter: Filter) -> BoxFuture> { - use v1::impls::eth::base_logs; - // only specific impl for lightclient - base_logs(&*self.client, &*self.miner, filter) - } - - fn verify_signature(&self, is_prefixed: bool, message: Bytes, r: H256, s: H256, v: U64) -> Result { - verify_signature(is_prefixed, message, r, s, v, self.client.signing_chain_id()) - } + type Metadata = Metadata; + + fn transactions_limit(&self) -> Result { + Ok(self.miner.queue_status().limits.max_count) + } + + fn min_gas_price(&self) -> Result { + Ok(self.miner.queue_status().options.minimal_gas_price) + } + + fn extra_data(&self) -> Result { + Ok(Bytes::new(self.miner.authoring_params().extra_data)) + } + + fn gas_floor_target(&self) -> Result { + Ok(self.miner.authoring_params().gas_range_target.0) + } + + fn gas_ceil_target(&self) -> Result { + Ok(self.miner.authoring_params().gas_range_target.1) 
+ } + + fn dev_logs(&self) -> Result> { + warn!("This method is deprecated and will be removed in future. See PR #10102"); + let logs = self.logger.logs(); + Ok(logs.as_slice().to_owned()) + } + + fn dev_logs_levels(&self) -> Result { + Ok(self.logger.levels().to_owned()) + } + + fn net_chain(&self) -> Result { + Ok(self.settings.chain.clone()) + } + + fn chain(&self) -> Result { + Ok(self.client.spec_name()) + } + + fn net_peers(&self) -> Result { + let sync_status = self.sync.status(); + let num_peers_range = self.net.num_peers_range(); + debug_assert!(num_peers_range.end() >= num_peers_range.start()); + let peers = self.sync.peers().into_iter().map(Into::into).collect(); + + Ok(Peers { + active: sync_status.num_active_peers, + connected: sync_status.num_peers, + max: sync_status.current_max_peers(*num_peers_range.start(), *num_peers_range.end()), + peers, + }) + } + + fn net_port(&self) -> Result { + Ok(self.settings.network_port) + } + + fn node_name(&self) -> Result { + Ok(self.settings.name.clone()) + } + + fn registry_address(&self) -> Result> { + Ok(self + .client + .additional_params() + .get("registrar") + .and_then(|s| Address::from_str(s).ok())) + } + + fn rpc_settings(&self) -> Result { + Ok(RpcSettings { + enabled: self.settings.rpc_enabled, + interface: self.settings.rpc_interface.clone(), + port: self.settings.rpc_port as u64, + }) + } + + fn default_extra_data(&self) -> Result { + Ok(Bytes::new(version_data())) + } + + fn gas_price_histogram(&self) -> BoxFuture { + Box::new(future::done( + self.client + .gas_price_corpus(100) + .histogram(10) + .ok_or_else(errors::not_enough_data) + .map(Into::into), + )) + } + + fn unsigned_transactions_count(&self) -> Result { + match self.signer { + None => Err(errors::signer_disabled()), + Some(ref signer) => Ok(signer.len()), + } + } + + fn generate_secret_phrase(&self) -> Result { + Ok(random_phrase(12)) + } + + fn phrase_to_address(&self, phrase: String) -> Result { + Ok(Brain::new(phrase) + .generate() + 
.expect("Brain::generate always returns Ok; qed") + .address()) + } + + fn list_accounts( + &self, + count: u64, + after: Option, + block_number: Option, + ) -> Result>> { + let number = match block_number.unwrap_or_default() { + BlockNumber::Pending => { + warn!("BlockNumber::Pending is unsupported"); + return Ok(None); + } + + num => block_number_to_id(num), + }; + + Ok(self + .client + .list_accounts(number, after.map(Into::into).as_ref(), count) + .map(|a| a.into_iter().map(Into::into).collect())) + } + + fn list_storage_keys( + &self, + address: H160, + count: u64, + after: Option, + block_number: Option, + ) -> Result>> { + let number = match block_number.unwrap_or_default() { + BlockNumber::Pending => { + warn!("BlockNumber::Pending is unsupported"); + return Ok(None); + } + + num => block_number_to_id(num), + }; + + Ok(self + .client + .list_storage(number, &address, after.map(Into::into).as_ref(), count) + .map(|a| a.into_iter().map(Into::into).collect())) + } + + fn encrypt_message(&self, key: H512, phrase: Bytes) -> Result { + ecies::encrypt(&key, &DEFAULT_MAC, &phrase.0) + .map_err(errors::encryption) + .map(Into::into) + } + + fn pending_transactions(&self, limit: Option) -> Result> { + let ready_transactions = self.miner.ready_transactions( + &*self.client, + limit.unwrap_or_else(usize::max_value), + miner::PendingOrdering::Priority, + ); + + Ok(ready_transactions + .into_iter() + .map(|t| Transaction::from_pending(t.pending().clone())) + .collect()) + } + + fn all_transactions(&self) -> Result> { + let all_transactions = self.miner.queued_transactions(); + + Ok(all_transactions + .into_iter() + .map(|t| Transaction::from_pending(t.pending().clone())) + .collect()) + } + + fn all_transaction_hashes(&self) -> Result> { + Ok(self.miner.queued_transaction_hashes()) + } + + fn future_transactions(&self) -> Result> { + Err(errors::deprecated("Use `parity_allTransaction` instead.")) + } + + fn pending_transactions_stats(&self) -> Result> { + let stats = 
self.sync.transactions_stats(); + Ok(stats + .into_iter() + .map(|(hash, stats)| (hash, stats.into())) + .collect()) + } + + fn local_transactions(&self) -> Result> { + let transactions = self.miner.local_transactions(); + Ok(transactions + .into_iter() + .map(|(hash, status)| (hash, LocalTransactionStatus::from(status))) + .collect()) + } + + fn ws_url(&self) -> Result { + helpers::to_url(&self.ws_address).ok_or_else(errors::ws_disabled) + } + + fn next_nonce(&self, address: H160) -> BoxFuture { + Box::new(future::ok(self.miner.next_nonce(&*self.client, &address))) + } + + fn mode(&self) -> Result { + Ok(self.client.mode().to_string()) + } + + fn enode(&self) -> Result { + self.sync.enode().ok_or_else(errors::network_disabled) + } + + fn consensus_capability(&self) -> Result { + Ok(self.updater.capability().into()) + } + + fn version_info(&self) -> Result { + Ok(self.updater.version_info().into()) + } + + fn releases_info(&self) -> Result> { + Ok(self.updater.info().map(Into::into)) + } + + fn chain_status(&self) -> Result { + let chain_info = self.client.chain_info(); + + let gap = chain_info + .ancient_block_number + .map(|x| U256::from(x + 1)) + .and_then(|first| { + chain_info + .first_block_number + .map(|last| (first, U256::from(last))) + }); + + Ok(ChainStatus { block_gap: gap }) + } + + fn node_kind(&self) -> Result<::v1::types::NodeKind> { + use v1::types::{Availability, Capability, NodeKind}; + + Ok(NodeKind { + availability: Availability::Personal, + capability: Capability::Full, + }) + } + + fn block_header(&self, number: Option) -> BoxFuture { + const EXTRA_INFO_PROOF: &str = "Object exists in blockchain (fetched earlier), extra_info is always available if object exists; qed"; + let number = number.unwrap_or_default(); + + let (header, extra) = if number == BlockNumber::Pending { + let info = self.client.chain_info(); + let header = try_bf!(self + .miner + .pending_block_header(info.best_block_number) + .ok_or_else(errors::unknown_block)); + + 
(header.encoded(), None) + } else { + let id = match number { + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Pending => unreachable!(), // Already covered + }; + + let header = try_bf!(self + .client + .block_header(id) + .ok_or_else(errors::unknown_block)); + let info = self.client.block_extra_info(id).expect(EXTRA_INFO_PROOF); + + (header, Some(info)) + }; + + Box::new(future::ok(RichHeader { + inner: header.into(), + extra_info: extra.unwrap_or_default(), + })) + } + + fn block_receipts(&self, number: Option) -> BoxFuture> { + let number = number.unwrap_or_default(); + + let id = match number { + BlockNumber::Pending => { + let info = self.client.chain_info(); + let receipts = try_bf!(self + .miner + .pending_receipts(info.best_block_number) + .ok_or_else(errors::unknown_block)); + return Box::new(future::ok(receipts.into_iter().map(Into::into).collect())); + } + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, + }; + let receipts = try_bf!(self + .client + .localized_block_receipts(id) + .ok_or_else(errors::unknown_block)); + Box::new(future::ok(receipts.into_iter().map(Into::into).collect())) + } + + fn ipfs_cid(&self, content: Bytes) -> Result { + ipfs::cid(content) + } + + fn call(&self, requests: Vec, num: Option) -> Result> { + let requests = requests + .into_iter() + .map(|request| Ok((fake_sign::sign_call(request.into())?, Default::default()))) + .collect::>>()?; + + let num = num.unwrap_or_default(); + + let (mut state, header) = if num == BlockNumber::Pending { + let info = self.client.chain_info(); + let state = self + .miner + .pending_state(info.best_block_number) + .ok_or_else(errors::state_pruned)?; + let header = self + .miner + .pending_block_header(info.best_block_number) + .ok_or_else(errors::state_pruned)?; + + (state, header) + } else { + let id = match 
num { + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Pending => unreachable!(), // Already covered + }; + + let state = self.client.state_at(id).ok_or_else(errors::state_pruned)?; + let header = self + .client + .block_header(id) + .ok_or_else(errors::state_pruned)? + .decode() + .map_err(errors::decode)?; + + (state, header) + }; + + self.client + .call_many(&requests, &mut state, &header) + .map(|res| res.into_iter().map(|res| res.output.into()).collect()) + .map_err(errors::call) + } + + fn submit_work_detail(&self, nonce: H64, pow_hash: H256, mix_hash: H256) -> Result { + helpers::submit_work_detail(&self.client, &self.miner, nonce, pow_hash, mix_hash) + } + + fn status(&self) -> Result<()> { + let has_peers = self.settings.is_dev_chain || self.sync.status().num_peers > 0; + let is_warping = match self.snapshot.as_ref().map(|s| s.status()) { + Some(RestorationStatus::Ongoing { .. }) => true, + _ => false, + }; + let is_not_syncing = !is_warping + && !is_major_importing(Some(self.sync.status().state), self.client.queue_info()); + + if has_peers && is_not_syncing { + Ok(()) + } else { + Err(errors::status_error(has_peers)) + } + } + + fn logs_no_tx_hash(&self, filter: Filter) -> BoxFuture> { + use v1::impls::eth::base_logs; + // only specific impl for lightclient + base_logs(&*self.client, &*self.miner, filter) + } + + fn verify_signature( + &self, + is_prefixed: bool, + message: Bytes, + r: H256, + s: H256, + v: U64, + ) -> Result { + verify_signature( + is_prefixed, + message, + r, + s, + v, + self.client.signing_chain_id(), + ) + } } diff --git a/rpc/src/v1/impls/parity_accounts.rs b/rpc/src/v1/impls/parity_accounts.rs index e52f8b7ac..b229e0d28 100644 --- a/rpc/src/v1/impls/parity_accounts.rs +++ b/rpc/src/v1/impls/parity_accounts.rs @@ -15,353 +15,408 @@ // along with Parity Ethereum. If not, see . //! 
Account management (personal) rpc implementation -use std::sync::Arc; -use std::collections::{ - btree_map::{BTreeMap, Entry}, - HashSet, +use std::{ + collections::{ + btree_map::{BTreeMap, Entry}, + HashSet, + }, + sync::Arc, }; -use ethereum_types::{Address, H160, H256, H520}; -use ethkey::{Brain, Generator, Secret}; -use ethstore::KeyFile; use accounts::AccountProvider; +use ethereum_types::{Address, H160, H256, H520}; +use ethkey::{Brain, Generator, Password, Secret}; +use ethstore::KeyFile; use jsonrpc_core::Result; -use v1::helpers::deprecated::{self, DeprecationNotice}; -use v1::helpers::errors; -use v1::traits::{ParityAccounts, ParityAccountsInfo}; -use v1::types::{Derive, DeriveHierarchical, DeriveHash,ExtAccountInfo, AccountInfo, HwAccountInfo}; -use ethkey::Password; +use v1::{ + helpers::{ + deprecated::{self, DeprecationNotice}, + errors, + }, + traits::{ParityAccounts, ParityAccountsInfo}, + types::{AccountInfo, Derive, DeriveHash, DeriveHierarchical, ExtAccountInfo, HwAccountInfo}, +}; /// Account management (personal) rpc implementation. 
pub struct ParityAccountsClient { - accounts: Arc, - deprecation_notice: DeprecationNotice, + accounts: Arc, + deprecation_notice: DeprecationNotice, } impl ParityAccountsClient { - /// Creates new PersonalClient - pub fn new(store: &Arc) -> Self { - ParityAccountsClient { - accounts: store.clone(), - deprecation_notice: Default::default(), - } - } + /// Creates new PersonalClient + pub fn new(store: &Arc) -> Self { + ParityAccountsClient { + accounts: store.clone(), + deprecation_notice: Default::default(), + } + } } impl ParityAccountsClient { - fn deprecation_notice(&self, method: &'static str) { - self.deprecation_notice.print(method, deprecated::msgs::ACCOUNTS); - } + fn deprecation_notice(&self, method: &'static str) { + self.deprecation_notice + .print(method, deprecated::msgs::ACCOUNTS); + } } impl ParityAccountsInfo for ParityAccountsClient { - fn accounts_info(&self) -> Result> { - self.deprecation_notice("parity_accountsInfo"); + fn accounts_info(&self) -> Result> { + self.deprecation_notice("parity_accountsInfo"); - let dapp_accounts = self.accounts.accounts() - .map_err(|e| errors::account("Could not fetch accounts.", e))? - .into_iter().collect::>(); + let dapp_accounts = self + .accounts + .accounts() + .map_err(|e| errors::account("Could not fetch accounts.", e))? 
+ .into_iter() + .collect::>(); - let info = self.accounts.accounts_info().map_err(|e| errors::account("Could not fetch account info.", e))?; - let other = self.accounts.addresses_info(); + let info = self + .accounts + .accounts_info() + .map_err(|e| errors::account("Could not fetch account info.", e))?; + let other = self.accounts.addresses_info(); - Ok(info - .into_iter() - .chain(other.into_iter()) - .filter(|&(ref a, _)| dapp_accounts.contains(a)) - .map(|(a, v)| (H160::from(a), AccountInfo { name: v.name })) - .collect() - ) - } + Ok(info + .into_iter() + .chain(other.into_iter()) + .filter(|&(ref a, _)| dapp_accounts.contains(a)) + .map(|(a, v)| (H160::from(a), AccountInfo { name: v.name })) + .collect()) + } - fn hardware_accounts_info(&self) -> Result> { - self.deprecation_notice("parity_hardwareAccountsInfo"); + fn hardware_accounts_info(&self) -> Result> { + self.deprecation_notice("parity_hardwareAccountsInfo"); - let info = self.accounts.hardware_accounts_info().map_err(|e| errors::account("Could not fetch account info.", e))?; - Ok(info - .into_iter() - .map(|(a, v)| (H160::from(a), HwAccountInfo { name: v.name, manufacturer: v.meta })) - .collect() - ) - } + let info = self + .accounts + .hardware_accounts_info() + .map_err(|e| errors::account("Could not fetch account info.", e))?; + Ok(info + .into_iter() + .map(|(a, v)| { + ( + H160::from(a), + HwAccountInfo { + name: v.name, + manufacturer: v.meta, + }, + ) + }) + .collect()) + } - fn locked_hardware_accounts_info(&self) -> Result> { - self.deprecation_notice("parity_lockedHardwareAccountsInfo"); + fn locked_hardware_accounts_info(&self) -> Result> { + self.deprecation_notice("parity_lockedHardwareAccountsInfo"); - self.accounts.locked_hardware_accounts().map_err(|e| errors::account("Error communicating with hardware wallet.", e)) - } + self.accounts + .locked_hardware_accounts() + .map_err(|e| errors::account("Error communicating with hardware wallet.", e)) + } - fn default_account(&self) -> 
Result { - self.deprecation_notice("parity_defaultAccount"); + fn default_account(&self) -> Result { + self.deprecation_notice("parity_defaultAccount"); - Ok(self.accounts.default_account() - .map(Into::into) - .ok() - .unwrap_or_default()) - } + Ok(self + .accounts + .default_account() + .map(Into::into) + .ok() + .unwrap_or_default()) + } } impl ParityAccounts for ParityAccountsClient { - fn all_accounts_info(&self) -> Result> { - let info = self.accounts.accounts_info().map_err(|e| errors::account("Could not fetch account info.", e))?; - let other = self.accounts.addresses_info(); + fn all_accounts_info(&self) -> Result> { + let info = self + .accounts + .accounts_info() + .map_err(|e| errors::account("Could not fetch account info.", e))?; + let other = self.accounts.addresses_info(); - let account_iter = info - .into_iter() - .chain(other.into_iter()) - .map(|(address, v)| (address.into(), ExtAccountInfo { - name: v.name, - meta: v.meta, - uuid: v.uuid.map(|uuid| uuid.to_string()) - })); + let account_iter = info + .into_iter() + .chain(other.into_iter()) + .map(|(address, v)| { + ( + address.into(), + ExtAccountInfo { + name: v.name, + meta: v.meta, + uuid: v.uuid.map(|uuid| uuid.to_string()), + }, + ) + }); - let mut accounts: BTreeMap = BTreeMap::new(); + let mut accounts: BTreeMap = BTreeMap::new(); - for (address, account) in account_iter { - match accounts.entry(address) { - // Insert only if occupied entry isn't already an account with UUID - Entry::Occupied(ref mut occupied) if occupied.get().uuid.is_none() => { - occupied.insert(account); - }, - Entry::Vacant(vacant) => { - vacant.insert(account); - }, - _ => {} - }; - } + for (address, account) in account_iter { + match accounts.entry(address) { + // Insert only if occupied entry isn't already an account with UUID + Entry::Occupied(ref mut occupied) if occupied.get().uuid.is_none() => { + occupied.insert(account); + } + Entry::Vacant(vacant) => { + vacant.insert(account); + } + _ => {} + }; + } - 
Ok(accounts) - } + Ok(accounts) + } - fn new_account_from_phrase(&self, phrase: String, pass: Password) -> Result { - self.deprecation_notice("parity_newAccountFromPhrase"); - let brain = Brain::new(phrase).generate().unwrap(); - self.accounts.insert_account(brain.secret().clone(), &pass) - .map(Into::into) - .map_err(|e| errors::account("Could not create account.", e)) - } + fn new_account_from_phrase(&self, phrase: String, pass: Password) -> Result { + self.deprecation_notice("parity_newAccountFromPhrase"); + let brain = Brain::new(phrase).generate().unwrap(); + self.accounts + .insert_account(brain.secret().clone(), &pass) + .map(Into::into) + .map_err(|e| errors::account("Could not create account.", e)) + } - fn new_account_from_wallet(&self, json: String, pass: Password) -> Result { - self.deprecation_notice("parity_newAccountFromWallet"); - self.accounts.import_presale(json.as_bytes(), &pass) - .or_else(|_| self.accounts.import_wallet(json.as_bytes(), &pass, true)) - .map(Into::into) - .map_err(|e| errors::account("Could not create account.", e)) - } + fn new_account_from_wallet(&self, json: String, pass: Password) -> Result { + self.deprecation_notice("parity_newAccountFromWallet"); + self.accounts + .import_presale(json.as_bytes(), &pass) + .or_else(|_| self.accounts.import_wallet(json.as_bytes(), &pass, true)) + .map(Into::into) + .map_err(|e| errors::account("Could not create account.", e)) + } - fn new_account_from_secret(&self, secret: H256, pass: Password) -> Result { - self.deprecation_notice("parity_newAccountFromSecret"); - let secret = Secret::from_unsafe_slice(&secret.0) - .map_err(|e| errors::account("Could not create account.", e))?; - self.accounts.insert_account(secret, &pass) - .map(Into::into) - .map_err(|e| errors::account("Could not create account.", e)) - } + fn new_account_from_secret(&self, secret: H256, pass: Password) -> Result { + self.deprecation_notice("parity_newAccountFromSecret"); + let secret = 
Secret::from_unsafe_slice(&secret.0) + .map_err(|e| errors::account("Could not create account.", e))?; + self.accounts + .insert_account(secret, &pass) + .map(Into::into) + .map_err(|e| errors::account("Could not create account.", e)) + } - fn test_password(&self, account: H160, password: Password) -> Result { - self.deprecation_notice("parity_testPassword"); - let account: Address = account.into(); + fn test_password(&self, account: H160, password: Password) -> Result { + self.deprecation_notice("parity_testPassword"); + let account: Address = account.into(); - self.accounts - .test_password(&account, &password) - .map_err(|e| errors::account("Could not fetch account info.", e)) - } + self.accounts + .test_password(&account, &password) + .map_err(|e| errors::account("Could not fetch account info.", e)) + } - fn change_password(&self, account: H160, password: Password, new_password: Password) -> Result { - self.deprecation_notice("parity_changePassword"); - let account: Address = account.into(); - self.accounts - .change_password(&account, password, new_password) - .map(|_| true) - .map_err(|e| errors::account("Could not fetch account info.", e)) - } + fn change_password( + &self, + account: H160, + password: Password, + new_password: Password, + ) -> Result { + self.deprecation_notice("parity_changePassword"); + let account: Address = account.into(); + self.accounts + .change_password(&account, password, new_password) + .map(|_| true) + .map_err(|e| errors::account("Could not fetch account info.", e)) + } - fn kill_account(&self, account: H160, password: Password) -> Result { - self.deprecation_notice("parity_killAccount"); - let account: Address = account.into(); - self.accounts - .kill_account(&account, &password) - .map(|_| true) - .map_err(|e| errors::account("Could not delete account.", e)) - } + fn kill_account(&self, account: H160, password: Password) -> Result { + self.deprecation_notice("parity_killAccount"); + let account: Address = account.into(); + 
self.accounts + .kill_account(&account, &password) + .map(|_| true) + .map_err(|e| errors::account("Could not delete account.", e)) + } - fn remove_address(&self, addr: H160) -> Result { - self.deprecation_notice("parity_removeAddresss"); - let addr: Address = addr.into(); + fn remove_address(&self, addr: H160) -> Result { + self.deprecation_notice("parity_removeAddresss"); + let addr: Address = addr.into(); - self.accounts.remove_address(addr); - Ok(true) - } + self.accounts.remove_address(addr); + Ok(true) + } - fn set_account_name(&self, addr: H160, name: String) -> Result { - self.deprecation_notice("parity_setAccountName"); - let addr: Address = addr.into(); + fn set_account_name(&self, addr: H160, name: String) -> Result { + self.deprecation_notice("parity_setAccountName"); + let addr: Address = addr.into(); - self.accounts.set_account_name(addr.clone(), name.clone()) - .unwrap_or_else(|_| self.accounts.set_address_name(addr, name)); - Ok(true) - } + self.accounts + .set_account_name(addr.clone(), name.clone()) + .unwrap_or_else(|_| self.accounts.set_address_name(addr, name)); + Ok(true) + } - fn set_account_meta(&self, addr: H160, meta: String) -> Result { - self.deprecation_notice("parity_setAccountMeta"); - let addr: Address = addr.into(); + fn set_account_meta(&self, addr: H160, meta: String) -> Result { + self.deprecation_notice("parity_setAccountMeta"); + let addr: Address = addr.into(); - self.accounts.set_account_meta(addr.clone(), meta.clone()) - .unwrap_or_else(|_| self.accounts.set_address_meta(addr, meta)); - Ok(true) - } + self.accounts + .set_account_meta(addr.clone(), meta.clone()) + .unwrap_or_else(|_| self.accounts.set_address_meta(addr, meta)); + Ok(true) + } - fn import_geth_accounts(&self, addresses: Vec) -> Result> { - self.deprecation_notice("parity_importGethAccounts"); - self.accounts - .import_geth_accounts(into_vec(addresses), false) - .map(into_vec) - .map_err(|e| errors::account("Couldn't import Geth accounts", e)) - } + fn 
import_geth_accounts(&self, addresses: Vec) -> Result> { + self.deprecation_notice("parity_importGethAccounts"); + self.accounts + .import_geth_accounts(into_vec(addresses), false) + .map(into_vec) + .map_err(|e| errors::account("Couldn't import Geth accounts", e)) + } - fn geth_accounts(&self) -> Result> { - self.deprecation_notice("parity_listGethAccounts"); - Ok(into_vec(self.accounts.list_geth_accounts(false))) - } + fn geth_accounts(&self) -> Result> { + self.deprecation_notice("parity_listGethAccounts"); + Ok(into_vec(self.accounts.list_geth_accounts(false))) + } - fn create_vault(&self, name: String, password: Password) -> Result { - self.deprecation_notice("parity_newVault"); + fn create_vault(&self, name: String, password: Password) -> Result { + self.deprecation_notice("parity_newVault"); - self.accounts - .create_vault(&name, &password) - .map_err(|e| errors::account("Could not create vault.", e)) - .map(|_| true) - } + self.accounts + .create_vault(&name, &password) + .map_err(|e| errors::account("Could not create vault.", e)) + .map(|_| true) + } - fn open_vault(&self, name: String, password: Password) -> Result { - self.deprecation_notice("parity_openVault"); + fn open_vault(&self, name: String, password: Password) -> Result { + self.deprecation_notice("parity_openVault"); - self.accounts - .open_vault(&name, &password) - .map_err(|e| errors::account("Could not open vault.", e)) - .map(|_| true) - } + self.accounts + .open_vault(&name, &password) + .map_err(|e| errors::account("Could not open vault.", e)) + .map(|_| true) + } - fn close_vault(&self, name: String) -> Result { - self.deprecation_notice("parity_closeVault"); + fn close_vault(&self, name: String) -> Result { + self.deprecation_notice("parity_closeVault"); - self.accounts - .close_vault(&name) - .map_err(|e| errors::account("Could not close vault.", e)) - .map(|_| true) - } + self.accounts + .close_vault(&name) + .map_err(|e| errors::account("Could not close vault.", e)) + .map(|_| true) + 
} - fn list_vaults(&self) -> Result> { - self.deprecation_notice("parity_listVaults"); + fn list_vaults(&self) -> Result> { + self.deprecation_notice("parity_listVaults"); - self.accounts - .list_vaults() - .map_err(|e| errors::account("Could not list vaults.", e)) - } + self.accounts + .list_vaults() + .map_err(|e| errors::account("Could not list vaults.", e)) + } - fn list_opened_vaults(&self) -> Result> { - self.deprecation_notice("parity_listOpenedVaults"); + fn list_opened_vaults(&self) -> Result> { + self.deprecation_notice("parity_listOpenedVaults"); - self.accounts - .list_opened_vaults() - .map_err(|e| errors::account("Could not list vaults.", e)) - } + self.accounts + .list_opened_vaults() + .map_err(|e| errors::account("Could not list vaults.", e)) + } - fn change_vault_password(&self, name: String, new_password: Password) -> Result { - self.deprecation_notice("parity_changeVaultPassword"); + fn change_vault_password(&self, name: String, new_password: Password) -> Result { + self.deprecation_notice("parity_changeVaultPassword"); - self.accounts - .change_vault_password(&name, &new_password) - .map_err(|e| errors::account("Could not change vault password.", e)) - .map(|_| true) - } + self.accounts + .change_vault_password(&name, &new_password) + .map_err(|e| errors::account("Could not change vault password.", e)) + .map(|_| true) + } - fn change_vault(&self, address: H160, new_vault: String) -> Result { - self.deprecation_notice("parity_changeVault"); - self.accounts - .change_vault(address.into(), &new_vault) - .map_err(|e| errors::account("Could not change vault.", e)) - .map(|_| true) - } + fn change_vault(&self, address: H160, new_vault: String) -> Result { + self.deprecation_notice("parity_changeVault"); + self.accounts + .change_vault(address.into(), &new_vault) + .map_err(|e| errors::account("Could not change vault.", e)) + .map(|_| true) + } - fn get_vault_meta(&self, name: String) -> Result { - self.deprecation_notice("parity_getVaultMeta"); + fn 
get_vault_meta(&self, name: String) -> Result { + self.deprecation_notice("parity_getVaultMeta"); - self.accounts - .get_vault_meta(&name) - .map_err(|e| errors::account("Could not get vault metadata.", e)) - } + self.accounts + .get_vault_meta(&name) + .map_err(|e| errors::account("Could not get vault metadata.", e)) + } - fn set_vault_meta(&self, name: String, meta: String) -> Result { - self.deprecation_notice("parity_setVaultMeta"); + fn set_vault_meta(&self, name: String, meta: String) -> Result { + self.deprecation_notice("parity_setVaultMeta"); - self.accounts - .set_vault_meta(&name, &meta) - .map_err(|e| errors::account("Could not update vault metadata.", e)) - .map(|_| true) - } + self.accounts + .set_vault_meta(&name, &meta) + .map_err(|e| errors::account("Could not update vault metadata.", e)) + .map(|_| true) + } - fn derive_key_index(&self, addr: H160, password: Password, derivation: DeriveHierarchical, save_as_account: bool) -> Result { - self.deprecation_notice("parity_deriveAddressIndex"); - let addr: Address = addr.into(); - self.accounts - .derive_account( - &addr, - Some(password), - Derive::from(derivation).to_derivation() - .map_err(|c| errors::account("Could not parse derivation request: {:?}", c))?, - save_as_account) - .map(Into::into) - .map_err(|e| errors::account("Could not derive account.", e)) - } + fn derive_key_index( + &self, + addr: H160, + password: Password, + derivation: DeriveHierarchical, + save_as_account: bool, + ) -> Result { + self.deprecation_notice("parity_deriveAddressIndex"); + let addr: Address = addr.into(); + self.accounts + .derive_account( + &addr, + Some(password), + Derive::from(derivation) + .to_derivation() + .map_err(|c| errors::account("Could not parse derivation request: {:?}", c))?, + save_as_account, + ) + .map(Into::into) + .map_err(|e| errors::account("Could not derive account.", e)) + } - fn derive_key_hash(&self, addr: H160, password: Password, derivation: DeriveHash, save_as_account: bool) -> Result 
{ - self.deprecation_notice("parity_deriveAddressHash"); - let addr: Address = addr.into(); - self.accounts - .derive_account( - &addr, - Some(password), - Derive::from(derivation).to_derivation() - .map_err(|c| errors::account("Could not parse derivation request: {:?}", c))?, - save_as_account) - .map(Into::into) - .map_err(|e| errors::account("Could not derive account.", e)) - } + fn derive_key_hash( + &self, + addr: H160, + password: Password, + derivation: DeriveHash, + save_as_account: bool, + ) -> Result { + self.deprecation_notice("parity_deriveAddressHash"); + let addr: Address = addr.into(); + self.accounts + .derive_account( + &addr, + Some(password), + Derive::from(derivation) + .to_derivation() + .map_err(|c| errors::account("Could not parse derivation request: {:?}", c))?, + save_as_account, + ) + .map(Into::into) + .map_err(|e| errors::account("Could not derive account.", e)) + } - fn export_account(&self, addr: H160, password: Password) -> Result { - self.deprecation_notice("parity_exportAccount"); - let addr = addr.into(); - self.accounts - .export_account( - &addr, - password, - ) - .map(Into::into) - .map_err(|e| errors::account("Could not export account.", e)) - } + fn export_account(&self, addr: H160, password: Password) -> Result { + self.deprecation_notice("parity_exportAccount"); + let addr = addr.into(); + self.accounts + .export_account(&addr, password) + .map(Into::into) + .map_err(|e| errors::account("Could not export account.", e)) + } - fn sign_message(&self, addr: H160, password: Password, message: H256) -> Result { - self.deprecation_notice("parity_signMessage"); - self.accounts - .sign( - addr.into(), - Some(password), - message.into() - ) - .map(Into::into) - .map_err(|e| errors::account("Could not sign message.", e)) - } + fn sign_message(&self, addr: H160, password: Password, message: H256) -> Result { + self.deprecation_notice("parity_signMessage"); + self.accounts + .sign(addr.into(), Some(password), message.into()) + 
.map(Into::into) + .map_err(|e| errors::account("Could not sign message.", e)) + } - fn hardware_pin_matrix_ack(&self, path: String, pin: String) -> Result { - self.deprecation_notice("parity_hardwarePinMatrixAck"); + fn hardware_pin_matrix_ack(&self, path: String, pin: String) -> Result { + self.deprecation_notice("parity_hardwarePinMatrixAck"); - self.accounts.hardware_pin_matrix_ack(&path, &pin).map_err(|e| errors::account("Error communicating with hardware wallet.", e)) - } + self.accounts + .hardware_pin_matrix_ack(&path, &pin) + .map_err(|e| errors::account("Error communicating with hardware wallet.", e)) + } } -fn into_vec(a: Vec) -> Vec where - A: Into +fn into_vec(a: Vec) -> Vec +where + A: Into, { - a.into_iter().map(Into::into).collect() + a.into_iter().map(Into::into).collect() } diff --git a/rpc/src/v1/impls/parity_set.rs b/rpc/src/v1/impls/parity_set.rs index b7cef6c6b..6a1d4e60c 100644 --- a/rpc/src/v1/impls/parity_set.rs +++ b/rpc/src/v1/impls/parity_set.rs @@ -16,228 +16,241 @@ /// Parity-specific rpc interface for operations altering the settings. 
use std::io; -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; -use ethcore::client::{BlockChainClient, Mode}; -use ethcore::miner::{self, MinerService}; +use ethcore::{ + client::{BlockChainClient, Mode}, + miner::{self, MinerService}, +}; use ethereum_types::{H160, H256, U256}; use ethkey; use fetch::{self, Fetch}; use hash::keccak_buffer; use sync::ManageNetwork; -use updater::{Service as UpdateService}; +use updater::Service as UpdateService; -use jsonrpc_core::{BoxFuture, Result}; -use jsonrpc_core::futures::Future; -use v1::helpers::errors; -use v1::traits::ParitySet; -use v1::types::{Bytes, ReleaseInfo, Transaction}; +use jsonrpc_core::{futures::Future, BoxFuture, Result}; +use v1::{ + helpers::errors, + traits::ParitySet, + types::{Bytes, ReleaseInfo, Transaction}, +}; #[cfg(any(test, feature = "accounts"))] pub mod accounts { - use super::*; - use accounts::AccountProvider; - use v1::traits::ParitySetAccounts; - use v1::helpers::deprecated::DeprecationNotice; - use v1::helpers::engine_signer::EngineSigner; + use super::*; + use accounts::AccountProvider; + use v1::{ + helpers::{deprecated::DeprecationNotice, engine_signer::EngineSigner}, + traits::ParitySetAccounts, + }; - /// Parity-specific account-touching RPC interfaces. - pub struct ParitySetAccountsClient { - miner: Arc, - accounts: Arc, - deprecation_notice: DeprecationNotice, - } + /// Parity-specific account-touching RPC interfaces. 
+ pub struct ParitySetAccountsClient { + miner: Arc, + accounts: Arc, + deprecation_notice: DeprecationNotice, + } - impl ParitySetAccountsClient { - /// Creates new ParitySetAccountsClient - pub fn new( - accounts: &Arc, - miner: &Arc, - ) -> Self { - ParitySetAccountsClient { - accounts: accounts.clone(), - miner: miner.clone(), - deprecation_notice: Default::default(), - } - } - } + impl ParitySetAccountsClient { + /// Creates new ParitySetAccountsClient + pub fn new(accounts: &Arc, miner: &Arc) -> Self { + ParitySetAccountsClient { + accounts: accounts.clone(), + miner: miner.clone(), + deprecation_notice: Default::default(), + } + } + } - impl ParitySetAccounts for ParitySetAccountsClient { - fn set_engine_signer(&self, address: H160, password: String) -> Result { - self.deprecation_notice.print( - "parity_setEngineSigner", - "use `parity_setEngineSignerSecret` instead. See #9997 for context." - ); + impl ParitySetAccounts for ParitySetAccountsClient { + fn set_engine_signer(&self, address: H160, password: String) -> Result { + self.deprecation_notice.print( + "parity_setEngineSigner", + "use `parity_setEngineSignerSecret` instead. See #9997 for context.", + ); - let signer = Box::new(EngineSigner::new( - self.accounts.clone(), - address.clone().into(), - password.into(), - )); - self.miner.set_author(miner::Author::Sealer(signer)); - Ok(true) - } - } + let signer = Box::new(EngineSigner::new( + self.accounts.clone(), + address.clone().into(), + password.into(), + )); + self.miner.set_author(miner::Author::Sealer(signer)); + Ok(true) + } + } } /// Parity-specific rpc interface for operations altering the settings. pub struct ParitySetClient { - client: Arc, - miner: Arc, - updater: Arc, - net: Arc, - fetch: F, + client: Arc, + miner: Arc, + updater: Arc, + net: Arc, + fetch: F, } impl ParitySetClient - where C: BlockChainClient + 'static, +where + C: BlockChainClient + 'static, { - /// Creates new `ParitySetClient` with given `Fetch`. 
- pub fn new( - client: &Arc, - miner: &Arc, - updater: &Arc, - net: &Arc, - fetch: F, - ) -> Self { - ParitySetClient { - client: client.clone(), - miner: miner.clone(), - updater: updater.clone(), - net: net.clone(), - fetch, - } - } + /// Creates new `ParitySetClient` with given `Fetch`. + pub fn new( + client: &Arc, + miner: &Arc, + updater: &Arc, + net: &Arc, + fetch: F, + ) -> Self { + ParitySetClient { + client: client.clone(), + miner: miner.clone(), + updater: updater.clone(), + net: net.clone(), + fetch, + } + } } -impl ParitySet for ParitySetClient where - C: BlockChainClient + 'static, - M: MinerService + 'static, - U: UpdateService + 'static, - F: Fetch + 'static, +impl ParitySet for ParitySetClient +where + C: BlockChainClient + 'static, + M: MinerService + 'static, + U: UpdateService + 'static, + F: Fetch + 'static, { + fn set_min_gas_price(&self, gas_price: U256) -> Result { + match self.miner.set_minimal_gas_price(gas_price) { + Ok(success) => Ok(success), + Err(e) => Err(errors::unsupported(e, None)), + } + } - fn set_min_gas_price(&self, gas_price: U256) -> Result { - match self.miner.set_minimal_gas_price(gas_price) { - Ok(success) => Ok(success), - Err(e) => Err(errors::unsupported(e, None)), - } - } + fn set_transactions_limit(&self, _limit: usize) -> Result { + warn!("setTransactionsLimit is deprecated. Ignoring request."); + Ok(false) + } - fn set_transactions_limit(&self, _limit: usize) -> Result { - warn!("setTransactionsLimit is deprecated. Ignoring request."); - Ok(false) - } + fn set_tx_gas_limit(&self, _limit: U256) -> Result { + warn!("setTxGasLimit is deprecated. Ignoring request."); + Ok(false) + } - fn set_tx_gas_limit(&self, _limit: U256) -> Result { - warn!("setTxGasLimit is deprecated. 
Ignoring request."); - Ok(false) - } + fn set_gas_floor_target(&self, target: U256) -> Result { + let mut range = self.miner.authoring_params().gas_range_target; + range.0 = target; + self.miner.set_gas_range_target(range); + Ok(true) + } - fn set_gas_floor_target(&self, target: U256) -> Result { - let mut range = self.miner.authoring_params().gas_range_target; - range.0 = target; - self.miner.set_gas_range_target(range); - Ok(true) - } + fn set_gas_ceil_target(&self, target: U256) -> Result { + let mut range = self.miner.authoring_params().gas_range_target; + range.1 = target; + self.miner.set_gas_range_target(range); + Ok(true) + } - fn set_gas_ceil_target(&self, target: U256) -> Result { - let mut range = self.miner.authoring_params().gas_range_target; - range.1 = target; - self.miner.set_gas_range_target(range); - Ok(true) - } + fn set_extra_data(&self, extra_data: Bytes) -> Result { + self.miner.set_extra_data(extra_data.into_vec()); + Ok(true) + } - fn set_extra_data(&self, extra_data: Bytes) -> Result { - self.miner.set_extra_data(extra_data.into_vec()); - Ok(true) - } + fn set_author(&self, address: H160) -> Result { + self.miner.set_author(miner::Author::External(address)); + Ok(true) + } - fn set_author(&self, address: H160) -> Result { - self.miner.set_author(miner::Author::External(address)); - Ok(true) - } + fn set_engine_signer_secret(&self, secret: H256) -> Result { + let keypair = ethkey::KeyPair::from_secret(secret.into()) + .map_err(|e| errors::account("Invalid secret", e))?; + self.miner.set_author(miner::Author::Sealer( + ethcore::engines::signer::from_keypair(keypair), + )); + Ok(true) + } - fn set_engine_signer_secret(&self, secret: H256) -> Result { - let keypair = ethkey::KeyPair::from_secret(secret.into()).map_err(|e| errors::account("Invalid secret", e))?; - self.miner.set_author(miner::Author::Sealer(ethcore::engines::signer::from_keypair(keypair))); - Ok(true) - } + fn add_reserved_peer(&self, peer: String) -> Result { + match 
self.net.add_reserved_peer(peer) { + Ok(()) => Ok(true), + Err(e) => Err(errors::invalid_params("Peer address", e)), + } + } - fn add_reserved_peer(&self, peer: String) -> Result { - match self.net.add_reserved_peer(peer) { - Ok(()) => Ok(true), - Err(e) => Err(errors::invalid_params("Peer address", e)), - } - } + fn remove_reserved_peer(&self, peer: String) -> Result { + match self.net.remove_reserved_peer(peer) { + Ok(()) => Ok(true), + Err(e) => Err(errors::invalid_params("Peer address", e)), + } + } - fn remove_reserved_peer(&self, peer: String) -> Result { - match self.net.remove_reserved_peer(peer) { - Ok(()) => Ok(true), - Err(e) => Err(errors::invalid_params("Peer address", e)), - } - } + fn drop_non_reserved_peers(&self) -> Result { + self.net.deny_unreserved_peers(); + Ok(true) + } - fn drop_non_reserved_peers(&self) -> Result { - self.net.deny_unreserved_peers(); - Ok(true) - } + fn accept_non_reserved_peers(&self) -> Result { + self.net.accept_unreserved_peers(); + Ok(true) + } - fn accept_non_reserved_peers(&self) -> Result { - self.net.accept_unreserved_peers(); - Ok(true) - } + fn start_network(&self) -> Result { + self.net.start_network(); + Ok(true) + } - fn start_network(&self) -> Result { - self.net.start_network(); - Ok(true) - } + fn stop_network(&self) -> Result { + self.net.stop_network(); + Ok(true) + } - fn stop_network(&self) -> Result { - self.net.stop_network(); - Ok(true) - } + fn set_mode(&self, mode: String) -> Result { + self.client.set_mode(match mode.as_str() { + "offline" => Mode::Off, + "dark" => Mode::Dark(Duration::from_secs(300)), + "passive" => Mode::Passive(Duration::from_secs(300), Duration::from_secs(3600)), + "active" => Mode::Active, + e => { + return Err(errors::invalid_params("mode", e.to_owned())); + } + }); + Ok(true) + } - fn set_mode(&self, mode: String) -> Result { - self.client.set_mode(match mode.as_str() { - "offline" => Mode::Off, - "dark" => Mode::Dark(Duration::from_secs(300)), - "passive" => 
Mode::Passive(Duration::from_secs(300), Duration::from_secs(3600)), - "active" => Mode::Active, - e => { return Err(errors::invalid_params("mode", e.to_owned())); }, - }); - Ok(true) - } + fn set_spec_name(&self, spec_name: String) -> Result { + self.client + .set_spec_name(spec_name) + .map(|_| true) + .map_err(|()| errors::cannot_restart()) + } - fn set_spec_name(&self, spec_name: String) -> Result { - self.client.set_spec_name(spec_name).map(|_| true).map_err(|()| errors::cannot_restart()) - } + fn hash_content(&self, url: String) -> BoxFuture { + let future = self + .fetch + .get(&url, Default::default()) + .then(move |result| { + result + .map_err(errors::fetch) + .and_then(move |response| { + let mut reader = io::BufReader::new(fetch::BodyReader::new(response)); + keccak_buffer(&mut reader).map_err(errors::fetch) + }) + .map(Into::into) + }); + Box::new(future) + } - fn hash_content(&self, url: String) -> BoxFuture { - let future = self.fetch.get(&url, Default::default()).then(move |result| { - result - .map_err(errors::fetch) - .and_then(move |response| { - let mut reader = io::BufReader::new(fetch::BodyReader::new(response)); - keccak_buffer(&mut reader).map_err(errors::fetch) - }) - .map(Into::into) - }); - Box::new(future) - } + fn upgrade_ready(&self) -> Result> { + Ok(self.updater.upgrade_ready().map(Into::into)) + } - fn upgrade_ready(&self) -> Result> { - Ok(self.updater.upgrade_ready().map(Into::into)) - } + fn execute_upgrade(&self) -> Result { + Ok(self.updater.execute_upgrade()) + } - fn execute_upgrade(&self) -> Result { - Ok(self.updater.execute_upgrade()) - } - - fn remove_transaction(&self, hash: H256) -> Result> { - Ok(self.miner.remove_transaction(&hash) - .map(|t| Transaction::from_pending(t.pending().clone())) - ) - } + fn remove_transaction(&self, hash: H256) -> Result> { + Ok(self + .miner + .remove_transaction(&hash) + .map(|t| Transaction::from_pending(t.pending().clone()))) + } } diff --git a/rpc/src/v1/impls/personal.rs 
b/rpc/src/v1/impls/personal.rs index b6af1f81e..9aec2ab0a 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -15,256 +15,346 @@ // along with Parity Ethereum. If not, see . //! Account management (personal) rpc implementation -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use accounts::AccountProvider; use bytes::Bytes; -use eip_712::{EIP712, hash_structured_data}; -use ethereum_types::{H160, H256, H520, U128, Address}; +use eip_712::{hash_structured_data, EIP712}; +use ethereum_types::{Address, H160, H256, H520, U128}; use ethkey::{public_to_address, recover, Signature}; use types::transaction::{PendingTransaction, SignedTransaction}; -use jsonrpc_core::futures::{future, Future}; -use jsonrpc_core::types::Value; -use jsonrpc_core::{BoxFuture, Result}; -use v1::helpers::deprecated::{self, DeprecationNotice}; -use v1::helpers::dispatch::{self, eth_data_hash, Dispatcher, SignWith, PostSign, WithToken}; -use v1::helpers::{errors, eip191}; -use v1::metadata::Metadata; -use v1::traits::Personal; -use v1::types::{ - Bytes as RpcBytes, - ConfirmationPayload as RpcConfirmationPayload, - ConfirmationResponse as RpcConfirmationResponse, - TransactionRequest, - RichRawTransaction as RpcRichRawTransaction, - EIP191Version, +use jsonrpc_core::{ + futures::{future, Future}, + types::Value, + BoxFuture, Result, +}; +use v1::{ + helpers::{ + deprecated::{self, DeprecationNotice}, + dispatch::{self, eth_data_hash, Dispatcher, PostSign, SignWith, WithToken}, + eip191, errors, + }, + metadata::Metadata, + traits::Personal, + types::{ + Bytes as RpcBytes, ConfirmationPayload as RpcConfirmationPayload, + ConfirmationResponse as RpcConfirmationResponse, EIP191Version, + RichRawTransaction as RpcRichRawTransaction, TransactionRequest, + }, }; /// Account management (personal) rpc implementation. 
pub struct PersonalClient { - accounts: Arc, - dispatcher: D, - allow_perm_unlock: bool, - allow_experimental_rpcs: bool, - deprecation_notice: DeprecationNotice, + accounts: Arc, + dispatcher: D, + allow_perm_unlock: bool, + allow_experimental_rpcs: bool, + deprecation_notice: DeprecationNotice, } impl PersonalClient { - /// Creates new PersonalClient - pub fn new( - accounts: &Arc, - dispatcher: D, - allow_perm_unlock: bool, - allow_experimental_rpcs: bool, - ) -> Self { - PersonalClient { - accounts: accounts.clone(), - dispatcher, - allow_perm_unlock, - allow_experimental_rpcs, - deprecation_notice: DeprecationNotice::default(), - } - } + /// Creates new PersonalClient + pub fn new( + accounts: &Arc, + dispatcher: D, + allow_perm_unlock: bool, + allow_experimental_rpcs: bool, + ) -> Self { + PersonalClient { + accounts: accounts.clone(), + dispatcher, + allow_perm_unlock, + allow_experimental_rpcs, + deprecation_notice: DeprecationNotice::default(), + } + } } impl PersonalClient { - fn do_sign_transaction

( - &self, - _meta: Metadata, - request: TransactionRequest, - password: String, - post_sign: P - ) -> BoxFuture - where P: PostSign + 'static, - ::Future: Send - { - let dispatcher = self.dispatcher.clone(); - let accounts = self.accounts.clone(); + fn do_sign_transaction

( + &self, + _meta: Metadata, + request: TransactionRequest, + password: String, + post_sign: P, + ) -> BoxFuture + where + P: PostSign + 'static, + ::Future: Send, + { + let dispatcher = self.dispatcher.clone(); + let accounts = self.accounts.clone(); - let default = match request.from.as_ref() { - Some(account) => Ok(account.clone().into()), - None => accounts - .default_account() - .map_err(|e| errors::account("Cannot find default account.", e)), - }; + let default = match request.from.as_ref() { + Some(account) => Ok(account.clone().into()), + None => accounts + .default_account() + .map_err(|e| errors::account("Cannot find default account.", e)), + }; - let default = match default { - Ok(default) => default, - Err(e) => return Box::new(future::err(e)), - }; + let default = match default { + Ok(default) => default, + Err(e) => return Box::new(future::err(e)), + }; - let accounts = Arc::new(dispatch::Signer::new(accounts)) as _; - Box::new(dispatcher.fill_optional_fields(request.into(), default, false) - .and_then(move |filled| { - dispatcher.sign(filled, &accounts, SignWith::Password(password.into()), post_sign) - }) - ) - } + let accounts = Arc::new(dispatch::Signer::new(accounts)) as _; + Box::new( + dispatcher + .fill_optional_fields(request.into(), default, false) + .and_then(move |filled| { + dispatcher.sign( + filled, + &accounts, + SignWith::Password(password.into()), + post_sign, + ) + }), + ) + } } impl Personal for PersonalClient { - type Metadata = Metadata; + type Metadata = Metadata; - fn accounts(&self) -> Result> { - self.deprecation_notice.print("personal_accounts", deprecated::msgs::ACCOUNTS); - let accounts = self.accounts.accounts().map_err(|e| errors::account("Could not fetch accounts.", e))?; - Ok(accounts.into_iter().map(Into::into).collect::>()) - } + fn accounts(&self) -> Result> { + self.deprecation_notice + .print("personal_accounts", deprecated::msgs::ACCOUNTS); + let accounts = self + .accounts + .accounts() + .map_err(|e| 
errors::account("Could not fetch accounts.", e))?; + Ok(accounts.into_iter().map(Into::into).collect::>()) + } - fn new_account(&self, pass: String) -> Result { - self.deprecation_notice.print("personal_newAccount", deprecated::msgs::ACCOUNTS); - self.accounts.new_account(&pass.into()) - .map(Into::into) - .map_err(|e| errors::account("Could not create account.", e)) - } + fn new_account(&self, pass: String) -> Result { + self.deprecation_notice + .print("personal_newAccount", deprecated::msgs::ACCOUNTS); + self.accounts + .new_account(&pass.into()) + .map(Into::into) + .map_err(|e| errors::account("Could not create account.", e)) + } - fn unlock_account(&self, account: H160, account_pass: String, duration: Option) -> Result { - self.deprecation_notice.print("personal_unlockAccount", deprecated::msgs::ACCOUNTS); - let account: Address = account.into(); - let store = self.accounts.clone(); - let duration = match duration { - None => None, - Some(duration) => { - let duration: U128 = duration.into(); - let v = duration.low_u64() as u32; - if duration != v.into() { - return Err(errors::invalid_params("Duration", "Invalid Number")); - } else { - Some(v) - } - }, - }; + fn unlock_account( + &self, + account: H160, + account_pass: String, + duration: Option, + ) -> Result { + self.deprecation_notice + .print("personal_unlockAccount", deprecated::msgs::ACCOUNTS); + let account: Address = account.into(); + let store = self.accounts.clone(); + let duration = match duration { + None => None, + Some(duration) => { + let duration: U128 = duration.into(); + let v = duration.low_u64() as u32; + if duration != v.into() { + return Err(errors::invalid_params("Duration", "Invalid Number")); + } else { + Some(v) + } + } + }; - let r = match (self.allow_perm_unlock, duration) { - (false, None) => store.unlock_account_temporarily(account, account_pass.into()), - (false, _) => return Err(errors::unsupported( - "Time-unlocking is not supported when permanent unlock is disabled.", - 
Some("Use personal_sendTransaction or enable permanent unlocking, instead."), - )), - (true, Some(0)) => store.unlock_account_permanently(account, account_pass.into()), - (true, Some(d)) => store.unlock_account_timed(account, account_pass.into(), Duration::from_secs(d.into())), - (true, None) => store.unlock_account_timed(account, account_pass.into(), Duration::from_secs(300)), - }; - match r { - Ok(_) => Ok(true), - Err(err) => Err(errors::account("Unable to unlock the account.", err)), - } - } + let r = match (self.allow_perm_unlock, duration) { + (false, None) => store.unlock_account_temporarily(account, account_pass.into()), + (false, _) => { + return Err(errors::unsupported( + "Time-unlocking is not supported when permanent unlock is disabled.", + Some("Use personal_sendTransaction or enable permanent unlocking, instead."), + )) + } + (true, Some(0)) => store.unlock_account_permanently(account, account_pass.into()), + (true, Some(d)) => store.unlock_account_timed( + account, + account_pass.into(), + Duration::from_secs(d.into()), + ), + (true, None) => { + store.unlock_account_timed(account, account_pass.into(), Duration::from_secs(300)) + } + }; + match r { + Ok(_) => Ok(true), + Err(err) => Err(errors::account("Unable to unlock the account.", err)), + } + } - fn sign(&self, data: RpcBytes, account: H160, password: String) -> BoxFuture { - self.deprecation_notice.print("personal_sign", deprecated::msgs::ACCOUNTS); - let dispatcher = self.dispatcher.clone(); - let accounts = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; + fn sign(&self, data: RpcBytes, account: H160, password: String) -> BoxFuture { + self.deprecation_notice + .print("personal_sign", deprecated::msgs::ACCOUNTS); + let dispatcher = self.dispatcher.clone(); + let accounts = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; - let payload = RpcConfirmationPayload::EthSignMessage((account.clone(), data).into()); + let payload = 
RpcConfirmationPayload::EthSignMessage((account.clone(), data).into()); - Box::new(dispatch::from_rpc(payload, account.into(), &dispatcher) - .and_then(move |payload| { - dispatch::execute(dispatcher, &accounts, payload, dispatch::SignWith::Password(password.into())) - }) - .map(|v| v.into_value()) - .then(|res| match res { - Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), - Err(e) => Err(e), - e => Err(errors::internal("Unexpected result", e)), - })) - } + Box::new( + dispatch::from_rpc(payload, account.into(), &dispatcher) + .and_then(move |payload| { + dispatch::execute( + dispatcher, + &accounts, + payload, + dispatch::SignWith::Password(password.into()), + ) + }) + .map(|v| v.into_value()) + .then(|res| match res { + Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), + Err(e) => Err(e), + e => Err(errors::internal("Unexpected result", e)), + }), + ) + } - fn sign_191(&self, version: EIP191Version, data: Value, account: H160, password: String) -> BoxFuture { - self.deprecation_notice.print("personal_sign191", deprecated::msgs::ACCOUNTS); - try_bf!(errors::require_experimental(self.allow_experimental_rpcs, "191")); + fn sign_191( + &self, + version: EIP191Version, + data: Value, + account: H160, + password: String, + ) -> BoxFuture { + self.deprecation_notice + .print("personal_sign191", deprecated::msgs::ACCOUNTS); + try_bf!(errors::require_experimental( + self.allow_experimental_rpcs, + "191" + )); - let data = try_bf!(eip191::hash_message(version, data)); - let dispatcher = self.dispatcher.clone(); - let accounts = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; + let data = try_bf!(eip191::hash_message(version, data)); + let dispatcher = self.dispatcher.clone(); + let accounts = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; - let payload = RpcConfirmationPayload::EIP191SignMessage((account.clone(), data.into()).into()); + let payload = + 
RpcConfirmationPayload::EIP191SignMessage((account.clone(), data.into()).into()); - Box::new(dispatch::from_rpc(payload, account.into(), &dispatcher) - .and_then(move |payload| { - dispatch::execute(dispatcher, &accounts, payload, dispatch::SignWith::Password(password.into())) - }) - .map(|v| v.into_value()) - .then(|res| match res { - Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), - Err(e) => Err(e), - e => Err(errors::internal("Unexpected result", e)), - }) - ) - } + Box::new( + dispatch::from_rpc(payload, account.into(), &dispatcher) + .and_then(move |payload| { + dispatch::execute( + dispatcher, + &accounts, + payload, + dispatch::SignWith::Password(password.into()), + ) + }) + .map(|v| v.into_value()) + .then(|res| match res { + Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), + Err(e) => Err(e), + e => Err(errors::internal("Unexpected result", e)), + }), + ) + } - fn sign_typed_data(&self, typed_data: EIP712, account: H160, password: String) -> BoxFuture { - self.deprecation_notice.print("personal_signTypedData", deprecated::msgs::ACCOUNTS); - try_bf!(errors::require_experimental(self.allow_experimental_rpcs, "712")); + fn sign_typed_data( + &self, + typed_data: EIP712, + account: H160, + password: String, + ) -> BoxFuture { + self.deprecation_notice + .print("personal_signTypedData", deprecated::msgs::ACCOUNTS); + try_bf!(errors::require_experimental( + self.allow_experimental_rpcs, + "712" + )); - let data = match hash_structured_data(typed_data) { - Ok(d) => d, - Err(err) => return Box::new(future::err(errors::invalid_call_data(err.kind()))), - }; - let dispatcher = self.dispatcher.clone(); - let accounts = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; + let data = match hash_structured_data(typed_data) { + Ok(d) => d, + Err(err) => return Box::new(future::err(errors::invalid_call_data(err.kind()))), + }; + let dispatcher = self.dispatcher.clone(); + let accounts = 
Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; - let payload = RpcConfirmationPayload::EIP191SignMessage((account.clone(), data.into()).into()); + let payload = + RpcConfirmationPayload::EIP191SignMessage((account.clone(), data.into()).into()); - Box::new(dispatch::from_rpc(payload, account.into(), &dispatcher) - .and_then(move |payload| { - dispatch::execute(dispatcher, &accounts, payload, dispatch::SignWith::Password(password.into())) - }) - .map(|v| v.into_value()) - .then(|res| match res { - Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), - Err(e) => Err(e), - e => Err(errors::internal("Unexpected result", e)), - }) - ) - } + Box::new( + dispatch::from_rpc(payload, account.into(), &dispatcher) + .and_then(move |payload| { + dispatch::execute( + dispatcher, + &accounts, + payload, + dispatch::SignWith::Password(password.into()), + ) + }) + .map(|v| v.into_value()) + .then(|res| match res { + Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), + Err(e) => Err(e), + e => Err(errors::internal("Unexpected result", e)), + }), + ) + } - fn ec_recover(&self, data: RpcBytes, signature: H520) -> BoxFuture { - let signature: H520 = signature.into(); - let signature = Signature::from_electrum(&signature); - let data: Bytes = data.into(); + fn ec_recover(&self, data: RpcBytes, signature: H520) -> BoxFuture { + let signature: H520 = signature.into(); + let signature = Signature::from_electrum(&signature); + let data: Bytes = data.into(); - let hash = eth_data_hash(data); - let account = recover(&signature.into(), &hash) - .map_err(errors::encryption) - .map(|public| { - public_to_address(&public).into() - }); + let hash = eth_data_hash(data); + let account = recover(&signature.into(), &hash) + .map_err(errors::encryption) + .map(|public| public_to_address(&public).into()); - Box::new(future::done(account)) - } + Box::new(future::done(account)) + } - fn sign_transaction(&self, meta: Metadata, request: TransactionRequest, 
password: String) -> BoxFuture { - self.deprecation_notice.print("personal_signTransaction", deprecated::msgs::ACCOUNTS); + fn sign_transaction( + &self, + meta: Metadata, + request: TransactionRequest, + password: String, + ) -> BoxFuture { + self.deprecation_notice + .print("personal_signTransaction", deprecated::msgs::ACCOUNTS); - let condition = request.condition.clone().map(Into::into); - let dispatcher = self.dispatcher.clone(); - Box::new(self.do_sign_transaction(meta, request, password, ()) - .map(move |tx| PendingTransaction::new(tx.into_value(), condition)) - .map(move |pending_tx| dispatcher.enrich(pending_tx.transaction))) - } + let condition = request.condition.clone().map(Into::into); + let dispatcher = self.dispatcher.clone(); + Box::new( + self.do_sign_transaction(meta, request, password, ()) + .map(move |tx| PendingTransaction::new(tx.into_value(), condition)) + .map(move |pending_tx| dispatcher.enrich(pending_tx.transaction)), + ) + } - fn send_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture { - self.deprecation_notice.print("personal_sendTransaction", deprecated::msgs::ACCOUNTS); - let condition = request.condition.clone().map(Into::into); - let dispatcher = self.dispatcher.clone(); - Box::new( - self.do_sign_transaction(meta, request, password, move |signed: WithToken| { - dispatcher.dispatch_transaction( - PendingTransaction::new( - signed.into_value(), - condition - ) - ) - }) - ) - } + fn send_transaction( + &self, + meta: Metadata, + request: TransactionRequest, + password: String, + ) -> BoxFuture { + self.deprecation_notice + .print("personal_sendTransaction", deprecated::msgs::ACCOUNTS); + let condition = request.condition.clone().map(Into::into); + let dispatcher = self.dispatcher.clone(); + Box::new(self.do_sign_transaction( + meta, + request, + password, + move |signed: WithToken| { + dispatcher + .dispatch_transaction(PendingTransaction::new(signed.into_value(), condition)) + }, + )) + } 
- fn sign_and_send_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture { - self.deprecation_notice.print("personal_signAndSendTransaction", Some("use personal_sendTransaction instead.")); - warn!("Using deprecated personal_signAndSendTransaction, use personal_sendTransaction instead."); - self.send_transaction(meta, request, password) - } + fn sign_and_send_transaction( + &self, + meta: Metadata, + request: TransactionRequest, + password: String, + ) -> BoxFuture { + self.deprecation_notice.print( + "personal_signAndSendTransaction", + Some("use personal_sendTransaction instead."), + ); + warn!("Using deprecated personal_signAndSendTransaction, use personal_sendTransaction instead."); + self.send_transaction(meta, request, password) + } } diff --git a/rpc/src/v1/impls/private.rs b/rpc/src/v1/impls/private.rs index c3be3f915..f5c6280fb 100644 --- a/rpc/src/v1/impls/private.rs +++ b/rpc/src/v1/impls/private.rs @@ -24,99 +24,119 @@ use ethcore_private_tx::Provider as PrivateTransactionManager; use ethereum_types::{Address, H160, H256, U256}; use types::transaction::SignedTransaction; -use jsonrpc_core::{Error}; -use v1::types::{Bytes, PrivateTransactionReceipt, TransactionRequest, - BlockNumber, PrivateTransactionReceiptAndTransaction, CallRequest, block_number_to_id}; -use v1::traits::Private; -use v1::metadata::Metadata; -use v1::helpers::{errors, fake_sign}; +use jsonrpc_core::Error; +use v1::{ + helpers::{errors, fake_sign}, + metadata::Metadata, + traits::Private, + types::{ + block_number_to_id, BlockNumber, Bytes, CallRequest, PrivateTransactionReceipt, + PrivateTransactionReceiptAndTransaction, TransactionRequest, + }, +}; /// Private transaction manager API endpoint implementation. pub struct PrivateClient { - private: Option>, + private: Option>, } impl PrivateClient { - /// Creates a new instance. - pub fn new(private: Option>) -> Self { - PrivateClient { - private, - } - } + /// Creates a new instance. 
+ pub fn new(private: Option>) -> Self { + PrivateClient { private } + } - fn unwrap_manager(&self) -> Result<&PrivateTransactionManager, Error> { - match self.private { - Some(ref arc) => Ok(&**arc), - None => Err(errors::light_unimplemented(None)), - } - } + fn unwrap_manager(&self) -> Result<&PrivateTransactionManager, Error> { + match self.private { + Some(ref arc) => Ok(&**arc), + None => Err(errors::light_unimplemented(None)), + } + } } impl Private for PrivateClient { - type Metadata = Metadata; + type Metadata = Metadata; - fn send_transaction(&self, request: Bytes) -> Result { - let signed_transaction = Rlp::new(&request.into_vec()).as_val() - .map_err(errors::rlp) - .and_then(|tx| SignedTransaction::new(tx).map_err(errors::transaction))?; - let client = self.unwrap_manager()?; - let receipt = client.create_private_transaction(signed_transaction).map_err(errors::private_message)?; - Ok(receipt.into()) - } + fn send_transaction(&self, request: Bytes) -> Result { + let signed_transaction = Rlp::new(&request.into_vec()) + .as_val() + .map_err(errors::rlp) + .and_then(|tx| SignedTransaction::new(tx).map_err(errors::transaction))?; + let client = self.unwrap_manager()?; + let receipt = client + .create_private_transaction(signed_transaction) + .map_err(errors::private_message)?; + Ok(receipt.into()) + } - fn compose_deployment_transaction(&self, block_number: BlockNumber, request: Bytes, validators: Vec, gas_price: U256) -> Result { - let signed_transaction = Rlp::new(&request.into_vec()).as_val() - .map_err(errors::rlp) - .and_then(|tx| SignedTransaction::new(tx).map_err(errors::transaction))?; - let client = self.unwrap_manager()?; + fn compose_deployment_transaction( + &self, + block_number: BlockNumber, + request: Bytes, + validators: Vec, + gas_price: U256, + ) -> Result { + let signed_transaction = Rlp::new(&request.into_vec()) + .as_val() + .map_err(errors::rlp) + .and_then(|tx| SignedTransaction::new(tx).map_err(errors::transaction))?; + let client = 
self.unwrap_manager()?; - let addresses: Vec

= validators.into_iter().map(Into::into).collect(); - let id = match block_number { - BlockNumber::Pending => return Err(errors::private_message_block_id_not_supported()), - num => block_number_to_id(num) - }; + let addresses: Vec
= validators.into_iter().map(Into::into).collect(); + let id = match block_number { + BlockNumber::Pending => return Err(errors::private_message_block_id_not_supported()), + num => block_number_to_id(num), + }; - let (transaction, contract_address) = client - .public_creation_transaction(id, &signed_transaction, addresses.as_slice(), gas_price) - .map_err(errors::private_message)?; - let tx_hash = transaction.hash(None); - let request = TransactionRequest { - from: Some(signed_transaction.sender()), - to: None, - nonce: Some(transaction.nonce), - gas_price: Some(transaction.gas_price), - gas: Some(transaction.gas), - value: Some(transaction.value), - data: Some(transaction.data.into()), - condition: None, - }; + let (transaction, contract_address) = client + .public_creation_transaction(id, &signed_transaction, addresses.as_slice(), gas_price) + .map_err(errors::private_message)?; + let tx_hash = transaction.hash(None); + let request = TransactionRequest { + from: Some(signed_transaction.sender()), + to: None, + nonce: Some(transaction.nonce), + gas_price: Some(transaction.gas_price), + gas: Some(transaction.gas), + value: Some(transaction.value), + data: Some(transaction.data.into()), + condition: None, + }; - Ok(PrivateTransactionReceiptAndTransaction { - transaction: request, - receipt: PrivateTransactionReceipt { - transaction_hash: tx_hash, - contract_address, - status_code: 0, - } - }) - } + Ok(PrivateTransactionReceiptAndTransaction { + transaction: request, + receipt: PrivateTransactionReceipt { + transaction_hash: tx_hash, + contract_address, + status_code: 0, + }, + }) + } - fn private_call(&self, block_number: BlockNumber, request: CallRequest) -> Result { - let id = match block_number { - BlockNumber::Pending => return Err(errors::private_message_block_id_not_supported()), - num => block_number_to_id(num) - }; + fn private_call( + &self, + block_number: BlockNumber, + request: CallRequest, + ) -> Result { + let id = match block_number { + 
BlockNumber::Pending => return Err(errors::private_message_block_id_not_supported()), + num => block_number_to_id(num), + }; - let request = CallRequest::into(request); - let signed = fake_sign::sign_call(request)?; - let client = self.unwrap_manager()?; - let executed_result = client.private_call(id, &signed).map_err(errors::private_message)?; - Ok(executed_result.output.into()) - } + let request = CallRequest::into(request); + let signed = fake_sign::sign_call(request)?; + let client = self.unwrap_manager()?; + let executed_result = client + .private_call(id, &signed) + .map_err(errors::private_message)?; + Ok(executed_result.output.into()) + } - fn private_contract_key(&self, contract_address: H160) -> Result { - let client = self.unwrap_manager()?; - let key = client.contract_key_id(&contract_address).map_err(errors::private_message)?; - Ok(key) - } + fn private_contract_key(&self, contract_address: H160) -> Result { + let client = self.unwrap_manager()?; + let key = client + .contract_key_id(&contract_address) + .map_err(errors::private_message)?; + Ok(key) + } } diff --git a/rpc/src/v1/impls/pubsub.rs b/rpc/src/v1/impls/pubsub.rs index 1575aacdd..575791331 100644 --- a/rpc/src/v1/impls/pubsub.rs +++ b/rpc/src/v1/impls/pubsub.rs @@ -16,91 +16,104 @@ //! Parity-specific PUB-SUB rpc implementation. 
-use std::sync::Arc; -use std::time::Duration; use parking_lot::RwLock; +use std::{sync::Arc, time::Duration}; -use jsonrpc_core::{self as core, Result, MetaIoHandler}; -use jsonrpc_core::futures::{future, Future, Stream, Sink}; -use jsonrpc_pubsub::typed::Subscriber; -use jsonrpc_pubsub::SubscriptionId; +use jsonrpc_core::{ + self as core, + futures::{future, Future, Sink, Stream}, + MetaIoHandler, Result, +}; +use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use tokio_timer; use parity_runtime::Executor; -use v1::helpers::GenericPollManager; -use v1::metadata::Metadata; -use v1::traits::PubSub; +use v1::{helpers::GenericPollManager, metadata::Metadata, traits::PubSub}; /// Parity PubSub implementation. pub struct PubSubClient> { - poll_manager: Arc>>, - executor: Executor, + poll_manager: Arc>>, + executor: Executor, } impl> PubSubClient { - /// Creates new `PubSubClient`. - pub fn new(rpc: MetaIoHandler, executor: Executor) -> Self { - let poll_manager = Arc::new(RwLock::new(GenericPollManager::new(rpc))); - let pm2 = Arc::downgrade(&poll_manager); + /// Creates new `PubSubClient`. 
+ pub fn new(rpc: MetaIoHandler, executor: Executor) -> Self { + let poll_manager = Arc::new(RwLock::new(GenericPollManager::new(rpc))); + let pm2 = Arc::downgrade(&poll_manager); - let timer = tokio_timer::wheel() - .tick_duration(Duration::from_millis(500)) - .build(); + let timer = tokio_timer::wheel() + .tick_duration(Duration::from_millis(500)) + .build(); - // Start ticking - let interval = timer.interval(Duration::from_millis(1000)); - executor.spawn(interval - .map_err(|e| warn!("Polling timer error: {:?}", e)) - .for_each(move |_| { - if let Some(pm2) = pm2.upgrade() { - pm2.read().tick() - } else { - Box::new(future::err(())) - } - }) - ); + // Start ticking + let interval = timer.interval(Duration::from_millis(1000)); + executor.spawn( + interval + .map_err(|e| warn!("Polling timer error: {:?}", e)) + .for_each(move |_| { + if let Some(pm2) = pm2.upgrade() { + pm2.read().tick() + } else { + Box::new(future::err(())) + } + }), + ); - PubSubClient { - poll_manager, - executor, - } - } + PubSubClient { + poll_manager, + executor, + } + } } impl PubSubClient { - /// Creates new `PubSubClient` with deterministic ids. - #[cfg(test)] - pub fn new_test(rpc: MetaIoHandler, executor: Executor) -> Self { - let client = Self::new(MetaIoHandler::with_middleware(Default::default()), executor); - *client.poll_manager.write() = GenericPollManager::new_test(rpc); - client - } + /// Creates new `PubSubClient` with deterministic ids. 
+ #[cfg(test)] + pub fn new_test( + rpc: MetaIoHandler, + executor: Executor, + ) -> Self { + let client = Self::new(MetaIoHandler::with_middleware(Default::default()), executor); + *client.poll_manager.write() = GenericPollManager::new_test(rpc); + client + } } impl> PubSub for PubSubClient { - type Metadata = Metadata; + type Metadata = Metadata; - fn parity_subscribe(&self, mut meta: Metadata, subscriber: Subscriber, method: String, params: Option) { - let params = params.unwrap_or_else(|| core::Params::Array(vec![])); - // Make sure to get rid of PubSub session otherwise it will never be dropped. - meta.session = None; + fn parity_subscribe( + &self, + mut meta: Metadata, + subscriber: Subscriber, + method: String, + params: Option, + ) { + let params = params.unwrap_or_else(|| core::Params::Array(vec![])); + // Make sure to get rid of PubSub session otherwise it will never be dropped. + meta.session = None; - let mut poll_manager = self.poll_manager.write(); - let (id, receiver) = poll_manager.subscribe(meta, method, params); - match subscriber.assign_id(id.clone()) { - Ok(sink) => { - self.executor.spawn(receiver.forward(sink.sink_map_err(|e| { - warn!("Cannot send notification: {:?}", e); - })).map(|_| ())); - }, - Err(_) => { - poll_manager.unsubscribe(&id); - }, - } - } + let mut poll_manager = self.poll_manager.write(); + let (id, receiver) = poll_manager.subscribe(meta, method, params); + match subscriber.assign_id(id.clone()) { + Ok(sink) => { + self.executor.spawn( + receiver + .forward(sink.sink_map_err(|e| { + warn!("Cannot send notification: {:?}", e); + })) + .map(|_| ()), + ); + } + Err(_) => { + poll_manager.unsubscribe(&id); + } + } + } - fn parity_unsubscribe(&self, _: Option, id: SubscriptionId) -> Result { - let res = self.poll_manager.write().unsubscribe(&id); - Ok(res) - } + fn parity_unsubscribe(&self, _: Option, id: SubscriptionId) -> Result { + let res = self.poll_manager.write().unsubscribe(&id); + Ok(res) + } } diff --git 
a/rpc/src/v1/impls/rpc.rs b/rpc/src/v1/impls/rpc.rs index 0c2afd57c..0a9764dc0 100644 --- a/rpc/src/v1/impls/rpc.rs +++ b/rpc/src/v1/impls/rpc.rs @@ -15,50 +15,52 @@ // along with Parity Ethereum. If not, see . //! RPC generic methods implementation. -use std::collections::BTreeMap; use jsonrpc_core::Result; +use std::collections::BTreeMap; use v1::traits::Rpc; /// RPC generic methods implementation. pub struct RpcClient { - modules: BTreeMap, - valid_apis: Vec, + modules: BTreeMap, + valid_apis: Vec, } impl RpcClient { - /// Creates new `RpcClient`. - pub fn new(modules: BTreeMap) -> Self { - // geth 1.3.6 fails upon receiving unknown api - let valid_apis = vec!["web3", "eth", "net", "personal", "rpc"]; + /// Creates new `RpcClient`. + pub fn new(modules: BTreeMap) -> Self { + // geth 1.3.6 fails upon receiving unknown api + let valid_apis = vec!["web3", "eth", "net", "personal", "rpc"]; - RpcClient { - modules, - valid_apis: valid_apis.into_iter().map(ToOwned::to_owned).collect(), - } - } + RpcClient { + modules, + valid_apis: valid_apis.into_iter().map(ToOwned::to_owned).collect(), + } + } } impl Rpc for RpcClient { - fn rpc_modules(&self) -> Result> { - let modules = self.modules.iter() - .fold(BTreeMap::new(), |mut map, (k, v)| { - map.insert(k.to_owned(), v.to_owned()); - map - }); + fn rpc_modules(&self) -> Result> { + let modules = self + .modules + .iter() + .fold(BTreeMap::new(), |mut map, (k, v)| { + map.insert(k.to_owned(), v.to_owned()); + map + }); - Ok(modules) - } + Ok(modules) + } - fn modules(&self) -> Result> { - let modules = self.modules.iter() - .filter(|&(k, _v)| { - self.valid_apis.contains(k) - }) - .fold(BTreeMap::new(), |mut map, (k, v)| { - map.insert(k.to_owned(), v.to_owned()); - map - }); + fn modules(&self) -> Result> { + let modules = self + .modules + .iter() + .filter(|&(k, _v)| self.valid_apis.contains(k)) + .fold(BTreeMap::new(), |mut map, (k, v)| { + map.insert(k.to_owned(), v.to_owned()); + map + }); - Ok(modules) - } + 
Ok(modules) + } } diff --git a/rpc/src/v1/impls/secretstore.rs b/rpc/src/v1/impls/secretstore.rs index b6526b85d..188caaeb8 100644 --- a/rpc/src/v1/impls/secretstore.rs +++ b/rpc/src/v1/impls/secretstore.rs @@ -16,83 +16,108 @@ //! SecretStore-specific rpc implementation. -use std::collections::BTreeSet; -use std::sync::Arc; +use std::{collections::BTreeSet, sync::Arc}; use accounts::AccountProvider; use crypto::DEFAULT_MAC; use ethereum_types::{H160, H256, H512}; use ethkey::Secret; -use jsonrpc_core::Result; -use v1::helpers::errors; -use v1::helpers::secretstore::{generate_document_key, encrypt_document, - decrypt_document, decrypt_document_with_shadow, ordered_servers_keccak}; -use v1::traits::SecretStore; -use v1::types::{Bytes, EncryptedDocumentKey}; use ethkey::Password; +use jsonrpc_core::Result; +use v1::{ + helpers::{ + errors, + secretstore::{ + decrypt_document, decrypt_document_with_shadow, encrypt_document, + generate_document_key, ordered_servers_keccak, + }, + }, + traits::SecretStore, + types::{Bytes, EncryptedDocumentKey}, +}; /// Parity implementation. 
pub struct SecretStoreClient { - accounts: Arc, + accounts: Arc, } impl SecretStoreClient { - /// Creates new SecretStoreClient - pub fn new(store: &Arc) -> Self { - SecretStoreClient { - accounts: store.clone(), - } - } + /// Creates new SecretStoreClient + pub fn new(store: &Arc) -> Self { + SecretStoreClient { + accounts: store.clone(), + } + } - /// Decrypt public key using account' private key - fn decrypt_key(&self, address: H160, password: Password, key: Bytes) -> Result> { - self.accounts.decrypt(address.into(), Some(password), &DEFAULT_MAC, &key.0) - .map_err(|e| errors::account("Could not decrypt key.", e)) - } + /// Decrypt public key using account' private key + fn decrypt_key(&self, address: H160, password: Password, key: Bytes) -> Result> { + self.accounts + .decrypt(address.into(), Some(password), &DEFAULT_MAC, &key.0) + .map_err(|e| errors::account("Could not decrypt key.", e)) + } - /// Decrypt secret key using account' private key - fn decrypt_secret(&self, address: H160, password: Password, key: Bytes) -> Result { - self.decrypt_key(address, password, key) - .and_then(|s| Secret::from_unsafe_slice(&s).map_err(|e| errors::account("invalid secret", e))) - } + /// Decrypt secret key using account' private key + fn decrypt_secret(&self, address: H160, password: Password, key: Bytes) -> Result { + self.decrypt_key(address, password, key).and_then(|s| { + Secret::from_unsafe_slice(&s).map_err(|e| errors::account("invalid secret", e)) + }) + } } impl SecretStore for SecretStoreClient { - fn generate_document_key(&self, address: H160, password: Password, server_key_public: H512) -> Result { - let account_public = self.accounts.account_public(address.into(), &password) - .map_err(|e| errors::account("Could not read account public.", e))?; - generate_document_key(account_public, server_key_public.into()) - } + fn generate_document_key( + &self, + address: H160, + password: Password, + server_key_public: H512, + ) -> Result { + let account_public = self + 
.accounts + .account_public(address.into(), &password) + .map_err(|e| errors::account("Could not read account public.", e))?; + generate_document_key(account_public, server_key_public.into()) + } - fn encrypt(&self, address: H160, password: Password, key: Bytes, data: Bytes) -> Result { - encrypt_document(self.decrypt_key(address, password, key)?, data.0) - .map(Into::into) - } + fn encrypt(&self, address: H160, password: Password, key: Bytes, data: Bytes) -> Result { + encrypt_document(self.decrypt_key(address, password, key)?, data.0).map(Into::into) + } - fn decrypt(&self, address: H160, password: Password, key: Bytes, data: Bytes) -> Result { - decrypt_document(self.decrypt_key(address, password, key)?, data.0) - .map(Into::into) - } + fn decrypt(&self, address: H160, password: Password, key: Bytes, data: Bytes) -> Result { + decrypt_document(self.decrypt_key(address, password, key)?, data.0).map(Into::into) + } - fn shadow_decrypt(&self, address: H160, password: Password, decrypted_secret: H512, common_point: H512, decrypt_shadows: Vec, data: Bytes) -> Result { - let mut shadows = Vec::with_capacity(decrypt_shadows.len()); - for decrypt_shadow in decrypt_shadows { - shadows.push(self.decrypt_secret(address.clone(), password.clone(), decrypt_shadow)?); - } + fn shadow_decrypt( + &self, + address: H160, + password: Password, + decrypted_secret: H512, + common_point: H512, + decrypt_shadows: Vec, + data: Bytes, + ) -> Result { + let mut shadows = Vec::with_capacity(decrypt_shadows.len()); + for decrypt_shadow in decrypt_shadows { + shadows.push(self.decrypt_secret(address.clone(), password.clone(), decrypt_shadow)?); + } - decrypt_document_with_shadow(decrypted_secret.into(), common_point.into(), shadows, data.0) - .map(Into::into) - } + decrypt_document_with_shadow( + decrypted_secret.into(), + common_point.into(), + shadows, + data.0, + ) + .map(Into::into) + } - fn servers_set_hash(&self, servers_set: BTreeSet) -> Result { - 
Ok(ordered_servers_keccak(servers_set)) - } + fn servers_set_hash(&self, servers_set: BTreeSet) -> Result { + Ok(ordered_servers_keccak(servers_set)) + } - fn sign_raw_hash(&self, address: H160, password: Password, raw_hash: H256) -> Result { - self.accounts - .sign(address.into(), Some(password), raw_hash.into()) - .map(|s| Bytes::new((*s).to_vec())) - .map_err(|e| errors::account("Could not sign raw hash.", e)) - } + fn sign_raw_hash(&self, address: H160, password: Password, raw_hash: H256) -> Result { + self.accounts + .sign(address.into(), Some(password), raw_hash.into()) + .map(|s| Bytes::new((*s).to_vec())) + .map_err(|e| errors::account("Could not sign raw hash.", e)) + } } diff --git a/rpc/src/v1/impls/signer.rs b/rpc/src/v1/impls/signer.rs index 4edac1144..3d14853e2 100644 --- a/rpc/src/v1/impls/signer.rs +++ b/rpc/src/v1/impls/signer.rs @@ -23,250 +23,346 @@ use ethkey; use parity_runtime::Executor; use parking_lot::Mutex; use rlp::Rlp; -use types::transaction::{SignedTransaction, PendingTransaction}; +use types::transaction::{PendingTransaction, SignedTransaction}; -use jsonrpc_core::{Result, BoxFuture, Error}; -use jsonrpc_core::futures::{future, Future, IntoFuture}; -use jsonrpc_core::futures::future::Either; -use jsonrpc_pubsub::{SubscriptionId, typed::{Sink, Subscriber}}; -use v1::helpers::deprecated::{self, DeprecationNotice}; -use v1::helpers::dispatch::{self, Dispatcher, WithToken, eth_data_hash}; -use v1::helpers::{errors, ConfirmationPayload, FilledTransactionRequest, Subscribers}; -use v1::helpers::external_signer::{SigningQueue, SignerService}; -use v1::metadata::Metadata; -use v1::traits::Signer; -use v1::types::{TransactionModification, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken, Bytes}; +use jsonrpc_core::{ + futures::{future, future::Either, Future, IntoFuture}, + BoxFuture, Error, Result, +}; +use jsonrpc_pubsub::{ + typed::{Sink, Subscriber}, + SubscriptionId, +}; +use v1::{ + helpers::{ + deprecated::{self, 
DeprecationNotice}, + dispatch::{self, eth_data_hash, Dispatcher, WithToken}, + errors, + external_signer::{SignerService, SigningQueue}, + ConfirmationPayload, FilledTransactionRequest, Subscribers, + }, + metadata::Metadata, + traits::Signer, + types::{ + Bytes, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken, + TransactionModification, + }, +}; /// Transactions confirmation (personal) rpc implementation. pub struct SignerClient { - signer: Arc, - accounts: Arc, - dispatcher: D, - subscribers: Arc>>>>, - deprecation_notice: DeprecationNotice, + signer: Arc, + accounts: Arc, + dispatcher: D, + subscribers: Arc>>>>, + deprecation_notice: DeprecationNotice, } impl SignerClient { - /// Create new instance of signer client. - pub fn new( - accounts: Arc, - dispatcher: D, - signer: &Arc, - executor: Executor, - ) -> Self { - let subscribers = Arc::new(Mutex::new(Subscribers::default())); - let subs = Arc::downgrade(&subscribers); - let s = Arc::downgrade(signer); - signer.queue().on_event(move |_event| { - if let (Some(s), Some(subs)) = (s.upgrade(), subs.upgrade()) { - let requests = s.requests().into_iter().map(Into::into).collect::>(); - for subscription in subs.lock().values() { - let subscription: &Sink<_> = subscription; - executor.spawn(subscription - .notify(Ok(requests.clone())) - .map(|_| ()) - .map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e)) - ); - } - } - }); + /// Create new instance of signer client. 
+ pub fn new( + accounts: Arc, + dispatcher: D, + signer: &Arc, + executor: Executor, + ) -> Self { + let subscribers = Arc::new(Mutex::new(Subscribers::default())); + let subs = Arc::downgrade(&subscribers); + let s = Arc::downgrade(signer); + signer.queue().on_event(move |_event| { + if let (Some(s), Some(subs)) = (s.upgrade(), subs.upgrade()) { + let requests = s + .requests() + .into_iter() + .map(Into::into) + .collect::>(); + for subscription in subs.lock().values() { + let subscription: &Sink<_> = subscription; + executor.spawn( + subscription + .notify(Ok(requests.clone())) + .map(|_| ()) + .map_err( + |e| warn!(target: "rpc", "Unable to send notification: {}", e), + ), + ); + } + } + }); - SignerClient { - signer: signer.clone(), - accounts: accounts.clone(), - dispatcher, - subscribers, - deprecation_notice: Default::default(), - } - } + SignerClient { + signer: signer.clone(), + accounts: accounts.clone(), + dispatcher, + subscribers, + deprecation_notice: Default::default(), + } + } - fn confirm_internal(&self, id: U256, modification: TransactionModification, f: F) -> BoxFuture> where - F: FnOnce(D, &Arc, ConfirmationPayload) -> T, - T: IntoFuture, Error=Error>, - T::Future: Send + 'static - { - let dispatcher = self.dispatcher.clone(); - let signer = self.signer.clone(); + fn confirm_internal( + &self, + id: U256, + modification: TransactionModification, + f: F, + ) -> BoxFuture> + where + F: FnOnce(D, &Arc, ConfirmationPayload) -> T, + T: IntoFuture, Error = Error>, + T::Future: Send + 'static, + { + let dispatcher = self.dispatcher.clone(); + let signer = self.signer.clone(); - Box::new(signer.take(&id).map(|sender| { - let mut payload = sender.request.payload.clone(); - // Modify payload - if let ConfirmationPayload::SendTransaction(ref mut request) = payload { - if let Some(sender) = modification.sender { - request.from = sender; - // Altering sender should always reset the nonce. 
- request.nonce = None; - } - if let Some(gas_price) = modification.gas_price { - request.gas_price = gas_price; - } - if let Some(gas) = modification.gas { - request.gas = gas; - } - if let Some(ref condition) = modification.condition { - request.condition = condition.clone().map(Into::into); - } - } - let fut = f(dispatcher, &self.accounts, payload); - Either::A(fut.into_future().then(move |result| { - // Execute - if let Ok(ref response) = result { - signer.request_confirmed(sender, Ok((*response).clone())); - } else { - signer.request_untouched(sender); - } + Box::new( + signer + .take(&id) + .map(|sender| { + let mut payload = sender.request.payload.clone(); + // Modify payload + if let ConfirmationPayload::SendTransaction(ref mut request) = payload { + if let Some(sender) = modification.sender { + request.from = sender; + // Altering sender should always reset the nonce. + request.nonce = None; + } + if let Some(gas_price) = modification.gas_price { + request.gas_price = gas_price; + } + if let Some(gas) = modification.gas { + request.gas = gas; + } + if let Some(ref condition) = modification.condition { + request.condition = condition.clone().map(Into::into); + } + } + let fut = f(dispatcher, &self.accounts, payload); + Either::A(fut.into_future().then(move |result| { + // Execute + if let Ok(ref response) = result { + signer.request_confirmed(sender, Ok((*response).clone())); + } else { + signer.request_untouched(sender); + } - result - })) - }) - .unwrap_or_else(|| Either::B(future::err(errors::invalid_params("Unknown RequestID", id))))) - } + result + })) + }) + .unwrap_or_else(|| { + Either::B(future::err(errors::invalid_params("Unknown RequestID", id))) + }), + ) + } - fn verify_transaction(bytes: Bytes, request: FilledTransactionRequest, process: F) -> Result where - F: FnOnce(PendingTransaction) -> Result, - { - let signed_transaction = Rlp::new(&bytes.0).as_val().map_err(errors::rlp)?; - let signed_transaction = 
SignedTransaction::new(signed_transaction).map_err(|e| errors::invalid_params("Invalid signature.", e))?; - let sender = signed_transaction.sender(); + fn verify_transaction( + bytes: Bytes, + request: FilledTransactionRequest, + process: F, + ) -> Result + where + F: FnOnce(PendingTransaction) -> Result, + { + let signed_transaction = Rlp::new(&bytes.0).as_val().map_err(errors::rlp)?; + let signed_transaction = SignedTransaction::new(signed_transaction) + .map_err(|e| errors::invalid_params("Invalid signature.", e))?; + let sender = signed_transaction.sender(); - // Verification - let sender_matches = sender == request.from; - let data_matches = signed_transaction.data == request.data; - let value_matches = signed_transaction.value == request.value; - let nonce_matches = match request.nonce { - Some(nonce) => signed_transaction.nonce == nonce, - None => true, - }; + // Verification + let sender_matches = sender == request.from; + let data_matches = signed_transaction.data == request.data; + let value_matches = signed_transaction.value == request.value; + let nonce_matches = match request.nonce { + Some(nonce) => signed_transaction.nonce == nonce, + None => true, + }; - // Dispatch if everything is ok - if sender_matches && data_matches && value_matches && nonce_matches { - let pending_transaction = PendingTransaction::new(signed_transaction, request.condition.map(Into::into)); - process(pending_transaction) - } else { - let mut error = Vec::new(); - if !sender_matches { error.push("from") } - if !data_matches { error.push("data") } - if !value_matches { error.push("value") } - if !nonce_matches { error.push("nonce") } + // Dispatch if everything is ok + if sender_matches && data_matches && value_matches && nonce_matches { + let pending_transaction = + PendingTransaction::new(signed_transaction, request.condition.map(Into::into)); + process(pending_transaction) + } else { + let mut error = Vec::new(); + if !sender_matches { + error.push("from") + } + if 
!data_matches { + error.push("data") + } + if !value_matches { + error.push("value") + } + if !nonce_matches { + error.push("nonce") + } - Err(errors::invalid_params("Sent transaction does not match the request.", error)) - } - } + Err(errors::invalid_params( + "Sent transaction does not match the request.", + error, + )) + } + } } impl Signer for SignerClient { - type Metadata = Metadata; + type Metadata = Metadata; - fn requests_to_confirm(&self) -> Result> { - self.deprecation_notice.print("signer_requestsToConfirm", deprecated::msgs::ACCOUNTS); + fn requests_to_confirm(&self) -> Result> { + self.deprecation_notice + .print("signer_requestsToConfirm", deprecated::msgs::ACCOUNTS); - Ok(self.signer.requests() - .into_iter() - .map(Into::into) - .collect() - ) - } + Ok(self.signer.requests().into_iter().map(Into::into).collect()) + } - // TODO [ToDr] TransactionModification is redundant for some calls - // might be better to replace it in future - fn confirm_request(&self, id: U256, modification: TransactionModification, pass: String) - -> BoxFuture - { - self.deprecation_notice.print("signer_confirmRequest", deprecated::msgs::ACCOUNTS); + // TODO [ToDr] TransactionModification is redundant for some calls + // might be better to replace it in future + fn confirm_request( + &self, + id: U256, + modification: TransactionModification, + pass: String, + ) -> BoxFuture { + self.deprecation_notice + .print("signer_confirmRequest", deprecated::msgs::ACCOUNTS); - Box::new(self.confirm_internal(id, modification, move |dis, accounts, payload| { - dispatch::execute(dis, accounts, payload, dispatch::SignWith::Password(pass.into())) - }).map(dispatch::WithToken::into_value)) - } + Box::new( + self.confirm_internal(id, modification, move |dis, accounts, payload| { + dispatch::execute( + dis, + accounts, + payload, + dispatch::SignWith::Password(pass.into()), + ) + }) + .map(dispatch::WithToken::into_value), + ) + } - fn confirm_request_with_token(&self, id: U256, modification: 
TransactionModification, token: String) - -> BoxFuture - { - self.deprecation_notice.print("signer_confirmRequestWithToken", deprecated::msgs::ACCOUNTS); + fn confirm_request_with_token( + &self, + id: U256, + modification: TransactionModification, + token: String, + ) -> BoxFuture { + self.deprecation_notice + .print("signer_confirmRequestWithToken", deprecated::msgs::ACCOUNTS); - Box::new(self.confirm_internal(id, modification, move |dis, accounts, payload| { - dispatch::execute(dis, accounts, payload, dispatch::SignWith::Token(token.into())) - }).and_then(|v| match v { - WithToken::No(_) => Err(errors::internal("Unexpected response without token.", "")), - WithToken::Yes(response, token) => Ok(ConfirmationResponseWithToken { - result: response, - token, - }), - })) - } + Box::new( + self.confirm_internal(id, modification, move |dis, accounts, payload| { + dispatch::execute( + dis, + accounts, + payload, + dispatch::SignWith::Token(token.into()), + ) + }) + .and_then(|v| match v { + WithToken::No(_) => Err(errors::internal("Unexpected response without token.", "")), + WithToken::Yes(response, token) => Ok(ConfirmationResponseWithToken { + result: response, + token, + }), + }), + ) + } - fn confirm_request_raw(&self, id: U256, bytes: Bytes) -> Result { - self.deprecation_notice.print("signer_confirmRequestRaw", deprecated::msgs::ACCOUNTS); + fn confirm_request_raw(&self, id: U256, bytes: Bytes) -> Result { + self.deprecation_notice + .print("signer_confirmRequestRaw", deprecated::msgs::ACCOUNTS); - self.signer.take(&id).map(|sender| { - let payload = sender.request.payload.clone(); - let result = match payload { - ConfirmationPayload::SendTransaction(request) => { - Self::verify_transaction(bytes, request, |pending_transaction| { - self.dispatcher.dispatch_transaction(pending_transaction) - .map(Into::into) - .map(ConfirmationResponse::SendTransaction) - }) - }, - ConfirmationPayload::SignTransaction(request) => { - Self::verify_transaction(bytes, request, 
|pending_transaction| { - let rich = self.dispatcher.enrich(pending_transaction.transaction); - Ok(ConfirmationResponse::SignTransaction(rich)) - }) - }, - ConfirmationPayload::EthSignMessage(address, data) => { - let expected_hash = eth_data_hash(data); - let signature = ethkey::Signature::from_electrum(&bytes.0); - match ethkey::verify_address(&address, &signature, &expected_hash) { - Ok(true) => Ok(ConfirmationResponse::Signature(bytes.0.as_slice().into())), - Ok(false) => Err(errors::invalid_params("Sender address does not match the signature.", ())), - Err(err) => Err(errors::invalid_params("Invalid signature received.", err)), - } - }, - ConfirmationPayload::SignMessage(address, hash) => { - let signature = ethkey::Signature::from_electrum(&bytes.0); - match ethkey::verify_address(&address, &signature, &hash) { - Ok(true) => Ok(ConfirmationResponse::Signature(bytes.0.as_slice().into())), - Ok(false) => Err(errors::invalid_params("Sender address does not match the signature.", ())), - Err(err) => Err(errors::invalid_params("Invalid signature received.", err)), - } - }, - ConfirmationPayload::Decrypt(_address, _data) => { - // TODO [ToDr]: Decrypt can we verify if the answer is correct? 
- Ok(ConfirmationResponse::Decrypt(bytes)) - }, - }; - if let Ok(ref response) = result { - self.signer.request_confirmed(sender, Ok(response.clone())); - } else { - self.signer.request_untouched(sender); - } - result - }).unwrap_or_else(|| Err(errors::invalid_params("Unknown RequestID", id))) - } + self.signer + .take(&id) + .map(|sender| { + let payload = sender.request.payload.clone(); + let result = match payload { + ConfirmationPayload::SendTransaction(request) => { + Self::verify_transaction(bytes, request, |pending_transaction| { + self.dispatcher + .dispatch_transaction(pending_transaction) + .map(Into::into) + .map(ConfirmationResponse::SendTransaction) + }) + } + ConfirmationPayload::SignTransaction(request) => { + Self::verify_transaction(bytes, request, |pending_transaction| { + let rich = self.dispatcher.enrich(pending_transaction.transaction); + Ok(ConfirmationResponse::SignTransaction(rich)) + }) + } + ConfirmationPayload::EthSignMessage(address, data) => { + let expected_hash = eth_data_hash(data); + let signature = ethkey::Signature::from_electrum(&bytes.0); + match ethkey::verify_address(&address, &signature, &expected_hash) { + Ok(true) => { + Ok(ConfirmationResponse::Signature(bytes.0.as_slice().into())) + } + Ok(false) => Err(errors::invalid_params( + "Sender address does not match the signature.", + (), + )), + Err(err) => { + Err(errors::invalid_params("Invalid signature received.", err)) + } + } + } + ConfirmationPayload::SignMessage(address, hash) => { + let signature = ethkey::Signature::from_electrum(&bytes.0); + match ethkey::verify_address(&address, &signature, &hash) { + Ok(true) => { + Ok(ConfirmationResponse::Signature(bytes.0.as_slice().into())) + } + Ok(false) => Err(errors::invalid_params( + "Sender address does not match the signature.", + (), + )), + Err(err) => { + Err(errors::invalid_params("Invalid signature received.", err)) + } + } + } + ConfirmationPayload::Decrypt(_address, _data) => { + // TODO [ToDr]: Decrypt can we 
verify if the answer is correct? + Ok(ConfirmationResponse::Decrypt(bytes)) + } + }; + if let Ok(ref response) = result { + self.signer.request_confirmed(sender, Ok(response.clone())); + } else { + self.signer.request_untouched(sender); + } + result + }) + .unwrap_or_else(|| Err(errors::invalid_params("Unknown RequestID", id))) + } - fn reject_request(&self, id: U256) -> Result { - self.deprecation_notice.print("signer_rejectRequest", deprecated::msgs::ACCOUNTS); + fn reject_request(&self, id: U256) -> Result { + self.deprecation_notice + .print("signer_rejectRequest", deprecated::msgs::ACCOUNTS); - let res = self.signer.take(&id).map(|sender| self.signer.request_rejected(sender)); - Ok(res.is_some()) - } + let res = self + .signer + .take(&id) + .map(|sender| self.signer.request_rejected(sender)); + Ok(res.is_some()) + } - fn generate_token(&self) -> Result { - self.deprecation_notice.print("signer_generateAuthorizationToken", deprecated::msgs::ACCOUNTS); + fn generate_token(&self) -> Result { + self.deprecation_notice.print( + "signer_generateAuthorizationToken", + deprecated::msgs::ACCOUNTS, + ); - self.signer.generate_token() - .map_err(errors::token) - } + self.signer.generate_token().map_err(errors::token) + } - fn subscribe_pending(&self, _meta: Self::Metadata, sub: Subscriber>) { - self.deprecation_notice.print("signer_subscribePending", deprecated::msgs::ACCOUNTS); + fn subscribe_pending(&self, _meta: Self::Metadata, sub: Subscriber>) { + self.deprecation_notice + .print("signer_subscribePending", deprecated::msgs::ACCOUNTS); - self.subscribers.lock().push(sub) - } + self.subscribers.lock().push(sub) + } - fn unsubscribe_pending(&self, _: Option, id: SubscriptionId) -> Result { - let res = self.subscribers.lock().remove(&id).is_some(); - Ok(res) - } + fn unsubscribe_pending(&self, _: Option, id: SubscriptionId) -> Result { + let res = self.subscribers.lock().remove(&id).is_some(); + Ok(res) + } } diff --git a/rpc/src/v1/impls/signing.rs 
b/rpc/src/v1/impls/signing.rs index 38ca6d59c..28457bd0d 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -16,34 +16,34 @@ //! Signing RPC implementation. +use parking_lot::Mutex; use std::sync::Arc; use transient_hashmap::TransientHashMap; -use parking_lot::Mutex; use ethereum_types::{H160, H256, H520, U256}; -use jsonrpc_core::{BoxFuture, Result, Error}; -use jsonrpc_core::futures::{future, Future, Poll, Async}; -use jsonrpc_core::futures::future::Either; - -use v1::helpers::deprecated::{self, DeprecationNotice}; -use v1::helpers::dispatch::{self, Dispatcher}; -use v1::helpers::errors; -use v1::helpers::external_signer::{ - SignerService, SigningQueue, - ConfirmationReceiver as RpcConfirmationReceiver, - ConfirmationResult as RpcConfirmationResult, +use jsonrpc_core::{ + futures::{future, future::Either, Async, Future, Poll}, + BoxFuture, Error, Result, }; -use v1::metadata::Metadata; -use v1::traits::{EthSigning, ParitySigning}; -use v1::types::{ - Bytes as RpcBytes, - Either as RpcEither, - RichRawTransaction as RpcRichRawTransaction, - TransactionRequest as RpcTransactionRequest, - ConfirmationPayload as RpcConfirmationPayload, - ConfirmationResponse as RpcConfirmationResponse, - Origin, + +use v1::{ + helpers::{ + deprecated::{self, DeprecationNotice}, + dispatch::{self, Dispatcher}, + errors, + external_signer::{ + ConfirmationReceiver as RpcConfirmationReceiver, + ConfirmationResult as RpcConfirmationResult, SignerService, SigningQueue, + }, + }, + metadata::Metadata, + traits::{EthSigning, ParitySigning}, + types::{ + Bytes as RpcBytes, ConfirmationPayload as RpcConfirmationPayload, + ConfirmationResponse as RpcConfirmationResponse, Either as RpcEither, Origin, + RichRawTransaction as RpcRichRawTransaction, TransactionRequest as RpcTransactionRequest, + }, }; use parity_runtime::Executor; @@ -53,200 +53,255 @@ const MAX_PENDING_DURATION_SEC: u32 = 60; #[must_use = "futures do nothing unless polled"] enum DispatchResult { - 
Future(U256, RpcConfirmationReceiver), - Value(RpcConfirmationResponse), + Future(U256, RpcConfirmationReceiver), + Value(RpcConfirmationResponse), } impl Future for DispatchResult { - type Item = RpcConfirmationResponse; - type Error = Error; + type Item = RpcConfirmationResponse; + type Error = Error; - fn poll(&mut self) -> Poll { - match *self { - DispatchResult::Value(ref response) => Ok(Async::Ready(response.clone())), - DispatchResult::Future(_uid, ref mut future) => try_ready!(future.poll()).map(Async::Ready), - } - } + fn poll(&mut self) -> Poll { + match *self { + DispatchResult::Value(ref response) => Ok(Async::Ready(response.clone())), + DispatchResult::Future(_uid, ref mut future) => { + try_ready!(future.poll()).map(Async::Ready) + } + } + } } -fn schedule(executor: Executor, - confirmations: Arc>>>, - id: U256, - future: RpcConfirmationReceiver) { - { - let mut confirmations = confirmations.lock(); - confirmations.insert(id, None); - } +fn schedule( + executor: Executor, + confirmations: Arc>>>, + id: U256, + future: RpcConfirmationReceiver, +) { + { + let mut confirmations = confirmations.lock(); + confirmations.insert(id, None); + } - let future = future.then(move |result| { - let mut confirmations = confirmations.lock(); - confirmations.prune(); - let result = result.and_then(|response| response); - confirmations.insert(id, Some(result)); - Ok(()) - }); - executor.spawn(future); + let future = future.then(move |result| { + let mut confirmations = confirmations.lock(); + confirmations.prune(); + let result = result.and_then(|response| response); + confirmations.insert(id, Some(result)); + Ok(()) + }); + executor.spawn(future); } /// Implementation of functions that require signing when no trusted signer is used. 
pub struct SigningQueueClient { - signer: Arc, - accounts: Arc, - dispatcher: D, - executor: Executor, - // None here means that the request hasn't yet been confirmed - confirmations: Arc>>>, - deprecation_notice: DeprecationNotice, + signer: Arc, + accounts: Arc, + dispatcher: D, + executor: Executor, + // None here means that the request hasn't yet been confirmed + confirmations: Arc>>>, + deprecation_notice: DeprecationNotice, } impl SigningQueueClient { - /// Creates a new signing queue client given shared signing queue. - pub fn new(signer: &Arc, dispatcher: D, executor: Executor, accounts: &Arc) -> Self { - SigningQueueClient { - signer: signer.clone(), - accounts: accounts.clone(), - dispatcher, - executor, - confirmations: Arc::new(Mutex::new(TransientHashMap::new(MAX_PENDING_DURATION_SEC))), - deprecation_notice: Default::default(), - } - } + /// Creates a new signing queue client given shared signing queue. + pub fn new( + signer: &Arc, + dispatcher: D, + executor: Executor, + accounts: &Arc, + ) -> Self { + SigningQueueClient { + signer: signer.clone(), + accounts: accounts.clone(), + dispatcher, + executor, + confirmations: Arc::new(Mutex::new(TransientHashMap::new(MAX_PENDING_DURATION_SEC))), + deprecation_notice: Default::default(), + } + } - fn dispatch(&self, payload: RpcConfirmationPayload, origin: Origin) -> BoxFuture { - let default_account = self.accounts.default_account(); - let accounts = self.accounts.clone(); - let dispatcher = self.dispatcher.clone(); - let signer = self.signer.clone(); - Box::new(dispatch::from_rpc(payload, default_account, &dispatcher) - .and_then(move |payload| { - let sender = payload.sender(); - if accounts.is_unlocked(&sender) { - Either::A(dispatch::execute(dispatcher, &accounts, payload, dispatch::SignWith::Nothing) - .map(dispatch::WithToken::into_value) - .map(DispatchResult::Value)) - } else { - Either::B(future::done( - signer.add_request(payload, origin) - .map(|(id, future)| DispatchResult::Future(id, future)) 
- .map_err(|_| errors::request_rejected_limit()) - )) - } - })) - } + fn dispatch( + &self, + payload: RpcConfirmationPayload, + origin: Origin, + ) -> BoxFuture { + let default_account = self.accounts.default_account(); + let accounts = self.accounts.clone(); + let dispatcher = self.dispatcher.clone(); + let signer = self.signer.clone(); + Box::new( + dispatch::from_rpc(payload, default_account, &dispatcher).and_then(move |payload| { + let sender = payload.sender(); + if accounts.is_unlocked(&sender) { + Either::A( + dispatch::execute( + dispatcher, + &accounts, + payload, + dispatch::SignWith::Nothing, + ) + .map(dispatch::WithToken::into_value) + .map(DispatchResult::Value), + ) + } else { + Either::B(future::done( + signer + .add_request(payload, origin) + .map(|(id, future)| DispatchResult::Future(id, future)) + .map_err(|_| errors::request_rejected_limit()), + )) + } + }), + ) + } } impl ParitySigning for SigningQueueClient { - type Metadata = Metadata; + type Metadata = Metadata; - fn compose_transaction(&self, _meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture { - let default_account = self.accounts.default_account(); - Box::new(self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into)) - } + fn compose_transaction( + &self, + _meta: Metadata, + transaction: RpcTransactionRequest, + ) -> BoxFuture { + let default_account = self.accounts.default_account(); + Box::new( + self.dispatcher + .fill_optional_fields(transaction.into(), default_account, true) + .map(Into::into), + ) + } - fn post_sign(&self, meta: Metadata, address: H160, data: RpcBytes) -> BoxFuture> { - self.deprecation_notice.print("parity_postSign", deprecated::msgs::ACCOUNTS); - let executor = self.executor.clone(); - let confirmations = self.confirmations.clone(); + fn post_sign( + &self, + meta: Metadata, + address: H160, + data: RpcBytes, + ) -> BoxFuture> { + self.deprecation_notice + .print("parity_postSign", deprecated::msgs::ACCOUNTS); 
+ let executor = self.executor.clone(); + let confirmations = self.confirmations.clone(); - Box::new(self.dispatch( - RpcConfirmationPayload::EthSignMessage((address, data).into()), - meta.origin - ).map(move |result| match result { - DispatchResult::Value(v) => RpcEither::Or(v), - DispatchResult::Future(id, future) => { - schedule(executor, confirmations, id, future); - RpcEither::Either(id) - }, - })) - } + Box::new( + self.dispatch( + RpcConfirmationPayload::EthSignMessage((address, data).into()), + meta.origin, + ) + .map(move |result| match result { + DispatchResult::Value(v) => RpcEither::Or(v), + DispatchResult::Future(id, future) => { + schedule(executor, confirmations, id, future); + RpcEither::Either(id) + } + }), + ) + } - fn post_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture> { - self.deprecation_notice.print("parity_postTransaction", deprecated::msgs::ACCOUNTS); - let executor = self.executor.clone(); - let confirmations = self.confirmations.clone(); + fn post_transaction( + &self, + meta: Metadata, + request: RpcTransactionRequest, + ) -> BoxFuture> { + self.deprecation_notice + .print("parity_postTransaction", deprecated::msgs::ACCOUNTS); + let executor = self.executor.clone(); + let confirmations = self.confirmations.clone(); - Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.origin) - .map(|result| match result { - DispatchResult::Value(v) => RpcEither::Or(v), - DispatchResult::Future(id, future) => { - schedule(executor, confirmations, id, future); - RpcEither::Either(id) - }, - })) - } + Box::new( + self.dispatch( + RpcConfirmationPayload::SendTransaction(request), + meta.origin, + ) + .map(|result| match result { + DispatchResult::Value(v) => RpcEither::Or(v), + DispatchResult::Future(id, future) => { + schedule(executor, confirmations, id, future); + RpcEither::Either(id) + } + }), + ) + } - fn check_request(&self, id: U256) -> Result> { - 
self.deprecation_notice.print("parity_checkRequest", deprecated::msgs::ACCOUNTS); - match self.confirmations.lock().get(&id) { - None => Err(errors::request_not_found()), // Request info has been dropped, or even never been there - Some(&None) => Ok(None), // No confirmation yet, request is known, confirmation is pending - Some(&Some(ref confirmation)) => confirmation.clone().map(Some), // Confirmation is there - } - } + fn check_request(&self, id: U256) -> Result> { + self.deprecation_notice + .print("parity_checkRequest", deprecated::msgs::ACCOUNTS); + match self.confirmations.lock().get(&id) { + None => Err(errors::request_not_found()), // Request info has been dropped, or even never been there + Some(&None) => Ok(None), // No confirmation yet, request is known, confirmation is pending + Some(&Some(ref confirmation)) => confirmation.clone().map(Some), // Confirmation is there + } + } - fn decrypt_message(&self, meta: Metadata, address: H160, data: RpcBytes) -> BoxFuture { - self.deprecation_notice.print("parity_decryptMessage", deprecated::msgs::ACCOUNTS); - let res = self.dispatch( - RpcConfirmationPayload::Decrypt((address, data).into()), - meta.origin, - ); + fn decrypt_message( + &self, + meta: Metadata, + address: H160, + data: RpcBytes, + ) -> BoxFuture { + self.deprecation_notice + .print("parity_decryptMessage", deprecated::msgs::ACCOUNTS); + let res = self.dispatch( + RpcConfirmationPayload::Decrypt((address, data).into()), + meta.origin, + ); - // when dispatch is complete - wait for result and then - Box::new(res.flatten().and_then(move |response| { - match response { - RpcConfirmationResponse::Decrypt(data) => Ok(data), - e => Err(errors::internal("Unexpected result.", e)), - } - })) - } + // when dispatch is complete - wait for result and then + Box::new(res.flatten().and_then(move |response| match response { + RpcConfirmationResponse::Decrypt(data) => Ok(data), + e => Err(errors::internal("Unexpected result.", e)), + })) + } } impl EthSigning for 
SigningQueueClient { - type Metadata = Metadata; + type Metadata = Metadata; - fn sign(&self, meta: Metadata, address: H160, data: RpcBytes) -> BoxFuture { - self.deprecation_notice.print("eth_sign", deprecated::msgs::ACCOUNTS); - let res = self.dispatch( - RpcConfirmationPayload::EthSignMessage((address, data).into()), - meta.origin, - ); + fn sign(&self, meta: Metadata, address: H160, data: RpcBytes) -> BoxFuture { + self.deprecation_notice + .print("eth_sign", deprecated::msgs::ACCOUNTS); + let res = self.dispatch( + RpcConfirmationPayload::EthSignMessage((address, data).into()), + meta.origin, + ); - Box::new(res.flatten().and_then(move |response| { - match response { - RpcConfirmationResponse::Signature(sig) => Ok(sig), - e => Err(errors::internal("Unexpected result.", e)), - } - })) - } + Box::new(res.flatten().and_then(move |response| match response { + RpcConfirmationResponse::Signature(sig) => Ok(sig), + e => Err(errors::internal("Unexpected result.", e)), + })) + } - fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { - self.deprecation_notice.print("eth_sendTransaction", deprecated::msgs::ACCOUNTS); - let res = self.dispatch( - RpcConfirmationPayload::SendTransaction(request), - meta.origin, - ); + fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { + self.deprecation_notice + .print("eth_sendTransaction", deprecated::msgs::ACCOUNTS); + let res = self.dispatch( + RpcConfirmationPayload::SendTransaction(request), + meta.origin, + ); - Box::new(res.flatten().and_then(move |response| { - match response { - RpcConfirmationResponse::SendTransaction(hash) => Ok(hash), - e => Err(errors::internal("Unexpected result.", e)), - } - })) - } + Box::new(res.flatten().and_then(move |response| match response { + RpcConfirmationResponse::SendTransaction(hash) => Ok(hash), + e => Err(errors::internal("Unexpected result.", e)), + })) + } - fn sign_transaction(&self, meta: Metadata, request: 
RpcTransactionRequest) -> BoxFuture { - self.deprecation_notice.print("eth_signTransaction", deprecated::msgs::ACCOUNTS); + fn sign_transaction( + &self, + meta: Metadata, + request: RpcTransactionRequest, + ) -> BoxFuture { + self.deprecation_notice + .print("eth_signTransaction", deprecated::msgs::ACCOUNTS); - let res = self.dispatch( - RpcConfirmationPayload::SignTransaction(request), - meta.origin, - ); + let res = self.dispatch( + RpcConfirmationPayload::SignTransaction(request), + meta.origin, + ); - Box::new(res.flatten().and_then(move |response| { - match response { - RpcConfirmationResponse::SignTransaction(tx) => Ok(tx), - e => Err(errors::internal("Unexpected result.", e)), - } - })) - } + Box::new(res.flatten().and_then(move |response| match response { + RpcConfirmationResponse::SignTransaction(tx) => Ok(tx), + e => Err(errors::internal("Unexpected result.", e)), + })) + } } diff --git a/rpc/src/v1/impls/signing_unsafe.rs b/rpc/src/v1/impls/signing_unsafe.rs index f08a9ffbe..277fc9564 100644 --- a/rpc/src/v1/impls/signing_unsafe.rs +++ b/rpc/src/v1/impls/signing_unsafe.rs @@ -19,118 +19,171 @@ use std::sync::Arc; use ethereum_types::{Address, H160, H256, H520, U256}; -use jsonrpc_core::{BoxFuture, Result}; -use jsonrpc_core::futures::{future, Future}; -use v1::helpers::{errors}; -use v1::helpers::deprecated::{self, DeprecationNotice}; -use v1::helpers::dispatch::{self, Dispatcher}; -use v1::metadata::Metadata; -use v1::traits::{EthSigning, ParitySigning}; -use v1::types::{ - Bytes as RpcBytes, - Either as RpcEither, - RichRawTransaction as RpcRichRawTransaction, - TransactionRequest as RpcTransactionRequest, - ConfirmationPayload as RpcConfirmationPayload, - ConfirmationResponse as RpcConfirmationResponse, +use jsonrpc_core::{ + futures::{future, Future}, + BoxFuture, Result, +}; +use v1::{ + helpers::{ + deprecated::{self, DeprecationNotice}, + dispatch::{self, Dispatcher}, + errors, + }, + metadata::Metadata, + traits::{EthSigning, ParitySigning}, + 
types::{ + Bytes as RpcBytes, ConfirmationPayload as RpcConfirmationPayload, + ConfirmationResponse as RpcConfirmationResponse, Either as RpcEither, + RichRawTransaction as RpcRichRawTransaction, TransactionRequest as RpcTransactionRequest, + }, }; /// Implementation of functions that require signing when no trusted signer is used. pub struct SigningUnsafeClient { - accounts: Arc, - dispatcher: D, - deprecation_notice: DeprecationNotice, + accounts: Arc, + dispatcher: D, + deprecation_notice: DeprecationNotice, } impl SigningUnsafeClient { - /// Creates new SigningUnsafeClient. - pub fn new(accounts: &Arc, dispatcher: D) -> Self { - SigningUnsafeClient { - accounts: accounts.clone(), - dispatcher, - deprecation_notice: Default::default(), - } - } + /// Creates new SigningUnsafeClient. + pub fn new(accounts: &Arc, dispatcher: D) -> Self { + SigningUnsafeClient { + accounts: accounts.clone(), + dispatcher, + deprecation_notice: Default::default(), + } + } - fn handle(&self, payload: RpcConfirmationPayload, account: Address) -> BoxFuture { - let accounts = self.accounts.clone(); + fn handle( + &self, + payload: RpcConfirmationPayload, + account: Address, + ) -> BoxFuture { + let accounts = self.accounts.clone(); - let dis = self.dispatcher.clone(); - Box::new(dispatch::from_rpc(payload, account, &dis) - .and_then(move |payload| { - dispatch::execute(dis, &accounts, payload, dispatch::SignWith::Nothing) - }) - .map(dispatch::WithToken::into_value)) - } + let dis = self.dispatcher.clone(); + Box::new( + dispatch::from_rpc(payload, account, &dis) + .and_then(move |payload| { + dispatch::execute(dis, &accounts, payload, dispatch::SignWith::Nothing) + }) + .map(dispatch::WithToken::into_value), + ) + } } -impl EthSigning for SigningUnsafeClient -{ - type Metadata = Metadata; +impl EthSigning for SigningUnsafeClient { + type Metadata = Metadata; - fn sign(&self, _: Metadata, address: H160, data: RpcBytes) -> BoxFuture { - self.deprecation_notice.print("eth_sign", 
deprecated::msgs::ACCOUNTS); - Box::new(self.handle(RpcConfirmationPayload::EthSignMessage((address, data).into()), address) - .then(|res| match res { - Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), - Err(e) => Err(e), - e => Err(errors::internal("Unexpected result", e)), - })) - } + fn sign(&self, _: Metadata, address: H160, data: RpcBytes) -> BoxFuture { + self.deprecation_notice + .print("eth_sign", deprecated::msgs::ACCOUNTS); + Box::new( + self.handle( + RpcConfirmationPayload::EthSignMessage((address, data).into()), + address, + ) + .then(|res| match res { + Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), + Err(e) => Err(e), + e => Err(errors::internal("Unexpected result", e)), + }), + ) + } - fn send_transaction(&self, _meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { - self.deprecation_notice.print("eth_sendTransaction", deprecated::msgs::ACCOUNTS); - Box::new(self.handle(RpcConfirmationPayload::SendTransaction(request), self.accounts.default_account()) - .then(|res| match res { - Ok(RpcConfirmationResponse::SendTransaction(hash)) => Ok(hash), - Err(e) => Err(e), - e => Err(errors::internal("Unexpected result", e)), - })) - } + fn send_transaction(&self, _meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { + self.deprecation_notice + .print("eth_sendTransaction", deprecated::msgs::ACCOUNTS); + Box::new( + self.handle( + RpcConfirmationPayload::SendTransaction(request), + self.accounts.default_account(), + ) + .then(|res| match res { + Ok(RpcConfirmationResponse::SendTransaction(hash)) => Ok(hash), + Err(e) => Err(e), + e => Err(errors::internal("Unexpected result", e)), + }), + ) + } - fn sign_transaction(&self, _meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { - self.deprecation_notice.print("eth_signTransaction", deprecated::msgs::ACCOUNTS); + fn sign_transaction( + &self, + _meta: Metadata, + request: RpcTransactionRequest, + ) -> BoxFuture { + self.deprecation_notice + 
.print("eth_signTransaction", deprecated::msgs::ACCOUNTS); - Box::new(self.handle(RpcConfirmationPayload::SignTransaction(request), self.accounts.default_account()) - .then(|res| match res { - Ok(RpcConfirmationResponse::SignTransaction(tx)) => Ok(tx), - Err(e) => Err(e), - e => Err(errors::internal("Unexpected result", e)), - })) - } + Box::new( + self.handle( + RpcConfirmationPayload::SignTransaction(request), + self.accounts.default_account(), + ) + .then(|res| match res { + Ok(RpcConfirmationResponse::SignTransaction(tx)) => Ok(tx), + Err(e) => Err(e), + e => Err(errors::internal("Unexpected result", e)), + }), + ) + } } impl ParitySigning for SigningUnsafeClient { - type Metadata = Metadata; + type Metadata = Metadata; - fn compose_transaction(&self, _meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture { - let accounts = self.accounts.clone(); - let default_account = accounts.default_account(); - Box::new(self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into)) - } + fn compose_transaction( + &self, + _meta: Metadata, + transaction: RpcTransactionRequest, + ) -> BoxFuture { + let accounts = self.accounts.clone(); + let default_account = accounts.default_account(); + Box::new( + self.dispatcher + .fill_optional_fields(transaction.into(), default_account, true) + .map(Into::into), + ) + } - fn decrypt_message(&self, _: Metadata, address: H160, data: RpcBytes) -> BoxFuture { - self.deprecation_notice.print("parity_decryptMessage", deprecated::msgs::ACCOUNTS); - Box::new(self.handle(RpcConfirmationPayload::Decrypt((address, data).into()), address) - .then(|res| match res { - Ok(RpcConfirmationResponse::Decrypt(data)) => Ok(data), - Err(e) => Err(e), - e => Err(errors::internal("Unexpected result", e)), - })) - } + fn decrypt_message(&self, _: Metadata, address: H160, data: RpcBytes) -> BoxFuture { + self.deprecation_notice + .print("parity_decryptMessage", deprecated::msgs::ACCOUNTS); + Box::new( + self.handle( 
+ RpcConfirmationPayload::Decrypt((address, data).into()), + address, + ) + .then(|res| match res { + Ok(RpcConfirmationResponse::Decrypt(data)) => Ok(data), + Err(e) => Err(e), + e => Err(errors::internal("Unexpected result", e)), + }), + ) + } - fn post_sign(&self, _: Metadata, _: H160, _: RpcBytes) -> BoxFuture> { - // We don't support this in non-signer mode. - Box::new(future::err(errors::signer_disabled())) - } + fn post_sign( + &self, + _: Metadata, + _: H160, + _: RpcBytes, + ) -> BoxFuture> { + // We don't support this in non-signer mode. + Box::new(future::err(errors::signer_disabled())) + } - fn post_transaction(&self, _: Metadata, _: RpcTransactionRequest) -> BoxFuture> { - // We don't support this in non-signer mode. - Box::new(future::err(errors::signer_disabled())) - } + fn post_transaction( + &self, + _: Metadata, + _: RpcTransactionRequest, + ) -> BoxFuture> { + // We don't support this in non-signer mode. + Box::new(future::err(errors::signer_disabled())) + } - fn check_request(&self, _: U256) -> Result> { - // We don't support this in non-signer mode. - Err(errors::signer_disabled()) - } + fn check_request(&self, _: U256) -> Result> { + // We don't support this in non-signer mode. 
+ Err(errors::signer_disabled()) + } } diff --git a/rpc/src/v1/impls/traces.rs b/rpc/src/v1/impls/traces.rs index a6301eda5..25bc87f4f 100644 --- a/rpc/src/v1/impls/traces.rs +++ b/rpc/src/v1/impls/traces.rs @@ -18,164 +18,252 @@ use std::sync::Arc; -use ethcore::client::{BlockChainClient, CallAnalytics, TransactionId, TraceId, StateClient, StateInfo, Call, BlockId}; +use ethcore::client::{ + BlockChainClient, BlockId, Call, CallAnalytics, StateClient, StateInfo, TraceId, TransactionId, +}; use ethereum_types::H256; use rlp::Rlp; use types::transaction::SignedTransaction; use jsonrpc_core::Result; -use v1::Metadata; -use v1::traits::Traces; -use v1::helpers::{errors, fake_sign}; -use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, - TraceResultsWithTransactionHash, TraceOptions, block_number_to_id}; +use v1::{ + helpers::{errors, fake_sign}, + traits::Traces, + types::{ + block_number_to_id, BlockNumber, Bytes, CallRequest, Index, LocalizedTrace, TraceFilter, + TraceOptions, TraceResults, TraceResultsWithTransactionHash, + }, + Metadata, +}; fn to_call_analytics(flags: TraceOptions) -> CallAnalytics { - CallAnalytics { - transaction_tracing: flags.contains(&("trace".to_owned())), - vm_tracing: flags.contains(&("vmTrace".to_owned())), - state_diffing: flags.contains(&("stateDiff".to_owned())), - } + CallAnalytics { + transaction_tracing: flags.contains(&("trace".to_owned())), + vm_tracing: flags.contains(&("vmTrace".to_owned())), + state_diffing: flags.contains(&("stateDiff".to_owned())), + } } /// Traces api implementation. pub struct TracesClient { - client: Arc, + client: Arc, } impl TracesClient { - /// Creates new Traces client. - pub fn new(client: &Arc) -> Self { - TracesClient { - client: client.clone(), - } - } + /// Creates new Traces client. 
+ pub fn new(client: &Arc) -> Self { + TracesClient { + client: client.clone(), + } + } } -impl Traces for TracesClient where - S: StateInfo + 'static, - C: BlockChainClient + StateClient + Call + 'static +impl Traces for TracesClient +where + S: StateInfo + 'static, + C: BlockChainClient + StateClient + Call + 'static, { - type Metadata = Metadata; + type Metadata = Metadata; - fn filter(&self, filter: TraceFilter) -> Result>> { - Ok(self.client.filter_traces(filter.into()) - .map(|traces| traces.into_iter().map(LocalizedTrace::from).collect())) - } + fn filter(&self, filter: TraceFilter) -> Result>> { + Ok(self + .client + .filter_traces(filter.into()) + .map(|traces| traces.into_iter().map(LocalizedTrace::from).collect())) + } - fn block_traces(&self, block_number: BlockNumber) -> Result>> { - let id = match block_number { - BlockNumber::Pending => return Ok(None), - num => block_number_to_id(num) - }; + fn block_traces(&self, block_number: BlockNumber) -> Result>> { + let id = match block_number { + BlockNumber::Pending => return Ok(None), + num => block_number_to_id(num), + }; - Ok(self.client.block_traces(id) - .map(|traces| traces.into_iter().map(LocalizedTrace::from).collect())) - } + Ok(self + .client + .block_traces(id) + .map(|traces| traces.into_iter().map(LocalizedTrace::from).collect())) + } - fn transaction_traces(&self, transaction_hash: H256) -> Result>> { - Ok(self.client.transaction_traces(TransactionId::Hash(transaction_hash)) - .map(|traces| traces.into_iter().map(LocalizedTrace::from).collect())) - } + fn transaction_traces(&self, transaction_hash: H256) -> Result>> { + Ok(self + .client + .transaction_traces(TransactionId::Hash(transaction_hash)) + .map(|traces| traces.into_iter().map(LocalizedTrace::from).collect())) + } - fn trace(&self, transaction_hash: H256, address: Vec) -> Result> { - let id = TraceId { - transaction: TransactionId::Hash(transaction_hash), - address: address.into_iter().map(|i| i.value()).collect() - }; + fn 
trace(&self, transaction_hash: H256, address: Vec) -> Result> { + let id = TraceId { + transaction: TransactionId::Hash(transaction_hash), + address: address.into_iter().map(|i| i.value()).collect(), + }; - Ok(self.client.trace(id) - .map(LocalizedTrace::from)) - } + Ok(self.client.trace(id).map(LocalizedTrace::from)) + } - fn call(&self, request: CallRequest, flags: TraceOptions, block: Option) -> Result { - let block = block.unwrap_or_default(); + fn call( + &self, + request: CallRequest, + flags: TraceOptions, + block: Option, + ) -> Result { + let block = block.unwrap_or_default(); - let request = CallRequest::into(request); - let signed = fake_sign::sign_call(request)?; + let request = CallRequest::into(request); + let signed = fake_sign::sign_call(request)?; - let id = match block { - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, + let id = match block { + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => return Err(errors::invalid_params("`BlockNumber::Pending` is not supported", ())), - }; + BlockNumber::Pending => { + return Err(errors::invalid_params( + "`BlockNumber::Pending` is not supported", + (), + )) + } + }; - let mut state = self.client.state_at(id).ok_or_else(errors::state_pruned)?; - let header = self.client.block_header(id).ok_or_else(errors::state_pruned)?; + let mut state = self.client.state_at(id).ok_or_else(errors::state_pruned)?; + let header = self + .client + .block_header(id) + .ok_or_else(errors::state_pruned)?; - self.client.call(&signed, to_call_analytics(flags), &mut state, &header.decode().map_err(errors::decode)?) 
- .map(TraceResults::from) - .map_err(errors::call) - } + self.client + .call( + &signed, + to_call_analytics(flags), + &mut state, + &header.decode().map_err(errors::decode)?, + ) + .map(TraceResults::from) + .map_err(errors::call) + } - fn call_many(&self, requests: Vec<(CallRequest, TraceOptions)>, block: Option) -> Result> { - let block = block.unwrap_or_default(); + fn call_many( + &self, + requests: Vec<(CallRequest, TraceOptions)>, + block: Option, + ) -> Result> { + let block = block.unwrap_or_default(); - let requests = requests.into_iter() - .map(|(request, flags)| { - let request = CallRequest::into(request); - let signed = fake_sign::sign_call(request)?; - Ok((signed, to_call_analytics(flags))) - }) - .collect::>>()?; + let requests = requests + .into_iter() + .map(|(request, flags)| { + let request = CallRequest::into(request); + let signed = fake_sign::sign_call(request)?; + Ok((signed, to_call_analytics(flags))) + }) + .collect::>>()?; - let id = match block { - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, + let id = match block { + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => return Err(errors::invalid_params("`BlockNumber::Pending` is not supported", ())), - }; + BlockNumber::Pending => { + return Err(errors::invalid_params( + "`BlockNumber::Pending` is not supported", + (), + )) + } + }; - let mut state = self.client.state_at(id).ok_or_else(errors::state_pruned)?; - let header = self.client.block_header(id).ok_or_else(errors::state_pruned)?; + let mut state = self.client.state_at(id).ok_or_else(errors::state_pruned)?; + let header = self + .client + .block_header(id) + .ok_or_else(errors::state_pruned)?; - self.client.call_many(&requests, &mut state, &header.decode().map_err(errors::decode)?) 
- .map(|results| results.into_iter().map(TraceResults::from).collect()) - .map_err(errors::call) - } + self.client + .call_many( + &requests, + &mut state, + &header.decode().map_err(errors::decode)?, + ) + .map(|results| results.into_iter().map(TraceResults::from).collect()) + .map_err(errors::call) + } - fn raw_transaction(&self, raw_transaction: Bytes, flags: TraceOptions, block: Option) -> Result { - let block = block.unwrap_or_default(); + fn raw_transaction( + &self, + raw_transaction: Bytes, + flags: TraceOptions, + block: Option, + ) -> Result { + let block = block.unwrap_or_default(); - let tx = Rlp::new(&raw_transaction.into_vec()).as_val().map_err(|e| errors::invalid_params("Transaction is not valid RLP", e))?; - let signed = SignedTransaction::new(tx).map_err(errors::transaction)?; + let tx = Rlp::new(&raw_transaction.into_vec()) + .as_val() + .map_err(|e| errors::invalid_params("Transaction is not valid RLP", e))?; + let signed = SignedTransaction::new(tx).map_err(errors::transaction)?; - let id = match block { - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, + let id = match block { + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => return Err(errors::invalid_params("`BlockNumber::Pending` is not supported", ())), - }; + BlockNumber::Pending => { + return Err(errors::invalid_params( + "`BlockNumber::Pending` is not supported", + (), + )) + } + }; - let mut state = self.client.state_at(id).ok_or_else(errors::state_pruned)?; - let header = self.client.block_header(id).ok_or_else(errors::state_pruned)?; + let mut state = self.client.state_at(id).ok_or_else(errors::state_pruned)?; + let header = self + .client + .block_header(id) + .ok_or_else(errors::state_pruned)?; - self.client.call(&signed, to_call_analytics(flags), &mut state, 
&header.decode().map_err(errors::decode)?) - .map(TraceResults::from) - .map_err(errors::call) - } + self.client + .call( + &signed, + to_call_analytics(flags), + &mut state, + &header.decode().map_err(errors::decode)?, + ) + .map(TraceResults::from) + .map_err(errors::call) + } - fn replay_transaction(&self, transaction_hash: H256, flags: TraceOptions) -> Result { - self.client.replay(TransactionId::Hash(transaction_hash), to_call_analytics(flags)) - .map(TraceResults::from) - .map_err(errors::call) - } + fn replay_transaction( + &self, + transaction_hash: H256, + flags: TraceOptions, + ) -> Result { + self.client + .replay( + TransactionId::Hash(transaction_hash), + to_call_analytics(flags), + ) + .map(TraceResults::from) + .map_err(errors::call) + } - fn replay_block_transactions(&self, block_number: BlockNumber, flags: TraceOptions) -> Result> { - let id = match block_number { - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, + fn replay_block_transactions( + &self, + block_number: BlockNumber, + flags: TraceOptions, + ) -> Result> { + let id = match block_number { + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => return Err(errors::invalid_params("`BlockNumber::Pending` is not supported", ())), - }; + BlockNumber::Pending => { + return Err(errors::invalid_params( + "`BlockNumber::Pending` is not supported", + (), + )) + } + }; - self.client.replay_block_transactions(id, to_call_analytics(flags)) - .map(|results| results.map(TraceResultsWithTransactionHash::from).collect()) - .map_err(errors::call) - } + self.client + .replay_block_transactions(id, to_call_analytics(flags)) + .map(|results| results.map(TraceResultsWithTransactionHash::from).collect()) + .map_err(errors::call) + } } diff --git a/rpc/src/v1/impls/web3.rs b/rpc/src/v1/impls/web3.rs index 
5ffda51b6..2b0fcece3 100644 --- a/rpc/src/v1/impls/web3.rs +++ b/rpc/src/v1/impls/web3.rs @@ -18,20 +18,19 @@ use ethereum_types::H256; use hash::keccak; use jsonrpc_core::Result; +use v1::{traits::Web3, types::Bytes}; use version::version; -use v1::traits::Web3; -use v1::types::Bytes; /// Web3 rpc implementation. #[derive(Default)] pub struct Web3Client; impl Web3 for Web3Client { - fn client_version(&self) -> Result { - Ok(version().to_owned().replacen("/", "//", 1)) - } + fn client_version(&self) -> Result { + Ok(version().to_owned().replacen("/", "//", 1)) + } - fn sha3(&self, data: Bytes) -> Result { - Ok(keccak(&data.0)) - } + fn sha3(&self, data: Bytes) -> Result { + Ok(keccak(&data.0)) + } } diff --git a/rpc/src/v1/informant.rs b/rpc/src/v1/informant.rs index 945378390..32817e232 100644 --- a/rpc/src/v1/informant.rs +++ b/rpc/src/v1/informant.rs @@ -16,15 +16,19 @@ //! RPC Requests Statistics -use std::fmt; -use std::sync::Arc; -use std::sync::atomic::{self, AtomicUsize}; -use std::time; -use parity_runtime; use jsonrpc_core as core; use jsonrpc_core::futures::future::Either; use order_stat; +use parity_runtime; use parking_lot::RwLock; +use std::{ + fmt, + sync::{ + atomic::{self, AtomicUsize}, + Arc, + }, + time, +}; pub use self::parity_runtime::Executor; @@ -32,284 +36,291 @@ const RATE_SECONDS: usize = 10; const STATS_SAMPLES: usize = 60; struct RateCalculator { - era: time::Instant, - samples: [u16; RATE_SECONDS], + era: time::Instant, + samples: [u16; RATE_SECONDS], } impl fmt::Debug for RateCalculator { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{} req/s", self.rate()) - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{} req/s", self.rate()) + } } impl Default for RateCalculator { - fn default() -> Self { - RateCalculator { - era: time::Instant::now(), - samples: [0; RATE_SECONDS], - } - } + fn default() -> Self { + RateCalculator { + era: time::Instant::now(), + samples: [0; RATE_SECONDS], 
+ } + } } impl RateCalculator { - fn elapsed(&self) -> u64 { - self.era.elapsed().as_secs() - } + fn elapsed(&self) -> u64 { + self.era.elapsed().as_secs() + } - pub fn tick(&mut self) -> u16 { - if self.elapsed() >= RATE_SECONDS as u64 { - self.era = time::Instant::now(); - self.samples[0] = 0; - } + pub fn tick(&mut self) -> u16 { + if self.elapsed() >= RATE_SECONDS as u64 { + self.era = time::Instant::now(); + self.samples[0] = 0; + } - let pos = self.elapsed() as usize % RATE_SECONDS; - let next = (pos + 1) % RATE_SECONDS; - self.samples[next] = 0; - self.samples[pos] = self.samples[pos].saturating_add(1); - self.samples[pos] - } + let pos = self.elapsed() as usize % RATE_SECONDS; + let next = (pos + 1) % RATE_SECONDS; + self.samples[next] = 0; + self.samples[pos] = self.samples[pos].saturating_add(1); + self.samples[pos] + } - fn current_rate(&self) -> usize { - let now = match self.elapsed() { - i if i >= RATE_SECONDS as u64 => RATE_SECONDS, - i => i as usize + 1, - }; - let sum: usize = self.samples[0..now].iter().map(|x| *x as usize).sum(); - sum / now - } + fn current_rate(&self) -> usize { + let now = match self.elapsed() { + i if i >= RATE_SECONDS as u64 => RATE_SECONDS, + i => i as usize + 1, + }; + let sum: usize = self.samples[0..now].iter().map(|x| *x as usize).sum(); + sum / now + } - pub fn rate(&self) -> usize { - if self.elapsed() > RATE_SECONDS as u64 { - 0 - } else { - self.current_rate() - } - } + pub fn rate(&self) -> usize { + if self.elapsed() > RATE_SECONDS as u64 { + 0 + } else { + self.current_rate() + } + } } struct StatsCalculator { - filled: bool, - idx: usize, - samples: [T; STATS_SAMPLES], + filled: bool, + idx: usize, + samples: [T; STATS_SAMPLES], } impl Default for StatsCalculator { - fn default() -> Self { - StatsCalculator { - filled: false, - idx: 0, - samples: [T::default(); STATS_SAMPLES], - } - } + fn default() -> Self { + StatsCalculator { + filled: false, + idx: 0, + samples: [T::default(); STATS_SAMPLES], + } + } } impl 
fmt::Debug for StatsCalculator { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "median: {} ms", self.approximated_median()) - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "median: {} ms", self.approximated_median()) + } } impl StatsCalculator { - pub fn add(&mut self, sample: T) { - self.idx += 1; - if self.idx >= STATS_SAMPLES { - self.filled = true; - self.idx = 0; - } + pub fn add(&mut self, sample: T) { + self.idx += 1; + if self.idx >= STATS_SAMPLES { + self.filled = true; + self.idx = 0; + } - self.samples[self.idx] = sample; - } + self.samples[self.idx] = sample; + } - /// Returns aproximate of media - pub fn approximated_median(&self) -> T { - let mut copy = [T::default(); STATS_SAMPLES]; - copy.copy_from_slice(&self.samples); - let bound = if self.filled { STATS_SAMPLES } else { self.idx + 1 }; + /// Returns aproximate of media + pub fn approximated_median(&self) -> T { + let mut copy = [T::default(); STATS_SAMPLES]; + copy.copy_from_slice(&self.samples); + let bound = if self.filled { + STATS_SAMPLES + } else { + self.idx + 1 + }; - let (_, &mut median) = order_stat::median_of_medians(&mut copy[0..bound]); - median - } + let (_, &mut median) = order_stat::median_of_medians(&mut copy[0..bound]); + median + } } /// RPC Statistics #[derive(Default, Debug)] pub struct RpcStats { - requests: RwLock, - roundtrips: RwLock>, - active_sessions: AtomicUsize, + requests: RwLock, + roundtrips: RwLock>, + active_sessions: AtomicUsize, } impl RpcStats { - /// Count session opened - pub fn open_session(&self) { - self.active_sessions.fetch_add(1, atomic::Ordering::SeqCst); - } + /// Count session opened + pub fn open_session(&self) { + self.active_sessions.fetch_add(1, atomic::Ordering::SeqCst); + } - /// Count session closed. - /// Silently overflows if closing unopened session. - pub fn close_session(&self) { - self.active_sessions.fetch_sub(1, atomic::Ordering::SeqCst); - } + /// Count session closed. 
+ /// Silently overflows if closing unopened session. + pub fn close_session(&self) { + self.active_sessions.fetch_sub(1, atomic::Ordering::SeqCst); + } - /// Count request. Returns number of requests in current second. - pub fn count_request(&self) -> u16 { - self.requests.write().tick() - } + /// Count request. Returns number of requests in current second. + pub fn count_request(&self) -> u16 { + self.requests.write().tick() + } - /// Add roundtrip time (microseconds) - pub fn add_roundtrip(&self, microseconds: u128) { - self.roundtrips.write().add(microseconds) - } + /// Add roundtrip time (microseconds) + pub fn add_roundtrip(&self, microseconds: u128) { + self.roundtrips.write().add(microseconds) + } - /// Returns number of open sessions - pub fn sessions(&self) -> usize { - self.active_sessions.load(atomic::Ordering::Relaxed) - } + /// Returns number of open sessions + pub fn sessions(&self) -> usize { + self.active_sessions.load(atomic::Ordering::Relaxed) + } - /// Returns requests rate - pub fn requests_rate(&self) -> usize { - self.requests.read().rate() - } + /// Returns requests rate + pub fn requests_rate(&self) -> usize { + self.requests.read().rate() + } - /// Returns approximated roundtrip in microseconds - pub fn approximated_roundtrip(&self) -> u128 { - self.roundtrips.read().approximated_median() - } + /// Returns approximated roundtrip in microseconds + pub fn approximated_roundtrip(&self) -> u128 { + self.roundtrips.read().approximated_median() + } } /// Notifies about RPC activity. pub trait ActivityNotifier: Send + Sync + 'static { - /// Activity on RPC interface - fn active(&self); + /// Activity on RPC interface + fn active(&self); } /// Stats-counting RPC middleware pub struct Middleware { - stats: Arc, - notifier: T, + stats: Arc, + notifier: T, } impl Middleware { - /// Create new Middleware with stats counter and activity notifier. 
- pub fn new(stats: Arc, notifier: T) -> Self { - Middleware { - stats, - notifier, - } - } + /// Create new Middleware with stats counter and activity notifier. + pub fn new(stats: Arc, notifier: T) -> Self { + Middleware { stats, notifier } + } } impl core::Middleware for Middleware { - type Future = core::FutureResponse; - type CallFuture = core::middleware::NoopCallFuture; + type Future = core::FutureResponse; + type CallFuture = core::middleware::NoopCallFuture; - fn on_request(&self, request: core::Request, meta: M, process: F) -> Either where - F: FnOnce(core::Request, M) -> X, - X: core::futures::Future, Error=()> + Send + 'static, - { - let start = time::Instant::now(); + fn on_request( + &self, + request: core::Request, + meta: M, + process: F, + ) -> Either + where + F: FnOnce(core::Request, M) -> X, + X: core::futures::Future, Error = ()> + Send + 'static, + { + let start = time::Instant::now(); - self.notifier.active(); - self.stats.count_request(); + self.notifier.active(); + self.stats.count_request(); - let id = match request { - core::Request::Single(core::Call::MethodCall(ref call)) => Some(call.id.clone()), - _ => None, - }; - let stats = self.stats.clone(); + let id = match request { + core::Request::Single(core::Call::MethodCall(ref call)) => Some(call.id.clone()), + _ => None, + }; + let stats = self.stats.clone(); - let future = process(request, meta).map(move |res| { - let time = start.elapsed().as_micros(); - if time > 10_000 { - debug!(target: "rpc", "[{:?}] Took {}ms", id, time / 1_000); - } - stats.add_roundtrip(time); - res - }); + let future = process(request, meta).map(move |res| { + let time = start.elapsed().as_micros(); + if time > 10_000 { + debug!(target: "rpc", "[{:?}] Took {}ms", id, time / 1_000); + } + stats.add_roundtrip(time); + res + }); - Either::A(Box::new(future)) - } + Either::A(Box::new(future)) + } } /// Client Notifier pub struct ClientNotifier { - /// Client - pub client: Arc<::ethcore::client::Client>, + /// 
Client + pub client: Arc<::ethcore::client::Client>, } impl ActivityNotifier for ClientNotifier { - fn active(&self) { - self.client.keep_alive() - } + fn active(&self) { + self.client.keep_alive() + } } #[cfg(test)] mod tests { - use super::{RateCalculator, StatsCalculator, RpcStats}; + use super::{RateCalculator, RpcStats, StatsCalculator}; - #[test] - fn should_calculate_rate() { - // given - let mut avg = RateCalculator::default(); + #[test] + fn should_calculate_rate() { + // given + let mut avg = RateCalculator::default(); - // when - avg.tick(); - avg.tick(); - avg.tick(); - let rate = avg.rate(); + // when + avg.tick(); + avg.tick(); + avg.tick(); + let rate = avg.rate(); - // then - assert_eq!(rate, 3usize); - } + // then + assert_eq!(rate, 3usize); + } - #[test] - fn should_approximate_median() { - // given - let mut stats = StatsCalculator::default(); - stats.add(5); - stats.add(100); - stats.add(3); - stats.add(15); - stats.add(20); - stats.add(6); + #[test] + fn should_approximate_median() { + // given + let mut stats = StatsCalculator::default(); + stats.add(5); + stats.add(100); + stats.add(3); + stats.add(15); + stats.add(20); + stats.add(6); - // when - let median = stats.approximated_median(); + // when + let median = stats.approximated_median(); - // then - assert_eq!(median, 5); - } + // then + assert_eq!(median, 5); + } - #[test] - fn should_count_rpc_stats() { - // given - let stats = RpcStats::default(); - assert_eq!(stats.sessions(), 0); - assert_eq!(stats.requests_rate(), 0); - assert_eq!(stats.approximated_roundtrip(), 0); + #[test] + fn should_count_rpc_stats() { + // given + let stats = RpcStats::default(); + assert_eq!(stats.sessions(), 0); + assert_eq!(stats.requests_rate(), 0); + assert_eq!(stats.approximated_roundtrip(), 0); - // when - stats.open_session(); - stats.close_session(); - stats.open_session(); - stats.count_request(); - stats.count_request(); - stats.add_roundtrip(125); + // when + stats.open_session(); + 
stats.close_session(); + stats.open_session(); + stats.count_request(); + stats.count_request(); + stats.add_roundtrip(125); - // then - assert_eq!(stats.sessions(), 1); - assert_eq!(stats.requests_rate(), 2); - assert_eq!(stats.approximated_roundtrip(), 125); - } + // then + assert_eq!(stats.sessions(), 1); + assert_eq!(stats.requests_rate(), 2); + assert_eq!(stats.approximated_roundtrip(), 125); + } - #[test] - fn should_be_sync_and_send() { - let stats = RpcStats::default(); - is_sync(stats); - } + #[test] + fn should_be_sync_and_send() { + let stats = RpcStats::default(); + is_sync(stats); + } - fn is_sync(x: F) { - drop(x) - } + fn is_sync(x: F) { + drop(x) + } } diff --git a/rpc/src/v1/metadata.rs b/rpc/src/v1/metadata.rs index 3224bd2c0..74b52c743 100644 --- a/rpc/src/v1/metadata.rs +++ b/rpc/src/v1/metadata.rs @@ -18,22 +18,22 @@ use std::sync::Arc; use jsonrpc_core; -use jsonrpc_pubsub::{Session, PubSubMetadata}; +use jsonrpc_pubsub::{PubSubMetadata, Session}; use v1::types::Origin; /// RPC methods metadata. #[derive(Clone, Default, Debug)] pub struct Metadata { - /// Request origin - pub origin: Origin, - /// Request PubSub Session - pub session: Option>, + /// Request origin + pub origin: Origin, + /// Request PubSub Session + pub session: Option>, } impl jsonrpc_core::Metadata for Metadata {} impl PubSubMetadata for Metadata { - fn session(&self) -> Option> { - self.session.clone() - } + fn session(&self) -> Option> { + self.session.clone() + } } diff --git a/rpc/src/v1/mod.rs b/rpc/src/v1/mod.rs index 8b8afacdb..da8e9520e 100644 --- a/rpc/src/v1/mod.rs +++ b/rpc/src/v1/mod.rs @@ -21,37 +21,45 @@ // short for "try_boxfuture" // unwrap a result, returning a BoxFuture<_, Err> on failure. macro_rules! 
try_bf { - ($res: expr) => { - match $res { - Ok(val) => val, - Err(e) => return Box::new(::jsonrpc_core::futures::future::err(e.into())), - } - } + ($res: expr) => { + match $res { + Ok(val) => val, + Err(e) => return Box::new(::jsonrpc_core::futures::future::err(e.into())), + } + }; } #[macro_use] mod helpers; mod impls; -mod types; #[cfg(test)] mod tests; +mod types; pub mod extractors; pub mod informant; pub mod metadata; pub mod traits; -pub use self::traits::{Debug, Eth, EthFilter, EthPubSub, EthSigning, Net, Parity, ParityAccountsInfo, ParityAccounts, ParitySet, ParitySetAccounts, ParitySigning, Personal, PubSub, Private, Rpc, SecretStore, Signer, Traces, Web3}; -pub use self::impls::*; -pub use self::helpers::{NetworkSettings, block_import, dispatch}; -pub use self::metadata::Metadata; -pub use self::types::Origin; -pub use self::extractors::{RpcExtractor, WsExtractor, WsStats, WsDispatcher}; +pub use self::{ + extractors::{RpcExtractor, WsDispatcher, WsExtractor, WsStats}, + helpers::{block_import, dispatch, NetworkSettings}, + impls::*, + metadata::Metadata, + traits::{ + Debug, Eth, EthFilter, EthPubSub, EthSigning, Net, Parity, ParityAccounts, + ParityAccountsInfo, ParitySet, ParitySetAccounts, ParitySigning, Personal, Private, PubSub, + Rpc, SecretStore, Signer, Traces, Web3, + }, + types::Origin, +}; /// Signer utilities pub mod signer { - #[cfg(any(test, feature = "accounts"))] - pub use super::helpers::engine_signer::EngineSigner; - pub use super::helpers::external_signer::{SignerService, ConfirmationsQueue}; - pub use super::types::{ConfirmationRequest, TransactionModification, TransactionCondition}; + #[cfg(any(test, feature = "accounts"))] + pub use super::helpers::engine_signer::EngineSigner; + pub use super::{ + helpers::external_signer::{ConfirmationsQueue, SignerService}, + types::{ConfirmationRequest, TransactionCondition, TransactionModification}, + }; } diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 
9c2273746..b2f6911db 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -15,20 +15,19 @@ // along with Parity Ethereum. If not, see . //! rpc integration tests. -use std::env; -use std::sync::Arc; +use std::{env, sync::Arc}; use accounts::AccountProvider; -use ethcore::client::{BlockChainClient, Client, ClientConfig, ChainInfo, ImportBlock}; -use ethcore::ethereum; -use ethcore::miner::Miner; -use ethcore::spec::{Genesis, Spec}; -use ethcore::test_helpers; -use ethcore::verification::VerifierType; -use ethcore::verification::queue::kind::blocks::Unverified; +use ethcore::{ + client::{BlockChainClient, ChainInfo, Client, ClientConfig, ImportBlock}, + ethereum, + miner::Miner, + spec::{Genesis, Spec}, + test_helpers, + verification::{queue::kind::blocks::Unverified, VerifierType}, +}; use ethereum_types::{Address, H256, U256}; -use ethjson::blockchain::BlockChain; -use ethjson::spec::ForkSpec; +use ethjson::{blockchain::BlockChain, spec::ForkSpec}; use io::IoChannel; use miner::external::ExternalMiner; use parity_runtime::Runtime; @@ -36,236 +35,265 @@ use parking_lot::Mutex; use types::ids::BlockId; use jsonrpc_core::IoHandler; -use v1::helpers::dispatch::{self, FullDispatcher}; -use v1::helpers::nonce; -use v1::impls::{EthClient, EthClientOptions, SigningUnsafeClient}; -use v1::metadata::Metadata; -use v1::tests::helpers::{TestSnapshotService, TestSyncProvider, Config}; -use v1::traits::{Eth, EthSigning}; +use v1::{ + helpers::{ + dispatch::{self, FullDispatcher}, + nonce, + }, + impls::{EthClient, EthClientOptions, SigningUnsafeClient}, + metadata::Metadata, + tests::helpers::{Config, TestSnapshotService, TestSyncProvider}, + traits::{Eth, EthSigning}, +}; fn account_provider() -> Arc { - Arc::new(AccountProvider::transient_provider()) + Arc::new(AccountProvider::transient_provider()) } fn sync_provider() -> Arc { - Arc::new(TestSyncProvider::new(Config { - network_id: 3, - num_peers: 120, - })) + Arc::new(TestSyncProvider::new(Config { + 
network_id: 3, + num_peers: 120, + })) } fn miner_service(spec: &Spec) -> Arc { - Arc::new(Miner::new_for_tests(spec, None)) + Arc::new(Miner::new_for_tests(spec, None)) } fn snapshot_service() -> Arc { - Arc::new(TestSnapshotService::new()) + Arc::new(TestSnapshotService::new()) } fn make_spec(chain: &BlockChain) -> Spec { - let genesis = Genesis::from(chain.genesis()); - let mut spec = ethereum::new_frontier_test(); - let state = chain.pre_state.clone().into(); - spec.set_genesis_state(state).expect("unable to set genesis state"); - spec.overwrite_genesis_params(genesis); - assert!(spec.is_state_root_valid()); - spec + let genesis = Genesis::from(chain.genesis()); + let mut spec = ethereum::new_frontier_test(); + let state = chain.pre_state.clone().into(); + spec.set_genesis_state(state) + .expect("unable to set genesis state"); + spec.overwrite_genesis_params(genesis); + assert!(spec.is_state_root_valid()); + spec } struct EthTester { - _miner: Arc, - _runtime: Runtime, - _snapshot: Arc, - accounts: Arc, - client: Arc, - handler: IoHandler, + _miner: Arc, + _runtime: Runtime, + _snapshot: Arc, + accounts: Arc, + client: Arc, + handler: IoHandler, } impl EthTester { - fn from_chain(chain: &BlockChain) -> Self { + fn from_chain(chain: &BlockChain) -> Self { + let tester = if ::ethjson::blockchain::Engine::NoProof == chain.engine { + let mut config = ClientConfig::default(); + config.verifier_type = VerifierType::CanonNoSeal; + config.check_seal = false; + Self::from_spec_conf(make_spec(chain), config) + } else { + Self::from_spec(make_spec(chain)) + }; - let tester = if ::ethjson::blockchain::Engine::NoProof == chain.engine { - let mut config = ClientConfig::default(); - config.verifier_type = VerifierType::CanonNoSeal; - config.check_seal = false; - Self::from_spec_conf(make_spec(chain), config) - } else { - Self::from_spec(make_spec(chain)) - }; + for b in chain.blocks_rlp() { + if let Ok(block) = Unverified::from_rlp(b) { + let _ = 
tester.client.import_block(block); + tester.client.flush_queue(); + tester.client.import_verified_blocks(); + } + } - for b in chain.blocks_rlp() { - if let Ok(block) = Unverified::from_rlp(b) { - let _ = tester.client.import_block(block); - tester.client.flush_queue(); - tester.client.import_verified_blocks(); - } - } + tester.client.flush_queue(); - tester.client.flush_queue(); + assert!(tester.client.chain_info().best_block_hash == chain.best_block.clone().into()); + tester + } - assert!(tester.client.chain_info().best_block_hash == chain.best_block.clone().into()); - tester - } + fn from_spec(spec: Spec) -> Self { + let config = ClientConfig::default(); + Self::from_spec_conf(spec, config) + } - fn from_spec(spec: Spec) -> Self { - let config = ClientConfig::default(); - Self::from_spec_conf(spec, config) - } + fn from_spec_conf(spec: Spec, config: ClientConfig) -> Self { + let runtime = Runtime::with_thread_count(1); + let account_provider = account_provider(); + let ap = account_provider.clone(); + let accounts = Arc::new(move || ap.accounts().unwrap_or_default()) as _; + let miner_service = miner_service(&spec); + let snapshot_service = snapshot_service(); - fn from_spec_conf(spec: Spec, config: ClientConfig) -> Self { - let runtime = Runtime::with_thread_count(1); - let account_provider = account_provider(); - let ap = account_provider.clone(); - let accounts = Arc::new(move || ap.accounts().unwrap_or_default()) as _; - let miner_service = miner_service(&spec); - let snapshot_service = snapshot_service(); + let client = Client::new( + config, + &spec, + test_helpers::new_db(), + miner_service.clone(), + IoChannel::disconnected(), + ) + .unwrap(); + let sync_provider = sync_provider(); + let external_miner = Arc::new(ExternalMiner::default()); - let client = Client::new( - config, - &spec, - test_helpers::new_db(), - miner_service.clone(), - IoChannel::disconnected(), - ).unwrap(); - let sync_provider = sync_provider(); - let external_miner = 
Arc::new(ExternalMiner::default()); + let eth_client = EthClient::new( + &client, + &snapshot_service, + &sync_provider, + &accounts, + &miner_service, + &external_miner, + EthClientOptions { + pending_nonce_from_queue: false, + allow_pending_receipt_query: true, + send_block_number_in_get_work: true, + gas_price_percentile: 50, + allow_experimental_rpcs: true, + allow_missing_blocks: false, + no_ancient_blocks: false, + }, + ); - let eth_client = EthClient::new( - &client, - &snapshot_service, - &sync_provider, - &accounts, - &miner_service, - &external_miner, - EthClientOptions { - pending_nonce_from_queue: false, - allow_pending_receipt_query: true, - send_block_number_in_get_work: true, - gas_price_percentile: 50, - allow_experimental_rpcs: true, - allow_missing_blocks: false, - no_ancient_blocks: false - }, - ); + let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); - let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); + let dispatcher = + FullDispatcher::new(client.clone(), miner_service.clone(), reservations, 50); + let signer = Arc::new(dispatch::Signer::new(account_provider.clone())) as _; + let eth_sign = SigningUnsafeClient::new(&signer, dispatcher); - let dispatcher = FullDispatcher::new(client.clone(), miner_service.clone(), reservations, 50); - let signer = Arc::new(dispatch::Signer::new(account_provider.clone())) as _; - let eth_sign = SigningUnsafeClient::new( - &signer, - dispatcher, - ); + let mut handler = IoHandler::default(); + handler.extend_with(eth_client.to_delegate()); + handler.extend_with(eth_sign.to_delegate()); - let mut handler = IoHandler::default(); - handler.extend_with(eth_client.to_delegate()); - handler.extend_with(eth_sign.to_delegate()); - - EthTester { - _miner: miner_service, - _runtime: runtime, - _snapshot: snapshot_service, - accounts: account_provider, - client: client, - handler: handler, - } - } + EthTester { + _miner: miner_service, + _runtime: 
runtime, + _snapshot: snapshot_service, + accounts: account_provider, + client: client, + handler: handler, + } + } } #[test] fn harness_works() { - let chain: BlockChain = extract_chain!("BlockchainTests/bcWalletTest/wallet2outOf3txs"); - let _ = EthTester::from_chain(&chain); + let chain: BlockChain = extract_chain!("BlockchainTests/bcWalletTest/wallet2outOf3txs"); + let _ = EthTester::from_chain(&chain); } #[test] fn eth_get_balance() { - let chain = extract_chain!("BlockchainTests/bcWalletTest/wallet2outOf3txs"); - let tester = EthTester::from_chain(&chain); - // final account state - let req_latest = r#"{ + let chain = extract_chain!("BlockchainTests/bcWalletTest/wallet2outOf3txs"); + let tester = EthTester::from_chain(&chain); + // final account state + let req_latest = r#"{ "jsonrpc": "2.0", "method": "eth_getBalance", "params": ["0xaaaf5374fce5edbc8e2a8697c15331677e6ebaaa", "latest"], "id": 1 }"#; - let res_latest = r#"{"jsonrpc":"2.0","result":"0x9","id":1}"#.to_owned(); - assert_eq!(tester.handler.handle_request_sync(req_latest).unwrap(), res_latest); + let res_latest = r#"{"jsonrpc":"2.0","result":"0x9","id":1}"#.to_owned(); + assert_eq!( + tester.handler.handle_request_sync(req_latest).unwrap(), + res_latest + ); - // non-existant account - let req_new_acc = r#"{ + // non-existant account + let req_new_acc = r#"{ "jsonrpc": "2.0", "method": "eth_getBalance", "params": ["0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"], "id": 3 }"#; - let res_new_acc = r#"{"jsonrpc":"2.0","result":"0x0","id":3}"#.to_owned(); - assert_eq!(tester.handler.handle_request_sync(req_new_acc).unwrap(), res_new_acc); + let res_new_acc = r#"{"jsonrpc":"2.0","result":"0x0","id":3}"#.to_owned(); + assert_eq!( + tester.handler.handle_request_sync(req_new_acc).unwrap(), + res_new_acc + ); } #[test] fn eth_get_proof() { - let chain = extract_chain!("BlockchainTests/bcWalletTest/wallet2outOf3txs"); - let tester = EthTester::from_chain(&chain); - // final account state - let req_latest = 
r#"{ + let chain = extract_chain!("BlockchainTests/bcWalletTest/wallet2outOf3txs"); + let tester = EthTester::from_chain(&chain); + // final account state + let req_latest = r#"{ "jsonrpc": "2.0", "method": "eth_getProof", "params": ["0xaaaf5374fce5edbc8e2a8697c15331677e6ebaaa", [], "latest"], "id": 1 }"#; - let res_latest = r#","address":"0xaaaf5374fce5edbc8e2a8697c15331677e6ebaaa","balance":"0x9","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","nonce":"0x0","storageHash":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","storageProof":[]},"id":1}"#.to_owned(); - assert!(tester.handler.handle_request_sync(req_latest).unwrap().to_string().ends_with(res_latest.as_str())); + let res_latest = r#","address":"0xaaaf5374fce5edbc8e2a8697c15331677e6ebaaa","balance":"0x9","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","nonce":"0x0","storageHash":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","storageProof":[]},"id":1}"#.to_owned(); + assert!(tester + .handler + .handle_request_sync(req_latest) + .unwrap() + .to_string() + .ends_with(res_latest.as_str())); - // non-existant account - let req_new_acc = r#"{ + // non-existant account + let req_new_acc = r#"{ "jsonrpc": "2.0", "method": "eth_getProof", "params": ["0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",[],"latest"], "id": 3 }"#; - let res_new_acc = r#","address":"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","balance":"0x0","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","nonce":"0x0","storageHash":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","storageProof":[]},"id":3}"#.to_owned(); - assert!(tester.handler.handle_request_sync(req_new_acc).unwrap().to_string().ends_with(res_new_acc.as_str())); + let res_new_acc = 
r#","address":"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","balance":"0x0","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","nonce":"0x0","storageHash":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","storageProof":[]},"id":3}"#.to_owned(); + assert!(tester + .handler + .handle_request_sync(req_new_acc) + .unwrap() + .to_string() + .ends_with(res_new_acc.as_str())); } #[test] fn eth_block_number() { - let chain = extract_chain!("BlockchainTests/bcGasPricerTest/RPC_API_Test"); - let tester = EthTester::from_chain(&chain); - let req_number = r#"{ + let chain = extract_chain!("BlockchainTests/bcGasPricerTest/RPC_API_Test"); + let tester = EthTester::from_chain(&chain); + let req_number = r#"{ "jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1 }"#; - let res_number = r#"{"jsonrpc":"2.0","result":"0x20","id":1}"#.to_owned(); - assert_eq!(tester.handler.handle_request_sync(req_number).unwrap(), res_number); + let res_number = r#"{"jsonrpc":"2.0","result":"0x20","id":1}"#.to_owned(); + assert_eq!( + tester.handler.handle_request_sync(req_number).unwrap(), + res_number + ); } #[test] fn eth_get_block() { - let chain = extract_chain!("BlockchainTests/bcGasPricerTest/RPC_API_Test"); - let tester = EthTester::from_chain(&chain); - let req_block = r#"{"method":"eth_getBlockByNumber","params":["0x0",false],"id":1,"jsonrpc":"2.0"}"#; + let chain = extract_chain!("BlockchainTests/bcGasPricerTest/RPC_API_Test"); + let tester = EthTester::from_chain(&chain); + let req_block = + r#"{"method":"eth_getBlockByNumber","params":["0x0",false],"id":1,"jsonrpc":"2.0"}"#; - let res_block = 
r#"{"jsonrpc":"2.0","result":{"author":"0x8888f1f195afa192cfee860698584c030f4c9db1","difficulty":"0x20000","extraData":"0x42","gasLimit":"0x1df5d44","gasUsed":"0x0","hash":"0xcded1bc807465a72e2d54697076ab858f28b15d4beaae8faa47339c8eee386a3","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x8888f1f195afa192cfee860698584c030f4c9db1","mixHash":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","nonce":"0x0102030405060708","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":["0xa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","0x880102030405060708"],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x200","stateRoot":"0x7dba07d6b448a186e9612e5f737d1c909dce473e53199901a302c00646d523c1","timestamp":"0x54c98c81","totalDifficulty":"0x20000","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]},"id":1}"#; - assert_eq!(tester.handler.handle_request_sync(req_block).unwrap(), res_block); + let res_block = 
r#"{"jsonrpc":"2.0","result":{"author":"0x8888f1f195afa192cfee860698584c030f4c9db1","difficulty":"0x20000","extraData":"0x42","gasLimit":"0x1df5d44","gasUsed":"0x0","hash":"0xcded1bc807465a72e2d54697076ab858f28b15d4beaae8faa47339c8eee386a3","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x8888f1f195afa192cfee860698584c030f4c9db1","mixHash":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","nonce":"0x0102030405060708","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":["0xa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","0x880102030405060708"],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x200","stateRoot":"0x7dba07d6b448a186e9612e5f737d1c909dce473e53199901a302c00646d523c1","timestamp":"0x54c98c81","totalDifficulty":"0x20000","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]},"id":1}"#; + assert_eq!( + tester.handler.handle_request_sync(req_block).unwrap(), + res_block + ); } #[test] fn eth_get_block_by_hash() { - let chain = extract_chain!("BlockchainTests/bcGasPricerTest/RPC_API_Test"); - let tester = EthTester::from_chain(&chain); + let chain = extract_chain!("BlockchainTests/bcGasPricerTest/RPC_API_Test"); + let tester = EthTester::from_chain(&chain); - // We're looking for block number 4 
from "RPC_API_Test_Frontier" - let req_block = r#"{"method":"eth_getBlockByHash","params":["0xaddb9e39795e9e041c936b88a2577802569f34afded0948707b074caa3163a87",false],"id":1,"jsonrpc":"2.0"}"#; + // We're looking for block number 4 from "RPC_API_Test_Frontier" + let req_block = r#"{"method":"eth_getBlockByHash","params":["0xaddb9e39795e9e041c936b88a2577802569f34afded0948707b074caa3163a87",false],"id":1,"jsonrpc":"2.0"}"#; - let res_block = r#"{"jsonrpc":"2.0","result":{"author":"0x8888f1f195afa192cfee860698584c030f4c9db1","difficulty":"0x20080","extraData":"0x","gasLimit":"0x1dd7ea0","gasUsed":"0x5458","hash":"0xaddb9e39795e9e041c936b88a2577802569f34afded0948707b074caa3163a87","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x8888f1f195afa192cfee860698584c030f4c9db1","mixHash":"0x713b0b31f6e72d8cb7367eaf59447ea531f209fc80e6379edd9f8d3bb73931c4","nonce":"0x4534b406bc23b86d","number":"0x4","parentHash":"0x17567aa5995b703736e32972289d68af50543acc4d56d37e8ad1fea7252cac4a","receiptsRoot":"0x7ed8026cf72ed0e98e6fd53ab406e51ffd34397d9da0052494ff41376fda7b5f","sealFields":["0xa0713b0b31f6e72d8cb7367eaf59447ea531f209fc80e6379edd9f8d3bb73931c4","0x884534b406bc23b86d"],"sha3Uncles":"0xe588a44b3e320e72e70b32b531f3ac0d432e756120135ae8fe5fa10895196b40","size":"0x661","stateRoot":"0x68805721294e365020aca15ed56c360d9dc2cf03cbeff84c9b84b8aed023bfb5","timestamp":"0x5bbdf772","totalDifficulty":"0xa00c0","transactions":["0xb094b9dc356dbb8b256402c6d5709288066ad6a372c90c9c516f14277545fd58"],"transactionsRoot
":"0x97a593d8d7e15b57f5c6bb25bc6c325463ef99f874bc08a78656c3ab5cb23262","uncles":["0x86b48f5186c4b0882d3dca7977aa37840008832ef092f8ef797019dc74bfa8c7","0x2da9d062c11d536f0f1cc2a4e0111597c79926958d0fc26ae1a2d07d1a3bf47d"]},"id":1}"#; - assert_eq!(tester.handler.handle_request_sync(req_block).unwrap(), res_block); + let res_block = r#"{"jsonrpc":"2.0","result":{"author":"0x8888f1f195afa192cfee860698584c030f4c9db1","difficulty":"0x20080","extraData":"0x","gasLimit":"0x1dd7ea0","gasUsed":"0x5458","hash":"0xaddb9e39795e9e041c936b88a2577802569f34afded0948707b074caa3163a87","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x8888f1f195afa192cfee860698584c030f4c9db1","mixHash":"0x713b0b31f6e72d8cb7367eaf59447ea531f209fc80e6379edd9f8d3bb73931c4","nonce":"0x4534b406bc23b86d","number":"0x4","parentHash":"0x17567aa5995b703736e32972289d68af50543acc4d56d37e8ad1fea7252cac4a","receiptsRoot":"0x7ed8026cf72ed0e98e6fd53ab406e51ffd34397d9da0052494ff41376fda7b5f","sealFields":["0xa0713b0b31f6e72d8cb7367eaf59447ea531f209fc80e6379edd9f8d3bb73931c4","0x884534b406bc23b86d"],"sha3Uncles":"0xe588a44b3e320e72e70b32b531f3ac0d432e756120135ae8fe5fa10895196b40","size":"0x661","stateRoot":"0x68805721294e365020aca15ed56c360d9dc2cf03cbeff84c9b84b8aed023bfb5","timestamp":"0x5bbdf772","totalDifficulty":"0xa00c0","transactions":["0xb094b9dc356dbb8b256402c6d5709288066ad6a372c90c9c516f14277545fd58"],"transactionsRoot":"0x97a593d8d7e15b57f5c6bb25bc6c325463ef99f874bc08a78656c3ab5cb23262","uncles":["0x86b48f5186c4b0882d3dca7977aa3
7840008832ef092f8ef797019dc74bfa8c7","0x2da9d062c11d536f0f1cc2a4e0111597c79926958d0fc26ae1a2d07d1a3bf47d"]},"id":1}"#; + assert_eq!( + tester.handler.handle_request_sync(req_block).unwrap(), + res_block + ); } // a frontier-like test with an expanded gas limit and balance on known account. @@ -367,27 +395,43 @@ const POSITIVE_NONCE_SPEC: &'static [u8] = br#"{ #[test] fn eth_transaction_count() { - let secret = "8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2".parse().unwrap(); - let tester = EthTester::from_spec(Spec::load(&env::temp_dir(), TRANSACTION_COUNT_SPEC).expect("invalid chain spec")); - let address = tester.accounts.insert_account(secret, &"".into()).unwrap(); - tester.accounts.unlock_account_permanently(address, "".into()).unwrap(); + let secret = "8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2" + .parse() + .unwrap(); + let tester = EthTester::from_spec( + Spec::load(&env::temp_dir(), TRANSACTION_COUNT_SPEC).expect("invalid chain spec"), + ); + let address = tester.accounts.insert_account(secret, &"".into()).unwrap(); + tester + .accounts + .unlock_account_permanently(address, "".into()) + .unwrap(); - let req_before = r#"{ + let req_before = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionCount", - "params": [""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", "latest"], + "params": [""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "latest"], "id": 15 }"#; - let res_before = r#"{"jsonrpc":"2.0","result":"0x0","id":15}"#; + let res_before = r#"{"jsonrpc":"2.0","result":"0x0","id":15}"#; - assert_eq!(tester.handler.handle_request_sync(&req_before).unwrap(), res_before); + assert_eq!( + tester.handler.handle_request_sync(&req_before).unwrap(), + res_before + ); - let req_send_trans = r#"{ + let req_send_trans = r#"{ "jsonrpc": "2.0", "method": "eth_sendTransaction", "params": [{ - "from": ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + "from": ""# + .to_owned() + + 
format!("0x{:x}", address).as_ref() + + r#"", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x30000", "gasPrice": "0x1", @@ -396,120 +440,173 @@ fn eth_transaction_count() { "id": 16 }"#; - // dispatch the transaction. - tester.handler.handle_request_sync(&req_send_trans).unwrap(); + // dispatch the transaction. + tester.handler.handle_request_sync(&req_send_trans).unwrap(); - // we have submitted the transaction -- but this shouldn't be reflected in a "latest" query. - let req_after_latest = r#"{ + // we have submitted the transaction -- but this shouldn't be reflected in a "latest" query. + let req_after_latest = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionCount", - "params": [""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", "latest"], + "params": [""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "latest"], "id": 17 }"#; - let res_after_latest = r#"{"jsonrpc":"2.0","result":"0x0","id":17}"#; + let res_after_latest = r#"{"jsonrpc":"2.0","result":"0x0","id":17}"#; - assert_eq!(&tester.handler.handle_request_sync(&req_after_latest).unwrap(), res_after_latest); + assert_eq!( + &tester + .handler + .handle_request_sync(&req_after_latest) + .unwrap(), + res_after_latest + ); - // the pending transactions should have been updated. - let req_after_pending = r#"{ + // the pending transactions should have been updated. 
+ let req_after_pending = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionCount", - "params": [""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", "pending"], + "params": [""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "pending"], "id": 18 }"#; - let res_after_pending = r#"{"jsonrpc":"2.0","result":"0x1","id":18}"#; + let res_after_pending = r#"{"jsonrpc":"2.0","result":"0x1","id":18}"#; - assert_eq!(&tester.handler.handle_request_sync(&req_after_pending).unwrap(), res_after_pending); + assert_eq!( + &tester + .handler + .handle_request_sync(&req_after_pending) + .unwrap(), + res_after_pending + ); } fn verify_transaction_counts(name: String, chain: BlockChain) { - struct PanicHandler(String); - impl Drop for PanicHandler { - fn drop(&mut self) { - if ::std::thread::panicking() { - println!("Test failed: {}", self.0); - } - } - } + struct PanicHandler(String); + impl Drop for PanicHandler { + fn drop(&mut self) { + if ::std::thread::panicking() { + println!("Test failed: {}", self.0); + } + } + } - let _panic = PanicHandler(name); + let _panic = PanicHandler(name); - fn by_hash(hash: H256, count: usize, id: &mut usize) -> (String, String) { - let req = r#"{ + fn by_hash(hash: H256, count: usize, id: &mut usize) -> (String, String) { + let req = r#"{ "jsonrpc": "2.0", "method": "eth_getBlockTransactionCountByHash", "params": [ - ""#.to_owned() + format!("0x{:x}", hash).as_ref() + r#"" + ""# + .to_owned() + + format!("0x{:x}", hash).as_ref() + + r#"" ], - "id": "# + format!("{}", *id).as_ref() + r#" + "id": "# + format!("{}", *id).as_ref() + + r#" }"#; - let res = r#"{"jsonrpc":"2.0","result":""#.to_owned() - + format!("0x{:x}", count).as_ref() - + r#"","id":"# - + format!("{}", *id).as_ref() + r#"}"#; - *id += 1; - (req, res) - } + let res = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", count).as_ref() + + r#"","id":"# + + format!("{}", *id).as_ref() + + r#"}"#; + *id += 1; + (req, res) + } - fn by_number(num: 
u64, count: usize, id: &mut usize) -> (String, String) { - let req = r#"{ + fn by_number(num: u64, count: usize, id: &mut usize) -> (String, String) { + let req = r#"{ "jsonrpc": "2.0", "method": "eth_getBlockTransactionCountByNumber", "params": [ - "#.to_owned() + &::serde_json::to_string(&U256::from(num)).unwrap() + r#" + "# + .to_owned() + + &::serde_json::to_string(&U256::from(num)).unwrap() + + r#" ], - "id": "# + format!("{}", *id).as_ref() + r#" + "id": "# + format!("{}", *id).as_ref() + + r#" }"#; - let res = r#"{"jsonrpc":"2.0","result":""#.to_owned() - + format!("0x{:x}", count).as_ref() - + r#"","id":"# - + format!("{}", *id).as_ref() + r#"}"#; - *id += 1; - (req, res) - } + let res = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", count).as_ref() + + r#"","id":"# + + format!("{}", *id).as_ref() + + r#"}"#; + *id += 1; + (req, res) + } - let tester = EthTester::from_chain(&chain); + let tester = EthTester::from_chain(&chain); - let mut id = 1; - for b in chain.blocks_rlp().into_iter().filter_map(|b| Unverified::from_rlp(b).ok()) { - let count = b.transactions.len(); + let mut id = 1; + for b in chain + .blocks_rlp() + .into_iter() + .filter_map(|b| Unverified::from_rlp(b).ok()) + { + let count = b.transactions.len(); - let hash = b.header.hash(); - let number = b.header.number(); + let hash = b.header.hash(); + let number = b.header.number(); - let (req, res) = by_hash(hash, count, &mut id); - assert_eq!(tester.handler.handle_request_sync(&req), Some(res)); + let (req, res) = by_hash(hash, count, &mut id); + assert_eq!(tester.handler.handle_request_sync(&req), Some(res)); - // uncles can share block numbers, so skip them. - if tester.client.block_hash(BlockId::Number(number)) == Some(hash) { - let (req, res) = by_number(number, count, &mut id); - assert_eq!(tester.handler.handle_request_sync(&req), Some(res)); - } - } + // uncles can share block numbers, so skip them. 
+ if tester.client.block_hash(BlockId::Number(number)) == Some(hash) { + let (req, res) = by_number(number, count, &mut id); + assert_eq!(tester.handler.handle_request_sync(&req), Some(res)); + } + } } #[test] fn starting_nonce_test() { - let tester = EthTester::from_spec(Spec::load(&env::temp_dir(), POSITIVE_NONCE_SPEC).expect("invalid chain spec")); - let address = Address::from(10); + let tester = EthTester::from_spec( + Spec::load(&env::temp_dir(), POSITIVE_NONCE_SPEC).expect("invalid chain spec"), + ); + let address = Address::from(10); - let sample = tester.handler.handle_request_sync(&(r#" + let sample = tester + .handler + .handle_request_sync( + &(r#" { "jsonrpc": "2.0", "method": "eth_getTransactionCount", - "params": [""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", "latest"], + "params": [""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "latest"], "id": 15 } - "#) - ).unwrap(); + "#), + ) + .unwrap(); - assert_eq!(r#"{"jsonrpc":"2.0","result":"0x100","id":15}"#, &sample); + assert_eq!(r#"{"jsonrpc":"2.0","result":"0x100","id":15}"#, &sample); } -register_test!(eth_transaction_count_1, verify_transaction_counts, "BlockchainTests/bcWalletTest/wallet2outOf3txs"); -register_test!(eth_transaction_count_2, verify_transaction_counts, "BlockchainTests/bcTotalDifficultyTest/sideChainWithMoreTransactions"); -register_test!(eth_transaction_count_3, verify_transaction_counts, "BlockchainTests/bcGasPricerTest/RPC_API_Test"); +register_test!( + eth_transaction_count_1, + verify_transaction_counts, + "BlockchainTests/bcWalletTest/wallet2outOf3txs" +); +register_test!( + eth_transaction_count_2, + verify_transaction_counts, + "BlockchainTests/bcTotalDifficultyTest/sideChainWithMoreTransactions" +); +register_test!( + eth_transaction_count_3, + verify_transaction_counts, + "BlockchainTests/bcGasPricerTest/RPC_API_Test" +); diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs index 
86239be1a..e5cbf2a5c 100644 --- a/rpc/src/v1/tests/helpers/miner_service.rs +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -16,285 +16,351 @@ //! Test implementation of miner service. -use std::sync::Arc; -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + sync::Arc, +}; use bytes::Bytes; -use ethcore::block::SealedBlock; -use ethcore::client::{Nonce, PrepareOpenBlock, StateClient, EngineInfo, traits::ForceUpdateSealing}; -use ethcore::engines::{EthEngine, signer::EngineSigner}; -use ethcore::error::Error; -use ethcore::miner::{self, MinerService, AuthoringParams}; -use ethcore::client::test_client::TestState; -use ethereum_types::{H256, U256, Address}; -use miner::pool::local_transactions::Status as LocalTransactionStatus; -use miner::pool::{verifier, VerifiedTransaction, QueueStatus}; -use parking_lot::{RwLock, Mutex}; -use types::transaction::{self, UnverifiedTransaction, SignedTransaction, PendingTransaction}; +use ethcore::{ + block::SealedBlock, + client::{ + test_client::TestState, traits::ForceUpdateSealing, EngineInfo, Nonce, PrepareOpenBlock, + StateClient, + }, + engines::{signer::EngineSigner, EthEngine}, + error::Error, + miner::{self, AuthoringParams, MinerService}, +}; +use ethereum_types::{Address, H256, U256}; +use miner::pool::{ + local_transactions::Status as LocalTransactionStatus, verifier, QueueStatus, + VerifiedTransaction, +}; +use parking_lot::{Mutex, RwLock}; use txpool; -use types::BlockNumber; -use types::block::Block; -use types::header::Header; -use types::ids::BlockId; -use types::receipt::RichReceipt; +use types::{ + block::Block, + header::Header, + ids::BlockId, + receipt::RichReceipt, + transaction::{self, PendingTransaction, SignedTransaction, UnverifiedTransaction}, + BlockNumber, +}; /// Test miner service. pub struct TestMinerService { - /// Imported transactions. 
- pub imported_transactions: Mutex>, - /// Pre-existed pending transactions - pub pending_transactions: Mutex>, - /// Pre-existed local transactions - pub local_transactions: Mutex>, - /// Pre-existed pending receipts - pub pending_receipts: Mutex>, - /// Next nonces. - pub next_nonces: RwLock>, - /// Minimum gas price - pub min_gas_price: RwLock>, - /// Signer (if any) - pub signer: RwLock>>, + /// Imported transactions. + pub imported_transactions: Mutex>, + /// Pre-existed pending transactions + pub pending_transactions: Mutex>, + /// Pre-existed local transactions + pub local_transactions: Mutex>, + /// Pre-existed pending receipts + pub pending_receipts: Mutex>, + /// Next nonces. + pub next_nonces: RwLock>, + /// Minimum gas price + pub min_gas_price: RwLock>, + /// Signer (if any) + pub signer: RwLock>>, - authoring_params: RwLock, + authoring_params: RwLock, } impl Default for TestMinerService { - fn default() -> TestMinerService { - TestMinerService { - imported_transactions: Default::default(), - pending_transactions: Default::default(), - local_transactions: Default::default(), - pending_receipts: Default::default(), - next_nonces: Default::default(), - min_gas_price: RwLock::new(Some(0.into())), - authoring_params: RwLock::new(AuthoringParams { - author: Address::zero(), - gas_range_target: (12345.into(), 54321.into()), - extra_data: vec![1, 2, 3, 4], - }), - signer: RwLock::new(None), - } - } + fn default() -> TestMinerService { + TestMinerService { + imported_transactions: Default::default(), + pending_transactions: Default::default(), + local_transactions: Default::default(), + pending_receipts: Default::default(), + next_nonces: Default::default(), + min_gas_price: RwLock::new(Some(0.into())), + authoring_params: RwLock::new(AuthoringParams { + author: Address::zero(), + gas_range_target: (12345.into(), 54321.into()), + extra_data: vec![1, 2, 3, 4], + }), + signer: RwLock::new(None), + } + } } impl TestMinerService { - /// Increments nonce for given 
address. - pub fn increment_nonce(&self, address: &Address) { - let mut next_nonces = self.next_nonces.write(); - let nonce = next_nonces.entry(*address).or_insert_with(|| 0.into()); - *nonce = *nonce + 1; - } + /// Increments nonce for given address. + pub fn increment_nonce(&self, address: &Address) { + let mut next_nonces = self.next_nonces.write(); + let nonce = next_nonces.entry(*address).or_insert_with(|| 0.into()); + *nonce = *nonce + 1; + } } impl StateClient for TestMinerService { - // State will not be used by test client anyway, since all methods that accept state are mocked - type State = TestState; + // State will not be used by test client anyway, since all methods that accept state are mocked + type State = TestState; - fn latest_state_and_header(&self) -> (Self::State, Header) { - (TestState, Header::default()) - } + fn latest_state_and_header(&self) -> (Self::State, Header) { + (TestState, Header::default()) + } - fn state_at(&self, _id: BlockId) -> Option { - Some(TestState) - } + fn state_at(&self, _id: BlockId) -> Option { + Some(TestState) + } } impl EngineInfo for TestMinerService { - fn engine(&self) -> &EthEngine { - unimplemented!() - } + fn engine(&self) -> &EthEngine { + unimplemented!() + } } impl MinerService for TestMinerService { - type State = TestState; + type State = TestState; - fn pending_state(&self, _latest_block_number: BlockNumber) -> Option { - None - } + fn pending_state(&self, _latest_block_number: BlockNumber) -> Option { + None + } - fn pending_block_header(&self, _latest_block_number: BlockNumber) -> Option
{ - None - } + fn pending_block_header(&self, _latest_block_number: BlockNumber) -> Option
{ + None + } - fn pending_block(&self, _latest_block_number: BlockNumber) -> Option { - None - } + fn pending_block(&self, _latest_block_number: BlockNumber) -> Option { + None + } - fn authoring_params(&self) -> AuthoringParams { - self.authoring_params.read().clone() - } + fn authoring_params(&self) -> AuthoringParams { + self.authoring_params.read().clone() + } - fn set_author(&self, author: miner::Author) { - self.authoring_params.write().author = author.address(); - if let miner::Author::Sealer(signer) = author { - *self.signer.write() = Some(signer); - } - } + fn set_author(&self, author: miner::Author) { + self.authoring_params.write().author = author.address(); + if let miner::Author::Sealer(signer) = author { + *self.signer.write() = Some(signer); + } + } - fn set_extra_data(&self, extra_data: Bytes) { - self.authoring_params.write().extra_data = extra_data; - } + fn set_extra_data(&self, extra_data: Bytes) { + self.authoring_params.write().extra_data = extra_data; + } - fn set_gas_range_target(&self, target: (U256, U256)) { - self.authoring_params.write().gas_range_target = target; - } + fn set_gas_range_target(&self, target: (U256, U256)) { + self.authoring_params.write().gas_range_target = target; + } - /// Imports transactions to transaction queue. - fn import_external_transactions(&self, chain: &C, transactions: Vec) - -> Vec> - { - // lets assume that all txs are valid - let transactions: Vec<_> = transactions.into_iter().map(|tx| SignedTransaction::new(tx).unwrap()).collect(); - self.imported_transactions.lock().extend_from_slice(&transactions); + /// Imports transactions to transaction queue. 
+ fn import_external_transactions( + &self, + chain: &C, + transactions: Vec, + ) -> Vec> { + // lets assume that all txs are valid + let transactions: Vec<_> = transactions + .into_iter() + .map(|tx| SignedTransaction::new(tx).unwrap()) + .collect(); + self.imported_transactions + .lock() + .extend_from_slice(&transactions); - for sender in transactions.iter().map(|tx| tx.sender()) { - let nonce = self.next_nonce(chain, &sender); - self.next_nonces.write().insert(sender, nonce); - } + for sender in transactions.iter().map(|tx| tx.sender()) { + let nonce = self.next_nonce(chain, &sender); + self.next_nonces.write().insert(sender, nonce); + } - transactions - .iter() - .map(|_| Ok(())) - .collect() - } + transactions.iter().map(|_| Ok(())).collect() + } - /// Imports transactions to transaction queue. - fn import_own_transaction(&self, _chain: &C, _pending: PendingTransaction) - -> Result<(), transaction::Error> { - // this function is no longer called directly from RPC - unimplemented!(); - } + /// Imports transactions to transaction queue. 
+ fn import_own_transaction( + &self, + _chain: &C, + _pending: PendingTransaction, + ) -> Result<(), transaction::Error> { + // this function is no longer called directly from RPC + unimplemented!(); + } - /// Imports transactions to queue - treats as local based on trusted flag, config, and tx source - fn import_claimed_local_transaction(&self, chain: &C, pending: PendingTransaction, _trusted: bool) - -> Result<(), transaction::Error> { + /// Imports transactions to queue - treats as local based on trusted flag, config, and tx source + fn import_claimed_local_transaction( + &self, + chain: &C, + pending: PendingTransaction, + _trusted: bool, + ) -> Result<(), transaction::Error> { + // keep the pending nonces up to date + let sender = pending.transaction.sender(); + let nonce = self.next_nonce(chain, &sender); + self.next_nonces.write().insert(sender, nonce); - // keep the pending nonces up to date - let sender = pending.transaction.sender(); - let nonce = self.next_nonce(chain, &sender); - self.next_nonces.write().insert(sender, nonce); + // lets assume that all txs are valid + self.imported_transactions.lock().push(pending.transaction); - // lets assume that all txs are valid - self.imported_transactions.lock().push(pending.transaction); + Ok(()) + } - Ok(()) - } + /// Called when blocks are imported to chain, updates transactions queue. + fn chain_new_blocks( + &self, + _chain: &C, + _imported: &[H256], + _invalid: &[H256], + _enacted: &[H256], + _retracted: &[H256], + _is_internal: bool, + ) { + unimplemented!(); + } - /// Called when blocks are imported to chain, updates transactions queue. - fn chain_new_blocks(&self, _chain: &C, _imported: &[H256], _invalid: &[H256], _enacted: &[H256], _retracted: &[H256], _is_internal: bool) { - unimplemented!(); - } + /// New chain head event. Restart mining operation. + fn update_sealing(&self, _chain: &C, _force: ForceUpdateSealing) { + unimplemented!(); + } - /// New chain head event. Restart mining operation. 
- fn update_sealing(&self, _chain: &C, _force: ForceUpdateSealing) { - unimplemented!(); - } + fn work_package( + &self, + chain: &C, + ) -> Option<(H256, BlockNumber, u64, U256)> { + let params = self.authoring_params(); + let open_block = chain + .prepare_open_block(params.author, params.gas_range_target, params.extra_data) + .unwrap(); + let closed = open_block.close().unwrap(); + let header = &closed.header; - fn work_package(&self, chain: &C) -> Option<(H256, BlockNumber, u64, U256)> { - let params = self.authoring_params(); - let open_block = chain.prepare_open_block(params.author, params.gas_range_target, params.extra_data).unwrap(); - let closed = open_block.close().unwrap(); - let header = &closed.header; + Some(( + header.hash(), + header.number(), + header.timestamp(), + *header.difficulty(), + )) + } - Some((header.hash(), header.number(), header.timestamp(), *header.difficulty())) - } + fn transaction(&self, hash: &H256) -> Option> { + self.pending_transactions + .lock() + .get(hash) + .cloned() + .map(|tx| Arc::new(VerifiedTransaction::from_pending_block_transaction(tx))) + } - fn transaction(&self, hash: &H256) -> Option> { - self.pending_transactions.lock().get(hash).cloned().map(|tx| { - Arc::new(VerifiedTransaction::from_pending_block_transaction(tx)) - }) - } + fn remove_transaction(&self, hash: &H256) -> Option> { + self.pending_transactions + .lock() + .remove(hash) + .map(|tx| Arc::new(VerifiedTransaction::from_pending_block_transaction(tx))) + } - fn remove_transaction(&self, hash: &H256) -> Option> { - self.pending_transactions.lock().remove(hash).map(|tx| { - Arc::new(VerifiedTransaction::from_pending_block_transaction(tx)) - }) - } + fn pending_transactions(&self, _best_block: BlockNumber) -> Option> { + Some(self.pending_transactions.lock().values().cloned().collect()) + } - fn pending_transactions(&self, _best_block: BlockNumber) -> Option> { - Some(self.pending_transactions.lock().values().cloned().collect()) - } + fn 
local_transactions(&self) -> BTreeMap { + self.local_transactions + .lock() + .iter() + .map(|(hash, stats)| (*hash, stats.clone())) + .collect() + } - fn local_transactions(&self) -> BTreeMap { - self.local_transactions.lock().iter().map(|(hash, stats)| (*hash, stats.clone())).collect() - } + fn ready_transactions( + &self, + _chain: &C, + _max_len: usize, + _ordering: miner::PendingOrdering, + ) -> Vec> { + self.queued_transactions() + } - fn ready_transactions(&self, _chain: &C, _max_len: usize, _ordering: miner::PendingOrdering) -> Vec> { - self.queued_transactions() - } + fn pending_transaction_hashes(&self, _chain: &C) -> BTreeSet { + self.queued_transactions() + .into_iter() + .map(|tx| tx.signed().hash()) + .collect() + } - fn pending_transaction_hashes(&self, _chain: &C) -> BTreeSet { - self.queued_transactions().into_iter().map(|tx| tx.signed().hash()).collect() - } + fn queued_transactions(&self) -> Vec> { + self.pending_transactions + .lock() + .values() + .cloned() + .map(|tx| Arc::new(VerifiedTransaction::from_pending_block_transaction(tx))) + .collect() + } - fn queued_transactions(&self) -> Vec> { - self.pending_transactions.lock().values().cloned().map(|tx| { - Arc::new(VerifiedTransaction::from_pending_block_transaction(tx)) - }).collect() - } + fn queued_transaction_hashes(&self) -> Vec { + self.pending_transactions + .lock() + .keys() + .cloned() + .map(|hash| hash) + .collect() + } - fn queued_transaction_hashes(&self) -> Vec { - self.pending_transactions.lock().keys().cloned().map(|hash| hash).collect() - } + fn pending_receipts(&self, _best_block: BlockNumber) -> Option> { + Some(self.pending_receipts.lock().clone()) + } - fn pending_receipts(&self, _best_block: BlockNumber) -> Option> { - Some(self.pending_receipts.lock().clone()) - } + fn next_nonce(&self, _chain: &C, address: &Address) -> U256 { + self.next_nonces + .read() + .get(address) + .cloned() + .unwrap_or_default() + } - fn next_nonce(&self, _chain: &C, address: &Address) -> U256 
{ - self.next_nonces.read().get(address).cloned().unwrap_or_default() - } + fn is_currently_sealing(&self) -> bool { + false + } - fn is_currently_sealing(&self) -> bool { - false - } + fn queue_status(&self) -> QueueStatus { + QueueStatus { + options: verifier::Options { + minimal_gas_price: 0x1312d00.into(), + block_gas_limit: 5_000_000.into(), + tx_gas_limit: 5_000_000.into(), + no_early_reject: false, + }, + status: txpool::LightStatus { + mem_usage: 1_000, + transaction_count: 52, + senders: 1, + }, + limits: txpool::Options { + max_count: 1_024, + max_per_sender: 16, + max_mem_usage: 5_000, + }, + } + } - fn queue_status(&self) -> QueueStatus { - QueueStatus { - options: verifier::Options { - minimal_gas_price: 0x1312d00.into(), - block_gas_limit: 5_000_000.into(), - tx_gas_limit: 5_000_000.into(), - no_early_reject: false, - }, - status: txpool::LightStatus { - mem_usage: 1_000, - transaction_count: 52, - senders: 1, - }, - limits: txpool::Options { - max_count: 1_024, - max_per_sender: 16, - max_mem_usage: 5_000, - }, - } - } + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + fn submit_seal(&self, _pow_hash: H256, _seal: Vec) -> Result { + unimplemented!(); + } - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. 
- fn submit_seal(&self, _pow_hash: H256, _seal: Vec) -> Result { - unimplemented!(); - } + fn sensible_gas_price(&self) -> U256 { + 20_000_000_000u64.into() + } - fn sensible_gas_price(&self) -> U256 { - 20_000_000_000u64.into() - } + fn sensible_gas_limit(&self) -> U256 { + 0x5208.into() + } - fn sensible_gas_limit(&self) -> U256 { - 0x5208.into() - } - - fn set_minimal_gas_price(&self, gas_price: U256) -> Result { - let mut new_price = self.min_gas_price.write(); - match *new_price { - Some(ref mut v) => { - *v = gas_price; - Ok(true) - }, - None => { - let error_msg = "Can't update fixed gas price while automatic gas calibration is enabled."; - Err(error_msg) - }, - } - } + fn set_minimal_gas_price(&self, gas_price: U256) -> Result { + let mut new_price = self.min_gas_price.write(); + match *new_price { + Some(ref mut v) => { + *v = gas_price; + Ok(true) + } + None => { + let error_msg = + "Can't update fixed gas price while automatic gas calibration is enabled."; + Err(error_msg) + } + } + } } diff --git a/rpc/src/v1/tests/helpers/mod.rs b/rpc/src/v1/tests/helpers/mod.rs index 0cecd271c..ca39fb0ae 100644 --- a/rpc/src/v1/tests/helpers/mod.rs +++ b/rpc/src/v1/tests/helpers/mod.rs @@ -21,7 +21,9 @@ mod snapshot_service; mod sync_provider; mod update_service; -pub use self::miner_service::TestMinerService; -pub use self::snapshot_service::TestSnapshotService; -pub use self::sync_provider::{Config, TestSyncProvider}; -pub use self::update_service::TestUpdater; +pub use self::{ + miner_service::TestMinerService, + snapshot_service::TestSnapshotService, + sync_provider::{Config, TestSyncProvider}, + update_service::TestUpdater, +}; diff --git a/rpc/src/v1/tests/helpers/snapshot_service.rs b/rpc/src/v1/tests/helpers/snapshot_service.rs index 881c434e1..fa5170242 100644 --- a/rpc/src/v1/tests/helpers/snapshot_service.rs +++ b/rpc/src/v1/tests/helpers/snapshot_service.rs @@ -22,34 +22,44 @@ use parking_lot::Mutex; /// Mocked snapshot service (used for sync info 
extensions). pub struct TestSnapshotService { - status: Mutex, + status: Mutex, } impl TestSnapshotService { - /// Create a test snapshot service. Only the `status` function matters -- it'll - /// return `Inactive` by default. - pub fn new() -> Self { - TestSnapshotService { - status: Mutex::new(RestorationStatus::Inactive), - } - } + /// Create a test snapshot service. Only the `status` function matters -- it'll + /// return `Inactive` by default. + pub fn new() -> Self { + TestSnapshotService { + status: Mutex::new(RestorationStatus::Inactive), + } + } - /// Set the restoration status. - pub fn set_status(&self, status: RestorationStatus) { - *self.status.lock() = status; - } + /// Set the restoration status. + pub fn set_status(&self, status: RestorationStatus) { + *self.status.lock() = status; + } } impl SnapshotService for TestSnapshotService { - fn manifest(&self) -> Option { None } - fn supported_versions(&self) -> Option<(u64, u64)> { None } - fn completed_chunks(&self) -> Option> { Some(vec![]) } - fn chunk(&self, _hash: H256) -> Option { None } - fn status(&self) -> RestorationStatus { self.status.lock().clone() } - fn begin_restore(&self, _manifest: ManifestData) { } - fn abort_restore(&self) { } - fn abort_snapshot(&self) {} - fn restore_state_chunk(&self, _hash: H256, _chunk: Bytes) { } - fn restore_block_chunk(&self, _hash: H256, _chunk: Bytes) { } - fn shutdown(&self) { } + fn manifest(&self) -> Option { + None + } + fn supported_versions(&self) -> Option<(u64, u64)> { + None + } + fn completed_chunks(&self) -> Option> { + Some(vec![]) + } + fn chunk(&self, _hash: H256) -> Option { + None + } + fn status(&self) -> RestorationStatus { + self.status.lock().clone() + } + fn begin_restore(&self, _manifest: ManifestData) {} + fn abort_restore(&self) {} + fn abort_snapshot(&self) {} + fn restore_state_chunk(&self, _hash: H256, _chunk: Bytes) {} + fn restore_block_chunk(&self, _hash: H256, _chunk: Bytes) {} + fn shutdown(&self) {} } diff --git 
a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index 37c2f9355..ce46a4769 100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -16,111 +16,111 @@ //! Test implementation of SyncProvider. -use std::collections::BTreeMap; use ethereum_types::H256; -use parking_lot::RwLock; -use sync::{SyncProvider, EthProtocolInfo, SyncStatus, SyncState, PeerInfo, TransactionStats}; use network::client_version::ClientVersion; +use parking_lot::RwLock; +use std::collections::BTreeMap; +use sync::{EthProtocolInfo, PeerInfo, SyncProvider, SyncState, SyncStatus, TransactionStats}; /// TestSyncProvider config. pub struct Config { - /// Protocol version. - pub network_id: u64, - /// Number of peers. - pub num_peers: usize, + /// Protocol version. + pub network_id: u64, + /// Number of peers. + pub num_peers: usize, } /// Test sync provider. pub struct TestSyncProvider { - /// Sync status. - pub status: RwLock, + /// Sync status. + pub status: RwLock, } impl TestSyncProvider { - /// Creates new sync provider. - pub fn new(config: Config) -> Self { - TestSyncProvider { - status: RwLock::new(SyncStatus { - state: SyncState::Idle, - network_id: config.network_id, - protocol_version: 63, - start_block_number: 0, - last_imported_block_number: None, - highest_block_number: None, - blocks_total: 0, - blocks_received: 0, - num_peers: config.num_peers, - num_active_peers: 0, - mem_used: 0, - num_snapshot_chunks: 0, - snapshot_chunks_done: 0, - last_imported_old_block_number: None, - }), - } - } + /// Creates new sync provider. 
+ pub fn new(config: Config) -> Self { + TestSyncProvider { + status: RwLock::new(SyncStatus { + state: SyncState::Idle, + network_id: config.network_id, + protocol_version: 63, + start_block_number: 0, + last_imported_block_number: None, + highest_block_number: None, + blocks_total: 0, + blocks_received: 0, + num_peers: config.num_peers, + num_active_peers: 0, + mem_used: 0, + num_snapshot_chunks: 0, + snapshot_chunks_done: 0, + last_imported_old_block_number: None, + }), + } + } - /// Simulate importing blocks. - pub fn increase_imported_block_number(&self, count: u64) { - let mut status = self.status.write(); - let current_number = status.last_imported_block_number.unwrap_or(0); - status.last_imported_block_number = Some(current_number + count); - } + /// Simulate importing blocks. + pub fn increase_imported_block_number(&self, count: u64) { + let mut status = self.status.write(); + let current_number = status.last_imported_block_number.unwrap_or(0); + status.last_imported_block_number = Some(current_number + count); + } } impl SyncProvider for TestSyncProvider { - fn status(&self) -> SyncStatus { - self.status.read().clone() - } + fn status(&self) -> SyncStatus { + self.status.read().clone() + } - fn peers(&self) -> Vec { - vec![ - PeerInfo { - id: Some("node1".to_owned()), - client_version: ClientVersion::from("Parity-Ethereum/1/v2.4.0/linux/rustc"), - capabilities: vec!["eth/62".to_owned(), "eth/63".to_owned()], - remote_address: "127.0.0.1:7777".to_owned(), - local_address: "127.0.0.1:8888".to_owned(), - eth_info: Some(EthProtocolInfo { - version: 62, - difficulty: Some(40.into()), - head: 50.into(), - }), - pip_info: None, - }, - PeerInfo { - id: None, - client_version: ClientVersion::from("Parity-Ethereum/2/v2.4.0/linux/rustc"), - capabilities: vec!["eth/63".to_owned(), "eth/64".to_owned()], - remote_address: "Handshake".to_owned(), - local_address: "127.0.0.1:3333".to_owned(), - eth_info: Some(EthProtocolInfo { - version: 64, - difficulty: None, - head: 
60.into() - }), - pip_info: None, - } - ] - } + fn peers(&self) -> Vec { + vec![ + PeerInfo { + id: Some("node1".to_owned()), + client_version: ClientVersion::from("Parity-Ethereum/1/v2.4.0/linux/rustc"), + capabilities: vec!["eth/62".to_owned(), "eth/63".to_owned()], + remote_address: "127.0.0.1:7777".to_owned(), + local_address: "127.0.0.1:8888".to_owned(), + eth_info: Some(EthProtocolInfo { + version: 62, + difficulty: Some(40.into()), + head: 50.into(), + }), + pip_info: None, + }, + PeerInfo { + id: None, + client_version: ClientVersion::from("Parity-Ethereum/2/v2.4.0/linux/rustc"), + capabilities: vec!["eth/63".to_owned(), "eth/64".to_owned()], + remote_address: "Handshake".to_owned(), + local_address: "127.0.0.1:3333".to_owned(), + eth_info: Some(EthProtocolInfo { + version: 64, + difficulty: None, + head: 60.into(), + }), + pip_info: None, + }, + ] + } - fn enode(&self) -> Option { - None - } + fn enode(&self) -> Option { + None + } - fn transactions_stats(&self) -> BTreeMap { - map![ - 1.into() => TransactionStats { - first_seen: 10, - propagated_to: map![ - 128.into() => 16 - ], - }, - 5.into() => TransactionStats { - first_seen: 16, - propagated_to: map![ - 16.into() => 1 - ], - } - ] - } + fn transactions_stats(&self) -> BTreeMap { + map![ + 1.into() => TransactionStats { + first_seen: 10, + propagated_to: map![ + 128.into() => 16 + ], + }, + 5.into() => TransactionStats { + first_seen: 16, + propagated_to: map![ + 16.into() => 1 + ], + } + ] + } } diff --git a/rpc/src/v1/tests/helpers/update_service.rs b/rpc/src/v1/tests/helpers/update_service.rs index ccf3315c4..8ba2b828b 100644 --- a/rpc/src/v1/tests/helpers/update_service.rs +++ b/rpc/src/v1/tests/helpers/update_service.rs @@ -16,82 +16,96 @@ //! Test implementation of fetch client. 
-use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use semver::Version; -use updater::{Service as UpdateService, CapState, ReleaseInfo, VersionInfo, OperationsInfo, ReleaseTrack}; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use updater::{ + CapState, OperationsInfo, ReleaseInfo, ReleaseTrack, Service as UpdateService, VersionInfo, +}; /// Test implementation of fetcher. Will always return the same file. #[derive(Default)] pub struct TestUpdater { - updated: AtomicBool, - current_block: AtomicUsize, + updated: AtomicBool, + current_block: AtomicUsize, } impl TestUpdater { - /// Update the (faked) current block. - pub fn set_current_block(&self, n: usize) { - self.current_block.store(n, Ordering::Relaxed); - } + /// Update the (faked) current block. + pub fn set_current_block(&self, n: usize) { + self.current_block.store(n, Ordering::Relaxed); + } - /// Update the (faked) current block. - pub fn set_updated(&self, v: bool) { - self.updated.store(v, Ordering::Relaxed); - } + /// Update the (faked) current block. 
+ pub fn set_updated(&self, v: bool) { + self.updated.store(v, Ordering::Relaxed); + } } impl UpdateService for TestUpdater { - fn capability(&self) -> CapState { - if self.updated.load(Ordering::Relaxed) { - CapState::Capable - } else { - if self.current_block.load(Ordering::Relaxed) < 15100 { - CapState::CapableUntil(15100) - } else { - CapState::IncapableSince(15100) - } - } - } + fn capability(&self) -> CapState { + if self.updated.load(Ordering::Relaxed) { + CapState::Capable + } else { + if self.current_block.load(Ordering::Relaxed) < 15100 { + CapState::CapableUntil(15100) + } else { + CapState::IncapableSince(15100) + } + } + } - fn upgrade_ready(&self) -> Option { - if self.updated.load(Ordering::Relaxed) { - None - } else { - self.info().map(|i| i.track) - } - } + fn upgrade_ready(&self) -> Option { + if self.updated.load(Ordering::Relaxed) { + None + } else { + self.info().map(|i| i.track) + } + } - fn execute_upgrade(&self) -> bool { - if self.updated.load(Ordering::Relaxed) { - false - } else { - self.updated.store(true, Ordering::Relaxed); - true - } - } + fn execute_upgrade(&self) -> bool { + if self.updated.load(Ordering::Relaxed) { + false + } else { + self.updated.store(true, Ordering::Relaxed); + true + } + } - fn version_info(&self) -> VersionInfo { - VersionInfo { - track: ReleaseTrack::Beta, - version: Version{major: 1, minor: 5, patch: 0, build: vec![], pre: vec![]}, - hash: 150.into(), - } - } + fn version_info(&self) -> VersionInfo { + VersionInfo { + track: ReleaseTrack::Beta, + version: Version { + major: 1, + minor: 5, + patch: 0, + build: vec![], + pre: vec![], + }, + hash: 150.into(), + } + } - fn info(&self) -> Option { - Some(OperationsInfo { - fork: 15100, - this_fork: Some(15000), - track: ReleaseInfo { - version: VersionInfo { - track: ReleaseTrack::Beta, - version: Version{major: 1, minor: 5, patch: 1, build: vec![], pre: vec![]}, - hash: 151.into(), - }, - is_critical: true, - fork: 15100, - binary: Some(1510.into()), - }, - 
minor: None, - }) - } + fn info(&self) -> Option { + Some(OperationsInfo { + fork: 15100, + this_fork: Some(15000), + track: ReleaseInfo { + version: VersionInfo { + track: ReleaseTrack::Beta, + version: Version { + major: 1, + minor: 5, + patch: 1, + build: vec![], + pre: vec![], + }, + hash: 151.into(), + }, + is_critical: true, + fork: 15100, + binary: Some(1510.into()), + }, + minor: None, + }) + } } diff --git a/rpc/src/v1/tests/mocked/debug.rs b/rpc/src/v1/tests/mocked/debug.rs index ffbe8d4d1..c4cffd2ff 100644 --- a/rpc/src/v1/tests/mocked/debug.rs +++ b/rpc/src/v1/tests/mocked/debug.rs @@ -22,16 +22,16 @@ use jsonrpc_core::IoHandler; use v1::{Debug, DebugClient}; fn io() -> IoHandler { - let client = Arc::new(TestBlockChainClient::new()); + let client = Arc::new(TestBlockChainClient::new()); - let mut io = IoHandler::new(); - io.extend_with(DebugClient::new(client).to_delegate()); - io + let mut io = IoHandler::new(); + io.extend_with(DebugClient::new(client).to_delegate()); + io } #[test] fn rpc_debug_get_bad_blocks() { - let request = r#"{"jsonrpc": "2.0", "method": "debug_getBadBlocks", "params": [], "id": 1}"#; - let response = 
"{\"jsonrpc\":\"2.0\",\"result\":[{\"author\":\"0x0000000000000000000000000000000000000000\",\"difficulty\":\"0x0\",\"extraData\":\"0x\",\"gasLimit\":\"0x0\",\"gasUsed\":\"0x0\",\"hash\":\"0x27bfb37e507ce90da141307204b1c6ba24194380613590ac50ca4b1d7198ff65\",\"logsBloom\":\"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"miner\":\"0x0000000000000000000000000000000000000000\",\"number\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"reason\":\"Invalid block\",\"receiptsRoot\":\"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",\"rlp\":\"\\\"0x010203\\\"\",\"sealFields\":[],\"sha3Uncles\":\"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",\"size\":\"0x3\",\"stateRoot\":\"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",\"timestamp\":\"0x0\",\"totalDifficulty\":null,\"transactions\":[],\"transactionsRoot\":\"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",\"uncles\":[]}],\"id\":1}"; - assert_eq!(io().handle_request_sync(request), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "debug_getBadBlocks", "params": [], "id": 1}"#; + let response = 
"{\"jsonrpc\":\"2.0\",\"result\":[{\"author\":\"0x0000000000000000000000000000000000000000\",\"difficulty\":\"0x0\",\"extraData\":\"0x\",\"gasLimit\":\"0x0\",\"gasUsed\":\"0x0\",\"hash\":\"0x27bfb37e507ce90da141307204b1c6ba24194380613590ac50ca4b1d7198ff65\",\"logsBloom\":\"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"miner\":\"0x0000000000000000000000000000000000000000\",\"number\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"reason\":\"Invalid block\",\"receiptsRoot\":\"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",\"rlp\":\"\\\"0x010203\\\"\",\"sealFields\":[],\"sha3Uncles\":\"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",\"size\":\"0x3\",\"stateRoot\":\"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",\"timestamp\":\"0x0\",\"totalDifficulty\":null,\"transactions\":[],\"transactionsRoot\":\"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",\"uncles\":[]}],\"id\":1}"; + assert_eq!(io().handle_request_sync(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 2323e5ac0..a2e0df59f 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -14,15 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::str::FromStr; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH}; +use std::{ + collections::HashMap, + str::FromStr, + sync::Arc, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; use accounts::AccountProvider; -use ethcore::client::{BlockChainClient, EachBlockWith, Executed, TestBlockChainClient}; -use ethcore::miner::{self, MinerService}; -use ethereum_types::{H160, H256, U256, Address, Bloom}; +use ethcore::{ + client::{BlockChainClient, EachBlockWith, Executed, TestBlockChainClient}, + miner::{self, MinerService}, +}; +use ethereum_types::{Address, Bloom, H160, H256, U256}; use miner::external::ExternalMiner; use parity_runtime::Runtime; use parking_lot::Mutex; @@ -30,318 +34,412 @@ use rlp; use rustc_hex::{FromHex, ToHex}; use sync::SyncState; use types::{ - ids::{BlockId, TransactionId}, - transaction::{Transaction, Action}, - log_entry::{LocalizedLogEntry, LogEntry}, - receipt::{LocalizedReceipt, RichReceipt, TransactionOutcome}, + ids::{BlockId, TransactionId}, + log_entry::{LocalizedLogEntry, LogEntry}, + receipt::{LocalizedReceipt, RichReceipt, TransactionOutcome}, + transaction::{Action, Transaction}, }; use jsonrpc_core::IoHandler; -use v1::{Eth, EthClient, EthClientOptions, EthFilter, EthFilterClient}; -use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService, TestSnapshotService}; -use v1::metadata::Metadata; +use v1::{ + metadata::Metadata, + tests::helpers::{Config, TestMinerService, TestSnapshotService, TestSyncProvider}, + Eth, EthClient, EthClientOptions, EthFilter, EthFilterClient, +}; fn blockchain_client() -> Arc { - let client = TestBlockChainClient::new(); - Arc::new(client) + let client = TestBlockChainClient::new(); + Arc::new(client) } fn accounts_provider() -> Arc { - Arc::new(AccountProvider::transient_provider()) + Arc::new(AccountProvider::transient_provider()) } fn sync_provider() -> Arc { - Arc::new(TestSyncProvider::new(Config { - network_id: 
3, - num_peers: 120, - })) + Arc::new(TestSyncProvider::new(Config { + network_id: 3, + num_peers: 120, + })) } fn miner_service() -> Arc { - Arc::new(TestMinerService::default()) + Arc::new(TestMinerService::default()) } fn snapshot_service() -> Arc { - Arc::new(TestSnapshotService::new()) + Arc::new(TestSnapshotService::new()) } struct EthTester { - pub runtime: Runtime, - pub client: Arc, - pub sync: Arc, - pub accounts_provider: Arc, - pub miner: Arc, - pub snapshot: Arc, - hashrates: Arc>>, - pub io: IoHandler, + pub runtime: Runtime, + pub client: Arc, + pub sync: Arc, + pub accounts_provider: Arc, + pub miner: Arc, + pub snapshot: Arc, + hashrates: Arc>>, + pub io: IoHandler, } impl Default for EthTester { - fn default() -> Self { - Self::new_with_options(Default::default()) - } + fn default() -> Self { + Self::new_with_options(Default::default()) + } } impl EthTester { - pub fn new_with_options(options: EthClientOptions) -> Self { - let runtime = Runtime::with_thread_count(1); - let client = blockchain_client(); - let sync = sync_provider(); - let ap = accounts_provider(); - let ap2 = ap.clone(); - let opt_ap = Arc::new(move || ap2.accounts().unwrap_or_default()) as _; - let miner = miner_service(); - let snapshot = snapshot_service(); - let hashrates = Arc::new(Mutex::new(HashMap::new())); - let external_miner = Arc::new(ExternalMiner::new(hashrates.clone())); - let eth = EthClient::new(&client, &snapshot, &sync, &opt_ap, &miner, &external_miner, options).to_delegate(); - let filter = EthFilterClient::new(client.clone(), miner.clone(), 60).to_delegate(); + pub fn new_with_options(options: EthClientOptions) -> Self { + let runtime = Runtime::with_thread_count(1); + let client = blockchain_client(); + let sync = sync_provider(); + let ap = accounts_provider(); + let ap2 = ap.clone(); + let opt_ap = Arc::new(move || ap2.accounts().unwrap_or_default()) as _; + let miner = miner_service(); + let snapshot = snapshot_service(); + let hashrates = 
Arc::new(Mutex::new(HashMap::new())); + let external_miner = Arc::new(ExternalMiner::new(hashrates.clone())); + let eth = EthClient::new( + &client, + &snapshot, + &sync, + &opt_ap, + &miner, + &external_miner, + options, + ) + .to_delegate(); + let filter = EthFilterClient::new(client.clone(), miner.clone(), 60).to_delegate(); - let mut io: IoHandler = IoHandler::default(); - io.extend_with(eth); - io.extend_with(filter); + let mut io: IoHandler = IoHandler::default(); + io.extend_with(eth); + io.extend_with(filter); - EthTester { - runtime, - client, - sync, - accounts_provider: ap, - miner, - snapshot, - io, - hashrates, - } - } + EthTester { + runtime, + client, + sync, + accounts_provider: ap, + miner, + snapshot, + io, + hashrates, + } + } - pub fn add_blocks(&self, count: usize, with: EachBlockWith) { - self.client.add_blocks(count, with); - self.sync.increase_imported_block_number(count as u64); - } + pub fn add_blocks(&self, count: usize, with: EachBlockWith) { + self.client.add_blocks(count, with); + self.sync.increase_imported_block_number(count as u64); + } } #[test] fn rpc_eth_protocol_version() { - let request = r#"{"jsonrpc": "2.0", "method": "eth_protocolVersion", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"63","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_protocolVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"63","id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_syncing() { - use ethcore::snapshot::RestorationStatus; + use ethcore::snapshot::RestorationStatus; - let request = r#"{"jsonrpc": "2.0", "method": "eth_syncing", "params": [], "id": 1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_syncing", "params": [], "id": 1}"#; - let tester = EthTester::default(); + let tester = 
EthTester::default(); - let false_res = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(false_res.to_owned())); + let false_res = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(request), + Some(false_res.to_owned()) + ); - { - let mut status = tester.sync.status.write(); - status.state = SyncState::Blocks; - status.highest_block_number = Some(2500); - } + { + let mut status = tester.sync.status.write(); + status.state = SyncState::Blocks; + status.highest_block_number = Some(2500); + } - // "sync" to 1000 blocks. - // causes TestBlockChainClient to return 1000 for its best block number. - tester.add_blocks(1000, EachBlockWith::Nothing); + // "sync" to 1000 blocks. + // causes TestBlockChainClient to return 1000 for its best block number. + tester.add_blocks(1000, EachBlockWith::Nothing); - let true_res = r#"{"jsonrpc":"2.0","result":{"currentBlock":"0x3e8","highestBlock":"0x9c4","startingBlock":"0x0","warpChunksAmount":null,"warpChunksProcessed":null},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(true_res.to_owned())); + let true_res = r#"{"jsonrpc":"2.0","result":{"currentBlock":"0x3e8","highestBlock":"0x9c4","startingBlock":"0x0","warpChunksAmount":null,"warpChunksProcessed":null},"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(request), + Some(true_res.to_owned()) + ); - *tester.client.ancient_block.write() = None; - *tester.client.first_block.write() = None; + *tester.client.ancient_block.write() = None; + *tester.client.first_block.write() = None; - let snap_res = r#"{"jsonrpc":"2.0","result":{"currentBlock":"0x3e8","highestBlock":"0x9c4","startingBlock":"0x0","warpChunksAmount":"0x32","warpChunksProcessed":"0x18"},"id":1}"#; - tester.snapshot.set_status(RestorationStatus::Ongoing { - state_chunks: 40, - block_chunks: 10, - state_chunks_done: 18, - block_chunks_done: 6, - }); + let snap_res = 
r#"{"jsonrpc":"2.0","result":{"currentBlock":"0x3e8","highestBlock":"0x9c4","startingBlock":"0x0","warpChunksAmount":"0x32","warpChunksProcessed":"0x18"},"id":1}"#; + tester.snapshot.set_status(RestorationStatus::Ongoing { + state_chunks: 40, + block_chunks: 10, + state_chunks_done: 18, + block_chunks_done: 6, + }); - assert_eq!(tester.io.handle_request_sync(request), Some(snap_res.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(snap_res.to_owned()) + ); - tester.snapshot.set_status(RestorationStatus::Inactive); + tester.snapshot.set_status(RestorationStatus::Inactive); - // finish "syncing" - tester.add_blocks(1500, EachBlockWith::Nothing); + // finish "syncing" + tester.add_blocks(1500, EachBlockWith::Nothing); - { - let mut status = tester.sync.status.write(); - status.state = SyncState::Idle; - } + { + let mut status = tester.sync.status.write(); + status.state = SyncState::Idle; + } - assert_eq!(tester.io.handle_request_sync(request), Some(false_res.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(false_res.to_owned()) + ); } #[test] fn rpc_eth_chain_id() { - let tester = EthTester::default(); - let request = r#"{"jsonrpc": "2.0", "method": "eth_chainId", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + let tester = EthTester::default(); + let request = r#"{"jsonrpc": "2.0", "method": "eth_chainId", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_hashrate() { - let tester = EthTester::default(); - tester.hashrates.lock().insert(H256::from(0), (Instant::now() + Duration::from_secs(2), U256::from(0xfffa))); - tester.hashrates.lock().insert(H256::from(0), (Instant::now() + Duration::from_secs(2), U256::from(0xfffb))); - 
tester.hashrates.lock().insert(H256::from(1), (Instant::now() + Duration::from_secs(2), U256::from(0x1))); + let tester = EthTester::default(); + tester.hashrates.lock().insert( + H256::from(0), + (Instant::now() + Duration::from_secs(2), U256::from(0xfffa)), + ); + tester.hashrates.lock().insert( + H256::from(0), + (Instant::now() + Duration::from_secs(2), U256::from(0xfffb)), + ); + tester.hashrates.lock().insert( + H256::from(1), + (Instant::now() + Duration::from_secs(2), U256::from(0x1)), + ); - let request = r#"{"jsonrpc": "2.0", "method": "eth_hashrate", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0xfffc","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_hashrate", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0xfffc","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_logs() { - let tester = EthTester::default(); - tester.client.set_logs(vec![LocalizedLogEntry { - block_number: 1, - block_hash: H256::default(), - entry: LogEntry { - address: Address::default(), - topics: vec![], - data: vec![1,2,3], - }, - transaction_index: 0, - transaction_log_index: 0, - transaction_hash: H256::default(), - log_index: 0, - }, LocalizedLogEntry { - block_number: 1, - block_hash: H256::default(), - entry: LogEntry { - address: Address::default(), - topics: vec![], - data: vec![1,2,3], - }, - transaction_index: 0, - transaction_log_index: 1, - transaction_hash: H256::default(), - log_index: 1, - }]); + let tester = EthTester::default(); + tester.client.set_logs(vec![ + LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1, 2, 3], + }, + transaction_index: 0, + transaction_log_index: 0, + transaction_hash: H256::default(), + log_index: 0, + }, + 
LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1, 2, 3], + }, + transaction_index: 0, + transaction_log_index: 1, + transaction_hash: H256::default(), + log_index: 1, + }, + ]); - let request1 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}], "id": 1}"#; - let request2 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":1}], "id": 1}"#; - let request3 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":0}], "id": 1}"#; + let request1 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}], "id": 1}"#; + let request2 = + r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":1}], "id": 1}"#; + let request3 = + r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":0}], "id": 1}"#; - let response1 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x1","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x1","type":"mined"}],"id":1}"#; - let response2 = 
r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x1","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x1","type":"mined"}],"id":1}"#; - let response3 = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x1","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x1","type":"mined"}],"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x1","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x1","type":"mined"}],"id":1}"#; + let response3 = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request1), Some(response1.to_owned())); - assert_eq!(tester.io.handle_request_sync(request2), Some(response2.to_owned())); - 
assert_eq!(tester.io.handle_request_sync(request3), Some(response3.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request1), + Some(response1.to_owned()) + ); + assert_eq!( + tester.io.handle_request_sync(request2), + Some(response2.to_owned()) + ); + assert_eq!( + tester.io.handle_request_sync(request3), + Some(response3.to_owned()) + ); } #[test] fn rpc_eth_logs_error() { - let tester = EthTester::default(); - tester.client.set_error_on_logs(Some(BlockId::Hash(H256::from([5u8].as_ref())))); - let request = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":1,"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"One of the blocks specified in filter (fromBlock, toBlock or blockHash) cannot be found","data":"0x0500000000000000000000000000000000000000000000000000000000000000"},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + let tester = EthTester::default(); + tester + .client + .set_error_on_logs(Some(BlockId::Hash(H256::from([5u8].as_ref())))); + let request = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":1,"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"One of the blocks specified in filter (fromBlock, toBlock or blockHash) cannot be found","data":"0x0500000000000000000000000000000000000000000000000000000000000000"},"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_logs_filter() { - let tester = EthTester::default(); - // Set some logs - tester.client.set_logs(vec![LocalizedLogEntry { - block_number: 1, - block_hash: H256::default(), - entry: LogEntry { - address: Address::default(), - topics: vec![], - data: vec![1,2,3], - }, - transaction_index: 0, - transaction_log_index: 
0, - transaction_hash: H256::default(), - log_index: 0, - }, LocalizedLogEntry { - block_number: 1, - block_hash: H256::default(), - entry: LogEntry { - address: Address::default(), - topics: vec![], - data: vec![1,2,3], - }, - transaction_index: 0, - transaction_log_index: 1, - transaction_hash: H256::default(), - log_index: 1, - }]); + let tester = EthTester::default(); + // Set some logs + tester.client.set_logs(vec![ + LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1, 2, 3], + }, + transaction_index: 0, + transaction_log_index: 0, + transaction_hash: H256::default(), + log_index: 0, + }, + LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1, 2, 3], + }, + transaction_index: 0, + transaction_log_index: 1, + transaction_hash: H256::default(), + log_index: 1, + }, + ]); - // Register filters first - let request_default = r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{}], "id": 1}"#; - let request_limit = r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{"limit":1}], "id": 1}"#; - let response1 = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","result":"0x1","id":1}"#; + // Register filters first + let request_default = + r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{}], "id": 1}"#; + let request_limit = + r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{"limit":1}], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":"0x1","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request_default), Some(response1.to_owned())); - assert_eq!(tester.io.handle_request_sync(request_limit), Some(response2.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request_default), + Some(response1.to_owned()) + ); 
+ assert_eq!( + tester.io.handle_request_sync(request_limit), + Some(response2.to_owned()) + ); - let request_changes1 = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x0"], "id": 1}"#; - let request_changes2 = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x1"], "id": 1}"#; - let response1 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x1","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x1","type":"mined"}],"id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x1","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x1","type":"mined"}],"id":1}"#; + let request_changes1 = + r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x0"], "id": 1}"#; + let request_changes2 = + r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x1"], "id": 1}"#; + let response1 = 
r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x1","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x1","type":"mined"}],"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x1","removed":false,"topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x1","type":"mined"}],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request_changes1), Some(response1.to_owned())); - assert_eq!(tester.io.handle_request_sync(request_changes2), Some(response2.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request_changes1), + Some(response1.to_owned()) + ); + assert_eq!( + tester.io.handle_request_sync(request_changes2), + Some(response2.to_owned()) + ); } #[test] fn rpc_blocks_filter() { - let tester = EthTester::default(); - let request_filter = r#"{"jsonrpc": "2.0", "method": "eth_newBlockFilter", "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let tester = EthTester::default(); + let request_filter = r#"{"jsonrpc": "2.0", "method": "eth_newBlockFilter", "id": 1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request_filter), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request_filter), + Some(response.to_owned()) + ); - let request_changes = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x0"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + let request_changes = + r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x0"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request_changes), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request_changes), + Some(response.to_owned()) + ); - tester.client.add_blocks(2, EachBlockWith::Nothing); + tester.client.add_blocks(2, EachBlockWith::Nothing); - let hash1 = tester.client.block_hash(BlockId::Number(1)).unwrap(); - let hash2 = tester.client.block_hash(BlockId::Number(2)).unwrap(); - let response = format!( - r#"{{"jsonrpc":"2.0","result":["0x{:x}","0x{:x}"],"id":1}}"#, - hash1, - hash2); + let hash1 = tester.client.block_hash(BlockId::Number(1)).unwrap(); + let hash2 = tester.client.block_hash(BlockId::Number(2)).unwrap(); + let response = format!( + r#"{{"jsonrpc":"2.0","result":["0x{:x}","0x{:x}"],"id":1}}"#, + hash1, hash2 + ); - assert_eq!(tester.io.handle_request_sync(request_changes), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request_changes), + Some(response.to_owned()) + ); - // in the case of a re-org we get same block number if hash is different - BlockId::Number(2) - tester.client.blocks.write().remove(&hash2).unwrap(); - tester.client.numbers.write().remove(&2).unwrap(); - *tester.client.last_hash.write() = hash1; - tester.client.add_blocks(2, EachBlockWith::Uncle); + // in the case of a re-org we get same block number if hash is different - BlockId::Number(2) + 
tester.client.blocks.write().remove(&hash2).unwrap(); + tester.client.numbers.write().remove(&2).unwrap(); + *tester.client.last_hash.write() = hash1; + tester.client.add_blocks(2, EachBlockWith::Uncle); - let request_changes = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x0"], "id": 2}"#; - let response = format!( - r#"{{"jsonrpc":"2.0","result":["0x{:x}","0x{:x}"],"id":2}}"#, - tester.client.block_hash(BlockId::Number(2)).unwrap(), - tester.client.block_hash(BlockId::Number(3)).unwrap()); + let request_changes = + r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x0"], "id": 2}"#; + let response = format!( + r#"{{"jsonrpc":"2.0","result":["0x{:x}","0x{:x}"],"id":2}}"#, + tester.client.block_hash(BlockId::Number(2)).unwrap(), + tester.client.block_hash(BlockId::Number(3)).unwrap() + ); - assert_eq!(tester.io.handle_request_sync(request_changes), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request_changes), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_submit_hashrate() { - let tester = EthTester::default(); + let tester = EthTester::default(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_submitHashrate", "params": [ @@ -349,295 +447,381 @@ fn rpc_eth_submit_hashrate() { "0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - assert_eq!(tester.hashrates.lock().get(&H256::from("0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c")).cloned().unwrap().1, - U256::from(0x500_000)); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); + assert_eq!( + tester + .hashrates + .lock() + .get(&H256::from( + "0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c" + )) + 
.cloned() + .unwrap() + .1, + U256::from(0x500_000) + ); } #[test] fn rpc_eth_author() { - let make_res = |addr| r#"{"jsonrpc":"2.0","result":""#.to_owned() + &format!("0x{:x}", addr) + r#"","id":1}"#; - let tester = EthTester::default(); + let make_res = |addr| { + r#"{"jsonrpc":"2.0","result":""#.to_owned() + &format!("0x{:x}", addr) + r#"","id":1}"# + }; + let tester = EthTester::default(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_coinbase", "params": [], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32023,"message":"No accounts were found","data":"\"\""},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32023,"message":"No accounts were found","data":"\"\""},"id":1}"#; - // No accounts - returns an error indicating that no accounts were found - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_string())); + // No accounts - returns an error indicating that no accounts were found + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_string()) + ); - // Account set - return first account - let addr = tester.accounts_provider.new_account(&"123".into()).unwrap(); - assert_eq!(tester.io.handle_request_sync(request), Some(make_res(addr))); + // Account set - return first account + let addr = tester.accounts_provider.new_account(&"123".into()).unwrap(); + assert_eq!(tester.io.handle_request_sync(request), Some(make_res(addr))); - for i in 0..20 { - let addr = tester.accounts_provider.new_account(&format!("{}", i).into()).unwrap(); - tester.miner.set_author(miner::Author::External(addr)); + for i in 0..20 { + let addr = tester + .accounts_provider + .new_account(&format!("{}", i).into()) + .unwrap(); + tester.miner.set_author(miner::Author::External(addr)); - assert_eq!(tester.io.handle_request_sync(request), Some(make_res(addr))); - } + assert_eq!(tester.io.handle_request_sync(request), Some(make_res(addr))); + } } #[test] fn rpc_eth_mining() { - let tester = 
EthTester::default(); - tester.miner.set_author(miner::Author::External(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap())); + let tester = EthTester::default(); + tester.miner.set_author(miner::Author::External( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + )); - let request = r#"{"jsonrpc": "2.0", "method": "eth_mining", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "eth_mining", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_gas_price() { - let request = r#"{"jsonrpc": "2.0", "method": "eth_gasPrice", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x4a817c800","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_gasPrice", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x4a817c800","id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_accounts() { - let tester = EthTester::default(); - let address = tester.accounts_provider.new_account(&"".into()).unwrap(); - tester.accounts_provider.set_address_name(1.into(), "1".into()); - tester.accounts_provider.set_address_name(10.into(), "10".into()); + let tester = EthTester::default(); + let address = tester.accounts_provider.new_account(&"".into()).unwrap(); + tester + .accounts_provider + .set_address_name(1.into(), "1".into()); + tester + .accounts_provider + .set_address_name(10.into(), "10".into()); - // with current policy it should return the account - let request = r#"{"jsonrpc": "2.0", "method": 
"eth_accounts", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":[""#.to_owned() + &format!("0x{:x}", address) + r#""],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + // with current policy it should return the account + let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[""#.to_owned() + + &format!("0x{:x}", address) + + r#""],"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_block_number() { - let tester = EthTester::default(); - tester.client.add_blocks(10, EachBlockWith::Nothing); + let tester = EthTester::default(); + tester.client.add_blocks(10, EachBlockWith::Nothing); - let request = r#"{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0xa","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0xa","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_balance() { - let tester = EthTester::default(); - tester.client.set_balance(Address::from(1), U256::from(5)); + let tester = EthTester::default(); + tester.client.set_balance(Address::from(1), U256::from(5)); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getBalance", "params": ["0x0000000000000000000000000000000000000001", "latest"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x5","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x5","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn 
rpc_eth_balance_pending() { - let tester = EthTester::default(); - tester.client.set_balance(Address::from(1), U256::from(5)); + let tester = EthTester::default(); + tester.client.set_balance(Address::from(1), U256::from(5)); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getBalance", "params": ["0x0000000000000000000000000000000000000001", "pending"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x5","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x5","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_storage_at() { - let tester = EthTester::default(); - tester.client.set_storage(Address::from(1), H256::from(4), H256::from(7)); + let tester = EthTester::default(); + tester + .client + .set_storage(Address::from(1), H256::from(4), H256::from(7)); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getStorageAt", "params": ["0x0000000000000000000000000000000000000001", "0x4", "latest"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000007","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000007","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_transaction_count() { - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionCount", "params": ["0x0000000000000000000000000000000000000001", "latest"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), 
Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_transaction_count_next_nonce() { - let tester = EthTester::new_with_options(EthClientOptions::with(|options| { - options.pending_nonce_from_queue = true; - })); - tester.miner.increment_nonce(&1.into()); + let tester = EthTester::new_with_options(EthClientOptions::with(|options| { + options.pending_nonce_from_queue = true; + })); + tester.miner.increment_nonce(&1.into()); - let request1 = r#"{ + let request1 = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionCount", "params": ["0x0000000000000000000000000000000000000001", "pending"], "id": 1 }"#; - let response1 = r#"{"jsonrpc":"2.0","result":"0x1","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request1), Some(response1.to_owned())); + let response1 = r#"{"jsonrpc":"2.0","result":"0x1","id":1}"#; + assert_eq!( + tester.io.handle_request_sync(request1), + Some(response1.to_owned()) + ); - let request2 = r#"{ + let request2 = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionCount", "params": ["0x0000000000000000000000000000000000000002", "pending"], "id": 1 }"#; - let response2 = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request2), Some(response2.to_owned())); + let response2 = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + assert_eq!( + tester.io.handle_request_sync(request2), + Some(response2.to_owned()) + ); } #[test] fn rpc_eth_block_transaction_count_by_hash() { - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getBlockTransactionCountByHash", "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + 
EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_transaction_count_by_number() { - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getBlockTransactionCountByNumber", "params": ["latest"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_transaction_count_by_number_pending() { - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getBlockTransactionCountByNumber", "params": ["pending"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_pending_transaction_by_hash() { - use ethereum_types::H256; - use rlp; - use types::transaction::SignedTransaction; + use ethereum_types::H256; + use rlp; + use types::transaction::SignedTransaction; - let tester = EthTester::default(); - { - let bytes = FromHex::from_hex("f85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804").unwrap(); - let tx = rlp::decode(&bytes).expect("decoding failure"); - let tx = SignedTransaction::new(tx).unwrap(); - tester.miner.pending_transactions.lock().insert(H256::zero(), tx); - } + let tester = EthTester::default(); + { + let bytes = 
FromHex::from_hex("f85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804").unwrap(); + let tx = rlp::decode(&bytes).expect("decoding failure"); + let tx = SignedTransaction::new(tx).unwrap(); + tester + .miner + .pending_transactions + .lock() + .insert(H256::zero(), tx); + } - let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"chainId":null,"condition":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","publicKey":"0x7ae46da747962c2ee46825839c1ef9298e3bd2e70ca2938495c3693a485ec3eaa8f196327881090ff64cf4fbb0a48485d4f83098e189ed3b7a87d5941b59f789","r":"0x48b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","s":"0xefffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","standardV":"0x0","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"v":"0x1b","value":"0xa"},"id":1}"#; - let request = r#"{ + let response = 
r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"chainId":null,"condition":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","publicKey":"0x7ae46da747962c2ee46825839c1ef9298e3bd2e70ca2938495c3693a485ec3eaa8f196327881090ff64cf4fbb0a48485d4f83098e189ed3b7a87d5941b59f789","r":"0x48b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","s":"0xefffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","standardV":"0x0","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"v":"0x1b","value":"0xa"},"id":1}"#; + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionByHash", "params": ["0x0000000000000000000000000000000000000000000000000000000000000000"], "id": 1 }"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_uncle_count_by_block_hash() { - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getUncleCountByBlockHash", "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_uncle_count_by_block_number() { - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getUncleCountByBlockNumber", 
"params": ["latest"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_code() { - let tester = EthTester::default(); - tester.client.set_code(Address::from(1), vec![0xff, 0x21]); + let tester = EthTester::default(); + tester.client.set_code(Address::from(1), vec![0xff, 0x21]); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getCode", "params": ["0x0000000000000000000000000000000000000001", "latest"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0xff21","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0xff21","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_call_latest() { - let tester = EthTester::default(); - tester.client.set_execution_result(Ok(Executed { - exception: None, - gas: U256::zero(), - gas_used: U256::from(0xff30), - refunded: U256::from(0x5), - cumulative_gas_used: U256::zero(), - logs: vec![], - contracts_created: vec![], - output: vec![0x12, 0x34, 0xff], - trace: vec![], - vm_trace: None, - state_diff: None, - })); + let tester = EthTester::default(); + tester.client.set_execution_result(Ok(Executed { + exception: None, + gas: U256::zero(), + gas_used: U256::from(0xff30), + refunded: U256::from(0x5), + cumulative_gas_used: U256::zero(), + logs: vec![], + contracts_created: vec![], + output: vec![0x12, 0x34, 0xff], + trace: vec![], + vm_trace: None, + state_diff: None, + })); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_call", "params": [{ @@ -651,29 +835,32 @@ fn rpc_eth_call_latest() { "latest"], "id": 1 }"#; - let 
response = r#"{"jsonrpc":"2.0","result":"0x1234ff","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x1234ff","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_call_pending() { - let tester = EthTester::default(); - tester.client.set_execution_result(Ok(Executed { - exception: None, - gas: U256::zero(), - gas_used: U256::from(0xff30), - refunded: U256::from(0x5), - cumulative_gas_used: U256::zero(), - logs: vec![], - contracts_created: vec![], - output: vec![0x12, 0x34, 0xff], - trace: vec![], - vm_trace: None, - state_diff: None, - })); + let tester = EthTester::default(); + tester.client.set_execution_result(Ok(Executed { + exception: None, + gas: U256::zero(), + gas_used: U256::from(0xff30), + refunded: U256::from(0x5), + cumulative_gas_used: U256::zero(), + logs: vec![], + contracts_created: vec![], + output: vec![0x12, 0x34, 0xff], + trace: vec![], + vm_trace: None, + state_diff: None, + })); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_call", "params": [{ @@ -687,30 +874,33 @@ fn rpc_eth_call_pending() { "pending"], "id": 1 }"#; - // Falls back to "Latest" and gives the same result. - let response = r#"{"jsonrpc":"2.0","result":"0x1234ff","id":1}"#; + // Falls back to "Latest" and gives the same result. 
+ let response = r#"{"jsonrpc":"2.0","result":"0x1234ff","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_call() { - let tester = EthTester::default(); - tester.client.set_execution_result(Ok(Executed { - exception: None, - gas: U256::zero(), - gas_used: U256::from(0xff30), - refunded: U256::from(0x5), - cumulative_gas_used: U256::zero(), - logs: vec![], - contracts_created: vec![], - output: vec![0x12, 0x34, 0xff], - trace: vec![], - vm_trace: None, - state_diff: None, - })); + let tester = EthTester::default(); + tester.client.set_execution_result(Ok(Executed { + exception: None, + gas: U256::zero(), + gas_used: U256::from(0xff30), + refunded: U256::from(0x5), + cumulative_gas_used: U256::zero(), + logs: vec![], + contracts_created: vec![], + output: vec![0x12, 0x34, 0xff], + trace: vec![], + vm_trace: None, + state_diff: None, + })); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_call", "params": [{ @@ -724,29 +914,32 @@ fn rpc_eth_call() { "0x0"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x1234ff","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x1234ff","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_call_default_block() { - let tester = EthTester::default(); - tester.client.set_execution_result(Ok(Executed { - exception: None, - gas: U256::zero(), - gas_used: U256::from(0xff30), - refunded: U256::from(0x5), - cumulative_gas_used: U256::zero(), - logs: vec![], - contracts_created: vec![], - output: vec![0x12, 0x34, 0xff], - trace: vec![], - vm_trace: None, - state_diff: None, - })); + let tester = EthTester::default(); + tester.client.set_execution_result(Ok(Executed { + exception: None, + gas: 
U256::zero(), + gas_used: U256::from(0xff30), + refunded: U256::from(0x5), + cumulative_gas_used: U256::zero(), + logs: vec![], + contracts_created: vec![], + output: vec![0x12, 0x34, 0xff], + trace: vec![], + vm_trace: None, + state_diff: None, + })); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_call", "params": [{ @@ -759,29 +952,32 @@ fn rpc_eth_call_default_block() { }], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x1234ff","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x1234ff","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_estimate_gas() { - let tester = EthTester::default(); - tester.client.set_execution_result(Ok(Executed { - exception: None, - gas: U256::zero(), - gas_used: U256::from(0xff30), - refunded: U256::from(0x5), - cumulative_gas_used: U256::zero(), - logs: vec![], - contracts_created: vec![], - output: vec![0x12, 0x34, 0xff], - trace: vec![], - vm_trace: None, - state_diff: None, - })); + let tester = EthTester::default(); + tester.client.set_execution_result(Ok(Executed { + exception: None, + gas: U256::zero(), + gas_used: U256::from(0xff30), + refunded: U256::from(0x5), + cumulative_gas_used: U256::zero(), + logs: vec![], + contracts_created: vec![], + output: vec![0x12, 0x34, 0xff], + trace: vec![], + vm_trace: None, + state_diff: None, + })); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_estimateGas", "params": [{ @@ -795,29 +991,32 @@ fn rpc_eth_estimate_gas() { "latest"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x5208","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x5208","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn 
rpc_eth_estimate_gas_pending() { - let tester = EthTester::default(); - tester.client.set_execution_result(Ok(Executed { - exception: None, - gas: U256::zero(), - gas_used: U256::from(0xff30), - refunded: U256::from(0x5), - cumulative_gas_used: U256::zero(), - logs: vec![], - contracts_created: vec![], - output: vec![0x12, 0x34, 0xff], - trace: vec![], - vm_trace: None, - state_diff: None, - })); + let tester = EthTester::default(); + tester.client.set_execution_result(Ok(Executed { + exception: None, + gas: U256::zero(), + gas_used: U256::from(0xff30), + refunded: U256::from(0x5), + cumulative_gas_used: U256::zero(), + logs: vec![], + contracts_created: vec![], + output: vec![0x12, 0x34, 0xff], + trace: vec![], + vm_trace: None, + state_diff: None, + })); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_estimateGas", "params": [{ @@ -831,30 +1030,33 @@ fn rpc_eth_estimate_gas_pending() { "pending"], "id": 1 }"#; - // Falls back to "Latest" so the result is the same - let response = r#"{"jsonrpc":"2.0","result":"0x5208","id":1}"#; + // Falls back to "Latest" so the result is the same + let response = r#"{"jsonrpc":"2.0","result":"0x5208","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_estimate_gas_default_block() { - let tester = EthTester::default(); - tester.client.set_execution_result(Ok(Executed { - exception: None, - gas: U256::zero(), - gas_used: U256::from(0xff30), - refunded: U256::from(0x5), - cumulative_gas_used: U256::zero(), - logs: vec![], - contracts_created: vec![], - output: vec![0x12, 0x34, 0xff], - trace: vec![], - vm_trace: None, - state_diff: None, - })); + let tester = EthTester::default(); + tester.client.set_execution_result(Ok(Executed { + exception: None, + gas: U256::zero(), + gas_used: U256::from(0xff30), + refunded: U256::from(0x5), + cumulative_gas_used: 
U256::zero(), + logs: vec![], + contracts_created: vec![], + output: vec![0x12, 0x34, 0xff], + trace: vec![], + vm_trace: None, + state_diff: None, + })); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_estimateGas", "params": [{ @@ -867,16 +1069,19 @@ fn rpc_eth_estimate_gas_default_block() { }], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x5208","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x5208","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_send_raw_transaction_error() { - let tester = EthTester::default(); + let tester = EthTester::default(); - let req = r#"{ + let req = r#"{ "jsonrpc": "2.0", "method": "eth_sendRawTransaction", "params": [ @@ -884,132 +1089,174 @@ fn rpc_eth_send_raw_transaction_error() { ], "id": 1 }"#; - let res = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid RLP.","data":"RlpExpectedToBeList"},"id":1}"#.into(); + let res = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid RLP.","data":"RlpExpectedToBeList"},"id":1}"#.into(); - assert_eq!(tester.io.handle_request_sync(&req), Some(res)); + assert_eq!(tester.io.handle_request_sync(&req), Some(res)); } #[test] fn rpc_eth_send_raw_transaction() { - let tester = EthTester::default(); - let address = tester.accounts_provider.new_account(&"abcd".into()).unwrap(); - tester.accounts_provider.unlock_account_permanently(address, "abcd".into()).unwrap(); + let tester = EthTester::default(); + let address = tester + .accounts_provider + .new_account(&"abcd".into()) + .unwrap(); + tester + .accounts_provider + .unlock_account_permanently(address, "abcd".into()) + .unwrap(); - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(0x9184e72a000u64), - gas: U256::from(0x76c0), - action: 
Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - value: U256::from(0x9184e72au64), - data: vec![] - }; - let signature = tester.accounts_provider.sign(address, None, t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x9184e72a000u64), + gas: U256::from(0x76c0), + action: Action::Call( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + ), + value: U256::from(0x9184e72au64), + data: vec![], + }; + let signature = tester + .accounts_provider + .sign(address, None, t.hash(None)) + .unwrap(); + let t = t.with_signature(signature, None); - let rlp = rlp::encode(&t).to_hex(); + let rlp = rlp::encode(&t).to_hex(); - let req = r#"{ + let req = r#"{ "jsonrpc": "2.0", "method": "eth_sendRawTransaction", "params": [ - "0x"#.to_owned() + &rlp + r#"" + "0x"# + .to_owned() + + &rlp + + r#"" ], "id": 1 }"#; - let res = r#"{"jsonrpc":"2.0","result":""#.to_owned() + &format!("0x{:x}", t.hash()) + r#"","id":1}"#; + let res = + r#"{"jsonrpc":"2.0","result":""#.to_owned() + &format!("0x{:x}", t.hash()) + r#"","id":1}"#; - assert_eq!(tester.io.handle_request_sync(&req), Some(res)); + assert_eq!(tester.io.handle_request_sync(&req), Some(res)); } #[test] fn rpc_eth_transaction_receipt() { - let receipt = LocalizedReceipt { - from: H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap(), - to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - transaction_hash: H256::zero(), - transaction_index: 0, - block_hash: H256::from_str("ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5").unwrap(), - block_number: 0x4510c, - cumulative_gas_used: U256::from(0x20), - gas_used: U256::from(0x10), - contract_address: None, - logs: vec![LocalizedLogEntry { - entry: LogEntry { - address: Address::from_str("33990122638b9132ca29c723bdf037f1a891a70c").unwrap(), - topics: vec![ - 
H256::from_str("a6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc").unwrap(), - H256::from_str("4861736852656700000000000000000000000000000000000000000000000000").unwrap() - ], - data: vec![], - }, - block_hash: H256::from_str("ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5").unwrap(), - block_number: 0x4510c, - transaction_hash: H256::new(), - transaction_index: 0, - transaction_log_index: 0, - log_index: 1, - }], - log_bloom: 0.into(), - outcome: TransactionOutcome::StateRoot(0.into()), - }; + let receipt = LocalizedReceipt { + from: H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap(), + to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), + transaction_hash: H256::zero(), + transaction_index: 0, + block_hash: H256::from_str( + "ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5", + ) + .unwrap(), + block_number: 0x4510c, + cumulative_gas_used: U256::from(0x20), + gas_used: U256::from(0x10), + contract_address: None, + logs: vec![LocalizedLogEntry { + entry: LogEntry { + address: Address::from_str("33990122638b9132ca29c723bdf037f1a891a70c").unwrap(), + topics: vec![ + H256::from_str( + "a6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc", + ) + .unwrap(), + H256::from_str( + "4861736852656700000000000000000000000000000000000000000000000000", + ) + .unwrap(), + ], + data: vec![], + }, + block_hash: H256::from_str( + "ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5", + ) + .unwrap(), + block_number: 0x4510c, + transaction_hash: H256::new(), + transaction_index: 0, + transaction_log_index: 0, + log_index: 1, + }], + log_bloom: 0.into(), + outcome: TransactionOutcome::StateRoot(0.into()), + }; - let hash = H256::from_str("b903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238").unwrap(); - let tester = EthTester::default(); - tester.client.set_transaction_receipt(TransactionId::Hash(hash), receipt); + let hash = + 
H256::from_str("b903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238").unwrap(); + let tester = EthTester::default(); + tester + .client + .set_transaction_receipt(TransactionId::Hash(hash), receipt); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionReceipt", "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":{"blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","contractAddress":null,"cumulativeGasUsed":"0x20","from":"0xb60e8dd61c5d32be8058bb8eb970870f07233155","gasUsed":"0x10","logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","data":"0x","logIndex":"0x1","removed":false,"topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"mined"}],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","root":"0x0000000000000000000000000000000000000000000000000000000000000000","to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0"},"id":1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":{"blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","contractAddress":null,"cumulativeGasUsed":"0x20","from":"0xb60e8dd61c5d32be8058bb8eb970870f07233155","gasUsed":"0x10","logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","data":"0x","logIndex":"0x1","removed":false,"topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"mined"}],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","root":"0x0000000000000000000000000000000000000000000000000000000000000000","to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0"},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_transaction_receipt_null() { - let tester = EthTester::default(); + let tester = EthTester::default(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionReceipt", "params": 
["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_eth_pending_receipt() { - let pending = RichReceipt { - from: H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap(), - to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - transaction_hash: H256::from_str("b903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238").unwrap(), - transaction_index: 0, - cumulative_gas_used: U256::from(0x20), - gas_used: U256::from(0x10), - contract_address: None, - logs: Vec::new(), - log_bloom: Bloom::zero(), - outcome: TransactionOutcome::Unknown, - }; - let tester = EthTester::default(); + let pending = RichReceipt { + from: H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap(), + to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), + transaction_hash: H256::from_str( + "b903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238", + ) + .unwrap(), + transaction_index: 0, + cumulative_gas_used: U256::from(0x20), + gas_used: U256::from(0x10), + contract_address: None, + logs: Vec::new(), + log_bloom: Bloom::zero(), + outcome: TransactionOutcome::Unknown, + }; + let tester = EthTester::default(); - tester.miner.pending_receipts.lock().push(pending); + tester.miner.pending_receipts.lock().push(pending); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionReceipt", "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], "id": 1 }"#; - let response = 
r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"contractAddress":null,"cumulativeGasUsed":"0x20","from":"0xb60e8dd61c5d32be8058bb8eb970870f07233155","gasUsed":"0x10","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionHash":"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238","transactionIndex":"0x0"},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"contractAddress":null,"cumulativeGasUsed":"0x20","from":"0xb60e8dd61c5d32be8058bb8eb970870f07233155","gasUsed":"0x10","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionHash":"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238","transactionIndex":"0x0"},"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } // These tests 
are incorrect: their output is undefined as long as eth_getCompilers is []. @@ -1019,104 +1266,152 @@ fn rpc_eth_pending_receipt() { #[ignore] #[test] fn rpc_eth_compilers() { - let request = r#"{"jsonrpc": "2.0", "method": "eth_getCompilers", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32070,"message":"Method deprecated","data":"Compilation functionality is deprecated."},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_getCompilers", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32070,"message":"Method deprecated","data":"Compilation functionality is deprecated."},"id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[ignore] #[test] fn rpc_eth_compile_lll() { - let request = r#"{"jsonrpc": "2.0", "method": "eth_compileLLL", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32070,"message":"Method deprecated","data":"Compilation of LLL via RPC is deprecated"},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_compileLLL", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32070,"message":"Method deprecated","data":"Compilation of LLL via RPC is deprecated"},"id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[ignore] #[test] fn rpc_eth_compile_solidity() { - let request = r#"{"jsonrpc": "2.0", "method": "eth_compileSolidity", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32070,"message":"Method deprecated","data":"Compilation of Solidity via RPC is deprecated"},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_compileSolidity", "params": [], "id": 
1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32070,"message":"Method deprecated","data":"Compilation of Solidity via RPC is deprecated"},"id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[ignore] #[test] fn rpc_eth_compile_serpent() { - let request = r#"{"jsonrpc": "2.0", "method": "eth_compileSerpent", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32070,"message":"Method deprecated","data":"Compilation of Serpent via RPC is deprecated"},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_compileSerpent", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32070,"message":"Method deprecated","data":"Compilation of Serpent via RPC is deprecated"},"id":1}"#; - assert_eq!(EthTester::default().io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + EthTester::default().io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_get_work_returns_no_work_if_cant_mine() { - let eth_tester = EthTester::default(); - eth_tester.client.set_queue_size(10); + let eth_tester = EthTester::default(); + eth_tester.client.set_queue_size(10); - let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32001,"message":"Still syncing."},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32001,"message":"Still syncing."},"id":1}"#; - assert_eq!(eth_tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + eth_tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_get_work_returns_correct_work_package() { - let eth_tester = EthTester::default(); - 
eth_tester.miner.set_author(miner::Author::External(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap())); + let eth_tester = EthTester::default(); + eth_tester.miner.set_author(miner::Author::External( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + )); - let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":["0x76c7bd86693aee93d1a80a408a09a0585b1a1292afcb56192f171d925ea18e2d","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x1"],"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x76c7bd86693aee93d1a80a408a09a0585b1a1292afcb56192f171d925ea18e2d","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x1"],"id":1}"#; - assert_eq!(eth_tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + eth_tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_get_work_should_not_return_block_number() { - let eth_tester = EthTester::new_with_options(EthClientOptions::with(|options| { - options.send_block_number_in_get_work = false; - })); - eth_tester.miner.set_author(miner::Author::External(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap())); + let eth_tester = EthTester::new_with_options(EthClientOptions::with(|options| { + options.send_block_number_in_get_work = false; + })); + eth_tester.miner.set_author(miner::Author::External( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + )); - let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; - let response = 
r#"{"jsonrpc":"2.0","result":["0x76c7bd86693aee93d1a80a408a09a0585b1a1292afcb56192f171d925ea18e2d","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000"],"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x76c7bd86693aee93d1a80a408a09a0585b1a1292afcb56192f171d925ea18e2d","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000"],"id":1}"#; - assert_eq!(eth_tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + eth_tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_get_work_should_timeout() { - let eth_tester = EthTester::default(); - eth_tester.miner.set_author(miner::Author::External(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap())); - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() - 1000; // Set latest block to 1000 seconds ago - eth_tester.client.set_latest_block_timestamp(timestamp); - let hash = eth_tester.miner.work_package(&*eth_tester.client).unwrap().0; + let eth_tester = EthTester::default(); + eth_tester.miner.set_author(miner::Author::External( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + )); + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() + - 1000; // Set latest block to 1000 seconds ago + eth_tester.client.set_latest_block_timestamp(timestamp); + let hash = eth_tester + .miner + .work_package(&*eth_tester.client) + .unwrap() + .0; - // Request without providing timeout. This should work since we're disabling timeout. 
- let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; - let work_response = format!( - r#"{{"jsonrpc":"2.0","result":["0x{:x}","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x1"],"id":1}}"#, - hash, - ); - assert_eq!(eth_tester.io.handle_request_sync(request), Some(work_response.to_owned())); + // Request without providing timeout. This should work since we're disabling timeout. + let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; + let work_response = format!( + r#"{{"jsonrpc":"2.0","result":["0x{:x}","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x1"],"id":1}}"#, + hash, + ); + assert_eq!( + eth_tester.io.handle_request_sync(request), + Some(work_response.to_owned()) + ); - // Request with timeout of 0 seconds. This should work since we're disabling timeout. - let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [0], "id": 1}"#; - let work_response = format!( - r#"{{"jsonrpc":"2.0","result":["0x{:x}","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x1"],"id":1}}"#, - hash, - ); - assert_eq!(eth_tester.io.handle_request_sync(request), Some(work_response.to_owned())); + // Request with timeout of 0 seconds. This should work since we're disabling timeout. 
+ let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [0], "id": 1}"#; + let work_response = format!( + r#"{{"jsonrpc":"2.0","result":["0x{:x}","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x1"],"id":1}}"#, + hash, + ); + assert_eq!( + eth_tester.io.handle_request_sync(request), + Some(work_response.to_owned()) + ); - // Request with timeout of 10K seconds. This should work. - let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [10000], "id": 1}"#; - assert_eq!(eth_tester.io.handle_request_sync(request), Some(work_response.to_owned())); + // Request with timeout of 10K seconds. This should work. + let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [10000], "id": 1}"#; + assert_eq!( + eth_tester.io.handle_request_sync(request), + Some(work_response.to_owned()) + ); - // Request with timeout of 10 seconds. This should fail. - let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [10], "id": 1}"#; - let err_response = r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Work has not changed."},"id":1}"#; - assert_eq!(eth_tester.io.handle_request_sync(request), Some(err_response.to_owned())); + // Request with timeout of 10 seconds. This should fail. 
+ let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [10], "id": 1}"#; + let err_response = + r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Work has not changed."},"id":1}"#; + assert_eq!( + eth_tester.io.handle_request_sync(request), + Some(err_response.to_owned()) + ); } diff --git a/rpc/src/v1/tests/mocked/eth_pubsub.rs b/rpc/src/v1/tests/mocked/eth_pubsub.rs index 6fd7394a4..2a6bcc43e 100644 --- a/rpc/src/v1/tests/mocked/eth_pubsub.rs +++ b/rpc/src/v1/tests/mocked/eth_pubsub.rs @@ -16,207 +16,273 @@ use std::sync::Arc; -use jsonrpc_core::MetaIoHandler; -use jsonrpc_core::futures::{self, Stream, Future}; +use jsonrpc_core::{ + futures::{self, Future, Stream}, + MetaIoHandler, +}; use jsonrpc_pubsub::Session; use std::time::Duration; use v1::{EthPubSub, EthPubSubClient, Metadata}; -use ethcore::client::{TestBlockChainClient, EachBlockWith, ChainNotify, NewBlocks, ChainRoute, ChainRouteType}; +use ethcore::client::{ + ChainNotify, ChainRoute, ChainRouteType, EachBlockWith, NewBlocks, TestBlockChainClient, +}; use parity_runtime::Runtime; const DURATION_ZERO: Duration = Duration::from_millis(0); #[test] fn should_subscribe_to_new_heads() { - // given - let el = Runtime::with_thread_count(1); - let mut client = TestBlockChainClient::new(); - // Insert some blocks - client.add_blocks(3, EachBlockWith::Nothing); - let h3 = client.block_hash_delta_minus(1); - let h2 = client.block_hash_delta_minus(2); - let h1 = client.block_hash_delta_minus(3); + // given + let el = Runtime::with_thread_count(1); + let mut client = TestBlockChainClient::new(); + // Insert some blocks + client.add_blocks(3, EachBlockWith::Nothing); + let h3 = client.block_hash_delta_minus(1); + let h2 = client.block_hash_delta_minus(2); + let h1 = client.block_hash_delta_minus(3); - let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor()); - let handler = pubsub.handler().upgrade().unwrap(); - let pubsub = pubsub.to_delegate(); + let pubsub = 
EthPubSubClient::new_test(Arc::new(client), el.executor()); + let handler = pubsub.handler().upgrade().unwrap(); + let pubsub = pubsub.to_delegate(); - let mut io = MetaIoHandler::default(); - io.extend_with(pubsub); + let mut io = MetaIoHandler::default(); + io.extend_with(pubsub); - let mut metadata = Metadata::default(); - let (sender, receiver) = futures::sync::mpsc::channel(8); - metadata.session = Some(Arc::new(Session::new(sender))); + let mut metadata = Metadata::default(); + let (sender, receiver) = futures::sync::mpsc::channel(8); + metadata.session = Some(Arc::new(Session::new(sender))); - // Subscribe - let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["newHeads"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x416d77337e24399d","id":1}"#; - assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); + // Subscribe + let request = + r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["newHeads"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x416d77337e24399d","id":1}"#; + assert_eq!( + io.handle_request_sync(request, metadata.clone()), + Some(response.to_owned()) + ); - // Check notifications - handler.new_blocks(NewBlocks::new(vec![], vec![], ChainRoute::new(vec![(h1, ChainRouteType::Enacted)]), vec![], vec![], DURATION_ZERO, true)); - let (res, receiver) = receiver.into_future().wait().unwrap(); - let response = 
r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x1","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x1","parentHash":"0x0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":"0x416d77337e24399d"}}"#; - assert_eq!(res, Some(response.into())); + // Check notifications + handler.new_blocks(NewBlocks::new( + vec![], + vec![], + ChainRoute::new(vec![(h1, ChainRouteType::Enacted)]), + vec![], + vec![], + DURATION_ZERO, + true, + )); + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = 
r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x1","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x1","parentHash":"0x0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":"0x416d77337e24399d"}}"#; + assert_eq!(res, Some(response.into())); - // Notify about two blocks - handler.new_blocks(NewBlocks::new(vec![], vec![], ChainRoute::new(vec![(h2, ChainRouteType::Enacted), (h3, ChainRouteType::Enacted)]), vec![], vec![], DURATION_ZERO, true)); + // Notify about two blocks + handler.new_blocks(NewBlocks::new( + vec![], + vec![], + ChainRoute::new(vec![ + (h2, ChainRouteType::Enacted), + (h3, ChainRouteType::Enacted), + ]), + vec![], + vec![], + DURATION_ZERO, + true, + )); - // Receive both - let (res, receiver) = receiver.into_future().wait().unwrap(); - let response = 
r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x2","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0x44e5ecf454ea99af9d8a8f2ca0daba96964c90de05db7a78f59b84ae9e749706","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x2","parentHash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":"0x416d77337e24399d"}}"#; - assert_eq!(res, Some(response.into())); - let (res, receiver) = receiver.into_future().wait().unwrap(); - let response = 
r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x3","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0xdf04a98bb0c6fa8441bd429822f65a46d0cb553f6bcef602b973e65c81497f8e","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x3","parentHash":"0x44e5ecf454ea99af9d8a8f2ca0daba96964c90de05db7a78f59b84ae9e749706","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":"0x416d77337e24399d"}}"#; - assert_eq!(res, Some(response.into())); + // Receive both + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = 
r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x2","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0x44e5ecf454ea99af9d8a8f2ca0daba96964c90de05db7a78f59b84ae9e749706","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x2","parentHash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":"0x416d77337e24399d"}}"#; + assert_eq!(res, Some(response.into())); + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = 
r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x3","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0xdf04a98bb0c6fa8441bd429822f65a46d0cb553f6bcef602b973e65c81497f8e","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x3","parentHash":"0x44e5ecf454ea99af9d8a8f2ca0daba96964c90de05db7a78f59b84ae9e749706","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":"0x416d77337e24399d"}}"#; + assert_eq!(res, Some(response.into())); - // And unsubscribe - let request = r#"{"jsonrpc": "2.0", "method": "eth_unsubscribe", "params": ["0x416d77337e24399d"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request, metadata), Some(response.to_owned())); + // And unsubscribe + let request = r#"{"jsonrpc": "2.0", "method": "eth_unsubscribe", "params": ["0x416d77337e24399d"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!( + io.handle_request_sync(request, metadata), + Some(response.to_owned()) + ); - let (res, 
_receiver) = receiver.into_future().wait().unwrap(); - assert_eq!(res, None); + let (res, _receiver) = receiver.into_future().wait().unwrap(); + assert_eq!(res, None); } #[test] fn should_subscribe_to_logs() { - use ethcore::client::BlockInfo; - use types::log_entry::{LocalizedLogEntry, LogEntry}; - use types::ids::BlockId; + use ethcore::client::BlockInfo; + use types::{ + ids::BlockId, + log_entry::{LocalizedLogEntry, LogEntry}, + }; - // given - let el = Runtime::with_thread_count(1); - let mut client = TestBlockChainClient::new(); - // Insert some blocks - client.add_blocks(1, EachBlockWith::Transaction); - let h1 = client.block_hash_delta_minus(1); - let block = client.block(BlockId::Hash(h1)).unwrap(); - let tx_hash = block.transactions()[0].hash(); - client.set_logs(vec![ - LocalizedLogEntry { - entry: LogEntry { - address: 5.into(), - topics: vec![1.into(), 2.into(), 0.into(), 0.into()], - data: vec![], - }, - block_hash: h1, - block_number: block.header().number(), - transaction_hash: tx_hash, - transaction_index: 0, - log_index: 0, - transaction_log_index: 0, - } - ]); + // given + let el = Runtime::with_thread_count(1); + let mut client = TestBlockChainClient::new(); + // Insert some blocks + client.add_blocks(1, EachBlockWith::Transaction); + let h1 = client.block_hash_delta_minus(1); + let block = client.block(BlockId::Hash(h1)).unwrap(); + let tx_hash = block.transactions()[0].hash(); + client.set_logs(vec![LocalizedLogEntry { + entry: LogEntry { + address: 5.into(), + topics: vec![1.into(), 2.into(), 0.into(), 0.into()], + data: vec![], + }, + block_hash: h1, + block_number: block.header().number(), + transaction_hash: tx_hash, + transaction_index: 0, + log_index: 0, + transaction_log_index: 0, + }]); - let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor()); - let handler = pubsub.handler().upgrade().unwrap(); - let pubsub = pubsub.to_delegate(); + let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor()); + let handler 
= pubsub.handler().upgrade().unwrap(); + let pubsub = pubsub.to_delegate(); - let mut io = MetaIoHandler::default(); - io.extend_with(pubsub); + let mut io = MetaIoHandler::default(); + io.extend_with(pubsub); - let mut metadata = Metadata::default(); - let (sender, receiver) = futures::sync::mpsc::channel(8); - metadata.session = Some(Arc::new(Session::new(sender))); + let mut metadata = Metadata::default(); + let (sender, receiver) = futures::sync::mpsc::channel(8); + metadata.session = Some(Arc::new(Session::new(sender))); - // Subscribe - let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["logs", {}], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x416d77337e24399d","id":1}"#; - assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); + // Subscribe + let request = + r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["logs", {}], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x416d77337e24399d","id":1}"#; + assert_eq!( + io.handle_request_sync(request, metadata.clone()), + Some(response.to_owned()) + ); - // Check notifications (enacted) - handler.new_blocks(NewBlocks::new(vec![], vec![], ChainRoute::new(vec![(h1, ChainRouteType::Enacted)]), vec![], vec![], DURATION_ZERO, false)); - let (res, receiver) = receiver.into_future().wait().unwrap(); - let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"address":"0x0000000000000000000000000000000000000005","blockHash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","blockNumber":"0x1","data":"0x","logIndex":"0x0","removed":false,"topics":["0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"],"transactionHash":""#.to_owned() + // Check notifications (enacted) 
+ handler.new_blocks(NewBlocks::new( + vec![], + vec![], + ChainRoute::new(vec![(h1, ChainRouteType::Enacted)]), + vec![], + vec![], + DURATION_ZERO, + false, + )); + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"address":"0x0000000000000000000000000000000000000005","blockHash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","blockNumber":"0x1","data":"0x","logIndex":"0x0","removed":false,"topics":["0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"],"transactionHash":""#.to_owned() + &format!("0x{:x}", tx_hash) + r#"","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"mined"},"subscription":"0x416d77337e24399d"}}"#; - assert_eq!(res, Some(response.into())); + assert_eq!(res, Some(response.into())); - // Check notifications (retracted) - handler.new_blocks(NewBlocks::new(vec![], vec![], ChainRoute::new(vec![(h1, ChainRouteType::Retracted)]), vec![], vec![], DURATION_ZERO, false)); - let (res, receiver) = receiver.into_future().wait().unwrap(); - let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"address":"0x0000000000000000000000000000000000000005","blockHash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","blockNumber":"0x1","data":"0x","logIndex":"0x0","removed":true,"topics":["0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"],"transactionHash":""#.to_owned() + // Check notifications (retracted) + handler.new_blocks(NewBlocks::new( + 
vec![], + vec![], + ChainRoute::new(vec![(h1, ChainRouteType::Retracted)]), + vec![], + vec![], + DURATION_ZERO, + false, + )); + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"address":"0x0000000000000000000000000000000000000005","blockHash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","blockNumber":"0x1","data":"0x","logIndex":"0x0","removed":true,"topics":["0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"],"transactionHash":""#.to_owned() + &format!("0x{:x}", tx_hash) + r#"","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"removed"},"subscription":"0x416d77337e24399d"}}"#; - assert_eq!(res, Some(response.into())); + assert_eq!(res, Some(response.into())); - // And unsubscribe - let request = r#"{"jsonrpc": "2.0", "method": "eth_unsubscribe", "params": ["0x416d77337e24399d"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request, metadata), Some(response.to_owned())); + // And unsubscribe + let request = r#"{"jsonrpc": "2.0", "method": "eth_unsubscribe", "params": ["0x416d77337e24399d"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!( + io.handle_request_sync(request, metadata), + Some(response.to_owned()) + ); - let (res, _receiver) = receiver.into_future().wait().unwrap(); - assert_eq!(res, None); + let (res, _receiver) = receiver.into_future().wait().unwrap(); + assert_eq!(res, None); } #[test] fn should_subscribe_to_pending_transactions() { - // given - let el = Runtime::with_thread_count(1); - let client = TestBlockChainClient::new(); + // given + let el = Runtime::with_thread_count(1); + let client 
= TestBlockChainClient::new(); - let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor()); - let handler = pubsub.handler().upgrade().unwrap(); - let pubsub = pubsub.to_delegate(); + let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor()); + let handler = pubsub.handler().upgrade().unwrap(); + let pubsub = pubsub.to_delegate(); - let mut io = MetaIoHandler::default(); - io.extend_with(pubsub); + let mut io = MetaIoHandler::default(); + io.extend_with(pubsub); - let mut metadata = Metadata::default(); - let (sender, receiver) = futures::sync::mpsc::channel(8); - metadata.session = Some(Arc::new(Session::new(sender))); + let mut metadata = Metadata::default(); + let (sender, receiver) = futures::sync::mpsc::channel(8); + metadata.session = Some(Arc::new(Session::new(sender))); - // Fail if params are provided - let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["newPendingTransactions", {}], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Couldn't parse parameters: newPendingTransactions","data":"\"Expected no parameters.\""},"id":1}"#; - assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); + // Fail if params are provided + let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["newPendingTransactions", {}], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Couldn't parse parameters: newPendingTransactions","data":"\"Expected no parameters.\""},"id":1}"#; + assert_eq!( + io.handle_request_sync(request, metadata.clone()), + Some(response.to_owned()) + ); - // Subscribe - let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["newPendingTransactions"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x416d77337e24399d","id":1}"#; - assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); + // Subscribe + let request = 
r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["newPendingTransactions"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x416d77337e24399d","id":1}"#; + assert_eq!( + io.handle_request_sync(request, metadata.clone()), + Some(response.to_owned()) + ); - // Send new transactions - handler.notify_new_transactions(&[5.into(), 7.into()]); + // Send new transactions + handler.notify_new_transactions(&[5.into(), 7.into()]); - let (res, receiver) = receiver.into_future().wait().unwrap(); - let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":"0x0000000000000000000000000000000000000000000000000000000000000005","subscription":"0x416d77337e24399d"}}"#; - assert_eq!(res, Some(response.into())); + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":"0x0000000000000000000000000000000000000000000000000000000000000005","subscription":"0x416d77337e24399d"}}"#; + assert_eq!(res, Some(response.into())); - let (res, receiver) = receiver.into_future().wait().unwrap(); - let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":"0x0000000000000000000000000000000000000000000000000000000000000007","subscription":"0x416d77337e24399d"}}"#; - assert_eq!(res, Some(response.into())); + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":"0x0000000000000000000000000000000000000000000000000000000000000007","subscription":"0x416d77337e24399d"}}"#; + assert_eq!(res, Some(response.into())); - // And unsubscribe - let request = r#"{"jsonrpc": "2.0", "method": "eth_unsubscribe", "params": ["0x416d77337e24399d"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request, metadata), Some(response.to_owned())); + // And unsubscribe + let request = r#"{"jsonrpc": "2.0", "method": 
"eth_unsubscribe", "params": ["0x416d77337e24399d"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!( + io.handle_request_sync(request, metadata), + Some(response.to_owned()) + ); - let (res, _receiver) = receiver.into_future().wait().unwrap(); - assert_eq!(res, None); + let (res, _receiver) = receiver.into_future().wait().unwrap(); + assert_eq!(res, None); } #[test] fn should_return_unimplemented() { - // given - let el = Runtime::with_thread_count(1); - let client = TestBlockChainClient::new(); - let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor()); - let pubsub = pubsub.to_delegate(); + // given + let el = Runtime::with_thread_count(1); + let client = TestBlockChainClient::new(); + let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor()); + let pubsub = pubsub.to_delegate(); - let mut io = MetaIoHandler::default(); - io.extend_with(pubsub); + let mut io = MetaIoHandler::default(); + io.extend_with(pubsub); - let mut metadata = Metadata::default(); - let (sender, _receiver) = futures::sync::mpsc::channel(8); - metadata.session = Some(Arc::new(Session::new(sender))); + let mut metadata = Metadata::default(); + let (sender, _receiver) = futures::sync::mpsc::channel(8); + metadata.session = Some(Arc::new(Session::new(sender))); - // Subscribe - let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"This request is not implemented yet. Please create an issue on Github repo."},"id":1}"#; - let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["syncing"], "id": 1}"#; - assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); + // Subscribe + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"This request is not implemented yet. 
Please create an issue on Github repo."},"id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["syncing"], "id": 1}"#; + assert_eq!( + io.handle_request_sync(request, metadata.clone()), + Some(response.to_owned()) + ); } diff --git a/rpc/src/v1/tests/mocked/manage_network.rs b/rpc/src/v1/tests/mocked/manage_network.rs index d327a8743..60e7055cc 100644 --- a/rpc/src/v1/tests/mocked/manage_network.rs +++ b/rpc/src/v1/tests/mocked/manage_network.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use self::ethcore_network::{NetworkContext, ProtocolId}; use std::ops::RangeInclusive; use sync::ManageNetwork; -use self::ethcore_network::{ProtocolId, NetworkContext}; extern crate ethcore_network; @@ -24,12 +24,18 @@ pub struct TestManageNetwork; // TODO: rob, gavin (originally introduced this functions) - proper tests and test state impl ManageNetwork for TestManageNetwork { - fn accept_unreserved_peers(&self) { } - fn deny_unreserved_peers(&self) { } - fn remove_reserved_peer(&self, _peer: String) -> Result<(), String> { Ok(()) } - fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { Ok(()) } - fn start_network(&self) {} - fn stop_network(&self) {} - fn num_peers_range(&self) -> RangeInclusive { 25..=50 } - fn with_proto_context(&self, _: ProtocolId, _: &mut FnMut(&NetworkContext)) { } + fn accept_unreserved_peers(&self) {} + fn deny_unreserved_peers(&self) {} + fn remove_reserved_peer(&self, _peer: String) -> Result<(), String> { + Ok(()) + } + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + Ok(()) + } + fn start_network(&self) {} + fn stop_network(&self) {} + fn num_peers_range(&self) -> RangeInclusive { + 25..=50 + } + fn with_proto_context(&self, _: ProtocolId, _: &mut FnMut(&NetworkContext)) {} } diff --git a/rpc/src/v1/tests/mocked/net.rs b/rpc/src/v1/tests/mocked/net.rs index ff6d152d8..278e8b5f0 100644 --- 
a/rpc/src/v1/tests/mocked/net.rs +++ b/rpc/src/v1/tests/mocked/net.rs @@ -14,53 +14,55 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; use jsonrpc_core::IoHandler; -use v1::{Net, NetClient}; -use v1::tests::helpers::{Config, TestSyncProvider}; +use std::sync::Arc; +use v1::{ + tests::helpers::{Config, TestSyncProvider}, + Net, NetClient, +}; fn sync_provider() -> Arc { - Arc::new(TestSyncProvider::new(Config { - network_id: 3, - num_peers: 120, - })) + Arc::new(TestSyncProvider::new(Config { + network_id: 3, + num_peers: 120, + })) } #[test] fn rpc_net_version() { - let sync = sync_provider(); - let net = NetClient::new(&sync).to_delegate(); - let mut io = IoHandler::new(); - io.extend_with(net); + let sync = sync_provider(); + let net = NetClient::new(&sync).to_delegate(); + let mut io = IoHandler::new(); + io.extend_with(net); - let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"3","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"3","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_net_peer_count() { - let sync = sync_provider(); - let net = NetClient::new(&sync).to_delegate(); - let mut io = IoHandler::new(); - io.extend_with(net); + let sync = sync_provider(); + let net = NetClient::new(&sync).to_delegate(); + let mut io = IoHandler::new(); + io.extend_with(net); - let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#; - 
assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_net_listening() { - let sync = sync_provider(); - let net = NetClient::new(&sync).to_delegate(); - let mut io = IoHandler::new(); - io.extend_with(net); + let sync = sync_provider(); + let net = NetClient::new(&sync).to_delegate(); + let mut io = IoHandler::new(); + io.extend_with(net); - let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index c0144a4bd..e51d2a100 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -14,454 +14,493 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use ethcore::client::{TestBlockChainClient, Executed, TransactionId}; +use ethcore::client::{Executed, TestBlockChainClient, TransactionId}; use ethcore_logger::RotatingLogger; -use ethereum_types::{Address, U256, H256}; +use ethereum_types::{Address, H256, U256}; use ethstore::ethkey::{Generator, Random}; use miner::pool::local_transactions::Status as LocalTransactionStatus; +use std::sync::Arc; use sync::ManageNetwork; use types::receipt::{LocalizedReceipt, TransactionOutcome}; -use jsonrpc_core::IoHandler; -use v1::{Parity, ParityClient}; -use v1::metadata::Metadata; -use v1::helpers::NetworkSettings; -use v1::helpers::external_signer::SignerService; -use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService, TestUpdater}; use super::manage_network::TestManageNetwork; +use jsonrpc_core::IoHandler; +use v1::{ + helpers::{external_signer::SignerService, NetworkSettings}, + metadata::Metadata, + tests::helpers::{Config, TestMinerService, TestSyncProvider, TestUpdater}, + Parity, ParityClient, +}; use Host; pub type TestParityClient = ParityClient; pub struct Dependencies { - pub miner: Arc, - pub client: Arc, - pub sync: Arc, - pub updater: Arc, - pub logger: Arc, - pub settings: Arc, - pub network: Arc, - pub ws_address: Option, + pub miner: Arc, + pub client: Arc, + pub sync: Arc, + pub updater: Arc, + pub logger: Arc, + pub settings: Arc, + pub network: Arc, + pub ws_address: Option, } impl Dependencies { - pub fn new() -> Self { - Dependencies { - miner: Arc::new(TestMinerService::default()), - client: Arc::new(TestBlockChainClient::default()), - sync: Arc::new(TestSyncProvider::new(Config { - network_id: 3, - num_peers: 120, - })), - updater: Arc::new(TestUpdater::default()), - logger: Arc::new(RotatingLogger::new("rpc=trace".to_owned())), - settings: Arc::new(NetworkSettings { - name: "mynode".to_owned(), - chain: "testchain".to_owned(), - is_dev_chain: false, - network_port: 30303, - rpc_enabled: true, - rpc_interface: 
"all".to_owned(), - rpc_port: 8545, - }), - network: Arc::new(TestManageNetwork), - ws_address: Some("127.0.0.1:18546".into()), - } - } + pub fn new() -> Self { + Dependencies { + miner: Arc::new(TestMinerService::default()), + client: Arc::new(TestBlockChainClient::default()), + sync: Arc::new(TestSyncProvider::new(Config { + network_id: 3, + num_peers: 120, + })), + updater: Arc::new(TestUpdater::default()), + logger: Arc::new(RotatingLogger::new("rpc=trace".to_owned())), + settings: Arc::new(NetworkSettings { + name: "mynode".to_owned(), + chain: "testchain".to_owned(), + is_dev_chain: false, + network_port: 30303, + rpc_enabled: true, + rpc_interface: "all".to_owned(), + rpc_port: 8545, + }), + network: Arc::new(TestManageNetwork), + ws_address: Some("127.0.0.1:18546".into()), + } + } - pub fn client(&self, signer: Option>) -> TestParityClient { - ParityClient::new( - self.client.clone(), - self.miner.clone(), - self.sync.clone(), - self.updater.clone(), - self.network.clone(), - self.logger.clone(), - self.settings.clone(), - signer, - self.ws_address.clone(), - None, - ) - } + pub fn client(&self, signer: Option>) -> TestParityClient { + ParityClient::new( + self.client.clone(), + self.miner.clone(), + self.sync.clone(), + self.updater.clone(), + self.network.clone(), + self.logger.clone(), + self.settings.clone(), + signer, + self.ws_address.clone(), + None, + ) + } - fn default_client(&self) -> IoHandler { - let mut io = IoHandler::default(); - io.extend_with(self.client(None).to_delegate()); - io - } + fn default_client(&self) -> IoHandler { + let mut io = IoHandler::default(); + io.extend_with(self.client(None).to_delegate()); + io + } - fn with_signer(&self, signer: SignerService) -> IoHandler { - let mut io = IoHandler::default(); - io.extend_with(self.client(Some(Arc::new(signer))).to_delegate()); - io - } + fn with_signer(&self, signer: SignerService) -> IoHandler { + let mut io = IoHandler::default(); + 
io.extend_with(self.client(Some(Arc::new(signer))).to_delegate()); + io + } } #[test] fn rpc_parity_consensus_capability() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_consensusCapability", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"capableUntil":15100},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + let request = + r#"{"jsonrpc": "2.0", "method": "parity_consensusCapability", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"capableUntil":15100},"id":1}"#; + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); - deps.updater.set_current_block(15101); + deps.updater.set_current_block(15101); - let request = r#"{"jsonrpc": "2.0", "method": "parity_consensusCapability", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"incapableSince":15100},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + let request = + r#"{"jsonrpc": "2.0", "method": "parity_consensusCapability", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"incapableSince":15100},"id":1}"#; + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); - deps.updater.set_updated(true); + deps.updater.set_updated(true); - let request = r#"{"jsonrpc": "2.0", "method": "parity_consensusCapability", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"capable","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + let request = + r#"{"jsonrpc": "2.0", "method": "parity_consensusCapability", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"capable","id":1}"#; + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_version_info() { - let deps = 
Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_versionInfo", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"hash":"0x0000000000000000000000000000000000000096","track":"beta","version":{"major":1,"minor":5,"patch":0}},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "parity_versionInfo", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"hash":"0x0000000000000000000000000000000000000096","track":"beta","version":{"major":1,"minor":5,"patch":0}},"id":1}"#; + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_releases_info() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_releasesInfo", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"fork":15100,"minor":null,"this_fork":15000,"track":{"binary":"0x00000000000000000000000000000000000000000000000000000000000005e6","fork":15100,"is_critical":true,"version":{"hash":"0x0000000000000000000000000000000000000097","track":"beta","version":{"major":1,"minor":5,"patch":1}}}},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "parity_releasesInfo", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"fork":15100,"minor":null,"this_fork":15000,"track":{"binary":"0x00000000000000000000000000000000000000000000000000000000000005e6","fork":15100,"is_critical":true,"version":{"hash":"0x0000000000000000000000000000000000000097","track":"beta","version":{"major":1,"minor":5,"patch":1}}}},"id":1}"#; + assert_eq!(io.handle_request_sync(request), 
Some(response.to_owned())); } #[test] fn rpc_parity_extra_data() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_extraData", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x01020304","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_extraData", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x01020304","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_default_extra_data() { - use version::version_data; - use bytes::ToPretty; + use bytes::ToPretty; + use version::version_data; - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_defaultExtraData", "params": [], "id": 1}"#; - let response = format!(r#"{{"jsonrpc":"2.0","result":"0x{}","id":1}}"#, version_data().to_hex()); + let request = + r#"{"jsonrpc": "2.0", "method": "parity_defaultExtraData", "params": [], "id": 1}"#; + let response = format!( + r#"{{"jsonrpc":"2.0","result":"0x{}","id":1}}"#, + version_data().to_hex() + ); - assert_eq!(io.handle_request_sync(request), Some(response)); + assert_eq!(io.handle_request_sync(request), Some(response)); } #[test] fn rpc_parity_gas_floor_target() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_gasFloorTarget", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x3039","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_gasFloorTarget", "params": [], "id": 1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":"0x3039","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_min_gas_price() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_minGasPrice", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x1312d00","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_minGasPrice", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x1312d00","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_dev_logs() { - let deps = Dependencies::new(); - deps.logger.append("a".to_owned()); - deps.logger.append("b".to_owned()); + let deps = Dependencies::new(); + deps.logger.append("a".to_owned()); + deps.logger.append("b".to_owned()); - let io = deps.default_client(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_devLogs", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":["b","a"],"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_devLogs", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["b","a"],"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_dev_logs_levels() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_devLogsLevels", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"rpc=trace","id":1}"#; + let 
request = r#"{"jsonrpc": "2.0", "method": "parity_devLogsLevels", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"rpc=trace","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_transactions_limit() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_transactionsLimit", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":1024,"id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "parity_transactionsLimit", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":1024,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_net_chain() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_netChain", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"testchain","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_netChain", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"testchain","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_chain() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_chain", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"foundation","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": 
"parity_chain", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"foundation","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_net_peers() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_netPeers", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":{"ParityClient":{"can_handle_large_requests":true,"compiler":"rustc","identity":"1","name":"Parity-Ethereum","os":"linux","semver":"2.4.0"}},"network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"pip":null}},{"caps":["eth/63","eth/64"],"id":null,"name":{"ParityClient":{"can_handle_large_requests":true,"compiler":"rustc","identity":"2","name":"Parity-Ethereum","os":"linux","semver":"2.4.0"}},"network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"pip":null}}]},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_netPeers", "params":[], "id": 1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":{"ParityClient":{"can_handle_large_requests":true,"compiler":"rustc","identity":"1","name":"Parity-Ethereum","os":"linux","semver":"2.4.0"}},"network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"pip":null}},{"caps":["eth/63","eth/64"],"id":null,"name":{"ParityClient":{"can_handle_large_requests":true,"compiler":"rustc","identity":"2","name":"Parity-Ethereum","os":"linux","semver":"2.4.0"}},"network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"pip":null}}]},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_net_port() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_netPort", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":30303,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_netPort", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":30303,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_rpc_settings() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_rpcSettings", "params":[], "id": 1}"#; - let response = 
r#"{"jsonrpc":"2.0","result":{"enabled":true,"interface":"all","port":8545},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_rpcSettings", "params":[], "id": 1}"#; + let response = + r#"{"jsonrpc":"2.0","result":{"enabled":true,"interface":"all","port":8545},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_node_name() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_nodeName", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"mynode","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_nodeName", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"mynode","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_unsigned_transactions_count() { - let deps = Dependencies::new(); - let io = deps.with_signer(SignerService::new_test(true)); + let deps = Dependencies::new(); + let io = deps.with_signer(SignerService::new_test(true)); - let request = r#"{"jsonrpc": "2.0", "method": "parity_unsignedTransactionsCount", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":0,"id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "parity_unsignedTransactionsCount", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":0,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_unsigned_transactions_count_when_signer_disabled() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = 
deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_unsignedTransactionsCount", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Trusted Signer is disabled. This API is not available."},"id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "parity_unsignedTransactionsCount", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Trusted Signer is disabled. This API is not available."},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_pending_transactions() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_pendingTransactions", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "parity_pendingTransactions", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_encrypt() { - let deps = Dependencies::new(); - let io = deps.default_client(); - let key = format!("{:x}", Random.generate().unwrap().public()); + let deps = Dependencies::new(); + let io = deps.default_client(); + let key = format!("{:x}", Random.generate().unwrap().public()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_encryptMessage", "params":["0x"#.to_owned() + &key + r#"", "0x01"], "id": 1}"#; - assert!(io.handle_request_sync(&request).unwrap().contains("result"), "Should return success."); + let request = r#"{"jsonrpc": "2.0", "method": "parity_encryptMessage", "params":["0x"# + .to_owned() + + &key + + r#"", "0x01"], 
"id": 1}"#; + assert!( + io.handle_request_sync(&request).unwrap().contains("result"), + "Should return success." + ); } #[test] fn rpc_parity_ws_address() { - // given - let mut deps = Dependencies::new(); - let io1 = deps.default_client(); - deps.ws_address = None; - let io2 = deps.default_client(); + // given + let mut deps = Dependencies::new(); + let io1 = deps.default_client(); + deps.ws_address = None; + let io2 = deps.default_client(); - // when - let request = r#"{"jsonrpc": "2.0", "method": "parity_wsUrl", "params": [], "id": 1}"#; - let response1 = r#"{"jsonrpc":"2.0","result":"127.0.0.1:18546","id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"WebSockets Server is disabled. This API is not available."},"id":1}"#; + // when + let request = r#"{"jsonrpc": "2.0", "method": "parity_wsUrl", "params": [], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":"127.0.0.1:18546","id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"WebSockets Server is disabled. 
This API is not available."},"id":1}"#; - // then - assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); - assert_eq!(io2.handle_request_sync(request), Some(response2.to_owned())); + // then + assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); + assert_eq!(io2.handle_request_sync(request), Some(response2.to_owned())); } #[test] fn rpc_parity_next_nonce() { - let deps = Dependencies::new(); - let address = Address::default(); - let io1 = deps.default_client(); - let deps = Dependencies::new(); - deps.miner.increment_nonce(&address); - deps.miner.increment_nonce(&address); - deps.miner.increment_nonce(&address); - let io2 = deps.default_client(); + let deps = Dependencies::new(); + let address = Address::default(); + let io1 = deps.default_client(); + let deps = Dependencies::new(); + deps.miner.increment_nonce(&address); + deps.miner.increment_nonce(&address); + deps.miner.increment_nonce(&address); + let io2 = deps.default_client(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "parity_nextNonce", - "params": [""#.to_owned() + &format!("0x{:x}", address) + r#""], + "params": [""# + .to_owned() + + &format!("0x{:x}", address) + + r#""], "id": 1 }"#; - let response1 = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","result":"0x3","id":1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":"0x3","id":1}"#; - assert_eq!(io1.handle_request_sync(&request), Some(response1.to_owned())); - assert_eq!(io2.handle_request_sync(&request), Some(response2.to_owned())); + assert_eq!( + io1.handle_request_sync(&request), + Some(response1.to_owned()) + ); + assert_eq!( + io2.handle_request_sync(&request), + Some(response2.to_owned()) + ); } #[test] fn rpc_parity_transactions_stats() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - 
let request = r#"{"jsonrpc": "2.0", "method": "parity_pendingTransactionsStats", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"0x0000000000000000000000000000000000000000000000000000000000000001":{"firstSeen":10,"propagatedTo":{"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080":16}},"0x0000000000000000000000000000000000000000000000000000000000000005":{"firstSeen":16,"propagatedTo":{"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010":1}}},"id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "parity_pendingTransactionsStats", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"0x0000000000000000000000000000000000000000000000000000000000000001":{"firstSeen":10,"propagatedTo":{"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080":16}},"0x0000000000000000000000000000000000000000000000000000000000000005":{"firstSeen":16,"propagatedTo":{"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010":1}}},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_local_transactions() { - let deps = Dependencies::new(); - let io = deps.default_client(); - let tx = ::types::transaction::Transaction { - value: 5.into(), - gas: 3.into(), - gas_price: 2.into(), - action: ::types::transaction::Action::Create, - data: vec![1, 2, 3], - nonce: 0.into(), - }.fake_sign(3.into()); - let tx = Arc::new(::miner::pool::VerifiedTransaction::from_pending_block_transaction(tx)); - deps.miner.local_transactions.lock().insert(10.into(), LocalTransactionStatus::Pending(tx.clone())); - 
deps.miner.local_transactions.lock().insert(15.into(), LocalTransactionStatus::Pending(tx.clone())); + let deps = Dependencies::new(); + let io = deps.default_client(); + let tx = ::types::transaction::Transaction { + value: 5.into(), + gas: 3.into(), + gas_price: 2.into(), + action: ::types::transaction::Action::Create, + data: vec![1, 2, 3], + nonce: 0.into(), + } + .fake_sign(3.into()); + let tx = Arc::new(::miner::pool::VerifiedTransaction::from_pending_block_transaction(tx)); + deps.miner + .local_transactions + .lock() + .insert(10.into(), LocalTransactionStatus::Pending(tx.clone())); + deps.miner + .local_transactions + .lock() + .insert(15.into(), LocalTransactionStatus::Pending(tx.clone())); - let request = r#"{"jsonrpc": "2.0", "method": "parity_localTransactions", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"0x000000000000000000000000000000000000000000000000000000000000000a":{"status":"pending"},"0x000000000000000000000000000000000000000000000000000000000000000f":{"status":"pending"}},"id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "parity_localTransactions", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"0x000000000000000000000000000000000000000000000000000000000000000a":{"status":"pending"},"0x000000000000000000000000000000000000000000000000000000000000000f":{"status":"pending"}},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_chain_status() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - *deps.client.ancient_block.write() = Some((H256::default(), 5)); - *deps.client.first_block.write() = Some((H256::from(U256::from(1234)), 3333)); + *deps.client.ancient_block.write() = Some((H256::default(), 5)); + *deps.client.first_block.write() = 
Some((H256::from(U256::from(1234)), 3333)); - let request = r#"{"jsonrpc": "2.0", "method": "parity_chainStatus", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"blockGap":["0x6","0xd05"]},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_chainStatus", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"blockGap":["0x6","0xd05"]},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_node_kind() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_nodeKind", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"availability":"personal","capability":"full"},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_nodeKind", "params":[], "id": 1}"#; + let response = + r#"{"jsonrpc":"2.0","result":{"availability":"personal","capability":"full"},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_cid() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_cidV0", "params":["0x414243"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"QmSF59MAENc8ZhM4aM1thuAE8w5gDmyfzkAvNoyPea7aDz","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_cidV0", "params":["0x414243"], "id": 1}"#; + let response = + r#"{"jsonrpc":"2.0","result":"QmSF59MAENc8ZhM4aM1thuAE8w5gDmyfzkAvNoyPea7aDz","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); 
} #[test] fn rpc_parity_call() { - let deps = Dependencies::new(); - deps.client.set_execution_result(Ok(Executed { - exception: None, - gas: U256::zero(), - gas_used: U256::from(0xff30), - refunded: U256::from(0x5), - cumulative_gas_used: U256::zero(), - logs: vec![], - contracts_created: vec![], - output: vec![0x12, 0x34, 0xff], - trace: vec![], - vm_trace: None, - state_diff: None, - })); - let io = deps.default_client(); + let deps = Dependencies::new(); + deps.client.set_execution_result(Ok(Executed { + exception: None, + gas: U256::zero(), + gas_used: U256::from(0xff30), + refunded: U256::from(0x5), + cumulative_gas_used: U256::zero(), + logs: vec![], + contracts_created: vec![], + output: vec![0x12, 0x34, 0xff], + trace: vec![], + vm_trace: None, + state_diff: None, + })); + let io = deps.default_client(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "parity_call", "params": [[{ @@ -475,98 +514,100 @@ fn rpc_parity_call() { "latest"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":["0x1234ff"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x1234ff"],"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_block_receipts() { - let deps = Dependencies::new(); - deps.client.receipts.write() - .insert(TransactionId::Hash(1.into()), LocalizedReceipt { - transaction_hash: 1.into(), - transaction_index: 0, - block_hash: 3.into(), - block_number: 0, - cumulative_gas_used: 21_000.into(), - gas_used: 21_000.into(), - contract_address: None, - logs: vec![], - log_bloom: 1.into(), - outcome: TransactionOutcome::Unknown, - to: None, - from: 9.into(), - }); - let io = deps.default_client(); + let deps = Dependencies::new(); + deps.client.receipts.write().insert( + TransactionId::Hash(1.into()), + LocalizedReceipt { + transaction_hash: 1.into(), + transaction_index: 0, + block_hash: 3.into(), + 
block_number: 0, + cumulative_gas_used: 21_000.into(), + gas_used: 21_000.into(), + contract_address: None, + logs: vec![], + log_bloom: 1.into(), + outcome: TransactionOutcome::Unknown, + to: None, + from: 9.into(), + }, + ); + let io = deps.default_client(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "parity_getBlockReceipts", "params": [], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":[{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000003","blockNumber":"0x0","contractAddress":null,"cumulativeGasUsed":"0x5208","from":"0x0000000000000000000000000000000000000009","gasUsed":"0x5208","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001","to":null,"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000001","transactionIndex":"0x0"}],"id":1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":[{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000003","blockNumber":"0x0","contractAddress":null,"cumulativeGasUsed":"0x5208","from":"0x0000000000000000000000000000000000000009","gasUsed":"0x5208","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001","to":null,"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000001","transactionIndex":"0x0"}],"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_status_ok() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "parity_nodeStatus", "params": [], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_status_error_peers() { - let deps = Dependencies::new(); - deps.sync.status.write().num_peers = 0; - let io = deps.default_client(); + let deps = Dependencies::new(); + deps.sync.status.write().num_peers = 0; + let io = deps.default_client(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "parity_nodeStatus", "params": [], 
"id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32066,"message":"Node is not connected to any peers."},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32066,"message":"Node is not connected to any peers."},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_status_error_sync() { - let deps = Dependencies::new(); - deps.sync.status.write().state = ::sync::SyncState::Blocks; - let io = deps.default_client(); + let deps = Dependencies::new(); + deps.sync.status.write().state = ::sync::SyncState::Blocks; + let io = deps.default_client(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "parity_nodeStatus", "params": [], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32001,"message":"Still syncing."},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32001,"message":"Still syncing."},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_verify_signature() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "parity_verifySignature", "params": [ @@ -579,7 +620,7 @@ fn rpc_parity_verify_signature() { "id": 0 }"#; - let response = r#"{"jsonrpc":"2.0","result":{"address":"0x9a2a08a1170f51208c2f3cede0d29ada94481eed","isValidForCurrentChain":true,"publicKey":"0xbeec94ea24444906fe247c47841a45220f07e5718d06157fe4502fac326dab617e973e221e45746721330c2db3f63202268686378cc28b9800c1daaf0bbafeb1"},"id":0}"#; + let response = 
r#"{"jsonrpc":"2.0","result":{"address":"0x9a2a08a1170f51208c2f3cede0d29ada94481eed","isValidForCurrentChain":true,"publicKey":"0xbeec94ea24444906fe247c47841a45220f07e5718d06157fe4502fac326dab617e973e221e45746721330c2db3f63202268686378cc28b9800c1daaf0bbafeb1"},"id":0}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/mocked/parity_accounts.rs b/rpc/src/v1/tests/mocked/parity_accounts.rs index 5b2e0762b..8f1f77413 100644 --- a/rpc/src/v1/tests/mocked/parity_accounts.rs +++ b/rpc/src/v1/tests/mocked/parity_accounts.rs @@ -18,464 +18,661 @@ use std::sync::Arc; use accounts::{AccountProvider, AccountProviderSettings}; use ethereum_types::Address; -use ethstore::EthStore; -use ethstore::accounts_dir::RootDiskDirectory; +use ethstore::{accounts_dir::RootDiskDirectory, EthStore}; use tempdir::TempDir; use jsonrpc_core::IoHandler; -use v1::{ParityAccounts, ParityAccountsInfo, ParityAccountsClient}; +use v1::{ParityAccounts, ParityAccountsClient, ParityAccountsInfo}; struct ParityAccountsTester { - accounts: Arc, - io: IoHandler, + accounts: Arc, + io: IoHandler, } fn accounts_provider() -> Arc { - Arc::new(AccountProvider::transient_provider()) + Arc::new(AccountProvider::transient_provider()) } fn accounts_provider_with_vaults_support(temp_path: &str) -> Arc { - let root_keys_dir = RootDiskDirectory::create(temp_path).unwrap(); - let secret_store = EthStore::open(Box::new(root_keys_dir)).unwrap(); - Arc::new(AccountProvider::new(Box::new(secret_store), AccountProviderSettings::default())) + let root_keys_dir = RootDiskDirectory::create(temp_path).unwrap(); + let secret_store = EthStore::open(Box::new(root_keys_dir)).unwrap(); + Arc::new(AccountProvider::new( + Box::new(secret_store), + AccountProviderSettings::default(), + )) } fn setup_with_accounts_provider(accounts_provider: Arc) -> ParityAccountsTester { - let opt_ap = 
accounts_provider.clone(); - let parity_accounts = ParityAccountsClient::new(&opt_ap); - let parity_accounts2 = ParityAccountsClient::new(&opt_ap); - let mut io = IoHandler::default(); - io.extend_with(ParityAccounts::to_delegate(parity_accounts)); - io.extend_with(ParityAccountsInfo::to_delegate(parity_accounts2)); + let opt_ap = accounts_provider.clone(); + let parity_accounts = ParityAccountsClient::new(&opt_ap); + let parity_accounts2 = ParityAccountsClient::new(&opt_ap); + let mut io = IoHandler::default(); + io.extend_with(ParityAccounts::to_delegate(parity_accounts)); + io.extend_with(ParityAccountsInfo::to_delegate(parity_accounts2)); - let tester = ParityAccountsTester { - accounts: accounts_provider, - io: io, - }; + let tester = ParityAccountsTester { + accounts: accounts_provider, + io: io, + }; - tester + tester } fn setup() -> ParityAccountsTester { - setup_with_accounts_provider(accounts_provider()) + setup_with_accounts_provider(accounts_provider()) } fn setup_with_vaults_support(temp_path: &str) -> ParityAccountsTester { - setup_with_accounts_provider(accounts_provider_with_vaults_support(temp_path)) + setup_with_accounts_provider(accounts_provider_with_vaults_support(temp_path)) } #[test] fn rpc_parity_accounts_info() { - let tester = setup(); - let io = tester.io; + let tester = setup(); + let io = tester.io; - tester.accounts.new_account(&"".into()).unwrap(); - let accounts = tester.accounts.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - let address = accounts[0]; + tester.accounts.new_account(&"".into()).unwrap(); + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + let address = accounts[0]; - tester.accounts.set_address_name(1.into(), "XX".into()); - tester.accounts.set_account_name(address.clone(), "Test".into()).unwrap(); - tester.accounts.set_account_meta(address.clone(), "{foo: 69}".into()).unwrap(); + tester.accounts.set_address_name(1.into(), "XX".into()); + tester + .accounts + 
.set_account_name(address.clone(), "Test".into()) + .unwrap(); + tester + .accounts + .set_account_meta(address.clone(), "{foo: 69}".into()) + .unwrap(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_accountsInfo", "params": [], "id": 1}"#; - let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"name\":\"Test\"}}}},\"id\":1}}", address); - assert_eq!(io.handle_request_sync(request), Some(response)); + let request = r#"{"jsonrpc": "2.0", "method": "parity_accountsInfo", "params": [], "id": 1}"#; + let response = format!( + "{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"name\":\"Test\"}}}},\"id\":1}}", + address + ); + assert_eq!(io.handle_request_sync(request), Some(response)); } #[test] fn rpc_parity_default_account() { - let tester = setup(); - let io = tester.io; + let tester = setup(); + let io = tester.io; - // Check empty - let address = Address::default(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_defaultAccount", "params": [], "id": 1}"#; - let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":\"0x{:x}\",\"id\":1}}", address); - assert_eq!(io.handle_request_sync(request), Some(response)); + // Check empty + let address = Address::default(); + let request = r#"{"jsonrpc": "2.0", "method": "parity_defaultAccount", "params": [], "id": 1}"#; + let response = format!( + "{{\"jsonrpc\":\"2.0\",\"result\":\"0x{:x}\",\"id\":1}}", + address + ); + assert_eq!(io.handle_request_sync(request), Some(response)); - // With account - tester.accounts.new_account(&"".into()).unwrap(); - let accounts = tester.accounts.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - let address = accounts[0]; + // With account + tester.accounts.new_account(&"".into()).unwrap(); + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + let address = accounts[0]; - let request = r#"{"jsonrpc": "2.0", "method": "parity_defaultAccount", "params": [], "id": 1}"#; - let response = 
format!("{{\"jsonrpc\":\"2.0\",\"result\":\"0x{:x}\",\"id\":1}}", address); - assert_eq!(io.handle_request_sync(request), Some(response)); + let request = r#"{"jsonrpc": "2.0", "method": "parity_defaultAccount", "params": [], "id": 1}"#; + let response = format!( + "{{\"jsonrpc\":\"2.0\",\"result\":\"0x{:x}\",\"id\":1}}", + address + ); + assert_eq!(io.handle_request_sync(request), Some(response)); } #[test] fn should_be_able_to_get_account_info() { - let tester = setup(); - tester.accounts.new_account(&"".into()).unwrap(); - let accounts = tester.accounts.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - let address = accounts[0]; + let tester = setup(); + tester.accounts.new_account(&"".into()).unwrap(); + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + let address = accounts[0]; - let uuid = tester.accounts.accounts_info().unwrap().get(&address).unwrap().uuid.as_ref().unwrap().clone(); - tester.accounts.set_account_name(address.clone(), "Test".to_owned()).unwrap(); - tester.accounts.set_account_meta(address.clone(), "{foo: 69}".to_owned()).unwrap(); + let uuid = tester + .accounts + .accounts_info() + .unwrap() + .get(&address) + .unwrap() + .uuid + .as_ref() + .unwrap() + .clone(); + tester + .accounts + .set_account_name(address.clone(), "Test".to_owned()) + .unwrap(); + tester + .accounts + .set_account_meta(address.clone(), "{foo: 69}".to_owned()) + .unwrap(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#; - let res = tester.io.handle_request_sync(request); - let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid); - assert_eq!(res, Some(response)); + let request = + r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#; + let res = tester.io.handle_request_sync(request); + let response = 
format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid); + assert_eq!(res, Some(response)); } #[test] fn should_be_able_to_set_name() { - let tester = setup(); - tester.accounts.new_account(&"".into()).unwrap(); - let accounts = tester.accounts.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - let address = accounts[0]; + let tester = setup(); + tester.accounts.new_account(&"".into()).unwrap(); + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + let address = accounts[0]; - let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_setAccountName", "params": ["0x{:x}", "Test"], "id": 1}}"#, address); - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + let request = format!( + r#"{{"jsonrpc": "2.0", "method": "parity_setAccountName", "params": ["0x{:x}", "Test"], "id": 1}}"#, + address + ); + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); - let uuid = tester.accounts.accounts_info().unwrap().get(&address).unwrap().uuid.as_ref().unwrap().clone(); + let uuid = tester + .accounts + .accounts_info() + .unwrap() + .get(&address) + .unwrap() + .uuid + .as_ref() + .unwrap() + .clone(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#; - let res = tester.io.handle_request_sync(request); - let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid); - assert_eq!(res, Some(response)); + let request = + r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#; + let res = tester.io.handle_request_sync(request); + let response = 
format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid); + assert_eq!(res, Some(response)); } #[test] fn should_be_able_to_set_meta() { - let tester = setup(); - tester.accounts.new_account(&"".into()).unwrap(); - let accounts = tester.accounts.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - let address = accounts[0]; + let tester = setup(); + tester.accounts.new_account(&"".into()).unwrap(); + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + let address = accounts[0]; - let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_setAccountMeta", "params": ["0x{:x}", "{{foo: 69}}"], "id": 1}}"#, address); - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + let request = format!( + r#"{{"jsonrpc": "2.0", "method": "parity_setAccountMeta", "params": ["0x{:x}", "{{foo: 69}}"], "id": 1}}"#, + address + ); + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); - let uuid = tester.accounts.accounts_info().unwrap().get(&address).unwrap().uuid.as_ref().unwrap().clone(); + let uuid = tester + .accounts + .accounts_info() + .unwrap() + .get(&address) + .unwrap() + .uuid + .as_ref() + .unwrap() + .clone(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#; - let res = tester.io.handle_request_sync(request); - let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid); - assert_eq!(res, Some(response)); + let request = + r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#; + let res = tester.io.handle_request_sync(request); + let response = 
format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid); + assert_eq!(res, Some(response)); } #[test] fn should_be_able_to_kill_account() { - let tester = setup(); - tester.accounts.new_account(&"password".into()).unwrap(); - let accounts = tester.accounts.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - let address = accounts[0]; + let tester = setup(); + tester.accounts.new_account(&"password".into()).unwrap(); + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + let address = accounts[0]; - let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_killAccount", "params": ["0xf00baba2f00baba2f00baba2f00baba2f00baba2"], "id": 1}}"#); - let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid params: invalid length 1, expected a tuple of size 2."},"id":1}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + let request = format!( + r#"{{"jsonrpc": "2.0", "method": "parity_killAccount", "params": ["0xf00baba2f00baba2f00baba2f00baba2f00baba2"], "id": 1}}"# + ); + let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid params: invalid length 1, expected a tuple of size 2."},"id":1}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); - let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_killAccount", "params": ["0x{:x}", "password"], "id": 1}}"#, address); - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + let request = format!( + r#"{{"jsonrpc": "2.0", "method": "parity_killAccount", "params": ["0x{:x}", "password"], "id": 1}}"#, + address + ); + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, 
Some(response.into())); - let accounts = tester.accounts.accounts().unwrap(); - assert_eq!(accounts.len(), 0); + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 0); } #[test] fn should_be_able_to_remove_address() { - let tester = setup(); + let tester = setup(); - // add an address - let request = r#"{"jsonrpc": "2.0", "method": "parity_setAccountName", "params": ["0x000baba1000baba2000baba3000baba4000baba5", "Test"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + // add an address + let request = r#"{"jsonrpc": "2.0", "method": "parity_setAccountName", "params": ["0x000baba1000baba2000baba3000baba4000baba5", "Test"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); - // verify it exists - let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 2}"#; - let res = tester.io.handle_request_sync(request); - let response = r#"{"jsonrpc":"2.0","result":{"0x000baba1000baba2000baba3000baba4000baba5":{"meta":"{}","name":"Test"}},"id":2}"#; - assert_eq!(res, Some(response.into())); + // verify it exists + let request = + r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 2}"#; + let res = tester.io.handle_request_sync(request); + let response = r#"{"jsonrpc":"2.0","result":{"0x000baba1000baba2000baba3000baba4000baba5":{"meta":"{}","name":"Test"}},"id":2}"#; + assert_eq!(res, Some(response.into())); - // remove the address - let request = r#"{"jsonrpc": "2.0", "method": "parity_removeAddress", "params": ["0x000baba1000baba2000baba3000baba4000baba5"], "id": 3}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":3}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + // remove the address + 
let request = r#"{"jsonrpc": "2.0", "method": "parity_removeAddress", "params": ["0x000baba1000baba2000baba3000baba4000baba5"], "id": 3}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":3}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); - // verify empty - let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 4}"#; - let res = tester.io.handle_request_sync(request); - let response = r#"{"jsonrpc":"2.0","result":{},"id":4}"#; - assert_eq!(res, Some(response.into())); + // verify empty + let request = + r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 4}"#; + let res = tester.io.handle_request_sync(request); + let response = r#"{"jsonrpc":"2.0","result":{},"id":4}"#; + assert_eq!(res, Some(response.into())); } #[test] fn rpc_parity_new_vault() { - let tempdir = TempDir::new("").unwrap(); - let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); + let tempdir = TempDir::new("").unwrap(); + let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_newVault", "params":["vault1", "password1"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_newVault", "params":["vault1", "password1"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - assert!(tester.accounts.close_vault("vault1").is_ok()); - assert!(tester.accounts.open_vault("vault1", &"password1".into()).is_ok()); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); + assert!(tester.accounts.close_vault("vault1").is_ok()); + assert!(tester + .accounts + .open_vault("vault1", &"password1".into()) + .is_ok()); } #[test] fn rpc_parity_open_vault() { - let tempdir = 
TempDir::new("").unwrap(); - let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); + let tempdir = TempDir::new("").unwrap(); + let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester.accounts.create_vault("vault1", &"password1".into()).is_ok()); - assert!(tester.accounts.close_vault("vault1").is_ok()); + assert!(tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok()); + assert!(tester.accounts.close_vault("vault1").is_ok()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_openVault", "params":["vault1", "password1"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_openVault", "params":["vault1", "password1"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_parity_close_vault() { - let tempdir = TempDir::new("").unwrap(); - let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); + let tempdir = TempDir::new("").unwrap(); + let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester.accounts.create_vault("vault1", &"password1".into()).is_ok()); + assert!(tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_closeVault", "params":["vault1"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "parity_closeVault", "params":["vault1"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn 
rpc_parity_change_vault_password() { - let tempdir = TempDir::new("").unwrap(); - let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); + let tempdir = TempDir::new("").unwrap(); + let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester.accounts.create_vault("vault1", &"password1".into()).is_ok()); + assert!(tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_changeVaultPassword", "params":["vault1", "password2"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_changeVaultPassword", "params":["vault1", "password2"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_parity_change_vault() { - let tempdir = TempDir::new("").unwrap(); - let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); + let tempdir = TempDir::new("").unwrap(); + let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - let (address, _) = tester.accounts.new_account_and_public(&"root_password".into()).unwrap(); - assert!(tester.accounts.create_vault("vault1", &"password1".into()).is_ok()); + let (address, _) = tester + .accounts + .new_account_and_public(&"root_password".into()) + .unwrap(); + assert!(tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok()); - let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_changeVault", "params":["0x{:x}", "vault1"], "id": 1}}"#, address); - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = format!( + r#"{{"jsonrpc": "2.0", "method": "parity_changeVault", "params":["0x{:x}", "vault1"], "id": 1}}"#, + address + ); + let response = 
r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); } #[test] fn rpc_parity_vault_adds_vault_field_to_acount_meta() { - let tempdir = TempDir::new("").unwrap(); - let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); + let tempdir = TempDir::new("").unwrap(); + let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - let (address1, _) = tester.accounts.new_account_and_public(&"root_password1".into()).unwrap(); - let uuid1 = tester.accounts.account_meta(address1.clone()).unwrap().uuid.unwrap(); - assert!(tester.accounts.create_vault("vault1", &"password1".into()).is_ok()); - assert!(tester.accounts.change_vault(address1, "vault1").is_ok()); + let (address1, _) = tester + .accounts + .new_account_and_public(&"root_password1".into()) + .unwrap(); + let uuid1 = tester + .accounts + .account_meta(address1.clone()) + .unwrap() + .uuid + .unwrap(); + assert!(tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok()); + assert!(tester.accounts.change_vault(address1, "vault1").is_ok()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params":[], "id": 1}"#; - let response = format!(r#"{{"jsonrpc":"2.0","result":{{"0x{:x}":{{"meta":"{{\"vault\":\"vault1\"}}","name":"","uuid":"{}"}}}},"id":1}}"#, address1, uuid1); + let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params":[], "id": 1}"#; + let response = format!( + r#"{{"jsonrpc":"2.0","result":{{"0x{:x}":{{"meta":"{{\"vault\":\"vault1\"}}","name":"","uuid":"{}"}}}},"id":1}}"#, + address1, uuid1 + ); - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); - // and then - assert!(tester.accounts.change_vault(address1, "").is_ok()); + // and then 
+ assert!(tester.accounts.change_vault(address1, "").is_ok()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params":[], "id": 1}"#; - let response = format!(r#"{{"jsonrpc":"2.0","result":{{"0x{:x}":{{"meta":"{{}}","name":"","uuid":"{}"}}}},"id":1}}"#, address1, uuid1); + let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params":[], "id": 1}"#; + let response = format!( + r#"{{"jsonrpc":"2.0","result":{{"0x{:x}":{{"meta":"{{}}","name":"","uuid":"{}"}}}},"id":1}}"#, + address1, uuid1 + ); - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_parity_list_vaults() { - let tempdir = TempDir::new("").unwrap(); - let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); + let tempdir = TempDir::new("").unwrap(); + let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester.accounts.create_vault("vault1", &"password1".into()).is_ok()); - assert!(tester.accounts.create_vault("vault2", &"password2".into()).is_ok()); + assert!(tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok()); + assert!(tester + .accounts + .create_vault("vault2", &"password2".into()) + .is_ok()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_listVaults", "params":[], "id": 1}"#; - let response1 = r#"{"jsonrpc":"2.0","result":["vault1","vault2"],"id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","result":["vault2","vault1"],"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_listVaults", "params":[], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":["vault1","vault2"],"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":["vault2","vault1"],"id":1}"#; - let actual_response = tester.io.handle_request_sync(request); - assert!(actual_response == Some(response1.to_owned()) - || actual_response == 
Some(response2.to_owned())); + let actual_response = tester.io.handle_request_sync(request); + assert!( + actual_response == Some(response1.to_owned()) + || actual_response == Some(response2.to_owned()) + ); } #[test] fn rpc_parity_list_opened_vaults() { - let tempdir = TempDir::new("").unwrap(); - let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); + let tempdir = TempDir::new("").unwrap(); + let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester.accounts.create_vault("vault1", &"password1".into()).is_ok()); - assert!(tester.accounts.create_vault("vault2", &"password2".into()).is_ok()); - assert!(tester.accounts.create_vault("vault3", &"password3".into()).is_ok()); - assert!(tester.accounts.close_vault("vault2").is_ok()); + assert!(tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok()); + assert!(tester + .accounts + .create_vault("vault2", &"password2".into()) + .is_ok()); + assert!(tester + .accounts + .create_vault("vault3", &"password3".into()) + .is_ok()); + assert!(tester.accounts.close_vault("vault2").is_ok()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_listOpenedVaults", "params":[], "id": 1}"#; - let response1 = r#"{"jsonrpc":"2.0","result":["vault1","vault3"],"id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","result":["vault3","vault1"],"id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "parity_listOpenedVaults", "params":[], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":["vault1","vault3"],"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":["vault3","vault1"],"id":1}"#; - let actual_response = tester.io.handle_request_sync(request); - assert!(actual_response == Some(response1.to_owned()) - || actual_response == Some(response2.to_owned())); + let actual_response = tester.io.handle_request_sync(request); + assert!( + actual_response == Some(response1.to_owned()) + || actual_response == Some(response2.to_owned()) + ); } #[test] fn 
rpc_parity_get_set_vault_meta() { - let tempdir = TempDir::new("").unwrap(); - let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); + let tempdir = TempDir::new("").unwrap(); + let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester.accounts.create_vault("vault1", &"password1".into()).is_ok()); + assert!(tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok()); - // when no meta set - let request = r#"{"jsonrpc": "2.0", "method": "parity_getVaultMeta", "params":["vault1"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"{}","id":1}"#; + // when no meta set + let request = + r#"{"jsonrpc": "2.0", "method": "parity_getVaultMeta", "params":["vault1"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"{}","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); - // when meta set - assert!(tester.accounts.set_vault_meta("vault1", "vault1_meta").is_ok()); + // when meta set + assert!(tester + .accounts + .set_vault_meta("vault1", "vault1_meta") + .is_ok()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_getVaultMeta", "params":["vault1"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"vault1_meta","id":1}"#; + let request = + r#"{"jsonrpc": "2.0", "method": "parity_getVaultMeta", "params":["vault1"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"vault1_meta","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); - // change meta - let request = r#"{"jsonrpc": "2.0", "method": "parity_setVaultMeta", "params":["vault1", "updated_vault1_meta"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + // change meta + let request = r#"{"jsonrpc": "2.0", "method": 
"parity_setVaultMeta", "params":["vault1", "updated_vault1_meta"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); - // query changed meta - let request = r#"{"jsonrpc": "2.0", "method": "parity_getVaultMeta", "params":["vault1"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"updated_vault1_meta","id":1}"#; + // query changed meta + let request = + r#"{"jsonrpc": "2.0", "method": "parity_getVaultMeta", "params":["vault1"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"updated_vault1_meta","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } // name: parity_deriveAddressHash // example: {"jsonrpc": "2.0", "method": "parity_deriveAddressHash", "params": ["0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e", "password1", { "type": "soft", "hash": "0x0c0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0c0c" }, true ], "id": 3} #[test] fn derive_key_hash() { - let tester = setup(); - let hash = tester.accounts - .insert_account( - "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a".parse().unwrap(), - &"password1".into()) - .expect("account should be inserted ok"); + let tester = setup(); + let hash = tester + .accounts + .insert_account( + "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a" + .parse() + .unwrap(), + &"password1".into(), + ) + .expect("account should be inserted ok"); - assert_eq!(hash, "c171033d5cbff7175f29dfd3a63dda3d6f8f385e".parse().unwrap()); + assert_eq!( + hash, + "c171033d5cbff7175f29dfd3a63dda3d6f8f385e".parse().unwrap() + ); - // derive by hash - let request = r#"{"jsonrpc": "2.0", "method": "parity_deriveAddressHash", "params": 
["0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e", "password1", { "type": "soft", "hash": "0x0c0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0c0c" }, true ], "id": 3}"#; - let response = r#"{"jsonrpc":"2.0","result":"0xf28c28fcddf4a9b8f474237278d3647f9c0d1b3c","id":3}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + // derive by hash + let request = r#"{"jsonrpc": "2.0", "method": "parity_deriveAddressHash", "params": ["0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e", "password1", { "type": "soft", "hash": "0x0c0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0c0c" }, true ], "id": 3}"#; + let response = + r#"{"jsonrpc":"2.0","result":"0xf28c28fcddf4a9b8f474237278d3647f9c0d1b3c","id":3}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); } // name: parity_deriveAddressIndex // example: {"jsonrpc": "2.0", "method": "parity_deriveAddressIndex", "params": ["0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e", "password1", [{ "type": "soft", "index": 0 }, { "type": "soft", "index": 1 }], false ], "id": 3} #[test] fn derive_key_index() { - let tester = setup(); - let hash = tester.accounts - .insert_account( - "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a".parse().unwrap(), - &"password1".into()) - .expect("account should be inserted ok"); + let tester = setup(); + let hash = tester + .accounts + .insert_account( + "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a" + .parse() + .unwrap(), + &"password1".into(), + ) + .expect("account should be inserted ok"); - assert_eq!(hash, "c171033d5cbff7175f29dfd3a63dda3d6f8f385e".parse().unwrap()); + assert_eq!( + hash, + "c171033d5cbff7175f29dfd3a63dda3d6f8f385e".parse().unwrap() + ); - // derive by hash - let request = r#"{"jsonrpc": "2.0", "method": "parity_deriveAddressIndex", "params": ["0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e", "password1", [{ "type": "soft", "index": 0 }, { "type": 
"soft", "index": 1 }], false ], "id": 3}"#; - let response = r#"{"jsonrpc":"2.0","result":"0xcc548e0bb2efe792a920ae0fbf583b13919f274f","id":3}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + // derive by hash + let request = r#"{"jsonrpc": "2.0", "method": "parity_deriveAddressIndex", "params": ["0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e", "password1", [{ "type": "soft", "index": 0 }, { "type": "soft", "index": 1 }], false ], "id": 3}"#; + let response = + r#"{"jsonrpc":"2.0","result":"0xcc548e0bb2efe792a920ae0fbf583b13919f274f","id":3}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); } #[test] fn should_export_account() { - // given - let tester = setup(); - let wallet = r#"{"id":"6a186c80-7797-cff2-bc2e-7c1d6a6cc76e","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a1c6ff99070f8032ca1c4e8add006373"},"ciphertext":"df27e3db64aa18d984b6439443f73660643c2d119a6f0fa2fa9a6456fc802d75","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"ddc325335cda5567a1719313e73b4842511f3e4a837c9658eeb78e51ebe8c815"},"mac":"3dc888ae79cbb226ff9c455669f6cf2d79be72120f2298f6cb0d444fddc0aa3d"},"address":"0042e5d2a662eeaca8a7e828c174f98f35d8925b","name":"parity-export-test","meta":"{\"passwordHint\":\"parity-export-test\",\"timestamp\":1490017814987}"}"#; - tester.accounts.import_wallet(wallet.as_bytes(), &"parity-export-test".into(), false).unwrap(); - let accounts = tester.accounts.accounts().unwrap(); - assert_eq!(accounts.len(), 1); + // given + let tester = setup(); + let wallet = 
r#"{"id":"6a186c80-7797-cff2-bc2e-7c1d6a6cc76e","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a1c6ff99070f8032ca1c4e8add006373"},"ciphertext":"df27e3db64aa18d984b6439443f73660643c2d119a6f0fa2fa9a6456fc802d75","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"ddc325335cda5567a1719313e73b4842511f3e4a837c9658eeb78e51ebe8c815"},"mac":"3dc888ae79cbb226ff9c455669f6cf2d79be72120f2298f6cb0d444fddc0aa3d"},"address":"0042e5d2a662eeaca8a7e828c174f98f35d8925b","name":"parity-export-test","meta":"{\"passwordHint\":\"parity-export-test\",\"timestamp\":1490017814987}"}"#; + tester + .accounts + .import_wallet(wallet.as_bytes(), &"parity-export-test".into(), false) + .unwrap(); + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 1); - // invalid password - let request = r#"{"jsonrpc":"2.0","method":"parity_exportAccount","params":["0x0042e5d2a662eeaca8a7e828c174f98f35d8925b","123"],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32023,"message":"Could not export account.","data":"InvalidPassword"},"id":1}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + // invalid password + let request = r#"{"jsonrpc":"2.0","method":"parity_exportAccount","params":["0x0042e5d2a662eeaca8a7e828c174f98f35d8925b","123"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32023,"message":"Could not export account.","data":"InvalidPassword"},"id":1}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); - // correct password - let request = r#"{"jsonrpc":"2.0","method":"parity_exportAccount","params":["0x0042e5d2a662eeaca8a7e828c174f98f35d8925b","parity-export-test"],"id":1}"#; + // correct password + let request = r#"{"jsonrpc":"2.0","method":"parity_exportAccount","params":["0x0042e5d2a662eeaca8a7e828c174f98f35d8925b","parity-export-test"],"id":1}"#; - let response = 
r#"{"jsonrpc":"2.0","result":{"address":"0042e5d2a662eeaca8a7e828c174f98f35d8925b","crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a1c6ff99070f8032ca1c4e8add006373"},"ciphertext":"df27e3db64aa18d984b6439443f73660643c2d119a6f0fa2fa9a6456fc802d75","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"ddc325335cda5567a1719313e73b4842511f3e4a837c9658eeb78e51ebe8c815"},"mac":"3dc888ae79cbb226ff9c455669f6cf2d79be72120f2298f6cb0d444fddc0aa3d"},"id":"6a186c80-7797-cff2-bc2e-7c1d6a6cc76e","meta":"{\"passwordHint\":\"parity-export-test\",\"timestamp\":1490017814987}","name":"parity-export-test","version":3},"id":1}"#; - let result = tester.io.handle_request_sync(&request); + let response = r#"{"jsonrpc":"2.0","result":{"address":"0042e5d2a662eeaca8a7e828c174f98f35d8925b","crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a1c6ff99070f8032ca1c4e8add006373"},"ciphertext":"df27e3db64aa18d984b6439443f73660643c2d119a6f0fa2fa9a6456fc802d75","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"ddc325335cda5567a1719313e73b4842511f3e4a837c9658eeb78e51ebe8c815"},"mac":"3dc888ae79cbb226ff9c455669f6cf2d79be72120f2298f6cb0d444fddc0aa3d"},"id":"6a186c80-7797-cff2-bc2e-7c1d6a6cc76e","meta":"{\"passwordHint\":\"parity-export-test\",\"timestamp\":1490017814987}","name":"parity-export-test","version":3},"id":1}"#; + let result = tester.io.handle_request_sync(&request); - println!("Result: {:?}", result); - println!("Response: {:?}", response); - assert_eq!(result, Some(response.into())); + println!("Result: {:?}", result); + println!("Response: {:?}", response); + assert_eq!(result, Some(response.into())); } #[test] fn should_import_wallet() { - let tester = setup(); + let tester = setup(); - let id = "6a186c80-7797-cff2-bc2e-7c1d6a6cc76e"; - let request = 
r#"{"jsonrpc":"2.0","method":"parity_newAccountFromWallet","params":["{\"id\":\"\",\"version\":3,\"crypto\":{\"cipher\":\"aes-128-ctr\",\"cipherparams\":{\"iv\":\"478736fb55872c1baf01b27b1998c90b\"},\"ciphertext\":\"fe5a63cc0055d7b0b3b57886f930ad9b63f48950d1348145d95996c41e05f4e0\",\"kdf\":\"pbkdf2\",\"kdfparams\":{\"c\":10240,\"dklen\":32,\"prf\":\"hmac-sha256\",\"salt\":\"658436d6738a19731149a98744e5cf02c8d5aa1f8e80c1a43cc9351c70a984e4\"},\"mac\":\"c7384b26ecf25539d942030230062af9b69de5766cbcc4690bffce1536644631\"},\"address\":\"00bac56a8a27232baa044c03f43bf3648c961735\",\"name\":\"hello world\",\"meta\":\"{}\"}", "himom"],"id":1}"#; - let request = request.replace("", id); - let response = r#"{"jsonrpc":"2.0","result":"0x00bac56a8a27232baa044c03f43bf3648c961735","id":1}"#; + let id = "6a186c80-7797-cff2-bc2e-7c1d6a6cc76e"; + let request = r#"{"jsonrpc":"2.0","method":"parity_newAccountFromWallet","params":["{\"id\":\"\",\"version\":3,\"crypto\":{\"cipher\":\"aes-128-ctr\",\"cipherparams\":{\"iv\":\"478736fb55872c1baf01b27b1998c90b\"},\"ciphertext\":\"fe5a63cc0055d7b0b3b57886f930ad9b63f48950d1348145d95996c41e05f4e0\",\"kdf\":\"pbkdf2\",\"kdfparams\":{\"c\":10240,\"dklen\":32,\"prf\":\"hmac-sha256\",\"salt\":\"658436d6738a19731149a98744e5cf02c8d5aa1f8e80c1a43cc9351c70a984e4\"},\"mac\":\"c7384b26ecf25539d942030230062af9b69de5766cbcc4690bffce1536644631\"},\"address\":\"00bac56a8a27232baa044c03f43bf3648c961735\",\"name\":\"hello world\",\"meta\":\"{}\"}", "himom"],"id":1}"#; + let request = request.replace("", id); + let response = + r#"{"jsonrpc":"2.0","result":"0x00bac56a8a27232baa044c03f43bf3648c961735","id":1}"#; - let res = tester.io.handle_request_sync(&request).unwrap(); + let res = tester.io.handle_request_sync(&request).unwrap(); - assert_eq!(res, response); + assert_eq!(res, response); - let account_meta = tester.accounts.account_meta("0x00bac56a8a27232baa044c03f43bf3648c961735".into()).unwrap(); - let account_uuid: String = 
account_meta.uuid.unwrap().into(); + let account_meta = tester + .accounts + .account_meta("0x00bac56a8a27232baa044c03f43bf3648c961735".into()) + .unwrap(); + let account_uuid: String = account_meta.uuid.unwrap().into(); - // the RPC should import the account with a new id - assert!(account_uuid != id); + // the RPC should import the account with a new id + assert!(account_uuid != id); } #[test] fn should_sign_message() { - let tester = setup(); - let hash = tester.accounts - .insert_account( - "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a".parse().unwrap(), - &"password1".into()) - .expect("account should be inserted ok"); + let tester = setup(); + let hash = tester + .accounts + .insert_account( + "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a" + .parse() + .unwrap(), + &"password1".into(), + ) + .expect("account should be inserted ok"); - assert_eq!(hash, "c171033d5cbff7175f29dfd3a63dda3d6f8f385e".parse().unwrap()); + assert_eq!( + hash, + "c171033d5cbff7175f29dfd3a63dda3d6f8f385e".parse().unwrap() + ); - let request = r#"{"jsonrpc": "2.0", "method": "parity_signMessage", "params": ["0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e", "password1", "0xbc36789e7a1e281436464229828f817d6612f7b477d66591ff96a9e064bcc98a"], "id": 3}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x1d9e33a8cf8bfc089a172bca01da462f9e359c6cb1b0f29398bc884e4d18df4f78588aee4fb5cc067ca62d2abab995e0bba29527be6ac98105b0320020a2efaf00","id":3}"#; - let res = tester.io.handle_request_sync(&request); - assert_eq!(res, Some(response.into())); + let request = r#"{"jsonrpc": "2.0", "method": "parity_signMessage", "params": ["0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e", "password1", "0xbc36789e7a1e281436464229828f817d6612f7b477d66591ff96a9e064bcc98a"], "id": 3}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x1d9e33a8cf8bfc089a172bca01da462f9e359c6cb1b0f29398bc884e4d18df4f78588aee4fb5cc067ca62d2abab995e0bba29527be6ac98105b0320020a2efaf00","id":3}"#; + let res = 
tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); } diff --git a/rpc/src/v1/tests/mocked/parity_set.rs b/rpc/src/v1/tests/mocked/parity_set.rs index 25c13fb1c..d9de12748 100644 --- a/rpc/src/v1/tests/mocked/parity_set.rs +++ b/rpc/src/v1/tests/mocked/parity_set.rs @@ -14,258 +14,281 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::str::FromStr; +use ethereum_types::{Address, U256}; use rustc_hex::FromHex; -use ethereum_types::{U256, Address}; +use std::{str::FromStr, sync::Arc}; -use ethcore::miner::MinerService; -use ethcore::client::TestBlockChainClient; +use ethcore::{client::TestBlockChainClient, miner::MinerService}; use sync::ManageNetwork; -use jsonrpc_core::IoHandler; -use v1::{ParitySet, ParitySetClient}; -use v1::tests::helpers::{TestMinerService, TestUpdater}; use super::manage_network::TestManageNetwork; +use jsonrpc_core::IoHandler; +use v1::{ + tests::helpers::{TestMinerService, TestUpdater}, + ParitySet, ParitySetClient, +}; use fake_fetch::FakeFetch; fn miner_service() -> Arc { - Arc::new(TestMinerService::default()) + Arc::new(TestMinerService::default()) } fn client_service() -> Arc { - Arc::new(TestBlockChainClient::default()) + Arc::new(TestBlockChainClient::default()) } fn network_service() -> Arc { - Arc::new(TestManageNetwork) + Arc::new(TestManageNetwork) } fn updater_service() -> Arc { - Arc::new(TestUpdater::default()) + Arc::new(TestUpdater::default()) } -pub type TestParitySetClient = ParitySetClient>; +pub type TestParitySetClient = + ParitySetClient>; fn parity_set_client( - client: &Arc, - miner: &Arc, - updater: &Arc, - net: &Arc, + client: &Arc, + miner: &Arc, + updater: &Arc, + net: &Arc, ) -> TestParitySetClient { - ParitySetClient::new( - client, - miner, - updater, - &(net.clone() as Arc), - FakeFetch::new(Some(1)), - ) + ParitySetClient::new( + client, + miner, + updater, + &(net.clone() as Arc), + 
FakeFetch::new(Some(1)), + ) } #[test] fn rpc_parity_execute_upgrade() { - let miner = miner_service(); - let client = client_service(); - let network = network_service(); - let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); + let mut io = IoHandler::new(); + io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_executeUpgrade", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "parity_executeUpgrade", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); - let request = r#"{"jsonrpc": "2.0", "method": "parity_executeUpgrade", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "parity_executeUpgrade", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_upgrade_ready() { - let miner = miner_service(); - let client = client_service(); - let network = network_service(); - let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); + let mut io = IoHandler::new(); + 
io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_upgradeReady", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"binary":"0x00000000000000000000000000000000000000000000000000000000000005e6","fork":15100,"is_critical":true,"version":{"hash":"0x0000000000000000000000000000000000000097","track":"beta","version":{"major":1,"minor":5,"patch":1}}},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "parity_upgradeReady", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"binary":"0x00000000000000000000000000000000000000000000000000000000000005e6","fork":15100,"is_critical":true,"version":{"hash":"0x0000000000000000000000000000000000000097","track":"beta","version":{"major":1,"minor":5,"patch":1}}},"id":1}"#; + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); - updater.set_updated(true); + updater.set_updated(true); - let request = r#"{"jsonrpc": "2.0", "method": "parity_upgradeReady", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "parity_upgradeReady", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_set_min_gas_price() { - let miner = miner_service(); - let client = client_service(); - let network = network_service(); - let updater = updater_service(); + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let mut io = IoHandler::new(); 
+ io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_setMinGasPrice", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_setMinGasPrice", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_set_min_gas_price_with_automated_calibration_enabled() { - let miner = miner_service(); - *miner.min_gas_price.write() = None; + let miner = miner_service(); + *miner.min_gas_price.write() = None; - let client = client_service(); - let network = network_service(); - let updater = updater_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let mut io = IoHandler::new(); + io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_setMinGasPrice", "params":["0xdeadbeef"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Can't update fixed gas price while automatic gas calibration is enabled."},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_setMinGasPrice", "params":["0xdeadbeef"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Can't update fixed gas price while automatic gas calibration is enabled."},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } 
#[test] fn rpc_parity_set_gas_floor_target() { - let miner = miner_service(); - let client = client_service(); - let network = network_service(); - let updater = updater_service(); + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let mut io = IoHandler::new(); + io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_setGasFloorTarget", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_setGasFloorTarget", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); - assert_eq!(miner.authoring_params().gas_range_target.0, U256::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap()); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + miner.authoring_params().gas_range_target.0, + U256::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap() + ); } #[test] fn rpc_parity_set_extra_data() { - let miner = miner_service(); - let client = client_service(); - let network = network_service(); - let updater = updater_service(); + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let mut io = IoHandler::new(); + io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": 
"parity_setExtraData", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_setExtraData", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); - assert_eq!(miner.authoring_params().extra_data, "cd1722f3947def4cf144679da39c4c32bdc35681".from_hex().unwrap()); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + miner.authoring_params().extra_data, + "cd1722f3947def4cf144679da39c4c32bdc35681" + .from_hex() + .unwrap() + ); } #[test] fn rpc_parity_set_author() { - let miner = miner_service(); - let client = client_service(); - let network = network_service(); - let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); + let mut io = IoHandler::new(); + io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_setAuthor", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_setAuthor", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); - assert_eq!(miner.authoring_params().author, Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap()); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + 
miner.authoring_params().author, + Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap() + ); } #[test] fn rpc_parity_set_transactions_limit() { - let miner = miner_service(); - let client = client_service(); - let network = network_service(); - let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); + let mut io = IoHandler::new(); + io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_setTransactionsLimit", "params":[10240240], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_setTransactionsLimit", "params":[10240240], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_set_hash_content() { - let miner = miner_service(); - let client = client_service(); - let network = network_service(); - let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); + let mut io = IoHandler::new(); + io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_hashContent", "params":["https://parity.io/assets/images/ethcore-black-horizontal.png"], "id": 1}"#; - let response = 
r#"{"jsonrpc":"2.0","result":"0x2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_hashContent", "params":["https://parity.io/assets/images/ethcore-black-horizontal.png"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_parity_remove_transaction() { - use types::transaction::{Transaction, Action}; + use types::transaction::{Action, Transaction}; - let miner = miner_service(); - let client = client_service(); - let network = network_service(); - let updater = updater_service(); - let mut io = IoHandler::new(); - io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); + let mut io = IoHandler::new(); + io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); - let tx = Transaction { - nonce: 1.into(), - gas_price: 0x9184e72a000u64.into(), - gas: 0x76c0.into(), - action: Action::Call(5.into()), - value: 0x9184e72au64.into(), - data: vec![] - }; - let signed = tx.fake_sign(2.into()); - let hash = signed.hash(); + let tx = Transaction { + nonce: 1.into(), + gas_price: 0x9184e72a000u64.into(), + gas: 0x76c0.into(), + action: Action::Call(5.into()), + value: 0x9184e72au64.into(), + data: vec![], + }; + let signed = tx.fake_sign(2.into()); + let hash = signed.hash(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_removeTransaction", "params":[""#.to_owned() + &format!("0x{:x}", hash) + r#""], "id": 1}"#; - let response = 
r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"chainId":null,"condition":null,"creates":null,"from":"0x0000000000000000000000000000000000000002","gas":"0x76c0","gasPrice":"0x9184e72a000","hash":"0xa2e0da8a8064e0b9f93e95a53c2db6d01280efb8ac72a708d25487e67dd0f8fc","input":"0x","nonce":"0x1","publicKey":null,"r":"0x1","raw":"0xe9018609184e72a0008276c0940000000000000000000000000000000000000005849184e72a80800101","s":"0x1","standardV":"0x4","to":"0x0000000000000000000000000000000000000005","transactionIndex":null,"v":"0x0","value":"0x9184e72a"},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_removeTransaction", "params":[""# + .to_owned() + + &format!("0x{:x}", hash) + + r#""], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"chainId":null,"condition":null,"creates":null,"from":"0x0000000000000000000000000000000000000002","gas":"0x76c0","gasPrice":"0x9184e72a000","hash":"0xa2e0da8a8064e0b9f93e95a53c2db6d01280efb8ac72a708d25487e67dd0f8fc","input":"0x","nonce":"0x1","publicKey":null,"r":"0x1","raw":"0xe9018609184e72a0008276c0940000000000000000000000000000000000000005849184e72a80800101","s":"0x1","standardV":"0x4","to":"0x0000000000000000000000000000000000000005","transactionIndex":null,"v":"0x0","value":"0x9184e72a"},"id":1}"#; - miner.pending_transactions.lock().insert(hash, signed); - assert_eq!(io.handle_request_sync(&request), Some(response.to_owned())); + miner.pending_transactions.lock().insert(hash, signed); + assert_eq!(io.handle_request_sync(&request), Some(response.to_owned())); } #[test] fn rpc_parity_set_engine_signer() { - use accounts::AccountProvider; - use bytes::ToPretty; - use v1::impls::ParitySetAccountsClient; - use v1::traits::ParitySetAccounts; + use accounts::AccountProvider; + use bytes::ToPretty; + use v1::{impls::ParitySetAccountsClient, traits::ParitySetAccounts}; - let account_provider = Arc::new(AccountProvider::transient_provider()); - 
account_provider.insert_account(::hash::keccak("cow").into(), &"password".into()).unwrap(); + let account_provider = Arc::new(AccountProvider::transient_provider()); + account_provider + .insert_account(::hash::keccak("cow").into(), &"password".into()) + .unwrap(); - let miner = miner_service(); - let mut io = IoHandler::new(); - io.extend_with( - ParitySetAccountsClient::new(&account_provider, &miner).to_delegate() - ); + let miner = miner_service(); + let mut io = IoHandler::new(); + io.extend_with(ParitySetAccountsClient::new(&account_provider, &miner).to_delegate()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_setEngineSigner", "params":["0xcd2a3d9f938e13cd947ec05abc7fe734df8dd826", "password"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_setEngineSigner", "params":["0xcd2a3d9f938e13cd947ec05abc7fe734df8dd826", "password"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); - assert_eq!(miner.authoring_params().author, Address::from_str("cd2a3d9f938e13cd947ec05abc7fe734df8dd826").unwrap()); - let signature = miner.signer.read().as_ref().unwrap().sign(::hash::keccak("x")).unwrap().to_vec(); - assert_eq!(&format!("{}", signature.pretty()), "6f46069ded2154af6e806706e4f7f6fd310ac45f3c6dccb85f11c0059ee20a09245df0a0008bb84a10882b1298284bc93058e7bc5938ea728e77620061687a6401"); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + miner.authoring_params().author, + Address::from_str("cd2a3d9f938e13cd947ec05abc7fe734df8dd826").unwrap() + ); + let signature = miner + .signer + .read() + .as_ref() + .unwrap() + .sign(::hash::keccak("x")) + .unwrap() + .to_vec(); + assert_eq!(&format!("{}", signature.pretty()), "6f46069ded2154af6e806706e4f7f6fd310ac45f3c6dccb85f11c0059ee20a09245df0a0008bb84a10882b1298284bc93058e7bc5938ea728e77620061687a6401"); } 
- diff --git a/rpc/src/v1/tests/mocked/personal.rs b/rpc/src/v1/tests/mocked/personal.rs index a2d6b87ce..453d7767d 100644 --- a/rpc/src/v1/tests/mocked/personal.rs +++ b/rpc/src/v1/tests/mocked/personal.rs @@ -14,116 +14,131 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::str::FromStr; +use std::{str::FromStr, sync::Arc}; -use bytes::ToPretty; use accounts::AccountProvider; -use ethereum_types::{Address, H520, U256}; +use bytes::ToPretty; use ethcore::client::TestBlockChainClient; +use ethereum_types::{Address, H520, U256}; +use hash::keccak; use jsonrpc_core::IoHandler; +use parity_runtime::Runtime; use parking_lot::Mutex; use types::transaction::{Action, Transaction}; -use parity_runtime::Runtime; -use hash::keccak; -use v1::{PersonalClient, Personal, Metadata}; -use v1::helpers::{nonce, eip191}; -use v1::helpers::dispatch::{eth_data_hash, FullDispatcher}; -use v1::tests::helpers::TestMinerService; -use v1::types::{EIP191Version, PresignedTransaction}; +use ethkey::Secret; use rustc_hex::ToHex; use serde_json::to_value; -use ethkey::Secret; +use v1::{ + helpers::{ + dispatch::{eth_data_hash, FullDispatcher}, + eip191, nonce, + }, + tests::helpers::TestMinerService, + types::{EIP191Version, PresignedTransaction}, + Metadata, Personal, PersonalClient, +}; struct PersonalTester { - _runtime: Runtime, - accounts: Arc, - io: IoHandler, - miner: Arc, + _runtime: Runtime, + accounts: Arc, + io: IoHandler, + miner: Arc, } fn blockchain_client() -> Arc { - let client = TestBlockChainClient::new(); - Arc::new(client) + let client = TestBlockChainClient::new(); + Arc::new(client) } fn accounts_provider() -> Arc { - Arc::new(AccountProvider::transient_provider()) + Arc::new(AccountProvider::transient_provider()) } fn miner_service() -> Arc { - Arc::new(TestMinerService::default()) + Arc::new(TestMinerService::default()) } fn setup() -> PersonalTester { - setup_with(Config { - 
allow_experimental_rpcs: true - }) + setup_with(Config { + allow_experimental_rpcs: true, + }) } struct Config { - pub allow_experimental_rpcs: bool, + pub allow_experimental_rpcs: bool, } fn setup_with(c: Config) -> PersonalTester { - let runtime = Runtime::with_thread_count(1); - let accounts = accounts_provider(); - let client = blockchain_client(); - let miner = miner_service(); - let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); + let runtime = Runtime::with_thread_count(1); + let accounts = accounts_provider(); + let client = blockchain_client(); + let miner = miner_service(); + let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); - let dispatcher = FullDispatcher::new(client, miner.clone(), reservations, 50); - let personal = PersonalClient::new(&accounts, dispatcher, false, c.allow_experimental_rpcs); + let dispatcher = FullDispatcher::new(client, miner.clone(), reservations, 50); + let personal = PersonalClient::new(&accounts, dispatcher, false, c.allow_experimental_rpcs); - let mut io = IoHandler::default(); - io.extend_with(personal.to_delegate()); + let mut io = IoHandler::default(); + io.extend_with(personal.to_delegate()); - let tester = PersonalTester { - _runtime: runtime, - accounts: accounts, - io: io, - miner: miner, - }; + let tester = PersonalTester { + _runtime: runtime, + accounts: accounts, + io: io, + miner: miner, + }; - tester + tester } #[test] fn accounts() { - let tester = setup(); - let address = tester.accounts.new_account(&"".into()).unwrap(); - let request = r#"{"jsonrpc": "2.0", "method": "personal_listAccounts", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":[""#.to_owned() + &format!("0x{:x}", address) + r#""],"id":1}"#; + let tester = setup(); + let address = tester.accounts.new_account(&"".into()).unwrap(); + let request = r#"{"jsonrpc": "2.0", "method": "personal_listAccounts", "params": [], "id": 1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":[""#.to_owned() + + &format!("0x{:x}", address) + + r#""],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn new_account() { - let tester = setup(); - let request = r#"{"jsonrpc": "2.0", "method": "personal_newAccount", "params": ["pass"], "id": 1}"#; + let tester = setup(); + let request = + r#"{"jsonrpc": "2.0", "method": "personal_newAccount", "params": ["pass"], "id": 1}"#; - let res = tester.io.handle_request_sync(request); + let res = tester.io.handle_request_sync(request); - let accounts = tester.accounts.accounts().unwrap(); - assert_eq!(accounts.len(), 1); - let address = accounts[0]; - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"","id":1}"#; + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + let address = accounts[0]; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", address).as_ref() + + r#"","id":1}"#; - assert_eq!(res, Some(response)); + assert_eq!(res, Some(response)); } -fn invalid_password_test(method: &str) -{ - let tester = setup(); - let address = tester.accounts.new_account(&"password123".into()).unwrap(); +fn invalid_password_test(method: &str) { + let tester = setup(); + let address = tester.accounts.new_account(&"password123".into()).unwrap(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", - "method": ""#.to_owned() + method + r#"", + "method": ""# + .to_owned() + + method + + r#"", "params": [{ - "from": ""# + format!("0x{:x}", address).as_ref() + r#"", + "from": ""# + + format!("0x{:x}", address).as_ref() + + r#"", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", "gasPrice": "0x9184e72a000", @@ -132,87 +147,114 @@ fn invalid_password_test(method: &str) "id": 1 }"#; - let response = 
r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidPassword)"},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidPassword)"},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response.into())); + assert_eq!( + tester.io.handle_request_sync(request.as_ref()), + Some(response.into()) + ); } #[test] fn sign() { - let tester = setup(); - let address = tester.accounts.new_account(&"password123".into()).unwrap(); - let data = vec![5u8]; + let tester = setup(); + let address = tester.accounts.new_account(&"password123".into()).unwrap(); + let data = vec![5u8]; - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "personal_sign", "params": [ - ""#.to_owned() + format!("0x{}", data.to_hex()).as_ref() + r#"", - ""# + format!("0x{:x}", address).as_ref() + r#"", + ""# + .to_owned() + + format!("0x{}", data.to_hex()).as_ref() + + r#"", + ""# + format!("0x{:x}", address).as_ref() + + r#"", "password123" ], "id": 1 }"#; - let hash = eth_data_hash(data); - let signature = H520(tester.accounts.sign(address, Some("password123".into()), hash).unwrap().into_electrum()); - let signature = format!("{:?}", signature); + let hash = eth_data_hash(data); + let signature = H520( + tester + .accounts + .sign(address, Some("password123".into()), hash) + .unwrap() + .into_electrum(), + ); + let signature = format!("{:?}", signature); - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + &signature + r#"","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + &signature + r#"","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response)); + assert_eq!( + tester.io.handle_request_sync(request.as_ref()), + Some(response) + ); } #[test] fn sign_with_invalid_password() { - let tester = setup(); - let address = 
tester.accounts.new_account(&"password123".into()).unwrap(); + let tester = setup(); + let address = tester.accounts.new_account(&"password123".into()).unwrap(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "personal_sign", "params": [ "0x0000000000000000000000000000000000000000000000000000000000000005", - ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "" ], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidPassword)"},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidPassword)"},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response.into())); + assert_eq!( + tester.io.handle_request_sync(request.as_ref()), + Some(response.into()) + ); } #[test] fn sign_transaction_with_invalid_password() { - invalid_password_test("personal_signTransaction"); + invalid_password_test("personal_signTransaction"); } #[test] fn sign_and_send_transaction_with_invalid_password() { - invalid_password_test("personal_sendTransaction"); + invalid_password_test("personal_sendTransaction"); } #[test] fn send_transaction() { - sign_and_send_test("personal_sendTransaction"); + sign_and_send_test("personal_sendTransaction"); } #[test] fn sign_and_send_transaction() { - sign_and_send_test("personal_signAndSendTransaction"); + sign_and_send_test("personal_signAndSendTransaction"); } fn sign_and_send_test(method: &str) { - let tester = setup(); - let address = tester.accounts.new_account(&"password123".into()).unwrap(); + let tester = setup(); + let address = tester.accounts.new_account(&"password123".into()).unwrap(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", - "method": ""#.to_owned() + method + r#"", + "method": ""# + 
.to_owned() + + method + + r#"", "params": [{ - "from": ""# + format!("0x{:x}", address).as_ref() + r#"", + "from": ""# + + format!("0x{:x}", address).as_ref() + + r#"", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", "gasPrice": "0x9184e72a000", @@ -221,163 +263,242 @@ fn sign_and_send_test(method: &str) { "id": 1 }"#; - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(0x9184e72a000u64), - gas: U256::from(0x76c0), - action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - value: U256::from(0x9184e72au64), - data: vec![] - }; - tester.accounts.unlock_account_temporarily(address, "password123".into()).unwrap(); - let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x9184e72a000u64), + gas: U256::from(0x76c0), + action: Action::Call( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + ), + value: U256::from(0x9184e72au64), + data: vec![], + }; + tester + .accounts + .unlock_account_temporarily(address, "password123".into()) + .unwrap(); + let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap(); + let t = t.with_signature(signature, None); - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:x}", t.hash()).as_ref() + r#"","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", t.hash()).as_ref() + + r#"","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response)); + assert_eq!( + tester.io.handle_request_sync(request.as_ref()), + Some(response) + ); - tester.miner.increment_nonce(&address); + tester.miner.increment_nonce(&address); - let t = Transaction { - nonce: U256::one(), - gas_price: U256::from(0x9184e72a000u64), - gas: U256::from(0x76c0), - action: 
Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - value: U256::from(0x9184e72au64), - data: vec![] - }; - tester.accounts.unlock_account_temporarily(address, "password123".into()).unwrap(); - let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); + let t = Transaction { + nonce: U256::one(), + gas_price: U256::from(0x9184e72a000u64), + gas: U256::from(0x76c0), + action: Action::Call( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + ), + value: U256::from(0x9184e72au64), + data: vec![], + }; + tester + .accounts + .unlock_account_temporarily(address, "password123".into()) + .unwrap(); + let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap(); + let t = t.with_signature(signature, None); - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:x}", t.hash()).as_ref() + r#"","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", t.hash()).as_ref() + + r#"","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response)); + assert_eq!( + tester.io.handle_request_sync(request.as_ref()), + Some(response) + ); } #[test] fn ec_recover() { - let tester = setup(); - let address = tester.accounts.new_account(&"password123".into()).unwrap(); - let data = vec![5u8]; + let tester = setup(); + let address = tester.accounts.new_account(&"password123".into()).unwrap(); + let data = vec![5u8]; - let hash = eth_data_hash(data.clone()); - let signature = H520(tester.accounts.sign(address, Some("password123".into()), hash).unwrap().into_electrum()); - let signature = format!("{:?}", signature); + let hash = eth_data_hash(data.clone()); + let signature = H520( + tester + .accounts + .sign(address, Some("password123".into()), hash) + .unwrap() + .into_electrum(), + ); + let signature = format!("{:?}", signature); - let request = r#"{ + let request = r#"{ 
"jsonrpc": "2.0", "method": "personal_ecRecover", "params": [ - ""#.to_owned() + format!("0x{}", data.to_hex()).as_ref() + r#"", - ""# + &signature + r#"" + ""# + .to_owned() + + format!("0x{}", data.to_hex()).as_ref() + + r#"", + ""# + &signature + + r#"" ], "id": 1 }"#; - let address = format!("0x{:x}", address); - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + &address + r#"","id":1}"#; + let address = format!("0x{:x}", address); + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + &address + r#"","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response.into())); + assert_eq!( + tester.io.handle_request_sync(request.as_ref()), + Some(response.into()) + ); } #[test] fn ec_recover_invalid_signature() { - let tester = setup(); - let data = vec![5u8]; + let tester = setup(); + let data = vec![5u8]; - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "personal_ecRecover", "params": [ - ""#.to_owned() + format!("0x{}", data.to_hex()).as_ref() + r#"", + ""# + .to_owned() + + format!("0x{}", data.to_hex()).as_ref() + + r#"", "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" ], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32055,"message":"Encryption error.","data":"InvalidSignature"},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32055,"message":"Encryption error.","data":"InvalidSignature"},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response.into())); + assert_eq!( + tester.io.handle_request_sync(request.as_ref()), + Some(response.into()) + ); } #[test] fn should_not_unlock_account_temporarily_if_allow_perm_is_disabled() { - let tester = setup(); - let address = tester.accounts.new_account(&"password123".into()).unwrap(); + let tester = setup(); + let address = tester.accounts.new_account(&"password123".into()).unwrap(); - let request = r#"{ 
+ let request = r#"{ "jsonrpc": "2.0", "method": "personal_unlockAccount", "params": [ - ""#.to_owned() + &format!("0x{:x}", address) + r#"", + ""# + .to_owned() + + &format!("0x{:x}", address) + + r#"", "password123", "0x100" ], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Time-unlocking is not supported when permanent unlock is disabled.","data":"Use personal_sendTransaction or enable permanent unlocking, instead."},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(&request), Some(response.into())); + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Time-unlocking is not supported when permanent unlock is disabled.","data":"Use personal_sendTransaction or enable permanent unlocking, instead."},"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.into()) + ); - assert!(tester.accounts.sign(address, None, Default::default()).is_err(), "Should not unlock account."); + assert!( + tester + .accounts + .sign(address, None, Default::default()) + .is_err(), + "Should not unlock account." 
+ ); } #[test] fn should_unlock_account_permanently() { - let tester = setup(); - let address = tester.accounts.new_account(&"password123".into()).unwrap(); + let tester = setup(); + let address = tester.accounts.new_account(&"password123".into()).unwrap(); - let request = r#"{ + let request = r#"{ "jsonrpc": "2.0", "method": "personal_unlockAccount", "params": [ - ""#.to_owned() + &format!("0x{:x}", address) + r#"", + ""# + .to_owned() + + &format!("0x{:x}", address) + + r#"", "password123", null ], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(&request), Some(response.into())); - assert!(tester.accounts.sign(address, None, Default::default()).is_ok(), "Should unlock account."); + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.into()) + ); + assert!( + tester + .accounts + .sign(address, None, Default::default()) + .is_ok(), + "Should unlock account." 
+ ); } #[test] fn sign_eip191_with_validator() { - let tester = setup(); - let address = tester.accounts.new_account(&"password123".into()).unwrap(); - let request = r#"{ + let tester = setup(); + let address = tester.accounts.new_account(&"password123".into()).unwrap(); + let request = r#"{ "jsonrpc": "2.0", "method": "personal_sign191", "params": [ "0x00", { - "validator": ""#.to_owned() + &format!("0x{:x}", address) + r#"", - "data": ""# + &format!("0x{:x}", keccak("hello world")) + r#"" + "validator": ""# + .to_owned() + + &format!("0x{:x}", address) + + r#"", + "data": ""# + + &format!("0x{:x}", keccak("hello world")) + + r#"" }, - ""# + &format!("0x{:x}", address) + r#"", + ""# + &format!("0x{:x}", address) + + r#"", "password123" ], "id": 1 }"#; - let with_validator = to_value(PresignedTransaction { - validator: address.into(), - data: keccak("hello world").to_vec().into() - }).unwrap(); - let result = eip191::hash_message(EIP191Version::PresignedTransaction, with_validator).unwrap(); - let result = tester.accounts.sign(address, Some("password123".into()), result).unwrap().into_electrum(); - let expected = r#"{"jsonrpc":"2.0","result":""#.to_owned() + &format!("0x{}", result.to_hex()) + r#"","id":1}"#; - let response = tester.io.handle_request_sync(&request).unwrap(); - assert_eq!(response, expected) + let with_validator = to_value(PresignedTransaction { + validator: address.into(), + data: keccak("hello world").to_vec().into(), + }) + .unwrap(); + let result = eip191::hash_message(EIP191Version::PresignedTransaction, with_validator).unwrap(); + let result = tester + .accounts + .sign(address, Some("password123".into()), result) + .unwrap() + .into_electrum(); + let expected = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + &format!("0x{}", result.to_hex()) + + r#"","id":1}"#; + let response = tester.io.handle_request_sync(&request).unwrap(); + assert_eq!(response, expected) } #[test] fn sign_eip191_structured_data() { - let tester = setup(); - let secret: 
Secret = keccak("cow").into(); - let address = tester.accounts.insert_account(secret, &"lol".into()).unwrap(); - let request = r#"{ + let tester = setup(); + let secret: Secret = keccak("cow").into(); + let address = tester + .accounts + .insert_account(secret, &"lol".into()) + .unwrap(); + let request = r#"{ "jsonrpc": "2.0", "method": "personal_sign191", "params": [ @@ -419,22 +540,28 @@ fn sign_eip191_structured_data() { ] } }, - ""#.to_owned() + &format!("0x{:x}", address) + r#"", + ""# + .to_owned() + + &format!("0x{:x}", address) + + r#"", "lol" ], "id": 1 }"#; - let expected = r#"{"jsonrpc":"2.0","result":"0x4355c47d63924e8a72e509b65029052eb6c299d53a04e167c5775fd466751c9d07299936d304c153f6443dfa05f40ff007d72911b6f72307f996231605b915621c","id":1}"#; - let response = tester.io.handle_request_sync(&request).unwrap(); - assert_eq!(response, expected) + let expected = r#"{"jsonrpc":"2.0","result":"0x4355c47d63924e8a72e509b65029052eb6c299d53a04e167c5775fd466751c9d07299936d304c153f6443dfa05f40ff007d72911b6f72307f996231605b915621c","id":1}"#; + let response = tester.io.handle_request_sync(&request).unwrap(); + assert_eq!(response, expected) } #[test] fn sign_structured_data() { - let tester = setup(); - let secret: Secret = keccak("cow").into(); - let address = tester.accounts.insert_account(secret, &"lol".into()).unwrap(); - let request = r#"{ + let tester = setup(); + let secret: Secret = keccak("cow").into(); + let address = tester + .accounts + .insert_account(secret, &"lol".into()) + .unwrap(); + let request = r#"{ "jsonrpc": "2.0", "method": "personal_signTypedData", "params": [ @@ -475,25 +602,28 @@ fn sign_structured_data() { ] } }, - ""#.to_owned() + &format!("0x{:x}", address) + r#"", + ""# + .to_owned() + + &format!("0x{:x}", address) + + r#"", "lol" ], "id": 1 }"#; - let expected = r#"{"jsonrpc":"2.0","result":"0x4355c47d63924e8a72e509b65029052eb6c299d53a04e167c5775fd466751c9d07299936d304c153f6443dfa05f40ff007d72911b6f72307f996231605b915621c","id":1}"#; 
- let response = tester.io.handle_request_sync(&request).unwrap(); - assert_eq!(response, expected) + let expected = r#"{"jsonrpc":"2.0","result":"0x4355c47d63924e8a72e509b65029052eb6c299d53a04e167c5775fd466751c9d07299936d304c153f6443dfa05f40ff007d72911b6f72307f996231605b915621c","id":1}"#; + let response = tester.io.handle_request_sync(&request).unwrap(); + assert_eq!(response, expected) } #[test] fn should_disable_experimental_apis() { - // given - let tester = setup_with(Config { - allow_experimental_rpcs: false, - }); + // given + let tester = setup_with(Config { + allow_experimental_rpcs: false, + }); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "personal_sign191", "params": [ @@ -504,8 +634,8 @@ fn should_disable_experimental_apis() { ], "id": 1 }"#; - let r1 = tester.io.handle_request_sync(&request).unwrap(); - let request = r#"{ + let r1 = tester.io.handle_request_sync(&request).unwrap(); + let request = r#"{ "jsonrpc": "2.0", "method": "personal_signTypedData", "params": [ @@ -525,12 +655,12 @@ fn should_disable_experimental_apis() { ], "id": 1 }"#; - let r2 = tester.io.handle_request_sync(&request).unwrap(); + let r2 = tester.io.handle_request_sync(&request).unwrap(); - // then - let expected = r#"{"jsonrpc":"2.0","error":{"code":-32071,"message":"This method is not part of the official RPC API yet (EIP-191). Run with `--jsonrpc-experimental` to enable it.","data":"See EIP: https://eips.ethereum.org/EIPS/eip-191"},"id":1}"#; - assert_eq!(r1, expected); + // then + let expected = r#"{"jsonrpc":"2.0","error":{"code":-32071,"message":"This method is not part of the official RPC API yet (EIP-191). Run with `--jsonrpc-experimental` to enable it.","data":"See EIP: https://eips.ethereum.org/EIPS/eip-191"},"id":1}"#; + assert_eq!(r1, expected); - let expected = r#"{"jsonrpc":"2.0","error":{"code":-32071,"message":"This method is not part of the official RPC API yet (EIP-712). 
Run with `--jsonrpc-experimental` to enable it.","data":"See EIP: https://eips.ethereum.org/EIPS/eip-712"},"id":1}"#; - assert_eq!(r2, expected); + let expected = r#"{"jsonrpc":"2.0","error":{"code":-32071,"message":"This method is not part of the official RPC API yet (EIP-712). Run with `--jsonrpc-experimental` to enable it.","data":"See EIP: https://eips.ethereum.org/EIPS/eip-712"},"id":1}"#; + assert_eq!(r2, expected); } diff --git a/rpc/src/v1/tests/mocked/pubsub.rs b/rpc/src/v1/tests/mocked/pubsub.rs index c0f664d5f..fb7cd370e 100644 --- a/rpc/src/v1/tests/mocked/pubsub.rs +++ b/rpc/src/v1/tests/mocked/pubsub.rs @@ -16,62 +16,70 @@ use std::sync::{atomic, Arc}; -use jsonrpc_core::{self as core, MetaIoHandler}; -use jsonrpc_core::futures::{self, Stream, Future}; +use jsonrpc_core::{ + self as core, + futures::{self, Future, Stream}, + MetaIoHandler, +}; use jsonrpc_pubsub::Session; use parity_runtime::Runtime; -use v1::{PubSub, PubSubClient, Metadata}; +use v1::{Metadata, PubSub, PubSubClient}; fn rpc() -> MetaIoHandler { - let mut io = MetaIoHandler::default(); - let called = atomic::AtomicBool::new(false); - io.add_method("hello", move |_| { - if !called.load(atomic::Ordering::SeqCst) { - called.store(true, atomic::Ordering::SeqCst); - Ok(core::Value::String("hello".into())) - } else { - Ok(core::Value::String("world".into())) - } - }); - io + let mut io = MetaIoHandler::default(); + let called = atomic::AtomicBool::new(false); + io.add_method("hello", move |_| { + if !called.load(atomic::Ordering::SeqCst) { + called.store(true, atomic::Ordering::SeqCst); + Ok(core::Value::String("hello".into())) + } else { + Ok(core::Value::String("world".into())) + } + }); + io } #[test] fn should_subscribe_to_a_method() { - // given - let el = Runtime::with_thread_count(1); - let rpc = rpc(); - let pubsub = PubSubClient::new_test(rpc, el.executor()).to_delegate(); + // given + let el = Runtime::with_thread_count(1); + let rpc = rpc(); + let pubsub = 
PubSubClient::new_test(rpc, el.executor()).to_delegate(); - let mut io = MetaIoHandler::default(); - io.extend_with(pubsub); + let mut io = MetaIoHandler::default(); + io.extend_with(pubsub); - let mut metadata = Metadata::default(); - let (sender, receiver) = futures::sync::mpsc::channel(8); - metadata.session = Some(Arc::new(Session::new(sender))); + let mut metadata = Metadata::default(); + let (sender, receiver) = futures::sync::mpsc::channel(8); + metadata.session = Some(Arc::new(Session::new(sender))); - // Subscribe - let request = r#"{"jsonrpc": "2.0", "method": "parity_subscribe", "params": ["hello", []], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x416d77337e24399d","id":1}"#; - assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); + // Subscribe + let request = + r#"{"jsonrpc": "2.0", "method": "parity_subscribe", "params": ["hello", []], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x416d77337e24399d","id":1}"#; + assert_eq!( + io.handle_request_sync(request, metadata.clone()), + Some(response.to_owned()) + ); - // Check notifications - let (res, receiver) = receiver.into_future().wait().unwrap(); - let response = - r#"{"jsonrpc":"2.0","method":"parity_subscription","params":{"result":"hello","subscription":"0x416d77337e24399d"}}"#; - assert_eq!(res, Some(response.into())); + // Check notifications + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = r#"{"jsonrpc":"2.0","method":"parity_subscription","params":{"result":"hello","subscription":"0x416d77337e24399d"}}"#; + assert_eq!(res, Some(response.into())); - let (res, receiver) = receiver.into_future().wait().unwrap(); - let response = - r#"{"jsonrpc":"2.0","method":"parity_subscription","params":{"result":"world","subscription":"0x416d77337e24399d"}}"#; - assert_eq!(res, Some(response.into())); + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = 
r#"{"jsonrpc":"2.0","method":"parity_subscription","params":{"result":"world","subscription":"0x416d77337e24399d"}}"#; + assert_eq!(res, Some(response.into())); - // And unsubscribe - let request = r#"{"jsonrpc": "2.0", "method": "parity_unsubscribe", "params": ["0x416d77337e24399d"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request_sync(request, metadata), Some(response.to_owned())); + // And unsubscribe + let request = r#"{"jsonrpc": "2.0", "method": "parity_unsubscribe", "params": ["0x416d77337e24399d"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!( + io.handle_request_sync(request, metadata), + Some(response.to_owned()) + ); - let (res, _receiver) = receiver.into_future().wait().unwrap(); - assert_eq!(res, None); + let (res, _receiver) = receiver.into_future().wait().unwrap(); + assert_eq!(res, None); } diff --git a/rpc/src/v1/tests/mocked/rpc.rs b/rpc/src/v1/tests/mocked/rpc.rs index d4634ac90..dad7b2201 100644 --- a/rpc/src/v1/tests/mocked/rpc.rs +++ b/rpc/src/v1/tests/mocked/rpc.rs @@ -14,38 +14,39 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::BTreeMap; use jsonrpc_core::IoHandler; +use std::collections::BTreeMap; use v1::{Rpc, RpcClient}; fn rpc_client() -> RpcClient { - let mut modules = BTreeMap::new(); - modules.insert("rpc".to_owned(), "1.0".to_owned()); - modules.insert("web3".to_owned(), "1.0".to_owned()); - modules.insert("ethcore".to_owned(), "1.0".to_owned()); - RpcClient::new(modules) + let mut modules = BTreeMap::new(); + modules.insert("rpc".to_owned(), "1.0".to_owned()); + modules.insert("web3".to_owned(), "1.0".to_owned()); + modules.insert("ethcore".to_owned(), "1.0".to_owned()); + RpcClient::new(modules) } #[test] fn modules() { - let rpc = rpc_client().to_delegate(); - let mut io = IoHandler::new(); - io.extend_with(rpc); + let rpc = rpc_client().to_delegate(); + let mut io = IoHandler::new(); + io.extend_with(rpc); - let request = r#"{"jsonrpc": "2.0", "method": "modules", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"rpc":"1.0","web3":"1.0"},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "modules", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"rpc":"1.0","web3":"1.0"},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_modules() { - let rpc = rpc_client().to_delegate(); - let mut io = IoHandler::new(); - io.extend_with(rpc); + let rpc = rpc_client().to_delegate(); + let mut io = IoHandler::new(); + io.extend_with(rpc); - let request = r#"{"jsonrpc": "2.0", "method": "rpc_modules", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"ethcore":"1.0","rpc":"1.0","web3":"1.0"},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "rpc_modules", "params": [], "id": 1}"#; + let response = + r#"{"jsonrpc":"2.0","result":{"ethcore":"1.0","rpc":"1.0","web3":"1.0"},"id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + 
assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/mocked/secretstore.rs b/rpc/src/v1/tests/mocked/secretstore.rs index 96e20d002..93fe1d2ac 100644 --- a/rpc/src/v1/tests/mocked/secretstore.rs +++ b/rpc/src/v1/tests/mocked/secretstore.rs @@ -16,161 +16,190 @@ use std::sync::Arc; -use crypto::DEFAULT_MAC; use accounts::AccountProvider; +use crypto::DEFAULT_MAC; use ethereum_types::H256; -use ethkey::{KeyPair, Signature, verify_public}; +use ethkey::{verify_public, KeyPair, Signature}; -use serde_json; use jsonrpc_core::{IoHandler, Success}; -use v1::metadata::Metadata; -use v1::SecretStoreClient; -use v1::traits::secretstore::SecretStore; -use v1::helpers::secretstore::ordered_servers_keccak; -use v1::types::EncryptedDocumentKey; +use serde_json; +use v1::{ + helpers::secretstore::ordered_servers_keccak, metadata::Metadata, + traits::secretstore::SecretStore, types::EncryptedDocumentKey, SecretStoreClient, +}; struct Dependencies { - pub accounts: Arc, + pub accounts: Arc, } impl Dependencies { - pub fn new() -> Self { - Dependencies { - accounts: Arc::new(AccountProvider::transient_provider()), - } - } + pub fn new() -> Self { + Dependencies { + accounts: Arc::new(AccountProvider::transient_provider()), + } + } - pub fn client(&self) -> SecretStoreClient { - SecretStoreClient::new(&self.accounts) - } + pub fn client(&self) -> SecretStoreClient { + SecretStoreClient::new(&self.accounts) + } - fn default_client(&self) -> IoHandler { - let mut io = IoHandler::default(); - io.extend_with(self.client().to_delegate()); - io - } + fn default_client(&self) -> IoHandler { + let mut io = IoHandler::default(); + io.extend_with(self.client().to_delegate()); + io + } } #[test] fn rpc_secretstore_encrypt_and_decrypt() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - // insert new account - let secret = 
"c1f1cfe279a5c350d13795bce162941967340c8a228e6ba175489afc564a5bef".parse().unwrap(); - deps.accounts.insert_account(secret, &"password".into()).unwrap(); + // insert new account + let secret = "c1f1cfe279a5c350d13795bce162941967340c8a228e6ba175489afc564a5bef" + .parse() + .unwrap(); + deps.accounts + .insert_account(secret, &"password".into()) + .unwrap(); - // execute encryption request - let encryption_request = r#"{"jsonrpc": "2.0", "method": "secretstore_encrypt", "params":[ + // execute encryption request + let encryption_request = r#"{"jsonrpc": "2.0", "method": "secretstore_encrypt", "params":[ "0x5c2f3b4ec0c2234f8358697edc8b82a62e3ac995", "password", "0x0440262acc06f1e13cb11b34e792cdf698673a16bb812163cb52689ac34c94ae47047b58f58d8b596d21ac7b03a55896132d07a7dc028b2dad88f6c5a90623fa5b30ff4b1ba385a98c970432d13417cf6d7facd62f86faaef15ca993735890da0cb3e417e2740fc72de7501eef083a12dd5a9ebe513b592b1740848576a936a1eb88fc553fc624b1cae41a0a4e074e34e2aaae686709f08d70e505c5acba12ef96017e89be675a2adb07c72c4e95814fbf", "0xdeadbeef" ], "id": 1}"#; - let encryption_response = io.handle_request_sync(encryption_request).unwrap(); - let encryption_response: Success = serde_json::from_str(&encryption_response).unwrap(); + let encryption_response = io.handle_request_sync(encryption_request).unwrap(); + let encryption_response: Success = serde_json::from_str(&encryption_response).unwrap(); - // execute decryption request - let decryption_request_left = r#"{"jsonrpc": "2.0", "method": "secretstore_decrypt", "params":[ + // execute decryption request + let decryption_request_left = r#"{"jsonrpc": "2.0", "method": "secretstore_decrypt", "params":[ "0x5c2f3b4ec0c2234f8358697edc8b82a62e3ac995", "password", 
"0x0440262acc06f1e13cb11b34e792cdf698673a16bb812163cb52689ac34c94ae47047b58f58d8b596d21ac7b03a55896132d07a7dc028b2dad88f6c5a90623fa5b30ff4b1ba385a98c970432d13417cf6d7facd62f86faaef15ca993735890da0cb3e417e2740fc72de7501eef083a12dd5a9ebe513b592b1740848576a936a1eb88fc553fc624b1cae41a0a4e074e34e2aaae686709f08d70e505c5acba12ef96017e89be675a2adb07c72c4e95814fbf",""#; - let decryption_request_mid = encryption_response.result.as_str().unwrap(); - let decryption_request_right = r#"" + let decryption_request_mid = encryption_response.result.as_str().unwrap(); + let decryption_request_right = r#"" ], "id": 2}"#; - let decryption_request = decryption_request_left.to_owned() + decryption_request_mid + decryption_request_right; - let decryption_response = io.handle_request_sync(&decryption_request).unwrap(); - assert_eq!(decryption_response, r#"{"jsonrpc":"2.0","result":"0xdeadbeef","id":2}"#); + let decryption_request = + decryption_request_left.to_owned() + decryption_request_mid + decryption_request_right; + let decryption_response = io.handle_request_sync(&decryption_request).unwrap(); + assert_eq!( + decryption_response, + r#"{"jsonrpc":"2.0","result":"0xdeadbeef","id":2}"# + ); } #[test] fn rpc_secretstore_shadow_decrypt() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - // insert new account - let secret = "82758356bf46b42710d3946a8efa612b7bf5e125e4d49f28facf1139db4a46f4".parse().unwrap(); - deps.accounts.insert_account(secret, &"password".into()).unwrap(); + // insert new account + let secret = "82758356bf46b42710d3946a8efa612b7bf5e125e4d49f28facf1139db4a46f4" + .parse() + .unwrap(); + deps.accounts + .insert_account(secret, &"password".into()) + .unwrap(); - // execute decryption request - let decryption_request = r#"{"jsonrpc": "2.0", "method": "secretstore_shadowDecrypt", "params":[ + // execute decryption request + let decryption_request = r#"{"jsonrpc": "2.0", "method": 
"secretstore_shadowDecrypt", "params":[ "0x00dfE63B22312ab4329aD0d28CaD8Af987A01932", "password", "0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91", "0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3", ["0x049ce50bbadb6352574f2c59742f78df83333975cbd5cbb151c6e8628749a33dc1fa93bb6dffae5994e3eb98ae859ed55ee82937538e6adb054d780d1e89ff140f121529eeadb1161562af9d3342db0008919ca280a064305e5a4e518e93279de7a9396fe5136a9658e337e8e276221248c381c5384cd1ad28e5921f46ff058d5fbcf8a388fc881d0dd29421c218d51761"], "0x2ddec1f96229efa2916988d8b2a82a47ef36f71c" ], "id": 1}"#; - let decryption_response = io.handle_request_sync(&decryption_request).unwrap(); - assert_eq!(decryption_response, r#"{"jsonrpc":"2.0","result":"0xdeadbeef","id":1}"#); + let decryption_response = io.handle_request_sync(&decryption_request).unwrap(); + assert_eq!( + decryption_response, + r#"{"jsonrpc":"2.0","result":"0xdeadbeef","id":1}"# + ); } #[test] fn rpc_secretstore_servers_set_hash() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - // execute hashing request - let hashing_request = r#"{"jsonrpc": "2.0", "method": "secretstore_serversSetHash", "params":[ + // execute hashing request + let hashing_request = r#"{"jsonrpc": "2.0", "method": "secretstore_serversSetHash", "params":[ ["0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91", "0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3"] ], "id": 1}"#; - let hashing_response = io.handle_request_sync(&hashing_request).unwrap(); - let hashing_response = hashing_response.replace(r#"{"jsonrpc":"2.0","result":"0x"#, ""); - let hashing_response = 
hashing_response.replace(r#"","id":1}"#, ""); - let hash: H256 = hashing_response.parse().unwrap(); + let hashing_response = io.handle_request_sync(&hashing_request).unwrap(); + let hashing_response = hashing_response.replace(r#"{"jsonrpc":"2.0","result":"0x"#, ""); + let hashing_response = hashing_response.replace(r#"","id":1}"#, ""); + let hash: H256 = hashing_response.parse().unwrap(); - let servers_set_keccak = ordered_servers_keccak(vec![ + let servers_set_keccak = ordered_servers_keccak(vec![ "843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91".parse().unwrap(), "07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3".parse().unwrap() ].into_iter().collect()); - assert_eq!(hash, servers_set_keccak); + assert_eq!(hash, servers_set_keccak); } #[test] fn rpc_secretstore_sign_raw_hash() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - // insert new account - let secret = "82758356bf46b42710d3946a8efa612b7bf5e125e4d49f28facf1139db4a46f4".parse().unwrap(); - let key_pair = KeyPair::from_secret(secret).unwrap(); - deps.accounts.insert_account(key_pair.secret().clone(), &"password".into()).unwrap(); + // insert new account + let secret = "82758356bf46b42710d3946a8efa612b7bf5e125e4d49f28facf1139db4a46f4" + .parse() + .unwrap(); + let key_pair = KeyPair::from_secret(secret).unwrap(); + deps.accounts + .insert_account(key_pair.secret().clone(), &"password".into()) + .unwrap(); - // execute signing request - let signing_request = r#"{"jsonrpc": "2.0", "method": "secretstore_signRawHash", "params":[ + // execute signing request + let signing_request = r#"{"jsonrpc": "2.0", "method": "secretstore_signRawHash", "params":[ "0x00dfE63B22312ab4329aD0d28CaD8Af987A01932", "password", 
"0x0000000000000000000000000000000000000000000000000000000000000001" ], "id": 1}"#; - let signing_response = io.handle_request_sync(&signing_request).unwrap(); - let signing_response = signing_response.replace(r#"{"jsonrpc":"2.0","result":"0x"#, ""); - let signing_response = signing_response.replace(r#"","id":1}"#, ""); - let signature: Signature = signing_response.parse().unwrap(); + let signing_response = io.handle_request_sync(&signing_request).unwrap(); + let signing_response = signing_response.replace(r#"{"jsonrpc":"2.0","result":"0x"#, ""); + let signing_response = signing_response.replace(r#"","id":1}"#, ""); + let signature: Signature = signing_response.parse().unwrap(); - let hash = "0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap(); - assert!(verify_public(key_pair.public(), &signature, &hash).unwrap()); + let hash = "0000000000000000000000000000000000000000000000000000000000000001" + .parse() + .unwrap(); + assert!(verify_public(key_pair.public(), &signature, &hash).unwrap()); } #[test] fn rpc_secretstore_generate_document_key() { - let deps = Dependencies::new(); - let io = deps.default_client(); + let deps = Dependencies::new(); + let io = deps.default_client(); - // insert new account - let secret = "82758356bf46b42710d3946a8efa612b7bf5e125e4d49f28facf1139db4a46f4".parse().unwrap(); - let key_pair = KeyPair::from_secret(secret).unwrap(); - deps.accounts.insert_account(key_pair.secret().clone(), &"password".into()).unwrap(); + // insert new account + let secret = "82758356bf46b42710d3946a8efa612b7bf5e125e4d49f28facf1139db4a46f4" + .parse() + .unwrap(); + let key_pair = KeyPair::from_secret(secret).unwrap(); + deps.accounts + .insert_account(key_pair.secret().clone(), &"password".into()) + .unwrap(); - // execute generation request - let generation_request = r#"{"jsonrpc": "2.0", "method": "secretstore_generateDocumentKey", "params":[ + // execute generation request + let generation_request = r#"{"jsonrpc": "2.0", 
"method": "secretstore_generateDocumentKey", "params":[ "0x00dfE63B22312ab4329aD0d28CaD8Af987A01932", "password", "0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91" ], "id": 1}"#; - let generation_response = io.handle_request_sync(&generation_request).unwrap(); - let generation_response = generation_response.replace(r#"{"jsonrpc":"2.0","result":"#, ""); - let generation_response = generation_response.replace(r#","id":1}"#, ""); - let generation_response: EncryptedDocumentKey = serde_json::from_str(&generation_response).unwrap(); + let generation_response = io.handle_request_sync(&generation_request).unwrap(); + let generation_response = generation_response.replace(r#"{"jsonrpc":"2.0","result":"#, ""); + let generation_response = generation_response.replace(r#","id":1}"#, ""); + let generation_response: EncryptedDocumentKey = + serde_json::from_str(&generation_response).unwrap(); - // the only thing we can check is that 'encrypted_key' can be decrypted by passed account - assert!(deps.accounts.decrypt( - "00dfE63B22312ab4329aD0d28CaD8Af987A01932".parse().unwrap(), - Some("password".into()), - &DEFAULT_MAC, - &generation_response.encrypted_key.0).is_ok()); + // the only thing we can check is that 'encrypted_key' can be decrypted by passed account + assert!(deps + .accounts + .decrypt( + "00dfE63B22312ab4329aD0d28CaD8Af987A01932".parse().unwrap(), + Some("password".into()), + &DEFAULT_MAC, + &generation_response.encrypted_key.0 + ) + .is_ok()); } diff --git a/rpc/src/v1/tests/mocked/signer.rs b/rpc/src/v1/tests/mocked/signer.rs index e22c5b8d2..24967d16b 100644 --- a/rpc/src/v1/tests/mocked/signer.rs +++ b/rpc/src/v1/tests/mocked/signer.rs @@ -14,546 +14,719 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use std::str::FromStr; -use ethereum_types::{H520, U256, Address}; use bytes::ToPretty; +use ethereum_types::{Address, H520, U256}; +use std::{str::FromStr, sync::Arc}; use accounts::AccountProvider; use ethcore::client::TestBlockChainClient; use parity_runtime::Runtime; use parking_lot::Mutex; use rlp::encode; -use types::transaction::{Transaction, Action, SignedTransaction}; +use types::transaction::{Action, SignedTransaction, Transaction}; -use serde_json; use jsonrpc_core::IoHandler; -use v1::{SignerClient, Signer, Origin}; -use v1::metadata::Metadata; -use v1::tests::helpers::TestMinerService; -use v1::types::Bytes as RpcBytes; -use v1::helpers::{nonce, FilledTransactionRequest, ConfirmationPayload}; -use v1::helpers::external_signer::{SigningQueue, SignerService}; -use v1::helpers::dispatch::{self, FullDispatcher, eth_data_hash}; +use serde_json; +use v1::{ + helpers::{ + dispatch::{self, eth_data_hash, FullDispatcher}, + external_signer::{SignerService, SigningQueue}, + nonce, ConfirmationPayload, FilledTransactionRequest, + }, + metadata::Metadata, + tests::helpers::TestMinerService, + types::Bytes as RpcBytes, + Origin, Signer, SignerClient, +}; struct SignerTester { - _runtime: Runtime, - signer: Arc, - accounts: Arc, - io: IoHandler, - miner: Arc, + _runtime: Runtime, + signer: Arc, + accounts: Arc, + io: IoHandler, + miner: Arc, } fn blockchain_client() -> Arc { - let client = TestBlockChainClient::new(); - Arc::new(client) + let client = TestBlockChainClient::new(); + Arc::new(client) } fn accounts_provider() -> Arc { - Arc::new(AccountProvider::transient_provider()) + Arc::new(AccountProvider::transient_provider()) } fn miner_service() -> Arc { - Arc::new(TestMinerService::default()) + Arc::new(TestMinerService::default()) } fn signer_tester() -> SignerTester { - let runtime = Runtime::with_thread_count(1); - let signer = Arc::new(SignerService::new_test(false)); - let accounts = accounts_provider(); - let account_signer = 
Arc::new(dispatch::Signer::new(accounts.clone())); - let client = blockchain_client(); - let miner = miner_service(); - let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); + let runtime = Runtime::with_thread_count(1); + let signer = Arc::new(SignerService::new_test(false)); + let accounts = accounts_provider(); + let account_signer = Arc::new(dispatch::Signer::new(accounts.clone())); + let client = blockchain_client(); + let miner = miner_service(); + let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); - let dispatcher = FullDispatcher::new(client, miner.clone(), reservations, 50); - let mut io = IoHandler::default(); - io.extend_with(SignerClient::new(account_signer, dispatcher, &signer, runtime.executor()).to_delegate()); + let dispatcher = FullDispatcher::new(client, miner.clone(), reservations, 50); + let mut io = IoHandler::default(); + io.extend_with( + SignerClient::new(account_signer, dispatcher, &signer, runtime.executor()).to_delegate(), + ); - SignerTester { - _runtime: runtime, - signer: signer, - accounts: accounts, - io: io, - miner: miner, - } + SignerTester { + _runtime: runtime, + signer: signer, + accounts: accounts, + io: io, + miner: miner, + } } #[test] fn should_return_list_of_items_to_confirm() { - // given - let tester = signer_tester(); - let _send_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest { - from: Address::from(1), - used_default_from: false, - to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - gas_price: U256::from(10_000), - gas: U256::from(10_000_000), - value: U256::from(1), - data: vec![], - nonce: None, - condition: None, - }), Origin::Unknown).unwrap(); - let _sign_future = tester.signer.add_request(ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), Origin::Unknown).unwrap(); + // given + let tester = signer_tester(); + let _send_future = tester + .signer + 
.add_request( + ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: Address::from(1), + used_default_from: false, + to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: None, + condition: None, + }), + Origin::Unknown, + ) + .unwrap(); + let _sign_future = tester + .signer + .add_request( + ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), + Origin::Unknown, + ) + .unwrap(); - // when - let request = r#"{"jsonrpc":"2.0","method":"signer_requestsToConfirm","params":[],"id":1}"#; - let response = concat!( - r#"{"jsonrpc":"2.0","result":["#, - r#"{"id":"0x1","origin":"unknown","payload":{"sendTransaction":{"condition":null,"data":"0x","from":"0x0000000000000000000000000000000000000001","gas":"0x989680","gasPrice":"0x2710","nonce":null,"to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","value":"0x1"}}},"#, - r#"{"id":"0x2","origin":"unknown","payload":{"sign":{"address":"0x0000000000000000000000000000000000000001","data":"0x05"}}}"#, - r#"],"id":1}"# - ); + // when + let request = r#"{"jsonrpc":"2.0","method":"signer_requestsToConfirm","params":[],"id":1}"#; + let response = concat!( + r#"{"jsonrpc":"2.0","result":["#, + r#"{"id":"0x1","origin":"unknown","payload":{"sendTransaction":{"condition":null,"data":"0x","from":"0x0000000000000000000000000000000000000001","gas":"0x989680","gasPrice":"0x2710","nonce":null,"to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","value":"0x1"}}},"#, + r#"{"id":"0x2","origin":"unknown","payload":{"sign":{"address":"0x0000000000000000000000000000000000000001","data":"0x05"}}}"#, + r#"],"id":1}"# + ); - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); } #[test] fn should_reject_transaction_from_queue_without_dispatching() { - // 
given - let tester = signer_tester(); - let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest { - from: Address::from(1), - used_default_from: false, - to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - gas_price: U256::from(10_000), - gas: U256::from(10_000_000), - value: U256::from(1), - data: vec![], - nonce: None, - condition: None, - }), Origin::Unknown).unwrap(); - assert_eq!(tester.signer.requests().len(), 1); + // given + let tester = signer_tester(); + let _confirmation_future = tester + .signer + .add_request( + ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: Address::from(1), + used_default_from: false, + to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: None, + condition: None, + }), + Origin::Unknown, + ) + .unwrap(); + assert_eq!(tester.signer.requests().len(), 1); - // when - let request = r#"{"jsonrpc":"2.0","method":"signer_rejectRequest","params":["0x1"],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + // when + let request = r#"{"jsonrpc":"2.0","method":"signer_rejectRequest","params":["0x1"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 0); - assert_eq!(tester.miner.imported_transactions.lock().len(), 0); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.miner.imported_transactions.lock().len(), 0); } #[test] fn should_not_remove_transaction_if_password_is_invalid() { - // given - let tester = signer_tester(); - let _confirmation_future = 
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest { - from: Address::from(1), - used_default_from: false, - to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - gas_price: U256::from(10_000), - gas: U256::from(10_000_000), - value: U256::from(1), - data: vec![], - nonce: None, - condition: None, - }), Origin::Unknown).unwrap(); - assert_eq!(tester.signer.requests().len(), 1); + // given + let tester = signer_tester(); + let _confirmation_future = tester + .signer + .add_request( + ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: Address::from(1), + used_default_from: false, + to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: None, + condition: None, + }), + Origin::Unknown, + ) + .unwrap(); + assert_eq!(tester.signer.requests().len(), 1); - // when - let request = r#"{"jsonrpc":"2.0","method":"signer_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidAccount)"},"id":1}"#; + // when + let request = + r#"{"jsonrpc":"2.0","method":"signer_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidAccount)"},"id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 1); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 1); } #[test] fn should_not_remove_sign_if_password_is_invalid() { - // given - let tester = signer_tester(); - let _confirmation_future = 
tester.signer.add_request(ConfirmationPayload::EthSignMessage(0.into(), vec![5].into()), Origin::Unknown).unwrap(); - assert_eq!(tester.signer.requests().len(), 1); + // given + let tester = signer_tester(); + let _confirmation_future = tester + .signer + .add_request( + ConfirmationPayload::EthSignMessage(0.into(), vec![5].into()), + Origin::Unknown, + ) + .unwrap(); + assert_eq!(tester.signer.requests().len(), 1); - // when - let request = r#"{"jsonrpc":"2.0","method":"signer_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidAccount)"},"id":1}"#; + // when + let request = + r#"{"jsonrpc":"2.0","method":"signer_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidAccount)"},"id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 1); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 1); } #[test] fn should_confirm_transaction_and_dispatch() { - //// given - let tester = signer_tester(); - let address = tester.accounts.new_account(&"test".into()).unwrap(); - let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); - let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest { - from: address, - used_default_from: false, - to: Some(recipient), - gas_price: U256::from(10_000), - gas: U256::from(10_000_000), - value: U256::from(1), - data: vec![], - nonce: None, - condition: None, - }), Origin::Unknown).unwrap(); + //// given + let tester = signer_tester(); + let address = 
tester.accounts.new_account(&"test".into()).unwrap(); + let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); + let _confirmation_future = tester + .signer + .add_request( + ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: address, + used_default_from: false, + to: Some(recipient), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: None, + condition: None, + }), + Origin::Unknown, + ) + .unwrap(); - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(0x1000), - gas: U256::from(0x50505), - action: Action::Call(recipient), - value: U256::from(0x1), - data: vec![] - }; - tester.accounts.unlock_account_temporarily(address, "test".into()).unwrap(); - let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x1000), + gas: U256::from(0x50505), + action: Action::Call(recipient), + value: U256::from(0x1), + data: vec![], + }; + tester + .accounts + .unlock_account_temporarily(address, "test".into()) + .unwrap(); + let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap(); + let t = t.with_signature(signature, None); - assert_eq!(tester.signer.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc":"2.0", "method":"signer_confirmRequest", "params":["0x1", {"gasPrice":"0x1000","gas":"0x50505"}, "test"], "id":1 }"#; - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:x}", t.hash()).as_ref() + r#"","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", t.hash()).as_ref() + + r#"","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 0); - 
assert_eq!(tester.miner.imported_transactions.lock().len(), 1); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.miner.imported_transactions.lock().len(), 1); } #[test] fn should_alter_the_sender_and_nonce() { - //// given - let tester = signer_tester(); - let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); - let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest { - from: 0.into(), - used_default_from: false, - to: Some(recipient), - gas_price: U256::from(10_000), - gas: U256::from(10_000_000), - value: U256::from(1), - data: vec![], - nonce: Some(10.into()), - condition: None, - }), Origin::Unknown).unwrap(); + //// given + let tester = signer_tester(); + let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); + let _confirmation_future = tester + .signer + .add_request( + ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: 0.into(), + used_default_from: false, + to: Some(recipient), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: Some(10.into()), + condition: None, + }), + Origin::Unknown, + ) + .unwrap(); - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(0x1000), - gas: U256::from(0x50505), - action: Action::Call(recipient), - value: U256::from(0x1), - data: vec![] - }; + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x1000), + gas: U256::from(0x50505), + action: Action::Call(recipient), + value: U256::from(0x1), + data: vec![], + }; - let address = tester.accounts.new_account(&"test".into()).unwrap(); - let signature = tester.accounts.sign(address, Some("test".into()), t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); + let address = 
tester.accounts.new_account(&"test".into()).unwrap(); + let signature = tester + .accounts + .sign(address, Some("test".into()), t.hash(None)) + .unwrap(); + let t = t.with_signature(signature, None); - assert_eq!(tester.signer.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc":"2.0", "method":"signer_confirmRequest", - "params":["0x1", {"sender":""#.to_owned() - + &format!("0x{:x}", address) - + r#"","gasPrice":"0x1000","gas":"0x50505"}, "test"], + "params":["0x1", {"sender":""# + .to_owned() + + &format!("0x{:x}", address) + + r#"","gasPrice":"0x1000","gas":"0x50505"}, "test"], "id":1 }"#; - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + &format!("0x{:x}", t.hash()) + r#"","id":1}"#; + let response = + r#"{"jsonrpc":"2.0","result":""#.to_owned() + &format!("0x{:x}", t.hash()) + r#"","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 0); - assert_eq!(tester.miner.imported_transactions.lock().len(), 1); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.miner.imported_transactions.lock().len(), 1); } #[test] fn should_confirm_transaction_with_token() { - // given - let tester = signer_tester(); - let address = tester.accounts.new_account(&"test".into()).unwrap(); - let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); - let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest { - from: address, - used_default_from: false, - to: Some(recipient), - gas_price: U256::from(10_000), - gas: U256::from(10_000_000), - value: U256::from(1), - data: vec![], - nonce: None, - condition: None, - }), Origin::Unknown).unwrap(); + // given + let tester = signer_tester(); + let 
address = tester.accounts.new_account(&"test".into()).unwrap(); + let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); + let _confirmation_future = tester + .signer + .add_request( + ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: address, + used_default_from: false, + to: Some(recipient), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: None, + condition: None, + }), + Origin::Unknown, + ) + .unwrap(); - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(0x1000), - gas: U256::from(10_000_000), - action: Action::Call(recipient), - value: U256::from(0x1), - data: vec![] - }; - let (signature, token) = tester.accounts.sign_with_token(address, "test".into(), t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x1000), + gas: U256::from(10_000_000), + action: Action::Call(recipient), + value: U256::from(0x1), + data: vec![], + }; + let (signature, token) = tester + .accounts + .sign_with_token(address, "test".into(), t.hash(None)) + .unwrap(); + let t = t.with_signature(signature, None); - assert_eq!(tester.signer.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc":"2.0", "method":"signer_confirmRequestWithToken", - "params":["0x1", {"gasPrice":"0x1000"}, ""#.to_owned() + token.as_str() + r#""], + "params":["0x1", {"gasPrice":"0x1000"}, ""# + .to_owned() + + token.as_str() + + r#""], "id":1 }"#; - let response = r#"{"jsonrpc":"2.0","result":{"result":""#.to_owned() + - format!("0x{:x}", t.hash()).as_ref() + - r#"","token":""#; + let response = r#"{"jsonrpc":"2.0","result":{"result":""#.to_owned() + + format!("0x{:x}", t.hash()).as_ref() + + r#"","token":""#; - // then - let result = tester.io.handle_request_sync(&request).unwrap(); - 
assert!(result.starts_with(&response), "Should return correct result. Expected: {:?}, Got: {:?}", response, result); - assert_eq!(tester.signer.requests().len(), 0); - assert_eq!(tester.miner.imported_transactions.lock().len(), 1); + // then + let result = tester.io.handle_request_sync(&request).unwrap(); + assert!( + result.starts_with(&response), + "Should return correct result. Expected: {:?}, Got: {:?}", + response, + result + ); + assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.miner.imported_transactions.lock().len(), 1); } #[test] fn should_confirm_transaction_with_rlp() { - // given - let tester = signer_tester(); - let address = tester.accounts.new_account(&"test".into()).unwrap(); - let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); - let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest { - from: address, - used_default_from: false, - to: Some(recipient), - gas_price: U256::from(10_000), - gas: U256::from(10_000_000), - value: U256::from(1), - data: vec![], - nonce: None, - condition: None, - }), Origin::Unknown).unwrap(); + // given + let tester = signer_tester(); + let address = tester.accounts.new_account(&"test".into()).unwrap(); + let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); + let _confirmation_future = tester + .signer + .add_request( + ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: address, + used_default_from: false, + to: Some(recipient), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: None, + condition: None, + }), + Origin::Unknown, + ) + .unwrap(); - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(0x1000), - gas: U256::from(10_000_000), - action: Action::Call(recipient), - value: U256::from(0x1), - data: vec![] - }; - let signature = tester.accounts.sign(address, 
Some("test".into()), t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); - let rlp = encode(&t); + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x1000), + gas: U256::from(10_000_000), + action: Action::Call(recipient), + value: U256::from(0x1), + data: vec![], + }; + let signature = tester + .accounts + .sign(address, Some("test".into()), t.hash(None)) + .unwrap(); + let t = t.with_signature(signature, None); + let rlp = encode(&t); - assert_eq!(tester.signer.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc":"2.0", "method":"signer_confirmRequestRaw", - "params":["0x1", "0x"#.to_owned() + &rlp.to_hex() + r#""], + "params":["0x1", "0x"# + .to_owned() + + &rlp.to_hex() + + r#""], "id":1 }"#; - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:x}", t.hash()).as_ref() + r#"","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", t.hash()).as_ref() + + r#"","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 0); - assert_eq!(tester.miner.imported_transactions.lock().len(), 1); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.miner.imported_transactions.lock().len(), 1); } #[test] fn should_return_error_when_sender_does_not_match() { - // given - let tester = signer_tester(); - let address = tester.accounts.new_account(&"test".into()).unwrap(); - let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); - let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest { - from: Address::default(), - used_default_from: false, - to: Some(recipient), - gas_price: U256::from(10_000), - gas: 
U256::from(10_000_000), - value: U256::from(1), - data: vec![], - nonce: None, - condition: None, - }), Origin::Unknown).unwrap(); + // given + let tester = signer_tester(); + let address = tester.accounts.new_account(&"test".into()).unwrap(); + let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); + let _confirmation_future = tester + .signer + .add_request( + ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: Address::default(), + used_default_from: false, + to: Some(recipient), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: None, + condition: None, + }), + Origin::Unknown, + ) + .unwrap(); - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(0x1000), - gas: U256::from(10_000_000), - action: Action::Call(recipient), - value: U256::from(0x1), - data: vec![] - }; - tester.accounts.unlock_account_temporarily(address, "test".into()).unwrap(); - let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); - let rlp = encode(&t); + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x1000), + gas: U256::from(10_000_000), + action: Action::Call(recipient), + value: U256::from(0x1), + data: vec![], + }; + tester + .accounts + .unlock_account_temporarily(address, "test".into()) + .unwrap(); + let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap(); + let t = t.with_signature(signature, None); + let rlp = encode(&t); - assert_eq!(tester.signer.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc":"2.0", "method":"signer_confirmRequestRaw", - "params":["0x1", "0x"#.to_owned() + &rlp.to_hex() + r#""], + "params":["0x1", "0x"# + .to_owned() + + &rlp.to_hex() + + r#""], "id":1 }"#; - let response = 
r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Couldn't parse parameters: Sent transaction does not match the request.","data":"[\"from\"]"},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Couldn't parse parameters: Sent transaction does not match the request.","data":"[\"from\"]"},"id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 1); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 1); } #[test] fn should_confirm_sign_transaction_with_rlp() { - // given - let tester = signer_tester(); - let address = tester.accounts.new_account(&"test".into()).unwrap(); - let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); - let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SignTransaction(FilledTransactionRequest { - from: address, - used_default_from: false, - to: Some(recipient), - gas_price: U256::from(10_000), - gas: U256::from(10_000_000), - value: U256::from(1), - data: vec![], - nonce: None, - condition: None, - }), Origin::Unknown).unwrap(); - assert_eq!(tester.signer.requests().len(), 1); + // given + let tester = signer_tester(); + let address = tester.accounts.new_account(&"test".into()).unwrap(); + let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); + let _confirmation_future = tester + .signer + .add_request( + ConfirmationPayload::SignTransaction(FilledTransactionRequest { + from: address, + used_default_from: false, + to: Some(recipient), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: None, + condition: None, + }), + Origin::Unknown, + ) + .unwrap(); + assert_eq!(tester.signer.requests().len(), 1); - let t = Transaction { - nonce: U256::zero(), - gas_price: 
U256::from(0x1000), - gas: U256::from(10_000_000), - action: Action::Call(recipient), - value: U256::from(0x1), - data: vec![] - }; - let signature = tester.accounts.sign(address, Some("test".into()), t.hash(None)).unwrap(); - let t = SignedTransaction::new(t.with_signature(signature.clone(), None)).unwrap(); - let rlp = encode(&t); + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x1000), + gas: U256::from(10_000_000), + action: Action::Call(recipient), + value: U256::from(0x1), + data: vec![], + }; + let signature = tester + .accounts + .sign(address, Some("test".into()), t.hash(None)) + .unwrap(); + let t = SignedTransaction::new(t.with_signature(signature.clone(), None)).unwrap(); + let rlp = encode(&t); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc":"2.0", "method":"signer_confirmRequestRaw", - "params":["0x1", "0x"#.to_owned() + &rlp.to_hex() + r#""], + "params":["0x1", "0x"# + .to_owned() + + &rlp.to_hex() + + r#""], "id":1 }"#; - let response = r#"{"jsonrpc":"2.0","result":{"#.to_owned() + - r#""raw":"0x"# + &rlp.to_hex() + r#"","# + - r#""tx":{"# + - r#""blockHash":null,"blockNumber":null,"# + - &format!("\"chainId\":{},", t.chain_id().map_or("null".to_owned(), |n| format!("{}", n))) + - r#""condition":null,"creates":null,"# + - &format!("\"from\":\"0x{:x}\",", &address) + - r#""gas":"0x989680","gasPrice":"0x1000","# + - &format!("\"hash\":\"0x{:x}\",", t.hash()) + - r#""input":"0x","# + - r#""nonce":"0x0","# + - &format!("\"publicKey\":\"0x{:x}\",", t.public_key().unwrap()) + - &format!("\"r\":\"0x{:x}\",", U256::from(signature.r())) + - &format!("\"raw\":\"0x{}\",", rlp.to_hex()) + - &format!("\"s\":\"0x{:x}\",", U256::from(signature.s())) + - &format!("\"standardV\":\"0x{:x}\",", U256::from(t.standard_v())) + - r#""to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionIndex":null,"# + - &format!("\"v\":\"0x{:x}\",", U256::from(t.original_v())) + - r#""value":"0x1""# + - r#"}},"id":1}"#; + let 
response = r#"{"jsonrpc":"2.0","result":{"#.to_owned() + + r#""raw":"0x"# + + &rlp.to_hex() + + r#"","# + + r#""tx":{"# + + r#""blockHash":null,"blockNumber":null,"# + + &format!( + "\"chainId\":{},", + t.chain_id().map_or("null".to_owned(), |n| format!("{}", n)) + ) + + r#""condition":null,"creates":null,"# + + &format!("\"from\":\"0x{:x}\",", &address) + + r#""gas":"0x989680","gasPrice":"0x1000","# + + &format!("\"hash\":\"0x{:x}\",", t.hash()) + + r#""input":"0x","# + + r#""nonce":"0x0","# + + &format!("\"publicKey\":\"0x{:x}\",", t.public_key().unwrap()) + + &format!("\"r\":\"0x{:x}\",", U256::from(signature.r())) + + &format!("\"raw\":\"0x{}\",", rlp.to_hex()) + + &format!("\"s\":\"0x{:x}\",", U256::from(signature.s())) + + &format!("\"standardV\":\"0x{:x}\",", U256::from(t.standard_v())) + + r#""to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionIndex":null,"# + + &format!("\"v\":\"0x{:x}\",", U256::from(t.original_v())) + + r#""value":"0x1""# + + r#"}},"id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 0); - assert_eq!(tester.miner.imported_transactions.lock().len(), 0); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.miner.imported_transactions.lock().len(), 0); } #[test] fn should_confirm_data_sign_with_signature() { - // given - let tester = signer_tester(); - let address = tester.accounts.new_account(&"test".into()).unwrap(); - let _confirmation_future = tester.signer.add_request(ConfirmationPayload::EthSignMessage( - address, - vec![1, 2, 3, 4].into(), - ), Origin::Unknown).unwrap(); - assert_eq!(tester.signer.requests().len(), 1); + // given + let tester = signer_tester(); + let address = tester.accounts.new_account(&"test".into()).unwrap(); + let _confirmation_future = tester + .signer + .add_request( + 
ConfirmationPayload::EthSignMessage(address, vec![1, 2, 3, 4].into()), + Origin::Unknown, + ) + .unwrap(); + assert_eq!(tester.signer.requests().len(), 1); - let data_hash = eth_data_hash(vec![1, 2, 3, 4].into()); - let signature = H520(tester.accounts.sign(address, Some("test".into()), data_hash).unwrap().into_electrum()); - let signature = format!("{:?}", signature); + let data_hash = eth_data_hash(vec![1, 2, 3, 4].into()); + let signature = H520( + tester + .accounts + .sign(address, Some("test".into()), data_hash) + .unwrap() + .into_electrum(), + ); + let signature = format!("{:?}", signature); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc":"2.0", "method":"signer_confirmRequestRaw", - "params":["0x1", ""#.to_owned() + &signature + r#""], + "params":["0x1", ""# + .to_owned() + + &signature + + r#""], "id":1 }"#; - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + &signature + r#"","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + &signature + r#"","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 0); - assert_eq!(tester.miner.imported_transactions.lock().len(), 0); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.miner.imported_transactions.lock().len(), 0); } #[test] fn should_confirm_decrypt_with_phrase() { - // given - let tester = signer_tester(); - let address = tester.accounts.new_account(&"test".into()).unwrap(); - let _confirmation_future = tester.signer.add_request(ConfirmationPayload::Decrypt( - address, - vec![1, 2, 3, 4].into(), - ), Origin::Unknown).unwrap(); - assert_eq!(tester.signer.requests().len(), 1); + // given + let tester = signer_tester(); + let address = tester.accounts.new_account(&"test".into()).unwrap(); + let _confirmation_future = tester + .signer + 
.add_request( + ConfirmationPayload::Decrypt(address, vec![1, 2, 3, 4].into()), + Origin::Unknown, + ) + .unwrap(); + assert_eq!(tester.signer.requests().len(), 1); - let decrypted = serde_json::to_string(&RpcBytes::new(b"phrase".to_vec())).unwrap(); + let decrypted = serde_json::to_string(&RpcBytes::new(b"phrase".to_vec())).unwrap(); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc":"2.0", "method":"signer_confirmRequestRaw", - "params":["0x1", "#.to_owned() + &decrypted + r#"], + "params":["0x1", "# + .to_owned() + + &decrypted + + r#"], "id":1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"#.to_owned() + &decrypted + r#","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"#.to_owned() + &decrypted + r#","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 0); - assert_eq!(tester.miner.imported_transactions.lock().len(), 0); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.miner.imported_transactions.lock().len(), 0); } #[test] fn should_generate_new_token() { - // given - let tester = signer_tester(); + // given + let tester = signer_tester(); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc":"2.0", "method":"signer_generateAuthorizationToken", "params":[], "id":1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"new_token","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"new_token","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); } diff --git a/rpc/src/v1/tests/mocked/signing.rs b/rpc/src/v1/tests/mocked/signing.rs index 39385d19b..339cc545c 100644 --- a/rpc/src/v1/tests/mocked/signing.rs +++ b/rpc/src/v1/tests/mocked/signing.rs @@ 
-14,263 +14,318 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::thread; -use std::str::FromStr; -use std::sync::Arc; -use std::time::Duration; use rlp; +use std::{str::FromStr, sync::Arc, thread, time::Duration}; -use jsonrpc_core::{IoHandler, Success}; -use jsonrpc_core::futures::Future; -use v1::impls::SigningQueueClient; -use v1::metadata::Metadata; -use v1::traits::{EthSigning, ParitySigning, Parity}; -use v1::helpers::{nonce, dispatch, FullDispatcher}; -use v1::helpers::external_signer::{SignerService, SigningQueue}; -use v1::types::{ConfirmationResponse, RichRawTransaction}; -use v1::tests::helpers::TestMinerService; -use v1::tests::mocked::parity; +use jsonrpc_core::{futures::Future, IoHandler, Success}; +use v1::{ + helpers::{ + dispatch, + external_signer::{SignerService, SigningQueue}, + nonce, FullDispatcher, + }, + impls::SigningQueueClient, + metadata::Metadata, + tests::{helpers::TestMinerService, mocked::parity}, + traits::{EthSigning, Parity, ParitySigning}, + types::{ConfirmationResponse, RichRawTransaction}, +}; use accounts::AccountProvider; use bytes::ToPretty; -use ethereum_types::{U256, Address}; use ethcore::client::TestBlockChainClient; +use ethereum_types::{Address, U256}; use ethkey::Secret; use ethstore::ethkey::{Generator, Random}; +use parity_runtime::{Executor, Runtime}; use parking_lot::Mutex; use serde_json; -use types::transaction::{Transaction, Action, SignedTransaction}; -use parity_runtime::{Runtime, Executor}; +use types::transaction::{Action, SignedTransaction, Transaction}; struct SigningTester { - pub runtime: Runtime, - pub signer: Arc, - pub client: Arc, - pub miner: Arc, - pub accounts: Arc, - pub io: IoHandler, + pub runtime: Runtime, + pub signer: Arc, + pub client: Arc, + pub miner: Arc, + pub accounts: Arc, + pub io: IoHandler, } impl Default for SigningTester { - fn default() -> Self { - let runtime = Runtime::with_thread_count(1); - let 
signer = Arc::new(SignerService::new_test(false)); - let client = Arc::new(TestBlockChainClient::default()); - let miner = Arc::new(TestMinerService::default()); - let accounts = Arc::new(AccountProvider::transient_provider()); - let account_signer = Arc::new(dispatch::Signer::new(accounts.clone())) as _; - let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); - let mut io = IoHandler::default(); + fn default() -> Self { + let runtime = Runtime::with_thread_count(1); + let signer = Arc::new(SignerService::new_test(false)); + let client = Arc::new(TestBlockChainClient::default()); + let miner = Arc::new(TestMinerService::default()); + let accounts = Arc::new(AccountProvider::transient_provider()); + let account_signer = Arc::new(dispatch::Signer::new(accounts.clone())) as _; + let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); + let mut io = IoHandler::default(); - let dispatcher = FullDispatcher::new(client.clone(), miner.clone(), reservations, 50); + let dispatcher = FullDispatcher::new(client.clone(), miner.clone(), reservations, 50); - let executor = Executor::new_thread_per_future(); + let executor = Executor::new_thread_per_future(); - let rpc = SigningQueueClient::new(&signer, dispatcher.clone(), executor.clone(), &account_signer); - io.extend_with(EthSigning::to_delegate(rpc)); - let rpc = SigningQueueClient::new(&signer, dispatcher, executor, &account_signer); - io.extend_with(ParitySigning::to_delegate(rpc)); + let rpc = SigningQueueClient::new( + &signer, + dispatcher.clone(), + executor.clone(), + &account_signer, + ); + io.extend_with(EthSigning::to_delegate(rpc)); + let rpc = SigningQueueClient::new(&signer, dispatcher, executor, &account_signer); + io.extend_with(ParitySigning::to_delegate(rpc)); - SigningTester { - runtime, - signer: signer, - client: client, - miner: miner, - accounts: accounts, - io: io, - } - } + SigningTester { + runtime, + signer: signer, + client: client, + 
miner: miner, + accounts: accounts, + io: io, + } + } } fn eth_signing() -> SigningTester { - SigningTester::default() + SigningTester::default() } #[test] fn rpc_eth_sign() { - use rustc_hex::FromHex; + use rustc_hex::FromHex; - let tester = eth_signing(); + let tester = eth_signing(); - let account = tester.accounts.insert_account(Secret::from([69u8; 32]), &"abcd".into()).unwrap(); - tester.accounts.unlock_account_permanently(account, "abcd".into()).unwrap(); - let _message = "0cc175b9c0f1b6a831c399e26977266192eb5ffee6ae2fec3ad71c777531578f".from_hex().unwrap(); + let account = tester + .accounts + .insert_account(Secret::from([69u8; 32]), &"abcd".into()) + .unwrap(); + tester + .accounts + .unlock_account_permanently(account, "abcd".into()) + .unwrap(); + let _message = "0cc175b9c0f1b6a831c399e26977266192eb5ffee6ae2fec3ad71c777531578f" + .from_hex() + .unwrap(); - let req = r#"{ + let req = r#"{ "jsonrpc": "2.0", "method": "eth_sign", "params": [ - ""#.to_owned() + &format!("0x{:x}", account) + r#"", + ""# + .to_owned() + + &format!("0x{:x}", account) + + r#"", "0x0cc175b9c0f1b6a831c399e26977266192eb5ffee6ae2fec3ad71c777531578f" ], "id": 1 }"#; - let res = r#"{"jsonrpc":"2.0","result":"0xa2870db1d0c26ef93c7b72d2a0830fa6b841e0593f7186bc6c7cc317af8cf3a42fda03bd589a49949aa05db83300cdb553116274518dbe9d90c65d0213f4af491b","id":1}"#; + let res = r#"{"jsonrpc":"2.0","result":"0xa2870db1d0c26ef93c7b72d2a0830fa6b841e0593f7186bc6c7cc317af8cf3a42fda03bd589a49949aa05db83300cdb553116274518dbe9d90c65d0213f4af491b","id":1}"#; - assert_eq!(tester.io.handle_request_sync(&req), Some(res.into())); + assert_eq!(tester.io.handle_request_sync(&req), Some(res.into())); } #[test] fn should_add_sign_to_queue() { - // given - let tester = eth_signing(); - let address = Address::random(); - assert_eq!(tester.signer.requests().len(), 0); + // given + let tester = eth_signing(); + let address = Address::random(); + assert_eq!(tester.signer.requests().len(), 0); - // when - let request = 
r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "eth_sign", "params": [ - ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "0x0000000000000000000000000000000000000000000000000000000000000005" ], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","id":1}"#; - // then - let promise = tester.io.handle_request(&request); + // then + let promise = tester.io.handle_request(&request); - // the future must be polled at least once before request is queued. - let signer = tester.signer.clone(); - ::std::thread::spawn(move || loop { - if signer.requests().len() == 1 { - // respond - let sender = signer.take(&1.into()).unwrap(); - signer.request_confirmed(sender, Ok(ConfirmationResponse::Signature(0.into()))); - break - } - ::std::thread::sleep(Duration::from_millis(100)) - }); + // the future must be polled at least once before request is queued. 
+ let signer = tester.signer.clone(); + ::std::thread::spawn(move || loop { + if signer.requests().len() == 1 { + // respond + let sender = signer.take(&1.into()).unwrap(); + signer.request_confirmed(sender, Ok(ConfirmationResponse::Signature(0.into()))); + break; + } + ::std::thread::sleep(Duration::from_millis(100)) + }); - let res = promise.wait().unwrap(); - assert_eq!(res, Some(response.to_owned())); + let res = promise.wait().unwrap(); + assert_eq!(res, Some(response.to_owned())); } #[test] fn should_post_sign_to_queue() { - // given - let tester = eth_signing(); - let address = Address::random(); - assert_eq!(tester.signer.requests().len(), 0); + // given + let tester = eth_signing(); + let address = Address::random(); + assert_eq!(tester.signer.requests().len(), 0); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "parity_postSign", "params": [ - ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "0x0000000000000000000000000000000000000000000000000000000000000005" ], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x1","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x1","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 1); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 1); } #[test] fn should_check_status_of_request() { - // given - let tester = eth_signing(); - let address = Address::random(); - let request = r#"{ + // given + let tester = eth_signing(); + let address = Address::random(); + let request = r#"{ "jsonrpc": "2.0", "method": "parity_postSign", "params": [ - ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", 
"0x0000000000000000000000000000000000000000000000000000000000000005" ], "id": 1 }"#; - tester.io.handle_request_sync(&request).expect("Sent"); + tester.io.handle_request_sync(&request).expect("Sent"); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "parity_checkRequest", "params": ["0x1"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); } #[test] fn should_check_status_of_request_when_its_resolved() { - // given - let tester = eth_signing(); - let address = Address::random(); - let request = r#"{ + // given + let tester = eth_signing(); + let address = Address::random(); + let request = r#"{ "jsonrpc": "2.0", "method": "parity_postSign", "params": [ - ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "0x0000000000000000000000000000000000000000000000000000000000000005" ], "id": 1 }"#; - tester.io.handle_request_sync(&request).expect("Sent"); - let sender = tester.signer.take(&1.into()).unwrap(); - tester.signer.request_confirmed(sender, Ok(ConfirmationResponse::Signature(1.into()))); + tester.io.handle_request_sync(&request).expect("Sent"); + let sender = tester.signer.take(&1.into()).unwrap(); + tester + .signer + .request_confirmed(sender, Ok(ConfirmationResponse::Signature(1.into()))); - // This is not ideal, but we need to give futures some time to be executed, and they need to run in a separate thread - thread::sleep(Duration::from_millis(20)); + // This is not ideal, but we need to give futures some time to be executed, and they need to run in a separate thread + thread::sleep(Duration::from_millis(20)); - // when - let request = r#"{ + // when + let request = 
r#"{ "jsonrpc": "2.0", "method": "parity_checkRequest", "params": ["0x1"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); } #[test] fn should_sign_if_account_is_unlocked() { - // given - let tester = eth_signing(); - let data = vec![5u8]; - let acc = tester.accounts.insert_account(Secret::from([69u8; 32]), &"test".into()).unwrap(); - tester.accounts.unlock_account_permanently(acc, "test".into()).unwrap(); + // given + let tester = eth_signing(); + let data = vec![5u8]; + let acc = tester + .accounts + .insert_account(Secret::from([69u8; 32]), &"test".into()) + .unwrap(); + tester + .accounts + .unlock_account_permanently(acc, "test".into()) + .unwrap(); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "eth_sign", "params": [ - ""#.to_owned() + format!("0x{:x}", acc).as_ref() + r#"", - ""# + format!("0x{}", data.to_hex()).as_ref() + r#"" + ""# + .to_owned() + + format!("0x{:x}", acc).as_ref() + + r#"", + ""# + format!("0x{}", data.to_hex()).as_ref() + + r#"" ], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0xdb53b32e56cf3e9735377b7664d6de5a03e125b1bf8ec55715d253668b4238503b4ac931fe6af90add73e72a585e952665376b2b9afc5b6b239b7df74c734e121b","id":1}"#; - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.signer.requests().len(), 0); + let response = 
r#"{"jsonrpc":"2.0","result":"0xdb53b32e56cf3e9735377b7664d6de5a03e125b1bf8ec55715d253668b4238503b4ac931fe6af90add73e72a585e952665376b2b9afc5b6b239b7df74c734e121b","id":1}"#; + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); + assert_eq!(tester.signer.requests().len(), 0); } #[test] fn should_add_transaction_to_queue() { - // given - let tester = eth_signing(); - let address = Address::random(); - assert_eq!(tester.signer.requests().len(), 0); + // given + let tester = eth_signing(); + let address = Address::random(); + assert_eq!(tester.signer.requests().len(), 0); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "eth_sendTransaction", "params": [{ - "from": ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + "from": ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", "gasPrice": "0x9184e72a000", @@ -278,41 +333,44 @@ fn should_add_transaction_to_queue() { }], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000000","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000000","id":1}"#; - // then - let promise = tester.io.handle_request(&request); + // then + let promise = tester.io.handle_request(&request); - // the future must be polled at least once before request is queued. - let signer = tester.signer.clone(); - ::std::thread::spawn(move || loop { - if signer.requests().len() == 1 { - // respond - let sender = signer.take(&1.into()).unwrap(); - signer.request_confirmed(sender, Ok(ConfirmationResponse::SendTransaction(0.into()))); - break - } - ::std::thread::sleep(Duration::from_millis(100)) - }); + // the future must be polled at least once before request is queued. 
+ let signer = tester.signer.clone(); + ::std::thread::spawn(move || loop { + if signer.requests().len() == 1 { + // respond + let sender = signer.take(&1.into()).unwrap(); + signer.request_confirmed(sender, Ok(ConfirmationResponse::SendTransaction(0.into()))); + break; + } + ::std::thread::sleep(Duration::from_millis(100)) + }); - let res = promise.wait().unwrap(); - assert_eq!(res, Some(response.to_owned())); + let res = promise.wait().unwrap(); + assert_eq!(res, Some(response.to_owned())); } #[test] fn should_add_sign_transaction_to_the_queue() { - // given - let tester = eth_signing(); - let address = tester.accounts.new_account(&"test".into()).unwrap(); + // given + let tester = eth_signing(); + let address = tester.accounts.new_account(&"test".into()).unwrap(); - assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "eth_signTransaction", "params": [{ - "from": ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + "from": ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", "gasPrice": "0x9184e72a000", @@ -321,87 +379,108 @@ fn should_add_sign_transaction_to_the_queue() { "id": 1 }"#; - let t = Transaction { - nonce: U256::one(), - gas_price: U256::from(0x9184e72a000u64), - gas: U256::from(0x76c0), - action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - value: U256::from(0x9184e72au64), - data: vec![] - }; - let signature = tester.accounts.sign(address, Some("test".into()), t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); - let t = SignedTransaction::new(t).unwrap(); - let signature = t.signature(); - let rlp = rlp::encode(&t); + let t = Transaction { + nonce: U256::one(), + gas_price: U256::from(0x9184e72a000u64), + gas: U256::from(0x76c0), + action: Action::Call( + 
Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + ), + value: U256::from(0x9184e72au64), + data: vec![], + }; + let signature = tester + .accounts + .sign(address, Some("test".into()), t.hash(None)) + .unwrap(); + let t = t.with_signature(signature, None); + let t = SignedTransaction::new(t).unwrap(); + let signature = t.signature(); + let rlp = rlp::encode(&t); - let response = r#"{"jsonrpc":"2.0","result":{"#.to_owned() + - r#""raw":"0x"# + &rlp.to_hex() + r#"","# + - r#""tx":{"# + - r#""blockHash":null,"blockNumber":null,"# + - &format!("\"chainId\":{},", t.chain_id().map_or("null".to_owned(), |n| format!("{}", n))) + - r#""condition":null,"creates":null,"# + - &format!("\"from\":\"0x{:x}\",", &address) + - r#""gas":"0x76c0","gasPrice":"0x9184e72a000","# + - &format!("\"hash\":\"0x{:x}\",", t.hash()) + - r#""input":"0x","# + - r#""nonce":"0x1","# + - &format!("\"publicKey\":\"0x{:x}\",", t.public_key().unwrap()) + - &format!("\"r\":\"0x{:x}\",", U256::from(signature.r())) + - &format!("\"raw\":\"0x{}\",", rlp.to_hex()) + - &format!("\"s\":\"0x{:x}\",", U256::from(signature.s())) + - &format!("\"standardV\":\"0x{:x}\",", U256::from(t.standard_v())) + - r#""to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionIndex":null,"# + - &format!("\"v\":\"0x{:x}\",", U256::from(t.original_v())) + - r#""value":"0x9184e72a""# + - r#"}},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"#.to_owned() + + r#""raw":"0x"# + + &rlp.to_hex() + + r#"","# + + r#""tx":{"# + + r#""blockHash":null,"blockNumber":null,"# + + &format!( + "\"chainId\":{},", + t.chain_id().map_or("null".to_owned(), |n| format!("{}", n)) + ) + + r#""condition":null,"creates":null,"# + + &format!("\"from\":\"0x{:x}\",", &address) + + r#""gas":"0x76c0","gasPrice":"0x9184e72a000","# + + &format!("\"hash\":\"0x{:x}\",", t.hash()) + + r#""input":"0x","# + + r#""nonce":"0x1","# + + &format!("\"publicKey\":\"0x{:x}\",", t.public_key().unwrap()) + + &format!("\"r\":\"0x{:x}\",", 
U256::from(signature.r())) + + &format!("\"raw\":\"0x{}\",", rlp.to_hex()) + + &format!("\"s\":\"0x{:x}\",", U256::from(signature.s())) + + &format!("\"standardV\":\"0x{:x}\",", U256::from(t.standard_v())) + + r#""to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionIndex":null,"# + + &format!("\"v\":\"0x{:x}\",", U256::from(t.original_v())) + + r#""value":"0x9184e72a""# + + r#"}},"id":1}"#; - // then - tester.miner.increment_nonce(&address); - let promise = tester.io.handle_request(&request); + // then + tester.miner.increment_nonce(&address); + let promise = tester.io.handle_request(&request); - // the future must be polled at least once before request is queued. - let signer = tester.signer.clone(); - ::std::thread::spawn(move || loop { - if signer.requests().len() == 1 { - // respond - let sender = signer.take(&1.into()).unwrap(); - signer.request_confirmed(sender, Ok(ConfirmationResponse::SignTransaction( - RichRawTransaction::from_signed(t.into()) - ))); - break - } - ::std::thread::sleep(Duration::from_millis(100)) - }); + // the future must be polled at least once before request is queued. 
+ let signer = tester.signer.clone(); + ::std::thread::spawn(move || loop { + if signer.requests().len() == 1 { + // respond + let sender = signer.take(&1.into()).unwrap(); + signer.request_confirmed( + sender, + Ok(ConfirmationResponse::SignTransaction( + RichRawTransaction::from_signed(t.into()), + )), + ); + break; + } + ::std::thread::sleep(Duration::from_millis(100)) + }); - let res = promise.wait().unwrap(); - assert_eq!(res, Some(response.to_owned())); + let res = promise.wait().unwrap(); + assert_eq!(res, Some(response.to_owned())); } #[test] fn should_dispatch_transaction_if_account_is_unlock() { - // given - let tester = eth_signing(); - let acc = tester.accounts.new_account(&"test".into()).unwrap(); - tester.accounts.unlock_account_permanently(acc, "test".into()).unwrap(); + // given + let tester = eth_signing(); + let acc = tester.accounts.new_account(&"test".into()).unwrap(); + tester + .accounts + .unlock_account_permanently(acc, "test".into()) + .unwrap(); - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(0x9184e72a000u64), - gas: U256::from(0x76c0), - action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - value: U256::from(0x9184e72au64), - data: vec![] - }; - let signature = tester.accounts.sign(acc, None, t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x9184e72a000u64), + gas: U256::from(0x76c0), + action: Action::Call( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + ), + value: U256::from(0x9184e72au64), + data: vec![], + }; + let signature = tester.accounts.sign(acc, None, t.hash(None)).unwrap(); + let t = t.with_signature(signature, None); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "eth_sendTransaction", "params": [{ - "from": ""#.to_owned() + format!("0x{:x}", acc).as_ref() + r#"", + "from": ""# + .to_owned() + + 
format!("0x{:x}", acc).as_ref() + + r#"", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", "gasPrice": "0x9184e72a000", @@ -409,102 +488,129 @@ fn should_dispatch_transaction_if_account_is_unlock() { }], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:x}", t.hash()).as_ref() + r#"","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", t.hash()).as_ref() + + r#"","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.to_owned()) + ); } #[test] fn should_decrypt_message_if_account_is_unlocked() { - // given - let mut tester = eth_signing(); - let parity = parity::Dependencies::new(); - tester.io.extend_with(parity.client(None).to_delegate()); - let (address, public) = tester.accounts.new_account_and_public(&"test".into()).unwrap(); - tester.accounts.unlock_account_permanently(address, "test".into()).unwrap(); + // given + let mut tester = eth_signing(); + let parity = parity::Dependencies::new(); + tester.io.extend_with(parity.client(None).to_delegate()); + let (address, public) = tester + .accounts + .new_account_and_public(&"test".into()) + .unwrap(); + tester + .accounts + .unlock_account_permanently(address, "test".into()) + .unwrap(); - // First encrypt message - let request = format!("{}0x{:x}{}", - r#"{"jsonrpc": "2.0", "method": "parity_encryptMessage", "params":[""#, - public, - r#"", "0x01020304"], "id": 1}"# - ); - let encrypted: Success = serde_json::from_str(&tester.io.handle_request_sync(&request).unwrap()).unwrap(); + // First encrypt message + let request = format!( + "{}0x{:x}{}", + r#"{"jsonrpc": "2.0", "method": "parity_encryptMessage", "params":[""#, + public, + r#"", "0x01020304"], "id": 1}"# + ); + let encrypted: Success = + serde_json::from_str(&tester.io.handle_request_sync(&request).unwrap()).unwrap(); - // then 
call decrypt - let request = format!("{}{:x}{}{}{}", - r#"{"jsonrpc": "2.0", "method": "parity_decryptMessage", "params":["0x"#, - address, - r#"","#, - encrypted.result, - r#"], "id": 1}"# - ); - println!("Request: {:?}", request); - let response = r#"{"jsonrpc":"2.0","result":"0x01020304","id":1}"#; + // then call decrypt + let request = format!( + "{}{:x}{}{}{}", + r#"{"jsonrpc": "2.0", "method": "parity_decryptMessage", "params":["0x"#, + address, + r#"","#, + encrypted.result, + r#"], "id": 1}"# + ); + println!("Request: {:?}", request); + let response = r#"{"jsonrpc":"2.0","result":"0x01020304","id":1}"#; - // then - assert_eq!(tester.io.handle_request_sync(&request), Some(response.into())); + // then + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.into()) + ); } #[test] fn should_add_decryption_to_the_queue() { - // given - let tester = eth_signing(); - let acc = Random.generate().unwrap(); - assert_eq!(tester.signer.requests().len(), 0); + // given + let tester = eth_signing(); + let acc = Random.generate().unwrap(); + assert_eq!(tester.signer.requests().len(), 0); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "parity_decryptMessage", - "params": ["0x"#.to_owned() + &format!("{:x}", acc.address()) + r#"", + "params": ["0x"# + .to_owned() + + &format!("{:x}", acc.address()) + + r#"", "0x012345"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":"0x0102","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0102","id":1}"#; - // then - let promise = tester.io.handle_request(&request); + // then + let promise = tester.io.handle_request(&request); - // the future must be polled at least once before request is queued. 
- let signer = tester.signer.clone(); - ::std::thread::spawn(move || loop { - if signer.requests().len() == 1 { - // respond - let sender = signer.take(&1.into()).unwrap(); - signer.request_confirmed(sender, Ok(ConfirmationResponse::Decrypt(vec![0x1, 0x2].into()))); - break - } - ::std::thread::sleep(Duration::from_millis(10)) - }); + // the future must be polled at least once before request is queued. + let signer = tester.signer.clone(); + ::std::thread::spawn(move || loop { + if signer.requests().len() == 1 { + // respond + let sender = signer.take(&1.into()).unwrap(); + signer.request_confirmed( + sender, + Ok(ConfirmationResponse::Decrypt(vec![0x1, 0x2].into())), + ); + break; + } + ::std::thread::sleep(Duration::from_millis(10)) + }); - // check response: will deadlock if unsuccessful. - let res = promise.wait().unwrap(); - assert_eq!(res, Some(response.to_owned())); + // check response: will deadlock if unsuccessful. + let res = promise.wait().unwrap(); + assert_eq!(res, Some(response.to_owned())); } #[test] fn should_compose_transaction() { - // given - let tester = eth_signing(); - let acc = Random.generate().unwrap(); - assert_eq!(tester.signer.requests().len(), 0); - let from = format!("{:x}", acc.address()); + // given + let tester = eth_signing(); + let acc = Random.generate().unwrap(); + assert_eq!(tester.signer.requests().len(), 0); + let from = format!("{:x}", acc.address()); - // when - let request = r#"{ + // when + let request = r#"{ "jsonrpc": "2.0", "method": "parity_composeTransaction", - "params": [{"from":"0x"#.to_owned() + &from + r#"","value":"0x5"}], + "params": [{"from":"0x"# + .to_owned() + + &from + + r#"","value":"0x5"}], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":{"condition":null,"data":"0x","from":"0x"#.to_owned() - + &from - + r#"","gas":"0x5208","gasPrice":"0x4a817c800","nonce":"0x0","to":null,"value":"0x5"},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"condition":null,"data":"0x","from":"0x"# + 
.to_owned() + + &from + + r#"","gas":"0x5208","gasPrice":"0x4a817c800","nonce":"0x0","to":null,"value":"0x5"},"id":1}"#; - // then - let res = tester.io.handle_request(&request).wait().unwrap(); - assert_eq!(res, Some(response.to_owned())); + // then + let res = tester.io.handle_request(&request).wait().unwrap(); + assert_eq!(res, Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/mocked/signing_unsafe.rs b/rpc/src/v1/tests/mocked/signing_unsafe.rs index a91a85ea1..4221d54a8 100644 --- a/rpc/src/v1/tests/mocked/signing_unsafe.rs +++ b/rpc/src/v1/tests/mocked/signing_unsafe.rs @@ -14,87 +14,101 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::str::FromStr; -use std::sync::Arc; +use std::{str::FromStr, sync::Arc}; use accounts::AccountProvider; use ethcore::client::TestBlockChainClient; -use ethereum_types::{U256, Address}; +use ethereum_types::{Address, U256}; use parity_runtime::Runtime; use parking_lot::Mutex; use rlp; use rustc_hex::ToHex; -use types::transaction::{Transaction, Action}; +use types::transaction::{Action, Transaction}; use jsonrpc_core::IoHandler; -use v1::{EthClientOptions, EthSigning, SigningUnsafeClient}; -use v1::helpers::nonce; -use v1::helpers::dispatch::{self, FullDispatcher}; -use v1::tests::helpers::{TestMinerService}; -use v1::metadata::Metadata; +use v1::{ + helpers::{ + dispatch::{self, FullDispatcher}, + nonce, + }, + metadata::Metadata, + tests::helpers::TestMinerService, + EthClientOptions, EthSigning, SigningUnsafeClient, +}; fn blockchain_client() -> Arc { - let client = TestBlockChainClient::new(); - Arc::new(client) + let client = TestBlockChainClient::new(); + Arc::new(client) } fn accounts_provider() -> Arc { - Arc::new(AccountProvider::transient_provider()) + Arc::new(AccountProvider::transient_provider()) } fn miner_service() -> Arc { - Arc::new(TestMinerService::default()) + Arc::new(TestMinerService::default()) } struct EthTester { - pub runtime: 
Runtime, - pub client: Arc, - pub accounts_provider: Arc, - pub miner: Arc, - pub io: IoHandler, + pub runtime: Runtime, + pub client: Arc, + pub accounts_provider: Arc, + pub miner: Arc, + pub io: IoHandler, } impl Default for EthTester { - fn default() -> Self { - Self::new_with_options(Default::default()) - } + fn default() -> Self { + Self::new_with_options(Default::default()) + } } impl EthTester { - pub fn new_with_options(options: EthClientOptions) -> Self { - let runtime = Runtime::with_thread_count(1); - let client = blockchain_client(); - let accounts_provider = accounts_provider(); - let ap = Arc::new(dispatch::Signer::new(accounts_provider.clone())) as _; - let miner = miner_service(); - let gas_price_percentile = options.gas_price_percentile; - let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); + pub fn new_with_options(options: EthClientOptions) -> Self { + let runtime = Runtime::with_thread_count(1); + let client = blockchain_client(); + let accounts_provider = accounts_provider(); + let ap = Arc::new(dispatch::Signer::new(accounts_provider.clone())) as _; + let miner = miner_service(); + let gas_price_percentile = options.gas_price_percentile; + let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor()))); - let dispatcher = FullDispatcher::new(client.clone(), miner.clone(), reservations, gas_price_percentile); - let sign = SigningUnsafeClient::new(&ap, dispatcher).to_delegate(); - let mut io: IoHandler = IoHandler::default(); - io.extend_with(sign); + let dispatcher = FullDispatcher::new( + client.clone(), + miner.clone(), + reservations, + gas_price_percentile, + ); + let sign = SigningUnsafeClient::new(&ap, dispatcher).to_delegate(); + let mut io: IoHandler = IoHandler::default(); + io.extend_with(sign); - EthTester { - runtime, - client, - miner, - io, - accounts_provider, - } - } + EthTester { + runtime, + client, + miner, + io, + accounts_provider, + } + } } #[test] fn 
rpc_eth_send_transaction() { - let tester = EthTester::default(); - let address = tester.accounts_provider.new_account(&"".into()).unwrap(); - tester.accounts_provider.unlock_account_permanently(address, "".into()).unwrap(); - let request = r#"{ + let tester = EthTester::default(); + let address = tester.accounts_provider.new_account(&"".into()).unwrap(); + tester + .accounts_provider + .unlock_account_permanently(address, "".into()) + .unwrap(); + let request = r#"{ "jsonrpc": "2.0", "method": "eth_sendTransaction", "params": [{ - "from": ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + "from": ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", "gasPrice": "0x9184e72a000", @@ -103,49 +117,69 @@ fn rpc_eth_send_transaction() { "id": 1 }"#; - let t = Transaction { - nonce: U256::zero(), - gas_price: U256::from(0x9184e72a000u64), - gas: U256::from(0x76c0), - action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - value: U256::from(0x9184e72au64), - data: vec![] - }; - let signature = tester.accounts_provider.sign(address, None, t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x9184e72a000u64), + gas: U256::from(0x76c0), + action: Action::Call( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + ), + value: U256::from(0x9184e72au64), + data: vec![], + }; + let signature = tester + .accounts_provider + .sign(address, None, t.hash(None)) + .unwrap(); + let t = t.with_signature(signature, None); - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:x}", t.hash()).as_ref() + r#"","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", t.hash()).as_ref() + + r#"","id":1}"#; - assert_eq!(tester.io.handle_request_sync(&request), Some(response)); + 
assert_eq!(tester.io.handle_request_sync(&request), Some(response)); - tester.miner.increment_nonce(&address); + tester.miner.increment_nonce(&address); - let t = Transaction { - nonce: U256::one(), - gas_price: U256::from(0x9184e72a000u64), - gas: U256::from(0x76c0), - action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - value: U256::from(0x9184e72au64), - data: vec![] - }; - let signature = tester.accounts_provider.sign(address, None, t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); + let t = Transaction { + nonce: U256::one(), + gas_price: U256::from(0x9184e72a000u64), + gas: U256::from(0x76c0), + action: Action::Call( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + ), + value: U256::from(0x9184e72au64), + data: vec![], + }; + let signature = tester + .accounts_provider + .sign(address, None, t.hash(None)) + .unwrap(); + let t = t.with_signature(signature, None); - let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:x}", t.hash()).as_ref() + r#"","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + + format!("0x{:x}", t.hash()).as_ref() + + r#"","id":1}"#; - assert_eq!(tester.io.handle_request_sync(&request), Some(response)); + assert_eq!(tester.io.handle_request_sync(&request), Some(response)); } #[test] fn rpc_eth_sign_transaction() { - let tester = EthTester::default(); - let address = tester.accounts_provider.new_account(&"".into()).unwrap(); - tester.accounts_provider.unlock_account_permanently(address, "".into()).unwrap(); - let request = r#"{ + let tester = EthTester::default(); + let address = tester.accounts_provider.new_account(&"".into()).unwrap(); + tester + .accounts_provider + .unlock_account_permanently(address, "".into()) + .unwrap(); + let request = r#"{ "jsonrpc": "2.0", "method": "eth_signTransaction", "params": [{ - "from": ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + "from": ""# + .to_owned() 
+ + format!("0x{:x}", address).as_ref() + + r#"", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", "gasPrice": "0x9184e72a000", @@ -154,54 +188,67 @@ fn rpc_eth_sign_transaction() { "id": 1 }"#; - let t = Transaction { - nonce: U256::one(), - gas_price: U256::from(0x9184e72a000u64), - gas: U256::from(0x76c0), - action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), - value: U256::from(0x9184e72au64), - data: vec![] - }; - let signature = tester.accounts_provider.sign(address, None, t.hash(None)).unwrap(); - let t = t.with_signature(signature, None); - let signature = t.signature(); - let rlp = rlp::encode(&t); + let t = Transaction { + nonce: U256::one(), + gas_price: U256::from(0x9184e72a000u64), + gas: U256::from(0x76c0), + action: Action::Call( + Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(), + ), + value: U256::from(0x9184e72au64), + data: vec![], + }; + let signature = tester + .accounts_provider + .sign(address, None, t.hash(None)) + .unwrap(); + let t = t.with_signature(signature, None); + let signature = t.signature(); + let rlp = rlp::encode(&t); - let response = r#"{"jsonrpc":"2.0","result":{"#.to_owned() + - r#""raw":"0x"# + &rlp.to_hex() + r#"","# + - r#""tx":{"# + - r#""blockHash":null,"blockNumber":null,"# + - &format!("\"chainId\":{},", t.chain_id().map_or("null".to_owned(), |n| format!("{}", n))) + - r#""condition":null,"creates":null,"# + - &format!("\"from\":\"0x{:x}\",", &address) + - r#""gas":"0x76c0","gasPrice":"0x9184e72a000","# + - &format!("\"hash\":\"0x{:x}\",", t.hash()) + - r#""input":"0x","# + - r#""nonce":"0x1","# + - &format!("\"publicKey\":\"0x{:x}\",", t.recover_public().unwrap()) + - &format!("\"r\":\"0x{:x}\",", U256::from(signature.r())) + - &format!("\"raw\":\"0x{}\",", rlp.to_hex()) + - &format!("\"s\":\"0x{:x}\",", U256::from(signature.s())) + - &format!("\"standardV\":\"0x{:x}\",", U256::from(t.standard_v())) + - 
r#""to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionIndex":null,"# + - &format!("\"v\":\"0x{:x}\",", U256::from(t.original_v())) + - r#""value":"0x9184e72a""# + - r#"}},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"#.to_owned() + + r#""raw":"0x"# + + &rlp.to_hex() + + r#"","# + + r#""tx":{"# + + r#""blockHash":null,"blockNumber":null,"# + + &format!( + "\"chainId\":{},", + t.chain_id().map_or("null".to_owned(), |n| format!("{}", n)) + ) + + r#""condition":null,"creates":null,"# + + &format!("\"from\":\"0x{:x}\",", &address) + + r#""gas":"0x76c0","gasPrice":"0x9184e72a000","# + + &format!("\"hash\":\"0x{:x}\",", t.hash()) + + r#""input":"0x","# + + r#""nonce":"0x1","# + + &format!("\"publicKey\":\"0x{:x}\",", t.recover_public().unwrap()) + + &format!("\"r\":\"0x{:x}\",", U256::from(signature.r())) + + &format!("\"raw\":\"0x{}\",", rlp.to_hex()) + + &format!("\"s\":\"0x{:x}\",", U256::from(signature.s())) + + &format!("\"standardV\":\"0x{:x}\",", U256::from(t.standard_v())) + + r#""to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionIndex":null,"# + + &format!("\"v\":\"0x{:x}\",", U256::from(t.original_v())) + + r#""value":"0x9184e72a""# + + r#"}},"id":1}"#; - tester.miner.increment_nonce(&address); + tester.miner.increment_nonce(&address); - assert_eq!(tester.io.handle_request_sync(&request), Some(response)); + assert_eq!(tester.io.handle_request_sync(&request), Some(response)); } #[test] fn rpc_eth_send_transaction_with_bad_to() { - let tester = EthTester::default(); - let address = tester.accounts_provider.new_account(&"".into()).unwrap(); - let request = r#"{ + let tester = EthTester::default(); + let address = tester.accounts_provider.new_account(&"".into()).unwrap(); + let request = r#"{ "jsonrpc": "2.0", "method": "eth_sendTransaction", "params": [{ - "from": ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + "from": ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "to": "", "gas": "0x76c0", 
"gasPrice": "0x9184e72a000", @@ -210,20 +257,26 @@ fn rpc_eth_send_transaction_with_bad_to() { "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid params: prefix is missing."},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid params: prefix is missing."},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(&request), Some(response.into())); + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.into()) + ); } #[test] fn rpc_eth_send_transaction_error() { - let tester = EthTester::default(); - let address = tester.accounts_provider.new_account(&"".into()).unwrap(); - let request = r#"{ + let tester = EthTester::default(); + let address = tester.accounts_provider.new_account(&"".into()).unwrap(); + let request = r#"{ "jsonrpc": "2.0", "method": "eth_sendTransaction", "params": [{ - "from": ""#.to_owned() + format!("0x{:x}", address).as_ref() + r#"", + "from": ""# + .to_owned() + + format!("0x{:x}", address).as_ref() + + r#"", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", "gasPrice": "0x9184e72a000", @@ -232,6 +285,9 @@ fn rpc_eth_send_transaction_error() { "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32020,"message":"Your account is locked. Unlock the account via CLI, personal_unlockAccount or use Trusted Signer.","data":"NotUnlocked"},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(&request), Some(response.into())); + let response = r#"{"jsonrpc":"2.0","error":{"code":-32020,"message":"Your account is locked. 
Unlock the account via CLI, personal_unlockAccount or use Trusted Signer.","data":"NotUnlocked"},"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(&request), + Some(response.into()) + ); } diff --git a/rpc/src/v1/tests/mocked/traces.rs b/rpc/src/v1/tests/mocked/traces.rs index 89cf198aa..35216a59b 100644 --- a/rpc/src/v1/tests/mocked/traces.rs +++ b/rpc/src/v1/tests/mocked/traces.rs @@ -16,229 +16,280 @@ use std::sync::Arc; -use ethcore::executed::{Executed, CallError}; -use ethcore::trace::trace::{Action, Res, Call}; -use ethcore::trace::LocalizedTrace; -use ethcore::client::TestBlockChainClient; +use ethcore::{ + client::TestBlockChainClient, + executed::{CallError, Executed}, + trace::{ + trace::{Action, Call, Res}, + LocalizedTrace, + }, +}; use vm::CallType; use jsonrpc_core::IoHandler; -use v1::tests::helpers::{TestMinerService}; -use v1::{Metadata, Traces, TracesClient}; +use v1::{tests::helpers::TestMinerService, Metadata, Traces, TracesClient}; struct Tester { - client: Arc, - _miner: Arc, - io: IoHandler, + client: Arc, + _miner: Arc, + io: IoHandler, } fn io() -> Tester { - let client = Arc::new(TestBlockChainClient::new()); - *client.traces.write() = Some(vec![LocalizedTrace { - action: Action::Call(Call { - from: 0xf.into(), - to: 0x10.into(), - value: 0x1.into(), - gas: 0x100.into(), - input: vec![1, 2, 3], - call_type: CallType::Call, - }), - result: Res::None, - subtraces: 0, - trace_address: vec![0], - transaction_number: Some(0), - transaction_hash: Some(5.into()), - block_number: 10, - block_hash: 10.into(), - }]); - *client.execution_result.write() = Some(Ok(Executed { - exception: None, - gas: 20_000.into(), - gas_used: 10_000.into(), - refunded: 0.into(), - cumulative_gas_used: 10_000.into(), - logs: vec![], - contracts_created: vec![], - output: vec![1, 2, 3], - trace: vec![], - vm_trace: None, - state_diff: None, - })); - let miner = Arc::new(TestMinerService::default()); - let traces = TracesClient::new(&client); - let mut io = 
IoHandler::default(); - io.extend_with(traces.to_delegate()); + let client = Arc::new(TestBlockChainClient::new()); + *client.traces.write() = Some(vec![LocalizedTrace { + action: Action::Call(Call { + from: 0xf.into(), + to: 0x10.into(), + value: 0x1.into(), + gas: 0x100.into(), + input: vec![1, 2, 3], + call_type: CallType::Call, + }), + result: Res::None, + subtraces: 0, + trace_address: vec![0], + transaction_number: Some(0), + transaction_hash: Some(5.into()), + block_number: 10, + block_hash: 10.into(), + }]); + *client.execution_result.write() = Some(Ok(Executed { + exception: None, + gas: 20_000.into(), + gas_used: 10_000.into(), + refunded: 0.into(), + cumulative_gas_used: 10_000.into(), + logs: vec![], + contracts_created: vec![], + output: vec![1, 2, 3], + trace: vec![], + vm_trace: None, + state_diff: None, + })); + let miner = Arc::new(TestMinerService::default()); + let traces = TracesClient::new(&client); + let mut io = IoHandler::default(); + io.extend_with(traces.to_delegate()); - Tester { - client: client, - _miner: miner, - io: io, - } + Tester { + client: client, + _miner: miner, + io: io, + } } #[test] fn rpc_trace_filter() { - let tester = io(); + let tester = io(); - let request = r#"{"jsonrpc":"2.0","method":"trace_filter","params": [{}],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":[{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"}],"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_filter","params": [{}],"id":1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":[{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"}],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_filter_missing_trace() { - let tester = io(); - *tester.client.traces.write() = None; + let tester = io(); + *tester.client.traces.write() = None; - let request = r#"{"jsonrpc":"2.0","method":"trace_filter","params": [{}],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_filter","params": [{}],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_block() { - let tester = io(); + let tester = io(); - let request = r#"{"jsonrpc":"2.0","method":"trace_block","params": ["0x10"],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":[{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"}],"id":1}"#; + let request = 
r#"{"jsonrpc":"2.0","method":"trace_block","params": ["0x10"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":[{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"}],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_block_missing_traces() { - let tester = io(); - *tester.client.traces.write() = None; + let tester = io(); + *tester.client.traces.write() = None; - let request = r#"{"jsonrpc":"2.0","method":"trace_block","params": ["0x10"],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_block","params": ["0x10"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_transaction() { - let tester = io(); + let tester = io(); - let request = r#"{"jsonrpc":"2.0","method":"trace_transaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005"],"id":1}"#; - let response = 
r#"{"jsonrpc":"2.0","result":[{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"}],"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_transaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":[{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"}],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_transaction_missing_trace() { - let tester = io(); - *tester.client.traces.write() = None; + let tester = io(); + *tester.client.traces.write() = None; - let request = r#"{"jsonrpc":"2.0","method":"trace_transaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005"],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_transaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), 
Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_get() { - let tester = io(); + let tester = io(); - let request = r#"{"jsonrpc":"2.0","method":"trace_get","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["0","0","0"]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"},"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_get","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["0","0","0"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_get_missing_trace() { - let tester = io(); - *tester.client.traces.write() = None; + let tester = io(); + *tester.client.traces.write() = None; - let request = 
r#"{"jsonrpc":"2.0","method":"trace_get","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["0","0","0"]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + let request = r#"{"jsonrpc":"2.0","method":"trace_get","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["0","0","0"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_call() { - let tester = io(); + let tester = io(); - let request = r#"{"jsonrpc":"2.0","method":"trace_call","params":[{}, ["stateDiff", "vmTrace", "trace"]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null},"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_call","params":[{}, ["stateDiff", "vmTrace", "trace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_multi_call() { - let tester = io(); + let tester = io(); - let request = r#"{"jsonrpc":"2.0","method":"trace_callMany","params":[[[{}, ["stateDiff", "vmTrace", "trace"]]]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":[{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null}],"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_callMany","params":[[[{}, ["stateDiff", "vmTrace", "trace"]]]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":[{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null}],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + 
assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_call_state_pruned() { - let tester = io(); - *tester.client.execution_result.write() = Some(Err(CallError::StatePruned)); + let tester = io(); + *tester.client.execution_result.write() = Some(Err(CallError::StatePruned)); - let request = r#"{"jsonrpc":"2.0","method":"trace_call","params":[{}, ["stateDiff", "vmTrace", "trace"]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"This request is not supported because your node is running with state pruning. Run with --pruning=archive."},"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_call","params":[{}, ["stateDiff", "vmTrace", "trace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"This request is not supported because your node is running with state pruning. Run with --pruning=archive."},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_raw_transaction() { - let tester = io(); + let tester = io(); - let request = r#"{"jsonrpc":"2.0","method":"trace_rawTransaction","params":["0xf869018609184e72a0008276c094d46e8dd67c5d32be8058bb8eb970870f07244567849184e72a801ba0617f39c1a107b63302449c476d96a6cb17a5842fc98ff0c5bcf4d5c4d8166b95a009fdb6097c6196b9bbafc3a59f02f38d91baeef23d0c60a8e4f23c7714cea3a9", ["stateDiff", "vmTrace", "trace"]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null},"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_rawTransaction","params":["0xf869018609184e72a0008276c094d46e8dd67c5d32be8058bb8eb970870f07244567849184e72a801ba0617f39c1a107b63302449c476d96a6cb17a5842fc98ff0c5bcf4d5c4d8166b95a009fdb6097c6196b9bbafc3a59f02f38d91baeef23d0c60a8e4f23c7714cea3a9", ["stateDiff", "vmTrace", 
"trace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_raw_transaction_state_pruned() { - let tester = io(); - *tester.client.execution_result.write() = Some(Err(CallError::StatePruned)); + let tester = io(); + *tester.client.execution_result.write() = Some(Err(CallError::StatePruned)); - let request = r#"{"jsonrpc":"2.0","method":"trace_rawTransaction","params":["0xf869018609184e72a0008276c094d46e8dd67c5d32be8058bb8eb970870f07244567849184e72a801ba0617f39c1a107b63302449c476d96a6cb17a5842fc98ff0c5bcf4d5c4d8166b95a009fdb6097c6196b9bbafc3a59f02f38d91baeef23d0c60a8e4f23c7714cea3a9", ["stateDiff", "vmTrace", "trace"]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"This request is not supported because your node is running with state pruning. Run with --pruning=archive."},"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_rawTransaction","params":["0xf869018609184e72a0008276c094d46e8dd67c5d32be8058bb8eb970870f07244567849184e72a801ba0617f39c1a107b63302449c476d96a6cb17a5842fc98ff0c5bcf4d5c4d8166b95a009fdb6097c6196b9bbafc3a59f02f38d91baeef23d0c60a8e4f23c7714cea3a9", ["stateDiff", "vmTrace", "trace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"This request is not supported because your node is running with state pruning. 
Run with --pruning=archive."},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_replay_transaction() { - let tester = io(); + let tester = io(); - let request = r#"{"jsonrpc":"2.0","method":"trace_replayTransaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["trace", "stateDiff", "vmTrace"]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null},"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_replayTransaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["trace", "stateDiff", "vmTrace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_replay_transaction_state_pruned() { - let tester = io(); - *tester.client.execution_result.write() = Some(Err(CallError::StatePruned)); + let tester = io(); + *tester.client.execution_result.write() = Some(Err(CallError::StatePruned)); - let request = r#"{"jsonrpc":"2.0","method":"trace_replayTransaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["trace", "stateDiff", "vmTrace"]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"This request is not supported because your node is running with state pruning. 
Run with --pruning=archive."},"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_replayTransaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["trace", "stateDiff", "vmTrace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"This request is not supported because your node is running with state pruning. Run with --pruning=archive."},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } #[test] fn rpc_trace_replay_block_transactions() { - let tester = io(); + let tester = io(); - let request = r#"{"jsonrpc":"2.0","method":"trace_replayBlockTransactions","params":["0x10", ["trace", "stateDiff", "vmTrace"]],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":[{"output":"0x010203","stateDiff":null,"trace":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","vmTrace":null}],"id":1}"#; + let request = r#"{"jsonrpc":"2.0","method":"trace_replayBlockTransactions","params":["0x10", ["trace", "stateDiff", "vmTrace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":[{"output":"0x010203","stateDiff":null,"trace":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","vmTrace":null}],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!( + tester.io.handle_request_sync(request), + Some(response.to_owned()) + ); } diff --git a/rpc/src/v1/tests/mocked/web3.rs b/rpc/src/v1/tests/mocked/web3.rs index 5590d5d28..16a69cfba 100644 --- a/rpc/src/v1/tests/mocked/web3.rs +++ b/rpc/src/v1/tests/mocked/web3.rs @@ -15,43 +15,45 @@ // along with Parity Ethereum. If not, see . 
use jsonrpc_core::IoHandler; -use version::version; use v1::{Web3, Web3Client}; +use version::version; #[test] fn rpc_web3_version() { - let web3 = Web3Client::default().to_delegate(); - let mut io = IoHandler::new(); - io.extend_with(web3); + let web3 = Web3Client::default().to_delegate(); + let mut io = IoHandler::new(); + io.extend_with(web3); - let v = version().to_owned().replacen("/", "//", 1); + let v = version().to_owned().replacen("/", "//", 1); - let request = r#"{"jsonrpc": "2.0", "method": "web3_clientVersion", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"VER","id":1}"#.to_owned().replace("VER", v.as_ref()); + let request = r#"{"jsonrpc": "2.0", "method": "web3_clientVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"VER","id":1}"# + .to_owned() + .replace("VER", v.as_ref()); - assert_eq!(io.handle_request_sync(request), Some(response)); + assert_eq!(io.handle_request_sync(request), Some(response)); } #[test] fn rpc_web3_sha3() { - let web3 = Web3Client::default().to_delegate(); - let mut io = IoHandler::new(); - io.extend_with(web3); + let web3 = Web3Client::default().to_delegate(); + let mut io = IoHandler::new(); + io.extend_with(web3); - let request = r#"{"jsonrpc": "2.0", "method": "web3_sha3", "params": ["0x00"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0xbc36789e7a1e281436464229828f817d6612f7b477d66591ff96a9e064bcc98a","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "web3_sha3", "params": ["0x00"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0xbc36789e7a1e281436464229828f817d6612f7b477d66591ff96a9e064bcc98a","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } #[test] fn rpc_web3_sha3_wiki() { - let web3 = Web3Client::default().to_delegate(); - let mut io = IoHandler::new(); - io.extend_with(web3); + let web3 = 
Web3Client::default().to_delegate(); + let mut io = IoHandler::new(); + io.extend_with(web3); - let request = r#"{"jsonrpc": "2.0", "method": "web3_sha3", "params": ["0x68656c6c6f20776f726c64"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "web3_sha3", "params": ["0x68656c6c6f20776f726c64"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad","id":1}"#; - assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index 83f9dca90..5a841ba1c 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -51,7 +51,7 @@ macro_rules! register_test { }; } -#[cfg(test)] -mod mocked; #[cfg(test)] mod eth; +#[cfg(test)] +mod mocked; diff --git a/rpc/src/v1/traits/debug.rs b/rpc/src/v1/traits/debug.rs index 373717ea6..62eb6803f 100644 --- a/rpc/src/v1/traits/debug.rs +++ b/rpc/src/v1/traits/debug.rs @@ -24,7 +24,7 @@ use v1::types::RichBlock; /// Debug RPC interface. #[rpc(server)] pub trait Debug { - /// Returns recently seen bad blocks. - #[rpc(name = "debug_getBadBlocks")] - fn bad_blocks(&self) -> Result>; + /// Returns recently seen bad blocks. + #[rpc(name = "debug_getBadBlocks")] + fn bad_blocks(&self) -> Result>; } diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 6add37b3f..8a4c91542 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -15,203 +15,217 @@ // along with Parity Ethereum. If not, see . //! Eth rpc interface. 
-use jsonrpc_core::{Result, BoxFuture}; +use ethereum_types::{H160, H256, H64, U256, U64}; +use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; -use ethereum_types::{H64, H160, H256, U64, U256}; -use v1::types::{RichBlock, BlockNumber, Bytes, CallRequest, Filter, FilterChanges, Index, EthAccount}; -use v1::types::{Log, Receipt, SyncStatus, Transaction, Work}; +use v1::types::{ + BlockNumber, Bytes, CallRequest, EthAccount, Filter, FilterChanges, Index, Log, Receipt, + RichBlock, SyncStatus, Transaction, Work, +}; /// Eth rpc interface. #[rpc(server)] pub trait Eth { - /// RPC Metadata - type Metadata; + /// RPC Metadata + type Metadata; - /// Returns protocol version encoded as a string (quotes are necessary). - #[rpc(name = "eth_protocolVersion")] - fn protocol_version(&self) -> Result; + /// Returns protocol version encoded as a string (quotes are necessary). + #[rpc(name = "eth_protocolVersion")] + fn protocol_version(&self) -> Result; - /// Returns an object with data about the sync status or false. (wtf?) - #[rpc(name = "eth_syncing")] - fn syncing(&self) -> Result; + /// Returns an object with data about the sync status or false. (wtf?) + #[rpc(name = "eth_syncing")] + fn syncing(&self) -> Result; - /// Returns the number of hashes per second that the node is mining with. - #[rpc(name = "eth_hashrate")] - fn hashrate(&self) -> Result; + /// Returns the number of hashes per second that the node is mining with. + #[rpc(name = "eth_hashrate")] + fn hashrate(&self) -> Result; - /// Returns block author. - #[rpc(name = "eth_coinbase")] - fn author(&self) -> Result; + /// Returns block author. + #[rpc(name = "eth_coinbase")] + fn author(&self) -> Result; - /// Returns true if client is actively mining new blocks. - #[rpc(name = "eth_mining")] - fn is_mining(&self) -> Result; + /// Returns true if client is actively mining new blocks. 
+ #[rpc(name = "eth_mining")] + fn is_mining(&self) -> Result; - /// Returns the chain ID used for transaction signing at the - /// current best block. None is returned if not - /// available. - #[rpc(name = "eth_chainId")] - fn chain_id(&self) -> Result>; + /// Returns the chain ID used for transaction signing at the + /// current best block. None is returned if not + /// available. + #[rpc(name = "eth_chainId")] + fn chain_id(&self) -> Result>; - /// Returns current gas_price. - #[rpc(name = "eth_gasPrice")] - fn gas_price(&self) -> BoxFuture; + /// Returns current gas_price. + #[rpc(name = "eth_gasPrice")] + fn gas_price(&self) -> BoxFuture; - /// Returns accounts list. - #[rpc(name = "eth_accounts")] - fn accounts(&self) -> Result>; + /// Returns accounts list. + #[rpc(name = "eth_accounts")] + fn accounts(&self) -> Result>; - /// Returns highest block number. - #[rpc(name = "eth_blockNumber")] - fn block_number(&self) -> Result; + /// Returns highest block number. + #[rpc(name = "eth_blockNumber")] + fn block_number(&self) -> Result; - /// Returns balance of the given account. - #[rpc(name = "eth_getBalance")] - fn balance(&self, _: H160, _: Option) -> BoxFuture; + /// Returns balance of the given account. + #[rpc(name = "eth_getBalance")] + fn balance(&self, _: H160, _: Option) -> BoxFuture; - /// Returns the account- and storage-values of the specified account including the Merkle-proof - #[rpc(name = "eth_getProof")] - fn proof(&self, _: H160, _: Vec, _: Option) -> BoxFuture; + /// Returns the account- and storage-values of the specified account including the Merkle-proof + #[rpc(name = "eth_getProof")] + fn proof(&self, _: H160, _: Vec, _: Option) -> BoxFuture; - /// Returns content of the storage at given address. - #[rpc(name = "eth_getStorageAt")] - fn storage_at(&self, _: H160, _: U256, _: Option) -> BoxFuture; + /// Returns content of the storage at given address. 
+ #[rpc(name = "eth_getStorageAt")] + fn storage_at(&self, _: H160, _: U256, _: Option) -> BoxFuture; - /// Returns block with given hash. - #[rpc(name = "eth_getBlockByHash")] - fn block_by_hash(&self, _: H256, _: bool) -> BoxFuture>; + /// Returns block with given hash. + #[rpc(name = "eth_getBlockByHash")] + fn block_by_hash(&self, _: H256, _: bool) -> BoxFuture>; - /// Returns block with given number. - #[rpc(name = "eth_getBlockByNumber")] - fn block_by_number(&self, _: BlockNumber, _: bool) -> BoxFuture>; + /// Returns block with given number. + #[rpc(name = "eth_getBlockByNumber")] + fn block_by_number(&self, _: BlockNumber, _: bool) -> BoxFuture>; - /// Returns the number of transactions sent from given address at given time (block number). - #[rpc(name = "eth_getTransactionCount")] - fn transaction_count(&self, _: H160, _: Option) -> BoxFuture; + /// Returns the number of transactions sent from given address at given time (block number). + #[rpc(name = "eth_getTransactionCount")] + fn transaction_count(&self, _: H160, _: Option) -> BoxFuture; - /// Returns the number of transactions in a block with given hash. - #[rpc(name = "eth_getBlockTransactionCountByHash")] - fn block_transaction_count_by_hash(&self, _: H256) -> BoxFuture>; + /// Returns the number of transactions in a block with given hash. + #[rpc(name = "eth_getBlockTransactionCountByHash")] + fn block_transaction_count_by_hash(&self, _: H256) -> BoxFuture>; - /// Returns the number of transactions in a block with given block number. - #[rpc(name = "eth_getBlockTransactionCountByNumber")] - fn block_transaction_count_by_number(&self, _: BlockNumber) -> BoxFuture>; + /// Returns the number of transactions in a block with given block number. + #[rpc(name = "eth_getBlockTransactionCountByNumber")] + fn block_transaction_count_by_number(&self, _: BlockNumber) -> BoxFuture>; - /// Returns the number of uncles in a block with given hash. 
- #[rpc(name = "eth_getUncleCountByBlockHash")] - fn block_uncles_count_by_hash(&self, _: H256) -> BoxFuture>; + /// Returns the number of uncles in a block with given hash. + #[rpc(name = "eth_getUncleCountByBlockHash")] + fn block_uncles_count_by_hash(&self, _: H256) -> BoxFuture>; - /// Returns the number of uncles in a block with given block number. - #[rpc(name = "eth_getUncleCountByBlockNumber")] - fn block_uncles_count_by_number(&self, _: BlockNumber) -> BoxFuture>; + /// Returns the number of uncles in a block with given block number. + #[rpc(name = "eth_getUncleCountByBlockNumber")] + fn block_uncles_count_by_number(&self, _: BlockNumber) -> BoxFuture>; - /// Returns the code at given address at given time (block number). - #[rpc(name = "eth_getCode")] - fn code_at(&self, _: H160, _: Option) -> BoxFuture; + /// Returns the code at given address at given time (block number). + #[rpc(name = "eth_getCode")] + fn code_at(&self, _: H160, _: Option) -> BoxFuture; - /// Sends signed transaction, returning its hash. - #[rpc(name = "eth_sendRawTransaction")] - fn send_raw_transaction(&self, _: Bytes) -> Result; + /// Sends signed transaction, returning its hash. + #[rpc(name = "eth_sendRawTransaction")] + fn send_raw_transaction(&self, _: Bytes) -> Result; - /// @alias of `eth_sendRawTransaction`. - #[rpc(name = "eth_submitTransaction")] - fn submit_transaction(&self, _: Bytes) -> Result; + /// @alias of `eth_sendRawTransaction`. + #[rpc(name = "eth_submitTransaction")] + fn submit_transaction(&self, _: Bytes) -> Result; - /// Call contract, returning the output data. - #[rpc(name = "eth_call")] - fn call(&self, _: CallRequest, _: Option) -> BoxFuture; + /// Call contract, returning the output data. + #[rpc(name = "eth_call")] + fn call(&self, _: CallRequest, _: Option) -> BoxFuture; - /// Estimate gas needed for execution of given contract. 
- #[rpc(name = "eth_estimateGas")] - fn estimate_gas(&self, _: CallRequest, _: Option) -> BoxFuture; + /// Estimate gas needed for execution of given contract. + #[rpc(name = "eth_estimateGas")] + fn estimate_gas(&self, _: CallRequest, _: Option) -> BoxFuture; - /// Get transaction by its hash. - #[rpc(name = "eth_getTransactionByHash")] - fn transaction_by_hash(&self, _: H256) -> BoxFuture>; + /// Get transaction by its hash. + #[rpc(name = "eth_getTransactionByHash")] + fn transaction_by_hash(&self, _: H256) -> BoxFuture>; - /// Returns transaction at given block hash and index. - #[rpc(name = "eth_getTransactionByBlockHashAndIndex")] - fn transaction_by_block_hash_and_index(&self, _: H256, _: Index) -> BoxFuture>; + /// Returns transaction at given block hash and index. + #[rpc(name = "eth_getTransactionByBlockHashAndIndex")] + fn transaction_by_block_hash_and_index( + &self, + _: H256, + _: Index, + ) -> BoxFuture>; - /// Returns transaction by given block number and index. - #[rpc(name = "eth_getTransactionByBlockNumberAndIndex")] - fn transaction_by_block_number_and_index(&self, _: BlockNumber, _: Index) -> BoxFuture>; + /// Returns transaction by given block number and index. + #[rpc(name = "eth_getTransactionByBlockNumberAndIndex")] + fn transaction_by_block_number_and_index( + &self, + _: BlockNumber, + _: Index, + ) -> BoxFuture>; - /// Returns transaction receipt by transaction hash. - #[rpc(name = "eth_getTransactionReceipt")] - fn transaction_receipt(&self, _: H256) -> BoxFuture>; + /// Returns transaction receipt by transaction hash. + #[rpc(name = "eth_getTransactionReceipt")] + fn transaction_receipt(&self, _: H256) -> BoxFuture>; - /// Returns an uncles at given block and index. - #[rpc(name = "eth_getUncleByBlockHashAndIndex")] - fn uncle_by_block_hash_and_index(&self, _: H256, _: Index) -> BoxFuture>; + /// Returns an uncles at given block and index. 
+ #[rpc(name = "eth_getUncleByBlockHashAndIndex")] + fn uncle_by_block_hash_and_index(&self, _: H256, _: Index) -> BoxFuture>; - /// Returns an uncles at given block and index. - #[rpc(name = "eth_getUncleByBlockNumberAndIndex")] - fn uncle_by_block_number_and_index(&self, _: BlockNumber, _: Index) -> BoxFuture>; + /// Returns an uncles at given block and index. + #[rpc(name = "eth_getUncleByBlockNumberAndIndex")] + fn uncle_by_block_number_and_index( + &self, + _: BlockNumber, + _: Index, + ) -> BoxFuture>; - /// Returns available compilers. - /// @deprecated - #[rpc(name = "eth_getCompilers")] - fn compilers(&self) -> Result>; + /// Returns available compilers. + /// @deprecated + #[rpc(name = "eth_getCompilers")] + fn compilers(&self) -> Result>; - /// Compiles lll code. - /// @deprecated - #[rpc(name = "eth_compileLLL")] - fn compile_lll(&self, _: String) -> Result; + /// Compiles lll code. + /// @deprecated + #[rpc(name = "eth_compileLLL")] + fn compile_lll(&self, _: String) -> Result; - /// Compiles solidity. - /// @deprecated - #[rpc(name = "eth_compileSolidity")] - fn compile_solidity(&self, _: String) -> Result; + /// Compiles solidity. + /// @deprecated + #[rpc(name = "eth_compileSolidity")] + fn compile_solidity(&self, _: String) -> Result; - /// Compiles serpent. - /// @deprecated - #[rpc(name = "eth_compileSerpent")] - fn compile_serpent(&self, _: String) -> Result; + /// Compiles serpent. + /// @deprecated + #[rpc(name = "eth_compileSerpent")] + fn compile_serpent(&self, _: String) -> Result; - /// Returns logs matching given filter object. - #[rpc(name = "eth_getLogs")] - fn logs(&self, _: Filter) -> BoxFuture>; + /// Returns logs matching given filter object. + #[rpc(name = "eth_getLogs")] + fn logs(&self, _: Filter) -> BoxFuture>; - /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. 
- #[rpc(name = "eth_getWork")] - fn work(&self, _: Option) -> Result; + /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. + #[rpc(name = "eth_getWork")] + fn work(&self, _: Option) -> Result; - /// Used for submitting a proof-of-work solution. - #[rpc(name = "eth_submitWork")] - fn submit_work(&self, _: H64, _: H256, _: H256) -> Result; + /// Used for submitting a proof-of-work solution. + #[rpc(name = "eth_submitWork")] + fn submit_work(&self, _: H64, _: H256, _: H256) -> Result; - /// Used for submitting mining hashrate. - #[rpc(name = "eth_submitHashrate")] - fn submit_hashrate(&self, _: U256, _: H256) -> Result; + /// Used for submitting mining hashrate. + #[rpc(name = "eth_submitHashrate")] + fn submit_hashrate(&self, _: U256, _: H256) -> Result; } /// Eth filters rpc api (polling). // TODO: do filters api properly #[rpc(server)] pub trait EthFilter { - /// Returns id of new filter. - #[rpc(name = "eth_newFilter")] - fn new_filter(&self, _: Filter) -> Result; + /// Returns id of new filter. + #[rpc(name = "eth_newFilter")] + fn new_filter(&self, _: Filter) -> Result; - /// Returns id of new block filter. - #[rpc(name = "eth_newBlockFilter")] - fn new_block_filter(&self) -> Result; + /// Returns id of new block filter. + #[rpc(name = "eth_newBlockFilter")] + fn new_block_filter(&self) -> Result; - /// Returns id of new block filter. - #[rpc(name = "eth_newPendingTransactionFilter")] - fn new_pending_transaction_filter(&self) -> Result; + /// Returns id of new block filter. + #[rpc(name = "eth_newPendingTransactionFilter")] + fn new_pending_transaction_filter(&self) -> Result; - /// Returns filter changes since last poll. - #[rpc(name = "eth_getFilterChanges")] - fn filter_changes(&self, _: Index) -> BoxFuture; + /// Returns filter changes since last poll. + #[rpc(name = "eth_getFilterChanges")] + fn filter_changes(&self, _: Index) -> BoxFuture; - /// Returns all logs matching given filter (in a range 'from' - 'to'). 
- #[rpc(name = "eth_getFilterLogs")] - fn filter_logs(&self, _: Index) -> BoxFuture>; + /// Returns all logs matching given filter (in a range 'from' - 'to'). + #[rpc(name = "eth_getFilterLogs")] + fn filter_logs(&self, _: Index) -> BoxFuture>; - /// Uninstalls filter. - #[rpc(name = "eth_uninstallFilter")] - fn uninstall_filter(&self, _: Index) -> Result; + /// Uninstalls filter. + #[rpc(name = "eth_uninstallFilter")] + fn uninstall_filter(&self, _: Index) -> Result; } diff --git a/rpc/src/v1/traits/eth_pubsub.rs b/rpc/src/v1/traits/eth_pubsub.rs index e58cebebb..2d4d06892 100644 --- a/rpc/src/v1/traits/eth_pubsub.rs +++ b/rpc/src/v1/traits/eth_pubsub.rs @@ -25,14 +25,24 @@ use v1::types::pubsub; /// Eth PUB-SUB rpc interface. #[rpc(server)] pub trait EthPubSub { - /// RPC Metadata - type Metadata; + /// RPC Metadata + type Metadata; - /// Subscribe to Eth subscription. - #[pubsub(subscription = "eth_subscription", subscribe, name = "eth_subscribe")] - fn subscribe(&self, _: Self::Metadata, _: typed::Subscriber, _: pubsub::Kind, _: Option); + /// Subscribe to Eth subscription. + #[pubsub(subscription = "eth_subscription", subscribe, name = "eth_subscribe")] + fn subscribe( + &self, + _: Self::Metadata, + _: typed::Subscriber, + _: pubsub::Kind, + _: Option, + ); - /// Unsubscribe from existing Eth subscription. - #[pubsub(subscription = "eth_subscription", unsubscribe, name = "eth_unsubscribe")] - fn unsubscribe(&self, _: Option, _: SubscriptionId) -> Result; + /// Unsubscribe from existing Eth subscription. 
+ #[pubsub( + subscription = "eth_subscription", + unsubscribe, + name = "eth_unsubscribe" + )] + fn unsubscribe(&self, _: Option, _: SubscriptionId) -> Result; } diff --git a/rpc/src/v1/traits/eth_signing.rs b/rpc/src/v1/traits/eth_signing.rs index 5dc5c37d9..7725b2f1c 100644 --- a/rpc/src/v1/traits/eth_signing.rs +++ b/rpc/src/v1/traits/eth_signing.rs @@ -20,27 +20,31 @@ use jsonrpc_core::BoxFuture; use jsonrpc_derive::rpc; use ethereum_types::{H160, H256, H520}; -use v1::types::{Bytes, TransactionRequest, RichRawTransaction}; +use v1::types::{Bytes, RichRawTransaction, TransactionRequest}; /// Signing methods implementation relying on unlocked accounts. #[rpc(server)] pub trait EthSigning { - /// RPC Metadata - type Metadata; + /// RPC Metadata + type Metadata; - /// Signs the hash of data with given address signature. - #[rpc(meta, name = "eth_sign")] - fn sign(&self, _: Self::Metadata, _: H160, _: Bytes) -> BoxFuture; + /// Signs the hash of data with given address signature. + #[rpc(meta, name = "eth_sign")] + fn sign(&self, _: Self::Metadata, _: H160, _: Bytes) -> BoxFuture; - /// Sends transaction; will block waiting for signer to return the - /// transaction hash. - /// If Signer is disable it will require the account to be unlocked. - #[rpc(meta, name = "eth_sendTransaction")] - fn send_transaction(&self, _: Self::Metadata, _: TransactionRequest) -> BoxFuture; + /// Sends transaction; will block waiting for signer to return the + /// transaction hash. + /// If Signer is disable it will require the account to be unlocked. + #[rpc(meta, name = "eth_sendTransaction")] + fn send_transaction(&self, _: Self::Metadata, _: TransactionRequest) -> BoxFuture; - /// Signs transactions without dispatching it to the network. - /// Returns signed transaction RLP representation and the transaction itself. - /// It can be later submitted using `eth_sendRawTransaction/eth_submitTransaction`. 
- #[rpc(meta, name = "eth_signTransaction")] - fn sign_transaction(&self, _: Self::Metadata, _: TransactionRequest) -> BoxFuture; + /// Signs transactions without dispatching it to the network. + /// Returns signed transaction RLP representation and the transaction itself. + /// It can be later submitted using `eth_sendRawTransaction/eth_submitTransaction`. + #[rpc(meta, name = "eth_signTransaction")] + fn sign_transaction( + &self, + _: Self::Metadata, + _: TransactionRequest, + ) -> BoxFuture; } diff --git a/rpc/src/v1/traits/mod.rs b/rpc/src/v1/traits/mod.rs index e25ca76ac..3f40ac466 100644 --- a/rpc/src/v1/traits/mod.rs +++ b/rpc/src/v1/traits/mod.rs @@ -34,20 +34,22 @@ pub mod signer; pub mod traces; pub mod web3; -pub use self::debug::Debug; -pub use self::eth::{Eth, EthFilter}; -pub use self::eth_pubsub::EthPubSub; -pub use self::eth_signing::EthSigning; -pub use self::net::Net; -pub use self::parity::Parity; -pub use self::parity_accounts::{ParityAccounts, ParityAccountsInfo}; -pub use self::parity_set::{ParitySet, ParitySetAccounts}; -pub use self::parity_signing::ParitySigning; -pub use self::personal::Personal; -pub use self::private::Private; -pub use self::pubsub::PubSub; -pub use self::rpc::Rpc; -pub use self::secretstore::SecretStore; -pub use self::signer::Signer; -pub use self::traces::Traces; -pub use self::web3::Web3; +pub use self::{ + debug::Debug, + eth::{Eth, EthFilter}, + eth_pubsub::EthPubSub, + eth_signing::EthSigning, + net::Net, + parity::Parity, + parity_accounts::{ParityAccounts, ParityAccountsInfo}, + parity_set::{ParitySet, ParitySetAccounts}, + parity_signing::ParitySigning, + personal::Personal, + private::Private, + pubsub::PubSub, + rpc::Rpc, + secretstore::SecretStore, + signer::Signer, + traces::Traces, + web3::Web3, +}; diff --git a/rpc/src/v1/traits/net.rs b/rpc/src/v1/traits/net.rs index 68cd23566..4c40e0106 100644 --- a/rpc/src/v1/traits/net.rs +++ b/rpc/src/v1/traits/net.rs @@ -21,16 +21,16 @@ use jsonrpc_derive::rpc; /// 
Net rpc interface. #[rpc(server)] pub trait Net { - /// Returns protocol version. - #[rpc(name = "net_version")] - fn version(&self) -> Result; + /// Returns protocol version. + #[rpc(name = "net_version")] + fn version(&self) -> Result; - /// Returns number of peers connected to node. - #[rpc(name = "net_peerCount")] - fn peer_count(&self) -> Result; + /// Returns number of peers connected to node. + #[rpc(name = "net_peerCount")] + fn peer_count(&self) -> Result; - /// Returns true if client is actively listening for network connections. - /// Otherwise false. - #[rpc(name = "net_listening")] - fn is_listening(&self) -> Result; + /// Returns true if client is actively listening for network connections. + /// Otherwise false. + #[rpc(name = "net_listening")] + fn is_listening(&self) -> Result; } diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs index 72dbb73d4..e2e9d47a8 100644 --- a/rpc/src/v1/traits/parity.rs +++ b/rpc/src/v1/traits/parity.rs @@ -18,218 +18,233 @@ use std::collections::BTreeMap; -use ethereum_types::{H64, H160, H256, H512, U64, U256}; +use ethereum_types::{H160, H256, H512, H64, U256, U64}; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; use v1::types::{ - Bytes, CallRequest, - Peers, Transaction, RpcSettings, Histogram, RecoveredAccount, - TransactionStats, LocalTransactionStatus, - BlockNumber, ConsensusCapability, VersionInfo, - OperationsInfo, ChainStatus, Log, Filter, - RichHeader, Receipt, + BlockNumber, Bytes, CallRequest, ChainStatus, ConsensusCapability, Filter, Histogram, + LocalTransactionStatus, Log, OperationsInfo, Peers, Receipt, RecoveredAccount, RichHeader, + RpcSettings, Transaction, TransactionStats, VersionInfo, }; /// Parity-specific rpc interface. #[rpc(server)] pub trait Parity { - /// RPC Metadata - type Metadata; + /// RPC Metadata + type Metadata; - /// Returns current transactions limit. 
- #[rpc(name = "parity_transactionsLimit")] - fn transactions_limit(&self) -> Result; + /// Returns current transactions limit. + #[rpc(name = "parity_transactionsLimit")] + fn transactions_limit(&self) -> Result; - /// Returns mining extra data. - #[rpc(name = "parity_extraData")] - fn extra_data(&self) -> Result; + /// Returns mining extra data. + #[rpc(name = "parity_extraData")] + fn extra_data(&self) -> Result; - /// Returns mining gas floor target. - #[rpc(name = "parity_gasFloorTarget")] - fn gas_floor_target(&self) -> Result; + /// Returns mining gas floor target. + #[rpc(name = "parity_gasFloorTarget")] + fn gas_floor_target(&self) -> Result; - /// Returns mining gas floor cap. - #[rpc(name = "parity_gasCeilTarget")] - fn gas_ceil_target(&self) -> Result; + /// Returns mining gas floor cap. + #[rpc(name = "parity_gasCeilTarget")] + fn gas_ceil_target(&self) -> Result; - /// Returns minimal gas price for transaction to be included in queue. - #[rpc(name = "parity_minGasPrice")] - fn min_gas_price(&self) -> Result; + /// Returns minimal gas price for transaction to be included in queue. + #[rpc(name = "parity_minGasPrice")] + fn min_gas_price(&self) -> Result; - /// Returns latest logs - #[rpc(name = "parity_devLogs")] - fn dev_logs(&self) -> Result>; + /// Returns latest logs + #[rpc(name = "parity_devLogs")] + fn dev_logs(&self) -> Result>; - /// Returns logs levels - #[rpc(name = "parity_devLogsLevels")] - fn dev_logs_levels(&self) -> Result; + /// Returns logs levels + #[rpc(name = "parity_devLogsLevels")] + fn dev_logs_levels(&self) -> Result; - /// Returns chain name - DEPRECATED. Use `parity_chainName` instead. - #[rpc(name = "parity_netChain")] - fn net_chain(&self) -> Result; + /// Returns chain name - DEPRECATED. Use `parity_chainName` instead. 
+ #[rpc(name = "parity_netChain")] + fn net_chain(&self) -> Result; - /// Returns peers details - #[rpc(name = "parity_netPeers")] - fn net_peers(&self) -> Result; + /// Returns peers details + #[rpc(name = "parity_netPeers")] + fn net_peers(&self) -> Result; - /// Returns network port - #[rpc(name = "parity_netPort")] - fn net_port(&self) -> Result; + /// Returns network port + #[rpc(name = "parity_netPort")] + fn net_port(&self) -> Result; - /// Returns rpc settings - #[rpc(name = "parity_rpcSettings")] - fn rpc_settings(&self) -> Result; + /// Returns rpc settings + #[rpc(name = "parity_rpcSettings")] + fn rpc_settings(&self) -> Result; - /// Returns node name - #[rpc(name = "parity_nodeName")] - fn node_name(&self) -> Result; + /// Returns node name + #[rpc(name = "parity_nodeName")] + fn node_name(&self) -> Result; - /// Returns default extra data - #[rpc(name = "parity_defaultExtraData")] - fn default_extra_data(&self) -> Result; + /// Returns default extra data + #[rpc(name = "parity_defaultExtraData")] + fn default_extra_data(&self) -> Result; - /// Returns distribution of gas price in latest blocks. - #[rpc(name = "parity_gasPriceHistogram")] - fn gas_price_histogram(&self) -> BoxFuture; + /// Returns distribution of gas price in latest blocks. + #[rpc(name = "parity_gasPriceHistogram")] + fn gas_price_histogram(&self) -> BoxFuture; - /// Returns number of unsigned transactions waiting in the signer queue (if signer enabled) - /// Returns error when signer is disabled - #[rpc(name = "parity_unsignedTransactionsCount")] - fn unsigned_transactions_count(&self) -> Result; + /// Returns number of unsigned transactions waiting in the signer queue (if signer enabled) + /// Returns error when signer is disabled + #[rpc(name = "parity_unsignedTransactionsCount")] + fn unsigned_transactions_count(&self) -> Result; - /// Returns a cryptographically random phrase sufficient for securely seeding a secret key. 
- #[rpc(name = "parity_generateSecretPhrase")] - fn generate_secret_phrase(&self) -> Result; + /// Returns a cryptographically random phrase sufficient for securely seeding a secret key. + #[rpc(name = "parity_generateSecretPhrase")] + fn generate_secret_phrase(&self) -> Result; - /// Returns whatever address would be derived from the given phrase if it were to seed a brainwallet. - #[rpc(name = "parity_phraseToAddress")] - fn phrase_to_address(&self, _: String) -> Result; + /// Returns whatever address would be derived from the given phrase if it were to seed a brainwallet. + #[rpc(name = "parity_phraseToAddress")] + fn phrase_to_address(&self, _: String) -> Result; - /// Returns the value of the registrar for this network. - #[rpc(name = "parity_registryAddress")] - fn registry_address(&self) -> Result>; + /// Returns the value of the registrar for this network. + #[rpc(name = "parity_registryAddress")] + fn registry_address(&self) -> Result>; - /// Returns all addresses if Fat DB is enabled (`--fat-db`), or null if not. - #[rpc(name = "parity_listAccounts")] - fn list_accounts(&self, _: u64, _: Option, _: Option) -> Result>>; + /// Returns all addresses if Fat DB is enabled (`--fat-db`), or null if not. + #[rpc(name = "parity_listAccounts")] + fn list_accounts( + &self, + _: u64, + _: Option, + _: Option, + ) -> Result>>; - /// Returns all storage keys of the given address (first parameter) if Fat DB is enabled (`--fat-db`), - /// or null if not. - #[rpc(name = "parity_listStorageKeys")] - fn list_storage_keys(&self, _: H160, _: u64, _: Option, _: Option) -> Result>>; + /// Returns all storage keys of the given address (first parameter) if Fat DB is enabled (`--fat-db`), + /// or null if not. + #[rpc(name = "parity_listStorageKeys")] + fn list_storage_keys( + &self, + _: H160, + _: u64, + _: Option, + _: Option, + ) -> Result>>; - /// Encrypt some data with a public key under ECIES. 
- /// First parameter is the 512-byte destination public key, second is the message. - #[rpc(name = "parity_encryptMessage")] - fn encrypt_message(&self, _: H512, _: Bytes) -> Result; + /// Encrypt some data with a public key under ECIES. + /// First parameter is the 512-byte destination public key, second is the message. + #[rpc(name = "parity_encryptMessage")] + fn encrypt_message(&self, _: H512, _: Bytes) -> Result; - /// Returns all pending transactions from transaction queue. - #[rpc(name = "parity_pendingTransactions")] - fn pending_transactions(&self, _: Option) -> Result>; + /// Returns all pending transactions from transaction queue. + #[rpc(name = "parity_pendingTransactions")] + fn pending_transactions(&self, _: Option) -> Result>; - /// Returns all transactions from transaction queue. - /// - /// Some of them might not be ready to be included in a block yet. - #[rpc(name = "parity_allTransactions")] - fn all_transactions(&self) -> Result>; + /// Returns all transactions from transaction queue. + /// + /// Some of them might not be ready to be included in a block yet. + #[rpc(name = "parity_allTransactions")] + fn all_transactions(&self) -> Result>; - /// Same as parity_allTransactions, but return only transactions hashes. - #[rpc(name = "parity_allTransactionHashes")] - fn all_transaction_hashes(&self) -> Result>; + /// Same as parity_allTransactions, but return only transactions hashes. + #[rpc(name = "parity_allTransactionHashes")] + fn all_transaction_hashes(&self) -> Result>; - /// Returns all future transactions from transaction queue (deprecated) - #[rpc(name = "parity_futureTransactions")] - fn future_transactions(&self) -> Result>; + /// Returns all future transactions from transaction queue (deprecated) + #[rpc(name = "parity_futureTransactions")] + fn future_transactions(&self) -> Result>; - /// Returns propagation statistics on transactions pending in the queue. 
- #[rpc(name = "parity_pendingTransactionsStats")] - fn pending_transactions_stats(&self) -> Result>; + /// Returns propagation statistics on transactions pending in the queue. + #[rpc(name = "parity_pendingTransactionsStats")] + fn pending_transactions_stats(&self) -> Result>; - /// Returns a list of current and past local transactions with status details. - #[rpc(name = "parity_localTransactions")] - fn local_transactions(&self) -> Result>; + /// Returns a list of current and past local transactions with status details. + #[rpc(name = "parity_localTransactions")] + fn local_transactions(&self) -> Result>; - /// Returns current WS Server interface and port or an error if ws server is disabled. - #[rpc(name = "parity_wsUrl")] - fn ws_url(&self) -> Result; + /// Returns current WS Server interface and port or an error if ws server is disabled. + #[rpc(name = "parity_wsUrl")] + fn ws_url(&self) -> Result; - /// Returns next nonce for particular sender. Should include all transactions in the queue. - #[rpc(name = "parity_nextNonce")] - fn next_nonce(&self, _: H160) -> BoxFuture; + /// Returns next nonce for particular sender. Should include all transactions in the queue. + #[rpc(name = "parity_nextNonce")] + fn next_nonce(&self, _: H160) -> BoxFuture; - /// Get the mode. Returns one of: "active", "passive", "dark", "offline". - #[rpc(name = "parity_mode")] - fn mode(&self) -> Result; + /// Get the mode. Returns one of: "active", "passive", "dark", "offline". + #[rpc(name = "parity_mode")] + fn mode(&self) -> Result; - /// Get the chain name. Returns one of the pre-configured chain names or a filename. - #[rpc(name = "parity_chain")] - fn chain(&self) -> Result; + /// Get the chain name. Returns one of the pre-configured chain names or a filename. + #[rpc(name = "parity_chain")] + fn chain(&self) -> Result; - /// Get the enode of this node. - #[rpc(name = "parity_enode")] - fn enode(&self) -> Result; + /// Get the enode of this node. 
+ #[rpc(name = "parity_enode")] + fn enode(&self) -> Result; - /// Returns information on current consensus capability. - #[rpc(name = "parity_consensusCapability")] - fn consensus_capability(&self) -> Result; + /// Returns information on current consensus capability. + #[rpc(name = "parity_consensusCapability")] + fn consensus_capability(&self) -> Result; - /// Get our version information in a nice object. - #[rpc(name = "parity_versionInfo")] - fn version_info(&self) -> Result; + /// Get our version information in a nice object. + #[rpc(name = "parity_versionInfo")] + fn version_info(&self) -> Result; - /// Get information concerning the latest releases if available. - #[rpc(name = "parity_releasesInfo")] - fn releases_info(&self) -> Result>; + /// Get information concerning the latest releases if available. + #[rpc(name = "parity_releasesInfo")] + fn releases_info(&self) -> Result>; - /// Get the current chain status. - #[rpc(name = "parity_chainStatus")] - fn chain_status(&self) -> Result; + /// Get the current chain status. + #[rpc(name = "parity_chainStatus")] + fn chain_status(&self) -> Result; - /// Get node kind info. - #[rpc(name = "parity_nodeKind")] - fn node_kind(&self) -> Result<::v1::types::NodeKind>; + /// Get node kind info. + #[rpc(name = "parity_nodeKind")] + fn node_kind(&self) -> Result<::v1::types::NodeKind>; - /// Get block header. - /// Same as `eth_getBlockByNumber` but without uncles and transactions. - #[rpc(name = "parity_getBlockHeaderByNumber")] - fn block_header(&self, _: Option) -> BoxFuture; + /// Get block header. + /// Same as `eth_getBlockByNumber` but without uncles and transactions. + #[rpc(name = "parity_getBlockHeaderByNumber")] + fn block_header(&self, _: Option) -> BoxFuture; - /// Get block receipts. - /// Allows you to fetch receipts from the entire block at once. - /// If no parameter is provided defaults to `latest`. 
- #[rpc(name = "parity_getBlockReceipts")] - fn block_receipts(&self, _: Option) -> BoxFuture>; + /// Get block receipts. + /// Allows you to fetch receipts from the entire block at once. + /// If no parameter is provided defaults to `latest`. + #[rpc(name = "parity_getBlockReceipts")] + fn block_receipts(&self, _: Option) -> BoxFuture>; - /// Get IPFS CIDv0 given protobuf encoded bytes. - #[rpc(name = "parity_cidV0")] - fn ipfs_cid(&self, _: Bytes) -> Result; + /// Get IPFS CIDv0 given protobuf encoded bytes. + #[rpc(name = "parity_cidV0")] + fn ipfs_cid(&self, _: Bytes) -> Result; - /// Call contract, returning the output data. - #[rpc(name = "parity_call")] - fn call(&self, _: Vec, _: Option) -> Result>; + /// Call contract, returning the output data. + #[rpc(name = "parity_call")] + fn call(&self, _: Vec, _: Option) -> Result>; - /// Used for submitting a proof-of-work solution (similar to `eth_submitWork`, - /// but returns block hash on success, and returns an explicit error message on failure). - #[rpc(name = "parity_submitWorkDetail")] - fn submit_work_detail(&self, _: H64, _: H256, _: H256) -> Result; + /// Used for submitting a proof-of-work solution (similar to `eth_submitWork`, + /// but returns block hash on success, and returns an explicit error message on failure). + #[rpc(name = "parity_submitWorkDetail")] + fn submit_work_detail(&self, _: H64, _: H256, _: H256) -> Result; - /// Returns the status of the node. Used as the health endpoint. - /// - /// The RPC returns successful response if: - /// - The node have a peer (unless running a dev chain) - /// - The node is not syncing. - /// - /// Otherwise the RPC returns error. - #[rpc(name = "parity_nodeStatus")] - fn status(&self) -> Result<()>; + /// Returns the status of the node. Used as the health endpoint. + /// + /// The RPC returns successful response if: + /// - The node have a peer (unless running a dev chain) + /// - The node is not syncing. + /// + /// Otherwise the RPC returns error. 
+ #[rpc(name = "parity_nodeStatus")] + fn status(&self) -> Result<()>; - /// Extracts Address and public key from signature using the r, s and v params. Equivalent to Solidity erecover - /// as well as checks the signature for chain replay protection - #[rpc(name = "parity_verifySignature")] - fn verify_signature(&self, _: bool, _: Bytes, _: H256, _: H256, _: U64) -> Result; + /// Extracts Address and public key from signature using the r, s and v params. Equivalent to Solidity erecover + /// as well as checks the signature for chain replay protection + #[rpc(name = "parity_verifySignature")] + fn verify_signature( + &self, + _: bool, + _: Bytes, + _: H256, + _: H256, + _: U64, + ) -> Result; - /// Returns logs matching given filter object. - /// Is allowed to skip filling transaction hash for faster query. - #[rpc(name = "parity_getLogsNoTransactionHash")] - fn logs_no_tx_hash(&self, _: Filter) -> BoxFuture>; + /// Returns logs matching given filter object. + /// Is allowed to skip filling transaction hash for faster query. + #[rpc(name = "parity_getLogsNoTransactionHash")] + fn logs_no_tx_hash(&self, _: Filter) -> BoxFuture>; } diff --git a/rpc/src/v1/traits/parity_accounts.rs b/rpc/src/v1/traits/parity_accounts.rs index 4293df6bd..3328cf9e4 100644 --- a/rpc/src/v1/traits/parity_accounts.rs +++ b/rpc/src/v1/traits/parity_accounts.rs @@ -17,148 +17,153 @@ //! Parity Accounts-related rpc interface. use std::collections::BTreeMap; -use jsonrpc_core::Result; -use jsonrpc_derive::rpc; use ethereum_types::{H160, H256, H520}; use ethkey::Password; use ethstore::KeyFile; -use v1::types::{DeriveHash, DeriveHierarchical, ExtAccountInfo}; -use v1::types::{AccountInfo, HwAccountInfo}; +use jsonrpc_core::Result; +use jsonrpc_derive::rpc; +use v1::types::{AccountInfo, DeriveHash, DeriveHierarchical, ExtAccountInfo, HwAccountInfo}; /// Parity-specific read-only accounts rpc interface. #[rpc(server)] pub trait ParityAccountsInfo { - /// Returns accounts information. 
- #[rpc(name = "parity_accountsInfo")] - fn accounts_info(&self) -> Result>; + /// Returns accounts information. + #[rpc(name = "parity_accountsInfo")] + fn accounts_info(&self) -> Result>; - /// Returns hardware accounts information. - #[rpc(name = "parity_hardwareAccountsInfo")] - fn hardware_accounts_info(&self) -> Result>; + /// Returns hardware accounts information. + #[rpc(name = "parity_hardwareAccountsInfo")] + fn hardware_accounts_info(&self) -> Result>; - /// Get a list of paths to locked hardware wallets - #[rpc(name = "parity_lockedHardwareAccountsInfo")] - fn locked_hardware_accounts_info(&self) -> Result>; + /// Get a list of paths to locked hardware wallets + #[rpc(name = "parity_lockedHardwareAccountsInfo")] + fn locked_hardware_accounts_info(&self) -> Result>; - /// Returns default account for dapp. - #[rpc(name = "parity_defaultAccount")] - fn default_account(&self) -> Result; + /// Returns default account for dapp. + #[rpc(name = "parity_defaultAccount")] + fn default_account(&self) -> Result; } /// Personal Parity rpc interface. #[rpc(server)] pub trait ParityAccounts { - /// Returns accounts information. - #[rpc(name = "parity_allAccountsInfo")] - fn all_accounts_info(&self) -> Result>; + /// Returns accounts information. + #[rpc(name = "parity_allAccountsInfo")] + fn all_accounts_info(&self) -> Result>; - /// Creates new account from the given phrase using standard brainwallet mechanism. - /// Second parameter is password for the new account. - #[rpc(name = "parity_newAccountFromPhrase")] - fn new_account_from_phrase(&self, _: String, _: Password) -> Result; + /// Creates new account from the given phrase using standard brainwallet mechanism. + /// Second parameter is password for the new account. + #[rpc(name = "parity_newAccountFromPhrase")] + fn new_account_from_phrase(&self, _: String, _: Password) -> Result; - /// Creates new account from the given JSON wallet. - /// Second parameter is password for the wallet and the new account. 
- #[rpc(name = "parity_newAccountFromWallet")] - fn new_account_from_wallet(&self, _: String, _: Password) -> Result; + /// Creates new account from the given JSON wallet. + /// Second parameter is password for the wallet and the new account. + #[rpc(name = "parity_newAccountFromWallet")] + fn new_account_from_wallet(&self, _: String, _: Password) -> Result; - /// Creates new account from the given raw secret. - /// Second parameter is password for the new account. - #[rpc(name = "parity_newAccountFromSecret")] - fn new_account_from_secret(&self, _: H256, _: Password) -> Result; + /// Creates new account from the given raw secret. + /// Second parameter is password for the new account. + #[rpc(name = "parity_newAccountFromSecret")] + fn new_account_from_secret(&self, _: H256, _: Password) -> Result; - /// Returns true if given `password` would unlock given `account`. - /// Arguments: `account`, `password`. - #[rpc(name = "parity_testPassword")] - fn test_password(&self, _: H160, _: Password) -> Result; + /// Returns true if given `password` would unlock given `account`. + /// Arguments: `account`, `password`. + #[rpc(name = "parity_testPassword")] + fn test_password(&self, _: H160, _: Password) -> Result; - /// Changes an account's password. - /// Arguments: `account`, `password`, `new_password`. - #[rpc(name = "parity_changePassword")] - fn change_password(&self, _: H160, _: Password, _: Password) -> Result; + /// Changes an account's password. + /// Arguments: `account`, `password`, `new_password`. + #[rpc(name = "parity_changePassword")] + fn change_password(&self, _: H160, _: Password, _: Password) -> Result; - /// Permanently deletes an account. - /// Arguments: `account`, `password`. - #[rpc(name = "parity_killAccount")] - fn kill_account(&self, _: H160, _: Password) -> Result; + /// Permanently deletes an account. + /// Arguments: `account`, `password`. 
+ #[rpc(name = "parity_killAccount")] + fn kill_account(&self, _: H160, _: Password) -> Result; - /// Permanently deletes an address from the addressbook - /// Arguments: `address` - #[rpc(name = "parity_removeAddress")] - fn remove_address(&self, _: H160) -> Result; + /// Permanently deletes an address from the addressbook + /// Arguments: `address` + #[rpc(name = "parity_removeAddress")] + fn remove_address(&self, _: H160) -> Result; - /// Set an account's name. - #[rpc(name = "parity_setAccountName")] - fn set_account_name(&self, _: H160, _: String) -> Result; + /// Set an account's name. + #[rpc(name = "parity_setAccountName")] + fn set_account_name(&self, _: H160, _: String) -> Result; - /// Set an account's metadata string. - #[rpc(name = "parity_setAccountMeta")] - fn set_account_meta(&self, _: H160, _: String) -> Result; + /// Set an account's metadata string. + #[rpc(name = "parity_setAccountMeta")] + fn set_account_meta(&self, _: H160, _: String) -> Result; - /// Imports a number of Geth accounts, with the list provided as the argument. - #[rpc(name = "parity_importGethAccounts")] - fn import_geth_accounts(&self, _: Vec) -> Result>; + /// Imports a number of Geth accounts, with the list provided as the argument. + #[rpc(name = "parity_importGethAccounts")] + fn import_geth_accounts(&self, _: Vec) -> Result>; - /// Returns the accounts available for importing from Geth. - #[rpc(name = "parity_listGethAccounts")] - fn geth_accounts(&self) -> Result>; + /// Returns the accounts available for importing from Geth. + #[rpc(name = "parity_listGethAccounts")] + fn geth_accounts(&self) -> Result>; - /// Create new vault. - #[rpc(name = "parity_newVault")] - fn create_vault(&self, _: String, _: Password) -> Result; + /// Create new vault. + #[rpc(name = "parity_newVault")] + fn create_vault(&self, _: String, _: Password) -> Result; - /// Open existing vault. 
- #[rpc(name = "parity_openVault")] - fn open_vault(&self, _: String, _: Password) -> Result; + /// Open existing vault. + #[rpc(name = "parity_openVault")] + fn open_vault(&self, _: String, _: Password) -> Result; - /// Close previously opened vault. - #[rpc(name = "parity_closeVault")] - fn close_vault(&self, _: String) -> Result; + /// Close previously opened vault. + #[rpc(name = "parity_closeVault")] + fn close_vault(&self, _: String) -> Result; - /// List all vaults. - #[rpc(name = "parity_listVaults")] - fn list_vaults(&self) -> Result>; + /// List all vaults. + #[rpc(name = "parity_listVaults")] + fn list_vaults(&self) -> Result>; - /// List all currently opened vaults. - #[rpc(name = "parity_listOpenedVaults")] - fn list_opened_vaults(&self) -> Result>; + /// List all currently opened vaults. + #[rpc(name = "parity_listOpenedVaults")] + fn list_opened_vaults(&self) -> Result>; - /// Change vault password. - #[rpc(name = "parity_changeVaultPassword")] - fn change_vault_password(&self, _: String, _: Password) -> Result; + /// Change vault password. + #[rpc(name = "parity_changeVaultPassword")] + fn change_vault_password(&self, _: String, _: Password) -> Result; - /// Change vault of the given address. - #[rpc(name = "parity_changeVault")] - fn change_vault(&self, _: H160, _: String) -> Result; + /// Change vault of the given address. + #[rpc(name = "parity_changeVault")] + fn change_vault(&self, _: H160, _: String) -> Result; - /// Get vault metadata string. - #[rpc(name = "parity_getVaultMeta")] - fn get_vault_meta(&self, _: String) -> Result; + /// Get vault metadata string. + #[rpc(name = "parity_getVaultMeta")] + fn get_vault_meta(&self, _: String) -> Result; - /// Set vault metadata string. - #[rpc(name = "parity_setVaultMeta")] - fn set_vault_meta(&self, _: String, _: String) -> Result; + /// Set vault metadata string. 
+ #[rpc(name = "parity_setVaultMeta")] + fn set_vault_meta(&self, _: String, _: String) -> Result; - /// Derive new address from given account address using specific hash. - /// Resulting address can be either saved as a new account (with the same password). - #[rpc(name = "parity_deriveAddressHash")] - fn derive_key_hash(&self, _: H160, _: Password, _: DeriveHash, _: bool) -> Result; + /// Derive new address from given account address using specific hash. + /// Resulting address can be either saved as a new account (with the same password). + #[rpc(name = "parity_deriveAddressHash")] + fn derive_key_hash(&self, _: H160, _: Password, _: DeriveHash, _: bool) -> Result; - /// Derive new address from given account address using - /// hierarchical derivation (sequence of 32-bit integer indices). - /// Resulting address can be either saved as a new account (with the same password). - #[rpc(name = "parity_deriveAddressIndex")] - fn derive_key_index(&self, _: H160, _: Password, _: DeriveHierarchical, _: bool) -> Result; + /// Derive new address from given account address using + /// hierarchical derivation (sequence of 32-bit integer indices). + /// Resulting address can be either saved as a new account (with the same password). + #[rpc(name = "parity_deriveAddressIndex")] + fn derive_key_index( + &self, + _: H160, + _: Password, + _: DeriveHierarchical, + _: bool, + ) -> Result; - /// Exports an account with given address if provided password matches. - #[rpc(name = "parity_exportAccount")] - fn export_account(&self, _: H160, _: Password) -> Result; + /// Exports an account with given address if provided password matches. + #[rpc(name = "parity_exportAccount")] + fn export_account(&self, _: H160, _: Password) -> Result; - /// Sign raw hash with the key corresponding to address and password. - #[rpc(name = "parity_signMessage")] - fn sign_message(&self, _: H160, _: Password, _: H256) -> Result; + /// Sign raw hash with the key corresponding to address and password. 
+ #[rpc(name = "parity_signMessage")] + fn sign_message(&self, _: H160, _: Password, _: H256) -> Result; - /// Send a PinMatrixAck to a hardware wallet, unlocking it - #[rpc(name = "parity_hardwarePinMatrixAck")] - fn hardware_pin_matrix_ack(&self, _: String, _: String) -> Result; + /// Send a PinMatrixAck to a hardware wallet, unlocking it + #[rpc(name = "parity_hardwarePinMatrixAck")] + fn hardware_pin_matrix_ack(&self, _: String, _: String) -> Result; } diff --git a/rpc/src/v1/traits/parity_set.rs b/rpc/src/v1/traits/parity_set.rs index c9b25f03d..ce4dc19cf 100644 --- a/rpc/src/v1/traits/parity_set.rs +++ b/rpc/src/v1/traits/parity_set.rs @@ -25,100 +25,100 @@ use v1::types::{Bytes, ReleaseInfo, Transaction}; /// Parity-specific rpc interface for operations altering the account-related settings. #[rpc(server)] pub trait ParitySetAccounts { - /// Sets account for signing consensus messages. - #[rpc(name = "parity_setEngineSigner")] - fn set_engine_signer(&self, _: H160, _: String) -> Result; + /// Sets account for signing consensus messages. + #[rpc(name = "parity_setEngineSigner")] + fn set_engine_signer(&self, _: H160, _: String) -> Result; } /// Parity-specific rpc interface for operations altering the settings. #[rpc(server)] pub trait ParitySet { - /// Sets new minimal gas price for mined blocks. - #[rpc(name = "parity_setMinGasPrice")] - fn set_min_gas_price(&self, _: U256) -> Result; + /// Sets new minimal gas price for mined blocks. + #[rpc(name = "parity_setMinGasPrice")] + fn set_min_gas_price(&self, _: U256) -> Result; - /// Sets new gas floor target for mined blocks. - #[rpc(name = "parity_setGasFloorTarget")] - fn set_gas_floor_target(&self, _: U256) -> Result; + /// Sets new gas floor target for mined blocks. + #[rpc(name = "parity_setGasFloorTarget")] + fn set_gas_floor_target(&self, _: U256) -> Result; - /// Sets new gas ceiling target for mined blocks. 
- #[rpc(name = "parity_setGasCeilTarget")] - fn set_gas_ceil_target(&self, _: U256) -> Result; + /// Sets new gas ceiling target for mined blocks. + #[rpc(name = "parity_setGasCeilTarget")] + fn set_gas_ceil_target(&self, _: U256) -> Result; - /// Sets new extra data for mined blocks. - #[rpc(name = "parity_setExtraData")] - fn set_extra_data(&self, _: Bytes) -> Result; + /// Sets new extra data for mined blocks. + #[rpc(name = "parity_setExtraData")] + fn set_extra_data(&self, _: Bytes) -> Result; - /// Sets new author for mined block. - #[rpc(name = "parity_setAuthor")] - fn set_author(&self, _: H160) -> Result; + /// Sets new author for mined block. + #[rpc(name = "parity_setAuthor")] + fn set_author(&self, _: H160) -> Result; - /// Sets the secret of engine signer account. - #[rpc(name = "parity_setEngineSignerSecret")] - fn set_engine_signer_secret(&self, _: H256) -> Result; + /// Sets the secret of engine signer account. + #[rpc(name = "parity_setEngineSignerSecret")] + fn set_engine_signer_secret(&self, _: H256) -> Result; - /// Sets the limits for transaction queue. - #[rpc(name = "parity_setTransactionsLimit")] - fn set_transactions_limit(&self, _: usize) -> Result; + /// Sets the limits for transaction queue. + #[rpc(name = "parity_setTransactionsLimit")] + fn set_transactions_limit(&self, _: usize) -> Result; - /// Sets the maximum amount of gas a single transaction may consume. - #[rpc(name = "parity_setMaxTransactionGas")] - fn set_tx_gas_limit(&self, _: U256) -> Result; + /// Sets the maximum amount of gas a single transaction may consume. + #[rpc(name = "parity_setMaxTransactionGas")] + fn set_tx_gas_limit(&self, _: U256) -> Result; - /// Add a reserved peer. - #[rpc(name = "parity_addReservedPeer")] - fn add_reserved_peer(&self, _: String) -> Result; + /// Add a reserved peer. + #[rpc(name = "parity_addReservedPeer")] + fn add_reserved_peer(&self, _: String) -> Result; - /// Remove a reserved peer. 
- #[rpc(name = "parity_removeReservedPeer")] - fn remove_reserved_peer(&self, _: String) -> Result; + /// Remove a reserved peer. + #[rpc(name = "parity_removeReservedPeer")] + fn remove_reserved_peer(&self, _: String) -> Result; - /// Drop all non-reserved peers. - #[rpc(name = "parity_dropNonReservedPeers")] - fn drop_non_reserved_peers(&self) -> Result; + /// Drop all non-reserved peers. + #[rpc(name = "parity_dropNonReservedPeers")] + fn drop_non_reserved_peers(&self) -> Result; - /// Accept non-reserved peers (default behavior) - #[rpc(name = "parity_acceptNonReservedPeers")] - fn accept_non_reserved_peers(&self) -> Result; + /// Accept non-reserved peers (default behavior) + #[rpc(name = "parity_acceptNonReservedPeers")] + fn accept_non_reserved_peers(&self) -> Result; - /// Start the network. - /// - /// @deprecated - Use `set_mode("active")` instead. - #[rpc(name = "parity_startNetwork")] - fn start_network(&self) -> Result; + /// Start the network. + /// + /// @deprecated - Use `set_mode("active")` instead. + #[rpc(name = "parity_startNetwork")] + fn start_network(&self) -> Result; - /// Stop the network. - /// - /// @deprecated - Use `set_mode("offline")` instead. - #[rpc(name = "parity_stopNetwork")] - fn stop_network(&self) -> Result; + /// Stop the network. + /// + /// @deprecated - Use `set_mode("offline")` instead. + #[rpc(name = "parity_stopNetwork")] + fn stop_network(&self) -> Result; - /// Set the mode. Argument must be one of: "active", "passive", "dark", "offline". - #[rpc(name = "parity_setMode")] - fn set_mode(&self, _: String) -> Result; + /// Set the mode. Argument must be one of: "active", "passive", "dark", "offline". + #[rpc(name = "parity_setMode")] + fn set_mode(&self, _: String) -> Result; - /// Set the network spec. Argument must be one of pre-configured chains or a filename. - #[rpc(name = "parity_setChain")] - fn set_spec_name(&self, _: String) -> Result; + /// Set the network spec. 
Argument must be one of pre-configured chains or a filename. + #[rpc(name = "parity_setChain")] + fn set_spec_name(&self, _: String) -> Result; - /// Hash a file content under given URL. - #[rpc(name = "parity_hashContent")] - fn hash_content(&self, _: String) -> BoxFuture; + /// Hash a file content under given URL. + #[rpc(name = "parity_hashContent")] + fn hash_content(&self, _: String) -> BoxFuture; - /// Is there a release ready for install? - #[rpc(name = "parity_upgradeReady")] - fn upgrade_ready(&self) -> Result>; + /// Is there a release ready for install? + #[rpc(name = "parity_upgradeReady")] + fn upgrade_ready(&self) -> Result>; - /// Execute a release which is ready according to upgrade_ready(). - #[rpc(name = "parity_executeUpgrade")] - fn execute_upgrade(&self) -> Result; + /// Execute a release which is ready according to upgrade_ready(). + #[rpc(name = "parity_executeUpgrade")] + fn execute_upgrade(&self) -> Result; - /// Removes transaction from transaction queue. - /// Makes sense only for transactions that were not propagated to other peers yet - /// like scheduled transactions or transactions in future. - /// It might also work for some local transactions with to low gas price - /// or excessive gas limit that are not accepted by other peers whp. - /// Returns `true` when transaction was removed, `false` if it was not found. - #[rpc(name = "parity_removeTransaction")] - fn remove_transaction(&self, _: H256) -> Result>; + /// Removes transaction from transaction queue. + /// Makes sense only for transactions that were not propagated to other peers yet + /// like scheduled transactions or transactions in future. + /// It might also work for some local transactions with to low gas price + /// or excessive gas limit that are not accepted by other peers whp. + /// Returns `true` when transaction was removed, `false` if it was not found. 
+ #[rpc(name = "parity_removeTransaction")] + fn remove_transaction(&self, _: H256) -> Result>; } diff --git a/rpc/src/v1/traits/parity_signing.rs b/rpc/src/v1/traits/parity_signing.rs index 941ff08ba..26d8ae76d 100644 --- a/rpc/src/v1/traits/parity_signing.rs +++ b/rpc/src/v1/traits/parity_signing.rs @@ -19,36 +19,49 @@ use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; use ethereum_types::{H160, U256}; -use v1::types::{Bytes, ConfirmationResponse, TransactionRequest, Either}; +use v1::types::{Bytes, ConfirmationResponse, Either, TransactionRequest}; /// Signing methods implementation. #[rpc(server)] pub trait ParitySigning { - /// RPC Metadata - type Metadata; + /// RPC Metadata + type Metadata; - /// Given partial transaction request produces transaction with all fields filled in. - /// Such transaction can be then signed externally. - #[rpc(meta, name = "parity_composeTransaction")] - fn compose_transaction(&self, _: Self::Metadata, _: TransactionRequest) -> BoxFuture; + /// Given partial transaction request produces transaction with all fields filled in. + /// Such transaction can be then signed externally. + #[rpc(meta, name = "parity_composeTransaction")] + fn compose_transaction( + &self, + _: Self::Metadata, + _: TransactionRequest, + ) -> BoxFuture; - /// Posts sign request asynchronously. - /// Will return a confirmation ID for later use with check_transaction. - #[rpc(meta, name = "parity_postSign")] - fn post_sign(&self, _: Self::Metadata, _: H160, _: Bytes) -> BoxFuture>; + /// Posts sign request asynchronously. + /// Will return a confirmation ID for later use with check_transaction. + #[rpc(meta, name = "parity_postSign")] + fn post_sign( + &self, + _: Self::Metadata, + _: H160, + _: Bytes, + ) -> BoxFuture>; - /// Posts transaction asynchronously. - /// Will return a transaction ID for later use with check_transaction. 
- #[rpc(meta, name = "parity_postTransaction")] - fn post_transaction(&self, _: Self::Metadata, _: TransactionRequest) -> BoxFuture>; + /// Posts transaction asynchronously. + /// Will return a transaction ID for later use with check_transaction. + #[rpc(meta, name = "parity_postTransaction")] + fn post_transaction( + &self, + _: Self::Metadata, + _: TransactionRequest, + ) -> BoxFuture>; - /// Checks the progress of a previously posted request (transaction/sign). - /// Should be given a valid send_transaction ID. - #[rpc(name = "parity_checkRequest")] - fn check_request(&self, _: U256) -> Result>; + /// Checks the progress of a previously posted request (transaction/sign). + /// Should be given a valid send_transaction ID. + #[rpc(name = "parity_checkRequest")] + fn check_request(&self, _: U256) -> Result>; - /// Decrypt some ECIES-encrypted message. - /// First parameter is the address with which it is encrypted, second is the ciphertext. - #[rpc(meta, name = "parity_decryptMessage")] - fn decrypt_message(&self, _: Self::Metadata, _: H160, _: Bytes) -> BoxFuture; + /// Decrypt some ECIES-encrypted message. + /// First parameter is the address with which it is encrypted, second is the ciphertext. + #[rpc(meta, name = "parity_decryptMessage")] + fn decrypt_message(&self, _: Self::Metadata, _: H160, _: Bytes) -> BoxFuture; } diff --git a/rpc/src/v1/traits/personal.rs b/rpc/src/v1/traits/personal.rs index 3e06c02cb..a6e13865f 100644 --- a/rpc/src/v1/traits/personal.rs +++ b/rpc/src/v1/traits/personal.rs @@ -17,58 +17,74 @@ //! Personal rpc interface. 
use eip_712::EIP712; use ethereum_types::{H160, H256, H520, U128}; -use jsonrpc_core::types::Value; -use jsonrpc_core::{BoxFuture, Result}; +use jsonrpc_core::{types::Value, BoxFuture, Result}; use jsonrpc_derive::rpc; -use v1::types::{Bytes, TransactionRequest, RichRawTransaction as RpcRichRawTransaction, EIP191Version}; +use v1::types::{ + Bytes, EIP191Version, RichRawTransaction as RpcRichRawTransaction, TransactionRequest, +}; /// Personal rpc interface. Safe (read-only) functions. #[rpc(server)] pub trait Personal { - /// RPC Metadata - type Metadata; + /// RPC Metadata + type Metadata; - /// Lists all stored accounts - #[rpc(name = "personal_listAccounts")] - fn accounts(&self) -> Result>; + /// Lists all stored accounts + #[rpc(name = "personal_listAccounts")] + fn accounts(&self) -> Result>; - /// Creates new account (it becomes new current unlocked account) - /// Param is the password for the account. - #[rpc(name = "personal_newAccount")] - fn new_account(&self, _: String) -> Result; + /// Creates new account (it becomes new current unlocked account) + /// Param is the password for the account. + #[rpc(name = "personal_newAccount")] + fn new_account(&self, _: String) -> Result; - /// Unlocks specified account for use (can only be one unlocked account at one moment) - #[rpc(name = "personal_unlockAccount")] - fn unlock_account(&self, _: H160, _: String, _: Option) -> Result; + /// Unlocks specified account for use (can only be one unlocked account at one moment) + #[rpc(name = "personal_unlockAccount")] + fn unlock_account(&self, _: H160, _: String, _: Option) -> Result; - /// Signs the hash of data with given account signature using the given password to unlock the account during - /// the request. - #[rpc(name = "personal_sign")] - fn sign(&self, _: Bytes, _: H160, _: String) -> BoxFuture; + /// Signs the hash of data with given account signature using the given password to unlock the account during + /// the request. 
+ #[rpc(name = "personal_sign")] + fn sign(&self, _: Bytes, _: H160, _: String) -> BoxFuture; - /// Produces an EIP-712 compliant signature with given account using the given password to unlock the - /// account during the request. - #[rpc(name = "personal_signTypedData")] - fn sign_typed_data(&self, _: EIP712, _: H160, _: String) -> BoxFuture; + /// Produces an EIP-712 compliant signature with given account using the given password to unlock the + /// account during the request. + #[rpc(name = "personal_signTypedData")] + fn sign_typed_data(&self, _: EIP712, _: H160, _: String) -> BoxFuture; - /// Signs an arbitrary message based on the version specified - #[rpc(name = "personal_sign191")] - fn sign_191(&self, _: EIP191Version, _: Value, _: H160, _: String) -> BoxFuture; + /// Signs an arbitrary message based on the version specified + #[rpc(name = "personal_sign191")] + fn sign_191(&self, _: EIP191Version, _: Value, _: H160, _: String) -> BoxFuture; - /// Returns the account associated with the private key that was used to calculate the signature in - /// `personal_sign`. - #[rpc(name = "personal_ecRecover")] - fn ec_recover(&self, _: Bytes, _: H520) -> BoxFuture; + /// Returns the account associated with the private key that was used to calculate the signature in + /// `personal_sign`. + #[rpc(name = "personal_ecRecover")] + fn ec_recover(&self, _: Bytes, _: H520) -> BoxFuture; - /// Signs transaction. The account is not unlocked in such case. - #[rpc(meta, name = "personal_signTransaction")] - fn sign_transaction(&self, _: Self::Metadata, _: TransactionRequest, _: String) -> BoxFuture; + /// Signs transaction. The account is not unlocked in such case. + #[rpc(meta, name = "personal_signTransaction")] + fn sign_transaction( + &self, + _: Self::Metadata, + _: TransactionRequest, + _: String, + ) -> BoxFuture; - /// Sends transaction and signs it in single call. The account is not unlocked in such case. 
- #[rpc(meta, name = "personal_sendTransaction")] - fn send_transaction(&self, _: Self::Metadata, _: TransactionRequest, _: String) -> BoxFuture; + /// Sends transaction and signs it in single call. The account is not unlocked in such case. + #[rpc(meta, name = "personal_sendTransaction")] + fn send_transaction( + &self, + _: Self::Metadata, + _: TransactionRequest, + _: String, + ) -> BoxFuture; - /// @deprecated alias for `personal_sendTransaction`. - #[rpc(meta, name = "personal_signAndSendTransaction")] - fn sign_and_send_transaction(&self, _: Self::Metadata, _: TransactionRequest, _: String) -> BoxFuture; + /// @deprecated alias for `personal_sendTransaction`. + #[rpc(meta, name = "personal_signAndSendTransaction")] + fn sign_and_send_transaction( + &self, + _: Self::Metadata, + _: TransactionRequest, + _: String, + ) -> BoxFuture; } diff --git a/rpc/src/v1/traits/private.rs b/rpc/src/v1/traits/private.rs index 0dedbf374..8772079a6 100644 --- a/rpc/src/v1/traits/private.rs +++ b/rpc/src/v1/traits/private.rs @@ -20,28 +20,36 @@ use ethereum_types::{H160, H256, U256}; use jsonrpc_core::Error; use jsonrpc_derive::rpc; -use v1::types::{Bytes, PrivateTransactionReceipt, BlockNumber, - PrivateTransactionReceiptAndTransaction, CallRequest}; +use v1::types::{ + BlockNumber, Bytes, CallRequest, PrivateTransactionReceipt, + PrivateTransactionReceiptAndTransaction, +}; /// Private transaction management RPC interface. #[rpc(server)] pub trait Private { - /// RPC Metadata - type Metadata; + /// RPC Metadata + type Metadata; - /// Sends private transaction; Transaction will be added to the validation queue and sent out when ready. - #[rpc(name = "private_sendTransaction")] - fn send_transaction(&self, _: Bytes) -> Result; + /// Sends private transaction; Transaction will be added to the validation queue and sent out when ready. 
+ #[rpc(name = "private_sendTransaction")] + fn send_transaction(&self, _: Bytes) -> Result; - /// Creates a transaction for contract's deployment from origin (signed transaction) - #[rpc(name = "private_composeDeploymentTransaction")] - fn compose_deployment_transaction(&self, _: BlockNumber, _: Bytes, _: Vec, _: U256) -> Result; + /// Creates a transaction for contract's deployment from origin (signed transaction) + #[rpc(name = "private_composeDeploymentTransaction")] + fn compose_deployment_transaction( + &self, + _: BlockNumber, + _: Bytes, + _: Vec, + _: U256, + ) -> Result; - /// Make a call to the private contract - #[rpc(name = "private_call")] - fn private_call(&self, _: BlockNumber, _: CallRequest) -> Result; + /// Make a call to the private contract + #[rpc(name = "private_call")] + fn private_call(&self, _: BlockNumber, _: CallRequest) -> Result; - /// Retrieve the id of the key associated with the contract - #[rpc(name = "private_contractKey")] - fn private_contract_key(&self, _: H160) -> Result; + /// Retrieve the id of the key associated with the contract + #[rpc(name = "private_contractKey")] + fn private_contract_key(&self, _: H160) -> Result; } diff --git a/rpc/src/v1/traits/pubsub.rs b/rpc/src/v1/traits/pubsub.rs index 6cb97ec3a..d68447d58 100644 --- a/rpc/src/v1/traits/pubsub.rs +++ b/rpc/src/v1/traits/pubsub.rs @@ -16,21 +16,35 @@ //! Parity-specific PUB-SUB rpc interface. -use jsonrpc_core::{Result, Value, Params}; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; +use jsonrpc_core::{Params, Result, Value}; use jsonrpc_derive::rpc; +use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; /// Parity-specific PUB-SUB rpc interface. #[rpc(server)] pub trait PubSub { - /// Pub/Sub Metadata - type Metadata; + /// Pub/Sub Metadata + type Metadata; - /// Subscribe to changes of any RPC method in Parity. 
- #[pubsub(subscription = "parity_subscription", subscribe, name = "parity_subscribe")] - fn parity_subscribe(&self, _: Self::Metadata, _: Subscriber, _: String, _: Option); + /// Subscribe to changes of any RPC method in Parity. + #[pubsub( + subscription = "parity_subscription", + subscribe, + name = "parity_subscribe" + )] + fn parity_subscribe( + &self, + _: Self::Metadata, + _: Subscriber, + _: String, + _: Option, + ); - /// Unsubscribe from existing Parity subscription. - #[pubsub(subscription = "parity_subscription", unsubscribe, name = "parity_unsubscribe")] - fn parity_unsubscribe(&self, _: Option, _: SubscriptionId) -> Result; + /// Unsubscribe from existing Parity subscription. + #[pubsub( + subscription = "parity_subscription", + unsubscribe, + name = "parity_unsubscribe" + )] + fn parity_unsubscribe(&self, _: Option, _: SubscriptionId) -> Result; } diff --git a/rpc/src/v1/traits/rpc.rs b/rpc/src/v1/traits/rpc.rs index b1faa3b11..88b85e277 100644 --- a/rpc/src/v1/traits/rpc.rs +++ b/rpc/src/v1/traits/rpc.rs @@ -24,13 +24,13 @@ use jsonrpc_derive::rpc; /// RPC Interface. 
#[rpc(server)] pub trait Rpc { - /// Returns supported modules for Geth 1.3.6 - /// @ignore - #[rpc(name = "modules")] - fn modules(&self) -> Result>; + /// Returns supported modules for Geth 1.3.6 + /// @ignore + #[rpc(name = "modules")] + fn modules(&self) -> Result>; - /// Returns supported modules for Geth 1.4.0 - /// @ignore - #[rpc(name = "rpc_modules")] - fn rpc_modules(&self) -> Result>; + /// Returns supported modules for Geth 1.4.0 + /// @ignore + #[rpc(name = "rpc_modules")] + fn rpc_modules(&self) -> Result>; } diff --git a/rpc/src/v1/traits/secretstore.rs b/rpc/src/v1/traits/secretstore.rs index 2e355dd56..37fca5c36 100644 --- a/rpc/src/v1/traits/secretstore.rs +++ b/rpc/src/v1/traits/secretstore.rs @@ -18,44 +18,52 @@ use std::collections::BTreeSet; -use jsonrpc_core::Result; -use jsonrpc_derive::rpc; use ethereum_types::{H160, H256, H512}; use ethkey::Password; +use jsonrpc_core::Result; +use jsonrpc_derive::rpc; use v1::types::{Bytes, EncryptedDocumentKey}; /// Parity-specific rpc interface. #[rpc(server)] pub trait SecretStore { - /// Generate document key to store in secret store. - /// Arguments: `account`, `password`, `server_key_public`. - #[rpc(name = "secretstore_generateDocumentKey")] - fn generate_document_key(&self, _: H160, _: Password, _: H512) -> Result; + /// Generate document key to store in secret store. + /// Arguments: `account`, `password`, `server_key_public`. + #[rpc(name = "secretstore_generateDocumentKey")] + fn generate_document_key(&self, _: H160, _: Password, _: H512) -> Result; - /// Encrypt data with key, received from secret store. - /// Arguments: `account`, `password`, `key`, `data`. - #[rpc(name = "secretstore_encrypt")] - fn encrypt(&self, _: H160, _: Password, _: Bytes, _: Bytes) -> Result; + /// Encrypt data with key, received from secret store. + /// Arguments: `account`, `password`, `key`, `data`. 
+ #[rpc(name = "secretstore_encrypt")] + fn encrypt(&self, _: H160, _: Password, _: Bytes, _: Bytes) -> Result; - /// Decrypt data with key, received from secret store. - /// Arguments: `account`, `password`, `key`, `data`. - #[rpc(name = "secretstore_decrypt")] - fn decrypt(&self, _: H160, _: Password, _: Bytes, _: Bytes) -> Result; + /// Decrypt data with key, received from secret store. + /// Arguments: `account`, `password`, `key`, `data`. + #[rpc(name = "secretstore_decrypt")] + fn decrypt(&self, _: H160, _: Password, _: Bytes, _: Bytes) -> Result; - /// Decrypt data with shadow key, received from secret store. - /// Arguments: `account`, `password`, `decrypted_secret`, `common_point`, `decrypt_shadows`, `data`. - #[rpc(name = "secretstore_shadowDecrypt")] - fn shadow_decrypt(&self, _: H160, _: Password, _: H512, _: H512, _: Vec, _: Bytes) -> Result; + /// Decrypt data with shadow key, received from secret store. + /// Arguments: `account`, `password`, `decrypted_secret`, `common_point`, `decrypt_shadows`, `data`. + #[rpc(name = "secretstore_shadowDecrypt")] + fn shadow_decrypt( + &self, + _: H160, + _: Password, + _: H512, + _: H512, + _: Vec, + _: Bytes, + ) -> Result; - /// Calculates the hash (keccak256) of servers set for using in ServersSetChange session. - /// Returned hash must be signed later by using `secretstore_signRawHash` method. - /// Arguments: `servers_set`. - #[rpc(name = "secretstore_serversSetHash")] - fn servers_set_hash(&self, _: BTreeSet) -> Result; + /// Calculates the hash (keccak256) of servers set for using in ServersSetChange session. + /// Returned hash must be signed later by using `secretstore_signRawHash` method. + /// Arguments: `servers_set`. + #[rpc(name = "secretstore_serversSetHash")] + fn servers_set_hash(&self, _: BTreeSet) -> Result; - /// Generate recoverable ECDSA signature of raw hash. - /// Passed hash is treated as an input to the `sign` function (no prefixes added, no hash function is applied). 
- /// Arguments: `account`, `password`, `raw_hash`. - #[rpc(name = "secretstore_signRawHash")] - fn sign_raw_hash(&self, _: H160, _: Password, _: H256) -> Result; + /// Generate recoverable ECDSA signature of raw hash. + /// Passed hash is treated as an input to the `sign` function (no prefixes added, no hash function is applied). + /// Arguments: `account`, `password`, `raw_hash`. + #[rpc(name = "secretstore_signRawHash")] + fn sign_raw_hash(&self, _: H160, _: Password, _: H256) -> Result; } diff --git a/rpc/src/v1/traits/signer.rs b/rpc/src/v1/traits/signer.rs index ba84d850a..3b04e4b44 100644 --- a/rpc/src/v1/traits/signer.rs +++ b/rpc/src/v1/traits/signer.rs @@ -18,46 +18,67 @@ use ethereum_types::U256; use jsonrpc_core::{BoxFuture, Result}; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use jsonrpc_derive::rpc; +use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use v1::types::{Bytes, TransactionModification, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken}; +use v1::types::{ + Bytes, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken, + TransactionModification, +}; /// Signer extension for confirmations rpc interface. #[rpc(server)] pub trait Signer { - /// RPC Metadata - type Metadata; + /// RPC Metadata + type Metadata; - /// Returns a list of items to confirm. - #[rpc(name = "signer_requestsToConfirm")] - fn requests_to_confirm(&self) -> Result>; + /// Returns a list of items to confirm. + #[rpc(name = "signer_requestsToConfirm")] + fn requests_to_confirm(&self) -> Result>; - /// Confirm specific request. - #[rpc(name = "signer_confirmRequest")] - fn confirm_request(&self, _: U256, _: TransactionModification, _: String) -> BoxFuture; + /// Confirm specific request. + #[rpc(name = "signer_confirmRequest")] + fn confirm_request( + &self, + _: U256, + _: TransactionModification, + _: String, + ) -> BoxFuture; - /// Confirm specific request with token. 
- #[rpc(name = "signer_confirmRequestWithToken")] - fn confirm_request_with_token(&self, _: U256, _: TransactionModification, _: String) -> BoxFuture; + /// Confirm specific request with token. + #[rpc(name = "signer_confirmRequestWithToken")] + fn confirm_request_with_token( + &self, + _: U256, + _: TransactionModification, + _: String, + ) -> BoxFuture; - /// Confirm specific request with already signed data. - #[rpc(name = "signer_confirmRequestRaw")] - fn confirm_request_raw(&self, _: U256, _: Bytes) -> Result; + /// Confirm specific request with already signed data. + #[rpc(name = "signer_confirmRequestRaw")] + fn confirm_request_raw(&self, _: U256, _: Bytes) -> Result; - /// Reject the confirmation request. - #[rpc(name = "signer_rejectRequest")] - fn reject_request(&self, _: U256) -> Result; + /// Reject the confirmation request. + #[rpc(name = "signer_rejectRequest")] + fn reject_request(&self, _: U256) -> Result; - /// Generates new authorization token. - #[rpc(name = "signer_generateAuthorizationToken")] - fn generate_token(&self) -> Result; + /// Generates new authorization token. + #[rpc(name = "signer_generateAuthorizationToken")] + fn generate_token(&self) -> Result; - /// Subscribe to new pending requests on signer interface. - #[pubsub(subscription = "signer_pending", subscribe, name = "signer_subscribePending")] - fn subscribe_pending(&self, _: Self::Metadata, _: Subscriber>); + /// Subscribe to new pending requests on signer interface. + #[pubsub( + subscription = "signer_pending", + subscribe, + name = "signer_subscribePending" + )] + fn subscribe_pending(&self, _: Self::Metadata, _: Subscriber>); - /// Unsubscribe from pending requests subscription. - #[pubsub(subscription = "signer_pending", unsubscribe, name = "signer_unsubscribePending")] - fn unsubscribe_pending(&self, _: Option, _: SubscriptionId) -> Result; + /// Unsubscribe from pending requests subscription. 
+ #[pubsub( + subscription = "signer_pending", + unsubscribe, + name = "signer_unsubscribePending" + )] + fn unsubscribe_pending(&self, _: Option, _: SubscriptionId) -> Result; } diff --git a/rpc/src/v1/traits/traces.rs b/rpc/src/v1/traits/traces.rs index b33f94b21..78cf01f41 100644 --- a/rpc/src/v1/traits/traces.rs +++ b/rpc/src/v1/traits/traces.rs @@ -19,48 +19,64 @@ use ethereum_types::H256; use jsonrpc_core::Result; use jsonrpc_derive::rpc; -use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, - TraceResultsWithTransactionHash, TraceOptions}; +use v1::types::{ + BlockNumber, Bytes, CallRequest, Index, LocalizedTrace, TraceFilter, TraceOptions, + TraceResults, TraceResultsWithTransactionHash, +}; /// Traces specific rpc interface. #[rpc(server)] pub trait Traces { - /// RPC Metadata - type Metadata; + /// RPC Metadata + type Metadata; - /// Returns traces matching given filter. - #[rpc(name = "trace_filter")] - fn filter(&self, _: TraceFilter) -> Result>>; + /// Returns traces matching given filter. + #[rpc(name = "trace_filter")] + fn filter(&self, _: TraceFilter) -> Result>>; - /// Returns transaction trace at given index. - #[rpc(name = "trace_get")] - fn trace(&self, _: H256, _: Vec) -> Result>; + /// Returns transaction trace at given index. + #[rpc(name = "trace_get")] + fn trace(&self, _: H256, _: Vec) -> Result>; - /// Returns all traces of given transaction. - #[rpc(name = "trace_transaction")] - fn transaction_traces(&self, _: H256) -> Result>>; + /// Returns all traces of given transaction. + #[rpc(name = "trace_transaction")] + fn transaction_traces(&self, _: H256) -> Result>>; - /// Returns all traces produced at given block. - #[rpc(name = "trace_block")] - fn block_traces(&self, _: BlockNumber) -> Result>>; + /// Returns all traces produced at given block. 
+ #[rpc(name = "trace_block")] + fn block_traces(&self, _: BlockNumber) -> Result>>; - /// Executes the given call and returns a number of possible traces for it. - #[rpc(name = "trace_call")] - fn call(&self, _: CallRequest, _: TraceOptions, _: Option) -> Result; + /// Executes the given call and returns a number of possible traces for it. + #[rpc(name = "trace_call")] + fn call(&self, _: CallRequest, _: TraceOptions, _: Option) + -> Result; - /// Executes all given calls and returns a number of possible traces for each of it. - #[rpc(name = "trace_callMany")] - fn call_many(&self, _: Vec<(CallRequest, TraceOptions)>, _: Option) -> Result>; + /// Executes all given calls and returns a number of possible traces for each of it. + #[rpc(name = "trace_callMany")] + fn call_many( + &self, + _: Vec<(CallRequest, TraceOptions)>, + _: Option, + ) -> Result>; - /// Executes the given raw transaction and returns a number of possible traces for it. - #[rpc(name = "trace_rawTransaction")] - fn raw_transaction(&self, _: Bytes, _: TraceOptions, _: Option) -> Result; + /// Executes the given raw transaction and returns a number of possible traces for it. + #[rpc(name = "trace_rawTransaction")] + fn raw_transaction( + &self, + _: Bytes, + _: TraceOptions, + _: Option, + ) -> Result; - /// Executes the transaction with the given hash and returns a number of possible traces for it. - #[rpc(name = "trace_replayTransaction")] - fn replay_transaction(&self, _: H256, _: TraceOptions) -> Result; + /// Executes the transaction with the given hash and returns a number of possible traces for it. + #[rpc(name = "trace_replayTransaction")] + fn replay_transaction(&self, _: H256, _: TraceOptions) -> Result; - /// Executes all the transactions at the given block and returns a number of possible traces for each transaction. 
- #[rpc(name = "trace_replayBlockTransactions")] - fn replay_block_transactions(&self, _: BlockNumber, _: TraceOptions) -> Result>; + /// Executes all the transactions at the given block and returns a number of possible traces for each transaction. + #[rpc(name = "trace_replayBlockTransactions")] + fn replay_block_transactions( + &self, + _: BlockNumber, + _: TraceOptions, + ) -> Result>; } diff --git a/rpc/src/v1/traits/web3.rs b/rpc/src/v1/traits/web3.rs index cdeab6d7c..758f55fd8 100644 --- a/rpc/src/v1/traits/web3.rs +++ b/rpc/src/v1/traits/web3.rs @@ -24,11 +24,11 @@ use v1::types::Bytes; /// Web3 rpc interface. #[rpc(server)] pub trait Web3 { - /// Returns current client version. - #[rpc(name = "web3_clientVersion")] - fn client_version(&self) -> Result; + /// Returns current client version. + #[rpc(name = "web3_clientVersion")] + fn client_version(&self) -> Result; - /// Returns sha3 of the given data - #[rpc(name = "web3_sha3")] - fn sha3(&self, _: Bytes) -> Result; + /// Returns sha3 of the given data + #[rpc(name = "web3_sha3")] + fn sha3(&self, _: Bytes) -> Result; } diff --git a/rpc/src/v1/types/account_info.rs b/rpc/src/v1/types/account_info.rs index 6d7585f87..df8640aea 100644 --- a/rpc/src/v1/types/account_info.rs +++ b/rpc/src/v1/types/account_info.rs @@ -16,71 +16,71 @@ //! Return types for RPC calls -use ethereum_types::{Public, Address, H160, H256, U256}; +use ethereum_types::{Address, Public, H160, H256, U256}; use v1::types::Bytes; /// Account information. #[derive(Debug, Default, Clone, PartialEq, Serialize)] pub struct AccountInfo { - /// Account name - pub name: String, + /// Account name + pub name: String, } /// Datastructure with proof for one single storage-entry #[derive(Debug, Default, Clone, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct StorageProof { - pub key: U256, - pub value: U256, - pub proof: Vec + pub key: U256, + pub value: U256, + pub proof: Vec, } /// Account information. 
#[derive(Debug, Default, Clone, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct EthAccount { - pub address: H160, - pub balance: U256, - pub nonce: U256, - pub code_hash: H256, - pub storage_hash: H256, - pub account_proof: Vec, - pub storage_proof: Vec, + pub address: H160, + pub balance: U256, + pub nonce: U256, + pub code_hash: H256, + pub storage_hash: H256, + pub account_proof: Vec, + pub storage_proof: Vec, } /// Extended account information (used by `parity_allAccountInfo`). #[derive(Debug, Default, Clone, PartialEq, Serialize)] pub struct ExtAccountInfo { - /// Account name - pub name: String, - /// Account meta JSON - pub meta: String, - /// Account UUID (`None` for address book entries) - #[serde(skip_serializing_if = "Option::is_none")] - pub uuid: Option, + /// Account name + pub name: String, + /// Account meta JSON + pub meta: String, + /// Account UUID (`None` for address book entries) + #[serde(skip_serializing_if = "Option::is_none")] + pub uuid: Option, } /// Hardware wallet information. #[derive(Debug, Default, Clone, PartialEq, Serialize)] pub struct HwAccountInfo { - /// Device name. - pub name: String, - /// Device manufacturer. - pub manufacturer: String, + /// Device name. + pub name: String, + /// Device manufacturer. + pub manufacturer: String, } /// account derived from a signature /// as well as information that tells if it is valid for /// the current chain #[derive(Debug, Clone, Serialize)] -#[serde(rename_all="camelCase")] +#[serde(rename_all = "camelCase")] pub struct RecoveredAccount { - /// address of the recovered account - pub address: Address, - /// public key of the recovered account - pub public_key: Public, - /// If the signature contains chain replay protection, - /// And the chain_id encoded within the signature - /// matches the current chain this would be true, otherwise false. 
- pub is_valid_for_current_chain: bool + /// address of the recovered account + pub address: Address, + /// public key of the recovered account + pub public_key: Public, + /// If the signature contains chain replay protection, + /// And the chain_id encoded within the signature + /// matches the current chain this would be true, otherwise false. + pub is_valid_for_current_chain: bool, } diff --git a/rpc/src/v1/types/block.rs b/rpc/src/v1/types/block.rs index 61f4402af..c0471533d 100644 --- a/rpc/src/v1/types/block.rs +++ b/rpc/src/v1/types/block.rs @@ -14,131 +14,131 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::ops::Deref; -use std::collections::BTreeMap; +use std::{collections::BTreeMap, ops::Deref}; -use ethereum_types::{H160, H256, U256, Bloom as H2048}; -use serde::ser::Error; -use serde::{Serialize, Serializer}; +use ethereum_types::{Bloom as H2048, H160, H256, U256}; +use serde::{ser::Error, Serialize, Serializer}; use types::encoded::Header as EthHeader; use v1::types::{Bytes, Transaction}; /// Block Transactions #[derive(Debug)] pub enum BlockTransactions { - /// Only hashes - Hashes(Vec), - /// Full transactions - Full(Vec) + /// Only hashes + Hashes(Vec), + /// Full transactions + Full(Vec), } impl Serialize for BlockTransactions { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - match *self { - BlockTransactions::Hashes(ref hashes) => hashes.serialize(serializer), - BlockTransactions::Full(ref ts) => ts.serialize(serializer) - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + BlockTransactions::Hashes(ref hashes) => hashes.serialize(serializer), + BlockTransactions::Full(ref ts) => ts.serialize(serializer), + } + } } /// Block representation #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct Block { - /// Hash of the block - pub hash: Option, - /// Hash of the parent - pub 
parent_hash: H256, - /// Hash of the uncles - #[serde(rename = "sha3Uncles")] - pub uncles_hash: H256, - /// Authors address - pub author: H160, - /// Alias of `author` - pub miner: H160, - /// State root hash - pub state_root: H256, - /// Transactions root hash - pub transactions_root: H256, - /// Transactions receipts root hash - pub receipts_root: H256, - /// Block number - pub number: Option, - /// Gas Used - pub gas_used: U256, - /// Gas Limit - pub gas_limit: U256, - /// Extra data - pub extra_data: Bytes, - /// Logs bloom - pub logs_bloom: Option, - /// Timestamp - pub timestamp: U256, - /// Difficulty - pub difficulty: U256, - /// Total difficulty - pub total_difficulty: Option, - /// Seal fields - pub seal_fields: Vec, - /// Uncles' hashes - pub uncles: Vec, - /// Transactions - pub transactions: BlockTransactions, - /// Size in bytes - pub size: Option, + /// Hash of the block + pub hash: Option, + /// Hash of the parent + pub parent_hash: H256, + /// Hash of the uncles + #[serde(rename = "sha3Uncles")] + pub uncles_hash: H256, + /// Authors address + pub author: H160, + /// Alias of `author` + pub miner: H160, + /// State root hash + pub state_root: H256, + /// Transactions root hash + pub transactions_root: H256, + /// Transactions receipts root hash + pub receipts_root: H256, + /// Block number + pub number: Option, + /// Gas Used + pub gas_used: U256, + /// Gas Limit + pub gas_limit: U256, + /// Extra data + pub extra_data: Bytes, + /// Logs bloom + pub logs_bloom: Option, + /// Timestamp + pub timestamp: U256, + /// Difficulty + pub difficulty: U256, + /// Total difficulty + pub total_difficulty: Option, + /// Seal fields + pub seal_fields: Vec, + /// Uncles' hashes + pub uncles: Vec, + /// Transactions + pub transactions: BlockTransactions, + /// Size in bytes + pub size: Option, } /// Block header representation. 
#[derive(Debug, Clone, Serialize, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct Header { - /// Hash of the block - pub hash: Option, - /// Hash of the parent - pub parent_hash: H256, - /// Hash of the uncles - #[serde(rename = "sha3Uncles")] - pub uncles_hash: H256, - /// Authors address - pub author: H160, - /// Alias of `author` - pub miner: H160, - /// State root hash - pub state_root: H256, - /// Transactions root hash - pub transactions_root: H256, - /// Transactions receipts root hash - pub receipts_root: H256, - /// Block number - pub number: Option, - /// Gas Used - pub gas_used: U256, - /// Gas Limit - pub gas_limit: U256, - /// Extra data - pub extra_data: Bytes, - /// Logs bloom - pub logs_bloom: H2048, - /// Timestamp - pub timestamp: U256, - /// Difficulty - pub difficulty: U256, - /// Seal fields - pub seal_fields: Vec, - /// Size in bytes - pub size: Option, + /// Hash of the block + pub hash: Option, + /// Hash of the parent + pub parent_hash: H256, + /// Hash of the uncles + #[serde(rename = "sha3Uncles")] + pub uncles_hash: H256, + /// Authors address + pub author: H160, + /// Alias of `author` + pub miner: H160, + /// State root hash + pub state_root: H256, + /// Transactions root hash + pub transactions_root: H256, + /// Transactions receipts root hash + pub receipts_root: H256, + /// Block number + pub number: Option, + /// Gas Used + pub gas_used: U256, + /// Gas Limit + pub gas_limit: U256, + /// Extra data + pub extra_data: Bytes, + /// Logs bloom + pub logs_bloom: H2048, + /// Timestamp + pub timestamp: U256, + /// Difficulty + pub difficulty: U256, + /// Seal fields + pub seal_fields: Vec, + /// Size in bytes + pub size: Option, } impl From for Header { - fn from(h: EthHeader) -> Self { - (&h).into() - } + fn from(h: EthHeader) -> Self { + (&h).into() + } } impl<'a> From<&'a EthHeader> for Header { - fn from(h: &'a EthHeader) -> Self { - Header { + fn from(h: &'a EthHeader) -> Self { + Header { hash: Some(h.hash()), size: 
Some(h.rlp().as_raw().len().into()), parent_hash: h.parent_hash(), @@ -159,7 +159,7 @@ impl<'a> From<&'a EthHeader> for Header { .expect("Client/Miner returns only valid headers. We only serialize headers from Client/Miner; qed") .into_iter().map(Into::into).collect(), } - } + } } /// Block representation with additional info. @@ -171,164 +171,193 @@ pub type RichHeader = Rich
; /// Value representation with additional info #[derive(Debug, Clone, PartialEq, Eq)] pub struct Rich { - /// Standard value. - pub inner: T, - /// Engine-specific fields with additional description. - /// Should be included directly to serialized block object. - // TODO [ToDr] #[serde(skip_serializing)] - pub extra_info: BTreeMap, + /// Standard value. + pub inner: T, + /// Engine-specific fields with additional description. + /// Should be included directly to serialized block object. + // TODO [ToDr] #[serde(skip_serializing)] + pub extra_info: BTreeMap, } impl Deref for Rich { - type Target = T; - fn deref(&self) -> &Self::Target { - &self.inner - } + type Target = T; + fn deref(&self) -> &Self::Target { + &self.inner + } } impl Serialize for Rich { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - use serde_json::{to_value, Value}; + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use serde_json::{to_value, Value}; - let serialized = (to_value(&self.inner), to_value(&self.extra_info)); - if let (Ok(Value::Object(mut value)), Ok(Value::Object(extras))) = serialized { - // join two objects - value.extend(extras); - // and serialize - value.serialize(serializer) - } else { - Err(S::Error::custom("Unserializable structures: expected objects")) - } - } + let serialized = (to_value(&self.inner), to_value(&self.extra_info)); + if let (Ok(Value::Object(mut value)), Ok(Value::Object(extras))) = serialized { + // join two objects + value.extend(extras); + // and serialize + value.serialize(serializer) + } else { + Err(S::Error::custom( + "Unserializable structures: expected objects", + )) + } + } } #[cfg(test)] mod tests { - use std::collections::BTreeMap; - use ethereum_types::{H64, H160, H256, U256, Bloom as H2048}; - use serde_json; - use v1::types::{Transaction, Bytes}; - use super::{Block, RichBlock, BlockTransactions, Header, RichHeader}; + use super::{Block, BlockTransactions, Header, RichBlock, RichHeader}; + use 
ethereum_types::{Bloom as H2048, H160, H256, H64, U256}; + use serde_json; + use std::collections::BTreeMap; + use v1::types::{Bytes, Transaction}; - #[test] - fn test_serialize_block_transactions() { - let t = BlockTransactions::Full(vec![Transaction::default()]); - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null,"chainId":null,"standardV":"0x0","v":"0x0","r":"0x0","s":"0x0","condition":null}]"#); + #[test] + fn test_serialize_block_transactions() { + let t = BlockTransactions::Full(vec![Transaction::default()]); + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null,"chainId":null,"standardV":"0x0","v":"0x0","r":"0x0","s":"0x0","condition":null}]"# + ); - let t = BlockTransactions::Hashes(vec![H256::default().into()]); - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"["0x0000000000000000000000000000000000000000000000000000000000000000"]"#); - } + let t = BlockTransactions::Hashes(vec![H256::default().into()]); + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"["0x0000000000000000000000000000000000000000000000000000000000000000"]"# + ); + } - #[test] - fn test_serialize_block() { - let block = Block { - hash: Some(H256::default()), - parent_hash: H256::default(), - uncles_hash: H256::default(), - author: 
H160::default(), - miner: H160::default(), - state_root: H256::default(), - transactions_root: H256::default(), - receipts_root: H256::default(), - number: Some(U256::default()), - gas_used: U256::default(), - gas_limit: U256::default(), - extra_data: Bytes::default(), - logs_bloom: Some(H2048::default()), - timestamp: U256::default(), - difficulty: U256::default(), - total_difficulty: Some(U256::default()), - seal_fields: vec![Bytes::default(), Bytes::default()], - uncles: vec![], - transactions: BlockTransactions::Hashes(vec![].into()), - size: Some(69.into()), - }; - let serialized_block = serde_json::to_string(&block).unwrap(); - let rich_block = RichBlock { - inner: block, - extra_info: map![ - "mixHash".into() => format!("{:?}", H256::default()), - "nonce".into() => format!("{:?}", H64::default()) - ], - }; - let serialized_rich_block = serde_json::to_string(&rich_block).unwrap(); + #[test] + fn test_serialize_block() { + let block = Block { + hash: Some(H256::default()), + parent_hash: H256::default(), + uncles_hash: H256::default(), + author: H160::default(), + miner: H160::default(), + state_root: H256::default(), + transactions_root: H256::default(), + receipts_root: H256::default(), + number: Some(U256::default()), + gas_used: U256::default(), + gas_limit: U256::default(), + extra_data: Bytes::default(), + logs_bloom: Some(H2048::default()), + timestamp: U256::default(), + difficulty: U256::default(), + total_difficulty: Some(U256::default()), + seal_fields: vec![Bytes::default(), Bytes::default()], + uncles: vec![], + transactions: BlockTransactions::Hashes(vec![].into()), + size: Some(69.into()), + }; + let serialized_block = serde_json::to_string(&block).unwrap(); + let rich_block = RichBlock { + inner: block, + extra_info: map![ + "mixHash".into() => format!("{:?}", H256::default()), + "nonce".into() => format!("{:?}", H64::default()) + ], + }; + let serialized_rich_block = serde_json::to_string(&rich_block).unwrap(); - assert_eq!(serialized_block, 
r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","author":"0x0000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","number":"0x0","gasUsed":"0x0","gasLimit":"0x0","extraData":"0x","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","difficulty":"0x0","totalDifficulty":"0x0","sealFields":["0x","0x"],"uncles":[],"transactions":[],"size":"0x45"}"#); - assert_eq!(serialized_rich_block, 
r#"{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","sealFields":["0x","0x"],"sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","size":"0x45","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","totalDifficulty":"0x0","transactions":[],"transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","uncles":[]}"#); - } + assert_eq!( + serialized_block, + 
r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","author":"0x0000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","number":"0x0","gasUsed":"0x0","gasLimit":"0x0","extraData":"0x","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","difficulty":"0x0","totalDifficulty":"0x0","sealFields":["0x","0x"],"uncles":[],"transactions":[],"size":"0x45"}"# + ); + assert_eq!( + serialized_rich_block, + 
r#"{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","sealFields":["0x","0x"],"sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","size":"0x45","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","totalDifficulty":"0x0","transactions":[],"transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","uncles":[]}"# + ); + } - #[test] - fn none_size_null() { - let block = Block { - hash: Some(H256::default()), - parent_hash: H256::default(), - uncles_hash: H256::default(), - author: H160::default(), - miner: H160::default(), - state_root: H256::default(), - transactions_root: H256::default(), - receipts_root: H256::default(), - number: Some(U256::default()), - gas_used: U256::default(), - gas_limit: U256::default(), - extra_data: Bytes::default(), - logs_bloom: Some(H2048::default()), - timestamp: U256::default(), - difficulty: U256::default(), - total_difficulty: 
Some(U256::default()), - seal_fields: vec![Bytes::default(), Bytes::default()], - uncles: vec![], - transactions: BlockTransactions::Hashes(vec![].into()), - size: None, - }; - let serialized_block = serde_json::to_string(&block).unwrap(); - let rich_block = RichBlock { - inner: block, - extra_info: map![ - "mixHash".into() => format!("{:?}", H256::default()), - "nonce".into() => format!("{:?}", H64::default()) - ], - }; - let serialized_rich_block = serde_json::to_string(&rich_block).unwrap(); + #[test] + fn none_size_null() { + let block = Block { + hash: Some(H256::default()), + parent_hash: H256::default(), + uncles_hash: H256::default(), + author: H160::default(), + miner: H160::default(), + state_root: H256::default(), + transactions_root: H256::default(), + receipts_root: H256::default(), + number: Some(U256::default()), + gas_used: U256::default(), + gas_limit: U256::default(), + extra_data: Bytes::default(), + logs_bloom: Some(H2048::default()), + timestamp: U256::default(), + difficulty: U256::default(), + total_difficulty: Some(U256::default()), + seal_fields: vec![Bytes::default(), Bytes::default()], + uncles: vec![], + transactions: BlockTransactions::Hashes(vec![].into()), + size: None, + }; + let serialized_block = serde_json::to_string(&block).unwrap(); + let rich_block = RichBlock { + inner: block, + extra_info: map![ + "mixHash".into() => format!("{:?}", H256::default()), + "nonce".into() => format!("{:?}", H64::default()) + ], + }; + let serialized_rich_block = serde_json::to_string(&rich_block).unwrap(); - assert_eq!(serialized_block, 
r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","author":"0x0000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","number":"0x0","gasUsed":"0x0","gasLimit":"0x0","extraData":"0x","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","difficulty":"0x0","totalDifficulty":"0x0","sealFields":["0x","0x"],"uncles":[],"transactions":[],"size":null}"#); - assert_eq!(serialized_rich_block, 
r#"{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","sealFields":["0x","0x"],"sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","size":null,"stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","totalDifficulty":"0x0","transactions":[],"transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","uncles":[]}"#); - } + assert_eq!( + serialized_block, + 
r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","author":"0x0000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","number":"0x0","gasUsed":"0x0","gasLimit":"0x0","extraData":"0x","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","difficulty":"0x0","totalDifficulty":"0x0","sealFields":["0x","0x"],"uncles":[],"transactions":[],"size":null}"# + ); + assert_eq!( + serialized_rich_block, + 
r#"{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","sealFields":["0x","0x"],"sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","size":null,"stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","totalDifficulty":"0x0","transactions":[],"transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","uncles":[]}"# + ); + } - #[test] - fn test_serialize_header() { - let header = Header { - hash: Some(H256::default()), - parent_hash: H256::default(), - uncles_hash: H256::default(), - author: H160::default(), - miner: H160::default(), - state_root: H256::default(), - transactions_root: H256::default(), - receipts_root: H256::default(), - number: Some(U256::default()), - gas_used: U256::default(), - gas_limit: U256::default(), - extra_data: Bytes::default(), - logs_bloom: H2048::default(), - timestamp: U256::default(), - difficulty: U256::default(), - seal_fields: 
vec![Bytes::default(), Bytes::default()], - size: Some(69.into()), - }; - let serialized_header = serde_json::to_string(&header).unwrap(); - let rich_header = RichHeader { - inner: header, - extra_info: map![ - "mixHash".into() => format!("{:?}", H256::default()), - "nonce".into() => format!("{:?}", H64::default()) - ], - }; - let serialized_rich_header = serde_json::to_string(&rich_header).unwrap(); + #[test] + fn test_serialize_header() { + let header = Header { + hash: Some(H256::default()), + parent_hash: H256::default(), + uncles_hash: H256::default(), + author: H160::default(), + miner: H160::default(), + state_root: H256::default(), + transactions_root: H256::default(), + receipts_root: H256::default(), + number: Some(U256::default()), + gas_used: U256::default(), + gas_limit: U256::default(), + extra_data: Bytes::default(), + logs_bloom: H2048::default(), + timestamp: U256::default(), + difficulty: U256::default(), + seal_fields: vec![Bytes::default(), Bytes::default()], + size: Some(69.into()), + }; + let serialized_header = serde_json::to_string(&header).unwrap(); + let rich_header = RichHeader { + inner: header, + extra_info: map![ + "mixHash".into() => format!("{:?}", H256::default()), + "nonce".into() => format!("{:?}", H64::default()) + ], + }; + let serialized_rich_header = serde_json::to_string(&rich_header).unwrap(); - assert_eq!(serialized_header, 
r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","author":"0x0000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","number":"0x0","gasUsed":"0x0","gasLimit":"0x0","extraData":"0x","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","difficulty":"0x0","sealFields":["0x","0x"],"size":"0x45"}"#); - assert_eq!(serialized_rich_header, 
r#"{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","sealFields":["0x","0x"],"sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","size":"0x45","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000"}"#); - } + assert_eq!( + serialized_header, + 
r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","author":"0x0000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","number":"0x0","gasUsed":"0x0","gasLimit":"0x0","extraData":"0x","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","difficulty":"0x0","sealFields":["0x","0x"],"size":"0x45"}"# + ); + assert_eq!( + serialized_rich_header, + 
r#"{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","sealFields":["0x","0x"],"sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","size":"0x45","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000"}"# + ); + } } diff --git a/rpc/src/v1/types/block_number.rs b/rpc/src/v1/types/block_number.rs index 7e19f2d3d..220fa8336 100644 --- a/rpc/src/v1/types/block_number.rs +++ b/rpc/src/v1/types/block_number.rs @@ -14,149 +14,179 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::fmt; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde::de::{Error, Visitor}; use ethcore::client::BlockId; +use serde::{ + de::{Error, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::fmt; /// Represents rpc api block number param. #[derive(Debug, PartialEq, Clone, Hash, Eq)] pub enum BlockNumber { - /// Number - Num(u64), - /// Latest block - Latest, - /// Earliest block (genesis) - Earliest, - /// Pending block (being mined) - Pending, + /// Number + Num(u64), + /// Latest block + Latest, + /// Earliest block (genesis) + Earliest, + /// Pending block (being mined) + Pending, } impl Default for BlockNumber { - fn default() -> Self { - BlockNumber::Latest - } + fn default() -> Self { + BlockNumber::Latest + } } impl<'a> Deserialize<'a> for BlockNumber { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'a> { - deserializer.deserialize_any(BlockNumberVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(BlockNumberVisitor) + } } impl BlockNumber { - /// Convert block number to min block target. - pub fn to_min_block_num(&self) -> Option { - match *self { - BlockNumber::Num(ref x) => Some(*x), - _ => None, - } - } + /// Convert block number to min block target. + pub fn to_min_block_num(&self) -> Option { + match *self { + BlockNumber::Num(ref x) => Some(*x), + _ => None, + } + } } /// BlockNumber to BlockId conversion /// /// NOTE use only for light clients. pub trait LightBlockNumber { - /// Convert block number to block id. - fn to_block_id(self) -> BlockId; + /// Convert block number to block id. + fn to_block_id(self) -> BlockId; } impl LightBlockNumber for BlockNumber { - fn to_block_id(self) -> BlockId { - // NOTE Here we treat `Pending` as `Latest`. - // Since light clients don't produce pending blocks - // (they don't have state) we can safely fallback to `Latest`. 
- match self { - BlockNumber::Num(n) => BlockId::Number(n), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => { - warn!("`Pending` is deprecated and may be removed in future versions. Falling back to `Latest`"); - BlockId::Latest - } - } - } + fn to_block_id(self) -> BlockId { + // NOTE Here we treat `Pending` as `Latest`. + // Since light clients don't produce pending blocks + // (they don't have state) we can safely fallback to `Latest`. + match self { + BlockNumber::Num(n) => BlockId::Number(n), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Pending => { + warn!("`Pending` is deprecated and may be removed in future versions. Falling back to `Latest`"); + BlockId::Latest + } + } + } } impl Serialize for BlockNumber { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - match *self { - BlockNumber::Num(ref x) => serializer.serialize_str(&format!("0x{:x}", x)), - BlockNumber::Latest => serializer.serialize_str("latest"), - BlockNumber::Earliest => serializer.serialize_str("earliest"), - BlockNumber::Pending => serializer.serialize_str("pending"), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + BlockNumber::Num(ref x) => serializer.serialize_str(&format!("0x{:x}", x)), + BlockNumber::Latest => serializer.serialize_str("latest"), + BlockNumber::Earliest => serializer.serialize_str("earliest"), + BlockNumber::Pending => serializer.serialize_str("pending"), + } + } } struct BlockNumberVisitor; impl<'a> Visitor<'a> for BlockNumberVisitor { - type Value = BlockNumber; + type Value = BlockNumber; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a block number or 'latest', 'earliest' or 'pending'") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!( + formatter, + "a block number or 'latest', 'earliest' or 
'pending'" + ) + } - fn visit_str(self, value: &str) -> Result where E: Error { - match value { - "latest" => Ok(BlockNumber::Latest), - "earliest" => Ok(BlockNumber::Earliest), - "pending" => Ok(BlockNumber::Pending), - _ if value.starts_with("0x") => u64::from_str_radix(&value[2..], 16).map(BlockNumber::Num).map_err(|e| { - Error::custom(format!("Invalid block number: {}", e)) - }), - _ => Err(Error::custom("Invalid block number: missing 0x prefix".to_string())), - } - } + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "latest" => Ok(BlockNumber::Latest), + "earliest" => Ok(BlockNumber::Earliest), + "pending" => Ok(BlockNumber::Pending), + _ if value.starts_with("0x") => u64::from_str_radix(&value[2..], 16) + .map(BlockNumber::Num) + .map_err(|e| Error::custom(format!("Invalid block number: {}", e))), + _ => Err(Error::custom( + "Invalid block number: missing 0x prefix".to_string(), + )), + } + } - fn visit_string(self, value: String) -> Result where E: Error { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: Error, + { + self.visit_str(value.as_ref()) + } } /// Converts `BlockNumber` to `BlockId`, panics on `BlockNumber::Pending` pub fn block_number_to_id(number: BlockNumber) -> BlockId { - match number { - BlockNumber::Num(num) => BlockId::Number(num), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, + match number { + BlockNumber::Num(num) => BlockId::Number(num), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => panic!("`BlockNumber::Pending` should be handled manually") - } + BlockNumber::Pending => panic!("`BlockNumber::Pending` should be handled manually"), + } } #[cfg(test)] mod tests { - use ethcore::client::BlockId; - use super::*; - use serde_json; + use super::*; + use ethcore::client::BlockId; + use serde_json; - #[test] - fn block_number_deserialization() { - let 
s = r#"["0xa", "latest", "earliest", "pending"]"#; - let deserialized: Vec = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, vec![BlockNumber::Num(10), BlockNumber::Latest, BlockNumber::Earliest, BlockNumber::Pending]) - } + #[test] + fn block_number_deserialization() { + let s = r#"["0xa", "latest", "earliest", "pending"]"#; + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + vec![ + BlockNumber::Num(10), + BlockNumber::Latest, + BlockNumber::Earliest, + BlockNumber::Pending + ] + ) + } - #[test] - fn should_not_deserialize_decimal() { - let s = r#""10""#; - assert!(serde_json::from_str::(s).is_err()); - } + #[test] + fn should_not_deserialize_decimal() { + let s = r#""10""#; + assert!(serde_json::from_str::(s).is_err()); + } - #[test] - fn normal_block_number_to_id() { - assert_eq!(block_number_to_id(BlockNumber::Num(100)), BlockId::Number(100)); - assert_eq!(block_number_to_id(BlockNumber::Earliest), BlockId::Earliest); - assert_eq!(block_number_to_id(BlockNumber::Latest), BlockId::Latest); - } + #[test] + fn normal_block_number_to_id() { + assert_eq!( + block_number_to_id(BlockNumber::Num(100)), + BlockId::Number(100) + ); + assert_eq!(block_number_to_id(BlockNumber::Earliest), BlockId::Earliest); + assert_eq!(block_number_to_id(BlockNumber::Latest), BlockId::Latest); + } - #[test] - #[should_panic] - fn pending_block_number_to_id() { - // Since this function is not allowed to be called in such way, panic should happen - block_number_to_id(BlockNumber::Pending); - } + #[test] + #[should_panic] + fn pending_block_number_to_id() { + // Since this function is not allowed to be called in such way, panic should happen + block_number_to_id(BlockNumber::Pending); + } } diff --git a/rpc/src/v1/types/bytes.rs b/rpc/src/v1/types/bytes.rs index 837f3b5f9..4bfc4452f 100644 --- a/rpc/src/v1/types/bytes.rs +++ b/rpc/src/v1/types/bytes.rs @@ -16,106 +16,122 @@ //! 
Serializable wrapper around vector of bytes +use rustc_hex::{FromHex, ToHex}; +use serde::{ + de::{Error, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; use std::fmt; -use rustc_hex::{ToHex, FromHex}; -use serde::{Serialize, Serializer, Deserialize, Deserializer}; -use serde::de::{Error, Visitor}; /// Wrapper structure around vector of bytes. #[derive(Debug, PartialEq, Eq, Default, Hash, Clone)] pub struct Bytes(pub Vec); impl Bytes { - /// Simple constructor. - pub fn new(bytes: Vec) -> Bytes { - Bytes(bytes) - } - /// Convert back to vector - pub fn into_vec(self) -> Vec { - self.0 - } + /// Simple constructor. + pub fn new(bytes: Vec) -> Bytes { + Bytes(bytes) + } + /// Convert back to vector + pub fn into_vec(self) -> Vec { + self.0 + } } impl From> for Bytes { - fn from(bytes: Vec) -> Bytes { - Bytes(bytes) - } + fn from(bytes: Vec) -> Bytes { + Bytes(bytes) + } } impl Into> for Bytes { - fn into(self) -> Vec { - self.0 - } + fn into(self) -> Vec { + self.0 + } } impl Serialize for Bytes { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - let mut serialized = "0x".to_owned(); - serialized.push_str(self.0.to_hex().as_ref()); - serializer.serialize_str(serialized.as_ref()) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut serialized = "0x".to_owned(); + serialized.push_str(self.0.to_hex().as_ref()); + serializer.serialize_str(serialized.as_ref()) + } } impl<'a> Deserialize<'a> for Bytes { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(BytesVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(BytesVisitor) + } } struct BytesVisitor; impl<'a> Visitor<'a> for BytesVisitor { - type Value = Bytes; + type Value = Bytes; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed, hex-encoded vector of bytes") - } + 
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a 0x-prefixed, hex-encoded vector of bytes") + } - fn visit_str(self, value: &str) -> Result where E: Error { - if value.len() >= 2 && value.starts_with("0x") && value.len() & 1 == 0 { - Ok(Bytes::new(FromHex::from_hex(&value[2..]).map_err(|e| Error::custom(format!("Invalid hex: {}", e)))?)) - } else { - Err(Error::custom("Invalid bytes format. Expected a 0x-prefixed hex string with even length")) - } - } + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + if value.len() >= 2 && value.starts_with("0x") && value.len() & 1 == 0 { + Ok(Bytes::new(FromHex::from_hex(&value[2..]).map_err(|e| { + Error::custom(format!("Invalid hex: {}", e)) + })?)) + } else { + Err(Error::custom( + "Invalid bytes format. Expected a 0x-prefixed hex string with even length", + )) + } + } - fn visit_string(self, value: String) -> Result where E: Error { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: Error, + { + self.visit_str(value.as_ref()) + } } #[cfg(test)] mod tests { - use super::*; - use serde_json; - use rustc_hex::FromHex; + use super::*; + use rustc_hex::FromHex; + use serde_json; - #[test] - fn test_bytes_serialize() { - let bytes = Bytes("0123456789abcdef".from_hex().unwrap()); - let serialized = serde_json::to_string(&bytes).unwrap(); - assert_eq!(serialized, r#""0x0123456789abcdef""#); - } + #[test] + fn test_bytes_serialize() { + let bytes = Bytes("0123456789abcdef".from_hex().unwrap()); + let serialized = serde_json::to_string(&bytes).unwrap(); + assert_eq!(serialized, r#""0x0123456789abcdef""#); + } - #[test] - fn test_bytes_deserialize() { - let bytes0: Result = serde_json::from_str(r#""∀∂""#); - let bytes1: Result = serde_json::from_str(r#""""#); - let bytes2: Result = serde_json::from_str(r#""0x123""#); - let bytes3: Result = serde_json::from_str(r#""0xgg""#); + #[test] + fn test_bytes_deserialize() { + let bytes0: 
Result = serde_json::from_str(r#""∀∂""#); + let bytes1: Result = serde_json::from_str(r#""""#); + let bytes2: Result = serde_json::from_str(r#""0x123""#); + let bytes3: Result = serde_json::from_str(r#""0xgg""#); - let bytes4: Bytes = serde_json::from_str(r#""0x""#).unwrap(); - let bytes5: Bytes = serde_json::from_str(r#""0x12""#).unwrap(); - let bytes6: Bytes = serde_json::from_str(r#""0x0123""#).unwrap(); + let bytes4: Bytes = serde_json::from_str(r#""0x""#).unwrap(); + let bytes5: Bytes = serde_json::from_str(r#""0x12""#).unwrap(); + let bytes6: Bytes = serde_json::from_str(r#""0x0123""#).unwrap(); - assert!(bytes0.is_err()); - assert!(bytes1.is_err()); - assert!(bytes2.is_err()); - assert!(bytes3.is_err()); - assert_eq!(bytes4, Bytes(vec![])); - assert_eq!(bytes5, Bytes(vec![0x12])); - assert_eq!(bytes6, Bytes(vec![0x1, 0x23])); - } + assert!(bytes0.is_err()); + assert!(bytes1.is_err()); + assert!(bytes2.is_err()); + assert!(bytes3.is_err()); + assert_eq!(bytes4, Bytes(vec![])); + assert_eq!(bytes5, Bytes(vec![0x12])); + assert_eq!(bytes6, Bytes(vec![0x1, 0x23])); + } } diff --git a/rpc/src/v1/types/call_request.rs b/rpc/src/v1/types/call_request.rs index d75e4b1a2..58f537bf3 100644 --- a/rpc/src/v1/types/call_request.rs +++ b/rpc/src/v1/types/call_request.rs @@ -15,55 +15,54 @@ // along with Parity Ethereum. If not, see . 
use ethereum_types::{H160, U256}; -use v1::helpers::CallRequest as Request; -use v1::types::Bytes; +use v1::{helpers::CallRequest as Request, types::Bytes}; /// Call request #[derive(Debug, Default, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct CallRequest { - /// From - pub from: Option, - /// To - pub to: Option, - /// Gas Price - pub gas_price: Option, - /// Gas - pub gas: Option, - /// Value - pub value: Option, - /// Data - pub data: Option, - /// Nonce - pub nonce: Option, + /// From + pub from: Option, + /// To + pub to: Option, + /// Gas Price + pub gas_price: Option, + /// Gas + pub gas: Option, + /// Value + pub value: Option, + /// Data + pub data: Option, + /// Nonce + pub nonce: Option, } impl Into for CallRequest { - fn into(self) -> Request { - Request { - from: self.from.map(Into::into), - to: self.to.map(Into::into), - gas_price: self.gas_price.map(Into::into), - gas: self.gas.map(Into::into), - value: self.value.map(Into::into), - data: self.data.map(Into::into), - nonce: self.nonce.map(Into::into), - } - } + fn into(self) -> Request { + Request { + from: self.from.map(Into::into), + to: self.to.map(Into::into), + gas_price: self.gas_price.map(Into::into), + gas: self.gas.map(Into::into), + value: self.value.map(Into::into), + data: self.data.map(Into::into), + nonce: self.nonce.map(Into::into), + } + } } #[cfg(test)] mod tests { - use std::str::FromStr; - use rustc_hex::FromHex; - use serde_json; - use ethereum_types::{U256, H160}; - use super::CallRequest; + use super::CallRequest; + use ethereum_types::{H160, U256}; + use rustc_hex::FromHex; + use serde_json; + use std::str::FromStr; - #[test] - fn call_request_deserialize() { - let s = r#"{ + #[test] + fn call_request_deserialize() { + let s = r#"{ "from":"0x0000000000000000000000000000000000000001", "to":"0x0000000000000000000000000000000000000002", "gasPrice":"0x1", @@ -72,22 +71,25 @@ mod tests { "data":"0x123456", "nonce":"0x4" }"#; - 
let deserialized: CallRequest = serde_json::from_str(s).unwrap(); + let deserialized: CallRequest = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, CallRequest { - from: Some(H160::from(1)), - to: Some(H160::from(2)), - gas_price: Some(U256::from(1)), - gas: Some(U256::from(2)), - value: Some(U256::from(3)), - data: Some(vec![0x12, 0x34, 0x56].into()), - nonce: Some(U256::from(4)), - }); - } + assert_eq!( + deserialized, + CallRequest { + from: Some(H160::from(1)), + to: Some(H160::from(2)), + gas_price: Some(U256::from(1)), + gas: Some(U256::from(2)), + value: Some(U256::from(3)), + data: Some(vec![0x12, 0x34, 0x56].into()), + nonce: Some(U256::from(4)), + } + ); + } - #[test] - fn call_request_deserialize2() { - let s = r#"{ + #[test] + fn call_request_deserialize2() { + let s = r#"{ "from": "0xb60e8dd61c5d32be8058bb8eb970870f07233155", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", @@ -95,9 +97,9 @@ mod tests { "value": "0x9184e72a", "data": "0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675" }"#; - let deserialized: CallRequest = serde_json::from_str(s).unwrap(); + let deserialized: CallRequest = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, CallRequest { + assert_eq!(deserialized, CallRequest { from: Some(H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap()), to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), gas_price: Some(U256::from_str("9184e72a000").unwrap()), @@ -106,21 +108,24 @@ mod tests { data: Some("d46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675".from_hex().unwrap().into()), nonce: None }); - } + } - #[test] - fn call_request_deserialize_empty() { - let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#; - let deserialized: CallRequest = serde_json::from_str(s).unwrap(); + #[test] + fn call_request_deserialize_empty() { + let s = 
r#"{"from":"0x0000000000000000000000000000000000000001"}"#; + let deserialized: CallRequest = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, CallRequest { - from: Some(H160::from(1)), - to: None, - gas_price: None, - gas: None, - value: None, - data: None, - nonce: None, - }); - } + assert_eq!( + deserialized, + CallRequest { + from: Some(H160::from(1)), + to: None, + gas_price: None, + gas: None, + value: None, + data: None, + nonce: None, + } + ); + } } diff --git a/rpc/src/v1/types/confirmations.rs b/rpc/src/v1/types/confirmations.rs index dedd0ba25..41305d38b 100644 --- a/rpc/src/v1/types/confirmations.rs +++ b/rpc/src/v1/types/confirmations.rs @@ -16,178 +16,187 @@ //! Types used in Confirmations queue (Trusted Signer) -use std::fmt; -use serde::{Serialize, Serializer}; use ansi_term::Colour; use bytes::ToPretty; +use serde::{Serialize, Serializer}; +use std::fmt; use ethereum_types::{H160, H256, H520, U256}; -use v1::types::{TransactionRequest, RichRawTransaction, Bytes, TransactionCondition, Origin}; -use v1::helpers; use ethkey::Password; +use v1::{ + helpers, + types::{Bytes, Origin, RichRawTransaction, TransactionCondition, TransactionRequest}, +}; /// Confirmation waiting in a queue #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct ConfirmationRequest { - /// Id of this confirmation - pub id: U256, - /// Payload - pub payload: ConfirmationPayload, - /// Request origin - pub origin: Origin, + /// Id of this confirmation + pub id: U256, + /// Payload + pub payload: ConfirmationPayload, + /// Request origin + pub origin: Origin, } impl From for ConfirmationRequest { - fn from(c: helpers::ConfirmationRequest) -> Self { - ConfirmationRequest { - id: c.id, - payload: c.payload.into(), - origin: c.origin, - } - } + fn from(c: helpers::ConfirmationRequest) -> Self { + ConfirmationRequest { + id: c.id, + payload: c.payload.into(), + origin: c.origin, + } + } } impl fmt::Display for 
ConfirmationRequest { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "#{}: {} coming from {}", self.id, self.payload, self.origin) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "#{}: {} coming from {}", + self.id, self.payload, self.origin + ) + } } impl fmt::Display for ConfirmationPayload { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ConfirmationPayload::SendTransaction(ref transaction) => write!(f, "{}", transaction), - ConfirmationPayload::SignTransaction(ref transaction) => write!(f, "(Sign only) {}", transaction), - ConfirmationPayload::EthSignMessage(ref sign) => write!(f, "{}", sign), - ConfirmationPayload::EIP191SignMessage(ref sign) => write!(f, "{}", sign), - ConfirmationPayload::Decrypt(ref decrypt) => write!(f, "{}", decrypt), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ConfirmationPayload::SendTransaction(ref transaction) => write!(f, "{}", transaction), + ConfirmationPayload::SignTransaction(ref transaction) => { + write!(f, "(Sign only) {}", transaction) + } + ConfirmationPayload::EthSignMessage(ref sign) => write!(f, "{}", sign), + ConfirmationPayload::EIP191SignMessage(ref sign) => write!(f, "{}", sign), + ConfirmationPayload::Decrypt(ref decrypt) => write!(f, "{}", decrypt), + } + } } /// Ethereum-prefixed Sign request #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct EthSignRequest { - /// Address - pub address: H160, - /// Hash to sign - pub data: Bytes, + /// Address + pub address: H160, + /// Hash to sign + pub data: Bytes, } /// EIP191 Sign request #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct EIP191SignRequest { - /// Address - pub address: H160, - /// Hash to sign - pub data: H256, + /// Address + pub address: H160, + /// Hash to sign + pub data: H256, } impl From<(H160, H256)> for 
EIP191SignRequest { - fn from(tuple: (H160, H256)) -> Self { - EIP191SignRequest { - address: tuple.0, - data: tuple.1, - } - } + fn from(tuple: (H160, H256)) -> Self { + EIP191SignRequest { + address: tuple.0, + data: tuple.1, + } + } } impl fmt::Display for EIP191SignRequest { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "sign 0x{} with {}", - self.data.0.pretty(), - Colour::White.bold().paint(format!("0x{:?}", self.address)), - ) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "sign 0x{} with {}", + self.data.0.pretty(), + Colour::White.bold().paint(format!("0x{:?}", self.address)), + ) + } } impl From<(H160, Bytes)> for EthSignRequest { - fn from(tuple: (H160, Bytes)) -> Self { - EthSignRequest { - address: tuple.0, - data: tuple.1, - } - } + fn from(tuple: (H160, Bytes)) -> Self { + EthSignRequest { + address: tuple.0, + data: tuple.1, + } + } } impl fmt::Display for EthSignRequest { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "sign 0x{} with {}", - self.data.0.pretty(), - Colour::White.bold().paint(format!("0x{:?}", self.address)), - ) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "sign 0x{} with {}", + self.data.0.pretty(), + Colour::White.bold().paint(format!("0x{:?}", self.address)), + ) + } } /// Decrypt request #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct DecryptRequest { - /// Address - pub address: H160, - /// Message to decrypt - pub msg: Bytes, + /// Address + pub address: H160, + /// Message to decrypt + pub msg: Bytes, } impl From<(H160, Bytes)> for DecryptRequest { - fn from(tuple: (H160, Bytes)) -> Self { - DecryptRequest { - address: tuple.0, - msg: tuple.1, - } - } + fn from(tuple: (H160, Bytes)) -> Self { + DecryptRequest { + address: tuple.0, + msg: tuple.1, + } + } } impl fmt::Display for DecryptRequest { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result 
{ - write!( - f, - "decrypt data with {}", - Colour::White.bold().paint(format!("0x{:?}", self.address)), - ) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "decrypt data with {}", + Colour::White.bold().paint(format!("0x{:?}", self.address)), + ) + } } /// Confirmation response for particular payload #[derive(Debug, Clone, PartialEq)] pub enum ConfirmationResponse { - /// Transaction Hash - SendTransaction(H256), - /// Transaction RLP - SignTransaction(RichRawTransaction), - /// Signature (encoded as VRS) - Signature(H520), - /// Decrypted data - Decrypt(Bytes), + /// Transaction Hash + SendTransaction(H256), + /// Transaction RLP + SignTransaction(RichRawTransaction), + /// Signature (encoded as VRS) + Signature(H520), + /// Decrypted data + Decrypt(Bytes), } impl Serialize for ConfirmationResponse { - fn serialize(&self, serializer: S) -> Result - where S: Serializer - { - match *self { - ConfirmationResponse::SendTransaction(ref hash) => hash.serialize(serializer), - ConfirmationResponse::SignTransaction(ref rlp) => rlp.serialize(serializer), - ConfirmationResponse::Signature(ref signature) => signature.serialize(serializer), - ConfirmationResponse::Decrypt(ref data) => data.serialize(serializer), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + ConfirmationResponse::SendTransaction(ref hash) => hash.serialize(serializer), + ConfirmationResponse::SignTransaction(ref rlp) => rlp.serialize(serializer), + ConfirmationResponse::Signature(ref signature) => signature.serialize(serializer), + ConfirmationResponse::Decrypt(ref data) => data.serialize(serializer), + } + } } /// Confirmation response with additional token for further requests #[derive(Clone, PartialEq, Serialize)] pub struct ConfirmationResponseWithToken { - /// Actual response - pub result: ConfirmationResponse, - /// New token - pub token: Password, + /// Actual response + pub result: ConfirmationResponse, + /// New 
token + pub token: Password, } /// Confirmation payload, i.e. the thing to be confirmed @@ -195,38 +204,45 @@ pub struct ConfirmationResponseWithToken { #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub enum ConfirmationPayload { - /// Send Transaction - SendTransaction(TransactionRequest), - /// Sign Transaction - SignTransaction(TransactionRequest), - /// Signature - #[serde(rename = "sign")] - EthSignMessage(EthSignRequest), - /// signature without prefix - EIP191SignMessage(EIP191SignRequest), - /// Decryption - Decrypt(DecryptRequest), + /// Send Transaction + SendTransaction(TransactionRequest), + /// Sign Transaction + SignTransaction(TransactionRequest), + /// Signature + #[serde(rename = "sign")] + EthSignMessage(EthSignRequest), + /// signature without prefix + EIP191SignMessage(EIP191SignRequest), + /// Decryption + Decrypt(DecryptRequest), } impl From for ConfirmationPayload { - fn from(c: helpers::ConfirmationPayload) -> Self { - match c { - helpers::ConfirmationPayload::SendTransaction(t) => ConfirmationPayload::SendTransaction(t.into()), - helpers::ConfirmationPayload::SignTransaction(t) => ConfirmationPayload::SignTransaction(t.into()), - helpers::ConfirmationPayload::EthSignMessage(address, data) => ConfirmationPayload::EthSignMessage(EthSignRequest { - address, - data: data.into(), - }), - helpers::ConfirmationPayload::SignMessage(address, data) => ConfirmationPayload::EIP191SignMessage(EIP191SignRequest { - address, - data, - }), - helpers::ConfirmationPayload::Decrypt(address, msg) => ConfirmationPayload::Decrypt(DecryptRequest { - address, - msg: msg.into(), - }), - } - } + fn from(c: helpers::ConfirmationPayload) -> Self { + match c { + helpers::ConfirmationPayload::SendTransaction(t) => { + ConfirmationPayload::SendTransaction(t.into()) + } + helpers::ConfirmationPayload::SignTransaction(t) => { + ConfirmationPayload::SignTransaction(t.into()) + } + helpers::ConfirmationPayload::EthSignMessage(address, data) => { + 
ConfirmationPayload::EthSignMessage(EthSignRequest { + address, + data: data.into(), + }) + } + helpers::ConfirmationPayload::SignMessage(address, data) => { + ConfirmationPayload::EIP191SignMessage(EIP191SignRequest { address, data }) + } + helpers::ConfirmationPayload::Decrypt(address, msg) => { + ConfirmationPayload::Decrypt(DecryptRequest { + address, + msg: msg.into(), + }) + } + } + } } /// Possible modifications to the confirmed transaction sent by `Trusted Signer` @@ -234,202 +250,214 @@ impl From for ConfirmationPayload { #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct TransactionModification { - /// Modified transaction sender - pub sender: Option, - /// Modified gas price - pub gas_price: Option, - /// Modified gas - pub gas: Option, - /// Modified transaction condition. - pub condition: Option>, + /// Modified transaction sender + pub sender: Option, + /// Modified gas price + pub gas_price: Option, + /// Modified gas + pub gas: Option, + /// Modified transaction condition. + pub condition: Option>, } /// Represents two possible return values. 
#[derive(Debug, Clone)] -pub enum Either where - A: fmt::Debug + Clone, - B: fmt::Debug + Clone, +pub enum Either +where + A: fmt::Debug + Clone, + B: fmt::Debug + Clone, { - /// Primary value - Either(A), - /// Secondary value - Or(B), + /// Primary value + Either(A), + /// Secondary value + Or(B), } -impl From for Either where - A: fmt::Debug + Clone, - B: fmt::Debug + Clone, +impl From for Either +where + A: fmt::Debug + Clone, + B: fmt::Debug + Clone, { - fn from(a: A) -> Self { - Either::Either(a) - } + fn from(a: A) -> Self { + Either::Either(a) + } } -impl Serialize for Either where - A: Serialize + fmt::Debug + Clone, - B: Serialize + fmt::Debug + Clone, +impl Serialize for Either +where + A: Serialize + fmt::Debug + Clone, + B: Serialize + fmt::Debug + Clone, { - fn serialize(&self, serializer: S) -> Result - where S: Serializer - { - match *self { - Either::Either(ref a) => a.serialize(serializer), - Either::Or(ref b) => b.serialize(serializer), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + Either::Either(ref a) => a.serialize(serializer), + Either::Or(ref b) => b.serialize(serializer), + } + } } #[cfg(test)] mod tests { - use std::str::FromStr; + use super::*; use ethereum_types::{H256, U256}; - use serde_json; - use v1::types::TransactionCondition; - use v1::helpers; - use super::*; + use serde_json; + use std::str::FromStr; + use v1::{helpers, types::TransactionCondition}; - #[test] - fn should_serialize_sign_confirmation() { - // given - let request = helpers::ConfirmationRequest { - id: 15.into(), - payload: helpers::ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), - origin: Origin::Rpc("test service".into()), - }; + #[test] + fn should_serialize_sign_confirmation() { + // given + let request = helpers::ConfirmationRequest { + id: 15.into(), + payload: helpers::ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), + origin: Origin::Rpc("test service".into()), + }; - // 
when - let res = serde_json::to_string(&ConfirmationRequest::from(request)); - let expected = r#"{"id":"0xf","payload":{"sign":{"address":"0x0000000000000000000000000000000000000001","data":"0x05"}},"origin":{"rpc":"test service"}}"#; + // when + let res = serde_json::to_string(&ConfirmationRequest::from(request)); + let expected = r#"{"id":"0xf","payload":{"sign":{"address":"0x0000000000000000000000000000000000000001","data":"0x05"}},"origin":{"rpc":"test service"}}"#; - // then - assert_eq!(res.unwrap(), expected.to_owned()); - } + // then + assert_eq!(res.unwrap(), expected.to_owned()); + } - #[test] - fn should_serialize_transaction_confirmation() { - // given - let request = helpers::ConfirmationRequest { - id: 15.into(), - payload: helpers::ConfirmationPayload::SendTransaction(helpers::FilledTransactionRequest { - from: 0.into(), - used_default_from: false, - to: None, - gas: 15_000.into(), - gas_price: 10_000.into(), - value: 100_000.into(), - data: vec![1, 2, 3], - nonce: Some(1.into()), - condition: None, - }), - origin: Origin::Signer { - session: 5.into(), - } - }; + #[test] + fn should_serialize_transaction_confirmation() { + // given + let request = helpers::ConfirmationRequest { + id: 15.into(), + payload: helpers::ConfirmationPayload::SendTransaction( + helpers::FilledTransactionRequest { + from: 0.into(), + used_default_from: false, + to: None, + gas: 15_000.into(), + gas_price: 10_000.into(), + value: 100_000.into(), + data: vec![1, 2, 3], + nonce: Some(1.into()), + condition: None, + }, + ), + origin: Origin::Signer { session: 5.into() }, + }; - // when - let res = serde_json::to_string(&ConfirmationRequest::from(request)); - let expected = 
r#"{"id":"0xf","payload":{"sendTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":{"signer":{"session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}}"#; + // when + let res = serde_json::to_string(&ConfirmationRequest::from(request)); + let expected = r#"{"id":"0xf","payload":{"sendTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":{"signer":{"session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}}"#; - // then - assert_eq!(res.unwrap(), expected.to_owned()); - } + // then + assert_eq!(res.unwrap(), expected.to_owned()); + } - #[test] - fn should_serialize_sign_transaction_confirmation() { - // given - let request = helpers::ConfirmationRequest { - id: 15.into(), - payload: helpers::ConfirmationPayload::SignTransaction(helpers::FilledTransactionRequest { - from: 0.into(), - used_default_from: false, - to: None, - gas: 15_000.into(), - gas_price: 10_000.into(), - value: 100_000.into(), - data: vec![1, 2, 3], - nonce: Some(1.into()), - condition: None, - }), - origin: Origin::Unknown, - }; + #[test] + fn should_serialize_sign_transaction_confirmation() { + // given + let request = helpers::ConfirmationRequest { + id: 15.into(), + payload: helpers::ConfirmationPayload::SignTransaction( + helpers::FilledTransactionRequest { + from: 0.into(), + used_default_from: false, + to: None, + gas: 15_000.into(), + gas_price: 10_000.into(), + value: 100_000.into(), + data: vec![1, 2, 3], + nonce: Some(1.into()), + condition: None, + }, + ), + origin: Origin::Unknown, + }; - // when - let res = serde_json::to_string(&ConfirmationRequest::from(request)); - let expected = 
r#"{"id":"0xf","payload":{"signTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":"unknown"}"#; + // when + let res = serde_json::to_string(&ConfirmationRequest::from(request)); + let expected = r#"{"id":"0xf","payload":{"signTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":"unknown"}"#; - // then - assert_eq!(res.unwrap(), expected.to_owned()); - } + // then + assert_eq!(res.unwrap(), expected.to_owned()); + } - #[test] - fn should_serialize_decrypt_confirmation() { - // given - let request = helpers::ConfirmationRequest { - id: 15.into(), - payload: helpers::ConfirmationPayload::Decrypt( - 10.into(), vec![1, 2, 3].into(), - ), - origin: Default::default(), - }; + #[test] + fn should_serialize_decrypt_confirmation() { + // given + let request = helpers::ConfirmationRequest { + id: 15.into(), + payload: helpers::ConfirmationPayload::Decrypt(10.into(), vec![1, 2, 3].into()), + origin: Default::default(), + }; - // when - let res = serde_json::to_string(&ConfirmationRequest::from(request)); - let expected = r#"{"id":"0xf","payload":{"decrypt":{"address":"0x000000000000000000000000000000000000000a","msg":"0x010203"}},"origin":"unknown"}"#; + // when + let res = serde_json::to_string(&ConfirmationRequest::from(request)); + let expected = r#"{"id":"0xf","payload":{"decrypt":{"address":"0x000000000000000000000000000000000000000a","msg":"0x010203"}},"origin":"unknown"}"#; - // then - assert_eq!(res.unwrap(), expected.to_owned()); - } + // then + assert_eq!(res.unwrap(), expected.to_owned()); + } - #[test] - fn should_deserialize_modification() { - // given - let s1 = r#"{ + #[test] + fn should_deserialize_modification() { + // given + let s1 = r#"{ "sender": 
"0x000000000000000000000000000000000000000a", "gasPrice":"0xba43b7400", "condition": { "block": 66 } }"#; - let s2 = r#"{"gas": "0x1233"}"#; - let s3 = r#"{}"#; + let s2 = r#"{"gas": "0x1233"}"#; + let s3 = r#"{}"#; - // when - let res1: TransactionModification = serde_json::from_str(s1).unwrap(); - let res2: TransactionModification = serde_json::from_str(s2).unwrap(); - let res3: TransactionModification = serde_json::from_str(s3).unwrap(); + // when + let res1: TransactionModification = serde_json::from_str(s1).unwrap(); + let res2: TransactionModification = serde_json::from_str(s2).unwrap(); + let res3: TransactionModification = serde_json::from_str(s3).unwrap(); - // then - assert_eq!(res1, TransactionModification { - sender: Some(10.into()), - gas_price: Some(U256::from_str("0ba43b7400").unwrap()), - gas: None, - condition: Some(Some(TransactionCondition::Number(0x42))), - }); - assert_eq!(res2, TransactionModification { - sender: None, - gas_price: None, - gas: Some(U256::from_str("1233").unwrap()), - condition: None, - }); - assert_eq!(res3, TransactionModification { - sender: None, - gas_price: None, - gas: None, - condition: None, - }); - } + // then + assert_eq!( + res1, + TransactionModification { + sender: Some(10.into()), + gas_price: Some(U256::from_str("0ba43b7400").unwrap()), + gas: None, + condition: Some(Some(TransactionCondition::Number(0x42))), + } + ); + assert_eq!( + res2, + TransactionModification { + sender: None, + gas_price: None, + gas: Some(U256::from_str("1233").unwrap()), + condition: None, + } + ); + assert_eq!( + res3, + TransactionModification { + sender: None, + gas_price: None, + gas: None, + condition: None, + } + ); + } - #[test] - fn should_serialize_confirmation_response_with_token() { - // given - let response = ConfirmationResponseWithToken { - result: ConfirmationResponse::SendTransaction(H256::default()), - token: "test-token".into(), - }; + #[test] + fn should_serialize_confirmation_response_with_token() { + // given + let 
response = ConfirmationResponseWithToken { + result: ConfirmationResponse::SendTransaction(H256::default()), + token: "test-token".into(), + }; - // when - let res = serde_json::to_string(&response); - let expected = r#"{"result":"0x0000000000000000000000000000000000000000000000000000000000000000","token":"test-token"}"#; + // when + let res = serde_json::to_string(&response); + let expected = r#"{"result":"0x0000000000000000000000000000000000000000000000000000000000000000","token":"test-token"}"#; - // then - assert_eq!(res.unwrap(), expected.to_owned()); - } + // then + assert_eq!(res.unwrap(), expected.to_owned()); + } } diff --git a/rpc/src/v1/types/consensus_status.rs b/rpc/src/v1/types/consensus_status.rs index da2aa26a1..be6fbacad 100644 --- a/rpc/src/v1/types/consensus_status.rs +++ b/rpc/src/v1/types/consensus_status.rs @@ -22,142 +22,142 @@ use updater::{self, CapState}; #[derive(Debug, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub enum ConsensusCapability { - /// Unknown. - Unknown, - /// Capable of consensus indefinitely. - Capable, - /// Capable of consensus up until a definite block. - CapableUntil(u64), - /// Incapable of consensus since a particular block. - IncapableSince(u64), + /// Unknown. + Unknown, + /// Capable of consensus indefinitely. + Capable, + /// Capable of consensus up until a definite block. + CapableUntil(u64), + /// Incapable of consensus since a particular block. 
+ IncapableSince(u64), } impl Into for CapState { - fn into(self) -> ConsensusCapability { - match self { - CapState::Unknown => ConsensusCapability::Unknown, - CapState::Capable => ConsensusCapability::Capable, - CapState::CapableUntil(n) => ConsensusCapability::CapableUntil(n), - CapState::IncapableSince(n) => ConsensusCapability::IncapableSince(n), - } - } + fn into(self) -> ConsensusCapability { + match self { + CapState::Unknown => ConsensusCapability::Unknown, + CapState::Capable => ConsensusCapability::Capable, + CapState::CapableUntil(n) => ConsensusCapability::CapableUntil(n), + CapState::IncapableSince(n) => ConsensusCapability::IncapableSince(n), + } + } } /// A release's track. #[derive(Debug, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub enum ReleaseTrack { - /// Stable track. - Stable, - /// Beta track. - Beta, - /// Nightly track. - Nightly, - /// Testing track. - Testing, - /// No known track. - #[serde(rename = "null")] - Unknown, + /// Stable track. + Stable, + /// Beta track. + Beta, + /// Nightly track. + Nightly, + /// Testing track. + Testing, + /// No known track. + #[serde(rename = "null")] + Unknown, } impl Into for updater::ReleaseTrack { - fn into(self) -> ReleaseTrack { - match self { - updater::ReleaseTrack::Stable => ReleaseTrack::Stable, - updater::ReleaseTrack::Beta => ReleaseTrack::Beta, - updater::ReleaseTrack::Nightly => ReleaseTrack::Nightly, - updater::ReleaseTrack::Testing => ReleaseTrack::Testing, - updater::ReleaseTrack::Unknown => ReleaseTrack::Unknown, - } - } + fn into(self) -> ReleaseTrack { + match self { + updater::ReleaseTrack::Stable => ReleaseTrack::Stable, + updater::ReleaseTrack::Beta => ReleaseTrack::Beta, + updater::ReleaseTrack::Nightly => ReleaseTrack::Nightly, + updater::ReleaseTrack::Testing => ReleaseTrack::Testing, + updater::ReleaseTrack::Unknown => ReleaseTrack::Unknown, + } + } } /// Semantic version. #[derive(Debug, PartialEq, Serialize)] pub struct Version { - /// Major part. 
- major: u64, - /// Minor part. - minor: u64, - /// Patch part. - patch: u64, + /// Major part. + major: u64, + /// Minor part. + minor: u64, + /// Patch part. + patch: u64, } impl Into for semver::Version { - fn into(self) -> Version { - Version { - major: self.major, - minor: self.minor, - patch: self.patch, - } - } + fn into(self) -> Version { + Version { + major: self.major, + minor: self.minor, + patch: self.patch, + } + } } /// Version information of a particular release. #[derive(Debug, PartialEq, Serialize)] pub struct VersionInfo { - /// The track on which it was released. - pub track: ReleaseTrack, - /// The version. - pub version: Version, - /// The (SHA1?) 160-bit hash of this build's code base. - pub hash: H160, + /// The track on which it was released. + pub track: ReleaseTrack, + /// The version. + pub version: Version, + /// The (SHA1?) 160-bit hash of this build's code base. + pub hash: H160, } impl Into for updater::VersionInfo { - fn into(self) -> VersionInfo { - VersionInfo { - track: self.track.into(), - version: self.version.into(), - hash: self.hash, - } - } + fn into(self) -> VersionInfo { + VersionInfo { + track: self.track.into(), + version: self.version.into(), + hash: self.hash, + } + } } /// Information regarding a particular release of Parity #[derive(Debug, PartialEq, Serialize)] pub struct ReleaseInfo { - /// Information on the version. - pub version: VersionInfo, - /// Does this release contain critical security updates? - pub is_critical: bool, - /// The latest fork that this release can handle. - pub fork: u64, - /// Our platform's binary, if known. - pub binary: Option, + /// Information on the version. + pub version: VersionInfo, + /// Does this release contain critical security updates? + pub is_critical: bool, + /// The latest fork that this release can handle. + pub fork: u64, + /// Our platform's binary, if known. 
+ pub binary: Option, } impl Into for updater::ReleaseInfo { - fn into(self) -> ReleaseInfo { - ReleaseInfo { - version: self.version.into(), - is_critical: self.is_critical, - fork: self.fork, - binary: self.binary.map(Into::into), - } - } + fn into(self) -> ReleaseInfo { + ReleaseInfo { + version: self.version.into(), + is_critical: self.is_critical, + fork: self.fork, + binary: self.binary.map(Into::into), + } + } } /// Information on our operations environment. #[derive(Debug, PartialEq, Serialize)] pub struct OperationsInfo { - /// Our blockchain's latest fork. - pub fork: u64, - /// Last fork our client supports, if known. - pub this_fork: Option, - /// Information on our track's latest release. - pub track: ReleaseInfo, - /// Information on our minor version's latest release. - pub minor: Option, + /// Our blockchain's latest fork. + pub fork: u64, + /// Last fork our client supports, if known. + pub this_fork: Option, + /// Information on our track's latest release. + pub track: ReleaseInfo, + /// Information on our minor version's latest release. + pub minor: Option, } impl Into for updater::OperationsInfo { - fn into(self) -> OperationsInfo { - OperationsInfo { - fork: self.fork, - this_fork: self.this_fork, - track: self.track.into(), - minor: self.minor.map(Into::into), - } - } + fn into(self) -> OperationsInfo { + OperationsInfo { + fork: self.fork, + this_fork: self.this_fork, + track: self.track.into(), + minor: self.minor.map(Into::into), + } + } } diff --git a/rpc/src/v1/types/derivation.rs b/rpc/src/v1/types/derivation.rs index 1f2764d9f..a4d67f109 100644 --- a/rpc/src/v1/types/derivation.rs +++ b/rpc/src/v1/types/derivation.rs @@ -14,35 +14,37 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use serde::{ + de::{Error, Visitor}, + Deserialize, Deserializer, +}; use std::fmt; -use serde::{Deserialize, Deserializer}; -use serde::de::{Error, Visitor}; use ethereum_types::H256; use ethstore; /// Type of derivation pub enum DerivationType { - /// Soft - allow proof of parent - Soft, - /// Hard - does not allow proof of parent - Hard, + /// Soft - allow proof of parent + Soft, + /// Hard - does not allow proof of parent + Hard, } /// Derivation request by hash #[derive(Deserialize)] pub struct DeriveHash { - hash: H256, - #[serde(rename = "type")] - d_type: DerivationType, + hash: H256, + #[serde(rename = "type")] + d_type: DerivationType, } /// Node propertoes in hierarchical derivation request #[derive(Deserialize)] pub struct DeriveHierarchicalItem { - index: u64, - #[serde(rename = "type")] - d_type: DerivationType, + index: u64, + #[serde(rename = "type")] + d_type: DerivationType, } /// Hierarchical (index sequence) request @@ -50,83 +52,96 @@ pub type DeriveHierarchical = Vec; /// Generic derivate request pub enum Derive { - /// Hierarchical (index sequence) request - Hierarchical(DeriveHierarchical), - /// Hash request - Hash(DeriveHash), + /// Hierarchical (index sequence) request + Hierarchical(DeriveHierarchical), + /// Hash request + Hash(DeriveHash), } impl From for Derive { - fn from(d: DeriveHierarchical) -> Self { - Derive::Hierarchical(d) - } + fn from(d: DeriveHierarchical) -> Self { + Derive::Hierarchical(d) + } } impl From for Derive { - fn from(d: DeriveHash) -> Self { - Derive::Hash(d) - } + fn from(d: DeriveHash) -> Self { + Derive::Hash(d) + } } /// Error converting request data #[cfg(any(test, feature = "accounts"))] #[derive(Debug)] pub enum ConvertError { - IndexOverlfow(u64), + IndexOverlfow(u64), } impl Derive { - /// Convert to account provider struct dealing with possible overflows - #[cfg(any(test, feature = "accounts"))] - pub fn to_derivation(self) -> Result { - Ok(match self { - Derive::Hierarchical(drv) => { - 
ethstore::Derivation::Hierarchical({ - let mut members = Vec::::new(); - for h in drv { - if h.index > ::std::u32::MAX as u64 { return Err(ConvertError::IndexOverlfow(h.index)); } - members.push(match h.d_type { - DerivationType::Soft => ethstore::IndexDerivation { soft: true, index: h.index as u32 }, - DerivationType::Hard => ethstore::IndexDerivation { soft: false, index: h.index as u32 }, - }); - } - members - }) - }, - Derive::Hash(drv) => { - match drv.d_type { - DerivationType::Soft => ethstore::Derivation::SoftHash(drv.hash.into()), - DerivationType::Hard => ethstore::Derivation::HardHash(drv.hash.into()), - } - }, - }) - } + /// Convert to account provider struct dealing with possible overflows + #[cfg(any(test, feature = "accounts"))] + pub fn to_derivation(self) -> Result { + Ok(match self { + Derive::Hierarchical(drv) => ethstore::Derivation::Hierarchical({ + let mut members = Vec::::new(); + for h in drv { + if h.index > ::std::u32::MAX as u64 { + return Err(ConvertError::IndexOverlfow(h.index)); + } + members.push(match h.d_type { + DerivationType::Soft => ethstore::IndexDerivation { + soft: true, + index: h.index as u32, + }, + DerivationType::Hard => ethstore::IndexDerivation { + soft: false, + index: h.index as u32, + }, + }); + } + members + }), + Derive::Hash(drv) => match drv.d_type { + DerivationType::Soft => ethstore::Derivation::SoftHash(drv.hash.into()), + DerivationType::Hard => ethstore::Derivation::HardHash(drv.hash.into()), + }, + }) + } } impl<'a> Deserialize<'a> for DerivationType { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'a> { - deserializer.deserialize_any(DerivationTypeVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(DerivationTypeVisitor) + } } struct DerivationTypeVisitor; impl<'a> Visitor<'a> for DerivationTypeVisitor { - type Value = DerivationType; + type Value = DerivationType; - fn expecting(&self, formatter: &mut 
fmt::Formatter) -> fmt::Result { - write!(formatter, "'hard' or 'soft'") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "'hard' or 'soft'") + } - fn visit_str(self, value: &str) -> Result where E: Error { - match value { - "soft" => Ok(DerivationType::Soft), - "hard" => Ok(DerivationType::Hard), - v => Err(Error::custom(format!("invalid derivation type: {:?}", v))), - } - } + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "soft" => Ok(DerivationType::Soft), + "hard" => Ok(DerivationType::Hard), + v => Err(Error::custom(format!("invalid derivation type: {:?}", v))), + } + } - fn visit_string(self, value: String) -> Result where E: Error { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: Error, + { + self.visit_str(value.as_ref()) + } } diff --git a/rpc/src/v1/types/eip191.rs b/rpc/src/v1/types/eip191.rs index fe3aab4c5..6e624eac9 100644 --- a/rpc/src/v1/types/eip191.rs +++ b/rpc/src/v1/types/eip191.rs @@ -17,43 +17,47 @@ //! 
EIP-191 specific types use ethereum_types::H160; -use serde::{Deserialize, Deserializer}; -use serde::de; +use serde::{de, Deserialize, Deserializer}; use v1::types::Bytes; /// EIP-191 version specifier #[derive(Debug)] pub enum EIP191Version { - /// byte specifier for structured data (0x01) - StructuredData, - /// byte specifier for personal message (0x45) - PersonalMessage, - /// byte specifier for presignedtransaction (0x00) - PresignedTransaction + /// byte specifier for structured data (0x01) + StructuredData, + /// byte specifier for personal message (0x45) + PersonalMessage, + /// byte specifier for presignedtransaction (0x00) + PresignedTransaction, } /// EIP-191 version 0x0 struct #[derive(Deserialize, Serialize)] #[serde(rename_all = "camelCase")] pub struct PresignedTransaction { - // address of intended validator - pub validator: H160, - // application specific data - pub data: Bytes + // address of intended validator + pub validator: H160, + // application specific data + pub data: Bytes, } impl<'de> Deserialize<'de> for EIP191Version { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - let byte_version = match s.as_str() { - "0x00" => EIP191Version::PresignedTransaction, - "0x01" => EIP191Version::StructuredData, - "0x45" => EIP191Version::PersonalMessage, - other => return Err(de::Error::custom(format!("Invalid byte version '{}'", other))), - }; - Ok(byte_version) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + let byte_version = match s.as_str() { + "0x00" => EIP191Version::PresignedTransaction, + "0x01" => EIP191Version::StructuredData, + "0x45" => EIP191Version::PersonalMessage, + other => { + return Err(de::Error::custom(format!( + "Invalid byte version '{}'", + other + ))) + } + }; + Ok(byte_version) + } } diff --git a/rpc/src/v1/types/eth_types.rs b/rpc/src/v1/types/eth_types.rs index 
606e75924..f048cf814 100644 --- a/rpc/src/v1/types/eth_types.rs +++ b/rpc/src/v1/types/eth_types.rs @@ -5,80 +5,111 @@ type Res = Result; #[test] fn should_serialize_u256() { - let serialized1 = serde_json::to_string(&U256::from(0)).unwrap(); - let serialized2 = serde_json::to_string(&U256::from(1)).unwrap(); - let serialized3 = serde_json::to_string(&U256::from(16)).unwrap(); - let serialized4 = serde_json::to_string(&U256::from(256)).unwrap(); + let serialized1 = serde_json::to_string(&U256::from(0)).unwrap(); + let serialized2 = serde_json::to_string(&U256::from(1)).unwrap(); + let serialized3 = serde_json::to_string(&U256::from(16)).unwrap(); + let serialized4 = serde_json::to_string(&U256::from(256)).unwrap(); - assert_eq!(serialized1, r#""0x0""#); - assert_eq!(serialized2, r#""0x1""#); - assert_eq!(serialized3, r#""0x10""#); - assert_eq!(serialized4, r#""0x100""#); + assert_eq!(serialized1, r#""0x0""#); + assert_eq!(serialized2, r#""0x1""#); + assert_eq!(serialized3, r#""0x10""#); + assert_eq!(serialized4, r#""0x100""#); } #[test] fn should_serialize_h256() { - let serialized1 = serde_json::to_string(&H256::from(0)).unwrap(); - let serialized2 = serde_json::to_string(&H256::from(1)).unwrap(); - let serialized3 = serde_json::to_string(&H256::from(16)).unwrap(); - let serialized4 = serde_json::to_string(&H256::from(256)).unwrap(); + let serialized1 = serde_json::to_string(&H256::from(0)).unwrap(); + let serialized2 = serde_json::to_string(&H256::from(1)).unwrap(); + let serialized3 = serde_json::to_string(&H256::from(16)).unwrap(); + let serialized4 = serde_json::to_string(&H256::from(256)).unwrap(); - assert_eq!(serialized1, r#""0x0000000000000000000000000000000000000000000000000000000000000000""#); - assert_eq!(serialized2, r#""0x0000000000000000000000000000000000000000000000000000000000000001""#); - assert_eq!(serialized3, r#""0x0000000000000000000000000000000000000000000000000000000000000010""#); - assert_eq!(serialized4, 
r#""0x0000000000000000000000000000000000000000000000000000000000000100""#); + assert_eq!( + serialized1, + r#""0x0000000000000000000000000000000000000000000000000000000000000000""# + ); + assert_eq!( + serialized2, + r#""0x0000000000000000000000000000000000000000000000000000000000000001""# + ); + assert_eq!( + serialized3, + r#""0x0000000000000000000000000000000000000000000000000000000000000010""# + ); + assert_eq!( + serialized4, + r#""0x0000000000000000000000000000000000000000000000000000000000000100""# + ); } #[test] fn should_fail_to_deserialize_decimals() { - let deserialized0: Res = serde_json::from_str(r#""∀∂""#); - let deserialized1: Res = serde_json::from_str(r#""""#); - let deserialized2: Res = serde_json::from_str(r#""0""#); - let deserialized3: Res = serde_json::from_str(r#""10""#); - let deserialized4: Res = serde_json::from_str(r#""1000000""#); - let deserialized5: Res = serde_json::from_str(r#""1000000000000000000""#); - let deserialized6: Res = serde_json::from_str(r#""0x""#); + let deserialized0: Res = serde_json::from_str(r#""∀∂""#); + let deserialized1: Res = serde_json::from_str(r#""""#); + let deserialized2: Res = serde_json::from_str(r#""0""#); + let deserialized3: Res = serde_json::from_str(r#""10""#); + let deserialized4: Res = serde_json::from_str(r#""1000000""#); + let deserialized5: Res = serde_json::from_str(r#""1000000000000000000""#); + let deserialized6: Res = serde_json::from_str(r#""0x""#); - assert!(deserialized0.is_err()); - assert!(deserialized1.is_err()); - assert!(deserialized2.is_err()); - assert!(deserialized3.is_err()); - assert!(deserialized4.is_err()); - assert!(deserialized5.is_err()); - assert!(deserialized6.is_err(), "Quantities should represent zero as 0x0"); + assert!(deserialized0.is_err()); + assert!(deserialized1.is_err()); + assert!(deserialized2.is_err()); + assert!(deserialized3.is_err()); + assert!(deserialized4.is_err()); + assert!(deserialized5.is_err()); + assert!( + deserialized6.is_err(), + "Quantities 
should represent zero as 0x0" + ); } #[test] fn should_fail_to_deserialize_bad_hex_strings() { - let deserialized1: Result = serde_json::from_str(r#""0""#); - let deserialized2: Result = serde_json::from_str(r#""0x""#); - let deserialized3: Result = serde_json::from_str(r#""0x∀∂0000000000000000000000000000000000000000000000000000000000""#); + let deserialized1: Result = serde_json::from_str(r#""0""#); + let deserialized2: Result = serde_json::from_str(r#""0x""#); + let deserialized3: Result = + serde_json::from_str(r#""0x∀∂0000000000000000000000000000000000000000000000000000000000""#); - assert!(deserialized1.is_err(), "hex string should start with 0x"); - assert!(deserialized2.is_err(), "0x-prefixed hex string of length 64"); - assert!(deserialized3.is_err(), "hex string should only contain hex chars"); + assert!(deserialized1.is_err(), "hex string should start with 0x"); + assert!( + deserialized2.is_err(), + "0x-prefixed hex string of length 64" + ); + assert!( + deserialized3.is_err(), + "hex string should only contain hex chars" + ); } #[test] fn should_deserialize_u256() { - let deserialized1: U256 = serde_json::from_str(r#""0x0""#).unwrap(); - let deserialized2: U256 = serde_json::from_str(r#""0x1""#).unwrap(); - let deserialized3: U256 = serde_json::from_str(r#""0x01""#).unwrap(); - let deserialized4: U256 = serde_json::from_str(r#""0x100""#).unwrap(); + let deserialized1: U256 = serde_json::from_str(r#""0x0""#).unwrap(); + let deserialized2: U256 = serde_json::from_str(r#""0x1""#).unwrap(); + let deserialized3: U256 = serde_json::from_str(r#""0x01""#).unwrap(); + let deserialized4: U256 = serde_json::from_str(r#""0x100""#).unwrap(); - assert_eq!(deserialized1, 0.into()); - assert_eq!(deserialized2, 1.into()); - assert_eq!(deserialized3, 1.into()); - assert_eq!(deserialized4, 256.into()); + assert_eq!(deserialized1, 0.into()); + assert_eq!(deserialized2, 1.into()); + assert_eq!(deserialized3, 1.into()); + assert_eq!(deserialized4, 256.into()); } #[test] fn 
should_deserialize_h256() { - let deserialized1: H256 = serde_json::from_str(r#""0x0000000000000000000000000000000000000000000000000000000000000000""#).unwrap(); - let deserialized2: H256 = serde_json::from_str(r#""0x0000000000000000000000000000000000000000000000000000000000000001""#).unwrap(); - let deserialized3: H256 = serde_json::from_str(r#""0x0000000000000000000000000000000000000000000000000000000000000100""#).unwrap(); + let deserialized1: H256 = serde_json::from_str( + r#""0x0000000000000000000000000000000000000000000000000000000000000000""#, + ) + .unwrap(); + let deserialized2: H256 = serde_json::from_str( + r#""0x0000000000000000000000000000000000000000000000000000000000000001""#, + ) + .unwrap(); + let deserialized3: H256 = serde_json::from_str( + r#""0x0000000000000000000000000000000000000000000000000000000000000100""#, + ) + .unwrap(); - assert_eq!(deserialized1, 0.into()); - assert_eq!(deserialized2, 1.into()); - assert_eq!(deserialized3, 256.into()); + assert_eq!(deserialized1, 0.into()); + assert_eq!(deserialized2, 1.into()); + assert_eq!(deserialized3, 256.into()); } diff --git a/rpc/src/v1/types/filter.rs b/rpc/src/v1/types/filter.rs index ec9f54179..890b83ada 100644 --- a/rpc/src/v1/types/filter.rs +++ b/rpc/src/v1/types/filter.rs @@ -15,40 +15,52 @@ // along with Parity Ethereum. If not, see . 
use ethereum_types::{H160, H256}; -use jsonrpc_core::{Error as RpcError}; -use serde::de::{Error, DeserializeOwned}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde_json::{Value, from_value}; -use types::filter::Filter as EthFilter; -use types::ids::BlockId; +use jsonrpc_core::Error as RpcError; +use serde::{ + de::{DeserializeOwned, Error}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use serde_json::{from_value, Value}; +use types::{filter::Filter as EthFilter, ids::BlockId}; -use v1::types::{BlockNumber, Log}; -use v1::helpers::errors::invalid_params; +use v1::{ + helpers::errors::invalid_params, + types::{BlockNumber, Log}, +}; /// Variadic value #[derive(Debug, PartialEq, Eq, Clone, Hash)] -pub enum VariadicValue where T: DeserializeOwned { - /// Single - Single(T), - /// List - Multiple(Vec), - /// None - Null, +pub enum VariadicValue +where + T: DeserializeOwned, +{ + /// Single + Single(T), + /// List + Multiple(Vec), + /// None + Null, } -impl<'a, T> Deserialize<'a> for VariadicValue where T: DeserializeOwned { - fn deserialize(deserializer: D) -> Result, D::Error> - where D: Deserializer<'a> { - let v: Value = Deserialize::deserialize(deserializer)?; +impl<'a, T> Deserialize<'a> for VariadicValue +where + T: DeserializeOwned, +{ + fn deserialize(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'a>, + { + let v: Value = Deserialize::deserialize(deserializer)?; - if v.is_null() { - return Ok(VariadicValue::Null); - } + if v.is_null() { + return Ok(VariadicValue::Null); + } - from_value(v.clone()).map(VariadicValue::Single) - .or_else(|_| from_value(v).map(VariadicValue::Multiple)) - .map_err(|err| D::Error::custom(format!("Invalid variadic value type: {}", err))) - } + from_value(v.clone()) + .map(VariadicValue::Single) + .or_else(|_| from_value(v).map(VariadicValue::Multiple)) + .map_err(|err| D::Error::custom(format!("Invalid variadic value type: {}", err))) + } } /// Filter Address @@ -61,151 +73,194 
@@ pub type Topic = VariadicValue; #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct Filter { - /// From Block - pub from_block: Option, - /// To Block - pub to_block: Option, - /// Block hash - pub block_hash: Option, - /// Address - pub address: Option, - /// Topics - pub topics: Option>, - /// Limit - pub limit: Option, + /// From Block + pub from_block: Option, + /// To Block + pub to_block: Option, + /// Block hash + pub block_hash: Option, + /// Address + pub address: Option, + /// Topics + pub topics: Option>, + /// Limit + pub limit: Option, } impl Filter { - pub fn try_into(self) -> Result { - if self.block_hash.is_some() && (self.from_block.is_some() || self.to_block.is_some()) { - return Err(invalid_params("blockHash", "blockHash is mutually exclusive with fromBlock/toBlock")); - } + pub fn try_into(self) -> Result { + if self.block_hash.is_some() && (self.from_block.is_some() || self.to_block.is_some()) { + return Err(invalid_params( + "blockHash", + "blockHash is mutually exclusive with fromBlock/toBlock", + )); + } - let num_to_id = |num| match num { - BlockNumber::Num(n) => BlockId::Number(n), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest | BlockNumber::Pending => BlockId::Latest, - }; + let num_to_id = |num| match num { + BlockNumber::Num(n) => BlockId::Number(n), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest | BlockNumber::Pending => BlockId::Latest, + }; - let (from_block, to_block) = match self.block_hash { - Some(hash) => (BlockId::Hash(hash), BlockId::Hash(hash)), - None => - (self.from_block.map_or_else(|| BlockId::Latest, &num_to_id), - self.to_block.map_or_else(|| BlockId::Latest, &num_to_id)), - }; + let (from_block, to_block) = match self.block_hash { + Some(hash) => (BlockId::Hash(hash), BlockId::Hash(hash)), + None => ( + self.from_block.map_or_else(|| BlockId::Latest, &num_to_id), + self.to_block.map_or_else(|| BlockId::Latest, &num_to_id), + ), + }; - Ok(EthFilter { 
- from_block, to_block, - address: self.address.and_then(|address| match address { - VariadicValue::Null => None, - VariadicValue::Single(a) => Some(vec![a]), - VariadicValue::Multiple(a) => Some(a) - }), - topics: { - let mut iter = self.topics.map_or_else(Vec::new, |topics| topics.into_iter().take(4).map(|topic| match topic { - VariadicValue::Null => None, - VariadicValue::Single(t) => Some(vec![t]), - VariadicValue::Multiple(t) => Some(t) - }).collect()).into_iter(); + Ok(EthFilter { + from_block, + to_block, + address: self.address.and_then(|address| match address { + VariadicValue::Null => None, + VariadicValue::Single(a) => Some(vec![a]), + VariadicValue::Multiple(a) => Some(a), + }), + topics: { + let mut iter = self + .topics + .map_or_else(Vec::new, |topics| { + topics + .into_iter() + .take(4) + .map(|topic| match topic { + VariadicValue::Null => None, + VariadicValue::Single(t) => Some(vec![t]), + VariadicValue::Multiple(t) => Some(t), + }) + .collect() + }) + .into_iter(); - vec![ - iter.next().unwrap_or(None), - iter.next().unwrap_or(None), - iter.next().unwrap_or(None), - iter.next().unwrap_or(None) - ] - }, - limit: self.limit, - }) - } + vec![ + iter.next().unwrap_or(None), + iter.next().unwrap_or(None), + iter.next().unwrap_or(None), + iter.next().unwrap_or(None), + ] + }, + limit: self.limit, + }) + } } /// Results of the filter_changes RPC. #[derive(Debug, PartialEq)] pub enum FilterChanges { - /// New logs. - Logs(Vec), - /// New hashes (block or transactions) - Hashes(Vec), - /// Empty result, - Empty, + /// New logs. 
+ Logs(Vec), + /// New hashes (block or transactions) + Hashes(Vec), + /// Empty result, + Empty, } impl Serialize for FilterChanges { - fn serialize(&self, s: S) -> Result where S: Serializer { - match *self { - FilterChanges::Logs(ref logs) => logs.serialize(s), - FilterChanges::Hashes(ref hashes) => hashes.serialize(s), - FilterChanges::Empty => (&[] as &[Value]).serialize(s), - } - } + fn serialize(&self, s: S) -> Result + where + S: Serializer, + { + match *self { + FilterChanges::Logs(ref logs) => logs.serialize(s), + FilterChanges::Hashes(ref hashes) => hashes.serialize(s), + FilterChanges::Empty => (&[] as &[Value]).serialize(s), + } + } } #[cfg(test)] mod tests { - use serde_json; - use std::str::FromStr; - use ethereum_types::H256; - use super::{VariadicValue, Topic, Filter}; - use v1::types::BlockNumber; - use types::filter::Filter as EthFilter; - use types::ids::BlockId; + use super::{Filter, Topic, VariadicValue}; + use ethereum_types::H256; + use serde_json; + use std::str::FromStr; + use types::{filter::Filter as EthFilter, ids::BlockId}; + use v1::types::BlockNumber; - #[test] - fn topic_deserialization() { - let s = r#"["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b", null, ["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b", "0x0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc"]]"#; - let deserialized: Vec = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, vec![ - VariadicValue::Single(H256::from_str("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap().into()), - VariadicValue::Null, - VariadicValue::Multiple(vec![ - H256::from_str("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap().into(), - H256::from_str("0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc").unwrap().into(), - ]) - ]); - } + #[test] + fn topic_deserialization() { + let s = r#"["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b", null, 
["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b", "0x0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc"]]"#; + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + vec![ + VariadicValue::Single( + H256::from_str( + "000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b" + ) + .unwrap() + .into() + ), + VariadicValue::Null, + VariadicValue::Multiple(vec![ + H256::from_str( + "000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b" + ) + .unwrap() + .into(), + H256::from_str( + "0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc" + ) + .unwrap() + .into(), + ]) + ] + ); + } - #[test] - fn filter_deserialization() { - let s = r#"{"fromBlock":"earliest","toBlock":"latest"}"#; - let deserialized: Filter = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, Filter { - from_block: Some(BlockNumber::Earliest), - to_block: Some(BlockNumber::Latest), - block_hash: None, - address: None, - topics: None, - limit: None, - }); - } + #[test] + fn filter_deserialization() { + let s = r#"{"fromBlock":"earliest","toBlock":"latest"}"#; + let deserialized: Filter = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + Filter { + from_block: Some(BlockNumber::Earliest), + to_block: Some(BlockNumber::Latest), + block_hash: None, + address: None, + topics: None, + limit: None, + } + ); + } - #[test] - fn filter_conversion() { - let filter = Filter { - from_block: Some(BlockNumber::Earliest), - to_block: Some(BlockNumber::Latest), - block_hash: None, - address: Some(VariadicValue::Multiple(vec![])), - topics: Some(vec![ - VariadicValue::Null, - VariadicValue::Single("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".into()), - VariadicValue::Null, - ]), - limit: None, - }; + #[test] + fn filter_conversion() { + let filter = Filter { + from_block: Some(BlockNumber::Earliest), + to_block: Some(BlockNumber::Latest), + block_hash: None, + address: 
Some(VariadicValue::Multiple(vec![])), + topics: Some(vec![ + VariadicValue::Null, + VariadicValue::Single( + "000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".into(), + ), + VariadicValue::Null, + ]), + limit: None, + }; - let eth_filter: EthFilter = filter.try_into().unwrap(); - assert_eq!(eth_filter, EthFilter { - from_block: BlockId::Earliest, - to_block: BlockId::Latest, - address: Some(vec![]), - topics: vec![ - None, - Some(vec!["000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".into()]), - None, - None, - ], - limit: None, - }); - } + let eth_filter: EthFilter = filter.try_into().unwrap(); + assert_eq!( + eth_filter, + EthFilter { + from_block: BlockId::Earliest, + to_block: BlockId::Latest, + address: Some(vec![]), + topics: vec![ + None, + Some(vec![ + "000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".into() + ]), + None, + None, + ], + limit: None, + } + ); + } } diff --git a/rpc/src/v1/types/histogram.rs b/rpc/src/v1/types/histogram.rs index d7f14c514..c53170d64 100644 --- a/rpc/src/v1/types/histogram.rs +++ b/rpc/src/v1/types/histogram.rs @@ -23,17 +23,17 @@ use ethereum_types::U256; #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct Histogram { - /// Gas prices for bucket edges. - pub bucket_bounds: Vec, - /// Transacion counts for each bucket. - pub counts: Vec, + /// Gas prices for bucket edges. + pub bucket_bounds: Vec, + /// Transacion counts for each bucket. 
+ pub counts: Vec, } impl From<::stats::Histogram<::ethereum_types::U256>> for Histogram { - fn from(h: ::stats::Histogram<::ethereum_types::U256>) -> Self { - Histogram { - bucket_bounds: h.bucket_bounds.into_iter().map(Into::into).collect(), - counts: h.counts - } - } + fn from(h: ::stats::Histogram<::ethereum_types::U256>) -> Self { + Histogram { + bucket_bounds: h.bucket_bounds.into_iter().map(Into::into).collect(), + counts: h.counts, + } + } } diff --git a/rpc/src/v1/types/index.rs b/rpc/src/v1/types/index.rs index 3f4b4e317..024e83d8a 100644 --- a/rpc/src/v1/types/index.rs +++ b/rpc/src/v1/types/index.rs @@ -14,62 +14,73 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use serde::{ + de::{Error, Visitor}, + Deserialize, Deserializer, +}; use std::fmt; -use serde::{Deserialize, Deserializer}; -use serde::de::{Error, Visitor}; /// Represents usize. #[derive(Debug, PartialEq)] pub struct Index(usize); impl Index { - /// Convert to usize - pub fn value(&self) -> usize { - self.0 - } + /// Convert to usize + pub fn value(&self) -> usize { + self.0 + } } impl<'a> Deserialize<'a> for Index { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_any(IndexVisitor) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(IndexVisitor) + } } struct IndexVisitor; impl<'a> Visitor<'a> for IndexVisitor { - type Value = Index; + type Value = Index; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a hex-encoded or decimal index") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded or decimal index") + } - fn visit_str(self, value: &str) -> Result where E: Error { - match value { - _ if value.starts_with("0x") => usize::from_str_radix(&value[2..], 16).map(Index).map_err(|e| { - 
Error::custom(format!("Invalid index: {}", e)) - }), - _ => value.parse::().map(Index).map_err(|e| { - Error::custom(format!("Invalid index: {}", e)) - }), - } - } + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + _ if value.starts_with("0x") => usize::from_str_radix(&value[2..], 16) + .map(Index) + .map_err(|e| Error::custom(format!("Invalid index: {}", e))), + _ => value + .parse::() + .map(Index) + .map_err(|e| Error::custom(format!("Invalid index: {}", e))), + } + } - fn visit_string(self, value: String) -> Result where E: Error { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: Error, + { + self.visit_str(value.as_ref()) + } } #[cfg(test)] mod tests { - use super::*; - use serde_json; + use super::*; + use serde_json; - #[test] - fn block_number_deserialization() { - let s = r#"["0xa", "10"]"#; - let deserialized: Vec = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, vec![Index(10), Index(10)]); - } + #[test] + fn block_number_deserialization() { + let s = r#"["0xa", "10"]"#; + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized, vec![Index(10), Index(10)]); + } } diff --git a/rpc/src/v1/types/log.rs b/rpc/src/v1/types/log.rs index 57b2cdd5d..d48014840 100644 --- a/rpc/src/v1/types/log.rs +++ b/rpc/src/v1/types/log.rs @@ -22,97 +22,102 @@ use v1::types::Bytes; #[derive(Debug, Serialize, PartialEq, Eq, Hash, Clone)] #[serde(rename_all = "camelCase")] pub struct Log { - /// H160 - pub address: H160, - /// Topics - pub topics: Vec, - /// Data - pub data: Bytes, - /// Block Hash - pub block_hash: Option, - /// Block Number - pub block_number: Option, - /// Transaction Hash - pub transaction_hash: Option, - /// Transaction Index - pub transaction_index: Option, - /// Log Index in Block - pub log_index: Option, - /// Log Index in Transaction - pub transaction_log_index: Option, - /// Log Type - #[serde(rename = "type")] - pub log_type: 
String, - /// Whether Log Type is Removed (Geth Compatibility Field) - #[serde(default)] - pub removed: bool, + /// H160 + pub address: H160, + /// Topics + pub topics: Vec, + /// Data + pub data: Bytes, + /// Block Hash + pub block_hash: Option, + /// Block Number + pub block_number: Option, + /// Transaction Hash + pub transaction_hash: Option, + /// Transaction Index + pub transaction_index: Option, + /// Log Index in Block + pub log_index: Option, + /// Log Index in Transaction + pub transaction_log_index: Option, + /// Log Type + #[serde(rename = "type")] + pub log_type: String, + /// Whether Log Type is Removed (Geth Compatibility Field) + #[serde(default)] + pub removed: bool, } impl From for Log { - fn from(e: LocalizedLogEntry) -> Log { - Log { - address: e.entry.address, - topics: e.entry.topics.into_iter().map(Into::into).collect(), - data: e.entry.data.into(), - block_hash: Some(e.block_hash), - block_number: Some(e.block_number.into()), - transaction_hash: Some(e.transaction_hash), - transaction_index: Some(e.transaction_index.into()), - log_index: Some(e.log_index.into()), - transaction_log_index: Some(e.transaction_log_index.into()), - log_type: "mined".to_owned(), - removed: false, - } - } + fn from(e: LocalizedLogEntry) -> Log { + Log { + address: e.entry.address, + topics: e.entry.topics.into_iter().map(Into::into).collect(), + data: e.entry.data.into(), + block_hash: Some(e.block_hash), + block_number: Some(e.block_number.into()), + transaction_hash: Some(e.transaction_hash), + transaction_index: Some(e.transaction_index.into()), + log_index: Some(e.log_index.into()), + transaction_log_index: Some(e.transaction_log_index.into()), + log_type: "mined".to_owned(), + removed: false, + } + } } impl From for Log { - fn from(e: LogEntry) -> Log { - Log { - address: e.address, - topics: e.topics.into_iter().map(Into::into).collect(), - data: e.data.into(), - block_hash: None, - block_number: None, - transaction_hash: None, - transaction_index: None, - 
log_index: None, - transaction_log_index: None, - log_type: "pending".to_owned(), - removed: false, - } - } + fn from(e: LogEntry) -> Log { + Log { + address: e.address, + topics: e.topics.into_iter().map(Into::into).collect(), + data: e.data.into(), + block_hash: None, + block_number: None, + transaction_hash: None, + transaction_index: None, + log_index: None, + transaction_log_index: None, + log_type: "pending".to_owned(), + removed: false, + } + } } #[cfg(test)] mod tests { - use serde_json; - use std::str::FromStr; - use v1::types::Log; - use ethereum_types::{H160, H256, U256}; + use ethereum_types::{H160, H256, U256}; + use serde_json; + use std::str::FromStr; + use v1::types::Log; - #[test] - fn log_serialization() { - let s = r#"{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"data":"0x","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","logIndex":"0x1","transactionLogIndex":"0x1","type":"mined","removed":false}"#; + #[test] + fn log_serialization() { + let s = r#"{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"data":"0x","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","logIndex":"0x1","transactionLogIndex":"0x1","type":"mined","removed":false}"#; - let log = Log { - address: H160::from_str("33990122638b9132ca29c723bdf037f1a891a70c").unwrap(), - topics: vec![ - 
H256::from_str("a6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc").unwrap(), - H256::from_str("4861736852656700000000000000000000000000000000000000000000000000").unwrap(), - ], - data: vec![].into(), - block_hash: Some(H256::from_str("ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5").unwrap()), - block_number: Some(U256::from(0x4510c)), - transaction_hash: Some(H256::default()), - transaction_index: Some(U256::default()), - transaction_log_index: Some(1.into()), - log_index: Some(U256::from(1)), - log_type: "mined".to_owned(), - removed: false, - }; + let log = Log { + address: H160::from_str("33990122638b9132ca29c723bdf037f1a891a70c").unwrap(), + topics: vec![ + H256::from_str("a6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc") + .unwrap(), + H256::from_str("4861736852656700000000000000000000000000000000000000000000000000") + .unwrap(), + ], + data: vec![].into(), + block_hash: Some( + H256::from_str("ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5") + .unwrap(), + ), + block_number: Some(U256::from(0x4510c)), + transaction_hash: Some(H256::default()), + transaction_index: Some(U256::default()), + transaction_log_index: Some(1.into()), + log_index: Some(U256::from(1)), + log_type: "mined".to_owned(), + removed: false, + }; - let serialized = serde_json::to_string(&log).unwrap(); - assert_eq!(serialized, s); - } + let serialized = serde_json::to_string(&log).unwrap(); + assert_eq!(serialized, s); + } } diff --git a/rpc/src/v1/types/mod.rs b/rpc/src/v1/types/mod.rs index a41f49fab..9f0922fab 100644 --- a/rpc/src/v1/types/mod.rs +++ b/rpc/src/v1/types/mod.rs @@ -27,11 +27,13 @@ mod call_request; mod confirmations; mod consensus_status; mod derivation; +mod eip191; mod filter; mod histogram; mod index; mod log; mod node_kind; +mod private_receipt; mod provenance; mod receipt; mod rpc_settings; @@ -40,46 +42,49 @@ mod sync; mod trace; mod trace_filter; mod transaction; -mod transaction_request; mod 
transaction_condition; +mod transaction_request; mod work; -mod private_receipt; -mod eip191; pub mod pubsub; -pub use self::eip191::{EIP191Version, PresignedTransaction}; -pub use self::account_info::{AccountInfo, ExtAccountInfo, HwAccountInfo, EthAccount, StorageProof, RecoveredAccount}; -pub use self::bytes::Bytes; -pub use self::block::{RichBlock, Block, BlockTransactions, Header, RichHeader, Rich}; -pub use self::block_number::{BlockNumber, LightBlockNumber, block_number_to_id}; -pub use self::call_request::CallRequest; -pub use self::confirmations::{ - ConfirmationPayload, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken, - TransactionModification, EIP191SignRequest, EthSignRequest, DecryptRequest, Either +pub use self::{ + account_info::{ + AccountInfo, EthAccount, ExtAccountInfo, HwAccountInfo, RecoveredAccount, StorageProof, + }, + block::{Block, BlockTransactions, Header, Rich, RichBlock, RichHeader}, + block_number::{block_number_to_id, BlockNumber, LightBlockNumber}, + bytes::Bytes, + call_request::CallRequest, + confirmations::{ + ConfirmationPayload, ConfirmationRequest, ConfirmationResponse, + ConfirmationResponseWithToken, DecryptRequest, EIP191SignRequest, Either, EthSignRequest, + TransactionModification, + }, + consensus_status::*, + derivation::{Derive, DeriveHash, DeriveHierarchical}, + eip191::{EIP191Version, PresignedTransaction}, + filter::{Filter, FilterChanges}, + histogram::Histogram, + index::Index, + log::Log, + node_kind::{Availability, Capability, NodeKind}, + private_receipt::{PrivateTransactionReceipt, PrivateTransactionReceiptAndTransaction}, + provenance::Origin, + receipt::Receipt, + rpc_settings::RpcSettings, + secretstore::EncryptedDocumentKey, + sync::{ + ChainStatus, EthProtocolInfo, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo, Peers, + PipProtocolInfo, SyncInfo, SyncStatus, TransactionStats, + }, + trace::{LocalizedTrace, TraceResults, TraceResultsWithTransactionHash}, + trace_filter::TraceFilter, + 
transaction::{LocalTransactionStatus, RichRawTransaction, Transaction}, + transaction_condition::TransactionCondition, + transaction_request::TransactionRequest, + work::Work, }; -pub use self::consensus_status::*; -pub use self::derivation::{DeriveHash, DeriveHierarchical, Derive}; -pub use self::filter::{Filter, FilterChanges}; -pub use self::histogram::Histogram; -pub use self::index::Index; -pub use self::log::Log; -pub use self::node_kind::{NodeKind, Availability, Capability}; -pub use self::provenance::Origin; -pub use self::receipt::Receipt; -pub use self::rpc_settings::RpcSettings; -pub use self::secretstore::EncryptedDocumentKey; -pub use self::sync::{ - SyncStatus, SyncInfo, Peers, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo, - TransactionStats, ChainStatus, EthProtocolInfo, PipProtocolInfo, -}; -pub use self::trace::{LocalizedTrace, TraceResults, TraceResultsWithTransactionHash}; -pub use self::trace_filter::TraceFilter; -pub use self::transaction::{Transaction, RichRawTransaction, LocalTransactionStatus}; -pub use self::transaction_request::TransactionRequest; -pub use self::transaction_condition::TransactionCondition; -pub use self::work::Work; -pub use self::private_receipt::{PrivateTransactionReceipt, PrivateTransactionReceiptAndTransaction}; // TODO [ToDr] Refactor to a proper type Vec of enums? /// Expected tracing type. diff --git a/rpc/src/v1/types/node_kind.rs b/rpc/src/v1/types/node_kind.rs index f02f21939..c7800b4d7 100644 --- a/rpc/src/v1/types/node_kind.rs +++ b/rpc/src/v1/types/node_kind.rs @@ -20,71 +20,89 @@ /// applications about how to utilize the RPC. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct NodeKind { - /// The capability of the node. - pub capability: Capability, - /// Who the node is available to. - pub availability: Availability, + /// The capability of the node. + pub capability: Capability, + /// Who the node is available to. + pub availability: Availability, } /// Who the node is available to. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum Availability { - /// A personal node, not intended to be available to everyone. - Personal, - /// A public, open node. - Public, + /// A personal node, not intended to be available to everyone. + Personal, + /// A public, open node. + Public, } /// The capability of the node. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum Capability { - /// A full node stores the full state and fully enacts incoming blocks. - Full, - /// A light node does a minimal header sync and fetches data as needed - /// from the network. - Light, + /// A full node stores the full state and fully enacts incoming blocks. + Full, + /// A light node does a minimal header sync and fetches data as needed + /// from the network. + Light, } #[cfg(test)] mod tests { - use super::{NodeKind, Availability, Capability}; - use serde_json; + use super::{Availability, Capability, NodeKind}; + use serde_json; - #[test] - fn availability() { - let personal = r#""personal""#; - let public = r#""public""#; + #[test] + fn availability() { + let personal = r#""personal""#; + let public = r#""public""#; - assert_eq!(serde_json::to_string(&Availability::Personal).unwrap(), personal); - assert_eq!(serde_json::to_string(&Availability::Public).unwrap(), public); + assert_eq!( + serde_json::to_string(&Availability::Personal).unwrap(), + personal + ); + assert_eq!( + serde_json::to_string(&Availability::Public).unwrap(), + public + ); - assert_eq!(serde_json::from_str::(personal).unwrap(), Availability::Personal); - assert_eq!(serde_json::from_str::(public).unwrap(), Availability::Public); - } + assert_eq!( + serde_json::from_str::(personal).unwrap(), + Availability::Personal + ); + assert_eq!( + serde_json::from_str::(public).unwrap(), + Availability::Public + ); + } - #[test] - fn capability() { - let light = r#""light""#; - let full = r#""full""#; + #[test] + fn 
capability() { + let light = r#""light""#; + let full = r#""full""#; - assert_eq!(serde_json::to_string(&Capability::Light).unwrap(), light); - assert_eq!(serde_json::to_string(&Capability::Full).unwrap(), full); + assert_eq!(serde_json::to_string(&Capability::Light).unwrap(), light); + assert_eq!(serde_json::to_string(&Capability::Full).unwrap(), full); - assert_eq!(serde_json::from_str::(light).unwrap(), Capability::Light); - assert_eq!(serde_json::from_str::(full).unwrap(), Capability::Full); - } + assert_eq!( + serde_json::from_str::(light).unwrap(), + Capability::Light + ); + assert_eq!( + serde_json::from_str::(full).unwrap(), + Capability::Full + ); + } - #[test] - fn node_kind() { - let kind = NodeKind { - capability: Capability::Full, - availability: Availability::Public, - }; - let s = r#"{"capability":"full","availability":"public"}"#; + #[test] + fn node_kind() { + let kind = NodeKind { + capability: Capability::Full, + availability: Availability::Public, + }; + let s = r#"{"capability":"full","availability":"public"}"#; - assert_eq!(serde_json::to_string(&kind).unwrap(), s); - assert_eq!(serde_json::from_str::(s).unwrap(), kind); - } + assert_eq!(serde_json::to_string(&kind).unwrap(), s); + assert_eq!(serde_json::from_str::(s).unwrap(), kind); + } } diff --git a/rpc/src/v1/types/private_receipt.rs b/rpc/src/v1/types/private_receipt.rs index 68e9e716f..368a856d2 100644 --- a/rpc/src/v1/types/private_receipt.rs +++ b/rpc/src/v1/types/private_receipt.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use ethcore_private_tx::{Receipt as EthPrivateReceipt}; +use ethcore_private_tx::Receipt as EthPrivateReceipt; use ethereum_types::{H160, H256}; use v1::types::TransactionRequest; @@ -22,30 +22,30 @@ use v1::types::TransactionRequest; #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct PrivateTransactionReceipt { - /// Transaction Hash - pub transaction_hash: H256, - /// Private contract address - pub contract_address: H160, - /// Status code - #[serde(rename = "status")] - pub status_code: u8, + /// Transaction Hash + pub transaction_hash: H256, + /// Private contract address + pub contract_address: H160, + /// Status code + #[serde(rename = "status")] + pub status_code: u8, } impl From for PrivateTransactionReceipt { - fn from(r: EthPrivateReceipt) -> Self { - PrivateTransactionReceipt { - transaction_hash: r.hash, - contract_address: r.contract_address, - status_code: r.status_code, - } - } + fn from(r: EthPrivateReceipt) -> Self { + PrivateTransactionReceipt { + transaction_hash: r.hash, + contract_address: r.contract_address, + status_code: r.status_code, + } + } } /// Receipt and Transaction #[derive(Debug, Serialize)] pub struct PrivateTransactionReceiptAndTransaction { - /// Receipt - pub receipt: PrivateTransactionReceipt, - /// Transaction - pub transaction: TransactionRequest, + /// Receipt + pub receipt: PrivateTransactionReceipt, + /// Transaction + pub transaction: TransactionRequest, } diff --git a/rpc/src/v1/types/provenance.rs b/rpc/src/v1/types/provenance.rs index dcdd2408f..50c8db265 100644 --- a/rpc/src/v1/types/provenance.rs +++ b/rpc/src/v1/types/provenance.rs @@ -16,83 +16,88 @@ //! 
Request Provenance -use std::fmt; use ethereum_types::H256; +use std::fmt; /// RPC request origin #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "kebab-case")] pub enum Origin { - /// RPC server (includes request origin) - Rpc(String), - /// IPC server (includes session hash) - Ipc(H256), - /// WS server - Ws { - /// Session id - session: H256, - }, - /// Signer (authorized WS server) - Signer { - /// Session id - session: H256 - }, - /// From the C API - CApi, - /// Unknown - Unknown, + /// RPC server (includes request origin) + Rpc(String), + /// IPC server (includes session hash) + Ipc(H256), + /// WS server + Ws { + /// Session id + session: H256, + }, + /// Signer (authorized WS server) + Signer { + /// Session id + session: H256, + }, + /// From the C API + CApi, + /// Unknown + Unknown, } impl Default for Origin { - fn default() -> Self { - Origin::Unknown - } + fn default() -> Self { + Origin::Unknown + } } impl fmt::Display for Origin { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Origin::Rpc(ref origin) => write!(f, "{} via RPC", origin), - Origin::Ipc(ref session) => write!(f, "IPC (session: {})", session), - Origin::Ws { ref session } => write!(f, "WebSocket (session: {})", session), - Origin::Signer { ref session } => write!(f, "Secure Session (session: {})", session), - Origin::CApi => write!(f, "C API"), - Origin::Unknown => write!(f, "unknown origin"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Origin::Rpc(ref origin) => write!(f, "{} via RPC", origin), + Origin::Ipc(ref session) => write!(f, "IPC (session: {})", session), + Origin::Ws { ref session } => write!(f, "WebSocket (session: {})", session), + Origin::Signer { ref session } => write!(f, "Secure Session (session: {})", session), + Origin::CApi => write!(f, "C API"), + Origin::Unknown => write!(f, "unknown origin"), + } + } } #[cfg(test)] mod tests { - 
use serde_json; - use super::Origin; + use super::Origin; + use serde_json; - #[test] - fn should_serialize_origin() { - // given - let o1 = Origin::Rpc("test service".into()); - let o3 = Origin::Ipc(5.into()); - let o4 = Origin::Signer { - session: 10.into(), - }; - let o5 = Origin::Unknown; - let o6 = Origin::Ws { - session: 5.into(), - }; + #[test] + fn should_serialize_origin() { + // given + let o1 = Origin::Rpc("test service".into()); + let o3 = Origin::Ipc(5.into()); + let o4 = Origin::Signer { session: 10.into() }; + let o5 = Origin::Unknown; + let o6 = Origin::Ws { session: 5.into() }; - // when - let res1 = serde_json::to_string(&o1).unwrap(); - let res3 = serde_json::to_string(&o3).unwrap(); - let res4 = serde_json::to_string(&o4).unwrap(); - let res5 = serde_json::to_string(&o5).unwrap(); - let res6 = serde_json::to_string(&o6).unwrap(); + // when + let res1 = serde_json::to_string(&o1).unwrap(); + let res3 = serde_json::to_string(&o3).unwrap(); + let res4 = serde_json::to_string(&o4).unwrap(); + let res5 = serde_json::to_string(&o5).unwrap(); + let res6 = serde_json::to_string(&o6).unwrap(); - // then - assert_eq!(res1, r#"{"rpc":"test service"}"#); - assert_eq!(res3, r#"{"ipc":"0x0000000000000000000000000000000000000000000000000000000000000005"}"#); - assert_eq!(res4, r#"{"signer":{"session":"0x000000000000000000000000000000000000000000000000000000000000000a"}}"#); - assert_eq!(res5, r#""unknown""#); - assert_eq!(res6, r#"{"ws":{"session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}"#); - } + // then + assert_eq!(res1, r#"{"rpc":"test service"}"#); + assert_eq!( + res3, + r#"{"ipc":"0x0000000000000000000000000000000000000000000000000000000000000005"}"# + ); + assert_eq!( + res4, + r#"{"signer":{"session":"0x000000000000000000000000000000000000000000000000000000000000000a"}}"# + ); + assert_eq!(res5, r#""unknown""#); + assert_eq!( + res6, + 
r#"{"ws":{"session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}"# + ); + } } diff --git a/rpc/src/v1/types/pubsub.rs b/rpc/src/v1/types/pubsub.rs index 1586b115c..e1fcf0cf7 100644 --- a/rpc/src/v1/types/pubsub.rs +++ b/rpc/src/v1/types/pubsub.rs @@ -17,32 +17,32 @@ //! Pub-Sub types. use ethereum_types::H256; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde::de::Error; -use serde_json::{Value, from_value}; -use v1::types::{RichHeader, Filter, Log}; +use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; +use serde_json::{from_value, Value}; +use v1::types::{Filter, Log, RichHeader}; /// Subscription result. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Result { - /// New block header. - Header(Box), - /// Log - Log(Box), - /// Transaction hash - TransactionHash(H256), + /// New block header. + Header(Box), + /// Log + Log(Box), + /// Transaction hash + TransactionHash(H256), } impl Serialize for Result { - fn serialize(&self, serializer: S) -> ::std::result::Result - where S: Serializer - { - match *self { - Result::Header(ref header) => header.serialize(serializer), - Result::Log(ref log) => log.serialize(serializer), - Result::TransactionHash(ref hash) => hash.serialize(serializer), - } - } + fn serialize(&self, serializer: S) -> ::std::result::Result + where + S: Serializer, + { + match *self { + Result::Header(ref header) => header.serialize(serializer), + Result::Log(ref log) => log.serialize(serializer), + Result::TransactionHash(ref hash) => hash.serialize(serializer), + } + } } /// Subscription kind. @@ -50,123 +50,149 @@ impl Serialize for Result { #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub enum Kind { - /// New block headers subscription. - NewHeads, - /// Logs subscription. - Logs, - /// New Pending Transactions subscription. - NewPendingTransactions, - /// Node syncing status subscription. - Syncing, + /// New block headers subscription. 
+ NewHeads, + /// Logs subscription. + Logs, + /// New Pending Transactions subscription. + NewPendingTransactions, + /// Node syncing status subscription. + Syncing, } /// Subscription kind. #[derive(Debug, PartialEq, Eq, Hash, Clone)] pub enum Params { - /// No parameters passed. - None, - /// Log parameters. - Logs(Filter), + /// No parameters passed. + None, + /// Log parameters. + Logs(Filter), } impl Default for Params { - fn default() -> Self { - Params::None - } + fn default() -> Self { + Params::None + } } impl<'a> Deserialize<'a> for Params { - fn deserialize(deserializer: D) -> ::std::result::Result - where D: Deserializer<'a> { - let v: Value = Deserialize::deserialize(deserializer)?; + fn deserialize(deserializer: D) -> ::std::result::Result + where + D: Deserializer<'a>, + { + let v: Value = Deserialize::deserialize(deserializer)?; - if v.is_null() { - return Ok(Params::None); - } + if v.is_null() { + return Ok(Params::None); + } - from_value(v.clone()).map(Params::Logs) - .map_err(|e| D::Error::custom(format!("Invalid Pub-Sub parameters: {}", e))) - } + from_value(v.clone()) + .map(Params::Logs) + .map_err(|e| D::Error::custom(format!("Invalid Pub-Sub parameters: {}", e))) + } } #[cfg(test)] mod tests { - use serde_json; - use super::{Result, Kind, Params}; - use v1::types::{RichHeader, Header, Filter}; - use v1::types::filter::VariadicValue; + use super::{Kind, Params, Result}; + use serde_json; + use v1::types::{filter::VariadicValue, Filter, Header, RichHeader}; - #[test] - fn should_deserialize_kind() { - assert_eq!(serde_json::from_str::(r#""newHeads""#).unwrap(), Kind::NewHeads); - assert_eq!(serde_json::from_str::(r#""logs""#).unwrap(), Kind::Logs); - assert_eq!(serde_json::from_str::(r#""newPendingTransactions""#).unwrap(), Kind::NewPendingTransactions); - assert_eq!(serde_json::from_str::(r#""syncing""#).unwrap(), Kind::Syncing); - } + #[test] + fn should_deserialize_kind() { + assert_eq!( + serde_json::from_str::(r#""newHeads""#).unwrap(), 
+ Kind::NewHeads + ); + assert_eq!( + serde_json::from_str::(r#""logs""#).unwrap(), + Kind::Logs + ); + assert_eq!( + serde_json::from_str::(r#""newPendingTransactions""#).unwrap(), + Kind::NewPendingTransactions + ); + assert_eq!( + serde_json::from_str::(r#""syncing""#).unwrap(), + Kind::Syncing + ); + } - #[test] - fn should_deserialize_logs() { - let none = serde_json::from_str::(r#"null"#).unwrap(); - assert_eq!(none, Params::None); + #[test] + fn should_deserialize_logs() { + let none = serde_json::from_str::(r#"null"#).unwrap(); + assert_eq!(none, Params::None); - let logs1 = serde_json::from_str::(r#"{}"#).unwrap(); - let logs2 = serde_json::from_str::(r#"{"limit":10}"#).unwrap(); - let logs3 = serde_json::from_str::( - r#"{"topics":["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b"]}"# - ).unwrap(); - assert_eq!(logs1, Params::Logs(Filter { - from_block: None, - to_block: None, - block_hash: None, - address: None, - topics: None, - limit: None, - })); - assert_eq!(logs2, Params::Logs(Filter { - from_block: None, - to_block: None, - block_hash: None, - address: None, - topics: None, - limit: Some(10), - })); - assert_eq!(logs3, Params::Logs(Filter { - from_block: None, - to_block: None, - block_hash: None, - address: None, - topics: Some(vec![ - VariadicValue::Single("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".parse().unwrap() - )]), - limit: None, - })); - } + let logs1 = serde_json::from_str::(r#"{}"#).unwrap(); + let logs2 = serde_json::from_str::(r#"{"limit":10}"#).unwrap(); + let logs3 = serde_json::from_str::( + r#"{"topics":["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b"]}"#, + ) + .unwrap(); + assert_eq!( + logs1, + Params::Logs(Filter { + from_block: None, + to_block: None, + block_hash: None, + address: None, + topics: None, + limit: None, + }) + ); + assert_eq!( + logs2, + Params::Logs(Filter { + from_block: None, + to_block: None, + block_hash: None, + address: None, + topics: None, 
+ limit: Some(10), + }) + ); + assert_eq!( + logs3, + Params::Logs(Filter { + from_block: None, + to_block: None, + block_hash: None, + address: None, + topics: Some(vec![VariadicValue::Single( + "000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b" + .parse() + .unwrap() + )]), + limit: None, + }) + ); + } - #[test] - fn should_serialize_header() { - let header = Result::Header(Box::new(RichHeader { - extra_info: Default::default(), - inner: Header { - hash: Some(Default::default()), - parent_hash: Default::default(), - uncles_hash: Default::default(), - author: Default::default(), - miner: Default::default(), - state_root: Default::default(), - transactions_root: Default::default(), - receipts_root: Default::default(), - number: Some(Default::default()), - gas_used: Default::default(), - gas_limit: Default::default(), - extra_data: Default::default(), - logs_bloom: Default::default(), - timestamp: Default::default(), - difficulty: Default::default(), - seal_fields: vec![Default::default(), Default::default()], - size: Some(69.into()), - }, - })); - let expected = 
r#"{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","sealFields":["0x","0x"],"sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","size":"0x45","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000"}"#; - assert_eq!(serde_json::to_string(&header).unwrap(), expected); - } + #[test] + fn should_serialize_header() { + let header = Result::Header(Box::new(RichHeader { + extra_info: Default::default(), + inner: Header { + hash: Some(Default::default()), + parent_hash: Default::default(), + uncles_hash: Default::default(), + author: Default::default(), + miner: Default::default(), + state_root: Default::default(), + transactions_root: Default::default(), + receipts_root: Default::default(), + number: Some(Default::default()), + gas_used: Default::default(), + gas_limit: Default::default(), + extra_data: Default::default(), + logs_bloom: Default::default(), + timestamp: Default::default(), + difficulty: 
Default::default(), + seal_fields: vec![Default::default(), Default::default()], + size: Some(69.into()), + }, + })); + let expected = r#"{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","sealFields":["0x","0x"],"sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","size":"0x45","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000"}"#; + assert_eq!(serde_json::to_string(&header).unwrap(), expected); + } } diff --git a/rpc/src/v1/types/receipt.rs b/rpc/src/v1/types/receipt.rs index bede7d536..3a5a1cd50 100644 --- a/rpc/src/v1/types/receipt.rs +++ b/rpc/src/v1/types/receipt.rs @@ -14,163 +14,175 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use ethereum_types::{H160, H256, U64, U256, Bloom as H2048}; +use ethereum_types::{Bloom as H2048, H160, H256, U256, U64}; +use types::receipt::{LocalizedReceipt, Receipt as EthReceipt, RichReceipt, TransactionOutcome}; use v1::types::Log; -use types::receipt::{Receipt as EthReceipt, RichReceipt, LocalizedReceipt, TransactionOutcome}; /// Receipt #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct Receipt { - /// Transaction Hash - pub transaction_hash: Option, - /// Transaction index - pub transaction_index: Option, - /// Block hash - pub block_hash: Option, - /// Sender - pub from: Option, - /// Recipient - pub to: Option, - /// Block number - pub block_number: Option, - /// Cumulative gas used - pub cumulative_gas_used: U256, - /// Gas used - pub gas_used: Option, - /// Contract address - pub contract_address: Option, - /// Logs - pub logs: Vec, - /// State Root - // NOTE(niklasad1): EIP98 makes this optional field, if it's missing then skip serializing it - #[serde(skip_serializing_if = "Option::is_none", rename = "root")] - pub state_root: Option, - /// Logs bloom - pub logs_bloom: H2048, - /// Status code - // NOTE(niklasad1): Unknown after EIP98 rules, if it's missing then skip serializing it - #[serde(skip_serializing_if = "Option::is_none", rename = "status")] - pub status_code: Option, + /// Transaction Hash + pub transaction_hash: Option, + /// Transaction index + pub transaction_index: Option, + /// Block hash + pub block_hash: Option, + /// Sender + pub from: Option, + /// Recipient + pub to: Option, + /// Block number + pub block_number: Option, + /// Cumulative gas used + pub cumulative_gas_used: U256, + /// Gas used + pub gas_used: Option, + /// Contract address + pub contract_address: Option, + /// Logs + pub logs: Vec, + /// State Root + // NOTE(niklasad1): EIP98 makes this optional field, if it's missing then skip serializing it + #[serde(skip_serializing_if = "Option::is_none", rename = "root")] + pub state_root: Option, + 
/// Logs bloom + pub logs_bloom: H2048, + /// Status code + // NOTE(niklasad1): Unknown after EIP98 rules, if it's missing then skip serializing it + #[serde(skip_serializing_if = "Option::is_none", rename = "status")] + pub status_code: Option, } impl Receipt { - fn outcome_to_state_root(outcome: TransactionOutcome) -> Option { - match outcome { - TransactionOutcome::Unknown | TransactionOutcome::StatusCode(_) => None, - TransactionOutcome::StateRoot(root) => Some(root), - } - } + fn outcome_to_state_root(outcome: TransactionOutcome) -> Option { + match outcome { + TransactionOutcome::Unknown | TransactionOutcome::StatusCode(_) => None, + TransactionOutcome::StateRoot(root) => Some(root), + } + } - fn outcome_to_status_code(outcome: &TransactionOutcome) -> Option { - match *outcome { - TransactionOutcome::Unknown | TransactionOutcome::StateRoot(_) => None, - TransactionOutcome::StatusCode(ref code) => Some((*code as u64).into()), - } - } + fn outcome_to_status_code(outcome: &TransactionOutcome) -> Option { + match *outcome { + TransactionOutcome::Unknown | TransactionOutcome::StateRoot(_) => None, + TransactionOutcome::StatusCode(ref code) => Some((*code as u64).into()), + } + } } impl From for Receipt { - fn from(r: LocalizedReceipt) -> Self { - Receipt { - to: r.to.map(Into::into), - from: Some(r.from), - transaction_hash: Some(r.transaction_hash), - transaction_index: Some(r.transaction_index.into()), - block_hash: Some(r.block_hash), - block_number: Some(r.block_number.into()), - cumulative_gas_used: r.cumulative_gas_used, - gas_used: Some(r.gas_used), - contract_address: r.contract_address.map(Into::into), - logs: r.logs.into_iter().map(Into::into).collect(), - status_code: Self::outcome_to_status_code(&r.outcome), - state_root: Self::outcome_to_state_root(r.outcome), - logs_bloom: r.log_bloom, - } - } + fn from(r: LocalizedReceipt) -> Self { + Receipt { + to: r.to.map(Into::into), + from: Some(r.from), + transaction_hash: Some(r.transaction_hash), + 
transaction_index: Some(r.transaction_index.into()), + block_hash: Some(r.block_hash), + block_number: Some(r.block_number.into()), + cumulative_gas_used: r.cumulative_gas_used, + gas_used: Some(r.gas_used), + contract_address: r.contract_address.map(Into::into), + logs: r.logs.into_iter().map(Into::into).collect(), + status_code: Self::outcome_to_status_code(&r.outcome), + state_root: Self::outcome_to_state_root(r.outcome), + logs_bloom: r.log_bloom, + } + } } impl From for Receipt { - fn from(r: RichReceipt) -> Self { - Receipt { - from: Some(r.from), - to: r.to.map(Into::into), - transaction_hash: Some(r.transaction_hash), - transaction_index: Some(r.transaction_index.into()), - block_hash: None, - block_number: None, - cumulative_gas_used: r.cumulative_gas_used, - gas_used: Some(r.gas_used), - contract_address: r.contract_address.map(Into::into), - logs: r.logs.into_iter().map(Into::into).collect(), - status_code: Self::outcome_to_status_code(&r.outcome), - state_root: Self::outcome_to_state_root(r.outcome), - logs_bloom: r.log_bloom, - } - } + fn from(r: RichReceipt) -> Self { + Receipt { + from: Some(r.from), + to: r.to.map(Into::into), + transaction_hash: Some(r.transaction_hash), + transaction_index: Some(r.transaction_index.into()), + block_hash: None, + block_number: None, + cumulative_gas_used: r.cumulative_gas_used, + gas_used: Some(r.gas_used), + contract_address: r.contract_address.map(Into::into), + logs: r.logs.into_iter().map(Into::into).collect(), + status_code: Self::outcome_to_status_code(&r.outcome), + state_root: Self::outcome_to_state_root(r.outcome), + logs_bloom: r.log_bloom, + } + } } impl From for Receipt { - fn from(r: EthReceipt) -> Self { - Receipt { - from: None, - to: None, - transaction_hash: None, - transaction_index: None, - block_hash: None, - block_number: None, - cumulative_gas_used: r.gas_used, - gas_used: None, - contract_address: None, - logs: r.logs.into_iter().map(Into::into).collect(), - status_code: 
Self::outcome_to_status_code(&r.outcome), - state_root: Self::outcome_to_state_root(r.outcome), - logs_bloom: r.log_bloom, - } - } + fn from(r: EthReceipt) -> Self { + Receipt { + from: None, + to: None, + transaction_hash: None, + transaction_index: None, + block_hash: None, + block_number: None, + cumulative_gas_used: r.gas_used, + gas_used: None, + contract_address: None, + logs: r.logs.into_iter().map(Into::into).collect(), + status_code: Self::outcome_to_status_code(&r.outcome), + state_root: Self::outcome_to_state_root(r.outcome), + logs_bloom: r.log_bloom, + } + } } #[cfg(test)] mod tests { - use serde_json; - use v1::types::{Log, Receipt}; + use serde_json; + use v1::types::{Log, Receipt}; - #[test] - fn receipt_serialization() { - let s = r#"{"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","from":null,"to":null,"blockNumber":"0x4510c","cumulativeGasUsed":"0x20","gasUsed":"0x10","contractAddress":null,"logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"data":"0x","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","logIndex":"0x1","transactionLogIndex":null,"type":"mined","removed":false}],"root":"0x000000000000000000000000000000000000000000000000000000000000000a","logsBloom":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f","status":"0x1"}"#; + #[test] + fn receipt_serialization() { + let s = r#"{"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","from":null,"to":null,"blockNumber":"0x4510c","cumulativeGasUsed":"0x20","gasUsed":"0x10","contractAddress":null,"logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"data":"0x","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","logIndex":"0x1","transactionLogIndex":null,"type":"mined","removed":false}],"root":"0x000000000000000000000000000000000000000000000000000000000000000a","logsBloom":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f","status":"0x1"}"#; - let receipt = Receipt { - from: None, - to: None, - transaction_hash: Some(0.into()), - transaction_index: Some(0.into()), - block_hash: Some("ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5".parse().unwrap()), - block_number: Some(0x4510c.into()), - 
cumulative_gas_used: 0x20.into(), - gas_used: Some(0x10.into()), - contract_address: None, - logs: vec![Log { - address: "33990122638b9132ca29c723bdf037f1a891a70c".parse().unwrap(), - topics: vec![ - "a6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc".parse().unwrap(), - "4861736852656700000000000000000000000000000000000000000000000000".parse().unwrap(), - ], - data: vec![].into(), - block_hash: Some("ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5".parse().unwrap()), - block_number: Some(0x4510c.into()), - transaction_hash: Some(0.into()), - transaction_index: Some(0.into()), - transaction_log_index: None, - log_index: Some(1.into()), - log_type: "mined".into(), - removed: false, - }], - logs_bloom: 15.into(), - state_root: Some(10.into()), - status_code: Some(1u64.into()), - }; + let receipt = Receipt { + from: None, + to: None, + transaction_hash: Some(0.into()), + transaction_index: Some(0.into()), + block_hash: Some( + "ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5" + .parse() + .unwrap(), + ), + block_number: Some(0x4510c.into()), + cumulative_gas_used: 0x20.into(), + gas_used: Some(0x10.into()), + contract_address: None, + logs: vec![Log { + address: "33990122638b9132ca29c723bdf037f1a891a70c".parse().unwrap(), + topics: vec![ + "a6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc" + .parse() + .unwrap(), + "4861736852656700000000000000000000000000000000000000000000000000" + .parse() + .unwrap(), + ], + data: vec![].into(), + block_hash: Some( + "ed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5" + .parse() + .unwrap(), + ), + block_number: Some(0x4510c.into()), + transaction_hash: Some(0.into()), + transaction_index: Some(0.into()), + transaction_log_index: None, + log_index: Some(1.into()), + log_type: "mined".into(), + removed: false, + }], + logs_bloom: 15.into(), + state_root: Some(10.into()), + status_code: Some(1u64.into()), + }; - let serialized = 
serde_json::to_string(&receipt).unwrap(); - assert_eq!(serialized, s); - } + let serialized = serde_json::to_string(&receipt).unwrap(); + assert_eq!(serialized, s); + } } diff --git a/rpc/src/v1/types/rpc_settings.rs b/rpc/src/v1/types/rpc_settings.rs index 63dfba7a8..92f8d9094 100644 --- a/rpc/src/v1/types/rpc_settings.rs +++ b/rpc/src/v1/types/rpc_settings.rs @@ -20,10 +20,10 @@ #[derive(Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct RpcSettings { - /// Whether RPC is enabled. - pub enabled: bool, - /// The interface being listened on. - pub interface: String, - /// The port being listened on. - pub port: u64, + /// Whether RPC is enabled. + pub enabled: bool, + /// The interface being listened on. + pub interface: String, + /// The port being listened on. + pub port: u64, } diff --git a/rpc/src/v1/types/secretstore.rs b/rpc/src/v1/types/secretstore.rs index ef76ec5b4..abc7abd0e 100644 --- a/rpc/src/v1/types/secretstore.rs +++ b/rpc/src/v1/types/secretstore.rs @@ -21,33 +21,36 @@ use v1::types::Bytes; #[derive(Default, Debug, Serialize, PartialEq)] #[cfg_attr(test, derive(Deserialize))] pub struct EncryptedDocumentKey { - /// Common encryption point. Pass this to Secret Store 'Document key storing session' - pub common_point: H512, - /// Encrypted point. Pass this to Secret Store 'Document key storing session'. - pub encrypted_point: H512, - /// Document key itself, encrypted with passed account public. Pass this to 'secretstore_encrypt'. - pub encrypted_key: Bytes, + /// Common encryption point. Pass this to Secret Store 'Document key storing session' + pub common_point: H512, + /// Encrypted point. Pass this to Secret Store 'Document key storing session'. + pub encrypted_point: H512, + /// Document key itself, encrypted with passed account public. Pass this to 'secretstore_encrypt'. 
+ pub encrypted_key: Bytes, } #[cfg(test)] mod tests { - use serde_json; - use super::EncryptedDocumentKey; + use super::EncryptedDocumentKey; + use serde_json; - #[test] - fn test_serialize_encrypted_document_key() { - let initial = EncryptedDocumentKey { - common_point: 1.into(), - encrypted_point: 2.into(), - encrypted_key: vec![3].into(), - }; + #[test] + fn test_serialize_encrypted_document_key() { + let initial = EncryptedDocumentKey { + common_point: 1.into(), + encrypted_point: 2.into(), + encrypted_key: vec![3].into(), + }; - let serialized = serde_json::to_string(&initial).unwrap(); - assert_eq!(serialized, r#"{"common_point":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001","encrypted_point":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002","encrypted_key":"0x03"}"#); + let serialized = serde_json::to_string(&initial).unwrap(); + assert_eq!( + serialized, + r#"{"common_point":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001","encrypted_point":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002","encrypted_key":"0x03"}"# + ); - let deserialized: EncryptedDocumentKey = serde_json::from_str(&serialized).unwrap(); - assert_eq!(deserialized.common_point, 1.into()); - assert_eq!(deserialized.encrypted_point, 2.into()); - assert_eq!(deserialized.encrypted_key, vec![3].into()); - } + let deserialized: EncryptedDocumentKey = serde_json::from_str(&serialized).unwrap(); + assert_eq!(deserialized.common_point, 1.into()); + assert_eq!(deserialized.encrypted_point, 2.into()); + assert_eq!(deserialized.encrypted_key, vec![3].into()); + } } diff --git a/rpc/src/v1/types/sync.rs b/rpc/src/v1/types/sync.rs index 901611fea..753b04ee6 100644 --- 
a/rpc/src/v1/types/sync.rs +++ b/rpc/src/v1/types/sync.rs @@ -17,235 +17,250 @@ use network::client_version::ClientVersion; use std::collections::BTreeMap; -use ethereum_types::{U256, H512}; -use sync::{self, PeerInfo as SyncPeerInfo, TransactionStats as SyncTransactionStats}; +use ethereum_types::{H512, U256}; use serde::{Serialize, Serializer}; +use sync::{self, PeerInfo as SyncPeerInfo, TransactionStats as SyncTransactionStats}; /// Sync info #[derive(Default, Debug, Serialize, PartialEq)] #[serde(rename_all = "camelCase")] pub struct SyncInfo { - /// Starting block - pub starting_block: U256, - /// Current block - pub current_block: U256, - /// Highest block seen so far - pub highest_block: U256, - /// Warp sync snapshot chunks total. - pub warp_chunks_amount: Option, - /// Warp sync snpashot chunks processed. - pub warp_chunks_processed: Option, + /// Starting block + pub starting_block: U256, + /// Current block + pub current_block: U256, + /// Highest block seen so far + pub highest_block: U256, + /// Warp sync snapshot chunks total. + pub warp_chunks_amount: Option, + /// Warp sync snapshot chunks processed. 
+ pub warp_chunks_processed: Option, } /// Peers info #[derive(Default, Debug, Serialize)] pub struct Peers { - /// Number of active peers - pub active: usize, - /// Number of connected peers - pub connected: usize, - /// Max number of peers - pub max: u32, - /// Detailed information on peers - pub peers: Vec, + /// Number of active peers + pub active: usize, + /// Number of connected peers + pub connected: usize, + /// Max number of peers + pub max: u32, + /// Detailed information on peers + pub peers: Vec, } /// Peer connection information #[derive(Default, Debug, Serialize)] pub struct PeerInfo { - /// Public node id - pub id: Option, - /// Node client ID - pub name: ClientVersion, - /// Capabilities - pub caps: Vec, - /// Network information - pub network: PeerNetworkInfo, - /// Protocols information - pub protocols: PeerProtocolsInfo, + /// Public node id + pub id: Option, + /// Node client ID + pub name: ClientVersion, + /// Capabilities + pub caps: Vec, + /// Network information + pub network: PeerNetworkInfo, + /// Protocols information + pub protocols: PeerProtocolsInfo, } /// Peer network information #[derive(Default, Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct PeerNetworkInfo { - /// Remote endpoint address - pub remote_address: String, - /// Local endpoint address - pub local_address: String, + /// Remote endpoint address + pub remote_address: String, + /// Local endpoint address + pub local_address: String, } /// Peer protocols information #[derive(Default, Debug, Serialize)] pub struct PeerProtocolsInfo { - /// Ethereum protocol information - pub eth: Option, - /// PIP protocol information. - pub pip: Option, + /// Ethereum protocol information + pub eth: Option, + /// PIP protocol information. 
+ pub pip: Option, } /// Peer Ethereum protocol information #[derive(Default, Debug, Serialize)] pub struct EthProtocolInfo { - /// Negotiated ethereum protocol version - pub version: u32, - /// Peer total difficulty if known - pub difficulty: Option, - /// SHA3 of peer best block hash - pub head: String, + /// Negotiated ethereum protocol version + pub version: u32, + /// Peer total difficulty if known + pub difficulty: Option, + /// SHA3 of peer best block hash + pub head: String, } impl From for EthProtocolInfo { - fn from(info: sync::EthProtocolInfo) -> Self { - EthProtocolInfo { - version: info.version, - difficulty: info.difficulty.map(Into::into), - head: format!("{:x}", info.head), - } - } + fn from(info: sync::EthProtocolInfo) -> Self { + EthProtocolInfo { + version: info.version, + difficulty: info.difficulty.map(Into::into), + head: format!("{:x}", info.head), + } + } } /// Peer PIP protocol information #[derive(Default, Debug, Serialize)] pub struct PipProtocolInfo { - /// Negotiated PIP protocol version - pub version: u32, - /// Peer total difficulty - pub difficulty: U256, - /// SHA3 of peer best block hash - pub head: String, + /// Negotiated PIP protocol version + pub version: u32, + /// Peer total difficulty + pub difficulty: U256, + /// SHA3 of peer best block hash + pub head: String, } impl From for PipProtocolInfo { - fn from(info: sync::PipProtocolInfo) -> Self { - PipProtocolInfo { - version: info.version, - difficulty: info.difficulty, - head: format!("{:x}", info.head), - } - } + fn from(info: sync::PipProtocolInfo) -> Self { + PipProtocolInfo { + version: info.version, + difficulty: info.difficulty, + head: format!("{:x}", info.head), + } + } } /// Sync status #[derive(Debug, PartialEq)] pub enum SyncStatus { - /// Info when syncing - Info(SyncInfo), - /// Not syncing - None + /// Info when syncing + Info(SyncInfo), + /// Not syncing + None, } impl Serialize for SyncStatus { - fn serialize(&self, serializer: S) -> Result - where S: 
Serializer { - match *self { - SyncStatus::Info(ref info) => info.serialize(serializer), - SyncStatus::None => false.serialize(serializer) - } - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + SyncStatus::Info(ref info) => info.serialize(serializer), + SyncStatus::None => false.serialize(serializer), + } + } } /// Propagation statistics for pending transaction. #[derive(Default, Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct TransactionStats { - /// Block no this transaction was first seen. - pub first_seen: u64, - /// Peers this transaction was propagated to with count. - pub propagated_to: BTreeMap, + /// Block no this transaction was first seen. + pub first_seen: u64, + /// Peers this transaction was propagated to with count. + pub propagated_to: BTreeMap, } impl From for PeerInfo { - fn from(p: SyncPeerInfo) -> Self { - PeerInfo { - id: p.id, - name: p.client_version, - caps: p.capabilities, - network: PeerNetworkInfo { - remote_address: p.remote_address, - local_address: p.local_address, - }, - protocols: PeerProtocolsInfo { - eth: p.eth_info.map(Into::into), - pip: p.pip_info.map(Into::into), - }, - } - } + fn from(p: SyncPeerInfo) -> Self { + PeerInfo { + id: p.id, + name: p.client_version, + caps: p.capabilities, + network: PeerNetworkInfo { + remote_address: p.remote_address, + local_address: p.local_address, + }, + protocols: PeerProtocolsInfo { + eth: p.eth_info.map(Into::into), + pip: p.pip_info.map(Into::into), + }, + } + } } impl From for TransactionStats { - fn from(s: SyncTransactionStats) -> Self { - TransactionStats { - first_seen: s.first_seen, - propagated_to: s.propagated_to - .into_iter() - .map(|(id, count)| (id, count)) - .collect(), - } - } + fn from(s: SyncTransactionStats) -> Self { + TransactionStats { + first_seen: s.first_seen, + propagated_to: s + .propagated_to + .into_iter() + .map(|(id, count)| (id, count)) + .collect(), + } + } } /// Chain status. 
#[derive(Default, Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct ChainStatus { - /// Describes the gap in the blockchain, if there is one: (first, last) - pub block_gap: Option<(U256, U256)>, + /// Describes the gap in the blockchain, if there is one: (first, last) + pub block_gap: Option<(U256, U256)>, } #[cfg(test)] mod tests { - use serde_json; - use std::collections::BTreeMap; - use super::{SyncInfo, SyncStatus, Peers, TransactionStats, ChainStatus}; + use super::{ChainStatus, Peers, SyncInfo, SyncStatus, TransactionStats}; + use serde_json; + use std::collections::BTreeMap; - #[test] - fn test_serialize_sync_info() { - let t = SyncInfo::default(); - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0","warpChunksAmount":null,"warpChunksProcessed":null}"#); - } + #[test] + fn test_serialize_sync_info() { + let t = SyncInfo::default(); + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0","warpChunksAmount":null,"warpChunksProcessed":null}"# + ); + } - #[test] - fn test_serialize_peers() { - let t = Peers::default(); - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"active":0,"connected":0,"max":0,"peers":[]}"#); - } + #[test] + fn test_serialize_peers() { + let t = Peers::default(); + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"active":0,"connected":0,"max":0,"peers":[]}"# + ); + } - #[test] - fn test_serialize_sync_status() { - let t = SyncStatus::None; - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, "false"); + #[test] + fn test_serialize_sync_status() { + let t = SyncStatus::None; + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!(serialized, "false"); - let t = SyncStatus::Info(SyncInfo::default()); - let serialized = 
serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0","warpChunksAmount":null,"warpChunksProcessed":null}"#); - } + let t = SyncStatus::Info(SyncInfo::default()); + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0","warpChunksAmount":null,"warpChunksProcessed":null}"# + ); + } - #[test] - fn test_serialize_block_gap() { - let mut t = ChainStatus::default(); - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"blockGap":null}"#); + #[test] + fn test_serialize_block_gap() { + let mut t = ChainStatus::default(); + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!(serialized, r#"{"blockGap":null}"#); - t.block_gap = Some((1.into(), 5.into())); + t.block_gap = Some((1.into(), 5.into())); - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"blockGap":["0x1","0x5"]}"#); - } + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!(serialized, r#"{"blockGap":["0x1","0x5"]}"#); + } - #[test] - fn test_serialize_transaction_stats() { - let stats = TransactionStats { - first_seen: 100, - propagated_to: map![ - 10.into() => 50 - ], - }; + #[test] + fn test_serialize_transaction_stats() { + let stats = TransactionStats { + first_seen: 100, + propagated_to: map![ + 10.into() => 50 + ], + }; - let serialized = serde_json::to_string(&stats).unwrap(); - assert_eq!(serialized, r#"{"firstSeen":100,"propagatedTo":{"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a":50}}"#) - } + let serialized = serde_json::to_string(&stats).unwrap(); + assert_eq!( + serialized, + r#"{"firstSeen":100,"propagatedTo":{"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a":50}}"# + ) + } 
} diff --git a/rpc/src/v1/types/trace.rs b/rpc/src/v1/types/trace.rs index 58a5ee596..543b08f3b 100644 --- a/rpc/src/v1/types/trace.rs +++ b/rpc/src/v1/types/trace.rs @@ -16,14 +16,14 @@ use std::collections::BTreeMap; -use ethcore::client::Executed; -use ethcore::trace as et; -use ethcore::trace::{FlatTrace, LocalizedTrace as EthLocalizedTrace, trace, TraceError}; +use ethcore::{ + client::Executed, + trace as et, + trace::{trace, FlatTrace, LocalizedTrace as EthLocalizedTrace, TraceError}, +}; use ethereum_types::{H160, H256, U256}; -use serde::ser::SerializeStruct; -use serde::{Serialize, Serializer}; -use types::account_diff; -use types::state_diff; +use serde::{ser::SerializeStruct, Serialize, Serializer}; +use types::{account_diff, state_diff}; use vm; use v1::types::Bytes; @@ -31,168 +31,188 @@ use v1::types::Bytes; #[derive(Debug, Serialize)] /// A diff of some chunk of memory. pub struct MemoryDiff { - /// Offset into memory the change begins. - pub off: usize, - /// The changed data. - pub data: Bytes, + /// Offset into memory the change begins. + pub off: usize, + /// The changed data. + pub data: Bytes, } impl From for MemoryDiff { - fn from(c: et::MemoryDiff) -> Self { - MemoryDiff { - off: c.offset, - data: c.data.into(), - } - } + fn from(c: et::MemoryDiff) -> Self { + MemoryDiff { + off: c.offset, + data: c.data.into(), + } + } } #[derive(Debug, Serialize)] /// A diff of some storage value. pub struct StorageDiff { - /// Which key in storage is changed. - pub key: U256, - /// What the value has been changed to. - pub val: U256, + /// Which key in storage is changed. + pub key: U256, + /// What the value has been changed to. + pub val: U256, } impl From for StorageDiff { - fn from(c: et::StorageDiff) -> Self { - StorageDiff { - key: c.location, - val: c.value, - } - } + fn from(c: et::StorageDiff) -> Self { + StorageDiff { + key: c.location, + val: c.value, + } + } } #[derive(Debug, Serialize)] /// A record of an executed VM operation. 
pub struct VMExecutedOperation { - /// The total gas used. - pub used: u64, - /// The stack item placed, if any. - pub push: Vec, - /// If altered, the memory delta. - pub mem: Option, - /// The altered storage value, if any. - pub store: Option, + /// The total gas used. + pub used: u64, + /// The stack item placed, if any. + pub push: Vec, + /// If altered, the memory delta. + pub mem: Option, + /// The altered storage value, if any. + pub store: Option, } impl From for VMExecutedOperation { - fn from(c: et::VMExecutedOperation) -> Self { - VMExecutedOperation { - used: c.gas_used.low_u64(), - push: c.stack_push.into_iter().map(Into::into).collect(), - mem: c.mem_diff.map(Into::into), - store: c.store_diff.map(Into::into), - } - } + fn from(c: et::VMExecutedOperation) -> Self { + VMExecutedOperation { + used: c.gas_used.low_u64(), + push: c.stack_push.into_iter().map(Into::into).collect(), + mem: c.mem_diff.map(Into::into), + store: c.store_diff.map(Into::into), + } + } } #[derive(Debug, Serialize)] /// A record of the execution of a single VM operation. pub struct VMOperation { - /// The program counter. - pub pc: usize, - /// The gas cost for this instruction. - pub cost: u64, - /// Information concerning the execution of the operation. - pub ex: Option, - /// Subordinate trace of the CALL/CREATE if applicable. - #[serde(bound="VMTrace: Serialize")] - pub sub: Option, + /// The program counter. + pub pc: usize, + /// The gas cost for this instruction. + pub cost: u64, + /// Information concerning the execution of the operation. + pub ex: Option, + /// Subordinate trace of the CALL/CREATE if applicable. 
+ #[serde(bound = "VMTrace: Serialize")] + pub sub: Option, } impl From<(et::VMOperation, Option)> for VMOperation { - fn from(c: (et::VMOperation, Option)) -> Self { - VMOperation { - pc: c.0.pc, - cost: c.0.gas_cost.low_u64(), - ex: c.0.executed.map(Into::into), - sub: c.1.map(Into::into), - } - } + fn from(c: (et::VMOperation, Option)) -> Self { + VMOperation { + pc: c.0.pc, + cost: c.0.gas_cost.low_u64(), + ex: c.0.executed.map(Into::into), + sub: c.1.map(Into::into), + } + } } #[derive(Debug, Serialize)] /// A record of a full VM trace for a CALL/CREATE. pub struct VMTrace { - /// The code to be executed. - pub code: Bytes, - /// The operations executed. - pub ops: Vec, + /// The code to be executed. + pub code: Bytes, + /// The operations executed. + pub ops: Vec, } impl From for VMTrace { - fn from(c: et::VMTrace) -> Self { - let mut subs = c.subs.into_iter(); - let mut next_sub = subs.next(); - VMTrace { - code: c.code.into(), - ops: c.operations - .into_iter() - .enumerate() - .map(|(i, op)| (op, { - let have_sub = next_sub.is_some() && next_sub.as_ref().unwrap().parent_step == i; - if have_sub { - let r = next_sub.clone(); - next_sub = subs.next(); - r - } else { None } - }).into()) - .collect(), - } - } + fn from(c: et::VMTrace) -> Self { + let mut subs = c.subs.into_iter(); + let mut next_sub = subs.next(); + VMTrace { + code: c.code.into(), + ops: c + .operations + .into_iter() + .enumerate() + .map(|(i, op)| { + (op, { + let have_sub = + next_sub.is_some() && next_sub.as_ref().unwrap().parent_step == i; + if have_sub { + let r = next_sub.clone(); + next_sub = subs.next(); + r + } else { + None + } + }) + .into() + }) + .collect(), + } + } } #[derive(Debug, Serialize)] /// Aux type for Diff::Changed. -pub struct ChangedType where T: Serialize { - from: T, - to: T, +pub struct ChangedType +where + T: Serialize, +{ + from: T, + to: T, } #[derive(Debug, Serialize)] /// Serde-friendly `Diff` shadow. 
-pub enum Diff where T: Serialize { - #[serde(rename = "=")] - Same, - #[serde(rename = "+")] - Born(T), - #[serde(rename = "-")] - Died(T), - #[serde(rename = "*")] - Changed(ChangedType), +pub enum Diff +where + T: Serialize, +{ + #[serde(rename = "=")] + Same, + #[serde(rename = "+")] + Born(T), + #[serde(rename = "-")] + Died(T), + #[serde(rename = "*")] + Changed(ChangedType), } -impl From> for Diff where T: Eq, U: Serialize + From { - fn from(c: account_diff::Diff) -> Self { - match c { - account_diff::Diff::Same => Diff::Same, - account_diff::Diff::Born(t) => Diff::Born(t.into()), - account_diff::Diff::Died(t) => Diff::Died(t.into()), - account_diff::Diff::Changed(t, u) => Diff::Changed(ChangedType{from: t.into(), to: u.into()}), - } - } +impl From> for Diff +where + T: Eq, + U: Serialize + From, +{ + fn from(c: account_diff::Diff) -> Self { + match c { + account_diff::Diff::Same => Diff::Same, + account_diff::Diff::Born(t) => Diff::Born(t.into()), + account_diff::Diff::Died(t) => Diff::Died(t.into()), + account_diff::Diff::Changed(t, u) => Diff::Changed(ChangedType { + from: t.into(), + to: u.into(), + }), + } + } } #[derive(Debug, Serialize)] /// Serde-friendly `AccountDiff` shadow. 
pub struct AccountDiff { - pub balance: Diff, - pub nonce: Diff, - pub code: Diff, - pub storage: BTreeMap>, + pub balance: Diff, + pub nonce: Diff, + pub code: Diff, + pub storage: BTreeMap>, } impl From for AccountDiff { - fn from(c: account_diff::AccountDiff) -> Self { - AccountDiff { - balance: c.balance.into(), - nonce: c.nonce.into(), - code: c.code.into(), - storage: c.storage.into_iter().map(|(k, v)| (k, v.into())).collect(), - } - } + fn from(c: account_diff::AccountDiff) -> Self { + AccountDiff { + balance: c.balance.into(), + nonce: c.nonce.into(), + code: c.code.into(), + storage: c.storage.into_iter().map(|(k, v)| (k, v.into())).collect(), + } + } } #[derive(Debug)] @@ -200,674 +220,709 @@ impl From for AccountDiff { pub struct StateDiff(BTreeMap); impl Serialize for StateDiff { - fn serialize(&self, serializer: S) -> Result - where S: Serializer { - Serialize::serialize(&self.0, serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + Serialize::serialize(&self.0, serializer) + } } impl From for StateDiff { - fn from(c: state_diff::StateDiff) -> Self { - StateDiff(c.raw.into_iter().map(|(k, v)| (k, v.into())).collect()) - } + fn from(c: state_diff::StateDiff) -> Self { + StateDiff(c.raw.into_iter().map(|(k, v)| (k, v.into())).collect()) + } } /// Create response #[derive(Debug, Serialize)] pub struct Create { - /// Sender - from: H160, - /// Value - value: U256, - /// Gas - gas: U256, - /// Initialization code - init: Bytes, + /// Sender + from: H160, + /// Value + value: U256, + /// Gas + gas: U256, + /// Initialization code + init: Bytes, } impl From for Create { - fn from(c: trace::Create) -> Self { - Create { - from: c.from, - value: c.value, - gas: c.gas, - init: Bytes::new(c.init), - } - } + fn from(c: trace::Create) -> Self { + Create { + from: c.from, + value: c.value, + gas: c.gas, + init: Bytes::new(c.init), + } + } } /// Call type. 
#[derive(Debug, Serialize)] #[serde(rename_all = "lowercase")] pub enum CallType { - /// None - None, - /// Call - Call, - /// Call code - CallCode, - /// Delegate call - DelegateCall, - /// Static call - StaticCall, + /// None + None, + /// Call + Call, + /// Call code + CallCode, + /// Delegate call + DelegateCall, + /// Static call + StaticCall, } impl From for CallType { - fn from(c: vm::CallType) -> Self { - match c { - vm::CallType::None => CallType::None, - vm::CallType::Call => CallType::Call, - vm::CallType::CallCode => CallType::CallCode, - vm::CallType::DelegateCall => CallType::DelegateCall, - vm::CallType::StaticCall => CallType::StaticCall, - } - } + fn from(c: vm::CallType) -> Self { + match c { + vm::CallType::None => CallType::None, + vm::CallType::Call => CallType::Call, + vm::CallType::CallCode => CallType::CallCode, + vm::CallType::DelegateCall => CallType::DelegateCall, + vm::CallType::StaticCall => CallType::StaticCall, + } + } } /// Call response #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct Call { - /// Sender - from: H160, - /// Recipient - to: H160, - /// Transfered Value - value: U256, - /// Gas - gas: U256, - /// Input data - input: Bytes, - /// The type of the call. - call_type: CallType, + /// Sender + from: H160, + /// Recipient + to: H160, + /// Transfered Value + value: U256, + /// Gas + gas: U256, + /// Input data + input: Bytes, + /// The type of the call. + call_type: CallType, } impl From for Call { - fn from(c: trace::Call) -> Self { - Call { - from: c.from, - to: c.to, - value: c.value, - gas: c.gas, - input: c.input.into(), - call_type: c.call_type.into(), - } - } + fn from(c: trace::Call) -> Self { + Call { + from: c.from, + to: c.to, + value: c.value, + gas: c.gas, + input: c.input.into(), + call_type: c.call_type.into(), + } + } } /// Reward type. 
#[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub enum RewardType { - /// Block - Block, - /// Uncle - Uncle, - /// EmptyStep (AuthorityRound) - EmptyStep, - /// External (attributed as part of an external protocol) - External, + /// Block + Block, + /// Uncle + Uncle, + /// EmptyStep (AuthorityRound) + EmptyStep, + /// External (attributed as part of an external protocol) + External, } impl From for RewardType { - fn from(c: trace::RewardType) -> Self { - match c { - trace::RewardType::Block => RewardType::Block, - trace::RewardType::Uncle => RewardType::Uncle, - trace::RewardType::EmptyStep => RewardType::EmptyStep, - trace::RewardType::External => RewardType::External, - } - } + fn from(c: trace::RewardType) -> Self { + match c { + trace::RewardType::Block => RewardType::Block, + trace::RewardType::Uncle => RewardType::Uncle, + trace::RewardType::EmptyStep => RewardType::EmptyStep, + trace::RewardType::External => RewardType::External, + } + } } /// Reward action #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct Reward { - /// Author's address. - pub author: H160, - /// Reward amount. - pub value: U256, - /// Reward type. - pub reward_type: RewardType, + /// Author's address. + pub author: H160, + /// Reward amount. + pub value: U256, + /// Reward type. + pub reward_type: RewardType, } impl From for Reward { - fn from(r: trace::Reward) -> Self { - Reward { - author: r.author, - value: r.value, - reward_type: r.reward_type.into(), - } - } + fn from(r: trace::Reward) -> Self { + Reward { + author: r.author, + value: r.value, + reward_type: r.reward_type.into(), + } + } } /// Suicide #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct Suicide { - /// Address. - pub address: H160, - /// Refund address. - pub refund_address: H160, - /// Balance. - pub balance: U256, + /// Address. + pub address: H160, + /// Refund address. + pub refund_address: H160, + /// Balance. 
+ pub balance: U256, } impl From for Suicide { - fn from(s: trace::Suicide) -> Self { - Suicide { - address: s.address, - refund_address: s.refund_address, - balance: s.balance, - } - } + fn from(s: trace::Suicide) -> Self { + Suicide { + address: s.address, + refund_address: s.refund_address, + balance: s.balance, + } + } } /// Action #[derive(Debug)] pub enum Action { - /// Call - Call(Call), - /// Create - Create(Create), - /// Suicide - Suicide(Suicide), - /// Reward - Reward(Reward), + /// Call + Call(Call), + /// Create + Create(Create), + /// Suicide + Suicide(Suicide), + /// Reward + Reward(Reward), } impl From for Action { - fn from(c: trace::Action) -> Self { - match c { - trace::Action::Call(call) => Action::Call(call.into()), - trace::Action::Create(create) => Action::Create(create.into()), - trace::Action::Suicide(suicide) => Action::Suicide(suicide.into()), - trace::Action::Reward(reward) => Action::Reward(reward.into()), - } - } + fn from(c: trace::Action) -> Self { + match c { + trace::Action::Call(call) => Action::Call(call.into()), + trace::Action::Create(create) => Action::Create(create.into()), + trace::Action::Suicide(suicide) => Action::Suicide(suicide.into()), + trace::Action::Reward(reward) => Action::Reward(reward.into()), + } + } } /// Call Result #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct CallResult { - /// Gas used - gas_used: U256, - /// Output bytes - output: Bytes, + /// Gas used + gas_used: U256, + /// Output bytes + output: Bytes, } impl From for CallResult { - fn from(c: trace::CallResult) -> Self { - CallResult { - gas_used: c.gas_used, - output: c.output.into(), - } - } + fn from(c: trace::CallResult) -> Self { + CallResult { + gas_used: c.gas_used, + output: c.output.into(), + } + } } /// Craete Result #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct CreateResult { - /// Gas used - gas_used: U256, - /// Code - code: Bytes, - /// Assigned address - address: H160, + /// Gas 
used + gas_used: U256, + /// Code + code: Bytes, + /// Assigned address + address: H160, } impl From for CreateResult { - fn from(c: trace::CreateResult) -> Self { - CreateResult { - gas_used: c.gas_used, - code: c.code.into(), - address: c.address, - } - } + fn from(c: trace::CreateResult) -> Self { + CreateResult { + gas_used: c.gas_used, + code: c.code.into(), + address: c.address, + } + } } /// Response #[derive(Debug)] pub enum Res { - /// Call - Call(CallResult), - /// Create - Create(CreateResult), - /// Call failure - FailedCall(TraceError), - /// Creation failure - FailedCreate(TraceError), - /// None - None, + /// Call + Call(CallResult), + /// Create + Create(CreateResult), + /// Call failure + FailedCall(TraceError), + /// Creation failure + FailedCreate(TraceError), + /// None + None, } impl From for Res { - fn from(t: trace::Res) -> Self { - match t { - trace::Res::Call(call) => Res::Call(CallResult::from(call)), - trace::Res::Create(create) => Res::Create(CreateResult::from(create)), - trace::Res::FailedCall(error) => Res::FailedCall(error), - trace::Res::FailedCreate(error) => Res::FailedCreate(error), - trace::Res::None => Res::None, - } - } + fn from(t: trace::Res) -> Self { + match t { + trace::Res::Call(call) => Res::Call(CallResult::from(call)), + trace::Res::Create(create) => Res::Create(CreateResult::from(create)), + trace::Res::FailedCall(error) => Res::FailedCall(error), + trace::Res::FailedCreate(error) => Res::FailedCreate(error), + trace::Res::None => Res::None, + } + } } /// Trace #[derive(Debug)] pub struct LocalizedTrace { - /// Action - action: Action, - /// Result - result: Res, - /// Trace address - trace_address: Vec, - /// Subtraces - subtraces: usize, - /// Transaction position - transaction_position: Option, - /// Transaction hash - transaction_hash: Option, - /// Block Number - block_number: u64, - /// Block Hash - block_hash: H256, + /// Action + action: Action, + /// Result + result: Res, + /// Trace address + trace_address: 
Vec, + /// Subtraces + subtraces: usize, + /// Transaction position + transaction_position: Option, + /// Transaction hash + transaction_hash: Option, + /// Block Number + block_number: u64, + /// Block Hash + block_hash: H256, } impl Serialize for LocalizedTrace { - fn serialize(&self, serializer: S) -> Result - where S: Serializer - { - let mut struc = serializer.serialize_struct("LocalizedTrace", 9)?; - match self.action { - Action::Call(ref call) => { - struc.serialize_field("type", "call")?; - struc.serialize_field("action", call)?; - }, - Action::Create(ref create) => { - struc.serialize_field("type", "create")?; - struc.serialize_field("action", create)?; - }, - Action::Suicide(ref suicide) => { - struc.serialize_field("type", "suicide")?; - struc.serialize_field("action", suicide)?; - }, - Action::Reward(ref reward) => { - struc.serialize_field("type", "reward")?; - struc.serialize_field("action", reward)?; - }, - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut struc = serializer.serialize_struct("LocalizedTrace", 9)?; + match self.action { + Action::Call(ref call) => { + struc.serialize_field("type", "call")?; + struc.serialize_field("action", call)?; + } + Action::Create(ref create) => { + struc.serialize_field("type", "create")?; + struc.serialize_field("action", create)?; + } + Action::Suicide(ref suicide) => { + struc.serialize_field("type", "suicide")?; + struc.serialize_field("action", suicide)?; + } + Action::Reward(ref reward) => { + struc.serialize_field("type", "reward")?; + struc.serialize_field("action", reward)?; + } + } - match self.result { - Res::Call(ref call) => struc.serialize_field("result", call)?, - Res::Create(ref create) => struc.serialize_field("result", create)?, - Res::FailedCall(ref error) => struc.serialize_field("error", &error.to_string())?, - Res::FailedCreate(ref error) => struc.serialize_field("error", &error.to_string())?, - Res::None => struc.serialize_field("result", &None as 
&Option)?, - } + match self.result { + Res::Call(ref call) => struc.serialize_field("result", call)?, + Res::Create(ref create) => struc.serialize_field("result", create)?, + Res::FailedCall(ref error) => struc.serialize_field("error", &error.to_string())?, + Res::FailedCreate(ref error) => struc.serialize_field("error", &error.to_string())?, + Res::None => struc.serialize_field("result", &None as &Option)?, + } - struc.serialize_field("traceAddress", &self.trace_address)?; - struc.serialize_field("subtraces", &self.subtraces)?; - struc.serialize_field("transactionPosition", &self.transaction_position)?; - struc.serialize_field("transactionHash", &self.transaction_hash)?; - struc.serialize_field("blockNumber", &self.block_number)?; - struc.serialize_field("blockHash", &self.block_hash)?; + struc.serialize_field("traceAddress", &self.trace_address)?; + struc.serialize_field("subtraces", &self.subtraces)?; + struc.serialize_field("transactionPosition", &self.transaction_position)?; + struc.serialize_field("transactionHash", &self.transaction_hash)?; + struc.serialize_field("blockNumber", &self.block_number)?; + struc.serialize_field("blockHash", &self.block_hash)?; - struc.end() - } + struc.end() + } } impl From for LocalizedTrace { - fn from(t: EthLocalizedTrace) -> Self { - LocalizedTrace { - action: t.action.into(), - result: t.result.into(), - trace_address: t.trace_address.into_iter().map(Into::into).collect(), - subtraces: t.subtraces, - transaction_position: t.transaction_number.map(Into::into), - transaction_hash: t.transaction_hash.map(Into::into), - block_number: t.block_number, - block_hash: t.block_hash, - } - } + fn from(t: EthLocalizedTrace) -> Self { + LocalizedTrace { + action: t.action.into(), + result: t.result.into(), + trace_address: t.trace_address.into_iter().map(Into::into).collect(), + subtraces: t.subtraces, + transaction_position: t.transaction_number.map(Into::into), + transaction_hash: t.transaction_hash.map(Into::into), + block_number: 
t.block_number, + block_hash: t.block_hash, + } + } } /// Trace #[derive(Debug)] pub struct Trace { - /// Trace address - trace_address: Vec, - /// Subtraces - subtraces: usize, - /// Action - action: Action, - /// Result - result: Res, + /// Trace address + trace_address: Vec, + /// Subtraces + subtraces: usize, + /// Action + action: Action, + /// Result + result: Res, } impl Serialize for Trace { - fn serialize(&self, serializer: S) -> Result - where S: Serializer - { - let mut struc = serializer.serialize_struct("Trace", 4)?; - match self.action { - Action::Call(ref call) => { - struc.serialize_field("type", "call")?; - struc.serialize_field("action", call)?; - }, - Action::Create(ref create) => { - struc.serialize_field("type", "create")?; - struc.serialize_field("action", create)?; - }, - Action::Suicide(ref suicide) => { - struc.serialize_field("type", "suicide")?; - struc.serialize_field("action", suicide)?; - }, - Action::Reward(ref reward) => { - struc.serialize_field("type", "reward")?; - struc.serialize_field("action", reward)?; - }, - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut struc = serializer.serialize_struct("Trace", 4)?; + match self.action { + Action::Call(ref call) => { + struc.serialize_field("type", "call")?; + struc.serialize_field("action", call)?; + } + Action::Create(ref create) => { + struc.serialize_field("type", "create")?; + struc.serialize_field("action", create)?; + } + Action::Suicide(ref suicide) => { + struc.serialize_field("type", "suicide")?; + struc.serialize_field("action", suicide)?; + } + Action::Reward(ref reward) => { + struc.serialize_field("type", "reward")?; + struc.serialize_field("action", reward)?; + } + } - match self.result { - Res::Call(ref call) => struc.serialize_field("result", call)?, - Res::Create(ref create) => struc.serialize_field("result", create)?, - Res::FailedCall(ref error) => struc.serialize_field("error", &error.to_string())?, - Res::FailedCreate(ref 
error) => struc.serialize_field("error", &error.to_string())?, - Res::None => struc.serialize_field("result", &None as &Option)?, - } + match self.result { + Res::Call(ref call) => struc.serialize_field("result", call)?, + Res::Create(ref create) => struc.serialize_field("result", create)?, + Res::FailedCall(ref error) => struc.serialize_field("error", &error.to_string())?, + Res::FailedCreate(ref error) => struc.serialize_field("error", &error.to_string())?, + Res::None => struc.serialize_field("result", &None as &Option)?, + } - struc.serialize_field("traceAddress", &self.trace_address)?; - struc.serialize_field("subtraces", &self.subtraces)?; + struc.serialize_field("traceAddress", &self.trace_address)?; + struc.serialize_field("subtraces", &self.subtraces)?; - struc.end() - } + struc.end() + } } impl From for Trace { - fn from(t: FlatTrace) -> Self { - Trace { - trace_address: t.trace_address.into_iter().map(Into::into).collect(), - subtraces: t.subtraces, - action: t.action.into(), - result: t.result.into(), - } - } + fn from(t: FlatTrace) -> Self { + Trace { + trace_address: t.trace_address.into_iter().map(Into::into).collect(), + subtraces: t.subtraces, + action: t.action.into(), + result: t.result.into(), + } + } } #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] /// A diff of some chunk of memory. pub struct TraceResults { - /// The output of the call/create - pub output: Bytes, - /// The transaction trace. - pub trace: Vec, - /// The transaction trace. - pub vm_trace: Option, - /// The transaction trace. - pub state_diff: Option, + /// The output of the call/create + pub output: Bytes, + /// The transaction trace. + pub trace: Vec, + /// The transaction trace. + pub vm_trace: Option, + /// The transaction trace. 
+ pub state_diff: Option, } impl From for TraceResults { - fn from(t: Executed) -> Self { - TraceResults { - output: t.output.into(), - trace: t.trace.into_iter().map(Into::into).collect(), - vm_trace: t.vm_trace.map(Into::into), - state_diff: t.state_diff.map(Into::into), - } - } + fn from(t: Executed) -> Self { + TraceResults { + output: t.output.into(), + trace: t.trace.into_iter().map(Into::into).collect(), + vm_trace: t.vm_trace.map(Into::into), + state_diff: t.state_diff.map(Into::into), + } + } } #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] /// A diff of some chunk of memory. pub struct TraceResultsWithTransactionHash { - /// The output of the call/create - pub output: Bytes, - /// The transaction trace. - pub trace: Vec, - /// The transaction trace. - pub vm_trace: Option, - /// The transaction trace. - pub state_diff: Option, - /// The transaction Hash. - pub transaction_hash: H256, + /// The output of the call/create + pub output: Bytes, + /// The transaction trace. + pub trace: Vec, + /// The transaction trace. + pub vm_trace: Option, + /// The transaction trace. + pub state_diff: Option, + /// The transaction Hash. 
+ pub transaction_hash: H256, } impl From<(H256, Executed)> for TraceResultsWithTransactionHash { - fn from(t: (H256, Executed)) -> Self { - TraceResultsWithTransactionHash { - output: t.1.output.into(), - trace: t.1.trace.into_iter().map(Into::into).collect(), - vm_trace: t.1.vm_trace.map(Into::into), - state_diff: t.1.state_diff.map(Into::into), - transaction_hash: t.0, - } - } + fn from(t: (H256, Executed)) -> Self { + TraceResultsWithTransactionHash { + output: t.1.output.into(), + trace: t.1.trace.into_iter().map(Into::into).collect(), + vm_trace: t.1.vm_trace.map(Into::into), + state_diff: t.1.state_diff.map(Into::into), + transaction_hash: t.0, + } + } } #[cfg(test)] mod tests { - use serde_json; - use std::collections::BTreeMap; - use v1::types::Bytes; - use ethcore::trace::TraceError; - use super::*; + use super::*; + use ethcore::trace::TraceError; + use serde_json; + use std::collections::BTreeMap; + use v1::types::Bytes; - #[test] - fn should_serialize_trace_results() { - let r = TraceResults { - output: vec![0x60].into(), - trace: vec![], - vm_trace: None, - state_diff: None, - }; - let serialized = serde_json::to_string(&r).unwrap(); - assert_eq!(serialized, r#"{"output":"0x60","trace":[],"vmTrace":null,"stateDiff":null}"#); - } + #[test] + fn should_serialize_trace_results() { + let r = TraceResults { + output: vec![0x60].into(), + trace: vec![], + vm_trace: None, + state_diff: None, + }; + let serialized = serde_json::to_string(&r).unwrap(); + assert_eq!( + serialized, + r#"{"output":"0x60","trace":[],"vmTrace":null,"stateDiff":null}"# + ); + } - #[test] - fn test_trace_call_serialize() { - let t = LocalizedTrace { - action: Action::Call(Call { - from: 4.into(), - to: 5.into(), - value: 6.into(), - gas: 7.into(), - input: Bytes::new(vec![0x12, 0x34]), - call_type: CallType::Call, - }), - result: Res::Call(CallResult { - gas_used: 8.into(), - output: vec![0x56, 0x78].into(), - }), - trace_address: vec![10], - subtraces: 1, - transaction_position: 
Some(11), - transaction_hash: Some(12.into()), - block_number: 13, - block_hash: 14.into(), - }; - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"result":{"gasUsed":"0x8","output":"0x5678"},"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); - } + #[test] + fn test_trace_call_serialize() { + let t = LocalizedTrace { + action: Action::Call(Call { + from: 4.into(), + to: 5.into(), + value: 6.into(), + gas: 7.into(), + input: Bytes::new(vec![0x12, 0x34]), + call_type: CallType::Call, + }), + result: Res::Call(CallResult { + gas_used: 8.into(), + output: vec![0x56, 0x78].into(), + }), + trace_address: vec![10], + subtraces: 1, + transaction_position: Some(11), + transaction_hash: Some(12.into()), + block_number: 13, + block_hash: 14.into(), + }; + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"result":{"gasUsed":"0x8","output":"0x5678"},"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"# + ); + } - #[test] - fn test_trace_failed_call_serialize() { - let t = LocalizedTrace { - action: Action::Call(Call { - from: 4.into(), - to: 5.into(), - value: 6.into(), - gas: 7.into(), - input: Bytes::new(vec![0x12, 0x34]), - call_type: CallType::Call, - 
}), - result: Res::FailedCall(TraceError::OutOfGas), - trace_address: vec![10], - subtraces: 1, - transaction_position: Some(11), - transaction_hash: Some(12.into()), - block_number: 13, - block_hash: 14.into(), - }; - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"error":"Out of gas","traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); - } + #[test] + fn test_trace_failed_call_serialize() { + let t = LocalizedTrace { + action: Action::Call(Call { + from: 4.into(), + to: 5.into(), + value: 6.into(), + gas: 7.into(), + input: Bytes::new(vec![0x12, 0x34]), + call_type: CallType::Call, + }), + result: Res::FailedCall(TraceError::OutOfGas), + trace_address: vec![10], + subtraces: 1, + transaction_position: Some(11), + transaction_hash: Some(12.into()), + block_number: 13, + block_hash: 14.into(), + }; + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"error":"Out of gas","traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"# + ); + } - #[test] - fn test_trace_create_serialize() { - let t = LocalizedTrace { - action: Action::Create(Create { - from: 4.into(), - value: 6.into(), - gas: 7.into(), - init: Bytes::new(vec![0x12, 0x34]), - }), - result: 
Res::Create(CreateResult { - gas_used: 8.into(), - code: vec![0x56, 0x78].into(), - address: 0xff.into(), - }), - trace_address: vec![10], - subtraces: 1, - transaction_position: Some(11), - transaction_hash: Some(12.into()), - block_number: 13, - block_hash: 14.into(), - }; - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"result":{"gasUsed":"0x8","code":"0x5678","address":"0x00000000000000000000000000000000000000ff"},"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); - } + #[test] + fn test_trace_create_serialize() { + let t = LocalizedTrace { + action: Action::Create(Create { + from: 4.into(), + value: 6.into(), + gas: 7.into(), + init: Bytes::new(vec![0x12, 0x34]), + }), + result: Res::Create(CreateResult { + gas_used: 8.into(), + code: vec![0x56, 0x78].into(), + address: 0xff.into(), + }), + trace_address: vec![10], + subtraces: 1, + transaction_position: Some(11), + transaction_hash: Some(12.into()), + block_number: 13, + block_hash: 14.into(), + }; + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"result":{"gasUsed":"0x8","code":"0x5678","address":"0x00000000000000000000000000000000000000ff"},"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"# + ); + } - #[test] - fn test_trace_failed_create_serialize() { - let t = LocalizedTrace { - action: 
Action::Create(Create { - from: 4.into(), - value: 6.into(), - gas: 7.into(), - init: Bytes::new(vec![0x12, 0x34]), - }), - result: Res::FailedCreate(TraceError::OutOfGas), - trace_address: vec![10], - subtraces: 1, - transaction_position: Some(11), - transaction_hash: Some(12.into()), - block_number: 13, - block_hash: 14.into(), - }; - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"error":"Out of gas","traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); - } + #[test] + fn test_trace_failed_create_serialize() { + let t = LocalizedTrace { + action: Action::Create(Create { + from: 4.into(), + value: 6.into(), + gas: 7.into(), + init: Bytes::new(vec![0x12, 0x34]), + }), + result: Res::FailedCreate(TraceError::OutOfGas), + trace_address: vec![10], + subtraces: 1, + transaction_position: Some(11), + transaction_hash: Some(12.into()), + block_number: 13, + block_hash: 14.into(), + }; + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"error":"Out of gas","traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"# + ); + } - #[test] - fn test_trace_suicide_serialize() { - let t = LocalizedTrace { - action: Action::Suicide(Suicide { - address: 4.into(), - refund_address: 6.into(), - balance: 7.into(), - }), - result: Res::None, - trace_address: vec![10], - subtraces: 1, - 
transaction_position: Some(11), - transaction_hash: Some(12.into()), - block_number: 13, - block_hash: 14.into(), - }; - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"type":"suicide","action":{"address":"0x0000000000000000000000000000000000000004","refundAddress":"0x0000000000000000000000000000000000000006","balance":"0x7"},"result":null,"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); - } + #[test] + fn test_trace_suicide_serialize() { + let t = LocalizedTrace { + action: Action::Suicide(Suicide { + address: 4.into(), + refund_address: 6.into(), + balance: 7.into(), + }), + result: Res::None, + trace_address: vec![10], + subtraces: 1, + transaction_position: Some(11), + transaction_hash: Some(12.into()), + block_number: 13, + block_hash: 14.into(), + }; + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"type":"suicide","action":{"address":"0x0000000000000000000000000000000000000004","refundAddress":"0x0000000000000000000000000000000000000006","balance":"0x7"},"result":null,"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"# + ); + } - #[test] - fn test_trace_reward_serialize() { - let t = LocalizedTrace { - action: Action::Reward(Reward { - author: 4.into(), - value: 6.into(), - reward_type: RewardType::Block, - }), - result: Res::None, - trace_address: vec![10], - subtraces: 1, - transaction_position: None, - transaction_hash: None, - block_number: 13, - block_hash: 14.into(), - }; - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, 
r#"{"type":"reward","action":{"author":"0x0000000000000000000000000000000000000004","value":"0x6","rewardType":"block"},"result":null,"traceAddress":[10],"subtraces":1,"transactionPosition":null,"transactionHash":null,"blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); - } + #[test] + fn test_trace_reward_serialize() { + let t = LocalizedTrace { + action: Action::Reward(Reward { + author: 4.into(), + value: 6.into(), + reward_type: RewardType::Block, + }), + result: Res::None, + trace_address: vec![10], + subtraces: 1, + transaction_position: None, + transaction_hash: None, + block_number: 13, + block_hash: 14.into(), + }; + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"type":"reward","action":{"author":"0x0000000000000000000000000000000000000004","value":"0x6","rewardType":"block"},"result":null,"traceAddress":[10],"subtraces":1,"transactionPosition":null,"transactionHash":null,"blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"# + ); + } - #[test] - fn test_vmtrace_serialize() { - let t = VMTrace { - code: vec![0, 1, 2, 3].into(), - ops: vec![ - VMOperation { - pc: 0, - cost: 10, - ex: None, - sub: None, - }, - VMOperation { - pc: 1, - cost: 11, - ex: Some(VMExecutedOperation { - used: 10, - push: vec![69.into()], - mem: None, - store: None, - }), - sub: Some(VMTrace { - code: vec![0].into(), - ops: vec![ - VMOperation { - pc: 0, - cost: 0, - ex: Some(VMExecutedOperation { - used: 10, - push: vec![42.into()].into(), - mem: Some(MemoryDiff {off: 42, data: vec![1, 2, 3].into()}), - store: Some(StorageDiff {key: 69.into(), val: 42.into()}), - }), - sub: None, - } - ] - }), - } - ] - }; - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, 
r#"{"code":"0x00010203","ops":[{"pc":0,"cost":10,"ex":null,"sub":null},{"pc":1,"cost":11,"ex":{"used":10,"push":["0x45"],"mem":null,"store":null},"sub":{"code":"0x00","ops":[{"pc":0,"cost":0,"ex":{"used":10,"push":["0x2a"],"mem":{"off":42,"data":"0x010203"},"store":{"key":"0x45","val":"0x2a"}},"sub":null}]}}]}"#); - } + #[test] + fn test_vmtrace_serialize() { + let t = VMTrace { + code: vec![0, 1, 2, 3].into(), + ops: vec![ + VMOperation { + pc: 0, + cost: 10, + ex: None, + sub: None, + }, + VMOperation { + pc: 1, + cost: 11, + ex: Some(VMExecutedOperation { + used: 10, + push: vec![69.into()], + mem: None, + store: None, + }), + sub: Some(VMTrace { + code: vec![0].into(), + ops: vec![VMOperation { + pc: 0, + cost: 0, + ex: Some(VMExecutedOperation { + used: 10, + push: vec![42.into()].into(), + mem: Some(MemoryDiff { + off: 42, + data: vec![1, 2, 3].into(), + }), + store: Some(StorageDiff { + key: 69.into(), + val: 42.into(), + }), + }), + sub: None, + }], + }), + }, + ], + }; + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"code":"0x00010203","ops":[{"pc":0,"cost":10,"ex":null,"sub":null},{"pc":1,"cost":11,"ex":{"used":10,"push":["0x45"],"mem":null,"store":null},"sub":{"code":"0x00","ops":[{"pc":0,"cost":0,"ex":{"used":10,"push":["0x2a"],"mem":{"off":42,"data":"0x010203"},"store":{"key":"0x45","val":"0x2a"}},"sub":null}]}}]}"# + ); + } - #[test] - fn test_statediff_serialize() { - let t = StateDiff(map![ - 42.into() => AccountDiff { - balance: Diff::Same, - nonce: Diff::Born(1.into()), - code: Diff::Same, - storage: map![ - 42.into() => Diff::Same - ] - }, - 69.into() => AccountDiff { - balance: Diff::Same, - nonce: Diff::Changed(ChangedType { from: 1.into(), to: 0.into() }), - code: Diff::Died(vec![96].into()), - storage: map![], - } - ]); - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, 
r#"{"0x000000000000000000000000000000000000002a":{"balance":"=","nonce":{"+":"0x1"},"code":"=","storage":{"0x000000000000000000000000000000000000000000000000000000000000002a":"="}},"0x0000000000000000000000000000000000000045":{"balance":"=","nonce":{"*":{"from":"0x1","to":"0x0"}},"code":{"-":"0x60"},"storage":{}}}"#); - } + #[test] + fn test_statediff_serialize() { + let t = StateDiff(map![ + 42.into() => AccountDiff { + balance: Diff::Same, + nonce: Diff::Born(1.into()), + code: Diff::Same, + storage: map![ + 42.into() => Diff::Same + ] + }, + 69.into() => AccountDiff { + balance: Diff::Same, + nonce: Diff::Changed(ChangedType { from: 1.into(), to: 0.into() }), + code: Diff::Died(vec![96].into()), + storage: map![], + } + ]); + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + r#"{"0x000000000000000000000000000000000000002a":{"balance":"=","nonce":{"+":"0x1"},"code":"=","storage":{"0x000000000000000000000000000000000000000000000000000000000000002a":"="}},"0x0000000000000000000000000000000000000045":{"balance":"=","nonce":{"*":{"from":"0x1","to":"0x0"}},"code":{"-":"0x60"},"storage":{}}}"# + ); + } } diff --git a/rpc/src/v1/types/trace_filter.rs b/rpc/src/v1/types/trace_filter.rs index a455c3d1c..6cf1f66d2 100644 --- a/rpc/src/v1/types/trace_filter.rs +++ b/rpc/src/v1/types/trace_filter.rs @@ -16,8 +16,7 @@ //! Trace filter deserialization. 
-use ethcore::client::BlockId; -use ethcore::client; +use ethcore::{client, client::BlockId}; use ethereum_types::H160; use v1::types::BlockNumber; @@ -26,66 +25,73 @@ use v1::types::BlockNumber; #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct TraceFilter { - /// From block - pub from_block: Option, - /// To block - pub to_block: Option, - /// From address - pub from_address: Option>, - /// To address - pub to_address: Option>, - /// Output offset - pub after: Option, - /// Output amount - pub count: Option, + /// From block + pub from_block: Option, + /// To block + pub to_block: Option, + /// From address + pub from_address: Option>, + /// To address + pub to_address: Option>, + /// Output offset + pub after: Option, + /// Output amount + pub count: Option, } impl Into for TraceFilter { - fn into(self) -> client::TraceFilter { - let num_to_id = |num| match num { - BlockNumber::Num(n) => BlockId::Number(n), - BlockNumber::Earliest => BlockId::Earliest, - BlockNumber::Latest => BlockId::Latest, - BlockNumber::Pending => { - warn!("Pending traces are not supported and might be removed in future versions. Falling back to Latest"); - BlockId::Latest - } - }; - let start = self.from_block.map_or(BlockId::Latest, &num_to_id); - let end = self.to_block.map_or(BlockId::Latest, &num_to_id); - client::TraceFilter { - range: start..end, - from_address: self.from_address.map_or_else(Vec::new, |x| x.into_iter().map(Into::into).collect()), - to_address: self.to_address.map_or_else(Vec::new, |x| x.into_iter().map(Into::into).collect()), - after: self.after, - count: self.count, - } - } + fn into(self) -> client::TraceFilter { + let num_to_id = |num| match num { + BlockNumber::Num(n) => BlockId::Number(n), + BlockNumber::Earliest => BlockId::Earliest, + BlockNumber::Latest => BlockId::Latest, + BlockNumber::Pending => { + warn!("Pending traces are not supported and might be removed in future versions. 
Falling back to Latest"); + BlockId::Latest + } + }; + let start = self.from_block.map_or(BlockId::Latest, &num_to_id); + let end = self.to_block.map_or(BlockId::Latest, &num_to_id); + client::TraceFilter { + range: start..end, + from_address: self + .from_address + .map_or_else(Vec::new, |x| x.into_iter().map(Into::into).collect()), + to_address: self + .to_address + .map_or_else(Vec::new, |x| x.into_iter().map(Into::into).collect()), + after: self.after, + count: self.count, + } + } } #[cfg(test)] mod tests { - use serde_json; - use ethereum_types::Address; - use v1::types::{BlockNumber, TraceFilter}; + use ethereum_types::Address; + use serde_json; + use v1::types::{BlockNumber, TraceFilter}; - #[test] - fn test_empty_trace_filter_deserialize() { - let s = r#"{}"#; - let deserialized: TraceFilter = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, TraceFilter { - from_block: None, - to_block: None, - from_address: None, - to_address: None, - after: None, - count: None, - }); - } + #[test] + fn test_empty_trace_filter_deserialize() { + let s = r#"{}"#; + let deserialized: TraceFilter = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + TraceFilter { + from_block: None, + to_block: None, + from_address: None, + to_address: None, + after: None, + count: None, + } + ); + } - #[test] - fn test_trace_filter_deserialize() { - let s = r#"{ + #[test] + fn test_trace_filter_deserialize() { + let s = r#"{ "fromBlock": "latest", "toBlock": "latest", "fromAddress": ["0x0000000000000000000000000000000000000003"], @@ -93,14 +99,17 @@ mod tests { "after": 50, "count": 100 }"#; - let deserialized: TraceFilter = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, TraceFilter { - from_block: Some(BlockNumber::Latest), - to_block: Some(BlockNumber::Latest), - from_address: Some(vec![Address::from(3).into()]), - to_address: Some(vec![Address::from(5).into()]), - after: 50.into(), - count: 100.into(), - }); - } + let deserialized: TraceFilter = 
serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + TraceFilter { + from_block: Some(BlockNumber::Latest), + to_block: Some(BlockNumber::Latest), + from_address: Some(vec![Address::from(3).into()]), + to_address: Some(vec![Address::from(5).into()]), + after: 50.into(), + count: 100.into(), + } + ); + } } diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index 931e03866..6e1c95e82 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -16,316 +16,320 @@ use std::sync::Arc; -use serde::{Serialize, Serializer}; -use serde::ser::SerializeStruct; use ethcore::{contract_address, CreateContractAddress}; -use ethereum_types::{H160, H256, H512, U64, U256}; +use ethereum_types::{H160, H256, H512, U256, U64}; use miner; -use types::transaction::{LocalizedTransaction, Action, PendingTransaction, SignedTransaction}; +use serde::{ser::SerializeStruct, Serialize, Serializer}; +use types::transaction::{Action, LocalizedTransaction, PendingTransaction, SignedTransaction}; use v1::types::{Bytes, TransactionCondition}; /// Transaction #[derive(Debug, Default, Clone, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct Transaction { - /// Hash - pub hash: H256, - /// Nonce - pub nonce: U256, - /// Block hash - pub block_hash: Option, - /// Block number - pub block_number: Option, - /// Transaction Index - pub transaction_index: Option, - /// Sender - pub from: H160, - /// Recipient - pub to: Option, - /// Transfered value - pub value: U256, - /// Gas Price - pub gas_price: U256, - /// Gas - pub gas: U256, - /// Data - pub input: Bytes, - /// Creates contract - pub creates: Option, - /// Raw transaction data - pub raw: Bytes, - /// Public key of the signer. - pub public_key: Option, - /// The network id of the transaction, if any. - pub chain_id: Option, - /// The standardised V field of the signature (0 or 1). - pub standard_v: U256, - /// The standardised V field of the signature. 
- pub v: U256, - /// The R field of the signature. - pub r: U256, - /// The S field of the signature. - pub s: U256, - /// Transaction activates at specified block. - pub condition: Option, + /// Hash + pub hash: H256, + /// Nonce + pub nonce: U256, + /// Block hash + pub block_hash: Option, + /// Block number + pub block_number: Option, + /// Transaction Index + pub transaction_index: Option, + /// Sender + pub from: H160, + /// Recipient + pub to: Option, + /// Transfered value + pub value: U256, + /// Gas Price + pub gas_price: U256, + /// Gas + pub gas: U256, + /// Data + pub input: Bytes, + /// Creates contract + pub creates: Option, + /// Raw transaction data + pub raw: Bytes, + /// Public key of the signer. + pub public_key: Option, + /// The network id of the transaction, if any. + pub chain_id: Option, + /// The standardised V field of the signature (0 or 1). + pub standard_v: U256, + /// The standardised V field of the signature. + pub v: U256, + /// The R field of the signature. + pub r: U256, + /// The S field of the signature. + pub s: U256, + /// Transaction activates at specified block. + pub condition: Option, } /// Local Transaction Status #[derive(Debug)] pub enum LocalTransactionStatus { - /// Transaction is pending - Pending, - /// Transaction is in future part of the queue - Future, - /// Transaction was mined. - Mined(Transaction), - /// Transaction was removed from the queue, but not mined. - Culled(Transaction), - /// Transaction was dropped because of limit. - Dropped(Transaction), - /// Transaction was replaced by transaction with higher gas price. - Replaced(Transaction, U256, H256), - /// Transaction never got into the queue. - Rejected(Transaction, String), - /// Transaction is invalid. - Invalid(Transaction), - /// Transaction was canceled. - Canceled(Transaction), + /// Transaction is pending + Pending, + /// Transaction is in future part of the queue + Future, + /// Transaction was mined. 
+ Mined(Transaction), + /// Transaction was removed from the queue, but not mined. + Culled(Transaction), + /// Transaction was dropped because of limit. + Dropped(Transaction), + /// Transaction was replaced by transaction with higher gas price. + Replaced(Transaction, U256, H256), + /// Transaction never got into the queue. + Rejected(Transaction, String), + /// Transaction is invalid. + Invalid(Transaction), + /// Transaction was canceled. + Canceled(Transaction), } impl Serialize for LocalTransactionStatus { - fn serialize(&self, serializer: S) -> Result - where S: Serializer - { - use self::LocalTransactionStatus::*; + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use self::LocalTransactionStatus::*; - let elems = match *self { - Pending | Future => 1, - Mined(..) | Culled(..) | Dropped(..) | Invalid(..) | Canceled(..) => 2, - Rejected(..) => 3, - Replaced(..) => 4, - }; + let elems = match *self { + Pending | Future => 1, + Mined(..) | Culled(..) | Dropped(..) | Invalid(..) | Canceled(..) => 2, + Rejected(..) => 3, + Replaced(..) 
=> 4, + }; - let status = "status"; - let transaction = "transaction"; + let status = "status"; + let transaction = "transaction"; - let mut struc = serializer.serialize_struct("LocalTransactionStatus", elems)?; - match *self { - Pending => struc.serialize_field(status, "pending")?, - Future => struc.serialize_field(status, "future")?, - Mined(ref tx) => { - struc.serialize_field(status, "mined")?; - struc.serialize_field(transaction, tx)?; - }, - Culled(ref tx) => { - struc.serialize_field(status, "culled")?; - struc.serialize_field(transaction, tx)?; - }, - Dropped(ref tx) => { - struc.serialize_field(status, "dropped")?; - struc.serialize_field(transaction, tx)?; - }, - Canceled(ref tx) => { - struc.serialize_field(status, "canceled")?; - struc.serialize_field(transaction, tx)?; - }, - Invalid(ref tx) => { - struc.serialize_field(status, "invalid")?; - struc.serialize_field(transaction, tx)?; - }, - Rejected(ref tx, ref reason) => { - struc.serialize_field(status, "rejected")?; - struc.serialize_field(transaction, tx)?; - struc.serialize_field("error", reason)?; - }, - Replaced(ref tx, ref gas_price, ref hash) => { - struc.serialize_field(status, "replaced")?; - struc.serialize_field(transaction, tx)?; - struc.serialize_field("hash", hash)?; - struc.serialize_field("gasPrice", gas_price)?; - }, - } + let mut struc = serializer.serialize_struct("LocalTransactionStatus", elems)?; + match *self { + Pending => struc.serialize_field(status, "pending")?, + Future => struc.serialize_field(status, "future")?, + Mined(ref tx) => { + struc.serialize_field(status, "mined")?; + struc.serialize_field(transaction, tx)?; + } + Culled(ref tx) => { + struc.serialize_field(status, "culled")?; + struc.serialize_field(transaction, tx)?; + } + Dropped(ref tx) => { + struc.serialize_field(status, "dropped")?; + struc.serialize_field(transaction, tx)?; + } + Canceled(ref tx) => { + struc.serialize_field(status, "canceled")?; + struc.serialize_field(transaction, tx)?; + } + Invalid(ref 
tx) => { + struc.serialize_field(status, "invalid")?; + struc.serialize_field(transaction, tx)?; + } + Rejected(ref tx, ref reason) => { + struc.serialize_field(status, "rejected")?; + struc.serialize_field(transaction, tx)?; + struc.serialize_field("error", reason)?; + } + Replaced(ref tx, ref gas_price, ref hash) => { + struc.serialize_field(status, "replaced")?; + struc.serialize_field(transaction, tx)?; + struc.serialize_field("hash", hash)?; + struc.serialize_field("gasPrice", gas_price)?; + } + } - struc.end() - } + struc.end() + } } /// Geth-compatible output for eth_signTransaction method #[derive(Debug, Default, Clone, PartialEq, Serialize)] pub struct RichRawTransaction { - /// Raw transaction RLP - pub raw: Bytes, - /// Transaction details - #[serde(rename = "tx")] - pub transaction: Transaction + /// Raw transaction RLP + pub raw: Bytes, + /// Transaction details + #[serde(rename = "tx")] + pub transaction: Transaction, } impl RichRawTransaction { - /// Creates new `RichRawTransaction` from `SignedTransaction`. - pub fn from_signed(tx: SignedTransaction) -> Self { - let tx = Transaction::from_signed(tx); - RichRawTransaction { - raw: tx.raw.clone(), - transaction: tx, - } - } + /// Creates new `RichRawTransaction` from `SignedTransaction`. + pub fn from_signed(tx: SignedTransaction) -> Self { + let tx = Transaction::from_signed(tx); + RichRawTransaction { + raw: tx.raw.clone(), + transaction: tx, + } + } } impl Transaction { - /// Convert `LocalizedTransaction` into RPC Transaction. 
- pub fn from_localized(mut t: LocalizedTransaction) -> Transaction { - let signature = t.signature(); - let scheme = CreateContractAddress::FromSenderAndNonce; - Transaction { - hash: t.hash(), - nonce: t.nonce, - block_hash: Some(t.block_hash), - block_number: Some(t.block_number.into()), - transaction_index: Some(t.transaction_index.into()), - from: t.sender(), - to: match t.action { - Action::Create => None, - Action::Call(ref address) => Some(*address) - }, - value: t.value, - gas_price: t.gas_price, - gas: t.gas, - input: Bytes::new(t.data.clone()), - creates: match t.action { - Action::Create => Some(contract_address(scheme, &t.sender(), &t.nonce, &t.data).0), - Action::Call(_) => None, - }, - raw: ::rlp::encode(&t.signed).into(), - public_key: t.recover_public().ok().map(Into::into), - chain_id: t.chain_id().map(U64::from), - standard_v: t.standard_v().into(), - v: t.original_v().into(), - r: signature.r().into(), - s: signature.s().into(), - condition: None, - } - } + /// Convert `LocalizedTransaction` into RPC Transaction. 
+ pub fn from_localized(mut t: LocalizedTransaction) -> Transaction { + let signature = t.signature(); + let scheme = CreateContractAddress::FromSenderAndNonce; + Transaction { + hash: t.hash(), + nonce: t.nonce, + block_hash: Some(t.block_hash), + block_number: Some(t.block_number.into()), + transaction_index: Some(t.transaction_index.into()), + from: t.sender(), + to: match t.action { + Action::Create => None, + Action::Call(ref address) => Some(*address), + }, + value: t.value, + gas_price: t.gas_price, + gas: t.gas, + input: Bytes::new(t.data.clone()), + creates: match t.action { + Action::Create => Some(contract_address(scheme, &t.sender(), &t.nonce, &t.data).0), + Action::Call(_) => None, + }, + raw: ::rlp::encode(&t.signed).into(), + public_key: t.recover_public().ok().map(Into::into), + chain_id: t.chain_id().map(U64::from), + standard_v: t.standard_v().into(), + v: t.original_v().into(), + r: signature.r().into(), + s: signature.s().into(), + condition: None, + } + } - /// Convert `SignedTransaction` into RPC Transaction. 
- pub fn from_signed(t: SignedTransaction) -> Transaction { - let signature = t.signature(); - let scheme = CreateContractAddress::FromSenderAndNonce; - Transaction { - hash: t.hash(), - nonce: t.nonce, - block_hash: None, - block_number: None, - transaction_index: None, - from: t.sender(), - to: match t.action { - Action::Create => None, - Action::Call(ref address) => Some(*address) - }, - value: t.value, - gas_price: t.gas_price, - gas: t.gas, - input: Bytes::new(t.data.clone()), - creates: match t.action { - Action::Create => Some(contract_address(scheme, &t.sender(), &t.nonce, &t.data).0), - Action::Call(_) => None, - }, - raw: ::rlp::encode(&t).into(), - public_key: t.public_key().map(Into::into), - chain_id: t.chain_id().map(U64::from), - standard_v: t.standard_v().into(), - v: t.original_v().into(), - r: signature.r().into(), - s: signature.s().into(), - condition: None, - } - } + /// Convert `SignedTransaction` into RPC Transaction. + pub fn from_signed(t: SignedTransaction) -> Transaction { + let signature = t.signature(); + let scheme = CreateContractAddress::FromSenderAndNonce; + Transaction { + hash: t.hash(), + nonce: t.nonce, + block_hash: None, + block_number: None, + transaction_index: None, + from: t.sender(), + to: match t.action { + Action::Create => None, + Action::Call(ref address) => Some(*address), + }, + value: t.value, + gas_price: t.gas_price, + gas: t.gas, + input: Bytes::new(t.data.clone()), + creates: match t.action { + Action::Create => Some(contract_address(scheme, &t.sender(), &t.nonce, &t.data).0), + Action::Call(_) => None, + }, + raw: ::rlp::encode(&t).into(), + public_key: t.public_key().map(Into::into), + chain_id: t.chain_id().map(U64::from), + standard_v: t.standard_v().into(), + v: t.original_v().into(), + r: signature.r().into(), + s: signature.s().into(), + condition: None, + } + } - /// Convert `PendingTransaction` into RPC Transaction. 
- pub fn from_pending(t: PendingTransaction) -> Transaction { - let mut r = Transaction::from_signed(t.transaction); - r.condition = r.condition.map(Into::into); - r - } + /// Convert `PendingTransaction` into RPC Transaction. + pub fn from_pending(t: PendingTransaction) -> Transaction { + let mut r = Transaction::from_signed(t.transaction); + r.condition = r.condition.map(Into::into); + r + } } impl LocalTransactionStatus { - /// Convert `LocalTransactionStatus` into RPC `LocalTransactionStatus`. - pub fn from(s: miner::pool::local_transactions::Status) -> Self { - let convert = |tx: Arc| { - Transaction::from_signed(tx.signed().clone()) - }; - use miner::pool::local_transactions::Status::*; - match s { - Pending(_) => LocalTransactionStatus::Pending, - Mined(tx) => LocalTransactionStatus::Mined(convert(tx)), - Culled(tx) => LocalTransactionStatus::Culled(convert(tx)), - Dropped(tx) => LocalTransactionStatus::Dropped(convert(tx)), - Rejected(tx, reason) => LocalTransactionStatus::Rejected(convert(tx), reason), - Invalid(tx) => LocalTransactionStatus::Invalid(convert(tx)), - Canceled(tx) => LocalTransactionStatus::Canceled(convert(tx)), - Replaced { old, new } => LocalTransactionStatus::Replaced( - convert(old), - new.signed().gas_price, - new.signed().hash(), - ), - } - } + /// Convert `LocalTransactionStatus` into RPC `LocalTransactionStatus`. 
+ pub fn from(s: miner::pool::local_transactions::Status) -> Self { + let convert = |tx: Arc| { + Transaction::from_signed(tx.signed().clone()) + }; + use miner::pool::local_transactions::Status::*; + match s { + Pending(_) => LocalTransactionStatus::Pending, + Mined(tx) => LocalTransactionStatus::Mined(convert(tx)), + Culled(tx) => LocalTransactionStatus::Culled(convert(tx)), + Dropped(tx) => LocalTransactionStatus::Dropped(convert(tx)), + Rejected(tx, reason) => LocalTransactionStatus::Rejected(convert(tx), reason), + Invalid(tx) => LocalTransactionStatus::Invalid(convert(tx)), + Canceled(tx) => LocalTransactionStatus::Canceled(convert(tx)), + Replaced { old, new } => LocalTransactionStatus::Replaced( + convert(old), + new.signed().gas_price, + new.signed().hash(), + ), + } + } } #[cfg(test)] mod tests { - use super::{Transaction, LocalTransactionStatus}; - use serde_json; + use super::{LocalTransactionStatus, Transaction}; + use serde_json; - #[test] - fn test_transaction_serialize() { - let t = Transaction::default(); - let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null,"chainId":null,"standardV":"0x0","v":"0x0","r":"0x0","s":"0x0","condition":null}"#); - } + #[test] + fn test_transaction_serialize() { + let t = Transaction::default(); + let serialized = serde_json::to_string(&t).unwrap(); + assert_eq!( + serialized, + 
r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null,"chainId":null,"standardV":"0x0","v":"0x0","r":"0x0","s":"0x0","condition":null}"# + ); + } - #[test] - fn test_local_transaction_status_serialize() { - let tx_ser = serde_json::to_string(&Transaction::default()).unwrap(); - let status1 = LocalTransactionStatus::Pending; - let status2 = LocalTransactionStatus::Future; - let status3 = LocalTransactionStatus::Mined(Transaction::default()); - let status4 = LocalTransactionStatus::Dropped(Transaction::default()); - let status5 = LocalTransactionStatus::Invalid(Transaction::default()); - let status6 = LocalTransactionStatus::Rejected(Transaction::default(), "Just because".into()); - let status7 = LocalTransactionStatus::Replaced(Transaction::default(), 5.into(), 10.into()); + #[test] + fn test_local_transaction_status_serialize() { + let tx_ser = serde_json::to_string(&Transaction::default()).unwrap(); + let status1 = LocalTransactionStatus::Pending; + let status2 = LocalTransactionStatus::Future; + let status3 = LocalTransactionStatus::Mined(Transaction::default()); + let status4 = LocalTransactionStatus::Dropped(Transaction::default()); + let status5 = LocalTransactionStatus::Invalid(Transaction::default()); + let status6 = + LocalTransactionStatus::Rejected(Transaction::default(), "Just because".into()); + let status7 = LocalTransactionStatus::Replaced(Transaction::default(), 5.into(), 10.into()); - assert_eq!( - serde_json::to_string(&status1).unwrap(), - r#"{"status":"pending"}"# - ); - assert_eq!( - serde_json::to_string(&status2).unwrap(), - r#"{"status":"future"}"# - ); - assert_eq!( - serde_json::to_string(&status3).unwrap(), - r#"{"status":"mined","transaction":"#.to_owned() + &format!("{}", tx_ser) + 
r#"}"# - ); - assert_eq!( - serde_json::to_string(&status4).unwrap(), - r#"{"status":"dropped","transaction":"#.to_owned() + &format!("{}", tx_ser) + r#"}"# - ); - assert_eq!( - serde_json::to_string(&status5).unwrap(), - r#"{"status":"invalid","transaction":"#.to_owned() + &format!("{}", tx_ser) + r#"}"# - ); - assert_eq!( - serde_json::to_string(&status6).unwrap(), - r#"{"status":"rejected","transaction":"#.to_owned() + - &format!("{}", tx_ser) + - r#","error":"Just because"}"# - ); - assert_eq!( - serde_json::to_string(&status7).unwrap(), - r#"{"status":"replaced","transaction":"#.to_owned() + - &format!("{}", tx_ser) + - r#","hash":"0x000000000000000000000000000000000000000000000000000000000000000a","gasPrice":"0x5"}"# - ); - } + assert_eq!( + serde_json::to_string(&status1).unwrap(), + r#"{"status":"pending"}"# + ); + assert_eq!( + serde_json::to_string(&status2).unwrap(), + r#"{"status":"future"}"# + ); + assert_eq!( + serde_json::to_string(&status3).unwrap(), + r#"{"status":"mined","transaction":"#.to_owned() + &format!("{}", tx_ser) + r#"}"# + ); + assert_eq!( + serde_json::to_string(&status4).unwrap(), + r#"{"status":"dropped","transaction":"#.to_owned() + &format!("{}", tx_ser) + r#"}"# + ); + assert_eq!( + serde_json::to_string(&status5).unwrap(), + r#"{"status":"invalid","transaction":"#.to_owned() + &format!("{}", tx_ser) + r#"}"# + ); + assert_eq!( + serde_json::to_string(&status6).unwrap(), + r#"{"status":"rejected","transaction":"#.to_owned() + + &format!("{}", tx_ser) + + r#","error":"Just because"}"# + ); + assert_eq!( + serde_json::to_string(&status7).unwrap(), + r#"{"status":"replaced","transaction":"#.to_owned() + + &format!("{}", tx_ser) + + r#","hash":"0x000000000000000000000000000000000000000000000000000000000000000a","gasPrice":"0x5"}"# + ); + } } diff --git a/rpc/src/v1/types/transaction_condition.rs b/rpc/src/v1/types/transaction_condition.rs index 589848fe5..0f6ad1768 100644 --- a/rpc/src/v1/types/transaction_condition.rs +++ 
b/rpc/src/v1/types/transaction_condition.rs @@ -20,47 +20,59 @@ use types::transaction; #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub enum TransactionCondition { - /// Valid at this minimum block number. - #[serde(rename = "block")] - Number(u64), - /// Valid at given unix time. - #[serde(rename = "time")] - Timestamp(u64), + /// Valid at this minimum block number. + #[serde(rename = "block")] + Number(u64), + /// Valid at given unix time. + #[serde(rename = "time")] + Timestamp(u64), } impl Into for TransactionCondition { - fn into(self) -> transaction::Condition { - match self { - TransactionCondition::Number(n) => transaction::Condition::Number(n), - TransactionCondition::Timestamp(n) => transaction::Condition::Timestamp(n), - } - } + fn into(self) -> transaction::Condition { + match self { + TransactionCondition::Number(n) => transaction::Condition::Number(n), + TransactionCondition::Timestamp(n) => transaction::Condition::Timestamp(n), + } + } } impl From for TransactionCondition { - fn from(condition: transaction::Condition) -> Self { - match condition { - transaction::Condition::Number(n) => TransactionCondition::Number(n), - transaction::Condition::Timestamp(n) => TransactionCondition::Timestamp(n), - } - } + fn from(condition: transaction::Condition) -> Self { + match condition { + transaction::Condition::Number(n) => TransactionCondition::Number(n), + transaction::Condition::Timestamp(n) => TransactionCondition::Timestamp(n), + } + } } #[cfg(test)] mod tests { - use super::*; - use serde_json; + use super::*; + use serde_json; - #[test] - fn condition_deserialization() { - let s = r#"[{ "block": 51 }, { "time": 10 }]"#; - let deserialized: Vec = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, vec![TransactionCondition::Number(51), TransactionCondition::Timestamp(10)]) - } + #[test] + fn condition_deserialization() { + let s = r#"[{ "block": 51 }, { "time": 10 }]"#; + let deserialized: 
Vec = serde_json::from_str(s).unwrap(); + assert_eq!( + deserialized, + vec![ + TransactionCondition::Number(51), + TransactionCondition::Timestamp(10) + ] + ) + } - #[test] - fn condition_into() { - assert_eq!(transaction::Condition::Number(100), TransactionCondition::Number(100).into()); - assert_eq!(transaction::Condition::Timestamp(100), TransactionCondition::Timestamp(100).into()); - } + #[test] + fn condition_into() { + assert_eq!( + transaction::Condition::Number(100), + TransactionCondition::Number(100).into() + ); + assert_eq!( + transaction::Condition::Timestamp(100), + TransactionCondition::Timestamp(100).into() + ); + } } diff --git a/rpc/src/v1/types/transaction_request.rs b/rpc/src/v1/types/transaction_request.rs index 944ee1114..19928fa86 100644 --- a/rpc/src/v1/types/transaction_request.rs +++ b/rpc/src/v1/types/transaction_request.rs @@ -16,10 +16,12 @@ //! `TransactionRequest` type -use ethereum_types::{H160, U256}; -use v1::types::{Bytes, TransactionCondition}; -use v1::helpers; use ansi_term::Colour; +use ethereum_types::{H160, U256}; +use v1::{ + helpers, + types::{Bytes, TransactionCondition}, +}; use std::fmt; @@ -28,123 +30,127 @@ use std::fmt; #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct TransactionRequest { - /// Sender - pub from: Option, - /// Recipient - pub to: Option, - /// Gas Price - pub gas_price: Option, - /// Gas - pub gas: Option, - /// Value of transaction in wei - pub value: Option, - /// Additional data sent with transaction - pub data: Option, - /// Transaction's nonce - pub nonce: Option, - /// Delay until this block condition. - pub condition: Option, + /// Sender + pub from: Option, + /// Recipient + pub to: Option, + /// Gas Price + pub gas_price: Option, + /// Gas + pub gas: Option, + /// Value of transaction in wei + pub value: Option, + /// Additional data sent with transaction + pub data: Option, + /// Transaction's nonce + pub nonce: Option, + /// Delay until this block condition. 
+ pub condition: Option, } pub fn format_ether(i: U256) -> String { - let mut string = format!("{}", i); - let idx = string.len() as isize - 18; - if idx <= 0 { - let mut prefix = String::from("0."); - for _ in 0..idx.abs() { - prefix.push('0'); - } - string = prefix + &string; - } else { - string.insert(idx as usize, '.'); - } - String::from(string.trim_end_matches('0').trim_end_matches('.')) + let mut string = format!("{}", i); + let idx = string.len() as isize - 18; + if idx <= 0 { + let mut prefix = String::from("0."); + for _ in 0..idx.abs() { + prefix.push('0'); + } + string = prefix + &string; + } else { + string.insert(idx as usize, '.'); + } + String::from(string.trim_end_matches('0').trim_end_matches('.')) } impl fmt::Display for TransactionRequest { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let eth = self.value.unwrap_or_default(); - match self.to { - Some(ref to) => write!( - f, - "{} ETH from {} to 0x{:?}", - Colour::White.bold().paint(format_ether(eth)), - Colour::White.bold().paint( - self.from.as_ref() - .map(|f| format!("0x{:?}", f)) - .unwrap_or_else(|| "?".to_string())), - to - ), - None => write!( - f, - "{} ETH from {} for contract creation", - Colour::White.bold().paint(format_ether(eth)), - Colour::White.bold().paint( - self.from.as_ref() - .map(|f| format!("0x{:?}", f)) - .unwrap_or_else(|| "?".to_string())), - ), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let eth = self.value.unwrap_or_default(); + match self.to { + Some(ref to) => write!( + f, + "{} ETH from {} to 0x{:?}", + Colour::White.bold().paint(format_ether(eth)), + Colour::White.bold().paint( + self.from + .as_ref() + .map(|f| format!("0x{:?}", f)) + .unwrap_or_else(|| "?".to_string()) + ), + to + ), + None => write!( + f, + "{} ETH from {} for contract creation", + Colour::White.bold().paint(format_ether(eth)), + Colour::White.bold().paint( + self.from + .as_ref() + .map(|f| format!("0x{:?}", f)) + .unwrap_or_else(|| "?".to_string()) + ), + 
), + } + } } impl From for TransactionRequest { - fn from(r: helpers::TransactionRequest) -> Self { - TransactionRequest { - from: r.from.map(Into::into), - to: r.to.map(Into::into), - gas_price: r.gas_price.map(Into::into), - gas: r.gas.map(Into::into), - value: r.value.map(Into::into), - data: r.data.map(Into::into), - nonce: r.nonce.map(Into::into), - condition: r.condition.map(Into::into), - } - } + fn from(r: helpers::TransactionRequest) -> Self { + TransactionRequest { + from: r.from.map(Into::into), + to: r.to.map(Into::into), + gas_price: r.gas_price.map(Into::into), + gas: r.gas.map(Into::into), + value: r.value.map(Into::into), + data: r.data.map(Into::into), + nonce: r.nonce.map(Into::into), + condition: r.condition.map(Into::into), + } + } } impl From for TransactionRequest { - fn from(r: helpers::FilledTransactionRequest) -> Self { - TransactionRequest { - from: Some(r.from), - to: r.to, - gas_price: Some(r.gas_price), - gas: Some(r.gas), - value: Some(r.value), - data: Some(r.data.into()), - nonce: r.nonce, - condition: r.condition, - } - } + fn from(r: helpers::FilledTransactionRequest) -> Self { + TransactionRequest { + from: Some(r.from), + to: r.to, + gas_price: Some(r.gas_price), + gas: Some(r.gas), + value: Some(r.value), + data: Some(r.data.into()), + nonce: r.nonce, + condition: r.condition, + } + } } impl Into for TransactionRequest { - fn into(self) -> helpers::TransactionRequest { - helpers::TransactionRequest { - from: self.from.map(Into::into), - to: self.to.map(Into::into), - gas_price: self.gas_price.map(Into::into), - gas: self.gas.map(Into::into), - value: self.value.map(Into::into), - data: self.data.map(Into::into), - nonce: self.nonce.map(Into::into), - condition: self.condition.map(Into::into), - } - } + fn into(self) -> helpers::TransactionRequest { + helpers::TransactionRequest { + from: self.from.map(Into::into), + to: self.to.map(Into::into), + gas_price: self.gas_price.map(Into::into), + gas: self.gas.map(Into::into), + 
value: self.value.map(Into::into), + data: self.data.map(Into::into), + nonce: self.nonce.map(Into::into), + condition: self.condition.map(Into::into), + } + } } #[cfg(test)] mod tests { - use std::str::FromStr; - use rustc_hex::FromHex; - use serde_json; - use v1::types::TransactionCondition; - use ethereum_types::{H160, U256}; - use super::*; + use super::*; + use ethereum_types::{H160, U256}; + use rustc_hex::FromHex; + use serde_json; + use std::str::FromStr; + use v1::types::TransactionCondition; - #[test] - fn transaction_request_deserialize() { - let s = r#"{ + #[test] + fn transaction_request_deserialize() { + let s = r#"{ "from":"0x0000000000000000000000000000000000000001", "to":"0x0000000000000000000000000000000000000002", "gasPrice":"0x1", @@ -154,23 +160,26 @@ mod tests { "nonce":"0x4", "condition": { "block": 19 } }"#; - let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, TransactionRequest { - from: Some(H160::from(1)), - to: Some(H160::from(2)), - gas_price: Some(U256::from(1)), - gas: Some(U256::from(2)), - value: Some(U256::from(3)), - data: Some(vec![0x12, 0x34, 0x56].into()), - nonce: Some(U256::from(4)), - condition: Some(TransactionCondition::Number(0x13)), - }); - } + assert_eq!( + deserialized, + TransactionRequest { + from: Some(H160::from(1)), + to: Some(H160::from(2)), + gas_price: Some(U256::from(1)), + gas: Some(U256::from(2)), + value: Some(U256::from(3)), + data: Some(vec![0x12, 0x34, 0x56].into()), + nonce: Some(U256::from(4)), + condition: Some(TransactionCondition::Number(0x13)), + } + ); + } - #[test] - fn transaction_request_deserialize2() { - let s = r#"{ + #[test] + fn transaction_request_deserialize2() { + let s = r#"{ "from": "0xb60e8dd61c5d32be8058bb8eb970870f07233155", "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", "gas": "0x76c0", @@ -178,9 +187,9 @@ mod tests { "value": "0x9184e72a", "data": 
"0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675" }"#; - let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, TransactionRequest { + assert_eq!(deserialized, TransactionRequest { from: Some(H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap()), to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), gas_price: Some(U256::from_str("9184e72a000").unwrap()), @@ -190,28 +199,31 @@ mod tests { nonce: None, condition: None, }); - } + } - #[test] - fn transaction_request_deserialize_empty() { - let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#; - let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + #[test] + fn transaction_request_deserialize_empty() { + let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#; + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, TransactionRequest { - from: Some(H160::from(1).into()), - to: None, - gas_price: None, - gas: None, - value: None, - data: None, - nonce: None, - condition: None, - }); - } + assert_eq!( + deserialized, + TransactionRequest { + from: Some(H160::from(1).into()), + to: None, + gas_price: None, + gas: None, + value: None, + data: None, + nonce: None, + condition: None, + } + ); + } - #[test] - fn transaction_request_deserialize_test() { - let s = r#"{ + #[test] + fn transaction_request_deserialize_test() { + let s = r#"{ "from":"0xb5f7502a2807cb23615c7456055e1d65b2508625", "to":"0x895d32f2db7d01ebb50053f9e48aacf26584fe40", "data":"0x8595bab1", @@ -219,23 +231,26 @@ mod tests { "gasPrice":"0x0ba43b7400" }"#; - let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); - assert_eq!(deserialized, TransactionRequest { - from: 
Some(H160::from_str("b5f7502a2807cb23615c7456055e1d65b2508625").unwrap()), - to: Some(H160::from_str("895d32f2db7d01ebb50053f9e48aacf26584fe40").unwrap()), - gas_price: Some(U256::from_str("0ba43b7400").unwrap()), - gas: Some(U256::from_str("2fd618").unwrap()), - value: None, - data: Some(vec![0x85, 0x95, 0xba, 0xb1].into()), - nonce: None, - condition: None, - }); - } + assert_eq!( + deserialized, + TransactionRequest { + from: Some(H160::from_str("b5f7502a2807cb23615c7456055e1d65b2508625").unwrap()), + to: Some(H160::from_str("895d32f2db7d01ebb50053f9e48aacf26584fe40").unwrap()), + gas_price: Some(U256::from_str("0ba43b7400").unwrap()), + gas: Some(U256::from_str("2fd618").unwrap()), + value: None, + data: Some(vec![0x85, 0x95, 0xba, 0xb1].into()), + nonce: None, + condition: None, + } + ); + } - #[test] - fn transaction_request_deserialize_error() { - let s = r#"{ + #[test] + fn transaction_request_deserialize_error() { + let s = r#"{ "from":"0xb5f7502a2807cb23615c7456055e1d65b2508625", "to":"", "data":"0x8595bab1", @@ -243,19 +258,19 @@ mod tests { "gasPrice":"0x0ba43b7400" }"#; - let deserialized = serde_json::from_str::(s); + let deserialized = serde_json::from_str::(s); - assert!(deserialized.is_err(), "Should be error because to is empty"); - } + assert!(deserialized.is_err(), "Should be error because to is empty"); + } - #[test] - fn test_format_ether() { - assert_eq!(&format_ether(U256::from(1000000000000000000u64)), "1"); - assert_eq!(&format_ether(U256::from(500000000000000000u64)), "0.5"); - assert_eq!(&format_ether(U256::from(50000000000000000u64)), "0.05"); - assert_eq!(&format_ether(U256::from(5000000000000000u64)), "0.005"); - assert_eq!(&format_ether(U256::from(2000000000000000000u64)), "2"); - assert_eq!(&format_ether(U256::from(2500000000000000000u64)), "2.5"); - assert_eq!(&format_ether(U256::from(10000000000000000000u64)), "10"); - } + #[test] + fn test_format_ether() { + assert_eq!(&format_ether(U256::from(1000000000000000000u64)), "1"); + 
assert_eq!(&format_ether(U256::from(500000000000000000u64)), "0.5"); + assert_eq!(&format_ether(U256::from(50000000000000000u64)), "0.05"); + assert_eq!(&format_ether(U256::from(5000000000000000u64)), "0.005"); + assert_eq!(&format_ether(U256::from(2000000000000000000u64)), "2"); + assert_eq!(&format_ether(U256::from(2500000000000000000u64)), "2.5"); + assert_eq!(&format_ether(U256::from(10000000000000000000u64)), "10"); + } } diff --git a/rpc/src/v1/types/work.rs b/rpc/src/v1/types/work.rs index ed6c7c8e9..947efaf5d 100644 --- a/rpc/src/v1/types/work.rs +++ b/rpc/src/v1/types/work.rs @@ -22,21 +22,30 @@ use serde::{Serialize, Serializer}; /// whether to send the block number. #[derive(Debug, PartialEq, Eq)] pub struct Work { - /// The proof-of-work hash. - pub pow_hash: H256, - /// The seed hash. - pub seed_hash: H256, - /// The target. - pub target: H256, - /// The block number: this isn't always stored. - pub number: Option, + /// The proof-of-work hash. + pub pow_hash: H256, + /// The seed hash. + pub seed_hash: H256, + /// The target. + pub target: H256, + /// The block number: this isn't always stored. 
+ pub number: Option, } impl Serialize for Work { - fn serialize(&self, s: S) -> Result where S: Serializer { - match self.number.as_ref() { - Some(num) => (&self.pow_hash, &self.seed_hash, &self.target, U256::from(*num)).serialize(s), - None => (&self.pow_hash, &self.seed_hash, &self.target).serialize(s), - } - } + fn serialize(&self, s: S) -> Result + where + S: Serializer, + { + match self.number.as_ref() { + Some(num) => ( + &self.pow_hash, + &self.seed_hash, + &self.target, + U256::from(*num), + ) + .serialize(s), + None => (&self.pow_hash, &self.seed_hash, &self.target).serialize(s), + } + } } diff --git a/rustfmt.toml b/rustfmt.toml deleted file mode 100644 index 16016b1b2..000000000 --- a/rustfmt.toml +++ /dev/null @@ -1,16 +0,0 @@ -verbose=false -max_width=100 -comment_width=100 -tab_spaces=4 -fn_call_width=100 -struct_lit_width=32 -fn_call_style="Visual" -single_line_if_else_max_width=100 -trailing_comma="Vertical" -chain_indent="Visual" -chain_one_line_max=100 -reorder_imports=true -format_strings=false -hard_tabs=true -wrap_match_arms=false -error_on_line_overflow=false \ No newline at end of file diff --git a/secret-store/src/acl_storage.rs b/secret-store/src/acl_storage.rs index 8c6656b10..90b842a2c 100644 --- a/secret-store/src/acl_storage.rs +++ b/secret-store/src/acl_storage.rs @@ -14,15 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use std::collections::{HashMap, HashSet}; -use parking_lot::{Mutex, RwLock}; use call_contract::CallContract; +use ethabi::FunctionOutputDecoder; use ethcore::client::{BlockId, ChainNotify, NewBlocks}; use ethereum_types::Address; -use ethabi::FunctionOutputDecoder; +use parking_lot::{Mutex, RwLock}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use trusted_client::TrustedClient; -use types::{Error, ServerKeyId, ContractAddress}; +use types::{ContractAddress, Error, ServerKeyId}; use_contract!(acl_storage, "res/acl_storage.json"); @@ -30,116 +32,140 @@ const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checke /// ACL storage of Secret Store pub trait AclStorage: Send + Sync { - /// Check if requestor can access document with hash `document` - fn check(&self, requester: Address, document: &ServerKeyId) -> Result; + /// Check if requestor can access document with hash `document` + fn check(&self, requester: Address, document: &ServerKeyId) -> Result; } /// On-chain ACL storage implementation. pub struct OnChainAclStorage { - /// Cached on-chain contract. - contract: Mutex, + /// Cached on-chain contract. + contract: Mutex, } /// Cached on-chain ACL storage contract. struct CachedContract { - /// Blockchain client. - client: TrustedClient, - /// Contract address source. - address_source: ContractAddress, - /// Current contract address. - contract_address: Option
, + /// Blockchain client. + client: TrustedClient, + /// Contract address source. + address_source: ContractAddress, + /// Current contract address. + contract_address: Option
, } /// Dummy ACL storage implementation (check always passed). #[derive(Default, Debug)] pub struct DummyAclStorage { - prohibited: RwLock>>, + prohibited: RwLock>>, } impl OnChainAclStorage { - pub fn new(trusted_client: TrustedClient, address_source: ContractAddress) -> Result, Error> { - let client = trusted_client.get_untrusted(); - let acl_storage = Arc::new(OnChainAclStorage { - contract: Mutex::new(CachedContract::new(trusted_client, address_source)), - }); - client - .ok_or_else(|| Error::Internal("Constructing OnChainAclStorage without active Client".into()))? - .add_notify(acl_storage.clone()); - Ok(acl_storage) - } + pub fn new( + trusted_client: TrustedClient, + address_source: ContractAddress, + ) -> Result, Error> { + let client = trusted_client.get_untrusted(); + let acl_storage = Arc::new(OnChainAclStorage { + contract: Mutex::new(CachedContract::new(trusted_client, address_source)), + }); + client + .ok_or_else(|| { + Error::Internal("Constructing OnChainAclStorage without active Client".into()) + })? 
+ .add_notify(acl_storage.clone()); + Ok(acl_storage) + } } impl AclStorage for OnChainAclStorage { - fn check(&self, requester: Address, document: &ServerKeyId) -> Result { - self.contract.lock().check(requester, document) - } + fn check(&self, requester: Address, document: &ServerKeyId) -> Result { + self.contract.lock().check(requester, document) + } } impl ChainNotify for OnChainAclStorage { - fn new_blocks(&self, new_blocks: NewBlocks) { - if new_blocks.has_more_blocks_to_import { return } - if !new_blocks.route.enacted().is_empty() || !new_blocks.route.retracted().is_empty() { - self.contract.lock().update_contract_address() - } - } + fn new_blocks(&self, new_blocks: NewBlocks) { + if new_blocks.has_more_blocks_to_import { + return; + } + if !new_blocks.route.enacted().is_empty() || !new_blocks.route.retracted().is_empty() { + self.contract.lock().update_contract_address() + } + } } impl CachedContract { - pub fn new(client: TrustedClient, address_source: ContractAddress) -> Self { - let mut contract = CachedContract { - client, - address_source, - contract_address: None, - }; - contract.update_contract_address(); - contract - } + pub fn new(client: TrustedClient, address_source: ContractAddress) -> Self { + let mut contract = CachedContract { + client, + address_source, + contract_address: None, + }; + contract.update_contract_address(); + contract + } - pub fn update_contract_address(&mut self) { - let contract_address = self.client.read_contract_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.into(), &self.address_source); - if contract_address != self.contract_address { - trace!(target: "secretstore", "Configuring for ACL checker contract from address {:?}", + pub fn update_contract_address(&mut self) { + let contract_address = self.client.read_contract_address( + ACL_CHECKER_CONTRACT_REGISTRY_NAME.into(), + &self.address_source, + ); + if contract_address != self.contract_address { + trace!(target: "secretstore", "Configuring for ACL checker contract from 
address {:?}", contract_address); - self.contract_address = contract_address; - } - } + self.contract_address = contract_address; + } + } - pub fn check(&mut self, requester: Address, document: &ServerKeyId) -> Result { - if let Some(client) = self.client.get() { - // call contract to check accesss - match self.contract_address { - Some(contract_address) => { - let (encoded, decoder) = acl_storage::functions::check_permissions::call(requester, document.clone()); - let d = client.call_contract(BlockId::Latest, contract_address, encoded) - .map_err(|e| Error::Internal(format!("ACL checker call error: {}", e.to_string())))?; - decoder.decode(&d) - .map_err(|e| Error::Internal(format!("ACL checker call error: {}", e.to_string()))) - }, - None => Err(Error::Internal("ACL checker contract is not configured".to_owned())), - } - } else { - Err(Error::Internal("Calling ACL contract without trusted blockchain client".into())) - } - } + pub fn check(&mut self, requester: Address, document: &ServerKeyId) -> Result { + if let Some(client) = self.client.get() { + // call contract to check accesss + match self.contract_address { + Some(contract_address) => { + let (encoded, decoder) = acl_storage::functions::check_permissions::call( + requester, + document.clone(), + ); + let d = client + .call_contract(BlockId::Latest, contract_address, encoded) + .map_err(|e| { + Error::Internal(format!("ACL checker call error: {}", e.to_string())) + })?; + decoder.decode(&d).map_err(|e| { + Error::Internal(format!("ACL checker call error: {}", e.to_string())) + }) + } + None => Err(Error::Internal( + "ACL checker contract is not configured".to_owned(), + )), + } + } else { + Err(Error::Internal( + "Calling ACL contract without trusted blockchain client".into(), + )) + } + } } impl DummyAclStorage { - /// Prohibit given requestor access to given documents - #[cfg(test)] - pub fn prohibit(&self, requester: Address, document: ServerKeyId) { - self.prohibited.write() - .entry(requester) - 
.or_insert_with(Default::default) - .insert(document); - } + /// Prohibit given requestor access to given documents + #[cfg(test)] + pub fn prohibit(&self, requester: Address, document: ServerKeyId) { + self.prohibited + .write() + .entry(requester) + .or_insert_with(Default::default) + .insert(document); + } } impl AclStorage for DummyAclStorage { - fn check(&self, requester: Address, document: &ServerKeyId) -> Result { - Ok(self.prohibited.read() - .get(&requester) - .map(|docs| !docs.contains(document)) - .unwrap_or(true)) - } + fn check(&self, requester: Address, document: &ServerKeyId) -> Result { + Ok(self + .prohibited + .read() + .get(&requester) + .map(|docs| !docs.contains(document)) + .unwrap_or(true)) + } } diff --git a/secret-store/src/helpers.rs b/secret-store/src/helpers.rs index 19b19d1ba..7d52006fc 100644 --- a/secret-store/src/helpers.rs +++ b/secret-store/src/helpers.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use ethcore::client::{Client, BlockChainClient, BlockId}; +use ethcore::client::{BlockChainClient, BlockId, Client}; use ethereum_types::H256; // TODO: Instead of a constant, make this based on consensus finality. @@ -23,7 +23,8 @@ pub const REQUEST_CONFIRMATIONS_REQUIRED: u64 = 3; /// Get hash of the last block with at least n confirmations. 
pub fn get_confirmed_block_hash(client: &Client, confirmations: u64) -> Option { - client.block_number(BlockId::Latest) - .map(|b| b.saturating_sub(confirmations)) - .and_then(|b| client.block_hash(BlockId::Number(b))) + client + .block_number(BlockId::Latest) + .map(|b| b.saturating_sub(confirmations)) + .and_then(|b| client.block_hash(BlockId::Number(b))) } diff --git a/secret-store/src/key_server.rs b/secret-store/src/key_server.rs index 98c2a11c2..a16b2f746 100644 --- a/secret-store/src/key_server.rs +++ b/secret-store/src/key_server.rs @@ -14,524 +14,796 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::BTreeSet; -use std::sync::Arc; -use parking_lot::Mutex; +use super::{acl_storage::AclStorage, key_server_set::KeyServerSet, key_storage::KeyStorage}; use crypto::DEFAULT_MAC; use ethkey::crypto; +use key_server_cluster::{ + math, new_network_cluster, ClusterClient, ClusterConfiguration as NetClusterConfiguration, + NetConnectionsManagerConfig, +}; use parity_runtime::Executor; -use super::acl_storage::AclStorage; -use super::key_storage::KeyStorage; -use super::key_server_set::KeyServerSet; -use key_server_cluster::{math, new_network_cluster}; -use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer, NodeKeyPair}; -use types::{Error, Public, RequestSignature, Requester, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, - ClusterConfiguration, MessageHash, EncryptedMessageSignature, NodeId}; -use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration, NetConnectionsManagerConfig}; +use parking_lot::Mutex; +use std::{collections::BTreeSet, sync::Arc}; +use traits::{ + AdminSessionsServer, DocumentKeyServer, KeyServer, MessageSigner, NodeKeyPair, + ServerKeyGenerator, +}; +use types::{ + ClusterConfiguration, EncryptedDocumentKey, EncryptedDocumentKeyShadow, + EncryptedMessageSignature, Error, 
MessageHash, NodeId, Public, RequestSignature, Requester, + ServerKeyId, +}; /// Secret store key server implementation pub struct KeyServerImpl { - data: Arc>, + data: Arc>, } /// Secret store key server data. pub struct KeyServerCore { - cluster: Arc, + cluster: Arc, } impl KeyServerImpl { - /// Create new key server instance - pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, - acl_storage: Arc, key_storage: Arc, executor: Executor) -> Result - { - Ok(KeyServerImpl { - data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, self_key_pair, acl_storage, key_storage, executor)?)), - }) - } + /// Create new key server instance + pub fn new( + config: &ClusterConfiguration, + key_server_set: Arc, + self_key_pair: Arc, + acl_storage: Arc, + key_storage: Arc, + executor: Executor, + ) -> Result { + Ok(KeyServerImpl { + data: Arc::new(Mutex::new(KeyServerCore::new( + config, + key_server_set, + self_key_pair, + acl_storage, + key_storage, + executor, + )?)), + }) + } - /// Get cluster client reference. - pub fn cluster(&self) -> Arc { - self.data.lock().cluster.clone() - } + /// Get cluster client reference. 
+ pub fn cluster(&self) -> Arc { + self.data.lock().cluster.clone() + } } impl KeyServer for KeyServerImpl {} impl AdminSessionsServer for KeyServerImpl { - fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet) -> Result<(), Error> { - let servers_set_change_session = self.data.lock().cluster - .new_servers_set_change_session(None, None, new_servers_set, old_set_signature, new_set_signature)?; - servers_set_change_session.as_servers_set_change() - .expect("new_servers_set_change_session creates servers_set_change_session; qed") - .wait().map_err(Into::into) - } + fn change_servers_set( + &self, + old_set_signature: RequestSignature, + new_set_signature: RequestSignature, + new_servers_set: BTreeSet, + ) -> Result<(), Error> { + let servers_set_change_session = self.data.lock().cluster.new_servers_set_change_session( + None, + None, + new_servers_set, + old_set_signature, + new_set_signature, + )?; + servers_set_change_session + .as_servers_set_change() + .expect("new_servers_set_change_session creates servers_set_change_session; qed") + .wait() + .map_err(Into::into) + } } impl ServerKeyGenerator for KeyServerImpl { - fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result { - // recover requestor' public key from signature - let address = author.address(key_id).map_err(Error::InsufficientRequesterData)?; + fn generate_key( + &self, + key_id: &ServerKeyId, + author: &Requester, + threshold: usize, + ) -> Result { + // recover requestor' public key from signature + let address = author + .address(key_id) + .map_err(Error::InsufficientRequesterData)?; - // generate server key - let generation_session = self.data.lock().cluster.new_generation_session(key_id.clone(), None, address, threshold)?; - generation_session.wait(None) - .expect("when wait is called without timeout it always returns Some; qed") - .map_err(Into::into) - } + // generate server key + let 
generation_session = self.data.lock().cluster.new_generation_session( + key_id.clone(), + None, + address, + threshold, + )?; + generation_session + .wait(None) + .expect("when wait is called without timeout it always returns Some; qed") + .map_err(Into::into) + } - fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result { - // recover requestor' public key from signature - let address = author.address(key_id).map_err(Error::InsufficientRequesterData)?; + fn restore_key_public( + &self, + key_id: &ServerKeyId, + author: &Requester, + ) -> Result { + // recover requestor' public key from signature + let address = author + .address(key_id) + .map_err(Error::InsufficientRequesterData)?; - // negotiate key version && retrieve common key data - let negotiation_session = self.data.lock().cluster.new_key_version_negotiation_session(*key_id)?; - negotiation_session.wait() - .and_then(|_| negotiation_session.common_key_data()) - .and_then(|key_share| if key_share.author == address { - Ok(key_share.public) - } else { - Err(Error::AccessDenied) - }) - .map_err(Into::into) - } + // negotiate key version && retrieve common key data + let negotiation_session = self + .data + .lock() + .cluster + .new_key_version_negotiation_session(*key_id)?; + negotiation_session + .wait() + .and_then(|_| negotiation_session.common_key_data()) + .and_then(|key_share| { + if key_share.author == address { + Ok(key_share.public) + } else { + Err(Error::AccessDenied) + } + }) + .map_err(Into::into) + } } impl DocumentKeyServer for KeyServerImpl { - fn store_document_key(&self, key_id: &ServerKeyId, author: &Requester, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> { - // store encrypted key - let encryption_session = self.data.lock().cluster.new_encryption_session(key_id.clone(), - author.clone(), common_point, encrypted_document_key)?; - encryption_session.wait(None).map_err(Into::into) - } + fn store_document_key( + &self, + key_id: 
&ServerKeyId, + author: &Requester, + common_point: Public, + encrypted_document_key: Public, + ) -> Result<(), Error> { + // store encrypted key + let encryption_session = self.data.lock().cluster.new_encryption_session( + key_id.clone(), + author.clone(), + common_point, + encrypted_document_key, + )?; + encryption_session.wait(None).map_err(Into::into) + } - fn generate_document_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result { - // recover requestor' public key from signature - let public = author.public(key_id).map_err(Error::InsufficientRequesterData)?; + fn generate_document_key( + &self, + key_id: &ServerKeyId, + author: &Requester, + threshold: usize, + ) -> Result { + // recover requestor' public key from signature + let public = author + .public(key_id) + .map_err(Error::InsufficientRequesterData)?; - // generate server key - let server_key = self.generate_key(key_id, author, threshold)?; + // generate server key + let server_key = self.generate_key(key_id, author, threshold)?; - // generate random document key - let document_key = math::generate_random_point()?; - let encrypted_document_key = math::encrypt_secret(&document_key, &server_key)?; + // generate random document key + let document_key = math::generate_random_point()?; + let encrypted_document_key = math::encrypt_secret(&document_key, &server_key)?; - // store document key in the storage - self.store_document_key(key_id, author, encrypted_document_key.common_point, encrypted_document_key.encrypted_point)?; + // store document key in the storage + self.store_document_key( + key_id, + author, + encrypted_document_key.common_point, + encrypted_document_key.encrypted_point, + )?; - // encrypt document key with requestor public key - let document_key = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &document_key) - .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; - Ok(document_key) - } + // encrypt document key with requestor public 
key + let document_key = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &document_key) + .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; + Ok(document_key) + } - fn restore_document_key(&self, key_id: &ServerKeyId, requester: &Requester) -> Result { - // recover requestor' public key from signature - let public = requester.public(key_id).map_err(Error::InsufficientRequesterData)?; + fn restore_document_key( + &self, + key_id: &ServerKeyId, + requester: &Requester, + ) -> Result { + // recover requestor' public key from signature + let public = requester + .public(key_id) + .map_err(Error::InsufficientRequesterData)?; - // decrypt document key - let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), - None, requester.clone(), None, false, false)?; - let document_key = decryption_session.wait(None) - .expect("when wait is called without timeout it always returns Some; qed")? - .decrypted_secret; + // decrypt document key + let decryption_session = self.data.lock().cluster.new_decryption_session( + key_id.clone(), + None, + requester.clone(), + None, + false, + false, + )?; + let document_key = decryption_session + .wait(None) + .expect("when wait is called without timeout it always returns Some; qed")? 
+ .decrypted_secret; - // encrypt document key with requestor public key - let document_key = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &document_key) - .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; - Ok(document_key) - } + // encrypt document key with requestor public key + let document_key = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &document_key) + .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; + Ok(document_key) + } - fn restore_document_key_shadow(&self, key_id: &ServerKeyId, requester: &Requester) -> Result { - let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), - None, requester.clone(), None, true, false)?; - decryption_session.wait(None) - .expect("when wait is called without timeout it always returns Some; qed") - .map_err(Into::into) - } + fn restore_document_key_shadow( + &self, + key_id: &ServerKeyId, + requester: &Requester, + ) -> Result { + let decryption_session = self.data.lock().cluster.new_decryption_session( + key_id.clone(), + None, + requester.clone(), + None, + true, + false, + )?; + decryption_session + .wait(None) + .expect("when wait is called without timeout it always returns Some; qed") + .map_err(Into::into) + } } impl MessageSigner for KeyServerImpl { - fn sign_message_schnorr(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result { - // recover requestor' public key from signature - let public = requester.public(key_id).map_err(Error::InsufficientRequesterData)?; + fn sign_message_schnorr( + &self, + key_id: &ServerKeyId, + requester: &Requester, + message: MessageHash, + ) -> Result { + // recover requestor' public key from signature + let public = requester + .public(key_id) + .map_err(Error::InsufficientRequesterData)?; - // sign message - let signing_session = self.data.lock().cluster.new_schnorr_signing_session(key_id.clone(), - requester.clone().into(), None, message)?; - let 
message_signature = signing_session.wait()?; + // sign message + let signing_session = self.data.lock().cluster.new_schnorr_signing_session( + key_id.clone(), + requester.clone().into(), + None, + message, + )?; + let message_signature = signing_session.wait()?; - // compose two message signature components into single one - let mut combined_signature = [0; 64]; - combined_signature[..32].clone_from_slice(&**message_signature.0); - combined_signature[32..].clone_from_slice(&**message_signature.1); + // compose two message signature components into single one + let mut combined_signature = [0; 64]; + combined_signature[..32].clone_from_slice(&**message_signature.0); + combined_signature[32..].clone_from_slice(&**message_signature.1); - // encrypt combined signature with requestor public key - let message_signature = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &combined_signature) - .map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err)))?; - Ok(message_signature) - } + // encrypt combined signature with requestor public key + let message_signature = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &combined_signature) + .map_err(|err| { + Error::Internal(format!("Error encrypting message signature: {}", err)) + })?; + Ok(message_signature) + } - fn sign_message_ecdsa(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result { - // recover requestor' public key from signature - let public = requester.public(key_id).map_err(Error::InsufficientRequesterData)?; + fn sign_message_ecdsa( + &self, + key_id: &ServerKeyId, + requester: &Requester, + message: MessageHash, + ) -> Result { + // recover requestor' public key from signature + let public = requester + .public(key_id) + .map_err(Error::InsufficientRequesterData)?; - // sign message - let signing_session = self.data.lock().cluster.new_ecdsa_signing_session(key_id.clone(), - requester.clone().into(), None, message)?; - let message_signature = 
signing_session.wait()?; + // sign message + let signing_session = self.data.lock().cluster.new_ecdsa_signing_session( + key_id.clone(), + requester.clone().into(), + None, + message, + )?; + let message_signature = signing_session.wait()?; - // encrypt combined signature with requestor public key - let message_signature = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &*message_signature) - .map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err)))?; - Ok(message_signature) - } + // encrypt combined signature with requestor public key + let message_signature = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &*message_signature) + .map_err(|err| { + Error::Internal(format!("Error encrypting message signature: {}", err)) + })?; + Ok(message_signature) + } } impl KeyServerCore { - pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, - acl_storage: Arc, key_storage: Arc, executor: Executor) -> Result - { - let cconfig = NetClusterConfiguration { - self_key_pair: self_key_pair.clone(), - key_server_set: key_server_set, - acl_storage: acl_storage, - key_storage: key_storage, - admin_public: config.admin_public, - preserve_sessions: false, - }; - let net_config = NetConnectionsManagerConfig { - listen_address: (config.listener_address.address.clone(), config.listener_address.port), - allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, - auto_migrate_enabled: config.auto_migrate_enabled, - }; + pub fn new( + config: &ClusterConfiguration, + key_server_set: Arc, + self_key_pair: Arc, + acl_storage: Arc, + key_storage: Arc, + executor: Executor, + ) -> Result { + let cconfig = NetClusterConfiguration { + self_key_pair: self_key_pair.clone(), + key_server_set: key_server_set, + acl_storage: acl_storage, + key_storage: key_storage, + admin_public: config.admin_public, + preserve_sessions: false, + }; + let net_config = NetConnectionsManagerConfig { + listen_address: ( + 
config.listener_address.address.clone(), + config.listener_address.port, + ), + allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, + auto_migrate_enabled: config.auto_migrate_enabled, + }; - let core = new_network_cluster(executor, cconfig, net_config)?; - let cluster = core.client(); - core.run()?; + let core = new_network_cluster(executor, cconfig, net_config)?; + let cluster = core.client(); + core.run()?; - Ok(KeyServerCore { - cluster, - }) - } + Ok(KeyServerCore { cluster }) + } } #[cfg(test)] pub mod tests { - use std::collections::BTreeSet; - use std::time; - use std::sync::Arc; - use std::net::SocketAddr; - use std::collections::BTreeMap; - use crypto::DEFAULT_MAC; - use ethkey::{self, crypto, Secret, Random, Generator, verify_public}; - use acl_storage::DummyAclStorage; - use key_storage::KeyStorage; - use key_storage::tests::DummyKeyStorage; - use node_key_pair::PlainNodeKeyPair; - use key_server_set::tests::MapKeyServerSet; - use key_server_cluster::math; - use ethereum_types::{H256, H520}; - use parity_runtime::Runtime; - use types::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId, - EncryptedDocumentKey, EncryptedDocumentKeyShadow, MessageHash, EncryptedMessageSignature, - Requester, NodeId}; - use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; - use super::KeyServerImpl; + use super::KeyServerImpl; + use acl_storage::DummyAclStorage; + use crypto::DEFAULT_MAC; + use ethereum_types::{H256, H520}; + use ethkey::{self, crypto, verify_public, Generator, Random, Secret}; + use key_server_cluster::math; + use key_server_set::tests::MapKeyServerSet; + use key_storage::{tests::DummyKeyStorage, KeyStorage}; + use node_key_pair::PlainNodeKeyPair; + use parity_runtime::Runtime; + use std::{ + collections::{BTreeMap, BTreeSet}, + net::SocketAddr, + sync::Arc, + time, + }; + use traits::{ + AdminSessionsServer, DocumentKeyServer, KeyServer, MessageSigner, 
ServerKeyGenerator, + }; + use types::{ + ClusterConfiguration, EncryptedDocumentKey, EncryptedDocumentKeyShadow, + EncryptedMessageSignature, Error, MessageHash, NodeAddress, NodeId, Public, + RequestSignature, Requester, ServerKeyId, + }; - #[derive(Default)] - pub struct DummyKeyServer; + #[derive(Default)] + pub struct DummyKeyServer; - impl KeyServer for DummyKeyServer {} + impl KeyServer for DummyKeyServer {} - impl AdminSessionsServer for DummyKeyServer { - fn change_servers_set(&self, _old_set_signature: RequestSignature, _new_set_signature: RequestSignature, _new_servers_set: BTreeSet) -> Result<(), Error> { - unimplemented!("test-only") - } - } + impl AdminSessionsServer for DummyKeyServer { + fn change_servers_set( + &self, + _old_set_signature: RequestSignature, + _new_set_signature: RequestSignature, + _new_servers_set: BTreeSet, + ) -> Result<(), Error> { + unimplemented!("test-only") + } + } - impl ServerKeyGenerator for DummyKeyServer { - fn generate_key(&self, _key_id: &ServerKeyId, _author: &Requester, _threshold: usize) -> Result { - unimplemented!("test-only") - } + impl ServerKeyGenerator for DummyKeyServer { + fn generate_key( + &self, + _key_id: &ServerKeyId, + _author: &Requester, + _threshold: usize, + ) -> Result { + unimplemented!("test-only") + } - fn restore_key_public(&self, _key_id: &ServerKeyId, _author: &Requester) -> Result { - unimplemented!("test-only") - } - } + fn restore_key_public( + &self, + _key_id: &ServerKeyId, + _author: &Requester, + ) -> Result { + unimplemented!("test-only") + } + } - impl DocumentKeyServer for DummyKeyServer { - fn store_document_key(&self, _key_id: &ServerKeyId, _author: &Requester, _common_point: Public, _encrypted_document_key: Public) -> Result<(), Error> { - unimplemented!("test-only") - } + impl DocumentKeyServer for DummyKeyServer { + fn store_document_key( + &self, + _key_id: &ServerKeyId, + _author: &Requester, + _common_point: Public, + _encrypted_document_key: Public, + ) -> Result<(), 
Error> { + unimplemented!("test-only") + } - fn generate_document_key(&self, _key_id: &ServerKeyId, _author: &Requester, _threshold: usize) -> Result { - unimplemented!("test-only") - } + fn generate_document_key( + &self, + _key_id: &ServerKeyId, + _author: &Requester, + _threshold: usize, + ) -> Result { + unimplemented!("test-only") + } - fn restore_document_key(&self, _key_id: &ServerKeyId, _requester: &Requester) -> Result { - unimplemented!("test-only") - } + fn restore_document_key( + &self, + _key_id: &ServerKeyId, + _requester: &Requester, + ) -> Result { + unimplemented!("test-only") + } - fn restore_document_key_shadow(&self, _key_id: &ServerKeyId, _requester: &Requester) -> Result { - unimplemented!("test-only") - } - } + fn restore_document_key_shadow( + &self, + _key_id: &ServerKeyId, + _requester: &Requester, + ) -> Result { + unimplemented!("test-only") + } + } - impl MessageSigner for DummyKeyServer { - fn sign_message_schnorr(&self, _key_id: &ServerKeyId, _requester: &Requester, _message: MessageHash) -> Result { - unimplemented!("test-only") - } + impl MessageSigner for DummyKeyServer { + fn sign_message_schnorr( + &self, + _key_id: &ServerKeyId, + _requester: &Requester, + _message: MessageHash, + ) -> Result { + unimplemented!("test-only") + } - fn sign_message_ecdsa(&self, _key_id: &ServerKeyId, _requester: &Requester, _message: MessageHash) -> Result { - unimplemented!("test-only") - } - } + fn sign_message_ecdsa( + &self, + _key_id: &ServerKeyId, + _requester: &Requester, + _message: MessageHash, + ) -> Result { + unimplemented!("test-only") + } + } - fn make_key_servers(start_port: u16, num_nodes: usize) -> (Vec, Vec>, Runtime) { - let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); - let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { - listener_address: NodeAddress { - address: "127.0.0.1".into(), - port: start_port + (i as u16), - }, - nodes: key_pairs.iter().enumerate().map(|(j, kp)| 
(kp.public().clone(), - NodeAddress { - address: "127.0.0.1".into(), - port: start_port + (j as u16), - })).collect(), - key_server_set_contract_address: None, - allow_connecting_to_higher_nodes: false, - admin_public: None, - auto_migrate_enabled: false, - }).collect(); - let key_servers_set: BTreeMap = configs[0].nodes.iter() - .map(|(k, a)| (k.clone(), format!("{}:{}", a.address, a.port).parse().unwrap())) - .collect(); - let key_storages = (0..num_nodes).map(|_| Arc::new(DummyKeyStorage::default())).collect::>(); - let runtime = Runtime::with_thread_count(4); - let key_servers: Vec<_> = configs.into_iter().enumerate().map(|(i, cfg)| - KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(false, key_servers_set.clone())), - Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())), - Arc::new(DummyAclStorage::default()), - key_storages[i].clone(), runtime.executor()).unwrap() - ).collect(); + fn make_key_servers( + start_port: u16, + num_nodes: usize, + ) -> (Vec, Vec>, Runtime) { + let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); + let configs: Vec<_> = (0..num_nodes) + .map(|i| ClusterConfiguration { + listener_address: NodeAddress { + address: "127.0.0.1".into(), + port: start_port + (i as u16), + }, + nodes: key_pairs + .iter() + .enumerate() + .map(|(j, kp)| { + ( + kp.public().clone(), + NodeAddress { + address: "127.0.0.1".into(), + port: start_port + (j as u16), + }, + ) + }) + .collect(), + key_server_set_contract_address: None, + allow_connecting_to_higher_nodes: false, + admin_public: None, + auto_migrate_enabled: false, + }) + .collect(); + let key_servers_set: BTreeMap = configs[0] + .nodes + .iter() + .map(|(k, a)| { + ( + k.clone(), + format!("{}:{}", a.address, a.port).parse().unwrap(), + ) + }) + .collect(); + let key_storages = (0..num_nodes) + .map(|_| Arc::new(DummyKeyStorage::default())) + .collect::>(); + let runtime = Runtime::with_thread_count(4); + let key_servers: Vec<_> = configs + .into_iter() + 
.enumerate() + .map(|(i, cfg)| { + KeyServerImpl::new( + &cfg, + Arc::new(MapKeyServerSet::new(false, key_servers_set.clone())), + Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())), + Arc::new(DummyAclStorage::default()), + key_storages[i].clone(), + runtime.executor(), + ) + .unwrap() + }) + .collect(); - // wait until connections are established. It is fast => do not bother with events here - let start = time::Instant::now(); - let mut tried_reconnections = false; - loop { - if key_servers.iter().all(|ks| ks.cluster().is_fully_connected()) { - break; - } + // wait until connections are established. It is fast => do not bother with events here + let start = time::Instant::now(); + let mut tried_reconnections = false; + loop { + if key_servers + .iter() + .all(|ks| ks.cluster().is_fully_connected()) + { + break; + } - let old_tried_reconnections = tried_reconnections; - let mut fully_connected = true; - for key_server in &key_servers { - if !key_server.cluster().is_fully_connected() { - fully_connected = false; - if !old_tried_reconnections { - tried_reconnections = true; - key_server.cluster().connect(); - } - } - } - if fully_connected { - break; - } - if time::Instant::now() - start > time::Duration::from_millis(3000) { - panic!("connections are not established in 3000ms"); - } - } + let old_tried_reconnections = tried_reconnections; + let mut fully_connected = true; + for key_server in &key_servers { + if !key_server.cluster().is_fully_connected() { + fully_connected = false; + if !old_tried_reconnections { + tried_reconnections = true; + key_server.cluster().connect(); + } + } + } + if fully_connected { + break; + } + if time::Instant::now() - start > time::Duration::from_millis(3000) { + panic!("connections are not established in 3000ms"); + } + } - (key_servers, key_storages, runtime) - } + (key_servers, key_storages, runtime) + } - #[test] - fn document_key_generation_and_retrievement_works_over_network_with_single_node() { - let _ = 
::env_logger::try_init(); - let (key_servers, _, runtime) = make_key_servers(6070, 1); + #[test] + fn document_key_generation_and_retrievement_works_over_network_with_single_node() { + let _ = ::env_logger::try_init(); + let (key_servers, _, runtime) = make_key_servers(6070, 1); - // generate document key - let threshold = 0; - let document = Random.generate().unwrap().secret().clone(); - let secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = key_servers[0].generate_document_key(&document, &signature.clone().into(), threshold).unwrap(); - let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); + // generate document key + let threshold = 0; + let document = Random.generate().unwrap().secret().clone(); + let secret = Random.generate().unwrap().secret().clone(); + let signature = ethkey::sign(&secret, &document).unwrap(); + let generated_key = key_servers[0] + .generate_document_key(&document, &signature.clone().into(), threshold) + .unwrap(); + let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); - // now let's try to retrieve key back - for key_server in key_servers.iter() { - let retrieved_key = key_server.restore_document_key(&document, &signature.clone().into()).unwrap(); - let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); - assert_eq!(retrieved_key, generated_key); - } - drop(runtime); - } + // now let's try to retrieve key back + for key_server in key_servers.iter() { + let retrieved_key = key_server + .restore_document_key(&document, &signature.clone().into()) + .unwrap(); + let retrieved_key = + crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); + assert_eq!(retrieved_key, generated_key); + } + drop(runtime); + } - #[test] - fn document_key_generation_and_retrievement_works_over_network_with_3_nodes() { - let _ = ::env_logger::try_init(); 
- let (key_servers, key_storages, runtime) = make_key_servers(6080, 3); + #[test] + fn document_key_generation_and_retrievement_works_over_network_with_3_nodes() { + let _ = ::env_logger::try_init(); + let (key_servers, key_storages, runtime) = make_key_servers(6080, 3); - let test_cases = [0, 1, 2]; - for threshold in &test_cases { - // generate document key - let document = Random.generate().unwrap().secret().clone(); - let secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = key_servers[0].generate_document_key(&document, &signature.clone().into(), *threshold).unwrap(); - let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); + let test_cases = [0, 1, 2]; + for threshold in &test_cases { + // generate document key + let document = Random.generate().unwrap().secret().clone(); + let secret = Random.generate().unwrap().secret().clone(); + let signature = ethkey::sign(&secret, &document).unwrap(); + let generated_key = key_servers[0] + .generate_document_key(&document, &signature.clone().into(), *threshold) + .unwrap(); + let generated_key = + crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); - // now let's try to retrieve key back - for (i, key_server) in key_servers.iter().enumerate() { - let retrieved_key = key_server.restore_document_key(&document, &signature.clone().into()).unwrap(); - let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); - assert_eq!(retrieved_key, generated_key); + // now let's try to retrieve key back + for (i, key_server) in key_servers.iter().enumerate() { + let retrieved_key = key_server + .restore_document_key(&document, &signature.clone().into()) + .unwrap(); + let retrieved_key = + crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); + assert_eq!(retrieved_key, generated_key); - let key_share = 
key_storages[i].get(&document).unwrap().unwrap(); - assert!(key_share.common_point.is_some()); - assert!(key_share.encrypted_point.is_some()); - } - } - drop(runtime); - } + let key_share = key_storages[i].get(&document).unwrap().unwrap(); + assert!(key_share.common_point.is_some()); + assert!(key_share.encrypted_point.is_some()); + } + } + drop(runtime); + } - #[test] - fn server_key_generation_and_storing_document_key_works_over_network_with_3_nodes() { - let _ = ::env_logger::try_init(); - let (key_servers, _, runtime) = make_key_servers(6090, 3); + #[test] + fn server_key_generation_and_storing_document_key_works_over_network_with_3_nodes() { + let _ = ::env_logger::try_init(); + let (key_servers, _, runtime) = make_key_servers(6090, 3); - let test_cases = [0, 1, 2]; - for threshold in &test_cases { - // generate server key - let server_key_id = Random.generate().unwrap().secret().clone(); - let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), *threshold).unwrap(); + let test_cases = [0, 1, 2]; + for threshold in &test_cases { + // generate server key + let server_key_id = Random.generate().unwrap().secret().clone(); + let requestor_secret = Random.generate().unwrap().secret().clone(); + let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); + let server_public = key_servers[0] + .generate_key(&server_key_id, &signature.clone().into(), *threshold) + .unwrap(); - // generate document key (this is done by KS client so that document key is unknown to any KS) - let generated_key = Random.generate().unwrap().public().clone(); - let encrypted_document_key = math::encrypt_secret(&generated_key, &server_public).unwrap(); + // generate document key (this is done by KS client so that document key is unknown to any KS) + let generated_key = 
Random.generate().unwrap().public().clone(); + let encrypted_document_key = + math::encrypt_secret(&generated_key, &server_public).unwrap(); - // store document key - key_servers[0].store_document_key(&server_key_id, &signature.clone().into(), - encrypted_document_key.common_point, encrypted_document_key.encrypted_point).unwrap(); + // store document key + key_servers[0] + .store_document_key( + &server_key_id, + &signature.clone().into(), + encrypted_document_key.common_point, + encrypted_document_key.encrypted_point, + ) + .unwrap(); - // now let's try to retrieve key back - for key_server in key_servers.iter() { - let retrieved_key = key_server.restore_document_key(&server_key_id, &signature.clone().into()).unwrap(); - let retrieved_key = crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &retrieved_key).unwrap(); - let retrieved_key = Public::from_slice(&retrieved_key); - assert_eq!(retrieved_key, generated_key); - } - } - drop(runtime); - } + // now let's try to retrieve key back + for key_server in key_servers.iter() { + let retrieved_key = key_server + .restore_document_key(&server_key_id, &signature.clone().into()) + .unwrap(); + let retrieved_key = + crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &retrieved_key) + .unwrap(); + let retrieved_key = Public::from_slice(&retrieved_key); + assert_eq!(retrieved_key, generated_key); + } + } + drop(runtime); + } - #[test] - fn server_key_generation_and_message_signing_works_over_network_with_3_nodes() { - let _ = ::env_logger::try_init(); - let (key_servers, _, runtime) = make_key_servers(6100, 3); + #[test] + fn server_key_generation_and_message_signing_works_over_network_with_3_nodes() { + let _ = ::env_logger::try_init(); + let (key_servers, _, runtime) = make_key_servers(6100, 3); - let test_cases = [0, 1, 2]; - for threshold in &test_cases { - // generate server key - let server_key_id = Random.generate().unwrap().secret().clone(); - let requestor_secret = 
Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), *threshold).unwrap(); + let test_cases = [0, 1, 2]; + for threshold in &test_cases { + // generate server key + let server_key_id = Random.generate().unwrap().secret().clone(); + let requestor_secret = Random.generate().unwrap().secret().clone(); + let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); + let server_public = key_servers[0] + .generate_key(&server_key_id, &signature.clone().into(), *threshold) + .unwrap(); - // sign message - let message_hash = H256::from(42); - let combined_signature = key_servers[0].sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()).unwrap(); - let combined_signature = crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &combined_signature).unwrap(); - let signature_c = Secret::from_slice(&combined_signature[..32]).unwrap(); - let signature_s = Secret::from_slice(&combined_signature[32..]).unwrap(); + // sign message + let message_hash = H256::from(42); + let combined_signature = key_servers[0] + .sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()) + .unwrap(); + let combined_signature = + crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &combined_signature) + .unwrap(); + let signature_c = Secret::from_slice(&combined_signature[..32]).unwrap(); + let signature_s = Secret::from_slice(&combined_signature[32..]).unwrap(); - // check signature - assert_eq!(math::verify_schnorr_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true)); - } - drop(runtime); - } + // check signature + assert_eq!( + math::verify_schnorr_signature( + &server_public, + &(signature_c, signature_s), + &message_hash + ), + Ok(true) + ); + } + drop(runtime); + } - #[test] - fn decryption_session_is_delegated_when_node_does_not_have_key_share() { - 
let _ = ::env_logger::try_init(); - let (key_servers, key_storages, runtime) = make_key_servers(6110, 3); + #[test] + fn decryption_session_is_delegated_when_node_does_not_have_key_share() { + let _ = ::env_logger::try_init(); + let (key_servers, key_storages, runtime) = make_key_servers(6110, 3); - // generate document key - let threshold = 0; - let document = Random.generate().unwrap().secret().clone(); - let secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = key_servers[0].generate_document_key(&document, &signature.clone().into(), threshold).unwrap(); - let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); + // generate document key + let threshold = 0; + let document = Random.generate().unwrap().secret().clone(); + let secret = Random.generate().unwrap().secret().clone(); + let signature = ethkey::sign(&secret, &document).unwrap(); + let generated_key = key_servers[0] + .generate_document_key(&document, &signature.clone().into(), threshold) + .unwrap(); + let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); - // remove key from node0 - key_storages[0].remove(&document).unwrap(); + // remove key from node0 + key_storages[0].remove(&document).unwrap(); - // now let's try to retrieve key back by requesting it from node0, so that session must be delegated - let retrieved_key = key_servers[0].restore_document_key(&document, &signature.into()).unwrap(); - let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); - assert_eq!(retrieved_key, generated_key); - drop(runtime); - } + // now let's try to retrieve key back by requesting it from node0, so that session must be delegated + let retrieved_key = key_servers[0] + .restore_document_key(&document, &signature.into()) + .unwrap(); + let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); + 
assert_eq!(retrieved_key, generated_key); + drop(runtime); + } - #[test] - fn schnorr_signing_session_is_delegated_when_node_does_not_have_key_share() { - let _ = ::env_logger::try_init(); - let (key_servers, key_storages, runtime) = make_key_servers(6114, 3); - let threshold = 1; + #[test] + fn schnorr_signing_session_is_delegated_when_node_does_not_have_key_share() { + let _ = ::env_logger::try_init(); + let (key_servers, key_storages, runtime) = make_key_servers(6114, 3); + let threshold = 1; - // generate server key - let server_key_id = Random.generate().unwrap().secret().clone(); - let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap(); + // generate server key + let server_key_id = Random.generate().unwrap().secret().clone(); + let requestor_secret = Random.generate().unwrap().secret().clone(); + let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); + let server_public = key_servers[0] + .generate_key(&server_key_id, &signature.clone().into(), threshold) + .unwrap(); - // remove key from node0 - key_storages[0].remove(&server_key_id).unwrap(); + // remove key from node0 + key_storages[0].remove(&server_key_id).unwrap(); - // sign message - let message_hash = H256::from(42); - let combined_signature = key_servers[0].sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()).unwrap(); - let combined_signature = crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &combined_signature).unwrap(); - let signature_c = Secret::from_slice(&combined_signature[..32]).unwrap(); - let signature_s = Secret::from_slice(&combined_signature[32..]).unwrap(); + // sign message + let message_hash = H256::from(42); + let combined_signature = key_servers[0] + .sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()) + 
.unwrap(); + let combined_signature = + crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &combined_signature).unwrap(); + let signature_c = Secret::from_slice(&combined_signature[..32]).unwrap(); + let signature_s = Secret::from_slice(&combined_signature[32..]).unwrap(); - // check signature - assert_eq!(math::verify_schnorr_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true)); - drop(runtime); - } + // check signature + assert_eq!( + math::verify_schnorr_signature( + &server_public, + &(signature_c, signature_s), + &message_hash + ), + Ok(true) + ); + drop(runtime); + } - #[test] - fn ecdsa_signing_session_is_delegated_when_node_does_not_have_key_share() { - let _ = ::env_logger::try_init(); - let (key_servers, key_storages, runtime) = make_key_servers(6117, 4); - let threshold = 1; + #[test] + fn ecdsa_signing_session_is_delegated_when_node_does_not_have_key_share() { + let _ = ::env_logger::try_init(); + let (key_servers, key_storages, runtime) = make_key_servers(6117, 4); + let threshold = 1; - // generate server key - let server_key_id = Random.generate().unwrap().secret().clone(); - let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap(); + // generate server key + let server_key_id = Random.generate().unwrap().secret().clone(); + let requestor_secret = Random.generate().unwrap().secret().clone(); + let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); + let server_public = key_servers[0] + .generate_key(&server_key_id, &signature.clone().into(), threshold) + .unwrap(); - // remove key from node0 - key_storages[0].remove(&server_key_id).unwrap(); + // remove key from node0 + key_storages[0].remove(&server_key_id).unwrap(); - // sign message - let message_hash = H256::random(); - let signature = 
key_servers[0].sign_message_ecdsa(&server_key_id, &signature.into(), message_hash.clone()).unwrap(); - let signature = crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &signature).unwrap(); - let signature: H520 = signature[0..65].into(); + // sign message + let message_hash = H256::random(); + let signature = key_servers[0] + .sign_message_ecdsa(&server_key_id, &signature.into(), message_hash.clone()) + .unwrap(); + let signature = + crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &signature).unwrap(); + let signature: H520 = signature[0..65].into(); - // check signature - assert!(verify_public(&server_public, &signature.into(), &message_hash).unwrap()); - drop(runtime); - } + // check signature + assert!(verify_public(&server_public, &signature.into(), &message_hash).unwrap()); + drop(runtime); + } - #[test] - fn servers_set_change_session_works_over_network() { - // TODO [Test] - } + #[test] + fn servers_set_change_session_works_over_network() { + // TODO [Test] + } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs index 93b5c7242..d11d6c6bd 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs @@ -14,20 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use std::collections::{BTreeSet, BTreeMap}; use ethereum_types::{Address, H256}; use ethkey::Secret; -use parking_lot::{Mutex, Condvar}; -use key_server_cluster::{Error, SessionId, NodeId, DocumentKeyShare}; -use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession}; -use key_server_cluster::decryption_session::SessionImpl as DecryptionSession; -use key_server_cluster::signing_session_ecdsa::SessionImpl as EcdsaSigningSession; -use key_server_cluster::signing_session_schnorr::SessionImpl as SchnorrSigningSession; -use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, - KeyVersions, KeyVersionsError, FailedKeyVersionContinueAction, CommonKeyData}; -use key_server_cluster::admin_sessions::ShareChangeSessionMeta; +use key_server_cluster::{ + admin_sessions::ShareChangeSessionMeta, + cluster::Cluster, + cluster_sessions::{ClusterSession, SessionIdWithSubSession}, + decryption_session::SessionImpl as DecryptionSession, + message::{ + CommonKeyData, FailedKeyVersionContinueAction, KeyVersionNegotiationMessage, KeyVersions, + KeyVersionsError, Message, RequestKeyVersions, + }, + signing_session_ecdsa::SessionImpl as EcdsaSigningSession, + signing_session_schnorr::SessionImpl as SchnorrSigningSession, + DocumentKeyShare, Error, NodeId, SessionId, +}; +use parking_lot::{Condvar, Mutex}; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; // TODO [Opt]: change sessions so that versions are sent by chunks. /// Number of versions sent in single message. @@ -35,931 +41,1218 @@ const VERSIONS_PER_MESSAGE: usize = 32; /// Key version negotiation transport. pub trait SessionTransport { - /// Broadcast message to all nodes. - fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error>; - /// Send message to given node. 
- fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error>; + /// Broadcast message to all nodes. + fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error>; + /// Send message to given node. + fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error>; } /// Key version negotiation result computer. pub trait SessionResultComputer: Send + Sync { - /// Compute result of session, if possible. - fn compute_result(&self, threshold: Option, confirmations: &BTreeSet, versions: &BTreeMap>) -> Option>; + /// Compute result of session, if possible. + fn compute_result( + &self, + threshold: Option, + confirmations: &BTreeSet, + versions: &BTreeMap>, + ) -> Option>; } /// Key discovery session API. pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex, } /// Action after key version is negotiated. #[derive(Clone)] pub enum ContinueAction { - /// Decryption session + origin + is_shadow_decryption + is_broadcast_decryption. - Decrypt(Arc, Option
, bool, bool), - /// Schnorr signing session + message hash. - SchnorrSign(Arc, H256), - /// ECDSA signing session + message hash. - EcdsaSign(Arc, H256), + /// Decryption session + origin + is_shadow_decryption + is_broadcast_decryption. + Decrypt(Arc, Option
, bool, bool), + /// Schnorr signing session + message hash. + SchnorrSign(Arc, H256), + /// ECDSA signing session + message hash. + EcdsaSign(Arc, H256), } /// Failed action after key version is negotiated. #[derive(Clone, Debug, PartialEq)] pub enum FailedContinueAction { - /// Decryption origin + requester. - Decrypt(Option
, Address), + /// Decryption origin + requester. + Decrypt(Option
, Address), } /// Immutable session data. struct SessionCore { - /// Session meta. - pub meta: ShareChangeSessionMeta, - /// Sub-session id. - pub sub_session: Secret, - /// Key share. - pub key_share: Option, - /// Session result computer. - pub result_computer: Arc, - /// Session transport. - pub transport: T, - /// Session nonce. - pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session meta. + pub meta: ShareChangeSessionMeta, + /// Sub-session id. + pub sub_session: Secret, + /// Key share. + pub key_share: Option, + /// Session result computer. + pub result_computer: Arc, + /// Session transport. + pub transport: T, + /// Session nonce. + pub nonce: u64, + /// SessionImpl completion condvar. + pub completed: Condvar, } /// Mutable session data. struct SessionData { - /// Session state. - pub state: SessionState, - /// Initialization confirmations. - pub confirmations: Option>, - /// Common key data that nodes have agreed upon. - pub key_share: Option, - /// { Version => Nodes } - pub versions: Option>>, - /// Session result. - pub result: Option, Error>>, - /// Continue action. - pub continue_with: Option, - /// Failed continue action (reported in error message by master node). - pub failed_continue_with: Option, + /// Session state. + pub state: SessionState, + /// Initialization confirmations. + pub confirmations: Option>, + /// Common key data that nodes have agreed upon. + pub key_share: Option, + /// { Version => Nodes } + pub versions: Option>>, + /// Session result. + pub result: Option, Error>>, + /// Continue action. + pub continue_with: Option, + /// Failed continue action (reported in error message by master node). + pub failed_continue_with: Option, } /// SessionImpl creation parameters pub struct SessionParams { - /// Session meta. - pub meta: ShareChangeSessionMeta, - /// Sub-session id. - pub sub_session: Secret, - /// Key share. - pub key_share: Option, - /// Session result computer. 
- pub result_computer: Arc, - /// Session transport to communicate to other cluster nodes. - pub transport: T, - /// Session nonce. - pub nonce: u64, + /// Session meta. + pub meta: ShareChangeSessionMeta, + /// Sub-session id. + pub sub_session: Secret, + /// Key share. + pub key_share: Option, + /// Session result computer. + pub result_computer: Arc, + /// Session transport to communicate to other cluster nodes. + pub transport: T, + /// Session nonce. + pub nonce: u64, } /// Signing session state. #[derive(Debug, PartialEq)] enum SessionState { - /// Waiting for initialization. - WaitingForInitialization, - /// Waiting for responses. - WaitingForResponses, - /// Session is completed. - Finished, + /// Waiting for initialization. + WaitingForInitialization, + /// Waiting for responses. + WaitingForResponses, + /// Session is completed. + Finished, } /// Isolated session transport. pub struct IsolatedSessionTransport { - /// Cluster. - pub cluster: Arc, - /// Key id. - pub key_id: SessionId, - /// Sub session id. - pub sub_session: Secret, - /// Session-level nonce. - pub nonce: u64, + /// Cluster. + pub cluster: Arc, + /// Key id. + pub key_id: SessionId, + /// Sub session id. + pub sub_session: Secret, + /// Session-level nonce. + pub nonce: u64, } /// Fastest session result computer. Computes first possible version that can be recovered on this node. /// If there's no such version, selects version with the most support. pub struct FastestResultComputer { - /// This node id. - self_node_id: NodeId, - /// Threshold (if known). - threshold: Option, - /// Count of all configured key server nodes. - configured_nodes_count: usize, - /// Count of all connected key server nodes. - connected_nodes_count: usize, + /// This node id. + self_node_id: NodeId, + /// Threshold (if known). + threshold: Option, + /// Count of all configured key server nodes. + configured_nodes_count: usize, + /// Count of all connected key server nodes. 
+ connected_nodes_count: usize, } /// Selects version with most support, waiting for responses from all nodes. pub struct LargestSupportResultComputer; -impl SessionImpl where T: SessionTransport { - /// Create new session. - pub fn new(params: SessionParams) -> Self { - SessionImpl { - core: SessionCore { - meta: params.meta, - sub_session: params.sub_session, - key_share: params.key_share.clone(), - result_computer: params.result_computer, - transport: params.transport, - nonce: params.nonce, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::WaitingForInitialization, - confirmations: None, - key_share: params.key_share.map(|key_share| DocumentKeyShare { - threshold: key_share.threshold, - author: key_share.author, - public: key_share.public, - ..Default::default() - }), - versions: None, - result: None, - continue_with: None, - failed_continue_with: None, - }) - } - } +impl SessionImpl +where + T: SessionTransport, +{ + /// Create new session. + pub fn new(params: SessionParams) -> Self { + SessionImpl { + core: SessionCore { + meta: params.meta, + sub_session: params.sub_session, + key_share: params.key_share.clone(), + result_computer: params.result_computer, + transport: params.transport, + nonce: params.nonce, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + state: SessionState::WaitingForInitialization, + confirmations: None, + key_share: params.key_share.map(|key_share| DocumentKeyShare { + threshold: key_share.threshold, + author: key_share.author, + public: key_share.public, + ..Default::default() + }), + versions: None, + result: None, + continue_with: None, + failed_continue_with: None, + }), + } + } - /// Return session meta. - pub fn meta(&self) -> &ShareChangeSessionMeta { - &self.core.meta - } + /// Return session meta. + pub fn meta(&self) -> &ShareChangeSessionMeta { + &self.core.meta + } - /// Return result computer reference. 
- pub fn version_holders(&self, version: &H256) -> Result, Error> { - Ok(self.data.lock().versions.as_ref().ok_or(Error::InvalidStateForRequest)? - .get(version).ok_or(Error::ServerKeyIsNotFound)? - .clone()) - } + /// Return result computer reference. + pub fn version_holders(&self, version: &H256) -> Result, Error> { + Ok(self + .data + .lock() + .versions + .as_ref() + .ok_or(Error::InvalidStateForRequest)? + .get(version) + .ok_or(Error::ServerKeyIsNotFound)? + .clone()) + } - /// Set continue action. - pub fn set_continue_action(&self, action: ContinueAction) { - self.data.lock().continue_with = Some(action); - } + /// Set continue action. + pub fn set_continue_action(&self, action: ContinueAction) { + self.data.lock().continue_with = Some(action); + } - /// Take continue action. - pub fn take_continue_action(&self) -> Option { - self.data.lock().continue_with.take() - } + /// Take continue action. + pub fn take_continue_action(&self) -> Option { + self.data.lock().continue_with.take() + } - /// Take failed continue action. - pub fn take_failed_continue_action(&self) -> Option { - self.data.lock().failed_continue_with.take() - } + /// Take failed continue action. + pub fn take_failed_continue_action(&self) -> Option { + self.data.lock().failed_continue_with.take() + } - /// Wait for session completion. - pub fn wait(&self) -> Result, Error> { - Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) - .expect("wait_session returns Some if called without timeout; qed") - } + /// Wait for session completion. + pub fn wait(&self) -> Result, Error> { + Self::wait_session(&self.core.completed, &self.data, None, |data| { + data.result.clone() + }) + .expect("wait_session returns Some if called without timeout; qed") + } - /// Retrieve common key data (author, threshold, public), if available. 
- pub fn common_key_data(&self) -> Result { - self.data.lock().key_share.clone() - .ok_or(Error::InvalidStateForRequest) - } + /// Retrieve common key data (author, threshold, public), if available. + pub fn common_key_data(&self) -> Result { + self.data + .lock() + .key_share + .clone() + .ok_or(Error::InvalidStateForRequest) + } - /// Initialize session. - pub fn initialize(&self, connected_nodes: BTreeSet) -> Result<(), Error> { - // check state - let mut data = self.data.lock(); - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } + /// Initialize session. + pub fn initialize(&self, connected_nodes: BTreeSet) -> Result<(), Error> { + // check state + let mut data = self.data.lock(); + if data.state != SessionState::WaitingForInitialization { + return Err(Error::InvalidStateForRequest); + } - // update state - let mut confirmations = connected_nodes; - let mut versions: BTreeMap> = BTreeMap::new(); - let received_own_confirmation = confirmations.remove(&self.core.meta.self_node_id); - if received_own_confirmation { - if let Some(key_share) = self.core.key_share.as_ref() { - for version in &key_share.versions { - versions.entry(version.hash.clone()) - .or_insert_with(Default::default) - .insert(self.core.meta.self_node_id.clone()); - } - } - } + // update state + let mut confirmations = connected_nodes; + let mut versions: BTreeMap> = BTreeMap::new(); + let received_own_confirmation = confirmations.remove(&self.core.meta.self_node_id); + if received_own_confirmation { + if let Some(key_share) = self.core.key_share.as_ref() { + for version in &key_share.versions { + versions + .entry(version.hash.clone()) + .or_insert_with(Default::default) + .insert(self.core.meta.self_node_id.clone()); + } + } + } - // update state - let no_confirmations_required = confirmations.is_empty(); - data.state = SessionState::WaitingForResponses; - data.confirmations = Some(confirmations); - data.versions = Some(versions); + // 
update state + let no_confirmations_required = confirmations.is_empty(); + data.state = SessionState::WaitingForResponses; + data.confirmations = Some(confirmations); + data.versions = Some(versions); - // try to complete session - Self::try_complete(&self.core, &mut *data); - if no_confirmations_required && data.state != SessionState::Finished { - return Err(Error::ServerKeyIsNotFound); - } else if data.state == SessionState::Finished { - return Ok(()); - } + // try to complete session + Self::try_complete(&self.core, &mut *data); + if no_confirmations_required && data.state != SessionState::Finished { + return Err(Error::ServerKeyIsNotFound); + } else if data.state == SessionState::Finished { + return Ok(()); + } - // send requests - let confirmations = data.confirmations.as_ref().expect("dilled couple of lines above; qed"); - for connected_node in confirmations { - self.core.transport.send(connected_node, KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: self.core.meta.id.clone().into(), - sub_session: self.core.sub_session.clone().into(), - session_nonce: self.core.nonce, - }))?; - } + // send requests + let confirmations = data + .confirmations + .as_ref() + .expect("dilled couple of lines above; qed"); + for connected_node in confirmations { + self.core.transport.send( + connected_node, + KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { + session: self.core.meta.id.clone().into(), + sub_session: self.core.sub_session.clone().into(), + session_nonce: self.core.nonce, + }), + )?; + } - Ok(()) - } + Ok(()) + } - /// Process single message. - pub fn process_message(&self, sender: &NodeId, message: &KeyVersionNegotiationMessage) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } + /// Process single message. 
+ pub fn process_message( + &self, + sender: &NodeId, + message: &KeyVersionNegotiationMessage, + ) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } - match message { - &KeyVersionNegotiationMessage::RequestKeyVersions(ref message) => - self.on_key_versions_request(sender, message), - &KeyVersionNegotiationMessage::KeyVersions(ref message) => - self.on_key_versions(sender, message), - &KeyVersionNegotiationMessage::KeyVersionsError(ref message) => { - // remember failed continue action - if let Some(FailedKeyVersionContinueAction::Decrypt(Some(ref origin), ref requester)) = message.continue_with { - self.data.lock().failed_continue_with = - Some(FailedContinueAction::Decrypt(Some(origin.clone().into()), requester.clone().into())); - } + match message { + &KeyVersionNegotiationMessage::RequestKeyVersions(ref message) => { + self.on_key_versions_request(sender, message) + } + &KeyVersionNegotiationMessage::KeyVersions(ref message) => { + self.on_key_versions(sender, message) + } + &KeyVersionNegotiationMessage::KeyVersionsError(ref message) => { + // remember failed continue action + if let Some(FailedKeyVersionContinueAction::Decrypt( + Some(ref origin), + ref requester, + )) = message.continue_with + { + self.data.lock().failed_continue_with = Some(FailedContinueAction::Decrypt( + Some(origin.clone().into()), + requester.clone().into(), + )); + } - self.on_session_error(sender, message.error.clone()); - Ok(()) - }, - } - } + self.on_session_error(sender, message.error.clone()); + Ok(()) + } + } + } - /// Process key versions request. - pub fn on_key_versions_request(&self, sender: &NodeId, _message: &RequestKeyVersions) -> Result<(), Error> { - debug_assert!(sender != &self.core.meta.self_node_id); + /// Process key versions request. 
+ pub fn on_key_versions_request( + &self, + sender: &NodeId, + _message: &RequestKeyVersions, + ) -> Result<(), Error> { + debug_assert!(sender != &self.core.meta.self_node_id); - // check message - if *sender != self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } + // check message + if *sender != self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } - // check state - let mut data = self.data.lock(); - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } + // check state + let mut data = self.data.lock(); + if data.state != SessionState::WaitingForInitialization { + return Err(Error::InvalidStateForRequest); + } - // send response - self.core.transport.send(sender, KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: self.core.meta.id.clone().into(), - sub_session: self.core.sub_session.clone().into(), - session_nonce: self.core.nonce, - key_common: self.core.key_share.as_ref().map(|key_share| CommonKeyData { - threshold: key_share.threshold, - author: key_share.author.into(), - public: key_share.public.into(), - }), - versions: self.core.key_share.as_ref().map(|key_share| - key_share.versions.iter().rev() - .filter(|v| v.id_numbers.contains_key(sender)) - .chain(key_share.versions.iter().rev().filter(|v| !v.id_numbers.contains_key(sender))) - .map(|v| v.hash.clone().into()) - .take(VERSIONS_PER_MESSAGE) - .collect()) - .unwrap_or_else(|| Default::default()) - }))?; + // send response + self.core.transport.send( + sender, + KeyVersionNegotiationMessage::KeyVersions(KeyVersions { + session: self.core.meta.id.clone().into(), + sub_session: self.core.sub_session.clone().into(), + session_nonce: self.core.nonce, + key_common: self.core.key_share.as_ref().map(|key_share| CommonKeyData { + threshold: key_share.threshold, + author: key_share.author.into(), + public: key_share.public.into(), + }), + versions: self + .core + .key_share + .as_ref() + 
.map(|key_share| { + key_share + .versions + .iter() + .rev() + .filter(|v| v.id_numbers.contains_key(sender)) + .chain( + key_share + .versions + .iter() + .rev() + .filter(|v| !v.id_numbers.contains_key(sender)), + ) + .map(|v| v.hash.clone().into()) + .take(VERSIONS_PER_MESSAGE) + .collect() + }) + .unwrap_or_else(|| Default::default()), + }), + )?; - // update state - data.state = SessionState::Finished; - data.result = Some(Ok(None)); - self.core.completed.notify_all(); + // update state + data.state = SessionState::Finished; + data.result = Some(Ok(None)); + self.core.completed.notify_all(); - Ok(()) - } + Ok(()) + } - /// Process key versions response. - pub fn on_key_versions(&self, sender: &NodeId, message: &KeyVersions) -> Result<(), Error> { - debug_assert!(sender != &self.core.meta.self_node_id); + /// Process key versions response. + pub fn on_key_versions(&self, sender: &NodeId, message: &KeyVersions) -> Result<(), Error> { + debug_assert!(sender != &self.core.meta.self_node_id); - // check state - let mut data = self.data.lock(); - if data.state != SessionState::WaitingForResponses && data.state != SessionState::Finished { - return Err(Error::InvalidStateForRequest); - } - let reason = "this field is filled on master node when initializing; this is initialized master node; qed"; - if !data.confirmations.as_mut().expect(reason).remove(sender) { - return Err(Error::InvalidMessage); - } + // check state + let mut data = self.data.lock(); + if data.state != SessionState::WaitingForResponses && data.state != SessionState::Finished { + return Err(Error::InvalidStateForRequest); + } + let reason = "this field is filled on master node when initializing; this is initialized master node; qed"; + if !data.confirmations.as_mut().expect(reason).remove(sender) { + return Err(Error::InvalidMessage); + } - // remember versions that sender have - { - match message.key_common.as_ref() { - Some(key_common) if data.key_share.is_none() => { - data.key_share = 
Some(DocumentKeyShare { - threshold: key_common.threshold, - author: key_common.author.clone().into(), - public: key_common.public.clone().into(), - ..Default::default() - }); - }, - Some(key_common) => { - let prev_key_share = data.key_share.as_ref() - .expect("data.key_share.is_none() is matched by previous branch; qed"); - if prev_key_share.threshold != key_common.threshold || - prev_key_share.author[..] != key_common.author[..] || - prev_key_share.public[..] != key_common.public[..] - { - return Err(Error::InvalidMessage); - } - }, - None if message.versions.is_empty() => (), - None => return Err(Error::InvalidMessage), - } + // remember versions that sender have + { + match message.key_common.as_ref() { + Some(key_common) if data.key_share.is_none() => { + data.key_share = Some(DocumentKeyShare { + threshold: key_common.threshold, + author: key_common.author.clone().into(), + public: key_common.public.clone().into(), + ..Default::default() + }); + } + Some(key_common) => { + let prev_key_share = data + .key_share + .as_ref() + .expect("data.key_share.is_none() is matched by previous branch; qed"); + if prev_key_share.threshold != key_common.threshold + || prev_key_share.author[..] != key_common.author[..] + || prev_key_share.public[..] != key_common.public[..] 
+ { + return Err(Error::InvalidMessage); + } + } + None if message.versions.is_empty() => (), + None => return Err(Error::InvalidMessage), + } - let versions = data.versions.as_mut().expect(reason); - for version in &message.versions { - versions.entry(version.clone().into()) - .or_insert_with(Default::default) - .insert(sender.clone()); - } - } + let versions = data.versions.as_mut().expect(reason); + for version in &message.versions { + versions + .entry(version.clone().into()) + .or_insert_with(Default::default) + .insert(sender.clone()); + } + } - // try to compute result - if data.state != SessionState::Finished { - Self::try_complete(&self.core, &mut *data); - } + // try to compute result + if data.state != SessionState::Finished { + Self::try_complete(&self.core, &mut *data); + } - Ok(()) - } + Ok(()) + } - /// Try to complete result && finish session. - fn try_complete(core: &SessionCore, data: &mut SessionData) { - let reason = "this field is filled on master node when initializing; try_complete is only called on initialized master node; qed"; - let confirmations = data.confirmations.as_ref().expect(reason); - let versions = data.versions.as_ref().expect(reason); - let threshold = data.key_share.as_ref().map(|key_share| key_share.threshold); - if let Some(result) = core.result_computer.compute_result(threshold, confirmations, versions) { - // when the master node processing decryption service request, it starts with a key version negotiation session - // if the negotiation fails, only master node knows about it - // => if the error is fatal, only the master will know about it and report it to the contract && the request will never be rejected - // => let's broadcast fatal error so that every other node know about it, and, if it trusts to master node - // will report error to the contract - if let (Some(continue_with), Err(error)) = (data.continue_with.as_ref(), result.as_ref()) { - let origin = match *continue_with { - ContinueAction::Decrypt(_, origin, _, 
_) => origin.clone(), - _ => None, - }; + /// Try to complete result && finish session. + fn try_complete(core: &SessionCore, data: &mut SessionData) { + let reason = "this field is filled on master node when initializing; try_complete is only called on initialized master node; qed"; + let confirmations = data.confirmations.as_ref().expect(reason); + let versions = data.versions.as_ref().expect(reason); + let threshold = data.key_share.as_ref().map(|key_share| key_share.threshold); + if let Some(result) = + core.result_computer + .compute_result(threshold, confirmations, versions) + { + // when the master node processing decryption service request, it starts with a key version negotiation session + // if the negotiation fails, only master node knows about it + // => if the error is fatal, only the master will know about it and report it to the contract && the request will never be rejected + // => let's broadcast fatal error so that every other node know about it, and, if it trusts to master node + // will report error to the contract + if let (Some(continue_with), Err(error)) = + (data.continue_with.as_ref(), result.as_ref()) + { + let origin = match *continue_with { + ContinueAction::Decrypt(_, origin, _, _) => origin.clone(), + _ => None, + }; - let requester = match *continue_with { - ContinueAction::Decrypt(ref session, _, _, _) => session.requester().and_then(|r| r.address(&core.meta.id).ok()), - _ => None, - }; + let requester = match *continue_with { + ContinueAction::Decrypt(ref session, _, _, _) => session + .requester() + .and_then(|r| r.address(&core.meta.id).ok()), + _ => None, + }; - if origin.is_some() && requester.is_some() && !error.is_non_fatal() { - let requester = requester.expect("checked in above condition; qed"); - data.failed_continue_with = - Some(FailedContinueAction::Decrypt(origin.clone(), requester.clone())); + if origin.is_some() && requester.is_some() && !error.is_non_fatal() { + let requester = requester.expect("checked in above 
condition; qed"); + data.failed_continue_with = Some(FailedContinueAction::Decrypt( + origin.clone(), + requester.clone(), + )); - let send_result = core.transport.broadcast(KeyVersionNegotiationMessage::KeyVersionsError(KeyVersionsError { - session: core.meta.id.clone().into(), - sub_session: core.sub_session.clone().into(), - session_nonce: core.nonce, - error: error.clone(), - continue_with: Some(FailedKeyVersionContinueAction::Decrypt( - origin.map(Into::into), - requester.into(), - )), - })); + let send_result = + core.transport + .broadcast(KeyVersionNegotiationMessage::KeyVersionsError( + KeyVersionsError { + session: core.meta.id.clone().into(), + sub_session: core.sub_session.clone().into(), + session_nonce: core.nonce, + error: error.clone(), + continue_with: Some(FailedKeyVersionContinueAction::Decrypt( + origin.map(Into::into), + requester.into(), + )), + }, + )); - if let Err(send_error) = send_result { - warn!(target: "secretstore_net", "{}: failed to broadcast key version negotiation error {}: {}", + if let Err(send_error) = send_result { + warn!(target: "secretstore_net", "{}: failed to broadcast key version negotiation error {}: {}", core.meta.self_node_id, error, send_error); - } - } - } + } + } + } - data.state = SessionState::Finished; - data.result = Some(result.map(Some)); - core.completed.notify_all(); - } - } + data.state = SessionState::Finished; + data.result = Some(result.map(Some)); + core.completed.notify_all(); + } + } } -impl ClusterSession for SessionImpl where T: SessionTransport { - type Id = SessionIdWithSubSession; +impl ClusterSession for SessionImpl +where + T: SessionTransport, +{ + type Id = SessionIdWithSubSession; - fn type_name() -> &'static str { - "version negotiation" - } + fn type_name() -> &'static str { + "version negotiation" + } - fn id(&self) -> SessionIdWithSubSession { - SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.sub_session.clone()) - } + fn id(&self) -> SessionIdWithSubSession { + 
SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.sub_session.clone()) + } - fn is_finished(&self) -> bool { - self.data.lock().state == SessionState::Finished - } + fn is_finished(&self) -> bool { + self.data.lock().state == SessionState::Finished + } - fn on_session_timeout(&self) { - let mut data = self.data.lock(); + fn on_session_timeout(&self) { + let mut data = self.data.lock(); - if data.confirmations.is_some() { - data.confirmations.as_mut().expect("checked a line above; qed").clear(); - Self::try_complete(&self.core, &mut *data); - if data.state != SessionState::Finished { - warn!(target: "secretstore_net", "{}: key version negotiation session failed with timeout", self.core.meta.self_node_id); + if data.confirmations.is_some() { + data.confirmations + .as_mut() + .expect("checked a line above; qed") + .clear(); + Self::try_complete(&self.core, &mut *data); + if data.state != SessionState::Finished { + warn!(target: "secretstore_net", "{}: key version negotiation session failed with timeout", self.core.meta.self_node_id); - data.result = Some(Err(Error::ConsensusTemporaryUnreachable)); - self.core.completed.notify_all(); - } - } - } + data.result = Some(Err(Error::ConsensusTemporaryUnreachable)); + self.core.completed.notify_all(); + } + } + } - fn on_node_timeout(&self, node: &NodeId) { - self.on_session_error(node, Error::NodeDisconnected) - } + fn on_node_timeout(&self, node: &NodeId) { + self.on_session_error(node, Error::NodeDisconnected) + } - fn on_session_error(&self, node: &NodeId, error: Error) { - let mut data = self.data.lock(); + fn on_session_error(&self, node: &NodeId, error: Error) { + let mut data = self.data.lock(); - if data.confirmations.is_some() { - let is_waiting_for_confirmation = data.confirmations.as_mut().expect("checked a line above; qed").remove(node); - if !is_waiting_for_confirmation { - return; - } + if data.confirmations.is_some() { + let is_waiting_for_confirmation = data + .confirmations + .as_mut() + 
.expect("checked a line above; qed") + .remove(node); + if !is_waiting_for_confirmation { + return; + } - Self::try_complete(&self.core, &mut *data); - if data.state == SessionState::Finished { - return; - } - } + Self::try_complete(&self.core, &mut *data); + if data.state == SessionState::Finished { + return; + } + } - warn!(target: "secretstore_net", "{}: key version negotiation session failed because of {} from {}", + warn!(target: "secretstore_net", "{}: key version negotiation session failed because of {} from {}", self.core.meta.self_node_id, error, node); - data.state = SessionState::Finished; - data.result = Some(Err(error)); - self.core.completed.notify_all(); - } + data.state = SessionState::Finished; + data.result = Some(Err(error)); + self.core.completed.notify_all(); + } - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::KeyVersionNegotiation(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } + fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { + match *message { + Message::KeyVersionNegotiation(ref message) => self.process_message(sender, message), + _ => unreachable!("cluster checks message to be correct before passing; qed"), + } + } } impl SessionTransport for IsolatedSessionTransport { - fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster.broadcast(Message::KeyVersionNegotiation(message)) - } + fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> { + self.cluster + .broadcast(Message::KeyVersionNegotiation(message)) + } - fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster.send(node, Message::KeyVersionNegotiation(message)) - } + fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> { + self.cluster + 
.send(node, Message::KeyVersionNegotiation(message)) + } } impl FastestResultComputer { - pub fn new(self_node_id: NodeId, key_share: Option<&DocumentKeyShare>, configured_nodes_count: usize, connected_nodes_count: usize) -> Self { - let threshold = key_share.map(|ks| ks.threshold); - FastestResultComputer { - self_node_id, - threshold, - configured_nodes_count, - connected_nodes_count, - } - }} + pub fn new( + self_node_id: NodeId, + key_share: Option<&DocumentKeyShare>, + configured_nodes_count: usize, + connected_nodes_count: usize, + ) -> Self { + let threshold = key_share.map(|ks| ks.threshold); + FastestResultComputer { + self_node_id, + threshold, + configured_nodes_count, + connected_nodes_count, + } + } +} impl SessionResultComputer for FastestResultComputer { - fn compute_result(&self, threshold: Option, confirmations: &BTreeSet, versions: &BTreeMap>) -> Option> { - match self.threshold.or(threshold) { - // if there's no versions at all && we're not waiting for confirmations anymore - _ if confirmations.is_empty() && versions.is_empty() => Some(Err(Error::ServerKeyIsNotFound)), - // if we have key share on this node - Some(threshold) => { - // select version this node have, with enough participants - let has_key_share = self.threshold.is_some(); - let version = versions.iter().find(|&(_, ref n)| !has_key_share || n.contains(&self.self_node_id) && n.len() >= threshold + 1); - // if there's no such version, wait for more confirmations - match version { - Some((version, nodes)) => Some(Ok((version.clone(), if has_key_share { self.self_node_id.clone() } else { nodes.iter().cloned().nth(0) - .expect("version is only inserted when there's at least one owner; qed") }))), - None if !confirmations.is_empty() => None, - // otherwise - try to find any version - None => Some(versions.iter() - .find(|&(_, ref n)| n.len() >= threshold + 1) - .map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0) - .expect("version is only inserted when there's at 
least one owner; qed")))) - // if there's no version consensus among all connected nodes - // AND we're connected to ALL configured nodes - // OR there are less than required nodes for key restore - // => this means that we can't restore key with CURRENT configuration => respond with fatal error - // otherwise we could try later, after all nodes are connected - .unwrap_or_else(|| Err(if self.configured_nodes_count == self.connected_nodes_count - || self.configured_nodes_count < threshold + 1 { - Error::ConsensusUnreachable - } else { - Error::ConsensusTemporaryUnreachable - }))), - } - }, - // if we do not have share, then wait for all confirmations - None if !confirmations.is_empty() => None, - // ...and select version with largest support - None => Some(versions.iter() - .max_by_key(|&(_, ref n)| n.len()) - .map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0) - .expect("version is only inserted when there's at least one owner; qed")))) - .unwrap_or_else(|| Err(if self.configured_nodes_count == self.connected_nodes_count { - Error::ConsensusUnreachable - } else { - Error::ConsensusTemporaryUnreachable - }))), - } - } + fn compute_result( + &self, + threshold: Option, + confirmations: &BTreeSet, + versions: &BTreeMap>, + ) -> Option> { + match self.threshold.or(threshold) { + // if there's no versions at all && we're not waiting for confirmations anymore + _ if confirmations.is_empty() && versions.is_empty() => { + Some(Err(Error::ServerKeyIsNotFound)) + } + // if we have key share on this node + Some(threshold) => { + // select version this node have, with enough participants + let has_key_share = self.threshold.is_some(); + let version = versions.iter().find(|&(_, ref n)| { + !has_key_share || n.contains(&self.self_node_id) && n.len() >= threshold + 1 + }); + // if there's no such version, wait for more confirmations + match version { + Some((version, nodes)) => Some(Ok(( + version.clone(), + if has_key_share { + self.self_node_id.clone() + } 
else { + nodes.iter().cloned().nth(0).expect( + "version is only inserted when there's at least one owner; qed", + ) + }, + ))), + None if !confirmations.is_empty() => None, + // otherwise - try to find any version + None => Some( + versions + .iter() + .find(|&(_, ref n)| n.len() >= threshold + 1) + .map(|(version, nodes)| { + Ok((version.clone(), nodes.iter().cloned().nth(0) + .expect("version is only inserted when there's at least one owner; qed"))) + }) + // if there's no version consensus among all connected nodes + // AND we're connected to ALL configured nodes + // OR there are less than required nodes for key restore + // => this means that we can't restore key with CURRENT configuration => respond with fatal error + // otherwise we could try later, after all nodes are connected + .unwrap_or_else(|| { + Err( + if self.configured_nodes_count == self.connected_nodes_count + || self.configured_nodes_count < threshold + 1 + { + Error::ConsensusUnreachable + } else { + Error::ConsensusTemporaryUnreachable + }, + ) + }), + ), + } + } + // if we do not have share, then wait for all confirmations + None if !confirmations.is_empty() => None, + // ...and select version with largest support + None => Some( + versions + .iter() + .max_by_key(|&(_, ref n)| n.len()) + .map(|(version, nodes)| { + Ok(( + version.clone(), + nodes.iter().cloned().nth(0).expect( + "version is only inserted when there's at least one owner; qed", + ), + )) + }) + .unwrap_or_else(|| { + Err( + if self.configured_nodes_count == self.connected_nodes_count { + Error::ConsensusUnreachable + } else { + Error::ConsensusTemporaryUnreachable + }, + ) + }), + ), + } + } } impl SessionResultComputer for LargestSupportResultComputer { - fn compute_result(&self, _threshold: Option, confirmations: &BTreeSet, versions: &BTreeMap>) -> Option> { - if !confirmations.is_empty() { - return None; - } - if versions.is_empty() { - return Some(Err(Error::ServerKeyIsNotFound)); - } + fn compute_result( + &self, + 
_threshold: Option, + confirmations: &BTreeSet, + versions: &BTreeMap>, + ) -> Option> { + if !confirmations.is_empty() { + return None; + } + if versions.is_empty() { + return Some(Err(Error::ServerKeyIsNotFound)); + } - versions.iter() - .max_by_key(|&(_, ref n)| n.len()) - .map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0) - .expect("version is only inserted when there's at least one owner; qed")))) - } + versions + .iter() + .max_by_key(|&(_, ref n)| n.len()) + .map(|(version, nodes)| { + Ok(( + version.clone(), + nodes + .iter() + .cloned() + .nth(0) + .expect("version is only inserted when there's at least one owner; qed"), + )) + }) + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::collections::{VecDeque, BTreeMap, BTreeSet}; - use ethereum_types::{H512, H160, Address}; - use ethkey::public_to_address; - use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage, - DocumentKeyShare, DocumentKeyShareVersion}; - use key_server_cluster::math; - use key_server_cluster::cluster::Cluster; - use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::admin_sessions::ShareChangeSessionMeta; - use key_server_cluster::decryption_session::create_default_decryption_session; - use key_server_cluster::message::{ - Message, KeyVersionNegotiationMessage, RequestKeyVersions, - CommonKeyData, KeyVersions, - }; - use super::{ - SessionImpl, SessionTransport, SessionParams, FastestResultComputer, LargestSupportResultComputer, - SessionResultComputer, SessionState, ContinueAction, FailedContinueAction, - }; + use super::{ + ContinueAction, FailedContinueAction, FastestResultComputer, LargestSupportResultComputer, + SessionImpl, SessionParams, SessionResultComputer, SessionState, SessionTransport, + }; + use ethereum_types::{Address, H160, H512}; + use ethkey::public_to_address; + use key_server_cluster::{ + 
admin_sessions::ShareChangeSessionMeta, + cluster::{tests::DummyCluster, Cluster}, + cluster_sessions::ClusterSession, + decryption_session::create_default_decryption_session, + math, + message::{ + CommonKeyData, KeyVersionNegotiationMessage, KeyVersions, Message, RequestKeyVersions, + }, + DocumentKeyShare, DocumentKeyShareVersion, DummyKeyStorage, Error, KeyStorage, NodeId, + SessionId, + }; + use std::{ + collections::{BTreeMap, BTreeSet, VecDeque}, + sync::Arc, + }; - struct DummyTransport { - cluster: Arc, - } + struct DummyTransport { + cluster: Arc, + } - impl SessionTransport for DummyTransport { - fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster.broadcast(Message::KeyVersionNegotiation(message)) - } + impl SessionTransport for DummyTransport { + fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> { + self.cluster + .broadcast(Message::KeyVersionNegotiation(message)) + } - fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster.send(node, Message::KeyVersionNegotiation(message)) - } - } + fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> { + self.cluster + .send(node, Message::KeyVersionNegotiation(message)) + } + } - struct Node { - pub cluster: Arc, - pub key_storage: Arc, - pub session: SessionImpl, - } + struct Node { + pub cluster: Arc, + pub key_storage: Arc, + pub session: SessionImpl, + } - struct MessageLoop { - pub session_id: SessionId, - pub nodes: BTreeMap, - pub queue: VecDeque<(NodeId, NodeId, Message)>, - } + struct MessageLoop { + pub session_id: SessionId, + pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, + } - impl MessageLoop { - pub fn prepare_nodes(nodes_num: usize) -> BTreeMap> { - (0..nodes_num).map(|_| (math::generate_random_point().unwrap(), - Arc::new(DummyKeyStorage::default()))).collect() - } + impl MessageLoop { + pub fn 
prepare_nodes(nodes_num: usize) -> BTreeMap> { + (0..nodes_num) + .map(|_| { + ( + math::generate_random_point().unwrap(), + Arc::new(DummyKeyStorage::default()), + ) + }) + .collect() + } - pub fn empty(nodes_num: usize) -> Self { - Self::new(Self::prepare_nodes(nodes_num)) - } + pub fn empty(nodes_num: usize) -> Self { + Self::new(Self::prepare_nodes(nodes_num)) + } - pub fn new(nodes: BTreeMap>) -> Self { - let master_node_id = nodes.keys().cloned().nth(0).unwrap(); - let sub_sesion = math::generate_random_scalar().unwrap(); - let all_nodes_ids: BTreeSet<_> = nodes.keys().cloned().collect(); - MessageLoop { - session_id: Default::default(), - nodes: nodes.iter().map(|(node_id, key_storage)| { - let cluster = Arc::new(DummyCluster::new(node_id.clone())); - cluster.add_nodes(all_nodes_ids.iter().cloned()); - (node_id.clone(), Node { - cluster: cluster.clone(), - key_storage: key_storage.clone(), - session: SessionImpl::new(SessionParams { - meta: ShareChangeSessionMeta { - id: Default::default(), - self_node_id: node_id.clone(), - master_node_id: master_node_id.clone(), - configured_nodes_count: nodes.len(), - connected_nodes_count: nodes.len(), - }, - sub_session: sub_sesion.clone(), - key_share: key_storage.get(&Default::default()).unwrap(), - result_computer: Arc::new(FastestResultComputer::new( - node_id.clone(), - key_storage.get(&Default::default()).unwrap().as_ref(), - nodes.len(), nodes.len() - )), - transport: DummyTransport { - cluster: cluster, - }, - nonce: 0, - }), - }) - }).collect(), - queue: VecDeque::new(), - } - } + pub fn new(nodes: BTreeMap>) -> Self { + let master_node_id = nodes.keys().cloned().nth(0).unwrap(); + let sub_sesion = math::generate_random_scalar().unwrap(); + let all_nodes_ids: BTreeSet<_> = nodes.keys().cloned().collect(); + MessageLoop { + session_id: Default::default(), + nodes: nodes + .iter() + .map(|(node_id, key_storage)| { + let cluster = Arc::new(DummyCluster::new(node_id.clone())); + 
cluster.add_nodes(all_nodes_ids.iter().cloned()); + ( + node_id.clone(), + Node { + cluster: cluster.clone(), + key_storage: key_storage.clone(), + session: SessionImpl::new(SessionParams { + meta: ShareChangeSessionMeta { + id: Default::default(), + self_node_id: node_id.clone(), + master_node_id: master_node_id.clone(), + configured_nodes_count: nodes.len(), + connected_nodes_count: nodes.len(), + }, + sub_session: sub_sesion.clone(), + key_share: key_storage.get(&Default::default()).unwrap(), + result_computer: Arc::new(FastestResultComputer::new( + node_id.clone(), + key_storage.get(&Default::default()).unwrap().as_ref(), + nodes.len(), + nodes.len(), + )), + transport: DummyTransport { cluster: cluster }, + nonce: 0, + }), + }, + ) + }) + .collect(), + queue: VecDeque::new(), + } + } - pub fn node_id(&self, idx: usize) -> &NodeId { - self.nodes.keys().nth(idx).unwrap() - } + pub fn node_id(&self, idx: usize) -> &NodeId { + self.nodes.keys().nth(idx).unwrap() + } - pub fn session(&self, idx: usize) -> &SessionImpl { - &self.nodes.values().nth(idx).unwrap().session - } + pub fn session(&self, idx: usize) -> &SessionImpl { + &self.nodes.values().nth(idx).unwrap().session + } - pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.nodes.values() - .filter_map(|n| n.cluster.take_message().map(|m| (n.session.meta().self_node_id.clone(), m.0, m.1))) - .nth(0) - .or_else(|| self.queue.pop_front()) - } + pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { + self.nodes + .values() + .filter_map(|n| { + n.cluster + .take_message() + .map(|m| (n.session.meta().self_node_id.clone(), m.0, m.1)) + }) + .nth(0) + .or_else(|| self.queue.pop_front()) + } - pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match msg.2 { - Message::KeyVersionNegotiation(message) => - self.nodes[&msg.1].session.process_message(&msg.0, &message), - _ => panic!("unexpected"), - } - } + pub fn process_message(&mut self, 
msg: (NodeId, NodeId, Message)) -> Result<(), Error> { + match msg.2 { + Message::KeyVersionNegotiation(message) => { + self.nodes[&msg.1].session.process_message(&msg.0, &message) + } + _ => panic!("unexpected"), + } + } - pub fn run(&mut self) { - while let Some((from, to, message)) = self.take_message() { - self.process_message((from, to, message)).unwrap(); - } - } - } + pub fn run(&mut self) { + while let Some((from, to, message)) = self.take_message() { + self.process_message((from, to, message)).unwrap(); + } + } + } - #[test] - fn negotiation_fails_if_initialized_twice() { - let ml = MessageLoop::empty(1); - assert_eq!(ml.session(0).initialize(BTreeSet::new()), Ok(())); - assert_eq!(ml.session(0).initialize(BTreeSet::new()), Err(Error::InvalidStateForRequest)); - } + #[test] + fn negotiation_fails_if_initialized_twice() { + let ml = MessageLoop::empty(1); + assert_eq!(ml.session(0).initialize(BTreeSet::new()), Ok(())); + assert_eq!( + ml.session(0).initialize(BTreeSet::new()), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn negotiation_fails_if_message_contains_wrong_nonce() { - let ml = MessageLoop::empty(2); - assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 100, - })), Err(Error::ReplayProtection)); - } + #[test] + fn negotiation_fails_if_message_contains_wrong_nonce() { + let ml = MessageLoop::empty(2); + assert_eq!( + ml.session(1).process_message( + ml.node_id(0), + &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 100, + }) + ), + Err(Error::ReplayProtection) + ); + } - #[test] - fn negotiation_fails_if_versions_request_received_from_non_master() { - let ml = MessageLoop::empty(3); - 
assert_eq!(ml.session(2).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - })), Err(Error::InvalidMessage)); - } + #[test] + fn negotiation_fails_if_versions_request_received_from_non_master() { + let ml = MessageLoop::empty(3); + assert_eq!( + ml.session(2).process_message( + ml.node_id(1), + &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + }) + ), + Err(Error::InvalidMessage) + ); + } - #[test] - fn negotiation_fails_if_versions_request_received_twice() { - let ml = MessageLoop::empty(2); - assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - })), Ok(())); - assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - })), Err(Error::InvalidStateForRequest)); - } + #[test] + fn negotiation_fails_if_versions_request_received_twice() { + let ml = MessageLoop::empty(2); + assert_eq!( + ml.session(1).process_message( + ml.node_id(0), + &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + }) + ), + Ok(()) + ); + assert_eq!( + ml.session(1).process_message( + ml.node_id(0), + &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + }) + ), + 
Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn negotiation_fails_if_versions_received_before_initialization() { - let ml = MessageLoop::empty(2); - assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: Some(CommonKeyData { - threshold: 10, - author: Default::default(), - public: Default::default(), - }), - versions: Vec::new(), - })), Err(Error::InvalidStateForRequest)); - } + #[test] + fn negotiation_fails_if_versions_received_before_initialization() { + let ml = MessageLoop::empty(2); + assert_eq!( + ml.session(1).process_message( + ml.node_id(0), + &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + key_common: Some(CommonKeyData { + threshold: 10, + author: Default::default(), + public: Default::default(), + }), + versions: Vec::new(), + }) + ), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn negotiation_does_not_fails_if_versions_received_after_completion() { - let ml = MessageLoop::empty(3); - ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap(); - assert_eq!(ml.session(0).data.lock().state, SessionState::WaitingForResponses); + #[test] + fn negotiation_does_not_fails_if_versions_received_after_completion() { + let ml = MessageLoop::empty(3); + ml.session(0) + .initialize(ml.nodes.keys().cloned().collect()) + .unwrap(); + assert_eq!( + ml.session(0).data.lock().state, + SessionState::WaitingForResponses + ); - let version_id = (*math::generate_random_scalar().unwrap()).clone(); - assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: 
Some(CommonKeyData { - threshold: 0, - author: Default::default(), - public: Default::default(), - }), + let version_id = (*math::generate_random_scalar().unwrap()).clone(); + assert_eq!( + ml.session(0).process_message( + ml.node_id(1), + &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + key_common: Some(CommonKeyData { + threshold: 0, + author: Default::default(), + public: Default::default(), + }), - versions: vec![version_id.clone().into()] - })), Ok(())); - assert_eq!(ml.session(0).data.lock().state, SessionState::Finished); + versions: vec![version_id.clone().into()] + }) + ), + Ok(()) + ); + assert_eq!(ml.session(0).data.lock().state, SessionState::Finished); - assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: Some(CommonKeyData { - threshold: 0, - author: Default::default(), - public: Default::default(), - }), + assert_eq!( + ml.session(0).process_message( + ml.node_id(2), + &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + key_common: Some(CommonKeyData { + threshold: 0, + author: Default::default(), + public: Default::default(), + }), - versions: vec![version_id.clone().into()] - })), Ok(())); - assert_eq!(ml.session(0).data.lock().state, SessionState::Finished); - } + versions: vec![version_id.clone().into()] + }) + ), + Ok(()) + ); + assert_eq!(ml.session(0).data.lock().state, SessionState::Finished); + } - #[test] - fn negotiation_fails_if_wrong_common_data_sent() { - fn run_test(key_common: CommonKeyData) { - let ml = MessageLoop::empty(3); - ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap(); + #[test] 
+ fn negotiation_fails_if_wrong_common_data_sent() { + fn run_test(key_common: CommonKeyData) { + let ml = MessageLoop::empty(3); + ml.session(0) + .initialize(ml.nodes.keys().cloned().collect()) + .unwrap(); - let version_id = (*math::generate_random_scalar().unwrap()).clone(); - assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: Some(CommonKeyData { - threshold: 1, - author: Default::default(), - public: Default::default(), - }), - versions: vec![version_id.clone().into()] - })), Ok(())); - assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: Some(key_common), - versions: vec![version_id.clone().into()] - })), Err(Error::InvalidMessage)); - } - - run_test(CommonKeyData { - threshold: 2, - author: Default::default(), - public: Default::default(), - }); + let version_id = (*math::generate_random_scalar().unwrap()).clone(); + assert_eq!( + ml.session(0).process_message( + ml.node_id(1), + &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + key_common: Some(CommonKeyData { + threshold: 1, + author: Default::default(), + public: Default::default(), + }), + versions: vec![version_id.clone().into()] + }) + ), + Ok(()) + ); + assert_eq!( + ml.session(0).process_message( + ml.node_id(2), + &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + key_common: Some(key_common), + versions: vec![version_id.clone().into()] + }) + ), + Err(Error::InvalidMessage) + ); + } - 
run_test(CommonKeyData { - threshold: 1, - author: H160::from(1).into(), - public: Default::default(), - }); + run_test(CommonKeyData { + threshold: 2, + author: Default::default(), + public: Default::default(), + }); - run_test(CommonKeyData { - threshold: 1, - author: H160::from(2).into(), - public: Default::default(), - }); - } + run_test(CommonKeyData { + threshold: 1, + author: H160::from(1).into(), + public: Default::default(), + }); - #[test] - fn negotiation_fails_if_threshold_empty_when_versions_are_not_empty() { - let ml = MessageLoop::empty(2); - ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap(); + run_test(CommonKeyData { + threshold: 1, + author: H160::from(2).into(), + public: Default::default(), + }); + } - let version_id = (*math::generate_random_scalar().unwrap()).clone(); - assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: None, - versions: vec![version_id.clone().into()] - })), Err(Error::InvalidMessage)); - } + #[test] + fn negotiation_fails_if_threshold_empty_when_versions_are_not_empty() { + let ml = MessageLoop::empty(2); + ml.session(0) + .initialize(ml.nodes.keys().cloned().collect()) + .unwrap(); - #[test] - fn fast_negotiation_does_not_completes_instantly_when_enough_share_owners_are_connected() { - let nodes = MessageLoop::prepare_nodes(2); - let version_id = (*math::generate_random_scalar().unwrap()).clone(); - nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare { - author: H160::from(2), - threshold: 1, - public: H512::from(3), - common_point: None, - encrypted_point: None, - versions: vec![DocumentKeyShareVersion { - hash: version_id, - id_numbers: vec![(nodes.keys().cloned().nth(0).unwrap(), math::generate_random_scalar().unwrap())].into_iter().collect(), - secret_share: 
math::generate_random_scalar().unwrap(), - }], - }).unwrap(); - let ml = MessageLoop::new(nodes); - ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap(); - // we can't be sure that node has given key version because previous ShareAdd session could fail - assert!(ml.session(0).data.lock().state != SessionState::Finished); + let version_id = (*math::generate_random_scalar().unwrap()).clone(); + assert_eq!( + ml.session(0).process_message( + ml.node_id(1), + &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + key_common: None, + versions: vec![version_id.clone().into()] + }) + ), + Err(Error::InvalidMessage) + ); + } - // check that upon completion, commmon key data is known - assert_eq!(ml.session(0).common_key_data(), Ok(DocumentKeyShare { - author: H160::from(2), - threshold: 1, - public: H512::from(3), - ..Default::default() - })); - } + #[test] + fn fast_negotiation_does_not_completes_instantly_when_enough_share_owners_are_connected() { + let nodes = MessageLoop::prepare_nodes(2); + let version_id = (*math::generate_random_scalar().unwrap()).clone(); + nodes + .values() + .nth(0) + .unwrap() + .insert( + Default::default(), + DocumentKeyShare { + author: H160::from(2), + threshold: 1, + public: H512::from(3), + common_point: None, + encrypted_point: None, + versions: vec![DocumentKeyShareVersion { + hash: version_id, + id_numbers: vec![( + nodes.keys().cloned().nth(0).unwrap(), + math::generate_random_scalar().unwrap(), + )] + .into_iter() + .collect(), + secret_share: math::generate_random_scalar().unwrap(), + }], + }, + ) + .unwrap(); + let ml = MessageLoop::new(nodes); + ml.session(0) + .initialize(ml.nodes.keys().cloned().collect()) + .unwrap(); + // we can't be sure that node has given key version because previous ShareAdd session could fail + assert!(ml.session(0).data.lock().state != SessionState::Finished); - #[test] - 
fn fastest_computer_returns_missing_share_if_no_versions_returned() { - let computer = FastestResultComputer { - self_node_id: Default::default(), - threshold: None, - configured_nodes_count: 1, - connected_nodes_count: 1, - }; - assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::ServerKeyIsNotFound))); - } + // check that upon completion, commmon key data is known + assert_eq!( + ml.session(0).common_key_data(), + Ok(DocumentKeyShare { + author: H160::from(2), + threshold: 1, + public: H512::from(3), + ..Default::default() + }) + ); + } - #[test] - fn largest_computer_returns_missing_share_if_no_versions_returned() { - let computer = LargestSupportResultComputer; - assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::ServerKeyIsNotFound))); - } + #[test] + fn fastest_computer_returns_missing_share_if_no_versions_returned() { + let computer = FastestResultComputer { + self_node_id: Default::default(), + threshold: None, + configured_nodes_count: 1, + connected_nodes_count: 1, + }; + assert_eq!( + computer.compute_result(Some(10), &Default::default(), &Default::default()), + Some(Err(Error::ServerKeyIsNotFound)) + ); + } - #[test] - fn fatal_error_is_not_broadcasted_if_started_without_origin() { - let mut ml = MessageLoop::empty(3); - ml.session(0).set_continue_action(ContinueAction::Decrypt(create_default_decryption_session(), None, false, false)); - ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap(); - ml.run(); + #[test] + fn largest_computer_returns_missing_share_if_no_versions_returned() { + let computer = LargestSupportResultComputer; + assert_eq!( + computer.compute_result(Some(10), &Default::default(), &Default::default()), + Some(Err(Error::ServerKeyIsNotFound)) + ); + } - assert!(ml.nodes.values().all(|n| n.session.is_finished() && - n.session.take_failed_continue_action().is_none())); - } + #[test] + fn 
fatal_error_is_not_broadcasted_if_started_without_origin() { + let mut ml = MessageLoop::empty(3); + ml.session(0).set_continue_action(ContinueAction::Decrypt( + create_default_decryption_session(), + None, + false, + false, + )); + ml.session(0) + .initialize(ml.nodes.keys().cloned().collect()) + .unwrap(); + ml.run(); - #[test] - fn fatal_error_is_broadcasted_if_started_with_origin() { - let mut ml = MessageLoop::empty(3); - ml.session(0).set_continue_action(ContinueAction::Decrypt(create_default_decryption_session(), Some(1.into()), true, true)); - ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap(); - ml.run(); + assert!(ml + .nodes + .values() + .all(|n| n.session.is_finished() && n.session.take_failed_continue_action().is_none())); + } - // on all nodes session is completed - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + #[test] + fn fatal_error_is_broadcasted_if_started_with_origin() { + let mut ml = MessageLoop::empty(3); + ml.session(0).set_continue_action(ContinueAction::Decrypt( + create_default_decryption_session(), + Some(1.into()), + true, + true, + )); + ml.session(0) + .initialize(ml.nodes.keys().cloned().collect()) + .unwrap(); + ml.run(); - // slave nodes have non-empty failed continue action - assert!(ml.nodes.values().skip(1).all(|n| n.session.take_failed_continue_action() - == Some(FailedContinueAction::Decrypt(Some(1.into()), public_to_address(&2.into()))))); - } + // on all nodes session is completed + assert!(ml.nodes.values().all(|n| n.session.is_finished())); + + // slave nodes have non-empty failed continue action + assert!(ml + .nodes + .values() + .skip(1) + .all(|n| n.session.take_failed_continue_action() + == Some(FailedContinueAction::Decrypt( + Some(1.into()), + public_to_address(&2.into()) + )))); + } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/mod.rs b/secret-store/src/key_server_cluster/admin_sessions/mod.rs index 2509a76c4..7cb0bef07 100644 --- 
a/secret-store/src/key_server_cluster/admin_sessions/mod.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/mod.rs @@ -21,33 +21,35 @@ pub mod share_change_session; mod sessions_queue; -use key_server_cluster::{SessionId, NodeId, SessionMeta, Error}; +use key_server_cluster::{Error, NodeId, SessionId, SessionMeta}; /// Share change session metadata. #[derive(Debug, Clone)] pub struct ShareChangeSessionMeta { - /// Key id. - pub id: SessionId, - /// Id of node, which has started this session. - pub master_node_id: NodeId, - /// Id of node, on which this session is running. - pub self_node_id: NodeId, - /// Count of all configured key server nodes. - pub configured_nodes_count: usize, - /// Count of all connected key server nodes. - pub connected_nodes_count: usize, + /// Key id. + pub id: SessionId, + /// Id of node, which has started this session. + pub master_node_id: NodeId, + /// Id of node, on which this session is running. + pub self_node_id: NodeId, + /// Count of all configured key server nodes. + pub configured_nodes_count: usize, + /// Count of all connected key server nodes. + pub connected_nodes_count: usize, } impl ShareChangeSessionMeta { - /// Convert to consensus session meta. `all_nodes_set` is the union of `old_nodes_set` && `new_nodes_set`. - pub fn into_consensus_meta(self, all_nodes_set_len: usize) -> Result { - Ok(SessionMeta { - id: self.id, - master_node_id: self.master_node_id, - self_node_id: self.self_node_id, - threshold: all_nodes_set_len.checked_sub(1).ok_or(Error::ConsensusUnreachable)?, - configured_nodes_count: self.configured_nodes_count, - connected_nodes_count: self.connected_nodes_count, - }) - } + /// Convert to consensus session meta. `all_nodes_set` is the union of `old_nodes_set` && `new_nodes_set`. 
+ pub fn into_consensus_meta(self, all_nodes_set_len: usize) -> Result { + Ok(SessionMeta { + id: self.id, + master_node_id: self.master_node_id, + self_node_id: self.self_node_id, + threshold: all_nodes_set_len + .checked_sub(1) + .ok_or(Error::ConsensusUnreachable)?, + configured_nodes_count: self.configured_nodes_count, + connected_nodes_count: self.connected_nodes_count, + }) + } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs index a0d4acdc1..25621d391 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs @@ -14,33 +14,44 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::collections::{BTreeSet, BTreeMap}; -use std::collections::btree_map::Entry; -use parking_lot::{Mutex, Condvar}; use ethereum_types::H256; use ethkey::{Public, Signature}; -use key_server_cluster::{Error, NodeId, SessionId, KeyStorage}; -use key_server_cluster::math; -use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::ClusterSession; -use key_server_cluster::message::{Message, ServersSetChangeMessage, - ConsensusMessageWithServersSet, InitializeConsensusSessionWithServersSet, - ServersSetChangeConsensusMessage, ConfirmConsensusInitialization, UnknownSessionsRequest, UnknownSessions, - ServersSetChangeShareAddMessage, ServersSetChangeError, ServersSetChangeCompleted, - ServersSetChangeDelegate, ServersSetChangeDelegateResponse, InitializeShareChangeSession, - ConfirmShareChangeSessionInitialization, KeyVersionNegotiationMessage, ShareChangeKeyVersionNegotiation}; -use key_server_cluster::share_change_session::{ShareChangeSession, ShareChangeSessionParams, ShareChangeSessionPlan, - 
prepare_share_change_session_plan}; -use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl, - SessionParams as KeyVersionNegotiationSessionParams, LargestSupportResultComputer, - SessionTransport as KeyVersionNegotiationTransport}; -use key_server_cluster::jobs::job_session::JobTransport; -use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}; -use key_server_cluster::jobs::unknown_sessions_job::{UnknownSessionsJob}; -use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; -use key_server_cluster::admin_sessions::sessions_queue::SessionsQueue; -use key_server_cluster::admin_sessions::ShareChangeSessionMeta; +use key_server_cluster::{ + admin_sessions::{sessions_queue::SessionsQueue, ShareChangeSessionMeta}, + cluster::Cluster, + cluster_sessions::ClusterSession, + jobs::{ + consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, + job_session::JobTransport, + servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}, + unknown_sessions_job::UnknownSessionsJob, + }, + key_version_negotiation_session::{ + LargestSupportResultComputer, SessionImpl as KeyVersionNegotiationSessionImpl, + SessionParams as KeyVersionNegotiationSessionParams, + SessionTransport as KeyVersionNegotiationTransport, + }, + math, + message::{ + ConfirmConsensusInitialization, ConfirmShareChangeSessionInitialization, + ConsensusMessageWithServersSet, InitializeConsensusSessionWithServersSet, + InitializeShareChangeSession, KeyVersionNegotiationMessage, Message, + ServersSetChangeCompleted, ServersSetChangeConsensusMessage, ServersSetChangeDelegate, + ServersSetChangeDelegateResponse, ServersSetChangeError, ServersSetChangeMessage, + ServersSetChangeShareAddMessage, ShareChangeKeyVersionNegotiation, UnknownSessions, + UnknownSessionsRequest, + }, + 
share_change_session::{ + prepare_share_change_session_plan, ShareChangeSession, ShareChangeSessionParams, + ShareChangeSessionPlan, + }, + Error, KeyStorage, NodeId, SessionId, +}; +use parking_lot::{Condvar, Mutex}; +use std::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + sync::Arc, +}; /// Maximal number of active share change sessions. const MAX_ACTIVE_KEY_SESSIONS: usize = 64; @@ -60,1392 +71,1762 @@ const MAX_ACTIVE_KEY_SESSIONS: usize = 64; /// 5.4) share change confirm is sent from sub_master to master /// 6) upon completing all known share change sessions && receiving confirmations for all unknown share change sessions, session completion signal is sent to all slave nodes && session is completed pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex, } /// Session state. #[derive(Debug, PartialEq)] enum SessionState { - /// Establishing consensus. - EstablishingConsensus, - /// Running share change sessions. - RunningShareChangeSessions, - /// Session is completed. - Finished, + /// Establishing consensus. + EstablishingConsensus, + /// Running share change sessions. + RunningShareChangeSessions, + /// Session is completed. + Finished, } /// Immutable session data. struct SessionCore { - /// Servers set change session meta (id is computed from new_nodes_set). - pub meta: ShareChangeSessionMeta, - /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, - /// Keys storage. - pub key_storage: Arc, - /// Session-level nonce. - pub nonce: u64, - /// All known nodes. - pub all_nodes_set: BTreeSet, - /// Administrator public key. - pub admin_public: Public, - /// Migration id (if this session is a part of auto-migration process). - pub migration_id: Option, - /// SessionImpl completion condvar. 
- pub completed: Condvar, + /// Servers set change session meta (id is computed from new_nodes_set). + pub meta: ShareChangeSessionMeta, + /// Cluster which allows this node to send messages to other nodes in the cluster. + pub cluster: Arc, + /// Keys storage. + pub key_storage: Arc, + /// Session-level nonce. + pub nonce: u64, + /// All known nodes. + pub all_nodes_set: BTreeSet, + /// Administrator public key. + pub admin_public: Public, + /// Migration id (if this session is a part of auto-migration process). + pub migration_id: Option, + /// SessionImpl completion condvar. + pub completed: Condvar, } /// Servers set change consensus session type. -type ServersSetChangeConsensusSession = ConsensusSession; +type ServersSetChangeConsensusSession = ConsensusSession< + ServersSetChangeAccessJob, + ServersSetChangeConsensusTransport, + UnknownSessionsJob, + UnknownSessionsJobTransport, +>; /// Mutable session data. struct SessionData { - /// Session state. - pub state: SessionState, - /// Consensus-based servers set change session. - pub consensus_session: Option, - /// New nodes set. - pub new_nodes_set: Option>, - /// Share change sessions queue (valid on master nodes only). - pub sessions_queue: Option, - /// Share change sessions key version negotiation. - pub negotiation_sessions: BTreeMap>, - /// Share change sessions initialization state (valid on master nodes only). - pub sessions_initialization_state: BTreeMap, - /// Sessions delegated to other nodes (valid on master node only). - pub delegated_key_sessions: BTreeMap, - /// Active share change sessions. - pub active_key_sessions: BTreeMap, - /// Servers set change result. - pub result: Option>, + /// Session state. + pub state: SessionState, + /// Consensus-based servers set change session. + pub consensus_session: Option, + /// New nodes set. + pub new_nodes_set: Option>, + /// Share change sessions queue (valid on master nodes only). 
+ pub sessions_queue: Option, + /// Share change sessions key version negotiation. + pub negotiation_sessions: BTreeMap< + SessionId, + KeyVersionNegotiationSessionImpl, + >, + /// Share change sessions initialization state (valid on master nodes only). + pub sessions_initialization_state: BTreeMap, + /// Sessions delegated to other nodes (valid on master node only). + pub delegated_key_sessions: BTreeMap, + /// Active share change sessions. + pub active_key_sessions: BTreeMap, + /// Servers set change result. + pub result: Option>, } /// Session initialization data. struct SessionInitializationData { - /// Master node id. - pub master: NodeId, - /// Nodes that have confirmed session initialization request. - pub confirmations: BTreeSet, + /// Master node id. + pub master: NodeId, + /// Nodes that have confirmed session initialization request. + pub confirmations: BTreeSet, } /// SessionImpl creation parameters pub struct SessionParams { - /// Session meta (artificial). - pub meta: ShareChangeSessionMeta, - /// Cluster. - pub cluster: Arc, - /// Keys storage. - pub key_storage: Arc, - /// Session nonce. - pub nonce: u64, - /// All known nodes. - pub all_nodes_set: BTreeSet, - /// Administrator public key. - pub admin_public: Public, - /// Migration id (if this session is a part of auto-migration process). - pub migration_id: Option, + /// Session meta (artificial). + pub meta: ShareChangeSessionMeta, + /// Cluster. + pub cluster: Arc, + /// Keys storage. + pub key_storage: Arc, + /// Session nonce. + pub nonce: u64, + /// All known nodes. + pub all_nodes_set: BTreeSet, + /// Administrator public key. + pub admin_public: Public, + /// Migration id (if this session is a part of auto-migration process). + pub migration_id: Option, } /// Servers set change consensus transport. struct ServersSetChangeConsensusTransport { - /// Session id. - id: SessionId, - /// Session-level nonce. - nonce: u64, - /// Migration id (if part of auto-migration process). 
- migration_id: Option, - /// Cluster. - cluster: Arc, + /// Session id. + id: SessionId, + /// Session-level nonce. + nonce: u64, + /// Migration id (if part of auto-migration process). + migration_id: Option, + /// Cluster. + cluster: Arc, } /// Unknown sessions job transport. struct UnknownSessionsJobTransport { - /// Session id. - id: SessionId, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, + /// Session id. + id: SessionId, + /// Session-level nonce. + nonce: u64, + /// Cluster. + cluster: Arc, } /// Key version negotiation transport. struct ServersSetChangeKeyVersionNegotiationTransport { - /// Session id. - id: SessionId, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, + /// Session id. + id: SessionId, + /// Session-level nonce. + nonce: u64, + /// Cluster. + cluster: Arc, } impl SessionImpl { - /// Create new servers set change session. - pub fn new(params: SessionParams) -> Result { - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - cluster: params.cluster, - key_storage: params.key_storage, - nonce: params.nonce, - all_nodes_set: params.all_nodes_set, - admin_public: params.admin_public, - migration_id: params.migration_id, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::EstablishingConsensus, - consensus_session: None, - new_nodes_set: None, - sessions_queue: None, - negotiation_sessions: BTreeMap::new(), - sessions_initialization_state: BTreeMap::new(), - delegated_key_sessions: BTreeMap::new(), - active_key_sessions: BTreeMap::new(), - result: None, - }), - }) - } + /// Create new servers set change session. 
+ pub fn new(params: SessionParams) -> Result { + Ok(SessionImpl { + core: SessionCore { + meta: params.meta, + cluster: params.cluster, + key_storage: params.key_storage, + nonce: params.nonce, + all_nodes_set: params.all_nodes_set, + admin_public: params.admin_public, + migration_id: params.migration_id, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + state: SessionState::EstablishingConsensus, + consensus_session: None, + new_nodes_set: None, + sessions_queue: None, + negotiation_sessions: BTreeMap::new(), + sessions_initialization_state: BTreeMap::new(), + delegated_key_sessions: BTreeMap::new(), + active_key_sessions: BTreeMap::new(), + result: None, + }), + }) + } - /// Get session id. - pub fn id(&self) -> &SessionId { - &self.core.meta.id - } + /// Get session id. + pub fn id(&self) -> &SessionId { + &self.core.meta.id + } - /// Get migration id. - pub fn migration_id(&self) -> Option<&H256> { - self.core.migration_id.as_ref() - } + /// Get migration id. + pub fn migration_id(&self) -> Option<&H256> { + self.core.migration_id.as_ref() + } - /// Wait for session completion. - pub fn wait(&self) -> Result<(), Error> { - Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) - .expect("wait_session returns Some if called without timeout; qed") - } + /// Wait for session completion. + pub fn wait(&self) -> Result<(), Error> { + Self::wait_session(&self.core.completed, &self.data, None, |data| { + data.result.clone() + }) + .expect("wait_session returns Some if called without timeout; qed") + } - /// Initialize servers set change session on master node. - pub fn initialize(&self, new_nodes_set: BTreeSet, all_set_signature: Signature, new_set_signature: Signature) -> Result<(), Error> { - check_nodes_set(&self.core.all_nodes_set, &new_nodes_set)?; + /// Initialize servers set change session on master node. 
+ pub fn initialize( + &self, + new_nodes_set: BTreeSet, + all_set_signature: Signature, + new_set_signature: Signature, + ) -> Result<(), Error> { + check_nodes_set(&self.core.all_nodes_set, &new_nodes_set)?; - let mut data = self.data.lock(); - if data.state != SessionState::EstablishingConsensus || data.consensus_session.is_some() { - return Err(Error::InvalidStateForRequest); - } + let mut data = self.data.lock(); + if data.state != SessionState::EstablishingConsensus || data.consensus_session.is_some() { + return Err(Error::InvalidStateForRequest); + } - let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len())?, - consensus_executor: ServersSetChangeAccessJob::new_on_master(self.core.admin_public.clone(), - self.core.all_nodes_set.clone(), - new_nodes_set.clone(), - all_set_signature, - new_set_signature), - consensus_transport: ServersSetChangeConsensusTransport { - id: self.core.meta.id.clone(), - nonce: self.core.nonce, - migration_id: self.core.migration_id.clone(), - cluster: self.core.cluster.clone(), - }, - })?; + let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: self + .core + .meta + .clone() + .into_consensus_meta(self.core.all_nodes_set.len())?, + consensus_executor: ServersSetChangeAccessJob::new_on_master( + self.core.admin_public.clone(), + self.core.all_nodes_set.clone(), + new_nodes_set.clone(), + all_set_signature, + new_set_signature, + ), + consensus_transport: ServersSetChangeConsensusTransport { + id: self.core.meta.id.clone(), + nonce: self.core.nonce, + migration_id: self.core.migration_id.clone(), + cluster: self.core.cluster.clone(), + }, + })?; - consensus_session.initialize(self.core.all_nodes_set.clone())?; + consensus_session.initialize(self.core.all_nodes_set.clone())?; - let is_finished = consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - data.consensus_session = 
Some(consensus_session); - data.new_nodes_set = Some(new_nodes_set); + let is_finished = consensus_session.state() == ConsensusSessionState::ConsensusEstablished; + data.consensus_session = Some(consensus_session); + data.new_nodes_set = Some(new_nodes_set); - // this is the case when all other nodes are isolated - if is_finished { - Self::complete_session(&self.core, &mut *data)?; - } + // this is the case when all other nodes are isolated + if is_finished { + Self::complete_session(&self.core, &mut *data)?; + } - Ok(()) - } + Ok(()) + } - /// Process servers set change message. - pub fn process_message(&self, sender: &NodeId, message: &ServersSetChangeMessage) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } + /// Process servers set change message. + pub fn process_message( + &self, + sender: &NodeId, + message: &ServersSetChangeMessage, + ) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } - match message { - &ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message) => - self.on_consensus_message(sender, message), - &ServersSetChangeMessage::UnknownSessionsRequest(ref message) => - self.on_unknown_sessions_requested(sender, message), - &ServersSetChangeMessage::UnknownSessions(ref message) => - self.on_unknown_sessions(sender, message), - &ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref message) => - self.on_key_version_negotiation(sender, message), - &ServersSetChangeMessage::InitializeShareChangeSession(ref message) => - self.on_initialize_share_change_session(sender, message), - &ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref message) => - self.on_share_change_session_confirmation(sender, message), - &ServersSetChangeMessage::ServersSetChangeDelegate(ref message) => - self.on_sessions_delegation(sender, message), - &ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref 
message) => - self.on_delegated_session_completed(sender, message), - &ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref message) => - self.on_share_add_message(sender, message), - &ServersSetChangeMessage::ServersSetChangeError(ref message) => { - self.on_session_error(sender, message.error.clone()); - Ok(()) - }, - &ServersSetChangeMessage::ServersSetChangeCompleted(ref message) => - self.on_session_completed(sender, message), - } - } + match message { + &ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message) => { + self.on_consensus_message(sender, message) + } + &ServersSetChangeMessage::UnknownSessionsRequest(ref message) => { + self.on_unknown_sessions_requested(sender, message) + } + &ServersSetChangeMessage::UnknownSessions(ref message) => { + self.on_unknown_sessions(sender, message) + } + &ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref message) => { + self.on_key_version_negotiation(sender, message) + } + &ServersSetChangeMessage::InitializeShareChangeSession(ref message) => { + self.on_initialize_share_change_session(sender, message) + } + &ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref message) => { + self.on_share_change_session_confirmation(sender, message) + } + &ServersSetChangeMessage::ServersSetChangeDelegate(ref message) => { + self.on_sessions_delegation(sender, message) + } + &ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref message) => { + self.on_delegated_session_completed(sender, message) + } + &ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref message) => { + self.on_share_add_message(sender, message) + } + &ServersSetChangeMessage::ServersSetChangeError(ref message) => { + self.on_session_error(sender, message.error.clone()); + Ok(()) + } + &ServersSetChangeMessage::ServersSetChangeCompleted(ref message) => { + self.on_session_completed(sender, message) + } + } + } - /// When consensus-related message is received. 
- pub fn on_consensus_message(&self, sender: &NodeId, message: &ServersSetChangeConsensusMessage) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); + /// When consensus-related message is received. + pub fn on_consensus_message( + &self, + sender: &NodeId, + message: &ServersSetChangeConsensusMessage, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); - // check state - let mut data = self.data.lock(); - if data.state != SessionState::EstablishingConsensus { - return Err(Error::InvalidStateForRequest); - } + // check state + let mut data = self.data.lock(); + if data.state != SessionState::EstablishingConsensus { + return Err(Error::InvalidStateForRequest); + } - // start slave consensus session if needed - if self.core.meta.self_node_id != self.core.meta.master_node_id { - if data.consensus_session.is_none() { - match &message.message { - &ConsensusMessageWithServersSet::InitializeConsensusSession(_) => { - data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len())?, - consensus_executor: ServersSetChangeAccessJob::new_on_slave(self.core.admin_public.clone()), - consensus_transport: ServersSetChangeConsensusTransport { - id: self.core.meta.id.clone(), - nonce: self.core.nonce, - migration_id: self.core.migration_id.clone(), - cluster: self.core.cluster.clone(), - }, - })?); - }, - _ => return Err(Error::InvalidStateForRequest), - } - } - } + // start slave consensus session if needed + if self.core.meta.self_node_id != self.core.meta.master_node_id { + if data.consensus_session.is_none() { + match &message.message { + &ConsensusMessageWithServersSet::InitializeConsensusSession(_) => { + data.consensus_session = + Some(ConsensusSession::new(ConsensusSessionParams { + meta: self + .core + .meta + .clone() + .into_consensus_meta(self.core.all_nodes_set.len())?, + consensus_executor: 
ServersSetChangeAccessJob::new_on_slave( + self.core.admin_public.clone(), + ), + consensus_transport: ServersSetChangeConsensusTransport { + id: self.core.meta.id.clone(), + nonce: self.core.nonce, + migration_id: self.core.migration_id.clone(), + cluster: self.core.cluster.clone(), + }, + })?); + } + _ => return Err(Error::InvalidStateForRequest), + } + } + } - // process consensus message - let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; - let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - match &message.message { - &ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => - consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?, - &ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref message) => - consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?, - } + // process consensus message + let consensus_session = data + .consensus_session + .as_mut() + .ok_or(Error::InvalidMessage)?; + let is_establishing_consensus = + consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + match &message.message { + &ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => { + consensus_session.on_consensus_partial_request( + sender, + ServersSetChangeAccessRequest::from(message), + )? + } + &ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref message) => { + consensus_session.on_consensus_partial_response(sender, message.is_confirmed)? 
+ } + } - // when consensus is established => request unknown sessions - let is_consensus_established = consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { - return Ok(()); - } + // when consensus is established => request unknown sessions + let is_consensus_established = + consensus_session.state() == ConsensusSessionState::ConsensusEstablished; + if self.core.meta.self_node_id != self.core.meta.master_node_id + || !is_establishing_consensus + || !is_consensus_established + { + return Ok(()); + } - let unknown_sessions_job = UnknownSessionsJob::new_on_master(self.core.key_storage.clone(), self.core.meta.self_node_id.clone()); - consensus_session.disseminate_jobs(unknown_sessions_job, self.unknown_sessions_transport(), false).map(|_| ()) - } + let unknown_sessions_job = UnknownSessionsJob::new_on_master( + self.core.key_storage.clone(), + self.core.meta.self_node_id.clone(), + ); + consensus_session + .disseminate_jobs( + unknown_sessions_job, + self.unknown_sessions_transport(), + false, + ) + .map(|_| ()) + } - /// When unknown sessions are requested. - pub fn on_unknown_sessions_requested(&self, sender: &NodeId, message: &UnknownSessionsRequest) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When unknown sessions are requested. 
+ pub fn on_unknown_sessions_requested( + &self, + sender: &NodeId, + message: &UnknownSessionsRequest, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - let mut data = self.data.lock(); + let mut data = self.data.lock(); - let new_nodes_set = { - let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; - let unknown_sessions_job = UnknownSessionsJob::new_on_slave(self.core.key_storage.clone()); - let unknown_sessions_transport = self.unknown_sessions_transport(); + let new_nodes_set = { + let consensus_session = data + .consensus_session + .as_mut() + .ok_or(Error::InvalidMessage)?; + let unknown_sessions_job = + UnknownSessionsJob::new_on_slave(self.core.key_storage.clone()); + let unknown_sessions_transport = self.unknown_sessions_transport(); - // and respond with unknown sessions - consensus_session.on_job_request(&sender, sender.clone(), unknown_sessions_job, unknown_sessions_transport)?; + // and respond with unknown sessions + consensus_session.on_job_request( + &sender, + sender.clone(), + unknown_sessions_job, + unknown_sessions_transport, + )?; - consensus_session.consensus_job().executor() + consensus_session.consensus_job().executor() .new_servers_set() .expect("consensus session is now completed; new_servers_set is intermediate result of consensus session; qed") .clone() - }; + }; - // update state - data.state = SessionState::RunningShareChangeSessions; - data.new_nodes_set = Some(new_nodes_set); + // update state + data.state = SessionState::RunningShareChangeSessions; + data.new_nodes_set = Some(new_nodes_set); - Ok(()) - } + Ok(()) + } - /// When unknown sessions are received. 
- pub fn on_unknown_sessions(&self, sender: &NodeId, message: &UnknownSessions) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When unknown sessions are received. + pub fn on_unknown_sessions( + &self, + sender: &NodeId, + message: &UnknownSessions, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - // check state - let mut data = self.data.lock(); - if data.state != SessionState::EstablishingConsensus { - return Err(Error::InvalidStateForRequest); - } + // check state + let mut data = self.data.lock(); + if data.state != SessionState::EstablishingConsensus { + return Err(Error::InvalidStateForRequest); + } - // process message - let unknown_sessions = { - let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; - consensus_session.on_job_response(sender, message.unknown_sessions.iter().cloned().map(Into::into).collect())?; - if consensus_session.state() != ConsensusSessionState::Finished { - return Ok(()); - } + // process message + let unknown_sessions = { + let consensus_session = data + .consensus_session + .as_mut() + .ok_or(Error::InvalidMessage)?; + consensus_session.on_job_response( + sender, + message + .unknown_sessions + .iter() + .cloned() + .map(Into::into) + .collect(), + )?; + if consensus_session.state() != ConsensusSessionState::Finished { + return Ok(()); + } - // all nodes have reported their unknown sessions - // => we are ready to start adding/moving/removing shares - consensus_session.result()? - }; + // all nodes have reported their unknown sessions + // => we are ready to start adding/moving/removing shares + consensus_session.result()? 
+ }; - // initialize sessions queue - data.state = SessionState::RunningShareChangeSessions; - data.sessions_queue = Some(SessionsQueue::new(&self.core.key_storage, unknown_sessions.keys().cloned().collect())); + // initialize sessions queue + data.state = SessionState::RunningShareChangeSessions; + data.sessions_queue = Some(SessionsQueue::new( + &self.core.key_storage, + unknown_sessions.keys().cloned().collect(), + )); - // and disseminate session initialization requests - Self::disseminate_session_initialization_requests(&self.core, &mut *data) - } + // and disseminate session initialization requests + Self::disseminate_session_initialization_requests(&self.core, &mut *data) + } - /// When key version negotiation message is received. - pub fn on_key_version_negotiation(&self, sender: &NodeId, message: &ShareChangeKeyVersionNegotiation) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When key version negotiation message is received. 
+ pub fn on_key_version_negotiation( + &self, + sender: &NodeId, + message: &ShareChangeKeyVersionNegotiation, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } - // process message - match &message.message { - &KeyVersionNegotiationMessage::RequestKeyVersions(ref message) if sender == &self.core.meta.master_node_id => { - let key_id = message.session.clone().into(); - let key_share = self.core.key_storage.get(&key_id)?; - let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { - meta: ShareChangeSessionMeta { - id: key_id.clone(), - self_node_id: self.core.meta.self_node_id.clone(), - master_node_id: sender.clone(), - configured_nodes_count: self.core.meta.configured_nodes_count, - connected_nodes_count: self.core.meta.connected_nodes_count, - }, - sub_session: message.sub_session.clone().into(), - key_share: key_share, - result_computer: Arc::new(LargestSupportResultComputer {}), - transport: ServersSetChangeKeyVersionNegotiationTransport { - id: self.core.meta.id.clone(), - nonce: self.core.nonce, - cluster: self.core.cluster.clone(), - }, - nonce: message.session_nonce, - }); - negotiation_session.on_key_versions_request(sender, message)?; - debug_assert!(negotiation_session.is_finished()); - Ok(()) - }, - &KeyVersionNegotiationMessage::KeyVersions(ref message) if self.core.meta.self_node_id == self.core.meta.master_node_id => { - let key_id = message.session.clone().into(); - { - let negotiation_session = data.negotiation_sessions.get(&key_id).ok_or(Error::InvalidMessage)?; - 
negotiation_session.on_key_versions(sender, message)?; - if !negotiation_session.is_finished() { - return Ok(()); - } - } + // process message + match &message.message { + &KeyVersionNegotiationMessage::RequestKeyVersions(ref message) + if sender == &self.core.meta.master_node_id => + { + let key_id = message.session.clone().into(); + let key_share = self.core.key_storage.get(&key_id)?; + let negotiation_session = + KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { + meta: ShareChangeSessionMeta { + id: key_id.clone(), + self_node_id: self.core.meta.self_node_id.clone(), + master_node_id: sender.clone(), + configured_nodes_count: self.core.meta.configured_nodes_count, + connected_nodes_count: self.core.meta.connected_nodes_count, + }, + sub_session: message.sub_session.clone().into(), + key_share: key_share, + result_computer: Arc::new(LargestSupportResultComputer {}), + transport: ServersSetChangeKeyVersionNegotiationTransport { + id: self.core.meta.id.clone(), + nonce: self.core.nonce, + cluster: self.core.cluster.clone(), + }, + nonce: message.session_nonce, + }); + negotiation_session.on_key_versions_request(sender, message)?; + debug_assert!(negotiation_session.is_finished()); + Ok(()) + } + &KeyVersionNegotiationMessage::KeyVersions(ref message) + if self.core.meta.self_node_id == self.core.meta.master_node_id => + { + let key_id = message.session.clone().into(); + { + let negotiation_session = data + .negotiation_sessions + .get(&key_id) + .ok_or(Error::InvalidMessage)?; + negotiation_session.on_key_versions(sender, message)?; + if !negotiation_session.is_finished() { + return Ok(()); + } + } - // else prepare plan && start share change session - if !Self::initialize_share_change_session(&self.core, &mut *data, key_id)? { - Self::disseminate_session_initialization_requests(&self.core, &mut *data)?; - } + // else prepare plan && start share change session + if !Self::initialize_share_change_session(&self.core, &mut *data, key_id)? 
{ + Self::disseminate_session_initialization_requests(&self.core, &mut *data)?; + } - Ok(()) - }, - _ => Err(Error::InvalidMessage), - } - } + Ok(()) + } + _ => Err(Error::InvalidMessage), + } + } - /// When share change session initialization is requested. - pub fn on_initialize_share_change_session(&self, sender: &NodeId, message: &InitializeShareChangeSession) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When share change session initialization is requested. + pub fn on_initialize_share_change_session( + &self, + sender: &NodeId, + message: &InitializeShareChangeSession, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - // we only accept delegation requests from master node - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } + // we only accept delegation requests from master node + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } - // insert new session - let key_id = message.key_id.clone().into(); - match data.active_key_sessions.contains_key(&key_id) { - true => return Err(Error::InvalidMessage), - false => { - let master_plan = ShareChangeSessionPlan { - key_version: message.version.clone().into(), - version_holders: message.version_holders.iter().cloned().map(Into::into).collect(), - consensus_group: message.consensus_group.iter().cloned().map(Into::into).collect(), - new_nodes_map: message.new_nodes_map.iter().map(|(k, v)| (k.clone().into(), 
v.clone().map(Into::into))).collect(), - }; + // insert new session + let key_id = message.key_id.clone().into(); + match data.active_key_sessions.contains_key(&key_id) { + true => return Err(Error::InvalidMessage), + false => { + let master_plan = ShareChangeSessionPlan { + key_version: message.version.clone().into(), + version_holders: message + .version_holders + .iter() + .cloned() + .map(Into::into) + .collect(), + consensus_group: message + .consensus_group + .iter() + .cloned() + .map(Into::into) + .collect(), + new_nodes_map: message + .new_nodes_map + .iter() + .map(|(k, v)| (k.clone().into(), v.clone().map(Into::into))) + .collect(), + }; - // if master plan is empty, it is cheating - if master_plan.is_empty() { - return Err(Error::InvalidMessage); - } + // if master plan is empty, it is cheating + if master_plan.is_empty() { + return Err(Error::InvalidMessage); + } - // on nodes, holding selected key share version, we could check if master node plan is correct - let master_node_id = message.master_node_id.clone().into(); - if let Some(key_share) = self.core.key_storage.get(&key_id)? { - let version = message.version.clone().into(); - let key_share_owners = message.version_holders.iter().cloned().map(Into::into).collect(); - let new_nodes_set = data.new_nodes_set.as_ref() + // on nodes, holding selected key share version, we could check if master node plan is correct + let master_node_id = message.master_node_id.clone().into(); + if let Some(key_share) = self.core.key_storage.get(&key_id)? 
{ + let version = message.version.clone().into(); + let key_share_owners = message + .version_holders + .iter() + .cloned() + .map(Into::into) + .collect(); + let new_nodes_set = data.new_nodes_set.as_ref() .expect("new_nodes_set is filled during consensus establishing; change sessions are running after this; qed"); - let local_plan = prepare_share_change_session_plan( - &self.core.all_nodes_set, - key_share.threshold, - &key_id, - version, - &master_node_id, - &key_share_owners, - new_nodes_set)?; + let local_plan = prepare_share_change_session_plan( + &self.core.all_nodes_set, + key_share.threshold, + &key_id, + version, + &master_node_id, + &key_share_owners, + new_nodes_set, + )?; - if local_plan.new_nodes_map.keys().collect::>() != master_plan.new_nodes_map.keys().collect::>() { - return Err(Error::InvalidMessage); - } - } + if local_plan.new_nodes_map.keys().collect::>() + != master_plan.new_nodes_map.keys().collect::>() + { + return Err(Error::InvalidMessage); + } + } - let session = Self::create_share_change_session(&self.core, key_id, master_node_id, master_plan)?; - if !session.is_finished() { - data.active_key_sessions.insert(key_id.clone(), session); - } - }, - }; + let session = Self::create_share_change_session( + &self.core, + key_id, + master_node_id, + master_plan, + )?; + if !session.is_finished() { + data.active_key_sessions.insert(key_id.clone(), session); + } + } + }; - // send confirmation - self.core.cluster.send(sender, Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ConfirmShareChangeSessionInitialization { - session: message.session.clone(), - session_nonce: message.session_nonce.clone(), - key_id: message.key_id.clone(), - }))) - } + // send confirmation + self.core.cluster.send( + sender, + Message::ServersSetChange( + ServersSetChangeMessage::ConfirmShareChangeSessionInitialization( + ConfirmShareChangeSessionInitialization { + session: message.session.clone(), + session_nonce: 
message.session_nonce.clone(), + key_id: message.key_id.clone(), + }, + ), + ), + ) + } - /// When share change session initialization is confirmed. - pub fn on_share_change_session_confirmation(&self, sender: &NodeId, message: &ConfirmShareChangeSessionInitialization) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When share change session initialization is confirmed. + pub fn on_share_change_session_confirmation( + &self, + sender: &NodeId, + message: &ConfirmShareChangeSessionInitialization, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - // we only accept delegation requests from master node - if self.core.meta.self_node_id != self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } + // we only accept delegation requests from master node + if self.core.meta.self_node_id != self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } - // add confirmation - let key_id = message.key_id.clone().into(); - let session_master = { - let session_init_data = data.sessions_initialization_state.get_mut(&key_id).ok_or(Error::InvalidMessage)?; - if !session_init_data.confirmations.remove(sender) { - return Err(Error::InvalidMessage); - } + // add confirmation + let key_id = message.key_id.clone().into(); + let session_master = { + let session_init_data = data + .sessions_initialization_state + .get_mut(&key_id) + .ok_or(Error::InvalidMessage)?; + if !session_init_data.confirmations.remove(sender) { + return Err(Error::InvalidMessage); + } - 
if !session_init_data.confirmations.is_empty() { - return Ok(()); - } + if !session_init_data.confirmations.is_empty() { + return Ok(()); + } - session_init_data.master.clone() - }; + session_init_data.master.clone() + }; - // and start/delegate session if required - data.sessions_initialization_state.remove(&key_id); - if self.core.meta.self_node_id != session_master { - data.delegated_key_sessions.insert(key_id, session_master.clone()); - return self.core.cluster.send(&session_master, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(ServersSetChangeDelegate { - session: self.core.meta.id.clone().into(), - session_nonce: self.core.nonce, - key_id: key_id.into(), - }))); - } + // and start/delegate session if required + data.sessions_initialization_state.remove(&key_id); + if self.core.meta.self_node_id != session_master { + data.delegated_key_sessions + .insert(key_id, session_master.clone()); + return self.core.cluster.send( + &session_master, + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate( + ServersSetChangeDelegate { + session: self.core.meta.id.clone().into(), + session_nonce: self.core.nonce, + key_id: key_id.into(), + }, + )), + ); + } - // initialize share change session - { - let key_session = data.active_key_sessions.get_mut(&key_id).ok_or(Error::InvalidMessage)?; - key_session.initialize()?; - if !key_session.is_finished() { - return Ok(()); - } - } + // initialize share change session + { + let key_session = data + .active_key_sessions + .get_mut(&key_id) + .ok_or(Error::InvalidMessage)?; + key_session.initialize()?; + if !key_session.is_finished() { + return Ok(()); + } + } - // complete key session - Self::complete_key_session(&self.core, &mut *data, true, key_id) - } + // complete key session + Self::complete_key_session(&self.core, &mut *data, true, key_id) + } - /// When sessions execution is delegated to this node. 
- pub fn on_sessions_delegation(&self, sender: &NodeId, message: &ServersSetChangeDelegate) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When sessions execution is delegated to this node. + pub fn on_sessions_delegation( + &self, + sender: &NodeId, + message: &ServersSetChangeDelegate, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - // we only accept delegation requests from master node - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } + // we only accept delegation requests from master node + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } - // start session - let key_session = data.active_key_sessions.get_mut(&message.key_id.clone().into()).ok_or(Error::InvalidMessage)?; - key_session.initialize() - } + // start session + let key_session = data + .active_key_sessions + .get_mut(&message.key_id.clone().into()) + .ok_or(Error::InvalidMessage)?; + key_session.initialize() + } - /// When delegated session execution is completed. - pub fn on_delegated_session_completed(&self, sender: &NodeId, message: &ServersSetChangeDelegateResponse) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When delegated session execution is completed. 
+ pub fn on_delegated_session_completed( + &self, + sender: &NodeId, + message: &ServersSetChangeDelegateResponse, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - // we only accept delegation requests on master node - if self.core.meta.self_node_id != self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } + // we only accept delegation requests on master node + if self.core.meta.self_node_id != self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } - // forget delegated session - let key_id = message.key_id.clone().into(); - match data.delegated_key_sessions.entry(key_id) { - Entry::Occupied(entry) => if entry.get() == sender { - entry.remove() - } else { - return Err(Error::InvalidMessage); - }, - _ => return Err(Error::InvalidMessage), - }; + // forget delegated session + let key_id = message.key_id.clone().into(); + match data.delegated_key_sessions.entry(key_id) { + Entry::Occupied(entry) => { + if entry.get() == sender { + entry.remove() + } else { + return Err(Error::InvalidMessage); + } + } + _ => return Err(Error::InvalidMessage), + }; - // check if we need to complete the whole change session - Self::disseminate_session_initialization_requests(&self.core, &mut *data) - } + // check if we need to complete the whole change session + Self::disseminate_session_initialization_requests(&self.core, &mut *data) + } - /// When share add message is received. 
- pub fn on_share_add_message(&self, sender: &NodeId, message: &ServersSetChangeShareAddMessage) -> Result<(), Error> { - self.on_share_change_message(message.message.session_id().clone().into(), |session| - session.on_share_add_message(sender, &message.message)) - } + /// When share add message is received. + pub fn on_share_add_message( + &self, + sender: &NodeId, + message: &ServersSetChangeShareAddMessage, + ) -> Result<(), Error> { + self.on_share_change_message(message.message.session_id().clone().into(), |session| { + session.on_share_add_message(sender, &message.message) + }) + } - /// When session completion message is received. - pub fn on_session_completed(&self, sender: &NodeId, message: &ServersSetChangeCompleted) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When session completion message is received. + pub fn on_session_completed( + &self, + sender: &NodeId, + message: &ServersSetChangeCompleted, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } - let mut data = self.data.lock(); - data.result = Some(Ok(())); - if data.active_key_sessions.len() != 0 { - return Err(Error::TooEarlyForRequest); - } + let mut data = self.data.lock(); + data.result = Some(Ok(())); + if data.active_key_sessions.len() != 0 { + return Err(Error::TooEarlyForRequest); + } - // if we are on the set of nodes that are being removed from the cluster, let's clear database - if !data.new_nodes_set.as_ref() + // if we are on the set of nodes that are being removed from the cluster, let's clear database + if !data.new_nodes_set.as_ref() .expect("new_nodes_set is filled during initialization; session is completed after 
initialization; qed") .contains(&self.core.meta.self_node_id) { self.core.key_storage.clear()?; } - data.state = SessionState::Finished; - self.core.completed.notify_all(); + data.state = SessionState::Finished; + self.core.completed.notify_all(); - Ok(()) - } + Ok(()) + } - /// Create unknown sessions transport. - fn unknown_sessions_transport(&self) -> UnknownSessionsJobTransport { - UnknownSessionsJobTransport { - id: self.core.meta.id.clone(), - nonce: self.core.nonce, - cluster: self.core.cluster.clone(), - } - } + /// Create unknown sessions transport. + fn unknown_sessions_transport(&self) -> UnknownSessionsJobTransport { + UnknownSessionsJobTransport { + id: self.core.meta.id.clone(), + nonce: self.core.nonce, + cluster: self.core.cluster.clone(), + } + } - /// When share change message is received. - fn on_share_change_message Result<(), Error>>(&self, session_id: SessionId, message_processor: F) -> Result<(), Error> { - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } + /// When share change message is received. 
+ fn on_share_change_message Result<(), Error>>( + &self, + session_id: SessionId, + message_processor: F, + ) -> Result<(), Error> { + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } - // process message - let (is_finished, is_master) = { - let key_session = data.active_key_sessions.get_mut(&session_id).ok_or(Error::InvalidMessage)?; - message_processor(key_session)?; - (key_session.is_finished(), key_session.is_master()) - }; + // process message + let (is_finished, is_master) = { + let key_session = data + .active_key_sessions + .get_mut(&session_id) + .ok_or(Error::InvalidMessage)?; + message_processor(key_session)?; + (key_session.is_finished(), key_session.is_master()) + }; - if is_finished { - Self::complete_key_session(&self.core, &mut *data, is_master, session_id)?; - } + if is_finished { + Self::complete_key_session(&self.core, &mut *data, is_master, session_id)?; + } - Ok(()) - } + Ok(()) + } - /// Create share change session. - fn create_share_change_session(core: &SessionCore, key_id: SessionId, master_node_id: NodeId, session_plan: ShareChangeSessionPlan) -> Result { - ShareChangeSession::new(ShareChangeSessionParams { - session_id: core.meta.id.clone(), - nonce: core.nonce, - meta: ShareChangeSessionMeta { - id: key_id, - self_node_id: core.meta.self_node_id.clone(), - master_node_id: master_node_id, - configured_nodes_count: core.meta.configured_nodes_count, - connected_nodes_count: core.meta.connected_nodes_count, - }, - cluster: core.cluster.clone(), - key_storage: core.key_storage.clone(), - plan: session_plan, - }) - } + /// Create share change session. 
+ fn create_share_change_session( + core: &SessionCore, + key_id: SessionId, + master_node_id: NodeId, + session_plan: ShareChangeSessionPlan, + ) -> Result { + ShareChangeSession::new(ShareChangeSessionParams { + session_id: core.meta.id.clone(), + nonce: core.nonce, + meta: ShareChangeSessionMeta { + id: key_id, + self_node_id: core.meta.self_node_id.clone(), + master_node_id: master_node_id, + configured_nodes_count: core.meta.configured_nodes_count, + connected_nodes_count: core.meta.connected_nodes_count, + }, + cluster: core.cluster.clone(), + key_storage: core.key_storage.clone(), + plan: session_plan, + }) + } - /// Disseminate session initialization requests. - fn disseminate_session_initialization_requests(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { - debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); - if data.sessions_queue.is_some() { - let number_of_sessions_active = data.active_key_sessions.len() - + data.delegated_key_sessions.len() - + data.negotiation_sessions.len(); - let mut number_of_sessions_to_start = MAX_ACTIVE_KEY_SESSIONS.saturating_sub(number_of_sessions_active); - while number_of_sessions_to_start > 0 { - let key_id = match data.sessions_queue.as_mut().expect("checked before beginning of the loop; qed").next() { - None => break, // complete session - Some(Err(e)) => return Err(e), - Some(Ok(key_id)) => key_id, - }; + /// Disseminate session initialization requests. 
+ fn disseminate_session_initialization_requests( + core: &SessionCore, + data: &mut SessionData, + ) -> Result<(), Error> { + debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); + if data.sessions_queue.is_some() { + let number_of_sessions_active = data.active_key_sessions.len() + + data.delegated_key_sessions.len() + + data.negotiation_sessions.len(); + let mut number_of_sessions_to_start = + MAX_ACTIVE_KEY_SESSIONS.saturating_sub(number_of_sessions_active); + while number_of_sessions_to_start > 0 { + let key_id = match data + .sessions_queue + .as_mut() + .expect("checked before beginning of the loop; qed") + .next() + { + None => break, // complete session + Some(Err(e)) => return Err(e), + Some(Ok(key_id)) => key_id, + }; - let key_share = core.key_storage.get(&key_id)?; - let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { - meta: ShareChangeSessionMeta { - id: key_id, - self_node_id: core.meta.self_node_id.clone(), - master_node_id: core.meta.self_node_id.clone(), - configured_nodes_count: core.meta.configured_nodes_count, - connected_nodes_count: core.meta.connected_nodes_count, - }, - sub_session: math::generate_random_scalar()?, - key_share: key_share, - result_computer: Arc::new(LargestSupportResultComputer {}), // TODO [Opt]: could use modified Fast version - transport: ServersSetChangeKeyVersionNegotiationTransport { - id: core.meta.id.clone(), - nonce: core.nonce, - cluster: core.cluster.clone(), - }, - nonce: 0, - }); - negotiation_session.initialize(core.cluster.nodes())?; - if !negotiation_session.is_finished() { - data.negotiation_sessions.insert(key_id, negotiation_session); - continue; - } + let key_share = core.key_storage.get(&key_id)?; + let negotiation_session = + KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { + meta: ShareChangeSessionMeta { + id: key_id, + self_node_id: core.meta.self_node_id.clone(), + master_node_id: 
core.meta.self_node_id.clone(), + configured_nodes_count: core.meta.configured_nodes_count, + connected_nodes_count: core.meta.connected_nodes_count, + }, + sub_session: math::generate_random_scalar()?, + key_share: key_share, + result_computer: Arc::new(LargestSupportResultComputer {}), // TODO [Opt]: could use modified Fast version + transport: ServersSetChangeKeyVersionNegotiationTransport { + id: core.meta.id.clone(), + nonce: core.nonce, + cluster: core.cluster.clone(), + }, + nonce: 0, + }); + negotiation_session.initialize(core.cluster.nodes())?; + if !negotiation_session.is_finished() { + data.negotiation_sessions + .insert(key_id, negotiation_session); + continue; + } - if !Self::initialize_share_change_session(core, data, key_id)? { - continue; - } + if !Self::initialize_share_change_session(core, data, key_id)? { + continue; + } - number_of_sessions_to_start = number_of_sessions_to_start - 1; - } + number_of_sessions_to_start = number_of_sessions_to_start - 1; + } - // if iteration is not yet finished => return - if number_of_sessions_to_start == 0 { - return Ok(()); - } - } + // if iteration is not yet finished => return + if number_of_sessions_to_start == 0 { + return Ok(()); + } + } - // iteration is finished => complete session - if data.state != SessionState::Finished { - data.sessions_queue = None; - if data.active_key_sessions.len() == 0 && - data.delegated_key_sessions.len() == 0 && - data.negotiation_sessions.len() == 0 { - Self::complete_session(core, data)?; - } - } + // iteration is finished => complete session + if data.state != SessionState::Finished { + data.sessions_queue = None; + if data.active_key_sessions.len() == 0 + && data.delegated_key_sessions.len() == 0 + && data.negotiation_sessions.len() == 0 + { + Self::complete_session(core, data)?; + } + } - Ok(()) - } + Ok(()) + } - /// Initialize share change session. 
- fn initialize_share_change_session(core: &SessionCore, data: &mut SessionData, key_id: SessionId) -> Result { - // get selected version && old nodes set from key negotiation session - let negotiation_session = data.negotiation_sessions.remove(&key_id) - .expect("share change session is only initialized when negotiation is completed; qed"); - let (selected_version, selected_master) = negotiation_session + /// Initialize share change session. + fn initialize_share_change_session( + core: &SessionCore, + data: &mut SessionData, + key_id: SessionId, + ) -> Result { + // get selected version && old nodes set from key negotiation session + let negotiation_session = data + .negotiation_sessions + .remove(&key_id) + .expect("share change session is only initialized when negotiation is completed; qed"); + let (selected_version, selected_master) = negotiation_session .wait()? .expect("initialize_share_change_session is only called on share change master; negotiation session completes with some on master; qed"); - let selected_version_holders = negotiation_session.version_holders(&selected_version)?; - let selected_version_threshold = negotiation_session.common_key_data()?.threshold; + let selected_version_holders = negotiation_session.version_holders(&selected_version)?; + let selected_version_threshold = negotiation_session.common_key_data()?.threshold; - // prepare session change plan && check if something needs to be changed - let old_nodes_set = selected_version_holders; - let new_nodes_set = data.new_nodes_set.as_ref() + // prepare session change plan && check if something needs to be changed + let old_nodes_set = selected_version_holders; + let new_nodes_set = data.new_nodes_set.as_ref() .expect("this method is called after consensus estabished; new_nodes_set is a result of consensus session; qed"); - let session_plan = prepare_share_change_session_plan(&core.all_nodes_set, - selected_version_threshold, - &key_id, - selected_version.clone(), - &selected_master, - 
&old_nodes_set, - new_nodes_set)?; - if session_plan.is_empty() { - return Ok(false); - } + let session_plan = prepare_share_change_session_plan( + &core.all_nodes_set, + selected_version_threshold, + &key_id, + selected_version.clone(), + &selected_master, + &old_nodes_set, + new_nodes_set, + )?; + if session_plan.is_empty() { + return Ok(false); + } - // send key session initialization requests - let mut confirmations: BTreeSet<_> = session_plan.new_nodes_map.keys().cloned().collect(); - let need_create_session = confirmations.remove(&core.meta.self_node_id); - let initialization_message = Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(InitializeShareChangeSession { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - key_id: key_id.clone().into(), - version: selected_version.into(), - version_holders: old_nodes_set.iter().cloned().map(Into::into).collect(), - master_node_id: selected_master.clone().into(), - consensus_group: session_plan.consensus_group.iter().cloned().map(Into::into).collect(), - new_nodes_map: session_plan.new_nodes_map.iter() - .map(|(n, nid)| (n.clone().into(), nid.clone().map(Into::into))) - .collect(), - })); - for node in &confirmations { - core.cluster.send(&node, initialization_message.clone())?; - } + // send key session initialization requests + let mut confirmations: BTreeSet<_> = session_plan.new_nodes_map.keys().cloned().collect(); + let need_create_session = confirmations.remove(&core.meta.self_node_id); + let initialization_message = Message::ServersSetChange( + ServersSetChangeMessage::InitializeShareChangeSession(InitializeShareChangeSession { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + key_id: key_id.clone().into(), + version: selected_version.into(), + version_holders: old_nodes_set.iter().cloned().map(Into::into).collect(), + master_node_id: selected_master.clone().into(), + consensus_group: session_plan + .consensus_group + .iter() + .cloned() + 
.map(Into::into) + .collect(), + new_nodes_map: session_plan + .new_nodes_map + .iter() + .map(|(n, nid)| (n.clone().into(), nid.clone().map(Into::into))) + .collect(), + }), + ); + for node in &confirmations { + core.cluster.send(&node, initialization_message.clone())?; + } - // create session on this node if required - if need_create_session { - data.active_key_sessions.insert(key_id.clone(), Self::create_share_change_session(core, key_id, - selected_master.clone(), - session_plan)?); - } + // create session on this node if required + if need_create_session { + data.active_key_sessions.insert( + key_id.clone(), + Self::create_share_change_session( + core, + key_id, + selected_master.clone(), + session_plan, + )?, + ); + } - // initialize session if required - let wait_for_confirmations = !confirmations.is_empty(); - if !wait_for_confirmations { - data.active_key_sessions.get_mut(&key_id) + // initialize session if required + let wait_for_confirmations = !confirmations.is_empty(); + if !wait_for_confirmations { + data.active_key_sessions.get_mut(&key_id) .expect("!wait_for_confirmations is true only if this is the only session participant; if this is session participant, session is created above; qed") .initialize()?; - } else { - data.sessions_initialization_state.insert(key_id, SessionInitializationData { - master: selected_master, - confirmations: confirmations, - }); - } + } else { + data.sessions_initialization_state.insert( + key_id, + SessionInitializationData { + master: selected_master, + confirmations: confirmations, + }, + ); + } - Ok(true) - } + Ok(true) + } - /// Return delegated session to master. 
- fn return_delegated_session(core: &SessionCore, key_id: &SessionId) -> Result<(), Error> { - assert!(core.meta.self_node_id != core.meta.master_node_id); - core.cluster.send(&core.meta.master_node_id, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(ServersSetChangeDelegateResponse { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - key_id: key_id.clone().into(), - }))) - } + /// Return delegated session to master. + fn return_delegated_session(core: &SessionCore, key_id: &SessionId) -> Result<(), Error> { + assert!(core.meta.self_node_id != core.meta.master_node_id); + core.cluster.send( + &core.meta.master_node_id, + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse( + ServersSetChangeDelegateResponse { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + key_id: key_id.clone().into(), + }, + )), + ) + } - /// Complete key session. - fn complete_key_session(core: &SessionCore, data: &mut SessionData, is_master: bool, session_id: SessionId) -> Result<(), Error> { - data.active_key_sessions.remove(&session_id); - let is_general_master = core.meta.self_node_id == core.meta.master_node_id; - if is_master && !is_general_master { - Self::return_delegated_session(core, &session_id)?; - } - if is_general_master { - Self::disseminate_session_initialization_requests(core, data)?; - } + /// Complete key session. 
+ fn complete_key_session( + core: &SessionCore, + data: &mut SessionData, + is_master: bool, + session_id: SessionId, + ) -> Result<(), Error> { + data.active_key_sessions.remove(&session_id); + let is_general_master = core.meta.self_node_id == core.meta.master_node_id; + if is_master && !is_general_master { + Self::return_delegated_session(core, &session_id)?; + } + if is_general_master { + Self::disseminate_session_initialization_requests(core, data)?; + } - if data.result.is_some() && data.active_key_sessions.len() == 0 { - data.state = SessionState::Finished; - core.completed.notify_all(); - } + if data.result.is_some() && data.active_key_sessions.len() == 0 { + data.state = SessionState::Finished; + core.completed.notify_all(); + } - Ok(()) - } + Ok(()) + } - /// Complete servers set change session. - fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { - debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); - - // send completion notification - core.cluster.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(ServersSetChangeCompleted { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - })))?; + /// Complete servers set change session. 
+ fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); - // if we are on the set of nodes that are being removed from the cluster, let's clear database - if !data.new_nodes_set.as_ref() + // send completion notification + core.cluster.broadcast(Message::ServersSetChange( + ServersSetChangeMessage::ServersSetChangeCompleted(ServersSetChangeCompleted { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + }), + ))?; + + // if we are on the set of nodes that are being removed from the cluster, let's clear database + if !data.new_nodes_set.as_ref() .expect("new_nodes_set is filled during initialization; session is completed after initialization; qed") .contains(&core.meta.self_node_id) { core.key_storage.clear()?; } - data.state = SessionState::Finished; - data.result = Some(Ok(())); - core.completed.notify_all(); + data.state = SessionState::Finished; + data.result = Some(Ok(())); + core.completed.notify_all(); - Ok(()) - } + Ok(()) + } } impl ClusterSession for SessionImpl { - type Id = SessionId; + type Id = SessionId; - fn type_name() -> &'static str { - "servers set change" - } + fn type_name() -> &'static str { + "servers set change" + } - fn id(&self) -> SessionId { - self.core.meta.id.clone() - } + fn id(&self) -> SessionId { + self.core.meta.id.clone() + } - fn is_finished(&self) -> bool { - self.data.lock().state == SessionState::Finished - } + fn is_finished(&self) -> bool { + self.data.lock().state == SessionState::Finished + } - fn on_session_timeout(&self) { - self.on_session_error(&self.core.meta.self_node_id, Error::NodeDisconnected); - } + fn on_session_timeout(&self) { + self.on_session_error(&self.core.meta.self_node_id, Error::NodeDisconnected); + } - fn on_node_timeout(&self, node: &NodeId) { - self.on_session_error(node, Error::NodeDisconnected); - } + fn on_node_timeout(&self, node: &NodeId) { + self.on_session_error(node, 
Error::NodeDisconnected); + } - fn on_session_error(&self, node: &NodeId, error: Error) { - // error in generation session is considered fatal - // => broadcast error if error occured on this node - if *node == self.core.meta.self_node_id { - // do not bother processing send error, as we already processing error - let _ = self.core.cluster.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(ServersSetChangeError { - session: self.core.meta.id.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - }))); - } + fn on_session_error(&self, node: &NodeId, error: Error) { + // error in generation session is considered fatal + // => broadcast error if error occured on this node + if *node == self.core.meta.self_node_id { + // do not bother processing send error, as we already processing error + let _ = self.core.cluster.broadcast(Message::ServersSetChange( + ServersSetChangeMessage::ServersSetChangeError(ServersSetChangeError { + session: self.core.meta.id.clone().into(), + session_nonce: self.core.nonce, + error: error.clone().into(), + }), + )); + } - let mut data = self.data.lock(); + let mut data = self.data.lock(); - warn!(target: "secretstore_net", "{}: servers set change session failed: {} on {}", + warn!(target: "secretstore_net", "{}: servers set change session failed: {} on {}", self.core.meta.self_node_id, error, node); - data.state = SessionState::Finished; - data.result = Some(Err(error)); - self.core.completed.notify_all(); - } + data.state = SessionState::Finished; + data.result = Some(Err(error)); + self.core.completed.notify_all(); + } - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::ServersSetChange(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } + fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { + match 
*message { + Message::ServersSetChange(ref message) => self.process_message(sender, message), + _ => unreachable!("cluster checks message to be correct before passing; qed"), + } + } } impl JobTransport for ServersSetChangeConsensusTransport { - type PartialJobRequest=ServersSetChangeAccessRequest; - type PartialJobResponse=bool; + type PartialJobRequest = ServersSetChangeAccessRequest; + type PartialJobResponse = bool; - fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> { - self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ServersSetChangeConsensusMessage { - session: self.id.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessageWithServersSet::InitializeConsensusSession(InitializeConsensusSessionWithServersSet { - migration_id: self.migration_id.clone().map(Into::into), - old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(), - new_nodes_set: request.new_servers_set.into_iter().map(Into::into).collect(), - old_set_signature: request.old_set_signature.into(), - new_set_signature: request.new_set_signature.into(), - }), - }))) - } + fn send_partial_request( + &self, + node: &NodeId, + request: ServersSetChangeAccessRequest, + ) -> Result<(), Error> { + self.cluster.send( + node, + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage( + ServersSetChangeConsensusMessage { + session: self.id.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersSet::InitializeConsensusSession( + InitializeConsensusSessionWithServersSet { + migration_id: self.migration_id.clone().map(Into::into), + old_nodes_set: request + .old_servers_set + .into_iter() + .map(Into::into) + .collect(), + new_nodes_set: request + .new_servers_set + .into_iter() + .map(Into::into) + .collect(), + old_set_signature: request.old_set_signature.into(), + new_set_signature: 
request.new_set_signature.into(), + }, + ), + }, + )), + ) + } - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ServersSetChangeConsensusMessage { - session: self.id.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: response, - }), - }))) - } + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send( + node, + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage( + ServersSetChangeConsensusMessage { + session: self.id.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersSet::ConfirmConsensusInitialization( + ConfirmConsensusInitialization { + is_confirmed: response, + }, + ), + }, + )), + ) + } } impl JobTransport for UnknownSessionsJobTransport { - type PartialJobRequest=NodeId; - type PartialJobResponse=BTreeSet; + type PartialJobRequest = NodeId; + type PartialJobResponse = BTreeSet; - fn send_partial_request(&self, node: &NodeId, _request: NodeId) -> Result<(), Error> { - self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(UnknownSessionsRequest { - session: self.id.clone().into(), - session_nonce: self.nonce, - }))) - } + fn send_partial_request(&self, node: &NodeId, _request: NodeId) -> Result<(), Error> { + self.cluster.send( + node, + Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest( + UnknownSessionsRequest { + session: self.id.clone().into(), + session_nonce: self.nonce, + }, + )), + ) + } - fn send_partial_response(&self, node: &NodeId, response: BTreeSet) -> Result<(), Error> { - self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(UnknownSessions { - session: 
self.id.clone().into(), - session_nonce: self.nonce, - unknown_sessions: response.into_iter().map(Into::into).collect(), - }))) - } + fn send_partial_response( + &self, + node: &NodeId, + response: BTreeSet, + ) -> Result<(), Error> { + self.cluster.send( + node, + Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(UnknownSessions { + session: self.id.clone().into(), + session_nonce: self.nonce, + unknown_sessions: response.into_iter().map(Into::into).collect(), + })), + ) + } } impl KeyVersionNegotiationTransport for ServersSetChangeKeyVersionNegotiationTransport { - fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ShareChangeKeyVersionNegotiation { - session: self.id.clone().into(), - session_nonce: self.nonce, - message: message, - }))) - } + fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> { + self.cluster.broadcast(Message::ServersSetChange( + ServersSetChangeMessage::ShareChangeKeyVersionNegotiation( + ShareChangeKeyVersionNegotiation { + session: self.id.clone().into(), + session_nonce: self.nonce, + message: message, + }, + ), + )) + } - fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ShareChangeKeyVersionNegotiation { - session: self.id.clone().into(), - session_nonce: self.nonce, - message: message, - }))) - } + fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> { + self.cluster.send( + node, + Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation( + ShareChangeKeyVersionNegotiation { + session: self.id.clone().into(), + session_nonce: self.nonce, + message: message, + }, + )), + ) + } } -fn check_nodes_set(all_nodes_set: &BTreeSet, new_nodes_set: 
&BTreeSet) -> Result<(), Error> { - // all_nodes_set is the set of nodes we're currently connected to (and configured for) - match new_nodes_set.iter().any(|n| !all_nodes_set.contains(n)) { - true => Err(Error::NodeDisconnected), - false => Ok(()) - } +fn check_nodes_set( + all_nodes_set: &BTreeSet, + new_nodes_set: &BTreeSet, +) -> Result<(), Error> { + // all_nodes_set is the set of nodes we're currently connected to (and configured for) + match new_nodes_set.iter().any(|n| !all_nodes_set.contains(n)) { + true => Err(Error::NodeDisconnected), + false => Ok(()), + } } #[cfg(test)] pub mod tests { - use std::sync::Arc; - use std::collections::{VecDeque, BTreeMap, BTreeSet}; - use ethereum_types::H256; - use ethkey::{Random, Generator, Public, Signature, KeyPair, sign}; - use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, NodeKeyPair, PlainNodeKeyPair}; - use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::cluster::tests::MessageLoop as ClusterMessageLoop; - use key_server_cluster::generation_session::tests::{MessageLoop as GenerationMessageLoop}; - use key_server_cluster::math; - use key_server_cluster::message::Message; - use key_server_cluster::admin_sessions::ShareChangeSessionMeta; - use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash; - use super::{SessionImpl, SessionParams}; + use super::{SessionImpl, SessionParams}; + use ethereum_types::H256; + use ethkey::{sign, Generator, KeyPair, Public, Random, Signature}; + use key_server_cluster::{ + admin_sessions::ShareChangeSessionMeta, cluster::tests::MessageLoop as ClusterMessageLoop, + cluster_sessions::ClusterSession, + generation_session::tests::MessageLoop as GenerationMessageLoop, + jobs::servers_set_change_access_job::ordered_nodes_hash, math, message::Message, Error, + KeyStorage, NodeId, NodeKeyPair, PlainNodeKeyPair, SessionId, + }; + use std::{ + collections::{BTreeMap, BTreeSet, VecDeque}, + sync::Arc, + }; - pub trait 
AdminSessionAdapter { - const SIGN_NEW_NODES: bool; + pub trait AdminSessionAdapter { + const SIGN_NEW_NODES: bool; - fn create( - meta: ShareChangeSessionMeta, - admin_public: Public, - all_nodes_set: BTreeSet, - ml: &ClusterMessageLoop, - idx: usize - ) -> S; - } + fn create( + meta: ShareChangeSessionMeta, + admin_public: Public, + all_nodes_set: BTreeSet, + ml: &ClusterMessageLoop, + idx: usize, + ) -> S; + } - pub struct MessageLoop { - pub ml: ClusterMessageLoop, - pub admin_key_pair: KeyPair, - pub original_key_pair: KeyPair, - pub original_key_version: H256, - pub all_nodes_set: BTreeSet, - pub new_nodes_set: BTreeSet, - pub all_set_signature: Signature, - pub new_set_signature: Signature, - pub sessions: BTreeMap, - pub queue: VecDeque<(NodeId, NodeId, Message)>, - } + pub struct MessageLoop { + pub ml: ClusterMessageLoop, + pub admin_key_pair: KeyPair, + pub original_key_pair: KeyPair, + pub original_key_version: H256, + pub all_nodes_set: BTreeSet, + pub new_nodes_set: BTreeSet, + pub all_set_signature: Signature, + pub new_set_signature: Signature, + pub sessions: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, + } - impl ::std::fmt::Debug for MessageLoop { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "{:?}", self.ml) - } - } + impl ::std::fmt::Debug for MessageLoop { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "{:?}", self.ml) + } + } - struct Adapter; + struct Adapter; - impl AdminSessionAdapter for Adapter { - const SIGN_NEW_NODES: bool = true; + impl AdminSessionAdapter for Adapter { + const SIGN_NEW_NODES: bool = true; - fn create( - mut meta: ShareChangeSessionMeta, - admin_public: Public, - all_nodes_set: BTreeSet, - ml: &ClusterMessageLoop, - idx: usize - ) -> SessionImpl { - meta.self_node_id = *ml.node_key_pair(idx).public(); - SessionImpl::new(SessionParams { - meta: meta, - all_nodes_set: all_nodes_set, - cluster: ml.cluster(idx).view().unwrap(), - 
key_storage: ml.key_storage(idx).clone(), - nonce: 1, - admin_public: admin_public, - migration_id: None, - }).unwrap() - } - } + fn create( + mut meta: ShareChangeSessionMeta, + admin_public: Public, + all_nodes_set: BTreeSet, + ml: &ClusterMessageLoop, + idx: usize, + ) -> SessionImpl { + meta.self_node_id = *ml.node_key_pair(idx).public(); + SessionImpl::new(SessionParams { + meta: meta, + all_nodes_set: all_nodes_set, + cluster: ml.cluster(idx).view().unwrap(), + key_storage: ml.key_storage(idx).clone(), + nonce: 1, + admin_public: admin_public, + migration_id: None, + }) + .unwrap() + } + } - impl MessageLoop { - pub fn with_gml>( - gml: GenerationMessageLoop, - master: NodeId, - add: Option>, - removed_nodes_ids: Option>, - isolated_nodes_ids: Option>, - ) -> Self { - // read generated key data - let original_key_pair = gml.compute_key_pair(); - let original_key_version = gml.key_version(); - Self::with_ml::( - gml.0, - original_key_pair, - original_key_version, - master, - add, - removed_nodes_ids, - isolated_nodes_ids) - } + impl MessageLoop { + pub fn with_gml>( + gml: GenerationMessageLoop, + master: NodeId, + add: Option>, + removed_nodes_ids: Option>, + isolated_nodes_ids: Option>, + ) -> Self { + // read generated key data + let original_key_pair = gml.compute_key_pair(); + let original_key_version = gml.key_version(); + Self::with_ml::( + gml.0, + original_key_pair, + original_key_version, + master, + add, + removed_nodes_ids, + isolated_nodes_ids, + ) + } - pub fn and_then>( - self, - master: NodeId, - add: Option>, - removed_nodes_ids: Option>, - isolated_nodes_ids: Option>, - ) -> Self { - Self::with_ml::( - self.ml, - self.original_key_pair, - self.original_key_version, - master, - add, - removed_nodes_ids, - isolated_nodes_ids, - ) - } + pub fn and_then>( + self, + master: NodeId, + add: Option>, + removed_nodes_ids: Option>, + isolated_nodes_ids: Option>, + ) -> Self { + Self::with_ml::( + self.ml, + self.original_key_pair, + 
self.original_key_version, + master, + add, + removed_nodes_ids, + isolated_nodes_ids, + ) + } - pub fn with_ml>( - mut ml: ClusterMessageLoop, - original_key_pair: KeyPair, - original_key_version: H256, - master: NodeId, - add: Option>, - removed_nodes_ids: Option>, - isolated_nodes_ids: Option>, - ) -> Self { - let add = add.unwrap_or_default(); - let removed_nodes_ids = removed_nodes_ids.unwrap_or_default(); - let isolated_nodes_ids = isolated_nodes_ids.unwrap_or_default(); + pub fn with_ml>( + mut ml: ClusterMessageLoop, + original_key_pair: KeyPair, + original_key_version: H256, + master: NodeId, + add: Option>, + removed_nodes_ids: Option>, + isolated_nodes_ids: Option>, + ) -> Self { + let add = add.unwrap_or_default(); + let removed_nodes_ids = removed_nodes_ids.unwrap_or_default(); + let isolated_nodes_ids = isolated_nodes_ids.unwrap_or_default(); - // generate admin key pair - let admin_key_pair = Random.generate().unwrap(); - let admin_public = admin_key_pair.public().clone(); + // generate admin key pair + let admin_key_pair = Random.generate().unwrap(); + let admin_public = admin_key_pair.public().clone(); - // all active nodes set - let mut all_nodes_set: BTreeSet<_> = ml.nodes().into_iter() - .filter(|n| !isolated_nodes_ids.contains(n)) - .collect(); - // new nodes set includes all old nodes, except nodes being removed + all nodes being added - let new_nodes_set: BTreeSet = all_nodes_set.iter().cloned() - .chain(add.iter().map(|kp| *kp.public())) - .filter(|n| !removed_nodes_ids.contains(n)) - .collect(); - let mut old_set_to_sign = all_nodes_set.clone(); - all_nodes_set.extend(add.iter().map(|kp| *kp.public())); - if C::SIGN_NEW_NODES { - old_set_to_sign.extend(add.iter().map(|kp| *kp.public())); - } - for isolated_node_id in &isolated_nodes_ids { - all_nodes_set.remove(isolated_node_id); - } + // all active nodes set + let mut all_nodes_set: BTreeSet<_> = ml + .nodes() + .into_iter() + .filter(|n| !isolated_nodes_ids.contains(n)) + .collect(); + // 
new nodes set includes all old nodes, except nodes being removed + all nodes being added + let new_nodes_set: BTreeSet = all_nodes_set + .iter() + .cloned() + .chain(add.iter().map(|kp| *kp.public())) + .filter(|n| !removed_nodes_ids.contains(n)) + .collect(); + let mut old_set_to_sign = all_nodes_set.clone(); + all_nodes_set.extend(add.iter().map(|kp| *kp.public())); + if C::SIGN_NEW_NODES { + old_set_to_sign.extend(add.iter().map(|kp| *kp.public())); + } + for isolated_node_id in &isolated_nodes_ids { + all_nodes_set.remove(isolated_node_id); + } - let meta = ShareChangeSessionMeta { - self_node_id: master, - master_node_id: master, - id: SessionId::default(), - configured_nodes_count: all_nodes_set.len(), - connected_nodes_count: all_nodes_set.len(), - }; + let meta = ShareChangeSessionMeta { + self_node_id: master, + master_node_id: master, + id: SessionId::default(), + configured_nodes_count: all_nodes_set.len(), + connected_nodes_count: all_nodes_set.len(), + }; - // include new nodes in the cluster - for node_key_pair in &add { - ml.include(Arc::new(PlainNodeKeyPair::new(node_key_pair.clone()))); - } - // isolate nodes from the cluster - for isolated_node_id in &isolated_nodes_ids { - let idx = ml.nodes().iter().position(|n| n == isolated_node_id).unwrap(); - ml.exclude(idx); - } + // include new nodes in the cluster + for node_key_pair in &add { + ml.include(Arc::new(PlainNodeKeyPair::new(node_key_pair.clone()))); + } + // isolate nodes from the cluster + for isolated_node_id in &isolated_nodes_ids { + let idx = ml + .nodes() + .iter() + .position(|n| n == isolated_node_id) + .unwrap(); + ml.exclude(idx); + } - // prepare set of nodes - let sessions: BTreeMap<_, _> = (0..ml.nodes().len()) - .map(|idx| (ml.node(idx), C::create(meta.clone(), admin_public, all_nodes_set.clone(), &ml, idx))) - .collect(); + // prepare set of nodes + let sessions: BTreeMap<_, _> = (0..ml.nodes().len()) + .map(|idx| { + ( + ml.node(idx), + C::create(meta.clone(), admin_public, 
all_nodes_set.clone(), &ml, idx), + ) + }) + .collect(); - let all_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_set_to_sign)).unwrap(); - let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); + let all_set_signature = sign( + admin_key_pair.secret(), + &ordered_nodes_hash(&old_set_to_sign), + ) + .unwrap(); + let new_set_signature = + sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); - MessageLoop { - ml, - admin_key_pair: admin_key_pair, - original_key_pair, - original_key_version, - all_nodes_set: all_nodes_set.clone(), - new_nodes_set: new_nodes_set, - all_set_signature: all_set_signature, - new_set_signature: new_set_signature, - sessions, - queue: Default::default(), - } - } + MessageLoop { + ml, + admin_key_pair: admin_key_pair, + original_key_pair, + original_key_version, + all_nodes_set: all_nodes_set.clone(), + new_nodes_set: new_nodes_set, + all_set_signature: all_set_signature, + new_set_signature: new_set_signature, + sessions, + queue: Default::default(), + } + } - pub fn run(&mut self) { - // run session until completion - while let Some((from, to, message)) = self.take_message() { - self.process_message((from, to, message)).unwrap(); - } + pub fn run(&mut self) { + // run session until completion + while let Some((from, to, message)) = self.take_message() { + self.process_message((from, to, message)).unwrap(); + } - // check that all sessions have finished - assert!(self.sessions.values().all(|s| s.is_finished())); - } + // check that all sessions have finished + assert!(self.sessions.values().all(|s| s.is_finished())); + } - pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.ml.take_message().or_else(|| self.queue.pop_front()) - } + pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { + self.ml.take_message().or_else(|| self.queue.pop_front()) + } - pub fn process_message(&mut self, msg: (NodeId, NodeId, 
Message)) -> Result<(), Error> { - match self.sessions[&msg.1].on_message(&msg.0, &msg.2) { - Ok(_) => Ok(()), - Err(Error::TooEarlyForRequest) => { - self.queue.push_back(msg); - Ok(()) - }, - Err(err) => Err(err), - } - } + pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { + match self.sessions[&msg.1].on_message(&msg.0, &msg.2) { + Ok(_) => Ok(()), + Err(Error::TooEarlyForRequest) => { + self.queue.push_back(msg); + Ok(()) + } + Err(err) => Err(err), + } + } - /// This only works for schemes where threshold = 1 - pub fn check_secret_is_preserved<'a, I: IntoIterator>(&self, nodes: I) { - let nodes: Vec<_> = nodes.into_iter().collect(); - let key_storages: Vec<_> = nodes.iter().map(|n| self.ml.key_storage_of(n)).collect(); - let n = nodes.len(); - let document_secret_plain = math::generate_random_point().unwrap(); - for n1 in 0..n { - for n2 in n1+1..n { - let share1 = key_storages[n1].get(&SessionId::default()).unwrap(); - let share2 = key_storages[n2].get(&SessionId::default()).unwrap(); + /// This only works for schemes where threshold = 1 + pub fn check_secret_is_preserved<'a, I: IntoIterator>(&self, nodes: I) { + let nodes: Vec<_> = nodes.into_iter().collect(); + let key_storages: Vec<_> = nodes.iter().map(|n| self.ml.key_storage_of(n)).collect(); + let n = nodes.len(); + let document_secret_plain = math::generate_random_point().unwrap(); + for n1 in 0..n { + for n2 in n1 + 1..n { + let share1 = key_storages[n1].get(&SessionId::default()).unwrap(); + let share2 = key_storages[n2].get(&SessionId::default()).unwrap(); - let id_number1 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes[n1]].clone(); - let id_number2 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes[n2]].clone(); - // now encrypt and decrypt data - let (document_secret_decrypted, document_secret_decrypted_test) = - math::tests::do_encryption_and_decryption(1, - self.original_key_pair.public(), - &[id_number1, id_number2], - 
&[share1.unwrap().last_version().unwrap().secret_share.clone(), - share2.unwrap().last_version().unwrap().secret_share.clone()], - Some(self.original_key_pair.secret()), - document_secret_plain.clone()); + let id_number1 = share1.as_ref().unwrap().last_version().unwrap().id_numbers + [nodes[n1]] + .clone(); + let id_number2 = share1.as_ref().unwrap().last_version().unwrap().id_numbers + [nodes[n2]] + .clone(); + // now encrypt and decrypt data + let (document_secret_decrypted, document_secret_decrypted_test) = + math::tests::do_encryption_and_decryption( + 1, + self.original_key_pair.public(), + &[id_number1, id_number2], + &[ + share1.unwrap().last_version().unwrap().secret_share.clone(), + share2.unwrap().last_version().unwrap().secret_share.clone(), + ], + Some(self.original_key_pair.secret()), + document_secret_plain.clone(), + ); - assert_eq!(document_secret_plain, document_secret_decrypted_test); - assert_eq!(document_secret_plain, document_secret_decrypted); - } - } - } - } + assert_eq!(document_secret_plain, document_secret_decrypted_test); + assert_eq!(document_secret_plain, document_secret_decrypted); + } + } + } + } - impl MessageLoop { - pub fn run_at(mut self, master: NodeId) -> Self { - self.sessions[&master].initialize( - self.new_nodes_set.clone(), - self.all_set_signature.clone(), - self.new_set_signature.clone()).unwrap(); - self.run(); - self - } - } + impl MessageLoop { + pub fn run_at(mut self, master: NodeId) -> Self { + self.sessions[&master] + .initialize( + self.new_nodes_set.clone(), + self.all_set_signature.clone(), + self.new_set_signature.clone(), + ) + .unwrap(); + self.run(); + self + } + } - pub fn generate_key(num_nodes: usize, threshold: usize) -> GenerationMessageLoop { - let gml = GenerationMessageLoop::new(num_nodes).init(threshold).unwrap(); - gml.0.loop_until(|| gml.0.is_empty()); - gml - } + pub fn generate_key(num_nodes: usize, threshold: usize) -> GenerationMessageLoop { + let gml = GenerationMessageLoop::new(num_nodes) + 
.init(threshold) + .unwrap(); + gml.0.loop_until(|| gml.0.is_empty()); + gml + } - #[test] - fn node_added_using_servers_set_change() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn node_added_using_servers_set_change() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // add 1 node so that it becames 2-of-4 session - let add = vec![Random.generate().unwrap()]; - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, Some(add), None, None).run_at(master); + // add 1 node so that it becames 2-of-4 session + let add = vec![Random.generate().unwrap()]; + let master = gml.0.node(0); + let ml = + MessageLoop::with_gml::(gml, master, Some(add), None, None).run_at(master); - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys()); - } + // try to recover secret for every possible combination of nodes && check that secret is the same + ml.check_secret_is_preserved(ml.sessions.keys()); + } - #[test] - fn node_added_using_server_set_change_from_this_node() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn node_added_using_server_set_change_from_this_node() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // insert 1 node so that it becames 2-of-4 session - // master node is the node we are adding => - // 1) add session is delegated to one of old nodes - // 2) key share is pushed to new node - // 3) delegated session is returned back to added node - let add = vec![Random.generate().unwrap()]; - let master = add[0].public().clone(); - let ml = MessageLoop::with_gml::(gml, master, Some(add), None, None).run_at(master); + // insert 1 node so that it becames 2-of-4 session + // master node is the node we are adding => + // 1) add session is delegated to one of old nodes + // 2) key share is pushed to new node + // 3) delegated session is returned back to added node + let add = 
vec![Random.generate().unwrap()]; + let master = add[0].public().clone(); + let ml = + MessageLoop::with_gml::(gml, master, Some(add), None, None).run_at(master); - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys()); - } + // try to recover secret for every possible combination of nodes && check that secret is the same + ml.check_secret_is_preserved(ml.sessions.keys()); + } - #[test] - fn node_moved_using_servers_set_change() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn node_moved_using_servers_set_change() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // remove 1 node && insert 1 node so that one share is moved - let master = gml.0.node(0); - let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); - let add = vec![Random.generate().unwrap()]; - let ml = MessageLoop::with_gml::(gml, master, Some(add), Some(remove.clone()), None).run_at(master); + // remove 1 node && insert 1 node so that one share is moved + let master = gml.0.node(0); + let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); + let add = vec![Random.generate().unwrap()]; + let ml = + MessageLoop::with_gml::(gml, master, Some(add), Some(remove.clone()), None) + .run_at(master); - // check that secret is still the same as before moving the share - ml.check_secret_is_preserved(ml.sessions.keys() - .filter(|k| !remove.contains(k))); + // check that secret is still the same as before moving the share + ml.check_secret_is_preserved(ml.sessions.keys().filter(|k| !remove.contains(k))); - // check that all removed nodes do not own key share - assert!(ml.sessions.keys().filter(|k| remove.contains(k)) - .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none())); - } + // check that all removed nodes do not own key share + assert!(ml.sessions.keys().filter(|k| remove.contains(k)).all(|k| ml + .ml + 
.key_storage_of(k) + .get(&SessionId::default()) + .unwrap() + .is_none())); + } - #[test] - fn node_removed_using_servers_set_change() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn node_removed_using_servers_set_change() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // remove 1 node so that session becames 2-of-2 - let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(0)).collect(); - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, None, Some(remove.clone()), None).run_at(master); + // remove 1 node so that session becames 2-of-2 + let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(0)).collect(); + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, None, Some(remove.clone()), None) + .run_at(master); - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys() - .filter(|k| !remove.contains(k))); + // try to recover secret for every possible combination of nodes && check that secret is the same + ml.check_secret_is_preserved(ml.sessions.keys().filter(|k| !remove.contains(k))); - // check that all removed nodes do not own key share - assert!(ml.sessions.keys().filter(|k| remove.contains(k)) - .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none())); - } + // check that all removed nodes do not own key share + assert!(ml.sessions.keys().filter(|k| remove.contains(k)).all(|k| ml + .ml + .key_storage_of(k) + .get(&SessionId::default()) + .unwrap() + .is_none())); + } - #[test] - fn isolated_node_removed_using_servers_set_change() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn isolated_node_removed_using_servers_set_change() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // remove 1 node so that session becames 2-of-2 - let isolate: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); - let master = 
gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, None, None, Some(isolate.clone())) - .run_at(master); + // remove 1 node so that session becames 2-of-2 + let isolate: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, None, None, Some(isolate.clone())) + .run_at(master); - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys() - .filter(|k| !isolate.contains(k))); + // try to recover secret for every possible combination of nodes && check that secret is the same + ml.check_secret_is_preserved(ml.sessions.keys().filter(|k| !isolate.contains(k))); - // check that all isolated nodes still OWN key share - assert!(ml.sessions.keys().filter(|k| isolate.contains(k)) - .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_some())); - } + // check that all isolated nodes still OWN key share + assert!(ml + .sessions + .keys() + .filter(|k| isolate.contains(k)) + .all(|k| ml + .ml + .key_storage_of(k) + .get(&SessionId::default()) + .unwrap() + .is_some())); + } - #[test] - fn having_less_than_required_nodes_after_change_does_not_fail_change_session() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn having_less_than_required_nodes_after_change_does_not_fail_change_session() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // remove 2 nodes so that key becomes irrecoverable (make sure the session is completed - // even though key is irrecoverable) - let remove: BTreeSet<_> = gml.0.nodes().into_iter().skip(1).take(2).collect(); - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, None, Some(remove.clone()), None).run_at(master); + // remove 2 nodes so that key becomes irrecoverable (make sure the session is completed + // even though key is irrecoverable) + let remove: BTreeSet<_> = 
gml.0.nodes().into_iter().skip(1).take(2).collect(); + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, None, Some(remove.clone()), None) + .run_at(master); - // check that all removed nodes do not own key share - assert!(ml.sessions.keys().filter(|k| remove.contains(k)) - .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none())); + // check that all removed nodes do not own key share + assert!(ml.sessions.keys().filter(|k| remove.contains(k)).all(|k| ml + .ml + .key_storage_of(k) + .get(&SessionId::default()) + .unwrap() + .is_none())); - // and now let's add new node (make sure the session is completed, even though key is still irrecoverable) - // isolated here are not actually isolated, but removed on the previous step - let add = vec![Random.generate().unwrap()]; - let master = add[0].public().clone(); - let ml = ml.and_then::(master, Some(add.clone()), None, Some(remove)).run_at(master); + // and now let's add new node (make sure the session is completed, even though key is still irrecoverable) + // isolated here are not actually isolated, but removed on the previous step + let add = vec![Random.generate().unwrap()]; + let master = add[0].public().clone(); + let ml = ml + .and_then::(master, Some(add.clone()), None, Some(remove)) + .run_at(master); - // check that all added nodes do not own key share (there's not enough nodes to run share add session) - assert!(ml.sessions.keys().filter(|k| add.iter().any(|n| n.public() == *k)) - .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none())); - } + // check that all added nodes do not own key share (there's not enough nodes to run share add session) + assert!(ml + .sessions + .keys() + .filter(|k| add.iter().any(|n| n.public() == *k)) + .all(|k| ml + .ml + .key_storage_of(k) + .get(&SessionId::default()) + .unwrap() + .is_none())); + } - #[test] - fn removing_node_from_cluster_of_2_works() { - // initial 2-of-2 session - let gml = 
generate_key(2, 1); + #[test] + fn removing_node_from_cluster_of_2_works() { + // initial 2-of-2 session + let gml = generate_key(2, 1); - // make 2nd node isolated so that key becomes irrecoverable (make sure the session is completed, - // even though key is irrecoverable) - let isolate: BTreeSet<_> = gml.0.nodes().into_iter().skip(1).take(1).collect(); - let master = gml.0.node(0); - MessageLoop::with_gml::(gml, master, None, None, Some(isolate)).run_at(master); - } + // make 2nd node isolated so that key becomes irrecoverable (make sure the session is completed, + // even though key is irrecoverable) + let isolate: BTreeSet<_> = gml.0.nodes().into_iter().skip(1).take(1).collect(); + let master = gml.0.node(0); + MessageLoop::with_gml::(gml, master, None, None, Some(isolate)).run_at(master); + } - #[test] - fn adding_node_that_has_lost_its_database_works() { - // initial 2-of-2 session - let gml = generate_key(2, 1); + #[test] + fn adding_node_that_has_lost_its_database_works() { + // initial 2-of-2 session + let gml = generate_key(2, 1); - // insert 1 node so that it becames 2-of-3 session - let add = vec![Random.generate().unwrap()]; - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, Some(add.clone()), None, None) - .run_at(master); + // insert 1 node so that it becames 2-of-3 session + let add = vec![Random.generate().unwrap()]; + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, Some(add.clone()), None, None) + .run_at(master); - // now let's say new node has lost its db and we're trying to join it again - ml.ml.key_storage_of(add[0].public()).clear().unwrap(); + // now let's say new node has lost its db and we're trying to join it again + ml.ml.key_storage_of(add[0].public()).clear().unwrap(); - // this time old nodes have version, where new node is mentioned, but it doesn't report it when negotiating - let ml = ml.and_then::(master, Some(add), None, None).run_at(master); + // this time old nodes have 
version, where new node is mentioned, but it doesn't report it when negotiating + let ml = ml + .and_then::(master, Some(add), None, None) + .run_at(master); - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys()); - } + // try to recover secret for every possible combination of nodes && check that secret is the same + ml.check_secret_is_preserved(ml.sessions.keys()); + } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs b/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs index 91d3bc7b8..0cce7d155 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs @@ -14,43 +14,45 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::collections::{VecDeque, BTreeSet}; -use key_server_cluster::{Error, SessionId, KeyStorage}; +use key_server_cluster::{Error, KeyStorage, SessionId}; +use std::{ + collections::{BTreeSet, VecDeque}, + sync::Arc, +}; /// Queue of share change sessions. pub struct SessionsQueue { - /// Sessions, known on this node. - known_sessions: VecDeque, - /// Unknown sessions. - unknown_sessions: VecDeque, + /// Sessions, known on this node. + known_sessions: VecDeque, + /// Unknown sessions. + unknown_sessions: VecDeque, } impl SessionsQueue { - /// Create new sessions queue. - pub fn new(key_storage: &Arc, unknown_sessions: BTreeSet) -> Self { - // TODO [Opt]: - // 1) known sessions - change to iter - // 2) unknown sesions - request chunk-by-chunk - SessionsQueue { - known_sessions: key_storage.iter().map(|(k, _)| k).collect(), - unknown_sessions: unknown_sessions.into_iter().collect(), - } - } + /// Create new sessions queue. 
+ pub fn new(key_storage: &Arc, unknown_sessions: BTreeSet) -> Self { + // TODO [Opt]: + // 1) known sessions - change to iter + // 2) unknown sesions - request chunk-by-chunk + SessionsQueue { + known_sessions: key_storage.iter().map(|(k, _)| k).collect(), + unknown_sessions: unknown_sessions.into_iter().collect(), + } + } } impl Iterator for SessionsQueue { - type Item = Result; + type Item = Result; - fn next(&mut self) -> Option { - if let Some(known_session) = self.known_sessions.pop_front() { - return Some(Ok(known_session)); - } + fn next(&mut self) -> Option { + if let Some(known_session) = self.known_sessions.pop_front() { + return Some(Ok(known_session)); + } - if let Some(unknown_session) = self.unknown_sessions.pop_front() { - return Some(Ok(unknown_session)); - } + if let Some(unknown_session) = self.unknown_sessions.pop_front() { + return Some(Ok(unknown_session)); + } - None - } + None + } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs index b5195a629..689627689 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -14,32 +14,47 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use std::collections::{BTreeSet, BTreeMap}; -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; use ethkey::{Public, Secret, Signature}; -use parking_lot::{Mutex, Condvar}; -use key_server_cluster::{Error, SessionId, NodeId, DocumentKeyShare, DocumentKeyShareVersion, KeyStorage}; -use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::ClusterSession; -use key_server_cluster::math; -use key_server_cluster::message::{Message, ShareAddMessage, ShareAddConsensusMessage, ConsensusMessageOfShareAdd, - InitializeConsensusSessionOfShareAdd, KeyShareCommon, NewKeysDissemination, ShareAddError, - ConfirmConsensusInitialization, CommonKeyData}; -use key_server_cluster::jobs::job_session::JobTransport; -use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport}; -use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}; -use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; -use key_server_cluster::admin_sessions::ShareChangeSessionMeta; +use key_server_cluster::{ + admin_sessions::ShareChangeSessionMeta, + cluster::Cluster, + cluster_sessions::ClusterSession, + jobs::{ + consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, + dummy_job::{DummyJob, DummyJobTransport}, + job_session::JobTransport, + servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}, + }, + math, + message::{ + CommonKeyData, ConfirmConsensusInitialization, ConsensusMessageOfShareAdd, + InitializeConsensusSessionOfShareAdd, KeyShareCommon, Message, NewKeysDissemination, + ShareAddConsensusMessage, ShareAddError, ShareAddMessage, + }, + DocumentKeyShare, DocumentKeyShareVersion, Error, KeyStorage, NodeId, SessionId, +}; +use parking_lot::{Condvar, Mutex}; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; /// Share 
addition session transport. -pub trait SessionTransport: Clone + JobTransport { - /// Get all connected nodes. Since ShareAdd session requires all cluster nodes to be connected, this set equals to all known cluster nodes set. - fn nodes(&self) -> BTreeSet; - /// Send message to given node. - fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error>; - /// Set data for master node (sent to slave nodes in consensus session initialization message). - fn set_master_data(&mut self, consensus_group: BTreeSet, version_holders: BTreeSet, id_numbers: BTreeMap>); +pub trait SessionTransport: + Clone + JobTransport +{ + /// Get all connected nodes. Since ShareAdd session requires all cluster nodes to be connected, this set equals to all known cluster nodes set. + fn nodes(&self) -> BTreeSet; + /// Send message to given node. + fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error>; + /// Set data for master node (sent to slave nodes in consensus session initialization message). + fn set_master_data( + &mut self, + consensus_group: BTreeSet, + version_holders: BTreeSet, + id_numbers: BTreeMap>, + ); } /// Share addition session. @@ -51,683 +66,900 @@ pub trait SessionTransport: Clone + JobTransport { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex>, + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex>, } /// Immutable session data. struct SessionCore { - /// Session metadata. - pub meta: ShareChangeSessionMeta, - /// Session-level nonce. - pub nonce: u64, - /// Original key share (for old nodes only). - pub key_share: Option, - /// Session transport to communicate to other cluster nodes. - pub transport: T, - /// Key storage. - pub key_storage: Arc, - /// Administrator public key. - pub admin_public: Option, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session metadata. + pub meta: ShareChangeSessionMeta, + /// Session-level nonce. 
+ pub nonce: u64, + /// Original key share (for old nodes only). + pub key_share: Option, + /// Session transport to communicate to other cluster nodes. + pub transport: T, + /// Key storage. + pub key_storage: Arc, + /// Administrator public key. + pub admin_public: Option, + /// SessionImpl completion condvar. + pub completed: Condvar, } /// Share add consensus session type. -type ShareAddChangeConsensusSession = ConsensusSession; +type ShareAddChangeConsensusSession = + ConsensusSession; /// Mutable session data. struct SessionData { - /// Session state. - pub state: SessionState, - /// Key version to use for decryption. - pub version: Option, - /// Consensus session. - pub consensus_session: Option>, - /// Holders of key version. - pub version_holders: Option>, - /// NewKeyShare (for nodes being added). - pub new_key_share: Option, - /// Nodes id numbers. - pub id_numbers: Option>>, - /// Secret subshares received from nodes. - pub secret_subshares: Option>>, - /// Share add change result. - pub result: Option>, + /// Session state. + pub state: SessionState, + /// Key version to use for decryption. + pub version: Option, + /// Consensus session. + pub consensus_session: Option>, + /// Holders of key version. + pub version_holders: Option>, + /// NewKeyShare (for nodes being added). + pub new_key_share: Option, + /// Nodes id numbers. + pub id_numbers: Option>>, + /// Secret subshares received from nodes. + pub secret_subshares: Option>>, + /// Share add change result. + pub result: Option>, } /// New key share. struct NewKeyShare { - /// NewKeyShare: threshold. - pub threshold: usize, - /// NewKeyShare: author. - pub author: Address, - /// NewKeyShare: joint public. - pub joint_public: Public, - /// NewKeyShare: Common (shared) encryption point. - pub common_point: Option, - /// NewKeyShare: Encrypted point. - pub encrypted_point: Option, + /// NewKeyShare: threshold. + pub threshold: usize, + /// NewKeyShare: author. 
+ pub author: Address, + /// NewKeyShare: joint public. + pub joint_public: Public, + /// NewKeyShare: Common (shared) encryption point. + pub common_point: Option, + /// NewKeyShare: Encrypted point. + pub encrypted_point: Option, } /// Session state. #[derive(Debug, PartialEq)] enum SessionState { - /// State when consensus is establishing. - ConsensusEstablishing, - /// Waiting for keys dissemination. - WaitingForKeysDissemination, - /// Session is completed. - Finished, + /// State when consensus is establishing. + ConsensusEstablishing, + /// Waiting for keys dissemination. + WaitingForKeysDissemination, + /// Session is completed. + Finished, } /// SessionImpl creation parameters pub struct SessionParams { - /// Session metadata. - pub meta: ShareChangeSessionMeta, - /// Session transport. - pub transport: T, - /// Key storage. - pub key_storage: Arc, - /// Administrator public key. - pub admin_public: Option, - /// Session nonce. - pub nonce: u64, + /// Session metadata. + pub meta: ShareChangeSessionMeta, + /// Session transport. + pub transport: T, + /// Key storage. + pub key_storage: Arc, + /// Administrator public key. + pub admin_public: Option, + /// Session nonce. + pub nonce: u64, } /// Isolated ShareAdd session transport. #[derive(Clone)] pub struct IsolatedSessionTransport { - /// Key id. - session: SessionId, - /// Key version. - version: Option, - /// Session-level nonce. - nonce: u64, - /// Holders of key version. - version_holders: Option>, - /// Consensus group. - consensus_group: Option>, - /// Id numbers of all new nodes. - id_numbers: Option>>, - /// Cluster. - cluster: Arc, + /// Key id. + session: SessionId, + /// Key version. + version: Option, + /// Session-level nonce. + nonce: u64, + /// Holders of key version. + version_holders: Option>, + /// Consensus group. + consensus_group: Option>, + /// Id numbers of all new nodes. + id_numbers: Option>>, + /// Cluster. 
+ cluster: Arc, } -impl SessionImpl where T: SessionTransport { - /// Create new share addition session. - pub fn new(params: SessionParams) -> Result { - let key_share = params.key_storage.get(¶ms.meta.id)?; +impl SessionImpl +where + T: SessionTransport, +{ + /// Create new share addition session. + pub fn new(params: SessionParams) -> Result { + let key_share = params.key_storage.get(¶ms.meta.id)?; - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - nonce: params.nonce, - key_share: key_share, - transport: params.transport, - key_storage: params.key_storage, - admin_public: params.admin_public, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::ConsensusEstablishing, - version: None, - consensus_session: None, - version_holders: None, - new_key_share: None, - id_numbers: None, - secret_subshares: None, - result: None, - }), - }) - } + Ok(SessionImpl { + core: SessionCore { + meta: params.meta, + nonce: params.nonce, + key_share: key_share, + transport: params.transport, + key_storage: params.key_storage, + admin_public: params.admin_public, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + state: SessionState::ConsensusEstablishing, + version: None, + consensus_session: None, + version_holders: None, + new_key_share: None, + id_numbers: None, + secret_subshares: None, + result: None, + }), + }) + } - /// Set pre-established consensus data. - pub fn set_consensus_output(&self, version: &H256, consensus_group: BTreeSet, version_holders: BTreeSet, mut new_nodes_map: BTreeMap>) -> Result<(), Error> { - let mut data = self.data.lock(); + /// Set pre-established consensus data. 
+ pub fn set_consensus_output( + &self, + version: &H256, + consensus_group: BTreeSet, + version_holders: BTreeSet, + mut new_nodes_map: BTreeMap>, + ) -> Result<(), Error> { + let mut data = self.data.lock(); - // check state - if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() || data.id_numbers.is_some() || data.secret_subshares.is_some() { - return Err(Error::InvalidStateForRequest); - } + // check state + if data.state != SessionState::ConsensusEstablishing + || data.consensus_session.is_some() + || data.id_numbers.is_some() + || data.secret_subshares.is_some() + { + return Err(Error::InvalidStateForRequest); + } - // key share version is required on ShareAdd master node - if let Some(key_share) = self.core.key_share.as_ref() { - if let Ok(key_version) = key_share.version(version) { - let non_isolated_nodes = self.core.transport.nodes(); - for (node, id_number) in &key_version.id_numbers { - { - let external_id_number = new_nodes_map.get(node); - match external_id_number { - Some(&Some(ref external_id_number)) => { - if !version_holders.contains(node) { - // possible when joining version holder, that has lost its database - // and haven't reported version ownership - continue; - } - if external_id_number == id_number { - continue; - } + // key share version is required on ShareAdd master node + if let Some(key_share) = self.core.key_share.as_ref() { + if let Ok(key_version) = key_share.version(version) { + let non_isolated_nodes = self.core.transport.nodes(); + for (node, id_number) in &key_version.id_numbers { + { + let external_id_number = new_nodes_map.get(node); + match external_id_number { + Some(&Some(ref external_id_number)) => { + if !version_holders.contains(node) { + // possible when joining version holder, that has lost its database + // and haven't reported version ownership + continue; + } + if external_id_number == id_number { + continue; + } - return Err(Error::ConsensusUnreachable); - }, - Some(&None) => 
(), - None => { - if non_isolated_nodes.contains(node) { - return Err(Error::ConsensusUnreachable) - } - continue; - }, - } - } + return Err(Error::ConsensusUnreachable); + } + Some(&None) => (), + None => { + if non_isolated_nodes.contains(node) { + return Err(Error::ConsensusUnreachable); + } + continue; + } + } + } - new_nodes_map.insert(node.clone(), Some(id_number.clone())); - } + new_nodes_map.insert(node.clone(), Some(id_number.clone())); + } - // check that all id_numbers are filled - if new_nodes_map.values().any(Option::is_none) { - return Err(Error::ConsensusUnreachable); - } - } - } + // check that all id_numbers are filled + if new_nodes_map.values().any(Option::is_none) { + return Err(Error::ConsensusUnreachable); + } + } + } - // check passed consensus data - Self::check_nodes_map(&self.core, version, &consensus_group, &version_holders, &new_nodes_map)?; + // check passed consensus data + Self::check_nodes_map( + &self.core, + version, + &consensus_group, + &version_holders, + &new_nodes_map, + )?; - // update data - data.version = Some(version.clone()); - data.id_numbers = Some(new_nodes_map); - data.secret_subshares = Some(consensus_group.into_iter() - .map(|n| (n, None)) - .collect()); - data.version_holders = Some(version_holders); + // update data + data.version = Some(version.clone()); + data.id_numbers = Some(new_nodes_map); + data.secret_subshares = Some(consensus_group.into_iter().map(|n| (n, None)).collect()); + data.version_holders = Some(version_holders); - Ok(()) - } + Ok(()) + } - /// Initialize share add session on master node. - pub fn initialize(&self, version: Option, new_nodes_set: Option>, old_set_signature: Option, new_set_signature: Option) -> Result<(), Error> { - debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); + /// Initialize share add session on master node. 
+ pub fn initialize( + &self, + version: Option, + new_nodes_set: Option>, + old_set_signature: Option, + new_set_signature: Option, + ) -> Result<(), Error> { + debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); - let mut data = self.data.lock(); + let mut data = self.data.lock(); - // check state - if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() { - return Err(Error::InvalidStateForRequest); - } + // check state + if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() { + return Err(Error::InvalidStateForRequest); + } - // if consensus is pre-established => start sending ShareAdd-specific messages - let is_consensus_pre_established = data.id_numbers.is_some(); - if is_consensus_pre_established { - return Self::on_consensus_established(&self.core, &mut *data); - } + // if consensus is pre-established => start sending ShareAdd-specific messages + let is_consensus_pre_established = data.id_numbers.is_some(); + if is_consensus_pre_established { + return Self::on_consensus_established(&self.core, &mut *data); + } - // else => prepare to start consensus session - // require all initialization params for consensus session - let version = version.ok_or(Error::InvalidMessage)?; - let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?; - let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?; - let new_nodes_set = new_nodes_set.ok_or(Error::InvalidMessage)?; - let admin_public = self.core.admin_public.as_ref().cloned().ok_or(Error::ConsensusUnreachable)?; + // else => prepare to start consensus session + // require all initialization params for consensus session + let version = version.ok_or(Error::InvalidMessage)?; + let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?; + let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?; + let new_nodes_set = new_nodes_set.ok_or(Error::InvalidMessage)?; + let 
admin_public = self + .core + .admin_public + .as_ref() + .cloned() + .ok_or(Error::ConsensusUnreachable)?; - // key share version is required on ShareAdd master node - let key_share = self.core.key_share.as_ref().ok_or_else(|| Error::ServerKeyIsNotFound)?; - let key_version = key_share.version(&version)?; + // key share version is required on ShareAdd master node + let key_share = self + .core + .key_share + .as_ref() + .ok_or_else(|| Error::ServerKeyIsNotFound)?; + let key_version = key_share.version(&version)?; - // old nodes set is all non-isolated owners of version holders - let non_isolated_nodes = self.core.transport.nodes(); - let old_nodes_set: BTreeSet<_> = key_version.id_numbers.keys() - .filter(|n| non_isolated_nodes.contains(n)) - .cloned() - .collect(); + // old nodes set is all non-isolated owners of version holders + let non_isolated_nodes = self.core.transport.nodes(); + let old_nodes_set: BTreeSet<_> = key_version + .id_numbers + .keys() + .filter(|n| non_isolated_nodes.contains(n)) + .cloned() + .collect(); - // new nodes map contains previous id_numbers for old nodes && random number for new nodes - let mut new_nodes_map = BTreeMap::new(); - for new_node in new_nodes_set.into_iter().filter(|n| non_isolated_nodes.contains(n)) { - new_nodes_map.insert(new_node, match key_version.id_numbers.get(&new_node) { - Some(old_id_number) => Some(old_id_number.clone()), - None => Some(math::generate_random_scalar()?), - }); - } + // new nodes map contains previous id_numbers for old nodes && random number for new nodes + let mut new_nodes_map = BTreeMap::new(); + for new_node in new_nodes_set + .into_iter() + .filter(|n| non_isolated_nodes.contains(n)) + { + new_nodes_map.insert( + new_node, + match key_version.id_numbers.get(&new_node) { + Some(old_id_number) => Some(old_id_number.clone()), + None => Some(math::generate_random_scalar()?), + }, + ); + } - // let's select consensus group - let consensus_group: BTreeSet<_> = 
::std::iter::once(self.core.meta.self_node_id.clone()) - .chain(old_nodes_set.iter() - .filter(|n| **n != self.core.meta.self_node_id && non_isolated_nodes.contains(*n)) - .take(key_share.threshold) - .cloned()) - .collect(); - let version_holders = &old_nodes_set; + // let's select consensus group + let consensus_group: BTreeSet<_> = ::std::iter::once(self.core.meta.self_node_id.clone()) + .chain( + old_nodes_set + .iter() + .filter(|n| { + **n != self.core.meta.self_node_id && non_isolated_nodes.contains(*n) + }) + .take(key_share.threshold) + .cloned(), + ) + .collect(); + let version_holders = &old_nodes_set; - // now check nodes map - Self::check_nodes_map(&self.core, &version, &consensus_group, version_holders, &new_nodes_map)?; + // now check nodes map + Self::check_nodes_map( + &self.core, + &version, + &consensus_group, + version_holders, + &new_nodes_map, + )?; - // prepare consensus session transport - let mut consensus_transport = self.core.transport.clone(); - consensus_transport.set_master_data(consensus_group.clone(), version_holders.clone(), new_nodes_map.clone()); + // prepare consensus session transport + let mut consensus_transport = self.core.transport.clone(); + consensus_transport.set_master_data( + consensus_group.clone(), + version_holders.clone(), + new_nodes_map.clone(), + ); - // create && initialize consensus session - let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(new_nodes_map.len())?, - consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public, - old_nodes_set.clone(), - new_nodes_map.keys().cloned().collect(), - old_set_signature, - new_set_signature), - consensus_transport: consensus_transport, - })?; + // create && initialize consensus session + let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: self + .core + .meta + .clone() + .into_consensus_meta(new_nodes_map.len())?, + consensus_executor: 
ServersSetChangeAccessJob::new_on_master( + admin_public, + old_nodes_set.clone(), + new_nodes_map.keys().cloned().collect(), + old_set_signature, + new_set_signature, + ), + consensus_transport: consensus_transport, + })?; - consensus_session.initialize(new_nodes_map.keys().cloned().collect())?; + consensus_session.initialize(new_nodes_map.keys().cloned().collect())?; - // update data - data.version = Some(version); - data.consensus_session = Some(consensus_session); - data.id_numbers = Some(new_nodes_map); - data.secret_subshares = Some(consensus_group.into_iter().map(|n| (n, None)).collect()); - data.version_holders = Some(version_holders.clone()); + // update data + data.version = Some(version); + data.consensus_session = Some(consensus_session); + data.id_numbers = Some(new_nodes_map); + data.secret_subshares = Some(consensus_group.into_iter().map(|n| (n, None)).collect()); + data.version_holders = Some(version_holders.clone()); - Ok(()) - } + Ok(()) + } - /// Process single message. - pub fn process_message(&self, sender: &NodeId, message: &ShareAddMessage) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } + /// Process single message. 
+ pub fn process_message(&self, sender: &NodeId, message: &ShareAddMessage) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } - match message { - &ShareAddMessage::ShareAddConsensusMessage(ref message) => - self.on_consensus_message(sender, message), - &ShareAddMessage::KeyShareCommon(ref message) => - self.on_common_key_share_data(sender, message), - &ShareAddMessage::NewKeysDissemination(ref message) => - self.on_new_keys_dissemination(sender, message), - &ShareAddMessage::ShareAddError(ref message) => { - self.on_session_error(sender, message.error.clone()); - Ok(()) - }, - } - } + match message { + &ShareAddMessage::ShareAddConsensusMessage(ref message) => { + self.on_consensus_message(sender, message) + } + &ShareAddMessage::KeyShareCommon(ref message) => { + self.on_common_key_share_data(sender, message) + } + &ShareAddMessage::NewKeysDissemination(ref message) => { + self.on_new_keys_dissemination(sender, message) + } + &ShareAddMessage::ShareAddError(ref message) => { + self.on_session_error(sender, message.error.clone()); + Ok(()) + } + } + } - /// When consensus-related message is received. - pub fn on_consensus_message(&self, sender: &NodeId, message: &ShareAddConsensusMessage) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When consensus-related message is received. 
+ pub fn on_consensus_message( + &self, + sender: &NodeId, + message: &ShareAddConsensusMessage, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - // start slave consensus session if needed - let mut data = self.data.lock(); - match &message.message { - &ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) - if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id => { - let admin_public = self.core.admin_public.as_ref().cloned().ok_or(Error::ConsensusUnreachable)?; - data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(message.new_nodes_map.len())?, - consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public), - consensus_transport: self.core.transport.clone(), - })?); - }, - _ => (), - }; + // start slave consensus session if needed + let mut data = self.data.lock(); + match &message.message { + &ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) + if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id => + { + let admin_public = self + .core + .admin_public + .as_ref() + .cloned() + .ok_or(Error::ConsensusUnreachable)?; + data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { + meta: self + .core + .meta + .clone() + .into_consensus_meta(message.new_nodes_map.len())?, + consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public), + consensus_transport: self.core.transport.clone(), + })?); + } + _ => (), + }; - // process consensus message - let (is_establishing_consensus, is_consensus_established, version, new_nodes_map, consensus_group, version_holders) = { - let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; - let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + // process 
consensus message + let ( + is_establishing_consensus, + is_consensus_established, + version, + new_nodes_map, + consensus_group, + version_holders, + ) = { + let consensus_session = data + .consensus_session + .as_mut() + .ok_or(Error::InvalidMessage)?; + let is_establishing_consensus = + consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - let (version, new_nodes_map, consensus_group, version_holders) = match &message.message { - &ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) => { - consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?; + let (version, new_nodes_map, consensus_group, version_holders) = match &message.message + { + &ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) => { + consensus_session.on_consensus_partial_request( + sender, + ServersSetChangeAccessRequest::from(message), + )?; - let version = message.version.clone().into(); - let consensus_group = message.consensus_group.iter().cloned().map(Into::into).collect(); - let version_holders = message.version_holders.iter().cloned().map(Into::into).collect(); - let new_nodes_map: BTreeMap<_, _> = message.new_nodes_map.iter() - .map(|(n, nn)| (n.clone().into(), Some(nn.clone().into()))) - .collect(); + let version = message.version.clone().into(); + let consensus_group = message + .consensus_group + .iter() + .cloned() + .map(Into::into) + .collect(); + let version_holders = message + .version_holders + .iter() + .cloned() + .map(Into::into) + .collect(); + let new_nodes_map: BTreeMap<_, _> = message + .new_nodes_map + .iter() + .map(|(n, nn)| (n.clone().into(), Some(nn.clone().into()))) + .collect(); - // check that all id_numbers are filled - if new_nodes_map.values().any(Option::is_none) { - return Err(Error::ConsensusUnreachable); - } + // check that all id_numbers are filled + if new_nodes_map.values().any(Option::is_none) { + return Err(Error::ConsensusUnreachable); + } - // check old 
set of nodes - Self::check_nodes_map(&self.core, &version, &consensus_group, &version_holders, &new_nodes_map)?; + // check old set of nodes + Self::check_nodes_map( + &self.core, + &version, + &consensus_group, + &version_holders, + &new_nodes_map, + )?; - (Some(version), Some(new_nodes_map), Some(consensus_group), Some(version_holders)) - }, - &ConsensusMessageOfShareAdd::ConfirmConsensusInitialization(ref message) => { - consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?; - (None, None, None, None) - }, - }; + ( + Some(version), + Some(new_nodes_map), + Some(consensus_group), + Some(version_holders), + ) + } + &ConsensusMessageOfShareAdd::ConfirmConsensusInitialization(ref message) => { + consensus_session + .on_consensus_partial_response(sender, message.is_confirmed)?; + (None, None, None, None) + } + }; - ( - is_establishing_consensus, - consensus_session.state() == ConsensusSessionState::ConsensusEstablished, - version, - new_nodes_map, - consensus_group, - version_holders, - ) - }; + ( + is_establishing_consensus, + consensus_session.state() == ConsensusSessionState::ConsensusEstablished, + version, + new_nodes_map, + consensus_group, + version_holders, + ) + }; - // update data - if let Some(version) = version { - data.version = Some(version); - } - if let Some(new_nodes_map) = new_nodes_map { - data.id_numbers = Some(new_nodes_map); - } - if let Some(consensus_group) = consensus_group { - data.secret_subshares = Some(consensus_group.into_iter().map(|n| (n, None)).collect()); - } - if let Some(version_holders) = version_holders { - data.version_holders = Some(version_holders); - } + // update data + if let Some(version) = version { + data.version = Some(version); + } + if let Some(new_nodes_map) = new_nodes_map { + data.id_numbers = Some(new_nodes_map); + } + if let Some(consensus_group) = consensus_group { + data.secret_subshares = Some(consensus_group.into_iter().map(|n| (n, None)).collect()); + } + if let Some(version_holders) 
= version_holders { + data.version_holders = Some(version_holders); + } - // if consensus is stablished, proceed - if !is_establishing_consensus || !is_consensus_established || self.core.meta.self_node_id != self.core.meta.master_node_id { - return Ok(()); - } + // if consensus is stablished, proceed + if !is_establishing_consensus + || !is_consensus_established + || self.core.meta.self_node_id != self.core.meta.master_node_id + { + return Ok(()); + } - Self::on_consensus_established(&self.core, &mut *data) - } + Self::on_consensus_established(&self.core, &mut *data) + } - /// When common key share data is received by new node. - pub fn on_common_key_share_data(&self, sender: &NodeId, message: &KeyShareCommon) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When common key share data is received by new node. + pub fn on_common_key_share_data( + &self, + sender: &NodeId, + message: &KeyShareCommon, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - // only master can send this message - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } + // only master can send this message + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } - let mut data = self.data.lock(); + let mut data = self.data.lock(); - // check state - if data.state != SessionState::ConsensusEstablishing || data.id_numbers.is_none() { - return Ok(()); - } + // check state + if data.state != SessionState::ConsensusEstablishing || data.id_numbers.is_none() { + return Ok(()); + } - // we only expect this message once - if data.new_key_share.is_some() { - return Err(Error::InvalidStateForRequest); - } + // we only expect this message once + if data.new_key_share.is_some() { + return Err(Error::InvalidStateForRequest); + } - // check if we actually waiting 
for this message - { - let version = data.version.as_ref().ok_or(Error::InvalidStateForRequest)?; - let key_version = self.core.key_share.as_ref().and_then(|ks| ks.version(version).ok()); - if key_version.is_some() { - return Ok(()); - } - } + // check if we actually waiting for this message + { + let version = data.version.as_ref().ok_or(Error::InvalidStateForRequest)?; + let key_version = self + .core + .key_share + .as_ref() + .and_then(|ks| ks.version(version).ok()); + if key_version.is_some() { + return Ok(()); + } + } - // update data - data.state = SessionState::WaitingForKeysDissemination; - data.new_key_share = Some(NewKeyShare { - threshold: message.key_common.threshold, - author: message.key_common.author.clone().into(), - joint_public: message.key_common.public.clone().into(), - common_point: message.common_point.clone().map(Into::into), - encrypted_point: message.encrypted_point.clone().map(Into::into), - }); + // update data + data.state = SessionState::WaitingForKeysDissemination; + data.new_key_share = Some(NewKeyShare { + threshold: message.key_common.threshold, + author: message.key_common.author.clone().into(), + joint_public: message.key_common.public.clone().into(), + common_point: message.common_point.clone().map(Into::into), + encrypted_point: message.encrypted_point.clone().map(Into::into), + }); - let id_numbers = data.id_numbers.as_mut() + let id_numbers = data.id_numbers.as_mut() .expect("common key share data is expected after initialization; id_numbers are filled during initialization; qed"); - for (node, id_number) in &message.id_numbers { - let id_number: Secret = id_number.clone().into(); - { - let local_id_number = id_numbers.get(&node.clone().into()); - match local_id_number { - Some(&Some(ref local_id_number)) => { - if *local_id_number == id_number { - continue; - } + for (node, id_number) in &message.id_numbers { + let id_number: Secret = id_number.clone().into(); + { + let local_id_number = id_numbers.get(&node.clone().into()); 
+ match local_id_number { + Some(&Some(ref local_id_number)) => { + if *local_id_number == id_number { + continue; + } - return Err(Error::ConsensusUnreachable); - }, - Some(&None) => (), - None => continue, // can happen for isolated nodes - } - } + return Err(Error::ConsensusUnreachable); + } + Some(&None) => (), + None => continue, // can happen for isolated nodes + } + } - id_numbers.insert(node.clone().into(), Some(id_number)); - } + id_numbers.insert(node.clone().into(), Some(id_number)); + } - Ok(()) - } + Ok(()) + } - /// When keys dissemination message is received. - pub fn on_new_keys_dissemination(&self, sender: &NodeId, message: &NewKeysDissemination) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When keys dissemination message is received. + pub fn on_new_keys_dissemination( + &self, + sender: &NodeId, + message: &NewKeysDissemination, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); - let mut data = self.data.lock(); + let mut data = self.data.lock(); - // check state - if data.state == SessionState::ConsensusEstablishing && data.secret_subshares.is_some() { - data.state = SessionState::WaitingForKeysDissemination; - } else if data.state != SessionState::WaitingForKeysDissemination { - return Err(Error::InvalidStateForRequest); - } + // check state + if data.state == SessionState::ConsensusEstablishing && data.secret_subshares.is_some() { + data.state = SessionState::WaitingForKeysDissemination; + } else if data.state != SessionState::WaitingForKeysDissemination { + return Err(Error::InvalidStateForRequest); + } - // update data - let explanation = "secret_subshares is filled during initialization; keys are disseminated after initialization; qed"; - { - match data.secret_subshares.as_ref().expect(explanation).get(sender) { - None => return Err(Error::InvalidMessage), 
- Some(&Some(_)) => return Err(Error::InvalidMessage), - Some(&None) => (), - }; + // update data + let explanation = "secret_subshares is filled during initialization; keys are disseminated after initialization; qed"; + { + match data + .secret_subshares + .as_ref() + .expect(explanation) + .get(sender) + { + None => return Err(Error::InvalidMessage), + Some(&Some(_)) => return Err(Error::InvalidMessage), + Some(&None) => (), + }; - let secret_subshare = Self::compute_secret_subshare(&self.core, &mut *data, sender, &message.secret_subshare.clone().into())?; - *data.secret_subshares.as_mut().expect(explanation) - .get_mut(sender) - .expect("checked couple of lines above; qed") = Some(secret_subshare); - } + let secret_subshare = Self::compute_secret_subshare( + &self.core, + &mut *data, + sender, + &message.secret_subshare.clone().into(), + )?; + *data + .secret_subshares + .as_mut() + .expect(explanation) + .get_mut(sender) + .expect("checked couple of lines above; qed") = Some(secret_subshare); + } - // if we have received subshare from master node, it means that we should start dissemination - if sender == &self.core.meta.master_node_id { - Self::on_consensus_established(&self.core, &mut *data)?; - } + // if we have received subshare from master node, it means that we should start dissemination + if sender == &self.core.meta.master_node_id { + Self::on_consensus_established(&self.core, &mut *data)?; + } - // check if shares from all nodes are received - if data.secret_subshares.as_ref().expect(explanation).values().any(|v| v.is_none()) { - return Ok(()) - } + // check if shares from all nodes are received + if data + .secret_subshares + .as_ref() + .expect(explanation) + .values() + .any(|v| v.is_none()) + { + return Ok(()); + } - // TODO [Trust]: find a way to verificate keys - Self::complete_session(&self.core, &mut *data) - } + // TODO [Trust]: find a way to verificate keys + Self::complete_session(&self.core, &mut *data) + } - /// Check nodes map. 
- fn check_nodes_map(core: &SessionCore, version: &H256, consensus_group: &BTreeSet, version_holders: &BTreeSet, new_nodes_map: &BTreeMap>) -> Result<(), Error> { - // check if this node has given version - let has_this_version = match core.key_share.as_ref() { - Some(key_share) => key_share.version(version).is_ok(), - None => false, - }; + /// Check nodes map. + fn check_nodes_map( + core: &SessionCore, + version: &H256, + consensus_group: &BTreeSet, + version_holders: &BTreeSet, + new_nodes_map: &BTreeMap>, + ) -> Result<(), Error> { + // check if this node has given version + let has_this_version = match core.key_share.as_ref() { + Some(key_share) => key_share.version(version).is_ok(), + None => false, + }; - // check && update passed data - match has_this_version { - true => { - // check if version exists - let explanation = "has_this_version is true; it is true if we have given version of the key; qed"; - let key_share = core.key_share.as_ref().expect(explanation); - let key_version = key_share.version(version).expect(explanation); + // check && update passed data + match has_this_version { + true => { + // check if version exists + let explanation = + "has_this_version is true; it is true if we have given version of the key; qed"; + let key_share = core.key_share.as_ref().expect(explanation); + let key_version = key_share.version(version).expect(explanation); - // there must be exactly thresold + 1 nodes in consensus group - if consensus_group.len() != key_share.threshold + 1 { - return Err(Error::ConsensusUnreachable); - } + // there must be exactly thresold + 1 nodes in consensus group + if consensus_group.len() != key_share.threshold + 1 { + return Err(Error::ConsensusUnreachable); + } - // every non-isolated node must be a part of new_nodes_set - let non_isolated_nodes = core.transport.nodes(); - if key_version.id_numbers.keys().any(|n| non_isolated_nodes.contains(n) && !new_nodes_map.contains_key(n)) { - return Err(Error::ConsensusUnreachable); - } + // 
every non-isolated node must be a part of new_nodes_set + let non_isolated_nodes = core.transport.nodes(); + if key_version + .id_numbers + .keys() + .any(|n| non_isolated_nodes.contains(n) && !new_nodes_map.contains_key(n)) + { + return Err(Error::ConsensusUnreachable); + } - // there must be at least one new node in new_nodes_map - if key_version.id_numbers.keys().filter(|n| non_isolated_nodes.contains(n) && version_holders.contains(n)).count() >= new_nodes_map.len() { - return Err(Error::ConsensusUnreachable); - } - }, - false => { - // if we do not have a share, we should not be a part of consenus group - // but we must be on new nodes set, since this is a ShareAdd session - if consensus_group.contains(&core.meta.self_node_id) || - !new_nodes_map.contains_key(&core.meta.self_node_id) { - return Err(Error::ConsensusUnreachable); - } - }, - } + // there must be at least one new node in new_nodes_map + if key_version + .id_numbers + .keys() + .filter(|n| non_isolated_nodes.contains(n) && version_holders.contains(n)) + .count() + >= new_nodes_map.len() + { + return Err(Error::ConsensusUnreachable); + } + } + false => { + // if we do not have a share, we should not be a part of consenus group + // but we must be on new nodes set, since this is a ShareAdd session + if consensus_group.contains(&core.meta.self_node_id) + || !new_nodes_map.contains_key(&core.meta.self_node_id) + { + return Err(Error::ConsensusUnreachable); + } + } + } - // master node must always be a part of consensus group - if !consensus_group.contains(&core.meta.master_node_id) { - return Err(Error::ConsensusUnreachable); - } + // master node must always be a part of consensus group + if !consensus_group.contains(&core.meta.master_node_id) { + return Err(Error::ConsensusUnreachable); + } - // master node must always be a part of new_nodes_map - if !new_nodes_map.contains_key(&core.meta.master_node_id) { - return Err(Error::ConsensusUnreachable); - } + // master node must always be a part of 
new_nodes_map + if !new_nodes_map.contains_key(&core.meta.master_node_id) { + return Err(Error::ConsensusUnreachable); + } - Ok(()) - } + Ok(()) + } - /// Start sending ShareAdd-specific messages, when consensus is established. - fn on_consensus_established(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { - // update state - data.state = SessionState::WaitingForKeysDissemination; + /// Start sending ShareAdd-specific messages, when consensus is established. + fn on_consensus_established( + core: &SessionCore, + data: &mut SessionData, + ) -> Result<(), Error> { + // update state + data.state = SessionState::WaitingForKeysDissemination; - // if we're not a part of consensus group, wait for secret subshares - let explanation = "secret_subshares is a result of consensus job; consensus is established; qed"; - let is_consensus_group_node = data.secret_subshares.as_ref().expect(explanation).contains_key(&core.meta.self_node_id); - if !is_consensus_group_node { - return Ok(()); - } + // if we're not a part of consensus group, wait for secret subshares + let explanation = + "secret_subshares is a result of consensus job; consensus is established; qed"; + let is_consensus_group_node = data + .secret_subshares + .as_ref() + .expect(explanation) + .contains_key(&core.meta.self_node_id); + if !is_consensus_group_node { + return Ok(()); + } - // else if master => send shared data to every new node - if core.meta.self_node_id == core.meta.master_node_id { - Self::disseminate_common_share_data(core, data)?; - } + // else if master => send shared data to every new node + if core.meta.self_node_id == core.meta.master_node_id { + Self::disseminate_common_share_data(core, data)?; + } - // ...and then disseminate keys - Self::disseminate_keys(core, data)?; + // ...and then disseminate keys + Self::disseminate_keys(core, data)?; - // ..and check if session could be completed - if data.secret_subshares.as_ref().expect(explanation).values().any(|v| v.is_none()) { - 
return Ok(()) - } + // ..and check if session could be completed + if data + .secret_subshares + .as_ref() + .expect(explanation) + .values() + .any(|v| v.is_none()) + { + return Ok(()); + } - // TODO [Trust]: find a way to verificate keys - Self::complete_session(core, data) - } + // TODO [Trust]: find a way to verificate keys + Self::complete_session(core, data) + } - /// Send common share data to evey new node. - fn disseminate_common_share_data(core: &SessionCore, data: &SessionData) -> Result<(), Error> { - let explanation = "disseminate_common_share_data is only called on master node; master node has specified version of the key; qed"; - let old_key_share = core.key_share.as_ref().expect(explanation); - let old_key_version = old_key_share.version(data.version.as_ref().expect(explanation)).expect(explanation); - let version_holders = data.version_holders.as_ref() + /// Send common share data to evey new node. + fn disseminate_common_share_data( + core: &SessionCore, + data: &SessionData, + ) -> Result<(), Error> { + let explanation = "disseminate_common_share_data is only called on master node; master node has specified version of the key; qed"; + let old_key_share = core.key_share.as_ref().expect(explanation); + let old_key_version = old_key_share + .version(data.version.as_ref().expect(explanation)) + .expect(explanation); + let version_holders = data.version_holders.as_ref() .expect("disseminate_common_share_data is only called on master node; version holders is created during initialization on master node; qed"); - let consensus_group = data.secret_subshares.as_ref() + let consensus_group = data.secret_subshares.as_ref() .expect("disseminate_common_share_data is only called on master node; consensus group is created during initialization on master node; qed"); - let nodes = data.id_numbers.as_ref() + let nodes = data.id_numbers.as_ref() .expect("nodes are filled during consensus establishing; common share data sent after consensus is established; qed") 
.keys() .filter(|n| !consensus_group.contains_key(n)); - for new_node in nodes { - core.transport.send(new_node, ShareAddMessage::KeyShareCommon(KeyShareCommon { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - key_common: CommonKeyData { - threshold: old_key_share.threshold, - author: old_key_share.author.into(), - public: old_key_share.public.into(), - }, - common_point: old_key_share.common_point.clone().map(Into::into), - encrypted_point: old_key_share.encrypted_point.clone().map(Into::into), - id_numbers: old_key_version.id_numbers.iter() - .filter(|&(k, _)| version_holders.contains(k)) - .map(|(k, v)| (k.clone().into(), v.clone().into())).collect(), - }))?; - } + for new_node in nodes { + core.transport.send( + new_node, + ShareAddMessage::KeyShareCommon(KeyShareCommon { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + key_common: CommonKeyData { + threshold: old_key_share.threshold, + author: old_key_share.author.into(), + public: old_key_share.public.into(), + }, + common_point: old_key_share.common_point.clone().map(Into::into), + encrypted_point: old_key_share.encrypted_point.clone().map(Into::into), + id_numbers: old_key_version + .id_numbers + .iter() + .filter(|&(k, _)| version_holders.contains(k)) + .map(|(k, v)| (k.clone().into(), v.clone().into())) + .collect(), + }), + )?; + } - Ok(()) - } + Ok(()) + } - /// Disseminate key refreshing data. 
- fn disseminate_keys(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { - // generate random polynom with secret share as absolute term - let explanation = "disseminate_keys is only called on consensus group nodes; consensus group nodes have specified version of the key; qed"; - let key_share = core.key_share.as_ref().expect(explanation); - let key_version = key_share.version(data.version.as_ref().expect(explanation)).expect(explanation); - let mut secret_share_polynom = math::generate_random_polynom(key_share.threshold)?; - secret_share_polynom[0] = key_version.secret_share.clone(); + /// Disseminate key refreshing data. + fn disseminate_keys(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // generate random polynom with secret share as absolute term + let explanation = "disseminate_keys is only called on consensus group nodes; consensus group nodes have specified version of the key; qed"; + let key_share = core.key_share.as_ref().expect(explanation); + let key_version = key_share + .version(data.version.as_ref().expect(explanation)) + .expect(explanation); + let mut secret_share_polynom = math::generate_random_polynom(key_share.threshold)?; + secret_share_polynom[0] = key_version.secret_share.clone(); - // calculate secret subshare for every new node (including this node) - let explanation = "disseminate_keys is called after initialization has completed; this field is filled during initialization; qed"; - for (new_node, new_node_number) in data.id_numbers.as_ref().expect(explanation).iter() { - let new_node_number = new_node_number.as_ref().ok_or(Error::InvalidMessage)?; - let secret_subshare = math::compute_polynom(&secret_share_polynom, new_node_number)?; - if new_node != &core.meta.self_node_id { - core.transport.send(new_node, ShareAddMessage::NewKeysDissemination(NewKeysDissemination { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - secret_subshare: secret_subshare.into(), - }))?; - } else { - let 
secret_subshare = Self::compute_secret_subshare(core, data, new_node, &secret_subshare)?; - *data.secret_subshares.as_mut().expect(explanation) + // calculate secret subshare for every new node (including this node) + let explanation = "disseminate_keys is called after initialization has completed; this field is filled during initialization; qed"; + for (new_node, new_node_number) in data.id_numbers.as_ref().expect(explanation).iter() { + let new_node_number = new_node_number.as_ref().ok_or(Error::InvalidMessage)?; + let secret_subshare = math::compute_polynom(&secret_share_polynom, new_node_number)?; + if new_node != &core.meta.self_node_id { + core.transport.send( + new_node, + ShareAddMessage::NewKeysDissemination(NewKeysDissemination { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + secret_subshare: secret_subshare.into(), + }), + )?; + } else { + let secret_subshare = + Self::compute_secret_subshare(core, data, new_node, &secret_subshare)?; + *data.secret_subshares.as_mut().expect(explanation) .get_mut(&core.meta.self_node_id) .expect("disseminate_keys is only calle on consensus group nodes; there's entry for every consensus node in secret_subshares; qed") = Some(secret_subshare); - } - } + } + } - Ok(()) - } + Ok(()) + } - /// Compute secret subshare from passed secret value. - fn compute_secret_subshare(core: &SessionCore, data: &SessionData, sender: &NodeId, secret_value: &Secret) -> Result { - let explanation = "this field is a result of consensus job; compute_secret_subshare is called after consensus is established"; - let id_numbers = data.id_numbers.as_ref().expect(explanation); - let secret_subshares = data.secret_subshares.as_ref().expect(explanation); - let threshold = core.key_share.as_ref().map(|ks| ks.threshold) - .unwrap_or_else(|| data.new_key_share.as_ref() + /// Compute secret subshare from passed secret value. 
+ fn compute_secret_subshare( + core: &SessionCore, + data: &SessionData, + sender: &NodeId, + secret_value: &Secret, + ) -> Result { + let explanation = "this field is a result of consensus job; compute_secret_subshare is called after consensus is established"; + let id_numbers = data.id_numbers.as_ref().expect(explanation); + let secret_subshares = data.secret_subshares.as_ref().expect(explanation); + let threshold = core + .key_share + .as_ref() + .map(|ks| ks.threshold) + .unwrap_or_else(|| { + data.new_key_share.as_ref() .expect("computation occurs after receiving key share threshold if not having one already; qed") - .threshold); + .threshold + }); - let explanation = "id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"; - let sender_id_number = id_numbers[sender].as_ref().expect(explanation); - let other_id_numbers = secret_subshares.keys().filter(|k| *k != sender).map(|n| id_numbers[n].as_ref().expect(explanation)); - math::compute_secret_subshare(threshold, secret_value, sender_id_number, other_id_numbers) - } + let explanation = "id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"; + let sender_id_number = id_numbers[sender].as_ref().expect(explanation); + let other_id_numbers = secret_subshares + .keys() + .filter(|k| *k != sender) + .map(|n| id_numbers[n].as_ref().expect(explanation)); + math::compute_secret_subshare(threshold, secret_value, sender_id_number, other_id_numbers) + } - /// Complete session. - fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { - // if already completed, do nothing - if data.state == SessionState::Finished { - return Ok(()); - } + /// Complete session. 
+ fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // if already completed, do nothing + if data.state == SessionState::Finished { + return Ok(()); + } - // compose updated key share - let explanation = "this field is a result of consensus job; complete_session is called after consensus is established"; - let id_numbers = data.id_numbers.as_ref().expect(explanation); - let secret_subshares = data.secret_subshares.as_ref() + // compose updated key share + let explanation = "this field is a result of consensus job; complete_session is called after consensus is established"; + let id_numbers = data.id_numbers.as_ref().expect(explanation); + let secret_subshares = data.secret_subshares.as_ref() .expect("nodes are filled during consensus establishing; session is completed after consensus is established; qed"); - let secret_share = math::compute_secret_share(secret_subshares.values().map(|ss| ss.as_ref() - .expect("complete_session is only called when subshares from all nodes are received; qed")))?; + let secret_share = math::compute_secret_share(secret_subshares.values().map(|ss| { + ss.as_ref().expect( + "complete_session is only called when subshares from all nodes are received; qed", + ) + }))?; - let refreshed_key_version = DocumentKeyShareVersion::new(id_numbers.clone().into_iter().map(|(k, v)| (k.clone(), + let refreshed_key_version = DocumentKeyShareVersion::new(id_numbers.clone().into_iter().map(|(k, v)| (k.clone(), v.expect("id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"))).collect(), secret_share); - let mut refreshed_key_share = core.key_share.as_ref().cloned().unwrap_or_else(|| { + let mut refreshed_key_share = core.key_share.as_ref().cloned().unwrap_or_else(|| { let new_key_share = data.new_key_share.as_ref() .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"); DocumentKeyShare { @@ -739,103 +971,120 
@@ impl SessionImpl where T: SessionTransport { versions: Vec::new(), } }); - refreshed_key_share.versions.push(refreshed_key_version); + refreshed_key_share.versions.push(refreshed_key_version); - // save encrypted data to the key storage - data.state = SessionState::Finished; - if core.key_share.is_some() { - core.key_storage.update(core.meta.id.clone(), refreshed_key_share.clone())?; - } else { - core.key_storage.insert(core.meta.id.clone(), refreshed_key_share.clone())?; - } + // save encrypted data to the key storage + data.state = SessionState::Finished; + if core.key_share.is_some() { + core.key_storage + .update(core.meta.id.clone(), refreshed_key_share.clone())?; + } else { + core.key_storage + .insert(core.meta.id.clone(), refreshed_key_share.clone())?; + } - // signal session completion - data.state = SessionState::Finished; - data.result = Some(Ok(())); - core.completed.notify_all(); + // signal session completion + data.state = SessionState::Finished; + data.result = Some(Ok(())); + core.completed.notify_all(); - Ok(()) - } + Ok(()) + } } -impl ClusterSession for SessionImpl where T: SessionTransport { - type Id = SessionId; +impl ClusterSession for SessionImpl +where + T: SessionTransport, +{ + type Id = SessionId; - fn type_name() -> &'static str { - "share add" - } + fn type_name() -> &'static str { + "share add" + } - fn id(&self) -> SessionId { - self.core.meta.id.clone() - } + fn id(&self) -> SessionId { + self.core.meta.id.clone() + } - fn is_finished(&self) -> bool { - self.data.lock().state == SessionState::Finished - } + fn is_finished(&self) -> bool { + self.data.lock().state == SessionState::Finished + } - fn on_session_timeout(&self) { - self.on_session_error(&self.core.meta.self_node_id, Error::NodeDisconnected) - } + fn on_session_timeout(&self) { + self.on_session_error(&self.core.meta.self_node_id, Error::NodeDisconnected) + } - fn on_node_timeout(&self, node: &NodeId) { - self.on_session_error(node, Error::NodeDisconnected) - } + fn 
on_node_timeout(&self, node: &NodeId) { + self.on_session_error(node, Error::NodeDisconnected) + } - fn on_session_error(&self, node: &NodeId, error: Error) { - // error in generation session is considered fatal - // => broadcast error if error occured on this node - if *node == self.core.meta.self_node_id { - for node in self.core.transport.nodes() { - // do not bother processing send error, as we already processing error - let _ = self.core.transport.send(&node, ShareAddMessage::ShareAddError(ShareAddError { - session: self.core.meta.id.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - })); - } - } + fn on_session_error(&self, node: &NodeId, error: Error) { + // error in generation session is considered fatal + // => broadcast error if error occured on this node + if *node == self.core.meta.self_node_id { + for node in self.core.transport.nodes() { + // do not bother processing send error, as we already processing error + let _ = self.core.transport.send( + &node, + ShareAddMessage::ShareAddError(ShareAddError { + session: self.core.meta.id.clone().into(), + session_nonce: self.core.nonce, + error: error.clone().into(), + }), + ); + } + } - let mut data = self.data.lock(); + let mut data = self.data.lock(); - warn!(target: "secretstore_net", "{}: share add session failed: {} on {}", + warn!(target: "secretstore_net", "{}: share add session failed: {} on {}", self.core.meta.self_node_id, error, node); - data.state = SessionState::Finished; - data.result = Some(Err(error)); - self.core.completed.notify_all(); - } + data.state = SessionState::Finished; + data.result = Some(Err(error)); + self.core.completed.notify_all(); + } - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::ShareAdd(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } + fn on_message(&self, sender: &NodeId, message: 
&Message) -> Result<(), Error> { + match *message { + Message::ShareAdd(ref message) => self.process_message(sender, message), + _ => unreachable!("cluster checks message to be correct before passing; qed"), + } + } } impl IsolatedSessionTransport { - pub fn new(session_id: SessionId, version: Option, nonce: u64, cluster: Arc) -> Self { - IsolatedSessionTransport { - session: session_id, - version: version, - nonce: nonce, - cluster: cluster, - id_numbers: None, - version_holders: None, - consensus_group: None, - } - } + pub fn new( + session_id: SessionId, + version: Option, + nonce: u64, + cluster: Arc, + ) -> Self { + IsolatedSessionTransport { + session: session_id, + version: version, + nonce: nonce, + cluster: cluster, + id_numbers: None, + version_holders: None, + consensus_group: None, + } + } } impl JobTransport for IsolatedSessionTransport { - type PartialJobRequest = ServersSetChangeAccessRequest; - type PartialJobResponse = bool; + type PartialJobRequest = ServersSetChangeAccessRequest; + type PartialJobResponse = bool; - fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> { - let explanation = "partial requests are sent from master node only; on master node this field is filled during creation; qed"; - let id_numbers = self.id_numbers.as_ref().expect(explanation); + fn send_partial_request( + &self, + node: &NodeId, + request: ServersSetChangeAccessRequest, + ) -> Result<(), Error> { + let explanation = "partial requests are sent from master node only; on master node this field is filled during creation; qed"; + let id_numbers = self.id_numbers.as_ref().expect(explanation); - self.cluster.send(node, Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ShareAddConsensusMessage { + self.cluster.send(node, Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ShareAddConsensusMessage { session: self.session.clone().into(), session_nonce: self.nonce, message: 
ConsensusMessageOfShareAdd::InitializeConsensusSession(InitializeConsensusSessionOfShareAdd { @@ -853,257 +1102,362 @@ impl JobTransport for IsolatedSessionTransport { new_set_signature: request.new_set_signature.into(), }), }))) - } + } - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - self.cluster.send(node, Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ShareAddConsensusMessage { - session: self.session.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessageOfShareAdd::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: response, - }), - }))) - } + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send( + node, + Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage( + ShareAddConsensusMessage { + session: self.session.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageOfShareAdd::ConfirmConsensusInitialization( + ConfirmConsensusInitialization { + is_confirmed: response, + }, + ), + }, + )), + ) + } } impl SessionTransport for IsolatedSessionTransport { - fn nodes(&self) -> BTreeSet { - self.cluster.nodes() - } + fn nodes(&self) -> BTreeSet { + self.cluster.nodes() + } - fn set_master_data(&mut self, consensus_group: BTreeSet, version_holders: BTreeSet, id_numbers: BTreeMap>) { - self.version_holders = Some(version_holders); - self.consensus_group = Some(consensus_group); - self.id_numbers = Some(id_numbers); - } + fn set_master_data( + &mut self, + consensus_group: BTreeSet, + version_holders: BTreeSet, + id_numbers: BTreeMap>, + ) { + self.version_holders = Some(version_holders); + self.consensus_group = Some(consensus_group); + self.id_numbers = Some(id_numbers); + } - fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> { - self.cluster.send(node, Message::ShareAdd(message)) - } + fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> { + 
self.cluster.send(node, Message::ShareAdd(message)) + } } #[cfg(test)] pub mod tests { - use std::collections::BTreeSet; - use ethkey::{Random, Generator, Public}; - use key_server_cluster::{NodeId, Error, KeyStorage, NodeKeyPair}; - use key_server_cluster::cluster::tests::MessageLoop as ClusterMessageLoop; - use key_server_cluster::servers_set_change_session::tests::{MessageLoop, AdminSessionAdapter, generate_key}; - use key_server_cluster::admin_sessions::ShareChangeSessionMeta; - use super::{SessionImpl, SessionParams, IsolatedSessionTransport}; + use super::{IsolatedSessionTransport, SessionImpl, SessionParams}; + use ethkey::{Generator, Public, Random}; + use key_server_cluster::{ + admin_sessions::ShareChangeSessionMeta, + cluster::tests::MessageLoop as ClusterMessageLoop, + servers_set_change_session::tests::{generate_key, AdminSessionAdapter, MessageLoop}, + Error, KeyStorage, NodeId, NodeKeyPair, + }; + use std::collections::BTreeSet; - struct Adapter; + struct Adapter; - impl AdminSessionAdapter> for Adapter { - const SIGN_NEW_NODES: bool = false; + impl AdminSessionAdapter> for Adapter { + const SIGN_NEW_NODES: bool = false; - fn create( - mut meta: ShareChangeSessionMeta, - admin_public: Public, - _: BTreeSet, - ml: &ClusterMessageLoop, - idx: usize - ) -> SessionImpl { - let key_storage = ml.key_storage(idx).clone(); - let key_version = key_storage.get(&meta.id).unwrap().map(|ks| ks.last_version().unwrap().hash); + fn create( + mut meta: ShareChangeSessionMeta, + admin_public: Public, + _: BTreeSet, + ml: &ClusterMessageLoop, + idx: usize, + ) -> SessionImpl { + let key_storage = ml.key_storage(idx).clone(); + let key_version = key_storage + .get(&meta.id) + .unwrap() + .map(|ks| ks.last_version().unwrap().hash); - meta.self_node_id = *ml.node_key_pair(idx).public(); - SessionImpl::new(SessionParams { - meta: meta.clone(), - transport: IsolatedSessionTransport::new(meta.id, key_version, 1, ml.cluster(idx).view().unwrap()), - key_storage, - 
admin_public: Some(admin_public), - nonce: 1, - }).unwrap() - } - } + meta.self_node_id = *ml.node_key_pair(idx).public(); + SessionImpl::new(SessionParams { + meta: meta.clone(), + transport: IsolatedSessionTransport::new( + meta.id, + key_version, + 1, + ml.cluster(idx).view().unwrap(), + ), + key_storage, + admin_public: Some(admin_public), + nonce: 1, + }) + .unwrap() + } + } - impl MessageLoop> { - pub fn init_at(self, master: NodeId) -> Result { - self.sessions[&master].initialize( - Some(self.original_key_version), - Some(self.new_nodes_set.clone()), - Some(self.all_set_signature.clone()), - Some(self.new_set_signature.clone()))?; - Ok(self) - } + impl MessageLoop> { + pub fn init_at(self, master: NodeId) -> Result { + self.sessions[&master].initialize( + Some(self.original_key_version), + Some(self.new_nodes_set.clone()), + Some(self.all_set_signature.clone()), + Some(self.new_set_signature.clone()), + )?; + Ok(self) + } - pub fn run_at(self, master: NodeId) -> Result { - let mut ml = self.init_at(master)?; - ml.run(); - Ok(ml) - } - } + pub fn run_at(self, master: NodeId) -> Result { + let mut ml = self.init_at(master)?; + ml.run(); + Ok(ml) + } + } - #[test] - fn node_add_fails_if_nodes_removed() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn node_add_fails_if_nodes_removed() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // try to remove 1 node - let add = vec![Random.generate().unwrap()]; - let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); - let master = gml.0.node(0); - assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), Some(remove), None) - .run_at(master).unwrap_err(), Error::ConsensusUnreachable); - } + // try to remove 1 node + let add = vec![Random.generate().unwrap()]; + let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); + let master = gml.0.node(0); + assert_eq!( + MessageLoop::with_gml::(gml, master, Some(add), Some(remove), None) + .run_at(master) + 
.unwrap_err(), + Error::ConsensusUnreachable + ); + } - #[test] - fn node_add_fails_if_no_nodes_added() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn node_add_fails_if_no_nodes_added() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // try to add 0 nodes - let add = vec![]; - let master = gml.0.node(0); - assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, None) - .run_at(master).unwrap_err(), Error::ConsensusUnreachable); - } + // try to add 0 nodes + let add = vec![]; + let master = gml.0.node(0); + assert_eq!( + MessageLoop::with_gml::(gml, master, Some(add), None, None) + .run_at(master) + .unwrap_err(), + Error::ConsensusUnreachable + ); + } - #[test] - fn node_add_fails_if_started_on_adding_node() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn node_add_fails_if_started_on_adding_node() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // try to add 1 node using this node as a master node - let add = vec![Random.generate().unwrap()]; - let master = *add[0].public(); - assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, None) - .run_at(master).unwrap_err(), Error::ServerKeyIsNotFound); - } + // try to add 1 node using this node as a master node + let add = vec![Random.generate().unwrap()]; + let master = *add[0].public(); + assert_eq!( + MessageLoop::with_gml::(gml, master, Some(add), None, None) + .run_at(master) + .unwrap_err(), + Error::ServerKeyIsNotFound + ); + } - #[test] - fn node_add_fails_if_initialized_twice() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn node_add_fails_if_initialized_twice() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // try to add 1 node using this node as a master node - let add = vec![Random.generate().unwrap()]; - let master = gml.0.node(0); - assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, None) - .init_at(master).unwrap() - 
.init_at(master).unwrap_err(), Error::InvalidStateForRequest); - } + // try to add 1 node using this node as a master node + let add = vec![Random.generate().unwrap()]; + let master = gml.0.node(0); + assert_eq!( + MessageLoop::with_gml::(gml, master, Some(add), None, None) + .init_at(master) + .unwrap() + .init_at(master) + .unwrap_err(), + Error::InvalidStateForRequest + ); + } - #[test] - fn node_add_fails_if_started_without_signatures() { - // initial 2-of-3 session - let gml = generate_key(3, 1); + #[test] + fn node_add_fails_if_started_without_signatures() { + // initial 2-of-3 session + let gml = generate_key(3, 1); - // try to add 1 node using this node as a master node - let add = vec![Random.generate().unwrap()]; - let master = gml.0.node(0); - assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, None) - .sessions[&master] - .initialize(None, None, None, None).unwrap_err(), Error::InvalidMessage); - } + // try to add 1 node using this node as a master node + let add = vec![Random.generate().unwrap()]; + let master = gml.0.node(0); + assert_eq!( + MessageLoop::with_gml::(gml, master, Some(add), None, None).sessions[&master] + .initialize(None, None, None, None) + .unwrap_err(), + Error::InvalidMessage + ); + } - #[test] - fn nodes_added_using_share_add() { - let test_cases = vec![(3, 1), (3, 3)]; - for (n, add) in test_cases { - // generate key - let gml = generate_key(n, 1); + #[test] + fn nodes_added_using_share_add() { + let test_cases = vec![(3, 1), (3, 3)]; + for (n, add) in test_cases { + // generate key + let gml = generate_key(n, 1); - // run share add session - let add = (0..add).map(|_| Random.generate().unwrap()).collect(); - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, Some(add), None, None) - .run_at(master).unwrap(); + // run share add session + let add = (0..add).map(|_| Random.generate().unwrap()).collect(); + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, Some(add), 
None, None) + .run_at(master) + .unwrap(); - // check that secret is still the same as before adding the share - ml.check_secret_is_preserved(ml.sessions.keys()); - } - } + // check that secret is still the same as before adding the share + ml.check_secret_is_preserved(ml.sessions.keys()); + } + } - #[test] - fn nodes_added_using_share_add_with_isolated_nodes() { - let (n, add) = (3, 3); + #[test] + fn nodes_added_using_share_add_with_isolated_nodes() { + let (n, add) = (3, 3); - // generate key - let gml = generate_key(n, 1); + // generate key + let gml = generate_key(n, 1); - // run share add session - let master = gml.0.node(0); - let node_to_isolate = gml.0.node(1); - let add = (0..add).map(|_| Random.generate().unwrap()).collect(); - let isolate = ::std::iter::once(node_to_isolate).collect(); - let ml = MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) - .run_at(master).unwrap(); + // run share add session + let master = gml.0.node(0); + let node_to_isolate = gml.0.node(1); + let add = (0..add).map(|_| Random.generate().unwrap()).collect(); + let isolate = ::std::iter::once(node_to_isolate).collect(); + let ml = MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) + .run_at(master) + .unwrap(); - // check that secret is still the same as before adding the share - ml.check_secret_is_preserved(ml.sessions.keys()); - } + // check that secret is still the same as before adding the share + ml.check_secret_is_preserved(ml.sessions.keys()); + } - #[test] - fn nodes_add_to_the_node_with_obsolete_version() { - let (n, add) = (3, 3); + #[test] + fn nodes_add_to_the_node_with_obsolete_version() { + let (n, add) = (3, 3); - // generate key - let gml = generate_key(n, 1); + // generate key + let gml = generate_key(n, 1); - // run share add session - let master = gml.0.node(0); - let node_to_isolate_key_pair = gml.0.node_key_pair(1).clone(); - let node_to_isolate = gml.0.node(1); - let isolated_key_storage = gml.0.key_storage(1).clone(); - 
let mut oldest_nodes_set = gml.0.nodes(); - oldest_nodes_set.remove(&node_to_isolate); - let add = (0..add).map(|_| Random.generate().unwrap()).collect::>(); - let newest_nodes_set = add.iter().map(|kp| *kp.public()).collect::>(); - let isolate = ::std::iter::once(node_to_isolate).collect(); - let ml = MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) - .run_at(master).unwrap(); - let new_key_version = ml.ml.key_storage(0).get(&Default::default()) - .unwrap().unwrap().last_version().unwrap().hash; + // run share add session + let master = gml.0.node(0); + let node_to_isolate_key_pair = gml.0.node_key_pair(1).clone(); + let node_to_isolate = gml.0.node(1); + let isolated_key_storage = gml.0.key_storage(1).clone(); + let mut oldest_nodes_set = gml.0.nodes(); + oldest_nodes_set.remove(&node_to_isolate); + let add = (0..add) + .map(|_| Random.generate().unwrap()) + .collect::>(); + let newest_nodes_set = add.iter().map(|kp| *kp.public()).collect::>(); + let isolate = ::std::iter::once(node_to_isolate).collect(); + let ml = MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) + .run_at(master) + .unwrap(); + let new_key_version = ml + .ml + .key_storage(0) + .get(&Default::default()) + .unwrap() + .unwrap() + .last_version() + .unwrap() + .hash; - // now let's add back old node so that key becames 2-of-6 - let add = vec![node_to_isolate_key_pair.key_pair().clone()]; - let mut ml = ml.and_then::(master.clone(), Some(add), None, None); - ml.original_key_version = new_key_version; - ml.ml.replace_key_storage_of(&node_to_isolate, isolated_key_storage.clone()); - ml.sessions.get_mut(&node_to_isolate).unwrap().core.key_share = - isolated_key_storage.get(&Default::default()).unwrap(); - ml.sessions.get_mut(&node_to_isolate).unwrap().core.key_storage = isolated_key_storage; - let ml = ml.run_at(master).unwrap(); + // now let's add back old node so that key becames 2-of-6 + let add = vec![node_to_isolate_key_pair.key_pair().clone()]; + let mut 
ml = ml.and_then::(master.clone(), Some(add), None, None); + ml.original_key_version = new_key_version; + ml.ml + .replace_key_storage_of(&node_to_isolate, isolated_key_storage.clone()); + ml.sessions + .get_mut(&node_to_isolate) + .unwrap() + .core + .key_share = isolated_key_storage.get(&Default::default()).unwrap(); + ml.sessions + .get_mut(&node_to_isolate) + .unwrap() + .core + .key_storage = isolated_key_storage; + let ml = ml.run_at(master).unwrap(); - // check that secret is still the same as before adding the share - ml.check_secret_is_preserved(ml.sessions.keys()); + // check that secret is still the same as before adding the share + ml.check_secret_is_preserved(ml.sessions.keys()); - // check that all oldest nodes have versions A, B, C - // isolated node has version A, C - // new nodes have versions B, C - let oldest_key_share = ml.ml.key_storage_of(oldest_nodes_set.iter().nth(0).unwrap()) - .get(&Default::default()).unwrap().unwrap(); - debug_assert_eq!(oldest_key_share.versions.len(), 3); - let version_a = oldest_key_share.versions[0].hash.clone(); - let version_b = oldest_key_share.versions[1].hash.clone(); - let version_c = oldest_key_share.versions[2].hash.clone(); - debug_assert!(version_a != version_b && version_b != version_c); + // check that all oldest nodes have versions A, B, C + // isolated node has version A, C + // new nodes have versions B, C + let oldest_key_share = ml + .ml + .key_storage_of(oldest_nodes_set.iter().nth(0).unwrap()) + .get(&Default::default()) + .unwrap() + .unwrap(); + debug_assert_eq!(oldest_key_share.versions.len(), 3); + let version_a = oldest_key_share.versions[0].hash.clone(); + let version_b = oldest_key_share.versions[1].hash.clone(); + let version_c = oldest_key_share.versions[2].hash.clone(); + debug_assert!(version_a != version_b && version_b != version_c); - debug_assert!(oldest_nodes_set.iter().all(|n| vec![version_a.clone(), version_b.clone(), version_c.clone()] == - 
ml.ml.key_storage_of(n).get(&Default::default()).unwrap().unwrap() - .versions.iter().map(|v| v.hash).collect::>())); - debug_assert!(::std::iter::once(&node_to_isolate).all(|n| vec![version_a.clone(), version_c.clone()] == - ml.ml.key_storage_of(n).get(&Default::default()).unwrap().unwrap() - .versions.iter().map(|v| v.hash).collect::>())); - debug_assert!(newest_nodes_set.iter().all(|n| vec![version_b.clone(), version_c.clone()] == - ml.ml.key_storage_of(n).get(&Default::default()).unwrap().unwrap() - .versions.iter().map(|v| v.hash).collect::>())); - } + debug_assert!(oldest_nodes_set.iter().all(|n| vec![ + version_a.clone(), + version_b.clone(), + version_c.clone() + ] == ml + .ml + .key_storage_of(n) + .get(&Default::default()) + .unwrap() + .unwrap() + .versions + .iter() + .map(|v| v.hash) + .collect::>())); + debug_assert!(::std::iter::once(&node_to_isolate).all(|n| vec![ + version_a.clone(), + version_c.clone() + ] == ml + .ml + .key_storage_of(n) + .get(&Default::default()) + .unwrap() + .unwrap() + .versions + .iter() + .map(|v| v.hash) + .collect::>())); + debug_assert!(newest_nodes_set + .iter() + .all(|n| vec![version_b.clone(), version_c.clone()] + == ml + .ml + .key_storage_of(n) + .get(&Default::default()) + .unwrap() + .unwrap() + .versions + .iter() + .map(|v| v.hash) + .collect::>())); + } - #[test] - fn nodes_add_fails_when_not_enough_share_owners_are_connected() { - let (n, add) = (3, 3); + #[test] + fn nodes_add_fails_when_not_enough_share_owners_are_connected() { + let (n, add) = (3, 3); - // generate key - let gml = generate_key(n, 1); + // generate key + let gml = generate_key(n, 1); - // run share add session - let master = gml.0.node(0); - let add = (0..add).map(|_| Random.generate().unwrap()).collect::>(); - let isolate = vec![gml.0.node(1), gml.0.node(2)].into_iter().collect(); - assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) - .run_at(master).unwrap_err(), Error::ConsensusUnreachable); - } + // run 
share add session + let master = gml.0.node(0); + let add = (0..add) + .map(|_| Random.generate().unwrap()) + .collect::>(); + let isolate = vec![gml.0.node(1), gml.0.node(2)].into_iter().collect(); + assert_eq!( + MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) + .run_at(master) + .unwrap_err(), + Error::ConsensusUnreachable + ); + } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs index bd2bed2d6..f8829156b 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs @@ -14,21 +14,27 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::collections::{BTreeSet, BTreeMap}; use ethereum_types::H256; use ethkey::Secret; -use key_server_cluster::{Error, NodeId, SessionId, ServerKeyId, KeyStorage}; -use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::ClusterSession; -use key_server_cluster::math; -use key_server_cluster::jobs::servers_set_change_access_job::ServersSetChangeAccessRequest; -use key_server_cluster::jobs::job_session::JobTransport; -use key_server_cluster::message::{Message, ServersSetChangeMessage, ServersSetChangeShareAddMessage}; -use key_server_cluster::share_add_session::{SessionTransport as ShareAddSessionTransport, - SessionImpl as ShareAddSessionImpl, SessionParams as ShareAddSessionParams}; -use key_server_cluster::message::ShareAddMessage; -use key_server_cluster::admin_sessions::ShareChangeSessionMeta; +use key_server_cluster::{ + admin_sessions::ShareChangeSessionMeta, + cluster::Cluster, + cluster_sessions::ClusterSession, + jobs::{ + job_session::JobTransport, servers_set_change_access_job::ServersSetChangeAccessRequest, + }, + math, + message::{Message, 
ServersSetChangeMessage, ServersSetChangeShareAddMessage, ShareAddMessage}, + share_add_session::{ + SessionImpl as ShareAddSessionImpl, SessionParams as ShareAddSessionParams, + SessionTransport as ShareAddSessionTransport, + }, + Error, KeyStorage, NodeId, ServerKeyId, SessionId, +}; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; /// Single session meta-change session. Brief overview: /// 1) nodes that have been already removed from cluster (isolated nodes) are removed from session @@ -36,298 +42,390 @@ use key_server_cluster::admin_sessions::ShareChangeSessionMeta; /// 3) shares are moved between nodes /// 4) shares are removed from nodes pub struct ShareChangeSession { - /// Servers set change session id. - session_id: SessionId, - /// Session nonce. - nonce: u64, - /// Share change session meta. - meta: ShareChangeSessionMeta, - /// Cluster. - cluster: Arc, - /// Key storage. - key_storage: Arc, - /// Key version. - key_version: H256, - /// Nodes that have reported version ownership. - version_holders: Option>, - /// Consensus group to use in ShareAdd session. - consensus_group: Option>, - /// Nodes to add shares for. - new_nodes_map: Option>>, - /// Share add session. - share_add_session: Option>, - /// Is finished. - is_finished: bool, + /// Servers set change session id. + session_id: SessionId, + /// Session nonce. + nonce: u64, + /// Share change session meta. + meta: ShareChangeSessionMeta, + /// Cluster. + cluster: Arc, + /// Key storage. + key_storage: Arc, + /// Key version. + key_version: H256, + /// Nodes that have reported version ownership. + version_holders: Option>, + /// Consensus group to use in ShareAdd session. + consensus_group: Option>, + /// Nodes to add shares for. + new_nodes_map: Option>>, + /// Share add session. + share_add_session: Option>, + /// Is finished. + is_finished: bool, } /// Share change session plan. #[derive(Debug)] pub struct ShareChangeSessionPlan { - /// Key version that plan is valid for. 
- pub key_version: H256, - /// Nodes that have reported version ownership. - pub version_holders: BTreeSet, - /// Consensus group to use in ShareAdd session. - pub consensus_group: BTreeSet, - /// Nodes to add shares for. - pub new_nodes_map: BTreeMap>, + /// Key version that plan is valid for. + pub key_version: H256, + /// Nodes that have reported version ownership. + pub version_holders: BTreeSet, + /// Consensus group to use in ShareAdd session. + pub consensus_group: BTreeSet, + /// Nodes to add shares for. + pub new_nodes_map: BTreeMap>, } /// Session parameters. pub struct ShareChangeSessionParams { - /// Servers set change session id. - pub session_id: SessionId, - /// Session nonce. - pub nonce: u64, - /// Share change session meta. - pub meta: ShareChangeSessionMeta, - /// Cluster. - pub cluster: Arc, - /// Keys storage. - pub key_storage: Arc, - /// Session plan. - pub plan: ShareChangeSessionPlan, + /// Servers set change session id. + pub session_id: SessionId, + /// Session nonce. + pub nonce: u64, + /// Share change session meta. + pub meta: ShareChangeSessionMeta, + /// Cluster. + pub cluster: Arc, + /// Keys storage. + pub key_storage: Arc, + /// Session plan. + pub plan: ShareChangeSessionPlan, } /// Share add session transport. #[derive(Clone)] pub struct ShareChangeTransport { - /// Servers set change session id. - session_id: SessionId, - /// Session nonce. - nonce: u64, - /// Cluster. - cluster: Arc, + /// Servers set change session id. + session_id: SessionId, + /// Session nonce. + nonce: u64, + /// Cluster. + cluster: Arc, } impl ShareChangeSession { - /// Create new share change session. 
- pub fn new(params: ShareChangeSessionParams) -> Result { - // we can't create sessions right now, because key share is read when session is created, but it can change in previous session - let key_version = params.plan.key_version; - let consensus_group = if !params.plan.consensus_group.is_empty() { Some(params.plan.consensus_group) } else { None }; - let version_holders = if !params.plan.version_holders.is_empty() { Some(params.plan.version_holders) } else { None }; - let new_nodes_map = if !params.plan.new_nodes_map.is_empty() { Some(params.plan.new_nodes_map) } else { None }; - debug_assert!(new_nodes_map.is_some()); + /// Create new share change session. + pub fn new(params: ShareChangeSessionParams) -> Result { + // we can't create sessions right now, because key share is read when session is created, but it can change in previous session + let key_version = params.plan.key_version; + let consensus_group = if !params.plan.consensus_group.is_empty() { + Some(params.plan.consensus_group) + } else { + None + }; + let version_holders = if !params.plan.version_holders.is_empty() { + Some(params.plan.version_holders) + } else { + None + }; + let new_nodes_map = if !params.plan.new_nodes_map.is_empty() { + Some(params.plan.new_nodes_map) + } else { + None + }; + debug_assert!(new_nodes_map.is_some()); - let is_finished = new_nodes_map.is_none(); - Ok(ShareChangeSession { - session_id: params.session_id, - nonce: params.nonce, - meta: params.meta, - cluster: params.cluster, - key_storage: params.key_storage, - key_version: key_version, - version_holders: version_holders, - consensus_group: consensus_group, - new_nodes_map: new_nodes_map, - share_add_session: None, - is_finished: is_finished, - }) - } + let is_finished = new_nodes_map.is_none(); + Ok(ShareChangeSession { + session_id: params.session_id, + nonce: params.nonce, + meta: params.meta, + cluster: params.cluster, + key_storage: params.key_storage, + key_version: key_version, + version_holders: 
version_holders, + consensus_group: consensus_group, + new_nodes_map: new_nodes_map, + share_add_session: None, + is_finished: is_finished, + }) + } - /// Is finished?. - pub fn is_finished(&self) -> bool { - self.is_finished - } + /// Is finished?. + pub fn is_finished(&self) -> bool { + self.is_finished + } - /// Is master node?. - pub fn is_master(&self) -> bool { - self.meta.self_node_id == self.meta.master_node_id - } + /// Is master node?. + pub fn is_master(&self) -> bool { + self.meta.self_node_id == self.meta.master_node_id + } - /// Initialize session (on master node). - pub fn initialize(&mut self) -> Result<(), Error> { - self.proceed_to_next_state() - } + /// Initialize session (on master node). + pub fn initialize(&mut self) -> Result<(), Error> { + self.proceed_to_next_state() + } - /// When share-add message is received. - pub fn on_share_add_message(&mut self, sender: &NodeId, message: &ShareAddMessage) -> Result<(), Error> { - if self.share_add_session.is_none() { - self.create_share_add_session()?; - } + /// When share-add message is received. 
+ pub fn on_share_add_message( + &mut self, + sender: &NodeId, + message: &ShareAddMessage, + ) -> Result<(), Error> { + if self.share_add_session.is_none() { + self.create_share_add_session()?; + } - let change_state_needed = self.share_add_session.as_ref() - .map(|share_add_session| { - let was_finished = share_add_session.is_finished(); - share_add_session.process_message(sender, message) - .map(|_| share_add_session.is_finished() && !was_finished) - }) - .unwrap_or(Err(Error::InvalidMessage))?; - if change_state_needed { - self.proceed_to_next_state()?; - } + let change_state_needed = self + .share_add_session + .as_ref() + .map(|share_add_session| { + let was_finished = share_add_session.is_finished(); + share_add_session + .process_message(sender, message) + .map(|_| share_add_session.is_finished() && !was_finished) + }) + .unwrap_or(Err(Error::InvalidMessage))?; + if change_state_needed { + self.proceed_to_next_state()?; + } - Ok(()) - } + Ok(()) + } - /// Create new share add session. - fn create_share_add_session(&mut self) -> Result<(), Error> { - let consensus_group = self.consensus_group.take().ok_or(Error::InvalidStateForRequest)?; - let version_holders = self.version_holders.take().ok_or(Error::InvalidStateForRequest)?; - let new_nodes_map = self.new_nodes_map.take().ok_or(Error::InvalidStateForRequest)?; - let share_add_session = ShareAddSessionImpl::new(ShareAddSessionParams { - meta: self.meta.clone(), - nonce: self.nonce, - transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()), - key_storage: self.key_storage.clone(), - admin_public: None, - })?; - share_add_session.set_consensus_output(&self.key_version, consensus_group, version_holders, new_nodes_map)?; - self.share_add_session = Some(share_add_session); - Ok(()) - } + /// Create new share add session. 
+ fn create_share_add_session(&mut self) -> Result<(), Error> { + let consensus_group = self + .consensus_group + .take() + .ok_or(Error::InvalidStateForRequest)?; + let version_holders = self + .version_holders + .take() + .ok_or(Error::InvalidStateForRequest)?; + let new_nodes_map = self + .new_nodes_map + .take() + .ok_or(Error::InvalidStateForRequest)?; + let share_add_session = ShareAddSessionImpl::new(ShareAddSessionParams { + meta: self.meta.clone(), + nonce: self.nonce, + transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()), + key_storage: self.key_storage.clone(), + admin_public: None, + })?; + share_add_session.set_consensus_output( + &self.key_version, + consensus_group, + version_holders, + new_nodes_map, + )?; + self.share_add_session = Some(share_add_session); + Ok(()) + } - /// Proceed to the next state. - fn proceed_to_next_state(&mut self) -> Result<(), Error> { - if self.meta.self_node_id != self.meta.master_node_id { - if self.new_nodes_map.is_none() { - self.is_finished = true; - } - return Ok(()); - } + /// Proceed to the next state. 
+ fn proceed_to_next_state(&mut self) -> Result<(), Error> { + if self.meta.self_node_id != self.meta.master_node_id { + if self.new_nodes_map.is_none() { + self.is_finished = true; + } + return Ok(()); + } - if self.new_nodes_map.is_some() { - self.create_share_add_session()?; - return self.share_add_session.as_ref() - .expect("either create_share_add_session fails, or session is created; qed") - .initialize(None, None, None, None); - } + if self.new_nodes_map.is_some() { + self.create_share_add_session()?; + return self + .share_add_session + .as_ref() + .expect("either create_share_add_session fails, or session is created; qed") + .initialize(None, None, None, None); + } - self.is_finished = true; + self.is_finished = true; - Ok(()) - } + Ok(()) + } } impl ShareChangeTransport { - pub fn new(session_id: SessionId, nonce: u64, cluster: Arc) -> Self { - ShareChangeTransport { - session_id: session_id, - nonce: nonce, - cluster: cluster, - } - } + pub fn new(session_id: SessionId, nonce: u64, cluster: Arc) -> Self { + ShareChangeTransport { + session_id: session_id, + nonce: nonce, + cluster: cluster, + } + } } impl JobTransport for ShareChangeTransport { - type PartialJobRequest = ServersSetChangeAccessRequest; - type PartialJobResponse = bool; + type PartialJobRequest = ServersSetChangeAccessRequest; + type PartialJobResponse = bool; - fn send_partial_request(&self, _node: &NodeId, _request: ServersSetChangeAccessRequest) -> Result<(), Error> { - unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") - } + fn send_partial_request( + &self, + _node: &NodeId, + _request: ServersSetChangeAccessRequest, + ) -> Result<(), Error> { + unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") + } - fn send_partial_response(&self, _node: &NodeId, _response: bool) -> Result<(), Error> { - unreachable!("only called when establishing consensus; this 
transport is never used for establishing consensus; qed") - } + fn send_partial_response(&self, _node: &NodeId, _response: bool) -> Result<(), Error> { + unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") + } } impl ShareAddSessionTransport for ShareChangeTransport { - fn nodes(&self) -> BTreeSet { - self.cluster.nodes() - } + fn nodes(&self) -> BTreeSet { + self.cluster.nodes() + } - fn set_master_data(&mut self, _consensus_group: BTreeSet, _version_holders: BTreeSet, _id_numbers: BTreeMap>) { - unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") - } + fn set_master_data( + &mut self, + _consensus_group: BTreeSet, + _version_holders: BTreeSet, + _id_numbers: BTreeMap>, + ) { + unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") + } - fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> { - self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(ServersSetChangeShareAddMessage { - session: self.session_id.clone().into(), - session_nonce: self.nonce, - message: message, - }))) - } + fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> { + self.cluster.send( + node, + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage( + ServersSetChangeShareAddMessage { + session: self.session_id.clone().into(), + session_nonce: self.nonce, + message: message, + }, + )), + ) + } } /// Prepare share change plan for moving from old `old_key_version_owners` to `new_nodes_set`. 
-pub fn prepare_share_change_session_plan(cluster_nodes: &BTreeSet, threshold: usize, key_id: &ServerKeyId, key_version: H256, master: &NodeId, old_key_version_owners: &BTreeSet, new_nodes_set: &BTreeSet) -> Result { - // we can't do anything if there are no enought shares - if old_key_version_owners.len() < threshold + 1 { - warn!("cannot add shares to key {} with threshold {}: only {} shares owners are available", - key_id, threshold, old_key_version_owners.len()); - return Ok(ShareChangeSessionPlan { - key_version: key_version, - version_holders: Default::default(), - consensus_group: Default::default(), - new_nodes_map: Default::default(), - }); - } +pub fn prepare_share_change_session_plan( + cluster_nodes: &BTreeSet, + threshold: usize, + key_id: &ServerKeyId, + key_version: H256, + master: &NodeId, + old_key_version_owners: &BTreeSet, + new_nodes_set: &BTreeSet, +) -> Result { + // we can't do anything if there are no enought shares + if old_key_version_owners.len() < threshold + 1 { + warn!( + "cannot add shares to key {} with threshold {}: only {} shares owners are available", + key_id, + threshold, + old_key_version_owners.len() + ); + return Ok(ShareChangeSessionPlan { + key_version: key_version, + version_holders: Default::default(), + consensus_group: Default::default(), + new_nodes_map: Default::default(), + }); + } - // warn if we're loosing the key - if new_nodes_set.len() < threshold + 1 { - warn!("losing key {} with threshold {}: only {} nodes left after servers set change session", - key_id, threshold, new_nodes_set.len()); - } + // warn if we're loosing the key + if new_nodes_set.len() < threshold + 1 { + warn!( + "losing key {} with threshold {}: only {} nodes left after servers set change session", + key_id, + threshold, + new_nodes_set.len() + ); + } - // make new nodes map, so that: - // all non-isolated old nodes will have their id number preserved - // all new nodes will have new id number - let mut new_nodes_map = 
new_nodes_set.difference(&old_key_version_owners) - .map(|n| math::generate_random_scalar().map(|id| (n.clone(), Some(id)))) - .collect::, _>>()?; - if !new_nodes_map.is_empty() { - for old_node in old_key_version_owners.iter().filter(|n| cluster_nodes.contains(n)) { - new_nodes_map.insert(old_node.clone(), None); - } - } + // make new nodes map, so that: + // all non-isolated old nodes will have their id number preserved + // all new nodes will have new id number + let mut new_nodes_map = new_nodes_set + .difference(&old_key_version_owners) + .map(|n| math::generate_random_scalar().map(|id| (n.clone(), Some(id)))) + .collect::, _>>()?; + if !new_nodes_map.is_empty() { + for old_node in old_key_version_owners + .iter() + .filter(|n| cluster_nodes.contains(n)) + { + new_nodes_map.insert(old_node.clone(), None); + } + } - // select consensus group if there are some nodes to add - let consensus_group = if !new_nodes_map.is_empty() { - ::std::iter::once(master.clone()) - .chain(old_key_version_owners.iter() - .filter(|n| *n != master && cluster_nodes.contains(*n)) - .take(threshold) - .cloned()) - .collect() - } else { - BTreeSet::new() - }; + // select consensus group if there are some nodes to add + let consensus_group = if !new_nodes_map.is_empty() { + ::std::iter::once(master.clone()) + .chain( + old_key_version_owners + .iter() + .filter(|n| *n != master && cluster_nodes.contains(*n)) + .take(threshold) + .cloned(), + ) + .collect() + } else { + BTreeSet::new() + }; - Ok(ShareChangeSessionPlan { - key_version: key_version, - version_holders: old_key_version_owners.clone(), - consensus_group: consensus_group, - new_nodes_map: new_nodes_map, - }) + Ok(ShareChangeSessionPlan { + key_version: key_version, + version_holders: old_key_version_owners.clone(), + consensus_group: consensus_group, + new_nodes_map: new_nodes_map, + }) } impl ShareChangeSessionPlan { - /// Is empty (nothing-to-do) plan? 
- pub fn is_empty(&self) -> bool { - self.new_nodes_map.is_empty() - } + /// Is empty (nothing-to-do) plan? + pub fn is_empty(&self) -> bool { + self.new_nodes_map.is_empty() + } } #[cfg(test)] mod tests { - use key_server_cluster::math; - use super::prepare_share_change_session_plan; + use super::prepare_share_change_session_plan; + use key_server_cluster::math; - #[test] - fn share_change_plan_creates_empty_plan() { - let cluster_nodes: Vec<_> = (0..3).map(|_| math::generate_random_point().unwrap()).collect(); - let master = cluster_nodes[0].clone(); - let old_key_version_owners = cluster_nodes.iter().cloned().collect(); - let new_nodes_set = cluster_nodes.iter().cloned().collect(); - let plan = prepare_share_change_session_plan(&cluster_nodes.iter().cloned().collect(), - 1, &Default::default(), Default::default(), &master, &old_key_version_owners, &new_nodes_set).unwrap(); + #[test] + fn share_change_plan_creates_empty_plan() { + let cluster_nodes: Vec<_> = (0..3) + .map(|_| math::generate_random_point().unwrap()) + .collect(); + let master = cluster_nodes[0].clone(); + let old_key_version_owners = cluster_nodes.iter().cloned().collect(); + let new_nodes_set = cluster_nodes.iter().cloned().collect(); + let plan = prepare_share_change_session_plan( + &cluster_nodes.iter().cloned().collect(), + 1, + &Default::default(), + Default::default(), + &master, + &old_key_version_owners, + &new_nodes_set, + ) + .unwrap(); - assert!(plan.is_empty()); - } + assert!(plan.is_empty()); + } - #[test] - fn share_change_plan_adds_new_nodes() { - let cluster_nodes: Vec<_> = (0..3).map(|_| math::generate_random_point().unwrap()).collect(); - let master = cluster_nodes[0].clone(); - let old_key_version_owners = cluster_nodes[0..2].iter().cloned().collect(); - let new_nodes_set = cluster_nodes.iter().cloned().collect(); - let plan = prepare_share_change_session_plan(&cluster_nodes.iter().cloned().collect(), - 1, &Default::default(), Default::default(), &master, 
&old_key_version_owners, &new_nodes_set).unwrap(); + #[test] + fn share_change_plan_adds_new_nodes() { + let cluster_nodes: Vec<_> = (0..3) + .map(|_| math::generate_random_point().unwrap()) + .collect(); + let master = cluster_nodes[0].clone(); + let old_key_version_owners = cluster_nodes[0..2].iter().cloned().collect(); + let new_nodes_set = cluster_nodes.iter().cloned().collect(); + let plan = prepare_share_change_session_plan( + &cluster_nodes.iter().cloned().collect(), + 1, + &Default::default(), + Default::default(), + &master, + &old_key_version_owners, + &new_nodes_set, + ) + .unwrap(); - assert!(!plan.is_empty()); - assert_eq!(old_key_version_owners, plan.consensus_group); - assert_eq!(new_nodes_set, plan.new_nodes_map.keys().cloned().collect()); - } + assert!(!plan.is_empty()); + assert_eq!(old_key_version_owners, plan.consensus_group); + assert_eq!(new_nodes_set, plan.new_nodes_map.keys().cloned().collect()); + } } diff --git a/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs b/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs index ccb34e982..d9f763f17 100644 --- a/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs +++ b/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs @@ -14,23 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::{BTreeSet, BTreeMap}; -use std::sync::Arc; -use std::time; -use parking_lot::{Mutex, Condvar}; use ethereum_types::{Address, H256}; use ethkey::Secret; -use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId, Requester, - EncryptedDocumentKeyShadow, SessionMeta}; -use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession}; -use key_server_cluster::message::{Message, DecryptionMessage, DecryptionConsensusMessage, RequestPartialDecryption, - PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession, - ConfirmConsensusInitialization, DecryptionSessionDelegation, DecryptionSessionDelegationCompleted}; -use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport}; -use key_server_cluster::jobs::key_access_job::KeyAccessJob; -use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob}; -use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; +use key_server_cluster::{ + cluster::Cluster, + cluster_sessions::{ClusterSession, SessionIdWithSubSession}, + jobs::{ + consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, + decryption_job::{DecryptionJob, PartialDecryptionRequest, PartialDecryptionResponse}, + job_session::{JobSession, JobSessionState, JobTransport}, + key_access_job::KeyAccessJob, + }, + message::{ + ConfirmConsensusInitialization, ConsensusMessage, DecryptionConsensusMessage, + DecryptionMessage, DecryptionSessionCompleted, DecryptionSessionDelegation, + DecryptionSessionDelegationCompleted, DecryptionSessionError, InitializeConsensusSession, + Message, PartialDecryption, RequestPartialDecryption, + }, + AclStorage, DocumentKeyShare, EncryptedDocumentKeyShadow, Error, NodeId, Requester, SessionId, + SessionMeta, +}; 
+use parking_lot::{Condvar, Mutex}; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, + time, +}; /// Distributed decryption session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: @@ -41,211 +50,242 @@ use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, Consen /// 3) partial decryption: every node which has succussfully checked access for the requestor do a partial decryption /// 4) decryption: master node receives all partial decryptions of the secret and restores the secret pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex, } /// Immutable session data. struct SessionCore { - /// Session metadata. - pub meta: SessionMeta, - /// Decryption session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, - /// Session-level nonce. - pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session metadata. + pub meta: SessionMeta, + /// Decryption session access key. + pub access_key: Secret, + /// Key share. + pub key_share: Option, + /// Cluster which allows this node to send messages to other nodes in the cluster. + pub cluster: Arc, + /// Session-level nonce. + pub nonce: u64, + /// SessionImpl completion condvar. + pub completed: Condvar, } /// Decryption consensus session type. -type DecryptionConsensusSession = ConsensusSession; +type DecryptionConsensusSession = ConsensusSession< + KeyAccessJob, + DecryptionConsensusTransport, + DecryptionJob, + DecryptionJobTransport, +>; /// Broadcast decryption job session type. type BroadcastDecryptionJobSession = JobSession; /// Mutable session data. struct SessionData { - /// Key version to use for decryption. 
- pub version: Option, - /// Session origin (if any). - pub origin: Option
, - /// Consensus-based decryption session. - pub consensus_session: DecryptionConsensusSession, - /// Broadcast decryption job. - pub broadcast_job_session: Option, - /// Is shadow decryption requested? - pub is_shadow_decryption: Option, - /// Decryption result must be reconstructed on all participating nodes. This is useful - /// for service contract API so that all nodes from consensus group can confirm decryption. - pub is_broadcast_session: Option, - /// Delegation status. - pub delegation_status: Option, - /// Decryption result. - pub result: Option>, + /// Key version to use for decryption. + pub version: Option, + /// Session origin (if any). + pub origin: Option
, + /// Consensus-based decryption session. + pub consensus_session: DecryptionConsensusSession, + /// Broadcast decryption job. + pub broadcast_job_session: Option, + /// Is shadow decryption requested? + pub is_shadow_decryption: Option, + /// Decryption result must be reconstructed on all participating nodes. This is useful + /// for service contract API so that all nodes from consensus group can confirm decryption. + pub is_broadcast_session: Option, + /// Delegation status. + pub delegation_status: Option, + /// Decryption result. + pub result: Option>, } /// SessionImpl creation parameters pub struct SessionParams { - /// Session metadata. - pub meta: SessionMeta, - /// Session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// ACL storage. - pub acl_storage: Arc, - /// Cluster. - pub cluster: Arc, - /// Session nonce. - pub nonce: u64, + /// Session metadata. + pub meta: SessionMeta, + /// Session access key. + pub access_key: Secret, + /// Key share. + pub key_share: Option, + /// ACL storage. + pub acl_storage: Arc, + /// Cluster. + pub cluster: Arc, + /// Session nonce. + pub nonce: u64, } /// Decryption consensus transport. struct DecryptionConsensusTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Session origin (if any). - origin: Option
, - /// Selected key version (on master node). - version: Option, - /// Cluster. - cluster: Arc, + /// Session id. + id: SessionId, + /// Session access key. + access_key: Secret, + /// Session-level nonce. + nonce: u64, + /// Session origin (if any). + origin: Option
, + /// Selected key version (on master node). + version: Option, + /// Cluster. + cluster: Arc, } /// Decryption job transport struct DecryptionJobTransport { - /// Session id. - id: SessionId, - //// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Is this a broadcast transport? If true, requests are not send and responses are sent only to non-master nodes. - is_broadcast_transport: bool, - /// Master node id. - master_node_id: NodeId, - /// Cluster. - cluster: Arc, + /// Session id. + id: SessionId, + //// Session access key. + access_key: Secret, + /// Session-level nonce. + nonce: u64, + /// Is this a broadcast transport? If true, requests are not send and responses are sent only to non-master nodes. + is_broadcast_transport: bool, + /// Master node id. + master_node_id: NodeId, + /// Cluster. + cluster: Arc, } /// Session delegation status. enum DelegationStatus { - /// Delegated to other node. - DelegatedTo(NodeId), - /// Delegated from other node. - DelegatedFrom(NodeId, u64), + /// Delegated to other node. + DelegatedTo(NodeId), + /// Delegated from other node. + DelegatedFrom(NodeId, u64), } impl SessionImpl { - /// Create new decryption session. - pub fn new(params: SessionParams, requester: Option) -> Result { - debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default()); + /// Create new decryption session. 
+ pub fn new(params: SessionParams, requester: Option) -> Result { + debug_assert_eq!( + params.meta.threshold, + params + .key_share + .as_ref() + .map(|ks| ks.threshold) + .unwrap_or_default() + ); - // check that common_point and encrypted_point are already set - if let Some(key_share) = params.key_share.as_ref() { - // encrypted data must be set - if key_share.common_point.is_none() || key_share.encrypted_point.is_none() { - return Err(Error::DocumentKeyIsNotFound); - } - } + // check that common_point and encrypted_point are already set + if let Some(key_share) = params.key_share.as_ref() { + // encrypted data must be set + if key_share.common_point.is_none() || key_share.encrypted_point.is_none() { + return Err(Error::DocumentKeyIsNotFound); + } + } - let consensus_transport = DecryptionConsensusTransport { - id: params.meta.id.clone(), - access_key: params.access_key.clone(), - nonce: params.nonce, - origin: None, - version: None, - cluster: params.cluster.clone(), - }; - let consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: params.meta.clone(), - consensus_executor: match requester { - Some(requester) => KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), requester), - None => KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()), - }, - consensus_transport: consensus_transport, - })?; + let consensus_transport = DecryptionConsensusTransport { + id: params.meta.id.clone(), + access_key: params.access_key.clone(), + nonce: params.nonce, + origin: None, + version: None, + cluster: params.cluster.clone(), + }; + let consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: params.meta.clone(), + consensus_executor: match requester { + Some(requester) => KeyAccessJob::new_on_master( + params.meta.id.clone(), + params.acl_storage.clone(), + requester, + ), + None => { + KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()) + } + }, + 
consensus_transport: consensus_transport, + })?; - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - access_key: params.access_key, - key_share: params.key_share, - cluster: params.cluster, - nonce: params.nonce, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - version: None, - origin: None, - consensus_session: consensus_session, - broadcast_job_session: None, - is_shadow_decryption: None, - is_broadcast_session: None, - delegation_status: None, - result: None, - }), - }) - } + Ok(SessionImpl { + core: SessionCore { + meta: params.meta, + access_key: params.access_key, + key_share: params.key_share, + cluster: params.cluster, + nonce: params.nonce, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + version: None, + origin: None, + consensus_session: consensus_session, + broadcast_job_session: None, + is_shadow_decryption: None, + is_broadcast_session: None, + delegation_status: None, + result: None, + }), + }) + } - /// Get this node id. - #[cfg(test)] - pub fn node(&self) -> &NodeId { - &self.core.meta.self_node_id - } + /// Get this node id. + #[cfg(test)] + pub fn node(&self) -> &NodeId { + &self.core.meta.self_node_id + } - /// Get this session access key. - #[cfg(test)] - pub fn access_key(&self) -> &Secret { - &self.core.access_key - } + /// Get this session access key. + #[cfg(test)] + pub fn access_key(&self) -> &Secret { + &self.core.access_key + } - /// Get session state. - #[cfg(test)] - pub fn state(&self) -> ConsensusSessionState { - self.data.lock().consensus_session.state() - } + /// Get session state. + #[cfg(test)] + pub fn state(&self) -> ConsensusSessionState { + self.data.lock().consensus_session.state() + } - /// Get decrypted secret - #[cfg(test)] - pub fn decrypted_secret(&self) -> Option> { - self.data.lock().result.clone() - } + /// Get decrypted secret + #[cfg(test)] + pub fn decrypted_secret(&self) -> Option> { + self.data.lock().result.clone() + } - /// Get key requester. 
- pub fn requester(&self) -> Option { - self.data.lock().consensus_session.consensus_job().executor().requester().cloned() - } + /// Get key requester. + pub fn requester(&self) -> Option { + self.data + .lock() + .consensus_session + .consensus_job() + .executor() + .requester() + .cloned() + } - /// Get session origin. - pub fn origin(&self) -> Option
{ - self.data.lock().origin.clone() - } + /// Get session origin. + pub fn origin(&self) -> Option
{ + self.data.lock().origin.clone() + } - /// Wait for session completion. - pub fn wait(&self, timeout: Option) -> Option> { - Self::wait_session(&self.core.completed, &self.data, timeout, |data| data.result.clone()) - } + /// Wait for session completion. + pub fn wait( + &self, + timeout: Option, + ) -> Option> { + Self::wait_session(&self.core.completed, &self.data, timeout, |data| { + data.result.clone() + }) + } - /// Get broadcasted shadows. - pub fn broadcast_shadows(&self) -> Option>> { - let data = self.data.lock(); + /// Get broadcasted shadows. + pub fn broadcast_shadows(&self) -> Option>> { + let data = self.data.lock(); - if data.result.is_none() || (data.is_broadcast_session, data.is_shadow_decryption) != (Some(true), Some(true)) { - return None; - } + if data.result.is_none() + || (data.is_broadcast_session, data.is_shadow_decryption) != (Some(true), Some(true)) + { + return None; + } - let proof = "data.is_shadow_decryption is true; decrypt_shadow.is_some() is checked in DecryptionJob::check_partial_response; qed"; - Some(match self.core.meta.master_node_id == self.core.meta.self_node_id { + let proof = "data.is_shadow_decryption is true; decrypt_shadow.is_some() is checked in DecryptionJob::check_partial_response; qed"; + Some(match self.core.meta.master_node_id == self.core.meta.self_node_id { true => data.consensus_session.computation_job().responses().iter() .map(|(n, r)| (n.clone(), r.decrypt_shadow.clone().expect(proof))) .collect(), @@ -253,21 +293,33 @@ impl SessionImpl { .map(|(n, r)| (n.clone(), r.decrypt_shadow.clone().expect(proof))) .collect(), }) - } + } - /// Delegate session to other node. - pub fn delegate(&self, master: NodeId, origin: Option
, version: H256, is_shadow_decryption: bool, is_broadcast_session: bool) -> Result<(), Error> { - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } + /// Delegate session to other node. + pub fn delegate( + &self, + master: NodeId, + origin: Option
, + version: H256, + is_shadow_decryption: bool, + is_broadcast_session: bool, + ) -> Result<(), Error> { + if self.core.meta.master_node_id != self.core.meta.self_node_id { + return Err(Error::InvalidStateForRequest); + } - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() { - return Err(Error::InvalidStateForRequest); - } + let mut data = self.data.lock(); + if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization + || data.delegation_status.is_some() + { + return Err(Error::InvalidStateForRequest); + } - data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(false); - self.core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(DecryptionSessionDelegation { + data.consensus_session + .consensus_job_mut() + .executor_mut() + .set_has_key_share(false); + self.core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(DecryptionSessionDelegation { session: self.core.meta.id.clone().into(), sub_session: self.core.access_key.clone().into(), session_nonce: self.core.nonce, @@ -279,590 +331,869 @@ impl SessionImpl { is_shadow_decryption: is_shadow_decryption, is_broadcast_session: is_broadcast_session, })))?; - data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); - Ok(()) - } + data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); + Ok(()) + } - /// Initialize decryption session on master node. - pub fn initialize(&self, origin: Option
, version: H256, is_shadow_decryption: bool, is_broadcast_session: bool) -> Result<(), Error> { - debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); + /// Initialize decryption session on master node. + pub fn initialize( + &self, + origin: Option
, + version: H256, + is_shadow_decryption: bool, + is_broadcast_session: bool, + ) -> Result<(), Error> { + debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); - // check if version exists - let key_version = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share.version(&version)?, - }; + // check if version exists + let key_version = match self.core.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share.version(&version)?, + }; - let mut data = self.data.lock(); - let non_isolated_nodes = self.core.cluster.nodes(); - let mut consensus_nodes: BTreeSet<_> = key_version.id_numbers.keys() - .filter(|n| non_isolated_nodes.contains(*n)) - .cloned() - .chain(::std::iter::once(self.core.meta.self_node_id.clone())) - .collect(); - if let Some(&DelegationStatus::DelegatedFrom(delegation_master, _)) = data.delegation_status.as_ref() { - consensus_nodes.remove(&delegation_master); - } + let mut data = self.data.lock(); + let non_isolated_nodes = self.core.cluster.nodes(); + let mut consensus_nodes: BTreeSet<_> = key_version + .id_numbers + .keys() + .filter(|n| non_isolated_nodes.contains(*n)) + .cloned() + .chain(::std::iter::once(self.core.meta.self_node_id.clone())) + .collect(); + if let Some(&DelegationStatus::DelegatedFrom(delegation_master, _)) = + data.delegation_status.as_ref() + { + consensus_nodes.remove(&delegation_master); + } - data.consensus_session.consensus_job_mut().transport_mut().version = Some(version.clone()); - data.consensus_session.consensus_job_mut().transport_mut().origin = origin.clone(); - data.origin = origin; - data.version = Some(version.clone()); - data.is_shadow_decryption = Some(is_shadow_decryption); - data.is_broadcast_session = Some(is_broadcast_session); - data.consensus_session.initialize(consensus_nodes)?; + data.consensus_session + .consensus_job_mut() + .transport_mut() + .version = Some(version.clone()); + 
data.consensus_session + .consensus_job_mut() + .transport_mut() + .origin = origin.clone(); + data.origin = origin; + data.version = Some(version.clone()); + data.is_shadow_decryption = Some(is_shadow_decryption); + data.is_broadcast_session = Some(is_broadcast_session); + data.consensus_session.initialize(consensus_nodes)?; - if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { - Self::disseminate_jobs(&self.core, &mut *data, &version, is_shadow_decryption, is_broadcast_session)?; + if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { + Self::disseminate_jobs( + &self.core, + &mut *data, + &version, + is_shadow_decryption, + is_broadcast_session, + )?; - debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished); - let result = data.consensus_session.result()?; - Self::set_decryption_result(&self.core, &mut *data, Ok(result)); - } + debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished); + let result = data.consensus_session.result()?; + Self::set_decryption_result(&self.core, &mut *data, Ok(result)); + } - Ok(()) - } + Ok(()) + } - /// Process decryption message. - pub fn process_message(&self, sender: &NodeId, message: &DecryptionMessage) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } + /// Process decryption message. 
+ pub fn process_message( + &self, + sender: &NodeId, + message: &DecryptionMessage, + ) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } - match message { - &DecryptionMessage::DecryptionConsensusMessage(ref message) => - self.on_consensus_message(sender, message), - &DecryptionMessage::RequestPartialDecryption(ref message) => - self.on_partial_decryption_requested(sender, message), - &DecryptionMessage::PartialDecryption(ref message) => - self.on_partial_decryption(sender, message), - &DecryptionMessage::DecryptionSessionError(ref message) => - self.process_node_error(Some(&sender), message.error.clone()), - &DecryptionMessage::DecryptionSessionCompleted(ref message) => - self.on_session_completed(sender, message), - &DecryptionMessage::DecryptionSessionDelegation(ref message) => - self.on_session_delegated(sender, message), - &DecryptionMessage::DecryptionSessionDelegationCompleted(ref message) => - self.on_session_delegation_completed(sender, message), - } - } + match message { + &DecryptionMessage::DecryptionConsensusMessage(ref message) => { + self.on_consensus_message(sender, message) + } + &DecryptionMessage::RequestPartialDecryption(ref message) => { + self.on_partial_decryption_requested(sender, message) + } + &DecryptionMessage::PartialDecryption(ref message) => { + self.on_partial_decryption(sender, message) + } + &DecryptionMessage::DecryptionSessionError(ref message) => { + self.process_node_error(Some(&sender), message.error.clone()) + } + &DecryptionMessage::DecryptionSessionCompleted(ref message) => { + self.on_session_completed(sender, message) + } + &DecryptionMessage::DecryptionSessionDelegation(ref message) => { + self.on_session_delegated(sender, message) + } + &DecryptionMessage::DecryptionSessionDelegationCompleted(ref message) => { + self.on_session_delegation_completed(sender, message) + } + } + } - /// When session is delegated to this node. 
- pub fn on_session_delegated(&self, sender: &NodeId, message: &DecryptionSessionDelegation) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); + /// When session is delegated to this node. + pub fn on_session_delegated( + &self, + sender: &NodeId, + message: &DecryptionSessionDelegation, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); - { - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() { - return Err(Error::InvalidStateForRequest); - } + { + let mut data = self.data.lock(); + if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization + || data.delegation_status.is_some() + { + return Err(Error::InvalidStateForRequest); + } - data.consensus_session.consensus_job_mut().executor_mut().set_requester(message.requester.clone().into()); - data.delegation_status = Some(DelegationStatus::DelegatedFrom(sender.clone(), message.session_nonce)); - } + data.consensus_session + .consensus_job_mut() + .executor_mut() + .set_requester(message.requester.clone().into()); + data.delegation_status = Some(DelegationStatus::DelegatedFrom( + sender.clone(), + message.session_nonce, + )); + } - self.initialize(message.origin.clone().map(Into::into), message.version.clone().into(), message.is_shadow_decryption, message.is_broadcast_session) - } + self.initialize( + message.origin.clone().map(Into::into), + message.version.clone().into(), + message.is_shadow_decryption, + message.is_broadcast_session, + ) + } - /// When delegated session is completed on other node. 
- pub fn on_session_delegation_completed(&self, sender: &NodeId, message: &DecryptionSessionDelegationCompleted) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); + /// When delegated session is completed on other node. + pub fn on_session_delegation_completed( + &self, + sender: &NodeId, + message: &DecryptionSessionDelegationCompleted, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } + if self.core.meta.master_node_id != self.core.meta.self_node_id { + return Err(Error::InvalidStateForRequest); + } - let mut data = self.data.lock(); - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (), - _ => return Err(Error::InvalidMessage), - } + let mut data = self.data.lock(); + match data.delegation_status.as_ref() { + Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (), + _ => return Err(Error::InvalidMessage), + } - Self::set_decryption_result(&self.core, &mut *data, Ok(EncryptedDocumentKeyShadow { - decrypted_secret: message.decrypted_secret.clone().into(), - common_point: message.common_point.clone().map(Into::into), - decrypt_shadows: message.decrypt_shadows.clone().map(Into::into), - })); + Self::set_decryption_result( + &self.core, + &mut *data, + Ok(EncryptedDocumentKeyShadow { + decrypted_secret: message.decrypted_secret.clone().into(), + common_point: message.common_point.clone().map(Into::into), + decrypt_shadows: message.decrypt_shadows.clone().map(Into::into), + }), + ); - Ok(()) - } + Ok(()) + } - /// When consensus-related message is received. 
- pub fn on_consensus_message(&self, sender: &NodeId, message: &DecryptionConsensusMessage) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); + /// When consensus-related message is received. + pub fn on_consensus_message( + &self, + sender: &NodeId, + message: &DecryptionConsensusMessage, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); - let mut data = self.data.lock(); - let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message { - let version = msg.version.clone().into(); - let has_key_share = self.core.key_share.as_ref() - .map(|ks| ks.version(&version).is_ok()) - .unwrap_or(false); - data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(has_key_share); - data.version = Some(version); - data.origin = message.origin.clone().map(Into::into); - } - data.consensus_session.on_consensus_message(&sender, &message.message)?; + let mut data = self.data.lock(); + let is_establishing_consensus = + data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message { + let version = msg.version.clone().into(); + let has_key_share = self + .core + .key_share + .as_ref() + .map(|ks| ks.version(&version).is_ok()) + .unwrap_or(false); + data.consensus_session + .consensus_job_mut() + .executor_mut() + .set_has_key_share(has_key_share); + data.version = Some(version); + data.origin = message.origin.clone().map(Into::into); + } + data.consensus_session + .on_consensus_message(&sender, &message.message)?; - let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - if 
self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { - return Ok(()); - } + let is_consensus_established = + data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; + if self.core.meta.self_node_id != self.core.meta.master_node_id + || !is_establishing_consensus + || !is_consensus_established + { + return Ok(()); + } - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let is_shadow_decryption = data.is_shadow_decryption + let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); + let is_shadow_decryption = data.is_shadow_decryption .expect("we are on master node; on master node is_shadow_decryption is filled in initialize(); on_consensus_message follows initialize (state check in consensus_session); qed"); - let is_broadcast_session = data.is_broadcast_session + let is_broadcast_session = data.is_broadcast_session .expect("we are on master node; on master node is_broadcast_session is filled in initialize(); on_consensus_message follows initialize (state check in consensus_session); qed"); - Self::disseminate_jobs(&self.core, &mut *data, &version, is_shadow_decryption, is_broadcast_session) - } + Self::disseminate_jobs( + &self.core, + &mut *data, + &version, + is_shadow_decryption, + is_broadcast_session, + ) + } - /// When partial decryption is requested. - pub fn on_partial_decryption_requested(&self, sender: &NodeId, message: &RequestPartialDecryption) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When partial decryption is requested. 
+ pub fn on_partial_decryption_requested( + &self, + sender: &NodeId, + message: &RequestPartialDecryption, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let key_share = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; + let key_share = match self.core.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share, + }; - let mut data = self.data.lock(); - let key_version = key_share.version(data.version.as_ref().ok_or(Error::InvalidMessage)?)?.hash.clone(); - let requester_public = data.consensus_session.consensus_job().executor().requester() - .ok_or(Error::InvalidStateForRequest)? - .public(&self.core.meta.id) - .map_err(Error::InsufficientRequesterData)?; - let decryption_job = DecryptionJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.access_key.clone(), - requester_public.clone(), key_share.clone(), key_version)?; - let decryption_transport = self.core.decryption_transport(false); + let mut data = self.data.lock(); + let key_version = key_share + .version(data.version.as_ref().ok_or(Error::InvalidMessage)?)? + .hash + .clone(); + let requester_public = data + .consensus_session + .consensus_job() + .executor() + .requester() + .ok_or(Error::InvalidStateForRequest)? 
+ .public(&self.core.meta.id) + .map_err(Error::InsufficientRequesterData)?; + let decryption_job = DecryptionJob::new_on_slave( + self.core.meta.self_node_id.clone(), + self.core.access_key.clone(), + requester_public.clone(), + key_share.clone(), + key_version, + )?; + let decryption_transport = self.core.decryption_transport(false); - // update flags if not on master - if self.core.meta.self_node_id != self.core.meta.master_node_id { - data.is_shadow_decryption = Some(message.is_shadow_decryption); - data.is_broadcast_session = Some(message.is_broadcast_session); - } + // update flags if not on master + if self.core.meta.self_node_id != self.core.meta.master_node_id { + data.is_shadow_decryption = Some(message.is_shadow_decryption); + data.is_broadcast_session = Some(message.is_broadcast_session); + } - // respond to request - let partial_decryption = data.consensus_session.on_job_request(sender, PartialDecryptionRequest { - id: message.request_id.clone().into(), - is_shadow_decryption: message.is_shadow_decryption, - is_broadcast_session: message.is_broadcast_session, - other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(), - }, decryption_job, decryption_transport)?; + // respond to request + let partial_decryption = data.consensus_session.on_job_request( + sender, + PartialDecryptionRequest { + id: message.request_id.clone().into(), + is_shadow_decryption: message.is_shadow_decryption, + is_broadcast_session: message.is_broadcast_session, + other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(), + }, + decryption_job, + decryption_transport, + )?; - // ...and prepare decryption job session if we need to broadcast result - if message.is_broadcast_session { - let consensus_group: BTreeSet<_> = message.nodes.iter().cloned().map(Into::into).collect(); - let broadcast_decryption_job = DecryptionJob::new_on_master(self.core.meta.self_node_id.clone(), - self.core.access_key.clone(), requester_public, key_share.clone(), 
key_version, - message.is_shadow_decryption, message.is_broadcast_session)?; - Self::create_broadcast_decryption_job(&self.core, &mut *data, consensus_group, broadcast_decryption_job, - message.request_id.clone().into(), Some(partial_decryption.take_response()))?; - } + // ...and prepare decryption job session if we need to broadcast result + if message.is_broadcast_session { + let consensus_group: BTreeSet<_> = + message.nodes.iter().cloned().map(Into::into).collect(); + let broadcast_decryption_job = DecryptionJob::new_on_master( + self.core.meta.self_node_id.clone(), + self.core.access_key.clone(), + requester_public, + key_share.clone(), + key_version, + message.is_shadow_decryption, + message.is_broadcast_session, + )?; + Self::create_broadcast_decryption_job( + &self.core, + &mut *data, + consensus_group, + broadcast_decryption_job, + message.request_id.clone().into(), + Some(partial_decryption.take_response()), + )?; + } - Ok(()) - } + Ok(()) + } - /// When partial decryption is received. - pub fn on_partial_decryption(&self, sender: &NodeId, message: &PartialDecryption) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When partial decryption is received. 
+ pub fn on_partial_decryption( + &self, + sender: &NodeId, + message: &PartialDecryption, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let mut data = self.data.lock(); - let is_master_node = self.core.meta.self_node_id == self.core.meta.master_node_id; - let result = if is_master_node { - data.consensus_session.on_job_response(sender, PartialDecryptionResponse { - request_id: message.request_id.clone().into(), - shadow_point: message.shadow_point.clone().into(), - decrypt_shadow: message.decrypt_shadow.clone(), - })?; + let mut data = self.data.lock(); + let is_master_node = self.core.meta.self_node_id == self.core.meta.master_node_id; + let result = if is_master_node { + data.consensus_session.on_job_response( + sender, + PartialDecryptionResponse { + request_id: message.request_id.clone().into(), + shadow_point: message.shadow_point.clone().into(), + decrypt_shadow: message.decrypt_shadow.clone(), + }, + )?; - if data.consensus_session.state() != ConsensusSessionState::Finished && - data.consensus_session.state() != ConsensusSessionState::Failed { - return Ok(()); - } + if data.consensus_session.state() != ConsensusSessionState::Finished + && data.consensus_session.state() != ConsensusSessionState::Failed + { + return Ok(()); + } - // send completion signal to all nodes, except for rejected nodes - if is_master_node { - for node in data.consensus_session.consensus_non_rejected_nodes() { - self.core.cluster.send(&node, Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(DecryptionSessionCompleted { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - })))?; - } - } + // send completion signal to all nodes, except for rejected nodes + if is_master_node { + for node in 
data.consensus_session.consensus_non_rejected_nodes() { + self.core.cluster.send( + &node, + Message::Decryption(DecryptionMessage::DecryptionSessionCompleted( + DecryptionSessionCompleted { + session: self.core.meta.id.clone().into(), + sub_session: self.core.access_key.clone().into(), + session_nonce: self.core.nonce, + }, + )), + )?; + } + } - data.consensus_session.result() - } else { - match data.broadcast_job_session.as_mut() { - Some(broadcast_job_session) => { - broadcast_job_session.on_partial_response(sender, PartialDecryptionResponse { - request_id: message.request_id.clone().into(), - shadow_point: message.shadow_point.clone().into(), - decrypt_shadow: message.decrypt_shadow.clone(), - })?; + data.consensus_session.result() + } else { + match data.broadcast_job_session.as_mut() { + Some(broadcast_job_session) => { + broadcast_job_session.on_partial_response( + sender, + PartialDecryptionResponse { + request_id: message.request_id.clone().into(), + shadow_point: message.shadow_point.clone().into(), + decrypt_shadow: message.decrypt_shadow.clone(), + }, + )?; - if broadcast_job_session.state() != JobSessionState::Finished && - broadcast_job_session.state() != JobSessionState::Failed { - return Ok(()); - } + if broadcast_job_session.state() != JobSessionState::Finished + && broadcast_job_session.state() != JobSessionState::Failed + { + return Ok(()); + } - broadcast_job_session.result() - }, - None => return Err(Error::InvalidMessage), - } - }; + broadcast_job_session.result() + } + None => return Err(Error::InvalidMessage), + } + }; - Self::set_decryption_result(&self.core, &mut *data, result); + Self::set_decryption_result(&self.core, &mut *data, result); - Ok(()) - } + Ok(()) + } - /// When session is completed. 
- pub fn on_session_completed(&self, sender: &NodeId, message: &DecryptionSessionCompleted) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When session is completed. + pub fn on_session_completed( + &self, + sender: &NodeId, + message: &DecryptionSessionCompleted, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let mut data = self.data.lock(); + let mut data = self.data.lock(); - // if it is a broadcast session, wait for all answers before completing the session - let decryption_result = match data.broadcast_job_session.as_ref() { - Some(broadcast_job_session) => { - if !broadcast_job_session.is_result_ready() { - return Err(Error::TooEarlyForRequest); - } + // if it is a broadcast session, wait for all answers before completing the session + let decryption_result = match data.broadcast_job_session.as_ref() { + Some(broadcast_job_session) => { + if !broadcast_job_session.is_result_ready() { + return Err(Error::TooEarlyForRequest); + } - Some(broadcast_job_session.result()) - }, - None => None, - }; - if let Some(decryption_result) = decryption_result { - Self::set_decryption_result(&self.core, &mut *data, decryption_result); - } + Some(broadcast_job_session.result()) + } + None => None, + }; + if let Some(decryption_result) = decryption_result { + Self::set_decryption_result(&self.core, &mut *data, decryption_result); + } - data.consensus_session.on_session_completed(sender) - } + data.consensus_session.on_session_completed(sender) + } - /// Process error from the other node. 
- fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> { - let mut data = self.data.lock(); - let is_self_node_error = node.map(|n| n == &self.core.meta.self_node_id).unwrap_or(false); - // error is always fatal if coming from this node - if is_self_node_error { - Self::set_decryption_result(&self.core, &mut *data, Err(error.clone())); - return Err(error); - } + /// Process error from the other node. + fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> { + let mut data = self.data.lock(); + let is_self_node_error = node + .map(|n| n == &self.core.meta.self_node_id) + .unwrap_or(false); + // error is always fatal if coming from this node + if is_self_node_error { + Self::set_decryption_result(&self.core, &mut *data, Err(error.clone())); + return Err(error); + } - match { - match node { - Some(node) => data.consensus_session.on_node_error(node, error.clone()), - None => data.consensus_session.on_session_timeout(), - } - } { - Ok(false) => Ok(()), - Ok(true) => { - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let proof = "on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when is_shadow_decryption.is_some(); qed"; - let is_shadow_decryption = data.is_shadow_decryption.expect(proof); - let is_broadcast_session = data.is_broadcast_session.expect(proof); - let disseminate_result = Self::disseminate_jobs(&self.core, &mut *data, &version, is_shadow_decryption, is_broadcast_session); - match disseminate_result { - Ok(()) => Ok(()), - Err(err) => { - warn!("{}: decryption session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); + match { + match node { + Some(node) => data.consensus_session.on_node_error(node, error.clone()), + None => data.consensus_session.on_session_timeout(), + } + } { + Ok(false) => Ok(()), + Ok(true) => { + let version = 
data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); + let proof = "on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when is_shadow_decryption.is_some(); qed"; + let is_shadow_decryption = data.is_shadow_decryption.expect(proof); + let is_broadcast_session = data.is_broadcast_session.expect(proof); + let disseminate_result = Self::disseminate_jobs( + &self.core, + &mut *data, + &version, + is_shadow_decryption, + is_broadcast_session, + ); + match disseminate_result { + Ok(()) => Ok(()), + Err(err) => { + warn!( + "{}: decryption session failed with error: {:?} from {:?}", + &self.core.meta.self_node_id, error, node + ); - Self::set_decryption_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - } - } - }, - Err(err) => { - warn!("{}: decryption session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); + Self::set_decryption_result(&self.core, &mut *data, Err(err.clone())); + Err(err) + } + } + } + Err(err) => { + warn!( + "{}: decryption session failed with error: {:?} from {:?}", + &self.core.meta.self_node_id, error, node + ); - Self::set_decryption_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - }, - } - } + Self::set_decryption_result(&self.core, &mut *data, Err(err.clone())); + Err(err) + } + } + } - /// Disseminate jobs on session master. - fn disseminate_jobs(core: &SessionCore, data: &mut SessionData, version: &H256, is_shadow_decryption: bool, is_broadcast_session: bool) -> Result<(), Error> { - let key_share = match core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; + /// Disseminate jobs on session master. 
+ fn disseminate_jobs( + core: &SessionCore, + data: &mut SessionData, + version: &H256, + is_shadow_decryption: bool, + is_broadcast_session: bool, + ) -> Result<(), Error> { + let key_share = match core.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share, + }; - let key_version = key_share.version(version)?.hash.clone(); - let requester = data.consensus_session.consensus_job().executor().requester().ok_or(Error::InvalidStateForRequest)?.clone(); - let requester_public = requester.public(&core.meta.id).map_err(Error::InsufficientRequesterData)?; - let consensus_group = data.consensus_session.select_consensus_group()?.clone(); - let decryption_job = DecryptionJob::new_on_master(core.meta.self_node_id.clone(), - core.access_key.clone(), requester_public.clone(), key_share.clone(), key_version, - is_shadow_decryption, is_broadcast_session)?; - let decryption_request_id = decryption_job.request_id().clone() + let key_version = key_share.version(version)?.hash.clone(); + let requester = data + .consensus_session + .consensus_job() + .executor() + .requester() + .ok_or(Error::InvalidStateForRequest)? 
+ .clone(); + let requester_public = requester + .public(&core.meta.id) + .map_err(Error::InsufficientRequesterData)?; + let consensus_group = data.consensus_session.select_consensus_group()?.clone(); + let decryption_job = DecryptionJob::new_on_master( + core.meta.self_node_id.clone(), + core.access_key.clone(), + requester_public.clone(), + key_share.clone(), + key_version, + is_shadow_decryption, + is_broadcast_session, + )?; + let decryption_request_id = decryption_job.request_id().clone() .expect("DecryptionJob always have request_id when created on master; it is created using new_on_master above; qed"); - let decryption_transport = core.decryption_transport(false); - let is_broadcast_session = data.is_broadcast_session + let decryption_transport = core.decryption_transport(false); + let is_broadcast_session = data.is_broadcast_session .expect("disseminate_jobs is called on master node only; on master node is_broadcast_session is filled during initialization; qed"); - let self_response = data.consensus_session.disseminate_jobs(decryption_job, decryption_transport, is_broadcast_session)?; + let self_response = data.consensus_session.disseminate_jobs( + decryption_job, + decryption_transport, + is_broadcast_session, + )?; - // ...and prepare decryption job session if we need to broadcast result - if is_broadcast_session { - let broadcast_decryption_job = DecryptionJob::new_on_master(core.meta.self_node_id.clone(), - core.access_key.clone(), requester_public, key_share.clone(), key_version, is_shadow_decryption, is_broadcast_session)?; - Self::create_broadcast_decryption_job(&core, data, consensus_group, broadcast_decryption_job, - decryption_request_id, self_response)?; - } + // ...and prepare decryption job session if we need to broadcast result + if is_broadcast_session { + let broadcast_decryption_job = DecryptionJob::new_on_master( + core.meta.self_node_id.clone(), + core.access_key.clone(), + requester_public, + key_share.clone(), + key_version, + 
is_shadow_decryption, + is_broadcast_session, + )?; + Self::create_broadcast_decryption_job( + &core, + data, + consensus_group, + broadcast_decryption_job, + decryption_request_id, + self_response, + )?; + } - Ok(()) - } + Ok(()) + } - /// Create broadcast decryption job. - fn create_broadcast_decryption_job(core: &SessionCore, data: &mut SessionData, mut consensus_group: BTreeSet, mut job: DecryptionJob, request_id: Secret, self_response: Option) -> Result<(), Error> { - consensus_group.insert(core.meta.self_node_id.clone()); - job.set_request_id(request_id.clone().into()); + /// Create broadcast decryption job. + fn create_broadcast_decryption_job( + core: &SessionCore, + data: &mut SessionData, + mut consensus_group: BTreeSet, + mut job: DecryptionJob, + request_id: Secret, + self_response: Option, + ) -> Result<(), Error> { + consensus_group.insert(core.meta.self_node_id.clone()); + job.set_request_id(request_id.clone().into()); - let transport = core.decryption_transport(true); - let mut job_session = JobSession::new(SessionMeta { - id: core.meta.id.clone(), - master_node_id: core.meta.self_node_id.clone(), - self_node_id: core.meta.self_node_id.clone(), - threshold: core.meta.threshold, - configured_nodes_count: core.meta.configured_nodes_count, - connected_nodes_count: core.meta.connected_nodes_count, - }, job, transport); - job_session.initialize(consensus_group, self_response, core.meta.self_node_id != core.meta.master_node_id)?; - data.broadcast_job_session = Some(job_session); + let transport = core.decryption_transport(true); + let mut job_session = JobSession::new( + SessionMeta { + id: core.meta.id.clone(), + master_node_id: core.meta.self_node_id.clone(), + self_node_id: core.meta.self_node_id.clone(), + threshold: core.meta.threshold, + configured_nodes_count: core.meta.configured_nodes_count, + connected_nodes_count: core.meta.connected_nodes_count, + }, + job, + transport, + ); + job_session.initialize( + consensus_group, + self_response, + 
core.meta.self_node_id != core.meta.master_node_id, + )?; + data.broadcast_job_session = Some(job_session); - Ok(()) - } + Ok(()) + } - /// Set decryption result. - fn set_decryption_result(core: &SessionCore, data: &mut SessionData, result: Result) { - if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() { - // error means can't communicate => ignore it - let _ = match result.as_ref() { - Ok(document_key) => core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(DecryptionSessionDelegationCompleted { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - decrypted_secret: document_key.decrypted_secret.clone().into(), - common_point: document_key.common_point.clone().map(Into::into), - decrypt_shadows: document_key.decrypt_shadows.clone(), - }))), - Err(error) => core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionError(DecryptionSessionError { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - error: error.clone().into(), - }))), - }; - } + /// Set decryption result. 
+ fn set_decryption_result( + core: &SessionCore, + data: &mut SessionData, + result: Result, + ) { + if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() + { + // error means can't communicate => ignore it + let _ = match result.as_ref() { + Ok(document_key) => core.cluster.send( + &master, + Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted( + DecryptionSessionDelegationCompleted { + session: core.meta.id.clone().into(), + sub_session: core.access_key.clone().into(), + session_nonce: nonce, + decrypted_secret: document_key.decrypted_secret.clone().into(), + common_point: document_key.common_point.clone().map(Into::into), + decrypt_shadows: document_key.decrypt_shadows.clone(), + }, + )), + ), + Err(error) => core.cluster.send( + &master, + Message::Decryption(DecryptionMessage::DecryptionSessionError( + DecryptionSessionError { + session: core.meta.id.clone().into(), + sub_session: core.access_key.clone().into(), + session_nonce: nonce, + error: error.clone().into(), + }, + )), + ), + }; + } - data.result = Some(result); - core.completed.notify_all(); - } + data.result = Some(result); + core.completed.notify_all(); + } } impl ClusterSession for SessionImpl { - type Id = SessionIdWithSubSession; + type Id = SessionIdWithSubSession; - fn type_name() -> &'static str { - "decryption" - } + fn type_name() -> &'static str { + "decryption" + } - fn id(&self) -> SessionIdWithSubSession { - SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone()) - } + fn id(&self) -> SessionIdWithSubSession { + SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone()) + } - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.consensus_session.state() == ConsensusSessionState::Failed - || data.consensus_session.state() == ConsensusSessionState::Finished - || data.result.is_some() - } + fn is_finished(&self) -> bool { + let data = self.data.lock(); + 
data.consensus_session.state() == ConsensusSessionState::Failed + || data.consensus_session.state() == ConsensusSessionState::Finished + || data.result.is_some() + } - fn on_node_timeout(&self, node: &NodeId) { - // ignore error, only state matters - let _ = self.process_node_error(Some(node), Error::NodeDisconnected); - } + fn on_node_timeout(&self, node: &NodeId) { + // ignore error, only state matters + let _ = self.process_node_error(Some(node), Error::NodeDisconnected); + } - fn on_session_timeout(&self) { - // ignore error, only state matters - let _ = self.process_node_error(None, Error::NodeDisconnected); - } + fn on_session_timeout(&self) { + // ignore error, only state matters + let _ = self.process_node_error(None, Error::NodeDisconnected); + } - fn on_session_error(&self, node: &NodeId, error: Error) { - let is_fatal = self.process_node_error(Some(node), error.clone()).is_err(); - let is_this_node_error = *node == self.core.meta.self_node_id; - if is_fatal || is_this_node_error { - // error in signing session is non-fatal, if occurs on slave node - // => either respond with error - // => or broadcast error - let message = Message::Decryption(DecryptionMessage::DecryptionSessionError(DecryptionSessionError { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - })); + fn on_session_error(&self, node: &NodeId, error: Error) { + let is_fatal = self.process_node_error(Some(node), error.clone()).is_err(); + let is_this_node_error = *node == self.core.meta.self_node_id; + if is_fatal || is_this_node_error { + // error in signing session is non-fatal, if occurs on slave node + // => either respond with error + // => or broadcast error + let message = Message::Decryption(DecryptionMessage::DecryptionSessionError( + DecryptionSessionError { + session: self.core.meta.id.clone().into(), + sub_session: self.core.access_key.clone().into(), + session_nonce: 
self.core.nonce, + error: error.clone().into(), + }, + )); - // do not bother processing send error, as we already processing error - let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id { - self.core.cluster.broadcast(message) - } else { - self.core.cluster.send(&self.core.meta.master_node_id, message) - }; - } - } + // do not bother processing send error, as we already processing error + let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id { + self.core.cluster.broadcast(message) + } else { + self.core + .cluster + .send(&self.core.meta.master_node_id, message) + }; + } + } - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::Decryption(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } + fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { + match *message { + Message::Decryption(ref message) => self.process_message(sender, message), + _ => unreachable!("cluster checks message to be correct before passing; qed"), + } + } } impl SessionCore { - pub fn decryption_transport(&self, is_broadcast_transport: bool) -> DecryptionJobTransport { - DecryptionJobTransport { - id: self.meta.id.clone(), - access_key: self.access_key.clone(), - nonce: self.nonce, - is_broadcast_transport: is_broadcast_transport, - master_node_id: self.meta.master_node_id.clone(), - cluster: self.cluster.clone(), - } - } + pub fn decryption_transport(&self, is_broadcast_transport: bool) -> DecryptionJobTransport { + DecryptionJobTransport { + id: self.meta.id.clone(), + access_key: self.access_key.clone(), + nonce: self.nonce, + is_broadcast_transport: is_broadcast_transport, + master_node_id: self.meta.master_node_id.clone(), + cluster: self.cluster.clone(), + } + } } impl JobTransport for DecryptionConsensusTransport { - type PartialJobRequest=Requester; - type 
PartialJobResponse=bool; + type PartialJobRequest = Requester; + type PartialJobResponse = bool; - fn send_partial_request(&self, node: &NodeId, request: Requester) -> Result<(), Error> { - let version = self.version.as_ref() + fn send_partial_request(&self, node: &NodeId, request: Requester) -> Result<(), Error> { + let version = self.version.as_ref() .expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed"); - self.cluster.send(node, Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(DecryptionConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - origin: self.origin.clone().map(Into::into), - message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { - requester: request.into(), - version: version.clone().into(), - }) - }))) - } + self.cluster.send( + node, + Message::Decryption(DecryptionMessage::DecryptionConsensusMessage( + DecryptionConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + origin: self.origin.clone().map(Into::into), + message: ConsensusMessage::InitializeConsensusSession( + InitializeConsensusSession { + requester: request.into(), + version: version.clone().into(), + }, + ), + }, + )), + ) + } - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - self.cluster.send(node, Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(DecryptionConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - origin: None, - message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: response, - }) - }))) - } + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send( + 
node, + Message::Decryption(DecryptionMessage::DecryptionConsensusMessage( + DecryptionConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + origin: None, + message: ConsensusMessage::ConfirmConsensusInitialization( + ConfirmConsensusInitialization { + is_confirmed: response, + }, + ), + }, + )), + ) + } } impl JobTransport for DecryptionJobTransport { - type PartialJobRequest=PartialDecryptionRequest; - type PartialJobResponse=PartialDecryptionResponse; + type PartialJobRequest = PartialDecryptionRequest; + type PartialJobResponse = PartialDecryptionResponse; - fn send_partial_request(&self, node: &NodeId, request: PartialDecryptionRequest) -> Result<(), Error> { - if !self.is_broadcast_transport { - self.cluster.send(node, Message::Decryption(DecryptionMessage::RequestPartialDecryption(RequestPartialDecryption { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: request.id.into(), - is_shadow_decryption: request.is_shadow_decryption, - is_broadcast_session: request.is_broadcast_session, - nodes: request.other_nodes_ids.into_iter().map(Into::into).collect(), - })))?; - } + fn send_partial_request( + &self, + node: &NodeId, + request: PartialDecryptionRequest, + ) -> Result<(), Error> { + if !self.is_broadcast_transport { + self.cluster.send( + node, + Message::Decryption(DecryptionMessage::RequestPartialDecryption( + RequestPartialDecryption { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + request_id: request.id.into(), + is_shadow_decryption: request.is_shadow_decryption, + is_broadcast_session: request.is_broadcast_session, + nodes: request + .other_nodes_ids + .into_iter() + .map(Into::into) + .collect(), + }, + )), + )?; + } - Ok(()) - } + Ok(()) + } - fn send_partial_response(&self, node: &NodeId, response: PartialDecryptionResponse) -> 
Result<(), Error> { - if !self.is_broadcast_transport || *node != self.master_node_id { - self.cluster.send(node, Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: response.request_id.into(), - shadow_point: response.shadow_point.into(), - decrypt_shadow: response.decrypt_shadow, - })))?; - } + fn send_partial_response( + &self, + node: &NodeId, + response: PartialDecryptionResponse, + ) -> Result<(), Error> { + if !self.is_broadcast_transport || *node != self.master_node_id { + self.cluster.send( + node, + Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + request_id: response.request_id.into(), + shadow_point: response.shadow_point.into(), + decrypt_shadow: response.decrypt_shadow, + })), + )?; + } - Ok(()) - } + Ok(()) + } } #[cfg(test)] pub fn create_default_decryption_session() -> Arc { - use acl_storage::DummyAclStorage; - use key_server_cluster::cluster::tests::DummyCluster; + use acl_storage::DummyAclStorage; + use key_server_cluster::cluster::tests::DummyCluster; - Arc::new(SessionImpl::new(SessionParams { - meta: SessionMeta { - id: Default::default(), - self_node_id: Default::default(), - master_node_id: Default::default(), - threshold: 0, - configured_nodes_count: 0, - connected_nodes_count: 0, - }, - access_key: Secret::zero(), - key_share: Default::default(), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(Default::default())), - nonce: 0, - }, Some(Requester::Public(2.into()))).unwrap()) + Arc::new( + SessionImpl::new( + SessionParams { + meta: SessionMeta { + id: Default::default(), + self_node_id: Default::default(), + master_node_id: Default::default(), + threshold: 0, + configured_nodes_count: 0, + 
connected_nodes_count: 0, + }, + access_key: Secret::zero(), + key_share: Default::default(), + acl_storage: Arc::new(DummyAclStorage::default()), + cluster: Arc::new(DummyCluster::new(Default::default())), + nonce: 0, + }, + Some(Requester::Public(2.into())), + ) + .unwrap(), + ) } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::collections::{BTreeMap, VecDeque}; - use acl_storage::DummyAclStorage; - use ethkey::{self, KeyPair, Random, Generator, Public, Secret, public_to_address}; - use key_server_cluster::{NodeId, DocumentKeyShare, DocumentKeyShareVersion, SessionId, Requester, - Error, EncryptedDocumentKeyShadow, SessionMeta}; - use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::decryption_session::{SessionImpl, SessionParams}; - use key_server_cluster::message::{self, Message, DecryptionMessage}; - use key_server_cluster::math; - use key_server_cluster::jobs::consensus_session::ConsensusSessionState; + use acl_storage::DummyAclStorage; + use ethkey::{self, public_to_address, Generator, KeyPair, Public, Random, Secret}; + use key_server_cluster::{ + cluster::tests::DummyCluster, + cluster_sessions::ClusterSession, + decryption_session::{SessionImpl, SessionParams}, + jobs::consensus_session::ConsensusSessionState, + math, + message::{self, DecryptionMessage, Message}, + DocumentKeyShare, DocumentKeyShareVersion, EncryptedDocumentKeyShadow, Error, NodeId, + Requester, SessionId, SessionMeta, + }; + use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, + }; - const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f"; + const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f"; - fn prepare_decryption_sessions() -> (KeyPair, Vec>, Vec>, Vec) { - // 
prepare encrypted data + cluster configuration for scheme 4-of-5 - let session_id = SessionId::default(); - let access_key = Random.generate().unwrap().secret().clone(); - let secret_shares: Vec = vec![ - "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(), - "5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b".parse().unwrap(), - "71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9".parse().unwrap(), - "80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4".parse().unwrap(), - "c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad".parse().unwrap(), - ]; - let id_numbers: Vec<(NodeId, Secret)> = vec![ + fn prepare_decryption_sessions() -> ( + KeyPair, + Vec>, + Vec>, + Vec, + ) { + // prepare encrypted data + cluster configuration for scheme 4-of-5 + let session_id = SessionId::default(); + let access_key = Random.generate().unwrap().secret().clone(); + let secret_shares: Vec = vec![ + "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec" + .parse() + .unwrap(), + "5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b" + .parse() + .unwrap(), + "71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9" + .parse() + .unwrap(), + "80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4" + .parse() + .unwrap(), + "c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad" + .parse() + .unwrap(), + ]; + let id_numbers: Vec<(NodeId, Secret)> = vec![ ("b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()), ("1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".into(), @@ -874,594 +1205,1007 @@ mod tests { 
("321977760d1d8e15b047a309e4c7fe6f355c10bb5a06c68472b676926427f69f229024fa2692c10da167d14cdc77eb95d0fce68af0a0f704f0d3db36baa83bb2".into(), "12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8".parse().unwrap()), ]; - let common_point: Public = "6962be696e1bcbba8e64cc7fddf140f854835354b5804f3bb95ae5a2799130371b589a131bd39699ac7174ccb35fc4342dab05331202209582fc8f3a40916ab0".into(); - let encrypted_point: Public = "b07031982bde9890e12eff154765f03c56c3ab646ad47431db5dd2d742a9297679c4c65b998557f8008469afd0c43d40b6c5f6c6a1c7354875da4115237ed87a".into(); - let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare { - author: Default::default(), - threshold: 3, - public: Default::default(), - common_point: Some(common_point.clone()), - encrypted_point: Some(encrypted_point.clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: id_numbers.clone().into_iter().collect(), - secret_share: secret_shares[i].clone(), - }], - }).collect(); - let acl_storages: Vec<_> = (0..5).map(|_| Arc::new(DummyAclStorage::default())).collect(); - let clusters: Vec<_> = (0..5).map(|i| { - let cluster = Arc::new(DummyCluster::new(id_numbers.iter().nth(i).clone().unwrap().0)); - for id_number in &id_numbers { - cluster.add_node(id_number.0.clone()); - } - cluster - }).collect(); - let requester = Random.generate().unwrap(); - let signature = Some(ethkey::sign(requester.secret(), &SessionId::default()).unwrap()); - let sessions: Vec<_> = (0..5).map(|i| SessionImpl::new(SessionParams { - meta: SessionMeta { - id: session_id.clone(), - self_node_id: id_numbers.iter().nth(i).clone().unwrap().0, - master_node_id: id_numbers.iter().nth(0).clone().unwrap().0, - threshold: encrypted_datas[i].threshold, - configured_nodes_count: 5, - connected_nodes_count: 5, - }, - access_key: access_key.clone(), - key_share: Some(encrypted_datas[i].clone()), - acl_storage: acl_storages[i].clone(), - cluster: clusters[i].clone(), - nonce: 0, - }, if i == 0 { 
signature.clone().map(Into::into) } else { None }).unwrap()).collect(); - - (requester, clusters, acl_storages, sessions) - } - - fn do_messages_exchange(clusters: &[Arc], sessions: &[SessionImpl]) -> Result<(), Error> { - do_messages_exchange_until(clusters, sessions, |_, _, _| false) - } - - fn do_messages_exchange_until(clusters: &[Arc], sessions: &[SessionImpl], mut cond: F) -> Result<(), Error> where F: FnMut(&NodeId, &NodeId, &Message) -> bool { - let mut queue: VecDeque<(NodeId, NodeId, Message)> = VecDeque::new(); - while let Some((mut from, mut to, mut message)) = clusters.iter().filter_map(|c| c.take_message().map(|(to, msg)| (c.node(), to, msg))).next() { - if cond(&from, &to, &message) { - break; - } - - let mut is_queued_message = false; - loop { - let session = &sessions[sessions.iter().position(|s| s.node() == &to).unwrap()]; - match session.on_message(&from, &message) { - Ok(_) => { - if let Some(qmessage) = queue.pop_front() { - from = qmessage.0; - to = qmessage.1; - message = qmessage.2; - is_queued_message = true; - continue; - } - break; - }, - Err(Error::TooEarlyForRequest) => { - if is_queued_message { - queue.push_front((from, to, message)); - } else { - queue.push_back((from, to, message)); - } - break; - }, - Err(err) => return Err(err), - } - } - } - - Ok(()) - } - - #[test] - fn constructs_in_cluster_of_single_node() { - let mut nodes = BTreeMap::new(); - let self_node_id = Random.generate().unwrap().public().clone(); - nodes.insert(self_node_id, Random.generate().unwrap().secret().clone()); - match SessionImpl::new(SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self_node_id.clone(), - master_node_id: self_node_id.clone(), - threshold: 0, - configured_nodes_count: 1, - connected_nodes_count: 1, - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: Some(DocumentKeyShare { - author: Default::default(), - threshold: 0, - public: Default::default(), - common_point: 
Some(Random.generate().unwrap().public().clone()), - encrypted_point: Some(Random.generate().unwrap().public().clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: nodes, - secret_share: Random.generate().unwrap().secret().clone(), - }], - }), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - nonce: 0, - }, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))) { - Ok(_) => (), - _ => panic!("unexpected"), - } - } - - #[test] - fn fails_to_initialize_if_does_not_have_a_share() { - let self_node_id = Random.generate().unwrap().public().clone(); - let session = SessionImpl::new(SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self_node_id.clone(), - master_node_id: self_node_id.clone(), - threshold: 0, - configured_nodes_count: 1, - connected_nodes_count: 1, - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: None, - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - nonce: 0, - }, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap(); - assert_eq!(session.initialize(Default::default(), Default::default(), false, false), Err(Error::InvalidMessage)); - } - - #[test] - fn fails_to_initialize_if_threshold_is_wrong() { - let mut nodes = BTreeMap::new(); - let self_node_id = Random.generate().unwrap().public().clone(); - nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone()); - nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); - let session = SessionImpl::new(SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self_node_id.clone(), - master_node_id: self_node_id.clone(), - threshold: 2, - configured_nodes_count: 1, - 
connected_nodes_count: 1, - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: Some(DocumentKeyShare { - author: Default::default(), - threshold: 2, - public: Default::default(), - common_point: Some(Random.generate().unwrap().public().clone()), - encrypted_point: Some(Random.generate().unwrap().public().clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: nodes, - secret_share: Random.generate().unwrap().secret().clone(), - }], - }), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - nonce: 0, - }, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap(); - assert_eq!(session.initialize(Default::default(), Default::default(), false, false), Err(Error::ConsensusUnreachable)); - } - - #[test] - fn fails_to_initialize_when_already_initialized() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(), ()); - assert_eq!(sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn fails_to_accept_initialization_when_already_initialized() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(), ()); - assert_eq!(sessions[0].on_consensus_message(sessions[1].node(), &message::DecryptionConsensusMessage { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - origin: None, - message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession { - requester: Requester::Signature(ethkey::sign( - Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).into(), - version: Default::default(), - }), 
- }).unwrap_err(), Error::InvalidMessage); - } - - #[test] - fn fails_to_partial_decrypt_if_requested_by_slave() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_consensus_message(sessions[0].node(), &message::DecryptionConsensusMessage { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - origin: None, - message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession { - requester: Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), - &SessionId::default()).unwrap()).into(), - version: Default::default(), - }), - }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node(), &message::RequestPartialDecryption { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - request_id: Random.generate().unwrap().secret().clone().into(), - is_shadow_decryption: false, - is_broadcast_session: false, - nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), - }).unwrap_err(), Error::InvalidMessage); - } - - #[test] - fn fails_to_partial_decrypt_if_wrong_number_of_nodes_participating() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_consensus_message(sessions[0].node(), &message::DecryptionConsensusMessage { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - origin: None, - message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession { - requester: Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), - &SessionId::default()).unwrap()).into(), - version: Default::default(), - }), - }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node(), &message::RequestPartialDecryption { - session: 
SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - request_id: Random.generate().unwrap().secret().clone().into(), - is_shadow_decryption: false, - is_broadcast_session: false, - nodes: sessions.iter().map(|s| s.node().clone().into()).take(2).collect(), - }).unwrap_err(), Error::InvalidMessage); - } - - #[test] - fn fails_to_accept_partial_decrypt_if_not_waiting() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[0].on_partial_decryption(sessions[1].node(), &message::PartialDecryption { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - request_id: Random.generate().unwrap().secret().clone().into(), - shadow_point: Random.generate().unwrap().public().clone().into(), - decrypt_shadow: None, - }).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn fails_to_accept_partial_decrypt_twice() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - let mut pd_from = None; - let mut pd_msg = None; - do_messages_exchange_until(&clusters, &sessions, |from, _, msg| match msg { - &Message::Decryption(DecryptionMessage::PartialDecryption(ref msg)) => { - pd_from = Some(from.clone()); - pd_msg = Some(msg.clone()); - true - }, - _ => false, - }).unwrap(); - - assert_eq!(sessions[0].on_partial_decryption(pd_from.as_ref().unwrap(), &pd_msg.clone().unwrap()).unwrap(), ()); - assert_eq!(sessions[0].on_partial_decryption(pd_from.as_ref().unwrap(), &pd_msg.unwrap()).unwrap_err(), Error::InvalidNodeForRequest); - } - - #[test] - fn decryption_fails_on_session_timeout() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert!(sessions[0].decrypted_secret().is_none()); - sessions[0].on_session_timeout(); - assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap_err(), 
Error::ConsensusTemporaryUnreachable); - } - - #[test] - fn node_is_marked_rejected_when_timed_out_during_initialization_confirmation() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - // 1 node disconnects => we still can recover secret - sessions[0].on_node_timeout(sessions[1].node()); - assert!(sessions[0].data.lock().consensus_session.consensus_job().rejects().contains_key(sessions[1].node())); - assert!(sessions[0].state() == ConsensusSessionState::EstablishingConsensus); - - // 2 node are disconnected => we can not recover secret - sessions[0].on_node_timeout(sessions[2].node()); - assert!(sessions[0].state() == ConsensusSessionState::Failed); - } - - #[test] - fn session_does_not_fail_if_rejected_node_disconnects() { - let (_, clusters, acl_storages, sessions) = prepare_decryption_sessions(); - let key_pair = Random.generate().unwrap(); - - acl_storages[1].prohibit(public_to_address(key_pair.public()), SessionId::default()); - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap(); - - // 1st node disconnects => ignore this - sessions[0].on_node_timeout(sessions[1].node()); - assert_eq!(sessions[0].state(), ConsensusSessionState::EstablishingConsensus); - } - - #[test] - fn session_does_not_fail_if_requested_node_disconnects() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap(); - - // 1 node disconnects => we still can recover secret - sessions[0].on_node_timeout(sessions[1].node()); - assert!(sessions[0].state() == 
ConsensusSessionState::EstablishingConsensus); - - // 2 node are disconnected => we can not recover secret - sessions[0].on_node_timeout(sessions[2].node()); - assert!(sessions[0].state() == ConsensusSessionState::Failed); - } - - #[test] - fn session_does_not_fail_if_node_with_shadow_point_disconnects() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults - && sessions[0].data.lock().consensus_session.computation_job().responses().len() == 2).unwrap(); - - // disconnects from the node which has already sent us its own shadow point - let disconnected = sessions[0].data.lock(). - consensus_session.computation_job().responses().keys() - .filter(|n| *n != sessions[0].node()) - .cloned().nth(0).unwrap(); - sessions[0].on_node_timeout(&disconnected); - assert_eq!(sessions[0].state(), ConsensusSessionState::EstablishingConsensus); - } - - #[test] - fn session_restarts_if_confirmed_node_disconnects() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap(); - - // disconnects from the node which has already confirmed its participation - let disconnected = sessions[0].data.lock().consensus_session.computation_job().requests().iter().cloned().nth(0).unwrap(); - sessions[0].on_node_timeout(&disconnected); - assert_eq!(sessions[0].state(), ConsensusSessionState::EstablishingConsensus); - assert!(sessions[0].data.lock().consensus_session.computation_job().rejects().contains_key(&disconnected)); - assert!(!sessions[0].data.lock().consensus_session.computation_job().requests().contains(&disconnected)); - } 
- - #[test] - fn session_does_not_fail_if_non_master_node_disconnects_from_non_master_node() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap(); - - // disconnects from the node which has already confirmed its participation - sessions[1].on_node_timeout(sessions[2].node()); - assert!(sessions[0].state() == ConsensusSessionState::WaitingForPartialResults); - assert!(sessions[1].state() == ConsensusSessionState::ConsensusEstablished); - } - - #[test] - fn complete_dec_session() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - - // now let's try to do a decryption - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - do_messages_exchange(&clusters, &sessions).unwrap(); - - // now check that: - // 1) 5 of 5 sessions are in Finished state - assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Finished).count(), 5); - // 2) 1 session has decrypted key value - assert!(sessions.iter().skip(1).all(|s| s.decrypted_secret().is_none())); - - assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - }); - } - - #[test] - fn complete_shadow_dec_session() { - let (key_pair, clusters, _, sessions) = prepare_decryption_sessions(); - - // now let's try to do a decryption - sessions[0].initialize(Default::default(), Default::default(), true, false).unwrap(); - - do_messages_exchange(&clusters, &sessions).unwrap(); - - // now check that: - // 1) 5 of 5 sessions are in Finished state - assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Finished).count(), 5); - // 2) 1 session has decrypted key value - 
assert!(sessions.iter().skip(1).all(|s| s.decrypted_secret().is_none())); - - let decrypted_secret = sessions[0].decrypted_secret().unwrap().unwrap(); - // check that decrypted_secret != SECRET_PLAIN - assert!(decrypted_secret.decrypted_secret != SECRET_PLAIN.into()); - // check that common point && shadow coefficients are returned - assert!(decrypted_secret.common_point.is_some()); - assert!(decrypted_secret.decrypt_shadows.is_some()); - // check that KS client is able to restore original secret - use crypto::DEFAULT_MAC; - use ethkey::crypto::ecies::decrypt; - let decrypt_shadows: Vec<_> = decrypted_secret.decrypt_shadows.unwrap().into_iter() - .map(|c| Secret::from_slice(&decrypt(key_pair.secret(), &DEFAULT_MAC, &c).unwrap()).unwrap()) - .collect(); - let decrypted_secret = math::decrypt_with_shadow_coefficients(decrypted_secret.decrypted_secret, decrypted_secret.common_point.unwrap(), decrypt_shadows).unwrap(); - assert_eq!(decrypted_secret, SECRET_PLAIN.into()); - } - - #[test] - fn failed_dec_session() { - let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions(); - - // now let's try to do a decryption - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - // we need 4 out of 5 nodes to agree to do a decryption - // let's say that 2 of these nodes are disagree - acl_storages[1].prohibit(public_to_address(key_pair.public()), SessionId::default()); - acl_storages[2].prohibit(public_to_address(key_pair.public()), SessionId::default()); - - assert_eq!(do_messages_exchange(&clusters, &sessions).unwrap_err(), Error::ConsensusUnreachable); - - // check that 3 nodes have failed state - assert_eq!(sessions[0].state(), ConsensusSessionState::Failed); - assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Failed).count(), 3); - } - - #[test] - fn complete_dec_session_with_acl_check_failed_on_master() { - let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions(); - - 
// we need 4 out of 5 nodes to agree to do a decryption - // let's say that 1 of these nodes (master) is disagree - acl_storages[0].prohibit(public_to_address(key_pair.public()), SessionId::default()); - - // now let's try to do a decryption - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - - do_messages_exchange(&clusters, &sessions).unwrap(); - - // now check that: - // 1) 4 of 5 sessions are in Finished state - assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Finished).count(), 5); - // 2) 1 session has decrypted key value - assert!(sessions.iter().skip(1).all(|s| s.decrypted_secret().is_none())); - assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - }); - } - - #[test] - fn decryption_message_fails_when_nonce_is_wrong() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].process_message(sessions[0].node(), &message::DecryptionMessage::DecryptionSessionCompleted( - message::DecryptionSessionCompleted { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 10, - } - )), Err(Error::ReplayProtection)); - } - - #[test] - fn decryption_works_when_delegated_to_other_node() { - let (_, clusters, _, mut sessions) = prepare_decryption_sessions(); - - // let's say node1 doesn't have a share && delegates decryption request to node0 - // initially session is created on node1 => node1 is master for itself, but for other nodes node0 is still master - sessions[1].core.meta.master_node_id = sessions[1].core.meta.self_node_id.clone(); - sessions[1].data.lock().consensus_session.consensus_job_mut().executor_mut().set_requester( - sessions[0].data.lock().consensus_session.consensus_job().executor().requester().unwrap().clone() - ); - - // now let's try to do a decryption - 
sessions[1].delegate(sessions[0].core.meta.self_node_id.clone(), Default::default(), Default::default(), false, false).unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - // now check that: - // 1) 4 of 5 sessions are in Finished state - assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Finished).count(), 4); - // 2) 1 session has decrypted key value - assert_eq!(sessions[1].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - }); - } - - #[test] - fn decryption_works_when_share_owners_are_isolated() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - - // we need 4 out of 5 nodes to agree to do a decryption - // let's say that 1 of these nodes (master) is isolated - let isolated_node_id = sessions[4].core.meta.self_node_id.clone(); - for cluster in &clusters { - cluster.remove_node(&isolated_node_id); - } - - // now let's try to do a decryption - sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - }); - } - - #[test] - fn decryption_result_restored_on_all_nodes_if_broadcast_session_is_completed() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(Default::default(), Default::default(), false, true).unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - // decryption result must be the same and available on 4 nodes - let result = sessions[0].decrypted_secret(); - assert!(result.clone().unwrap().is_ok()); - assert_eq!(result.clone().unwrap().unwrap(), EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - }); - 
assert_eq!(3, sessions.iter().skip(1).filter(|s| s.decrypted_secret() == result).count()); - assert_eq!(1, sessions.iter().skip(1).filter(|s| s.decrypted_secret().is_none()).count()); - } - - #[test] - fn decryption_shadows_restored_on_all_nodes_if_shadow_broadcast_session_is_completed() { - let (key_pair, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(Default::default(), Default::default(), true, true).unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - // decryption shadows must be the same and available on 4 nodes - let broadcast_shadows = sessions[0].broadcast_shadows(); - assert!(broadcast_shadows.is_some()); - assert_eq!(3, sessions.iter().skip(1).filter(|s| s.broadcast_shadows() == broadcast_shadows).count()); - assert_eq!(1, sessions.iter().skip(1).filter(|s| s.broadcast_shadows().is_none()).count()); - - // 4 nodes must be able to recover original secret - use crypto::DEFAULT_MAC; - use ethkey::crypto::ecies::decrypt; - let result = sessions[0].decrypted_secret().unwrap().unwrap(); - assert_eq!(3, sessions.iter().skip(1).filter(|s| s.decrypted_secret() == Some(Ok(result.clone()))).count()); - let decrypt_shadows: Vec<_> = result.decrypt_shadows.unwrap().into_iter() - .map(|c| Secret::from_slice(&decrypt(key_pair.secret(), &DEFAULT_MAC, &c).unwrap()).unwrap()) - .collect(); - let decrypted_secret = math::decrypt_with_shadow_coefficients(result.decrypted_secret, result.common_point.unwrap(), decrypt_shadows).unwrap(); - assert_eq!(decrypted_secret, SECRET_PLAIN.into()); - } - - #[test] - fn decryption_session_origin_is_known_to_all_initialized_nodes() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(Some(1.into()), Default::default(), true, true).unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - // all session must have origin set - assert_eq!(5, sessions.iter().filter(|s| s.origin() == Some(1.into())).count()); - } + let common_point: Public = 
"6962be696e1bcbba8e64cc7fddf140f854835354b5804f3bb95ae5a2799130371b589a131bd39699ac7174ccb35fc4342dab05331202209582fc8f3a40916ab0".into(); + let encrypted_point: Public = "b07031982bde9890e12eff154765f03c56c3ab646ad47431db5dd2d742a9297679c4c65b998557f8008469afd0c43d40b6c5f6c6a1c7354875da4115237ed87a".into(); + let encrypted_datas: Vec<_> = (0..5) + .map(|i| DocumentKeyShare { + author: Default::default(), + threshold: 3, + public: Default::default(), + common_point: Some(common_point.clone()), + encrypted_point: Some(encrypted_point.clone()), + versions: vec![DocumentKeyShareVersion { + hash: Default::default(), + id_numbers: id_numbers.clone().into_iter().collect(), + secret_share: secret_shares[i].clone(), + }], + }) + .collect(); + let acl_storages: Vec<_> = (0..5) + .map(|_| Arc::new(DummyAclStorage::default())) + .collect(); + let clusters: Vec<_> = (0..5) + .map(|i| { + let cluster = Arc::new(DummyCluster::new( + id_numbers.iter().nth(i).clone().unwrap().0, + )); + for id_number in &id_numbers { + cluster.add_node(id_number.0.clone()); + } + cluster + }) + .collect(); + let requester = Random.generate().unwrap(); + let signature = Some(ethkey::sign(requester.secret(), &SessionId::default()).unwrap()); + let sessions: Vec<_> = (0..5) + .map(|i| { + SessionImpl::new( + SessionParams { + meta: SessionMeta { + id: session_id.clone(), + self_node_id: id_numbers.iter().nth(i).clone().unwrap().0, + master_node_id: id_numbers.iter().nth(0).clone().unwrap().0, + threshold: encrypted_datas[i].threshold, + configured_nodes_count: 5, + connected_nodes_count: 5, + }, + access_key: access_key.clone(), + key_share: Some(encrypted_datas[i].clone()), + acl_storage: acl_storages[i].clone(), + cluster: clusters[i].clone(), + nonce: 0, + }, + if i == 0 { + signature.clone().map(Into::into) + } else { + None + }, + ) + .unwrap() + }) + .collect(); + + (requester, clusters, acl_storages, sessions) + } + + fn do_messages_exchange( + clusters: &[Arc], + sessions: &[SessionImpl], + ) 
-> Result<(), Error> { + do_messages_exchange_until(clusters, sessions, |_, _, _| false) + } + + fn do_messages_exchange_until( + clusters: &[Arc], + sessions: &[SessionImpl], + mut cond: F, + ) -> Result<(), Error> + where + F: FnMut(&NodeId, &NodeId, &Message) -> bool, + { + let mut queue: VecDeque<(NodeId, NodeId, Message)> = VecDeque::new(); + while let Some((mut from, mut to, mut message)) = clusters + .iter() + .filter_map(|c| c.take_message().map(|(to, msg)| (c.node(), to, msg))) + .next() + { + if cond(&from, &to, &message) { + break; + } + + let mut is_queued_message = false; + loop { + let session = &sessions[sessions.iter().position(|s| s.node() == &to).unwrap()]; + match session.on_message(&from, &message) { + Ok(_) => { + if let Some(qmessage) = queue.pop_front() { + from = qmessage.0; + to = qmessage.1; + message = qmessage.2; + is_queued_message = true; + continue; + } + break; + } + Err(Error::TooEarlyForRequest) => { + if is_queued_message { + queue.push_front((from, to, message)); + } else { + queue.push_back((from, to, message)); + } + break; + } + Err(err) => return Err(err), + } + } + } + + Ok(()) + } + + #[test] + fn constructs_in_cluster_of_single_node() { + let mut nodes = BTreeMap::new(); + let self_node_id = Random.generate().unwrap().public().clone(); + nodes.insert(self_node_id, Random.generate().unwrap().secret().clone()); + match SessionImpl::new( + SessionParams { + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self_node_id.clone(), + master_node_id: self_node_id.clone(), + threshold: 0, + configured_nodes_count: 1, + connected_nodes_count: 1, + }, + access_key: Random.generate().unwrap().secret().clone(), + key_share: Some(DocumentKeyShare { + author: Default::default(), + threshold: 0, + public: Default::default(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), + versions: vec![DocumentKeyShareVersion { + hash: 
Default::default(), + id_numbers: nodes, + secret_share: Random.generate().unwrap().secret().clone(), + }], + }), + acl_storage: Arc::new(DummyAclStorage::default()), + cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + nonce: 0, + }, + Some(Requester::Signature( + ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + )), + ) { + Ok(_) => (), + _ => panic!("unexpected"), + } + } + + #[test] + fn fails_to_initialize_if_does_not_have_a_share() { + let self_node_id = Random.generate().unwrap().public().clone(); + let session = SessionImpl::new( + SessionParams { + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self_node_id.clone(), + master_node_id: self_node_id.clone(), + threshold: 0, + configured_nodes_count: 1, + connected_nodes_count: 1, + }, + access_key: Random.generate().unwrap().secret().clone(), + key_share: None, + acl_storage: Arc::new(DummyAclStorage::default()), + cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + nonce: 0, + }, + Some(Requester::Signature( + ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + )), + ) + .unwrap(); + assert_eq!( + session.initialize(Default::default(), Default::default(), false, false), + Err(Error::InvalidMessage) + ); + } + + #[test] + fn fails_to_initialize_if_threshold_is_wrong() { + let mut nodes = BTreeMap::new(); + let self_node_id = Random.generate().unwrap().public().clone(); + nodes.insert( + self_node_id.clone(), + Random.generate().unwrap().secret().clone(), + ); + nodes.insert( + Random.generate().unwrap().public().clone(), + Random.generate().unwrap().secret().clone(), + ); + let session = SessionImpl::new( + SessionParams { + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self_node_id.clone(), + master_node_id: self_node_id.clone(), + threshold: 2, + configured_nodes_count: 1, + connected_nodes_count: 1, + }, + access_key: Random.generate().unwrap().secret().clone(), + key_share: 
Some(DocumentKeyShare { + author: Default::default(), + threshold: 2, + public: Default::default(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), + versions: vec![DocumentKeyShareVersion { + hash: Default::default(), + id_numbers: nodes, + secret_share: Random.generate().unwrap().secret().clone(), + }], + }), + acl_storage: Arc::new(DummyAclStorage::default()), + cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + nonce: 0, + }, + Some(Requester::Signature( + ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + )), + ) + .unwrap(); + assert_eq!( + session.initialize(Default::default(), Default::default(), false, false), + Err(Error::ConsensusUnreachable) + ); + } + + #[test] + fn fails_to_initialize_when_already_initialized() { + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!( + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(), + () + ); + assert_eq!( + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap_err(), + Error::InvalidStateForRequest + ); + } + + #[test] + fn fails_to_accept_initialization_when_already_initialized() { + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!( + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(), + () + ); + assert_eq!( + sessions[0] + .on_consensus_message( + sessions[1].node(), + &message::DecryptionConsensusMessage { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, + origin: None, + message: message::ConsensusMessage::InitializeConsensusSession( + message::InitializeConsensusSession { + requester: Requester::Signature( + ethkey::sign( + Random.generate().unwrap().secret(), + &SessionId::default() + ) + .unwrap() + ) + .into(), + version: Default::default(), + } + ), + 
} + ) + .unwrap_err(), + Error::InvalidMessage + ); + } + + #[test] + fn fails_to_partial_decrypt_if_requested_by_slave() { + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!( + sessions[1] + .on_consensus_message( + sessions[0].node(), + &message::DecryptionConsensusMessage { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, + origin: None, + message: message::ConsensusMessage::InitializeConsensusSession( + message::InitializeConsensusSession { + requester: Requester::Signature( + ethkey::sign( + Random.generate().unwrap().secret(), + &SessionId::default() + ) + .unwrap() + ) + .into(), + version: Default::default(), + } + ), + } + ) + .unwrap(), + () + ); + assert_eq!( + sessions[1] + .on_partial_decryption_requested( + sessions[2].node(), + &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, + request_id: Random.generate().unwrap().secret().clone().into(), + is_shadow_decryption: false, + is_broadcast_session: false, + nodes: sessions + .iter() + .map(|s| s.node().clone().into()) + .take(4) + .collect(), + } + ) + .unwrap_err(), + Error::InvalidMessage + ); + } + + #[test] + fn fails_to_partial_decrypt_if_wrong_number_of_nodes_participating() { + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!( + sessions[1] + .on_consensus_message( + sessions[0].node(), + &message::DecryptionConsensusMessage { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, + origin: None, + message: message::ConsensusMessage::InitializeConsensusSession( + message::InitializeConsensusSession { + requester: Requester::Signature( + ethkey::sign( + Random.generate().unwrap().secret(), + &SessionId::default() + ) + .unwrap() + ) + .into(), + version: Default::default(), + } + ), + } + ) + .unwrap(), + () + ); + assert_eq!( + 
sessions[1] + .on_partial_decryption_requested( + sessions[0].node(), + &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, + request_id: Random.generate().unwrap().secret().clone().into(), + is_shadow_decryption: false, + is_broadcast_session: false, + nodes: sessions + .iter() + .map(|s| s.node().clone().into()) + .take(2) + .collect(), + } + ) + .unwrap_err(), + Error::InvalidMessage + ); + } + + #[test] + fn fails_to_accept_partial_decrypt_if_not_waiting() { + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!( + sessions[0] + .on_partial_decryption( + sessions[1].node(), + &message::PartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, + request_id: Random.generate().unwrap().secret().clone().into(), + shadow_point: Random.generate().unwrap().public().clone().into(), + decrypt_shadow: None, + } + ) + .unwrap_err(), + Error::InvalidStateForRequest + ); + } + + #[test] + fn fails_to_accept_partial_decrypt_twice() { + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + let mut pd_from = None; + let mut pd_msg = None; + do_messages_exchange_until(&clusters, &sessions, |from, _, msg| match msg { + &Message::Decryption(DecryptionMessage::PartialDecryption(ref msg)) => { + pd_from = Some(from.clone()); + pd_msg = Some(msg.clone()); + true + } + _ => false, + }) + .unwrap(); + + assert_eq!( + sessions[0] + .on_partial_decryption(pd_from.as_ref().unwrap(), &pd_msg.clone().unwrap()) + .unwrap(), + () + ); + assert_eq!( + sessions[0] + .on_partial_decryption(pd_from.as_ref().unwrap(), &pd_msg.unwrap()) + .unwrap_err(), + Error::InvalidNodeForRequest + ); + } + + #[test] + fn decryption_fails_on_session_timeout() { + let (_, _, _, sessions) = 
prepare_decryption_sessions(); + assert!(sessions[0].decrypted_secret().is_none()); + sessions[0].on_session_timeout(); + assert_eq!( + sessions[0].decrypted_secret().unwrap().unwrap_err(), + Error::ConsensusTemporaryUnreachable + ); + } + + #[test] + fn node_is_marked_rejected_when_timed_out_during_initialization_confirmation() { + let (_, _, _, sessions) = prepare_decryption_sessions(); + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + // 1 node disconnects => we still can recover secret + sessions[0].on_node_timeout(sessions[1].node()); + assert!(sessions[0] + .data + .lock() + .consensus_session + .consensus_job() + .rejects() + .contains_key(sessions[1].node())); + assert!(sessions[0].state() == ConsensusSessionState::EstablishingConsensus); + + // 2 node are disconnected => we can not recover secret + sessions[0].on_node_timeout(sessions[2].node()); + assert!(sessions[0].state() == ConsensusSessionState::Failed); + } + + #[test] + fn session_does_not_fail_if_rejected_node_disconnects() { + let (_, clusters, acl_storages, sessions) = prepare_decryption_sessions(); + let key_pair = Random.generate().unwrap(); + + acl_storages[1].prohibit(public_to_address(key_pair.public()), SessionId::default()); + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + do_messages_exchange_until(&clusters, &sessions, |_, _, _| { + sessions[0].state() == ConsensusSessionState::WaitingForPartialResults + }) + .unwrap(); + + // 1st node disconnects => ignore this + sessions[0].on_node_timeout(sessions[1].node()); + assert_eq!( + sessions[0].state(), + ConsensusSessionState::EstablishingConsensus + ); + } + + #[test] + fn session_does_not_fail_if_requested_node_disconnects() { + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + do_messages_exchange_until(&clusters, &sessions, |_, 
_, _| { + sessions[0].state() == ConsensusSessionState::WaitingForPartialResults + }) + .unwrap(); + + // 1 node disconnects => we still can recover secret + sessions[0].on_node_timeout(sessions[1].node()); + assert!(sessions[0].state() == ConsensusSessionState::EstablishingConsensus); + + // 2 node are disconnected => we can not recover secret + sessions[0].on_node_timeout(sessions[2].node()); + assert!(sessions[0].state() == ConsensusSessionState::Failed); + } + + #[test] + fn session_does_not_fail_if_node_with_shadow_point_disconnects() { + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + do_messages_exchange_until(&clusters, &sessions, |_, _, _| { + sessions[0].state() == ConsensusSessionState::WaitingForPartialResults + && sessions[0] + .data + .lock() + .consensus_session + .computation_job() + .responses() + .len() + == 2 + }) + .unwrap(); + + // disconnects from the node which has already sent us its own shadow point + let disconnected = sessions[0] + .data + .lock() + .consensus_session + .computation_job() + .responses() + .keys() + .filter(|n| *n != sessions[0].node()) + .cloned() + .nth(0) + .unwrap(); + sessions[0].on_node_timeout(&disconnected); + assert_eq!( + sessions[0].state(), + ConsensusSessionState::EstablishingConsensus + ); + } + + #[test] + fn session_restarts_if_confirmed_node_disconnects() { + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + do_messages_exchange_until(&clusters, &sessions, |_, _, _| { + sessions[0].state() == ConsensusSessionState::WaitingForPartialResults + }) + .unwrap(); + + // disconnects from the node which has already confirmed its participation + let disconnected = sessions[0] + .data + .lock() + .consensus_session + .computation_job() + .requests() + .iter() + .cloned() + .nth(0) + 
.unwrap(); + sessions[0].on_node_timeout(&disconnected); + assert_eq!( + sessions[0].state(), + ConsensusSessionState::EstablishingConsensus + ); + assert!(sessions[0] + .data + .lock() + .consensus_session + .computation_job() + .rejects() + .contains_key(&disconnected)); + assert!(!sessions[0] + .data + .lock() + .consensus_session + .computation_job() + .requests() + .contains(&disconnected)); + } + + #[test] + fn session_does_not_fail_if_non_master_node_disconnects_from_non_master_node() { + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + do_messages_exchange_until(&clusters, &sessions, |_, _, _| { + sessions[0].state() == ConsensusSessionState::WaitingForPartialResults + }) + .unwrap(); + + // disconnects from the node which has already confirmed its participation + sessions[1].on_node_timeout(sessions[2].node()); + assert!(sessions[0].state() == ConsensusSessionState::WaitingForPartialResults); + assert!(sessions[1].state() == ConsensusSessionState::ConsensusEstablished); + } + + #[test] + fn complete_dec_session() { + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + + // now let's try to do a decryption + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + do_messages_exchange(&clusters, &sessions).unwrap(); + + // now check that: + // 1) 5 of 5 sessions are in Finished state + assert_eq!( + sessions + .iter() + .filter(|s| s.state() == ConsensusSessionState::Finished) + .count(), + 5 + ); + // 2) 1 session has decrypted key value + assert!(sessions + .iter() + .skip(1) + .all(|s| s.decrypted_secret().is_none())); + + assert_eq!( + sessions[0].decrypted_secret().unwrap().unwrap(), + EncryptedDocumentKeyShadow { + decrypted_secret: SECRET_PLAIN.into(), + common_point: None, + decrypt_shadows: None, + } + ); + } + + #[test] + fn complete_shadow_dec_session() { + let (key_pair, clusters, 
_, sessions) = prepare_decryption_sessions(); + + // now let's try to do a decryption + sessions[0] + .initialize(Default::default(), Default::default(), true, false) + .unwrap(); + + do_messages_exchange(&clusters, &sessions).unwrap(); + + // now check that: + // 1) 5 of 5 sessions are in Finished state + assert_eq!( + sessions + .iter() + .filter(|s| s.state() == ConsensusSessionState::Finished) + .count(), + 5 + ); + // 2) 1 session has decrypted key value + assert!(sessions + .iter() + .skip(1) + .all(|s| s.decrypted_secret().is_none())); + + let decrypted_secret = sessions[0].decrypted_secret().unwrap().unwrap(); + // check that decrypted_secret != SECRET_PLAIN + assert!(decrypted_secret.decrypted_secret != SECRET_PLAIN.into()); + // check that common point && shadow coefficients are returned + assert!(decrypted_secret.common_point.is_some()); + assert!(decrypted_secret.decrypt_shadows.is_some()); + // check that KS client is able to restore original secret + use crypto::DEFAULT_MAC; + use ethkey::crypto::ecies::decrypt; + let decrypt_shadows: Vec<_> = decrypted_secret + .decrypt_shadows + .unwrap() + .into_iter() + .map(|c| { + Secret::from_slice(&decrypt(key_pair.secret(), &DEFAULT_MAC, &c).unwrap()).unwrap() + }) + .collect(); + let decrypted_secret = math::decrypt_with_shadow_coefficients( + decrypted_secret.decrypted_secret, + decrypted_secret.common_point.unwrap(), + decrypt_shadows, + ) + .unwrap(); + assert_eq!(decrypted_secret, SECRET_PLAIN.into()); + } + + #[test] + fn failed_dec_session() { + let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions(); + + // now let's try to do a decryption + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + // we need 4 out of 5 nodes to agree to do a decryption + // let's say that 2 of these nodes are disagree + acl_storages[1].prohibit(public_to_address(key_pair.public()), SessionId::default()); + 
acl_storages[2].prohibit(public_to_address(key_pair.public()), SessionId::default()); + + assert_eq!( + do_messages_exchange(&clusters, &sessions).unwrap_err(), + Error::ConsensusUnreachable + ); + + // check that 3 nodes have failed state + assert_eq!(sessions[0].state(), ConsensusSessionState::Failed); + assert_eq!( + sessions + .iter() + .filter(|s| s.state() == ConsensusSessionState::Failed) + .count(), + 3 + ); + } + + #[test] + fn complete_dec_session_with_acl_check_failed_on_master() { + let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions(); + + // we need 4 out of 5 nodes to agree to do a decryption + // let's say that 1 of these nodes (master) is disagree + acl_storages[0].prohibit(public_to_address(key_pair.public()), SessionId::default()); + + // now let's try to do a decryption + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + + do_messages_exchange(&clusters, &sessions).unwrap(); + + // now check that: + // 1) 4 of 5 sessions are in Finished state + assert_eq!( + sessions + .iter() + .filter(|s| s.state() == ConsensusSessionState::Finished) + .count(), + 5 + ); + // 2) 1 session has decrypted key value + assert!(sessions + .iter() + .skip(1) + .all(|s| s.decrypted_secret().is_none())); + assert_eq!( + sessions[0].decrypted_secret().unwrap().unwrap(), + EncryptedDocumentKeyShadow { + decrypted_secret: SECRET_PLAIN.into(), + common_point: None, + decrypt_shadows: None, + } + ); + } + + #[test] + fn decryption_message_fails_when_nonce_is_wrong() { + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!( + sessions[1].process_message( + sessions[0].node(), + &message::DecryptionMessage::DecryptionSessionCompleted( + message::DecryptionSessionCompleted { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + session_nonce: 10, + } + ) + ), + Err(Error::ReplayProtection) + ); + } + + #[test] + fn 
decryption_works_when_delegated_to_other_node() { + let (_, clusters, _, mut sessions) = prepare_decryption_sessions(); + + // let's say node1 doesn't have a share && delegates decryption request to node0 + // initially session is created on node1 => node1 is master for itself, but for other nodes node0 is still master + sessions[1].core.meta.master_node_id = sessions[1].core.meta.self_node_id.clone(); + sessions[1] + .data + .lock() + .consensus_session + .consensus_job_mut() + .executor_mut() + .set_requester( + sessions[0] + .data + .lock() + .consensus_session + .consensus_job() + .executor() + .requester() + .unwrap() + .clone(), + ); + + // now let's try to do a decryption + sessions[1] + .delegate( + sessions[0].core.meta.self_node_id.clone(), + Default::default(), + Default::default(), + false, + false, + ) + .unwrap(); + do_messages_exchange(&clusters, &sessions).unwrap(); + + // now check that: + // 1) 4 of 5 sessions are in Finished state + assert_eq!( + sessions + .iter() + .filter(|s| s.state() == ConsensusSessionState::Finished) + .count(), + 4 + ); + // 2) 1 session has decrypted key value + assert_eq!( + sessions[1].decrypted_secret().unwrap().unwrap(), + EncryptedDocumentKeyShadow { + decrypted_secret: SECRET_PLAIN.into(), + common_point: None, + decrypt_shadows: None, + } + ); + } + + #[test] + fn decryption_works_when_share_owners_are_isolated() { + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + + // we need 4 out of 5 nodes to agree to do a decryption + // let's say that 1 of these nodes (master) is isolated + let isolated_node_id = sessions[4].core.meta.self_node_id.clone(); + for cluster in &clusters { + cluster.remove_node(&isolated_node_id); + } + + // now let's try to do a decryption + sessions[0] + .initialize(Default::default(), Default::default(), false, false) + .unwrap(); + do_messages_exchange(&clusters, &sessions).unwrap(); + + assert_eq!( + sessions[0].decrypted_secret().unwrap().unwrap(), + 
EncryptedDocumentKeyShadow { + decrypted_secret: SECRET_PLAIN.into(), + common_point: None, + decrypt_shadows: None, + } + ); + } + + #[test] + fn decryption_result_restored_on_all_nodes_if_broadcast_session_is_completed() { + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0] + .initialize(Default::default(), Default::default(), false, true) + .unwrap(); + do_messages_exchange(&clusters, &sessions).unwrap(); + + // decryption result must be the same and available on 4 nodes + let result = sessions[0].decrypted_secret(); + assert!(result.clone().unwrap().is_ok()); + assert_eq!( + result.clone().unwrap().unwrap(), + EncryptedDocumentKeyShadow { + decrypted_secret: SECRET_PLAIN.into(), + common_point: None, + decrypt_shadows: None, + } + ); + assert_eq!( + 3, + sessions + .iter() + .skip(1) + .filter(|s| s.decrypted_secret() == result) + .count() + ); + assert_eq!( + 1, + sessions + .iter() + .skip(1) + .filter(|s| s.decrypted_secret().is_none()) + .count() + ); + } + + #[test] + fn decryption_shadows_restored_on_all_nodes_if_shadow_broadcast_session_is_completed() { + let (key_pair, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0] + .initialize(Default::default(), Default::default(), true, true) + .unwrap(); + do_messages_exchange(&clusters, &sessions).unwrap(); + + // decryption shadows must be the same and available on 4 nodes + let broadcast_shadows = sessions[0].broadcast_shadows(); + assert!(broadcast_shadows.is_some()); + assert_eq!( + 3, + sessions + .iter() + .skip(1) + .filter(|s| s.broadcast_shadows() == broadcast_shadows) + .count() + ); + assert_eq!( + 1, + sessions + .iter() + .skip(1) + .filter(|s| s.broadcast_shadows().is_none()) + .count() + ); + + // 4 nodes must be able to recover original secret + use crypto::DEFAULT_MAC; + use ethkey::crypto::ecies::decrypt; + let result = sessions[0].decrypted_secret().unwrap().unwrap(); + assert_eq!( + 3, + sessions + .iter() + .skip(1) + .filter(|s| 
s.decrypted_secret() == Some(Ok(result.clone()))) + .count() + ); + let decrypt_shadows: Vec<_> = result + .decrypt_shadows + .unwrap() + .into_iter() + .map(|c| { + Secret::from_slice(&decrypt(key_pair.secret(), &DEFAULT_MAC, &c).unwrap()).unwrap() + }) + .collect(); + let decrypted_secret = math::decrypt_with_shadow_coefficients( + result.decrypted_secret, + result.common_point.unwrap(), + decrypt_shadows, + ) + .unwrap(); + assert_eq!(decrypted_secret, SECRET_PLAIN.into()); + } + + #[test] + fn decryption_session_origin_is_known_to_all_initialized_nodes() { + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0] + .initialize(Some(1.into()), Default::default(), true, true) + .unwrap(); + do_messages_exchange(&clusters, &sessions).unwrap(); + + // all session must have origin set + assert_eq!( + 5, + sessions + .iter() + .filter(|s| s.origin() == Some(1.into())) + .count() + ); + } } diff --git a/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs b/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs index a3eabc35c..54977b7d3 100644 --- a/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs +++ b/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs @@ -14,19 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::BTreeMap; -use std::fmt::{Debug, Formatter, Error as FmtError}; -use std::time; -use std::sync::Arc; -use parking_lot::{Condvar, Mutex}; use ethereum_types::Address; use ethkey::Public; -use key_server_cluster::{Error, NodeId, SessionId, Requester, KeyStorage, - DocumentKeyShare, ServerKeyId}; -use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::ClusterSession; -use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncryptionSession, - ConfirmEncryptionInitialization, EncryptionSessionError}; +use key_server_cluster::{ + cluster::Cluster, + cluster_sessions::ClusterSession, + message::{ + ConfirmEncryptionInitialization, EncryptionMessage, EncryptionSessionError, + InitializeEncryptionSession, Message, + }, + DocumentKeyShare, Error, KeyStorage, NodeId, Requester, ServerKeyId, SessionId, +}; +use parking_lot::{Condvar, Mutex}; +use std::{ + collections::BTreeMap, + fmt::{Debug, Error as FmtError, Formatter}, + sync::Arc, + time, +}; /// Encryption (distributed key generation) session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: @@ -37,311 +42,374 @@ use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncrypti /// 3) common_point + encrypted_point are saved on all nodes /// 4) in case of error, previous values are restored pub struct SessionImpl { - /// Unique session id. - id: SessionId, - /// Public identifier of this node. - self_node_id: NodeId, - /// Encrypted data. - encrypted_data: Option, - /// Key storage. - key_storage: Arc, - /// Cluster which allows this node to send messages to other nodes in the cluster. - cluster: Arc, - /// Session nonce. - nonce: u64, - /// SessionImpl completion condvar. - completed: Condvar, - /// Mutable session data. - data: Mutex, + /// Unique session id. + id: SessionId, + /// Public identifier of this node. + self_node_id: NodeId, + /// Encrypted data. 
+ encrypted_data: Option, + /// Key storage. + key_storage: Arc, + /// Cluster which allows this node to send messages to other nodes in the cluster. + cluster: Arc, + /// Session nonce. + nonce: u64, + /// SessionImpl completion condvar. + completed: Condvar, + /// Mutable session data. + data: Mutex, } /// SessionImpl creation parameters pub struct SessionParams { - /// SessionImpl identifier. - pub id: SessionId, - /// Id of node, on which this session is running. - pub self_node_id: Public, - /// Encrypted data (result of running generation_session::SessionImpl). - pub encrypted_data: Option, - /// Key storage. - pub key_storage: Arc, - /// Cluster - pub cluster: Arc, - /// Session nonce. - pub nonce: u64, + /// SessionImpl identifier. + pub id: SessionId, + /// Id of node, on which this session is running. + pub self_node_id: Public, + /// Encrypted data (result of running generation_session::SessionImpl). + pub encrypted_data: Option, + /// Key storage. + pub key_storage: Arc, + /// Cluster + pub cluster: Arc, + /// Session nonce. + pub nonce: u64, } /// Mutable data of encryption (distributed key generation) session. #[derive(Debug)] struct SessionData { - /// Current state of the session. - state: SessionState, - /// Nodes-specific data. - nodes: BTreeMap, - /// Encryption session result. - result: Option>, + /// Current state of the session. + state: SessionState, + /// Nodes-specific data. + nodes: BTreeMap, + /// Encryption session result. + result: Option>, } /// Mutable node-specific data. #[derive(Debug, Clone)] struct NodeData { - // === Values, filled during initialization phase === - /// Flags marking that node has confirmed session initialization. - pub initialization_confirmed: bool, + // === Values, filled during initialization phase === + /// Flags marking that node has confirmed session initialization. + pub initialization_confirmed: bool, } /// Encryption (distributed key generation) session state. 
#[derive(Debug, Clone, PartialEq)] pub enum SessionState { - // === Initialization states === - /// Every node starts in this state. - WaitingForInitialization, - /// Master node waits for every other node to confirm initialization. - WaitingForInitializationConfirm, + // === Initialization states === + /// Every node starts in this state. + WaitingForInitialization, + /// Master node waits for every other node to confirm initialization. + WaitingForInitializationConfirm, - // === Final states of the session === - /// Encryption data is saved. - Finished, - /// Failed to save encryption data. - Failed, + // === Final states of the session === + /// Encryption data is saved. + Finished, + /// Failed to save encryption data. + Failed, } impl SessionImpl { - /// Create new encryption session. - pub fn new(params: SessionParams) -> Result { - check_encrypted_data(params.encrypted_data.as_ref())?; + /// Create new encryption session. + pub fn new(params: SessionParams) -> Result { + check_encrypted_data(params.encrypted_data.as_ref())?; - Ok(SessionImpl { - id: params.id, - self_node_id: params.self_node_id, - encrypted_data: params.encrypted_data, - key_storage: params.key_storage, - cluster: params.cluster, - nonce: params.nonce, - completed: Condvar::new(), - data: Mutex::new(SessionData { - state: SessionState::WaitingForInitialization, - nodes: BTreeMap::new(), - result: None, - }), - }) - } + Ok(SessionImpl { + id: params.id, + self_node_id: params.self_node_id, + encrypted_data: params.encrypted_data, + key_storage: params.key_storage, + cluster: params.cluster, + nonce: params.nonce, + completed: Condvar::new(), + data: Mutex::new(SessionData { + state: SessionState::WaitingForInitialization, + nodes: BTreeMap::new(), + result: None, + }), + }) + } - /// Get this node Id. - pub fn node(&self) -> &NodeId { - &self.self_node_id - } + /// Get this node Id. + pub fn node(&self) -> &NodeId { + &self.self_node_id + } - /// Wait for session completion. 
- pub fn wait(&self, timeout: Option) -> Result<(), Error> { - Self::wait_session(&self.completed, &self.data, timeout, |data| data.result.clone()) - .expect("wait_session returns Some if called without timeout; qed") - } + /// Wait for session completion. + pub fn wait(&self, timeout: Option) -> Result<(), Error> { + Self::wait_session(&self.completed, &self.data, timeout, |data| { + data.result.clone() + }) + .expect("wait_session returns Some if called without timeout; qed") + } - /// Start new session initialization. This must be called on master node. - pub fn initialize(&self, requester: Requester, common_point: Public, encrypted_point: Public) -> Result<(), Error> { - let mut data = self.data.lock(); + /// Start new session initialization. This must be called on master node. + pub fn initialize( + &self, + requester: Requester, + common_point: Public, + encrypted_point: Public, + ) -> Result<(), Error> { + let mut data = self.data.lock(); - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } + // check state + if data.state != SessionState::WaitingForInitialization { + return Err(Error::InvalidStateForRequest); + } - // update state - data.state = SessionState::WaitingForInitializationConfirm; - data.nodes.extend(self.cluster.nodes().into_iter().map(|n| (n, NodeData { - initialization_confirmed: &n == self.node(), - }))); + // update state + data.state = SessionState::WaitingForInitializationConfirm; + data.nodes.extend(self.cluster.nodes().into_iter().map(|n| { + ( + n, + NodeData { + initialization_confirmed: &n == self.node(), + }, + ) + })); - // TODO [Sec]: id signature is not enough here, as it was already used in key generation - // TODO [Reliability]: there could be situation when some nodes have failed to store encrypted data - // => potential problems during restore. some confirmation step is needed (2pc)? 
- // save encryption data - if let Some(encrypted_data) = self.encrypted_data.clone() { - let requester_address = requester.address(&self.id).map_err(Error::InsufficientRequesterData)?; - update_encrypted_data(&self.key_storage, self.id.clone(), - encrypted_data, requester_address, common_point.clone(), encrypted_point.clone())?; - } + // TODO [Sec]: id signature is not enough here, as it was already used in key generation + // TODO [Reliability]: there could be situation when some nodes have failed to store encrypted data + // => potential problems during restore. some confirmation step is needed (2pc)? + // save encryption data + if let Some(encrypted_data) = self.encrypted_data.clone() { + let requester_address = requester + .address(&self.id) + .map_err(Error::InsufficientRequesterData)?; + update_encrypted_data( + &self.key_storage, + self.id.clone(), + encrypted_data, + requester_address, + common_point.clone(), + encrypted_point.clone(), + )?; + } - // start initialization - if data.nodes.len() > 1 { - self.cluster.broadcast(Message::Encryption(EncryptionMessage::InitializeEncryptionSession(InitializeEncryptionSession { - session: self.id.clone().into(), - session_nonce: self.nonce, - requester: requester.into(), - common_point: common_point.into(), - encrypted_point: encrypted_point.into(), - }))) - } else { - data.state = SessionState::Finished; - data.result = Some(Ok(())); - self.completed.notify_all(); + // start initialization + if data.nodes.len() > 1 { + self.cluster.broadcast(Message::Encryption( + EncryptionMessage::InitializeEncryptionSession(InitializeEncryptionSession { + session: self.id.clone().into(), + session_nonce: self.nonce, + requester: requester.into(), + common_point: common_point.into(), + encrypted_point: encrypted_point.into(), + }), + )) + } else { + data.state = SessionState::Finished; + data.result = Some(Ok(())); + self.completed.notify_all(); - Ok(()) - } - } + Ok(()) + } + } - /// When session initialization message is 
received. - pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeEncryptionSession) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); + /// When session initialization message is received. + pub fn on_initialize_session( + &self, + sender: NodeId, + message: &InitializeEncryptionSession, + ) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); - let mut data = self.data.lock(); + let mut data = self.data.lock(); - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } + // check state + if data.state != SessionState::WaitingForInitialization { + return Err(Error::InvalidStateForRequest); + } - // check that the requester is the author of the encrypted data - if let Some(encrypted_data) = self.encrypted_data.clone() { - let requester: Requester = message.requester.clone().into(); - let requester_address = requester.address(&self.id).map_err(Error::InsufficientRequesterData)?; - update_encrypted_data(&self.key_storage, self.id.clone(), - encrypted_data, requester_address, message.common_point.clone().into(), message.encrypted_point.clone().into())?; - } + // check that the requester is the author of the encrypted data + if let Some(encrypted_data) = self.encrypted_data.clone() { + let requester: Requester = message.requester.clone().into(); + let requester_address = requester + .address(&self.id) + .map_err(Error::InsufficientRequesterData)?; + update_encrypted_data( + &self.key_storage, + self.id.clone(), + encrypted_data, + requester_address, + message.common_point.clone().into(), + message.encrypted_point.clone().into(), + )?; + } - // update state - data.state = SessionState::Finished; + // update state + data.state = SessionState::Finished; - // send confirmation back to master node - self.cluster.send(&sender, 
Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(ConfirmEncryptionInitialization { - session: self.id.clone().into(), - session_nonce: self.nonce, - }))) - } + // send confirmation back to master node + self.cluster.send( + &sender, + Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization( + ConfirmEncryptionInitialization { + session: self.id.clone().into(), + session_nonce: self.nonce, + }, + )), + ) + } - /// When session initialization confirmation message is reeived. - pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmEncryptionInitialization) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); + /// When session initialization confirmation message is reeived. + pub fn on_confirm_initialization( + &self, + sender: NodeId, + message: &ConfirmEncryptionInitialization, + ) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); - let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); + let mut data = self.data.lock(); + debug_assert!(data.nodes.contains_key(&sender)); - // check if all nodes have confirmed initialization - data.nodes.get_mut(&sender) - .expect("message is received from cluster; nodes contains all cluster nodes; qed") - .initialization_confirmed = true; - if !data.nodes.values().all(|n| n.initialization_confirmed) { - return Ok(()); - } + // check if all nodes have confirmed initialization + data.nodes + .get_mut(&sender) + .expect("message is received from cluster; nodes contains all cluster nodes; qed") + .initialization_confirmed = true; + if !data.nodes.values().all(|n| n.initialization_confirmed) { + return Ok(()); + } - // update state - data.state = SessionState::Finished; - data.result = Some(Ok(())); - self.completed.notify_all(); + // update state + data.state = SessionState::Finished; + data.result = Some(Ok(())); + 
self.completed.notify_all(); - Ok(()) - } + Ok(()) + } } impl ClusterSession for SessionImpl { - type Id = SessionId; + type Id = SessionId; - fn type_name() -> &'static str { - "encryption" - } + fn type_name() -> &'static str { + "encryption" + } - fn id(&self) -> SessionId { - self.id.clone() - } + fn id(&self) -> SessionId { + self.id.clone() + } - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.state == SessionState::Failed - || data.state == SessionState::Finished - } + fn is_finished(&self) -> bool { + let data = self.data.lock(); + data.state == SessionState::Failed || data.state == SessionState::Finished + } - fn on_node_timeout(&self, node: &NodeId) { - let mut data = self.data.lock(); + fn on_node_timeout(&self, node: &NodeId) { + let mut data = self.data.lock(); - warn!("{}: encryption session failed because {} connection has timeouted", self.node(), node); + warn!( + "{}: encryption session failed because {} connection has timeouted", + self.node(), + node + ); - data.state = SessionState::Failed; - data.result = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } + data.state = SessionState::Failed; + data.result = Some(Err(Error::NodeDisconnected)); + self.completed.notify_all(); + } - fn on_session_timeout(&self) { - let mut data = self.data.lock(); + fn on_session_timeout(&self) { + let mut data = self.data.lock(); - warn!("{}: encryption session failed with timeout", self.node()); + warn!("{}: encryption session failed with timeout", self.node()); - data.state = SessionState::Failed; - data.result = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } + data.state = SessionState::Failed; + data.result = Some(Err(Error::NodeDisconnected)); + self.completed.notify_all(); + } - fn on_session_error(&self, node: &NodeId, error: Error) { - // error in encryption session is considered fatal - // => broadcast error if error occured on this node - if *node == self.self_node_id { - // do not bother 
processing send error, as we already processing error - let _ = self.cluster.broadcast(Message::Encryption(EncryptionMessage::EncryptionSessionError(EncryptionSessionError { - session: self.id.clone().into(), - session_nonce: self.nonce, - error: error.clone().into(), - }))); - } + fn on_session_error(&self, node: &NodeId, error: Error) { + // error in encryption session is considered fatal + // => broadcast error if error occured on this node + if *node == self.self_node_id { + // do not bother processing send error, as we already processing error + let _ = self.cluster.broadcast(Message::Encryption( + EncryptionMessage::EncryptionSessionError(EncryptionSessionError { + session: self.id.clone().into(), + session_nonce: self.nonce, + error: error.clone().into(), + }), + )); + } - let mut data = self.data.lock(); + let mut data = self.data.lock(); - warn!("{}: encryption session failed with error: {} from {}", self.node(), error, node); + warn!( + "{}: encryption session failed with error: {} from {}", + self.node(), + error, + node + ); - data.state = SessionState::Failed; - data.result = Some(Err(error)); - self.completed.notify_all(); - } + data.state = SessionState::Failed; + data.result = Some(Err(error)); + self.completed.notify_all(); + } - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - if Some(self.nonce) != message.session_nonce() { - return Err(Error::ReplayProtection); - } + fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { + if Some(self.nonce) != message.session_nonce() { + return Err(Error::ReplayProtection); + } - match message { - &Message::Encryption(ref message) => match message { - &EncryptionMessage::InitializeEncryptionSession(ref message) => - self.on_initialize_session(sender.clone(), message), - &EncryptionMessage::ConfirmEncryptionInitialization(ref message) => - self.on_confirm_initialization(sender.clone(), message), - &EncryptionMessage::EncryptionSessionError(ref message) 
=> { - self.on_session_error(sender, message.error.clone()); - Ok(()) - }, - }, - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } + match message { + &Message::Encryption(ref message) => match message { + &EncryptionMessage::InitializeEncryptionSession(ref message) => { + self.on_initialize_session(sender.clone(), message) + } + &EncryptionMessage::ConfirmEncryptionInitialization(ref message) => { + self.on_confirm_initialization(sender.clone(), message) + } + &EncryptionMessage::EncryptionSessionError(ref message) => { + self.on_session_error(sender, message.error.clone()); + Ok(()) + } + }, + _ => unreachable!("cluster checks message to be correct before passing; qed"), + } + } } impl Debug for SessionImpl { - fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { - write!(f, "Encryption session {} on {}", self.id, self.self_node_id) - } + fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { + write!(f, "Encryption session {} on {}", self.id, self.self_node_id) + } } /// Check that common_point and encrypted point are not yet set in key share. pub fn check_encrypted_data(key_share: Option<&DocumentKeyShare>) -> Result<(), Error> { - if let Some(key_share) = key_share { - // check that common_point and encrypted_point are still not set yet - if key_share.common_point.is_some() || key_share.encrypted_point.is_some() { - return Err(Error::DocumentKeyAlreadyStored); - } - } + if let Some(key_share) = key_share { + // check that common_point and encrypted_point are still not set yet + if key_share.common_point.is_some() || key_share.encrypted_point.is_some() { + return Err(Error::DocumentKeyAlreadyStored); + } + } - Ok(()) + Ok(()) } /// Update key share with encrypted document key. 
-pub fn update_encrypted_data(key_storage: &Arc, key_id: ServerKeyId, mut key_share: DocumentKeyShare, author: Address, common_point: Public, encrypted_point: Public) -> Result<(), Error> { - // author must be the same - if key_share.author != author { - return Err(Error::AccessDenied); - } +pub fn update_encrypted_data( + key_storage: &Arc, + key_id: ServerKeyId, + mut key_share: DocumentKeyShare, + author: Address, + common_point: Public, + encrypted_point: Public, +) -> Result<(), Error> { + // author must be the same + if key_share.author != author { + return Err(Error::AccessDenied); + } - // save encryption data - key_share.common_point = Some(common_point); - key_share.encrypted_point = Some(encrypted_point); - key_storage.update(key_id, key_share) + // save encryption data + key_share.common_point = Some(common_point); + key_share.encrypted_point = Some(encrypted_point); + key_storage.update(key_id, key_share) } diff --git a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs b/secret-store/src/key_server_cluster/client_sessions/generation_session.rs index 0fa805f57..04ce452d2 100644 --- a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs +++ b/secret-store/src/key_server_cluster/client_sessions/generation_session.rs @@ -14,19 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::{BTreeSet, BTreeMap, VecDeque}; -use std::fmt::{Debug, Formatter, Error as FmtError}; -use std::time::Duration; -use std::sync::Arc; -use parking_lot::{Condvar, Mutex}; use ethereum_types::Address; use ethkey::{Public, Secret}; -use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare, DocumentKeyShareVersion}; -use key_server_cluster::math; -use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::ClusterSession; -use key_server_cluster::message::{Message, GenerationMessage, InitializeSession, ConfirmInitialization, CompleteInitialization, - KeysDissemination, PublicKeyShare, SessionError, SessionCompleted}; +use key_server_cluster::{ + cluster::Cluster, + cluster_sessions::ClusterSession, + math, + message::{ + CompleteInitialization, ConfirmInitialization, GenerationMessage, InitializeSession, + KeysDissemination, Message, PublicKeyShare, SessionCompleted, SessionError, + }, + DocumentKeyShare, DocumentKeyShareVersion, Error, KeyStorage, NodeId, SessionId, +}; +use parking_lot::{Condvar, Mutex}; +use std::{ + collections::{BTreeMap, BTreeSet, VecDeque}, + fmt::{Debug, Error as FmtError, Formatter}, + sync::Arc, + time::Duration, +}; /// Distributed key generation session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: @@ -37,280 +43,292 @@ use key_server_cluster::message::{Message, GenerationMessage, InitializeSession, /// 3) key verification (KV): all nodes are checking values, received for other nodes /// 4) key generation phase (KG): nodes are exchanging with information, enough to generate joint public key pub struct SessionImpl { - /// Unique session id. - id: SessionId, - /// Public identifier of this node. - self_node_id: NodeId, - /// Key storage. - key_storage: Option>, - /// Cluster which allows this node to send messages to other nodes in the cluster. - cluster: Arc, - /// Session-level nonce. 
- nonce: u64, - /// SessionImpl completion condvar. - completed: Condvar, - /// Mutable session data. - data: Mutex, + /// Unique session id. + id: SessionId, + /// Public identifier of this node. + self_node_id: NodeId, + /// Key storage. + key_storage: Option>, + /// Cluster which allows this node to send messages to other nodes in the cluster. + cluster: Arc, + /// Session-level nonce. + nonce: u64, + /// SessionImpl completion condvar. + completed: Condvar, + /// Mutable session data. + data: Mutex, } /// SessionImpl creation parameters pub struct SessionParams { - /// SessionImpl identifier. - pub id: SessionId, - /// Id of node, on which this session is running. - pub self_node_id: Public, - /// Key storage. - pub key_storage: Option>, - /// Cluster - pub cluster: Arc, - /// Session nonce. - pub nonce: Option, + /// SessionImpl identifier. + pub id: SessionId, + /// Id of node, on which this session is running. + pub self_node_id: Public, + /// Key storage. + pub key_storage: Option>, + /// Cluster + pub cluster: Arc, + /// Session nonce. + pub nonce: Option, } /// Mutable data of distributed key generation session. #[derive(Debug)] struct SessionData { - /// Current state of the session. - state: SessionState, - /// Simulate faulty behaviour? - simulate_faulty_behaviour: bool, + /// Current state of the session. + state: SessionState, + /// Simulate faulty behaviour? + simulate_faulty_behaviour: bool, - // === Values, filled when session initialization just starts === - /// Reference to the node, which has started this session. - master: Option, - /// Address of the creator of the session. - author: Option
, + // === Values, filled when session initialization just starts === + /// Reference to the node, which has started this session. + master: Option, + /// Address of the creator of the session. + author: Option
, - // === Values, filled when session initialization is completed === - /// Session origin (if any). - origin: Option
, - /// Is zero secret generation session? - is_zero: Option, - /// Threshold value for this DKG. Only `threshold + 1` will be able to collectively recreate joint secret, - /// and thus - decrypt message, encrypted with joint public. - threshold: Option, - /// Random point, jointly generated by every node in the cluster. - derived_point: Option, - /// Nodes-specific data. - nodes: BTreeMap, + // === Values, filled when session initialization is completed === + /// Session origin (if any). + origin: Option
, + /// Is zero secret generation session? + is_zero: Option, + /// Threshold value for this DKG. Only `threshold + 1` will be able to collectively recreate joint secret, + /// and thus - decrypt message, encrypted with joint public. + threshold: Option, + /// Random point, jointly generated by every node in the cluster. + derived_point: Option, + /// Nodes-specific data. + nodes: BTreeMap, - // === Values, filled during KD phase === - /// Polynom1. - polynom1: Option>, - /// Value of polynom1[0], generated by this node. - secret_coeff: Option, + // === Values, filled during KD phase === + /// Polynom1. + polynom1: Option>, + /// Value of polynom1[0], generated by this node. + secret_coeff: Option, - // === Values, filled during KG phase === - /// Secret share, which this node holds. Persistent + private. - secret_share: Option, + // === Values, filled during KG phase === + /// Secret share, which this node holds. Persistent + private. + secret_share: Option, - /// === Values, filled when DKG session is completed successfully === - /// Key share. - key_share: Option>, - /// Jointly generated public key, which can be used to encrypt secret. Public. - joint_public_and_secret: Option>, + /// === Values, filled when DKG session is completed successfully === + /// Key share. + key_share: Option>, + /// Jointly generated public key, which can be used to encrypt secret. Public. + joint_public_and_secret: Option>, } /// Mutable node-specific data. #[derive(Debug, Clone)] struct NodeData { - /// Random unique scalar. Persistent. - pub id_number: Secret, + /// Random unique scalar. Persistent. + pub id_number: Secret, - // === Values, filled during KD phase === - /// Secret value1, which has been received from this node. - pub secret1: Option, - /// Secret value2, which has been received from this node. - pub secret2: Option, - /// Public values, which have been received from this node. 
- pub publics: Option>, + // === Values, filled during KD phase === + /// Secret value1, which has been received from this node. + pub secret1: Option, + /// Secret value2, which has been received from this node. + pub secret2: Option, + /// Public values, which have been received from this node. + pub publics: Option>, - // === Values, filled during KG phase === - /// Public share, which has been received from this node. - pub public_share: Option, + // === Values, filled during KG phase === + /// Public share, which has been received from this node. + pub public_share: Option, - // === Values, filled during completion phase === - /// Flags marking that node has confirmed session completion (generated key is stored). - pub completion_confirmed: bool, + // === Values, filled during completion phase === + /// Flags marking that node has confirmed session completion (generated key is stored). + pub completion_confirmed: bool, } /// Schedule for visiting other nodes of cluster. #[derive(Debug, Clone, PartialEq)] pub struct EveryOtherNodeVisitor { - /// Already visited nodes. - visited: BTreeSet, - /// Not yet visited nodes. - unvisited: VecDeque, - /// Nodes, which are currently visited. - in_progress: BTreeSet, + /// Already visited nodes. + visited: BTreeSet, + /// Not yet visited nodes. + unvisited: VecDeque, + /// Nodes, which are currently visited. + in_progress: BTreeSet, } /// Distributed key generation session state. #[derive(Debug, Clone, PartialEq)] pub enum SessionState { - // === Initialization states === - /// Every node starts in this state. - WaitingForInitialization, - /// Master node asks every other node to confirm initialization. - /// Derived point is generated by all nodes in the cluster. - WaitingForInitializationConfirm(EveryOtherNodeVisitor), - /// Slave nodes are in this state until initialization completion is reported by master node. - WaitingForInitializationComplete, + // === Initialization states === + /// Every node starts in this state. 
+ WaitingForInitialization, + /// Master node asks every other node to confirm initialization. + /// Derived point is generated by all nodes in the cluster. + WaitingForInitializationConfirm(EveryOtherNodeVisitor), + /// Slave nodes are in this state until initialization completion is reported by master node. + WaitingForInitializationComplete, - // === KD phase states === - /// Node is waiting for generated keys from every other node. - WaitingForKeysDissemination, + // === KD phase states === + /// Node is waiting for generated keys from every other node. + WaitingForKeysDissemination, - // === KG phase states === - /// Node is waiting for joint public key share to be received from every other node. - WaitingForPublicKeyShare, + // === KG phase states === + /// Node is waiting for joint public key share to be received from every other node. + WaitingForPublicKeyShare, - // === Generation phase states === - /// Node is waiting for session completion/session completion confirmation. - WaitingForGenerationConfirmation, + // === Generation phase states === + /// Node is waiting for session completion/session completion confirmation. + WaitingForGenerationConfirmation, - // === Final states of the session === - /// Joint public key generation is completed. - Finished, - /// Joint public key generation is failed. - Failed, + // === Final states of the session === + /// Joint public key generation is completed. + Finished, + /// Joint public key generation is failed. 
+ Failed, } pub enum InitializationNodes { - RandomNumbers(BTreeSet), - SpecificNumbers(BTreeMap) + RandomNumbers(BTreeSet), + SpecificNumbers(BTreeMap), } impl InitializationNodes { - pub fn set(&self) -> BTreeSet { - match *self { - InitializationNodes::RandomNumbers(ref nodes) => nodes.clone(), - InitializationNodes::SpecificNumbers(ref nodes) => nodes.keys().cloned().collect(), - } - } + pub fn set(&self) -> BTreeSet { + match *self { + InitializationNodes::RandomNumbers(ref nodes) => nodes.clone(), + InitializationNodes::SpecificNumbers(ref nodes) => nodes.keys().cloned().collect(), + } + } } impl From> for InitializationNodes { - fn from(nodes: BTreeSet) -> Self { - InitializationNodes::RandomNumbers(nodes) - } + fn from(nodes: BTreeSet) -> Self { + InitializationNodes::RandomNumbers(nodes) + } } impl From> for InitializationNodes { - fn from(nodes: BTreeMap) -> Self { - InitializationNodes::SpecificNumbers(nodes) - } + fn from(nodes: BTreeMap) -> Self { + InitializationNodes::SpecificNumbers(nodes) + } } impl SessionImpl { - /// Create new generation session. - pub fn new(params: SessionParams) -> Self { - SessionImpl { - id: params.id, - self_node_id: params.self_node_id, - key_storage: params.key_storage, - cluster: params.cluster, - // when nonce.is_nonce(), generation session is wrapped - // => nonce is checked somewhere else && we can pass any value - nonce: params.nonce.unwrap_or_default(), - completed: Condvar::new(), - data: Mutex::new(SessionData { - state: SessionState::WaitingForInitialization, - simulate_faulty_behaviour: false, - master: None, - author: None, - origin: None, - is_zero: None, - threshold: None, - derived_point: None, - nodes: BTreeMap::new(), - polynom1: None, - secret_coeff: None, - secret_share: None, - key_share: None, - joint_public_and_secret: None, - }), - } - } + /// Create new generation session. 
+ pub fn new(params: SessionParams) -> Self { + SessionImpl { + id: params.id, + self_node_id: params.self_node_id, + key_storage: params.key_storage, + cluster: params.cluster, + // when nonce.is_nonce(), generation session is wrapped + // => nonce is checked somewhere else && we can pass any value + nonce: params.nonce.unwrap_or_default(), + completed: Condvar::new(), + data: Mutex::new(SessionData { + state: SessionState::WaitingForInitialization, + simulate_faulty_behaviour: false, + master: None, + author: None, + origin: None, + is_zero: None, + threshold: None, + derived_point: None, + nodes: BTreeMap::new(), + polynom1: None, + secret_coeff: None, + secret_share: None, + key_share: None, + joint_public_and_secret: None, + }), + } + } - /// Get this node Id. - pub fn node(&self) -> &NodeId { - &self.self_node_id - } + /// Get this node Id. + pub fn node(&self) -> &NodeId { + &self.self_node_id + } - /// Get derived point. - #[cfg(test)] - pub fn derived_point(&self) -> Option { - self.data.lock().derived_point.clone() - } + /// Get derived point. + #[cfg(test)] + pub fn derived_point(&self) -> Option { + self.data.lock().derived_point.clone() + } - /// Simulate faulty generation session behaviour. - pub fn simulate_faulty_behaviour(&self) { - self.data.lock().simulate_faulty_behaviour = true; - } + /// Simulate faulty generation session behaviour. + pub fn simulate_faulty_behaviour(&self) { + self.data.lock().simulate_faulty_behaviour = true; + } - /// Get session state. - pub fn state(&self) -> SessionState { - self.data.lock().state.clone() - } + /// Get session state. + pub fn state(&self) -> SessionState { + self.data.lock().state.clone() + } - /// Get session origin. - pub fn origin(&self) -> Option
{ - self.data.lock().origin.clone() - } + /// Get session origin. + pub fn origin(&self) -> Option
{ + self.data.lock().origin.clone() + } - /// Wait for session completion. - pub fn wait(&self, timeout: Option) -> Option> { - Self::wait_session(&self.completed, &self.data, timeout, |data| data.joint_public_and_secret.clone() - .map(|r| r.map(|r| r.0.clone()))) - } + /// Wait for session completion. + pub fn wait(&self, timeout: Option) -> Option> { + Self::wait_session(&self.completed, &self.data, timeout, |data| { + data.joint_public_and_secret + .clone() + .map(|r| r.map(|r| r.0.clone())) + }) + } - /// Get generated public and secret (if any). - pub fn joint_public_and_secret(&self) -> Option> { - self.data.lock().joint_public_and_secret.clone() - } + /// Get generated public and secret (if any). + pub fn joint_public_and_secret(&self) -> Option> { + self.data.lock().joint_public_and_secret.clone() + } - /// Start new session initialization. This must be called on master node. - pub fn initialize(&self, origin: Option
, author: Address, is_zero: bool, threshold: usize, nodes: InitializationNodes) -> Result<(), Error> { - check_cluster_nodes(self.node(), &nodes.set())?; - check_threshold(threshold, &nodes.set())?; + /// Start new session initialization. This must be called on master node. + pub fn initialize( + &self, + origin: Option
, + author: Address, + is_zero: bool, + threshold: usize, + nodes: InitializationNodes, + ) -> Result<(), Error> { + check_cluster_nodes(self.node(), &nodes.set())?; + check_threshold(threshold, &nodes.set())?; - let mut data = self.data.lock(); + let mut data = self.data.lock(); - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } + // check state + if data.state != SessionState::WaitingForInitialization { + return Err(Error::InvalidStateForRequest); + } - // update state - data.master = Some(self.node().clone()); - data.author = Some(author.clone()); - data.origin = origin.clone(); - data.is_zero = Some(is_zero); - data.threshold = Some(threshold); - match nodes { - InitializationNodes::RandomNumbers(nodes) => { - for node_id in nodes { - // generate node identification parameter - let node_id_number = math::generate_random_scalar()?; - data.nodes.insert(node_id, NodeData::with_id_number(node_id_number)); - } - }, - InitializationNodes::SpecificNumbers(nodes) => { - for (node_id, node_id_number) in nodes { - data.nodes.insert(node_id, NodeData::with_id_number(node_id_number)); - } - }, - } + // update state + data.master = Some(self.node().clone()); + data.author = Some(author.clone()); + data.origin = origin.clone(); + data.is_zero = Some(is_zero); + data.threshold = Some(threshold); + match nodes { + InitializationNodes::RandomNumbers(nodes) => { + for node_id in nodes { + // generate node identification parameter + let node_id_number = math::generate_random_scalar()?; + data.nodes + .insert(node_id, NodeData::with_id_number(node_id_number)); + } + } + InitializationNodes::SpecificNumbers(nodes) => { + for (node_id, node_id_number) in nodes { + data.nodes + .insert(node_id, NodeData::with_id_number(node_id_number)); + } + } + } - let mut visit_policy = EveryOtherNodeVisitor::new(self.node(), data.nodes.keys().cloned()); - let derived_point = math::generate_random_point()?; - match 
visit_policy.next_node() { - Some(next_node) => { - data.state = SessionState::WaitingForInitializationConfirm(visit_policy); + let mut visit_policy = EveryOtherNodeVisitor::new(self.node(), data.nodes.keys().cloned()); + let derived_point = math::generate_random_point()?; + match visit_policy.next_node() { + Some(next_node) => { + data.state = SessionState::WaitingForInitializationConfirm(visit_policy); - // start initialization - self.cluster.send(&next_node, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { + // start initialization + self.cluster.send(&next_node, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { session: self.id.clone().into(), session_nonce: self.nonce, origin: origin.map(Into::into), @@ -320,111 +338,143 @@ impl SessionImpl { threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), derived_point: derived_point.into(), }))) - }, - None => { - drop(data); - self.complete_initialization(derived_point)?; - self.disseminate_keys()?; - self.verify_keys()?; - self.complete_generation()?; + } + None => { + drop(data); + self.complete_initialization(derived_point)?; + self.disseminate_keys()?; + self.verify_keys()?; + self.complete_generation()?; - self.data.lock().state = SessionState::Finished; - self.completed.notify_all(); + self.data.lock().state = SessionState::Finished; + self.completed.notify_all(); - Ok(()) - } - } - } + Ok(()) + } + } + } - /// Process single message. - pub fn process_message(&self, sender: &NodeId, message: &GenerationMessage) -> Result<(), Error> { - if self.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } + /// Process single message. 
+ pub fn process_message( + &self, + sender: &NodeId, + message: &GenerationMessage, + ) -> Result<(), Error> { + if self.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } - match message { - &GenerationMessage::InitializeSession(ref message) => - self.on_initialize_session(sender.clone(), message), - &GenerationMessage::ConfirmInitialization(ref message) => - self.on_confirm_initialization(sender.clone(), message), - &GenerationMessage::CompleteInitialization(ref message) => - self.on_complete_initialization(sender.clone(), message), - &GenerationMessage::KeysDissemination(ref message) => - self.on_keys_dissemination(sender.clone(), message), - &GenerationMessage::PublicKeyShare(ref message) => - self.on_public_key_share(sender.clone(), message), - &GenerationMessage::SessionError(ref message) => { - self.on_session_error(sender, message.error.clone()); - Ok(()) - }, - &GenerationMessage::SessionCompleted(ref message) => - self.on_session_completed(sender.clone(), message), - } - } + match message { + &GenerationMessage::InitializeSession(ref message) => { + self.on_initialize_session(sender.clone(), message) + } + &GenerationMessage::ConfirmInitialization(ref message) => { + self.on_confirm_initialization(sender.clone(), message) + } + &GenerationMessage::CompleteInitialization(ref message) => { + self.on_complete_initialization(sender.clone(), message) + } + &GenerationMessage::KeysDissemination(ref message) => { + self.on_keys_dissemination(sender.clone(), message) + } + &GenerationMessage::PublicKeyShare(ref message) => { + self.on_public_key_share(sender.clone(), message) + } + &GenerationMessage::SessionError(ref message) => { + self.on_session_error(sender, message.error.clone()); + Ok(()) + } + &GenerationMessage::SessionCompleted(ref message) => { + self.on_session_completed(sender.clone(), message) + } + } + } - /// When session initialization message is received. 
- pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeSession) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); + /// When session initialization message is received. + pub fn on_initialize_session( + &self, + sender: NodeId, + message: &InitializeSession, + ) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); - // check message - let nodes_ids = message.nodes.keys().cloned().map(Into::into).collect(); - check_threshold(message.threshold, &nodes_ids)?; - check_cluster_nodes(self.node(), &nodes_ids)?; + // check message + let nodes_ids = message.nodes.keys().cloned().map(Into::into).collect(); + check_threshold(message.threshold, &nodes_ids)?; + check_cluster_nodes(self.node(), &nodes_ids)?; - let mut data = self.data.lock(); + let mut data = self.data.lock(); - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } + // check state + if data.state != SessionState::WaitingForInitialization { + return Err(Error::InvalidStateForRequest); + } - // update derived point with random scalar - let mut derived_point = message.derived_point.clone().into(); - math::update_random_point(&mut derived_point)?; + // update derived point with random scalar + let mut derived_point = message.derived_point.clone().into(); + math::update_random_point(&mut derived_point)?; - // send confirmation back to master node - self.cluster.send(&sender, Message::Generation(GenerationMessage::ConfirmInitialization(ConfirmInitialization { - session: self.id.clone().into(), - session_nonce: self.nonce, - derived_point: derived_point.into(), - })))?; + // send confirmation back to master node + self.cluster.send( + &sender, + Message::Generation(GenerationMessage::ConfirmInitialization( + ConfirmInitialization { + session: self.id.clone().into(), + session_nonce: self.nonce, + derived_point: 
derived_point.into(), + }, + )), + )?; - // update state - data.master = Some(sender); - data.author = Some(message.author.clone().into()); - data.state = SessionState::WaitingForInitializationComplete; - data.nodes = message.nodes.iter().map(|(id, number)| (id.clone().into(), NodeData::with_id_number(number.clone().into()))).collect(); - data.origin = message.origin.clone().map(Into::into); - data.is_zero = Some(message.is_zero); - data.threshold = Some(message.threshold); + // update state + data.master = Some(sender); + data.author = Some(message.author.clone().into()); + data.state = SessionState::WaitingForInitializationComplete; + data.nodes = message + .nodes + .iter() + .map(|(id, number)| { + ( + id.clone().into(), + NodeData::with_id_number(number.clone().into()), + ) + }) + .collect(); + data.origin = message.origin.clone().map(Into::into); + data.is_zero = Some(message.is_zero); + data.threshold = Some(message.threshold); - Ok(()) - } + Ok(()) + } - /// When session initialization confirmation message is reeived. - pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmInitialization) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); + /// When session initialization confirmation message is reeived. 
+ pub fn on_confirm_initialization( + &self, + sender: NodeId, + message: &ConfirmInitialization, + ) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); - let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); + let mut data = self.data.lock(); + debug_assert!(data.nodes.contains_key(&sender)); - // check state && select new node to be initialized - let next_receiver = match data.state { - SessionState::WaitingForInitializationConfirm(ref mut visit_policy) => { - if !visit_policy.mark_visited(&sender) { - return Err(Error::InvalidStateForRequest); - } + // check state && select new node to be initialized + let next_receiver = match data.state { + SessionState::WaitingForInitializationConfirm(ref mut visit_policy) => { + if !visit_policy.mark_visited(&sender) { + return Err(Error::InvalidStateForRequest); + } - visit_policy.next_node() - }, - _ => return Err(Error::InvalidStateForRequest), - }; + visit_policy.next_node() + } + _ => return Err(Error::InvalidStateForRequest), + }; - // proceed message - if let Some(next_receiver) = next_receiver { - return self.cluster.send(&next_receiver, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { + // proceed message + if let Some(next_receiver) = next_receiver { + return self.cluster.send(&next_receiver, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { session: self.id.clone().into(), session_nonce: self.nonce, origin: data.origin.clone().map(Into::into), @@ -434,152 +484,191 @@ impl SessionImpl { threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), derived_point: message.derived_point.clone().into(), }))); - } + } - // now it is time for keys dissemination (KD) phase - drop(data); - self.complete_initialization(message.derived_point.clone().into())?; - self.disseminate_keys() - } + // now it is time for keys 
dissemination (KD) phase + drop(data); + self.complete_initialization(message.derived_point.clone().into())?; + self.disseminate_keys() + } - /// When session initialization completion message is received. - pub fn on_complete_initialization(&self, sender: NodeId, message: &CompleteInitialization) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); + /// When session initialization completion message is received. + pub fn on_complete_initialization( + &self, + sender: NodeId, + message: &CompleteInitialization, + ) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); - let mut data = self.data.lock(); + let mut data = self.data.lock(); - // check state - if data.state != SessionState::WaitingForInitializationComplete { - return Err(Error::InvalidStateForRequest); - } - if data.master != Some(sender) { - return Err(Error::InvalidMessage); - } + // check state + if data.state != SessionState::WaitingForInitializationComplete { + return Err(Error::InvalidStateForRequest); + } + if data.master != Some(sender) { + return Err(Error::InvalidMessage); + } - // remember passed data - data.derived_point = Some(message.derived_point.clone().into()); + // remember passed data + data.derived_point = Some(message.derived_point.clone().into()); - // now it is time for keys dissemination (KD) phase - drop(data); - self.disseminate_keys() - } + // now it is time for keys dissemination (KD) phase + drop(data); + self.disseminate_keys() + } - /// When keys dissemination message is received. - pub fn on_keys_dissemination(&self, sender: NodeId, message: &KeysDissemination) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); + /// When keys dissemination message is received. 
+ pub fn on_keys_dissemination( + &self, + sender: NodeId, + message: &KeysDissemination, + ) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); - let mut data = self.data.lock(); + let mut data = self.data.lock(); - // simulate failure, if required - if data.simulate_faulty_behaviour { - return Err(Error::Internal("simulated error".into())); - } + // simulate failure, if required + if data.simulate_faulty_behaviour { + return Err(Error::Internal("simulated error".into())); + } - // check state - if data.state != SessionState::WaitingForKeysDissemination { - match data.state { - SessionState::WaitingForInitializationComplete | SessionState::WaitingForInitializationConfirm(_) => return Err(Error::TooEarlyForRequest), - _ => return Err(Error::InvalidStateForRequest), - } - } - debug_assert!(data.nodes.contains_key(&sender)); + // check state + if data.state != SessionState::WaitingForKeysDissemination { + match data.state { + SessionState::WaitingForInitializationComplete + | SessionState::WaitingForInitializationConfirm(_) => { + return Err(Error::TooEarlyForRequest) + } + _ => return Err(Error::InvalidStateForRequest), + } + } + debug_assert!(data.nodes.contains_key(&sender)); - // check message - let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KD phase follows initialization phase; qed"); - let threshold = data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"); - if !is_zero && message.publics.len() != threshold + 1 { - return Err(Error::InvalidMessage); - } + // check message + let is_zero = data.is_zero.expect( + "is_zero is filled in initialization phase; KD phase follows initialization phase; qed", + ); + let threshold = data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"); + if !is_zero && message.publics.len() != threshold + 1 { + return 
Err(Error::InvalidMessage); + } - // update node data - { - let node_data = data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; - if node_data.secret1.is_some() || node_data.secret2.is_some() || node_data.publics.is_some() { - return Err(Error::InvalidStateForRequest); - } + // update node data + { + let node_data = data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; + if node_data.secret1.is_some() + || node_data.secret2.is_some() + || node_data.publics.is_some() + { + return Err(Error::InvalidStateForRequest); + } - node_data.secret1 = Some(message.secret1.clone().into()); - node_data.secret2 = Some(message.secret2.clone().into()); - node_data.publics = Some(message.publics.iter().cloned().map(Into::into).collect()); - } + node_data.secret1 = Some(message.secret1.clone().into()); + node_data.secret2 = Some(message.secret2.clone().into()); + node_data.publics = Some(message.publics.iter().cloned().map(Into::into).collect()); + } - // check if we have received keys from every other node - if data.nodes.iter().any(|(node_id, node_data)| node_id != self.node() && (node_data.publics.is_none() || node_data.secret1.is_none() || node_data.secret2.is_none())) { - return Ok(()) - } + // check if we have received keys from every other node + if data.nodes.iter().any(|(node_id, node_data)| { + node_id != self.node() + && (node_data.publics.is_none() + || node_data.secret1.is_none() + || node_data.secret2.is_none()) + }) { + return Ok(()); + } - drop(data); - self.verify_keys() - } + drop(data); + self.verify_keys() + } - /// When public key share is received. - pub fn on_public_key_share(&self, sender: NodeId, message: &PublicKeyShare) -> Result<(), Error> { - let mut data = self.data.lock(); + /// When public key share is received. 
+ pub fn on_public_key_share( + &self, + sender: NodeId, + message: &PublicKeyShare, + ) -> Result<(), Error> { + let mut data = self.data.lock(); - // check state - if data.state != SessionState::WaitingForPublicKeyShare { - match data.state { - SessionState::WaitingForInitializationComplete | - SessionState::WaitingForKeysDissemination => return Err(Error::TooEarlyForRequest), - _ => return Err(Error::InvalidStateForRequest), - } - } + // check state + if data.state != SessionState::WaitingForPublicKeyShare { + match data.state { + SessionState::WaitingForInitializationComplete + | SessionState::WaitingForKeysDissemination => { + return Err(Error::TooEarlyForRequest) + } + _ => return Err(Error::InvalidStateForRequest), + } + } - // update node data with received public share - { - let node_data = &mut data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; - if node_data.public_share.is_some() { - return Err(Error::InvalidMessage); - } + // update node data with received public share + { + let node_data = &mut data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; + if node_data.public_share.is_some() { + return Err(Error::InvalidMessage); + } - node_data.public_share = Some(message.public_share.clone().into()); - } + node_data.public_share = Some(message.public_share.clone().into()); + } - // if there's also nodes, which has not sent us their public shares - do nothing - if data.nodes.iter().any(|(node_id, node_data)| node_id != self.node() && node_data.public_share.is_none()) { - return Ok(()); - } + // if there's also nodes, which has not sent us their public shares - do nothing + if data + .nodes + .iter() + .any(|(node_id, node_data)| node_id != self.node() && node_data.public_share.is_none()) + { + return Ok(()); + } - drop(data); - self.complete_generation() - } + drop(data); + self.complete_generation() + } - /// When session completion message is received. 
- pub fn on_session_completed(&self, sender: NodeId, message: &SessionCompleted) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); + /// When session completion message is received. + pub fn on_session_completed( + &self, + sender: NodeId, + message: &SessionCompleted, + ) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); - let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); + let mut data = self.data.lock(); + debug_assert!(data.nodes.contains_key(&sender)); - // check state - if data.state != SessionState::WaitingForGenerationConfirmation { - match data.state { - SessionState::WaitingForPublicKeyShare => return Err(Error::TooEarlyForRequest), - _ => return Err(Error::InvalidStateForRequest), - } - } + // check state + if data.state != SessionState::WaitingForGenerationConfirmation { + match data.state { + SessionState::WaitingForPublicKeyShare => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } + } - // if we are not masters, save result and respond with confirmation - if data.master.as_ref() != Some(self.node()) { - // check that we have received message from master - if data.master.as_ref() != Some(&sender) { - return Err(Error::InvalidMessage); - } + // if we are not masters, save result and respond with confirmation + if data.master.as_ref() != Some(self.node()) { + // check that we have received message from master + if data.master.as_ref() != Some(&sender) { + return Err(Error::InvalidMessage); + } - // calculate joint public key - let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KG phase follows initialization phase; qed"); - let joint_public = if !is_zero { - let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); - 
math::compute_joint_public(public_shares)? - } else { - Default::default() - }; + // calculate joint public key + let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KG phase follows initialization phase; qed"); + let joint_public = if !is_zero { + let public_shares = data.nodes.values().map(|n| { + n.public_share + .as_ref() + .expect("keys received on KD phase; KG phase follows KD phase; qed") + }); + math::compute_joint_public(public_shares)? + } else { + Default::default() + }; - // save encrypted data to key storage - let encrypted_data = DocumentKeyShare { + // save encrypted data to key storage + let encrypted_data = DocumentKeyShare { author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(), threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), public: joint_public, @@ -591,182 +680,245 @@ impl SessionImpl { )], }; - if let Some(ref key_storage) = self.key_storage { - key_storage.insert(self.id.clone(), encrypted_data.clone())?; - } + if let Some(ref key_storage) = self.key_storage { + key_storage.insert(self.id.clone(), encrypted_data.clone())?; + } - // then respond with confirmation - data.state = SessionState::Finished; - return self.cluster.send(&sender, Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted { - session: self.id.clone().into(), - session_nonce: self.nonce, - }))); - } + // then respond with confirmation + data.state = SessionState::Finished; + return self.cluster.send( + &sender, + Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted { + session: self.id.clone().into(), + session_nonce: self.nonce, + })), + ); + } - // remember that we have received confirmation from sender node - { - let sender_node = data.nodes.get_mut(&sender).expect("node is always qualified by himself; qed"); - if sender_node.completion_confirmed { - return 
Err(Error::InvalidMessage); - } + // remember that we have received confirmation from sender node + { + let sender_node = data + .nodes + .get_mut(&sender) + .expect("node is always qualified by himself; qed"); + if sender_node.completion_confirmed { + return Err(Error::InvalidMessage); + } - sender_node.completion_confirmed = true; - } + sender_node.completion_confirmed = true; + } - // check if we have received confirmations from all cluster nodes - if data.nodes.iter().any(|(_, node_data)| !node_data.completion_confirmed) { - return Ok(()) - } + // check if we have received confirmations from all cluster nodes + if data + .nodes + .iter() + .any(|(_, node_data)| !node_data.completion_confirmed) + { + return Ok(()); + } - // we have received enough confirmations => complete session - data.state = SessionState::Finished; - self.completed.notify_all(); + // we have received enough confirmations => complete session + data.state = SessionState::Finished; + self.completed.notify_all(); - Ok(()) - } + Ok(()) + } - /// Complete initialization (when all other nodex has responded with confirmation) - fn complete_initialization(&self, mut derived_point: Public) -> Result<(), Error> { - // update point once again to make sure that derived point is not generated by last node - math::update_random_point(&mut derived_point)?; + /// Complete initialization (when all other nodex has responded with confirmation) + fn complete_initialization(&self, mut derived_point: Public) -> Result<(), Error> { + // update point once again to make sure that derived point is not generated by last node + math::update_random_point(&mut derived_point)?; - // remember derived point - let mut data = self.data.lock(); - data.derived_point = Some(derived_point.clone().into()); + // remember derived point + let mut data = self.data.lock(); + data.derived_point = Some(derived_point.clone().into()); - // broadcast derived point && other session paraeters to every other node - 
self.cluster.broadcast(Message::Generation(GenerationMessage::CompleteInitialization(CompleteInitialization { - session: self.id.clone().into(), - session_nonce: self.nonce, - derived_point: derived_point.into(), - }))) - } + // broadcast derived point && other session paraeters to every other node + self.cluster.broadcast(Message::Generation( + GenerationMessage::CompleteInitialization(CompleteInitialization { + session: self.id.clone().into(), + session_nonce: self.nonce, + derived_point: derived_point.into(), + }), + )) + } - /// Keys dissemination (KD) phase - fn disseminate_keys(&self) -> Result<(), Error> { - let mut data = self.data.lock(); + /// Keys dissemination (KD) phase + fn disseminate_keys(&self) -> Result<(), Error> { + let mut data = self.data.lock(); - // pick 2t + 2 random numbers as polynomial coefficients for 2 polynoms - let threshold = data.threshold.expect("threshold is filled on initialization phase; KD phase follows initialization phase; qed"); - let is_zero = data.is_zero.expect("is_zero is filled on initialization phase; KD phase follows initialization phase; qed"); - let mut polynom1 = math::generate_random_polynom(threshold)?; - if is_zero { - polynom1[0] = math::zero_scalar(); - } - let polynom2 = math::generate_random_polynom(threshold)?; - data.polynom1 = Some(polynom1.clone()); - data.secret_coeff = Some(polynom1[0].clone()); + // pick 2t + 2 random numbers as polynomial coefficients for 2 polynoms + let threshold = data.threshold.expect("threshold is filled on initialization phase; KD phase follows initialization phase; qed"); + let is_zero = data.is_zero.expect( + "is_zero is filled on initialization phase; KD phase follows initialization phase; qed", + ); + let mut polynom1 = math::generate_random_polynom(threshold)?; + if is_zero { + polynom1[0] = math::zero_scalar(); + } + let polynom2 = math::generate_random_polynom(threshold)?; + data.polynom1 = Some(polynom1.clone()); + data.secret_coeff = Some(polynom1[0].clone()); - // 
compute t+1 public values - let publics = match is_zero { - false => math::public_values_generation(threshold, - data.derived_point.as_ref().expect("keys dissemination occurs after derived point is agreed; qed"), - &polynom1, - &polynom2)?, - true => Default::default(), - }; + // compute t+1 public values + let publics = match is_zero { + false => math::public_values_generation( + threshold, + data.derived_point + .as_ref() + .expect("keys dissemination occurs after derived point is agreed; qed"), + &polynom1, + &polynom2, + )?, + true => Default::default(), + }; - // compute secret values for every other node - for (node, node_data) in data.nodes.iter_mut() { - let secret1 = math::compute_polynom(&polynom1, &node_data.id_number)?; - let secret2 = math::compute_polynom(&polynom2, &node_data.id_number)?; + // compute secret values for every other node + for (node, node_data) in data.nodes.iter_mut() { + let secret1 = math::compute_polynom(&polynom1, &node_data.id_number)?; + let secret2 = math::compute_polynom(&polynom2, &node_data.id_number)?; - // send a message containing secret1 && secret2 to other node - if node != self.node() { - self.cluster.send(&node, Message::Generation(GenerationMessage::KeysDissemination(KeysDissemination { - session: self.id.clone().into(), - session_nonce: self.nonce, - secret1: secret1.into(), - secret2: secret2.into(), - publics: publics.iter().cloned().map(Into::into).collect(), - })))?; - } else { - node_data.secret1 = Some(secret1); - node_data.secret2 = Some(secret2); - node_data.publics = Some(publics.clone()); - } - } + // send a message containing secret1 && secret2 to other node + if node != self.node() { + self.cluster.send( + &node, + Message::Generation(GenerationMessage::KeysDissemination(KeysDissemination { + session: self.id.clone().into(), + session_nonce: self.nonce, + secret1: secret1.into(), + secret2: secret2.into(), + publics: publics.iter().cloned().map(Into::into).collect(), + })), + )?; + } else { + 
node_data.secret1 = Some(secret1); + node_data.secret2 = Some(secret2); + node_data.publics = Some(publics.clone()); + } + } - // update state - data.state = SessionState::WaitingForKeysDissemination; + // update state + data.state = SessionState::WaitingForKeysDissemination; - Ok(()) - } + Ok(()) + } - /// Keys verification (KV) phase - fn verify_keys(&self) -> Result<(), Error> { - let mut data = self.data.lock(); + /// Keys verification (KV) phase + fn verify_keys(&self) -> Result<(), Error> { + let mut data = self.data.lock(); - // key verification (KV) phase: check that other nodes have passed correct secrets - let threshold = data.threshold.expect("threshold is filled in initialization phase; KV phase follows initialization phase; qed"); - let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KV phase follows initialization phase; qed"); - let self_public_share = { - if !is_zero { - let derived_point = data.derived_point.clone().expect("derived point generated on initialization phase; KV phase follows initialization phase; qed"); - let number_id = data.nodes[self.node()].id_number.clone(); - for (_ , node_data) in data.nodes.iter_mut().filter(|&(node_id, _)| node_id != self.node()) { - let secret1 = node_data.secret1.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed"); - let secret2 = node_data.secret2.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed"); - let publics = node_data.publics.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed"); - let is_key_verification_ok = math::keys_verification(threshold, &derived_point, &number_id, - secret1, secret2, publics)?; + // key verification (KV) phase: check that other nodes have passed correct secrets + let threshold = data.threshold.expect("threshold is filled in initialization phase; KV phase follows initialization phase; qed"); + let is_zero = data.is_zero.expect( + "is_zero is filled in initialization phase; KV 
phase follows initialization phase; qed", + ); + let self_public_share = { + if !is_zero { + let derived_point = data.derived_point.clone().expect("derived point generated on initialization phase; KV phase follows initialization phase; qed"); + let number_id = data.nodes[self.node()].id_number.clone(); + for (_, node_data) in data + .nodes + .iter_mut() + .filter(|&(node_id, _)| node_id != self.node()) + { + let secret1 = node_data + .secret1 + .as_ref() + .expect("keys received on KD phase; KV phase follows KD phase; qed"); + let secret2 = node_data + .secret2 + .as_ref() + .expect("keys received on KD phase; KV phase follows KD phase; qed"); + let publics = node_data + .publics + .as_ref() + .expect("keys received on KD phase; KV phase follows KD phase; qed"); + let is_key_verification_ok = math::keys_verification( + threshold, + &derived_point, + &number_id, + secret1, + secret2, + publics, + )?; - if !is_key_verification_ok { - // node has sent us incorrect values. In original ECDKG protocol we should have sent complaint here. - return Err(Error::InvalidMessage); - } - } + if !is_key_verification_ok { + // node has sent us incorrect values. In original ECDKG protocol we should have sent complaint here. + return Err(Error::InvalidMessage); + } + } - // calculate public share - let self_public_share = { - let self_secret_coeff = data.secret_coeff.as_ref().expect("secret_coeff is generated on KD phase; KG phase follows KD phase; qed"); - math::compute_public_share(self_secret_coeff)? - }; + // calculate public share + let self_public_share = { + let self_secret_coeff = data.secret_coeff.as_ref().expect( + "secret_coeff is generated on KD phase; KG phase follows KD phase; qed", + ); + math::compute_public_share(self_secret_coeff)? 
+ }; - self_public_share - } else { - // TODO [Trust]: add verification when available - Default::default() - } - }; + self_public_share + } else { + // TODO [Trust]: add verification when available + Default::default() + } + }; - // calculate self secret + public shares - let self_secret_share = { - let secret_values_iter = data.nodes.values() - .map(|n| n.secret1.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); - math::compute_secret_share(secret_values_iter)? - }; + // calculate self secret + public shares + let self_secret_share = { + let secret_values_iter = data.nodes.values().map(|n| { + n.secret1 + .as_ref() + .expect("keys received on KD phase; KG phase follows KD phase; qed") + }); + math::compute_secret_share(secret_values_iter)? + }; - // update state - data.state = SessionState::WaitingForPublicKeyShare; - data.secret_share = Some(self_secret_share); - let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed"); - self_node.public_share = Some(self_public_share.clone()); + // update state + data.state = SessionState::WaitingForPublicKeyShare; + data.secret_share = Some(self_secret_share); + let self_node = data + .nodes + .get_mut(self.node()) + .expect("node is always qualified by himself; qed"); + self_node.public_share = Some(self_public_share.clone()); - // broadcast self public key share - self.cluster.broadcast(Message::Generation(GenerationMessage::PublicKeyShare(PublicKeyShare { - session: self.id.clone().into(), - session_nonce: self.nonce, - public_share: self_public_share.into(), - }))) - } + // broadcast self public key share + self.cluster + .broadcast(Message::Generation(GenerationMessage::PublicKeyShare( + PublicKeyShare { + session: self.id.clone().into(), + session_nonce: self.nonce, + public_share: self_public_share.into(), + }, + ))) + } - /// Complete generation - fn complete_generation(&self) -> Result<(), Error> { - let mut data = self.data.lock(); + /// Complete 
generation + fn complete_generation(&self) -> Result<(), Error> { + let mut data = self.data.lock(); - // calculate joint public key - let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KG phase follows initialization phase; qed"); - let joint_public = if !is_zero { - let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); - math::compute_joint_public(public_shares)? - } else { - Default::default() - }; + // calculate joint public key + let is_zero = data.is_zero.expect( + "is_zero is filled in initialization phase; KG phase follows initialization phase; qed", + ); + let joint_public = if !is_zero { + let public_shares = data.nodes.values().map(|n| { + n.public_share + .as_ref() + .expect("keys received on KD phase; KG phase follows KD phase; qed") + }); + math::compute_joint_public(public_shares)? + } else { + Default::default() + }; - // prepare key data - let secret_share = data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(); - let encrypted_data = DocumentKeyShare { + // prepare key data + let secret_share = data + .secret_share + .as_ref() + .expect("secret_share is filled in KG phase; we are at the end of KG phase; qed") + .clone(); + let encrypted_data = DocumentKeyShare { author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(), threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), public: joint_public.clone(), @@ -778,499 +930,642 @@ impl SessionImpl { )], }; - // if we are at the slave node - wait for session completion - let secret_coeff = data.secret_coeff.as_ref().expect("secret coeff is selected on initialization phase; current phase follows initialization; qed").clone(); - if data.master.as_ref() != Some(self.node()) { - 
data.key_share = Some(Ok(encrypted_data)); - data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff, secret_share))); - data.state = SessionState::WaitingForGenerationConfirmation; - return Ok(()); - } + // if we are at the slave node - wait for session completion + let secret_coeff = data.secret_coeff.as_ref().expect("secret coeff is selected on initialization phase; current phase follows initialization; qed").clone(); + if data.master.as_ref() != Some(self.node()) { + data.key_share = Some(Ok(encrypted_data)); + data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff, secret_share))); + data.state = SessionState::WaitingForGenerationConfirmation; + return Ok(()); + } - // then save encrypted data to the key storage - if let Some(ref key_storage) = self.key_storage { - key_storage.insert(self.id.clone(), encrypted_data.clone())?; - } + // then save encrypted data to the key storage + if let Some(ref key_storage) = self.key_storage { + key_storage.insert(self.id.clone(), encrypted_data.clone())?; + } - // then distribute encrypted data to every other node - self.cluster.broadcast(Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted { - session: self.id.clone().into(), - session_nonce: self.nonce, - })))?; + // then distribute encrypted data to every other node + self.cluster + .broadcast(Message::Generation(GenerationMessage::SessionCompleted( + SessionCompleted { + session: self.id.clone().into(), + session_nonce: self.nonce, + }, + )))?; - // then wait for confirmation from all other nodes - { - let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed"); - self_node.completion_confirmed = true; - } - data.key_share = Some(Ok(encrypted_data)); - data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff, secret_share))); - data.state = SessionState::WaitingForGenerationConfirmation; + // then wait for confirmation from all other nodes + { + let self_node = data + .nodes + 
.get_mut(self.node()) + .expect("node is always qualified by himself; qed"); + self_node.completion_confirmed = true; + } + data.key_share = Some(Ok(encrypted_data)); + data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff, secret_share))); + data.state = SessionState::WaitingForGenerationConfirmation; - Ok(()) - } + Ok(()) + } } impl ClusterSession for SessionImpl { - type Id = SessionId; + type Id = SessionId; - fn type_name() -> &'static str { - "generation" - } + fn type_name() -> &'static str { + "generation" + } - fn id(&self) -> SessionId { - self.id.clone() - } + fn id(&self) -> SessionId { + self.id.clone() + } - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.state == SessionState::Failed - || data.state == SessionState::Finished - } + fn is_finished(&self) -> bool { + let data = self.data.lock(); + data.state == SessionState::Failed || data.state == SessionState::Finished + } - fn on_node_timeout(&self, node: &NodeId) { - let mut data = self.data.lock(); + fn on_node_timeout(&self, node: &NodeId) { + let mut data = self.data.lock(); - // all nodes are required for generation session - // => fail without check - warn!("{}: generation session failed because {} connection has timeouted", self.node(), node); + // all nodes are required for generation session + // => fail without check + warn!( + "{}: generation session failed because {} connection has timeouted", + self.node(), + node + ); - data.state = SessionState::Failed; - data.key_share = Some(Err(Error::NodeDisconnected)); - data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } + data.state = SessionState::Failed; + data.key_share = Some(Err(Error::NodeDisconnected)); + data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); + self.completed.notify_all(); + } - fn on_session_timeout(&self) { - let mut data = self.data.lock(); + fn on_session_timeout(&self) { + let mut data = self.data.lock(); - warn!("{}: 
generation session failed with timeout", self.node()); + warn!("{}: generation session failed with timeout", self.node()); - data.state = SessionState::Failed; - data.key_share = Some(Err(Error::NodeDisconnected)); - data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } + data.state = SessionState::Failed; + data.key_share = Some(Err(Error::NodeDisconnected)); + data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); + self.completed.notify_all(); + } - fn on_session_error(&self, node: &NodeId, error: Error) { - // error in generation session is considered fatal - // => broadcast error if error occured on this node - if *node == self.self_node_id { - // do not bother processing send error, as we already processing error - let _ = self.cluster.broadcast(Message::Generation(GenerationMessage::SessionError(SessionError { - session: self.id.clone().into(), - session_nonce: self.nonce, - error: error.clone().into(), - }))); - } + fn on_session_error(&self, node: &NodeId, error: Error) { + // error in generation session is considered fatal + // => broadcast error if error occured on this node + if *node == self.self_node_id { + // do not bother processing send error, as we already processing error + let _ = self + .cluster + .broadcast(Message::Generation(GenerationMessage::SessionError( + SessionError { + session: self.id.clone().into(), + session_nonce: self.nonce, + error: error.clone().into(), + }, + ))); + } - let mut data = self.data.lock(); - data.state = SessionState::Failed; - data.key_share = Some(Err(error.clone())); - data.joint_public_and_secret = Some(Err(error)); - self.completed.notify_all(); - } + let mut data = self.data.lock(); + data.state = SessionState::Failed; + data.key_share = Some(Err(error.clone())); + data.joint_public_and_secret = Some(Err(error)); + self.completed.notify_all(); + } - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - 
Message::Generation(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } + fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { + match *message { + Message::Generation(ref message) => self.process_message(sender, message), + _ => unreachable!("cluster checks message to be correct before passing; qed"), + } + } } impl EveryOtherNodeVisitor { - pub fn new(self_id: &NodeId, nodes: I) -> Self where I: Iterator { - EveryOtherNodeVisitor { - visited: BTreeSet::new(), - unvisited: nodes.filter(|n| n != self_id).collect(), - in_progress: BTreeSet::new(), - } - } + pub fn new(self_id: &NodeId, nodes: I) -> Self + where + I: Iterator, + { + EveryOtherNodeVisitor { + visited: BTreeSet::new(), + unvisited: nodes.filter(|n| n != self_id).collect(), + in_progress: BTreeSet::new(), + } + } - pub fn next_node(&mut self) -> Option { - let next_node = self.unvisited.pop_front(); - if let Some(ref next_node) = next_node { - self.in_progress.insert(next_node.clone()); - } - next_node - } + pub fn next_node(&mut self) -> Option { + let next_node = self.unvisited.pop_front(); + if let Some(ref next_node) = next_node { + self.in_progress.insert(next_node.clone()); + } + next_node + } - pub fn mark_visited(&mut self, node: &NodeId) -> bool { - if !self.in_progress.remove(node) { - return false; - } - self.visited.insert(node.clone()) - } + pub fn mark_visited(&mut self, node: &NodeId) -> bool { + if !self.in_progress.remove(node) { + return false; + } + self.visited.insert(node.clone()) + } } impl NodeData { - fn with_id_number(node_id_number: Secret) -> Self { - NodeData { - id_number: node_id_number, - secret1: None, - secret2: None, - publics: None, - public_share: None, - completion_confirmed: false, - } - } + fn with_id_number(node_id_number: Secret) -> Self { + NodeData { + id_number: node_id_number, + secret1: None, + secret2: None, + publics: None, + public_share: 
None, + completion_confirmed: false, + } + } } impl Debug for SessionImpl { - fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { - write!(f, "Generation session {} on {}", self.id, self.self_node_id) - } + fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { + write!(f, "Generation session {} on {}", self.id, self.self_node_id) + } } fn check_cluster_nodes(self_node_id: &NodeId, nodes: &BTreeSet) -> Result<(), Error> { - assert!(nodes.contains(self_node_id)); - Ok(()) + assert!(nodes.contains(self_node_id)); + Ok(()) } fn check_threshold(threshold: usize, nodes: &BTreeSet) -> Result<(), Error> { - // at least threshold + 1 nodes are required to collectively decrypt message - if threshold >= nodes.len() { - return Err(Error::NotEnoughNodesForThreshold); - } + // at least threshold + 1 nodes are required to collectively decrypt message + if threshold >= nodes.len() { + return Err(Error::NotEnoughNodesForThreshold); + } - Ok(()) + Ok(()) } #[cfg(test)] pub mod tests { - use std::sync::Arc; - use ethereum_types::H256; - use ethkey::{Random, Generator, KeyPair, Secret}; - use key_server_cluster::{NodeId, Error, KeyStorage}; - use key_server_cluster::message::{self, Message, GenerationMessage, KeysDissemination, - PublicKeyShare, ConfirmInitialization}; - use key_server_cluster::cluster::tests::{MessageLoop as ClusterMessageLoop, make_clusters_and_preserve_sessions}; - use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::generation_session::{SessionImpl, SessionState}; - use key_server_cluster::math; - use key_server_cluster::math::tests::do_encryption_and_decryption; + use ethereum_types::H256; + use ethkey::{Generator, KeyPair, Random, Secret}; + use key_server_cluster::{ + cluster::tests::{make_clusters_and_preserve_sessions, MessageLoop as ClusterMessageLoop}, + cluster_sessions::ClusterSession, + generation_session::{SessionImpl, SessionState}, + math, + math::tests::do_encryption_and_decryption, + message::{ + self, 
ConfirmInitialization, GenerationMessage, KeysDissemination, Message, + PublicKeyShare, + }, + Error, KeyStorage, NodeId, + }; + use std::sync::Arc; - #[derive(Debug)] - pub struct MessageLoop(pub ClusterMessageLoop); + #[derive(Debug)] + pub struct MessageLoop(pub ClusterMessageLoop); - impl MessageLoop { - pub fn new(num_nodes: usize) -> Self { - MessageLoop(make_clusters_and_preserve_sessions(num_nodes)) - } + impl MessageLoop { + pub fn new(num_nodes: usize) -> Self { + MessageLoop(make_clusters_and_preserve_sessions(num_nodes)) + } - pub fn init(self, threshold: usize) -> Result { - self.0.cluster(0).client().new_generation_session(Default::default(), None, Default::default(), threshold) - .map(|_| self) - } + pub fn init(self, threshold: usize) -> Result { + self.0 + .cluster(0) + .client() + .new_generation_session(Default::default(), None, Default::default(), threshold) + .map(|_| self) + } - pub fn session_at(&self, idx: usize) -> Arc { - self.0.sessions(idx).generation_sessions.first().unwrap() - } + pub fn session_at(&self, idx: usize) -> Arc { + self.0.sessions(idx).generation_sessions.first().unwrap() + } - pub fn session_of(&self, node: &NodeId) -> Arc { - self.0.sessions_of(node).generation_sessions.first().unwrap() - } + pub fn session_of(&self, node: &NodeId) -> Arc { + self.0 + .sessions_of(node) + .generation_sessions + .first() + .unwrap() + } - pub fn take_message_confirm_initialization(&self) -> (NodeId, NodeId, ConfirmInitialization) { - match self.0.take_message() { - Some((from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg)))) => - (from, to, msg), - _ => panic!("unexpected"), - } - } + pub fn take_message_confirm_initialization( + &self, + ) -> (NodeId, NodeId, ConfirmInitialization) { + match self.0.take_message() { + Some(( + from, + to, + Message::Generation(GenerationMessage::ConfirmInitialization(msg)), + )) => (from, to, msg), + _ => panic!("unexpected"), + } + } - pub fn take_message_keys_dissemination(&self) 
-> (NodeId, NodeId, KeysDissemination) { - match self.0.take_message() { - Some((from, to, Message::Generation(GenerationMessage::KeysDissemination(msg)))) => - (from, to, msg), - _ => panic!("unexpected"), - } - } + pub fn take_message_keys_dissemination(&self) -> (NodeId, NodeId, KeysDissemination) { + match self.0.take_message() { + Some(( + from, + to, + Message::Generation(GenerationMessage::KeysDissemination(msg)), + )) => (from, to, msg), + _ => panic!("unexpected"), + } + } - pub fn take_message_public_key_share(&self) -> (NodeId, NodeId, PublicKeyShare) { - match self.0.take_message() { - Some((from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg)))) => - (from, to, msg), - _ => panic!("unexpected"), - } - } + pub fn take_message_public_key_share(&self) -> (NodeId, NodeId, PublicKeyShare) { + match self.0.take_message() { + Some((from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg)))) => { + (from, to, msg) + } + _ => panic!("unexpected"), + } + } - pub fn nodes_id_numbers(&self) -> Vec { - let session = self.session_at(0); - let session_data = session.data.lock(); - session_data.nodes.values().map(|n| n.id_number.clone()).collect() - } + pub fn nodes_id_numbers(&self) -> Vec { + let session = self.session_at(0); + let session_data = session.data.lock(); + session_data + .nodes + .values() + .map(|n| n.id_number.clone()) + .collect() + } - pub fn nodes_secret_shares(&self) -> Vec { - (0..self.0.nodes().len()).map(|i| { - let session = self.session_at(i); - let session_data = session.data.lock(); - session_data.secret_share.as_ref().unwrap().clone() - }).collect() - } + pub fn nodes_secret_shares(&self) -> Vec { + (0..self.0.nodes().len()) + .map(|i| { + let session = self.session_at(i); + let session_data = session.data.lock(); + session_data.secret_share.as_ref().unwrap().clone() + }) + .collect() + } - pub fn compute_key_pair(&self) -> KeyPair { - let t = 
self.0.key_storage(0).get(&Default::default()).unwrap().unwrap().threshold; - let secret_shares = self.nodes_secret_shares(); - let id_numbers = self.nodes_id_numbers(); - let secret_shares = secret_shares.iter().take(t + 1).collect::>(); - let id_numbers = id_numbers.iter().take(t + 1).collect::>(); - let joint_secret = math::compute_joint_secret_from_shares(t, &secret_shares, &id_numbers).unwrap(); + pub fn compute_key_pair(&self) -> KeyPair { + let t = self + .0 + .key_storage(0) + .get(&Default::default()) + .unwrap() + .unwrap() + .threshold; + let secret_shares = self.nodes_secret_shares(); + let id_numbers = self.nodes_id_numbers(); + let secret_shares = secret_shares.iter().take(t + 1).collect::>(); + let id_numbers = id_numbers.iter().take(t + 1).collect::>(); + let joint_secret = + math::compute_joint_secret_from_shares(t, &secret_shares, &id_numbers).unwrap(); - KeyPair::from_secret(joint_secret).unwrap() - } + KeyPair::from_secret(joint_secret).unwrap() + } - pub fn key_version(&self) -> H256 { - self.0.key_storage(0).get(&Default::default()) - .unwrap().unwrap().versions.iter().last().unwrap().hash - } - } + pub fn key_version(&self) -> H256 { + self.0 + .key_storage(0) + .get(&Default::default()) + .unwrap() + .unwrap() + .versions + .iter() + .last() + .unwrap() + .hash + } + } - #[test] - fn initializes_in_cluster_of_single_node() { - MessageLoop::new(1).init(0).unwrap(); - } + #[test] + fn initializes_in_cluster_of_single_node() { + MessageLoop::new(1).init(0).unwrap(); + } - #[test] - fn fails_to_initialize_if_threshold_is_wrong() { - assert_eq!(MessageLoop::new(2).init(2).unwrap_err(), Error::NotEnoughNodesForThreshold); - } + #[test] + fn fails_to_initialize_if_threshold_is_wrong() { + assert_eq!( + MessageLoop::new(2).init(2).unwrap_err(), + Error::NotEnoughNodesForThreshold + ); + } - #[test] - fn fails_to_initialize_when_already_initialized() { - let ml = MessageLoop::new(2).init(0).unwrap(); - assert_eq!( - 
ml.session_at(0).initialize(Default::default(), Default::default(), false, 0, ml.0.nodes().into()), - Err(Error::InvalidStateForRequest), - ); - } + #[test] + fn fails_to_initialize_when_already_initialized() { + let ml = MessageLoop::new(2).init(0).unwrap(); + assert_eq!( + ml.session_at(0).initialize( + Default::default(), + Default::default(), + false, + 0, + ml.0.nodes().into() + ), + Err(Error::InvalidStateForRequest), + ); + } - #[test] - fn fails_to_accept_initialization_when_already_initialized() { - let ml = MessageLoop::new(2).init(0).unwrap(); - let (from, to, msg) = ml.0.take_message().unwrap(); - ml.0.process_message(from, to, msg.clone()); - assert_eq!( - ml.session_of(&to).on_message(&from, &msg), - Err(Error::InvalidStateForRequest), - ); - } + #[test] + fn fails_to_accept_initialization_when_already_initialized() { + let ml = MessageLoop::new(2).init(0).unwrap(); + let (from, to, msg) = ml.0.take_message().unwrap(); + ml.0.process_message(from, to, msg.clone()); + assert_eq!( + ml.session_of(&to).on_message(&from, &msg), + Err(Error::InvalidStateForRequest), + ); + } - #[test] - fn slave_updates_derived_point_on_initialization() { - let ml = MessageLoop::new(2).init(0).unwrap(); - let original_point = match ml.0.take_message().unwrap() { - (from, to, Message::Generation(GenerationMessage::InitializeSession(msg))) => { - let original_point = msg.derived_point.clone(); - let msg = Message::Generation(GenerationMessage::InitializeSession(msg)); - ml.0.process_message(from, to, msg); - original_point - }, - _ => panic!("unexpected"), - }; + #[test] + fn slave_updates_derived_point_on_initialization() { + let ml = MessageLoop::new(2).init(0).unwrap(); + let original_point = match ml.0.take_message().unwrap() { + (from, to, Message::Generation(GenerationMessage::InitializeSession(msg))) => { + let original_point = msg.derived_point.clone(); + let msg = Message::Generation(GenerationMessage::InitializeSession(msg)); + ml.0.process_message(from, to, msg); 
+ original_point + } + _ => panic!("unexpected"), + }; - match ml.0.take_message().unwrap() { - (_, _, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => - assert!(original_point != msg.derived_point), - _ => panic!("unexpected"), - } - } + match ml.0.take_message().unwrap() { + (_, _, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => { + assert!(original_point != msg.derived_point) + } + _ => panic!("unexpected"), + } + } - #[test] - fn fails_to_accept_initialization_confirmation_if_already_accepted_from_the_same_node() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); + #[test] + fn fails_to_accept_initialization_confirmation_if_already_accepted_from_the_same_node() { + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); - let (from, to, msg) = ml.take_message_confirm_initialization(); - ml.0.process_message(from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg.clone()))); - assert_eq!(ml.session_of(&to).on_confirm_initialization(from, &msg), Err(Error::InvalidStateForRequest)); - } + let (from, to, msg) = ml.take_message_confirm_initialization(); + ml.0.process_message( + from, + to, + Message::Generation(GenerationMessage::ConfirmInitialization(msg.clone())), + ); + assert_eq!( + ml.session_of(&to).on_confirm_initialization(from, &msg), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn fails_to_accept_initialization_confirmation_if_initialization_already_completed() { - let ml = MessageLoop::new(2).init(0).unwrap(); - ml.0.take_and_process_message(); - ml.0.take_and_process_message(); - assert_eq!(ml.session_at(0).on_confirm_initialization(ml.0.node(1), &message::ConfirmInitialization { - session: Default::default(), - session_nonce: 0, - derived_point: math::generate_random_point().unwrap().into(), - }), Err(Error::InvalidStateForRequest)); - } + #[test] + fn 
fails_to_accept_initialization_confirmation_if_initialization_already_completed() { + let ml = MessageLoop::new(2).init(0).unwrap(); + ml.0.take_and_process_message(); + ml.0.take_and_process_message(); + assert_eq!( + ml.session_at(0).on_confirm_initialization( + ml.0.node(1), + &message::ConfirmInitialization { + session: Default::default(), + session_nonce: 0, + derived_point: math::generate_random_point().unwrap().into(), + } + ), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn master_updates_derived_point_on_initialization_completion() { - let ml = MessageLoop::new(2).init(0).unwrap(); - ml.0.take_and_process_message(); - let original_point = match ml.0.take_message().unwrap() { - (from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => { - let original_point = msg.derived_point.clone(); - let msg = Message::Generation(GenerationMessage::ConfirmInitialization(msg)); - ml.session_of(&to).on_message(&from, &msg).unwrap(); - original_point - }, - _ => panic!("unexpected"), - }; + #[test] + fn master_updates_derived_point_on_initialization_completion() { + let ml = MessageLoop::new(2).init(0).unwrap(); + ml.0.take_and_process_message(); + let original_point = match ml.0.take_message().unwrap() { + (from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => { + let original_point = msg.derived_point.clone(); + let msg = Message::Generation(GenerationMessage::ConfirmInitialization(msg)); + ml.session_of(&to).on_message(&from, &msg).unwrap(); + original_point + } + _ => panic!("unexpected"), + }; - assert!(ml.session_at(0).derived_point().unwrap() != original_point.into()); - } + assert!(ml.session_at(0).derived_point().unwrap() != original_point.into()); + } - #[test] - fn fails_to_complete_initialization_if_not_waiting_for_it() { - let ml = MessageLoop::new(2).init(0).unwrap(); - ml.0.take_and_process_message(); - assert_eq!(ml.session_at(0).on_complete_initialization(ml.0.node(1), 
&message::CompleteInitialization { - session: Default::default(), - session_nonce: 0, - derived_point: math::generate_random_point().unwrap().into(), - }), Err(Error::InvalidStateForRequest)); - } + #[test] + fn fails_to_complete_initialization_if_not_waiting_for_it() { + let ml = MessageLoop::new(2).init(0).unwrap(); + ml.0.take_and_process_message(); + assert_eq!( + ml.session_at(0).on_complete_initialization( + ml.0.node(1), + &message::CompleteInitialization { + session: Default::default(), + session_nonce: 0, + derived_point: math::generate_random_point().unwrap().into(), + } + ), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn fails_to_complete_initialization_from_non_master_node() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); - ml.0.take_and_process_message(); - ml.0.take_and_process_message(); - ml.0.take_and_process_message(); - assert_eq!(ml.session_at(1).on_complete_initialization(ml.0.node(2), &message::CompleteInitialization { - session: Default::default(), - session_nonce: 0, - derived_point: math::generate_random_point().unwrap().into(), - }), Err(Error::InvalidMessage)); - } + #[test] + fn fails_to_complete_initialization_from_non_master_node() { + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); + ml.0.take_and_process_message(); + ml.0.take_and_process_message(); + ml.0.take_and_process_message(); + assert_eq!( + ml.session_at(1).on_complete_initialization( + ml.0.node(2), + &message::CompleteInitialization { + session: Default::default(), + session_nonce: 0, + derived_point: math::generate_random_point().unwrap().into(), + } + ), + Err(Error::InvalidMessage) + ); + } - #[test] - fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() { - let ml = MessageLoop::new(2).init(0).unwrap(); - assert_eq!(ml.session_at(0).on_keys_dissemination(ml.0.node(1), &message::KeysDissemination { - session: Default::default(), - session_nonce: 0, - secret1: 
math::generate_random_scalar().unwrap().into(), - secret2: math::generate_random_scalar().unwrap().into(), - publics: vec![math::generate_random_point().unwrap().into()], - }), Err(Error::TooEarlyForRequest)); - } + #[test] + fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() { + let ml = MessageLoop::new(2).init(0).unwrap(); + assert_eq!( + ml.session_at(0).on_keys_dissemination( + ml.0.node(1), + &message::KeysDissemination { + session: Default::default(), + session_nonce: 0, + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], + } + ), + Err(Error::TooEarlyForRequest) + ); + } - #[test] - fn fails_to_accept_keys_dissemination_if_wrong_number_of_publics_passed() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); // m -> s1: InitializeSession - ml.0.take_and_process_message(); // m -> s2: InitializeSession - ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // m -> s1: CompleteInitialization - ml.0.take_and_process_message(); // m -> s2: CompleteInitialization + #[test] + fn fails_to_accept_keys_dissemination_if_wrong_number_of_publics_passed() { + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); // m -> s1: InitializeSession + ml.0.take_and_process_message(); // m -> s2: InitializeSession + ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // m -> s1: CompleteInitialization + ml.0.take_and_process_message(); // m -> s2: CompleteInitialization - let (from, to, mut msg) = ml.take_message_keys_dissemination(); - msg.publics.clear(); - assert_eq!(ml.session_of(&to).on_keys_dissemination(from, &msg), 
Err(Error::InvalidMessage)); - } + let (from, to, mut msg) = ml.take_message_keys_dissemination(); + msg.publics.clear(); + assert_eq!( + ml.session_of(&to).on_keys_dissemination(from, &msg), + Err(Error::InvalidMessage) + ); + } - #[test] - fn fails_to_accept_keys_dissemination_second_time_from_the_same_node() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); // m -> s1: InitializeSession - ml.0.take_and_process_message(); // m -> s2: InitializeSession - ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // m -> s1: CompleteInitialization - ml.0.take_and_process_message(); // m -> s2: CompleteInitialization + #[test] + fn fails_to_accept_keys_dissemination_second_time_from_the_same_node() { + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); // m -> s1: InitializeSession + ml.0.take_and_process_message(); // m -> s2: InitializeSession + ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // m -> s1: CompleteInitialization + ml.0.take_and_process_message(); // m -> s2: CompleteInitialization - let (from, to, msg) = ml.take_message_keys_dissemination(); - ml.0.process_message(from, to, Message::Generation(GenerationMessage::KeysDissemination(msg.clone()))); - assert_eq!(ml.session_of(&to).on_keys_dissemination(from, &msg), Err(Error::InvalidStateForRequest)); - } + let (from, to, msg) = ml.take_message_keys_dissemination(); + ml.0.process_message( + from, + to, + Message::Generation(GenerationMessage::KeysDissemination(msg.clone())), + ); + assert_eq!( + ml.session_of(&to).on_keys_dissemination(from, &msg), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn should_not_accept_public_key_share_when_is_not_waiting_for_it() { - let ml = 
MessageLoop::new(3).init(1).unwrap(); - assert_eq!(ml.session_at(0).on_public_key_share(ml.0.node(1), &message::PublicKeyShare { - session: Default::default(), - session_nonce: 0, - public_share: math::generate_random_point().unwrap().into(), - }), Err(Error::InvalidStateForRequest)); - } + #[test] + fn should_not_accept_public_key_share_when_is_not_waiting_for_it() { + let ml = MessageLoop::new(3).init(1).unwrap(); + assert_eq!( + ml.session_at(0).on_public_key_share( + ml.0.node(1), + &message::PublicKeyShare { + session: Default::default(), + session_nonce: 0, + public_share: math::generate_random_point().unwrap().into(), + } + ), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn should_not_accept_public_key_share_when_receiving_twice() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); // m -> s1: InitializeSession - ml.0.take_and_process_message(); // m -> s2: InitializeSession - ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // m -> s1: CompleteInitialization - ml.0.take_and_process_message(); // m -> s2: CompleteInitialization - ml.0.take_and_process_message(); // m -> s1: KeysDissemination - ml.0.take_and_process_message(); // m -> s2: KeysDissemination - ml.0.take_and_process_message(); // s1 -> m: KeysDissemination - ml.0.take_and_process_message(); // s1 -> s2: KeysDissemination - ml.0.take_and_process_message(); // s2 -> m: KeysDissemination - ml.0.take_and_process_message(); // s2 -> s1: KeysDissemination + #[test] + fn should_not_accept_public_key_share_when_receiving_twice() { + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); // m -> s1: InitializeSession + ml.0.take_and_process_message(); // m -> s2: InitializeSession + ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // s2 -> m: 
ConfirmInitialization + ml.0.take_and_process_message(); // m -> s1: CompleteInitialization + ml.0.take_and_process_message(); // m -> s2: CompleteInitialization + ml.0.take_and_process_message(); // m -> s1: KeysDissemination + ml.0.take_and_process_message(); // m -> s2: KeysDissemination + ml.0.take_and_process_message(); // s1 -> m: KeysDissemination + ml.0.take_and_process_message(); // s1 -> s2: KeysDissemination + ml.0.take_and_process_message(); // s2 -> m: KeysDissemination + ml.0.take_and_process_message(); // s2 -> s1: KeysDissemination - let (from, to, msg) = ml.take_message_public_key_share(); - ml.0.process_message(from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg.clone()))); - assert_eq!(ml.session_of(&to).on_public_key_share(from, &msg), Err(Error::InvalidMessage)); - } + let (from, to, msg) = ml.take_message_public_key_share(); + ml.0.process_message( + from, + to, + Message::Generation(GenerationMessage::PublicKeyShare(msg.clone())), + ); + assert_eq!( + ml.session_of(&to).on_public_key_share(from, &msg), + Err(Error::InvalidMessage) + ); + } - #[test] - fn encryption_fails_on_session_timeout() { - let ml = MessageLoop::new(2).init(0).unwrap(); - assert!(ml.session_at(0).joint_public_and_secret().is_none()); - ml.session_at(0).on_session_timeout(); - assert_eq!(ml.session_at(0).joint_public_and_secret().unwrap(), Err(Error::NodeDisconnected)); - } + #[test] + fn encryption_fails_on_session_timeout() { + let ml = MessageLoop::new(2).init(0).unwrap(); + assert!(ml.session_at(0).joint_public_and_secret().is_none()); + ml.session_at(0).on_session_timeout(); + assert_eq!( + ml.session_at(0).joint_public_and_secret().unwrap(), + Err(Error::NodeDisconnected) + ); + } - #[test] - fn encryption_fails_on_node_timeout() { - let ml = MessageLoop::new(2).init(0).unwrap(); - assert!(ml.session_at(0).joint_public_and_secret().is_none()); - ml.session_at(0).on_node_timeout(&ml.0.node(1)); - 
assert_eq!(ml.session_at(0).joint_public_and_secret().unwrap(), Err(Error::NodeDisconnected)); - } + #[test] + fn encryption_fails_on_node_timeout() { + let ml = MessageLoop::new(2).init(0).unwrap(); + assert!(ml.session_at(0).joint_public_and_secret().is_none()); + ml.session_at(0).on_node_timeout(&ml.0.node(1)); + assert_eq!( + ml.session_at(0).joint_public_and_secret().unwrap(), + Err(Error::NodeDisconnected) + ); + } - #[test] - fn complete_enc_dec_session() { - let test_cases = [(0, 5), (2, 5), (3, 5)]; - for &(threshold, num_nodes) in &test_cases { - let ml = MessageLoop::new(num_nodes).init(threshold).unwrap(); - ml.0.loop_until(|| ml.0.is_empty()); + #[test] + fn complete_enc_dec_session() { + let test_cases = [(0, 5), (2, 5), (3, 5)]; + for &(threshold, num_nodes) in &test_cases { + let ml = MessageLoop::new(num_nodes).init(threshold).unwrap(); + ml.0.loop_until(|| ml.0.is_empty()); - // check that all nodes has finished joint public generation - let joint_public_key = ml.session_at(0).joint_public_and_secret().unwrap().unwrap().0; - for i in 0..num_nodes { - let session = ml.session_at(i); - assert_eq!(session.state(), SessionState::Finished); - assert_eq!(session.joint_public_and_secret().map(|p| p.map(|p| p.0)), Some(Ok(joint_public_key))); - } + // check that all nodes has finished joint public generation + let joint_public_key = ml + .session_at(0) + .joint_public_and_secret() + .unwrap() + .unwrap() + .0; + for i in 0..num_nodes { + let session = ml.session_at(i); + assert_eq!(session.state(), SessionState::Finished); + assert_eq!( + session.joint_public_and_secret().map(|p| p.map(|p| p.0)), + Some(Ok(joint_public_key)) + ); + } - // now let's encrypt some secret (which is a point on EC) - let document_secret_plain = Random.generate().unwrap().public().clone(); - let all_nodes_id_numbers = ml.nodes_id_numbers(); - let all_nodes_secret_shares = ml.nodes_secret_shares(); - let document_secret_decrypted = do_encryption_and_decryption(threshold, 
&joint_public_key, - &all_nodes_id_numbers, - &all_nodes_secret_shares, - None, - document_secret_plain.clone() - ).0; - assert_eq!(document_secret_plain, document_secret_decrypted); - } - } + // now let's encrypt some secret (which is a point on EC) + let document_secret_plain = Random.generate().unwrap().public().clone(); + let all_nodes_id_numbers = ml.nodes_id_numbers(); + let all_nodes_secret_shares = ml.nodes_secret_shares(); + let document_secret_decrypted = do_encryption_and_decryption( + threshold, + &joint_public_key, + &all_nodes_id_numbers, + &all_nodes_secret_shares, + None, + document_secret_plain.clone(), + ) + .0; + assert_eq!(document_secret_plain, document_secret_decrypted); + } + } - #[test] - fn generation_message_fails_when_nonce_is_wrong() { - let ml = MessageLoop::new(2).init(0).unwrap(); - ml.0.take_and_process_message(); + #[test] + fn generation_message_fails_when_nonce_is_wrong() { + let ml = MessageLoop::new(2).init(0).unwrap(); + ml.0.take_and_process_message(); - let msg = message::GenerationMessage::KeysDissemination(message::KeysDissemination { - session: Default::default(), - session_nonce: 10, - secret1: math::generate_random_scalar().unwrap().into(), - secret2: math::generate_random_scalar().unwrap().into(), - publics: vec![math::generate_random_point().unwrap().into()], - }); - assert_eq!(ml.session_at(1).process_message(&ml.0.node(0), &msg).unwrap_err(), Error::ReplayProtection); - } + let msg = message::GenerationMessage::KeysDissemination(message::KeysDissemination { + session: Default::default(), + session_nonce: 10, + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], + }); + assert_eq!( + ml.session_at(1) + .process_message(&ml.0.node(0), &msg) + .unwrap_err(), + Error::ReplayProtection + ); + } } diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs 
b/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs index fe3bd4f11..74d97cb74 100644 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs +++ b/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs @@ -14,232 +14,267 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::{BTreeSet, BTreeMap}; -use std::collections::btree_map::Entry; -use std::sync::Arc; -use parking_lot::{Mutex, Condvar}; -use ethkey::{Public, Secret, Signature, sign}; use ethereum_types::H256; -use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, DocumentKeyShare, Requester}; -use key_server_cluster::cluster::{Cluster}; -use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession}; -use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, - SessionState as GenerationSessionState}; -use key_server_cluster::math; -use key_server_cluster::message::{Message, EcdsaSigningMessage, EcdsaSigningConsensusMessage, EcdsaSignatureNonceGenerationMessage, - EcdsaInversionNonceGenerationMessage, EcdsaInversionZeroGenerationMessage, EcdsaSigningInversedNonceCoeffShare, - EcdsaRequestPartialSignature, EcdsaPartialSignature, EcdsaSigningSessionCompleted, GenerationMessage, - ConsensusMessage, EcdsaSigningSessionError, InitializeConsensusSession, ConfirmConsensusInitialization, - EcdsaSigningSessionDelegation, EcdsaSigningSessionDelegationCompleted}; -use key_server_cluster::jobs::job_session::JobTransport; -use key_server_cluster::jobs::key_access_job::KeyAccessJob; -use key_server_cluster::jobs::signing_job_ecdsa::{EcdsaPartialSigningRequest, EcdsaPartialSigningResponse, EcdsaSigningJob}; -use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; +use ethkey::{sign, Public, Secret, 
Signature}; +use key_server_cluster::{ + cluster::Cluster, + cluster_sessions::{ClusterSession, SessionIdWithSubSession}, + generation_session::{ + SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, + SessionState as GenerationSessionState, + }, + jobs::{ + consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, + job_session::JobTransport, + key_access_job::KeyAccessJob, + signing_job_ecdsa::{ + EcdsaPartialSigningRequest, EcdsaPartialSigningResponse, EcdsaSigningJob, + }, + }, + math, + message::{ + ConfirmConsensusInitialization, ConsensusMessage, EcdsaInversionNonceGenerationMessage, + EcdsaInversionZeroGenerationMessage, EcdsaPartialSignature, EcdsaRequestPartialSignature, + EcdsaSignatureNonceGenerationMessage, EcdsaSigningConsensusMessage, + EcdsaSigningInversedNonceCoeffShare, EcdsaSigningMessage, EcdsaSigningSessionCompleted, + EcdsaSigningSessionDelegation, EcdsaSigningSessionDelegationCompleted, + EcdsaSigningSessionError, GenerationMessage, InitializeConsensusSession, Message, + }, + AclStorage, DocumentKeyShare, Error, NodeId, Requester, SessionId, SessionMeta, +}; +use parking_lot::{Condvar, Mutex}; +use std::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + sync::Arc, +}; /// Distributed ECDSA-signing session. /// Based on "A robust threshold elliptic curve digital signature providing a new verifiable secret sharing scheme" paper. /// WARNING: can only be used if 2*t < N is true for key generation scheme pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex, } /// Immutable session data. struct SessionCore { - /// Session metadata. - pub meta: SessionMeta, - /// Signing session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// Cluster which allows this node to send messages to other nodes in the cluster. 
- pub cluster: Arc, - /// Session-level nonce. - pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session metadata. + pub meta: SessionMeta, + /// Signing session access key. + pub access_key: Secret, + /// Key share. + pub key_share: Option, + /// Cluster which allows this node to send messages to other nodes in the cluster. + pub cluster: Arc, + /// Session-level nonce. + pub nonce: u64, + /// SessionImpl completion condvar. + pub completed: Condvar, } /// Signing consensus session type. -type SigningConsensusSession = ConsensusSession; +type SigningConsensusSession = + ConsensusSession; /// Mutable session data. struct SessionData { - /// Session state. - pub state: SessionState, - /// Message hash. - pub message_hash: Option, - /// Key version to use for decryption. - pub version: Option, - /// Consensus-based signing session. - pub consensus_session: SigningConsensusSession, - /// Signature nonce generation session. - pub sig_nonce_generation_session: Option, - /// Inversion nonce generation session. - pub inv_nonce_generation_session: Option, - /// Inversion zero generation session. - pub inv_zero_generation_session: Option, - /// Inversed nonce coefficient shares. - pub inversed_nonce_coeff_shares: Option>, - /// Delegation status. - pub delegation_status: Option, - /// Decryption result. - pub result: Option>, + /// Session state. + pub state: SessionState, + /// Message hash. + pub message_hash: Option, + /// Key version to use for decryption. + pub version: Option, + /// Consensus-based signing session. + pub consensus_session: SigningConsensusSession, + /// Signature nonce generation session. + pub sig_nonce_generation_session: Option, + /// Inversion nonce generation session. + pub inv_nonce_generation_session: Option, + /// Inversion zero generation session. + pub inv_zero_generation_session: Option, + /// Inversed nonce coefficient shares. + pub inversed_nonce_coeff_shares: Option>, + /// Delegation status. 
+ pub delegation_status: Option, + /// Decryption result. + pub result: Option>, } /// Signing session state. #[derive(Debug, PartialEq)] pub enum SessionState { - /// Consensus is establishing. - ConsensusEstablishing, - /// Nonces (signature, inversion && zero) are generating. - NoncesGenerating, - /// Waiting for inversed nonce shares. - WaitingForInversedNonceShares, - /// State when signature is computing. - SignatureComputing, + /// Consensus is establishing. + ConsensusEstablishing, + /// Nonces (signature, inversion && zero) are generating. + NoncesGenerating, + /// Waiting for inversed nonce shares. + WaitingForInversedNonceShares, + /// State when signature is computing. + SignatureComputing, } /// Session creation parameters pub struct SessionParams { - /// Session metadata. - pub meta: SessionMeta, - /// Session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// ACL storage. - pub acl_storage: Arc, - /// Cluster - pub cluster: Arc, - /// Session nonce. - pub nonce: u64, + /// Session metadata. + pub meta: SessionMeta, + /// Session access key. + pub access_key: Secret, + /// Key share. + pub key_share: Option, + /// ACL storage. + pub acl_storage: Arc, + /// Cluster + pub cluster: Arc, + /// Session nonce. + pub nonce: u64, } /// Signing consensus transport. struct SigningConsensusTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Selected key version (on master node). - version: Option, - /// Cluster. - cluster: Arc, + /// Session id. + id: SessionId, + /// Session access key. + access_key: Secret, + /// Session-level nonce. + nonce: u64, + /// Selected key version (on master node). + version: Option, + /// Cluster. + cluster: Arc, } /// Signing key generation transport. -struct NonceGenerationTransport EcdsaSigningMessage + Send + Sync> { - /// Session id. - id: SessionId, - /// Session access key. 
- access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, - /// Other nodes ids. - other_nodes_ids: BTreeSet, - /// Message mapping function. - map: F, +struct NonceGenerationTransport< + F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + Send + Sync, +> { + /// Session id. + id: SessionId, + /// Session access key. + access_key: Secret, + /// Session-level nonce. + nonce: u64, + /// Cluster. + cluster: Arc, + /// Other nodes ids. + other_nodes_ids: BTreeSet, + /// Message mapping function. + map: F, } /// Signing job transport struct SigningJobTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, + /// Session id. + id: SessionId, + /// Session access key. + access_key: Secret, + /// Session-level nonce. + nonce: u64, + /// Cluster. + cluster: Arc, } /// Session delegation status. enum DelegationStatus { - /// Delegated to other node. - DelegatedTo(NodeId), - /// Delegated from other node. - DelegatedFrom(NodeId, u64), + /// Delegated to other node. + DelegatedTo(NodeId), + /// Delegated from other node. + DelegatedFrom(NodeId, u64), } impl SessionImpl { - /// Create new signing session. - pub fn new(params: SessionParams, requester: Option) -> Result { - debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default()); + /// Create new signing session. 
+ pub fn new(params: SessionParams, requester: Option) -> Result { + debug_assert_eq!( + params.meta.threshold, + params + .key_share + .as_ref() + .map(|ks| ks.threshold) + .unwrap_or_default() + ); - let consensus_transport = SigningConsensusTransport { - id: params.meta.id.clone(), - access_key: params.access_key.clone(), - nonce: params.nonce, - version: None, - cluster: params.cluster.clone(), - }; - let consensus_session = ConsensusSession::new(ConsensusSessionParams { - // this session requires responses from 2 * t nodes - meta: SessionMeta { - id: params.meta.id, - master_node_id: params.meta.master_node_id, - self_node_id: params.meta.self_node_id, - threshold: params.meta.threshold * 2, - configured_nodes_count: params.meta.configured_nodes_count, - connected_nodes_count: params.meta.connected_nodes_count, - }, - consensus_executor: match requester { - Some(requester) => KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), requester), - None => KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()), - }, - consensus_transport: consensus_transport, - })?; + let consensus_transport = SigningConsensusTransport { + id: params.meta.id.clone(), + access_key: params.access_key.clone(), + nonce: params.nonce, + version: None, + cluster: params.cluster.clone(), + }; + let consensus_session = ConsensusSession::new(ConsensusSessionParams { + // this session requires responses from 2 * t nodes + meta: SessionMeta { + id: params.meta.id, + master_node_id: params.meta.master_node_id, + self_node_id: params.meta.self_node_id, + threshold: params.meta.threshold * 2, + configured_nodes_count: params.meta.configured_nodes_count, + connected_nodes_count: params.meta.connected_nodes_count, + }, + consensus_executor: match requester { + Some(requester) => KeyAccessJob::new_on_master( + params.meta.id.clone(), + params.acl_storage.clone(), + requester, + ), + None => { + KeyAccessJob::new_on_slave(params.meta.id.clone(), 
params.acl_storage.clone()) + } + }, + consensus_transport: consensus_transport, + })?; - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - access_key: params.access_key, - key_share: params.key_share, - cluster: params.cluster, - nonce: params.nonce, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::ConsensusEstablishing, - message_hash: None, - version: None, - consensus_session: consensus_session, - sig_nonce_generation_session: None, - inv_nonce_generation_session: None, - inv_zero_generation_session: None, - inversed_nonce_coeff_shares: None, - delegation_status: None, - result: None, - }), - }) - } + Ok(SessionImpl { + core: SessionCore { + meta: params.meta, + access_key: params.access_key, + key_share: params.key_share, + cluster: params.cluster, + nonce: params.nonce, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + state: SessionState::ConsensusEstablishing, + message_hash: None, + version: None, + consensus_session: consensus_session, + sig_nonce_generation_session: None, + inv_nonce_generation_session: None, + inv_zero_generation_session: None, + inversed_nonce_coeff_shares: None, + delegation_status: None, + result: None, + }), + }) + } - /// Wait for session completion. - pub fn wait(&self) -> Result { - Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) - .expect("wait_session returns Some if called without timeout; qed") - } + /// Wait for session completion. + pub fn wait(&self) -> Result { + Self::wait_session(&self.core.completed, &self.data, None, |data| { + data.result.clone() + }) + .expect("wait_session returns Some if called without timeout; qed") + } - /// Delegate session to other node. 
- pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> { - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } + /// Delegate session to other node. + pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> { + if self.core.meta.master_node_id != self.core.meta.self_node_id { + return Err(Error::InvalidStateForRequest); + } - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() { - return Err(Error::InvalidStateForRequest); - } + let mut data = self.data.lock(); + if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization + || data.delegation_status.is_some() + { + return Err(Error::InvalidStateForRequest); + } - data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(false); - self.core.cluster.send(&master, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(EcdsaSigningSessionDelegation { + data.consensus_session + .consensus_job_mut() + .executor_mut() + .set_has_key_share(false); + self.core.cluster.send(&master, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(EcdsaSigningSessionDelegation { session: self.core.meta.id.clone().into(), sub_session: self.core.access_key.clone().into(), session_nonce: self.core.nonce, @@ -249,924 +284,1343 @@ impl SessionImpl { version: version.into(), message_hash: message_hash.into(), })))?; - data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); - Ok(()) - - } - - /// Initialize signing session on master node. 
- pub fn initialize(&self, version: H256, message_hash: H256) -> Result<(), Error> { - debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); - - // check if version exists - let key_version = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share.version(&version)?, - }; - - // select nodes to participate in consensus etablish session - let mut data = self.data.lock(); - let non_isolated_nodes = self.core.cluster.nodes(); - let mut consensus_nodes: BTreeSet<_> = key_version.id_numbers.keys() - .filter(|n| non_isolated_nodes.contains(*n)) - .cloned() - .chain(::std::iter::once(self.core.meta.self_node_id.clone())) - .collect(); - if let Some(&DelegationStatus::DelegatedFrom(delegation_master, _)) = data.delegation_status.as_ref() { - consensus_nodes.remove(&delegation_master); - } - - // start consensus establish sesssion - data.consensus_session.consensus_job_mut().transport_mut().version = Some(version.clone()); - data.version = Some(version.clone()); - data.message_hash = Some(message_hash); - data.consensus_session.initialize(consensus_nodes)?; - - // consensus established => threshold is 0 => we can generate signature on this node - if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { - data.result = Some(sign(&key_version.secret_share, &message_hash).map_err(Into::into)); - self.core.completed.notify_all(); - } - - Ok(()) - } - - /// Process signing message. 
- pub fn process_message(&self, sender: &NodeId, message: &EcdsaSigningMessage) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } - - match message { - &EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref message) => - self.on_consensus_message(sender, message), - &EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref message) => - self.on_signature_nonce_generation_message(sender, message), - &EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref message) => - self.on_inversion_nonce_generation_message(sender, message), - &EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref message) => - self.on_inversion_zero_generation_message(sender, message), - &EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(ref message) => - self.on_inversed_nonce_coeff_share(sender, message), - &EcdsaSigningMessage::EcdsaRequestPartialSignature(ref message) => - self.on_partial_signature_requested(sender, message), - &EcdsaSigningMessage::EcdsaPartialSignature(ref message) => - self.on_partial_signature(sender, message), - &EcdsaSigningMessage::EcdsaSigningSessionError(ref message) => - self.process_node_error(Some(&sender), message.error.clone()), - &EcdsaSigningMessage::EcdsaSigningSessionCompleted(ref message) => - self.on_session_completed(sender, message), - &EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref message) => - self.on_session_delegated(sender, message), - &EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(ref message) => - self.on_session_delegation_completed(sender, message), - } - } - - /// When session is delegated to this node. 
- pub fn on_session_delegated(&self, sender: &NodeId, message: &EcdsaSigningSessionDelegation) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - - { - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() { - return Err(Error::InvalidStateForRequest); - } - - data.consensus_session.consensus_job_mut().executor_mut().set_requester(message.requester.clone().into()); - data.delegation_status = Some(DelegationStatus::DelegatedFrom(sender.clone(), message.session_nonce)); - } - - self.initialize(message.version.clone().into(), message.message_hash.clone().into()) - } - - /// When delegated session is completed on other node. - pub fn on_session_delegation_completed(&self, sender: &NodeId, message: &EcdsaSigningSessionDelegationCompleted) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } - - let mut data = self.data.lock(); - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (), - _ => return Err(Error::InvalidMessage), - } - - Self::set_signing_result(&self.core, &mut *data, Ok(message.signature.clone().into())); - - Ok(()) - } - - /// When consensus-related message is received. 
- pub fn on_consensus_message(&self, sender: &NodeId, message: &EcdsaSigningConsensusMessage) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - - if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message { - let version = msg.version.clone().into(); - let has_key_share = self.core.key_share.as_ref() - .map(|ks| ks.version(&version).is_ok()) - .unwrap_or(false); - data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(has_key_share); - data.version = Some(version); - } - data.consensus_session.on_consensus_message(&sender, &message.message)?; - - let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { - return Ok(()); - } - - let key_share = self.core.key_share.as_ref() - .expect("this is master node; master node is selected so that it has key version; qed"); - let key_version = key_share.version(data.version.as_ref() - .expect("this is master node; master node is selected so that it has key version; qed"))?; - - let consensus_group = data.consensus_session.select_consensus_group()?.clone(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - let consensus_group_map: BTreeMap<_, _> = consensus_group.iter().map(|n| (n.clone(), key_version.id_numbers[n].clone())).collect(); - - // start generation of signature nonce - let sig_nonce_generation_session = Self::start_generation_session(&self.core, &other_consensus_group_nodes, - |s, k, n, m| 
EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage( - EcdsaSignatureNonceGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - })); - sig_nonce_generation_session.initialize(Default::default(), Default::default(), false, key_share.threshold, consensus_group_map.clone().into())?; - data.sig_nonce_generation_session = Some(sig_nonce_generation_session); - - // start generation of inversed nonce computation session - let inv_nonce_generation_session = Self::start_generation_session(&self.core, &other_consensus_group_nodes, - move |s, k, n, m| EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage( - EcdsaInversionNonceGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - })); - inv_nonce_generation_session.initialize(Default::default(), Default::default(), false, key_share.threshold, consensus_group_map.clone().into())?; - data.inv_nonce_generation_session = Some(inv_nonce_generation_session); - - // start generation of zero-secret shares for inversed nonce computation session - let inv_zero_generation_session = Self::start_generation_session(&self.core, &other_consensus_group_nodes, - move |s, k, n, m| EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage( - EcdsaInversionZeroGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - })); - inv_zero_generation_session.initialize(Default::default(), Default::default(), true, key_share.threshold * 2, consensus_group_map.clone().into())?; - data.inv_zero_generation_session = Some(inv_zero_generation_session); - - data.state = SessionState::NoncesGenerating; - - Ok(()) - } - - /// When signature nonce generation message is received. 
- pub fn on_signature_nonce_generation_message(&self, sender: &NodeId, message: &EcdsaSignatureNonceGenerationMessage) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - if let &GenerationMessage::InitializeSession(ref message) = &message.message { - if &self.core.meta.master_node_id != sender { - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), - _ => return Err(Error::InvalidMessage), - } - } - - let consensus_group: BTreeSet = message.nodes.keys().cloned().map(Into::into).collect(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - - data.sig_nonce_generation_session = Some(Self::start_generation_session(&self.core, &other_consensus_group_nodes, - |s, k, n, m| EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage( - EcdsaSignatureNonceGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - }))); - - data.state = SessionState::NoncesGenerating; - } - - { - let generation_session = data.sig_nonce_generation_session.as_ref().ok_or(Error::InvalidStateForRequest)?; - let is_key_generating = generation_session.state() != GenerationSessionState::Finished; - generation_session.process_message(sender, &message.message)?; - - let is_key_generated = generation_session.state() == GenerationSessionState::Finished; - if !is_key_generating || !is_key_generated { - return Ok(()); - } - } - - if !Self::check_nonces_generated(&*data) { - return Ok(()); - } - - Self::send_inversed_nonce_coeff_share(&self.core, &mut*data)?; - data.state = if self.core.meta.master_node_id != self.core.meta.self_node_id { - SessionState::SignatureComputing - } else { - SessionState::WaitingForInversedNonceShares - }; - - Ok(()) 
- } - - /// When inversion nonce generation message is received. - pub fn on_inversion_nonce_generation_message(&self, sender: &NodeId, message: &EcdsaInversionNonceGenerationMessage) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - if let &GenerationMessage::InitializeSession(ref message) = &message.message { - if &self.core.meta.master_node_id != sender { - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), - _ => return Err(Error::InvalidMessage), - } - } - - let consensus_group: BTreeSet = message.nodes.keys().cloned().map(Into::into).collect(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - - data.inv_nonce_generation_session = Some(Self::start_generation_session(&self.core, &other_consensus_group_nodes, - |s, k, n, m| EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage( - EcdsaInversionNonceGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - }))); - - data.state = SessionState::NoncesGenerating; - } - - { - let generation_session = data.inv_nonce_generation_session.as_ref().ok_or(Error::InvalidStateForRequest)?; - let is_key_generating = generation_session.state() != GenerationSessionState::Finished; - generation_session.process_message(sender, &message.message)?; - - let is_key_generated = generation_session.state() == GenerationSessionState::Finished; - if !is_key_generating || !is_key_generated { - return Ok(()); - } - } - - if !Self::check_nonces_generated(&*data) { - return Ok(()); - } - - Self::send_inversed_nonce_coeff_share(&self.core, &mut*data)?; - data.state = if self.core.meta.master_node_id != self.core.meta.self_node_id { - SessionState::SignatureComputing - } 
else { - SessionState::WaitingForInversedNonceShares - }; - - Ok(()) - } - - /// When inversion zero generation message is received. - pub fn on_inversion_zero_generation_message(&self, sender: &NodeId, message: &EcdsaInversionZeroGenerationMessage) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - if let &GenerationMessage::InitializeSession(ref message) = &message.message { - if &self.core.meta.master_node_id != sender { - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), - _ => return Err(Error::InvalidMessage), - } - } - - let consensus_group: BTreeSet = message.nodes.keys().cloned().map(Into::into).collect(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - - data.inv_zero_generation_session = Some(Self::start_generation_session(&self.core, &other_consensus_group_nodes, - |s, k, n, m| EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage( - EcdsaInversionZeroGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - }))); - - data.state = SessionState::NoncesGenerating; - } - - { - let generation_session = data.inv_zero_generation_session.as_ref().ok_or(Error::InvalidStateForRequest)?; - let is_key_generating = generation_session.state() != GenerationSessionState::Finished; - generation_session.process_message(sender, &message.message)?; - - let is_key_generated = generation_session.state() == GenerationSessionState::Finished; - if !is_key_generating || !is_key_generated { - return Ok(()); - } - } - - if !Self::check_nonces_generated(&*data) { - return Ok(()); - } - - Self::send_inversed_nonce_coeff_share(&self.core, &mut*data)?; - data.state = if self.core.meta.master_node_id != 
self.core.meta.self_node_id { - SessionState::SignatureComputing - } else { - SessionState::WaitingForInversedNonceShares - }; - - Ok(()) - } - - /// When inversed nonce share is received. - pub fn on_inversed_nonce_coeff_share(&self, sender: &NodeId, message: &EcdsaSigningInversedNonceCoeffShare) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - if self.core.meta.self_node_id != self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - match data.state { - SessionState::WaitingForInversedNonceShares => (), - SessionState::NoncesGenerating => return Err(Error::TooEarlyForRequest), - _ => return Err(Error::InvalidStateForRequest), - } - - let inversed_nonce_coeff = { - let consensus_group = data.consensus_session.select_consensus_group()?.clone(); - { - let inversed_nonce_coeff_shares = data.inversed_nonce_coeff_shares.as_mut() + data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); + Ok(()) + } + + /// Initialize signing session on master node. 
+ pub fn initialize(&self, version: H256, message_hash: H256) -> Result<(), Error> { + debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); + + // check if version exists + let key_version = match self.core.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share.version(&version)?, + }; + + // select nodes to participate in consensus establish session + let mut data = self.data.lock(); + let non_isolated_nodes = self.core.cluster.nodes(); + let mut consensus_nodes: BTreeSet<_> = key_version + .id_numbers + .keys() + .filter(|n| non_isolated_nodes.contains(*n)) + .cloned() + .chain(::std::iter::once(self.core.meta.self_node_id.clone())) + .collect(); + if let Some(&DelegationStatus::DelegatedFrom(delegation_master, _)) = + data.delegation_status.as_ref() + { + consensus_nodes.remove(&delegation_master); + } + + // start consensus establish session + data.consensus_session + .consensus_job_mut() + .transport_mut() + .version = Some(version.clone()); + data.version = Some(version.clone()); + data.message_hash = Some(message_hash); + data.consensus_session.initialize(consensus_nodes)?; + + // consensus established => threshold is 0 => we can generate signature on this node + if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { + data.result = Some(sign(&key_version.secret_share, &message_hash).map_err(Into::into)); + self.core.completed.notify_all(); + } + + Ok(()) + } + + /// Process signing message.
+ pub fn process_message( + &self, + sender: &NodeId, + message: &EcdsaSigningMessage, + ) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } + + match message { + &EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref message) => { + self.on_consensus_message(sender, message) + } + &EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref message) => { + self.on_signature_nonce_generation_message(sender, message) + } + &EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref message) => { + self.on_inversion_nonce_generation_message(sender, message) + } + &EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref message) => { + self.on_inversion_zero_generation_message(sender, message) + } + &EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(ref message) => { + self.on_inversed_nonce_coeff_share(sender, message) + } + &EcdsaSigningMessage::EcdsaRequestPartialSignature(ref message) => { + self.on_partial_signature_requested(sender, message) + } + &EcdsaSigningMessage::EcdsaPartialSignature(ref message) => { + self.on_partial_signature(sender, message) + } + &EcdsaSigningMessage::EcdsaSigningSessionError(ref message) => { + self.process_node_error(Some(&sender), message.error.clone()) + } + &EcdsaSigningMessage::EcdsaSigningSessionCompleted(ref message) => { + self.on_session_completed(sender, message) + } + &EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref message) => { + self.on_session_delegated(sender, message) + } + &EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(ref message) => { + self.on_session_delegation_completed(sender, message) + } + } + } + + /// When session is delegated to this node. 
+ pub fn on_session_delegated( + &self, + sender: &NodeId, + message: &EcdsaSigningSessionDelegation, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + + { + let mut data = self.data.lock(); + if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization + || data.delegation_status.is_some() + { + return Err(Error::InvalidStateForRequest); + } + + data.consensus_session + .consensus_job_mut() + .executor_mut() + .set_requester(message.requester.clone().into()); + data.delegation_status = Some(DelegationStatus::DelegatedFrom( + sender.clone(), + message.session_nonce, + )); + } + + self.initialize( + message.version.clone().into(), + message.message_hash.clone().into(), + ) + } + + /// When delegated session is completed on other node. + pub fn on_session_delegation_completed( + &self, + sender: &NodeId, + message: &EcdsaSigningSessionDelegationCompleted, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + + if self.core.meta.master_node_id != self.core.meta.self_node_id { + return Err(Error::InvalidStateForRequest); + } + + let mut data = self.data.lock(); + match data.delegation_status.as_ref() { + Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (), + _ => return Err(Error::InvalidMessage), + } + + Self::set_signing_result(&self.core, &mut *data, Ok(message.signature.clone().into())); + + Ok(()) + } + + /// When consensus-related message is received. 
+ pub fn on_consensus_message( + &self, + sender: &NodeId, + message: &EcdsaSigningConsensusMessage, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + let is_establishing_consensus = + data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + + if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message { + let version = msg.version.clone().into(); + let has_key_share = self + .core + .key_share + .as_ref() + .map(|ks| ks.version(&version).is_ok()) + .unwrap_or(false); + data.consensus_session + .consensus_job_mut() + .executor_mut() + .set_has_key_share(has_key_share); + data.version = Some(version); + } + data.consensus_session + .on_consensus_message(&sender, &message.message)?; + + let is_consensus_established = + data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; + if self.core.meta.self_node_id != self.core.meta.master_node_id + || !is_establishing_consensus + || !is_consensus_established + { + return Ok(()); + } + + let key_share = + self.core.key_share.as_ref().expect( + "this is master node; master node is selected so that it has key version; qed", + ); + let key_version = key_share.version(data.version.as_ref().expect( + "this is master node; master node is selected so that it has key version; qed", + ))?; + + let consensus_group = data.consensus_session.select_consensus_group()?.clone(); + let mut other_consensus_group_nodes = consensus_group.clone(); + other_consensus_group_nodes.remove(&self.core.meta.self_node_id); + let consensus_group_map: BTreeMap<_, _> = consensus_group + .iter() + .map(|n| (n.clone(), key_version.id_numbers[n].clone())) + .collect(); + + // start generation of signature nonce + let sig_nonce_generation_session = Self::start_generation_session( + &self.core, + 
&other_consensus_group_nodes, + |s, k, n, m| { + EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage( + EcdsaSignatureNonceGenerationMessage { + session: s.into(), + sub_session: k.into(), + session_nonce: n, + message: m, + }, + ) + }, + ); + sig_nonce_generation_session.initialize( + Default::default(), + Default::default(), + false, + key_share.threshold, + consensus_group_map.clone().into(), + )?; + data.sig_nonce_generation_session = Some(sig_nonce_generation_session); + + // start generation of inversed nonce computation session + let inv_nonce_generation_session = Self::start_generation_session( + &self.core, + &other_consensus_group_nodes, + move |s, k, n, m| { + EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage( + EcdsaInversionNonceGenerationMessage { + session: s.into(), + sub_session: k.into(), + session_nonce: n, + message: m, + }, + ) + }, + ); + inv_nonce_generation_session.initialize( + Default::default(), + Default::default(), + false, + key_share.threshold, + consensus_group_map.clone().into(), + )?; + data.inv_nonce_generation_session = Some(inv_nonce_generation_session); + + // start generation of zero-secret shares for inversed nonce computation session + let inv_zero_generation_session = Self::start_generation_session( + &self.core, + &other_consensus_group_nodes, + move |s, k, n, m| { + EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage( + EcdsaInversionZeroGenerationMessage { + session: s.into(), + sub_session: k.into(), + session_nonce: n, + message: m, + }, + ) + }, + ); + inv_zero_generation_session.initialize( + Default::default(), + Default::default(), + true, + key_share.threshold * 2, + consensus_group_map.clone().into(), + )?; + data.inv_zero_generation_session = Some(inv_zero_generation_session); + + data.state = SessionState::NoncesGenerating; + + Ok(()) + } + + /// When signature nonce generation message is received. 
+ pub fn on_signature_nonce_generation_message( + &self, + sender: &NodeId, + message: &EcdsaSignatureNonceGenerationMessage, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + + if let &GenerationMessage::InitializeSession(ref message) = &message.message { + if &self.core.meta.master_node_id != sender { + match data.delegation_status.as_ref() { + Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), + _ => return Err(Error::InvalidMessage), + } + } + + let consensus_group: BTreeSet<NodeId> = + message.nodes.keys().cloned().map(Into::into).collect(); + let mut other_consensus_group_nodes = consensus_group.clone(); + other_consensus_group_nodes.remove(&self.core.meta.self_node_id); + + data.sig_nonce_generation_session = Some(Self::start_generation_session( + &self.core, + &other_consensus_group_nodes, + |s, k, n, m| { + EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage( + EcdsaSignatureNonceGenerationMessage { + session: s.into(), + sub_session: k.into(), + session_nonce: n, + message: m, + }, + ) + }, + )); + + data.state = SessionState::NoncesGenerating; + } + + { + let generation_session = data + .sig_nonce_generation_session + .as_ref() + .ok_or(Error::InvalidStateForRequest)?; + let is_key_generating = generation_session.state() != GenerationSessionState::Finished; + generation_session.process_message(sender, &message.message)?; + + let is_key_generated = generation_session.state() == GenerationSessionState::Finished; + if !is_key_generating || !is_key_generated { + return Ok(()); + } + } + + if !Self::check_nonces_generated(&*data) { + return Ok(()); + } + + Self::send_inversed_nonce_coeff_share(&self.core, &mut *data)?; + data.state = if self.core.meta.master_node_id != self.core.meta.self_node_id { + SessionState::SignatureComputing + } else { +
SessionState::WaitingForInversedNonceShares + }; + + Ok(()) + } + + /// When inversion nonce generation message is received. + pub fn on_inversion_nonce_generation_message( + &self, + sender: &NodeId, + message: &EcdsaInversionNonceGenerationMessage, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + + if let &GenerationMessage::InitializeSession(ref message) = &message.message { + if &self.core.meta.master_node_id != sender { + match data.delegation_status.as_ref() { + Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), + _ => return Err(Error::InvalidMessage), + } + } + + let consensus_group: BTreeSet<NodeId> = + message.nodes.keys().cloned().map(Into::into).collect(); + let mut other_consensus_group_nodes = consensus_group.clone(); + other_consensus_group_nodes.remove(&self.core.meta.self_node_id); + + data.inv_nonce_generation_session = Some(Self::start_generation_session( + &self.core, + &other_consensus_group_nodes, + |s, k, n, m| { + EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage( + EcdsaInversionNonceGenerationMessage { + session: s.into(), + sub_session: k.into(), + session_nonce: n, + message: m, + }, + ) + }, + )); + + data.state = SessionState::NoncesGenerating; + } + + { + let generation_session = data + .inv_nonce_generation_session + .as_ref() + .ok_or(Error::InvalidStateForRequest)?; + let is_key_generating = generation_session.state() != GenerationSessionState::Finished; + generation_session.process_message(sender, &message.message)?; + + let is_key_generated = generation_session.state() == GenerationSessionState::Finished; + if !is_key_generating || !is_key_generated { + return Ok(()); + } + } + + if !Self::check_nonces_generated(&*data) { + return Ok(()); + } + + Self::send_inversed_nonce_coeff_share(&self.core, &mut *data)?; + data.state = if
self.core.meta.master_node_id != self.core.meta.self_node_id { + SessionState::SignatureComputing + } else { + SessionState::WaitingForInversedNonceShares + }; + + Ok(()) + } + + /// When inversion zero generation message is received. + pub fn on_inversion_zero_generation_message( + &self, + sender: &NodeId, + message: &EcdsaInversionZeroGenerationMessage, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + + if let &GenerationMessage::InitializeSession(ref message) = &message.message { + if &self.core.meta.master_node_id != sender { + match data.delegation_status.as_ref() { + Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), + _ => return Err(Error::InvalidMessage), + } + } + + let consensus_group: BTreeSet = + message.nodes.keys().cloned().map(Into::into).collect(); + let mut other_consensus_group_nodes = consensus_group.clone(); + other_consensus_group_nodes.remove(&self.core.meta.self_node_id); + + data.inv_zero_generation_session = Some(Self::start_generation_session( + &self.core, + &other_consensus_group_nodes, + |s, k, n, m| { + EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage( + EcdsaInversionZeroGenerationMessage { + session: s.into(), + sub_session: k.into(), + session_nonce: n, + message: m, + }, + ) + }, + )); + + data.state = SessionState::NoncesGenerating; + } + + { + let generation_session = data + .inv_zero_generation_session + .as_ref() + .ok_or(Error::InvalidStateForRequest)?; + let is_key_generating = generation_session.state() != GenerationSessionState::Finished; + generation_session.process_message(sender, &message.message)?; + + let is_key_generated = generation_session.state() == GenerationSessionState::Finished; + if !is_key_generating || !is_key_generated { + return Ok(()); + } + } + + if !Self::check_nonces_generated(&*data) { + 
return Ok(()); + } + + Self::send_inversed_nonce_coeff_share(&self.core, &mut *data)?; + data.state = if self.core.meta.master_node_id != self.core.meta.self_node_id { + SessionState::SignatureComputing + } else { + SessionState::WaitingForInversedNonceShares + }; + + Ok(()) + } + + /// When inversed nonce share is received. + pub fn on_inversed_nonce_coeff_share( + &self, + sender: &NodeId, + message: &EcdsaSigningInversedNonceCoeffShare, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + + if self.core.meta.self_node_id != self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + match data.state { + SessionState::WaitingForInversedNonceShares => (), + SessionState::NoncesGenerating => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } + + let inversed_nonce_coeff = { + let consensus_group = data.consensus_session.select_consensus_group()?.clone(); + { + let inversed_nonce_coeff_shares = data.inversed_nonce_coeff_shares.as_mut() .expect("we are in WaitingForInversedNonceShares state; inversed_nonce_coeff_shares are filled before this state; qed"); - match inversed_nonce_coeff_shares.entry(sender.clone()) { - Entry::Occupied(_) => return Err(Error::InvalidStateForRequest), - Entry::Vacant(entry) => { - entry.insert(message.inversed_nonce_coeff_share.clone().into()); - }, - } + match inversed_nonce_coeff_shares.entry(sender.clone()) { + Entry::Occupied(_) => return Err(Error::InvalidStateForRequest), + Entry::Vacant(entry) => { + entry.insert(message.inversed_nonce_coeff_share.clone().into()); + } + } - if consensus_group.iter().any(|n| !inversed_nonce_coeff_shares.contains_key(n)) { - return Ok(()); - } - } + if consensus_group + .iter() + .any(|n| !inversed_nonce_coeff_shares.contains_key(n)) + { + return Ok(()); + } 
+ } - Self::compute_inversed_nonce_coeff(&self.core, &*data)? - }; + Self::compute_inversed_nonce_coeff(&self.core, &*data)? + }; - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let message_hash = data.message_hash + let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); + let message_hash = data.message_hash .expect("we are on master node; on master node message_hash is filled in initialize(); on_generation_message follows initialize; qed"); - let nonce_exists_proof = "nonce is generated before signature is computed; we are in SignatureComputing state; qed"; - let sig_nonce_public = data.sig_nonce_generation_session.as_ref().expect(nonce_exists_proof).joint_public_and_secret().expect(nonce_exists_proof)?.0; - let inv_nonce_share = data.inv_nonce_generation_session.as_ref().expect(nonce_exists_proof).joint_public_and_secret().expect(nonce_exists_proof)?.2; + let nonce_exists_proof = "nonce is generated before signature is computed; we are in SignatureComputing state; qed"; + let sig_nonce_public = data + .sig_nonce_generation_session + .as_ref() + .expect(nonce_exists_proof) + .joint_public_and_secret() + .expect(nonce_exists_proof)? + .0; + let inv_nonce_share = data + .inv_nonce_generation_session + .as_ref() + .expect(nonce_exists_proof) + .joint_public_and_secret() + .expect(nonce_exists_proof)? + .2; - self.core.disseminate_jobs(&mut data.consensus_session, &version, sig_nonce_public, inv_nonce_share, inversed_nonce_coeff, message_hash) - } + self.core.disseminate_jobs( + &mut data.consensus_session, + &version, + sig_nonce_public, + inv_nonce_share, + inversed_nonce_coeff, + message_hash, + ) + } - /// When partial signature is requested. 
- pub fn on_partial_signature_requested(&self, sender: &NodeId, message: &EcdsaRequestPartialSignature) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When partial signature is requested. + pub fn on_partial_signature_requested( + &self, + sender: &NodeId, + message: &EcdsaRequestPartialSignature, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let key_share = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; + let key_share = match self.core.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share, + }; - let mut data = self.data.lock(); + let mut data = self.data.lock(); - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - if data.state != SessionState::SignatureComputing { - return Err(Error::InvalidStateForRequest); - } + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if data.state != SessionState::SignatureComputing { + return Err(Error::InvalidStateForRequest); + } - let nonce_exists_proof = "nonce is generated before signature is computed; we are in SignatureComputing state; qed"; - let sig_nonce_public = data.sig_nonce_generation_session.as_ref().expect(nonce_exists_proof).joint_public_and_secret().expect(nonce_exists_proof)?.0; - let inv_nonce_share = data.inv_nonce_generation_session.as_ref().expect(nonce_exists_proof).joint_public_and_secret().expect(nonce_exists_proof)?.2; + let nonce_exists_proof = "nonce is generated before signature is computed; we are in SignatureComputing state; qed"; + let sig_nonce_public = data + .sig_nonce_generation_session + 
.as_ref() + .expect(nonce_exists_proof) + .joint_public_and_secret() + .expect(nonce_exists_proof)? + .0; + let inv_nonce_share = data + .inv_nonce_generation_session + .as_ref() + .expect(nonce_exists_proof) + .joint_public_and_secret() + .expect(nonce_exists_proof)? + .2; - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let key_version = key_share.version(&version)?.hash.clone(); + let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); + let key_version = key_share.version(&version)?.hash.clone(); - let signing_job = EcdsaSigningJob::new_on_slave(key_share.clone(), key_version, sig_nonce_public, inv_nonce_share)?; - let signing_transport = self.core.signing_transport(); + let signing_job = EcdsaSigningJob::new_on_slave( + key_share.clone(), + key_version, + sig_nonce_public, + inv_nonce_share, + )?; + let signing_transport = self.core.signing_transport(); - data.consensus_session.on_job_request(sender, EcdsaPartialSigningRequest { - id: message.request_id.clone().into(), - inversed_nonce_coeff: message.inversed_nonce_coeff.clone().into(), - message_hash: message.message_hash.clone().into(), - }, signing_job, signing_transport).map(|_| ()) - } + data.consensus_session + .on_job_request( + sender, + EcdsaPartialSigningRequest { + id: message.request_id.clone().into(), + inversed_nonce_coeff: message.inversed_nonce_coeff.clone().into(), + message_hash: message.message_hash.clone().into(), + }, + signing_job, + signing_transport, + ) + .map(|_| ()) + } - /// When partial signature is received. - pub fn on_partial_signature(&self, sender: &NodeId, message: &EcdsaPartialSignature) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When partial signature is received. 
+ pub fn on_partial_signature( + &self, + sender: &NodeId, + message: &EcdsaPartialSignature, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let mut data = self.data.lock(); - data.consensus_session.on_job_response(sender, EcdsaPartialSigningResponse { - request_id: message.request_id.clone().into(), - partial_signature_s: message.partial_signature_s.clone().into(), - })?; + let mut data = self.data.lock(); + data.consensus_session.on_job_response( + sender, + EcdsaPartialSigningResponse { + request_id: message.request_id.clone().into(), + partial_signature_s: message.partial_signature_s.clone().into(), + }, + )?; - if data.consensus_session.state() != ConsensusSessionState::Finished { - return Ok(()); - } + if data.consensus_session.state() != ConsensusSessionState::Finished { + return Ok(()); + } - // send compeltion signal to all nodes, except for rejected nodes - for node in data.consensus_session.consensus_non_rejected_nodes() { - self.core.cluster.send(&node, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionCompleted(EcdsaSigningSessionCompleted { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - })))?; - } + // send completion signal to all nodes, except for rejected nodes + for node in data.consensus_session.consensus_non_rejected_nodes() { + self.core.cluster.send( + &node, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionCompleted( + EcdsaSigningSessionCompleted { + session: self.core.meta.id.clone().into(), + sub_session: self.core.access_key.clone().into(), + session_nonce: self.core.nonce, + }, + )), + )?; + } - let result = data.consensus_session.result()?; - Self::set_signing_result(&self.core, &mut *data, Ok(result)); + let result = data.consensus_session.result()?; +
Self::set_signing_result(&self.core, &mut *data, Ok(result)); - Ok(()) - } + Ok(()) + } - /// When session is completed. - pub fn on_session_completed(&self, sender: &NodeId, message: &EcdsaSigningSessionCompleted) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When session is completed. + pub fn on_session_completed( + &self, + sender: &NodeId, + message: &EcdsaSigningSessionCompleted, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - self.data.lock().consensus_session.on_session_completed(sender) - } + self.data + .lock() + .consensus_session + .on_session_completed(sender) + } - /// Process error from the other node. - fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> { - let mut data = self.data.lock(); - let is_self_node_error = node.map(|n| n == &self.core.meta.self_node_id).unwrap_or(false); - // error is always fatal if coming from this node - if is_self_node_error { - Self::set_signing_result(&self.core, &mut *data, Err(error.clone())); - return Err(error); - } + /// Process error from the other node. 
+ fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> { + let mut data = self.data.lock(); + let is_self_node_error = node + .map(|n| n == &self.core.meta.self_node_id) + .unwrap_or(false); + // error is always fatal if coming from this node + if is_self_node_error { + Self::set_signing_result(&self.core, &mut *data, Err(error.clone())); + return Err(error); + } - match { - match node { - Some(node) => data.consensus_session.on_node_error(node, error.clone()), - None => data.consensus_session.on_session_timeout(), - } - } { - Ok(false) => { - Ok(()) - }, - Ok(true) => { - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); + match { + match node { + Some(node) => data.consensus_session.on_node_error(node, error.clone()), + None => data.consensus_session.on_session_timeout(), + } + } { + Ok(false) => Ok(()), + Ok(true) => { + let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let message_hash = data.message_hash.as_ref().cloned() + let message_hash = data.message_hash.as_ref().cloned() .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed"); - let nonce_exists_proof = "on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when nonces generation has completed; qed"; - let sig_nonce_public = data.sig_nonce_generation_session.as_ref().expect(nonce_exists_proof).joint_public_and_secret().expect(nonce_exists_proof)?.0; - let inv_nonce_share = data.inv_nonce_generation_session.as_ref().expect(nonce_exists_proof).joint_public_and_secret().expect(nonce_exists_proof)?.2; + let nonce_exists_proof = "on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when nonces generation has completed; qed"; + let sig_nonce_public = data + 
.sig_nonce_generation_session + .as_ref() + .expect(nonce_exists_proof) + .joint_public_and_secret() + .expect(nonce_exists_proof)? + .0; + let inv_nonce_share = data + .inv_nonce_generation_session + .as_ref() + .expect(nonce_exists_proof) + .joint_public_and_secret() + .expect(nonce_exists_proof)? + .2; - let inversed_nonce_coeff = Self::compute_inversed_nonce_coeff(&self.core, &*data)?; + let inversed_nonce_coeff = Self::compute_inversed_nonce_coeff(&self.core, &*data)?; - let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, &version, sig_nonce_public, inv_nonce_share, inversed_nonce_coeff, message_hash); - match disseminate_result { - Ok(()) => Ok(()), - Err(err) => { - warn!("{}: ECDSA signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); - Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - } - } - }, - Err(err) => { - warn!("{}: ECDSA signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); - Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - }, - } - } + let disseminate_result = self.core.disseminate_jobs( + &mut data.consensus_session, + &version, + sig_nonce_public, + inv_nonce_share, + inversed_nonce_coeff, + message_hash, + ); + match disseminate_result { + Ok(()) => Ok(()), + Err(err) => { + warn!( + "{}: ECDSA signing session failed with error: {:?} from {:?}", + &self.core.meta.self_node_id, error, node + ); + Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); + Err(err) + } + } + } + Err(err) => { + warn!( + "{}: ECDSA signing session failed with error: {:?} from {:?}", + &self.core.meta.self_node_id, error, node + ); + Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); + Err(err) + } + } + } - /// Start generation session. 
- fn start_generation_session(core: &SessionCore, other_consensus_group_nodes: &BTreeSet, map_message: F) -> GenerationSession - where F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + Send + Sync + 'static { - GenerationSession::new(GenerationSessionParams { - id: core.meta.id.clone(), - self_node_id: core.meta.self_node_id.clone(), - key_storage: None, - cluster: Arc::new(NonceGenerationTransport { - id: core.meta.id.clone(), - access_key: core.access_key.clone(), - nonce: core.nonce, - cluster: core.cluster.clone(), - other_nodes_ids: other_consensus_group_nodes.clone(), - map: map_message, - }), - nonce: None, - }) - } + /// Start generation session. + fn start_generation_session( + core: &SessionCore, + other_consensus_group_nodes: &BTreeSet, + map_message: F, + ) -> GenerationSession + where + F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + + Send + + Sync + + 'static, + { + GenerationSession::new(GenerationSessionParams { + id: core.meta.id.clone(), + self_node_id: core.meta.self_node_id.clone(), + key_storage: None, + cluster: Arc::new(NonceGenerationTransport { + id: core.meta.id.clone(), + access_key: core.access_key.clone(), + nonce: core.nonce, + cluster: core.cluster.clone(), + other_nodes_ids: other_consensus_group_nodes.clone(), + map: map_message, + }), + nonce: None, + }) + } - /// Set signing session result. 
- fn set_signing_result(core: &SessionCore, data: &mut SessionData, result: Result) { - if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() { - // error means can't communicate => ignore it - let _ = match result.as_ref() { - Ok(signature) => core.cluster.send(&master, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(EcdsaSigningSessionDelegationCompleted { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - signature: signature.clone().into(), - }))), - Err(error) => core.cluster.send(&master, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError(EcdsaSigningSessionError { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - error: error.clone().into(), - }))), - }; - } + /// Set signing session result. + fn set_signing_result( + core: &SessionCore, + data: &mut SessionData, + result: Result, + ) { + if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() + { + // error means can't communicate => ignore it + let _ = match result.as_ref() { + Ok(signature) => core.cluster.send( + &master, + Message::EcdsaSigning( + EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted( + EcdsaSigningSessionDelegationCompleted { + session: core.meta.id.clone().into(), + sub_session: core.access_key.clone().into(), + session_nonce: nonce, + signature: signature.clone().into(), + }, + ), + ), + ), + Err(error) => core.cluster.send( + &master, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError( + EcdsaSigningSessionError { + session: core.meta.id.clone().into(), + sub_session: core.access_key.clone().into(), + session_nonce: nonce, + error: error.clone().into(), + }, + )), + ), + }; + } - data.result = Some(result); - core.completed.notify_all(); - } + data.result = Some(result); + core.completed.notify_all(); + } - /// 
Check if all nonces are generated. - fn check_nonces_generated(data: &SessionData) -> bool { - let expect_proof = "check_nonces_generated is called when som nonce-gen session is completed; + /// Check if all nonces are generated. + fn check_nonces_generated(data: &SessionData) -> bool { + let expect_proof = + "check_nonces_generated is called when som nonce-gen session is completed; all nonce-gen sessions are created at once; qed"; - let sig_nonce_generation_session = data.sig_nonce_generation_session.as_ref().expect(expect_proof); - let inv_nonce_generation_session = data.inv_nonce_generation_session.as_ref().expect(expect_proof); - let inv_zero_generation_session = data.inv_zero_generation_session.as_ref().expect(expect_proof); + let sig_nonce_generation_session = data + .sig_nonce_generation_session + .as_ref() + .expect(expect_proof); + let inv_nonce_generation_session = data + .inv_nonce_generation_session + .as_ref() + .expect(expect_proof); + let inv_zero_generation_session = data + .inv_zero_generation_session + .as_ref() + .expect(expect_proof); - sig_nonce_generation_session.state() == GenerationSessionState::Finished - && inv_nonce_generation_session.state() == GenerationSessionState::Finished - && inv_zero_generation_session.state() == GenerationSessionState::Finished - } + sig_nonce_generation_session.state() == GenerationSessionState::Finished + && inv_nonce_generation_session.state() == GenerationSessionState::Finished + && inv_zero_generation_session.state() == GenerationSessionState::Finished + } - /// Broadcast inversed nonce share. - fn send_inversed_nonce_coeff_share(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { - let proof = "inversed nonce coeff share is sent after nonces generation is completed; qed"; + /// Broadcast inversed nonce share. 
+ fn send_inversed_nonce_coeff_share( + core: &SessionCore, + data: &mut SessionData, + ) -> Result<(), Error> { + let proof = "inversed nonce coeff share is sent after nonces generation is completed; qed"; - let sig_nonce_generation_session = data.sig_nonce_generation_session.as_ref().expect(proof); - let sig_nonce = sig_nonce_generation_session.joint_public_and_secret().expect(proof).expect(proof).2; + let sig_nonce_generation_session = data.sig_nonce_generation_session.as_ref().expect(proof); + let sig_nonce = sig_nonce_generation_session + .joint_public_and_secret() + .expect(proof) + .expect(proof) + .2; - let inv_nonce_generation_session = data.inv_nonce_generation_session.as_ref().expect(proof); - let inv_nonce = inv_nonce_generation_session.joint_public_and_secret().expect(proof).expect(proof).2; + let inv_nonce_generation_session = data.inv_nonce_generation_session.as_ref().expect(proof); + let inv_nonce = inv_nonce_generation_session + .joint_public_and_secret() + .expect(proof) + .expect(proof) + .2; - let inv_zero_generation_session = data.inv_zero_generation_session.as_ref().expect(proof); - let inv_zero = inv_zero_generation_session.joint_public_and_secret().expect(proof).expect(proof).2; + let inv_zero_generation_session = data.inv_zero_generation_session.as_ref().expect(proof); + let inv_zero = inv_zero_generation_session + .joint_public_and_secret() + .expect(proof) + .expect(proof) + .2; - let inversed_nonce_coeff_share = math::compute_ecdsa_inversed_secret_coeff_share(&sig_nonce, &inv_nonce, &inv_zero)?; - if core.meta.self_node_id == core.meta.master_node_id { - let mut inversed_nonce_coeff_shares = BTreeMap::new(); - inversed_nonce_coeff_shares.insert(core.meta.self_node_id.clone(), inversed_nonce_coeff_share); - data.inversed_nonce_coeff_shares = Some(inversed_nonce_coeff_shares); - Ok(()) - } else { - core.cluster.send(&core.meta.master_node_id, 
Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(EcdsaSigningInversedNonceCoeffShare { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: core.nonce, - inversed_nonce_coeff_share: inversed_nonce_coeff_share.into(), - }))) - } - } + let inversed_nonce_coeff_share = + math::compute_ecdsa_inversed_secret_coeff_share(&sig_nonce, &inv_nonce, &inv_zero)?; + if core.meta.self_node_id == core.meta.master_node_id { + let mut inversed_nonce_coeff_shares = BTreeMap::new(); + inversed_nonce_coeff_shares + .insert(core.meta.self_node_id.clone(), inversed_nonce_coeff_share); + data.inversed_nonce_coeff_shares = Some(inversed_nonce_coeff_shares); + Ok(()) + } else { + core.cluster.send( + &core.meta.master_node_id, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare( + EcdsaSigningInversedNonceCoeffShare { + session: core.meta.id.clone().into(), + sub_session: core.access_key.clone().into(), + session_nonce: core.nonce, + inversed_nonce_coeff_share: inversed_nonce_coeff_share.into(), + }, + )), + ) + } + } - /// Compute inversed nonce coefficient on master node. - fn compute_inversed_nonce_coeff(core: &SessionCore, data: &SessionData) -> Result { - let proof = "inversed nonce coeff is computed on master node; key version exists on master node"; - let key_share = core.key_share.as_ref().expect(proof); - let key_version = key_share.version(data.version.as_ref().expect(proof)).expect(proof); + /// Compute inversed nonce coefficient on master node. 
+ fn compute_inversed_nonce_coeff( + core: &SessionCore, + data: &SessionData, + ) -> Result { + let proof = + "inversed nonce coeff is computed on master node; key version exists on master node"; + let key_share = core.key_share.as_ref().expect(proof); + let key_version = key_share + .version(data.version.as_ref().expect(proof)) + .expect(proof); - let proof = "inversed nonce coeff is computed after all shares are received; qed"; - let inversed_nonce_coeff_shares = data.inversed_nonce_coeff_shares.as_ref().expect(proof); + let proof = "inversed nonce coeff is computed after all shares are received; qed"; + let inversed_nonce_coeff_shares = data.inversed_nonce_coeff_shares.as_ref().expect(proof); - math::compute_ecdsa_inversed_secret_coeff_from_shares(key_share.threshold, - &inversed_nonce_coeff_shares.keys().map(|n| key_version.id_numbers[n].clone()).collect::>(), - &inversed_nonce_coeff_shares.values().cloned().collect::>()) - } + math::compute_ecdsa_inversed_secret_coeff_from_shares( + key_share.threshold, + &inversed_nonce_coeff_shares + .keys() + .map(|n| key_version.id_numbers[n].clone()) + .collect::>(), + &inversed_nonce_coeff_shares + .values() + .cloned() + .collect::>(), + ) + } } impl ClusterSession for SessionImpl { - type Id = SessionIdWithSubSession; + type Id = SessionIdWithSubSession; - fn type_name() -> &'static str { - "ecdsa_signing" - } + fn type_name() -> &'static str { + "ecdsa_signing" + } - fn id(&self) -> SessionIdWithSubSession { - SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone()) - } + fn id(&self) -> SessionIdWithSubSession { + SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone()) + } - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.consensus_session.state() == ConsensusSessionState::Failed - || data.consensus_session.state() == ConsensusSessionState::Finished - || data.result.is_some() - } + fn is_finished(&self) -> bool { + let data = 
self.data.lock(); + data.consensus_session.state() == ConsensusSessionState::Failed + || data.consensus_session.state() == ConsensusSessionState::Finished + || data.result.is_some() + } - fn on_node_timeout(&self, node: &NodeId) { - // ignore error, only state matters - let _ = self.process_node_error(Some(node), Error::NodeDisconnected); - } + fn on_node_timeout(&self, node: &NodeId) { + // ignore error, only state matters + let _ = self.process_node_error(Some(node), Error::NodeDisconnected); + } - fn on_session_timeout(&self) { - // ignore error, only state matters - let _ = self.process_node_error(None, Error::NodeDisconnected); - } + fn on_session_timeout(&self) { + // ignore error, only state matters + let _ = self.process_node_error(None, Error::NodeDisconnected); + } - fn on_session_error(&self, node: &NodeId, error: Error) { - let is_fatal = self.process_node_error(Some(node), error.clone()).is_err(); - let is_this_node_error = *node == self.core.meta.self_node_id; - if is_fatal || is_this_node_error { - // error in signing session is non-fatal, if occurs on slave node - // => either respond with error - // => or broadcast error - let message = Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError(EcdsaSigningSessionError { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - })); + fn on_session_error(&self, node: &NodeId, error: Error) { + let is_fatal = self.process_node_error(Some(node), error.clone()).is_err(); + let is_this_node_error = *node == self.core.meta.self_node_id; + if is_fatal || is_this_node_error { + // error in signing session is non-fatal, if occurs on slave node + // => either respond with error + // => or broadcast error + let message = Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError( + EcdsaSigningSessionError { + session: self.core.meta.id.clone().into(), + sub_session: 
self.core.access_key.clone().into(), + session_nonce: self.core.nonce, + error: error.clone().into(), + }, + )); - // do not bother processing send error, as we already processing error - let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id { - self.core.cluster.broadcast(message) - } else { - self.core.cluster.send(&self.core.meta.master_node_id, message) - }; - } - } + // do not bother processing send error, as we already processing error + let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id { + self.core.cluster.broadcast(message) + } else { + self.core + .cluster + .send(&self.core.meta.master_node_id, message) + }; + } + } - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::EcdsaSigning(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } + fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { + match *message { + Message::EcdsaSigning(ref message) => self.process_message(sender, message), + _ => unreachable!("cluster checks message to be correct before passing; qed"), + } + } } -impl NonceGenerationTransport where F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + Send + Sync { - fn map_message(&self, message: Message) -> Result { - match message { - Message::Generation(message) => Ok(Message::EcdsaSigning((self.map)(self.id.clone(), self.access_key.clone(), self.nonce, message))), - _ => Err(Error::InvalidMessage), - } - } +impl NonceGenerationTransport +where + F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + Send + Sync, +{ + fn map_message(&self, message: Message) -> Result { + match message { + Message::Generation(message) => Ok(Message::EcdsaSigning((self.map)( + self.id.clone(), + self.access_key.clone(), + self.nonce, + message, + ))), + _ => Err(Error::InvalidMessage), + } + } } -impl 
Cluster for NonceGenerationTransport where F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + Send + Sync { - fn broadcast(&self, message: Message) -> Result<(), Error> { - let message = self.map_message(message)?; - for to in &self.other_nodes_ids { - self.cluster.send(to, message.clone())?; - } - Ok(()) - } +impl Cluster for NonceGenerationTransport +where + F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + Send + Sync, +{ + fn broadcast(&self, message: Message) -> Result<(), Error> { + let message = self.map_message(message)?; + for to in &self.other_nodes_ids { + self.cluster.send(to, message.clone())?; + } + Ok(()) + } - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { - debug_assert!(self.other_nodes_ids.contains(to)); - self.cluster.send(to, self.map_message(message)?) - } + fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { + debug_assert!(self.other_nodes_ids.contains(to)); + self.cluster.send(to, self.map_message(message)?) 
+ } - fn is_connected(&self, node: &NodeId) -> bool { - self.cluster.is_connected(node) - } + fn is_connected(&self, node: &NodeId) -> bool { + self.cluster.is_connected(node) + } - fn nodes(&self) -> BTreeSet { - self.cluster.nodes() - } + fn nodes(&self) -> BTreeSet { + self.cluster.nodes() + } - fn configured_nodes_count(&self) -> usize { - self.cluster.configured_nodes_count() - } + fn configured_nodes_count(&self) -> usize { + self.cluster.configured_nodes_count() + } - fn connected_nodes_count(&self) -> usize { - self.cluster.connected_nodes_count() - } + fn connected_nodes_count(&self) -> usize { + self.cluster.connected_nodes_count() + } } impl SessionCore { - pub fn signing_transport(&self) -> SigningJobTransport { - SigningJobTransport { - id: self.meta.id.clone(), - access_key: self.access_key.clone(), - nonce: self.nonce, - cluster: self.cluster.clone() - } - } + pub fn signing_transport(&self) -> SigningJobTransport { + SigningJobTransport { + id: self.meta.id.clone(), + access_key: self.access_key.clone(), + nonce: self.nonce, + cluster: self.cluster.clone(), + } + } - pub fn disseminate_jobs(&self, consensus_session: &mut SigningConsensusSession, version: &H256, nonce_public: Public, inv_nonce_share: Secret, inversed_nonce_coeff: Secret, message_hash: H256) -> Result<(), Error> { - let key_share = match self.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; + pub fn disseminate_jobs( + &self, + consensus_session: &mut SigningConsensusSession, + version: &H256, + nonce_public: Public, + inv_nonce_share: Secret, + inversed_nonce_coeff: Secret, + message_hash: H256, + ) -> Result<(), Error> { + let key_share = match self.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share, + }; - let key_version = key_share.version(version)?.hash.clone(); - let signing_job = EcdsaSigningJob::new_on_master(key_share.clone(), key_version, nonce_public, inv_nonce_share, 
inversed_nonce_coeff, message_hash)?; - consensus_session.disseminate_jobs(signing_job, self.signing_transport(), false).map(|_| ()) - } + let key_version = key_share.version(version)?.hash.clone(); + let signing_job = EcdsaSigningJob::new_on_master( + key_share.clone(), + key_version, + nonce_public, + inv_nonce_share, + inversed_nonce_coeff, + message_hash, + )?; + consensus_session + .disseminate_jobs(signing_job, self.signing_transport(), false) + .map(|_| ()) + } } impl JobTransport for SigningConsensusTransport { - type PartialJobRequest=Requester; - type PartialJobResponse=bool; + type PartialJobRequest = Requester; + type PartialJobResponse = bool; - fn send_partial_request(&self, node: &NodeId, request: Requester) -> Result<(), Error> { - let version = self.version.as_ref() + fn send_partial_request(&self, node: &NodeId, request: Requester) -> Result<(), Error> { + let version = self.version.as_ref() .expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed"); - self.cluster.send(node, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(EcdsaSigningConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { - requester: request.into(), - version: version.clone().into(), - }) - }))) - } + self.cluster.send( + node, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage( + EcdsaSigningConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessage::InitializeConsensusSession( + InitializeConsensusSession { + requester: request.into(), + version: version.clone().into(), + }, + ), + }, + )), + ) + } - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - 
self.cluster.send(node, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(EcdsaSigningConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: response, - }) - }))) - } + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send( + node, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage( + EcdsaSigningConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessage::ConfirmConsensusInitialization( + ConfirmConsensusInitialization { + is_confirmed: response, + }, + ), + }, + )), + ) + } } impl JobTransport for SigningJobTransport { - type PartialJobRequest=EcdsaPartialSigningRequest; - type PartialJobResponse=EcdsaPartialSigningResponse; + type PartialJobRequest = EcdsaPartialSigningRequest; + type PartialJobResponse = EcdsaPartialSigningResponse; - fn send_partial_request(&self, node: &NodeId, request: EcdsaPartialSigningRequest) -> Result<(), Error> { - self.cluster.send(node, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaRequestPartialSignature(EcdsaRequestPartialSignature { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: request.id.into(), - inversed_nonce_coeff: request.inversed_nonce_coeff.into(), - message_hash: request.message_hash.into(), - }))) - } + fn send_partial_request( + &self, + node: &NodeId, + request: EcdsaPartialSigningRequest, + ) -> Result<(), Error> { + self.cluster.send( + node, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaRequestPartialSignature( + EcdsaRequestPartialSignature { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + 
request_id: request.id.into(), + inversed_nonce_coeff: request.inversed_nonce_coeff.into(), + message_hash: request.message_hash.into(), + }, + )), + ) + } - fn send_partial_response(&self, node: &NodeId, response: EcdsaPartialSigningResponse) -> Result<(), Error> { - self.cluster.send(node, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaPartialSignature(EcdsaPartialSignature { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: response.request_id.into(), - partial_signature_s: response.partial_signature_s.into(), - }))) - } + fn send_partial_response( + &self, + node: &NodeId, + response: EcdsaPartialSigningResponse, + ) -> Result<(), Error> { + self.cluster.send( + node, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaPartialSignature( + EcdsaPartialSignature { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + request_id: response.request_id.into(), + partial_signature_s: response.partial_signature_s.into(), + }, + )), + ) + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use ethereum_types::H256; - use ethkey::{self, Random, Generator, Public, verify_public, public_to_address}; - use key_server_cluster::{SessionId, Error, KeyStorage}; - use key_server_cluster::cluster::tests::{MessageLoop as ClusterMessageLoop}; - use key_server_cluster::signing_session_ecdsa::SessionImpl; - use key_server_cluster::generation_session::tests::MessageLoop as GenerationMessageLoop; + use ethereum_types::H256; + use ethkey::{self, public_to_address, verify_public, Generator, Public, Random}; + use key_server_cluster::{ + cluster::tests::MessageLoop as ClusterMessageLoop, + generation_session::tests::MessageLoop as GenerationMessageLoop, + signing_session_ecdsa::SessionImpl, Error, KeyStorage, SessionId, + }; + use std::sync::Arc; - #[derive(Debug)] - pub struct MessageLoop(pub ClusterMessageLoop); + #[derive(Debug)] + pub struct 
MessageLoop(pub ClusterMessageLoop); - impl MessageLoop { - pub fn new(num_nodes: usize, threshold: usize) -> Result { - let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?; - ml.0.loop_until(|| ml.0.is_empty()); // complete generation session + impl MessageLoop { + pub fn new(num_nodes: usize, threshold: usize) -> Result { + let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?; + ml.0.loop_until(|| ml.0.is_empty()); // complete generation session - Ok(MessageLoop(ml.0)) - } + Ok(MessageLoop(ml.0)) + } - pub fn init_with_version(self, key_version: Option) -> Result<(Self, Public, H256), Error> { - let message_hash = H256::random(); - let requester = Random.generate().unwrap(); - let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap(); - self.0.cluster(0).client() - .new_ecdsa_signing_session(Default::default(), signature.into(), key_version, message_hash) - .map(|_| (self, *requester.public(), message_hash)) - } + pub fn init_with_version( + self, + key_version: Option, + ) -> Result<(Self, Public, H256), Error> { + let message_hash = H256::random(); + let requester = Random.generate().unwrap(); + let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap(); + self.0 + .cluster(0) + .client() + .new_ecdsa_signing_session( + Default::default(), + signature.into(), + key_version, + message_hash, + ) + .map(|_| (self, *requester.public(), message_hash)) + } - pub fn init(self) -> Result<(Self, Public, H256), Error> { - let key_version = self.0.key_storage(0).get(&Default::default()) - .unwrap().unwrap().versions.iter().last().unwrap().hash; - self.init_with_version(Some(key_version)) - } + pub fn init(self) -> Result<(Self, Public, H256), Error> { + let key_version = self + .0 + .key_storage(0) + .get(&Default::default()) + .unwrap() + .unwrap() + .versions + .iter() + .last() + .unwrap() + .hash; + self.init_with_version(Some(key_version)) + } - pub fn init_delegated(self) -> Result<(Self, Public, 
H256), Error> { - self.0.key_storage(0).remove(&Default::default()).unwrap(); - self.init_with_version(None) - } + pub fn init_delegated(self) -> Result<(Self, Public, H256), Error> { + self.0.key_storage(0).remove(&Default::default()).unwrap(); + self.init_with_version(None) + } - pub fn init_with_isolated(self) -> Result<(Self, Public, H256), Error> { - self.0.isolate(1); - self.init() - } + pub fn init_with_isolated(self) -> Result<(Self, Public, H256), Error> { + self.0.isolate(1); + self.init() + } - pub fn session_at(&self, idx: usize) -> Arc { - self.0.sessions(idx).ecdsa_signing_sessions.first().unwrap() - } + pub fn session_at(&self, idx: usize) -> Arc { + self.0.sessions(idx).ecdsa_signing_sessions.first().unwrap() + } - pub fn ensure_completed(&self) { - self.0.loop_until(|| self.0.is_empty()); - assert!(self.session_at(0).wait().is_ok()); - } - } + pub fn ensure_completed(&self) { + self.0.loop_until(|| self.0.is_empty()); + assert!(self.session_at(0).wait().is_ok()); + } + } - #[test] - fn failed_gen_ecdsa_sign_session_when_threshold_is_too_low() { - let test_cases = [(1, 2), (2, 4), (3, 6), (4, 6)]; - for &(threshold, num_nodes) in &test_cases { - assert_eq!(MessageLoop::new(num_nodes, threshold).unwrap().init().unwrap_err(), - Error::ConsensusUnreachable); - } - } + #[test] + fn failed_gen_ecdsa_sign_session_when_threshold_is_too_low() { + let test_cases = [(1, 2), (2, 4), (3, 6), (4, 6)]; + for &(threshold, num_nodes) in &test_cases { + assert_eq!( + MessageLoop::new(num_nodes, threshold) + .unwrap() + .init() + .unwrap_err(), + Error::ConsensusUnreachable + ); + } + } - #[test] - fn complete_gen_ecdsa_sign_session() { - let test_cases = [(0, 1), (2, 5), (2, 6), (3, 11), (4, 11)]; - for &(threshold, num_nodes) in &test_cases { - let (ml, _, message) = MessageLoop::new(num_nodes, threshold).unwrap().init().unwrap(); - ml.0.loop_until(|| ml.0.is_empty()); + #[test] + fn complete_gen_ecdsa_sign_session() { + let test_cases = [(0, 1), (2, 5), (2, 6), 
(3, 11), (4, 11)]; + for &(threshold, num_nodes) in &test_cases { + let (ml, _, message) = MessageLoop::new(num_nodes, threshold) + .unwrap() + .init() + .unwrap(); + ml.0.loop_until(|| ml.0.is_empty()); - let signer_public = ml.0.key_storage(0).get(&Default::default()).unwrap().unwrap().public; - let signature = ml.session_at(0).wait().unwrap(); - assert!(verify_public(&signer_public, &signature, &message).unwrap()); - } - } + let signer_public = + ml.0.key_storage(0) + .get(&Default::default()) + .unwrap() + .unwrap() + .public; + let signature = ml.session_at(0).wait().unwrap(); + assert!(verify_public(&signer_public, &signature, &message).unwrap()); + } + } - #[test] - fn ecdsa_complete_signing_session_with_single_node_failing() { - let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap(); + #[test] + fn ecdsa_complete_signing_session_with_single_node_failing() { + let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap(); - // we need at least 3-of-4 nodes to agree to reach consensus - // let's say 1 of 4 nodes disagee - ml.0.acl_storage(1).prohibit(public_to_address(&requester), Default::default()); + // we need at least 3-of-4 nodes to agree to reach consensus + // let's say 1 of 4 nodes disagee + ml.0.acl_storage(1) + .prohibit(public_to_address(&requester), Default::default()); - // then consensus reachable, but single node will disagree - ml.ensure_completed(); - } + // then consensus reachable, but single node will disagree + ml.ensure_completed(); + } - #[test] - fn ecdsa_complete_signing_session_with_acl_check_failed_on_master() { - let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap(); + #[test] + fn ecdsa_complete_signing_session_with_acl_check_failed_on_master() { + let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap(); - // we need at least 3-of-4 nodes to agree to reach consensus - // let's say 1 of 4 nodes (here: master) disagee - 
ml.0.acl_storage(0).prohibit(public_to_address(&requester), Default::default()); + // we need at least 3-of-4 nodes to agree to reach consensus + // let's say 1 of 4 nodes (here: master) disagee + ml.0.acl_storage(0) + .prohibit(public_to_address(&requester), Default::default()); - // then consensus reachable, but single node will disagree - ml.ensure_completed(); - } + // then consensus reachable, but single node will disagree + ml.ensure_completed(); + } - #[test] - fn ecdsa_signing_works_when_delegated_to_other_node() { - MessageLoop::new(4, 1).unwrap().init_delegated().unwrap().0.ensure_completed(); - } + #[test] + fn ecdsa_signing_works_when_delegated_to_other_node() { + MessageLoop::new(4, 1) + .unwrap() + .init_delegated() + .unwrap() + .0 + .ensure_completed(); + } - #[test] - fn ecdsa_signing_works_when_share_owners_are_isolated() { - MessageLoop::new(6, 2).unwrap().init_with_isolated().unwrap().0.ensure_completed(); - } + #[test] + fn ecdsa_signing_works_when_share_owners_are_isolated() { + MessageLoop::new(6, 2) + .unwrap() + .init_with_isolated() + .unwrap() + .0 + .ensure_completed(); + } } diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs b/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs index 0b0619f96..b958fa065 100644 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs +++ b/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs @@ -14,24 +14,35 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::BTreeSet; -use std::sync::Arc; -use parking_lot::{Mutex, Condvar}; -use ethkey::{Public, Secret}; use ethereum_types::H256; -use key_server_cluster::{Error, NodeId, SessionId, Requester, SessionMeta, AclStorage, DocumentKeyShare}; -use key_server_cluster::cluster::{Cluster}; -use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession}; -use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, - SessionState as GenerationSessionState}; -use key_server_cluster::message::{Message, SchnorrSigningMessage, SchnorrSigningConsensusMessage, SchnorrSigningGenerationMessage, - SchnorrRequestPartialSignature, SchnorrPartialSignature, SchnorrSigningSessionCompleted, GenerationMessage, - ConsensusMessage, SchnorrSigningSessionError, InitializeConsensusSession, ConfirmConsensusInitialization, - SchnorrSigningSessionDelegation, SchnorrSigningSessionDelegationCompleted}; -use key_server_cluster::jobs::job_session::JobTransport; -use key_server_cluster::jobs::key_access_job::KeyAccessJob; -use key_server_cluster::jobs::signing_job_schnorr::{SchnorrPartialSigningRequest, SchnorrPartialSigningResponse, SchnorrSigningJob}; -use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; +use ethkey::{Public, Secret}; +use key_server_cluster::{ + cluster::Cluster, + cluster_sessions::{ClusterSession, SessionIdWithSubSession}, + generation_session::{ + SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, + SessionState as GenerationSessionState, + }, + jobs::{ + consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, + job_session::JobTransport, + key_access_job::KeyAccessJob, + signing_job_schnorr::{ + SchnorrPartialSigningRequest, SchnorrPartialSigningResponse, SchnorrSigningJob, + }, + }, + message::{ + ConfirmConsensusInitialization, ConsensusMessage, GenerationMessage, + 
InitializeConsensusSession, Message, SchnorrPartialSignature, + SchnorrRequestPartialSignature, SchnorrSigningConsensusMessage, + SchnorrSigningGenerationMessage, SchnorrSigningMessage, SchnorrSigningSessionCompleted, + SchnorrSigningSessionDelegation, SchnorrSigningSessionDelegationCompleted, + SchnorrSigningSessionError, + }, + AclStorage, DocumentKeyShare, Error, NodeId, Requester, SessionId, SessionMeta, +}; +use parking_lot::{Condvar, Mutex}; +use std::{collections::BTreeSet, sync::Arc}; /// Distributed Schnorr-signing session. /// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Network" paper. @@ -41,190 +52,215 @@ use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, Consen /// 3) partial signing: every node which has succussfully checked access for the requestor do a partial signing /// 4) signing: master node receives all partial signatures of the secret and computes the signature pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex, } /// Immutable session data. struct SessionCore { - /// Session metadata. - pub meta: SessionMeta, - /// Signing session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, - /// Session-level nonce. - pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session metadata. + pub meta: SessionMeta, + /// Signing session access key. + pub access_key: Secret, + /// Key share. + pub key_share: Option, + /// Cluster which allows this node to send messages to other nodes in the cluster. + pub cluster: Arc, + /// Session-level nonce. + pub nonce: u64, + /// SessionImpl completion condvar. + pub completed: Condvar, } /// Signing consensus session type. 
-type SigningConsensusSession = ConsensusSession; +type SigningConsensusSession = ConsensusSession< + KeyAccessJob, + SigningConsensusTransport, + SchnorrSigningJob, + SigningJobTransport, +>; /// Mutable session data. struct SessionData { - /// Session state. - pub state: SessionState, - /// Message hash. - pub message_hash: Option, - /// Key version to use for decryption. - pub version: Option, - /// Consensus-based signing session. - pub consensus_session: SigningConsensusSession, - /// Session key generation session. - pub generation_session: Option, - /// Delegation status. - pub delegation_status: Option, - /// Decryption result. - pub result: Option>, + /// Session state. + pub state: SessionState, + /// Message hash. + pub message_hash: Option, + /// Key version to use for decryption. + pub version: Option, + /// Consensus-based signing session. + pub consensus_session: SigningConsensusSession, + /// Session key generation session. + pub generation_session: Option, + /// Delegation status. + pub delegation_status: Option, + /// Decryption result. + pub result: Option>, } /// Signing session state. #[derive(Debug, PartialEq)] #[cfg_attr(test, derive(Clone, Copy))] pub enum SessionState { - /// State when consensus is establishing. - ConsensusEstablishing, - /// State when session key is generating. - SessionKeyGeneration, - /// State when signature is computing. - SignatureComputing, + /// State when consensus is establishing. + ConsensusEstablishing, + /// State when session key is generating. + SessionKeyGeneration, + /// State when signature is computing. + SignatureComputing, } /// Session creation parameters pub struct SessionParams { - /// Session metadata. - pub meta: SessionMeta, - /// Session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// ACL storage. - pub acl_storage: Arc, - /// Cluster - pub cluster: Arc, - /// Session nonce. - pub nonce: u64, + /// Session metadata. 
+ pub meta: SessionMeta, + /// Session access key. + pub access_key: Secret, + /// Key share. + pub key_share: Option, + /// ACL storage. + pub acl_storage: Arc, + /// Cluster + pub cluster: Arc, + /// Session nonce. + pub nonce: u64, } /// Signing consensus transport. struct SigningConsensusTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Selected key version (on master node). - version: Option, - /// Cluster. - cluster: Arc, + /// Session id. + id: SessionId, + /// Session access key. + access_key: Secret, + /// Session-level nonce. + nonce: u64, + /// Selected key version (on master node). + version: Option, + /// Cluster. + cluster: Arc, } /// Signing key generation transport. struct SessionKeyGenerationTransport { - /// Session access key. - access_key: Secret, - /// Cluster. - cluster: Arc, - /// Session-level nonce. - nonce: u64, - /// Other nodes ids. - other_nodes_ids: BTreeSet, + /// Session access key. + access_key: Secret, + /// Cluster. + cluster: Arc, + /// Session-level nonce. + nonce: u64, + /// Other nodes ids. + other_nodes_ids: BTreeSet, } /// Signing job transport struct SigningJobTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, + /// Session id. + id: SessionId, + /// Session access key. + access_key: Secret, + /// Session-level nonce. + nonce: u64, + /// Cluster. + cluster: Arc, } /// Session delegation status. enum DelegationStatus { - /// Delegated to other node. - DelegatedTo(NodeId), - /// Delegated from other node. - DelegatedFrom(NodeId, u64), + /// Delegated to other node. + DelegatedTo(NodeId), + /// Delegated from other node. + DelegatedFrom(NodeId, u64), } impl SessionImpl { - /// Create new signing session. 
- pub fn new(params: SessionParams, requester: Option) -> Result { - debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default()); + /// Create new signing session. + pub fn new(params: SessionParams, requester: Option) -> Result { + debug_assert_eq!( + params.meta.threshold, + params + .key_share + .as_ref() + .map(|ks| ks.threshold) + .unwrap_or_default() + ); - let consensus_transport = SigningConsensusTransport { - id: params.meta.id.clone(), - access_key: params.access_key.clone(), - nonce: params.nonce, - version: None, - cluster: params.cluster.clone(), - }; - let consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: params.meta.clone(), - consensus_executor: match requester { - Some(requester) => KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), requester), - None => KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()), - }, - consensus_transport: consensus_transport, - })?; + let consensus_transport = SigningConsensusTransport { + id: params.meta.id.clone(), + access_key: params.access_key.clone(), + nonce: params.nonce, + version: None, + cluster: params.cluster.clone(), + }; + let consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: params.meta.clone(), + consensus_executor: match requester { + Some(requester) => KeyAccessJob::new_on_master( + params.meta.id.clone(), + params.acl_storage.clone(), + requester, + ), + None => { + KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()) + } + }, + consensus_transport: consensus_transport, + })?; - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - access_key: params.access_key, - key_share: params.key_share, - cluster: params.cluster, - nonce: params.nonce, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::ConsensusEstablishing, - message_hash: None, - version: None, - consensus_session: 
consensus_session, - generation_session: None, - delegation_status: None, - result: None, - }), - }) - } + Ok(SessionImpl { + core: SessionCore { + meta: params.meta, + access_key: params.access_key, + key_share: params.key_share, + cluster: params.cluster, + nonce: params.nonce, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + state: SessionState::ConsensusEstablishing, + message_hash: None, + version: None, + consensus_session: consensus_session, + generation_session: None, + delegation_status: None, + result: None, + }), + }) + } - /// Get session state. - #[cfg(test)] - pub fn state(&self) -> SessionState { - self.data.lock().state - } + /// Get session state. + #[cfg(test)] + pub fn state(&self) -> SessionState { + self.data.lock().state + } - /// Wait for session completion. - pub fn wait(&self) -> Result<(Secret, Secret), Error> { - Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) - .expect("wait_session returns Some if called without timeout; qed") - } + /// Wait for session completion. + pub fn wait(&self) -> Result<(Secret, Secret), Error> { + Self::wait_session(&self.core.completed, &self.data, None, |data| { + data.result.clone() + }) + .expect("wait_session returns Some if called without timeout; qed") + } - /// Delegate session to other node. - pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> { - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } + /// Delegate session to other node. 
+ pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> { + if self.core.meta.master_node_id != self.core.meta.self_node_id { + return Err(Error::InvalidStateForRequest); + } - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() { - return Err(Error::InvalidStateForRequest); - } + let mut data = self.data.lock(); + if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization + || data.delegation_status.is_some() + { + return Err(Error::InvalidStateForRequest); + } - data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(false); - self.core.cluster.send(&master, Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(SchnorrSigningSessionDelegation { + data.consensus_session + .consensus_job_mut() + .executor_mut() + .set_has_key_share(false); + self.core.cluster.send(&master, Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(SchnorrSigningSessionDelegation { session: self.core.meta.id.clone().into(), sub_session: self.core.access_key.clone().into(), session_nonce: self.core.nonce, @@ -234,878 +270,1209 @@ impl SessionImpl { version: version.into(), message_hash: message_hash.into(), })))?; - data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); - Ok(()) + data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); + Ok(()) + } - } + /// Initialize signing session on master node. + pub fn initialize(&self, version: H256, message_hash: H256) -> Result<(), Error> { + debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); - /// Initialize signing session on master node. 
- pub fn initialize(&self, version: H256, message_hash: H256) -> Result<(), Error> { - debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); + // check if version exists + let key_version = match self.core.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share.version(&version)?, + }; - // check if version exists - let key_version = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share.version(&version)?, - }; + let mut data = self.data.lock(); + let non_isolated_nodes = self.core.cluster.nodes(); + let mut consensus_nodes: BTreeSet<_> = key_version + .id_numbers + .keys() + .filter(|n| non_isolated_nodes.contains(*n)) + .cloned() + .chain(::std::iter::once(self.core.meta.self_node_id.clone())) + .collect(); + if let Some(&DelegationStatus::DelegatedFrom(delegation_master, _)) = + data.delegation_status.as_ref() + { + consensus_nodes.remove(&delegation_master); + } - let mut data = self.data.lock(); - let non_isolated_nodes = self.core.cluster.nodes(); - let mut consensus_nodes: BTreeSet<_> = key_version.id_numbers.keys() - .filter(|n| non_isolated_nodes.contains(*n)) - .cloned() - .chain(::std::iter::once(self.core.meta.self_node_id.clone())) - .collect(); - if let Some(&DelegationStatus::DelegatedFrom(delegation_master, _)) = data.delegation_status.as_ref() { - consensus_nodes.remove(&delegation_master); - } + data.consensus_session + .consensus_job_mut() + .transport_mut() + .version = Some(version.clone()); + data.version = Some(version.clone()); + data.message_hash = Some(message_hash); + data.consensus_session.initialize(consensus_nodes)?; - data.consensus_session.consensus_job_mut().transport_mut().version = Some(version.clone()); - data.version = Some(version.clone()); - data.message_hash = Some(message_hash); - data.consensus_session.initialize(consensus_nodes)?; + if data.consensus_session.state() == 
ConsensusSessionState::ConsensusEstablished { + let generation_session = GenerationSession::new(GenerationSessionParams { + id: self.core.meta.id.clone(), + self_node_id: self.core.meta.self_node_id.clone(), + key_storage: None, + cluster: Arc::new(SessionKeyGenerationTransport { + access_key: self.core.access_key.clone(), + cluster: self.core.cluster.clone(), + nonce: self.core.nonce, + other_nodes_ids: BTreeSet::new(), + }), + nonce: None, + }); + generation_session.initialize( + Default::default(), + Default::default(), + false, + 0, + vec![self.core.meta.self_node_id.clone()] + .into_iter() + .collect::>() + .into(), + )?; - if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { - let generation_session = GenerationSession::new(GenerationSessionParams { - id: self.core.meta.id.clone(), - self_node_id: self.core.meta.self_node_id.clone(), - key_storage: None, - cluster: Arc::new(SessionKeyGenerationTransport { - access_key: self.core.access_key.clone(), - cluster: self.core.cluster.clone(), - nonce: self.core.nonce, - other_nodes_ids: BTreeSet::new() - }), - nonce: None, - }); - generation_session.initialize(Default::default(), Default::default(), false, 0, vec![self.core.meta.self_node_id.clone()].into_iter().collect::>().into())?; - - debug_assert_eq!(generation_session.state(), GenerationSessionState::Finished); - let joint_public_and_secret = generation_session + debug_assert_eq!(generation_session.state(), GenerationSessionState::Finished); + let joint_public_and_secret = generation_session .joint_public_and_secret() .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?; - data.generation_session = Some(generation_session); - data.state = SessionState::SignatureComputing; + data.generation_session = Some(generation_session); + data.state = SessionState::SignatureComputing; - self.core.disseminate_jobs(&mut data.consensus_session, &version, joint_public_and_secret.0, 
joint_public_and_secret.1, message_hash)?; + self.core.disseminate_jobs( + &mut data.consensus_session, + &version, + joint_public_and_secret.0, + joint_public_and_secret.1, + message_hash, + )?; - debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished); - let result = data.consensus_session.result()?; - Self::set_signing_result(&self.core, &mut *data, Ok(result)); - } + debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished); + let result = data.consensus_session.result()?; + Self::set_signing_result(&self.core, &mut *data, Ok(result)); + } - Ok(()) - } + Ok(()) + } - /// Process signing message. - pub fn process_message(&self, sender: &NodeId, message: &SchnorrSigningMessage) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } + /// Process signing message. + pub fn process_message( + &self, + sender: &NodeId, + message: &SchnorrSigningMessage, + ) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } - match message { - &SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref message) => - self.on_consensus_message(sender, message), - &SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref message) => - self.on_generation_message(sender, message), - &SchnorrSigningMessage::SchnorrRequestPartialSignature(ref message) => - self.on_partial_signature_requested(sender, message), - &SchnorrSigningMessage::SchnorrPartialSignature(ref message) => - self.on_partial_signature(sender, message), - &SchnorrSigningMessage::SchnorrSigningSessionError(ref message) => - self.process_node_error(Some(&sender), message.error.clone()), - &SchnorrSigningMessage::SchnorrSigningSessionCompleted(ref message) => - self.on_session_completed(sender, message), - &SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref message) => - self.on_session_delegated(sender, message), - 
&SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(ref message) => - self.on_session_delegation_completed(sender, message), - } - } + match message { + &SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref message) => { + self.on_consensus_message(sender, message) + } + &SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref message) => { + self.on_generation_message(sender, message) + } + &SchnorrSigningMessage::SchnorrRequestPartialSignature(ref message) => { + self.on_partial_signature_requested(sender, message) + } + &SchnorrSigningMessage::SchnorrPartialSignature(ref message) => { + self.on_partial_signature(sender, message) + } + &SchnorrSigningMessage::SchnorrSigningSessionError(ref message) => { + self.process_node_error(Some(&sender), message.error.clone()) + } + &SchnorrSigningMessage::SchnorrSigningSessionCompleted(ref message) => { + self.on_session_completed(sender, message) + } + &SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref message) => { + self.on_session_delegated(sender, message) + } + &SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(ref message) => { + self.on_session_delegation_completed(sender, message) + } + } + } - /// When session is delegated to this node. - pub fn on_session_delegated(&self, sender: &NodeId, message: &SchnorrSigningSessionDelegation) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); + /// When session is delegated to this node. 
+ pub fn on_session_delegated( + &self, + sender: &NodeId, + message: &SchnorrSigningSessionDelegation, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); - { - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() { - return Err(Error::InvalidStateForRequest); - } + { + let mut data = self.data.lock(); + if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization + || data.delegation_status.is_some() + { + return Err(Error::InvalidStateForRequest); + } - data.consensus_session.consensus_job_mut().executor_mut().set_requester(message.requester.clone().into()); - data.delegation_status = Some(DelegationStatus::DelegatedFrom(sender.clone(), message.session_nonce)); - } + data.consensus_session + .consensus_job_mut() + .executor_mut() + .set_requester(message.requester.clone().into()); + data.delegation_status = Some(DelegationStatus::DelegatedFrom( + sender.clone(), + message.session_nonce, + )); + } - self.initialize(message.version.clone().into(), message.message_hash.clone().into()) - } + self.initialize( + message.version.clone().into(), + message.message_hash.clone().into(), + ) + } - /// When delegated session is completed on other node. - pub fn on_session_delegation_completed(&self, sender: &NodeId, message: &SchnorrSigningSessionDelegationCompleted) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); + /// When delegated session is completed on other node. 
+ pub fn on_session_delegation_completed( + &self, + sender: &NodeId, + message: &SchnorrSigningSessionDelegationCompleted, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } + if self.core.meta.master_node_id != self.core.meta.self_node_id { + return Err(Error::InvalidStateForRequest); + } - let mut data = self.data.lock(); - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (), - _ => return Err(Error::InvalidMessage), - } + let mut data = self.data.lock(); + match data.delegation_status.as_ref() { + Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (), + _ => return Err(Error::InvalidMessage), + } - Self::set_signing_result(&self.core, &mut *data, Ok((message.signature_c.clone().into(), message.signature_s.clone().into()))); + Self::set_signing_result( + &self.core, + &mut *data, + Ok(( + message.signature_c.clone().into(), + message.signature_s.clone().into(), + )), + ); - Ok(()) - } + Ok(()) + } - /// When consensus-related message is received. - pub fn on_consensus_message(&self, sender: &NodeId, message: &SchnorrSigningConsensusMessage) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When consensus-related message is received. 
+ pub fn on_consensus_message( + &self, + sender: &NodeId, + message: &SchnorrSigningConsensusMessage, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let mut data = self.data.lock(); - let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + let mut data = self.data.lock(); + let is_establishing_consensus = + data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message { - let version = msg.version.clone().into(); - let has_key_share = self.core.key_share.as_ref() - .map(|ks| ks.version(&version).is_ok()) - .unwrap_or(false); - data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(has_key_share); - data.version = Some(version); - } - data.consensus_session.on_consensus_message(&sender, &message.message)?; + if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message { + let version = msg.version.clone().into(); + let has_key_share = self + .core + .key_share + .as_ref() + .map(|ks| ks.version(&version).is_ok()) + .unwrap_or(false); + data.consensus_session + .consensus_job_mut() + .executor_mut() + .set_has_key_share(has_key_share); + data.version = Some(version); + } + data.consensus_session + .on_consensus_message(&sender, &message.message)?; - let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { - return Ok(()); - } + let is_consensus_established = + data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; + if self.core.meta.self_node_id != self.core.meta.master_node_id + || !is_establishing_consensus + || 
!is_consensus_established + { + return Ok(()); + } - let consensus_group = data.consensus_session.select_consensus_group()?.clone(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); + let consensus_group = data.consensus_session.select_consensus_group()?.clone(); + let mut other_consensus_group_nodes = consensus_group.clone(); + other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - let key_share = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; + let key_share = match self.core.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share, + }; - let generation_session = GenerationSession::new(GenerationSessionParams { - id: self.core.meta.id.clone(), - self_node_id: self.core.meta.self_node_id.clone(), - key_storage: None, - cluster: Arc::new(SessionKeyGenerationTransport { - access_key: self.core.access_key.clone(), - cluster: self.core.cluster.clone(), - nonce: self.core.nonce, - other_nodes_ids: other_consensus_group_nodes, - }), - nonce: None, - }); + let generation_session = GenerationSession::new(GenerationSessionParams { + id: self.core.meta.id.clone(), + self_node_id: self.core.meta.self_node_id.clone(), + key_storage: None, + cluster: Arc::new(SessionKeyGenerationTransport { + access_key: self.core.access_key.clone(), + cluster: self.core.cluster.clone(), + nonce: self.core.nonce, + other_nodes_ids: other_consensus_group_nodes, + }), + nonce: None, + }); - generation_session.initialize(Default::default(), Default::default(), false, key_share.threshold, consensus_group.into())?; - data.generation_session = Some(generation_session); - data.state = SessionState::SessionKeyGeneration; + generation_session.initialize( + Default::default(), + Default::default(), + false, + key_share.threshold, + consensus_group.into(), + )?; + data.generation_session = 
Some(generation_session); + data.state = SessionState::SessionKeyGeneration; - Ok(()) - } + Ok(()) + } - /// When session key related message is received. - pub fn on_generation_message(&self, sender: &NodeId, message: &SchnorrSigningGenerationMessage) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When session key related message is received. + pub fn on_generation_message( + &self, + sender: &NodeId, + message: &SchnorrSigningGenerationMessage, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let mut data = self.data.lock(); + let mut data = self.data.lock(); - if let &GenerationMessage::InitializeSession(ref message) = &message.message { - if &self.core.meta.master_node_id != sender { - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), - _ => return Err(Error::InvalidMessage), - } - } + if let &GenerationMessage::InitializeSession(ref message) = &message.message { + if &self.core.meta.master_node_id != sender { + match data.delegation_status.as_ref() { + Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), + _ => return Err(Error::InvalidMessage), + } + } - let consensus_group: BTreeSet = message.nodes.keys().cloned().map(Into::into).collect(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); + let consensus_group: BTreeSet = + message.nodes.keys().cloned().map(Into::into).collect(); + let mut other_consensus_group_nodes = consensus_group.clone(); + other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - let generation_session = GenerationSession::new(GenerationSessionParams { - id: 
self.core.meta.id.clone(), - self_node_id: self.core.meta.self_node_id.clone(), - key_storage: None, - cluster: Arc::new(SessionKeyGenerationTransport { - access_key: self.core.access_key.clone(), - cluster: self.core.cluster.clone(), - nonce: self.core.nonce, - other_nodes_ids: other_consensus_group_nodes - }), - nonce: None, - }); - data.generation_session = Some(generation_session); - data.state = SessionState::SessionKeyGeneration; - } + let generation_session = GenerationSession::new(GenerationSessionParams { + id: self.core.meta.id.clone(), + self_node_id: self.core.meta.self_node_id.clone(), + key_storage: None, + cluster: Arc::new(SessionKeyGenerationTransport { + access_key: self.core.access_key.clone(), + cluster: self.core.cluster.clone(), + nonce: self.core.nonce, + other_nodes_ids: other_consensus_group_nodes, + }), + nonce: None, + }); + data.generation_session = Some(generation_session); + data.state = SessionState::SessionKeyGeneration; + } - { - let generation_session = data.generation_session.as_ref().ok_or(Error::InvalidStateForRequest)?; - let is_key_generating = generation_session.state() != GenerationSessionState::Finished; - generation_session.process_message(sender, &message.message)?; + { + let generation_session = data + .generation_session + .as_ref() + .ok_or(Error::InvalidStateForRequest)?; + let is_key_generating = generation_session.state() != GenerationSessionState::Finished; + generation_session.process_message(sender, &message.message)?; - let is_key_generated = generation_session.state() == GenerationSessionState::Finished; - if !is_key_generating || !is_key_generated { - return Ok(()); - } - } + let is_key_generated = generation_session.state() == GenerationSessionState::Finished; + if !is_key_generating || !is_key_generated { + return Ok(()); + } + } - data.state = SessionState::SignatureComputing; - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Ok(()); - } + data.state = 
SessionState::SignatureComputing; + if self.core.meta.master_node_id != self.core.meta.self_node_id { + return Ok(()); + } - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let message_hash = data.message_hash + let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); + let message_hash = data.message_hash .expect("we are on master node; on master node message_hash is filled in initialize(); on_generation_message follows initialize; qed"); - let joint_public_and_secret = data.generation_session.as_ref() + let joint_public_and_secret = data.generation_session.as_ref() .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed") .joint_public_and_secret() .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?; - self.core.disseminate_jobs(&mut data.consensus_session, &version, joint_public_and_secret.0, joint_public_and_secret.1, message_hash) - } + self.core.disseminate_jobs( + &mut data.consensus_session, + &version, + joint_public_and_secret.0, + joint_public_and_secret.1, + message_hash, + ) + } - /// When partial signature is requested. - pub fn on_partial_signature_requested(&self, sender: &NodeId, message: &SchnorrRequestPartialSignature) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When partial signature is requested. 
+ pub fn on_partial_signature_requested( + &self, + sender: &NodeId, + message: &SchnorrRequestPartialSignature, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let key_share = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; + let key_share = match self.core.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share, + }; - let mut data = self.data.lock(); + let mut data = self.data.lock(); - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - if data.state != SessionState::SignatureComputing { - return Err(Error::InvalidStateForRequest); - } + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if data.state != SessionState::SignatureComputing { + return Err(Error::InvalidStateForRequest); + } - let joint_public_and_secret = data.generation_session.as_ref() + let joint_public_and_secret = data.generation_session.as_ref() .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed") .joint_public_and_secret() .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?; - let key_version = key_share.version(data.version.as_ref().ok_or(Error::InvalidMessage)?)?.hash.clone(); - let signing_job = SchnorrSigningJob::new_on_slave(self.core.meta.self_node_id.clone(), key_share.clone(), key_version, joint_public_and_secret.0, joint_public_and_secret.1)?; - let signing_transport = self.core.signing_transport(); + let key_version = key_share + .version(data.version.as_ref().ok_or(Error::InvalidMessage)?)? 
+ .hash + .clone(); + let signing_job = SchnorrSigningJob::new_on_slave( + self.core.meta.self_node_id.clone(), + key_share.clone(), + key_version, + joint_public_and_secret.0, + joint_public_and_secret.1, + )?; + let signing_transport = self.core.signing_transport(); - data.consensus_session.on_job_request(sender, SchnorrPartialSigningRequest { - id: message.request_id.clone().into(), - message_hash: message.message_hash.clone().into(), - other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(), - }, signing_job, signing_transport).map(|_| ()) - } + data.consensus_session + .on_job_request( + sender, + SchnorrPartialSigningRequest { + id: message.request_id.clone().into(), + message_hash: message.message_hash.clone().into(), + other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(), + }, + signing_job, + signing_transport, + ) + .map(|_| ()) + } - /// When partial signature is received. - pub fn on_partial_signature(&self, sender: &NodeId, message: &SchnorrPartialSignature) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When partial signature is received. 
+ pub fn on_partial_signature( + &self, + sender: &NodeId, + message: &SchnorrPartialSignature, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let mut data = self.data.lock(); - data.consensus_session.on_job_response(sender, SchnorrPartialSigningResponse { - request_id: message.request_id.clone().into(), - partial_signature: message.partial_signature.clone().into(), - })?; + let mut data = self.data.lock(); + data.consensus_session.on_job_response( + sender, + SchnorrPartialSigningResponse { + request_id: message.request_id.clone().into(), + partial_signature: message.partial_signature.clone().into(), + }, + )?; - if data.consensus_session.state() != ConsensusSessionState::Finished { - return Ok(()); - } + if data.consensus_session.state() != ConsensusSessionState::Finished { + return Ok(()); + } - // send compeltion signal to all nodes, except for rejected nodes - for node in data.consensus_session.consensus_non_rejected_nodes() { - self.core.cluster.send(&node, Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionCompleted(SchnorrSigningSessionCompleted { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - })))?; - } + // send compeltion signal to all nodes, except for rejected nodes + for node in data.consensus_session.consensus_non_rejected_nodes() { + self.core.cluster.send( + &node, + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionCompleted( + SchnorrSigningSessionCompleted { + session: self.core.meta.id.clone().into(), + sub_session: self.core.access_key.clone().into(), + session_nonce: self.core.nonce, + }, + )), + )?; + } - let result = data.consensus_session.result()?; - Self::set_signing_result(&self.core, &mut *data, Ok(result)); + let result = data.consensus_session.result()?; + 
Self::set_signing_result(&self.core, &mut *data, Ok(result)); - Ok(()) - } + Ok(()) + } - /// When session is completed. - pub fn on_session_completed(&self, sender: &NodeId, message: &SchnorrSigningSessionCompleted) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); + /// When session is completed. + pub fn on_session_completed( + &self, + sender: &NodeId, + message: &SchnorrSigningSessionCompleted, + ) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - self.data.lock().consensus_session.on_session_completed(sender) - } + self.data + .lock() + .consensus_session + .on_session_completed(sender) + } - /// Process error from the other node. - fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> { - let mut data = self.data.lock(); - let is_self_node_error = node.map(|n| n == &self.core.meta.self_node_id).unwrap_or(false); - // error is always fatal if coming from this node - if is_self_node_error { - Self::set_signing_result(&self.core, &mut *data, Err(error.clone())); - return Err(error); - } + /// Process error from the other node. 
+ fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> { + let mut data = self.data.lock(); + let is_self_node_error = node + .map(|n| n == &self.core.meta.self_node_id) + .unwrap_or(false); + // error is always fatal if coming from this node + if is_self_node_error { + Self::set_signing_result(&self.core, &mut *data, Err(error.clone())); + return Err(error); + } - match { - match node { - Some(node) => data.consensus_session.on_node_error(node, error.clone()), - None => data.consensus_session.on_session_timeout(), - } - } { - Ok(false) => { - Ok(()) - }, - Ok(true) => { - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let message_hash = data.message_hash.as_ref().cloned() + match { + match node { + Some(node) => data.consensus_session.on_node_error(node, error.clone()), + None => data.consensus_session.on_session_timeout(), + } + } { + Ok(false) => Ok(()), + Ok(true) => { + let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); + let message_hash = data.message_hash.as_ref().cloned() .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed"); - let joint_public_and_secret = data.generation_session.as_ref() + let joint_public_and_secret = data.generation_session.as_ref() .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed") .joint_public_and_secret() .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed")?; - let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, &version, joint_public_and_secret.0, joint_public_and_secret.1, message_hash); - match disseminate_result { - Ok(()) => Ok(()), - Err(err) => { - warn!("{}: signing 
session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); - Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - } - } - }, - Err(err) => { - warn!("{}: signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); - Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - }, - } - } + let disseminate_result = self.core.disseminate_jobs( + &mut data.consensus_session, + &version, + joint_public_and_secret.0, + joint_public_and_secret.1, + message_hash, + ); + match disseminate_result { + Ok(()) => Ok(()), + Err(err) => { + warn!( + "{}: signing session failed with error: {:?} from {:?}", + &self.core.meta.self_node_id, error, node + ); + Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); + Err(err) + } + } + } + Err(err) => { + warn!( + "{}: signing session failed with error: {:?} from {:?}", + &self.core.meta.self_node_id, error, node + ); + Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); + Err(err) + } + } + } - /// Set signing session result. 
- fn set_signing_result(core: &SessionCore, data: &mut SessionData, result: Result<(Secret, Secret), Error>) { - if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() { - // error means can't communicate => ignore it - let _ = match result.as_ref() { - Ok(signature) => core.cluster.send(&master, Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(SchnorrSigningSessionDelegationCompleted { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - signature_c: signature.0.clone().into(), - signature_s: signature.1.clone().into(), - }))), - Err(error) => core.cluster.send(&master, Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError(SchnorrSigningSessionError { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - error: error.clone().into(), - }))), - }; - } + /// Set signing session result. 
+ fn set_signing_result( + core: &SessionCore, + data: &mut SessionData, + result: Result<(Secret, Secret), Error>, + ) { + if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() + { + // error means can't communicate => ignore it + let _ = match result.as_ref() { + Ok(signature) => core.cluster.send( + &master, + Message::SchnorrSigning( + SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted( + SchnorrSigningSessionDelegationCompleted { + session: core.meta.id.clone().into(), + sub_session: core.access_key.clone().into(), + session_nonce: nonce, + signature_c: signature.0.clone().into(), + signature_s: signature.1.clone().into(), + }, + ), + ), + ), + Err(error) => core.cluster.send( + &master, + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError( + SchnorrSigningSessionError { + session: core.meta.id.clone().into(), + sub_session: core.access_key.clone().into(), + session_nonce: nonce, + error: error.clone().into(), + }, + )), + ), + }; + } - data.result = Some(result); - core.completed.notify_all(); - } + data.result = Some(result); + core.completed.notify_all(); + } } impl ClusterSession for SessionImpl { - type Id = SessionIdWithSubSession; + type Id = SessionIdWithSubSession; - fn type_name() -> &'static str { - "signing" - } + fn type_name() -> &'static str { + "signing" + } - fn id(&self) -> SessionIdWithSubSession { - SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone()) - } + fn id(&self) -> SessionIdWithSubSession { + SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone()) + } - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.consensus_session.state() == ConsensusSessionState::Failed - || data.consensus_session.state() == ConsensusSessionState::Finished - || data.result.is_some() - } + fn is_finished(&self) -> bool { + let data = self.data.lock(); + data.consensus_session.state() == 
ConsensusSessionState::Failed + || data.consensus_session.state() == ConsensusSessionState::Finished + || data.result.is_some() + } - fn on_node_timeout(&self, node: &NodeId) { - // ignore error, only state matters - let _ = self.process_node_error(Some(node), Error::NodeDisconnected); - } + fn on_node_timeout(&self, node: &NodeId) { + // ignore error, only state matters + let _ = self.process_node_error(Some(node), Error::NodeDisconnected); + } - fn on_session_timeout(&self) { - // ignore error, only state matters - let _ = self.process_node_error(None, Error::NodeDisconnected); - } + fn on_session_timeout(&self) { + // ignore error, only state matters + let _ = self.process_node_error(None, Error::NodeDisconnected); + } - fn on_session_error(&self, node: &NodeId, error: Error) { - let is_fatal = self.process_node_error(Some(node), error.clone()).is_err(); - let is_this_node_error = *node == self.core.meta.self_node_id; - if is_fatal || is_this_node_error { - // error in signing session is non-fatal, if occurs on slave node - // => either respond with error - // => or broadcast error - let message = Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError(SchnorrSigningSessionError { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - })); + fn on_session_error(&self, node: &NodeId, error: Error) { + let is_fatal = self.process_node_error(Some(node), error.clone()).is_err(); + let is_this_node_error = *node == self.core.meta.self_node_id; + if is_fatal || is_this_node_error { + // error in signing session is non-fatal, if occurs on slave node + // => either respond with error + // => or broadcast error + let message = Message::SchnorrSigning( + SchnorrSigningMessage::SchnorrSigningSessionError(SchnorrSigningSessionError { + session: self.core.meta.id.clone().into(), + sub_session: self.core.access_key.clone().into(), + session_nonce: 
self.core.nonce, + error: error.clone().into(), + }), + ); - // do not bother processing send error, as we already processing error - let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id { - self.core.cluster.broadcast(message) - } else { - self.core.cluster.send(&self.core.meta.master_node_id, message) - }; - } - } + // do not bother processing send error, as we already processing error + let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id { + self.core.cluster.broadcast(message) + } else { + self.core + .cluster + .send(&self.core.meta.master_node_id, message) + }; + } + } - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::SchnorrSigning(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } + fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { + match *message { + Message::SchnorrSigning(ref message) => self.process_message(sender, message), + _ => unreachable!("cluster checks message to be correct before passing; qed"), + } + } } impl SessionKeyGenerationTransport { - fn map_message(&self, message: Message) -> Result { - match message { - Message::Generation(message) => Ok(Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningGenerationMessage(SchnorrSigningGenerationMessage { - session: message.session_id().clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: message, - }))), - _ => Err(Error::InvalidMessage), - } - } + fn map_message(&self, message: Message) -> Result { + match message { + Message::Generation(message) => Ok(Message::SchnorrSigning( + SchnorrSigningMessage::SchnorrSigningGenerationMessage( + SchnorrSigningGenerationMessage { + session: message.session_id().clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + message: message, + }, + 
), + )), + _ => Err(Error::InvalidMessage), + } + } } impl Cluster for SessionKeyGenerationTransport { - fn broadcast(&self, message: Message) -> Result<(), Error> { - let message = self.map_message(message)?; - for to in &self.other_nodes_ids { - self.cluster.send(to, message.clone())?; - } - Ok(()) - } + fn broadcast(&self, message: Message) -> Result<(), Error> { + let message = self.map_message(message)?; + for to in &self.other_nodes_ids { + self.cluster.send(to, message.clone())?; + } + Ok(()) + } - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { - debug_assert!(self.other_nodes_ids.contains(to)); - self.cluster.send(to, self.map_message(message)?) - } + fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { + debug_assert!(self.other_nodes_ids.contains(to)); + self.cluster.send(to, self.map_message(message)?) + } - fn is_connected(&self, node: &NodeId) -> bool { - self.cluster.is_connected(node) - } + fn is_connected(&self, node: &NodeId) -> bool { + self.cluster.is_connected(node) + } - fn nodes(&self) -> BTreeSet { - self.cluster.nodes() - } + fn nodes(&self) -> BTreeSet { + self.cluster.nodes() + } - fn configured_nodes_count(&self) -> usize { - self.cluster.configured_nodes_count() - } + fn configured_nodes_count(&self) -> usize { + self.cluster.configured_nodes_count() + } - fn connected_nodes_count(&self) -> usize { - self.cluster.connected_nodes_count() - } + fn connected_nodes_count(&self) -> usize { + self.cluster.connected_nodes_count() + } } impl SessionCore { - pub fn signing_transport(&self) -> SigningJobTransport { - SigningJobTransport { - id: self.meta.id.clone(), - access_key: self.access_key.clone(), - nonce: self.nonce, - cluster: self.cluster.clone() - } - } + pub fn signing_transport(&self) -> SigningJobTransport { + SigningJobTransport { + id: self.meta.id.clone(), + access_key: self.access_key.clone(), + nonce: self.nonce, + cluster: self.cluster.clone(), + } + } - pub fn disseminate_jobs(&self, 
consensus_session: &mut SigningConsensusSession, version: &H256, session_public: Public, session_secret_share: Secret, message_hash: H256) -> Result<(), Error> { - let key_share = match self.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; + pub fn disseminate_jobs( + &self, + consensus_session: &mut SigningConsensusSession, + version: &H256, + session_public: Public, + session_secret_share: Secret, + message_hash: H256, + ) -> Result<(), Error> { + let key_share = match self.key_share.as_ref() { + None => return Err(Error::InvalidMessage), + Some(key_share) => key_share, + }; - let key_version = key_share.version(version)?.hash.clone(); - let signing_job = SchnorrSigningJob::new_on_master(self.meta.self_node_id.clone(), key_share.clone(), key_version, - session_public, session_secret_share, message_hash)?; - consensus_session.disseminate_jobs(signing_job, self.signing_transport(), false).map(|_| ()) - } + let key_version = key_share.version(version)?.hash.clone(); + let signing_job = SchnorrSigningJob::new_on_master( + self.meta.self_node_id.clone(), + key_share.clone(), + key_version, + session_public, + session_secret_share, + message_hash, + )?; + consensus_session + .disseminate_jobs(signing_job, self.signing_transport(), false) + .map(|_| ()) + } } impl JobTransport for SigningConsensusTransport { - type PartialJobRequest=Requester; - type PartialJobResponse=bool; + type PartialJobRequest = Requester; + type PartialJobResponse = bool; - fn send_partial_request(&self, node: &NodeId, request: Requester) -> Result<(), Error> { - let version = self.version.as_ref() + fn send_partial_request(&self, node: &NodeId, request: Requester) -> Result<(), Error> { + let version = self.version.as_ref() .expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed"); - self.cluster.send(node, 
Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(SchnorrSigningConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { - requester: request.into(), - version: version.clone().into(), - }) - }))) - } + self.cluster.send( + node, + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage( + SchnorrSigningConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessage::InitializeConsensusSession( + InitializeConsensusSession { + requester: request.into(), + version: version.clone().into(), + }, + ), + }, + )), + ) + } - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - self.cluster.send(node, Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(SchnorrSigningConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: response, - }) - }))) - } + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send( + node, + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage( + SchnorrSigningConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessage::ConfirmConsensusInitialization( + ConfirmConsensusInitialization { + is_confirmed: response, + }, + ), + }, + )), + ) + } } impl JobTransport for SigningJobTransport { - type PartialJobRequest=SchnorrPartialSigningRequest; - type PartialJobResponse=SchnorrPartialSigningResponse; + type PartialJobRequest = 
SchnorrPartialSigningRequest; + type PartialJobResponse = SchnorrPartialSigningResponse; - fn send_partial_request(&self, node: &NodeId, request: SchnorrPartialSigningRequest) -> Result<(), Error> { - self.cluster.send(node, Message::SchnorrSigning(SchnorrSigningMessage::SchnorrRequestPartialSignature(SchnorrRequestPartialSignature { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: request.id.into(), - message_hash: request.message_hash.into(), - nodes: request.other_nodes_ids.into_iter().map(Into::into).collect(), - }))) - } + fn send_partial_request( + &self, + node: &NodeId, + request: SchnorrPartialSigningRequest, + ) -> Result<(), Error> { + self.cluster.send( + node, + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrRequestPartialSignature( + SchnorrRequestPartialSignature { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + request_id: request.id.into(), + message_hash: request.message_hash.into(), + nodes: request + .other_nodes_ids + .into_iter() + .map(Into::into) + .collect(), + }, + )), + ) + } - fn send_partial_response(&self, node: &NodeId, response: SchnorrPartialSigningResponse) -> Result<(), Error> { - self.cluster.send(node, Message::SchnorrSigning(SchnorrSigningMessage::SchnorrPartialSignature(SchnorrPartialSignature { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: response.request_id.into(), - partial_signature: response.partial_signature.into(), - }))) - } + fn send_partial_response( + &self, + node: &NodeId, + response: SchnorrPartialSigningResponse, + ) -> Result<(), Error> { + self.cluster.send( + node, + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrPartialSignature( + SchnorrPartialSignature { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, + 
request_id: response.request_id.into(), + partial_signature: response.partial_signature.into(), + }, + )), + ) + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::str::FromStr; - use std::collections::BTreeMap; - use ethereum_types::{Address, H256}; - use ethkey::{self, Random, Generator, Public, Secret, public_to_address}; - use acl_storage::DummyAclStorage; - use key_server_cluster::{SessionId, Requester, SessionMeta, Error, KeyStorage}; - use key_server_cluster::cluster::tests::MessageLoop as ClusterMessageLoop; - use key_server_cluster::generation_session::tests::MessageLoop as GenerationMessageLoop; - use key_server_cluster::math; - use key_server_cluster::message::{SchnorrSigningMessage, SchnorrSigningConsensusMessage, - ConsensusMessage, ConfirmConsensusInitialization, SchnorrSigningGenerationMessage, GenerationMessage, - ConfirmInitialization, InitializeSession, SchnorrRequestPartialSignature}; - use key_server_cluster::signing_session_schnorr::{SessionImpl, SessionState, SessionParams}; + use acl_storage::DummyAclStorage; + use ethereum_types::{Address, H256}; + use ethkey::{self, public_to_address, Generator, Public, Random, Secret}; + use key_server_cluster::{ + cluster::tests::MessageLoop as ClusterMessageLoop, + generation_session::tests::MessageLoop as GenerationMessageLoop, + math, + message::{ + ConfirmConsensusInitialization, ConfirmInitialization, ConsensusMessage, + GenerationMessage, InitializeSession, SchnorrRequestPartialSignature, + SchnorrSigningConsensusMessage, SchnorrSigningGenerationMessage, SchnorrSigningMessage, + }, + signing_session_schnorr::{SessionImpl, SessionParams, SessionState}, + Error, KeyStorage, Requester, SessionId, SessionMeta, + }; + use std::{collections::BTreeMap, str::FromStr, sync::Arc}; - #[derive(Debug)] - pub struct MessageLoop(pub ClusterMessageLoop); + #[derive(Debug)] + pub struct MessageLoop(pub ClusterMessageLoop); - impl MessageLoop { - pub fn new(num_nodes: usize, threshold: usize) -> Result { - 
let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?; - ml.0.loop_until(|| ml.0.is_empty()); // complete generation session + impl MessageLoop { + pub fn new(num_nodes: usize, threshold: usize) -> Result { + let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?; + ml.0.loop_until(|| ml.0.is_empty()); // complete generation session - Ok(MessageLoop(ml.0)) - } + Ok(MessageLoop(ml.0)) + } - pub fn into_session(&self, at_node: usize) -> SessionImpl { - let requester = Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), - &SessionId::default()).unwrap())); - SessionImpl::new(SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self.0.node(at_node), - master_node_id: self.0.node(0), - threshold: self.0.key_storage(at_node).get(&Default::default()).unwrap().unwrap().threshold, - configured_nodes_count: self.0.nodes().len(), - connected_nodes_count: self.0.nodes().len(), - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: self.0.key_storage(at_node).get(&Default::default()).unwrap(), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: self.0.cluster(0).view().unwrap(), - nonce: 0, - }, requester).unwrap() - } + pub fn into_session(&self, at_node: usize) -> SessionImpl { + let requester = Some(Requester::Signature( + ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + )); + SessionImpl::new( + SessionParams { + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self.0.node(at_node), + master_node_id: self.0.node(0), + threshold: self + .0 + .key_storage(at_node) + .get(&Default::default()) + .unwrap() + .unwrap() + .threshold, + configured_nodes_count: self.0.nodes().len(), + connected_nodes_count: self.0.nodes().len(), + }, + access_key: Random.generate().unwrap().secret().clone(), + key_share: self + .0 + .key_storage(at_node) + .get(&Default::default()) + .unwrap(), + acl_storage: 
Arc::new(DummyAclStorage::default()), + cluster: self.0.cluster(0).view().unwrap(), + nonce: 0, + }, + requester, + ) + .unwrap() + } - pub fn init_with_version(self, key_version: Option) -> Result<(Self, Public, H256), Error> { - let message_hash = H256::random(); - let requester = Random.generate().unwrap(); - let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap(); - self.0.cluster(0).client().new_schnorr_signing_session( - Default::default(), - signature.into(), - key_version, - message_hash).map(|_| (self, *requester.public(), message_hash)) - } + pub fn init_with_version( + self, + key_version: Option, + ) -> Result<(Self, Public, H256), Error> { + let message_hash = H256::random(); + let requester = Random.generate().unwrap(); + let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap(); + self.0 + .cluster(0) + .client() + .new_schnorr_signing_session( + Default::default(), + signature.into(), + key_version, + message_hash, + ) + .map(|_| (self, *requester.public(), message_hash)) + } - pub fn init(self) -> Result<(Self, Public, H256), Error> { - let key_version = self.key_version(); - self.init_with_version(Some(key_version)) - } + pub fn init(self) -> Result<(Self, Public, H256), Error> { + let key_version = self.key_version(); + self.init_with_version(Some(key_version)) + } - pub fn init_delegated(self) -> Result<(Self, Public, H256), Error> { - self.0.key_storage(0).remove(&Default::default()).unwrap(); - self.init_with_version(None) - } + pub fn init_delegated(self) -> Result<(Self, Public, H256), Error> { + self.0.key_storage(0).remove(&Default::default()).unwrap(); + self.init_with_version(None) + } - pub fn init_with_isolated(self) -> Result<(Self, Public, H256), Error> { - self.0.isolate(1); - self.init() - } + pub fn init_with_isolated(self) -> Result<(Self, Public, H256), Error> { + self.0.isolate(1); + self.init() + } - pub fn init_without_share(self) -> Result<(Self, Public, H256), Error> { - let 
key_version = self.key_version(); - self.0.key_storage(0).remove(&Default::default()).unwrap(); - self.init_with_version(Some(key_version)) - } + pub fn init_without_share(self) -> Result<(Self, Public, H256), Error> { + let key_version = self.key_version(); + self.0.key_storage(0).remove(&Default::default()).unwrap(); + self.init_with_version(Some(key_version)) + } - pub fn session_at(&self, idx: usize) -> Arc { - self.0.sessions(idx).schnorr_signing_sessions.first().unwrap() - } + pub fn session_at(&self, idx: usize) -> Arc { + self.0 + .sessions(idx) + .schnorr_signing_sessions + .first() + .unwrap() + } - pub fn ensure_completed(&self) { - self.0.loop_until(|| self.0.is_empty()); - assert!(self.session_at(0).wait().is_ok()); - } + pub fn ensure_completed(&self) { + self.0.loop_until(|| self.0.is_empty()); + assert!(self.session_at(0).wait().is_ok()); + } - pub fn key_version(&self) -> H256 { - self.0.key_storage(0).get(&Default::default()) - .unwrap().unwrap().versions.iter().last().unwrap().hash - } - } + pub fn key_version(&self) -> H256 { + self.0 + .key_storage(0) + .get(&Default::default()) + .unwrap() + .unwrap() + .versions + .iter() + .last() + .unwrap() + .hash + } + } - #[test] - fn schnorr_complete_gen_sign_session() { - let test_cases = [(0, 1), (0, 5), (2, 5), (3, 5)]; - for &(threshold, num_nodes) in &test_cases { - let (ml, _, message) = MessageLoop::new(num_nodes, threshold).unwrap().init().unwrap(); - ml.0.loop_until(|| ml.0.is_empty()); + #[test] + fn schnorr_complete_gen_sign_session() { + let test_cases = [(0, 1), (0, 5), (2, 5), (3, 5)]; + for &(threshold, num_nodes) in &test_cases { + let (ml, _, message) = MessageLoop::new(num_nodes, threshold) + .unwrap() + .init() + .unwrap(); + ml.0.loop_until(|| ml.0.is_empty()); - let signer_public = ml.0.key_storage(0).get(&Default::default()).unwrap().unwrap().public; - let signature = ml.session_at(0).wait().unwrap(); - assert!(math::verify_schnorr_signature(&signer_public, &signature, 
&message).unwrap()); - } - } + let signer_public = + ml.0.key_storage(0) + .get(&Default::default()) + .unwrap() + .unwrap() + .public; + let signature = ml.session_at(0).wait().unwrap(); + assert!(math::verify_schnorr_signature(&signer_public, &signature, &message).unwrap()); + } + } - #[test] - fn schnorr_constructs_in_cluster_of_single_node() { - MessageLoop::new(1, 0).unwrap().init().unwrap(); - } + #[test] + fn schnorr_constructs_in_cluster_of_single_node() { + MessageLoop::new(1, 0).unwrap().init().unwrap(); + } - #[test] - fn schnorr_fails_to_initialize_if_does_not_have_a_share() { - assert!(MessageLoop::new(2, 1).unwrap().init_without_share().is_err()); - } + #[test] + fn schnorr_fails_to_initialize_if_does_not_have_a_share() { + assert!(MessageLoop::new(2, 1) + .unwrap() + .init_without_share() + .is_err()); + } - #[test] - fn schnorr_fails_to_initialize_if_threshold_is_wrong() { - let mut ml = MessageLoop::new(3, 2).unwrap(); - ml.0.exclude(2); - assert_eq!(ml.init().unwrap_err(), Error::ConsensusUnreachable); - } + #[test] + fn schnorr_fails_to_initialize_if_threshold_is_wrong() { + let mut ml = MessageLoop::new(3, 2).unwrap(); + ml.0.exclude(2); + assert_eq!(ml.init().unwrap_err(), Error::ConsensusUnreachable); + } - #[test] - fn schnorr_fails_to_initialize_when_already_initialized() { - let (ml, _, _) = MessageLoop::new(1, 0).unwrap().init().unwrap(); - assert_eq!(ml.session_at(0).initialize(ml.key_version(), 777.into()), - Err(Error::InvalidStateForRequest)); - } + #[test] + fn schnorr_fails_to_initialize_when_already_initialized() { + let (ml, _, _) = MessageLoop::new(1, 0).unwrap().init().unwrap(); + assert_eq!( + ml.session_at(0).initialize(ml.key_version(), 777.into()), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn schnorr_does_not_fail_when_consensus_message_received_after_consensus_established() { - let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); + #[test] + fn 
schnorr_does_not_fail_when_consensus_message_received_after_consensus_established() { + let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - // consensus is established - let session = ml.session_at(0); - ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration); + // consensus is established + let session = ml.session_at(0); + ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration); - // but 3rd node continues to send its messages - // this should not fail session - let consensus_group = session.data.lock().consensus_session.select_consensus_group().unwrap().clone(); - let mut had_3rd_message = false; - while let Some((from, to, message)) = ml.0.take_message() { - if !consensus_group.contains(&from) { - had_3rd_message = true; - ml.0.process_message(from, to, message); - } - } - assert!(had_3rd_message); - } + // but 3rd node continues to send its messages + // this should not fail session + let consensus_group = session + .data + .lock() + .consensus_session + .select_consensus_group() + .unwrap() + .clone(); + let mut had_3rd_message = false; + while let Some((from, to, message)) = ml.0.take_message() { + if !consensus_group.contains(&from) { + had_3rd_message = true; + ml.0.process_message(from, to, message); + } + } + assert!(had_3rd_message); + } - #[test] - fn schnorr_fails_when_consensus_message_is_received_when_not_initialized() { - let ml = MessageLoop::new(3, 1).unwrap(); - let session = ml.into_session(0); - assert_eq!(session.on_consensus_message(&ml.0.node(1), &SchnorrSigningConsensusMessage { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - }), - }), Err(Error::InvalidStateForRequest)); - } + #[test] + fn schnorr_fails_when_consensus_message_is_received_when_not_initialized() { + let ml = MessageLoop::new(3, 1).unwrap(); + 
let session = ml.into_session(0); + assert_eq!( + session.on_consensus_message( + &ml.0.node(1), + &SchnorrSigningConsensusMessage { + session: SessionId::default().into(), + sub_session: session.core.access_key.clone().into(), + session_nonce: 0, + message: ConsensusMessage::ConfirmConsensusInitialization( + ConfirmConsensusInitialization { is_confirmed: true } + ), + } + ), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn schnorr_fails_when_generation_message_is_received_when_not_initialized() { - let ml = MessageLoop::new(3, 1).unwrap(); - let session = ml.into_session(0); - assert_eq!(session.on_generation_message(&ml.0.node(1), &SchnorrSigningGenerationMessage { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { - session: SessionId::default().into(), - session_nonce: 0, - derived_point: Public::default().into(), - }), - }), Err(Error::InvalidStateForRequest)); - } + #[test] + fn schnorr_fails_when_generation_message_is_received_when_not_initialized() { + let ml = MessageLoop::new(3, 1).unwrap(); + let session = ml.into_session(0); + assert_eq!( + session.on_generation_message( + &ml.0.node(1), + &SchnorrSigningGenerationMessage { + session: SessionId::default().into(), + sub_session: session.core.access_key.clone().into(), + session_nonce: 0, + message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { + session: SessionId::default().into(), + session_nonce: 0, + derived_point: Public::default().into(), + }), + } + ), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn schnorr_fails_when_generation_sesson_is_initialized_by_slave_node() { - let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - let session = ml.session_at(0); - ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration); + #[test] + fn 
schnorr_fails_when_generation_sesson_is_initialized_by_slave_node() { + let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); + let session = ml.session_at(0); + ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration); - let slave2_id = ml.0.node(2); - let slave1_session = ml.session_at(1); + let slave2_id = ml.0.node(2); + let slave1_session = ml.session_at(1); - assert_eq!(slave1_session.on_generation_message(&slave2_id, &SchnorrSigningGenerationMessage { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - message: GenerationMessage::InitializeSession(InitializeSession { - session: SessionId::default().into(), - session_nonce: 0, - origin: None, - author: Address::default().into(), - nodes: BTreeMap::new(), - is_zero: false, - threshold: 1, - derived_point: Public::default().into(), - }) - }), Err(Error::InvalidMessage)); - } + assert_eq!( + slave1_session.on_generation_message( + &slave2_id, + &SchnorrSigningGenerationMessage { + session: SessionId::default().into(), + sub_session: session.core.access_key.clone().into(), + session_nonce: 0, + message: GenerationMessage::InitializeSession(InitializeSession { + session: SessionId::default().into(), + session_nonce: 0, + origin: None, + author: Address::default().into(), + nodes: BTreeMap::new(), + is_zero: false, + threshold: 1, + derived_point: Public::default().into(), + }) + } + ), + Err(Error::InvalidMessage) + ); + } - #[test] - fn schnorr_fails_when_signature_requested_when_not_initialized() { - let ml = MessageLoop::new(3, 1).unwrap(); - let session = ml.into_session(1); - assert_eq!(session.on_partial_signature_requested(&ml.0.node(0), &SchnorrRequestPartialSignature { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(), - message_hash: 
H256::default().into(), - nodes: Default::default(), - }), Err(Error::InvalidStateForRequest)); - } + #[test] + fn schnorr_fails_when_signature_requested_when_not_initialized() { + let ml = MessageLoop::new(3, 1).unwrap(); + let session = ml.into_session(1); + assert_eq!( + session.on_partial_signature_requested( + &ml.0.node(0), + &SchnorrRequestPartialSignature { + session: SessionId::default().into(), + sub_session: session.core.access_key.clone().into(), + session_nonce: 0, + request_id: Secret::from_str( + "0000000000000000000000000000000000000000000000000000000000000001" + ) + .unwrap() + .into(), + message_hash: H256::default().into(), + nodes: Default::default(), + } + ), + Err(Error::InvalidStateForRequest) + ); + } - #[test] - fn schnorr_fails_when_signature_requested_by_slave_node() { - let ml = MessageLoop::new(3, 1).unwrap(); - let session = ml.into_session(0); - assert_eq!(session.on_partial_signature_requested(&ml.0.node(1), &SchnorrRequestPartialSignature { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(), - message_hash: H256::default().into(), - nodes: Default::default(), - }), Err(Error::InvalidMessage)); - } + #[test] + fn schnorr_fails_when_signature_requested_by_slave_node() { + let ml = MessageLoop::new(3, 1).unwrap(); + let session = ml.into_session(0); + assert_eq!( + session.on_partial_signature_requested( + &ml.0.node(1), + &SchnorrRequestPartialSignature { + session: SessionId::default().into(), + sub_session: session.core.access_key.clone().into(), + session_nonce: 0, + request_id: Secret::from_str( + "0000000000000000000000000000000000000000000000000000000000000001" + ) + .unwrap() + .into(), + message_hash: H256::default().into(), + nodes: Default::default(), + } + ), + Err(Error::InvalidMessage) + ); + } - #[test] - fn schnorr_failed_signing_session() { - 
let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); + #[test] + fn schnorr_failed_signing_session() { + let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - // we need at least 2-of-3 nodes to agree to reach consensus - // let's say 2 of 3 nodes disagee - ml.0.acl_storage(1).prohibit(public_to_address(&requester), SessionId::default()); - ml.0.acl_storage(2).prohibit(public_to_address(&requester), SessionId::default()); + // we need at least 2-of-3 nodes to agree to reach consensus + // let's say 2 of 3 nodes disagee + ml.0.acl_storage(1) + .prohibit(public_to_address(&requester), SessionId::default()); + ml.0.acl_storage(2) + .prohibit(public_to_address(&requester), SessionId::default()); - // then consensus is unreachable - ml.0.loop_until(|| ml.0.is_empty()); - assert_eq!(ml.session_at(0).wait().unwrap_err(), Error::ConsensusUnreachable); - } + // then consensus is unreachable + ml.0.loop_until(|| ml.0.is_empty()); + assert_eq!( + ml.session_at(0).wait().unwrap_err(), + Error::ConsensusUnreachable + ); + } - #[test] - fn schnorr_complete_signing_session_with_single_node_failing() { - let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); + #[test] + fn schnorr_complete_signing_session_with_single_node_failing() { + let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - // we need at least 2-of-3 nodes to agree to reach consensus - // let's say 1 of 3 nodes disagee - ml.0.acl_storage(1).prohibit(public_to_address(&requester), SessionId::default()); + // we need at least 2-of-3 nodes to agree to reach consensus + // let's say 1 of 3 nodes disagee + ml.0.acl_storage(1) + .prohibit(public_to_address(&requester), SessionId::default()); - // then consensus reachable, but single node will disagree - ml.ensure_completed(); - } + // then consensus reachable, but single node will disagree + ml.ensure_completed(); + } - #[test] - fn 
schnorr_complete_signing_session_with_acl_check_failed_on_master() { - let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); + #[test] + fn schnorr_complete_signing_session_with_acl_check_failed_on_master() { + let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - // we need at least 2-of-3 nodes to agree to reach consensus - // let's say 1 of 3 nodes disagee - ml.0.acl_storage(0).prohibit(public_to_address(&requester), SessionId::default()); + // we need at least 2-of-3 nodes to agree to reach consensus + // let's say 1 of 3 nodes disagee + ml.0.acl_storage(0) + .prohibit(public_to_address(&requester), SessionId::default()); - // then consensus reachable, but single node will disagree - ml.ensure_completed(); - } + // then consensus reachable, but single node will disagree + ml.ensure_completed(); + } - #[test] - fn schnorr_signing_message_fails_when_nonce_is_wrong() { - let ml = MessageLoop::new(3, 1).unwrap(); - let session = ml.into_session(1); - let msg = SchnorrSigningMessage::SchnorrSigningGenerationMessage(SchnorrSigningGenerationMessage { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 10, - message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { - session: SessionId::default().into(), - session_nonce: 0, - derived_point: Public::default().into(), - }), - }); - assert_eq!(session.process_message(&ml.0.node(1), &msg), Err(Error::ReplayProtection)); - } + #[test] + fn schnorr_signing_message_fails_when_nonce_is_wrong() { + let ml = MessageLoop::new(3, 1).unwrap(); + let session = ml.into_session(1); + let msg = SchnorrSigningMessage::SchnorrSigningGenerationMessage( + SchnorrSigningGenerationMessage { + session: SessionId::default().into(), + sub_session: session.core.access_key.clone().into(), + session_nonce: 10, + message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { + session: SessionId::default().into(), + 
session_nonce: 0, + derived_point: Public::default().into(), + }), + }, + ); + assert_eq!( + session.process_message(&ml.0.node(1), &msg), + Err(Error::ReplayProtection) + ); + } - #[test] - fn schnorr_signing_works_when_delegated_to_other_node() { - let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init_delegated().unwrap(); - ml.ensure_completed(); - } + #[test] + fn schnorr_signing_works_when_delegated_to_other_node() { + let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init_delegated().unwrap(); + ml.ensure_completed(); + } - #[test] - fn schnorr_signing_works_when_share_owners_are_isolated() { - let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init_with_isolated().unwrap(); - ml.ensure_completed(); - } + #[test] + fn schnorr_signing_works_when_share_owners_are_isolated() { + let (ml, _, _) = MessageLoop::new(3, 1) + .unwrap() + .init_with_isolated() + .unwrap(); + ml.ensure_completed(); + } } diff --git a/secret-store/src/key_server_cluster/cluster.rs b/secret-store/src/key_server_cluster/cluster.rs index a8416a8f7..124a215ae 100644 --- a/secret-store/src/key_server_cluster/cluster.rs +++ b/secret-store/src/key_server_cluster/cluster.rs @@ -14,1097 +14,1780 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use std::collections::{BTreeMap, BTreeSet}; -use parking_lot::RwLock; -use ethkey::{Public, Signature, Random, Generator}; use ethereum_types::{Address, H256}; +use ethkey::{Generator, Public, Random, Signature}; +use key_server_cluster::{ + cluster_connections::{ConnectionManager, ConnectionProvider}, + cluster_connections_net::{ + NetConnectionsContainer, NetConnectionsManager, NetConnectionsManagerConfig, + }, + cluster_message_processor::{MessageProcessor, SessionsMessageProcessor}, + cluster_sessions::{ + create_cluster_view, AdminSession, AdminSessionCreationData, ClusterSession, + ClusterSessions, ClusterSessionsContainer, ClusterSessionsListener, + SessionIdWithSubSession, SERVERS_SET_CHANGE_SESSION_ID, + }, + cluster_sessions_creator::ClusterSessionCreator, + connection_trigger::{ + ConnectionTrigger, ServersSetChangeSessionCreatorConnector, SimpleConnectionTrigger, + }, + connection_trigger_with_migration::ConnectionTriggerWithMigration, + decryption_session::SessionImpl as DecryptionSession, + encryption_session::SessionImpl as EncryptionSession, + generation_session::SessionImpl as GenerationSession, + key_version_negotiation_session::{ + ContinueAction, IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, + SessionImpl as KeyVersionNegotiationSession, + }, + message::Message, + signing_session_ecdsa::SessionImpl as EcdsaSigningSession, + signing_session_schnorr::SessionImpl as SchnorrSigningSession, + AclStorage, Error, KeyServerSet, KeyStorage, NodeId, NodeKeyPair, Requester, SessionId, +}; use parity_runtime::Executor; -use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair}; -use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessions, SessionIdWithSubSession, - ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, - AdminSessionCreationData, ClusterSessionsListener}; -use 
key_server_cluster::cluster_sessions_creator::ClusterSessionCreator; -use key_server_cluster::cluster_connections::{ConnectionProvider, ConnectionManager}; -use key_server_cluster::cluster_connections_net::{NetConnectionsManager, - NetConnectionsContainer, NetConnectionsManagerConfig}; -use key_server_cluster::cluster_message_processor::{MessageProcessor, SessionsMessageProcessor}; -use key_server_cluster::message::Message; -use key_server_cluster::generation_session::{SessionImpl as GenerationSession}; -use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession}; -use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession}; -use key_server_cluster::signing_session_ecdsa::{SessionImpl as EcdsaSigningSession}; -use key_server_cluster::signing_session_schnorr::{SessionImpl as SchnorrSigningSession}; -use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, - IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction}; -use key_server_cluster::connection_trigger::{ConnectionTrigger, - SimpleConnectionTrigger, ServersSetChangeSessionCreatorConnector}; -use key_server_cluster::connection_trigger_with_migration::ConnectionTriggerWithMigration; +use parking_lot::RwLock; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; #[cfg(test)] -use key_server_cluster::cluster_connections::tests::{MessagesQueue, TestConnections, new_test_connections}; +use key_server_cluster::cluster_connections::tests::{ + new_test_connections, MessagesQueue, TestConnections, +}; /// Cluster interface for external clients. pub trait ClusterClient: Send + Sync { - /// Start new generation session. - fn new_generation_session(&self, session_id: SessionId, origin: Option
, author: Address, threshold: usize) -> Result, Error>; - /// Start new encryption session. - fn new_encryption_session(&self, session_id: SessionId, author: Requester, common_point: Public, encrypted_point: Public) -> Result, Error>; - /// Start new decryption session. - fn new_decryption_session(&self, session_id: SessionId, origin: Option
, requester: Requester, version: Option, is_shadow_decryption: bool, is_broadcast_decryption: bool) -> Result, Error>; - /// Start new Schnorr signing session. - fn new_schnorr_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error>; - /// Start new ECDSA session. - fn new_ecdsa_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error>; - /// Start new key version negotiation session. - fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error>; - /// Start new servers set change session. - fn new_servers_set_change_session(&self, session_id: Option, migration_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error>; + /// Start new generation session. + fn new_generation_session( + &self, + session_id: SessionId, + origin: Option
, + author: Address, + threshold: usize, + ) -> Result, Error>; + /// Start new encryption session. + fn new_encryption_session( + &self, + session_id: SessionId, + author: Requester, + common_point: Public, + encrypted_point: Public, + ) -> Result, Error>; + /// Start new decryption session. + fn new_decryption_session( + &self, + session_id: SessionId, + origin: Option
, + requester: Requester, + version: Option, + is_shadow_decryption: bool, + is_broadcast_decryption: bool, + ) -> Result, Error>; + /// Start new Schnorr signing session. + fn new_schnorr_signing_session( + &self, + session_id: SessionId, + requester: Requester, + version: Option, + message_hash: H256, + ) -> Result, Error>; + /// Start new ECDSA session. + fn new_ecdsa_signing_session( + &self, + session_id: SessionId, + requester: Requester, + version: Option, + message_hash: H256, + ) -> Result, Error>; + /// Start new key version negotiation session. + fn new_key_version_negotiation_session( + &self, + session_id: SessionId, + ) -> Result>, Error>; + /// Start new servers set change session. + fn new_servers_set_change_session( + &self, + session_id: Option, + migration_id: Option, + new_nodes_set: BTreeSet, + old_set_signature: Signature, + new_set_signature: Signature, + ) -> Result, Error>; - /// Listen for new generation sessions. - fn add_generation_listener(&self, listener: Arc>); - /// Listen for new decryption sessions. - fn add_decryption_listener(&self, listener: Arc>); - /// Listen for new key version negotiation sessions. - fn add_key_version_negotiation_listener(&self, listener: Arc>>); + /// Listen for new generation sessions. + fn add_generation_listener(&self, listener: Arc>); + /// Listen for new decryption sessions. + fn add_decryption_listener(&self, listener: Arc>); + /// Listen for new key version negotiation sessions. + fn add_key_version_negotiation_listener( + &self, + listener: Arc< + ClusterSessionsListener< + KeyVersionNegotiationSession, + >, + >, + ); - /// Ask node to make 'faulty' generation sessions. - #[cfg(test)] - fn make_faulty_generation_sessions(&self); - /// Get active generation session with given id. - #[cfg(test)] - fn generation_session(&self, session_id: &SessionId) -> Option>; - #[cfg(test)] - fn is_fully_connected(&self) -> bool; - /// Try connect to disconnected nodes. 
- #[cfg(test)] - fn connect(&self); + /// Ask node to make 'faulty' generation sessions. + #[cfg(test)] + fn make_faulty_generation_sessions(&self); + /// Get active generation session with given id. + #[cfg(test)] + fn generation_session(&self, session_id: &SessionId) -> Option>; + #[cfg(test)] + fn is_fully_connected(&self) -> bool; + /// Try connect to disconnected nodes. + #[cfg(test)] + fn connect(&self); } /// Cluster access for single session participant. pub trait Cluster: Send + Sync { - /// Broadcast message to all other nodes. - fn broadcast(&self, message: Message) -> Result<(), Error>; - /// Send message to given node. - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error>; - /// Is connected to given node? - fn is_connected(&self, node: &NodeId) -> bool; - /// Get a set of connected nodes. - fn nodes(&self) -> BTreeSet; - /// Get total count of configured key server nodes (valid at the time of ClusterView creation). - fn configured_nodes_count(&self) -> usize; - /// Get total count of connected key server nodes (valid at the time of ClusterView creation). - fn connected_nodes_count(&self) -> usize; + /// Broadcast message to all other nodes. + fn broadcast(&self, message: Message) -> Result<(), Error>; + /// Send message to given node. + fn send(&self, to: &NodeId, message: Message) -> Result<(), Error>; + /// Is connected to given node? + fn is_connected(&self, node: &NodeId) -> bool; + /// Get a set of connected nodes. + fn nodes(&self) -> BTreeSet; + /// Get total count of configured key server nodes (valid at the time of ClusterView creation). + fn configured_nodes_count(&self) -> usize; + /// Get total count of connected key server nodes (valid at the time of ClusterView creation). + fn connected_nodes_count(&self) -> usize; } /// Cluster initialization parameters. #[derive(Clone)] pub struct ClusterConfiguration { - /// KeyPair this node holds. - pub self_key_pair: Arc, - /// Cluster nodes set. 
- pub key_server_set: Arc, - /// Reference to key storage - pub key_storage: Arc, - /// Reference to ACL storage - pub acl_storage: Arc, - /// Administrator public key. - pub admin_public: Option, - /// Do not remove sessions from container. - pub preserve_sessions: bool, + /// KeyPair this node holds. + pub self_key_pair: Arc, + /// Cluster nodes set. + pub key_server_set: Arc, + /// Reference to key storage + pub key_storage: Arc, + /// Reference to ACL storage + pub acl_storage: Arc, + /// Administrator public key. + pub admin_public: Option, + /// Do not remove sessions from container. + pub preserve_sessions: bool, } /// Network cluster implementation. pub struct ClusterCore { - /// Cluster data. - data: Arc>, + /// Cluster data. + data: Arc>, } /// Network cluster client interface implementation. pub struct ClusterClientImpl { - /// Cluster data. - data: Arc>, + /// Cluster data. + data: Arc>, } /// Network cluster view. It is a communication channel, required in single session. pub struct ClusterView { - configured_nodes_count: usize, - connected_nodes: BTreeSet, - connections: Arc, - self_key_pair: Arc, + configured_nodes_count: usize, + connected_nodes: BTreeSet, + connections: Arc, + self_key_pair: Arc, } /// Cross-thread shareable cluster data. pub struct ClusterData { - /// Cluster configuration. - pub config: ClusterConfiguration, - /// KeyPair this node holds. - pub self_key_pair: Arc, - /// Connections data. - pub connections: C, - /// Active sessions data. - pub sessions: Arc, - // Messages processor. - pub message_processor: Arc, - /// Link between servers set chnage session and the connections manager. - pub servers_set_change_creator_connector: Arc, + /// Cluster configuration. + pub config: ClusterConfiguration, + /// KeyPair this node holds. + pub self_key_pair: Arc, + /// Connections data. + pub connections: C, + /// Active sessions data. + pub sessions: Arc, + // Messages processor. 
+ pub message_processor: Arc, + /// Link between servers set chnage session and the connections manager. + pub servers_set_change_creator_connector: Arc, } /// Create new network-backed cluster. pub fn new_network_cluster( - executor: Executor, - config: ClusterConfiguration, - net_config: NetConnectionsManagerConfig + executor: Executor, + config: ClusterConfiguration, + net_config: NetConnectionsManagerConfig, ) -> Result>, Error> { - let mut nodes = config.key_server_set.snapshot().current_set; - let is_isolated = nodes.remove(config.self_key_pair.public()).is_none(); - let connections_data = Arc::new(RwLock::new(NetConnectionsContainer { - is_isolated, - nodes, - connections: BTreeMap::new(), - })); + let mut nodes = config.key_server_set.snapshot().current_set; + let is_isolated = nodes.remove(config.self_key_pair.public()).is_none(); + let connections_data = Arc::new(RwLock::new(NetConnectionsContainer { + is_isolated, + nodes, + connections: BTreeMap::new(), + })); - let connection_trigger: Box = match net_config.auto_migrate_enabled { - false => Box::new(SimpleConnectionTrigger::with_config(&config)), - true if config.admin_public.is_none() => Box::new(ConnectionTriggerWithMigration::with_config(&config)), - true => return Err(Error::Internal( - "secret store admininstrator public key is specified with auto-migration enabled".into() - )), - }; + let connection_trigger: Box = + match net_config.auto_migrate_enabled { + false => Box::new(SimpleConnectionTrigger::with_config(&config)), + true if config.admin_public.is_none() => { + Box::new(ConnectionTriggerWithMigration::with_config(&config)) + } + true => return Err(Error::Internal( + "secret store admininstrator public key is specified with auto-migration enabled" + .into(), + )), + }; - let servers_set_change_creator_connector = connection_trigger.servers_set_change_creator_connector(); - let sessions = Arc::new(ClusterSessions::new(&config, servers_set_change_creator_connector.clone())); - let 
message_processor = Arc::new(SessionsMessageProcessor::new( - config.self_key_pair.clone(), - servers_set_change_creator_connector.clone(), - sessions.clone(), - connections_data.clone())); + let servers_set_change_creator_connector = + connection_trigger.servers_set_change_creator_connector(); + let sessions = Arc::new(ClusterSessions::new( + &config, + servers_set_change_creator_connector.clone(), + )); + let message_processor = Arc::new(SessionsMessageProcessor::new( + config.self_key_pair.clone(), + servers_set_change_creator_connector.clone(), + sessions.clone(), + connections_data.clone(), + )); - let connections = NetConnectionsManager::new( - executor, - message_processor.clone(), - connection_trigger, - connections_data, - &config, - net_config)?; - connections.start()?; + let connections = NetConnectionsManager::new( + executor, + message_processor.clone(), + connection_trigger, + connections_data, + &config, + net_config, + )?; + connections.start()?; - ClusterCore::new(sessions, message_processor, connections, servers_set_change_creator_connector, config) + ClusterCore::new( + sessions, + message_processor, + connections, + servers_set_change_creator_connector, + config, + ) } /// Create new in-memory backed cluster #[cfg(test)] pub fn new_test_cluster( - messages: MessagesQueue, - config: ClusterConfiguration, + messages: MessagesQueue, + config: ClusterConfiguration, ) -> Result>>, Error> { - let nodes = config.key_server_set.snapshot().current_set; - let connections = new_test_connections(messages, *config.self_key_pair.public(), nodes.keys().cloned().collect()); + let nodes = config.key_server_set.snapshot().current_set; + let connections = new_test_connections( + messages, + *config.self_key_pair.public(), + nodes.keys().cloned().collect(), + ); - let connection_trigger = Box::new(SimpleConnectionTrigger::with_config(&config)); - let servers_set_change_creator_connector = connection_trigger.servers_set_change_creator_connector(); - let mut sessions 
= ClusterSessions::new(&config, servers_set_change_creator_connector.clone()); - if config.preserve_sessions { - sessions.preserve_sessions(); - } - let sessions = Arc::new(sessions); + let connection_trigger = Box::new(SimpleConnectionTrigger::with_config(&config)); + let servers_set_change_creator_connector = + connection_trigger.servers_set_change_creator_connector(); + let mut sessions = ClusterSessions::new(&config, servers_set_change_creator_connector.clone()); + if config.preserve_sessions { + sessions.preserve_sessions(); + } + let sessions = Arc::new(sessions); - let message_processor = Arc::new(SessionsMessageProcessor::new( - config.self_key_pair.clone(), - servers_set_change_creator_connector.clone(), - sessions.clone(), - connections.provider(), - )); + let message_processor = Arc::new(SessionsMessageProcessor::new( + config.self_key_pair.clone(), + servers_set_change_creator_connector.clone(), + sessions.clone(), + connections.provider(), + )); - ClusterCore::new(sessions, message_processor, connections, servers_set_change_creator_connector, config) + ClusterCore::new( + sessions, + message_processor, + connections, + servers_set_change_creator_connector, + config, + ) } impl ClusterCore { - pub fn new( - sessions: Arc, - message_processor: Arc, - connections: C, - servers_set_change_creator_connector: Arc, - config: ClusterConfiguration, - ) -> Result, Error> { - Ok(Arc::new(ClusterCore { - data: Arc::new(ClusterData { - self_key_pair: config.self_key_pair.clone(), - connections, - sessions: sessions.clone(), - config, - message_processor, - servers_set_change_creator_connector - }), - })) - } + pub fn new( + sessions: Arc, + message_processor: Arc, + connections: C, + servers_set_change_creator_connector: Arc, + config: ClusterConfiguration, + ) -> Result, Error> { + Ok(Arc::new(ClusterCore { + data: Arc::new(ClusterData { + self_key_pair: config.self_key_pair.clone(), + connections, + sessions: sessions.clone(), + config, + message_processor, + 
servers_set_change_creator_connector, + }), + })) + } - /// Create new client interface. - pub fn client(&self) -> Arc { - Arc::new(ClusterClientImpl::new(self.data.clone())) - } + /// Create new client interface. + pub fn client(&self) -> Arc { + Arc::new(ClusterClientImpl::new(self.data.clone())) + } - /// Run cluster. - pub fn run(&self) -> Result<(), Error> { - self.data.connections.connect(); - Ok(()) - } + /// Run cluster. + pub fn run(&self) -> Result<(), Error> { + self.data.connections.connect(); + Ok(()) + } - #[cfg(test)] - pub fn view(&self) -> Result, Error> { - let connections = self.data.connections.provider(); - let mut connected_nodes = connections.connected_nodes()?; - let disconnected_nodes = connections.disconnected_nodes(); - connected_nodes.insert(self.data.self_key_pair.public().clone()); + #[cfg(test)] + pub fn view(&self) -> Result, Error> { + let connections = self.data.connections.provider(); + let mut connected_nodes = connections.connected_nodes()?; + let disconnected_nodes = connections.disconnected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); - let connected_nodes_count = connected_nodes.len(); - let disconnected_nodes_count = disconnected_nodes.len(); - Ok(Arc::new(ClusterView::new( - self.data.self_key_pair.clone(), - connections, - connected_nodes, - connected_nodes_count + disconnected_nodes_count))) - } + let connected_nodes_count = connected_nodes.len(); + let disconnected_nodes_count = disconnected_nodes.len(); + Ok(Arc::new(ClusterView::new( + self.data.self_key_pair.clone(), + connections, + connected_nodes, + connected_nodes_count + disconnected_nodes_count, + ))) + } } impl ClusterView { - pub fn new( - self_key_pair: Arc, - connections: Arc, - nodes: BTreeSet, - configured_nodes_count: usize - ) -> Self { - ClusterView { - configured_nodes_count: configured_nodes_count, - connected_nodes: nodes, - connections, - self_key_pair, - } - } + pub fn new( + self_key_pair: Arc, + connections: Arc, + 
nodes: BTreeSet, + configured_nodes_count: usize, + ) -> Self { + ClusterView { + configured_nodes_count: configured_nodes_count, + connected_nodes: nodes, + connections, + self_key_pair, + } + } } impl Cluster for ClusterView { - fn broadcast(&self, message: Message) -> Result<(), Error> { - for node in self.connected_nodes.iter().filter(|n| *n != self.self_key_pair.public()) { - trace!(target: "secretstore_net", "{}: sent message {} to {}", self.self_key_pair.public(), message, node); - let connection = self.connections.connection(node).ok_or(Error::NodeDisconnected)?; - connection.send_message(message.clone()); - } - Ok(()) - } + fn broadcast(&self, message: Message) -> Result<(), Error> { + for node in self + .connected_nodes + .iter() + .filter(|n| *n != self.self_key_pair.public()) + { + trace!(target: "secretstore_net", "{}: sent message {} to {}", self.self_key_pair.public(), message, node); + let connection = self + .connections + .connection(node) + .ok_or(Error::NodeDisconnected)?; + connection.send_message(message.clone()); + } + Ok(()) + } - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { - trace!(target: "secretstore_net", "{}: sent message {} to {}", self.self_key_pair.public(), message, to); - let connection = self.connections.connection(to).ok_or(Error::NodeDisconnected)?; - connection.send_message(message); - Ok(()) - } + fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { + trace!(target: "secretstore_net", "{}: sent message {} to {}", self.self_key_pair.public(), message, to); + let connection = self + .connections + .connection(to) + .ok_or(Error::NodeDisconnected)?; + connection.send_message(message); + Ok(()) + } - fn is_connected(&self, node: &NodeId) -> bool { - self.connected_nodes.contains(node) - } + fn is_connected(&self, node: &NodeId) -> bool { + self.connected_nodes.contains(node) + } - fn nodes(&self) -> BTreeSet { - self.connected_nodes.clone() - } + fn nodes(&self) -> BTreeSet { + 
self.connected_nodes.clone() + } - fn configured_nodes_count(&self) -> usize { - self.configured_nodes_count - } + fn configured_nodes_count(&self) -> usize { + self.configured_nodes_count + } - fn connected_nodes_count(&self) -> usize { - self.connected_nodes.len() - } + fn connected_nodes_count(&self) -> usize { + self.connected_nodes.len() + } } impl ClusterClientImpl { - pub fn new(data: Arc>) -> Self { - ClusterClientImpl { - data: data, - } - } + pub fn new(data: Arc>) -> Self { + ClusterClientImpl { data: data } + } - fn create_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); + fn create_key_version_negotiation_session( + &self, + session_id: SessionId, + ) -> Result>, Error> + { + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; + connected_nodes.insert(self.data.self_key_pair.public().clone()); - let access_key = Random.generate()?.secret().clone(); - let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), false)?; - let session = self.data.sessions.negotiation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id.clone(), None, false, None)?; - match session.initialize(connected_nodes) { - Ok(()) => Ok(session), - Err(error) => { - self.data.sessions.negotiation_sessions.remove(&session.id()); - Err(error) - } - } - } + let access_key = Random.generate()?.secret().clone(); + let session_id = SessionIdWithSubSession::new(session_id, access_key); + let cluster = create_cluster_view( + self.data.self_key_pair.clone(), + self.data.connections.provider(), + false, + )?; + let session = self.data.sessions.negotiation_sessions.insert( + cluster, + self.data.self_key_pair.public().clone(), + session_id.clone(), + 
None, + false, + None, + )?; + match session.initialize(connected_nodes) { + Ok(()) => Ok(session), + Err(error) => { + self.data + .sessions + .negotiation_sessions + .remove(&session.id()); + Err(error) + } + } + } } impl ClusterClient for ClusterClientImpl { - fn new_generation_session(&self, session_id: SessionId, origin: Option
, author: Address, threshold: usize) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); + fn new_generation_session( + &self, + session_id: SessionId, + origin: Option
, + author: Address, + threshold: usize, + ) -> Result, Error> { + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; + connected_nodes.insert(self.data.self_key_pair.public().clone()); - let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), true)?; - let session = self.data.sessions.generation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?; - process_initialization_result( - session.initialize(origin, author, false, threshold, connected_nodes.into()), - session, &self.data.sessions.generation_sessions) - } + let cluster = create_cluster_view( + self.data.self_key_pair.clone(), + self.data.connections.provider(), + true, + )?; + let session = self.data.sessions.generation_sessions.insert( + cluster, + self.data.self_key_pair.public().clone(), + session_id, + None, + false, + None, + )?; + process_initialization_result( + session.initialize(origin, author, false, threshold, connected_nodes.into()), + session, + &self.data.sessions.generation_sessions, + ) + } - fn new_encryption_session(&self, session_id: SessionId, requester: Requester, common_point: Public, encrypted_point: Public) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); + fn new_encryption_session( + &self, + session_id: SessionId, + requester: Requester, + common_point: Public, + encrypted_point: Public, + ) -> Result, Error> { + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; + connected_nodes.insert(self.data.self_key_pair.public().clone()); - let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), true)?; - let session = self.data.sessions.encryption_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?; - process_initialization_result( - 
session.initialize(requester, common_point, encrypted_point), - session, &self.data.sessions.encryption_sessions) - } + let cluster = create_cluster_view( + self.data.self_key_pair.clone(), + self.data.connections.provider(), + true, + )?; + let session = self.data.sessions.encryption_sessions.insert( + cluster, + self.data.self_key_pair.public().clone(), + session_id, + None, + false, + None, + )?; + process_initialization_result( + session.initialize(requester, common_point, encrypted_point), + session, + &self.data.sessions.encryption_sessions, + ) + } - fn new_decryption_session(&self, session_id: SessionId, origin: Option
, requester: Requester, version: Option, is_shadow_decryption: bool, is_broadcast_decryption: bool) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); + fn new_decryption_session( + &self, + session_id: SessionId, + origin: Option
, + requester: Requester, + version: Option, + is_shadow_decryption: bool, + is_broadcast_decryption: bool, + ) -> Result, Error> { + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; + connected_nodes.insert(self.data.self_key_pair.public().clone()); - let access_key = Random.generate()?.secret().clone(); - let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), false)?; - let session = self.data.sessions.decryption_sessions.insert(cluster, self.data.self_key_pair.public().clone(), - session_id.clone(), None, false, Some(requester))?; + let access_key = Random.generate()?.secret().clone(); + let session_id = SessionIdWithSubSession::new(session_id, access_key); + let cluster = create_cluster_view( + self.data.self_key_pair.clone(), + self.data.connections.provider(), + false, + )?; + let session = self.data.sessions.decryption_sessions.insert( + cluster, + self.data.self_key_pair.public().clone(), + session_id.clone(), + None, + false, + Some(requester), + )?; - let initialization_result = match version { - Some(version) => session.initialize(origin, version, is_shadow_decryption, is_broadcast_decryption), - None => { - self.create_key_version_negotiation_session(session_id.id.clone()) - .map(|version_session| { - version_session.set_continue_action(ContinueAction::Decrypt(session.clone(), origin, is_shadow_decryption, is_broadcast_decryption)); - self.data.message_processor.try_continue_session(Some(version_session)); - }) - }, - }; + let initialization_result = match version { + Some(version) => session.initialize( + origin, + version, + is_shadow_decryption, + is_broadcast_decryption, + ), + None => self + .create_key_version_negotiation_session(session_id.id.clone()) + .map(|version_session| { + version_session.set_continue_action(ContinueAction::Decrypt( + session.clone(), + origin, + is_shadow_decryption, + 
is_broadcast_decryption, + )); + self.data + .message_processor + .try_continue_session(Some(version_session)); + }), + }; - process_initialization_result( - initialization_result, - session, &self.data.sessions.decryption_sessions) - } + process_initialization_result( + initialization_result, + session, + &self.data.sessions.decryption_sessions, + ) + } - fn new_schnorr_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); + fn new_schnorr_signing_session( + &self, + session_id: SessionId, + requester: Requester, + version: Option, + message_hash: H256, + ) -> Result, Error> { + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; + connected_nodes.insert(self.data.self_key_pair.public().clone()); - let access_key = Random.generate()?.secret().clone(); - let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), false)?; - let session = self.data.sessions.schnorr_signing_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id.clone(), None, false, Some(requester))?; + let access_key = Random.generate()?.secret().clone(); + let session_id = SessionIdWithSubSession::new(session_id, access_key); + let cluster = create_cluster_view( + self.data.self_key_pair.clone(), + self.data.connections.provider(), + false, + )?; + let session = self.data.sessions.schnorr_signing_sessions.insert( + cluster, + self.data.self_key_pair.public().clone(), + session_id.clone(), + None, + false, + Some(requester), + )?; - let initialization_result = match version { - Some(version) => session.initialize(version, message_hash), - None => { - self.create_key_version_negotiation_session(session_id.id.clone()) - 
.map(|version_session| { - version_session.set_continue_action(ContinueAction::SchnorrSign(session.clone(), message_hash)); - self.data.message_processor.try_continue_session(Some(version_session)); - }) - }, - }; + let initialization_result = match version { + Some(version) => session.initialize(version, message_hash), + None => self + .create_key_version_negotiation_session(session_id.id.clone()) + .map(|version_session| { + version_session.set_continue_action(ContinueAction::SchnorrSign( + session.clone(), + message_hash, + )); + self.data + .message_processor + .try_continue_session(Some(version_session)); + }), + }; - process_initialization_result( - initialization_result, - session, &self.data.sessions.schnorr_signing_sessions) - } + process_initialization_result( + initialization_result, + session, + &self.data.sessions.schnorr_signing_sessions, + ) + } - fn new_ecdsa_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); + fn new_ecdsa_signing_session( + &self, + session_id: SessionId, + requester: Requester, + version: Option, + message_hash: H256, + ) -> Result, Error> { + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; + connected_nodes.insert(self.data.self_key_pair.public().clone()); - let access_key = Random.generate()?.secret().clone(); - let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), false)?; - let session = self.data.sessions.ecdsa_signing_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id.clone(), None, false, Some(requester))?; + let access_key = Random.generate()?.secret().clone(); + let session_id = SessionIdWithSubSession::new(session_id, 
access_key); + let cluster = create_cluster_view( + self.data.self_key_pair.clone(), + self.data.connections.provider(), + false, + )?; + let session = self.data.sessions.ecdsa_signing_sessions.insert( + cluster, + self.data.self_key_pair.public().clone(), + session_id.clone(), + None, + false, + Some(requester), + )?; - let initialization_result = match version { - Some(version) => session.initialize(version, message_hash), - None => { - self.create_key_version_negotiation_session(session_id.id.clone()) - .map(|version_session| { - version_session.set_continue_action(ContinueAction::EcdsaSign(session.clone(), message_hash)); - self.data.message_processor.try_continue_session(Some(version_session)); - }) - }, - }; + let initialization_result = match version { + Some(version) => session.initialize(version, message_hash), + None => self + .create_key_version_negotiation_session(session_id.id.clone()) + .map(|version_session| { + version_session.set_continue_action(ContinueAction::EcdsaSign( + session.clone(), + message_hash, + )); + self.data + .message_processor + .try_continue_session(Some(version_session)); + }), + }; - process_initialization_result( - initialization_result, - session, &self.data.sessions.ecdsa_signing_sessions) - } + process_initialization_result( + initialization_result, + session, + &self.data.sessions.ecdsa_signing_sessions, + ) + } - fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { - let session = self.create_key_version_negotiation_session(session_id)?; - Ok(session) - } + fn new_key_version_negotiation_session( + &self, + session_id: SessionId, + ) -> Result>, Error> + { + let session = self.create_key_version_negotiation_session(session_id)?; + Ok(session) + } - fn new_servers_set_change_session(&self, session_id: Option, migration_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { - new_servers_set_change_session( - 
self.data.self_key_pair.clone(), - &self.data.sessions, - self.data.connections.provider(), - self.data.servers_set_change_creator_connector.clone(), - ServersSetChangeParams { - session_id, - migration_id, - new_nodes_set, - old_set_signature, - new_set_signature, - }) - } + fn new_servers_set_change_session( + &self, + session_id: Option, + migration_id: Option, + new_nodes_set: BTreeSet, + old_set_signature: Signature, + new_set_signature: Signature, + ) -> Result, Error> { + new_servers_set_change_session( + self.data.self_key_pair.clone(), + &self.data.sessions, + self.data.connections.provider(), + self.data.servers_set_change_creator_connector.clone(), + ServersSetChangeParams { + session_id, + migration_id, + new_nodes_set, + old_set_signature, + new_set_signature, + }, + ) + } - fn add_generation_listener(&self, listener: Arc>) { - self.data.sessions.generation_sessions.add_listener(listener); - } + fn add_generation_listener(&self, listener: Arc>) { + self.data + .sessions + .generation_sessions + .add_listener(listener); + } - fn add_decryption_listener(&self, listener: Arc>) { - self.data.sessions.decryption_sessions.add_listener(listener); - } + fn add_decryption_listener(&self, listener: Arc>) { + self.data + .sessions + .decryption_sessions + .add_listener(listener); + } - fn add_key_version_negotiation_listener(&self, listener: Arc>>) { - self.data.sessions.negotiation_sessions.add_listener(listener); - } + fn add_key_version_negotiation_listener( + &self, + listener: Arc< + ClusterSessionsListener< + KeyVersionNegotiationSession, + >, + >, + ) { + self.data + .sessions + .negotiation_sessions + .add_listener(listener); + } - #[cfg(test)] - fn make_faulty_generation_sessions(&self) { - self.data.sessions.make_faulty_generation_sessions(); - } + #[cfg(test)] + fn make_faulty_generation_sessions(&self) { + self.data.sessions.make_faulty_generation_sessions(); + } - #[cfg(test)] - fn generation_session(&self, session_id: &SessionId) -> Option> { - 
self.data.sessions.generation_sessions.get(session_id, false) - } + #[cfg(test)] + fn generation_session(&self, session_id: &SessionId) -> Option> { + self.data + .sessions + .generation_sessions + .get(session_id, false) + } - #[cfg(test)] - fn is_fully_connected(&self) -> bool { - self.data.connections.provider().disconnected_nodes().is_empty() - } + #[cfg(test)] + fn is_fully_connected(&self) -> bool { + self.data + .connections + .provider() + .disconnected_nodes() + .is_empty() + } - #[cfg(test)] - fn connect(&self) { - self.data.connections.connect() - } + #[cfg(test)] + fn connect(&self) { + self.data.connections.connect() + } } pub struct ServersSetChangeParams { - pub session_id: Option, - pub migration_id: Option, - pub new_nodes_set: BTreeSet, - pub old_set_signature: Signature, - pub new_set_signature: Signature, + pub session_id: Option, + pub migration_id: Option, + pub new_nodes_set: BTreeSet, + pub old_set_signature: Signature, + pub new_set_signature: Signature, } pub fn new_servers_set_change_session( - self_key_pair: Arc, - sessions: &ClusterSessions, - connections: Arc, - servers_set_change_creator_connector: Arc, - params: ServersSetChangeParams, + self_key_pair: Arc, + sessions: &ClusterSessions, + connections: Arc, + servers_set_change_creator_connector: Arc, + params: ServersSetChangeParams, ) -> Result, Error> { - let session_id = match params.session_id { - Some(session_id) if session_id == *SERVERS_SET_CHANGE_SESSION_ID => session_id, - Some(_) => return Err(Error::InvalidMessage), - None => *SERVERS_SET_CHANGE_SESSION_ID, - }; + let session_id = match params.session_id { + Some(session_id) if session_id == *SERVERS_SET_CHANGE_SESSION_ID => session_id, + Some(_) => return Err(Error::InvalidMessage), + None => *SERVERS_SET_CHANGE_SESSION_ID, + }; - let cluster = create_cluster_view(self_key_pair.clone(), connections, true)?; - let creation_data = AdminSessionCreationData::ServersSetChange(params.migration_id, params.new_nodes_set.clone()); 
- let session = sessions.admin_sessions - .insert(cluster, *self_key_pair.public(), session_id, None, true, Some(creation_data))?; - let initialization_result = session.as_servers_set_change().expect("servers set change session is created; qed") - .initialize(params.new_nodes_set, params.old_set_signature, params.new_set_signature); + let cluster = create_cluster_view(self_key_pair.clone(), connections, true)?; + let creation_data = AdminSessionCreationData::ServersSetChange( + params.migration_id, + params.new_nodes_set.clone(), + ); + let session = sessions.admin_sessions.insert( + cluster, + *self_key_pair.public(), + session_id, + None, + true, + Some(creation_data), + )?; + let initialization_result = session + .as_servers_set_change() + .expect("servers set change session is created; qed") + .initialize( + params.new_nodes_set, + params.old_set_signature, + params.new_set_signature, + ); - if initialization_result.is_ok() { - servers_set_change_creator_connector.set_key_servers_set_change_session(session.clone()); - } + if initialization_result.is_ok() { + servers_set_change_creator_connector.set_key_servers_set_change_session(session.clone()); + } - process_initialization_result( - initialization_result, - session, &sessions.admin_sessions) + process_initialization_result(initialization_result, session, &sessions.admin_sessions) } fn process_initialization_result( - result: Result<(), Error>, - session: Arc, - sessions: &ClusterSessionsContainer + result: Result<(), Error>, + session: Arc, + sessions: &ClusterSessionsContainer, ) -> Result, Error> - where - S: ClusterSession, - SC: ClusterSessionCreator +where + S: ClusterSession, + SC: ClusterSessionCreator, { - match result { - Ok(()) if session.is_finished() => { - sessions.remove(&session.id()); - Ok(session) - }, - Ok(()) => Ok(session), - Err(error) => { - sessions.remove(&session.id()); - Err(error) - }, - } + match result { + Ok(()) if session.is_finished() => { + sessions.remove(&session.id()); + 
Ok(session) + } + Ok(()) => Ok(session), + Err(error) => { + sessions.remove(&session.id()); + Err(error) + } + } } #[cfg(test)] pub mod tests { - use std::sync::Arc; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::collections::{BTreeMap, BTreeSet, VecDeque}; - use parking_lot::{Mutex, RwLock}; - use ethereum_types::{Address, H256}; - use ethkey::{Random, Generator, Public, Signature, sign}; - use key_server_cluster::{NodeId, SessionId, Requester, Error, DummyAclStorage, DummyKeyStorage, - MapKeyServerSet, PlainNodeKeyPair, NodeKeyPair}; - use key_server_cluster::message::Message; - use key_server_cluster::cluster::{new_test_cluster, Cluster, ClusterCore, ClusterConfiguration, ClusterClient}; - use key_server_cluster::cluster_connections::ConnectionManager; - use key_server_cluster::cluster_connections::tests::{MessagesQueue, TestConnections}; - use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, AdminSession, ClusterSessionsListener}; - use key_server_cluster::generation_session::{SessionImpl as GenerationSession, - SessionState as GenerationSessionState}; - use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession}; - use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession}; - use key_server_cluster::signing_session_ecdsa::{SessionImpl as EcdsaSigningSession}; - use key_server_cluster::signing_session_schnorr::{SessionImpl as SchnorrSigningSession}; - use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, - IsolatedSessionTransport as KeyVersionNegotiationSessionTransport}; - - #[derive(Default)] - pub struct DummyClusterClient { - pub generation_requests_count: AtomicUsize, - } - - #[derive(Debug)] - pub struct DummyCluster { - id: NodeId, - data: RwLock, - } - - #[derive(Debug, Default)] - struct DummyClusterData { - nodes: BTreeSet, - messages: VecDeque<(NodeId, Message)>, - } - - impl ClusterClient for DummyClusterClient { - fn 
new_generation_session(&self, _session_id: SessionId, _origin: Option
, _author: Address, _threshold: usize) -> Result, Error> { - self.generation_requests_count.fetch_add(1, Ordering::Relaxed); - Err(Error::Internal("test-error".into())) - } - fn new_encryption_session(&self, _session_id: SessionId, _requester: Requester, _common_point: Public, _encrypted_point: Public) -> Result, Error> { unimplemented!("test-only") } - fn new_decryption_session(&self, _session_id: SessionId, _origin: Option
, _requester: Requester, _version: Option, _is_shadow_decryption: bool, _is_broadcast_session: bool) -> Result, Error> { unimplemented!("test-only") } - fn new_schnorr_signing_session(&self, _session_id: SessionId, _requester: Requester, _version: Option, _message_hash: H256) -> Result, Error> { unimplemented!("test-only") } - fn new_ecdsa_signing_session(&self, _session_id: SessionId, _requester: Requester, _version: Option, _message_hash: H256) -> Result, Error> { unimplemented!("test-only") } - - fn new_key_version_negotiation_session(&self, _session_id: SessionId) -> Result>, Error> { unimplemented!("test-only") } - fn new_servers_set_change_session(&self, _session_id: Option, _migration_id: Option, _new_nodes_set: BTreeSet, _old_set_signature: Signature, _new_set_signature: Signature) -> Result, Error> { unimplemented!("test-only") } - - fn add_generation_listener(&self, _listener: Arc>) {} - fn add_decryption_listener(&self, _listener: Arc>) {} - fn add_key_version_negotiation_listener(&self, _listener: Arc>>) {} - - fn make_faulty_generation_sessions(&self) { unimplemented!("test-only") } - fn generation_session(&self, _session_id: &SessionId) -> Option> { unimplemented!("test-only") } - fn is_fully_connected(&self) -> bool { true } - fn connect(&self) {} - } - - impl DummyCluster { - pub fn new(id: NodeId) -> Self { - DummyCluster { - id: id, - data: RwLock::new(DummyClusterData::default()) - } - } - - pub fn node(&self) -> NodeId { - self.id.clone() - } - - pub fn add_node(&self, node: NodeId) { - self.data.write().nodes.insert(node); - } - - pub fn add_nodes>(&self, nodes: I) { - self.data.write().nodes.extend(nodes) - } - - pub fn remove_node(&self, node: &NodeId) { - self.data.write().nodes.remove(node); - } - - pub fn take_message(&self) -> Option<(NodeId, Message)> { - self.data.write().messages.pop_front() - } - } - - impl Cluster for DummyCluster { - fn broadcast(&self, message: Message) -> Result<(), Error> { - let mut data = self.data.write(); - 
let all_nodes: Vec<_> = data.nodes.iter().cloned().filter(|n| n != &self.id).collect(); - for node in all_nodes { - data.messages.push_back((node, message.clone())); - } - Ok(()) - } - - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { - debug_assert!(&self.id != to); - self.data.write().messages.push_back((to.clone(), message)); - Ok(()) - } - - fn is_connected(&self, node: &NodeId) -> bool { - let data = self.data.read(); - &self.id == node || data.nodes.contains(node) - } - - fn nodes(&self) -> BTreeSet { - self.data.read().nodes.iter().cloned().collect() - } - - fn configured_nodes_count(&self) -> usize { - self.data.read().nodes.len() - } - - fn connected_nodes_count(&self) -> usize { - self.data.read().nodes.len() - } - } - - /// Test message loop. - pub struct MessageLoop { - messages: MessagesQueue, - preserve_sessions: bool, - key_pairs_map: BTreeMap>, - acl_storages_map: BTreeMap>, - key_storages_map: BTreeMap>, - clusters_map: BTreeMap>>>, - } - - impl ::std::fmt::Debug for MessageLoop { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "MessageLoop({})", self.clusters_map.len()) - } - } - - impl MessageLoop { - /// Returns set of all nodes ids. - pub fn nodes(&self) -> BTreeSet { - self.clusters_map.keys().cloned().collect() - } - - /// Returns nodes id by its index. - pub fn node(&self, idx: usize) -> NodeId { - *self.clusters_map.keys().nth(idx).unwrap() - } - - /// Returns key pair of the node by its idx. - pub fn node_key_pair(&self, idx: usize) -> &Arc { - self.key_pairs_map.values().nth(idx).unwrap() - } - - /// Get cluster reference by its index. - pub fn cluster(&self, idx: usize) -> &Arc>> { - self.clusters_map.values().nth(idx).unwrap() - } - - /// Get keys storage reference by its index. - pub fn key_storage(&self, idx: usize) -> &Arc { - self.key_storages_map.values().nth(idx).unwrap() - } - - /// Get keys storage reference by node id. 
- pub fn key_storage_of(&self, node: &NodeId) -> &Arc { - &self.key_storages_map[node] - } - - /// Replace key storage of the node by its id. - pub fn replace_key_storage_of(&mut self, node: &NodeId, key_storage: Arc) { - *self.key_storages_map.get_mut(node).unwrap() = key_storage; - } - - /// Get ACL storage reference by its index. - pub fn acl_storage(&self, idx: usize) -> &Arc { - self.acl_storages_map.values().nth(idx).unwrap() - } - - /// Get sessions container reference by its index. - pub fn sessions(&self, idx: usize) -> &Arc { - &self.cluster(idx).data.sessions - } - - /// Get sessions container reference by node id. - pub fn sessions_of(&self, node: &NodeId) -> &Arc { - &self.clusters_map[node].data.sessions - } - - /// Isolate node from others. - pub fn isolate(&self, idx: usize) { - let node = self.node(idx); - for (i, cluster) in self.clusters_map.values().enumerate() { - if i == idx { - cluster.data.connections.isolate(); - } else { - cluster.data.connections.disconnect(node); - } - } - } - - /// Exclude node from cluster. - pub fn exclude(&mut self, idx: usize) { - let node = self.node(idx); - for (i, cluster) in self.clusters_map.values().enumerate() { - if i != idx { - cluster.data.connections.exclude(node); - } - } - self.key_storages_map.remove(&node); - self.acl_storages_map.remove(&node); - self.key_pairs_map.remove(&node); - self.clusters_map.remove(&node); - } - - /// Include new node to the cluster. 
- pub fn include(&mut self, node_key_pair: Arc) -> usize { - let key_storage = Arc::new(DummyKeyStorage::default()); - let acl_storage = Arc::new(DummyAclStorage::default()); - let cluster_params = ClusterConfiguration { - self_key_pair: node_key_pair.clone(), - key_server_set: Arc::new(MapKeyServerSet::new(false, self.nodes().iter() - .chain(::std::iter::once(node_key_pair.public())) - .map(|n| (*n, format!("127.0.0.1:{}", 13).parse().unwrap())) - .collect())), - key_storage: key_storage.clone(), - acl_storage: acl_storage.clone(), - admin_public: None, - preserve_sessions: self.preserve_sessions, - }; - let cluster = new_test_cluster(self.messages.clone(), cluster_params).unwrap(); - - for cluster in self.clusters_map.values(){ - cluster.data.connections.include(node_key_pair.public().clone()); - } - self.acl_storages_map.insert(*node_key_pair.public(), acl_storage); - self.key_storages_map.insert(*node_key_pair.public(), key_storage); - self.clusters_map.insert(*node_key_pair.public(), cluster); - self.key_pairs_map.insert(*node_key_pair.public(), node_key_pair.clone()); - self.clusters_map.keys().position(|k| k == node_key_pair.public()).unwrap() - } - - /// Is empty message queue? - pub fn is_empty(&self) -> bool { - self.messages.lock().is_empty() - } - - /// Takes next message from the queue. - pub fn take_message(&self) -> Option<(NodeId, NodeId, Message)> { - self.messages.lock().pop_front() - } - - /// Process single message. - pub fn process_message(&self, from: NodeId, to: NodeId, message: Message) { - let cluster_data = &self.clusters_map[&to].data; - let connection = cluster_data.connections.provider().connection(&from).unwrap(); - cluster_data.message_processor.process_connection_message(connection, message); - } - - /// Take next message and process it. 
- pub fn take_and_process_message(&self) -> bool { - let (from, to, message) = match self.take_message() { - Some((from, to, message)) => (from, to, message), - None => return false, - }; - - self.process_message(from, to, message); - true - } - - /// Loops until `predicate` returns `true` or there are no messages in the queue. - pub fn loop_until(&self, predicate: F) where F: Fn() -> bool { - while !predicate() { - if !self.take_and_process_message() { - panic!("message queue is empty but goal is not achieved"); - } - } - } - } - - pub fn make_clusters(num_nodes: usize) -> MessageLoop { - do_make_clusters(num_nodes, false) - } - - pub fn make_clusters_and_preserve_sessions(num_nodes: usize) -> MessageLoop { - do_make_clusters(num_nodes, true) - } - - fn do_make_clusters(num_nodes: usize, preserve_sessions: bool) -> MessageLoop { - let ports_begin = 0; - let messages = Arc::new(Mutex::new(VecDeque::new())); - let key_pairs: Vec<_> = (0..num_nodes) - .map(|_| Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap()))).collect(); - let key_storages: Vec<_> = (0..num_nodes).map(|_| Arc::new(DummyKeyStorage::default())).collect(); - let acl_storages: Vec<_> = (0..num_nodes).map(|_| Arc::new(DummyAclStorage::default())).collect(); - let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { - self_key_pair: key_pairs[i].clone(), - key_server_set: Arc::new(MapKeyServerSet::new(false, key_pairs.iter().enumerate() - .map(|(j, kp)| (*kp.public(), format!("127.0.0.1:{}", ports_begin + j as u16).parse().unwrap())) - .collect())), - key_storage: key_storages[i].clone(), - acl_storage: acl_storages[i].clone(), - admin_public: None, - preserve_sessions, - }).collect(); - let clusters: Vec<_> = cluster_params.into_iter() - .map(|params| new_test_cluster(messages.clone(), params).unwrap()) - .collect(); - - let clusters_map = clusters.iter().map(|c| (*c.data.config.self_key_pair.public(), c.clone())).collect(); - let key_pairs_map = key_pairs.into_iter().map(|kp| 
(*kp.public(), kp)).collect(); - let key_storages_map = clusters.iter().zip(key_storages.into_iter()) - .map(|(c, ks)| (*c.data.config.self_key_pair.public(), ks)).collect(); - let acl_storages_map = clusters.iter().zip(acl_storages.into_iter()) - .map(|(c, acls)| (*c.data.config.self_key_pair.public(), acls)).collect(); - MessageLoop { preserve_sessions, messages, key_pairs_map, acl_storages_map, key_storages_map, clusters_map } - } - - #[test] - fn cluster_wont_start_generation_session_if_not_fully_connected() { - let ml = make_clusters(3); - ml.cluster(0).data.connections.disconnect(*ml.cluster(0).data.self_key_pair.public()); - match ml.cluster(0).client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1) { - Err(Error::NodeDisconnected) => (), - Err(e) => panic!("unexpected error {:?}", e), - _ => panic!("unexpected success"), - } - } - - #[test] - fn error_in_generation_session_broadcasted_to_all_other_nodes() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(3); - - // ask one of nodes to produce faulty generation sessions - ml.cluster(1).client().make_faulty_generation_sessions(); - - // start && wait for generation session to fail - let session = ml.cluster(0).client() - .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - ml.loop_until(|| session.joint_public_and_secret().is_some() - && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); - assert!(session.joint_public_and_secret().unwrap().is_err()); - - // check that faulty session is either removed from all nodes, or nonexistent (already removed) - for i in 1..3 { - if let Some(session) = ml.cluster(i).client().generation_session(&SessionId::default()) { - // wait for both session completion && session removal (session completion event is fired - // before session is removed from its own container by cluster) - ml.loop_until(|| session.joint_public_and_secret().is_some() - && 
ml.cluster(i).client().generation_session(&SessionId::default()).is_none()); - assert!(session.joint_public_and_secret().unwrap().is_err()); - } - } - } - - #[test] - fn generation_session_completion_signalled_if_failed_on_master() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(3); - - // ask one of nodes to produce faulty generation sessions - ml.cluster(0).client().make_faulty_generation_sessions(); - - // start && wait for generation session to fail - let session = ml.cluster(0).client() - .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - ml.loop_until(|| session.joint_public_and_secret().is_some() - && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); - assert!(session.joint_public_and_secret().unwrap().is_err()); - - // check that faulty session is either removed from all nodes, or nonexistent (already removed) - for i in 1..3 { - if let Some(session) = ml.cluster(i).client().generation_session(&SessionId::default()) { - let session = session.clone(); - // wait for both session completion && session removal (session completion event is fired - // before session is removed from its own container by cluster) - ml.loop_until(|| session.joint_public_and_secret().is_some() - && ml.cluster(i).client().generation_session(&SessionId::default()).is_none()); - assert!(session.joint_public_and_secret().unwrap().is_err()); - } - } - } - - #[test] - fn generation_session_is_removed_when_succeeded() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(3); - - // start && wait for generation session to complete - let session = ml.cluster(0).client() - .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - ml.loop_until(|| (session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed) - && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); - 
assert!(session.joint_public_and_secret().unwrap().is_ok()); - - // check that on non-master nodes session is either: - // already removed - // or it is removed right after completion - for i in 1..3 { - if let Some(session) = ml.cluster(i).client().generation_session(&SessionId::default()) { - // run to completion if completion message is still on the way - // AND check that it is actually removed from cluster sessions - ml.loop_until(|| (session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed) - && ml.cluster(i).client().generation_session(&SessionId::default()).is_none()); - } - } - } - - #[test] - fn sessions_are_removed_when_initialization_fails() { - let ml = make_clusters(3); - let client = ml.cluster(0).client(); - - // generation session - { - // try to start generation session => fail in initialization - assert_eq!( - client.new_generation_session(SessionId::default(), None, Default::default(), 100).map(|_| ()), - Err(Error::NotEnoughNodesForThreshold)); - - // try to start generation session => fails in initialization - assert_eq!( - client.new_generation_session(SessionId::default(), None, Default::default(), 100).map(|_| ()), - Err(Error::NotEnoughNodesForThreshold)); - - assert!(ml.cluster(0).data.sessions.generation_sessions.is_empty()); - } - - // decryption session - { - // try to start decryption session => fails in initialization - assert_eq!( - client.new_decryption_session( - Default::default(), Default::default(), Default::default(), Some(Default::default()), false, false - ).map(|_| ()), - Err(Error::InvalidMessage)); - - // try to start generation session => fails in initialization - assert_eq!( - client.new_decryption_session( - Default::default(), Default::default(), Default::default(), Some(Default::default()), false, false - ).map(|_| ()), - Err(Error::InvalidMessage)); - - assert!(ml.cluster(0).data.sessions.decryption_sessions.is_empty()); - 
assert!(ml.cluster(0).data.sessions.negotiation_sessions.is_empty()); - } - } - - #[test] - fn schnorr_signing_session_completes_if_node_does_not_have_a_share() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(3); - - // start && wait for generation session to complete - let session = ml.cluster(0).client(). - new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - ml.loop_until(|| (session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed) - && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); - assert!(session.joint_public_and_secret().unwrap().is_ok()); - - // now remove share from node2 - assert!((0..3).all(|i| ml.cluster(i).data.sessions.generation_sessions.is_empty())); - ml.cluster(2).data.config.key_storage.remove(&Default::default()).unwrap(); - - // and try to sign message with generated key - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session0 = ml.cluster(0).client() - .new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); - let session = ml.cluster(0).data.sessions.schnorr_signing_sessions.first().unwrap(); - - ml.loop_until(|| session.is_finished() && (0..3).all(|i| - ml.cluster(i).data.sessions.schnorr_signing_sessions.is_empty())); - session0.wait().unwrap(); - - // and try to sign message with generated key using node that has no key share - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session2 = ml.cluster(2).client() - .new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); - let session = ml.cluster(2).data.sessions.schnorr_signing_sessions.first().unwrap(); - - ml.loop_until(|| session.is_finished() && (0..3).all(|i| - ml.cluster(i).data.sessions.schnorr_signing_sessions.is_empty())); - session2.wait().unwrap(); - - // 
now remove share from node1 - ml.cluster(1).data.config.key_storage.remove(&Default::default()).unwrap(); - - // and try to sign message with generated key - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session1 = ml.cluster(0).client() - .new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); - let session = ml.cluster(0).data.sessions.schnorr_signing_sessions.first().unwrap(); - - ml.loop_until(|| session.is_finished()); - session1.wait().unwrap_err(); - } - - #[test] - fn ecdsa_signing_session_completes_if_node_does_not_have_a_share() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(4); - - // start && wait for generation session to complete - let session = ml.cluster(0).client() - .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - ml.loop_until(|| (session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed) - && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); - assert!(session.joint_public_and_secret().unwrap().is_ok()); - - // now remove share from node2 - assert!((0..3).all(|i| ml.cluster(i).data.sessions.generation_sessions.is_empty())); - ml.cluster(2).data.config.key_storage.remove(&Default::default()).unwrap(); - - // and try to sign message with generated key - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session0 = ml.cluster(0).client() - .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); - let session = ml.cluster(0).data.sessions.ecdsa_signing_sessions.first().unwrap(); - - ml.loop_until(|| session.is_finished() && (0..3).all(|i| - ml.cluster(i).data.sessions.ecdsa_signing_sessions.is_empty())); - session0.wait().unwrap(); - - // and try to sign message with generated key using node that has no key share - let signature = 
sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session2 = ml.cluster(2).client() - .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); - let session = ml.cluster(2).data.sessions.ecdsa_signing_sessions.first().unwrap(); - ml.loop_until(|| session.is_finished() && (0..3).all(|i| - ml.cluster(i).data.sessions.ecdsa_signing_sessions.is_empty())); - session2.wait().unwrap(); - - // now remove share from node1 - ml.cluster(1).data.config.key_storage.remove(&Default::default()).unwrap(); - - // and try to sign message with generated key - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session1 = ml.cluster(0).client() - .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); - let session = ml.cluster(0).data.sessions.ecdsa_signing_sessions.first().unwrap(); - ml.loop_until(|| session.is_finished()); - session1.wait().unwrap_err(); - } + use ethereum_types::{Address, H256}; + use ethkey::{sign, Generator, Public, Random, Signature}; + use key_server_cluster::{ + cluster::{new_test_cluster, Cluster, ClusterClient, ClusterConfiguration, ClusterCore}, + cluster_connections::{ + tests::{MessagesQueue, TestConnections}, + ConnectionManager, + }, + cluster_sessions::{ + AdminSession, ClusterSession, ClusterSessions, ClusterSessionsListener, + }, + decryption_session::SessionImpl as DecryptionSession, + encryption_session::SessionImpl as EncryptionSession, + generation_session::{ + SessionImpl as GenerationSession, SessionState as GenerationSessionState, + }, + key_version_negotiation_session::{ + IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, + SessionImpl as KeyVersionNegotiationSession, + }, + message::Message, + signing_session_ecdsa::SessionImpl as EcdsaSigningSession, + signing_session_schnorr::SessionImpl as SchnorrSigningSession, + DummyAclStorage, DummyKeyStorage, Error, 
MapKeyServerSet, NodeId, NodeKeyPair, + PlainNodeKeyPair, Requester, SessionId, + }; + use parking_lot::{Mutex, RwLock}; + use std::{ + collections::{BTreeMap, BTreeSet, VecDeque}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + }; + + #[derive(Default)] + pub struct DummyClusterClient { + pub generation_requests_count: AtomicUsize, + } + + #[derive(Debug)] + pub struct DummyCluster { + id: NodeId, + data: RwLock, + } + + #[derive(Debug, Default)] + struct DummyClusterData { + nodes: BTreeSet, + messages: VecDeque<(NodeId, Message)>, + } + + impl ClusterClient for DummyClusterClient { + fn new_generation_session( + &self, + _session_id: SessionId, + _origin: Option
, + _author: Address, + _threshold: usize, + ) -> Result, Error> { + self.generation_requests_count + .fetch_add(1, Ordering::Relaxed); + Err(Error::Internal("test-error".into())) + } + fn new_encryption_session( + &self, + _session_id: SessionId, + _requester: Requester, + _common_point: Public, + _encrypted_point: Public, + ) -> Result, Error> { + unimplemented!("test-only") + } + fn new_decryption_session( + &self, + _session_id: SessionId, + _origin: Option
, + _requester: Requester, + _version: Option, + _is_shadow_decryption: bool, + _is_broadcast_session: bool, + ) -> Result, Error> { + unimplemented!("test-only") + } + fn new_schnorr_signing_session( + &self, + _session_id: SessionId, + _requester: Requester, + _version: Option, + _message_hash: H256, + ) -> Result, Error> { + unimplemented!("test-only") + } + fn new_ecdsa_signing_session( + &self, + _session_id: SessionId, + _requester: Requester, + _version: Option, + _message_hash: H256, + ) -> Result, Error> { + unimplemented!("test-only") + } + + fn new_key_version_negotiation_session( + &self, + _session_id: SessionId, + ) -> Result>, Error> + { + unimplemented!("test-only") + } + fn new_servers_set_change_session( + &self, + _session_id: Option, + _migration_id: Option, + _new_nodes_set: BTreeSet, + _old_set_signature: Signature, + _new_set_signature: Signature, + ) -> Result, Error> { + unimplemented!("test-only") + } + + fn add_generation_listener( + &self, + _listener: Arc>, + ) { + } + fn add_decryption_listener( + &self, + _listener: Arc>, + ) { + } + fn add_key_version_negotiation_listener( + &self, + _listener: Arc< + ClusterSessionsListener< + KeyVersionNegotiationSession, + >, + >, + ) { + } + + fn make_faulty_generation_sessions(&self) { + unimplemented!("test-only") + } + fn generation_session(&self, _session_id: &SessionId) -> Option> { + unimplemented!("test-only") + } + fn is_fully_connected(&self) -> bool { + true + } + fn connect(&self) {} + } + + impl DummyCluster { + pub fn new(id: NodeId) -> Self { + DummyCluster { + id: id, + data: RwLock::new(DummyClusterData::default()), + } + } + + pub fn node(&self) -> NodeId { + self.id.clone() + } + + pub fn add_node(&self, node: NodeId) { + self.data.write().nodes.insert(node); + } + + pub fn add_nodes>(&self, nodes: I) { + self.data.write().nodes.extend(nodes) + } + + pub fn remove_node(&self, node: &NodeId) { + self.data.write().nodes.remove(node); + } + + pub fn take_message(&self) -> 
Option<(NodeId, Message)> { + self.data.write().messages.pop_front() + } + } + + impl Cluster for DummyCluster { + fn broadcast(&self, message: Message) -> Result<(), Error> { + let mut data = self.data.write(); + let all_nodes: Vec<_> = data + .nodes + .iter() + .cloned() + .filter(|n| n != &self.id) + .collect(); + for node in all_nodes { + data.messages.push_back((node, message.clone())); + } + Ok(()) + } + + fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { + debug_assert!(&self.id != to); + self.data.write().messages.push_back((to.clone(), message)); + Ok(()) + } + + fn is_connected(&self, node: &NodeId) -> bool { + let data = self.data.read(); + &self.id == node || data.nodes.contains(node) + } + + fn nodes(&self) -> BTreeSet { + self.data.read().nodes.iter().cloned().collect() + } + + fn configured_nodes_count(&self) -> usize { + self.data.read().nodes.len() + } + + fn connected_nodes_count(&self) -> usize { + self.data.read().nodes.len() + } + } + + /// Test message loop. + pub struct MessageLoop { + messages: MessagesQueue, + preserve_sessions: bool, + key_pairs_map: BTreeMap>, + acl_storages_map: BTreeMap>, + key_storages_map: BTreeMap>, + clusters_map: BTreeMap>>>, + } + + impl ::std::fmt::Debug for MessageLoop { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "MessageLoop({})", self.clusters_map.len()) + } + } + + impl MessageLoop { + /// Returns set of all nodes ids. + pub fn nodes(&self) -> BTreeSet { + self.clusters_map.keys().cloned().collect() + } + + /// Returns nodes id by its index. + pub fn node(&self, idx: usize) -> NodeId { + *self.clusters_map.keys().nth(idx).unwrap() + } + + /// Returns key pair of the node by its idx. + pub fn node_key_pair(&self, idx: usize) -> &Arc { + self.key_pairs_map.values().nth(idx).unwrap() + } + + /// Get cluster reference by its index. 
+ pub fn cluster(&self, idx: usize) -> &Arc>> { + self.clusters_map.values().nth(idx).unwrap() + } + + /// Get keys storage reference by its index. + pub fn key_storage(&self, idx: usize) -> &Arc { + self.key_storages_map.values().nth(idx).unwrap() + } + + /// Get keys storage reference by node id. + pub fn key_storage_of(&self, node: &NodeId) -> &Arc { + &self.key_storages_map[node] + } + + /// Replace key storage of the node by its id. + pub fn replace_key_storage_of(&mut self, node: &NodeId, key_storage: Arc) { + *self.key_storages_map.get_mut(node).unwrap() = key_storage; + } + + /// Get ACL storage reference by its index. + pub fn acl_storage(&self, idx: usize) -> &Arc { + self.acl_storages_map.values().nth(idx).unwrap() + } + + /// Get sessions container reference by its index. + pub fn sessions(&self, idx: usize) -> &Arc { + &self.cluster(idx).data.sessions + } + + /// Get sessions container reference by node id. + pub fn sessions_of(&self, node: &NodeId) -> &Arc { + &self.clusters_map[node].data.sessions + } + + /// Isolate node from others. + pub fn isolate(&self, idx: usize) { + let node = self.node(idx); + for (i, cluster) in self.clusters_map.values().enumerate() { + if i == idx { + cluster.data.connections.isolate(); + } else { + cluster.data.connections.disconnect(node); + } + } + } + + /// Exclude node from cluster. + pub fn exclude(&mut self, idx: usize) { + let node = self.node(idx); + for (i, cluster) in self.clusters_map.values().enumerate() { + if i != idx { + cluster.data.connections.exclude(node); + } + } + self.key_storages_map.remove(&node); + self.acl_storages_map.remove(&node); + self.key_pairs_map.remove(&node); + self.clusters_map.remove(&node); + } + + /// Include new node to the cluster. 
+ pub fn include(&mut self, node_key_pair: Arc) -> usize { + let key_storage = Arc::new(DummyKeyStorage::default()); + let acl_storage = Arc::new(DummyAclStorage::default()); + let cluster_params = ClusterConfiguration { + self_key_pair: node_key_pair.clone(), + key_server_set: Arc::new(MapKeyServerSet::new( + false, + self.nodes() + .iter() + .chain(::std::iter::once(node_key_pair.public())) + .map(|n| (*n, format!("127.0.0.1:{}", 13).parse().unwrap())) + .collect(), + )), + key_storage: key_storage.clone(), + acl_storage: acl_storage.clone(), + admin_public: None, + preserve_sessions: self.preserve_sessions, + }; + let cluster = new_test_cluster(self.messages.clone(), cluster_params).unwrap(); + + for cluster in self.clusters_map.values() { + cluster + .data + .connections + .include(node_key_pair.public().clone()); + } + self.acl_storages_map + .insert(*node_key_pair.public(), acl_storage); + self.key_storages_map + .insert(*node_key_pair.public(), key_storage); + self.clusters_map.insert(*node_key_pair.public(), cluster); + self.key_pairs_map + .insert(*node_key_pair.public(), node_key_pair.clone()); + self.clusters_map + .keys() + .position(|k| k == node_key_pair.public()) + .unwrap() + } + + /// Is empty message queue? + pub fn is_empty(&self) -> bool { + self.messages.lock().is_empty() + } + + /// Takes next message from the queue. + pub fn take_message(&self) -> Option<(NodeId, NodeId, Message)> { + self.messages.lock().pop_front() + } + + /// Process single message. + pub fn process_message(&self, from: NodeId, to: NodeId, message: Message) { + let cluster_data = &self.clusters_map[&to].data; + let connection = cluster_data + .connections + .provider() + .connection(&from) + .unwrap(); + cluster_data + .message_processor + .process_connection_message(connection, message); + } + + /// Take next message and process it. 
+ pub fn take_and_process_message(&self) -> bool { + let (from, to, message) = match self.take_message() { + Some((from, to, message)) => (from, to, message), + None => return false, + }; + + self.process_message(from, to, message); + true + } + + /// Loops until `predicate` returns `true` or there are no messages in the queue. + pub fn loop_until(&self, predicate: F) + where + F: Fn() -> bool, + { + while !predicate() { + if !self.take_and_process_message() { + panic!("message queue is empty but goal is not achieved"); + } + } + } + } + + pub fn make_clusters(num_nodes: usize) -> MessageLoop { + do_make_clusters(num_nodes, false) + } + + pub fn make_clusters_and_preserve_sessions(num_nodes: usize) -> MessageLoop { + do_make_clusters(num_nodes, true) + } + + fn do_make_clusters(num_nodes: usize, preserve_sessions: bool) -> MessageLoop { + let ports_begin = 0; + let messages = Arc::new(Mutex::new(VecDeque::new())); + let key_pairs: Vec<_> = (0..num_nodes) + .map(|_| Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap()))) + .collect(); + let key_storages: Vec<_> = (0..num_nodes) + .map(|_| Arc::new(DummyKeyStorage::default())) + .collect(); + let acl_storages: Vec<_> = (0..num_nodes) + .map(|_| Arc::new(DummyAclStorage::default())) + .collect(); + let cluster_params: Vec<_> = (0..num_nodes) + .map(|i| ClusterConfiguration { + self_key_pair: key_pairs[i].clone(), + key_server_set: Arc::new(MapKeyServerSet::new( + false, + key_pairs + .iter() + .enumerate() + .map(|(j, kp)| { + ( + *kp.public(), + format!("127.0.0.1:{}", ports_begin + j as u16) + .parse() + .unwrap(), + ) + }) + .collect(), + )), + key_storage: key_storages[i].clone(), + acl_storage: acl_storages[i].clone(), + admin_public: None, + preserve_sessions, + }) + .collect(); + let clusters: Vec<_> = cluster_params + .into_iter() + .map(|params| new_test_cluster(messages.clone(), params).unwrap()) + .collect(); + + let clusters_map = clusters + .iter() + .map(|c| (*c.data.config.self_key_pair.public(), 
c.clone())) + .collect(); + let key_pairs_map = key_pairs.into_iter().map(|kp| (*kp.public(), kp)).collect(); + let key_storages_map = clusters + .iter() + .zip(key_storages.into_iter()) + .map(|(c, ks)| (*c.data.config.self_key_pair.public(), ks)) + .collect(); + let acl_storages_map = clusters + .iter() + .zip(acl_storages.into_iter()) + .map(|(c, acls)| (*c.data.config.self_key_pair.public(), acls)) + .collect(); + MessageLoop { + preserve_sessions, + messages, + key_pairs_map, + acl_storages_map, + key_storages_map, + clusters_map, + } + } + + #[test] + fn cluster_wont_start_generation_session_if_not_fully_connected() { + let ml = make_clusters(3); + ml.cluster(0) + .data + .connections + .disconnect(*ml.cluster(0).data.self_key_pair.public()); + match ml.cluster(0).client().new_generation_session( + SessionId::default(), + Default::default(), + Default::default(), + 1, + ) { + Err(Error::NodeDisconnected) => (), + Err(e) => panic!("unexpected error {:?}", e), + _ => panic!("unexpected success"), + } + } + + #[test] + fn error_in_generation_session_broadcasted_to_all_other_nodes() { + let _ = ::env_logger::try_init(); + let ml = make_clusters(3); + + // ask one of nodes to produce faulty generation sessions + ml.cluster(1).client().make_faulty_generation_sessions(); + + // start && wait for generation session to fail + let session = ml + .cluster(0) + .client() + .new_generation_session( + SessionId::default(), + Default::default(), + Default::default(), + 1, + ) + .unwrap(); + ml.loop_until(|| { + session.joint_public_and_secret().is_some() + && ml + .cluster(0) + .client() + .generation_session(&SessionId::default()) + .is_none() + }); + assert!(session.joint_public_and_secret().unwrap().is_err()); + + // check that faulty session is either removed from all nodes, or nonexistent (already removed) + for i in 1..3 { + if let Some(session) = ml + .cluster(i) + .client() + .generation_session(&SessionId::default()) + { + // wait for both session completion && 
session removal (session completion event is fired + // before session is removed from its own container by cluster) + ml.loop_until(|| { + session.joint_public_and_secret().is_some() + && ml + .cluster(i) + .client() + .generation_session(&SessionId::default()) + .is_none() + }); + assert!(session.joint_public_and_secret().unwrap().is_err()); + } + } + } + + #[test] + fn generation_session_completion_signalled_if_failed_on_master() { + let _ = ::env_logger::try_init(); + let ml = make_clusters(3); + + // ask one of nodes to produce faulty generation sessions + ml.cluster(0).client().make_faulty_generation_sessions(); + + // start && wait for generation session to fail + let session = ml + .cluster(0) + .client() + .new_generation_session( + SessionId::default(), + Default::default(), + Default::default(), + 1, + ) + .unwrap(); + ml.loop_until(|| { + session.joint_public_and_secret().is_some() + && ml + .cluster(0) + .client() + .generation_session(&SessionId::default()) + .is_none() + }); + assert!(session.joint_public_and_secret().unwrap().is_err()); + + // check that faulty session is either removed from all nodes, or nonexistent (already removed) + for i in 1..3 { + if let Some(session) = ml + .cluster(i) + .client() + .generation_session(&SessionId::default()) + { + let session = session.clone(); + // wait for both session completion && session removal (session completion event is fired + // before session is removed from its own container by cluster) + ml.loop_until(|| { + session.joint_public_and_secret().is_some() + && ml + .cluster(i) + .client() + .generation_session(&SessionId::default()) + .is_none() + }); + assert!(session.joint_public_and_secret().unwrap().is_err()); + } + } + } + + #[test] + fn generation_session_is_removed_when_succeeded() { + let _ = ::env_logger::try_init(); + let ml = make_clusters(3); + + // start && wait for generation session to complete + let session = ml + .cluster(0) + .client() + .new_generation_session( + 
SessionId::default(), + Default::default(), + Default::default(), + 1, + ) + .unwrap(); + ml.loop_until(|| { + (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && ml + .cluster(0) + .client() + .generation_session(&SessionId::default()) + .is_none() + }); + assert!(session.joint_public_and_secret().unwrap().is_ok()); + + // check that on non-master nodes session is either: + // already removed + // or it is removed right after completion + for i in 1..3 { + if let Some(session) = ml + .cluster(i) + .client() + .generation_session(&SessionId::default()) + { + // run to completion if completion message is still on the way + // AND check that it is actually removed from cluster sessions + ml.loop_until(|| { + (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && ml + .cluster(i) + .client() + .generation_session(&SessionId::default()) + .is_none() + }); + } + } + } + + #[test] + fn sessions_are_removed_when_initialization_fails() { + let ml = make_clusters(3); + let client = ml.cluster(0).client(); + + // generation session + { + // try to start generation session => fail in initialization + assert_eq!( + client + .new_generation_session(SessionId::default(), None, Default::default(), 100) + .map(|_| ()), + Err(Error::NotEnoughNodesForThreshold) + ); + + // try to start generation session => fails in initialization + assert_eq!( + client + .new_generation_session(SessionId::default(), None, Default::default(), 100) + .map(|_| ()), + Err(Error::NotEnoughNodesForThreshold) + ); + + assert!(ml.cluster(0).data.sessions.generation_sessions.is_empty()); + } + + // decryption session + { + // try to start decryption session => fails in initialization + assert_eq!( + client + .new_decryption_session( + Default::default(), + Default::default(), + Default::default(), + Some(Default::default()), + false, + false + ) + .map(|_| ()), + 
Err(Error::InvalidMessage) + ); + + // try to start generation session => fails in initialization + assert_eq!( + client + .new_decryption_session( + Default::default(), + Default::default(), + Default::default(), + Some(Default::default()), + false, + false + ) + .map(|_| ()), + Err(Error::InvalidMessage) + ); + + assert!(ml.cluster(0).data.sessions.decryption_sessions.is_empty()); + assert!(ml.cluster(0).data.sessions.negotiation_sessions.is_empty()); + } + } + + #[test] + fn schnorr_signing_session_completes_if_node_does_not_have_a_share() { + let _ = ::env_logger::try_init(); + let ml = make_clusters(3); + + // start && wait for generation session to complete + let session = ml + .cluster(0) + .client() + .new_generation_session( + SessionId::default(), + Default::default(), + Default::default(), + 1, + ) + .unwrap(); + ml.loop_until(|| { + (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && ml + .cluster(0) + .client() + .generation_session(&SessionId::default()) + .is_none() + }); + assert!(session.joint_public_and_secret().unwrap().is_ok()); + + // now remove share from node2 + assert!((0..3).all(|i| ml.cluster(i).data.sessions.generation_sessions.is_empty())); + ml.cluster(2) + .data + .config + .key_storage + .remove(&Default::default()) + .unwrap(); + + // and try to sign message with generated key + let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); + let session0 = ml + .cluster(0) + .client() + .new_schnorr_signing_session( + Default::default(), + signature.into(), + None, + Default::default(), + ) + .unwrap(); + let session = ml + .cluster(0) + .data + .sessions + .schnorr_signing_sessions + .first() + .unwrap(); + + ml.loop_until(|| { + session.is_finished() + && (0..3).all(|i| { + ml.cluster(i) + .data + .sessions + .schnorr_signing_sessions + .is_empty() + }) + }); + session0.wait().unwrap(); + + // and try to sign message with generated key using 
node that has no key share + let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); + let session2 = ml + .cluster(2) + .client() + .new_schnorr_signing_session( + Default::default(), + signature.into(), + None, + Default::default(), + ) + .unwrap(); + let session = ml + .cluster(2) + .data + .sessions + .schnorr_signing_sessions + .first() + .unwrap(); + + ml.loop_until(|| { + session.is_finished() + && (0..3).all(|i| { + ml.cluster(i) + .data + .sessions + .schnorr_signing_sessions + .is_empty() + }) + }); + session2.wait().unwrap(); + + // now remove share from node1 + ml.cluster(1) + .data + .config + .key_storage + .remove(&Default::default()) + .unwrap(); + + // and try to sign message with generated key + let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); + let session1 = ml + .cluster(0) + .client() + .new_schnorr_signing_session( + Default::default(), + signature.into(), + None, + Default::default(), + ) + .unwrap(); + let session = ml + .cluster(0) + .data + .sessions + .schnorr_signing_sessions + .first() + .unwrap(); + + ml.loop_until(|| session.is_finished()); + session1.wait().unwrap_err(); + } + + #[test] + fn ecdsa_signing_session_completes_if_node_does_not_have_a_share() { + let _ = ::env_logger::try_init(); + let ml = make_clusters(4); + + // start && wait for generation session to complete + let session = ml + .cluster(0) + .client() + .new_generation_session( + SessionId::default(), + Default::default(), + Default::default(), + 1, + ) + .unwrap(); + ml.loop_until(|| { + (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && ml + .cluster(0) + .client() + .generation_session(&SessionId::default()) + .is_none() + }); + assert!(session.joint_public_and_secret().unwrap().is_ok()); + + // now remove share from node2 + assert!((0..3).all(|i| ml.cluster(i).data.sessions.generation_sessions.is_empty())); + ml.cluster(2) + 
.data + .config + .key_storage + .remove(&Default::default()) + .unwrap(); + + // and try to sign message with generated key + let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); + let session0 = ml + .cluster(0) + .client() + .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()) + .unwrap(); + let session = ml + .cluster(0) + .data + .sessions + .ecdsa_signing_sessions + .first() + .unwrap(); + + ml.loop_until(|| { + session.is_finished() + && (0..3).all(|i| { + ml.cluster(i) + .data + .sessions + .ecdsa_signing_sessions + .is_empty() + }) + }); + session0.wait().unwrap(); + + // and try to sign message with generated key using node that has no key share + let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); + let session2 = ml + .cluster(2) + .client() + .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()) + .unwrap(); + let session = ml + .cluster(2) + .data + .sessions + .ecdsa_signing_sessions + .first() + .unwrap(); + ml.loop_until(|| { + session.is_finished() + && (0..3).all(|i| { + ml.cluster(i) + .data + .sessions + .ecdsa_signing_sessions + .is_empty() + }) + }); + session2.wait().unwrap(); + + // now remove share from node1 + ml.cluster(1) + .data + .config + .key_storage + .remove(&Default::default()) + .unwrap(); + + // and try to sign message with generated key + let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); + let session1 = ml + .cluster(0) + .client() + .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()) + .unwrap(); + let session = ml + .cluster(0) + .data + .sessions + .ecdsa_signing_sessions + .first() + .unwrap(); + ml.loop_until(|| session.is_finished()); + session1.wait().unwrap_err(); + } } diff --git a/secret-store/src/key_server_cluster/cluster_connections.rs b/secret-store/src/key_server_cluster/cluster_connections.rs index 
b484e6d8e..92ff86dbf 100644 --- a/secret-store/src/key_server_cluster/cluster_connections.rs +++ b/secret-store/src/key_server_cluster/cluster_connections.rs @@ -14,34 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::collections::BTreeSet; -use std::sync::Arc; -use key_server_cluster::{Error, NodeId}; -use key_server_cluster::message::Message; +use key_server_cluster::{message::Message, Error, NodeId}; +use std::{collections::BTreeSet, sync::Arc}; /// Connection to the single node. Provides basic information about connected node and /// allows sending messages to this node. pub trait Connection: Send + Sync { - /// Is this inbound connection? This only matters when both nodes are simultaneously establishing - /// two connections to each other. The agreement is that the inbound connection from the node with - /// lower NodeId is used and the other connection is closed. - fn is_inbound(&self) -> bool; - /// Returns id of the connected node. - fn node_id(&self) -> &NodeId; - /// Returns 'address' of the node to use in traces. - fn node_address(&self) -> String; - /// Send message to the connected node. - fn send_message(&self, message: Message); + /// Is this inbound connection? This only matters when both nodes are simultaneously establishing + /// two connections to each other. The agreement is that the inbound connection from the node with + /// lower NodeId is used and the other connection is closed. + fn is_inbound(&self) -> bool; + /// Returns id of the connected node. + fn node_id(&self) -> &NodeId; + /// Returns 'address' of the node to use in traces. + fn node_address(&self) -> String; + /// Send message to the connected node. + fn send_message(&self, message: Message); } /// Connections manager. Responsible for keeping us connected to all required nodes. pub trait ConnectionManager: 'static + Send + Sync { - /// Returns shared reference to connections provider. 
- fn provider(&self) -> Arc; - /// Try to reach all disconnected nodes immediately. This method is exposed mostly for - /// tests, where all 'nodes' are starting listening for incoming connections first and - /// only after this, they're actually start connecting to each other. - fn connect(&self); + /// Returns shared reference to connections provider. + fn provider(&self) -> Arc; + /// Try to reach all disconnected nodes immediately. This method is exposed mostly for + /// tests, where all 'nodes' are starting listening for incoming connections first and + /// only after this, they're actually start connecting to each other. + fn connect(&self); } /// Connections provider. Holds all active connections and the set of nodes that we need to @@ -49,128 +47,134 @@ pub trait ConnectionManager: 'static + Send + Sync { /// nodes could change (at behalf of the connection manager). /// Clone operation should be cheap (Arc). pub trait ConnectionProvider: Send + Sync { - /// Returns the set of currently connected nodes. Error is returned when our node is - /// not a part of the cluster ('isolated' node). - fn connected_nodes(&self) -> Result, Error>; - /// Returns the set of currently disconnected nodes. - fn disconnected_nodes(&self) -> BTreeSet; - /// Returns the reference to the active node connection or None if the node is not connected. - fn connection(&self, node: &NodeId) -> Option>; + /// Returns the set of currently connected nodes. Error is returned when our node is + /// not a part of the cluster ('isolated' node). + fn connected_nodes(&self) -> Result, Error>; + /// Returns the set of currently disconnected nodes. + fn disconnected_nodes(&self) -> BTreeSet; + /// Returns the reference to the active node connection or None if the node is not connected. 
+ fn connection(&self, node: &NodeId) -> Option>; } #[cfg(test)] pub mod tests { - use std::collections::{BTreeSet, VecDeque}; - use std::sync::Arc; - use std::sync::atomic::{AtomicBool, Ordering}; - use parking_lot::Mutex; - use key_server_cluster::{Error, NodeId}; - use key_server_cluster::message::Message; - use super::{ConnectionManager, Connection, ConnectionProvider}; + use super::{Connection, ConnectionManager, ConnectionProvider}; + use key_server_cluster::{message::Message, Error, NodeId}; + use parking_lot::Mutex; + use std::{ + collections::{BTreeSet, VecDeque}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + }; - /// Shared messages queue. - pub type MessagesQueue = Arc>>; + /// Shared messages queue. + pub type MessagesQueue = Arc>>; - /// Single node connections. - pub struct TestConnections { - node: NodeId, - is_isolated: AtomicBool, - connected_nodes: Mutex>, - disconnected_nodes: Mutex>, - messages: MessagesQueue, - } + /// Single node connections. + pub struct TestConnections { + node: NodeId, + is_isolated: AtomicBool, + connected_nodes: Mutex>, + disconnected_nodes: Mutex>, + messages: MessagesQueue, + } - /// Single connection. - pub struct TestConnection { - from: NodeId, - to: NodeId, - messages: MessagesQueue, - } + /// Single connection. 
+ pub struct TestConnection { + from: NodeId, + to: NodeId, + messages: MessagesQueue, + } - impl TestConnections { - pub fn isolate(&self) { - let connected_nodes = ::std::mem::replace(&mut *self.connected_nodes.lock(), Default::default()); - self.is_isolated.store(true, Ordering::Relaxed); - self.disconnected_nodes.lock().extend(connected_nodes) - } + impl TestConnections { + pub fn isolate(&self) { + let connected_nodes = + ::std::mem::replace(&mut *self.connected_nodes.lock(), Default::default()); + self.is_isolated.store(true, Ordering::Relaxed); + self.disconnected_nodes.lock().extend(connected_nodes) + } - pub fn disconnect(&self, node: NodeId) { - self.connected_nodes.lock().remove(&node); - self.disconnected_nodes.lock().insert(node); - } + pub fn disconnect(&self, node: NodeId) { + self.connected_nodes.lock().remove(&node); + self.disconnected_nodes.lock().insert(node); + } - pub fn exclude(&self, node: NodeId) { - self.connected_nodes.lock().remove(&node); - self.disconnected_nodes.lock().remove(&node); - } + pub fn exclude(&self, node: NodeId) { + self.connected_nodes.lock().remove(&node); + self.disconnected_nodes.lock().remove(&node); + } - pub fn include(&self, node: NodeId) { - self.connected_nodes.lock().insert(node); - } - } + pub fn include(&self, node: NodeId) { + self.connected_nodes.lock().insert(node); + } + } - impl ConnectionManager for Arc { - fn provider(&self) -> Arc { - self.clone() - } + impl ConnectionManager for Arc { + fn provider(&self) -> Arc { + self.clone() + } - fn connect(&self) {} - } + fn connect(&self) {} + } - impl ConnectionProvider for TestConnections { - fn connected_nodes(&self) -> Result, Error> { - match self.is_isolated.load(Ordering::Relaxed) { - false => Ok(self.connected_nodes.lock().clone()), - true => Err(Error::NodeDisconnected), - } - } + impl ConnectionProvider for TestConnections { + fn connected_nodes(&self) -> Result, Error> { + match self.is_isolated.load(Ordering::Relaxed) { + false => 
Ok(self.connected_nodes.lock().clone()), + true => Err(Error::NodeDisconnected), + } + } - fn disconnected_nodes(&self) -> BTreeSet { - self.disconnected_nodes.lock().clone() - } + fn disconnected_nodes(&self) -> BTreeSet { + self.disconnected_nodes.lock().clone() + } - fn connection(&self, node: &NodeId) -> Option> { - match self.connected_nodes.lock().contains(node) { - true => Some(Arc::new(TestConnection { - from: self.node, - to: *node, - messages: self.messages.clone(), - })), - false => None, - } - } - } + fn connection(&self, node: &NodeId) -> Option> { + match self.connected_nodes.lock().contains(node) { + true => Some(Arc::new(TestConnection { + from: self.node, + to: *node, + messages: self.messages.clone(), + })), + false => None, + } + } + } - impl Connection for TestConnection { - fn is_inbound(&self) -> bool { - false - } + impl Connection for TestConnection { + fn is_inbound(&self) -> bool { + false + } - fn node_id(&self) -> &NodeId { - &self.to - } + fn node_id(&self) -> &NodeId { + &self.to + } - fn node_address(&self) -> String { - format!("{}", self.to) - } + fn node_address(&self) -> String { + format!("{}", self.to) + } - fn send_message(&self, message: Message) { - self.messages.lock().push_back((self.from, self.to, message)) - } - } + fn send_message(&self, message: Message) { + self.messages + .lock() + .push_back((self.from, self.to, message)) + } + } - pub fn new_test_connections( - messages: MessagesQueue, - node: NodeId, - mut nodes: BTreeSet - ) -> Arc { - let is_isolated = !nodes.remove(&node); - Arc::new(TestConnections { - node, - is_isolated: AtomicBool::new(is_isolated), - connected_nodes: Mutex::new(nodes), - disconnected_nodes: Default::default(), - messages, - }) - } + pub fn new_test_connections( + messages: MessagesQueue, + node: NodeId, + mut nodes: BTreeSet, + ) -> Arc { + let is_isolated = !nodes.remove(&node); + Arc::new(TestConnections { + node, + is_isolated: AtomicBool::new(is_isolated), + connected_nodes: 
Mutex::new(nodes), + disconnected_nodes: Default::default(), + messages, + }) + } } diff --git a/secret-store/src/key_server_cluster/cluster_connections_net.rs b/secret-store/src/key_server_cluster/cluster_connections_net.rs index 43d61dc7e..6d3f9a6b1 100644 --- a/secret-store/src/key_server_cluster/cluster_connections_net.rs +++ b/secret-store/src/key_server_cluster/cluster_connections_net.rs @@ -14,28 +14,37 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::collections::{BTreeMap, BTreeSet}; -use std::collections::btree_map::Entry; -use std::io; -use std::net::{SocketAddr, IpAddr}; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use futures::{future, Future, Stream}; -use parking_lot::{Mutex, RwLock}; -use tokio::net::{TcpListener, TcpStream}; -use tokio::timer::{Interval, timeout::Error as TimeoutError}; -use tokio_io::IoFuture; use ethkey::KeyPair; +use futures::{future, Future, Stream}; +use key_server_cluster::{ + cluster_connections::{Connection, ConnectionManager, ConnectionProvider}, + cluster_message_processor::MessageProcessor, + connection_trigger::{ConnectionTrigger, Maintain}, + io::{ + read_encrypted_message, write_encrypted_message, DeadlineStatus, ReadMessage, + SharedTcpStream, WriteMessage, + }, + message::{self, ClusterMessage, Message}, + net::{ + accept_connection as io_accept_connection, connect as io_connect, + Connection as IoConnection, + }, + ClusterConfiguration, Error, NodeId, NodeKeyPair, +}; use parity_runtime::Executor; -use key_server_cluster::{Error, NodeId, ClusterConfiguration, NodeKeyPair}; -use key_server_cluster::cluster_connections::{ConnectionProvider, Connection, ConnectionManager}; -use key_server_cluster::connection_trigger::{Maintain, ConnectionTrigger}; -use key_server_cluster::cluster_message_processor::MessageProcessor; -use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, - read_encrypted_message, WriteMessage, 
write_encrypted_message}; -use key_server_cluster::message::{self, ClusterMessage, Message}; -use key_server_cluster::net::{accept_connection as io_accept_connection, - connect as io_connect, Connection as IoConnection}; +use parking_lot::{Mutex, RwLock}; +use std::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + io, + net::{IpAddr, SocketAddr}, + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::{ + net::{TcpListener, TcpStream}, + timer::{timeout::Error as TimeoutError, Interval}, +}; +use tokio_io::IoFuture; /// Empty future. pub type BoxedEmptyFuture = Box + Send>; @@ -55,361 +64,383 @@ const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60); /// Network connection manager configuration. pub struct NetConnectionsManagerConfig { - /// Allow connecting to 'higher' nodes. - pub allow_connecting_to_higher_nodes: bool, - /// Interface to listen to. - pub listen_address: (String, u16), - /// True if we should autostart key servers set change session when servers set changes? - /// This will only work when servers set is configured using KeyServerSet contract. - pub auto_migrate_enabled: bool, + /// Allow connecting to 'higher' nodes. + pub allow_connecting_to_higher_nodes: bool, + /// Interface to listen to. + pub listen_address: (String, u16), + /// True if we should autostart key servers set change session when servers set changes? + /// This will only work when servers set is configured using KeyServerSet contract. + pub auto_migrate_enabled: bool, } /// Network connections manager. pub struct NetConnectionsManager { - /// Address we're listening for incoming connections. - listen_address: SocketAddr, - /// Shared cluster connections data reference. - data: Arc, + /// Address we're listening for incoming connections. + listen_address: SocketAddr, + /// Shared cluster connections data reference. + data: Arc, } /// Network connections data. Shared among NetConnectionsManager and spawned futures. 
struct NetConnectionsData { - /// Allow connecting to 'higher' nodes. - allow_connecting_to_higher_nodes: bool, - /// Reference to tokio task executor. - executor: Executor, - /// Key pair of this node. - self_key_pair: Arc, - /// Network messages processor. - message_processor: Arc, - /// Connections trigger. - trigger: Mutex>, - /// Mutable connection data. - container: Arc>, + /// Allow connecting to 'higher' nodes. + allow_connecting_to_higher_nodes: bool, + /// Reference to tokio task executor. + executor: Executor, + /// Key pair of this node. + self_key_pair: Arc, + /// Network messages processor. + message_processor: Arc, + /// Connections trigger. + trigger: Mutex>, + /// Mutable connection data. + container: Arc>, } /// Network connections container. This is the only mutable data of NetConnectionsManager. /// The set of nodes is mutated by the connection trigger and the connections set is also /// mutated by spawned futures. pub struct NetConnectionsContainer { - /// Is this node isolated from cluster? - pub is_isolated: bool, - /// Current key servers set. - pub nodes: BTreeMap, - /// Active connections to key servers. - pub connections: BTreeMap>, + /// Is this node isolated from cluster? + pub is_isolated: bool, + /// Current key servers set. + pub nodes: BTreeMap, + /// Active connections to key servers. + pub connections: BTreeMap>, } /// Network connection to single key server node. pub struct NetConnection { - executor: Executor, - /// Id of the peer node. - node_id: NodeId, - /// Address of the peer node. - node_address: SocketAddr, - /// Is this inbound (true) or outbound (false) connection? - is_inbound: bool, - /// Key pair that is used to encrypt connection' messages. - key: KeyPair, - /// Last message time. - last_message_time: RwLock, - /// Underlying TCP stream. - stream: SharedTcpStream, + executor: Executor, + /// Id of the peer node. + node_id: NodeId, + /// Address of the peer node. 
+ node_address: SocketAddr, + /// Is this inbound (true) or outbound (false) connection? + is_inbound: bool, + /// Key pair that is used to encrypt connection' messages. + key: KeyPair, + /// Last message time. + last_message_time: RwLock, + /// Underlying TCP stream. + stream: SharedTcpStream, } impl NetConnectionsManager { - /// Create new network connections manager. - pub fn new( - executor: Executor, - message_processor: Arc, - trigger: Box, - container: Arc>, - config: &ClusterConfiguration, - net_config: NetConnectionsManagerConfig, - ) -> Result { - let listen_address = make_socket_address( - &net_config.listen_address.0, - net_config.listen_address.1)?; + /// Create new network connections manager. + pub fn new( + executor: Executor, + message_processor: Arc, + trigger: Box, + container: Arc>, + config: &ClusterConfiguration, + net_config: NetConnectionsManagerConfig, + ) -> Result { + let listen_address = + make_socket_address(&net_config.listen_address.0, net_config.listen_address.1)?; - Ok(NetConnectionsManager { - listen_address, - data: Arc::new(NetConnectionsData { - allow_connecting_to_higher_nodes: net_config.allow_connecting_to_higher_nodes, - executor, - message_processor, - self_key_pair: config.self_key_pair.clone(), - trigger: Mutex::new(trigger), - container, - }), - }) - } + Ok(NetConnectionsManager { + listen_address, + data: Arc::new(NetConnectionsData { + allow_connecting_to_higher_nodes: net_config.allow_connecting_to_higher_nodes, + executor, + message_processor, + self_key_pair: config.self_key_pair.clone(), + trigger: Mutex::new(trigger), + container, + }), + }) + } - /// Start listening for connections and schedule connections maintenance. - pub fn start(&self) -> Result<(), Error> { - net_listen(&self.listen_address, self.data.clone())?; - net_schedule_maintain(self.data.clone()); - Ok(()) - } + /// Start listening for connections and schedule connections maintenance. 
+ pub fn start(&self) -> Result<(), Error> { + net_listen(&self.listen_address, self.data.clone())?; + net_schedule_maintain(self.data.clone()); + Ok(()) + } } impl ConnectionManager for NetConnectionsManager { - fn provider(&self) -> Arc { - self.data.container.clone() - } + fn provider(&self) -> Arc { + self.data.container.clone() + } - fn connect(&self) { - net_connect_disconnected(self.data.clone()); - } + fn connect(&self) { + net_connect_disconnected(self.data.clone()); + } } impl ConnectionProvider for RwLock { - fn connected_nodes(&self) -> Result, Error> { - let connections = self.read(); - if connections.is_isolated { - return Err(Error::NodeDisconnected); - } + fn connected_nodes(&self) -> Result, Error> { + let connections = self.read(); + if connections.is_isolated { + return Err(Error::NodeDisconnected); + } - Ok(connections.connections.keys().cloned().collect()) - } + Ok(connections.connections.keys().cloned().collect()) + } - fn disconnected_nodes(&self) -> BTreeSet { - let connections = self.read(); - connections.nodes.keys() - .filter(|node_id| !connections.connections.contains_key(node_id)) - .cloned() - .collect() - } + fn disconnected_nodes(&self) -> BTreeSet { + let connections = self.read(); + connections + .nodes + .keys() + .filter(|node_id| !connections.connections.contains_key(node_id)) + .cloned() + .collect() + } - fn connection(&self, node: &NodeId) -> Option> { - match self.read().connections.get(node).cloned() { - Some(connection) => Some(connection), - None => None, - } - } + fn connection(&self, node: &NodeId) -> Option> { + match self.read().connections.get(node).cloned() { + Some(connection) => Some(connection), + None => None, + } + } } impl NetConnection { - /// Create new connection. 
- pub fn new(executor: Executor, is_inbound: bool, connection: IoConnection) -> NetConnection { - NetConnection { - executor, - node_id: connection.node_id, - node_address: connection.address, - is_inbound: is_inbound, - stream: connection.stream, - key: connection.key, - last_message_time: RwLock::new(Instant::now()), - } - } + /// Create new connection. + pub fn new(executor: Executor, is_inbound: bool, connection: IoConnection) -> NetConnection { + NetConnection { + executor, + node_id: connection.node_id, + node_address: connection.address, + is_inbound: is_inbound, + stream: connection.stream, + key: connection.key, + last_message_time: RwLock::new(Instant::now()), + } + } - /// Get last message time. - pub fn last_message_time(&self) -> Instant { - *self.last_message_time.read() - } + /// Get last message time. + pub fn last_message_time(&self) -> Instant { + *self.last_message_time.read() + } - /// Update last message time - pub fn set_last_message_time(&self, last_message_time: Instant) { - *self.last_message_time.write() = last_message_time - } + /// Update last message time + pub fn set_last_message_time(&self, last_message_time: Instant) { + *self.last_message_time.write() = last_message_time + } - /// Returns future that sends encrypted message over this connection. - pub fn send_message_future(&self, message: Message) -> WriteMessage { - write_encrypted_message(self.stream.clone(), &self.key, message) - } + /// Returns future that sends encrypted message over this connection. + pub fn send_message_future(&self, message: Message) -> WriteMessage { + write_encrypted_message(self.stream.clone(), &self.key, message) + } - /// Returns future that reads encrypted message from this connection. - pub fn read_message_future(&self) -> ReadMessage { - read_encrypted_message(self.stream.clone(), self.key.clone()) - } + /// Returns future that reads encrypted message from this connection. 
+ pub fn read_message_future(&self) -> ReadMessage { + read_encrypted_message(self.stream.clone(), self.key.clone()) + } } impl Connection for NetConnection { - fn is_inbound(&self) -> bool { - self.is_inbound - } + fn is_inbound(&self) -> bool { + self.is_inbound + } - fn node_id(&self) -> &NodeId { - &self.node_id - } + fn node_id(&self) -> &NodeId { + &self.node_id + } - fn node_address(&self) -> String { - format!("{}", self.node_address) - } + fn node_address(&self) -> String { + format!("{}", self.node_address) + } - fn send_message(&self, message: Message) { - execute(&self.executor, self.send_message_future(message).then(|_| Ok(()))); - } + fn send_message(&self, message: Message) { + execute( + &self.executor, + self.send_message_future(message).then(|_| Ok(())), + ); + } } impl NetConnectionsData { - /// Executes closure for each active connection. - pub fn active_connections(&self) -> Vec> { - self.container.read().connections.values().cloned().collect() - } + /// Executes closure for each active connection. + pub fn active_connections(&self) -> Vec> { + self.container + .read() + .connections + .values() + .cloned() + .collect() + } - /// Executes closure for each disconnected node. - pub fn disconnected_nodes(&self) -> Vec<(NodeId, SocketAddr)> { - let container = self.container.read(); - container.nodes.iter() - .filter(|(node_id, _)| !container.connections.contains_key(node_id)) - .map(|(node_id, addr)| (*node_id, *addr)) - .collect() - } + /// Executes closure for each disconnected node. + pub fn disconnected_nodes(&self) -> Vec<(NodeId, SocketAddr)> { + let container = self.container.read(); + container + .nodes + .iter() + .filter(|(node_id, _)| !container.connections.contains_key(node_id)) + .map(|(node_id, addr)| (*node_id, *addr)) + .collect() + } - /// Try to insert new connection. Returns true if connection has been inserted. 
- /// Returns false (and ignores connections) if: - /// - we do not expect connection from this node - /// - we are already connected to the node and existing connection 'supersede' - /// new connection by agreement - pub fn insert(&self, connection: Arc) -> bool { - let node = *connection.node_id(); - let mut container = self.container.write(); - if !container.nodes.contains_key(&node) { - trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}", + /// Try to insert new connection. Returns true if connection has been inserted. + /// Returns false (and ignores connections) if: + /// - we do not expect connection from this node + /// - we are already connected to the node and existing connection 'supersede' + /// new connection by agreement + pub fn insert(&self, connection: Arc) -> bool { + let node = *connection.node_id(); + let mut container = self.container.write(); + if !container.nodes.contains_key(&node) { + trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}", self.self_key_pair.public(), node, connection.node_address()); - return false; - } + return false; + } - if container.connections.contains_key(&node) { - // we have already connected to the same node - // the agreement is that node with lower id must establish connection to node with higher id - if (*self.self_key_pair.public() < node && connection.is_inbound()) - || (*self.self_key_pair.public() > node && !connection.is_inbound()) { - return false; - } - } + if container.connections.contains_key(&node) { + // we have already connected to the same node + // the agreement is that node with lower id must establish connection to node with higher id + if (*self.self_key_pair.public() < node && connection.is_inbound()) + || (*self.self_key_pair.public() > node && !connection.is_inbound()) + { + return false; + } + } - trace!(target: "secretstore_net", + trace!(target: "secretstore_net", "{}: inserting connection to {} at {}. 
Connected to {} of {} nodes", self.self_key_pair.public(), node, connection.node_address(), container.connections.len() + 1, container.nodes.len()); - container.connections.insert(node, connection); + container.connections.insert(node, connection); - true - } + true + } - /// Tries to remove connection. Returns true if connection has been removed. - /// Returns false if we do not know this connection. - pub fn remove(&self, connection: &NetConnection) -> bool { - let node_id = *connection.node_id(); - let is_inbound = connection.is_inbound(); - let mut container = self.container.write(); - if let Entry::Occupied(entry) = container.connections.entry(node_id) { - if entry.get().is_inbound() != is_inbound { - return false; - } + /// Tries to remove connection. Returns true if connection has been removed. + /// Returns false if we do not know this connection. + pub fn remove(&self, connection: &NetConnection) -> bool { + let node_id = *connection.node_id(); + let is_inbound = connection.is_inbound(); + let mut container = self.container.write(); + if let Entry::Occupied(entry) = container.connections.entry(node_id) { + if entry.get().is_inbound() != is_inbound { + return false; + } - trace!(target: "secretstore_net", "{}: removing connection to {} at {}", + trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_key_pair.public(), node_id, entry.get().node_address()); - entry.remove_entry(); - - true - } else { - false - } - } + entry.remove_entry(); + + true + } else { + false + } + } } /// Listen incoming connections. -fn net_listen( - listen_address: &SocketAddr, - data: Arc, -) -> Result<(), Error> { - execute(&data.executor, net_listen_future(listen_address, data.clone())?); - Ok(()) +fn net_listen(listen_address: &SocketAddr, data: Arc) -> Result<(), Error> { + execute( + &data.executor, + net_listen_future(listen_address, data.clone())?, + ); + Ok(()) } /// Listen incoming connections future. 
fn net_listen_future( - listen_address: &SocketAddr, - data: Arc, + listen_address: &SocketAddr, + data: Arc, ) -> Result { - Ok(Box::new(TcpListener::bind(listen_address)? - .incoming() - .and_then(move |stream| { - net_accept_connection(data.clone(), stream); - Ok(()) - }) - .for_each(|_| Ok(())) - .then(|_| future::ok(())))) + Ok(Box::new( + TcpListener::bind(listen_address)? + .incoming() + .and_then(move |stream| { + net_accept_connection(data.clone(), stream); + Ok(()) + }) + .for_each(|_| Ok(())) + .then(|_| future::ok(())), + )) } /// Accept incoming connection. -fn net_accept_connection( - data: Arc, - stream: TcpStream, -) { - execute(&data.executor, net_accept_connection_future(data.clone(), stream)); +fn net_accept_connection(data: Arc, stream: TcpStream) { + execute( + &data.executor, + net_accept_connection_future(data.clone(), stream), + ); } /// Accept incoming connection future. -fn net_accept_connection_future(data: Arc, stream: TcpStream) -> BoxedEmptyFuture { - Box::new(io_accept_connection(stream, data.self_key_pair.clone()) - .then(move |result| net_process_connection_result(data, None, result)) - .then(|_| future::ok(()))) +fn net_accept_connection_future( + data: Arc, + stream: TcpStream, +) -> BoxedEmptyFuture { + Box::new( + io_accept_connection(stream, data.self_key_pair.clone()) + .then(move |result| net_process_connection_result(data, None, result)) + .then(|_| future::ok(())), + ) } /// Connect to remote node. -fn net_connect( - data: Arc, - remote: SocketAddr, -) { - execute(&data.executor, net_connect_future(data.clone(), remote)); +fn net_connect(data: Arc, remote: SocketAddr) { + execute(&data.executor, net_connect_future(data.clone(), remote)); } /// Connect to remote node future. 
-fn net_connect_future( - data: Arc, - remote: SocketAddr, -) -> BoxedEmptyFuture { - let disconnected_nodes = data.container.disconnected_nodes(); - Box::new(io_connect(&remote, data.self_key_pair.clone(), disconnected_nodes) - .then(move |result| net_process_connection_result(data, Some(remote), result)) - .then(|_| future::ok(()))) +fn net_connect_future(data: Arc, remote: SocketAddr) -> BoxedEmptyFuture { + let disconnected_nodes = data.container.disconnected_nodes(); + Box::new( + io_connect(&remote, data.self_key_pair.clone(), disconnected_nodes) + .then(move |result| net_process_connection_result(data, Some(remote), result)) + .then(|_| future::ok(())), + ) } /// Process network connection result. fn net_process_connection_result( - data: Arc, - outbound_addr: Option, - result: Result>, TimeoutError>, + data: Arc, + outbound_addr: Option, + result: Result>, TimeoutError>, ) -> IoFuture> { - match result { - Ok(DeadlineStatus::Meet(Ok(connection))) => { - let connection = Arc::new(NetConnection::new(data.executor.clone(), outbound_addr.is_none(), connection)); - if data.insert(connection.clone()) { - let maintain_action = data.trigger.lock().on_connection_established(connection.node_id()); - maintain_connection_trigger(data.clone(), maintain_action); + match result { + Ok(DeadlineStatus::Meet(Ok(connection))) => { + let connection = Arc::new(NetConnection::new( + data.executor.clone(), + outbound_addr.is_none(), + connection, + )); + if data.insert(connection.clone()) { + let maintain_action = data + .trigger + .lock() + .on_connection_established(connection.node_id()); + maintain_connection_trigger(data.clone(), maintain_action); - return net_process_connection_messages(data, connection); - } - }, - Ok(DeadlineStatus::Meet(Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}", + return net_process_connection_messages(data, connection); + } + } + Ok(DeadlineStatus::Meet(Err(err))) => { + warn!(target: 
"secretstore_net", "{}: protocol error '{}' when establishing {} connection{}", data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - }, - Ok(DeadlineStatus::Timeout) => { - warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}", + } + Ok(DeadlineStatus::Timeout) => { + warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}", data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" }, outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - }, - Err(err) => { - warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}", + } + Err(err) => { + warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}", data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - }, - } + } + } - Box::new(future::ok(Ok(()))) + Box::new(future::ok(Ok(()))) } /// Process connection messages. fn net_process_connection_messages( - data: Arc, - connection: Arc, + data: Arc, + connection: Arc, ) -> IoFuture> { - Box::new(connection + Box::new(connection .read_message_future() .then(move |result| match result { @@ -448,95 +479,110 @@ fn net_process_connection_messages( /// Schedule connections. maintain. 
fn net_schedule_maintain(data: Arc) { - let closure_data = data.clone(); - execute(&data.executor, Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0)) - .and_then(move |_| Ok(net_maintain(closure_data.clone()))) - .for_each(|_| Ok(())) - .then(|_| future::ok(()))); + let closure_data = data.clone(); + execute( + &data.executor, + Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0)) + .and_then(move |_| Ok(net_maintain(closure_data.clone()))) + .for_each(|_| Ok(())) + .then(|_| future::ok(())), + ); } /// Maintain network connections. fn net_maintain(data: Arc) { - trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public()); + trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public()); - update_nodes_set(data.clone()); - data.message_processor.maintain_sessions(); - net_keep_alive(data.clone()); - net_connect_disconnected(data); + update_nodes_set(data.clone()); + data.message_processor.maintain_sessions(); + net_keep_alive(data.clone()); + net_connect_disconnected(data); } /// Send keep alive messages to remote nodes. 
fn net_keep_alive(data: Arc) { - let active_connections = data.active_connections(); - for connection in active_connections { - // the last_message_time could change after active_connections() call - // => we always need to call Instant::now() after getting last_message_time - let last_message_time = connection.last_message_time(); - let now = Instant::now(); - let last_message_diff = now - last_message_time; - if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL { - warn!(target: "secretstore_net", "{}: keep alive timeout for node {}", + let active_connections = data.active_connections(); + for connection in active_connections { + // the last_message_time could change after active_connections() call + // => we always need to call Instant::now() after getting last_message_time + let last_message_time = connection.last_message_time(); + let now = Instant::now(); + let last_message_diff = now - last_message_time; + if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL { + warn!(target: "secretstore_net", "{}: keep alive timeout for node {}", data.self_key_pair.public(), connection.node_id()); - let node_id = *connection.node_id(); - if data.remove(&*connection) { - let maintain_action = data.trigger.lock().on_connection_closed(&node_id); - maintain_connection_trigger(data.clone(), maintain_action); - } - data.message_processor.process_disconnect(&node_id); - } - else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL { - connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {}))); - } - } + let node_id = *connection.node_id(); + if data.remove(&*connection) { + let maintain_action = data.trigger.lock().on_connection_closed(&node_id); + maintain_connection_trigger(data.clone(), maintain_action); + } + data.message_processor.process_disconnect(&node_id); + } else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL { + connection.send_message(Message::Cluster(ClusterMessage::KeepAlive( + message::KeepAlive {}, + ))); + } + } } /// Connect 
disconnected nodes. fn net_connect_disconnected(data: Arc) { - let disconnected_nodes = data.disconnected_nodes(); - for (node_id, address) in disconnected_nodes { - if data.allow_connecting_to_higher_nodes || *data.self_key_pair.public() < node_id { - net_connect(data.clone(), address); - } - } + let disconnected_nodes = data.disconnected_nodes(); + for (node_id, address) in disconnected_nodes { + if data.allow_connecting_to_higher_nodes || *data.self_key_pair.public() < node_id { + net_connect(data.clone(), address); + } + } } /// Schedule future execution. fn execute + Send + 'static>(executor: &Executor, f: F) { - if let Err(err) = future::Executor::execute(executor, Box::new(f)) { - error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err); - } + if let Err(err) = future::Executor::execute(executor, Box::new(f)) { + error!( + "Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", + err + ); + } } /// Try to update active nodes set from connection trigger. fn update_nodes_set(data: Arc) { - let maintain_action = data.trigger.lock().on_maintain(); - maintain_connection_trigger(data, maintain_action); + let maintain_action = data.trigger.lock().on_maintain(); + maintain_connection_trigger(data, maintain_action); } /// Execute maintain procedures of connections trigger. 
fn maintain_connection_trigger(data: Arc, maintain_action: Option) { - if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Session) { - let session_params = data.trigger.lock().maintain_session(); - if let Some(session_params) = session_params { - let session = data.message_processor.start_servers_set_change_session(session_params); - match session { - Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session", + if maintain_action == Some(Maintain::SessionAndConnections) + || maintain_action == Some(Maintain::Session) + { + let session_params = data.trigger.lock().maintain_session(); + if let Some(session_params) = session_params { + let session = data + .message_processor + .start_servers_set_change_session(session_params); + match session { + Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session", data.self_key_pair.public()), - Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}", - data.self_key_pair.public(), err), - } - } - } - if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Connections) { - let mut trigger = data.trigger.lock(); - let mut data = data.container.write(); - trigger.maintain_connections(&mut *data); - } + Err(err) => { + trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}", + data.self_key_pair.public(), err) + } + } + } + } + if maintain_action == Some(Maintain::SessionAndConnections) + || maintain_action == Some(Maintain::Connections) + { + let mut trigger = data.trigger.lock(); + let mut data = data.container.write(); + trigger.maintain_connections(&mut *data); + } } /// Compose SocketAddr from configuration' address and port. 
fn make_socket_address(address: &str, port: u16) -> Result { - let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?; - Ok(SocketAddr::new(ip_address, port)) + let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?; + Ok(SocketAddr::new(ip_address, port)) } diff --git a/secret-store/src/key_server_cluster/cluster_message_processor.rs b/secret-store/src/key_server_cluster/cluster_message_processor.rs index b4ba5ef03..eefadbde6 100644 --- a/secret-store/src/key_server_cluster/cluster_message_processor.rs +++ b/secret-store/src/key_server_cluster/cluster_message_processor.rs @@ -14,127 +14,139 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use key_server_cluster::{ + cluster::{new_servers_set_change_session, ServersSetChangeParams}, + cluster_connections::{Connection, ConnectionProvider}, + cluster_sessions::{ + create_cluster_view, AdminSession, ClusterSession, ClusterSessions, + ClusterSessionsContainer, + }, + cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId}, + connection_trigger::ServersSetChangeSessionCreatorConnector, + key_version_negotiation_session::{ + ContinueAction, IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, + SessionImpl as KeyVersionNegotiationSession, + }, + message::{self, ClusterMessage, Message}, + Error, NodeId, NodeKeyPair, +}; use std::sync::Arc; -use key_server_cluster::{Error, NodeId, NodeKeyPair}; -use key_server_cluster::cluster::{ServersSetChangeParams, new_servers_set_change_session}; -use key_server_cluster::cluster_sessions::{AdminSession}; -use key_server_cluster::cluster_connections::{ConnectionProvider, Connection}; -use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, ClusterSessionsContainer, - create_cluster_view}; -use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId}; -use key_server_cluster::message::{self, Message, 
ClusterMessage}; -use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, - IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction}; -use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector; /// Something that is able to process signals/messages from other nodes. pub trait MessageProcessor: Send + Sync { - /// Process disconnect from the remote node. - fn process_disconnect(&self, node: &NodeId); - /// Process single message from the connection. - fn process_connection_message(&self, connection: Arc, message: Message); + /// Process disconnect from the remote node. + fn process_disconnect(&self, node: &NodeId); + /// Process single message from the connection. + fn process_connection_message(&self, connection: Arc, message: Message); - /// Start servers set change session. This is typically used by ConnectionManager when - /// it detects that auto-migration session needs to be started. - fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result, Error>; - /// Try to continue session after key version negotiation session is completed. - fn try_continue_session( - &self, - session: Option>> - ); - /// Maintain active sessions. Typically called by the ConnectionManager at some intervals. - /// Should cancel stalled sessions and send keep-alive messages for sessions that support it. - fn maintain_sessions(&self); + /// Start servers set change session. This is typically used by ConnectionManager when + /// it detects that auto-migration session needs to be started. + fn start_servers_set_change_session( + &self, + params: ServersSetChangeParams, + ) -> Result, Error>; + /// Try to continue session after key version negotiation session is completed. + fn try_continue_session( + &self, + session: Option>>, + ); + /// Maintain active sessions. Typically called by the ConnectionManager at some intervals. 
+ /// Should cancel stalled sessions and send keep-alive messages for sessions that support it. + fn maintain_sessions(&self); } /// Bridge between ConnectionManager and ClusterSessions. pub struct SessionsMessageProcessor { - self_key_pair: Arc, - servers_set_change_creator_connector: Arc, - sessions: Arc, - connections: Arc, + self_key_pair: Arc, + servers_set_change_creator_connector: Arc, + sessions: Arc, + connections: Arc, } impl SessionsMessageProcessor { - /// Create new instance of SessionsMessageProcessor. - pub fn new( - self_key_pair: Arc, - servers_set_change_creator_connector: Arc, - sessions: Arc, - connections: Arc, - ) -> Self { - SessionsMessageProcessor { - self_key_pair, - servers_set_change_creator_connector, - sessions, - connections, - } - } + /// Create new instance of SessionsMessageProcessor. + pub fn new( + self_key_pair: Arc, + servers_set_change_creator_connector: Arc, + sessions: Arc, + connections: Arc, + ) -> Self { + SessionsMessageProcessor { + self_key_pair, + servers_set_change_creator_connector, + sessions, + connections, + } + } - /// Process single session message from connection. - fn process_message, D>( - &self, - sessions: &ClusterSessionsContainer, - connection: Arc, - mut message: Message, - ) -> Option> - where - Message: IntoSessionId - { - // get or create new session, if required - let mut sender = *connection.node_id(); - let session = self.prepare_session(sessions, &sender, &message); - // send error if session is not found, or failed to create - let session = match session { - Ok(session) => session, - Err(error) => { - // this is new session => it is not yet in container - warn!(target: "secretstore_net", + /// Process single session message from connection. 
+ fn process_message, D>( + &self, + sessions: &ClusterSessionsContainer, + connection: Arc, + mut message: Message, + ) -> Option> + where + Message: IntoSessionId, + { + // get or create new session, if required + let mut sender = *connection.node_id(); + let session = self.prepare_session(sessions, &sender, &message); + // send error if session is not found, or failed to create + let session = match session { + Ok(session) => session, + Err(error) => { + // this is new session => it is not yet in container + warn!(target: "secretstore_net", "{}: {} session read error '{}' when requested for session from node {}", self.self_key_pair.public(), S::type_name(), error, sender); - if !message.is_error_message() { - let qed = "session_id only fails for cluster messages; + if !message.is_error_message() { + let qed = "session_id only fails for cluster messages; only session messages are passed to process_message; qed"; - let session_id = message.into_session_id().expect(qed); - let session_nonce = message.session_nonce().expect(qed); + let session_id = message.into_session_id().expect(qed); + let session_nonce = message.session_nonce().expect(qed); - connection.send_message(SC::make_error_message(session_id, session_nonce, error)); - } - return None; - }, - }; + connection.send_message(SC::make_error_message( + session_id, + session_nonce, + error, + )); + } + return None; + } + }; - let session_id = session.id(); - let mut is_queued_message = false; - loop { - let message_result = session.on_message(&sender, &message); - match message_result { - Ok(_) => { - // if session is completed => stop - if session.is_finished() { - info!(target: "secretstore_net", + let session_id = session.id(); + let mut is_queued_message = false; + loop { + let message_result = session.on_message(&sender, &message); + match message_result { + Ok(_) => { + // if session is completed => stop + if session.is_finished() { + info!(target: "secretstore_net", "{}: {} session completed", 
self.self_key_pair.public(), S::type_name()); - sessions.remove(&session_id); - return Some(session); - } + sessions.remove(&session_id); + return Some(session); + } - // try to dequeue message - match sessions.dequeue_message(&session_id) { - Some((msg_sender, msg)) => { - is_queued_message = true; - sender = msg_sender; - message = msg; - }, - None => return Some(session), - } - }, - Err(Error::TooEarlyForRequest) => { - sessions.enqueue_message(&session_id, sender, message, is_queued_message); - return Some(session); - }, - Err(err) => { - warn!( + // try to dequeue message + match sessions.dequeue_message(&session_id) { + Some((msg_sender, msg)) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + } + None => return Some(session), + } + } + Err(Error::TooEarlyForRequest) => { + sessions.enqueue_message(&session_id, sender, message, is_queued_message); + return Some(session); + } + Err(err) => { + warn!( target: "secretstore_net", "{}: {} session error '{}' when processing message {} from node {}", self.self_key_pair.public(), @@ -142,216 +154,276 @@ impl SessionsMessageProcessor { err, message, sender); - session.on_session_error(self.self_key_pair.public(), err); - sessions.remove(&session_id); - return Some(session); - }, - } - } - } + session.on_session_error(self.self_key_pair.public(), err); + sessions.remove(&session_id); + return Some(session); + } + } + } + } - /// Get or insert new session. - fn prepare_session, D>( - &self, - sessions: &ClusterSessionsContainer, - sender: &NodeId, - message: &Message - ) -> Result, Error> - where - Message: IntoSessionId - { - fn requires_all_connections(message: &Message) -> bool { - match *message { - Message::Generation(_) => true, - Message::ShareAdd(_) => true, - Message::ServersSetChange(_) => true, - _ => false, - } - } + /// Get or insert new session. 
+ fn prepare_session, D>( + &self, + sessions: &ClusterSessionsContainer, + sender: &NodeId, + message: &Message, + ) -> Result, Error> + where + Message: IntoSessionId, + { + fn requires_all_connections(message: &Message) -> bool { + match *message { + Message::Generation(_) => true, + Message::ShareAdd(_) => true, + Message::ServersSetChange(_) => true, + _ => false, + } + } - // get or create new session, if required - let session_id = message.into_session_id() - .expect("into_session_id fails for cluster messages only; + // get or create new session, if required + let session_id = message.into_session_id().expect( + "into_session_id fails for cluster messages only; only session messages are passed to prepare_session; - qed"); - let is_initialization_message = message.is_initialization_message(); - let is_delegation_message = message.is_delegation_message(); - match is_initialization_message || is_delegation_message { - false => sessions.get(&session_id, true).ok_or(Error::NoActiveSessionWithId), - true => { - let creation_data = SC::creation_data_from_message(&message)?; - let master = if is_initialization_message { - *sender - } else { - *self.self_key_pair.public() - }; - let cluster = create_cluster_view( - self.self_key_pair.clone(), - self.connections.clone(), - requires_all_connections(&message))?; + qed", + ); + let is_initialization_message = message.is_initialization_message(); + let is_delegation_message = message.is_delegation_message(); + match is_initialization_message || is_delegation_message { + false => sessions + .get(&session_id, true) + .ok_or(Error::NoActiveSessionWithId), + true => { + let creation_data = SC::creation_data_from_message(&message)?; + let master = if is_initialization_message { + *sender + } else { + *self.self_key_pair.public() + }; + let cluster = create_cluster_view( + self.self_key_pair.clone(), + self.connections.clone(), + requires_all_connections(&message), + )?; - let nonce = 
Some(message.session_nonce().ok_or(Error::InvalidMessage)?); - let exclusive = message.is_exclusive_session_message(); - sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data) - }, - } - } + let nonce = Some(message.session_nonce().ok_or(Error::InvalidMessage)?); + let exclusive = message.is_exclusive_session_message(); + sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data) + } + } + } - /// Process single cluster message from the connection. - fn process_cluster_message(&self, connection: Arc, message: ClusterMessage) { - match message { - ClusterMessage::KeepAlive(_) => { - let msg = Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse { - session_id: None, - })); - connection.send_message(msg) - }, - ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id { - self.sessions.on_session_keep_alive(connection.node_id(), session_id.into()); - }, - _ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", - self.self_key_pair.public(), message, connection.node_id(), connection.node_address()), - } - } + /// Process single cluster message from the connection. 
+ fn process_cluster_message(&self, connection: Arc, message: ClusterMessage) { + match message { + ClusterMessage::KeepAlive(_) => { + let msg = Message::Cluster(ClusterMessage::KeepAliveResponse( + message::KeepAliveResponse { session_id: None }, + )); + connection.send_message(msg) + } + ClusterMessage::KeepAliveResponse(msg) => { + if let Some(session_id) = msg.session_id { + self.sessions + .on_session_keep_alive(connection.node_id(), session_id.into()); + } + } + _ => { + warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", + self.self_key_pair.public(), message, connection.node_id(), connection.node_address()) + } + } + } } impl MessageProcessor for SessionsMessageProcessor { - fn process_disconnect(&self, node: &NodeId) { - self.sessions.on_connection_timeout(node); - } + fn process_disconnect(&self, node: &NodeId) { + self.sessions.on_connection_timeout(node); + } - fn process_connection_message(&self, connection: Arc, message: Message) { - trace!(target: "secretstore_net", "{}: received message {} from {}", + fn process_connection_message(&self, connection: Arc, message: Message) { + trace!(target: "secretstore_net", "{}: received message {} from {}", self.self_key_pair.public(), message, connection.node_id()); - // error is ignored as we only process errors on session level - match message { - Message::Generation(message) => self - .process_message(&self.sessions.generation_sessions, connection, Message::Generation(message)) - .map(|_| ()).unwrap_or_default(), - Message::Encryption(message) => self - .process_message(&self.sessions.encryption_sessions, connection, Message::Encryption(message)) - .map(|_| ()).unwrap_or_default(), - Message::Decryption(message) => self - .process_message(&self.sessions.decryption_sessions, connection, Message::Decryption(message)) - .map(|_| ()).unwrap_or_default(), - Message::SchnorrSigning(message) => self - .process_message(&self.sessions.schnorr_signing_sessions, connection, 
Message::SchnorrSigning(message)) - .map(|_| ()).unwrap_or_default(), - Message::EcdsaSigning(message) => self - .process_message(&self.sessions.ecdsa_signing_sessions, connection, Message::EcdsaSigning(message)) - .map(|_| ()).unwrap_or_default(), - Message::ServersSetChange(message) => { - let message = Message::ServersSetChange(message); - let is_initialization_message = message.is_initialization_message(); - let session = self.process_message(&self.sessions.admin_sessions, connection, message); - if is_initialization_message { - if let Some(session) = session { - self.servers_set_change_creator_connector - .set_key_servers_set_change_session(session.clone()); - } - } - }, - Message::KeyVersionNegotiation(message) => { - let session = self.process_message( - &self.sessions.negotiation_sessions, connection, Message::KeyVersionNegotiation(message)); - self.try_continue_session(session); - }, - Message::ShareAdd(message) => self.process_message( - &self.sessions.admin_sessions, connection, Message::ShareAdd(message)) - .map(|_| ()).unwrap_or_default(), - Message::Cluster(message) => self.process_cluster_message(connection, message), - } - } + // error is ignored as we only process errors on session level + match message { + Message::Generation(message) => self + .process_message( + &self.sessions.generation_sessions, + connection, + Message::Generation(message), + ) + .map(|_| ()) + .unwrap_or_default(), + Message::Encryption(message) => self + .process_message( + &self.sessions.encryption_sessions, + connection, + Message::Encryption(message), + ) + .map(|_| ()) + .unwrap_or_default(), + Message::Decryption(message) => self + .process_message( + &self.sessions.decryption_sessions, + connection, + Message::Decryption(message), + ) + .map(|_| ()) + .unwrap_or_default(), + Message::SchnorrSigning(message) => self + .process_message( + &self.sessions.schnorr_signing_sessions, + connection, + Message::SchnorrSigning(message), + ) + .map(|_| ()) + .unwrap_or_default(), 
+ Message::EcdsaSigning(message) => self + .process_message( + &self.sessions.ecdsa_signing_sessions, + connection, + Message::EcdsaSigning(message), + ) + .map(|_| ()) + .unwrap_or_default(), + Message::ServersSetChange(message) => { + let message = Message::ServersSetChange(message); + let is_initialization_message = message.is_initialization_message(); + let session = + self.process_message(&self.sessions.admin_sessions, connection, message); + if is_initialization_message { + if let Some(session) = session { + self.servers_set_change_creator_connector + .set_key_servers_set_change_session(session.clone()); + } + } + } + Message::KeyVersionNegotiation(message) => { + let session = self.process_message( + &self.sessions.negotiation_sessions, + connection, + Message::KeyVersionNegotiation(message), + ); + self.try_continue_session(session); + } + Message::ShareAdd(message) => self + .process_message( + &self.sessions.admin_sessions, + connection, + Message::ShareAdd(message), + ) + .map(|_| ()) + .unwrap_or_default(), + Message::Cluster(message) => self.process_cluster_message(connection, message), + } + } - fn try_continue_session( - &self, - session: Option>> - ) { - if let Some(session) = session { - let meta = session.meta(); - let is_master_node = meta.self_node_id == meta.master_node_id; - if is_master_node && session.is_finished() { - self.sessions.negotiation_sessions.remove(&session.id()); - match session.wait() { - Ok(Some((version, master))) => match session.take_continue_action() { - Some(ContinueAction::Decrypt( - session, origin, is_shadow_decryption, is_broadcast_decryption - )) => { - let initialization_error = if self.self_key_pair.public() == &master { - session.initialize( - origin, version, is_shadow_decryption, is_broadcast_decryption) - } else { - session.delegate( - master, origin, version, is_shadow_decryption, is_broadcast_decryption) - }; + fn try_continue_session( + &self, + session: Option>>, + ) { + if let Some(session) = session { + 
let meta = session.meta(); + let is_master_node = meta.self_node_id == meta.master_node_id; + if is_master_node && session.is_finished() { + self.sessions.negotiation_sessions.remove(&session.id()); + match session.wait() { + Ok(Some((version, master))) => match session.take_continue_action() { + Some(ContinueAction::Decrypt( + session, + origin, + is_shadow_decryption, + is_broadcast_decryption, + )) => { + let initialization_error = if self.self_key_pair.public() == &master { + session.initialize( + origin, + version, + is_shadow_decryption, + is_broadcast_decryption, + ) + } else { + session.delegate( + master, + origin, + version, + is_shadow_decryption, + is_broadcast_decryption, + ) + }; - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - self.sessions.decryption_sessions.remove(&session.id()); - } - }, - Some(ContinueAction::SchnorrSign(session, message_hash)) => { - let initialization_error = if self.self_key_pair.public() == &master { - session.initialize(version, message_hash) - } else { - session.delegate(master, version, message_hash) - }; + if let Err(error) = initialization_error { + session.on_session_error(&meta.self_node_id, error); + self.sessions.decryption_sessions.remove(&session.id()); + } + } + Some(ContinueAction::SchnorrSign(session, message_hash)) => { + let initialization_error = if self.self_key_pair.public() == &master { + session.initialize(version, message_hash) + } else { + session.delegate(master, version, message_hash) + }; - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - self.sessions.schnorr_signing_sessions.remove(&session.id()); - } - }, - Some(ContinueAction::EcdsaSign(session, message_hash)) => { - let initialization_error = if self.self_key_pair.public() == &master { - session.initialize(version, message_hash) - } else { - session.delegate(master, version, message_hash) - }; + if let Err(error) = initialization_error { + 
session.on_session_error(&meta.self_node_id, error); + self.sessions.schnorr_signing_sessions.remove(&session.id()); + } + } + Some(ContinueAction::EcdsaSign(session, message_hash)) => { + let initialization_error = if self.self_key_pair.public() == &master { + session.initialize(version, message_hash) + } else { + session.delegate(master, version, message_hash) + }; - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - self.sessions.ecdsa_signing_sessions.remove(&session.id()); - } - }, - None => (), - }, - Ok(None) => unreachable!("is_master_node; session is finished; + if let Err(error) = initialization_error { + session.on_session_error(&meta.self_node_id, error); + self.sessions.ecdsa_signing_sessions.remove(&session.id()); + } + } + None => (), + }, + Ok(None) => unreachable!( + "is_master_node; session is finished; negotiation version always finished with result on master; - qed"), - Err(error) => match session.take_continue_action() { - Some(ContinueAction::Decrypt(session, _, _, _)) => { - session.on_session_error(&meta.self_node_id, error); - self.sessions.decryption_sessions.remove(&session.id()); - }, - Some(ContinueAction::SchnorrSign(session, _)) => { - session.on_session_error(&meta.self_node_id, error); - self.sessions.schnorr_signing_sessions.remove(&session.id()); - }, - Some(ContinueAction::EcdsaSign(session, _)) => { - session.on_session_error(&meta.self_node_id, error); - self.sessions.ecdsa_signing_sessions.remove(&session.id()); - }, - None => (), - }, - } - } - } - } + qed" + ), + Err(error) => match session.take_continue_action() { + Some(ContinueAction::Decrypt(session, _, _, _)) => { + session.on_session_error(&meta.self_node_id, error); + self.sessions.decryption_sessions.remove(&session.id()); + } + Some(ContinueAction::SchnorrSign(session, _)) => { + session.on_session_error(&meta.self_node_id, error); + self.sessions.schnorr_signing_sessions.remove(&session.id()); + } + 
Some(ContinueAction::EcdsaSign(session, _)) => { + session.on_session_error(&meta.self_node_id, error); + self.sessions.ecdsa_signing_sessions.remove(&session.id()); + } + None => (), + }, + } + } + } + } - fn maintain_sessions(&self) { - self.sessions.stop_stalled_sessions(); - self.sessions.sessions_keep_alive(); - } + fn maintain_sessions(&self) { + self.sessions.stop_stalled_sessions(); + self.sessions.sessions_keep_alive(); + } - fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result, Error> { - new_servers_set_change_session( - self.self_key_pair.clone(), - &*self.sessions, - self.connections.clone(), - self.servers_set_change_creator_connector.clone(), - params, - ) - } + fn start_servers_set_change_session( + &self, + params: ServersSetChangeParams, + ) -> Result, Error> { + new_servers_set_change_session( + self.self_key_pair.clone(), + &*self.sessions, + self.connections.clone(), + self.servers_set_change_creator_connector.clone(), + params, + ) + } } diff --git a/secret-store/src/key_server_cluster/cluster_sessions.rs b/secret-store/src/key_server_cluster/cluster_sessions.rs index 53eec1334..19979cdb2 100644 --- a/secret-store/src/key_server_cluster/cluster_sessions.rs +++ b/secret-store/src/key_server_cluster/cluster_sessions.rs @@ -14,31 +14,40 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::time::{Duration, Instant}; -use std::sync::{Arc, Weak}; -use std::sync::atomic::AtomicBool; -use std::collections::{VecDeque, BTreeMap, BTreeSet}; -use parking_lot::{Mutex, RwLock, Condvar}; use ethereum_types::H256; use ethkey::Secret; -use key_server_cluster::{Error, NodeId, SessionId, Requester, NodeKeyPair}; -use key_server_cluster::cluster::{Cluster, ClusterConfiguration, ClusterView}; -use key_server_cluster::cluster_connections::ConnectionProvider; -use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector; -use key_server_cluster::message::{self, Message}; -use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl}; -use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl}; -use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl}; -use key_server_cluster::signing_session_ecdsa::{SessionImpl as EcdsaSigningSessionImpl}; -use key_server_cluster::signing_session_schnorr::{SessionImpl as SchnorrSigningSessionImpl}; -use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl, IsolatedSessionTransport as ShareAddTransport}; -use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl}; -use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl, - IsolatedSessionTransport as VersionNegotiationTransport}; +use key_server_cluster::{ + cluster::{Cluster, ClusterConfiguration, ClusterView}, + cluster_connections::ConnectionProvider, + connection_trigger::ServersSetChangeSessionCreatorConnector, + decryption_session::SessionImpl as DecryptionSessionImpl, + encryption_session::SessionImpl as EncryptionSessionImpl, + generation_session::SessionImpl as GenerationSessionImpl, + key_version_negotiation_session::{ + IsolatedSessionTransport as VersionNegotiationTransport, + SessionImpl as KeyVersionNegotiationSessionImpl, + }, + message::{self, Message}, + 
servers_set_change_session::SessionImpl as ServersSetChangeSessionImpl, + share_add_session::{ + IsolatedSessionTransport as ShareAddTransport, SessionImpl as ShareAddSessionImpl, + }, + signing_session_ecdsa::SessionImpl as EcdsaSigningSessionImpl, + signing_session_schnorr::SessionImpl as SchnorrSigningSessionImpl, + Error, NodeId, NodeKeyPair, Requester, SessionId, +}; +use parking_lot::{Condvar, Mutex, RwLock}; +use std::{ + collections::{BTreeMap, BTreeSet, VecDeque}, + sync::{atomic::AtomicBool, Arc, Weak}, + time::{Duration, Instant}, +}; -use key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, EncryptionSessionCreator, DecryptionSessionCreator, - SchnorrSigningSessionCreator, KeyVersionNegotiationSessionCreator, AdminSessionCreator, SessionCreatorCore, - EcdsaSigningSessionCreator, ClusterSessionCreator}; +use key_server_cluster::cluster_sessions_creator::{ + AdminSessionCreator, ClusterSessionCreator, DecryptionSessionCreator, + EcdsaSigningSessionCreator, EncryptionSessionCreator, GenerationSessionCreator, + KeyVersionNegotiationSessionCreator, SchnorrSigningSessionCreator, SessionCreatorCore, +}; /// When there are no session-related messages for SESSION_TIMEOUT_INTERVAL seconds, /// we must treat this session as stalled && finish it with an error. @@ -49,427 +58,524 @@ const SESSION_TIMEOUT_INTERVAL: Duration = Duration::from_secs(60); const SESSION_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(30); lazy_static! { - /// Servers set change session id (there could be at most 1 session => hardcoded id). - pub static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c" - .parse() - .expect("hardcoded id should parse without errors; qed"); + /// Servers set change session id (there could be at most 1 session => hardcoded id). 
+ pub static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c" + .parse() + .expect("hardcoded id should parse without errors; qed"); } /// Session id with sub session. #[derive(Debug, Clone, PartialEq, Eq)] pub struct SessionIdWithSubSession { - /// Key id. - pub id: SessionId, - /// Sub session id. - pub access_key: Secret, + /// Key id. + pub id: SessionId, + /// Sub session id. + pub access_key: Secret, } /// Generic cluster session. pub trait ClusterSession { - /// Session identifier type. - type Id: ::std::fmt::Debug + Ord + Clone; + /// Session identifier type. + type Id: ::std::fmt::Debug + Ord + Clone; - /// Session type name. - fn type_name() -> &'static str; - /// Get session id. - fn id(&self) -> Self::Id; - /// If session is finished (either with succcess or not). - fn is_finished(&self) -> bool; - /// When it takes too much time to complete session. - fn on_session_timeout(&self); - /// When it takes too much time to receive response from the node. - fn on_node_timeout(&self, node_id: &NodeId); - /// Process error that has occured during session + propagate this error to required nodes. - fn on_session_error(&self, sender: &NodeId, error: Error); - /// Process session message. - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>; + /// Session type name. + fn type_name() -> &'static str; + /// Get session id. + fn id(&self) -> Self::Id; + /// If session is finished (either with succcess or not). + fn is_finished(&self) -> bool; + /// When it takes too much time to complete session. + fn on_session_timeout(&self); + /// When it takes too much time to receive response from the node. + fn on_node_timeout(&self, node_id: &NodeId); + /// Process error that has occured during session + propagate this error to required nodes. + fn on_session_error(&self, sender: &NodeId, error: Error); + /// Process session message. 
+ fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>; - /// 'Wait for session completion' helper. - fn wait_session Option>>(completion_event: &Condvar, session_data: &Mutex, timeout: Option, result_reader: F) -> Option> { - let mut locked_data = session_data.lock(); - match result_reader(&locked_data) { - Some(result) => Some(result), - None => { - match timeout { - None => completion_event.wait(&mut locked_data), - Some(timeout) => { - completion_event.wait_for(&mut locked_data, timeout); - }, - } + /// 'Wait for session completion' helper. + fn wait_session Option>>( + completion_event: &Condvar, + session_data: &Mutex, + timeout: Option, + result_reader: F, + ) -> Option> { + let mut locked_data = session_data.lock(); + match result_reader(&locked_data) { + Some(result) => Some(result), + None => { + match timeout { + None => completion_event.wait(&mut locked_data), + Some(timeout) => { + completion_event.wait_for(&mut locked_data, timeout); + } + } - result_reader(&locked_data) - }, - } - } + result_reader(&locked_data) + } + } + } } /// Administrative session. pub enum AdminSession { - /// Share add session. - ShareAdd(ShareAddSessionImpl), - /// Servers set change session. - ServersSetChange(ServersSetChangeSessionImpl), + /// Share add session. + ShareAdd(ShareAddSessionImpl), + /// Servers set change session. + ServersSetChange(ServersSetChangeSessionImpl), } /// Administrative session creation data. pub enum AdminSessionCreationData { - /// Share add session (key id). - ShareAdd(H256), - /// Servers set change session (block id, new_server_set). - ServersSetChange(Option, BTreeSet), + /// Share add session (key id). + ShareAdd(H256), + /// Servers set change session (block id, new_server_set). + ServersSetChange(Option, BTreeSet), } /// Active sessions on this cluster. pub struct ClusterSessions { - /// Key generation sessions. - pub generation_sessions: ClusterSessionsContainer, - /// Encryption sessions. 
- pub encryption_sessions: ClusterSessionsContainer, - /// Decryption sessions. - pub decryption_sessions: ClusterSessionsContainer, - /// Schnorr signing sessions. - pub schnorr_signing_sessions: ClusterSessionsContainer, - /// ECDSA signing sessions. - pub ecdsa_signing_sessions: ClusterSessionsContainer, - /// Key version negotiation sessions. - pub negotiation_sessions: ClusterSessionsContainer, KeyVersionNegotiationSessionCreator, ()>, - /// Administrative sessions. - pub admin_sessions: ClusterSessionsContainer, - /// Self node id. - self_node_id: NodeId, - /// Creator core. - creator_core: Arc, + /// Key generation sessions. + pub generation_sessions: + ClusterSessionsContainer, + /// Encryption sessions. + pub encryption_sessions: + ClusterSessionsContainer, + /// Decryption sessions. + pub decryption_sessions: + ClusterSessionsContainer, + /// Schnorr signing sessions. + pub schnorr_signing_sessions: ClusterSessionsContainer< + SchnorrSigningSessionImpl, + SchnorrSigningSessionCreator, + Requester, + >, + /// ECDSA signing sessions. + pub ecdsa_signing_sessions: + ClusterSessionsContainer, + /// Key version negotiation sessions. + pub negotiation_sessions: ClusterSessionsContainer< + KeyVersionNegotiationSessionImpl, + KeyVersionNegotiationSessionCreator, + (), + >, + /// Administrative sessions. + pub admin_sessions: + ClusterSessionsContainer, + /// Self node id. + self_node_id: NodeId, + /// Creator core. + creator_core: Arc, } /// Active sessions container listener. pub trait ClusterSessionsListener: Send + Sync { - /// When new session is inserted to the container. - fn on_session_inserted(&self, _session: Arc) {} - /// When session is removed from the container. - fn on_session_removed(&self, _session: Arc) {} + /// When new session is inserted to the container. + fn on_session_inserted(&self, _session: Arc) {} + /// When session is removed from the container. + fn on_session_removed(&self, _session: Arc) {} } /// Active sessions container. 
pub struct ClusterSessionsContainer, D> { - /// Sessions creator. - pub creator: SC, - /// Active sessions. - sessions: RwLock>>, - /// Listeners. Lock order: sessions -> listeners. - listeners: Mutex>>>, - /// Sessions container state. - container_state: Arc>, - /// Do not actually remove sessions. - preserve_sessions: bool, - /// Phantom data. - _pd: ::std::marker::PhantomData, + /// Sessions creator. + pub creator: SC, + /// Active sessions. + sessions: RwLock>>, + /// Listeners. Lock order: sessions -> listeners. + listeners: Mutex>>>, + /// Sessions container state. + container_state: Arc>, + /// Do not actually remove sessions. + preserve_sessions: bool, + /// Phantom data. + _pd: ::std::marker::PhantomData, } /// Session and its message queue. pub struct QueuedSession { - /// Session master. - pub master: NodeId, - /// Cluster view. - pub cluster_view: Arc, - /// Last keep alive time. - pub last_keep_alive_time: Instant, - /// Last received message time. - pub last_message_time: Instant, - /// Generation session. - pub session: Arc, - /// Messages queue. - pub queue: VecDeque<(NodeId, Message)>, + /// Session master. + pub master: NodeId, + /// Cluster view. + pub cluster_view: Arc, + /// Last keep alive time. + pub last_keep_alive_time: Instant, + /// Last received message time. + pub last_message_time: Instant, + /// Generation session. + pub session: Arc, + /// Messages queue. + pub queue: VecDeque<(NodeId, Message)>, } /// Cluster sessions container state. #[derive(Debug, Clone, Copy, PartialEq)] pub enum ClusterSessionsContainerState { - /// There's no active sessions => any session can be started. - Idle, - /// There are active sessions => exclusive session can't be started right now. - Active(usize), - /// Exclusive session is active => can't start any other sessions. - Exclusive, + /// There's no active sessions => any session can be started. + Idle, + /// There are active sessions => exclusive session can't be started right now. 
+ Active(usize), + /// Exclusive session is active => can't start any other sessions. + Exclusive, } impl ClusterSessions { - /// Create new cluster sessions container. - pub fn new(config: &ClusterConfiguration, servers_set_change_session_creator_connector: Arc) -> Self { - let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle)); - let creator_core = Arc::new(SessionCreatorCore::new(config)); - ClusterSessions { - self_node_id: config.self_key_pair.public().clone(), - generation_sessions: ClusterSessionsContainer::new(GenerationSessionCreator { - core: creator_core.clone(), - make_faulty_generation_sessions: AtomicBool::new(false), - }, container_state.clone()), - encryption_sessions: ClusterSessionsContainer::new(EncryptionSessionCreator { - core: creator_core.clone(), - }, container_state.clone()), - decryption_sessions: ClusterSessionsContainer::new(DecryptionSessionCreator { - core: creator_core.clone(), - }, container_state.clone()), - schnorr_signing_sessions: ClusterSessionsContainer::new(SchnorrSigningSessionCreator { - core: creator_core.clone(), - }, container_state.clone()), - ecdsa_signing_sessions: ClusterSessionsContainer::new(EcdsaSigningSessionCreator { - core: creator_core.clone(), - }, container_state.clone()), - negotiation_sessions: ClusterSessionsContainer::new(KeyVersionNegotiationSessionCreator { - core: creator_core.clone(), - }, container_state.clone()), - admin_sessions: ClusterSessionsContainer::new(AdminSessionCreator { - core: creator_core.clone(), - servers_set_change_session_creator_connector: servers_set_change_session_creator_connector, - admin_public: config.admin_public.clone(), - }, container_state), - creator_core: creator_core, - } - } + /// Create new cluster sessions container. 
+ pub fn new( + config: &ClusterConfiguration, + servers_set_change_session_creator_connector: Arc, + ) -> Self { + let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle)); + let creator_core = Arc::new(SessionCreatorCore::new(config)); + ClusterSessions { + self_node_id: config.self_key_pair.public().clone(), + generation_sessions: ClusterSessionsContainer::new( + GenerationSessionCreator { + core: creator_core.clone(), + make_faulty_generation_sessions: AtomicBool::new(false), + }, + container_state.clone(), + ), + encryption_sessions: ClusterSessionsContainer::new( + EncryptionSessionCreator { + core: creator_core.clone(), + }, + container_state.clone(), + ), + decryption_sessions: ClusterSessionsContainer::new( + DecryptionSessionCreator { + core: creator_core.clone(), + }, + container_state.clone(), + ), + schnorr_signing_sessions: ClusterSessionsContainer::new( + SchnorrSigningSessionCreator { + core: creator_core.clone(), + }, + container_state.clone(), + ), + ecdsa_signing_sessions: ClusterSessionsContainer::new( + EcdsaSigningSessionCreator { + core: creator_core.clone(), + }, + container_state.clone(), + ), + negotiation_sessions: ClusterSessionsContainer::new( + KeyVersionNegotiationSessionCreator { + core: creator_core.clone(), + }, + container_state.clone(), + ), + admin_sessions: ClusterSessionsContainer::new( + AdminSessionCreator { + core: creator_core.clone(), + servers_set_change_session_creator_connector: + servers_set_change_session_creator_connector, + admin_public: config.admin_public.clone(), + }, + container_state, + ), + creator_core: creator_core, + } + } - #[cfg(test)] - pub fn make_faulty_generation_sessions(&self) { - self.generation_sessions.creator.make_faulty_generation_sessions(); - } + #[cfg(test)] + pub fn make_faulty_generation_sessions(&self) { + self.generation_sessions + .creator + .make_faulty_generation_sessions(); + } - #[cfg(test)] - pub fn preserve_sessions(&mut self) { - 
self.generation_sessions.preserve_sessions = true; - self.encryption_sessions.preserve_sessions = true; - self.decryption_sessions.preserve_sessions = true; - self.schnorr_signing_sessions.preserve_sessions = true; - self.ecdsa_signing_sessions.preserve_sessions = true; - self.negotiation_sessions.preserve_sessions = true; - self.admin_sessions.preserve_sessions = true; - } + #[cfg(test)] + pub fn preserve_sessions(&mut self) { + self.generation_sessions.preserve_sessions = true; + self.encryption_sessions.preserve_sessions = true; + self.decryption_sessions.preserve_sessions = true; + self.schnorr_signing_sessions.preserve_sessions = true; + self.ecdsa_signing_sessions.preserve_sessions = true; + self.negotiation_sessions.preserve_sessions = true; + self.admin_sessions.preserve_sessions = true; + } - /// Send session-level keep-alive messages. - pub fn sessions_keep_alive(&self) { - self.admin_sessions.send_keep_alive(&*SERVERS_SET_CHANGE_SESSION_ID, &self.self_node_id); - } + /// Send session-level keep-alive messages. + pub fn sessions_keep_alive(&self) { + self.admin_sessions + .send_keep_alive(&*SERVERS_SET_CHANGE_SESSION_ID, &self.self_node_id); + } - /// When session-level keep-alive response is received. - pub fn on_session_keep_alive(&self, sender: &NodeId, session_id: SessionId) { - if session_id == *SERVERS_SET_CHANGE_SESSION_ID { - self.admin_sessions.on_keep_alive(&session_id, sender); - } - } + /// When session-level keep-alive response is received. + pub fn on_session_keep_alive(&self, sender: &NodeId, session_id: SessionId) { + if session_id == *SERVERS_SET_CHANGE_SESSION_ID { + self.admin_sessions.on_keep_alive(&session_id, sender); + } + } - /// Stop sessions that are stalling. 
- pub fn stop_stalled_sessions(&self) { - self.generation_sessions.stop_stalled_sessions(); - self.encryption_sessions.stop_stalled_sessions(); - self.decryption_sessions.stop_stalled_sessions(); - self.schnorr_signing_sessions.stop_stalled_sessions(); - self.ecdsa_signing_sessions.stop_stalled_sessions(); - self.negotiation_sessions.stop_stalled_sessions(); - self.admin_sessions.stop_stalled_sessions(); - } + /// Stop sessions that are stalling. + pub fn stop_stalled_sessions(&self) { + self.generation_sessions.stop_stalled_sessions(); + self.encryption_sessions.stop_stalled_sessions(); + self.decryption_sessions.stop_stalled_sessions(); + self.schnorr_signing_sessions.stop_stalled_sessions(); + self.ecdsa_signing_sessions.stop_stalled_sessions(); + self.negotiation_sessions.stop_stalled_sessions(); + self.admin_sessions.stop_stalled_sessions(); + } - /// When connection to node is lost. - pub fn on_connection_timeout(&self, node_id: &NodeId) { - self.generation_sessions.on_connection_timeout(node_id); - self.encryption_sessions.on_connection_timeout(node_id); - self.decryption_sessions.on_connection_timeout(node_id); - self.schnorr_signing_sessions.on_connection_timeout(node_id); - self.ecdsa_signing_sessions.on_connection_timeout(node_id); - self.negotiation_sessions.on_connection_timeout(node_id); - self.admin_sessions.on_connection_timeout(node_id); - self.creator_core.on_connection_timeout(node_id); - } + /// When connection to node is lost. 
+ pub fn on_connection_timeout(&self, node_id: &NodeId) { + self.generation_sessions.on_connection_timeout(node_id); + self.encryption_sessions.on_connection_timeout(node_id); + self.decryption_sessions.on_connection_timeout(node_id); + self.schnorr_signing_sessions.on_connection_timeout(node_id); + self.ecdsa_signing_sessions.on_connection_timeout(node_id); + self.negotiation_sessions.on_connection_timeout(node_id); + self.admin_sessions.on_connection_timeout(node_id); + self.creator_core.on_connection_timeout(node_id); + } } -impl ClusterSessionsContainer where S: ClusterSession, SC: ClusterSessionCreator { - pub fn new(creator: SC, container_state: Arc>) -> Self { - ClusterSessionsContainer { - creator: creator, - sessions: RwLock::new(BTreeMap::new()), - listeners: Mutex::new(Vec::new()), - container_state: container_state, - preserve_sessions: false, - _pd: Default::default(), - } - } +impl ClusterSessionsContainer +where + S: ClusterSession, + SC: ClusterSessionCreator, +{ + pub fn new(creator: SC, container_state: Arc>) -> Self { + ClusterSessionsContainer { + creator: creator, + sessions: RwLock::new(BTreeMap::new()), + listeners: Mutex::new(Vec::new()), + container_state: container_state, + preserve_sessions: false, + _pd: Default::default(), + } + } - pub fn add_listener(&self, listener: Arc>) { - self.listeners.lock().push(Arc::downgrade(&listener)); - } + pub fn add_listener(&self, listener: Arc>) { + self.listeners.lock().push(Arc::downgrade(&listener)); + } - #[cfg(test)] - pub fn is_empty(&self) -> bool { - self.sessions.read().is_empty() - } + #[cfg(test)] + pub fn is_empty(&self) -> bool { + self.sessions.read().is_empty() + } - pub fn get(&self, session_id: &S::Id, update_last_message_time: bool) -> Option> { - let mut sessions = self.sessions.write(); - sessions.get_mut(session_id) - .map(|s| { - if update_last_message_time { - s.last_message_time = Instant::now(); - } - s.session.clone() - }) - } + pub fn get(&self, session_id: &S::Id, 
update_last_message_time: bool) -> Option> { + let mut sessions = self.sessions.write(); + sessions.get_mut(session_id).map(|s| { + if update_last_message_time { + s.last_message_time = Instant::now(); + } + s.session.clone() + }) + } - #[cfg(test)] - pub fn first(&self) -> Option> { - self.sessions.read().values().nth(0).map(|s| s.session.clone()) - } + #[cfg(test)] + pub fn first(&self) -> Option> { + self.sessions + .read() + .values() + .nth(0) + .map(|s| s.session.clone()) + } - pub fn insert(&self, cluster: Arc, master: NodeId, session_id: S::Id, session_nonce: Option, is_exclusive_session: bool, creation_data: Option) -> Result, Error> { - let mut sessions = self.sessions.write(); - if sessions.contains_key(&session_id) { - return Err(Error::DuplicateSessionId); - } + pub fn insert( + &self, + cluster: Arc, + master: NodeId, + session_id: S::Id, + session_nonce: Option, + is_exclusive_session: bool, + creation_data: Option, + ) -> Result, Error> { + let mut sessions = self.sessions.write(); + if sessions.contains_key(&session_id) { + return Err(Error::DuplicateSessionId); + } - // create cluster - // let cluster = create_cluster_view(data, requires_all_connections)?; - // create session - let session = self.creator.create(cluster.clone(), master.clone(), session_nonce, session_id.clone(), creation_data)?; - // check if session can be started - self.container_state.lock().on_session_starting(is_exclusive_session)?; + // create cluster + // let cluster = create_cluster_view(data, requires_all_connections)?; + // create session + let session = self.creator.create( + cluster.clone(), + master.clone(), + session_nonce, + session_id.clone(), + creation_data, + )?; + // check if session can be started + self.container_state + .lock() + .on_session_starting(is_exclusive_session)?; - // insert session - let queued_session = QueuedSession { - master: master, - cluster_view: cluster, - last_keep_alive_time: Instant::now(), - last_message_time: Instant::now(), - 
session: session.clone(), - queue: VecDeque::new(), - }; - sessions.insert(session_id, queued_session); - self.notify_listeners(|l| l.on_session_inserted(session.clone())); + // insert session + let queued_session = QueuedSession { + master: master, + cluster_view: cluster, + last_keep_alive_time: Instant::now(), + last_message_time: Instant::now(), + session: session.clone(), + queue: VecDeque::new(), + }; + sessions.insert(session_id, queued_session); + self.notify_listeners(|l| l.on_session_inserted(session.clone())); - Ok(session) - } + Ok(session) + } - pub fn remove(&self, session_id: &S::Id) { - self.do_remove(session_id, &mut *self.sessions.write()); - } + pub fn remove(&self, session_id: &S::Id) { + self.do_remove(session_id, &mut *self.sessions.write()); + } - pub fn enqueue_message(&self, session_id: &S::Id, sender: NodeId, message: Message, is_queued_message: bool) { - self.sessions.write().get_mut(session_id) - .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } - else { session.queue.push_back((sender, message)) }); - } + pub fn enqueue_message( + &self, + session_id: &S::Id, + sender: NodeId, + message: Message, + is_queued_message: bool, + ) { + self.sessions.write().get_mut(session_id).map(|session| { + if is_queued_message { + session.queue.push_front((sender, message)) + } else { + session.queue.push_back((sender, message)) + } + }); + } - pub fn dequeue_message(&self, session_id: &S::Id) -> Option<(NodeId, Message)> { - self.sessions.write().get_mut(session_id) - .and_then(|session| session.queue.pop_front()) - } + pub fn dequeue_message(&self, session_id: &S::Id) -> Option<(NodeId, Message)> { + self.sessions + .write() + .get_mut(session_id) + .and_then(|session| session.queue.pop_front()) + } - pub fn stop_stalled_sessions(&self) { - let mut sessions = self.sessions.write(); - for sid in sessions.keys().cloned().collect::>() { - let remove_session = { - let session = sessions.get(&sid).expect("enumerating only 
existing sessions; qed"); - if Instant::now() - session.last_message_time > SESSION_TIMEOUT_INTERVAL { - session.session.on_session_timeout(); - session.session.is_finished() - } else { - false - } - }; + pub fn stop_stalled_sessions(&self) { + let mut sessions = self.sessions.write(); + for sid in sessions.keys().cloned().collect::>() { + let remove_session = { + let session = sessions + .get(&sid) + .expect("enumerating only existing sessions; qed"); + if Instant::now() - session.last_message_time > SESSION_TIMEOUT_INTERVAL { + session.session.on_session_timeout(); + session.session.is_finished() + } else { + false + } + }; - if remove_session { - self.do_remove(&sid, &mut *sessions); - } - } - } + if remove_session { + self.do_remove(&sid, &mut *sessions); + } + } + } - pub fn on_connection_timeout(&self, node_id: &NodeId) { - let mut sessions = self.sessions.write(); - for sid in sessions.keys().cloned().collect::>() { - let remove_session = { - let session = sessions.get(&sid).expect("enumerating only existing sessions; qed"); - session.session.on_node_timeout(node_id); - session.session.is_finished() - }; + pub fn on_connection_timeout(&self, node_id: &NodeId) { + let mut sessions = self.sessions.write(); + for sid in sessions.keys().cloned().collect::>() { + let remove_session = { + let session = sessions + .get(&sid) + .expect("enumerating only existing sessions; qed"); + session.session.on_node_timeout(node_id); + session.session.is_finished() + }; - if remove_session { - self.do_remove(&sid, &mut *sessions); - } - } - } + if remove_session { + self.do_remove(&sid, &mut *sessions); + } + } + } - fn do_remove(&self, session_id: &S::Id, sessions: &mut BTreeMap>) { - if !self.preserve_sessions { - if let Some(session) = sessions.remove(session_id) { - self.container_state.lock().on_session_completed(); - self.notify_listeners(|l| l.on_session_removed(session.session.clone())); - } - } - } + fn do_remove(&self, session_id: &S::Id, sessions: &mut BTreeMap>) { + 
if !self.preserve_sessions { + if let Some(session) = sessions.remove(session_id) { + self.container_state.lock().on_session_completed(); + self.notify_listeners(|l| l.on_session_removed(session.session.clone())); + } + } + } - fn notify_listeners) -> ()>(&self, callback: F) { - let mut listeners = self.listeners.lock(); - let mut listener_index = 0; - while listener_index < listeners.len() { - match listeners[listener_index].upgrade() { - Some(listener) => { - callback(&*listener); - listener_index += 1; - }, - None => { - listeners.swap_remove(listener_index); - }, - } - } - } + fn notify_listeners) -> ()>(&self, callback: F) { + let mut listeners = self.listeners.lock(); + let mut listener_index = 0; + while listener_index < listeners.len() { + match listeners[listener_index].upgrade() { + Some(listener) => { + callback(&*listener); + listener_index += 1; + } + None => { + listeners.swap_remove(listener_index); + } + } + } + } } -impl ClusterSessionsContainer where S: ClusterSession, SC: ClusterSessionCreator, SessionId: From { - pub fn send_keep_alive(&self, session_id: &S::Id, self_node_id: &NodeId) { - if let Some(session) = self.sessions.write().get_mut(session_id) { - let now = Instant::now(); - if self_node_id == &session.master && now - session.last_keep_alive_time > SESSION_KEEP_ALIVE_INTERVAL { - session.last_keep_alive_time = now; - // since we send KeepAlive message to prevent nodes from disconnecting - // && worst thing that can happen if node is disconnected is that session is failed - // => ignore error here, because probably this node is not need for the rest of the session at all - let _ = session.cluster_view.broadcast(Message::Cluster(message::ClusterMessage::KeepAliveResponse(message::KeepAliveResponse { - session_id: Some(session_id.clone().into()), - }))); - } - } - } +impl ClusterSessionsContainer +where + S: ClusterSession, + SC: ClusterSessionCreator, + SessionId: From, +{ + pub fn send_keep_alive(&self, session_id: &S::Id, self_node_id: 
&NodeId) { + if let Some(session) = self.sessions.write().get_mut(session_id) { + let now = Instant::now(); + if self_node_id == &session.master + && now - session.last_keep_alive_time > SESSION_KEEP_ALIVE_INTERVAL + { + session.last_keep_alive_time = now; + // since we send KeepAlive message to prevent nodes from disconnecting + // && worst thing that can happen if node is disconnected is that session is failed + // => ignore error here, because probably this node is not need for the rest of the session at all + let _ = session.cluster_view.broadcast(Message::Cluster( + message::ClusterMessage::KeepAliveResponse(message::KeepAliveResponse { + session_id: Some(session_id.clone().into()), + }), + )); + } + } + } - pub fn on_keep_alive(&self, session_id: &S::Id, sender: &NodeId) { - if let Some(session) = self.sessions.write().get_mut(session_id) { - let now = Instant::now(); - // we only accept keep alive from master node of ServersSetChange session - if sender == &session.master { - session.last_keep_alive_time = now; - } - } - } + pub fn on_keep_alive(&self, session_id: &S::Id, sender: &NodeId) { + if let Some(session) = self.sessions.write().get_mut(session_id) { + let now = Instant::now(); + // we only accept keep alive from master node of ServersSetChange session + if sender == &session.master { + session.last_keep_alive_time = now; + } + } + } } impl ClusterSessionsContainerState { - /// When session is starting. 
- pub fn on_session_starting(&mut self, is_exclusive_session: bool) -> Result<(), Error> { - match *self { - ClusterSessionsContainerState::Idle if is_exclusive_session => { - ::std::mem::replace(self, ClusterSessionsContainerState::Exclusive); - }, - ClusterSessionsContainerState::Idle => { - ::std::mem::replace(self, ClusterSessionsContainerState::Active(1)); - }, - ClusterSessionsContainerState::Active(_) if is_exclusive_session => - return Err(Error::HasActiveSessions), - ClusterSessionsContainerState::Active(sessions_count) => { - ::std::mem::replace(self, ClusterSessionsContainerState::Active(sessions_count + 1)); - }, - ClusterSessionsContainerState::Exclusive => - return Err(Error::ExclusiveSessionActive), - } - Ok(()) - } + /// When session is starting. + pub fn on_session_starting(&mut self, is_exclusive_session: bool) -> Result<(), Error> { + match *self { + ClusterSessionsContainerState::Idle if is_exclusive_session => { + ::std::mem::replace(self, ClusterSessionsContainerState::Exclusive); + } + ClusterSessionsContainerState::Idle => { + ::std::mem::replace(self, ClusterSessionsContainerState::Active(1)); + } + ClusterSessionsContainerState::Active(_) if is_exclusive_session => { + return Err(Error::HasActiveSessions) + } + ClusterSessionsContainerState::Active(sessions_count) => { + ::std::mem::replace( + self, + ClusterSessionsContainerState::Active(sessions_count + 1), + ); + } + ClusterSessionsContainerState::Exclusive => return Err(Error::ExclusiveSessionActive), + } + Ok(()) + } - /// When session is completed. - pub fn on_session_completed(&mut self) { - match *self { + /// When session is completed. 
+ pub fn on_session_completed(&mut self) { + match *self { ClusterSessionsContainerState::Idle => unreachable!("idle means that there are no active sessions; on_session_completed is only called once after active session is completed; qed"), ClusterSessionsContainerState::Active(sessions_count) if sessions_count == 1 => { @@ -482,226 +588,351 @@ impl ClusterSessionsContainerState { ::std::mem::replace(self, ClusterSessionsContainerState::Idle); }, } - } + } } impl SessionIdWithSubSession { - /// Create new decryption session Id. - pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self { - SessionIdWithSubSession { - id: session_id, - access_key: sub_session_id, - } - } + /// Create new decryption session Id. + pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self { + SessionIdWithSubSession { + id: session_id, + access_key: sub_session_id, + } + } } impl PartialOrd for SessionIdWithSubSession { - fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> { + Some(self.cmp(other)) + } } impl Ord for SessionIdWithSubSession { - fn cmp(&self, other: &Self) -> ::std::cmp::Ordering { - match self.id.cmp(&other.id) { - ::std::cmp::Ordering::Equal => self.access_key.cmp(&other.access_key), - r @ _ => r, - } - } + fn cmp(&self, other: &Self) -> ::std::cmp::Ordering { + match self.id.cmp(&other.id) { + ::std::cmp::Ordering::Equal => self.access_key.cmp(&other.access_key), + r @ _ => r, + } + } } impl AdminSession { - pub fn as_servers_set_change(&self) -> Option<&ServersSetChangeSessionImpl> { - match *self { - AdminSession::ServersSetChange(ref session) => Some(session), - _ => None - } - } + pub fn as_servers_set_change(&self) -> Option<&ServersSetChangeSessionImpl> { + match *self { + AdminSession::ServersSetChange(ref session) => Some(session), + _ => None, + } + } } impl ClusterSession for AdminSession { - type Id = SessionId; + type Id = 
SessionId; - fn type_name() -> &'static str { - "admin" - } + fn type_name() -> &'static str { + "admin" + } - fn id(&self) -> SessionId { - match *self { - AdminSession::ShareAdd(ref session) => session.id().clone(), - AdminSession::ServersSetChange(ref session) => session.id().clone(), - } - } + fn id(&self) -> SessionId { + match *self { + AdminSession::ShareAdd(ref session) => session.id().clone(), + AdminSession::ServersSetChange(ref session) => session.id().clone(), + } + } - fn is_finished(&self) -> bool { - match *self { - AdminSession::ShareAdd(ref session) => session.is_finished(), - AdminSession::ServersSetChange(ref session) => session.is_finished(), - } - } + fn is_finished(&self) -> bool { + match *self { + AdminSession::ShareAdd(ref session) => session.is_finished(), + AdminSession::ServersSetChange(ref session) => session.is_finished(), + } + } - fn on_session_timeout(&self) { - match *self { - AdminSession::ShareAdd(ref session) => session.on_session_timeout(), - AdminSession::ServersSetChange(ref session) => session.on_session_timeout(), - } - } + fn on_session_timeout(&self) { + match *self { + AdminSession::ShareAdd(ref session) => session.on_session_timeout(), + AdminSession::ServersSetChange(ref session) => session.on_session_timeout(), + } + } - fn on_node_timeout(&self, node_id: &NodeId) { - match *self { - AdminSession::ShareAdd(ref session) => session.on_node_timeout(node_id), - AdminSession::ServersSetChange(ref session) => session.on_node_timeout(node_id), - } - } + fn on_node_timeout(&self, node_id: &NodeId) { + match *self { + AdminSession::ShareAdd(ref session) => session.on_node_timeout(node_id), + AdminSession::ServersSetChange(ref session) => session.on_node_timeout(node_id), + } + } - fn on_session_error(&self, node: &NodeId, error: Error) { - match *self { - AdminSession::ShareAdd(ref session) => session.on_session_error(node, error), - AdminSession::ServersSetChange(ref session) => session.on_session_error(node, error), - } - } 
+ fn on_session_error(&self, node: &NodeId, error: Error) { + match *self { + AdminSession::ShareAdd(ref session) => session.on_session_error(node, error), + AdminSession::ServersSetChange(ref session) => session.on_session_error(node, error), + } + } - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *self { - AdminSession::ShareAdd(ref session) => session.on_message(sender, message), - AdminSession::ServersSetChange(ref session) => session.on_message(sender, message), - } - } + fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { + match *self { + AdminSession::ShareAdd(ref session) => session.on_message(sender, message), + AdminSession::ServersSetChange(ref session) => session.on_message(sender, message), + } + } } -pub fn create_cluster_view(self_key_pair: Arc, connections: Arc, requires_all_connections: bool) -> Result, Error> { - let mut connected_nodes = connections.connected_nodes()?; - let disconnected_nodes = connections.disconnected_nodes(); +pub fn create_cluster_view( + self_key_pair: Arc, + connections: Arc, + requires_all_connections: bool, +) -> Result, Error> { + let mut connected_nodes = connections.connected_nodes()?; + let disconnected_nodes = connections.disconnected_nodes(); - let disconnected_nodes_count = disconnected_nodes.len(); - if requires_all_connections { - if disconnected_nodes_count != 0 { - return Err(Error::NodeDisconnected); - } - } + let disconnected_nodes_count = disconnected_nodes.len(); + if requires_all_connections { + if disconnected_nodes_count != 0 { + return Err(Error::NodeDisconnected); + } + } - connected_nodes.insert(self_key_pair.public().clone()); + connected_nodes.insert(self_key_pair.public().clone()); - let connected_nodes_count = connected_nodes.len(); - Ok(Arc::new(ClusterView::new(self_key_pair, connections, connected_nodes, connected_nodes_count + disconnected_nodes_count))) + let connected_nodes_count = connected_nodes.len(); + 
Ok(Arc::new(ClusterView::new( + self_key_pair, + connections, + connected_nodes, + connected_nodes_count + disconnected_nodes_count, + ))) } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::sync::atomic::{AtomicUsize, Ordering}; - use ethkey::{Random, Generator}; - use key_server_cluster::{Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair}; - use key_server_cluster::cluster::ClusterConfiguration; - use key_server_cluster::connection_trigger::SimpleServersSetChangeSessionCreatorConnector; - use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::generation_session::{SessionImpl as GenerationSession}; - use super::{ClusterSessions, AdminSessionCreationData, ClusterSessionsListener, - ClusterSessionsContainerState, SESSION_TIMEOUT_INTERVAL}; + use super::{ + AdminSessionCreationData, ClusterSessions, ClusterSessionsContainerState, + ClusterSessionsListener, SESSION_TIMEOUT_INTERVAL, + }; + use ethkey::{Generator, Random}; + use key_server_cluster::{ + cluster::{tests::DummyCluster, ClusterConfiguration}, + connection_trigger::SimpleServersSetChangeSessionCreatorConnector, + generation_session::SessionImpl as GenerationSession, + DummyAclStorage, DummyKeyStorage, Error, MapKeyServerSet, PlainNodeKeyPair, + }; + use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }; - pub fn make_cluster_sessions() -> ClusterSessions { - let key_pair = Random.generate().unwrap(); - let config = ClusterConfiguration { - self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pair.clone())), - key_server_set: Arc::new(MapKeyServerSet::new(false, vec![(key_pair.public().clone(), format!("127.0.0.1:{}", 100).parse().unwrap())].into_iter().collect())), - key_storage: Arc::new(DummyKeyStorage::default()), - acl_storage: Arc::new(DummyAclStorage::default()), - admin_public: Some(Random.generate().unwrap().public().clone()), - preserve_sessions: false, - }; - ClusterSessions::new(&config, 
Arc::new(SimpleServersSetChangeSessionCreatorConnector { - admin_public: Some(Random.generate().unwrap().public().clone()), - })) - } + pub fn make_cluster_sessions() -> ClusterSessions { + let key_pair = Random.generate().unwrap(); + let config = ClusterConfiguration { + self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pair.clone())), + key_server_set: Arc::new(MapKeyServerSet::new( + false, + vec![( + key_pair.public().clone(), + format!("127.0.0.1:{}", 100).parse().unwrap(), + )] + .into_iter() + .collect(), + )), + key_storage: Arc::new(DummyKeyStorage::default()), + acl_storage: Arc::new(DummyAclStorage::default()), + admin_public: Some(Random.generate().unwrap().public().clone()), + preserve_sessions: false, + }; + ClusterSessions::new( + &config, + Arc::new(SimpleServersSetChangeSessionCreatorConnector { + admin_public: Some(Random.generate().unwrap().public().clone()), + }), + ) + } - #[test] - fn cluster_session_cannot_be_started_if_exclusive_session_is_active() { - let sessions = make_cluster_sessions(); - sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap(); - match sessions.admin_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, true, Some(AdminSessionCreationData::ShareAdd(Default::default()))) { - Err(Error::HasActiveSessions) => (), - Err(e) => unreachable!(format!("{}", e)), - Ok(_) => unreachable!("OK"), - } - } + #[test] + fn cluster_session_cannot_be_started_if_exclusive_session_is_active() { + let sessions = make_cluster_sessions(); + sessions + .generation_sessions + .insert( + Arc::new(DummyCluster::new(Default::default())), + Default::default(), + Default::default(), + None, + false, + None, + ) + .unwrap(); + match sessions.admin_sessions.insert( + Arc::new(DummyCluster::new(Default::default())), + Default::default(), + Default::default(), + None, + true, + 
Some(AdminSessionCreationData::ShareAdd(Default::default())), + ) { + Err(Error::HasActiveSessions) => (), + Err(e) => unreachable!(format!("{}", e)), + Ok(_) => unreachable!("OK"), + } + } - #[test] - fn exclusive_session_cannot_be_started_if_other_session_is_active() { - let sessions = make_cluster_sessions(); + #[test] + fn exclusive_session_cannot_be_started_if_other_session_is_active() { + let sessions = make_cluster_sessions(); - sessions.admin_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, true, Some(AdminSessionCreationData::ShareAdd(Default::default()))).unwrap(); - match sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None) { - Err(Error::ExclusiveSessionActive) => (), - Err(e) => unreachable!(format!("{}", e)), - Ok(_) => unreachable!("OK"), - } - } + sessions + .admin_sessions + .insert( + Arc::new(DummyCluster::new(Default::default())), + Default::default(), + Default::default(), + None, + true, + Some(AdminSessionCreationData::ShareAdd(Default::default())), + ) + .unwrap(); + match sessions.generation_sessions.insert( + Arc::new(DummyCluster::new(Default::default())), + Default::default(), + Default::default(), + None, + false, + None, + ) { + Err(Error::ExclusiveSessionActive) => (), + Err(e) => unreachable!(format!("{}", e)), + Ok(_) => unreachable!("OK"), + } + } - #[test] - fn session_listener_works() { - #[derive(Default)] - struct GenerationSessionListener { - inserted: AtomicUsize, - removed: AtomicUsize, - } + #[test] + fn session_listener_works() { + #[derive(Default)] + struct GenerationSessionListener { + inserted: AtomicUsize, + removed: AtomicUsize, + } - impl ClusterSessionsListener for GenerationSessionListener { - fn on_session_inserted(&self, _session: Arc) { - self.inserted.fetch_add(1, Ordering::Relaxed); - } + impl ClusterSessionsListener for GenerationSessionListener { + fn 
on_session_inserted(&self, _session: Arc) { + self.inserted.fetch_add(1, Ordering::Relaxed); + } - fn on_session_removed(&self, _session: Arc) { - self.removed.fetch_add(1, Ordering::Relaxed); - } - } + fn on_session_removed(&self, _session: Arc) { + self.removed.fetch_add(1, Ordering::Relaxed); + } + } - let listener = Arc::new(GenerationSessionListener::default()); - let sessions = make_cluster_sessions(); - sessions.generation_sessions.add_listener(listener.clone()); + let listener = Arc::new(GenerationSessionListener::default()); + let sessions = make_cluster_sessions(); + sessions.generation_sessions.add_listener(listener.clone()); - sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap(); - assert_eq!(listener.inserted.load(Ordering::Relaxed), 1); - assert_eq!(listener.removed.load(Ordering::Relaxed), 0); + sessions + .generation_sessions + .insert( + Arc::new(DummyCluster::new(Default::default())), + Default::default(), + Default::default(), + None, + false, + None, + ) + .unwrap(); + assert_eq!(listener.inserted.load(Ordering::Relaxed), 1); + assert_eq!(listener.removed.load(Ordering::Relaxed), 0); - sessions.generation_sessions.remove(&Default::default()); - assert_eq!(listener.inserted.load(Ordering::Relaxed), 1); - assert_eq!(listener.removed.load(Ordering::Relaxed), 1); - } + sessions.generation_sessions.remove(&Default::default()); + assert_eq!(listener.inserted.load(Ordering::Relaxed), 1); + assert_eq!(listener.removed.load(Ordering::Relaxed), 1); + } - #[test] - fn last_session_removal_sets_container_state_to_idle() { - let sessions = make_cluster_sessions(); + #[test] + fn last_session_removal_sets_container_state_to_idle() { + let sessions = make_cluster_sessions(); - sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap(); - 
assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Active(1)); + sessions + .generation_sessions + .insert( + Arc::new(DummyCluster::new(Default::default())), + Default::default(), + Default::default(), + None, + false, + None, + ) + .unwrap(); + assert_eq!( + *sessions.generation_sessions.container_state.lock(), + ClusterSessionsContainerState::Active(1) + ); - sessions.generation_sessions.remove(&Default::default()); - assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Idle); - } + sessions.generation_sessions.remove(&Default::default()); + assert_eq!( + *sessions.generation_sessions.container_state.lock(), + ClusterSessionsContainerState::Idle + ); + } - #[test] - fn last_session_removal_by_timeout_sets_container_state_to_idle() { - let sessions = make_cluster_sessions(); + #[test] + fn last_session_removal_by_timeout_sets_container_state_to_idle() { + let sessions = make_cluster_sessions(); - sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap(); - assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Active(1)); + sessions + .generation_sessions + .insert( + Arc::new(DummyCluster::new(Default::default())), + Default::default(), + Default::default(), + None, + false, + None, + ) + .unwrap(); + assert_eq!( + *sessions.generation_sessions.container_state.lock(), + ClusterSessionsContainerState::Active(1) + ); - sessions.generation_sessions.sessions.write().get_mut(&Default::default()).unwrap().last_message_time -= SESSION_TIMEOUT_INTERVAL * 2; + sessions + .generation_sessions + .sessions + .write() + .get_mut(&Default::default()) + .unwrap() + .last_message_time -= SESSION_TIMEOUT_INTERVAL * 2; - sessions.generation_sessions.stop_stalled_sessions(); - assert_eq!(sessions.generation_sessions.sessions.read().len(), 0); - 
assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Idle); - } + sessions.generation_sessions.stop_stalled_sessions(); + assert_eq!(sessions.generation_sessions.sessions.read().len(), 0); + assert_eq!( + *sessions.generation_sessions.container_state.lock(), + ClusterSessionsContainerState::Idle + ); + } - #[test] - fn last_session_removal_by_node_timeout_sets_container_state_to_idle() { - let sessions = make_cluster_sessions(); + #[test] + fn last_session_removal_by_node_timeout_sets_container_state_to_idle() { + let sessions = make_cluster_sessions(); - sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap(); - assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Active(1)); + sessions + .generation_sessions + .insert( + Arc::new(DummyCluster::new(Default::default())), + Default::default(), + Default::default(), + None, + false, + None, + ) + .unwrap(); + assert_eq!( + *sessions.generation_sessions.container_state.lock(), + ClusterSessionsContainerState::Active(1) + ); - sessions.generation_sessions.on_connection_timeout(&Default::default()); - assert_eq!(sessions.generation_sessions.sessions.read().len(), 0); - assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Idle); - } + sessions + .generation_sessions + .on_connection_timeout(&Default::default()); + assert_eq!(sessions.generation_sessions.sessions.read().len(), 0); + assert_eq!( + *sessions.generation_sessions.container_state.lock(), + ClusterSessionsContainerState::Idle + ); + } } diff --git a/secret-store/src/key_server_cluster/cluster_sessions_creator.rs b/secret-store/src/key_server_cluster/cluster_sessions_creator.rs index 23a3657c4..98aad7fcc 100644 --- a/secret-store/src/key_server_cluster/cluster_sessions_creator.rs +++ 
b/secret-store/src/key_server_cluster/cluster_sessions_creator.rs @@ -14,487 +14,664 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use std::collections::BTreeMap; -use parking_lot::RwLock; use ethkey::Public; -use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, DocumentKeyShare, SessionMeta}; -use key_server_cluster::cluster::{Cluster, ClusterConfiguration}; -use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector; -use key_server_cluster::cluster_sessions::{ClusterSession, SessionIdWithSubSession, AdminSession, AdminSessionCreationData}; -use key_server_cluster::message::{self, Message, DecryptionMessage, SchnorrSigningMessage, ConsensusMessageOfShareAdd, - ShareAddMessage, ServersSetChangeMessage, ConsensusMessage, ConsensusMessageWithServersSet, EcdsaSigningMessage}; -use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl, SessionParams as GenerationSessionParams}; -use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl, - SessionParams as DecryptionSessionParams}; -use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionParams as EncryptionSessionParams}; -use key_server_cluster::signing_session_ecdsa::{SessionImpl as EcdsaSigningSessionImpl, - SessionParams as EcdsaSigningSessionParams}; -use key_server_cluster::signing_session_schnorr::{SessionImpl as SchnorrSigningSessionImpl, - SessionParams as SchnorrSigningSessionParams}; -use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl, - SessionParams as ShareAddSessionParams, IsolatedSessionTransport as ShareAddTransport}; -use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl, - SessionParams as ServersSetChangeSessionParams}; -use 
key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl, - SessionParams as KeyVersionNegotiationSessionParams, IsolatedSessionTransport as VersionNegotiationTransport, - FastestResultComputer as FastestResultKeyVersionsResultComputer}; -use key_server_cluster::admin_sessions::ShareChangeSessionMeta; +use key_server_cluster::{ + admin_sessions::ShareChangeSessionMeta, + cluster::{Cluster, ClusterConfiguration}, + cluster_sessions::{ + AdminSession, AdminSessionCreationData, ClusterSession, SessionIdWithSubSession, + }, + connection_trigger::ServersSetChangeSessionCreatorConnector, + decryption_session::{ + SessionImpl as DecryptionSessionImpl, SessionParams as DecryptionSessionParams, + }, + encryption_session::{ + SessionImpl as EncryptionSessionImpl, SessionParams as EncryptionSessionParams, + }, + generation_session::{ + SessionImpl as GenerationSessionImpl, SessionParams as GenerationSessionParams, + }, + key_version_negotiation_session::{ + FastestResultComputer as FastestResultKeyVersionsResultComputer, + IsolatedSessionTransport as VersionNegotiationTransport, + SessionImpl as KeyVersionNegotiationSessionImpl, + SessionParams as KeyVersionNegotiationSessionParams, + }, + message::{ + self, ConsensusMessage, ConsensusMessageOfShareAdd, ConsensusMessageWithServersSet, + DecryptionMessage, EcdsaSigningMessage, Message, SchnorrSigningMessage, + ServersSetChangeMessage, ShareAddMessage, + }, + servers_set_change_session::{ + SessionImpl as ServersSetChangeSessionImpl, SessionParams as ServersSetChangeSessionParams, + }, + share_add_session::{ + IsolatedSessionTransport as ShareAddTransport, SessionImpl as ShareAddSessionImpl, + SessionParams as ShareAddSessionParams, + }, + signing_session_ecdsa::{ + SessionImpl as EcdsaSigningSessionImpl, SessionParams as EcdsaSigningSessionParams, + }, + signing_session_schnorr::{ + SessionImpl as SchnorrSigningSessionImpl, SessionParams as SchnorrSigningSessionParams, + }, + 
AclStorage, DocumentKeyShare, Error, KeyStorage, NodeId, Requester, SessionId, SessionMeta, +}; +use parking_lot::RwLock; +use std::{ + collections::BTreeMap, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, +}; /// Generic cluster session creator. pub trait ClusterSessionCreator { - /// Get creation data from message. - fn creation_data_from_message(_message: &Message) -> Result, Error> { - Ok(None) - } + /// Get creation data from message. + fn creation_data_from_message(_message: &Message) -> Result, Error> { + Ok(None) + } - /// Prepare error message. - fn make_error_message(sid: S::Id, nonce: u64, err: Error) -> Message; + /// Prepare error message. + fn make_error_message(sid: S::Id, nonce: u64, err: Error) -> Message; - /// Create cluster session. - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: S::Id, creation_data: Option) -> Result, Error>; + /// Create cluster session. + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: S::Id, + creation_data: Option, + ) -> Result, Error>; } /// Message with session id. pub trait IntoSessionId { - /// Get session id. - fn into_session_id(&self) -> Result; + /// Get session id. + fn into_session_id(&self) -> Result; } pub struct SessionCreatorCore { - /// Self node id. - self_node_id: NodeId, - /// Reference to key storage - key_storage: Arc, - /// Reference to ACL storage - acl_storage: Arc, - /// Always-increasing sessions counter. 
Is used as session nonce to prevent replay attacks: - /// 1) during handshake, KeyServers generate new random key to encrypt messages - /// => there's no way to use messages from previous connections for replay attacks - /// 2) when session (of any type) is started, master node increases its own session counter and broadcasts it - /// 3) when slave KeyServer receives session initialization message, it checks that new nonce is larger than previous (from the same master) - /// => there's no way to use messages from previous sessions for replay attacks - /// 4) KeyServer checks that each session message contains the same nonce that initialization message - /// Given that: (A) handshake is secure and (B) session itself is initially replay-protected - /// => this guarantees that sessions are replay-protected. - session_counter: AtomicUsize, - /// Maximal session nonce, received from given connection. - max_nonce: RwLock>, + /// Self node id. + self_node_id: NodeId, + /// Reference to key storage + key_storage: Arc, + /// Reference to ACL storage + acl_storage: Arc, + /// Always-increasing sessions counter. Is used as session nonce to prevent replay attacks: + /// 1) during handshake, KeyServers generate new random key to encrypt messages + /// => there's no way to use messages from previous connections for replay attacks + /// 2) when session (of any type) is started, master node increases its own session counter and broadcasts it + /// 3) when slave KeyServer receives session initialization message, it checks that new nonce is larger than previous (from the same master) + /// => there's no way to use messages from previous sessions for replay attacks + /// 4) KeyServer checks that each session message contains the same nonce that initialization message + /// Given that: (A) handshake is secure and (B) session itself is initially replay-protected + /// => this guarantees that sessions are replay-protected. 
+ session_counter: AtomicUsize, + /// Maximal session nonce, received from given connection. + max_nonce: RwLock>, } impl SessionCreatorCore { - /// Create new session creator core. - pub fn new(config: &ClusterConfiguration) -> Self { - SessionCreatorCore { - self_node_id: config.self_key_pair.public().clone(), - acl_storage: config.acl_storage.clone(), - key_storage: config.key_storage.clone(), - session_counter: AtomicUsize::new(0), - max_nonce: RwLock::new(BTreeMap::new()), - } - } + /// Create new session creator core. + pub fn new(config: &ClusterConfiguration) -> Self { + SessionCreatorCore { + self_node_id: config.self_key_pair.public().clone(), + acl_storage: config.acl_storage.clone(), + key_storage: config.key_storage.clone(), + session_counter: AtomicUsize::new(0), + max_nonce: RwLock::new(BTreeMap::new()), + } + } - /// When node has teimtouted. - pub fn on_connection_timeout(&self, node_id: &NodeId) { - self.max_nonce.write().remove(node_id); - } + /// When node has teimtouted. + pub fn on_connection_timeout(&self, node_id: &NodeId) { + self.max_nonce.write().remove(node_id); + } - /// Check or generate new session nonce. - fn check_session_nonce(&self, master: &NodeId, nonce: Option) -> Result { - // if we're master node of the session, then nonce should be generated - // if we're slave node of the session, then nonce should be passed from outside - match nonce { - Some(nonce) => match nonce > *self.max_nonce.write().entry(master.clone()).or_insert(0) { - true => Ok(nonce), - false => Err(Error::ReplayProtection), - }, - None => Ok(self.session_counter.fetch_add(1, Ordering::Relaxed) as u64 + 1), - } - } + /// Check or generate new session nonce. 
+ fn check_session_nonce(&self, master: &NodeId, nonce: Option) -> Result { + // if we're master node of the session, then nonce should be generated + // if we're slave node of the session, then nonce should be passed from outside + match nonce { + Some(nonce) => match nonce > *self.max_nonce.write().entry(master.clone()).or_insert(0) + { + true => Ok(nonce), + false => Err(Error::ReplayProtection), + }, + None => Ok(self.session_counter.fetch_add(1, Ordering::Relaxed) as u64 + 1), + } + } - /// Read key share && remove disconnected nodes. - fn read_key_share(&self, key_id: &SessionId) -> Result, Error> { - self.key_storage.get(key_id) - } + /// Read key share && remove disconnected nodes. + fn read_key_share(&self, key_id: &SessionId) -> Result, Error> { + self.key_storage.get(key_id) + } } /// Generation session creator. pub struct GenerationSessionCreator { - /// True if generation sessions must fail. - pub make_faulty_generation_sessions: AtomicBool, - /// Creator core. - pub core: Arc, + /// True if generation sessions must fail. + pub make_faulty_generation_sessions: AtomicBool, + /// Creator core. 
+ pub core: Arc, } impl GenerationSessionCreator { - #[cfg(test)] - pub fn make_faulty_generation_sessions(&self) { - self.make_faulty_generation_sessions.store(true, Ordering::Relaxed); - } + #[cfg(test)] + pub fn make_faulty_generation_sessions(&self) { + self.make_faulty_generation_sessions + .store(true, Ordering::Relaxed); + } } impl ClusterSessionCreator for GenerationSessionCreator { - fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message { - message::Message::Generation(message::GenerationMessage::SessionError(message::SessionError { - session: sid.into(), - session_nonce: nonce, - error: err.into(), - })) - } + fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message { + message::Message::Generation(message::GenerationMessage::SessionError( + message::SessionError { + session: sid.into(), + session_nonce: nonce, + error: err.into(), + }, + )) + } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionId, _creation_data: Option<()>) -> Result, Error> { - // check that there's no finished encryption session with the same id - if self.core.key_storage.contains(&id) { - return Err(Error::ServerKeyAlreadyGenerated); - } + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionId, + _creation_data: Option<()>, + ) -> Result, Error> { + // check that there's no finished encryption session with the same id + if self.core.key_storage.contains(&id) { + return Err(Error::ServerKeyAlreadyGenerated); + } - let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(GenerationSessionImpl::new(GenerationSessionParams { - id: id.clone(), - self_node_id: self.core.self_node_id.clone(), - key_storage: Some(self.core.key_storage.clone()), - cluster: cluster, - nonce: Some(nonce), - })) - .map(|session| { - if self.make_faulty_generation_sessions.load(Ordering::Relaxed) { - session.simulate_faulty_behaviour(); - } - session - }) - .map(Arc::new) - } + let nonce = 
self.core.check_session_nonce(&master, nonce)?; + Ok(GenerationSessionImpl::new(GenerationSessionParams { + id: id.clone(), + self_node_id: self.core.self_node_id.clone(), + key_storage: Some(self.core.key_storage.clone()), + cluster: cluster, + nonce: Some(nonce), + })) + .map(|session| { + if self.make_faulty_generation_sessions.load(Ordering::Relaxed) { + session.simulate_faulty_behaviour(); + } + session + }) + .map(Arc::new) + } } /// Encryption session creator. pub struct EncryptionSessionCreator { - /// Creator core. - pub core: Arc, + /// Creator core. + pub core: Arc, } impl ClusterSessionCreator for EncryptionSessionCreator { - fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message { - message::Message::Encryption(message::EncryptionMessage::EncryptionSessionError(message::EncryptionSessionError { - session: sid.into(), - session_nonce: nonce, - error: err.into(), - })) - } + fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message { + message::Message::Encryption(message::EncryptionMessage::EncryptionSessionError( + message::EncryptionSessionError { + session: sid.into(), + session_nonce: nonce, + error: err.into(), + }, + )) + } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionId, _creation_data: Option<()>) -> Result, Error> { - let encrypted_data = self.core.read_key_share(&id)?; - let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams { - id: id, - self_node_id: self.core.self_node_id.clone(), - encrypted_data: encrypted_data, - key_storage: self.core.key_storage.clone(), - cluster: cluster, - nonce: nonce, - })?)) - } + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionId, + _creation_data: Option<()>, + ) -> Result, Error> { + let encrypted_data = self.core.read_key_share(&id)?; + let nonce = self.core.check_session_nonce(&master, nonce)?; + Ok(Arc::new(EncryptionSessionImpl::new( + 
EncryptionSessionParams { + id: id, + self_node_id: self.core.self_node_id.clone(), + encrypted_data: encrypted_data, + key_storage: self.core.key_storage.clone(), + cluster: cluster, + nonce: nonce, + }, + )?)) + } } /// Decryption session creator. pub struct DecryptionSessionCreator { - /// Creator core. - pub core: Arc, + /// Creator core. + pub core: Arc, } impl ClusterSessionCreator for DecryptionSessionCreator { - fn creation_data_from_message(message: &Message) -> Result, Error> { - match *message { - Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(ref message)) => match &message.message { - &ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requester.clone().into())), - _ => Err(Error::InvalidMessage), - }, - Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(ref message)) => Ok(Some(message.requester.clone().into())), - _ => Err(Error::InvalidMessage), - } - } + fn creation_data_from_message(message: &Message) -> Result, Error> { + match *message { + Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(ref message)) => { + match &message.message { + &ConsensusMessage::InitializeConsensusSession(ref message) => { + Ok(Some(message.requester.clone().into())) + } + _ => Err(Error::InvalidMessage), + } + } + Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(ref message)) => { + Ok(Some(message.requester.clone().into())) + } + _ => Err(Error::InvalidMessage), + } + } - fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message { - message::Message::Decryption(message::DecryptionMessage::DecryptionSessionError(message::DecryptionSessionError { - session: sid.id.into(), - sub_session: sid.access_key.into(), - session_nonce: nonce, - error: err.into(), - })) - } + fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message { + message::Message::Decryption(message::DecryptionMessage::DecryptionSessionError( + 
message::DecryptionSessionError { + session: sid.id.into(), + sub_session: sid.access_key.into(), + session_nonce: nonce, + error: err.into(), + }, + )) + } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, requester: Option) -> Result, Error> { - let encrypted_data = self.core.read_key_share(&id.id)?; - let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams { - meta: SessionMeta { - id: id.id, - self_node_id: self.core.self_node_id.clone(), - master_node_id: master, - threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(), - configured_nodes_count: cluster.configured_nodes_count(), - connected_nodes_count: cluster.connected_nodes_count(), - }, - access_key: id.access_key, - key_share: encrypted_data, - acl_storage: self.core.acl_storage.clone(), - cluster: cluster, - nonce: nonce, - }, requester)?)) - } + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionIdWithSubSession, + requester: Option, + ) -> Result, Error> { + let encrypted_data = self.core.read_key_share(&id.id)?; + let nonce = self.core.check_session_nonce(&master, nonce)?; + Ok(Arc::new(DecryptionSessionImpl::new( + DecryptionSessionParams { + meta: SessionMeta { + id: id.id, + self_node_id: self.core.self_node_id.clone(), + master_node_id: master, + threshold: encrypted_data + .as_ref() + .map(|ks| ks.threshold) + .unwrap_or_default(), + configured_nodes_count: cluster.configured_nodes_count(), + connected_nodes_count: cluster.connected_nodes_count(), + }, + access_key: id.access_key, + key_share: encrypted_data, + acl_storage: self.core.acl_storage.clone(), + cluster: cluster, + nonce: nonce, + }, + requester, + )?)) + } } /// Schnorr signing session creator. pub struct SchnorrSigningSessionCreator { - /// Creator core. - pub core: Arc, + /// Creator core. 
+ pub core: Arc, } impl ClusterSessionCreator for SchnorrSigningSessionCreator { - fn creation_data_from_message(message: &Message) -> Result, Error> { - match *message { - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref message)) => match &message.message { - &ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requester.clone().into())), - _ => Err(Error::InvalidMessage), - }, - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref message)) => Ok(Some(message.requester.clone().into())), - _ => Err(Error::InvalidMessage), - } - } + fn creation_data_from_message(message: &Message) -> Result, Error> { + match *message { + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage( + ref message, + )) => match &message.message { + &ConsensusMessage::InitializeConsensusSession(ref message) => { + Ok(Some(message.requester.clone().into())) + } + _ => Err(Error::InvalidMessage), + }, + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation( + ref message, + )) => Ok(Some(message.requester.clone().into())), + _ => Err(Error::InvalidMessage), + } + } - fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message { - message::Message::SchnorrSigning(message::SchnorrSigningMessage::SchnorrSigningSessionError(message::SchnorrSigningSessionError { - session: sid.id.into(), - sub_session: sid.access_key.into(), - session_nonce: nonce, - error: err.into(), - })) - } + fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message { + message::Message::SchnorrSigning( + message::SchnorrSigningMessage::SchnorrSigningSessionError( + message::SchnorrSigningSessionError { + session: sid.id.into(), + sub_session: sid.access_key.into(), + session_nonce: nonce, + error: err.into(), + }, + ), + ) + } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, requester: 
Option) -> Result, Error> { - let encrypted_data = self.core.read_key_share(&id.id)?; - let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(SchnorrSigningSessionImpl::new(SchnorrSigningSessionParams { - meta: SessionMeta { - id: id.id, - self_node_id: self.core.self_node_id.clone(), - master_node_id: master, - threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(), - configured_nodes_count: cluster.configured_nodes_count(), - connected_nodes_count: cluster.connected_nodes_count(), - }, - access_key: id.access_key, - key_share: encrypted_data, - acl_storage: self.core.acl_storage.clone(), - cluster: cluster, - nonce: nonce, - }, requester)?)) - } + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionIdWithSubSession, + requester: Option, + ) -> Result, Error> { + let encrypted_data = self.core.read_key_share(&id.id)?; + let nonce = self.core.check_session_nonce(&master, nonce)?; + Ok(Arc::new(SchnorrSigningSessionImpl::new( + SchnorrSigningSessionParams { + meta: SessionMeta { + id: id.id, + self_node_id: self.core.self_node_id.clone(), + master_node_id: master, + threshold: encrypted_data + .as_ref() + .map(|ks| ks.threshold) + .unwrap_or_default(), + configured_nodes_count: cluster.configured_nodes_count(), + connected_nodes_count: cluster.connected_nodes_count(), + }, + access_key: id.access_key, + key_share: encrypted_data, + acl_storage: self.core.acl_storage.clone(), + cluster: cluster, + nonce: nonce, + }, + requester, + )?)) + } } /// ECDSA signing session creator. pub struct EcdsaSigningSessionCreator { - /// Creator core. - pub core: Arc, + /// Creator core. 
+ pub core: Arc, } impl ClusterSessionCreator for EcdsaSigningSessionCreator { - fn creation_data_from_message(message: &Message) -> Result, Error> { - match *message { - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref message)) => match &message.message { - &ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requester.clone().into())), - _ => Err(Error::InvalidMessage), - }, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref message)) => Ok(Some(message.requester.clone().into())), - _ => Err(Error::InvalidMessage), - } - } + fn creation_data_from_message(message: &Message) -> Result, Error> { + match *message { + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage( + ref message, + )) => match &message.message { + &ConsensusMessage::InitializeConsensusSession(ref message) => { + Ok(Some(message.requester.clone().into())) + } + _ => Err(Error::InvalidMessage), + }, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation( + ref message, + )) => Ok(Some(message.requester.clone().into())), + _ => Err(Error::InvalidMessage), + } + } - fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message { - message::Message::EcdsaSigning(message::EcdsaSigningMessage::EcdsaSigningSessionError(message::EcdsaSigningSessionError { - session: sid.id.into(), - sub_session: sid.access_key.into(), - session_nonce: nonce, - error: err.into(), - })) - } + fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message { + message::Message::EcdsaSigning(message::EcdsaSigningMessage::EcdsaSigningSessionError( + message::EcdsaSigningSessionError { + session: sid.id.into(), + sub_session: sid.access_key.into(), + session_nonce: nonce, + error: err.into(), + }, + )) + } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, requester: Option) -> Result, Error> { - let encrypted_data = 
self.core.read_key_share(&id.id)?; - let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(EcdsaSigningSessionImpl::new(EcdsaSigningSessionParams { - meta: SessionMeta { - id: id.id, - self_node_id: self.core.self_node_id.clone(), - master_node_id: master, - threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(), - configured_nodes_count: cluster.configured_nodes_count(), - connected_nodes_count: cluster.connected_nodes_count(), - }, - access_key: id.access_key, - key_share: encrypted_data, - acl_storage: self.core.acl_storage.clone(), - cluster: cluster, - nonce: nonce, - }, requester)?)) - } + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionIdWithSubSession, + requester: Option, + ) -> Result, Error> { + let encrypted_data = self.core.read_key_share(&id.id)?; + let nonce = self.core.check_session_nonce(&master, nonce)?; + Ok(Arc::new(EcdsaSigningSessionImpl::new( + EcdsaSigningSessionParams { + meta: SessionMeta { + id: id.id, + self_node_id: self.core.self_node_id.clone(), + master_node_id: master, + threshold: encrypted_data + .as_ref() + .map(|ks| ks.threshold) + .unwrap_or_default(), + configured_nodes_count: cluster.configured_nodes_count(), + connected_nodes_count: cluster.connected_nodes_count(), + }, + access_key: id.access_key, + key_share: encrypted_data, + acl_storage: self.core.acl_storage.clone(), + cluster: cluster, + nonce: nonce, + }, + requester, + )?)) + } } /// Key version negotiation session creator. pub struct KeyVersionNegotiationSessionCreator { - /// Creator core. - pub core: Arc, + /// Creator core. 
+ pub core: Arc, } -impl ClusterSessionCreator, ()> for KeyVersionNegotiationSessionCreator { - fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message { - message::Message::KeyVersionNegotiation(message::KeyVersionNegotiationMessage::KeyVersionsError(message::KeyVersionsError { - session: sid.id.into(), - sub_session: sid.access_key.into(), - session_nonce: nonce, - error: err.into(), - // we don't care about continue action here. it only matters when we're completing the session with confirmed - // fatal error from result computer - continue_with: None, - })) - } +impl ClusterSessionCreator, ()> + for KeyVersionNegotiationSessionCreator +{ + fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message { + message::Message::KeyVersionNegotiation( + message::KeyVersionNegotiationMessage::KeyVersionsError(message::KeyVersionsError { + session: sid.id.into(), + sub_session: sid.access_key.into(), + session_nonce: nonce, + error: err.into(), + // we don't care about continue action here. 
it only matters when we're completing the session with confirmed + // fatal error from result computer + continue_with: None, + }), + ) + } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, _creation_data: Option<()>) -> Result>, Error> { - let configured_nodes_count = cluster.configured_nodes_count(); - let connected_nodes_count = cluster.connected_nodes_count(); - let encrypted_data = self.core.read_key_share(&id.id)?; - let nonce = self.core.check_session_nonce(&master, nonce)?; - let computer = Arc::new(FastestResultKeyVersionsResultComputer::new(self.core.self_node_id.clone(), encrypted_data.as_ref(), - configured_nodes_count, configured_nodes_count)); - Ok(Arc::new(KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { - meta: ShareChangeSessionMeta { - id: id.id.clone(), - self_node_id: self.core.self_node_id.clone(), - master_node_id: master, - configured_nodes_count: configured_nodes_count, - connected_nodes_count: connected_nodes_count, - }, - sub_session: id.access_key.clone(), - key_share: encrypted_data, - result_computer: computer, - transport: VersionNegotiationTransport { - cluster: cluster, - key_id: id.id, - sub_session: id.access_key.clone(), - nonce: nonce, - }, - nonce: nonce, - }))) - } + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionIdWithSubSession, + _creation_data: Option<()>, + ) -> Result>, Error> { + let configured_nodes_count = cluster.configured_nodes_count(); + let connected_nodes_count = cluster.connected_nodes_count(); + let encrypted_data = self.core.read_key_share(&id.id)?; + let nonce = self.core.check_session_nonce(&master, nonce)?; + let computer = Arc::new(FastestResultKeyVersionsResultComputer::new( + self.core.self_node_id.clone(), + encrypted_data.as_ref(), + configured_nodes_count, + configured_nodes_count, + )); + Ok(Arc::new(KeyVersionNegotiationSessionImpl::new( + KeyVersionNegotiationSessionParams { + meta: 
ShareChangeSessionMeta { + id: id.id.clone(), + self_node_id: self.core.self_node_id.clone(), + master_node_id: master, + configured_nodes_count: configured_nodes_count, + connected_nodes_count: connected_nodes_count, + }, + sub_session: id.access_key.clone(), + key_share: encrypted_data, + result_computer: computer, + transport: VersionNegotiationTransport { + cluster: cluster, + key_id: id.id, + sub_session: id.access_key.clone(), + nonce: nonce, + }, + nonce: nonce, + }, + ))) + } } /// Administrative session creator. pub struct AdminSessionCreator { - /// Creator core. - pub core: Arc, - /// Administrator public. - pub admin_public: Option, - /// Servers set change sessions creator connector. - pub servers_set_change_session_creator_connector: Arc, + /// Creator core. + pub core: Arc, + /// Administrator public. + pub admin_public: Option, + /// Servers set change sessions creator connector. + pub servers_set_change_session_creator_connector: Arc, } impl ClusterSessionCreator for AdminSessionCreator { - fn creation_data_from_message(message: &Message) -> Result, Error> { - match *message { - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message)) => match &message.message { - &ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => Ok(Some(AdminSessionCreationData::ServersSetChange( - message.migration_id.clone().map(Into::into), - message.new_nodes_set.clone().into_iter().map(Into::into).collect() - ))), - _ => Err(Error::InvalidMessage), - }, - Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ref message)) => match &message.message { - &ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) => Ok(Some(AdminSessionCreationData::ShareAdd(message.version.clone().into()))), - _ => Err(Error::InvalidMessage), - }, - _ => Err(Error::InvalidMessage), - } - } + fn creation_data_from_message( + message: &Message, + ) -> Result, Error> { + match *message { + Message::ServersSetChange( 
+ ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message), + ) => match &message.message { + &ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => { + Ok(Some(AdminSessionCreationData::ServersSetChange( + message.migration_id.clone().map(Into::into), + message + .new_nodes_set + .clone() + .into_iter() + .map(Into::into) + .collect(), + ))) + } + _ => Err(Error::InvalidMessage), + }, + Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ref message)) => { + match &message.message { + &ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) => { + Ok(Some(AdminSessionCreationData::ShareAdd( + message.version.clone().into(), + ))) + } + _ => Err(Error::InvalidMessage), + } + } + _ => Err(Error::InvalidMessage), + } + } - fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message { - message::Message::ServersSetChange(message::ServersSetChangeMessage::ServersSetChangeError(message::ServersSetChangeError { - session: sid.into(), - session_nonce: nonce, - error: err.into(), - })) - } + fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message { + message::Message::ServersSetChange(message::ServersSetChangeMessage::ServersSetChangeError( + message::ServersSetChangeError { + session: sid.into(), + session_nonce: nonce, + error: err.into(), + }, + )) + } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionId, creation_data: Option) -> Result, Error> { - let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(match creation_data { - Some(AdminSessionCreationData::ShareAdd(version)) => { - AdminSession::ShareAdd(ShareAddSessionImpl::new(ShareAddSessionParams { - meta: ShareChangeSessionMeta { - id: id.clone(), - self_node_id: self.core.self_node_id.clone(), - master_node_id: master, - configured_nodes_count: cluster.configured_nodes_count(), - connected_nodes_count: cluster.connected_nodes_count(), - }, - transport: ShareAddTransport::new(id.clone(), 
Some(version), nonce, cluster), - key_storage: self.core.key_storage.clone(), - nonce: nonce, - admin_public: Some(self.admin_public.clone().ok_or(Error::AccessDenied)?), - })?) - }, - Some(AdminSessionCreationData::ServersSetChange(migration_id, new_nodes_set)) => { - let admin_public = self.servers_set_change_session_creator_connector.admin_public(migration_id.as_ref(), new_nodes_set) - .map_err(|_| Error::AccessDenied)?; + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionId, + creation_data: Option, + ) -> Result, Error> { + let nonce = self.core.check_session_nonce(&master, nonce)?; + Ok(Arc::new(match creation_data { + Some(AdminSessionCreationData::ShareAdd(version)) => { + AdminSession::ShareAdd(ShareAddSessionImpl::new(ShareAddSessionParams { + meta: ShareChangeSessionMeta { + id: id.clone(), + self_node_id: self.core.self_node_id.clone(), + master_node_id: master, + configured_nodes_count: cluster.configured_nodes_count(), + connected_nodes_count: cluster.connected_nodes_count(), + }, + transport: ShareAddTransport::new(id.clone(), Some(version), nonce, cluster), + key_storage: self.core.key_storage.clone(), + nonce: nonce, + admin_public: Some(self.admin_public.clone().ok_or(Error::AccessDenied)?), + })?) 
+ } + Some(AdminSessionCreationData::ServersSetChange(migration_id, new_nodes_set)) => { + let admin_public = self + .servers_set_change_session_creator_connector + .admin_public(migration_id.as_ref(), new_nodes_set) + .map_err(|_| Error::AccessDenied)?; - AdminSession::ServersSetChange(ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams { - meta: ShareChangeSessionMeta { - id: id.clone(), - self_node_id: self.core.self_node_id.clone(), - master_node_id: master, - configured_nodes_count: cluster.configured_nodes_count(), - connected_nodes_count: cluster.connected_nodes_count(), - }, - cluster: cluster.clone(), - key_storage: self.core.key_storage.clone(), - nonce: nonce, - all_nodes_set: cluster.nodes(), - admin_public: admin_public, - migration_id: migration_id, - })?) - }, - None => unreachable!("expected to call with non-empty creation data; qed"), - })) - } + AdminSession::ServersSetChange(ServersSetChangeSessionImpl::new( + ServersSetChangeSessionParams { + meta: ShareChangeSessionMeta { + id: id.clone(), + self_node_id: self.core.self_node_id.clone(), + master_node_id: master, + configured_nodes_count: cluster.configured_nodes_count(), + connected_nodes_count: cluster.connected_nodes_count(), + }, + cluster: cluster.clone(), + key_storage: self.core.key_storage.clone(), + nonce: nonce, + all_nodes_set: cluster.nodes(), + admin_public: admin_public, + migration_id: migration_id, + }, + )?) 
+ } + None => unreachable!("expected to call with non-empty creation data; qed"), + })) + } } impl IntoSessionId for Message { - fn into_session_id(&self) -> Result { - match *self { - Message::Generation(ref message) => Ok(message.session_id().clone()), - Message::Encryption(ref message) => Ok(message.session_id().clone()), - Message::Decryption(_) => Err(Error::InvalidMessage), - Message::SchnorrSigning(_) => Err(Error::InvalidMessage), - Message::EcdsaSigning(_) => Err(Error::InvalidMessage), - Message::ServersSetChange(ref message) => Ok(message.session_id().clone()), - Message::ShareAdd(ref message) => Ok(message.session_id().clone()), - Message::KeyVersionNegotiation(_) => Err(Error::InvalidMessage), - Message::Cluster(_) => Err(Error::InvalidMessage), - } - } + fn into_session_id(&self) -> Result { + match *self { + Message::Generation(ref message) => Ok(message.session_id().clone()), + Message::Encryption(ref message) => Ok(message.session_id().clone()), + Message::Decryption(_) => Err(Error::InvalidMessage), + Message::SchnorrSigning(_) => Err(Error::InvalidMessage), + Message::EcdsaSigning(_) => Err(Error::InvalidMessage), + Message::ServersSetChange(ref message) => Ok(message.session_id().clone()), + Message::ShareAdd(ref message) => Ok(message.session_id().clone()), + Message::KeyVersionNegotiation(_) => Err(Error::InvalidMessage), + Message::Cluster(_) => Err(Error::InvalidMessage), + } + } } impl IntoSessionId for Message { - fn into_session_id(&self) -> Result { - match *self { - Message::Generation(_) => Err(Error::InvalidMessage), - Message::Encryption(_) => Err(Error::InvalidMessage), - Message::Decryption(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())), - Message::SchnorrSigning(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())), - Message::EcdsaSigning(ref message) => 
Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())), - Message::ServersSetChange(_) => Err(Error::InvalidMessage), - Message::ShareAdd(_) => Err(Error::InvalidMessage), - Message::KeyVersionNegotiation(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())), - Message::Cluster(_) => Err(Error::InvalidMessage), - } - } + fn into_session_id(&self) -> Result { + match *self { + Message::Generation(_) => Err(Error::InvalidMessage), + Message::Encryption(_) => Err(Error::InvalidMessage), + Message::Decryption(ref message) => Ok(SessionIdWithSubSession::new( + message.session_id().clone(), + message.sub_session_id().clone(), + )), + Message::SchnorrSigning(ref message) => Ok(SessionIdWithSubSession::new( + message.session_id().clone(), + message.sub_session_id().clone(), + )), + Message::EcdsaSigning(ref message) => Ok(SessionIdWithSubSession::new( + message.session_id().clone(), + message.sub_session_id().clone(), + )), + Message::ServersSetChange(_) => Err(Error::InvalidMessage), + Message::ShareAdd(_) => Err(Error::InvalidMessage), + Message::KeyVersionNegotiation(ref message) => Ok(SessionIdWithSubSession::new( + message.session_id().clone(), + message.sub_session_id().clone(), + )), + Message::Cluster(_) => Err(Error::InvalidMessage), + } + } } diff --git a/secret-store/src/key_server_cluster/connection_trigger.rs b/secret-store/src/key_server_cluster/connection_trigger.rs index 7b3649861..52a0ff807 100644 --- a/secret-store/src/key_server_cluster/connection_trigger.rs +++ b/secret-store/src/key_server_cluster/connection_trigger.rs @@ -14,379 +14,502 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::{BTreeSet, BTreeMap}; -use std::collections::btree_map::Entry; -use std::net::SocketAddr; -use std::sync::Arc; use ethereum_types::H256; use ethkey::Public; -use key_server_cluster::{KeyServerSet, KeyServerSetSnapshot}; -use key_server_cluster::cluster::{ClusterConfiguration, ServersSetChangeParams}; -use key_server_cluster::cluster_sessions::AdminSession; -use key_server_cluster::cluster_connections::{Connection}; -use key_server_cluster::cluster_connections_net::{NetConnectionsContainer}; +use key_server_cluster::{ + cluster::{ClusterConfiguration, ServersSetChangeParams}, + cluster_connections::Connection, + cluster_connections_net::NetConnectionsContainer, + cluster_sessions::AdminSession, + KeyServerSet, KeyServerSetSnapshot, +}; +use std::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + net::SocketAddr, + sync::Arc, +}; use types::{Error, NodeId}; use NodeKeyPair; #[derive(Debug, Clone, Copy, PartialEq)] /// Describes which maintain() call is required. pub enum Maintain { - /// We need to maintain() both connections && session. - SessionAndConnections, - /// Only call maintain_session. - Session, - /// Only call maintain_connections. - Connections, + /// We need to maintain() both connections && session. + SessionAndConnections, + /// Only call maintain_session. + Session, + /// Only call maintain_connections. + Connections, } /// Connection trigger, which executes necessary actions when set of key servers changes. pub trait ConnectionTrigger: Send + Sync { - /// On maintain interval. - fn on_maintain(&mut self) -> Option; - /// When connection is established. - fn on_connection_established(&mut self, node: &NodeId) -> Option; - /// When connection is closed. - fn on_connection_closed(&mut self, node: &NodeId) -> Option; - /// Maintain active sessions. Returns Some if servers set session creation required. - fn maintain_session(&mut self) -> Option; - /// Maintain active connections. 
- fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer); - /// Return connector for the servers set change session creator. - fn servers_set_change_creator_connector(&self) -> Arc; + /// On maintain interval. + fn on_maintain(&mut self) -> Option; + /// When connection is established. + fn on_connection_established(&mut self, node: &NodeId) -> Option; + /// When connection is closed. + fn on_connection_closed(&mut self, node: &NodeId) -> Option; + /// Maintain active sessions. Returns Some if servers set session creation required. + fn maintain_session(&mut self) -> Option; + /// Maintain active connections. + fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer); + /// Return connector for the servers set change session creator. + fn servers_set_change_creator_connector(&self) -> Arc; } /// Servers set change session creator connector. pub trait ServersSetChangeSessionCreatorConnector: Send + Sync { - /// Get actual administrator public key. For manual-migration configuration it is the pre-configured - /// administrator key. For auto-migration configurations it is the key of actual MigrationSession master node. - fn admin_public(&self, migration_id: Option<&H256>, new_server_set: BTreeSet) -> Result; - /// Set active servers set change session. - fn set_key_servers_set_change_session(&self, session: Arc); + /// Get actual administrator public key. For manual-migration configuration it is the pre-configured + /// administrator key. For auto-migration configurations it is the key of actual MigrationSession master node. + fn admin_public( + &self, + migration_id: Option<&H256>, + new_server_set: BTreeSet, + ) -> Result; + /// Set active servers set change session. + fn set_key_servers_set_change_session(&self, session: Arc); } /// Simple connection trigger, which only keeps connections to current_set. pub struct SimpleConnectionTrigger { - /// Key server set cluster. - key_server_set: Arc, - /// Trigger connections. 
- connections: TriggerConnections, - /// Servers set change session creator connector. - connector: Arc, + /// Key server set cluster. + key_server_set: Arc, + /// Trigger connections. + connections: TriggerConnections, + /// Servers set change session creator connector. + connector: Arc, } /// Simple Servers set change session creator connector, which will just return /// pre-configured administartor public when asked. pub struct SimpleServersSetChangeSessionCreatorConnector { - /// Secret store administrator public key. - pub admin_public: Option, + /// Secret store administrator public key. + pub admin_public: Option, } #[derive(Debug, Clone, Copy, PartialEq)] /// Action with trigger connections. pub enum ConnectionsAction { - /// Connect to nodes from old set only. - ConnectToCurrentSet, - /// Connect to nodes from migration set. - ConnectToMigrationSet, + /// Connect to nodes from old set only. + ConnectToCurrentSet, + /// Connect to nodes from migration set. + ConnectToMigrationSet, } /// Trigger connections. pub struct TriggerConnections { - /// This node key pair. - pub self_key_pair: Arc, + /// This node key pair. + pub self_key_pair: Arc, } impl SimpleConnectionTrigger { - /// Create new simple from cluster configuration. - pub fn with_config(config: &ClusterConfiguration) -> Self { - Self::new(config.key_server_set.clone(), config.self_key_pair.clone(), config.admin_public) - } + /// Create new simple from cluster configuration. + pub fn with_config(config: &ClusterConfiguration) -> Self { + Self::new( + config.key_server_set.clone(), + config.self_key_pair.clone(), + config.admin_public, + ) + } - /// Create new simple connection trigger. 
- pub fn new(key_server_set: Arc, self_key_pair: Arc, admin_public: Option) -> Self { - SimpleConnectionTrigger { - key_server_set: key_server_set, - connections: TriggerConnections { - self_key_pair: self_key_pair, - }, - connector: Arc::new(SimpleServersSetChangeSessionCreatorConnector { - admin_public: admin_public, - }), - } - } + /// Create new simple connection trigger. + pub fn new( + key_server_set: Arc, + self_key_pair: Arc, + admin_public: Option, + ) -> Self { + SimpleConnectionTrigger { + key_server_set: key_server_set, + connections: TriggerConnections { + self_key_pair: self_key_pair, + }, + connector: Arc::new(SimpleServersSetChangeSessionCreatorConnector { + admin_public: admin_public, + }), + } + } } impl ConnectionTrigger for SimpleConnectionTrigger { - fn on_maintain(&mut self) -> Option { - Some(Maintain::Connections) - } + fn on_maintain(&mut self) -> Option { + Some(Maintain::Connections) + } - fn on_connection_established(&mut self, _node: &NodeId) -> Option { - None - } + fn on_connection_established(&mut self, _node: &NodeId) -> Option { + None + } - fn on_connection_closed(&mut self, _node: &NodeId) -> Option { - // we do not want to reconnect after every connection close - // because it could be a part of something bigger - None - } + fn on_connection_closed(&mut self, _node: &NodeId) -> Option { + // we do not want to reconnect after every connection close + // because it could be a part of something bigger + None + } - fn maintain_session(&mut self) -> Option { - None - } + fn maintain_session(&mut self) -> Option { + None + } - fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) { - self.connections.maintain(ConnectionsAction::ConnectToCurrentSet, connections, &self.key_server_set.snapshot()) - } + fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) { + self.connections.maintain( + ConnectionsAction::ConnectToCurrentSet, + connections, + &self.key_server_set.snapshot(), + ) + } - fn 
servers_set_change_creator_connector(&self) -> Arc { - self.connector.clone() - } + fn servers_set_change_creator_connector(&self) -> Arc { + self.connector.clone() + } } impl ServersSetChangeSessionCreatorConnector for SimpleServersSetChangeSessionCreatorConnector { - fn admin_public(&self, _migration_id: Option<&H256>, _new_server_set: BTreeSet) -> Result { - self.admin_public.clone().ok_or(Error::AccessDenied) - } + fn admin_public( + &self, + _migration_id: Option<&H256>, + _new_server_set: BTreeSet, + ) -> Result { + self.admin_public.clone().ok_or(Error::AccessDenied) + } - fn set_key_servers_set_change_session(&self, _session: Arc) { - } + fn set_key_servers_set_change_session(&self, _session: Arc) {} } impl TriggerConnections { - pub fn maintain(&self, action: ConnectionsAction, data: &mut NetConnectionsContainer, server_set: &KeyServerSetSnapshot) { - match action { - ConnectionsAction::ConnectToCurrentSet => { - adjust_connections(self.self_key_pair.public(), data, &server_set.current_set); - }, - ConnectionsAction::ConnectToMigrationSet => { - let migration_set = server_set.migration.as_ref().map(|s| s.set.clone()).unwrap_or_default(); - adjust_connections(self.self_key_pair.public(), data, &migration_set); - }, - } - } + pub fn maintain( + &self, + action: ConnectionsAction, + data: &mut NetConnectionsContainer, + server_set: &KeyServerSetSnapshot, + ) { + match action { + ConnectionsAction::ConnectToCurrentSet => { + adjust_connections(self.self_key_pair.public(), data, &server_set.current_set); + } + ConnectionsAction::ConnectToMigrationSet => { + let migration_set = server_set + .migration + .as_ref() + .map(|s| s.set.clone()) + .unwrap_or_default(); + adjust_connections(self.self_key_pair.public(), data, &migration_set); + } + } + } } fn adjust_connections( - self_node_id: &NodeId, - data: &mut NetConnectionsContainer, - required_set: &BTreeMap + self_node_id: &NodeId, + data: &mut NetConnectionsContainer, + required_set: &BTreeMap, ) { - if 
!required_set.contains_key(self_node_id) { - if !data.is_isolated { - trace!(target: "secretstore_net", "{}: isolated from cluser", self_node_id); - } + if !required_set.contains_key(self_node_id) { + if !data.is_isolated { + trace!(target: "secretstore_net", "{}: isolated from cluser", self_node_id); + } - data.is_isolated = true; - data.connections.clear(); - data.nodes.clear(); - return; - } + data.is_isolated = true; + data.connections.clear(); + data.nodes.clear(); + return; + } - data.is_isolated = false; - for node_to_disconnect in select_nodes_to_disconnect(&data.nodes, required_set) { - if let Entry::Occupied(entry) = data.connections.entry(node_to_disconnect.clone()) { - trace!(target: "secretstore_net", "{}: adjusting connections - removing connection to {} at {}", + data.is_isolated = false; + for node_to_disconnect in select_nodes_to_disconnect(&data.nodes, required_set) { + if let Entry::Occupied(entry) = data.connections.entry(node_to_disconnect.clone()) { + trace!(target: "secretstore_net", "{}: adjusting connections - removing connection to {} at {}", self_node_id, entry.get().node_id(), entry.get().node_address()); - entry.remove(); - } + entry.remove(); + } - data.nodes.remove(&node_to_disconnect); - } + data.nodes.remove(&node_to_disconnect); + } - for (node_to_connect, node_addr) in required_set { - if node_to_connect != self_node_id { - data.nodes.insert(node_to_connect.clone(), node_addr.clone()); - } - } + for (node_to_connect, node_addr) in required_set { + if node_to_connect != self_node_id { + data.nodes + .insert(node_to_connect.clone(), node_addr.clone()); + } + } } -fn select_nodes_to_disconnect(current_set: &BTreeMap, new_set: &BTreeMap) -> Vec { - current_set.iter() - .filter(|&(node_id, node_addr)| match new_set.get(node_id) { - Some(new_node_addr) => node_addr != new_node_addr, - None => true, - }) - .map(|(node_id, _)| node_id.clone()) - .collect() +fn select_nodes_to_disconnect( + current_set: &BTreeMap, + new_set: &BTreeMap, +) 
-> Vec { + current_set + .iter() + .filter(|&(node_id, node_addr)| match new_set.get(node_id) { + Some(new_node_addr) => node_addr != new_node_addr, + None => true, + }) + .map(|(node_id, _)| node_id.clone()) + .collect() } #[cfg(test)] mod tests { - use std::collections::BTreeSet; - use std::sync::Arc; - use ethkey::{Random, Generator}; - use key_server_cluster::{MapKeyServerSet, PlainNodeKeyPair, KeyServerSetSnapshot, KeyServerSetMigration}; - use key_server_cluster::cluster_connections_net::NetConnectionsContainer; - use super::{Maintain, TriggerConnections, ConnectionsAction, ConnectionTrigger, SimpleConnectionTrigger, - select_nodes_to_disconnect, adjust_connections}; + use super::{ + adjust_connections, select_nodes_to_disconnect, ConnectionTrigger, ConnectionsAction, + Maintain, SimpleConnectionTrigger, TriggerConnections, + }; + use ethkey::{Generator, Random}; + use key_server_cluster::{ + cluster_connections_net::NetConnectionsContainer, KeyServerSetMigration, + KeyServerSetSnapshot, MapKeyServerSet, PlainNodeKeyPair, + }; + use std::{collections::BTreeSet, sync::Arc}; - fn default_connection_data() -> NetConnectionsContainer { - NetConnectionsContainer { - is_isolated: false, - nodes: Default::default(), - connections: Default::default(), - } - } + fn default_connection_data() -> NetConnectionsContainer { + NetConnectionsContainer { + is_isolated: false, + nodes: Default::default(), + connections: Default::default(), + } + } - fn create_connections() -> TriggerConnections { - TriggerConnections { - self_key_pair: Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap())), - } - } + fn create_connections() -> TriggerConnections { + TriggerConnections { + self_key_pair: Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap())), + } + } - #[test] - fn do_not_disconnect_if_set_is_not_changed() { - let node_id = Random.generate().unwrap().public().clone(); - assert_eq!(select_nodes_to_disconnect( - &vec![(node_id, 
"127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - &vec![(node_id, "127.0.0.1:8081".parse().unwrap())].into_iter().collect()), - vec![]); - } + #[test] + fn do_not_disconnect_if_set_is_not_changed() { + let node_id = Random.generate().unwrap().public().clone(); + assert_eq!( + select_nodes_to_disconnect( + &vec![(node_id, "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(), + &vec![(node_id, "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect() + ), + vec![] + ); + } - #[test] - fn disconnect_if_address_has_changed() { - let node_id = Random.generate().unwrap().public().clone(); - assert_eq!(select_nodes_to_disconnect( - &vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - &vec![(node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect()), - vec![node_id.clone()]); - } + #[test] + fn disconnect_if_address_has_changed() { + let node_id = Random.generate().unwrap().public().clone(); + assert_eq!( + select_nodes_to_disconnect( + &vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(), + &vec![(node_id.clone(), "127.0.0.1:8082".parse().unwrap())] + .into_iter() + .collect() + ), + vec![node_id.clone()] + ); + } - #[test] - fn disconnect_if_node_has_removed() { - let node_id = Random.generate().unwrap().public().clone(); - assert_eq!(select_nodes_to_disconnect( - &vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - &vec![].into_iter().collect()), - vec![node_id.clone()]); - } + #[test] + fn disconnect_if_node_has_removed() { + let node_id = Random.generate().unwrap().public().clone(); + assert_eq!( + select_nodes_to_disconnect( + &vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(), + &vec![].into_iter().collect() + ), + vec![node_id.clone()] + ); + } - #[test] - fn does_not_disconnect_if_node_has_added() { - let node_id = Random.generate().unwrap().public().clone(); - 
assert_eq!(select_nodes_to_disconnect( - &vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - &vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap()), - (Random.generate().unwrap().public().clone(), "127.0.0.1:8082".parse().unwrap())] - .into_iter().collect()), - vec![]); - } + #[test] + fn does_not_disconnect_if_node_has_added() { + let node_id = Random.generate().unwrap().public().clone(); + assert_eq!( + select_nodes_to_disconnect( + &vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(), + &vec![ + (node_id.clone(), "127.0.0.1:8081".parse().unwrap()), + ( + Random.generate().unwrap().public().clone(), + "127.0.0.1:8082".parse().unwrap() + ) + ] + .into_iter() + .collect() + ), + vec![] + ); + } - #[test] - fn adjust_connections_disconnects_from_all_nodes_if_not_a_part_of_key_server() { - let self_node_id = Random.generate().unwrap().public().clone(); - let other_node_id = Random.generate().unwrap().public().clone(); - let mut connection_data = default_connection_data(); - connection_data.nodes.insert(other_node_id.clone(), "127.0.0.1:8081".parse().unwrap()); + #[test] + fn adjust_connections_disconnects_from_all_nodes_if_not_a_part_of_key_server() { + let self_node_id = Random.generate().unwrap().public().clone(); + let other_node_id = Random.generate().unwrap().public().clone(); + let mut connection_data = default_connection_data(); + connection_data + .nodes + .insert(other_node_id.clone(), "127.0.0.1:8081".parse().unwrap()); - let required_set = connection_data.nodes.clone(); - adjust_connections(&self_node_id, &mut connection_data, &required_set); - assert!(connection_data.nodes.is_empty()); - assert!(connection_data.is_isolated); - } + let required_set = connection_data.nodes.clone(); + adjust_connections(&self_node_id, &mut connection_data, &required_set); + assert!(connection_data.nodes.is_empty()); + assert!(connection_data.is_isolated); + } - #[test] - fn 
adjust_connections_connects_to_new_nodes() { - let self_node_id = Random.generate().unwrap().public().clone(); - let other_node_id = Random.generate().unwrap().public().clone(); - let mut connection_data = default_connection_data(); + #[test] + fn adjust_connections_connects_to_new_nodes() { + let self_node_id = Random.generate().unwrap().public().clone(); + let other_node_id = Random.generate().unwrap().public().clone(); + let mut connection_data = default_connection_data(); - let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), - (other_node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect(); - adjust_connections(&self_node_id, &mut connection_data, &required_set); - assert!(connection_data.nodes.contains_key(&other_node_id)); - assert!(!connection_data.is_isolated); - } + let required_set = vec![ + (self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), + (other_node_id.clone(), "127.0.0.1:8082".parse().unwrap()), + ] + .into_iter() + .collect(); + adjust_connections(&self_node_id, &mut connection_data, &required_set); + assert!(connection_data.nodes.contains_key(&other_node_id)); + assert!(!connection_data.is_isolated); + } - #[test] - fn adjust_connections_reconnects_from_changed_nodes() { - let self_node_id = Random.generate().unwrap().public().clone(); - let other_node_id = Random.generate().unwrap().public().clone(); - let mut connection_data = default_connection_data(); - connection_data.nodes.insert(other_node_id.clone(), "127.0.0.1:8082".parse().unwrap()); + #[test] + fn adjust_connections_reconnects_from_changed_nodes() { + let self_node_id = Random.generate().unwrap().public().clone(); + let other_node_id = Random.generate().unwrap().public().clone(); + let mut connection_data = default_connection_data(); + connection_data + .nodes + .insert(other_node_id.clone(), "127.0.0.1:8082".parse().unwrap()); - let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), - 
(other_node_id.clone(), "127.0.0.1:8083".parse().unwrap())].into_iter().collect(); - adjust_connections(&self_node_id, &mut connection_data, &required_set); - assert_eq!(connection_data.nodes.get(&other_node_id), Some(&"127.0.0.1:8083".parse().unwrap())); - assert!(!connection_data.is_isolated); - } + let required_set = vec![ + (self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), + (other_node_id.clone(), "127.0.0.1:8083".parse().unwrap()), + ] + .into_iter() + .collect(); + adjust_connections(&self_node_id, &mut connection_data, &required_set); + assert_eq!( + connection_data.nodes.get(&other_node_id), + Some(&"127.0.0.1:8083".parse().unwrap()) + ); + assert!(!connection_data.is_isolated); + } - #[test] - fn adjust_connections_disconnects_from_removed_nodes() { - let self_node_id = Random.generate().unwrap().public().clone(); - let other_node_id = Random.generate().unwrap().public().clone(); - let mut connection_data = default_connection_data(); - connection_data.nodes.insert(other_node_id.clone(), "127.0.0.1:8082".parse().unwrap()); + #[test] + fn adjust_connections_disconnects_from_removed_nodes() { + let self_node_id = Random.generate().unwrap().public().clone(); + let other_node_id = Random.generate().unwrap().public().clone(); + let mut connection_data = default_connection_data(); + connection_data + .nodes + .insert(other_node_id.clone(), "127.0.0.1:8082".parse().unwrap()); - let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(); - adjust_connections(&self_node_id, &mut connection_data, &required_set); - assert!(connection_data.nodes.is_empty()); - assert!(!connection_data.is_isolated); - } + let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(); + adjust_connections(&self_node_id, &mut connection_data, &required_set); + assert!(connection_data.nodes.is_empty()); + assert!(!connection_data.is_isolated); + } - #[test] - fn 
adjust_connections_does_not_connects_to_self() { - let self_node_id = Random.generate().unwrap().public().clone(); - let mut connection_data = default_connection_data(); + #[test] + fn adjust_connections_does_not_connects_to_self() { + let self_node_id = Random.generate().unwrap().public().clone(); + let mut connection_data = default_connection_data(); - let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(); - adjust_connections(&self_node_id, &mut connection_data, &required_set); - assert!(connection_data.nodes.is_empty()); - assert!(!connection_data.is_isolated); - } + let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(); + adjust_connections(&self_node_id, &mut connection_data, &required_set); + assert!(connection_data.nodes.is_empty()); + assert!(!connection_data.is_isolated); + } - #[test] - fn maintain_connects_to_current_set_works() { - let connections = create_connections(); - let self_node_id = connections.self_key_pair.public().clone(); - let current_node_id = Random.generate().unwrap().public().clone(); - let migration_node_id = Random.generate().unwrap().public().clone(); - let new_node_id = Random.generate().unwrap().public().clone(); + #[test] + fn maintain_connects_to_current_set_works() { + let connections = create_connections(); + let self_node_id = connections.self_key_pair.public().clone(); + let current_node_id = Random.generate().unwrap().public().clone(); + let migration_node_id = Random.generate().unwrap().public().clone(); + let new_node_id = Random.generate().unwrap().public().clone(); - let mut connections_data = default_connection_data(); - connections.maintain(ConnectionsAction::ConnectToCurrentSet, &mut connections_data, &KeyServerSetSnapshot { - current_set: vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), - (current_node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect(), - new_set: 
vec![(new_node_id.clone(), "127.0.0.1:8083".parse().unwrap())].into_iter().collect(), - migration: Some(KeyServerSetMigration { - set: vec![(migration_node_id.clone(), "127.0.0.1:8084".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }); + let mut connections_data = default_connection_data(); + connections.maintain( + ConnectionsAction::ConnectToCurrentSet, + &mut connections_data, + &KeyServerSetSnapshot { + current_set: vec![ + (self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), + (current_node_id.clone(), "127.0.0.1:8082".parse().unwrap()), + ] + .into_iter() + .collect(), + new_set: vec![(new_node_id.clone(), "127.0.0.1:8083".parse().unwrap())] + .into_iter() + .collect(), + migration: Some(KeyServerSetMigration { + set: vec![(migration_node_id.clone(), "127.0.0.1:8084".parse().unwrap())] + .into_iter() + .collect(), + ..Default::default() + }), + }, + ); - assert_eq!(vec![current_node_id], connections_data.nodes.keys().cloned().collect::>()); - } + assert_eq!( + vec![current_node_id], + connections_data.nodes.keys().cloned().collect::>() + ); + } - #[test] - fn maintain_connects_to_migration_set_works() { - let connections = create_connections(); - let self_node_id = connections.self_key_pair.public().clone(); - let current_node_id = Random.generate().unwrap().public().clone(); - let migration_node_id = Random.generate().unwrap().public().clone(); - let new_node_id = Random.generate().unwrap().public().clone(); + #[test] + fn maintain_connects_to_migration_set_works() { + let connections = create_connections(); + let self_node_id = connections.self_key_pair.public().clone(); + let current_node_id = Random.generate().unwrap().public().clone(); + let migration_node_id = Random.generate().unwrap().public().clone(); + let new_node_id = Random.generate().unwrap().public().clone(); - let mut connections_data = default_connection_data(); - connections.maintain(ConnectionsAction::ConnectToMigrationSet, &mut connections_data, 
&KeyServerSetSnapshot { - current_set: vec![(current_node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect(), - new_set: vec![(new_node_id.clone(), "127.0.0.1:8083".parse().unwrap())].into_iter().collect(), - migration: Some(KeyServerSetMigration { - set: vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), - (migration_node_id.clone(), "127.0.0.1:8084".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }); + let mut connections_data = default_connection_data(); + connections.maintain( + ConnectionsAction::ConnectToMigrationSet, + &mut connections_data, + &KeyServerSetSnapshot { + current_set: vec![(current_node_id.clone(), "127.0.0.1:8082".parse().unwrap())] + .into_iter() + .collect(), + new_set: vec![(new_node_id.clone(), "127.0.0.1:8083".parse().unwrap())] + .into_iter() + .collect(), + migration: Some(KeyServerSetMigration { + set: vec![ + (self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), + (migration_node_id.clone(), "127.0.0.1:8084".parse().unwrap()), + ] + .into_iter() + .collect(), + ..Default::default() + }), + }, + ); - assert_eq!(vec![migration_node_id].into_iter().collect::>(), - connections_data.nodes.keys().cloned().collect::>()); - } + assert_eq!( + vec![migration_node_id].into_iter().collect::>(), + connections_data + .nodes + .keys() + .cloned() + .collect::>() + ); + } - #[test] - fn simple_connections_trigger_only_maintains_connections() { - let key_server_set = Arc::new(MapKeyServerSet::new(false, Default::default())); - let self_key_pair = Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap())); - let mut trigger = SimpleConnectionTrigger::new(key_server_set, self_key_pair, None); - assert_eq!(trigger.on_maintain(), Some(Maintain::Connections)); - } + #[test] + fn simple_connections_trigger_only_maintains_connections() { + let key_server_set = Arc::new(MapKeyServerSet::new(false, Default::default())); + let self_key_pair = 
Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap())); + let mut trigger = SimpleConnectionTrigger::new(key_server_set, self_key_pair, None); + assert_eq!(trigger.on_maintain(), Some(Maintain::Connections)); + } } diff --git a/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs b/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs index 559bab18c..7ba03d1dd 100644 --- a/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs +++ b/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs @@ -14,206 +14,229 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::{BTreeSet, BTreeMap}; -use std::net::SocketAddr; -use std::sync::Arc; use ethereum_types::H256; use ethkey::Public; +use key_server_cluster::{ + cluster::{ClusterConfiguration, ServersSetChangeParams}, + cluster_connections_net::NetConnectionsContainer, + cluster_sessions::{AdminSession, ClusterSession}, + connection_trigger::{ + ConnectionTrigger, ConnectionsAction, Maintain, ServersSetChangeSessionCreatorConnector, + TriggerConnections, + }, + is_migration_required, + jobs::servers_set_change_access_job::ordered_nodes_hash, + KeyServerSet, KeyServerSetMigration, KeyServerSetSnapshot, +}; use parking_lot::Mutex; -use key_server_cluster::{KeyServerSet, KeyServerSetSnapshot, KeyServerSetMigration, is_migration_required}; -use key_server_cluster::cluster::{ClusterConfiguration, ServersSetChangeParams}; -use key_server_cluster::cluster_connections_net::NetConnectionsContainer; -use key_server_cluster::cluster_sessions::{AdminSession, ClusterSession}; -use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash; -use key_server_cluster::connection_trigger::{Maintain, ConnectionsAction, ConnectionTrigger, - ServersSetChangeSessionCreatorConnector, TriggerConnections}; +use std::{ + collections::{BTreeMap, BTreeSet}, + 
net::SocketAddr, + sync::Arc, +}; use types::{Error, NodeId}; -use {NodeKeyPair}; +use NodeKeyPair; /// Key servers set change trigger with automated migration procedure. pub struct ConnectionTriggerWithMigration { - /// This node key pair. - self_key_pair: Arc, - /// Key server set. - key_server_set: Arc, - /// Last server set state. - snapshot: KeyServerSetSnapshot, - /// Required connections action. - connections_action: Option, - /// Required session action. - session_action: Option, - /// Currenty connected nodes. - connected: BTreeSet, - /// Trigger migration connections. - connections: TriggerConnections, - /// Trigger migration session. - session: TriggerSession, + /// This node key pair. + self_key_pair: Arc, + /// Key server set. + key_server_set: Arc, + /// Last server set state. + snapshot: KeyServerSetSnapshot, + /// Required connections action. + connections_action: Option, + /// Required session action. + session_action: Option, + /// Currenty connected nodes. + connected: BTreeSet, + /// Trigger migration connections. + connections: TriggerConnections, + /// Trigger migration session. + session: TriggerSession, } #[derive(Default)] /// Key servers set change session creator connector with migration support. pub struct ServersSetChangeSessionCreatorConnectorWithMigration { - /// This node id. - self_node_id: NodeId, - /// Active migration state to check when servers set change session is started. - migration: Mutex>, - /// Active servers set change session. - session: Mutex>>, + /// This node id. + self_node_id: NodeId, + /// Active migration state to check when servers set change session is started. + migration: Mutex>, + /// Active servers set change session. + session: Mutex>>, } #[derive(Debug, Clone, Copy, PartialEq)] /// Migration session action. enum SessionAction { - /// Start migration (confirm migration transaction). - StartMigration(H256), - /// Start migration session. - Start, - /// Confirm migration and forget migration session. 
- ConfirmAndDrop(H256), - /// Forget migration session. - Drop, - /// Forget migration session and retry. - DropAndRetry, + /// Start migration (confirm migration transaction). + StartMigration(H256), + /// Start migration session. + Start, + /// Confirm migration and forget migration session. + ConfirmAndDrop(H256), + /// Forget migration session. + Drop, + /// Forget migration session and retry. + DropAndRetry, } #[derive(Debug, Clone, Copy, PartialEq)] /// Migration session state. enum SessionState { - /// No active session. - Idle, - /// Session is running with given migration id. - Active(Option), - /// Session is completed successfully. - Finished(Option), - /// Session is completed with an error. - Failed(Option), + /// No active session. + Idle, + /// Session is running with given migration id. + Active(Option), + /// Session is completed successfully. + Finished(Option), + /// Session is completed with an error. + Failed(Option), } #[derive(Debug, Clone, Copy, PartialEq)] /// Migration state. pub enum MigrationState { - /// No migration required. - Idle, - /// Migration is required. - Required, - /// Migration has started. - Started, + /// No migration required. + Idle, + /// Migration is required. + Required, + /// Migration has started. + Started, } /// Migration session. struct TriggerSession { - /// Servers set change session creator connector. - connector: Arc, - /// This node key pair. - self_key_pair: Arc, - /// Key server set. - key_server_set: Arc, + /// Servers set change session creator connector. + connector: Arc, + /// This node key pair. + self_key_pair: Arc, + /// Key server set. + key_server_set: Arc, } impl ConnectionTriggerWithMigration { - /// Create new simple from cluster configuration. - pub fn with_config(config: &ClusterConfiguration) -> Self { - Self::new(config.key_server_set.clone(), config.self_key_pair.clone()) - } + /// Create new simple from cluster configuration. 
+ pub fn with_config(config: &ClusterConfiguration) -> Self { + Self::new(config.key_server_set.clone(), config.self_key_pair.clone()) + } - /// Create new trigge with migration. - pub fn new(key_server_set: Arc, self_key_pair: Arc) -> Self { - let snapshot = key_server_set.snapshot(); - let migration = snapshot.migration.clone(); + /// Create new trigge with migration. + pub fn new(key_server_set: Arc, self_key_pair: Arc) -> Self { + let snapshot = key_server_set.snapshot(); + let migration = snapshot.migration.clone(); - ConnectionTriggerWithMigration { - self_key_pair: self_key_pair.clone(), - key_server_set: key_server_set.clone(), - snapshot: snapshot, - connected: BTreeSet::new(), - connections: TriggerConnections { - self_key_pair: self_key_pair.clone(), - }, - session: TriggerSession { - connector: Arc::new(ServersSetChangeSessionCreatorConnectorWithMigration { - self_node_id: self_key_pair.public().clone(), - migration: Mutex::new(migration), - session: Mutex::new(None), - }), - self_key_pair: self_key_pair, - key_server_set: key_server_set, - }, - connections_action: None, - session_action: None, - } - } - - /// Actually do mainteinance. 
- fn do_maintain(&mut self) -> Option { - loop { - let session_state = session_state(self.session.connector.session.lock().clone()); - let migration_state = migration_state(self.self_key_pair.public(), &self.snapshot); + ConnectionTriggerWithMigration { + self_key_pair: self_key_pair.clone(), + key_server_set: key_server_set.clone(), + snapshot: snapshot, + connected: BTreeSet::new(), + connections: TriggerConnections { + self_key_pair: self_key_pair.clone(), + }, + session: TriggerSession { + connector: Arc::new(ServersSetChangeSessionCreatorConnectorWithMigration { + self_node_id: self_key_pair.public().clone(), + migration: Mutex::new(migration), + session: Mutex::new(None), + }), + self_key_pair: self_key_pair, + key_server_set: key_server_set, + }, + connections_action: None, + session_action: None, + } + } - let session_action = maintain_session(self.self_key_pair.public(), &self.connected, &self.snapshot, migration_state, session_state); - let session_maintain_required = session_action.map(|session_action| - self.session.process(session_action)).unwrap_or_default(); - self.session_action = session_action; + /// Actually do mainteinance. 
+ fn do_maintain(&mut self) -> Option { + loop { + let session_state = session_state(self.session.connector.session.lock().clone()); + let migration_state = migration_state(self.self_key_pair.public(), &self.snapshot); - let connections_action = maintain_connections(migration_state, session_state); - let connections_maintain_required = connections_action.map(|_| true).unwrap_or_default(); - self.connections_action = connections_action; + let session_action = maintain_session( + self.self_key_pair.public(), + &self.connected, + &self.snapshot, + migration_state, + session_state, + ); + let session_maintain_required = session_action + .map(|session_action| self.session.process(session_action)) + .unwrap_or_default(); + self.session_action = session_action; - if session_state != SessionState::Idle || migration_state != MigrationState::Idle { - trace!(target: "secretstore_net", "{}: non-idle auto-migration state: {:?} -> {:?}", + let connections_action = maintain_connections(migration_state, session_state); + let connections_maintain_required = + connections_action.map(|_| true).unwrap_or_default(); + self.connections_action = connections_action; + + if session_state != SessionState::Idle || migration_state != MigrationState::Idle { + trace!(target: "secretstore_net", "{}: non-idle auto-migration state: {:?} -> {:?}", self.self_key_pair.public(), (migration_state, session_state), (self.connections_action, self.session_action)); - } + } - if session_action != Some(SessionAction::DropAndRetry) { - return match (session_maintain_required, connections_maintain_required) { - (true, true) => Some(Maintain::SessionAndConnections), - (true, false) => Some(Maintain::Session), - (false, true) => Some(Maintain::Connections), - (false, false) => None, - }; - } - } - } + if session_action != Some(SessionAction::DropAndRetry) { + return match (session_maintain_required, connections_maintain_required) { + (true, true) => Some(Maintain::SessionAndConnections), + (true, false) => 
Some(Maintain::Session), + (false, true) => Some(Maintain::Connections), + (false, false) => None, + }; + } + } + } } impl ConnectionTrigger for ConnectionTriggerWithMigration { - fn on_maintain(&mut self) -> Option { - self.snapshot = self.key_server_set.snapshot(); - *self.session.connector.migration.lock() = self.snapshot.migration.clone(); + fn on_maintain(&mut self) -> Option { + self.snapshot = self.key_server_set.snapshot(); + *self.session.connector.migration.lock() = self.snapshot.migration.clone(); - self.do_maintain() - } + self.do_maintain() + } - fn on_connection_established(&mut self, node: &NodeId) -> Option { - self.connected.insert(node.clone()); - self.do_maintain() - } + fn on_connection_established(&mut self, node: &NodeId) -> Option { + self.connected.insert(node.clone()); + self.do_maintain() + } - fn on_connection_closed(&mut self, node: &NodeId) -> Option { - self.connected.remove(node); - self.do_maintain() - } + fn on_connection_closed(&mut self, node: &NodeId) -> Option { + self.connected.remove(node); + self.do_maintain() + } - fn maintain_session(&mut self) -> Option { - self.session_action.and_then(|action| self.session.maintain(action, &self.snapshot)) - } + fn maintain_session(&mut self) -> Option { + self.session_action + .and_then(|action| self.session.maintain(action, &self.snapshot)) + } - fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) { - if let Some(action) = self.connections_action { - self.connections.maintain(action, connections, &self.snapshot); - } - } + fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) { + if let Some(action) = self.connections_action { + self.connections + .maintain(action, connections, &self.snapshot); + } + } - fn servers_set_change_creator_connector(&self) -> Arc { - self.session.connector.clone() - } + fn servers_set_change_creator_connector(&self) -> Arc { + self.session.connector.clone() + } } -impl ServersSetChangeSessionCreatorConnector 
for ServersSetChangeSessionCreatorConnectorWithMigration { - fn admin_public(&self, migration_id: Option<&H256>, new_server_set: BTreeSet) -> Result { - // the idea is that all nodes are agreed upon a block number and a new set of nodes in this block - // then master node is selected of all nodes set && this master signs the old set && new set - // (signatures are inputs to ServerSetChangeSession) - self.migration.lock().as_ref() +impl ServersSetChangeSessionCreatorConnector + for ServersSetChangeSessionCreatorConnectorWithMigration +{ + fn admin_public( + &self, + migration_id: Option<&H256>, + new_server_set: BTreeSet, + ) -> Result { + // the idea is that all nodes are agreed upon a block number and a new set of nodes in this block + // then master node is selected of all nodes set && this master signs the old set && new set + // (signatures are inputs to ServerSetChangeSession) + self.migration.lock().as_ref() .map(|migration| { let is_migration_id_same = migration_id.map(|mid| mid == &migration.id).unwrap_or_default(); let is_migration_set_same = new_server_set == migration.set.keys().cloned().collect(); @@ -230,113 +253,133 @@ impl ServersSetChangeSessionCreatorConnector for ServersSetChangeSessionCreatorC warn!(target: "secretstore_net", "{}: failed to accept non-scheduled auto-migration session", self.self_node_id); Err(Error::AccessDenied) }) - } + } - fn set_key_servers_set_change_session(&self, session: Arc) { - *self.session.lock() = Some(session); - } + fn set_key_servers_set_change_session(&self, session: Arc) { + *self.session.lock() = Some(session); + } } impl TriggerSession { - /// Process session action. 
- pub fn process(&mut self, action: SessionAction) -> bool { - match action { - SessionAction::ConfirmAndDrop(migration_id) => { - *self.connector.session.lock() = None; - self.key_server_set.confirm_migration(migration_id); - false - }, - SessionAction::Drop | SessionAction::DropAndRetry => { - *self.connector.session.lock() = None; - false - }, - SessionAction::StartMigration(migration_id) => { - self.key_server_set.start_migration(migration_id); - false - }, - SessionAction::Start => true, - } - } + /// Process session action. + pub fn process(&mut self, action: SessionAction) -> bool { + match action { + SessionAction::ConfirmAndDrop(migration_id) => { + *self.connector.session.lock() = None; + self.key_server_set.confirm_migration(migration_id); + false + } + SessionAction::Drop | SessionAction::DropAndRetry => { + *self.connector.session.lock() = None; + false + } + SessionAction::StartMigration(migration_id) => { + self.key_server_set.start_migration(migration_id); + false + } + SessionAction::Start => true, + } + } - /// Maintain session. - pub fn maintain( - &mut self, - action: SessionAction, - server_set: &KeyServerSetSnapshot - ) -> Option { - if action != SessionAction::Start { // all other actions are processed in maintain - return None; - } - let migration = server_set.migration.as_ref() - .expect("action is Start only when migration is started (see maintain_session); qed"); + /// Maintain session. 
+ pub fn maintain( + &mut self, + action: SessionAction, + server_set: &KeyServerSetSnapshot, + ) -> Option { + if action != SessionAction::Start { + // all other actions are processed in maintain + return None; + } + let migration = server_set + .migration + .as_ref() + .expect("action is Start only when migration is started (see maintain_session); qed"); - // we assume that authorities that are removed from the servers set are either offline, or malicious - // => they're not involved in ServersSetChangeSession - // => both sets are the same - let old_set: BTreeSet<_> = migration.set.keys().cloned().collect(); - let new_set = old_set.clone(); + // we assume that authorities that are removed from the servers set are either offline, or malicious + // => they're not involved in ServersSetChangeSession + // => both sets are the same + let old_set: BTreeSet<_> = migration.set.keys().cloned().collect(); + let new_set = old_set.clone(); - let signatures = self.self_key_pair.sign(&ordered_nodes_hash(&old_set)) - .and_then(|old_set_signature| self.self_key_pair.sign(&ordered_nodes_hash(&new_set)) - .map(|new_set_signature| (old_set_signature, new_set_signature))); + let signatures = self + .self_key_pair + .sign(&ordered_nodes_hash(&old_set)) + .and_then(|old_set_signature| { + self.self_key_pair + .sign(&ordered_nodes_hash(&new_set)) + .map(|new_set_signature| (old_set_signature, new_set_signature)) + }); - match signatures { - Ok((old_set_signature, new_set_signature)) => Some(ServersSetChangeParams { - session_id: None, - migration_id: Some(migration.id), - new_nodes_set: new_set, - old_set_signature, - new_set_signature, - }), - Err(err) => { - trace!( + match signatures { + Ok((old_set_signature, new_set_signature)) => Some(ServersSetChangeParams { + session_id: None, + migration_id: Some(migration.id), + new_nodes_set: new_set, + old_set_signature, + new_set_signature, + }), + Err(err) => { + trace!( target: "secretstore_net", "{}: failed to sign servers set for 
auto-migrate session with: {}", self.self_key_pair.public(), err); - None - }, - } - } + None + } + } + } } fn migration_state(self_node_id: &NodeId, snapshot: &KeyServerSetSnapshot) -> MigrationState { - // if this node is not on current && old set => we do not participate in migration - if !snapshot.current_set.contains_key(self_node_id) && - !snapshot.migration.as_ref().map(|s| s.set.contains_key(self_node_id)).unwrap_or_default() { - return MigrationState::Idle; - } + // if this node is not on current && old set => we do not participate in migration + if !snapshot.current_set.contains_key(self_node_id) + && !snapshot + .migration + .as_ref() + .map(|s| s.set.contains_key(self_node_id)) + .unwrap_or_default() + { + return MigrationState::Idle; + } - // if migration has already started no other states possible - if snapshot.migration.is_some() { - return MigrationState::Started; - } + // if migration has already started no other states possible + if snapshot.migration.is_some() { + return MigrationState::Started; + } - // we only require migration if set actually changes - // when only address changes, we could simply adjust connections - if !is_migration_required(&snapshot.current_set, &snapshot.new_set) { - return MigrationState::Idle; - } + // we only require migration if set actually changes + // when only address changes, we could simply adjust connections + if !is_migration_required(&snapshot.current_set, &snapshot.new_set) { + return MigrationState::Idle; + } - return MigrationState::Required; + return MigrationState::Required; } fn session_state(session: Option>) -> SessionState { - session - .and_then(|s| match s.as_servers_set_change() { - Some(s) if !s.is_finished() => Some(SessionState::Active(s.migration_id().cloned())), - Some(s) => match s.wait() { - Ok(_) => Some(SessionState::Finished(s.migration_id().cloned())), - Err(_) => Some(SessionState::Failed(s.migration_id().cloned())), - }, - None => None, - }) - .unwrap_or(SessionState::Idle) + session 
+ .and_then(|s| match s.as_servers_set_change() { + Some(s) if !s.is_finished() => Some(SessionState::Active(s.migration_id().cloned())), + Some(s) => match s.wait() { + Ok(_) => Some(SessionState::Finished(s.migration_id().cloned())), + Err(_) => Some(SessionState::Failed(s.migration_id().cloned())), + }, + None => None, + }) + .unwrap_or(SessionState::Idle) } -fn maintain_session(self_node_id: &NodeId, connected: &BTreeSet, snapshot: &KeyServerSetSnapshot, migration_state: MigrationState, session_state: SessionState) -> Option { - let migration_data_proof = "migration_state is Started; migration data available when started; qed"; +fn maintain_session( + self_node_id: &NodeId, + connected: &BTreeSet, + snapshot: &KeyServerSetSnapshot, + migration_state: MigrationState, + session_state: SessionState, +) -> Option { + let migration_data_proof = + "migration_state is Started; migration data available when started; qed"; - match (migration_state, session_state) { + match (migration_state, session_state) { // === NORMAL combinations === // having no session when it is not required => ok @@ -409,31 +452,40 @@ fn maintain_session(self_node_id: &NodeId, connected: &BTreeSet, snapsho } } -fn maintain_connections(migration_state: MigrationState, session_state: SessionState) -> Option { - match (migration_state, session_state) { - // session is active => we do not alter connections when session is active - (_, SessionState::Active(_)) => None, - // when no migration required => we just keep us connected to old nodes set - (MigrationState::Idle, _) => Some(ConnectionsAction::ConnectToCurrentSet), - // when migration is either scheduled, or in progress => connect to both old and migration set. - // this could lead to situation when node is not 'officially' a part of KeyServer (i.e. 
it is not in current_set) - // but it participates in new key generation session - // it is ok, since 'officialy' here means that this node is a owner of all old shares - (MigrationState::Required, _) | - (MigrationState::Started, _) => Some(ConnectionsAction::ConnectToMigrationSet), - } +fn maintain_connections( + migration_state: MigrationState, + session_state: SessionState, +) -> Option { + match (migration_state, session_state) { + // session is active => we do not alter connections when session is active + (_, SessionState::Active(_)) => None, + // when no migration required => we just keep us connected to old nodes set + (MigrationState::Idle, _) => Some(ConnectionsAction::ConnectToCurrentSet), + // when migration is either scheduled, or in progress => connect to both old and migration set. + // this could lead to situation when node is not 'officially' a part of KeyServer (i.e. it is not in current_set) + // but it participates in new key generation session + // it is ok, since 'officialy' here means that this node is a owner of all old shares + (MigrationState::Required, _) | (MigrationState::Started, _) => { + Some(ConnectionsAction::ConnectToMigrationSet) + } + } } -fn is_connected_to_all_nodes(self_node_id: &NodeId, nodes: &BTreeMap, connected: &BTreeSet) -> bool { - nodes.keys() - .filter(|n| *n != self_node_id) - .all(|n| connected.contains(n)) +fn is_connected_to_all_nodes( + self_node_id: &NodeId, + nodes: &BTreeMap, + connected: &BTreeSet, +) -> bool { + nodes + .keys() + .filter(|n| *n != self_node_id) + .all(|n| connected.contains(n)) } fn select_master_node(snapshot: &KeyServerSetSnapshot) -> &NodeId { - // we want to minimize a number of UnknownSession messages => - // try to select a node which was in SS && will be in SS - match snapshot.migration.as_ref() { + // we want to minimize a number of UnknownSession messages => + // try to select a node which was in SS && will be in SS + match snapshot.migration.as_ref() { Some(migration) => 
&migration.master, None => snapshot.current_set.keys() .filter(|n| snapshot.new_set.contains_key(n)) @@ -448,310 +500,622 @@ fn select_master_node(snapshot: &KeyServerSetSnapshot) -> &NodeId { #[cfg(test)] mod tests { - use key_server_cluster::{KeyServerSetSnapshot, KeyServerSetMigration}; - use key_server_cluster::connection_trigger::ConnectionsAction; - use super::{MigrationState, SessionState, SessionAction, migration_state, maintain_session, - maintain_connections, select_master_node}; + use super::{ + maintain_connections, maintain_session, migration_state, select_master_node, + MigrationState, SessionAction, SessionState, + }; + use key_server_cluster::{ + connection_trigger::ConnectionsAction, KeyServerSetMigration, KeyServerSetSnapshot, + }; - #[test] - fn migration_state_is_idle_when_required_but_this_node_is_not_on_the_list() { - assert_eq!(migration_state(&1.into(), &KeyServerSetSnapshot { - current_set: vec![(2.into(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - new_set: vec![(3.into(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - migration: None, - }), MigrationState::Idle); - } + #[test] + fn migration_state_is_idle_when_required_but_this_node_is_not_on_the_list() { + assert_eq!( + migration_state( + &1.into(), + &KeyServerSetSnapshot { + current_set: vec![(2.into(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(), + new_set: vec![(3.into(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(), + migration: None, + } + ), + MigrationState::Idle + ); + } - #[test] - fn migration_state_is_idle_when_sets_are_equal() { - assert_eq!(migration_state(&1.into(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - new_set: vec![(1.into(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - migration: None, - }), MigrationState::Idle); - } + #[test] + fn migration_state_is_idle_when_sets_are_equal() { + assert_eq!( + 
migration_state( + &1.into(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(), + new_set: vec![(1.into(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(), + migration: None, + } + ), + MigrationState::Idle + ); + } - #[test] - fn migration_state_is_idle_when_only_address_changes() { - assert_eq!(migration_state(&1.into(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap())].into_iter().collect(), - new_set: vec![(1.into(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - migration: None, - }), MigrationState::Idle); - } + #[test] + fn migration_state_is_idle_when_only_address_changes() { + assert_eq!( + migration_state( + &1.into(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap())] + .into_iter() + .collect(), + new_set: vec![(1.into(), "127.0.0.1:8081".parse().unwrap())] + .into_iter() + .collect(), + migration: None, + } + ), + MigrationState::Idle + ); + } - #[test] - fn migration_state_is_required_when_node_is_added() { - assert_eq!(migration_state(&1.into(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap())].into_iter().collect(), - new_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap()), - (2.into(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - migration: None, - }), MigrationState::Required); - } + #[test] + fn migration_state_is_required_when_node_is_added() { + assert_eq!( + migration_state( + &1.into(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap())] + .into_iter() + .collect(), + new_set: vec![ + (1.into(), "127.0.0.1:8080".parse().unwrap()), + (2.into(), "127.0.0.1:8081".parse().unwrap()) + ] + .into_iter() + .collect(), + migration: None, + } + ), + MigrationState::Required + ); + } - #[test] - fn migration_state_is_required_when_node_is_removed() { - 
assert_eq!(migration_state(&1.into(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap()), - (2.into(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(), - new_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap())].into_iter().collect(), - migration: None, - }), MigrationState::Required); - } + #[test] + fn migration_state_is_required_when_node_is_removed() { + assert_eq!( + migration_state( + &1.into(), + &KeyServerSetSnapshot { + current_set: vec![ + (1.into(), "127.0.0.1:8080".parse().unwrap()), + (2.into(), "127.0.0.1:8081".parse().unwrap()) + ] + .into_iter() + .collect(), + new_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap())] + .into_iter() + .collect(), + migration: None, + } + ), + MigrationState::Required + ); + } - #[test] - fn migration_state_is_started_when_migration_is_some() { - assert_eq!(migration_state(&1.into(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap())].into_iter().collect(), - new_set: Default::default(), - migration: Some(KeyServerSetMigration { - id: Default::default(), - set: Default::default(), - master: Default::default(), - is_confirmed: Default::default(), - }), - }), MigrationState::Started); - } + #[test] + fn migration_state_is_started_when_migration_is_some() { + assert_eq!( + migration_state( + &1.into(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8080".parse().unwrap())] + .into_iter() + .collect(), + new_set: Default::default(), + migration: Some(KeyServerSetMigration { + id: Default::default(), + set: Default::default(), + master: Default::default(), + is_confirmed: Default::default(), + }), + } + ), + MigrationState::Started + ); + } - #[test] - fn existing_master_is_selected_when_migration_has_started() { - assert_eq!(select_master_node(&KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8180".parse().unwrap())].into_iter().collect(), - new_set: vec![(2.into(), 
"127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - migration: Some(KeyServerSetMigration { - master: 3.into(), - ..Default::default() - }), - }), &3.into()); - } + #[test] + fn existing_master_is_selected_when_migration_has_started() { + assert_eq!( + select_master_node(&KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8180".parse().unwrap())] + .into_iter() + .collect(), + new_set: vec![(2.into(), "127.0.0.1:8181".parse().unwrap())] + .into_iter() + .collect(), + migration: Some(KeyServerSetMigration { + master: 3.into(), + ..Default::default() + }), + }), + &3.into() + ); + } - #[test] - fn persistent_master_is_selected_when_migration_has_not_started_yet() { - assert_eq!(select_master_node(&KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8180".parse().unwrap()), - (2.into(), "127.0.0.1:8180".parse().unwrap())].into_iter().collect(), - new_set: vec![(2.into(), "127.0.0.1:8181".parse().unwrap()), - (4.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - migration: None, - }), &2.into()); - } + #[test] + fn persistent_master_is_selected_when_migration_has_not_started_yet() { + assert_eq!( + select_master_node(&KeyServerSetSnapshot { + current_set: vec![ + (1.into(), "127.0.0.1:8180".parse().unwrap()), + (2.into(), "127.0.0.1:8180".parse().unwrap()) + ] + .into_iter() + .collect(), + new_set: vec![ + (2.into(), "127.0.0.1:8181".parse().unwrap()), + (4.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + migration: None, + }), + &2.into() + ); + } - #[test] - fn new_master_is_selected_in_worst_case() { - assert_eq!(select_master_node(&KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8180".parse().unwrap()), - (2.into(), "127.0.0.1:8180".parse().unwrap())].into_iter().collect(), - new_set: vec![(3.into(), "127.0.0.1:8181".parse().unwrap()), - (4.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - migration: None, - }), &3.into()); - } + #[test] + fn 
new_master_is_selected_in_worst_case() { + assert_eq!( + select_master_node(&KeyServerSetSnapshot { + current_set: vec![ + (1.into(), "127.0.0.1:8180".parse().unwrap()), + (2.into(), "127.0.0.1:8180".parse().unwrap()) + ] + .into_iter() + .collect(), + new_set: vec![ + (3.into(), "127.0.0.1:8181".parse().unwrap()), + (4.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + migration: None, + }), + &3.into() + ); + } - #[test] - fn maintain_connections_returns_none_when_session_is_active() { - assert_eq!(maintain_connections(MigrationState::Required, - SessionState::Active(Default::default())), None); - } + #[test] + fn maintain_connections_returns_none_when_session_is_active() { + assert_eq!( + maintain_connections( + MigrationState::Required, + SessionState::Active(Default::default()) + ), + None + ); + } - #[test] - fn maintain_connections_connects_to_current_set_when_no_migration() { - assert_eq!(maintain_connections(MigrationState::Idle, - SessionState::Idle), Some(ConnectionsAction::ConnectToCurrentSet)); - } + #[test] + fn maintain_connections_connects_to_current_set_when_no_migration() { + assert_eq!( + maintain_connections(MigrationState::Idle, SessionState::Idle), + Some(ConnectionsAction::ConnectToCurrentSet) + ); + } - #[test] - fn maintain_connections_connects_to_current_and_old_set_when_migration_is_required() { - assert_eq!(maintain_connections(MigrationState::Required, - SessionState::Idle), Some(ConnectionsAction::ConnectToMigrationSet)); - } + #[test] + fn maintain_connections_connects_to_current_and_old_set_when_migration_is_required() { + assert_eq!( + maintain_connections(MigrationState::Required, SessionState::Idle), + Some(ConnectionsAction::ConnectToMigrationSet) + ); + } - #[test] - fn maintain_connections_connects_to_current_and_old_set_when_migration_is_started() { - assert_eq!(maintain_connections(MigrationState::Started, - SessionState::Idle), Some(ConnectionsAction::ConnectToMigrationSet)); - } + #[test] + fn 
maintain_connections_connects_to_current_and_old_set_when_migration_is_started() { + assert_eq!( + maintain_connections(MigrationState::Started, SessionState::Idle), + Some(ConnectionsAction::ConnectToMigrationSet) + ); + } - #[test] - fn maintain_sessions_does_nothing_if_no_session_and_no_migration() { - assert_eq!(maintain_session(&1.into(), &Default::default(), &Default::default(), - MigrationState::Idle, SessionState::Idle), None); - } + #[test] + fn maintain_sessions_does_nothing_if_no_session_and_no_migration() { + assert_eq!( + maintain_session( + &1.into(), + &Default::default(), + &Default::default(), + MigrationState::Idle, + SessionState::Idle + ), + None + ); + } - #[test] - fn maintain_session_does_nothing_when_migration_required_on_slave_node_and_no_session() { - assert_eq!(maintain_session(&2.into(), &vec![2.into()].into_iter().collect(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - new_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap()), - (2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - migration: None, - }, MigrationState::Required, SessionState::Idle), None); - } + #[test] + fn maintain_session_does_nothing_when_migration_required_on_slave_node_and_no_session() { + assert_eq!( + maintain_session( + &2.into(), + &vec![2.into()].into_iter().collect(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())] + .into_iter() + .collect(), + new_set: vec![ + (1.into(), "127.0.0.1:8181".parse().unwrap()), + (2.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + migration: None, + }, + MigrationState::Required, + SessionState::Idle + ), + None + ); + } - #[test] - fn maintain_session_does_nothing_when_migration_started_on_slave_node_and_no_session() { - assert_eq!(maintain_session(&2.into(), &vec![2.into()].into_iter().collect(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), 
"127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - new_set: Default::default(), - migration: Some(KeyServerSetMigration { - master: 1.into(), - set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap()), - (2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }, MigrationState::Started, SessionState::Idle), None); - } + #[test] + fn maintain_session_does_nothing_when_migration_started_on_slave_node_and_no_session() { + assert_eq!( + maintain_session( + &2.into(), + &vec![2.into()].into_iter().collect(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())] + .into_iter() + .collect(), + new_set: Default::default(), + migration: Some(KeyServerSetMigration { + master: 1.into(), + set: vec![ + (1.into(), "127.0.0.1:8181".parse().unwrap()), + (2.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + ..Default::default() + }), + }, + MigrationState::Started, + SessionState::Idle + ), + None + ); + } - #[test] - fn maintain_session_does_nothing_when_migration_started_on_master_node_and_no_session_and_not_connected_to_migration_nodes() { - assert_eq!(maintain_session(&1.into(), &Default::default(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - new_set: Default::default(), - migration: Some(KeyServerSetMigration { - master: 1.into(), - set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap()), - (2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }, MigrationState::Started, SessionState::Idle), None); - } + #[test] + fn maintain_session_does_nothing_when_migration_started_on_master_node_and_no_session_and_not_connected_to_migration_nodes( + ) { + assert_eq!( + maintain_session( + &1.into(), + &Default::default(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())] + .into_iter() + .collect(), + new_set: 
Default::default(), + migration: Some(KeyServerSetMigration { + master: 1.into(), + set: vec![ + (1.into(), "127.0.0.1:8181".parse().unwrap()), + (2.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + ..Default::default() + }), + }, + MigrationState::Started, + SessionState::Idle + ), + None + ); + } - #[test] - fn maintain_session_starts_session_when_migration_started_on_master_node_and_no_session() { - assert_eq!(maintain_session(&1.into(), &vec![2.into()].into_iter().collect(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - new_set: Default::default(), - migration: Some(KeyServerSetMigration { - master: 1.into(), - set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap()), - (2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }, MigrationState::Started, SessionState::Idle), Some(SessionAction::Start)); - } + #[test] + fn maintain_session_starts_session_when_migration_started_on_master_node_and_no_session() { + assert_eq!( + maintain_session( + &1.into(), + &vec![2.into()].into_iter().collect(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())] + .into_iter() + .collect(), + new_set: Default::default(), + migration: Some(KeyServerSetMigration { + master: 1.into(), + set: vec![ + (1.into(), "127.0.0.1:8181".parse().unwrap()), + (2.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + ..Default::default() + }), + }, + MigrationState::Started, + SessionState::Idle + ), + Some(SessionAction::Start) + ); + } - #[test] - fn maintain_session_does_nothing_when_both_migration_and_session_are_started() { - assert_eq!(maintain_session(&1.into(), &vec![2.into()].into_iter().collect(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - new_set: Default::default(), - migration: Some(KeyServerSetMigration { - 
master: 1.into(), - set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap()), - (2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }, MigrationState::Started, SessionState::Active(Default::default())), None); - } + #[test] + fn maintain_session_does_nothing_when_both_migration_and_session_are_started() { + assert_eq!( + maintain_session( + &1.into(), + &vec![2.into()].into_iter().collect(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())] + .into_iter() + .collect(), + new_set: Default::default(), + migration: Some(KeyServerSetMigration { + master: 1.into(), + set: vec![ + (1.into(), "127.0.0.1:8181".parse().unwrap()), + (2.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + ..Default::default() + }), + }, + MigrationState::Started, + SessionState::Active(Default::default()) + ), + None + ); + } - #[test] - fn maintain_session_confirms_migration_when_active_and_session_has_finished_on_new_node() { - assert_eq!(maintain_session(&1.into(), &vec![2.into()].into_iter().collect(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - new_set: Default::default(), - migration: Some(KeyServerSetMigration { - master: 1.into(), - set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap()), - (2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }, MigrationState::Started, SessionState::Finished(Default::default())), Some(SessionAction::ConfirmAndDrop(Default::default()))); - } + #[test] + fn maintain_session_confirms_migration_when_active_and_session_has_finished_on_new_node() { + assert_eq!( + maintain_session( + &1.into(), + &vec![2.into()].into_iter().collect(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())] + .into_iter() + .collect(), + new_set: Default::default(), + migration: Some(KeyServerSetMigration 
{ + master: 1.into(), + set: vec![ + (1.into(), "127.0.0.1:8181".parse().unwrap()), + (2.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + ..Default::default() + }), + }, + MigrationState::Started, + SessionState::Finished(Default::default()) + ), + Some(SessionAction::ConfirmAndDrop(Default::default())) + ); + } - #[test] - fn maintain_session_drops_session_when_active_and_session_has_finished_on_removed_node() { - assert_eq!(maintain_session(&1.into(), &vec![2.into()].into_iter().collect(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap()), - (2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - new_set: Default::default(), - migration: Some(KeyServerSetMigration { - master: 2.into(), - set: vec![(2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }, MigrationState::Started, SessionState::Finished(Default::default())), Some(SessionAction::Drop)); - } + #[test] + fn maintain_session_drops_session_when_active_and_session_has_finished_on_removed_node() { + assert_eq!( + maintain_session( + &1.into(), + &vec![2.into()].into_iter().collect(), + &KeyServerSetSnapshot { + current_set: vec![ + (1.into(), "127.0.0.1:8181".parse().unwrap()), + (2.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + new_set: Default::default(), + migration: Some(KeyServerSetMigration { + master: 2.into(), + set: vec![(2.into(), "127.0.0.1:8181".parse().unwrap())] + .into_iter() + .collect(), + ..Default::default() + }), + }, + MigrationState::Started, + SessionState::Finished(Default::default()) + ), + Some(SessionAction::Drop) + ); + } - #[test] - fn maintain_session_drops_session_when_active_and_session_has_failed() { - assert_eq!(maintain_session(&1.into(), &vec![2.into()].into_iter().collect(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - new_set: 
Default::default(), - migration: Some(KeyServerSetMigration { - master: 1.into(), - set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap()), - (2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }, MigrationState::Started, SessionState::Failed(Default::default())), Some(SessionAction::Drop)); - } + #[test] + fn maintain_session_drops_session_when_active_and_session_has_failed() { + assert_eq!( + maintain_session( + &1.into(), + &vec![2.into()].into_iter().collect(), + &KeyServerSetSnapshot { + current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())] + .into_iter() + .collect(), + new_set: Default::default(), + migration: Some(KeyServerSetMigration { + master: 1.into(), + set: vec![ + (1.into(), "127.0.0.1:8181".parse().unwrap()), + (2.into(), "127.0.0.1:8181".parse().unwrap()) + ] + .into_iter() + .collect(), + ..Default::default() + }), + }, + MigrationState::Started, + SessionState::Failed(Default::default()) + ), + Some(SessionAction::Drop) + ); + } - #[test] - fn maintain_session_detects_abnormal_when_no_migration_and_active_session() { - assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(), - MigrationState::Idle, SessionState::Active(Default::default())), Some(SessionAction::DropAndRetry)); - } + #[test] + fn maintain_session_detects_abnormal_when_no_migration_and_active_session() { + assert_eq!( + maintain_session( + &Default::default(), + &Default::default(), + &Default::default(), + MigrationState::Idle, + SessionState::Active(Default::default()) + ), + Some(SessionAction::DropAndRetry) + ); + } - #[test] - fn maintain_session_detects_abnormal_when_no_migration_and_finished_session() { - assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(), - MigrationState::Idle, SessionState::Finished(Default::default())), Some(SessionAction::DropAndRetry)); - } + #[test] + fn 
maintain_session_detects_abnormal_when_no_migration_and_finished_session() { + assert_eq!( + maintain_session( + &Default::default(), + &Default::default(), + &Default::default(), + MigrationState::Idle, + SessionState::Finished(Default::default()) + ), + Some(SessionAction::DropAndRetry) + ); + } - #[test] - fn maintain_session_detects_abnormal_when_no_migration_and_failed_session() { - assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(), - MigrationState::Idle, SessionState::Failed(Default::default())), Some(SessionAction::DropAndRetry)); - } + #[test] + fn maintain_session_detects_abnormal_when_no_migration_and_failed_session() { + assert_eq!( + maintain_session( + &Default::default(), + &Default::default(), + &Default::default(), + MigrationState::Idle, + SessionState::Failed(Default::default()) + ), + Some(SessionAction::DropAndRetry) + ); + } - #[test] - fn maintain_session_detects_abnormal_when_required_migration_and_active_session() { - assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(), - MigrationState::Required, SessionState::Active(Default::default())), Some(SessionAction::DropAndRetry)); - } + #[test] + fn maintain_session_detects_abnormal_when_required_migration_and_active_session() { + assert_eq!( + maintain_session( + &Default::default(), + &Default::default(), + &Default::default(), + MigrationState::Required, + SessionState::Active(Default::default()) + ), + Some(SessionAction::DropAndRetry) + ); + } - #[test] - fn maintain_session_detects_abnormal_when_required_migration_and_finished_session() { - assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(), - MigrationState::Required, SessionState::Finished(Default::default())), Some(SessionAction::DropAndRetry)); - } + #[test] + fn maintain_session_detects_abnormal_when_required_migration_and_finished_session() { + assert_eq!( + maintain_session( + &Default::default(), + 
&Default::default(), + &Default::default(), + MigrationState::Required, + SessionState::Finished(Default::default()) + ), + Some(SessionAction::DropAndRetry) + ); + } - #[test] - fn maintain_session_detects_abnormal_when_required_migration_and_failed_session() { - assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(), - MigrationState::Required, SessionState::Failed(Default::default())), Some(SessionAction::DropAndRetry)); - } + #[test] + fn maintain_session_detects_abnormal_when_required_migration_and_failed_session() { + assert_eq!( + maintain_session( + &Default::default(), + &Default::default(), + &Default::default(), + MigrationState::Required, + SessionState::Failed(Default::default()) + ), + Some(SessionAction::DropAndRetry) + ); + } - #[test] - fn maintain_session_detects_abnormal_when_active_migration_and_active_session_with_different_id() { - assert_eq!(maintain_session(&Default::default(), &Default::default(), &KeyServerSetSnapshot { - migration: Some(KeyServerSetMigration { - id: 0.into(), - ..Default::default() - }), - ..Default::default() - }, MigrationState::Started, SessionState::Active(Some(1.into()))), Some(SessionAction::DropAndRetry)); - } + #[test] + fn maintain_session_detects_abnormal_when_active_migration_and_active_session_with_different_id( + ) { + assert_eq!( + maintain_session( + &Default::default(), + &Default::default(), + &KeyServerSetSnapshot { + migration: Some(KeyServerSetMigration { + id: 0.into(), + ..Default::default() + }), + ..Default::default() + }, + MigrationState::Started, + SessionState::Active(Some(1.into())) + ), + Some(SessionAction::DropAndRetry) + ); + } - #[test] - fn maintain_session_detects_abnormal_when_active_migration_and_finished_session_with_different_id() { - assert_eq!(maintain_session(&Default::default(), &Default::default(), &KeyServerSetSnapshot { - migration: Some(KeyServerSetMigration { - id: 0.into(), - ..Default::default() - }), - ..Default::default() - }, 
MigrationState::Started, SessionState::Finished(Some(1.into()))), Some(SessionAction::DropAndRetry)); - } + #[test] + fn maintain_session_detects_abnormal_when_active_migration_and_finished_session_with_different_id( + ) { + assert_eq!( + maintain_session( + &Default::default(), + &Default::default(), + &KeyServerSetSnapshot { + migration: Some(KeyServerSetMigration { + id: 0.into(), + ..Default::default() + }), + ..Default::default() + }, + MigrationState::Started, + SessionState::Finished(Some(1.into())) + ), + Some(SessionAction::DropAndRetry) + ); + } - #[test] - fn maintain_session_detects_abnormal_when_active_migration_and_failed_session_with_different_id() { - assert_eq!(maintain_session(&Default::default(), &Default::default(), &KeyServerSetSnapshot { - migration: Some(KeyServerSetMigration { - id: 0.into(), - ..Default::default() - }), - ..Default::default() - }, MigrationState::Started, SessionState::Failed(Some(1.into()))), Some(SessionAction::DropAndRetry)); - } + #[test] + fn maintain_session_detects_abnormal_when_active_migration_and_failed_session_with_different_id( + ) { + assert_eq!( + maintain_session( + &Default::default(), + &Default::default(), + &KeyServerSetSnapshot { + migration: Some(KeyServerSetMigration { + id: 0.into(), + ..Default::default() + }), + ..Default::default() + }, + MigrationState::Started, + SessionState::Failed(Some(1.into())) + ), + Some(SessionAction::DropAndRetry) + ); + } } diff --git a/secret-store/src/key_server_cluster/io/deadline.rs b/secret-store/src/key_server_cluster/io/deadline.rs index 7c0893257..6efeff163 100644 --- a/secret-store/src/key_server_cluster/io/deadline.rs +++ b/secret-store/src/key_server_cluster/io/deadline.rs @@ -14,75 +14,78 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::io; -use std::time::Duration; use futures::{Future, Poll}; -use tokio::timer::timeout::{Timeout, Error as TimeoutError}; +use std::{io, time::Duration}; +use tokio::timer::timeout::{Error as TimeoutError, Timeout}; -type DeadlineBox = Box::Item>, - Error = TimeoutError<::Error> -> + Send>; +type DeadlineBox = Box< + Future< + Item = DeadlineStatus<::Item>, + Error = TimeoutError<::Error>, + > + Send, +>; /// Complete a passed future or fail if it is not completed within timeout. pub fn deadline(duration: Duration, future: F) -> Result, io::Error> - where F: Future + Send + 'static, T: Send + 'static +where + F: Future + Send + 'static, + T: Send + 'static, { - let timeout = Box::new(Timeout::new(future, duration) - .then(|res| { - match res { - Ok(fut) => Ok(DeadlineStatus::Meet(fut)), - Err(err) => { - if err.is_elapsed() { - Ok(DeadlineStatus::Timeout) - } else { - Err(err) - } - }, - } - }) - ); - let deadline = Deadline { - future: timeout, - }; - Ok(deadline) + let timeout = Box::new(Timeout::new(future, duration).then(|res| match res { + Ok(fut) => Ok(DeadlineStatus::Meet(fut)), + Err(err) => { + if err.is_elapsed() { + Ok(DeadlineStatus::Timeout) + } else { + Err(err) + } + } + })); + let deadline = Deadline { future: timeout }; + Ok(deadline) } /// Deadline future completion status. #[derive(Debug, PartialEq)] pub enum DeadlineStatus { - /// Completed a future. - Meet(T), - /// Faled with timeout. - Timeout, + /// Completed a future. + Meet(T), + /// Faled with timeout. + Timeout, } /// Future, which waits for passed future completion within given period, or fails with timeout. 
-pub struct Deadline where F: Future { - future: DeadlineBox, +pub struct Deadline +where + F: Future, +{ + future: DeadlineBox, } -impl Future for Deadline where F: Future { - type Item = DeadlineStatus; - type Error = TimeoutError; +impl Future for Deadline +where + F: Future, +{ + type Item = DeadlineStatus; + type Error = TimeoutError; - fn poll(&mut self) -> Poll { - self.future.poll() - } + fn poll(&mut self) -> Poll { + self.future.poll() + } } #[cfg(test)] mod tests { - use std::time::Duration; - use futures::{Future, done}; - use tokio::reactor::Reactor; - use super::{deadline, DeadlineStatus}; + use super::{deadline, DeadlineStatus}; + use futures::{done, Future}; + use std::time::Duration; + use tokio::reactor::Reactor; - #[test] - fn deadline_result_works() { - let mut reactor = Reactor::new().unwrap(); - let deadline = deadline(Duration::from_millis(1000), done(Ok(()))).unwrap(); - reactor.turn(Some(Duration::from_millis(3))).unwrap(); - assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(())); - } + #[test] + fn deadline_result_works() { + let mut reactor = Reactor::new().unwrap(); + let deadline = deadline(Duration::from_millis(1000), done(Ok(()))).unwrap(); + reactor.turn(Some(Duration::from_millis(3))).unwrap(); + assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(())); + } } diff --git a/secret-store/src/key_server_cluster/io/handshake.rs b/secret-store/src/key_server_cluster/io/handshake.rs index f378cba09..a0ba89bb7 100644 --- a/secret-store/src/key_server_cluster/io/handshake.rs +++ b/secret-store/src/key_server_cluster/io/handshake.rs @@ -14,6 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use ethereum_types::H256; +use ethkey::{ + crypto::ecdh::agree, recover, sign, verify_public, Generator, KeyPair, Public, Random, + Signature, +}; +use futures::{Async, Future, Poll}; +use key_server_cluster::{ + io::{ + fix_shared_key, read_encrypted_message, read_message, write_encrypted_message, + write_message, ReadMessage, WriteMessage, + }, + message::{ClusterMessage, Message, NodePrivateKeySignature, NodePublicKey}, + Error, NodeId, NodeKeyPair, +}; ///! Given: two nodes each holding its own `self_key_pair`. ///! ///! Handshake process: @@ -31,352 +45,489 @@ ///! Result of handshake is: ///! 1) belief, that we are connected to the KS from our KS-set ///! 2) session key pair, which is used to enrypt all connection messages - use std::io; -use std::sync::Arc; -use std::collections::BTreeSet; -use futures::{Future, Poll, Async}; +use std::{collections::BTreeSet, sync::Arc}; use tokio_io::{AsyncRead, AsyncWrite}; -use ethkey::crypto::ecdh::agree; -use ethkey::{Random, Generator, KeyPair, Public, Signature, verify_public, sign, recover}; -use ethereum_types::H256; -use key_server_cluster::{NodeId, Error, NodeKeyPair}; -use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; -use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage, - read_message, read_encrypted_message, fix_shared_key}; /// Start handshake procedure with another node from the cluster. 
-pub fn handshake(a: A, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { - let init_data = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into) - .and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into)); - handshake_with_init_data(a, init_data, self_key_pair, trusted_nodes) +pub fn handshake( + a: A, + self_key_pair: Arc, + trusted_nodes: BTreeSet, +) -> Handshake +where + A: AsyncWrite + AsyncRead, +{ + let init_data = Random + .generate() + .map(|kp| *kp.secret().clone()) + .map_err(Into::into) + .and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into)); + handshake_with_init_data(a, init_data, self_key_pair, trusted_nodes) } /// Start handshake procedure with another node from the cluster and given plain confirmation + session key pair. -pub fn handshake_with_init_data(a: A, init_data: Result<(H256, KeyPair), Error>, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { - let handshake_input_data = init_data - .and_then(|(cp, kp)| sign(kp.secret(), &cp).map(|sp| (cp, kp, sp)).map_err(Into::into)) - .and_then(|(cp, kp, sp)| Handshake::::make_public_key_message(self_key_pair.public().clone(), cp.clone(), sp).map(|msg| (cp, kp, msg))); +pub fn handshake_with_init_data( + a: A, + init_data: Result<(H256, KeyPair), Error>, + self_key_pair: Arc, + trusted_nodes: BTreeSet, +) -> Handshake +where + A: AsyncWrite + AsyncRead, +{ + let handshake_input_data = init_data + .and_then(|(cp, kp)| { + sign(kp.secret(), &cp) + .map(|sp| (cp, kp, sp)) + .map_err(Into::into) + }) + .and_then(|(cp, kp, sp)| { + Handshake::::make_public_key_message(self_key_pair.public().clone(), cp.clone(), sp) + .map(|msg| (cp, kp, msg)) + }); - let (error, cp, kp, state) = match handshake_input_data { - Ok((cp, kp, msg)) => (None, cp, Some(kp), HandshakeState::SendPublicKey(write_message(a, msg))), - Err(err) => (Some((a, Err(err))), Default::default(), None, 
HandshakeState::Finished), - }; + let (error, cp, kp, state) = match handshake_input_data { + Ok((cp, kp, msg)) => ( + None, + cp, + Some(kp), + HandshakeState::SendPublicKey(write_message(a, msg)), + ), + Err(err) => ( + Some((a, Err(err))), + Default::default(), + None, + HandshakeState::Finished, + ), + }; - Handshake { - is_active: true, - error: error, - state: state, - self_key_pair: self_key_pair, - self_session_key_pair: kp, - self_confirmation_plain: cp, - trusted_nodes: Some(trusted_nodes), - peer_node_id: None, - peer_session_public: None, - peer_confirmation_plain: None, - shared_key: None, - } + Handshake { + is_active: true, + error: error, + state: state, + self_key_pair: self_key_pair, + self_session_key_pair: kp, + self_confirmation_plain: cp, + trusted_nodes: Some(trusted_nodes), + peer_node_id: None, + peer_session_public: None, + peer_confirmation_plain: None, + shared_key: None, + } } /// Wait for handshake procedure to be started by another node from the cluster. -pub fn accept_handshake(a: A, self_key_pair: Arc) -> Handshake where A: AsyncWrite + AsyncRead { - let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); - let handshake_input_data = self_confirmation_plain - .and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into)); +pub fn accept_handshake(a: A, self_key_pair: Arc) -> Handshake +where + A: AsyncWrite + AsyncRead, +{ + let self_confirmation_plain = Random + .generate() + .map(|kp| *kp.secret().clone()) + .map_err(Into::into); + let handshake_input_data = self_confirmation_plain + .and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into)); - let (error, cp, kp, state) = match handshake_input_data { - Ok((cp, kp)) => (None, cp, Some(kp), HandshakeState::ReceivePublicKey(read_message(a))), - Err(err) => (Some((a, Err(err))), Default::default(), None, HandshakeState::Finished), - }; + let (error, cp, kp, state) = match handshake_input_data { + Ok((cp, kp)) => ( + 
None, + cp, + Some(kp), + HandshakeState::ReceivePublicKey(read_message(a)), + ), + Err(err) => ( + Some((a, Err(err))), + Default::default(), + None, + HandshakeState::Finished, + ), + }; - Handshake { - is_active: false, - error: error, - state: state, - self_key_pair: self_key_pair, - self_session_key_pair: kp, - self_confirmation_plain: cp, - trusted_nodes: None, - peer_node_id: None, - peer_session_public: None, - peer_confirmation_plain: None, - shared_key: None, - } + Handshake { + is_active: false, + error: error, + state: state, + self_key_pair: self_key_pair, + self_session_key_pair: kp, + self_confirmation_plain: cp, + trusted_nodes: None, + peer_node_id: None, + peer_session_public: None, + peer_confirmation_plain: None, + shared_key: None, + } } /// Result of handshake procedure. #[derive(Debug, PartialEq)] pub struct HandshakeResult { - /// Node id. - pub node_id: NodeId, - /// Shared key. - pub shared_key: KeyPair, + /// Node id. + pub node_id: NodeId, + /// Shared key. + pub shared_key: KeyPair, } /// Future handshake procedure. pub struct Handshake { - is_active: bool, - error: Option<(A, Result)>, - state: HandshakeState, - self_key_pair: Arc, - self_session_key_pair: Option, - self_confirmation_plain: H256, - trusted_nodes: Option>, - peer_node_id: Option, - peer_session_public: Option, - peer_confirmation_plain: Option, - shared_key: Option, + is_active: bool, + error: Option<(A, Result)>, + state: HandshakeState, + self_key_pair: Arc, + self_session_key_pair: Option, + self_confirmation_plain: H256, + trusted_nodes: Option>, + peer_node_id: Option, + peer_session_public: Option, + peer_confirmation_plain: Option, + shared_key: Option, } /// Active handshake state. 
enum HandshakeState { - SendPublicKey(WriteMessage), - ReceivePublicKey(ReadMessage), - SendPrivateKeySignature(WriteMessage), - ReceivePrivateKeySignature(ReadMessage), - Finished, + SendPublicKey(WriteMessage), + ReceivePublicKey(ReadMessage), + SendPrivateKeySignature(WriteMessage), + ReceivePrivateKeySignature(ReadMessage), + Finished, } -impl Handshake where A: AsyncRead + AsyncWrite { - #[cfg(test)] - pub fn set_self_confirmation_plain(&mut self, self_confirmation_plain: H256) { - self.self_confirmation_plain = self_confirmation_plain; - } +impl Handshake +where + A: AsyncRead + AsyncWrite, +{ + #[cfg(test)] + pub fn set_self_confirmation_plain(&mut self, self_confirmation_plain: H256) { + self.self_confirmation_plain = self_confirmation_plain; + } - #[cfg(test)] - pub fn set_self_session_key_pair(&mut self, self_session_key_pair: KeyPair) { - self.self_session_key_pair = Some(self_session_key_pair); - } + #[cfg(test)] + pub fn set_self_session_key_pair(&mut self, self_session_key_pair: KeyPair) { + self.self_session_key_pair = Some(self_session_key_pair); + } - pub fn make_public_key_message(self_node_id: NodeId, confirmation_plain: H256, confirmation_signed_session: Signature) -> Result { - Ok(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { - node_id: self_node_id.into(), - confirmation_plain: confirmation_plain.into(), - confirmation_signed_session: confirmation_signed_session.into(), - }))) - } + pub fn make_public_key_message( + self_node_id: NodeId, + confirmation_plain: H256, + confirmation_signed_session: Signature, + ) -> Result { + Ok(Message::Cluster(ClusterMessage::NodePublicKey( + NodePublicKey { + node_id: self_node_id.into(), + confirmation_plain: confirmation_plain.into(), + confirmation_signed_session: confirmation_signed_session.into(), + }, + ))) + } - fn make_private_key_signature_message(self_key_pair: &NodeKeyPair, confirmation_plain: &H256) -> Result { - 
Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { - confirmation_signed: self_key_pair.sign(confirmation_plain)?.into(), - }))) - } + fn make_private_key_signature_message( + self_key_pair: &NodeKeyPair, + confirmation_plain: &H256, + ) -> Result { + Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature( + NodePrivateKeySignature { + confirmation_signed: self_key_pair.sign(confirmation_plain)?.into(), + }, + ))) + } - fn compute_shared_key(self_session_key_pair: &KeyPair, peer_session_public: &Public) -> Result { - agree(self_session_key_pair.secret(), peer_session_public) - .map_err(Into::into) - .and_then(|s| fix_shared_key(&s)) - } + fn compute_shared_key( + self_session_key_pair: &KeyPair, + peer_session_public: &Public, + ) -> Result { + agree(self_session_key_pair.secret(), peer_session_public) + .map_err(Into::into) + .and_then(|s| fix_shared_key(&s)) + } } -impl Future for Handshake where A: AsyncRead + AsyncWrite { - type Item = (A, Result); - type Error = io::Error; +impl Future for Handshake +where + A: AsyncRead + AsyncWrite, +{ + type Item = (A, Result); + type Error = io::Error; - fn poll(&mut self) -> Poll { - if let Some(error_result) = self.error.take() { - return Ok(error_result.into()); - } + fn poll(&mut self) -> Poll { + if let Some(error_result) = self.error.take() { + return Ok(error_result.into()); + } - let (next, result) = match self.state { - HandshakeState::SendPublicKey(ref mut future) => { - let (stream, _) = try_ready!(future.poll()); + let (next, result) = match self.state { + HandshakeState::SendPublicKey(ref mut future) => { + let (stream, _) = try_ready!(future.poll()); - if self.is_active { - (HandshakeState::ReceivePublicKey( - read_message(stream) - ), Async::NotReady) - } else { - let shared_key = Self::compute_shared_key( + if self.is_active { + ( + HandshakeState::ReceivePublicKey(read_message(stream)), + Async::NotReady, + ) + } else { + let shared_key = Self::compute_shared_key( 
self.self_session_key_pair.as_ref().expect( "self_session_key_pair is not filled only when initialization has failed; if initialization has failed, self.error.is_some(); qed"), self.peer_session_public.as_ref().expect( "we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; peer_session_public is filled in ReceivePublicKey; qed"), ); - self.shared_key = match shared_key { - Ok(shared_key) => Some(shared_key), - Err(err) => return Ok((stream, Err(err)).into()), - }; + self.shared_key = match shared_key { + Ok(shared_key) => Some(shared_key), + Err(err) => return Ok((stream, Err(err)).into()), + }; - let peer_confirmation_plain = self.peer_confirmation_plain.as_ref() + let peer_confirmation_plain = self.peer_confirmation_plain.as_ref() .expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; peer_confirmation_plain is filled in ReceivePublicKey; qed"); - let message = match Handshake::::make_private_key_signature_message(&*self.self_key_pair, peer_confirmation_plain) { - Ok(message) => message, - Err(err) => return Ok((stream, Err(err)).into()), - }; + let message = match Handshake::::make_private_key_signature_message( + &*self.self_key_pair, + peer_confirmation_plain, + ) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; - (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, - self.shared_key.as_ref().expect("filled couple of lines above; qed"), - message)), Async::NotReady) - } - }, - HandshakeState::ReceivePublicKey(ref mut future) => { - let (stream, message) = try_ready!(future.poll()); + ( + HandshakeState::SendPrivateKeySignature(write_encrypted_message( + stream, + self.shared_key + .as_ref() + .expect("filled couple of lines above; qed"), + message, + )), + Async::NotReady, + ) + } + } + HandshakeState::ReceivePublicKey(ref mut future) => { + let (stream, message) = try_ready!(future.poll()); - let message = match message { - Ok(message) => match 
message { - Message::Cluster(ClusterMessage::NodePublicKey(message)) => message, - _ => return Ok((stream, Err(Error::InvalidMessage)).into()), - }, - Err(err) => return Ok((stream, Err(err.into())).into()), - }; + let message = match message { + Ok(message) => match message { + Message::Cluster(ClusterMessage::NodePublicKey(message)) => message, + _ => return Ok((stream, Err(Error::InvalidMessage)).into()), + }, + Err(err) => return Ok((stream, Err(err.into())).into()), + }; - if !self.trusted_nodes.as_ref().map(|tn| tn.contains(&*message.node_id)).unwrap_or(true) { - return Ok((stream, Err(Error::InvalidNodeId)).into()); - } + if !self + .trusted_nodes + .as_ref() + .map(|tn| tn.contains(&*message.node_id)) + .unwrap_or(true) + { + return Ok((stream, Err(Error::InvalidNodeId)).into()); + } - self.peer_node_id = Some(message.node_id.into()); - self.peer_session_public = Some(match recover(&message.confirmation_signed_session, &message.confirmation_plain) { - Ok(peer_session_public) => peer_session_public, - Err(err) => return Ok((stream, Err(err.into())).into()), - }); - self.peer_confirmation_plain = Some(message.confirmation_plain.into()); - if self.is_active { - let shared_key = Self::compute_shared_key( + self.peer_node_id = Some(message.node_id.into()); + self.peer_session_public = Some( + match recover( + &message.confirmation_signed_session, + &message.confirmation_plain, + ) { + Ok(peer_session_public) => peer_session_public, + Err(err) => return Ok((stream, Err(err.into())).into()), + }, + ); + self.peer_confirmation_plain = Some(message.confirmation_plain.into()); + if self.is_active { + let shared_key = Self::compute_shared_key( self.self_session_key_pair.as_ref().expect( "self_session_key_pair is not filled only when initialization has failed; if initialization has failed, self.error.is_some(); qed"), self.peer_session_public.as_ref().expect( "we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; peer_session_public is filled 
in ReceivePublicKey; qed"), ); - self.shared_key = match shared_key { - Ok(shared_key) => Some(shared_key), - Err(err) => return Ok((stream, Err(err)).into()), - }; + self.shared_key = match shared_key { + Ok(shared_key) => Some(shared_key), + Err(err) => return Ok((stream, Err(err)).into()), + }; - let peer_confirmation_plain = self.peer_confirmation_plain.as_ref() - .expect("filled couple of lines above; qed"); - let message = match Handshake::::make_private_key_signature_message(&*self.self_key_pair, peer_confirmation_plain) { - Ok(message) => message, - Err(err) => return Ok((stream, Err(err)).into()), - }; + let peer_confirmation_plain = self + .peer_confirmation_plain + .as_ref() + .expect("filled couple of lines above; qed"); + let message = match Handshake::::make_private_key_signature_message( + &*self.self_key_pair, + peer_confirmation_plain, + ) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; - (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, - self.shared_key.as_ref().expect("filled couple of lines above; qed"), - message)), Async::NotReady) - } else { - let self_session_key_pair = self.self_session_key_pair.as_ref() + ( + HandshakeState::SendPrivateKeySignature(write_encrypted_message( + stream, + self.shared_key + .as_ref() + .expect("filled couple of lines above; qed"), + message, + )), + Async::NotReady, + ) + } else { + let self_session_key_pair = self.self_session_key_pair.as_ref() .expect("self_session_key_pair is not filled only when initialization has failed; if initialization has failed, self.error.is_some(); qed"); - let confirmation_signed_session = match sign(self_session_key_pair.secret(), &self.self_confirmation_plain).map_err(Into::into) { - Ok(confirmation_signed_session) => confirmation_signed_session, - Err(err) => return Ok((stream, Err(err)).into()), - }; + let confirmation_signed_session = match sign( + self_session_key_pair.secret(), + &self.self_confirmation_plain, + ) 
+ .map_err(Into::into) + { + Ok(confirmation_signed_session) => confirmation_signed_session, + Err(err) => return Ok((stream, Err(err)).into()), + }; - let message = match Handshake::::make_public_key_message(self.self_key_pair.public().clone(), self.self_confirmation_plain.clone(), confirmation_signed_session) { - Ok(message) => message, - Err(err) => return Ok((stream, Err(err)).into()), - }; - (HandshakeState::SendPublicKey(write_message(stream, message)), Async::NotReady) - } - }, - HandshakeState::SendPrivateKeySignature(ref mut future) => { - let (stream, _) = try_ready!(future.poll()); + let message = match Handshake::::make_public_key_message( + self.self_key_pair.public().clone(), + self.self_confirmation_plain.clone(), + confirmation_signed_session, + ) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + ( + HandshakeState::SendPublicKey(write_message(stream, message)), + Async::NotReady, + ) + } + } + HandshakeState::SendPrivateKeySignature(ref mut future) => { + let (stream, _) = try_ready!(future.poll()); - (HandshakeState::ReceivePrivateKeySignature( + (HandshakeState::ReceivePrivateKeySignature( read_encrypted_message(stream, self.shared_key.as_ref().expect("shared_key is filled in Send/ReceivePublicKey; SendPrivateKeySignature follows Send/ReceivePublicKey; qed").clone() ) ), Async::NotReady) - }, - HandshakeState::ReceivePrivateKeySignature(ref mut future) => { - let (stream, message) = try_ready!(future.poll()); + } + HandshakeState::ReceivePrivateKeySignature(ref mut future) => { + let (stream, message) = try_ready!(future.poll()); - let message = match message { - Ok(message) => match message { - Message::Cluster(ClusterMessage::NodePrivateKeySignature(message)) => message, - _ => return Ok((stream, Err(Error::InvalidMessage)).into()), - }, - Err(err) => return Ok((stream, Err(err.into())).into()), - }; + let message = match message { + Ok(message) => match message { + 
Message::Cluster(ClusterMessage::NodePrivateKeySignature(message)) => { + message + } + _ => return Ok((stream, Err(Error::InvalidMessage)).into()), + }, + Err(err) => return Ok((stream, Err(err.into())).into()), + }; - let peer_public = self.peer_node_id.as_ref().expect("peer_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"); - if !verify_public(peer_public, &*message.confirmation_signed, &self.self_confirmation_plain).unwrap_or(false) { - return Ok((stream, Err(Error::InvalidMessage)).into()); - } + let peer_public = self.peer_node_id.as_ref().expect("peer_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"); + if !verify_public( + peer_public, + &*message.confirmation_signed, + &self.self_confirmation_plain, + ) + .unwrap_or(false) + { + return Ok((stream, Err(Error::InvalidMessage)).into()); + } - (HandshakeState::Finished, Async::Ready((stream, Ok(HandshakeResult { + (HandshakeState::Finished, Async::Ready((stream, Ok(HandshakeResult { node_id: self.peer_node_id.expect("peer_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"), shared_key: self.shared_key.clone().expect("shared_key is filled in Send/ReceivePublicKey; ReceivePrivateKeySignature follows Send/ReceivePublicKey; qed"), })))) - }, - HandshakeState::Finished => panic!("poll Handshake after it's done"), - }; + } + HandshakeState::Finished => panic!("poll Handshake after it's done"), + }; - self.state = next; - match result { - // by polling again, we register new future - Async::NotReady => self.poll(), - result => Ok(result) - } - } + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result), + } + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::collections::BTreeSet; - use futures::Future; - use ethkey::{Random, Generator, sign}; - use ethereum_types::H256; - use 
key_server_cluster::PlainNodeKeyPair; - use key_server_cluster::io::message::tests::TestIo; - use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; - use super::{handshake_with_init_data, accept_handshake, HandshakeResult}; + use super::{accept_handshake, handshake_with_init_data, HandshakeResult}; + use ethereum_types::H256; + use ethkey::{sign, Generator, Random}; + use futures::Future; + use key_server_cluster::{ + io::message::tests::TestIo, + message::{ClusterMessage, Message, NodePrivateKeySignature, NodePublicKey}, + PlainNodeKeyPair, + }; + use std::{collections::BTreeSet, sync::Arc}; - fn prepare_test_io() -> (H256, TestIo) { - let mut io = TestIo::new(); + fn prepare_test_io() -> (H256, TestIo) { + let mut io = TestIo::new(); - let self_confirmation_plain = *Random.generate().unwrap().secret().clone(); - let peer_confirmation_plain = *Random.generate().unwrap().secret().clone(); + let self_confirmation_plain = *Random.generate().unwrap().secret().clone(); + let peer_confirmation_plain = *Random.generate().unwrap().secret().clone(); - let self_confirmation_signed = sign(io.peer_key_pair().secret(), &self_confirmation_plain).unwrap(); - let peer_confirmation_signed = sign(io.peer_session_key_pair().secret(), &peer_confirmation_plain).unwrap(); + let self_confirmation_signed = + sign(io.peer_key_pair().secret(), &self_confirmation_plain).unwrap(); + let peer_confirmation_signed = sign( + io.peer_session_key_pair().secret(), + &peer_confirmation_plain, + ) + .unwrap(); - let peer_public = io.peer_key_pair().public().clone(); - io.add_input_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { - node_id: peer_public.into(), - confirmation_plain: peer_confirmation_plain.into(), - confirmation_signed_session: peer_confirmation_signed.into(), - }))); - io.add_encrypted_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { - confirmation_signed: 
self_confirmation_signed.into(), - }))); + let peer_public = io.peer_key_pair().public().clone(); + io.add_input_message(Message::Cluster(ClusterMessage::NodePublicKey( + NodePublicKey { + node_id: peer_public.into(), + confirmation_plain: peer_confirmation_plain.into(), + confirmation_signed_session: peer_confirmation_signed.into(), + }, + ))); + io.add_encrypted_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature( + NodePrivateKeySignature { + confirmation_signed: self_confirmation_signed.into(), + }, + ))); - (self_confirmation_plain, io) - } + (self_confirmation_plain, io) + } - #[test] - fn active_handshake_works() { - let (self_confirmation_plain, io) = prepare_test_io(); - let trusted_nodes: BTreeSet<_> = vec![io.peer_key_pair().public().clone()].into_iter().collect(); - let self_session_key_pair = io.self_session_key_pair().clone(); - let self_key_pair = Arc::new(PlainNodeKeyPair::new(io.self_key_pair().clone())); - let shared_key = io.shared_key_pair().clone(); + #[test] + fn active_handshake_works() { + let (self_confirmation_plain, io) = prepare_test_io(); + let trusted_nodes: BTreeSet<_> = vec![io.peer_key_pair().public().clone()] + .into_iter() + .collect(); + let self_session_key_pair = io.self_session_key_pair().clone(); + let self_key_pair = Arc::new(PlainNodeKeyPair::new(io.self_key_pair().clone())); + let shared_key = io.shared_key_pair().clone(); - let handshake = handshake_with_init_data(io, Ok((self_confirmation_plain, self_session_key_pair)), self_key_pair, trusted_nodes); - let handshake_result = handshake.wait().unwrap(); - assert_eq!(handshake_result.1, Ok(HandshakeResult { - node_id: handshake_result.0.peer_key_pair().public().clone(), - shared_key: shared_key, - })); - } + let handshake = handshake_with_init_data( + io, + Ok((self_confirmation_plain, self_session_key_pair)), + self_key_pair, + trusted_nodes, + ); + let handshake_result = handshake.wait().unwrap(); + assert_eq!( + handshake_result.1, + Ok(HandshakeResult 
{ + node_id: handshake_result.0.peer_key_pair().public().clone(), + shared_key: shared_key, + }) + ); + } - #[test] - fn passive_handshake_works() { - let (self_confirmation_plain, io) = prepare_test_io(); - let self_key_pair = Arc::new(PlainNodeKeyPair::new(io.self_key_pair().clone())); - let self_session_key_pair = io.self_session_key_pair().clone(); - let shared_key = io.shared_key_pair().clone(); + #[test] + fn passive_handshake_works() { + let (self_confirmation_plain, io) = prepare_test_io(); + let self_key_pair = Arc::new(PlainNodeKeyPair::new(io.self_key_pair().clone())); + let self_session_key_pair = io.self_session_key_pair().clone(); + let shared_key = io.shared_key_pair().clone(); - let mut handshake = accept_handshake(io, self_key_pair); - handshake.set_self_confirmation_plain(self_confirmation_plain); - handshake.set_self_session_key_pair(self_session_key_pair); + let mut handshake = accept_handshake(io, self_key_pair); + handshake.set_self_confirmation_plain(self_confirmation_plain); + handshake.set_self_session_key_pair(self_session_key_pair); - let handshake_result = handshake.wait().unwrap(); - assert_eq!(handshake_result.1, Ok(HandshakeResult { - node_id: handshake_result.0.peer_key_pair().public().clone(), - shared_key: shared_key, - })); - } + let handshake_result = handshake.wait().unwrap(); + assert_eq!( + handshake_result.1, + Ok(HandshakeResult { + node_id: handshake_result.0.peer_key_pair().public().clone(), + shared_key: shared_key, + }) + ); + } } diff --git a/secret-store/src/key_server_cluster/io/message.rs b/secret-store/src/key_server_cluster/io/message.rs index 0ce2b7c01..12f329ae5 100644 --- a/secret-store/src/key_server_cluster/io/message.rs +++ b/secret-store/src/key_server_cluster/io/message.rs @@ -14,18 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::io::Cursor; -use std::u16; -use std::ops::Deref; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; -use serde_json; -use ethkey::crypto::ecies; -use ethkey::{Secret, KeyPair}; -use ethkey::math::curve_order; use ethereum_types::{H256, U256}; -use key_server_cluster::Error; -use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage, - SchnorrSigningMessage, EcdsaSigningMessage, ServersSetChangeMessage, ShareAddMessage, KeyVersionNegotiationMessage}; +use ethkey::{crypto::ecies, math::curve_order, KeyPair, Secret}; +use key_server_cluster::{ + message::{ + ClusterMessage, DecryptionMessage, EcdsaSigningMessage, EncryptionMessage, + GenerationMessage, KeyVersionNegotiationMessage, Message, SchnorrSigningMessage, + ServersSetChangeMessage, ShareAddMessage, + }, + Error, +}; +use serde_json; +use std::{io::Cursor, ops::Deref, u16}; /// Size of serialized header. pub const MESSAGE_HEADER_SIZE: usize = 18; @@ -35,12 +36,12 @@ pub const CURRENT_HEADER_VERSION: u64 = 1; /// Message header. #[derive(Debug, PartialEq)] pub struct MessageHeader { - /// Message/Header version. - pub version: u64, - /// Message kind. - pub kind: u64, - /// Message payload size (without header). - pub size: u16, + /// Message/Header version. + pub version: u64, + /// Message kind. + pub kind: u64, + /// Message payload size (without header). + pub size: u16, } /// Serialized message. @@ -48,382 +49,625 @@ pub struct MessageHeader { pub struct SerializedMessage(Vec); impl Deref for SerializedMessage { - type Target = [u8]; + type Target = [u8]; - fn deref(&self) -> &[u8] { - &self.0 - } + fn deref(&self) -> &[u8] { + &self.0 + } } impl Into> for SerializedMessage { - fn into(self) -> Vec { - self.0 - } + fn into(self) -> Vec { + self.0 + } } /// Serialize message. 
pub fn serialize_message(message: Message) -> Result { - let (message_kind, payload) = match message { - Message::Cluster(ClusterMessage::NodePublicKey(payload)) => (1, serde_json::to_vec(&payload)), - Message::Cluster(ClusterMessage::NodePrivateKeySignature(payload)) => (2, serde_json::to_vec(&payload)), - Message::Cluster(ClusterMessage::KeepAlive(payload)) => (3, serde_json::to_vec(&payload)), - Message::Cluster(ClusterMessage::KeepAliveResponse(payload)) => (4, serde_json::to_vec(&payload)), + let (message_kind, payload) = match message { + Message::Cluster(ClusterMessage::NodePublicKey(payload)) => { + (1, serde_json::to_vec(&payload)) + } + Message::Cluster(ClusterMessage::NodePrivateKeySignature(payload)) => { + (2, serde_json::to_vec(&payload)) + } + Message::Cluster(ClusterMessage::KeepAlive(payload)) => (3, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::KeepAliveResponse(payload)) => { + (4, serde_json::to_vec(&payload)) + } - Message::Generation(GenerationMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)), - Message::Generation(GenerationMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)), - Message::Generation(GenerationMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)), - Message::Generation(GenerationMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)), - Message::Generation(GenerationMessage::PublicKeyShare(payload)) => (54, serde_json::to_vec(&payload)), - Message::Generation(GenerationMessage::SessionError(payload)) => (55, serde_json::to_vec(&payload)), - Message::Generation(GenerationMessage::SessionCompleted(payload)) => (56, serde_json::to_vec(&payload)), + Message::Generation(GenerationMessage::InitializeSession(payload)) => { + (50, serde_json::to_vec(&payload)) + } + Message::Generation(GenerationMessage::ConfirmInitialization(payload)) => { + (51, serde_json::to_vec(&payload)) + } + 
Message::Generation(GenerationMessage::CompleteInitialization(payload)) => { + (52, serde_json::to_vec(&payload)) + } + Message::Generation(GenerationMessage::KeysDissemination(payload)) => { + (53, serde_json::to_vec(&payload)) + } + Message::Generation(GenerationMessage::PublicKeyShare(payload)) => { + (54, serde_json::to_vec(&payload)) + } + Message::Generation(GenerationMessage::SessionError(payload)) => { + (55, serde_json::to_vec(&payload)) + } + Message::Generation(GenerationMessage::SessionCompleted(payload)) => { + (56, serde_json::to_vec(&payload)) + } - Message::Encryption(EncryptionMessage::InitializeEncryptionSession(payload)) => (100, serde_json::to_vec(&payload)), - Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)), - Message::Encryption(EncryptionMessage::EncryptionSessionError(payload)) => (102, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::InitializeEncryptionSession(payload)) => { + (100, serde_json::to_vec(&payload)) + } + Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(payload)) => { + (101, serde_json::to_vec(&payload)) + } + Message::Encryption(EncryptionMessage::EncryptionSessionError(payload)) => { + (102, serde_json::to_vec(&payload)) + } - Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(payload)) => (150, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (151, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (152, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (153, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(payload)) => (154, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(payload)) => (155, serde_json::to_vec(&payload)), - 
Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(payload)) - => (156, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(payload)) => { + (150, serde_json::to_vec(&payload)) + } + Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => { + (151, serde_json::to_vec(&payload)) + } + Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => { + (152, serde_json::to_vec(&payload)) + } + Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => { + (153, serde_json::to_vec(&payload)) + } + Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(payload)) => { + (154, serde_json::to_vec(&payload)) + } + Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(payload)) => { + (155, serde_json::to_vec(&payload)) + } + Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(payload)) => { + (156, serde_json::to_vec(&payload)) + } - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(payload)) - => (200, serde_json::to_vec(&payload)), - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningGenerationMessage(payload)) - => (201, serde_json::to_vec(&payload)), - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrRequestPartialSignature(payload)) - => (202, serde_json::to_vec(&payload)), - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrPartialSignature(payload)) => (203, serde_json::to_vec(&payload)), - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError(payload)) => (204, serde_json::to_vec(&payload)), - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionCompleted(payload)) - => (205, serde_json::to_vec(&payload)), - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(payload)) - => (206, serde_json::to_vec(&payload)), - 
Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(payload)) - => (207, serde_json::to_vec(&payload)), + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(payload)) => { + (200, serde_json::to_vec(&payload)) + } + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningGenerationMessage( + payload, + )) => (201, serde_json::to_vec(&payload)), + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrRequestPartialSignature(payload)) => { + (202, serde_json::to_vec(&payload)) + } + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrPartialSignature(payload)) => { + (203, serde_json::to_vec(&payload)) + } + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError(payload)) => { + (204, serde_json::to_vec(&payload)) + } + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionCompleted(payload)) => { + (205, serde_json::to_vec(&payload)) + } + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation( + payload, + )) => (206, serde_json::to_vec(&payload)), + Message::SchnorrSigning( + SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(payload), + ) => (207, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(payload)) - => (250, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(payload)) => (251, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(payload)) => (252, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(payload)) - => (253, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(payload)) - => (254, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(payload)) - => 
(255, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(payload)) - => (256, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(payload)) - => (257, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(payload)) - => (258, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(payload)) => (261, serde_json::to_vec(&payload)), - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(payload)) - => (262, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage( + payload, + )) => (250, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(payload)) => { + (251, serde_json::to_vec(&payload)) + } + Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(payload)) => { + (252, serde_json::to_vec(&payload)) + } + Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation( + payload, + )) => (253, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession( + payload, + )) => (254, serde_json::to_vec(&payload)), + Message::ServersSetChange( + ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(payload), + ) => (255, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(payload)) => { + (256, serde_json::to_vec(&payload)) + } + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse( + payload, + )) => (257, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage( + payload, + )) => (258, serde_json::to_vec(&payload)), + 
Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(payload)) => { + (261, serde_json::to_vec(&payload)) + } + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(payload)) => { + (262, serde_json::to_vec(&payload)) + } - Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(payload)) => (300, serde_json::to_vec(&payload)), - Message::ShareAdd(ShareAddMessage::KeyShareCommon(payload)) => (301, serde_json::to_vec(&payload)), - Message::ShareAdd(ShareAddMessage::NewKeysDissemination(payload)) => (302, serde_json::to_vec(&payload)), - Message::ShareAdd(ShareAddMessage::ShareAddError(payload)) => (303, serde_json::to_vec(&payload)), + Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(payload)) => { + (300, serde_json::to_vec(&payload)) + } + Message::ShareAdd(ShareAddMessage::KeyShareCommon(payload)) => { + (301, serde_json::to_vec(&payload)) + } + Message::ShareAdd(ShareAddMessage::NewKeysDissemination(payload)) => { + (302, serde_json::to_vec(&payload)) + } + Message::ShareAdd(ShareAddMessage::ShareAddError(payload)) => { + (303, serde_json::to_vec(&payload)) + } - Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(payload)) - => (450, serde_json::to_vec(&payload)), - Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions(payload)) - => (451, serde_json::to_vec(&payload)), - Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(payload)) - => (452, serde_json::to_vec(&payload)), + Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions( + payload, + )) => (450, serde_json::to_vec(&payload)), + Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions(payload)) => { + (451, serde_json::to_vec(&payload)) + } + Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(payload)) => { + (452, serde_json::to_vec(&payload)) + } - 
Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(payload)) => (500, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(payload)) - => (501, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(payload)) - => (502, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(payload)) - => (503, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(payload)) - => (504, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaRequestPartialSignature(payload)) => (505, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaPartialSignature(payload)) => (506, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError(payload)) => (507, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionCompleted(payload)) => (508, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(payload)) => (509, serde_json::to_vec(&payload)), - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(payload)) - => (510, serde_json::to_vec(&payload)), - }; + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(payload)) => { + (500, serde_json::to_vec(&payload)) + } + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage( + payload, + )) => (501, serde_json::to_vec(&payload)), + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage( + payload, + )) => (502, serde_json::to_vec(&payload)), + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage( + payload, + )) => (503, serde_json::to_vec(&payload)), + 
Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare( + payload, + )) => (504, serde_json::to_vec(&payload)), + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaRequestPartialSignature(payload)) => { + (505, serde_json::to_vec(&payload)) + } + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaPartialSignature(payload)) => { + (506, serde_json::to_vec(&payload)) + } + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError(payload)) => { + (507, serde_json::to_vec(&payload)) + } + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionCompleted(payload)) => { + (508, serde_json::to_vec(&payload)) + } + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(payload)) => { + (509, serde_json::to_vec(&payload)) + } + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted( + payload, + )) => (510, serde_json::to_vec(&payload)), + }; - let payload = payload.map_err(|err| Error::Serde(err.to_string()))?; - build_serialized_message(MessageHeader { - kind: message_kind, - version: CURRENT_HEADER_VERSION, - size: 0, - }, payload) + let payload = payload.map_err(|err| Error::Serde(err.to_string()))?; + build_serialized_message( + MessageHeader { + kind: message_kind, + version: CURRENT_HEADER_VERSION, + size: 0, + }, + payload, + ) } /// Deserialize message. 
pub fn deserialize_message(header: &MessageHeader, payload: Vec) -> Result { - Ok(match header.kind { - 1 => Message::Cluster(ClusterMessage::NodePublicKey(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 2 => Message::Cluster(ClusterMessage::NodePrivateKeySignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 3 => Message::Cluster(ClusterMessage::KeepAlive(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 4 => Message::Cluster(ClusterMessage::KeepAliveResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + Ok(match header.kind { + 1 => Message::Cluster(ClusterMessage::NodePublicKey( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 2 => Message::Cluster(ClusterMessage::NodePrivateKeySignature( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 3 => Message::Cluster(ClusterMessage::KeepAlive( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 4 => Message::Cluster(ClusterMessage::KeepAliveResponse( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), - 50 => Message::Generation(GenerationMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 51 => Message::Generation(GenerationMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 52 => Message::Generation(GenerationMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 53 => Message::Generation(GenerationMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 54 => Message::Generation(GenerationMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| 
Error::Serde(err.to_string()))?)), - 55 => Message::Generation(GenerationMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 56 => Message::Generation(GenerationMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 50 => Message::Generation(GenerationMessage::InitializeSession( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 51 => Message::Generation(GenerationMessage::ConfirmInitialization( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 52 => Message::Generation(GenerationMessage::CompleteInitialization( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 53 => Message::Generation(GenerationMessage::KeysDissemination( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 54 => Message::Generation(GenerationMessage::PublicKeyShare( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 55 => Message::Generation(GenerationMessage::SessionError( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 56 => Message::Generation(GenerationMessage::SessionCompleted( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), - 100 => Message::Encryption(EncryptionMessage::InitializeEncryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 101 => Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 102 => Message::Encryption(EncryptionMessage::EncryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 100 => Message::Encryption(EncryptionMessage::InitializeEncryptionSession( + serde_json::from_slice(&payload).map_err(|err| 
Error::Serde(err.to_string()))?, + )), + 101 => Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 102 => Message::Encryption(EncryptionMessage::EncryptionSessionError( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), - 150 => Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 151 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 152 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 153 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 154 => Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 155 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 156 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 150 => Message::Decryption(DecryptionMessage::DecryptionConsensusMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 151 => Message::Decryption(DecryptionMessage::RequestPartialDecryption( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 152 => Message::Decryption(DecryptionMessage::PartialDecryption( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 153 => 
Message::Decryption(DecryptionMessage::DecryptionSessionError( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 154 => Message::Decryption(DecryptionMessage::DecryptionSessionCompleted( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 155 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegation( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 156 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), - 200 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 201 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 202 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrRequestPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 203 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 204 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 205 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 206 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 207 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| 
Error::Serde(err.to_string()))?)), + 200 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 201 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningGenerationMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 202 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrRequestPartialSignature( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 203 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrPartialSignature( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 204 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 205 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionCompleted( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 206 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 207 => Message::SchnorrSigning( + SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + ), + ), - 250 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 251 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 252 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 253 => 
Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 254 => Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 255 => Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 256 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 257 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 258 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 261 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 262 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 250 => { + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )) + } + 251 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 252 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 253 => { + Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation( + 
serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )) + } + 254 => Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 255 => Message::ServersSetChange( + ServersSetChangeMessage::ConfirmShareChangeSessionInitialization( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + ), + ), + 256 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 257 => { + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )) + } + 258 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 261 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 262 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), - 300 => Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 301 => Message::ShareAdd(ShareAddMessage::KeyShareCommon(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 302 => Message::ShareAdd(ShareAddMessage::NewKeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 303 => Message::ShareAdd(ShareAddMessage::ShareAddError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 300 => 
Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 301 => Message::ShareAdd(ShareAddMessage::KeyShareCommon( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 302 => Message::ShareAdd(ShareAddMessage::NewKeysDissemination( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 303 => Message::ShareAdd(ShareAddMessage::ShareAddError( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), - 450 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 451 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 452 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 450 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 451 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 452 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), - 500 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 501 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 502 => 
Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 503 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 504 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 505 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaRequestPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 506 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 507 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 508 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 509 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 510 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 500 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 501 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 502 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, 
+ )), + 503 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 504 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 505 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaRequestPartialSignature( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 506 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaPartialSignature( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 507 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 508 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionCompleted( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 509 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), + 510 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted( + serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?, + )), - _ => return Err(Error::Serde(format!("unknown message type {}", header.kind))), - }) + _ => { + return Err(Error::Serde(format!( + "unknown message type {}", + header.kind + ))) + } + }) } /// Encrypt serialized message. 
-pub fn encrypt_message(key: &KeyPair, message: SerializedMessage) -> Result { - let mut header: Vec<_> = message.into(); - let payload = header.split_off(MESSAGE_HEADER_SIZE); - let encrypted_payload = ecies::encrypt(key.public(), &[], &payload)?; +pub fn encrypt_message( + key: &KeyPair, + message: SerializedMessage, +) -> Result { + let mut header: Vec<_> = message.into(); + let payload = header.split_off(MESSAGE_HEADER_SIZE); + let encrypted_payload = ecies::encrypt(key.public(), &[], &payload)?; - let header = deserialize_header(&header)?; - build_serialized_message(header, encrypted_payload) + let header = deserialize_header(&header)?; + build_serialized_message(header, encrypted_payload) } /// Decrypt serialized message. pub fn decrypt_message(key: &KeyPair, payload: Vec) -> Result, Error> { - Ok(ecies::decrypt(key.secret(), &[], &payload)?) + Ok(ecies::decrypt(key.secret(), &[], &payload)?) } /// Fix shared encryption key. pub fn fix_shared_key(shared_secret: &Secret) -> Result { - // secret key created in agree function is invalid, as it is not calculated mod EC.field.n - // => let's do it manually - let shared_secret: H256 = (**shared_secret).into(); - let shared_secret: U256 = shared_secret.into(); - let shared_secret: H256 = (shared_secret % curve_order()).into(); - let shared_key_pair = KeyPair::from_secret_slice(&*shared_secret)?; - Ok(shared_key_pair) + // secret key created in agree function is invalid, as it is not calculated mod EC.field.n + // => let's do it manually + let shared_secret: H256 = (**shared_secret).into(); + let shared_secret: U256 = shared_secret.into(); + let shared_secret: H256 = (shared_secret % curve_order()).into(); + let shared_key_pair = KeyPair::from_secret_slice(&*shared_secret)?; + Ok(shared_key_pair) } /// Serialize message header. 
fn serialize_header(header: &MessageHeader) -> Result, Error> { - let mut buffer = Vec::with_capacity(MESSAGE_HEADER_SIZE); - buffer.write_u64::(header.version)?; - buffer.write_u64::(header.kind)?; - buffer.write_u16::(header.size)?; - Ok(buffer) + let mut buffer = Vec::with_capacity(MESSAGE_HEADER_SIZE); + buffer.write_u64::(header.version)?; + buffer.write_u64::(header.kind)?; + buffer.write_u16::(header.size)?; + Ok(buffer) } /// Deserialize message header. pub fn deserialize_header(data: &[u8]) -> Result { - let mut reader = Cursor::new(data); - let version = reader.read_u64::()?; - if version != CURRENT_HEADER_VERSION { - return Err(Error::InvalidMessageVersion); - } + let mut reader = Cursor::new(data); + let version = reader.read_u64::()?; + if version != CURRENT_HEADER_VERSION { + return Err(Error::InvalidMessageVersion); + } - Ok(MessageHeader { - version: version, - kind: reader.read_u64::()?, - size: reader.read_u16::()?, - }) + Ok(MessageHeader { + version: version, + kind: reader.read_u64::()?, + size: reader.read_u16::()?, + }) } /// Build serialized message from header && payload -fn build_serialized_message(mut header: MessageHeader, payload: Vec) -> Result { - let payload_len = payload.len(); - if payload_len > u16::MAX as usize { - return Err(Error::InvalidMessage); - } - header.size = payload.len() as u16; +fn build_serialized_message( + mut header: MessageHeader, + payload: Vec, +) -> Result { + let payload_len = payload.len(); + if payload_len > u16::MAX as usize { + return Err(Error::InvalidMessage); + } + header.size = payload.len() as u16; - let mut message = serialize_header(&header)?; - message.extend(payload); - Ok(SerializedMessage(message)) + let mut message = serialize_header(&header)?; + message.extend(payload); + Ok(SerializedMessage(message)) } #[cfg(test)] pub mod tests { - use std::io; - use futures::Poll; - use tokio_io::{AsyncRead, AsyncWrite}; - use ethkey::{Random, Generator, KeyPair}; - use ethkey::crypto::ecdh::agree; - use 
key_server_cluster::Error; - use key_server_cluster::message::Message; - use super::{MESSAGE_HEADER_SIZE, CURRENT_HEADER_VERSION, MessageHeader, fix_shared_key, encrypt_message, - serialize_message, serialize_header, deserialize_header}; + use super::{ + deserialize_header, encrypt_message, fix_shared_key, serialize_header, serialize_message, + MessageHeader, CURRENT_HEADER_VERSION, MESSAGE_HEADER_SIZE, + }; + use ethkey::{crypto::ecdh::agree, Generator, KeyPair, Random}; + use futures::Poll; + use key_server_cluster::{message::Message, Error}; + use std::io; + use tokio_io::{AsyncRead, AsyncWrite}; - pub struct TestIo { - self_key_pair: KeyPair, - self_session_key_pair: KeyPair, - peer_key_pair: KeyPair, - peer_session_key_pair: KeyPair, - shared_key_pair: KeyPair, - input_buffer: io::Cursor>, - } + pub struct TestIo { + self_key_pair: KeyPair, + self_session_key_pair: KeyPair, + peer_key_pair: KeyPair, + peer_session_key_pair: KeyPair, + shared_key_pair: KeyPair, + input_buffer: io::Cursor>, + } - impl TestIo { - pub fn new() -> Self { - let self_session_key_pair = Random.generate().unwrap(); - let peer_session_key_pair = Random.generate().unwrap(); - let self_key_pair = Random.generate().unwrap(); - let peer_key_pair = Random.generate().unwrap(); - let shared_key_pair = fix_shared_key(&agree(self_session_key_pair.secret(), peer_session_key_pair.public()).unwrap()).unwrap(); - TestIo { - self_key_pair: self_key_pair, - self_session_key_pair: self_session_key_pair, - peer_key_pair: peer_key_pair, - peer_session_key_pair: peer_session_key_pair, - shared_key_pair: shared_key_pair, - input_buffer: io::Cursor::new(Vec::new()), - } - } + impl TestIo { + pub fn new() -> Self { + let self_session_key_pair = Random.generate().unwrap(); + let peer_session_key_pair = Random.generate().unwrap(); + let self_key_pair = Random.generate().unwrap(); + let peer_key_pair = Random.generate().unwrap(); + let shared_key_pair = fix_shared_key( + &agree( + 
self_session_key_pair.secret(), + peer_session_key_pair.public(), + ) + .unwrap(), + ) + .unwrap(); + TestIo { + self_key_pair: self_key_pair, + self_session_key_pair: self_session_key_pair, + peer_key_pair: peer_key_pair, + peer_session_key_pair: peer_session_key_pair, + shared_key_pair: shared_key_pair, + input_buffer: io::Cursor::new(Vec::new()), + } + } - pub fn self_key_pair(&self) -> &KeyPair { - &self.self_key_pair - } + pub fn self_key_pair(&self) -> &KeyPair { + &self.self_key_pair + } - pub fn self_session_key_pair(&self) -> &KeyPair { - &self.self_session_key_pair - } + pub fn self_session_key_pair(&self) -> &KeyPair { + &self.self_session_key_pair + } - pub fn peer_key_pair(&self) -> &KeyPair { - &self.peer_key_pair - } + pub fn peer_key_pair(&self) -> &KeyPair { + &self.peer_key_pair + } - pub fn peer_session_key_pair(&self) -> &KeyPair { - &self.peer_session_key_pair - } + pub fn peer_session_key_pair(&self) -> &KeyPair { + &self.peer_session_key_pair + } - pub fn shared_key_pair(&self) -> &KeyPair { - &self.shared_key_pair - } + pub fn shared_key_pair(&self) -> &KeyPair { + &self.shared_key_pair + } - pub fn add_input_message(&mut self, message: Message) { - let serialized_message = serialize_message(message).unwrap(); - let serialized_message: Vec<_> = serialized_message.into(); - let input_buffer = self.input_buffer.get_mut(); - for b in serialized_message { - input_buffer.push(b); - } - } + pub fn add_input_message(&mut self, message: Message) { + let serialized_message = serialize_message(message).unwrap(); + let serialized_message: Vec<_> = serialized_message.into(); + let input_buffer = self.input_buffer.get_mut(); + for b in serialized_message { + input_buffer.push(b); + } + } - pub fn add_encrypted_input_message(&mut self, message: Message) { - let serialized_message = encrypt_message(&self.shared_key_pair, serialize_message(message).unwrap()).unwrap(); - let serialized_message: Vec<_> = serialized_message.into(); - let input_buffer = 
self.input_buffer.get_mut(); - for b in serialized_message { - input_buffer.push(b); - } - } - } + pub fn add_encrypted_input_message(&mut self, message: Message) { + let serialized_message = + encrypt_message(&self.shared_key_pair, serialize_message(message).unwrap()) + .unwrap(); + let serialized_message: Vec<_> = serialized_message.into(); + let input_buffer = self.input_buffer.get_mut(); + for b in serialized_message { + input_buffer.push(b); + } + } + } - impl AsyncRead for TestIo {} + impl AsyncRead for TestIo {} - impl AsyncWrite for TestIo { - fn shutdown(&mut self) -> Poll<(), io::Error> { - Ok(().into()) - } - } + impl AsyncWrite for TestIo { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } + } - impl io::Read for TestIo { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - io::Read::read(&mut self.input_buffer, buf) - } - } + impl io::Read for TestIo { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + io::Read::read(&mut self.input_buffer, buf) + } + } - impl io::Write for TestIo { - fn write(&mut self, buf: &[u8]) -> io::Result { - Ok(buf.len()) - } + impl io::Write for TestIo { + fn write(&mut self, buf: &[u8]) -> io::Result { + Ok(buf.len()) + } - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - } + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + } - #[test] - fn header_serialization_works() { - let header = MessageHeader { - kind: 1, - version: CURRENT_HEADER_VERSION, - size: 3, - }; + #[test] + fn header_serialization_works() { + let header = MessageHeader { + kind: 1, + version: CURRENT_HEADER_VERSION, + size: 3, + }; - let serialized_header = serialize_header(&header).unwrap(); - assert_eq!(serialized_header.len(), MESSAGE_HEADER_SIZE); + let serialized_header = serialize_header(&header).unwrap(); + assert_eq!(serialized_header.len(), MESSAGE_HEADER_SIZE); - let deserialized_header = deserialize_header(&serialized_header).unwrap(); - assert_eq!(deserialized_header, header); - } + let deserialized_header = 
deserialize_header(&serialized_header).unwrap(); + assert_eq!(deserialized_header, header); + } - #[test] - fn deserializing_header_of_wrong_version_fails() { - let header = MessageHeader { - kind: 1, - version: CURRENT_HEADER_VERSION + 1, - size: 3, - }; + #[test] + fn deserializing_header_of_wrong_version_fails() { + let header = MessageHeader { + kind: 1, + version: CURRENT_HEADER_VERSION + 1, + size: 3, + }; - assert_eq!(deserialize_header(&serialize_header(&header).unwrap()).unwrap_err(), Error::InvalidMessageVersion); - } + assert_eq!( + deserialize_header(&serialize_header(&header).unwrap()).unwrap_err(), + Error::InvalidMessageVersion + ); + } } diff --git a/secret-store/src/key_server_cluster/io/mod.rs b/secret-store/src/key_server_cluster/io/mod.rs index c1cfe5566..7d233b36e 100644 --- a/secret-store/src/key_server_cluster/io/mod.rs +++ b/secret-store/src/key_server_cluster/io/mod.rs @@ -18,17 +18,21 @@ mod deadline; mod handshake; mod message; mod read_header; -mod read_payload; mod read_message; +mod read_payload; mod shared_tcp_stream; mod write_message; -pub use self::deadline::{deadline, Deadline, DeadlineStatus}; -pub use self::handshake::{handshake, accept_handshake, Handshake, HandshakeResult}; -pub use self::message::{MessageHeader, SerializedMessage, serialize_message, deserialize_message, - encrypt_message, fix_shared_key}; -pub use self::read_header::{read_header, ReadHeader}; -pub use self::read_payload::{read_payload, read_encrypted_payload, ReadPayload}; -pub use self::read_message::{read_message, read_encrypted_message, ReadMessage}; -pub use self::shared_tcp_stream::SharedTcpStream; -pub use self::write_message::{write_message, write_encrypted_message, WriteMessage}; +pub use self::{ + deadline::{deadline, Deadline, DeadlineStatus}, + handshake::{accept_handshake, handshake, Handshake, HandshakeResult}, + message::{ + deserialize_message, encrypt_message, fix_shared_key, serialize_message, MessageHeader, + SerializedMessage, + }, + 
read_header::{read_header, ReadHeader}, + read_message::{read_encrypted_message, read_message, ReadMessage}, + read_payload::{read_encrypted_payload, read_payload, ReadPayload}, + shared_tcp_stream::SharedTcpStream, + write_message::{write_encrypted_message, write_message, WriteMessage}, +}; diff --git a/secret-store/src/key_server_cluster/io/read_header.rs b/secret-store/src/key_server_cluster/io/read_header.rs index 3806537eb..c4f407581 100644 --- a/secret-store/src/key_server_cluster/io/read_header.rs +++ b/secret-store/src/key_server_cluster/io/read_header.rs @@ -14,32 +14,42 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use futures::{Async, Future, Poll}; +use key_server_cluster::{ + io::message::{deserialize_header, MessageHeader, MESSAGE_HEADER_SIZE}, + Error, +}; use std::io; -use futures::{Future, Poll, Async}; -use tokio_io::AsyncRead; -use tokio_io::io::{ReadExact, read_exact}; -use key_server_cluster::Error; -use key_server_cluster::io::message::{MESSAGE_HEADER_SIZE, MessageHeader, deserialize_header}; +use tokio_io::{ + io::{read_exact, ReadExact}, + AsyncRead, +}; /// Create future for read single message header from the stream. -pub fn read_header(a: A) -> ReadHeader where A: AsyncRead { - ReadHeader { - reader: read_exact(a, [0; MESSAGE_HEADER_SIZE]), - } +pub fn read_header(a: A) -> ReadHeader +where + A: AsyncRead, +{ + ReadHeader { + reader: read_exact(a, [0; MESSAGE_HEADER_SIZE]), + } } /// Future for read single message header from the stream. 
pub struct ReadHeader { - reader: ReadExact, + reader: ReadExact, } -impl Future for ReadHeader where A: AsyncRead { - type Item = (A, Result); - type Error = io::Error; +impl Future for ReadHeader +where + A: AsyncRead, +{ + type Item = (A, Result); + type Error = io::Error; - fn poll(&mut self) -> Poll { - let (read, data) = try_ready!(self.reader.poll()); - let header = deserialize_header(&data); - Ok(Async::Ready((read, header))) - } + fn poll(&mut self) -> Poll { + let (read, data) = try_ready!(self.reader.poll()); + let header = deserialize_header(&data); + Ok(Async::Ready((read, header))) + } } diff --git a/secret-store/src/key_server_cluster/io/read_message.rs b/secret-store/src/key_server_cluster/io/read_message.rs index e16de57a3..3dad05843 100644 --- a/secret-store/src/key_server_cluster/io/read_message.rs +++ b/secret-store/src/key_server_cluster/io/read_message.rs @@ -14,74 +14,85 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::io; -use futures::{Poll, Future, Async}; -use tokio_io::AsyncRead; use ethkey::KeyPair; -use key_server_cluster::Error; -use key_server_cluster::message::Message; -use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload}; +use futures::{Async, Future, Poll}; +use key_server_cluster::{ + io::{read_encrypted_payload, read_header, read_payload, ReadHeader, ReadPayload}, + message::Message, + Error, +}; +use std::io; +use tokio_io::AsyncRead; /// Create future for read single message from the stream. -pub fn read_message(a: A) -> ReadMessage where A: AsyncRead { - ReadMessage { - key: None, - state: ReadMessageState::ReadHeader(read_header(a)), - } +pub fn read_message(a: A) -> ReadMessage +where + A: AsyncRead, +{ + ReadMessage { + key: None, + state: ReadMessageState::ReadHeader(read_header(a)), + } } /// Create future for read single encrypted message from the stream. 
-pub fn read_encrypted_message(a: A, key: KeyPair) -> ReadMessage where A: AsyncRead { - ReadMessage { - key: Some(key), - state: ReadMessageState::ReadHeader(read_header(a)), - } +pub fn read_encrypted_message(a: A, key: KeyPair) -> ReadMessage +where + A: AsyncRead, +{ + ReadMessage { + key: Some(key), + state: ReadMessageState::ReadHeader(read_header(a)), + } } enum ReadMessageState { - ReadHeader(ReadHeader), - ReadPayload(ReadPayload), - Finished, + ReadHeader(ReadHeader), + ReadPayload(ReadPayload), + Finished, } /// Future for read single message from the stream. pub struct ReadMessage { - key: Option, - state: ReadMessageState, + key: Option, + state: ReadMessageState, } -impl Future for ReadMessage where A: AsyncRead { - type Item = (A, Result); - type Error = io::Error; +impl Future for ReadMessage +where + A: AsyncRead, +{ + type Item = (A, Result); + type Error = io::Error; - fn poll(&mut self) -> Poll { - let (next, result) = match self.state { - ReadMessageState::ReadHeader(ref mut future) => { - let (read, header) = try_ready!(future.poll()); - let header = match header { - Ok(header) => header, - Err(err) => return Ok((read, Err(err)).into()), - }; + fn poll(&mut self) -> Poll { + let (next, result) = match self.state { + ReadMessageState::ReadHeader(ref mut future) => { + let (read, header) = try_ready!(future.poll()); + let header = match header { + Ok(header) => header, + Err(err) => return Ok((read, Err(err)).into()), + }; - let future = match self.key.take() { - Some(key) => read_encrypted_payload(read, header, key), - None => read_payload(read, header), - }; - let next = ReadMessageState::ReadPayload(future); - (next, Async::NotReady) - }, - ReadMessageState::ReadPayload(ref mut future) => { - let (read, payload) = try_ready!(future.poll()); - (ReadMessageState::Finished, Async::Ready((read, payload))) - }, - ReadMessageState::Finished => panic!("poll ReadMessage after it's done"), - }; + let future = match self.key.take() { + Some(key) => 
read_encrypted_payload(read, header, key), + None => read_payload(read, header), + }; + let next = ReadMessageState::ReadPayload(future); + (next, Async::NotReady) + } + ReadMessageState::ReadPayload(ref mut future) => { + let (read, payload) = try_ready!(future.poll()); + (ReadMessageState::Finished, Async::Ready((read, payload))) + } + ReadMessageState::Finished => panic!("poll ReadMessage after it's done"), + }; - self.state = next; - match result { - // by polling again, we register new future - Async::NotReady => self.poll(), - result => Ok(result) - } - } + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result), + } + } } diff --git a/secret-store/src/key_server_cluster/io/read_payload.rs b/secret-store/src/key_server_cluster/io/read_payload.rs index 9f3a47f66..7eca7050f 100644 --- a/secret-store/src/key_server_cluster/io/read_payload.rs +++ b/secret-store/src/key_server_cluster/io/read_payload.rs @@ -14,52 +14,64 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::io; -use futures::{Poll, Future}; -use tokio_io::AsyncRead; -use tokio_io::io::{read_exact, ReadExact}; use ethkey::KeyPair; -use key_server_cluster::Error; -use key_server_cluster::message::Message; -use key_server_cluster::io::message::{MessageHeader, deserialize_message, decrypt_message}; +use futures::{Future, Poll}; +use key_server_cluster::{ + io::message::{decrypt_message, deserialize_message, MessageHeader}, + message::Message, + Error, +}; +use std::io; +use tokio_io::{ + io::{read_exact, ReadExact}, + AsyncRead, +}; /// Create future for read single message payload from the stream. 
-pub fn read_payload(a: A, header: MessageHeader) -> ReadPayload where A: AsyncRead { - ReadPayload { - reader: read_exact(a, vec![0; header.size as usize]), - header: header, - key: None, - } +pub fn read_payload(a: A, header: MessageHeader) -> ReadPayload +where + A: AsyncRead, +{ + ReadPayload { + reader: read_exact(a, vec![0; header.size as usize]), + header: header, + key: None, + } } /// Create future for read single encrypted message payload from the stream. -pub fn read_encrypted_payload(a: A, header: MessageHeader, key: KeyPair) -> ReadPayload where A: AsyncRead { - ReadPayload { - reader: read_exact(a, vec![0; header.size as usize]), - header: header, - key: Some(key), - } +pub fn read_encrypted_payload(a: A, header: MessageHeader, key: KeyPair) -> ReadPayload +where + A: AsyncRead, +{ + ReadPayload { + reader: read_exact(a, vec![0; header.size as usize]), + header: header, + key: Some(key), + } } /// Future for read single message payload from the stream. pub struct ReadPayload { - reader: ReadExact>, - header: MessageHeader, - key: Option, + reader: ReadExact>, + header: MessageHeader, + key: Option, } -impl Future for ReadPayload where A: AsyncRead { - type Item = (A, Result); - type Error = io::Error; +impl Future for ReadPayload +where + A: AsyncRead, +{ + type Item = (A, Result); + type Error = io::Error; - fn poll(&mut self) -> Poll { - let (read, data) = try_ready!(self.reader.poll()); - let payload = if let Some(key) = self.key.take() { - decrypt_message(&key, data) - .and_then(|data| deserialize_message(&self.header, data)) - } else { - deserialize_message(&self.header, data) - }; - Ok((read, payload).into()) - } + fn poll(&mut self) -> Poll { + let (read, data) = try_ready!(self.reader.poll()); + let payload = if let Some(key) = self.key.take() { + decrypt_message(&key, data).and_then(|data| deserialize_message(&self.header, data)) + } else { + deserialize_message(&self.header, data) + }; + Ok((read, payload).into()) + } } diff --git 
a/secret-store/src/key_server_cluster/io/shared_tcp_stream.rs b/secret-store/src/key_server_cluster/io/shared_tcp_stream.rs index 99d6e4ca7..de128189b 100644 --- a/secret-store/src/key_server_cluster/io/shared_tcp_stream.rs +++ b/secret-store/src/key_server_cluster/io/shared_tcp_stream.rs @@ -14,58 +14,58 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::net::Shutdown; -use std::io::{Read, Write, Error}; use futures::Poll; -use tokio_io::{AsyncRead, AsyncWrite}; +use std::{ + io::{Error, Read, Write}, + net::Shutdown, + sync::Arc, +}; use tokio::net::TcpStream; +use tokio_io::{AsyncRead, AsyncWrite}; /// Read+Write implementation for Arc. pub struct SharedTcpStream { - io: Arc, + io: Arc, } impl SharedTcpStream { - pub fn new(a: Arc) -> Self { - SharedTcpStream { - io: a, - } - } + pub fn new(a: Arc) -> Self { + SharedTcpStream { io: a } + } } impl From for SharedTcpStream { - fn from(a: TcpStream) -> Self { - SharedTcpStream::new(Arc::new(a)) - } + fn from(a: TcpStream) -> Self { + SharedTcpStream::new(Arc::new(a)) + } } impl AsyncRead for SharedTcpStream {} impl AsyncWrite for SharedTcpStream { - fn shutdown(&mut self) -> Poll<(), Error> { - self.io.shutdown(Shutdown::Both).map(Into::into) - } + fn shutdown(&mut self) -> Poll<(), Error> { + self.io.shutdown(Shutdown::Both).map(Into::into) + } } impl Read for SharedTcpStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - Read::read(&mut (&*self.io as &TcpStream), buf) - } + fn read(&mut self, buf: &mut [u8]) -> Result { + Read::read(&mut (&*self.io as &TcpStream), buf) + } } impl Write for SharedTcpStream { - fn write(&mut self, buf: &[u8]) -> Result { - Write::write(&mut (&*self.io as &TcpStream), buf) - } + fn write(&mut self, buf: &[u8]) -> Result { + Write::write(&mut (&*self.io as &TcpStream), buf) + } - fn flush(&mut self) -> Result<(), Error> { - Write::flush(&mut (&*self.io as &TcpStream)) - } + fn 
flush(&mut self) -> Result<(), Error> { + Write::flush(&mut (&*self.io as &TcpStream)) + } } impl Clone for SharedTcpStream { - fn clone(&self) -> Self { - SharedTcpStream::new(self.io.clone()) - } + fn clone(&self) -> Self { + SharedTcpStream::new(self.io.clone()) + } } diff --git a/secret-store/src/key_server_cluster/io/write_message.rs b/secret-store/src/key_server_cluster/io/write_message.rs index 15823730a..19be86de8 100644 --- a/secret-store/src/key_server_cluster/io/write_message.rs +++ b/secret-store/src/key_server_cluster/io/write_message.rs @@ -14,57 +14,72 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::io; -use futures::{Future, Poll}; -use tokio_io::AsyncWrite; -use tokio_io::io::{WriteAll, write_all}; use ethkey::KeyPair; -use key_server_cluster::message::Message; -use key_server_cluster::io::{serialize_message, encrypt_message}; +use futures::{Future, Poll}; +use key_server_cluster::{ + io::{encrypt_message, serialize_message}, + message::Message, +}; +use std::io; +use tokio_io::{ + io::{write_all, WriteAll}, + AsyncWrite, +}; /// Write plain message to the channel. -pub fn write_message(a: A, message: Message) -> WriteMessage where A: AsyncWrite { - let (error, future) = match serialize_message(message) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) { - Ok(message) => (None, write_all(a, message.into())), - Err(error) => (Some(error), write_all(a, Vec::new())), - }; - WriteMessage { - error: error, - future: future, - } +pub fn write_message(a: A, message: Message) -> WriteMessage +where + A: AsyncWrite, +{ + let (error, future) = match serialize_message(message) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) + { + Ok(message) => (None, write_all(a, message.into())), + Err(error) => (Some(error), write_all(a, Vec::new())), + }; + WriteMessage { + error: error, + future: future, + } } /// Write encrypted message to the channel. 
-pub fn write_encrypted_message(a: A, key: &KeyPair, message: Message) -> WriteMessage where A: AsyncWrite { - let (error, future) = match serialize_message(message) - .and_then(|message| encrypt_message(key, message)) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) { - Ok(message) => (None, write_all(a, message.into())), - Err(error) => (Some(error), write_all(a, Vec::new())), - }; +pub fn write_encrypted_message(a: A, key: &KeyPair, message: Message) -> WriteMessage +where + A: AsyncWrite, +{ + let (error, future) = match serialize_message(message) + .and_then(|message| encrypt_message(key, message)) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) + { + Ok(message) => (None, write_all(a, message.into())), + Err(error) => (Some(error), write_all(a, Vec::new())), + }; - WriteMessage { - error: error, - future: future, - } + WriteMessage { + error: error, + future: future, + } } /// Future message write. pub struct WriteMessage { - error: Option, - future: WriteAll>, + error: Option, + future: WriteAll>, } -impl Future for WriteMessage where A: AsyncWrite { - type Item = (A, Vec); - type Error = io::Error; +impl Future for WriteMessage +where + A: AsyncWrite, +{ + type Item = (A, Vec); + type Error = io::Error; - fn poll(&mut self) -> Poll { - if let Some(err) = self.error.take() { - return Err(err); - } + fn poll(&mut self) -> Poll { + if let Some(err) = self.error.take() { + return Err(err); + } - self.future.poll() - } + self.future.poll() + } } diff --git a/secret-store/src/key_server_cluster/jobs/consensus_session.rs b/secret-store/src/key_server_cluster/jobs/consensus_session.rs index 7daf05d3b..58a6c2f69 100644 --- a/secret-store/src/key_server_cluster/jobs/consensus_session.rs +++ b/secret-store/src/key_server_cluster/jobs/consensus_session.rs @@ -14,29 +14,33 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use key_server_cluster::{ + jobs::job_session::{ + JobExecutor, JobPartialRequestAction, JobSession, JobSessionState, JobTransport, + }, + message::ConsensusMessage, + Error, NodeId, Requester, SessionMeta, +}; use std::collections::BTreeSet; -use key_server_cluster::{Error, NodeId, SessionMeta, Requester}; -use key_server_cluster::message::ConsensusMessage; -use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport, JobExecutor, JobPartialRequestAction}; /// Consensus session state. #[derive(Debug, Clone, Copy, PartialEq)] pub enum ConsensusSessionState { - /// Every node starts in this state. - WaitingForInitialization, - /// Consensus group is establishing. - EstablishingConsensus, - /// Consensus group is established. - /// Master node can start jobs dissemination. - /// Slave node waits for partial job requests. - ConsensusEstablished, - /// Master node waits for partial jobs responses. - WaitingForPartialResults, - /// Consensus session is completed successfully. - /// Master node can call result() to get computation result. - Finished, - /// Consensus session has failed with error. - Failed, + /// Every node starts in this state. + WaitingForInitialization, + /// Consensus group is establishing. + EstablishingConsensus, + /// Consensus group is established. + /// Master node can start jobs dissemination. + /// Slave node waits for partial job requests. + ConsensusEstablished, + /// Master node waits for partial jobs responses. + WaitingForPartialResults, + /// Consensus session is completed successfully. + /// Master node can call result() to get computation result. + Finished, + /// Consensus session has failed with error. 
+ Failed, } /// Consensus session consists of following states: @@ -44,289 +48,369 @@ pub enum ConsensusSessionState { /// 2) master node sends partial job requests to every member of consensus group /// 3) slave nodes are computing partial responses /// 4) master node computes result from partial responses -pub struct ConsensusSession, - ConsensusTransport: JobTransport, - ComputationExecutor: JobExecutor, - ComputationTransport: JobTransport +pub struct ConsensusSession< + ConsensusExecutor: JobExecutor, + ConsensusTransport: JobTransport< + PartialJobRequest = ConsensusExecutor::PartialJobRequest, + PartialJobResponse = ConsensusExecutor::PartialJobResponse, + >, + ComputationExecutor: JobExecutor, + ComputationTransport: JobTransport< + PartialJobRequest = ComputationExecutor::PartialJobRequest, + PartialJobResponse = ComputationExecutor::PartialJobResponse, + >, > { - /// Current session state. - state: ConsensusSessionState, - /// Session metadata. - meta: SessionMeta, - /// Consensus establish job. - consensus_job: JobSession, - /// Consensus group. - consensus_group: BTreeSet, - /// Computation job. - computation_job: Option>, + /// Current session state. + state: ConsensusSessionState, + /// Session metadata. + meta: SessionMeta, + /// Consensus establish job. + consensus_job: JobSession, + /// Consensus group. + consensus_group: BTreeSet, + /// Computation job. + computation_job: Option>, } /// Consensus session creation parameters. -pub struct ConsensusSessionParams, - ConsensusTransport: JobTransport +pub struct ConsensusSessionParams< + ConsensusExecutor: JobExecutor, + ConsensusTransport: JobTransport< + PartialJobRequest = ConsensusExecutor::PartialJobRequest, + PartialJobResponse = ConsensusExecutor::PartialJobResponse, + >, > { - /// Session metadata. - pub meta: SessionMeta, - /// ACL storage for access check. - pub consensus_executor: ConsensusExecutor, - /// Transport for consensus establish job. 
- pub consensus_transport: ConsensusTransport, + /// Session metadata. + pub meta: SessionMeta, + /// ACL storage for access check. + pub consensus_executor: ConsensusExecutor, + /// Transport for consensus establish job. + pub consensus_transport: ConsensusTransport, } -impl ConsensusSession - where ConsensusExecutor: JobExecutor>, - ConsensusTransport: JobTransport, - ComputationExecutor: JobExecutor, - ComputationTransport: JobTransport { - /// Create new consensus session. - pub fn new(params: ConsensusSessionParams) -> Result { - let consensus_job = JobSession::new(params.meta.clone(), params.consensus_executor, params.consensus_transport); - debug_assert!(consensus_job.state() == JobSessionState::Inactive); +impl + ConsensusSession< + ConsensusExecutor, + ConsensusTransport, + ComputationExecutor, + ComputationTransport, + > +where + ConsensusExecutor: JobExecutor>, + ConsensusTransport: JobTransport< + PartialJobRequest = ConsensusExecutor::PartialJobRequest, + PartialJobResponse = ConsensusExecutor::PartialJobResponse, + >, + ComputationExecutor: JobExecutor, + ComputationTransport: JobTransport< + PartialJobRequest = ComputationExecutor::PartialJobRequest, + PartialJobResponse = ComputationExecutor::PartialJobResponse, + >, +{ + /// Create new consensus session. + pub fn new( + params: ConsensusSessionParams, + ) -> Result { + let consensus_job = JobSession::new( + params.meta.clone(), + params.consensus_executor, + params.consensus_transport, + ); + debug_assert!(consensus_job.state() == JobSessionState::Inactive); - Ok(ConsensusSession { - state: ConsensusSessionState::WaitingForInitialization, - meta: params.meta, - consensus_job: consensus_job, - consensus_group: BTreeSet::new(), - computation_job: None, - }) - } + Ok(ConsensusSession { + state: ConsensusSessionState::WaitingForInitialization, + meta: params.meta, + consensus_job: consensus_job, + consensus_group: BTreeSet::new(), + computation_job: None, + }) + } - /// Get consensus job reference. 
- pub fn consensus_job(&self) -> &JobSession { - &self.consensus_job - } + /// Get consensus job reference. + pub fn consensus_job(&self) -> &JobSession { + &self.consensus_job + } - /// Get mutable consensus job reference. - pub fn consensus_job_mut(&mut self) -> &mut JobSession { - &mut self.consensus_job - } + /// Get mutable consensus job reference. + pub fn consensus_job_mut(&mut self) -> &mut JobSession { + &mut self.consensus_job + } - /// Get all nodes, which has not rejected consensus request. - pub fn consensus_non_rejected_nodes(&self) -> BTreeSet { - self.consensus_job.responses().iter() - .filter(|r| *r.1) - .map(|r| r.0) - .chain(self.consensus_job.requests()) - .filter(|n| **n != self.meta.self_node_id) - .cloned() - .collect() - } + /// Get all nodes, which has not rejected consensus request. + pub fn consensus_non_rejected_nodes(&self) -> BTreeSet { + self.consensus_job + .responses() + .iter() + .filter(|r| *r.1) + .map(|r| r.0) + .chain(self.consensus_job.requests()) + .filter(|n| **n != self.meta.self_node_id) + .cloned() + .collect() + } - /// Get computation job reference. - pub fn computation_job(&self) -> &JobSession { - self.computation_job.as_ref() - .expect("computation_job must only be called on master nodes") - } + /// Get computation job reference. + pub fn computation_job(&self) -> &JobSession { + self.computation_job + .as_ref() + .expect("computation_job must only be called on master nodes") + } - /// Get consensus session state. - pub fn state(&self) -> ConsensusSessionState { - self.state - } + /// Get consensus session state. + pub fn state(&self) -> ConsensusSessionState { + self.state + } - /// Get computation result. - pub fn result(&self) -> Result { - debug_assert!(self.meta.self_node_id == self.meta.master_node_id); - if self.state != ConsensusSessionState::Finished { - return Err(Error::InvalidStateForRequest); - } + /// Get computation result. 
+ pub fn result(&self) -> Result { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + if self.state != ConsensusSessionState::Finished { + return Err(Error::InvalidStateForRequest); + } - self.computation_job.as_ref() + self.computation_job.as_ref() .expect("we are on master node in finished state; computation_job is set on master node during initialization; qed") .result() - } + } - /// Initialize session on master node. - pub fn initialize(&mut self, nodes: BTreeSet) -> Result<(), Error> { - debug_assert!(self.meta.self_node_id == self.meta.master_node_id); - let initialization_result = self.consensus_job.initialize(nodes, None, false); - self.state = ConsensusSessionState::EstablishingConsensus; - self.process_result(initialization_result.map(|_| ())) - } + /// Initialize session on master node. + pub fn initialize(&mut self, nodes: BTreeSet) -> Result<(), Error> { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + let initialization_result = self.consensus_job.initialize(nodes, None, false); + self.state = ConsensusSessionState::EstablishingConsensus; + self.process_result(initialization_result.map(|_| ())) + } - /// Process consensus request message. - pub fn on_consensus_partial_request(&mut self, sender: &NodeId, request: ConsensusExecutor::PartialJobRequest) -> Result<(), Error> { - let consensus_result = self.consensus_job.on_partial_request(sender, request); - self.process_result(consensus_result.map(|_| ())) - } + /// Process consensus request message. + pub fn on_consensus_partial_request( + &mut self, + sender: &NodeId, + request: ConsensusExecutor::PartialJobRequest, + ) -> Result<(), Error> { + let consensus_result = self.consensus_job.on_partial_request(sender, request); + self.process_result(consensus_result.map(|_| ())) + } - /// Process consensus message response. 
- pub fn on_consensus_partial_response(&mut self, sender: &NodeId, response: bool) -> Result<(), Error> { - let consensus_result = self.consensus_job.on_partial_response(sender, response); - self.process_result(consensus_result) - } + /// Process consensus message response. + pub fn on_consensus_partial_response( + &mut self, + sender: &NodeId, + response: bool, + ) -> Result<(), Error> { + let consensus_result = self.consensus_job.on_partial_response(sender, response); + self.process_result(consensus_result) + } - /// Select nodes for processing partial requests. - pub fn select_consensus_group(&mut self) -> Result<&BTreeSet, Error> { - debug_assert!(self.meta.self_node_id == self.meta.master_node_id); - if self.state != ConsensusSessionState::ConsensusEstablished { - return Err(Error::InvalidStateForRequest); - } + /// Select nodes for processing partial requests. + pub fn select_consensus_group(&mut self) -> Result<&BTreeSet, Error> { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + if self.state != ConsensusSessionState::ConsensusEstablished { + return Err(Error::InvalidStateForRequest); + } - if self.consensus_group.is_empty() { - let consensus_group = self.consensus_job.result()?; - let is_self_in_consensus = consensus_group.contains(&self.meta.self_node_id); - self.consensus_group = consensus_group.into_iter().take(self.meta.threshold + 1).collect(); + if self.consensus_group.is_empty() { + let consensus_group = self.consensus_job.result()?; + let is_self_in_consensus = consensus_group.contains(&self.meta.self_node_id); + self.consensus_group = consensus_group + .into_iter() + .take(self.meta.threshold + 1) + .collect(); - if is_self_in_consensus { - self.consensus_group.remove(&self.meta.master_node_id); - self.consensus_group.insert(self.meta.master_node_id.clone()); - } - } + if is_self_in_consensus { + self.consensus_group.remove(&self.meta.master_node_id); + self.consensus_group + .insert(self.meta.master_node_id.clone()); + } + } 
- Ok(&self.consensus_group) - } + Ok(&self.consensus_group) + } - /// Disseminate jobs from master node. - pub fn disseminate_jobs(&mut self, executor: ComputationExecutor, transport: ComputationTransport, broadcast_self_response: bool) -> Result, Error> { - let consensus_group = self.select_consensus_group()?.clone(); - self.consensus_group.clear(); + /// Disseminate jobs from master node. + pub fn disseminate_jobs( + &mut self, + executor: ComputationExecutor, + transport: ComputationTransport, + broadcast_self_response: bool, + ) -> Result, Error> { + let consensus_group = self.select_consensus_group()?.clone(); + self.consensus_group.clear(); - let mut computation_job = JobSession::new(self.meta.clone(), executor, transport); - let computation_result = computation_job.initialize(consensus_group, None, broadcast_self_response); - self.computation_job = Some(computation_job); - self.state = ConsensusSessionState::WaitingForPartialResults; - match computation_result { - Ok(computation_result) => self.process_result(Ok(())).map(|_| computation_result), - Err(error) => Err(self.process_result(Err(error)).unwrap_err()), - } - } + let mut computation_job = JobSession::new(self.meta.clone(), executor, transport); + let computation_result = + computation_job.initialize(consensus_group, None, broadcast_self_response); + self.computation_job = Some(computation_job); + self.state = ConsensusSessionState::WaitingForPartialResults; + match computation_result { + Ok(computation_result) => self.process_result(Ok(())).map(|_| computation_result), + Err(error) => Err(self.process_result(Err(error)).unwrap_err()), + } + } - /// Process job request on slave node. 
- pub fn on_job_request(&mut self, node: &NodeId, request: ComputationExecutor::PartialJobRequest, executor: ComputationExecutor, transport: ComputationTransport) -> Result, Error> { - if &self.meta.master_node_id != node { - return Err(Error::InvalidMessage); - } - if self.state != ConsensusSessionState::ConsensusEstablished { - return Err(Error::InvalidStateForRequest); - } + /// Process job request on slave node. + pub fn on_job_request( + &mut self, + node: &NodeId, + request: ComputationExecutor::PartialJobRequest, + executor: ComputationExecutor, + transport: ComputationTransport, + ) -> Result, Error> { + if &self.meta.master_node_id != node { + return Err(Error::InvalidMessage); + } + if self.state != ConsensusSessionState::ConsensusEstablished { + return Err(Error::InvalidStateForRequest); + } - JobSession::new(self.meta.clone(), executor, transport).on_partial_request(node, request) - } + JobSession::new(self.meta.clone(), executor, transport).on_partial_request(node, request) + } - /// Process job response on slave node. - pub fn on_job_response(&mut self, node: &NodeId, response: ComputationExecutor::PartialJobResponse) -> Result<(), Error> { - if self.state != ConsensusSessionState::WaitingForPartialResults { - return Err(Error::InvalidStateForRequest); - } + /// Process job response on slave node. 
+ pub fn on_job_response( + &mut self, + node: &NodeId, + response: ComputationExecutor::PartialJobResponse, + ) -> Result<(), Error> { + if self.state != ConsensusSessionState::WaitingForPartialResults { + return Err(Error::InvalidStateForRequest); + } - let computation_result = self.computation_job.as_mut() - .expect("WaitingForPartialResults is only set when computation_job is created; qed") - .on_partial_response(node, response); + let computation_result = self + .computation_job + .as_mut() + .expect("WaitingForPartialResults is only set when computation_job is created; qed") + .on_partial_response(node, response); - self.process_result(computation_result) - } + self.process_result(computation_result) + } - /// When session is completed on slave node. - pub fn on_session_completed(&mut self, node: &NodeId) -> Result<(), Error> { - if node != &self.meta.master_node_id { - return Err(Error::InvalidMessage); - } - if self.state != ConsensusSessionState::ConsensusEstablished { - return Err(Error::InvalidStateForRequest); - } + /// When session is completed on slave node. + pub fn on_session_completed(&mut self, node: &NodeId) -> Result<(), Error> { + if node != &self.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if self.state != ConsensusSessionState::ConsensusEstablished { + return Err(Error::InvalidStateForRequest); + } - self.state = ConsensusSessionState::Finished; + self.state = ConsensusSessionState::Finished; - Ok(()) - } + Ok(()) + } - /// When error is received from node. 
- pub fn on_node_error(&mut self, node: &NodeId, error: Error) -> Result { - let is_self_master = self.meta.master_node_id == self.meta.self_node_id; - let is_node_master = self.meta.master_node_id == *node; - let (is_restart_needed, timeout_result) = match self.state { - ConsensusSessionState::WaitingForInitialization if is_self_master => { - // it is strange to receive error before session is initialized && slave doesn't know access_key - // => unreachable - self.state = ConsensusSessionState::Failed; - (false, Err(Error::ConsensusUnreachable)) - } - ConsensusSessionState::WaitingForInitialization if is_node_master => { - // error from master node before establishing consensus - // => unreachable - self.state = ConsensusSessionState::Failed; - (false, Err(if !error.is_non_fatal() { - Error::ConsensusUnreachable - } else { - Error::ConsensusTemporaryUnreachable - })) - }, - ConsensusSessionState::EstablishingConsensus => { - debug_assert!(is_self_master); + /// When error is received from node. 
+ pub fn on_node_error(&mut self, node: &NodeId, error: Error) -> Result { + let is_self_master = self.meta.master_node_id == self.meta.self_node_id; + let is_node_master = self.meta.master_node_id == *node; + let (is_restart_needed, timeout_result) = match self.state { + ConsensusSessionState::WaitingForInitialization if is_self_master => { + // it is strange to receive error before session is initialized && slave doesn't know access_key + // => unreachable + self.state = ConsensusSessionState::Failed; + (false, Err(Error::ConsensusUnreachable)) + } + ConsensusSessionState::WaitingForInitialization if is_node_master => { + // error from master node before establishing consensus + // => unreachable + self.state = ConsensusSessionState::Failed; + ( + false, + Err(if !error.is_non_fatal() { + Error::ConsensusUnreachable + } else { + Error::ConsensusTemporaryUnreachable + }), + ) + } + ConsensusSessionState::EstablishingConsensus => { + debug_assert!(is_self_master); - // consensus still can be established - // => try to live without this node - (false, self.consensus_job.on_node_error(node, error)) - }, - ConsensusSessionState::ConsensusEstablished => { - // we could try to continue without this node, if enough nodes left - (false, self.consensus_job.on_node_error(node, error)) - }, - ConsensusSessionState::WaitingForPartialResults => { - // check if *current* computation job can continue without this node - let is_computation_node = self.computation_job.as_mut() + // consensus still can be established + // => try to live without this node + (false, self.consensus_job.on_node_error(node, error)) + } + ConsensusSessionState::ConsensusEstablished => { + // we could try to continue without this node, if enough nodes left + (false, self.consensus_job.on_node_error(node, error)) + } + ConsensusSessionState::WaitingForPartialResults => { + // check if *current* computation job can continue without this node + let is_computation_node = self.computation_job.as_mut() 
.expect("WaitingForPartialResults state is only set when computation_job is created; qed") .on_node_error(node, error.clone()) .is_err(); - if !is_computation_node { - // it is not used by current computation job - // => no restart required - (false, Ok(())) - } else { - // it is used by current computation job - // => restart is required if there are still enough nodes - self.consensus_group.clear(); - self.state = ConsensusSessionState::EstablishingConsensus; + if !is_computation_node { + // it is not used by current computation job + // => no restart required + (false, Ok(())) + } else { + // it is used by current computation job + // => restart is required if there are still enough nodes + self.consensus_group.clear(); + self.state = ConsensusSessionState::EstablishingConsensus; - let consensus_result = self.consensus_job.on_node_error(node, error); - let is_consensus_established = self.consensus_job.state() == JobSessionState::Finished; - (is_consensus_established, consensus_result) - } - }, - // in all other cases - just ignore error - ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::Failed | ConsensusSessionState::Finished => (false, Ok(())), - }; - self.process_result(timeout_result)?; - Ok(is_restart_needed) - } + let consensus_result = self.consensus_job.on_node_error(node, error); + let is_consensus_established = + self.consensus_job.state() == JobSessionState::Finished; + (is_consensus_established, consensus_result) + } + } + // in all other cases - just ignore error + ConsensusSessionState::WaitingForInitialization + | ConsensusSessionState::Failed + | ConsensusSessionState::Finished => (false, Ok(())), + }; + self.process_result(timeout_result)?; + Ok(is_restart_needed) + } - /// When session is timeouted. 
- pub fn on_session_timeout(&mut self) -> Result { - match self.state { - // if we are waiting for results from slaves, there is a chance to send request to other nodes subset => fall through - ConsensusSessionState::WaitingForPartialResults => (), - // in some states this error is fatal - ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::EstablishingConsensus | ConsensusSessionState::ConsensusEstablished => { - let _ = self.consensus_job.on_session_timeout(); + /// When session is timeouted. + pub fn on_session_timeout(&mut self) -> Result { + match self.state { + // if we are waiting for results from slaves, there is a chance to send request to other nodes subset => fall through + ConsensusSessionState::WaitingForPartialResults => (), + // in some states this error is fatal + ConsensusSessionState::WaitingForInitialization + | ConsensusSessionState::EstablishingConsensus + | ConsensusSessionState::ConsensusEstablished => { + let _ = self.consensus_job.on_session_timeout(); - self.consensus_group.clear(); - self.state = ConsensusSessionState::EstablishingConsensus; - return self.process_result(Err(Error::ConsensusTemporaryUnreachable)).map(|_| unreachable!()); - }, - // in all other cases - just ignore error - ConsensusSessionState::Finished | ConsensusSessionState::Failed => return Ok(false), - }; + self.consensus_group.clear(); + self.state = ConsensusSessionState::EstablishingConsensus; + return self + .process_result(Err(Error::ConsensusTemporaryUnreachable)) + .map(|_| unreachable!()); + } + // in all other cases - just ignore error + ConsensusSessionState::Finished | ConsensusSessionState::Failed => return Ok(false), + }; - let timeouted_nodes = self.computation_job.as_ref() - .expect("WaitingForPartialResults state is only set when computation_job is created; qed") - .requests() - .clone(); - assert!(!timeouted_nodes.is_empty()); // timeout should not ever happen if no requests are active && we are waiting for responses + let 
timeouted_nodes = self + .computation_job + .as_ref() + .expect( + "WaitingForPartialResults state is only set when computation_job is created; qed", + ) + .requests() + .clone(); + assert!(!timeouted_nodes.is_empty()); // timeout should not ever happen if no requests are active && we are waiting for responses - self.consensus_group.clear(); - for timeouted_node in timeouted_nodes { - let timeout_result = self.consensus_job.on_node_error(&timeouted_node, Error::NodeDisconnected); - self.state = ConsensusSessionState::EstablishingConsensus; - self.process_result(timeout_result)?; - } + self.consensus_group.clear(); + for timeouted_node in timeouted_nodes { + let timeout_result = self + .consensus_job + .on_node_error(&timeouted_node, Error::NodeDisconnected); + self.state = ConsensusSessionState::EstablishingConsensus; + self.process_result(timeout_result)?; + } - Ok(self.state == ConsensusSessionState::ConsensusEstablished) - } + Ok(self.state == ConsensusSessionState::ConsensusEstablished) + } - /// Process result of job. - fn process_result(&mut self, result: Result<(), Error>) -> Result<(), Error> { - match self.state { + /// Process result of job. + fn process_result(&mut self, result: Result<(), Error>) -> Result<(), Error> { + match self.state { ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::EstablishingConsensus | ConsensusSessionState::ConsensusEstablished => match self.consensus_job.state() { JobSessionState::Finished => self.state = ConsensusSessionState::ConsensusEstablished, JobSessionState::Failed => self.state = ConsensusSessionState::Failed, @@ -342,452 +426,970 @@ impl (), } - result - } + result + } } -impl ConsensusSession - where ConsensusExecutor: JobExecutor>, - ConsensusTransport: JobTransport, - ComputationExecutor: JobExecutor, - ComputationTransport: JobTransport { - /// Process basic consensus message. 
- pub fn on_consensus_message(&mut self, sender: &NodeId, message: &ConsensusMessage) -> Result<(), Error> { - let consensus_result = match message { - - &ConsensusMessage::InitializeConsensusSession(ref message) => - self.consensus_job.on_partial_request(sender, message.requester.clone().into()).map(|_| ()), - &ConsensusMessage::ConfirmConsensusInitialization(ref message) => - self.consensus_job.on_partial_response(sender, message.is_confirmed), - }; - self.process_result(consensus_result) - } +impl + ConsensusSession< + ConsensusExecutor, + ConsensusTransport, + ComputationExecutor, + ComputationTransport, + > +where + ConsensusExecutor: JobExecutor< + PartialJobRequest = Requester, + PartialJobResponse = bool, + JobResponse = BTreeSet, + >, + ConsensusTransport: JobTransport< + PartialJobRequest = ConsensusExecutor::PartialJobRequest, + PartialJobResponse = ConsensusExecutor::PartialJobResponse, + >, + ComputationExecutor: JobExecutor, + ComputationTransport: JobTransport< + PartialJobRequest = ComputationExecutor::PartialJobRequest, + PartialJobResponse = ComputationExecutor::PartialJobResponse, + >, +{ + /// Process basic consensus message. 
+ pub fn on_consensus_message( + &mut self, + sender: &NodeId, + message: &ConsensusMessage, + ) -> Result<(), Error> { + let consensus_result = match message { + &ConsensusMessage::InitializeConsensusSession(ref message) => self + .consensus_job + .on_partial_request(sender, message.requester.clone().into()) + .map(|_| ()), + &ConsensusMessage::ConfirmConsensusInitialization(ref message) => self + .consensus_job + .on_partial_response(sender, message.is_confirmed), + }; + self.process_result(consensus_result) + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use ethkey::{KeyPair, Random, Generator, sign, public_to_address}; - use key_server_cluster::{Error, NodeId, SessionId, Requester, DummyAclStorage}; - use key_server_cluster::message::{ConsensusMessage, InitializeConsensusSession, ConfirmConsensusInitialization}; - use key_server_cluster::jobs::job_session::tests::{make_master_session_meta, make_slave_session_meta, SquaredSumJobExecutor, DummyJobTransport}; - use key_server_cluster::jobs::key_access_job::KeyAccessJob; - use super::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}; + use super::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}; + use ethkey::{public_to_address, sign, Generator, KeyPair, Random}; + use key_server_cluster::{ + jobs::{ + job_session::tests::{ + make_master_session_meta, make_slave_session_meta, DummyJobTransport, + SquaredSumJobExecutor, + }, + key_access_job::KeyAccessJob, + }, + message::{ConfirmConsensusInitialization, ConsensusMessage, InitializeConsensusSession}, + DummyAclStorage, Error, NodeId, Requester, SessionId, + }; + use std::sync::Arc; - type SquaredSumConsensusSession = ConsensusSession, SquaredSumJobExecutor, DummyJobTransport>; + type SquaredSumConsensusSession = ConsensusSession< + KeyAccessJob, + DummyJobTransport, + SquaredSumJobExecutor, + DummyJobTransport, + >; - fn make_master_consensus_session(threshold: usize, requester: Option, acl_storage: Option) -> 
SquaredSumConsensusSession { - let secret = requester.map(|kp| kp.secret().clone()).unwrap_or(Random.generate().unwrap().secret().clone()); - SquaredSumConsensusSession::new(ConsensusSessionParams { - meta: make_master_session_meta(threshold), - consensus_executor: KeyAccessJob::new_on_master(SessionId::default(), Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())), - sign(&secret, &SessionId::default()).unwrap().into()), - consensus_transport: DummyJobTransport::default(), - }).unwrap() - } + fn make_master_consensus_session( + threshold: usize, + requester: Option, + acl_storage: Option, + ) -> SquaredSumConsensusSession { + let secret = requester + .map(|kp| kp.secret().clone()) + .unwrap_or(Random.generate().unwrap().secret().clone()); + SquaredSumConsensusSession::new(ConsensusSessionParams { + meta: make_master_session_meta(threshold), + consensus_executor: KeyAccessJob::new_on_master( + SessionId::default(), + Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())), + sign(&secret, &SessionId::default()).unwrap().into(), + ), + consensus_transport: DummyJobTransport::default(), + }) + .unwrap() + } - fn make_slave_consensus_session(threshold: usize, acl_storage: Option) -> SquaredSumConsensusSession { - SquaredSumConsensusSession::new(ConsensusSessionParams { - meta: make_slave_session_meta(threshold), - consensus_executor: KeyAccessJob::new_on_slave(SessionId::default(), Arc::new(acl_storage.unwrap_or(DummyAclStorage::default()))), - consensus_transport: DummyJobTransport::default(), - }).unwrap() - } + fn make_slave_consensus_session( + threshold: usize, + acl_storage: Option, + ) -> SquaredSumConsensusSession { + SquaredSumConsensusSession::new(ConsensusSessionParams { + meta: make_slave_session_meta(threshold), + consensus_executor: KeyAccessJob::new_on_slave( + SessionId::default(), + Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())), + ), + consensus_transport: DummyJobTransport::default(), + }) + .unwrap() + } - #[test] - fn 
consensus_session_consensus_is_not_reached_when_initializes_with_non_zero_threshold() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - } + #[test] + fn consensus_session_consensus_is_not_reached_when_initializes_with_non_zero_threshold() { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::EstablishingConsensus + ); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } - #[test] - fn consensus_session_consensus_is_reached_when_initializes_with_zero_threshold() { - let mut session = make_master_consensus_session(0, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - } + #[test] + fn consensus_session_consensus_is_reached_when_initializes_with_zero_threshold() { + let mut session = make_master_consensus_session(0, None, None); + session + .initialize(vec![NodeId::from(1), 
NodeId::from(2)].into_iter().collect()) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } - #[test] - fn consensus_session_consensus_is_not_reached_when_initializes_with_zero_threshold_and_master_rejects() { - let requester = Random.generate().unwrap(); - let acl_storage = DummyAclStorage::default(); - acl_storage.prohibit(public_to_address(requester.public()), SessionId::default()); + #[test] + fn consensus_session_consensus_is_not_reached_when_initializes_with_zero_threshold_and_master_rejects( + ) { + let requester = Random.generate().unwrap(); + let acl_storage = DummyAclStorage::default(); + acl_storage.prohibit(public_to_address(requester.public()), SessionId::default()); - let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage)); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - } + let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage)); + session + .initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::EstablishingConsensus + ); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!(session.state(), 
ConsensusSessionState::ConsensusEstablished); + } - #[test] - fn consensus_session_consensus_is_failed_by_master_node() { - let requester = Random.generate().unwrap(); - let acl_storage = DummyAclStorage::default(); - acl_storage.prohibit(public_to_address(requester.public()), SessionId::default()); + #[test] + fn consensus_session_consensus_is_failed_by_master_node() { + let requester = Random.generate().unwrap(); + let acl_storage = DummyAclStorage::default(); + acl_storage.prohibit(public_to_address(requester.public()), SessionId::default()); - let mut session = make_master_consensus_session(1, Some(requester), Some(acl_storage)); - assert_eq!(session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap_err(), Error::ConsensusUnreachable); - assert_eq!(session.state(), ConsensusSessionState::Failed); - } + let mut session = make_master_consensus_session(1, Some(requester), Some(acl_storage)); + assert_eq!( + session + .initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap_err(), + Error::ConsensusUnreachable + ); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } - #[test] - fn consensus_session_consensus_is_failed_by_slave_node() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); - assert_eq!(session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: false, - })).unwrap_err(), Error::ConsensusUnreachable); - assert_eq!(session.state(), ConsensusSessionState::Failed); - } + #[test] + fn consensus_session_consensus_is_failed_by_slave_node() { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap(); + assert_eq!( + 
session.state(), + ConsensusSessionState::EstablishingConsensus + ); + assert_eq!( + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization( + ConfirmConsensusInitialization { + is_confirmed: false, + } + ) + ) + .unwrap_err(), + Error::ConsensusUnreachable + ); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } - #[test] - fn consensus_session_job_dissemination_fails_if_consensus_is_not_reached() { - let mut session = make_master_consensus_session(1, None, None); - assert_eq!(session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap_err(), Error::InvalidStateForRequest); - } + #[test] + fn consensus_session_job_dissemination_fails_if_consensus_is_not_reached() { + let mut session = make_master_consensus_session(1, None, None); + assert_eq!( + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap_err(), + Error::InvalidStateForRequest + ); + } - #[test] - fn consensus_session_job_dissemination_selects_master_node_if_agreed() { - let mut session = make_master_consensus_session(0, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::Finished); - assert!(session.computation_job().responses().contains_key(&NodeId::from(1))); - } + #[test] + fn consensus_session_job_dissemination_selects_master_node_if_agreed() { + let mut session = make_master_consensus_session(0, None, None); + session + 
.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::Finished); + assert!(session + .computation_job() + .responses() + .contains_key(&NodeId::from(1))); + } - #[test] - fn consensus_session_job_dissemination_does_not_select_master_node_if_rejected() { - let requester = Random.generate().unwrap(); - let acl_storage = DummyAclStorage::default(); - acl_storage.prohibit(public_to_address(requester.public()), SessionId::default()); + #[test] + fn consensus_session_job_dissemination_does_not_select_master_node_if_rejected() { + let requester = Random.generate().unwrap(); + let acl_storage = DummyAclStorage::default(); + acl_storage.prohibit(public_to_address(requester.public()), SessionId::default()); - let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage)); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); - assert!(!session.computation_job().responses().contains_key(&NodeId::from(1))); - 
} + let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage)); + session + .initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::EstablishingConsensus + ); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); + assert!(!session + .computation_job() + .responses() + .contains_key(&NodeId::from(1))); + } - #[test] - fn consensus_session_computation_request_is_rejected_when_received_by_master_node() { - let mut session = make_master_consensus_session(0, None, None); - assert_eq!(session.on_job_request(&NodeId::from(2), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage); - } + #[test] + fn consensus_session_computation_request_is_rejected_when_received_by_master_node() { + let mut session = make_master_consensus_session(0, None, None); + assert_eq!( + session + .on_job_request( + &NodeId::from(2), + 2, + SquaredSumJobExecutor, + DummyJobTransport::default() + ) + .unwrap_err(), + Error::InvalidMessage + ); + } - #[test] - fn consensus_session_computation_request_is_rejected_when_received_before_consensus_is_established() { - let mut session = make_slave_consensus_session(0, None); - assert_eq!(session.on_job_request(&NodeId::from(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidStateForRequest); - } + #[test] + fn consensus_session_computation_request_is_rejected_when_received_before_consensus_is_established( + ) { + let mut session = make_slave_consensus_session(0, None); + 
assert_eq!( + session + .on_job_request( + &NodeId::from(1), + 2, + SquaredSumJobExecutor, + DummyJobTransport::default() + ) + .unwrap_err(), + Error::InvalidStateForRequest + ); + } - #[test] - fn consensus_session_computation_request_is_ignored_when_wrong() { - let mut session = make_slave_consensus_session(0, None); - assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization); - session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { - requester: Requester::Signature(sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).into(), - version: Default::default(), - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - assert_eq!(session.on_job_request(&NodeId::from(1), 20, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - } + #[test] + fn consensus_session_computation_request_is_ignored_when_wrong() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForInitialization + ); + session + .on_consensus_message( + &NodeId::from(1), + &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { + requester: Requester::Signature( + sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + ) + .into(), + version: Default::default(), + }), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + assert_eq!( + session + .on_job_request( + &NodeId::from(1), + 20, + SquaredSumJobExecutor, + DummyJobTransport::default() + ) + .unwrap_err(), + Error::InvalidMessage + ); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } - #[test] - fn consensus_session_computation_request_is_processed_when_correct() { - let mut session = make_slave_consensus_session(0, 
None); - assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization); - session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { - requester: Requester::Signature(sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).into(), - version: Default::default(), - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.on_job_request(&NodeId::from(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - } + #[test] + fn consensus_session_computation_request_is_processed_when_correct() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForInitialization + ); + session + .on_consensus_message( + &NodeId::from(1), + &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { + requester: Requester::Signature( + sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + ) + .into(), + version: Default::default(), + }), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session + .on_job_request( + &NodeId::from(1), + 2, + SquaredSumJobExecutor, + DummyJobTransport::default(), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } - #[test] - fn consensus_session_computation_response_is_ignored_when_consensus_is_not_reached() { - let mut session = make_master_consensus_session(1, None, None); - assert_eq!(session.on_job_response(&NodeId::from(2), 4).unwrap_err(), Error::InvalidStateForRequest); - } + #[test] + fn consensus_session_computation_response_is_ignored_when_consensus_is_not_reached() { + let mut session = make_master_consensus_session(1, None, None); + assert_eq!( + session.on_job_response(&NodeId::from(2), 4).unwrap_err(), + 
Error::InvalidStateForRequest + ); + } - #[test] - fn consessus_session_completion_is_ignored_when_received_from_non_master_node() { - let mut session = make_slave_consensus_session(0, None); - assert_eq!(session.on_session_completed(&NodeId::from(3)).unwrap_err(), Error::InvalidMessage); - } + #[test] + fn consessus_session_completion_is_ignored_when_received_from_non_master_node() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!( + session.on_session_completed(&NodeId::from(3)).unwrap_err(), + Error::InvalidMessage + ); + } - #[test] - fn consessus_session_completion_is_ignored_when_consensus_is_not_established() { - let mut session = make_slave_consensus_session(0, None); - assert_eq!(session.on_session_completed(&NodeId::from(1)).unwrap_err(), Error::InvalidStateForRequest); - } + #[test] + fn consessus_session_completion_is_ignored_when_consensus_is_not_established() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!( + session.on_session_completed(&NodeId::from(1)).unwrap_err(), + Error::InvalidStateForRequest + ); + } - #[test] - fn consessus_session_completion_is_accepted() { - let mut session = make_slave_consensus_session(0, None); - session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { - requester: Requester::Signature(sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).into(), - version: Default::default(), - })).unwrap(); - session.on_session_completed(&NodeId::from(1)).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::Finished); - } + #[test] + fn consessus_session_completion_is_accepted() { + let mut session = make_slave_consensus_session(0, None); + session + .on_consensus_message( + &NodeId::from(1), + &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { + requester: Requester::Signature( + sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + ) + 
.into(), + version: Default::default(), + }), + ) + .unwrap(); + session.on_session_completed(&NodeId::from(1)).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::Finished); + } - #[test] - fn consensus_session_fails_if_node_error_received_by_uninitialized_master() { - let mut session = make_master_consensus_session(0, None, None); - assert_eq!(session.on_node_error(&NodeId::from(2), Error::AccessDenied), Err(Error::ConsensusUnreachable)); - assert_eq!(session.state(), ConsensusSessionState::Failed); - } + #[test] + fn consensus_session_fails_if_node_error_received_by_uninitialized_master() { + let mut session = make_master_consensus_session(0, None, None); + assert_eq!( + session.on_node_error(&NodeId::from(2), Error::AccessDenied), + Err(Error::ConsensusUnreachable) + ); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } - #[test] - fn consensus_session_fails_if_node_error_received_by_uninitialized_slave_from_master() { - let mut session = make_slave_consensus_session(0, None); - assert_eq!(session.on_node_error(&NodeId::from(1), Error::AccessDenied), Err(Error::ConsensusUnreachable)); - assert_eq!(session.state(), ConsensusSessionState::Failed); - } + #[test] + fn consensus_session_fails_if_node_error_received_by_uninitialized_slave_from_master() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!( + session.on_node_error(&NodeId::from(1), Error::AccessDenied), + Err(Error::ConsensusUnreachable) + ); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } - #[test] - fn consensus_sessions_fails_with_temp_error_if_node_error_received_by_uninitialized_slave_from_master() { - let mut session = make_slave_consensus_session(0, None); - assert_eq!(session.on_node_error(&NodeId::from(1), Error::NodeDisconnected).unwrap_err(), Error::ConsensusTemporaryUnreachable); - } + #[test] + fn consensus_sessions_fails_with_temp_error_if_node_error_received_by_uninitialized_slave_from_master( + ) { + let mut session = 
make_slave_consensus_session(0, None); + assert_eq!( + session + .on_node_error(&NodeId::from(1), Error::NodeDisconnected) + .unwrap_err(), + Error::ConsensusTemporaryUnreachable + ); + } - #[test] - fn consensus_session_continues_if_node_error_received_by_master_during_establish_and_enough_nodes_left() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap(); - assert_eq!(session.on_node_error(&NodeId::from(2), Error::AccessDenied), Ok(false)); - } + #[test] + fn consensus_session_continues_if_node_error_received_by_master_during_establish_and_enough_nodes_left( + ) { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize( + vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)] + .into_iter() + .collect(), + ) + .unwrap(); + assert_eq!( + session.on_node_error(&NodeId::from(2), Error::AccessDenied), + Ok(false) + ); + } - #[test] - fn consensus_session_fails_if_node_error_received_by_master_during_establish_and_not_enough_nodes_left() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - assert_eq!(session.on_node_error(&NodeId::from(2), Error::AccessDenied), Err(Error::ConsensusUnreachable)); - assert_eq!(session.state(), ConsensusSessionState::Failed); - } + #[test] + fn consensus_session_fails_if_node_error_received_by_master_during_establish_and_not_enough_nodes_left( + ) { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap(); + assert_eq!( + session.on_node_error(&NodeId::from(2), Error::AccessDenied), + Err(Error::ConsensusUnreachable) + ); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } - #[test] - fn 
consensus_session_continues_if_node2_error_received_by_master_after_consensus_established_and_enough_nodes_left() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap(); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.on_node_error(&NodeId::from(2), Error::AccessDenied), Ok(false)); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - } + #[test] + fn consensus_session_continues_if_node2_error_received_by_master_after_consensus_established_and_enough_nodes_left( + ) { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize( + vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)] + .into_iter() + .collect(), + ) + .unwrap(); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!( + session.on_node_error(&NodeId::from(2), Error::AccessDenied), + Ok(false) + ); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } - #[test] - fn consensus_session_continues_if_node3_error_received_by_master_after_consensus_established_and_enough_nodes_left() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap(); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.on_node_error(&NodeId::from(3), Error::AccessDenied), Ok(false)); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - } + #[test] + fn 
consensus_session_continues_if_node3_error_received_by_master_after_consensus_established_and_enough_nodes_left( + ) { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize( + vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)] + .into_iter() + .collect(), + ) + .unwrap(); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!( + session.on_node_error(&NodeId::from(3), Error::AccessDenied), + Ok(false) + ); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } - #[test] - fn consensus_session_fails_if_node_error_received_by_master_after_consensus_established_and_not_enough_nodes_left() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.on_node_error(&NodeId::from(2), Error::AccessDenied), Err(Error::ConsensusUnreachable)); - assert_eq!(session.state(), ConsensusSessionState::Failed); - } + #[test] + fn consensus_session_fails_if_node_error_received_by_master_after_consensus_established_and_not_enough_nodes_left( + ) { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap(); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!( + session.on_node_error(&NodeId::from(2), Error::AccessDenied), + Err(Error::ConsensusUnreachable) + ); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } - #[test] - fn 
consensus_session_continues_if_node_error_received_from_slave_not_participating_in_computation() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap(); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.on_node_error(&NodeId::from(3), Error::AccessDenied), Ok(false)); - assert_eq!(session.on_node_error(&NodeId::from(4), Error::AccessDenied), Ok(false)); - assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); - } + #[test] + fn consensus_session_continues_if_node_error_received_from_slave_not_participating_in_computation( + ) { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize( + vec![ + NodeId::from(1), + NodeId::from(2), + NodeId::from(3), + NodeId::from(4), + ] + .into_iter() + .collect(), + ) + .unwrap(); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + session + .on_consensus_message( + &NodeId::from(3), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.on_node_error(&NodeId::from(3), Error::AccessDenied), + Ok(false) + ); + assert_eq!( + session.on_node_error(&NodeId::from(4), Error::AccessDenied), + Ok(false) + ); + 
assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); + } - #[test] - fn consensus_session_restarts_if_node_error_received_from_slave_participating_in_computation_and_enough_nodes_left() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap(); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + #[test] + fn consensus_session_restarts_if_node_error_received_from_slave_participating_in_computation_and_enough_nodes_left( + ) { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize( + vec![ + NodeId::from(1), + NodeId::from(2), + NodeId::from(3), + NodeId::from(4), + ] + .into_iter() + .collect(), + ) + .unwrap(); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); - session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.on_node_error(&NodeId::from(2), Error::AccessDenied), Ok(true)); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.state(), 
ConsensusSessionState::WaitingForPartialResults); + session + .on_consensus_message( + &NodeId::from(3), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!( + session.on_node_error(&NodeId::from(2), Error::AccessDenied), + Ok(true) + ); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); - assert_eq!(session.on_node_error(&NodeId::from(3), Error::AccessDenied), Ok(false)); - assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); - } + assert_eq!( + session.on_node_error(&NodeId::from(3), Error::AccessDenied), + Ok(false) + ); + assert_eq!( + session.state(), + ConsensusSessionState::EstablishingConsensus + ); + } - #[test] - fn consensus_session_fails_if_node_error_received_from_slave_participating_in_computation_and_not_enough_nodes_left() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.on_node_error(&NodeId::from(2), Error::AccessDenied), Err(Error::ConsensusUnreachable)); - assert_eq!(session.state(), ConsensusSessionState::Failed); - } + #[test] + fn consensus_session_fails_if_node_error_received_from_slave_participating_in_computation_and_not_enough_nodes_left( + ) { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap(); + session + 
.on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.on_node_error(&NodeId::from(2), Error::AccessDenied), + Err(Error::ConsensusUnreachable) + ); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } - #[test] - fn consensus_session_fails_if_uninitialized_session_timeouts() { - let mut session = make_master_consensus_session(1, None, None); - assert_eq!(session.on_session_timeout(), Err(Error::ConsensusTemporaryUnreachable)); - } + #[test] + fn consensus_session_fails_if_uninitialized_session_timeouts() { + let mut session = make_master_consensus_session(1, None, None); + assert_eq!( + session.on_session_timeout(), + Err(Error::ConsensusTemporaryUnreachable) + ); + } - #[test] - fn consensus_session_continues_if_session_timeouts_and_enough_nodes_left_for_computation() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap(); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); + #[test] + fn consensus_session_continues_if_session_timeouts_and_enough_nodes_left_for_computation() { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize( + vec![ + NodeId::from(1), + NodeId::from(2), + NodeId::from(3), + NodeId::from(4), + ] + .into_iter() + .collect(), + ) + .unwrap(); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), 
false).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); - session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.on_session_timeout(), Ok(true)); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session + .on_consensus_message( + &NodeId::from(3), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!(session.on_session_timeout(), Ok(true)); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); - assert_eq!(session.on_session_timeout(), Ok(false)); - assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); - } + assert_eq!(session.on_session_timeout(), Ok(false)); + assert_eq!( + session.state(), + ConsensusSessionState::EstablishingConsensus + ); + } - #[test] - fn consensus_session_continues_if_session_timeouts_and_not_enough_nodes_left_for_computation() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - 
session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + #[test] + fn consensus_session_continues_if_session_timeouts_and_not_enough_nodes_left_for_computation() { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()) + .unwrap(); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); - assert_eq!(session.on_session_timeout(), Err(Error::ConsensusUnreachable)); - assert_eq!(session.state(), ConsensusSessionState::Failed); - } + assert_eq!( + session.on_session_timeout(), + Err(Error::ConsensusUnreachable) + ); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } - #[test] - fn same_consensus_group_returned_after_second_selection() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap(); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); + #[test] + fn same_consensus_group_returned_after_second_selection() { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize( + vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)] + .into_iter() + .collect(), + ) + .unwrap(); + session + 
.on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + session + .on_consensus_message( + &NodeId::from(3), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); - let consensus_group1 = session.select_consensus_group().unwrap().clone(); - let consensus_group2 = session.select_consensus_group().unwrap().clone(); - assert_eq!(consensus_group1, consensus_group2); - } + let consensus_group1 = session.select_consensus_group().unwrap().clone(); + let consensus_group2 = session.select_consensus_group().unwrap().clone(); + assert_eq!(consensus_group1, consensus_group2); + } - #[test] - fn consensus_session_complete_2_of_4() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(3)].into_iter().collect()).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); - session.on_job_response(&NodeId::from(2), 16).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::Finished); - assert_eq!(session.result(), Ok(20)); - } + #[test] + fn consensus_session_complete_2_of_4() { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize( + vec![ + NodeId::from(1), + NodeId::from(2), + NodeId::from(3), + NodeId::from(3), + ] + .into_iter() + .collect(), + ) + .unwrap(); + assert_eq!( + session.state(), + 
ConsensusSessionState::EstablishingConsensus + ); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); + session.on_job_response(&NodeId::from(2), 16).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::Finished); + assert_eq!(session.result(), Ok(20)); + } - #[test] - fn consensus_session_complete_2_of_4_after_restart() { - let mut session = make_master_consensus_session(1, None, None); - session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); - session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + #[test] + fn consensus_session_complete_2_of_4_after_restart() { + let mut session = make_master_consensus_session(1, None, None); + session + .initialize( + vec![ + NodeId::from(1), + NodeId::from(2), + NodeId::from(3), + NodeId::from(4), + ] + .into_iter() + .collect(), + ) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::EstablishingConsensus + ); + session + .on_consensus_message( + &NodeId::from(2), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - 
assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); - session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); + session + .on_consensus_message( + &NodeId::from(3), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); - assert_eq!(session.on_node_error(&NodeId::from(2), Error::AccessDenied).unwrap(), true); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + assert_eq!( + session + .on_node_error(&NodeId::from(2), Error::AccessDenied) + .unwrap(), + true + ); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); - assert_eq!(session.on_node_error(&NodeId::from(3), Error::AccessDenied).unwrap(), false); - assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + assert_eq!( + session + .on_node_error(&NodeId::from(3), Error::AccessDenied) + .unwrap(), + false + ); + assert_eq!( + session.state(), + ConsensusSessionState::EstablishingConsensus + ); - session.on_consensus_message(&NodeId::from(4), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { - is_confirmed: true, - })).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session + .on_consensus_message( + 
&NodeId::from(4), + &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + ) + .unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); - session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + session + .disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false) + .unwrap(); + assert_eq!( + session.state(), + ConsensusSessionState::WaitingForPartialResults + ); - session.on_job_response(&NodeId::from(4), 16).unwrap(); - assert_eq!(session.state(), ConsensusSessionState::Finished); - assert_eq!(session.result(), Ok(20)); - } + session.on_job_response(&NodeId::from(4), 16).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::Finished); + assert_eq!(session.result(), Ok(20)); + } } diff --git a/secret-store/src/key_server_cluster/jobs/decryption_job.rs b/secret-store/src/key_server_cluster/jobs/decryption_job.rs index ca3df29b2..342429d7c 100644 --- a/secret-store/src/key_server_cluster/jobs/decryption_job.rs +++ b/secret-store/src/key_server_cluster/jobs/decryption_job.rs @@ -14,175 +14,252 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::{BTreeSet, BTreeMap}; -use ethereum_types::H256; -use ethkey::{Public, Secret}; use crypto::DEFAULT_MAC; -use ethkey::crypto::ecies::encrypt; -use key_server_cluster::{Error, NodeId, DocumentKeyShare, EncryptedDocumentKeyShadow}; -use key_server_cluster::math; -use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor}; +use ethereum_types::H256; +use ethkey::{crypto::ecies::encrypt, Public, Secret}; +use key_server_cluster::{ + jobs::job_session::{JobExecutor, JobPartialRequestAction, JobPartialResponseAction}, + math, DocumentKeyShare, EncryptedDocumentKeyShadow, Error, NodeId, +}; +use std::collections::{BTreeMap, BTreeSet}; /// Decryption job. pub struct DecryptionJob { - /// This node id. - self_node_id: NodeId, - /// Access key. - access_key: Secret, - /// Requester public key. - requester: Public, - /// Key share. - key_share: DocumentKeyShare, - /// Key version. - key_version: H256, - /// Request id. - request_id: Option, - /// Is shadow decryption requested. - is_shadow_decryption: Option, - /// Is broadcast decryption requested. - is_broadcast_session: Option, + /// This node id. + self_node_id: NodeId, + /// Access key. + access_key: Secret, + /// Requester public key. + requester: Public, + /// Key share. + key_share: DocumentKeyShare, + /// Key version. + key_version: H256, + /// Request id. + request_id: Option, + /// Is shadow decryption requested. + is_shadow_decryption: Option, + /// Is broadcast decryption requested. + is_broadcast_session: Option, } /// Decryption job partial request. #[derive(Debug)] pub struct PartialDecryptionRequest { - /// Request id. - pub id: Secret, - /// Is shadow decryption requested. - pub is_shadow_decryption: bool, - /// Is broadcast decryption requested. - pub is_broadcast_session: bool, - /// Id of other nodes, participating in decryption. - pub other_nodes_ids: BTreeSet, + /// Request id. + pub id: Secret, + /// Is shadow decryption requested. 
+ pub is_shadow_decryption: bool, + /// Is broadcast decryption requested. + pub is_broadcast_session: bool, + /// Id of other nodes, participating in decryption. + pub other_nodes_ids: BTreeSet, } /// Decryption job partial response. #[derive(Clone)] pub struct PartialDecryptionResponse { - /// Request id. - pub request_id: Secret, - /// Shadow point. - pub shadow_point: Public, - /// Decryption shadow coefficient, if requested. - pub decrypt_shadow: Option>, + /// Request id. + pub request_id: Secret, + /// Shadow point. + pub shadow_point: Public, + /// Decryption shadow coefficient, if requested. + pub decrypt_shadow: Option>, } impl DecryptionJob { - pub fn new_on_slave(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, key_version: H256) -> Result { - debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some()); - Ok(DecryptionJob { - self_node_id: self_node_id, - access_key: access_key, - requester: requester, - key_share: key_share, - key_version: key_version, - request_id: None, - is_shadow_decryption: None, - is_broadcast_session: None, - }) - } + pub fn new_on_slave( + self_node_id: NodeId, + access_key: Secret, + requester: Public, + key_share: DocumentKeyShare, + key_version: H256, + ) -> Result { + debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some()); + Ok(DecryptionJob { + self_node_id: self_node_id, + access_key: access_key, + requester: requester, + key_share: key_share, + key_version: key_version, + request_id: None, + is_shadow_decryption: None, + is_broadcast_session: None, + }) + } - pub fn new_on_master(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, key_version: H256, is_shadow_decryption: bool, is_broadcast_session: bool) -> Result { - debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some()); - Ok(DecryptionJob { - self_node_id: self_node_id, - access_key: access_key, - 
requester: requester, - key_share: key_share, - key_version: key_version, - request_id: Some(math::generate_random_scalar()?), - is_shadow_decryption: Some(is_shadow_decryption), - is_broadcast_session: Some(is_broadcast_session), - }) - } + pub fn new_on_master( + self_node_id: NodeId, + access_key: Secret, + requester: Public, + key_share: DocumentKeyShare, + key_version: H256, + is_shadow_decryption: bool, + is_broadcast_session: bool, + ) -> Result { + debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some()); + Ok(DecryptionJob { + self_node_id: self_node_id, + access_key: access_key, + requester: requester, + key_share: key_share, + key_version: key_version, + request_id: Some(math::generate_random_scalar()?), + is_shadow_decryption: Some(is_shadow_decryption), + is_broadcast_session: Some(is_broadcast_session), + }) + } - pub fn request_id(&self) -> &Option { - &self.request_id - } + pub fn request_id(&self) -> &Option { + &self.request_id + } - pub fn set_request_id(&mut self, request_id: Secret) { - self.request_id = Some(request_id); - } + pub fn set_request_id(&mut self, request_id: Secret) { + self.request_id = Some(request_id); + } } impl JobExecutor for DecryptionJob { - type PartialJobRequest = PartialDecryptionRequest; - type PartialJobResponse = PartialDecryptionResponse; - type JobResponse = EncryptedDocumentKeyShadow; + type PartialJobRequest = PartialDecryptionRequest; + type PartialJobResponse = PartialDecryptionResponse; + type JobResponse = EncryptedDocumentKeyShadow; - fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet) -> Result { - debug_assert!(nodes.len() == self.key_share.threshold + 1); + fn prepare_partial_request( + &self, + node: &NodeId, + nodes: &BTreeSet, + ) -> Result { + debug_assert!(nodes.len() == self.key_share.threshold + 1); - let request_id = self.request_id.as_ref() + let request_id = self.request_id.as_ref() .expect("prepare_partial_request is only called on master nodes; 
request_id is filed in constructor on master nodes; qed"); - let is_shadow_decryption = self.is_shadow_decryption + let is_shadow_decryption = self.is_shadow_decryption .expect("prepare_partial_request is only called on master nodes; is_shadow_decryption is filed in constructor on master nodes; qed"); - let is_broadcast_session = self.is_broadcast_session + let is_broadcast_session = self.is_broadcast_session .expect("prepare_partial_request is only called on master nodes; is_broadcast_session is filed in constructor on master nodes; qed"); - let mut other_nodes_ids = nodes.clone(); - other_nodes_ids.remove(node); + let mut other_nodes_ids = nodes.clone(); + other_nodes_ids.remove(node); - Ok(PartialDecryptionRequest { - id: request_id.clone(), - is_shadow_decryption: is_shadow_decryption, - is_broadcast_session: is_broadcast_session, - other_nodes_ids: other_nodes_ids, - }) - } + Ok(PartialDecryptionRequest { + id: request_id.clone(), + is_shadow_decryption: is_shadow_decryption, + is_broadcast_session: is_broadcast_session, + other_nodes_ids: other_nodes_ids, + }) + } - fn process_partial_request(&mut self, partial_request: PartialDecryptionRequest) -> Result, Error> { - let key_version = self.key_share.version(&self.key_version)?; - if partial_request.other_nodes_ids.len() != self.key_share.threshold - || partial_request.other_nodes_ids.contains(&self.self_node_id) - || partial_request.other_nodes_ids.iter().any(|n| !key_version.id_numbers.contains_key(n)) { - return Err(Error::InvalidMessage); - } + fn process_partial_request( + &mut self, + partial_request: PartialDecryptionRequest, + ) -> Result, Error> { + let key_version = self.key_share.version(&self.key_version)?; + if partial_request.other_nodes_ids.len() != self.key_share.threshold + || partial_request.other_nodes_ids.contains(&self.self_node_id) + || partial_request + .other_nodes_ids + .iter() + .any(|n| !key_version.id_numbers.contains_key(n)) + { + return Err(Error::InvalidMessage); + } - let 
self_id_number = &key_version.id_numbers[&self.self_node_id]; - let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &key_version.id_numbers[n]); - let node_shadow = math::compute_node_shadow(&key_version.secret_share, &self_id_number, other_id_numbers)?; - let decrypt_shadow = if partial_request.is_shadow_decryption { Some(math::generate_random_scalar()?) } else { None }; - let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed"); - let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point(&self.access_key, &common_point, &node_shadow, decrypt_shadow)?; + let self_id_number = &key_version.id_numbers[&self.self_node_id]; + let other_id_numbers = partial_request + .other_nodes_ids + .iter() + .map(|n| &key_version.id_numbers[n]); + let node_shadow = math::compute_node_shadow( + &key_version.secret_share, + &self_id_number, + other_id_numbers, + )?; + let decrypt_shadow = if partial_request.is_shadow_decryption { + Some(math::generate_random_scalar()?) + } else { + None + }; + let common_point = self + .key_share + .common_point + .as_ref() + .expect("DecryptionJob is only created when common_point is known; qed"); + let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point( + &self.access_key, + &common_point, + &node_shadow, + decrypt_shadow, + )?; - Ok(JobPartialRequestAction::Respond(PartialDecryptionResponse { - request_id: partial_request.id, - shadow_point: shadow_point, - decrypt_shadow: match decrypt_shadow.clone() { - None => None, - Some(decrypt_shadow) => Some(encrypt(&self.requester, &DEFAULT_MAC, &**decrypt_shadow)?), - }, - })) - } + Ok(JobPartialRequestAction::Respond( + PartialDecryptionResponse { + request_id: partial_request.id, + shadow_point: shadow_point, + decrypt_shadow: match decrypt_shadow.clone() { + None => None, + Some(decrypt_shadow) => { + Some(encrypt(&self.requester, &DEFAULT_MAC, &**decrypt_shadow)?) 
+ } + }, + }, + )) + } - fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &PartialDecryptionResponse) -> Result { - if Some(&partial_response.request_id) != self.request_id.as_ref() { - return Ok(JobPartialResponseAction::Ignore); - } - if self.is_shadow_decryption != Some(partial_response.decrypt_shadow.is_some()) { - return Ok(JobPartialResponseAction::Reject); - } - Ok(JobPartialResponseAction::Accept) - } + fn check_partial_response( + &mut self, + _sender: &NodeId, + partial_response: &PartialDecryptionResponse, + ) -> Result { + if Some(&partial_response.request_id) != self.request_id.as_ref() { + return Ok(JobPartialResponseAction::Ignore); + } + if self.is_shadow_decryption != Some(partial_response.decrypt_shadow.is_some()) { + return Ok(JobPartialResponseAction::Reject); + } + Ok(JobPartialResponseAction::Accept) + } - fn compute_response(&self, partial_responses: &BTreeMap) -> Result { - let is_shadow_decryption = self.is_shadow_decryption + fn compute_response( + &self, + partial_responses: &BTreeMap, + ) -> Result { + let is_shadow_decryption = self.is_shadow_decryption .expect("compute_response is only called on master nodes; is_shadow_decryption is filed in constructor on master nodes; qed"); - let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed"); - let encrypted_point = self.key_share.encrypted_point.as_ref().expect("DecryptionJob is only created when encrypted_point is known; qed"); - let joint_shadow_point = math::compute_joint_shadow_point(partial_responses.values().map(|s| &s.shadow_point))?; - let decrypted_secret = math::decrypt_with_joint_shadow(self.key_share.threshold, &self.access_key, encrypted_point, &joint_shadow_point)?; - Ok(EncryptedDocumentKeyShadow { - decrypted_secret: decrypted_secret, - common_point: if is_shadow_decryption { - Some(math::make_common_shadow_point(self.key_share.threshold, common_point.clone())?) 
- } else { None }, - decrypt_shadows: if is_shadow_decryption { - Some(partial_responses.values().map(|r| r.decrypt_shadow.as_ref() + let common_point = self + .key_share + .common_point + .as_ref() + .expect("DecryptionJob is only created when common_point is known; qed"); + let encrypted_point = self + .key_share + .encrypted_point + .as_ref() + .expect("DecryptionJob is only created when encrypted_point is known; qed"); + let joint_shadow_point = + math::compute_joint_shadow_point(partial_responses.values().map(|s| &s.shadow_point))?; + let decrypted_secret = math::decrypt_with_joint_shadow( + self.key_share.threshold, + &self.access_key, + encrypted_point, + &joint_shadow_point, + )?; + Ok(EncryptedDocumentKeyShadow { + decrypted_secret: decrypted_secret, + common_point: if is_shadow_decryption { + Some(math::make_common_shadow_point( + self.key_share.threshold, + common_point.clone(), + )?) + } else { + None + }, + decrypt_shadows: if is_shadow_decryption { + Some(partial_responses.values().map(|r| r.decrypt_shadow.as_ref() .expect("is_shadow_decryption == true; decrypt_shadow.is_some() is checked in check_partial_response; qed") .clone()) .collect()) - } else { None }, - }) - } + } else { + None + }, + }) + } } diff --git a/secret-store/src/key_server_cluster/jobs/dummy_job.rs b/secret-store/src/key_server_cluster/jobs/dummy_job.rs index 122903eb6..06020b835 100644 --- a/secret-store/src/key_server_cluster/jobs/dummy_job.rs +++ b/secret-store/src/key_server_cluster/jobs/dummy_job.rs @@ -14,47 +14,55 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use key_server_cluster::{ + jobs::job_session::{ + JobExecutor, JobPartialRequestAction, JobPartialResponseAction, JobTransport, + }, + Error, NodeId, +}; use std::collections::{BTreeMap, BTreeSet}; -use key_server_cluster::{Error, NodeId}; -use key_server_cluster::jobs::job_session::{JobExecutor, JobTransport, JobPartialRequestAction, JobPartialResponseAction}; /// No-work job to use in generics (TODO [Refac]: create separate ShareChangeConsensusSession && remove this) pub struct DummyJob; impl JobExecutor for DummyJob { - type PartialJobRequest = (); - type PartialJobResponse = (); - type JobResponse = (); + type PartialJobRequest = (); + type PartialJobResponse = (); + type JobResponse = (); - fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet) -> Result<(), Error> { - unreachable!("dummy job methods are never called") - } + fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet) -> Result<(), Error> { + unreachable!("dummy job methods are never called") + } - fn process_partial_request(&mut self, _r: ()) -> Result, Error> { - unreachable!("dummy job methods are never called") - } + fn process_partial_request(&mut self, _r: ()) -> Result, Error> { + unreachable!("dummy job methods are never called") + } - fn check_partial_response(&mut self, _s: &NodeId, _r: &()) -> Result { - unreachable!("dummy job methods are never called") - } + fn check_partial_response( + &mut self, + _s: &NodeId, + _r: &(), + ) -> Result { + unreachable!("dummy job methods are never called") + } - fn compute_response(&self, _r: &BTreeMap) -> Result<(), Error> { - unreachable!("dummy job methods are never called") - } + fn compute_response(&self, _r: &BTreeMap) -> Result<(), Error> { + unreachable!("dummy job methods are never called") + } } /// No-work job transport to use in generics (TODO [Refac]: create separate ShareChangeConsensusSession && remove this) pub struct DummyJobTransport; impl JobTransport for DummyJobTransport { - type PartialJobRequest = (); - 
type PartialJobResponse = (); + type PartialJobRequest = (); + type PartialJobResponse = (); - fn send_partial_request(&self, _node: &NodeId, _request: ()) -> Result<(), Error> { - unreachable!("dummy transport methods are never called") - } + fn send_partial_request(&self, _node: &NodeId, _request: ()) -> Result<(), Error> { + unreachable!("dummy transport methods are never called") + } - fn send_partial_response(&self, _node: &NodeId, _response: ()) -> Result<(), Error> { - unreachable!("dummy transport methods are never called") - } + fn send_partial_response(&self, _node: &NodeId, _response: ()) -> Result<(), Error> { + unreachable!("dummy transport methods are never called") + } } diff --git a/secret-store/src/key_server_cluster/jobs/job_session.rs b/secret-store/src/key_server_cluster/jobs/job_session.rs index d76f8adda..0892aa278 100644 --- a/secret-store/src/key_server_cluster/jobs/job_session.rs +++ b/secret-store/src/key_server_cluster/jobs/job_session.rs @@ -14,648 +14,1030 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::{BTreeSet, BTreeMap}; use key_server_cluster::{Error, NodeId, SessionMeta}; +use std::collections::{BTreeMap, BTreeSet}; /// Partial response action. #[derive(Debug, Clone, Copy, PartialEq)] pub enum JobPartialResponseAction { - /// Ignore this response. - Ignore, - /// Mark this response as reject. - Reject, - /// Accept this response. - Accept, + /// Ignore this response. + Ignore, + /// Mark this response as reject. + Reject, + /// Accept this response. + Accept, } /// Partial request action. #[derive(Debug, Clone, Copy, PartialEq)] pub enum JobPartialRequestAction { - /// Respond with reject. - Reject(PartialJobResponse), - /// Respond with this response. - Respond(PartialJobResponse), + /// Respond with reject. + Reject(PartialJobResponse), + /// Respond with this response. + Respond(PartialJobResponse), } /// Job executor. 
pub trait JobExecutor { - type PartialJobRequest; - type PartialJobResponse: Clone; - type JobResponse; + type PartialJobRequest; + type PartialJobResponse: Clone; + type JobResponse; - /// Prepare job request for given node. - fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet) -> Result; - /// Process partial request. - fn process_partial_request(&mut self, partial_request: Self::PartialJobRequest) -> Result, Error>; - /// Check partial response of given node. - fn check_partial_response(&mut self, sender: &NodeId, partial_response: &Self::PartialJobResponse) -> Result; - /// Compute final job response. - fn compute_response(&self, partial_responses: &BTreeMap) -> Result; + /// Prepare job request for given node. + fn prepare_partial_request( + &self, + node: &NodeId, + nodes: &BTreeSet, + ) -> Result; + /// Process partial request. + fn process_partial_request( + &mut self, + partial_request: Self::PartialJobRequest, + ) -> Result, Error>; + /// Check partial response of given node. + fn check_partial_response( + &mut self, + sender: &NodeId, + partial_response: &Self::PartialJobResponse, + ) -> Result; + /// Compute final job response. + fn compute_response( + &self, + partial_responses: &BTreeMap, + ) -> Result; } /// Jobs transport. pub trait JobTransport { - type PartialJobRequest; - type PartialJobResponse; + type PartialJobRequest; + type PartialJobResponse; - /// Send partial request to given node. - fn send_partial_request(&self, node: &NodeId, request: Self::PartialJobRequest) -> Result<(), Error>; - /// Send partial request to given node. - fn send_partial_response(&self, node: &NodeId, response: Self::PartialJobResponse) -> Result<(), Error>; + /// Send partial request to given node. + fn send_partial_request( + &self, + node: &NodeId, + request: Self::PartialJobRequest, + ) -> Result<(), Error>; + /// Send partial request to given node. 
+ fn send_partial_response( + &self, + node: &NodeId, + response: Self::PartialJobResponse, + ) -> Result<(), Error>; } /// Current state of job session. #[derive(Debug, Clone, Copy, PartialEq)] pub enum JobSessionState { - /// Session is inactive. - Inactive, - /// Session is active. - Active, - /// Session is finished. - Finished, - /// Session has failed. - Failed, + /// Session is inactive. + Inactive, + /// Session is active. + Active, + /// Session is finished. + Finished, + /// Session has failed. + Failed, } /// Basic request-response session on a set of nodes. -pub struct JobSession where Transport: JobTransport { - /// Session meta. - meta: SessionMeta, - /// Job executor. - executor: Executor, - /// Jobs transport. - transport: Transport, - /// Session data. - data: JobSessionData, +pub struct JobSession +where + Transport: JobTransport< + PartialJobRequest = Executor::PartialJobRequest, + PartialJobResponse = Executor::PartialJobResponse, + >, +{ + /// Session meta. + meta: SessionMeta, + /// Job executor. + executor: Executor, + /// Jobs transport. + transport: Transport, + /// Session data. + data: JobSessionData, } /// Data of job session. struct JobSessionData { - /// Session state. - state: JobSessionState, - /// Mutable session data. - active_data: Option>, + /// Session state. + state: JobSessionState, + /// Mutable session data. + active_data: Option>, } /// Active job session data. struct ActiveJobSessionData { - /// Active partial requests. - requests: BTreeSet, - /// Rejects to partial requests (maps to true, if reject is fatal). - rejects: BTreeMap, - /// Received partial responses. - responses: BTreeMap, + /// Active partial requests. + requests: BTreeSet, + /// Rejects to partial requests (maps to true, if reject is fatal). + rejects: BTreeMap, + /// Received partial responses. + responses: BTreeMap, } -impl JobSession where Executor: JobExecutor, Transport: JobTransport { - /// Create new session. 
- pub fn new(meta: SessionMeta, executor: Executor, transport: Transport) -> Self { - JobSession { - meta: meta, - executor: executor, - transport: transport, - data: JobSessionData { - state: JobSessionState::Inactive, - active_data: None, - }, - } - } +impl JobSession +where + Executor: JobExecutor, + Transport: JobTransport< + PartialJobRequest = Executor::PartialJobRequest, + PartialJobResponse = Executor::PartialJobResponse, + >, +{ + /// Create new session. + pub fn new(meta: SessionMeta, executor: Executor, transport: Transport) -> Self { + JobSession { + meta: meta, + executor: executor, + transport: transport, + data: JobSessionData { + state: JobSessionState::Inactive, + active_data: None, + }, + } + } - /// Get transport reference. - #[cfg(test)] - pub fn transport(&self) -> &Transport { - &self.transport - } + /// Get transport reference. + #[cfg(test)] + pub fn transport(&self) -> &Transport { + &self.transport + } - /// Get mutable transport reference. - pub fn transport_mut(&mut self) -> &mut Transport { - &mut self.transport - } + /// Get mutable transport reference. + pub fn transport_mut(&mut self) -> &mut Transport { + &mut self.transport + } - /// Get executor reference. - pub fn executor(&self) -> &Executor { - &self.executor - } + /// Get executor reference. + pub fn executor(&self) -> &Executor { + &self.executor + } - /// Get mutable executor reference. - pub fn executor_mut(&mut self) -> &mut Executor { - &mut self.executor - } + /// Get mutable executor reference. + pub fn executor_mut(&mut self) -> &mut Executor { + &mut self.executor + } - /// Get job state. - pub fn state(&self) -> JobSessionState { - self.data.state - } + /// Get job state. + pub fn state(&self) -> JobSessionState { + self.data.state + } - /// Get rejects. - #[cfg(test)] - pub fn rejects(&self) -> &BTreeMap { - debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + /// Get rejects. 
+ #[cfg(test)] + pub fn rejects(&self) -> &BTreeMap { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); - &self.data.active_data.as_ref() + &self.data.active_data.as_ref() .expect("rejects is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed") .rejects - } + } - /// Get active requests. - pub fn requests(&self) -> &BTreeSet { - debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + /// Get active requests. + pub fn requests(&self) -> &BTreeSet { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); - &self.data.active_data.as_ref() + &self.data.active_data.as_ref() .expect("requests is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed") .requests - } + } - /// Get responses. - pub fn responses(&self) -> &BTreeMap { - debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + /// Get responses. + pub fn responses(&self) -> &BTreeMap { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); - &self.data.active_data.as_ref() + &self.data.active_data.as_ref() .expect("responses is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed") .responses - } + } - /// Returns true if enough responses are ready to compute result. - pub fn is_result_ready(&self) -> bool { - debug_assert!(self.meta.self_node_id == self.meta.master_node_id); - self.data.active_data.as_ref() + /// Returns true if enough responses are ready to compute result. + pub fn is_result_ready(&self) -> bool { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + self.data.active_data.as_ref() .expect("is_result_ready is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed") .responses.len() >= self.meta.threshold + 1 - } + } - /// Get job result. 
- pub fn result(&self) -> Result { - debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + /// Get job result. + pub fn result(&self) -> Result { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); - if self.data.state != JobSessionState::Finished { - return Err(Error::InvalidStateForRequest); - } + if self.data.state != JobSessionState::Finished { + return Err(Error::InvalidStateForRequest); + } - self.executor.compute_response(&self.data.active_data.as_ref() + self.executor.compute_response(&self.data.active_data.as_ref() .expect("requests is only called on master nodes; on master nodes active_data is filled during initialization; qed") .responses) - } + } - /// Initialize. - pub fn initialize(&mut self, nodes: BTreeSet, self_response: Option, broadcast_self_response: bool) -> Result, Error> { - debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + /// Initialize. + pub fn initialize( + &mut self, + nodes: BTreeSet, + self_response: Option, + broadcast_self_response: bool, + ) -> Result, Error> { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); - if nodes.len() < self.meta.threshold + 1 { - return Err(if self.meta.configured_nodes_count < self.meta.threshold + 1 { - Error::ConsensusUnreachable - } else { - Error::ConsensusTemporaryUnreachable - }); - } + if nodes.len() < self.meta.threshold + 1 { + return Err( + if self.meta.configured_nodes_count < self.meta.threshold + 1 { + Error::ConsensusUnreachable + } else { + Error::ConsensusTemporaryUnreachable + }, + ); + } - if self.data.state != JobSessionState::Inactive { - return Err(Error::InvalidStateForRequest); - } + if self.data.state != JobSessionState::Inactive { + return Err(Error::InvalidStateForRequest); + } - // result from self - let active_data = ActiveJobSessionData { - requests: nodes.clone(), - rejects: BTreeMap::new(), - responses: BTreeMap::new(), - }; - let waits_for_self = active_data.requests.contains(&self.meta.self_node_id); - 
let self_response = match self_response { - Some(self_response) => Some(self_response), - None if waits_for_self => { - let partial_request = self.executor.prepare_partial_request(&self.meta.self_node_id, &active_data.requests)?; - let self_response = self.executor.process_partial_request(partial_request)?; - Some(self_response.take_response()) - }, - None => None, - }; + // result from self + let active_data = ActiveJobSessionData { + requests: nodes.clone(), + rejects: BTreeMap::new(), + responses: BTreeMap::new(), + }; + let waits_for_self = active_data.requests.contains(&self.meta.self_node_id); + let self_response = match self_response { + Some(self_response) => Some(self_response), + None if waits_for_self => { + let partial_request = self + .executor + .prepare_partial_request(&self.meta.self_node_id, &active_data.requests)?; + let self_response = self.executor.process_partial_request(partial_request)?; + Some(self_response.take_response()) + } + None => None, + }; - // update state - self.data.active_data = Some(active_data); - self.data.state = JobSessionState::Active; + // update state + self.data.active_data = Some(active_data); + self.data.state = JobSessionState::Active; - // if we are waiting for response from self => do it - if let Some(self_response) = self_response.clone() { - let self_node_id = self.meta.self_node_id.clone(); - self.on_partial_response(&self_node_id, self_response)?; - } + // if we are waiting for response from self => do it + if let Some(self_response) = self_response.clone() { + let self_node_id = self.meta.self_node_id.clone(); + self.on_partial_response(&self_node_id, self_response)?; + } - // send requests to save nodes. we only send requests if session is still active. 
- for node in nodes.iter().filter(|n| **n != self.meta.self_node_id) { - if self.data.state == JobSessionState::Active { - self.transport.send_partial_request(node, self.executor.prepare_partial_request(node, &nodes)?)?; - } - if broadcast_self_response { - if let Some(self_response) = self_response.clone() { - self.transport.send_partial_response(node, self_response)?; - } - } - } + // send requests to save nodes. we only send requests if session is still active. + for node in nodes.iter().filter(|n| **n != self.meta.self_node_id) { + if self.data.state == JobSessionState::Active { + self.transport.send_partial_request( + node, + self.executor.prepare_partial_request(node, &nodes)?, + )?; + } + if broadcast_self_response { + if let Some(self_response) = self_response.clone() { + self.transport.send_partial_response(node, self_response)?; + } + } + } - Ok(self_response) - } + Ok(self_response) + } - /// When partial request is received by slave node. - pub fn on_partial_request(&mut self, node: &NodeId, request: Executor::PartialJobRequest) -> Result, Error> { - if node != &self.meta.master_node_id { - return Err(Error::InvalidMessage); - } - if self.meta.self_node_id == self.meta.master_node_id { - return Err(Error::InvalidMessage); - } - if self.data.state != JobSessionState::Inactive && self.data.state != JobSessionState::Finished { - return Err(Error::InvalidStateForRequest); - } + /// When partial request is received by slave node. 
+ pub fn on_partial_request( + &mut self, + node: &NodeId, + request: Executor::PartialJobRequest, + ) -> Result, Error> { + if node != &self.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if self.meta.self_node_id == self.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if self.data.state != JobSessionState::Inactive + && self.data.state != JobSessionState::Finished + { + return Err(Error::InvalidStateForRequest); + } - let partial_request_action = self.executor.process_partial_request(request)?; - let partial_response = match partial_request_action { - JobPartialRequestAction::Respond(ref partial_response) => { - self.data.state = JobSessionState::Finished; - partial_response.clone() - }, - JobPartialRequestAction::Reject(ref partial_response) => { - self.data.state = JobSessionState::Failed; - partial_response.clone() - }, - }; - self.transport.send_partial_response(node, partial_response)?; - Ok(partial_request_action) - } + let partial_request_action = self.executor.process_partial_request(request)?; + let partial_response = match partial_request_action { + JobPartialRequestAction::Respond(ref partial_response) => { + self.data.state = JobSessionState::Finished; + partial_response.clone() + } + JobPartialRequestAction::Reject(ref partial_response) => { + self.data.state = JobSessionState::Failed; + partial_response.clone() + } + }; + self.transport + .send_partial_response(node, partial_response)?; + Ok(partial_request_action) + } - /// When partial request is received by master node. - pub fn on_partial_response(&mut self, node: &NodeId, response: Executor::PartialJobResponse) -> Result<(), Error> { - if self.meta.self_node_id != self.meta.master_node_id { - return Err(Error::InvalidMessage); - } - if self.data.state != JobSessionState::Active && self.data.state != JobSessionState::Finished { - return Err(Error::InvalidStateForRequest); - } + /// When partial request is received by master node. 
+ pub fn on_partial_response( + &mut self, + node: &NodeId, + response: Executor::PartialJobResponse, + ) -> Result<(), Error> { + if self.meta.self_node_id != self.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if self.data.state != JobSessionState::Active + && self.data.state != JobSessionState::Finished + { + return Err(Error::InvalidStateForRequest); + } - let active_data = self.data.active_data.as_mut() + let active_data = self.data.active_data.as_mut() .expect("on_partial_response is only called on master nodes; on master nodes active_data is filled during initialization; qed"); - if !active_data.requests.remove(node) { - return Err(Error::InvalidNodeForRequest); - } + if !active_data.requests.remove(node) { + return Err(Error::InvalidNodeForRequest); + } - match self.executor.check_partial_response(node, &response)? { - JobPartialResponseAction::Ignore => Ok(()), - JobPartialResponseAction::Reject => { - // direct reject is always considered as fatal - active_data.rejects.insert(node.clone(), true); - if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 { - return Ok(()); - } + match self.executor.check_partial_response(node, &response)? 
{ + JobPartialResponseAction::Ignore => Ok(()), + JobPartialResponseAction::Reject => { + // direct reject is always considered as fatal + active_data.rejects.insert(node.clone(), true); + if active_data.requests.len() + active_data.responses.len() + >= self.meta.threshold + 1 + { + return Ok(()); + } - self.data.state = JobSessionState::Failed; - Err(consensus_unreachable(&active_data.rejects)) - }, - JobPartialResponseAction::Accept => { - active_data.responses.insert(node.clone(), response); - if active_data.responses.len() < self.meta.threshold + 1 { - return Ok(()); - } + self.data.state = JobSessionState::Failed; + Err(consensus_unreachable(&active_data.rejects)) + } + JobPartialResponseAction::Accept => { + active_data.responses.insert(node.clone(), response); + if active_data.responses.len() < self.meta.threshold + 1 { + return Ok(()); + } - self.data.state = JobSessionState::Finished; - Ok(()) - }, - } - } + self.data.state = JobSessionState::Finished; + Ok(()) + } + } + } - /// When error from node is received. - pub fn on_node_error(&mut self, node: &NodeId, error: Error) -> Result<(), Error> { - if self.meta.self_node_id != self.meta.master_node_id { - if node != &self.meta.master_node_id { - return Ok(()); - } + /// When error from node is received. 
+ pub fn on_node_error(&mut self, node: &NodeId, error: Error) -> Result<(), Error> { + if self.meta.self_node_id != self.meta.master_node_id { + if node != &self.meta.master_node_id { + return Ok(()); + } - self.data.state = JobSessionState::Failed; - return Err(if !error.is_non_fatal() { - Error::ConsensusUnreachable - } else { - Error::ConsensusTemporaryUnreachable - }); - } + self.data.state = JobSessionState::Failed; + return Err(if !error.is_non_fatal() { + Error::ConsensusUnreachable + } else { + Error::ConsensusTemporaryUnreachable + }); + } - if let Some(active_data) = self.data.active_data.as_mut() { - if active_data.rejects.contains_key(node) { - return Ok(()); - } - if active_data.requests.remove(node) || active_data.responses.remove(node).is_some() { - active_data.rejects.insert(node.clone(), !error.is_non_fatal()); - if self.data.state == JobSessionState::Finished && active_data.responses.len() < self.meta.threshold + 1 { - self.data.state = JobSessionState::Active; - } - if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 { - return Ok(()); - } + if let Some(active_data) = self.data.active_data.as_mut() { + if active_data.rejects.contains_key(node) { + return Ok(()); + } + if active_data.requests.remove(node) || active_data.responses.remove(node).is_some() { + active_data + .rejects + .insert(node.clone(), !error.is_non_fatal()); + if self.data.state == JobSessionState::Finished + && active_data.responses.len() < self.meta.threshold + 1 + { + self.data.state = JobSessionState::Active; + } + if active_data.requests.len() + active_data.responses.len() + >= self.meta.threshold + 1 + { + return Ok(()); + } - self.data.state = JobSessionState::Failed; - return Err(consensus_unreachable(&active_data.rejects)); - } - } + self.data.state = JobSessionState::Failed; + return Err(consensus_unreachable(&active_data.rejects)); + } + } - Ok(()) - } + Ok(()) + } - /// When session timeouted. 
- pub fn on_session_timeout(&mut self) -> Result<(), Error> { - if self.data.state == JobSessionState::Finished || self.data.state == JobSessionState::Failed { - return Ok(()); - } + /// When session timeouted. + pub fn on_session_timeout(&mut self) -> Result<(), Error> { + if self.data.state == JobSessionState::Finished + || self.data.state == JobSessionState::Failed + { + return Ok(()); + } - self.data.state = JobSessionState::Failed; - // we have started session => consensus is possible in theory, but now it has failed with timeout - Err(Error::ConsensusTemporaryUnreachable) - } + self.data.state = JobSessionState::Failed; + // we have started session => consensus is possible in theory, but now it has failed with timeout + Err(Error::ConsensusTemporaryUnreachable) + } } impl JobPartialRequestAction { - /// Take actual response. - pub fn take_response(self) -> PartialJobResponse { - match self { - JobPartialRequestAction::Respond(response) => response, - JobPartialRequestAction::Reject(response) => response, - } - } + /// Take actual response. + pub fn take_response(self) -> PartialJobResponse { + match self { + JobPartialRequestAction::Respond(response) => response, + JobPartialRequestAction::Reject(response) => response, + } + } } /// Returns appropriate 'consensus unreachable' error. 
fn consensus_unreachable(rejects: &BTreeMap) -> Error { - // when >= 50% of nodes have responded with fatal reject => ConsensusUnreachable - if rejects.values().filter(|r| **r).count() >= rejects.len() / 2 { - Error::ConsensusUnreachable - } else { - Error::ConsensusTemporaryUnreachable - } + // when >= 50% of nodes have responded with fatal reject => ConsensusUnreachable + if rejects.values().filter(|r| **r).count() >= rejects.len() / 2 { + Error::ConsensusUnreachable + } else { + Error::ConsensusTemporaryUnreachable + } } #[cfg(test)] pub mod tests { - use std::collections::{VecDeque, BTreeMap, BTreeSet}; - use parking_lot::Mutex; - use ethkey::Public; - use key_server_cluster::{Error, NodeId, SessionId, SessionMeta}; - use super::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor, JobTransport, JobSession, JobSessionState}; + use super::{ + JobExecutor, JobPartialRequestAction, JobPartialResponseAction, JobSession, + JobSessionState, JobTransport, + }; + use ethkey::Public; + use key_server_cluster::{Error, NodeId, SessionId, SessionMeta}; + use parking_lot::Mutex; + use std::collections::{BTreeMap, BTreeSet, VecDeque}; - pub struct SquaredSumJobExecutor; + pub struct SquaredSumJobExecutor; - impl JobExecutor for SquaredSumJobExecutor { - type PartialJobRequest = u32; - type PartialJobResponse = u32; - type JobResponse = u32; + impl JobExecutor for SquaredSumJobExecutor { + type PartialJobRequest = u32; + type PartialJobResponse = u32; + type JobResponse = u32; - fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet) -> Result { Ok(2) } - fn process_partial_request(&mut self, r: u32) -> Result, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } } - fn check_partial_response(&mut self, _s: &NodeId, r: &u32) -> Result { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } } - fn compute_response(&self, r: &BTreeMap) -> Result { 
Ok(r.values().fold(0, |v1, v2| v1 + v2)) } - } + fn prepare_partial_request( + &self, + _n: &NodeId, + _nodes: &BTreeSet, + ) -> Result { + Ok(2) + } + fn process_partial_request( + &mut self, + r: u32, + ) -> Result, Error> { + if r <= 10 { + Ok(JobPartialRequestAction::Respond(r * r)) + } else { + Err(Error::InvalidMessage) + } + } + fn check_partial_response( + &mut self, + _s: &NodeId, + r: &u32, + ) -> Result { + if r % 2 == 0 { + Ok(JobPartialResponseAction::Accept) + } else { + Ok(JobPartialResponseAction::Reject) + } + } + fn compute_response(&self, r: &BTreeMap) -> Result { + Ok(r.values().fold(0, |v1, v2| v1 + v2)) + } + } - #[derive(Default)] - pub struct DummyJobTransport { - pub requests: Mutex>, - pub responses: Mutex>, - } + #[derive(Default)] + pub struct DummyJobTransport { + pub requests: Mutex>, + pub responses: Mutex>, + } - impl DummyJobTransport { - pub fn is_empty_response(&self) -> bool { - self.responses.lock().is_empty() - } + impl DummyJobTransport { + pub fn is_empty_response(&self) -> bool { + self.responses.lock().is_empty() + } - pub fn response(&self) -> (NodeId, U) { - self.responses.lock().pop_front().unwrap() - } - } + pub fn response(&self) -> (NodeId, U) { + self.responses.lock().pop_front().unwrap() + } + } - impl JobTransport for DummyJobTransport { - type PartialJobRequest = T; - type PartialJobResponse = U; + impl JobTransport for DummyJobTransport { + type PartialJobRequest = T; + type PartialJobResponse = U; - fn send_partial_request(&self, node: &NodeId, request: T) -> Result<(), Error> { self.requests.lock().push_back((node.clone(), request)); Ok(()) } - fn send_partial_response(&self, node: &NodeId, response: U) -> Result<(), Error> { self.responses.lock().push_back((node.clone(), response)); Ok(()) } - } + fn send_partial_request(&self, node: &NodeId, request: T) -> Result<(), Error> { + self.requests.lock().push_back((node.clone(), request)); + Ok(()) + } + fn send_partial_response(&self, node: &NodeId, response: U) 
-> Result<(), Error> { + self.responses.lock().push_back((node.clone(), response)); + Ok(()) + } + } - pub fn make_master_session_meta(threshold: usize) -> SessionMeta { - SessionMeta { id: SessionId::default(), master_node_id: NodeId::from(1), self_node_id: NodeId::from(1), threshold: threshold, - configured_nodes_count: 5, connected_nodes_count: 5 } - } + pub fn make_master_session_meta(threshold: usize) -> SessionMeta { + SessionMeta { + id: SessionId::default(), + master_node_id: NodeId::from(1), + self_node_id: NodeId::from(1), + threshold: threshold, + configured_nodes_count: 5, + connected_nodes_count: 5, + } + } - pub fn make_slave_session_meta(threshold: usize) -> SessionMeta { - SessionMeta { id: SessionId::default(), master_node_id: NodeId::from(1), self_node_id: NodeId::from(2), threshold: threshold, - configured_nodes_count: 5, connected_nodes_count: 5 } - } + pub fn make_slave_session_meta(threshold: usize) -> SessionMeta { + SessionMeta { + id: SessionId::default(), + master_node_id: NodeId::from(1), + self_node_id: NodeId::from(2), + threshold: threshold, + configured_nodes_count: 5, + connected_nodes_count: 5, + } + } - #[test] - fn job_initialize_fails_if_not_enough_nodes_for_threshold_total() { - let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.meta.configured_nodes_count = 1; - assert_eq!(job.initialize(vec![Public::from(1)].into_iter().collect(), None, false).unwrap_err(), Error::ConsensusUnreachable); - } + #[test] + fn job_initialize_fails_if_not_enough_nodes_for_threshold_total() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.meta.configured_nodes_count = 1; + assert_eq!( + job.initialize(vec![Public::from(1)].into_iter().collect(), None, false) + .unwrap_err(), + Error::ConsensusUnreachable + ); + } - #[test] - fn job_initialize_fails_if_not_enough_nodes_for_threshold_connected() { - 
let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.meta.connected_nodes_count = 3; - assert_eq!(job.initialize(vec![Public::from(1)].into_iter().collect(), None, false).unwrap_err(), Error::ConsensusTemporaryUnreachable); - } + #[test] + fn job_initialize_fails_if_not_enough_nodes_for_threshold_connected() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.meta.connected_nodes_count = 3; + assert_eq!( + job.initialize(vec![Public::from(1)].into_iter().collect(), None, false) + .unwrap_err(), + Error::ConsensusTemporaryUnreachable + ); + } - #[test] - fn job_initialize_fails_if_not_inactive() { - let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.initialize(vec![Public::from(1)].into_iter().collect(), None, false).unwrap_err(), Error::InvalidStateForRequest); - } + #[test] + fn job_initialize_fails_if_not_inactive() { + let mut job = JobSession::new( + make_master_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize(vec![Public::from(1)].into_iter().collect(), None, false) + .unwrap(); + assert_eq!( + job.initialize(vec![Public::from(1)].into_iter().collect(), None, false) + .unwrap_err(), + Error::InvalidStateForRequest + ); + } - #[test] - fn job_initialization_leads_to_finish_if_single_node_is_required() { - let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Finished); - assert!(job.is_result_ready()); - assert_eq!(job.result(), Ok(4)); - } + #[test] + fn job_initialization_leads_to_finish_if_single_node_is_required() { + let 
mut job = JobSession::new( + make_master_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize(vec![Public::from(1)].into_iter().collect(), None, false) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Finished); + assert!(job.is_result_ready()); + assert_eq!(job.result(), Ok(4)); + } - #[test] - fn job_initialization_does_not_leads_to_finish_if_single_other_node_is_required() { - let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(2)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - } + #[test] + fn job_initialization_does_not_leads_to_finish_if_single_other_node_is_required() { + let mut job = JobSession::new( + make_master_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize(vec![Public::from(2)].into_iter().collect(), None, false) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + } - #[test] - fn job_request_fails_if_comes_from_non_master_node() { - let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - assert_eq!(job.on_partial_request(&NodeId::from(3), 2).unwrap_err(), Error::InvalidMessage); - } + #[test] + fn job_request_fails_if_comes_from_non_master_node() { + let mut job = JobSession::new( + make_slave_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + assert_eq!( + job.on_partial_request(&NodeId::from(3), 2).unwrap_err(), + Error::InvalidMessage + ); + } - #[test] - fn job_request_fails_if_comes_to_master_node() { - let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - assert_eq!(job.on_partial_request(&NodeId::from(1), 2).unwrap_err(), Error::InvalidMessage); - } + #[test] + fn job_request_fails_if_comes_to_master_node() { + let mut job = 
JobSession::new( + make_master_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + assert_eq!( + job.on_partial_request(&NodeId::from(1), 2).unwrap_err(), + Error::InvalidMessage + ); + } - #[test] - fn job_request_fails_if_comes_to_failed_state() { - let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - job.on_session_timeout().unwrap_err(); - assert_eq!(job.on_partial_request(&NodeId::from(1), 2).unwrap_err(), Error::InvalidStateForRequest); - } + #[test] + fn job_request_fails_if_comes_to_failed_state() { + let mut job = JobSession::new( + make_slave_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.on_session_timeout().unwrap_err(); + assert_eq!( + job.on_partial_request(&NodeId::from(1), 2).unwrap_err(), + Error::InvalidStateForRequest + ); + } - #[test] - fn job_request_succeeds_if_comes_to_finished_state() { - let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - job.on_partial_request(&NodeId::from(1), 2).unwrap(); - assert_eq!(job.transport().response(), (NodeId::from(1), 4)); - assert_eq!(job.state(), JobSessionState::Finished); - job.on_partial_request(&NodeId::from(1), 3).unwrap(); - assert_eq!(job.transport().response(), (NodeId::from(1), 9)); - assert_eq!(job.state(), JobSessionState::Finished); - } + #[test] + fn job_request_succeeds_if_comes_to_finished_state() { + let mut job = JobSession::new( + make_slave_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.on_partial_request(&NodeId::from(1), 2).unwrap(); + assert_eq!(job.transport().response(), (NodeId::from(1), 4)); + assert_eq!(job.state(), JobSessionState::Finished); + job.on_partial_request(&NodeId::from(1), 3).unwrap(); + assert_eq!(job.transport().response(), (NodeId::from(1), 9)); + assert_eq!(job.state(), JobSessionState::Finished); + } - #[test] - fn 
job_response_fails_if_comes_to_slave_node() { - let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - assert_eq!(job.on_partial_response(&NodeId::from(1), 2).unwrap_err(), Error::InvalidMessage); - } + #[test] + fn job_response_fails_if_comes_to_slave_node() { + let mut job = JobSession::new( + make_slave_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + assert_eq!( + job.on_partial_response(&NodeId::from(1), 2).unwrap_err(), + Error::InvalidMessage + ); + } - #[test] - fn job_response_fails_if_comes_to_failed_state() { - let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(2)].into_iter().collect(), None, false).unwrap(); - job.on_session_timeout().unwrap_err(); - assert_eq!(job.on_partial_response(&NodeId::from(2), 2).unwrap_err(), Error::InvalidStateForRequest); - } + #[test] + fn job_response_fails_if_comes_to_failed_state() { + let mut job = JobSession::new( + make_master_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize(vec![Public::from(2)].into_iter().collect(), None, false) + .unwrap(); + job.on_session_timeout().unwrap_err(); + assert_eq!( + job.on_partial_response(&NodeId::from(2), 2).unwrap_err(), + Error::InvalidStateForRequest + ); + } - #[test] - fn job_response_fails_if_comes_from_unknown_node() { - let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(2)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.on_partial_response(&NodeId::from(3), 2).unwrap_err(), Error::InvalidNodeForRequest); - } + #[test] + fn job_response_fails_if_comes_from_unknown_node() { + let mut job = JobSession::new( + make_master_session_meta(0), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + 
job.initialize(vec![Public::from(2)].into_iter().collect(), None, false) + .unwrap(); + assert_eq!( + job.on_partial_response(&NodeId::from(3), 2).unwrap_err(), + Error::InvalidNodeForRequest + ); + } - #[test] - fn job_response_leads_to_failure_if_too_few_nodes_left() { - let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - assert_eq!(job.on_partial_response(&NodeId::from(2), 3).unwrap_err(), Error::ConsensusUnreachable); - assert_eq!(job.state(), JobSessionState::Failed); - } + #[test] + fn job_response_leads_to_failure_if_too_few_nodes_left() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![Public::from(1), Public::from(2)].into_iter().collect(), + None, + false, + ) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + assert_eq!( + job.on_partial_response(&NodeId::from(2), 3).unwrap_err(), + Error::ConsensusUnreachable + ); + assert_eq!(job.state(), JobSessionState::Failed); + } - #[test] - fn job_response_succeeds() { - let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - assert!(!job.is_result_ready()); - job.on_partial_response(&NodeId::from(2), 2).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - assert!(!job.is_result_ready()); - } + #[test] + fn job_response_succeeds() { + let mut job = JobSession::new( + make_master_session_meta(2), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![Public::from(1), Public::from(2), Public::from(3)] + .into_iter() + 
.collect(), + None, + false, + ) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + assert!(!job.is_result_ready()); + job.on_partial_response(&NodeId::from(2), 2).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + assert!(!job.is_result_ready()); + } - #[test] - fn job_response_leads_to_finish() { - let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - job.on_partial_response(&NodeId::from(2), 2).unwrap(); - assert_eq!(job.state(), JobSessionState::Finished); - } + #[test] + fn job_response_leads_to_finish() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![Public::from(1), Public::from(2)].into_iter().collect(), + None, + false, + ) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + job.on_partial_response(&NodeId::from(2), 2).unwrap(); + assert_eq!(job.state(), JobSessionState::Finished); + } - #[test] - fn job_node_error_ignored_when_slave_disconnects_from_slave() { - let mut job = JobSession::new(make_slave_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - assert_eq!(job.state(), JobSessionState::Inactive); - job.on_node_error(&NodeId::from(3), Error::AccessDenied).unwrap(); - assert_eq!(job.state(), JobSessionState::Inactive); - } + #[test] + fn job_node_error_ignored_when_slave_disconnects_from_slave() { + let mut job = JobSession::new( + make_slave_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + assert_eq!(job.state(), JobSessionState::Inactive); + job.on_node_error(&NodeId::from(3), Error::AccessDenied) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Inactive); + } - #[test] - fn 
job_node_error_leads_to_fail_when_slave_disconnects_from_master() { - let mut job = JobSession::new(make_slave_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - assert_eq!(job.state(), JobSessionState::Inactive); - assert_eq!(job.on_node_error(&NodeId::from(1), Error::AccessDenied).unwrap_err(), Error::ConsensusUnreachable); - assert_eq!(job.state(), JobSessionState::Failed); - } + #[test] + fn job_node_error_leads_to_fail_when_slave_disconnects_from_master() { + let mut job = JobSession::new( + make_slave_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + assert_eq!(job.state(), JobSessionState::Inactive); + assert_eq!( + job.on_node_error(&NodeId::from(1), Error::AccessDenied) + .unwrap_err(), + Error::ConsensusUnreachable + ); + assert_eq!(job.state(), JobSessionState::Failed); + } - #[test] - fn job_node_error_ignored_when_disconnects_from_rejected() { - let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - job.on_partial_response(&NodeId::from(2), 3).unwrap(); - job.on_node_error(&NodeId::from(2), Error::AccessDenied).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - } + #[test] + fn job_node_error_ignored_when_disconnects_from_rejected() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![Public::from(1), Public::from(2), Public::from(3)] + .into_iter() + .collect(), + None, + false, + ) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + job.on_partial_response(&NodeId::from(2), 3).unwrap(); + job.on_node_error(&NodeId::from(2), Error::AccessDenied) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + } - #[test] - fn 
job_node_error_ignored_when_disconnects_from_unknown() { - let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - job.on_node_error(&NodeId::from(3), Error::AccessDenied).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - } + #[test] + fn job_node_error_ignored_when_disconnects_from_unknown() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![Public::from(1), Public::from(2)].into_iter().collect(), + None, + false, + ) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + job.on_node_error(&NodeId::from(3), Error::AccessDenied) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + } - #[test] - fn job_node_error_ignored_when_disconnects_from_requested_and_enough_nodes_left() { - let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - job.on_node_error(&NodeId::from(3), Error::AccessDenied).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - } + #[test] + fn job_node_error_ignored_when_disconnects_from_requested_and_enough_nodes_left() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![Public::from(1), Public::from(2), Public::from(3)] + .into_iter() + .collect(), + None, + false, + ) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + job.on_node_error(&NodeId::from(3), Error::AccessDenied) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + } - 
#[test] - fn job_node_error_leads_to_fail_when_disconnects_from_requested_and_not_enough_nodes_left() { - let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - assert_eq!(job.on_node_error(&NodeId::from(2), Error::AccessDenied).unwrap_err(), Error::ConsensusUnreachable); - assert_eq!(job.state(), JobSessionState::Failed); - } + #[test] + fn job_node_error_leads_to_fail_when_disconnects_from_requested_and_not_enough_nodes_left() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![Public::from(1), Public::from(2)].into_iter().collect(), + None, + false, + ) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + assert_eq!( + job.on_node_error(&NodeId::from(2), Error::AccessDenied) + .unwrap_err(), + Error::ConsensusUnreachable + ); + assert_eq!(job.state(), JobSessionState::Failed); + } - #[test] - fn job_broadcasts_self_response() { - let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, true).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - assert_eq!(job.transport().response(), (NodeId::from(2), 4)); - } + #[test] + fn job_broadcasts_self_response() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![Public::from(1), Public::from(2)].into_iter().collect(), + None, + true, + ) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + assert_eq!(job.transport().response(), (NodeId::from(2), 4)); + } - #[test] - fn job_does_not_broadcasts_self_response() { - let mut job = 
JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap(); - assert_eq!(job.state(), JobSessionState::Active); - assert!(job.transport().is_empty_response()); - } + #[test] + fn job_does_not_broadcasts_self_response() { + let mut job = JobSession::new( + make_master_session_meta(1), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![Public::from(1), Public::from(2)].into_iter().collect(), + None, + false, + ) + .unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + assert!(job.transport().is_empty_response()); + } - #[test] - fn job_fails_with_temp_error_if_more_than_half_nodes_respond_with_temp_error() { - let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2), Public::from(3), Public::from(4)].into_iter().collect(), None, false).unwrap(); - job.on_node_error(&NodeId::from(2), Error::NodeDisconnected).unwrap(); - assert_eq!(job.on_node_error(&NodeId::from(3), Error::NodeDisconnected).unwrap_err(), Error::ConsensusTemporaryUnreachable); - } + #[test] + fn job_fails_with_temp_error_if_more_than_half_nodes_respond_with_temp_error() { + let mut job = JobSession::new( + make_master_session_meta(2), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![ + Public::from(1), + Public::from(2), + Public::from(3), + Public::from(4), + ] + .into_iter() + .collect(), + None, + false, + ) + .unwrap(); + job.on_node_error(&NodeId::from(2), Error::NodeDisconnected) + .unwrap(); + assert_eq!( + job.on_node_error(&NodeId::from(3), Error::NodeDisconnected) + .unwrap_err(), + Error::ConsensusTemporaryUnreachable + ); + } - #[test] - fn job_fails_with_temp_error_if_more_than_half_rejects_are_temp() { - let mut job = 
JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2), Public::from(3), Public::from(4)].into_iter().collect(), None, false).unwrap(); - job.on_node_error(&NodeId::from(2), Error::NodeDisconnected).unwrap(); - assert_eq!(job.on_node_error(&NodeId::from(3), Error::NodeDisconnected).unwrap_err(), Error::ConsensusTemporaryUnreachable); - } + #[test] + fn job_fails_with_temp_error_if_more_than_half_rejects_are_temp() { + let mut job = JobSession::new( + make_master_session_meta(2), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![ + Public::from(1), + Public::from(2), + Public::from(3), + Public::from(4), + ] + .into_iter() + .collect(), + None, + false, + ) + .unwrap(); + job.on_node_error(&NodeId::from(2), Error::NodeDisconnected) + .unwrap(); + assert_eq!( + job.on_node_error(&NodeId::from(3), Error::NodeDisconnected) + .unwrap_err(), + Error::ConsensusTemporaryUnreachable + ); + } - #[test] - fn job_fails_if_more_than_half_rejects_are_non_temp() { - let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default()); - job.initialize(vec![Public::from(1), Public::from(2), Public::from(3), Public::from(4)].into_iter().collect(), None, false).unwrap(); - job.on_node_error(&NodeId::from(2), Error::AccessDenied).unwrap(); - assert_eq!(job.on_node_error(&NodeId::from(3), Error::AccessDenied).unwrap_err(), Error::ConsensusUnreachable); - } + #[test] + fn job_fails_if_more_than_half_rejects_are_non_temp() { + let mut job = JobSession::new( + make_master_session_meta(2), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + job.initialize( + vec![ + Public::from(1), + Public::from(2), + Public::from(3), + Public::from(4), + ] + .into_iter() + .collect(), + None, + false, + ) + .unwrap(); + job.on_node_error(&NodeId::from(2), Error::AccessDenied) + .unwrap(); + assert_eq!( + 
job.on_node_error(&NodeId::from(3), Error::AccessDenied) + .unwrap_err(), + Error::ConsensusUnreachable + ); + } - #[test] - fn job_fails_with_temp_error_when_temp_error_is_reported_by_master_node() { - let mut job = JobSession::new(make_slave_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default()); - assert_eq!(job.on_node_error(&NodeId::from(1), Error::NodeDisconnected).unwrap_err(), Error::ConsensusTemporaryUnreachable); - } + #[test] + fn job_fails_with_temp_error_when_temp_error_is_reported_by_master_node() { + let mut job = JobSession::new( + make_slave_session_meta(2), + SquaredSumJobExecutor, + DummyJobTransport::default(), + ); + assert_eq!( + job.on_node_error(&NodeId::from(1), Error::NodeDisconnected) + .unwrap_err(), + Error::ConsensusTemporaryUnreachable + ); + } } diff --git a/secret-store/src/key_server_cluster/jobs/key_access_job.rs b/secret-store/src/key_server_cluster/jobs/key_access_job.rs index 075d7320f..93b9b16d4 100644 --- a/secret-store/src/key_server_cluster/jobs/key_access_job.rs +++ b/secret-store/src/key_server_cluster/jobs/key_access_job.rs @@ -14,79 +14,117 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::collections::{BTreeSet, BTreeMap}; -use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage}; -use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor}; +use key_server_cluster::{ + jobs::job_session::{JobExecutor, JobPartialRequestAction, JobPartialResponseAction}, + AclStorage, Error, NodeId, Requester, SessionId, +}; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; /// Purpose of this job is to construct set of nodes, which have agreed to provide access to the given key for the given requestor. pub struct KeyAccessJob { - /// Key id. - id: SessionId, - /// Has key share? - has_key_share: bool, - /// ACL storage. 
- acl_storage: Arc, - /// Requester data. - requester: Option, + /// Key id. + id: SessionId, + /// Has key share? + has_key_share: bool, + /// ACL storage. + acl_storage: Arc, + /// Requester data. + requester: Option, } impl KeyAccessJob { - pub fn new_on_slave(id: SessionId, acl_storage: Arc) -> Self { - KeyAccessJob { - id: id, - has_key_share: true, - acl_storage: acl_storage, - requester: None, - } - } + pub fn new_on_slave(id: SessionId, acl_storage: Arc) -> Self { + KeyAccessJob { + id: id, + has_key_share: true, + acl_storage: acl_storage, + requester: None, + } + } - pub fn new_on_master(id: SessionId, acl_storage: Arc, requester: Requester) -> Self { - KeyAccessJob { - id: id, - has_key_share: true, - acl_storage: acl_storage, - requester: Some(requester), - } - } + pub fn new_on_master( + id: SessionId, + acl_storage: Arc, + requester: Requester, + ) -> Self { + KeyAccessJob { + id: id, + has_key_share: true, + acl_storage: acl_storage, + requester: Some(requester), + } + } - pub fn set_has_key_share(&mut self, has_key_share: bool) { - self.has_key_share = has_key_share; - } + pub fn set_has_key_share(&mut self, has_key_share: bool) { + self.has_key_share = has_key_share; + } - pub fn set_requester(&mut self, requester: Requester) { - self.requester = Some(requester); - } + pub fn set_requester(&mut self, requester: Requester) { + self.requester = Some(requester); + } - pub fn requester(&self) -> Option<&Requester> { - self.requester.as_ref() - } + pub fn requester(&self) -> Option<&Requester> { + self.requester.as_ref() + } } impl JobExecutor for KeyAccessJob { - type PartialJobRequest = Requester; - type PartialJobResponse = bool; - type JobResponse = BTreeSet; + type PartialJobRequest = Requester; + type PartialJobResponse = bool; + type JobResponse = BTreeSet; - fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet) -> Result { - Ok(self.requester.as_ref().expect("prepare_partial_request is only called on master nodes; new_on_master 
fills the signature; qed").clone()) - } + fn prepare_partial_request( + &self, + _node: &NodeId, + _nodes: &BTreeSet, + ) -> Result { + Ok(self.requester.as_ref().expect("prepare_partial_request is only called on master nodes; new_on_master fills the signature; qed").clone()) + } - fn process_partial_request(&mut self, partial_request: Requester) -> Result, Error> { - if !self.has_key_share { - return Ok(JobPartialRequestAction::Reject(false)); - } - - self.requester = Some(partial_request.clone()); - self.acl_storage.check(partial_request.address(&self.id).map_err(Error::InsufficientRequesterData)?, &self.id) - .map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) }) - } + fn process_partial_request( + &mut self, + partial_request: Requester, + ) -> Result, Error> { + if !self.has_key_share { + return Ok(JobPartialRequestAction::Reject(false)); + } - fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &bool) -> Result { - Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject }) - } + self.requester = Some(partial_request.clone()); + self.acl_storage + .check( + partial_request + .address(&self.id) + .map_err(Error::InsufficientRequesterData)?, + &self.id, + ) + .map(|is_confirmed| { + if is_confirmed { + JobPartialRequestAction::Respond(true) + } else { + JobPartialRequestAction::Reject(false) + } + }) + } - fn compute_response(&self, partial_responses: &BTreeMap) -> Result, Error> { - Ok(partial_responses.keys().cloned().collect()) - } + fn check_partial_response( + &mut self, + _sender: &NodeId, + partial_response: &bool, + ) -> Result { + Ok(if *partial_response { + JobPartialResponseAction::Accept + } else { + JobPartialResponseAction::Reject + }) + } + + fn compute_response( + &self, + partial_responses: &BTreeMap, + ) -> Result, Error> { + Ok(partial_responses.keys().cloned().collect()) + } } diff --git 
a/secret-store/src/key_server_cluster/jobs/servers_set_change_access_job.rs b/secret-store/src/key_server_cluster/jobs/servers_set_change_access_job.rs index ace5021a0..bb4bfd9b2 100644 --- a/secret-store/src/key_server_cluster/jobs/servers_set_change_access_job.rs +++ b/secret-store/src/key_server_cluster/jobs/servers_set_change_access_job.rs @@ -14,136 +14,193 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::{BTreeSet, BTreeMap}; -use ethkey::{Public, Signature, recover}; +use ethkey::{recover, Public, Signature}; +use key_server_cluster::{ + jobs::job_session::{JobExecutor, JobPartialRequestAction, JobPartialResponseAction}, + message::{InitializeConsensusSessionOfShareAdd, InitializeConsensusSessionWithServersSet}, + Error, NodeId, SessionId, +}; +use std::collections::{BTreeMap, BTreeSet}; use tiny_keccak::Keccak; -use key_server_cluster::{Error, NodeId, SessionId}; -use key_server_cluster::message::{InitializeConsensusSessionWithServersSet, InitializeConsensusSessionOfShareAdd}; -use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor}; /// Purpose of this job is to check if requestor is administrator of SecretStore (i.e. it have access to change key servers set). pub struct ServersSetChangeAccessJob { - /// Servers set administrator public key (this could be changed to ACL-based check later). - administrator: Public, - /// Old servers set. - old_servers_set: Option>, - /// New servers set. - new_servers_set: Option>, - /// Old servers set, signed by requester. - old_set_signature: Option, - /// New servers set, signed by requester. - new_set_signature: Option, + /// Servers set administrator public key (this could be changed to ACL-based check later). + administrator: Public, + /// Old servers set. + old_servers_set: Option>, + /// New servers set. + new_servers_set: Option>, + /// Old servers set, signed by requester. 
+ old_set_signature: Option, + /// New servers set, signed by requester. + new_set_signature: Option, } /// Servers set change job partial request. pub struct ServersSetChangeAccessRequest { - /// Old servers set. - pub old_servers_set: BTreeSet, - /// New servers set. - pub new_servers_set: BTreeSet, - /// Hash(old_servers_set), signed by requester. - pub old_set_signature: Signature, - /// Hash(new_servers_set), signed by requester. - pub new_set_signature: Signature, + /// Old servers set. + pub old_servers_set: BTreeSet, + /// New servers set. + pub new_servers_set: BTreeSet, + /// Hash(old_servers_set), signed by requester. + pub old_set_signature: Signature, + /// Hash(new_servers_set), signed by requester. + pub new_set_signature: Signature, } impl<'a> From<&'a InitializeConsensusSessionWithServersSet> for ServersSetChangeAccessRequest { - fn from(message: &InitializeConsensusSessionWithServersSet) -> Self { - ServersSetChangeAccessRequest { - old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(), - new_servers_set: message.new_nodes_set.iter().cloned().map(Into::into).collect(), - old_set_signature: message.old_set_signature.clone().into(), - new_set_signature: message.new_set_signature.clone().into(), - } - } + fn from(message: &InitializeConsensusSessionWithServersSet) -> Self { + ServersSetChangeAccessRequest { + old_servers_set: message + .old_nodes_set + .iter() + .cloned() + .map(Into::into) + .collect(), + new_servers_set: message + .new_nodes_set + .iter() + .cloned() + .map(Into::into) + .collect(), + old_set_signature: message.old_set_signature.clone().into(), + new_set_signature: message.new_set_signature.clone().into(), + } + } } impl<'a> From<&'a InitializeConsensusSessionOfShareAdd> for ServersSetChangeAccessRequest { - fn from(message: &InitializeConsensusSessionOfShareAdd) -> Self { - ServersSetChangeAccessRequest { - old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(), - 
new_servers_set: message.new_nodes_map.keys().cloned().map(Into::into).collect(), - old_set_signature: message.old_set_signature.clone().into(), - new_set_signature: message.new_set_signature.clone().into(), - } - } + fn from(message: &InitializeConsensusSessionOfShareAdd) -> Self { + ServersSetChangeAccessRequest { + old_servers_set: message + .old_nodes_set + .iter() + .cloned() + .map(Into::into) + .collect(), + new_servers_set: message + .new_nodes_map + .keys() + .cloned() + .map(Into::into) + .collect(), + old_set_signature: message.old_set_signature.clone().into(), + new_set_signature: message.new_set_signature.clone().into(), + } + } } impl ServersSetChangeAccessJob { - pub fn new_on_slave(administrator: Public) -> Self { - ServersSetChangeAccessJob { - administrator: administrator, - old_servers_set: None, - new_servers_set: None, - old_set_signature: None, - new_set_signature: None, - } - } + pub fn new_on_slave(administrator: Public) -> Self { + ServersSetChangeAccessJob { + administrator: administrator, + old_servers_set: None, + new_servers_set: None, + old_set_signature: None, + new_set_signature: None, + } + } - pub fn new_on_master(administrator: Public, old_servers_set: BTreeSet, new_servers_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Self { - ServersSetChangeAccessJob { - administrator: administrator, - old_servers_set: Some(old_servers_set), - new_servers_set: Some(new_servers_set), - old_set_signature: Some(old_set_signature), - new_set_signature: Some(new_set_signature), - } - } + pub fn new_on_master( + administrator: Public, + old_servers_set: BTreeSet, + new_servers_set: BTreeSet, + old_set_signature: Signature, + new_set_signature: Signature, + ) -> Self { + ServersSetChangeAccessJob { + administrator: administrator, + old_servers_set: Some(old_servers_set), + new_servers_set: Some(new_servers_set), + old_set_signature: Some(old_set_signature), + new_set_signature: Some(new_set_signature), + } + } - pub fn 
new_servers_set(&self) -> Option<&BTreeSet> { - self.new_servers_set.as_ref() - } + pub fn new_servers_set(&self) -> Option<&BTreeSet> { + self.new_servers_set.as_ref() + } } impl JobExecutor for ServersSetChangeAccessJob { - type PartialJobRequest = ServersSetChangeAccessRequest; - type PartialJobResponse = bool; - type JobResponse = BTreeSet; + type PartialJobRequest = ServersSetChangeAccessRequest; + type PartialJobResponse = bool; + type JobResponse = BTreeSet; - fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet) -> Result { - let explanation = "prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed"; - Ok(ServersSetChangeAccessRequest { - old_servers_set: self.old_servers_set.clone().expect(explanation), - new_servers_set: self.new_servers_set.clone().expect(explanation), - old_set_signature: self.old_set_signature.clone().expect(explanation), - new_set_signature: self.new_set_signature.clone().expect(explanation), - }) - } + fn prepare_partial_request( + &self, + _node: &NodeId, + _nodes: &BTreeSet, + ) -> Result { + let explanation = "prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed"; + Ok(ServersSetChangeAccessRequest { + old_servers_set: self.old_servers_set.clone().expect(explanation), + new_servers_set: self.new_servers_set.clone().expect(explanation), + old_set_signature: self.old_set_signature.clone().expect(explanation), + new_set_signature: self.new_set_signature.clone().expect(explanation), + }) + } - fn process_partial_request(&mut self, partial_request: ServersSetChangeAccessRequest) -> Result, Error> { - let ServersSetChangeAccessRequest { - old_servers_set, - new_servers_set, - old_set_signature, - new_set_signature, - } = partial_request; + fn process_partial_request( + &mut self, + partial_request: ServersSetChangeAccessRequest, + ) -> Result, Error> { + let ServersSetChangeAccessRequest { + 
old_servers_set, + new_servers_set, + old_set_signature, + new_set_signature, + } = partial_request; - // check old servers set signature - let old_actual_public = recover(&old_set_signature, &ordered_nodes_hash(&old_servers_set).into())?; - let new_actual_public = recover(&new_set_signature, &ordered_nodes_hash(&new_servers_set).into())?; - let is_administrator = old_actual_public == self.administrator && new_actual_public == self.administrator; - self.new_servers_set = Some(new_servers_set); + // check old servers set signature + let old_actual_public = recover( + &old_set_signature, + &ordered_nodes_hash(&old_servers_set).into(), + )?; + let new_actual_public = recover( + &new_set_signature, + &ordered_nodes_hash(&new_servers_set).into(), + )?; + let is_administrator = + old_actual_public == self.administrator && new_actual_public == self.administrator; + self.new_servers_set = Some(new_servers_set); - Ok(if is_administrator { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) }) - } + Ok(if is_administrator { + JobPartialRequestAction::Respond(true) + } else { + JobPartialRequestAction::Reject(false) + }) + } - fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &bool) -> Result { - Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject }) - } + fn check_partial_response( + &mut self, + _sender: &NodeId, + partial_response: &bool, + ) -> Result { + Ok(if *partial_response { + JobPartialResponseAction::Accept + } else { + JobPartialResponseAction::Reject + }) + } - fn compute_response(&self, partial_responses: &BTreeMap) -> Result, Error> { - Ok(partial_responses.keys().cloned().collect()) - } + fn compute_response( + &self, + partial_responses: &BTreeMap, + ) -> Result, Error> { + Ok(partial_responses.keys().cloned().collect()) + } } pub fn ordered_nodes_hash(nodes: &BTreeSet) -> SessionId { - let mut nodes_keccak = Keccak::new_keccak256(); - for node in nodes 
{ - nodes_keccak.update(&*node); - } + let mut nodes_keccak = Keccak::new_keccak256(); + for node in nodes { + nodes_keccak.update(&*node); + } - let mut nodes_keccak_value = [0u8; 32]; - nodes_keccak.finalize(&mut nodes_keccak_value); + let mut nodes_keccak_value = [0u8; 32]; + nodes_keccak.finalize(&mut nodes_keccak_value); - nodes_keccak_value.into() + nodes_keccak_value.into() } diff --git a/secret-store/src/key_server_cluster/jobs/signing_job_ecdsa.rs b/secret-store/src/key_server_cluster/jobs/signing_job_ecdsa.rs index 0628b1e75..6b64ef4f9 100644 --- a/secret-store/src/key_server_cluster/jobs/signing_job_ecdsa.rs +++ b/secret-store/src/key_server_cluster/jobs/signing_job_ecdsa.rs @@ -14,138 +14,180 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::{BTreeSet, BTreeMap}; -use ethkey::{Public, Secret, Signature}; use ethereum_types::H256; -use key_server_cluster::{Error, NodeId, DocumentKeyShare}; -use key_server_cluster::math; -use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor}; +use ethkey::{Public, Secret, Signature}; +use key_server_cluster::{ + jobs::job_session::{JobExecutor, JobPartialRequestAction, JobPartialResponseAction}, + math, DocumentKeyShare, Error, NodeId, +}; +use std::collections::{BTreeMap, BTreeSet}; /// Signing job. pub struct EcdsaSigningJob { - /// Key share. - key_share: DocumentKeyShare, - /// Key version. - key_version: H256, - /// Share of inv(nonce). - inv_nonce_share: Secret, - /// Nonce public. - nonce_public: Public, - /// Request id. - request_id: Option, - /// - inversed_nonce_coeff: Option, - /// Message hash. - message_hash: Option, + /// Key share. + key_share: DocumentKeyShare, + /// Key version. + key_version: H256, + /// Share of inv(nonce). + inv_nonce_share: Secret, + /// Nonce public. + nonce_public: Public, + /// Request id. 
+ request_id: Option, + /// + inversed_nonce_coeff: Option, + /// Message hash. + message_hash: Option, } /// Signing job partial request. pub struct EcdsaPartialSigningRequest { - /// Request id. - pub id: Secret, - /// - pub inversed_nonce_coeff: Secret, - /// Message hash to sign. - pub message_hash: H256, + /// Request id. + pub id: Secret, + /// + pub inversed_nonce_coeff: Secret, + /// Message hash to sign. + pub message_hash: H256, } /// Signing job partial response. #[derive(Clone)] pub struct EcdsaPartialSigningResponse { - /// Request id. - pub request_id: Secret, - /// Partial signature' s share. - pub partial_signature_s: Secret, + /// Request id. + pub request_id: Secret, + /// Partial signature' s share. + pub partial_signature_s: Secret, } impl EcdsaSigningJob { - pub fn new_on_slave(key_share: DocumentKeyShare, key_version: H256, nonce_public: Public, inv_nonce_share: Secret) -> Result { - Ok(EcdsaSigningJob { - key_share: key_share, - key_version: key_version, - nonce_public: nonce_public, - inv_nonce_share: inv_nonce_share, - request_id: None, - inversed_nonce_coeff: None, - message_hash: None, - }) - } + pub fn new_on_slave( + key_share: DocumentKeyShare, + key_version: H256, + nonce_public: Public, + inv_nonce_share: Secret, + ) -> Result { + Ok(EcdsaSigningJob { + key_share: key_share, + key_version: key_version, + nonce_public: nonce_public, + inv_nonce_share: inv_nonce_share, + request_id: None, + inversed_nonce_coeff: None, + message_hash: None, + }) + } - pub fn new_on_master(key_share: DocumentKeyShare, key_version: H256, nonce_public: Public, inv_nonce_share: Secret, inversed_nonce_coeff: Secret, message_hash: H256) -> Result { - Ok(EcdsaSigningJob { - key_share: key_share, - key_version: key_version, - nonce_public: nonce_public, - inv_nonce_share: inv_nonce_share, - request_id: Some(math::generate_random_scalar()?), - inversed_nonce_coeff: Some(inversed_nonce_coeff), - message_hash: Some(message_hash), - }) - } + pub fn new_on_master( + 
key_share: DocumentKeyShare, + key_version: H256, + nonce_public: Public, + inv_nonce_share: Secret, + inversed_nonce_coeff: Secret, + message_hash: H256, + ) -> Result { + Ok(EcdsaSigningJob { + key_share: key_share, + key_version: key_version, + nonce_public: nonce_public, + inv_nonce_share: inv_nonce_share, + request_id: Some(math::generate_random_scalar()?), + inversed_nonce_coeff: Some(inversed_nonce_coeff), + message_hash: Some(message_hash), + }) + } } impl JobExecutor for EcdsaSigningJob { - type PartialJobRequest = EcdsaPartialSigningRequest; - type PartialJobResponse = EcdsaPartialSigningResponse; - type JobResponse = Signature; + type PartialJobRequest = EcdsaPartialSigningRequest; + type PartialJobResponse = EcdsaPartialSigningResponse; + type JobResponse = Signature; - fn prepare_partial_request(&self, _node: &NodeId, nodes: &BTreeSet) -> Result { - debug_assert!(nodes.len() == self.key_share.threshold * 2 + 1); + fn prepare_partial_request( + &self, + _node: &NodeId, + nodes: &BTreeSet, + ) -> Result { + debug_assert!(nodes.len() == self.key_share.threshold * 2 + 1); - let request_id = self.request_id.as_ref() + let request_id = self.request_id.as_ref() .expect("prepare_partial_request is only called on master nodes; request_id is filed in constructor on master nodes; qed"); - let inversed_nonce_coeff = self.inversed_nonce_coeff.as_ref() + let inversed_nonce_coeff = self.inversed_nonce_coeff.as_ref() .expect("prepare_partial_request is only called on master nodes; inversed_nonce_coeff is filed in constructor on master nodes; qed"); - let message_hash = self.message_hash.as_ref() + let message_hash = self.message_hash.as_ref() .expect("compute_response is only called on master nodes; message_hash is filed in constructor on master nodes; qed"); - Ok(EcdsaPartialSigningRequest { - id: request_id.clone(), - inversed_nonce_coeff: inversed_nonce_coeff.clone(), - message_hash: message_hash.clone(), - }) - } + Ok(EcdsaPartialSigningRequest { + id: 
request_id.clone(), + inversed_nonce_coeff: inversed_nonce_coeff.clone(), + message_hash: message_hash.clone(), + }) + } - fn process_partial_request(&mut self, partial_request: EcdsaPartialSigningRequest) -> Result, Error> { - let inversed_nonce_coeff_mul_nonce = math::compute_secret_mul(&partial_request.inversed_nonce_coeff, &self.inv_nonce_share)?; - let key_version = self.key_share.version(&self.key_version)?; - let signature_r = math::compute_ecdsa_r(&self.nonce_public)?; - let inv_nonce_mul_secret = math::compute_secret_mul(&inversed_nonce_coeff_mul_nonce, &key_version.secret_share)?; - let partial_signature_s = math::compute_ecdsa_s_share( - &inversed_nonce_coeff_mul_nonce, - &inv_nonce_mul_secret, - &signature_r, - &math::to_scalar(partial_request.message_hash)?, - )?; + fn process_partial_request( + &mut self, + partial_request: EcdsaPartialSigningRequest, + ) -> Result, Error> { + let inversed_nonce_coeff_mul_nonce = + math::compute_secret_mul(&partial_request.inversed_nonce_coeff, &self.inv_nonce_share)?; + let key_version = self.key_share.version(&self.key_version)?; + let signature_r = math::compute_ecdsa_r(&self.nonce_public)?; + let inv_nonce_mul_secret = + math::compute_secret_mul(&inversed_nonce_coeff_mul_nonce, &key_version.secret_share)?; + let partial_signature_s = math::compute_ecdsa_s_share( + &inversed_nonce_coeff_mul_nonce, + &inv_nonce_mul_secret, + &signature_r, + &math::to_scalar(partial_request.message_hash)?, + )?; - Ok(JobPartialRequestAction::Respond(EcdsaPartialSigningResponse { - request_id: partial_request.id, - partial_signature_s: partial_signature_s, - })) - } + Ok(JobPartialRequestAction::Respond( + EcdsaPartialSigningResponse { + request_id: partial_request.id, + partial_signature_s: partial_signature_s, + }, + )) + } - fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &EcdsaPartialSigningResponse) -> Result { - if Some(&partial_response.request_id) != self.request_id.as_ref() { - return 
Ok(JobPartialResponseAction::Ignore); - } - // TODO [Trust]: check_ecdsa_signature_share() + fn check_partial_response( + &mut self, + _sender: &NodeId, + partial_response: &EcdsaPartialSigningResponse, + ) -> Result { + if Some(&partial_response.request_id) != self.request_id.as_ref() { + return Ok(JobPartialResponseAction::Ignore); + } + // TODO [Trust]: check_ecdsa_signature_share() - Ok(JobPartialResponseAction::Accept) - } + Ok(JobPartialResponseAction::Accept) + } - fn compute_response(&self, partial_responses: &BTreeMap) -> Result { - let key_version = self.key_share.version(&self.key_version)?; - if partial_responses.keys().any(|n| !key_version.id_numbers.contains_key(n)) { - return Err(Error::InvalidMessage); - } + fn compute_response( + &self, + partial_responses: &BTreeMap, + ) -> Result { + let key_version = self.key_share.version(&self.key_version)?; + if partial_responses + .keys() + .any(|n| !key_version.id_numbers.contains_key(n)) + { + return Err(Error::InvalidMessage); + } - let id_numbers: Vec<_> = partial_responses.keys().map(|n| key_version.id_numbers[n].clone()).collect(); - let signature_s_shares: Vec<_> = partial_responses.values().map(|r| r.partial_signature_s.clone()).collect(); - let signature_s = math::compute_ecdsa_s(self.key_share.threshold, &signature_s_shares, &id_numbers)?; - let signature_r = math::compute_ecdsa_r(&self.nonce_public)?; + let id_numbers: Vec<_> = partial_responses + .keys() + .map(|n| key_version.id_numbers[n].clone()) + .collect(); + let signature_s_shares: Vec<_> = partial_responses + .values() + .map(|r| r.partial_signature_s.clone()) + .collect(); + let signature_s = + math::compute_ecdsa_s(self.key_share.threshold, &signature_s_shares, &id_numbers)?; + let signature_r = math::compute_ecdsa_r(&self.nonce_public)?; - let signature = math::serialize_ecdsa_signature(&self.nonce_public, signature_r, signature_s); + let signature = + math::serialize_ecdsa_signature(&self.nonce_public, signature_r, signature_s); - 
Ok(signature) - } + Ok(signature) + } } diff --git a/secret-store/src/key_server_cluster/jobs/signing_job_schnorr.rs b/secret-store/src/key_server_cluster/jobs/signing_job_schnorr.rs index 7e41dce47..643920589 100644 --- a/secret-store/src/key_server_cluster/jobs/signing_job_schnorr.rs +++ b/secret-store/src/key_server_cluster/jobs/signing_job_schnorr.rs @@ -14,138 +14,181 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::{BTreeSet, BTreeMap}; -use ethkey::{Public, Secret}; use ethereum_types::H256; -use key_server_cluster::{Error, NodeId, DocumentKeyShare}; -use key_server_cluster::math; -use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor}; +use ethkey::{Public, Secret}; +use key_server_cluster::{ + jobs::job_session::{JobExecutor, JobPartialRequestAction, JobPartialResponseAction}, + math, DocumentKeyShare, Error, NodeId, +}; +use std::collections::{BTreeMap, BTreeSet}; /// Signing job. pub struct SchnorrSigningJob { - /// This node id. - self_node_id: NodeId, - /// Key share. - key_share: DocumentKeyShare, - /// Key version. - key_version: H256, - /// Session public key. - session_public: Public, - /// Session secret coefficient. - session_secret_coeff: Secret, - /// Request id. - request_id: Option, - /// Message hash. - message_hash: Option, + /// This node id. + self_node_id: NodeId, + /// Key share. + key_share: DocumentKeyShare, + /// Key version. + key_version: H256, + /// Session public key. + session_public: Public, + /// Session secret coefficient. + session_secret_coeff: Secret, + /// Request id. + request_id: Option, + /// Message hash. + message_hash: Option, } /// Signing job partial request. pub struct SchnorrPartialSigningRequest { - /// Request id. - pub id: Secret, - /// Message hash. - pub message_hash: H256, - /// Id of other nodes, participating in signing. 
- pub other_nodes_ids: BTreeSet, + /// Request id. + pub id: Secret, + /// Message hash. + pub message_hash: H256, + /// Id of other nodes, participating in signing. + pub other_nodes_ids: BTreeSet, } /// Signing job partial response. #[derive(Clone)] pub struct SchnorrPartialSigningResponse { - /// Request id. - pub request_id: Secret, - /// Partial signature. - pub partial_signature: Secret, + /// Request id. + pub request_id: Secret, + /// Partial signature. + pub partial_signature: Secret, } impl SchnorrSigningJob { - pub fn new_on_slave(self_node_id: NodeId, key_share: DocumentKeyShare, key_version: H256, session_public: Public, session_secret_coeff: Secret) -> Result { - Ok(SchnorrSigningJob { - self_node_id: self_node_id, - key_share: key_share, - key_version: key_version, - session_public: session_public, - session_secret_coeff: session_secret_coeff, - request_id: None, - message_hash: None, - }) - } + pub fn new_on_slave( + self_node_id: NodeId, + key_share: DocumentKeyShare, + key_version: H256, + session_public: Public, + session_secret_coeff: Secret, + ) -> Result { + Ok(SchnorrSigningJob { + self_node_id: self_node_id, + key_share: key_share, + key_version: key_version, + session_public: session_public, + session_secret_coeff: session_secret_coeff, + request_id: None, + message_hash: None, + }) + } - pub fn new_on_master(self_node_id: NodeId, key_share: DocumentKeyShare, key_version: H256, session_public: Public, session_secret_coeff: Secret, message_hash: H256) -> Result { - Ok(SchnorrSigningJob { - self_node_id: self_node_id, - key_share: key_share, - key_version: key_version, - session_public: session_public, - session_secret_coeff: session_secret_coeff, - request_id: Some(math::generate_random_scalar()?), - message_hash: Some(message_hash), - }) - } + pub fn new_on_master( + self_node_id: NodeId, + key_share: DocumentKeyShare, + key_version: H256, + session_public: Public, + session_secret_coeff: Secret, + message_hash: H256, + ) -> Result { + 
Ok(SchnorrSigningJob { + self_node_id: self_node_id, + key_share: key_share, + key_version: key_version, + session_public: session_public, + session_secret_coeff: session_secret_coeff, + request_id: Some(math::generate_random_scalar()?), + message_hash: Some(message_hash), + }) + } } impl JobExecutor for SchnorrSigningJob { - type PartialJobRequest = SchnorrPartialSigningRequest; - type PartialJobResponse = SchnorrPartialSigningResponse; - type JobResponse = (Secret, Secret); + type PartialJobRequest = SchnorrPartialSigningRequest; + type PartialJobResponse = SchnorrPartialSigningResponse; + type JobResponse = (Secret, Secret); - fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet) -> Result { - debug_assert!(nodes.len() == self.key_share.threshold + 1); + fn prepare_partial_request( + &self, + node: &NodeId, + nodes: &BTreeSet, + ) -> Result { + debug_assert!(nodes.len() == self.key_share.threshold + 1); - let request_id = self.request_id.as_ref() + let request_id = self.request_id.as_ref() .expect("prepare_partial_request is only called on master nodes; request_id is filed in constructor on master nodes; qed"); - let message_hash = self.message_hash.as_ref() + let message_hash = self.message_hash.as_ref() .expect("compute_response is only called on master nodes; message_hash is filed in constructor on master nodes; qed"); - let mut other_nodes_ids = nodes.clone(); - other_nodes_ids.remove(node); + let mut other_nodes_ids = nodes.clone(); + other_nodes_ids.remove(node); - Ok(SchnorrPartialSigningRequest { - id: request_id.clone(), - message_hash: message_hash.clone(), - other_nodes_ids: other_nodes_ids, - }) - } + Ok(SchnorrPartialSigningRequest { + id: request_id.clone(), + message_hash: message_hash.clone(), + other_nodes_ids: other_nodes_ids, + }) + } - fn process_partial_request(&mut self, partial_request: SchnorrPartialSigningRequest) -> Result, Error> { - let key_version = self.key_share.version(&self.key_version)?; - if 
partial_request.other_nodes_ids.len() != self.key_share.threshold - || partial_request.other_nodes_ids.contains(&self.self_node_id) - || partial_request.other_nodes_ids.iter().any(|n| !key_version.id_numbers.contains_key(n)) { - return Err(Error::InvalidMessage); - } + fn process_partial_request( + &mut self, + partial_request: SchnorrPartialSigningRequest, + ) -> Result, Error> { + let key_version = self.key_share.version(&self.key_version)?; + if partial_request.other_nodes_ids.len() != self.key_share.threshold + || partial_request.other_nodes_ids.contains(&self.self_node_id) + || partial_request + .other_nodes_ids + .iter() + .any(|n| !key_version.id_numbers.contains_key(n)) + { + return Err(Error::InvalidMessage); + } - let self_id_number = &key_version.id_numbers[&self.self_node_id]; - let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &key_version.id_numbers[n]); - let combined_hash = math::combine_message_hash_with_public(&partial_request.message_hash, &self.session_public)?; - Ok(JobPartialRequestAction::Respond(SchnorrPartialSigningResponse { - request_id: partial_request.id, - partial_signature: math::compute_schnorr_signature_share( - self.key_share.threshold, - &combined_hash, - &self.session_secret_coeff, - &key_version.secret_share, - self_id_number, - other_id_numbers - )?, - })) - } + let self_id_number = &key_version.id_numbers[&self.self_node_id]; + let other_id_numbers = partial_request + .other_nodes_ids + .iter() + .map(|n| &key_version.id_numbers[n]); + let combined_hash = math::combine_message_hash_with_public( + &partial_request.message_hash, + &self.session_public, + )?; + Ok(JobPartialRequestAction::Respond( + SchnorrPartialSigningResponse { + request_id: partial_request.id, + partial_signature: math::compute_schnorr_signature_share( + self.key_share.threshold, + &combined_hash, + &self.session_secret_coeff, + &key_version.secret_share, + self_id_number, + other_id_numbers, + )?, + }, + )) + } - fn 
check_partial_response(&mut self, _sender: &NodeId, partial_response: &SchnorrPartialSigningResponse) -> Result { - if Some(&partial_response.request_id) != self.request_id.as_ref() { - return Ok(JobPartialResponseAction::Ignore); - } - // TODO [Trust]: check_schnorr_signature_share() + fn check_partial_response( + &mut self, + _sender: &NodeId, + partial_response: &SchnorrPartialSigningResponse, + ) -> Result { + if Some(&partial_response.request_id) != self.request_id.as_ref() { + return Ok(JobPartialResponseAction::Ignore); + } + // TODO [Trust]: check_schnorr_signature_share() - Ok(JobPartialResponseAction::Accept) - } + Ok(JobPartialResponseAction::Accept) + } - fn compute_response(&self, partial_responses: &BTreeMap) -> Result<(Secret, Secret), Error> { - let message_hash = self.message_hash.as_ref() + fn compute_response( + &self, + partial_responses: &BTreeMap, + ) -> Result<(Secret, Secret), Error> { + let message_hash = self.message_hash.as_ref() .expect("compute_response is only called on master nodes; message_hash is filed in constructor on master nodes; qed"); - let signature_c = math::combine_message_hash_with_public(message_hash, &self.session_public)?; - let signature_s = math::compute_schnorr_signature(partial_responses.values().map(|r| &r.partial_signature))?; + let signature_c = + math::combine_message_hash_with_public(message_hash, &self.session_public)?; + let signature_s = math::compute_schnorr_signature( + partial_responses.values().map(|r| &r.partial_signature), + )?; - Ok((signature_c, signature_s)) - } + Ok((signature_c, signature_s)) + } } diff --git a/secret-store/src/key_server_cluster/jobs/unknown_sessions_job.rs b/secret-store/src/key_server_cluster/jobs/unknown_sessions_job.rs index 33eca6583..5803c4d2e 100644 --- a/secret-store/src/key_server_cluster/jobs/unknown_sessions_job.rs +++ b/secret-store/src/key_server_cluster/jobs/unknown_sessions_job.rs @@ -14,67 +14,95 @@ // You should have received a copy of the GNU General Public 
License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::collections::{BTreeSet, BTreeMap}; -use key_server_cluster::{Error, NodeId, SessionId, KeyStorage}; -use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor}; +use key_server_cluster::{ + jobs::job_session::{JobExecutor, JobPartialRequestAction, JobPartialResponseAction}, + Error, KeyStorage, NodeId, SessionId, +}; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; /// Unknown sessions report job. pub struct UnknownSessionsJob { - /// Target node id. - target_node_id: Option, - /// Keys storage. - key_storage: Arc, + /// Target node id. + target_node_id: Option, + /// Keys storage. + key_storage: Arc, } impl UnknownSessionsJob { - pub fn new_on_slave(key_storage: Arc) -> Self { - UnknownSessionsJob { - target_node_id: None, - key_storage: key_storage, - } - } + pub fn new_on_slave(key_storage: Arc) -> Self { + UnknownSessionsJob { + target_node_id: None, + key_storage: key_storage, + } + } - pub fn new_on_master(key_storage: Arc, self_node_id: NodeId) -> Self { - UnknownSessionsJob { - target_node_id: Some(self_node_id), - key_storage: key_storage, - } - } + pub fn new_on_master(key_storage: Arc, self_node_id: NodeId) -> Self { + UnknownSessionsJob { + target_node_id: Some(self_node_id), + key_storage: key_storage, + } + } } impl JobExecutor for UnknownSessionsJob { - type PartialJobRequest = NodeId; - type PartialJobResponse = BTreeSet; - type JobResponse = BTreeMap>; + type PartialJobRequest = NodeId; + type PartialJobResponse = BTreeSet; + type JobResponse = BTreeMap>; - fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet) -> Result { - Ok(self.target_node_id.clone().expect("prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed")) - } + fn prepare_partial_request( + &self, + _node: &NodeId, + _nodes: &BTreeSet, + ) -> Result { + 
Ok(self.target_node_id.clone().expect("prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed")) + } - fn process_partial_request(&mut self, partial_request: NodeId) -> Result>, Error> { - Ok(JobPartialRequestAction::Respond(self.key_storage.iter() - .filter(|&(_, ref key_share)| !key_share.versions.last().map(|v| v.id_numbers.contains_key(&partial_request)).unwrap_or(true)) - .map(|(id, _)| id.clone()) - .collect())) - } + fn process_partial_request( + &mut self, + partial_request: NodeId, + ) -> Result>, Error> { + Ok(JobPartialRequestAction::Respond( + self.key_storage + .iter() + .filter(|&(_, ref key_share)| { + !key_share + .versions + .last() + .map(|v| v.id_numbers.contains_key(&partial_request)) + .unwrap_or(true) + }) + .map(|(id, _)| id.clone()) + .collect(), + )) + } - fn check_partial_response(&mut self, _sender: &NodeId, _partial_response: &BTreeSet) -> Result { - Ok(JobPartialResponseAction::Accept) - } + fn check_partial_response( + &mut self, + _sender: &NodeId, + _partial_response: &BTreeSet, + ) -> Result { + Ok(JobPartialResponseAction::Accept) + } - // TODO [Opt]: - // currently ALL unknown sessions are sent at once - it is better to limit messages by size/len => add partial-partial responses - fn compute_response(&self, partial_responses: &BTreeMap>) -> Result>, Error> { - let mut result: BTreeMap> = BTreeMap::new(); - for (node_id, node_sessions) in partial_responses { - for node_session in node_sessions { - result.entry(node_session.clone()) - .or_insert_with(Default::default) - .insert(node_id.clone()); - } - } + // TODO [Opt]: + // currently ALL unknown sessions are sent at once - it is better to limit messages by size/len => add partial-partial responses + fn compute_response( + &self, + partial_responses: &BTreeMap>, + ) -> Result>, Error> { + let mut result: BTreeMap> = BTreeMap::new(); + for (node_id, node_sessions) in partial_responses { + for node_session in node_sessions { + 
result + .entry(node_session.clone()) + .or_insert_with(Default::default) + .insert(node_id.clone()); + } + } - Ok(result) - } + Ok(result) + } } diff --git a/secret-store/src/key_server_cluster/math.rs b/secret-store/src/key_server_cluster/math.rs index 60d48b7f3..a5aef488c 100644 --- a/secret-store/src/key_server_cluster/math.rs +++ b/secret-store/src/key_server_cluster/math.rs @@ -14,1070 +14,1561 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use ethkey::{Public, Secret, Signature, Random, Generator, math}; use ethereum_types::{H256, U256}; +use ethkey::{math, Generator, Public, Random, Secret, Signature}; use hash::keccak; use key_server_cluster::Error; /// Encryption result. #[derive(Debug)] pub struct EncryptedSecret { - /// Common encryption point. - pub common_point: Public, - /// Ecnrypted point. - pub encrypted_point: Public, + /// Common encryption point. + pub common_point: Public, + /// Ecnrypted point. + pub encrypted_point: Public, } /// Create zero scalar. pub fn zero_scalar() -> Secret { - Secret::zero() + Secret::zero() } /// Convert hash to EC scalar (modulo curve order). pub fn to_scalar(hash: H256) -> Result { - let scalar: U256 = hash.into(); - let scalar: H256 = (scalar % math::curve_order()).into(); - let scalar = Secret::from(scalar.0); - scalar.check_validity()?; - Ok(scalar) + let scalar: U256 = hash.into(); + let scalar: H256 = (scalar % math::curve_order()).into(); + let scalar = Secret::from(scalar.0); + scalar.check_validity()?; + Ok(scalar) } /// Generate random scalar. pub fn generate_random_scalar() -> Result { - Ok(Random.generate()?.secret().clone()) + Ok(Random.generate()?.secret().clone()) } /// Generate random point. pub fn generate_random_point() -> Result { - Ok(Random.generate()?.public().clone()) + Ok(Random.generate()?.public().clone()) } /// Get X coordinate of point. 
fn public_x(public: &Public) -> H256 { - public[0..32].into() + public[0..32].into() } /// Get Y coordinate of point. fn public_y(public: &Public) -> H256 { - public[32..64].into() + public[32..64].into() } /// Compute publics sum. -pub fn compute_public_sum<'a, I>(mut publics: I) -> Result where I: Iterator { - let mut sum = publics.next().expect("compute_public_sum is called when there's at least one public; qed").clone(); - while let Some(public) = publics.next() { - math::public_add(&mut sum, &public)?; - } - Ok(sum) +pub fn compute_public_sum<'a, I>(mut publics: I) -> Result +where + I: Iterator, +{ + let mut sum = publics + .next() + .expect("compute_public_sum is called when there's at least one public; qed") + .clone(); + while let Some(public) = publics.next() { + math::public_add(&mut sum, &public)?; + } + Ok(sum) } /// Compute secrets sum. -pub fn compute_secret_sum<'a, I>(mut secrets: I) -> Result where I: Iterator { - let mut sum = secrets.next().expect("compute_secret_sum is called when there's at least one secret; qed").clone(); - while let Some(secret) = secrets.next() { - sum.add(secret)?; - } - Ok(sum) +pub fn compute_secret_sum<'a, I>(mut secrets: I) -> Result +where + I: Iterator, +{ + let mut sum = secrets + .next() + .expect("compute_secret_sum is called when there's at least one secret; qed") + .clone(); + while let Some(secret) = secrets.next() { + sum.add(secret)?; + } + Ok(sum) } /// Compute secrets multiplication. 
pub fn compute_secret_mul(secret1: &Secret, secret2: &Secret) -> Result { - let mut secret_mul = secret1.clone(); - secret_mul.mul(secret2)?; - Ok(secret_mul) + let mut secret_mul = secret1.clone(); + secret_mul.mul(secret2)?; + Ok(secret_mul) } /// Compute secrets 'shadow' multiplication: coeff * multiplication(s[j] / (s[i] - s[j])) for every i != j -pub fn compute_shadow_mul<'a, I>(coeff: &Secret, self_secret: &Secret, mut other_secrets: I) -> Result where I: Iterator { - // when there are no other secrets, only coeff is left - let other_secret = match other_secrets.next() { - Some(other_secret) => other_secret, - None => return Ok(coeff.clone()), - }; +pub fn compute_shadow_mul<'a, I>( + coeff: &Secret, + self_secret: &Secret, + mut other_secrets: I, +) -> Result +where + I: Iterator, +{ + // when there are no other secrets, only coeff is left + let other_secret = match other_secrets.next() { + Some(other_secret) => other_secret, + None => return Ok(coeff.clone()), + }; - let mut shadow_mul = self_secret.clone(); - shadow_mul.sub(other_secret)?; - shadow_mul.inv()?; - shadow_mul.mul(other_secret)?; - while let Some(other_secret) = other_secrets.next() { - let mut shadow_mul_element = self_secret.clone(); - shadow_mul_element.sub(other_secret)?; - shadow_mul_element.inv()?; - shadow_mul_element.mul(other_secret)?; - shadow_mul.mul(&shadow_mul_element)?; - } + let mut shadow_mul = self_secret.clone(); + shadow_mul.sub(other_secret)?; + shadow_mul.inv()?; + shadow_mul.mul(other_secret)?; + while let Some(other_secret) = other_secrets.next() { + let mut shadow_mul_element = self_secret.clone(); + shadow_mul_element.sub(other_secret)?; + shadow_mul_element.inv()?; + shadow_mul_element.mul(other_secret)?; + shadow_mul.mul(&shadow_mul_element)?; + } - shadow_mul.mul(coeff)?; - Ok(shadow_mul) + shadow_mul.mul(coeff)?; + Ok(shadow_mul) } /// Update point by multiplying to random scalar pub fn update_random_point(point: &mut Public) -> Result<(), Error> { - 
Ok(math::public_mul_secret(point, &generate_random_scalar()?)?) + Ok(math::public_mul_secret(point, &generate_random_scalar()?)?) } /// Generate random polynom of threshold degree pub fn generate_random_polynom(threshold: usize) -> Result, Error> { - (0..threshold + 1) - .map(|_| generate_random_scalar()) - .collect() + (0..threshold + 1) + .map(|_| generate_random_scalar()) + .collect() } /// Compute value of polynom, using `node_number` as argument pub fn compute_polynom(polynom: &[Secret], node_number: &Secret) -> Result { - debug_assert!(!polynom.is_empty()); + debug_assert!(!polynom.is_empty()); - let mut result = polynom[0].clone(); - for i in 1..polynom.len() { - // calculate pow(node_number, i) - let mut appendum = node_number.clone(); - appendum.pow(i)?; + let mut result = polynom[0].clone(); + for i in 1..polynom.len() { + // calculate pow(node_number, i) + let mut appendum = node_number.clone(); + appendum.pow(i)?; - // calculate coeff * pow(point, i) - appendum.mul(&polynom[i])?; + // calculate coeff * pow(point, i) + appendum.mul(&polynom[i])?; - // calculate result + coeff * pow(point, i) - result.add(&appendum)?; - } + // calculate result + coeff * pow(point, i) + result.add(&appendum)?; + } - Ok(result) + Ok(result) } /// Generate public keys for other participants. 
-pub fn public_values_generation(threshold: usize, derived_point: &Public, polynom1: &[Secret], polynom2: &[Secret]) -> Result, Error> { - debug_assert_eq!(polynom1.len(), threshold + 1); - debug_assert_eq!(polynom2.len(), threshold + 1); +pub fn public_values_generation( + threshold: usize, + derived_point: &Public, + polynom1: &[Secret], + polynom2: &[Secret], +) -> Result, Error> { + debug_assert_eq!(polynom1.len(), threshold + 1); + debug_assert_eq!(polynom2.len(), threshold + 1); - // compute t+1 public values - let mut publics = Vec::with_capacity(threshold + 1); - for i in 0..threshold + 1 { - let coeff1 = &polynom1[i]; + // compute t+1 public values + let mut publics = Vec::with_capacity(threshold + 1); + for i in 0..threshold + 1 { + let coeff1 = &polynom1[i]; - let mut multiplication1 = math::generation_point(); - math::public_mul_secret(&mut multiplication1, &coeff1)?; + let mut multiplication1 = math::generation_point(); + math::public_mul_secret(&mut multiplication1, &coeff1)?; - let coeff2 = &polynom2[i]; - let mut multiplication2 = derived_point.clone(); - math::public_mul_secret(&mut multiplication2, &coeff2)?; + let coeff2 = &polynom2[i]; + let mut multiplication2 = derived_point.clone(); + math::public_mul_secret(&mut multiplication2, &coeff2)?; - math::public_add(&mut multiplication1, &multiplication2)?; + math::public_add(&mut multiplication1, &multiplication2)?; - publics.push(multiplication1); - } - debug_assert_eq!(publics.len(), threshold + 1); + publics.push(multiplication1); + } + debug_assert_eq!(publics.len(), threshold + 1); - Ok(publics) + Ok(publics) } /// Check keys passed by other participants. 
-pub fn keys_verification(threshold: usize, derived_point: &Public, number_id: &Secret, secret1: &Secret, secret2: &Secret, publics: &[Public]) -> Result { - // calculate left part - let mut multiplication1 = math::generation_point(); - math::public_mul_secret(&mut multiplication1, secret1)?; +pub fn keys_verification( + threshold: usize, + derived_point: &Public, + number_id: &Secret, + secret1: &Secret, + secret2: &Secret, + publics: &[Public], +) -> Result { + // calculate left part + let mut multiplication1 = math::generation_point(); + math::public_mul_secret(&mut multiplication1, secret1)?; - let mut multiplication2 = derived_point.clone(); - math::public_mul_secret(&mut multiplication2, secret2)?; + let mut multiplication2 = derived_point.clone(); + math::public_mul_secret(&mut multiplication2, secret2)?; - math::public_add(&mut multiplication1, &multiplication2)?; - let left = multiplication1; + math::public_add(&mut multiplication1, &multiplication2)?; + let left = multiplication1; - // calculate right part - let mut right = publics[0].clone(); - for i in 1..threshold + 1 { - let mut secret_pow = number_id.clone(); - secret_pow.pow(i)?; + // calculate right part + let mut right = publics[0].clone(); + for i in 1..threshold + 1 { + let mut secret_pow = number_id.clone(); + secret_pow.pow(i)?; - let mut public_k = publics[i].clone(); - math::public_mul_secret(&mut public_k, &secret_pow)?; + let mut public_k = publics[i].clone(); + math::public_mul_secret(&mut public_k, &secret_pow)?; - math::public_add(&mut right, &public_k)?; - } + math::public_add(&mut right, &public_k)?; + } - Ok(left == right) + Ok(left == right) } /// Compute secret subshare from passed secret value. 
-pub fn compute_secret_subshare<'a, I>(threshold: usize, secret_value: &Secret, sender_id_number: &Secret, other_id_numbers: I) -> Result where I: Iterator { - let mut subshare = compute_shadow_mul(secret_value, sender_id_number, other_id_numbers)?; - if threshold % 2 != 0 { - subshare.neg()?; - } +pub fn compute_secret_subshare<'a, I>( + threshold: usize, + secret_value: &Secret, + sender_id_number: &Secret, + other_id_numbers: I, +) -> Result +where + I: Iterator, +{ + let mut subshare = compute_shadow_mul(secret_value, sender_id_number, other_id_numbers)?; + if threshold % 2 != 0 { + subshare.neg()?; + } - Ok(subshare) + Ok(subshare) } /// Compute secret share. -pub fn compute_secret_share<'a, I>(secret_values: I) -> Result where I: Iterator { - compute_secret_sum(secret_values) +pub fn compute_secret_share<'a, I>(secret_values: I) -> Result +where + I: Iterator, +{ + compute_secret_sum(secret_values) } /// Compute public key share. pub fn compute_public_share(self_secret_value: &Secret) -> Result { - let mut public_share = math::generation_point(); - math::public_mul_secret(&mut public_share, self_secret_value)?; - Ok(public_share) + let mut public_share = math::generation_point(); + math::public_mul_secret(&mut public_share, self_secret_value)?; + Ok(public_share) } /// Compute joint public key. -pub fn compute_joint_public<'a, I>(public_shares: I) -> Result where I: Iterator { - compute_public_sum(public_shares) +pub fn compute_joint_public<'a, I>(public_shares: I) -> Result +where + I: Iterator, +{ + compute_public_sum(public_shares) } /// Compute joint secret key from N secret coefficients. #[cfg(test)] -pub fn compute_joint_secret<'a, I>(secret_coeffs: I) -> Result where I: Iterator { - compute_secret_sum(secret_coeffs) +pub fn compute_joint_secret<'a, I>(secret_coeffs: I) -> Result +where + I: Iterator, +{ + compute_secret_sum(secret_coeffs) } /// Compute joint secret key from t+1 secret shares. 
-pub fn compute_joint_secret_from_shares<'a>(t: usize, secret_shares: &[&'a Secret], id_numbers: &[&'a Secret]) -> Result { - let secret_share_0 = secret_shares[0]; - let id_number_0 = id_numbers[0]; - let other_nodes_numbers = id_numbers.iter().skip(1).cloned(); - let mut result = compute_node_shadow(secret_share_0, id_number_0, other_nodes_numbers)?; - for i in 1..secret_shares.len() { - let secret_share_i = secret_shares[i]; - let id_number_i = id_numbers[i]; - let other_nodes_numbers = id_numbers.iter().enumerate().filter(|&(j, _)| j != i).map(|(_, n)| n).cloned(); - let addendum = compute_node_shadow(secret_share_i, id_number_i, other_nodes_numbers)?; - result.add(&addendum)?; - } +pub fn compute_joint_secret_from_shares<'a>( + t: usize, + secret_shares: &[&'a Secret], + id_numbers: &[&'a Secret], +) -> Result { + let secret_share_0 = secret_shares[0]; + let id_number_0 = id_numbers[0]; + let other_nodes_numbers = id_numbers.iter().skip(1).cloned(); + let mut result = compute_node_shadow(secret_share_0, id_number_0, other_nodes_numbers)?; + for i in 1..secret_shares.len() { + let secret_share_i = secret_shares[i]; + let id_number_i = id_numbers[i]; + let other_nodes_numbers = id_numbers + .iter() + .enumerate() + .filter(|&(j, _)| j != i) + .map(|(_, n)| n) + .cloned(); + let addendum = compute_node_shadow(secret_share_i, id_number_i, other_nodes_numbers)?; + result.add(&addendum)?; + } - if t % 2 != 0 { - result.neg()?; - } + if t % 2 != 0 { + result.neg()?; + } - Ok(result) + Ok(result) } /// Encrypt secret with joint public key. 
pub fn encrypt_secret(secret: &Public, joint_public: &Public) -> Result { - // this is performed by KS-cluster client (or KS master) - let key_pair = Random.generate()?; + // this is performed by KS-cluster client (or KS master) + let key_pair = Random.generate()?; - // k * T - let mut common_point = math::generation_point(); - math::public_mul_secret(&mut common_point, key_pair.secret())?; + // k * T + let mut common_point = math::generation_point(); + math::public_mul_secret(&mut common_point, key_pair.secret())?; - // M + k * y - let mut encrypted_point = joint_public.clone(); - math::public_mul_secret(&mut encrypted_point, key_pair.secret())?; - math::public_add(&mut encrypted_point, secret)?; + // M + k * y + let mut encrypted_point = joint_public.clone(); + math::public_mul_secret(&mut encrypted_point, key_pair.secret())?; + math::public_add(&mut encrypted_point, secret)?; - Ok(EncryptedSecret { - common_point: common_point, - encrypted_point: encrypted_point, - }) + Ok(EncryptedSecret { + common_point: common_point, + encrypted_point: encrypted_point, + }) } /// Compute shadow for the node. -pub fn compute_node_shadow<'a, I>(node_secret_share: &Secret, node_number: &Secret, other_nodes_numbers: I) -> Result where I: Iterator { - compute_shadow_mul(node_secret_share, node_number, other_nodes_numbers) +pub fn compute_node_shadow<'a, I>( + node_secret_share: &Secret, + node_number: &Secret, + other_nodes_numbers: I, +) -> Result +where + I: Iterator, +{ + compute_shadow_mul(node_secret_share, node_number, other_nodes_numbers) } /// Compute shadow point for the node. 
-pub fn compute_node_shadow_point(access_key: &Secret, common_point: &Public, node_shadow: &Secret, decrypt_shadow: Option) -> Result<(Public, Option), Error> { - let mut shadow_key = node_shadow.clone(); - let decrypt_shadow = match decrypt_shadow { - None => None, - Some(mut decrypt_shadow) => { - // update shadow key - shadow_key.mul(&decrypt_shadow)?; - // now udate decrypt shadow itself - decrypt_shadow.dec()?; - decrypt_shadow.mul(node_shadow)?; - Some(decrypt_shadow) - } - }; - shadow_key.mul(access_key)?; +pub fn compute_node_shadow_point( + access_key: &Secret, + common_point: &Public, + node_shadow: &Secret, + decrypt_shadow: Option, +) -> Result<(Public, Option), Error> { + let mut shadow_key = node_shadow.clone(); + let decrypt_shadow = match decrypt_shadow { + None => None, + Some(mut decrypt_shadow) => { + // update shadow key + shadow_key.mul(&decrypt_shadow)?; + // now udate decrypt shadow itself + decrypt_shadow.dec()?; + decrypt_shadow.mul(node_shadow)?; + Some(decrypt_shadow) + } + }; + shadow_key.mul(access_key)?; - let mut node_shadow_point = common_point.clone(); - math::public_mul_secret(&mut node_shadow_point, &shadow_key)?; - Ok((node_shadow_point, decrypt_shadow)) + let mut node_shadow_point = common_point.clone(); + math::public_mul_secret(&mut node_shadow_point, &shadow_key)?; + Ok((node_shadow_point, decrypt_shadow)) } /// Compute joint shadow point. -pub fn compute_joint_shadow_point<'a, I>(nodes_shadow_points: I) -> Result where I: Iterator { - compute_public_sum(nodes_shadow_points) +pub fn compute_joint_shadow_point<'a, I>(nodes_shadow_points: I) -> Result +where + I: Iterator, +{ + compute_public_sum(nodes_shadow_points) } /// Compute joint shadow point (version for tests). 
#[cfg(test)] -pub fn compute_joint_shadow_point_test<'a, I>(access_key: &Secret, common_point: &Public, nodes_shadows: I) -> Result where I: Iterator { - let mut joint_shadow = compute_secret_sum(nodes_shadows)?; - joint_shadow.mul(access_key)?; +pub fn compute_joint_shadow_point_test<'a, I>( + access_key: &Secret, + common_point: &Public, + nodes_shadows: I, +) -> Result +where + I: Iterator, +{ + let mut joint_shadow = compute_secret_sum(nodes_shadows)?; + joint_shadow.mul(access_key)?; - let mut joint_shadow_point = common_point.clone(); - math::public_mul_secret(&mut joint_shadow_point, &joint_shadow)?; - Ok(joint_shadow_point) + let mut joint_shadow_point = common_point.clone(); + math::public_mul_secret(&mut joint_shadow_point, &joint_shadow)?; + Ok(joint_shadow_point) } /// Decrypt data using joint shadow point. -pub fn decrypt_with_joint_shadow(threshold: usize, access_key: &Secret, encrypted_point: &Public, joint_shadow_point: &Public) -> Result { - let mut inv_access_key = access_key.clone(); - inv_access_key.inv()?; +pub fn decrypt_with_joint_shadow( + threshold: usize, + access_key: &Secret, + encrypted_point: &Public, + joint_shadow_point: &Public, +) -> Result { + let mut inv_access_key = access_key.clone(); + inv_access_key.inv()?; - let mut mul = joint_shadow_point.clone(); - math::public_mul_secret(&mut mul, &inv_access_key)?; + let mut mul = joint_shadow_point.clone(); + math::public_mul_secret(&mut mul, &inv_access_key)?; - let mut decrypted_point = encrypted_point.clone(); - if threshold % 2 != 0 { - math::public_add(&mut decrypted_point, &mul)?; - } else { - math::public_sub(&mut decrypted_point, &mul)?; - } + let mut decrypted_point = encrypted_point.clone(); + if threshold % 2 != 0 { + math::public_add(&mut decrypted_point, &mul)?; + } else { + math::public_sub(&mut decrypted_point, &mul)?; + } - Ok(decrypted_point) + Ok(decrypted_point) } /// Prepare common point for shadow decryption. 
-pub fn make_common_shadow_point(threshold: usize, mut common_point: Public) -> Result { - if threshold % 2 != 1 { - Ok(common_point) - } else { - math::public_negate(&mut common_point)?; - Ok(common_point) - } +pub fn make_common_shadow_point( + threshold: usize, + mut common_point: Public, +) -> Result { + if threshold % 2 != 1 { + Ok(common_point) + } else { + math::public_negate(&mut common_point)?; + Ok(common_point) + } } /// Decrypt shadow-encrypted secret. #[cfg(test)] -pub fn decrypt_with_shadow_coefficients(mut decrypted_shadow: Public, mut common_shadow_point: Public, shadow_coefficients: Vec) -> Result { - let shadow_coefficients_sum = compute_secret_sum(shadow_coefficients.iter())?; - math::public_mul_secret(&mut common_shadow_point, &shadow_coefficients_sum)?; - math::public_add(&mut decrypted_shadow, &common_shadow_point)?; - Ok(decrypted_shadow) +pub fn decrypt_with_shadow_coefficients( + mut decrypted_shadow: Public, + mut common_shadow_point: Public, + shadow_coefficients: Vec, +) -> Result { + let shadow_coefficients_sum = compute_secret_sum(shadow_coefficients.iter())?; + math::public_mul_secret(&mut common_shadow_point, &shadow_coefficients_sum)?; + math::public_add(&mut decrypted_shadow, &common_shadow_point)?; + Ok(decrypted_shadow) } /// Decrypt data using joint secret (version for tests). 
#[cfg(test)] -pub fn decrypt_with_joint_secret(encrypted_point: &Public, common_point: &Public, joint_secret: &Secret) -> Result { - let mut common_point_mul = common_point.clone(); - math::public_mul_secret(&mut common_point_mul, joint_secret)?; +pub fn decrypt_with_joint_secret( + encrypted_point: &Public, + common_point: &Public, + joint_secret: &Secret, +) -> Result { + let mut common_point_mul = common_point.clone(); + math::public_mul_secret(&mut common_point_mul, joint_secret)?; - let mut decrypted_point = encrypted_point.clone(); - math::public_sub(&mut decrypted_point, &common_point_mul)?; + let mut decrypted_point = encrypted_point.clone(); + math::public_sub(&mut decrypted_point, &common_point_mul)?; - Ok(decrypted_point) + Ok(decrypted_point) } /// Combine message hash with public key X coordinate. -pub fn combine_message_hash_with_public(message_hash: &H256, public: &Public) -> Result { - // buffer is just [message_hash | public.x] - let mut buffer = [0; 64]; - buffer[0..32].copy_from_slice(&message_hash[0..32]); - buffer[32..64].copy_from_slice(&public[0..32]); +pub fn combine_message_hash_with_public( + message_hash: &H256, + public: &Public, +) -> Result { + // buffer is just [message_hash | public.x] + let mut buffer = [0; 64]; + buffer[0..32].copy_from_slice(&message_hash[0..32]); + buffer[32..64].copy_from_slice(&public[0..32]); - // calculate hash of buffer - let hash = keccak(&buffer[..]); + // calculate hash of buffer + let hash = keccak(&buffer[..]); - // map hash to EC finite field value - to_scalar(hash) + // map hash to EC finite field value + to_scalar(hash) } /// Compute Schnorr signature share. 
-pub fn compute_schnorr_signature_share<'a, I>(threshold: usize, combined_hash: &Secret, one_time_secret_coeff: &Secret, node_secret_share: &Secret, node_number: &Secret, other_nodes_numbers: I) - -> Result where I: Iterator { - let mut sum = one_time_secret_coeff.clone(); - let mut subtrahend = compute_shadow_mul(combined_hash, node_number, other_nodes_numbers)?; - subtrahend.mul(node_secret_share)?; - if threshold % 2 == 0 { - sum.sub(&subtrahend)?; - } else { - sum.add(&subtrahend)?; - } - Ok(sum) +pub fn compute_schnorr_signature_share<'a, I>( + threshold: usize, + combined_hash: &Secret, + one_time_secret_coeff: &Secret, + node_secret_share: &Secret, + node_number: &Secret, + other_nodes_numbers: I, +) -> Result +where + I: Iterator, +{ + let mut sum = one_time_secret_coeff.clone(); + let mut subtrahend = compute_shadow_mul(combined_hash, node_number, other_nodes_numbers)?; + subtrahend.mul(node_secret_share)?; + if threshold % 2 == 0 { + sum.sub(&subtrahend)?; + } else { + sum.add(&subtrahend)?; + } + Ok(sum) } /// Check Schnorr signature share. 
-pub fn _check_schnorr_signature_share<'a, I>(_combined_hash: &Secret, _signature_share: &Secret, _public_share: &Public, _one_time_public_share: &Public, _node_numbers: I) - -> Result where I: Iterator { - // TODO [Trust]: in paper partial signature is checked using comparison: - // sig[i] * T = r[i] - c * lagrange_coeff(i) * y[i] - // => (k[i] - c * lagrange_coeff(i) * s[i]) * T = r[i] - c * lagrange_coeff(i) * y[i] - // => k[i] * T - c * lagrange_coeff(i) * s[i] * T = k[i] * T - c * lagrange_coeff(i) * y[i] - // => this means that y[i] = s[i] * T - // but when verifying signature (for t = 1), nonce public (r) is restored using following expression: - // r = (sig[0] + sig[1]) * T - c * y - // r = (k[0] - c * lagrange_coeff(0) * s[0] + k[1] - c * lagrange_coeff(1) * s[1]) * T - c * y - // r = (k[0] + k[1]) * T - c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - c * y - // r = r - c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - c * y - // => -c * y = c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - // => -y = (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - // => y[i] != s[i] * T - // => some other way is required - Ok(true) +pub fn _check_schnorr_signature_share<'a, I>( + _combined_hash: &Secret, + _signature_share: &Secret, + _public_share: &Public, + _one_time_public_share: &Public, + _node_numbers: I, +) -> Result +where + I: Iterator, +{ + // TODO [Trust]: in paper partial signature is checked using comparison: + // sig[i] * T = r[i] - c * lagrange_coeff(i) * y[i] + // => (k[i] - c * lagrange_coeff(i) * s[i]) * T = r[i] - c * lagrange_coeff(i) * y[i] + // => k[i] * T - c * lagrange_coeff(i) * s[i] * T = k[i] * T - c * lagrange_coeff(i) * y[i] + // => this means that y[i] = s[i] * T + // but when verifying signature (for t = 1), nonce public (r) is restored using following expression: + // r = (sig[0] + sig[1]) * T - c * y + // r = (k[0] - c * lagrange_coeff(0) * s[0] + k[1] - c * lagrange_coeff(1) * s[1]) 
* T - c * y + // r = (k[0] + k[1]) * T - c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - c * y + // r = r - c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - c * y + // => -c * y = c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T + // => -y = (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T + // => y[i] != s[i] * T + // => some other way is required + Ok(true) } /// Compute Schnorr signature. -pub fn compute_schnorr_signature<'a, I>(signature_shares: I) -> Result where I: Iterator { - compute_secret_sum(signature_shares) +pub fn compute_schnorr_signature<'a, I>(signature_shares: I) -> Result +where + I: Iterator, +{ + compute_secret_sum(signature_shares) } /// Locally compute Schnorr signature as described in https://en.wikipedia.org/wiki/Schnorr_signature#Signing. #[cfg(test)] -pub fn local_compute_schnorr_signature(nonce: &Secret, secret: &Secret, message_hash: &Secret) -> Result<(Secret, Secret), Error> { - let mut nonce_public = math::generation_point(); - math::public_mul_secret(&mut nonce_public, &nonce).unwrap(); +pub fn local_compute_schnorr_signature( + nonce: &Secret, + secret: &Secret, + message_hash: &Secret, +) -> Result<(Secret, Secret), Error> { + let mut nonce_public = math::generation_point(); + math::public_mul_secret(&mut nonce_public, &nonce).unwrap(); - let combined_hash = combine_message_hash_with_public(message_hash, &nonce_public)?; + let combined_hash = combine_message_hash_with_public(message_hash, &nonce_public)?; - let mut sig_subtrahend = combined_hash.clone(); - sig_subtrahend.mul(secret)?; - let mut sig = nonce.clone(); - sig.sub(&sig_subtrahend)?; + let mut sig_subtrahend = combined_hash.clone(); + sig_subtrahend.mul(secret)?; + let mut sig = nonce.clone(); + sig.sub(&sig_subtrahend)?; - Ok((combined_hash, sig)) + Ok((combined_hash, sig)) } /// Verify Schnorr signature as described in https://en.wikipedia.org/wiki/Schnorr_signature#Verifying. 
#[cfg(test)] -pub fn verify_schnorr_signature(public: &Public, signature: &(Secret, Secret), message_hash: &H256) -> Result { - let mut addendum = math::generation_point(); - math::public_mul_secret(&mut addendum, &signature.1)?; - let mut nonce_public = public.clone(); - math::public_mul_secret(&mut nonce_public, &signature.0)?; - math::public_add(&mut nonce_public, &addendum)?; +pub fn verify_schnorr_signature( + public: &Public, + signature: &(Secret, Secret), + message_hash: &H256, +) -> Result { + let mut addendum = math::generation_point(); + math::public_mul_secret(&mut addendum, &signature.1)?; + let mut nonce_public = public.clone(); + math::public_mul_secret(&mut nonce_public, &signature.0)?; + math::public_add(&mut nonce_public, &addendum)?; - let combined_hash = combine_message_hash_with_public(message_hash, &nonce_public)?; - Ok(combined_hash == signature.0) + let combined_hash = combine_message_hash_with_public(message_hash, &nonce_public)?; + Ok(combined_hash == signature.0) } /// Compute R part of ECDSA signature. pub fn compute_ecdsa_r(nonce_public: &Public) -> Result { - to_scalar(public_x(nonce_public)) + to_scalar(public_x(nonce_public)) } /// Compute share of S part of ECDSA signature. 
-pub fn compute_ecdsa_s_share(inv_nonce_share: &Secret, inv_nonce_mul_secret: &Secret, signature_r: &Secret, message_hash: &Secret) -> Result { - let mut nonce_inv_share_mul_message_hash = inv_nonce_share.clone(); - nonce_inv_share_mul_message_hash.mul(&message_hash.clone().into())?; +pub fn compute_ecdsa_s_share( + inv_nonce_share: &Secret, + inv_nonce_mul_secret: &Secret, + signature_r: &Secret, + message_hash: &Secret, +) -> Result { + let mut nonce_inv_share_mul_message_hash = inv_nonce_share.clone(); + nonce_inv_share_mul_message_hash.mul(&message_hash.clone().into())?; - let mut nonce_inv_share_mul_secret_share_mul_r = inv_nonce_mul_secret.clone(); - nonce_inv_share_mul_secret_share_mul_r.mul(signature_r)?; + let mut nonce_inv_share_mul_secret_share_mul_r = inv_nonce_mul_secret.clone(); + nonce_inv_share_mul_secret_share_mul_r.mul(signature_r)?; - let mut signature_s_share = nonce_inv_share_mul_message_hash; - signature_s_share.add(&nonce_inv_share_mul_secret_share_mul_r)?; + let mut signature_s_share = nonce_inv_share_mul_message_hash; + signature_s_share.add(&nonce_inv_share_mul_secret_share_mul_r)?; - Ok(signature_s_share) + Ok(signature_s_share) } /// Compute S part of ECDSA signature from shares. 
-pub fn compute_ecdsa_s(t: usize, signature_s_shares: &[Secret], id_numbers: &[Secret]) -> Result { - let double_t = t * 2; - debug_assert!(id_numbers.len() >= double_t + 1); - debug_assert_eq!(signature_s_shares.len(), id_numbers.len()); +pub fn compute_ecdsa_s( + t: usize, + signature_s_shares: &[Secret], + id_numbers: &[Secret], +) -> Result { + let double_t = t * 2; + debug_assert!(id_numbers.len() >= double_t + 1); + debug_assert_eq!(signature_s_shares.len(), id_numbers.len()); - compute_joint_secret_from_shares(double_t, - &signature_s_shares.iter().take(double_t + 1).collect::>(), - &id_numbers.iter().take(double_t + 1).collect::>()) + compute_joint_secret_from_shares( + double_t, + &signature_s_shares + .iter() + .take(double_t + 1) + .collect::>(), + &id_numbers.iter().take(double_t + 1).collect::>(), + ) } /// Serialize ECDSA signature to [r][s]v form. -pub fn serialize_ecdsa_signature(nonce_public: &Public, signature_r: Secret, mut signature_s: Secret) -> Signature { - // compute recovery param - let mut signature_v = { - let nonce_public_x = public_x(nonce_public); - let nonce_public_y: U256 = public_y(nonce_public).into(); - let nonce_public_y_is_odd = !(nonce_public_y % 2).is_zero(); - let bit0 = if nonce_public_y_is_odd { 1u8 } else { 0u8 }; - let bit1 = if nonce_public_x != *signature_r { 2u8 } else { 0u8 }; - bit0 | bit1 - }; +pub fn serialize_ecdsa_signature( + nonce_public: &Public, + signature_r: Secret, + mut signature_s: Secret, +) -> Signature { + // compute recovery param + let mut signature_v = { + let nonce_public_x = public_x(nonce_public); + let nonce_public_y: U256 = public_y(nonce_public).into(); + let nonce_public_y_is_odd = !(nonce_public_y % 2).is_zero(); + let bit0 = if nonce_public_y_is_odd { 1u8 } else { 0u8 }; + let bit1 = if nonce_public_x != *signature_r { + 2u8 + } else { + 0u8 + }; + bit0 | bit1 + }; - // fix high S - let curve_order = math::curve_order(); - let curve_order_half = curve_order / 2; - let s_numeric: U256 = 
(*signature_s).into(); - if s_numeric > curve_order_half { - let signature_s_hash: H256 = (curve_order - s_numeric).into(); - signature_s = signature_s_hash.into(); - signature_v ^= 1; - } + // fix high S + let curve_order = math::curve_order(); + let curve_order_half = curve_order / 2; + let s_numeric: U256 = (*signature_s).into(); + if s_numeric > curve_order_half { + let signature_s_hash: H256 = (curve_order - s_numeric).into(); + signature_s = signature_s_hash.into(); + signature_v ^= 1; + } - // serialize as [r][s]v - let mut signature = [0u8; 65]; - signature[..32].copy_from_slice(&**signature_r); - signature[32..64].copy_from_slice(&**signature_s); - signature[64] = signature_v; + // serialize as [r][s]v + let mut signature = [0u8; 65]; + signature[..32].copy_from_slice(&**signature_r); + signature[32..64].copy_from_slice(&**signature_s); + signature[64] = signature_v; - signature.into() + signature.into() } /// Compute share of ECDSA reversed-nonce coefficient. Result of this_coeff * secret_share gives us a share of inv(nonce). -pub fn compute_ecdsa_inversed_secret_coeff_share(secret_share: &Secret, nonce_share: &Secret, zero_share: &Secret) -> Result { - let mut coeff = secret_share.clone(); - coeff.mul(nonce_share).unwrap(); - coeff.add(zero_share).unwrap(); - Ok(coeff) +pub fn compute_ecdsa_inversed_secret_coeff_share( + secret_share: &Secret, + nonce_share: &Secret, + zero_share: &Secret, +) -> Result { + let mut coeff = secret_share.clone(); + coeff.mul(nonce_share).unwrap(); + coeff.add(zero_share).unwrap(); + Ok(coeff) } /// Compute ECDSA reversed-nonce coefficient from its shares. Result of this_coeff * secret_share gives us a share of inv(nonce). 
-pub fn compute_ecdsa_inversed_secret_coeff_from_shares(t: usize, id_numbers: &[Secret], shares: &[Secret]) -> Result { - debug_assert_eq!(shares.len(), 2 * t + 1); - debug_assert_eq!(shares.len(), id_numbers.len()); +pub fn compute_ecdsa_inversed_secret_coeff_from_shares( + t: usize, + id_numbers: &[Secret], + shares: &[Secret], +) -> Result { + debug_assert_eq!(shares.len(), 2 * t + 1); + debug_assert_eq!(shares.len(), id_numbers.len()); - let u_shares = (0..2*t+1).map(|i| compute_shadow_mul(&shares[i], &id_numbers[i], id_numbers.iter().enumerate() - .filter(|&(j, _)| i != j) - .map(|(_, id)| id) - .take(2 * t))).collect::, _>>()?; + let u_shares = (0..2 * t + 1) + .map(|i| { + compute_shadow_mul( + &shares[i], + &id_numbers[i], + id_numbers + .iter() + .enumerate() + .filter(|&(j, _)| i != j) + .map(|(_, id)| id) + .take(2 * t), + ) + }) + .collect::, _>>()?; - // compute u - let u = compute_secret_sum(u_shares.iter())?; + // compute u + let u = compute_secret_sum(u_shares.iter())?; - // compute inv(u) - let mut u_inv = u; - u_inv.inv()?; - Ok(u_inv) + // compute inv(u) + let mut u_inv = u; + u_inv.inv()?; + Ok(u_inv) } #[cfg(test)] pub mod tests { - use std::iter::once; - use ethkey::{KeyPair, recover, verify_public}; - use super::*; - - #[derive(Clone)] - struct KeyGenerationArtifacts { - id_numbers: Vec, - polynoms1: Vec>, - secrets1: Vec>, - public_shares: Vec, - secret_shares: Vec, - joint_public: Public, - } - - struct ZeroGenerationArtifacts { - polynoms1: Vec>, - secret_shares: Vec, - } - - fn prepare_polynoms1(t: usize, n: usize, secret_required: Option) -> Vec> { - let mut polynoms1: Vec<_> = (0..n).map(|_| generate_random_polynom(t).unwrap()).collect(); - // if we need specific secret to be shared, update polynoms so that sum of their free terms = required secret - if let Some(mut secret_required) = secret_required { - for polynom1 in polynoms1.iter_mut().take(n - 1) { - let secret_coeff1 = generate_random_scalar().unwrap(); - 
secret_required.sub(&secret_coeff1).unwrap(); - polynom1[0] = secret_coeff1; - } - - polynoms1[n - 1][0] = secret_required; - } - polynoms1 - } - - fn run_key_generation(t: usize, n: usize, id_numbers: Option>, secret_required: Option) -> KeyGenerationArtifacts { - // === PART1: DKG === - - // data, gathered during initialization - let derived_point = Random.generate().unwrap().public().clone(); - let id_numbers: Vec<_> = match id_numbers { - Some(id_numbers) => id_numbers, - None => (0..n).map(|_| generate_random_scalar().unwrap()).collect(), - }; - - // data, generated during keys dissemination - let polynoms1 = prepare_polynoms1(t, n, secret_required); - let secrets1: Vec<_> = (0..n).map(|i| (0..n).map(|j| compute_polynom(&polynoms1[i], &id_numbers[j]).unwrap()).collect::>()).collect(); - - // following data is used only on verification step - let polynoms2: Vec<_> = (0..n).map(|_| generate_random_polynom(t).unwrap()).collect(); - let secrets2: Vec<_> = (0..n).map(|i| (0..n).map(|j| compute_polynom(&polynoms2[i], &id_numbers[j]).unwrap()).collect::>()).collect(); - let publics: Vec<_> = (0..n).map(|i| public_values_generation(t, &derived_point, &polynoms1[i], &polynoms2[i]).unwrap()).collect(); - - // keys verification - (0..n).for_each(|i| { - (0..n) - .filter(|&j| i != j) - .for_each(|j| { - assert!(keys_verification(t, &derived_point, &id_numbers[i], &secrets1[j][i], &secrets2[j][i], &publics[j]).unwrap()); - }) - }); - - // data, generated during keys generation - let public_shares: Vec<_> = (0..n).map(|i| compute_public_share(&polynoms1[i][0]).unwrap()).collect(); - let secret_shares: Vec<_> = (0..n).map(|i| compute_secret_share(secrets1.iter().map(|s| &s[i])).unwrap()).collect(); - - // joint public key, as a result of DKG - let joint_public = compute_joint_public(public_shares.iter()).unwrap(); - - KeyGenerationArtifacts { - id_numbers: id_numbers, - polynoms1: polynoms1, - secrets1: secrets1, - public_shares: public_shares, - secret_shares: 
secret_shares, - joint_public: joint_public, - } - } - - fn run_zero_key_generation(t: usize, n: usize, id_numbers: &[Secret]) -> ZeroGenerationArtifacts { - // data, generated during keys dissemination - let polynoms1 = prepare_polynoms1(t, n, Some(zero_scalar())); - let secrets1: Vec<_> = (0..n).map(|i| (0..n).map(|j| compute_polynom(&polynoms1[i], &id_numbers[j]).unwrap()).collect::>()).collect(); - - // data, generated during keys generation - let secret_shares: Vec<_> = (0..n).map(|i| compute_secret_share(secrets1.iter().map(|s| &s[i])).unwrap()).collect(); - - ZeroGenerationArtifacts { - polynoms1: polynoms1, - secret_shares: secret_shares, - } - } - - fn run_key_share_refreshing(old_t: usize, new_t: usize, new_n: usize, old_artifacts: &KeyGenerationArtifacts) -> KeyGenerationArtifacts { - // === share refreshing protocol from - // === based on "Verifiable Secret Redistribution for Threshold Sharing Schemes" - // === http://www.cs.cmu.edu/~wing/publications/CMU-CS-02-114.pdf - - // generate new id_numbers for new nodes - let new_nodes = new_n.saturating_sub(old_artifacts.id_numbers.len()); - let id_numbers: Vec<_> = old_artifacts.id_numbers.iter().take(new_n).cloned() - .chain((0..new_nodes).map(|_| generate_random_scalar().unwrap())) - .collect(); - - // on every authorized node: generate random polynomial ai(j) = si + ... 
+ ai[new_t - 1] * j^(new_t - 1) - let mut subshare_polynoms = Vec::new(); - for i in 0..old_t+1 { - let mut subshare_polynom = generate_random_polynom(new_t).unwrap(); - subshare_polynom[0] = old_artifacts.secret_shares[i].clone(); - subshare_polynoms.push(subshare_polynom); - } - - // on every authorized node: calculate subshare for every new node - let mut subshares = Vec::new(); - for j in 0..new_n { - let mut subshares_to_j = Vec::new(); - for i in 0..old_t+1 { - let subshare_from_i_to_j = compute_polynom(&subshare_polynoms[i], &id_numbers[j]).unwrap(); - subshares_to_j.push(subshare_from_i_to_j); - } - subshares.push(subshares_to_j); - } - - // on every new node: generate new share using Lagrange interpolation - // on every node: generate new share using Lagrange interpolation - let mut new_secret_shares = Vec::new(); - for j in 0..new_n { - let mut subshares_to_j = Vec::new(); - for i in 0..old_t+1 { - let subshare_from_i = &subshares[j][i]; - let id_number_i = &id_numbers[i]; - let other_id_numbers = (0usize..old_t+1).filter(|j| *j != i).map(|j| &id_numbers[j]); - let mut subshare_from_i = compute_shadow_mul(subshare_from_i, id_number_i, other_id_numbers).unwrap(); - if old_t % 2 != 0 { - subshare_from_i.neg().unwrap(); - } - subshares_to_j.push(subshare_from_i); - } - new_secret_shares.push(compute_secret_sum(subshares_to_j.iter()).unwrap()); - } - - let mut result = old_artifacts.clone(); - result.id_numbers = id_numbers; - result.secret_shares = new_secret_shares; - result - } - - fn run_multiplication_protocol(t: usize, secret_shares1: &[Secret], secret_shares2: &[Secret]) -> Vec { - let n = secret_shares1.len(); - assert!(t * 2 + 1 <= n); - - // shares of secrets multiplication = multiplication of secrets shares - let mul_shares: Vec<_> = (0..n).map(|i| { - let share1 = secret_shares1[i].clone(); - let share2 = secret_shares2[i].clone(); - let mut mul_share = share1; - mul_share.mul(&share2).unwrap(); - mul_share - }).collect(); - - mul_shares - } - - 
fn run_reciprocal_protocol(t: usize, artifacts: &KeyGenerationArtifacts) -> Vec { - // === Given a secret x mod r which is shared among n players, it is - // === required to generate shares of inv(x) mod r with out revealing - // === any information about x or inv(x). - // === https://www.researchgate.net/publication/280531698_Robust_Threshold_Elliptic_Curve_Digital_Signature - - // generate shared random secret e for given t - let n = artifacts.id_numbers.len(); - assert!(t * 2 + 1 <= n); - let e_artifacts = run_key_generation(t, n, Some(artifacts.id_numbers.clone()), None); - - // generate shares of zero for 2 * t threshold - let z_artifacts = run_zero_key_generation(2 * t, n, &artifacts.id_numbers); - - // each player computes && broadcast u[i] = x[i] * e[i] + z[i] - let ui: Vec<_> = (0..n).map(|i| compute_ecdsa_inversed_secret_coeff_share(&artifacts.secret_shares[i], - &e_artifacts.secret_shares[i], - &z_artifacts.secret_shares[i]).unwrap()).collect(); - - // players can interpolate the polynomial of degree 2t and compute u && inv(u): - let u_inv = compute_ecdsa_inversed_secret_coeff_from_shares(t, - &artifacts.id_numbers.iter().take(2*t + 1).cloned().collect::>(), - &ui.iter().take(2*t + 1).cloned().collect::>()).unwrap(); - - // each player Pi computes his share of inv(x) as e[i] * inv(u) - let x_inv_shares: Vec<_> = (0..n).map(|i| { - let mut x_inv_share = e_artifacts.secret_shares[i].clone(); - x_inv_share.mul(&u_inv).unwrap(); - x_inv_share - }).collect(); - - x_inv_shares - } - - pub fn do_encryption_and_decryption(t: usize, joint_public: &Public, id_numbers: &[Secret], secret_shares: &[Secret], joint_secret: Option<&Secret>, document_secret_plain: Public) -> (Public, Public) { - // === PART2: encryption using joint public key === - - // the next line is executed on KeyServer-client - let encrypted_secret = encrypt_secret(&document_secret_plain, &joint_public).unwrap(); - - // === PART3: decryption === - - // next line is executed on KeyServer client - 
let access_key = generate_random_scalar().unwrap(); - - // use t + 1 nodes to compute joint shadow point - let nodes_shadows: Vec<_> = (0..t + 1).map(|i| - compute_node_shadow(&secret_shares[i], &id_numbers[i], id_numbers.iter() - .enumerate() - .filter(|&(j, _)| j != i) - .take(t) - .map(|(_, id_number)| id_number)).unwrap()).collect(); - - let nodes_shadow_points: Vec<_> = nodes_shadows.iter() - .map(|s| compute_node_shadow_point(&access_key, &encrypted_secret.common_point, s, None).unwrap()) - .map(|sp| sp.0) - .collect(); - - assert_eq!(nodes_shadows.len(), t + 1); - assert_eq!(nodes_shadow_points.len(), t + 1); - - let joint_shadow_point = compute_joint_shadow_point(nodes_shadow_points.iter()).unwrap(); - let joint_shadow_point_test = compute_joint_shadow_point_test(&access_key, &encrypted_secret.common_point, nodes_shadows.iter()).unwrap(); - assert_eq!(joint_shadow_point, joint_shadow_point_test); - - // decrypt encrypted secret using joint shadow point - let document_secret_decrypted = decrypt_with_joint_shadow(t, &access_key, &encrypted_secret.encrypted_point, &joint_shadow_point).unwrap(); - - // decrypt encrypted secret using joint secret [just for test] - let document_secret_decrypted_test = match joint_secret { - Some(joint_secret) => decrypt_with_joint_secret(&encrypted_secret.encrypted_point, &encrypted_secret.common_point, joint_secret).unwrap(), - None => document_secret_decrypted.clone(), - }; - - (document_secret_decrypted, document_secret_decrypted_test) - } - - #[test] - fn full_encryption_math_session() { - let test_cases = [(0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), - (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; - for &(t, n) in &test_cases { - let artifacts = run_key_generation(t, n, None, None); - - // compute joint private key [just for test] - let joint_secret = compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); - let joint_key_pair = 
KeyPair::from_secret(joint_secret.clone()).unwrap(); - assert_eq!(&artifacts.joint_public, joint_key_pair.public()); - - // check secret shares computation [just for test] - let secret_shares_polynom: Vec<_> = (0..t + 1).map(|k| compute_secret_share(artifacts.polynoms1.iter().map(|p| &p[k])).unwrap()).collect(); - let secret_shares_calculated_from_polynom: Vec<_> = artifacts.id_numbers.iter().map(|id_number| compute_polynom(&*secret_shares_polynom, id_number).unwrap()).collect(); - assert_eq!(artifacts.secret_shares, secret_shares_calculated_from_polynom); - - // now encrypt and decrypt data - let document_secret_plain = generate_random_point().unwrap(); - let (document_secret_decrypted, document_secret_decrypted_test) = - do_encryption_and_decryption(t, &artifacts.joint_public, &artifacts.id_numbers, &artifacts.secret_shares, Some(&joint_secret), document_secret_plain.clone()); - - assert_eq!(document_secret_plain, document_secret_decrypted_test); - assert_eq!(document_secret_plain, document_secret_decrypted); - } - } - - #[test] - fn local_signature_works() { - let key_pair = Random.generate().unwrap(); - let message_hash = "0000000000000000000000000000000000000000000000000000000000000042".parse().unwrap(); - let nonce = generate_random_scalar().unwrap(); - let signature = local_compute_schnorr_signature(&nonce, key_pair.secret(), &message_hash).unwrap(); - assert_eq!(verify_schnorr_signature(key_pair.public(), &signature, &message_hash), Ok(true)); - } - - #[test] - fn full_schnorr_signature_math_session() { - let test_cases = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), - (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; - for &(t, n) in &test_cases { - // hash of the message to be signed - let message_hash: Secret = "0000000000000000000000000000000000000000000000000000000000000042".parse().unwrap(); - - // === MiDS-S algorithm === - // setup: all nodes share master secret key 
&& every node knows master public key - let artifacts = run_key_generation(t, n, None, None); - - // in this gap (not related to math): - // master node should ask every other node if it is able to do a signing - // if there are < than t+1 nodes, able to sign => error - // select t+1 nodes for signing session - // all steps below are for this subset of nodes - let n = t + 1; - - // step 1: run DKG to generate one-time secret key (nonce) - let id_numbers = artifacts.id_numbers.iter().cloned().take(n).collect(); - let one_time_artifacts = run_key_generation(t, n, Some(id_numbers), None); - - // step 2: message hash && x coordinate of one-time public value are combined - let combined_hash = combine_message_hash_with_public(&message_hash, &one_time_artifacts.joint_public).unwrap(); - - // step 3: compute signature shares - let partial_signatures: Vec<_> = (0..n) - .map(|i| compute_schnorr_signature_share( - t, - &combined_hash, - &one_time_artifacts.polynoms1[i][0], - &artifacts.secret_shares[i], - &artifacts.id_numbers[i], - artifacts.id_numbers.iter() - .enumerate() - .filter(|&(j, _)| i != j) - .map(|(_, n)| n) - .take(t) - ).unwrap()) - .collect(); - - // step 4: receive and verify signatures shares from other nodes - let received_signatures: Vec> = (0..n) - .map(|i| (0..n) - .filter(|j| i != *j) - .map(|j| { - let signature_share = partial_signatures[j].clone(); - assert!(_check_schnorr_signature_share(&combined_hash, - &signature_share, - &artifacts.public_shares[j], - &one_time_artifacts.public_shares[j], - artifacts.id_numbers.iter().take(t)).unwrap_or(false)); - signature_share - }) - .collect()) - .collect(); - - // step 5: compute signature - let signatures: Vec<_> = (0..n) - .map(|i| (combined_hash.clone(), compute_schnorr_signature(received_signatures[i].iter().chain(once(&partial_signatures[i]))).unwrap())) - .collect(); - - // === verify signature === - let master_secret = compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); - let 
nonce = compute_joint_secret(one_time_artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); - let local_signature = local_compute_schnorr_signature(&nonce, &master_secret, &message_hash).unwrap(); - for signature in &signatures { - assert_eq!(signature, &local_signature); - assert_eq!(verify_schnorr_signature(&artifacts.joint_public, signature, &message_hash), Ok(true)); - } - } - } - - #[test] - fn full_ecdsa_signature_math_session() { - let test_cases = [(2, 5), (2, 6), (3, 11), (4, 11)]; - for &(t, n) in &test_cases { - // values that can be hardcoded - let joint_secret: Secret = Random.generate().unwrap().secret().clone(); - let joint_nonce: Secret = Random.generate().unwrap().secret().clone(); - let message_hash: H256 = H256::random(); - - // convert message hash to EC scalar - let message_hash_scalar = to_scalar(message_hash.clone()).unwrap(); - - // generate secret key shares - let artifacts = run_key_generation(t, n, None, Some(joint_secret)); - - // generate nonce shares - let nonce_artifacts = run_key_generation(t, n, Some(artifacts.id_numbers.clone()), Some(joint_nonce)); - - // compute nonce public - // x coordinate (mapped to EC field) of this public is the r-portion of signature - let nonce_public_shares: Vec<_> = (0..n).map(|i| compute_public_share(&nonce_artifacts.polynoms1[i][0]).unwrap()).collect(); - let nonce_public = compute_joint_public(nonce_public_shares.iter()).unwrap(); - let signature_r = compute_ecdsa_r(&nonce_public).unwrap(); - - // compute shares of inv(nonce) so that both nonce && inv(nonce) are still unknown to all nodes - let nonce_inv_shares = run_reciprocal_protocol(t, &nonce_artifacts); - - // compute multiplication of secret-shares * inv-nonce-shares - let mul_shares = run_multiplication_protocol(t, &artifacts.secret_shares, &nonce_inv_shares); - - // compute shares for s portion of signature: nonce_inv * (message_hash + secret * signature_r) - // every node broadcasts this share - let double_t = 2 * t; - let signature_s_shares: 
Vec<_> = (0..double_t+1).map(|i| compute_ecdsa_s_share( - &nonce_inv_shares[i], - &mul_shares[i], - &signature_r, - &message_hash_scalar - ).unwrap()).collect(); - - // compute signature_s from received shares - let signature_s = compute_ecdsa_s(t, - &signature_s_shares, - &artifacts.id_numbers.iter().take(double_t + 1).cloned().collect::>() - ).unwrap(); - - // check signature - let signature_actual = serialize_ecdsa_signature(&nonce_public, signature_r, signature_s); - let joint_secret = compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); - let joint_secret_pair = KeyPair::from_secret(joint_secret).unwrap(); - assert_eq!(recover(&signature_actual, &message_hash).unwrap(), *joint_secret_pair.public()); - assert!(verify_public(joint_secret_pair.public(), &signature_actual, &message_hash).unwrap()); - } - } - - #[test] - fn full_generation_math_session_with_refreshing_shares() { - let test_cases = vec![(1, 4), (6, 10)]; - for (t, n) in test_cases { - // generate key using t-of-n session - let artifacts1 = run_key_generation(t, n, None, None); - let joint_secret1 = compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap(); - - // let's say we want to refresh existing secret shares - // by doing this every T seconds, and assuming that in each T-second period adversary KS is not able to collect t+1 secret shares - // we can be sure that the scheme is secure - let artifacts2 = run_key_share_refreshing(t, t, n, &artifacts1); - let joint_secret2 = compute_joint_secret_from_shares(t, &artifacts2.secret_shares.iter().take(t + 1).collect::>(), - &artifacts2.id_numbers.iter().take(t + 1).collect::>()).unwrap(); - assert_eq!(joint_secret1, joint_secret2); - - // refresh again - let artifacts3 = run_key_share_refreshing(t, t, n, &artifacts2); - let joint_secret3 = compute_joint_secret_from_shares(t, &artifacts3.secret_shares.iter().take(t + 1).collect::>(), - &artifacts3.id_numbers.iter().take(t + 1).collect::>()).unwrap(); - 
assert_eq!(joint_secret1, joint_secret3); - } - } - - #[test] - fn full_generation_math_session_with_adding_new_nodes() { - let test_cases = vec![(1, 3), (1, 4), (6, 10)]; - for (t, n) in test_cases { - // generate key using t-of-n session - let artifacts1 = run_key_generation(t, n, None, None); - let joint_secret1 = compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap(); - - // let's say we want to include additional couple of servers to the set - // so that scheme becames t-of-n+2 - let artifacts2 = run_key_share_refreshing(t, t, n + 2, &artifacts1); - let joint_secret2 = compute_joint_secret_from_shares(t, &artifacts2.secret_shares.iter().take(t + 1).collect::>(), - &artifacts2.id_numbers.iter().take(t + 1).collect::>()).unwrap(); - assert_eq!(joint_secret1, joint_secret2); - - // include another server (t-of-n+3) - let artifacts3 = run_key_share_refreshing(t, t, n + 3, &artifacts2); - let joint_secret3 = compute_joint_secret_from_shares(t, &artifacts3.secret_shares.iter().take(t + 1).collect::>(), - &artifacts3.id_numbers.iter().take(t + 1).collect::>()).unwrap(); - assert_eq!(joint_secret1, joint_secret3); - } - } - - #[test] - fn full_generation_math_session_with_decreasing_threshold() { - let (t, n) = (3, 5); - - // generate key using t-of-n session - let artifacts1 = run_key_generation(t, n, None, None); - - let joint_secret1 = compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap(); - - // let's say we want to decrease threshold so that it becames (t-1)-of-n - let new_t = t - 1; - let artifacts2 = run_key_share_refreshing(t, new_t, n, &artifacts1); - let joint_secret2 = compute_joint_secret_from_shares(new_t, &artifacts2.secret_shares.iter().take(new_t + 1).collect::>(), - &artifacts2.id_numbers.iter().take(new_t + 1).collect::>()).unwrap(); - assert_eq!(joint_secret1, joint_secret2); - - // let's say we want to decrease threshold once again so that it becames (t-2)-of-n - let t = t - 1; - let new_t = t - 2; - let 
artifacts3 = run_key_share_refreshing(t, new_t, n, &artifacts2); - let joint_secret3 = compute_joint_secret_from_shares(new_t, &artifacts3.secret_shares.iter().take(new_t + 1).collect::>(), - &artifacts3.id_numbers.iter().take(new_t + 1).collect::>()).unwrap(); - assert_eq!(joint_secret1, joint_secret3); - } - - #[test] - fn full_zero_secret_generation_math_session() { - let test_cases = vec![(1, 4), (2, 4)]; - for (t, n) in test_cases { - // run joint zero generation session - let id_numbers: Vec<_> = (0..n).map(|_| generate_random_scalar().unwrap()).collect(); - let artifacts = run_zero_key_generation(t, n, &id_numbers); - - // check that zero secret is generated - // we can't compute secrets sum here, because result will be zero (invalid secret, unsupported by SECP256k1) - // so just use complement trick: x + (-x) = 0 - // TODO [Refac]: switch to SECP256K1-free scalar EC arithmetic - let partial_joint_secret = compute_secret_sum(artifacts.polynoms1.iter().take(n - 1).map(|p| &p[0])).unwrap(); - let mut partial_joint_secret_complement = artifacts.polynoms1[n - 1][0].clone(); - partial_joint_secret_complement.neg().unwrap(); - assert_eq!(partial_joint_secret, partial_joint_secret_complement); - } - } - - #[test] - fn full_generation_with_multiplication() { - let test_cases = vec![(1, 3), (2, 5), (2, 7), (3, 8)]; - for (t, n) in test_cases { - // generate two shared secrets - let artifacts1 = run_key_generation(t, n, None, None); - let artifacts2 = run_key_generation(t, n, Some(artifacts1.id_numbers.clone()), None); - - // multiplicate original secrets - let joint_secret1 = compute_joint_secret(artifacts1.polynoms1.iter().map(|p| &p[0])).unwrap(); - let joint_secret2 = compute_joint_secret(artifacts2.polynoms1.iter().map(|p| &p[0])).unwrap(); - let mut expected_joint_secret_mul = joint_secret1; - expected_joint_secret_mul.mul(&joint_secret2).unwrap(); - - // run multiplication protocol - let joint_secret_mul_shares = run_multiplication_protocol(t, 
&artifacts1.secret_shares, &artifacts2.secret_shares); - - // calculate actual secrets multiplication - let double_t = t * 2; - let actual_joint_secret_mul = compute_joint_secret_from_shares(double_t, - &joint_secret_mul_shares.iter().take(double_t + 1).collect::>(), - &artifacts1.id_numbers.iter().take(double_t + 1).collect::>()).unwrap(); - - assert_eq!(actual_joint_secret_mul, expected_joint_secret_mul); - } - } - - #[test] - fn full_generation_with_reciprocal() { - let test_cases = vec![(1, 3), (2, 5), (2, 7), (2, 7), (3, 8)]; - for (t, n) in test_cases { - // generate shared secret - let artifacts = run_key_generation(t, n, None, None); - - // calculate inversion of original shared secret - let joint_secret = compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); - let mut expected_joint_secret_inv = joint_secret.clone(); - expected_joint_secret_inv.inv().unwrap(); - - // run inversion protocol - let reciprocal_shares = run_reciprocal_protocol(t, &artifacts); - - // calculate actual secret inversion - let double_t = t * 2; - let actual_joint_secret_inv = compute_joint_secret_from_shares(double_t, - &reciprocal_shares.iter().take(double_t + 1).collect::>(), - &artifacts.id_numbers.iter().take(double_t + 1).collect::>()).unwrap(); - - assert_eq!(actual_joint_secret_inv, expected_joint_secret_inv); - } - } + use super::*; + use ethkey::{recover, verify_public, KeyPair}; + use std::iter::once; + + #[derive(Clone)] + struct KeyGenerationArtifacts { + id_numbers: Vec, + polynoms1: Vec>, + secrets1: Vec>, + public_shares: Vec, + secret_shares: Vec, + joint_public: Public, + } + + struct ZeroGenerationArtifacts { + polynoms1: Vec>, + secret_shares: Vec, + } + + fn prepare_polynoms1(t: usize, n: usize, secret_required: Option) -> Vec> { + let mut polynoms1: Vec<_> = (0..n) + .map(|_| generate_random_polynom(t).unwrap()) + .collect(); + // if we need specific secret to be shared, update polynoms so that sum of their free terms = required secret + if 
let Some(mut secret_required) = secret_required { + for polynom1 in polynoms1.iter_mut().take(n - 1) { + let secret_coeff1 = generate_random_scalar().unwrap(); + secret_required.sub(&secret_coeff1).unwrap(); + polynom1[0] = secret_coeff1; + } + + polynoms1[n - 1][0] = secret_required; + } + polynoms1 + } + + fn run_key_generation( + t: usize, + n: usize, + id_numbers: Option>, + secret_required: Option, + ) -> KeyGenerationArtifacts { + // === PART1: DKG === + + // data, gathered during initialization + let derived_point = Random.generate().unwrap().public().clone(); + let id_numbers: Vec<_> = match id_numbers { + Some(id_numbers) => id_numbers, + None => (0..n).map(|_| generate_random_scalar().unwrap()).collect(), + }; + + // data, generated during keys dissemination + let polynoms1 = prepare_polynoms1(t, n, secret_required); + let secrets1: Vec<_> = (0..n) + .map(|i| { + (0..n) + .map(|j| compute_polynom(&polynoms1[i], &id_numbers[j]).unwrap()) + .collect::>() + }) + .collect(); + + // following data is used only on verification step + let polynoms2: Vec<_> = (0..n) + .map(|_| generate_random_polynom(t).unwrap()) + .collect(); + let secrets2: Vec<_> = (0..n) + .map(|i| { + (0..n) + .map(|j| compute_polynom(&polynoms2[i], &id_numbers[j]).unwrap()) + .collect::>() + }) + .collect(); + let publics: Vec<_> = (0..n) + .map(|i| { + public_values_generation(t, &derived_point, &polynoms1[i], &polynoms2[i]).unwrap() + }) + .collect(); + + // keys verification + (0..n).for_each(|i| { + (0..n).filter(|&j| i != j).for_each(|j| { + assert!(keys_verification( + t, + &derived_point, + &id_numbers[i], + &secrets1[j][i], + &secrets2[j][i], + &publics[j] + ) + .unwrap()); + }) + }); + + // data, generated during keys generation + let public_shares: Vec<_> = (0..n) + .map(|i| compute_public_share(&polynoms1[i][0]).unwrap()) + .collect(); + let secret_shares: Vec<_> = (0..n) + .map(|i| compute_secret_share(secrets1.iter().map(|s| &s[i])).unwrap()) + .collect(); + + // joint public 
key, as a result of DKG + let joint_public = compute_joint_public(public_shares.iter()).unwrap(); + + KeyGenerationArtifacts { + id_numbers: id_numbers, + polynoms1: polynoms1, + secrets1: secrets1, + public_shares: public_shares, + secret_shares: secret_shares, + joint_public: joint_public, + } + } + + fn run_zero_key_generation( + t: usize, + n: usize, + id_numbers: &[Secret], + ) -> ZeroGenerationArtifacts { + // data, generated during keys dissemination + let polynoms1 = prepare_polynoms1(t, n, Some(zero_scalar())); + let secrets1: Vec<_> = (0..n) + .map(|i| { + (0..n) + .map(|j| compute_polynom(&polynoms1[i], &id_numbers[j]).unwrap()) + .collect::>() + }) + .collect(); + + // data, generated during keys generation + let secret_shares: Vec<_> = (0..n) + .map(|i| compute_secret_share(secrets1.iter().map(|s| &s[i])).unwrap()) + .collect(); + + ZeroGenerationArtifacts { + polynoms1: polynoms1, + secret_shares: secret_shares, + } + } + + fn run_key_share_refreshing( + old_t: usize, + new_t: usize, + new_n: usize, + old_artifacts: &KeyGenerationArtifacts, + ) -> KeyGenerationArtifacts { + // === share refreshing protocol from + // === based on "Verifiable Secret Redistribution for Threshold Sharing Schemes" + // === http://www.cs.cmu.edu/~wing/publications/CMU-CS-02-114.pdf + + // generate new id_numbers for new nodes + let new_nodes = new_n.saturating_sub(old_artifacts.id_numbers.len()); + let id_numbers: Vec<_> = old_artifacts + .id_numbers + .iter() + .take(new_n) + .cloned() + .chain((0..new_nodes).map(|_| generate_random_scalar().unwrap())) + .collect(); + + // on every authorized node: generate random polynomial ai(j) = si + ... 
+ ai[new_t - 1] * j^(new_t - 1) + let mut subshare_polynoms = Vec::new(); + for i in 0..old_t + 1 { + let mut subshare_polynom = generate_random_polynom(new_t).unwrap(); + subshare_polynom[0] = old_artifacts.secret_shares[i].clone(); + subshare_polynoms.push(subshare_polynom); + } + + // on every authorized node: calculate subshare for every new node + let mut subshares = Vec::new(); + for j in 0..new_n { + let mut subshares_to_j = Vec::new(); + for i in 0..old_t + 1 { + let subshare_from_i_to_j = + compute_polynom(&subshare_polynoms[i], &id_numbers[j]).unwrap(); + subshares_to_j.push(subshare_from_i_to_j); + } + subshares.push(subshares_to_j); + } + + // on every new node: generate new share using Lagrange interpolation + // on every node: generate new share using Lagrange interpolation + let mut new_secret_shares = Vec::new(); + for j in 0..new_n { + let mut subshares_to_j = Vec::new(); + for i in 0..old_t + 1 { + let subshare_from_i = &subshares[j][i]; + let id_number_i = &id_numbers[i]; + let other_id_numbers = (0usize..old_t + 1) + .filter(|j| *j != i) + .map(|j| &id_numbers[j]); + let mut subshare_from_i = + compute_shadow_mul(subshare_from_i, id_number_i, other_id_numbers).unwrap(); + if old_t % 2 != 0 { + subshare_from_i.neg().unwrap(); + } + subshares_to_j.push(subshare_from_i); + } + new_secret_shares.push(compute_secret_sum(subshares_to_j.iter()).unwrap()); + } + + let mut result = old_artifacts.clone(); + result.id_numbers = id_numbers; + result.secret_shares = new_secret_shares; + result + } + + fn run_multiplication_protocol( + t: usize, + secret_shares1: &[Secret], + secret_shares2: &[Secret], + ) -> Vec { + let n = secret_shares1.len(); + assert!(t * 2 + 1 <= n); + + // shares of secrets multiplication = multiplication of secrets shares + let mul_shares: Vec<_> = (0..n) + .map(|i| { + let share1 = secret_shares1[i].clone(); + let share2 = secret_shares2[i].clone(); + let mut mul_share = share1; + mul_share.mul(&share2).unwrap(); + mul_share + }) + 
.collect(); + + mul_shares + } + + fn run_reciprocal_protocol(t: usize, artifacts: &KeyGenerationArtifacts) -> Vec { + // === Given a secret x mod r which is shared among n players, it is + // === required to generate shares of inv(x) mod r with out revealing + // === any information about x or inv(x). + // === https://www.researchgate.net/publication/280531698_Robust_Threshold_Elliptic_Curve_Digital_Signature + + // generate shared random secret e for given t + let n = artifacts.id_numbers.len(); + assert!(t * 2 + 1 <= n); + let e_artifacts = run_key_generation(t, n, Some(artifacts.id_numbers.clone()), None); + + // generate shares of zero for 2 * t threshold + let z_artifacts = run_zero_key_generation(2 * t, n, &artifacts.id_numbers); + + // each player computes && broadcast u[i] = x[i] * e[i] + z[i] + let ui: Vec<_> = (0..n) + .map(|i| { + compute_ecdsa_inversed_secret_coeff_share( + &artifacts.secret_shares[i], + &e_artifacts.secret_shares[i], + &z_artifacts.secret_shares[i], + ) + .unwrap() + }) + .collect(); + + // players can interpolate the polynomial of degree 2t and compute u && inv(u): + let u_inv = compute_ecdsa_inversed_secret_coeff_from_shares( + t, + &artifacts + .id_numbers + .iter() + .take(2 * t + 1) + .cloned() + .collect::>(), + &ui.iter().take(2 * t + 1).cloned().collect::>(), + ) + .unwrap(); + + // each player Pi computes his share of inv(x) as e[i] * inv(u) + let x_inv_shares: Vec<_> = (0..n) + .map(|i| { + let mut x_inv_share = e_artifacts.secret_shares[i].clone(); + x_inv_share.mul(&u_inv).unwrap(); + x_inv_share + }) + .collect(); + + x_inv_shares + } + + pub fn do_encryption_and_decryption( + t: usize, + joint_public: &Public, + id_numbers: &[Secret], + secret_shares: &[Secret], + joint_secret: Option<&Secret>, + document_secret_plain: Public, + ) -> (Public, Public) { + // === PART2: encryption using joint public key === + + // the next line is executed on KeyServer-client + let encrypted_secret = encrypt_secret(&document_secret_plain, 
&joint_public).unwrap(); + + // === PART3: decryption === + + // next line is executed on KeyServer client + let access_key = generate_random_scalar().unwrap(); + + // use t + 1 nodes to compute joint shadow point + let nodes_shadows: Vec<_> = (0..t + 1) + .map(|i| { + compute_node_shadow( + &secret_shares[i], + &id_numbers[i], + id_numbers + .iter() + .enumerate() + .filter(|&(j, _)| j != i) + .take(t) + .map(|(_, id_number)| id_number), + ) + .unwrap() + }) + .collect(); + + let nodes_shadow_points: Vec<_> = nodes_shadows + .iter() + .map(|s| { + compute_node_shadow_point(&access_key, &encrypted_secret.common_point, s, None) + .unwrap() + }) + .map(|sp| sp.0) + .collect(); + + assert_eq!(nodes_shadows.len(), t + 1); + assert_eq!(nodes_shadow_points.len(), t + 1); + + let joint_shadow_point = compute_joint_shadow_point(nodes_shadow_points.iter()).unwrap(); + let joint_shadow_point_test = compute_joint_shadow_point_test( + &access_key, + &encrypted_secret.common_point, + nodes_shadows.iter(), + ) + .unwrap(); + assert_eq!(joint_shadow_point, joint_shadow_point_test); + + // decrypt encrypted secret using joint shadow point + let document_secret_decrypted = decrypt_with_joint_shadow( + t, + &access_key, + &encrypted_secret.encrypted_point, + &joint_shadow_point, + ) + .unwrap(); + + // decrypt encrypted secret using joint secret [just for test] + let document_secret_decrypted_test = match joint_secret { + Some(joint_secret) => decrypt_with_joint_secret( + &encrypted_secret.encrypted_point, + &encrypted_secret.common_point, + joint_secret, + ) + .unwrap(), + None => document_secret_decrypted.clone(), + }; + + (document_secret_decrypted, document_secret_decrypted_test) + } + + #[test] + fn full_encryption_math_session() { + let test_cases = [ + (0, 2), + (1, 2), + (1, 3), + (2, 3), + (1, 4), + (2, 4), + (3, 4), + (1, 5), + (2, 5), + (3, 5), + (4, 5), + (1, 10), + (2, 10), + (3, 10), + (4, 10), + (5, 10), + (6, 10), + (7, 10), + (8, 10), + (9, 10), + ]; + for &(t, n) 
in &test_cases { + let artifacts = run_key_generation(t, n, None, None); + + // compute joint private key [just for test] + let joint_secret = + compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); + let joint_key_pair = KeyPair::from_secret(joint_secret.clone()).unwrap(); + assert_eq!(&artifacts.joint_public, joint_key_pair.public()); + + // check secret shares computation [just for test] + let secret_shares_polynom: Vec<_> = (0..t + 1) + .map(|k| compute_secret_share(artifacts.polynoms1.iter().map(|p| &p[k])).unwrap()) + .collect(); + let secret_shares_calculated_from_polynom: Vec<_> = artifacts + .id_numbers + .iter() + .map(|id_number| compute_polynom(&*secret_shares_polynom, id_number).unwrap()) + .collect(); + assert_eq!( + artifacts.secret_shares, + secret_shares_calculated_from_polynom + ); + + // now encrypt and decrypt data + let document_secret_plain = generate_random_point().unwrap(); + let (document_secret_decrypted, document_secret_decrypted_test) = + do_encryption_and_decryption( + t, + &artifacts.joint_public, + &artifacts.id_numbers, + &artifacts.secret_shares, + Some(&joint_secret), + document_secret_plain.clone(), + ); + + assert_eq!(document_secret_plain, document_secret_decrypted_test); + assert_eq!(document_secret_plain, document_secret_decrypted); + } + } + + #[test] + fn local_signature_works() { + let key_pair = Random.generate().unwrap(); + let message_hash = "0000000000000000000000000000000000000000000000000000000000000042" + .parse() + .unwrap(); + let nonce = generate_random_scalar().unwrap(); + let signature = + local_compute_schnorr_signature(&nonce, key_pair.secret(), &message_hash).unwrap(); + assert_eq!( + verify_schnorr_signature(key_pair.public(), &signature, &message_hash), + Ok(true) + ); + } + + #[test] + fn full_schnorr_signature_math_session() { + let test_cases = [ + (0, 1), + (0, 2), + (1, 2), + (1, 3), + (2, 3), + (1, 4), + (2, 4), + (3, 4), + (1, 5), + (2, 5), + (3, 5), + (4, 5), + (1, 10), + (2, 
10), + (3, 10), + (4, 10), + (5, 10), + (6, 10), + (7, 10), + (8, 10), + (9, 10), + ]; + for &(t, n) in &test_cases { + // hash of the message to be signed + let message_hash: Secret = + "0000000000000000000000000000000000000000000000000000000000000042" + .parse() + .unwrap(); + + // === MiDS-S algorithm === + // setup: all nodes share master secret key && every node knows master public key + let artifacts = run_key_generation(t, n, None, None); + + // in this gap (not related to math): + // master node should ask every other node if it is able to do a signing + // if there are < than t+1 nodes, able to sign => error + // select t+1 nodes for signing session + // all steps below are for this subset of nodes + let n = t + 1; + + // step 1: run DKG to generate one-time secret key (nonce) + let id_numbers = artifacts.id_numbers.iter().cloned().take(n).collect(); + let one_time_artifacts = run_key_generation(t, n, Some(id_numbers), None); + + // step 2: message hash && x coordinate of one-time public value are combined + let combined_hash = + combine_message_hash_with_public(&message_hash, &one_time_artifacts.joint_public) + .unwrap(); + + // step 3: compute signature shares + let partial_signatures: Vec<_> = (0..n) + .map(|i| { + compute_schnorr_signature_share( + t, + &combined_hash, + &one_time_artifacts.polynoms1[i][0], + &artifacts.secret_shares[i], + &artifacts.id_numbers[i], + artifacts + .id_numbers + .iter() + .enumerate() + .filter(|&(j, _)| i != j) + .map(|(_, n)| n) + .take(t), + ) + .unwrap() + }) + .collect(); + + // step 4: receive and verify signatures shares from other nodes + let received_signatures: Vec> = (0..n) + .map(|i| { + (0..n) + .filter(|j| i != *j) + .map(|j| { + let signature_share = partial_signatures[j].clone(); + assert!(_check_schnorr_signature_share( + &combined_hash, + &signature_share, + &artifacts.public_shares[j], + &one_time_artifacts.public_shares[j], + artifacts.id_numbers.iter().take(t) + ) + .unwrap_or(false)); + 
signature_share + }) + .collect() + }) + .collect(); + + // step 5: compute signature + let signatures: Vec<_> = (0..n) + .map(|i| { + ( + combined_hash.clone(), + compute_schnorr_signature( + received_signatures[i] + .iter() + .chain(once(&partial_signatures[i])), + ) + .unwrap(), + ) + }) + .collect(); + + // === verify signature === + let master_secret = + compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); + let nonce = + compute_joint_secret(one_time_artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); + let local_signature = + local_compute_schnorr_signature(&nonce, &master_secret, &message_hash).unwrap(); + for signature in &signatures { + assert_eq!(signature, &local_signature); + assert_eq!( + verify_schnorr_signature(&artifacts.joint_public, signature, &message_hash), + Ok(true) + ); + } + } + } + + #[test] + fn full_ecdsa_signature_math_session() { + let test_cases = [(2, 5), (2, 6), (3, 11), (4, 11)]; + for &(t, n) in &test_cases { + // values that can be hardcoded + let joint_secret: Secret = Random.generate().unwrap().secret().clone(); + let joint_nonce: Secret = Random.generate().unwrap().secret().clone(); + let message_hash: H256 = H256::random(); + + // convert message hash to EC scalar + let message_hash_scalar = to_scalar(message_hash.clone()).unwrap(); + + // generate secret key shares + let artifacts = run_key_generation(t, n, None, Some(joint_secret)); + + // generate nonce shares + let nonce_artifacts = + run_key_generation(t, n, Some(artifacts.id_numbers.clone()), Some(joint_nonce)); + + // compute nonce public + // x coordinate (mapped to EC field) of this public is the r-portion of signature + let nonce_public_shares: Vec<_> = (0..n) + .map(|i| compute_public_share(&nonce_artifacts.polynoms1[i][0]).unwrap()) + .collect(); + let nonce_public = compute_joint_public(nonce_public_shares.iter()).unwrap(); + let signature_r = compute_ecdsa_r(&nonce_public).unwrap(); + + // compute shares of inv(nonce) so that both nonce && 
inv(nonce) are still unknown to all nodes + let nonce_inv_shares = run_reciprocal_protocol(t, &nonce_artifacts); + + // compute multiplication of secret-shares * inv-nonce-shares + let mul_shares = + run_multiplication_protocol(t, &artifacts.secret_shares, &nonce_inv_shares); + + // compute shares for s portion of signature: nonce_inv * (message_hash + secret * signature_r) + // every node broadcasts this share + let double_t = 2 * t; + let signature_s_shares: Vec<_> = (0..double_t + 1) + .map(|i| { + compute_ecdsa_s_share( + &nonce_inv_shares[i], + &mul_shares[i], + &signature_r, + &message_hash_scalar, + ) + .unwrap() + }) + .collect(); + + // compute signature_s from received shares + let signature_s = compute_ecdsa_s( + t, + &signature_s_shares, + &artifacts + .id_numbers + .iter() + .take(double_t + 1) + .cloned() + .collect::>(), + ) + .unwrap(); + + // check signature + let signature_actual = + serialize_ecdsa_signature(&nonce_public, signature_r, signature_s); + let joint_secret = + compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); + let joint_secret_pair = KeyPair::from_secret(joint_secret).unwrap(); + assert_eq!( + recover(&signature_actual, &message_hash).unwrap(), + *joint_secret_pair.public() + ); + assert!( + verify_public(joint_secret_pair.public(), &signature_actual, &message_hash) + .unwrap() + ); + } + } + + #[test] + fn full_generation_math_session_with_refreshing_shares() { + let test_cases = vec![(1, 4), (6, 10)]; + for (t, n) in test_cases { + // generate key using t-of-n session + let artifacts1 = run_key_generation(t, n, None, None); + let joint_secret1 = + compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap(); + + // let's say we want to refresh existing secret shares + // by doing this every T seconds, and assuming that in each T-second period adversary KS is not able to collect t+1 secret shares + // we can be sure that the scheme is secure + let artifacts2 = run_key_share_refreshing(t, t, n, 
&artifacts1); + let joint_secret2 = compute_joint_secret_from_shares( + t, + &artifacts2 + .secret_shares + .iter() + .take(t + 1) + .collect::>(), + &artifacts2.id_numbers.iter().take(t + 1).collect::>(), + ) + .unwrap(); + assert_eq!(joint_secret1, joint_secret2); + + // refresh again + let artifacts3 = run_key_share_refreshing(t, t, n, &artifacts2); + let joint_secret3 = compute_joint_secret_from_shares( + t, + &artifacts3 + .secret_shares + .iter() + .take(t + 1) + .collect::>(), + &artifacts3.id_numbers.iter().take(t + 1).collect::>(), + ) + .unwrap(); + assert_eq!(joint_secret1, joint_secret3); + } + } + + #[test] + fn full_generation_math_session_with_adding_new_nodes() { + let test_cases = vec![(1, 3), (1, 4), (6, 10)]; + for (t, n) in test_cases { + // generate key using t-of-n session + let artifacts1 = run_key_generation(t, n, None, None); + let joint_secret1 = + compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap(); + + // let's say we want to include additional couple of servers to the set + // so that scheme becames t-of-n+2 + let artifacts2 = run_key_share_refreshing(t, t, n + 2, &artifacts1); + let joint_secret2 = compute_joint_secret_from_shares( + t, + &artifacts2 + .secret_shares + .iter() + .take(t + 1) + .collect::>(), + &artifacts2.id_numbers.iter().take(t + 1).collect::>(), + ) + .unwrap(); + assert_eq!(joint_secret1, joint_secret2); + + // include another server (t-of-n+3) + let artifacts3 = run_key_share_refreshing(t, t, n + 3, &artifacts2); + let joint_secret3 = compute_joint_secret_from_shares( + t, + &artifacts3 + .secret_shares + .iter() + .take(t + 1) + .collect::>(), + &artifacts3.id_numbers.iter().take(t + 1).collect::>(), + ) + .unwrap(); + assert_eq!(joint_secret1, joint_secret3); + } + } + + #[test] + fn full_generation_math_session_with_decreasing_threshold() { + let (t, n) = (3, 5); + + // generate key using t-of-n session + let artifacts1 = run_key_generation(t, n, None, None); + + let joint_secret1 = + 
compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap(); + + // let's say we want to decrease threshold so that it becames (t-1)-of-n + let new_t = t - 1; + let artifacts2 = run_key_share_refreshing(t, new_t, n, &artifacts1); + let joint_secret2 = compute_joint_secret_from_shares( + new_t, + &artifacts2 + .secret_shares + .iter() + .take(new_t + 1) + .collect::>(), + &artifacts2 + .id_numbers + .iter() + .take(new_t + 1) + .collect::>(), + ) + .unwrap(); + assert_eq!(joint_secret1, joint_secret2); + + // let's say we want to decrease threshold once again so that it becames (t-2)-of-n + let t = t - 1; + let new_t = t - 2; + let artifacts3 = run_key_share_refreshing(t, new_t, n, &artifacts2); + let joint_secret3 = compute_joint_secret_from_shares( + new_t, + &artifacts3 + .secret_shares + .iter() + .take(new_t + 1) + .collect::>(), + &artifacts3 + .id_numbers + .iter() + .take(new_t + 1) + .collect::>(), + ) + .unwrap(); + assert_eq!(joint_secret1, joint_secret3); + } + + #[test] + fn full_zero_secret_generation_math_session() { + let test_cases = vec![(1, 4), (2, 4)]; + for (t, n) in test_cases { + // run joint zero generation session + let id_numbers: Vec<_> = (0..n).map(|_| generate_random_scalar().unwrap()).collect(); + let artifacts = run_zero_key_generation(t, n, &id_numbers); + + // check that zero secret is generated + // we can't compute secrets sum here, because result will be zero (invalid secret, unsupported by SECP256k1) + // so just use complement trick: x + (-x) = 0 + // TODO [Refac]: switch to SECP256K1-free scalar EC arithmetic + let partial_joint_secret = + compute_secret_sum(artifacts.polynoms1.iter().take(n - 1).map(|p| &p[0])).unwrap(); + let mut partial_joint_secret_complement = artifacts.polynoms1[n - 1][0].clone(); + partial_joint_secret_complement.neg().unwrap(); + assert_eq!(partial_joint_secret, partial_joint_secret_complement); + } + } + + #[test] + fn full_generation_with_multiplication() { + let test_cases = vec![(1, 
3), (2, 5), (2, 7), (3, 8)]; + for (t, n) in test_cases { + // generate two shared secrets + let artifacts1 = run_key_generation(t, n, None, None); + let artifacts2 = run_key_generation(t, n, Some(artifacts1.id_numbers.clone()), None); + + // multiplicate original secrets + let joint_secret1 = + compute_joint_secret(artifacts1.polynoms1.iter().map(|p| &p[0])).unwrap(); + let joint_secret2 = + compute_joint_secret(artifacts2.polynoms1.iter().map(|p| &p[0])).unwrap(); + let mut expected_joint_secret_mul = joint_secret1; + expected_joint_secret_mul.mul(&joint_secret2).unwrap(); + + // run multiplication protocol + let joint_secret_mul_shares = run_multiplication_protocol( + t, + &artifacts1.secret_shares, + &artifacts2.secret_shares, + ); + + // calculate actual secrets multiplication + let double_t = t * 2; + let actual_joint_secret_mul = compute_joint_secret_from_shares( + double_t, + &joint_secret_mul_shares + .iter() + .take(double_t + 1) + .collect::>(), + &artifacts1 + .id_numbers + .iter() + .take(double_t + 1) + .collect::>(), + ) + .unwrap(); + + assert_eq!(actual_joint_secret_mul, expected_joint_secret_mul); + } + } + + #[test] + fn full_generation_with_reciprocal() { + let test_cases = vec![(1, 3), (2, 5), (2, 7), (2, 7), (3, 8)]; + for (t, n) in test_cases { + // generate shared secret + let artifacts = run_key_generation(t, n, None, None); + + // calculate inversion of original shared secret + let joint_secret = + compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); + let mut expected_joint_secret_inv = joint_secret.clone(); + expected_joint_secret_inv.inv().unwrap(); + + // run inversion protocol + let reciprocal_shares = run_reciprocal_protocol(t, &artifacts); + + // calculate actual secret inversion + let double_t = t * 2; + let actual_joint_secret_inv = compute_joint_secret_from_shares( + double_t, + &reciprocal_shares + .iter() + .take(double_t + 1) + .collect::>(), + &artifacts + .id_numbers + .iter() + .take(double_t + 1) + 
.collect::>(), + ) + .unwrap(); + + assert_eq!(actual_joint_secret_inv, expected_joint_secret_inv); + } + } } diff --git a/secret-store/src/key_server_cluster/message.rs b/secret-store/src/key_server_cluster/message.rs index 98520564f..92385a11e 100644 --- a/secret-store/src/key_server_cluster/message.rs +++ b/secret-store/src/key_server_cluster/message.rs @@ -14,12 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt; -use std::collections::{BTreeSet, BTreeMap}; +use super::{ + Error, SerializableAddress, SerializableH256, SerializableMessageHash, SerializablePublic, + SerializableRequester, SerializableSecret, SerializableSignature, +}; use ethkey::Secret; use key_server_cluster::SessionId; -use super::{Error, SerializableH256, SerializablePublic, SerializableSecret, - SerializableSignature, SerializableMessageHash, SerializableRequester, SerializableAddress}; +use std::{ + collections::{BTreeMap, BTreeSet}, + fmt, +}; pub type MessageSessionId = SerializableH256; pub type MessageNodeId = SerializablePublic; @@ -27,1531 +31,1645 @@ pub type MessageNodeId = SerializablePublic; /// All possible messages that can be sent during encryption/decryption sessions. #[derive(Clone, Debug)] pub enum Message { - /// Cluster message. - Cluster(ClusterMessage), - /// Key generation message. - Generation(GenerationMessage), - /// Encryption message. - Encryption(EncryptionMessage), - /// Decryption message. - Decryption(DecryptionMessage), - /// Schnorr signing message. - SchnorrSigning(SchnorrSigningMessage), - /// ECDSA signing message. - EcdsaSigning(EcdsaSigningMessage), - /// Key version negotiation message. - KeyVersionNegotiation(KeyVersionNegotiationMessage), - /// Share add message. - ShareAdd(ShareAddMessage), - /// Servers set change message. - ServersSetChange(ServersSetChangeMessage), + /// Cluster message. + Cluster(ClusterMessage), + /// Key generation message. 
+ Generation(GenerationMessage), + /// Encryption message. + Encryption(EncryptionMessage), + /// Decryption message. + Decryption(DecryptionMessage), + /// Schnorr signing message. + SchnorrSigning(SchnorrSigningMessage), + /// ECDSA signing message. + EcdsaSigning(EcdsaSigningMessage), + /// Key version negotiation message. + KeyVersionNegotiation(KeyVersionNegotiationMessage), + /// Share add message. + ShareAdd(ShareAddMessage), + /// Servers set change message. + ServersSetChange(ServersSetChangeMessage), } /// All possible cluster-level messages. #[derive(Clone, Debug)] pub enum ClusterMessage { - /// Introduce node public key. - NodePublicKey(NodePublicKey), - /// Confirm that node owns its private key. - NodePrivateKeySignature(NodePrivateKeySignature), - /// Keep alive message. - KeepAlive(KeepAlive), - /// Keep alive message response. - KeepAliveResponse(KeepAliveResponse), + /// Introduce node public key. + NodePublicKey(NodePublicKey), + /// Confirm that node owns its private key. + NodePrivateKeySignature(NodePrivateKeySignature), + /// Keep alive message. + KeepAlive(KeepAlive), + /// Keep alive message response. + KeepAliveResponse(KeepAliveResponse), } /// All possible messages that can be sent during key generation session. #[derive(Clone, Debug, Serialize, Deserialize)] pub enum GenerationMessage { - /// Initialize new DKG session. - InitializeSession(InitializeSession), - /// Confirm DKG session initialization. - ConfirmInitialization(ConfirmInitialization), - /// Broadcast data, calculated during session initialization phase. - CompleteInitialization(CompleteInitialization), - /// Generated keys are sent to every node. - KeysDissemination(KeysDissemination), - /// Broadcast self public key portion. - PublicKeyShare(PublicKeyShare), - /// When session error has occured. - SessionError(SessionError), - /// When session is completed. - SessionCompleted(SessionCompleted), + /// Initialize new DKG session. 
+ InitializeSession(InitializeSession), + /// Confirm DKG session initialization. + ConfirmInitialization(ConfirmInitialization), + /// Broadcast data, calculated during session initialization phase. + CompleteInitialization(CompleteInitialization), + /// Generated keys are sent to every node. + KeysDissemination(KeysDissemination), + /// Broadcast self public key portion. + PublicKeyShare(PublicKeyShare), + /// When session error has occured. + SessionError(SessionError), + /// When session is completed. + SessionCompleted(SessionCompleted), } /// All possible messages that can be sent during encryption session. #[derive(Clone, Debug)] pub enum EncryptionMessage { - /// Initialize encryption session. - InitializeEncryptionSession(InitializeEncryptionSession), - /// Confirm/reject encryption session initialization. - ConfirmEncryptionInitialization(ConfirmEncryptionInitialization), - /// When encryption session error has occured. - EncryptionSessionError(EncryptionSessionError), + /// Initialize encryption session. + InitializeEncryptionSession(InitializeEncryptionSession), + /// Confirm/reject encryption session initialization. + ConfirmEncryptionInitialization(ConfirmEncryptionInitialization), + /// When encryption session error has occured. + EncryptionSessionError(EncryptionSessionError), } /// All possible messages that can be sent during consensus establishing. #[derive(Clone, Debug, Serialize, Deserialize)] pub enum ConsensusMessage { - /// Initialize consensus session. - InitializeConsensusSession(InitializeConsensusSession), - /// Confirm/reject consensus session initialization. - ConfirmConsensusInitialization(ConfirmConsensusInitialization), + /// Initialize consensus session. + InitializeConsensusSession(InitializeConsensusSession), + /// Confirm/reject consensus session initialization. + ConfirmConsensusInitialization(ConfirmConsensusInitialization), } /// All possible messages that can be sent during servers-set consensus establishing. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub enum ConsensusMessageWithServersSet { - /// Initialize consensus session. - InitializeConsensusSession(InitializeConsensusSessionWithServersSet), - /// Confirm/reject consensus session initialization. - ConfirmConsensusInitialization(ConfirmConsensusInitialization), + /// Initialize consensus session. + InitializeConsensusSession(InitializeConsensusSessionWithServersSet), + /// Confirm/reject consensus session initialization. + ConfirmConsensusInitialization(ConfirmConsensusInitialization), } /// All possible messages that can be sent during share add consensus establishing. #[derive(Clone, Debug, Serialize, Deserialize)] pub enum ConsensusMessageOfShareAdd { - /// Initialize consensus session. - InitializeConsensusSession(InitializeConsensusSessionOfShareAdd), - /// Confirm/reject consensus session initialization. - ConfirmConsensusInitialization(ConfirmConsensusInitialization), + /// Initialize consensus session. + InitializeConsensusSession(InitializeConsensusSessionOfShareAdd), + /// Confirm/reject consensus session initialization. + ConfirmConsensusInitialization(ConfirmConsensusInitialization), } /// All possible messages that can be sent during decryption session. #[derive(Clone, Debug)] pub enum DecryptionMessage { - /// Consensus establishing message. - DecryptionConsensusMessage(DecryptionConsensusMessage), - /// Request partial decryption from node. - RequestPartialDecryption(RequestPartialDecryption), - /// Partial decryption is completed. - PartialDecryption(PartialDecryption), - /// When decryption session error has occured. - DecryptionSessionError(DecryptionSessionError), - /// When decryption session is completed. - DecryptionSessionCompleted(DecryptionSessionCompleted), - /// When decryption session is delegated to another node. - DecryptionSessionDelegation(DecryptionSessionDelegation), - /// When delegated decryption session is completed. 
- DecryptionSessionDelegationCompleted(DecryptionSessionDelegationCompleted), + /// Consensus establishing message. + DecryptionConsensusMessage(DecryptionConsensusMessage), + /// Request partial decryption from node. + RequestPartialDecryption(RequestPartialDecryption), + /// Partial decryption is completed. + PartialDecryption(PartialDecryption), + /// When decryption session error has occured. + DecryptionSessionError(DecryptionSessionError), + /// When decryption session is completed. + DecryptionSessionCompleted(DecryptionSessionCompleted), + /// When decryption session is delegated to another node. + DecryptionSessionDelegation(DecryptionSessionDelegation), + /// When delegated decryption session is completed. + DecryptionSessionDelegationCompleted(DecryptionSessionDelegationCompleted), } /// All possible messages that can be sent during Schnorr signing session. #[derive(Clone, Debug)] pub enum SchnorrSigningMessage { - /// Consensus establishing message. - SchnorrSigningConsensusMessage(SchnorrSigningConsensusMessage), - /// Session key generation message. - SchnorrSigningGenerationMessage(SchnorrSigningGenerationMessage), - /// Request partial signature from node. - SchnorrRequestPartialSignature(SchnorrRequestPartialSignature), - /// Partial signature is generated. - SchnorrPartialSignature(SchnorrPartialSignature), - /// Signing error occured. - SchnorrSigningSessionError(SchnorrSigningSessionError), - /// Signing session completed. - SchnorrSigningSessionCompleted(SchnorrSigningSessionCompleted), - /// When signing session is delegated to another node. - SchnorrSigningSessionDelegation(SchnorrSigningSessionDelegation), - /// When delegated signing session is completed. - SchnorrSigningSessionDelegationCompleted(SchnorrSigningSessionDelegationCompleted), + /// Consensus establishing message. + SchnorrSigningConsensusMessage(SchnorrSigningConsensusMessage), + /// Session key generation message. 
+ SchnorrSigningGenerationMessage(SchnorrSigningGenerationMessage), + /// Request partial signature from node. + SchnorrRequestPartialSignature(SchnorrRequestPartialSignature), + /// Partial signature is generated. + SchnorrPartialSignature(SchnorrPartialSignature), + /// Signing error occured. + SchnorrSigningSessionError(SchnorrSigningSessionError), + /// Signing session completed. + SchnorrSigningSessionCompleted(SchnorrSigningSessionCompleted), + /// When signing session is delegated to another node. + SchnorrSigningSessionDelegation(SchnorrSigningSessionDelegation), + /// When delegated signing session is completed. + SchnorrSigningSessionDelegationCompleted(SchnorrSigningSessionDelegationCompleted), } /// All possible messages that can be sent during ECDSA signing session. #[derive(Clone, Debug)] pub enum EcdsaSigningMessage { - /// Consensus establishing message. - EcdsaSigningConsensusMessage(EcdsaSigningConsensusMessage), - /// Signature nonce generation message. - EcdsaSignatureNonceGenerationMessage(EcdsaSignatureNonceGenerationMessage), - /// Inversion nonce generation message. - EcdsaInversionNonceGenerationMessage(EcdsaInversionNonceGenerationMessage), - /// Inversion zero generation message. - EcdsaInversionZeroGenerationMessage(EcdsaInversionZeroGenerationMessage), - /// Inversed nonce coefficient share. - EcdsaSigningInversedNonceCoeffShare(EcdsaSigningInversedNonceCoeffShare), - /// Request partial signature from node. - EcdsaRequestPartialSignature(EcdsaRequestPartialSignature), - /// Partial signature is generated. - EcdsaPartialSignature(EcdsaPartialSignature), - /// Signing error occured. - EcdsaSigningSessionError(EcdsaSigningSessionError), - /// Signing session completed. - EcdsaSigningSessionCompleted(EcdsaSigningSessionCompleted), - /// When signing session is delegated to another node. - EcdsaSigningSessionDelegation(EcdsaSigningSessionDelegation), - /// When delegated signing session is completed. 
- EcdsaSigningSessionDelegationCompleted(EcdsaSigningSessionDelegationCompleted), + /// Consensus establishing message. + EcdsaSigningConsensusMessage(EcdsaSigningConsensusMessage), + /// Signature nonce generation message. + EcdsaSignatureNonceGenerationMessage(EcdsaSignatureNonceGenerationMessage), + /// Inversion nonce generation message. + EcdsaInversionNonceGenerationMessage(EcdsaInversionNonceGenerationMessage), + /// Inversion zero generation message. + EcdsaInversionZeroGenerationMessage(EcdsaInversionZeroGenerationMessage), + /// Inversed nonce coefficient share. + EcdsaSigningInversedNonceCoeffShare(EcdsaSigningInversedNonceCoeffShare), + /// Request partial signature from node. + EcdsaRequestPartialSignature(EcdsaRequestPartialSignature), + /// Partial signature is generated. + EcdsaPartialSignature(EcdsaPartialSignature), + /// Signing error occured. + EcdsaSigningSessionError(EcdsaSigningSessionError), + /// Signing session completed. + EcdsaSigningSessionCompleted(EcdsaSigningSessionCompleted), + /// When signing session is delegated to another node. + EcdsaSigningSessionDelegation(EcdsaSigningSessionDelegation), + /// When delegated signing session is completed. + EcdsaSigningSessionDelegationCompleted(EcdsaSigningSessionDelegationCompleted), } /// All possible messages that can be sent during servers set change session. #[derive(Clone, Debug)] pub enum ServersSetChangeMessage { - /// Consensus establishing message. - ServersSetChangeConsensusMessage(ServersSetChangeConsensusMessage), - /// Unknown sessions ids request. - UnknownSessionsRequest(UnknownSessionsRequest), - /// Unknown sessions ids. - UnknownSessions(UnknownSessions), - /// Negotiating key version to use as a base for ShareAdd session. - ShareChangeKeyVersionNegotiation(ShareChangeKeyVersionNegotiation), - /// Initialize share change session(s). - InitializeShareChangeSession(InitializeShareChangeSession), - /// Confirm share change session(s) initialization. 
- ConfirmShareChangeSessionInitialization(ConfirmShareChangeSessionInitialization), - /// Share change session delegation. - ServersSetChangeDelegate(ServersSetChangeDelegate), - /// Share change session delegation response. - ServersSetChangeDelegateResponse(ServersSetChangeDelegateResponse), - /// Share add message. - ServersSetChangeShareAddMessage(ServersSetChangeShareAddMessage), - /// Servers set change session completed. - ServersSetChangeError(ServersSetChangeError), - /// Servers set change session completed. - ServersSetChangeCompleted(ServersSetChangeCompleted), + /// Consensus establishing message. + ServersSetChangeConsensusMessage(ServersSetChangeConsensusMessage), + /// Unknown sessions ids request. + UnknownSessionsRequest(UnknownSessionsRequest), + /// Unknown sessions ids. + UnknownSessions(UnknownSessions), + /// Negotiating key version to use as a base for ShareAdd session. + ShareChangeKeyVersionNegotiation(ShareChangeKeyVersionNegotiation), + /// Initialize share change session(s). + InitializeShareChangeSession(InitializeShareChangeSession), + /// Confirm share change session(s) initialization. + ConfirmShareChangeSessionInitialization(ConfirmShareChangeSessionInitialization), + /// Share change session delegation. + ServersSetChangeDelegate(ServersSetChangeDelegate), + /// Share change session delegation response. + ServersSetChangeDelegateResponse(ServersSetChangeDelegateResponse), + /// Share add message. + ServersSetChangeShareAddMessage(ServersSetChangeShareAddMessage), + /// Servers set change session completed. + ServersSetChangeError(ServersSetChangeError), + /// Servers set change session completed. + ServersSetChangeCompleted(ServersSetChangeCompleted), } /// All possible messages that can be sent during share add session. #[derive(Clone, Debug, Serialize, Deserialize)] pub enum ShareAddMessage { - /// Consensus establishing message. 
- ShareAddConsensusMessage(ShareAddConsensusMessage), - /// Common key share data is sent to new node. - KeyShareCommon(KeyShareCommon), - /// Generated keys are sent to every node. - NewKeysDissemination(NewKeysDissemination), - /// When session error has occured. - ShareAddError(ShareAddError), + /// Consensus establishing message. + ShareAddConsensusMessage(ShareAddConsensusMessage), + /// Common key share data is sent to new node. + KeyShareCommon(KeyShareCommon), + /// Generated keys are sent to every node. + NewKeysDissemination(NewKeysDissemination), + /// When session error has occured. + ShareAddError(ShareAddError), } /// All possible messages that can be sent during key version negotiation message. #[derive(Clone, Debug, Serialize, Deserialize)] pub enum KeyVersionNegotiationMessage { - /// Request key versions. - RequestKeyVersions(RequestKeyVersions), - /// Key versions. - KeyVersions(KeyVersions), - /// When session error has occured. - KeyVersionsError(KeyVersionsError), + /// Request key versions. + RequestKeyVersions(RequestKeyVersions), + /// Key versions. + KeyVersions(KeyVersions), + /// When session error has occured. + KeyVersionsError(KeyVersionsError), } /// Introduce node public key. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct NodePublicKey { - /// Node identifier (aka node public key). - pub node_id: MessageNodeId, - /// Random data, which must be signed by peer to prove that he owns the corresponding private key. - pub confirmation_plain: SerializableH256, - /// The same random `confirmation_plain`, signed with one-time session key. - pub confirmation_signed_session: SerializableSignature, + /// Node identifier (aka node public key). + pub node_id: MessageNodeId, + /// Random data, which must be signed by peer to prove that he owns the corresponding private key. + pub confirmation_plain: SerializableH256, + /// The same random `confirmation_plain`, signed with one-time session key. 
+ pub confirmation_signed_session: SerializableSignature, } /// Confirm that node owns the private key of previously passed public key (aka node id). #[derive(Clone, Debug, Serialize, Deserialize)] pub struct NodePrivateKeySignature { - /// Previously passed `confirmation_plain`, signed with node private key. - pub confirmation_signed: SerializableSignature, + /// Previously passed `confirmation_plain`, signed with node private key. + pub confirmation_signed: SerializableSignature, } /// Ask if the node is still alive. #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct KeepAlive { -} +pub struct KeepAlive {} /// Confirm that the node is still alive. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeepAliveResponse { - /// Session id, if used for session-level keep alive. - pub session_id: Option, + /// Session id, if used for session-level keep alive. + pub session_id: Option, } /// Initialize new DKG session. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct InitializeSession { - /// Session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Session origin address (if any). - pub origin: Option, - /// Session author. - pub author: SerializableAddress, - /// All session participants along with their identification numbers. - pub nodes: BTreeMap, - /// Is zero secret generation session? - pub is_zero: bool, - /// Decryption threshold. During decryption threshold-of-route.len() nodes must came to - /// consensus to successfully decrypt message. - pub threshold: usize, - /// Derived generation point. Starting from originator, every node must multiply this - /// point by random scalar (unknown by other nodes). At the end of initialization - /// `point` will be some (k1 * k2 * ... * kn) * G = `point` where `(k1 * k2 * ... * kn)` - /// is unknown for every node. - pub derived_point: SerializablePublic, + /// Session Id. + pub session: MessageSessionId, + /// Session-level nonce. 
+ pub session_nonce: u64, + /// Session origin address (if any). + pub origin: Option, + /// Session author. + pub author: SerializableAddress, + /// All session participants along with their identification numbers. + pub nodes: BTreeMap, + /// Is zero secret generation session? + pub is_zero: bool, + /// Decryption threshold. During decryption threshold-of-route.len() nodes must came to + /// consensus to successfully decrypt message. + pub threshold: usize, + /// Derived generation point. Starting from originator, every node must multiply this + /// point by random scalar (unknown by other nodes). At the end of initialization + /// `point` will be some (k1 * k2 * ... * kn) * G = `point` where `(k1 * k2 * ... * kn)` + /// is unknown for every node. + pub derived_point: SerializablePublic, } /// Confirm DKG session initialization. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ConfirmInitialization { - /// Session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Derived generation point. - pub derived_point: SerializablePublic, + /// Session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Derived generation point. + pub derived_point: SerializablePublic, } /// Broadcast generated point to every other node. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct CompleteInitialization { - /// Session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Derived generation point. - pub derived_point: SerializablePublic, + /// Session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Derived generation point. + pub derived_point: SerializablePublic, } /// Generated keys are sent to every node. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeysDissemination { - /// Session Id. - pub session: MessageSessionId, - /// Session-level nonce. 
- pub session_nonce: u64, - /// Secret 1. - pub secret1: SerializableSecret, - /// Secret 2. - pub secret2: SerializableSecret, - /// Public values. - pub publics: Vec, + /// Session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Secret 1. + pub secret1: SerializableSecret, + /// Secret 2. + pub secret2: SerializableSecret, + /// Public values. + pub publics: Vec, } /// Node is sharing its public key share. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct PublicKeyShare { - /// Session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Public key share. - pub public_share: SerializablePublic, + /// Session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Public key share. + pub public_share: SerializablePublic, } /// When session error has occured. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SessionError { - /// Session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Error message. - pub error: Error, + /// Session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: Error, } /// When session is completed. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SessionCompleted { - /// Session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, + /// Session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, } /// Node is requested to prepare for saving encrypted data. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct InitializeEncryptionSession { - /// Encryption session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Requester. - pub requester: SerializableRequester, - /// Common point. - pub common_point: SerializablePublic, - /// Encrypted data. 
- pub encrypted_point: SerializablePublic, + /// Encryption session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Requester. + pub requester: SerializableRequester, + /// Common point. + pub common_point: SerializablePublic, + /// Encrypted data. + pub encrypted_point: SerializablePublic, } /// Node is responding to encryption initialization request. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ConfirmEncryptionInitialization { - /// Encryption session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, + /// Encryption session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, } /// When encryption session error has occured. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EncryptionSessionError { - /// Encryption session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Error message. - pub error: Error, + /// Encryption session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: Error, } /// Node is asked to be part of consensus group. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct InitializeConsensusSession { - /// Requester. - pub requester: SerializableRequester, - /// Key version. - pub version: SerializableH256, + /// Requester. + pub requester: SerializableRequester, + /// Key version. + pub version: SerializableH256, } /// Node is responding to consensus initialization request. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ConfirmConsensusInitialization { - /// Is node confirmed consensus participation. - pub is_confirmed: bool, + /// Is node confirmed consensus participation. + pub is_confirmed: bool, } /// Node is asked to be part of servers-set consensus group. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct InitializeConsensusSessionWithServersSet { - /// Migration id (if any). - pub migration_id: Option, - /// Old nodes set. - pub old_nodes_set: BTreeSet, - /// New nodes set. - pub new_nodes_set: BTreeSet, - /// Old server set, signed by requester. - pub old_set_signature: SerializableSignature, - /// New server set, signed by requester. - pub new_set_signature: SerializableSignature, + /// Migration id (if any). + pub migration_id: Option, + /// Old nodes set. + pub old_nodes_set: BTreeSet, + /// New nodes set. + pub new_nodes_set: BTreeSet, + /// Old server set, signed by requester. + pub old_set_signature: SerializableSignature, + /// New server set, signed by requester. + pub new_set_signature: SerializableSignature, } /// Node is asked to be part of servers-set consensus group. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct InitializeConsensusSessionOfShareAdd { - /// Key version. - pub version: SerializableH256, - /// Nodes that have reported version ownership. - pub version_holders: BTreeSet, - /// threshold+1 nodes from old_nodes_set selected for shares redistribution. - pub consensus_group: BTreeSet, - /// Old nodes set: all non-isolated owners of selected key share version. - pub old_nodes_set: BTreeSet, - /// New nodes map: node id => node id number. - pub new_nodes_map: BTreeMap, - /// Old server set, signed by requester. - pub old_set_signature: SerializableSignature, - /// New server set, signed by requester. - pub new_set_signature: SerializableSignature, + /// Key version. + pub version: SerializableH256, + /// Nodes that have reported version ownership. + pub version_holders: BTreeSet, + /// threshold+1 nodes from old_nodes_set selected for shares redistribution. + pub consensus_group: BTreeSet, + /// Old nodes set: all non-isolated owners of selected key share version. + pub old_nodes_set: BTreeSet, + /// New nodes map: node id => node id number. 
+ pub new_nodes_map: BTreeMap, + /// Old server set, signed by requester. + pub old_set_signature: SerializableSignature, + /// New server set, signed by requester. + pub new_set_signature: SerializableSignature, } /// Consensus-related Schnorr signing message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SchnorrSigningConsensusMessage { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Consensus message. - pub message: ConsensusMessage, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Consensus message. + pub message: ConsensusMessage, } /// Session key generation message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SchnorrSigningGenerationMessage { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Generation message. - pub message: GenerationMessage, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Generation message. + pub message: GenerationMessage, } /// Request partial Schnorr signature. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SchnorrRequestPartialSignature { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Request id. - pub request_id: SerializableSecret, - /// Message hash. - pub message_hash: SerializableMessageHash, - /// Selected nodes. - pub nodes: BTreeSet, + /// Generation session Id. 
+ pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Request id. + pub request_id: SerializableSecret, + /// Message hash. + pub message_hash: SerializableMessageHash, + /// Selected nodes. + pub nodes: BTreeSet, } /// Partial Schnorr signature. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SchnorrPartialSignature { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Request id. - pub request_id: SerializableSecret, - /// S part of signature. - pub partial_signature: SerializableSecret, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Request id. + pub request_id: SerializableSecret, + /// S part of signature. + pub partial_signature: SerializableSecret, } /// When Schnorr signing session error has occured. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SchnorrSigningSessionError { - /// Encryption session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Error message. - pub error: Error, + /// Encryption session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: Error, } /// Schnorr signing session completed. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SchnorrSigningSessionCompleted { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, + /// Generation session Id. 
+ pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, } /// When Schnorr signing session is delegated to another node. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SchnorrSigningSessionDelegation { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Requester. - pub requester: SerializableRequester, - /// Key version. - pub version: SerializableH256, - /// Message hash. - pub message_hash: SerializableH256, + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Requester. + pub requester: SerializableRequester, + /// Key version. + pub version: SerializableH256, + /// Message hash. + pub message_hash: SerializableH256, } /// When delegated Schnorr signing session is completed. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SchnorrSigningSessionDelegationCompleted { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// S-portion of signature. - pub signature_s: SerializableSecret, - /// C-portion of signature. - pub signature_c: SerializableSecret, + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// S-portion of signature. + pub signature_s: SerializableSecret, + /// C-portion of signature. + pub signature_c: SerializableSecret, } /// Consensus-related ECDSA signing message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaSigningConsensusMessage { - /// Generation session Id. 
- pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Consensus message. - pub message: ConsensusMessage, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Consensus message. + pub message: ConsensusMessage, } /// ECDSA signature nonce generation message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaSignatureNonceGenerationMessage { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Generation message. - pub message: GenerationMessage, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Generation message. + pub message: GenerationMessage, } /// ECDSA inversion nonce generation message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaInversionNonceGenerationMessage { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Generation message. - pub message: GenerationMessage, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Generation message. + pub message: GenerationMessage, } /// ECDSA inversed nonce share message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaSigningInversedNonceCoeffShare { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. 
- pub session_nonce: u64, - /// Inversed nonce coefficient share. - pub inversed_nonce_coeff_share: SerializableSecret, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Inversed nonce coefficient share. + pub inversed_nonce_coeff_share: SerializableSecret, } /// ECDSA inversion zero generation message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaInversionZeroGenerationMessage { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Generation message. - pub message: GenerationMessage, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Generation message. + pub message: GenerationMessage, } /// Request partial ECDSA signature. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaRequestPartialSignature { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Request id. - pub request_id: SerializableSecret, - /// - pub inversed_nonce_coeff: SerializableSecret, - /// Message hash. - pub message_hash: SerializableMessageHash, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Request id. + pub request_id: SerializableSecret, + /// + pub inversed_nonce_coeff: SerializableSecret, + /// Message hash. + pub message_hash: SerializableMessageHash, } /// Partial ECDSA signature. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaPartialSignature { - /// Generation session Id. 
- pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Request id. - pub request_id: SerializableSecret, - /// Partial S part of signature. - pub partial_signature_s: SerializableSecret, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Request id. + pub request_id: SerializableSecret, + /// Partial S part of signature. + pub partial_signature_s: SerializableSecret, } /// When ECDSA signing session error has occured. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaSigningSessionError { - /// Encryption session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Error message. - pub error: Error, + /// Encryption session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: Error, } /// ECDSA signing session completed. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaSigningSessionCompleted { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, } /// When ECDSA signing session is delegated to another node. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaSigningSessionDelegation { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. 
- pub session_nonce: u64, - /// Requestor signature. - pub requester: SerializableRequester, - /// Key version. - pub version: SerializableH256, - /// Message hash. - pub message_hash: SerializableH256, + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Requestor signature. + pub requester: SerializableRequester, + /// Key version. + pub version: SerializableH256, + /// Message hash. + pub message_hash: SerializableH256, } /// When delegated ECDSA signing session is completed. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EcdsaSigningSessionDelegationCompleted { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Signature. - pub signature: SerializableSignature, + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Signature. + pub signature: SerializableSignature, } /// Consensus-related decryption message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct DecryptionConsensusMessage { - /// Generation session Id. - pub session: MessageSessionId, - /// Signing session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Session origin (in consensus initialization message). - pub origin: Option, - /// Consensus message. - pub message: ConsensusMessage, + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Session origin (in consensus initialization message). + pub origin: Option, + /// Consensus message. 
+ pub message: ConsensusMessage, } /// Node is requested to do a partial decryption. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RequestPartialDecryption { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Request id. - pub request_id: SerializableSecret, - /// Is shadow decryption requested? When true, decryption result - /// will be visible to the owner of requestor public key only. - pub is_shadow_decryption: bool, - /// Decryption result must be reconstructed on all participating nodes. This is useful - /// for service contract API so that all nodes from consensus group can confirm decryption. - pub is_broadcast_session: bool, - /// Nodes that are agreed to do a decryption. - pub nodes: BTreeSet, + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Request id. + pub request_id: SerializableSecret, + /// Is shadow decryption requested? When true, decryption result + /// will be visible to the owner of requestor public key only. + pub is_shadow_decryption: bool, + /// Decryption result must be reconstructed on all participating nodes. This is useful + /// for service contract API so that all nodes from consensus group can confirm decryption. + pub is_broadcast_session: bool, + /// Nodes that are agreed to do a decryption. + pub nodes: BTreeSet, } /// Node has partially decrypted the secret. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct PartialDecryption { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Request id. - pub request_id: SerializableSecret, - /// Partially decrypted secret. 
- pub shadow_point: SerializablePublic, - /// Decrypt shadow coefficient (if requested), encrypted with requestor public. - pub decrypt_shadow: Option>, + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Request id. + pub request_id: SerializableSecret, + /// Partially decrypted secret. + pub shadow_point: SerializablePublic, + /// Decrypt shadow coefficient (if requested), encrypted with requestor public. + pub decrypt_shadow: Option>, } /// When decryption session error has occured. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct DecryptionSessionError { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Error message. - pub error: Error, + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: Error, } /// When decryption session is completed. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct DecryptionSessionCompleted { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, } /// When decryption session is delegated to another node. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct DecryptionSessionDelegation { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. 
- pub session_nonce: u64, - /// Session origin. - pub origin: Option, - /// Requester. - pub requester: SerializableRequester, - /// Key version. - pub version: SerializableH256, - /// Is shadow decryption requested? When true, decryption result - /// will be visible to the owner of requestor public key only. - pub is_shadow_decryption: bool, - /// Decryption result must be reconstructed on all participating nodes. This is useful - /// for service contract API so that all nodes from consensus group can confirm decryption. - pub is_broadcast_session: bool, + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Session origin. + pub origin: Option, + /// Requester. + pub requester: SerializableRequester, + /// Key version. + pub version: SerializableH256, + /// Is shadow decryption requested? When true, decryption result + /// will be visible to the owner of requestor public key only. + pub is_shadow_decryption: bool, + /// Decryption result must be reconstructed on all participating nodes. This is useful + /// for service contract API so that all nodes from consensus group can confirm decryption. + pub is_broadcast_session: bool, } /// When delegated decryption session is completed. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct DecryptionSessionDelegationCompleted { - /// Encryption session Id. - pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Decrypted secret point. It is partially decrypted if shadow decrpytion was requested. - pub decrypted_secret: SerializablePublic, - /// Shared common point. - pub common_point: Option, - /// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public. - pub decrypt_shadows: Option>>, + /// Encryption session Id. 
+ pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Decrypted secret point. It is partially decrypted if shadow decrpytion was requested. + pub decrypted_secret: SerializablePublic, + /// Shared common point. + pub common_point: Option, + /// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public. + pub decrypt_shadows: Option>>, } /// Consensus-related servers set change message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ServersSetChangeConsensusMessage { - /// Servers set change session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Consensus message. - pub message: ConsensusMessageWithServersSet, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Consensus message. + pub message: ConsensusMessageWithServersSet, } /// Unknown sessions ids request. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct UnknownSessionsRequest { - /// Servers set change session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, } /// Unknown session ids. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct UnknownSessions { - /// Servers set change session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Unknown session id. - pub unknown_sessions: BTreeSet, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Unknown session id. + pub unknown_sessions: BTreeSet, } /// Key version negotiation message. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct ShareChangeKeyVersionNegotiation { - /// Servers set change session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Key version negotiation message. - pub message: KeyVersionNegotiationMessage, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Key version negotiation message. + pub message: KeyVersionNegotiationMessage, } /// Master node opens share initialize session on other nodes. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct InitializeShareChangeSession { - /// Servers set change session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Key id. - pub key_id: MessageSessionId, - /// Key vesion to use in ShareAdd session. - pub version: SerializableH256, - /// Nodes that have confirmed version ownership. - pub version_holders: BTreeSet, - /// Master node. - pub master_node_id: MessageNodeId, - /// Consensus group to use in ShareAdd session. - pub consensus_group: BTreeSet, - /// Shares to add. Values are filled for new nodes only. - pub new_nodes_map: BTreeMap>, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Key id. + pub key_id: MessageSessionId, + /// Key vesion to use in ShareAdd session. + pub version: SerializableH256, + /// Nodes that have confirmed version ownership. + pub version_holders: BTreeSet, + /// Master node. + pub master_node_id: MessageNodeId, + /// Consensus group to use in ShareAdd session. + pub consensus_group: BTreeSet, + /// Shares to add. Values are filled for new nodes only. + pub new_nodes_map: BTreeMap>, } /// Slave node confirms session initialization. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ConfirmShareChangeSessionInitialization { - /// Servers set change session Id. 
- pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Sessions that are confirmed. - pub key_id: MessageSessionId, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Sessions that are confirmed. + pub key_id: MessageSessionId, } /// Share change is requested. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ServersSetChangeDelegate { - /// Servers set change session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Key id. - pub key_id: MessageSessionId, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Key id. + pub key_id: MessageSessionId, } /// Share change is completed. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ServersSetChangeDelegateResponse { - /// Servers set change session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Key id. - pub key_id: MessageSessionId, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Key id. + pub key_id: MessageSessionId, } /// Servers set change share add message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ServersSetChangeShareAddMessage { - /// Servers set change session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Unknown session id. - pub message: ShareAddMessage, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Unknown session id. + pub message: ShareAddMessage, } /// When servers set change session error has occured. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ServersSetChangeError { - /// Servers set change session Id. 
- pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Error message. - pub error: Error, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: Error, } /// When servers set change session is completed. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ServersSetChangeCompleted { - /// Servers set change session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, } /// Consensus-related share add session message. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ShareAddConsensusMessage { - /// Share add session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Consensus message. - pub message: ConsensusMessageOfShareAdd, + /// Share add session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Consensus message. + pub message: ConsensusMessageOfShareAdd, } /// Key share common data is passed to new node. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeyShareCommon { - /// Generation session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Common key data. - pub key_common: CommonKeyData, - /// Common (shared) encryption point. - pub common_point: Option, - /// Encrypted point. - pub encrypted_point: Option, - /// Selected version id numbers. - pub id_numbers: BTreeMap, + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Common key data. + pub key_common: CommonKeyData, + /// Common (shared) encryption point. + pub common_point: Option, + /// Encrypted point. + pub encrypted_point: Option, + /// Selected version id numbers. 
+ pub id_numbers: BTreeMap, } /// Generated keys are sent to every node. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct NewKeysDissemination { - /// Generation session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Sub share of rcevier' secret share. - pub secret_subshare: SerializableSecret, + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Sub share of receiver's secret share. + pub secret_subshare: SerializableSecret, } /// When share add session error has occured. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ShareAddError { - /// Generation session Id. - pub session: MessageSessionId, - /// Session-level nonce. - pub session_nonce: u64, - /// Error message. - pub error: Error, + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: Error, } /// Key versions are requested. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RequestKeyVersions { - /// Generation session id. - pub session: MessageSessionId, - /// Version negotiation session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, + /// Generation session id. + pub session: MessageSessionId, + /// Version negotiation session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, } /// Key versions are sent. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeyVersions { - /// Generation session id. - pub session: MessageSessionId, - /// Version negotiation session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Common key data, shared by all versions. - pub key_common: Option, - /// Key versions. - pub versions: Vec, + /// Generation session id. + pub session: MessageSessionId, + /// Version negotiation session Id. 
+ pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Common key data, shared by all versions. + pub key_common: Option, + /// Key versions. + pub versions: Vec, } /// Common key data. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct CommonKeyData { - /// Key threshold. - pub threshold: usize, - /// Author of the key entry. - pub author: SerializableAddress, - /// Joint public. - pub public: SerializablePublic, + /// Key threshold. + pub threshold: usize, + /// Author of the key entry. + pub author: SerializableAddress, + /// Joint public. + pub public: SerializablePublic, } /// When key versions error has occured. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeyVersionsError { - /// Generation session id. - pub session: MessageSessionId, - /// Version negotiation session Id. - pub sub_session: SerializableSecret, - /// Session-level nonce. - pub session_nonce: u64, - /// Error message. - pub error: Error, - /// Continue action from failed node (if any). This field is oly filled - /// when error has occured when trying to compute result on master node. - pub continue_with: Option, + /// Generation session id. + pub session: MessageSessionId, + /// Version negotiation session Id. + pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: Error, + /// Continue action from failed node (if any). This field is only filled + /// when error has occurred when trying to compute result on master node. + pub continue_with: Option, } /// Key version continue action from failed node. #[derive(Clone, Debug, Serialize, Deserialize)] pub enum FailedKeyVersionContinueAction { - /// Decryption session: origin + requester. 
+ Decrypt(Option, SerializableAddress), } impl Message { - pub fn is_initialization_message(&self) -> bool { - match *self { - Message::Generation(GenerationMessage::InitializeSession(_)) => true, - Message::Encryption(EncryptionMessage::InitializeEncryptionSession(_)) => true, - Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(ref msg)) => match msg.message { - ConsensusMessage::InitializeConsensusSession(_) => true, - _ => false - }, - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref msg)) => match msg.message { - ConsensusMessage::InitializeConsensusSession(_) => true, - _ => false - }, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref msg)) => match msg.message { - ConsensusMessage::InitializeConsensusSession(_) => true, - _ => false - }, - Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(_)) => true, - Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(ref msg)) if msg.continue_with.is_some() => true, - Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ref msg)) => match msg.message { - ConsensusMessageOfShareAdd::InitializeConsensusSession(_) => true, - _ => false - }, - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg)) => match msg.message { - ConsensusMessageWithServersSet::InitializeConsensusSession(_) => true, - _ => false - }, - _ => false, - } - } + pub fn is_initialization_message(&self) -> bool { + match *self { + Message::Generation(GenerationMessage::InitializeSession(_)) => true, + Message::Encryption(EncryptionMessage::InitializeEncryptionSession(_)) => true, + Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(ref msg)) => { + match msg.message { + ConsensusMessage::InitializeConsensusSession(_) => true, + _ => false, + } + } + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage( + ref msg, + )) => match msg.message { + 
ConsensusMessage::InitializeConsensusSession(_) => true, + _ => false, + }, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref msg)) => { + match msg.message { + ConsensusMessage::InitializeConsensusSession(_) => true, + _ => false, + } + } + Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(_)) => { + true + } + Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError( + ref msg, + )) if msg.continue_with.is_some() => true, + Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ref msg)) => { + match msg.message { + ConsensusMessageOfShareAdd::InitializeConsensusSession(_) => true, + _ => false, + } + } + Message::ServersSetChange( + ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg), + ) => match msg.message { + ConsensusMessageWithServersSet::InitializeConsensusSession(_) => true, + _ => false, + }, + _ => false, + } + } - pub fn is_delegation_message(&self) -> bool { - match *self { - Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(_)) => true, - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(_)) => true, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(_)) => true, - _ => false, - } - } + pub fn is_delegation_message(&self) -> bool { + match *self { + Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(_)) => true, + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(_)) => { + true + } + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(_)) => true, + _ => false, + } + } - pub fn is_error_message(&self) -> bool { - match *self { - Message::Generation(GenerationMessage::SessionError(_)) => true, - Message::Encryption(EncryptionMessage::EncryptionSessionError(_)) => true, - Message::Decryption(DecryptionMessage::DecryptionSessionError(_)) => true, - 
Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError(_)) => true, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError(_)) => true, - Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(_)) => true, - Message::ShareAdd(ShareAddMessage::ShareAddError(_)) => true, - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(_)) => true, - _ => false, - } - } + pub fn is_error_message(&self) -> bool { + match *self { + Message::Generation(GenerationMessage::SessionError(_)) => true, + Message::Encryption(EncryptionMessage::EncryptionSessionError(_)) => true, + Message::Decryption(DecryptionMessage::DecryptionSessionError(_)) => true, + Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError(_)) => true, + Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError(_)) => true, + Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(_)) => { + true + } + Message::ShareAdd(ShareAddMessage::ShareAddError(_)) => true, + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(_)) => true, + _ => false, + } + } - pub fn is_exclusive_session_message(&self) -> bool { - match *self { - Message::ServersSetChange(_) => true, - _ => false, - } - } + pub fn is_exclusive_session_message(&self) -> bool { + match *self { + Message::ServersSetChange(_) => true, + _ => false, + } + } - pub fn session_nonce(&self) -> Option { - match *self { - Message::Cluster(_) => None, - Message::Generation(ref message) => Some(message.session_nonce()), - Message::Encryption(ref message) => Some(message.session_nonce()), - Message::Decryption(ref message) => Some(message.session_nonce()), - Message::SchnorrSigning(ref message) => Some(message.session_nonce()), - Message::EcdsaSigning(ref message) => Some(message.session_nonce()), - Message::ShareAdd(ref message) => Some(message.session_nonce()), - Message::ServersSetChange(ref message) => 
Some(message.session_nonce()), - Message::KeyVersionNegotiation(ref message) => Some(message.session_nonce()), - } - } + pub fn session_nonce(&self) -> Option { + match *self { + Message::Cluster(_) => None, + Message::Generation(ref message) => Some(message.session_nonce()), + Message::Encryption(ref message) => Some(message.session_nonce()), + Message::Decryption(ref message) => Some(message.session_nonce()), + Message::SchnorrSigning(ref message) => Some(message.session_nonce()), + Message::EcdsaSigning(ref message) => Some(message.session_nonce()), + Message::ShareAdd(ref message) => Some(message.session_nonce()), + Message::ServersSetChange(ref message) => Some(message.session_nonce()), + Message::KeyVersionNegotiation(ref message) => Some(message.session_nonce()), + } + } } impl GenerationMessage { - pub fn session_id(&self) -> &SessionId { - match *self { - GenerationMessage::InitializeSession(ref msg) => &msg.session, - GenerationMessage::ConfirmInitialization(ref msg) => &msg.session, - GenerationMessage::CompleteInitialization(ref msg) => &msg.session, - GenerationMessage::KeysDissemination(ref msg) => &msg.session, - GenerationMessage::PublicKeyShare(ref msg) => &msg.session, - GenerationMessage::SessionError(ref msg) => &msg.session, - GenerationMessage::SessionCompleted(ref msg) => &msg.session, - } - } + pub fn session_id(&self) -> &SessionId { + match *self { + GenerationMessage::InitializeSession(ref msg) => &msg.session, + GenerationMessage::ConfirmInitialization(ref msg) => &msg.session, + GenerationMessage::CompleteInitialization(ref msg) => &msg.session, + GenerationMessage::KeysDissemination(ref msg) => &msg.session, + GenerationMessage::PublicKeyShare(ref msg) => &msg.session, + GenerationMessage::SessionError(ref msg) => &msg.session, + GenerationMessage::SessionCompleted(ref msg) => &msg.session, + } + } - pub fn session_nonce(&self) -> u64 { - match *self { - GenerationMessage::InitializeSession(ref msg) => msg.session_nonce, - 
GenerationMessage::ConfirmInitialization(ref msg) => msg.session_nonce, - GenerationMessage::CompleteInitialization(ref msg) => msg.session_nonce, - GenerationMessage::KeysDissemination(ref msg) => msg.session_nonce, - GenerationMessage::PublicKeyShare(ref msg) => msg.session_nonce, - GenerationMessage::SessionError(ref msg) => msg.session_nonce, - GenerationMessage::SessionCompleted(ref msg) => msg.session_nonce, - } - } + pub fn session_nonce(&self) -> u64 { + match *self { + GenerationMessage::InitializeSession(ref msg) => msg.session_nonce, + GenerationMessage::ConfirmInitialization(ref msg) => msg.session_nonce, + GenerationMessage::CompleteInitialization(ref msg) => msg.session_nonce, + GenerationMessage::KeysDissemination(ref msg) => msg.session_nonce, + GenerationMessage::PublicKeyShare(ref msg) => msg.session_nonce, + GenerationMessage::SessionError(ref msg) => msg.session_nonce, + GenerationMessage::SessionCompleted(ref msg) => msg.session_nonce, + } + } } impl EncryptionMessage { - pub fn session_id(&self) -> &SessionId { - match *self { - EncryptionMessage::InitializeEncryptionSession(ref msg) => &msg.session, - EncryptionMessage::ConfirmEncryptionInitialization(ref msg) => &msg.session, - EncryptionMessage::EncryptionSessionError(ref msg) => &msg.session, - } - } + pub fn session_id(&self) -> &SessionId { + match *self { + EncryptionMessage::InitializeEncryptionSession(ref msg) => &msg.session, + EncryptionMessage::ConfirmEncryptionInitialization(ref msg) => &msg.session, + EncryptionMessage::EncryptionSessionError(ref msg) => &msg.session, + } + } - pub fn session_nonce(&self) -> u64 { - match *self { - EncryptionMessage::InitializeEncryptionSession(ref msg) => msg.session_nonce, - EncryptionMessage::ConfirmEncryptionInitialization(ref msg) => msg.session_nonce, - EncryptionMessage::EncryptionSessionError(ref msg) => msg.session_nonce, - } - } + pub fn session_nonce(&self) -> u64 { + match *self { + EncryptionMessage::InitializeEncryptionSession(ref 
msg) => msg.session_nonce, + EncryptionMessage::ConfirmEncryptionInitialization(ref msg) => msg.session_nonce, + EncryptionMessage::EncryptionSessionError(ref msg) => msg.session_nonce, + } + } } impl DecryptionMessage { - pub fn session_id(&self) -> &SessionId { - match *self { - DecryptionMessage::DecryptionConsensusMessage(ref msg) => &msg.session, - DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.session, - DecryptionMessage::PartialDecryption(ref msg) => &msg.session, - DecryptionMessage::DecryptionSessionError(ref msg) => &msg.session, - DecryptionMessage::DecryptionSessionCompleted(ref msg) => &msg.session, - DecryptionMessage::DecryptionSessionDelegation(ref msg) => &msg.session, - DecryptionMessage::DecryptionSessionDelegationCompleted(ref msg) => &msg.session, - } - } + pub fn session_id(&self) -> &SessionId { + match *self { + DecryptionMessage::DecryptionConsensusMessage(ref msg) => &msg.session, + DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.session, + DecryptionMessage::PartialDecryption(ref msg) => &msg.session, + DecryptionMessage::DecryptionSessionError(ref msg) => &msg.session, + DecryptionMessage::DecryptionSessionCompleted(ref msg) => &msg.session, + DecryptionMessage::DecryptionSessionDelegation(ref msg) => &msg.session, + DecryptionMessage::DecryptionSessionDelegationCompleted(ref msg) => &msg.session, + } + } - pub fn sub_session_id(&self) -> &Secret { - match *self { - DecryptionMessage::DecryptionConsensusMessage(ref msg) => &msg.sub_session, - DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.sub_session, - DecryptionMessage::PartialDecryption(ref msg) => &msg.sub_session, - DecryptionMessage::DecryptionSessionError(ref msg) => &msg.sub_session, - DecryptionMessage::DecryptionSessionCompleted(ref msg) => &msg.sub_session, - DecryptionMessage::DecryptionSessionDelegation(ref msg) => &msg.sub_session, - DecryptionMessage::DecryptionSessionDelegationCompleted(ref msg) => &msg.sub_session, - } - } + pub 
fn sub_session_id(&self) -> &Secret { + match *self { + DecryptionMessage::DecryptionConsensusMessage(ref msg) => &msg.sub_session, + DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.sub_session, + DecryptionMessage::PartialDecryption(ref msg) => &msg.sub_session, + DecryptionMessage::DecryptionSessionError(ref msg) => &msg.sub_session, + DecryptionMessage::DecryptionSessionCompleted(ref msg) => &msg.sub_session, + DecryptionMessage::DecryptionSessionDelegation(ref msg) => &msg.sub_session, + DecryptionMessage::DecryptionSessionDelegationCompleted(ref msg) => &msg.sub_session, + } + } - pub fn session_nonce(&self) -> u64 { - match *self { - DecryptionMessage::DecryptionConsensusMessage(ref msg) => msg.session_nonce, - DecryptionMessage::RequestPartialDecryption(ref msg) => msg.session_nonce, - DecryptionMessage::PartialDecryption(ref msg) => msg.session_nonce, - DecryptionMessage::DecryptionSessionError(ref msg) => msg.session_nonce, - DecryptionMessage::DecryptionSessionCompleted(ref msg) => msg.session_nonce, - DecryptionMessage::DecryptionSessionDelegation(ref msg) => msg.session_nonce, - DecryptionMessage::DecryptionSessionDelegationCompleted(ref msg) => msg.session_nonce, - } - } + pub fn session_nonce(&self) -> u64 { + match *self { + DecryptionMessage::DecryptionConsensusMessage(ref msg) => msg.session_nonce, + DecryptionMessage::RequestPartialDecryption(ref msg) => msg.session_nonce, + DecryptionMessage::PartialDecryption(ref msg) => msg.session_nonce, + DecryptionMessage::DecryptionSessionError(ref msg) => msg.session_nonce, + DecryptionMessage::DecryptionSessionCompleted(ref msg) => msg.session_nonce, + DecryptionMessage::DecryptionSessionDelegation(ref msg) => msg.session_nonce, + DecryptionMessage::DecryptionSessionDelegationCompleted(ref msg) => msg.session_nonce, + } + } } impl SchnorrSigningMessage { - pub fn session_id(&self) -> &SessionId { - match *self { - SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref msg) => &msg.session, 
- SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref msg) => &msg.session, - SchnorrSigningMessage::SchnorrRequestPartialSignature(ref msg) => &msg.session, - SchnorrSigningMessage::SchnorrPartialSignature(ref msg) => &msg.session, - SchnorrSigningMessage::SchnorrSigningSessionError(ref msg) => &msg.session, - SchnorrSigningMessage::SchnorrSigningSessionCompleted(ref msg) => &msg.session, - SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref msg) => &msg.session, - SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(ref msg) => &msg.session, - } - } + pub fn session_id(&self) -> &SessionId { + match *self { + SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref msg) => &msg.session, + SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref msg) => &msg.session, + SchnorrSigningMessage::SchnorrRequestPartialSignature(ref msg) => &msg.session, + SchnorrSigningMessage::SchnorrPartialSignature(ref msg) => &msg.session, + SchnorrSigningMessage::SchnorrSigningSessionError(ref msg) => &msg.session, + SchnorrSigningMessage::SchnorrSigningSessionCompleted(ref msg) => &msg.session, + SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref msg) => &msg.session, + SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(ref msg) => { + &msg.session + } + } + } - pub fn sub_session_id(&self) -> &Secret { - match *self { - SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref msg) => &msg.sub_session, - SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref msg) => &msg.sub_session, - SchnorrSigningMessage::SchnorrRequestPartialSignature(ref msg) => &msg.sub_session, - SchnorrSigningMessage::SchnorrPartialSignature(ref msg) => &msg.sub_session, - SchnorrSigningMessage::SchnorrSigningSessionError(ref msg) => &msg.sub_session, - SchnorrSigningMessage::SchnorrSigningSessionCompleted(ref msg) => &msg.sub_session, - SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref msg) => &msg.sub_session, - 
SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(ref msg) => &msg.sub_session, - } - } + pub fn sub_session_id(&self) -> &Secret { + match *self { + SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref msg) => &msg.sub_session, + SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref msg) => &msg.sub_session, + SchnorrSigningMessage::SchnorrRequestPartialSignature(ref msg) => &msg.sub_session, + SchnorrSigningMessage::SchnorrPartialSignature(ref msg) => &msg.sub_session, + SchnorrSigningMessage::SchnorrSigningSessionError(ref msg) => &msg.sub_session, + SchnorrSigningMessage::SchnorrSigningSessionCompleted(ref msg) => &msg.sub_session, + SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref msg) => &msg.sub_session, + SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(ref msg) => { + &msg.sub_session + } + } + } - pub fn session_nonce(&self) -> u64 { - match *self { - SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref msg) => msg.session_nonce, - SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref msg) => msg.session_nonce, - SchnorrSigningMessage::SchnorrRequestPartialSignature(ref msg) => msg.session_nonce, - SchnorrSigningMessage::SchnorrPartialSignature(ref msg) => msg.session_nonce, - SchnorrSigningMessage::SchnorrSigningSessionError(ref msg) => msg.session_nonce, - SchnorrSigningMessage::SchnorrSigningSessionCompleted(ref msg) => msg.session_nonce, - SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref msg) => msg.session_nonce, - SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(ref msg) => msg.session_nonce, - } - } + pub fn session_nonce(&self) -> u64 { + match *self { + SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref msg) => msg.session_nonce, + SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref msg) => msg.session_nonce, + SchnorrSigningMessage::SchnorrRequestPartialSignature(ref msg) => msg.session_nonce, + SchnorrSigningMessage::SchnorrPartialSignature(ref 
msg) => msg.session_nonce, + SchnorrSigningMessage::SchnorrSigningSessionError(ref msg) => msg.session_nonce, + SchnorrSigningMessage::SchnorrSigningSessionCompleted(ref msg) => msg.session_nonce, + SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref msg) => msg.session_nonce, + SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(ref msg) => { + msg.session_nonce + } + } + } } impl EcdsaSigningMessage { - pub fn session_id(&self) -> &SessionId { - match *self { - EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaRequestPartialSignature(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaPartialSignature(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaSigningSessionError(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaSigningSessionCompleted(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref msg) => &msg.session, - EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(ref msg) => &msg.session, - } - } + pub fn session_id(&self) -> &SessionId { + match *self { + EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref msg) => &msg.session, + EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref msg) => &msg.session, + EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref msg) => &msg.session, + EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref msg) => &msg.session, + EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(ref msg) => &msg.session, + EcdsaSigningMessage::EcdsaRequestPartialSignature(ref msg) => &msg.session, + EcdsaSigningMessage::EcdsaPartialSignature(ref msg) => 
&msg.session, + EcdsaSigningMessage::EcdsaSigningSessionError(ref msg) => &msg.session, + EcdsaSigningMessage::EcdsaSigningSessionCompleted(ref msg) => &msg.session, + EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref msg) => &msg.session, + EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(ref msg) => &msg.session, + } + } - pub fn sub_session_id(&self) -> &Secret { - match *self { - EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaRequestPartialSignature(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaPartialSignature(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaSigningSessionError(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaSigningSessionCompleted(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref msg) => &msg.sub_session, - EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(ref msg) => &msg.sub_session, - } - } + pub fn sub_session_id(&self) -> &Secret { + match *self { + EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref msg) => &msg.sub_session, + EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref msg) => &msg.sub_session, + EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref msg) => &msg.sub_session, + EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref msg) => &msg.sub_session, + EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(ref msg) => &msg.sub_session, + EcdsaSigningMessage::EcdsaRequestPartialSignature(ref msg) => &msg.sub_session, + EcdsaSigningMessage::EcdsaPartialSignature(ref msg) => &msg.sub_session, + 
EcdsaSigningMessage::EcdsaSigningSessionError(ref msg) => &msg.sub_session, + EcdsaSigningMessage::EcdsaSigningSessionCompleted(ref msg) => &msg.sub_session, + EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref msg) => &msg.sub_session, + EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(ref msg) => { + &msg.sub_session + } + } + } - pub fn session_nonce(&self) -> u64 { - match *self { - EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaRequestPartialSignature(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaPartialSignature(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaSigningSessionError(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaSigningSessionCompleted(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref msg) => msg.session_nonce, - EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(ref msg) => msg.session_nonce, - } - } + pub fn session_nonce(&self) -> u64 { + match *self { + EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref msg) => msg.session_nonce, + EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref msg) => msg.session_nonce, + EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref msg) => msg.session_nonce, + EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref msg) => msg.session_nonce, + EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(ref msg) => msg.session_nonce, + EcdsaSigningMessage::EcdsaRequestPartialSignature(ref msg) => msg.session_nonce, + EcdsaSigningMessage::EcdsaPartialSignature(ref msg) => 
msg.session_nonce, + EcdsaSigningMessage::EcdsaSigningSessionError(ref msg) => msg.session_nonce, + EcdsaSigningMessage::EcdsaSigningSessionCompleted(ref msg) => msg.session_nonce, + EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref msg) => msg.session_nonce, + EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(ref msg) => { + msg.session_nonce + } + } + } } impl ServersSetChangeMessage { - pub fn session_id(&self) -> &SessionId { - match *self { - ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => &msg.session, - ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => &msg.session, - ServersSetChangeMessage::UnknownSessions(ref msg) => &msg.session, - ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref msg) => &msg.session, - ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => &msg.session, - ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => &msg.session, - ServersSetChangeMessage::ServersSetChangeDelegate(ref msg) => &msg.session, - ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => &msg.session, - ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => &msg.session, - ServersSetChangeMessage::ServersSetChangeError(ref msg) => &msg.session, - ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => &msg.session, - } - } + pub fn session_id(&self) -> &SessionId { + match *self { + ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => &msg.session, + ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => &msg.session, + ServersSetChangeMessage::UnknownSessions(ref msg) => &msg.session, + ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref msg) => &msg.session, + ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => &msg.session, + ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => { + &msg.session + } + ServersSetChangeMessage::ServersSetChangeDelegate(ref msg) => 
&msg.session, + ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeError(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => &msg.session, + } + } - pub fn session_nonce(&self) -> u64 { - match *self { - ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => msg.session_nonce, - ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => msg.session_nonce, - ServersSetChangeMessage::UnknownSessions(ref msg) => msg.session_nonce, - ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref msg) => msg.session_nonce, - ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => msg.session_nonce, - ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => msg.session_nonce, - ServersSetChangeMessage::ServersSetChangeDelegate(ref msg) => msg.session_nonce, - ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => msg.session_nonce, - ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => msg.session_nonce, - ServersSetChangeMessage::ServersSetChangeError(ref msg) => msg.session_nonce, - ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => msg.session_nonce, - } - } + pub fn session_nonce(&self) -> u64 { + match *self { + ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => msg.session_nonce, + ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => msg.session_nonce, + ServersSetChangeMessage::UnknownSessions(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref msg) => msg.session_nonce, + ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => { + msg.session_nonce + } + ServersSetChangeMessage::ServersSetChangeDelegate(ref 
msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeError(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => msg.session_nonce, + } + } } impl ShareAddMessage { - pub fn session_id(&self) -> &SessionId { - match *self { - ShareAddMessage::ShareAddConsensusMessage(ref msg) => &msg.session, - ShareAddMessage::KeyShareCommon(ref msg) => &msg.session, - ShareAddMessage::NewKeysDissemination(ref msg) => &msg.session, - ShareAddMessage::ShareAddError(ref msg) => &msg.session, - } - } + pub fn session_id(&self) -> &SessionId { + match *self { + ShareAddMessage::ShareAddConsensusMessage(ref msg) => &msg.session, + ShareAddMessage::KeyShareCommon(ref msg) => &msg.session, + ShareAddMessage::NewKeysDissemination(ref msg) => &msg.session, + ShareAddMessage::ShareAddError(ref msg) => &msg.session, + } + } - pub fn session_nonce(&self) -> u64 { - match *self { - ShareAddMessage::ShareAddConsensusMessage(ref msg) => msg.session_nonce, - ShareAddMessage::KeyShareCommon(ref msg) => msg.session_nonce, - ShareAddMessage::NewKeysDissemination(ref msg) => msg.session_nonce, - ShareAddMessage::ShareAddError(ref msg) => msg.session_nonce, - } - } + pub fn session_nonce(&self) -> u64 { + match *self { + ShareAddMessage::ShareAddConsensusMessage(ref msg) => msg.session_nonce, + ShareAddMessage::KeyShareCommon(ref msg) => msg.session_nonce, + ShareAddMessage::NewKeysDissemination(ref msg) => msg.session_nonce, + ShareAddMessage::ShareAddError(ref msg) => msg.session_nonce, + } + } } impl KeyVersionNegotiationMessage { - pub fn session_id(&self) -> &SessionId { - match *self { - KeyVersionNegotiationMessage::RequestKeyVersions(ref msg) => &msg.session, - KeyVersionNegotiationMessage::KeyVersions(ref msg) => &msg.session, - 
KeyVersionNegotiationMessage::KeyVersionsError(ref msg) => &msg.session, - } - } + pub fn session_id(&self) -> &SessionId { + match *self { + KeyVersionNegotiationMessage::RequestKeyVersions(ref msg) => &msg.session, + KeyVersionNegotiationMessage::KeyVersions(ref msg) => &msg.session, + KeyVersionNegotiationMessage::KeyVersionsError(ref msg) => &msg.session, + } + } - pub fn sub_session_id(&self) -> &Secret { - match *self { - KeyVersionNegotiationMessage::RequestKeyVersions(ref msg) => &msg.sub_session, - KeyVersionNegotiationMessage::KeyVersions(ref msg) => &msg.sub_session, - KeyVersionNegotiationMessage::KeyVersionsError(ref msg) => &msg.sub_session, - } - } + pub fn sub_session_id(&self) -> &Secret { + match *self { + KeyVersionNegotiationMessage::RequestKeyVersions(ref msg) => &msg.sub_session, + KeyVersionNegotiationMessage::KeyVersions(ref msg) => &msg.sub_session, + KeyVersionNegotiationMessage::KeyVersionsError(ref msg) => &msg.sub_session, + } + } - pub fn session_nonce(&self) -> u64 { - match *self { - KeyVersionNegotiationMessage::RequestKeyVersions(ref msg) => msg.session_nonce, - KeyVersionNegotiationMessage::KeyVersions(ref msg) => msg.session_nonce, - KeyVersionNegotiationMessage::KeyVersionsError(ref msg) => msg.session_nonce, - } - } + pub fn session_nonce(&self) -> u64 { + match *self { + KeyVersionNegotiationMessage::RequestKeyVersions(ref msg) => msg.session_nonce, + KeyVersionNegotiationMessage::KeyVersions(ref msg) => msg.session_nonce, + KeyVersionNegotiationMessage::KeyVersionsError(ref msg) => msg.session_nonce, + } + } } impl fmt::Display for Message { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Message::Cluster(ref message) => write!(f, "Cluster.{}", message), - Message::Generation(ref message) => write!(f, "Generation.{}", message), - Message::Encryption(ref message) => write!(f, "Encryption.{}", message), - Message::Decryption(ref message) => write!(f, "Decryption.{}", message), - 
Message::SchnorrSigning(ref message) => write!(f, "SchnorrSigning.{}", message), - Message::EcdsaSigning(ref message) => write!(f, "EcdsaSigning.{}", message), - Message::ServersSetChange(ref message) => write!(f, "ServersSetChange.{}", message), - Message::ShareAdd(ref message) => write!(f, "ShareAdd.{}", message), - Message::KeyVersionNegotiation(ref message) => write!(f, "KeyVersionNegotiation.{}", message), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Message::Cluster(ref message) => write!(f, "Cluster.{}", message), + Message::Generation(ref message) => write!(f, "Generation.{}", message), + Message::Encryption(ref message) => write!(f, "Encryption.{}", message), + Message::Decryption(ref message) => write!(f, "Decryption.{}", message), + Message::SchnorrSigning(ref message) => write!(f, "SchnorrSigning.{}", message), + Message::EcdsaSigning(ref message) => write!(f, "EcdsaSigning.{}", message), + Message::ServersSetChange(ref message) => write!(f, "ServersSetChange.{}", message), + Message::ShareAdd(ref message) => write!(f, "ShareAdd.{}", message), + Message::KeyVersionNegotiation(ref message) => { + write!(f, "KeyVersionNegotiation.{}", message) + } + } + } } impl fmt::Display for ClusterMessage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ClusterMessage::NodePublicKey(_) => write!(f, "NodePublicKey"), - ClusterMessage::NodePrivateKeySignature(_) => write!(f, "NodePrivateKeySignature"), - ClusterMessage::KeepAlive(_) => write!(f, "KeepAlive"), - ClusterMessage::KeepAliveResponse(_) => write!(f, "KeepAliveResponse"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ClusterMessage::NodePublicKey(_) => write!(f, "NodePublicKey"), + ClusterMessage::NodePrivateKeySignature(_) => write!(f, "NodePrivateKeySignature"), + ClusterMessage::KeepAlive(_) => write!(f, "KeepAlive"), + ClusterMessage::KeepAliveResponse(_) => write!(f, "KeepAliveResponse"), + } + } } impl 
fmt::Display for GenerationMessage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - GenerationMessage::InitializeSession(_) => write!(f, "InitializeSession"), - GenerationMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"), - GenerationMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"), - GenerationMessage::KeysDissemination(_) => write!(f, "KeysDissemination"), - GenerationMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"), - GenerationMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error), - GenerationMessage::SessionCompleted(_) => write!(f, "SessionCompleted"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + GenerationMessage::InitializeSession(_) => write!(f, "InitializeSession"), + GenerationMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"), + GenerationMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"), + GenerationMessage::KeysDissemination(_) => write!(f, "KeysDissemination"), + GenerationMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"), + GenerationMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error), + GenerationMessage::SessionCompleted(_) => write!(f, "SessionCompleted"), + } + } } impl fmt::Display for EncryptionMessage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - EncryptionMessage::InitializeEncryptionSession(_) => write!(f, "InitializeEncryptionSession"), - EncryptionMessage::ConfirmEncryptionInitialization(_) => write!(f, "ConfirmEncryptionInitialization"), - EncryptionMessage::EncryptionSessionError(ref msg) => write!(f, "EncryptionSessionError({})", msg.error), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + EncryptionMessage::InitializeEncryptionSession(_) => { + write!(f, "InitializeEncryptionSession") + } + EncryptionMessage::ConfirmEncryptionInitialization(_) => { + 
write!(f, "ConfirmEncryptionInitialization") + } + EncryptionMessage::EncryptionSessionError(ref msg) => { + write!(f, "EncryptionSessionError({})", msg.error) + } + } + } } impl fmt::Display for ConsensusMessage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ConsensusMessage::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"), - ConsensusMessage::ConfirmConsensusInitialization(ref msg) => write!(f, "ConfirmConsensusInitialization({})", msg.is_confirmed), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ConsensusMessage::InitializeConsensusSession(_) => { + write!(f, "InitializeConsensusSession") + } + ConsensusMessage::ConfirmConsensusInitialization(ref msg) => { + write!(f, "ConfirmConsensusInitialization({})", msg.is_confirmed) + } + } + } } impl fmt::Display for ConsensusMessageWithServersSet { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ConsensusMessageWithServersSet::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"), - ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref msg) => write!(f, "ConfirmConsensusInitialization({})", msg.is_confirmed), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ConsensusMessageWithServersSet::InitializeConsensusSession(_) => { + write!(f, "InitializeConsensusSession") + } + ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref msg) => { + write!(f, "ConfirmConsensusInitialization({})", msg.is_confirmed) + } + } + } } impl fmt::Display for ConsensusMessageOfShareAdd { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ConsensusMessageOfShareAdd::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"), - ConsensusMessageOfShareAdd::ConfirmConsensusInitialization(ref msg) => write!(f, "ConfirmConsensusInitialization({})", msg.is_confirmed), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> 
fmt::Result { + match *self { + ConsensusMessageOfShareAdd::InitializeConsensusSession(_) => { + write!(f, "InitializeConsensusSession") + } + ConsensusMessageOfShareAdd::ConfirmConsensusInitialization(ref msg) => { + write!(f, "ConfirmConsensusInitialization({})", msg.is_confirmed) + } + } + } } impl fmt::Display for DecryptionMessage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - DecryptionMessage::DecryptionConsensusMessage(ref m) => write!(f, "DecryptionConsensusMessage.{}", m.message), - DecryptionMessage::RequestPartialDecryption(_) => write!(f, "RequestPartialDecryption"), - DecryptionMessage::PartialDecryption(_) => write!(f, "PartialDecryption"), - DecryptionMessage::DecryptionSessionError(_) => write!(f, "DecryptionSessionError"), - DecryptionMessage::DecryptionSessionCompleted(_) => write!(f, "DecryptionSessionCompleted"), - DecryptionMessage::DecryptionSessionDelegation(_) => write!(f, "DecryptionSessionDelegation"), - DecryptionMessage::DecryptionSessionDelegationCompleted(_) => write!(f, "DecryptionSessionDelegationCompleted"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + DecryptionMessage::DecryptionConsensusMessage(ref m) => { + write!(f, "DecryptionConsensusMessage.{}", m.message) + } + DecryptionMessage::RequestPartialDecryption(_) => write!(f, "RequestPartialDecryption"), + DecryptionMessage::PartialDecryption(_) => write!(f, "PartialDecryption"), + DecryptionMessage::DecryptionSessionError(_) => write!(f, "DecryptionSessionError"), + DecryptionMessage::DecryptionSessionCompleted(_) => { + write!(f, "DecryptionSessionCompleted") + } + DecryptionMessage::DecryptionSessionDelegation(_) => { + write!(f, "DecryptionSessionDelegation") + } + DecryptionMessage::DecryptionSessionDelegationCompleted(_) => { + write!(f, "DecryptionSessionDelegationCompleted") + } + } + } } impl fmt::Display for SchnorrSigningMessage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self 
{ - SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref m) => write!(f, "SchnorrSigningConsensusMessage.{}", m.message), - SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref m) => write!(f, "SchnorrSigningGenerationMessage.{}", m.message), - SchnorrSigningMessage::SchnorrRequestPartialSignature(_) => write!(f, "SchnorrRequestPartialSignature"), - SchnorrSigningMessage::SchnorrPartialSignature(_) => write!(f, "SchnorrPartialSignature"), - SchnorrSigningMessage::SchnorrSigningSessionError(_) => write!(f, "SchnorrSigningSessionError"), - SchnorrSigningMessage::SchnorrSigningSessionCompleted(_) => write!(f, "SchnorrSigningSessionCompleted"), - SchnorrSigningMessage::SchnorrSigningSessionDelegation(_) => write!(f, "SchnorrSigningSessionDelegation"), - SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(_) => write!(f, "SchnorrSigningSessionDelegationCompleted"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref m) => { + write!(f, "SchnorrSigningConsensusMessage.{}", m.message) + } + SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref m) => { + write!(f, "SchnorrSigningGenerationMessage.{}", m.message) + } + SchnorrSigningMessage::SchnorrRequestPartialSignature(_) => { + write!(f, "SchnorrRequestPartialSignature") + } + SchnorrSigningMessage::SchnorrPartialSignature(_) => { + write!(f, "SchnorrPartialSignature") + } + SchnorrSigningMessage::SchnorrSigningSessionError(_) => { + write!(f, "SchnorrSigningSessionError") + } + SchnorrSigningMessage::SchnorrSigningSessionCompleted(_) => { + write!(f, "SchnorrSigningSessionCompleted") + } + SchnorrSigningMessage::SchnorrSigningSessionDelegation(_) => { + write!(f, "SchnorrSigningSessionDelegation") + } + SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(_) => { + write!(f, "SchnorrSigningSessionDelegationCompleted") + } + } + } } impl fmt::Display for EcdsaSigningMessage { - fn fmt(&self, f: 
&mut fmt::Formatter) -> fmt::Result { - match *self { - EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref m) => write!(f, "EcdsaSigningConsensusMessage.{}", m.message), - EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref m) => write!(f, "EcdsaSignatureNonceGenerationMessage.{}", m.message), - EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref m) => write!(f, "EcdsaInversionNonceGenerationMessage.{}", m.message), - EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref m) => write!(f, "EcdsaInversionZeroGenerationMessage.{}", m.message), - EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(_) => write!(f, "EcdsaSigningInversedNonceCoeffShare"), - EcdsaSigningMessage::EcdsaRequestPartialSignature(_) => write!(f, "EcdsaRequestPartialSignature"), - EcdsaSigningMessage::EcdsaPartialSignature(_) => write!(f, "EcdsaPartialSignature"), - EcdsaSigningMessage::EcdsaSigningSessionError(_) => write!(f, "EcdsaSigningSessionError"), - EcdsaSigningMessage::EcdsaSigningSessionCompleted(_) => write!(f, "EcdsaSigningSessionCompleted"), - EcdsaSigningMessage::EcdsaSigningSessionDelegation(_) => write!(f, "EcdsaSigningSessionDelegation"), - EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(_) => write!(f, "EcdsaSigningSessionDelegationCompleted"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref m) => { + write!(f, "EcdsaSigningConsensusMessage.{}", m.message) + } + EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref m) => { + write!(f, "EcdsaSignatureNonceGenerationMessage.{}", m.message) + } + EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref m) => { + write!(f, "EcdsaInversionNonceGenerationMessage.{}", m.message) + } + EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref m) => { + write!(f, "EcdsaInversionZeroGenerationMessage.{}", m.message) + } + EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(_) => { + 
write!(f, "EcdsaSigningInversedNonceCoeffShare") + } + EcdsaSigningMessage::EcdsaRequestPartialSignature(_) => { + write!(f, "EcdsaRequestPartialSignature") + } + EcdsaSigningMessage::EcdsaPartialSignature(_) => write!(f, "EcdsaPartialSignature"), + EcdsaSigningMessage::EcdsaSigningSessionError(_) => { + write!(f, "EcdsaSigningSessionError") + } + EcdsaSigningMessage::EcdsaSigningSessionCompleted(_) => { + write!(f, "EcdsaSigningSessionCompleted") + } + EcdsaSigningMessage::EcdsaSigningSessionDelegation(_) => { + write!(f, "EcdsaSigningSessionDelegation") + } + EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(_) => { + write!(f, "EcdsaSigningSessionDelegationCompleted") + } + } + } } impl fmt::Display for ServersSetChangeMessage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref m) => write!(f, "ServersSetChangeConsensusMessage.{}", m.message), - ServersSetChangeMessage::UnknownSessionsRequest(_) => write!(f, "UnknownSessionsRequest"), - ServersSetChangeMessage::UnknownSessions(_) => write!(f, "UnknownSessions"), - ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref m) => write!(f, "ShareChangeKeyVersionNegotiation.{}", m.message), - ServersSetChangeMessage::InitializeShareChangeSession(_) => write!(f, "InitializeShareChangeSession"), - ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(_) => write!(f, "ConfirmShareChangeSessionInitialization"), - ServersSetChangeMessage::ServersSetChangeDelegate(_) => write!(f, "ServersSetChangeDelegate"), - ServersSetChangeMessage::ServersSetChangeDelegateResponse(_) => write!(f, "ServersSetChangeDelegateResponse"), - ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref m) => write!(f, "ServersSetChangeShareAddMessage.{}", m.message), - ServersSetChangeMessage::ServersSetChangeError(_) => write!(f, "ServersSetChangeError"), - ServersSetChangeMessage::ServersSetChangeCompleted(_) => write!(f, 
"ServersSetChangeCompleted"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref m) => { + write!(f, "ServersSetChangeConsensusMessage.{}", m.message) + } + ServersSetChangeMessage::UnknownSessionsRequest(_) => { + write!(f, "UnknownSessionsRequest") + } + ServersSetChangeMessage::UnknownSessions(_) => write!(f, "UnknownSessions"), + ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref m) => { + write!(f, "ShareChangeKeyVersionNegotiation.{}", m.message) + } + ServersSetChangeMessage::InitializeShareChangeSession(_) => { + write!(f, "InitializeShareChangeSession") + } + ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(_) => { + write!(f, "ConfirmShareChangeSessionInitialization") + } + ServersSetChangeMessage::ServersSetChangeDelegate(_) => { + write!(f, "ServersSetChangeDelegate") + } + ServersSetChangeMessage::ServersSetChangeDelegateResponse(_) => { + write!(f, "ServersSetChangeDelegateResponse") + } + ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref m) => { + write!(f, "ServersSetChangeShareAddMessage.{}", m.message) + } + ServersSetChangeMessage::ServersSetChangeError(_) => write!(f, "ServersSetChangeError"), + ServersSetChangeMessage::ServersSetChangeCompleted(_) => { + write!(f, "ServersSetChangeCompleted") + } + } + } } impl fmt::Display for ShareAddMessage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ShareAddMessage::ShareAddConsensusMessage(ref m) => write!(f, "ShareAddConsensusMessage.{}", m.message), - ShareAddMessage::KeyShareCommon(_) => write!(f, "KeyShareCommon"), - ShareAddMessage::NewKeysDissemination(_) => write!(f, "NewKeysDissemination"), - ShareAddMessage::ShareAddError(_) => write!(f, "ShareAddError"), - - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ShareAddMessage::ShareAddConsensusMessage(ref m) => { + write!(f, "ShareAddConsensusMessage.{}", 
m.message) + } + ShareAddMessage::KeyShareCommon(_) => write!(f, "KeyShareCommon"), + ShareAddMessage::NewKeysDissemination(_) => write!(f, "NewKeysDissemination"), + ShareAddMessage::ShareAddError(_) => write!(f, "ShareAddError"), + } + } } impl fmt::Display for KeyVersionNegotiationMessage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - KeyVersionNegotiationMessage::RequestKeyVersions(_) => write!(f, "RequestKeyVersions"), - KeyVersionNegotiationMessage::KeyVersions(_) => write!(f, "KeyVersions"), - KeyVersionNegotiationMessage::KeyVersionsError(_) => write!(f, "KeyVersionsError"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + KeyVersionNegotiationMessage::RequestKeyVersions(_) => write!(f, "RequestKeyVersions"), + KeyVersionNegotiationMessage::KeyVersions(_) => write!(f, "KeyVersions"), + KeyVersionNegotiationMessage::KeyVersionsError(_) => write!(f, "KeyVersionsError"), + } + } } diff --git a/secret-store/src/key_server_cluster/mod.rs b/secret-store/src/key_server_cluster/mod.rs index fc46e1031..e4b7bb95b 100644 --- a/secret-store/src/key_server_cluster/mod.rs +++ b/secret-store/src/key_server_cluster/mod.rs @@ -16,59 +16,66 @@ use super::types::ServerKeyId; -pub use super::traits::NodeKeyPair; -pub use super::types::{Error, NodeId, Requester, EncryptedDocumentKeyShadow}; -pub use super::acl_storage::AclStorage; -pub use super::key_storage::{KeyStorage, DocumentKeyShare, DocumentKeyShareVersion}; -pub use super::key_server_set::{is_migration_required, KeyServerSet, KeyServerSetSnapshot, KeyServerSetMigration}; -pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, - SerializableRequester, SerializableMessageHash, SerializableAddress}; -pub use self::cluster::{new_network_cluster, ClusterCore, ClusterConfiguration, ClusterClient}; -pub use self::cluster_connections_net::NetConnectionsManagerConfig; -pub use 
self::cluster_sessions::{ClusterSession, ClusterSessionsListener}; #[cfg(test)] pub use self::cluster::tests::DummyClusterClient; +pub use self::{ + cluster::{new_network_cluster, ClusterClient, ClusterConfiguration, ClusterCore}, + cluster_connections_net::NetConnectionsManagerConfig, + cluster_sessions::{ClusterSession, ClusterSessionsListener}, +}; +pub use super::{ + acl_storage::AclStorage, + key_server_set::{ + is_migration_required, KeyServerSet, KeyServerSetMigration, KeyServerSetSnapshot, + }, + key_storage::{DocumentKeyShare, DocumentKeyShareVersion, KeyStorage}, + serialization::{ + SerializableAddress, SerializableH256, SerializableMessageHash, SerializablePublic, + SerializableRequester, SerializableSecret, SerializableSignature, + }, + traits::NodeKeyPair, + types::{EncryptedDocumentKeyShadow, Error, NodeId, Requester}, +}; -#[cfg(test)] -pub use super::node_key_pair::PlainNodeKeyPair; -#[cfg(test)] -pub use super::key_storage::tests::DummyKeyStorage; pub use super::acl_storage::DummyAclStorage; #[cfg(test)] pub use super::key_server_set::tests::MapKeyServerSet; +#[cfg(test)] +pub use super::key_storage::tests::DummyKeyStorage; +#[cfg(test)] +pub use super::node_key_pair::PlainNodeKeyPair; pub type SessionId = ServerKeyId; /// Session metadata. #[derive(Debug, Clone)] pub struct SessionMeta { - /// Key id. - pub id: SessionId, - /// Id of node, which has started this session. - pub master_node_id: NodeId, - /// Id of node, on which this session is running. - pub self_node_id: NodeId, - /// Session threshold. - pub threshold: usize, - /// Count of all configured key server nodes (valid at session start time). - pub configured_nodes_count: usize, - /// Count of all connected key server nodes (valid at session start time). - pub connected_nodes_count: usize, + /// Key id. + pub id: SessionId, + /// Id of node, which has started this session. + pub master_node_id: NodeId, + /// Id of node, on which this session is running. 
+ pub self_node_id: NodeId, + /// Session threshold. + pub threshold: usize, + /// Count of all configured key server nodes (valid at session start time). + pub configured_nodes_count: usize, + /// Count of all connected key server nodes (valid at session start time). + pub connected_nodes_count: usize, } mod admin_sessions; mod client_sessions; -pub use self::admin_sessions::key_version_negotiation_session; -pub use self::admin_sessions::servers_set_change_session; -pub use self::admin_sessions::share_add_session; -pub use self::admin_sessions::share_change_session; +pub use self::admin_sessions::{ + key_version_negotiation_session, servers_set_change_session, share_add_session, + share_change_session, +}; -pub use self::client_sessions::decryption_session; -pub use self::client_sessions::encryption_session; -pub use self::client_sessions::generation_session; -pub use self::client_sessions::signing_session_ecdsa; -pub use self::client_sessions::signing_session_schnorr; +pub use self::client_sessions::{ + decryption_session, encryption_session, generation_session, signing_session_ecdsa, + signing_session_schnorr, +}; mod cluster; mod cluster_connections; diff --git a/secret-store/src/key_server_cluster/net/accept_connection.rs b/secret-store/src/key_server_cluster/net/accept_connection.rs index 3b66fe1d7..990b5e50a 100644 --- a/secret-store/src/key_server_cluster/net/accept_connection.rs +++ b/secret-store/src/key_server_cluster/net/accept_connection.rs @@ -14,52 +14,56 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::io; -use std::sync::Arc; -use std::net::SocketAddr; -use std::time::Duration; use futures::{Future, Poll}; +use key_server_cluster::{ + io::{accept_handshake, deadline, Deadline, Handshake}, + net::Connection, + Error, NodeKeyPair, +}; +use std::{io, net::SocketAddr, sync::Arc, time::Duration}; use tokio::net::TcpStream; -use key_server_cluster::{Error, NodeKeyPair}; -use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline}; -use key_server_cluster::net::Connection; /// Create future for accepting incoming connection. -pub fn accept_connection(stream: TcpStream, self_key_pair: Arc) -> Deadline { - // TODO: This could fail so it would be better either to accept the - // address as a separate argument or return a result. - let address = stream.peer_addr().expect("Unable to determine tcp peer address"); +pub fn accept_connection( + stream: TcpStream, + self_key_pair: Arc, +) -> Deadline { + // TODO: This could fail so it would be better either to accept the + // address as a separate argument or return a result. + let address = stream + .peer_addr() + .expect("Unable to determine tcp peer address"); - let accept = AcceptConnection { - handshake: accept_handshake(stream, self_key_pair), - address: address, - }; + let accept = AcceptConnection { + handshake: accept_handshake(stream, self_key_pair), + address: address, + }; - deadline(Duration::new(5, 0), accept).expect("Failed to create timeout") + deadline(Duration::new(5, 0), accept).expect("Failed to create timeout") } /// Future for accepting incoming connection. 
pub struct AcceptConnection { - handshake: Handshake, - address: SocketAddr, + handshake: Handshake, + address: SocketAddr, } impl Future for AcceptConnection { - type Item = Result; - type Error = io::Error; + type Item = Result; + type Error = io::Error; - fn poll(&mut self) -> Poll { - let (stream, result) = try_ready!(self.handshake.poll()); - let result = match result { - Ok(result) => result, - Err(err) => return Ok(Err(err).into()), - }; - let connection = Connection { - stream: stream.into(), - address: self.address, - node_id: result.node_id, - key: result.shared_key, - }; - Ok(Ok(connection).into()) - } + fn poll(&mut self) -> Poll { + let (stream, result) = try_ready!(self.handshake.poll()); + let result = match result { + Ok(result) => result, + Err(err) => return Ok(Err(err).into()), + }; + let connection = Connection { + stream: stream.into(), + address: self.address, + node_id: result.node_id, + key: result.shared_key, + }; + Ok(Ok(connection).into()) + } } diff --git a/secret-store/src/key_server_cluster/net/connect.rs b/secret-store/src/key_server_cluster/net/connect.rs index 3c2cbc269..4f2e6d712 100644 --- a/secret-store/src/key_server_cluster/net/connect.rs +++ b/secret-store/src/key_server_cluster/net/connect.rs @@ -14,76 +14,82 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; -use std::collections::BTreeSet; -use std::io; -use std::time::Duration; -use std::net::SocketAddr; -use futures::{Future, Poll, Async}; -use tokio::net::{TcpStream, tcp::ConnectFuture}; -use key_server_cluster::{Error, NodeId, NodeKeyPair}; -use key_server_cluster::io::{handshake, Handshake, Deadline, deadline}; -use key_server_cluster::net::Connection; +use futures::{Async, Future, Poll}; +use key_server_cluster::{ + io::{deadline, handshake, Deadline, Handshake}, + net::Connection, + Error, NodeId, NodeKeyPair, +}; +use std::{collections::BTreeSet, io, net::SocketAddr, sync::Arc, time::Duration}; +use tokio::net::{tcp::ConnectFuture, TcpStream}; /// Create future for connecting to other node. -pub fn connect(address: &SocketAddr, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Deadline { - let connect = Connect { - state: ConnectState::TcpConnect(TcpStream::connect(address)), - address: address.clone(), - self_key_pair: self_key_pair, - trusted_nodes: trusted_nodes, - }; +pub fn connect( + address: &SocketAddr, + self_key_pair: Arc, + trusted_nodes: BTreeSet, +) -> Deadline { + let connect = Connect { + state: ConnectState::TcpConnect(TcpStream::connect(address)), + address: address.clone(), + self_key_pair: self_key_pair, + trusted_nodes: trusted_nodes, + }; - deadline(Duration::new(5, 0), connect).expect("Failed to create timeout") + deadline(Duration::new(5, 0), connect).expect("Failed to create timeout") } enum ConnectState { - TcpConnect(ConnectFuture), - Handshake(Handshake), - Connected, + TcpConnect(ConnectFuture), + Handshake(Handshake), + Connected, } /// Future for connecting to other node. 
pub struct Connect { - state: ConnectState, - address: SocketAddr, - self_key_pair: Arc, - trusted_nodes: BTreeSet, + state: ConnectState, + address: SocketAddr, + self_key_pair: Arc, + trusted_nodes: BTreeSet, } impl Future for Connect { - type Item = Result; - type Error = io::Error; + type Item = Result; + type Error = io::Error; - fn poll(&mut self) -> Poll { - let (next, result) = match self.state { - ConnectState::TcpConnect(ref mut future) => { - let stream = try_ready!(future.poll()); - let handshake = handshake(stream, self.self_key_pair.clone(), self.trusted_nodes.clone()); - (ConnectState::Handshake(handshake), Async::NotReady) - }, - ConnectState::Handshake(ref mut future) => { - let (stream, result) = try_ready!(future.poll()); - let result = match result { - Ok(result) => result, - Err(err) => return Ok(Async::Ready(Err(err))), - }; - let connection = Connection { - stream: stream.into(), - address: self.address, - node_id: result.node_id, - key: result.shared_key, - }; - (ConnectState::Connected, Async::Ready(Ok(connection))) - }, - ConnectState::Connected => panic!("poll Connect after it's done"), - }; + fn poll(&mut self) -> Poll { + let (next, result) = match self.state { + ConnectState::TcpConnect(ref mut future) => { + let stream = try_ready!(future.poll()); + let handshake = handshake( + stream, + self.self_key_pair.clone(), + self.trusted_nodes.clone(), + ); + (ConnectState::Handshake(handshake), Async::NotReady) + } + ConnectState::Handshake(ref mut future) => { + let (stream, result) = try_ready!(future.poll()); + let result = match result { + Ok(result) => result, + Err(err) => return Ok(Async::Ready(Err(err))), + }; + let connection = Connection { + stream: stream.into(), + address: self.address, + node_id: result.node_id, + key: result.shared_key, + }; + (ConnectState::Connected, Async::Ready(Ok(connection))) + } + ConnectState::Connected => panic!("poll Connect after it's done"), + }; - self.state = next; - match result { - // by polling 
again, we register new future - Async::NotReady => self.poll(), - result => Ok(result) - } - } + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result), + } + } } diff --git a/secret-store/src/key_server_cluster/net/connection.rs b/secret-store/src/key_server_cluster/net/connection.rs index 8688db289..2dbae7461 100644 --- a/secret-store/src/key_server_cluster/net/connection.rs +++ b/secret-store/src/key_server_cluster/net/connection.rs @@ -14,19 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::net; use ethkey::KeyPair; -use key_server_cluster::NodeId; -use key_server_cluster::io::SharedTcpStream; +use key_server_cluster::{io::SharedTcpStream, NodeId}; +use std::net; /// Established connection data pub struct Connection { - /// Peer address. - pub address: net::SocketAddr, - /// Connection stream. - pub stream: SharedTcpStream, - /// Peer node id. - pub node_id: NodeId, - /// Encryption key. - pub key: KeyPair, + /// Peer address. + pub address: net::SocketAddr, + /// Connection stream. + pub stream: SharedTcpStream, + /// Peer node id. + pub node_id: NodeId, + /// Encryption key. 
+ pub key: KeyPair, } diff --git a/secret-store/src/key_server_cluster/net/mod.rs b/secret-store/src/key_server_cluster/net/mod.rs index a040596af..66327632f 100644 --- a/secret-store/src/key_server_cluster/net/mod.rs +++ b/secret-store/src/key_server_cluster/net/mod.rs @@ -18,6 +18,8 @@ mod accept_connection; mod connect; mod connection; -pub use self::accept_connection::{AcceptConnection, accept_connection}; -pub use self::connect::{Connect, connect}; -pub use self::connection::Connection; +pub use self::{ + accept_connection::{accept_connection, AcceptConnection}, + connect::{connect, Connect}, + connection::Connection, +}; diff --git a/secret-store/src/key_server_set.rs b/secret-store/src/key_server_set.rs index 5b25641ae..ed76293f9 100644 --- a/secret-store/src/key_server_set.rs +++ b/secret-store/src/key_server_set.rs @@ -14,19 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use std::net::SocketAddr; -use std::collections::{BTreeMap, HashSet}; -use parking_lot::Mutex; +use bytes::Bytes; use call_contract::CallContract; use ethabi::FunctionOutputDecoder; -use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify, NewBlocks}; -use ethereum_types::{H256, Address}; +use ethcore::client::{BlockChainClient, BlockId, ChainNotify, Client, NewBlocks}; +use ethereum_types::{Address, H256}; use ethkey::public_to_address; -use bytes::Bytes; -use types::{Error, Public, NodeAddress, NodeId}; +use parking_lot::Mutex; +use std::{ + collections::{BTreeMap, HashSet}, + net::SocketAddr, + sync::Arc, +}; use trusted_client::TrustedClient; -use {NodeKeyPair, ContractAddress}; +use types::{Error, NodeAddress, NodeId, Public}; +use ContractAddress; +use NodeKeyPair; use_contract!(key_server, "res/key_server_set.json"); @@ -40,345 +43,406 @@ const TRANSACTION_RETRY_INTERVAL_BLOCKS: u64 = 30; #[derive(Default, Debug, Clone, PartialEq)] /// Key Server Set state. 
pub struct KeyServerSetSnapshot { - /// Current set of key servers. - pub current_set: BTreeMap, - /// New set of key servers. - pub new_set: BTreeMap, - /// Current migration data. - pub migration: Option, + /// Current set of key servers. + pub current_set: BTreeMap, + /// New set of key servers. + pub new_set: BTreeMap, + /// Current migration data. + pub migration: Option, } #[derive(Default, Debug, Clone, PartialEq)] /// Key server set migration. pub struct KeyServerSetMigration { - /// Migration id. - pub id: H256, - /// Migration set of key servers. It is the new_set at the moment of migration start. - pub set: BTreeMap, - /// Master node of the migration process. - pub master: NodeId, - /// Is migration confirmed by this node? - pub is_confirmed: bool, + /// Migration id. + pub id: H256, + /// Migration set of key servers. It is the new_set at the moment of migration start. + pub set: BTreeMap, + /// Master node of the migration process. + pub master: NodeId, + /// Is migration confirmed by this node? + pub is_confirmed: bool, } /// Key Server Set pub trait KeyServerSet: Send + Sync { - /// Is this node currently isolated from the set? - fn is_isolated(&self) -> bool; - /// Get server set state. - fn snapshot(&self) -> KeyServerSetSnapshot; - /// Start migration. - fn start_migration(&self, migration_id: H256); - /// Confirm migration. - fn confirm_migration(&self, migration_id: H256); + /// Is this node currently isolated from the set? + fn is_isolated(&self) -> bool; + /// Get server set state. + fn snapshot(&self) -> KeyServerSetSnapshot; + /// Start migration. + fn start_migration(&self, migration_id: H256); + /// Confirm migration. + fn confirm_migration(&self, migration_id: H256); } /// On-chain Key Server set implementation. pub struct OnChainKeyServerSet { - /// Cached on-chain contract. - contract: Mutex, + /// Cached on-chain contract. + contract: Mutex, } #[derive(Default, Debug, Clone, PartialEq)] /// Non-finalized new_set. 
struct FutureNewSet { - /// New servers set. - pub new_set: BTreeMap, - /// Hash of block, when this set has appeared for first time. - pub block: H256, + /// New servers set. + pub new_set: BTreeMap, + /// Hash of block, when this set has appeared for first time. + pub block: H256, } #[derive(Default, Debug, Clone, PartialEq)] /// Migration-related transaction information. struct PreviousMigrationTransaction { - /// Migration id. - pub migration_id: H256, - /// Latest actual block number at the time this transaction has been sent. - pub block: u64, + /// Migration id. + pub migration_id: H256, + /// Latest actual block number at the time this transaction has been sent. + pub block: u64, } /// Cached on-chain Key Server set contract. struct CachedContract { - /// Blockchain client. - client: TrustedClient, - /// Contract address source. - contract_address_source: Option, - /// Current contract address. - contract_address: Option
, - /// Is auto-migrate enabled? - auto_migrate_enabled: bool, - /// Current contract state. - snapshot: KeyServerSetSnapshot, - /// Scheduled contract state (if any). - future_new_set: Option, - /// Previous start migration transaction. - start_migration_tx: Option, - /// Previous confirm migration transaction. - confirm_migration_tx: Option, - /// This node key pair. - self_key_pair: Arc, + /// Blockchain client. + client: TrustedClient, + /// Contract address source. + contract_address_source: Option, + /// Current contract address. + contract_address: Option
, + /// Is auto-migrate enabled? + auto_migrate_enabled: bool, + /// Current contract state. + snapshot: KeyServerSetSnapshot, + /// Scheduled contract state (if any). + future_new_set: Option, + /// Previous start migration transaction. + start_migration_tx: Option, + /// Previous confirm migration transaction. + confirm_migration_tx: Option, + /// This node key pair. + self_key_pair: Arc, } impl OnChainKeyServerSet { - pub fn new(trusted_client: TrustedClient, contract_address_source: Option, self_key_pair: Arc, auto_migrate_enabled: bool, key_servers: BTreeMap) -> Result, Error> { - let client = trusted_client.get_untrusted(); - let key_server_set = Arc::new(OnChainKeyServerSet { - contract: Mutex::new(CachedContract::new(trusted_client, contract_address_source, self_key_pair, auto_migrate_enabled, key_servers)?), - }); - client - .ok_or_else(|| Error::Internal("Constructing OnChainKeyServerSet without active Client".into()))? - .add_notify(key_server_set.clone()); - Ok(key_server_set) - } + pub fn new( + trusted_client: TrustedClient, + contract_address_source: Option, + self_key_pair: Arc, + auto_migrate_enabled: bool, + key_servers: BTreeMap, + ) -> Result, Error> { + let client = trusted_client.get_untrusted(); + let key_server_set = Arc::new(OnChainKeyServerSet { + contract: Mutex::new(CachedContract::new( + trusted_client, + contract_address_source, + self_key_pair, + auto_migrate_enabled, + key_servers, + )?), + }); + client + .ok_or_else(|| { + Error::Internal("Constructing OnChainKeyServerSet without active Client".into()) + })? 
+ .add_notify(key_server_set.clone()); + Ok(key_server_set) + } } impl KeyServerSet for OnChainKeyServerSet { - fn is_isolated(&self) -> bool { - self.contract.lock().is_isolated() - } + fn is_isolated(&self) -> bool { + self.contract.lock().is_isolated() + } - fn snapshot(&self) -> KeyServerSetSnapshot { - self.contract.lock().snapshot() - } + fn snapshot(&self) -> KeyServerSetSnapshot { + self.contract.lock().snapshot() + } - fn start_migration(&self, migration_id: H256) { - self.contract.lock().start_migration(migration_id) - } + fn start_migration(&self, migration_id: H256) { + self.contract.lock().start_migration(migration_id) + } - fn confirm_migration(&self, migration_id: H256) { - self.contract.lock().confirm_migration(migration_id); - } + fn confirm_migration(&self, migration_id: H256) { + self.contract.lock().confirm_migration(migration_id); + } } impl ChainNotify for OnChainKeyServerSet { - fn new_blocks(&self, new_blocks: NewBlocks) { - if new_blocks.has_more_blocks_to_import { return } - let (enacted, retracted) = new_blocks.route.into_enacted_retracted(); + fn new_blocks(&self, new_blocks: NewBlocks) { + if new_blocks.has_more_blocks_to_import { + return; + } + let (enacted, retracted) = new_blocks.route.into_enacted_retracted(); - if !enacted.is_empty() || !retracted.is_empty() { - self.contract.lock().update(enacted, retracted) - } - } + if !enacted.is_empty() || !retracted.is_empty() { + self.contract.lock().update(enacted, retracted) + } + } } trait KeyServerSubset) -> Result, String>> { - fn read_list(&self, f: &F) -> Result, String>; + fn read_list(&self, f: &F) -> Result, String>; - fn read_public(&self, address: Address, f: &F) -> Result; + fn read_public(&self, address: Address, f: &F) -> Result; - fn read_address(&self, address: Address, f: &F) -> Result; + fn read_address(&self, address: Address, f: &F) -> Result; } struct CurrentKeyServerSubset; -impl ) -> Result, String>> KeyServerSubset for CurrentKeyServerSubset { - fn read_list(&self, 
f: &F) -> Result, String> { - let (encoded, decoder) = key_server::functions::get_current_key_servers::call(); - decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) - } +impl) -> Result, String>> KeyServerSubset for CurrentKeyServerSubset { + fn read_list(&self, f: &F) -> Result, String> { + let (encoded, decoder) = key_server::functions::get_current_key_servers::call(); + decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) + } - fn read_public(&self, address: Address, f: &F) -> Result { - let (encoded, decoder) = key_server::functions::get_current_key_server_public::call(address); - decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) - } + fn read_public(&self, address: Address, f: &F) -> Result { + let (encoded, decoder) = + key_server::functions::get_current_key_server_public::call(address); + decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) + } - fn read_address(&self, address: Address, f: &F) -> Result { - let (encoded, decoder) = key_server::functions::get_current_key_server_address::call(address); - decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) - } + fn read_address(&self, address: Address, f: &F) -> Result { + let (encoded, decoder) = + key_server::functions::get_current_key_server_address::call(address); + decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) + } } struct MigrationKeyServerSubset; -impl ) -> Result, String>> KeyServerSubset for MigrationKeyServerSubset { - fn read_list(&self, f: &F) -> Result, String> { - let (encoded, decoder) = key_server::functions::get_migration_key_servers::call(); - decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) - } +impl) -> Result, String>> KeyServerSubset for MigrationKeyServerSubset { + fn read_list(&self, f: &F) -> Result, String> { + let (encoded, decoder) = key_server::functions::get_migration_key_servers::call(); + decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) + } - fn read_public(&self, address: Address, f: &F) -> Result { - let (encoded, decoder) = 
key_server::functions::get_migration_key_server_public::call(address); - decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) - } + fn read_public(&self, address: Address, f: &F) -> Result { + let (encoded, decoder) = + key_server::functions::get_migration_key_server_public::call(address); + decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) + } - fn read_address(&self, address: Address, f: &F) -> Result { - let (encoded, decoder) = key_server::functions::get_migration_key_server_address::call(address); - decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) - } + fn read_address(&self, address: Address, f: &F) -> Result { + let (encoded, decoder) = + key_server::functions::get_migration_key_server_address::call(address); + decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) + } } struct NewKeyServerSubset; -impl ) -> Result, String>> KeyServerSubset for NewKeyServerSubset { - fn read_list(&self, f: &F) -> Result, String> { - let (encoded, decoder) = key_server::functions::get_new_key_servers::call(); - decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) - } +impl) -> Result, String>> KeyServerSubset for NewKeyServerSubset { + fn read_list(&self, f: &F) -> Result, String> { + let (encoded, decoder) = key_server::functions::get_new_key_servers::call(); + decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) + } - fn read_public(&self, address: Address, f: &F) -> Result { - let (encoded, decoder) = key_server::functions::get_new_key_server_public::call(address); - decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) - } + fn read_public(&self, address: Address, f: &F) -> Result { + let (encoded, decoder) = key_server::functions::get_new_key_server_public::call(address); + decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) + } - fn read_address(&self, address: Address, f: &F) -> Result { - let (encoded, decoder) = key_server::functions::get_new_key_server_address::call(address); - decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) - } 
+ fn read_address(&self, address: Address, f: &F) -> Result { + let (encoded, decoder) = key_server::functions::get_new_key_server_address::call(address); + decoder.decode(&f(encoded)?).map_err(|e| e.to_string()) + } } impl CachedContract { - pub fn new(client: TrustedClient, contract_address_source: Option, self_key_pair: Arc, auto_migrate_enabled: bool, key_servers: BTreeMap) -> Result { - let server_set = match contract_address_source.is_none() { - true => key_servers.into_iter() - .map(|(p, addr)| { - let addr = format!("{}:{}", addr.address, addr.port).parse() - .map_err(|err| Error::Internal(format!("error parsing node address: {}", err)))?; - Ok((p, addr)) - }) - .collect::, Error>>()?, - false => Default::default(), - }; + pub fn new( + client: TrustedClient, + contract_address_source: Option, + self_key_pair: Arc, + auto_migrate_enabled: bool, + key_servers: BTreeMap, + ) -> Result { + let server_set = match contract_address_source.is_none() { + true => key_servers + .into_iter() + .map(|(p, addr)| { + let addr = + format!("{}:{}", addr.address, addr.port) + .parse() + .map_err(|err| { + Error::Internal(format!("error parsing node address: {}", err)) + })?; + Ok((p, addr)) + }) + .collect::, Error>>()?, + false => Default::default(), + }; - let mut contract = CachedContract { - client: client, - contract_address_source: contract_address_source, - contract_address: None, - auto_migrate_enabled: auto_migrate_enabled, - future_new_set: None, - confirm_migration_tx: None, - start_migration_tx: None, - snapshot: KeyServerSetSnapshot { - current_set: server_set.clone(), - new_set: server_set, - ..Default::default() - }, - self_key_pair: self_key_pair, - }; - contract.update_contract_address(); + let mut contract = CachedContract { + client: client, + contract_address_source: contract_address_source, + contract_address: None, + auto_migrate_enabled: auto_migrate_enabled, + future_new_set: None, + confirm_migration_tx: None, + start_migration_tx: None, + snapshot: 
KeyServerSetSnapshot { + current_set: server_set.clone(), + new_set: server_set, + ..Default::default() + }, + self_key_pair: self_key_pair, + }; + contract.update_contract_address(); - Ok(contract) - } + Ok(contract) + } - pub fn update_contract_address(&mut self) { - if let Some(ref contract_address_source) = self.contract_address_source { - let contract_address = self.client.read_contract_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.into(), contract_address_source); - if contract_address != self.contract_address { - trace!(target: "secretstore", "{}: Configuring for key server set contract from address {:?}", + pub fn update_contract_address(&mut self) { + if let Some(ref contract_address_source) = self.contract_address_source { + let contract_address = self.client.read_contract_address( + KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.into(), + contract_address_source, + ); + if contract_address != self.contract_address { + trace!(target: "secretstore", "{}: Configuring for key server set contract from address {:?}", self.self_key_pair.public(), contract_address); - self.contract_address = contract_address; - } - } - } + self.contract_address = contract_address; + } + } + } - pub fn update(&mut self, enacted: Vec, retracted: Vec) { - // no need to update when servers set is hardcoded - if self.contract_address_source.is_none() { - return; - } + pub fn update(&mut self, enacted: Vec, retracted: Vec) { + // no need to update when servers set is hardcoded + if self.contract_address_source.is_none() { + return; + } - if let Some(client) = self.client.get() { - // read new snapshot from reqistry (if something has chnaged) - if !enacted.is_empty() || !retracted.is_empty() { - self.update_contract_address(); - self.read_from_registry(&*client); - } + if let Some(client) = self.client.get() { + // read new snapshot from reqistry (if something has chnaged) + if !enacted.is_empty() || !retracted.is_empty() { + self.update_contract_address(); + self.read_from_registry(&*client); 
+ } - // update number of confirmations (if there's future new set) - self.update_number_of_confirmations_if_required(&*client); - } - } + // update number of confirmations (if there's future new set) + self.update_number_of_confirmations_if_required(&*client); + } + } - fn is_isolated(&self) -> bool { - !self.snapshot.current_set.contains_key(self.self_key_pair.public()) - } + fn is_isolated(&self) -> bool { + !self + .snapshot + .current_set + .contains_key(self.self_key_pair.public()) + } - fn snapshot(&self) -> KeyServerSetSnapshot { - self.snapshot.clone() - } + fn snapshot(&self) -> KeyServerSetSnapshot { + self.snapshot.clone() + } - fn start_migration(&mut self, migration_id: H256) { - // trust is not needed here, because it is the reaction to the read of the trusted client - if let (Some(client), Some(contract_address)) = (self.client.get_untrusted(), self.contract_address.as_ref()) { - // check if we need to send start migration transaction - if !update_last_transaction_block(&*client, &migration_id, &mut self.start_migration_tx) { - return; - } + fn start_migration(&mut self, migration_id: H256) { + // trust is not needed here, because it is the reaction to the read of the trusted client + if let (Some(client), Some(contract_address)) = + (self.client.get_untrusted(), self.contract_address.as_ref()) + { + // check if we need to send start migration transaction + if !update_last_transaction_block(&*client, &migration_id, &mut self.start_migration_tx) + { + return; + } - // prepare transaction data - let transaction_data = key_server::functions::start_migration::encode_input(migration_id); + // prepare transaction data + let transaction_data = + key_server::functions::start_migration::encode_input(migration_id); - // send transaction - match self.client.transact_contract(*contract_address, transaction_data) { - Ok(_) => trace!(target: "secretstore_net", "{}: sent auto-migration start transaction", - self.self_key_pair.public()), - Err(error) => 
warn!(target: "secretstore_net", "{}: failed to submit auto-migration start transaction: {}", - self.self_key_pair.public(), error), - } - } - } + // send transaction + match self + .client + .transact_contract(*contract_address, transaction_data) + { + Ok(_) => { + trace!(target: "secretstore_net", "{}: sent auto-migration start transaction", + self.self_key_pair.public()) + } + Err(error) => { + warn!(target: "secretstore_net", "{}: failed to submit auto-migration start transaction: {}", + self.self_key_pair.public(), error) + } + } + } + } - fn confirm_migration(&mut self, migration_id: H256) { - // trust is not needed here, because we have already completed the action - if let (Some(client), Some(contract_address)) = (self.client.get(), self.contract_address) { - // check if we need to send start migration transaction - if !update_last_transaction_block(&*client, &migration_id, &mut self.confirm_migration_tx) { - return; - } + fn confirm_migration(&mut self, migration_id: H256) { + // trust is not needed here, because we have already completed the action + if let (Some(client), Some(contract_address)) = (self.client.get(), self.contract_address) { + // check if we need to send start migration transaction + if !update_last_transaction_block( + &*client, + &migration_id, + &mut self.confirm_migration_tx, + ) { + return; + } - // prepare transaction data - let transaction_data = key_server::functions::confirm_migration::encode_input(migration_id); + // prepare transaction data + let transaction_data = + key_server::functions::confirm_migration::encode_input(migration_id); - // send transaction - match self.client.transact_contract(contract_address, transaction_data) { - Ok(_) => trace!(target: "secretstore_net", "{}: sent auto-migration confirm transaction", - self.self_key_pair.public()), - Err(error) => warn!(target: "secretstore_net", "{}: failed to submit auto-migration confirmation transaction: {}", - self.self_key_pair.public(), error), - } - } - } + // send 
transaction + match self + .client + .transact_contract(contract_address, transaction_data) + { + Ok(_) => { + trace!(target: "secretstore_net", "{}: sent auto-migration confirm transaction", + self.self_key_pair.public()) + } + Err(error) => { + warn!(target: "secretstore_net", "{}: failed to submit auto-migration confirmation transaction: {}", + self.self_key_pair.public(), error) + } + } + } + } - fn read_from_registry(&mut self, client: &Client) { - let contract_address = match self.contract_address { - Some(contract_address) => contract_address, - None => { - // no contract installed => empty snapshot - // WARNING: after restart current_set will be reset to the set from configuration file - // even though we have reset to empty set here. We are not considering this as an issue - // because it is actually the issue of administrator. - self.snapshot = Default::default(); - self.future_new_set = None; - return; - }, - }; + fn read_from_registry(&mut self, client: &Client) { + let contract_address = match self.contract_address { + Some(contract_address) => contract_address, + None => { + // no contract installed => empty snapshot + // WARNING: after restart current_set will be reset to the set from configuration file + // even though we have reset to empty set here. We are not considering this as an issue + // because it is actually the issue of administrator. 
+ self.snapshot = Default::default(); + self.future_new_set = None; + return; + } + }; - let do_call = |data| client.call_contract(BlockId::Latest, contract_address, data); + let do_call = |data| client.call_contract(BlockId::Latest, contract_address, data); - let current_set = Self::read_key_server_set(CurrentKeyServerSubset, &do_call); + let current_set = Self::read_key_server_set(CurrentKeyServerSubset, &do_call); - // read migration-related data if auto migration is enabled - let (new_set, migration) = match self.auto_migrate_enabled { - true => { - let new_set = Self::read_key_server_set(NewKeyServerSubset, &do_call); - let migration_set = Self::read_key_server_set(MigrationKeyServerSubset, &do_call); + // read migration-related data if auto migration is enabled + let (new_set, migration) = match self.auto_migrate_enabled { + true => { + let new_set = Self::read_key_server_set(NewKeyServerSubset, &do_call); + let migration_set = Self::read_key_server_set(MigrationKeyServerSubset, &do_call); - let migration_id = match migration_set.is_empty() { - false => { - let (encoded, decoder) = key_server::functions::get_migration_id::call(); - do_call(encoded) + let migration_id = match migration_set.is_empty() { + false => { + let (encoded, decoder) = key_server::functions::get_migration_id::call(); + do_call(encoded) .map_err(|e| e.to_string()) .and_then(|data| decoder.decode(&data).map_err(|e| e.to_string())) .map_err(|err| { trace!(target: "secretstore", "Error {} reading migration id from contract", err); err }) .ok() - }, - true => None, - }; + } + true => None, + }; - let migration_master = match migration_set.is_empty() { - false => { - let (encoded, decoder) = key_server::functions::get_migration_master::call(); - do_call(encoded) + let migration_master = match migration_set.is_empty() { + false => { + let (encoded, decoder) = + key_server::functions::get_migration_master::call(); + do_call(encoded) .map_err(|e| e.to_string()) .and_then(|data| 
decoder.decode(&data).map_err(|e| e.to_string())) .map_err(|err| { trace!(target: "secretstore", "Error {} reading migration master from contract", err); err }) @@ -386,450 +450,532 @@ impl CachedContract { .and_then(|address| current_set.keys().chain(migration_set.keys()) .find(|public| public_to_address(public) == address) .cloned()) - }, - true => None, - }; + } + true => None, + }; - let is_migration_confirmed = match migration_set.is_empty() { - false if current_set.contains_key(self.self_key_pair.public()) || migration_set.contains_key(self.self_key_pair.public()) => { - let (encoded, decoder) = key_server::functions::is_migration_confirmed::call(self.self_key_pair.address()); - do_call(encoded) + let is_migration_confirmed = match migration_set.is_empty() { + false + if current_set.contains_key(self.self_key_pair.public()) + || migration_set.contains_key(self.self_key_pair.public()) => + { + let (encoded, decoder) = + key_server::functions::is_migration_confirmed::call( + self.self_key_pair.address(), + ); + do_call(encoded) .map_err(|e| e.to_string()) .and_then(|data| decoder.decode(&data).map_err(|e| e.to_string())) .map_err(|err| { trace!(target: "secretstore", "Error {} reading migration confirmation from contract", err); err }) .ok() - }, - _ => None, - }; + } + _ => None, + }; - let migration = match (migration_set.is_empty(), migration_id, migration_master, is_migration_confirmed) { - (false, Some(migration_id), Some(migration_master), Some(is_migration_confirmed)) => - Some(KeyServerSetMigration { - id: migration_id, - master: migration_master, - set: migration_set, - is_confirmed: is_migration_confirmed, - }), - _ => None, - }; + let migration = match ( + migration_set.is_empty(), + migration_id, + migration_master, + is_migration_confirmed, + ) { + ( + false, + Some(migration_id), + Some(migration_master), + Some(is_migration_confirmed), + ) => Some(KeyServerSetMigration { + id: migration_id, + master: migration_master, + set: migration_set, + 
is_confirmed: is_migration_confirmed, + }), + _ => None, + }; - (new_set, migration) - } - false => (current_set.clone(), None), - }; + (new_set, migration) + } + false => (current_set.clone(), None), + }; - let mut new_snapshot = KeyServerSetSnapshot { - current_set: current_set, - new_set: new_set, - migration: migration, - }; + let mut new_snapshot = KeyServerSetSnapshot { + current_set: current_set, + new_set: new_set, + migration: migration, + }; - // we might want to adjust new_set if auto migration is enabled - if self.auto_migrate_enabled { - let block = client.block_hash(BlockId::Latest).unwrap_or_default(); - update_future_set(&mut self.future_new_set, &mut new_snapshot, block); - } + // we might want to adjust new_set if auto migration is enabled + if self.auto_migrate_enabled { + let block = client.block_hash(BlockId::Latest).unwrap_or_default(); + update_future_set(&mut self.future_new_set, &mut new_snapshot, block); + } - self.snapshot = new_snapshot; - } + self.snapshot = new_snapshot; + } - fn read_key_server_set(subset: T, do_call: F) -> BTreeMap - where - T: KeyServerSubset, - F: Fn(Vec) -> Result, String> { - let mut key_servers = BTreeMap::new(); - let mut key_servers_addresses = HashSet::new(); - let key_servers_list = subset.read_list(&do_call) + fn read_key_server_set(subset: T, do_call: F) -> BTreeMap + where + T: KeyServerSubset, + F: Fn(Vec) -> Result, String>, + { + let mut key_servers = BTreeMap::new(); + let mut key_servers_addresses = HashSet::new(); + let key_servers_list = subset.read_list(&do_call) .map_err(|err| { warn!(target: "secretstore_net", "error {} reading list of key servers from contract", err); err }) .unwrap_or_default(); - for key_server in key_servers_list { - let key_server_public = subset.read_public(key_server, &do_call) - .and_then(|p| if p.len() == 64 { Ok(Public::from_slice(&p)) } else { Err(format!("Invalid public length {}", p.len())) }); - let key_server_address: Result = subset.read_address(key_server, 
&do_call) - .and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e))); + for key_server in key_servers_list { + let key_server_public = subset.read_public(key_server, &do_call).and_then(|p| { + if p.len() == 64 { + Ok(Public::from_slice(&p)) + } else { + Err(format!("Invalid public length {}", p.len())) + } + }); + let key_server_address: Result = subset + .read_address(key_server, &do_call) + .and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e))); - // only add successfully parsed nodes - match (key_server_public, key_server_address) { - (Ok(key_server_public), Ok(key_server_address)) => { - if !key_servers_addresses.insert(key_server_address.clone()) { - warn!(target: "secretstore_net", "the same address ({}) specified twice in list of contracts. Ignoring server {}", + // only add successfully parsed nodes + match (key_server_public, key_server_address) { + (Ok(key_server_public), Ok(key_server_address)) => { + if !key_servers_addresses.insert(key_server_address.clone()) { + warn!(target: "secretstore_net", "the same address ({}) specified twice in list of contracts. 
Ignoring server {}", key_server_address, key_server_public); - continue; - } + continue; + } - key_servers.insert(key_server_public, key_server_address); - }, - (Err(public_err), _) => warn!(target: "secretstore_net", "received invalid public from key server set contract: {}", public_err), - (_, Err(ip_err)) => warn!(target: "secretstore_net", "received invalid IP from key server set contract: {}", ip_err), - } - } - key_servers - } + key_servers.insert(key_server_public, key_server_address); + } + (Err(public_err), _) => { + warn!(target: "secretstore_net", "received invalid public from key server set contract: {}", public_err) + } + (_, Err(ip_err)) => { + warn!(target: "secretstore_net", "received invalid IP from key server set contract: {}", ip_err) + } + } + } + key_servers + } - fn update_number_of_confirmations_if_required(&mut self, client: &BlockChainClient) { - if !self.auto_migrate_enabled { - return; - } + fn update_number_of_confirmations_if_required(&mut self, client: &BlockChainClient) { + if !self.auto_migrate_enabled { + return; + } - update_number_of_confirmations( - &|| latest_block_hash(&*client), - &|block| block_confirmations(&*client, block), - &mut self.future_new_set, &mut self.snapshot); - } + update_number_of_confirmations( + &|| latest_block_hash(&*client), + &|block| block_confirmations(&*client, block), + &mut self.future_new_set, + &mut self.snapshot, + ); + } } /// Check if two sets are equal (in terms of migration requirements). We do not need migration if only /// addresses are changed - simply adjusting connections is enough in this case. 
-pub fn is_migration_required(current_set: &BTreeMap, new_set: &BTreeMap) -> bool { - let no_nodes_removed = current_set.keys().all(|n| new_set.contains_key(n)); - let no_nodes_added = new_set.keys().all(|n| current_set.contains_key(n)); - !no_nodes_removed || !no_nodes_added +pub fn is_migration_required( + current_set: &BTreeMap, + new_set: &BTreeMap, +) -> bool { + let no_nodes_removed = current_set.keys().all(|n| new_set.contains_key(n)); + let no_nodes_added = new_set.keys().all(|n| current_set.contains_key(n)); + !no_nodes_removed || !no_nodes_added } -fn update_future_set(future_new_set: &mut Option, new_snapshot: &mut KeyServerSetSnapshot, block: H256) { - // migration has already started => no need to delay visibility - if new_snapshot.migration.is_some() { - *future_new_set = None; - return; - } +fn update_future_set( + future_new_set: &mut Option, + new_snapshot: &mut KeyServerSetSnapshot, + block: H256, +) { + // migration has already started => no need to delay visibility + if new_snapshot.migration.is_some() { + *future_new_set = None; + return; + } - // no migration is required => no need to delay visibility - if !is_migration_required(&new_snapshot.current_set, &new_snapshot.new_set) { - *future_new_set = None; - return; - } + // no migration is required => no need to delay visibility + if !is_migration_required(&new_snapshot.current_set, &new_snapshot.new_set) { + *future_new_set = None; + return; + } - // when auto-migrate is enabled, we do not want to start migration right after new_set is changed, because of: - // 1) there could be a fork && we could start migration to forked version (and potentially lose secrets) - // 2) there must be some period for new_set changes finalization (i.e. 
adding/removing more servers) - let mut new_set = new_snapshot.current_set.clone(); - ::std::mem::swap(&mut new_set, &mut new_snapshot.new_set); + // when auto-migrate is enabled, we do not want to start migration right after new_set is changed, because of: + // 1) there could be a fork && we could start migration to forked version (and potentially lose secrets) + // 2) there must be some period for new_set changes finalization (i.e. adding/removing more servers) + let mut new_set = new_snapshot.current_set.clone(); + ::std::mem::swap(&mut new_set, &mut new_snapshot.new_set); - // if nothing has changed in future_new_set, then we want to preserve previous block hash - let block = match Some(&new_set) == future_new_set.as_ref().map(|f| &f.new_set) { - true => future_new_set.as_ref().map(|f| &f.block).cloned().unwrap_or_else(|| block), - false => block, - }; + // if nothing has changed in future_new_set, then we want to preserve previous block hash + let block = match Some(&new_set) == future_new_set.as_ref().map(|f| &f.new_set) { + true => future_new_set + .as_ref() + .map(|f| &f.block) + .cloned() + .unwrap_or_else(|| block), + false => block, + }; - *future_new_set = Some(FutureNewSet { - new_set: new_set, - block: block, - }); + *future_new_set = Some(FutureNewSet { + new_set: new_set, + block: block, + }); } -fn update_number_of_confirmations H256, F2: Fn(H256) -> Option>(latest_block: &F1, confirmations: &F2, future_new_set: &mut Option, snapshot: &mut KeyServerSetSnapshot) { - match future_new_set.as_mut() { - // no future new set is scheduled => do nothing, - None => return, - // else we should calculate number of confirmations for future new set - Some(future_new_set) => match confirmations(future_new_set.block.clone()) { - // we have enough confirmations => should move new_set from future to snapshot - Some(confirmations) if confirmations >= MIGRATION_CONFIRMATIONS_REQUIRED => (), - // not enough confirmations => do nothing - Some(_) => return, - // if 
number of confirmations is None, then reorg has happened && we need to reset block - // (some more intelligent strategy is possible, but let's stick to simplest one) - None => { - future_new_set.block = latest_block(); - return; - } - } - } +fn update_number_of_confirmations H256, F2: Fn(H256) -> Option>( + latest_block: &F1, + confirmations: &F2, + future_new_set: &mut Option, + snapshot: &mut KeyServerSetSnapshot, +) { + match future_new_set.as_mut() { + // no future new set is scheduled => do nothing, + None => return, + // else we should calculate number of confirmations for future new set + Some(future_new_set) => match confirmations(future_new_set.block.clone()) { + // we have enough confirmations => should move new_set from future to snapshot + Some(confirmations) if confirmations >= MIGRATION_CONFIRMATIONS_REQUIRED => (), + // not enough confirmations => do nothing + Some(_) => return, + // if number of confirmations is None, then reorg has happened && we need to reset block + // (some more intelligent strategy is possible, but let's stick to simplest one) + None => { + future_new_set.block = latest_block(); + return; + } + }, + } - let future_new_set = future_new_set.take() - .expect("we only pass through match above when future_new_set is some; qed"); - snapshot.new_set = future_new_set.new_set; + let future_new_set = future_new_set + .take() + .expect("we only pass through match above when future_new_set is some; qed"); + snapshot.new_set = future_new_set.new_set; } -fn update_last_transaction_block(client: &Client, migration_id: &H256, previous_transaction: &mut Option) -> bool { - let last_block = client.block_number(BlockId::Latest).unwrap_or_default(); - match previous_transaction.as_ref() { - // no previous transaction => send immediately - None => (), - // previous transaction has been sent for other migration process => send immediately - Some(tx) if tx.migration_id != *migration_id => (), - // if we have sent the same type of transaction recently 
=> do nothing (hope it will be mined eventually) - // if we have sent the same transaction some time ago => - // assume that our tx queue was full - // or we didn't have enough eth fot this tx - // or the transaction has been removed from the queue (and never reached any miner node) - // if we have restarted after sending tx => assume we have never sent it - Some(tx) => { - if tx.block > last_block || last_block - tx.block < TRANSACTION_RETRY_INTERVAL_BLOCKS { - return false; - } - }, - } +fn update_last_transaction_block( + client: &Client, + migration_id: &H256, + previous_transaction: &mut Option, +) -> bool { + let last_block = client.block_number(BlockId::Latest).unwrap_or_default(); + match previous_transaction.as_ref() { + // no previous transaction => send immediately + None => (), + // previous transaction has been sent for other migration process => send immediately + Some(tx) if tx.migration_id != *migration_id => (), + // if we have sent the same type of transaction recently => do nothing (hope it will be mined eventually) + // if we have sent the same transaction some time ago => + // assume that our tx queue was full + // or we didn't have enough eth for this tx + // or the transaction has been removed from the queue (and never reached any miner node) + // if we have restarted after sending tx => assume we have never sent it + Some(tx) => { + if tx.block > last_block || last_block - tx.block < TRANSACTION_RETRY_INTERVAL_BLOCKS { + return false; + } + } + } - *previous_transaction = Some(PreviousMigrationTransaction { - migration_id: migration_id.clone(), - block: last_block, - }); + *previous_transaction = Some(PreviousMigrationTransaction { + migration_id: migration_id.clone(), + block: last_block, + }); - true + true } fn latest_block_hash(client: &BlockChainClient) -> H256 { - client.block_hash(BlockId::Latest).unwrap_or_default() + client.block_hash(BlockId::Latest).unwrap_or_default() } fn block_confirmations(client: &BlockChainClient, block: 
H256) -> Option { - client.block_number(BlockId::Hash(block)) - .and_then(|block| client.block_number(BlockId::Latest).map(|last_block| (block, last_block))) - .map(|(block, last_block)| last_block - block) + client + .block_number(BlockId::Hash(block)) + .and_then(|block| { + client + .block_number(BlockId::Latest) + .map(|last_block| (block, last_block)) + }) + .map(|(block, last_block)| last_block - block) } #[cfg(test)] pub mod tests { - use std::collections::BTreeMap; - use std::net::SocketAddr; - use ethereum_types::H256; - use ethkey::Public; - use super::{update_future_set, update_number_of_confirmations, FutureNewSet, - KeyServerSet, KeyServerSetSnapshot, MIGRATION_CONFIRMATIONS_REQUIRED}; + use super::{ + update_future_set, update_number_of_confirmations, FutureNewSet, KeyServerSet, + KeyServerSetSnapshot, MIGRATION_CONFIRMATIONS_REQUIRED, + }; + use ethereum_types::H256; + use ethkey::Public; + use std::{collections::BTreeMap, net::SocketAddr}; - #[derive(Default)] - pub struct MapKeyServerSet { - is_isolated: bool, - nodes: BTreeMap, - } + #[derive(Default)] + pub struct MapKeyServerSet { + is_isolated: bool, + nodes: BTreeMap, + } - impl MapKeyServerSet { - pub fn new(is_isolated: bool, nodes: BTreeMap) -> Self { - MapKeyServerSet { - is_isolated: is_isolated, - nodes: nodes, - } - } - } + impl MapKeyServerSet { + pub fn new(is_isolated: bool, nodes: BTreeMap) -> Self { + MapKeyServerSet { + is_isolated: is_isolated, + nodes: nodes, + } + } + } - impl KeyServerSet for MapKeyServerSet { - fn is_isolated(&self) -> bool { - self.is_isolated - } + impl KeyServerSet for MapKeyServerSet { + fn is_isolated(&self) -> bool { + self.is_isolated + } - fn snapshot(&self) -> KeyServerSetSnapshot { - KeyServerSetSnapshot { - current_set: self.nodes.clone(), - new_set: self.nodes.clone(), - ..Default::default() - } - } + fn snapshot(&self) -> KeyServerSetSnapshot { + KeyServerSetSnapshot { + current_set: self.nodes.clone(), + new_set: self.nodes.clone(), + 
..Default::default() + } + } - fn start_migration(&self, _migration_id: H256) { - unimplemented!("test-only") - } + fn start_migration(&self, _migration_id: H256) { + unimplemented!("test-only") + } - fn confirm_migration(&self, _migration_id: H256) { - unimplemented!("test-only") - } - } + fn confirm_migration(&self, _migration_id: H256) { + unimplemented!("test-only") + } + } - #[test] - fn future_set_is_updated_to_none_when_migration_has_already_started() { - let mut future_new_set = Some(Default::default()); - let mut new_snapshot = KeyServerSetSnapshot { - migration: Some(Default::default()), - ..Default::default() - }; - let new_snapshot_copy = new_snapshot.clone(); - update_future_set(&mut future_new_set, &mut new_snapshot, Default::default()); - assert_eq!(future_new_set, None); - assert_eq!(new_snapshot, new_snapshot_copy); - } + #[test] + fn future_set_is_updated_to_none_when_migration_has_already_started() { + let mut future_new_set = Some(Default::default()); + let mut new_snapshot = KeyServerSetSnapshot { + migration: Some(Default::default()), + ..Default::default() + }; + let new_snapshot_copy = new_snapshot.clone(); + update_future_set(&mut future_new_set, &mut new_snapshot, Default::default()); + assert_eq!(future_new_set, None); + assert_eq!(new_snapshot, new_snapshot_copy); + } - #[test] - fn future_set_is_updated_to_none_when_no_migration_is_required() { - let node_id = Default::default(); - let address1 = "127.0.0.1:12000".parse().unwrap(); - let address2 = "127.0.0.1:12001".parse().unwrap(); + #[test] + fn future_set_is_updated_to_none_when_no_migration_is_required() { + let node_id = Default::default(); + let address1 = "127.0.0.1:12000".parse().unwrap(); + let address2 = "127.0.0.1:12001".parse().unwrap(); - // addresses are different, but node set is the same => no migration is required - let mut future_new_set = Some(Default::default()); - let mut new_snapshot = KeyServerSetSnapshot { - current_set: vec![(node_id, 
address1)].into_iter().collect(), - new_set: vec![(node_id, address2)].into_iter().collect(), - ..Default::default() - }; - let new_snapshot_copy = new_snapshot.clone(); - update_future_set(&mut future_new_set, &mut new_snapshot, Default::default()); - assert_eq!(future_new_set, None); - assert_eq!(new_snapshot, new_snapshot_copy); + // addresses are different, but node set is the same => no migration is required + let mut future_new_set = Some(Default::default()); + let mut new_snapshot = KeyServerSetSnapshot { + current_set: vec![(node_id, address1)].into_iter().collect(), + new_set: vec![(node_id, address2)].into_iter().collect(), + ..Default::default() + }; + let new_snapshot_copy = new_snapshot.clone(); + update_future_set(&mut future_new_set, &mut new_snapshot, Default::default()); + assert_eq!(future_new_set, None); + assert_eq!(new_snapshot, new_snapshot_copy); - // everything is the same => no migration is required - let mut future_new_set = Some(Default::default()); - let mut new_snapshot = KeyServerSetSnapshot { - current_set: vec![(node_id, address1)].into_iter().collect(), - new_set: vec![(node_id, address1)].into_iter().collect(), - ..Default::default() - }; - let new_snapshot_copy = new_snapshot.clone(); - update_future_set(&mut future_new_set, &mut new_snapshot, Default::default()); - assert_eq!(future_new_set, None); - assert_eq!(new_snapshot, new_snapshot_copy); - } + // everything is the same => no migration is required + let mut future_new_set = Some(Default::default()); + let mut new_snapshot = KeyServerSetSnapshot { + current_set: vec![(node_id, address1)].into_iter().collect(), + new_set: vec![(node_id, address1)].into_iter().collect(), + ..Default::default() + }; + let new_snapshot_copy = new_snapshot.clone(); + update_future_set(&mut future_new_set, &mut new_snapshot, Default::default()); + assert_eq!(future_new_set, None); + assert_eq!(new_snapshot, new_snapshot_copy); + } - #[test] - fn future_set_is_initialized() { - let address = 
"127.0.0.1:12000".parse().unwrap(); + #[test] + fn future_set_is_initialized() { + let address = "127.0.0.1:12000".parse().unwrap(); - let mut future_new_set = None; - let mut new_snapshot = KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(2.into(), address)].into_iter().collect(), - ..Default::default() - }; - update_future_set(&mut future_new_set, &mut new_snapshot, Default::default()); - assert_eq!(future_new_set, Some(FutureNewSet { - new_set: vec![(2.into(), address)].into_iter().collect(), - block: Default::default(), - })); - assert_eq!(new_snapshot, KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(1.into(), address)].into_iter().collect(), - ..Default::default() - }); - } + let mut future_new_set = None; + let mut new_snapshot = KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(2.into(), address)].into_iter().collect(), + ..Default::default() + }; + update_future_set(&mut future_new_set, &mut new_snapshot, Default::default()); + assert_eq!( + future_new_set, + Some(FutureNewSet { + new_set: vec![(2.into(), address)].into_iter().collect(), + block: Default::default(), + }) + ); + assert_eq!( + new_snapshot, + KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(1.into(), address)].into_iter().collect(), + ..Default::default() + } + ); + } - #[test] - fn future_set_is_updated_when_set_differs() { - let address = "127.0.0.1:12000".parse().unwrap(); + #[test] + fn future_set_is_updated_when_set_differs() { + let address = "127.0.0.1:12000".parse().unwrap(); - let mut future_new_set = Some(FutureNewSet { - new_set: vec![(2.into(), address)].into_iter().collect(), - block: Default::default(), - }); - let mut new_snapshot = KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(3.into(), 
address)].into_iter().collect(), - ..Default::default() - }; - update_future_set(&mut future_new_set, &mut new_snapshot, 1.into()); - assert_eq!(future_new_set, Some(FutureNewSet { - new_set: vec![(3.into(), address)].into_iter().collect(), - block: 1.into(), - })); - assert_eq!(new_snapshot, KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(1.into(), address)].into_iter().collect(), - ..Default::default() - }); - } + let mut future_new_set = Some(FutureNewSet { + new_set: vec![(2.into(), address)].into_iter().collect(), + block: Default::default(), + }); + let mut new_snapshot = KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(3.into(), address)].into_iter().collect(), + ..Default::default() + }; + update_future_set(&mut future_new_set, &mut new_snapshot, 1.into()); + assert_eq!( + future_new_set, + Some(FutureNewSet { + new_set: vec![(3.into(), address)].into_iter().collect(), + block: 1.into(), + }) + ); + assert_eq!( + new_snapshot, + KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(1.into(), address)].into_iter().collect(), + ..Default::default() + } + ); + } - #[test] - fn future_set_is_not_updated_when_set_is_the_same() { - let address = "127.0.0.1:12000".parse().unwrap(); + #[test] + fn future_set_is_not_updated_when_set_is_the_same() { + let address = "127.0.0.1:12000".parse().unwrap(); - let mut future_new_set = Some(FutureNewSet { - new_set: vec![(2.into(), address)].into_iter().collect(), - block: Default::default(), - }); - let mut new_snapshot = KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(2.into(), address)].into_iter().collect(), - ..Default::default() - }; - update_future_set(&mut future_new_set, &mut new_snapshot, 1.into()); - assert_eq!(future_new_set, Some(FutureNewSet { - new_set: vec![(2.into(), 
address)].into_iter().collect(), - block: Default::default(), - })); - assert_eq!(new_snapshot, KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(1.into(), address)].into_iter().collect(), - ..Default::default() - }); - } + let mut future_new_set = Some(FutureNewSet { + new_set: vec![(2.into(), address)].into_iter().collect(), + block: Default::default(), + }); + let mut new_snapshot = KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(2.into(), address)].into_iter().collect(), + ..Default::default() + }; + update_future_set(&mut future_new_set, &mut new_snapshot, 1.into()); + assert_eq!( + future_new_set, + Some(FutureNewSet { + new_set: vec![(2.into(), address)].into_iter().collect(), + block: Default::default(), + }) + ); + assert_eq!( + new_snapshot, + KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(1.into(), address)].into_iter().collect(), + ..Default::default() + } + ); + } - #[test] - fn when_updating_confirmations_nothing_is_changed_if_no_future_set() { - let address = "127.0.0.1:12000".parse().unwrap(); + #[test] + fn when_updating_confirmations_nothing_is_changed_if_no_future_set() { + let address = "127.0.0.1:12000".parse().unwrap(); - let mut future_new_set = None; - let mut snapshot = KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(1.into(), address)].into_iter().collect(), - ..Default::default() - }; - let snapshot_copy = snapshot.clone(); - update_number_of_confirmations( - &|| 1.into(), - &|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED), - &mut future_new_set, &mut snapshot); - assert_eq!(future_new_set, None); - assert_eq!(snapshot, snapshot_copy); - } + let mut future_new_set = None; + let mut snapshot = KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(1.into(), 
address)].into_iter().collect(), + ..Default::default() + }; + let snapshot_copy = snapshot.clone(); + update_number_of_confirmations( + &|| 1.into(), + &|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED), + &mut future_new_set, + &mut snapshot, + ); + assert_eq!(future_new_set, None); + assert_eq!(snapshot, snapshot_copy); + } - #[test] - fn when_updating_confirmations_migration_is_scheduled() { - let address = "127.0.0.1:12000".parse().unwrap(); + #[test] + fn when_updating_confirmations_migration_is_scheduled() { + let address = "127.0.0.1:12000".parse().unwrap(); - let mut future_new_set = Some(FutureNewSet { - new_set: vec![(2.into(), address)].into_iter().collect(), - block: Default::default(), - }); - let mut snapshot = KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(1.into(), address)].into_iter().collect(), - ..Default::default() - }; - update_number_of_confirmations( - &|| 1.into(), - &|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED), - &mut future_new_set, &mut snapshot); - assert_eq!(future_new_set, None); - assert_eq!(snapshot, KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(2.into(), address)].into_iter().collect(), - ..Default::default() - }); - } + let mut future_new_set = Some(FutureNewSet { + new_set: vec![(2.into(), address)].into_iter().collect(), + block: Default::default(), + }); + let mut snapshot = KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(1.into(), address)].into_iter().collect(), + ..Default::default() + }; + update_number_of_confirmations( + &|| 1.into(), + &|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED), + &mut future_new_set, + &mut snapshot, + ); + assert_eq!(future_new_set, None); + assert_eq!( + snapshot, + KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(2.into(), address)].into_iter().collect(), + ..Default::default() + } + 
); + } - #[test] - fn when_updating_confirmations_migration_is_not_scheduled_when_not_enough_confirmations() { - let address = "127.0.0.1:12000".parse().unwrap(); + #[test] + fn when_updating_confirmations_migration_is_not_scheduled_when_not_enough_confirmations() { + let address = "127.0.0.1:12000".parse().unwrap(); - let mut future_new_set = Some(FutureNewSet { - new_set: vec![(2.into(), address)].into_iter().collect(), - block: Default::default(), - }); - let mut snapshot = KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(1.into(), address)].into_iter().collect(), - ..Default::default() - }; - let future_new_set_copy = future_new_set.clone(); - let snapshot_copy = snapshot.clone(); - update_number_of_confirmations( - &|| 1.into(), - &|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED - 1), - &mut future_new_set, &mut snapshot); - assert_eq!(future_new_set, future_new_set_copy); - assert_eq!(snapshot, snapshot_copy); - } + let mut future_new_set = Some(FutureNewSet { + new_set: vec![(2.into(), address)].into_iter().collect(), + block: Default::default(), + }); + let mut snapshot = KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(1.into(), address)].into_iter().collect(), + ..Default::default() + }; + let future_new_set_copy = future_new_set.clone(); + let snapshot_copy = snapshot.clone(); + update_number_of_confirmations( + &|| 1.into(), + &|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED - 1), + &mut future_new_set, + &mut snapshot, + ); + assert_eq!(future_new_set, future_new_set_copy); + assert_eq!(snapshot, snapshot_copy); + } - #[test] - fn when_updating_confirmations_migration_is_reset_when_reorganized() { - let address = "127.0.0.1:12000".parse().unwrap(); + #[test] + fn when_updating_confirmations_migration_is_reset_when_reorganized() { + let address = "127.0.0.1:12000".parse().unwrap(); - let mut future_new_set = Some(FutureNewSet { - new_set: vec![(2.into(), 
address)].into_iter().collect(), - block: 1.into(), - }); - let mut snapshot = KeyServerSetSnapshot { - current_set: vec![(1.into(), address)].into_iter().collect(), - new_set: vec![(1.into(), address)].into_iter().collect(), - ..Default::default() - }; - let snapshot_copy = snapshot.clone(); - update_number_of_confirmations( - &|| 2.into(), - &|_| None, - &mut future_new_set, &mut snapshot); - assert_eq!(future_new_set, Some(FutureNewSet { - new_set: vec![(2.into(), address)].into_iter().collect(), - block: 2.into(), - })); - assert_eq!(snapshot, snapshot_copy); - } + let mut future_new_set = Some(FutureNewSet { + new_set: vec![(2.into(), address)].into_iter().collect(), + block: 1.into(), + }); + let mut snapshot = KeyServerSetSnapshot { + current_set: vec![(1.into(), address)].into_iter().collect(), + new_set: vec![(1.into(), address)].into_iter().collect(), + ..Default::default() + }; + let snapshot_copy = snapshot.clone(); + update_number_of_confirmations(&|| 2.into(), &|_| None, &mut future_new_set, &mut snapshot); + assert_eq!( + future_new_set, + Some(FutureNewSet { + new_set: vec![(2.into(), address)].into_iter().collect(), + block: 2.into(), + }) + ); + assert_eq!(snapshot, snapshot_copy); + } } diff --git a/secret-store/src/key_storage.rs b/secret-store/src/key_storage.rs index 396f2c7fb..e489cece0 100644 --- a/secret-store/src/key_storage.rs +++ b/secret-store/src/key_storage.rs @@ -14,15 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::BTreeMap; -use std::sync::Arc; -use serde_json; -use tiny_keccak::Keccak; -use ethereum_types::{H256, Address}; -use ethkey::{Secret, Public, public_to_address}; +use ethereum_types::{Address, H256}; +use ethkey::{public_to_address, Public, Secret}; use kvdb::KeyValueDB; -use types::{Error, ServerKeyId, NodeId}; -use serialization::{SerializablePublic, SerializableSecret, SerializableH256, SerializableAddress}; +use serde_json; +use serialization::{ + SerializableAddress, SerializableH256, SerializablePublic, SerializableSecret, +}; +use std::{collections::BTreeMap, sync::Arc}; +use tiny_keccak::Keccak; +use types::{Error, NodeId, ServerKeyId}; /// Key of version value. const DB_META_KEY_VERSION: &'static [u8; 7] = b"version"; @@ -36,493 +37,547 @@ type CurrentSerializableDocumentKeyVersion = SerializableDocumentKeyShareVersion /// Encrypted key share, stored by key storage on the single key server. #[derive(Debug, Default, Clone, PartialEq)] pub struct DocumentKeyShare { - /// Author of the entry. - pub author: Address, - /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). - pub threshold: usize, - /// Server public key. - pub public: Public, - /// Common (shared) encryption point. - pub common_point: Option, - /// Encrypted point. - pub encrypted_point: Option, - /// Key share versions. - pub versions: Vec, + /// Author of the entry. + pub author: Address, + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Server public key. + pub public: Public, + /// Common (shared) encryption point. + pub common_point: Option, + /// Encrypted point. + pub encrypted_point: Option, + /// Key share versions. + pub versions: Vec, } /// Versioned portion of document key share. #[derive(Debug, Clone, PartialEq)] pub struct DocumentKeyShareVersion { - /// Version hash (Keccak(time + id_numbers)). - pub hash: H256, - /// Nodes ids numbers. 
- pub id_numbers: BTreeMap, - /// Node secret share. - pub secret_share: Secret, + /// Version hash (Keccak(time + id_numbers)). + pub hash: H256, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Node secret share. + pub secret_share: Secret, } /// Document encryption keys storage pub trait KeyStorage: Send + Sync { - /// Insert document encryption key - fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>; - /// Update document encryption key - fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>; - /// Get document encryption key - fn get(&self, document: &ServerKeyId) -> Result, Error>; - /// Remove document encryption key - fn remove(&self, document: &ServerKeyId) -> Result<(), Error>; - /// Clears the database - fn clear(&self) -> Result<(), Error>; - /// Check if storage contains document encryption key - fn contains(&self, document: &ServerKeyId) -> bool; - /// Iterate through storage - fn iter<'a>(&'a self) -> Box + 'a>; + /// Insert document encryption key + fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>; + /// Update document encryption key + fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>; + /// Get document encryption key + fn get(&self, document: &ServerKeyId) -> Result, Error>; + /// Remove document encryption key + fn remove(&self, document: &ServerKeyId) -> Result<(), Error>; + /// Clears the database + fn clear(&self) -> Result<(), Error>; + /// Check if storage contains document encryption key + fn contains(&self, document: &ServerKeyId) -> bool; + /// Iterate through storage + fn iter<'a>(&'a self) -> Box + 'a>; } /// Persistent document encryption keys storage pub struct PersistentKeyStorage { - db: Arc, + db: Arc, } /// Persistent document encryption keys storage iterator pub struct PersistentKeyStorageIterator<'a> { - iter: Box, Box<[u8]>)> + 'a>, + iter: Box, Box<[u8]>)> + 'a>, } /// V0 of encrypted 
key share, as it is stored by key storage on the single key server. #[derive(Serialize, Deserialize)] pub struct SerializableDocumentKeyShareV0 { - /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). - pub threshold: usize, - /// Nodes ids numbers. - pub id_numbers: BTreeMap, - /// Node secret share. - pub secret_share: SerializableSecret, - /// Common (shared) encryption point. - pub common_point: SerializablePublic, - /// Encrypted point. - pub encrypted_point: SerializablePublic, + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Node secret share. + pub secret_share: SerializableSecret, + /// Common (shared) encryption point. + pub common_point: SerializablePublic, + /// Encrypted point. + pub encrypted_point: SerializablePublic, } /// V1 of encrypted key share, as it is stored by key storage on the single key server. #[derive(Serialize, Deserialize)] struct SerializableDocumentKeyShareV1 { - /// Author of the entry. - pub author: SerializablePublic, - /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). - pub threshold: usize, - /// Nodes ids numbers. - pub id_numbers: BTreeMap, - /// Node secret share. - pub secret_share: SerializableSecret, - /// Common (shared) encryption point. - pub common_point: Option, - /// Encrypted point. - pub encrypted_point: Option, + /// Author of the entry. + pub author: SerializablePublic, + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Node secret share. + pub secret_share: SerializableSecret, + /// Common (shared) encryption point. + pub common_point: Option, + /// Encrypted point. + pub encrypted_point: Option, } /// V2 of encrypted key share, as it is stored by key storage on the single key server. 
#[derive(Serialize, Deserialize)] struct SerializableDocumentKeyShareV2 { - /// Author of the entry. - pub author: SerializablePublic, - /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). - pub threshold: usize, - /// Server public. - pub public: SerializablePublic, - /// Common (shared) encryption point. - pub common_point: Option, - /// Encrypted point. - pub encrypted_point: Option, - /// Versions. - pub versions: Vec + /// Author of the entry. + pub author: SerializablePublic, + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Server public. + pub public: SerializablePublic, + /// Common (shared) encryption point. + pub common_point: Option, + /// Encrypted point. + pub encrypted_point: Option, + /// Versions. + pub versions: Vec, } /// V2 of encrypted key share version, as it is stored by key storage on the single key server. #[derive(Serialize, Deserialize)] struct SerializableDocumentKeyShareVersionV2 { - /// Version hash. - pub hash: SerializableH256, - /// Nodes ids numbers. - pub id_numbers: BTreeMap, - /// Node secret share. - pub secret_share: SerializableSecret, + /// Version hash. + pub hash: SerializableH256, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Node secret share. + pub secret_share: SerializableSecret, } /// V3 of encrypted key share, as it is stored by key storage on the single key server. #[derive(Serialize, Deserialize)] struct SerializableDocumentKeyShareV3 { - /// Author of the entry. - pub author: SerializableAddress, - /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). - pub threshold: usize, - /// Server public. - pub public: SerializablePublic, - /// Common (shared) encryption point. - pub common_point: Option, - /// Encrypted point. - pub encrypted_point: Option, - /// Versions. - pub versions: Vec + /// Author of the entry. 
+ pub author: SerializableAddress, + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Server public. + pub public: SerializablePublic, + /// Common (shared) encryption point. + pub common_point: Option, + /// Encrypted point. + pub encrypted_point: Option, + /// Versions. + pub versions: Vec, } /// V3 of encrypted key share version, as it is stored by key storage on the single key server. type SerializableDocumentKeyShareVersionV3 = SerializableDocumentKeyShareVersionV2; impl PersistentKeyStorage { - /// Create new persistent document encryption keys storage - pub fn new(db: Arc) -> Result { - let db = upgrade_db(db)?; + /// Create new persistent document encryption keys storage + pub fn new(db: Arc) -> Result { + let db = upgrade_db(db)?; - Ok(PersistentKeyStorage { - db: db, - }) - } + Ok(PersistentKeyStorage { db: db }) + } } fn upgrade_db(db: Arc) -> Result, Error> { - let version = db.get(None, DB_META_KEY_VERSION)?; - let version = version.and_then(|v| v.get(0).cloned()).unwrap_or(0); - match version { - 0 => { - let mut batch = db.transaction(); - batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); - for (db_key, db_value) in db.iter(None).into_iter().filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { - let v0_key = serde_json::from_slice::(&db_value).map_err(|e| Error::Database(e.to_string()))?; - let current_key = CurrentSerializableDocumentKeyShare { - // author is used in separate generation + encrypt sessions. - // in v0 there have been only simultaneous GenEnc sessions. 
- author: Address::default().into(), // added in v1 - threshold: v0_key.threshold, - public: Public::default().into(), // addded in v2 - common_point: Some(v0_key.common_point), - encrypted_point: Some(v0_key.encrypted_point), - versions: vec![CurrentSerializableDocumentKeyVersion { - hash: DocumentKeyShareVersion::data_hash(v0_key.id_numbers.iter().map(|(k, v)| (&***k, &****v))).into(), - id_numbers: v0_key.id_numbers, - secret_share: v0_key.secret_share, - }], - }; - let db_value = serde_json::to_vec(¤t_key).map_err(|e| Error::Database(e.to_string()))?; - batch.put(None, &*db_key, &*db_value); - } - db.write(batch)?; - Ok(db) - }, - 1 => { - let mut batch = db.transaction(); - batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); - for (db_key, db_value) in db.iter(None).into_iter().filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { - let v1_key = serde_json::from_slice::(&db_value).map_err(|e| Error::Database(e.to_string()))?; - let current_key = CurrentSerializableDocumentKeyShare { - author: public_to_address(&v1_key.author).into(), // added in v1 + changed in v3 - threshold: v1_key.threshold, - public: Public::default().into(), // addded in v2 - common_point: v1_key.common_point, - encrypted_point: v1_key.encrypted_point, - versions: vec![CurrentSerializableDocumentKeyVersion { - hash: DocumentKeyShareVersion::data_hash(v1_key.id_numbers.iter().map(|(k, v)| (&***k, &****v))).into(), - id_numbers: v1_key.id_numbers, - secret_share: v1_key.secret_share, - }], - }; - let db_value = serde_json::to_vec(¤t_key).map_err(|e| Error::Database(e.to_string()))?; - batch.put(None, &*db_key, &*db_value); - } - db.write(batch)?; - Ok(db) - } - 2 => { - let mut batch = db.transaction(); - batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); - for (db_key, db_value) in db.iter(None).into_iter().filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { - let v2_key = serde_json::from_slice::(&db_value).map_err(|e| Error::Database(e.to_string()))?; - let current_key = 
CurrentSerializableDocumentKeyShare { - author: public_to_address(&v2_key.author).into(), // changed in v3 - threshold: v2_key.threshold, - public: v2_key.public, - common_point: v2_key.common_point, - encrypted_point: v2_key.encrypted_point, - versions: v2_key.versions, - }; - let db_value = serde_json::to_vec(¤t_key).map_err(|e| Error::Database(e.to_string()))?; - batch.put(None, &*db_key, &*db_value); - } - db.write(batch)?; - Ok(db) - }, - 3 => Ok(db), - _ => Err(Error::Database(format!("unsupported SecretStore database version: {}", version))), - } + let version = db.get(None, DB_META_KEY_VERSION)?; + let version = version.and_then(|v| v.get(0).cloned()).unwrap_or(0); + match version { + 0 => { + let mut batch = db.transaction(); + batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); + for (db_key, db_value) in db + .iter(None) + .into_iter() + .filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) + { + let v0_key = serde_json::from_slice::(&db_value) + .map_err(|e| Error::Database(e.to_string()))?; + let current_key = CurrentSerializableDocumentKeyShare { + // author is used in separate generation + encrypt sessions. + // in v0 there have been only simultaneous GenEnc sessions. 
+ author: Address::default().into(), // added in v1 + threshold: v0_key.threshold, + public: Public::default().into(), // addded in v2 + common_point: Some(v0_key.common_point), + encrypted_point: Some(v0_key.encrypted_point), + versions: vec![CurrentSerializableDocumentKeyVersion { + hash: DocumentKeyShareVersion::data_hash( + v0_key.id_numbers.iter().map(|(k, v)| (&***k, &****v)), + ) + .into(), + id_numbers: v0_key.id_numbers, + secret_share: v0_key.secret_share, + }], + }; + let db_value = + serde_json::to_vec(¤t_key).map_err(|e| Error::Database(e.to_string()))?; + batch.put(None, &*db_key, &*db_value); + } + db.write(batch)?; + Ok(db) + } + 1 => { + let mut batch = db.transaction(); + batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); + for (db_key, db_value) in db + .iter(None) + .into_iter() + .filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) + { + let v1_key = serde_json::from_slice::(&db_value) + .map_err(|e| Error::Database(e.to_string()))?; + let current_key = CurrentSerializableDocumentKeyShare { + author: public_to_address(&v1_key.author).into(), // added in v1 + changed in v3 + threshold: v1_key.threshold, + public: Public::default().into(), // addded in v2 + common_point: v1_key.common_point, + encrypted_point: v1_key.encrypted_point, + versions: vec![CurrentSerializableDocumentKeyVersion { + hash: DocumentKeyShareVersion::data_hash( + v1_key.id_numbers.iter().map(|(k, v)| (&***k, &****v)), + ) + .into(), + id_numbers: v1_key.id_numbers, + secret_share: v1_key.secret_share, + }], + }; + let db_value = + serde_json::to_vec(¤t_key).map_err(|e| Error::Database(e.to_string()))?; + batch.put(None, &*db_key, &*db_value); + } + db.write(batch)?; + Ok(db) + } + 2 => { + let mut batch = db.transaction(); + batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); + for (db_key, db_value) in db + .iter(None) + .into_iter() + .filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) + { + let v2_key = serde_json::from_slice::(&db_value) + .map_err(|e| 
Error::Database(e.to_string()))?; + let current_key = CurrentSerializableDocumentKeyShare { + author: public_to_address(&v2_key.author).into(), // changed in v3 + threshold: v2_key.threshold, + public: v2_key.public, + common_point: v2_key.common_point, + encrypted_point: v2_key.encrypted_point, + versions: v2_key.versions, + }; + let db_value = + serde_json::to_vec(¤t_key).map_err(|e| Error::Database(e.to_string()))?; + batch.put(None, &*db_key, &*db_value); + } + db.write(batch)?; + Ok(db) + } + 3 => Ok(db), + _ => Err(Error::Database(format!( + "unsupported SecretStore database version: {}", + version + ))), + } } impl KeyStorage for PersistentKeyStorage { - fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { - let key: CurrentSerializableDocumentKeyShare = key.into(); - let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?; - let mut batch = self.db.transaction(); - batch.put(None, &document, &key); - self.db.write(batch).map_err(Into::into) - } + fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { + let key: CurrentSerializableDocumentKeyShare = key.into(); + let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?; + let mut batch = self.db.transaction(); + batch.put(None, &document, &key); + self.db.write(batch).map_err(Into::into) + } - fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { - self.insert(document, key) - } + fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { + self.insert(document, key) + } - fn get(&self, document: &ServerKeyId) -> Result, Error> { - self.db.get(None, document) - .map_err(|e| Error::Database(e.to_string())) - .and_then(|key| match key { - None => Ok(None), - Some(key) => serde_json::from_slice::(&key) - .map_err(|e| Error::Database(e.to_string())) - .map(Into::into) - .map(Some), - }) - } + fn get(&self, document: &ServerKeyId) -> Result, 
Error> { + self.db + .get(None, document) + .map_err(|e| Error::Database(e.to_string())) + .and_then(|key| match key { + None => Ok(None), + Some(key) => serde_json::from_slice::(&key) + .map_err(|e| Error::Database(e.to_string())) + .map(Into::into) + .map(Some), + }) + } - fn remove(&self, document: &ServerKeyId) -> Result<(), Error> { - let mut batch = self.db.transaction(); - batch.delete(None, &document); - self.db.write(batch).map_err(Into::into) - } + fn remove(&self, document: &ServerKeyId) -> Result<(), Error> { + let mut batch = self.db.transaction(); + batch.delete(None, &document); + self.db.write(batch).map_err(Into::into) + } - fn clear(&self) -> Result<(), Error> { - let mut batch = self.db.transaction(); - for (key, _) in self.iter() { - batch.delete(None, &key); - } - self.db.write(batch) - .map_err(|e| Error::Database(e.to_string())) - } + fn clear(&self) -> Result<(), Error> { + let mut batch = self.db.transaction(); + for (key, _) in self.iter() { + batch.delete(None, &key); + } + self.db + .write(batch) + .map_err(|e| Error::Database(e.to_string())) + } - fn contains(&self, document: &ServerKeyId) -> bool { - self.db.get(None, document) - .map(|k| k.is_some()) - .unwrap_or(false) - } + fn contains(&self, document: &ServerKeyId) -> bool { + self.db + .get(None, document) + .map(|k| k.is_some()) + .unwrap_or(false) + } - fn iter<'a>(&'a self) -> Box + 'a> { - Box::new(PersistentKeyStorageIterator { - iter: self.db.iter(None), - }) - } + fn iter<'a>(&'a self) -> Box + 'a> { + Box::new(PersistentKeyStorageIterator { + iter: self.db.iter(None), + }) + } } impl<'a> Iterator for PersistentKeyStorageIterator<'a> { - type Item = (ServerKeyId, DocumentKeyShare); + type Item = (ServerKeyId, DocumentKeyShare); - fn next(&mut self) -> Option<(ServerKeyId, DocumentKeyShare)> { - self.iter.as_mut().next() - .and_then(|(db_key, db_val)| serde_json::from_slice::(&db_val) - .ok() - .map(|key| ((*db_key).into(), key.into()))) - } + fn next(&mut self) -> 
Option<(ServerKeyId, DocumentKeyShare)> { + self.iter.as_mut().next().and_then(|(db_key, db_val)| { + serde_json::from_slice::(&db_val) + .ok() + .map(|key| ((*db_key).into(), key.into())) + }) + } } impl DocumentKeyShare { - /// Get last version reference. - #[cfg(test)] - pub fn last_version(&self) -> Result<&DocumentKeyShareVersion, Error> { - self.versions.iter().rev() - .nth(0) - .ok_or_else(|| Error::Database("key version is not found".into())) - } + /// Get last version reference. + #[cfg(test)] + pub fn last_version(&self) -> Result<&DocumentKeyShareVersion, Error> { + self.versions + .iter() + .rev() + .nth(0) + .ok_or_else(|| Error::Database("key version is not found".into())) + } - /// Get given version reference. - pub fn version(&self, version: &H256) -> Result<&DocumentKeyShareVersion, Error> { - self.versions.iter().rev() - .find(|v| &v.hash == version) - .ok_or_else(|| Error::Database("key version is not found".into())) - } + /// Get given version reference. + pub fn version(&self, version: &H256) -> Result<&DocumentKeyShareVersion, Error> { + self.versions + .iter() + .rev() + .find(|v| &v.hash == version) + .ok_or_else(|| Error::Database("key version is not found".into())) + } } impl DocumentKeyShareVersion { - /// Create new version - pub fn new(id_numbers: BTreeMap, secret_share: Secret) -> Self { - DocumentKeyShareVersion { - hash: Self::data_hash(id_numbers.iter().map(|(k, v)| (&**k, &***v))), - id_numbers: id_numbers, - secret_share: secret_share, - } - } + /// Create new version + pub fn new(id_numbers: BTreeMap, secret_share: Secret) -> Self { + DocumentKeyShareVersion { + hash: Self::data_hash(id_numbers.iter().map(|(k, v)| (&**k, &***v))), + id_numbers: id_numbers, + secret_share: secret_share, + } + } - /// Calculate hash of given version data. - pub fn data_hash<'a, I>(id_numbers: I) -> H256 where I: Iterator { - let mut nodes_keccak = Keccak::new_keccak256(); + /// Calculate hash of given version data. 
+ pub fn data_hash<'a, I>(id_numbers: I) -> H256 + where + I: Iterator, + { + let mut nodes_keccak = Keccak::new_keccak256(); - for (node, node_number) in id_numbers { - nodes_keccak.update(node); - nodes_keccak.update(node_number); - } + for (node, node_number) in id_numbers { + nodes_keccak.update(node); + nodes_keccak.update(node_number); + } - let mut nodes_keccak_value = [0u8; 32]; - nodes_keccak.finalize(&mut nodes_keccak_value); + let mut nodes_keccak_value = [0u8; 32]; + nodes_keccak.finalize(&mut nodes_keccak_value); - nodes_keccak_value.into() - } + nodes_keccak_value.into() + } } impl From for SerializableDocumentKeyShareV3 { - fn from(key: DocumentKeyShare) -> Self { - SerializableDocumentKeyShareV3 { - author: key.author.into(), - threshold: key.threshold, - public: key.public.into(), - common_point: key.common_point.map(Into::into), - encrypted_point: key.encrypted_point.map(Into::into), - versions: key.versions.into_iter().map(Into::into).collect(), - } - } + fn from(key: DocumentKeyShare) -> Self { + SerializableDocumentKeyShareV3 { + author: key.author.into(), + threshold: key.threshold, + public: key.public.into(), + common_point: key.common_point.map(Into::into), + encrypted_point: key.encrypted_point.map(Into::into), + versions: key.versions.into_iter().map(Into::into).collect(), + } + } } impl From for SerializableDocumentKeyShareVersionV3 { - fn from(version: DocumentKeyShareVersion) -> Self { - SerializableDocumentKeyShareVersionV3 { - hash: version.hash.into(), - id_numbers: version.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), - secret_share: version.secret_share.into(), - } - } + fn from(version: DocumentKeyShareVersion) -> Self { + SerializableDocumentKeyShareVersionV3 { + hash: version.hash.into(), + id_numbers: version + .id_numbers + .into_iter() + .map(|(k, v)| (k.into(), v.into())) + .collect(), + secret_share: version.secret_share.into(), + } + } } impl From for DocumentKeyShare { - fn from(key: 
SerializableDocumentKeyShareV3) -> Self { - DocumentKeyShare { - author: key.author.into(), - threshold: key.threshold, - public: key.public.into(), - common_point: key.common_point.map(Into::into), - encrypted_point: key.encrypted_point.map(Into::into), - versions: key.versions.into_iter() - .map(|v| DocumentKeyShareVersion { - hash: v.hash.into(), - id_numbers: v.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), - secret_share: v.secret_share.into(), - }) - .collect(), - } - } + fn from(key: SerializableDocumentKeyShareV3) -> Self { + DocumentKeyShare { + author: key.author.into(), + threshold: key.threshold, + public: key.public.into(), + common_point: key.common_point.map(Into::into), + encrypted_point: key.encrypted_point.map(Into::into), + versions: key + .versions + .into_iter() + .map(|v| DocumentKeyShareVersion { + hash: v.hash.into(), + id_numbers: v + .id_numbers + .into_iter() + .map(|(k, v)| (k.into(), v.into())) + .collect(), + secret_share: v.secret_share.into(), + }) + .collect(), + } + } } #[cfg(test)] pub mod tests { - extern crate tempdir; + extern crate tempdir; - use std::collections::HashMap; - use std::sync::Arc; - use parking_lot::RwLock; - use serde_json; - use self::tempdir::TempDir; - use ethereum_types::{Address, H256}; - use ethkey::{Random, Generator, Public, Secret, public_to_address}; - use kvdb_rocksdb::Database; - use types::{Error, ServerKeyId}; - use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare, - DocumentKeyShareVersion, CurrentSerializableDocumentKeyShare, upgrade_db, SerializableDocumentKeyShareV0, - SerializableDocumentKeyShareV1, SerializableDocumentKeyShareV2, SerializableDocumentKeyShareVersionV2}; + use self::tempdir::TempDir; + use super::{ + upgrade_db, CurrentSerializableDocumentKeyShare, DocumentKeyShare, DocumentKeyShareVersion, + KeyStorage, PersistentKeyStorage, SerializableDocumentKeyShareV0, + SerializableDocumentKeyShareV1, 
SerializableDocumentKeyShareV2, + SerializableDocumentKeyShareVersionV2, CURRENT_VERSION, DB_META_KEY_VERSION, + }; + use ethereum_types::{Address, H256}; + use ethkey::{public_to_address, Generator, Public, Random, Secret}; + use kvdb_rocksdb::Database; + use parking_lot::RwLock; + use serde_json; + use std::{collections::HashMap, sync::Arc}; + use types::{Error, ServerKeyId}; - /// In-memory document encryption keys storage - #[derive(Default)] - pub struct DummyKeyStorage { - keys: RwLock>, - } + /// In-memory document encryption keys storage + #[derive(Default)] + pub struct DummyKeyStorage { + keys: RwLock>, + } - impl KeyStorage for DummyKeyStorage { - fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { - self.keys.write().insert(document, key); - Ok(()) - } + impl KeyStorage for DummyKeyStorage { + fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { + self.keys.write().insert(document, key); + Ok(()) + } - fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { - self.keys.write().insert(document, key); - Ok(()) - } + fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { + self.keys.write().insert(document, key); + Ok(()) + } - fn get(&self, document: &ServerKeyId) -> Result, Error> { - Ok(self.keys.read().get(document).cloned()) - } + fn get(&self, document: &ServerKeyId) -> Result, Error> { + Ok(self.keys.read().get(document).cloned()) + } - fn remove(&self, document: &ServerKeyId) -> Result<(), Error> { - self.keys.write().remove(document); - Ok(()) - } + fn remove(&self, document: &ServerKeyId) -> Result<(), Error> { + self.keys.write().remove(document); + Ok(()) + } - fn clear(&self) -> Result<(), Error> { - self.keys.write().clear(); - Ok(()) - } + fn clear(&self) -> Result<(), Error> { + self.keys.write().clear(); + Ok(()) + } - fn contains(&self, document: &ServerKeyId) -> bool { - self.keys.read().contains_key(document) - 
} + fn contains(&self, document: &ServerKeyId) -> bool { + self.keys.read().contains_key(document) + } - fn iter<'a>(&'a self) -> Box + 'a> { - Box::new(self.keys.read().clone().into_iter()) - } - } + fn iter<'a>(&'a self) -> Box + 'a> { + Box::new(self.keys.read().clone().into_iter()) + } + } - #[test] - fn persistent_key_storage() { - let tempdir = TempDir::new("").unwrap(); - let key1 = ServerKeyId::from(1); - let value1 = DocumentKeyShare { - author: Default::default(), - threshold: 100, - public: Public::default(), - common_point: Some(Random.generate().unwrap().public().clone()), - encrypted_point: Some(Random.generate().unwrap().public().clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: vec![ - (Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()) - ].into_iter().collect(), - secret_share: Random.generate().unwrap().secret().clone(), - }], - }; - let key2 = ServerKeyId::from(2); - let value2 = DocumentKeyShare { - author: Default::default(), - threshold: 200, - public: Public::default(), - common_point: Some(Random.generate().unwrap().public().clone()), - encrypted_point: Some(Random.generate().unwrap().public().clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: vec![ - (Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()) - ].into_iter().collect(), - secret_share: Random.generate().unwrap().secret().clone(), - }], - }; - let key3 = ServerKeyId::from(3); + #[test] + fn persistent_key_storage() { + let tempdir = TempDir::new("").unwrap(); + let key1 = ServerKeyId::from(1); + let value1 = DocumentKeyShare { + author: Default::default(), + threshold: 100, + public: Public::default(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), + versions: vec![DocumentKeyShareVersion { + hash: Default::default(), + id_numbers: 
vec![( + Random.generate().unwrap().public().clone(), + Random.generate().unwrap().secret().clone(), + )] + .into_iter() + .collect(), + secret_share: Random.generate().unwrap().secret().clone(), + }], + }; + let key2 = ServerKeyId::from(2); + let value2 = DocumentKeyShare { + author: Default::default(), + threshold: 200, + public: Public::default(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), + versions: vec![DocumentKeyShareVersion { + hash: Default::default(), + id_numbers: vec![( + Random.generate().unwrap().public().clone(), + Random.generate().unwrap().secret().clone(), + )] + .into_iter() + .collect(), + secret_share: Random.generate().unwrap().secret().clone(), + }], + }; + let key3 = ServerKeyId::from(3); - let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); + let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); - let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap(); - key_storage.insert(key1.clone(), value1.clone()).unwrap(); - key_storage.insert(key2.clone(), value2.clone()).unwrap(); - assert_eq!(key_storage.get(&key1), Ok(Some(value1.clone()))); - assert_eq!(key_storage.get(&key2), Ok(Some(value2.clone()))); - assert_eq!(key_storage.get(&key3), Ok(None)); - drop(key_storage); + let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap(); + key_storage.insert(key1.clone(), value1.clone()).unwrap(); + key_storage.insert(key2.clone(), value2.clone()).unwrap(); + assert_eq!(key_storage.get(&key1), Ok(Some(value1.clone()))); + assert_eq!(key_storage.get(&key2), Ok(Some(value2.clone()))); + assert_eq!(key_storage.get(&key3), Ok(None)); + drop(key_storage); - let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); + let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); - let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap(); - 
assert_eq!(key_storage.get(&key1), Ok(Some(value1))); - assert_eq!(key_storage.get(&key2), Ok(Some(value2))); - assert_eq!(key_storage.get(&key3), Ok(None)); - } + let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap(); + assert_eq!(key_storage.get(&key1), Ok(Some(value1))); + assert_eq!(key_storage.get(&key2), Ok(Some(value2))); + assert_eq!(key_storage.get(&key3), Ok(None)); + } - #[test] - fn upgrade_db_from_0() { - let tempdir = TempDir::new("").unwrap(); - let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); + #[test] + fn upgrade_db_from_0() { + let tempdir = TempDir::new("").unwrap(); + let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); - // prepare v0 database - { - let key = serde_json::to_vec(&SerializableDocumentKeyShareV0 { + // prepare v0 database + { + let key = serde_json::to_vec(&SerializableDocumentKeyShareV0 { threshold: 777, id_numbers: vec![( "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), @@ -532,38 +587,49 @@ pub mod tests { common_point: "99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into(), encrypted_point: "7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into(), }).unwrap(); - let mut batch = db.transaction(); - batch.put(None, &[7], &key); - db.write(batch).unwrap(); - } + let mut batch = db.transaction(); + batch.put(None, &[7], &key); + db.write(batch).unwrap(); + } - // upgrade database - let db = upgrade_db(Arc::new(db)).unwrap(); + // upgrade database + let db = upgrade_db(Arc::new(db)).unwrap(); - // check upgrade - assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION); - let key = serde_json::from_slice::(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap(); - 
assert_eq!(Address::default(), key.author.clone().into()); - assert_eq!(777, key.threshold); - assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::().unwrap()), key.common_point.clone().map(Into::into)); - assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::().unwrap()), key.encrypted_point.clone().map(Into::into)); + // check upgrade + assert_eq!( + db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], + CURRENT_VERSION + ); + let key = serde_json::from_slice::( + &db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap(), + ) + .unwrap(); + assert_eq!(Address::default(), key.author.clone().into()); + assert_eq!(777, key.threshold); + assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::().unwrap()), key.common_point.clone().map(Into::into)); + assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::().unwrap()), key.encrypted_point.clone().map(Into::into)); - assert_eq!(key.versions.len(), 1); - assert_eq!(vec![( + assert_eq!(key.versions.len(), 1); + assert_eq!(vec![( "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::().unwrap(), "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::().unwrap(), )], key.versions[0].id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::>()); - assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::().unwrap(), key.versions[0].secret_share.clone().into()); - } + assert_eq!( + "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b" + .parse::() + .unwrap(), + 
key.versions[0].secret_share.clone().into() + ); + } - #[test] - fn upgrade_db_from_1() { - let tempdir = TempDir::new("").unwrap(); - let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); + #[test] + fn upgrade_db_from_1() { + let tempdir = TempDir::new("").unwrap(); + let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); - // prepare v1 database - { - let key = serde_json::to_vec(&SerializableDocumentKeyShareV1 { + // prepare v1 database + { + let key = serde_json::to_vec(&SerializableDocumentKeyShareV1 { author: "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), threshold: 777, id_numbers: vec![( @@ -574,40 +640,51 @@ pub mod tests { common_point: Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into()), encrypted_point: Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into()), }).unwrap(); - let mut batch = db.transaction(); - batch.put(None, DB_META_KEY_VERSION, &[1]); - batch.put(None, &[7], &key); - db.write(batch).unwrap(); - } + let mut batch = db.transaction(); + batch.put(None, DB_META_KEY_VERSION, &[1]); + batch.put(None, &[7], &key); + db.write(batch).unwrap(); + } - // upgrade database - let db = upgrade_db(Arc::new(db)).unwrap(); + // upgrade database + let db = upgrade_db(Arc::new(db)).unwrap(); - // check upgrade - assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION); - let key = serde_json::from_slice::(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap(); - assert_eq!(777, key.threshold); - assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::().unwrap()), 
key.common_point.clone().map(Into::into)); - assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::().unwrap()), key.encrypted_point.clone().map(Into::into)); - assert_eq!(key.author.0, public_to_address(&"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into())); + // check upgrade + assert_eq!( + db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], + CURRENT_VERSION + ); + let key = serde_json::from_slice::( + &db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap(), + ) + .unwrap(); + assert_eq!(777, key.threshold); + assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::().unwrap()), key.common_point.clone().map(Into::into)); + assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::().unwrap()), key.encrypted_point.clone().map(Into::into)); + assert_eq!(key.author.0, public_to_address(&"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into())); - assert_eq!(key.versions.len(), 1); - assert_eq!(vec![( + assert_eq!(key.versions.len(), 1); + assert_eq!(vec![( "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::().unwrap(), "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::().unwrap(), )], key.versions[0].id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::>()); - assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::().unwrap(), key.versions[0].secret_share.clone().into()); - } + assert_eq!( + 
"00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b" + .parse::() + .unwrap(), + key.versions[0].secret_share.clone().into() + ); + } - #[test] - fn upgrade_db_from_2() { - let tempdir = TempDir::new("").unwrap(); - let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); + #[test] + fn upgrade_db_from_2() { + let tempdir = TempDir::new("").unwrap(); + let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); - // prepare v2 database - { - let key = serde_json::to_vec(&SerializableDocumentKeyShareV2 { + // prepare v2 database + { + let key = serde_json::to_vec(&SerializableDocumentKeyShareV2 { author: "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), threshold: 777, common_point: Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into()), @@ -622,29 +699,40 @@ pub mod tests { secret_share: "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::().unwrap().into(), }], }).unwrap(); - let mut batch = db.transaction(); - batch.put(None, DB_META_KEY_VERSION, &[2]); - batch.put(None, &[7], &key); - db.write(batch).unwrap(); - } + let mut batch = db.transaction(); + batch.put(None, DB_META_KEY_VERSION, &[2]); + batch.put(None, &[7], &key); + db.write(batch).unwrap(); + } - // upgrade database - let db = upgrade_db(Arc::new(db)).unwrap(); + // upgrade database + let db = upgrade_db(Arc::new(db)).unwrap(); - // check upgrade - assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION); - let key = serde_json::from_slice::(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap(); - assert_eq!(777, key.threshold); - assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::().unwrap()), 
key.common_point.clone().map(Into::into)); - assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::().unwrap()), key.encrypted_point.clone().map(Into::into)); - assert_eq!(key.author.0, public_to_address(&"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse().unwrap())); + // check upgrade + assert_eq!( + db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], + CURRENT_VERSION + ); + let key = serde_json::from_slice::( + &db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap(), + ) + .unwrap(); + assert_eq!(777, key.threshold); + assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::().unwrap()), key.common_point.clone().map(Into::into)); + assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::().unwrap()), key.encrypted_point.clone().map(Into::into)); + assert_eq!(key.author.0, public_to_address(&"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse().unwrap())); - assert_eq!(key.versions.len(), 1); - assert_eq!(vec![( + assert_eq!(key.versions.len(), 1); + assert_eq!(vec![( "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::().unwrap(), "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::().unwrap(), )], key.versions[0].id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::>()); - assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::().unwrap(), key.versions[0].secret_share.clone().into()); - } + assert_eq!( + 
"00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b" + .parse::() + .unwrap(), + key.versions[0].secret_share.clone().into() + ); + } } diff --git a/secret-store/src/lib.rs b/secret-store/src/lib.rs index 810b1da98..915dc8a3e 100644 --- a/secret-store/src/lib.rs +++ b/secret-store/src/lib.rs @@ -60,117 +60,187 @@ extern crate kvdb_rocksdb; #[cfg(feature = "accounts")] extern crate ethcore_accounts as accounts; +mod helpers; mod key_server_cluster; mod types; -mod helpers; -mod traits; mod acl_storage; mod key_server; -mod key_storage; -mod serialization; mod key_server_set; -mod node_key_pair; +mod key_storage; mod listener; +mod node_key_pair; +mod serialization; +mod traits; mod trusted_client; -use std::sync::Arc; +use ethcore::{client::Client, miner::Miner}; use kvdb::KeyValueDB; -use ethcore::client::Client; -use ethcore::miner::Miner; -use sync::SyncProvider; use parity_runtime::Executor; +use std::sync::Arc; +use sync::SyncProvider; -pub use types::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public, - Error, NodeAddress, ContractAddress, ServiceConfiguration, ClusterConfiguration}; -pub use traits::{NodeKeyPair, KeyServer}; -pub use self::node_key_pair::PlainNodeKeyPair; #[cfg(feature = "accounts")] pub use self::node_key_pair::KeyStoreNodeKeyPair; +pub use self::node_key_pair::PlainNodeKeyPair; +pub use traits::{KeyServer, NodeKeyPair}; +pub use types::{ + ClusterConfiguration, ContractAddress, EncryptedDocumentKey, Error, NodeAddress, Public, + RequestSignature, ServerKeyId, ServiceConfiguration, +}; /// Start new key server instance -pub fn start(client: Arc, sync: Arc, miner: Arc, self_key_pair: Arc, mut config: ServiceConfiguration, - db: Arc, executor: Executor) -> Result, Error> -{ - let trusted_client = trusted_client::TrustedClient::new(self_key_pair.clone(), client.clone(), sync, miner); - let acl_storage: Arc = match config.acl_check_contract_address.take() { - Some(acl_check_contract_address) => 
acl_storage::OnChainAclStorage::new(trusted_client.clone(), acl_check_contract_address)?, - None => Arc::new(acl_storage::DummyAclStorage::default()), - }; +pub fn start( + client: Arc, + sync: Arc, + miner: Arc, + self_key_pair: Arc, + mut config: ServiceConfiguration, + db: Arc, + executor: Executor, +) -> Result, Error> { + let trusted_client = + trusted_client::TrustedClient::new(self_key_pair.clone(), client.clone(), sync, miner); + let acl_storage: Arc = match config.acl_check_contract_address.take() { + Some(acl_check_contract_address) => { + acl_storage::OnChainAclStorage::new(trusted_client.clone(), acl_check_contract_address)? + } + None => Arc::new(acl_storage::DummyAclStorage::default()), + }; - let key_server_set = key_server_set::OnChainKeyServerSet::new(trusted_client.clone(), config.cluster_config.key_server_set_contract_address.take(), - self_key_pair.clone(), config.cluster_config.auto_migrate_enabled, config.cluster_config.nodes.clone())?; - let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(db)?); - let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), - acl_storage.clone(), key_storage.clone(), executor.clone())?); - let cluster = key_server.cluster(); - let key_server: Arc = key_server; + let key_server_set = key_server_set::OnChainKeyServerSet::new( + trusted_client.clone(), + config.cluster_config.key_server_set_contract_address.take(), + self_key_pair.clone(), + config.cluster_config.auto_migrate_enabled, + config.cluster_config.nodes.clone(), + )?; + let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(db)?); + let key_server = Arc::new(key_server::KeyServerImpl::new( + &config.cluster_config, + key_server_set.clone(), + self_key_pair.clone(), + acl_storage.clone(), + key_storage.clone(), + executor.clone(), + )?); + let cluster = key_server.cluster(); + let key_server: Arc = key_server; - // prepare HTTP listener - let http_listener = 
match config.listener_address { - Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, Arc::downgrade(&key_server), executor)?), - None => None, - }; + // prepare HTTP listener + let http_listener = match config.listener_address { + Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start( + listener_address, + Arc::downgrade(&key_server), + executor, + )?), + None => None, + }; - // prepare service contract listeners - let create_service_contract = |address, name, api_mask| - Arc::new(listener::service_contract::OnChainServiceContract::new( - api_mask, - trusted_client.clone(), - name, - address, - self_key_pair.clone())); + // prepare service contract listeners + let create_service_contract = |address, name, api_mask| { + Arc::new(listener::service_contract::OnChainServiceContract::new( + api_mask, + trusted_client.clone(), + name, + address, + self_key_pair.clone(), + )) + }; - let mut contracts: Vec> = Vec::new(); - config.service_contract_address.map(|address| - create_service_contract(address, - listener::service_contract::SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), - listener::ApiMask::all())) - .map(|l| contracts.push(l)); - config.service_contract_srv_gen_address.map(|address| - create_service_contract(address, - listener::service_contract::SRV_KEY_GEN_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), - listener::ApiMask { server_key_generation_requests: true, ..Default::default() })) - .map(|l| contracts.push(l)); - config.service_contract_srv_retr_address.map(|address| - create_service_contract(address, - listener::service_contract::SRV_KEY_RETR_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), - listener::ApiMask { server_key_retrieval_requests: true, ..Default::default() })) - .map(|l| contracts.push(l)); - config.service_contract_doc_store_address.map(|address| - create_service_contract(address, - listener::service_contract::DOC_KEY_STORE_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), - 
listener::ApiMask { document_key_store_requests: true, ..Default::default() })) - .map(|l| contracts.push(l)); - config.service_contract_doc_sretr_address.map(|address| - create_service_contract(address, - listener::service_contract::DOC_KEY_SRETR_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), - listener::ApiMask { document_key_shadow_retrieval_requests: true, ..Default::default() })) - .map(|l| contracts.push(l)); + let mut contracts: Vec> = Vec::new(); + config + .service_contract_address + .map(|address| { + create_service_contract( + address, + listener::service_contract::SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), + listener::ApiMask::all(), + ) + }) + .map(|l| contracts.push(l)); + config + .service_contract_srv_gen_address + .map(|address| { + create_service_contract( + address, + listener::service_contract::SRV_KEY_GEN_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), + listener::ApiMask { + server_key_generation_requests: true, + ..Default::default() + }, + ) + }) + .map(|l| contracts.push(l)); + config + .service_contract_srv_retr_address + .map(|address| { + create_service_contract( + address, + listener::service_contract::SRV_KEY_RETR_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), + listener::ApiMask { + server_key_retrieval_requests: true, + ..Default::default() + }, + ) + }) + .map(|l| contracts.push(l)); + config + .service_contract_doc_store_address + .map(|address| { + create_service_contract( + address, + listener::service_contract::DOC_KEY_STORE_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), + listener::ApiMask { + document_key_store_requests: true, + ..Default::default() + }, + ) + }) + .map(|l| contracts.push(l)); + config + .service_contract_doc_sretr_address + .map(|address| { + create_service_contract( + address, + listener::service_contract::DOC_KEY_SRETR_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), + listener::ApiMask { + document_key_shadow_retrieval_requests: true, + ..Default::default() + }, + ) + }) + .map(|l| contracts.push(l)); - let contract: Option> = 
match contracts.len() { - 0 => None, - 1 => Some(contracts.pop().expect("contract.len() is 1; qed")), - _ => Some(Arc::new(listener::service_contract_aggregate::OnChainServiceContractAggregate::new(contracts))), - }; + let contract: Option> = match contracts.len() { + 0 => None, + 1 => Some(contracts.pop().expect("contract.len() is 1; qed")), + _ => Some(Arc::new( + listener::service_contract_aggregate::OnChainServiceContractAggregate::new(contracts), + )), + }; - let contract_listener = match contract { - Some(contract) => Some({ - let listener = listener::service_contract_listener::ServiceContractListener::new( - listener::service_contract_listener::ServiceContractListenerParams { - contract: contract, - self_key_pair: self_key_pair.clone(), - key_server_set: key_server_set, - acl_storage: acl_storage, - cluster: cluster, - key_storage: key_storage, - } - )?; - client.add_notify(listener.clone()); - listener - }), - None => None, - }; + let contract_listener = match contract { + Some(contract) => Some({ + let listener = listener::service_contract_listener::ServiceContractListener::new( + listener::service_contract_listener::ServiceContractListenerParams { + contract: contract, + self_key_pair: self_key_pair.clone(), + key_server_set: key_server_set, + acl_storage: acl_storage, + cluster: cluster, + key_storage: key_storage, + }, + )?; + client.add_notify(listener.clone()); + listener + }), + None => None, + }; - Ok(Box::new(listener::Listener::new(key_server, http_listener, contract_listener))) + Ok(Box::new(listener::Listener::new( + key_server, + http_listener, + contract_listener, + ))) } diff --git a/secret-store/src/listener/http_listener.rs b/secret-store/src/listener/http_listener.rs index 47385a9fb..8c4e72d53 100644 --- a/secret-store/src/listener/http_listener.rs +++ b/secret-store/src/listener/http_listener.rs @@ -14,26 +14,33 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::BTreeSet; -use std::sync::{Arc, Weak}; -use hyper::{self, Uri, Request as HttpRequest, Response as HttpResponse, Method as HttpMethod, - StatusCode as HttpStatusCode, Body, - header::{self, HeaderValue}, - server::conn::Http, - service::Service, +use futures::{future, Future, Stream}; +use hyper::{ + self, + header::{self, HeaderValue}, + server::conn::Http, + service::Service, + Body, Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, + StatusCode as HttpStatusCode, Uri, }; +use parity_runtime::Executor; +use percent_encoding::percent_decode; use serde::Serialize; use serde_json; -use tokio; -use tokio::net::TcpListener; -use parity_runtime::Executor; -use futures::{future, Future, Stream}; -use percent_encoding::percent_decode; +use std::{ + collections::BTreeSet, + sync::{Arc, Weak}, +}; +use tokio::{self, net::TcpListener}; +use serialization::{ + SerializableBytes, SerializableEncryptedDocumentKeyShadow, SerializablePublic, +}; use traits::KeyServer; -use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic}; -use types::{Error, Public, MessageHash, NodeAddress, RequestSignature, ServerKeyId, - EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId}; +use types::{ + EncryptedDocumentKey, EncryptedDocumentKeyShadow, Error, MessageHash, NodeAddress, NodeId, + Public, RequestSignature, ServerKeyId, +}; /// Key server http-requests listener. 
Available requests: /// To generate server key: POST /shadow/{server_key_id}/{signature}/{threshold} @@ -47,85 +54,103 @@ use types::{Error, Public, MessageHash, NodeAddress, RequestSignature, ServerKey /// To change servers set: POST /admin/servers_set_change/{old_signature}/{new_signature} + BODY: json array of hex-encoded nodes ids pub struct KeyServerHttpListener { - _executor: Executor, - _handler: Arc, + _executor: Executor, + _handler: Arc, } /// Parsed http request #[derive(Debug, Clone, PartialEq)] enum Request { - /// Invalid request - Invalid, - /// Generate server key. - GenerateServerKey(ServerKeyId, RequestSignature, usize), - /// Store document key. - StoreDocumentKey(ServerKeyId, RequestSignature, Public, Public), - /// Generate encryption key. - GenerateDocumentKey(ServerKeyId, RequestSignature, usize), - /// Request public portion of server key. - GetServerKey(ServerKeyId, RequestSignature), - /// Request encryption key of given document for given requestor. - GetDocumentKey(ServerKeyId, RequestSignature), - /// Request shadow of encryption key of given document for given requestor. - GetDocumentKeyShadow(ServerKeyId, RequestSignature), - /// Generate Schnorr signature for the message. - SchnorrSignMessage(ServerKeyId, RequestSignature, MessageHash), - /// Generate ECDSA signature for the message. - EcdsaSignMessage(ServerKeyId, RequestSignature, MessageHash), - /// Change servers set. - ChangeServersSet(RequestSignature, RequestSignature, BTreeSet), + /// Invalid request + Invalid, + /// Generate server key. + GenerateServerKey(ServerKeyId, RequestSignature, usize), + /// Store document key. + StoreDocumentKey(ServerKeyId, RequestSignature, Public, Public), + /// Generate encryption key. + GenerateDocumentKey(ServerKeyId, RequestSignature, usize), + /// Request public portion of server key. + GetServerKey(ServerKeyId, RequestSignature), + /// Request encryption key of given document for given requestor. 
+ GetDocumentKey(ServerKeyId, RequestSignature), + /// Request shadow of encryption key of given document for given requestor. + GetDocumentKeyShadow(ServerKeyId, RequestSignature), + /// Generate Schnorr signature for the message. + SchnorrSignMessage(ServerKeyId, RequestSignature, MessageHash), + /// Generate ECDSA signature for the message. + EcdsaSignMessage(ServerKeyId, RequestSignature, MessageHash), + /// Change servers set. + ChangeServersSet(RequestSignature, RequestSignature, BTreeSet), } /// Cloneable http handler #[derive(Clone)] struct KeyServerHttpHandler { - handler: Arc, + handler: Arc, } /// Shared http handler struct KeyServerSharedHttpHandler { - key_server: Weak, + key_server: Weak, } impl KeyServerHttpListener { - /// Start KeyServer http listener - pub fn start(listener_address: NodeAddress, key_server: Weak, executor: Executor) -> Result { - let shared_handler = Arc::new(KeyServerSharedHttpHandler { - key_server: key_server, - }); + /// Start KeyServer http listener + pub fn start( + listener_address: NodeAddress, + key_server: Weak, + executor: Executor, + ) -> Result { + let shared_handler = Arc::new(KeyServerSharedHttpHandler { + key_server: key_server, + }); - let listener_address = format!("{}:{}", listener_address.address, listener_address.port).parse()?; - let listener = TcpListener::bind(&listener_address)?; + let listener_address = + format!("{}:{}", listener_address.address, listener_address.port).parse()?; + let listener = TcpListener::bind(&listener_address)?; - let shared_handler2 = shared_handler.clone(); + let shared_handler2 = shared_handler.clone(); - let server = listener.incoming() - .map_err(|e| warn!("Key server listener error: {:?}", e)) - .for_each(move |socket| { - let http = Http::new(); - let serve = http.serve_connection(socket, - KeyServerHttpHandler { handler: shared_handler2.clone() } - ).map(|_| ()).map_err(|e| { - warn!("Key server handler error: {:?}", e); - }); + let server = listener + .incoming() + 
.map_err(|e| warn!("Key server listener error: {:?}", e)) + .for_each(move |socket| { + let http = Http::new(); + let serve = http + .serve_connection( + socket, + KeyServerHttpHandler { + handler: shared_handler2.clone(), + }, + ) + .map(|_| ()) + .map_err(|e| { + warn!("Key server handler error: {:?}", e); + }); - tokio::spawn(serve) - }); + tokio::spawn(serve) + }); - executor.spawn(server); + executor.spawn(server); - let listener = KeyServerHttpListener { - _executor: executor, - _handler: shared_handler, - }; + let listener = KeyServerHttpListener { + _executor: executor, + _handler: shared_handler, + }; - Ok(listener) - } + Ok(listener) + } } impl KeyServerHttpHandler { - fn process(self, req_method: HttpMethod, req_uri: Uri, path: &str, req_body: &[u8]) -> HttpResponse { - match parse_request(&req_method, &path, &req_body) { + fn process( + self, + req_method: HttpMethod, + req_uri: Uri, + path: &str, + req_body: &[u8], + ) -> HttpResponse { + match parse_request(&req_method, &path, &req_body) { Request::GenerateServerKey(document, signature, threshold) => { return_server_public_key(&req_uri, self.handler.key_server.upgrade() .map(|key_server| key_server.generate_key(&document, &signature.into(), threshold)) @@ -215,284 +240,341 @@ impl KeyServerHttpHandler { .expect("Nothing to parse, cannot fail; qed") }, } - } + } } impl Service for KeyServerHttpHandler { - type ReqBody = Body; - type ResBody = Body; - type Error = hyper::Error; - type Future = Box, Error=Self::Error> + Send>; + type ReqBody = Body; + type ResBody = Body; + type Error = hyper::Error; + type Future = Box, Error = Self::Error> + Send>; - fn call(&mut self, req: HttpRequest) -> Self::Future { - if req.headers().contains_key(header::ORIGIN) { - warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method(), req.uri()); - return Box::new(future::ok(HttpResponse::builder() - .status(HttpStatusCode::NOT_FOUND) - .body(Body::empty()) - .expect("Nothing to parse, cannot 
fail; qed"))) - } + fn call(&mut self, req: HttpRequest) -> Self::Future { + if req.headers().contains_key(header::ORIGIN) { + warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method(), req.uri()); + return Box::new(future::ok( + HttpResponse::builder() + .status(HttpStatusCode::NOT_FOUND) + .body(Body::empty()) + .expect("Nothing to parse, cannot fail; qed"), + )); + } - let req_method = req.method().clone(); - let req_uri = req.uri().clone(); - // We cannot consume Self because of the Service trait requirement. - let this = self.clone(); + let req_method = req.method().clone(); + let req_uri = req.uri().clone(); + // We cannot consume Self because of the Service trait requirement. + let this = self.clone(); - Box::new(req.into_body().concat2().map(move |body| { - let path = req_uri.path().to_string(); - if path.starts_with("/") { - this.process(req_method, req_uri, &path, &body) - } else { - warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); - HttpResponse::builder() - .status(HttpStatusCode::NOT_FOUND) - .body(Body::empty()) - .expect("Nothing to parse, cannot fail; qed") - } - })) - } + Box::new(req.into_body().concat2().map(move |body| { + let path = req_uri.path().to_string(); + if path.starts_with("/") { + this.process(req_method, req_uri, &path, &body) + } else { + warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); + HttpResponse::builder() + .status(HttpStatusCode::NOT_FOUND) + .body(Body::empty()) + .expect("Nothing to parse, cannot fail; qed") + } + })) + } } fn return_empty(req_uri: &Uri, empty: Result<(), Error>) -> HttpResponse { - return_bytes::(req_uri, empty.map(|_| None)) + return_bytes::(req_uri, empty.map(|_| None)) } -fn return_server_public_key(req_uri: &Uri, server_public: Result) -> HttpResponse { - return_bytes(req_uri, server_public.map(|k| Some(SerializablePublic(k)))) +fn return_server_public_key( + req_uri: &Uri, + server_public: Result, +) 
-> HttpResponse { + return_bytes(req_uri, server_public.map(|k| Some(SerializablePublic(k)))) } -fn return_message_signature(req_uri: &Uri, signature: Result) -> HttpResponse { - return_bytes(req_uri, signature.map(|s| Some(SerializableBytes(s)))) +fn return_message_signature( + req_uri: &Uri, + signature: Result, +) -> HttpResponse { + return_bytes(req_uri, signature.map(|s| Some(SerializableBytes(s)))) } -fn return_document_key(req_uri: &Uri, document_key: Result) -> HttpResponse { - return_bytes(req_uri, document_key.map(|k| Some(SerializableBytes(k)))) +fn return_document_key( + req_uri: &Uri, + document_key: Result, +) -> HttpResponse { + return_bytes(req_uri, document_key.map(|k| Some(SerializableBytes(k)))) } -fn return_document_key_shadow(req_uri: &Uri, document_key_shadow: Result) - -> HttpResponse -{ - return_bytes(req_uri, document_key_shadow.map(|k| Some(SerializableEncryptedDocumentKeyShadow { - decrypted_secret: k.decrypted_secret.into(), - common_point: k.common_point.expect("always filled when requesting document_key_shadow; qed").into(), - decrypt_shadows: k.decrypt_shadows.expect("always filled when requesting document_key_shadow; qed").into_iter().map(Into::into).collect(), - }))) +fn return_document_key_shadow( + req_uri: &Uri, + document_key_shadow: Result, +) -> HttpResponse { + return_bytes( + req_uri, + document_key_shadow.map(|k| { + Some(SerializableEncryptedDocumentKeyShadow { + decrypted_secret: k.decrypted_secret.into(), + common_point: k + .common_point + .expect("always filled when requesting document_key_shadow; qed") + .into(), + decrypt_shadows: k + .decrypt_shadows + .expect("always filled when requesting document_key_shadow; qed") + .into_iter() + .map(Into::into) + .collect(), + }) + }), + ) } -fn return_bytes(req_uri: &Uri, result: Result, Error>) -> HttpResponse { - match result { - Ok(Some(result)) => match serde_json::to_vec(&result) { - Ok(result) => { - let body: Body = result.into(); - HttpResponse::builder() - 
.header(header::CONTENT_TYPE, HeaderValue::from_static("application/json; charset=utf-8")) - .body(body) - .expect("Error creating http response") - }, - Err(err) => { - warn!(target: "secretstore", "response to request {} has failed with: {}", req_uri, err); - HttpResponse::builder() - .status(HttpStatusCode::INTERNAL_SERVER_ERROR) - .body(Body::empty()) - .expect("Nothing to parse, cannot fail; qed") - } - }, - Ok(None) => { - HttpResponse::builder() - .status(HttpStatusCode::OK) - .body(Body::empty()) - .expect("Nothing to parse, cannot fail; qed") - }, - Err(err) => return_error(err), - } +fn return_bytes( + req_uri: &Uri, + result: Result, Error>, +) -> HttpResponse { + match result { + Ok(Some(result)) => match serde_json::to_vec(&result) { + Ok(result) => { + let body: Body = result.into(); + HttpResponse::builder() + .header( + header::CONTENT_TYPE, + HeaderValue::from_static("application/json; charset=utf-8"), + ) + .body(body) + .expect("Error creating http response") + } + Err(err) => { + warn!(target: "secretstore", "response to request {} has failed with: {}", req_uri, err); + HttpResponse::builder() + .status(HttpStatusCode::INTERNAL_SERVER_ERROR) + .body(Body::empty()) + .expect("Nothing to parse, cannot fail; qed") + } + }, + Ok(None) => HttpResponse::builder() + .status(HttpStatusCode::OK) + .body(Body::empty()) + .expect("Nothing to parse, cannot fail; qed"), + Err(err) => return_error(err), + } } fn return_error(err: Error) -> HttpResponse { - let status = match err { - | Error::AccessDenied - | Error::ConsensusUnreachable - | Error::ConsensusTemporaryUnreachable => - HttpStatusCode::FORBIDDEN, - | Error::ServerKeyIsNotFound - | Error::DocumentKeyIsNotFound => - HttpStatusCode::NOT_FOUND, - | Error::InsufficientRequesterData(_) - | Error::Hyper(_) - | Error::Serde(_) - | Error::DocumentKeyAlreadyStored - | Error::ServerKeyAlreadyGenerated => - HttpStatusCode::BAD_REQUEST, - _ => HttpStatusCode::INTERNAL_SERVER_ERROR, - }; + let status = match err 
{ + Error::AccessDenied + | Error::ConsensusUnreachable + | Error::ConsensusTemporaryUnreachable => HttpStatusCode::FORBIDDEN, + Error::ServerKeyIsNotFound | Error::DocumentKeyIsNotFound => HttpStatusCode::NOT_FOUND, + Error::InsufficientRequesterData(_) + | Error::Hyper(_) + | Error::Serde(_) + | Error::DocumentKeyAlreadyStored + | Error::ServerKeyAlreadyGenerated => HttpStatusCode::BAD_REQUEST, + _ => HttpStatusCode::INTERNAL_SERVER_ERROR, + }; - let mut res = HttpResponse::builder(); - res.status(status); + let mut res = HttpResponse::builder(); + res.status(status); - // return error text. ignore errors when returning error - let error_text = format!("\"{}\"", err); - if let Ok(error_text) = serde_json::to_vec(&error_text) { - res.header(header::CONTENT_TYPE, HeaderValue::from_static("application/json; charset=utf-8")); - res.body(error_text.into()) - .expect("`error_text` is a formatted string, parsing cannot fail; qed") - } else { - res.body(Body::empty()) - .expect("Nothing to parse, cannot fail; qed") - } + // return error text. 
ignore errors when returning error + let error_text = format!("\"{}\"", err); + if let Ok(error_text) = serde_json::to_vec(&error_text) { + res.header( + header::CONTENT_TYPE, + HeaderValue::from_static("application/json; charset=utf-8"), + ); + res.body(error_text.into()) + .expect("`error_text` is a formatted string, parsing cannot fail; qed") + } else { + res.body(Body::empty()) + .expect("Nothing to parse, cannot fail; qed") + } } fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request { - let uri_path = match percent_decode(uri_path.as_bytes()).decode_utf8() { - Ok(path) => path, - Err(_) => return Request::Invalid, - }; + let uri_path = match percent_decode(uri_path.as_bytes()).decode_utf8() { + Ok(path) => path, + Err(_) => return Request::Invalid, + }; - let path: Vec = uri_path.trim_start_matches('/').split('/').map(Into::into).collect(); - if path.len() == 0 { - return Request::Invalid; - } + let path: Vec = uri_path + .trim_start_matches('/') + .split('/') + .map(Into::into) + .collect(); + if path.len() == 0 { + return Request::Invalid; + } - if path[0] == "admin" { - return parse_admin_request(method, path, body); - } + if path[0] == "admin" { + return parse_admin_request(method, path, body); + } - let is_known_prefix = &path[0] == "shadow" || &path[0] == "schnorr" || &path[0] == "ecdsa" || &path[0] == "server"; - let (prefix, args_offset) = if is_known_prefix { (&*path[0], 1) } else { ("", 0) }; - let args_count = path.len() - args_offset; - if args_count < 2 || path[args_offset].is_empty() || path[args_offset + 1].is_empty() { - return Request::Invalid; - } + let is_known_prefix = &path[0] == "shadow" + || &path[0] == "schnorr" + || &path[0] == "ecdsa" + || &path[0] == "server"; + let (prefix, args_offset) = if is_known_prefix { + (&*path[0], 1) + } else { + ("", 0) + }; + let args_count = path.len() - args_offset; + if args_count < 2 || path[args_offset].is_empty() || path[args_offset + 1].is_empty() { + return 
Request::Invalid; + } - let document = match path[args_offset].parse() { - Ok(document) => document, - _ => return Request::Invalid, - }; - let signature = match path[args_offset + 1].parse() { - Ok(signature) => signature, - _ => return Request::Invalid, - }; + let document = match path[args_offset].parse() { + Ok(document) => document, + _ => return Request::Invalid, + }; + let signature = match path[args_offset + 1].parse() { + Ok(signature) => signature, + _ => return Request::Invalid, + }; - let threshold = path.get(args_offset + 2).map(|v| v.parse()); - let message_hash = path.get(args_offset + 2).map(|v| v.parse()); - let common_point = path.get(args_offset + 2).map(|v| v.parse()); - let encrypted_key = path.get(args_offset + 3).map(|v| v.parse()); - match (prefix, args_count, method, threshold, message_hash, common_point, encrypted_key) { - ("shadow", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) => - Request::GenerateServerKey(document, signature, threshold), - ("shadow", 4, &HttpMethod::POST, _, _, Some(Ok(common_point)), Some(Ok(encrypted_key))) => - Request::StoreDocumentKey(document, signature, common_point, encrypted_key), - ("", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) => - Request::GenerateDocumentKey(document, signature, threshold), - ("server", 2, &HttpMethod::GET, _, _, _, _) => - Request::GetServerKey(document, signature), - ("", 2, &HttpMethod::GET, _, _, _, _) => - Request::GetDocumentKey(document, signature), - ("shadow", 2, &HttpMethod::GET, _, _, _, _) => - Request::GetDocumentKeyShadow(document, signature), - ("schnorr", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) => - Request::SchnorrSignMessage(document, signature, message_hash), - ("ecdsa", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) => - Request::EcdsaSignMessage(document, signature, message_hash), - _ => Request::Invalid, - } + let threshold = path.get(args_offset + 2).map(|v| v.parse()); + let message_hash = path.get(args_offset + 2).map(|v| 
v.parse()); + let common_point = path.get(args_offset + 2).map(|v| v.parse()); + let encrypted_key = path.get(args_offset + 3).map(|v| v.parse()); + match ( + prefix, + args_count, + method, + threshold, + message_hash, + common_point, + encrypted_key, + ) { + ("shadow", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) => { + Request::GenerateServerKey(document, signature, threshold) + } + ("shadow", 4, &HttpMethod::POST, _, _, Some(Ok(common_point)), Some(Ok(encrypted_key))) => { + Request::StoreDocumentKey(document, signature, common_point, encrypted_key) + } + ("", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) => { + Request::GenerateDocumentKey(document, signature, threshold) + } + ("server", 2, &HttpMethod::GET, _, _, _, _) => Request::GetServerKey(document, signature), + ("", 2, &HttpMethod::GET, _, _, _, _) => Request::GetDocumentKey(document, signature), + ("shadow", 2, &HttpMethod::GET, _, _, _, _) => { + Request::GetDocumentKeyShadow(document, signature) + } + ("schnorr", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) => { + Request::SchnorrSignMessage(document, signature, message_hash) + } + ("ecdsa", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) => { + Request::EcdsaSignMessage(document, signature, message_hash) + } + _ => Request::Invalid, + } } fn parse_admin_request(method: &HttpMethod, path: Vec, body: &[u8]) -> Request { - let args_count = path.len(); - if *method != HttpMethod::POST || args_count != 4 || path[1] != "servers_set_change" { - return Request::Invalid; - } + let args_count = path.len(); + if *method != HttpMethod::POST || args_count != 4 || path[1] != "servers_set_change" { + return Request::Invalid; + } - let old_set_signature = match path[2].parse() { - Ok(signature) => signature, - _ => return Request::Invalid, - }; + let old_set_signature = match path[2].parse() { + Ok(signature) => signature, + _ => return Request::Invalid, + }; - let new_set_signature = match path[3].parse() { - Ok(signature) => signature, 
- _ => return Request::Invalid, - }; + let new_set_signature = match path[3].parse() { + Ok(signature) => signature, + _ => return Request::Invalid, + }; - let new_servers_set: BTreeSet = match serde_json::from_slice(body) { - Ok(new_servers_set) => new_servers_set, - _ => return Request::Invalid, - }; + let new_servers_set: BTreeSet = match serde_json::from_slice(body) { + Ok(new_servers_set) => new_servers_set, + _ => return Request::Invalid, + }; - Request::ChangeServersSet(old_set_signature, new_set_signature, - new_servers_set.into_iter().map(Into::into).collect()) + Request::ChangeServersSet( + old_set_signature, + new_set_signature, + new_servers_set.into_iter().map(Into::into).collect(), + ) } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::str::FromStr; + use std::{str::FromStr, sync::Arc}; - use hyper::Method as HttpMethod; - use ethkey::Public; - use ethereum_types::H256; - use traits::KeyServer; - use key_server::tests::DummyKeyServer; - use types::NodeAddress; - use parity_runtime::Runtime; - use super::{parse_request, Request, KeyServerHttpListener}; + use super::{parse_request, KeyServerHttpListener, Request}; + use ethereum_types::H256; + use ethkey::Public; + use hyper::Method as HttpMethod; + use key_server::tests::DummyKeyServer; + use parity_runtime::Runtime; + use traits::KeyServer; + use types::NodeAddress; - #[test] - fn http_listener_successfully_drops() { - let key_server: Arc = Arc::new(DummyKeyServer::default()); - let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 }; - let runtime = Runtime::with_thread_count(1); - let listener = KeyServerHttpListener::start(address, Arc::downgrade(&key_server), - runtime.executor()).unwrap(); - drop(listener); - } + #[test] + fn http_listener_successfully_drops() { + let key_server: Arc = Arc::new(DummyKeyServer::default()); + let address = NodeAddress { + address: "127.0.0.1".into(), + port: 9000, + }; + let runtime = Runtime::with_thread_count(1); + let listener = + 
KeyServerHttpListener::start(address, Arc::downgrade(&key_server), runtime.executor()) + .unwrap(); + drop(listener); + } - #[test] - fn parse_request_successful() { - // POST /shadow/{server_key_id}/{signature}/{threshold} => generate server key - assert_eq!(parse_request(&HttpMethod::POST, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()), + #[test] + fn parse_request_successful() { + // POST /shadow/{server_key_id}/{signature}/{threshold} => generate server key + assert_eq!(parse_request(&HttpMethod::POST, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()), Request::GenerateServerKey("0000000000000000000000000000000000000000000000000000000000000001".into(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), 2)); - // POST /shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key} => store encrypted document key - assert_eq!(parse_request(&HttpMethod::POST, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8/1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb", Default::default()), + // POST /shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key} => store encrypted document key + assert_eq!(parse_request(&HttpMethod::POST, 
"/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8/1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb", Default::default()), Request::StoreDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse().unwrap(), "1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".parse().unwrap())); - // POST /{server_key_id}/{signature}/{threshold} => generate server && document key - assert_eq!(parse_request(&HttpMethod::POST, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()), + // POST /{server_key_id}/{signature}/{threshold} => generate server && document key + assert_eq!(parse_request(&HttpMethod::POST, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()), Request::GenerateDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), 
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), 2)); - // GET /server/{server_key_id}/{signature} => get public portion of server key - assert_eq!(parse_request(&HttpMethod::GET, "/server/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), + // GET /server/{server_key_id}/{signature} => get public portion of server key + assert_eq!(parse_request(&HttpMethod::GET, "/server/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), Request::GetServerKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); - // GET /{server_key_id}/{signature} => get document key - assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), + // GET /{server_key_id}/{signature} => get document key + assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), 
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); - assert_eq!(parse_request(&HttpMethod::GET, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), + assert_eq!(parse_request(&HttpMethod::GET, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); - // GET /shadow/{server_key_id}/{signature} => get document key shadow - assert_eq!(parse_request(&HttpMethod::GET, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), + // GET /shadow/{server_key_id}/{signature} => get document key shadow + assert_eq!(parse_request(&HttpMethod::GET, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), Request::GetDocumentKeyShadow("0000000000000000000000000000000000000000000000000000000000000001".into(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); - // GET /schnorr/{server_key_id}/{signature}/{message_hash} => schnorr-sign message with server key - 
assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()), + // GET /schnorr/{server_key_id}/{signature}/{message_hash} => schnorr-sign message with server key + assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()), Request::SchnorrSignMessage("0000000000000000000000000000000000000000000000000000000000000001".into(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap())); - // GET /ecdsa/{server_key_id}/{signature}/{message_hash} => ecdsa-sign message with server key - assert_eq!(parse_request(&HttpMethod::GET, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()), + // GET /ecdsa/{server_key_id}/{signature}/{message_hash} => ecdsa-sign message with server key + assert_eq!(parse_request(&HttpMethod::GET, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()), 
Request::EcdsaSignMessage("0000000000000000000000000000000000000000000000000000000000000001".into(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap())); - // POST /admin/servers_set_change/{old_set_signature}/{new_set_signature} + body - let node1: Public = "843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91".parse().unwrap(); - let node2: Public = "07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3".parse().unwrap(); - let nodes = vec![node1, node2].into_iter().collect(); - assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", + // POST /admin/servers_set_change/{old_set_signature}/{new_set_signature} + body + let node1: Public = "843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91".parse().unwrap(); + let node2: Public = "07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3".parse().unwrap(); + let nodes = vec![node1, node2].into_iter().collect(); + assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", 
&r#"["0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91", "0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3"]"#.as_bytes()), Request::ChangeServersSet( @@ -500,24 +582,53 @@ mod tests { "b199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), nodes, )); - } + } - #[test] - fn parse_request_failed() { - assert_eq!(parse_request(&HttpMethod::GET, "", Default::default()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::GET, "/shadow", Default::default()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::GET, "///2", Default::default()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::GET, "/shadow///2", Default::default()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001", Default::default()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/", Default::default()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::GET, "/a/b", Default::default()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::GET, 
"/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/xxx/yyy", + #[test] + fn parse_request_failed() { + assert_eq!( + parse_request(&HttpMethod::GET, "", Default::default()), + Request::Invalid + ); + assert_eq!( + parse_request(&HttpMethod::GET, "/shadow", Default::default()), + Request::Invalid + ); + assert_eq!( + parse_request(&HttpMethod::GET, "///2", Default::default()), + Request::Invalid + ); + assert_eq!( + parse_request(&HttpMethod::GET, "/shadow///2", Default::default()), + Request::Invalid + ); + assert_eq!( + parse_request( + &HttpMethod::GET, + "/0000000000000000000000000000000000000000000000000000000000000001", + Default::default() + ), + Request::Invalid + ); + assert_eq!( + parse_request( + &HttpMethod::GET, + "/0000000000000000000000000000000000000000000000000000000000000001/", + Default::default() + ), + Request::Invalid + ); + assert_eq!( + parse_request(&HttpMethod::GET, "/a/b", Default::default()), + Request::Invalid + ); + assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::GET, 
"/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/xxx/yyy", &r#"["0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91", "0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3"]"#.as_bytes()), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", "".as_bytes()), + assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", "".as_bytes()), Request::Invalid); - } + } } diff --git a/secret-store/src/listener/mod.rs b/secret-store/src/listener/mod.rs index 0fde173c8..2223aec91 100644 --- a/secret-store/src/listener/mod.rs +++ b/secret-store/src/listener/mod.rs @@ -20,97 +20,153 @@ pub mod service_contract_aggregate; pub mod service_contract_listener; mod tasks_queue; -use std::collections::BTreeSet; -use std::sync::Arc; -use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, AdminSessionsServer, KeyServer}; -use types::{Error, Public, MessageHash, 
EncryptedMessageSignature, RequestSignature, ServerKeyId, - EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId, Requester}; +use std::{collections::BTreeSet, sync::Arc}; +use traits::{ + AdminSessionsServer, DocumentKeyServer, KeyServer, MessageSigner, ServerKeyGenerator, +}; +use types::{ + EncryptedDocumentKey, EncryptedDocumentKeyShadow, EncryptedMessageSignature, Error, + MessageHash, NodeId, Public, RequestSignature, Requester, ServerKeyId, +}; /// Available API mask. #[derive(Debug, Default)] pub struct ApiMask { - /// Accept server key generation requests. - pub server_key_generation_requests: bool, - /// Accept server key retrieval requests. - pub server_key_retrieval_requests: bool, - /// Accept document key store requests. - pub document_key_store_requests: bool, - /// Accept document key shadow retrieval requests. - pub document_key_shadow_retrieval_requests: bool, + /// Accept server key generation requests. + pub server_key_generation_requests: bool, + /// Accept server key retrieval requests. + pub server_key_retrieval_requests: bool, + /// Accept document key store requests. + pub document_key_store_requests: bool, + /// Accept document key shadow retrieval requests. + pub document_key_shadow_retrieval_requests: bool, } /// Combined HTTP + service contract listener. pub struct Listener { - key_server: Arc, - _http: Option, - _contract: Option>, + key_server: Arc, + _http: Option, + _contract: Option>, } impl ApiMask { - /// Create mask that accepts all requests. - pub fn all() -> Self { - ApiMask { - server_key_generation_requests: true, - server_key_retrieval_requests: true, - document_key_store_requests: true, - document_key_shadow_retrieval_requests: true, - } - } + /// Create mask that accepts all requests. 
+ pub fn all() -> Self { + ApiMask { + server_key_generation_requests: true, + server_key_retrieval_requests: true, + document_key_store_requests: true, + document_key_shadow_retrieval_requests: true, + } + } } impl Listener { - /// Create new listener. - pub fn new(key_server: Arc, http: Option, contract: Option>) -> Self { - Self { - key_server: key_server, - _http: http, - _contract: contract, - } - } + /// Create new listener. + pub fn new( + key_server: Arc, + http: Option, + contract: Option>, + ) -> Self { + Self { + key_server: key_server, + _http: http, + _contract: contract, + } + } } impl KeyServer for Listener {} impl ServerKeyGenerator for Listener { - fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result { - self.key_server.generate_key(key_id, author, threshold) - } + fn generate_key( + &self, + key_id: &ServerKeyId, + author: &Requester, + threshold: usize, + ) -> Result { + self.key_server.generate_key(key_id, author, threshold) + } - fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result { - self.key_server.restore_key_public(key_id, author) - } + fn restore_key_public( + &self, + key_id: &ServerKeyId, + author: &Requester, + ) -> Result { + self.key_server.restore_key_public(key_id, author) + } } impl DocumentKeyServer for Listener { - fn store_document_key(&self, key_id: &ServerKeyId, author: &Requester, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> { - self.key_server.store_document_key(key_id, author, common_point, encrypted_document_key) - } + fn store_document_key( + &self, + key_id: &ServerKeyId, + author: &Requester, + common_point: Public, + encrypted_document_key: Public, + ) -> Result<(), Error> { + self.key_server + .store_document_key(key_id, author, common_point, encrypted_document_key) + } - fn generate_document_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result { - self.key_server.generate_document_key(key_id, 
author, threshold) - } + fn generate_document_key( + &self, + key_id: &ServerKeyId, + author: &Requester, + threshold: usize, + ) -> Result { + self.key_server + .generate_document_key(key_id, author, threshold) + } - fn restore_document_key(&self, key_id: &ServerKeyId, requester: &Requester) -> Result { - self.key_server.restore_document_key(key_id, requester) - } + fn restore_document_key( + &self, + key_id: &ServerKeyId, + requester: &Requester, + ) -> Result { + self.key_server.restore_document_key(key_id, requester) + } - fn restore_document_key_shadow(&self, key_id: &ServerKeyId, requester: &Requester) -> Result { - self.key_server.restore_document_key_shadow(key_id, requester) - } + fn restore_document_key_shadow( + &self, + key_id: &ServerKeyId, + requester: &Requester, + ) -> Result { + self.key_server + .restore_document_key_shadow(key_id, requester) + } } impl MessageSigner for Listener { - fn sign_message_schnorr(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result { - self.key_server.sign_message_schnorr(key_id, requester, message) - } + fn sign_message_schnorr( + &self, + key_id: &ServerKeyId, + requester: &Requester, + message: MessageHash, + ) -> Result { + self.key_server + .sign_message_schnorr(key_id, requester, message) + } - fn sign_message_ecdsa(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result { - self.key_server.sign_message_ecdsa(key_id, requester, message) - } + fn sign_message_ecdsa( + &self, + key_id: &ServerKeyId, + requester: &Requester, + message: MessageHash, + ) -> Result { + self.key_server + .sign_message_ecdsa(key_id, requester, message) + } } impl AdminSessionsServer for Listener { - fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet) -> Result<(), Error> { - self.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set) - } + fn change_servers_set( + &self, + 
old_set_signature: RequestSignature, + new_set_signature: RequestSignature, + new_servers_set: BTreeSet, + ) -> Result<(), Error> { + self.key_server + .change_servers_set(old_set_signature, new_set_signature, new_servers_set) + } } diff --git a/secret-store/src/listener/service_contract.rs b/secret-store/src/listener/service_contract.rs index 795e75d2c..bc9fc181d 100644 --- a/secret-store/src/listener/service_contract.rs +++ b/secret-store/src/listener/service_contract.rs @@ -14,22 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::Arc; -use parking_lot::RwLock; -use common_types::filter::Filter; -use ethabi::RawLog; -use ethabi::FunctionOutputDecoder; -use call_contract::CallContract; -use ethcore::client::{Client, BlockChainClient, BlockId}; -use ethkey::{Public, public_to_address}; -use hash::keccak; use bytes::Bytes; -use ethereum_types::{H256, U256, Address}; -use listener::ApiMask; -use listener::service_contract_listener::ServiceTask; -use trusted_client::TrustedClient; +use call_contract::CallContract; +use common_types::filter::Filter; +use ethabi::{FunctionOutputDecoder, RawLog}; +use ethcore::client::{BlockChainClient, BlockId, Client}; +use ethereum_types::{Address, H256, U256}; +use ethkey::{public_to_address, Public}; +use hash::keccak; use helpers::{get_confirmed_block_hash, REQUEST_CONFIRMATIONS_REQUIRED}; -use {ServerKeyId, NodeKeyPair, ContractAddress}; +use listener::{service_contract_listener::ServiceTask, ApiMask}; +use parking_lot::RwLock; +use std::sync::Arc; +use trusted_client::TrustedClient; +use ContractAddress; +use NodeKeyPair; +use ServerKeyId; use_contract!(service, "res/service.json"); @@ -38,91 +38,151 @@ pub const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; /// Name of the server key generation SecretStore contract in the registry. 
pub const SRV_KEY_GEN_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_srv_gen"; /// Name of the server key retrieval SecretStore contract in the registry. -pub const SRV_KEY_RETR_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_srv_retr"; +pub const SRV_KEY_RETR_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = + "secretstore_service_srv_retr"; /// Name of the document key store SecretStore contract in the registry. -pub const DOC_KEY_STORE_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_doc_store"; +pub const DOC_KEY_STORE_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = + "secretstore_service_doc_store"; /// Name of the document key retrieval SecretStore contract in the registry. -pub const DOC_KEY_SRETR_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_doc_sretr"; +pub const DOC_KEY_SRETR_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = + "secretstore_service_doc_sretr"; /// Server key generation has been requested. -const SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyGenerationRequested(bytes32,address,uint8)"; +const SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME: &'static [u8] = + &*b"ServerKeyGenerationRequested(bytes32,address,uint8)"; /// Server key retrieval has been requested. -const SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRetrievalRequested(bytes32)"; +const SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = + &*b"ServerKeyRetrievalRequested(bytes32)"; /// Document key store has been requested. -const DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME: &'static [u8] = &*b"DocumentKeyStoreRequested(bytes32,address,bytes,bytes)"; +const DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME: &'static [u8] = + &*b"DocumentKeyStoreRequested(bytes32,address,bytes,bytes)"; /// Document key common part retrieval has been requested. 
-const DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = &*b"DocumentKeyCommonRetrievalRequested(bytes32,address)"; +const DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = + &*b"DocumentKeyCommonRetrievalRequested(bytes32,address)"; /// Document key personal part retrieval has been requested. -const DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = &*b"DocumentKeyPersonalRetrievalRequested(bytes32,bytes)"; +const DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = + &*b"DocumentKeyPersonalRetrievalRequested(bytes32,bytes)"; lazy_static! { - pub static ref SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME); - pub static ref SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME); - pub static ref DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH: H256 = keccak(DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME); - pub static ref DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = keccak(DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME); - pub static ref DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = keccak(DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME); + pub static ref SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH: H256 = + keccak(SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME); + pub static ref SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = + keccak(SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME); + pub static ref DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH: H256 = + keccak(DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME); + pub static ref DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = + keccak(DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME); + pub static ref DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = + keccak(DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME); } 
/// Service contract trait. pub trait ServiceContract: Send + Sync { - /// Update contract when new blocks are enacted. Returns true if contract is installed && up-to-date (i.e. chain is synced). - fn update(&self) -> bool; - /// Read recent contract logs. Returns topics of every entry. - fn read_logs(&self) -> Box>; - /// Publish generated key. - fn read_pending_requests(&self) -> Box>; - /// Publish generated server key. - fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String>; - /// Publish server key generation error. - fn publish_server_key_generation_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>; - /// Publish retrieved server key. - fn publish_retrieved_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String>; - /// Publish server key retrieval error. - fn publish_server_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>; - /// Publish stored document key. - fn publish_stored_document_key(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>; - /// Publish document key store error. - fn publish_document_key_store_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>; - /// Publish retrieved document key common. - fn publish_retrieved_document_key_common(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String>; - /// Publish retrieved document key personal. - fn publish_retrieved_document_key_personal(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String>; - /// Publish document key store error. 
- fn publish_document_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String>; + /// Update contract when new blocks are enacted. Returns true if contract is installed && up-to-date (i.e. chain is synced). + fn update(&self) -> bool; + /// Read recent contract logs. Returns topics of every entry. + fn read_logs(&self) -> Box>; + /// Publish generated key. + fn read_pending_requests(&self) -> Box>; + /// Publish generated server key. + fn publish_generated_server_key( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + server_key: Public, + ) -> Result<(), String>; + /// Publish server key generation error. + fn publish_server_key_generation_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String>; + /// Publish retrieved server key. + fn publish_retrieved_server_key( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + server_key: Public, + threshold: usize, + ) -> Result<(), String>; + /// Publish server key retrieval error. + fn publish_server_key_retrieval_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String>; + /// Publish stored document key. + fn publish_stored_document_key( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String>; + /// Publish document key store error. + fn publish_document_key_store_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String>; + /// Publish retrieved document key common. + fn publish_retrieved_document_key_common( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + common_point: Public, + threshold: usize, + ) -> Result<(), String>; + /// Publish retrieved document key personal. 
+ fn publish_retrieved_document_key_personal( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + participants: &[Address], + decrypted_secret: Public, + shadow: Bytes, + ) -> Result<(), String>; + /// Publish document key store error. + fn publish_document_key_retrieval_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + ) -> Result<(), String>; } /// On-chain service contract. pub struct OnChainServiceContract { - /// Requests mask. - mask: ApiMask, - /// Blockchain client. - client: TrustedClient, - /// This node key pair. - self_key_pair: Arc, - /// Contract registry name (if any). - name: String, - /// Contract address source. - address_source: ContractAddress, - /// Contract. - data: RwLock, + /// Requests mask. + mask: ApiMask, + /// Blockchain client. + client: TrustedClient, + /// This node key pair. + self_key_pair: Arc, + /// Contract registry name (if any). + name: String, + /// Contract address source. + address_source: ContractAddress, + /// Contract. + data: RwLock, } /// On-chain service contract data. struct ServiceData { - /// Current contract address. - pub contract_address: Option
, - /// Last block we have read logs from. - pub last_log_block: Option, + /// Current contract address. + pub contract_address: Option
, + /// Last block we have read logs from. + pub last_log_block: Option, } /// Pending requests iterator. struct PendingRequestsIterator Option<(bool, ServiceTask)>> { - /// Pending request read function. - read_request: F, - /// Current request index. - index: U256, - /// Requests length. - length: U256, + /// Pending request read function. + read_request: F, + /// Current request index. + index: U256, + /// Requests length. + length: U256, } /// Server key generation related functions. @@ -135,158 +195,203 @@ struct DocumentKeyStoreService; struct DocumentKeyShadowRetrievalService; impl OnChainServiceContract { - /// Create new on-chain service contract. - pub fn new(mask: ApiMask, client: TrustedClient, name: String, address_source: ContractAddress, self_key_pair: Arc) -> Self { - let contract = OnChainServiceContract { - mask: mask, - client: client, - self_key_pair: self_key_pair, - name: name, - address_source: address_source, - data: RwLock::new(ServiceData { - contract_address: None, - last_log_block: None, - }), - }; + /// Create new on-chain service contract. + pub fn new( + mask: ApiMask, + client: TrustedClient, + name: String, + address_source: ContractAddress, + self_key_pair: Arc, + ) -> Self { + let contract = OnChainServiceContract { + mask: mask, + client: client, + self_key_pair: self_key_pair, + name: name, + address_source: address_source, + data: RwLock::new(ServiceData { + contract_address: None, + last_log_block: None, + }), + }; - contract.update_contract_address(); - contract - } + contract.update_contract_address(); + contract + } - /// Send transaction to the service contract. 
- fn send_contract_transaction(&self, tx_name: &str, origin: &Address, server_key_id: &ServerKeyId, is_response_required: C, prepare_tx: P) -> Result<(), String> - where C: FnOnce(&Client, &Address, &ServerKeyId, &Address) -> bool, - P: FnOnce(&Client, &Address) -> Result { - // only publish if contract address is set && client is online - let client = match self.client.get() { - Some(client) => client, - None => return Err("trusted client is required to publish key".into()), - }; + /// Send transaction to the service contract. + fn send_contract_transaction( + &self, + tx_name: &str, + origin: &Address, + server_key_id: &ServerKeyId, + is_response_required: C, + prepare_tx: P, + ) -> Result<(), String> + where + C: FnOnce(&Client, &Address, &ServerKeyId, &Address) -> bool, + P: FnOnce(&Client, &Address) -> Result, + { + // only publish if contract address is set && client is online + let client = match self.client.get() { + Some(client) => client, + None => return Err("trusted client is required to publish key".into()), + }; - // only publish key if contract waits for publication - // failing is ok here - it could be that enough confirmations have been recevied - // or key has been requested using HTTP API - let self_address = public_to_address(self.self_key_pair.public()); - if !is_response_required(&*client, origin, server_key_id, &self_address) { - return Ok(()); - } + // only publish key if contract waits for publication + // failing is ok here - it could be that enough confirmations have been recevied + // or key has been requested using HTTP API + let self_address = public_to_address(self.self_key_pair.public()); + if !is_response_required(&*client, origin, server_key_id, &self_address) { + return Ok(()); + } - // prepare transaction data - let transaction_data = prepare_tx(&*client, origin)?; + // prepare transaction data + let transaction_data = prepare_tx(&*client, origin)?; - // send transaction - self.client.transact_contract( - origin.clone(), - 
transaction_data - ).map_err(|e| format!("{}", e))?; + // send transaction + self.client + .transact_contract(origin.clone(), transaction_data) + .map_err(|e| format!("{}", e))?; - trace!(target: "secretstore", "{}: transaction {} sent to service contract", + trace!(target: "secretstore", "{}: transaction {} sent to service contract", self.self_key_pair.public(), tx_name); - Ok(()) - } + Ok(()) + } - /// Create task-specific pending requests iterator. - fn create_pending_requests_iterator< - C: 'static + Fn(&Client, &Address, &BlockId) -> Result, - R: 'static + Fn(&NodeKeyPair, &Client, &Address, &BlockId, U256) -> Result<(bool, ServiceTask), String> - >(&self, client: Arc, contract_address: &Address, block: &BlockId, get_count: C, read_item: R) -> Box> { - get_count(&*client, contract_address, block) - .map(|count| { - let client = client.clone(); - let self_key_pair = self.self_key_pair.clone(); - let contract_address = contract_address.clone(); - let block = block.clone(); - Box::new(PendingRequestsIterator { - read_request: move |index| read_item(&*self_key_pair, &*client, &contract_address, &block, index) + /// Create task-specific pending requests iterator. 
+ fn create_pending_requests_iterator< + C: 'static + Fn(&Client, &Address, &BlockId) -> Result, + R: 'static + + Fn( + &NodeKeyPair, + &Client, + &Address, + &BlockId, + U256, + ) -> Result<(bool, ServiceTask), String>, + >( + &self, + client: Arc, + contract_address: &Address, + block: &BlockId, + get_count: C, + read_item: R, + ) -> Box> { + get_count(&*client, contract_address, block) + .map(|count| { + let client = client.clone(); + let self_key_pair = self.self_key_pair.clone(); + let contract_address = contract_address.clone(); + let block = block.clone(); + Box::new(PendingRequestsIterator { + read_request: move |index| { + read_item(&*self_key_pair, &*client, &contract_address, &block, index) .map_err(|error| { warn!(target: "secretstore", "{}: reading pending request failed: {}", self_key_pair.public(), error); error }) - .ok(), - index: 0.into(), - length: count, - }) as Box> - }) - .map_err(|error| { - warn!(target: "secretstore", "{}: creating pending requests iterator failed: {}", + .ok() + }, + index: 0.into(), + length: count, + }) as Box> + }) + .map_err(|error| { + warn!(target: "secretstore", "{}: creating pending requests iterator failed: {}", self.self_key_pair.public(), error); - error - }) - .ok() - .unwrap_or_else(|| Box::new(::std::iter::empty())) - } + error + }) + .ok() + .unwrap_or_else(|| Box::new(::std::iter::empty())) + } - /// Update service contract address. - fn update_contract_address(&self) -> bool { - let contract_address = self.client.read_contract_address(self.name.clone(), &self.address_source); - let mut data = self.data.write(); - if contract_address != data.contract_address { - trace!(target: "secretstore", "{}: installing {} service contract from address {:?}", + /// Update service contract address. 
+ fn update_contract_address(&self) -> bool { + let contract_address = self + .client + .read_contract_address(self.name.clone(), &self.address_source); + let mut data = self.data.write(); + if contract_address != data.contract_address { + trace!(target: "secretstore", "{}: installing {} service contract from address {:?}", self.self_key_pair.public(), self.name, contract_address); - data.contract_address = contract_address; - } + data.contract_address = contract_address; + } - data.contract_address.is_some() - } + data.contract_address.is_some() + } } impl ServiceContract for OnChainServiceContract { - fn update(&self) -> bool { - self.update_contract_address() && self.client.get().is_some() - } + fn update(&self) -> bool { + self.update_contract_address() && self.client.get().is_some() + } - fn read_logs(&self) -> Box> { - let client = match self.client.get() { - Some(client) => client, - None => { - warn!(target: "secretstore", "{}: client is offline during read_logs call", + fn read_logs(&self) -> Box> { + let client = match self.client.get() { + Some(client) => client, + None => { + warn!(target: "secretstore", "{}: client is offline during read_logs call", self.self_key_pair.public()); - return Box::new(::std::iter::empty()); - }, - }; + return Box::new(::std::iter::empty()); + } + }; - // prepare range of blocks to read logs from - let (address, first_block, last_block) = { - let mut data = self.data.write(); - let address = match data.contract_address { - Some(address) => address, - None => return Box::new(::std::iter::empty()), // no contract installed - }; - let confirmed_block = match get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED) { - Some(confirmed_block) => confirmed_block, - None => return Box::new(::std::iter::empty()), // no block with enough confirmations - }; - let first_block = match data.last_log_block.take().and_then(|b| client.tree_route(&b, &confirmed_block)) { - // if we have a route from last_log_block to confirmed_block 
=> search for logs on this route - // - // potentially this could lead us to reading same logs twice when reorganizing to the fork, which - // already has been canonical previosuly - // the worst thing that can happen in this case is spending some time reading unneeded data from SS db - Some(ref route) if route.index < route.blocks.len() => route.blocks[route.index], - // else we care only about confirmed block - _ => confirmed_block.clone(), - }; + // prepare range of blocks to read logs from + let (address, first_block, last_block) = { + let mut data = self.data.write(); + let address = match data.contract_address { + Some(address) => address, + None => return Box::new(::std::iter::empty()), // no contract installed + }; + let confirmed_block = + match get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED) { + Some(confirmed_block) => confirmed_block, + None => return Box::new(::std::iter::empty()), // no block with enough confirmations + }; + let first_block = match data + .last_log_block + .take() + .and_then(|b| client.tree_route(&b, &confirmed_block)) + { + // if we have a route from last_log_block to confirmed_block => search for logs on this route + // + // potentially this could lead us to reading same logs twice when reorganizing to the fork, which + // already has been canonical previosuly + // the worst thing that can happen in this case is spending some time reading unneeded data from SS db + Some(ref route) if route.index < route.blocks.len() => route.blocks[route.index], + // else we care only about confirmed block + _ => confirmed_block.clone(), + }; - data.last_log_block = Some(confirmed_block.clone()); - (address, first_block, confirmed_block) - }; + data.last_log_block = Some(confirmed_block.clone()); + (address, first_block, confirmed_block) + }; - // read server key generation requests - let request_logs = client.logs(Filter { - from_block: BlockId::Hash(first_block), - to_block: BlockId::Hash(last_block), - address: 
Some(vec![address]), - topics: vec![Some(mask_topics(&self.mask))], - limit: None, - }).unwrap_or_default(); + // read server key generation requests + let request_logs = client + .logs(Filter { + from_block: BlockId::Hash(first_block), + to_block: BlockId::Hash(last_block), + address: Some(vec![address]), + topics: vec![Some(mask_topics(&self.mask))], + limit: None, + }) + .unwrap_or_default(); - Box::new(request_logs.into_iter() - .filter_map(|log| { - let raw_log: RawLog = (log.entry.topics.into_iter().map(|t| t.0.into()).collect(), log.entry.data).into(); - if raw_log.topics[0] == *SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH { + Box::new( + request_logs + .into_iter() + .filter_map(|log| { + let raw_log: RawLog = ( + log.entry.topics.into_iter().map(|t| t.0.into()).collect(), + log.entry.data, + ) + .into(); + if raw_log.topics[0] == *SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH { ServerKeyGenerationService::parse_log(&address, raw_log) } else if raw_log.topics[0] == *SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH { ServerKeyRetrievalService::parse_log(&address, raw_log) @@ -305,525 +410,956 @@ impl ServiceContract for OnChainServiceContract { error }) .ok() - }).collect::>().into_iter()) - } + }) + .collect::>() + .into_iter(), + ) + } - fn read_pending_requests(&self) -> Box> { - let client = match self.client.get() { - Some(client) => client, - None => return Box::new(::std::iter::empty()), - }; + fn read_pending_requests(&self) -> Box> { + let client = match self.client.get() { + Some(client) => client, + None => return Box::new(::std::iter::empty()), + }; - // we only need requests that are here for more than REQUEST_CONFIRMATIONS_REQUIRED blocks - // => we're reading from Latest - (REQUEST_CONFIRMATIONS_REQUIRED + 1) block - let data = self.data.read(); - match data.contract_address { - None => Box::new(::std::iter::empty()), - Some(contract_address) => get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED + 1) - .map(|b| { - let block 
= BlockId::Hash(b); - let iter = match self.mask.server_key_generation_requests { - true => Box::new(self.create_pending_requests_iterator(client.clone(), &contract_address, &block, - &ServerKeyGenerationService::read_pending_requests_count, - &ServerKeyGenerationService::read_pending_request)) as Box>, - false => Box::new(::std::iter::empty()), - }; - let iter = match self.mask.server_key_retrieval_requests { - true => Box::new(iter.chain(self.create_pending_requests_iterator(client.clone(), &contract_address, &block, - &ServerKeyRetrievalService::read_pending_requests_count, - &ServerKeyRetrievalService::read_pending_request))), - false => iter, - }; - let iter = match self.mask.document_key_store_requests { - true => Box::new(iter.chain(self.create_pending_requests_iterator(client.clone(), &contract_address, &block, - &DocumentKeyStoreService::read_pending_requests_count, - &DocumentKeyStoreService::read_pending_request))), - false => iter, - }; - let iter = match self.mask.document_key_shadow_retrieval_requests { - true => Box::new(iter.chain(self.create_pending_requests_iterator(client, &contract_address, &block, - &DocumentKeyShadowRetrievalService::read_pending_requests_count, - &DocumentKeyShadowRetrievalService::read_pending_request))), - false => iter - }; + // we only need requests that are here for more than REQUEST_CONFIRMATIONS_REQUIRED blocks + // => we're reading from Latest - (REQUEST_CONFIRMATIONS_REQUIRED + 1) block + let data = self.data.read(); + match data.contract_address { + None => Box::new(::std::iter::empty()), + Some(contract_address) => { + get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED + 1) + .map(|b| { + let block = BlockId::Hash(b); + let iter = match self.mask.server_key_generation_requests { + true => Box::new(self.create_pending_requests_iterator( + client.clone(), + &contract_address, + &block, + &ServerKeyGenerationService::read_pending_requests_count, + &ServerKeyGenerationService::read_pending_request, + )) 
+ as Box>, + false => Box::new(::std::iter::empty()), + }; + let iter = match self.mask.server_key_retrieval_requests { + true => Box::new(iter.chain(self.create_pending_requests_iterator( + client.clone(), + &contract_address, + &block, + &ServerKeyRetrievalService::read_pending_requests_count, + &ServerKeyRetrievalService::read_pending_request, + ))), + false => iter, + }; + let iter = match self.mask.document_key_store_requests { + true => Box::new(iter.chain(self.create_pending_requests_iterator( + client.clone(), + &contract_address, + &block, + &DocumentKeyStoreService::read_pending_requests_count, + &DocumentKeyStoreService::read_pending_request, + ))), + false => iter, + }; + let iter = match self.mask.document_key_shadow_retrieval_requests { + true => Box::new(iter.chain(self.create_pending_requests_iterator( + client, + &contract_address, + &block, + &DocumentKeyShadowRetrievalService::read_pending_requests_count, + &DocumentKeyShadowRetrievalService::read_pending_request, + ))), + false => iter, + }; - iter - }) - .unwrap_or_else(|| Box::new(::std::iter::empty())) - } - } + iter + }) + .unwrap_or_else(|| Box::new(::std::iter::empty())) + } + } + } - fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String> { - self.send_contract_transaction("publish_generated_server_key", origin, server_key_id, ServerKeyGenerationService::is_response_required, - |_, _| Ok(ServerKeyGenerationService::prepare_pubish_tx_data(server_key_id, &server_key))) - } + fn publish_generated_server_key( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + server_key: Public, + ) -> Result<(), String> { + self.send_contract_transaction( + "publish_generated_server_key", + origin, + server_key_id, + ServerKeyGenerationService::is_response_required, + |_, _| { + Ok(ServerKeyGenerationService::prepare_pubish_tx_data( + server_key_id, + &server_key, + )) + }, + ) + } - fn 
publish_server_key_generation_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.send_contract_transaction("publish_server_key_generation_error", origin, server_key_id, ServerKeyGenerationService::is_response_required, - |_, _| Ok(ServerKeyGenerationService::prepare_error_tx_data(server_key_id))) - } + fn publish_server_key_generation_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.send_contract_transaction( + "publish_server_key_generation_error", + origin, + server_key_id, + ServerKeyGenerationService::is_response_required, + |_, _| { + Ok(ServerKeyGenerationService::prepare_error_tx_data( + server_key_id, + )) + }, + ) + } - fn publish_retrieved_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String> { - let threshold = serialize_threshold(threshold)?; - self.send_contract_transaction("publish_retrieved_server_key", origin, server_key_id, ServerKeyRetrievalService::is_response_required, - |_, _| Ok(ServerKeyRetrievalService::prepare_pubish_tx_data(server_key_id, server_key, threshold))) - } + fn publish_retrieved_server_key( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + server_key: Public, + threshold: usize, + ) -> Result<(), String> { + let threshold = serialize_threshold(threshold)?; + self.send_contract_transaction( + "publish_retrieved_server_key", + origin, + server_key_id, + ServerKeyRetrievalService::is_response_required, + |_, _| { + Ok(ServerKeyRetrievalService::prepare_pubish_tx_data( + server_key_id, + server_key, + threshold, + )) + }, + ) + } - fn publish_server_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.send_contract_transaction("publish_server_key_retrieval_error", origin, server_key_id, ServerKeyRetrievalService::is_response_required, - |_, _| 
Ok(ServerKeyRetrievalService::prepare_error_tx_data(server_key_id))) - } + fn publish_server_key_retrieval_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.send_contract_transaction( + "publish_server_key_retrieval_error", + origin, + server_key_id, + ServerKeyRetrievalService::is_response_required, + |_, _| { + Ok(ServerKeyRetrievalService::prepare_error_tx_data( + server_key_id, + )) + }, + ) + } - fn publish_stored_document_key(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.send_contract_transaction("publish_stored_document_key", origin, server_key_id, DocumentKeyStoreService::is_response_required, - |_, _| Ok(DocumentKeyStoreService::prepare_pubish_tx_data(server_key_id))) - } + fn publish_stored_document_key( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.send_contract_transaction( + "publish_stored_document_key", + origin, + server_key_id, + DocumentKeyStoreService::is_response_required, + |_, _| { + Ok(DocumentKeyStoreService::prepare_pubish_tx_data( + server_key_id, + )) + }, + ) + } - fn publish_document_key_store_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.send_contract_transaction("publish_document_key_store_error", origin, server_key_id, DocumentKeyStoreService::is_response_required, - |_, _| Ok(DocumentKeyStoreService::prepare_error_tx_data(server_key_id))) - } + fn publish_document_key_store_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.send_contract_transaction( + "publish_document_key_store_error", + origin, + server_key_id, + DocumentKeyStoreService::is_response_required, + |_, _| { + Ok(DocumentKeyStoreService::prepare_error_tx_data( + server_key_id, + )) + }, + ) + } - fn publish_retrieved_document_key_common(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, 
threshold: usize) -> Result<(), String> { - let threshold = serialize_threshold(threshold)?; - self.send_contract_transaction("publish_retrieved_document_key_common", origin, server_key_id, - |client, contract_address, server_key_id, key_server| - DocumentKeyShadowRetrievalService::is_response_required(client, contract_address, server_key_id, requester, key_server), - |_, _| - Ok(DocumentKeyShadowRetrievalService::prepare_pubish_common_tx_data(server_key_id, requester, common_point, threshold)) - ) - } + fn publish_retrieved_document_key_common( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + common_point: Public, + threshold: usize, + ) -> Result<(), String> { + let threshold = serialize_threshold(threshold)?; + self.send_contract_transaction( + "publish_retrieved_document_key_common", + origin, + server_key_id, + |client, contract_address, server_key_id, key_server| { + DocumentKeyShadowRetrievalService::is_response_required( + client, + contract_address, + server_key_id, + requester, + key_server, + ) + }, + |_, _| { + Ok( + DocumentKeyShadowRetrievalService::prepare_pubish_common_tx_data( + server_key_id, + requester, + common_point, + threshold, + ), + ) + }, + ) + } - fn publish_retrieved_document_key_personal(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String> { - self.send_contract_transaction("publish_retrieved_document_key_personal", origin, server_key_id, |_, _, _, _| true, - move |client, address| - DocumentKeyShadowRetrievalService::prepare_pubish_personal_tx_data(client, address, server_key_id, requester, participants, decrypted_secret, shadow) - ) - } + fn publish_retrieved_document_key_personal( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + participants: &[Address], + decrypted_secret: Public, + shadow: Bytes, + ) -> Result<(), String> { + 
self.send_contract_transaction( + "publish_retrieved_document_key_personal", + origin, + server_key_id, + |_, _, _, _| true, + move |client, address| { + DocumentKeyShadowRetrievalService::prepare_pubish_personal_tx_data( + client, + address, + server_key_id, + requester, + participants, + decrypted_secret, + shadow, + ) + }, + ) + } - fn publish_document_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> { - self.send_contract_transaction("publish_document_key_retrieval_error", origin, server_key_id, - |client, contract_address, server_key_id, key_server| - DocumentKeyShadowRetrievalService::is_response_required(client, contract_address, server_key_id, requester, key_server), - |_, _| - Ok(DocumentKeyShadowRetrievalService::prepare_error_tx_data(server_key_id, requester)) - ) - } + fn publish_document_key_retrieval_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + ) -> Result<(), String> { + self.send_contract_transaction( + "publish_document_key_retrieval_error", + origin, + server_key_id, + |client, contract_address, server_key_id, key_server| { + DocumentKeyShadowRetrievalService::is_response_required( + client, + contract_address, + server_key_id, + requester, + key_server, + ) + }, + |_, _| { + Ok(DocumentKeyShadowRetrievalService::prepare_error_tx_data( + server_key_id, + requester, + )) + }, + ) + } } -impl Iterator for PendingRequestsIterator where F: Fn(U256) -> Option<(bool, ServiceTask)> { - type Item = (bool, ServiceTask); +impl Iterator for PendingRequestsIterator +where + F: Fn(U256) -> Option<(bool, ServiceTask)>, +{ + type Item = (bool, ServiceTask); - fn next(&mut self) -> Option<(bool, ServiceTask)> { - if self.index >= self.length { - return None; - } + fn next(&mut self) -> Option<(bool, ServiceTask)> { + if self.index >= self.length { + return None; + } - let index = self.index.clone(); - self.index = self.index + 1; + let index = 
self.index.clone(); + self.index = self.index + 1; - (self.read_request)(index) - } + (self.read_request)(index) + } } /// Returns vector of logs topics to listen to. pub fn mask_topics(mask: &ApiMask) -> Vec { - let mut topics = Vec::new(); - if mask.server_key_generation_requests { - topics.push(*SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH); - } - if mask.server_key_retrieval_requests { - topics.push(*SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH); - } - if mask.document_key_store_requests { - topics.push(*DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH); - } - if mask.document_key_shadow_retrieval_requests { - topics.push(*DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH); - topics.push(*DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH); - } - topics + let mut topics = Vec::new(); + if mask.server_key_generation_requests { + topics.push(*SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH); + } + if mask.server_key_retrieval_requests { + topics.push(*SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH); + } + if mask.document_key_store_requests { + topics.push(*DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH); + } + if mask.document_key_shadow_retrieval_requests { + topics.push(*DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH); + topics.push(*DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH); + } + topics } impl ServerKeyGenerationService { - /// Parse request log entry. - pub fn parse_log(origin: &Address, raw_log: RawLog) -> Result { - match service::events::server_key_generation_requested::parse_log(raw_log) { - Ok(l) => Ok(ServiceTask::GenerateServerKey(origin.clone(), l.server_key_id, l.author, parse_threshold(l.threshold)?)), - Err(e) => Err(format!("{}", e)), - } - } + /// Parse request log entry. 
+ pub fn parse_log(origin: &Address, raw_log: RawLog) -> Result { + match service::events::server_key_generation_requested::parse_log(raw_log) { + Ok(l) => Ok(ServiceTask::GenerateServerKey( + origin.clone(), + l.server_key_id, + l.author, + parse_threshold(l.threshold)?, + )), + Err(e) => Err(format!("{}", e)), + } + } - /// Check if response from key server is required. - pub fn is_response_required(client: &Client, contract_address: &Address, server_key_id: &ServerKeyId, key_server: &Address) -> bool { - // we're checking confirmation in Latest block, because we're interested in latest contract state here - let (encoded, decoder) = service::functions::is_server_key_generation_response_required::call(*server_key_id, *key_server); - match client.call_contract(BlockId::Latest, *contract_address, encoded) { - Err(_) => true, - Ok(data) => decoder.decode(&data).unwrap_or(true) - } - } + /// Check if response from key server is required. + pub fn is_response_required( + client: &Client, + contract_address: &Address, + server_key_id: &ServerKeyId, + key_server: &Address, + ) -> bool { + // we're checking confirmation in Latest block, because we're interested in latest contract state here + let (encoded, decoder) = + service::functions::is_server_key_generation_response_required::call( + *server_key_id, + *key_server, + ); + match client.call_contract(BlockId::Latest, *contract_address, encoded) { + Err(_) => true, + Ok(data) => decoder.decode(&data).unwrap_or(true), + } + } - /// Prepare publish key transaction data. - pub fn prepare_pubish_tx_data(server_key_id: &ServerKeyId, server_key_public: &Public) -> Bytes { - service::functions::server_key_generated::encode_input(*server_key_id, server_key_public.to_vec()) - } + /// Prepare publish key transaction data. 
+ pub fn prepare_pubish_tx_data( + server_key_id: &ServerKeyId, + server_key_public: &Public, + ) -> Bytes { + service::functions::server_key_generated::encode_input( + *server_key_id, + server_key_public.to_vec(), + ) + } - /// Prepare error transaction data. - pub fn prepare_error_tx_data(server_key_id: &ServerKeyId) -> Bytes { - service::functions::server_key_generation_error::encode_input(*server_key_id) - } + /// Prepare error transaction data. + pub fn prepare_error_tx_data(server_key_id: &ServerKeyId) -> Bytes { + service::functions::server_key_generation_error::encode_input(*server_key_id) + } - /// Read pending requests count. - fn read_pending_requests_count(client: &Client, contract_address: &Address, block: &BlockId) -> Result { - let (encoded, decoder) = service::functions::server_key_generation_requests_count::call(); - decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string()) - } + /// Read pending requests count. + fn read_pending_requests_count( + client: &Client, + contract_address: &Address, + block: &BlockId, + ) -> Result { + let (encoded, decoder) = service::functions::server_key_generation_requests_count::call(); + decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string()) + } - /// Read pending request. - fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { - let self_address = public_to_address(self_key_pair.public()); + /// Read pending request. 
+ fn read_pending_request( + self_key_pair: &NodeKeyPair, + client: &Client, + contract_address: &Address, + block: &BlockId, + index: U256, + ) -> Result<(bool, ServiceTask), String> { + let self_address = public_to_address(self_key_pair.public()); - let (encoded, decoder) = service::functions::get_server_key_generation_request::call(index); - let (server_key_id, author, threshold) = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string())?; - let threshold = parse_threshold(threshold)?; + let (encoded, decoder) = service::functions::get_server_key_generation_request::call(index); + let (server_key_id, author, threshold) = decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string())?; + let threshold = parse_threshold(threshold)?; - let (encoded, decoder) = service::functions::is_server_key_generation_response_required::call(server_key_id, self_address); - let not_confirmed = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string())?; + let (encoded, decoder) = + service::functions::is_server_key_generation_response_required::call( + server_key_id, + self_address, + ); + let not_confirmed = decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string())?; - let task = ServiceTask::GenerateServerKey( - contract_address.clone(), - server_key_id, - author, - threshold, - ); + let task = ServiceTask::GenerateServerKey( + contract_address.clone(), + server_key_id, + author, + threshold, + ); - Ok((not_confirmed, task)) - } + Ok((not_confirmed, task)) + } } impl ServerKeyRetrievalService { - /// Parse request log entry. 
- pub fn parse_log(origin: &Address, raw_log: RawLog) -> Result { - match service::events::server_key_retrieval_requested::parse_log(raw_log) { - Ok(l) => Ok(ServiceTask::RetrieveServerKey(*origin, l.server_key_id)), - Err(e) => Err(e.to_string()) - } - } + /// Parse request log entry. + pub fn parse_log(origin: &Address, raw_log: RawLog) -> Result { + match service::events::server_key_retrieval_requested::parse_log(raw_log) { + Ok(l) => Ok(ServiceTask::RetrieveServerKey(*origin, l.server_key_id)), + Err(e) => Err(e.to_string()), + } + } - /// Check if response from key server is required. - pub fn is_response_required(client: &Client, contract_address: &Address, server_key_id: &ServerKeyId, key_server: &Address) -> bool { - // we're checking confirmation in Latest block, because we're interested in latest contract state here - let (encoded, decoder) = service::functions::is_server_key_retrieval_response_required::call(*server_key_id, *key_server); - match client.call_contract(BlockId::Latest, *contract_address, encoded) { - Err(_) => true, - Ok(data) => decoder.decode(&data).unwrap_or(true) - } - } + /// Check if response from key server is required. + pub fn is_response_required( + client: &Client, + contract_address: &Address, + server_key_id: &ServerKeyId, + key_server: &Address, + ) -> bool { + // we're checking confirmation in Latest block, because we're interested in latest contract state here + let (encoded, decoder) = + service::functions::is_server_key_retrieval_response_required::call( + *server_key_id, + *key_server, + ); + match client.call_contract(BlockId::Latest, *contract_address, encoded) { + Err(_) => true, + Ok(data) => decoder.decode(&data).unwrap_or(true), + } + } - /// Prepare publish key transaction data. 
- pub fn prepare_pubish_tx_data(server_key_id: &ServerKeyId, server_key_public: Public, threshold: U256) -> Bytes { - service::functions::server_key_retrieved::encode_input(*server_key_id, server_key_public.to_vec(), threshold) - } + /// Prepare publish key transaction data. + pub fn prepare_pubish_tx_data( + server_key_id: &ServerKeyId, + server_key_public: Public, + threshold: U256, + ) -> Bytes { + service::functions::server_key_retrieved::encode_input( + *server_key_id, + server_key_public.to_vec(), + threshold, + ) + } - /// Prepare error transaction data. - pub fn prepare_error_tx_data(server_key_id: &ServerKeyId) -> Bytes { - service::functions::server_key_retrieval_error::encode_input(*server_key_id) - } + /// Prepare error transaction data. + pub fn prepare_error_tx_data(server_key_id: &ServerKeyId) -> Bytes { + service::functions::server_key_retrieval_error::encode_input(*server_key_id) + } - /// Read pending requests count. - fn read_pending_requests_count(client: &Client, contract_address: &Address, block: &BlockId) -> Result { - let (encoded, decoder) = service::functions::server_key_retrieval_requests_count::call(); - decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string()) - } + /// Read pending requests count. + fn read_pending_requests_count( + client: &Client, + contract_address: &Address, + block: &BlockId, + ) -> Result { + let (encoded, decoder) = service::functions::server_key_retrieval_requests_count::call(); + decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string()) + } - /// Read pending request. - fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { - let self_address = public_to_address(self_key_pair.public()); + /// Read pending request. 
+ fn read_pending_request( + self_key_pair: &NodeKeyPair, + client: &Client, + contract_address: &Address, + block: &BlockId, + index: U256, + ) -> Result<(bool, ServiceTask), String> { + let self_address = public_to_address(self_key_pair.public()); - let (encoded, decoder) = service::functions::get_server_key_retrieval_request::call(index); - let server_key_id = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string())?; + let (encoded, decoder) = service::functions::get_server_key_retrieval_request::call(index); + let server_key_id = decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string())?; - let (encoded, decoder) = service::functions::is_server_key_retrieval_response_required::call(server_key_id, self_address); - let not_confirmed = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string())?; + let (encoded, decoder) = + service::functions::is_server_key_retrieval_response_required::call( + server_key_id, + self_address, + ); + let not_confirmed = decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string())?; - let task = ServiceTask::RetrieveServerKey( - *contract_address, - server_key_id, - ); + let task = ServiceTask::RetrieveServerKey(*contract_address, server_key_id); - Ok((not_confirmed, task)) - } + Ok((not_confirmed, task)) + } } impl DocumentKeyStoreService { - /// Parse request log entry. - pub fn parse_log(origin: &Address, raw_log: RawLog) -> Result { - match service::events::document_key_store_requested::parse_log(raw_log) { - Ok(l) => Ok(ServiceTask::StoreDocumentKey(origin.clone(), l.server_key_id, l.author, (*l.common_point).into(), (*l.encrypted_point).into())), - Err(e) => Err(format!("{}", e)), - } - } + /// Parse request log entry. 
+ pub fn parse_log(origin: &Address, raw_log: RawLog) -> Result { + match service::events::document_key_store_requested::parse_log(raw_log) { + Ok(l) => Ok(ServiceTask::StoreDocumentKey( + origin.clone(), + l.server_key_id, + l.author, + (*l.common_point).into(), + (*l.encrypted_point).into(), + )), + Err(e) => Err(format!("{}", e)), + } + } - /// Check if response from key server is required. - pub fn is_response_required(client: &Client, contract_address: &Address, server_key_id: &ServerKeyId, key_server: &Address) -> bool { - // we're checking confirmation in Latest block, because we're interested in latest contract state here - let (encoded, decoder) = service::functions::is_document_key_store_response_required::call(*server_key_id, *key_server); - match client.call_contract(BlockId::Latest, *contract_address, encoded) { - Err(_) => true, - Ok(data) => decoder.decode(&data).unwrap_or(true) - } - } + /// Check if response from key server is required. + pub fn is_response_required( + client: &Client, + contract_address: &Address, + server_key_id: &ServerKeyId, + key_server: &Address, + ) -> bool { + // we're checking confirmation in Latest block, because we're interested in latest contract state here + let (encoded, decoder) = service::functions::is_document_key_store_response_required::call( + *server_key_id, + *key_server, + ); + match client.call_contract(BlockId::Latest, *contract_address, encoded) { + Err(_) => true, + Ok(data) => decoder.decode(&data).unwrap_or(true), + } + } - /// Prepare publish key transaction data. - pub fn prepare_pubish_tx_data(server_key_id: &ServerKeyId) -> Bytes { - service::functions::document_key_stored::encode_input(*server_key_id) - } + /// Prepare publish key transaction data. + pub fn prepare_pubish_tx_data(server_key_id: &ServerKeyId) -> Bytes { + service::functions::document_key_stored::encode_input(*server_key_id) + } - /// Prepare error transaction data. 
- pub fn prepare_error_tx_data(server_key_id: &ServerKeyId) -> Bytes { - service::functions::document_key_store_error::encode_input(*server_key_id) - } + /// Prepare error transaction data. + pub fn prepare_error_tx_data(server_key_id: &ServerKeyId) -> Bytes { + service::functions::document_key_store_error::encode_input(*server_key_id) + } - /// Read pending requests count. - fn read_pending_requests_count(client: &Client, contract_address: &Address, block: &BlockId) -> Result { - let (encoded, decoder) = service::functions::document_key_store_requests_count::call(); - decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string()) - } + /// Read pending requests count. + fn read_pending_requests_count( + client: &Client, + contract_address: &Address, + block: &BlockId, + ) -> Result { + let (encoded, decoder) = service::functions::document_key_store_requests_count::call(); + decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string()) + } - /// Read pending request. - fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { - let self_address = public_to_address(self_key_pair.public()); - let (encoded, decoder) = service::functions::get_document_key_store_request::call(index); - let (server_key_id, author, common_point, encrypted_point) = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string())?; + /// Read pending request. 
+ fn read_pending_request( + self_key_pair: &NodeKeyPair, + client: &Client, + contract_address: &Address, + block: &BlockId, + index: U256, + ) -> Result<(bool, ServiceTask), String> { + let self_address = public_to_address(self_key_pair.public()); + let (encoded, decoder) = service::functions::get_document_key_store_request::call(index); + let (server_key_id, author, common_point, encrypted_point) = decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string())?; - let (encoded, decoder) = service::functions::is_document_key_store_response_required::call(server_key_id, self_address); - let not_confirmed = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string())?; + let (encoded, decoder) = service::functions::is_document_key_store_response_required::call( + server_key_id, + self_address, + ); + let not_confirmed = decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string())?; - let task = ServiceTask::StoreDocumentKey( - *contract_address, - server_key_id, - author, - Public::from_slice(&common_point), - Public::from_slice(&encrypted_point), - ); + let task = ServiceTask::StoreDocumentKey( + *contract_address, + server_key_id, + author, + Public::from_slice(&common_point), + Public::from_slice(&encrypted_point), + ); - Ok((not_confirmed, task)) - } + Ok((not_confirmed, task)) + } } impl DocumentKeyShadowRetrievalService { - /// Parse common request log entry. - pub fn parse_common_request_log(origin: &Address, raw_log: RawLog) -> Result { - match service::events::document_key_common_retrieval_requested::parse_log(raw_log) { - Ok(l) => Ok(ServiceTask::RetrieveShadowDocumentKeyCommon(origin.clone(), l.server_key_id, l.requester)), - Err(e) => Err(e.to_string()) - } - } + /// Parse common request log entry. 
+ pub fn parse_common_request_log( + origin: &Address, + raw_log: RawLog, + ) -> Result { + match service::events::document_key_common_retrieval_requested::parse_log(raw_log) { + Ok(l) => Ok(ServiceTask::RetrieveShadowDocumentKeyCommon( + origin.clone(), + l.server_key_id, + l.requester, + )), + Err(e) => Err(e.to_string()), + } + } - /// Parse personal request log entry. - pub fn parse_personal_request_log(origin: &Address, raw_log: RawLog) -> Result { - match service::events::document_key_personal_retrieval_requested::parse_log(raw_log) { - Ok(l) => Ok(ServiceTask::RetrieveShadowDocumentKeyPersonal(origin.clone(), l.server_key_id, (*l.requester_public).into())), - Err(e) => Err(e.to_string()) - } - } + /// Parse personal request log entry. + pub fn parse_personal_request_log( + origin: &Address, + raw_log: RawLog, + ) -> Result { + match service::events::document_key_personal_retrieval_requested::parse_log(raw_log) { + Ok(l) => Ok(ServiceTask::RetrieveShadowDocumentKeyPersonal( + origin.clone(), + l.server_key_id, + (*l.requester_public).into(), + )), + Err(e) => Err(e.to_string()), + } + } - /// Check if response from key server is required. - pub fn is_response_required(client: &Client, contract_address: &Address, server_key_id: &ServerKeyId, requester: &Address, key_server: &Address) -> bool { - // we're checking confirmation in Latest block, because we're interested in latest contract state here - let (encoded, decoder) = service::functions::is_document_key_shadow_retrieval_response_required::call(*server_key_id, *requester, *key_server); - match client.call_contract(BlockId::Latest, *contract_address, encoded) { - Err(_) => true, - Ok(data) => decoder.decode(&data).unwrap_or(true) - } - } + /// Check if response from key server is required. 
+ pub fn is_response_required( + client: &Client, + contract_address: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + key_server: &Address, + ) -> bool { + // we're checking confirmation in Latest block, because we're interested in latest contract state here + let (encoded, decoder) = + service::functions::is_document_key_shadow_retrieval_response_required::call( + *server_key_id, + *requester, + *key_server, + ); + match client.call_contract(BlockId::Latest, *contract_address, encoded) { + Err(_) => true, + Ok(data) => decoder.decode(&data).unwrap_or(true), + } + } - /// Prepare publish common key transaction data. - pub fn prepare_pubish_common_tx_data(server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: U256) -> Bytes { - service::functions::document_key_common_retrieved::encode_input(*server_key_id, *requester, common_point.to_vec(), threshold) - } + /// Prepare publish common key transaction data. + pub fn prepare_pubish_common_tx_data( + server_key_id: &ServerKeyId, + requester: &Address, + common_point: Public, + threshold: U256, + ) -> Bytes { + service::functions::document_key_common_retrieved::encode_input( + *server_key_id, + *requester, + common_point.to_vec(), + threshold, + ) + } - /// Prepare publish personal key transaction data. 
- pub fn prepare_pubish_personal_tx_data(client: &Client, contract_address: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result { - let mut participants_mask = U256::default(); - for participant in participants { - let participant_index = Self::map_key_server_address(client, contract_address, participant.clone()) - .map_err(|e| format!("Error searching for {} participant: {}", participant, e))?; - participants_mask = participants_mask | (U256::one() << participant_index); - } - Ok(service::functions::document_key_personal_retrieved::encode_input( - *server_key_id, *requester, participants_mask, decrypted_secret.to_vec(), shadow - )) - } + /// Prepare publish personal key transaction data. + pub fn prepare_pubish_personal_tx_data( + client: &Client, + contract_address: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + participants: &[Address], + decrypted_secret: Public, + shadow: Bytes, + ) -> Result { + let mut participants_mask = U256::default(); + for participant in participants { + let participant_index = + Self::map_key_server_address(client, contract_address, participant.clone()) + .map_err(|e| { + format!("Error searching for {} participant: {}", participant, e) + })?; + participants_mask = participants_mask | (U256::one() << participant_index); + } + Ok( + service::functions::document_key_personal_retrieved::encode_input( + *server_key_id, + *requester, + participants_mask, + decrypted_secret.to_vec(), + shadow, + ), + ) + } - /// Prepare error transaction data. - pub fn prepare_error_tx_data(server_key_id: &ServerKeyId, requester: &Address) -> Bytes { - service::functions::document_key_shadow_retrieval_error::encode_input(*server_key_id, *requester) - } + /// Prepare error transaction data. 
+ pub fn prepare_error_tx_data(server_key_id: &ServerKeyId, requester: &Address) -> Bytes { + service::functions::document_key_shadow_retrieval_error::encode_input( + *server_key_id, + *requester, + ) + } - /// Read pending requests count. - fn read_pending_requests_count(client: &Client, contract_address: &Address, block: &BlockId) -> Result { - let (encoded, decoder) = service::functions::document_key_shadow_retrieval_requests_count::call(); - decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string()) - } + /// Read pending requests count. + fn read_pending_requests_count( + client: &Client, + contract_address: &Address, + block: &BlockId, + ) -> Result { + let (encoded, decoder) = + service::functions::document_key_shadow_retrieval_requests_count::call(); + decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string()) + } - /// Read pending request. - fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { - let self_address = public_to_address(self_key_pair.public()); + /// Read pending request. + fn read_pending_request( + self_key_pair: &NodeKeyPair, + client: &Client, + contract_address: &Address, + block: &BlockId, + index: U256, + ) -> Result<(bool, ServiceTask), String> { + let self_address = public_to_address(self_key_pair.public()); - let (encoded, decoder) = service::functions::get_document_key_shadow_retrieval_request::call(index); - let (server_key_id, requester, is_common_retrieval_completed) = - decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string())?; + let (encoded, decoder) = + service::functions::get_document_key_shadow_retrieval_request::call(index); + let (server_key_id, requester, is_common_retrieval_completed) = decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) 
+ .map_err(|e| e.to_string())?; - let requester = Public::from_slice(&requester); - let (encoded, decoder) = service::functions::is_document_key_shadow_retrieval_response_required::call(server_key_id, public_to_address(&requester), self_address); - let not_confirmed = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) - .map_err(|e| e.to_string())?; + let requester = Public::from_slice(&requester); + let (encoded, decoder) = + service::functions::is_document_key_shadow_retrieval_response_required::call( + server_key_id, + public_to_address(&requester), + self_address, + ); + let not_confirmed = decoder + .decode(&client.call_contract(*block, *contract_address, encoded)?) + .map_err(|e| e.to_string())?; - let task = match is_common_retrieval_completed { - true => ServiceTask::RetrieveShadowDocumentKeyPersonal( - *contract_address, - server_key_id, - requester, - ), - false => ServiceTask::RetrieveShadowDocumentKeyCommon( - *contract_address, - server_key_id, - public_to_address(&requester), - ), - }; + let task = match is_common_retrieval_completed { + true => ServiceTask::RetrieveShadowDocumentKeyPersonal( + *contract_address, + server_key_id, + requester, + ), + false => ServiceTask::RetrieveShadowDocumentKeyCommon( + *contract_address, + server_key_id, + public_to_address(&requester), + ), + }; - Ok((not_confirmed, task)) - } + Ok((not_confirmed, task)) + } - /// Map from key server address to key server index. - fn map_key_server_address(client: &Client, contract_address: &Address, key_server: Address) -> Result { - // we're checking confirmation in Latest block, because tx ,ust be appended to the latest state - let (encoded, decoder) = service::functions::require_key_server::call(key_server); - let index = decoder.decode(&client.call_contract(BlockId::Latest, *contract_address, encoded)?) - .map_err(|e| e.to_string())?; + /// Map from key server address to key server index. 
+ fn map_key_server_address( + client: &Client, + contract_address: &Address, + key_server: Address, + ) -> Result { + // we're checking confirmation in Latest block, because tx ,ust be appended to the latest state + let (encoded, decoder) = service::functions::require_key_server::call(key_server); + let index = decoder + .decode(&client.call_contract(BlockId::Latest, *contract_address, encoded)?) + .map_err(|e| e.to_string())?; - if index > u8::max_value().into() { - Err(format!("key server index is too big: {}", index)) - } else { - let index: u32 = index.into(); - Ok(index as u8) - } - } + if index > u8::max_value().into() { + Err(format!("key server index is too big: {}", index)) + } else { + let index: u32 = index.into(); + Ok(index as u8) + } + } } /// Parse threshold (we only supposrt 256 KS at max). fn parse_threshold(threshold: U256) -> Result { - let threshold_num = threshold.low_u64(); - if threshold != threshold_num.into() || threshold_num >= ::std::u8::MAX as u64 { - return Err(format!("invalid threshold to use in service contract: {}", threshold)); - } + let threshold_num = threshold.low_u64(); + if threshold != threshold_num.into() || threshold_num >= ::std::u8::MAX as u64 { + return Err(format!( + "invalid threshold to use in service contract: {}", + threshold + )); + } - Ok(threshold_num as usize) + Ok(threshold_num as usize) } /// Serialize threshold (we only support 256 KS at max). 
fn serialize_threshold(threshold: usize) -> Result { - if threshold > ::std::u8::MAX as usize { - return Err(format!("invalid threshold to use in service contract: {}", threshold)); - } - Ok(threshold.into()) + if threshold > ::std::u8::MAX as usize { + return Err(format!( + "invalid threshold to use in service contract: {}", + threshold + )); + } + Ok(threshold.into()) } #[cfg(test)] pub mod tests { - use parking_lot::Mutex; - use bytes::Bytes; - use ethkey::Public; - use ethereum_types::Address; - use listener::service_contract_listener::ServiceTask; - use {ServerKeyId}; - use super::ServiceContract; + use super::ServiceContract; + use bytes::Bytes; + use ethereum_types::Address; + use ethkey::Public; + use listener::service_contract_listener::ServiceTask; + use parking_lot::Mutex; + use ServerKeyId; - #[derive(Default)] - pub struct DummyServiceContract { - pub is_actual: bool, - pub logs: Vec, - pub pending_requests: Vec<(bool, ServiceTask)>, - pub generated_server_keys: Mutex>, - pub server_keys_generation_failures: Mutex>, - pub retrieved_server_keys: Mutex>, - pub server_keys_retrieval_failures: Mutex>, - pub stored_document_keys: Mutex>, - pub document_keys_store_failures: Mutex>, - pub common_shadow_retrieved_document_keys: Mutex>, - pub personal_shadow_retrieved_document_keys: Mutex, Public, Bytes)>>, - pub document_keys_shadow_retrieval_failures: Mutex>, - } + #[derive(Default)] + pub struct DummyServiceContract { + pub is_actual: bool, + pub logs: Vec, + pub pending_requests: Vec<(bool, ServiceTask)>, + pub generated_server_keys: Mutex>, + pub server_keys_generation_failures: Mutex>, + pub retrieved_server_keys: Mutex>, + pub server_keys_retrieval_failures: Mutex>, + pub stored_document_keys: Mutex>, + pub document_keys_store_failures: Mutex>, + pub common_shadow_retrieved_document_keys: + Mutex>, + pub personal_shadow_retrieved_document_keys: + Mutex, Public, Bytes)>>, + pub document_keys_shadow_retrieval_failures: Mutex>, + } - impl ServiceContract 
for DummyServiceContract { - fn update(&self) -> bool { - true - } + impl ServiceContract for DummyServiceContract { + fn update(&self) -> bool { + true + } - fn read_logs(&self) -> Box> { - Box::new(self.logs.clone().into_iter()) - } + fn read_logs(&self) -> Box> { + Box::new(self.logs.clone().into_iter()) + } - fn read_pending_requests(&self) -> Box> { - Box::new(self.pending_requests.clone().into_iter()) - } + fn read_pending_requests(&self) -> Box> { + Box::new(self.pending_requests.clone().into_iter()) + } - fn publish_generated_server_key(&self, _origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String> { - self.generated_server_keys.lock().push((server_key_id.clone(), server_key.clone())); - Ok(()) - } + fn publish_generated_server_key( + &self, + _origin: &Address, + server_key_id: &ServerKeyId, + server_key: Public, + ) -> Result<(), String> { + self.generated_server_keys + .lock() + .push((server_key_id.clone(), server_key.clone())); + Ok(()) + } - fn publish_server_key_generation_error(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.server_keys_generation_failures.lock().push(server_key_id.clone()); - Ok(()) - } + fn publish_server_key_generation_error( + &self, + _origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.server_keys_generation_failures + .lock() + .push(server_key_id.clone()); + Ok(()) + } - fn publish_retrieved_server_key(&self, _origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String> { - self.retrieved_server_keys.lock().push((server_key_id.clone(), server_key.clone(), threshold)); - Ok(()) - } + fn publish_retrieved_server_key( + &self, + _origin: &Address, + server_key_id: &ServerKeyId, + server_key: Public, + threshold: usize, + ) -> Result<(), String> { + self.retrieved_server_keys.lock().push(( + server_key_id.clone(), + server_key.clone(), + threshold, + )); + Ok(()) + } - fn 
publish_server_key_retrieval_error(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.server_keys_retrieval_failures.lock().push(server_key_id.clone()); - Ok(()) - } + fn publish_server_key_retrieval_error( + &self, + _origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.server_keys_retrieval_failures + .lock() + .push(server_key_id.clone()); + Ok(()) + } - fn publish_stored_document_key(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.stored_document_keys.lock().push(server_key_id.clone()); - Ok(()) - } + fn publish_stored_document_key( + &self, + _origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.stored_document_keys.lock().push(server_key_id.clone()); + Ok(()) + } - fn publish_document_key_store_error(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.document_keys_store_failures.lock().push(server_key_id.clone()); - Ok(()) - } + fn publish_document_key_store_error( + &self, + _origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.document_keys_store_failures + .lock() + .push(server_key_id.clone()); + Ok(()) + } - fn publish_retrieved_document_key_common(&self, _origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String> { - self.common_shadow_retrieved_document_keys.lock().push((server_key_id.clone(), requester.clone(), common_point.clone(), threshold)); - Ok(()) - } + fn publish_retrieved_document_key_common( + &self, + _origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + common_point: Public, + threshold: usize, + ) -> Result<(), String> { + self.common_shadow_retrieved_document_keys.lock().push(( + server_key_id.clone(), + requester.clone(), + common_point.clone(), + threshold, + )); + Ok(()) + } - fn publish_retrieved_document_key_personal(&self, _origin: &Address, 
server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String> { - self.personal_shadow_retrieved_document_keys.lock().push((server_key_id.clone(), requester.clone(), participants.iter().cloned().collect(), decrypted_secret, shadow)); - Ok(()) - } + fn publish_retrieved_document_key_personal( + &self, + _origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + participants: &[Address], + decrypted_secret: Public, + shadow: Bytes, + ) -> Result<(), String> { + self.personal_shadow_retrieved_document_keys.lock().push(( + server_key_id.clone(), + requester.clone(), + participants.iter().cloned().collect(), + decrypted_secret, + shadow, + )); + Ok(()) + } - fn publish_document_key_retrieval_error(&self, _origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> { - self.document_keys_shadow_retrieval_failures.lock().push((server_key_id.clone(), requester.clone())); - Ok(()) - } - } + fn publish_document_key_retrieval_error( + &self, + _origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + ) -> Result<(), String> { + self.document_keys_shadow_retrieval_failures + .lock() + .push((server_key_id.clone(), requester.clone())); + Ok(()) + } + } } diff --git a/secret-store/src/listener/service_contract_aggregate.rs b/secret-store/src/listener/service_contract_aggregate.rs index 29a4730e2..1812e1132 100644 --- a/secret-store/src/listener/service_contract_aggregate.rs +++ b/secret-store/src/listener/service_contract_aggregate.rs @@ -14,87 +14,148 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::Arc; use bytes::Bytes; use ethereum_types::Address; use ethkey::Public; -use listener::service_contract::ServiceContract; -use listener::service_contract_listener::ServiceTask; -use {ServerKeyId}; +use listener::{service_contract::ServiceContract, service_contract_listener::ServiceTask}; +use std::sync::Arc; +use ServerKeyId; /// Aggregated on-chain service contract. pub struct OnChainServiceContractAggregate { - /// All hosted service contracts. - contracts: Vec>, + /// All hosted service contracts. + contracts: Vec>, } impl OnChainServiceContractAggregate { - /// Create new aggregated service contract listener. - pub fn new(contracts: Vec>) -> Self { - debug_assert!(contracts.len() > 1); - OnChainServiceContractAggregate { - contracts: contracts, - } - } + /// Create new aggregated service contract listener. + pub fn new(contracts: Vec>) -> Self { + debug_assert!(contracts.len() > 1); + OnChainServiceContractAggregate { + contracts: contracts, + } + } } impl ServiceContract for OnChainServiceContractAggregate { - fn update(&self) -> bool { - let mut result = false; - for contract in &self.contracts { - result = contract.update() || result; - } - result - } + fn update(&self) -> bool { + let mut result = false; + for contract in &self.contracts { + result = contract.update() || result; + } + result + } - fn read_logs(&self) -> Box> { - self.contracts.iter() - .fold(Box::new(::std::iter::empty()) as Box>, |i, c| - Box::new(i.chain(c.read_logs()))) - } + fn read_logs(&self) -> Box> { + self.contracts.iter().fold( + Box::new(::std::iter::empty()) as Box>, + |i, c| Box::new(i.chain(c.read_logs())), + ) + } - fn read_pending_requests(&self) -> Box> { - self.contracts.iter() - .fold(Box::new(::std::iter::empty()) as Box>, |i, c| - Box::new(i.chain(c.read_pending_requests()))) - } + fn read_pending_requests(&self) -> Box> { + self.contracts.iter().fold( + Box::new(::std::iter::empty()) as Box>, + |i, c| Box::new(i.chain(c.read_pending_requests())), + ) + 
} - // in current implementation all publish methods are independent of actual contract adddress - // (tx is sent to origin) => we do not care which contract to use for publish data in methods below + // in current implementation all publish methods are independent of actual contract adddress + // (tx is sent to origin) => we do not care which contract to use for publish data in methods below - fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String> { - self.contracts[0].publish_generated_server_key(origin, server_key_id, server_key) - } + fn publish_generated_server_key( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + server_key: Public, + ) -> Result<(), String> { + self.contracts[0].publish_generated_server_key(origin, server_key_id, server_key) + } - fn publish_server_key_generation_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.contracts[0].publish_server_key_generation_error(origin, server_key_id) - } + fn publish_server_key_generation_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.contracts[0].publish_server_key_generation_error(origin, server_key_id) + } - fn publish_retrieved_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String> { - self.contracts[0].publish_retrieved_server_key(origin, server_key_id, server_key, threshold) - } + fn publish_retrieved_server_key( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + server_key: Public, + threshold: usize, + ) -> Result<(), String> { + self.contracts[0].publish_retrieved_server_key(origin, server_key_id, server_key, threshold) + } - fn publish_server_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.contracts[0].publish_server_key_retrieval_error(origin, server_key_id) - } + fn 
publish_server_key_retrieval_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.contracts[0].publish_server_key_retrieval_error(origin, server_key_id) + } - fn publish_stored_document_key(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.contracts[0].publish_stored_document_key(origin, server_key_id) - } + fn publish_stored_document_key( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.contracts[0].publish_stored_document_key(origin, server_key_id) + } - fn publish_document_key_store_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> { - self.contracts[0].publish_document_key_store_error(origin, server_key_id) - } + fn publish_document_key_store_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + self.contracts[0].publish_document_key_store_error(origin, server_key_id) + } - fn publish_retrieved_document_key_common(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String> { - self.contracts[0].publish_retrieved_document_key_common(origin, server_key_id, requester, common_point, threshold) - } + fn publish_retrieved_document_key_common( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + common_point: Public, + threshold: usize, + ) -> Result<(), String> { + self.contracts[0].publish_retrieved_document_key_common( + origin, + server_key_id, + requester, + common_point, + threshold, + ) + } - fn publish_retrieved_document_key_personal(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String> { - self.contracts[0].publish_retrieved_document_key_personal(origin, server_key_id, requester, participants, decrypted_secret, shadow) - } + fn 
publish_retrieved_document_key_personal( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + participants: &[Address], + decrypted_secret: Public, + shadow: Bytes, + ) -> Result<(), String> { + self.contracts[0].publish_retrieved_document_key_personal( + origin, + server_key_id, + requester, + participants, + decrypted_secret, + shadow, + ) + } - fn publish_document_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> { - self.contracts[0].publish_document_key_retrieval_error(origin, server_key_id, requester) - } + fn publish_document_key_retrieval_error( + &self, + origin: &Address, + server_key_id: &ServerKeyId, + requester: &Address, + ) -> Result<(), String> { + self.contracts[0].publish_document_key_retrieval_error(origin, server_key_id, requester) + } } diff --git a/secret-store/src/listener/service_contract_listener.rs b/secret-store/src/listener/service_contract_listener.rs index 61dcda176..9dd647f01 100644 --- a/secret-store/src/listener/service_contract_listener.rs +++ b/secret-store/src/listener/service_contract_listener.rs @@ -14,28 +14,36 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::collections::HashSet; -use std::sync::Arc; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::thread; -use parking_lot::Mutex; -use ethcore::client::{ChainNotify, NewBlocks}; -use ethkey::{Public, public_to_address}; -use bytes::Bytes; -use ethereum_types::{H256, U256, Address}; -use key_server_set::KeyServerSet; -use key_server_cluster::{NodeId, ClusterClient, ClusterSessionsListener, ClusterSession}; -use key_server_cluster::math; -use key_server_cluster::generation_session::SessionImpl as GenerationSession; -use key_server_cluster::encryption_session::{check_encrypted_data, update_encrypted_data}; -use key_server_cluster::decryption_session::SessionImpl as DecryptionSession; -use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, - IsolatedSessionTransport as KeyVersionNegotiationTransport, FailedContinueAction}; -use key_storage::KeyStorage; use acl_storage::AclStorage; -use listener::service_contract::ServiceContract; -use listener::tasks_queue::TasksQueue; -use {ServerKeyId, NodeKeyPair, Error}; +use bytes::Bytes; +use ethcore::client::{ChainNotify, NewBlocks}; +use ethereum_types::{Address, H256, U256}; +use ethkey::{public_to_address, Public}; +use key_server_cluster::{ + decryption_session::SessionImpl as DecryptionSession, + encryption_session::{check_encrypted_data, update_encrypted_data}, + generation_session::SessionImpl as GenerationSession, + key_version_negotiation_session::{ + FailedContinueAction, IsolatedSessionTransport as KeyVersionNegotiationTransport, + SessionImpl as KeyVersionNegotiationSession, + }, + math, ClusterClient, ClusterSession, ClusterSessionsListener, NodeId, +}; +use key_server_set::KeyServerSet; +use key_storage::KeyStorage; +use listener::{service_contract::ServiceContract, tasks_queue::TasksQueue}; +use parking_lot::Mutex; +use std::{ + collections::HashSet, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + thread, +}; +use Error; +use NodeKeyPair; 
+use ServerKeyId; /// Retry interval (in blocks). Every RETRY_INTERVAL_BLOCKS blocks each KeyServer reads pending requests from /// service contract && tries to re-execute. The reason to have this mechanism is primarily because keys @@ -52,177 +60,235 @@ const MAX_FAILED_RETRY_REQUESTS: usize = 1; /// 2. redirecting requests to key server /// 3. publishing response on SecretStore contract pub struct ServiceContractListener { - /// Service contract listener data. - data: Arc, - /// Service thread handle. - service_handle: Option>, + /// Service contract listener data. + data: Arc, + /// Service thread handle. + service_handle: Option>, } /// Service contract listener parameters. pub struct ServiceContractListenerParams { - /// Service contract. - pub contract: Arc, - /// This node key pair. - pub self_key_pair: Arc, - /// Key servers set. - pub key_server_set: Arc, - /// ACL storage reference. - pub acl_storage: Arc, - /// Cluster reference. - pub cluster: Arc, - /// Key storage reference. - pub key_storage: Arc, + /// Service contract. + pub contract: Arc, + /// This node key pair. + pub self_key_pair: Arc, + /// Key servers set. + pub key_server_set: Arc, + /// ACL storage reference. + pub acl_storage: Arc, + /// Cluster reference. + pub cluster: Arc, + /// Key storage reference. + pub key_storage: Arc, } /// Service contract listener data. struct ServiceContractListenerData { - /// Blocks since last retry. - pub last_retry: AtomicUsize, - /// Retry-related data. - pub retry_data: Mutex, - /// Service tasks queue. - pub tasks_queue: Arc>, - /// Service contract. - pub contract: Arc, - /// ACL storage reference. - pub acl_storage: Arc, - /// Cluster client reference. - pub cluster: Arc, - /// This node key pair. - pub self_key_pair: Arc, - /// Key servers set. - pub key_server_set: Arc, - /// Key storage reference. - pub key_storage: Arc, - + /// Blocks since last retry. + pub last_retry: AtomicUsize, + /// Retry-related data. 
+ pub retry_data: Mutex, + /// Service tasks queue. + pub tasks_queue: Arc>, + /// Service contract. + pub contract: Arc, + /// ACL storage reference. + pub acl_storage: Arc, + /// Cluster client reference. + pub cluster: Arc, + /// This node key pair. + pub self_key_pair: Arc, + /// Key servers set. + pub key_server_set: Arc, + /// Key storage reference. + pub key_storage: Arc, } /// Retry-related data. #[derive(Default)] struct ServiceContractRetryData { - /// Server keys, which we have 'touched' since last retry. - pub affected_server_keys: HashSet, - /// Document keys + requesters, which we have 'touched' since last retry. - pub affected_document_keys: HashSet<(ServerKeyId, Address)>, + /// Server keys, which we have 'touched' since last retry. + pub affected_server_keys: HashSet, + /// Document keys + requesters, which we have 'touched' since last retry. + pub affected_document_keys: HashSet<(ServerKeyId, Address)>, } /// Service task. #[derive(Debug, Clone, PartialEq)] pub enum ServiceTask { - /// Retry all 'stalled' tasks. - Retry, - /// Generate server key (origin, server_key_id, author, threshold). - GenerateServerKey(Address, ServerKeyId, Address, usize), - /// Retrieve server key (origin, server_key_id). - RetrieveServerKey(Address, ServerKeyId), - /// Store document key (origin, server_key_id, author, common_point, encrypted_point). - StoreDocumentKey(Address, ServerKeyId, Address, Public, Public), - /// Retrieve common data of document key (origin, server_key_id, requester). - RetrieveShadowDocumentKeyCommon(Address, ServerKeyId, Address), - /// Retrieve personal data of document key (origin, server_key_id, requester). - RetrieveShadowDocumentKeyPersonal(Address, ServerKeyId, Public), - /// Shutdown listener. - Shutdown, + /// Retry all 'stalled' tasks. + Retry, + /// Generate server key (origin, server_key_id, author, threshold). + GenerateServerKey(Address, ServerKeyId, Address, usize), + /// Retrieve server key (origin, server_key_id). 
+ RetrieveServerKey(Address, ServerKeyId), + /// Store document key (origin, server_key_id, author, common_point, encrypted_point). + StoreDocumentKey(Address, ServerKeyId, Address, Public, Public), + /// Retrieve common data of document key (origin, server_key_id, requester). + RetrieveShadowDocumentKeyCommon(Address, ServerKeyId, Address), + /// Retrieve personal data of document key (origin, server_key_id, requester). + RetrieveShadowDocumentKeyPersonal(Address, ServerKeyId, Public), + /// Shutdown listener. + Shutdown, } impl ServiceContractListener { - /// Create new service contract listener. - pub fn new(params: ServiceContractListenerParams) -> Result, Error> { - let data = Arc::new(ServiceContractListenerData { - last_retry: AtomicUsize::new(0), - retry_data: Default::default(), - tasks_queue: Arc::new(TasksQueue::new()), - contract: params.contract, - acl_storage: params.acl_storage, - cluster: params.cluster, - self_key_pair: params.self_key_pair, - key_server_set: params.key_server_set, - key_storage: params.key_storage, - }); + /// Create new service contract listener. + pub fn new( + params: ServiceContractListenerParams, + ) -> Result, Error> { + let data = Arc::new(ServiceContractListenerData { + last_retry: AtomicUsize::new(0), + retry_data: Default::default(), + tasks_queue: Arc::new(TasksQueue::new()), + contract: params.contract, + acl_storage: params.acl_storage, + cluster: params.cluster, + self_key_pair: params.self_key_pair, + key_server_set: params.key_server_set, + key_storage: params.key_storage, + }); - // we are not starting thread when in test mode - let service_handle = if cfg!(test) { - None - } else { - let service_thread_data = data.clone(); - Some(thread::Builder::new().name("ServiceContractListener".into()).spawn(move || - Self::run_service_thread(service_thread_data)).map_err(|e| Error::Internal(format!("{}", e)))?) 
- }; - let contract = Arc::new(ServiceContractListener { - data: data, - service_handle: service_handle, - }); - contract.data.cluster.add_generation_listener(contract.clone()); - contract.data.cluster.add_decryption_listener(contract.clone()); - contract.data.cluster.add_key_version_negotiation_listener(contract.clone()); - Ok(contract) - } + // we are not starting thread when in test mode + let service_handle = if cfg!(test) { + None + } else { + let service_thread_data = data.clone(); + Some( + thread::Builder::new() + .name("ServiceContractListener".into()) + .spawn(move || Self::run_service_thread(service_thread_data)) + .map_err(|e| Error::Internal(format!("{}", e)))?, + ) + }; + let contract = Arc::new(ServiceContractListener { + data: data, + service_handle: service_handle, + }); + contract + .data + .cluster + .add_generation_listener(contract.clone()); + contract + .data + .cluster + .add_decryption_listener(contract.clone()); + contract + .data + .cluster + .add_key_version_negotiation_listener(contract.clone()); + Ok(contract) + } - /// Process incoming events of service contract. - fn process_service_contract_events(&self) { - // shortcut: do not process events if we're isolated from the cluster - if self.data.key_server_set.is_isolated() { - return; - } + /// Process incoming events of service contract. + fn process_service_contract_events(&self) { + // shortcut: do not process events if we're isolated from the cluster + if self.data.key_server_set.is_isolated() { + return; + } - self.data.tasks_queue.push_many(self.data.contract.read_logs() - .filter_map(|task| Self::filter_task(&self.data, task))); - } + self.data.tasks_queue.push_many( + self.data + .contract + .read_logs() + .filter_map(|task| Self::filter_task(&self.data, task)), + ); + } - /// Filter service task. Only returns Some if task must be executed by this server. 
- fn filter_task(data: &Arc, task: ServiceTask) -> Option { - match task { - // when this node should be master of this server key generation session - ServiceTask::GenerateServerKey(origin, server_key_id, author, threshold) if is_processed_by_this_key_server( - &*data.key_server_set, data.self_key_pair.public(), &server_key_id) => - Some(ServiceTask::GenerateServerKey(origin, server_key_id, author, threshold)), - // when server key is not yet generated and generation must be initiated by other node - ServiceTask::GenerateServerKey(_, _, _, _) => None, + /// Filter service task. Only returns Some if task must be executed by this server. + fn filter_task( + data: &Arc, + task: ServiceTask, + ) -> Option { + match task { + // when this node should be master of this server key generation session + ServiceTask::GenerateServerKey(origin, server_key_id, author, threshold) + if is_processed_by_this_key_server( + &*data.key_server_set, + data.self_key_pair.public(), + &server_key_id, + ) => + { + Some(ServiceTask::GenerateServerKey( + origin, + server_key_id, + author, + threshold, + )) + } + // when server key is not yet generated and generation must be initiated by other node + ServiceTask::GenerateServerKey(_, _, _, _) => None, - // when server key retrieval is requested - ServiceTask::RetrieveServerKey(origin, server_key_id) => - Some(ServiceTask::RetrieveServerKey(origin, server_key_id)), + // when server key retrieval is requested + ServiceTask::RetrieveServerKey(origin, server_key_id) => { + Some(ServiceTask::RetrieveServerKey(origin, server_key_id)) + } - // when document key store is requested - ServiceTask::StoreDocumentKey(origin, server_key_id, author, common_point, encrypted_point) => - Some(ServiceTask::StoreDocumentKey(origin, server_key_id, author, common_point, encrypted_point)), + // when document key store is requested + ServiceTask::StoreDocumentKey( + origin, + server_key_id, + author, + common_point, + encrypted_point, + ) => 
Some(ServiceTask::StoreDocumentKey( + origin, + server_key_id, + author, + common_point, + encrypted_point, + )), - // when common document key data retrieval is requested - ServiceTask::RetrieveShadowDocumentKeyCommon(origin, server_key_id, requester) => - Some(ServiceTask::RetrieveShadowDocumentKeyCommon(origin, server_key_id, requester)), + // when common document key data retrieval is requested + ServiceTask::RetrieveShadowDocumentKeyCommon(origin, server_key_id, requester) => Some( + ServiceTask::RetrieveShadowDocumentKeyCommon(origin, server_key_id, requester), + ), - // when this node should be master of this document key decryption session - ServiceTask::RetrieveShadowDocumentKeyPersonal(origin, server_key_id, requester) if is_processed_by_this_key_server( - &*data.key_server_set, data.self_key_pair.public(), &server_key_id) => - Some(ServiceTask::RetrieveShadowDocumentKeyPersonal(origin, server_key_id, requester)), - // when server key is not yet generated and generation must be initiated by other node - ServiceTask::RetrieveShadowDocumentKeyPersonal(_, _, _) => None, + // when this node should be master of this document key decryption session + ServiceTask::RetrieveShadowDocumentKeyPersonal(origin, server_key_id, requester) + if is_processed_by_this_key_server( + &*data.key_server_set, + data.self_key_pair.public(), + &server_key_id, + ) => + { + Some(ServiceTask::RetrieveShadowDocumentKeyPersonal( + origin, + server_key_id, + requester, + )) + } + // when server key is not yet generated and generation must be initiated by other node + ServiceTask::RetrieveShadowDocumentKeyPersonal(_, _, _) => None, - ServiceTask::Retry | ServiceTask::Shutdown => unreachable!("must be filtered outside"), - } - } + ServiceTask::Retry | ServiceTask::Shutdown => unreachable!("must be filtered outside"), + } + } - /// Service thread procedure. 
- fn run_service_thread(data: Arc) { - loop { - let task = data.tasks_queue.wait(); - trace!(target: "secretstore", "{}: processing {:?} task", data.self_key_pair.public(), task); + /// Service thread procedure. + fn run_service_thread(data: Arc) { + loop { + let task = data.tasks_queue.wait(); + trace!(target: "secretstore", "{}: processing {:?} task", data.self_key_pair.public(), task); - match task { - ServiceTask::Shutdown => break, - task => { - // the only possible reaction to an error is a tx+trace && it is already happened - let _ = Self::process_service_task(&data, task); - }, - }; - } + match task { + ServiceTask::Shutdown => break, + task => { + // the only possible reaction to an error is a tx+trace && it is already happened + let _ = Self::process_service_task(&data, task); + } + }; + } - trace!(target: "secretstore", "{}: ServiceContractListener thread stopped", data.self_key_pair.public()); - } + trace!(target: "secretstore", "{}: ServiceContractListener thread stopped", data.self_key_pair.public()); + } - /// Process single service task. - fn process_service_task(data: &Arc, task: ServiceTask) -> Result<(), String> { - match &task { + /// Process single service task. + fn process_service_task( + data: &Arc, + task: ServiceTask, + ) -> Result<(), String> { + match &task { &ServiceTask::GenerateServerKey(origin, server_key_id, author, threshold) => { data.retry_data.lock().affected_server_keys.insert(server_key_id.clone()); log_service_task_result(&task, data.self_key_pair.public(), @@ -265,354 +331,558 @@ impl ServiceContractListener { }, &ServiceTask::Shutdown => unreachable!("must be filtered outside"), } - } + } - /// Retry processing pending requests. 
- fn retry_pending_requests(data: &Arc) -> Result { - let mut failed_requests = 0; - let mut processed_requests = 0; - let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default()); - let pending_tasks = data.contract.read_pending_requests() - .filter_map(|(is_confirmed, task)| Self::filter_task(data, task) - .map(|t| (is_confirmed, t))); - for (is_response_required, task) in pending_tasks { - // only process requests, which we haven't confirmed yet - if !is_response_required { - continue; - } + /// Retry processing pending requests. + fn retry_pending_requests(data: &Arc) -> Result { + let mut failed_requests = 0; + let mut processed_requests = 0; + let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default()); + let pending_tasks = + data.contract + .read_pending_requests() + .filter_map(|(is_confirmed, task)| { + Self::filter_task(data, task).map(|t| (is_confirmed, t)) + }); + for (is_response_required, task) in pending_tasks { + // only process requests, which we haven't confirmed yet + if !is_response_required { + continue; + } - match task { - ServiceTask::GenerateServerKey(_, ref key, _, _) | ServiceTask::RetrieveServerKey(_, ref key) - if retry_data.affected_server_keys.contains(key) => continue, - ServiceTask::StoreDocumentKey(_, ref key, ref author, _, _) | - ServiceTask::RetrieveShadowDocumentKeyCommon(_, ref key, ref author) - if retry_data.affected_document_keys.contains(&(key.clone(), author.clone())) => continue, - ServiceTask::RetrieveShadowDocumentKeyPersonal(_, ref key, ref requester) - if retry_data.affected_document_keys.contains(&(key.clone(), public_to_address(requester))) => continue, - _ => (), - } + match task { + ServiceTask::GenerateServerKey(_, ref key, _, _) + | ServiceTask::RetrieveServerKey(_, ref key) + if retry_data.affected_server_keys.contains(key) => + { + continue + } + ServiceTask::StoreDocumentKey(_, ref key, ref author, _, _) + | ServiceTask::RetrieveShadowDocumentKeyCommon(_, 
ref key, ref author) + if retry_data + .affected_document_keys + .contains(&(key.clone(), author.clone())) => + { + continue + } + ServiceTask::RetrieveShadowDocumentKeyPersonal(_, ref key, ref requester) + if retry_data + .affected_document_keys + .contains(&(key.clone(), public_to_address(requester))) => + { + continue + } + _ => (), + } - // process request result - let request_result = Self::process_service_task(data, task); - match request_result { - Ok(_) => processed_requests += 1, - Err(_) => { - failed_requests += 1; - if failed_requests > MAX_FAILED_RETRY_REQUESTS { - return Err("too many failed requests".into()); - } - }, - } - } + // process request result + let request_result = Self::process_service_task(data, task); + match request_result { + Ok(_) => processed_requests += 1, + Err(_) => { + failed_requests += 1; + if failed_requests > MAX_FAILED_RETRY_REQUESTS { + return Err("too many failed requests".into()); + } + } + } + } - Ok(processed_requests) - } + Ok(processed_requests) + } - /// Generate server key (start generation session). - fn generate_server_key(data: &Arc, origin: Address, server_key_id: &ServerKeyId, author: Address, threshold: usize) -> Result<(), String> { - Self::process_server_key_generation_result(data, origin, server_key_id, data.cluster.new_generation_session( - server_key_id.clone(), Some(origin), author, threshold).map(|_| None).map_err(Into::into)) - } + /// Generate server key (start generation session). + fn generate_server_key( + data: &Arc, + origin: Address, + server_key_id: &ServerKeyId, + author: Address, + threshold: usize, + ) -> Result<(), String> { + Self::process_server_key_generation_result( + data, + origin, + server_key_id, + data.cluster + .new_generation_session(server_key_id.clone(), Some(origin), author, threshold) + .map(|_| None) + .map_err(Into::into), + ) + } - /// Process server key generation result. 
- fn process_server_key_generation_result(data: &Arc, origin: Address, server_key_id: &ServerKeyId, result: Result, Error>) -> Result<(), String> { - match result { - Ok(None) => Ok(()), - Ok(Some(server_key)) => { - data.contract.publish_generated_server_key(&origin, server_key_id, server_key) - }, - Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), - Err(ref error) => { - // ignore error as we're already processing an error - let _ = data.contract.publish_server_key_generation_error(&origin, server_key_id) + /// Process server key generation result. + fn process_server_key_generation_result( + data: &Arc, + origin: Address, + server_key_id: &ServerKeyId, + result: Result, Error>, + ) -> Result<(), String> { + match result { + Ok(None) => Ok(()), + Ok(Some(server_key)) => { + data.contract + .publish_generated_server_key(&origin, server_key_id, server_key) + } + Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), + Err(ref error) => { + // ignore error as we're already processing an error + let _ = data.contract.publish_server_key_generation_error(&origin, server_key_id) .map_err(|error| warn!(target: "secretstore", "{}: failed to publish GenerateServerKey({}) error: {}", data.self_key_pair.public(), server_key_id, error)); - Err(format!("{}", error)) - } - } - } + Err(format!("{}", error)) + } + } + } - /// Retrieve server key. 
- fn retrieve_server_key(data: &Arc, origin: Address, server_key_id: &ServerKeyId) -> Result<(), String> { - match data.key_storage.get(server_key_id) { - Ok(Some(server_key_share)) => { - data.contract.publish_retrieved_server_key(&origin, server_key_id, server_key_share.public, server_key_share.threshold) - }, - Ok(None) => { - data.contract.publish_server_key_retrieval_error(&origin, server_key_id) - } - Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), - Err(ref error) => { - // ignore error as we're already processing an error - let _ = data.contract.publish_server_key_retrieval_error(&origin, server_key_id) + /// Retrieve server key. + fn retrieve_server_key( + data: &Arc, + origin: Address, + server_key_id: &ServerKeyId, + ) -> Result<(), String> { + match data.key_storage.get(server_key_id) { + Ok(Some(server_key_share)) => data.contract.publish_retrieved_server_key( + &origin, + server_key_id, + server_key_share.public, + server_key_share.threshold, + ), + Ok(None) => data + .contract + .publish_server_key_retrieval_error(&origin, server_key_id), + Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), + Err(ref error) => { + // ignore error as we're already processing an error + let _ = data.contract.publish_server_key_retrieval_error(&origin, server_key_id) .map_err(|error| warn!(target: "secretstore", "{}: failed to publish RetrieveServerKey({}) error: {}", data.self_key_pair.public(), server_key_id, error)); - Err(format!("{}", error)) - } - } - } + Err(format!("{}", error)) + } + } + } - /// Store document key. 
- fn store_document_key(data: &Arc, origin: Address, server_key_id: &ServerKeyId, author: &Address, common_point: &Public, encrypted_point: &Public) -> Result<(), String> { - let store_result = data.key_storage.get(server_key_id) - .and_then(|key_share| key_share.ok_or(Error::ServerKeyIsNotFound)) - .and_then(|key_share| check_encrypted_data(Some(&key_share)).map(|_| key_share).map_err(Into::into)) - .and_then(|key_share| update_encrypted_data(&data.key_storage, server_key_id.clone(), key_share, - author.clone(), common_point.clone(), encrypted_point.clone()).map_err(Into::into)); - match store_result { - Ok(()) => { - data.contract.publish_stored_document_key(&origin, server_key_id) - }, - Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), - Err(ref error) => { - // ignore error as we're already processing an error - let _ = data.contract.publish_document_key_store_error(&origin, server_key_id) + /// Store document key. + fn store_document_key( + data: &Arc, + origin: Address, + server_key_id: &ServerKeyId, + author: &Address, + common_point: &Public, + encrypted_point: &Public, + ) -> Result<(), String> { + let store_result = data + .key_storage + .get(server_key_id) + .and_then(|key_share| key_share.ok_or(Error::ServerKeyIsNotFound)) + .and_then(|key_share| { + check_encrypted_data(Some(&key_share)) + .map(|_| key_share) + .map_err(Into::into) + }) + .and_then(|key_share| { + update_encrypted_data( + &data.key_storage, + server_key_id.clone(), + key_share, + author.clone(), + common_point.clone(), + encrypted_point.clone(), + ) + .map_err(Into::into) + }); + match store_result { + Ok(()) => data + .contract + .publish_stored_document_key(&origin, server_key_id), + Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), + Err(ref error) => { + // ignore error as we're already processing an error + let _ = data.contract.publish_document_key_store_error(&origin, server_key_id) .map_err(|error| warn!(target: "secretstore", "{}: failed 
to publish StoreDocumentKey({}) error: {}", data.self_key_pair.public(), server_key_id, error)); - Err(format!("{}", error)) - }, - } - } + Err(format!("{}", error)) + } + } + } - /// Retrieve common part of document key. - fn retrieve_document_key_common(data: &Arc, origin: Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> { - let retrieval_result = data.acl_storage.check(requester.clone(), server_key_id) - .and_then(|is_allowed| if !is_allowed { Err(Error::AccessDenied) } else { Ok(()) }) - .and_then(|_| data.key_storage.get(server_key_id).and_then(|key_share| key_share.ok_or(Error::ServerKeyIsNotFound))) - .and_then(|key_share| key_share.common_point - .ok_or(Error::DocumentKeyIsNotFound) - .and_then(|common_point| math::make_common_shadow_point(key_share.threshold, common_point)) - .map(|common_point| (common_point, key_share.threshold))); - match retrieval_result { - Ok((common_point, threshold)) => { - data.contract.publish_retrieved_document_key_common(&origin, server_key_id, requester, common_point, threshold) - }, - Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), - Err(ref error) => { - // ignore error as we're already processing an error - let _ = data.contract.publish_document_key_retrieval_error(&origin, server_key_id, requester) + /// Retrieve common part of document key. 
+ fn retrieve_document_key_common( + data: &Arc, + origin: Address, + server_key_id: &ServerKeyId, + requester: &Address, + ) -> Result<(), String> { + let retrieval_result = data + .acl_storage + .check(requester.clone(), server_key_id) + .and_then(|is_allowed| { + if !is_allowed { + Err(Error::AccessDenied) + } else { + Ok(()) + } + }) + .and_then(|_| { + data.key_storage + .get(server_key_id) + .and_then(|key_share| key_share.ok_or(Error::ServerKeyIsNotFound)) + }) + .and_then(|key_share| { + key_share + .common_point + .ok_or(Error::DocumentKeyIsNotFound) + .and_then(|common_point| { + math::make_common_shadow_point(key_share.threshold, common_point) + }) + .map(|common_point| (common_point, key_share.threshold)) + }); + match retrieval_result { + Ok((common_point, threshold)) => data.contract.publish_retrieved_document_key_common( + &origin, + server_key_id, + requester, + common_point, + threshold, + ), + Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), + Err(ref error) => { + // ignore error as we're already processing an error + let _ = data.contract.publish_document_key_retrieval_error(&origin, server_key_id, requester) .map_err(|error| warn!(target: "secretstore", "{}: failed to publish RetrieveDocumentKey({}) error: {}", data.self_key_pair.public(), server_key_id, error)); - Err(format!("{}", error)) - }, - } - } + Err(format!("{}", error)) + } + } + } - /// Retrieve personal part of document key (start decryption session). - fn retrieve_document_key_personal(data: &Arc, origin: Address, server_key_id: &ServerKeyId, requester: Public) -> Result<(), String> { - Self::process_document_key_retrieval_result(data, origin, server_key_id, &public_to_address(&requester), data.cluster.new_decryption_session( - server_key_id.clone(), Some(origin), requester.clone().into(), None, true, true).map(|_| None).map_err(Into::into)) - } + /// Retrieve personal part of document key (start decryption session). 
+ fn retrieve_document_key_personal( + data: &Arc, + origin: Address, + server_key_id: &ServerKeyId, + requester: Public, + ) -> Result<(), String> { + Self::process_document_key_retrieval_result( + data, + origin, + server_key_id, + &public_to_address(&requester), + data.cluster + .new_decryption_session( + server_key_id.clone(), + Some(origin), + requester.clone().into(), + None, + true, + true, + ) + .map(|_| None) + .map_err(Into::into), + ) + } - /// Process document key retrieval result. - fn process_document_key_retrieval_result(data: &Arc, origin: Address, server_key_id: &ServerKeyId, requester: &Address, result: Result, Public, Bytes)>, Error>) -> Result<(), String> { - match result { - Ok(None) => Ok(()), - Ok(Some((participants, decrypted_secret, shadow))) => { - data.contract.publish_retrieved_document_key_personal(&origin, server_key_id, &requester, &participants, decrypted_secret, shadow) - }, - Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), - Err(ref error) => { - // ignore error as we're already processing an error - let _ = data.contract.publish_document_key_retrieval_error(&origin, server_key_id, &requester) + /// Process document key retrieval result. 
+ fn process_document_key_retrieval_result( + data: &Arc, + origin: Address, + server_key_id: &ServerKeyId, + requester: &Address, + result: Result, Public, Bytes)>, Error>, + ) -> Result<(), String> { + match result { + Ok(None) => Ok(()), + Ok(Some((participants, decrypted_secret, shadow))) => { + data.contract.publish_retrieved_document_key_personal( + &origin, + server_key_id, + &requester, + &participants, + decrypted_secret, + shadow, + ) + } + Err(ref error) if error.is_non_fatal() => Err(format!("{}", error)), + Err(ref error) => { + // ignore error as we're already processing an error + let _ = data.contract.publish_document_key_retrieval_error(&origin, server_key_id, &requester) .map_err(|error| warn!(target: "secretstore", "{}: failed to publish RetrieveDocumentKey({}) error: {}", data.self_key_pair.public(), server_key_id, error)); - Err(format!("{}", error)) - } - } - } + Err(format!("{}", error)) + } + } + } } impl Drop for ServiceContractListener { - fn drop(&mut self) { - if let Some(service_handle) = self.service_handle.take() { - self.data.tasks_queue.push_front(ServiceTask::Shutdown); - // ignore error as we are already closing - let _ = service_handle.join(); - } - } + fn drop(&mut self) { + if let Some(service_handle) = self.service_handle.take() { + self.data.tasks_queue.push_front(ServiceTask::Shutdown); + // ignore error as we are already closing + let _ = service_handle.join(); + } + } } impl ChainNotify for ServiceContractListener { - fn new_blocks(&self, new_blocks: NewBlocks) { - if new_blocks.has_more_blocks_to_import { return } - let enacted_len = new_blocks.route.enacted().len(); - if enacted_len == 0 && new_blocks.route.retracted().is_empty() { - return; - } + fn new_blocks(&self, new_blocks: NewBlocks) { + if new_blocks.has_more_blocks_to_import { + return; + } + let enacted_len = new_blocks.route.enacted().len(); + if enacted_len == 0 && new_blocks.route.retracted().is_empty() { + return; + } - if !self.data.contract.update() { - 
return; - } + if !self.data.contract.update() { + return; + } - self.process_service_contract_events(); + self.process_service_contract_events(); - // schedule retry if received enough blocks since last retry - // it maybe inaccurate when switching syncing/synced states, but that's ok - if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTERVAL_BLOCKS { - // shortcut: do not retry if we're isolated from the cluster - if !self.data.key_server_set.is_isolated() { - self.data.tasks_queue.push(ServiceTask::Retry); - self.data.last_retry.store(0, Ordering::Relaxed); - } - } - } + // schedule retry if received enough blocks since last retry + // it maybe inaccurate when switching syncing/synced states, but that's ok + if self + .data + .last_retry + .fetch_add(enacted_len, Ordering::Relaxed) + >= RETRY_INTERVAL_BLOCKS + { + // shortcut: do not retry if we're isolated from the cluster + if !self.data.key_server_set.is_isolated() { + self.data.tasks_queue.push(ServiceTask::Retry); + self.data.last_retry.store(0, Ordering::Relaxed); + } + } + } } impl ClusterSessionsListener for ServiceContractListener { - fn on_session_removed(&self, session: Arc) { - // by this time sesion must already be completed - either successfully, or not - assert!(session.is_finished()); + fn on_session_removed(&self, session: Arc) { + // by this time sesion must already be completed - either successfully, or not + assert!(session.is_finished()); - // ignore result - the only thing that we can do is to log the error - let server_key_id = session.id(); - if let Some(origin) = session.origin() { - if let Some(generation_result) = session.wait(Some(Default::default())) { - let generation_result = generation_result.map(Some).map_err(Into::into); - let _ = Self::process_server_key_generation_result(&self.data, origin, &server_key_id, generation_result); - } - } - } + // ignore result - the only thing that we can do is to log the error + let server_key_id = session.id(); + if let 
Some(origin) = session.origin() { + if let Some(generation_result) = session.wait(Some(Default::default())) { + let generation_result = generation_result.map(Some).map_err(Into::into); + let _ = Self::process_server_key_generation_result( + &self.data, + origin, + &server_key_id, + generation_result, + ); + } + } + } } impl ClusterSessionsListener for ServiceContractListener { - fn on_session_removed(&self, session: Arc) { - // by this time sesion must already be completed - either successfully, or not - assert!(session.is_finished()); + fn on_session_removed(&self, session: Arc) { + // by this time sesion must already be completed - either successfully, or not + assert!(session.is_finished()); - // ignore result - the only thing that we can do is to log the error - let session_id = session.id(); - let server_key_id = session_id.id; - if let (Some(requester), Some(origin)) = (session.requester().and_then(|r| r.address(&server_key_id).ok()), session.origin()) { - if let Some(retrieval_result) = session.wait(Some(Default::default())) { - let retrieval_result = retrieval_result.map(|key_shadow| - session.broadcast_shadows() - .and_then(|broadcast_shadows| - broadcast_shadows.get(self.data.self_key_pair.public()) - .map(|self_shadow| ( - broadcast_shadows.keys().map(public_to_address).collect(), - key_shadow.decrypted_secret, - self_shadow.clone() - ))) - ).map_err(Into::into); - let _ = Self::process_document_key_retrieval_result(&self.data, origin, &server_key_id, &requester, retrieval_result); - } - } - } + // ignore result - the only thing that we can do is to log the error + let session_id = session.id(); + let server_key_id = session_id.id; + if let (Some(requester), Some(origin)) = ( + session + .requester() + .and_then(|r| r.address(&server_key_id).ok()), + session.origin(), + ) { + if let Some(retrieval_result) = session.wait(Some(Default::default())) { + let retrieval_result = retrieval_result + .map(|key_shadow| { + 
session.broadcast_shadows().and_then(|broadcast_shadows| { + broadcast_shadows.get(self.data.self_key_pair.public()).map( + |self_shadow| { + ( + broadcast_shadows.keys().map(public_to_address).collect(), + key_shadow.decrypted_secret, + self_shadow.clone(), + ) + }, + ) + }) + }) + .map_err(Into::into); + let _ = Self::process_document_key_retrieval_result( + &self.data, + origin, + &server_key_id, + &requester, + retrieval_result, + ); + } + } + } } -impl ClusterSessionsListener> for ServiceContractListener { - fn on_session_removed(&self, session: Arc>) { - // by this time sesion must already be completed - either successfully, or not - assert!(session.is_finished()); +impl ClusterSessionsListener> + for ServiceContractListener +{ + fn on_session_removed( + &self, + session: Arc>, + ) { + // by this time sesion must already be completed - either successfully, or not + assert!(session.is_finished()); - // we're interested in: - // 1) sessions failed with fatal error - // 2) with decryption continue action - let error = match session.wait() { - Err(ref error) if !error.is_non_fatal() => error.clone(), - _ => return, - }; + // we're interested in: + // 1) sessions failed with fatal error + // 2) with decryption continue action + let error = match session.wait() { + Err(ref error) if !error.is_non_fatal() => error.clone(), + _ => return, + }; - let (origin, requester) = match session.take_failed_continue_action() { - Some(FailedContinueAction::Decrypt(Some(origin), requester)) => (origin, requester), - _ => return, - }; + let (origin, requester) = match session.take_failed_continue_action() { + Some(FailedContinueAction::Decrypt(Some(origin), requester)) => (origin, requester), + _ => return, + }; - // check if master node is responsible for processing key requests - let meta = session.meta(); - if !is_processed_by_this_key_server(&*self.data.key_server_set, &meta.master_node_id, &meta.id) { - return; - } + // check if master node is responsible for processing key 
requests + let meta = session.meta(); + if !is_processed_by_this_key_server( + &*self.data.key_server_set, + &meta.master_node_id, + &meta.id, + ) { + return; + } - // ignore result as we're already processing an error - let _ = Self::process_document_key_retrieval_result(&self.data, origin, &meta.id, &requester, Err(error)); - } + // ignore result as we're already processing an error + let _ = Self::process_document_key_retrieval_result( + &self.data, + origin, + &meta.id, + &requester, + Err(error), + ); + } } impl ::std::fmt::Display for ServiceTask { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - match *self { - ServiceTask::Retry => write!(f, "Retry"), - ServiceTask::GenerateServerKey(_, ref server_key_id, ref author, ref threshold) => - write!(f, "GenerateServerKey({}, {}, {})", server_key_id, author, threshold), - ServiceTask::RetrieveServerKey(_, ref server_key_id) => - write!(f, "RetrieveServerKey({})", server_key_id), - ServiceTask::StoreDocumentKey(_, ref server_key_id, ref author, _, _) => - write!(f, "StoreDocumentKey({}, {})", server_key_id, author), - ServiceTask::RetrieveShadowDocumentKeyCommon(_, ref server_key_id, ref requester) => - write!(f, "RetrieveShadowDocumentKeyCommon({}, {})", server_key_id, requester), - ServiceTask::RetrieveShadowDocumentKeyPersonal(_, ref server_key_id, ref requester) => - write!(f, "RetrieveShadowDocumentKeyPersonal({}, {})", server_key_id, public_to_address(requester)), - ServiceTask::Shutdown => write!(f, "Shutdown"), - } - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + match *self { + ServiceTask::Retry => write!(f, "Retry"), + ServiceTask::GenerateServerKey(_, ref server_key_id, ref author, ref threshold) => { + write!( + f, + "GenerateServerKey({}, {}, {})", + server_key_id, author, threshold + ) + } + ServiceTask::RetrieveServerKey(_, ref server_key_id) => { + write!(f, "RetrieveServerKey({})", server_key_id) + } + ServiceTask::StoreDocumentKey(_, ref 
server_key_id, ref author, _, _) => { + write!(f, "StoreDocumentKey({}, {})", server_key_id, author) + } + ServiceTask::RetrieveShadowDocumentKeyCommon(_, ref server_key_id, ref requester) => { + write!( + f, + "RetrieveShadowDocumentKeyCommon({}, {})", + server_key_id, requester + ) + } + ServiceTask::RetrieveShadowDocumentKeyPersonal(_, ref server_key_id, ref requester) => { + write!( + f, + "RetrieveShadowDocumentKeyPersonal({}, {})", + server_key_id, + public_to_address(requester) + ) + } + ServiceTask::Shutdown => write!(f, "Shutdown"), + } + } } /// Log service task result. -fn log_service_task_result(task: &ServiceTask, self_id: &Public, result: Result<(), String>) -> Result<(), String> { - match result { - Ok(_) => trace!(target: "secretstore", "{}: processed {} request", self_id, task), - Err(ref error) => warn!(target: "secretstore", "{}: failed to process {} request with: {}", self_id, task, error), - } +fn log_service_task_result( + task: &ServiceTask, + self_id: &Public, + result: Result<(), String>, +) -> Result<(), String> { + match result { + Ok(_) => trace!(target: "secretstore", "{}: processed {} request", self_id, task), + Err(ref error) => { + warn!(target: "secretstore", "{}: failed to process {} request with: {}", self_id, task, error) + } + } - result + result } /// Returns true when session, related to `server_key_id` must be started on `node`. 
-fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, node: &NodeId, server_key_id: &H256) -> bool { - let servers = key_server_set.snapshot().current_set; - let total_servers_count = servers.len(); - match total_servers_count { - 0 => return false, - 1 => return true, - _ => (), - } +fn is_processed_by_this_key_server( + key_server_set: &KeyServerSet, + node: &NodeId, + server_key_id: &H256, +) -> bool { + let servers = key_server_set.snapshot().current_set; + let total_servers_count = servers.len(); + match total_servers_count { + 0 => return false, + 1 => return true, + _ => (), + } - let this_server_index = match servers.keys().enumerate().find(|&(_, s)| s == node) { - Some((index, _)) => index, - None => return false, - }; + let this_server_index = match servers.keys().enumerate().find(|&(_, s)| s == node) { + Some((index, _)) => index, + None => return false, + }; - let server_key_id_value: U256 = server_key_id.into(); - let range_interval = U256::max_value() / total_servers_count; - let range_begin = (range_interval + 1) * this_server_index as u32; - let range_end = range_begin.saturating_add(range_interval); + let server_key_id_value: U256 = server_key_id.into(); + let range_interval = U256::max_value() / total_servers_count; + let range_begin = (range_interval + 1) * this_server_index as u32; + let range_end = range_begin.saturating_add(range_interval); - server_key_id_value >= range_begin && server_key_id_value <= range_end + server_key_id_value >= range_begin && server_key_id_value <= range_end } #[cfg(test)] mod tests { - use std::sync::Arc; - use std::sync::atomic::Ordering; - use ethkey::{Random, Generator, KeyPair}; - use listener::service_contract::ServiceContract; - use listener::service_contract::tests::DummyServiceContract; - use key_server_cluster::DummyClusterClient; - use acl_storage::{AclStorage, DummyAclStorage}; - use key_storage::{KeyStorage, DocumentKeyShare}; - use key_storage::tests::DummyKeyStorage; - use 
key_server_set::KeyServerSet; - use key_server_set::tests::MapKeyServerSet; - use {NodeKeyPair, PlainNodeKeyPair, ServerKeyId}; - use super::{ServiceTask, ServiceContractListener, ServiceContractListenerParams, is_processed_by_this_key_server}; + use super::{ + is_processed_by_this_key_server, ServiceContractListener, ServiceContractListenerParams, + ServiceTask, + }; + use acl_storage::{AclStorage, DummyAclStorage}; + use ethkey::{Generator, KeyPair, Random}; + use key_server_cluster::DummyClusterClient; + use key_server_set::{tests::MapKeyServerSet, KeyServerSet}; + use key_storage::{tests::DummyKeyStorage, DocumentKeyShare, KeyStorage}; + use listener::service_contract::{tests::DummyServiceContract, ServiceContract}; + use std::sync::{atomic::Ordering, Arc}; + use NodeKeyPair; + use PlainNodeKeyPair; + use ServerKeyId; - fn create_non_empty_key_storage(has_doc_key: bool) -> Arc { - let key_storage = Arc::new(DummyKeyStorage::default()); - let mut key_share = DocumentKeyShare::default(); - key_share.public = KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001" - .parse().unwrap()).unwrap().public().clone(); - if has_doc_key { - key_share.common_point = Some(Default::default()); - key_share.encrypted_point = Some(Default::default()); - } - key_storage.insert(Default::default(), key_share.clone()).unwrap(); - key_storage - } + fn create_non_empty_key_storage(has_doc_key: bool) -> Arc { + let key_storage = Arc::new(DummyKeyStorage::default()); + let mut key_share = DocumentKeyShare::default(); + key_share.public = KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000001" + .parse() + .unwrap(), + ) + .unwrap() + .public() + .clone(); + if has_doc_key { + key_share.common_point = Some(Default::default()); + key_share.encrypted_point = Some(Default::default()); + } + key_storage + .insert(Default::default(), key_share.clone()) + .unwrap(); + key_storage + } - fn make_servers_set(is_isolated: bool) -> 
Arc { - Arc::new(MapKeyServerSet::new(is_isolated, vec![ + fn make_servers_set(is_isolated: bool) -> Arc { + Arc::new(MapKeyServerSet::new(is_isolated, vec![ ("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(), "127.0.0.1:8080".parse().unwrap()), ("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(), @@ -620,58 +890,93 @@ mod tests { ("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(), "127.0.0.1:8080".parse().unwrap()), ].into_iter().collect())) - } + } - fn make_service_contract_listener(contract: Option>, cluster: Option>, key_storage: Option>, acl_storage: Option>, servers_set: Option>) -> Arc { - let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default())); - let cluster = cluster.unwrap_or_else(|| Arc::new(DummyClusterClient::default())); - let key_storage = key_storage.unwrap_or_else(|| Arc::new(DummyKeyStorage::default())); - let acl_storage = acl_storage.unwrap_or_else(|| Arc::new(DummyAclStorage::default())); - let servers_set = servers_set.unwrap_or_else(|| make_servers_set(false)); - let self_key_pair = Arc::new(PlainNodeKeyPair::new(KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap())); - ServiceContractListener::new(ServiceContractListenerParams { - contract: contract, - self_key_pair: self_key_pair, - key_server_set: servers_set, - acl_storage: acl_storage, - cluster: cluster, - key_storage: key_storage, - }).unwrap() - } + fn make_service_contract_listener( + contract: Option>, + cluster: Option>, + key_storage: Option>, + acl_storage: Option>, + servers_set: Option>, + ) -> Arc { + let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default())); + let cluster 
= cluster.unwrap_or_else(|| Arc::new(DummyClusterClient::default())); + let key_storage = key_storage.unwrap_or_else(|| Arc::new(DummyKeyStorage::default())); + let acl_storage = acl_storage.unwrap_or_else(|| Arc::new(DummyAclStorage::default())); + let servers_set = servers_set.unwrap_or_else(|| make_servers_set(false)); + let self_key_pair = Arc::new(PlainNodeKeyPair::new( + KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000001" + .parse() + .unwrap(), + ) + .unwrap(), + )); + ServiceContractListener::new(ServiceContractListenerParams { + contract: contract, + self_key_pair: self_key_pair, + key_server_set: servers_set, + acl_storage: acl_storage, + cluster: cluster, + key_storage: key_storage, + }) + .unwrap() + } - #[test] - fn is_not_processed_by_this_key_server_with_zero_servers() { - assert_eq!(is_processed_by_this_key_server( - &MapKeyServerSet::default(), - Random.generate().unwrap().public(), - &Default::default()), false); - } + #[test] + fn is_not_processed_by_this_key_server_with_zero_servers() { + assert_eq!( + is_processed_by_this_key_server( + &MapKeyServerSet::default(), + Random.generate().unwrap().public(), + &Default::default() + ), + false + ); + } - #[test] - fn is_processed_by_this_key_server_with_single_server() { - let self_key_pair = Random.generate().unwrap(); - assert_eq!(is_processed_by_this_key_server( - &MapKeyServerSet::new(false, vec![ - (self_key_pair.public().clone(), "127.0.0.1:8080".parse().unwrap()) - ].into_iter().collect()), - self_key_pair.public(), - &Default::default()), true); - } + #[test] + fn is_processed_by_this_key_server_with_single_server() { + let self_key_pair = Random.generate().unwrap(); + assert_eq!( + is_processed_by_this_key_server( + &MapKeyServerSet::new( + false, + vec![( + self_key_pair.public().clone(), + "127.0.0.1:8080".parse().unwrap() + )] + .into_iter() + .collect() + ), + self_key_pair.public(), + &Default::default() + ), + true + ); + } - #[test] - fn 
is_not_processed_by_this_key_server_when_not_a_part_of_servers_set() { - assert!(is_processed_by_this_key_server( - &MapKeyServerSet::new(false, vec![ - (Random.generate().unwrap().public().clone(), "127.0.0.1:8080".parse().unwrap()) - ].into_iter().collect()), - Random.generate().unwrap().public(), - &Default::default())); - } + #[test] + fn is_not_processed_by_this_key_server_when_not_a_part_of_servers_set() { + assert!(is_processed_by_this_key_server( + &MapKeyServerSet::new( + false, + vec![( + Random.generate().unwrap().public().clone(), + "127.0.0.1:8080".parse().unwrap() + )] + .into_iter() + .collect() + ), + Random.generate().unwrap().public(), + &Default::default() + )); + } - #[test] - fn is_processed_by_this_key_server_in_set_of_3() { - // servers set is ordered && server range depends on index of this server - let servers_set = MapKeyServerSet::new(false, vec![ + #[test] + fn is_processed_by_this_key_server_in_set_of_3() { + // servers set is ordered && server range depends on index of this server + let servers_set = MapKeyServerSet::new(false, vec![ // secret: 0000000000000000000000000000000000000000000000000000000000000001 ("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(), "127.0.0.1:8080".parse().unwrap()), @@ -683,49 +988,171 @@ mod tests { "127.0.0.1:8080".parse().unwrap()), ].into_iter().collect()); - // 1st server: process hashes [0x0; 0x555...555] - let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( - "0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap()); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"3000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - 
assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"5555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"5555555555555555555555555555555555555555555555555555555555555556".parse().unwrap()), false); + // 1st server: process hashes [0x0; 0x555...555] + let key_pair = PlainNodeKeyPair::new( + KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000001" + .parse() + .unwrap(), + ) + .unwrap(), + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"0000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"3000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"5555555555555555555555555555555555555555555555555555555555555555" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"5555555555555555555555555555555555555555555555555555555555555556" + .parse() + .unwrap() + ), + false + ); - // 2nd server: process hashes from 0x555...556 to 0xaaa...aab - let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( - "0000000000000000000000000000000000000000000000000000000000000002".parse().unwrap()).unwrap()); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"5555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), false); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"5555555555555555555555555555555555555555555555555555555555555556".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, 
key_pair.public(), - &"7555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), false); + // 2nd server: process hashes from 0x555...556 to 0xaaa...aab + let key_pair = PlainNodeKeyPair::new( + KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000002" + .parse() + .unwrap(), + ) + .unwrap(), + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"5555555555555555555555555555555555555555555555555555555555555555" + .parse() + .unwrap() + ), + false + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"5555555555555555555555555555555555555555555555555555555555555556" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"7555555555555555555555555555555555555555555555555555555555555555" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac" + .parse() + .unwrap() + ), + false + ); - // 3rd server: process hashes from 0x800...000 to 0xbff...ff - let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( - "0000000000000000000000000000000000000000000000000000000000000003".parse().unwrap()).unwrap()); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - 
&"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab".parse().unwrap()), false); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"daaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); - } + // 3rd server: process hashes from 0x800...000 to 0xbff...ff + let key_pair = PlainNodeKeyPair::new( + KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000003" + .parse() + .unwrap(), + ) + .unwrap(), + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab" + .parse() + .unwrap() + ), + false + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"daaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap() + ), + true + ); + } - #[test] - fn is_processed_by_this_key_server_in_set_of_4() { - // servers set is ordered && server range depends on index of this server - let servers_set = MapKeyServerSet::new(false, vec![ + #[test] + fn is_processed_by_this_key_server_in_set_of_4() { + // servers set is ordered && server range depends on index of this server + let 
servers_set = MapKeyServerSet::new(false, vec![ // secret: 0000000000000000000000000000000000000000000000000000000000000001 ("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(), "127.0.0.1:8080".parse().unwrap()), @@ -740,319 +1167,804 @@ mod tests { "127.0.0.1:8080".parse().unwrap()), ].into_iter().collect()); - // 1st server: process hashes [0x0; 0x3ff...ff] - let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( - "0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap()); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"2000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"4000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false); + // 1st server: process hashes [0x0; 0x3ff...ff] + let key_pair = PlainNodeKeyPair::new( + KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000001" + .parse() + .unwrap(), + ) + .unwrap(), + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"0000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"2000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + 
&servers_set, + key_pair.public(), + &"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"4000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + false + ); - // 2nd server: process hashes from 0x400...000 to 0x7ff...ff - let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( - "0000000000000000000000000000000000000000000000000000000000000002".parse().unwrap()).unwrap()); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"4000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"6000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"8000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false); + // 2nd server: process hashes from 0x400...000 to 0x7ff...ff + let key_pair = PlainNodeKeyPair::new( + KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000002" + .parse() + .unwrap(), + ) + .unwrap(), + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap() + ), + false + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + 
&"4000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"6000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"8000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + false + ); - // 3rd server: process hashes from 0x800...000 to 0xbff...ff - let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( - "0000000000000000000000000000000000000000000000000000000000000004".parse().unwrap()).unwrap()); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"8000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"a000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"c000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false); + // 3rd server: process hashes from 0x800...000 to 0xbff...ff + let key_pair = PlainNodeKeyPair::new( + KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000004" + .parse() + 
.unwrap(), + ) + .unwrap(), + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap() + ), + false + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"8000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"a000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"c000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + false + ); - // 4th server: process hashes from 0xc00...000 to 0xfff...ff - let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( - "0000000000000000000000000000000000000000000000000000000000000003".parse().unwrap()).unwrap()); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"c000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"e000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); - assert_eq!(is_processed_by_this_key_server(&servers_set, key_pair.public(), - &"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); - } + // 4th server: process hashes from 0xc00...000 to 
0xfff...ff + let key_pair = PlainNodeKeyPair::new( + KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000003" + .parse() + .unwrap(), + ) + .unwrap(), + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap() + ), + false + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"c000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"e000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() + ), + true + ); + assert_eq!( + is_processed_by_this_key_server( + &servers_set, + key_pair.public(), + &"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap() + ), + true + ); + } - #[test] - fn no_tasks_scheduled_when_no_contract_events() { - let listener = make_service_contract_listener(None, None, None, None, None); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - } + #[test] + fn no_tasks_scheduled_when_no_contract_events() { + let listener = make_service_contract_listener(None, None, None, None, None); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + } - #[test] - fn tasks_are_not_scheduled_on_isolated_node() { - let mut contract = DummyServiceContract::default(); - contract.logs.push(ServiceTask::GenerateServerKey(Default::default(), Default::default(), Default::default(), 0)); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None, Some(make_servers_set(true))); - 
assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - } + #[test] + fn tasks_are_not_scheduled_on_isolated_node() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(ServiceTask::GenerateServerKey( + Default::default(), + Default::default(), + Default::default(), + 0, + )); + let listener = make_service_contract_listener( + Some(Arc::new(contract)), + None, + None, + None, + Some(make_servers_set(true)), + ); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + } - // server key generation tests + // server key generation tests - #[test] - fn server_key_generation_is_scheduled_when_requested() { - let mut contract = DummyServiceContract::default(); - contract.logs.push(ServiceTask::GenerateServerKey(Default::default(), Default::default(), Default::default(), 0)); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); - assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::GenerateServerKey( - Default::default(), Default::default(), Default::default(), 0))); - } + #[test] + fn server_key_generation_is_scheduled_when_requested() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(ServiceTask::GenerateServerKey( + Default::default(), + Default::default(), + Default::default(), + 0, + )); + let listener = + make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + 
assert_eq!( + listener.data.tasks_queue.snapshot().pop_back(), + Some(ServiceTask::GenerateServerKey( + Default::default(), + Default::default(), + Default::default(), + 0 + )) + ); + } - #[test] - fn no_new_tasks_scheduled_when_server_key_generation_requested_and_request_belongs_to_other_key_server() { - let server_key_id = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap(); - let mut contract = DummyServiceContract::default(); - contract.logs.push(ServiceTask::GenerateServerKey(Default::default(), server_key_id, Default::default(), 0)); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - } + #[test] + fn no_new_tasks_scheduled_when_server_key_generation_requested_and_request_belongs_to_other_key_server( + ) { + let server_key_id = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap(); + let mut contract = DummyServiceContract::default(); + contract.logs.push(ServiceTask::GenerateServerKey( + Default::default(), + server_key_id, + Default::default(), + 0, + )); + let listener = + make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + } - #[test] - fn generation_session_is_created_when_processing_generate_server_key_task() { - let cluster = Arc::new(DummyClusterClient::default()); - let listener = make_service_contract_listener(None, Some(cluster.clone()), None, None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::GenerateServerKey( - Default::default(), Default::default(), Default::default(), Default::default())).unwrap_err(); - 
assert_eq!(cluster.generation_requests_count.load(Ordering::Relaxed), 1); - } + #[test] + fn generation_session_is_created_when_processing_generate_server_key_task() { + let cluster = Arc::new(DummyClusterClient::default()); + let listener = + make_service_contract_listener(None, Some(cluster.clone()), None, None, None); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::GenerateServerKey( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap_err(); + assert_eq!(cluster.generation_requests_count.load(Ordering::Relaxed), 1); + } - #[test] - fn server_key_generation_is_not_retried_if_tried_in_the_same_cycle() { - let mut contract = DummyServiceContract::default(); - contract.pending_requests.push((false, ServiceTask::GenerateServerKey(Default::default(), - Default::default(), Default::default(), Default::default()))); - let cluster = Arc::new(DummyClusterClient::default()); - let listener = make_service_contract_listener(Some(Arc::new(contract)), Some(cluster.clone()), None, None, None); - listener.data.retry_data.lock().affected_server_keys.insert(Default::default()); - ServiceContractListener::retry_pending_requests(&listener.data).unwrap(); - assert_eq!(cluster.generation_requests_count.load(Ordering::Relaxed), 0); - } + #[test] + fn server_key_generation_is_not_retried_if_tried_in_the_same_cycle() { + let mut contract = DummyServiceContract::default(); + contract.pending_requests.push(( + false, + ServiceTask::GenerateServerKey( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + )); + let cluster = Arc::new(DummyClusterClient::default()); + let listener = make_service_contract_listener( + Some(Arc::new(contract)), + Some(cluster.clone()), + None, + None, + None, + ); + listener + .data + .retry_data + .lock() + .affected_server_keys + .insert(Default::default()); + ServiceContractListener::retry_pending_requests(&listener.data).unwrap(); 
+ assert_eq!(cluster.generation_requests_count.load(Ordering::Relaxed), 0); + } - // server key retrieval tests + // server key retrieval tests - #[test] - fn server_key_retrieval_is_scheduled_when_requested() { - let mut contract = DummyServiceContract::default(); - contract.logs.push(ServiceTask::RetrieveServerKey(Default::default(), Default::default())); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); - assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RetrieveServerKey( - Default::default(), Default::default()))); - } + #[test] + fn server_key_retrieval_is_scheduled_when_requested() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(ServiceTask::RetrieveServerKey( + Default::default(), + Default::default(), + )); + let listener = + make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + assert_eq!( + listener.data.tasks_queue.snapshot().pop_back(), + Some(ServiceTask::RetrieveServerKey( + Default::default(), + Default::default() + )) + ); + } - #[test] - fn server_key_retrieval_is_scheduled_when_requested_and_request_belongs_to_other_key_server() { - let server_key_id: ServerKeyId = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap(); - let mut contract = DummyServiceContract::default(); - contract.logs.push(ServiceTask::RetrieveServerKey(Default::default(), server_key_id.clone())); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - 
listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); - assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RetrieveServerKey( - Default::default(), server_key_id))); - } + #[test] + fn server_key_retrieval_is_scheduled_when_requested_and_request_belongs_to_other_key_server() { + let server_key_id: ServerKeyId = + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap(); + let mut contract = DummyServiceContract::default(); + contract.logs.push(ServiceTask::RetrieveServerKey( + Default::default(), + server_key_id.clone(), + )); + let listener = + make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + assert_eq!( + listener.data.tasks_queue.snapshot().pop_back(), + Some(ServiceTask::RetrieveServerKey( + Default::default(), + server_key_id + )) + ); + } - #[test] - fn server_key_is_retrieved_when_processing_retrieve_server_key_task() { - let contract = Arc::new(DummyServiceContract::default()); - let key_storage = create_non_empty_key_storage(false); - let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage), None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveServerKey( - Default::default(), Default::default())).unwrap(); - assert_eq!(*contract.retrieved_server_keys.lock(), vec![(Default::default(), - KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap().public().clone(), 0)]); - } + #[test] + fn server_key_is_retrieved_when_processing_retrieve_server_key_task() { + let contract = Arc::new(DummyServiceContract::default()); + let key_storage = create_non_empty_key_storage(false); + let listener = 
make_service_contract_listener( + Some(contract.clone()), + None, + Some(key_storage), + None, + None, + ); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::RetrieveServerKey(Default::default(), Default::default()), + ) + .unwrap(); + assert_eq!( + *contract.retrieved_server_keys.lock(), + vec![( + Default::default(), + KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000001" + .parse() + .unwrap() + ) + .unwrap() + .public() + .clone(), + 0 + )] + ); + } - #[test] - fn server_key_retrieval_failure_is_reported_when_processing_retrieve_server_key_task_and_key_is_unknown() { - let contract = Arc::new(DummyServiceContract::default()); - let listener = make_service_contract_listener(Some(contract.clone()), None, None, None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveServerKey( - Default::default(), Default::default())).unwrap(); - assert_eq!(*contract.server_keys_retrieval_failures.lock(), vec![Default::default()]); - } + #[test] + fn server_key_retrieval_failure_is_reported_when_processing_retrieve_server_key_task_and_key_is_unknown( + ) { + let contract = Arc::new(DummyServiceContract::default()); + let listener = + make_service_contract_listener(Some(contract.clone()), None, None, None, None); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::RetrieveServerKey(Default::default(), Default::default()), + ) + .unwrap(); + assert_eq!( + *contract.server_keys_retrieval_failures.lock(), + vec![Default::default()] + ); + } - #[test] - fn server_key_retrieval_is_not_retried_if_tried_in_the_same_cycle() { - let mut contract = DummyServiceContract::default(); - contract.pending_requests.push((false, ServiceTask::RetrieveServerKey(Default::default(), Default::default()))); - let cluster = Arc::new(DummyClusterClient::default()); - let listener = make_service_contract_listener(Some(Arc::new(contract)), Some(cluster.clone()), None, 
None, None); - listener.data.retry_data.lock().affected_server_keys.insert(Default::default()); - ServiceContractListener::retry_pending_requests(&listener.data).unwrap(); - assert_eq!(cluster.generation_requests_count.load(Ordering::Relaxed), 0); - } + #[test] + fn server_key_retrieval_is_not_retried_if_tried_in_the_same_cycle() { + let mut contract = DummyServiceContract::default(); + contract.pending_requests.push(( + false, + ServiceTask::RetrieveServerKey(Default::default(), Default::default()), + )); + let cluster = Arc::new(DummyClusterClient::default()); + let listener = make_service_contract_listener( + Some(Arc::new(contract)), + Some(cluster.clone()), + None, + None, + None, + ); + listener + .data + .retry_data + .lock() + .affected_server_keys + .insert(Default::default()); + ServiceContractListener::retry_pending_requests(&listener.data).unwrap(); + assert_eq!(cluster.generation_requests_count.load(Ordering::Relaxed), 0); + } - // document key store tests + // document key store tests - #[test] - fn document_key_store_is_scheduled_when_requested() { - let mut contract = DummyServiceContract::default(); - contract.logs.push(ServiceTask::StoreDocumentKey(Default::default(), Default::default(), - Default::default(), Default::default(), Default::default())); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); - assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::StoreDocumentKey( - Default::default(), Default::default(), Default::default(), Default::default(), Default::default()))); - } + #[test] + fn document_key_store_is_scheduled_when_requested() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(ServiceTask::StoreDocumentKey( + Default::default(), + Default::default(), + 
Default::default(), + Default::default(), + Default::default(), + )); + let listener = + make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + assert_eq!( + listener.data.tasks_queue.snapshot().pop_back(), + Some(ServiceTask::StoreDocumentKey( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default() + )) + ); + } - #[test] - fn document_key_store_is_scheduled_when_requested_and_request_belongs_to_other_key_server() { - let server_key_id: ServerKeyId = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap(); - let mut contract = DummyServiceContract::default(); - contract.logs.push(ServiceTask::StoreDocumentKey(Default::default(), server_key_id.clone(), - Default::default(), Default::default(), Default::default())); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); - assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::StoreDocumentKey( - Default::default(), server_key_id, Default::default(), Default::default(), Default::default()))); - } + #[test] + fn document_key_store_is_scheduled_when_requested_and_request_belongs_to_other_key_server() { + let server_key_id: ServerKeyId = + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap(); + let mut contract = DummyServiceContract::default(); + contract.logs.push(ServiceTask::StoreDocumentKey( + Default::default(), + server_key_id.clone(), + Default::default(), + Default::default(), + Default::default(), + )); + let listener = + 
make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + assert_eq!( + listener.data.tasks_queue.snapshot().pop_back(), + Some(ServiceTask::StoreDocumentKey( + Default::default(), + server_key_id, + Default::default(), + Default::default(), + Default::default() + )) + ); + } - #[test] - fn document_key_is_stored_when_processing_store_document_key_task() { - let contract = Arc::new(DummyServiceContract::default()); - let key_storage = create_non_empty_key_storage(false); - let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage.clone()), None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::StoreDocumentKey( - Default::default(), Default::default(), Default::default(), Default::default(), Default::default())).unwrap(); - assert_eq!(*contract.stored_document_keys.lock(), vec![Default::default()]); + #[test] + fn document_key_is_stored_when_processing_store_document_key_task() { + let contract = Arc::new(DummyServiceContract::default()); + let key_storage = create_non_empty_key_storage(false); + let listener = make_service_contract_listener( + Some(contract.clone()), + None, + Some(key_storage.clone()), + None, + None, + ); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::StoreDocumentKey( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap(); + assert_eq!( + *contract.stored_document_keys.lock(), + vec![Default::default()] + ); - let key_share = key_storage.get(&Default::default()).unwrap().unwrap(); - assert_eq!(key_share.common_point, Some(Default::default())); - assert_eq!(key_share.encrypted_point, Some(Default::default())); - } + let key_share = 
key_storage.get(&Default::default()).unwrap().unwrap(); + assert_eq!(key_share.common_point, Some(Default::default())); + assert_eq!(key_share.encrypted_point, Some(Default::default())); + } - #[test] - fn document_key_store_failure_reported_when_no_server_key() { - let contract = Arc::new(DummyServiceContract::default()); - let listener = make_service_contract_listener(Some(contract.clone()), None, None, None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::StoreDocumentKey( - Default::default(), Default::default(), Default::default(), Default::default(), Default::default())).unwrap_err(); - assert_eq!(*contract.document_keys_store_failures.lock(), vec![Default::default()]); - } + #[test] + fn document_key_store_failure_reported_when_no_server_key() { + let contract = Arc::new(DummyServiceContract::default()); + let listener = + make_service_contract_listener(Some(contract.clone()), None, None, None, None); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::StoreDocumentKey( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap_err(); + assert_eq!( + *contract.document_keys_store_failures.lock(), + vec![Default::default()] + ); + } - #[test] - fn document_key_store_failure_reported_when_document_key_already_set() { - let contract = Arc::new(DummyServiceContract::default()); - let key_storage = create_non_empty_key_storage(true); - let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage), None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::StoreDocumentKey( - Default::default(), Default::default(), Default::default(), Default::default(), Default::default())).unwrap_err(); - assert_eq!(*contract.document_keys_store_failures.lock(), vec![Default::default()]); - } + #[test] + fn document_key_store_failure_reported_when_document_key_already_set() { + let 
contract = Arc::new(DummyServiceContract::default()); + let key_storage = create_non_empty_key_storage(true); + let listener = make_service_contract_listener( + Some(contract.clone()), + None, + Some(key_storage), + None, + None, + ); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::StoreDocumentKey( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap_err(); + assert_eq!( + *contract.document_keys_store_failures.lock(), + vec![Default::default()] + ); + } - #[test] - fn document_key_store_failure_reported_when_author_differs() { - let contract = Arc::new(DummyServiceContract::default()); - let key_storage = create_non_empty_key_storage(false); - let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage), None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::StoreDocumentKey( - Default::default(), Default::default(), 1.into(), Default::default(), Default::default())).unwrap_err(); - assert_eq!(*contract.document_keys_store_failures.lock(), vec![Default::default()]); - } + #[test] + fn document_key_store_failure_reported_when_author_differs() { + let contract = Arc::new(DummyServiceContract::default()); + let key_storage = create_non_empty_key_storage(false); + let listener = make_service_contract_listener( + Some(contract.clone()), + None, + Some(key_storage), + None, + None, + ); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::StoreDocumentKey( + Default::default(), + Default::default(), + 1.into(), + Default::default(), + Default::default(), + ), + ) + .unwrap_err(); + assert_eq!( + *contract.document_keys_store_failures.lock(), + vec![Default::default()] + ); + } - // document key shadow common retrieval tests + // document key shadow common retrieval tests - #[test] - fn document_key_shadow_common_retrieval_is_scheduled_when_requested() { - let mut 
contract = DummyServiceContract::default(); - contract.logs.push(ServiceTask::RetrieveShadowDocumentKeyCommon(Default::default(), Default::default(), Default::default())); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); - assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RetrieveShadowDocumentKeyCommon( - Default::default(), Default::default(), Default::default()))); - } + #[test] + fn document_key_shadow_common_retrieval_is_scheduled_when_requested() { + let mut contract = DummyServiceContract::default(); + contract + .logs + .push(ServiceTask::RetrieveShadowDocumentKeyCommon( + Default::default(), + Default::default(), + Default::default(), + )); + let listener = + make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + assert_eq!( + listener.data.tasks_queue.snapshot().pop_back(), + Some(ServiceTask::RetrieveShadowDocumentKeyCommon( + Default::default(), + Default::default(), + Default::default() + )) + ); + } - #[test] - fn document_key_shadow_common_retrieval_is_scheduled_when_requested_and_request_belongs_to_other_key_server() { - let server_key_id: ServerKeyId = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap(); - let mut contract = DummyServiceContract::default(); - contract.logs.push(ServiceTask::RetrieveShadowDocumentKeyCommon(Default::default(), server_key_id.clone(), Default::default())); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); - 
listener.process_service_contract_events(); - assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); - assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RetrieveShadowDocumentKeyCommon( - Default::default(), server_key_id, Default::default()))); - } + #[test] + fn document_key_shadow_common_retrieval_is_scheduled_when_requested_and_request_belongs_to_other_key_server( + ) { + let server_key_id: ServerKeyId = + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .parse() + .unwrap(); + let mut contract = DummyServiceContract::default(); + contract + .logs + .push(ServiceTask::RetrieveShadowDocumentKeyCommon( + Default::default(), + server_key_id.clone(), + Default::default(), + )); + let listener = + make_service_contract_listener(Some(Arc::new(contract)), None, None, None, None); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 0); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + assert_eq!( + listener.data.tasks_queue.snapshot().pop_back(), + Some(ServiceTask::RetrieveShadowDocumentKeyCommon( + Default::default(), + server_key_id, + Default::default() + )) + ); + } - #[test] - fn document_key_shadow_common_is_retrieved_when_processing_document_key_shadow_common_retrieval_task() { - let contract = Arc::new(DummyServiceContract::default()); - let key_storage = create_non_empty_key_storage(true); - let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage.clone()), None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveShadowDocumentKeyCommon( - Default::default(), Default::default(), Default::default())).unwrap(); - assert_eq!(*contract.common_shadow_retrieved_document_keys.lock(), vec![(Default::default(), Default::default(), - Default::default(), 0)]); - } + #[test] + fn document_key_shadow_common_is_retrieved_when_processing_document_key_shadow_common_retrieval_task( + ) 
{ + let contract = Arc::new(DummyServiceContract::default()); + let key_storage = create_non_empty_key_storage(true); + let listener = make_service_contract_listener( + Some(contract.clone()), + None, + Some(key_storage.clone()), + None, + None, + ); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::RetrieveShadowDocumentKeyCommon( + Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap(); + assert_eq!( + *contract.common_shadow_retrieved_document_keys.lock(), + vec![( + Default::default(), + Default::default(), + Default::default(), + 0 + )] + ); + } - #[test] - fn document_key_shadow_common_retrieval_failure_reported_when_access_denied() { - let acl_storage = DummyAclStorage::default(); - acl_storage.prohibit(Default::default(), Default::default()); - let contract = Arc::new(DummyServiceContract::default()); - let key_storage = create_non_empty_key_storage(true); - let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage.clone()), Some(Arc::new(acl_storage)), None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveShadowDocumentKeyCommon( - Default::default(), Default::default(), Default::default())).unwrap_err(); - assert_eq!(*contract.document_keys_shadow_retrieval_failures.lock(), vec![(Default::default(), Default::default())]); - } + #[test] + fn document_key_shadow_common_retrieval_failure_reported_when_access_denied() { + let acl_storage = DummyAclStorage::default(); + acl_storage.prohibit(Default::default(), Default::default()); + let contract = Arc::new(DummyServiceContract::default()); + let key_storage = create_non_empty_key_storage(true); + let listener = make_service_contract_listener( + Some(contract.clone()), + None, + Some(key_storage.clone()), + Some(Arc::new(acl_storage)), + None, + ); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::RetrieveShadowDocumentKeyCommon( + 
Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap_err(); + assert_eq!( + *contract.document_keys_shadow_retrieval_failures.lock(), + vec![(Default::default(), Default::default())] + ); + } - #[test] - fn document_key_shadow_common_retrieval_failure_reported_when_no_server_key() { - let contract = Arc::new(DummyServiceContract::default()); - let listener = make_service_contract_listener(Some(contract.clone()), None, None, None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveShadowDocumentKeyCommon( - Default::default(), Default::default(), Default::default())).unwrap_err(); - assert_eq!(*contract.document_keys_shadow_retrieval_failures.lock(), vec![(Default::default(), Default::default())]); - } + #[test] + fn document_key_shadow_common_retrieval_failure_reported_when_no_server_key() { + let contract = Arc::new(DummyServiceContract::default()); + let listener = + make_service_contract_listener(Some(contract.clone()), None, None, None, None); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::RetrieveShadowDocumentKeyCommon( + Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap_err(); + assert_eq!( + *contract.document_keys_shadow_retrieval_failures.lock(), + vec![(Default::default(), Default::default())] + ); + } - #[test] - fn document_key_shadow_common_retrieval_failure_reported_when_no_document_key() { - let contract = Arc::new(DummyServiceContract::default()); - let key_storage = create_non_empty_key_storage(false); - let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage.clone()), None, None); - ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveShadowDocumentKeyCommon( - Default::default(), Default::default(), Default::default())).unwrap_err(); - assert_eq!(*contract.document_keys_shadow_retrieval_failures.lock(), vec![(Default::default(), 
Default::default())]); - } + #[test] + fn document_key_shadow_common_retrieval_failure_reported_when_no_document_key() { + let contract = Arc::new(DummyServiceContract::default()); + let key_storage = create_non_empty_key_storage(false); + let listener = make_service_contract_listener( + Some(contract.clone()), + None, + Some(key_storage.clone()), + None, + None, + ); + ServiceContractListener::process_service_task( + &listener.data, + ServiceTask::RetrieveShadowDocumentKeyCommon( + Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap_err(); + assert_eq!( + *contract.document_keys_shadow_retrieval_failures.lock(), + vec![(Default::default(), Default::default())] + ); + } } diff --git a/secret-store/src/listener/tasks_queue.rs b/secret-store/src/listener/tasks_queue.rs index 2d9bdba11..2dffdc2d0 100644 --- a/secret-store/src/listener/tasks_queue.rs +++ b/secret-store/src/listener/tasks_queue.rs @@ -14,65 +14,69 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use parking_lot::{Condvar, Mutex}; use std::collections::VecDeque; -use parking_lot::{Mutex, Condvar}; #[derive(Default)] /// General deque-based tasks queue. pub struct TasksQueue { - /// Service event. - service_event: Condvar, - /// Service tasks queue. - service_tasks: Mutex>, + /// Service event. + service_event: Condvar, + /// Service tasks queue. + service_tasks: Mutex>, } -impl TasksQueue where Task: Clone { - /// Create new tasks queue. - pub fn new() -> Self { - TasksQueue { - service_event: Condvar::new(), - service_tasks: Mutex::new(VecDeque::new()), - } - } +impl TasksQueue +where + Task: Clone, +{ + /// Create new tasks queue. + pub fn new() -> Self { + TasksQueue { + service_event: Condvar::new(), + service_tasks: Mutex::new(VecDeque::new()), + } + } - #[cfg(test)] - /// Get current tasks snapshot. 
- pub fn snapshot(&self) -> VecDeque { - self.service_tasks.lock().clone() - } + #[cfg(test)] + /// Get current tasks snapshot. + pub fn snapshot(&self) -> VecDeque { + self.service_tasks.lock().clone() + } - /// Push task to the front of queue. - pub fn push_front(&self, task: Task) { - let mut service_tasks = self.service_tasks.lock(); - service_tasks.push_front(task); - self.service_event.notify_all(); - } + /// Push task to the front of queue. + pub fn push_front(&self, task: Task) { + let mut service_tasks = self.service_tasks.lock(); + service_tasks.push_front(task); + self.service_event.notify_all(); + } - /// Push task to the back of queue. - pub fn push(&self, task: Task) { - let mut service_tasks = self.service_tasks.lock(); - service_tasks.push_back(task); - self.service_event.notify_all(); - } + /// Push task to the back of queue. + pub fn push(&self, task: Task) { + let mut service_tasks = self.service_tasks.lock(); + service_tasks.push_back(task); + self.service_event.notify_all(); + } - /// Push task to the back of queue. - pub fn push_many>(&self, tasks: I) { - let mut service_tasks = self.service_tasks.lock(); - let previous_len = service_tasks.len(); - service_tasks.extend(tasks); - if service_tasks.len() != previous_len { - self.service_event.notify_all(); - } - } + /// Push task to the back of queue. + pub fn push_many>(&self, tasks: I) { + let mut service_tasks = self.service_tasks.lock(); + let previous_len = service_tasks.len(); + service_tasks.extend(tasks); + if service_tasks.len() != previous_len { + self.service_event.notify_all(); + } + } - /// Wait for new task (task is removed from the front of queue). - pub fn wait(&self) -> Task { - let mut service_tasks = self.service_tasks.lock(); - if service_tasks.is_empty() { - self.service_event.wait(&mut service_tasks); - } + /// Wait for new task (task is removed from the front of queue). 
+ pub fn wait(&self) -> Task { + let mut service_tasks = self.service_tasks.lock(); + if service_tasks.is_empty() { + self.service_event.wait(&mut service_tasks); + } - service_tasks.pop_front() - .expect("service_event is only fired when there are new tasks; qed") - } + service_tasks + .pop_front() + .expect("service_event is only fired when there are new tasks; qed") + } } diff --git a/secret-store/src/node_key_pair.rs b/secret-store/src/node_key_pair.rs index f50f75ad1..64747523d 100644 --- a/secret-store/src/node_key_pair.rs +++ b/secret-store/src/node_key_pair.rs @@ -14,93 +14,110 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use ethkey::crypto::ecdh::agree; -use ethkey::{KeyPair, Public, Signature, Error as EthKeyError, sign, public_to_address}; -use ethereum_types::{H256, Address}; +use ethereum_types::{Address, H256}; +use ethkey::{ + crypto::ecdh::agree, public_to_address, sign, Error as EthKeyError, KeyPair, Public, Signature, +}; use traits::NodeKeyPair; pub struct PlainNodeKeyPair { - key_pair: KeyPair, + key_pair: KeyPair, } impl PlainNodeKeyPair { - pub fn new(key_pair: KeyPair) -> Self { - PlainNodeKeyPair { - key_pair: key_pair, - } - } + pub fn new(key_pair: KeyPair) -> Self { + PlainNodeKeyPair { key_pair: key_pair } + } - #[cfg(test)] - pub fn key_pair(&self) -> &KeyPair { - &self.key_pair - } + #[cfg(test)] + pub fn key_pair(&self) -> &KeyPair { + &self.key_pair + } } impl NodeKeyPair for PlainNodeKeyPair { - fn public(&self) -> &Public { - self.key_pair.public() - } + fn public(&self) -> &Public { + self.key_pair.public() + } - fn address(&self) -> Address { - public_to_address(self.key_pair.public()) - } + fn address(&self) -> Address { + public_to_address(self.key_pair.public()) + } - fn sign(&self, data: &H256) -> Result { - sign(self.key_pair.secret(), data) - } + fn sign(&self, data: &H256) -> Result { + sign(self.key_pair.secret(), data) + } - fn 
compute_shared_key(&self, peer_public: &Public) -> Result { - agree(self.key_pair.secret(), peer_public) - .map_err(|e| EthKeyError::Custom(e.to_string())) - .and_then(KeyPair::from_secret) - } + fn compute_shared_key(&self, peer_public: &Public) -> Result { + agree(self.key_pair.secret(), peer_public) + .map_err(|e| EthKeyError::Custom(e.to_string())) + .and_then(KeyPair::from_secret) + } } #[cfg(feature = "accounts")] mod accounts { - use super::*; - use std::sync::Arc; - use ethkey::Password; - use accounts::AccountProvider; + use super::*; + use accounts::AccountProvider; + use ethkey::Password; + use std::sync::Arc; - pub struct KeyStoreNodeKeyPair { - account_provider: Arc, - address: Address, - public: Public, - password: Password, - } + pub struct KeyStoreNodeKeyPair { + account_provider: Arc, + address: Address, + public: Public, + password: Password, + } - impl KeyStoreNodeKeyPair { - pub fn new(account_provider: Arc, address: Address, password: Password) -> Result { - let public = account_provider.account_public(address.clone(), &password).map_err(|e| EthKeyError::Custom(format!("{}", e)))?; - Ok(KeyStoreNodeKeyPair { - account_provider: account_provider, - address: address, - public: public, - password: password, - }) - } - } + impl KeyStoreNodeKeyPair { + pub fn new( + account_provider: Arc, + address: Address, + password: Password, + ) -> Result { + let public = account_provider + .account_public(address.clone(), &password) + .map_err(|e| EthKeyError::Custom(format!("{}", e)))?; + Ok(KeyStoreNodeKeyPair { + account_provider: account_provider, + address: address, + public: public, + password: password, + }) + } + } - impl NodeKeyPair for KeyStoreNodeKeyPair { - fn public(&self) -> &Public { - &self.public - } + impl NodeKeyPair for KeyStoreNodeKeyPair { + fn public(&self) -> &Public { + &self.public + } - fn address(&self) -> Address { - public_to_address(&self.public) - } + fn address(&self) -> Address { + public_to_address(&self.public) + } - fn 
sign(&self, data: &H256) -> Result { - self.account_provider.sign(self.address.clone(), Some(self.password.clone()), data.clone()) - .map_err(|e| EthKeyError::Custom(format!("{}", e))) - } + fn sign(&self, data: &H256) -> Result { + self.account_provider + .sign( + self.address.clone(), + Some(self.password.clone()), + data.clone(), + ) + .map_err(|e| EthKeyError::Custom(format!("{}", e))) + } - fn compute_shared_key(&self, peer_public: &Public) -> Result { - KeyPair::from_secret(self.account_provider.agree(self.address.clone(), Some(self.password.clone()), peer_public) - .map_err(|e| EthKeyError::Custom(format!("{}", e)))?) - } - } + fn compute_shared_key(&self, peer_public: &Public) -> Result { + KeyPair::from_secret( + self.account_provider + .agree( + self.address.clone(), + Some(self.password.clone()), + peer_public, + ) + .map_err(|e| EthKeyError::Custom(format!("{}", e)))?, + ) + } + } } #[cfg(feature = "accounts")] diff --git a/secret-store/src/serialization.rs b/secret-store/src/serialization.rs index 64cc5f1ce..9be41e569 100644 --- a/secret-store/src/serialization.rs +++ b/secret-store/src/serialization.rs @@ -14,23 +14,27 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt; -use std::ops::Deref; -use rustc_hex::{ToHex, FromHex}; -use serde::{Serialize, Deserialize, Serializer, Deserializer}; -use serde::de::{Visitor, Error as SerdeError}; -use ethkey::{Public, Secret, Signature}; -use ethereum_types::{H160, H256}; use bytes::Bytes; +use ethereum_types::{H160, H256}; +use ethkey::{Public, Secret, Signature}; +use rustc_hex::{FromHex, ToHex}; +use serde::{ + de::{Error as SerdeError, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::{fmt, ops::Deref}; use types::Requester; macro_rules! 
impl_bytes_deserialize { - ($name: ident, $value: expr, true) => { - $value[2..].from_hex().map($name).map_err(SerdeError::custom) - }; - ($name: ident, $value: expr, false) => { - $value[2..].parse().map($name).map_err(SerdeError::custom) - } + ($name: ident, $value: expr, true) => { + $value[2..] + .from_hex() + .map($name) + .map_err(SerdeError::custom) + }; + ($name: ident, $value: expr, false) => { + $value[2..].parse().map($name).map_err(SerdeError::custom) + }; } macro_rules! impl_bytes { @@ -108,7 +112,12 @@ impl_bytes!(SerializableH256, H256, false, (Default, PartialOrd, Ord)); /// Serializable H160. impl_bytes!(SerializableH160, H160, false, (Default)); /// Serializable H512 (aka Public). -impl_bytes!(SerializablePublic, Public, false, (Default, PartialOrd, Ord)); +impl_bytes!( + SerializablePublic, + Public, + false, + (Default, PartialOrd, Ord) +); /// Serializable Secret. impl_bytes!(SerializableSecret, Secret, false, ()); /// Serializable Signature. @@ -117,65 +126,70 @@ impl_bytes!(SerializableSignature, Signature, false, ()); /// Serializable shadow decryption result. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SerializableEncryptedDocumentKeyShadow { - /// Decrypted secret point. It is partially decrypted if shadow decryption was requested. - pub decrypted_secret: SerializablePublic, - /// Shared common point. - pub common_point: SerializablePublic, - /// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public. - pub decrypt_shadows: Vec, + /// Decrypted secret point. It is partially decrypted if shadow decryption was requested. + pub decrypted_secret: SerializablePublic, + /// Shared common point. + pub common_point: SerializablePublic, + /// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public. + pub decrypt_shadows: Vec, } /// Serializable requester identification data. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub enum SerializableRequester { - /// Requested with server key id signature. - Signature(SerializableSignature), - /// Requested with public key. - Public(SerializablePublic), - /// Requested with verified address. - Address(SerializableAddress), + /// Requested with server key id signature. + Signature(SerializableSignature), + /// Requested with public key. + Public(SerializablePublic), + /// Requested with verified address. + Address(SerializableAddress), } impl From for Requester { - fn from(requester: SerializableRequester) -> Requester { - match requester { - SerializableRequester::Signature(signature) => Requester::Signature(signature.into()), - SerializableRequester::Public(public) => Requester::Public(public.into()), - SerializableRequester::Address(address) => Requester::Address(address.into()), - } - } + fn from(requester: SerializableRequester) -> Requester { + match requester { + SerializableRequester::Signature(signature) => Requester::Signature(signature.into()), + SerializableRequester::Public(public) => Requester::Public(public.into()), + SerializableRequester::Address(address) => Requester::Address(address.into()), + } + } } impl From for SerializableRequester { - fn from(requester: Requester) -> SerializableRequester { - match requester { - Requester::Signature(signature) => SerializableRequester::Signature(signature.into()), - Requester::Public(public) => SerializableRequester::Public(public.into()), - Requester::Address(address) => SerializableRequester::Address(address.into()), - } - } + fn from(requester: Requester) -> SerializableRequester { + match requester { + Requester::Signature(signature) => SerializableRequester::Signature(signature.into()), + Requester::Public(public) => SerializableRequester::Public(public.into()), + Requester::Address(address) => SerializableRequester::Address(address.into()), + } + } } #[cfg(test)] mod tests { - use serde_json; - use super::{SerializableBytes, 
SerializablePublic}; + use super::{SerializableBytes, SerializablePublic}; + use serde_json; - #[test] - fn serialize_and_deserialize_bytes() { - let bytes = SerializableBytes(vec![1, 2, 3, 4]); - let bytes_serialized = serde_json::to_string(&bytes).unwrap(); - assert_eq!(&bytes_serialized, r#""0x01020304""#); - let bytes_deserialized: SerializableBytes = serde_json::from_str(&bytes_serialized).unwrap(); - assert_eq!(bytes_deserialized, bytes); - } + #[test] + fn serialize_and_deserialize_bytes() { + let bytes = SerializableBytes(vec![1, 2, 3, 4]); + let bytes_serialized = serde_json::to_string(&bytes).unwrap(); + assert_eq!(&bytes_serialized, r#""0x01020304""#); + let bytes_deserialized: SerializableBytes = + serde_json::from_str(&bytes_serialized).unwrap(); + assert_eq!(bytes_deserialized, bytes); + } - #[test] - fn serialize_and_deserialize_public() { - let public = SerializablePublic("cac6c205eb06c8308d65156ff6c862c62b000b8ead121a4455a8ddeff7248128d895692136f240d5d1614dc7cc4147b1bd584bd617e30560bb872064d09ea325".parse().unwrap()); - let public_serialized = serde_json::to_string(&public).unwrap(); - assert_eq!(&public_serialized, r#""0xcac6c205eb06c8308d65156ff6c862c62b000b8ead121a4455a8ddeff7248128d895692136f240d5d1614dc7cc4147b1bd584bd617e30560bb872064d09ea325""#); - let public_deserialized: SerializablePublic = serde_json::from_str(&public_serialized).unwrap(); - assert_eq!(public_deserialized, public); - } + #[test] + fn serialize_and_deserialize_public() { + let public = SerializablePublic("cac6c205eb06c8308d65156ff6c862c62b000b8ead121a4455a8ddeff7248128d895692136f240d5d1614dc7cc4147b1bd584bd617e30560bb872064d09ea325".parse().unwrap()); + let public_serialized = serde_json::to_string(&public).unwrap(); + assert_eq!( + &public_serialized, + r#""0xcac6c205eb06c8308d65156ff6c862c62b000b8ead121a4455a8ddeff7248128d895692136f240d5d1614dc7cc4147b1bd584bd617e30560bb872064d09ea325""# + ); + let public_deserialized: SerializablePublic = + 
serde_json::from_str(&public_serialized).unwrap(); + assert_eq!(public_deserialized, public); + } } diff --git a/secret-store/src/traits.rs b/secret-store/src/traits.rs index e12c75e5d..c10273791 100644 --- a/secret-store/src/traits.rs +++ b/secret-store/src/traits.rs @@ -14,96 +14,137 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use ethereum_types::{Address, H256}; +use ethkey::{Error as EthKeyError, KeyPair, Signature}; use std::collections::BTreeSet; -use ethkey::{KeyPair, Signature, Error as EthKeyError}; -use ethereum_types::{H256, Address}; -use types::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, Requester, - EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId}; +use types::{ + EncryptedDocumentKey, EncryptedDocumentKeyShadow, EncryptedMessageSignature, Error, + MessageHash, NodeId, Public, RequestSignature, Requester, ServerKeyId, +}; /// Node key pair. pub trait NodeKeyPair: Send + Sync { - /// Public portion of key. - fn public(&self) -> &Public; - /// Address of key owner. - fn address(&self) -> Address; - /// Sign data with node key. - fn sign(&self, data: &H256) -> Result; - /// Compute shared key to encrypt channel between two nodes. - fn compute_shared_key(&self, peer_public: &Public) -> Result; + /// Public portion of key. + fn public(&self) -> &Public; + /// Address of key owner. + fn address(&self) -> Address; + /// Sign data with node key. + fn sign(&self, data: &H256) -> Result; + /// Compute shared key to encrypt channel between two nodes. + fn compute_shared_key(&self, peer_public: &Public) -> Result; } /// Server key (SK) generator. pub trait ServerKeyGenerator { - /// Generate new SK. - /// `key_id` is the caller-provided identifier of generated SK. - /// `author` is the author of key entry. - /// `threshold + 1` is the minimal number of nodes, required to restore private key. - /// Result is a public portion of SK. 
- fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result; - /// Retrieve public portion of previously generated SK. - /// `key_id` is identifier of previously generated SK. - /// `author` is the same author, that has created the server key. - fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result; + /// Generate new SK. + /// `key_id` is the caller-provided identifier of generated SK. + /// `author` is the author of key entry. + /// `threshold + 1` is the minimal number of nodes, required to restore private key. + /// Result is a public portion of SK. + fn generate_key( + &self, + key_id: &ServerKeyId, + author: &Requester, + threshold: usize, + ) -> Result; + /// Retrieve public portion of previously generated SK. + /// `key_id` is identifier of previously generated SK. + /// `author` is the same author, that has created the server key. + fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) + -> Result; } /// Document key (DK) server. pub trait DocumentKeyServer: ServerKeyGenerator { - /// Store externally generated DK. - /// `key_id` is identifier of previously generated SK. - /// `author` is the same author, that has created the server key. - /// `common_point` is a result of `k * T` expression, where `T` is generation point and `k` is random scalar in EC field. - /// `encrypted_document_key` is a result of `M + k * y` expression, where `M` is unencrypted document key (point on EC), - /// `k` is the same scalar used in `common_point` calculation and `y` is previously generated public part of SK. - fn store_document_key(&self, key_id: &ServerKeyId, author: &Requester, common_point: Public, encrypted_document_key: Public) -> Result<(), Error>; - /// Generate and store both SK and DK. This is a shortcut for consequent calls of `generate_key` and `store_document_key`. - /// The only difference is that DK is generated by DocumentKeyServer (which might be considered unsafe). 
- /// `key_id` is the caller-provided identifier of generated SK. - /// `author` is the author of server && document key entry. - /// `threshold + 1` is the minimal number of nodes, required to restore private key. - /// Result is a DK, encrypted with caller public key. - fn generate_document_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result; - /// Restore previously stored DK. - /// DK is decrypted on the key server (which might be considered unsafe), and then encrypted with caller public key. - /// `key_id` is identifier of previously generated SK. - /// `requester` is the one who requests access to document key. Caller must be on ACL for this function to succeed. - /// Result is a DK, encrypted with caller public key. - fn restore_document_key(&self, key_id: &ServerKeyId, requester: &Requester) -> Result; - /// Restore previously stored DK. - /// To decrypt DK on client: - /// 1) use requestor secret key to decrypt secret coefficients from result.decrypt_shadows - /// 2) calculate decrypt_shadows_sum = sum of all secrets from (1) - /// 3) calculate decrypt_shadow_point: decrypt_shadows_sum * result.common_point - /// 4) calculate decrypted_secret: result.decrypted_secret + decrypt_shadow_point - /// Result is a DK shadow. - fn restore_document_key_shadow(&self, key_id: &ServerKeyId, requester: &Requester) -> Result; + /// Store externally generated DK. + /// `key_id` is identifier of previously generated SK. + /// `author` is the same author, that has created the server key. + /// `common_point` is a result of `k * T` expression, where `T` is generation point and `k` is random scalar in EC field. + /// `encrypted_document_key` is a result of `M + k * y` expression, where `M` is unencrypted document key (point on EC), + /// `k` is the same scalar used in `common_point` calculation and `y` is previously generated public part of SK. 
+ fn store_document_key( + &self, + key_id: &ServerKeyId, + author: &Requester, + common_point: Public, + encrypted_document_key: Public, + ) -> Result<(), Error>; + /// Generate and store both SK and DK. This is a shortcut for consequent calls of `generate_key` and `store_document_key`. + /// The only difference is that DK is generated by DocumentKeyServer (which might be considered unsafe). + /// `key_id` is the caller-provided identifier of generated SK. + /// `author` is the author of server && document key entry. + /// `threshold + 1` is the minimal number of nodes, required to restore private key. + /// Result is a DK, encrypted with caller public key. + fn generate_document_key( + &self, + key_id: &ServerKeyId, + author: &Requester, + threshold: usize, + ) -> Result; + /// Restore previously stored DK. + /// DK is decrypted on the key server (which might be considered unsafe), and then encrypted with caller public key. + /// `key_id` is identifier of previously generated SK. + /// `requester` is the one who requests access to document key. Caller must be on ACL for this function to succeed. + /// Result is a DK, encrypted with caller public key. + fn restore_document_key( + &self, + key_id: &ServerKeyId, + requester: &Requester, + ) -> Result; + /// Restore previously stored DK. + /// To decrypt DK on client: + /// 1) use requestor secret key to decrypt secret coefficients from result.decrypt_shadows + /// 2) calculate decrypt_shadows_sum = sum of all secrets from (1) + /// 3) calculate decrypt_shadow_point: decrypt_shadows_sum * result.common_point + /// 4) calculate decrypted_secret: result.decrypted_secret + decrypt_shadow_point + /// Result is a DK shadow. + fn restore_document_key_shadow( + &self, + key_id: &ServerKeyId, + requester: &Requester, + ) -> Result; } /// Message signer. pub trait MessageSigner: ServerKeyGenerator { - /// Generate Schnorr signature for message with previously generated SK. 
- /// `key_id` is the caller-provided identifier of generated SK. - /// `requester` is the one who requests access to server key private. - /// `message` is the message to be signed. - /// Result is a signed message, encrypted with caller public key. - fn sign_message_schnorr(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result; - /// Generate ECDSA signature for message with previously generated SK. - /// WARNING: only possible when SK was generated using t <= 2 * N. - /// `key_id` is the caller-provided identifier of generated SK. - /// `signature` is `key_id`, signed with caller public key. - /// `message` is the message to be signed. - /// Result is a signed message, encrypted with caller public key. - fn sign_message_ecdsa(&self, key_id: &ServerKeyId, signature: &Requester, message: MessageHash) -> Result; + /// Generate Schnorr signature for message with previously generated SK. + /// `key_id` is the caller-provided identifier of generated SK. + /// `requester` is the one who requests access to server key private. + /// `message` is the message to be signed. + /// Result is a signed message, encrypted with caller public key. + fn sign_message_schnorr( + &self, + key_id: &ServerKeyId, + requester: &Requester, + message: MessageHash, + ) -> Result; + /// Generate ECDSA signature for message with previously generated SK. + /// WARNING: only possible when SK was generated using t <= 2 * N. + /// `key_id` is the caller-provided identifier of generated SK. + /// `signature` is `key_id`, signed with caller public key. + /// `message` is the message to be signed. + /// Result is a signed message, encrypted with caller public key. + fn sign_message_ecdsa( + &self, + key_id: &ServerKeyId, + signature: &Requester, + message: MessageHash, + ) -> Result; } /// Administrative sessions server. pub trait AdminSessionsServer { - /// Change servers set so that nodes in new_servers_set became owners of shares for all keys. 
- /// And old nodes (i.e. cluster nodes except new_servers_set) have clear databases. - /// WARNING: newly generated keys will be distributed among all cluster nodes. So this session - /// must be followed with cluster nodes change (either via contract, or config files). - fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet) -> Result<(), Error>; + /// Change servers set so that nodes in new_servers_set became owners of shares for all keys. + /// And old nodes (i.e. cluster nodes except new_servers_set) have clear databases. + /// WARNING: newly generated keys will be distributed among all cluster nodes. So this session + /// must be followed with cluster nodes change (either via contract, or config files). + fn change_servers_set( + &self, + old_set_signature: RequestSignature, + new_set_signature: RequestSignature, + new_servers_set: BTreeSet, + ) -> Result<(), Error>; } /// Key server. -pub trait KeyServer: AdminSessionsServer + DocumentKeyServer + MessageSigner + Send + Sync { -} +pub trait KeyServer: AdminSessionsServer + DocumentKeyServer + MessageSigner + Send + Sync {} diff --git a/secret-store/src/trusted_client.rs b/secret-store/src/trusted_client.rs index a20373ad0..1b8a2427b 100644 --- a/secret-store/src/trusted_client.rs +++ b/secret-store/src/trusted_client.rs @@ -14,90 +14,111 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::sync::{Arc, Weak}; use bytes::Bytes; use call_contract::RegistryInfo; -use common_types::transaction::{Transaction, SignedTransaction, Action}; +use common_types::transaction::{Action, SignedTransaction, Transaction}; +use ethcore::{ + client::{BlockChainClient, BlockId, ChainInfo, Client, Nonce}, + miner::{Miner, MinerService}, +}; use ethereum_types::Address; -use ethcore::client::{Client, BlockChainClient, ChainInfo, Nonce, BlockId}; -use ethcore::miner::{Miner, MinerService}; -use sync::SyncProvider; use helpers::{get_confirmed_block_hash, REQUEST_CONFIRMATIONS_REQUIRED}; -use {Error, NodeKeyPair, ContractAddress}; +use std::sync::{Arc, Weak}; +use sync::SyncProvider; +use ContractAddress; +use Error; +use NodeKeyPair; #[derive(Clone)] /// 'Trusted' client weak reference. pub struct TrustedClient { - /// This key server node key pair. - self_key_pair: Arc, - /// Blockchain client. - client: Weak, - /// Sync provider. - sync: Weak, - /// Miner service. - miner: Weak, + /// This key server node key pair. + self_key_pair: Arc, + /// Blockchain client. + client: Weak, + /// Sync provider. + sync: Weak, + /// Miner service. + miner: Weak, } impl TrustedClient { - /// Create new trusted client. - pub fn new(self_key_pair: Arc, client: Arc, sync: Arc, miner: Arc) -> Self { - TrustedClient { - self_key_pair: self_key_pair, - client: Arc::downgrade(&client), - sync: Arc::downgrade(&sync), - miner: Arc::downgrade(&miner), - } - } + /// Create new trusted client. + pub fn new( + self_key_pair: Arc, + client: Arc, + sync: Arc, + miner: Arc, + ) -> Self { + TrustedClient { + self_key_pair: self_key_pair, + client: Arc::downgrade(&client), + sync: Arc::downgrade(&sync), + miner: Arc::downgrade(&miner), + } + } - /// Get 'trusted' `Client` reference only if it is synchronized && trusted. 
- pub fn get(&self) -> Option> { - self.client.upgrade() - .and_then(|client| self.sync.upgrade().map(|sync| (client, sync))) - .and_then(|(client, sync)| { - let is_synced = !sync.status().is_syncing(client.queue_info()); - let is_trusted = client.chain_info().security_level().is_full(); - match is_synced && is_trusted { - true => Some(client), - false => None, - } - }) - } + /// Get 'trusted' `Client` reference only if it is synchronized && trusted. + pub fn get(&self) -> Option> { + self.client + .upgrade() + .and_then(|client| self.sync.upgrade().map(|sync| (client, sync))) + .and_then(|(client, sync)| { + let is_synced = !sync.status().is_syncing(client.queue_info()); + let is_trusted = client.chain_info().security_level().is_full(); + match is_synced && is_trusted { + true => Some(client), + false => None, + } + }) + } - /// Get untrusted `Client` reference. - pub fn get_untrusted(&self) -> Option> { - self.client.upgrade() - } + /// Get untrusted `Client` reference. + pub fn get_untrusted(&self) -> Option> { + self.client.upgrade() + } - /// Transact contract. 
- pub fn transact_contract(&self, contract: Address, tx_data: Bytes) -> Result<(), Error> { - let client = self.client.upgrade().ok_or_else(|| Error::Internal("cannot submit tx when client is offline".into()))?; - let miner = self.miner.upgrade().ok_or_else(|| Error::Internal("cannot submit tx when miner is offline".into()))?; - let engine = client.engine(); - let transaction = Transaction { - nonce: client.latest_nonce(&self.self_key_pair.address()), - action: Action::Call(contract), - gas: miner.authoring_params().gas_range_target.0, - gas_price: miner.sensible_gas_price(), - value: Default::default(), - data: tx_data, - }; - let chain_id = engine.signing_chain_id(&client.latest_env_info()); - let signature = self.self_key_pair.sign(&transaction.hash(chain_id))?; - let signed = SignedTransaction::new(transaction.with_signature(signature, chain_id))?; - miner.import_own_transaction(&*client, signed.into()) - .map_err(|e| Error::Internal(format!("failed to import tx: {}", e))) - } + /// Transact contract. 
+ pub fn transact_contract(&self, contract: Address, tx_data: Bytes) -> Result<(), Error> { + let client = self + .client + .upgrade() + .ok_or_else(|| Error::Internal("cannot submit tx when client is offline".into()))?; + let miner = self + .miner + .upgrade() + .ok_or_else(|| Error::Internal("cannot submit tx when miner is offline".into()))?; + let engine = client.engine(); + let transaction = Transaction { + nonce: client.latest_nonce(&self.self_key_pair.address()), + action: Action::Call(contract), + gas: miner.authoring_params().gas_range_target.0, + gas_price: miner.sensible_gas_price(), + value: Default::default(), + data: tx_data, + }; + let chain_id = engine.signing_chain_id(&client.latest_env_info()); + let signature = self.self_key_pair.sign(&transaction.hash(chain_id))?; + let signed = SignedTransaction::new(transaction.with_signature(signature, chain_id))?; + miner + .import_own_transaction(&*client, signed.into()) + .map_err(|e| Error::Internal(format!("failed to import tx: {}", e))) + } - /// Read contract address. If address source is registry, address only returned if current client state is - /// trusted. Address from registry is read from registry from block latest block with - /// REQUEST_CONFIRMATIONS_REQUIRED confirmations. - pub fn read_contract_address(&self, registry_name: String, address: &ContractAddress) -> Option
{ - match *address { - ContractAddress::Address(ref address) => Some(address.clone()), - ContractAddress::Registry => self.get().and_then(|client| - get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED) - .and_then(|block| client.registry_address(registry_name, BlockId::Hash(block))) - ), - } - } + /// Read contract address. If address source is registry, address only returned if current client state is + /// trusted. Address from registry is read from registry from block latest block with + /// REQUEST_CONFIRMATIONS_REQUIRED confirmations. + pub fn read_contract_address( + &self, + registry_name: String, + address: &ContractAddress, + ) -> Option
{ + match *address { + ContractAddress::Address(ref address) => Some(address.clone()), + ContractAddress::Registry => self.get().and_then(|client| { + get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED) + .and_then(|block| client.registry_address(registry_name, BlockId::Hash(block))) + }), + } + } } diff --git a/secret-store/src/types/all.rs b/secret-store/src/types/all.rs index 3a1e9df70..419c1d1cb 100644 --- a/secret-store/src/types/all.rs +++ b/secret-store/src/types/all.rs @@ -16,7 +16,9 @@ use std::collections::BTreeMap; -use {ethkey, bytes, ethereum_types}; +use bytes; +use ethereum_types; +use ethkey; /// Node id. pub type NodeId = ethkey::Public; @@ -36,119 +38,119 @@ pub use ethkey::Public; /// Secret store configuration #[derive(Debug, Clone)] pub struct NodeAddress { - /// IP address. - pub address: String, - /// IP port. - pub port: u16, + /// IP address. + pub address: String, + /// IP port. + pub port: u16, } /// Contract address. #[derive(Debug, Clone)] pub enum ContractAddress { - /// Address is read from registry. - Registry, - /// Address is specified. - Address(ethkey::Address), + /// Address is read from registry. + Registry, + /// Address is specified. + Address(ethkey::Address), } /// Secret store configuration #[derive(Debug)] pub struct ServiceConfiguration { - /// HTTP listener address. If None, HTTP API is disabled. - pub listener_address: Option, - /// Service contract address. - pub service_contract_address: Option, - /// Server key generation service contract address. - pub service_contract_srv_gen_address: Option, - /// Server key retrieval service contract address. - pub service_contract_srv_retr_address: Option, - /// Document key store service contract address. - pub service_contract_doc_store_address: Option, - /// Document key shadow retrieval service contract address. - pub service_contract_doc_sretr_address: Option, - /// ACL check contract address. If None, everyone has access to all keys. Useful for tests only. 
- pub acl_check_contract_address: Option, - /// Cluster configuration. - pub cluster_config: ClusterConfiguration, + /// HTTP listener address. If None, HTTP API is disabled. + pub listener_address: Option, + /// Service contract address. + pub service_contract_address: Option, + /// Server key generation service contract address. + pub service_contract_srv_gen_address: Option, + /// Server key retrieval service contract address. + pub service_contract_srv_retr_address: Option, + /// Document key store service contract address. + pub service_contract_doc_store_address: Option, + /// Document key shadow retrieval service contract address. + pub service_contract_doc_sretr_address: Option, + /// ACL check contract address. If None, everyone has access to all keys. Useful for tests only. + pub acl_check_contract_address: Option, + /// Cluster configuration. + pub cluster_config: ClusterConfiguration, } /// Key server cluster configuration #[derive(Debug)] pub struct ClusterConfiguration { - /// This node address. - pub listener_address: NodeAddress, - /// All cluster nodes addresses. - pub nodes: BTreeMap, - /// Key Server Set contract address. If None, servers from 'nodes' map are used. - pub key_server_set_contract_address: Option, - /// Allow outbound connections to 'higher' nodes. - /// This is useful for tests, but slower a bit for production. - pub allow_connecting_to_higher_nodes: bool, - /// Administrator public key. - pub admin_public: Option, - /// Should key servers set change session should be started when servers set changes. - /// This will only work when servers set is configured using KeyServerSet contract. - pub auto_migrate_enabled: bool, + /// This node address. + pub listener_address: NodeAddress, + /// All cluster nodes addresses. + pub nodes: BTreeMap, + /// Key Server Set contract address. If None, servers from 'nodes' map are used. + pub key_server_set_contract_address: Option, + /// Allow outbound connections to 'higher' nodes. 
+ /// This is useful for tests, but slower a bit for production. + pub allow_connecting_to_higher_nodes: bool, + /// Administrator public key. + pub admin_public: Option, + /// Should key servers set change session should be started when servers set changes. + /// This will only work when servers set is configured using KeyServerSet contract. + pub auto_migrate_enabled: bool, } /// Shadow decryption result. #[derive(Clone, Debug, PartialEq)] pub struct EncryptedDocumentKeyShadow { - /// Decrypted secret point. It is partially decrypted if shadow decryption was requested. - pub decrypted_secret: ethkey::Public, - /// Shared common point. - pub common_point: Option, - /// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public. - pub decrypt_shadows: Option>>, + /// Decrypted secret point. It is partially decrypted if shadow decryption was requested. + pub decrypted_secret: ethkey::Public, + /// Shared common point. + pub common_point: Option, + /// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public. + pub decrypt_shadows: Option>>, } /// Requester identification data. #[derive(Debug, Clone)] pub enum Requester { - /// Requested with server key id signature. - Signature(ethkey::Signature), - /// Requested with public key. - Public(ethkey::Public), - /// Requested with verified address. - Address(ethereum_types::Address), + /// Requested with server key id signature. + Signature(ethkey::Signature), + /// Requested with public key. + Public(ethkey::Public), + /// Requested with verified address. 
+ Address(ethereum_types::Address), } impl Default for Requester { - fn default() -> Self { - Requester::Signature(Default::default()) - } + fn default() -> Self { + Requester::Signature(Default::default()) + } } impl Requester { - pub fn public(&self, server_key_id: &ServerKeyId) -> Result { - match *self { - Requester::Signature(ref signature) => ethkey::recover(signature, server_key_id) - .map_err(|e| format!("bad signature: {}", e)), - Requester::Public(ref public) => Ok(public.clone()), - Requester::Address(_) => Err("cannot recover public from address".into()), - } - } + pub fn public(&self, server_key_id: &ServerKeyId) -> Result { + match *self { + Requester::Signature(ref signature) => ethkey::recover(signature, server_key_id) + .map_err(|e| format!("bad signature: {}", e)), + Requester::Public(ref public) => Ok(public.clone()), + Requester::Address(_) => Err("cannot recover public from address".into()), + } + } - pub fn address(&self, server_key_id: &ServerKeyId) -> Result { - self.public(server_key_id) - .map(|p| ethkey::public_to_address(&p)) - } + pub fn address(&self, server_key_id: &ServerKeyId) -> Result { + self.public(server_key_id) + .map(|p| ethkey::public_to_address(&p)) + } } impl From for Requester { - fn from(signature: ethkey::Signature) -> Requester { - Requester::Signature(signature) - } + fn from(signature: ethkey::Signature) -> Requester { + Requester::Signature(signature) + } } impl From for Requester { - fn from(public: ethereum_types::Public) -> Requester { - Requester::Public(public) - } + fn from(public: ethereum_types::Public) -> Requester { + Requester::Public(public) + } } impl From for Requester { - fn from(address: ethereum_types::Address) -> Requester { - Requester::Address(address) - } + fn from(address: ethereum_types::Address) -> Requester { + Requester::Address(address) + } } diff --git a/secret-store/src/types/error.rs b/secret-store/src/types/error.rs index 72dfded78..ee2ac5a00 100644 --- 
a/secret-store/src/types/error.rs +++ b/secret-store/src/types/error.rs @@ -14,88 +14,87 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::fmt; -use std::net; -use std::io::Error as IoError; +use std::{fmt, io::Error as IoError, net}; -use {ethkey, crypto}; +use crypto; +use ethkey; /// Secret store error. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Error { - /// Invalid node address has been passed. - InvalidNodeAddress, - /// Invalid node id has been passed. - InvalidNodeId, - /// Session with the given id already exists. - DuplicateSessionId, - /// No active session with given id. - NoActiveSessionWithId, - /// Invalid threshold value has been passed. - /// Threshold value must be in [0; n - 1], where n is a number of nodes participating in the encryption. - NotEnoughNodesForThreshold, - /// Current state of encryption/decryption session does not allow to proceed request. - /// Reschedule this request for later processing. - TooEarlyForRequest, - /// Current state of encryption/decryption session does not allow to proceed request. - /// This means that either there is some comm-failure or node is misbehaving/cheating. - InvalidStateForRequest, - /// Request cannot be sent/received from this node. - InvalidNodeForRequest, - /// Message or some data in the message was recognized as invalid. - /// This means that node is misbehaving/cheating. - InvalidMessage, - /// Message version is not supported. - InvalidMessageVersion, - /// Message is invalid because of replay-attack protection. - ReplayProtection, - /// Connection to node, required for this session is not established. - NodeDisconnected, - /// Server key with this ID is already generated. - ServerKeyAlreadyGenerated, - /// Server key with this ID is not yet generated. - ServerKeyIsNotFound, - /// Document key with this ID is already stored. 
- DocumentKeyAlreadyStored, - /// Document key with this ID is not yet stored. - DocumentKeyIsNotFound, - /// Consensus is temporary unreachable. Means that something is currently blocking us from either forming - /// consensus group (like disconnecting from too many nodes, which are AGREE to participate in consensus) - /// or from rejecting request (disconnecting from AccessDenied-nodes). - ConsensusTemporaryUnreachable, - /// Consensus is unreachable. It doesn't mean that it will ALWAYS remain unreachable, but right NOW we have - /// enough nodes confirmed that they do not want to be a part of consensus. Example: we're connected to 10 - /// of 100 nodes. Key threshold is 6 (i.e. 7 nodes are required for consensus). 4 nodes are responding with - /// reject => consensus is considered unreachable, even though another 90 nodes still can respond with OK. - ConsensusUnreachable, - /// Acl storage error. - AccessDenied, - /// Can't start session, because exclusive session is active. - ExclusiveSessionActive, - /// Can't start exclusive session, because there are other active sessions. - HasActiveSessions, - /// Insufficient requester data. - InsufficientRequesterData(String), - /// Cryptographic error. - EthKey(String), - /// I/O error has occurred. - Io(String), - /// Deserialization error has occurred. - Serde(String), - /// Hyper error. - Hyper(String), - /// Database-related error. - Database(String), - /// Internal error. - Internal(String), + /// Invalid node address has been passed. + InvalidNodeAddress, + /// Invalid node id has been passed. + InvalidNodeId, + /// Session with the given id already exists. + DuplicateSessionId, + /// No active session with given id. + NoActiveSessionWithId, + /// Invalid threshold value has been passed. + /// Threshold value must be in [0; n - 1], where n is a number of nodes participating in the encryption. + NotEnoughNodesForThreshold, + /// Current state of encryption/decryption session does not allow to proceed request. 
+ /// Reschedule this request for later processing. + TooEarlyForRequest, + /// Current state of encryption/decryption session does not allow to proceed request. + /// This means that either there is some comm-failure or node is misbehaving/cheating. + InvalidStateForRequest, + /// Request cannot be sent/received from this node. + InvalidNodeForRequest, + /// Message or some data in the message was recognized as invalid. + /// This means that node is misbehaving/cheating. + InvalidMessage, + /// Message version is not supported. + InvalidMessageVersion, + /// Message is invalid because of replay-attack protection. + ReplayProtection, + /// Connection to node, required for this session is not established. + NodeDisconnected, + /// Server key with this ID is already generated. + ServerKeyAlreadyGenerated, + /// Server key with this ID is not yet generated. + ServerKeyIsNotFound, + /// Document key with this ID is already stored. + DocumentKeyAlreadyStored, + /// Document key with this ID is not yet stored. + DocumentKeyIsNotFound, + /// Consensus is temporary unreachable. Means that something is currently blocking us from either forming + /// consensus group (like disconnecting from too many nodes, which are AGREE to participate in consensus) + /// or from rejecting request (disconnecting from AccessDenied-nodes). + ConsensusTemporaryUnreachable, + /// Consensus is unreachable. It doesn't mean that it will ALWAYS remain unreachable, but right NOW we have + /// enough nodes confirmed that they do not want to be a part of consensus. Example: we're connected to 10 + /// of 100 nodes. Key threshold is 6 (i.e. 7 nodes are required for consensus). 4 nodes are responding with + /// reject => consensus is considered unreachable, even though another 90 nodes still can respond with OK. + ConsensusUnreachable, + /// Acl storage error. + AccessDenied, + /// Can't start session, because exclusive session is active. 
+ ExclusiveSessionActive, + /// Can't start exclusive session, because there are other active sessions. + HasActiveSessions, + /// Insufficient requester data. + InsufficientRequesterData(String), + /// Cryptographic error. + EthKey(String), + /// I/O error has occurred. + Io(String), + /// Deserialization error has occurred. + Serde(String), + /// Hyper error. + Hyper(String), + /// Database-related error. + Database(String), + /// Internal error. + Internal(String), } impl Error { - /// Is this a fatal error? Non-fatal means that it is possible to replay the same request with a non-zero - /// chance to success. I.e. the error is not about request itself (or current environment factors that - /// are affecting request processing), but about current SecretStore state. - pub fn is_non_fatal(&self) -> bool { - match *self { + /// Is this a fatal error? Non-fatal means that it is possible to replay the same request with a non-zero + /// chance to success. I.e. the error is not about request itself (or current environment factors that + /// are affecting request processing), but about current SecretStore state. 
+ pub fn is_non_fatal(&self) -> bool { + match *self { // non-fatal errors: // session start errors => restarting session is a solution @@ -124,76 +123,91 @@ impl Error { // but we still consider these errors as fatal Error::EthKey(_) | Error::Serde(_) | Error::Hyper(_) | Error::Database(_) | Error::Internal(_) | Error::Io(_) => false, } - } + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Error::InvalidNodeAddress => write!(f, "invalid node address has been passed"), - Error::InvalidNodeId => write!(f, "invalid node id has been passed"), - Error::DuplicateSessionId => write!(f, "session with the same id is already registered"), - Error::NoActiveSessionWithId => write!(f, "no active session with given id"), - Error::NotEnoughNodesForThreshold => write!(f, "not enough nodes for passed threshold"), - Error::TooEarlyForRequest => write!(f, "session is not yet ready to process this request"), - Error::InvalidStateForRequest => write!(f, "session is in invalid state for processing this request"), - Error::InvalidNodeForRequest => write!(f, "invalid node for this request"), - Error::InvalidMessage => write!(f, "invalid message is received"), - Error::InvalidMessageVersion => write!(f, "unsupported message is received"), - Error::ReplayProtection => write!(f, "replay message is received"), - Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"), - Error::ServerKeyAlreadyGenerated => write!(f, "Server key with this ID is already generated"), - Error::ServerKeyIsNotFound => write!(f, "Server key with this ID is not found"), - Error::DocumentKeyAlreadyStored => write!(f, "Document key with this ID is already stored"), - Error::DocumentKeyIsNotFound => write!(f, "Document key with this ID is not found"), - Error::ConsensusUnreachable => write!(f, "Consensus unreachable"), - Error::ConsensusTemporaryUnreachable => write!(f, "Consensus temporary unreachable"), - 
Error::AccessDenied => write!(f, "Access denied"), - Error::ExclusiveSessionActive => write!(f, "Exclusive session active"), - Error::HasActiveSessions => write!(f, "Unable to start exclusive session"), - Error::InsufficientRequesterData(ref e) => write!(f, "Insufficient requester data: {}", e), - Error::EthKey(ref e) => write!(f, "cryptographic error {}", e), - Error::Hyper(ref msg) => write!(f, "Hyper error: {}", msg), - Error::Serde(ref msg) => write!(f, "Serialization error: {}", msg), - Error::Database(ref msg) => write!(f, "Database error: {}", msg), - Error::Internal(ref msg) => write!(f, "Internal error: {}", msg), - Error::Io(ref msg) => write!(f, "IO error: {}", msg), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::InvalidNodeAddress => write!(f, "invalid node address has been passed"), + Error::InvalidNodeId => write!(f, "invalid node id has been passed"), + Error::DuplicateSessionId => { + write!(f, "session with the same id is already registered") + } + Error::NoActiveSessionWithId => write!(f, "no active session with given id"), + Error::NotEnoughNodesForThreshold => write!(f, "not enough nodes for passed threshold"), + Error::TooEarlyForRequest => { + write!(f, "session is not yet ready to process this request") + } + Error::InvalidStateForRequest => { + write!(f, "session is in invalid state for processing this request") + } + Error::InvalidNodeForRequest => write!(f, "invalid node for this request"), + Error::InvalidMessage => write!(f, "invalid message is received"), + Error::InvalidMessageVersion => write!(f, "unsupported message is received"), + Error::ReplayProtection => write!(f, "replay message is received"), + Error::NodeDisconnected => write!( + f, + "node required for this operation is currently disconnected" + ), + Error::ServerKeyAlreadyGenerated => { + write!(f, "Server key with this ID is already generated") + } + Error::ServerKeyIsNotFound => write!(f, "Server key with this ID is 
not found"), + Error::DocumentKeyAlreadyStored => { + write!(f, "Document key with this ID is already stored") + } + Error::DocumentKeyIsNotFound => write!(f, "Document key with this ID is not found"), + Error::ConsensusUnreachable => write!(f, "Consensus unreachable"), + Error::ConsensusTemporaryUnreachable => write!(f, "Consensus temporary unreachable"), + Error::AccessDenied => write!(f, "Access denied"), + Error::ExclusiveSessionActive => write!(f, "Exclusive session active"), + Error::HasActiveSessions => write!(f, "Unable to start exclusive session"), + Error::InsufficientRequesterData(ref e) => { + write!(f, "Insufficient requester data: {}", e) + } + Error::EthKey(ref e) => write!(f, "cryptographic error {}", e), + Error::Hyper(ref msg) => write!(f, "Hyper error: {}", msg), + Error::Serde(ref msg) => write!(f, "Serialization error: {}", msg), + Error::Database(ref msg) => write!(f, "Database error: {}", msg), + Error::Internal(ref msg) => write!(f, "Internal error: {}", msg), + Error::Io(ref msg) => write!(f, "IO error: {}", msg), + } + } } impl From for Error { - fn from(err: ethkey::Error) -> Self { - Error::EthKey(err.into()) - } + fn from(err: ethkey::Error) -> Self { + Error::EthKey(err.into()) + } } impl From for Error { - fn from(err: ethkey::crypto::Error) -> Self { - Error::EthKey(err.to_string()) - } + fn from(err: ethkey::crypto::Error) -> Self { + Error::EthKey(err.to_string()) + } } impl From for Error { - fn from(err: crypto::Error) -> Self { - Error::EthKey(err.to_string()) - } + fn from(err: crypto::Error) -> Self { + Error::EthKey(err.to_string()) + } } impl From for Error { - fn from(err: IoError) -> Self { - Error::Io(err.to_string()) - } + fn from(err: IoError) -> Self { + Error::Io(err.to_string()) + } } impl Into for Error { - fn into(self) -> String { - format!("{}", self) - } + fn into(self) -> String { + format!("{}", self) + } } impl From for Error { - fn from(err: net::AddrParseError) -> Error { - Error::Internal(err.to_string()) 
- } + fn from(err: net::AddrParseError) -> Error { + Error::Internal(err.to_string()) + } } diff --git a/secret-store/src/types/mod.rs b/secret-store/src/types/mod.rs index 2001efc5a..2e8dc75f2 100644 --- a/secret-store/src/types/mod.rs +++ b/secret-store/src/types/mod.rs @@ -19,5 +19,4 @@ mod all; mod error; -pub use self::all::*; -pub use self::error::*; +pub use self::{all::*, error::*}; diff --git a/updater/hash-fetch/src/client.rs b/updater/hash-fetch/src/client.rs index 513b02f25..7b288cbb8 100644 --- a/updater/hash-fetch/src/client.rs +++ b/updater/hash-fetch/src/client.rs @@ -16,270 +16,312 @@ //! Hash-addressed content resolver & fetcher. -use std::{io, fs}; -use std::io::Write; -use std::sync::Arc; -use std::path::PathBuf; +use std::{fs, io, io::Write, path::PathBuf, sync::Arc}; -use hash::keccak_buffer; +use ethereum_types::H256; use fetch::{self, Fetch}; use futures::{Future, IntoFuture}; +use hash::keccak_buffer; use parity_runtime::Executor; -use urlhint::{URLHintContract, URLHint, URLHintResult}; -use registrar::{RegistrarClient, Asynchronous}; -use ethereum_types::H256; +use registrar::{Asynchronous, RegistrarClient}; +use urlhint::{URLHint, URLHintContract, URLHintResult}; /// API for fetching by hash. pub trait HashFetch: Send + Sync + 'static { - /// Fetch hash-addressed content. - /// Parameters: - /// 1. `hash` - content hash - /// 2. `on_done` - callback function invoked when the content is ready (or there was error during fetch) - /// - /// This function may fail immediately when fetch cannot be initialized or content cannot be resolved. - fn fetch(&self, hash: H256, abort: fetch::Abort, on_done: Box) + Send>); + /// Fetch hash-addressed content. + /// Parameters: + /// 1. `hash` - content hash + /// 2. `on_done` - callback function invoked when the content is ready (or there was error during fetch) + /// + /// This function may fail immediately when fetch cannot be initialized or content cannot be resolved. 
+ fn fetch( + &self, + hash: H256, + abort: fetch::Abort, + on_done: Box) + Send>, + ); } /// Hash-fetching error. #[derive(Debug)] pub enum Error { - /// Hash could not be resolved to a valid content address. - NoResolution, - /// Downloaded content hash does not match. - HashMismatch { - /// Expected hash - expected: H256, - /// Computed hash - got: H256, - }, - /// Server didn't respond with OK status. - InvalidStatus, - /// IO Error while validating hash. - IO(io::Error), - /// Error during fetch. - Fetch(fetch::Error), + /// Hash could not be resolved to a valid content address. + NoResolution, + /// Downloaded content hash does not match. + HashMismatch { + /// Expected hash + expected: H256, + /// Computed hash + got: H256, + }, + /// Server didn't respond with OK status. + InvalidStatus, + /// IO Error while validating hash. + IO(io::Error), + /// Error during fetch. + Fetch(fetch::Error), } #[cfg(test)] impl PartialEq for Error { - fn eq(&self, other: &Self) -> bool { - use Error::*; - match (self, other) { - (&HashMismatch { expected, got }, &HashMismatch { expected: e, got: g }) => { - expected == e && got == g - }, - (&NoResolution, &NoResolution) => true, - (&InvalidStatus, &InvalidStatus) => true, - (&IO(_), &IO(_)) => true, - (&Fetch(_), &Fetch(_)) => true, - _ => false, - } - } + fn eq(&self, other: &Self) -> bool { + use Error::*; + match (self, other) { + ( + &HashMismatch { expected, got }, + &HashMismatch { + expected: e, + got: g, + }, + ) => expected == e && got == g, + (&NoResolution, &NoResolution) => true, + (&InvalidStatus, &InvalidStatus) => true, + (&IO(_), &IO(_)) => true, + (&Fetch(_), &Fetch(_)) => true, + _ => false, + } + } } impl From for Error { - fn from(error: fetch::Error) -> Self { - Error::Fetch(error) - } + fn from(error: fetch::Error) -> Self { + Error::Fetch(error) + } } impl From for Error { - fn from(error: io::Error) -> Self { - Error::IO(error) - } + fn from(error: io::Error) -> Self { + Error::IO(error) + } } fn 
validate_hash(path: PathBuf, hash: H256, body: fetch::BodyReader) -> Result { - // Read the response - let mut reader = io::BufReader::new(body); - let mut writer = io::BufWriter::new(fs::File::create(&path)?); - io::copy(&mut reader, &mut writer)?; - writer.flush()?; + // Read the response + let mut reader = io::BufReader::new(body); + let mut writer = io::BufWriter::new(fs::File::create(&path)?); + io::copy(&mut reader, &mut writer)?; + writer.flush()?; - // And validate the hash - let mut file_reader = io::BufReader::new(fs::File::open(&path)?); - let content_hash = keccak_buffer(&mut file_reader)?; - if content_hash != hash { - Err(Error::HashMismatch{ got: content_hash, expected: hash }) - } else { - Ok(path) - } + // And validate the hash + let mut file_reader = io::BufReader::new(fs::File::open(&path)?); + let content_hash = keccak_buffer(&mut file_reader)?; + if content_hash != hash { + Err(Error::HashMismatch { + got: content_hash, + expected: hash, + }) + } else { + Ok(path) + } } /// Default Hash-fetching client using on-chain contract to resolve hashes to URLs. pub struct Client { - contract: URLHintContract, - fetch: F, - executor: Executor, - random_path: Arc PathBuf + Sync + Send>, + contract: URLHintContract, + fetch: F, + executor: Executor, + random_path: Arc PathBuf + Sync + Send>, } impl Client { - /// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner. - pub fn with_fetch(contract: Arc>, fetch: F, executor: Executor) -> Self { - Client { - contract: URLHintContract::new(contract), - fetch: fetch, - executor: executor, - random_path: Arc::new(random_temp_path), - } - } + /// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner. 
+ pub fn with_fetch( + contract: Arc>, + fetch: F, + executor: Executor, + ) -> Self { + Client { + contract: URLHintContract::new(contract), + fetch: fetch, + executor: executor, + random_path: Arc::new(random_temp_path), + } + } } impl HashFetch for Client { - fn fetch(&self, hash: H256, abort: fetch::Abort, on_done: Box) + Send>) { - debug!(target: "fetch", "Fetching: {:?}", hash); + fn fetch( + &self, + hash: H256, + abort: fetch::Abort, + on_done: Box) + Send>, + ) { + debug!(target: "fetch", "Fetching: {:?}", hash); - let random_path = self.random_path.clone(); - let remote_fetch = self.fetch.clone(); - let future = self.contract.resolve(hash) - .map_err(|e| { warn!("Error resolving URL: {}", e); Error::NoResolution }) - .and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution)) - .map(|content| match content { - URLHintResult::Dapp(dapp) => { - dapp.url() - }, - URLHintResult::GithubDapp(content) => { - content.url - }, - URLHintResult::Content(content) => { - content.url - }, - }) - .into_future() - .and_then(move |url| { - debug!(target: "fetch", "Resolved {:?} to {:?}. 
Fetching...", hash, url); - remote_fetch.get(&url, abort).from_err() - }) - .and_then(move |response| { - if !response.is_success() { - Err(Error::InvalidStatus) - } else { - Ok(response) - } - }) - .and_then(move |response| { - debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash); - let path = random_path(); - let res = validate_hash(path.clone(), hash, fetch::BodyReader::new(response)); - if let Err(ref err) = res { - trace!(target: "fetch", "Error: {:?}", err); - // Remove temporary file in case of error - let _ = fs::remove_file(&path); - } - res - }) - .then(move |res| { on_done(res); Ok(()) as Result<(), ()> }); + let random_path = self.random_path.clone(); + let remote_fetch = self.fetch.clone(); + let future = self + .contract + .resolve(hash) + .map_err(|e| { + warn!("Error resolving URL: {}", e); + Error::NoResolution + }) + .and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution)) + .map(|content| match content { + URLHintResult::Dapp(dapp) => dapp.url(), + URLHintResult::GithubDapp(content) => content.url, + URLHintResult::Content(content) => content.url, + }) + .into_future() + .and_then(move |url| { + debug!(target: "fetch", "Resolved {:?} to {:?}. 
Fetching...", hash, url); + remote_fetch.get(&url, abort).from_err() + }) + .and_then(move |response| { + if !response.is_success() { + Err(Error::InvalidStatus) + } else { + Ok(response) + } + }) + .and_then(move |response| { + debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash); + let path = random_path(); + let res = validate_hash(path.clone(), hash, fetch::BodyReader::new(response)); + if let Err(ref err) = res { + trace!(target: "fetch", "Error: {:?}", err); + // Remove temporary file in case of error + let _ = fs::remove_file(&path); + } + res + }) + .then(move |res| { + on_done(res); + Ok(()) as Result<(), ()> + }); - self.executor.spawn(future); - } + self.executor.spawn(future); + } } fn random_temp_path() -> PathBuf { - use ::rand::Rng; - use ::std::env; + use rand::Rng; + use std::env; - let mut rng = ::rand::OsRng::new().expect("Reliable random source is required to work."); - let file: String = rng.gen_ascii_chars().take(12).collect(); + let mut rng = ::rand::OsRng::new().expect("Reliable random source is required to work."); + let file: String = rng.gen_ascii_chars().take(12).collect(); - let mut path = env::temp_dir(); - path.push(file); - path + let mut path = env::temp_dir(); + path.push(file); + path } #[cfg(test)] mod tests { - use fake_fetch::FakeFetch; - use rustc_hex::FromHex; - use std::sync::{Arc, mpsc}; - use parking_lot::Mutex; - use parity_runtime::Executor; - use urlhint::tests::{FakeRegistrar, URLHINT}; - use super::{Error, Client, HashFetch, random_temp_path}; + use super::{random_temp_path, Client, Error, HashFetch}; + use fake_fetch::FakeFetch; + use parity_runtime::Executor; + use parking_lot::Mutex; + use rustc_hex::FromHex; + use std::sync::{mpsc, Arc}; + use urlhint::tests::{FakeRegistrar, URLHINT}; - fn registrar() -> FakeRegistrar { - let mut registrar = FakeRegistrar::new(); - registrar.responses = Mutex::new(vec![ + fn registrar() -> FakeRegistrar { + let mut registrar = FakeRegistrar::new(); + 
registrar.responses = Mutex::new(vec![ Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()), Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003c68747470733a2f2f7061726974792e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e6700000000".from_hex().unwrap()), ]); - registrar - } + registrar + } - #[test] - fn should_return_error_if_hash_not_found() { - // given - let contract = Arc::new(FakeRegistrar::new()); - let fetch = FakeFetch::new(None::); - let client = Client::with_fetch(contract.clone(), fetch, Executor::new_sync()); + #[test] + fn should_return_error_if_hash_not_found() { + // given + let contract = Arc::new(FakeRegistrar::new()); + let fetch = FakeFetch::new(None::); + let client = Client::with_fetch(contract.clone(), fetch, Executor::new_sync()); - // when - let (tx, rx) = mpsc::channel(); - client.fetch(2.into(), Default::default(), Box::new(move |result| { - tx.send(result).unwrap(); - })); + // when + let (tx, rx) = mpsc::channel(); + client.fetch( + 2.into(), + Default::default(), + Box::new(move |result| { + tx.send(result).unwrap(); + }), + ); - // then - let result = rx.recv().unwrap(); - assert_eq!(result.unwrap_err(), Error::NoResolution); - } + // then + let result = rx.recv().unwrap(); + assert_eq!(result.unwrap_err(), Error::NoResolution); + } - #[test] - fn should_return_error_if_response_is_not_successful() { - // given - let registrar = Arc::new(registrar()); - let fetch = FakeFetch::new(None::); - let client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync()); + #[test] + fn should_return_error_if_response_is_not_successful() { + // given + let registrar = Arc::new(registrar()); + let fetch = FakeFetch::new(None::); + let client = 
Client::with_fetch(registrar.clone(), fetch, Executor::new_sync()); - // when - let (tx, rx) = mpsc::channel(); - client.fetch(2.into(), Default::default(), Box::new(move |result| { - tx.send(result).unwrap(); - })); + // when + let (tx, rx) = mpsc::channel(); + client.fetch( + 2.into(), + Default::default(), + Box::new(move |result| { + tx.send(result).unwrap(); + }), + ); - // then - let result = rx.recv().unwrap(); - assert_eq!(result.unwrap_err(), Error::InvalidStatus); - } + // then + let result = rx.recv().unwrap(); + assert_eq!(result.unwrap_err(), Error::InvalidStatus); + } - #[test] - fn should_return_hash_mismatch() { - // given - let registrar = Arc::new(registrar()); - let fetch = FakeFetch::new(Some(1)); - let mut client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync()); - let path = random_temp_path(); - let path2 = path.clone(); - client.random_path = Arc::new(move || path2.clone()); + #[test] + fn should_return_hash_mismatch() { + // given + let registrar = Arc::new(registrar()); + let fetch = FakeFetch::new(Some(1)); + let mut client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync()); + let path = random_temp_path(); + let path2 = path.clone(); + client.random_path = Arc::new(move || path2.clone()); - // when - let (tx, rx) = mpsc::channel(); - client.fetch(2.into(), Default::default(), Box::new(move |result| { - tx.send(result).unwrap(); - })); + // when + let (tx, rx) = mpsc::channel(); + client.fetch( + 2.into(), + Default::default(), + Box::new(move |result| { + tx.send(result).unwrap(); + }), + ); - // then - let result = rx.recv().unwrap(); - let hash = "0x2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e".into(); - assert_eq!(result.unwrap_err(), Error::HashMismatch { expected: 2.into(), got: hash }); - assert!(!path.exists(), "Temporary file should be removed."); - } + // then + let result = rx.recv().unwrap(); + let hash = 
"0x2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e".into(); + assert_eq!( + result.unwrap_err(), + Error::HashMismatch { + expected: 2.into(), + got: hash + } + ); + assert!(!path.exists(), "Temporary file should be removed."); + } - #[test] - fn should_return_path_if_hash_matches() { - // given - let registrar = Arc::new(registrar()); - let fetch = FakeFetch::new(Some(1)); - let client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync()); + #[test] + fn should_return_path_if_hash_matches() { + // given + let registrar = Arc::new(registrar()); + let fetch = FakeFetch::new(Some(1)); + let client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync()); - // when - let (tx, rx) = mpsc::channel(); - client.fetch("0x2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e".into(), - Default::default(), - Box::new(move |result| { tx.send(result).unwrap(); })); + // when + let (tx, rx) = mpsc::channel(); + client.fetch( + "0x2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e".into(), + Default::default(), + Box::new(move |result| { + tx.send(result).unwrap(); + }), + ); - // then - let result = rx.recv().unwrap(); - assert!(result.is_ok(), "Should return path, got: {:?}", result); - } + // then + let result = rx.recv().unwrap(); + assert!(result.is_ok(), "Should return path, got: {:?}", result); + } } diff --git a/updater/hash-fetch/src/lib.rs b/updater/hash-fetch/src/lib.rs index a9ddc7363..b502ea523 100644 --- a/updater/hash-fetch/src/lib.rs +++ b/updater/hash-fetch/src/lib.rs @@ -22,16 +22,16 @@ extern crate log; extern crate ethabi; -extern crate parity_bytes as bytes; extern crate ethereum_types; extern crate futures; extern crate keccak_hash as hash; extern crate mime; extern crate mime_guess; +extern crate parity_bytes as bytes; extern crate parity_runtime; extern crate rand; -extern crate rustc_hex; extern crate registrar; +extern crate rustc_hex; pub extern crate fetch; @@ -40,13 +40,13 @@ extern 
crate ethabi_derive; #[macro_use] extern crate ethabi_contract; #[cfg(test)] -extern crate parking_lot; -#[cfg(test)] extern crate fake_fetch; +#[cfg(test)] +extern crate parking_lot; mod client; pub mod urlhint; -pub use client::{HashFetch, Client, Error}; +pub use client::{Client, Error, HashFetch}; pub use fetch::Abort; diff --git a/updater/hash-fetch/src/urlhint.rs b/updater/hash-fetch/src/urlhint.rs index 73520fd3b..c03bfc355 100644 --- a/updater/hash-fetch/src/urlhint.rs +++ b/updater/hash-fetch/src/urlhint.rs @@ -16,15 +16,14 @@ //! URLHint Contract -use std::sync::Arc; -use rustc_hex::ToHex; use mime::{self, Mime}; use mime_guess; +use rustc_hex::ToHex; +use std::sync::Arc; -use futures::{future, Future}; -use futures::future::Either; -use ethereum_types::{H256, Address}; -use registrar::{Registrar, RegistrarClient, Asynchronous}; +use ethereum_types::{Address, H256}; +use futures::{future, future::Either, Future}; +use registrar::{Asynchronous, Registrar, RegistrarClient}; use_contract!(urlhint, "res/urlhint.json"); @@ -33,325 +32,362 @@ const GITHUB_HINT: &'static str = "githubhint"; /// GithubHint entries with commit set as `0x0..01` should be treated /// as Github Dapp, downloadable zip files, than can be extracted, containing /// the manifest.json file along with the dapp -static GITHUB_DAPP_COMMIT: &[u8; COMMIT_LEN] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; +static GITHUB_DAPP_COMMIT: &[u8; COMMIT_LEN] = + &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; /// Github-hosted dapp. 
#[derive(Debug, PartialEq)] pub struct GithubApp { - /// Github Account - pub account: String, - /// Github Repository - pub repo: String, - /// Commit on Github - pub commit: [u8; COMMIT_LEN], - /// Dapp owner address - pub owner: Address, + /// Github Account + pub account: String, + /// Github Repository + pub repo: String, + /// Commit on Github + pub commit: [u8; COMMIT_LEN], + /// Dapp owner address + pub owner: Address, } impl GithubApp { - /// Returns URL of this Github-hosted dapp package. - pub fn url(&self) -> String { - // Since https fetcher doesn't support redirections we use direct link - // format!("https://github.com/{}/{}/archive/{}.zip", self.account, self.repo, self.commit.to_hex()) - format!("https://codeload.github.com/{}/{}/zip/{}", self.account, self.repo, self.commit.to_hex()) - } + /// Returns URL of this Github-hosted dapp package. + pub fn url(&self) -> String { + // Since https fetcher doesn't support redirections we use direct link + // format!("https://github.com/{}/{}/archive/{}.zip", self.account, self.repo, self.commit.to_hex()) + format!( + "https://codeload.github.com/{}/{}/zip/{}", + self.account, + self.repo, + self.commit.to_hex() + ) + } - fn commit(bytes: &[u8]) -> Option<[u8;COMMIT_LEN]> { - if bytes.len() < COMMIT_LEN { - return None; - } + fn commit(bytes: &[u8]) -> Option<[u8; COMMIT_LEN]> { + if bytes.len() < COMMIT_LEN { + return None; + } - let mut commit = [0; COMMIT_LEN]; - for i in 0..COMMIT_LEN { - commit[i] = bytes[i]; - } + let mut commit = [0; COMMIT_LEN]; + for i in 0..COMMIT_LEN { + commit[i] = bytes[i]; + } - Some(commit) - } + Some(commit) + } } /// Hash-Addressed Content #[derive(Debug, PartialEq)] pub struct Content { - /// URL of the content - pub url: String, - /// MIME type of the content - pub mime: Mime, - /// Content owner address - pub owner: Address, + /// URL of the content + pub url: String, + /// MIME type of the content + pub mime: Mime, + /// Content owner address + pub owner: Address, } /// 
Result of resolving id to URL #[derive(Debug, PartialEq)] pub enum URLHintResult { - /// Dapp - Dapp(GithubApp), - /// GithubDapp - GithubDapp(Content), - /// Content - Content(Content), + /// Dapp + Dapp(GithubApp), + /// GithubDapp + GithubDapp(Content), + /// Content + Content(Content), } /// URLHint Contract interface pub trait URLHint: Send + Sync { - /// Resolves given id to registrar entry. - fn resolve(&self, id: H256) -> Box, Error = String> + Send>; + /// Resolves given id to registrar entry. + fn resolve(&self, id: H256) + -> Box, Error = String> + Send>; } /// `URLHintContract` API pub struct URLHintContract { - registrar: Registrar, - client: Arc>, + registrar: Registrar, + client: Arc>, } impl URLHintContract { - /// Creates new `URLHintContract` - pub fn new(client: Arc>) -> Self { - URLHintContract { - registrar: Registrar::new(client.clone()), - client: client, - } - } + /// Creates new `URLHintContract` + pub fn new(client: Arc>) -> Self { + URLHintContract { + registrar: Registrar::new(client.clone()), + client: client, + } + } } fn get_urlhint_content(account_slash_repo: String, owner: Address) -> Content { - let mime = guess_mime_type(&account_slash_repo).unwrap_or(mime::APPLICATION_JSON); - Content { - url: account_slash_repo, - mime, - owner, - } + let mime = guess_mime_type(&account_slash_repo).unwrap_or(mime::APPLICATION_JSON); + Content { + url: account_slash_repo, + mime, + owner, + } } fn decode_urlhint_output(output: (String, [u8; 20], Address)) -> Option { - let (account_slash_repo, commit, owner) = output; + let (account_slash_repo, commit, owner) = output; - if owner == Address::default() { - return None; - } + if owner == Address::default() { + return None; + } - let commit = GithubApp::commit(&commit); + let commit = GithubApp::commit(&commit); - if commit == Some(Default::default()) { - let content = get_urlhint_content(account_slash_repo, owner); - return Some(URLHintResult::Content(content)); - } + if commit == 
Some(Default::default()) { + let content = get_urlhint_content(account_slash_repo, owner); + return Some(URLHintResult::Content(content)); + } - if commit == Some(*GITHUB_DAPP_COMMIT) { - let content = get_urlhint_content(account_slash_repo, owner); - return Some(URLHintResult::GithubDapp(content)); - } + if commit == Some(*GITHUB_DAPP_COMMIT) { + let content = get_urlhint_content(account_slash_repo, owner); + return Some(URLHintResult::GithubDapp(content)); + } - let (account, repo) = { - let mut it = account_slash_repo.split('/'); - match (it.next(), it.next()) { - (Some(account), Some(repo)) => (account.into(), repo.into()), - _ => return None, - } - }; + let (account, repo) = { + let mut it = account_slash_repo.split('/'); + match (it.next(), it.next()) { + (Some(account), Some(repo)) => (account.into(), repo.into()), + _ => return None, + } + }; - commit.map(|commit| URLHintResult::Dapp(GithubApp { - account: account, - repo: repo, - commit: commit, - owner: owner, - })) + commit.map(|commit| { + URLHintResult::Dapp(GithubApp { + account: account, + repo: repo, + commit: commit, + owner: owner, + }) + }) } impl URLHint for URLHintContract { - fn resolve(&self, id: H256) -> Box, Error = String> + Send> { - let client = self.client.clone(); + fn resolve( + &self, + id: H256, + ) -> Box, Error = String> + Send> { + let client = self.client.clone(); - let future = self.registrar.get_address(GITHUB_HINT) - .and_then(move |addr| if !addr.is_zero() { - let data = urlhint::functions::entries::encode_input(id); - let result = client.call_contract(addr, data) - .and_then(move |output| urlhint::functions::entries::decode_output(&output).map_err(|e| e.to_string())) - .map(decode_urlhint_output); - Either::B(result) - } else { - Either::A(future::ok(None)) - }); - Box::new(future) - } + let future = self + .registrar + .get_address(GITHUB_HINT) + .and_then(move |addr| { + if !addr.is_zero() { + let data = urlhint::functions::entries::encode_input(id); + let result = client 
+ .call_contract(addr, data) + .and_then(move |output| { + urlhint::functions::entries::decode_output(&output) + .map_err(|e| e.to_string()) + }) + .map(decode_urlhint_output); + Either::B(result) + } else { + Either::A(future::ok(None)) + } + }); + Box::new(future) + } } fn guess_mime_type(url: &str) -> Option { - const CONTENT_TYPE: &'static str = "content-type="; + const CONTENT_TYPE: &'static str = "content-type="; - let mut it = url.split('#'); - // skip url - let url = it.next(); - // get meta headers - let metas = it.next(); - if let Some(metas) = metas { - for meta in metas.split('&') { - let meta = meta.to_lowercase(); - if meta.starts_with(CONTENT_TYPE) { - return meta[CONTENT_TYPE.len()..].parse().ok(); - } - } - } - url.and_then(|url| { - url.split('.').last() - }).and_then(|extension| { - mime_guess::get_mime_type_opt(extension) - }) + let mut it = url.split('#'); + // skip url + let url = it.next(); + // get meta headers + let metas = it.next(); + if let Some(metas) = metas { + for meta in metas.split('&') { + let meta = meta.to_lowercase(); + if meta.starts_with(CONTENT_TYPE) { + return meta[CONTENT_TYPE.len()..].parse().ok(); + } + } + } + url.and_then(|url| url.split('.').last()) + .and_then(|extension| mime_guess::get_mime_type_opt(extension)) } #[cfg(test)] pub mod tests { - use std::sync::Arc; - use std::str::FromStr; - use rustc_hex::FromHex; + use rustc_hex::FromHex; + use std::{str::FromStr, sync::Arc}; - use futures::{Future, IntoFuture}; + use futures::{Future, IntoFuture}; - use super::*; - use super::guess_mime_type; - use parking_lot::Mutex; - use ethereum_types::Address; - use bytes::{Bytes, ToPretty}; + use super::{guess_mime_type, *}; + use bytes::{Bytes, ToPretty}; + use ethereum_types::Address; + use parking_lot::Mutex; - pub struct FakeRegistrar { - pub calls: Arc>>, - pub responses: Mutex>>, - } + pub struct FakeRegistrar { + pub calls: Arc>>, + pub responses: Mutex>>, + } - pub const REGISTRAR: &'static str = 
"8e4e9b13d4b45cb0befc93c3061b1408f67316b2"; - pub const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000"; + pub const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2"; + pub const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000"; - impl FakeRegistrar { - pub fn new() -> Self { - FakeRegistrar { - calls: Arc::new(Mutex::new(Vec::new())), - responses: Mutex::new( - vec![ - Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()), - Ok(Vec::new()) - ] - ), - } - } - } + impl FakeRegistrar { + pub fn new() -> Self { + FakeRegistrar { + calls: Arc::new(Mutex::new(Vec::new())), + responses: Mutex::new(vec![ + Ok(format!("000000000000000000000000{}", URLHINT) + .from_hex() + .unwrap()), + Ok(Vec::new()), + ]), + } + } + } - impl RegistrarClient for FakeRegistrar { - type Call = Asynchronous; + impl RegistrarClient for FakeRegistrar { + type Call = Asynchronous; - fn registrar_address(&self) -> Result { - Ok(REGISTRAR.parse().unwrap()) - } + fn registrar_address(&self) -> Result { + Ok(REGISTRAR.parse().unwrap()) + } - fn call_contract(&self, address: Address, data: Bytes) -> Self::Call { - self.calls.lock().push((address.to_hex(), data.to_hex())); - let res = self.responses.lock().remove(0); - Box::new(res.into_future()) - } - } + fn call_contract(&self, address: Address, data: Bytes) -> Self::Call { + self.calls.lock().push((address.to_hex(), data.to_hex())); + let res = self.responses.lock().remove(0); + Box::new(res.into_future()) + } + } - #[test] - fn should_call_registrar_and_urlhint_contracts() { - // given - let registrar = FakeRegistrar::new(); - let resolve_result = { - use ethabi::{encode, Token}; - encode(&[Token::String(String::new()), Token::FixedBytes(vec![0; 20]), Token::Address([0; 20].into())]) - }; - registrar.responses.lock()[1] = Ok(resolve_result); + #[test] + fn should_call_registrar_and_urlhint_contracts() { + // given + let registrar = FakeRegistrar::new(); + let resolve_result 
= { + use ethabi::{encode, Token}; + encode(&[ + Token::String(String::new()), + Token::FixedBytes(vec![0; 20]), + Token::Address([0; 20].into()), + ]) + }; + registrar.responses.lock()[1] = Ok(resolve_result); - let calls = registrar.calls.clone(); - let urlhint = URLHintContract::new(Arc::new(registrar)); + let calls = registrar.calls.clone(); + let urlhint = URLHintContract::new(Arc::new(registrar)); - // when - let res = urlhint.resolve("test".as_bytes().into()).wait().unwrap(); - let calls = calls.lock(); - let call0 = calls.get(0).expect("Registrar resolve called"); - let call1 = calls.get(1).expect("URLHint Resolve called"); + // when + let res = urlhint.resolve("test".as_bytes().into()).wait().unwrap(); + let calls = calls.lock(); + let call0 = calls.get(0).expect("Registrar resolve called"); + let call1 = calls.get(1).expect("URLHint Resolve called"); - // then - assert!(res.is_none()); - assert_eq!(call0.0, REGISTRAR); - assert_eq!(call0.1, + // then + assert!(res.is_none()); + assert_eq!(call0.0, REGISTRAR); + assert_eq!(call0.1, "6795dbcd058740ee9a5a3fb9f1cfa10752baec87e09cc45cd7027fd54708271aca300c75000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000014100000000000000000000000000000000000000000000000000000000000000".to_owned() ); - assert_eq!(call1.0, URLHINT); - assert_eq!(call1.1, - "267b69227465737400000000000000000000000000000000000000000000000000000000".to_owned() - ); - } + assert_eq!(call1.0, URLHINT); + assert_eq!( + call1.1, + "267b69227465737400000000000000000000000000000000000000000000000000000000".to_owned() + ); + } - #[test] - fn should_decode_urlhint_output() { - // given - let mut registrar = FakeRegistrar::new(); - registrar.responses = Mutex::new(vec![ + #[test] + fn should_decode_urlhint_output() { + // given + let mut registrar = FakeRegistrar::new(); + registrar.responses = Mutex::new(vec![ Ok(format!("000000000000000000000000{}", 
URLHINT).from_hex().unwrap()), Ok("0000000000000000000000000000000000000000000000000000000000000060ec4c1fe06c808fe3739858c347109b1f5f1ed4b5000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff0000000000000000000000000000000000000000000000000000000000000011657468636f72652f64616f2e636c61696d000000000000000000000000000000".from_hex().unwrap()), ]); - let urlhint = URLHintContract::new(Arc::new(registrar)); + let urlhint = URLHintContract::new(Arc::new(registrar)); - // when - let res = urlhint.resolve("test".as_bytes().into()).wait().unwrap(); + // when + let res = urlhint.resolve("test".as_bytes().into()).wait().unwrap(); - // then - assert_eq!(res, Some(URLHintResult::Dapp(GithubApp { - account: "ethcore".into(), - repo: "dao.claim".into(), - commit: GithubApp::commit(&"ec4c1fe06c808fe3739858c347109b1f5f1ed4b5".from_hex().unwrap()).unwrap(), - owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), - }))) - } + // then + assert_eq!( + res, + Some(URLHintResult::Dapp(GithubApp { + account: "ethcore".into(), + repo: "dao.claim".into(), + commit: GithubApp::commit( + &"ec4c1fe06c808fe3739858c347109b1f5f1ed4b5" + .from_hex() + .unwrap() + ) + .unwrap(), + owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), + })) + ) + } - #[test] - fn should_decode_urlhint_content_output() { - // given - let mut registrar = FakeRegistrar::new(); - registrar.responses = Mutex::new(vec![ + #[test] + fn should_decode_urlhint_content_output() { + // given + let mut registrar = FakeRegistrar::new(); + registrar.responses = Mutex::new(vec![ Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()), 
Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003c68747470733a2f2f7061726974792e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e6700000000".from_hex().unwrap()), ]); - let urlhint = URLHintContract::new(Arc::new(registrar)); + let urlhint = URLHintContract::new(Arc::new(registrar)); - // when - let res = urlhint.resolve("test".as_bytes().into()).wait().unwrap(); + // when + let res = urlhint.resolve("test".as_bytes().into()).wait().unwrap(); - // then - assert_eq!(res, Some(URLHintResult::Content(Content { - url: "https://parity.io/assets/images/ethcore-black-horizontal.png".into(), - mime: mime::IMAGE_PNG, - owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), - }))) - } + // then + assert_eq!( + res, + Some(URLHintResult::Content(Content { + url: "https://parity.io/assets/images/ethcore-black-horizontal.png".into(), + mime: mime::IMAGE_PNG, + owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), + })) + ) + } - #[test] - fn should_return_valid_url() { - // given - let app = GithubApp { - account: "test".into(), - repo: "xyz".into(), - commit: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], - owner: Address::default(), - }; + #[test] + fn should_return_valid_url() { + // given + let app = GithubApp { + account: "test".into(), + repo: "xyz".into(), + commit: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + ], + owner: Address::default(), + }; - // when - let url = app.url(); + // when + let url = app.url(); - // then - assert_eq!(url, "https://codeload.github.com/test/xyz/zip/000102030405060708090a0b0c0d0e0f10111213".to_owned()); - } + // then + assert_eq!( + url, + 
"https://codeload.github.com/test/xyz/zip/000102030405060708090a0b0c0d0e0f10111213" + .to_owned() + ); + } - #[test] - fn should_guess_mime_type_from_url() { - let url1 = "https://parity.io/parity"; - let url2 = "https://parity.io/parity#content-type=image/png"; - let url3 = "https://parity.io/parity#something&content-type=image/png"; - let url4 = "https://parity.io/parity.png#content-type=image/jpeg"; - let url5 = "https://parity.io/parity.png"; + #[test] + fn should_guess_mime_type_from_url() { + let url1 = "https://parity.io/parity"; + let url2 = "https://parity.io/parity#content-type=image/png"; + let url3 = "https://parity.io/parity#something&content-type=image/png"; + let url4 = "https://parity.io/parity.png#content-type=image/jpeg"; + let url5 = "https://parity.io/parity.png"; - assert_eq!(guess_mime_type(url1), None); - assert_eq!(guess_mime_type(url2), Some(mime::IMAGE_PNG)); - assert_eq!(guess_mime_type(url3), Some(mime::IMAGE_PNG)); - assert_eq!(guess_mime_type(url4), Some(mime::IMAGE_JPEG)); - assert_eq!(guess_mime_type(url5), Some(mime::IMAGE_PNG)); - } + assert_eq!(guess_mime_type(url1), None); + assert_eq!(guess_mime_type(url2), Some(mime::IMAGE_PNG)); + assert_eq!(guess_mime_type(url3), Some(mime::IMAGE_PNG)); + assert_eq!(guess_mime_type(url4), Some(mime::IMAGE_JPEG)); + assert_eq!(guess_mime_type(url5), Some(mime::IMAGE_PNG)); + } } diff --git a/updater/src/lib.rs b/updater/src/lib.rs index f6fad1719..628737d85 100644 --- a/updater/src/lib.rs +++ b/updater/src/lib.rs @@ -49,10 +49,10 @@ extern crate tempdir; #[macro_use] extern crate matches; -mod updater; -mod types; mod service; +mod types; +mod updater; pub use service::Service; -pub use types::{ReleaseInfo, OperationsInfo, CapState, VersionInfo, ReleaseTrack}; -pub use updater::{Updater, UpdateFilter, UpdatePolicy}; +pub use types::{CapState, OperationsInfo, ReleaseInfo, ReleaseTrack, VersionInfo}; +pub use updater::{UpdateFilter, UpdatePolicy, Updater}; diff --git a/updater/src/service.rs 
b/updater/src/service.rs index b9ef2f965..03ece5e44 100644 --- a/updater/src/service.rs +++ b/updater/src/service.rs @@ -14,25 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use types::{CapState, ReleaseInfo, OperationsInfo, VersionInfo}; +use types::{CapState, OperationsInfo, ReleaseInfo, VersionInfo}; /// Parity updater service trait pub trait Service: Send + Sync { - /// Is the currently running client capable of supporting the current chain? - /// We default to true if there's no clear information. - fn capability(&self) -> CapState; + /// Is the currently running client capable of supporting the current chain? + /// We default to true if there's no clear information. + fn capability(&self) -> CapState; - /// The release which is ready to be upgraded to, if any. If this returns `Some`, then - /// `execute_upgrade` may be called. - fn upgrade_ready(&self) -> Option; + /// The release which is ready to be upgraded to, if any. If this returns `Some`, then + /// `execute_upgrade` may be called. + fn upgrade_ready(&self) -> Option; - /// Actually upgrades the client. Assumes that the binary has been downloaded. - /// @returns `true` on success. - fn execute_upgrade(&self) -> bool; + /// Actually upgrades the client. Assumes that the binary has been downloaded. + /// @returns `true` on success. + fn execute_upgrade(&self) -> bool; - /// Our version info. - fn version_info(&self) -> VersionInfo; + /// Our version info. + fn version_info(&self) -> VersionInfo; - /// Information gathered concerning the release. - fn info(&self) -> Option; + /// Information gathered concerning the release. 
+ fn info(&self) -> Option; } diff --git a/updater/src/types/all.rs b/updater/src/types/all.rs index f17480c8a..af32411cb 100644 --- a/updater/src/types/all.rs +++ b/updater/src/types/all.rs @@ -22,44 +22,46 @@ use types::VersionInfo; /// Information regarding a particular release of Parity #[derive(Debug, Clone, PartialEq)] pub struct ReleaseInfo { - /// Information on the version. - pub version: VersionInfo, - /// Does this release contain critical security updates? - pub is_critical: bool, - /// The latest fork that this release can handle. - pub fork: u64, - /// Our platform's binary, if known. - pub binary: Option, + /// Information on the version. + pub version: VersionInfo, + /// Does this release contain critical security updates? + pub is_critical: bool, + /// The latest fork that this release can handle. + pub fork: u64, + /// Our platform's binary, if known. + pub binary: Option, } /// Information on our operations environment. #[derive(Debug, Clone, PartialEq)] pub struct OperationsInfo { - /// Our blockchain's latest fork. - pub fork: u64, + /// Our blockchain's latest fork. + pub fork: u64, - /// Last fork our client supports, if known. - pub this_fork: Option, + /// Last fork our client supports, if known. + pub this_fork: Option, - /// Information on our track's latest release. - pub track: ReleaseInfo, - /// Information on our minor version's latest release. - pub minor: Option, + /// Information on our track's latest release. + pub track: ReleaseInfo, + /// Information on our minor version's latest release. + pub minor: Option, } /// Information on the current version's consensus capabililty. #[derive(Debug, Clone, Copy, PartialEq)] pub enum CapState { - /// Unknown. - Unknown, - /// Capable of consensus indefinitely. - Capable, - /// Capable of consensus up until a definite block. - CapableUntil(u64), - /// Incapable of consensus since a particular block. - IncapableSince(u64), + /// Unknown. + Unknown, + /// Capable of consensus indefinitely. 
+ Capable, + /// Capable of consensus up until a definite block. + CapableUntil(u64), + /// Incapable of consensus since a particular block. + IncapableSince(u64), } impl Default for CapState { - fn default() -> Self { CapState::Unknown } + fn default() -> Self { + CapState::Unknown + } } diff --git a/updater/src/types/mod.rs b/updater/src/types/mod.rs index fab4ec4d0..7dac9e63b 100644 --- a/updater/src/types/mod.rs +++ b/updater/src/types/mod.rs @@ -20,6 +20,8 @@ mod all; mod release_track; mod version_info; -pub use self::all::{ReleaseInfo, OperationsInfo, CapState}; -pub use self::release_track::ReleaseTrack; -pub use self::version_info::VersionInfo; +pub use self::{ + all::{CapState, OperationsInfo, ReleaseInfo}, + release_track::ReleaseTrack, + version_info::VersionInfo, +}; diff --git a/updater/src/types/release_track.rs b/updater/src/types/release_track.rs index 0648ebc47..33f8777f3 100644 --- a/updater/src/types/release_track.rs +++ b/updater/src/types/release_track.rs @@ -22,97 +22,101 @@ use std::fmt; #[repr(u8)] #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub enum ReleaseTrack { - /// Stable track. - Stable = 1, - /// Beta track. - Beta = 2, - /// Nightly track. - Nightly = 3, - /// Testing track. - Testing = 4, - /// No known track, also "current executable's track" when it's not yet known. - Unknown = 0, + /// Stable track. + Stable = 1, + /// Beta track. + Beta = 2, + /// Nightly track. + Nightly = 3, + /// Testing track. + Testing = 4, + /// No known track, also "current executable's track" when it's not yet known. 
+ Unknown = 0, } impl fmt::Display for ReleaseTrack { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", match *self { - ReleaseTrack::Stable => "stable", - ReleaseTrack::Beta => "beta", - ReleaseTrack::Nightly => "nightly", - ReleaseTrack::Testing => "testing", - ReleaseTrack::Unknown => "unknown", - }) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}", + match *self { + ReleaseTrack::Stable => "stable", + ReleaseTrack::Beta => "beta", + ReleaseTrack::Nightly => "nightly", + ReleaseTrack::Testing => "testing", + ReleaseTrack::Unknown => "unknown", + } + ) + } } impl<'a> From<&'a str> for ReleaseTrack { - fn from(s: &'a str) -> Self { - match s { - "stable" => ReleaseTrack::Stable, - "beta" => ReleaseTrack::Beta, - "nightly" => ReleaseTrack::Nightly, - "testing" => ReleaseTrack::Testing, - _ => ReleaseTrack::Unknown, - } - } + fn from(s: &'a str) -> Self { + match s { + "stable" => ReleaseTrack::Stable, + "beta" => ReleaseTrack::Beta, + "nightly" => ReleaseTrack::Nightly, + "testing" => ReleaseTrack::Testing, + _ => ReleaseTrack::Unknown, + } + } } impl From for ReleaseTrack { - fn from(i: u8) -> Self { - match i { - 1 => ReleaseTrack::Stable, - 2 => ReleaseTrack::Beta, - 3 => ReleaseTrack::Nightly, - 4 => ReleaseTrack::Testing, - _ => ReleaseTrack::Unknown, - } - } + fn from(i: u8) -> Self { + match i { + 1 => ReleaseTrack::Stable, + 2 => ReleaseTrack::Beta, + 3 => ReleaseTrack::Nightly, + 4 => ReleaseTrack::Testing, + _ => ReleaseTrack::Unknown, + } + } } impl From for u8 { - fn from(rt: ReleaseTrack) -> Self { - rt as u8 - } + fn from(rt: ReleaseTrack) -> Self { + rt as u8 + } } #[cfg(test)] mod tests { - use super::ReleaseTrack; + use super::ReleaseTrack; - #[test] - fn test_release_track_from() { - assert_eq!(ReleaseTrack::Stable, 1u8.into()); - assert_eq!(ReleaseTrack::Beta, 2u8.into()); - assert_eq!(ReleaseTrack::Nightly, 3u8.into()); - assert_eq!(ReleaseTrack::Testing, 4u8.into()); - 
assert_eq!(ReleaseTrack::Unknown, 0u8.into()); - } + #[test] + fn test_release_track_from() { + assert_eq!(ReleaseTrack::Stable, 1u8.into()); + assert_eq!(ReleaseTrack::Beta, 2u8.into()); + assert_eq!(ReleaseTrack::Nightly, 3u8.into()); + assert_eq!(ReleaseTrack::Testing, 4u8.into()); + assert_eq!(ReleaseTrack::Unknown, 0u8.into()); + } - #[test] - fn test_release_track_into() { - assert_eq!(1u8, u8::from(ReleaseTrack::Stable)); - assert_eq!(2u8, u8::from(ReleaseTrack::Beta)); - assert_eq!(3u8, u8::from(ReleaseTrack::Nightly)); - assert_eq!(4u8, u8::from(ReleaseTrack::Testing)); - assert_eq!(0u8, u8::from(ReleaseTrack::Unknown)); - } + #[test] + fn test_release_track_into() { + assert_eq!(1u8, u8::from(ReleaseTrack::Stable)); + assert_eq!(2u8, u8::from(ReleaseTrack::Beta)); + assert_eq!(3u8, u8::from(ReleaseTrack::Nightly)); + assert_eq!(4u8, u8::from(ReleaseTrack::Testing)); + assert_eq!(0u8, u8::from(ReleaseTrack::Unknown)); + } - #[test] - fn test_release_track_from_str() { - assert_eq!(ReleaseTrack::Stable, "stable".into()); - assert_eq!(ReleaseTrack::Beta, "beta".into()); - assert_eq!(ReleaseTrack::Nightly, "nightly".into()); - assert_eq!(ReleaseTrack::Testing, "testing".into()); - assert_eq!(ReleaseTrack::Unknown, "unknown".into()); - } + #[test] + fn test_release_track_from_str() { + assert_eq!(ReleaseTrack::Stable, "stable".into()); + assert_eq!(ReleaseTrack::Beta, "beta".into()); + assert_eq!(ReleaseTrack::Nightly, "nightly".into()); + assert_eq!(ReleaseTrack::Testing, "testing".into()); + assert_eq!(ReleaseTrack::Unknown, "unknown".into()); + } - #[test] - fn test_release_track_into_str() { - assert_eq!("stable", ReleaseTrack::Stable.to_string()); - assert_eq!("beta", ReleaseTrack::Beta.to_string()); - assert_eq!("nightly", ReleaseTrack::Nightly.to_string()); - assert_eq!("testing", ReleaseTrack::Testing.to_string()); - assert_eq!("unknown", ReleaseTrack::Unknown.to_string()); - } + #[test] + fn test_release_track_into_str() { + assert_eq!("stable", 
ReleaseTrack::Stable.to_string()); + assert_eq!("beta", ReleaseTrack::Beta.to_string()); + assert_eq!("nightly", ReleaseTrack::Nightly.to_string()); + assert_eq!("testing", ReleaseTrack::Testing.to_string()); + assert_eq!("unknown", ReleaseTrack::Unknown.to_string()); + } } diff --git a/updater/src/types/version_info.rs b/updater/src/types/version_info.rs index b55e08cb1..6eef189a8 100644 --- a/updater/src/types/version_info.rs +++ b/updater/src/types/version_info.rs @@ -16,53 +16,63 @@ //! Types used in the public API -use std::fmt; -use semver::Version; use ethereum_types::H160; -use version::raw_package_info; +use semver::Version; +use std::fmt; use types::ReleaseTrack; +use version::raw_package_info; /// Version information of a particular release. #[derive(Debug, Clone, PartialEq)] pub struct VersionInfo { - /// The track on which it was released. - pub track: ReleaseTrack, - /// The version. - pub version: Version, - /// The (SHA1?) 160-bit hash of this build's code base. - pub hash: H160, + /// The track on which it was released. + pub track: ReleaseTrack, + /// The version. + pub version: Version, + /// The (SHA1?) 160-bit hash of this build's code base. + pub hash: H160, } impl fmt::Display for VersionInfo { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, "{}.{}.{}-{}-{}", self.version.major, self.version.minor, self.version.patch, self.track, self.hash) - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!( + f, + "{}.{}.{}-{}-{}", + self.version.major, self.version.minor, self.version.patch, self.track, self.hash + ) + } } impl VersionInfo { - /// Get information for this (currently running) binary. 
- pub fn this() -> Self { - let raw = raw_package_info(); - VersionInfo { - track: raw.0.into(), - version: { let mut v = Version::parse(raw.1).expect("Environment variables are known to be valid; qed"); v.build = vec![]; v.pre = vec![]; v }, - hash: raw.2.parse::().unwrap_or_else(|_| H160::zero()), - } - } + /// Get information for this (currently running) binary. + pub fn this() -> Self { + let raw = raw_package_info(); + VersionInfo { + track: raw.0.into(), + version: { + let mut v = Version::parse(raw.1) + .expect("Environment variables are known to be valid; qed"); + v.build = vec![]; + v.pre = vec![]; + v + }, + hash: raw.2.parse::().unwrap_or_else(|_| H160::zero()), + } + } - /// Compose the information from the provided raw fields. - pub fn from_raw(semver: u32, track: u8, hash: H160) -> Self { - let t = track.into(); - VersionInfo { - version: Version { - major: u64::from(semver >> 16), - minor: u64::from((semver >> 8) & 0xff), - patch: u64::from(semver & 0xff), - build: vec![], - pre: vec![], - }, - track: t, - hash, - } - } + /// Compose the information from the provided raw fields. + pub fn from_raw(semver: u32, track: u8, hash: H160) -> Self { + let t = track.into(); + VersionInfo { + version: Version { + major: u64::from(semver >> 16), + minor: u64::from((semver >> 8) & 0xff), + patch: u64::from(semver & 0xff), + build: vec![], + pre: vec![], + }, + track: t, + hash, + } + } } diff --git a/updater/src/updater.rs b/updater/src/updater.rs index 70adc9f3b..5c26ae97b 100644 --- a/updater/src/updater.rs +++ b/updater/src/updater.rs @@ -14,1243 +14,1449 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::cmp; -use std::fs; -use std::io::Write; -use std::path::{Path, PathBuf}; -use std::sync::{Arc, Weak}; -use std::time::{Duration, Instant}; +use std::{ + cmp, fs, + io::Write, + path::{Path, PathBuf}, + sync::{Arc, Weak}, + time::{Duration, Instant}, +}; use parking_lot::{Mutex, MutexGuard}; use rand::{self, Rng}; use target_info::Target; -use common_types::BlockNumber; -use common_types::filter::Filter; -use ethcore::client::{BlockId, BlockChainClient, ChainNotify, NewBlocks}; +use common_types::{filter::Filter, BlockNumber}; +use ethabi::FunctionOutputDecoder; +use ethcore::client::{BlockChainClient, BlockId, ChainNotify, NewBlocks}; use ethereum_types::H256; use hash_fetch::{self as fetch, HashFetch}; use parity_path::restrict_permissions_owner; -use service::Service; -use sync::{SyncProvider}; -use types::{ReleaseInfo, OperationsInfo, CapState, VersionInfo, ReleaseTrack}; -use version; use semver::Version; -use ethabi::FunctionOutputDecoder; +use service::Service; +use sync::SyncProvider; +use types::{CapState, OperationsInfo, ReleaseInfo, ReleaseTrack, VersionInfo}; +use version; use_contract!(operations, "res/operations.json"); /// Filter for releases. #[derive(Debug, Eq, PartialEq, Clone)] pub enum UpdateFilter { - /// All releases following the same track. - All, - /// As with `All`, but only those which are known to be critical. - Critical, - /// None. - None, + /// All releases following the same track. + All, + /// As with `All`, but only those which are known to be critical. + Critical, + /// None. + None, } /// The policy for auto-updating. #[derive(Debug, Eq, PartialEq, Clone)] pub struct UpdatePolicy { - /// Download potential updates. - pub enable_downloading: bool, - /// Disable client if we know we're incapable of syncing. - pub require_consensus: bool, - /// Which of those downloaded should be automatically installed. - pub filter: UpdateFilter, - /// Which track we should be following. 
- pub track: ReleaseTrack, - /// Path for the updates to go. - pub path: PathBuf, - /// Maximum download size. - pub max_size: usize, - /// Random update delay range in blocks. - pub max_delay: u64, - /// Number of blocks between each check for updates. - pub frequency: u64, + /// Download potential updates. + pub enable_downloading: bool, + /// Disable client if we know we're incapable of syncing. + pub require_consensus: bool, + /// Which of those downloaded should be automatically installed. + pub filter: UpdateFilter, + /// Which track we should be following. + pub track: ReleaseTrack, + /// Path for the updates to go. + pub path: PathBuf, + /// Maximum download size. + pub max_size: usize, + /// Random update delay range in blocks. + pub max_delay: u64, + /// Number of blocks between each check for updates. + pub frequency: u64, } impl Default for UpdatePolicy { - fn default() -> Self { - UpdatePolicy { - enable_downloading: false, - require_consensus: true, - filter: UpdateFilter::None, - track: ReleaseTrack::Unknown, - path: Default::default(), - max_size: 128 * 1024 * 1024, - max_delay: 100, - frequency: 20, - } - } + fn default() -> Self { + UpdatePolicy { + enable_downloading: false, + require_consensus: true, + filter: UpdateFilter::None, + track: ReleaseTrack::Unknown, + path: Default::default(), + max_size: 128 * 1024 * 1024, + max_delay: 100, + frequency: 20, + } + } } /// The current updater status #[derive(Clone, Debug, PartialEq)] enum UpdaterStatus { - /// Updater is currently disabled. - Disabled, - /// Updater is currently idle. - Idle, - /// Updater is waiting for block number to fetch a new release. - Waiting { - release: ReleaseInfo, - binary: H256, - block_number: BlockNumber, - }, - /// Updater is fetching a new release. - Fetching { - release: ReleaseInfo, - binary: H256, - retries: u32, - }, - /// Updater failed fetching a new release and it is now backing off until the next retry. 
- FetchBackoff { - release: ReleaseInfo, - binary: H256, - backoff: (u32, Instant), - }, - /// Updater is ready to update to a new release. - Ready { - release: ReleaseInfo, - }, - /// Updater has installed a new release and can be manually restarted. - Installed { - release: ReleaseInfo, - }, + /// Updater is currently disabled. + Disabled, + /// Updater is currently idle. + Idle, + /// Updater is waiting for block number to fetch a new release. + Waiting { + release: ReleaseInfo, + binary: H256, + block_number: BlockNumber, + }, + /// Updater is fetching a new release. + Fetching { + release: ReleaseInfo, + binary: H256, + retries: u32, + }, + /// Updater failed fetching a new release and it is now backing off until the next retry. + FetchBackoff { + release: ReleaseInfo, + binary: H256, + backoff: (u32, Instant), + }, + /// Updater is ready to update to a new release. + Ready { release: ReleaseInfo }, + /// Updater has installed a new release and can be manually restarted. + Installed { release: ReleaseInfo }, } impl Default for UpdaterStatus { - fn default() -> Self { - UpdaterStatus::Idle - } + fn default() -> Self { + UpdaterStatus::Idle + } } #[derive(Debug, Default)] struct UpdaterState { - latest: Option, - capability: CapState, - status: UpdaterStatus, + latest: Option, + capability: CapState, + status: UpdaterStatus, } /// Service for checking for updates and determining whether we can achieve consensus. -pub struct Updater { - // Useful environmental stuff. - update_policy: UpdatePolicy, - weak_self: Mutex>>, - client: Weak, - sync: Option>, - fetcher: F, - operations_client: O, - exit_handler: Mutex>>, +pub struct Updater< + O = OperationsContractClient, + F = fetch::Client, + T = StdTimeProvider, + R = ThreadRngGenRange, +> { + // Useful environmental stuff. 
+ update_policy: UpdatePolicy, + weak_self: Mutex>>, + client: Weak, + sync: Option>, + fetcher: F, + operations_client: O, + exit_handler: Mutex>>, - time_provider: T, - rng: R, + time_provider: T, + rng: R, - // Our version info (static) - this: VersionInfo, + // Our version info (static) + this: VersionInfo, - // All the other info - this changes so leave it behind a Mutex. - state: Mutex, + // All the other info - this changes so leave it behind a Mutex. + state: Mutex, } const CLIENT_ID: &str = "parity"; lazy_static! { - static ref CLIENT_ID_HASH: H256 = CLIENT_ID.as_bytes().into(); + static ref CLIENT_ID_HASH: H256 = CLIENT_ID.as_bytes().into(); } lazy_static! { - static ref PLATFORM: String = { - if cfg!(target_os = "macos") { - "x86_64-apple-darwin".into() - } else if cfg!(windows) { - "x86_64-pc-windows-msvc".into() - } else if cfg!(target_os = "linux") { - format!("{}-unknown-linux-gnu", Target::arch()) - } else { - version::platform() - } - }; + static ref PLATFORM: String = { + if cfg!(target_os = "macos") { + "x86_64-apple-darwin".into() + } else if cfg!(windows) { + "x86_64-pc-windows-msvc".into() + } else if cfg!(target_os = "linux") { + format!("{}-unknown-linux-gnu", Target::arch()) + } else { + version::platform() + } + }; } lazy_static! { - static ref PLATFORM_ID_HASH: H256 = PLATFORM.as_bytes().into(); + static ref PLATFORM_ID_HASH: H256 = PLATFORM.as_bytes().into(); } /// Client trait for getting latest release information from operations contract. /// Useful for mocking in tests. pub trait OperationsClient: Send + Sync + 'static { - /// Get the latest release operations info for the given track. - fn latest(&self, this: &VersionInfo, track: ReleaseTrack) -> Result; + /// Get the latest release operations info for the given track. + fn latest(&self, this: &VersionInfo, track: ReleaseTrack) -> Result; - /// Fetches the block number when the given release was added, checking the interval [from; latest_block]. 
- fn release_block_number(&self, from: BlockNumber, release: &ReleaseInfo) -> Option; + /// Fetches the block number when the given release was added, checking the interval [from; latest_block]. + fn release_block_number(&self, from: BlockNumber, release: &ReleaseInfo) + -> Option; } /// `OperationsClient` that delegates calls to the operations contract. pub struct OperationsContractClient { - client: Weak, + client: Weak, } impl OperationsContractClient { - fn new(client: Weak) -> Self { - OperationsContractClient { - client - } - } + fn new(client: Weak) -> Self { + OperationsContractClient { client } + } - /// Get the hash of the latest release for the given track - fn latest_hash(&self, track: ReleaseTrack, do_call: &F) -> Result - where F: Fn(Vec) -> Result, String> { - let (data, decoder) = operations::functions::latest_in_track::call(*CLIENT_ID_HASH, u8::from(track)); - let value = do_call(data)?; - decoder.decode(&value).map_err(|e| e.to_string()) - } + /// Get the hash of the latest release for the given track + fn latest_hash(&self, track: ReleaseTrack, do_call: &F) -> Result + where + F: Fn(Vec) -> Result, String>, + { + let (data, decoder) = + operations::functions::latest_in_track::call(*CLIENT_ID_HASH, u8::from(track)); + let value = do_call(data)?; + decoder.decode(&value).map_err(|e| e.to_string()) + } - /// Get release info for the given release - fn release_info(&self, release_id: H256, do_call: &F) -> Result - where F: Fn(Vec) -> Result, String> { - let (data, decoder) = operations::functions::release::call(*CLIENT_ID_HASH, release_id); + /// Get release info for the given release + fn release_info(&self, release_id: H256, do_call: &F) -> Result + where + F: Fn(Vec) -> Result, String>, + { + let (data, decoder) = operations::functions::release::call(*CLIENT_ID_HASH, release_id); - let (fork, track, semver, is_critical) = decoder.decode(&do_call(data)?).map_err(|e| e.to_string())?; + let (fork, track, semver, is_critical) = + 
decoder.decode(&do_call(data)?).map_err(|e| e.to_string())?; - let (fork, track, semver) = (fork.low_u64(), track.low_u32(), semver.low_u32()); + let (fork, track, semver) = (fork.low_u64(), track.low_u32(), semver.low_u32()); - let (data, decoder) = operations::functions::checksum::call(*CLIENT_ID_HASH, release_id, *PLATFORM_ID_HASH); - let latest_binary = decoder.decode(&do_call(data)?).map_err(|e| e.to_string())?; + let (data, decoder) = + operations::functions::checksum::call(*CLIENT_ID_HASH, release_id, *PLATFORM_ID_HASH); + let latest_binary = decoder.decode(&do_call(data)?).map_err(|e| e.to_string())?; - Ok(ReleaseInfo { - version: VersionInfo::from_raw(semver, track as u8, release_id.into()), - is_critical, - fork, - binary: if latest_binary.is_zero() { None } else { Some(latest_binary) }, - }) - } + Ok(ReleaseInfo { + version: VersionInfo::from_raw(semver, track as u8, release_id.into()), + is_critical, + fork, + binary: if latest_binary.is_zero() { + None + } else { + Some(latest_binary) + }, + }) + } } impl OperationsClient for OperationsContractClient { - fn latest(&self, this: &VersionInfo, track: ReleaseTrack) -> Result { - if track == ReleaseTrack::Unknown { - return Err(format!("Current executable ({}) is unreleased.", this.hash)); - } + fn latest(&self, this: &VersionInfo, track: ReleaseTrack) -> Result { + if track == ReleaseTrack::Unknown { + return Err(format!("Current executable ({}) is unreleased.", this.hash)); + } - let client = self.client.upgrade().ok_or_else(|| "Cannot obtain client")?; - let address = client.registry_address("operations".into(), BlockId::Latest).ok_or_else(|| "Cannot get operations contract address")?; - let do_call = |data| client.call_contract(BlockId::Latest, address, data).map_err(|e| format!("{:?}", e)); + let client = self + .client + .upgrade() + .ok_or_else(|| "Cannot obtain client")?; + let address = client + .registry_address("operations".into(), BlockId::Latest) + .ok_or_else(|| "Cannot get operations contract 
address")?; + let do_call = |data| { + client + .call_contract(BlockId::Latest, address, data) + .map_err(|e| format!("{:?}", e)) + }; - trace!(target: "updater", "Looking up this_fork for our release: {}/{:?}", CLIENT_ID, this.hash); + trace!(target: "updater", "Looking up this_fork for our release: {}/{:?}", CLIENT_ID, this.hash); - // get the fork number of this release - let (data, decoder) = operations::functions::release::call(*CLIENT_ID_HASH, this.hash); - let this_fork = do_call(data) - .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string())) - .ok() - .and_then(|(fork, track, _, _)| { - let this_track: ReleaseTrack = (track.low_u64() as u8).into(); - match this_track { - ReleaseTrack::Unknown => None, - _ => Some(fork.low_u64()), - } - }); + // get the fork number of this release + let (data, decoder) = operations::functions::release::call(*CLIENT_ID_HASH, this.hash); + let this_fork = do_call(data) + .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string())) + .ok() + .and_then(|(fork, track, _, _)| { + let this_track: ReleaseTrack = (track.low_u64() as u8).into(); + match this_track { + ReleaseTrack::Unknown => None, + _ => Some(fork.low_u64()), + } + }); - // get the hash of the latest release in our track - let latest_in_track = self.latest_hash(track, &do_call)?; + // get the hash of the latest release in our track + let latest_in_track = self.latest_hash(track, &do_call)?; - // get the release info for the latest version in track - let in_track = self.release_info(latest_in_track, &do_call)?; - let mut in_minor = Some(in_track.clone()); - const PROOF: &str = "in_minor initialized and assigned with Some; loop breaks if None assigned; qed"; + // get the release info for the latest version in track + let in_track = self.release_info(latest_in_track, &do_call)?; + let mut in_minor = Some(in_track.clone()); + const PROOF: &str = + "in_minor initialized and assigned with Some; loop breaks if None assigned; qed"; - // if the minor 
version has changed, let's check the minor version on a different track - while in_minor.as_ref().expect(PROOF).version.version.minor != this.version.minor { - let track = match in_minor.as_ref().expect(PROOF).version.track { - ReleaseTrack::Beta => ReleaseTrack::Stable, - ReleaseTrack::Nightly => ReleaseTrack::Beta, - _ => { in_minor = None; break; } - }; + // if the minor version has changed, let's check the minor version on a different track + while in_minor.as_ref().expect(PROOF).version.version.minor != this.version.minor { + let track = match in_minor.as_ref().expect(PROOF).version.track { + ReleaseTrack::Beta => ReleaseTrack::Stable, + ReleaseTrack::Nightly => ReleaseTrack::Beta, + _ => { + in_minor = None; + break; + } + }; - let latest_in_track = self.latest_hash(track, &do_call)?; - in_minor = Some(self.release_info(latest_in_track, &do_call)?); - } + let latest_in_track = self.latest_hash(track, &do_call)?; + in_minor = Some(self.release_info(latest_in_track, &do_call)?); + } - let (data, decoder) = operations::functions::latest_fork::call(); - let fork = do_call(data) - .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string()))? - .low_u64(); + let (data, decoder) = operations::functions::latest_fork::call(); + let fork = do_call(data) + .and_then(|value| decoder.decode(&value).map_err(|e| e.to_string()))? 
+ .low_u64(); - Ok(OperationsInfo { - fork, - this_fork, - track: in_track, - minor: in_minor, - }) - } + Ok(OperationsInfo { + fork, + this_fork, + track: in_track, + minor: in_minor, + }) + } - fn release_block_number(&self, from: BlockNumber, release: &ReleaseInfo) -> Option { - let client = self.client.upgrade()?; - let address = client.registry_address("operations".into(), BlockId::Latest)?; + fn release_block_number( + &self, + from: BlockNumber, + release: &ReleaseInfo, + ) -> Option { + let client = self.client.upgrade()?; + let address = client.registry_address("operations".into(), BlockId::Latest)?; - let topics = operations::events::release_added::filter(Some(*CLIENT_ID_HASH), Some(release.fork.into()), Some(release.is_critical)); - let topics = vec![topics.topic0, topics.topic1, topics.topic2, topics.topic3]; - let topics = topics.into_iter().map(Into::into).map(Some).collect(); + let topics = operations::events::release_added::filter( + Some(*CLIENT_ID_HASH), + Some(release.fork.into()), + Some(release.is_critical), + ); + let topics = vec![topics.topic0, topics.topic1, topics.topic2, topics.topic3]; + let topics = topics.into_iter().map(Into::into).map(Some).collect(); - let filter = Filter { - from_block: BlockId::Number(from), - to_block: BlockId::Latest, - address: Some(vec![address]), - topics, - limit: None, - }; + let filter = Filter { + from_block: BlockId::Number(from), + to_block: BlockId::Latest, + address: Some(vec![address]), + topics, + limit: None, + }; - client.logs(filter) - .unwrap_or_default() - .iter() - .filter_map(|log| { - let event = operations::events::release_added::parse_log((log.topics.clone(), log.data.clone()).into()).ok()?; - let version_info = VersionInfo::from_raw(event.semver.low_u32(), event.track.low_u32() as u8, event.release.into()); - if version_info == release.version { - Some(log.block_number) - } else { - None - } - }) - .last() - } + client + .logs(filter) + .unwrap_or_default() + .iter() + .filter_map(|log| { 
+ let event = operations::events::release_added::parse_log( + (log.topics.clone(), log.data.clone()).into(), + ) + .ok()?; + let version_info = VersionInfo::from_raw( + event.semver.low_u32(), + event.track.low_u32() as u8, + event.release.into(), + ); + if version_info == release.version { + Some(log.block_number) + } else { + None + } + }) + .last() + } } /// Trait to provide current time. Useful for mocking in tests. pub trait TimeProvider: Send + Sync + 'static { - /// Returns an instant corresponding to "now". - fn now(&self) -> Instant; + /// Returns an instant corresponding to "now". + fn now(&self) -> Instant; } /// `TimeProvider` implementation that delegates calls to `std::time`. pub struct StdTimeProvider; impl TimeProvider for StdTimeProvider { - fn now(&self) -> Instant { - Instant::now() - } + fn now(&self) -> Instant { + Instant::now() + } } /// Trait to generate a random number within a given range. /// Useful for mocking in tests. pub trait GenRange: Send + Sync + 'static { - /// Generate a random value in the range [low, high), i.e. inclusive of low and exclusive of high. - fn gen_range(&self, low: u64, high: u64) -> u64; + /// Generate a random value in the range [low, high), i.e. inclusive of low and exclusive of high. + fn gen_range(&self, low: u64, high: u64) -> u64; } /// `GenRange` implementation that uses a `rand::thread_rng` for randomness. 
pub struct ThreadRngGenRange; impl GenRange for ThreadRngGenRange { - fn gen_range(&self, low: u64, high: u64) -> u64 { - rand::thread_rng().gen_range(low, high) - } + fn gen_range(&self, low: u64, high: u64) -> u64 { + rand::thread_rng().gen_range(low, high) + } } impl Updater { - /// `Updater` constructor - pub fn new( - client: &Weak, - sync: &Weak, - update_policy: UpdatePolicy, - fetcher: fetch::Client, - ) -> Arc { - let r = Arc::new(Updater { - update_policy, - weak_self: Mutex::new(Default::default()), - client: client.clone(), - sync: Some(sync.clone()), - fetcher, - operations_client: OperationsContractClient::new( - client.clone()), - exit_handler: Mutex::new(None), - this: if cfg!(feature = "test-updater") { - VersionInfo { - track: ReleaseTrack::Stable, - version: Version::new(1, 3, 7), - hash: 0.into(), - } - } else { - VersionInfo::this() - }, - time_provider: StdTimeProvider, - rng: ThreadRngGenRange, - state: Mutex::new(Default::default()), - }); - *r.weak_self.lock() = Arc::downgrade(&r); + /// `Updater` constructor + pub fn new( + client: &Weak, + sync: &Weak, + update_policy: UpdatePolicy, + fetcher: fetch::Client, + ) -> Arc { + let r = Arc::new(Updater { + update_policy, + weak_self: Mutex::new(Default::default()), + client: client.clone(), + sync: Some(sync.clone()), + fetcher, + operations_client: OperationsContractClient::new(client.clone()), + exit_handler: Mutex::new(None), + this: if cfg!(feature = "test-updater") { + VersionInfo { + track: ReleaseTrack::Stable, + version: Version::new(1, 3, 7), + hash: 0.into(), + } + } else { + VersionInfo::this() + }, + time_provider: StdTimeProvider, + rng: ThreadRngGenRange, + state: Mutex::new(Default::default()), + }); + *r.weak_self.lock() = Arc::downgrade(&r); - r.poll(); - r - } + r.poll(); + r + } - fn update_file_name(v: &VersionInfo) -> String { - format!("parity-{}.{}.{}-{:x}", v.version.major, v.version.minor, v.version.patch, v.hash) - } + fn update_file_name(v: &VersionInfo) -> String { 
+ format!( + "parity-{}.{}.{}-{:x}", + v.version.major, v.version.minor, v.version.patch, v.hash + ) + } } impl Updater { - /// Set a closure to call when we want to restart the client - pub fn set_exit_handler(&self, g: G) where G: Fn() + 'static + Send { - *self.exit_handler.lock() = Some(Box::new(g)); - } + /// Set a closure to call when we want to restart the client + pub fn set_exit_handler(&self, g: G) + where + G: Fn() + 'static + Send, + { + *self.exit_handler.lock() = Some(Box::new(g)); + } - /// Returns release track of the parity node. - /// `update_policy.track` is the track specified from the command line, whereas `this.track` - /// is the track of the software which is currently run - fn track(&self) -> ReleaseTrack { - match self.update_policy.track { - ReleaseTrack::Unknown => self.this.track, - x => x, - } - } + /// Returns release track of the parity node. + /// `update_policy.track` is the track specified from the command line, whereas `this.track` + /// is the track of the software which is currently run + fn track(&self) -> ReleaseTrack { + match self.update_policy.track { + ReleaseTrack::Unknown => self.this.track, + x => x, + } + } - fn updates_path(&self, name: &str) -> PathBuf { - self.update_policy.path.join(name) - } + fn updates_path(&self, name: &str) -> PathBuf { + self.update_policy.path.join(name) + } - fn on_fetch(&self, latest: &OperationsInfo, res: Result) { - let mut state = self.state.lock(); + fn on_fetch(&self, latest: &OperationsInfo, res: Result) { + let mut state = self.state.lock(); - // Bail out if the latest release has changed in the meantime - if state.latest.as_ref() != Some(&latest) { - return; - } + // Bail out if the latest release has changed in the meantime + if state.latest.as_ref() != Some(&latest) { + return; + } - // The updated status should be set to fetching - if let UpdaterStatus::Fetching { ref release, binary, retries } = state.status.clone() { - match res { - // We've successfully fetched the binary - 
Ok(path) => { - let setup = |path: &Path| -> Result<(), String> { - let dest = self.updates_path(&Updater::update_file_name(&release.version)); - if !dest.exists() { - info!(target: "updater", "Fetched latest version ({}) OK to {}", release.version, path.display()); - fs::create_dir_all(dest.parent().expect("at least one thing pushed; qed")).map_err(|e| format!("Unable to create updates path: {:?}", e))?; - fs::copy(path, &dest).map_err(|e| format!("Unable to copy update: {:?}", e))?; - restrict_permissions_owner(&dest, false, true).map_err(|e| format!("Unable to update permissions: {}", e))?; - info!(target: "updater", "Copied updated binary to {}", dest.display()); - } + // The updated status should be set to fetching + if let UpdaterStatus::Fetching { + ref release, + binary, + retries, + } = state.status.clone() + { + match res { + // We've successfully fetched the binary + Ok(path) => { + let setup = |path: &Path| -> Result<(), String> { + let dest = self.updates_path(&Updater::update_file_name(&release.version)); + if !dest.exists() { + info!(target: "updater", "Fetched latest version ({}) OK to {}", release.version, path.display()); + fs::create_dir_all( + dest.parent().expect("at least one thing pushed; qed"), + ) + .map_err(|e| format!("Unable to create updates path: {:?}", e))?; + fs::copy(path, &dest) + .map_err(|e| format!("Unable to copy update: {:?}", e))?; + restrict_permissions_owner(&dest, false, true) + .map_err(|e| format!("Unable to update permissions: {}", e))?; + info!(target: "updater", "Copied updated binary to {}", dest.display()); + } - Ok(()) - }; + Ok(()) + }; - // There was a fatal error setting up the update, disable the updater - if let Err(err) = setup(&path) { - state.status = UpdaterStatus::Disabled; - warn!("{}", err); - } else { - state.status = UpdaterStatus::Ready { release: release.clone() }; - self.updater_step(state); - } - }, - // There was an error fetching the update, apply a backoff delay before retrying - Err(err) => { 
- let delay = 2_usize.pow(retries) as u64; - // cap maximum backoff to 1 day - let delay = cmp::min(delay, 24 * 60 * 60); - let backoff = (retries, self.time_provider.now() + Duration::from_secs(delay)); + // There was a fatal error setting up the update, disable the updater + if let Err(err) = setup(&path) { + state.status = UpdaterStatus::Disabled; + warn!("{}", err); + } else { + state.status = UpdaterStatus::Ready { + release: release.clone(), + }; + self.updater_step(state); + } + } + // There was an error fetching the update, apply a backoff delay before retrying + Err(err) => { + let delay = 2_usize.pow(retries) as u64; + // cap maximum backoff to 1 day + let delay = cmp::min(delay, 24 * 60 * 60); + let backoff = ( + retries, + self.time_provider.now() + Duration::from_secs(delay), + ); - state.status = UpdaterStatus::FetchBackoff { release: release.clone(), backoff, binary }; + state.status = UpdaterStatus::FetchBackoff { + release: release.clone(), + backoff, + binary, + }; - warn!("Unable to fetch update ({}): {:?}, retrying in {} seconds.", release.version, err, delay); - }, - } - } - } + warn!( + "Unable to fetch update ({}): {:?}, retrying in {} seconds.", + release.version, err, delay + ); + } + } + } + } - fn execute_upgrade(&self, mut state: MutexGuard) -> bool { - if let UpdaterStatus::Ready { ref release } = state.status.clone() { - let file = Updater::update_file_name(&release.version); - let path = self.updates_path("latest"); + fn execute_upgrade(&self, mut state: MutexGuard) -> bool { + if let UpdaterStatus::Ready { ref release } = state.status.clone() { + let file = Updater::update_file_name(&release.version); + let path = self.updates_path("latest"); - // TODO: creating then writing is a bit fragile. would be nice to make it atomic. - if let Err(err) = fs::File::create(&path).and_then(|mut f| f.write_all(file.as_bytes())) { - state.status = UpdaterStatus::Disabled; + // TODO: creating then writing is a bit fragile. 
would be nice to make it atomic. + if let Err(err) = fs::File::create(&path).and_then(|mut f| f.write_all(file.as_bytes())) + { + state.status = UpdaterStatus::Disabled; - warn!(target: "updater", "Unable to create soft-link for update {:?}", err); - return false; - } + warn!(target: "updater", "Unable to create soft-link for update {:?}", err); + return false; + } - info!(target: "updater", "Completed upgrade to {}", &release.version); - state.status = UpdaterStatus::Installed { release: release.clone() }; + info!(target: "updater", "Completed upgrade to {}", &release.version); + state.status = UpdaterStatus::Installed { + release: release.clone(), + }; - match *self.exit_handler.lock() { - Some(ref h) => (*h)(), - None => info!(target: "updater", "Update installed, ready for restart."), - } + match *self.exit_handler.lock() { + Some(ref h) => (*h)(), + None => info!(target: "updater", "Update installed, ready for restart."), + } - return true; - }; + return true; + }; - warn!(target: "updater", "Execute upgrade called when no upgrade ready."); - false - } + warn!(target: "updater", "Execute upgrade called when no upgrade ready."); + false + } - fn updater_step(&self, mut state: MutexGuard) { - let current_block_number = self.client.upgrade().map_or(0, |c| c.block_number(BlockId::Latest).unwrap_or(0)); + fn updater_step(&self, mut state: MutexGuard) { + let current_block_number = self + .client + .upgrade() + .map_or(0, |c| c.block_number(BlockId::Latest).unwrap_or(0)); - if let Some(latest) = state.latest.clone() { - let fetch = |latest, binary| { - info!(target: "updater", "Attempting to get parity binary {}", binary); - let weak_self = self.weak_self.lock().clone(); - let f = move |res: Result| { - if let Some(this) = weak_self.upgrade() { - this.on_fetch(&latest, res) - } - }; + if let Some(latest) = state.latest.clone() { + let fetch = |latest, binary| { + info!(target: "updater", "Attempting to get parity binary {}", binary); + let weak_self = 
self.weak_self.lock().clone(); + let f = move |res: Result| { + if let Some(this) = weak_self.upgrade() { + this.on_fetch(&latest, res) + } + }; - self.fetcher.fetch( - binary, - fetch::Abort::default().with_max_size(self.update_policy.max_size), - Box::new(f)); - }; + self.fetcher.fetch( + binary, + fetch::Abort::default().with_max_size(self.update_policy.max_size), + Box::new(f), + ); + }; - match state.status.clone() { - // updater is disabled - UpdaterStatus::Disabled => {}, - // the update has already been installed - UpdaterStatus::Installed { ref release, .. } if *release == latest.track => {}, - // we're currently fetching this update - UpdaterStatus::Fetching { ref release, .. } if *release == latest.track => {}, - // the fetch has failed and we're backing off the next retry - UpdaterStatus::FetchBackoff { ref release, backoff, .. } if *release == latest.track && self.time_provider.now() < backoff.1 => {}, - // we're delaying the update until the given block number - UpdaterStatus::Waiting { ref release, block_number, .. } if *release == latest.track && current_block_number < block_number => {}, - // we're at (or past) the block that triggers the update, let's fetch the binary - UpdaterStatus::Waiting { ref release, block_number, binary } if *release == latest.track && current_block_number >= block_number => { - info!(target: "updater", "Update for binary {} triggered", binary); + match state.status.clone() { + // updater is disabled + UpdaterStatus::Disabled => {} + // the update has already been installed + UpdaterStatus::Installed { ref release, .. } if *release == latest.track => {} + // we're currently fetching this update + UpdaterStatus::Fetching { ref release, .. } if *release == latest.track => {} + // the fetch has failed and we're backing off the next retry + UpdaterStatus::FetchBackoff { + ref release, + backoff, + .. 
+ } if *release == latest.track && self.time_provider.now() < backoff.1 => {} + // we're delaying the update until the given block number + UpdaterStatus::Waiting { + ref release, + block_number, + .. + } if *release == latest.track && current_block_number < block_number => {} + // we're at (or past) the block that triggers the update, let's fetch the binary + UpdaterStatus::Waiting { + ref release, + block_number, + binary, + } if *release == latest.track && current_block_number >= block_number => { + info!(target: "updater", "Update for binary {} triggered", binary); - state.status = UpdaterStatus::Fetching { release: release.clone(), binary, retries: 1 }; - fetch(latest, binary); - }, - // we're ready to retry the fetch after we applied a backoff for the previous failure - UpdaterStatus::FetchBackoff { ref release, backoff, binary } if *release == latest.track && self.time_provider.now() >= backoff.1 => { - state.status = UpdaterStatus::Fetching { release: release.clone(), binary, retries: backoff.0 + 1 }; - fetch(latest, binary); - }, - // the update is ready to be installed - UpdaterStatus::Ready { ref release } if *release == latest.track => { - let auto = match self.update_policy.filter { + state.status = UpdaterStatus::Fetching { + release: release.clone(), + binary, + retries: 1, + }; + fetch(latest, binary); + } + // we're ready to retry the fetch after we applied a backoff for the previous failure + UpdaterStatus::FetchBackoff { + ref release, + backoff, + binary, + } if *release == latest.track && self.time_provider.now() >= backoff.1 => { + state.status = UpdaterStatus::Fetching { + release: release.clone(), + binary, + retries: backoff.0 + 1, + }; + fetch(latest, binary); + } + // the update is ready to be installed + UpdaterStatus::Ready { ref release } if *release == latest.track => { + let auto = match self.update_policy.filter { UpdateFilter::All => true, UpdateFilter::Critical if release.is_critical /* TODO: or is on a bad fork */ => true, _ => 
false, }; - if auto { - self.execute_upgrade(state); - } - }, - // this is the default case that does the initial triggering to update. we can reach this case by being - // `Idle` but also if the latest release is updated, regardless of the state we're in (except if the - // updater is in the `Disabled` state). if we push a bad update (e.g. wrong hashes or download url) - // clients might eventually be on a really long backoff state for that release, but as soon a new - // release is pushed we'll fall through to the default case. - _ => { - if let Some(binary) = latest.track.binary { - let running_later = latest.track.version.version < self.version_info().version; - let running_latest = latest.track.version.hash == self.version_info().hash; + if auto { + self.execute_upgrade(state); + } + } + // this is the default case that does the initial triggering to update. we can reach this case by being + // `Idle` but also if the latest release is updated, regardless of the state we're in (except if the + // updater is in the `Disabled` state). if we push a bad update (e.g. wrong hashes or download url) + // clients might eventually be on a really long backoff state for that release, but as soon a new + // release is pushed we'll fall through to the default case. 
+ _ => { + if let Some(binary) = latest.track.binary { + let running_later = + latest.track.version.version < self.version_info().version; + let running_latest = latest.track.version.hash == self.version_info().hash; - // Bail out if we're already running the latest version or a later one - if running_later || running_latest { - return; - } + // Bail out if we're already running the latest version or a later one + if running_later || running_latest { + return; + } - let path = self.updates_path(&Updater::update_file_name(&latest.track.version)); - if path.exists() { - info!(target: "updater", "Already fetched binary."); - state.status = UpdaterStatus::Ready { release: latest.track.clone() }; - self.updater_step(state); + let path = + self.updates_path(&Updater::update_file_name(&latest.track.version)); + if path.exists() { + info!(target: "updater", "Already fetched binary."); + state.status = UpdaterStatus::Ready { + release: latest.track.clone(), + }; + self.updater_step(state); + } else if self.update_policy.enable_downloading { + let update_block_number = { + let max_delay = if latest.fork >= current_block_number { + cmp::min( + latest.fork - current_block_number, + self.update_policy.max_delay, + ) + } else { + self.update_policy.max_delay + }; - } else if self.update_policy.enable_downloading { - let update_block_number = { - let max_delay = if latest.fork >= current_block_number { - cmp::min(latest.fork - current_block_number, self.update_policy.max_delay) - } else { - self.update_policy.max_delay - }; + let from = current_block_number.saturating_sub(max_delay); + match self + .operations_client + .release_block_number(from, &latest.track) + { + Some(block_number) => { + let delay = self.rng.gen_range(0, max_delay); + block_number.saturating_add(delay) + } + None => current_block_number, + } + }; - let from = current_block_number.saturating_sub(max_delay); - match self.operations_client.release_block_number(from, &latest.track) { - Some(block_number) => { - 
let delay = self.rng.gen_range(0, max_delay); - block_number.saturating_add(delay) - }, - None => current_block_number, - } - }; + state.status = UpdaterStatus::Waiting { + release: latest.track.clone(), + binary, + block_number: update_block_number, + }; - state.status = UpdaterStatus::Waiting { release: latest.track.clone(), binary, block_number: update_block_number }; + if update_block_number > current_block_number { + info!(target: "updater", "Update for binary {} will be triggered at block {}", binary, update_block_number); + } else { + self.updater_step(state); + } + } + } + } + } + } + } - if update_block_number > current_block_number { - info!(target: "updater", "Update for binary {} will be triggered at block {}", binary, update_block_number); - } else { - self.updater_step(state); - } - } - } - }, - } - } - } + fn poll(&self) { + trace!(target: "updater", "Current release is {} ({:?})", self.this, self.this.hash); - fn poll(&self) { - trace!(target: "updater", "Current release is {} ({:?})", self.this, self.this.hash); + // We rely on a secure state. Bail if we're unsure about it. + if !cfg!(feature = "test-updater") { + if self + .client + .upgrade() + .map_or(true, |c| !c.chain_info().security_level().is_full()) + { + return; + } + } - // We rely on a secure state. Bail if we're unsure about it. 
- if !cfg!(feature = "test-updater") { - if self.client.upgrade().map_or(true, |c| !c.chain_info().security_level().is_full()) { - return; - } - } + // Only check for updates every n blocks + let current_block_number = self + .client + .upgrade() + .map_or(0, |c| c.block_number(BlockId::Latest).unwrap_or(0)); - // Only check for updates every n blocks - let current_block_number = self.client.upgrade().map_or(0, |c| c.block_number(BlockId::Latest).unwrap_or(0)); + if !cfg!(feature = "test-updater") { + if current_block_number % cmp::max(self.update_policy.frequency, 1) != 0 { + return; + } + } - if !cfg!(feature = "test-updater") { - if current_block_number % cmp::max(self.update_policy.frequency, 1) != 0 { - return; - } - } + let mut state = self.state.lock(); - let mut state = self.state.lock(); + // Get the latest available release + let latest = self.operations_client.latest(&self.this, self.track()).ok(); - // Get the latest available release - let latest = self.operations_client.latest(&self.this, self.track()).ok(); + if let Some(latest) = latest { + // Update current capability + state.capability = match latest.this_fork { + // We're behind the latest fork. Now is the time to be upgrading, perhaps we're too late... + Some(this_fork) if this_fork < latest.fork => { + if current_block_number >= latest.fork - 1 { + // We're at (or past) the last block we can import. Disable the client. + if self.update_policy.require_consensus { + if let Some(c) = self.client.upgrade() { + c.disable(); + } + } - if let Some(latest) = latest { - // Update current capability - state.capability = match latest.this_fork { - // We're behind the latest fork. Now is the time to be upgrading, perhaps we're too late... - Some(this_fork) if this_fork < latest.fork => { - if current_block_number >= latest.fork - 1 { - // We're at (or past) the last block we can import. Disable the client. 
- if self.update_policy.require_consensus { - if let Some(c) = self.client.upgrade() { - c.disable(); - } - } + CapState::IncapableSince(latest.fork) + } else { + CapState::CapableUntil(latest.fork) + } + } + Some(_) => CapState::Capable, + None => CapState::Unknown, + }; - CapState::IncapableSince(latest.fork) - } else { - CapState::CapableUntil(latest.fork) - } - }, - Some(_) => CapState::Capable, - None => CapState::Unknown, - }; - - // There's a new release available - if state.latest.as_ref() != Some(&latest) { - trace!(target: "updater", "Latest release in our track is v{} it is {}critical ({} binary is {})", + // There's a new release available + if state.latest.as_ref() != Some(&latest) { + trace!(target: "updater", "Latest release in our track is v{} it is {}critical ({} binary is {})", latest.track.version, if latest.track.is_critical {""} else {"non-"}, *PLATFORM, latest.track.binary.map_or_else(|| "unreleased".into(), |b| format!("{}", b))); - trace!(target: "updater", "Fork: this/current/latest/latest-known: {}/#{}/#{}/#{}", + trace!(target: "updater", "Fork: this/current/latest/latest-known: {}/#{}/#{}/#{}", latest.this_fork.map_or_else(|| "unknown".into(), |f| format!("#{}", f)), current_block_number, latest.track.fork, latest.fork); - // Update latest release - state.latest = Some(latest); - } - } + // Update latest release + state.latest = Some(latest); + } + } - self.updater_step(state); - } + self.updater_step(state); + } } impl ChainNotify for Updater { - fn new_blocks(&self, new_blocks: NewBlocks) { - if new_blocks.has_more_blocks_to_import { return } - match (self.client.upgrade(), self.sync.as_ref().and_then(Weak::upgrade)) { - (Some(ref c), Some(ref s)) if !s.status().is_syncing(c.queue_info()) => self.poll(), - _ => {}, - } - } + fn new_blocks(&self, new_blocks: NewBlocks) { + if new_blocks.has_more_blocks_to_import { + return; + } + match ( + self.client.upgrade(), + self.sync.as_ref().and_then(Weak::upgrade), + ) { + (Some(ref c), 
Some(ref s)) if !s.status().is_syncing(c.queue_info()) => self.poll(), + _ => {} + } + } } -impl Service for Updater { - fn capability(&self) -> CapState { - self.state.lock().capability - } +impl Service + for Updater +{ + fn capability(&self) -> CapState { + self.state.lock().capability + } - fn upgrade_ready(&self) -> Option { - match self.state.lock().status { - UpdaterStatus::Ready { ref release, .. } => Some(release.clone()), - _ => None, - } - } + fn upgrade_ready(&self) -> Option { + match self.state.lock().status { + UpdaterStatus::Ready { ref release, .. } => Some(release.clone()), + _ => None, + } + } - fn execute_upgrade(&self) -> bool { - let state = self.state.lock(); - self.execute_upgrade(state) - } + fn execute_upgrade(&self) -> bool { + let state = self.state.lock(); + self.execute_upgrade(state) + } - fn version_info(&self) -> VersionInfo { - self.this.clone() - } + fn version_info(&self) -> VersionInfo { + self.this.clone() + } - fn info(&self) -> Option { - self.state.lock().latest.clone() - } + fn info(&self) -> Option { + self.state.lock().latest.clone() + } } #[cfg(test)] pub mod tests { - use std::fs::File; - use std::io::Read; - use std::sync::Arc; - use semver::Version; - use tempdir::TempDir; - use ethcore::client::{TestBlockChainClient, EachBlockWith}; - use self::fetch::Error; - use super::*; + use self::fetch::Error; + use super::*; + use ethcore::client::{EachBlockWith, TestBlockChainClient}; + use semver::Version; + use std::{fs::File, io::Read, sync::Arc}; + use tempdir::TempDir; - #[derive(Clone)] - struct FakeOperationsClient { - result: Arc, Option)>>, - } + #[derive(Clone)] + struct FakeOperationsClient { + result: Arc, Option)>>, + } - impl FakeOperationsClient { - fn new() -> FakeOperationsClient { - FakeOperationsClient { result: Arc::new(Mutex::new((None, None))) } - } + impl FakeOperationsClient { + fn new() -> FakeOperationsClient { + FakeOperationsClient { + result: Arc::new(Mutex::new((None, None))), + } + } - fn 
set_result(&self, operations_info: Option, release_block_number: Option) { - let mut result = self.result.lock(); - result.0 = operations_info; - result.1 = release_block_number; - } - } + fn set_result( + &self, + operations_info: Option, + release_block_number: Option, + ) { + let mut result = self.result.lock(); + result.0 = operations_info; + result.1 = release_block_number; + } + } - impl OperationsClient for FakeOperationsClient { - fn latest(&self, _this: &VersionInfo, _track: ReleaseTrack) -> Result { - self.result.lock().0.clone().ok_or("unavailable".into()) - } + impl OperationsClient for FakeOperationsClient { + fn latest( + &self, + _this: &VersionInfo, + _track: ReleaseTrack, + ) -> Result { + self.result.lock().0.clone().ok_or("unavailable".into()) + } - fn release_block_number(&self, _from: BlockNumber, _release: &ReleaseInfo) -> Option { - self.result.lock().1.clone() - } - } + fn release_block_number( + &self, + _from: BlockNumber, + _release: &ReleaseInfo, + ) -> Option { + self.result.lock().1.clone() + } + } - #[derive(Clone)] - struct FakeFetch { - on_done: Arc) + Send>>>>, - } + #[derive(Clone)] + struct FakeFetch { + on_done: Arc) + Send>>>>, + } - impl FakeFetch { - fn new() -> FakeFetch { - FakeFetch { on_done: Arc::new(Mutex::new(None)) } - } + impl FakeFetch { + fn new() -> FakeFetch { + FakeFetch { + on_done: Arc::new(Mutex::new(None)), + } + } - fn trigger(&self, result: Option) { - if let Some(ref on_done) = *self.on_done.lock() { - on_done(result.ok_or(Error::NoResolution)) - } - } - } + fn trigger(&self, result: Option) { + if let Some(ref on_done) = *self.on_done.lock() { + on_done(result.ok_or(Error::NoResolution)) + } + } + } - impl HashFetch for FakeFetch { - fn fetch(&self, _hash: H256, _abort: fetch::Abort, on_done: Box) + Send>) { - *self.on_done.lock() = Some(on_done); - } - } + impl HashFetch for FakeFetch { + fn fetch( + &self, + _hash: H256, + _abort: fetch::Abort, + on_done: Box) + Send>, + ) { + *self.on_done.lock() = 
Some(on_done); + } + } - #[derive(Clone)] - struct FakeTimeProvider { - result: Arc>, - } + #[derive(Clone)] + struct FakeTimeProvider { + result: Arc>, + } - impl FakeTimeProvider { - fn new() -> FakeTimeProvider { - FakeTimeProvider { result: Arc::new(Mutex::new(Instant::now())) } - } + impl FakeTimeProvider { + fn new() -> FakeTimeProvider { + FakeTimeProvider { + result: Arc::new(Mutex::new(Instant::now())), + } + } - fn set_result(&self, result: Instant) { - *self.result.lock() = result; - } - } + fn set_result(&self, result: Instant) { + *self.result.lock() = result; + } + } - impl TimeProvider for FakeTimeProvider { - fn now(&self) -> Instant { - *self.result.lock() - } - } + impl TimeProvider for FakeTimeProvider { + fn now(&self) -> Instant { + *self.result.lock() + } + } - #[derive(Clone)] - struct FakeGenRange { - result: Arc>, - } + #[derive(Clone)] + struct FakeGenRange { + result: Arc>, + } - impl FakeGenRange { - fn new() -> FakeGenRange { - FakeGenRange { result: Arc::new(Mutex::new(0)) } - } + impl FakeGenRange { + fn new() -> FakeGenRange { + FakeGenRange { + result: Arc::new(Mutex::new(0)), + } + } - fn set_result(&self, result: u64) { - *self.result.lock() = result; - } - } + fn set_result(&self, result: u64) { + *self.result.lock() = result; + } + } - impl GenRange for FakeGenRange { - fn gen_range(&self, _low: u64, _high: u64) -> u64 { - *self.result.lock() - } - } + impl GenRange for FakeGenRange { + fn gen_range(&self, _low: u64, _high: u64) -> u64 { + *self.result.lock() + } + } - type TestUpdater = Updater; + type TestUpdater = Updater; - fn setup(update_policy: UpdatePolicy) -> ( - Arc, - Arc, - FakeOperationsClient, - FakeFetch, - FakeTimeProvider, - FakeGenRange) { + fn setup( + update_policy: UpdatePolicy, + ) -> ( + Arc, + Arc, + FakeOperationsClient, + FakeFetch, + FakeTimeProvider, + FakeGenRange, + ) { + let client = Arc::new(TestBlockChainClient::new()); + let weak_client = Arc::downgrade(&client); - let client = 
Arc::new(TestBlockChainClient::new()); - let weak_client = Arc::downgrade(&client); + let operations_client = FakeOperationsClient::new(); + let fetcher = FakeFetch::new(); + let time_provider = FakeTimeProvider::new(); + let rng = FakeGenRange::new(); - let operations_client = FakeOperationsClient::new(); - let fetcher = FakeFetch::new(); - let time_provider = FakeTimeProvider::new(); - let rng = FakeGenRange::new(); + let this = VersionInfo { + track: ReleaseTrack::Beta, + version: Version::parse("1.0.0").unwrap(), + hash: 0.into(), + }; - let this = VersionInfo { - track: ReleaseTrack::Beta, - version: Version::parse("1.0.0").unwrap(), - hash: 0.into(), - }; + let updater = Arc::new(Updater { + update_policy: update_policy, + weak_self: Mutex::new(Default::default()), + client: weak_client, + sync: None, + fetcher: fetcher.clone(), + operations_client: operations_client.clone(), + exit_handler: Mutex::new(None), + this: this, + time_provider: time_provider.clone(), + rng: rng.clone(), + state: Mutex::new(Default::default()), + }); - let updater = Arc::new(Updater { - update_policy: update_policy, - weak_self: Mutex::new(Default::default()), - client: weak_client, - sync: None, - fetcher: fetcher.clone(), - operations_client: operations_client.clone(), - exit_handler: Mutex::new(None), - this: this, - time_provider: time_provider.clone(), - rng: rng.clone(), - state: Mutex::new(Default::default()), - }); + *updater.weak_self.lock() = Arc::downgrade(&updater); - *updater.weak_self.lock() = Arc::downgrade(&updater); + ( + client, + updater, + operations_client, + fetcher, + time_provider, + rng, + ) + } - (client, updater, operations_client, fetcher, time_provider, rng) - } + fn update_policy() -> (UpdatePolicy, TempDir) { + let tempdir = TempDir::new("").unwrap(); - fn update_policy() -> (UpdatePolicy, TempDir) { - let tempdir = TempDir::new("").unwrap(); + let update_policy = UpdatePolicy { + path: tempdir.path().into(), + enable_downloading: true, + max_delay: 
10, + frequency: 1, + ..Default::default() + }; - let update_policy = UpdatePolicy { - path: tempdir.path().into(), - enable_downloading: true, - max_delay: 10, - frequency: 1, - ..Default::default() - }; + (update_policy, tempdir) + } - (update_policy, tempdir) - } + fn new_upgrade(version: &str) -> (VersionInfo, ReleaseInfo, OperationsInfo) { + let latest_version = VersionInfo { + track: ReleaseTrack::Beta, + version: Version::parse(version).unwrap(), + hash: 1.into(), + }; - fn new_upgrade(version: &str) -> (VersionInfo, ReleaseInfo, OperationsInfo) { - let latest_version = VersionInfo { - track: ReleaseTrack::Beta, - version: Version::parse(version).unwrap(), - hash: 1.into(), - }; + let latest_release = ReleaseInfo { + version: latest_version.clone(), + is_critical: false, + fork: 0, + binary: Some(0.into()), + }; - let latest_release = ReleaseInfo { - version: latest_version.clone(), - is_critical: false, - fork: 0, - binary: Some(0.into()), - }; + let latest = OperationsInfo { + fork: 0, + this_fork: Some(0), + track: latest_release.clone(), + minor: None, + }; - let latest = OperationsInfo { - fork: 0, - this_fork: Some(0), - track: latest_release.clone(), - minor: None, - }; + (latest_version, latest_release, latest) + } - (latest_version, latest_release, latest) - } + #[test] + fn should_stay_idle_when_no_release() { + let (update_policy, _) = update_policy(); + let (_client, updater, _, _, ..) = setup(update_policy); - #[test] - fn should_stay_idle_when_no_release() { - let (update_policy, _) = update_policy(); - let (_client, updater, _, _, ..) 
= setup(update_policy); + assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); + updater.poll(); + assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); + } - assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); - updater.poll(); - assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); - } + #[test] + fn should_update_on_new_release() { + let (update_policy, tempdir) = update_policy(); + let (_client, updater, operations_client, fetcher, ..) = setup(update_policy); + let (latest_version, latest_release, latest) = new_upgrade("1.0.1"); - #[test] - fn should_update_on_new_release() { - let (update_policy, tempdir) = update_policy(); - let (_client, updater, operations_client, fetcher, ..) = setup(update_policy); - let (latest_version, latest_release, latest) = new_upgrade("1.0.1"); + // mock operations contract with a new version + operations_client.set_result(Some(latest.clone()), None); - // mock operations contract with a new version - operations_client.set_result(Some(latest.clone()), None); + // we start in idle state and with no information regarding the latest release + assert_eq!(updater.state.lock().latest, None); + assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); - // we start in idle state and with no information regarding the latest release - assert_eq!(updater.state.lock().latest, None); - assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); + updater.poll(); - updater.poll(); - - // after the first poll the latest release should be set to the one we're mocking and the updater should be - // fetching it - assert_eq!(updater.state.lock().latest, Some(latest)); - assert_matches!( + // after the first poll the latest release should be set to the one we're mocking and the updater should be + // fetching it + assert_eq!(updater.state.lock().latest, Some(latest)); + assert_matches!( updater.state.lock().status, UpdaterStatus::Fetching { ref release, retries, .. 
} if *release == latest_release && retries == 1); - // mock fetcher with update binary and trigger the fetch - let update_file = tempdir.path().join("parity"); - File::create(update_file.clone()).unwrap(); - fetcher.trigger(Some(update_file)); + // mock fetcher with update binary and trigger the fetch + let update_file = tempdir.path().join("parity"); + File::create(update_file.clone()).unwrap(); + fetcher.trigger(Some(update_file)); - // after the fetch finishes the upgrade should be ready to install - assert_eq!(updater.state.lock().status, UpdaterStatus::Ready { release: latest_release.clone() }); - assert_eq!(updater.upgrade_ready(), Some(latest_release.clone())); + // after the fetch finishes the upgrade should be ready to install + assert_eq!( + updater.state.lock().status, + UpdaterStatus::Ready { + release: latest_release.clone() + } + ); + assert_eq!(updater.upgrade_ready(), Some(latest_release.clone())); - // the current update_policy doesn't allow updating automatically, but we can trigger the update manually - ::execute_upgrade(&*updater); + // the current update_policy doesn't allow updating automatically, but we can trigger the update manually + ::execute_upgrade(&*updater); - assert_eq!(updater.state.lock().status, UpdaterStatus::Installed { release: latest_release }); + assert_eq!( + updater.state.lock().status, + UpdaterStatus::Installed { + release: latest_release + } + ); - // the final binary should exist in the updates folder and the 'latest' file should be updated to point to it - let updated_binary = tempdir.path().join(Updater::update_file_name(&latest_version)); - let latest_file = tempdir.path().join("latest"); + // the final binary should exist in the updates folder and the 'latest' file should be updated to point to it + let updated_binary = tempdir + .path() + .join(Updater::update_file_name(&latest_version)); + let latest_file = tempdir.path().join("latest"); - assert!(updated_binary.exists()); - assert!(latest_file.exists()); + 
assert!(updated_binary.exists()); + assert!(latest_file.exists()); - let mut latest_file_content = String::new(); - File::open(latest_file).unwrap().read_to_string(&mut latest_file_content).unwrap(); + let mut latest_file_content = String::new(); + File::open(latest_file) + .unwrap() + .read_to_string(&mut latest_file_content) + .unwrap(); - assert_eq!(latest_file_content, updated_binary.file_name().and_then(|n| n.to_str()).unwrap()); - } + assert_eq!( + latest_file_content, + updated_binary.file_name().and_then(|n| n.to_str()).unwrap() + ); + } - #[test] - fn should_randomly_delay_new_updates() { - let (update_policy, _) = update_policy(); - let (client, updater, operations_client, _, _, rng) = setup(update_policy); + #[test] + fn should_randomly_delay_new_updates() { + let (update_policy, _) = update_policy(); + let (client, updater, operations_client, _, _, rng) = setup(update_policy); - let (_, latest_release, latest) = new_upgrade("1.0.1"); - operations_client.set_result(Some(latest.clone()), Some(0)); + let (_, latest_release, latest) = new_upgrade("1.0.1"); + operations_client.set_result(Some(latest.clone()), Some(0)); - rng.set_result(5); + rng.set_result(5); - updater.poll(); + updater.poll(); - // the update should be delayed for 5 blocks - assert_matches!( + // the update should be delayed for 5 blocks + assert_matches!( updater.state.lock().status, UpdaterStatus::Waiting { ref release, block_number, .. } if *release == latest_release && block_number == 5); - client.add_blocks(1, EachBlockWith::Nothing); - updater.poll(); + client.add_blocks(1, EachBlockWith::Nothing); + updater.poll(); - // we should still be in the waiting state after we push one block - assert_matches!( + // we should still be in the waiting state after we push one block + assert_matches!( updater.state.lock().status, UpdaterStatus::Waiting { ref release, block_number, .. 
} if *release == latest_release && block_number == 5); - client.add_blocks(5, EachBlockWith::Nothing); - updater.poll(); + client.add_blocks(5, EachBlockWith::Nothing); + updater.poll(); - // after we're past the delay the status should switch to fetching - assert_matches!( + // after we're past the delay the status should switch to fetching + assert_matches!( updater.state.lock().status, UpdaterStatus::Fetching { ref release, .. } if *release == latest_release); - } + } - #[test] - fn should_not_delay_old_updates() { - let (update_policy, _) = update_policy(); - let (client, updater, operations_client, ..) = setup(update_policy); - client.add_blocks(100, EachBlockWith::Nothing); + #[test] + fn should_not_delay_old_updates() { + let (update_policy, _) = update_policy(); + let (client, updater, operations_client, ..) = setup(update_policy); + client.add_blocks(100, EachBlockWith::Nothing); - let (_, latest_release, latest) = new_upgrade("1.0.1"); - operations_client.set_result(Some(latest.clone()), Some(0)); + let (_, latest_release, latest) = new_upgrade("1.0.1"); + operations_client.set_result(Some(latest.clone()), Some(0)); - updater.poll(); + updater.poll(); - // the update should not be delayed since it's older than the maximum delay - // the update was at block 0 (100 blocks ago), and the maximum delay is 10 blocks - assert_matches!( + // the update should not be delayed since it's older than the maximum delay + // the update was at block 0 (100 blocks ago), and the maximum delay is 10 blocks + assert_matches!( updater.state.lock().status, UpdaterStatus::Fetching { ref release, .. 
} if *release == latest_release); - } + } - #[test] - fn should_check_for_updates_with_configured_frequency() { - let (mut update_policy, _) = update_policy(); - update_policy.frequency = 2; + #[test] + fn should_check_for_updates_with_configured_frequency() { + let (mut update_policy, _) = update_policy(); + update_policy.frequency = 2; - let (client, updater, operations_client, _, _, rng) = setup(update_policy); - let (_, latest_release, latest) = new_upgrade("1.0.1"); - operations_client.set_result(Some(latest.clone()), Some(0)); - rng.set_result(5); + let (client, updater, operations_client, _, _, rng) = setup(update_policy); + let (_, latest_release, latest) = new_upgrade("1.0.1"); + operations_client.set_result(Some(latest.clone()), Some(0)); + rng.set_result(5); - client.add_blocks(1, EachBlockWith::Nothing); - updater.poll(); + client.add_blocks(1, EachBlockWith::Nothing); + updater.poll(); - // the updater should stay idle since we only check for updates every other block (odd blocks in this case) - assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); + // the updater should stay idle since we only check for updates every other block (odd blocks in this case) + assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); - client.add_blocks(1, EachBlockWith::Nothing); - updater.poll(); + client.add_blocks(1, EachBlockWith::Nothing); + updater.poll(); - // after adding a block we check for a new update and trigger the random delay (of 5 blocks) - assert_matches!( + // after adding a block we check for a new update and trigger the random delay (of 5 blocks) + assert_matches!( updater.state.lock().status, UpdaterStatus::Waiting { ref release, block_number, .. } if *release == latest_release && block_number == 5); - } + } - #[test] - fn should_backoff_retry_when_update_fails() { - let (update_policy, tempdir) = update_policy(); - let (_client, updater, operations_client, fetcher, time_provider, ..) 
= setup(update_policy); - let (_, latest_release, latest) = new_upgrade("1.0.1"); + #[test] + fn should_backoff_retry_when_update_fails() { + let (update_policy, tempdir) = update_policy(); + let (_client, updater, operations_client, fetcher, time_provider, ..) = + setup(update_policy); + let (_, latest_release, latest) = new_upgrade("1.0.1"); - // mock operations contract with a new version - operations_client.set_result(Some(latest.clone()), None); + // mock operations contract with a new version + operations_client.set_result(Some(latest.clone()), None); - let mut now = Instant::now(); - time_provider.set_result(now); + let mut now = Instant::now(); + time_provider.set_result(now); - updater.poll(); - fetcher.trigger(None); + updater.poll(); + fetcher.trigger(None); - // we triggered the fetcher with an error result so the updater should backoff any retry - assert_matches!( + // we triggered the fetcher with an error result so the updater should backoff any retry + assert_matches!( updater.state.lock().status, UpdaterStatus::FetchBackoff { ref release, ref backoff, .. } if *release == latest_release && backoff.0 == 1); - now += Duration::from_secs(1); - time_provider.set_result(now); - updater.poll(); + now += Duration::from_secs(1); + time_provider.set_result(now); + updater.poll(); - // if we don't wait for the elapsed time the updater status should stay the same - assert_matches!( + // if we don't wait for the elapsed time the updater status should stay the same + assert_matches!( updater.state.lock().status, UpdaterStatus::FetchBackoff { ref release, ref backoff, .. 
} if *release == latest_release && backoff.0 == 1); - now += Duration::from_secs(1); - time_provider.set_result(now); - updater.poll(); - fetcher.trigger(None); + now += Duration::from_secs(1); + time_provider.set_result(now); + updater.poll(); + fetcher.trigger(None); - // the backoff time has elapsed so we retried again (and failed) - assert_matches!( + // the backoff time has elapsed so we retried again (and failed) + assert_matches!( updater.state.lock().status, UpdaterStatus::FetchBackoff { ref release, ref backoff, .. } if *release == latest_release && backoff.0 == 2); - now += Duration::from_secs(4); - time_provider.set_result(now); - updater.poll(); + now += Duration::from_secs(4); + time_provider.set_result(now); + updater.poll(); - let update_file = tempdir.path().join("parity"); - File::create(update_file.clone()).unwrap(); - fetcher.trigger(Some(update_file)); + let update_file = tempdir.path().join("parity"); + File::create(update_file.clone()).unwrap(); + fetcher.trigger(Some(update_file)); - // after setting up the mocked fetch and waiting for the backoff period the update should succeed - assert_eq!(updater.state.lock().status, UpdaterStatus::Ready { release: latest_release }); - } + // after setting up the mocked fetch and waiting for the backoff period the update should succeed + assert_eq!( + updater.state.lock().status, + UpdaterStatus::Ready { + release: latest_release + } + ); + } - #[test] - fn should_quit_backoff_on_new_release() { - let (update_policy, tempdir) = update_policy(); - let (_client, updater, operations_client, fetcher, ..) = setup(update_policy); - let (_, latest_release, latest) = new_upgrade("1.0.1"); + #[test] + fn should_quit_backoff_on_new_release() { + let (update_policy, tempdir) = update_policy(); + let (_client, updater, operations_client, fetcher, ..) 
= setup(update_policy); + let (_, latest_release, latest) = new_upgrade("1.0.1"); - // mock operations contract with a new version - operations_client.set_result(Some(latest.clone()), None); + // mock operations contract with a new version + operations_client.set_result(Some(latest.clone()), None); - updater.poll(); - fetcher.trigger(None); + updater.poll(); + fetcher.trigger(None); - // we triggered the fetcher with an error result so the updater should backoff any retry - assert_matches!( + // we triggered the fetcher with an error result so the updater should backoff any retry + assert_matches!( updater.state.lock().status, UpdaterStatus::FetchBackoff { ref release, ref backoff, .. } if *release == latest_release && backoff.0 == 1); - // mock new working release and trigger the fetch afterwards - let (_, latest_release, latest) = new_upgrade("1.0.2"); - operations_client.set_result(Some(latest.clone()), None); - let update_file = tempdir.path().join("parity"); - File::create(update_file.clone()).unwrap(); + // mock new working release and trigger the fetch afterwards + let (_, latest_release, latest) = new_upgrade("1.0.2"); + operations_client.set_result(Some(latest.clone()), None); + let update_file = tempdir.path().join("parity"); + File::create(update_file.clone()).unwrap(); - updater.poll(); - fetcher.trigger(Some(update_file)); + updater.poll(); + fetcher.trigger(Some(update_file)); - // a new release should short-circuit the backoff - assert_eq!(updater.state.lock().status, UpdaterStatus::Ready { release: latest_release }); - } + // a new release should short-circuit the backoff + assert_eq!( + updater.state.lock().status, + UpdaterStatus::Ready { + release: latest_release + } + ); + } - #[test] - fn should_detect_already_downloaded_releases() { - let (update_policy, tempdir) = update_policy(); - let (_client, updater, operations_client, ..) 
= setup(update_policy); - let (latest_version, latest_release, latest) = new_upgrade("1.0.1"); + #[test] + fn should_detect_already_downloaded_releases() { + let (update_policy, tempdir) = update_policy(); + let (_client, updater, operations_client, ..) = setup(update_policy); + let (latest_version, latest_release, latest) = new_upgrade("1.0.1"); - // mock operations contract with a new version - operations_client.set_result(Some(latest.clone()), None); + // mock operations contract with a new version + operations_client.set_result(Some(latest.clone()), None); - // mock final update file - let update_file = tempdir.path().join(Updater::update_file_name(&latest_version)); - File::create(update_file.clone()).unwrap(); + // mock final update file + let update_file = tempdir + .path() + .join(Updater::update_file_name(&latest_version)); + File::create(update_file.clone()).unwrap(); - updater.poll(); + updater.poll(); - // after checking for a new update we immediately declare it as ready since it already exists on disk - // there was no need to trigger the fetch - assert_eq!(updater.state.lock().status, UpdaterStatus::Ready { release: latest_release }); - } + // after checking for a new update we immediately declare it as ready since it already exists on disk + // there was no need to trigger the fetch + assert_eq!( + updater.state.lock().status, + UpdaterStatus::Ready { + release: latest_release + } + ); + } - #[test] - fn should_stay_disabled_after_fatal_error() { - let (update_policy, tempdir) = update_policy(); - let (client, updater, operations_client, fetcher, ..) = setup(update_policy); - let (_, _, latest) = new_upgrade("1.0.1"); + #[test] + fn should_stay_disabled_after_fatal_error() { + let (update_policy, tempdir) = update_policy(); + let (client, updater, operations_client, fetcher, ..) 
= setup(update_policy); + let (_, _, latest) = new_upgrade("1.0.1"); - // mock operations contract with a new version - operations_client.set_result(Some(latest.clone()), None); + // mock operations contract with a new version + operations_client.set_result(Some(latest.clone()), None); - updater.poll(); - // trigger the fetch but don't create the file on-disk. this should lead to a fatal error that disables the updater - let update_file = tempdir.path().join("parity"); - fetcher.trigger(Some(update_file)); + updater.poll(); + // trigger the fetch but don't create the file on-disk. this should lead to a fatal error that disables the updater + let update_file = tempdir.path().join("parity"); + fetcher.trigger(Some(update_file)); - assert_eq!(updater.state.lock().status, UpdaterStatus::Disabled); + assert_eq!(updater.state.lock().status, UpdaterStatus::Disabled); - client.add_blocks(100, EachBlockWith::Nothing); - updater.poll(); + client.add_blocks(100, EachBlockWith::Nothing); + updater.poll(); - // the updater should stay disabled after new blocks are pushed - assert_eq!(updater.state.lock().status, UpdaterStatus::Disabled); + // the updater should stay disabled after new blocks are pushed + assert_eq!(updater.state.lock().status, UpdaterStatus::Disabled); - let (_, _, latest) = new_upgrade("1.0.2"); - operations_client.set_result(Some(latest.clone()), None); + let (_, _, latest) = new_upgrade("1.0.2"); + operations_client.set_result(Some(latest.clone()), None); - updater.poll(); + updater.poll(); - // the updater should stay disabled after a new release is pushed - assert_eq!(updater.state.lock().status, UpdaterStatus::Disabled); - } + // the updater should stay disabled after a new release is pushed + assert_eq!(updater.state.lock().status, UpdaterStatus::Disabled); + } - #[test] - fn should_ignore_current_fetch_on_new_release() { - let (update_policy, _) = update_policy(); - let (_client, updater, operations_client, fetcher, ..) 
= setup(update_policy); - let (_, latest_release, latest) = new_upgrade("1.0.1"); + #[test] + fn should_ignore_current_fetch_on_new_release() { + let (update_policy, _) = update_policy(); + let (_client, updater, operations_client, fetcher, ..) = setup(update_policy); + let (_, latest_release, latest) = new_upgrade("1.0.1"); - // mock operations contract with a new version - operations_client.set_result(Some(latest.clone()), None); + // mock operations contract with a new version + operations_client.set_result(Some(latest.clone()), None); - updater.poll(); + updater.poll(); - assert_matches!( + assert_matches!( updater.state.lock().status, UpdaterStatus::Fetching { ref release, .. } if *release == latest_release); - let (_, latest_release, latest) = new_upgrade("1.0.2"); - operations_client.set_result(Some(latest.clone()), None); - fetcher.trigger(None); - updater.poll(); + let (_, latest_release, latest) = new_upgrade("1.0.2"); + operations_client.set_result(Some(latest.clone()), None); + fetcher.trigger(None); + updater.poll(); - // even though we triggered the previous fetch with an error, the current state was updated to fetch the new - // release, and the previous fetch is ignored - assert_matches!( + // even though we triggered the previous fetch with an error, the current state was updated to fetch the new + // release, and the previous fetch is ignored + assert_matches!( updater.state.lock().status, UpdaterStatus::Fetching { ref release, .. } if *release == latest_release); - } + } - #[test] - fn should_auto_install_updates_if_update_policy_allows() { - let (mut update_policy, tempdir) = update_policy(); - update_policy.filter = UpdateFilter::All; - let (_client, updater, operations_client, fetcher, ..) 
= setup(update_policy); - let (latest_version, latest_release, latest) = new_upgrade("1.0.1"); + #[test] + fn should_auto_install_updates_if_update_policy_allows() { + let (mut update_policy, tempdir) = update_policy(); + update_policy.filter = UpdateFilter::All; + let (_client, updater, operations_client, fetcher, ..) = setup(update_policy); + let (latest_version, latest_release, latest) = new_upgrade("1.0.1"); - // mock operations contract with a new version - operations_client.set_result(Some(latest.clone()), None); + // mock operations contract with a new version + operations_client.set_result(Some(latest.clone()), None); - // we start in idle state and with no information regarding the latest release - assert_eq!(updater.state.lock().latest, None); - assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); + // we start in idle state and with no information regarding the latest release + assert_eq!(updater.state.lock().latest, None); + assert_eq!(updater.state.lock().status, UpdaterStatus::Idle); - updater.poll(); + updater.poll(); - // mock fetcher with update binary and trigger the fetch - let update_file = tempdir.path().join("parity"); - File::create(update_file.clone()).unwrap(); - fetcher.trigger(Some(update_file)); + // mock fetcher with update binary and trigger the fetch + let update_file = tempdir.path().join("parity"); + File::create(update_file.clone()).unwrap(); + fetcher.trigger(Some(update_file)); - // the update is auto installed since the update policy allows it - assert_eq!(updater.state.lock().status, UpdaterStatus::Installed { release: latest_release }); + // the update is auto installed since the update policy allows it + assert_eq!( + updater.state.lock().status, + UpdaterStatus::Installed { + release: latest_release + } + ); - // the final binary should exist in the updates folder and the 'latest' file should be updated to point to it - let updated_binary = tempdir.path().join(Updater::update_file_name(&latest_version)); - let 
latest_file = tempdir.path().join("latest"); + // the final binary should exist in the updates folder and the 'latest' file should be updated to point to it + let updated_binary = tempdir + .path() + .join(Updater::update_file_name(&latest_version)); + let latest_file = tempdir.path().join("latest"); - assert!(updated_binary.exists()); - assert!(latest_file.exists()); + assert!(updated_binary.exists()); + assert!(latest_file.exists()); - let mut latest_file_content = String::new(); - File::open(latest_file).unwrap().read_to_string(&mut latest_file_content).unwrap(); + let mut latest_file_content = String::new(); + File::open(latest_file) + .unwrap() + .read_to_string(&mut latest_file_content) + .unwrap(); - assert_eq!(latest_file_content, updated_binary.file_name().and_then(|n| n.to_str()).unwrap()); - } + assert_eq!( + latest_file_content, + updated_binary.file_name().and_then(|n| n.to_str()).unwrap() + ); + } - #[test] - fn should_update_capability() { - let (update_policy, _tempdir) = update_policy(); - let (client, updater, operations_client, _, ..) = setup(update_policy); - let (_, _, mut latest) = new_upgrade("1.0.1"); + #[test] + fn should_update_capability() { + let (update_policy, _tempdir) = update_policy(); + let (client, updater, operations_client, _, ..) 
= setup(update_policy); + let (_, _, mut latest) = new_upgrade("1.0.1"); - // mock operations contract with a new version - operations_client.set_result(Some(latest.clone()), None); + // mock operations contract with a new version + operations_client.set_result(Some(latest.clone()), None); - // we start with no information regarding our node's capabilities - assert_eq!(updater.state.lock().capability, CapState::Unknown); + // we start with no information regarding our node's capabilities + assert_eq!(updater.state.lock().capability, CapState::Unknown); - updater.poll(); + updater.poll(); - // our node supports the current fork - assert_eq!(updater.state.lock().capability, CapState::Capable); + // our node supports the current fork + assert_eq!(updater.state.lock().capability, CapState::Capable); - // lets announce a new fork which our node doesn't support - latest.fork = 2; - operations_client.set_result(Some(latest.clone()), None); - updater.poll(); + // lets announce a new fork which our node doesn't support + latest.fork = 2; + operations_client.set_result(Some(latest.clone()), None); + updater.poll(); - // our node is only capable of operating until block #2 when the fork triggers - assert_eq!(updater.state.lock().capability, CapState::CapableUntil(2)); + // our node is only capable of operating until block #2 when the fork triggers + assert_eq!(updater.state.lock().capability, CapState::CapableUntil(2)); - client.add_blocks(3, EachBlockWith::Nothing); - updater.poll(); + client.add_blocks(3, EachBlockWith::Nothing); + updater.poll(); - // after we move past the fork the capability should be updated to incapable - assert_eq!(updater.state.lock().capability, CapState::IncapableSince(2)); + // after we move past the fork the capability should be updated to incapable + assert_eq!(updater.state.lock().capability, CapState::IncapableSince(2)); - // and since our update policy requires consensus, the client should be disabled - assert!(client.is_disabled()); - } + // 
and since our update policy requires consensus, the client should be disabled + assert!(client.is_disabled()); + } } diff --git a/util/EIP-152/src/lib.rs b/util/EIP-152/src/lib.rs index fd68b9072..0debe9c40 100644 --- a/util/EIP-152/src/lib.rs +++ b/util/EIP-152/src/lib.rs @@ -18,38 +18,42 @@ /// There are 10 16-byte arrays - one for each round /// the entries are calculated from the sigma constants. const SIGMA: [[usize; 16]; 10] = [ - [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3], - [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4], - [ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8], - [ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13], - [ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9], - [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11], - [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10], - [ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5], - [10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0], + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3], + [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4], + [7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8], + [9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13], + [2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9], + [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11], + [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10], + [6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5], + [10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0], ]; - /// IV is the initialization vector for BLAKE2b. See https://tools.ietf.org/html/rfc7693#section-2.6 /// for details. 
const IV: [u64; 8] = [ - 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, + 0x6a09e667f3bcc908, + 0xbb67ae8584caa73b, + 0x3c6ef372fe94f82b, + 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, + 0x9b05688c2b3e6c1f, + 0x1f83d9abfb41bd6b, + 0x5be0cd19137e2179, ]; - #[inline(always)] /// The G mixing function. See https://tools.ietf.org/html/rfc7693#section-3.1 fn g(v: &mut [u64], a: usize, b: usize, c: usize, d: usize, x: u64, y: u64) { - v[a] = v[a].wrapping_add(v[b]).wrapping_add(x); - v[d] = (v[d] ^ v[a]).rotate_right(32); - v[c] = v[c].wrapping_add(v[d]); - v[b] = (v[b] ^ v[c]).rotate_right(24); - v[a] = v[a].wrapping_add(v[b]).wrapping_add(y); - v[d] = (v[d] ^ v[a]).rotate_right(16); - v[c] = v[c].wrapping_add(v[d]); - v[b] = (v[b] ^ v[c]).rotate_right(63); + v[a] = v[a].wrapping_add(v[b]).wrapping_add(x); + v[d] = (v[d] ^ v[a]).rotate_right(32); + v[c] = v[c].wrapping_add(v[d]); + v[b] = (v[b] ^ v[c]).rotate_right(24); + v[a] = v[a].wrapping_add(v[b]).wrapping_add(y); + v[d] = (v[d] ^ v[a]).rotate_right(16); + v[c] = v[c].wrapping_add(v[d]); + v[b] = (v[b] ^ v[c]).rotate_right(63); } /// The Blake2 compression function F. See https://tools.ietf.org/html/rfc7693#section-3.2 @@ -57,82 +61,102 @@ fn g(v: &mut [u64], a: usize, b: usize, c: usize, d: usize, x: u64, y: u64) { /// block indicator flag `f`, and number of rounds `rounds`. The state vector provided as the first /// parameter is modified by the function. pub fn compress(h: &mut [u64; 8], m: [u64; 16], t: [u64; 2], f: bool, rounds: usize) { - let mut v = [0u64; 16]; - v[..h.len()].copy_from_slice(h); // First half from state. - v[h.len()..].copy_from_slice(&IV); // Second half from IV. + let mut v = [0u64; 16]; + v[..h.len()].copy_from_slice(h); // First half from state. + v[h.len()..].copy_from_slice(&IV); // Second half from IV. 
- v[12] ^= t[0]; - v[13] ^= t[1]; + v[12] ^= t[0]; + v[13] ^= t[1]; - if f { - v[14] = !v[14] // Invert all bits if the last-block-flag is set. - } - for i in 0..rounds { - // Message word selection permutation for this round. - let s = &SIGMA[i % 10]; - g(&mut v, 0, 4, 8, 12, m[s[0]], m[s[1]]); - g(&mut v, 1, 5, 9, 13, m[s[2]], m[s[3]]); - g(&mut v, 2, 6, 10, 14, m[s[4]], m[s[5]]); - g(&mut v, 3, 7, 11, 15, m[s[6]], m[s[7]]); + if f { + v[14] = !v[14] // Invert all bits if the last-block-flag is set. + } + for i in 0..rounds { + // Message word selection permutation for this round. + let s = &SIGMA[i % 10]; + g(&mut v, 0, 4, 8, 12, m[s[0]], m[s[1]]); + g(&mut v, 1, 5, 9, 13, m[s[2]], m[s[3]]); + g(&mut v, 2, 6, 10, 14, m[s[4]], m[s[5]]); + g(&mut v, 3, 7, 11, 15, m[s[6]], m[s[7]]); - g(&mut v, 0, 5, 10, 15, m[s[8]], m[s[9]]); - g(&mut v, 1, 6, 11, 12, m[s[10]], m[s[11]]); - g(&mut v, 2, 7, 8, 13, m[s[12]], m[s[13]]); - g(&mut v, 3, 4, 9, 14, m[s[14]], m[s[15]]); - } + g(&mut v, 0, 5, 10, 15, m[s[8]], m[s[9]]); + g(&mut v, 1, 6, 11, 12, m[s[10]], m[s[11]]); + g(&mut v, 2, 7, 8, 13, m[s[12]], m[s[13]]); + g(&mut v, 3, 4, 9, 14, m[s[14]], m[s[15]]); + } - for i in 0..8 { - h[i] ^= v[i] ^ v[i + 8]; - } + for i in 0..8 { + h[i] ^= v[i] ^ v[i + 8]; + } } - #[cfg(test)] mod tests { - use crate::compress; - use rustc_hex::FromHex; + use crate::compress; + use rustc_hex::FromHex; - #[test] - fn test_blake2_f() { - // test from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#example-usage-in-solidity - let mut h_in = [ - 0x6a09e667f2bdc948_u64, 0xbb67ae8584caa73b_u64, - 0x3c6ef372fe94f82b_u64, 0xa54ff53a5f1d36f1_u64, - 0x510e527fade682d1_u64, 0x9b05688c2b3e6c1f_u64, - 0x1f83d9abfb41bd6b_u64, 0x5be0cd19137e2179_u64, - ]; + #[test] + fn test_blake2_f() { + // test from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-152.md#example-usage-in-solidity + let mut h_in = [ + 0x6a09e667f2bdc948_u64, + 0xbb67ae8584caa73b_u64, + 0x3c6ef372fe94f82b_u64, + 
0xa54ff53a5f1d36f1_u64, + 0x510e527fade682d1_u64, + 0x9b05688c2b3e6c1f_u64, + 0x1f83d9abfb41bd6b_u64, + 0x5be0cd19137e2179_u64, + ]; - let m = [ - 0x0000000000636261_u64, 0x0000000000000000_u64, 0x0000000000000000_u64, - 0x0000000000000000_u64, 0x0000000000000000_u64, 0x0000000000000000_u64, - 0x0000000000000000_u64, 0x0000000000000000_u64, 0x0000000000000000_u64, - 0x0000000000000000_u64, 0x0000000000000000_u64, 0x0000000000000000_u64, - 0x0000000000000000_u64, 0x0000000000000000_u64, 0x0000000000000000_u64, - 0x0000000000000000_u64, - ]; - let c = [3, 0]; - let f = true; - let rounds = 12; - let h_out: [u64; 8] = [ - 0x0D4D1C983FA580BA_u64, 0xE9F6129FB697276A_u64, 0xB7C45A68142F214C_u64, - 0xD1A2FFDB6FBB124B_u64, 0x2D79AB2A39C5877D_u64, 0x95CC3345DED552C2_u64, - 0x5A92F1DBA88AD318_u64, 0x239900D4ED8623B9_u64, - ]; + let m = [ + 0x0000000000636261_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + 0x0000000000000000_u64, + ]; + let c = [3, 0]; + let f = true; + let rounds = 12; + let h_out: [u64; 8] = [ + 0x0D4D1C983FA580BA_u64, + 0xE9F6129FB697276A_u64, + 0xB7C45A68142F214C_u64, + 0xD1A2FFDB6FBB124B_u64, + 0x2D79AB2A39C5877D_u64, + 0x95CC3345DED552C2_u64, + 0x5A92F1DBA88AD318_u64, + 0x239900D4ED8623B9_u64, + ]; - compress(&mut h_in, m, c, f, rounds); + compress(&mut h_in, m, c, f, rounds); - assert_eq!(h_in, h_out); - } + assert_eq!(h_in, h_out); + } - fn to_u64_slice(vec: &[u8], slice: &mut [u64]) { - vec.chunks(8).enumerate().for_each(|(index, val)| { - slice[index] = u64::from_le_bytes([val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7]]) - }) - } + fn to_u64_slice(vec: &[u8], slice: &mut [u64]) { + 
vec.chunks(8).enumerate().for_each(|(index, val)| { + slice[index] = u64::from_le_bytes([ + val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], + ]) + }) + } - #[test] - fn test_vectors_from_eip() { - let vec = vec![ + #[test] + fn test_vectors_from_eip() { + let vec = vec![ ( // Test vector 4 "0000000048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001", @@ -158,35 +182,35 @@ mod tests { // "fc59093aafa9ab43daae0e914c57635c5402d8e3d2130eb9b3cc181de7f0ecf9b22bf99a7815ce16419e200e01846e6b5df8cc7703041bbceb571de6631d2615", // ), ]; - for (hex, output) in vec { - let hex = hex; - let bytes: Vec = hex.from_hex().unwrap(); + for (hex, output) in vec { + let hex = hex; + let bytes: Vec = hex.from_hex().unwrap(); - assert_eq!(bytes.len(), 213); + assert_eq!(bytes.len(), 213); - let mut h = [0u64; 8]; - let mut m = [0u64; 16]; - let mut t = [0u64; 2]; + let mut h = [0u64; 8]; + let mut m = [0u64; 16]; + let mut t = [0u64; 2]; - let rounds = u32::from_be_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]); - let f = match bytes[212] { - 1 => true, - 0 => false, - _ => unreachable!() - }; + let rounds = u32::from_be_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]); + let f = match bytes[212] { + 1 => true, + 0 => false, + _ => unreachable!(), + }; - to_u64_slice(&bytes[4..68], &mut h); - to_u64_slice(&bytes[68..196], &mut m); - to_u64_slice(&bytes[196..212], &mut t); + to_u64_slice(&bytes[4..68], &mut h); + to_u64_slice(&bytes[68..196], &mut m); + to_u64_slice(&bytes[196..212], &mut t); - compress(&mut h, m, t, f, rounds as usize); + compress(&mut h, m, t, f, rounds as usize); - let output: Vec = 
output.from_hex().unwrap(); + let output: Vec = output.from_hex().unwrap(); - let mut out = [0u64; 8]; - to_u64_slice(&output[..], &mut out); + let mut out = [0u64; 8]; + to_u64_slice(&output[..], &mut out); - assert_eq!(out, h); - } - } + assert_eq!(out, h); + } + } } diff --git a/util/EIP-712/src/eip712.rs b/util/EIP-712/src/eip712.rs index 426303403..8d167727d 100644 --- a/util/EIP-712/src/eip712.rs +++ b/util/EIP-712/src/eip712.rs @@ -15,85 +15,97 @@ // along with Parity Ethereum. If not, see . //! EIP712 structs -use serde_json::{Value}; -use std::collections::HashMap; -use ethereum_types::{U256, H256, Address}; -use regex::Regex; -use validator::Validate; -use validator::ValidationErrors; +use ethereum_types::{Address, H256, U256}; use lazy_static::lazy_static; +use regex::Regex; +use serde_json::Value; +use std::collections::HashMap; +use validator::{Validate, ValidationErrors}; pub(crate) type MessageTypes = HashMap>; lazy_static! { - // match solidity identifier with the addition of '[(\d)*]*' - static ref TYPE_REGEX: Regex = Regex::new(r"^[a-zA-Z_$][a-zA-Z_$0-9]*(\[([1-9]\d*)*\])*$").unwrap(); - static ref IDENT_REGEX: Regex = Regex::new(r"^[a-zA-Z_$][a-zA-Z_$0-9]*$").unwrap(); + // match solidity identifier with the addition of '[(\d)*]*' + static ref TYPE_REGEX: Regex = Regex::new(r"^[a-zA-Z_$][a-zA-Z_$0-9]*(\[([1-9]\d*)*\])*$").unwrap(); + static ref IDENT_REGEX: Regex = Regex::new(r"^[a-zA-Z_$][a-zA-Z_$0-9]*$").unwrap(); } #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] #[derive(Deserialize, Serialize, Validate, Debug, Clone)] pub(crate) struct EIP712Domain { - pub(crate) name: String, - pub(crate) version: String, - pub(crate) chain_id: U256, - pub(crate) verifying_contract: Address, - #[serde(skip_serializing_if="Option::is_none")] - pub(crate) salt: Option, + pub(crate) name: String, + pub(crate) version: String, + pub(crate) chain_id: U256, + pub(crate) verifying_contract: Address, + #[serde(skip_serializing_if = 
"Option::is_none")] + pub(crate) salt: Option, } /// EIP-712 struct #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] #[derive(Deserialize, Debug, Clone)] pub struct EIP712 { - pub(crate) types: MessageTypes, - pub(crate) primary_type: String, - pub(crate) message: Value, - pub(crate) domain: EIP712Domain, + pub(crate) types: MessageTypes, + pub(crate) primary_type: String, + pub(crate) message: Value, + pub(crate) domain: EIP712Domain, } impl Validate for EIP712 { - fn validate(&self) -> Result<(), ValidationErrors> { - for field_types in self.types.values() { - for field_type in field_types { - field_type.validate()?; - } - } - Ok(()) - } + fn validate(&self) -> Result<(), ValidationErrors> { + for field_types in self.types.values() { + for field_type in field_types { + field_type.validate()?; + } + } + Ok(()) + } } #[derive(Serialize, Deserialize, Validate, Debug, Clone)] pub(crate) struct FieldType { - #[validate(regex = "IDENT_REGEX")] - pub name: String, - #[serde(rename = "type")] - #[validate(regex = "TYPE_REGEX")] - pub type_: String, + #[validate(regex = "IDENT_REGEX")] + pub name: String, + #[serde(rename = "type")] + #[validate(regex = "TYPE_REGEX")] + pub type_: String, } #[cfg(test)] mod tests { - use super::*; - use serde_json::from_str; + use super::*; + use serde_json::from_str; - #[test] - fn test_regex() { - let test_cases = vec!["unint bytes32", "Seun\\[]", "byte[]uint", "byte[7[]uint][]", "Person[0]"]; - for case in test_cases { - assert_eq!(TYPE_REGEX.is_match(case), false) - } + #[test] + fn test_regex() { + let test_cases = vec![ + "unint bytes32", + "Seun\\[]", + "byte[]uint", + "byte[7[]uint][]", + "Person[0]", + ]; + for case in test_cases { + assert_eq!(TYPE_REGEX.is_match(case), false) + } - let test_cases = vec!["bytes32", "Foo[]", "bytes1", "bytes32[][]", "byte[9]", "contents"]; - for case in test_cases { - assert_eq!(TYPE_REGEX.is_match(case), true) - } - } + let test_cases = vec![ + "bytes32", + "Foo[]", + "bytes1", + 
"bytes32[][]", + "byte[9]", + "contents", + ]; + for case in test_cases { + assert_eq!(TYPE_REGEX.is_match(case), true) + } + } - #[test] - fn test_deserialization() { - let string = r#"{ + #[test] + fn test_deserialization() { + let string = r#"{ "primaryType": "Mail", "domain": { "name": "Ether Mail", @@ -130,12 +142,12 @@ mod tests { ] } }"#; - let _ = from_str::(string).unwrap(); - } + let _ = from_str::(string).unwrap(); + } - #[test] - fn test_failing_deserialization() { - let string = r#"{ + #[test] + fn test_failing_deserialization() { + let string = r#"{ "primaryType": "Mail", "domain": { "name": "Ether Mail", @@ -172,7 +184,7 @@ mod tests { ] } }"#; - let data = from_str::(string).unwrap(); - assert_eq!(data.validate().is_err(), true); - } + let data = from_str::(string).unwrap(); + assert_eq!(data.validate().is_err(), true); + } } diff --git a/util/EIP-712/src/encode.rs b/util/EIP-712/src/encode.rs index 29ae7b06f..6a52db303 100644 --- a/util/EIP-712/src/encode.rs +++ b/util/EIP-712/src/encode.rs @@ -15,264 +15,254 @@ // along with Parity Ethereum. If not, see . //! 
EIP712 Encoder +use crate::{ + eip712::{MessageTypes, EIP712}, + error::{serde_error, ErrorKind, Result}, + parser::{parse_type, Type}, +}; use ethabi::{encode, Token as EthAbiToken}; -use ethereum_types::{Address as EthAddress, U256, H256}; -use keccak_hash::keccak; -use serde_json::Value; -use std::str::FromStr; -use itertools::Itertools; +use ethereum_types::{Address as EthAddress, H256, U256}; use indexmap::IndexSet; -use serde_json::to_value; -use crate::parser::{parse_type, Type}; -use crate::error::{Result, ErrorKind, serde_error}; -use crate::eip712::{EIP712, MessageTypes}; +use itertools::Itertools; +use keccak_hash::keccak; use rustc_hex::FromHex; +use serde_json::{to_value, Value}; +use std::{collections::HashSet, str::FromStr}; use validator::Validate; -use std::collections::HashSet; fn check_hex(string: &str) -> Result<()> { - if string.len() >= 2 && &string[..2] == "0x" { - return Ok(()) - } + if string.len() >= 2 && &string[..2] == "0x" { + return Ok(()); + } - return Err(ErrorKind::HexParseError( - format!("Expected a 0x-prefixed string of even length, found {} length string", string.len())) - )? 
+ return Err(ErrorKind::HexParseError(format!( + "Expected a 0x-prefixed string of even length, found {} length string", + string.len() + )))?; } /// given a type and HashMap> /// returns a HashSet of dependent types of the given type -fn build_dependencies<'a>(message_type: &'a str, message_types: &'a MessageTypes) -> Option<(HashSet<&'a str>)> -{ - if message_types.get(message_type).is_none() { - return None; - } +fn build_dependencies<'a>( + message_type: &'a str, + message_types: &'a MessageTypes, +) -> Option<(HashSet<&'a str>)> { + if message_types.get(message_type).is_none() { + return None; + } - let mut types = IndexSet::new(); - types.insert(message_type); - let mut deps = HashSet::new(); + let mut types = IndexSet::new(); + types.insert(message_type); + let mut deps = HashSet::new(); - while let Some(item) = types.pop() { - if let Some(fields) = message_types.get(item) { - deps.insert(item); + while let Some(item) = types.pop() { + if let Some(fields) = message_types.get(item) { + deps.insert(item); - for field in fields { - // check if this field is an array type - let field_type = if let Some(index) = field.type_.find('[') { - &field.type_[..index] - } else { - &field.type_ - }; - // seen this type before? or not a custom type skip - if !deps.contains(field_type) || message_types.contains_key(field_type) { - types.insert(field_type); - } - } - } - }; + for field in fields { + // check if this field is an array type + let field_type = if let Some(index) = field.type_.find('[') { + &field.type_[..index] + } else { + &field.type_ + }; + // seen this type before? 
or not a custom type skip + if !deps.contains(field_type) || message_types.contains_key(field_type) { + types.insert(field_type); + } + } + } + } - return Some(deps) + return Some(deps); } fn encode_type(message_type: &str, message_types: &MessageTypes) -> Result { - let deps = { - let mut temp = build_dependencies(message_type, message_types) - .ok_or(ErrorKind::NonExistentType)?; - temp.remove(message_type); - let mut temp = temp.into_iter().collect::>(); - (&mut temp[..]).sort_unstable(); - temp.insert(0, message_type); - temp - }; + let deps = { + let mut temp = + build_dependencies(message_type, message_types).ok_or(ErrorKind::NonExistentType)?; + temp.remove(message_type); + let mut temp = temp.into_iter().collect::>(); + (&mut temp[..]).sort_unstable(); + temp.insert(0, message_type); + temp + }; - let encoded = deps - .into_iter() - .filter_map(|dep| { - message_types.get(dep).map(|field_types| { - let types = field_types - .iter() - .map(|value| format!("{} {}", value.type_, value.name)) - .join(","); - return format!("{}({})", dep, types); - }) - }) - .collect::>() - .concat(); - Ok(encoded) + let encoded = deps + .into_iter() + .filter_map(|dep| { + message_types.get(dep).map(|field_types| { + let types = field_types + .iter() + .map(|value| format!("{} {}", value.type_, value.name)) + .join(","); + return format!("{}({})", dep, types); + }) + }) + .collect::>() + .concat(); + Ok(encoded) } fn type_hash(message_type: &str, typed_data: &MessageTypes) -> Result { - Ok(keccak(encode_type(message_type, typed_data)?)) + Ok(keccak(encode_type(message_type, typed_data)?)) } fn encode_data( - message_type: &Type, - message_types: &MessageTypes, - value: &Value, - field_name: Option<&str> -) -> Result> -{ - let encoded = match message_type { - Type::Array { - inner, - length - } => { - let mut items = vec![]; - let values = value.as_array() - .ok_or(serde_error("array", field_name))?; + message_type: &Type, + message_types: &MessageTypes, + value: &Value, + 
field_name: Option<&str>, +) -> Result> { + let encoded = match message_type { + Type::Array { inner, length } => { + let mut items = vec![]; + let values = value.as_array().ok_or(serde_error("array", field_name))?; - // check if the type definition actually matches - // the length of items to be encoded - if length.is_some() && Some(values.len() as u64) != *length { - let array_type = format!("{}[{}]", *inner, length.unwrap()); - return Err( - ErrorKind::UnequalArrayItems(length.unwrap(), array_type, values.len() as u64) - )? - } + // check if the type definition actually matches + // the length of items to be encoded + if length.is_some() && Some(values.len() as u64) != *length { + let array_type = format!("{}[{}]", *inner, length.unwrap()); + return Err(ErrorKind::UnequalArrayItems( + length.unwrap(), + array_type, + values.len() as u64, + ))?; + } - for item in values { - let mut encoded = encode_data( - &*inner, - &message_types, - item, - field_name - )?; - items.append(&mut encoded); - } + for item in values { + let mut encoded = encode_data(&*inner, &message_types, item, field_name)?; + items.append(&mut encoded); + } -// keccak(items).as_ref().to_vec() - keccak(items).to_vec() - } + // keccak(items).as_ref().to_vec() + keccak(items).to_vec() + } - Type::Custom(ref ident) if message_types.get(&*ident).is_some() => { - let type_hash = (&type_hash(ident, &message_types)?).0.to_vec(); - let mut tokens = encode(&[EthAbiToken::FixedBytes(type_hash)]); + Type::Custom(ref ident) if message_types.get(&*ident).is_some() => { + let type_hash = (&type_hash(ident, &message_types)?).0.to_vec(); + let mut tokens = encode(&[EthAbiToken::FixedBytes(type_hash)]); - for field in message_types.get(ident).expect("Already checked in match guard; qed") { - let value = &value[&field.name]; - let type_ = parse_type(&*field.type_)?; - let mut encoded = encode_data( - &type_, - &message_types, - &value, - Some(&*field.name) - )?; - tokens.append(&mut encoded); - } + for field in 
message_types + .get(ident) + .expect("Already checked in match guard; qed") + { + let value = &value[&field.name]; + let type_ = parse_type(&*field.type_)?; + let mut encoded = encode_data(&type_, &message_types, &value, Some(&*field.name))?; + tokens.append(&mut encoded); + } -// keccak(tokens).as_ref().to_vec() - keccak(tokens).to_vec() - } + // keccak(tokens).as_ref().to_vec() + keccak(tokens).to_vec() + } - Type::Bytes => { - let string = value.as_str() - .ok_or(serde_error("string", field_name))?; + Type::Bytes => { + let string = value.as_str().ok_or(serde_error("string", field_name))?; - check_hex(&string)?; + check_hex(&string)?; - let bytes = (&string[2..]) - .from_hex::>() - .map_err(|err| ErrorKind::HexParseError(format!("{}", err)))?; -// let bytes = keccak(&bytes).as_ref().to_vec(); - let bytes = keccak(&bytes).to_vec(); + let bytes = (&string[2..]) + .from_hex::>() + .map_err(|err| ErrorKind::HexParseError(format!("{}", err)))?; + // let bytes = keccak(&bytes).as_ref().to_vec(); + let bytes = keccak(&bytes).to_vec(); - encode(&[EthAbiToken::FixedBytes(bytes)]) - } + encode(&[EthAbiToken::FixedBytes(bytes)]) + } - Type::Byte(_) => { - let string = value.as_str() - .ok_or(serde_error("string", field_name))?; + Type::Byte(_) => { + let string = value.as_str().ok_or(serde_error("string", field_name))?; - check_hex(&string)?; + check_hex(&string)?; - let bytes = (&string[2..]) - .from_hex::>() - .map_err(|err| ErrorKind::HexParseError(format!("{}", err)))?; + let bytes = (&string[2..]) + .from_hex::>() + .map_err(|err| ErrorKind::HexParseError(format!("{}", err)))?; - encode(&[EthAbiToken::FixedBytes(bytes)]) - } + encode(&[EthAbiToken::FixedBytes(bytes)]) + } - Type::String => { - let value = value.as_str() - .ok_or(serde_error("string", field_name))?; -// let hash = keccak(value).as_ref().to_vec(); - let hash = keccak(value).to_vec(); - encode(&[EthAbiToken::FixedBytes(hash)]) - } + Type::String => { + let value = 
value.as_str().ok_or(serde_error("string", field_name))?; + // let hash = keccak(value).as_ref().to_vec(); + let hash = keccak(value).to_vec(); + encode(&[EthAbiToken::FixedBytes(hash)]) + } - Type::Bool => encode(&[EthAbiToken::Bool(value.as_bool() - .ok_or(serde_error("bool", field_name))?)]), + Type::Bool => encode(&[EthAbiToken::Bool( + value.as_bool().ok_or(serde_error("bool", field_name))?, + )]), - Type::Address => { - let addr = value.as_str() - .ok_or(serde_error("string", field_name))?; - if addr.len() != 42 { - return Err(ErrorKind::InvalidAddressLength(addr.len()))?; - } - let address = EthAddress::from_str(&addr[2..]) - .map_err(|err| ErrorKind::HexParseError(format!("{}", err)))?; - encode(&[EthAbiToken::Address(address)]) - } + Type::Address => { + let addr = value.as_str().ok_or(serde_error("string", field_name))?; + if addr.len() != 42 { + return Err(ErrorKind::InvalidAddressLength(addr.len()))?; + } + let address = EthAddress::from_str(&addr[2..]) + .map_err(|err| ErrorKind::HexParseError(format!("{}", err)))?; + encode(&[EthAbiToken::Address(address)]) + } - Type::Uint | Type::Int => { - let string = value.as_str() - .ok_or(serde_error("int/uint", field_name))?; + Type::Uint | Type::Int => { + let string = value.as_str().ok_or(serde_error("int/uint", field_name))?; - check_hex(&string)?; + check_hex(&string)?; - let uint = U256::from_str(&string[2..]) - .map_err(|err| ErrorKind::HexParseError(format!("{}", err)))?; + let uint = U256::from_str(&string[2..]) + .map_err(|err| ErrorKind::HexParseError(format!("{}", err)))?; - let token = if *message_type == Type::Uint { - EthAbiToken::Uint(uint) - } else { - EthAbiToken::Int(uint) - }; - encode(&[token]) - } + let token = if *message_type == Type::Uint { + EthAbiToken::Uint(uint) + } else { + EthAbiToken::Int(uint) + }; + encode(&[token]) + } - _ => return Err( - ErrorKind::UnknownType( - format!("{}", field_name.unwrap_or("")), - format!("{}", *message_type) - ).into() - ) - }; + _ => { + return 
Err(ErrorKind::UnknownType( + format!("{}", field_name.unwrap_or("")), + format!("{}", *message_type), + ) + .into()) + } + }; - Ok(encoded) + Ok(encoded) } /// encodes and hashes the given EIP712 struct pub fn hash_structured_data(typed_data: EIP712) -> Result { - // validate input - typed_data.validate()?; - // EIP-191 compliant - let prefix = (b"\x19\x01").to_vec(); - let domain = to_value(&typed_data.domain).unwrap(); - let (domain_hash, data_hash) = ( - encode_data( - &Type::Custom("EIP712Domain".into()), - &typed_data.types, - &domain, - None - )?, - encode_data( - &Type::Custom(typed_data.primary_type), - &typed_data.types, - &typed_data.message, - None - )? - ); - let concat = [&prefix[..], &domain_hash[..], &data_hash[..]].concat(); - Ok(keccak(concat)) + // validate input + typed_data.validate()?; + // EIP-191 compliant + let prefix = (b"\x19\x01").to_vec(); + let domain = to_value(&typed_data.domain).unwrap(); + let (domain_hash, data_hash) = ( + encode_data( + &Type::Custom("EIP712Domain".into()), + &typed_data.types, + &domain, + None, + )?, + encode_data( + &Type::Custom(typed_data.primary_type), + &typed_data.types, + &typed_data.message, + None, + )?, + ); + let concat = [&prefix[..], &domain_hash[..], &data_hash[..]].concat(); + Ok(keccak(concat)) } #[cfg(test)] mod tests { - use super::*; - use serde_json::from_str; - use rustc_hex::ToHex; + use super::*; + use rustc_hex::ToHex; + use serde_json::from_str; - const JSON: &'static str = r#"{ + const JSON: &'static str = r#"{ "primaryType": "Mail", "domain": { "name": "Ether Mail", @@ -310,9 +300,9 @@ mod tests { } }"#; - #[test] - fn test_build_dependencies() { - let string = r#"{ + #[test] + fn test_build_dependencies() { + let string = r#"{ "EIP712Domain": [ { "name": "name", "type": "string" }, { "name": "version", "type": "string" }, @@ -330,22 +320,22 @@ mod tests { ] }"#; - let value = from_str::(string).expect("alas error!"); - let mail = "Mail"; - let person = "Person"; + let value = 
from_str::(string).expect("alas error!"); + let mail = "Mail"; + let person = "Person"; - let hashset = { - let mut temp = HashSet::new(); - temp.insert(mail); - temp.insert(person); - temp - }; - assert_eq!(build_dependencies(mail, &value), Some(hashset)); - } + let hashset = { + let mut temp = HashSet::new(); + temp.insert(mail); + temp.insert(person); + temp + }; + assert_eq!(build_dependencies(mail, &value), Some(hashset)); + } - #[test] - fn test_encode_type() { - let string = r#"{ + #[test] + fn test_encode_type() { + let string = r#"{ "EIP712Domain": [ { "name": "name", "type": "string" }, { "name": "version", "type": "string" }, @@ -363,17 +353,17 @@ mod tests { ] }"#; - let value = from_str::(string).expect("alas error!"); - let mail = &String::from("Mail"); - assert_eq!( - "Mail(Person from,Person to,string contents)Person(string name,address wallet)", - encode_type(&mail, &value).expect("alas error!") - ) - } + let value = from_str::(string).expect("alas error!"); + let mail = &String::from("Mail"); + assert_eq!( + "Mail(Person from,Person to,string contents)Person(string name,address wallet)", + encode_type(&mail, &value).expect("alas error!") + ) + } - #[test] - fn test_encode_type_hash() { - let string = r#"{ + #[test] + fn test_encode_type_hash() { + let string = r#"{ "EIP712Domain": [ { "name": "name", "type": "string" }, { "name": "version", "type": "string" }, @@ -391,28 +381,28 @@ mod tests { ] }"#; - let value = from_str::(string).expect("alas error!"); - let mail = &String::from("Mail"); - let hash = (type_hash(&mail, &value).expect("alas error!").0).to_hex::(); - assert_eq!( - hash, - "a0cedeb2dc280ba39b857546d74f5549c3a1d7bdc2dd96bf881f76108e23dac2" - ); - } + let value = from_str::(string).expect("alas error!"); + let mail = &String::from("Mail"); + let hash = (type_hash(&mail, &value).expect("alas error!").0).to_hex::(); + assert_eq!( + hash, + "a0cedeb2dc280ba39b857546d74f5549c3a1d7bdc2dd96bf881f76108e23dac2" + ); + } - #[test] - fn 
test_hash_data() { - let typed_data = from_str::(JSON).expect("alas error!"); - let hash = hash_structured_data(typed_data).expect("alas error!"); - assert_eq!( - &format!("{:x}", hash)[..], - "be609aee343fb3c4b28e1df9e632fca64fcfaede20f02e86244efddf30957bd2", - ) - } + #[test] + fn test_hash_data() { + let typed_data = from_str::(JSON).expect("alas error!"); + let hash = hash_structured_data(typed_data).expect("alas error!"); + assert_eq!( + &format!("{:x}", hash)[..], + "be609aee343fb3c4b28e1df9e632fca64fcfaede20f02e86244efddf30957bd2", + ) + } - #[test] - fn test_unequal_array_lengths() { - const TEST: &'static str = r#"{ + #[test] + fn test_unequal_array_lengths() { + const TEST: &'static str = r#"{ "primaryType": "Mail", "domain": { "name": "Ether Mail", @@ -450,16 +440,16 @@ mod tests { } }"#; - let typed_data = from_str::(TEST).expect("alas error!"); - assert_eq!( - hash_structured_data(typed_data).unwrap_err().kind(), - ErrorKind::UnequalArrayItems(2, "Person[2]".into(), 1) - ) - } + let typed_data = from_str::(TEST).expect("alas error!"); + assert_eq!( + hash_structured_data(typed_data).unwrap_err().kind(), + ErrorKind::UnequalArrayItems(2, "Person[2]".into(), 1) + ) + } - #[test] - fn test_typed_data_v4() { - let string = r#"{ + #[test] + fn test_typed_data_v4() { + let string = r#"{ "types": { "EIP712Domain": [ { @@ -543,18 +533,17 @@ mod tests { } }"#; - let typed_data = from_str::(string).expect("alas error!"); - let hash = hash_structured_data(typed_data.clone()).expect("alas error!"); - assert_eq!( - &format!("{:x}", hash)[..], + let typed_data = from_str::(string).expect("alas error!"); + let hash = hash_structured_data(typed_data.clone()).expect("alas error!"); + assert_eq!( + &format!("{:x}", hash)[..], + "a85c2e2b118698e88db68a8105b794a8cc7cec074e89ef991cb4f5f533819cc2", + ); + } - "a85c2e2b118698e88db68a8105b794a8cc7cec074e89ef991cb4f5f533819cc2", - ); - } - - #[test] - fn test_typed_data_v4_custom_array() { - let string = r#"{ + #[test] + fn 
test_typed_data_v4_custom_array() { + let string = r#"{ "types": { "EIP712Domain": [ { @@ -640,12 +629,12 @@ mod tests { "contents": "Hello, Bob!" } }"#; - let typed_data = from_str::(string).expect("alas error!"); - let hash = hash_structured_data(typed_data.clone()).expect("alas error!"); + let typed_data = from_str::(string).expect("alas error!"); + let hash = hash_structured_data(typed_data.clone()).expect("alas error!"); - assert_eq!( - &format!("{:x}", hash)[..], - "cd8b34cd09c541cfc0a2fcd147e47809b98b335649c2aa700db0b0c4501a02a0", - ); - } + assert_eq!( + &format!("{:x}", hash)[..], + "cd8b34cd09c541cfc0a2fcd147e47809b98b335649c2aa700db0b0c4501a02a0", + ); + } } diff --git a/util/EIP-712/src/error.rs b/util/EIP-712/src/error.rs index 0eaa8e7bd..0ddfee6c9 100644 --- a/util/EIP-712/src/error.rs +++ b/util/EIP-712/src/error.rs @@ -14,112 +14,120 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use failure::{Backtrace, Context, Fail}; use std::fmt::{self, Display}; -use failure::{Fail, Context, Backtrace}; -use validator::ValidationErrors; -use validator::ValidationErrorsKind; +use validator::{ValidationErrors, ValidationErrorsKind}; pub(crate) type Result = ::std::result::Result; /// Error type #[derive(Debug)] pub struct Error { - inner: Context, + inner: Context, } /// Possible errors encountered while hashing/encoding an EIP-712 compliant data structure #[derive(Clone, Fail, Debug, PartialEq)] pub enum ErrorKind { - /// if we fail to deserialize from a serde::Value as a type specified in message types - /// fail with this error. 
- #[fail(display = "Expected type '{}' for field '{}'", _0, _1)] - UnexpectedType(String, String), - /// the primary type supplied doesn't exist in the MessageTypes - #[fail(display = "The given primaryType wasn't found in the types field")] - NonExistentType, - /// an invalid address was encountered during encoding - #[fail(display = "Address string should be a 0x-prefixed 40 character string, got '{}'", _0)] - InvalidAddressLength(usize), - /// a hex parse error occured - #[fail(display = "Failed to parse hex '{}'", _0)] - HexParseError(String), - /// the field was declared with a unknown type - #[fail(display = "The field '{}' has an unknown type '{}'", _0, _1)] - UnknownType(String, String), - /// Unexpected token - #[fail(display = "Unexpected token '{}' while parsing typename '{}'", _0, _1)] - UnexpectedToken(String, String), - /// the user has attempted to define a typed array with a depth > 10 - #[fail(display = "Maximum depth for nested arrays is 10")] - UnsupportedArrayDepth, - /// FieldType validation error - #[fail(display = "{}", _0)] - ValidationError(String), - /// the typed array defined in message types was declared with a fixed length - /// that is of unequal length with the items to be encoded - #[fail(display = "Expected {} items for array type {}, got {} items", _0, _1, _2)] - UnequalArrayItems(u64, String, u64), - /// Typed array length doesn't fit into a u64 - #[fail(display = "Attempted to declare fixed size with length {}", _0)] - InvalidArraySize(String) + /// if we fail to deserialize from a serde::Value as a type specified in message types + /// fail with this error. 
+ #[fail(display = "Expected type '{}' for field '{}'", _0, _1)] + UnexpectedType(String, String), + /// the primary type supplied doesn't exist in the MessageTypes + #[fail(display = "The given primaryType wasn't found in the types field")] + NonExistentType, + /// an invalid address was encountered during encoding + #[fail( + display = "Address string should be a 0x-prefixed 40 character string, got '{}'", + _0 + )] + InvalidAddressLength(usize), + /// a hex parse error occured + #[fail(display = "Failed to parse hex '{}'", _0)] + HexParseError(String), + /// the field was declared with a unknown type + #[fail(display = "The field '{}' has an unknown type '{}'", _0, _1)] + UnknownType(String, String), + /// Unexpected token + #[fail(display = "Unexpected token '{}' while parsing typename '{}'", _0, _1)] + UnexpectedToken(String, String), + /// the user has attempted to define a typed array with a depth > 10 + #[fail(display = "Maximum depth for nested arrays is 10")] + UnsupportedArrayDepth, + /// FieldType validation error + #[fail(display = "{}", _0)] + ValidationError(String), + /// the typed array defined in message types was declared with a fixed length + /// that is of unequal length with the items to be encoded + #[fail( + display = "Expected {} items for array type {}, got {} items", + _0, _1, _2 + )] + UnequalArrayItems(u64, String, u64), + /// Typed array length doesn't fit into a u64 + #[fail(display = "Attempted to declare fixed size with length {}", _0)] + InvalidArraySize(String), } pub(crate) fn serde_error(expected: &str, field: Option<&str>) -> ErrorKind { - ErrorKind::UnexpectedType(expected.to_owned(), field.unwrap_or("").to_owned()) + ErrorKind::UnexpectedType(expected.to_owned(), field.unwrap_or("").to_owned()) } impl Fail for Error { - fn cause(&self) -> Option<&dyn Fail> { - self.inner.cause() - } + fn cause(&self) -> Option<&dyn Fail> { + self.inner.cause() + } - fn backtrace(&self) -> Option<&Backtrace> { - self.inner.backtrace() - } + fn 
backtrace(&self) -> Option<&Backtrace> { + self.inner.backtrace() + } } impl Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.inner, f) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.inner, f) + } } impl Error { - /// extract the error kind - pub fn kind(&self) -> ErrorKind { - self.inner.get_context().clone() - } + /// extract the error kind + pub fn kind(&self) -> ErrorKind { + self.inner.get_context().clone() + } } impl From for Error { - fn from(kind: ErrorKind) -> Error { - Error { inner: Context::new(kind) } - } + fn from(kind: ErrorKind) -> Error { + Error { + inner: Context::new(kind), + } + } } impl From> for Error { - fn from(inner: Context) -> Error { - Error { inner } - } + fn from(inner: Context) -> Error { + Error { inner } + } } impl From for Error { - fn from(error: ValidationErrors) -> Self { - let mut string: String = "".into(); - for (field_name, error_kind) in error.errors() { - match error_kind { - ValidationErrorsKind::Field(validation_errors) => { - for error in validation_errors { - let str_error = format!( - "the field '{}', has an invalid value {}", - field_name, - error.params["value"] - ); - string.push_str(&str_error); - } - }, - _ => unreachable!("#[validate] is only used on fields for regex;\ - its impossible to get any other ErrorKind; qed") - } - } - ErrorKind::ValidationError(string).into() - } + fn from(error: ValidationErrors) -> Self { + let mut string: String = "".into(); + for (field_name, error_kind) in error.errors() { + match error_kind { + ValidationErrorsKind::Field(validation_errors) => { + for error in validation_errors { + let str_error = format!( + "the field '{}', has an invalid value {}", + field_name, error.params["value"] + ); + string.push_str(&str_error); + } + } + _ => unreachable!( + "#[validate] is only used on fields for regex;\ + its impossible to get any other ErrorKind; qed" + ), + } + } + 
ErrorKind::ValidationError(string).into() + } } diff --git a/util/EIP-712/src/lib.rs b/util/EIP-712/src/lib.rs index 26ce9615c..c5b57a9d2 100644 --- a/util/EIP-712/src/lib.rs +++ b/util/EIP-712/src/lib.rs @@ -164,13 +164,13 @@ extern crate validator_derive; extern crate serde_derive; mod eip712; +mod encode; mod error; mod parser; -mod encode; +/// EIP712 struct +pub use crate::eip712::EIP712; /// the EIP-712 encoding function pub use crate::encode::hash_structured_data; /// encoding Error types -pub use crate::error::{ErrorKind, Error}; -/// EIP712 struct -pub use crate::eip712::EIP712; +pub use crate::error::{Error, ErrorKind}; diff --git a/util/EIP-712/src/parser.rs b/util/EIP-712/src/parser.rs index 21415d11d..378ace0f7 100644 --- a/util/EIP-712/src/parser.rs +++ b/util/EIP-712/src/parser.rs @@ -15,143 +15,149 @@ // along with Parity Ethereum. If not, see . //! Solidity type-name parsing -use lunarity_lexer::{Lexer, Token}; use crate::error::*; +use lunarity_lexer::{Lexer, Token}; use std::{fmt, result}; #[derive(Debug, Clone, PartialEq)] pub enum Type { - Address, - Uint, - Int, - String, - Bool, - Bytes, - Byte(u8), - Custom(String), - Array { - length: Option, - inner: Box - } + Address, + Uint, + Int, + String, + Bool, + Bytes, + Byte(u8), + Custom(String), + Array { + length: Option, + inner: Box, + }, } impl From for String { - fn from(field_type: Type) -> String { - match field_type { - Type::Address => "address".into(), - Type::Uint => "uint".into(), - Type::Int => "int".into(), - Type::String => "string".into(), - Type::Bool => "bool".into(), - Type::Bytes => "bytes".into(), - Type::Byte(len) => format!("bytes{}", len), - Type::Custom(custom) => custom, - Type::Array { - inner, - length - } => { - let inner: String = (*inner).into(); - match length { - None => format!("{}[]", inner), - Some(length) => format!("{}[{}]", inner, length) - } - } - } - } + fn from(field_type: Type) -> String { + match field_type { + Type::Address => "address".into(), + 
Type::Uint => "uint".into(), + Type::Int => "int".into(), + Type::String => "string".into(), + Type::Bool => "bool".into(), + Type::Bytes => "bytes".into(), + Type::Byte(len) => format!("bytes{}", len), + Type::Custom(custom) => custom, + Type::Array { inner, length } => { + let inner: String = (*inner).into(); + match length { + None => format!("{}[]", inner), + Some(length) => format!("{}[{}]", inner, length), + } + } + } + } } impl fmt::Display for Type { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - let item: String = self.clone().into(); - write!(f, "{}", item) - } + fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { + let item: String = self.clone().into(); + write!(f, "{}", item) + } } - /// the type string is being validated before it's parsed. pub fn parse_type(field_type: &str) -> Result { - #[derive(PartialEq)] - enum State { Open, Close } + #[derive(PartialEq)] + enum State { + Open, + Close, + } - let mut lexer = Lexer::new(field_type); - let mut token = None; - let mut state = State::Close; - let mut array_depth = 0; - let mut current_array_length: Option = None; + let mut lexer = Lexer::new(field_type); + let mut token = None; + let mut state = State::Close; + let mut array_depth = 0; + let mut current_array_length: Option = None; - while lexer.token != Token::EndOfProgram { - let type_ = match lexer.token { - Token::Identifier => Type::Custom(lexer.slice().to_owned()), - Token::TypeByte => Type::Byte(lexer.extras.0), - Token::TypeBytes => Type::Bytes, - Token::TypeBool => Type::Bool, - Token::TypeUint => Type::Uint, - Token::TypeInt => Type::Int, - Token::TypeString => Type::String, - Token::TypeAddress => Type::Address, - Token::LiteralInteger => { - let length = lexer.slice(); - current_array_length = Some(length - .parse() - .map_err(|_| - ErrorKind::InvalidArraySize(length.into()) - )? 
- ); - lexer.advance(); - continue; - } - Token::BracketOpen if token.is_some() && state == State::Close => { - state = State::Open; - lexer.advance(); - continue; - } - Token::BracketClose if array_depth < 10 => { - if state == State::Open && token.is_some() { - let length = current_array_length.take(); - state = State::Close; - token = Some(Type::Array { - inner: Box::new(token.expect("if statement checks for some; qed")), - length, - }); - lexer.advance(); - array_depth += 1; - continue; - } else { - return Err(ErrorKind::UnexpectedToken(lexer.slice().to_owned(), field_type.to_owned()))?; - } - } - Token::BracketClose if array_depth == 10 => { - return Err(ErrorKind::UnsupportedArrayDepth)?; - } - _ => return Err(ErrorKind::UnexpectedToken(lexer.slice().to_owned(), field_type.to_owned()))? - }; + while lexer.token != Token::EndOfProgram { + let type_ = match lexer.token { + Token::Identifier => Type::Custom(lexer.slice().to_owned()), + Token::TypeByte => Type::Byte(lexer.extras.0), + Token::TypeBytes => Type::Bytes, + Token::TypeBool => Type::Bool, + Token::TypeUint => Type::Uint, + Token::TypeInt => Type::Int, + Token::TypeString => Type::String, + Token::TypeAddress => Type::Address, + Token::LiteralInteger => { + let length = lexer.slice(); + current_array_length = Some( + length + .parse() + .map_err(|_| ErrorKind::InvalidArraySize(length.into()))?, + ); + lexer.advance(); + continue; + } + Token::BracketOpen if token.is_some() && state == State::Close => { + state = State::Open; + lexer.advance(); + continue; + } + Token::BracketClose if array_depth < 10 => { + if state == State::Open && token.is_some() { + let length = current_array_length.take(); + state = State::Close; + token = Some(Type::Array { + inner: Box::new(token.expect("if statement checks for some; qed")), + length, + }); + lexer.advance(); + array_depth += 1; + continue; + } else { + return Err(ErrorKind::UnexpectedToken( + lexer.slice().to_owned(), + field_type.to_owned(), + ))?; + } + } + 
Token::BracketClose if array_depth == 10 => { + return Err(ErrorKind::UnsupportedArrayDepth)?; + } + _ => { + return Err(ErrorKind::UnexpectedToken( + lexer.slice().to_owned(), + field_type.to_owned(), + ))? + } + }; - token = Some(type_); - lexer.advance(); - } + token = Some(type_); + lexer.advance(); + } - Ok(token.ok_or(ErrorKind::NonExistentType)?) + Ok(token.ok_or(ErrorKind::NonExistentType)?) } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn test_parser() { - let source = "byte[][][7][][][][][][][]"; - parse_type(source).unwrap(); - } + #[test] + fn test_parser() { + let source = "byte[][][7][][][][][][][]"; + parse_type(source).unwrap(); + } - #[test] - fn test_nested_array() { - let source = "byte[][][7][][][][][][][][]"; - assert_eq!(parse_type(source).is_err(), true); - } + #[test] + fn test_nested_array() { + let source = "byte[][][7][][][][][][][][]"; + assert_eq!(parse_type(source).is_err(), true); + } - #[test] - fn test_malformed_array_type() { - let source = "byte[7[]uint][]"; - assert_eq!(parse_type(source).is_err(), true) - } + #[test] + fn test_malformed_array_type() { + let source = "byte[7[]uint][]"; + assert_eq!(parse_type(source).is_err(), true) + } } diff --git a/util/bloom/src/lib.rs b/util/bloom/src/lib.rs index 3dec641cd..af61e1196 100644 --- a/util/bloom/src/lib.rs +++ b/util/bloom/src/lib.rs @@ -16,12 +16,14 @@ extern crate siphasher; -use std::cmp; -use std::mem; -use std::f64; -use std::hash::{Hash, Hasher}; -use std::collections::HashSet; use siphasher::sip::SipHasher; +use std::{ + cmp, + collections::HashSet, + f64, + hash::{Hash, Hasher}, + mem, +}; /// BitVec structure with journalling /// Every time any of the blocks is getting set it's index is tracked @@ -32,171 +34,179 @@ struct BitVecJournal { } impl BitVecJournal { - pub fn new(size: usize) -> BitVecJournal { - let extra = if size % 64 > 0 { 1 } else { 0 }; - BitVecJournal { - elems: vec![0u64; size / 64 + extra], - journal: HashSet::new(), - } - } 
+ pub fn new(size: usize) -> BitVecJournal { + let extra = if size % 64 > 0 { 1 } else { 0 }; + BitVecJournal { + elems: vec![0u64; size / 64 + extra], + journal: HashSet::new(), + } + } - pub fn from_parts(parts: &[u64]) -> BitVecJournal { - BitVecJournal { - elems: parts.to_vec(), - journal: HashSet::new(), - } - } + pub fn from_parts(parts: &[u64]) -> BitVecJournal { + BitVecJournal { + elems: parts.to_vec(), + journal: HashSet::new(), + } + } - pub fn set(&mut self, index: usize) { - let e_index = index / 64; - let bit_index = index % 64; - let val = self.elems.get_mut(e_index).unwrap(); - *val |= 1u64 << bit_index; - self.journal.insert(e_index); - } + pub fn set(&mut self, index: usize) { + let e_index = index / 64; + let bit_index = index % 64; + let val = self.elems.get_mut(e_index).unwrap(); + *val |= 1u64 << bit_index; + self.journal.insert(e_index); + } - pub fn get(&self, index: usize) -> bool { - let e_index = index / 64; - let bit_index = index % 64; - self.elems[e_index] & (1 << bit_index) != 0 - } + pub fn get(&self, index: usize) -> bool { + let e_index = index / 64; + let bit_index = index % 64; + self.elems[e_index] & (1 << bit_index) != 0 + } - pub fn drain(&mut self) -> Vec<(usize, u64)> { - let journal = mem::replace(&mut self.journal, HashSet::new()).into_iter(); - journal.map(|idx| (idx, self.elems[idx])).collect::>() - } + pub fn drain(&mut self) -> Vec<(usize, u64)> { + let journal = mem::replace(&mut self.journal, HashSet::new()).into_iter(); + journal + .map(|idx| (idx, self.elems[idx])) + .collect::>() + } - pub fn saturation(&self) -> f64 { - self.elems.iter().fold(0u64, |acc, e| acc + e.count_ones() as u64) as f64 / (self.elems.len() * 64) as f64 - } + pub fn saturation(&self) -> f64 { + self.elems + .iter() + .fold(0u64, |acc, e| acc + e.count_ones() as u64) as f64 + / (self.elems.len() * 64) as f64 + } } /// Bloom filter structure pub struct Bloom { - bitmap: BitVecJournal, - bitmap_bits: u64, - k_num: u32, + bitmap: BitVecJournal, 
+ bitmap_bits: u64, + k_num: u32, } impl Bloom { - /// Create a new bloom filter structure. - /// bitmap_size is the size in bytes (not bits) that will be allocated in memory - /// items_count is an estimation of the maximum number of items to store. - pub fn new(bitmap_size: usize, items_count: usize) -> Bloom { - assert!(bitmap_size > 0 && items_count > 0); - let bitmap_bits = (bitmap_size as u64) * 8u64; - let k_num = Bloom::optimal_k_num(bitmap_bits, items_count); - let bitmap = BitVecJournal::new(bitmap_bits as usize); - Bloom { - bitmap: bitmap, - bitmap_bits: bitmap_bits, - k_num: k_num, - } - } + /// Create a new bloom filter structure. + /// bitmap_size is the size in bytes (not bits) that will be allocated in memory + /// items_count is an estimation of the maximum number of items to store. + pub fn new(bitmap_size: usize, items_count: usize) -> Bloom { + assert!(bitmap_size > 0 && items_count > 0); + let bitmap_bits = (bitmap_size as u64) * 8u64; + let k_num = Bloom::optimal_k_num(bitmap_bits, items_count); + let bitmap = BitVecJournal::new(bitmap_bits as usize); + Bloom { + bitmap: bitmap, + bitmap_bits: bitmap_bits, + k_num: k_num, + } + } - /// Initializes bloom filter from saved state - pub fn from_parts(parts: &[u64], k_num: u32) -> Bloom { - let bitmap_size = parts.len() * 8; - let bitmap_bits = (bitmap_size as u64) * 8u64; - let bitmap = BitVecJournal::from_parts(parts); - Bloom { - bitmap: bitmap, - bitmap_bits: bitmap_bits, - k_num: k_num, - } - } + /// Initializes bloom filter from saved state + pub fn from_parts(parts: &[u64], k_num: u32) -> Bloom { + let bitmap_size = parts.len() * 8; + let bitmap_bits = (bitmap_size as u64) * 8u64; + let bitmap = BitVecJournal::from_parts(parts); + Bloom { + bitmap: bitmap, + bitmap_bits: bitmap_bits, + k_num: k_num, + } + } - /// Create a new bloom filter structure. - /// items_count is an estimation of the maximum number of items to store. 
- /// fp_p is the wanted rate of false positives, in ]0.0, 1.0[ - pub fn new_for_fp_rate(items_count: usize, fp_p: f64) -> Bloom { - let bitmap_size = Bloom::compute_bitmap_size(items_count, fp_p); - Bloom::new(bitmap_size, items_count) - } + /// Create a new bloom filter structure. + /// items_count is an estimation of the maximum number of items to store. + /// fp_p is the wanted rate of false positives, in ]0.0, 1.0[ + pub fn new_for_fp_rate(items_count: usize, fp_p: f64) -> Bloom { + let bitmap_size = Bloom::compute_bitmap_size(items_count, fp_p); + Bloom::new(bitmap_size, items_count) + } - /// Compute a recommended bitmap size for items_count items - /// and a fp_p rate of false positives. - /// fp_p obviously has to be within the ]0.0, 1.0[ range. - pub fn compute_bitmap_size(items_count: usize, fp_p: f64) -> usize { - assert!(items_count > 0); - assert!(fp_p > 0.0 && fp_p < 1.0); - let log2 = f64::consts::LN_2; - let log2_2 = log2 * log2; - ((items_count as f64) * f64::ln(fp_p) / (-8.0 * log2_2)).ceil() as usize - } + /// Compute a recommended bitmap size for items_count items + /// and a fp_p rate of false positives. + /// fp_p obviously has to be within the ]0.0, 1.0[ range. + pub fn compute_bitmap_size(items_count: usize, fp_p: f64) -> usize { + assert!(items_count > 0); + assert!(fp_p > 0.0 && fp_p < 1.0); + let log2 = f64::consts::LN_2; + let log2_2 = log2 * log2; + ((items_count as f64) * f64::ln(fp_p) / (-8.0 * log2_2)).ceil() as usize + } - /// Records the presence of an item. - pub fn set(&mut self, item: T) - where T: Hash - { - let base_hash = Bloom::sip_hash(&item); - for k_i in 0..self.k_num { - let bit_offset = (Bloom::bloom_hash(base_hash, k_i) % self.bitmap_bits) as usize; - self.bitmap.set(bit_offset); - } - } + /// Records the presence of an item. 
+ pub fn set(&mut self, item: T) + where + T: Hash, + { + let base_hash = Bloom::sip_hash(&item); + for k_i in 0..self.k_num { + let bit_offset = (Bloom::bloom_hash(base_hash, k_i) % self.bitmap_bits) as usize; + self.bitmap.set(bit_offset); + } + } - /// Check if an item is present in the set. - /// There can be false positives, but no false negatives. - pub fn check(&self, item: T) -> bool - where T: Hash - { - let base_hash = Bloom::sip_hash(&item); - for k_i in 0..self.k_num { - let bit_offset = (Bloom::bloom_hash(base_hash, k_i) % self.bitmap_bits) as usize; - if !self.bitmap.get(bit_offset) { - return false; - } - } - true - } + /// Check if an item is present in the set. + /// There can be false positives, but no false negatives. + pub fn check(&self, item: T) -> bool + where + T: Hash, + { + let base_hash = Bloom::sip_hash(&item); + for k_i in 0..self.k_num { + let bit_offset = (Bloom::bloom_hash(base_hash, k_i) % self.bitmap_bits) as usize; + if !self.bitmap.get(bit_offset) { + return false; + } + } + true + } - /// Return the number of bits in the filter - pub fn number_of_bits(&self) -> u64 { - self.bitmap_bits - } + /// Return the number of bits in the filter + pub fn number_of_bits(&self) -> u64 { + self.bitmap_bits + } - /// Return the number of hash functions used for `check` and `set` - pub fn number_of_hash_functions(&self) -> u32 { - self.k_num - } + /// Return the number of hash functions used for `check` and `set` + pub fn number_of_hash_functions(&self) -> u32 { + self.k_num + } - fn optimal_k_num(bitmap_bits: u64, items_count: usize) -> u32 { - let m = bitmap_bits as f64; - let n = items_count as f64; - let k_num = (m / n * f64::ln(2.0f64)).ceil() as u32; - cmp::max(k_num, 1) - } + fn optimal_k_num(bitmap_bits: u64, items_count: usize) -> u32 { + let m = bitmap_bits as f64; + let n = items_count as f64; + let k_num = (m / n * f64::ln(2.0f64)).ceil() as u32; + cmp::max(k_num, 1) + } - fn sip_hash(item: &T) -> u64 - where T: Hash - { - let mut 
sip = SipHasher::new(); - item.hash(&mut sip); - let hash = sip.finish(); - hash - } + fn sip_hash(item: &T) -> u64 + where + T: Hash, + { + let mut sip = SipHasher::new(); + item.hash(&mut sip); + let hash = sip.finish(); + hash + } - fn bloom_hash(base_hash: u64, k_i: u32) -> u64 { - if k_i < 2 { - base_hash - } else { - base_hash.wrapping_add((k_i as u64).wrapping_mul(base_hash) % 0xffffffffffffffc5) - } - } + fn bloom_hash(base_hash: u64, k_i: u32) -> u64 { + if k_i < 2 { + base_hash + } else { + base_hash.wrapping_add((k_i as u64).wrapping_mul(base_hash) % 0xffffffffffffffc5) + } + } - /// Drains the bloom journal returning the updated bloom part - pub fn drain_journal(&mut self) -> BloomJournal { - BloomJournal { - entries: self.bitmap.drain(), - hash_functions: self.k_num, - } - } + /// Drains the bloom journal returning the updated bloom part + pub fn drain_journal(&mut self) -> BloomJournal { + BloomJournal { + entries: self.bitmap.drain(), + hash_functions: self.k_num, + } + } - /// Returns the ratio of set bits in the bloom filter to the total bits - pub fn saturation(&self) -> f64 { - self.bitmap.saturation() - } + /// Returns the ratio of set bits in the bloom filter to the total bits + pub fn saturation(&self) -> f64 { + self.bitmap.saturation() + } } /// Bloom journal @@ -209,67 +219,90 @@ pub struct BloomJournal { #[cfg(test)] mod tests { - use super::Bloom; - use std::collections::HashSet; + use super::Bloom; + use std::collections::HashSet; - #[test] - fn get_set() { - let mut bloom = Bloom::new(10, 80); - let key = vec![115u8, 99]; - assert!(!bloom.check(&key)); - bloom.set(&key); - assert!(bloom.check(&key)); - } + #[test] + fn get_set() { + let mut bloom = Bloom::new(10, 80); + let key = vec![115u8, 99]; + assert!(!bloom.check(&key)); + bloom.set(&key); + assert!(bloom.check(&key)); + } - #[test] - fn journalling() { - let initial = vec![0u64; 8]; - let mut bloom = Bloom::from_parts(&initial, 3); - bloom.set(&vec![5u8, 4]); - let drain = 
bloom.drain_journal(); + #[test] + fn journalling() { + let initial = vec![0u64; 8]; + let mut bloom = Bloom::from_parts(&initial, 3); + bloom.set(&vec![5u8, 4]); + let drain = bloom.drain_journal(); - assert_eq!(2, drain.entries.len()) - } + assert_eq!(2, drain.entries.len()) + } - #[test] - fn saturation() { - let initial = vec![0u64; 8]; - let mut bloom = Bloom::from_parts(&initial, 3); - bloom.set(&vec![5u8, 4]); + #[test] + fn saturation() { + let initial = vec![0u64; 8]; + let mut bloom = Bloom::from_parts(&initial, 3); + bloom.set(&vec![5u8, 4]); - let full = bloom.saturation(); - // 2/8/64 = 0.00390625 - assert!(full >= 0.0039f64 && full <= 0.004f64); - } + let full = bloom.saturation(); + // 2/8/64 = 0.00390625 + assert!(full >= 0.0039f64 && full <= 0.004f64); + } - #[test] - fn hash_backward_compatibility_for_new() { - let ss = vec!["you", "should", "not", "break", "hash", "backward", "compatibility"]; - let mut bloom = Bloom::new(16, 8); - for s in ss.iter() { - bloom.set(&s); - } + #[test] + fn hash_backward_compatibility_for_new() { + let ss = vec![ + "you", + "should", + "not", + "break", + "hash", + "backward", + "compatibility", + ]; + let mut bloom = Bloom::new(16, 8); + for s in ss.iter() { + bloom.set(&s); + } - let drained_elems: HashSet = bloom.drain_journal().entries.into_iter().map(|t| t.1).collect(); - let expected: HashSet = [2094615114573771027u64, 244675582389208413u64].iter().cloned().collect(); - assert_eq!(drained_elems, expected); - assert_eq!(bloom.k_num, 12); - } + let drained_elems: HashSet = bloom + .drain_journal() + .entries + .into_iter() + .map(|t| t.1) + .collect(); + let expected: HashSet = [2094615114573771027u64, 244675582389208413u64] + .iter() + .cloned() + .collect(); + assert_eq!(drained_elems, expected); + assert_eq!(bloom.k_num, 12); + } - #[test] - fn hash_backward_compatibility_for_from_parts() { - let stored_state = vec![2094615114573771027u64, 244675582389208413u64]; - let k_num = 12; - let bloom = 
Bloom::from_parts(&stored_state, k_num); + #[test] + fn hash_backward_compatibility_for_from_parts() { + let stored_state = vec![2094615114573771027u64, 244675582389208413u64]; + let k_num = 12; + let bloom = Bloom::from_parts(&stored_state, k_num); - let ss = vec!["you", "should", "not", "break", "hash", "backward", "compatibility"]; - let tt = vec!["this", "doesnot", "exist"]; - for s in ss.iter() { - assert!(bloom.check(&s)); - } - for s in tt.iter() { - assert!(!bloom.check(&s)); - } - - } + let ss = vec![ + "you", + "should", + "not", + "break", + "hash", + "backward", + "compatibility", + ]; + let tt = vec!["this", "doesnot", "exist"]; + for s in ss.iter() { + assert!(bloom.check(&s)); + } + for s in tt.iter() { + assert!(!bloom.check(&s)); + } + } } diff --git a/util/blooms-db/benches/blooms.rs b/util/blooms-db/benches/blooms.rs index 190caca99..d7a5c0c77 100644 --- a/util/blooms-db/benches/blooms.rs +++ b/util/blooms-db/benches/blooms.rs @@ -16,76 +16,84 @@ #[macro_use] extern crate criterion; -extern crate tempdir; extern crate blooms_db; extern crate ethbloom; +extern crate tempdir; -use std::iter; -use criterion::Criterion; -use tempdir::TempDir; use blooms_db::Database; +use criterion::Criterion; use ethbloom::Bloom; +use std::iter; +use tempdir::TempDir; criterion_group!( - blooms, - bench_blooms_filter_1_million_ok, - bench_blooms_filter_1_million_miss, - bench_blooms_filter_1_million_miss_and_ok, + blooms, + bench_blooms_filter_1_million_ok, + bench_blooms_filter_1_million_miss, + bench_blooms_filter_1_million_miss_and_ok, ); criterion_main!(blooms); fn bench_blooms_filter_1_million_ok(c: &mut Criterion) { - let tempdir = TempDir::new("").unwrap(); - let database = Database::open(tempdir.path()).unwrap(); - database.insert_blooms(999_999, iter::once(&Bloom::zero())).unwrap(); - let bloom = Bloom::from(0x001); - database.insert_blooms(200_000, iter::once(&bloom)).unwrap(); - database.insert_blooms(400_000, iter::once(&bloom)).unwrap(); - 
database.insert_blooms(600_000, iter::once(&bloom)).unwrap(); - database.insert_blooms(800_000, iter::once(&bloom)).unwrap(); + let tempdir = TempDir::new("").unwrap(); + let database = Database::open(tempdir.path()).unwrap(); + database + .insert_blooms(999_999, iter::once(&Bloom::zero())) + .unwrap(); + let bloom = Bloom::from(0x001); + database.insert_blooms(200_000, iter::once(&bloom)).unwrap(); + database.insert_blooms(400_000, iter::once(&bloom)).unwrap(); + database.insert_blooms(600_000, iter::once(&bloom)).unwrap(); + database.insert_blooms(800_000, iter::once(&bloom)).unwrap(); - c.bench_function("blooms_filter_1_million_ok", move |b| { - b.iter(|| { - let matches = database.filter(0, 999_999, Some(&bloom)).unwrap(); - assert_eq!(matches, vec![200_000, 400_000, 600_000, 800_000]); - }) - }); + c.bench_function("blooms_filter_1_million_ok", move |b| { + b.iter(|| { + let matches = database.filter(0, 999_999, Some(&bloom)).unwrap(); + assert_eq!(matches, vec![200_000, 400_000, 600_000, 800_000]); + }) + }); } fn bench_blooms_filter_1_million_miss(c: &mut Criterion) { - let tempdir = TempDir::new("").unwrap(); - let database = Database::open(tempdir.path()).unwrap(); - database.insert_blooms(999_999, iter::once(&Bloom::zero())).unwrap(); - let bloom = Bloom::from(0x001); - let bad_bloom = Bloom::from(0x0001); - database.insert_blooms(200_000, iter::once(&bloom)).unwrap(); - database.insert_blooms(400_000, iter::once(&bloom)).unwrap(); - database.insert_blooms(600_000, iter::once(&bloom)).unwrap(); - database.insert_blooms(800_000, iter::once(&bloom)).unwrap(); + let tempdir = TempDir::new("").unwrap(); + let database = Database::open(tempdir.path()).unwrap(); + database + .insert_blooms(999_999, iter::once(&Bloom::zero())) + .unwrap(); + let bloom = Bloom::from(0x001); + let bad_bloom = Bloom::from(0x0001); + database.insert_blooms(200_000, iter::once(&bloom)).unwrap(); + database.insert_blooms(400_000, iter::once(&bloom)).unwrap(); + 
database.insert_blooms(600_000, iter::once(&bloom)).unwrap(); + database.insert_blooms(800_000, iter::once(&bloom)).unwrap(); - c.bench_function("blooms_filter_1_million_miss", move |b| { - b.iter(|| { - let matches = database.filter(0, 999_999, Some(&bad_bloom)).unwrap(); - assert_eq!(matches, vec![200_000, 400_000, 600_000, 800_000]); - }) - }); + c.bench_function("blooms_filter_1_million_miss", move |b| { + b.iter(|| { + let matches = database.filter(0, 999_999, Some(&bad_bloom)).unwrap(); + assert_eq!(matches, vec![200_000, 400_000, 600_000, 800_000]); + }) + }); } fn bench_blooms_filter_1_million_miss_and_ok(c: &mut Criterion) { - let tempdir = TempDir::new("").unwrap(); - let database = Database::open(tempdir.path()).unwrap(); - database.insert_blooms(999_999, iter::once(&Bloom::zero())).unwrap(); - let bloom = Bloom::from(0x001); - let bad_bloom = Bloom::from(0x0001); - database.insert_blooms(200_000, iter::once(&bloom)).unwrap(); - database.insert_blooms(400_000, iter::once(&bloom)).unwrap(); - database.insert_blooms(600_000, iter::once(&bloom)).unwrap(); - database.insert_blooms(800_000, iter::once(&bloom)).unwrap(); + let tempdir = TempDir::new("").unwrap(); + let database = Database::open(tempdir.path()).unwrap(); + database + .insert_blooms(999_999, iter::once(&Bloom::zero())) + .unwrap(); + let bloom = Bloom::from(0x001); + let bad_bloom = Bloom::from(0x0001); + database.insert_blooms(200_000, iter::once(&bloom)).unwrap(); + database.insert_blooms(400_000, iter::once(&bloom)).unwrap(); + database.insert_blooms(600_000, iter::once(&bloom)).unwrap(); + database.insert_blooms(800_000, iter::once(&bloom)).unwrap(); - c.bench_function("blooms_filter_1_million_miss_and_ok", move |b| { - b.iter(|| { - let matches = database.filter(0, 999_999, &vec![bad_bloom, bloom]).unwrap(); - assert_eq!(matches, vec![200_000, 400_000, 600_000, 800_000]); - }) - }); + c.bench_function("blooms_filter_1_million_miss_and_ok", move |b| { + b.iter(|| { + let matches = database + 
.filter(0, 999_999, &vec![bad_bloom, bloom]) + .unwrap(); + assert_eq!(matches, vec![200_000, 400_000, 600_000, 800_000]); + }) + }); } diff --git a/util/blooms-db/src/db.rs b/util/blooms-db/src/db.rs index 2a05ec812..fa0890bab 100644 --- a/util/blooms-db/src/db.rs +++ b/util/blooms-db/src/db.rs @@ -14,356 +14,474 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::{error, io, fmt}; -use std::path::{Path, PathBuf}; +use std::{ + error, fmt, io, + path::{Path, PathBuf}, +}; use ethbloom; use file::{File, FileIterator}; -fn other_io_err(e: E) -> io::Error where E: Into> { - io::Error::new(io::ErrorKind::Other, e) +fn other_io_err(e: E) -> io::Error +where + E: Into>, +{ + io::Error::new(io::ErrorKind::Other, e) } /// Bloom positions in database files. #[derive(Debug)] struct Positions { - top: u64, - mid: u64, - bot: u64 + top: u64, + mid: u64, + bot: u64, } impl Positions { - fn from_index(index: u64) -> Self { - Positions { - top: index >> 8, - mid: index >> 4, - bot: index, - } - } + fn from_index(index: u64) -> Self { + Positions { + top: index >> 8, + mid: index >> 4, + bot: index, + } + } } struct DatabaseFilesIterator<'a> { - pub top: FileIterator<'a>, - pub mid: FileIterator<'a>, - pub bot: FileIterator<'a>, + pub top: FileIterator<'a>, + pub mid: FileIterator<'a>, + pub bot: FileIterator<'a>, } /// Blooms database files. 
struct DatabaseFiles { - /// Top level bloom file - /// - /// Every bloom represents 16 blooms on mid level - top: File, - /// Mid level bloom file - /// - /// Every bloom represents 16 blooms on bot level - mid: File, - /// Bot level bloom file - /// - /// Every bloom is an ethereum header bloom - bot: File, + /// Top level bloom file + /// + /// Every bloom represents 16 blooms on mid level + top: File, + /// Mid level bloom file + /// + /// Every bloom represents 16 blooms on bot level + mid: File, + /// Bot level bloom file + /// + /// Every bloom is an ethereum header bloom + bot: File, } impl DatabaseFiles { - /// Open the blooms db files - pub fn open(path: &Path) -> io::Result { - Ok(DatabaseFiles { - top: File::open(path.join("top.bdb"))?, - mid: File::open(path.join("mid.bdb"))?, - bot: File::open(path.join("bot.bdb"))?, - }) - } + /// Open the blooms db files + pub fn open(path: &Path) -> io::Result { + Ok(DatabaseFiles { + top: File::open(path.join("top.bdb"))?, + mid: File::open(path.join("mid.bdb"))?, + bot: File::open(path.join("bot.bdb"))?, + }) + } - pub fn accrue_bloom(&mut self, pos: Positions, bloom: ethbloom::BloomRef) -> io::Result<()> { - self.top.accrue_bloom::(pos.top, bloom)?; - self.mid.accrue_bloom::(pos.mid, bloom)?; - self.bot.replace_bloom::(pos.bot, bloom)?; - Ok(()) - } + pub fn accrue_bloom(&mut self, pos: Positions, bloom: ethbloom::BloomRef) -> io::Result<()> { + self.top + .accrue_bloom::(pos.top, bloom)?; + self.mid + .accrue_bloom::(pos.mid, bloom)?; + self.bot + .replace_bloom::(pos.bot, bloom)?; + Ok(()) + } - pub fn iterator_from(&mut self, pos: Positions) -> io::Result { - Ok(DatabaseFilesIterator { - top: self.top.iterator_from(pos.top)?, - mid: self.mid.iterator_from(pos.mid)?, - bot: self.bot.iterator_from(pos.bot)?, - }) - } + pub fn iterator_from(&mut self, pos: Positions) -> io::Result { + Ok(DatabaseFilesIterator { + top: self.top.iterator_from(pos.top)?, + mid: self.mid.iterator_from(pos.mid)?, + bot: 
self.bot.iterator_from(pos.bot)?, + }) + } - fn flush(&mut self) -> io::Result<()> { - self.top.flush()?; - self.mid.flush()?; - self.bot.flush()?; - Ok(()) - } + fn flush(&mut self) -> io::Result<()> { + self.top.flush()?; + self.mid.flush()?; + self.bot.flush()?; + Ok(()) + } } impl Drop for DatabaseFiles { - /// Flush the database files on drop - fn drop(&mut self) { - self.flush().ok(); - } + /// Flush the database files on drop + fn drop(&mut self) { + self.flush().ok(); + } } /// Blooms database. pub struct Database { - /// Database files - db_files: Option, - /// Database path - path: PathBuf, + /// Database files + db_files: Option, + /// Database path + path: PathBuf, } impl Database { - /// Opens blooms database. - pub fn open

(path: P) -> io::Result where P: AsRef { - let path: PathBuf = path.as_ref().to_path_buf(); - let database = Database { - db_files: Some(DatabaseFiles::open(&path)?), - path: path, - }; + /// Opens blooms database. + pub fn open

(path: P) -> io::Result + where + P: AsRef, + { + let path: PathBuf = path.as_ref().to_path_buf(); + let database = Database { + db_files: Some(DatabaseFiles::open(&path)?), + path: path, + }; - Ok(database) - } + Ok(database) + } - /// Close the inner-files - pub fn close(&mut self) -> io::Result<()> { - self.db_files = None; - Ok(()) - } + /// Close the inner-files + pub fn close(&mut self) -> io::Result<()> { + self.db_files = None; + Ok(()) + } - /// Reopens the database at the same location. - pub fn reopen(&mut self) -> io::Result<()> { - self.db_files = Some(DatabaseFiles::open(&self.path)?); - Ok(()) - } + /// Reopens the database at the same location. + pub fn reopen(&mut self) -> io::Result<()> { + self.db_files = Some(DatabaseFiles::open(&self.path)?); + Ok(()) + } - /// Insert consecutive blooms into database starting at the given positon. - pub fn insert_blooms<'a, I, B>(&mut self, from: u64, blooms: I) -> io::Result<()> - where ethbloom::BloomRef<'a>: From, I: Iterator { - match self.db_files { - Some(ref mut db_files) => { - for (index, bloom) in (from..).into_iter().zip(blooms.map(Into::into)) { - let pos = Positions::from_index(index); + /// Insert consecutive blooms into database starting at the given positon. 
+ pub fn insert_blooms<'a, I, B>(&mut self, from: u64, blooms: I) -> io::Result<()> + where + ethbloom::BloomRef<'a>: From, + I: Iterator, + { + match self.db_files { + Some(ref mut db_files) => { + for (index, bloom) in (from..).into_iter().zip(blooms.map(Into::into)) { + let pos = Positions::from_index(index); - // Constant forks may lead to increased ratio of false positives in bloom filters - // since we do not rebuild top or mid level, but we should not be worried about that - // because most of the time events at block n(a) occur also on block n(b) or n+1(b) - db_files.accrue_bloom(pos, bloom)?; - } - db_files.flush()?; - Ok(()) - }, - None => Err(other_io_err("Database is closed")), - } - } + // Constant forks may lead to increased ratio of false positives in bloom filters + // since we do not rebuild top or mid level, but we should not be worried about that + // because most of the time events at block n(a) occur also on block n(b) or n+1(b) + db_files.accrue_bloom(pos, bloom)?; + } + db_files.flush()?; + Ok(()) + } + None => Err(other_io_err("Database is closed")), + } + } - /// Returns an iterator yielding all indexes containing given bloom. - pub fn iterate_matching<'a, 'b, B, I, II>(&'a mut self, from: u64, to: u64, blooms: II) -> io::Result> - where ethbloom::BloomRef<'b>: From, 'b: 'a, II: IntoIterator + Copy, I: Iterator { - match self.db_files { - Some(ref mut db_files) => { - let index = from / 256 * 256; - let pos = Positions::from_index(index); - let files_iter = db_files.iterator_from(pos)?; + /// Returns an iterator yielding all indexes containing given bloom. 
+ pub fn iterate_matching<'a, 'b, B, I, II>( + &'a mut self, + from: u64, + to: u64, + blooms: II, + ) -> io::Result> + where + ethbloom::BloomRef<'b>: From, + 'b: 'a, + II: IntoIterator + Copy, + I: Iterator, + { + match self.db_files { + Some(ref mut db_files) => { + let index = from / 256 * 256; + let pos = Positions::from_index(index); + let files_iter = db_files.iterator_from(pos)?; - let iter = DatabaseIterator { - top: files_iter.top, - mid: files_iter.mid, - bot: files_iter.bot, - state: IteratorState::Top, - from, - to, - index, - blooms, - }; + let iter = DatabaseIterator { + top: files_iter.top, + mid: files_iter.mid, + bot: files_iter.bot, + state: IteratorState::Top, + from, + to, + index, + blooms, + }; - Ok(iter) - }, - None => Err(other_io_err("Database is closed")), - } - } + Ok(iter) + } + None => Err(other_io_err("Database is closed")), + } + } } fn contains_any<'a, I, B>(bloom: ethbloom::Bloom, mut iterator: I) -> bool -where ethbloom::BloomRef<'a>: From, I: Iterator { - iterator.any(|item| bloom.contains_bloom(item)) +where + ethbloom::BloomRef<'a>: From, + I: Iterator, +{ + iterator.any(|item| bloom.contains_bloom(item)) } /// Blooms database iterator pub struct DatabaseIterator<'a, I> { - top: FileIterator<'a>, - mid: FileIterator<'a>, - bot: FileIterator<'a>, - state: IteratorState, - from: u64, - to: u64, - index: u64, - blooms: I, + top: FileIterator<'a>, + mid: FileIterator<'a>, + bot: FileIterator<'a>, + state: IteratorState, + from: u64, + to: u64, + index: u64, + blooms: I, } impl<'a, I> fmt::Debug for DatabaseIterator<'a, I> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("DatabaseIterator") - .field("state", &self.state) - .field("from", &self.from) - .field("to", &self.to) - .field("index", &self.index) - .field("blooms", &"...") - .field("top", &"...") - .field("mid", &"...") - .field("bot", &"...") - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + 
f.debug_struct("DatabaseIterator") + .field("state", &self.state) + .field("from", &self.from) + .field("to", &self.to) + .field("index", &self.index) + .field("blooms", &"...") + .field("top", &"...") + .field("mid", &"...") + .field("bot", &"...") + .finish() + } } /// Database iterator state. #[derive(Debug)] enum IteratorState { - /// Iterator should read top level bloom - Top, - /// Iterator should read mid level bloom `x` more times - Mid(usize), - /// Iterator should read mid level bloom `mid` more times - /// and bot level `mix * 16 + bot` times - Bot { mid: usize, bot: usize }, + /// Iterator should read top level bloom + Top, + /// Iterator should read mid level bloom `x` more times + Mid(usize), + /// Iterator should read mid level bloom `mid` more times + /// and bot level `mix * 16 + bot` times + Bot { mid: usize, bot: usize }, } impl<'a, 'b, B, I, II> Iterator for DatabaseIterator<'a, II> -where ethbloom::BloomRef<'b>: From, 'b: 'a, II: IntoIterator + Copy, I: Iterator { - type Item = io::Result; +where + ethbloom::BloomRef<'b>: From, + 'b: 'a, + II: IntoIterator + Copy, + I: Iterator, +{ + type Item = io::Result; - fn next(&mut self) -> Option { - macro_rules! try_o { - ($expr: expr) => { - match $expr { - Err(err) => return Some(Err(err)), - Ok(ok) => ok, - } - } - } + fn next(&mut self) -> Option { + macro_rules! try_o { + ($expr: expr) => { + match $expr { + Err(err) => return Some(Err(err)), + Ok(ok) => ok, + } + }; + } - macro_rules! next_bloom { - ($iter: expr) => { - try_o!($iter.next()?) - } - } + macro_rules! next_bloom { + ($iter: expr) => { + try_o!($iter.next()?) 
+ }; + } - loop { - if self.index > self.to { - return None; - } + loop { + if self.index > self.to { + return None; + } - self.state = match self.state { - IteratorState::Top => { - if contains_any(next_bloom!(self.top), self.blooms.into_iter()) { - IteratorState::Mid(16) - } else { - self.index += 256; - try_o!(self.mid.advance(16)); - try_o!(self.bot.advance(256)); - IteratorState::Top - } - }, - IteratorState::Mid(left) => { - if left == 0 { - IteratorState::Top - } else if contains_any(next_bloom!(self.mid), self.blooms.into_iter()) && self.index + 16 >= self.from { - IteratorState::Bot { mid: left - 1, bot: 16 } - } else { - self.index += 16; - try_o!(self.bot.advance(16)); - IteratorState::Mid(left - 1) - } - }, - IteratorState::Bot { mid, bot } => { - if bot == 0 { - IteratorState::Mid(mid) - } else if contains_any(next_bloom!(self.bot), self.blooms.into_iter()) && self.index >= self.from { - let result = self.index; - self.index += 1; - self.state = IteratorState::Bot { mid, bot: bot - 1 }; - return Some(Ok(result)); - } else { - self.index += 1; - IteratorState::Bot { mid, bot: bot - 1 } - } - } - } - } - } + self.state = match self.state { + IteratorState::Top => { + if contains_any(next_bloom!(self.top), self.blooms.into_iter()) { + IteratorState::Mid(16) + } else { + self.index += 256; + try_o!(self.mid.advance(16)); + try_o!(self.bot.advance(256)); + IteratorState::Top + } + } + IteratorState::Mid(left) => { + if left == 0 { + IteratorState::Top + } else if contains_any(next_bloom!(self.mid), self.blooms.into_iter()) + && self.index + 16 >= self.from + { + IteratorState::Bot { + mid: left - 1, + bot: 16, + } + } else { + self.index += 16; + try_o!(self.bot.advance(16)); + IteratorState::Mid(left - 1) + } + } + IteratorState::Bot { mid, bot } => { + if bot == 0 { + IteratorState::Mid(mid) + } else if contains_any(next_bloom!(self.bot), self.blooms.into_iter()) + && self.index >= self.from + { + let result = self.index; + self.index += 1; + self.state = 
IteratorState::Bot { mid, bot: bot - 1 }; + return Some(Ok(result)); + } else { + self.index += 1; + IteratorState::Bot { mid, bot: bot - 1 } + } + } + } + } + } } #[cfg(test)] mod tests { - use ethbloom::Bloom; - use tempdir::TempDir; - use super::Database; + use super::Database; + use ethbloom::Bloom; + use tempdir::TempDir; - #[test] - fn test_database() { - let tempdir = TempDir::new("").unwrap(); - let mut database = Database::open(tempdir.path()).unwrap(); - database.insert_blooms(0, vec![Bloom::from(0), Bloom::from(0x01), Bloom::from(0x10), Bloom::from(0x11)].iter()).unwrap(); + #[test] + fn test_database() { + let tempdir = TempDir::new("").unwrap(); + let mut database = Database::open(tempdir.path()).unwrap(); + database + .insert_blooms( + 0, + vec![ + Bloom::from(0), + Bloom::from(0x01), + Bloom::from(0x10), + Bloom::from(0x11), + ] + .iter(), + ) + .unwrap(); - let matches = database.iterate_matching(0, 3, Some(&Bloom::from(0))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![0, 1, 2, 3]); + let matches = database + .iterate_matching(0, 3, Some(&Bloom::from(0))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![0, 1, 2, 3]); - let matches = database.iterate_matching(0, 4, Some(&Bloom::from(0))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![0, 1, 2, 3]); + let matches = database + .iterate_matching(0, 4, Some(&Bloom::from(0))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![0, 1, 2, 3]); - let matches = database.iterate_matching(1, 3, Some(&Bloom::from(0))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![1, 2, 3]); + let matches = database + .iterate_matching(1, 3, Some(&Bloom::from(0))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![1, 2, 3]); - let matches = database.iterate_matching(1, 2, Some(&Bloom::from(0))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![1, 2]); + let matches = database + .iterate_matching(1, 2, 
Some(&Bloom::from(0))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![1, 2]); - let matches = database.iterate_matching(0, 3, Some(&Bloom::from(0x01))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![1, 3]); + let matches = database + .iterate_matching(0, 3, Some(&Bloom::from(0x01))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![1, 3]); - let matches = database.iterate_matching(0, 3, Some(&Bloom::from(0x10))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![2, 3]); + let matches = database + .iterate_matching(0, 3, Some(&Bloom::from(0x10))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![2, 3]); - let matches = database.iterate_matching(2, 2, Some(&Bloom::from(0x10))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![2]); - } + let matches = database + .iterate_matching(2, 2, Some(&Bloom::from(0x10))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![2]); + } - #[test] - fn test_database2() { - let tempdir = TempDir::new("").unwrap(); - let mut database = Database::open(tempdir.path()).unwrap(); - database.insert_blooms(254, vec![Bloom::from(0x100), Bloom::from(0x01), Bloom::from(0x10), Bloom::from(0x11)].iter()).unwrap(); + #[test] + fn test_database2() { + let tempdir = TempDir::new("").unwrap(); + let mut database = Database::open(tempdir.path()).unwrap(); + database + .insert_blooms( + 254, + vec![ + Bloom::from(0x100), + Bloom::from(0x01), + Bloom::from(0x10), + Bloom::from(0x11), + ] + .iter(), + ) + .unwrap(); - let matches = database.iterate_matching(0, 257, Some(&Bloom::from(0x01))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![255, 257]); + let matches = database + .iterate_matching(0, 257, Some(&Bloom::from(0x01))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![255, 257]); - let matches = database.iterate_matching(0, 258, Some(&Bloom::from(0x100))).unwrap().collect::, 
_>>().unwrap(); - assert_eq!(matches, vec![254]); + let matches = database + .iterate_matching(0, 258, Some(&Bloom::from(0x100))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![254]); - let matches = database.iterate_matching(0, 256, Some(&Bloom::from(0x01))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![255]); + let matches = database + .iterate_matching(0, 256, Some(&Bloom::from(0x01))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![255]); - let matches = database.iterate_matching(255, 255, Some(&Bloom::from(0x01))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![255]); + let matches = database + .iterate_matching(255, 255, Some(&Bloom::from(0x01))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![255]); - let matches = database.iterate_matching(256, 256, Some(&Bloom::from(0x10))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![256]); + let matches = database + .iterate_matching(256, 256, Some(&Bloom::from(0x10))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![256]); - let matches = database.iterate_matching(256, 257, Some(&Bloom::from(0x10))).unwrap().collect::, _>>().unwrap(); - assert_eq!(matches, vec![256, 257]); - } + let matches = database + .iterate_matching(256, 257, Some(&Bloom::from(0x10))) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(matches, vec![256, 257]); + } - #[test] - fn test_db_close() { - let tempdir = TempDir::new("").unwrap(); - let blooms = vec![Bloom::from(0x100), Bloom::from(0x01), Bloom::from(0x10), Bloom::from(0x11)]; - let mut database = Database::open(tempdir.path()).unwrap(); + #[test] + fn test_db_close() { + let tempdir = TempDir::new("").unwrap(); + let blooms = vec![ + Bloom::from(0x100), + Bloom::from(0x01), + Bloom::from(0x10), + Bloom::from(0x11), + ]; + let mut database = Database::open(tempdir.path()).unwrap(); - // Close the DB and ensure inserting blooms errors - 
database.close().unwrap(); - assert!(database.insert_blooms(254, blooms.iter()).is_err()); + // Close the DB and ensure inserting blooms errors + database.close().unwrap(); + assert!(database.insert_blooms(254, blooms.iter()).is_err()); - // Reopen it and ensure inserting blooms is OK - database.reopen().unwrap(); - assert!(database.insert_blooms(254, blooms.iter()).is_ok()); - } + // Reopen it and ensure inserting blooms is OK + database.reopen().unwrap(); + assert!(database.insert_blooms(254, blooms.iter()).is_ok()); + } } diff --git a/util/blooms-db/src/file.rs b/util/blooms-db/src/file.rs index 0362ce884..d07c9ad6e 100644 --- a/util/blooms-db/src/file.rs +++ b/util/blooms-db/src/file.rs @@ -14,141 +14,145 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::io::{Seek, SeekFrom, Write, Read}; -use std::path::Path; -use std::{io, fs}; +use std::{ + fs, io, + io::{Read, Seek, SeekFrom, Write}, + path::Path, +}; use ethbloom; /// Autoresizable file containing blooms. pub struct File { - /// Backing file. - file: fs::File, - /// Current file len. - len: u64, + /// Backing file. + file: fs::File, + /// Current file len. + len: u64, } impl File { - /// Opens database file. Creates new file if database file does not exist. - pub fn open

(path: P) -> io::Result where P: AsRef { - let file = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - // appending is done manually by calling `ensure_space_for_write` - .append(false) - .open(path)?; - let len = file.metadata()?.len(); + /// Opens database file. Creates new file if database file does not exist. + pub fn open

(path: P) -> io::Result + where + P: AsRef, + { + let file = fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + // appending is done manually by calling `ensure_space_for_write` + .append(false) + .open(path)?; + let len = file.metadata()?.len(); - let file = File { - file, - len, - }; + let file = File { file, len }; - Ok(file) + Ok(file) + } - } + /// Resizes the file if there is not enough space to write bloom at given position. + fn ensure_space_for_write(&mut self, pos: u64) -> io::Result<()> { + // position to write + 256 bytes + let required_space = (pos + 1) * 256; + if required_space > self.len { + self.file.set_len(required_space)?; + self.len = required_space; + } + Ok(()) + } - /// Resizes the file if there is not enough space to write bloom at given position. - fn ensure_space_for_write(&mut self, pos: u64) -> io::Result<()> { - // position to write + 256 bytes - let required_space = (pos + 1) * 256; - if required_space > self.len { - self.file.set_len(required_space)?; - self.len = required_space; - } - Ok(()) - } + /// Read bloom at given position. + pub fn read_bloom(&self, pos: u64) -> io::Result { + let mut file_ref = &self.file; + file_ref.seek(SeekFrom::Start(pos * 256))?; + let mut bloom = ethbloom::Bloom::default(); + file_ref.read_exact(&mut bloom)?; + Ok(bloom) + } - /// Read bloom at given position. - pub fn read_bloom(&self, pos: u64) -> io::Result { - let mut file_ref = &self.file; - file_ref.seek(SeekFrom::Start(pos * 256))?; - let mut bloom = ethbloom::Bloom::default(); - file_ref.read_exact(&mut bloom)?; - Ok(bloom) - } + /// Accrue bloom into bloom at given position. 
+ pub fn accrue_bloom<'a, B>(&mut self, pos: u64, bloom: B) -> io::Result<()> + where + ethbloom::BloomRef<'a>: From, + { + self.ensure_space_for_write(pos)?; + let mut old_bloom: ethbloom::Bloom = self.read_bloom(pos)?; + old_bloom.accrue_bloom(bloom); + let mut file_ref = &self.file; + file_ref.seek(SeekFrom::Start(pos * 256))?; + file_ref.write_all(&old_bloom) + } - /// Accrue bloom into bloom at given position. - pub fn accrue_bloom<'a, B>(&mut self, pos: u64, bloom: B) -> io::Result<()> where ethbloom::BloomRef<'a>: From { - self.ensure_space_for_write(pos)?; - let mut old_bloom: ethbloom::Bloom = self.read_bloom(pos)?; - old_bloom.accrue_bloom(bloom); - let mut file_ref = &self.file; - file_ref.seek(SeekFrom::Start(pos * 256))?; - file_ref.write_all(&old_bloom) - } + /// Replace bloom at given position with a new one. + pub fn replace_bloom<'a, B>(&mut self, pos: u64, bloom: B) -> io::Result<()> + where + ethbloom::BloomRef<'a>: From, + { + self.ensure_space_for_write(pos)?; + let mut file_ref = &self.file; + file_ref.seek(SeekFrom::Start(pos * 256))?; + file_ref.write_all(ethbloom::BloomRef::from(bloom).data()) + } - /// Replace bloom at given position with a new one. - pub fn replace_bloom<'a, B>(&mut self, pos: u64, bloom: B) -> io::Result<()> where ethbloom::BloomRef<'a>: From { - self.ensure_space_for_write(pos)?; - let mut file_ref = &self.file; - file_ref.seek(SeekFrom::Start(pos * 256))?; - file_ref.write_all(ethbloom::BloomRef::from(bloom).data()) - } + /// Returns an iterator over file. + /// + /// This function needs to be mutable `fs::File` is just a shared reference a system file handle. + /// https://users.rust-lang.org/t/how-to-handle-match-with-irrelevant-ok--/6291/15 + pub fn iterator_from(&mut self, pos: u64) -> io::Result { + let start = std::cmp::min(self.len, pos * 256); + let mut buf_reader = io::BufReader::new(&self.file); + buf_reader.seek(SeekFrom::Start(start))?; - /// Returns an iterator over file. 
- /// - /// This function needs to be mutable `fs::File` is just a shared reference a system file handle. - /// https://users.rust-lang.org/t/how-to-handle-match-with-irrelevant-ok--/6291/15 - pub fn iterator_from(&mut self, pos: u64) -> io::Result { - let start = std::cmp::min(self.len, pos * 256); - let mut buf_reader = io::BufReader::new(&self.file); - buf_reader.seek(SeekFrom::Start(start))?; + let iter = FileIterator { file: buf_reader }; - let iter = FileIterator { - file: buf_reader, - }; + Ok(iter) + } - Ok(iter) - } - - /// Flush outstanding modifications to the disk - pub fn flush(&mut self) -> io::Result<()> { - self.file.flush() - } + /// Flush outstanding modifications to the disk + pub fn flush(&mut self) -> io::Result<()> { + self.file.flush() + } } /// Iterator over blooms of a single file. pub struct FileIterator<'a> { - /// Backing file. - file: io::BufReader<&'a fs::File>, + /// Backing file. + file: io::BufReader<&'a fs::File>, } impl<'a> FileIterator<'a> { - /// Advance file by n blooms - pub fn advance(&mut self, n: u64) -> io::Result<()> { - self.file.seek(SeekFrom::Current(n as i64 * 256))?; - Ok(()) - } + /// Advance file by n blooms + pub fn advance(&mut self, n: u64) -> io::Result<()> { + self.file.seek(SeekFrom::Current(n as i64 * 256))?; + Ok(()) + } } impl<'a> Iterator for FileIterator<'a> { - type Item = io::Result; + type Item = io::Result; - fn next(&mut self) -> Option { - let mut bloom = ethbloom::Bloom::default(); - match self.file.read_exact(&mut bloom) { - Ok(_) => Some(Ok(bloom)), - Err(ref err) if err.kind() == io::ErrorKind::UnexpectedEof => None, - Err(err) => Some(Err(err)), - } - } + fn next(&mut self) -> Option { + let mut bloom = ethbloom::Bloom::default(); + match self.file.read_exact(&mut bloom) { + Ok(_) => Some(Ok(bloom)), + Err(ref err) if err.kind() == io::ErrorKind::UnexpectedEof => None, + Err(err) => Some(Err(err)), + } + } } #[cfg(test)] mod tests { - use ethbloom::Bloom; - use tempdir::TempDir; - use 
super::File; + use super::File; + use ethbloom::Bloom; + use tempdir::TempDir; - #[test] - fn test_file() { - let tempdir = TempDir::new("").unwrap(); - let mut file = File::open(tempdir.path().join("file")).unwrap(); - file.accrue_bloom(0, &Bloom::from(1)).unwrap(); - file.flush().unwrap(); - assert_eq!(file.read_bloom(0).unwrap(), Bloom::from(1)); - - } + #[test] + fn test_file() { + let tempdir = TempDir::new("").unwrap(); + let mut file = File::open(tempdir.path().join("file")).unwrap(); + file.accrue_bloom(0, &Bloom::from(1)).unwrap(); + file.flush().unwrap(); + assert_eq!(file.read_bloom(0).unwrap(), Bloom::from(1)); + } } diff --git a/util/blooms-db/src/lib.rs b/util/blooms-db/src/lib.rs index 83baaffae..9594c3a1c 100644 --- a/util/blooms-db/src/lib.rs +++ b/util/blooms-db/src/lib.rs @@ -27,9 +27,8 @@ extern crate tempdir; mod db; mod file; -use std::io; -use std::path::Path; use parking_lot::Mutex; +use std::{io, path::Path}; /// Threadsafe API for blooms database. /// @@ -37,55 +36,66 @@ use parking_lot::Mutex; /// /// This database does not guarantee atomic writes. pub struct Database { - database: Mutex, + database: Mutex, } impl Database { - /// Creates new database handle. - /// - /// # Arguments - /// - /// * `path` - database directory - pub fn open

(path: P) -> io::Result where P: AsRef { - let result = Database { - database: Mutex::new(db::Database::open(path)?), - }; + /// Creates new database handle. + /// + /// # Arguments + /// + /// * `path` - database directory + pub fn open

(path: P) -> io::Result + where + P: AsRef, + { + let result = Database { + database: Mutex::new(db::Database::open(path)?), + }; - Ok(result) - } + Ok(result) + } - /// Closes the inner database - pub fn close(&self) -> io::Result<()> { - self.database.lock().close() - } + /// Closes the inner database + pub fn close(&self) -> io::Result<()> { + self.database.lock().close() + } - /// Reopens database at the same location. - pub fn reopen(&self) -> io::Result<()> { - self.database.lock().reopen() - } + /// Reopens database at the same location. + pub fn reopen(&self) -> io::Result<()> { + self.database.lock().reopen() + } - /// Inserts one or more blooms into database. - /// - /// # Arguments - /// - /// * `from` - index of the first bloom that needs to be inserted - /// * `blooms` - iterator over blooms - pub fn insert_blooms<'a, I, B>(&self, from: u64, blooms: I) -> io::Result<()> - where ethbloom::BloomRef<'a>: From, I: Iterator { - self.database.lock().insert_blooms(from, blooms) - } + /// Inserts one or more blooms into database. + /// + /// # Arguments + /// + /// * `from` - index of the first bloom that needs to be inserted + /// * `blooms` - iterator over blooms + pub fn insert_blooms<'a, I, B>(&self, from: u64, blooms: I) -> io::Result<()> + where + ethbloom::BloomRef<'a>: From, + I: Iterator, + { + self.database.lock().insert_blooms(from, blooms) + } - /// Returns indexes of all headers matching given bloom in a specified range. - /// - /// # Arguments - /// - /// * `from` - index of the first bloom that needs to be checked - /// * `to` - index of the last bloom that needs to be checked (inclusive range) - /// * `blooms` - searched pattern - pub fn filter<'a, B, I, II>(&self, from: u64, to: u64, blooms: II) -> io::Result> - where ethbloom::BloomRef<'a>: From, II: IntoIterator + Copy, I: Iterator { - self.database.lock() - .iterate_matching(from, to, blooms)? 
- .collect::, _>>() - } + /// Returns indexes of all headers matching given bloom in a specified range. + /// + /// # Arguments + /// + /// * `from` - index of the first bloom that needs to be checked + /// * `to` - index of the last bloom that needs to be checked (inclusive range) + /// * `blooms` - searched pattern + pub fn filter<'a, B, I, II>(&self, from: u64, to: u64, blooms: II) -> io::Result> + where + ethbloom::BloomRef<'a>: From, + II: IntoIterator + Copy, + I: Iterator, + { + self.database + .lock() + .iterate_matching(from, to, blooms)? + .collect::, _>>() + } } diff --git a/util/dir/src/helpers.rs b/util/dir/src/helpers.rs index 39cc767ca..1684ad3b9 100644 --- a/util/dir/src/helpers.rs +++ b/util/dir/src/helpers.rs @@ -15,23 +15,26 @@ // along with Parity Ethereum. If not, see . //! Directory helper functions -use ::home_dir; +use home_dir; /// Replaces `$HOME` str with home directory path. pub fn replace_home(base: &str, arg: &str) -> String { - // the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support` - // We use an `if` so that we don't need to call `home_dir()` if not necessary. - let r = if arg.contains("$HOME") { - arg.replace("$HOME", home_dir().expect("$HOME isn't defined").to_str().unwrap()) - } else { - arg.to_owned() - }; - let r = r.replace("$BASE", base); - r.replace("/", &::std::path::MAIN_SEPARATOR.to_string()) + // the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support` + // We use an `if` so that we don't need to call `home_dir()` if not necessary. + let r = if arg.contains("$HOME") { + arg.replace( + "$HOME", + home_dir().expect("$HOME isn't defined").to_str().unwrap(), + ) + } else { + arg.to_owned() + }; + let r = r.replace("$BASE", base); + r.replace("/", &::std::path::MAIN_SEPARATOR.to_string()) } /// Replaces `$HOME` str with home directory path and `$LOCAL` with local path. 
pub fn replace_home_and_local(base: &str, local: &str, arg: &str) -> String { - let r = replace_home(base, arg); - r.replace("$LOCAL", local) + let r = replace_home(base, arg); + r.replace("$LOCAL", local) } diff --git a/util/dir/src/lib.rs b/util/dir/src/lib.rs index fa7882cdd..d2460b9c4 100644 --- a/util/dir/src/lib.rs +++ b/util/dir/src/lib.rs @@ -19,34 +19,42 @@ //! Dir utilities for platform-specific operations extern crate app_dirs; extern crate ethereum_types; -extern crate journaldb; extern crate home; +extern crate journaldb; pub mod helpers; -use std::fs; -use std::path::{PathBuf, Path}; -use ethereum_types::{H64, H256}; -use journaldb::Algorithm; +use app_dirs::{get_app_root, AppDataType, AppInfo}; +use ethereum_types::{H256, H64}; use helpers::{replace_home, replace_home_and_local}; -use app_dirs::{AppInfo, get_app_root, AppDataType}; +use journaldb::Algorithm; +use std::{ + fs, + path::{Path, PathBuf}, +}; // re-export platform-specific functions use platform::*; pub use home::home_dir; /// Platform-specific chains path for standard client - Windows only -#[cfg(target_os = "windows")] pub const CHAINS_PATH: &str = "$LOCAL/chains"; +#[cfg(target_os = "windows")] +pub const CHAINS_PATH: &str = "$LOCAL/chains"; /// Platform-specific chains path for light client - Windows only -#[cfg(target_os = "windows")] pub const CHAINS_PATH_LIGHT: &str = "$LOCAL/chains_light"; +#[cfg(target_os = "windows")] +pub const CHAINS_PATH_LIGHT: &str = "$LOCAL/chains_light"; /// Platform-specific chains path for standard client -#[cfg(not(target_os = "windows"))] pub const CHAINS_PATH: &str = "$BASE/chains"; +#[cfg(not(target_os = "windows"))] +pub const CHAINS_PATH: &str = "$BASE/chains"; /// Platform-specific chains path for light client -#[cfg(not(target_os = "windows"))] pub const CHAINS_PATH_LIGHT: &str = "$BASE/chains_light"; +#[cfg(not(target_os = "windows"))] +pub const CHAINS_PATH_LIGHT: &str = "$BASE/chains_light"; /// Platform-specific cache path - Windows only 
-#[cfg(target_os = "windows")] pub const CACHE_PATH: &str = "$LOCAL/cache"; +#[cfg(target_os = "windows")] +pub const CACHE_PATH: &str = "$LOCAL/cache"; /// Platform-specific cache path -#[cfg(not(target_os = "windows"))] pub const CACHE_PATH: &str = "$BASE/cache"; +#[cfg(not(target_os = "windows"))] +pub const CACHE_PATH: &str = "$BASE/cache"; // this const is irrelevent cause we do have migrations now, // but we still use it for backwards compatibility @@ -55,307 +63,355 @@ const LEGACY_CLIENT_DB_VER_STR: &str = "5.3"; #[derive(Debug, PartialEq)] /// Parity local data directories pub struct Directories { - /// Base dir - pub base: String, - /// Database dir - pub db: String, - /// Cache dir - pub cache: String, - /// Dir to store keys - pub keys: String, - /// Signer dir - pub signer: String, - /// Secrets dir - pub secretstore: String, + /// Base dir + pub base: String, + /// Database dir + pub db: String, + /// Cache dir + pub cache: String, + /// Dir to store keys + pub keys: String, + /// Signer dir + pub signer: String, + /// Secrets dir + pub secretstore: String, } impl Default for Directories { - fn default() -> Self { - let data_dir = default_data_path(); - let local_dir = default_local_path(); - Directories { - base: replace_home(&data_dir, "$BASE"), - db: replace_home_and_local(&data_dir, &local_dir, CHAINS_PATH), - cache: replace_home_and_local(&data_dir, &local_dir, CACHE_PATH), - keys: replace_home(&data_dir, "$BASE/keys"), - signer: replace_home(&data_dir, "$BASE/signer"), - secretstore: replace_home(&data_dir, "$BASE/secretstore"), - } - } + fn default() -> Self { + let data_dir = default_data_path(); + let local_dir = default_local_path(); + Directories { + base: replace_home(&data_dir, "$BASE"), + db: replace_home_and_local(&data_dir, &local_dir, CHAINS_PATH), + cache: replace_home_and_local(&data_dir, &local_dir, CACHE_PATH), + keys: replace_home(&data_dir, "$BASE/keys"), + signer: replace_home(&data_dir, "$BASE/signer"), + secretstore: 
replace_home(&data_dir, "$BASE/secretstore"), + } + } } impl Directories { - /// Create local directories - pub fn create_dirs(&self, signer_enabled: bool, secretstore_enabled: bool) -> Result<(), String> { - fs::create_dir_all(&self.base).map_err(|e| e.to_string())?; - fs::create_dir_all(&self.db).map_err(|e| e.to_string())?; - fs::create_dir_all(&self.cache).map_err(|e| e.to_string())?; - fs::create_dir_all(&self.keys).map_err(|e| e.to_string())?; - if signer_enabled { - fs::create_dir_all(&self.signer).map_err(|e| e.to_string())?; - } - if secretstore_enabled { - fs::create_dir_all(&self.secretstore).map_err(|e| e.to_string())?; - } - Ok(()) - } + /// Create local directories + pub fn create_dirs( + &self, + signer_enabled: bool, + secretstore_enabled: bool, + ) -> Result<(), String> { + fs::create_dir_all(&self.base).map_err(|e| e.to_string())?; + fs::create_dir_all(&self.db).map_err(|e| e.to_string())?; + fs::create_dir_all(&self.cache).map_err(|e| e.to_string())?; + fs::create_dir_all(&self.keys).map_err(|e| e.to_string())?; + if signer_enabled { + fs::create_dir_all(&self.signer).map_err(|e| e.to_string())?; + } + if secretstore_enabled { + fs::create_dir_all(&self.secretstore).map_err(|e| e.to_string())?; + } + Ok(()) + } - /// Database paths. - pub fn database(&self, genesis_hash: H256, fork_name: Option, spec_name: String) -> DatabaseDirectories { - DatabaseDirectories { - path: self.db.clone(), - legacy_path: self.base.clone(), - genesis_hash, - fork_name, - spec_name, - } - } + /// Database paths. 
+ pub fn database( + &self, + genesis_hash: H256, + fork_name: Option, + spec_name: String, + ) -> DatabaseDirectories { + DatabaseDirectories { + path: self.db.clone(), + legacy_path: self.base.clone(), + genesis_hash, + fork_name, + spec_name, + } + } - /// Get the ipc sockets path - pub fn ipc_path(&self) -> PathBuf { - let mut dir = Path::new(&self.base).to_path_buf(); - dir.push("ipc"); - dir - } + /// Get the ipc sockets path + pub fn ipc_path(&self) -> PathBuf { + let mut dir = Path::new(&self.base).to_path_buf(); + dir.push("ipc"); + dir + } - /// Legacy keys path - // TODO: remove in 1.7 - pub fn legacy_keys_path(&self, testnet: bool) -> PathBuf { - let mut dir = Path::new(&self.base).to_path_buf(); - if testnet { - dir.push("testnet_keys"); - } else { - dir.push("keys"); - } - dir - } + /// Legacy keys path + // TODO: remove in 1.7 + pub fn legacy_keys_path(&self, testnet: bool) -> PathBuf { + let mut dir = Path::new(&self.base).to_path_buf(); + if testnet { + dir.push("testnet_keys"); + } else { + dir.push("keys"); + } + dir + } - /// Get the keys path - pub fn keys_path(&self, data_dir: &str) -> PathBuf { - let mut dir = PathBuf::from(&self.keys); - dir.push(data_dir); - dir - } + /// Get the keys path + pub fn keys_path(&self, data_dir: &str) -> PathBuf { + let mut dir = PathBuf::from(&self.keys); + dir.push(data_dir); + dir + } } #[derive(Debug, PartialEq)] /// Database directories for the given fork. 
pub struct DatabaseDirectories { - /// Base path - pub path: String, - /// Legacy path - pub legacy_path: String, - /// Genesis hash - pub genesis_hash: H256, - /// Name of current fork - pub fork_name: Option, - /// Name of current spec - pub spec_name: String, + /// Base path + pub path: String, + /// Legacy path + pub legacy_path: String, + /// Genesis hash + pub genesis_hash: H256, + /// Name of current fork + pub fork_name: Option, + /// Name of current spec + pub spec_name: String, } impl DatabaseDirectories { - /// Base DB directory for the given fork. - // TODO: remove in 1.7 - pub fn legacy_fork_path(&self) -> PathBuf { - Path::new(&self.legacy_path).join(format!("{:x}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default())) - } + /// Base DB directory for the given fork. + // TODO: remove in 1.7 + pub fn legacy_fork_path(&self) -> PathBuf { + Path::new(&self.legacy_path).join(format!( + "{:x}{}", + H64::from(self.genesis_hash), + self.fork_name + .as_ref() + .map(|f| format!("-{}", f)) + .unwrap_or_default() + )) + } - /// Spec root directory for the given fork. - pub fn spec_root_path(&self) -> PathBuf { - Path::new(&self.path).join(&self.spec_name) - } + /// Spec root directory for the given fork. 
+ pub fn spec_root_path(&self) -> PathBuf { + Path::new(&self.path).join(&self.spec_name) + } - /// Generic client path - pub fn client_path(&self, pruning: Algorithm) -> PathBuf { - self.db_root_path().join(pruning.as_internal_name_str()).join("db") - } + /// Generic client path + pub fn client_path(&self, pruning: Algorithm) -> PathBuf { + self.db_root_path() + .join(pruning.as_internal_name_str()) + .join("db") + } - /// DB root path, named after genesis hash - pub fn db_root_path(&self) -> PathBuf { - self.spec_root_path().join("db").join(format!("{:x}", H64::from(self.genesis_hash))) - } + /// DB root path, named after genesis hash + pub fn db_root_path(&self) -> PathBuf { + self.spec_root_path() + .join("db") + .join(format!("{:x}", H64::from(self.genesis_hash))) + } - /// DB path - pub fn db_path(&self, pruning: Algorithm) -> PathBuf { - self.db_root_path().join(pruning.as_internal_name_str()) - } + /// DB path + pub fn db_path(&self, pruning: Algorithm) -> PathBuf { + self.db_root_path().join(pruning.as_internal_name_str()) + } - /// Get the root path for database - // TODO: remove in 1.7 - pub fn legacy_version_path(&self, pruning: Algorithm) -> PathBuf { - self.legacy_fork_path().join(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str())) - } + /// Get the root path for database + // TODO: remove in 1.7 + pub fn legacy_version_path(&self, pruning: Algorithm) -> PathBuf { + self.legacy_fork_path().join(format!( + "v{}-sec-{}", + LEGACY_CLIENT_DB_VER_STR, + pruning.as_internal_name_str() + )) + } - /// Get user defaults path, legacy way - // TODO: remove in 1.7 - pub fn legacy_user_defaults_path(&self) -> PathBuf { - self.legacy_fork_path().join("user_defaults") - } + /// Get user defaults path, legacy way + // TODO: remove in 1.7 + pub fn legacy_user_defaults_path(&self) -> PathBuf { + self.legacy_fork_path().join("user_defaults") + } - /// Get snapshot path, legacy way - // TODO: remove in 1.7 - pub fn legacy_snapshot_path(&self) 
-> PathBuf { - self.legacy_fork_path().join("snapshot") - } + /// Get snapshot path, legacy way + // TODO: remove in 1.7 + pub fn legacy_snapshot_path(&self) -> PathBuf { + self.legacy_fork_path().join("snapshot") + } - /// Get user defaults path, legacy way - // TODO: remove in 1.7 - pub fn legacy_network_path(&self) -> PathBuf { - self.legacy_fork_path().join("network") - } + /// Get user defaults path, legacy way + // TODO: remove in 1.7 + pub fn legacy_network_path(&self) -> PathBuf { + self.legacy_fork_path().join("network") + } - /// Get user defauls path - pub fn user_defaults_path(&self) -> PathBuf { - self.spec_root_path().join("user_defaults") - } + /// Get user defauls path + pub fn user_defaults_path(&self) -> PathBuf { + self.spec_root_path().join("user_defaults") + } - /// Get the path for the snapshot directory given the genesis hash and fork name. - pub fn snapshot_path(&self) -> PathBuf { - self.db_root_path().join("snapshot") - } + /// Get the path for the snapshot directory given the genesis hash and fork name. + pub fn snapshot_path(&self) -> PathBuf { + self.db_root_path().join("snapshot") + } - /// Get the path for the network directory. - pub fn network_path(&self) -> PathBuf { - self.spec_root_path().join("network") - } + /// Get the path for the network directory. 
+ pub fn network_path(&self) -> PathBuf { + self.spec_root_path().join("network") + } } /// Default data path pub fn default_data_path() -> String { - let app_info = AppInfo { name: PRODUCT, author: AUTHOR }; - get_app_root(AppDataType::UserData, &app_info).map(|p| p.to_string_lossy().into_owned()).unwrap_or_else(|_| "$HOME/.parity".to_owned()) + let app_info = AppInfo { + name: PRODUCT, + author: AUTHOR, + }; + get_app_root(AppDataType::UserData, &app_info) + .map(|p| p.to_string_lossy().into_owned()) + .unwrap_or_else(|_| "$HOME/.parity".to_owned()) } /// Default local path pub fn default_local_path() -> String { - let app_info = AppInfo { name: PRODUCT, author: AUTHOR }; - get_app_root(AppDataType::UserCache, &app_info).map(|p| p.to_string_lossy().into_owned()).unwrap_or_else(|_| "$HOME/.parity".to_owned()) + let app_info = AppInfo { + name: PRODUCT, + author: AUTHOR, + }; + get_app_root(AppDataType::UserCache, &app_info) + .map(|p| p.to_string_lossy().into_owned()) + .unwrap_or_else(|_| "$HOME/.parity".to_owned()) } /// Default hypervisor path pub fn default_hypervisor_path() -> PathBuf { - let app_info = AppInfo { name: PRODUCT_HYPERVISOR, author: AUTHOR }; - get_app_root(AppDataType::UserData, &app_info).unwrap_or_else(|_| "$HOME/.parity-hypervisor".into()) + let app_info = AppInfo { + name: PRODUCT_HYPERVISOR, + author: AUTHOR, + }; + get_app_root(AppDataType::UserData, &app_info) + .unwrap_or_else(|_| "$HOME/.parity-hypervisor".into()) } /// Get home directory. 
fn home() -> PathBuf { - home_dir().expect("Failed to get home dir") + home_dir().expect("Failed to get home dir") } /// Geth path pub fn geth(testnet: bool) -> PathBuf { - let mut base = geth_base(); - if testnet { - base.push("testnet"); - } - base.push("keystore"); - base + let mut base = geth_base(); + if testnet { + base.push("testnet"); + } + base.push("keystore"); + base } /// Parity path for specific chain pub fn parity(chain: &str) -> PathBuf { - let mut base = parity_base(); - base.push(chain); - base + let mut base = parity_base(); + base.push(chain); + base } #[cfg(target_os = "macos")] mod platform { - use std::path::PathBuf; - pub const AUTHOR: &str = "Parity"; - pub const PRODUCT: &str = "io.parity.ethereum"; - pub const PRODUCT_HYPERVISOR: &str = "io.parity.ethereum-updates"; + use std::path::PathBuf; + pub const AUTHOR: &str = "Parity"; + pub const PRODUCT: &str = "io.parity.ethereum"; + pub const PRODUCT_HYPERVISOR: &str = "io.parity.ethereum-updates"; - pub fn parity_base() -> PathBuf { - let mut home = super::home(); - home.push("Library"); - home.push("Application Support"); - home.push("io.parity.ethereum"); - home.push("keys"); - home - } + pub fn parity_base() -> PathBuf { + let mut home = super::home(); + home.push("Library"); + home.push("Application Support"); + home.push("io.parity.ethereum"); + home.push("keys"); + home + } - pub fn geth_base() -> PathBuf { - let mut home = super::home(); - home.push("Library"); - home.push("Ethereum"); - home - } + pub fn geth_base() -> PathBuf { + let mut home = super::home(); + home.push("Library"); + home.push("Ethereum"); + home + } } #[cfg(windows)] mod platform { - use std::path::PathBuf; - pub const AUTHOR: &str = "Parity"; - pub const PRODUCT: &str = "Ethereum"; - pub const PRODUCT_HYPERVISOR: &str = "EthereumUpdates"; + use std::path::PathBuf; + pub const AUTHOR: &str = "Parity"; + pub const PRODUCT: &str = "Ethereum"; + pub const PRODUCT_HYPERVISOR: &str = "EthereumUpdates"; - pub fn 
parity_base() -> PathBuf { - let mut home = super::home(); - home.push("AppData"); - home.push("Roaming"); - home.push("Parity"); - home.push("Ethereum"); - home.push("keys"); - home - } + pub fn parity_base() -> PathBuf { + let mut home = super::home(); + home.push("AppData"); + home.push("Roaming"); + home.push("Parity"); + home.push("Ethereum"); + home.push("keys"); + home + } - pub fn geth_base() -> PathBuf { - let mut home = super::home(); - home.push("AppData"); - home.push("Roaming"); - home.push("Ethereum"); - home - } + pub fn geth_base() -> PathBuf { + let mut home = super::home(); + home.push("AppData"); + home.push("Roaming"); + home.push("Ethereum"); + home + } } #[cfg(not(any(target_os = "macos", windows)))] mod platform { - use std::path::PathBuf; - pub const AUTHOR: &str = "parity"; - pub const PRODUCT: &str = "io.parity.ethereum"; - pub const PRODUCT_HYPERVISOR: &str = "io.parity.ethereum-updates"; + use std::path::PathBuf; + pub const AUTHOR: &str = "parity"; + pub const PRODUCT: &str = "io.parity.ethereum"; + pub const PRODUCT_HYPERVISOR: &str = "io.parity.ethereum-updates"; - pub fn parity_base() -> PathBuf { - let mut home = super::home(); - home.push(".local"); - home.push("share"); - home.push("io.parity.ethereum"); - home.push("keys"); - home - } + pub fn parity_base() -> PathBuf { + let mut home = super::home(); + home.push(".local"); + home.push("share"); + home.push("io.parity.ethereum"); + home.push("keys"); + home + } - pub fn geth_base() -> PathBuf { - let mut home = super::home(); - home.push(".ethereum"); - home - } + pub fn geth_base() -> PathBuf { + let mut home = super::home(); + home.push(".ethereum"); + home + } } #[cfg(test)] mod tests { - use super::Directories; - use helpers::{replace_home, replace_home_and_local}; + use super::Directories; + use helpers::{replace_home, replace_home_and_local}; - #[test] - fn test_default_directories() { - let data_dir = super::default_data_path(); - let local_dir = 
super::default_local_path(); - let expected = Directories { - base: replace_home(&data_dir, "$BASE"), - db: replace_home_and_local(&data_dir, &local_dir, - if cfg!(target_os = "windows") { "$LOCAL/chains" } - else { "$BASE/chains" } - ), - cache: replace_home_and_local(&data_dir, &local_dir, - if cfg!(target_os = "windows") { "$LOCAL/cache" } - else { "$BASE/cache" } - ), - keys: replace_home(&data_dir, "$BASE/keys"), - signer: replace_home(&data_dir, "$BASE/signer"), - secretstore: replace_home(&data_dir, "$BASE/secretstore"), - }; - assert_eq!(expected, Directories::default()); - } + #[test] + fn test_default_directories() { + let data_dir = super::default_data_path(); + let local_dir = super::default_local_path(); + let expected = Directories { + base: replace_home(&data_dir, "$BASE"), + db: replace_home_and_local( + &data_dir, + &local_dir, + if cfg!(target_os = "windows") { + "$LOCAL/chains" + } else { + "$BASE/chains" + }, + ), + cache: replace_home_and_local( + &data_dir, + &local_dir, + if cfg!(target_os = "windows") { + "$LOCAL/cache" + } else { + "$BASE/cache" + }, + ), + keys: replace_home(&data_dir, "$BASE/keys"), + signer: replace_home(&data_dir, "$BASE/signer"), + secretstore: replace_home(&data_dir, "$BASE/secretstore"), + }; + assert_eq!(expected, Directories::default()); + } } diff --git a/util/fake-fetch/src/lib.rs b/util/fake-fetch/src/lib.rs index 9dc52fc93..f4a9fb387 100644 --- a/util/fake-fetch/src/lib.rs +++ b/util/fake-fetch/src/lib.rs @@ -15,53 +15,63 @@ // along with Parity Ethereum. If not, see . 
extern crate fetch; -extern crate hyper; extern crate futures; +extern crate hyper; -use hyper::{StatusCode, Body}; +use fetch::{Fetch, Request, Url}; use futures::{future, future::FutureResult}; -use fetch::{Fetch, Url, Request}; +use hyper::{Body, StatusCode}; #[derive(Clone, Default)] -pub struct FakeFetch where T: Clone + Send + Sync { - val: Option, +pub struct FakeFetch +where + T: Clone + Send + Sync, +{ + val: Option, } -impl FakeFetch where T: Clone + Send + Sync { - pub fn new(t: Option) -> Self { - FakeFetch { val : t } - } +impl FakeFetch +where + T: Clone + Send + Sync, +{ + pub fn new(t: Option) -> Self { + FakeFetch { val: t } + } } -impl Fetch for FakeFetch where T: Clone + Send+ Sync { - type Result = FutureResult; +impl Fetch for FakeFetch +where + T: Clone + Send + Sync, +{ + type Result = FutureResult; - fn fetch(&self, request: Request, abort: fetch::Abort) -> Self::Result { - let u = request.url().clone(); - future::ok(if self.val.is_some() { - let r = hyper::Response::new("Some content".into()); - fetch::client::Response::new(u, r, abort) - } else { - let r = hyper::Response::builder() - .status(StatusCode::NOT_FOUND) - .body(Body::empty()).expect("Nothing to parse, can not fail; qed"); - fetch::client::Response::new(u, r, abort) - }) - } + fn fetch(&self, request: Request, abort: fetch::Abort) -> Self::Result { + let u = request.url().clone(); + future::ok(if self.val.is_some() { + let r = hyper::Response::new("Some content".into()); + fetch::client::Response::new(u, r, abort) + } else { + let r = hyper::Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::empty()) + .expect("Nothing to parse, can not fail; qed"); + fetch::client::Response::new(u, r, abort) + }) + } - fn get(&self, url: &str, abort: fetch::Abort) -> Self::Result { - let url: Url = match url.parse() { - Ok(u) => u, - Err(e) => return future::err(e.into()) - }; - self.fetch(Request::get(url), abort) - } + fn get(&self, url: &str, abort: fetch::Abort) -> 
Self::Result { + let url: Url = match url.parse() { + Ok(u) => u, + Err(e) => return future::err(e.into()), + }; + self.fetch(Request::get(url), abort) + } - fn post(&self, url: &str, abort: fetch::Abort) -> Self::Result { - let url: Url = match url.parse() { - Ok(u) => u, - Err(e) => return future::err(e.into()) - }; - self.fetch(Request::post(url), abort) - } + fn post(&self, url: &str, abort: fetch::Abort) -> Self::Result { + let url: Url = match url.parse() { + Ok(u) => u, + Err(e) => return future::err(e.into()), + }; + self.fetch(Request::post(url), abort) + } } diff --git a/util/fastmap/src/lib.rs b/util/fastmap/src/lib.rs index 97f7d3b24..c0e0edcec 100644 --- a/util/fastmap/src/lib.rs +++ b/util/fastmap/src/lib.rs @@ -20,9 +20,11 @@ extern crate ethereum_types; extern crate plain_hasher; use ethereum_types::H256; -use std::hash; -use std::collections::{HashMap, HashSet}; use plain_hasher::PlainHasher; +use std::{ + collections::{HashMap, HashSet}, + hash, +}; /// Specialized version of `HashMap` with H256 keys and fast hashing function. pub type H256FastMap = HashMap>; diff --git a/util/fetch/src/client.rs b/util/fetch/src/client.rs index e4474fb79..e6617295b 100644 --- a/util/fetch/src/client.rs +++ b/util/fetch/src/client.rs @@ -14,23 +14,33 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use futures::future::{self, Loop}; -use futures::sync::{mpsc, oneshot}; -use futures::{self, Future, Async, Sink, Stream}; -use hyper::header::{self, HeaderMap, HeaderValue, IntoHeaderName}; -use hyper::{self, Method, StatusCode}; +use bytes::Bytes; +use futures::{ + self, + future::{self, Loop}, + sync::{mpsc, oneshot}, + Async, Future, Sink, Stream, +}; +use hyper::{ + self, + header::{self, HeaderMap, HeaderValue, IntoHeaderName}, + Method, StatusCode, +}; use hyper_rustls; -use std; -use std::cmp::min; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use std::sync::mpsc::RecvTimeoutError; -use std::thread; -use std::time::Duration; -use std::{io, fmt}; +use std::{ + self, + cmp::min, + fmt, io, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + mpsc::RecvTimeoutError, + Arc, + }, + thread, + time::Duration, +}; use tokio::{self, util::FutureExt}; use url::{self, Url}; -use bytes::Bytes; const MAX_SIZE: usize = 64 * 1024 * 1024; const MAX_SECS: Duration = Duration::from_secs(5); @@ -43,89 +53,89 @@ const MAX_REDR: usize = 5; /// they can be aborted explicitly by the calling code. #[derive(Clone, Debug)] pub struct Abort { - abort: Arc, - size: usize, - time: Duration, - redir: usize, + abort: Arc, + size: usize, + time: Duration, + redir: usize, } impl Default for Abort { - fn default() -> Abort { - Abort { - abort: Arc::new(AtomicBool::new(false)), - size: MAX_SIZE, - time: MAX_SECS, - redir: MAX_REDR, - } - } + fn default() -> Abort { + Abort { + abort: Arc::new(AtomicBool::new(false)), + size: MAX_SIZE, + time: MAX_SECS, + redir: MAX_REDR, + } + } } impl From> for Abort { - fn from(a: Arc) -> Abort { - Abort { - abort: a, - size: MAX_SIZE, - time: MAX_SECS, - redir: MAX_REDR, - } - } + fn from(a: Arc) -> Abort { + Abort { + abort: a, + size: MAX_SIZE, + time: MAX_SECS, + redir: MAX_REDR, + } + } } impl Abort { - /// True if `abort` has been invoked. 
- pub fn is_aborted(&self) -> bool { - self.abort.load(Ordering::SeqCst) - } + /// True if `abort` has been invoked. + pub fn is_aborted(&self) -> bool { + self.abort.load(Ordering::SeqCst) + } - /// The maximum response body size. - pub fn max_size(&self) -> usize { - self.size - } + /// The maximum response body size. + pub fn max_size(&self) -> usize { + self.size + } - /// The maximum total time, including redirects. - pub fn max_duration(&self) -> Duration { - self.time - } + /// The maximum total time, including redirects. + pub fn max_duration(&self) -> Duration { + self.time + } - /// The maximum number of redirects to allow. - pub fn max_redirects(&self) -> usize { - self.redir - } + /// The maximum number of redirects to allow. + pub fn max_redirects(&self) -> usize { + self.redir + } - /// Mark as aborted. - pub fn abort(&self) { - self.abort.store(true, Ordering::SeqCst) - } + /// Mark as aborted. + pub fn abort(&self) { + self.abort.store(true, Ordering::SeqCst) + } - /// Set the maximum reponse body size. - pub fn with_max_size(self, n: usize) -> Abort { - Abort { size: n, .. self } - } + /// Set the maximum reponse body size. + pub fn with_max_size(self, n: usize) -> Abort { + Abort { size: n, ..self } + } - /// Set the maximum duration (including redirects). - pub fn with_max_duration(self, d: Duration) -> Abort { - Abort { time: d, .. self } - } + /// Set the maximum duration (including redirects). + pub fn with_max_duration(self, d: Duration) -> Abort { + Abort { time: d, ..self } + } - /// Set the maximum number of redirects to follow. - pub fn with_max_redirects(self, n: usize) -> Abort { - Abort { redir: n, .. self } - } + /// Set the maximum number of redirects to follow. + pub fn with_max_redirects(self, n: usize) -> Abort { + Abort { redir: n, ..self } + } } /// Types which retrieve content from some URL. pub trait Fetch: Clone + Send + Sync + 'static { - /// The result future. - type Result: Future + Send + 'static; + /// The result future. 
+ type Result: Future + Send + 'static; - /// Make a request to given URL - fn fetch(&self, request: Request, abort: Abort) -> Self::Result; + /// Make a request to given URL + fn fetch(&self, request: Request, abort: Abort) -> Self::Result; - /// Get content from some URL. - fn get(&self, url: &str, abort: Abort) -> Self::Result; + /// Get content from some URL. + fn get(&self, url: &str, abort: Abort) -> Self::Result; - /// Post content to some URL. - fn post(&self, url: &str, abort: Abort) -> Self::Result; + /// Post content to some URL. + fn post(&self, url: &str, abort: Abort) -> Self::Result; } type TxResponse = oneshot::Sender>; @@ -138,86 +148,93 @@ type ChanItem = Option<(Request, Abort, TxResponse)>; // not implement `Send` currently. #[derive(Debug)] pub struct Client { - runtime: mpsc::Sender, - refs: Arc, + runtime: mpsc::Sender, + refs: Arc, } // When cloning a client we increment the internal reference counter. impl Clone for Client { - fn clone(&self) -> Client { - self.refs.fetch_add(1, Ordering::SeqCst); - Client { - runtime: self.runtime.clone(), - refs: self.refs.clone(), - } - } + fn clone(&self) -> Client { + self.refs.fetch_add(1, Ordering::SeqCst); + Client { + runtime: self.runtime.clone(), + refs: self.refs.clone(), + } + } } // When dropping a client, we decrement the reference counter. // Once it reaches 0 we terminate the background thread. impl Drop for Client { - fn drop(&mut self) { - if self.refs.fetch_sub(1, Ordering::SeqCst) == 1 { - // ignore send error as it means the background thread is gone already - let _ = self.runtime.clone().send(None).wait(); - } - } + fn drop(&mut self) { + if self.refs.fetch_sub(1, Ordering::SeqCst) == 1 { + // ignore send error as it means the background thread is gone already + let _ = self.runtime.clone().send(None).wait(); + } + } } impl Client { - /// Create a new fetch client. 
- pub fn new(num_dns_threads: usize) -> Result { - let (tx_start, rx_start) = std::sync::mpsc::sync_channel(1); - let (tx_proto, rx_proto) = mpsc::channel(64); + /// Create a new fetch client. + pub fn new(num_dns_threads: usize) -> Result { + let (tx_start, rx_start) = std::sync::mpsc::sync_channel(1); + let (tx_proto, rx_proto) = mpsc::channel(64); - Client::background_thread(tx_start, rx_proto, num_dns_threads)?; + Client::background_thread(tx_start, rx_proto, num_dns_threads)?; - match rx_start.recv_timeout(Duration::from_secs(10)) { - Err(RecvTimeoutError::Timeout) => { - error!(target: "fetch", "timeout starting background thread"); - return Err(Error::BackgroundThreadDead) - } - Err(RecvTimeoutError::Disconnected) => { - error!(target: "fetch", "background thread gone"); - return Err(Error::BackgroundThreadDead) - } - Ok(Err(e)) => { - error!(target: "fetch", "error starting background thread: {}", e); - return Err(e.into()) - } - Ok(Ok(())) => {} - } + match rx_start.recv_timeout(Duration::from_secs(10)) { + Err(RecvTimeoutError::Timeout) => { + error!(target: "fetch", "timeout starting background thread"); + return Err(Error::BackgroundThreadDead); + } + Err(RecvTimeoutError::Disconnected) => { + error!(target: "fetch", "background thread gone"); + return Err(Error::BackgroundThreadDead); + } + Ok(Err(e)) => { + error!(target: "fetch", "error starting background thread: {}", e); + return Err(e.into()); + } + Ok(Ok(())) => {} + } - Ok(Client { - runtime: tx_proto, - refs: Arc::new(AtomicUsize::new(1)), - }) - } + Ok(Client { + runtime: tx_proto, + refs: Arc::new(AtomicUsize::new(1)), + }) + } - fn background_thread(tx_start: TxStartup, rx_proto: mpsc::Receiver, num_dns_threads: usize) -> io::Result> { - thread::Builder::new().name("fetch".into()).spawn(move || { - let mut runtime = match tokio::runtime::current_thread::Runtime::new() { - Ok(c) => c, - Err(e) => return tx_start.send(Err(e)).unwrap_or(()) - }; + fn background_thread( + tx_start: TxStartup, + 
rx_proto: mpsc::Receiver, + num_dns_threads: usize, + ) -> io::Result> { + thread::Builder::new().name("fetch".into()).spawn(move || { + let mut runtime = match tokio::runtime::current_thread::Runtime::new() { + Ok(c) => c, + Err(e) => return tx_start.send(Err(e)).unwrap_or(()), + }; - let hyper = hyper::Client::builder() - .build(hyper_rustls::HttpsConnector::new(num_dns_threads)); + let hyper = + hyper::Client::builder().build(hyper_rustls::HttpsConnector::new(num_dns_threads)); - let future = rx_proto.take_while(|item| Ok(item.is_some())) - .map(|item| item.expect("`take_while` is only passing on channel items != None; qed")) - .for_each(|(request, abort, sender)| - { - trace!(target: "fetch", "new request to {}", request.url()); - if abort.is_aborted() { - return future::ok(sender.send(Err(Error::Aborted)).unwrap_or(())) - } - let ini = (hyper.clone(), request, abort, 0); - let fut = future::loop_fn(ini, |(client, request, abort, redirects)| { - let request2 = request.clone(); - let url2 = request2.url().clone(); - let abort2 = abort.clone(); - client.request(request.into()) + let future = rx_proto + .take_while(|item| Ok(item.is_some())) + .map(|item| { + item.expect("`take_while` is only passing on channel items != None; qed") + }) + .for_each(|(request, abort, sender)| { + trace!(target: "fetch", "new request to {}", request.url()); + if abort.is_aborted() { + return future::ok(sender.send(Err(Error::Aborted)).unwrap_or(())); + } + let ini = (hyper.clone(), request, abort, 0); + let fut = + future::loop_fn(ini, |(client, request, abort, redirects)| { + let request2 = request.clone(); + let url2 = request2.url().clone(); + let abort2 = abort.clone(); + client.request(request.into()) .map(move |resp| Response::new(url2, resp, abort2)) .from_err() .and_then(move |resp| { @@ -250,248 +267,258 @@ impl Client { Ok(Loop::Break(resp)) } }) - }) - .then(|result| { - future::ok(sender.send(result).unwrap_or(())) - }); - tokio::spawn(fut); - trace!(target: "fetch", 
"waiting for next request ..."); - future::ok(()) - }); + }) + .then(|result| future::ok(sender.send(result).unwrap_or(()))); + tokio::spawn(fut); + trace!(target: "fetch", "waiting for next request ..."); + future::ok(()) + }); - tx_start.send(Ok(())).unwrap_or(()); + tx_start.send(Ok(())).unwrap_or(()); - debug!(target: "fetch", "processing requests ..."); - if let Err(()) = runtime.block_on(future) { - error!(target: "fetch", "error while executing future") - } - debug!(target: "fetch", "fetch background thread finished") - }) - } + debug!(target: "fetch", "processing requests ..."); + if let Err(()) = runtime.block_on(future) { + error!(target: "fetch", "error while executing future") + } + debug!(target: "fetch", "fetch background thread finished") + }) + } } impl Fetch for Client { - type Result = Box + Send + 'static>; + type Result = Box + Send + 'static>; - fn fetch(&self, request: Request, abort: Abort) -> Self::Result { - debug!(target: "fetch", "fetching: {:?}", request.url()); - if abort.is_aborted() { - return Box::new(future::err(Error::Aborted)) - } - let (tx_res, rx_res) = oneshot::channel(); - let maxdur = abort.max_duration(); - let sender = self.runtime.clone(); - let future = sender.send(Some((request, abort, tx_res))) - .map_err(|e| { - error!(target: "fetch", "failed to schedule request: {}", e); - Error::BackgroundThreadDead - }) - .and_then(|_| rx_res.map_err(|oneshot::Canceled| Error::BackgroundThreadDead)) - .and_then(future::result); + fn fetch(&self, request: Request, abort: Abort) -> Self::Result { + debug!(target: "fetch", "fetching: {:?}", request.url()); + if abort.is_aborted() { + return Box::new(future::err(Error::Aborted)); + } + let (tx_res, rx_res) = oneshot::channel(); + let maxdur = abort.max_duration(); + let sender = self.runtime.clone(); + let future = sender + .send(Some((request, abort, tx_res))) + .map_err(|e| { + error!(target: "fetch", "failed to schedule request: {}", e); + Error::BackgroundThreadDead + }) + 
.and_then(|_| rx_res.map_err(|oneshot::Canceled| Error::BackgroundThreadDead)) + .and_then(future::result); - Box::new(future.timeout(maxdur) - .map_err(|err| { - if err.is_inner() { - Error::from(err.into_inner().unwrap()) - } else { - Error::from(err) - } - }) - ) - } + Box::new(future.timeout(maxdur).map_err(|err| { + if err.is_inner() { + Error::from(err.into_inner().unwrap()) + } else { + Error::from(err) + } + })) + } - /// Get content from some URL. - fn get(&self, url: &str, abort: Abort) -> Self::Result { - let url: Url = match url.parse() { - Ok(u) => u, - Err(e) => return Box::new(future::err(e.into())) - }; - self.fetch(Request::get(url), abort) - } + /// Get content from some URL. + fn get(&self, url: &str, abort: Abort) -> Self::Result { + let url: Url = match url.parse() { + Ok(u) => u, + Err(e) => return Box::new(future::err(e.into())), + }; + self.fetch(Request::get(url), abort) + } - /// Post content to some URL. - fn post(&self, url: &str, abort: Abort) -> Self::Result { - let url: Url = match url.parse() { - Ok(u) => u, - Err(e) => return Box::new(future::err(e.into())) - }; - self.fetch(Request::post(url), abort) - } + /// Post content to some URL. + fn post(&self, url: &str, abort: Abort) -> Self::Result { + let url: Url = match url.parse() { + Ok(u) => u, + Err(e) => return Box::new(future::err(e.into())), + }; + self.fetch(Request::post(url), abort) + } } // Extract redirect location from response. The second return value indicate whether the original method should be preserved. 
fn redirect_location(u: Url, r: &Response) -> Option<(Url, bool)> { - let preserve_method = match r.status() { - StatusCode::TEMPORARY_REDIRECT | StatusCode::PERMANENT_REDIRECT => true, - _ => false, - }; - match r.status() { - StatusCode::MOVED_PERMANENTLY - | StatusCode::PERMANENT_REDIRECT - | StatusCode::TEMPORARY_REDIRECT - | StatusCode::FOUND - | StatusCode::SEE_OTHER => { - r.headers.get(header::LOCATION).and_then(|loc| { - loc.to_str().ok().and_then(|loc_s| { - u.join(loc_s).ok().map(|url| (url, preserve_method)) - }) - }) - } - _ => None - } + let preserve_method = match r.status() { + StatusCode::TEMPORARY_REDIRECT | StatusCode::PERMANENT_REDIRECT => true, + _ => false, + }; + match r.status() { + StatusCode::MOVED_PERMANENTLY + | StatusCode::PERMANENT_REDIRECT + | StatusCode::TEMPORARY_REDIRECT + | StatusCode::FOUND + | StatusCode::SEE_OTHER => r.headers.get(header::LOCATION).and_then(|loc| { + loc.to_str() + .ok() + .and_then(|loc_s| u.join(loc_s).ok().map(|url| (url, preserve_method))) + }), + _ => None, + } } /// A wrapper for hyper::Request using Url and with methods. #[derive(Debug, Clone)] pub struct Request { - url: Url, - method: Method, - headers: HeaderMap, - body: Bytes, + url: Url, + method: Method, + headers: HeaderMap, + body: Bytes, } impl Request { - /// Create a new request, with given url and method. - pub fn new(url: Url, method: Method) -> Request { - Request { - url, method, - headers: HeaderMap::new(), - body: Default::default(), - } - } + /// Create a new request, with given url and method. + pub fn new(url: Url, method: Method) -> Request { + Request { + url, + method, + headers: HeaderMap::new(), + body: Default::default(), + } + } - /// Create a new GET request. - pub fn get(url: Url) -> Request { - Request::new(url, Method::GET) - } + /// Create a new GET request. + pub fn get(url: Url) -> Request { + Request::new(url, Method::GET) + } - /// Create a new empty POST request. 
- pub fn post(url: Url) -> Request { - Request::new(url, Method::POST) - } + /// Create a new empty POST request. + pub fn post(url: Url) -> Request { + Request::new(url, Method::POST) + } - /// Read the url. - pub fn url(&self) -> &Url { - &self.url - } + /// Read the url. + pub fn url(&self) -> &Url { + &self.url + } - /// Read the request headers. - pub fn headers(&self) -> &HeaderMap { - &self.headers - } + /// Read the request headers. + pub fn headers(&self) -> &HeaderMap { + &self.headers + } - /// Get a mutable reference to the headers. - pub fn headers_mut(&mut self) -> &mut HeaderMap { - &mut self.headers - } + /// Get a mutable reference to the headers. + pub fn headers_mut(&mut self) -> &mut HeaderMap { + &mut self.headers + } - /// Set the body of the request. - pub fn set_body>(&mut self, body: T) { - self.body = body.into(); - } + /// Set the body of the request. + pub fn set_body>(&mut self, body: T) { + self.body = body.into(); + } - /// Set the url of the request. - pub fn set_url(&mut self, url: Url) { - self.url = url; - } + /// Set the url of the request. + pub fn set_url(&mut self, url: Url) { + self.url = url; + } - /// Consume self, and return it with the added given header. - pub fn with_header(mut self, key: K, val: HeaderValue) -> Self - where K: IntoHeaderName, - { - self.headers_mut().append(key, val); - self - } + /// Consume self, and return it with the added given header. + pub fn with_header(mut self, key: K, val: HeaderValue) -> Self + where + K: IntoHeaderName, + { + self.headers_mut().append(key, val); + self + } - /// Consume self, and return it with the body. - pub fn with_body>(mut self, body: T) -> Self { - self.set_body(body); - self - } + /// Consume self, and return it with the body. 
+ pub fn with_body>(mut self, body: T) -> Self { + self.set_body(body); + self + } } impl From for hyper::Request { - fn from(req: Request) -> hyper::Request { - let uri: hyper::Uri = req.url.as_ref().parse().expect("Every valid URLis also a URI."); - hyper::Request::builder() - .method(req.method) - .uri(uri) - .header(header::USER_AGENT, HeaderValue::from_static("Parity Fetch Neo")) - .body(req.body.into()) - .expect("Header, uri, method, and body are already valid and can not fail to parse; qed") - } + fn from(req: Request) -> hyper::Request { + let uri: hyper::Uri = req + .url + .as_ref() + .parse() + .expect("Every valid URLis also a URI."); + hyper::Request::builder() + .method(req.method) + .uri(uri) + .header( + header::USER_AGENT, + HeaderValue::from_static("Parity Fetch Neo"), + ) + .body(req.body.into()) + .expect( + "Header, uri, method, and body are already valid and can not fail to parse; qed", + ) + } } /// An HTTP response. #[derive(Debug)] pub struct Response { - url: Url, - status: StatusCode, - headers: HeaderMap, - body: hyper::Body, - abort: Abort, - nread: usize, + url: Url, + status: StatusCode, + headers: HeaderMap, + body: hyper::Body, + abort: Abort, + nread: usize, } impl Response { - /// Create a new response, wrapping a hyper response. - pub fn new(u: Url, r: hyper::Response, a: Abort) -> Response { - Response { - url: u, - status: r.status(), - headers: r.headers().clone(), - body: r.into_body(), - abort: a, - nread: 0, - } - } + /// Create a new response, wrapping a hyper response. + pub fn new(u: Url, r: hyper::Response, a: Abort) -> Response { + Response { + url: u, + status: r.status(), + headers: r.headers().clone(), + body: r.into_body(), + abort: a, + nread: 0, + } + } - /// The response status. - pub fn status(&self) -> StatusCode { - self.status - } + /// The response status. + pub fn status(&self) -> StatusCode { + self.status + } - /// Status code == OK (200)? 
- pub fn is_success(&self) -> bool { - self.status() == StatusCode::OK - } + /// Status code == OK (200)? + pub fn is_success(&self) -> bool { + self.status() == StatusCode::OK + } - /// Status code == 404. - pub fn is_not_found(&self) -> bool { - self.status() == StatusCode::NOT_FOUND - } + /// Status code == 404. + pub fn is_not_found(&self) -> bool { + self.status() == StatusCode::NOT_FOUND + } - /// Is the content-type text/html? - pub fn is_html(&self) -> bool { - self.headers.get(header::CONTENT_TYPE).and_then(|ct_val| { - ct_val.to_str().ok().map(|ct_str| { - ct_str.contains("text") && ct_str.contains("html") - }) - }).unwrap_or(false) - } + /// Is the content-type text/html? + pub fn is_html(&self) -> bool { + self.headers + .get(header::CONTENT_TYPE) + .and_then(|ct_val| { + ct_val + .to_str() + .ok() + .map(|ct_str| ct_str.contains("text") && ct_str.contains("html")) + }) + .unwrap_or(false) + } } impl Stream for Response { - type Item = hyper::Chunk; - type Error = Error; + type Item = hyper::Chunk; + type Error = Error; - fn poll(&mut self) -> futures::Poll, Self::Error> { - if self.abort.is_aborted() { - debug!(target: "fetch", "fetch of {} aborted", self.url); - return Err(Error::Aborted) - } - match try_ready!(self.body.poll()) { - None => Ok(Async::Ready(None)), - Some(c) => { - if self.nread + c.len() > self.abort.max_size() { - debug!(target: "fetch", "size limit {:?} for {} exceeded", self.abort.max_size(), self.url); - return Err(Error::SizeLimit) - } - self.nread += c.len(); - Ok(Async::Ready(Some(c))) - } - } - } + fn poll(&mut self) -> futures::Poll, Self::Error> { + if self.abort.is_aborted() { + debug!(target: "fetch", "fetch of {} aborted", self.url); + return Err(Error::Aborted); + } + match try_ready!(self.body.poll()) { + None => Ok(Async::Ready(None)), + Some(c) => { + if self.nread + c.len() > self.abort.max_size() { + debug!(target: "fetch", "size limit {:?} for {} exceeded", self.abort.max_size(), self.url); + return 
Err(Error::SizeLimit); + } + self.nread += c.len(); + Ok(Async::Ready(Some(c))) + } + } + } } /// `BodyReader` serves as an adapter from async to sync I/O. @@ -499,456 +526,499 @@ impl Stream for Response { /// It implements `io::Read` by repedately waiting for the next `Chunk` /// of hyper's response `Body` which blocks the current thread. pub struct BodyReader { - chunk: hyper::Chunk, - body: Option, - abort: Abort, - offset: usize, - count: usize, + chunk: hyper::Chunk, + body: Option, + abort: Abort, + offset: usize, + count: usize, } impl BodyReader { - /// Create a new body reader for the given response. - pub fn new(r: Response) -> BodyReader { - BodyReader { - body: Some(r.body), - chunk: Default::default(), - abort: r.abort, - offset: 0, - count: 0, - } - } + /// Create a new body reader for the given response. + pub fn new(r: Response) -> BodyReader { + BodyReader { + body: Some(r.body), + chunk: Default::default(), + abort: r.abort, + offset: 0, + count: 0, + } + } } impl io::Read for BodyReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let mut n = 0; - while self.body.is_some() { - // Can we still read from the current chunk? - if self.offset < self.chunk.len() { - let k = min(self.chunk.len() - self.offset, buf.len() - n); - if self.count + k > self.abort.max_size() { - debug!(target: "fetch", "size limit {:?} exceeded", self.abort.max_size()); - return Err(io::Error::new(io::ErrorKind::PermissionDenied, "size limit exceeded")) - } - let c = &self.chunk[self.offset .. self.offset + k]; - (&mut buf[n .. 
n + k]).copy_from_slice(c); - self.offset += k; - self.count += k; - n += k; - if n == buf.len() { - break - } - } else { - let body = self.body.take().expect("loop condition ensures `self.body` is always defined; qed"); - match body.into_future().wait() { // wait for next chunk - Err((e, _)) => { - error!(target: "fetch", "failed to read chunk: {}", e); - return Err(io::Error::new(io::ErrorKind::Other, "failed to read body chunk")) - } - Ok((None, _)) => break, // body is exhausted, break out of the loop - Ok((Some(c), b)) => { - self.body = Some(b); - self.chunk = c; - self.offset = 0 - } - } - } - } - Ok(n) - } + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let mut n = 0; + while self.body.is_some() { + // Can we still read from the current chunk? + if self.offset < self.chunk.len() { + let k = min(self.chunk.len() - self.offset, buf.len() - n); + if self.count + k > self.abort.max_size() { + debug!(target: "fetch", "size limit {:?} exceeded", self.abort.max_size()); + return Err(io::Error::new( + io::ErrorKind::PermissionDenied, + "size limit exceeded", + )); + } + let c = &self.chunk[self.offset..self.offset + k]; + (&mut buf[n..n + k]).copy_from_slice(c); + self.offset += k; + self.count += k; + n += k; + if n == buf.len() { + break; + } + } else { + let body = self + .body + .take() + .expect("loop condition ensures `self.body` is always defined; qed"); + match body.into_future().wait() { + // wait for next chunk + Err((e, _)) => { + error!(target: "fetch", "failed to read chunk: {}", e); + return Err(io::Error::new( + io::ErrorKind::Other, + "failed to read body chunk", + )); + } + Ok((None, _)) => break, // body is exhausted, break out of the loop + Ok((Some(c), b)) => { + self.body = Some(b); + self.chunk = c; + self.offset = 0 + } + } + } + } + Ok(n) + } } /// Fetch error cases. #[derive(Debug)] pub enum Error { - /// Hyper gave us an error. - Hyper(hyper::Error), - /// A hyper header conversion error. 
- HyperHeaderToStrError(hyper::header::ToStrError), - /// An integer parsing error. - ParseInt(std::num::ParseIntError), - /// Some I/O error occured. - Io(io::Error), - /// Invalid URLs where attempted to parse. - Url(url::ParseError), - /// Calling code invoked `Abort::abort`. - Aborted, - /// Too many redirects have been encountered. - TooManyRedirects, - /// tokio-timer inner future gave us an error. - TokioTimeoutInnerVal(String), - /// tokio-timer gave us an error. - TokioTimer(Option), - /// The maximum duration was reached. - Timeout, - /// The response body is too large. - SizeLimit, - /// The background processing thread does not run. - BackgroundThreadDead, + /// Hyper gave us an error. + Hyper(hyper::Error), + /// A hyper header conversion error. + HyperHeaderToStrError(hyper::header::ToStrError), + /// An integer parsing error. + ParseInt(std::num::ParseIntError), + /// Some I/O error occured. + Io(io::Error), + /// Invalid URLs where attempted to parse. + Url(url::ParseError), + /// Calling code invoked `Abort::abort`. + Aborted, + /// Too many redirects have been encountered. + TooManyRedirects, + /// tokio-timer inner future gave us an error. + TokioTimeoutInnerVal(String), + /// tokio-timer gave us an error. + TokioTimer(Option), + /// The maximum duration was reached. + Timeout, + /// The response body is too large. + SizeLimit, + /// The background processing thread does not run. 
+ BackgroundThreadDead, } impl fmt::Display for Error { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Aborted => write!(fmt, "The request has been aborted."), - Error::Hyper(ref e) => write!(fmt, "{}", e), - Error::HyperHeaderToStrError(ref e) => write!(fmt, "{}", e), - Error::ParseInt(ref e) => write!(fmt, "{}", e), - Error::Url(ref e) => write!(fmt, "{}", e), - Error::Io(ref e) => write!(fmt, "{}", e), - Error::BackgroundThreadDead => write!(fmt, "background thread gond"), - Error::TooManyRedirects => write!(fmt, "too many redirects"), - Error::TokioTimeoutInnerVal(ref s) => write!(fmt, "tokio timer inner value error: {:?}", s), - Error::TokioTimer(ref e) => write!(fmt, "tokio timer error: {:?}", e), - Error::Timeout => write!(fmt, "request timed out"), - Error::SizeLimit => write!(fmt, "size limit reached"), - } - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Aborted => write!(fmt, "The request has been aborted."), + Error::Hyper(ref e) => write!(fmt, "{}", e), + Error::HyperHeaderToStrError(ref e) => write!(fmt, "{}", e), + Error::ParseInt(ref e) => write!(fmt, "{}", e), + Error::Url(ref e) => write!(fmt, "{}", e), + Error::Io(ref e) => write!(fmt, "{}", e), + Error::BackgroundThreadDead => write!(fmt, "background thread gond"), + Error::TooManyRedirects => write!(fmt, "too many redirects"), + Error::TokioTimeoutInnerVal(ref s) => { + write!(fmt, "tokio timer inner value error: {:?}", s) + } + Error::TokioTimer(ref e) => write!(fmt, "tokio timer error: {:?}", e), + Error::Timeout => write!(fmt, "request timed out"), + Error::SizeLimit => write!(fmt, "size limit reached"), + } + } } impl ::std::error::Error for Error { - fn description(&self) -> &str { "Fetch client error" } - fn cause(&self) -> Option<&::std::error::Error> { None } + fn description(&self) -> &str { + "Fetch client error" + } + fn cause(&self) -> Option<&::std::error::Error> { + None + } } impl From for Error { - fn 
from(e: hyper::Error) -> Self { - Error::Hyper(e) - } + fn from(e: hyper::Error) -> Self { + Error::Hyper(e) + } } impl From for Error { - fn from(e: hyper::header::ToStrError) -> Self { - Error::HyperHeaderToStrError(e) - } + fn from(e: hyper::header::ToStrError) -> Self { + Error::HyperHeaderToStrError(e) + } } impl From for Error { - fn from(e: std::num::ParseIntError) -> Self { - Error::ParseInt(e) - } + fn from(e: std::num::ParseIntError) -> Self { + Error::ParseInt(e) + } } impl From for Error { - fn from(e: io::Error) -> Self { - Error::Io(e) - } + fn from(e: io::Error) -> Self { + Error::Io(e) + } } impl From for Error { - fn from(e: url::ParseError) -> Self { - Error::Url(e) - } + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } } impl From> for Error { - fn from(e: tokio::timer::timeout::Error) -> Self { - if e.is_inner() { - Error::TokioTimeoutInnerVal(format!("{:?}", e.into_inner().unwrap())) - } else if e.is_elapsed() { - Error::Timeout - } else { - Error::TokioTimer(e.into_timer()) - } - } + fn from(e: tokio::timer::timeout::Error) -> Self { + if e.is_inner() { + Error::TokioTimeoutInnerVal(format!("{:?}", e.into_inner().unwrap())) + } else if e.is_elapsed() { + Error::Timeout + } else { + Error::TokioTimer(e.into_timer()) + } + } } impl From for Error { - fn from(e: tokio::timer::Error) -> Self { - Error::TokioTimer(Some(e)) - } + fn from(e: tokio::timer::Error) -> Self { + Error::TokioTimer(Some(e)) + } } #[cfg(test)] mod test { - use super::*; - use futures::future; - use futures::sync::oneshot; - use hyper::{ - StatusCode, - service::Service, - }; - use tokio::timer::Delay; - use tokio::runtime::current_thread::Runtime; - use std::io::Read; - use std::net::SocketAddr; + use super::*; + use futures::{future, sync::oneshot}; + use hyper::{service::Service, StatusCode}; + use std::{io::Read, net::SocketAddr}; + use tokio::{runtime::current_thread::Runtime, timer::Delay}; - const ADDRESS: &str = "127.0.0.1:0"; + const ADDRESS: &str = 
"127.0.0.1:0"; - #[test] - fn it_should_fetch() { - let server = TestServer::run(); - let client = Client::new(4).unwrap(); - let mut runtime = Runtime::new().unwrap(); + #[test] + fn it_should_fetch() { + let server = TestServer::run(); + let client = Client::new(4).unwrap(); + let mut runtime = Runtime::new().unwrap(); - let future = client.get(&format!("http://{}?123", server.addr()), Abort::default()) - .map(|resp| { - assert!(resp.is_success()); - resp - }) - .map(|resp| resp.concat2()) - .flatten() - .map(|body| assert_eq!(&body[..], b"123")) - .map_err(|err| panic!(err)); + let future = client + .get(&format!("http://{}?123", server.addr()), Abort::default()) + .map(|resp| { + assert!(resp.is_success()); + resp + }) + .map(|resp| resp.concat2()) + .flatten() + .map(|body| assert_eq!(&body[..], b"123")) + .map_err(|err| panic!(err)); - runtime.block_on(future).unwrap(); - } + runtime.block_on(future).unwrap(); + } - #[test] - fn it_should_fetch_in_light_mode() { - let server = TestServer::run(); - let client = Client::new(1).unwrap(); - let mut runtime = Runtime::new().unwrap(); + #[test] + fn it_should_fetch_in_light_mode() { + let server = TestServer::run(); + let client = Client::new(1).unwrap(); + let mut runtime = Runtime::new().unwrap(); - let future = client.get(&format!("http://{}?123", server.addr()), Abort::default()) - .map(|resp| { - assert!(resp.is_success()); - resp - }) - .map(|resp| resp.concat2()) - .flatten() - .map(|body| assert_eq!(&body[..], b"123")) - .map_err(|err| panic!(err)); + let future = client + .get(&format!("http://{}?123", server.addr()), Abort::default()) + .map(|resp| { + assert!(resp.is_success()); + resp + }) + .map(|resp| resp.concat2()) + .flatten() + .map(|body| assert_eq!(&body[..], b"123")) + .map_err(|err| panic!(err)); - runtime.block_on(future).unwrap(); - } + runtime.block_on(future).unwrap(); + } - #[test] - fn it_should_timeout() { - let server = TestServer::run(); - let client = Client::new(4).unwrap(); - let 
mut runtime = Runtime::new().unwrap(); + #[test] + fn it_should_timeout() { + let server = TestServer::run(); + let client = Client::new(4).unwrap(); + let mut runtime = Runtime::new().unwrap(); - let abort = Abort::default().with_max_duration(Duration::from_secs(1)); + let abort = Abort::default().with_max_duration(Duration::from_secs(1)); - let future = client.get(&format!("http://{}/delay?3", server.addr()), abort) - .then(|res| { - match res { - Err(Error::Timeout) => Ok::<_, ()>(()), - other => panic!("expected timeout, got {:?}", other), - } - }); + let future = client + .get(&format!("http://{}/delay?3", server.addr()), abort) + .then(|res| match res { + Err(Error::Timeout) => Ok::<_, ()>(()), + other => panic!("expected timeout, got {:?}", other), + }); - runtime.block_on(future).unwrap(); - } + runtime.block_on(future).unwrap(); + } - #[test] - fn it_should_follow_redirects() { - let server = TestServer::run(); - let client = Client::new(4).unwrap(); - let mut runtime = Runtime::new().unwrap(); + #[test] + fn it_should_follow_redirects() { + let server = TestServer::run(); + let client = Client::new(4).unwrap(); + let mut runtime = Runtime::new().unwrap(); - let abort = Abort::default(); + let abort = Abort::default(); - let future = client.get(&format!("http://{}/redirect?http://{}/", server.addr(), server.addr()), abort) - .and_then(|resp| { - if resp.is_success() { Ok(()) } else { panic!("Response unsuccessful") } - }); + let future = client + .get( + &format!( + "http://{}/redirect?http://{}/", + server.addr(), + server.addr() + ), + abort, + ) + .and_then(|resp| { + if resp.is_success() { + Ok(()) + } else { + panic!("Response unsuccessful") + } + }); - runtime.block_on(future).unwrap(); - } + runtime.block_on(future).unwrap(); + } - #[test] - fn it_should_follow_relative_redirects() { - let server = TestServer::run(); - let client = Client::new(4).unwrap(); - let mut runtime = Runtime::new().unwrap(); + #[test] + fn 
it_should_follow_relative_redirects() { + let server = TestServer::run(); + let client = Client::new(4).unwrap(); + let mut runtime = Runtime::new().unwrap(); - let abort = Abort::default().with_max_redirects(4); - let future = client.get(&format!("http://{}/redirect?/", server.addr()), abort) - .and_then(|resp| { - if resp.is_success() { Ok(()) } else { panic!("Response unsuccessful") } - }); + let abort = Abort::default().with_max_redirects(4); + let future = client + .get(&format!("http://{}/redirect?/", server.addr()), abort) + .and_then(|resp| { + if resp.is_success() { + Ok(()) + } else { + panic!("Response unsuccessful") + } + }); - runtime.block_on(future).unwrap(); - } + runtime.block_on(future).unwrap(); + } - #[test] - fn it_should_not_follow_too_many_redirects() { - let server = TestServer::run(); - let client = Client::new(4).unwrap(); - let mut runtime = Runtime::new().unwrap(); + #[test] + fn it_should_not_follow_too_many_redirects() { + let server = TestServer::run(); + let client = Client::new(4).unwrap(); + let mut runtime = Runtime::new().unwrap(); - let abort = Abort::default().with_max_redirects(3); - let future = client.get(&format!("http://{}/loop", server.addr()), abort) - .then(|res| { - match res { - Err(Error::TooManyRedirects) => Ok::<_, ()>(()), - other => panic!("expected too many redirects error, got {:?}", other) - } - }); + let abort = Abort::default().with_max_redirects(3); + let future = client + .get(&format!("http://{}/loop", server.addr()), abort) + .then(|res| match res { + Err(Error::TooManyRedirects) => Ok::<_, ()>(()), + other => panic!("expected too many redirects error, got {:?}", other), + }); - runtime.block_on(future).unwrap(); - } + runtime.block_on(future).unwrap(); + } - #[test] - fn it_should_read_data() { - let server = TestServer::run(); - let client = Client::new(4).unwrap(); - let mut runtime = Runtime::new().unwrap(); + #[test] + fn it_should_read_data() { + let server = TestServer::run(); + let client = 
Client::new(4).unwrap(); + let mut runtime = Runtime::new().unwrap(); - let abort = Abort::default(); - let future = client.get(&format!("http://{}?abcdefghijklmnopqrstuvwxyz", server.addr()), abort) - .and_then(|resp| { - if resp.is_success() { Ok(resp) } else { panic!("Response unsuccessful") } - }) - .map(|resp| resp.concat2()) - .flatten() - .map(|body| assert_eq!(&body[..], b"abcdefghijklmnopqrstuvwxyz")); + let abort = Abort::default(); + let future = client + .get( + &format!("http://{}?abcdefghijklmnopqrstuvwxyz", server.addr()), + abort, + ) + .and_then(|resp| { + if resp.is_success() { + Ok(resp) + } else { + panic!("Response unsuccessful") + } + }) + .map(|resp| resp.concat2()) + .flatten() + .map(|body| assert_eq!(&body[..], b"abcdefghijklmnopqrstuvwxyz")); - runtime.block_on(future).unwrap(); - } + runtime.block_on(future).unwrap(); + } - #[test] - fn it_should_not_read_too_much_data() { - let server = TestServer::run(); - let client = Client::new(4).unwrap(); - let mut runtime = Runtime::new().unwrap(); + #[test] + fn it_should_not_read_too_much_data() { + let server = TestServer::run(); + let client = Client::new(4).unwrap(); + let mut runtime = Runtime::new().unwrap(); - let abort = Abort::default().with_max_size(3); - let future = client.get(&format!("http://{}/?1234", server.addr()), abort) - .and_then(|resp| { - if resp.is_success() { Ok(resp) } else { panic!("Response unsuccessful") } - }) - .map(|resp| resp.concat2()) - .flatten() - .then(|body| { - match body { - Err(Error::SizeLimit) => Ok::<_, ()>(()), - other => panic!("expected size limit error, got {:?}", other), - } - }); + let abort = Abort::default().with_max_size(3); + let future = client + .get(&format!("http://{}/?1234", server.addr()), abort) + .and_then(|resp| { + if resp.is_success() { + Ok(resp) + } else { + panic!("Response unsuccessful") + } + }) + .map(|resp| resp.concat2()) + .flatten() + .then(|body| match body { + Err(Error::SizeLimit) => Ok::<_, ()>(()), + other => 
panic!("expected size limit error, got {:?}", other), + }); - runtime.block_on(future).unwrap(); - } + runtime.block_on(future).unwrap(); + } - #[test] - fn it_should_not_read_too_much_data_sync() { - let server = TestServer::run(); - let client = Client::new(4).unwrap(); - let mut runtime = Runtime::new().unwrap(); + #[test] + fn it_should_not_read_too_much_data_sync() { + let server = TestServer::run(); + let client = Client::new(4).unwrap(); + let mut runtime = Runtime::new().unwrap(); - // let abort = Abort::default().with_max_size(3); - // let resp = client.get(&format!("http://{}/?1234", server.addr()), abort).wait().unwrap(); - // assert!(resp.is_success()); - // let mut buffer = Vec::new(); - // let mut reader = BodyReader::new(resp); - // match reader.read_to_end(&mut buffer) { - // Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => {} - // other => panic!("expected size limit error, got {:?}", other) - // } + // let abort = Abort::default().with_max_size(3); + // let resp = client.get(&format!("http://{}/?1234", server.addr()), abort).wait().unwrap(); + // assert!(resp.is_success()); + // let mut buffer = Vec::new(); + // let mut reader = BodyReader::new(resp); + // match reader.read_to_end(&mut buffer) { + // Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => {} + // other => panic!("expected size limit error, got {:?}", other) + // } - // FIXME (c0gent): The prior version of this test (pre-hyper-0.12, - // commented out above) is not possible to recreate. It relied on an - // apparent bug in `Client::background_thread` which suppressed the - // `SizeLimit` error from occurring. This is due to the headers - // collection not returning a value for content length when queried. - // The precise reason why this was happening is unclear. + // FIXME (c0gent): The prior version of this test (pre-hyper-0.12, + // commented out above) is not possible to recreate. 
It relied on an + // apparent bug in `Client::background_thread` which suppressed the + // `SizeLimit` error from occurring. This is due to the headers + // collection not returning a value for content length when queried. + // The precise reason why this was happening is unclear. - let abort = Abort::default().with_max_size(3); - let future = client.get(&format!("http://{}/?1234", server.addr()), abort) - .and_then(|resp| { - assert_eq!(true, false, "Unreachable. (see FIXME note)"); - assert!(resp.is_success()); - let mut buffer = Vec::new(); - let mut reader = BodyReader::new(resp); - match reader.read_to_end(&mut buffer) { - Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => Ok(()), - other => panic!("expected size limit error, got {:?}", other) - } - }); + let abort = Abort::default().with_max_size(3); + let future = client + .get(&format!("http://{}/?1234", server.addr()), abort) + .and_then(|resp| { + assert_eq!(true, false, "Unreachable. (see FIXME note)"); + assert!(resp.is_success()); + let mut buffer = Vec::new(); + let mut reader = BodyReader::new(resp); + match reader.read_to_end(&mut buffer) { + Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => Ok(()), + other => panic!("expected size limit error, got {:?}", other), + } + }); - // FIXME: This simply demonstrates the above point. - match runtime.block_on(future) { - Err(Error::SizeLimit) => {}, - other => panic!("Expected `Error::SizeLimit`, got: {:?}", other), - } - } + // FIXME: This simply demonstrates the above point. 
+ match runtime.block_on(future) { + Err(Error::SizeLimit) => {} + other => panic!("Expected `Error::SizeLimit`, got: {:?}", other), + } + } - struct TestServer; + struct TestServer; - impl Service for TestServer { - type ReqBody = hyper::Body; - type ResBody = hyper::Body; - type Error = Error; - type Future = Box, Error=Self::Error> + Send + 'static>; + impl Service for TestServer { + type ReqBody = hyper::Body; + type ResBody = hyper::Body; + type Error = Error; + type Future = Box< + Future, Error = Self::Error> + Send + 'static, + >; - fn call(&mut self, req: hyper::Request) -> Self::Future { - match req.uri().path() { - "/" => { - let body = req.uri().query().unwrap_or("").to_string(); - let res = hyper::Response::new(body.into()); - Box::new(future::ok(res)) - } - "/redirect" => { - let loc = req.uri().query().unwrap_or("/").to_string(); - let res = hyper::Response::builder() - .status(StatusCode::MOVED_PERMANENTLY) - .header(hyper::header::LOCATION, loc) - .body(hyper::Body::empty()) - .expect("Unable to create response"); - Box::new(future::ok(res)) - } - "/loop" => { - let res = hyper::Response::builder() - .status(StatusCode::MOVED_PERMANENTLY) - .header(hyper::header::LOCATION, "/loop") - .body(hyper::Body::empty()) - .expect("Unable to create response"); - Box::new(future::ok(res)) - } - "/delay" => { - let dur = Duration::from_secs(req.uri().query().unwrap_or("0").parse().unwrap()); - let delayed_res = Delay::new(std::time::Instant::now() + dur) - .and_then(|_| Ok::<_, _>(hyper::Response::new(hyper::Body::empty()))) - .from_err(); - Box::new(delayed_res) - } - _ => { - let res = hyper::Response::builder() - .status(StatusCode::NOT_FOUND) - .body(hyper::Body::empty()) - .expect("Unable to create response"); - Box::new(future::ok(res)) - } - } - } - } + fn call(&mut self, req: hyper::Request) -> Self::Future { + match req.uri().path() { + "/" => { + let body = req.uri().query().unwrap_or("").to_string(); + let res = hyper::Response::new(body.into()); + 
Box::new(future::ok(res)) + } + "/redirect" => { + let loc = req.uri().query().unwrap_or("/").to_string(); + let res = hyper::Response::builder() + .status(StatusCode::MOVED_PERMANENTLY) + .header(hyper::header::LOCATION, loc) + .body(hyper::Body::empty()) + .expect("Unable to create response"); + Box::new(future::ok(res)) + } + "/loop" => { + let res = hyper::Response::builder() + .status(StatusCode::MOVED_PERMANENTLY) + .header(hyper::header::LOCATION, "/loop") + .body(hyper::Body::empty()) + .expect("Unable to create response"); + Box::new(future::ok(res)) + } + "/delay" => { + let dur = + Duration::from_secs(req.uri().query().unwrap_or("0").parse().unwrap()); + let delayed_res = Delay::new(std::time::Instant::now() + dur) + .and_then(|_| Ok::<_, _>(hyper::Response::new(hyper::Body::empty()))) + .from_err(); + Box::new(delayed_res) + } + _ => { + let res = hyper::Response::builder() + .status(StatusCode::NOT_FOUND) + .body(hyper::Body::empty()) + .expect("Unable to create response"); + Box::new(future::ok(res)) + } + } + } + } - impl TestServer { - fn run() -> Handle { - let (tx_start, rx_start) = std::sync::mpsc::sync_channel(1); - let (tx_end, rx_end) = oneshot::channel(); - let rx_end_fut = rx_end.map(|_| ()).map_err(|_| ()); - thread::spawn(move || { - let addr = ADDRESS.parse().unwrap(); + impl TestServer { + fn run() -> Handle { + let (tx_start, rx_start) = std::sync::mpsc::sync_channel(1); + let (tx_end, rx_end) = oneshot::channel(); + let rx_end_fut = rx_end.map(|_| ()).map_err(|_| ()); + thread::spawn(move || { + let addr = ADDRESS.parse().unwrap(); - let server = hyper::server::Server::bind(&addr) - .serve(|| future::ok::<_, hyper::Error>(TestServer)); + let server = hyper::server::Server::bind(&addr) + .serve(|| future::ok::<_, hyper::Error>(TestServer)); - tx_start.send(server.local_addr()).unwrap_or(()); + tx_start.send(server.local_addr()).unwrap_or(()); - tokio::run( - server.with_graceful_shutdown(rx_end_fut) - .map_err(|e| panic!("server error: 
{}", e)) - ); - }); + tokio::run( + server + .with_graceful_shutdown(rx_end_fut) + .map_err(|e| panic!("server error: {}", e)), + ); + }); - Handle(rx_start.recv().unwrap(), Some(tx_end)) - } - } + Handle(rx_start.recv().unwrap(), Some(tx_end)) + } + } - struct Handle(SocketAddr, Option>); + struct Handle(SocketAddr, Option>); - impl Handle { - fn addr(&self) -> SocketAddr { - self.0 - } - } + impl Handle { + fn addr(&self) -> SocketAddr { + self.0 + } + } - impl Drop for Handle { - fn drop(&mut self) { - self.1.take().unwrap().send(()).unwrap(); - } - } + impl Drop for Handle { + fn drop(&mut self) { + self.1.take().unwrap().send(()).unwrap(); + } + } } diff --git a/util/fetch/src/lib.rs b/util/fetch/src/lib.rs index eb172d2c6..e444d1fff 100644 --- a/util/fetch/src/lib.rs +++ b/util/fetch/src/lib.rs @@ -24,17 +24,17 @@ extern crate log; #[macro_use] extern crate futures; +extern crate http; extern crate hyper; extern crate hyper_rustls; -extern crate http; +extern crate bytes; extern crate tokio; extern crate url; -extern crate bytes; /// Fetch client implementation. 
pub mod client; -pub use url::Url; -pub use self::client::{Client, Fetch, Error, Response, Request, Abort, BodyReader}; +pub use self::client::{Abort, BodyReader, Client, Error, Fetch, Request, Response}; pub use hyper::Method; +pub use url::Url; diff --git a/util/io/src/lib.rs b/util/io/src/lib.rs index 997edd7c4..895e0988f 100644 --- a/util/io/src/lib.rs +++ b/util/io/src/lib.rs @@ -73,15 +73,15 @@ extern crate mio; #[macro_use] extern crate log as rlog; -extern crate slab; extern crate crossbeam_deque as deque; -extern crate parking_lot; -extern crate num_cpus; -extern crate timer; extern crate fnv; -extern crate time; -extern crate tokio; extern crate futures; +extern crate num_cpus; +extern crate parking_lot; +extern crate slab; +extern crate time; +extern crate timer; +extern crate tokio; #[cfg(feature = "mio")] mod service_mio; @@ -90,191 +90,220 @@ mod service_non_mio; #[cfg(feature = "mio")] mod worker; -use std::cell::Cell; -use std::{fmt, error}; #[cfg(feature = "mio")] use mio::deprecated::{EventLoop, NotifyError}; #[cfg(feature = "mio")] use mio::Token; +use std::{cell::Cell, error, fmt}; thread_local! { - /// Stack size - /// Should be modified if it is changed in Rust since it is no way - /// to know or get it - pub static LOCAL_STACK_SIZE: Cell = Cell::new(::std::env::var("RUST_MIN_STACK").ok().and_then(|s| s.parse().ok()).unwrap_or(2 * 1024 * 1024)); + /// Stack size + /// Should be modified if it is changed in Rust since it is no way + /// to know or get it + pub static LOCAL_STACK_SIZE: Cell = Cell::new(::std::env::var("RUST_MIN_STACK").ok().and_then(|s| s.parse().ok()).unwrap_or(2 * 1024 * 1024)); } #[derive(Debug)] /// IO Error pub enum IoError { - /// Low level error from mio crate - #[cfg(feature = "mio")] - Mio(::std::io::Error), - /// Error concerning the Rust standard library's IO subsystem. 
- StdIo(::std::io::Error), + /// Low level error from mio crate + #[cfg(feature = "mio")] + Mio(::std::io::Error), + /// Error concerning the Rust standard library's IO subsystem. + StdIo(::std::io::Error), } impl fmt::Display for IoError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // just defer to the std implementation for now. - // we can refine the formatting when more variants are added. - match *self { - #[cfg(feature = "mio")] - IoError::Mio(ref std_err) => std_err.fmt(f), - IoError::StdIo(ref std_err) => std_err.fmt(f), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // just defer to the std implementation for now. + // we can refine the formatting when more variants are added. + match *self { + #[cfg(feature = "mio")] + IoError::Mio(ref std_err) => std_err.fmt(f), + IoError::StdIo(ref std_err) => std_err.fmt(f), + } + } } impl error::Error for IoError { - fn description(&self) -> &str { - "IO error" - } + fn description(&self) -> &str { + "IO error" + } } impl From<::std::io::Error> for IoError { - fn from(err: ::std::io::Error) -> IoError { - IoError::StdIo(err) - } + fn from(err: ::std::io::Error) -> IoError { + IoError::StdIo(err) + } } #[cfg(feature = "mio")] -impl From>> for IoError where Message: Send { - fn from(_err: NotifyError>) -> IoError { - IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error")) - } +impl From>> for IoError +where + Message: Send, +{ + fn from(_err: NotifyError>) -> IoError { + IoError::Mio(::std::io::Error::new( + ::std::io::ErrorKind::ConnectionAborted, + "Network IO notification error", + )) + } } /// Generic IO handler. /// All the handler function are called from within IO event loop. 
/// `Message` type is used as notification data -pub trait IoHandler: Send + Sync where Message: Send + Sync + 'static { - /// Initialize the handler - fn initialize(&self, _io: &IoContext) {} - /// Timer function called after a timeout created with `HandlerIo::timeout`. - fn timeout(&self, _io: &IoContext, _timer: TimerToken) {} - /// Called when a broadcasted message is received. The message can only be sent from a different IO handler. - fn message(&self, _io: &IoContext, _message: &Message) {} - /// Called when an IO stream gets closed - #[cfg(feature = "mio")] - fn stream_hup(&self, _io: &IoContext, _stream: StreamToken) {} - /// Called when an IO stream can be read from - #[cfg(feature = "mio")] - fn stream_readable(&self, _io: &IoContext, _stream: StreamToken) {} - /// Called when an IO stream can be written to - #[cfg(feature = "mio")] - fn stream_writable(&self, _io: &IoContext, _stream: StreamToken) {} - /// Register a new stream with the event loop - #[cfg(feature = "mio")] - fn register_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop>) {} - /// Re-register a stream with the event loop - #[cfg(feature = "mio")] - fn update_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop>) {} - /// Deregister a stream. Called whenstream is removed from event loop - #[cfg(feature = "mio")] - fn deregister_stream(&self, _stream: StreamToken, _event_loop: &mut EventLoop>) {} +pub trait IoHandler: Send + Sync +where + Message: Send + Sync + 'static, +{ + /// Initialize the handler + fn initialize(&self, _io: &IoContext) {} + /// Timer function called after a timeout created with `HandlerIo::timeout`. + fn timeout(&self, _io: &IoContext, _timer: TimerToken) {} + /// Called when a broadcasted message is received. The message can only be sent from a different IO handler. 
+ fn message(&self, _io: &IoContext, _message: &Message) {} + /// Called when an IO stream gets closed + #[cfg(feature = "mio")] + fn stream_hup(&self, _io: &IoContext, _stream: StreamToken) {} + /// Called when an IO stream can be read from + #[cfg(feature = "mio")] + fn stream_readable(&self, _io: &IoContext, _stream: StreamToken) {} + /// Called when an IO stream can be written to + #[cfg(feature = "mio")] + fn stream_writable(&self, _io: &IoContext, _stream: StreamToken) {} + /// Register a new stream with the event loop + #[cfg(feature = "mio")] + fn register_stream( + &self, + _stream: StreamToken, + _reg: Token, + _event_loop: &mut EventLoop>, + ) { + } + /// Re-register a stream with the event loop + #[cfg(feature = "mio")] + fn update_stream( + &self, + _stream: StreamToken, + _reg: Token, + _event_loop: &mut EventLoop>, + ) { + } + /// Deregister a stream. Called whenstream is removed from event loop + #[cfg(feature = "mio")] + fn deregister_stream( + &self, + _stream: StreamToken, + _event_loop: &mut EventLoop>, + ) { + } } #[cfg(feature = "mio")] -pub use service_mio::{TimerToken, StreamToken, IoContext, IoService, IoChannel, IoManager, TOKENS_PER_HANDLER}; +pub use service_mio::{ + IoChannel, IoContext, IoManager, IoService, StreamToken, TimerToken, TOKENS_PER_HANDLER, +}; #[cfg(not(feature = "mio"))] -pub use service_non_mio::{TimerToken, IoContext, IoService, IoChannel, TOKENS_PER_HANDLER}; +pub use service_non_mio::{IoChannel, IoContext, IoService, TimerToken, TOKENS_PER_HANDLER}; #[cfg(test)] mod tests { - use std::sync::Arc; - use std::sync::atomic; - use std::thread; - use std::time::Duration; - use super::*; + use super::*; + use std::{ + sync::{atomic, Arc}, + thread, + time::Duration, + }; - // Mio's behaviour is too unstable for this test. Sometimes we have to wait a few milliseconds, - // sometimes more than 5 seconds for the message to arrive. 
- // Therefore we ignore this test in order to not have spurious failure when running continuous - // integration. - #[test] - #[cfg_attr(feature = "mio", ignore)] - fn send_message_to_handler() { - struct MyHandler(atomic::AtomicBool); + // Mio's behaviour is too unstable for this test. Sometimes we have to wait a few milliseconds, + // sometimes more than 5 seconds for the message to arrive. + // Therefore we ignore this test in order to not have spurious failure when running continuous + // integration. + #[test] + #[cfg_attr(feature = "mio", ignore)] + fn send_message_to_handler() { + struct MyHandler(atomic::AtomicBool); - #[derive(Clone)] - struct MyMessage { - data: u32 - } + #[derive(Clone)] + struct MyMessage { + data: u32, + } - impl IoHandler for MyHandler { - fn message(&self, _io: &IoContext, message: &MyMessage) { - assert_eq!(message.data, 5); - self.0.store(true, atomic::Ordering::SeqCst); - } - } + impl IoHandler for MyHandler { + fn message(&self, _io: &IoContext, message: &MyMessage) { + assert_eq!(message.data, 5); + self.0.store(true, atomic::Ordering::SeqCst); + } + } - let handler = Arc::new(MyHandler(atomic::AtomicBool::new(false))); + let handler = Arc::new(MyHandler(atomic::AtomicBool::new(false))); - let service = IoService::::start().expect("Error creating network service"); - service.register_handler(handler.clone()).unwrap(); + let service = IoService::::start().expect("Error creating network service"); + service.register_handler(handler.clone()).unwrap(); - service.send_message(MyMessage { data: 5 }).unwrap(); + service.send_message(MyMessage { data: 5 }).unwrap(); - thread::sleep(Duration::from_secs(1)); - assert!(handler.0.load(atomic::Ordering::SeqCst)); - } + thread::sleep(Duration::from_secs(1)); + assert!(handler.0.load(atomic::Ordering::SeqCst)); + } - #[test] - fn timeout_working() { - struct MyHandler(atomic::AtomicBool); + #[test] + fn timeout_working() { + struct MyHandler(atomic::AtomicBool); - #[derive(Clone)] - struct 
MyMessage { - data: u32 - } + #[derive(Clone)] + struct MyMessage { + data: u32, + } - impl IoHandler for MyHandler { - fn initialize(&self, io: &IoContext) { - io.register_timer_once(1234, Duration::from_millis(500)).unwrap(); - } + impl IoHandler for MyHandler { + fn initialize(&self, io: &IoContext) { + io.register_timer_once(1234, Duration::from_millis(500)) + .unwrap(); + } - fn timeout(&self, _io: &IoContext, timer: TimerToken) { - assert_eq!(timer, 1234); - assert!(!self.0.swap(true, atomic::Ordering::SeqCst)); - } - } + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + assert_eq!(timer, 1234); + assert!(!self.0.swap(true, atomic::Ordering::SeqCst)); + } + } - let handler = Arc::new(MyHandler(atomic::AtomicBool::new(false))); + let handler = Arc::new(MyHandler(atomic::AtomicBool::new(false))); - let service = IoService::::start().expect("Error creating network service"); - service.register_handler(handler.clone()).unwrap(); + let service = IoService::::start().expect("Error creating network service"); + service.register_handler(handler.clone()).unwrap(); - thread::sleep(Duration::from_secs(2)); - assert!(handler.0.load(atomic::Ordering::SeqCst)); - } + thread::sleep(Duration::from_secs(2)); + assert!(handler.0.load(atomic::Ordering::SeqCst)); + } - #[test] - fn multi_timeout_working() { - struct MyHandler(atomic::AtomicUsize); + #[test] + fn multi_timeout_working() { + struct MyHandler(atomic::AtomicUsize); - #[derive(Clone)] - struct MyMessage { - data: u32 - } + #[derive(Clone)] + struct MyMessage { + data: u32, + } - impl IoHandler for MyHandler { - fn initialize(&self, io: &IoContext) { - io.register_timer(1234, Duration::from_millis(500)).unwrap(); - } + impl IoHandler for MyHandler { + fn initialize(&self, io: &IoContext) { + io.register_timer(1234, Duration::from_millis(500)).unwrap(); + } - fn timeout(&self, _io: &IoContext, timer: TimerToken) { - assert_eq!(timer, 1234); - self.0.fetch_add(1, atomic::Ordering::SeqCst); - } - } + fn 
timeout(&self, _io: &IoContext, timer: TimerToken) { + assert_eq!(timer, 1234); + self.0.fetch_add(1, atomic::Ordering::SeqCst); + } + } - let handler = Arc::new(MyHandler(atomic::AtomicUsize::new(0))); + let handler = Arc::new(MyHandler(atomic::AtomicUsize::new(0))); - let service = IoService::::start().expect("Error creating network service"); - service.register_handler(handler.clone()).unwrap(); + let service = IoService::::start().expect("Error creating network service"); + service.register_handler(handler.clone()).unwrap(); - thread::sleep(Duration::from_secs(2)); - assert!(handler.0.load(atomic::Ordering::SeqCst) >= 2); - } + thread::sleep(Duration::from_secs(2)); + assert!(handler.0.load(atomic::Ordering::SeqCst) >= 2); + } } diff --git a/util/io/src/service_mio.rs b/util/io/src/service_mio.rs index f1f19bfc2..1351b1c97 100644 --- a/util/io/src/service_mio.rs +++ b/util/io/src/service_mio.rs @@ -14,18 +14,23 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::{Arc, Weak}; -use std::thread::{self, JoinHandle}; -use std::collections::HashMap; -use mio::*; -use mio::timer::{Timeout}; -use mio::deprecated::{EventLoop, Handler, Sender, EventLoopBuilder}; use deque; +use mio::{ + deprecated::{EventLoop, EventLoopBuilder, Handler, Sender}, + timer::Timeout, + *, +}; +use parking_lot::{Condvar, Mutex, RwLock}; use slab::Slab; -use {IoError, IoHandler}; -use worker::{Worker, Work, WorkType}; -use parking_lot::{Condvar, RwLock, Mutex}; -use std::time::Duration; +use std::{ + collections::HashMap, + sync::{Arc, Weak}, + thread::{self, JoinHandle}, + time::Duration, +}; +use worker::{Work, WorkType, Worker}; +use IoError; +use IoHandler; /// Timer ID pub type TimerToken = usize; @@ -40,456 +45,559 @@ const MAX_HANDLERS: usize = 8; /// Messages used to communicate with the event loop from other threads. 
#[derive(Clone)] -pub enum IoMessage where Message: Send + Sized { - /// Shutdown the event loop - Shutdown, - /// Register a new protocol handler. - AddHandler { - handler: Arc+Send>, - }, - RemoveHandler { - handler_id: HandlerId, - }, - AddTimer { - handler_id: HandlerId, - token: TimerToken, - delay: Duration, - once: bool, - }, - RemoveTimer { - handler_id: HandlerId, - token: TimerToken, - }, - RegisterStream { - handler_id: HandlerId, - token: StreamToken, - }, - DeregisterStream { - handler_id: HandlerId, - token: StreamToken, - }, - UpdateStreamRegistration { - handler_id: HandlerId, - token: StreamToken, - }, - /// Broadcast a message across all protocol handlers. - UserMessage(Arc) +pub enum IoMessage +where + Message: Send + Sized, +{ + /// Shutdown the event loop + Shutdown, + /// Register a new protocol handler. + AddHandler { + handler: Arc + Send>, + }, + RemoveHandler { + handler_id: HandlerId, + }, + AddTimer { + handler_id: HandlerId, + token: TimerToken, + delay: Duration, + once: bool, + }, + RemoveTimer { + handler_id: HandlerId, + token: TimerToken, + }, + RegisterStream { + handler_id: HandlerId, + token: StreamToken, + }, + DeregisterStream { + handler_id: HandlerId, + token: StreamToken, + }, + UpdateStreamRegistration { + handler_id: HandlerId, + token: StreamToken, + }, + /// Broadcast a message across all protocol handlers. + UserMessage(Arc), } /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem. -pub struct IoContext where Message: Send + Sync + 'static { - channel: IoChannel, - handler: HandlerId, +pub struct IoContext +where + Message: Send + Sync + 'static, +{ + channel: IoChannel, + handler: HandlerId, } -impl IoContext where Message: Send + Sync + 'static { - /// Create a new IO access point. Takes references to all the data that can be updated within the IO handler. 
- pub fn new(channel: IoChannel, handler: HandlerId) -> IoContext { - IoContext { - handler: handler, - channel: channel, - } - } +impl IoContext +where + Message: Send + Sync + 'static, +{ + /// Create a new IO access point. Takes references to all the data that can be updated within the IO handler. + pub fn new(channel: IoChannel, handler: HandlerId) -> IoContext { + IoContext { + handler: handler, + channel: channel, + } + } - /// Register a new recurring IO timer. 'IoHandler::timeout' will be called with the token. - pub fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), IoError> { - self.channel.send_io(IoMessage::AddTimer { - token, - delay, - handler_id: self.handler, - once: false, - })?; - Ok(()) - } + /// Register a new recurring IO timer. 'IoHandler::timeout' will be called with the token. + pub fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), IoError> { + self.channel.send_io(IoMessage::AddTimer { + token, + delay, + handler_id: self.handler, + once: false, + })?; + Ok(()) + } - /// Register a new IO timer once. 'IoHandler::timeout' will be called with the token. - pub fn register_timer_once(&self, token: TimerToken, delay: Duration) -> Result<(), IoError> { - self.channel.send_io(IoMessage::AddTimer { - token, - delay, - handler_id: self.handler, - once: true, - })?; - Ok(()) - } + /// Register a new IO timer once. 'IoHandler::timeout' will be called with the token. + pub fn register_timer_once(&self, token: TimerToken, delay: Duration) -> Result<(), IoError> { + self.channel.send_io(IoMessage::AddTimer { + token, + delay, + handler_id: self.handler, + once: true, + })?; + Ok(()) + } - /// Delete a timer. - pub fn clear_timer(&self, token: TimerToken) -> Result<(), IoError> { - self.channel.send_io(IoMessage::RemoveTimer { - token: token, - handler_id: self.handler, - })?; - Ok(()) - } + /// Delete a timer. 
+ pub fn clear_timer(&self, token: TimerToken) -> Result<(), IoError> { + self.channel.send_io(IoMessage::RemoveTimer { + token: token, + handler_id: self.handler, + })?; + Ok(()) + } - /// Register a new IO stream. - pub fn register_stream(&self, token: StreamToken) -> Result<(), IoError> { - self.channel.send_io(IoMessage::RegisterStream { - token: token, - handler_id: self.handler, - })?; - Ok(()) - } + /// Register a new IO stream. + pub fn register_stream(&self, token: StreamToken) -> Result<(), IoError> { + self.channel.send_io(IoMessage::RegisterStream { + token: token, + handler_id: self.handler, + })?; + Ok(()) + } - /// Deregister an IO stream. - pub fn deregister_stream(&self, token: StreamToken) -> Result<(), IoError> { - self.channel.send_io(IoMessage::DeregisterStream { - token: token, - handler_id: self.handler, - })?; - Ok(()) - } + /// Deregister an IO stream. + pub fn deregister_stream(&self, token: StreamToken) -> Result<(), IoError> { + self.channel.send_io(IoMessage::DeregisterStream { + token: token, + handler_id: self.handler, + })?; + Ok(()) + } - /// Reregister an IO stream. - pub fn update_registration(&self, token: StreamToken) -> Result<(), IoError> { - self.channel.send_io(IoMessage::UpdateStreamRegistration { - token: token, - handler_id: self.handler, - })?; - Ok(()) - } + /// Reregister an IO stream. 
+ pub fn update_registration(&self, token: StreamToken) -> Result<(), IoError> { + self.channel.send_io(IoMessage::UpdateStreamRegistration { + token: token, + handler_id: self.handler, + })?; + Ok(()) + } - /// Broadcast a message to other IO clients - pub fn message(&self, message: Message) -> Result<(), IoError> { - self.channel.send(message)?; - Ok(()) - } + /// Broadcast a message to other IO clients + pub fn message(&self, message: Message) -> Result<(), IoError> { + self.channel.send(message)?; + Ok(()) + } - /// Get message channel - pub fn channel(&self) -> IoChannel { - self.channel.clone() - } - - /// Unregister current IO handler. - pub fn unregister_handler(&self) { - // `send_io` returns an error only if the channel is closed, which means that the - // background thread is no longer running. Therefore the handler is no longer active and - // can be considered as unregistered. - let _ = self.channel.send_io(IoMessage::RemoveHandler { - handler_id: self.handler, - }); - } + /// Get message channel + pub fn channel(&self) -> IoChannel { + self.channel.clone() + } + /// Unregister current IO handler. + pub fn unregister_handler(&self) { + // `send_io` returns an error only if the channel is closed, which means that the + // background thread is no longer running. Therefore the handler is no longer active and + // can be considered as unregistered. + let _ = self.channel.send_io(IoMessage::RemoveHandler { + handler_id: self.handler, + }); + } } #[derive(Clone)] struct UserTimer { - delay: Duration, - timeout: Timeout, - once: bool, + delay: Duration, + timeout: Timeout, + once: bool, } /// Root IO handler. Manages user handlers, messages and IO timers. 
-pub struct IoManager where Message: Send + Sync { - timers: Arc>>, - handlers: Arc>>>>, - workers: Vec, - worker_channel: deque::Worker>, - work_ready: Arc, +pub struct IoManager +where + Message: Send + Sync, +{ + timers: Arc>>, + handlers: Arc>>>>, + workers: Vec, + worker_channel: deque::Worker>, + work_ready: Arc, } -impl IoManager where Message: Send + Sync + 'static { - /// Creates a new instance and registers it with the event loop. - pub fn start( - event_loop: &mut EventLoop>, - handlers: Arc>>>> - ) -> Result<(), IoError> { - let (worker, stealer) = deque::fifo(); - let num_workers = 4; - let work_ready_mutex = Arc::new(Mutex::new(())); - let work_ready = Arc::new(Condvar::new()); - let workers = (0..num_workers).map(|i| - Worker::new( - i, - stealer.clone(), - IoChannel::new(event_loop.channel(), Arc::downgrade(&handlers)), - work_ready.clone(), - work_ready_mutex.clone(), - ) - ).collect(); +impl IoManager +where + Message: Send + Sync + 'static, +{ + /// Creates a new instance and registers it with the event loop. 
+ pub fn start( + event_loop: &mut EventLoop>, + handlers: Arc>>>>, + ) -> Result<(), IoError> { + let (worker, stealer) = deque::fifo(); + let num_workers = 4; + let work_ready_mutex = Arc::new(Mutex::new(())); + let work_ready = Arc::new(Condvar::new()); + let workers = (0..num_workers) + .map(|i| { + Worker::new( + i, + stealer.clone(), + IoChannel::new(event_loop.channel(), Arc::downgrade(&handlers)), + work_ready.clone(), + work_ready_mutex.clone(), + ) + }) + .collect(); - let mut io = IoManager { - timers: Arc::new(RwLock::new(HashMap::new())), - handlers: handlers, - worker_channel: worker, - workers: workers, - work_ready: work_ready, - }; - event_loop.run(&mut io)?; - Ok(()) - } + let mut io = IoManager { + timers: Arc::new(RwLock::new(HashMap::new())), + handlers: handlers, + worker_channel: worker, + workers: workers, + work_ready: work_ready, + }; + event_loop.run(&mut io)?; + Ok(()) + } } -impl Handler for IoManager where Message: Send + Sync + 'static { - type Timeout = Token; - type Message = IoMessage; +impl Handler for IoManager +where + Message: Send + Sync + 'static, +{ + type Timeout = Token; + type Message = IoMessage; - fn ready(&mut self, _event_loop: &mut EventLoop, token: Token, events: Ready) { - let handler_index = token.0 / TOKENS_PER_HANDLER; - let token_id = token.0 % TOKENS_PER_HANDLER; - if let Some(handler) = self.handlers.read().get(handler_index) { - if events.is_hup() { - self.worker_channel.push(Work { work_type: WorkType::Hup, token: token_id, handler: handler.clone(), handler_id: handler_index }); - } - else { - if events.is_readable() { - self.worker_channel.push(Work { work_type: WorkType::Readable, token: token_id, handler: handler.clone(), handler_id: handler_index }); - } - if events.is_writable() { - self.worker_channel.push(Work { work_type: WorkType::Writable, token: token_id, handler: handler.clone(), handler_id: handler_index }); - } - } - self.work_ready.notify_all(); - } - } + fn ready(&mut self, _event_loop: &mut 
EventLoop, token: Token, events: Ready) { + let handler_index = token.0 / TOKENS_PER_HANDLER; + let token_id = token.0 % TOKENS_PER_HANDLER; + if let Some(handler) = self.handlers.read().get(handler_index) { + if events.is_hup() { + self.worker_channel.push(Work { + work_type: WorkType::Hup, + token: token_id, + handler: handler.clone(), + handler_id: handler_index, + }); + } else { + if events.is_readable() { + self.worker_channel.push(Work { + work_type: WorkType::Readable, + token: token_id, + handler: handler.clone(), + handler_id: handler_index, + }); + } + if events.is_writable() { + self.worker_channel.push(Work { + work_type: WorkType::Writable, + token: token_id, + handler: handler.clone(), + handler_id: handler_index, + }); + } + } + self.work_ready.notify_all(); + } + } - fn timeout(&mut self, event_loop: &mut EventLoop, token: Token) { - let handler_index = token.0 / TOKENS_PER_HANDLER; - let token_id = token.0 % TOKENS_PER_HANDLER; - if let Some(handler) = self.handlers.read().get(handler_index) { - let maybe_timer = self.timers.read().get(&token.0).cloned(); - if let Some(timer) = maybe_timer { - if timer.once { - self.timers.write().remove(&token_id); - event_loop.clear_timeout(&timer.timeout); - } else { - event_loop.timeout(token, timer.delay).expect("Error re-registering user timer"); - } - self.worker_channel.push(Work { work_type: WorkType::Timeout, token: token_id, handler: handler.clone(), handler_id: handler_index }); - self.work_ready.notify_all(); - } - } - } + fn timeout(&mut self, event_loop: &mut EventLoop, token: Token) { + let handler_index = token.0 / TOKENS_PER_HANDLER; + let token_id = token.0 % TOKENS_PER_HANDLER; + if let Some(handler) = self.handlers.read().get(handler_index) { + let maybe_timer = self.timers.read().get(&token.0).cloned(); + if let Some(timer) = maybe_timer { + if timer.once { + self.timers.write().remove(&token_id); + event_loop.clear_timeout(&timer.timeout); + } else { + event_loop + .timeout(token, 
timer.delay) + .expect("Error re-registering user timer"); + } + self.worker_channel.push(Work { + work_type: WorkType::Timeout, + token: token_id, + handler: handler.clone(), + handler_id: handler_index, + }); + self.work_ready.notify_all(); + } + } + } - fn notify(&mut self, event_loop: &mut EventLoop, msg: Self::Message) { - match msg { - IoMessage::Shutdown => { - self.workers.clear(); - event_loop.shutdown(); - }, - IoMessage::AddHandler { handler } => { - let handler_id = self.handlers.write().insert(handler.clone()); - assert!(handler_id <= MAX_HANDLERS, "Too many handlers registered"); - handler.initialize(&IoContext::new(IoChannel::new(event_loop.channel(), Arc::downgrade(&self.handlers)), handler_id)); - }, - IoMessage::RemoveHandler { handler_id } => { - // TODO: flush event loop - self.handlers.write().remove(handler_id); - // unregister timers - let mut timers = self.timers.write(); - let to_remove: Vec<_> = timers.keys().cloned().filter(|timer_id| timer_id / TOKENS_PER_HANDLER == handler_id).collect(); - for timer_id in to_remove { - let timer = timers.remove(&timer_id).expect("to_remove only contains keys from timers; qed"); - event_loop.clear_timeout(&timer.timeout); - } - }, - IoMessage::AddTimer { handler_id, token, delay, once } => { - let timer_id = token + handler_id * TOKENS_PER_HANDLER; - let timeout = event_loop.timeout(Token(timer_id), delay).expect("Error registering user timer"); - self.timers.write().insert(timer_id, UserTimer { delay: delay, timeout: timeout, once: once }); - }, - IoMessage::RemoveTimer { handler_id, token } => { - let timer_id = token + handler_id * TOKENS_PER_HANDLER; - if let Some(timer) = self.timers.write().remove(&timer_id) { - event_loop.clear_timeout(&timer.timeout); - } - }, - IoMessage::RegisterStream { handler_id, token } => { - if let Some(handler) = self.handlers.read().get(handler_id) { - handler.register_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop); - } - }, - 
IoMessage::DeregisterStream { handler_id, token } => { - if let Some(handler) = self.handlers.read().get(handler_id) { - handler.deregister_stream(token, event_loop); - // unregister a timer associated with the token (if any) - let timer_id = token + handler_id * TOKENS_PER_HANDLER; - if let Some(timer) = self.timers.write().remove(&timer_id) { - event_loop.clear_timeout(&timer.timeout); - } - } - }, - IoMessage::UpdateStreamRegistration { handler_id, token } => { - if let Some(handler) = self.handlers.read().get(handler_id) { - handler.update_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop); - } - }, - IoMessage::UserMessage(data) => { - //TODO: better way to iterate the slab - for id in 0 .. MAX_HANDLERS { - if let Some(h) = self.handlers.read().get(id) { - let handler = h.clone(); - self.worker_channel.push(Work { - work_type: WorkType::Message(data.clone()), - token: 0, - handler: handler, - handler_id: id - }); - } - } - self.work_ready.notify_all(); - } - } - } + fn notify(&mut self, event_loop: &mut EventLoop, msg: Self::Message) { + match msg { + IoMessage::Shutdown => { + self.workers.clear(); + event_loop.shutdown(); + } + IoMessage::AddHandler { handler } => { + let handler_id = self.handlers.write().insert(handler.clone()); + assert!(handler_id <= MAX_HANDLERS, "Too many handlers registered"); + handler.initialize(&IoContext::new( + IoChannel::new(event_loop.channel(), Arc::downgrade(&self.handlers)), + handler_id, + )); + } + IoMessage::RemoveHandler { handler_id } => { + // TODO: flush event loop + self.handlers.write().remove(handler_id); + // unregister timers + let mut timers = self.timers.write(); + let to_remove: Vec<_> = timers + .keys() + .cloned() + .filter(|timer_id| timer_id / TOKENS_PER_HANDLER == handler_id) + .collect(); + for timer_id in to_remove { + let timer = timers + .remove(&timer_id) + .expect("to_remove only contains keys from timers; qed"); + event_loop.clear_timeout(&timer.timeout); + } + } + 
IoMessage::AddTimer { + handler_id, + token, + delay, + once, + } => { + let timer_id = token + handler_id * TOKENS_PER_HANDLER; + let timeout = event_loop + .timeout(Token(timer_id), delay) + .expect("Error registering user timer"); + self.timers.write().insert( + timer_id, + UserTimer { + delay: delay, + timeout: timeout, + once: once, + }, + ); + } + IoMessage::RemoveTimer { handler_id, token } => { + let timer_id = token + handler_id * TOKENS_PER_HANDLER; + if let Some(timer) = self.timers.write().remove(&timer_id) { + event_loop.clear_timeout(&timer.timeout); + } + } + IoMessage::RegisterStream { handler_id, token } => { + if let Some(handler) = self.handlers.read().get(handler_id) { + handler.register_stream( + token, + Token(token + handler_id * TOKENS_PER_HANDLER), + event_loop, + ); + } + } + IoMessage::DeregisterStream { handler_id, token } => { + if let Some(handler) = self.handlers.read().get(handler_id) { + handler.deregister_stream(token, event_loop); + // unregister a timer associated with the token (if any) + let timer_id = token + handler_id * TOKENS_PER_HANDLER; + if let Some(timer) = self.timers.write().remove(&timer_id) { + event_loop.clear_timeout(&timer.timeout); + } + } + } + IoMessage::UpdateStreamRegistration { handler_id, token } => { + if let Some(handler) = self.handlers.read().get(handler_id) { + handler.update_stream( + token, + Token(token + handler_id * TOKENS_PER_HANDLER), + event_loop, + ); + } + } + IoMessage::UserMessage(data) => { + //TODO: better way to iterate the slab + for id in 0..MAX_HANDLERS { + if let Some(h) = self.handlers.read().get(id) { + let handler = h.clone(); + self.worker_channel.push(Work { + work_type: WorkType::Message(data.clone()), + token: 0, + handler: handler, + handler_id: id, + }); + } + } + self.work_ready.notify_all(); + } + } + } } -enum Handlers where Message: Send { - SharedCollection(Weak>>>>), - Single(Weak>), +enum Handlers +where + Message: Send, +{ + SharedCollection(Weak>>>>), + 
Single(Weak>), } impl Clone for Handlers { - fn clone(&self) -> Self { - use self::Handlers::*; + fn clone(&self) -> Self { + use self::Handlers::*; - match *self { - SharedCollection(ref w) => SharedCollection(w.clone()), - Single(ref w) => Single(w.clone()), - } - } + match *self { + SharedCollection(ref w) => SharedCollection(w.clone()), + Single(ref w) => Single(w.clone()), + } + } } /// Allows sending messages into the event loop. All the IO handlers will get the message /// in the `message` callback. -pub struct IoChannel where Message: Send { - channel: Option>>, - handlers: Handlers, +pub struct IoChannel +where + Message: Send, +{ + channel: Option>>, + handlers: Handlers, } -impl Clone for IoChannel where Message: Send + Sync + 'static { - fn clone(&self) -> IoChannel { - IoChannel { - channel: self.channel.clone(), - handlers: self.handlers.clone(), - } - } +impl Clone for IoChannel +where + Message: Send + Sync + 'static, +{ + fn clone(&self) -> IoChannel { + IoChannel { + channel: self.channel.clone(), + handlers: self.handlers.clone(), + } + } } -impl IoChannel where Message: Send + Sync + 'static { - /// Send a message through the channel - pub fn send(&self, message: Message) -> Result<(), IoError> { - match self.channel { - Some(ref channel) => channel.send(IoMessage::UserMessage(Arc::new(message)))?, - None => self.send_sync(message)? - } - Ok(()) - } +impl IoChannel +where + Message: Send + Sync + 'static, +{ + /// Send a message through the channel + pub fn send(&self, message: Message) -> Result<(), IoError> { + match self.channel { + Some(ref channel) => channel.send(IoMessage::UserMessage(Arc::new(message)))?, + None => self.send_sync(message)?, + } + Ok(()) + } - /// Send a message through the channel and handle it synchronously - pub fn send_sync(&self, message: Message) -> Result<(), IoError> { - match self.handlers { - Handlers::SharedCollection(ref handlers) => { - if let Some(handlers) = handlers.upgrade() { - for id in 0 .. 
MAX_HANDLERS { - if let Some(h) = handlers.read().get(id) { - let handler = h.clone(); - handler.message(&IoContext::new(self.clone(), id), &message); - } - } - } - }, - Handlers::Single(ref handler) => { - if let Some(handler) = handler.upgrade() { - handler.message(&IoContext::new(self.clone(), 0), &message); - } - } - } - Ok(()) - } + /// Send a message through the channel and handle it synchronously + pub fn send_sync(&self, message: Message) -> Result<(), IoError> { + match self.handlers { + Handlers::SharedCollection(ref handlers) => { + if let Some(handlers) = handlers.upgrade() { + for id in 0..MAX_HANDLERS { + if let Some(h) = handlers.read().get(id) { + let handler = h.clone(); + handler.message(&IoContext::new(self.clone(), id), &message); + } + } + } + } + Handlers::Single(ref handler) => { + if let Some(handler) = handler.upgrade() { + handler.message(&IoContext::new(self.clone(), 0), &message); + } + } + } + Ok(()) + } - /// Send low level io message - pub fn send_io(&self, message: IoMessage) -> Result<(), IoError> { - if let Some(ref channel) = self.channel { - channel.send(message)? - } - Ok(()) - } - /// Create a new channel disconnected from an event loop. - pub fn disconnected() -> IoChannel { - IoChannel { - channel: None, - handlers: Handlers::SharedCollection(Weak::default()), - } - } + /// Send low level io message + pub fn send_io(&self, message: IoMessage) -> Result<(), IoError> { + if let Some(ref channel) = self.channel { + channel.send(message)? + } + Ok(()) + } + /// Create a new channel disconnected from an event loop. + pub fn disconnected() -> IoChannel { + IoChannel { + channel: None, + handlers: Handlers::SharedCollection(Weak::default()), + } + } - /// Create a new synchronous channel to a given handler. 
- pub fn to_handler(handler: Weak>) -> IoChannel { - IoChannel { - channel: None, - handlers: Handlers::Single(handler), - } - } - fn new(channel: Sender>, handlers: Weak>>>>) -> IoChannel { - IoChannel { - channel: Some(channel), - handlers: Handlers::SharedCollection(handlers), - } - } + /// Create a new synchronous channel to a given handler. + pub fn to_handler(handler: Weak>) -> IoChannel { + IoChannel { + channel: None, + handlers: Handlers::Single(handler), + } + } + fn new( + channel: Sender>, + handlers: Weak>>>>, + ) -> IoChannel { + IoChannel { + channel: Some(channel), + handlers: Handlers::SharedCollection(handlers), + } + } } /// General IO Service. Starts an event loop and dispatches IO requests. /// 'Message' is a notification message type -pub struct IoService where Message: Send + Sync + 'static { - thread: Option>, - host_channel: Mutex>>, - handlers: Arc>>>>, +pub struct IoService +where + Message: Send + Sync + 'static, +{ + thread: Option>, + host_channel: Mutex>>, + handlers: Arc>>>>, } -impl IoService where Message: Send + Sync + 'static { - /// Starts IO event loop - pub fn start() -> Result, IoError> { - let mut config = EventLoopBuilder::new(); - config.messages_per_tick(1024); - let mut event_loop = config.build().expect("Error creating event loop"); - let channel = event_loop.channel(); - let handlers = Arc::new(RwLock::new(Slab::with_capacity(MAX_HANDLERS))); - let h = handlers.clone(); - let thread = thread::spawn(move || { - IoManager::::start(&mut event_loop, h).expect("Error starting IO service"); - }); - Ok(IoService { - thread: Some(thread), - host_channel: Mutex::new(channel), - handlers: handlers, - }) - } +impl IoService +where + Message: Send + Sync + 'static, +{ + /// Starts IO event loop + pub fn start() -> Result, IoError> { + let mut config = EventLoopBuilder::new(); + config.messages_per_tick(1024); + let mut event_loop = config.build().expect("Error creating event loop"); + let channel = event_loop.channel(); + let 
handlers = Arc::new(RwLock::new(Slab::with_capacity(MAX_HANDLERS))); + let h = handlers.clone(); + let thread = thread::spawn(move || { + IoManager::::start(&mut event_loop, h).expect("Error starting IO service"); + }); + Ok(IoService { + thread: Some(thread), + host_channel: Mutex::new(channel), + handlers: handlers, + }) + } - pub fn stop(&mut self) { - trace!(target: "shutdown", "[IoService] Closing..."); - // Clear handlers so that shared pointers are not stuck on stack - // in Channel::send_sync - self.handlers.write().clear(); - self.host_channel.lock().send(IoMessage::Shutdown).unwrap_or_else(|e| warn!("Error on IO service shutdown: {:?}", e)); - if let Some(thread) = self.thread.take() { - thread.join().unwrap_or_else(|e| { - debug!(target: "shutdown", "Error joining IO service event loop thread: {:?}", e); - }); - } - trace!(target: "shutdown", "[IoService] Closed."); - } + pub fn stop(&mut self) { + trace!(target: "shutdown", "[IoService] Closing..."); + // Clear handlers so that shared pointers are not stuck on stack + // in Channel::send_sync + self.handlers.write().clear(); + self.host_channel + .lock() + .send(IoMessage::Shutdown) + .unwrap_or_else(|e| warn!("Error on IO service shutdown: {:?}", e)); + if let Some(thread) = self.thread.take() { + thread.join().unwrap_or_else(|e| { + debug!(target: "shutdown", "Error joining IO service event loop thread: {:?}", e); + }); + } + trace!(target: "shutdown", "[IoService] Closed."); + } - /// Regiter an IO handler with the event loop. - pub fn register_handler(&self, handler: Arc+Send>) -> Result<(), IoError> { - self.host_channel.lock().send(IoMessage::AddHandler { - handler: handler, - })?; - Ok(()) - } + /// Regiter an IO handler with the event loop. + pub fn register_handler(&self, handler: Arc + Send>) -> Result<(), IoError> { + self.host_channel + .lock() + .send(IoMessage::AddHandler { handler: handler })?; + Ok(()) + } - /// Send a message over the network. Normaly `HostIo::send` should be used. 
This can be used from non-io threads. - pub fn send_message(&self, message: Message) -> Result<(), IoError> { - self.host_channel.lock().send(IoMessage::UserMessage(Arc::new(message)))?; - Ok(()) - } + /// Send a message over the network. Normaly `HostIo::send` should be used. This can be used from non-io threads. + pub fn send_message(&self, message: Message) -> Result<(), IoError> { + self.host_channel + .lock() + .send(IoMessage::UserMessage(Arc::new(message)))?; + Ok(()) + } - /// Create a new message channel - pub fn channel(&self) -> IoChannel { - IoChannel::new(self.host_channel.lock().clone(), Arc::downgrade(&self.handlers)) - } + /// Create a new message channel + pub fn channel(&self) -> IoChannel { + IoChannel::new( + self.host_channel.lock().clone(), + Arc::downgrade(&self.handlers), + ) + } } -impl Drop for IoService where Message: Send + Sync { - fn drop(&mut self) { - self.stop() - } +impl Drop for IoService +where + Message: Send + Sync, +{ + fn drop(&mut self) { + self.stop() + } } diff --git a/util/io/src/service_non_mio.rs b/util/io/src/service_non_mio.rs index 1cef3574e..d958a5d23 100644 --- a/util/io/src/service_non_mio.rs +++ b/util/io/src/service_non_mio.rs @@ -14,17 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::sync::{Arc, Weak}; -use std::thread; use deque; -use slab::Slab; use fnv::FnvHashMap; -use {IoError, IoHandler}; -use parking_lot::{RwLock, Mutex}; use num_cpus; -use std::time::Duration; -use timer::{Timer, Guard as TimerGuard}; +use parking_lot::{Mutex, RwLock}; +use slab::Slab; +use std::{ + sync::{Arc, Weak}, + thread, + time::Duration, +}; use time::Duration as TimeDuration; +use timer::{Guard as TimerGuard, Timer}; +use IoError; +use IoHandler; /// Timer ID pub type TimerToken = usize; @@ -36,299 +39,353 @@ pub const TOKENS_PER_HANDLER: usize = 16384; const MAX_HANDLERS: usize = 8; /// IO access point. 
This is passed to all IO handlers and provides an interface to the IO subsystem. -pub struct IoContext where Message: Send + Sync + 'static { - handler: HandlerId, - shared: Arc>, +pub struct IoContext +where + Message: Send + Sync + 'static, +{ + handler: HandlerId, + shared: Arc>, } -impl IoContext where Message: Send + Sync + 'static { - /// Register a new recurring IO timer. 'IoHandler::timeout' will be called with the token. - pub fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), IoError> { - let channel = self.channel(); +impl IoContext +where + Message: Send + Sync + 'static, +{ + /// Register a new recurring IO timer. 'IoHandler::timeout' will be called with the token. + pub fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), IoError> { + let channel = self.channel(); - let msg = WorkTask::TimerTrigger { - handler_id: self.handler, - token: token, - }; + let msg = WorkTask::TimerTrigger { + handler_id: self.handler, + token: token, + }; - let delay = TimeDuration::from_std(delay) - .map_err(|e| ::std::io::Error::new(::std::io::ErrorKind::Other, e))?; - let guard = self.shared.timer.lock().schedule_repeating(delay, move || { - channel.send_raw(msg.clone()); - }); + let delay = TimeDuration::from_std(delay) + .map_err(|e| ::std::io::Error::new(::std::io::ErrorKind::Other, e))?; + let guard = self.shared.timer.lock().schedule_repeating(delay, move || { + channel.send_raw(msg.clone()); + }); - self.shared.timers.lock().insert(token, guard); + self.shared.timers.lock().insert(token, guard); - Ok(()) - } + Ok(()) + } - /// Register a new IO timer once. 'IoHandler::timeout' will be called with the token. - pub fn register_timer_once(&self, token: TimerToken, delay: Duration) -> Result<(), IoError> { - let channel = self.channel(); + /// Register a new IO timer once. 'IoHandler::timeout' will be called with the token. 
+ pub fn register_timer_once(&self, token: TimerToken, delay: Duration) -> Result<(), IoError> { + let channel = self.channel(); - let msg = WorkTask::TimerTrigger { - handler_id: self.handler, - token: token, - }; + let msg = WorkTask::TimerTrigger { + handler_id: self.handler, + token: token, + }; - let delay = TimeDuration::from_std(delay) - .map_err(|e| ::std::io::Error::new(::std::io::ErrorKind::Other, e))?; - let guard = self.shared.timer.lock().schedule_with_delay(delay, move || { - channel.send_raw(msg.clone()); - }); + let delay = TimeDuration::from_std(delay) + .map_err(|e| ::std::io::Error::new(::std::io::ErrorKind::Other, e))?; + let guard = self + .shared + .timer + .lock() + .schedule_with_delay(delay, move || { + channel.send_raw(msg.clone()); + }); - self.shared.timers.lock().insert(token, guard); + self.shared.timers.lock().insert(token, guard); - Ok(()) - } + Ok(()) + } - /// Delete a timer. - pub fn clear_timer(&self, token: TimerToken) -> Result<(), IoError> { - self.shared.timers.lock().remove(&token); - Ok(()) - } + /// Delete a timer. 
+ pub fn clear_timer(&self, token: TimerToken) -> Result<(), IoError> { + self.shared.timers.lock().remove(&token); + Ok(()) + } - /// Broadcast a message to other IO clients - pub fn message(&self, message: Message) -> Result<(), IoError> { - if let Some(ref channel) = *self.shared.channel.lock() { - channel.push(WorkTask::UserMessage(Arc::new(message))); - } - for thread in self.shared.threads.read().iter() { - thread.unpark(); - } + /// Broadcast a message to other IO clients + pub fn message(&self, message: Message) -> Result<(), IoError> { + if let Some(ref channel) = *self.shared.channel.lock() { + channel.push(WorkTask::UserMessage(Arc::new(message))); + } + for thread in self.shared.threads.read().iter() { + thread.unpark(); + } - Ok(()) - } + Ok(()) + } - /// Get message channel - pub fn channel(&self) -> IoChannel { - IoChannel { shared: Arc::downgrade(&self.shared) } - } + /// Get message channel + pub fn channel(&self) -> IoChannel { + IoChannel { + shared: Arc::downgrade(&self.shared), + } + } - /// Unregister current IO handler. - pub fn unregister_handler(&self) -> Result<(), IoError> { - self.shared.handlers.write().remove(self.handler); - Ok(()) - } + /// Unregister current IO handler. + pub fn unregister_handler(&self) -> Result<(), IoError> { + self.shared.handlers.write().remove(self.handler); + Ok(()) + } } /// Allows sending messages into the event loop. All the IO handlers will get the message /// in the `message` callback. 
-pub struct IoChannel where Message: Send + Sync + 'static { - shared: Weak>, +pub struct IoChannel +where + Message: Send + Sync + 'static, +{ + shared: Weak>, } -impl Clone for IoChannel where Message: Send + Sync + 'static { - fn clone(&self) -> IoChannel { - IoChannel { - shared: self.shared.clone(), - } - } +impl Clone for IoChannel +where + Message: Send + Sync + 'static, +{ + fn clone(&self) -> IoChannel { + IoChannel { + shared: self.shared.clone(), + } + } } -impl IoChannel where Message: Send + Sync + 'static { - /// Send a message through the channel - pub fn send(&self, message: Message) -> Result<(), IoError> { - if let Some(shared) = self.shared.upgrade() { - match *shared.channel.lock() { - Some(ref channel) => channel.push(WorkTask::UserMessage(Arc::new(message))), - None => self.send_sync(message)? - }; +impl IoChannel +where + Message: Send + Sync + 'static, +{ + /// Send a message through the channel + pub fn send(&self, message: Message) -> Result<(), IoError> { + if let Some(shared) = self.shared.upgrade() { + match *shared.channel.lock() { + Some(ref channel) => channel.push(WorkTask::UserMessage(Arc::new(message))), + None => self.send_sync(message)?, + }; - for thread in shared.threads.read().iter() { - thread.unpark(); - } - } + for thread in shared.threads.read().iter() { + thread.unpark(); + } + } - Ok(()) - } + Ok(()) + } - /// Send a message through the channel and handle it synchronously - pub fn send_sync(&self, message: Message) -> Result<(), IoError> { - if let Some(shared) = self.shared.upgrade() { - for id in 0 .. 
MAX_HANDLERS { - if let Some(h) = shared.handlers.read().get(id) { - let handler = h.clone(); - let ctxt = IoContext { handler: id, shared: shared.clone() }; - handler.message(&ctxt, &message); - } - } - } + /// Send a message through the channel and handle it synchronously + pub fn send_sync(&self, message: Message) -> Result<(), IoError> { + if let Some(shared) = self.shared.upgrade() { + for id in 0..MAX_HANDLERS { + if let Some(h) = shared.handlers.read().get(id) { + let handler = h.clone(); + let ctxt = IoContext { + handler: id, + shared: shared.clone(), + }; + handler.message(&ctxt, &message); + } + } + } - Ok(()) - } + Ok(()) + } - // Send low level io message - fn send_raw(&self, message: WorkTask) { - if let Some(shared) = self.shared.upgrade() { - if let Some(ref channel) = *shared.channel.lock() { - channel.push(message); - } + // Send low level io message + fn send_raw(&self, message: WorkTask) { + if let Some(shared) = self.shared.upgrade() { + if let Some(ref channel) = *shared.channel.lock() { + channel.push(message); + } - for thread in shared.threads.read().iter() { - thread.unpark(); - } - } - } + for thread in shared.threads.read().iter() { + thread.unpark(); + } + } + } - /// Create a new channel disconnected from an event loop. - pub fn disconnected() -> IoChannel { - IoChannel { - shared: Weak::default(), - } - } + /// Create a new channel disconnected from an event loop. + pub fn disconnected() -> IoChannel { + IoChannel { + shared: Weak::default(), + } + } } /// General IO Service. Starts an event loop and dispatches IO requests. /// 'Message' is a notification message type -pub struct IoService where Message: Send + Sync + 'static { - thread_joins: Mutex>>, - shared: Arc>, +pub struct IoService +where + Message: Send + Sync + 'static, +{ + thread_joins: Mutex>>, + shared: Arc>, } // Struct shared throughout the whole implementation. -struct Shared where Message: Send + Sync + 'static { - // All the I/O handlers that have been registered. 
- handlers: RwLock>>>, - // All the background threads, so that we can unpark them. - threads: RwLock>, - // Used to create timeouts. - timer: Mutex, - // List of created timers. We need to keep them in a data struct so that we can cancel them if - // necessary. - timers: Mutex>, - // Channel used to send work to the worker threads. - channel: Mutex>>>, +struct Shared +where + Message: Send + Sync + 'static, +{ + // All the I/O handlers that have been registered. + handlers: RwLock>>>, + // All the background threads, so that we can unpark them. + threads: RwLock>, + // Used to create timeouts. + timer: Mutex, + // List of created timers. We need to keep them in a data struct so that we can cancel them if + // necessary. + timers: Mutex>, + // Channel used to send work to the worker threads. + channel: Mutex>>>, } // Messages used to communicate with the event loop from other threads. -enum WorkTask where Message: Send + Sized { - Shutdown, - TimerTrigger { - handler_id: HandlerId, - token: TimerToken, - }, - UserMessage(Arc) +enum WorkTask +where + Message: Send + Sized, +{ + Shutdown, + TimerTrigger { + handler_id: HandlerId, + token: TimerToken, + }, + UserMessage(Arc), } -impl Clone for WorkTask where Message: Send + Sized { - fn clone(&self) -> WorkTask { - match *self { - WorkTask::Shutdown => WorkTask::Shutdown, - WorkTask::TimerTrigger { handler_id, token } => WorkTask::TimerTrigger { handler_id, token }, - WorkTask::UserMessage(ref msg) => WorkTask::UserMessage(msg.clone()), - } - } +impl Clone for WorkTask +where + Message: Send + Sized, +{ + fn clone(&self) -> WorkTask { + match *self { + WorkTask::Shutdown => WorkTask::Shutdown, + WorkTask::TimerTrigger { handler_id, token } => { + WorkTask::TimerTrigger { handler_id, token } + } + WorkTask::UserMessage(ref msg) => WorkTask::UserMessage(msg.clone()), + } + } } -impl IoService where Message: Send + Sync + 'static { - /// Starts IO event loop - pub fn start() -> Result, IoError> { - let (tx, rx) = 
deque::fifo(); +impl IoService +where + Message: Send + Sync + 'static, +{ + /// Starts IO event loop + pub fn start() -> Result, IoError> { + let (tx, rx) = deque::fifo(); - let shared = Arc::new(Shared { - handlers: RwLock::new(Slab::with_capacity(MAX_HANDLERS)), - threads: RwLock::new(Vec::new()), - timer: Mutex::new(Timer::new()), - timers: Mutex::new(FnvHashMap::default()), - channel: Mutex::new(Some(tx)), - }); + let shared = Arc::new(Shared { + handlers: RwLock::new(Slab::with_capacity(MAX_HANDLERS)), + threads: RwLock::new(Vec::new()), + timer: Mutex::new(Timer::new()), + timers: Mutex::new(FnvHashMap::default()), + channel: Mutex::new(Some(tx)), + }); - let thread_joins = (0 .. num_cpus::get()).map(|_| { - let rx = rx.clone(); - let shared = shared.clone(); - thread::spawn(move || { - do_work(&shared, rx) - }) - }).collect::>(); + let thread_joins = (0..num_cpus::get()) + .map(|_| { + let rx = rx.clone(); + let shared = shared.clone(); + thread::spawn(move || do_work(&shared, rx)) + }) + .collect::>(); - *shared.threads.write() = thread_joins.iter().map(|t| t.thread().clone()).collect(); + *shared.threads.write() = thread_joins.iter().map(|t| t.thread().clone()).collect(); - Ok(IoService { - thread_joins: Mutex::new(thread_joins), - shared, - }) - } + Ok(IoService { + thread_joins: Mutex::new(thread_joins), + shared, + }) + } - /// Stops the IO service. - pub fn stop(&mut self) { - trace!(target: "shutdown", "[IoService] Closing..."); - // Clear handlers so that shared pointers are not stuck on stack - // in Channel::send_sync - self.shared.handlers.write().clear(); - let channel = self.shared.channel.lock().take(); - let mut thread_joins = self.thread_joins.lock(); - if let Some(channel) = channel { - for _ in 0 .. thread_joins.len() { - channel.push(WorkTask::Shutdown); - } - } - for thread in thread_joins.drain(..) 
{ - thread.thread().unpark(); - thread.join().unwrap_or_else(|e| { - debug!(target: "shutdown", "Error joining IO service worker thread: {:?}", e); - }); - } - trace!(target: "shutdown", "[IoService] Closed."); - } + /// Stops the IO service. + pub fn stop(&mut self) { + trace!(target: "shutdown", "[IoService] Closing..."); + // Clear handlers so that shared pointers are not stuck on stack + // in Channel::send_sync + self.shared.handlers.write().clear(); + let channel = self.shared.channel.lock().take(); + let mut thread_joins = self.thread_joins.lock(); + if let Some(channel) = channel { + for _ in 0..thread_joins.len() { + channel.push(WorkTask::Shutdown); + } + } + for thread in thread_joins.drain(..) { + thread.thread().unpark(); + thread.join().unwrap_or_else(|e| { + debug!(target: "shutdown", "Error joining IO service worker thread: {:?}", e); + }); + } + trace!(target: "shutdown", "[IoService] Closed."); + } - /// Register an IO handler with the event loop. - pub fn register_handler(&self, handler: Arc+Send>) -> Result<(), IoError> { - let id = self.shared.handlers.write().insert(handler.clone()); - assert!(id <= MAX_HANDLERS, "Too many handlers registered"); - let ctxt = IoContext { handler: id, shared: self.shared.clone() }; - handler.initialize(&ctxt); - Ok(()) - } + /// Register an IO handler with the event loop. + pub fn register_handler(&self, handler: Arc + Send>) -> Result<(), IoError> { + let id = self.shared.handlers.write().insert(handler.clone()); + assert!(id <= MAX_HANDLERS, "Too many handlers registered"); + let ctxt = IoContext { + handler: id, + shared: self.shared.clone(), + }; + handler.initialize(&ctxt); + Ok(()) + } - /// Send a message over the network. Normaly `HostIo::send` should be used. This can be used from non-io threads. 
- pub fn send_message(&self, message: Message) -> Result<(), IoError> { - if let Some(ref channel) = *self.shared.channel.lock() { - channel.push(WorkTask::UserMessage(Arc::new(message))); - } - for thread in self.shared.threads.read().iter() { - thread.unpark(); - } - Ok(()) - } + /// Send a message over the network. Normaly `HostIo::send` should be used. This can be used from non-io threads. + pub fn send_message(&self, message: Message) -> Result<(), IoError> { + if let Some(ref channel) = *self.shared.channel.lock() { + channel.push(WorkTask::UserMessage(Arc::new(message))); + } + for thread in self.shared.threads.read().iter() { + thread.unpark(); + } + Ok(()) + } - /// Create a new message channel - #[inline] - pub fn channel(&self) -> IoChannel { - IoChannel { - shared: Arc::downgrade(&self.shared) - } - } + /// Create a new message channel + #[inline] + pub fn channel(&self) -> IoChannel { + IoChannel { + shared: Arc::downgrade(&self.shared), + } + } } -impl Drop for IoService where Message: Send + Sync { - fn drop(&mut self) { - self.stop() - } +impl Drop for IoService +where + Message: Send + Sync, +{ + fn drop(&mut self) { + self.stop() + } } fn do_work(shared: &Arc>, rx: deque::Stealer>) - where Message: Send + Sync + 'static +where + Message: Send + Sync + 'static, { - loop { - match rx.steal() { - deque::Steal::Retry => continue, - deque::Steal::Empty => thread::park(), - deque::Steal::Data(WorkTask::Shutdown) => break, - deque::Steal::Data(WorkTask::UserMessage(message)) => { - for id in 0 .. 
MAX_HANDLERS { - if let Some(handler) = shared.handlers.read().get(id) { - let ctxt = IoContext { handler: id, shared: shared.clone() }; - handler.message(&ctxt, &message); - } - } - }, - deque::Steal::Data(WorkTask::TimerTrigger { handler_id, token }) => { - if let Some(handler) = shared.handlers.read().get(handler_id) { - let ctxt = IoContext { handler: handler_id, shared: shared.clone() }; - handler.timeout(&ctxt, token); - } - }, - } - } + loop { + match rx.steal() { + deque::Steal::Retry => continue, + deque::Steal::Empty => thread::park(), + deque::Steal::Data(WorkTask::Shutdown) => break, + deque::Steal::Data(WorkTask::UserMessage(message)) => { + for id in 0..MAX_HANDLERS { + if let Some(handler) = shared.handlers.read().get(id) { + let ctxt = IoContext { + handler: id, + shared: shared.clone(), + }; + handler.message(&ctxt, &message); + } + } + } + deque::Steal::Data(WorkTask::TimerTrigger { handler_id, token }) => { + if let Some(handler) = shared.handlers.read().get(handler_id) { + let ctxt = IoContext { + handler: handler_id, + shared: shared.clone(), + }; + handler.timeout(&ctxt, token); + } + } + } + } } diff --git a/util/io/src/worker.rs b/util/io/src/worker.rs index 458882a85..f2b81b4e1 100644 --- a/util/io/src/worker.rs +++ b/util/io/src/worker.rs @@ -14,120 +14,144 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use futures::future::{self, Loop}; -use std::sync::Arc; -use std::thread::{JoinHandle, self}; -use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; use deque; +use futures::future::{self, Loop}; use service_mio::{HandlerId, IoChannel, IoContext}; +use std::{ + sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, + }, + thread::{self, JoinHandle}, +}; use tokio::{self}; use IoHandler; use LOCAL_STACK_SIZE; use parking_lot::{Condvar, Mutex}; -const STACK_SIZE: usize = 16*1024*1024; +const STACK_SIZE: usize = 16 * 1024 * 1024; pub enum WorkType { - Readable, - Writable, - Hup, - Timeout, - Message(Arc) + Readable, + Writable, + Hup, + Timeout, + Message(Arc), } pub struct Work { - pub work_type: WorkType, - pub token: usize, - pub handler_id: HandlerId, - pub handler: Arc>, + pub work_type: WorkType, + pub token: usize, + pub handler_id: HandlerId, + pub handler: Arc>, } /// An IO worker thread /// Sorts them ready for blockchain insertion. pub struct Worker { - thread: Option>, - wait: Arc, - deleting: Arc, - wait_mutex: Arc>, + thread: Option>, + wait: Arc, + deleting: Arc, + wait_mutex: Arc>, } impl Worker { - /// Creates a new worker instance. 
- pub fn new(index: usize, - stealer: deque::Stealer>, - channel: IoChannel, - wait: Arc, - wait_mutex: Arc>, - ) -> Worker - where Message: Send + Sync + 'static { - let deleting = Arc::new(AtomicBool::new(false)); - let mut worker = Worker { - thread: None, - wait: wait.clone(), - deleting: deleting.clone(), - wait_mutex: wait_mutex.clone(), - }; - worker.thread = Some(thread::Builder::new().stack_size(STACK_SIZE).name(format!("IO Worker #{}", index)).spawn( - move || { - LOCAL_STACK_SIZE.with(|val| val.set(STACK_SIZE)); - let ini = (stealer, channel.clone(), wait, wait_mutex.clone(), deleting); - let future = future::loop_fn(ini, |(stealer, channel, wait, wait_mutex, deleting)| { - { - let mut lock = wait_mutex.lock(); - if deleting.load(AtomicOrdering::Acquire) { - return Ok(Loop::Break(())); - } - wait.wait(&mut lock); - } + /// Creates a new worker instance. + pub fn new( + index: usize, + stealer: deque::Stealer>, + channel: IoChannel, + wait: Arc, + wait_mutex: Arc>, + ) -> Worker + where + Message: Send + Sync + 'static, + { + let deleting = Arc::new(AtomicBool::new(false)); + let mut worker = Worker { + thread: None, + wait: wait.clone(), + deleting: deleting.clone(), + wait_mutex: wait_mutex.clone(), + }; + worker.thread = Some( + thread::Builder::new() + .stack_size(STACK_SIZE) + .name(format!("IO Worker #{}", index)) + .spawn(move || { + LOCAL_STACK_SIZE.with(|val| val.set(STACK_SIZE)); + let ini = (stealer, channel.clone(), wait, wait_mutex.clone(), deleting); + let future = + future::loop_fn(ini, |(stealer, channel, wait, wait_mutex, deleting)| { + { + let mut lock = wait_mutex.lock(); + if deleting.load(AtomicOrdering::Acquire) { + return Ok(Loop::Break(())); + } + wait.wait(&mut lock); + } - while !deleting.load(AtomicOrdering::Acquire) { - match stealer.steal() { - deque::Steal::Data(work) => Worker::do_work(work, channel.clone()), - deque::Steal::Retry => {}, - deque::Steal::Empty => break, - } - } - Ok(Loop::Continue((stealer, channel, wait, 
wait_mutex, deleting))) - }); - if let Err(()) = tokio::runtime::current_thread::block_on_all(future) { - error!(target: "ioworker", "error while executing future") - } - }) - .expect("Error creating worker thread")); - worker - } + while !deleting.load(AtomicOrdering::Acquire) { + match stealer.steal() { + deque::Steal::Data(work) => { + Worker::do_work(work, channel.clone()) + } + deque::Steal::Retry => {} + deque::Steal::Empty => break, + } + } + Ok(Loop::Continue(( + stealer, channel, wait, wait_mutex, deleting, + ))) + }); + if let Err(()) = tokio::runtime::current_thread::block_on_all(future) { + error!(target: "ioworker", "error while executing future") + } + }) + .expect("Error creating worker thread"), + ); + worker + } - fn do_work(work: Work, channel: IoChannel) where Message: Send + Sync + 'static { - match work.work_type { - WorkType::Readable => { - work.handler.stream_readable(&IoContext::new(channel, work.handler_id), work.token); - }, - WorkType::Writable => { - work.handler.stream_writable(&IoContext::new(channel, work.handler_id), work.token); - } - WorkType::Hup => { - work.handler.stream_hup(&IoContext::new(channel, work.handler_id), work.token); - } - WorkType::Timeout => { - work.handler.timeout(&IoContext::new(channel, work.handler_id), work.token); - } - WorkType::Message(message) => { - work.handler.message(&IoContext::new(channel, work.handler_id), &*message); - } - } - } + fn do_work(work: Work, channel: IoChannel) + where + Message: Send + Sync + 'static, + { + match work.work_type { + WorkType::Readable => { + work.handler + .stream_readable(&IoContext::new(channel, work.handler_id), work.token); + } + WorkType::Writable => { + work.handler + .stream_writable(&IoContext::new(channel, work.handler_id), work.token); + } + WorkType::Hup => { + work.handler + .stream_hup(&IoContext::new(channel, work.handler_id), work.token); + } + WorkType::Timeout => { + work.handler + .timeout(&IoContext::new(channel, work.handler_id), work.token); + } 
+ WorkType::Message(message) => { + work.handler + .message(&IoContext::new(channel, work.handler_id), &*message); + } + } + } } impl Drop for Worker { - fn drop(&mut self) { - trace!(target: "shutdown", "[IoWorker] Closing..."); - let _ = self.wait_mutex.lock(); - self.deleting.store(true, AtomicOrdering::Release); - self.wait.notify_all(); - if let Some(thread) = self.thread.take() { - thread.join().ok(); - } - trace!(target: "shutdown", "[IoWorker] Closed"); - } + fn drop(&mut self) { + trace!(target: "shutdown", "[IoWorker] Closing..."); + let _ = self.wait_mutex.lock(); + self.deleting.store(true, AtomicOrdering::Release); + self.wait.notify_all(); + if let Some(thread) = self.thread.take() { + thread.join().ok(); + } + trace!(target: "shutdown", "[IoWorker] Closed"); + } } diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs index a068919a7..bcf984c1a 100644 --- a/util/journaldb/src/archivedb.rs +++ b/util/journaldb/src/archivedb.rs @@ -16,19 +16,22 @@ //! Disk-backed `HashDB` implementation. -use std::collections::HashMap; -use std::collections::hash_map::Entry; -use std::io; -use std::sync::Arc; +use std::{ + collections::{hash_map::Entry, HashMap}, + io, + sync::Arc, +}; +use super::{ + error_key_already_exists, error_negatively_reference_hash, memory_db::*, DB_PREFIX_LEN, + LATEST_ERA_KEY, +}; use bytes::Bytes; use ethereum_types::H256; -use hash_db::{HashDB}; +use hash_db::HashDB; use keccak_hasher::KeccakHasher; -use kvdb::{KeyValueDB, DBTransaction, DBValue}; -use rlp::{encode, decode}; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, error_key_already_exists, error_negatively_reference_hash}; -use super::memory_db::*; +use kvdb::{DBTransaction, DBValue, KeyValueDB}; +use rlp::{decode, encode}; use traits::JournalDB; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay @@ -39,435 +42,486 @@ use traits::JournalDB; /// immediately. As this is an "archive" database, nothing is ever removed. 
This means /// that the states of any block the node has ever processed will be accessible. pub struct ArchiveDB { - overlay: MemoryDB, - backing: Arc, - latest_era: Option, - column: Option, + overlay: MemoryDB, + backing: Arc, + latest_era: Option, + column: Option, } impl ArchiveDB { - /// Create a new instance from a key-value db. - pub fn new(backing: Arc, column: Option) -> ArchiveDB { - let latest_era = backing.get(column, &LATEST_ERA_KEY) - .expect("Low-level database error.") - .map(|val| decode::(&val).expect("decoding db value failed")); - ArchiveDB { - overlay: ::new_memory_db(), - backing, - latest_era, - column, - } - } - - fn payload(&self, key: &H256) -> Option { - self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?") - } + /// Create a new instance from a key-value db. + pub fn new(backing: Arc, column: Option) -> ArchiveDB { + let latest_era = backing + .get(column, &LATEST_ERA_KEY) + .expect("Low-level database error.") + .map(|val| decode::(&val).expect("decoding db value failed")); + ArchiveDB { + overlay: ::new_memory_db(), + backing, + latest_era, + column, + } + } + fn payload(&self, key: &H256) -> Option { + self.backing + .get(self.column, key) + .expect("Low-level database error. 
Some issue with your hard disk?") + } } impl HashDB for ArchiveDB { - fn get(&self, key: &H256) -> Option { - if let Some((d, rc)) = self.overlay.raw(key) { - if rc > 0 { - return Some(d.clone()); - } - } - self.payload(key) - } + fn get(&self, key: &H256) -> Option { + if let Some((d, rc)) = self.overlay.raw(key) { + if rc > 0 { + return Some(d.clone()); + } + } + self.payload(key) + } - fn contains(&self, key: &H256) -> bool { - self.get(key).is_some() - } + fn contains(&self, key: &H256) -> bool { + self.get(key).is_some() + } - fn insert(&mut self, value: &[u8]) -> H256 { - self.overlay.insert(value) - } + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } - fn emplace(&mut self, key: H256, value: DBValue) { - self.overlay.emplace(key, value); - } + fn emplace(&mut self, key: H256, value: DBValue) { + self.overlay.emplace(key, value); + } - fn remove(&mut self, key: &H256) { - self.overlay.remove(key); - } + fn remove(&mut self, key: &H256) { + self.overlay.remove(key); + } } impl ::traits::KeyedHashDB for ArchiveDB { - fn keys(&self) -> HashMap { - let mut ret: HashMap = self.backing.iter(self.column) - .map(|(key, _)| (H256::from_slice(&*key), 1)) - .collect(); + fn keys(&self) -> HashMap { + let mut ret: HashMap = self + .backing + .iter(self.column) + .map(|(key, _)| (H256::from_slice(&*key), 1)) + .collect(); - for (key, refs) in self.overlay.keys() { - match ret.entry(key) { - Entry::Occupied(mut entry) => { - *entry.get_mut() += refs; - }, - Entry::Vacant(entry) => { - entry.insert(refs); - } - } - } - ret - } + for (key, refs) in self.overlay.keys() { + match ret.entry(key) { + Entry::Occupied(mut entry) => { + *entry.get_mut() += refs; + } + Entry::Vacant(entry) => { + entry.insert(refs); + } + } + } + ret + } } impl JournalDB for ArchiveDB { + fn boxed_clone(&self) -> Box { + Box::new(ArchiveDB { + overlay: self.overlay.clone(), + backing: self.backing.clone(), + latest_era: self.latest_era, + column: self.column.clone(), + 
}) + } - fn boxed_clone(&self) -> Box { - Box::new(ArchiveDB { - overlay: self.overlay.clone(), - backing: self.backing.clone(), - latest_era: self.latest_era, - column: self.column.clone(), - }) - } + fn mem_used(&self) -> usize { + self.overlay.mem_used() + } - fn mem_used(&self) -> usize { - self.overlay.mem_used() - } + fn is_empty(&self) -> bool { + self.latest_era.is_none() + } - fn is_empty(&self) -> bool { - self.latest_era.is_none() - } + fn journal_under( + &mut self, + batch: &mut DBTransaction, + now: u64, + _id: &H256, + ) -> io::Result { + let mut inserts = 0usize; + let mut deletes = 0usize; - fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, _id: &H256) -> io::Result { - let mut inserts = 0usize; - let mut deletes = 0usize; + for i in self.overlay.drain() { + let (key, (value, rc)) = i; + if rc > 0 { + batch.put(self.column, &key, &value); + inserts += 1; + } + if rc < 0 { + assert!(rc == -1); + deletes += 1; + } + } - for i in self.overlay.drain() { - let (key, (value, rc)) = i; - if rc > 0 { - batch.put(self.column, &key, &value); - inserts += 1; - } - if rc < 0 { - assert!(rc == -1); - deletes += 1; - } - } + if self.latest_era.map_or(true, |e| now > e) { + batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); + self.latest_era = Some(now); + } + Ok((inserts + deletes) as u32) + } - if self.latest_era.map_or(true, |e| now > e) { - batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); - self.latest_era = Some(now); - } - Ok((inserts + deletes) as u32) - } + fn mark_canonical( + &mut self, + _batch: &mut DBTransaction, + _end_era: u64, + _canon_id: &H256, + ) -> io::Result { + // keep everything! it's an archive, after all. + Ok(0) + } - fn mark_canonical(&mut self, _batch: &mut DBTransaction, _end_era: u64, _canon_id: &H256) -> io::Result { - // keep everything! it's an archive, after all. 
- Ok(0) - } + fn inject(&mut self, batch: &mut DBTransaction) -> io::Result { + let mut inserts = 0usize; + let mut deletes = 0usize; - fn inject(&mut self, batch: &mut DBTransaction) -> io::Result { - let mut inserts = 0usize; - let mut deletes = 0usize; + for i in self.overlay.drain() { + let (key, (value, rc)) = i; + if rc > 0 { + if self.backing.get(self.column, &key)?.is_some() { + return Err(error_key_already_exists(&key)); + } + batch.put(self.column, &key, &value); + inserts += 1; + } + if rc < 0 { + assert!(rc == -1); + if self.backing.get(self.column, &key)?.is_none() { + return Err(error_negatively_reference_hash(&key)); + } + batch.delete(self.column, &key); + deletes += 1; + } + } - for i in self.overlay.drain() { - let (key, (value, rc)) = i; - if rc > 0 { - if self.backing.get(self.column, &key)?.is_some() { - return Err(error_key_already_exists(&key)); - } - batch.put(self.column, &key, &value); - inserts += 1; - } - if rc < 0 { - assert!(rc == -1); - if self.backing.get(self.column, &key)?.is_none() { - return Err(error_negatively_reference_hash(&key)); - } - batch.delete(self.column, &key); - deletes += 1; - } - } + Ok((inserts + deletes) as u32) + } - Ok((inserts + deletes) as u32) - } + fn latest_era(&self) -> Option { + self.latest_era + } - fn latest_era(&self) -> Option { self.latest_era } + fn state(&self, id: &H256) -> Option { + self.backing + .get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]) + .map(|b| b.into_vec()) + } - fn state(&self, id: &H256) -> Option { - self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) - } + fn is_pruned(&self) -> bool { + false + } - fn is_pruned(&self) -> bool { false } + fn backing(&self) -> &Arc { + &self.backing + } - fn backing(&self) -> &Arc { - &self.backing - } - - fn consolidate(&mut self, with: MemoryDB) { - self.overlay.consolidate(with); - } + fn consolidate(&mut self, with: MemoryDB) { + self.overlay.consolidate(with); + } } #[cfg(test)] mod tests { - use 
keccak::keccak; - use hash_db::HashDB; - use super::*; - use {kvdb_memorydb, JournalDB}; + use super::*; + use hash_db::HashDB; + use keccak::keccak; + use kvdb_memorydb; + use JournalDB; - #[test] - fn insert_same_in_fork() { - // history is 1 - let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); - let x = jdb.insert(b"X"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); - jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); + let x = jdb.insert(b"X"); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))) + .unwrap(); + jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))) + .unwrap(); - jdb.remove(&x); - jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); - let x = jdb.insert(b"X"); - jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); + jdb.remove(&x); + jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))) + .unwrap(); + let x = jdb.insert(b"X"); + jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))) + .unwrap(); - jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); - jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); + jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))) + .unwrap(); + jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))) + .unwrap(); - assert!(jdb.contains(&x)); - } + assert!(jdb.contains(&x)); + } - #[test] - fn long_history() { - // history is 3 - let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); - let h = jdb.insert(b"foo"); - jdb.commit_batch(0, 
&keccak(b"0"), None).unwrap(); - assert!(jdb.contains(&h)); - jdb.remove(&h); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.contains(&h)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.contains(&h)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.contains(&h)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.contains(&h)); - } + #[test] + fn long_history() { + // history is 3 + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); + let h = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.contains(&h)); + jdb.remove(&h); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.contains(&h)); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.contains(&h)); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.contains(&h)); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.contains(&h)); + } - #[test] - #[should_panic] - fn multiple_owed_removal_not_allowed() { - let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); - let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.contains(&h)); - jdb.remove(&h); - jdb.remove(&h); - // commit_batch would call journal_under(), - // and we don't allow multiple owned removals. - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - } + #[test] + #[should_panic] + fn multiple_owed_removal_not_allowed() { + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); + let h = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.contains(&h)); + jdb.remove(&h); + jdb.remove(&h); + // commit_batch would call journal_under(), + // and we don't allow multiple owned removals. 
+ jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + } - #[test] - fn complex() { - // history is 1 - let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); + #[test] + fn complex() { + // history is 1 + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); - jdb.remove(&foo); - jdb.remove(&bar); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - assert!(jdb.contains(&baz)); + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + assert!(jdb.contains(&baz)); - let foo = jdb.insert(b"foo"); - jdb.remove(&baz); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&baz)); + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&baz)); - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.contains(&foo)); + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.contains(&foo)); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); - } + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))) + .unwrap(); + } - #[test] - fn fork() { - // history is 1 - let mut jdb = 
ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); + #[test] + fn fork() { + // history is 1 + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); - jdb.remove(&bar); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + jdb.remove(&bar); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - assert!(jdb.contains(&baz)); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + assert!(jdb.contains(&baz)); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); - assert!(jdb.contains(&foo)); - } + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))) + .unwrap(); + assert!(jdb.contains(&foo)); + } - #[test] - fn overwrite() { - // history is 1 - let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); + #[test] + fn overwrite() { + // history is 1 + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.contains(&foo)); + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.contains(&foo)); - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - jdb.insert(b"foo"); - assert!(jdb.contains(&foo)); 
- jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.contains(&foo)); - jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); - assert!(jdb.contains(&foo)); - } + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + jdb.insert(b"foo"); + assert!(jdb.contains(&foo)); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.contains(&foo)); + jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))) + .unwrap(); + assert!(jdb.contains(&foo)); + } - #[test] - fn fork_same_key() { - // history is 1 - let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + #[test] + fn fork_same_key() { + // history is 1 + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + let foo = jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); - jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.contains(&foo)); + jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); - assert!(jdb.contains(&foo)); - } + jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))) + .unwrap(); + assert!(jdb.contains(&foo)); + } - #[test] - fn reopen() { - let shared_db = Arc::new(kvdb_memorydb::create(0)); - let bar = H256::random(); + #[test] + fn reopen() { + let shared_db = Arc::new(kvdb_memorydb::create(0)); + let bar = H256::random(); - let foo = { - let mut jdb = ArchiveDB::new(shared_db.clone(), None); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.emplace(bar.clone(), 
DBValue::from_slice(b"bar")); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - foo - }; + let foo = { + let mut jdb = ArchiveDB::new(shared_db.clone(), None); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), DBValue::from_slice(b"bar")); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + foo + }; - { - let mut jdb = ArchiveDB::new(shared_db.clone(), None); - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - } + { + let mut jdb = ArchiveDB::new(shared_db.clone(), None); + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + } - { - let mut jdb = ArchiveDB::new(shared_db, None); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - } - } + { + let mut jdb = ArchiveDB::new(shared_db, None); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + } + } - #[test] - fn reopen_remove() { - let shared_db = Arc::new(kvdb_memorydb::create(0)); + #[test] + fn reopen_remove() { + let shared_db = Arc::new(kvdb_memorydb::create(0)); - let foo = { - let mut jdb = ArchiveDB::new(shared_db.clone(), None); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + let foo = { + let mut jdb = ArchiveDB::new(shared_db.clone(), None); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); - // foo is ancient history. + // foo is ancient history. 
- jdb.insert(b"foo"); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - foo - }; + jdb.insert(b"foo"); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + foo + }; - { - let mut jdb = ArchiveDB::new(shared_db, None); - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.contains(&foo)); - jdb.remove(&foo); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); - jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); - } - } + { + let mut jdb = ArchiveDB::new(shared_db, None); + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.contains(&foo)); + jdb.remove(&foo); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))) + .unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))) + .unwrap(); + } + } - #[test] - fn reopen_fork() { - let shared_db = Arc::new(kvdb_memorydb::create(0)); - let (foo, _, _) = { - let mut jdb = ArchiveDB::new(shared_db.clone(), None); - // history is 1 - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + #[test] + fn reopen_fork() { + let shared_db = Arc::new(kvdb_memorydb::create(0)); + let (foo, _, _) = { + let mut jdb = ArchiveDB::new(shared_db.clone(), None); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); - jdb.remove(&bar); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - (foo, bar, baz) - }; + jdb.remove(&bar); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); + (foo, bar, baz) 
+ }; - { - let mut jdb = ArchiveDB::new(shared_db, None); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); - assert!(jdb.contains(&foo)); - } - } + { + let mut jdb = ArchiveDB::new(shared_db, None); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))) + .unwrap(); + assert!(jdb.contains(&foo)); + } + } - #[test] - fn returns_state() { - let shared_db = Arc::new(kvdb_memorydb::create(0)); + #[test] + fn returns_state() { + let shared_db = Arc::new(kvdb_memorydb::create(0)); - let key = { - let mut jdb = ArchiveDB::new(shared_db.clone(), None); - let key = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - key - }; + let key = { + let mut jdb = ArchiveDB::new(shared_db.clone(), None); + let key = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + key + }; - { - let jdb = ArchiveDB::new(shared_db, None); - let state = jdb.state(&key); - assert!(state.is_some()); - } - } + { + let jdb = ArchiveDB::new(shared_db, None); + let state = jdb.state(&key); + assert!(state.is_some()); + } + } - #[test] - fn inject() { - let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); - let key = jdb.insert(b"dog"); - jdb.inject_batch().unwrap(); + #[test] + fn inject() { + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); + let key = jdb.insert(b"dog"); + jdb.inject_batch().unwrap(); - assert_eq!(jdb.get(&key).unwrap(), DBValue::from_slice(b"dog")); - jdb.remove(&key); - jdb.inject_batch().unwrap(); + assert_eq!(jdb.get(&key).unwrap(), DBValue::from_slice(b"dog")); + jdb.remove(&key); + jdb.inject_batch().unwrap(); - assert!(jdb.get(&key).is_none()); - } + assert!(jdb.get(&key).is_none()); + } } diff --git a/util/journaldb/src/as_hash_db_impls.rs b/util/journaldb/src/as_hash_db_impls.rs index 8a380ea56..69eb674bb 100644 --- a/util/journaldb/src/as_hash_db_impls.rs +++ b/util/journaldb/src/as_hash_db_impls.rs @@ -15,57 +15,87 @@ // along with Parity Ethereum. 
If not, see . //! Impls of the `AsHashDB` upcast trait for all different variants of DB -use hash_db::{HashDB, AsHashDB}; -use keccak_hasher::KeccakHasher; +use crate::{AsKeyedHashDB, KeyedHashDB}; use archivedb::ArchiveDB; use earlymergedb::EarlyMergeDB; +use hash_db::{AsHashDB, HashDB}; +use keccak_hasher::KeccakHasher; +use kvdb::DBValue; +use overlaydb::OverlayDB; use overlayrecentdb::OverlayRecentDB; use refcounteddb::RefCountedDB; -use overlaydb::OverlayDB; -use kvdb::DBValue; -use crate::{KeyedHashDB, AsKeyedHashDB}; impl AsHashDB for ArchiveDB { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } impl AsHashDB for EarlyMergeDB { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } impl AsHashDB for OverlayRecentDB { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } impl AsHashDB for RefCountedDB { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } impl AsHashDB for OverlayDB { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &HashDB { + self + } + fn as_hash_db_mut(&mut self) -> &mut HashDB { + self + } } impl AsKeyedHashDB for ArchiveDB { - fn as_keyed_hash_db(&self) -> &KeyedHashDB { self } + fn as_keyed_hash_db(&self) -> &KeyedHashDB { + self + } } impl AsKeyedHashDB for EarlyMergeDB { - fn as_keyed_hash_db(&self) -> &KeyedHashDB { self } + fn as_keyed_hash_db(&self) -> 
&KeyedHashDB { + self + } } impl AsKeyedHashDB for OverlayRecentDB { - fn as_keyed_hash_db(&self) -> &KeyedHashDB { self } + fn as_keyed_hash_db(&self) -> &KeyedHashDB { + self + } } impl AsKeyedHashDB for RefCountedDB { - fn as_keyed_hash_db(&self) -> &KeyedHashDB { self } + fn as_keyed_hash_db(&self) -> &KeyedHashDB { + self + } } impl AsKeyedHashDB for OverlayDB { - fn as_keyed_hash_db(&self) -> &KeyedHashDB { self } + fn as_keyed_hash_db(&self) -> &KeyedHashDB { + self + } } diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs index 2a55200c4..37c821d90 100644 --- a/util/journaldb/src/earlymergedb.rs +++ b/util/journaldb/src/earlymergedb.rs @@ -16,38 +16,43 @@ //! Disk-backed `HashDB` implementation. -use std::collections::HashMap; -use std::collections::hash_map::Entry; -use std::io; -use std::sync::Arc; +use std::{ + collections::{hash_map::Entry, HashMap}, + io, + sync::Arc, +}; +use super::{ + error_key_already_exists, error_negatively_reference_hash, traits::JournalDB, DB_PREFIX_LEN, + LATEST_ERA_KEY, +}; use bytes::Bytes; use ethereum_types::H256; -use hash_db::{HashDB}; +use hash_db::HashDB; use heapsize::HeapSizeOf; use keccak_hasher::KeccakHasher; -use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use kvdb::{DBTransaction, DBValue, KeyValueDB}; use memory_db::*; use parking_lot::RwLock; -use rlp::{encode, decode}; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, error_negatively_reference_hash, error_key_already_exists}; -use super::traits::JournalDB; -use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; +use rlp::{decode, encode}; +use util::{DatabaseKey, DatabaseValueRef, DatabaseValueView}; #[derive(Debug, Clone, PartialEq, Eq)] struct RefInfo { - queue_refs: usize, - in_archive: bool, + queue_refs: usize, + in_archive: bool, } impl HeapSizeOf for RefInfo { - fn heap_size_of_children(&self) -> usize { 0 } + fn heap_size_of_children(&self) -> usize { + 0 + } } #[derive(Clone, PartialEq, Eq)] enum RemoveFrom { - 
Queue, - Archive, + Queue, + Archive, } /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay @@ -107,955 +112,1108 @@ enum RemoveFrom { /// /// TODO: `store_reclaim_period` pub struct EarlyMergeDB { - overlay: MemoryDB, - backing: Arc, - refs: Option>>>, - latest_era: Option, - column: Option, + overlay: MemoryDB, + backing: Arc, + refs: Option>>>, + latest_era: Option, + column: Option, } impl EarlyMergeDB { - /// Create a new instance from file - pub fn new(backing: Arc, col: Option) -> EarlyMergeDB { - let (latest_era, refs) = EarlyMergeDB::read_refs(&*backing, col); - let refs = Some(Arc::new(RwLock::new(refs))); - EarlyMergeDB { - overlay: ::new_memory_db(), - backing: backing, - refs: refs, - latest_era: latest_era, - column: col, - } - } + /// Create a new instance from file + pub fn new(backing: Arc, col: Option) -> EarlyMergeDB { + let (latest_era, refs) = EarlyMergeDB::read_refs(&*backing, col); + let refs = Some(Arc::new(RwLock::new(refs))); + EarlyMergeDB { + overlay: ::new_memory_db(), + backing: backing, + refs: refs, + latest_era: latest_era, + column: col, + } + } - fn morph_key(key: &H256, index: u8) -> Bytes { - let mut ret = (&**key).to_owned(); - ret.push(index); - ret - } + fn morph_key(key: &H256, index: u8) -> Bytes { + let mut ret = (&**key).to_owned(); + ret.push(index); + ret + } - // The next three are valid only as long as there is an insert operation of `key` in the journal. - fn set_already_in(batch: &mut DBTransaction, col: Option, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]); } - fn reset_already_in(batch: &mut DBTransaction, col: Option, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)); } - fn is_already_in(backing: &KeyValueDB, col: Option, key: &H256) -> bool { - backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. 
Some issue with your hard disk?").is_some() - } + // The next three are valid only as long as there is an insert operation of `key` in the journal. + fn set_already_in(batch: &mut DBTransaction, col: Option, key: &H256) { + batch.put(col, &Self::morph_key(key, 0), &[1u8]); + } + fn reset_already_in(batch: &mut DBTransaction, col: Option, key: &H256) { + batch.delete(col, &Self::morph_key(key, 0)); + } + fn is_already_in(backing: &KeyValueDB, col: Option, key: &H256) -> bool { + backing + .get(col, &Self::morph_key(key, 0)) + .expect("Low-level database error. Some issue with your hard disk?") + .is_some() + } - fn insert_keys(inserts: &[(H256, DBValue)], backing: &KeyValueDB, col: Option, refs: &mut HashMap, batch: &mut DBTransaction) { - for &(ref h, ref d) in inserts { - match refs.entry(*h) { - Entry::Occupied(mut entry) => { - let info = entry.get_mut(); - // already counting. increment. - info.queue_refs += 1; - trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, info.queue_refs); - }, - Entry::Vacant(entry) => { - // this is the first entry for this node in the journal. - let in_archive = backing.get(col, h).expect("Low-level database error. Some issue with your hard disk?").is_some(); - if in_archive { - // already in the backing DB. start counting, and remember it was already in. - Self::set_already_in(batch, col, h); - trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h); - } else { - // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. 
- //Self::reset_already_in(&h); - assert!(!Self::is_already_in(backing, col, h)); - trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h); - batch.put(col, h, d); - } - entry.insert(RefInfo { - queue_refs: 1, - in_archive: in_archive, - }); - }, - } - } - } + fn insert_keys( + inserts: &[(H256, DBValue)], + backing: &KeyValueDB, + col: Option, + refs: &mut HashMap, + batch: &mut DBTransaction, + ) { + for &(ref h, ref d) in inserts { + match refs.entry(*h) { + Entry::Occupied(mut entry) => { + let info = entry.get_mut(); + // already counting. increment. + info.queue_refs += 1; + trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, info.queue_refs); + } + Entry::Vacant(entry) => { + // this is the first entry for this node in the journal. + let in_archive = backing + .get(col, h) + .expect("Low-level database error. Some issue with your hard disk?") + .is_some(); + if in_archive { + // already in the backing DB. start counting, and remember it was already in. + Self::set_already_in(batch, col, h); + trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h); + } else { + // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. + //Self::reset_already_in(&h); + assert!(!Self::is_already_in(backing, col, h)); + trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h); + batch.put(col, h, d); + } + entry.insert(RefInfo { + queue_refs: 1, + in_archive: in_archive, + }); + } + } + } + } - fn replay_keys(inserts: &[H256], backing: &KeyValueDB, col: Option, refs: &mut HashMap) { - trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs); - for h in inserts { - match refs.entry(*h) { - // already counting. increment. - Entry::Occupied(mut entry) => { - entry.get_mut().queue_refs += 1; - }, - // this is the first entry for this node in the journal. 
- // it is initialised to 1 if it was already in. - Entry::Vacant(entry) => { - entry.insert(RefInfo { - queue_refs: 1, - in_archive: Self::is_already_in(backing, col, h), - }); - }, - } - } - trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); - } + fn replay_keys( + inserts: &[H256], + backing: &KeyValueDB, + col: Option, + refs: &mut HashMap, + ) { + trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs); + for h in inserts { + match refs.entry(*h) { + // already counting. increment. + Entry::Occupied(mut entry) => { + entry.get_mut().queue_refs += 1; + } + // this is the first entry for this node in the journal. + // it is initialised to 1 if it was already in. + Entry::Vacant(entry) => { + entry.insert(RefInfo { + queue_refs: 1, + in_archive: Self::is_already_in(backing, col, h), + }); + } + } + } + trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); + } - fn remove_keys(deletes: &[H256], refs: &mut HashMap, batch: &mut DBTransaction, col: Option, from: RemoveFrom) { - // with a remove on {queue_refs: 1, in_archive: true}, we have two options: - // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) - // - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue) - // (the latter option would then mean removing the RefInfo, since it would no longer be counted in the queue.) - // both are valid, but we switch between them depending on context. - // All inserts in queue (i.e. those which may yet be reverted) have an entry in refs. 
- for h in deletes { - match refs.entry(*h) { - Entry::Occupied(mut entry) => { - if entry.get().in_archive && from == RemoveFrom::Archive { - entry.get_mut().in_archive = false; - Self::reset_already_in(batch, col, h); - trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Reducing to queue only and recording", h); - continue; - } - if entry.get().queue_refs > 1 { - entry.get_mut().queue_refs -= 1; - trace!(target: "jdb.fine", " remove({}): In queue > 1 refs: Decrementing ref count to {}", h, entry.get().queue_refs); - continue; - } + fn remove_keys( + deletes: &[H256], + refs: &mut HashMap, + batch: &mut DBTransaction, + col: Option, + from: RemoveFrom, + ) { + // with a remove on {queue_refs: 1, in_archive: true}, we have two options: + // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) + // - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue) + // (the latter option would then mean removing the RefInfo, since it would no longer be counted in the queue.) + // both are valid, but we switch between them depending on context. + // All inserts in queue (i.e. those which may yet be reverted) have an entry in refs. 
+ for h in deletes { + match refs.entry(*h) { + Entry::Occupied(mut entry) => { + if entry.get().in_archive && from == RemoveFrom::Archive { + entry.get_mut().in_archive = false; + Self::reset_already_in(batch, col, h); + trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Reducing to queue only and recording", h); + continue; + } + if entry.get().queue_refs > 1 { + entry.get_mut().queue_refs -= 1; + trace!(target: "jdb.fine", " remove({}): In queue > 1 refs: Decrementing ref count to {}", h, entry.get().queue_refs); + continue; + } - let queue_refs = entry.get().queue_refs; - let in_archive = entry.get().in_archive; + let queue_refs = entry.get().queue_refs; + let in_archive = entry.get().in_archive; - match (queue_refs, in_archive) { - (1, true) => { - entry.remove(); - Self::reset_already_in(batch, col, h); - trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Removing from queue and leaving in archive", h); - }, - (1, false) => { - entry.remove(); - batch.delete(col, h); - trace!(target: "jdb.fine", " remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h); - }, - _ => panic!("Invalid value in refs: {:?}", entry.get()), - } - }, - Entry::Vacant(_entry) => { - // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. 
- //assert!(!Self::is_already_in(db, &h)); - batch.delete(col, h); - trace!(target: "jdb.fine", " remove({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h); - }, - } - } - } + match (queue_refs, in_archive) { + (1, true) => { + entry.remove(); + Self::reset_already_in(batch, col, h); + trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Removing from queue and leaving in archive", h); + } + (1, false) => { + entry.remove(); + batch.delete(col, h); + trace!(target: "jdb.fine", " remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h); + } + _ => panic!("Invalid value in refs: {:?}", entry.get()), + } + } + Entry::Vacant(_entry) => { + // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. + //assert!(!Self::is_already_in(db, &h)); + batch.delete(col, h); + trace!(target: "jdb.fine", " remove({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h); + } + } + } + } - #[cfg(test)] - fn can_reconstruct_refs(&self) -> bool { - let (latest_era, reconstructed) = Self::read_refs(&*self.backing, self.column); - let refs = self.refs.as_ref().unwrap().write(); - if *refs != reconstructed || latest_era != self.latest_era { - let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); - let clean_recon = reconstructed.into_iter().filter_map(|(k, v)| if refs.get(&k) == Some(&v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); - warn!(target: "jdb", "mem: {:?} != log: {:?}", clean_refs, clean_recon); - false - } else { - true - } - } + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let (latest_era, reconstructed) = Self::read_refs(&*self.backing, self.column); + let refs = self.refs.as_ref().unwrap().write(); + if *refs != reconstructed || latest_era != self.latest_era { + let clean_refs = refs + .iter() + .filter_map(|(k, v)| { + if reconstructed.get(k) == Some(v) { + None + } else 
{ + Some((k.clone(), v.clone())) + } + }) + .collect::>(); + let clean_recon = reconstructed + .into_iter() + .filter_map(|(k, v)| { + if refs.get(&k) == Some(&v) { + None + } else { + Some((k.clone(), v.clone())) + } + }) + .collect::>(); + warn!(target: "jdb", "mem: {:?} != log: {:?}", clean_refs, clean_recon); + false + } else { + true + } + } - fn payload(&self, key: &H256) -> Option { - self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?") - } - - fn read_refs(db: &KeyValueDB, col: Option) -> (Option, HashMap) { - let mut refs = HashMap::new(); - let mut latest_era = None; - if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val).expect("decoding db value failed"); - latest_era = Some(era); - loop { - let mut db_key = DatabaseKey { - era, - index: 0usize, - }; - while let Some(rlp_data) = db.get(col, &encode(&db_key)).expect("Low-level database error.") { - let inserts = DatabaseValueView::from_rlp(&rlp_data).inserts().expect("rlp read from db; qed"); - Self::replay_keys(&inserts, db, col, &mut refs); - db_key.index += 1; - }; - if db_key.index == 0 || era == 0 { - break; - } - era -= 1; - } - } - (latest_era, refs) - } + fn payload(&self, key: &H256) -> Option { + self.backing + .get(self.column, key) + .expect("Low-level database error. 
Some issue with your hard disk?") + } + fn read_refs(db: &KeyValueDB, col: Option) -> (Option, HashMap) { + let mut refs = HashMap::new(); + let mut latest_era = None; + if let Some(val) = db + .get(col, &LATEST_ERA_KEY) + .expect("Low-level database error.") + { + let mut era = decode::(&val).expect("decoding db value failed"); + latest_era = Some(era); + loop { + let mut db_key = DatabaseKey { era, index: 0usize }; + while let Some(rlp_data) = db + .get(col, &encode(&db_key)) + .expect("Low-level database error.") + { + let inserts = DatabaseValueView::from_rlp(&rlp_data) + .inserts() + .expect("rlp read from db; qed"); + Self::replay_keys(&inserts, db, col, &mut refs); + db_key.index += 1; + } + if db_key.index == 0 || era == 0 { + break; + } + era -= 1; + } + } + (latest_era, refs) + } } impl HashDB for EarlyMergeDB { - fn get(&self, key: &H256) -> Option { - if let Some((d, rc)) = self.overlay.raw(key) { - if rc > 0 { - return Some(d.clone()) - } - } - self.payload(key) - } + fn get(&self, key: &H256) -> Option { + if let Some((d, rc)) = self.overlay.raw(key) { + if rc > 0 { + return Some(d.clone()); + } + } + self.payload(key) + } - fn contains(&self, key: &H256) -> bool { - self.get(key).is_some() - } + fn contains(&self, key: &H256) -> bool { + self.get(key).is_some() + } - fn insert(&mut self, value: &[u8]) -> H256 { - self.overlay.insert(value) - } - fn emplace(&mut self, key: H256, value: DBValue) { - self.overlay.emplace(key, value); - } - fn remove(&mut self, key: &H256) { - self.overlay.remove(key); - } + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: DBValue) { + self.overlay.emplace(key, value); + } + fn remove(&mut self, key: &H256) { + self.overlay.remove(key); + } } impl ::traits::KeyedHashDB for EarlyMergeDB { - fn keys(&self) -> HashMap { - let mut ret: HashMap = self.backing.iter(self.column) - .map(|(key, _)| (H256::from_slice(&*key), 1)) - .collect(); + fn keys(&self) 
-> HashMap { + let mut ret: HashMap = self + .backing + .iter(self.column) + .map(|(key, _)| (H256::from_slice(&*key), 1)) + .collect(); - for (key, refs) in self.overlay.keys() { - match ret.entry(key) { - Entry::Occupied(mut entry) => { - *entry.get_mut() += refs; - }, - Entry::Vacant(entry) => { - entry.insert(refs); - } - } - } - ret - } + for (key, refs) in self.overlay.keys() { + match ret.entry(key) { + Entry::Occupied(mut entry) => { + *entry.get_mut() += refs; + } + Entry::Vacant(entry) => { + entry.insert(refs); + } + } + } + ret + } } impl JournalDB for EarlyMergeDB { - fn boxed_clone(&self) -> Box { - Box::new(EarlyMergeDB { - overlay: self.overlay.clone(), - backing: self.backing.clone(), - refs: self.refs.clone(), - latest_era: self.latest_era.clone(), - column: self.column.clone(), - }) - } + fn boxed_clone(&self) -> Box { + Box::new(EarlyMergeDB { + overlay: self.overlay.clone(), + backing: self.backing.clone(), + refs: self.refs.clone(), + latest_era: self.latest_era.clone(), + column: self.column.clone(), + }) + } - fn is_empty(&self) -> bool { - self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none() - } + fn is_empty(&self) -> bool { + self.backing + .get(self.column, &LATEST_ERA_KEY) + .expect("Low level database error") + .is_none() + } - fn backing(&self) -> &Arc { - &self.backing - } + fn backing(&self) -> &Arc { + &self.backing + } - fn latest_era(&self) -> Option { self.latest_era } + fn latest_era(&self) -> Option { + self.latest_era + } - fn mem_used(&self) -> usize { - self.overlay.mem_used() + match self.refs { - Some(ref c) => c.read().heap_size_of_children(), - None => 0 - } - } + fn mem_used(&self) -> usize { + self.overlay.mem_used() + + match self.refs { + Some(ref c) => c.read().heap_size_of_children(), + None => 0, + } + } - fn state(&self, id: &H256) -> Option { - self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) - } + fn state(&self, id: &H256) -> Option { 
+ self.backing + .get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]) + .map(|b| b.into_vec()) + } - fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result { - // record new commit's details. - let mut refs = match self.refs.as_ref() { - Some(refs) => refs.write(), - None => return Ok(0), - }; + fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result { + // record new commit's details. + let mut refs = match self.refs.as_ref() { + Some(refs) => refs.write(), + None => return Ok(0), + }; - { - let mut db_key = DatabaseKey { - era: now, - index: 0usize, - }; - let mut last; + { + let mut db_key = DatabaseKey { + era: now, + index: 0usize, + }; + let mut last; - while self.backing.get(self.column, { - last = encode(&db_key); - &last - })?.is_some() { - db_key.index += 1; - } + while self + .backing + .get(self.column, { + last = encode(&db_key); + &last + })? + .is_some() + { + db_key.index += 1; + } - let drained = self.overlay.drain(); + let drained = self.overlay.drain(); - trace!(target: "jdb", "commit: #{} ({})", now, id); + trace!(target: "jdb", "commit: #{} ({})", now, id); - let removes: Vec = drained - .iter() - .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None}) - .collect(); - let inserts: Vec<(H256, _)> = drained - .into_iter() - .filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None }) - .collect(); + let removes: Vec = drained + .iter() + .filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }) + .collect(); + let inserts: Vec<(H256, _)> = drained + .into_iter() + .filter_map(|(k, (v, r))| { + if r > 0 { + assert!(r == 1); + Some((k, v)) + } else { + assert!(r >= -1); + None + } + }) + .collect(); - // TODO: check all removes are in the db. + // TODO: check all removes are in the db. - // Process the new inserts. - // We use the inserts for three things. 
For each: - // - we place into the backing DB or increment the counter if already in; - // - we note in the backing db that it was already in; - // - we write the key into our journal for this block; + // Process the new inserts. + // We use the inserts for three things. For each: + // - we place into the backing DB or increment the counter if already in; + // - we note in the backing db that it was already in; + // - we write the key into our journal for this block; - Self::insert_keys(&inserts, &*self.backing, self.column, &mut refs, batch); + Self::insert_keys(&inserts, &*self.backing, self.column, &mut refs, batch); - let ins = inserts.iter().map(|&(k, _)| k).collect::>(); - let value_ref = DatabaseValueRef { - id, - inserts: &ins, - deletes: &removes, - }; + let ins = inserts.iter().map(|&(k, _)| k).collect::>(); + let value_ref = DatabaseValueRef { + id, + inserts: &ins, + deletes: &removes, + }; - trace!(target: "jdb.ops", " Deletes: {:?}", removes); - trace!(target: "jdb.ops", " Inserts: {:?}", ins); + trace!(target: "jdb.ops", " Deletes: {:?}", removes); + trace!(target: "jdb.ops", " Inserts: {:?}", ins); - batch.put(self.column, &last, &encode(&value_ref)); - if self.latest_era.map_or(true, |e| now > e) { - batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); - self.latest_era = Some(now); - } + batch.put(self.column, &last, &encode(&value_ref)); + if self.latest_era.map_or(true, |e| now > e) { + batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); + self.latest_era = Some(now); + } - Ok((ins.len() + removes.len()) as u32) - } - } + Ok((ins.len() + removes.len()) as u32) + } + } - fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result { - let mut refs = self.refs.as_ref().unwrap().write(); + fn mark_canonical( + &mut self, + batch: &mut DBTransaction, + end_era: u64, + canon_id: &H256, + ) -> io::Result { + let mut refs = self.refs.as_ref().unwrap().write(); - // apply old commits' details - let mut 
db_key = DatabaseKey { - era: end_era, - index: 0usize, - }; - let mut last; + // apply old commits' details + let mut db_key = DatabaseKey { + era: end_era, + index: 0usize, + }; + let mut last; - while let Some(rlp_data) = { - last = encode(&db_key); - self.backing.get(self.column, &last) - }? { - let view = DatabaseValueView::from_rlp(&rlp_data); - let inserts = view.inserts().expect("rlp read from db; qed"); + while let Some(rlp_data) = { + last = encode(&db_key); + self.backing.get(self.column, &last) + }? { + let view = DatabaseValueView::from_rlp(&rlp_data); + let inserts = view.inserts().expect("rlp read from db; qed"); - if canon_id == &view.id().expect("rlp read from db; qed") { - // Collect keys to be removed. Canon block - remove the (enacted) deletes. - let deletes = view.deletes().expect("rlp read from db; qed"); - trace!(target: "jdb.ops", " Expunging: {:?}", deletes); - Self::remove_keys(&deletes, &mut refs, batch, self.column, RemoveFrom::Archive); + if canon_id == &view.id().expect("rlp read from db; qed") { + // Collect keys to be removed. Canon block - remove the (enacted) deletes. + let deletes = view.deletes().expect("rlp read from db; qed"); + trace!(target: "jdb.ops", " Expunging: {:?}", deletes); + Self::remove_keys(&deletes, &mut refs, batch, self.column, RemoveFrom::Archive); - trace!(target: "jdb.ops", " Finalising: {:?}", inserts); - for k in &inserts { - match refs.get(k).cloned() { - None => { - // [in archive] -> SHIFT remove -> SHIFT insert None->Some{queue_refs: 1, in_archive: true} -> TAKE remove Some{queue_refs: 1, in_archive: true}->None -> TAKE insert - // already expunged from the queue (which is allowed since the key is in the archive). - // leave well alone. - } - Some( RefInfo{queue_refs: 1, in_archive: false} ) => { - // just delete the refs entry. 
- refs.remove(k); - } - Some( RefInfo{queue_refs: x, in_archive: false} ) => { - // must set already in; , - Self::set_already_in(batch, self.column, k); - refs.insert(k.clone(), RefInfo{ queue_refs: x - 1, in_archive: true }); - } - Some( RefInfo{in_archive: true, ..} ) => { - // Invalid! Reinserted the same key twice. - warn!("Key {} inserted twice into same fork.", k); - } - } - } - } else { - // Collect keys to be removed. Non-canon block - remove the (reverted) inserts. - trace!(target: "jdb.ops", " Reverting: {:?}", inserts); - Self::remove_keys(&inserts, &mut refs, batch, self.column, RemoveFrom::Queue); - } + trace!(target: "jdb.ops", " Finalising: {:?}", inserts); + for k in &inserts { + match refs.get(k).cloned() { + None => { + // [in archive] -> SHIFT remove -> SHIFT insert None->Some{queue_refs: 1, in_archive: true} -> TAKE remove Some{queue_refs: 1, in_archive: true}->None -> TAKE insert + // already expunged from the queue (which is allowed since the key is in the archive). + // leave well alone. + } + Some(RefInfo { + queue_refs: 1, + in_archive: false, + }) => { + // just delete the refs entry. + refs.remove(k); + } + Some(RefInfo { + queue_refs: x, + in_archive: false, + }) => { + // must set already in; , + Self::set_already_in(batch, self.column, k); + refs.insert( + k.clone(), + RefInfo { + queue_refs: x - 1, + in_archive: true, + }, + ); + } + Some(RefInfo { + in_archive: true, .. + }) => { + // Invalid! Reinserted the same key twice. + warn!("Key {} inserted twice into same fork.", k); + } + } + } + } else { + // Collect keys to be removed. Non-canon block - remove the (reverted) inserts. 
+ trace!(target: "jdb.ops", " Reverting: {:?}", inserts); + Self::remove_keys(&inserts, &mut refs, batch, self.column, RemoveFrom::Queue); + } - batch.delete(self.column, &last); - db_key.index += 1; - } + batch.delete(self.column, &last); + db_key.index += 1; + } - trace!(target: "jdb", "EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, db_key.index, canon_id); - trace!(target: "jdb", "OK: {:?}", &*refs); + trace!(target: "jdb", "EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, db_key.index, canon_id); + trace!(target: "jdb", "OK: {:?}", &*refs); - Ok(0) - } + Ok(0) + } - fn inject(&mut self, batch: &mut DBTransaction) -> io::Result { - let mut ops = 0; - for (key, (value, rc)) in self.overlay.drain() { - if rc != 0 { ops += 1 } + fn inject(&mut self, batch: &mut DBTransaction) -> io::Result { + let mut ops = 0; + for (key, (value, rc)) in self.overlay.drain() { + if rc != 0 { + ops += 1 + } - match rc { - 0 => {} - 1 => { - if self.backing.get(self.column, &key)?.is_some() { - return Err(error_key_already_exists(&key)); - } - batch.put(self.column, &key, &value) - } - -1 => { - if self.backing.get(self.column, &key)?.is_none() { - return Err(error_negatively_reference_hash(&key)); - } - batch.delete(self.column, &key) - } - _ => panic!("Attempted to inject invalid state."), - } - } + match rc { + 0 => {} + 1 => { + if self.backing.get(self.column, &key)?.is_some() { + return Err(error_key_already_exists(&key)); + } + batch.put(self.column, &key, &value) + } + -1 => { + if self.backing.get(self.column, &key)?.is_none() { + return Err(error_negatively_reference_hash(&key)); + } + batch.delete(self.column, &key) + } + _ => panic!("Attempted to inject invalid state."), + } + } - Ok(ops) - } + Ok(ops) + } - fn consolidate(&mut self, with: MemoryDB) { - self.overlay.consolidate(with); - } + fn consolidate(&mut self, with: MemoryDB) { + self.overlay.consolidate(with); + } } #[cfg(test)] mod tests { - use keccak::keccak; - use 
hash_db::HashDB; - use super::*; - use super::super::traits::JournalDB; - use kvdb_memorydb; - - #[test] - fn insert_same_in_fork() { - // history is 1 - let mut jdb = new_db(); - - let x = jdb.insert(b"X"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&x); - jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - let x = jdb.insert(b"X"); - jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - assert!(jdb.contains(&x)); - } - - #[test] - fn insert_older_era() { - let mut jdb = new_db(); - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0a"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - let bar = jdb.insert(b"bar"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&bar); - jdb.commit_batch(0, &keccak(b"0b"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - } - - #[test] - fn long_history() { - // history is 3 - let mut jdb = new_db(); - let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&h)); - 
jdb.remove(&h); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&h)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&h)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&h)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&h)); - } - - #[test] - fn complex() { - // history is 1 - let mut jdb = new_db(); - - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - - jdb.remove(&foo); - jdb.remove(&bar); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - assert!(jdb.contains(&baz)); - - let foo = jdb.insert(b"foo"); - jdb.remove(&baz); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&bar)); - assert!(jdb.contains(&baz)); - - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&bar)); - assert!(!jdb.contains(&baz)); - - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&foo)); - assert!(!jdb.contains(&bar)); - assert!(!jdb.contains(&baz)); - } - - #[test] - fn fork() { - // history is 1 - let mut jdb = new_db(); - - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), 
None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&bar); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - assert!(jdb.contains(&baz)); - - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&baz)); - assert!(!jdb.contains(&bar)); - } - - #[test] - fn overwrite() { - // history is 1 - let mut jdb = new_db(); - - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.insert(b"foo"); - assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - } - - #[test] - fn fork_same_key_one() { - - let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); - 
assert!(jdb.can_reconstruct_refs()); - - assert!(jdb.contains(&foo)); - - jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - } - - #[test] - fn fork_same_key_other() { - let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - assert!(jdb.contains(&foo)); - - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - } - - #[test] - fn fork_ins_del_ins() { - let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(2, &keccak(b"2a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(2, &keccak(b"2b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(3, &keccak(b"3a"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(3, &keccak(b"3b"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.commit_batch(4, &keccak(b"4a"), Some((2, keccak(b"2a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.commit_batch(5, &keccak(b"5a"), Some((3, keccak(b"3a")))).unwrap(); - 
assert!(jdb.can_reconstruct_refs()); - } - - fn new_db() -> EarlyMergeDB { - let backing = Arc::new(kvdb_memorydb::create(0)); - EarlyMergeDB::new(backing, None) - } - - #[test] - fn reopen() { - let shared_db = Arc::new(kvdb_memorydb::create(0)); - let bar = H256::random(); - - let foo = { - let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.emplace(bar.clone(), DBValue::from_slice(b"bar")); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - foo - }; - - { - let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - } - - { - let mut jdb = EarlyMergeDB::new(shared_db, None); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&foo)); - } - } - - #[test] - fn insert_delete_insert_delete_insert_expunge() { - let _ = ::env_logger::try_init(); - - let mut jdb = new_db(); - - // history is 4 - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.insert(b"foo"); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.insert(b"foo"); - jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - // expunge foo - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - } - - #[test] - fn forked_insert_delete_insert_delete_insert_expunge() { - let _ = 
::env_logger::try_init(); - let mut jdb = new_db(); - - // history is 4 - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1a"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1b"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(2, &keccak(b"2a"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(2, &keccak(b"2b"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3a"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3b"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(4, &keccak(b"4a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(4, &keccak(b"4b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - // expunge foo - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - } - - #[test] - fn broken_assert() { - let mut jdb = new_db(); - - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - // foo is ancient history. 
- - jdb.remove(&foo); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); // BROKEN - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - jdb.remove(&foo); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&foo)); - } - - #[test] - fn reopen_test() { - let mut jdb = new_db(); - - // history is 4 - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - // foo is ancient history. 
- - jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.remove(&foo); - jdb.remove(&bar); - jdb.commit_batch(6, &keccak(b"6"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.insert(b"foo"); - jdb.insert(b"bar"); - jdb.commit_batch(7, &keccak(b"7"), Some((3, keccak(b"3")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - } - - #[test] - fn reopen_remove_three() { - let _ = ::env_logger::try_init(); - - let shared_db = Arc::new(kvdb_memorydb::create(0)); - let foo = keccak(b"foo"); - - { - let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); - // history is 1 - jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - // foo is ancient history. - - jdb.remove(&foo); - jdb.commit_batch(2, &keccak(b"2"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - jdb.insert(b"foo"); - jdb.commit_batch(3, &keccak(b"3"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - // incantation to reopen the db - }; { - let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); - - jdb.remove(&foo); - jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - // incantation to reopen the db - }; { - let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); - - jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - // incantation to reopen the db - }; { - let mut jdb = EarlyMergeDB::new(shared_db, None); - - jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap(); - 
assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&foo)); - } - } - - #[test] - fn reopen_fork() { - let shared_db = Arc::new(kvdb_memorydb::create(0)); - - let (foo, bar, baz) = { - let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); - // history is 1 - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&bar); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - (foo, bar, baz) - }; - - { - let mut jdb = EarlyMergeDB::new(shared_db, None); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&baz)); - assert!(!jdb.contains(&bar)); - } - } - - #[test] - fn inject() { - let mut jdb = new_db(); - let key = jdb.insert(b"dog"); - jdb.inject_batch().unwrap(); - - assert_eq!(jdb.get(&key).unwrap(), DBValue::from_slice(b"dog")); - jdb.remove(&key); - jdb.inject_batch().unwrap(); - - assert!(jdb.get(&key).is_none()); - } + use super::{super::traits::JournalDB, *}; + use hash_db::HashDB; + use keccak::keccak; + use kvdb_memorydb; + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = new_db(); + + let x = jdb.insert(b"X"); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&x); + jdb.commit_batch(3, 
&keccak(b"1002b"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + let x = jdb.insert(b"X"); + jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.contains(&x)); + } + + #[test] + fn insert_older_era() { + let mut jdb = new_db(); + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0a"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let bar = jdb.insert(b"bar"); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit_batch(0, &keccak(b"0b"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = new_db(); + let h = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&h)); + jdb.remove(&h); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&h)); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&h)); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&h)); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = new_db(); + + let foo = 
jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + assert!(jdb.contains(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&bar)); + assert!(jdb.contains(&baz)); + + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&bar)); + assert!(!jdb.contains(&baz)); + + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&foo)); + assert!(!jdb.contains(&bar)); + assert!(!jdb.contains(&baz)); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = new_db(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + assert!(jdb.contains(&baz)); + + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))) + .unwrap(); + 
assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&baz)); + assert!(!jdb.contains(&bar)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = new_db(); + + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + assert!(jdb.contains(&foo)); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + } + + #[test] + fn fork_same_key_one() { + let mut jdb = new_db(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.contains(&foo)); + + jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + } + + #[test] + fn fork_same_key_other() { + let mut jdb = new_db(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, 
keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.contains(&foo)); + + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut jdb = new_db(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(2, &keccak(b"2a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(2, &keccak(b"2b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(3, &keccak(b"3a"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(3, &keccak(b"3b"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit_batch(4, &keccak(b"4a"), Some((2, keccak(b"2a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit_batch(5, &keccak(b"5a"), Some((3, keccak(b"3a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + fn new_db() -> EarlyMergeDB { + let backing = Arc::new(kvdb_memorydb::create(0)); + EarlyMergeDB::new(backing, None) + } + + #[test] + fn reopen() { + let shared_db = Arc::new(kvdb_memorydb::create(0)); + let bar = H256::random(); + + let foo = { + let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), DBValue::from_slice(b"bar")); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + foo + }; + + { + let 
mut jdb = EarlyMergeDB::new(shared_db.clone(), None); + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + { + let mut jdb = EarlyMergeDB::new(shared_db, None); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&foo)); + } + } + + #[test] + fn insert_delete_insert_delete_insert_expunge() { + let _ = ::env_logger::try_init(); + + let mut jdb = new_db(); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + let _ = ::env_logger::try_init(); + let mut jdb = new_db(); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1a"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1b"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(2, &keccak(b"2a"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + 
jdb.commit_batch(2, &keccak(b"2b"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3a"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3b"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(4, &keccak(b"4a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(4, &keccak(b"4b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // expunge foo + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut jdb = new_db(); + + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.remove(&foo); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))) + .unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + jdb.remove(&foo); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&foo)); + } + + #[test] + fn reopen_test() { + let mut jdb = new_db(); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + 
jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit_batch(6, &keccak(b"6"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit_batch(7, &keccak(b"7"), Some((3, keccak(b"3")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + let _ = ::env_logger::try_init(); + + let shared_db = Arc::new(kvdb_memorydb::create(0)); + let foo = keccak(b"foo"); + + { + let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); + // history is 1 + jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.remove(&foo); + jdb.commit_batch(2, &keccak(b"2"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + jdb.insert(b"foo"); + jdb.commit_batch(3, &keccak(b"3"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + // incantation to reopen the db + }; + { + let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); + + jdb.remove(&foo); + jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + // incantation to reopen the db + }; + { + let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); + + jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + // incantation to reopen the db + }; + { + let mut jdb = EarlyMergeDB::new(shared_db, None); + + jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&foo)); + } + } + + #[test] + fn reopen_fork() { + let shared_db = Arc::new(kvdb_memorydb::create(0)); + + let (foo, bar, baz) = { + let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + (foo, bar, baz) + }; + + { + let mut jdb = EarlyMergeDB::new(shared_db, None); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + 
assert!(!jdb.contains(&baz)); + assert!(!jdb.contains(&bar)); + } + } + + #[test] + fn inject() { + let mut jdb = new_db(); + let key = jdb.insert(b"dog"); + jdb.inject_batch().unwrap(); + + assert_eq!(jdb.get(&key).unwrap(), DBValue::from_slice(b"dog")); + jdb.remove(&key); + jdb.inject_batch().unwrap(); + + assert!(jdb.get(&key).is_none()); + } } diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs index 3eb6ee38c..fc8d2b82a 100644 --- a/util/journaldb/src/lib.rs +++ b/util/journaldb/src/lib.rs @@ -21,13 +21,13 @@ extern crate heapsize; extern crate log; extern crate ethereum_types; -extern crate parity_bytes as bytes; +extern crate fastmap; extern crate hash_db; extern crate keccak_hasher; extern crate kvdb; extern crate memory_db; +extern crate parity_bytes as bytes; extern crate parking_lot; -extern crate fastmap; extern crate rlp; #[cfg(test)] @@ -37,184 +37,198 @@ extern crate keccak_hash as keccak; #[cfg(test)] extern crate kvdb_memorydb; -use std::{fmt, str, io}; -use std::sync::Arc; +use std::{fmt, io, str, sync::Arc}; -/// Export the journaldb module. -mod traits; mod archivedb; +mod as_hash_db_impls; mod earlymergedb; mod overlayrecentdb; mod refcounteddb; +/// Export the journaldb module. +mod traits; mod util; -mod as_hash_db_impls; pub mod overlaydb; /// Export the `JournalDB` trait. pub use self::traits::JournalDB; -/// Export keyed hash trait -pub use self::traits::KeyedHashDB; /// Export as keyed hash trait pub use self::traits::AsKeyedHashDB; +/// Export keyed hash trait +pub use self::traits::KeyedHashDB; /// Journal database operating strategy. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Algorithm { - /// Keep all keys forever. - Archive, + /// Keep all keys forever. + Archive, - /// Ancient and recent history maintained separately; recent history lasts for particular - /// number of blocks. - /// - /// Inserts go into backing database, journal retains knowledge of whether backing DB key is - /// ancient or recent. 
Non-canon inserts get explicitly reverted and removed from backing DB. - EarlyMerge, + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// Inserts go into backing database, journal retains knowledge of whether backing DB key is + /// ancient or recent. Non-canon inserts get explicitly reverted and removed from backing DB. + EarlyMerge, - /// Ancient and recent history maintained separately; recent history lasts for particular - /// number of blocks. - /// - /// Inserts go into memory overlay, which is tried for key fetches. Memory overlay gets - /// flushed in backing only at end of recent history. - OverlayRecent, + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// Inserts go into memory overlay, which is tried for key fetches. Memory overlay gets + /// flushed in backing only at end of recent history. + OverlayRecent, - /// Ancient and recent history maintained separately; recent history lasts for particular - /// number of blocks. - /// - /// References are counted in disk-backed DB. - RefCounted, + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// References are counted in disk-backed DB. 
+ RefCounted, } impl str::FromStr for Algorithm { - type Err = String; + type Err = String; - fn from_str(s: &str) -> Result { - match s { - "archive" => Ok(Algorithm::Archive), - "light" => Ok(Algorithm::EarlyMerge), - "fast" => Ok(Algorithm::OverlayRecent), - "basic" => Ok(Algorithm::RefCounted), - e => Err(format!("Invalid algorithm: {}", e)), - } - } + fn from_str(s: &str) -> Result { + match s { + "archive" => Ok(Algorithm::Archive), + "light" => Ok(Algorithm::EarlyMerge), + "fast" => Ok(Algorithm::OverlayRecent), + "basic" => Ok(Algorithm::RefCounted), + e => Err(format!("Invalid algorithm: {}", e)), + } + } } impl Algorithm { - /// Returns static str describing journal database algorithm. - pub fn as_str(&self) -> &'static str { - match *self { - Algorithm::Archive => "archive", - Algorithm::EarlyMerge => "light", - Algorithm::OverlayRecent => "fast", - Algorithm::RefCounted => "basic", - } - } + /// Returns static str describing journal database algorithm. + pub fn as_str(&self) -> &'static str { + match *self { + Algorithm::Archive => "archive", + Algorithm::EarlyMerge => "light", + Algorithm::OverlayRecent => "fast", + Algorithm::RefCounted => "basic", + } + } - /// Returns static str describing journal database algorithm. - pub fn as_internal_name_str(&self) -> &'static str { - match *self { - Algorithm::Archive => "archive", - Algorithm::EarlyMerge => "earlymerge", - Algorithm::OverlayRecent => "overlayrecent", - Algorithm::RefCounted => "refcounted", - } - } + /// Returns static str describing journal database algorithm. 
+ pub fn as_internal_name_str(&self) -> &'static str { + match *self { + Algorithm::Archive => "archive", + Algorithm::EarlyMerge => "earlymerge", + Algorithm::OverlayRecent => "overlayrecent", + Algorithm::RefCounted => "refcounted", + } + } - /// Returns true if pruning strategy is stable - pub fn is_stable(&self) -> bool { - match *self { - Algorithm::Archive | Algorithm::OverlayRecent => true, - _ => false, - } - } + /// Returns true if pruning strategy is stable + pub fn is_stable(&self) -> bool { + match *self { + Algorithm::Archive | Algorithm::OverlayRecent => true, + _ => false, + } + } - /// Returns all algorithm types. - pub fn all_types() -> Vec { - vec![Algorithm::Archive, Algorithm::EarlyMerge, Algorithm::OverlayRecent, Algorithm::RefCounted] - } + /// Returns all algorithm types. + pub fn all_types() -> Vec { + vec![ + Algorithm::Archive, + Algorithm::EarlyMerge, + Algorithm::OverlayRecent, + Algorithm::RefCounted, + ] + } } impl fmt::Display for Algorithm { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.as_str()) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.as_str()) + } } /// Create a new `JournalDB` trait object over a generic key-value database. 
-pub fn new(backing: Arc<::kvdb::KeyValueDB>, algorithm: Algorithm, col: Option) -> Box { - match algorithm { - Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)), - Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)), - Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(backing, col)), - Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(backing, col)), - } +pub fn new( + backing: Arc<::kvdb::KeyValueDB>, + algorithm: Algorithm, + col: Option, +) -> Box { + match algorithm { + Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)), + Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)), + Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(backing, col)), + Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(backing, col)), + } } // all keys must be at least 12 bytes -const DB_PREFIX_LEN : usize = ::kvdb::PREFIX_LEN; -const LATEST_ERA_KEY : [u8; ::kvdb::PREFIX_LEN] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_PREFIX_LEN: usize = ::kvdb::PREFIX_LEN; +const LATEST_ERA_KEY: [u8; ::kvdb::PREFIX_LEN] = [b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0]; fn error_key_already_exists(hash: ðereum_types::H256) -> io::Error { - io::Error::new(io::ErrorKind::AlreadyExists, hash.to_string()) + io::Error::new(io::ErrorKind::AlreadyExists, hash.to_string()) } fn error_negatively_reference_hash(hash: ðereum_types::H256) -> io::Error { - io::Error::new(io::ErrorKind::Other, format!("Entry {} removed from database more times than it was added.", hash)) + io::Error::new( + io::ErrorKind::Other, + format!( + "Entry {} removed from database more times than it was added.", + hash + ), + ) } pub fn new_memory_db() -> memory_db::MemoryDB { - memory_db::MemoryDB::from_null_node(&rlp::NULL_RLP, rlp::NULL_RLP.as_ref().into()) + memory_db::MemoryDB::from_null_node(&rlp::NULL_RLP, 
rlp::NULL_RLP.as_ref().into()) } #[cfg(test)] mod tests { - use super::Algorithm; + use super::Algorithm; - #[test] - fn test_journal_algorithm_parsing() { - assert_eq!(Algorithm::Archive, "archive".parse().unwrap()); - assert_eq!(Algorithm::EarlyMerge, "light".parse().unwrap()); - assert_eq!(Algorithm::OverlayRecent, "fast".parse().unwrap()); - assert_eq!(Algorithm::RefCounted, "basic".parse().unwrap()); - } + #[test] + fn test_journal_algorithm_parsing() { + assert_eq!(Algorithm::Archive, "archive".parse().unwrap()); + assert_eq!(Algorithm::EarlyMerge, "light".parse().unwrap()); + assert_eq!(Algorithm::OverlayRecent, "fast".parse().unwrap()); + assert_eq!(Algorithm::RefCounted, "basic".parse().unwrap()); + } - #[test] - fn test_journal_algorithm_printing() { - assert_eq!(Algorithm::Archive.to_string(), "archive".to_owned()); - assert_eq!(Algorithm::EarlyMerge.to_string(), "light".to_owned()); - assert_eq!(Algorithm::OverlayRecent.to_string(), "fast".to_owned()); - assert_eq!(Algorithm::RefCounted.to_string(), "basic".to_owned()); - } + #[test] + fn test_journal_algorithm_printing() { + assert_eq!(Algorithm::Archive.to_string(), "archive".to_owned()); + assert_eq!(Algorithm::EarlyMerge.to_string(), "light".to_owned()); + assert_eq!(Algorithm::OverlayRecent.to_string(), "fast".to_owned()); + assert_eq!(Algorithm::RefCounted.to_string(), "basic".to_owned()); + } - #[test] - fn test_journal_algorithm_is_stable() { - assert!(Algorithm::Archive.is_stable()); - assert!(Algorithm::OverlayRecent.is_stable()); - assert!(!Algorithm::EarlyMerge.is_stable()); - assert!(!Algorithm::RefCounted.is_stable()); - } + #[test] + fn test_journal_algorithm_is_stable() { + assert!(Algorithm::Archive.is_stable()); + assert!(Algorithm::OverlayRecent.is_stable()); + assert!(!Algorithm::EarlyMerge.is_stable()); + assert!(!Algorithm::RefCounted.is_stable()); + } - #[test] - fn test_journal_algorithm_all_types() { - // compiling should fail if some cases are not covered - let mut archive = 0; 
- let mut earlymerge = 0; - let mut overlayrecent = 0; - let mut refcounted = 0; + #[test] + fn test_journal_algorithm_all_types() { + // compiling should fail if some cases are not covered + let mut archive = 0; + let mut earlymerge = 0; + let mut overlayrecent = 0; + let mut refcounted = 0; - for a in &Algorithm::all_types() { - match *a { - Algorithm::Archive => archive += 1, - Algorithm::EarlyMerge => earlymerge += 1, - Algorithm::OverlayRecent => overlayrecent += 1, - Algorithm::RefCounted => refcounted += 1, - } - } + for a in &Algorithm::all_types() { + match *a { + Algorithm::Archive => archive += 1, + Algorithm::EarlyMerge => earlymerge += 1, + Algorithm::OverlayRecent => overlayrecent += 1, + Algorithm::RefCounted => refcounted += 1, + } + } - assert_eq!(archive, 1); - assert_eq!(earlymerge, 1); - assert_eq!(overlayrecent, 1); - assert_eq!(refcounted, 1); - } + assert_eq!(archive, 1); + assert_eq!(earlymerge, 1); + assert_eq!(overlayrecent, 1); + assert_eq!(refcounted, 1); + } } diff --git a/util/journaldb/src/overlaydb.rs b/util/journaldb/src/overlaydb.rs index 757a92e62..00551cb09 100644 --- a/util/journaldb/src/overlaydb.rs +++ b/util/journaldb/src/overlaydb.rs @@ -16,18 +16,19 @@ //! Disk-backed `HashDB` implementation. 
-use std::collections::HashMap; -use std::collections::hash_map::Entry; -use std::io; -use std::sync::Arc; +use std::{ + collections::{hash_map::Entry, HashMap}, + io, + sync::Arc, +}; +use super::error_negatively_reference_hash; use ethereum_types::H256; -use rlp::{Rlp, RlpStream, Encodable, DecoderError, Decodable, encode, decode}; -use hash_db::{HashDB}; +use hash_db::HashDB; use keccak_hasher::KeccakHasher; +use kvdb::{DBTransaction, DBValue, KeyValueDB}; use memory_db::*; -use kvdb::{KeyValueDB, DBTransaction, DBValue}; -use super::{error_negatively_reference_hash}; +use rlp::{decode, encode, Decodable, DecoderError, Encodable, Rlp, RlpStream}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay. /// @@ -39,301 +40,321 @@ use super::{error_negatively_reference_hash}; /// queries have an immediate effect in terms of these functions. #[derive(Clone)] pub struct OverlayDB { - overlay: MemoryDB, - backing: Arc, - column: Option, + overlay: MemoryDB, + backing: Arc, + column: Option, } struct Payload { - count: u32, - value: DBValue, + count: u32, + value: DBValue, } impl Payload { - fn new(count: u32, value: DBValue) -> Self { - Payload { - count, - value, - } - } + fn new(count: u32, value: DBValue) -> Self { + Payload { count, value } + } } impl Encodable for Payload { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2); - s.append(&self.count); - s.append(&&*self.value); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + s.append(&self.count); + s.append(&&*self.value); + } } impl Decodable for Payload { - fn decode(rlp: &Rlp) -> Result { - let payload = Payload { - count: rlp.val_at(0)?, - value: DBValue::from_slice(rlp.at(1)?.data()?), - }; + fn decode(rlp: &Rlp) -> Result { + let payload = Payload { + count: rlp.val_at(0)?, + value: DBValue::from_slice(rlp.at(1)?.data()?), + }; - Ok(payload) - } + Ok(payload) + } } impl OverlayDB { - /// Create a new instance of OverlayDB given a `backing` 
database. - pub fn new(backing: Arc, col: Option) -> OverlayDB { - OverlayDB{ overlay: ::new_memory_db(), backing: backing, column: col } - } + /// Create a new instance of OverlayDB given a `backing` database. + pub fn new(backing: Arc, col: Option) -> OverlayDB { + OverlayDB { + overlay: ::new_memory_db(), + backing: backing, + column: col, + } + } - /// Create a new instance of OverlayDB with an anonymous temporary database. - #[cfg(test)] - pub fn new_temp() -> OverlayDB { - let backing = Arc::new(::kvdb_memorydb::create(0)); - Self::new(backing, None) - } + /// Create a new instance of OverlayDB with an anonymous temporary database. + #[cfg(test)] + pub fn new_temp() -> OverlayDB { + let backing = Arc::new(::kvdb_memorydb::create(0)); + Self::new(backing, None) + } - /// Commit all operations in a single batch. - #[cfg(test)] - pub fn commit(&mut self) -> io::Result { - let mut batch = self.backing.transaction(); - let res = self.commit_to_batch(&mut batch)?; - self.backing.write(batch).map(|_| res).map_err(|e| e.into()) - } + /// Commit all operations in a single batch. + #[cfg(test)] + pub fn commit(&mut self) -> io::Result { + let mut batch = self.backing.transaction(); + let res = self.commit_to_batch(&mut batch)?; + self.backing.write(batch).map(|_| res).map_err(|e| e.into()) + } - /// Commit all operations to given batch. 
- pub fn commit_to_batch(&mut self, batch: &mut DBTransaction) -> io::Result { - let mut ret = 0u32; - let mut deletes = 0usize; - for i in self.overlay.drain() { - let (key, (value, rc)) = i; - if rc != 0 { - match self.payload(&key) { - Some(x) => { - let total_rc: i32 = x.count as i32 + rc; - if total_rc < 0 { - return Err(error_negatively_reference_hash(&key)); - } - let payload = Payload::new(total_rc as u32, x.value); - deletes += if self.put_payload_in_batch(batch, &key, &payload) {1} else {0}; - } - None => { - if rc < 0 { - return Err(error_negatively_reference_hash(&key)); - } - let payload = Payload::new(rc as u32, value); - self.put_payload_in_batch(batch, &key, &payload); - } - }; - ret += 1; - } - } - trace!("OverlayDB::commit() deleted {} nodes", deletes); - Ok(ret) - } + /// Commit all operations to given batch. + pub fn commit_to_batch(&mut self, batch: &mut DBTransaction) -> io::Result { + let mut ret = 0u32; + let mut deletes = 0usize; + for i in self.overlay.drain() { + let (key, (value, rc)) = i; + if rc != 0 { + match self.payload(&key) { + Some(x) => { + let total_rc: i32 = x.count as i32 + rc; + if total_rc < 0 { + return Err(error_negatively_reference_hash(&key)); + } + let payload = Payload::new(total_rc as u32, x.value); + deletes += if self.put_payload_in_batch(batch, &key, &payload) { + 1 + } else { + 0 + }; + } + None => { + if rc < 0 { + return Err(error_negatively_reference_hash(&key)); + } + let payload = Payload::new(rc as u32, value); + self.put_payload_in_batch(batch, &key, &payload); + } + }; + ret += 1; + } + } + trace!("OverlayDB::commit() deleted {} nodes", deletes); + Ok(ret) + } - /// Revert all operations on this object (i.e. `insert()`s and `remove()`s) since the - /// last `commit()`. - pub fn revert(&mut self) { self.overlay.clear(); } + /// Revert all operations on this object (i.e. `insert()`s and `remove()`s) since the + /// last `commit()`. 
+ pub fn revert(&mut self) { + self.overlay.clear(); + } - /// Get the number of references that would be committed. - pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(key).map_or(0, |(_, refs)| refs) } + /// Get the number of references that would be committed. + pub fn commit_refs(&self, key: &H256) -> i32 { + self.overlay.raw(key).map_or(0, |(_, refs)| refs) + } - /// Get the refs and value of the given key. - fn payload(&self, key: &H256) -> Option { - self.backing.get(self.column, key) - .expect("Low-level database error. Some issue with your hard disk?") - .map(|ref d| decode(d).expect("decoding db value failed") ) - } - - /// Put the refs and value of the given key, possibly deleting it from the db. - fn put_payload_in_batch(&self, batch: &mut DBTransaction, key: &H256, payload: &Payload) -> bool { - if payload.count > 0 { - batch.put(self.column, key, &encode(payload)); - false - } else { - batch.delete(self.column, key); - true - } - } + /// Get the refs and value of the given key. + fn payload(&self, key: &H256) -> Option { + self.backing + .get(self.column, key) + .expect("Low-level database error. Some issue with your hard disk?") + .map(|ref d| decode(d).expect("decoding db value failed")) + } + /// Put the refs and value of the given key, possibly deleting it from the db. 
+ fn put_payload_in_batch( + &self, + batch: &mut DBTransaction, + key: &H256, + payload: &Payload, + ) -> bool { + if payload.count > 0 { + batch.put(self.column, key, &encode(payload)); + false + } else { + batch.delete(self.column, key); + true + } + } } impl crate::KeyedHashDB for OverlayDB { - fn keys(&self) -> HashMap { - let mut ret: HashMap = self.backing.iter(self.column) - .map(|(key, _)| { - let h = H256::from_slice(&*key); - let r = self.payload(&h).unwrap().count; - (h, r as i32) - }) - .collect(); - - for (key, refs) in self.overlay.keys() { - match ret.entry(key) { - Entry::Occupied(mut entry) => { - *entry.get_mut() += refs; - }, - Entry::Vacant(entry) => { - entry.insert(refs); - } - } - } - ret - } + fn keys(&self) -> HashMap { + let mut ret: HashMap = self + .backing + .iter(self.column) + .map(|(key, _)| { + let h = H256::from_slice(&*key); + let r = self.payload(&h).unwrap().count; + (h, r as i32) + }) + .collect(); + for (key, refs) in self.overlay.keys() { + match ret.entry(key) { + Entry::Occupied(mut entry) => { + *entry.get_mut() += refs; + } + Entry::Vacant(entry) => { + entry.insert(refs); + } + } + } + ret + } } impl HashDB for OverlayDB { - fn get(&self, key: &H256) -> Option { - // return ok if positive; if negative, check backing - might be enough references there to make - // it positive again. - let k = self.overlay.raw(key); - let memrc = { - if let Some((d, rc)) = k { - if rc > 0 { return Some(d.clone()); } - rc - } else { - 0 - } - }; - match self.payload(key) { - Some(x) => { - if x.count as i32 + memrc > 0 { - Some(x.value) - } - else { - None - } - } - // Replace above match arm with this once https://github.com/rust-lang/rust/issues/15287 is done. - //Some((d, rc)) if rc + memrc > 0 => Some(d), - _ => None, - } - } + fn get(&self, key: &H256) -> Option { + // return ok if positive; if negative, check backing - might be enough references there to make + // it positive again. 
+ let k = self.overlay.raw(key); + let memrc = { + if let Some((d, rc)) = k { + if rc > 0 { + return Some(d.clone()); + } + rc + } else { + 0 + } + }; + match self.payload(key) { + Some(x) => { + if x.count as i32 + memrc > 0 { + Some(x.value) + } else { + None + } + } + // Replace above match arm with this once https://github.com/rust-lang/rust/issues/15287 is done. + //Some((d, rc)) if rc + memrc > 0 => Some(d), + _ => None, + } + } - fn contains(&self, key: &H256) -> bool { - // return ok if positive; if negative, check backing - might be enough references there to make - // it positive again. - let k = self.overlay.raw(key); - match k { - Some((_, rc)) if rc > 0 => true, - _ => { - let memrc = k.map_or(0, |(_, rc)| rc); - match self.payload(key) { - Some(x) => { - x.count as i32 + memrc > 0 - } - // Replace above match arm with this once https://github.com/rust-lang/rust/issues/15287 is done. - //Some((d, rc)) if rc + memrc > 0 => true, - _ => false, - } - } - } - } + fn contains(&self, key: &H256) -> bool { + // return ok if positive; if negative, check backing - might be enough references there to make + // it positive again. + let k = self.overlay.raw(key); + match k { + Some((_, rc)) if rc > 0 => true, + _ => { + let memrc = k.map_or(0, |(_, rc)| rc); + match self.payload(key) { + Some(x) => x.count as i32 + memrc > 0, + // Replace above match arm with this once https://github.com/rust-lang/rust/issues/15287 is done. 
+ //Some((d, rc)) if rc + memrc > 0 => true, + _ => false, + } + } + } + } - fn insert(&mut self, value: &[u8]) -> H256 { self.overlay.insert(value) } - fn emplace(&mut self, key: H256, value: DBValue) { self.overlay.emplace(key, value); } - fn remove(&mut self, key: &H256) { self.overlay.remove(key); } + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: DBValue) { + self.overlay.emplace(key, value); + } + fn remove(&mut self, key: &H256) { + self.overlay.remove(key); + } } #[test] fn overlaydb_revert() { - let mut m = OverlayDB::new_temp(); - let foo = m.insert(b"foo"); // insert foo. - let mut batch = m.backing.transaction(); - m.commit_to_batch(&mut batch).unwrap(); // commit - new operations begin here... - m.backing.write(batch).unwrap(); - let bar = m.insert(b"bar"); // insert bar. - m.remove(&foo); // remove foo. - assert!(!m.contains(&foo)); // foo is gone. - assert!(m.contains(&bar)); // bar is here. - m.revert(); // revert the last two operations. - assert!(m.contains(&foo)); // foo is here. - assert!(!m.contains(&bar)); // bar is gone. + let mut m = OverlayDB::new_temp(); + let foo = m.insert(b"foo"); // insert foo. + let mut batch = m.backing.transaction(); + m.commit_to_batch(&mut batch).unwrap(); // commit - new operations begin here... + m.backing.write(batch).unwrap(); + let bar = m.insert(b"bar"); // insert bar. + m.remove(&foo); // remove foo. + assert!(!m.contains(&foo)); // foo is gone. + assert!(m.contains(&bar)); // bar is here. + m.revert(); // revert the last two operations. + assert!(m.contains(&foo)); // foo is here. + assert!(!m.contains(&bar)); // bar is gone. 
} #[test] fn overlaydb_overlay_insert_and_remove() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(b"hello world"); - assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); - trie.remove(&h); - assert_eq!(trie.get(&h), None); + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(b"hello world"); + assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); + trie.remove(&h); + assert_eq!(trie.get(&h), None); } #[test] fn overlaydb_backing_insert_revert() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(b"hello world"); - assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); - trie.commit().unwrap(); - assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); - trie.revert(); - assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(b"hello world"); + assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); + trie.commit().unwrap(); + assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); + trie.revert(); + assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); } #[test] fn overlaydb_backing_remove() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(b"hello world"); - trie.commit().unwrap(); - trie.remove(&h); - assert_eq!(trie.get(&h), None); - trie.commit().unwrap(); - assert_eq!(trie.get(&h), None); - trie.revert(); - assert_eq!(trie.get(&h), None); + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(b"hello world"); + trie.commit().unwrap(); + trie.remove(&h); + assert_eq!(trie.get(&h), None); + trie.commit().unwrap(); + assert_eq!(trie.get(&h), None); + trie.revert(); + assert_eq!(trie.get(&h), None); } #[test] fn overlaydb_backing_remove_revert() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(b"hello world"); - trie.commit().unwrap(); - trie.remove(&h); - assert_eq!(trie.get(&h), None); - 
trie.revert(); - assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(b"hello world"); + trie.commit().unwrap(); + trie.remove(&h); + assert_eq!(trie.get(&h), None); + trie.revert(); + assert_eq!(trie.get(&h).unwrap(), DBValue::from_slice(b"hello world")); } #[test] fn overlaydb_negative() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(b"hello world"); - trie.commit().unwrap(); - trie.remove(&h); - trie.remove(&h); //bad - sends us into negative refs. - assert_eq!(trie.get(&h), None); - assert!(trie.commit().is_err()); + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(b"hello world"); + trie.commit().unwrap(); + trie.remove(&h); + trie.remove(&h); //bad - sends us into negative refs. + assert_eq!(trie.get(&h), None); + assert!(trie.commit().is_err()); } #[test] fn overlaydb_complex() { - let mut trie = OverlayDB::new_temp(); - let hfoo = trie.insert(b"foo"); - assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); - let hbar = trie.insert(b"bar"); - assert_eq!(trie.get(&hbar).unwrap(), DBValue::from_slice(b"bar")); - trie.commit().unwrap(); - assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); - assert_eq!(trie.get(&hbar).unwrap(), DBValue::from_slice(b"bar")); - trie.insert(b"foo"); // two refs - assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); - trie.commit().unwrap(); - assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); - assert_eq!(trie.get(&hbar).unwrap(), DBValue::from_slice(b"bar")); - trie.remove(&hbar); // zero refs - delete - assert_eq!(trie.get(&hbar), None); - trie.remove(&hfoo); // one ref - keep - assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); - trie.commit().unwrap(); - assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); - trie.remove(&hfoo); // zero ref - would delete, but... 
- assert_eq!(trie.get(&hfoo), None); - trie.insert(b"foo"); // one ref - keep after all. - assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); - trie.commit().unwrap(); - assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); - trie.remove(&hfoo); // zero ref - delete - assert_eq!(trie.get(&hfoo), None); - trie.commit().unwrap(); // - assert_eq!(trie.get(&hfoo), None); + let mut trie = OverlayDB::new_temp(); + let hfoo = trie.insert(b"foo"); + assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); + let hbar = trie.insert(b"bar"); + assert_eq!(trie.get(&hbar).unwrap(), DBValue::from_slice(b"bar")); + trie.commit().unwrap(); + assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); + assert_eq!(trie.get(&hbar).unwrap(), DBValue::from_slice(b"bar")); + trie.insert(b"foo"); // two refs + assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); + trie.commit().unwrap(); + assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); + assert_eq!(trie.get(&hbar).unwrap(), DBValue::from_slice(b"bar")); + trie.remove(&hbar); // zero refs - delete + assert_eq!(trie.get(&hbar), None); + trie.remove(&hfoo); // one ref - keep + assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); + trie.commit().unwrap(); + assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); + trie.remove(&hfoo); // zero ref - would delete, but... + assert_eq!(trie.get(&hfoo), None); + trie.insert(b"foo"); // one ref - keep after all. 
+ assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); + trie.commit().unwrap(); + assert_eq!(trie.get(&hfoo).unwrap(), DBValue::from_slice(b"foo")); + trie.remove(&hfoo); // zero ref - delete + assert_eq!(trie.get(&hfoo), None); + trie.commit().unwrap(); // + assert_eq!(trie.get(&hfoo), None); } diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs index a48e59d91..5dff8b2fd 100644 --- a/util/journaldb/src/overlayrecentdb.rs +++ b/util/journaldb/src/overlayrecentdb.rs @@ -16,22 +16,23 @@ //! `JournalDB` over in-memory overlay -use std::collections::HashMap; -use std::collections::hash_map::Entry; -use std::io; -use std::sync::Arc; +use std::{ + collections::{hash_map::Entry, HashMap}, + io, + sync::Arc, +}; +use super::{error_negatively_reference_hash, JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY}; use bytes::Bytes; use ethereum_types::H256; -use hash_db::{HashDB}; +use fastmap::H256FastMap; +use hash_db::HashDB; use heapsize::HeapSizeOf; use keccak_hasher::KeccakHasher; -use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use kvdb::{DBTransaction, DBValue, KeyValueDB}; use memory_db::*; use parking_lot::RwLock; -use fastmap::H256FastMap; -use rlp::{Rlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable}; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, JournalDB, error_negatively_reference_hash}; +use rlp::{decode, encode, Decodable, DecoderError, Encodable, Rlp, RlpStream}; use util::DatabaseKey; /// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay @@ -66,1006 +67,1132 @@ use util::DatabaseKey; /// 7. Delete ancient record from memory and disk. 
pub struct OverlayRecentDB { - transaction_overlay: MemoryDB, - backing: Arc, - journal_overlay: Arc>, - column: Option, + transaction_overlay: MemoryDB, + backing: Arc, + journal_overlay: Arc>, + column: Option, } struct DatabaseValue { - id: H256, - inserts: Vec<(H256, DBValue)>, - deletes: Vec, + id: H256, + inserts: Vec<(H256, DBValue)>, + deletes: Vec, } impl Decodable for DatabaseValue { - fn decode(rlp: &Rlp) -> Result { - let id = rlp.val_at(0)?; - let inserts = rlp.at(1)?.iter().map(|r| { - let k = r.val_at(0)?; - let v = DBValue::from_slice(r.at(1)?.data()?); - Ok((k, v)) - }).collect::, _>>()?; - let deletes = rlp.list_at(2)?; + fn decode(rlp: &Rlp) -> Result { + let id = rlp.val_at(0)?; + let inserts = rlp + .at(1)? + .iter() + .map(|r| { + let k = r.val_at(0)?; + let v = DBValue::from_slice(r.at(1)?.data()?); + Ok((k, v)) + }) + .collect::, _>>()?; + let deletes = rlp.list_at(2)?; - let value = DatabaseValue { - id, - inserts, - deletes, - }; + let value = DatabaseValue { + id, + inserts, + deletes, + }; - Ok(value) - } + Ok(value) + } } struct DatabaseValueRef<'a> { - id: &'a H256, - inserts: &'a [(H256, DBValue)], - deletes: &'a [H256], + id: &'a H256, + inserts: &'a [(H256, DBValue)], + deletes: &'a [H256], } impl<'a> Encodable for DatabaseValueRef<'a> { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3); - s.append(self.id); - s.begin_list(self.inserts.len()); - for kv in self.inserts { - s.begin_list(2); - s.append(&kv.0); - s.append(&&*kv.1); - } - s.append_list(self.deletes); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3); + s.append(self.id); + s.begin_list(self.inserts.len()); + for kv in self.inserts { + s.begin_list(2); + s.append(&kv.0); + s.append(&&*kv.1); + } + s.append_list(self.deletes); + } } #[derive(PartialEq)] struct JournalOverlay { - backing_overlay: MemoryDB, // Nodes added in the history period - pending_overlay: H256FastMap, // Nodes being transfered from backing_overlay to backing db - journal: 
HashMap>, - latest_era: Option, - earliest_era: Option, - cumulative_size: usize, // cumulative size of all entries. + backing_overlay: MemoryDB, // Nodes added in the history period + pending_overlay: H256FastMap, // Nodes being transfered from backing_overlay to backing db + journal: HashMap>, + latest_era: Option, + earliest_era: Option, + cumulative_size: usize, // cumulative size of all entries. } #[derive(PartialEq)] struct JournalEntry { - id: H256, - insertions: Vec, - deletions: Vec, + id: H256, + insertions: Vec, + deletions: Vec, } impl HeapSizeOf for JournalEntry { - fn heap_size_of_children(&self) -> usize { - self.insertions.heap_size_of_children() + self.deletions.heap_size_of_children() - } + fn heap_size_of_children(&self) -> usize { + self.insertions.heap_size_of_children() + self.deletions.heap_size_of_children() + } } impl Clone for OverlayRecentDB { - fn clone(&self) -> OverlayRecentDB { - OverlayRecentDB { - transaction_overlay: self.transaction_overlay.clone(), - backing: self.backing.clone(), - journal_overlay: self.journal_overlay.clone(), - column: self.column.clone(), - } - } + fn clone(&self) -> OverlayRecentDB { + OverlayRecentDB { + transaction_overlay: self.transaction_overlay.clone(), + backing: self.backing.clone(), + journal_overlay: self.journal_overlay.clone(), + column: self.column.clone(), + } + } } impl OverlayRecentDB { - /// Create a new instance. - pub fn new(backing: Arc, col: Option) -> OverlayRecentDB { - let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&*backing, col))); - OverlayRecentDB { - transaction_overlay: ::new_memory_db(), - backing: backing, - journal_overlay: journal_overlay, - column: col, - } - } + /// Create a new instance. 
+ pub fn new(backing: Arc, col: Option) -> OverlayRecentDB { + let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&*backing, col))); + OverlayRecentDB { + transaction_overlay: ::new_memory_db(), + backing: backing, + journal_overlay: journal_overlay, + column: col, + } + } - #[cfg(test)] - fn can_reconstruct_refs(&self) -> bool { - let reconstructed = Self::read_overlay(&*self.backing, self.column); - let journal_overlay = self.journal_overlay.read(); - journal_overlay.backing_overlay == reconstructed.backing_overlay && - journal_overlay.pending_overlay == reconstructed.pending_overlay && - journal_overlay.journal == reconstructed.journal && - journal_overlay.latest_era == reconstructed.latest_era && - journal_overlay.cumulative_size == reconstructed.cumulative_size - } + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_overlay(&*self.backing, self.column); + let journal_overlay = self.journal_overlay.read(); + journal_overlay.backing_overlay == reconstructed.backing_overlay + && journal_overlay.pending_overlay == reconstructed.pending_overlay + && journal_overlay.journal == reconstructed.journal + && journal_overlay.latest_era == reconstructed.latest_era + && journal_overlay.cumulative_size == reconstructed.cumulative_size + } - fn payload(&self, key: &H256) -> Option { - self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?") - } + fn payload(&self, key: &H256) -> Option { + self.backing + .get(self.column, key) + .expect("Low-level database error. 
Some issue with your hard disk?") + } - fn read_overlay(db: &KeyValueDB, col: Option) -> JournalOverlay { - let mut journal = HashMap::new(); - let mut overlay = ::new_memory_db(); - let mut count = 0; - let mut latest_era = None; - let mut earliest_era = None; - let mut cumulative_size = 0; - if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val).expect("decoding db value failed"); - latest_era = Some(era); - loop { - let mut db_key = DatabaseKey { - era, - index: 0usize, - }; - while let Some(rlp_data) = db.get(col, &encode(&db_key)).expect("Low-level database error.") { - trace!("read_overlay: era={}, index={}", era, db_key.index); - let value = decode::(&rlp_data).expect(&format!("read_overlay: Error decoding DatabaseValue era={}, index{}", era, db_key.index)); - count += value.inserts.len(); - let mut inserted_keys = Vec::new(); - for (k, v) in value.inserts { - let short_key = to_short_key(&k); + fn read_overlay(db: &KeyValueDB, col: Option) -> JournalOverlay { + let mut journal = HashMap::new(); + let mut overlay = ::new_memory_db(); + let mut count = 0; + let mut latest_era = None; + let mut earliest_era = None; + let mut cumulative_size = 0; + if let Some(val) = db + .get(col, &LATEST_ERA_KEY) + .expect("Low-level database error.") + { + let mut era = decode::(&val).expect("decoding db value failed"); + latest_era = Some(era); + loop { + let mut db_key = DatabaseKey { era, index: 0usize }; + while let Some(rlp_data) = db + .get(col, &encode(&db_key)) + .expect("Low-level database error.") + { + trace!("read_overlay: era={}, index={}", era, db_key.index); + let value = decode::(&rlp_data).expect(&format!( + "read_overlay: Error decoding DatabaseValue era={}, index{}", + era, db_key.index + )); + count += value.inserts.len(); + let mut inserted_keys = Vec::new(); + for (k, v) in value.inserts { + let short_key = to_short_key(&k); - if !overlay.contains(&short_key) { - cumulative_size += v.len(); - 
} - - overlay.emplace(short_key, v); - inserted_keys.push(k); - } - journal.entry(era).or_insert_with(Vec::new).push(JournalEntry { - id: value.id, - insertions: inserted_keys, - deletions: value.deletes, - }); - db_key.index += 1; - earliest_era = Some(era); - }; - if db_key.index == 0 || era == 0 { - break; - } - era -= 1; - } - } - trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); - JournalOverlay { - backing_overlay: overlay, - pending_overlay: HashMap::default(), - journal: journal, - latest_era: latest_era, - earliest_era: earliest_era, - cumulative_size: cumulative_size, - } - } + if !overlay.contains(&short_key) { + cumulative_size += v.len(); + } + overlay.emplace(short_key, v); + inserted_keys.push(k); + } + journal + .entry(era) + .or_insert_with(Vec::new) + .push(JournalEntry { + id: value.id, + insertions: inserted_keys, + deletions: value.deletes, + }); + db_key.index += 1; + earliest_era = Some(era); + } + if db_key.index == 0 || era == 0 { + break; + } + era -= 1; + } + } + trace!( + "Recovered {} overlay entries, {} journal entries", + count, + journal.len() + ); + JournalOverlay { + backing_overlay: overlay, + pending_overlay: HashMap::default(), + journal: journal, + latest_era: latest_era, + earliest_era: earliest_era, + cumulative_size: cumulative_size, + } + } } #[inline] fn to_short_key(key: &H256) -> H256 { - let mut k = H256::new(); - k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]); - k + let mut k = H256::new(); + k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]); + k } impl ::traits::KeyedHashDB for OverlayRecentDB { - fn keys(&self) -> HashMap { - let mut ret: HashMap = self.backing.iter(self.column) - .map(|(key, _)| (H256::from_slice(&*key), 1)) - .collect(); + fn keys(&self) -> HashMap { + let mut ret: HashMap = self + .backing + .iter(self.column) + .map(|(key, _)| (H256::from_slice(&*key), 1)) + .collect(); - for (key, refs) in self.transaction_overlay.keys() { - match 
ret.entry(key) { - Entry::Occupied(mut entry) => { - *entry.get_mut() += refs; - }, - Entry::Vacant(entry) => { - entry.insert(refs); - } - } - } - ret - } + for (key, refs) in self.transaction_overlay.keys() { + match ret.entry(key) { + Entry::Occupied(mut entry) => { + *entry.get_mut() += refs; + } + Entry::Vacant(entry) => { + entry.insert(refs); + } + } + } + ret + } } impl JournalDB for OverlayRecentDB { + fn boxed_clone(&self) -> Box { + Box::new(self.clone()) + } - fn boxed_clone(&self) -> Box { - Box::new(self.clone()) - } + fn mem_used(&self) -> usize { + let mut mem = self.transaction_overlay.mem_used(); + let overlay = self.journal_overlay.read(); - fn mem_used(&self) -> usize { - let mut mem = self.transaction_overlay.mem_used(); - let overlay = self.journal_overlay.read(); + mem += overlay.backing_overlay.mem_used(); + mem += overlay.pending_overlay.heap_size_of_children(); + mem += overlay.journal.heap_size_of_children(); - mem += overlay.backing_overlay.mem_used(); - mem += overlay.pending_overlay.heap_size_of_children(); - mem += overlay.journal.heap_size_of_children(); + mem + } - mem - } + fn journal_size(&self) -> usize { + self.journal_overlay.read().cumulative_size + } - fn journal_size(&self) -> usize { - self.journal_overlay.read().cumulative_size + fn is_empty(&self) -> bool { + self.backing + .get(self.column, &LATEST_ERA_KEY) + .expect("Low level database error") + .is_none() + } - } + fn backing(&self) -> &Arc { + &self.backing + } - fn is_empty(&self) -> bool { - self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none() - } + fn latest_era(&self) -> Option { + self.journal_overlay.read().latest_era + } - fn backing(&self) -> &Arc { - &self.backing - } + fn earliest_era(&self) -> Option { + self.journal_overlay.read().earliest_era + } - fn latest_era(&self) -> Option { self.journal_overlay.read().latest_era } + fn state(&self, key: &H256) -> Option { + let journal_overlay = self.journal_overlay.read(); + 
let key = to_short_key(key); + journal_overlay + .backing_overlay + .get(&key) + .map(|v| v.into_vec()) + .or_else(|| { + journal_overlay + .pending_overlay + .get(&key) + .map(|d| d.clone().into_vec()) + }) + .or_else(|| { + self.backing + .get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]) + .map(|b| b.into_vec()) + }) + } - fn earliest_era(&self) -> Option { self.journal_overlay.read().earliest_era } + fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result { + trace!(target: "journaldb", "entry: #{} ({})", now, id); - fn state(&self, key: &H256) -> Option { - let journal_overlay = self.journal_overlay.read(); - let key = to_short_key(key); - journal_overlay.backing_overlay.get(&key).map(|v| v.into_vec()) - .or_else(|| journal_overlay.pending_overlay.get(&key).map(|d| d.clone().into_vec())) - .or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.into_vec())) - } + let mut journal_overlay = self.journal_overlay.write(); - fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result { - trace!(target: "journaldb", "entry: #{} ({})", now, id); + // flush previous changes + journal_overlay.pending_overlay.clear(); - let mut journal_overlay = self.journal_overlay.write(); + let mut tx = self.transaction_overlay.drain(); + let inserted_keys: Vec<_> = tx + .iter() + .filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }) + .collect(); + let removed_keys: Vec<_> = tx + .iter() + .filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }) + .collect(); + let ops = inserted_keys.len() + removed_keys.len(); - // flush previous changes - journal_overlay.pending_overlay.clear(); + // Increase counter for each inserted key no matter if the block is canonical or not. 
+ let insertions: Vec<_> = tx + .drain() + .filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }) + .collect(); - let mut tx = self.transaction_overlay.drain(); - let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect(); - let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect(); - let ops = inserted_keys.len() + removed_keys.len(); + let encoded_value = { + let value_ref = DatabaseValueRef { + id, + inserts: &insertions, + deletes: &removed_keys, + }; + encode(&value_ref) + }; - // Increase counter for each inserted key no matter if the block is canonical or not. - let insertions: Vec<_> = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }).collect(); + for (k, v) in insertions { + let short_key = to_short_key(&k); + if !journal_overlay.backing_overlay.contains(&short_key) { + journal_overlay.cumulative_size += v.len(); + } - let encoded_value = { - let value_ref = DatabaseValueRef { - id, - inserts: &insertions, - deletes: &removed_keys, - }; - encode(&value_ref) - }; + journal_overlay.backing_overlay.emplace(short_key, v); + } - for (k, v) in insertions { - let short_key = to_short_key(&k); - if !journal_overlay.backing_overlay.contains(&short_key) { - journal_overlay.cumulative_size += v.len(); - } + let index = journal_overlay.journal.get(&now).map_or(0, |j| j.len()); + let db_key = DatabaseKey { era: now, index }; - journal_overlay.backing_overlay.emplace(short_key, v); - } + batch.put_vec(self.column, &encode(&db_key), encoded_value.to_vec()); + if journal_overlay.latest_era.map_or(true, |e| now > e) { + trace!(target: "journaldb", "Set latest era to {}", now); + batch.put_vec(self.column, &LATEST_ERA_KEY, encode(&now).to_vec()); + journal_overlay.latest_era = Some(now); + } - let index = journal_overlay.journal.get(&now).map_or(0, |j| j.len()); - let db_key = DatabaseKey { - era: now, - index, - }; + 
if journal_overlay.earliest_era.map_or(true, |e| e > now) { + trace!(target: "journaldb", "Set earliest era to {}", now); + journal_overlay.earliest_era = Some(now); + } - batch.put_vec(self.column, &encode(&db_key), encoded_value.to_vec()); - if journal_overlay.latest_era.map_or(true, |e| now > e) { - trace!(target: "journaldb", "Set latest era to {}", now); - batch.put_vec(self.column, &LATEST_ERA_KEY, encode(&now).to_vec()); - journal_overlay.latest_era = Some(now); - } + journal_overlay + .journal + .entry(now) + .or_insert_with(Vec::new) + .push(JournalEntry { + id: id.clone(), + insertions: inserted_keys, + deletions: removed_keys, + }); + Ok(ops as u32) + } - if journal_overlay.earliest_era.map_or(true, |e| e > now) { - trace!(target: "journaldb", "Set earliest era to {}", now); - journal_overlay.earliest_era = Some(now); - } + fn mark_canonical( + &mut self, + batch: &mut DBTransaction, + end_era: u64, + canon_id: &H256, + ) -> io::Result { + trace!(target: "journaldb", "canonical: #{} ({})", end_era, canon_id); - journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys }); - Ok(ops as u32) - } + let mut journal_overlay = self.journal_overlay.write(); + let journal_overlay = &mut *journal_overlay; - fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result { - trace!(target: "journaldb", "canonical: #{} ({})", end_era, canon_id); + let mut ops = 0; + // apply old commits' details + if let Some(ref mut records) = journal_overlay.journal.get_mut(&end_era) { + let mut canon_insertions: Vec<(H256, DBValue)> = Vec::new(); + let mut canon_deletions: Vec = Vec::new(); + let mut overlay_deletions: Vec = Vec::new(); + let mut index = 0usize; + for mut journal in records.drain(..) 
{ + //delete the record from the db + let db_key = DatabaseKey { + era: end_era, + index, + }; + batch.delete(self.column, &encode(&db_key)); + trace!(target: "journaldb", "Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len()); + { + if *canon_id == journal.id { + for h in &journal.insertions { + if let Some((d, rc)) = + journal_overlay.backing_overlay.raw(&to_short_key(h)) + { + if rc > 0 { + canon_insertions.push((h.clone(), d.clone())); + //TODO: optimize this to avoid data copy + } + } + } + canon_deletions = journal.deletions; + } + overlay_deletions.append(&mut journal.insertions); + } + index += 1; + } - let mut journal_overlay = self.journal_overlay.write(); - let journal_overlay = &mut *journal_overlay; + ops += canon_insertions.len(); + ops += canon_deletions.len(); - let mut ops = 0; - // apply old commits' details - if let Some(ref mut records) = journal_overlay.journal.get_mut(&end_era) { - let mut canon_insertions: Vec<(H256, DBValue)> = Vec::new(); - let mut canon_deletions: Vec = Vec::new(); - let mut overlay_deletions: Vec = Vec::new(); - let mut index = 0usize; - for mut journal in records.drain(..) 
{ - //delete the record from the db - let db_key = DatabaseKey { - era: end_era, - index, - }; - batch.delete(self.column, &encode(&db_key)); - trace!(target: "journaldb", "Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len()); - { - if *canon_id == journal.id { - for h in &journal.insertions { - if let Some((d, rc)) = journal_overlay.backing_overlay.raw(&to_short_key(h)) { - if rc > 0 { - canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy - } - } - } - canon_deletions = journal.deletions; - } - overlay_deletions.append(&mut journal.insertions); - } - index += 1; - } + // apply canon inserts first + for (k, v) in canon_insertions { + batch.put(self.column, &k, &v); + journal_overlay.pending_overlay.insert(to_short_key(&k), v); + } + // update the overlay + for k in overlay_deletions { + if let Some(val) = journal_overlay + .backing_overlay + .remove_and_purge(&to_short_key(&k)) + { + journal_overlay.cumulative_size -= val.len(); + } + } + // apply canon deletions + for k in canon_deletions { + if !journal_overlay.backing_overlay.contains(&to_short_key(&k)) { + batch.delete(self.column, &k); + } + } + } + journal_overlay.journal.remove(&end_era); - ops += canon_insertions.len(); - ops += canon_deletions.len(); + if !journal_overlay.journal.is_empty() { + trace!(target: "journaldb", "Set earliest_era to {}", end_era + 1); + journal_overlay.earliest_era = Some(end_era + 1); + } - // apply canon inserts first - for (k, v) in canon_insertions { - batch.put(self.column, &k, &v); - journal_overlay.pending_overlay.insert(to_short_key(&k), v); - } - // update the overlay - for k in overlay_deletions { - if let Some(val) = journal_overlay.backing_overlay.remove_and_purge(&to_short_key(&k)) { - journal_overlay.cumulative_size -= val.len(); - } - } - // apply canon deletions - for k in canon_deletions { - if 
!journal_overlay.backing_overlay.contains(&to_short_key(&k)) { - batch.delete(self.column, &k); - } - } - } - journal_overlay.journal.remove(&end_era); + Ok(ops as u32) + } - if !journal_overlay.journal.is_empty() { - trace!(target: "journaldb", "Set earliest_era to {}", end_era + 1); - journal_overlay.earliest_era = Some(end_era + 1); - } + fn flush(&self) { + self.journal_overlay.write().pending_overlay.clear(); + } - Ok(ops as u32) - } + fn inject(&mut self, batch: &mut DBTransaction) -> io::Result { + let mut ops = 0; + for (key, (value, rc)) in self.transaction_overlay.drain() { + if rc != 0 { + ops += 1 + } - fn flush(&self) { - self.journal_overlay.write().pending_overlay.clear(); - } + match rc { + 0 => {} + _ if rc > 0 => batch.put(self.column, &key, &value), + -1 => { + if cfg!(debug_assertions) && self.backing.get(self.column, &key)?.is_none() { + return Err(error_negatively_reference_hash(&key)); + } + batch.delete(self.column, &key) + } + _ => panic!("Attempted to inject invalid state ({})", rc), + } + } - fn inject(&mut self, batch: &mut DBTransaction) -> io::Result { - let mut ops = 0; - for (key, (value, rc)) in self.transaction_overlay.drain() { - if rc != 0 { ops += 1 } + Ok(ops) + } - match rc { - 0 => {} - _ if rc > 0 => { - batch.put(self.column, &key, &value) - } - -1 => { - if cfg!(debug_assertions) && self.backing.get(self.column, &key)?.is_none() { - return Err(error_negatively_reference_hash(&key)); - } - batch.delete(self.column, &key) - } - _ => panic!("Attempted to inject invalid state ({})", rc), - } - } - - Ok(ops) - } - - fn consolidate(&mut self, with: MemoryDB) { - self.transaction_overlay.consolidate(with); - } + fn consolidate(&mut self, with: MemoryDB) { + self.transaction_overlay.consolidate(with); + } } impl HashDB for OverlayRecentDB { - fn get(&self, key: &H256) -> Option { - if let Some((d, rc)) = self.transaction_overlay.raw(key) { - if rc > 0 { - return Some(d.clone()) - } - } - let v = { - let journal_overlay = 
self.journal_overlay.read(); - let key = to_short_key(key); - journal_overlay.backing_overlay.get(&key) - .or_else(|| journal_overlay.pending_overlay.get(&key).cloned()) - }; - v.or_else(|| self.payload(key)) - } + fn get(&self, key: &H256) -> Option { + if let Some((d, rc)) = self.transaction_overlay.raw(key) { + if rc > 0 { + return Some(d.clone()); + } + } + let v = { + let journal_overlay = self.journal_overlay.read(); + let key = to_short_key(key); + journal_overlay + .backing_overlay + .get(&key) + .or_else(|| journal_overlay.pending_overlay.get(&key).cloned()) + }; + v.or_else(|| self.payload(key)) + } - fn contains(&self, key: &H256) -> bool { - self.get(key).is_some() - } + fn contains(&self, key: &H256) -> bool { + self.get(key).is_some() + } - fn insert(&mut self, value: &[u8]) -> H256 { - self.transaction_overlay.insert(value) - } - fn emplace(&mut self, key: H256, value: DBValue) { - self.transaction_overlay.emplace(key, value); - } - fn remove(&mut self, key: &H256) { - self.transaction_overlay.remove(key); - } + fn insert(&mut self, value: &[u8]) -> H256 { + self.transaction_overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: DBValue) { + self.transaction_overlay.emplace(key, value); + } + fn remove(&mut self, key: &H256) { + self.transaction_overlay.remove(key); + } } #[cfg(test)] mod tests { - use keccak::keccak; - use super::*; - use hash_db::HashDB; - use {kvdb_memorydb, JournalDB}; - - fn new_db() -> OverlayRecentDB { - let backing = Arc::new(kvdb_memorydb::create(0)); - OverlayRecentDB::new(backing, None) - } - - #[test] - fn insert_same_in_fork() { - // history is 1 - let mut jdb = new_db(); - - let x = jdb.insert(b"X"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - 
jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&x); - jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - let x = jdb.insert(b"X"); - jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - assert!(jdb.contains(&x)); - } - - #[test] - fn long_history() { - // history is 3 - let mut jdb = new_db(); - let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&h)); - jdb.remove(&h); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&h)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&h)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&h)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&h)); - } - - #[test] - fn complex() { - // history is 1 - let mut jdb = new_db(); - - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - - jdb.remove(&foo); - jdb.remove(&bar); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - 
assert!(jdb.contains(&baz)); - - let foo = jdb.insert(b"foo"); - jdb.remove(&baz); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&bar)); - assert!(jdb.contains(&baz)); - - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&bar)); - assert!(!jdb.contains(&baz)); - - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&foo)); - assert!(!jdb.contains(&bar)); - assert!(!jdb.contains(&baz)); - } - - #[test] - fn fork() { - // history is 1 - let mut jdb = new_db(); - - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&bar); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - assert!(jdb.contains(&baz)); - - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&baz)); - assert!(!jdb.contains(&bar)); - } - - #[test] - fn overwrite() { - // history is 1 - let mut jdb = new_db(); - - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - 
jdb.insert(b"foo"); - assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - } - - #[test] - fn fork_same_key_one() { - let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - assert!(jdb.contains(&foo)); - - jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - } - - #[test] - fn fork_same_key_other() { - let mut jdb = new_db(); - - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - assert!(jdb.contains(&foo)); - - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - } - - #[test] - fn fork_ins_del_ins() { - let mut jdb = new_db(); - - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - 
assert!(jdb.can_reconstruct_refs()); - - let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(2, &keccak(b"2a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(2, &keccak(b"2b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(3, &keccak(b"3a"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(3, &keccak(b"3b"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.commit_batch(4, &keccak(b"4a"), Some((2, keccak(b"2a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.commit_batch(5, &keccak(b"5a"), Some((3, keccak(b"3a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - } - - #[test] - fn reopen() { - let shared_db = Arc::new(kvdb_memorydb::create(0)); - let bar = H256::random(); - - let foo = { - let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.emplace(bar.clone(), DBValue::from_slice(b"bar")); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - foo - }; - - { - let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - } - - { - let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&foo)); - } - } - - #[test] - fn insert_delete_insert_delete_insert_expunge() { - let _ = ::env_logger::try_init(); - let mut jdb = new_db(); - - // history is 4 - let foo = jdb.insert(b"foo"); 
- jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.insert(b"foo"); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.insert(b"foo"); - jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - // expunge foo - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - } - - #[test] - fn forked_insert_delete_insert_delete_insert_expunge() { - let _ = ::env_logger::try_init(); - let mut jdb = new_db(); - - // history is 4 - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1a"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(1, &keccak(b"1b"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(2, &keccak(b"2a"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(2, &keccak(b"2b"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3a"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3b"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(4, &keccak(b"4a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(4, &keccak(b"4b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - // expunge foo - jdb.commit_batch(5, &keccak(b"5"), 
Some((1, keccak(b"1a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - } - - #[test] - fn broken_assert() { - let mut jdb = new_db(); - - let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - // foo is ancient history. - - jdb.remove(&foo); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.insert(b"foo"); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); // BROKEN - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - jdb.remove(&foo); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&foo)); - } - - #[test] - fn reopen_test() { - let mut jdb = new_db(); - // history is 4 - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - // foo is ancient history. 
- - jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.remove(&foo); - jdb.remove(&bar); - jdb.commit_batch(6, &keccak(b"6"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.insert(b"foo"); - jdb.insert(b"bar"); - jdb.commit_batch(7, &keccak(b"7"), Some((3, keccak(b"3")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - } - - #[test] - fn reopen_remove_three() { - let _ = ::env_logger::try_init(); - - let shared_db = Arc::new(kvdb_memorydb::create(0)); - let foo = keccak(b"foo"); - - { - let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); - // history is 1 - jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - // foo is ancient history. - - jdb.remove(&foo); - jdb.commit_batch(2, &keccak(b"2"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - jdb.insert(b"foo"); - jdb.commit_batch(3, &keccak(b"3"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - // incantation to reopen the db - }; { - let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); - - jdb.remove(&foo); - jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - // incantation to reopen the db - }; { - let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); - - jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - - // incantation to reopen the db - }; { - let mut jdb = OverlayRecentDB::new(shared_db, None); - - jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap(); - 
assert!(jdb.can_reconstruct_refs()); - assert!(!jdb.contains(&foo)); - } - } - - #[test] - fn reopen_fork() { - let shared_db = Arc::new(kvdb_memorydb::create(0)); - - let (foo, bar, baz) = { - let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); - // history is 1 - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&bar); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - (foo, bar, baz) - }; - - { - let mut jdb = OverlayRecentDB::new(shared_db, None); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&baz)); - assert!(!jdb.contains(&bar)); - } - } - - #[test] - fn insert_older_era() { - let mut jdb = new_db(); - let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0a"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - let bar = jdb.insert(b"bar"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0a")))).unwrap(); - assert!(jdb.can_reconstruct_refs()); - - jdb.remove(&bar); - jdb.commit_batch(0, &keccak(b"0b"), None).unwrap(); - assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - } - - #[test] - fn inject() { - let mut jdb = new_db(); - let key = jdb.insert(b"dog"); - jdb.inject_batch().unwrap(); - - assert_eq!(jdb.get(&key).unwrap(), DBValue::from_slice(b"dog")); - jdb.remove(&key); - jdb.inject_batch().unwrap(); - - assert!(jdb.get(&key).is_none()); - } - - #[test] - fn earliest_era() { - let shared_db = Arc::new(kvdb_memorydb::create(0)); - - // empty DB 
- let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); - assert!(jdb.earliest_era().is_none()); - - // single journalled era. - let _key = jdb.insert(b"hello!"); - let mut batch = jdb.backing().transaction(); - jdb.journal_under(&mut batch, 0, &keccak(b"0")).unwrap(); - jdb.backing().write_buffered(batch); - - assert_eq!(jdb.earliest_era(), Some(0)); - - // second journalled era. - let mut batch = jdb.backing().transaction(); - jdb.journal_under(&mut batch, 1, &keccak(b"1")).unwrap(); - jdb.backing().write_buffered(batch); - - assert_eq!(jdb.earliest_era(), Some(0)); - - // single journalled era. - let mut batch = jdb.backing().transaction(); - jdb.mark_canonical(&mut batch, 0, &keccak(b"0")).unwrap(); - jdb.backing().write_buffered(batch); - - assert_eq!(jdb.earliest_era(), Some(1)); - - // no journalled eras. - let mut batch = jdb.backing().transaction(); - jdb.mark_canonical(&mut batch, 1, &keccak(b"1")).unwrap(); - jdb.backing().write_buffered(batch); - - assert_eq!(jdb.earliest_era(), Some(1)); - - // reconstructed: no journal entries. 
- drop(jdb); - let jdb = OverlayRecentDB::new(shared_db, None); - assert_eq!(jdb.earliest_era(), None); - } + use super::*; + use hash_db::HashDB; + use keccak::keccak; + use kvdb_memorydb; + use JournalDB; + + fn new_db() -> OverlayRecentDB { + let backing = Arc::new(kvdb_memorydb::create(0)); + OverlayRecentDB::new(backing, None) + } + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = new_db(); + + let x = jdb.insert(b"X"); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&x); + jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + let x = jdb.insert(b"X"); + jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.contains(&x)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = new_db(); + let h = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&h)); + jdb.remove(&h); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&h)); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&h)); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))) + 
.unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&h)); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = new_db(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + assert!(jdb.contains(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&bar)); + assert!(jdb.contains(&baz)); + + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&bar)); + assert!(!jdb.contains(&baz)); + + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&foo)); + assert!(!jdb.contains(&bar)); + assert!(!jdb.contains(&baz)); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = new_db(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + 
jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + assert!(jdb.contains(&baz)); + + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&baz)); + assert!(!jdb.contains(&bar)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = new_db(); + + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + assert!(jdb.contains(&foo)); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + } + + #[test] + fn fork_same_key_one() { + let mut jdb = new_db(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.contains(&foo)); + + jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + } + + #[test] + fn fork_same_key_other() { + let mut jdb = new_db(); + + 
jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.contains(&foo)); + + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut jdb = new_db(); + + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(2, &keccak(b"2a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(2, &keccak(b"2b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(3, &keccak(b"3a"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(3, &keccak(b"3b"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit_batch(4, &keccak(b"4a"), Some((2, keccak(b"2a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit_batch(5, &keccak(b"5a"), Some((3, keccak(b"3a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen() { + let shared_db = Arc::new(kvdb_memorydb::create(0)); + let bar = H256::random(); + + let foo = { + let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); + // history is 1 + let foo = 
jdb.insert(b"foo"); + jdb.emplace(bar.clone(), DBValue::from_slice(b"bar")); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + foo + }; + + { + let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + { + let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&foo)); + } + } + + #[test] + fn insert_delete_insert_delete_insert_expunge() { + let _ = ::env_logger::try_init(); + let mut jdb = new_db(); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + let _ = ::env_logger::try_init(); + let mut jdb = new_db(); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(1, &keccak(b"1a"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(1, 
&keccak(b"1b"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(2, &keccak(b"2a"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(2, &keccak(b"2b"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3a"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3b"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(4, &keccak(b"4a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(4, &keccak(b"4b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // expunge foo + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut jdb = new_db(); + + let foo = jdb.insert(b"foo"); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.remove(&foo); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))) + .unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + jdb.remove(&foo); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&foo)); + } + + #[test] + fn reopen_test() { + let mut jdb = new_db(); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit_batch(6, &keccak(b"6"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit_batch(7, &keccak(b"7"), Some((3, keccak(b"3")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + let _ = ::env_logger::try_init(); + + let shared_db = Arc::new(kvdb_memorydb::create(0)); + let foo = keccak(b"foo"); + + { + let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); + // history is 1 + jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.remove(&foo); + jdb.commit_batch(2, &keccak(b"2"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + jdb.insert(b"foo"); + jdb.commit_batch(3, &keccak(b"3"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + // incantation to reopen the db + }; + { + let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); + + jdb.remove(&foo); + jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + // incantation to reopen the db + }; + { + let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); + + jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + + // incantation to reopen the db + }; + { + let mut jdb = OverlayRecentDB::new(shared_db, None); + + jdb.commit_batch(6, &keccak(b"6"), Some((4, 
keccak(b"4")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.contains(&foo)); + } + } + + #[test] + fn reopen_fork() { + let shared_db = Arc::new(kvdb_memorydb::create(0)); + + let (foo, bar, baz) = { + let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + (foo, bar, baz) + }; + + { + let mut jdb = OverlayRecentDB::new(shared_db, None); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&baz)); + assert!(!jdb.contains(&bar)); + } + } + + #[test] + fn insert_older_era() { + let mut jdb = new_db(); + let foo = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0a"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let bar = jdb.insert(b"bar"); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0a")))) + .unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit_batch(0, &keccak(b"0b"), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + } + + #[test] + fn inject() { + let mut jdb = new_db(); + let key = jdb.insert(b"dog"); + jdb.inject_batch().unwrap(); + + assert_eq!(jdb.get(&key).unwrap(), DBValue::from_slice(b"dog")); + jdb.remove(&key); + jdb.inject_batch().unwrap(); + + assert!(jdb.get(&key).is_none()); + } + + #[test] + fn earliest_era() { + let shared_db = 
Arc::new(kvdb_memorydb::create(0)); + + // empty DB + let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); + assert!(jdb.earliest_era().is_none()); + + // single journalled era. + let _key = jdb.insert(b"hello!"); + let mut batch = jdb.backing().transaction(); + jdb.journal_under(&mut batch, 0, &keccak(b"0")).unwrap(); + jdb.backing().write_buffered(batch); + + assert_eq!(jdb.earliest_era(), Some(0)); + + // second journalled era. + let mut batch = jdb.backing().transaction(); + jdb.journal_under(&mut batch, 1, &keccak(b"1")).unwrap(); + jdb.backing().write_buffered(batch); + + assert_eq!(jdb.earliest_era(), Some(0)); + + // single journalled era. + let mut batch = jdb.backing().transaction(); + jdb.mark_canonical(&mut batch, 0, &keccak(b"0")).unwrap(); + jdb.backing().write_buffered(batch); + + assert_eq!(jdb.earliest_era(), Some(1)); + + // no journalled eras. + let mut batch = jdb.backing().transaction(); + jdb.mark_canonical(&mut batch, 1, &keccak(b"1")).unwrap(); + jdb.backing().write_buffered(batch); + + assert_eq!(jdb.earliest_era(), Some(1)); + + // reconstructed: no journal entries. + drop(jdb); + let jdb = OverlayRecentDB::new(shared_db, None); + assert_eq!(jdb.earliest_era(), None); + } } diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs index bdd396a48..7e83e396c 100644 --- a/util/journaldb/src/refcounteddb.rs +++ b/util/journaldb/src/refcounteddb.rs @@ -16,22 +16,19 @@ //! Disk-backed, ref-counted `JournalDB` implementation. 
-use std::collections::HashMap; -use std::io; -use std::sync::Arc; +use std::{collections::HashMap, io, sync::Arc}; +use super::{traits::JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY}; use bytes::Bytes; use ethereum_types::H256; -use hash_db::{HashDB}; +use hash_db::HashDB; use heapsize::HeapSizeOf; use keccak_hasher::KeccakHasher; -use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use kvdb::{DBTransaction, DBValue, KeyValueDB}; use memory_db::MemoryDB; use overlaydb::OverlayDB; -use rlp::{encode, decode}; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; -use super::traits::JournalDB; -use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; +use rlp::{decode, encode}; +use util::{DatabaseKey, DatabaseValueRef, DatabaseValueView}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. @@ -54,293 +51,335 @@ use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; /// of its inserts otherwise. // TODO: store last_era, reclaim_period. pub struct RefCountedDB { - forward: OverlayDB, - backing: Arc, - latest_era: Option, - inserts: Vec, - removes: Vec, - column: Option, + forward: OverlayDB, + backing: Arc, + latest_era: Option, + inserts: Vec, + removes: Vec, + column: Option, } impl RefCountedDB { - /// Create a new instance given a `backing` database. - pub fn new(backing: Arc, column: Option) -> RefCountedDB { - let latest_era = backing.get(column, &LATEST_ERA_KEY) - .expect("Low-level database error.") - .map(|v| decode::(&v).expect("decoding db value failed")); + /// Create a new instance given a `backing` database. 
+ pub fn new(backing: Arc, column: Option) -> RefCountedDB { + let latest_era = backing + .get(column, &LATEST_ERA_KEY) + .expect("Low-level database error.") + .map(|v| decode::(&v).expect("decoding db value failed")); - RefCountedDB { - forward: OverlayDB::new(backing.clone(), column), - backing, - inserts: vec![], - removes: vec![], - latest_era, - column, - } - } + RefCountedDB { + forward: OverlayDB::new(backing.clone(), column), + backing, + inserts: vec![], + removes: vec![], + latest_era, + column, + } + } } impl HashDB for RefCountedDB { - fn get(&self, key: &H256) -> Option { self.forward.get(key) } - fn contains(&self, key: &H256) -> bool { self.forward.contains(key) } - fn insert(&mut self, value: &[u8]) -> H256 { let r = self.forward.insert(value); self.inserts.push(r.clone()); r } - fn emplace(&mut self, key: H256, value: DBValue) { self.inserts.push(key.clone()); self.forward.emplace(key, value); } - fn remove(&mut self, key: &H256) { self.removes.push(key.clone()); } + fn get(&self, key: &H256) -> Option { + self.forward.get(key) + } + fn contains(&self, key: &H256) -> bool { + self.forward.contains(key) + } + fn insert(&mut self, value: &[u8]) -> H256 { + let r = self.forward.insert(value); + self.inserts.push(r.clone()); + r + } + fn emplace(&mut self, key: H256, value: DBValue) { + self.inserts.push(key.clone()); + self.forward.emplace(key, value); + } + fn remove(&mut self, key: &H256) { + self.removes.push(key.clone()); + } } impl ::traits::KeyedHashDB for RefCountedDB { - fn keys(&self) -> HashMap { self.forward.keys() } + fn keys(&self) -> HashMap { + self.forward.keys() + } } impl JournalDB for RefCountedDB { - fn boxed_clone(&self) -> Box { - Box::new(RefCountedDB { - forward: self.forward.clone(), - backing: self.backing.clone(), - latest_era: self.latest_era, - inserts: self.inserts.clone(), - removes: self.removes.clone(), - column: self.column.clone(), - }) - } + fn boxed_clone(&self) -> Box { + Box::new(RefCountedDB { + forward: 
self.forward.clone(), + backing: self.backing.clone(), + latest_era: self.latest_era, + inserts: self.inserts.clone(), + removes: self.removes.clone(), + column: self.column.clone(), + }) + } - fn mem_used(&self) -> usize { - self.inserts.heap_size_of_children() + self.removes.heap_size_of_children() - } + fn mem_used(&self) -> usize { + self.inserts.heap_size_of_children() + self.removes.heap_size_of_children() + } - fn is_empty(&self) -> bool { - self.latest_era.is_none() - } + fn is_empty(&self) -> bool { + self.latest_era.is_none() + } - fn backing(&self) -> &Arc { - &self.backing - } + fn backing(&self) -> &Arc { + &self.backing + } - fn latest_era(&self) -> Option { self.latest_era } + fn latest_era(&self) -> Option { + self.latest_era + } - fn state(&self, id: &H256) -> Option { - self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) - } + fn state(&self, id: &H256) -> Option { + self.backing + .get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]) + .map(|b| b.into_vec()) + } - fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result { - // record new commit's details. - let mut db_key = DatabaseKey { - era: now, - index: 0usize, - }; - let mut last; + fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result { + // record new commit's details. + let mut db_key = DatabaseKey { + era: now, + index: 0usize, + }; + let mut last; - while self.backing.get(self.column, { - last = encode(&db_key); - &last - })?.is_some() { - db_key.index += 1; - } + while self + .backing + .get(self.column, { + last = encode(&db_key); + &last + })? 
+ .is_some() + { + db_key.index += 1; + } - { - let value_ref = DatabaseValueRef { - id, - inserts: &self.inserts, - deletes: &self.removes, - }; + { + let value_ref = DatabaseValueRef { + id, + inserts: &self.inserts, + deletes: &self.removes, + }; - batch.put(self.column, &last, &encode(&value_ref)); - } + batch.put(self.column, &last, &encode(&value_ref)); + } - let ops = self.inserts.len() + self.removes.len(); + let ops = self.inserts.len() + self.removes.len(); - trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); + trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); - self.inserts.clear(); - self.removes.clear(); + self.inserts.clear(); + self.removes.clear(); - if self.latest_era.map_or(true, |e| now > e) { - batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); - self.latest_era = Some(now); - } + if self.latest_era.map_or(true, |e| now > e) { + batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); + self.latest_era = Some(now); + } - Ok(ops as u32) - } + Ok(ops as u32) + } - fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result { - // apply old commits' details - let mut db_key = DatabaseKey { - era: end_era, - index: 0usize, - }; - let mut last; - while let Some(rlp_data) = { - self.backing.get(self.column, { - last = encode(&db_key); - &last - })? 
- } { - let view = DatabaseValueView::from_rlp(&rlp_data); - let our_id = view.id().expect("rlp read from db; qed"); - let to_remove = if canon_id == &our_id { - view.deletes() - } else { - view.inserts() - }.expect("rlp read from db; qed"); - trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); - for i in &to_remove { - self.forward.remove(i); - } - batch.delete(self.column, &last); - db_key.index += 1; - } + fn mark_canonical( + &mut self, + batch: &mut DBTransaction, + end_era: u64, + canon_id: &H256, + ) -> io::Result { + // apply old commits' details + let mut db_key = DatabaseKey { + era: end_era, + index: 0usize, + }; + let mut last; + while let Some(rlp_data) = { + self.backing.get(self.column, { + last = encode(&db_key); + &last + })? + } { + let view = DatabaseValueView::from_rlp(&rlp_data); + let our_id = view.id().expect("rlp read from db; qed"); + let to_remove = if canon_id == &our_id { + view.deletes() + } else { + view.inserts() + } + .expect("rlp read from db; qed"); + trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); + for i in &to_remove { + self.forward.remove(i); + } + batch.delete(self.column, &last); + db_key.index += 1; + } - let r = self.forward.commit_to_batch(batch)?; - Ok(r) - } + let r = self.forward.commit_to_batch(batch)?; + Ok(r) + } - fn inject(&mut self, batch: &mut DBTransaction) -> io::Result { - self.inserts.clear(); - for remove in self.removes.drain(..) { - self.forward.remove(&remove); - } - self.forward.commit_to_batch(batch) - } + fn inject(&mut self, batch: &mut DBTransaction) -> io::Result { + self.inserts.clear(); + for remove in self.removes.drain(..) 
{ + self.forward.remove(&remove); + } + self.forward.commit_to_batch(batch) + } - fn consolidate(&mut self, mut with: MemoryDB) { - for (key, (value, rc)) in with.drain() { - for _ in 0..rc { - self.emplace(key, value.clone()); - } + fn consolidate(&mut self, mut with: MemoryDB) { + for (key, (value, rc)) in with.drain() { + for _ in 0..rc { + self.emplace(key, value.clone()); + } - for _ in rc..0 { - self.remove(&key); - } - } - } + for _ in rc..0 { + self.remove(&key); + } + } + } } #[cfg(test)] mod tests { - use keccak::keccak; - use hash_db::HashDB; - use super::*; - use {JournalDB, kvdb_memorydb}; + use super::*; + use hash_db::HashDB; + use keccak::keccak; + use kvdb_memorydb; + use JournalDB; - fn new_db() -> RefCountedDB { - let backing = Arc::new(kvdb_memorydb::create(0)); - RefCountedDB::new(backing, None) - } + fn new_db() -> RefCountedDB { + let backing = Arc::new(kvdb_memorydb::create(0)); + RefCountedDB::new(backing, None) + } - #[test] - fn long_history() { - // history is 3 - let mut jdb = new_db(); - let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.contains(&h)); - jdb.remove(&h); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert!(jdb.contains(&h)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert!(jdb.contains(&h)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.contains(&h)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); - assert!(!jdb.contains(&h)); - } + #[test] + fn long_history() { + // history is 3 + let mut jdb = new_db(); + let h = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.contains(&h)); + jdb.remove(&h); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert!(jdb.contains(&h)); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert!(jdb.contains(&h)); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))) + .unwrap(); + 
assert!(jdb.contains(&h)); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(!jdb.contains(&h)); + } - #[test] - fn latest_era_should_work() { - // history is 3 - let mut jdb = new_db(); - assert_eq!(jdb.latest_era(), None); - let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert_eq!(jdb.latest_era(), Some(0)); - jdb.remove(&h); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - assert_eq!(jdb.latest_era(), Some(1)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - assert_eq!(jdb.latest_era(), Some(2)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); - assert_eq!(jdb.latest_era(), Some(3)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); - assert_eq!(jdb.latest_era(), Some(4)); - } + #[test] + fn latest_era_should_work() { + // history is 3 + let mut jdb = new_db(); + assert_eq!(jdb.latest_era(), None); + let h = jdb.insert(b"foo"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert_eq!(jdb.latest_era(), Some(0)); + jdb.remove(&h); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + assert_eq!(jdb.latest_era(), Some(1)); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + assert_eq!(jdb.latest_era(), Some(2)); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))) + .unwrap(); + assert_eq!(jdb.latest_era(), Some(3)); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))) + .unwrap(); + assert_eq!(jdb.latest_era(), Some(4)); + } - #[test] - fn complex() { - // history is 1 - let mut jdb = new_db(); + #[test] + fn complex() { + // history is 1 + let mut jdb = new_db(); - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.contains(&foo)); + 
assert!(jdb.contains(&bar)); - jdb.remove(&foo); - jdb.remove(&bar); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - assert!(jdb.contains(&baz)); + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))) + .unwrap(); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + assert!(jdb.contains(&baz)); - let foo = jdb.insert(b"foo"); - jdb.remove(&baz); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&bar)); - assert!(jdb.contains(&baz)); + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))) + .unwrap(); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&bar)); + assert!(jdb.contains(&baz)); - jdb.remove(&foo); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&bar)); - assert!(!jdb.contains(&baz)); + jdb.remove(&foo); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))) + .unwrap(); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&bar)); + assert!(!jdb.contains(&baz)); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); - assert!(!jdb.contains(&foo)); - assert!(!jdb.contains(&bar)); - assert!(!jdb.contains(&baz)); - } + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))) + .unwrap(); + assert!(!jdb.contains(&foo)); + assert!(!jdb.contains(&bar)); + assert!(!jdb.contains(&baz)); + } - #[test] - fn fork() { - // history is 1 - let mut jdb = new_db(); + #[test] + fn fork() { + // history is 1 + let mut jdb = new_db(); - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); + let foo = 
jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))) + .unwrap(); - jdb.remove(&bar); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + jdb.remove(&bar); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))) + .unwrap(); - assert!(jdb.contains(&foo)); - assert!(jdb.contains(&bar)); - assert!(jdb.contains(&baz)); + assert!(jdb.contains(&foo)); + assert!(jdb.contains(&bar)); + assert!(jdb.contains(&baz)); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); - assert!(jdb.contains(&foo)); - assert!(!jdb.contains(&baz)); - assert!(!jdb.contains(&bar)); - } + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))) + .unwrap(); + assert!(jdb.contains(&foo)); + assert!(!jdb.contains(&baz)); + assert!(!jdb.contains(&bar)); + } - #[test] - fn inject() { - let mut jdb = new_db(); - let key = jdb.insert(b"dog"); - jdb.inject_batch().unwrap(); + #[test] + fn inject() { + let mut jdb = new_db(); + let key = jdb.insert(b"dog"); + jdb.inject_batch().unwrap(); - assert_eq!(jdb.get(&key).unwrap(), DBValue::from_slice(b"dog")); - jdb.remove(&key); - jdb.inject_batch().unwrap(); + assert_eq!(jdb.get(&key).unwrap(), DBValue::from_slice(b"dog")); + jdb.remove(&key); + jdb.inject_batch().unwrap(); - assert!(jdb.get(&key).is_none()); - } + assert!(jdb.get(&key).is_none()); + } } diff --git a/util/journaldb/src/traits.rs b/util/journaldb/src/traits.rs index e6ce8acb0..da0b656c4 100644 --- a/util/journaldb/src/traits.rs +++ b/util/journaldb/src/traits.rs @@ -16,105 +16,109 @@ //! Disk-backed `HashDB` implementation. 
-use std::io; -use std::sync::Arc; +use std::{io, sync::Arc}; use bytes::Bytes; use ethereum_types::H256; -use hash_db::{HashDB, AsHashDB}; +use hash_db::{AsHashDB, HashDB}; use keccak_hasher::KeccakHasher; use kvdb::{self, DBTransaction, DBValue}; use std::collections::HashMap; - /// expose keys of a hashDB for debugging or tests (slow). pub trait KeyedHashDB: HashDB { - /// Primarily use for tests, highly inefficient. - fn keys(&self) -> HashMap; + /// Primarily use for tests, highly inefficient. + fn keys(&self) -> HashMap; } /// Upcast to `KeyedHashDB` pub trait AsKeyedHashDB: AsHashDB { - /// Perform upcast to KeyedHashDB. - fn as_keyed_hash_db(&self) -> &KeyedHashDB; + /// Perform upcast to KeyedHashDB. + fn as_keyed_hash_db(&self) -> &KeyedHashDB; } /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. pub trait JournalDB: KeyedHashDB { + /// Return a copy of ourself, in a box. + fn boxed_clone(&self) -> Box; - /// Return a copy of ourself, in a box. - fn boxed_clone(&self) -> Box; + /// Returns heap memory size used + fn mem_used(&self) -> usize; - /// Returns heap memory size used - fn mem_used(&self) -> usize; + /// Returns the size of journalled state in memory. + /// This function has a considerable speed requirement -- + /// it must be fast enough to call several times per block imported. + fn journal_size(&self) -> usize { + 0 + } - /// Returns the size of journalled state in memory. - /// This function has a considerable speed requirement -- - /// it must be fast enough to call several times per block imported. - fn journal_size(&self) -> usize { 0 } + /// Check if this database has any commits + fn is_empty(&self) -> bool; - /// Check if this database has any commits - fn is_empty(&self) -> bool; + /// Get the earliest era in the DB. None if there isn't yet any data in there. + fn earliest_era(&self) -> Option { + None + } - /// Get the earliest era in the DB. 
None if there isn't yet any data in there. - fn earliest_era(&self) -> Option { None } + /// Get the latest era in the DB. None if there isn't yet any data in there. + fn latest_era(&self) -> Option; - /// Get the latest era in the DB. None if there isn't yet any data in there. - fn latest_era(&self) -> Option; + /// Journal recent database operations as being associated with a given era and id. + // TODO: give the overlay to this function so journaldbs don't manage the overlays themeselves. + fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result; - /// Journal recent database operations as being associated with a given era and id. - // TODO: give the overlay to this function so journaldbs don't manage the overlays themeselves. - fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result; + /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out. + fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) + -> io::Result; - /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out. - fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> io::Result; + /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions + /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. + /// + /// Any keys or values inserted or deleted must be completely independent of those affected + /// by any previous `commit` operations. Essentially, this means that `inject` can be used + /// either to restore a state to a fresh database, or to insert data which may only be journalled + /// from this point onwards. 
+ fn inject(&mut self, batch: &mut DBTransaction) -> io::Result; - /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions - /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. - /// - /// Any keys or values inserted or deleted must be completely independent of those affected - /// by any previous `commit` operations. Essentially, this means that `inject` can be used - /// either to restore a state to a fresh database, or to insert data which may only be journalled - /// from this point onwards. - fn inject(&mut self, batch: &mut DBTransaction) -> io::Result; + /// State data query + fn state(&self, _id: &H256) -> Option; - /// State data query - fn state(&self, _id: &H256) -> Option; + /// Whether this database is pruned. + fn is_pruned(&self) -> bool { + true + } - /// Whether this database is pruned. - fn is_pruned(&self) -> bool { true } + /// Get backing database. + fn backing(&self) -> &Arc; - /// Get backing database. - fn backing(&self) -> &Arc; + /// Clear internal strucutres. This should called after changes have been written + /// to the backing strage + fn flush(&self) {} - /// Clear internal strucutres. This should called after changes have been written - /// to the backing strage - fn flush(&self) {} + /// Consolidate all the insertions and deletions in the given memory overlay. + fn consolidate(&mut self, overlay: ::memory_db::MemoryDB); - /// Consolidate all the insertions and deletions in the given memory overlay. 
- fn consolidate(&mut self, overlay: ::memory_db::MemoryDB); + /// Commit all changes in a single batch + #[cfg(test)] + fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> io::Result { + let mut batch = self.backing().transaction(); + let mut ops = self.journal_under(&mut batch, now, id)?; - /// Commit all changes in a single batch - #[cfg(test)] - fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> io::Result { - let mut batch = self.backing().transaction(); - let mut ops = self.journal_under(&mut batch, now, id)?; + if let Some((end_era, canon_id)) = end { + ops += self.mark_canonical(&mut batch, end_era, &canon_id)?; + } - if let Some((end_era, canon_id)) = end { - ops += self.mark_canonical(&mut batch, end_era, &canon_id)?; - } + let result = self.backing().write(batch).map(|_| ops).map_err(Into::into); + self.flush(); + result + } - let result = self.backing().write(batch).map(|_| ops).map_err(Into::into); - self.flush(); - result - } - - /// Inject all changes in a single batch. - #[cfg(test)] - fn inject_batch(&mut self) -> io::Result { - let mut batch = self.backing().transaction(); - let res = self.inject(&mut batch)?; - self.backing().write(batch).map(|_| res).map_err(Into::into) - } + /// Inject all changes in a single batch. + #[cfg(test)] + fn inject_batch(&mut self) -> io::Result { + let mut batch = self.backing().transaction(); + let res = self.inject(&mut batch)?; + self.backing().write(batch).map(|_| res).map_err(Into::into) + } } diff --git a/util/journaldb/src/util.rs b/util/journaldb/src/util.rs index 11d329595..a93ca5dff 100644 --- a/util/journaldb/src/util.rs +++ b/util/journaldb/src/util.rs @@ -15,62 +15,62 @@ // along with Parity Ethereum. If not, see . 
use ethereum_types::H256; -use rlp::{RlpStream, Encodable, Rlp, DecoderError}; +use rlp::{DecoderError, Encodable, Rlp, RlpStream}; -const PADDING : [u8; 10] = [ 0u8; 10 ]; +const PADDING: [u8; 10] = [0u8; 10]; pub struct DatabaseKey { - pub era: u64, - pub index: usize, + pub era: u64, + pub index: usize, } impl Encodable for DatabaseKey { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3); - s.append(&self.era); - s.append(&self.index); - s.append(&&PADDING[..]); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3); + s.append(&self.era); + s.append(&self.index); + s.append(&&PADDING[..]); + } } pub struct DatabaseValueView<'a> { - rlp: Rlp<'a>, + rlp: Rlp<'a>, } impl<'a> DatabaseValueView<'a> { - pub fn from_rlp(data: &'a [u8]) -> Self { - DatabaseValueView { - rlp: Rlp::new(data), - } - } + pub fn from_rlp(data: &'a [u8]) -> Self { + DatabaseValueView { + rlp: Rlp::new(data), + } + } - #[inline] - pub fn id(&self) -> Result { - self.rlp.val_at(0) - } + #[inline] + pub fn id(&self) -> Result { + self.rlp.val_at(0) + } - #[inline] - pub fn inserts(&self) -> Result, DecoderError> { - self.rlp.list_at(1) - } + #[inline] + pub fn inserts(&self) -> Result, DecoderError> { + self.rlp.list_at(1) + } - #[inline] - pub fn deletes(&self) -> Result, DecoderError> { - self.rlp.list_at(2) - } + #[inline] + pub fn deletes(&self) -> Result, DecoderError> { + self.rlp.list_at(2) + } } pub struct DatabaseValueRef<'a> { - pub id: &'a H256, - pub inserts: &'a [H256], - pub deletes: &'a [H256], + pub id: &'a H256, + pub inserts: &'a [H256], + pub deletes: &'a [H256], } impl<'a> Encodable for DatabaseValueRef<'a> { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3); - s.append(self.id); - s.append_list(self.inserts); - s.append_list(self.deletes); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3); + s.append(self.id); + s.append_list(self.inserts); + s.append_list(self.deletes); + } } diff --git a/util/keccak-hasher/src/lib.rs 
b/util/keccak-hasher/src/lib.rs index d9c9be545..a205729bb 100644 --- a/util/keccak-hasher/src/lib.rs +++ b/util/keccak-hasher/src/lib.rs @@ -15,25 +15,25 @@ // along with Parity Ethereum. If not, see . //! Hasher implementation for the Keccak-256 hash -extern crate hash_db; extern crate ethereum_types; -extern crate tiny_keccak; +extern crate hash_db; extern crate plain_hasher; +extern crate tiny_keccak; -use hash_db::Hasher; use ethereum_types::H256; -use tiny_keccak::Keccak; +use hash_db::Hasher; use plain_hasher::PlainHasher; +use tiny_keccak::Keccak; /// Concrete `Hasher` impl for the Keccak-256 hash #[derive(Default, Debug, Clone, PartialEq)] pub struct KeccakHasher; impl Hasher for KeccakHasher { - type Out = H256; - type StdHasher = PlainHasher; - const LENGTH: usize = 32; - fn hash(x: &[u8]) -> Self::Out { - let mut out = [0;32]; - Keccak::keccak256(x, &mut out); - out.into() - } + type Out = H256; + type StdHasher = PlainHasher; + const LENGTH: usize = 32; + fn hash(x: &[u8]) -> Self::Out { + let mut out = [0; 32]; + Keccak::keccak256(x, &mut out); + out.into() + } } diff --git a/util/len-caching-lock/src/lib.rs b/util/len-caching-lock/src/lib.rs index f2c04e454..c3e4d436e 100644 --- a/util/len-caching-lock/src/lib.rs +++ b/util/len-caching-lock/src/lib.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -//! This crate allows automatic caching of `T.len()` with an api that +//! This crate allows automatic caching of `T.len()` with an api that //! allows drop in replacement for `parking_lot` //! [`Mutex`](../lock_api/struct.Mutex.html) //! and [`RwLock`](../lock_api/struct.RwLock.html) for most common use-cases. //! -//! This crate implements `Len` for the following types: +//! This crate implements `Len` for the following types: //! `std::collections::{VecDeque, LinkedList, HashMap, BTreeMap, HashSet, BTreeSet, BinaryHeap}` //! //! ## Example @@ -38,8 +38,10 @@ //! 
``` extern crate parking_lot; -use std::collections::{VecDeque, LinkedList, HashMap, BTreeMap, HashSet, BTreeSet, BinaryHeap}; -use std::hash::Hash; +use std::{ + collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, LinkedList, VecDeque}, + hash::Hash, +}; pub mod mutex; pub mod rwlock; @@ -51,37 +53,53 @@ pub use rwlock::LenCachingRwLock; /// with [`LenCachingMutex`](mutex/struct.LenCachingMutex.html) /// or [`LenCachingRwLock`](rwlock/struct.LenCachingRwLock.html) pub trait Len { - fn len(&self) -> usize; + fn len(&self) -> usize; } impl Len for Vec { - fn len(&self) -> usize { Vec::len(self) } + fn len(&self) -> usize { + Vec::len(self) + } } impl Len for VecDeque { - fn len(&self) -> usize { VecDeque::len(self) } + fn len(&self) -> usize { + VecDeque::len(self) + } } impl Len for LinkedList { - fn len(&self) -> usize { LinkedList::len(self) } + fn len(&self) -> usize { + LinkedList::len(self) + } } impl Len for HashMap { - fn len(&self) -> usize { HashMap::len(self) } + fn len(&self) -> usize { + HashMap::len(self) + } } impl Len for BTreeMap { - fn len(&self) -> usize { BTreeMap::len(self) } + fn len(&self) -> usize { + BTreeMap::len(self) + } } impl Len for HashSet { - fn len(&self) -> usize { HashSet::len(self) } + fn len(&self) -> usize { + HashSet::len(self) + } } impl Len for BTreeSet { - fn len(&self) -> usize { BTreeSet::len(self) } + fn len(&self) -> usize { + BTreeSet::len(self) + } } impl Len for BinaryHeap { - fn len(&self) -> usize { BinaryHeap::len(self) } + fn len(&self) -> usize { + BinaryHeap::len(self) + } } diff --git a/util/len-caching-lock/src/mutex.rs b/util/len-caching-lock/src/mutex.rs index c9a2ea02a..a54ab4e9f 100644 --- a/util/len-caching-lock/src/mutex.rs +++ b/util/len-caching-lock/src/mutex.rs @@ -14,137 +14,138 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::ops::{Deref, DerefMut}; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; +use std::{ + ops::{Deref, DerefMut}, + sync::atomic::{AtomicUsize, Ordering}, +}; use parking_lot::{Mutex, MutexGuard}; use Len; -/// Can be used in place of a [`Mutex`](../../lock_api/struct.Mutex.html) where reading `T`'s `len()` without -/// needing to lock, is advantageous. +/// Can be used in place of a [`Mutex`](../../lock_api/struct.Mutex.html) where reading `T`'s `len()` without +/// needing to lock, is advantageous. /// When the Guard is released, `T`'s `len()` will be cached. /// The cached `len()` may be at most 1 lock behind current state. #[derive(Debug)] pub struct LenCachingMutex { - len: AtomicUsize, - data: Mutex, + len: AtomicUsize, + data: Mutex, } impl Default for LenCachingMutex { - fn default() -> Self { - LenCachingMutex::new(T::default()) - } + fn default() -> Self { + LenCachingMutex::new(T::default()) + } } impl From for LenCachingMutex { - fn from(data: T) -> Self { - LenCachingMutex::new(data) - } + fn from(data: T) -> Self { + LenCachingMutex::new(data) + } } impl LenCachingMutex { - /// Constructs a new LenCachingMutex - pub fn new(data: T) -> Self { - LenCachingMutex { - len: AtomicUsize::new(data.len()), - data: Mutex::new(data), - } - } + /// Constructs a new LenCachingMutex + pub fn new(data: T) -> Self { + LenCachingMutex { + len: AtomicUsize::new(data.len()), + data: Mutex::new(data), + } + } } impl LenCachingMutex { - /// Load the cached value that was returned from your `T`'s `len()` - /// subsequent to the most recent lock being released. - pub fn load_len(&self) -> usize { - self.len.load(Ordering::SeqCst) - } + /// Load the cached value that was returned from your `T`'s `len()` + /// subsequent to the most recent lock being released. + pub fn load_len(&self) -> usize { + self.len.load(Ordering::SeqCst) + } - /// Delegates to `parking_lot::Mutex` - /// [`lock()`](../../lock_api/struct.Mutex.html#method.lock). 
- pub fn lock(&self) -> CachingMutexGuard { - CachingMutexGuard { - mutex_guard: self.data.lock(), - len: &self.len, - } - } + /// Delegates to `parking_lot::Mutex` + /// [`lock()`](../../lock_api/struct.Mutex.html#method.lock). + pub fn lock(&self) -> CachingMutexGuard { + CachingMutexGuard { + mutex_guard: self.data.lock(), + len: &self.len, + } + } - /// Delegates to `parking_lot::Mutex` - /// [`try_lock()`](../../lock_api/struct.Mutex.html#method.try_lock). - pub fn try_lock(&self) -> Option> { - Some(CachingMutexGuard { - mutex_guard: self.data.try_lock()?, - len: &self.len, - }) - } + /// Delegates to `parking_lot::Mutex` + /// [`try_lock()`](../../lock_api/struct.Mutex.html#method.try_lock). + pub fn try_lock(&self) -> Option> { + Some(CachingMutexGuard { + mutex_guard: self.data.try_lock()?, + len: &self.len, + }) + } } /// Guard comprising `MutexGuard` and `AtomicUsize` for cache pub struct CachingMutexGuard<'a, T: Len + 'a + ?Sized> { - mutex_guard: MutexGuard<'a, T>, - len: &'a AtomicUsize, + mutex_guard: MutexGuard<'a, T>, + len: &'a AtomicUsize, } impl<'a, T: Len + ?Sized> CachingMutexGuard<'a, T> { - /// Returns a mutable reference to the contained - /// [`MutexGuard`](../../parking_lot/mutex/type.MutexGuard.html) - pub fn inner_mut(&mut self) -> &mut MutexGuard<'a, T> { - &mut self.mutex_guard - } + /// Returns a mutable reference to the contained + /// [`MutexGuard`](../../parking_lot/mutex/type.MutexGuard.html) + pub fn inner_mut(&mut self) -> &mut MutexGuard<'a, T> { + &mut self.mutex_guard + } - /// Returns a non-mutable reference to the contained - /// [`MutexGuard`](../../parking_lot/mutex/type.MutexGuard.html) - pub fn inner(&self) -> &MutexGuard<'a, T> { - &self.mutex_guard - } + /// Returns a non-mutable reference to the contained + /// [`MutexGuard`](../../parking_lot/mutex/type.MutexGuard.html) + pub fn inner(&self) -> &MutexGuard<'a, T> { + &self.mutex_guard + } } impl<'a, T: Len + ?Sized> Drop for CachingMutexGuard<'a, T> { - fn 
drop(&mut self) { - self.len.store(self.mutex_guard.len(), Ordering::SeqCst); - } + fn drop(&mut self) { + self.len.store(self.mutex_guard.len(), Ordering::SeqCst); + } } impl<'a, T: Len + ?Sized> Deref for CachingMutexGuard<'a, T> { - type Target = T; - fn deref(&self) -> &T { - self.mutex_guard.deref() - } + type Target = T; + fn deref(&self) -> &T { + self.mutex_guard.deref() + } } impl<'a, T: Len + ?Sized> DerefMut for CachingMutexGuard<'a, T> { - fn deref_mut(&mut self) -> &mut T { - self.mutex_guard.deref_mut() - } + fn deref_mut(&mut self) -> &mut T { + self.mutex_guard.deref_mut() + } } #[cfg(test)] mod tests { - use super::*; - use std::collections::VecDeque; + use super::*; + use std::collections::VecDeque; - #[test] - fn caches_len() { - let v = vec![1,2,3]; - let lcm = LenCachingMutex::new(v); - assert_eq!(lcm.load_len(), 3); - lcm.lock().push(4); - assert_eq!(lcm.load_len(), 4); - } + #[test] + fn caches_len() { + let v = vec![1, 2, 3]; + let lcm = LenCachingMutex::new(v); + assert_eq!(lcm.load_len(), 3); + lcm.lock().push(4); + assert_eq!(lcm.load_len(), 4); + } - #[test] - fn works_with_vec() { - let v: Vec = Vec::new(); - let lcm = LenCachingMutex::new(v); - assert!(lcm.lock().is_empty()); - } + #[test] + fn works_with_vec() { + let v: Vec = Vec::new(); + let lcm = LenCachingMutex::new(v); + assert!(lcm.lock().is_empty()); + } - #[test] - fn works_with_vecdeque() { - let v: VecDeque = VecDeque::new(); - let lcm = LenCachingMutex::new(v); - lcm.lock().push_front(4); - assert_eq!(lcm.load_len(), 1); - } + #[test] + fn works_with_vecdeque() { + let v: VecDeque = VecDeque::new(); + let lcm = LenCachingMutex::new(v); + lcm.lock().push_front(4); + assert_eq!(lcm.load_len(), 1); + } } diff --git a/util/len-caching-lock/src/rwlock.rs b/util/len-caching-lock/src/rwlock.rs index 3593cbcf7..419127430 100644 --- a/util/len-caching-lock/src/rwlock.rs +++ b/util/len-caching-lock/src/rwlock.rs @@ -14,155 +14,156 @@ // You should have received a copy of the GNU 
General Public License // along with Parity Ethereum. If not, see . -use std::ops::{Deref, DerefMut}; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; +use std::{ + ops::{Deref, DerefMut}, + sync::atomic::{AtomicUsize, Ordering}, +}; use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use Len; -/// Can be used in place of a [`RwLock`](../../lock_api/struct.RwLock.html) where -/// reading `T`'s `len()` without needing to lock, is advantageous. +/// Can be used in place of a [`RwLock`](../../lock_api/struct.RwLock.html) where +/// reading `T`'s `len()` without needing to lock, is advantageous. /// When the WriteGuard is released, `T`'s `len()` will be cached. #[derive(Debug)] pub struct LenCachingRwLock { - len: AtomicUsize, - data: RwLock, + len: AtomicUsize, + data: RwLock, } impl Default for LenCachingRwLock { - fn default() -> Self { - LenCachingRwLock::new(T::default()) - } + fn default() -> Self { + LenCachingRwLock::new(T::default()) + } } impl From for LenCachingRwLock { - fn from(data: T) -> Self { - LenCachingRwLock::new(data) - } + fn from(data: T) -> Self { + LenCachingRwLock::new(data) + } } impl LenCachingRwLock { - /// Constructs a new LenCachingRwLock - pub fn new(data: T) -> Self { - LenCachingRwLock { - len: AtomicUsize::new(data.len()), - data: RwLock::new(data), - } - } + /// Constructs a new LenCachingRwLock + pub fn new(data: T) -> Self { + LenCachingRwLock { + len: AtomicUsize::new(data.len()), + data: RwLock::new(data), + } + } } impl LenCachingRwLock { - /// Load the cached value that was returned from your `T`'s `len()` - /// subsequent to the most recent lock being released. - pub fn load_len(&self) -> usize { - self.len.load(Ordering::SeqCst) - } + /// Load the cached value that was returned from your `T`'s `len()` + /// subsequent to the most recent lock being released. 
+ pub fn load_len(&self) -> usize { + self.len.load(Ordering::SeqCst) + } - /// Delegates to `parking_lot::RwLock` - /// [`write()`](../../lock_api/struct.RwLock.html#method.write). - pub fn write(&self) -> CachingRwLockWriteGuard { - CachingRwLockWriteGuard { - write_guard: self.data.write(), - len: &self.len, - } - } + /// Delegates to `parking_lot::RwLock` + /// [`write()`](../../lock_api/struct.RwLock.html#method.write). + pub fn write(&self) -> CachingRwLockWriteGuard { + CachingRwLockWriteGuard { + write_guard: self.data.write(), + len: &self.len, + } + } - /// Delegates to `parking_lot::RwLock` - /// [`try_write()`](../../lock_api/struct.RwLock.html#method.try_write). - pub fn try_write(&self) -> Option> { - Some(CachingRwLockWriteGuard { - write_guard: self.data.try_write()?, - len: &self.len, - }) - } + /// Delegates to `parking_lot::RwLock` + /// [`try_write()`](../../lock_api/struct.RwLock.html#method.try_write). + pub fn try_write(&self) -> Option> { + Some(CachingRwLockWriteGuard { + write_guard: self.data.try_write()?, + len: &self.len, + }) + } - /// Delegates to `parking_lot::RwLock` - /// [`read()`](../../lock_api/struct.RwLock.html#method.read). - pub fn read(&self) -> RwLockReadGuard { - self.data.read() - } + /// Delegates to `parking_lot::RwLock` + /// [`read()`](../../lock_api/struct.RwLock.html#method.read). + pub fn read(&self) -> RwLockReadGuard { + self.data.read() + } - /// Delegates to `parking_lot::RwLock` - /// [`try_read()`](../../lock_api/struct.RwLock.html#method.try_read). - pub fn try_read(&self) -> Option> { - self.data.try_read() - } + /// Delegates to `parking_lot::RwLock` + /// [`try_read()`](../../lock_api/struct.RwLock.html#method.try_read). 
+ pub fn try_read(&self) -> Option> { + self.data.try_read() + } } /// Guard that caches `T`'s `len()` in an `AtomicUsize` when dropped pub struct CachingRwLockWriteGuard<'a, T: Len + 'a + ?Sized> { - write_guard: RwLockWriteGuard<'a, T>, - len: &'a AtomicUsize, + write_guard: RwLockWriteGuard<'a, T>, + len: &'a AtomicUsize, } impl<'a, T: Len + ?Sized> CachingRwLockWriteGuard<'a, T> { - /// Returns a mutable reference to the contained - /// [`RwLockWriteGuard`](../../parking_lot/rwlock/type.RwLockWriteGuard.html) - pub fn inner_mut(&mut self) -> &mut RwLockWriteGuard<'a, T> { - &mut self.write_guard - } + /// Returns a mutable reference to the contained + /// [`RwLockWriteGuard`](../../parking_lot/rwlock/type.RwLockWriteGuard.html) + pub fn inner_mut(&mut self) -> &mut RwLockWriteGuard<'a, T> { + &mut self.write_guard + } - /// Returns a non-mutable reference to the contained - /// [`RwLockWriteGuard`](../../parking_lot/rwlock/type.RwLockWriteGuard.html) - pub fn inner(&self) -> &RwLockWriteGuard<'a, T> { - &self.write_guard - } + /// Returns a non-mutable reference to the contained + /// [`RwLockWriteGuard`](../../parking_lot/rwlock/type.RwLockWriteGuard.html) + pub fn inner(&self) -> &RwLockWriteGuard<'a, T> { + &self.write_guard + } } impl<'a, T: Len + ?Sized> Drop for CachingRwLockWriteGuard<'a, T> { - fn drop(&mut self) { - self.len.store(self.write_guard.len(), Ordering::SeqCst); - } + fn drop(&mut self) { + self.len.store(self.write_guard.len(), Ordering::SeqCst); + } } impl<'a, T: Len + ?Sized> Deref for CachingRwLockWriteGuard<'a, T> { - type Target = T; - fn deref(&self) -> &T { - self.write_guard.deref() - } + type Target = T; + fn deref(&self) -> &T { + self.write_guard.deref() + } } impl<'a, T: Len + ?Sized> DerefMut for CachingRwLockWriteGuard<'a, T> { - fn deref_mut(&mut self) -> &mut T { - self.write_guard.deref_mut() - } + fn deref_mut(&mut self) -> &mut T { + self.write_guard.deref_mut() + } } #[cfg(test)] mod tests { - use super::*; - use 
std::collections::VecDeque; + use super::*; + use std::collections::VecDeque; - #[test] - fn caches_len() { - let v = vec![1,2,3]; - let lcl = LenCachingRwLock::new(v); - assert_eq!(lcl.load_len(), 3); - lcl.write().push(4); - assert_eq!(lcl.load_len(), 4); - } + #[test] + fn caches_len() { + let v = vec![1, 2, 3]; + let lcl = LenCachingRwLock::new(v); + assert_eq!(lcl.load_len(), 3); + lcl.write().push(4); + assert_eq!(lcl.load_len(), 4); + } - #[test] - fn works_with_vec() { - let v: Vec = Vec::new(); - let lcl = LenCachingRwLock::new(v); - assert!(lcl.write().is_empty()); - } + #[test] + fn works_with_vec() { + let v: Vec = Vec::new(); + let lcl = LenCachingRwLock::new(v); + assert!(lcl.write().is_empty()); + } - #[test] - fn works_with_vecdeque() { - let v: VecDeque = VecDeque::new(); - let lcl = LenCachingRwLock::new(v); - lcl.write().push_front(4); - assert_eq!(lcl.load_len(), 1); - } + #[test] + fn works_with_vecdeque() { + let v: VecDeque = VecDeque::new(); + let lcl = LenCachingRwLock::new(v); + lcl.write().push_front(4); + assert_eq!(lcl.load_len(), 1); + } - #[test] - fn read_works() { - let v = vec![1,2,3]; - let lcl = LenCachingRwLock::new(v); - assert_eq!(lcl.read().len(), 3); - } + #[test] + fn read_works() { + let v = vec![1, 2, 3]; + let lcl = LenCachingRwLock::new(v); + assert_eq!(lcl.read().len(), 3); + } } diff --git a/util/macros/src/lib.rs b/util/macros/src/lib.rs index 999c5122e..28ccab98c 100644 --- a/util/macros/src/lib.rs +++ b/util/macros/src/lib.rs @@ -94,11 +94,11 @@ macro_rules! 
flushln { #[doc(hidden)] pub fn flush(s: String) { - let _ = io::Write::write(&mut io::stdout(), s.as_bytes()); - let _ = io::Write::flush(&mut io::stdout()); + let _ = io::Write::write(&mut io::stdout(), s.as_bytes()); + let _ = io::Write::flush(&mut io::stdout()); } #[test] fn test_flush() { - flushln!("hello_world {:?}", 1); + flushln!("hello_world {:?}", 1); } diff --git a/util/memory-cache/src/lib.rs b/util/memory-cache/src/lib.rs index 64d10336f..028a65422 100644 --- a/util/memory-cache/src/lib.rs +++ b/util/memory-cache/src/lib.rs @@ -30,89 +30,89 @@ const INITIAL_CAPACITY: usize = 4; /// An LRU-cache which operates on memory used. pub struct MemoryLruCache { - inner: LruCache, - cur_size: usize, - max_size: usize, + inner: LruCache, + cur_size: usize, + max_size: usize, } // amount of memory used when the item will be put on the heap. fn heap_size_of(val: &T) -> usize { - ::std::mem::size_of::() + val.heap_size_of_children() + ::std::mem::size_of::() + val.heap_size_of_children() } impl MemoryLruCache { - /// Create a new cache with a maximum size in bytes. - pub fn new(max_size: usize) -> Self { - MemoryLruCache { - inner: LruCache::new(INITIAL_CAPACITY), - max_size: max_size, - cur_size: 0, - } - } + /// Create a new cache with a maximum size in bytes. + pub fn new(max_size: usize) -> Self { + MemoryLruCache { + inner: LruCache::new(INITIAL_CAPACITY), + max_size: max_size, + cur_size: 0, + } + } - /// Insert an item. - pub fn insert(&mut self, key: K, val: V) { - let cap = self.inner.capacity(); + /// Insert an item. + pub fn insert(&mut self, key: K, val: V) { + let cap = self.inner.capacity(); - // grow the cache as necessary; it operates on amount of items - // but we're working based on memory usage. - if self.inner.len() == cap && self.cur_size < self.max_size { - self.inner.set_capacity(cap * 2); - } + // grow the cache as necessary; it operates on amount of items + // but we're working based on memory usage. 
+ if self.inner.len() == cap && self.cur_size < self.max_size { + self.inner.set_capacity(cap * 2); + } - self.cur_size += heap_size_of(&val); + self.cur_size += heap_size_of(&val); - // account for any element displaced from the cache. - if let Some(lru) = self.inner.insert(key, val) { - self.cur_size -= heap_size_of(&lru); - } + // account for any element displaced from the cache. + if let Some(lru) = self.inner.insert(key, val) { + self.cur_size -= heap_size_of(&lru); + } - // remove elements until we are below the memory target. - while self.cur_size > self.max_size { - match self.inner.remove_lru() { - Some((_, v)) => self.cur_size -= heap_size_of(&v), - _ => break, - } - } - } + // remove elements until we are below the memory target. + while self.cur_size > self.max_size { + match self.inner.remove_lru() { + Some((_, v)) => self.cur_size -= heap_size_of(&v), + _ => break, + } + } + } - /// Get a reference to an item in the cache. It is a logic error for its - /// heap size to be altered while borrowed. - pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { - self.inner.get_mut(key) - } + /// Get a reference to an item in the cache. It is a logic error for its + /// heap size to be altered while borrowed. + pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { + self.inner.get_mut(key) + } - /// Currently-used size of values in bytes. - pub fn current_size(&self) -> usize { - self.cur_size - } + /// Currently-used size of values in bytes. 
+ pub fn current_size(&self) -> usize { + self.cur_size + } - /// Get backing LRU cache instance (read only) - pub fn backstore(&self) -> &LruCache { - &self.inner - } + /// Get backing LRU cache instance (read only) + pub fn backstore(&self) -> &LruCache { + &self.inner + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn it_works() { - let mut cache = MemoryLruCache::new(256); - let val1 = vec![0u8; 100]; - let size1 = heap_size_of(&val1); - cache.insert("hello", val1); + #[test] + fn it_works() { + let mut cache = MemoryLruCache::new(256); + let val1 = vec![0u8; 100]; + let size1 = heap_size_of(&val1); + cache.insert("hello", val1); - assert_eq!(cache.current_size(), size1); + assert_eq!(cache.current_size(), size1); - let val2 = vec![0u8; 210]; - let size2 = heap_size_of(&val2); - cache.insert("world", val2); + let val2 = vec![0u8; 210]; + let size2 = heap_size_of(&val2); + cache.insert("world", val2); - assert!(cache.get_mut(&"hello").is_none()); - assert!(cache.get_mut(&"world").is_some()); + assert!(cache.get_mut(&"hello").is_none()); + assert!(cache.get_mut(&"world").is_some()); - assert_eq!(cache.current_size(), size2); - } + assert_eq!(cache.current_size(), size2); + } } diff --git a/util/memzero/src/lib.rs b/util/memzero/src/lib.rs index 827407a91..ebbbc8b92 100644 --- a/util/memzero/src/lib.rs +++ b/util/memzero/src/lib.rs @@ -14,41 +14,43 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::ops::{Deref, DerefMut}; -use std::ptr; +use std::{ + ops::{Deref, DerefMut}, + ptr, +}; /// Wrapper to zero out memory when dropped. 
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Memzero> { - mem: T, + mem: T, } impl> From for Memzero { - fn from(mem: T) -> Memzero { - Memzero { mem } - } + fn from(mem: T) -> Memzero { + Memzero { mem } + } } impl> Drop for Memzero { - fn drop(&mut self) { - unsafe { - for byte_ref in self.mem.as_mut() { - ptr::write_volatile(byte_ref, 0) - } - } - } + fn drop(&mut self) { + unsafe { + for byte_ref in self.mem.as_mut() { + ptr::write_volatile(byte_ref, 0) + } + } + } } impl> Deref for Memzero { - type Target = T; + type Target = T; - fn deref(&self) -> &Self::Target { - &self.mem - } + fn deref(&self) -> &Self::Target { + &self.mem + } } impl> DerefMut for Memzero { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.mem - } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.mem + } } diff --git a/util/migration-rocksdb/src/lib.rs b/util/migration-rocksdb/src/lib.rs index d63f2843c..38af27a62 100644 --- a/util/migration-rocksdb/src/lib.rs +++ b/util/migration-rocksdb/src/lib.rs @@ -24,326 +24,379 @@ extern crate macros; extern crate kvdb; extern crate kvdb_rocksdb; -use std::collections::BTreeMap; -use std::path::{Path, PathBuf}; -use std::sync::Arc; -use std::{fs, io, error}; +use std::{ + collections::BTreeMap, + error, fs, io, + path::{Path, PathBuf}, + sync::Arc, +}; use kvdb::DBTransaction; use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig}; -fn other_io_err(e: E) -> io::Error where E: Into> { - io::Error::new(io::ErrorKind::Other, e) +fn other_io_err(e: E) -> io::Error +where + E: Into>, +{ + io::Error::new(io::ErrorKind::Other, e) } /// Migration config. #[derive(Clone)] pub struct Config { - /// Defines how many elements should be migrated at once. - pub batch_size: usize, - /// Database compaction profile. - pub compaction_profile: CompactionProfile, + /// Defines how many elements should be migrated at once. + pub batch_size: usize, + /// Database compaction profile. 
+ pub compaction_profile: CompactionProfile, } impl Default for Config { - fn default() -> Self { - Config { - batch_size: 1024, - compaction_profile: Default::default(), - } - } + fn default() -> Self { + Config { + batch_size: 1024, + compaction_profile: Default::default(), + } + } } /// A batch of key-value pairs to be written into the database. pub struct Batch { - inner: BTreeMap, Vec>, - batch_size: usize, - column: Option, + inner: BTreeMap, Vec>, + batch_size: usize, + column: Option, } impl Batch { - /// Make a new batch with the given config. - pub fn new(config: &Config, col: Option) -> Self { - Batch { - inner: BTreeMap::new(), - batch_size: config.batch_size, - column: col, - } - } + /// Make a new batch with the given config. + pub fn new(config: &Config, col: Option) -> Self { + Batch { + inner: BTreeMap::new(), + batch_size: config.batch_size, + column: col, + } + } - /// Insert a value into the batch, committing if necessary. - pub fn insert(&mut self, key: Vec, value: Vec, dest: &mut Database) -> io::Result<()> { - self.inner.insert(key, value); - if self.inner.len() == self.batch_size { - self.commit(dest)?; - } - Ok(()) - } + /// Insert a value into the batch, committing if necessary. + pub fn insert(&mut self, key: Vec, value: Vec, dest: &mut Database) -> io::Result<()> { + self.inner.insert(key, value); + if self.inner.len() == self.batch_size { + self.commit(dest)?; + } + Ok(()) + } - /// Commit all the items in the batch to the given database. - pub fn commit(&mut self, dest: &mut Database) -> io::Result<()> { - if self.inner.is_empty() { return Ok(()) } + /// Commit all the items in the batch to the given database. 
+ pub fn commit(&mut self, dest: &mut Database) -> io::Result<()> { + if self.inner.is_empty() { + return Ok(()); + } - let mut transaction = DBTransaction::new(); + let mut transaction = DBTransaction::new(); - for keypair in &self.inner { - transaction.put(self.column, &keypair.0, &keypair.1); - } + for keypair in &self.inner { + transaction.put(self.column, &keypair.0, &keypair.1); + } - self.inner.clear(); - dest.write(transaction) - } + self.inner.clear(); + dest.write(transaction) + } } /// A generalized migration from the given db to a destination db. pub trait Migration: 'static { - /// Number of columns in the database before the migration. - fn pre_columns(&self) -> Option { self.columns() } - /// Number of columns in database after the migration. - fn columns(&self) -> Option; - /// Whether this migration alters any existing columns. - /// if not, then column families will simply be added and `migrate` will never be called. - fn alters_existing(&self) -> bool { true } - /// Version of the database after the migration. - fn version(&self) -> u32; - /// Migrate a source to a destination. - fn migrate(&mut self, source: Arc, config: &Config, destination: &mut Database, col: Option) -> io::Result<()>; + /// Number of columns in the database before the migration. + fn pre_columns(&self) -> Option { + self.columns() + } + /// Number of columns in database after the migration. + fn columns(&self) -> Option; + /// Whether this migration alters any existing columns. + /// if not, then column families will simply be added and `migrate` will never be called. + fn alters_existing(&self) -> bool { + true + } + /// Version of the database after the migration. + fn version(&self) -> u32; + /// Migrate a source to a destination. + fn migrate( + &mut self, + source: Arc, + config: &Config, + destination: &mut Database, + col: Option, + ) -> io::Result<()>; } /// A simple migration over key-value pairs of a single column. 
pub trait SimpleMigration: 'static { - /// Number of columns in database after the migration. - fn columns(&self) -> Option; - /// Version of database after the migration. - fn version(&self) -> u32; - /// Index of column which should be migrated. - fn migrated_column_index(&self) -> Option; - /// Should migrate existing object to new database. - /// Returns `None` if the object does not exist in new version of database. - fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)>; + /// Number of columns in database after the migration. + fn columns(&self) -> Option; + /// Version of database after the migration. + fn version(&self) -> u32; + /// Index of column which should be migrated. + fn migrated_column_index(&self) -> Option; + /// Should migrate existing object to new database. + /// Returns `None` if the object does not exist in new version of database. + fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)>; } impl Migration for T { - fn columns(&self) -> Option { SimpleMigration::columns(self) } + fn columns(&self) -> Option { + SimpleMigration::columns(self) + } - fn version(&self) -> u32 { SimpleMigration::version(self) } + fn version(&self) -> u32 { + SimpleMigration::version(self) + } - fn alters_existing(&self) -> bool { true } + fn alters_existing(&self) -> bool { + true + } - fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> io::Result<()> { - let migration_needed = col == SimpleMigration::migrated_column_index(self); - let mut batch = Batch::new(config, col); + fn migrate( + &mut self, + source: Arc, + config: &Config, + dest: &mut Database, + col: Option, + ) -> io::Result<()> { + let migration_needed = col == SimpleMigration::migrated_column_index(self); + let mut batch = Batch::new(config, col); - let iter = match source.iter(col) { - Some(iter) => iter, - None => return Ok(()), - }; + let iter = match source.iter(col) { + Some(iter) => iter, + None => return Ok(()), + }; 
- for (key, value) in iter { - if migration_needed { - if let Some((key, value)) = self.simple_migrate(key.into_vec(), value.into_vec()) { - batch.insert(key, value, dest)?; - } - } else { - batch.insert(key.into_vec(), value.into_vec(), dest)?; - } - } + for (key, value) in iter { + if migration_needed { + if let Some((key, value)) = self.simple_migrate(key.into_vec(), value.into_vec()) { + batch.insert(key, value, dest)?; + } + } else { + batch.insert(key.into_vec(), value.into_vec(), dest)?; + } + } - batch.commit(dest) - } + batch.commit(dest) + } } /// An even simpler migration which just changes the number of columns. pub struct ChangeColumns { - /// The amount of columns before this migration. - pub pre_columns: Option, - /// The amount of columns after this migration. - pub post_columns: Option, - /// The version after this migration. - pub version: u32, + /// The amount of columns before this migration. + pub pre_columns: Option, + /// The amount of columns after this migration. + pub post_columns: Option, + /// The version after this migration. + pub version: u32, } impl Migration for ChangeColumns { - fn pre_columns(&self) -> Option { self.pre_columns } - fn columns(&self) -> Option { self.post_columns } - fn version(&self) -> u32 { self.version } - fn alters_existing(&self) -> bool { false } - fn migrate(&mut self, _: Arc, _: &Config, _: &mut Database, _: Option) -> io::Result<()> { - Ok(()) - } + fn pre_columns(&self) -> Option { + self.pre_columns + } + fn columns(&self) -> Option { + self.post_columns + } + fn version(&self) -> u32 { + self.version + } + fn alters_existing(&self) -> bool { + false + } + fn migrate( + &mut self, + _: Arc, + _: &Config, + _: &mut Database, + _: Option, + ) -> io::Result<()> { + Ok(()) + } } /// Get the path where all databases reside. 
fn database_path(path: &Path) -> PathBuf { - let mut temp_path = path.to_owned(); - temp_path.pop(); - temp_path + let mut temp_path = path.to_owned(); + temp_path.pop(); + temp_path } enum TempIndex { - One, - Two, + One, + Two, } impl TempIndex { - fn swap(&mut self) { - match *self { - TempIndex::One => *self = TempIndex::Two, - TempIndex::Two => *self = TempIndex::One, - } - } + fn swap(&mut self) { + match *self { + TempIndex::One => *self = TempIndex::Two, + TempIndex::Two => *self = TempIndex::One, + } + } - // given the path to the old database, get the path of this one. - fn path(&self, db_root: &Path) -> PathBuf { - let mut buf = db_root.to_owned(); + // given the path to the old database, get the path of this one. + fn path(&self, db_root: &Path) -> PathBuf { + let mut buf = db_root.to_owned(); - match *self { - TempIndex::One => buf.push("temp_migration_1"), - TempIndex::Two => buf.push("temp_migration_2"), - }; + match *self { + TempIndex::One => buf.push("temp_migration_1"), + TempIndex::Two => buf.push("temp_migration_2"), + }; - buf - } + buf + } } /// Manages database migration. pub struct Manager { - config: Config, - migrations: Vec>, + config: Config, + migrations: Vec>, } impl Manager { - /// Creates new migration manager with given configuration. - pub fn new(config: Config) -> Self { - Manager { - config: config, - migrations: vec![], - } - } + /// Creates new migration manager with given configuration. + pub fn new(config: Config) -> Self { + Manager { + config: config, + migrations: vec![], + } + } - /// Adds new migration rules. - pub fn add_migration(&mut self, migration: T) -> io::Result<()> where T: Migration { - let is_new = match self.migrations.last() { - Some(last) => migration.version() > last.version(), - None => true, - }; + /// Adds new migration rules. 
+ pub fn add_migration(&mut self, migration: T) -> io::Result<()> + where + T: Migration, + { + let is_new = match self.migrations.last() { + Some(last) => migration.version() > last.version(), + None => true, + }; - match is_new { - true => Ok(self.migrations.push(Box::new(migration))), - false => Err(other_io_err("Cannot add migration.")), - } - } + match is_new { + true => Ok(self.migrations.push(Box::new(migration))), + false => Err(other_io_err("Cannot add migration.")), + } + } - /// Performs migration in order, starting with a source path, migrating between two temporary databases, - /// and producing a path where the final migration lives. - pub fn execute(&mut self, old_path: &Path, version: u32) -> io::Result { - let config = self.config.clone(); - let migrations = self.migrations_from(version); - trace!(target: "migration", "Total migrations to execute for version {}: {}", version, migrations.len()); - if migrations.is_empty() { - return Err(other_io_err("Migration impossible")); - }; + /// Performs migration in order, starting with a source path, migrating between two temporary databases, + /// and producing a path where the final migration lives. 
+ pub fn execute(&mut self, old_path: &Path, version: u32) -> io::Result { + let config = self.config.clone(); + let migrations = self.migrations_from(version); + trace!(target: "migration", "Total migrations to execute for version {}: {}", version, migrations.len()); + if migrations.is_empty() { + return Err(other_io_err("Migration impossible")); + }; - let columns = migrations.get(0).and_then(|m| m.pre_columns()); + let columns = migrations.get(0).and_then(|m| m.pre_columns()); - trace!(target: "migration", "Expecting database to contain {:?} columns", columns); - let mut db_config = DatabaseConfig { - max_open_files: 64, - memory_budget: None, - compaction: config.compaction_profile, - columns: columns, - }; + trace!(target: "migration", "Expecting database to contain {:?} columns", columns); + let mut db_config = DatabaseConfig { + max_open_files: 64, + memory_budget: None, + compaction: config.compaction_profile, + columns: columns, + }; - let db_root = database_path(old_path); - let mut temp_idx = TempIndex::One; - let mut temp_path = old_path.to_path_buf(); + let db_root = database_path(old_path); + let mut temp_idx = TempIndex::One; + let mut temp_path = old_path.to_path_buf(); - // start with the old db. - let old_path_str = old_path.to_str().ok_or_else(|| other_io_err("Migration impossible."))?; - let mut cur_db = Arc::new(Database::open(&db_config, old_path_str)?); + // start with the old db. 
+ let old_path_str = old_path + .to_str() + .ok_or_else(|| other_io_err("Migration impossible."))?; + let mut cur_db = Arc::new(Database::open(&db_config, old_path_str)?); - for migration in migrations { - trace!(target: "migration", "starting migration to version {}", migration.version()); - // Change number of columns in new db - let current_columns = db_config.columns; - db_config.columns = migration.columns(); + for migration in migrations { + trace!(target: "migration", "starting migration to version {}", migration.version()); + // Change number of columns in new db + let current_columns = db_config.columns; + db_config.columns = migration.columns(); - // slow migrations: alter existing data. - if migration.alters_existing() { - temp_path = temp_idx.path(&db_root); + // slow migrations: alter existing data. + if migration.alters_existing() { + temp_path = temp_idx.path(&db_root); - // open the target temporary database. - let temp_path_str = temp_path.to_str().ok_or_else(|| other_io_err("Migration impossible."))?; - let mut new_db = Database::open(&db_config, temp_path_str)?; + // open the target temporary database. + let temp_path_str = temp_path + .to_str() + .ok_or_else(|| other_io_err("Migration impossible."))?; + let mut new_db = Database::open(&db_config, temp_path_str)?; - match current_columns { - // migrate only default column - None => migration.migrate(cur_db.clone(), &config, &mut new_db, None)?, - Some(v) => { - // Migrate all columns in previous DB - for col in 0..v { - migration.migrate(cur_db.clone(), &config, &mut new_db, Some(col))? - } - } - } - // next iteration, we will migrate from this db into the other temp. - cur_db = Arc::new(new_db); - temp_idx.swap(); + match current_columns { + // migrate only default column + None => migration.migrate(cur_db.clone(), &config, &mut new_db, None)?, + Some(v) => { + // Migrate all columns in previous DB + for col in 0..v { + migration.migrate(cur_db.clone(), &config, &mut new_db, Some(col))? 
+ } + } + } + // next iteration, we will migrate from this db into the other temp. + cur_db = Arc::new(new_db); + temp_idx.swap(); - // remove the other temporary migration database. - let _ = fs::remove_dir_all(temp_idx.path(&db_root)); - } else { - // migrations which simply add or remove column families. - // we can do this in-place. - let goal_columns = migration.columns().unwrap_or(0); - while cur_db.num_columns() < goal_columns { - cur_db.add_column().map_err(other_io_err)?; - } + // remove the other temporary migration database. + let _ = fs::remove_dir_all(temp_idx.path(&db_root)); + } else { + // migrations which simply add or remove column families. + // we can do this in-place. + let goal_columns = migration.columns().unwrap_or(0); + while cur_db.num_columns() < goal_columns { + cur_db.add_column().map_err(other_io_err)?; + } - while cur_db.num_columns() > goal_columns { - cur_db.drop_column().map_err(other_io_err)?; - } - } - } - Ok(temp_path) - } + while cur_db.num_columns() > goal_columns { + cur_db.drop_column().map_err(other_io_err)?; + } + } + } + Ok(temp_path) + } - /// Returns true if migration is needed. - pub fn is_needed(&self, version: u32) -> bool { - match self.migrations.last() { - Some(last) => version < last.version(), - None => false, - } - } + /// Returns true if migration is needed. + pub fn is_needed(&self, version: u32) -> bool { + match self.migrations.last() { + Some(last) => version < last.version(), + None => false, + } + } - /// Find all needed migrations. - fn migrations_from(&mut self, version: u32) -> Vec<&mut Box> { - self.migrations.iter_mut().filter(|m| m.version() > version).collect() - } + /// Find all needed migrations. 
+ fn migrations_from(&mut self, version: u32) -> Vec<&mut Box> { + self.migrations + .iter_mut() + .filter(|m| m.version() > version) + .collect() + } } /// Prints a dot every `max` ticks pub struct Progress { - current: usize, - max: usize, + current: usize, + max: usize, } impl Default for Progress { - fn default() -> Self { - Progress { - current: 0, - max: 100_000, - } - } + fn default() -> Self { + Progress { + current: 0, + max: 100_000, + } + } } impl Progress { - /// Tick progress meter. - pub fn tick(&mut self) { - self.current += 1; - if self.current == self.max { - self.current = 0; - flush!("."); - } - } + /// Tick progress meter. + pub fn tick(&mut self) { + self.current += 1; + if self.current == self.max { + self.current = 0; + flush!("."); + } + } } diff --git a/util/migration-rocksdb/tests/tests.rs b/util/migration-rocksdb/tests/tests.rs index 7458c2d32..1ff2a81ff 100644 --- a/util/migration-rocksdb/tests/tests.rs +++ b/util/migration-rocksdb/tests/tests.rs @@ -20,253 +20,277 @@ #[macro_use] extern crate macros; -extern crate tempdir; extern crate kvdb_rocksdb; extern crate migration_rocksdb as migration; +extern crate tempdir; -use std::collections::BTreeMap; -use std::io; -use std::path::{Path, PathBuf}; -use std::sync::Arc; -use tempdir::TempDir; use kvdb_rocksdb::Database; -use migration::{Batch, Config, SimpleMigration, Migration, Manager, ChangeColumns}; +use migration::{Batch, ChangeColumns, Config, Manager, Migration, SimpleMigration}; +use std::{ + collections::BTreeMap, + io, + path::{Path, PathBuf}, + sync::Arc, +}; +use tempdir::TempDir; #[inline] fn db_path(path: &Path) -> PathBuf { - path.join("db") + path.join("db") } // initialize a database at the given directory with the given values. 
fn make_db(path: &Path, pairs: BTreeMap, Vec>) { - let db = Database::open_default(path.to_str().unwrap()).expect("failed to open temp database"); - { - let mut transaction = db.transaction(); - for (k, v) in pairs { - transaction.put(None, &k, &v); - } + let db = Database::open_default(path.to_str().unwrap()).expect("failed to open temp database"); + { + let mut transaction = db.transaction(); + for (k, v) in pairs { + transaction.put(None, &k, &v); + } - db.write(transaction).expect("failed to write db transaction"); - } + db.write(transaction) + .expect("failed to write db transaction"); + } } // helper for verifying a migrated database. fn verify_migration(path: &Path, pairs: BTreeMap, Vec>) { - let db = Database::open_default(path.to_str().unwrap()).unwrap(); + let db = Database::open_default(path.to_str().unwrap()).unwrap(); - for (k, v) in pairs { - let x = db.get(None, &k).unwrap().unwrap(); + for (k, v) in pairs { + let x = db.get(None, &k).unwrap().unwrap(); - assert_eq!(&x[..], &v[..]); - } + assert_eq!(&x[..], &v[..]); + } } struct Migration0; impl SimpleMigration for Migration0 { - fn columns(&self) -> Option { - None - } + fn columns(&self) -> Option { + None + } - fn version(&self) -> u32 { - 1 - } + fn version(&self) -> u32 { + 1 + } - fn migrated_column_index(&self) -> Option { - None - } + fn migrated_column_index(&self) -> Option { + None + } - fn simple_migrate(&mut self, mut key: Vec, mut value: Vec) -> Option<(Vec, Vec)> { - key.push(0x11); - value.push(0x22); + fn simple_migrate( + &mut self, + mut key: Vec, + mut value: Vec, + ) -> Option<(Vec, Vec)> { + key.push(0x11); + value.push(0x22); - Some((key, value)) - } + Some((key, value)) + } } struct Migration1; impl SimpleMigration for Migration1 { - fn columns(&self) -> Option { - None - } + fn columns(&self) -> Option { + None + } - fn version(&self) -> u32 { - 2 - } + fn version(&self) -> u32 { + 2 + } - fn migrated_column_index(&self) -> Option { - None - } + fn 
migrated_column_index(&self) -> Option { + None + } - fn simple_migrate(&mut self, key: Vec, _value: Vec) -> Option<(Vec, Vec)> { - Some((key, vec![])) - } + fn simple_migrate(&mut self, key: Vec, _value: Vec) -> Option<(Vec, Vec)> { + Some((key, vec![])) + } } struct AddsColumn; impl Migration for AddsColumn { - fn pre_columns(&self) -> Option { None } + fn pre_columns(&self) -> Option { + None + } - fn columns(&self) -> Option { Some(1) } + fn columns(&self) -> Option { + Some(1) + } - fn version(&self) -> u32 { 1 } + fn version(&self) -> u32 { + 1 + } - fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> io::Result<()> { - let mut batch = Batch::new(config, col); + fn migrate( + &mut self, + source: Arc, + config: &Config, + dest: &mut Database, + col: Option, + ) -> io::Result<()> { + let mut batch = Batch::new(config, col); - for (key, value) in source.iter(col).into_iter().flat_map(|inner| inner) { - batch.insert(key.into_vec(), value.into_vec(), dest)?; - } + for (key, value) in source.iter(col).into_iter().flat_map(|inner| inner) { + batch.insert(key.into_vec(), value.into_vec(), dest)?; + } - if col == Some(1) { - batch.insert(vec![1, 2, 3], vec![4, 5, 6], dest)?; - } + if col == Some(1) { + batch.insert(vec![1, 2, 3], vec![4, 5, 6], dest)?; + } - batch.commit(dest) - } + batch.commit(dest) + } } #[test] fn one_simple_migration() { - let tempdir = TempDir::new("").unwrap(); - let db_path = db_path(tempdir.path()); - let mut manager = Manager::new(Config::default()); - make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); - let expected = map![vec![0x11] => vec![0x22], vec![1, 0x11] => vec![1, 0x22]]; + let tempdir = TempDir::new("").unwrap(); + let db_path = db_path(tempdir.path()); + let mut manager = Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); + let expected = map![vec![0x11] => vec![0x22], vec![1, 0x11] => vec![1, 0x22]]; - 
manager.add_migration(Migration0).unwrap(); - let end_path = manager.execute(&db_path, 0).unwrap(); + manager.add_migration(Migration0).unwrap(); + let end_path = manager.execute(&db_path, 0).unwrap(); - verify_migration(&end_path, expected); + verify_migration(&end_path, expected); } #[test] #[should_panic] fn no_migration_needed() { - let tempdir = TempDir::new("").unwrap(); - let db_path = db_path(tempdir.path()); - let mut manager = Manager::new(Config::default()); - make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); + let tempdir = TempDir::new("").unwrap(); + let db_path = db_path(tempdir.path()); + let mut manager = Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); - manager.add_migration(Migration0).unwrap(); - manager.execute(&db_path, 1).unwrap(); + manager.add_migration(Migration0).unwrap(); + manager.execute(&db_path, 1).unwrap(); } #[test] #[should_panic] fn wrong_adding_order() { - let tempdir = TempDir::new("").unwrap(); - let db_path = db_path(tempdir.path()); - let mut manager = Manager::new(Config::default()); - make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); + let tempdir = TempDir::new("").unwrap(); + let db_path = db_path(tempdir.path()); + let mut manager = Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); - manager.add_migration(Migration1).unwrap(); - manager.add_migration(Migration0).unwrap(); + manager.add_migration(Migration1).unwrap(); + manager.add_migration(Migration0).unwrap(); } #[test] fn multiple_migrations() { - let tempdir = TempDir::new("").unwrap(); - let db_path = db_path(tempdir.path()); - let mut manager = Manager::new(Config::default()); - make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); - let expected = map![vec![0x11] => vec![], vec![1, 0x11] => vec![]]; + let tempdir = TempDir::new("").unwrap(); + let db_path = db_path(tempdir.path()); + let mut manager = 
Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); + let expected = map![vec![0x11] => vec![], vec![1, 0x11] => vec![]]; - manager.add_migration(Migration0).unwrap(); - manager.add_migration(Migration1).unwrap(); - let end_path = manager.execute(&db_path, 0).unwrap(); + manager.add_migration(Migration0).unwrap(); + manager.add_migration(Migration1).unwrap(); + let end_path = manager.execute(&db_path, 0).unwrap(); - verify_migration(&end_path, expected); + verify_migration(&end_path, expected); } #[test] fn second_migration() { - let tempdir = TempDir::new("").unwrap(); - let db_path = db_path(tempdir.path()); - let mut manager = Manager::new(Config::default()); - make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); - let expected = map![vec![] => vec![], vec![1] => vec![]]; + let tempdir = TempDir::new("").unwrap(); + let db_path = db_path(tempdir.path()); + let mut manager = Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); + let expected = map![vec![] => vec![], vec![1] => vec![]]; - manager.add_migration(Migration0).unwrap(); - manager.add_migration(Migration1).unwrap(); - let end_path = manager.execute(&db_path, 1).unwrap(); + manager.add_migration(Migration0).unwrap(); + manager.add_migration(Migration1).unwrap(); + let end_path = manager.execute(&db_path, 1).unwrap(); - verify_migration(&end_path, expected); + verify_migration(&end_path, expected); } #[test] fn first_and_noop_migration() { - let tempdir = TempDir::new("").unwrap(); - let db_path = db_path(tempdir.path()); - let mut manager = Manager::new(Config::default()); - make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); - let expected = map![vec![0x11] => vec![0x22], vec![1, 0x11] => vec![1, 0x22]]; + let tempdir = TempDir::new("").unwrap(); + let db_path = db_path(tempdir.path()); + let mut manager = Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => 
vec![1]]); + let expected = map![vec![0x11] => vec![0x22], vec![1, 0x11] => vec![1, 0x22]]; - manager.add_migration(Migration0).unwrap(); - let end_path = manager.execute(&db_path, 0).unwrap(); + manager.add_migration(Migration0).unwrap(); + let end_path = manager.execute(&db_path, 0).unwrap(); - verify_migration(&end_path, expected); + verify_migration(&end_path, expected); } #[test] fn noop_and_second_migration() { - let tempdir = TempDir::new("").unwrap(); - let db_path = db_path(tempdir.path()); - let mut manager = Manager::new(Config::default()); - make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); - let expected = map![vec![] => vec![], vec![1] => vec![]]; + let tempdir = TempDir::new("").unwrap(); + let db_path = db_path(tempdir.path()); + let mut manager = Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); + let expected = map![vec![] => vec![], vec![1] => vec![]]; - manager.add_migration(Migration1).unwrap(); - let end_path = manager.execute(&db_path, 0).unwrap(); + manager.add_migration(Migration1).unwrap(); + let end_path = manager.execute(&db_path, 0).unwrap(); - verify_migration(&end_path, expected); + verify_migration(&end_path, expected); } #[test] fn is_migration_needed() { - let mut manager = Manager::new(Config::default()); - manager.add_migration(Migration0).unwrap(); - manager.add_migration(Migration1).unwrap(); + let mut manager = Manager::new(Config::default()); + manager.add_migration(Migration0).unwrap(); + manager.add_migration(Migration1).unwrap(); - assert!(manager.is_needed(0)); - assert!(manager.is_needed(1)); - assert!(!manager.is_needed(2)); + assert!(manager.is_needed(0)); + assert!(manager.is_needed(1)); + assert!(!manager.is_needed(2)); } #[test] fn pre_columns() { - let mut manager = Manager::new(Config::default()); - manager.add_migration(AddsColumn).unwrap(); + let mut manager = Manager::new(Config::default()); + manager.add_migration(AddsColumn).unwrap(); - let tempdir 
= TempDir::new("").unwrap(); - let db_path = db_path(tempdir.path()); + let tempdir = TempDir::new("").unwrap(); + let db_path = db_path(tempdir.path()); - // this shouldn't fail to open the database even though it's one column - // short of the one before it. - manager.execute(&db_path, 0).unwrap(); + // this shouldn't fail to open the database even though it's one column + // short of the one before it. + manager.execute(&db_path, 0).unwrap(); } #[test] fn change_columns() { - use kvdb_rocksdb::DatabaseConfig; + use kvdb_rocksdb::DatabaseConfig; - let mut manager = Manager::new(Config::default()); - manager.add_migration(ChangeColumns { - pre_columns: None, - post_columns: Some(4), - version: 1, - }).unwrap(); + let mut manager = Manager::new(Config::default()); + manager + .add_migration(ChangeColumns { + pre_columns: None, + post_columns: Some(4), + version: 1, + }) + .unwrap(); - let tempdir = TempDir::new("").unwrap(); - let db_path = db_path(tempdir.path()); + let tempdir = TempDir::new("").unwrap(); + let db_path = db_path(tempdir.path()); - let new_path = manager.execute(&db_path, 0).unwrap(); + let new_path = manager.execute(&db_path, 0).unwrap(); - assert_eq!(db_path, new_path, "Changing columns is an in-place migration."); + assert_eq!( + db_path, new_path, + "Changing columns is an in-place migration." 
+ ); - let config = DatabaseConfig::with_columns(Some(4)); - let db = Database::open(&config, new_path.to_str().unwrap()).unwrap(); - assert_eq!(db.num_columns(), 4); + let config = DatabaseConfig::with_columns(Some(4)); + let db = Database::open(&config, new_path.to_str().unwrap()).unwrap(); + assert_eq!(db.num_columns(), 4); } diff --git a/util/network-devp2p/src/connection.rs b/util/network-devp2p/src/connection.rs index ae34ec44c..9bc7cf8ac 100644 --- a/util/network-devp2p/src/connection.rs +++ b/util/network-devp2p/src/connection.rs @@ -14,28 +14,29 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::collections::VecDeque; -use std::net::SocketAddr; -use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; -use std::time::Duration; -use hash::{keccak, write_keccak}; -use mio::{Token, Ready, PollOpt}; -use mio::deprecated::{Handler, EventLoop, TryRead, TryWrite}; -use mio::tcp::*; -use ethereum_types::{H128, H256, H512}; -use parity_bytes::*; -use rlp::{Rlp, RlpStream}; -use std::io::{self, Cursor, Read, Write}; -use io::{IoContext, StreamToken}; -use handshake::Handshake; -use rcrypto::blockmodes::*; -use rcrypto::aessafe::*; -use rcrypto::symmetriccipher::*; -use rcrypto::buffer::*; -use tiny_keccak::Keccak; use bytes::{Buf, BufMut}; +use ethereum_types::{H128, H256, H512}; use ethkey::crypto; +use handshake::Handshake; +use hash::{keccak, write_keccak}; +use io::{IoContext, StreamToken}; +use mio::{ + deprecated::{EventLoop, Handler, TryRead, TryWrite}, + tcp::*, + PollOpt, Ready, Token, +}; use network::{Error, ErrorKind}; +use parity_bytes::*; +use rcrypto::{aessafe::*, blockmodes::*, buffer::*, symmetriccipher::*}; +use rlp::{Rlp, RlpStream}; +use std::{ + collections::VecDeque, + io::{self, Cursor, Read, Write}, + net::SocketAddr, + sync::atomic::{AtomicBool, Ordering as AtomicOrdering}, + time::Duration, +}; +use tiny_keccak::Keccak; const ENCRYPTED_HEADER_LEN: usize = 
32; const RECEIVE_PAYLOAD: Duration = Duration::from_secs(30); @@ -45,707 +46,850 @@ pub const MAX_PAYLOAD_SIZE: usize = (1 << 24) - 1; /// This should be lower than MAX_PAYLOAD_SIZE pub const PAYLOAD_SOFT_LIMIT: usize = (1 << 22) - 1; -pub trait GenericSocket : Read + Write { -} +pub trait GenericSocket: Read + Write {} -impl GenericSocket for TcpStream { -} +impl GenericSocket for TcpStream {} pub struct GenericConnection { - /// Connection id (token) - pub token: StreamToken, - /// Network socket - pub socket: Socket, - /// Receive buffer - rec_buf: Bytes, - /// Expected size - rec_size: usize, - /// Send out packets FIFO - send_queue: VecDeque>, - /// Event flags this connection expects - interest: Ready, - /// Registered flag - registered: AtomicBool, + /// Connection id (token) + pub token: StreamToken, + /// Network socket + pub socket: Socket, + /// Receive buffer + rec_buf: Bytes, + /// Expected size + rec_size: usize, + /// Send out packets FIFO + send_queue: VecDeque>, + /// Event flags this connection expects + interest: Ready, + /// Registered flag + registered: AtomicBool, } impl GenericConnection { - pub fn expect(&mut self, size: usize) { - trace!(target:"network", "Expect to read {} bytes", size); - if self.rec_size != self.rec_buf.len() { - warn!(target:"network", "Unexpected connection read start"); - } - self.rec_size = size; - } - - /// Readable IO handler. Called when there is some data to be read. 
- pub fn readable(&mut self) -> io::Result> { - if self.rec_size == 0 || self.rec_buf.len() >= self.rec_size { - return Ok(None); - } - let sock_ref = ::by_ref(&mut self.socket); - loop { - let max = self.rec_size - self.rec_buf.len(); - match sock_ref.take(max as u64).try_read(unsafe { self.rec_buf.bytes_mut() }) { - Ok(Some(size)) if size != 0 => { - unsafe { self.rec_buf.advance_mut(size); } - trace!(target:"network", "{}: Read {} of {} bytes", self.token, self.rec_buf.len(), self.rec_size); - if self.rec_size != 0 && self.rec_buf.len() == self.rec_size { - self.rec_size = 0; - return Ok(Some(::std::mem::replace(&mut self.rec_buf, Bytes::new()))) - } - else if self.rec_buf.len() > self.rec_size { - warn!(target:"network", "Read past buffer {} bytes", self.rec_buf.len() - self.rec_size); - return Ok(Some(::std::mem::replace(&mut self.rec_buf, Bytes::new()))) - } - }, - Ok(_) => return Ok(None), - Err(e) => { - debug!(target:"network", "Read error {} ({})", self.token, e); - return Err(e) - } - } + pub fn expect(&mut self, size: usize) { + trace!(target:"network", "Expect to read {} bytes", size); + if self.rec_size != self.rec_buf.len() { + warn!(target:"network", "Unexpected connection read start"); } - } + self.rec_size = size; + } - /// Add a packet to send queue. - pub fn send(&mut self, io: &IoContext, data: Bytes) where Message: Send + Clone + Sync + 'static { - if !data.is_empty() { - trace!(target:"network", "{}: Sending {} bytes", self.token, data.len()); - self.send_queue.push_back(Cursor::new(data)); - if !self.interest.is_writable() { - self.interest.insert(Ready::writable()); - } - io.update_registration(self.token).ok(); - } - } + /// Readable IO handler. Called when there is some data to be read. 
+ pub fn readable(&mut self) -> io::Result> { + if self.rec_size == 0 || self.rec_buf.len() >= self.rec_size { + return Ok(None); + } + let sock_ref = ::by_ref(&mut self.socket); + loop { + let max = self.rec_size - self.rec_buf.len(); + match sock_ref + .take(max as u64) + .try_read(unsafe { self.rec_buf.bytes_mut() }) + { + Ok(Some(size)) if size != 0 => { + unsafe { + self.rec_buf.advance_mut(size); + } + trace!(target:"network", "{}: Read {} of {} bytes", self.token, self.rec_buf.len(), self.rec_size); + if self.rec_size != 0 && self.rec_buf.len() == self.rec_size { + self.rec_size = 0; + return Ok(Some(::std::mem::replace(&mut self.rec_buf, Bytes::new()))); + } else if self.rec_buf.len() > self.rec_size { + warn!(target:"network", "Read past buffer {} bytes", self.rec_buf.len() - self.rec_size); + return Ok(Some(::std::mem::replace(&mut self.rec_buf, Bytes::new()))); + } + } + Ok(_) => return Ok(None), + Err(e) => { + debug!(target:"network", "Read error {} ({})", self.token, e); + return Err(e); + } + } + } + } - /// Check if this connection has data to be sent. - pub fn is_sending(&self) -> bool { - self.interest.is_writable() - } + /// Add a packet to send queue. + pub fn send(&mut self, io: &IoContext, data: Bytes) + where + Message: Send + Clone + Sync + 'static, + { + if !data.is_empty() { + trace!(target:"network", "{}: Sending {} bytes", self.token, data.len()); + self.send_queue.push_back(Cursor::new(data)); + if !self.interest.is_writable() { + self.interest.insert(Ready::writable()); + } + io.update_registration(self.token).ok(); + } + } - /// Writable IO handler. Called when the socket is ready to send. 
- pub fn writable(&mut self, io: &IoContext) -> Result where Message: Send + Clone + Sync + 'static { - { - let buf = match self.send_queue.front_mut() { - Some(buf) => buf, - None => return Ok(WriteStatus::Complete), - }; - let send_size = buf.get_ref().len(); - let pos = buf.position() as usize; - if (pos as usize) >= send_size { - warn!(target:"net", "Unexpected connection data"); - return Ok(WriteStatus::Complete) - } + /// Check if this connection has data to be sent. + pub fn is_sending(&self) -> bool { + self.interest.is_writable() + } - match self.socket.try_write(Buf::bytes(&buf)) { - Ok(Some(size)) if (pos + size) < send_size => { - buf.advance(size); - Ok(WriteStatus::Ongoing) - }, - Ok(Some(size)) if (pos + size) == send_size => { - trace!(target:"network", "{}: Wrote {} bytes", self.token, send_size); - Ok(WriteStatus::Complete) - }, - Ok(Some(_)) => { panic!("Wrote past buffer");}, - Ok(None) => Ok(WriteStatus::Ongoing), - Err(e) => Err(e)? - } - }.and_then(|r| { - if r == WriteStatus::Complete { - self.send_queue.pop_front(); - } - if self.send_queue.is_empty() { - self.interest.remove(Ready::writable()); - } - io.update_registration(self.token)?; - Ok(r) - }) - } + /// Writable IO handler. Called when the socket is ready to send. 
+ pub fn writable(&mut self, io: &IoContext) -> Result + where + Message: Send + Clone + Sync + 'static, + { + { + let buf = match self.send_queue.front_mut() { + Some(buf) => buf, + None => return Ok(WriteStatus::Complete), + }; + let send_size = buf.get_ref().len(); + let pos = buf.position() as usize; + if (pos as usize) >= send_size { + warn!(target:"net", "Unexpected connection data"); + return Ok(WriteStatus::Complete); + } + + match self.socket.try_write(Buf::bytes(&buf)) { + Ok(Some(size)) if (pos + size) < send_size => { + buf.advance(size); + Ok(WriteStatus::Ongoing) + } + Ok(Some(size)) if (pos + size) == send_size => { + trace!(target:"network", "{}: Wrote {} bytes", self.token, send_size); + Ok(WriteStatus::Complete) + } + Ok(Some(_)) => { + panic!("Wrote past buffer"); + } + Ok(None) => Ok(WriteStatus::Ongoing), + Err(e) => Err(e)?, + } + } + .and_then(|r| { + if r == WriteStatus::Complete { + self.send_queue.pop_front(); + } + if self.send_queue.is_empty() { + self.interest.remove(Ready::writable()); + } + io.update_registration(self.token)?; + Ok(r) + }) + } } /// Low level tcp connection pub type Connection = GenericConnection; impl Connection { - /// Create a new connection with given id and socket. 
- pub fn new(token: StreamToken, socket: TcpStream) -> Connection { - Connection { - token, - socket, - send_queue: VecDeque::new(), - rec_buf: Bytes::new(), - rec_size: 0, - interest: Ready::hup() | Ready::readable(), - registered: AtomicBool::new(false), - } - } - - /// Get socket token - pub fn token(&self) -> StreamToken { - self.token - } - - /// Get remote peer address - pub fn remote_addr(&self) -> io::Result { - self.socket.peer_addr() - } - - /// Get remote peer address string - pub fn remote_addr_str(&self) -> String { - self.socket.peer_addr().map(|a| a.to_string()).unwrap_or_else(|_| "Unknown".to_owned()) - } - - /// Get local peer address string - pub fn local_addr_str(&self) -> String { - self.socket.local_addr().map(|a| a.to_string()).unwrap_or_else(|_| "Unknown".to_owned()) - } - - /// Clone this connection. Clears the receiving buffer of the returned connection. - pub fn try_clone(&self) -> io::Result { - Ok(Connection { - token: self.token, - socket: self.socket.try_clone()?, - rec_buf: Vec::new(), - rec_size: 0, - send_queue: self.send_queue.clone(), - interest: Ready::hup(), - registered: AtomicBool::new(false), - }) - } - - /// Register this connection with the IO event loop. - pub fn register_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { - if self.registered.load(AtomicOrdering::SeqCst) { - return Ok(()); + /// Create a new connection with given id and socket. 
+ pub fn new(token: StreamToken, socket: TcpStream) -> Connection { + Connection { + token, + socket, + send_queue: VecDeque::new(), + rec_buf: Bytes::new(), + rec_size: 0, + interest: Ready::hup() | Ready::readable(), + registered: AtomicBool::new(false), } - trace!(target: "network", "connection register; token={:?}", reg); - if let Err(e) = event_loop.register(&self.socket, reg, self.interest, PollOpt::edge() /* | PollOpt::oneshot() */) { // TODO: oneshot is broken on windows - trace!(target: "network", "Failed to register {:?}, {:?}", reg, e); - } - self.registered.store(true, AtomicOrdering::SeqCst); - Ok(()) - } + } - /// Update connection registration. Should be called at the end of the IO handler. - pub fn update_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "network", "connection reregister; token={:?}", reg); - if !self.registered.load(AtomicOrdering::SeqCst) { - self.register_socket(reg, event_loop) + /// Get socket token + pub fn token(&self) -> StreamToken { + self.token + } + + /// Get remote peer address + pub fn remote_addr(&self) -> io::Result { + self.socket.peer_addr() + } + + /// Get remote peer address string + pub fn remote_addr_str(&self) -> String { + self.socket + .peer_addr() + .map(|a| a.to_string()) + .unwrap_or_else(|_| "Unknown".to_owned()) + } + + /// Get local peer address string + pub fn local_addr_str(&self) -> String { + self.socket + .local_addr() + .map(|a| a.to_string()) + .unwrap_or_else(|_| "Unknown".to_owned()) + } + + /// Clone this connection. Clears the receiving buffer of the returned connection. + pub fn try_clone(&self) -> io::Result { + Ok(Connection { + token: self.token, + socket: self.socket.try_clone()?, + rec_buf: Vec::new(), + rec_size: 0, + send_queue: self.send_queue.clone(), + interest: Ready::hup(), + registered: AtomicBool::new(false), + }) + } + + /// Register this connection with the IO event loop. 
+ pub fn register_socket( + &self, + reg: Token, + event_loop: &mut EventLoop, + ) -> io::Result<()> { + if self.registered.load(AtomicOrdering::SeqCst) { + return Ok(()); + } + trace!(target: "network", "connection register; token={:?}", reg); + if let Err(e) = event_loop.register( + &self.socket, + reg, + self.interest, + PollOpt::edge(), /* | PollOpt::oneshot() */ + ) { + // TODO: oneshot is broken on windows + trace!(target: "network", "Failed to register {:?}, {:?}", reg, e); + } + self.registered.store(true, AtomicOrdering::SeqCst); + Ok(()) + } + + /// Update connection registration. Should be called at the end of the IO handler. + pub fn update_socket( + &self, + reg: Token, + event_loop: &mut EventLoop, + ) -> io::Result<()> { + trace!(target: "network", "connection reregister; token={:?}", reg); + if !self.registered.load(AtomicOrdering::SeqCst) { + self.register_socket(reg, event_loop) } else { - event_loop.reregister(&self.socket, reg, self.interest, PollOpt::edge() /* | PollOpt::oneshot() */ ).unwrap_or_else(|e| { // TODO: oneshot is broken on windows - trace!(target: "network", "Failed to reregister {:?}, {:?}", reg, e); - }); - Ok(()) - } - } + event_loop + .reregister( + &self.socket, + reg, + self.interest, + PollOpt::edge(), /* | PollOpt::oneshot() */ + ) + .unwrap_or_else(|e| { + // TODO: oneshot is broken on windows + trace!(target: "network", "Failed to reregister {:?}, {:?}", reg, e); + }); + Ok(()) + } + } - /// Delete connection registration. Should be called at the end of the IO handler. - pub fn deregister_socket(&self, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "network", "connection deregister; token={:?}", self.token); - event_loop.deregister(&self.socket).ok(); // ignore errors here - Ok(()) - } + /// Delete connection registration. Should be called at the end of the IO handler. 
+ pub fn deregister_socket( + &self, + event_loop: &mut EventLoop, + ) -> io::Result<()> { + trace!(target: "network", "connection deregister; token={:?}", self.token); + event_loop.deregister(&self.socket).ok(); // ignore errors here + Ok(()) + } } /// Connection write status. #[derive(PartialEq, Eq)] pub enum WriteStatus { - /// Some data is still pending for current packet - Ongoing, - /// All data sent. - Complete + /// Some data is still pending for current packet + Ongoing, + /// All data sent. + Complete, } /// `RLPx` packet pub struct Packet { - pub protocol: u16, - pub data: Bytes, + pub protocol: u16, + pub data: Bytes, } /// Encrypted connection receiving state. enum EncryptedConnectionState { - /// Reading a header. - Header, - /// Reading the rest of the packet. - Payload, + /// Reading a header. + Header, + /// Reading the rest of the packet. + Payload, } /// Connection implementing `RLPx` framing /// https://github.com/ethereum/devp2p/blob/master/rlpx.md#framing pub struct EncryptedConnection { - /// Underlying tcp connection - pub connection: Connection, - /// Egress data encryptor - encoder: CtrMode, - /// Ingress data decryptor - decoder: CtrMode, - /// Ingress data decryptor - mac_encoder: EcbEncryptor>, - /// MAC for egress data - egress_mac: Keccak, - /// MAC for ingress data - ingress_mac: Keccak, - /// Read state - read_state: EncryptedConnectionState, - /// Protocol id for the last received packet - protocol_id: u16, - /// Payload expected to be received for the last header. 
- payload_len: usize, + /// Underlying tcp connection + pub connection: Connection, + /// Egress data encryptor + encoder: CtrMode, + /// Ingress data decryptor + decoder: CtrMode, + /// Ingress data decryptor + mac_encoder: EcbEncryptor>, + /// MAC for egress data + egress_mac: Keccak, + /// MAC for ingress data + ingress_mac: Keccak, + /// Read state + read_state: EncryptedConnectionState, + /// Protocol id for the last received packet + protocol_id: u16, + /// Payload expected to be received for the last header. + payload_len: usize, } impl EncryptedConnection { - /// Create an encrypted connection out of the handshake. - pub fn new(handshake: &mut Handshake) -> Result { - let shared = crypto::ecdh::agree(handshake.ecdhe.secret(), &handshake.remote_ephemeral)?; - let mut nonce_material = H512::new(); - if handshake.originated { - handshake.remote_nonce.copy_to(&mut nonce_material[0..32]); - handshake.nonce.copy_to(&mut nonce_material[32..64]); - } - else { - handshake.nonce.copy_to(&mut nonce_material[0..32]); - handshake.remote_nonce.copy_to(&mut nonce_material[32..64]); - } - let mut key_material = H512::new(); - shared.copy_to(&mut key_material[0..32]); - write_keccak(&nonce_material, &mut key_material[32..64]); - keccak(&key_material).copy_to(&mut key_material[32..64]); - keccak(&key_material).copy_to(&mut key_material[32..64]); + /// Create an encrypted connection out of the handshake. 
+ pub fn new(handshake: &mut Handshake) -> Result { + let shared = crypto::ecdh::agree(handshake.ecdhe.secret(), &handshake.remote_ephemeral)?; + let mut nonce_material = H512::new(); + if handshake.originated { + handshake.remote_nonce.copy_to(&mut nonce_material[0..32]); + handshake.nonce.copy_to(&mut nonce_material[32..64]); + } else { + handshake.nonce.copy_to(&mut nonce_material[0..32]); + handshake.remote_nonce.copy_to(&mut nonce_material[32..64]); + } + let mut key_material = H512::new(); + shared.copy_to(&mut key_material[0..32]); + write_keccak(&nonce_material, &mut key_material[32..64]); + keccak(&key_material).copy_to(&mut key_material[32..64]); + keccak(&key_material).copy_to(&mut key_material[32..64]); - let iv = vec![0u8; 16]; - let encoder = CtrMode::new(AesSafe256Encryptor::new(&key_material[32..64]), iv); - let iv = vec![0u8; 16]; - let decoder = CtrMode::new(AesSafe256Encryptor::new(&key_material[32..64]), iv); + let iv = vec![0u8; 16]; + let encoder = CtrMode::new(AesSafe256Encryptor::new(&key_material[32..64]), iv); + let iv = vec![0u8; 16]; + let decoder = CtrMode::new(AesSafe256Encryptor::new(&key_material[32..64]), iv); - keccak(&key_material).copy_to(&mut key_material[32..64]); - let mac_encoder = EcbEncryptor::new(AesSafe256Encryptor::new(&key_material[32..64]), NoPadding); + keccak(&key_material).copy_to(&mut key_material[32..64]); + let mac_encoder = + EcbEncryptor::new(AesSafe256Encryptor::new(&key_material[32..64]), NoPadding); - let mut egress_mac = Keccak::new_keccak256(); - let mut mac_material = H256::from_slice(&key_material[32..64]) ^ handshake.remote_nonce; - egress_mac.update(&mac_material); - egress_mac.update(if handshake.originated { &handshake.auth_cipher } else { &handshake.ack_cipher }); + let mut egress_mac = Keccak::new_keccak256(); + let mut mac_material = H256::from_slice(&key_material[32..64]) ^ handshake.remote_nonce; + egress_mac.update(&mac_material); + egress_mac.update(if handshake.originated { + 
&handshake.auth_cipher + } else { + &handshake.ack_cipher + }); - let mut ingress_mac = Keccak::new_keccak256(); - mac_material = H256::from_slice(&key_material[32..64]) ^ handshake.nonce; - ingress_mac.update(&mac_material); - ingress_mac.update(if handshake.originated { &handshake.ack_cipher } else { &handshake.auth_cipher }); + let mut ingress_mac = Keccak::new_keccak256(); + mac_material = H256::from_slice(&key_material[32..64]) ^ handshake.nonce; + ingress_mac.update(&mac_material); + ingress_mac.update(if handshake.originated { + &handshake.ack_cipher + } else { + &handshake.auth_cipher + }); - let old_connection = handshake.connection.try_clone()?; - let connection = ::std::mem::replace(&mut handshake.connection, old_connection); - let mut enc = EncryptedConnection { - connection, - encoder, - decoder, - mac_encoder, - egress_mac, - ingress_mac, - read_state: EncryptedConnectionState::Header, - protocol_id: 0, - payload_len: 0, - }; - enc.connection.expect(ENCRYPTED_HEADER_LEN); - Ok(enc) - } + let old_connection = handshake.connection.try_clone()?; + let connection = ::std::mem::replace(&mut handshake.connection, old_connection); + let mut enc = EncryptedConnection { + connection, + encoder, + decoder, + mac_encoder, + egress_mac, + ingress_mac, + read_state: EncryptedConnectionState::Header, + protocol_id: 0, + payload_len: 0, + }; + enc.connection.expect(ENCRYPTED_HEADER_LEN); + Ok(enc) + } - /// Send a packet - pub fn send_packet(&mut self, io: &IoContext, payload: &[u8]) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { - let mut header = RlpStream::new(); - let len = payload.len(); - if len > MAX_PAYLOAD_SIZE { - bail!(ErrorKind::OversizedPacket); - } - header.append_raw(&[(len >> 16) as u8, (len >> 8) as u8, len as u8], 1); - header.append_raw(&[0xc2u8, 0x80u8, 0x80u8], 1); - //TODO: get rid of vectors here - let mut header = header.out(); - let padding = (16 - (payload.len() % 16)) % 16; - header.resize(16, 0u8); + /// Send a packet 
+ pub fn send_packet( + &mut self, + io: &IoContext, + payload: &[u8], + ) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + let mut header = RlpStream::new(); + let len = payload.len(); + if len > MAX_PAYLOAD_SIZE { + bail!(ErrorKind::OversizedPacket); + } + header.append_raw(&[(len >> 16) as u8, (len >> 8) as u8, len as u8], 1); + header.append_raw(&[0xc2u8, 0x80u8, 0x80u8], 1); + //TODO: get rid of vectors here + let mut header = header.out(); + let padding = (16 - (payload.len() % 16)) % 16; + header.resize(16, 0u8); - let mut packet = vec![0u8; 32 + payload.len() + padding + 16]; - self.encoder.encrypt(&mut RefReadBuffer::new(&header), &mut RefWriteBuffer::new(&mut packet), false).expect("Invalid length or padding"); - EncryptedConnection::update_mac(&mut self.egress_mac, &mut self.mac_encoder, &packet[0..16]); - self.egress_mac.clone().finalize(&mut packet[16..32]); - self.encoder.encrypt(&mut RefReadBuffer::new(payload), &mut RefWriteBuffer::new(&mut packet[32..(32 + len)]), padding == 0).expect("Invalid length or padding"); - if padding != 0 { - let pad = [0u8; 16]; - self.encoder.encrypt(&mut RefReadBuffer::new(&pad[0..padding]), &mut RefWriteBuffer::new(&mut packet[(32 + len)..(32 + len + padding)]), true).expect("Invalid length or padding"); - } - self.egress_mac.update(&packet[32..(32 + len + padding)]); - EncryptedConnection::update_mac(&mut self.egress_mac, &mut self.mac_encoder, &[0u8; 0]); - self.egress_mac.clone().finalize(&mut packet[(32 + len + padding)..]); - self.connection.send(io, packet); - Ok(()) - } + let mut packet = vec![0u8; 32 + payload.len() + padding + 16]; + self.encoder + .encrypt( + &mut RefReadBuffer::new(&header), + &mut RefWriteBuffer::new(&mut packet), + false, + ) + .expect("Invalid length or padding"); + EncryptedConnection::update_mac( + &mut self.egress_mac, + &mut self.mac_encoder, + &packet[0..16], + ); + self.egress_mac.clone().finalize(&mut packet[16..32]); + self.encoder + .encrypt( + &mut 
RefReadBuffer::new(payload), + &mut RefWriteBuffer::new(&mut packet[32..(32 + len)]), + padding == 0, + ) + .expect("Invalid length or padding"); + if padding != 0 { + let pad = [0u8; 16]; + self.encoder + .encrypt( + &mut RefReadBuffer::new(&pad[0..padding]), + &mut RefWriteBuffer::new(&mut packet[(32 + len)..(32 + len + padding)]), + true, + ) + .expect("Invalid length or padding"); + } + self.egress_mac.update(&packet[32..(32 + len + padding)]); + EncryptedConnection::update_mac(&mut self.egress_mac, &mut self.mac_encoder, &[0u8; 0]); + self.egress_mac + .clone() + .finalize(&mut packet[(32 + len + padding)..]); + self.connection.send(io, packet); + Ok(()) + } - /// Decrypt and authenticate an incoming packet header. Prepare for receiving payload. - fn read_header(&mut self, header: &[u8]) -> Result<(), Error> { - if header.len() != ENCRYPTED_HEADER_LEN { - return Err(ErrorKind::Auth.into()); - } - EncryptedConnection::update_mac(&mut self.ingress_mac, &mut self.mac_encoder, &header[0..16]); - let mac = &header[16..]; - let mut expected = H256::new(); - self.ingress_mac.clone().finalize(&mut expected); - if mac != &expected[0..16] { - return Err(ErrorKind::Auth.into()); - } + /// Decrypt and authenticate an incoming packet header. Prepare for receiving payload. 
+ fn read_header(&mut self, header: &[u8]) -> Result<(), Error> { + if header.len() != ENCRYPTED_HEADER_LEN { + return Err(ErrorKind::Auth.into()); + } + EncryptedConnection::update_mac( + &mut self.ingress_mac, + &mut self.mac_encoder, + &header[0..16], + ); + let mac = &header[16..]; + let mut expected = H256::new(); + self.ingress_mac.clone().finalize(&mut expected); + if mac != &expected[0..16] { + return Err(ErrorKind::Auth.into()); + } - let mut hdec = H128::new(); - self.decoder.decrypt(&mut RefReadBuffer::new(&header[0..16]), &mut RefWriteBuffer::new(&mut hdec), false).expect("Invalid length or padding"); + let mut hdec = H128::new(); + self.decoder + .decrypt( + &mut RefReadBuffer::new(&header[0..16]), + &mut RefWriteBuffer::new(&mut hdec), + false, + ) + .expect("Invalid length or padding"); - let length = ((((hdec[0] as u32) << 8) + (hdec[1] as u32)) << 8) + (hdec[2] as u32); - let header_rlp = Rlp::new(&hdec[3..6]); - let protocol_id = header_rlp.val_at::(0)?; + let length = ((((hdec[0] as u32) << 8) + (hdec[1] as u32)) << 8) + (hdec[2] as u32); + let header_rlp = Rlp::new(&hdec[3..6]); + let protocol_id = header_rlp.val_at::(0)?; - self.payload_len = length as usize; - self.protocol_id = protocol_id; - self.read_state = EncryptedConnectionState::Payload; + self.payload_len = length as usize; + self.protocol_id = protocol_id; + self.read_state = EncryptedConnectionState::Payload; - let padding = (16 - (length % 16)) % 16; - let full_length = length + padding + 16; - self.connection.expect(full_length as usize); - Ok(()) - } + let padding = (16 - (length % 16)) % 16; + let full_length = length + padding + 16; + self.connection.expect(full_length as usize); + Ok(()) + } - /// Decrypt and authenticate packet payload. 
- fn read_payload(&mut self, payload: &[u8]) -> Result { - let padding = (16 - (self.payload_len % 16)) % 16; - let full_length = self.payload_len + padding + 16; - if payload.len() != full_length { - return Err(ErrorKind::Auth.into()); - } - self.ingress_mac.update(&payload[0..payload.len() - 16]); - EncryptedConnection::update_mac(&mut self.ingress_mac, &mut self.mac_encoder, &[0u8; 0]); - let mac = &payload[(payload.len() - 16)..]; - let mut expected = H128::new(); - self.ingress_mac.clone().finalize(&mut expected); - if mac != &expected[..] { - return Err(ErrorKind::Auth.into()); - } + /// Decrypt and authenticate packet payload. + fn read_payload(&mut self, payload: &[u8]) -> Result { + let padding = (16 - (self.payload_len % 16)) % 16; + let full_length = self.payload_len + padding + 16; + if payload.len() != full_length { + return Err(ErrorKind::Auth.into()); + } + self.ingress_mac.update(&payload[0..payload.len() - 16]); + EncryptedConnection::update_mac(&mut self.ingress_mac, &mut self.mac_encoder, &[0u8; 0]); + let mac = &payload[(payload.len() - 16)..]; + let mut expected = H128::new(); + self.ingress_mac.clone().finalize(&mut expected); + if mac != &expected[..] 
{ + return Err(ErrorKind::Auth.into()); + } - let mut packet = vec![0u8; self.payload_len]; - self.decoder.decrypt(&mut RefReadBuffer::new(&payload[0..self.payload_len]), &mut RefWriteBuffer::new(&mut packet), false).expect("Invalid length or padding"); - let mut pad_buf = [0u8; 16]; - self.decoder.decrypt(&mut RefReadBuffer::new(&payload[self.payload_len..(payload.len() - 16)]), &mut RefWriteBuffer::new(&mut pad_buf), false).expect("Invalid length or padding"); - Ok(Packet { - protocol: self.protocol_id, - data: packet - }) - } + let mut packet = vec![0u8; self.payload_len]; + self.decoder + .decrypt( + &mut RefReadBuffer::new(&payload[0..self.payload_len]), + &mut RefWriteBuffer::new(&mut packet), + false, + ) + .expect("Invalid length or padding"); + let mut pad_buf = [0u8; 16]; + self.decoder + .decrypt( + &mut RefReadBuffer::new(&payload[self.payload_len..(payload.len() - 16)]), + &mut RefWriteBuffer::new(&mut pad_buf), + false, + ) + .expect("Invalid length or padding"); + Ok(Packet { + protocol: self.protocol_id, + data: packet, + }) + } - /// Update MAC after reading or writing any data. - fn update_mac(mac: &mut Keccak, mac_encoder: &mut EcbEncryptor>, seed: &[u8]) { - let mut prev = H128::new(); - mac.clone().finalize(&mut prev); - let mut enc = H128::new(); - mac_encoder.encrypt(&mut RefReadBuffer::new(&prev), &mut RefWriteBuffer::new(&mut enc), true).expect("Error updating MAC"); - mac_encoder.reset(); + /// Update MAC after reading or writing any data. 
+ fn update_mac( + mac: &mut Keccak, + mac_encoder: &mut EcbEncryptor>, + seed: &[u8], + ) { + let mut prev = H128::new(); + mac.clone().finalize(&mut prev); + let mut enc = H128::new(); + mac_encoder + .encrypt( + &mut RefReadBuffer::new(&prev), + &mut RefWriteBuffer::new(&mut enc), + true, + ) + .expect("Error updating MAC"); + mac_encoder.reset(); - enc = enc ^ if seed.is_empty() { prev } else { H128::from_slice(seed) }; - mac.update(&enc); - } + enc = enc + ^ if seed.is_empty() { + prev + } else { + H128::from_slice(seed) + }; + mac.update(&enc); + } - /// Readable IO handler. Tracker receive status and returns decoded packet if available. - pub fn readable(&mut self, io: &IoContext) -> Result, Error> where Message: Send + Clone + Sync + 'static { - io.clear_timer(self.connection.token)?; - if let EncryptedConnectionState::Header = self.read_state { - if let Some(data) = self.connection.readable()? { - self.read_header(&data)?; - io.register_timer(self.connection.token, RECEIVE_PAYLOAD)?; - } - }; - if let EncryptedConnectionState::Payload = self.read_state { - match self.connection.readable()? { - Some(data) => { - self.read_state = EncryptedConnectionState::Header; - self.connection.expect(ENCRYPTED_HEADER_LEN); - Ok(Some(self.read_payload(&data)?)) - }, - None => Ok(None) - } - } else { - Ok(None) - } - } + /// Readable IO handler. Tracker receive status and returns decoded packet if available. + pub fn readable(&mut self, io: &IoContext) -> Result, Error> + where + Message: Send + Clone + Sync + 'static, + { + io.clear_timer(self.connection.token)?; + if let EncryptedConnectionState::Header = self.read_state { + if let Some(data) = self.connection.readable()? { + self.read_header(&data)?; + io.register_timer(self.connection.token, RECEIVE_PAYLOAD)?; + } + }; + if let EncryptedConnectionState::Payload = self.read_state { + match self.connection.readable()? 
{ + Some(data) => { + self.read_state = EncryptedConnectionState::Header; + self.connection.expect(ENCRYPTED_HEADER_LEN); + Ok(Some(self.read_payload(&data)?)) + } + None => Ok(None), + } + } else { + Ok(None) + } + } - /// Writable IO handler. Processes send queue. - pub fn writable(&mut self, io: &IoContext) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { - self.connection.writable(io)?; - Ok(()) - } + /// Writable IO handler. Processes send queue. + pub fn writable(&mut self, io: &IoContext) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + self.connection.writable(io)?; + Ok(()) + } } #[test] pub fn test_encryption() { - use ethereum_types::{H256, H128}; - use std::str::FromStr; - let key = H256::from_str("2212767d793a7a3d66f869ae324dd11bd17044b82c9f463b8a541a4d089efec5").unwrap(); - let before = H128::from_str("12532abaec065082a3cf1da7d0136f15").unwrap(); - let before2 = H128::from_str("7e99f682356fdfbc6b67a9562787b18a").unwrap(); - let after = H128::from_str("89464c6b04e7c99e555c81d3f7266a05").unwrap(); - let after2 = H128::from_str("85c070030589ef9c7a2879b3a8489316").unwrap(); + use ethereum_types::{H128, H256}; + use std::str::FromStr; + let key = + H256::from_str("2212767d793a7a3d66f869ae324dd11bd17044b82c9f463b8a541a4d089efec5").unwrap(); + let before = H128::from_str("12532abaec065082a3cf1da7d0136f15").unwrap(); + let before2 = H128::from_str("7e99f682356fdfbc6b67a9562787b18a").unwrap(); + let after = H128::from_str("89464c6b04e7c99e555c81d3f7266a05").unwrap(); + let after2 = H128::from_str("85c070030589ef9c7a2879b3a8489316").unwrap(); - let mut got = H128::new(); + let mut got = H128::new(); - let mut encoder = EcbEncryptor::new(AesSafe256Encryptor::new(&key), NoPadding); - encoder.encrypt(&mut RefReadBuffer::new(&before), &mut RefWriteBuffer::new(&mut got), true).unwrap(); - encoder.reset(); - assert_eq!(got, after); - got = H128::new(); - encoder.encrypt(&mut RefReadBuffer::new(&before2), &mut 
RefWriteBuffer::new(&mut got), true).unwrap(); - encoder.reset(); - assert_eq!(got, after2); + let mut encoder = EcbEncryptor::new(AesSafe256Encryptor::new(&key), NoPadding); + encoder + .encrypt( + &mut RefReadBuffer::new(&before), + &mut RefWriteBuffer::new(&mut got), + true, + ) + .unwrap(); + encoder.reset(); + assert_eq!(got, after); + got = H128::new(); + encoder + .encrypt( + &mut RefReadBuffer::new(&before2), + &mut RefWriteBuffer::new(&mut got), + true, + ) + .unwrap(); + encoder.reset(); + assert_eq!(got, after2); } #[cfg(test)] mod tests { - use std::cmp; - use std::collections::VecDeque; - use std::io::{Read, Write, Cursor, ErrorKind, Result, Error}; - use std::sync::atomic::AtomicBool; + use std::{ + cmp, + collections::VecDeque, + io::{Cursor, Error, ErrorKind, Read, Result, Write}, + sync::atomic::AtomicBool, + }; - use mio::{Ready}; - use parity_bytes::Bytes; - use io::*; - use super::*; + use super::*; + use io::*; + use mio::Ready; + use parity_bytes::Bytes; - pub struct TestSocket { - pub read_buffer: Vec, - pub write_buffer: Vec, - pub cursor: usize, - pub buf_size: usize, - } + pub struct TestSocket { + pub read_buffer: Vec, + pub write_buffer: Vec, + pub cursor: usize, + pub buf_size: usize, + } - impl Default for TestSocket { - fn default() -> Self { - TestSocket::new() - } - } + impl Default for TestSocket { + fn default() -> Self { + TestSocket::new() + } + } - impl TestSocket { - pub fn new() -> Self { - TestSocket { - read_buffer: vec![], - write_buffer: vec![], - cursor: 0, - buf_size: 0, - } - } + impl TestSocket { + pub fn new() -> Self { + TestSocket { + read_buffer: vec![], + write_buffer: vec![], + cursor: 0, + buf_size: 0, + } + } - pub fn new_buf(buf_size: usize) -> TestSocket { - TestSocket { - read_buffer: vec![], - write_buffer: vec![], - cursor: 0, - buf_size, - } - } - } + pub fn new_buf(buf_size: usize) -> TestSocket { + TestSocket { + read_buffer: vec![], + write_buffer: vec![], + cursor: 0, + buf_size, + } + } + } - impl 
Read for TestSocket { - fn read(&mut self, buf: &mut [u8]) -> Result { - let end_position = cmp::min(self.read_buffer.len(), self.cursor+buf.len()); - if self.cursor > end_position { return Ok(0) } - let len = cmp::max(end_position - self.cursor, 0); - match len { - 0 => Ok(0), - _ => { - for i in self.cursor..end_position { - buf[i-self.cursor] = self.read_buffer[i]; - } - self.cursor = end_position; - Ok(len) - } - } - } - } + impl Read for TestSocket { + fn read(&mut self, buf: &mut [u8]) -> Result { + let end_position = cmp::min(self.read_buffer.len(), self.cursor + buf.len()); + if self.cursor > end_position { + return Ok(0); + } + let len = cmp::max(end_position - self.cursor, 0); + match len { + 0 => Ok(0), + _ => { + for i in self.cursor..end_position { + buf[i - self.cursor] = self.read_buffer[i]; + } + self.cursor = end_position; + Ok(len) + } + } + } + } - impl Write for TestSocket { - fn write(&mut self, buf: &[u8]) -> Result { - if self.buf_size == 0 || buf.len() < self.buf_size { - self.write_buffer.extend(buf.iter().cloned()); - Ok(buf.len()) - } - else { - self.write_buffer.extend(buf.iter().take(self.buf_size).cloned()); - Ok(self.buf_size) - } - } + impl Write for TestSocket { + fn write(&mut self, buf: &[u8]) -> Result { + if self.buf_size == 0 || buf.len() < self.buf_size { + self.write_buffer.extend(buf.iter().cloned()); + Ok(buf.len()) + } else { + self.write_buffer + .extend(buf.iter().take(self.buf_size).cloned()); + Ok(self.buf_size) + } + } - fn flush(&mut self) -> Result<()> { - unimplemented!(); - } - } + fn flush(&mut self) -> Result<()> { + unimplemented!(); + } + } - impl GenericSocket for TestSocket {} + impl GenericSocket for TestSocket {} - struct TestBrokenSocket { - error: String - } + struct TestBrokenSocket { + error: String, + } - impl Read for TestBrokenSocket { - fn read(&mut self, _: &mut [u8]) -> Result { - Err(Error::new(ErrorKind::Other, self.error.clone())) - } - } + impl Read for TestBrokenSocket { + fn read(&mut self, 
_: &mut [u8]) -> Result { + Err(Error::new(ErrorKind::Other, self.error.clone())) + } + } - impl Write for TestBrokenSocket { - fn write(&mut self, _: &[u8]) -> Result { - Err(Error::new(ErrorKind::Other, self.error.clone())) - } + impl Write for TestBrokenSocket { + fn write(&mut self, _: &[u8]) -> Result { + Err(Error::new(ErrorKind::Other, self.error.clone())) + } - fn flush(&mut self) -> Result<()> { - unimplemented!(); - } - } + fn flush(&mut self) -> Result<()> { + unimplemented!(); + } + } - impl GenericSocket for TestBrokenSocket {} + impl GenericSocket for TestBrokenSocket {} - type TestConnection = GenericConnection; + type TestConnection = GenericConnection; - impl Default for TestConnection { - fn default() -> Self { - TestConnection::new() - } - } + impl Default for TestConnection { + fn default() -> Self { + TestConnection::new() + } + } - impl TestConnection { - pub fn new() -> Self { - TestConnection { - token: 999998888usize, - socket: TestSocket::new(), - send_queue: VecDeque::new(), - rec_buf: Bytes::new(), - rec_size: 0, - interest: Ready::hup() | Ready::readable(), - registered: AtomicBool::new(false), - } - } - } + impl TestConnection { + pub fn new() -> Self { + TestConnection { + token: 999998888usize, + socket: TestSocket::new(), + send_queue: VecDeque::new(), + rec_buf: Bytes::new(), + rec_size: 0, + interest: Ready::hup() | Ready::readable(), + registered: AtomicBool::new(false), + } + } + } - type TestBrokenConnection = GenericConnection; + type TestBrokenConnection = GenericConnection; - impl Default for TestBrokenConnection { - fn default() -> Self { - TestBrokenConnection::new() - } - } + impl Default for TestBrokenConnection { + fn default() -> Self { + TestBrokenConnection::new() + } + } - impl TestBrokenConnection { - pub fn new() -> Self { - TestBrokenConnection { - token: 999998888usize, - socket: TestBrokenSocket { error: "test broken socket".to_owned() }, - send_queue: VecDeque::new(), - rec_buf: Bytes::new(), - rec_size: 0, - 
interest: Ready::hup() | Ready::readable(), - registered: AtomicBool::new(false), - } - } - } + impl TestBrokenConnection { + pub fn new() -> Self { + TestBrokenConnection { + token: 999998888usize, + socket: TestBrokenSocket { + error: "test broken socket".to_owned(), + }, + send_queue: VecDeque::new(), + rec_buf: Bytes::new(), + rec_size: 0, + interest: Ready::hup() | Ready::readable(), + registered: AtomicBool::new(false), + } + } + } - fn test_io() -> IoContext { - IoContext::new(IoChannel::disconnected(), 0) - } + fn test_io() -> IoContext { + IoContext::new(IoChannel::disconnected(), 0) + } - #[test] - fn connection_expect() { - let mut connection = TestConnection::new(); - connection.expect(1024); - assert_eq!(1024, connection.rec_size); - } + #[test] + fn connection_expect() { + let mut connection = TestConnection::new(); + connection.expect(1024); + assert_eq!(1024, connection.rec_size); + } - #[test] - fn connection_write_empty() { - let mut connection = TestConnection::new(); - let status = connection.writable(&test_io()); - assert!(status.is_ok()); - assert!(WriteStatus::Complete == status.unwrap()); - } + #[test] + fn connection_write_empty() { + let mut connection = TestConnection::new(); + let status = connection.writable(&test_io()); + assert!(status.is_ok()); + assert!(WriteStatus::Complete == status.unwrap()); + } - #[test] - fn connection_write() { - let mut connection = TestConnection::new(); - let data = Cursor::new(vec![0; 10240]); - connection.send_queue.push_back(data); + #[test] + fn connection_write() { + let mut connection = TestConnection::new(); + let data = Cursor::new(vec![0; 10240]); + connection.send_queue.push_back(data); - let status = connection.writable(&test_io()); - assert!(status.is_ok()); - assert!(WriteStatus::Complete == status.unwrap()); - assert_eq!(10240, connection.socket.write_buffer.len()); - } + let status = connection.writable(&test_io()); + assert!(status.is_ok()); + assert!(WriteStatus::Complete == 
status.unwrap()); + assert_eq!(10240, connection.socket.write_buffer.len()); + } - #[test] - fn connection_write_is_buffered() { - let mut connection = TestConnection::new(); - connection.socket = TestSocket::new_buf(1024); - let data = Cursor::new(vec![0; 10240]); - connection.send_queue.push_back(data); + #[test] + fn connection_write_is_buffered() { + let mut connection = TestConnection::new(); + connection.socket = TestSocket::new_buf(1024); + let data = Cursor::new(vec![0; 10240]); + connection.send_queue.push_back(data); - let status = connection.writable(&test_io()); + let status = connection.writable(&test_io()); - assert!(status.is_ok()); - assert!(WriteStatus::Ongoing == status.unwrap()); - assert_eq!(1024, connection.socket.write_buffer.len()); - } + assert!(status.is_ok()); + assert!(WriteStatus::Ongoing == status.unwrap()); + assert_eq!(1024, connection.socket.write_buffer.len()); + } - #[test] - fn connection_write_to_broken() { - let mut connection = TestBrokenConnection::new(); - let data = Cursor::new(vec![0; 10240]); - connection.send_queue.push_back(data); + #[test] + fn connection_write_to_broken() { + let mut connection = TestBrokenConnection::new(); + let data = Cursor::new(vec![0; 10240]); + connection.send_queue.push_back(data); - let status = connection.writable(&test_io()); + let status = connection.writable(&test_io()); - assert!(!status.is_ok()); - assert_eq!(1, connection.send_queue.len()); - } + assert!(!status.is_ok()); + assert_eq!(1, connection.send_queue.len()); + } - #[test] - fn connection_read() { - let mut connection = TestConnection::new(); - connection.rec_size = 2048; - connection.rec_buf = vec![10; 1024]; - connection.socket.read_buffer = vec![99; 2048]; + #[test] + fn connection_read() { + let mut connection = TestConnection::new(); + connection.rec_size = 2048; + connection.rec_buf = vec![10; 1024]; + connection.socket.read_buffer = vec![99; 2048]; - let status = connection.readable(); + let status = 
connection.readable(); - assert!(status.is_ok()); - assert_eq!(1024, connection.socket.cursor); - } + assert!(status.is_ok()); + assert_eq!(1024, connection.socket.cursor); + } - #[test] - fn connection_read_from_broken() { - let mut connection = TestBrokenConnection::new(); - connection.rec_size = 2048; + #[test] + fn connection_read_from_broken() { + let mut connection = TestBrokenConnection::new(); + connection.rec_size = 2048; - let status = connection.readable(); - assert!(!status.is_ok()); - assert_eq!(0, connection.rec_buf.len()); - } + let status = connection.readable(); + assert!(!status.is_ok()); + assert_eq!(0, connection.rec_buf.len()); + } - #[test] - fn connection_read_nothing() { - let mut connection = TestConnection::new(); - connection.rec_size = 2048; + #[test] + fn connection_read_nothing() { + let mut connection = TestConnection::new(); + connection.rec_size = 2048; - let status = connection.readable(); + let status = connection.readable(); - assert!(status.is_ok()); - assert_eq!(0, connection.rec_buf.len()); - } + assert!(status.is_ok()); + assert_eq!(0, connection.rec_buf.len()); + } - #[test] - fn connection_read_full() { - let mut connection = TestConnection::new(); - connection.rec_size = 1024; - connection.rec_buf = vec![76;1024]; + #[test] + fn connection_read_full() { + let mut connection = TestConnection::new(); + connection.rec_size = 1024; + connection.rec_buf = vec![76; 1024]; - let status = connection.readable(); + let status = connection.readable(); - assert!(status.is_ok()); - assert_eq!(0, connection.socket.cursor); - } + assert!(status.is_ok()); + assert_eq!(0, connection.socket.cursor); + } } diff --git a/util/network-devp2p/src/discovery.rs b/util/network-devp2p/src/discovery.rs index f18469e16..d31968150 100644 --- a/util/network-devp2p/src/discovery.rs +++ b/util/network-devp2p/src/discovery.rs @@ -14,28 +14,28 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. 
If not, see . -use parity_bytes::Bytes; -use std::net::SocketAddr; -use std::collections::{HashSet, HashMap, VecDeque}; -use std::collections::hash_map::Entry; -use std::default::Default; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -use lru_cache::LruCache; -use hash::keccak; use ethereum_types::{H256, H520}; -use rlp::{Rlp, RlpStream}; +use ethkey::{recover, sign, KeyPair, Secret}; +use hash::keccak; +use lru_cache::LruCache; +use network::{Error, ErrorKind, IpFilter}; use node_table::*; -use network::{Error, ErrorKind}; -use ethkey::{Secret, KeyPair, sign, recover}; -use network::IpFilter; +use parity_bytes::Bytes; +use rlp::{Rlp, RlpStream}; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + default::Default, + net::SocketAddr, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; use PROTOCOL_VERSION; -const ADDRESS_BYTES_SIZE: usize = 32; // Size of address type in bytes. -const ADDRESS_BITS: usize = 8 * ADDRESS_BYTES_SIZE; // Denoted by n in [Kademlia]. -const DISCOVERY_MAX_STEPS: u16 = 8; // Max iterations of discovery. (discover) -const BUCKET_SIZE: usize = 16; // Denoted by k in [Kademlia]. Number of nodes stored in each bucket. -const ALPHA: usize = 3; // Denoted by \alpha in [Kademlia]. Number of concurrent FindNode requests. +const ADDRESS_BYTES_SIZE: usize = 32; // Size of address type in bytes. +const ADDRESS_BITS: usize = 8 * ADDRESS_BYTES_SIZE; // Denoted by n in [Kademlia]. +const DISCOVERY_MAX_STEPS: u16 = 8; // Max iterations of discovery. (discover) +const BUCKET_SIZE: usize = 16; // Denoted by k in [Kademlia]. Number of nodes stored in each bucket. +const ALPHA: usize = 3; // Denoted by \alpha in [Kademlia]. Number of concurrent FindNode requests. 
pub const MAX_DATAGRAM_SIZE: usize = 1280; const PACKET_PING: u8 = 1; @@ -48,209 +48,226 @@ const FIND_NODE_TIMEOUT: Duration = Duration::from_secs(2); const EXPIRY_TIME: Duration = Duration::from_secs(20); const MAX_NODES_PING: usize = 32; // Max nodes to add/ping at once const REQUEST_BACKOFF: [Duration; 4] = [ - Duration::from_secs(1), - Duration::from_secs(4), - Duration::from_secs(16), - Duration::from_secs(64) + Duration::from_secs(1), + Duration::from_secs(4), + Duration::from_secs(16), + Duration::from_secs(64), ]; -const NODE_LAST_SEEN_TIMEOUT: Duration = Duration::from_secs(24*60*60); +const NODE_LAST_SEEN_TIMEOUT: Duration = Duration::from_secs(24 * 60 * 60); const OBSERVED_NODES_MAX_SIZE: usize = 10_000; #[derive(Clone, Debug)] pub struct NodeEntry { - pub id: NodeId, - pub endpoint: NodeEndpoint, + pub id: NodeId, + pub endpoint: NodeEndpoint, } #[derive(Debug)] pub struct BucketEntry { - pub address: NodeEntry, - pub id_hash: H256, - pub last_seen: Instant, - backoff_until: Instant, - fail_count: usize, + pub address: NodeEntry, + pub id_hash: H256, + pub last_seen: Instant, + backoff_until: Instant, + fail_count: usize, } impl BucketEntry { - fn new(address: NodeEntry) -> Self { - let now = Instant::now(); - BucketEntry { - id_hash: keccak(address.id), - address, - last_seen: now, - backoff_until: now, - fail_count: 0, - } - } + fn new(address: NodeEntry) -> Self { + let now = Instant::now(); + BucketEntry { + id_hash: keccak(address.id), + address, + last_seen: now, + backoff_until: now, + fail_count: 0, + } + } } struct FindNodeRequest { - // Time when the request was sent - sent_at: Instant, - // Number of items sent by the node - response_count: usize, - // Whether the request have been answered yet - answered: bool, + // Time when the request was sent + sent_at: Instant, + // Number of items sent by the node + response_count: usize, + // Whether the request have been answered yet + answered: bool, } #[derive(Clone, Copy)] enum PingReason { - 
Default, - FromDiscoveryRequest(NodeId, NodeValidity), + Default, + FromDiscoveryRequest(NodeId, NodeValidity), } #[derive(Clone, Copy, PartialEq)] enum NodeCategory { - Bucket, - Observed + Bucket, + Observed, } #[derive(Clone, Copy, PartialEq)] enum NodeValidity { - Ourselves, - ValidNode(NodeCategory), - ExpiredNode(NodeCategory), - UnknownNode + Ourselves, + ValidNode(NodeCategory), + ExpiredNode(NodeCategory), + UnknownNode, } #[derive(Debug)] enum BucketError { - Ourselves, - NotInTheBucket{node_entry: NodeEntry, bucket_distance: usize}, + Ourselves, + NotInTheBucket { + node_entry: NodeEntry, + bucket_distance: usize, + }, } struct PingRequest { - // Time when the request was sent - sent_at: Instant, - // The node to which the request was sent - node: NodeEntry, - // The hash sent in the Ping request - echo_hash: H256, - // The hash Parity used to respond with (until rev 01f825b0e1f1c4c420197b51fc801cbe89284b29) - #[deprecated()] - deprecated_echo_hash: H256, - reason: PingReason + // Time when the request was sent + sent_at: Instant, + // The node to which the request was sent + node: NodeEntry, + // The hash sent in the Ping request + echo_hash: H256, + // The hash Parity used to respond with (until rev 01f825b0e1f1c4c420197b51fc801cbe89284b29) + #[deprecated()] + deprecated_echo_hash: H256, + reason: PingReason, } #[derive(Debug)] pub struct NodeBucket { - nodes: VecDeque, //sorted by last active + nodes: VecDeque, //sorted by last active } impl Default for NodeBucket { - fn default() -> Self { - NodeBucket::new() - } + fn default() -> Self { + NodeBucket::new() + } } impl NodeBucket { - fn new() -> Self { - NodeBucket { - nodes: VecDeque::new() - } - } + fn new() -> Self { + NodeBucket { + nodes: VecDeque::new(), + } + } } pub struct Datagram { - pub payload: Bytes, - pub address: SocketAddr, + pub payload: Bytes, + pub address: SocketAddr, } pub struct Discovery<'a> { - id: NodeId, - id_hash: H256, - secret: Secret, - public_endpoint: NodeEndpoint, - 
discovery_initiated: bool, - discovery_round: Option, - discovery_id: NodeId, - discovery_nodes: HashSet, - node_buckets: Vec, - // Sometimes we don't want to add nodes to the NodeTable, but still want to - // keep track of them to avoid excessive pinging (happens when an unknown node sends - // a discovery request to us -- the node might be on a different net). - other_observed_nodes: LruCache, + id: NodeId, + id_hash: H256, + secret: Secret, + public_endpoint: NodeEndpoint, + discovery_initiated: bool, + discovery_round: Option, + discovery_id: NodeId, + discovery_nodes: HashSet, + node_buckets: Vec, + // Sometimes we don't want to add nodes to the NodeTable, but still want to + // keep track of them to avoid excessive pinging (happens when an unknown node sends + // a discovery request to us -- the node might be on a different net). + other_observed_nodes: LruCache, - in_flight_pings: HashMap, - in_flight_find_nodes: HashMap, - send_queue: VecDeque, - check_timestamps: bool, - adding_nodes: Vec, - ip_filter: IpFilter, - request_backoff: &'a [Duration], + in_flight_pings: HashMap, + in_flight_find_nodes: HashMap, + send_queue: VecDeque, + check_timestamps: bool, + adding_nodes: Vec, + ip_filter: IpFilter, + request_backoff: &'a [Duration], } pub struct TableUpdates { - pub added: HashMap, - pub removed: HashSet, + pub added: HashMap, + pub removed: HashSet, } impl<'a> Discovery<'a> { - pub fn new(key: &KeyPair, public: NodeEndpoint, ip_filter: IpFilter) -> Discovery<'static> { - Discovery { - id: *key.public(), - id_hash: keccak(key.public()), - secret: key.secret().clone(), - public_endpoint: public, - discovery_initiated: false, - discovery_round: None, - discovery_id: NodeId::new(), - discovery_nodes: HashSet::new(), - node_buckets: (0..ADDRESS_BITS).map(|_| NodeBucket::new()).collect(), - other_observed_nodes: LruCache::new(OBSERVED_NODES_MAX_SIZE), - in_flight_pings: HashMap::new(), - in_flight_find_nodes: HashMap::new(), - send_queue: VecDeque::new(), - 
check_timestamps: true, - adding_nodes: Vec::new(), - ip_filter, - request_backoff: &REQUEST_BACKOFF, - } - } + pub fn new(key: &KeyPair, public: NodeEndpoint, ip_filter: IpFilter) -> Discovery<'static> { + Discovery { + id: *key.public(), + id_hash: keccak(key.public()), + secret: key.secret().clone(), + public_endpoint: public, + discovery_initiated: false, + discovery_round: None, + discovery_id: NodeId::new(), + discovery_nodes: HashSet::new(), + node_buckets: (0..ADDRESS_BITS).map(|_| NodeBucket::new()).collect(), + other_observed_nodes: LruCache::new(OBSERVED_NODES_MAX_SIZE), + in_flight_pings: HashMap::new(), + in_flight_find_nodes: HashMap::new(), + send_queue: VecDeque::new(), + check_timestamps: true, + adding_nodes: Vec::new(), + ip_filter, + request_backoff: &REQUEST_BACKOFF, + } + } - /// Add a new node to discovery table. Pings the node. - pub fn add_node(&mut self, e: NodeEntry) { - // If distance returns None, then we are trying to add ourself. - let id_hash = keccak(e.id); - if let Some(dist) = Discovery::distance(&self.id_hash, &id_hash) { - if self.node_buckets[dist].nodes.iter().any(|n| n.id_hash == id_hash) { - return; - } - self.try_ping(e, PingReason::Default); - } - } + /// Add a new node to discovery table. Pings the node. + pub fn add_node(&mut self, e: NodeEntry) { + // If distance returns None, then we are trying to add ourself. + let id_hash = keccak(e.id); + if let Some(dist) = Discovery::distance(&self.id_hash, &id_hash) { + if self.node_buckets[dist] + .nodes + .iter() + .any(|n| n.id_hash == id_hash) + { + return; + } + self.try_ping(e, PingReason::Default); + } + } - /// Add a list of nodes. Pings a few nodes each round - pub fn add_node_list(&mut self, nodes: Vec) { - for node in nodes { - self.add_node(node); - } - } + /// Add a list of nodes. 
Pings a few nodes each round + pub fn add_node_list(&mut self, nodes: Vec) { + for node in nodes { + self.add_node(node); + } + } - fn update_bucket_record(&mut self, e: NodeEntry) -> Result<(), BucketError> { - let id_hash = keccak(e.id); - let dist = match Discovery::distance(&self.id_hash, &id_hash) { - Some(dist) => dist, - None => { - debug!(target: "discovery", "Attempted to update own entry: {:?}", e); - return Err(BucketError::Ourselves); - } - }; - let bucket = &mut self.node_buckets[dist]; - bucket.nodes.iter_mut().find(|n| n.address.id == e.id) - .map_or(Err(BucketError::NotInTheBucket{node_entry: e.clone(), bucket_distance: dist}.into()), |entry| { - entry.address = e; - entry.last_seen = Instant::now(); - entry.backoff_until = Instant::now(); - entry.fail_count = 0; - Ok(()) - }) - } + fn update_bucket_record(&mut self, e: NodeEntry) -> Result<(), BucketError> { + let id_hash = keccak(e.id); + let dist = match Discovery::distance(&self.id_hash, &id_hash) { + Some(dist) => dist, + None => { + debug!(target: "discovery", "Attempted to update own entry: {:?}", e); + return Err(BucketError::Ourselves); + } + }; + let bucket = &mut self.node_buckets[dist]; + bucket + .nodes + .iter_mut() + .find(|n| n.address.id == e.id) + .map_or( + Err(BucketError::NotInTheBucket { + node_entry: e.clone(), + bucket_distance: dist, + } + .into()), + |entry| { + entry.address = e; + entry.last_seen = Instant::now(); + entry.backoff_until = Instant::now(); + entry.fail_count = 0; + Ok(()) + }, + ) + } - fn update_node(&mut self, e: NodeEntry) -> Option { - trace!(target: "discovery", "Inserting {:?}", &e); + fn update_node(&mut self, e: NodeEntry) -> Option { + trace!(target: "discovery", "Inserting {:?}", &e); match self.update_bucket_record(e) { Ok(()) => None, @@ -281,489 +298,550 @@ impl<'a> Discovery<'a> { None } }) - } + } - /// Starts the discovery process at round 0 - fn start(&mut self) { - trace!(target: "discovery", "Starting discovery"); - self.discovery_round = 
Some(0); - self.discovery_id.randomize(); //TODO: use cryptographic nonce - self.discovery_nodes.clear(); - } + /// Starts the discovery process at round 0 + fn start(&mut self) { + trace!(target: "discovery", "Starting discovery"); + self.discovery_round = Some(0); + self.discovery_id.randomize(); //TODO: use cryptographic nonce + self.discovery_nodes.clear(); + } - /// Complete the discovery process - fn stop(&mut self) { - trace!(target: "discovery", "Completing discovery"); - self.discovery_round = None; - self.discovery_nodes.clear(); - } + /// Complete the discovery process + fn stop(&mut self) { + trace!(target: "discovery", "Completing discovery"); + self.discovery_round = None; + self.discovery_nodes.clear(); + } - fn update_new_nodes(&mut self) { - while self.in_flight_pings.len() < MAX_NODES_PING { - match self.adding_nodes.pop() { - Some(next) => self.try_ping(next, PingReason::Default), - None => break, - } - } - } + fn update_new_nodes(&mut self) { + while self.in_flight_pings.len() < MAX_NODES_PING { + match self.adding_nodes.pop() { + Some(next) => self.try_ping(next, PingReason::Default), + None => break, + } + } + } - fn discover(&mut self) { - let discovery_round = match self.discovery_round { - Some(r) => r, - None => return, - }; - if discovery_round == DISCOVERY_MAX_STEPS { - self.stop(); - return; - } - trace!(target: "discovery", "Starting round {:?}", self.discovery_round); - let mut tried_count = 0; - { - let nearest = self.nearest_node_entries(&self.discovery_id).into_iter(); - let nearest = nearest.filter(|x| !self.discovery_nodes.contains(&x.id)).take(ALPHA).collect::>(); - let target = self.discovery_id; - for r in nearest { - match self.send_find_node(&r, &target) { - Ok(()) => { - self.discovery_nodes.insert(r.id); - tried_count += 1; - }, - Err(e) => { - warn!(target: "discovery", "Error sending node discovery packet for {:?}: {:?}", &r.endpoint, e); - }, - }; - } - } + fn discover(&mut self) { + let discovery_round = match 
self.discovery_round { + Some(r) => r, + None => return, + }; + if discovery_round == DISCOVERY_MAX_STEPS { + self.stop(); + return; + } + trace!(target: "discovery", "Starting round {:?}", self.discovery_round); + let mut tried_count = 0; + { + let nearest = self.nearest_node_entries(&self.discovery_id).into_iter(); + let nearest = nearest + .filter(|x| !self.discovery_nodes.contains(&x.id)) + .take(ALPHA) + .collect::>(); + let target = self.discovery_id; + for r in nearest { + match self.send_find_node(&r, &target) { + Ok(()) => { + self.discovery_nodes.insert(r.id); + tried_count += 1; + } + Err(e) => { + warn!(target: "discovery", "Error sending node discovery packet for {:?}: {:?}", &r.endpoint, e); + } + }; + } + } - if tried_count == 0 { - self.stop(); - return; - } - self.discovery_round = Some(discovery_round + 1); - } + if tried_count == 0 { + self.stop(); + return; + } + self.discovery_round = Some(discovery_round + 1); + } - /// The base 2 log of the distance between a and b using the XOR metric. - fn distance(a: &H256, b: &H256) -> Option { - for i in (0..ADDRESS_BYTES_SIZE).rev() { - let byte_index = ADDRESS_BYTES_SIZE - i - 1; - let d: u8 = a[byte_index] ^ b[byte_index]; - if d != 0 { - let high_bit_index = 7 - d.leading_zeros() as usize; - return Some(i * 8 + high_bit_index); - } - } - None // a and b are equal, so log distance is -inf - } + /// The base 2 log of the distance between a and b using the XOR metric. 
+ fn distance(a: &H256, b: &H256) -> Option { + for i in (0..ADDRESS_BYTES_SIZE).rev() { + let byte_index = ADDRESS_BYTES_SIZE - i - 1; + let d: u8 = a[byte_index] ^ b[byte_index]; + if d != 0 { + let high_bit_index = 7 - d.leading_zeros() as usize; + return Some(i * 8 + high_bit_index); + } + } + None // a and b are equal, so log distance is -inf + } - fn try_ping(&mut self, node: NodeEntry, reason: PingReason) { - if !self.is_allowed(&node) { - trace!(target: "discovery", "Node {:?} not allowed", node); - return; - } - if self.in_flight_pings.contains_key(&node.id) || self.in_flight_find_nodes.contains_key(&node.id) { - trace!(target: "discovery", "Node {:?} in flight requests", node); - return; - } - if self.adding_nodes.iter().any(|n| n.id == node.id) { - trace!(target: "discovery", "Node {:?} in adding nodes", node); - return; - } + fn try_ping(&mut self, node: NodeEntry, reason: PingReason) { + if !self.is_allowed(&node) { + trace!(target: "discovery", "Node {:?} not allowed", node); + return; + } + if self.in_flight_pings.contains_key(&node.id) + || self.in_flight_find_nodes.contains_key(&node.id) + { + trace!(target: "discovery", "Node {:?} in flight requests", node); + return; + } + if self.adding_nodes.iter().any(|n| n.id == node.id) { + trace!(target: "discovery", "Node {:?} in adding nodes", node); + return; + } - if self.in_flight_pings.len() < MAX_NODES_PING { - self.ping(&node, reason) - .unwrap_or_else(|e| { - warn!(target: "discovery", "Error sending Ping packet: {:?}", e); - }); - } else { - self.adding_nodes.push(node); - } - } + if self.in_flight_pings.len() < MAX_NODES_PING { + self.ping(&node, reason).unwrap_or_else(|e| { + warn!(target: "discovery", "Error sending Ping packet: {:?}", e); + }); + } else { + self.adding_nodes.push(node); + } + } - fn ping(&mut self, node: &NodeEntry, reason: PingReason) -> Result<(), Error> { - let mut rlp = RlpStream::new_list(4); - rlp.append(&PROTOCOL_VERSION); - self.public_endpoint.to_rlp_list(&mut rlp); - 
node.endpoint.to_rlp_list(&mut rlp); - append_expiration(&mut rlp); - let old_parity_hash = keccak(rlp.as_raw()); - let hash = self.send_packet(PACKET_PING, &node.endpoint.udp_address(), &rlp.drain())?; + fn ping(&mut self, node: &NodeEntry, reason: PingReason) -> Result<(), Error> { + let mut rlp = RlpStream::new_list(4); + rlp.append(&PROTOCOL_VERSION); + self.public_endpoint.to_rlp_list(&mut rlp); + node.endpoint.to_rlp_list(&mut rlp); + append_expiration(&mut rlp); + let old_parity_hash = keccak(rlp.as_raw()); + let hash = self.send_packet(PACKET_PING, &node.endpoint.udp_address(), &rlp.drain())?; - self.in_flight_pings.insert(node.id, PingRequest { - sent_at: Instant::now(), - node: node.clone(), - echo_hash: hash, - deprecated_echo_hash: old_parity_hash, - reason: reason - }); + self.in_flight_pings.insert( + node.id, + PingRequest { + sent_at: Instant::now(), + node: node.clone(), + echo_hash: hash, + deprecated_echo_hash: old_parity_hash, + reason: reason, + }, + ); - trace!(target: "discovery", "Sent Ping to {:?} ; node_id={:#x}", &node.endpoint, node.id); - Ok(()) - } + trace!(target: "discovery", "Sent Ping to {:?} ; node_id={:#x}", &node.endpoint, node.id); + Ok(()) + } - fn send_find_node(&mut self, node: &NodeEntry, target: &NodeId) -> Result<(), Error> { - let mut rlp = RlpStream::new_list(2); - rlp.append(target); - append_expiration(&mut rlp); - self.send_packet(PACKET_FIND_NODE, &node.endpoint.udp_address(), &rlp.drain())?; + fn send_find_node(&mut self, node: &NodeEntry, target: &NodeId) -> Result<(), Error> { + let mut rlp = RlpStream::new_list(2); + rlp.append(target); + append_expiration(&mut rlp); + self.send_packet(PACKET_FIND_NODE, &node.endpoint.udp_address(), &rlp.drain())?; - self.in_flight_find_nodes.insert(node.id, FindNodeRequest { - sent_at: Instant::now(), - response_count: 0, - answered: false, - }); + self.in_flight_find_nodes.insert( + node.id, + FindNodeRequest { + sent_at: Instant::now(), + response_count: 0, + answered: false, 
+ }, + ); - trace!(target: "discovery", "Sent FindNode to {:?}", &node.endpoint); - Ok(()) - } + trace!(target: "discovery", "Sent FindNode to {:?}", &node.endpoint); + Ok(()) + } - fn send_packet(&mut self, packet_id: u8, address: &SocketAddr, payload: &[u8]) -> Result { - let packet = assemble_packet(packet_id, payload, &self.secret)?; - let hash = H256::from(&packet[0..32]); - self.send_to(packet, address.clone()); - Ok(hash) - } + fn send_packet( + &mut self, + packet_id: u8, + address: &SocketAddr, + payload: &[u8], + ) -> Result { + let packet = assemble_packet(packet_id, payload, &self.secret)?; + let hash = H256::from(&packet[0..32]); + self.send_to(packet, address.clone()); + Ok(hash) + } - fn nearest_node_entries(&self, target: &NodeId) -> Vec { - let target_hash = keccak(target); - let target_distance = self.id_hash ^ target_hash; + fn nearest_node_entries(&self, target: &NodeId) -> Vec { + let target_hash = keccak(target); + let target_distance = self.id_hash ^ target_hash; - let mut ret = Vec::::with_capacity(BUCKET_SIZE); + let mut ret = Vec::::with_capacity(BUCKET_SIZE); - // Sort bucket entries by distance to target and append to end of result vector. - let append_bucket = |results: &mut Vec, bucket: &NodeBucket| -> bool { - let mut sorted_entries: Vec<&BucketEntry> = bucket.nodes.iter().collect(); - sorted_entries.sort_unstable_by_key(|entry| entry.id_hash ^ target_hash); + // Sort bucket entries by distance to target and append to end of result vector. 
+ let append_bucket = |results: &mut Vec, bucket: &NodeBucket| -> bool { + let mut sorted_entries: Vec<&BucketEntry> = bucket.nodes.iter().collect(); + sorted_entries.sort_unstable_by_key(|entry| entry.id_hash ^ target_hash); - let remaining_capacity = results.capacity() - results.len(); - let to_append = if remaining_capacity < sorted_entries.len() { - &sorted_entries[0..remaining_capacity] - } else { - &sorted_entries - }; - for entry in to_append.iter() { - results.push(entry.address.clone()); - } - results.len() == results.capacity() - }; + let remaining_capacity = results.capacity() - results.len(); + let to_append = if remaining_capacity < sorted_entries.len() { + &sorted_entries[0..remaining_capacity] + } else { + &sorted_entries + }; + for entry in to_append.iter() { + results.push(entry.address.clone()); + } + results.len() == results.capacity() + }; - // This algorithm leverages the structure of the routing table to efficiently find the - // nearest entries to a target hash. First, we compute the XOR distance from this node to - // the target. On a first pass, we iterate from the MSB of the distance, stopping at any - // buckets where the distance bit is set, and skipping the buckets where it is unset. These - // must be in order the nearest to the target. On a second pass, we traverse from LSB to - // MSB, appending the buckets skipped on the first pass. The reason this works is that all - // entries in bucket i have a common prefix of length exactly 32 - i - 1 with the ID of this - // node. + // This algorithm leverages the structure of the routing table to efficiently find the + // nearest entries to a target hash. First, we compute the XOR distance from this node to + // the target. On a first pass, we iterate from the MSB of the distance, stopping at any + // buckets where the distance bit is set, and skipping the buckets where it is unset. These + // must be in order the nearest to the target. 
On a second pass, we traverse from LSB to + // MSB, appending the buckets skipped on the first pass. The reason this works is that all + // entries in bucket i have a common prefix of length exactly 32 - i - 1 with the ID of this + // node. - for i in 0..ADDRESS_BITS { - if ((target_distance[i / 8] << (i % 8)) & 0x80) != 0 { - let bucket = &self.node_buckets[ADDRESS_BITS - i - 1]; - if !bucket.nodes.is_empty() && append_bucket(&mut ret, bucket) { - return ret; - } - } - } - for i in (0..ADDRESS_BITS).rev() { - if ((target_distance[i / 8] << (i % 8)) & 0x80) == 0 { - let bucket = &self.node_buckets[ADDRESS_BITS - i - 1]; - if !bucket.nodes.is_empty() && append_bucket(&mut ret, bucket) { - return ret; - } - } - } - ret - } + for i in 0..ADDRESS_BITS { + if ((target_distance[i / 8] << (i % 8)) & 0x80) != 0 { + let bucket = &self.node_buckets[ADDRESS_BITS - i - 1]; + if !bucket.nodes.is_empty() && append_bucket(&mut ret, bucket) { + return ret; + } + } + } + for i in (0..ADDRESS_BITS).rev() { + if ((target_distance[i / 8] << (i % 8)) & 0x80) == 0 { + let bucket = &self.node_buckets[ADDRESS_BITS - i - 1]; + if !bucket.nodes.is_empty() && append_bucket(&mut ret, bucket) { + return ret; + } + } + } + ret + } - fn send_to(&mut self, payload: Bytes, address: SocketAddr) { - self.send_queue.push_back(Datagram { payload, address }); - } + fn send_to(&mut self, payload: Bytes, address: SocketAddr) { + self.send_queue.push_back(Datagram { payload, address }); + } - pub fn on_packet(&mut self, packet: &[u8], from: SocketAddr) -> Result, Error> { - // validate packet - if packet.len() < 32 + 65 + 4 + 1 { - return Err(ErrorKind::BadProtocol.into()); - } + pub fn on_packet( + &mut self, + packet: &[u8], + from: SocketAddr, + ) -> Result, Error> { + // validate packet + if packet.len() < 32 + 65 + 4 + 1 { + return Err(ErrorKind::BadProtocol.into()); + } - let hash_signed = keccak(&packet[32..]); - if hash_signed[..] 
!= packet[0..32] { - return Err(ErrorKind::BadProtocol.into()); - } + let hash_signed = keccak(&packet[32..]); + if hash_signed[..] != packet[0..32] { + return Err(ErrorKind::BadProtocol.into()); + }

- let signed = &packet[(32 + 65)..]; - let signature = H520::from_slice(&packet[32..(32 + 65)]); - let node_id = recover(&signature.into(), &keccak(signed))?; - let packet_id = signed[0]; - let rlp = Rlp::new(&signed[1..]); - match packet_id { - PACKET_PING => self.on_ping(&rlp, &node_id, &from, &hash_signed), - PACKET_PONG => self.on_pong(&rlp, &node_id, &from), - PACKET_FIND_NODE => self.on_find_node(&rlp, &node_id, &from), - PACKET_NEIGHBOURS => self.on_neighbours(&rlp, &node_id, &from), - _ => { - debug!(target: "discovery", "Unknown UDP packet: {}", packet_id); - Ok(None) - } - } - } + let signed = &packet[(32 + 65)..]; + let signature = H520::from_slice(&packet[32..(32 + 65)]); + let node_id = recover(&signature.into(), &keccak(signed))?; + let packet_id = signed[0]; + let rlp = Rlp::new(&signed[1..]); + match packet_id { + PACKET_PING => self.on_ping(&rlp, &node_id, &from, &hash_signed), + PACKET_PONG => self.on_pong(&rlp, &node_id, &from), + PACKET_FIND_NODE => self.on_find_node(&rlp, &node_id, &from), + PACKET_NEIGHBOURS => self.on_neighbours(&rlp, &node_id, &from), + _ => { + debug!(target: "discovery", "Unknown UDP packet: {}", packet_id); + Ok(None) + } + } + }

- /// Validate that given timestamp is in within one second of now or in the future - fn check_timestamp(&self, timestamp: u64) -> Result<(), Error> { - let secs_since_epoch = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); - if self.check_timestamps && timestamp < secs_since_epoch { - debug!(target: "discovery", "Expired packet"); - return Err(ErrorKind::Expired.into()); - } - Ok(()) - } + /// Validate that given timestamp is within one second of now or in the future + fn check_timestamp(&self, timestamp: u64) -> Result<(), Error> { + let secs_since_epoch = 
SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + if self.check_timestamps && timestamp < secs_since_epoch { + debug!(target: "discovery", "Expired packet"); + return Err(ErrorKind::Expired.into()); + } + Ok(()) + } - fn is_allowed(&self, entry: &NodeEntry) -> bool { - entry.endpoint.is_allowed(&self.ip_filter) && entry.id != self.id - } + fn is_allowed(&self, entry: &NodeEntry) -> bool { + entry.endpoint.is_allowed(&self.ip_filter) && entry.id != self.id + } - fn on_ping(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr, echo_hash: &[u8]) -> Result, Error> { - trace!(target: "discovery", "Got Ping from {:?}", &from); - let ping_from = if let Ok(node_endpoint) = NodeEndpoint::from_rlp(&rlp.at(1)?) { - node_endpoint - } else { - let mut address = from.clone(); - // address here is the node's tcp port. If we are unable to get the `NodeEndpoint` from the `ping_from` - // rlp field then this is most likely a BootNode, set the tcp port to 0 because it can not be used for syncing. - address.set_port(0); - NodeEndpoint { - address, - udp_port: from.port() - } - }; - let ping_to = NodeEndpoint::from_rlp(&rlp.at(2)?)?; - let timestamp: u64 = rlp.val_at(3)?; - self.check_timestamp(timestamp)?; - let mut response = RlpStream::new_list(3); - let pong_to = NodeEndpoint { - address: from.clone(), - udp_port: ping_from.udp_port - }; - // Here the PONG's `To` field should be the node we are - // sending the request to - // WARNING: this field _should not be used_, but old Parity versions - // use it in order to get the node's address. - // So this is a temporary fix so that older Parity versions don't brake completely. 
- ping_to.to_rlp_list(&mut response); - // pong_to.to_rlp_list(&mut response); + fn on_ping( + &mut self, + rlp: &Rlp, + node_id: &NodeId, + from: &SocketAddr, + echo_hash: &[u8], + ) -> Result, Error> { + trace!(target: "discovery", "Got Ping from {:?}", &from); + let ping_from = if let Ok(node_endpoint) = NodeEndpoint::from_rlp(&rlp.at(1)?) { + node_endpoint + } else { + let mut address = from.clone(); + // address here is the node's tcp port. If we are unable to get the `NodeEndpoint` from the `ping_from` + // rlp field then this is most likely a BootNode, set the tcp port to 0 because it can not be used for syncing. + address.set_port(0); + NodeEndpoint { + address, + udp_port: from.port(), + } + }; + let ping_to = NodeEndpoint::from_rlp(&rlp.at(2)?)?; + let timestamp: u64 = rlp.val_at(3)?; + self.check_timestamp(timestamp)?; + let mut response = RlpStream::new_list(3); + let pong_to = NodeEndpoint { + address: from.clone(), + udp_port: ping_from.udp_port, + }; + // Here the PONG's `To` field should be the node we are + // sending the request to + // WARNING: this field _should not be used_, but old Parity versions + // use it in order to get the node's address. + // So this is a temporary fix so that older Parity versions don't break completely. 
+ ping_to.to_rlp_list(&mut response); + // pong_to.to_rlp_list(&mut response); - response.append(&echo_hash); - append_expiration(&mut response); - self.send_packet(PACKET_PONG, from, &response.drain())?; + response.append(&echo_hash); + append_expiration(&mut response); + self.send_packet(PACKET_PONG, from, &response.drain())?; - let entry = NodeEntry { id: *node_id, endpoint: pong_to.clone() }; - if !entry.endpoint.is_valid_discovery_node() { - debug!(target: "discovery", "Got bad address: {:?}", entry); - } else if !self.is_allowed(&entry) { - debug!(target: "discovery", "Address not allowed: {:?}", entry); - } else { - self.add_node(entry.clone()); - } - Ok(None) - } + let entry = NodeEntry { + id: *node_id, + endpoint: pong_to.clone(), + }; + if !entry.endpoint.is_valid_discovery_node() { + debug!(target: "discovery", "Got bad address: {:?}", entry); + } else if !self.is_allowed(&entry) { + debug!(target: "discovery", "Address not allowed: {:?}", entry); + } else { + self.add_node(entry.clone()); + } + Ok(None) + } - fn on_pong(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr) -> Result, Error> { - trace!(target: "discovery", "Got Pong from {:?} ; node_id={:#x}", &from, node_id); - let _pong_to = NodeEndpoint::from_rlp(&rlp.at(0)?)?; - let echo_hash: H256 = rlp.val_at(1)?; - let timestamp: u64 = rlp.val_at(2)?; - self.check_timestamp(timestamp)?; + fn on_pong( + &mut self, + rlp: &Rlp, + node_id: &NodeId, + from: &SocketAddr, + ) -> Result, Error> { + trace!(target: "discovery", "Got Pong from {:?} ; node_id={:#x}", &from, node_id); + let _pong_to = NodeEndpoint::from_rlp(&rlp.at(0)?)?; + let echo_hash: H256 = rlp.val_at(1)?; + let timestamp: u64 = rlp.val_at(2)?; + self.check_timestamp(timestamp)?; - let expected_node = match self.in_flight_pings.entry(*node_id) { - Entry::Occupied(entry) => { - let expected_node = { - let request = entry.get(); - if request.echo_hash != echo_hash && request.deprecated_echo_hash != echo_hash { - debug!(target: 
"discovery", "Got unexpected Pong from {:?} ; packet_hash={:#x} ; expected_hash={:#x}", &from, request.echo_hash, echo_hash); - None - } else { - if request.deprecated_echo_hash == echo_hash { - trace!(target: "discovery", "Got Pong from an old parity-ethereum version."); - } - Some((request.node.clone(), request.reason.clone())) - } - }; + let expected_node = match self.in_flight_pings.entry(*node_id) { + Entry::Occupied(entry) => { + let expected_node = { + let request = entry.get(); + if request.echo_hash != echo_hash && request.deprecated_echo_hash != echo_hash { + debug!(target: "discovery", "Got unexpected Pong from {:?} ; packet_hash={:#x} ; expected_hash={:#x}", &from, request.echo_hash, echo_hash); + None + } else { + if request.deprecated_echo_hash == echo_hash { + trace!(target: "discovery", "Got Pong from an old parity-ethereum version."); + } + Some((request.node.clone(), request.reason.clone())) + } + }; - if expected_node.is_some() { - entry.remove(); - } - expected_node - }, - Entry::Vacant(_) => { - None - }, - }; + if expected_node.is_some() { + entry.remove(); + } + expected_node + } + Entry::Vacant(_) => None, + }; - if let Some((node, ping_reason)) = expected_node { - if let PingReason::FromDiscoveryRequest(target, validity) = ping_reason { - self.respond_with_discovery(target, &node)?; - // kirushik: I would prefer to probe the network id of the remote node here, and add it to the nodes list if it's on "our" net -- - // but `on_packet` happens synchronously, so doing the full TCP handshake ceremony here is a bad idea. 
- // So instead we just LRU-caching most recently seen nodes to avoid unnecessary pinging - match validity { - NodeValidity::ValidNode(NodeCategory::Bucket) | NodeValidity::ExpiredNode(NodeCategory::Bucket) => { - trace!(target: "discovery", "Updating node {:?} in our Kad buckets", &node); - self.update_bucket_record(node).unwrap_or_else(|error| { + if let Some((node, ping_reason)) = expected_node { + if let PingReason::FromDiscoveryRequest(target, validity) = ping_reason { + self.respond_with_discovery(target, &node)?; + // kirushik: I would prefer to probe the network id of the remote node here, and add it to the nodes list if it's on "our" net -- + // but `on_packet` happens synchronously, so doing the full TCP handshake ceremony here is a bad idea. + // So instead we just LRU-cache most recently seen nodes to avoid unnecessary pinging + match validity { + NodeValidity::ValidNode(NodeCategory::Bucket) + | NodeValidity::ExpiredNode(NodeCategory::Bucket) => { + trace!(target: "discovery", "Updating node {:?} in our Kad buckets", &node); + self.update_bucket_record(node).unwrap_or_else(|error| { debug!(target: "discovery", "Error occured when processing ping from a bucket node: {:?}", &error); }); - }, - NodeValidity::UnknownNode | NodeValidity::ExpiredNode(NodeCategory::Observed) | NodeValidity::ValidNode(NodeCategory::Observed)=> { - trace!(target: "discovery", "Updating node {:?} in the list of other_observed_nodes", &node); - self.other_observed_nodes.insert(node.id, (node.endpoint, Instant::now())); - }, - NodeValidity::Ourselves => (), - } - Ok(None) - } else { - Ok(self.update_node(node)) - } - } else { - debug!(target: "discovery", "Got unexpected Pong from {:?} ; request not found", &from); - Ok(None) - } - } + } + NodeValidity::UnknownNode + | NodeValidity::ExpiredNode(NodeCategory::Observed) + | NodeValidity::ValidNode(NodeCategory::Observed) => { + trace!(target: "discovery", "Updating node {:?} in the list of other_observed_nodes", &node); + 
self.other_observed_nodes + .insert(node.id, (node.endpoint, Instant::now())); + } + NodeValidity::Ourselves => (), + } + Ok(None) + } else { + Ok(self.update_node(node)) + } + } else { + debug!(target: "discovery", "Got unexpected Pong from {:?} ; request not found", &from); + Ok(None) + } + } - fn on_find_node(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr) -> Result, Error> { - trace!(target: "discovery", "Got FindNode from {:?}", &from); - let target: NodeId = rlp.val_at(0)?; - let timestamp: u64 = rlp.val_at(1)?; - self.check_timestamp(timestamp)?; + fn on_find_node( + &mut self, + rlp: &Rlp, + node_id: &NodeId, + from: &SocketAddr, + ) -> Result, Error> { + trace!(target: "discovery", "Got FindNode from {:?}", &from); + let target: NodeId = rlp.val_at(0)?; + let timestamp: u64 = rlp.val_at(1)?; + self.check_timestamp(timestamp)?; - let node = NodeEntry { - id: node_id.clone(), - endpoint: NodeEndpoint { - address: *from, - udp_port: from.port() - } - }; + let node = NodeEntry { + id: node_id.clone(), + endpoint: NodeEndpoint { + address: *from, + udp_port: from.port(), + }, + }; - match self.check_validity(&node) { - NodeValidity::Ourselves => (), // It makes no sense to respond to the discovery request from ourselves - NodeValidity::ValidNode(_) => self.respond_with_discovery(target, &node)?, - // Make sure the request source is actually there and responds to pings before actually responding - invalidity_reason => self.try_ping(node, PingReason::FromDiscoveryRequest(target, invalidity_reason)) - } - Ok(None) - } + match self.check_validity(&node) { + NodeValidity::Ourselves => (), // It makes no sense to respond to the discovery request from ourselves + NodeValidity::ValidNode(_) => self.respond_with_discovery(target, &node)?, + // Make sure the request source is actually there and responds to pings before actually responding + invalidity_reason => self.try_ping( + node, + PingReason::FromDiscoveryRequest(target, invalidity_reason), + ), + } + 
Ok(None) + } - fn check_validity(&mut self, node: &NodeEntry) -> NodeValidity { - let id_hash = keccak(node.id); - let dist = match Discovery::distance(&self.id_hash, &id_hash) { - Some(dist) => dist, - None => { - debug!(target: "discovery", "Got an incoming discovery request from self: {:?}", node); - return NodeValidity::Ourselves; - } - }; + fn check_validity(&mut self, node: &NodeEntry) -> NodeValidity { + let id_hash = keccak(node.id); + let dist = match Discovery::distance(&self.id_hash, &id_hash) { + Some(dist) => dist, + None => { + debug!(target: "discovery", "Got an incoming discovery request from self: {:?}", node); + return NodeValidity::Ourselves; + } + }; - let bucket = &self.node_buckets[dist]; - if let Some(known_node) = bucket.nodes.iter().find(|n| n.address.id == node.id) { - debug!(target: "discovery", "Found a known node in a bucket when processing discovery: {:?}/{:?}", known_node, node); - match ((known_node.address.endpoint == node.endpoint), (known_node.last_seen.elapsed() < NODE_LAST_SEEN_TIMEOUT)) { - (true, true) => NodeValidity::ValidNode(NodeCategory::Bucket), - (true, false) => NodeValidity::ExpiredNode(NodeCategory::Bucket), - _ => NodeValidity::UnknownNode - } - } else { - self.other_observed_nodes.get_mut(&node.id).map_or(NodeValidity::UnknownNode, |(endpoint, observed_at)| { - match ((node.endpoint==*endpoint), (observed_at.elapsed() < NODE_LAST_SEEN_TIMEOUT)) { - (true, true) => NodeValidity::ValidNode(NodeCategory::Observed), - (true, false) => NodeValidity::ExpiredNode(NodeCategory::Observed), - _ => NodeValidity::UnknownNode - } - }) - } - } + let bucket = &self.node_buckets[dist]; + if let Some(known_node) = bucket.nodes.iter().find(|n| n.address.id == node.id) { + debug!(target: "discovery", "Found a known node in a bucket when processing discovery: {:?}/{:?}", known_node, node); + match ( + (known_node.address.endpoint == node.endpoint), + (known_node.last_seen.elapsed() < NODE_LAST_SEEN_TIMEOUT), + ) { + (true, true) => 
NodeValidity::ValidNode(NodeCategory::Bucket), + (true, false) => NodeValidity::ExpiredNode(NodeCategory::Bucket), + _ => NodeValidity::UnknownNode, + } + } else { + self.other_observed_nodes.get_mut(&node.id).map_or( + NodeValidity::UnknownNode, + |(endpoint, observed_at)| match ( + (node.endpoint == *endpoint), + (observed_at.elapsed() < NODE_LAST_SEEN_TIMEOUT), + ) { + (true, true) => NodeValidity::ValidNode(NodeCategory::Observed), + (true, false) => NodeValidity::ExpiredNode(NodeCategory::Observed), + _ => NodeValidity::UnknownNode, + }, + ) + } + } - fn respond_with_discovery(&mut self, target: NodeId, node: &NodeEntry) -> Result<(), Error> { - let nearest = self.nearest_node_entries(&target); - if nearest.is_empty() { - return Ok(()); - } - let mut packets = Discovery::prepare_neighbours_packets(&nearest); - for p in packets.drain(..) { - self.send_packet(PACKET_NEIGHBOURS, &node.endpoint.address, &p)?; - } - trace!(target: "discovery", "Sent {} Neighbours to {:?}", nearest.len(), &node.endpoint); - Ok(()) - } + fn respond_with_discovery(&mut self, target: NodeId, node: &NodeEntry) -> Result<(), Error> { + let nearest = self.nearest_node_entries(&target); + if nearest.is_empty() { + return Ok(()); + } + let mut packets = Discovery::prepare_neighbours_packets(&nearest); + for p in packets.drain(..) 
{ + self.send_packet(PACKET_NEIGHBOURS, &node.endpoint.address, &p)?; + } + trace!(target: "discovery", "Sent {} Neighbours to {:?}", nearest.len(), &node.endpoint); + Ok(()) + } - fn prepare_neighbours_packets(nearest: &[NodeEntry]) -> Vec { - let limit = (MAX_DATAGRAM_SIZE - 109) / 90; - let chunks = nearest.chunks(limit); - let packets = chunks.map(|c| { - let mut rlp = RlpStream::new_list(2); - rlp.begin_list(c.len()); - for n in c { - rlp.begin_list(4); - n.endpoint.to_rlp(&mut rlp); - rlp.append(&n.id); - } - append_expiration(&mut rlp); - rlp.out() - }); - packets.collect() - } + fn prepare_neighbours_packets(nearest: &[NodeEntry]) -> Vec { + let limit = (MAX_DATAGRAM_SIZE - 109) / 90; + let chunks = nearest.chunks(limit); + let packets = chunks.map(|c| { + let mut rlp = RlpStream::new_list(2); + rlp.begin_list(c.len()); + for n in c { + rlp.begin_list(4); + n.endpoint.to_rlp(&mut rlp); + rlp.append(&n.id); + } + append_expiration(&mut rlp); + rlp.out() + }); + packets.collect() + } - fn on_neighbours(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr) -> Result, Error> { - let results_count = rlp.at(0)?.item_count()?; + fn on_neighbours( + &mut self, + rlp: &Rlp, + node_id: &NodeId, + from: &SocketAddr, + ) -> Result, Error> { + let results_count = rlp.at(0)?.item_count()?; - let is_expected = match self.in_flight_find_nodes.entry(*node_id) { - Entry::Occupied(mut entry) => { - let expected = { - let request = entry.get_mut(); - // Mark the request as answered - request.answered = true; - if request.response_count + results_count <= BUCKET_SIZE { - request.response_count += results_count; - true - } else { - debug!(target: "discovery", "Got unexpected Neighbors from {:?} ; oversized packet ({} + {}) node_id={:#x}", &from, request.response_count, results_count, node_id); - false - } - }; - if entry.get().response_count == BUCKET_SIZE { - entry.remove(); - } - expected - } - Entry::Vacant(_) => { - debug!(target: "discovery", "Got unexpected Neighbors 
from {:?} ; couldn't find node_id={:#x}", &from, node_id); - false - }, - }; + let is_expected = match self.in_flight_find_nodes.entry(*node_id) { + Entry::Occupied(mut entry) => { + let expected = { + let request = entry.get_mut(); + // Mark the request as answered + request.answered = true; + if request.response_count + results_count <= BUCKET_SIZE { + request.response_count += results_count; + true + } else { + debug!(target: "discovery", "Got unexpected Neighbors from {:?} ; oversized packet ({} + {}) node_id={:#x}", &from, request.response_count, results_count, node_id); + false + } + }; + if entry.get().response_count == BUCKET_SIZE { + entry.remove(); + } + expected + } + Entry::Vacant(_) => { + debug!(target: "discovery", "Got unexpected Neighbors from {:?} ; couldn't find node_id={:#x}", &from, node_id); + false + } + }; - if !is_expected { - return Ok(None); - } + if !is_expected { + return Ok(None); + } - trace!(target: "discovery", "Got {} Neighbours from {:?}", results_count, &from); - for r in rlp.at(0)?.iter() { - let endpoint = NodeEndpoint::from_rlp(&r)?; - if !endpoint.is_valid_discovery_node() { - debug!(target: "discovery", "Bad address: {:?}", endpoint); - continue; - } - let node_id: NodeId = r.val_at(3)?; - if node_id == self.id { - continue; - } - let entry = NodeEntry { id: node_id, endpoint }; - if !self.is_allowed(&entry) { - debug!(target: "discovery", "Address not allowed: {:?}", entry); - continue; - } - self.add_node(entry); - } - Ok(None) - } + trace!(target: "discovery", "Got {} Neighbours from {:?}", results_count, &from); + for r in rlp.at(0)?.iter() { + let endpoint = NodeEndpoint::from_rlp(&r)?; + if !endpoint.is_valid_discovery_node() { + debug!(target: "discovery", "Bad address: {:?}", endpoint); + continue; + } + let node_id: NodeId = r.val_at(3)?; + if node_id == self.id { + continue; + } + let entry = NodeEntry { + id: node_id, + endpoint, + }; + if !self.is_allowed(&entry) { + debug!(target: "discovery", "Address not 
allowed: {:?}", entry); + continue; + } + self.add_node(entry); + } + Ok(None) + } - fn check_expired(&mut self, time: Instant) { - let mut nodes_to_expire = Vec::new(); - self.in_flight_pings.retain(|node_id, ping_request| { + fn check_expired(&mut self, time: Instant) { + let mut nodes_to_expire = Vec::new(); + self.in_flight_pings.retain(|node_id, ping_request| { if time.duration_since(ping_request.sent_at) > PING_TIMEOUT { debug!(target: "discovery", "Removing expired PING request for node_id={:#x}", node_id); nodes_to_expire.push(*node_id); @@ -772,7 +850,7 @@ impl<'a> Discovery<'a> { true } }); - self.in_flight_find_nodes.retain(|node_id, find_node_request| { + self.in_flight_find_nodes.retain(|node_id, find_node_request| { if time.duration_since(find_node_request.sent_at) > FIND_NODE_TIMEOUT { if !find_node_request.answered { debug!(target: "discovery", "Removing expired FIND NODE request for node_id={:#x}", node_id); @@ -783,323 +861,376 @@ impl<'a> Discovery<'a> { true } }); - for node_id in nodes_to_expire { - self.expire_node_request(node_id); - } - } + for node_id in nodes_to_expire { + self.expire_node_request(node_id); + } + } - fn expire_node_request(&mut self, node_id: NodeId) { - // Attempt to remove from bucket if in one. 
- let id_hash = keccak(&node_id); - let dist = Discovery::distance(&self.id_hash, &id_hash) - .expect("distance is None only if id hashes are equal; will never send request to self; qed"); - let bucket = &mut self.node_buckets[dist]; - if let Some(index) = bucket.nodes.iter().position(|n| n.id_hash == id_hash) { - if bucket.nodes[index].fail_count < self.request_backoff.len() { - let node = &mut bucket.nodes[index]; - node.backoff_until = Instant::now() + self.request_backoff[node.fail_count]; - node.fail_count += 1; - trace!( - target: "discovery", - "Requests to node {:?} timed out {} consecutive time(s)", - &node.address, node.fail_count - ); - } else { - let node = bucket.nodes.remove(index).expect("index was located in if condition"); - debug!(target: "discovery", "Removed expired node {:?}", &node.address); - } - } - } + fn expire_node_request(&mut self, node_id: NodeId) { + // Attempt to remove from bucket if in one. + let id_hash = keccak(&node_id); + let dist = Discovery::distance(&self.id_hash, &id_hash).expect( + "distance is None only if id hashes are equal; will never send request to self; qed", + ); + let bucket = &mut self.node_buckets[dist]; + if let Some(index) = bucket.nodes.iter().position(|n| n.id_hash == id_hash) { + if bucket.nodes[index].fail_count < self.request_backoff.len() { + let node = &mut bucket.nodes[index]; + node.backoff_until = Instant::now() + self.request_backoff[node.fail_count]; + node.fail_count += 1; + trace!( + target: "discovery", + "Requests to node {:?} timed out {} consecutive time(s)", + &node.address, node.fail_count + ); + } else { + let node = bucket + .nodes + .remove(index) + .expect("index was located in if condition"); + debug!(target: "discovery", "Removed expired node {:?}", &node.address); + } + } + } - pub fn round(&mut self) { - self.check_expired(Instant::now()); - self.update_new_nodes(); + pub fn round(&mut self) { + self.check_expired(Instant::now()); + self.update_new_nodes(); - if 
self.discovery_round.is_some() { - self.discover(); - // Start discovering if the first pings have been sent (or timed out) - } else if self.in_flight_pings.len() == 0 && !self.discovery_initiated { - self.discovery_initiated = true; - self.refresh(); - } - } + if self.discovery_round.is_some() { + self.discover(); + // Start discovering if the first pings have been sent (or timed out) + } else if self.in_flight_pings.len() == 0 && !self.discovery_initiated { + self.discovery_initiated = true; + self.refresh(); + } + } - pub fn refresh(&mut self) { - if self.discovery_round.is_none() { - self.start(); - } - } + pub fn refresh(&mut self) { + if self.discovery_round.is_none() { + self.start(); + } + } - pub fn any_sends_queued(&self) -> bool { - !self.send_queue.is_empty() - } + pub fn any_sends_queued(&self) -> bool { + !self.send_queue.is_empty() + } - pub fn dequeue_send(&mut self) -> Option { - self.send_queue.pop_front() - } + pub fn dequeue_send(&mut self) -> Option { + self.send_queue.pop_front() + } - pub fn requeue_send(&mut self, datagram: Datagram) { - self.send_queue.push_front(datagram) - } + pub fn requeue_send(&mut self, datagram: Datagram) { + self.send_queue.push_front(datagram) + } - /// Add a list of known nodes to the table. - #[cfg(test)] - pub fn init_node_list(&mut self, nodes: Vec) { - for n in nodes { - if self.is_allowed(&n) { - self.update_node(n); - } - } - } + /// Add a list of known nodes to the table. 
+ #[cfg(test)] + pub fn init_node_list(&mut self, nodes: Vec) { + for n in nodes { + if self.is_allowed(&n) { + self.update_node(n); + } + } + } } fn append_expiration(rlp: &mut RlpStream) { - let expiry = SystemTime::now() + EXPIRY_TIME; - let timestamp = expiry.duration_since(UNIX_EPOCH).unwrap_or_default().as_secs() as u32; - rlp.append(×tamp); + let expiry = SystemTime::now() + EXPIRY_TIME; + let timestamp = expiry + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs() as u32; + rlp.append(×tamp); } fn assemble_packet(packet_id: u8, bytes: &[u8], secret: &Secret) -> Result { - let mut packet = Bytes::with_capacity(bytes.len() + 32 + 65 + 1); - packet.resize(32 + 65, 0); // Filled in below - packet.push(packet_id); - packet.extend_from_slice(bytes); + let mut packet = Bytes::with_capacity(bytes.len() + 32 + 65 + 1); + packet.resize(32 + 65, 0); // Filled in below + packet.push(packet_id); + packet.extend_from_slice(bytes); - let hash = keccak(&packet[(32 + 65)..]); - let signature = match sign(secret, &hash) { - Ok(s) => s, - Err(e) => { - warn!(target: "discovery", "Error signing UDP packet"); - return Err(Error::from(e)); - } - }; - packet[32..(32 + 65)].copy_from_slice(&signature[..]); - let signed_hash = keccak(&packet[32..]); - packet[0..32].copy_from_slice(&signed_hash); - Ok(packet) + let hash = keccak(&packet[(32 + 65)..]); + let signature = match sign(secret, &hash) { + Ok(s) => s, + Err(e) => { + warn!(target: "discovery", "Error signing UDP packet"); + return Err(Error::from(e)); + } + }; + packet[32..(32 + 65)].copy_from_slice(&signature[..]); + let signed_hash = keccak(&packet[32..]); + packet[0..32].copy_from_slice(&signed_hash); + Ok(packet) } // Selects the next node in a bucket to ping. Chooses the eligible node least recently seen. 
fn select_bucket_ping<'a, I>(nodes: I) -> Option where - I: Iterator + I: Iterator, { - let now = Instant::now(); - nodes - .filter(|n| n.backoff_until < now) - .min_by_key(|n| n.last_seen) - .map(|n| n.address.clone()) + let now = Instant::now(); + nodes + .filter(|n| n.backoff_until < now) + .min_by_key(|n| n.last_seen) + .map(|n| n.address.clone()) } #[cfg(test)] mod tests { - use super::*; - use std::net::{IpAddr,Ipv4Addr}; - use node_table::{Node, NodeId, NodeEndpoint}; + use super::*; + use node_table::{Node, NodeEndpoint, NodeId}; + use std::net::{IpAddr, Ipv4Addr}; - use std::str::FromStr; - use rustc_hex::FromHex; - use ethkey::{Random, Generator}; + use ethkey::{Generator, Random}; + use rustc_hex::FromHex; + use std::str::FromStr; - #[test] - fn find_node() { - let mut nearest = Vec::new(); - let node = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@127.0.0.1:7770").unwrap(); - for _ in 0..1000 { - nearest.push( NodeEntry { id: node.id.clone(), endpoint: node.endpoint.clone() }); - } + #[test] + fn find_node() { + let mut nearest = Vec::new(); + let node = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@127.0.0.1:7770").unwrap(); + for _ in 0..1000 { + nearest.push(NodeEntry { + id: node.id.clone(), + endpoint: node.endpoint.clone(), + }); + } - let packets = Discovery::prepare_neighbours_packets(&nearest); - assert_eq!(packets.len(), 77); - for p in &packets[0..76] { - assert!(p.len() > 1280/2); - assert!(p.len() <= 1280); - } - assert!(packets.last().unwrap().len() > 0); - } + let packets = Discovery::prepare_neighbours_packets(&nearest); + assert_eq!(packets.len(), 77); + for p in &packets[0..76] { + assert!(p.len() > 1280 / 2); + assert!(p.len() <= 1280); + } + assert!(packets.last().unwrap().len() > 0); + } - #[test] - fn ping_queue() { - let 
key = Random.generate().unwrap(); - let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40445").unwrap(), udp_port: 40445 }; - let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); + #[test] + fn ping_queue() { + let key = Random.generate().unwrap(); + let ep = NodeEndpoint { + address: SocketAddr::from_str("127.0.0.1:40445").unwrap(), + udp_port: 40445, + }; + let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); - for i in 1..(MAX_NODES_PING+1) { - discovery.add_node(NodeEntry { id: NodeId::random(), endpoint: ep.clone() }); - assert_eq!(discovery.in_flight_pings.len(), i); - assert_eq!(discovery.send_queue.len(), i); - assert_eq!(discovery.adding_nodes.len(), 0); - } - for i in 1..20 { - discovery.add_node(NodeEntry { id: NodeId::random(), endpoint: ep.clone() }); - assert_eq!(discovery.in_flight_pings.len(), MAX_NODES_PING); - assert_eq!(discovery.send_queue.len(), MAX_NODES_PING); - assert_eq!(discovery.adding_nodes.len(), i); - } - } + for i in 1..(MAX_NODES_PING + 1) { + discovery.add_node(NodeEntry { + id: NodeId::random(), + endpoint: ep.clone(), + }); + assert_eq!(discovery.in_flight_pings.len(), i); + assert_eq!(discovery.send_queue.len(), i); + assert_eq!(discovery.adding_nodes.len(), 0); + } + for i in 1..20 { + discovery.add_node(NodeEntry { + id: NodeId::random(), + endpoint: ep.clone(), + }); + assert_eq!(discovery.in_flight_pings.len(), MAX_NODES_PING); + assert_eq!(discovery.send_queue.len(), MAX_NODES_PING); + assert_eq!(discovery.adding_nodes.len(), i); + } + } - #[test] - fn discovery() { - let mut discovery_handlers = (0..5).map(|i| { - let key = Random.generate().unwrap(); - let ep = NodeEndpoint { - address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 41000 + i), - udp_port: 41000 + i, - }; - Discovery::new(&key, ep, IpFilter::default()) - }) - .collect::>(); + #[test] + fn discovery() { + let mut discovery_handlers = (0..5) + .map(|i| { + let key = 
Random.generate().unwrap(); + let ep = NodeEndpoint { + address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 41000 + i), + udp_port: 41000 + i, + }; + Discovery::new(&key, ep, IpFilter::default()) + }) + .collect::>(); - // Sort inversely by XOR distance to the 0 hash. - discovery_handlers.sort_by(|a, b| b.id_hash.cmp(&a.id_hash)); + // Sort inversely by XOR distance to the 0 hash. + discovery_handlers.sort_by(|a, b| b.id_hash.cmp(&a.id_hash)); - // Initialize the routing table of each with the next one in order. - for i in 0 .. 5 { - let node = NodeEntry { - id: discovery_handlers[(i + 1) % 5].id, - endpoint: discovery_handlers[(i + 1) % 5].public_endpoint.clone(), - }; - discovery_handlers[i].update_node(node); - } + // Initialize the routing table of each with the next one in order. + for i in 0..5 { + let node = NodeEntry { + id: discovery_handlers[(i + 1) % 5].id, + endpoint: discovery_handlers[(i + 1) % 5].public_endpoint.clone(), + }; + discovery_handlers[i].update_node(node); + } - // After 4 discovery rounds, the first one should have learned about the rest. - for _round in 0 .. 5 { - discovery_handlers[0].round(); + // After 4 discovery rounds, the first one should have learned about the rest. + for _round in 0..5 { + discovery_handlers[0].round(); - let mut continue_loop = true; - while continue_loop { - continue_loop = false; + let mut continue_loop = true; + while continue_loop { + continue_loop = false; - // Process all queued messages. - for i in 0 .. 20 { - let src = discovery_handlers[i%5].public_endpoint.address.clone(); - while let Some(datagram) = discovery_handlers[i%5].dequeue_send() { - let dest = discovery_handlers.iter_mut() - .find(|disc| datagram.address == disc.public_endpoint.address) - .unwrap(); - dest.on_packet(&datagram.payload, src).ok(); + // Process all queued messages. 
+ for i in 0..20 { + let src = discovery_handlers[i % 5].public_endpoint.address.clone(); + while let Some(datagram) = discovery_handlers[i % 5].dequeue_send() { + let dest = discovery_handlers + .iter_mut() + .find(|disc| datagram.address == disc.public_endpoint.address) + .unwrap(); + dest.on_packet(&datagram.payload, src).ok(); - continue_loop = true; - } - } - } - } + continue_loop = true; + } + } + } + } - let results = discovery_handlers[0].nearest_node_entries(&NodeId::new()); - assert_eq!(results.len(), 4); - } + let results = discovery_handlers[0].nearest_node_entries(&NodeId::new()); + assert_eq!(results.len(), 4); + } - #[test] - fn removes_expired() { - let key = Random.generate().unwrap(); - let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40446").unwrap(), udp_port: 40447 }; - let discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); + #[test] + fn removes_expired() { + let key = Random.generate().unwrap(); + let ep = NodeEndpoint { + address: SocketAddr::from_str("127.0.0.1:40446").unwrap(), + udp_port: 40447, + }; + let discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); - let mut discovery = Discovery { request_backoff: &[], ..discovery }; + let mut discovery = Discovery { + request_backoff: &[], + ..discovery + }; - let total_bucket_nodes = |node_buckets: &Vec| -> usize { - node_buckets.iter().map(|bucket| bucket.nodes.len()).sum() - }; + let total_bucket_nodes = |node_buckets: &Vec| -> usize { + node_buckets.iter().map(|bucket| bucket.nodes.len()).sum() + }; - let node_entries = (0..1200) - .map(|_| NodeEntry { id: NodeId::random(), endpoint: ep.clone() }) - .collect::>(); + let node_entries = (0..1200) + .map(|_| NodeEntry { + id: NodeId::random(), + endpoint: ep.clone(), + }) + .collect::>(); - discovery.init_node_list(node_entries.clone()); - assert_eq!(total_bucket_nodes(&discovery.node_buckets), 1200); + discovery.init_node_list(node_entries.clone()); + 
assert_eq!(total_bucket_nodes(&discovery.node_buckets), 1200); - // Requests have not expired yet. - let num_nodes = total_bucket_nodes(&discovery.node_buckets); - discovery.check_expired(Instant::now()); - let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); - assert_eq!(removed, 0); + // Requests have not expired yet. + let num_nodes = total_bucket_nodes(&discovery.node_buckets); + discovery.check_expired(Instant::now()); + let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); + assert_eq!(removed, 0); - // Expiring pings to bucket nodes removes them from bucket. - let num_nodes = total_bucket_nodes(&discovery.node_buckets); - discovery.check_expired(Instant::now() + PING_TIMEOUT); - let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); - assert!(removed > 0); - assert_eq!(total_bucket_nodes(&discovery.node_buckets), 1200 - removed); + // Expiring pings to bucket nodes removes them from bucket. + let num_nodes = total_bucket_nodes(&discovery.node_buckets); + discovery.check_expired(Instant::now() + PING_TIMEOUT); + let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); + assert!(removed > 0); + assert_eq!(total_bucket_nodes(&discovery.node_buckets), 1200 - removed); - for _ in 0..100 { - discovery.add_node(NodeEntry { id: NodeId::random(), endpoint: ep.clone() }); - } - assert!(discovery.in_flight_pings.len() > 0); + for _ in 0..100 { + discovery.add_node(NodeEntry { + id: NodeId::random(), + endpoint: ep.clone(), + }); + } + assert!(discovery.in_flight_pings.len() > 0); - // Expire pings to nodes that are not in buckets. - let num_nodes = total_bucket_nodes(&discovery.node_buckets); - discovery.check_expired(Instant::now() + PING_TIMEOUT); - let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); - assert_eq!(removed, 0); - assert_eq!(discovery.in_flight_pings.len(), 0); + // Expire pings to nodes that are not in buckets. 
+ let num_nodes = total_bucket_nodes(&discovery.node_buckets); + discovery.check_expired(Instant::now() + PING_TIMEOUT); + let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); + assert_eq!(removed, 0); + assert_eq!(discovery.in_flight_pings.len(), 0); - let from = SocketAddr::from_str("99.99.99.99:40445").unwrap(); + let from = SocketAddr::from_str("99.99.99.99:40445").unwrap(); - // FIND_NODE times out because it doesn't receive k results. - let key = Random.generate().unwrap(); - discovery.send_find_node(&node_entries[100], key.public()).unwrap(); - for payload in Discovery::prepare_neighbours_packets(&node_entries[101..116]) { - let packet = assemble_packet(PACKET_NEIGHBOURS, &payload, &key.secret()).unwrap(); - discovery.on_packet(&packet, from.clone()).unwrap(); - } + // FIND_NODE times out because it doesn't receive k results. + let key = Random.generate().unwrap(); + discovery + .send_find_node(&node_entries[100], key.public()) + .unwrap(); + for payload in Discovery::prepare_neighbours_packets(&node_entries[101..116]) { + let packet = assemble_packet(PACKET_NEIGHBOURS, &payload, &key.secret()).unwrap(); + discovery.on_packet(&packet, from.clone()).unwrap(); + } - let num_nodes = total_bucket_nodes(&discovery.node_buckets); - discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT); - let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); - assert!(removed > 0); + let num_nodes = total_bucket_nodes(&discovery.node_buckets); + discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT); + let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); + assert!(removed > 0); - // FIND_NODE does not time out because it receives k results. 
- discovery.send_find_node(&node_entries[100], key.public()).unwrap(); - for payload in Discovery::prepare_neighbours_packets(&node_entries[101..117]) { - let packet = assemble_packet(PACKET_NEIGHBOURS, &payload, &key.secret()).unwrap(); - discovery.on_packet(&packet, from.clone()).unwrap(); - } + // FIND_NODE does not time out because it receives k results. + discovery + .send_find_node(&node_entries[100], key.public()) + .unwrap(); + for payload in Discovery::prepare_neighbours_packets(&node_entries[101..117]) { + let packet = assemble_packet(PACKET_NEIGHBOURS, &payload, &key.secret()).unwrap(); + discovery.on_packet(&packet, from.clone()).unwrap(); + } - let num_nodes = total_bucket_nodes(&discovery.node_buckets); - discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT); - let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); - assert_eq!(removed, 0); + let num_nodes = total_bucket_nodes(&discovery.node_buckets); + discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT); + let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); + assert_eq!(removed, 0); - // Test bucket evictions with retries. - let request_backoff = [Duration::new(0, 0); 2]; - let mut discovery = Discovery { request_backoff: &request_backoff, ..discovery }; + // Test bucket evictions with retries. 
+ let request_backoff = [Duration::new(0, 0); 2]; + let mut discovery = Discovery { + request_backoff: &request_backoff, + ..discovery + }; - for _ in 0..2 { - discovery.ping(&node_entries[101], PingReason::Default).unwrap(); - let num_nodes = total_bucket_nodes(&discovery.node_buckets); - discovery.check_expired(Instant::now() + PING_TIMEOUT); - let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); - assert_eq!(removed, 0); - } + for _ in 0..2 { + discovery + .ping(&node_entries[101], PingReason::Default) + .unwrap(); + let num_nodes = total_bucket_nodes(&discovery.node_buckets); + discovery.check_expired(Instant::now() + PING_TIMEOUT); + let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); + assert_eq!(removed, 0); + } - discovery.ping(&node_entries[101], PingReason::Default).unwrap(); - let num_nodes = total_bucket_nodes(&discovery.node_buckets); - discovery.check_expired(Instant::now() + PING_TIMEOUT); - let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); - assert_eq!(removed, 1); - } + discovery + .ping(&node_entries[101], PingReason::Default) + .unwrap(); + let num_nodes = total_bucket_nodes(&discovery.node_buckets); + discovery.check_expired(Instant::now() + PING_TIMEOUT); + let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets); + assert_eq!(removed, 1); + } - #[test] - fn find_nearest_saturated() { - use super::*; + #[test] + fn find_nearest_saturated() { + use super::*; - let key = Random.generate().unwrap(); - let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40447").unwrap(), udp_port: 40447 }; - let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); + let key = Random.generate().unwrap(); + let ep = NodeEndpoint { + address: SocketAddr::from_str("127.0.0.1:40447").unwrap(), + udp_port: 40447, + }; + let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); - for _ in 0..(16 + 10) { - let entry = BucketEntry::new(NodeEntry { id: 
NodeId::new(), endpoint: ep.clone() }); - discovery.node_buckets[0].nodes.push_back(entry); - } - let nearest = discovery.nearest_node_entries(&NodeId::new()); - assert_eq!(nearest.len(), 16) - } + for _ in 0..(16 + 10) { + let entry = BucketEntry::new(NodeEntry { + id: NodeId::new(), + endpoint: ep.clone(), + }); + discovery.node_buckets[0].nodes.push_back(entry); + } + let nearest = discovery.nearest_node_entries(&NodeId::new()); + assert_eq!(nearest.len(), 16) + } - #[test] - fn routing_table_insertions_lookups() { - use super::*; - let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40448").unwrap(), udp_port: 40447 }; - let node_ids_hex: [&str; 32] = [ + #[test] + fn routing_table_insertions_lookups() { + use super::*; + let ep = NodeEndpoint { + address: SocketAddr::from_str("127.0.0.1:40448").unwrap(), + udp_port: 40447, + }; + let node_ids_hex: [&str; 32] = [ "22536fa57acc12c4993295cbc26fef4550513496712b301ad2283d356c8108521244a362e64e6d907a0d0b4e65526699c5ae3cfebfc680505fe3b33d50672835", "22c482f42401546f8dd7ed6b1c0cad976da6630730f1116614579ccb084791a528ff2676bfe94434de80e5d7e479f1ea1d7737077da3bd5e69a0f3e5bf596091", "234c73e3a8f6835a7f9a9d2a896bff4908d66d21d5433a2c37d94f1fa9a6ca17d02388f31013ff87e3ad86506e76bd1006b9cac3815974a2b47c8d4f2124697e", @@ -1133,73 +1264,79 @@ mod tests { "e1268f5dd9552a11989df9d4953bb388e7466711b2bd9882a3ed4d0767a21f046c53c20f9a18d66bae1d6a5544492857ddecb0b5b4818bd4557be252ddd66c71", "e626019dc0b50b9e254461f19d29e69a4669c5256134a6352c6c30d3bc55d201a5b43fc2e006556cfaf29765b683e807e03093798942826244e4ee9e47c75d3f", ]; - let node_entries = node_ids_hex.iter() - .map(|node_id_hex| NodeId::from_str(node_id_hex).unwrap()) - .map(|node_id| NodeEntry { id: node_id, endpoint: ep.clone() }) - .collect::>(); + let node_entries = node_ids_hex + .iter() + .map(|node_id_hex| NodeId::from_str(node_id_hex).unwrap()) + .map(|node_id| NodeEntry { + id: node_id, + endpoint: ep.clone(), + }) + .collect::>(); - let secret_hex = 
"6c71d1b8930d29e6371be1081f2c909c64b46440a1716314c3c9df995cb3aed1"; - let key = Secret::from_str(secret_hex) - .and_then(|secret| KeyPair::from_secret(secret)) - .unwrap(); - let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); + let secret_hex = "6c71d1b8930d29e6371be1081f2c909c64b46440a1716314c3c9df995cb3aed1"; + let key = Secret::from_str(secret_hex) + .and_then(|secret| KeyPair::from_secret(secret)) + .unwrap(); + let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); - discovery.init_node_list(node_entries.clone()); + discovery.init_node_list(node_entries.clone()); - let expected_bucket_sizes = vec![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7, 8, 12 - ]; - let actual_bucket_sizes = discovery.node_buckets.iter() - .map(|ref bucket| bucket.nodes.len()) - .collect::>(); - assert_eq!(actual_bucket_sizes, expected_bucket_sizes); + let expected_bucket_sizes = vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7, 8, 12, + ]; + let actual_bucket_sizes = discovery + .node_buckets + .iter() + .map(|ref bucket| bucket.nodes.len()) + .collect::>(); + assert_eq!(actual_bucket_sizes, expected_bucket_sizes); - for entry in &node_entries { - let nearest = discovery.nearest_node_entries(&entry.id); - assert_eq!(nearest.len(), 16); - assert_eq!(nearest[0].id, entry.id); + for entry in &node_entries { + let nearest = discovery.nearest_node_entries(&entry.id); + assert_eq!(nearest.len(), 16); + assert_eq!(nearest[0].id, entry.id); - let mut expected_ids: Vec = node_entries.iter().map(|entry| entry.id).collect(); - expected_ids.sort_unstable_by_key(|id| keccak(id) ^ keccak(entry.id)); - expected_ids.resize(BUCKET_SIZE, NodeId::default()); + let mut expected_ids: Vec = node_entries.iter().map(|entry| entry.id).collect(); + expected_ids.sort_unstable_by_key(|id| keccak(id) ^ keccak(entry.id)); + expected_ids.resize(BUCKET_SIZE, NodeId::default()); - let actual_ids: Vec = nearest.iter().map(|entry| entry.id).collect(); - assert_eq!(actual_ids, expected_ids); - } - } + let actual_ids: Vec = nearest.iter().map(|entry| entry.id).collect(); + assert_eq!(actual_ids, expected_ids); + } + } - #[test] - fn packets() { - let key = Random.generate().unwrap(); - let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40449").unwrap(), udp_port: 40449 }; - let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); - discovery.check_timestamps = false; - let from = SocketAddr::from_str("99.99.99.99:40445").unwrap(); + #[test] + fn 
packets() { + let key = Random.generate().unwrap(); + let ep = NodeEndpoint { + address: SocketAddr::from_str("127.0.0.1:40449").unwrap(), + udp_port: 40449, + }; + let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default()); + discovery.check_timestamps = false; + let from = SocketAddr::from_str("99.99.99.99:40445").unwrap(); - let packet = "\ + let packet = "\ e9614ccfd9fc3e74360018522d30e1419a143407ffcce748de3e22116b7e8dc92ff74788c0b6663a\ aa3d67d641936511c8f8d6ad8698b820a7cf9e1be7155e9a241f556658c55428ec0563514365799a\ 4be2be5a685a80971ddcfa80cb422cdd0101ec04cb847f000001820cfa8215a8d790000000000000\ 000000000000000000018208ae820d058443b9a3550102\ - ".from_hex().unwrap(); - let _ = discovery.on_packet(&packet, from.clone()).expect("packet to be ok"); + " + .from_hex() + .unwrap(); + let _ = discovery + .on_packet(&packet, from.clone()) + .expect("packet to be ok"); - let packet = "\ + let packet = "\ 577be4349c4dd26768081f58de4c6f375a7a22f3f7adda654d1428637412c3d7fe917cadc56d4e5e\ 7ffae1dbe3efffb9849feb71b262de37977e7c7a44e677295680e9e38ab26bee2fcbae207fba3ff3\ d74069a50b902a82c9903ed37cc993c50001f83e82022bd79020010db83c4d001500000000abcdef\ @@ -1208,30 +1345,42 @@ mod tests { 3fa4090c408f6b4bc3701562c031041d4702971d102c9ab7fa5eed4cd6bab8f7af956f7d565ee191\ 7084a95398b6a21eac920fe3dd1345ec0a7ef39367ee69ddf092cbfe5b93e5e568ebc491983c09c7\ 6d922dc3\ - ".from_hex().unwrap(); - let _ = discovery.on_packet(&packet, from.clone()).expect("packet to be ok"); + " + .from_hex() + .unwrap(); + let _ = discovery + .on_packet(&packet, from.clone()) + .expect("packet to be ok"); - let packet = "\ + let packet = "\ 09b2428d83348d27cdf7064ad9024f526cebc19e4958f0fdad87c15eb598dd61d08423e0bf66b206\ 9869e1724125f820d851c136684082774f870e614d95a2855d000f05d1648b2d5945470bc187c2d2\ 216fbe870f43ed0909009882e176a46b0102f846d79020010db885a308d313198a2e037073488208\ ae82823aa0fbc914b16819237dcd8801d7e53f69e9719adecb3cc0e790c57e91ca4461c9548443b9\ 
a355c6010203c2040506a0c969a58f6f9095004c0177a6b47f451530cab38966a25cca5cb58f0555 42124e\ - ".from_hex().unwrap(); - let _ = discovery.on_packet(&packet, from.clone()).expect("packet to be ok"); + " + .from_hex() + .unwrap(); + let _ = discovery + .on_packet(&packet, from.clone()) + .expect("packet to be ok"); - let packet = "\ + let packet = "\ c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91\ 831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe\ 04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d\ 115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be0081290476\ 7bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260a\ dd7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396\ - ".from_hex().unwrap(); - let _ = discovery.on_packet(&packet, from.clone()).expect("packet to be ok"); + " + .from_hex() + .unwrap(); + let _ = discovery + .on_packet(&packet, from.clone()) + .expect("packet to be ok"); - let packet = "\ + let packet = "\ c679fc8fe0b8b12f06577f2e802d34f6fa257e6137a995f6f4cbfc9ee50ed3710faf6e66f932c4c8\ d81d64343f429651328758b47d3dbc02c4042f0fff6946a50f4a49037a72bb550f3a7872363a83e1\ b9ee6469856c24eb4ef80b7535bcf99c0004f9015bf90150f84d846321163782115c82115db84031\ @@ -1244,80 +1393,114 @@ mod tests { 13198a2e037073488203e78203e8b8408dcab8618c3253b558d459da53bd8fa68935a719aff8b811\ 197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73\ 8443b9a355010203b525a138aa34383fec3d2719a0\ - ".from_hex().unwrap(); - let _ = discovery.on_packet(&packet, from.clone()).expect("packet to be ok"); - } + " + .from_hex() + .unwrap(); + let _ = discovery + .on_packet(&packet, from.clone()) + .expect("packet to be ok"); + } - #[test] - fn test_ping() { - let key1 = Random.generate().unwrap(); - let key2 = Random.generate().unwrap(); - let key3 = Random.generate().unwrap(); - let ep1 = NodeEndpoint { address: 
SocketAddr::from_str("127.0.0.1:40344").unwrap(), udp_port: 40344 }; - let ep2 = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40345").unwrap(), udp_port: 40345 }; - let ep3 = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40346").unwrap(), udp_port: 40345 }; - let mut discovery1 = Discovery::new(&key1, ep1.clone(), IpFilter::default()); - let mut discovery2 = Discovery::new(&key2, ep2.clone(), IpFilter::default()); + #[test] + fn test_ping() { + let key1 = Random.generate().unwrap(); + let key2 = Random.generate().unwrap(); + let key3 = Random.generate().unwrap(); + let ep1 = NodeEndpoint { + address: SocketAddr::from_str("127.0.0.1:40344").unwrap(), + udp_port: 40344, + }; + let ep2 = NodeEndpoint { + address: SocketAddr::from_str("127.0.0.1:40345").unwrap(), + udp_port: 40345, + }; + let ep3 = NodeEndpoint { + address: SocketAddr::from_str("127.0.0.1:40346").unwrap(), + udp_port: 40345, + }; + let mut discovery1 = Discovery::new(&key1, ep1.clone(), IpFilter::default()); + let mut discovery2 = Discovery::new(&key2, ep2.clone(), IpFilter::default()); - discovery1.ping(&NodeEntry { id: discovery2.id, endpoint: ep2.clone() }, PingReason::Default).unwrap(); - let ping_data = discovery1.dequeue_send().unwrap(); - assert!(!discovery1.any_sends_queued()); - let data = &ping_data.payload[(32 + 65)..]; - assert_eq!(data[0], PACKET_PING); - let rlp = Rlp::new(&data[1..]); - assert_eq!(ep1, NodeEndpoint::from_rlp(&rlp.at(1).unwrap()).unwrap()); - assert_eq!(ep2, NodeEndpoint::from_rlp(&rlp.at(2).unwrap()).unwrap()); + discovery1 + .ping( + &NodeEntry { + id: discovery2.id, + endpoint: ep2.clone(), + }, + PingReason::Default, + ) + .unwrap(); + let ping_data = discovery1.dequeue_send().unwrap(); + assert!(!discovery1.any_sends_queued()); + let data = &ping_data.payload[(32 + 65)..]; + assert_eq!(data[0], PACKET_PING); + let rlp = Rlp::new(&data[1..]); + assert_eq!(ep1, NodeEndpoint::from_rlp(&rlp.at(1).unwrap()).unwrap()); + assert_eq!(ep2, 
NodeEndpoint::from_rlp(&rlp.at(2).unwrap()).unwrap()); - // `discovery1` should be added to node table on ping received - if let Some(_) = discovery2.on_packet(&ping_data.payload, ep1.address.clone()).unwrap() { - panic!("Expected no changes to discovery2's table"); - } + // `discovery1` should be added to node table on ping received + if let Some(_) = discovery2 + .on_packet(&ping_data.payload, ep1.address.clone()) + .unwrap() + { + panic!("Expected no changes to discovery2's table"); + } - let pong_data = discovery2.dequeue_send().unwrap(); - let data = &pong_data.payload[(32 + 65)..]; - assert_eq!(data[0], PACKET_PONG); - let rlp = Rlp::new(&data[1..]); - assert_eq!(ping_data.payload[0..32], rlp.val_at::>(1).unwrap()[..]); + let pong_data = discovery2.dequeue_send().unwrap(); + let data = &pong_data.payload[(32 + 65)..]; + assert_eq!(data[0], PACKET_PONG); + let rlp = Rlp::new(&data[1..]); + assert_eq!( + ping_data.payload[0..32], + rlp.val_at::>(1).unwrap()[..] + ); - // Create a pong packet with incorrect echo hash and assert that it is rejected. - let mut incorrect_pong_rlp = RlpStream::new_list(3); - ep1.to_rlp_list(&mut incorrect_pong_rlp); - incorrect_pong_rlp.append(&H256::default()); - append_expiration(&mut incorrect_pong_rlp); - let incorrect_pong_data = assemble_packet( - PACKET_PONG, &incorrect_pong_rlp.drain(), &discovery2.secret - ).unwrap(); - if let Some(_) = discovery1.on_packet(&incorrect_pong_data, ep2.address.clone()).unwrap() { - panic!("Expected no changes to discovery1's table because pong hash is incorrect"); - } + // Create a pong packet with incorrect echo hash and assert that it is rejected. 
+ let mut incorrect_pong_rlp = RlpStream::new_list(3); + ep1.to_rlp_list(&mut incorrect_pong_rlp); + incorrect_pong_rlp.append(&H256::default()); + append_expiration(&mut incorrect_pong_rlp); + let incorrect_pong_data = + assemble_packet(PACKET_PONG, &incorrect_pong_rlp.drain(), &discovery2.secret).unwrap(); + if let Some(_) = discovery1 + .on_packet(&incorrect_pong_data, ep2.address.clone()) + .unwrap() + { + panic!("Expected no changes to discovery1's table because pong hash is incorrect"); + } - // Delivery of valid pong response should add to routing table. - if let Some(table_updates) = discovery1.on_packet(&pong_data.payload, ep2.address.clone()).unwrap() { - assert_eq!(table_updates.added.len(), 1); - assert_eq!(table_updates.removed.len(), 0); - assert!(table_updates.added.contains_key(&discovery2.id)); - } else { - panic!("Expected discovery1 to be added to discovery1's table"); - } + // Delivery of valid pong response should add to routing table. + if let Some(table_updates) = discovery1 + .on_packet(&pong_data.payload, ep2.address.clone()) + .unwrap() + { + assert_eq!(table_updates.added.len(), 1); + assert_eq!(table_updates.removed.len(), 0); + assert!(table_updates.added.contains_key(&discovery2.id)); + } else { + panic!("Expected discovery1 to be added to discovery1's table"); + } - let ping_back = discovery2.dequeue_send().unwrap(); - assert!(!discovery2.any_sends_queued()); - let data = &ping_back.payload[(32 + 65)..]; - assert_eq!(data[0], PACKET_PING); - let rlp = Rlp::new(&data[1..]); - assert_eq!(ep2, NodeEndpoint::from_rlp(&rlp.at(1).unwrap()).unwrap()); - assert_eq!(ep1, NodeEndpoint::from_rlp(&rlp.at(2).unwrap()).unwrap()); + let ping_back = discovery2.dequeue_send().unwrap(); + assert!(!discovery2.any_sends_queued()); + let data = &ping_back.payload[(32 + 65)..]; + assert_eq!(data[0], PACKET_PING); + let rlp = Rlp::new(&data[1..]); + assert_eq!(ep2, NodeEndpoint::from_rlp(&rlp.at(1).unwrap()).unwrap()); + assert_eq!(ep1, 
NodeEndpoint::from_rlp(&rlp.at(2).unwrap()).unwrap()); - // Deliver an unexpected PONG message to discover1. - let mut unexpected_pong_rlp = RlpStream::new_list(3); - ep3.to_rlp_list(&mut unexpected_pong_rlp); - unexpected_pong_rlp.append(&H256::default()); - append_expiration(&mut unexpected_pong_rlp); - let unexpected_pong = assemble_packet( - PACKET_PONG, &unexpected_pong_rlp.drain(), key3.secret() - ).unwrap(); - if let Some(_) = discovery1.on_packet(&unexpected_pong, ep3.address.clone()).unwrap() { - panic!("Expected no changes to discovery1's table for unexpected pong"); - } - } + // Deliver an unexpected PONG message to discover1. + let mut unexpected_pong_rlp = RlpStream::new_list(3); + ep3.to_rlp_list(&mut unexpected_pong_rlp); + unexpected_pong_rlp.append(&H256::default()); + append_expiration(&mut unexpected_pong_rlp); + let unexpected_pong = + assemble_packet(PACKET_PONG, &unexpected_pong_rlp.drain(), key3.secret()).unwrap(); + if let Some(_) = discovery1 + .on_packet(&unexpected_pong, ep3.address.clone()) + .unwrap() + { + panic!("Expected no changes to discovery1's table for unexpected pong"); + } + } } diff --git a/util/network-devp2p/src/handshake.rs b/util/network-devp2p/src/handshake.rs index 5388318a7..4b4aa295a 100644 --- a/util/network-devp2p/src/handshake.rs +++ b/util/network-devp2p/src/handshake.rs @@ -14,61 +14,63 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::time::Duration; -use rand::random; -use hash::write_keccak; -use mio::tcp::*; -use ethereum_types::{H256, H520}; -use parity_bytes::Bytes; -use rlp::{Rlp, RlpStream}; use connection::Connection; -use node_table::NodeId; -use io::{IoContext, StreamToken}; -use ethkey::{KeyPair, Public, Secret, recover, sign, Generator, Random}; -use ethkey::crypto::{ecdh, ecies}; -use network::{Error, ErrorKind}; +use ethereum_types::{H256, H520}; +use ethkey::{ + crypto::{ecdh, ecies}, + recover, sign, Generator, KeyPair, Public, Random, Secret, +}; +use hash::write_keccak; use host::HostInfo; +use io::{IoContext, StreamToken}; +use mio::tcp::*; +use network::{Error, ErrorKind}; +use node_table::NodeId; +use parity_bytes::Bytes; +use rand::random; +use rlp::{Rlp, RlpStream}; +use std::time::Duration; #[derive(PartialEq, Eq, Debug)] enum HandshakeState { - /// Just created - New, - /// Waiting for auth packet - ReadingAuth, - /// Waiting for extended auth packet - ReadingAuthEip8, - /// Waiting for ack packet - ReadingAck, - /// Waiting for extended ack packet - ReadingAckEip8, - /// Ready to start a session - StartSession, + /// Just created + New, + /// Waiting for auth packet + ReadingAuth, + /// Waiting for extended auth packet + ReadingAuthEip8, + /// Waiting for ack packet + ReadingAck, + /// Waiting for extended ack packet + ReadingAckEip8, + /// Ready to start a session + StartSession, } /// `RLPx` protocol handshake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake pub struct Handshake { - /// Remote node public key - pub id: NodeId, - /// Underlying connection - pub connection: Connection, - /// Handshake state - state: HandshakeState, - /// Outgoing or incoming connection - pub originated: bool, - /// ECDH ephemeral - pub ecdhe: KeyPair, - /// Connection nonce - pub nonce: H256, - /// Handshake public key - pub remote_ephemeral: Public, - /// Remote connection nonce. - pub remote_nonce: H256, - /// Remote `RLPx` protocol version. 
- pub remote_version: u64, - /// A copy of received encrypted auth packet - pub auth_cipher: Bytes, - /// A copy of received encrypted ack packet - pub ack_cipher: Bytes, + /// Remote node public key + pub id: NodeId, + /// Underlying connection + pub connection: Connection, + /// Handshake state + state: HandshakeState, + /// Outgoing or incoming connection + pub originated: bool, + /// ECDH ephemeral + pub ecdhe: KeyPair, + /// Connection nonce + pub nonce: H256, + /// Handshake public key + pub remote_ephemeral: Public, + /// Remote connection nonce. + pub remote_nonce: H256, + /// Remote `RLPx` protocol version. + pub remote_version: u64, + /// A copy of received encrypted auth packet + pub auth_cipher: Bytes, + /// A copy of received encrypted ack packet + pub ack_cipher: Bytes, } const V4_AUTH_PACKET_SIZE: usize = 307; @@ -79,281 +81,358 @@ const PROTOCOL_VERSION: u64 = 4; const ECIES_OVERHEAD: usize = 113; impl Handshake { - /// Create a new handshake object - pub fn new(token: StreamToken, id: Option<&NodeId>, socket: TcpStream, nonce: &H256) -> Result { - Ok(Handshake { - id: if let Some(id) = id { *id } else { NodeId::new() }, - connection: Connection::new(token, socket), - originated: false, - state: HandshakeState::New, - ecdhe: Random.generate()?, - nonce: *nonce, - remote_ephemeral: Public::new(), - remote_nonce: H256::new(), - remote_version: PROTOCOL_VERSION, - auth_cipher: Bytes::new(), - ack_cipher: Bytes::new(), - }) - } + /// Create a new handshake object + pub fn new( + token: StreamToken, + id: Option<&NodeId>, + socket: TcpStream, + nonce: &H256, + ) -> Result { + Ok(Handshake { + id: if let Some(id) = id { + *id + } else { + NodeId::new() + }, + connection: Connection::new(token, socket), + originated: false, + state: HandshakeState::New, + ecdhe: Random.generate()?, + nonce: *nonce, + remote_ephemeral: Public::new(), + remote_nonce: H256::new(), + remote_version: PROTOCOL_VERSION, + auth_cipher: Bytes::new(), + ack_cipher: Bytes::new(), + 
}) + } - /// Start a handshake - pub fn start(&mut self, io: &IoContext, host: &HostInfo, originated: bool) -> Result<(), Error> where Message: Send + Clone+ Sync + 'static { - self.originated = originated; - io.register_timer(self.connection.token, HANDSHAKE_TIMEOUT).ok(); - if originated { - self.write_auth(io, host.secret(), host.id())?; - } - else { - self.state = HandshakeState::ReadingAuth; - self.connection.expect(V4_AUTH_PACKET_SIZE); - }; - Ok(()) - } + /// Start a handshake + pub fn start( + &mut self, + io: &IoContext, + host: &HostInfo, + originated: bool, + ) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + self.originated = originated; + io.register_timer(self.connection.token, HANDSHAKE_TIMEOUT) + .ok(); + if originated { + self.write_auth(io, host.secret(), host.id())?; + } else { + self.state = HandshakeState::ReadingAuth; + self.connection.expect(V4_AUTH_PACKET_SIZE); + }; + Ok(()) + } - /// Check if handshake is complete - pub fn done(&self) -> bool { - self.state == HandshakeState::StartSession - } + /// Check if handshake is complete + pub fn done(&self) -> bool { + self.state == HandshakeState::StartSession + } - /// Readable IO handler. Drives the state change. - pub fn readable(&mut self, io: &IoContext, host: &HostInfo) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { - while let Some(data) = self.connection.readable()? 
{ - match self.state { - HandshakeState::New => {}, - HandshakeState::StartSession => {}, - HandshakeState::ReadingAuth => { - self.read_auth(io, host.secret(), &data)?; - }, - HandshakeState::ReadingAuthEip8 => { - self.read_auth_eip8(io, host.secret(), &data)?; - }, - HandshakeState::ReadingAck => { - self.read_ack(host.secret(), &data)?; - }, - HandshakeState::ReadingAckEip8 => { - self.read_ack_eip8(host.secret(), &data)?; - }, - } - if self.state == HandshakeState::StartSession { - io.clear_timer(self.connection.token).ok(); - break; - } - } - Ok(()) - } + /// Readable IO handler. Drives the state change. + pub fn readable( + &mut self, + io: &IoContext, + host: &HostInfo, + ) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + while let Some(data) = self.connection.readable()? { + match self.state { + HandshakeState::New => {} + HandshakeState::StartSession => {} + HandshakeState::ReadingAuth => { + self.read_auth(io, host.secret(), &data)?; + } + HandshakeState::ReadingAuthEip8 => { + self.read_auth_eip8(io, host.secret(), &data)?; + } + HandshakeState::ReadingAck => { + self.read_ack(host.secret(), &data)?; + } + HandshakeState::ReadingAckEip8 => { + self.read_ack_eip8(host.secret(), &data)?; + } + } + if self.state == HandshakeState::StartSession { + io.clear_timer(self.connection.token).ok(); + break; + } + } + Ok(()) + } - /// Writable IO handler. - pub fn writable(&mut self, io: &IoContext) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { - self.connection.writable(io)?; - Ok(()) - } + /// Writable IO handler. 
+ pub fn writable(&mut self, io: &IoContext) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + self.connection.writable(io)?; + Ok(()) + } - fn set_auth(&mut self, host_secret: &Secret, sig: &[u8], remote_public: &[u8], remote_nonce: &[u8], remote_version: u64) -> Result<(), Error> { - self.id.clone_from_slice(remote_public); - self.remote_nonce.clone_from_slice(remote_nonce); - self.remote_version = remote_version; - let shared = *ecdh::agree(host_secret, &self.id)?; - let signature = H520::from_slice(sig); - self.remote_ephemeral = recover(&signature.into(), &(shared ^ self.remote_nonce))?; - Ok(()) - } + fn set_auth( + &mut self, + host_secret: &Secret, + sig: &[u8], + remote_public: &[u8], + remote_nonce: &[u8], + remote_version: u64, + ) -> Result<(), Error> { + self.id.clone_from_slice(remote_public); + self.remote_nonce.clone_from_slice(remote_nonce); + self.remote_version = remote_version; + let shared = *ecdh::agree(host_secret, &self.id)?; + let signature = H520::from_slice(sig); + self.remote_ephemeral = recover(&signature.into(), &(shared ^ self.remote_nonce))?; + Ok(()) + } - /// Parse, validate and confirm auth message - fn read_auth(&mut self, io: &IoContext, secret: &Secret, data: &[u8]) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { - trace!(target: "network", "Received handshake auth from {:?}", self.connection.remote_addr_str()); - if data.len() != V4_AUTH_PACKET_SIZE { - debug!(target: "network", "Wrong auth packet size"); - return Err(ErrorKind::BadProtocol.into()); - } - self.auth_cipher = data.to_vec(); - match ecies::decrypt(secret, &[], data) { - Ok(auth) => { - let (sig, rest) = auth.split_at(65); - let (_, rest) = rest.split_at(32); - let (pubk, rest) = rest.split_at(64); - let (nonce, _) = rest.split_at(32); - self.set_auth(secret, sig, pubk, nonce, PROTOCOL_VERSION)?; - self.write_ack(io)?; - } - Err(_) => { - // Try to interpret as EIP-8 packet - let total = ((u16::from(data[0]) << 8 | 
(u16::from(data[1]))) as usize) + 2; - if total < V4_AUTH_PACKET_SIZE { - debug!(target: "network", "Wrong EIP8 auth packet size"); - return Err(ErrorKind::BadProtocol.into()); - } - let rest = total - data.len(); - self.state = HandshakeState::ReadingAuthEip8; - self.connection.expect(rest); - } - } - Ok(()) - } + /// Parse, validate and confirm auth message + fn read_auth( + &mut self, + io: &IoContext, + secret: &Secret, + data: &[u8], + ) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + trace!(target: "network", "Received handshake auth from {:?}", self.connection.remote_addr_str()); + if data.len() != V4_AUTH_PACKET_SIZE { + debug!(target: "network", "Wrong auth packet size"); + return Err(ErrorKind::BadProtocol.into()); + } + self.auth_cipher = data.to_vec(); + match ecies::decrypt(secret, &[], data) { + Ok(auth) => { + let (sig, rest) = auth.split_at(65); + let (_, rest) = rest.split_at(32); + let (pubk, rest) = rest.split_at(64); + let (nonce, _) = rest.split_at(32); + self.set_auth(secret, sig, pubk, nonce, PROTOCOL_VERSION)?; + self.write_ack(io)?; + } + Err(_) => { + // Try to interpret as EIP-8 packet + let total = ((u16::from(data[0]) << 8 | (u16::from(data[1]))) as usize) + 2; + if total < V4_AUTH_PACKET_SIZE { + debug!(target: "network", "Wrong EIP8 auth packet size"); + return Err(ErrorKind::BadProtocol.into()); + } + let rest = total - data.len(); + self.state = HandshakeState::ReadingAuthEip8; + self.connection.expect(rest); + } + } + Ok(()) + } - fn read_auth_eip8(&mut self, io: &IoContext, secret: &Secret, data: &[u8]) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { - trace!(target: "network", "Received EIP8 handshake auth from {:?}", self.connection.remote_addr_str()); - self.auth_cipher.extend_from_slice(data); - let auth = ecies::decrypt(secret, &self.auth_cipher[0..2], &self.auth_cipher[2..])?; - let rlp = Rlp::new(&auth); - let signature: H520 = rlp.val_at(0)?; - let remote_public: Public = 
rlp.val_at(1)?; - let remote_nonce: H256 = rlp.val_at(2)?; - let remote_version: u64 = rlp.val_at(3)?; - self.set_auth(secret, &signature, &remote_public, &remote_nonce, remote_version)?; - self.write_ack_eip8(io)?; - Ok(()) - } + fn read_auth_eip8( + &mut self, + io: &IoContext, + secret: &Secret, + data: &[u8], + ) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + trace!(target: "network", "Received EIP8 handshake auth from {:?}", self.connection.remote_addr_str()); + self.auth_cipher.extend_from_slice(data); + let auth = ecies::decrypt(secret, &self.auth_cipher[0..2], &self.auth_cipher[2..])?; + let rlp = Rlp::new(&auth); + let signature: H520 = rlp.val_at(0)?; + let remote_public: Public = rlp.val_at(1)?; + let remote_nonce: H256 = rlp.val_at(2)?; + let remote_version: u64 = rlp.val_at(3)?; + self.set_auth( + secret, + &signature, + &remote_public, + &remote_nonce, + remote_version, + )?; + self.write_ack_eip8(io)?; + Ok(()) + } - /// Parse and validate ack message - fn read_ack(&mut self, secret: &Secret, data: &[u8]) -> Result<(), Error> { - trace!(target: "network", "Received handshake ack from {:?}", self.connection.remote_addr_str()); - if data.len() != V4_ACK_PACKET_SIZE { - debug!(target: "network", "Wrong ack packet size"); - return Err(ErrorKind::BadProtocol.into()); - } - self.ack_cipher = data.to_vec(); - match ecies::decrypt(secret, &[], data) { - Ok(ack) => { - self.remote_ephemeral.clone_from_slice(&ack[0..64]); - self.remote_nonce.clone_from_slice(&ack[64..(64+32)]); - self.state = HandshakeState::StartSession; - } - Err(_) => { - // Try to interpret as EIP-8 packet - let total = (((u16::from(data[0])) << 8 | (u16::from(data[1]))) as usize) + 2; - if total < V4_ACK_PACKET_SIZE { - debug!(target: "network", "Wrong EIP8 ack packet size"); - return Err(ErrorKind::BadProtocol.into()); - } - let rest = total - data.len(); - self.state = HandshakeState::ReadingAckEip8; - self.connection.expect(rest); - } - } - Ok(()) - } + 
/// Parse and validate ack message + fn read_ack(&mut self, secret: &Secret, data: &[u8]) -> Result<(), Error> { + trace!(target: "network", "Received handshake ack from {:?}", self.connection.remote_addr_str()); + if data.len() != V4_ACK_PACKET_SIZE { + debug!(target: "network", "Wrong ack packet size"); + return Err(ErrorKind::BadProtocol.into()); + } + self.ack_cipher = data.to_vec(); + match ecies::decrypt(secret, &[], data) { + Ok(ack) => { + self.remote_ephemeral.clone_from_slice(&ack[0..64]); + self.remote_nonce.clone_from_slice(&ack[64..(64 + 32)]); + self.state = HandshakeState::StartSession; + } + Err(_) => { + // Try to interpret as EIP-8 packet + let total = (((u16::from(data[0])) << 8 | (u16::from(data[1]))) as usize) + 2; + if total < V4_ACK_PACKET_SIZE { + debug!(target: "network", "Wrong EIP8 ack packet size"); + return Err(ErrorKind::BadProtocol.into()); + } + let rest = total - data.len(); + self.state = HandshakeState::ReadingAckEip8; + self.connection.expect(rest); + } + } + Ok(()) + } - fn read_ack_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), Error> { - trace!(target: "network", "Received EIP8 handshake auth from {:?}", self.connection.remote_addr_str()); - self.ack_cipher.extend_from_slice(data); - let ack = ecies::decrypt(secret, &self.ack_cipher[0..2], &self.ack_cipher[2..])?; - let rlp = Rlp::new(&ack); - self.remote_ephemeral = rlp.val_at(0)?; - self.remote_nonce = rlp.val_at(1)?; - self.remote_version = rlp.val_at(2)?; - self.state = HandshakeState::StartSession; - Ok(()) - } + fn read_ack_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), Error> { + trace!(target: "network", "Received EIP8 handshake auth from {:?}", self.connection.remote_addr_str()); + self.ack_cipher.extend_from_slice(data); + let ack = ecies::decrypt(secret, &self.ack_cipher[0..2], &self.ack_cipher[2..])?; + let rlp = Rlp::new(&ack); + self.remote_ephemeral = rlp.val_at(0)?; + self.remote_nonce = rlp.val_at(1)?; + self.remote_version = 
rlp.val_at(2)?; + self.state = HandshakeState::StartSession; + Ok(()) + } - /// Sends auth message - fn write_auth(&mut self, io: &IoContext, secret: &Secret, public: &Public) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { - trace!(target: "network", "Sending handshake auth to {:?}", self.connection.remote_addr_str()); - let mut data = [0u8; /*Signature::SIZE*/ 65 + /*H256::SIZE*/ 32 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32 + 1]; //TODO: use associated constants - let len = data.len(); - { - data[len - 1] = 0x0; - let (sig, rest) = data.split_at_mut(65); - let (hepubk, rest) = rest.split_at_mut(32); - let (pubk, rest) = rest.split_at_mut(64); - let (nonce, _) = rest.split_at_mut(32); + /// Sends auth message + fn write_auth( + &mut self, + io: &IoContext, + secret: &Secret, + public: &Public, + ) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + trace!(target: "network", "Sending handshake auth to {:?}", self.connection.remote_addr_str()); + let mut data = [0u8; /*Signature::SIZE*/ 65 + /*H256::SIZE*/ 32 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32 + 1]; //TODO: use associated constants + let len = data.len(); + { + data[len - 1] = 0x0; + let (sig, rest) = data.split_at_mut(65); + let (hepubk, rest) = rest.split_at_mut(32); + let (pubk, rest) = rest.split_at_mut(64); + let (nonce, _) = rest.split_at_mut(32); - // E(remote-pubk, S(ecdhe-random, ecdh-shared-secret^nonce) || H(ecdhe-random-pubk) || pubk || nonce || 0x0) - let shared = *ecdh::agree(secret, &self.id)?; - sig.copy_from_slice(&*sign(self.ecdhe.secret(), &(shared ^ self.nonce))?); - write_keccak(self.ecdhe.public(), hepubk); - pubk.copy_from_slice(public); - nonce.copy_from_slice(&self.nonce); - } - let message = ecies::encrypt(&self.id, &[], &data)?; - self.auth_cipher = message.clone(); - self.connection.send(io, message); - self.connection.expect(V4_ACK_PACKET_SIZE); - self.state = HandshakeState::ReadingAck; - Ok(()) - } + // E(remote-pubk, S(ecdhe-random, 
ecdh-shared-secret^nonce) || H(ecdhe-random-pubk) || pubk || nonce || 0x0) + let shared = *ecdh::agree(secret, &self.id)?; + sig.copy_from_slice(&*sign(self.ecdhe.secret(), &(shared ^ self.nonce))?); + write_keccak(self.ecdhe.public(), hepubk); + pubk.copy_from_slice(public); + nonce.copy_from_slice(&self.nonce); + } + let message = ecies::encrypt(&self.id, &[], &data)?; + self.auth_cipher = message.clone(); + self.connection.send(io, message); + self.connection.expect(V4_ACK_PACKET_SIZE); + self.state = HandshakeState::ReadingAck; + Ok(()) + } - /// Sends ack message - fn write_ack(&mut self, io: &IoContext) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { - trace!(target: "network", "Sending handshake ack to {:?}", self.connection.remote_addr_str()); - let mut data = [0u8; 1 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32]; //TODO: use associated constants - let len = data.len(); - { - data[len - 1] = 0x0; - let (epubk, rest) = data.split_at_mut(64); - let (nonce, _) = rest.split_at_mut(32); - self.ecdhe.public().copy_to(epubk); - self.nonce.copy_to(nonce); - } - let message = ecies::encrypt(&self.id, &[], &data)?; - self.ack_cipher = message.clone(); - self.connection.send(io, message); - self.state = HandshakeState::StartSession; - Ok(()) - } + /// Sends ack message + fn write_ack(&mut self, io: &IoContext) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + trace!(target: "network", "Sending handshake ack to {:?}", self.connection.remote_addr_str()); + let mut data = [0u8; 1 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32]; //TODO: use associated constants + let len = data.len(); + { + data[len - 1] = 0x0; + let (epubk, rest) = data.split_at_mut(64); + let (nonce, _) = rest.split_at_mut(32); + self.ecdhe.public().copy_to(epubk); + self.nonce.copy_to(nonce); + } + let message = ecies::encrypt(&self.id, &[], &data)?; + self.ack_cipher = message.clone(); + self.connection.send(io, message); + self.state = 
HandshakeState::StartSession; + Ok(()) + } - /// Sends EIP8 ack message - fn write_ack_eip8(&mut self, io: &IoContext) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { - trace!(target: "network", "Sending EIP8 handshake ack to {:?}", self.connection.remote_addr_str()); - let mut rlp = RlpStream::new_list(3); - rlp.append(self.ecdhe.public()); - rlp.append(&self.nonce); - rlp.append(&PROTOCOL_VERSION); + /// Sends EIP8 ack message + fn write_ack_eip8(&mut self, io: &IoContext) -> Result<(), Error> + where + Message: Send + Clone + Sync + 'static, + { + trace!(target: "network", "Sending EIP8 handshake ack to {:?}", self.connection.remote_addr_str()); + let mut rlp = RlpStream::new_list(3); + rlp.append(self.ecdhe.public()); + rlp.append(&self.nonce); + rlp.append(&PROTOCOL_VERSION); - let pad_array = [0u8; 200]; - let pad = &pad_array[0 .. 100 + random::() % 100]; - rlp.append_raw(pad, 0); + let pad_array = [0u8; 200]; + let pad = &pad_array[0..100 + random::() % 100]; + rlp.append_raw(pad, 0); - let encoded = rlp.drain(); - let len = (encoded.len() + ECIES_OVERHEAD) as u16; - let prefix = [ (len >> 8) as u8, (len & 0xff) as u8 ]; - let message = ecies::encrypt(&self.id, &prefix, &encoded)?; - self.ack_cipher.extend_from_slice(&prefix); - self.ack_cipher.extend_from_slice(&message); - self.connection.send(io, self.ack_cipher.clone()); - self.state = HandshakeState::StartSession; - Ok(()) - } + let encoded = rlp.drain(); + let len = (encoded.len() + ECIES_OVERHEAD) as u16; + let prefix = [(len >> 8) as u8, (len & 0xff) as u8]; + let message = ecies::encrypt(&self.id, &prefix, &encoded)?; + self.ack_cipher.extend_from_slice(&prefix); + self.ack_cipher.extend_from_slice(&message); + self.connection.send(io, self.ack_cipher.clone()); + self.state = HandshakeState::StartSession; + Ok(()) + } } #[cfg(test)] mod test { - use rustc_hex::FromHex; - use super::*; - use ethereum_types::H256; - use io::*; - use mio::tcp::TcpStream; - use ethkey::Public; + use 
super::*; + use ethereum_types::H256; + use ethkey::Public; + use io::*; + use mio::tcp::TcpStream; + use rustc_hex::FromHex; - fn check_auth(h: &Handshake, version: u64) { - assert_eq!(h.id, "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into()); - assert_eq!(h.remote_nonce, "7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6".into()); - assert_eq!(h.remote_ephemeral, "654d1044b69c577a44e5f01a1209523adb4026e70c62d1c13a067acabc09d2667a49821a0ad4b634554d330a15a58fe61f8a8e0544b310c6de7b0c8da7528a8d".into()); - assert_eq!(h.remote_version, version); - } + fn check_auth(h: &Handshake, version: u64) { + assert_eq!(h.id, "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into()); + assert_eq!( + h.remote_nonce, + "7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6".into() + ); + assert_eq!(h.remote_ephemeral, "654d1044b69c577a44e5f01a1209523adb4026e70c62d1c13a067acabc09d2667a49821a0ad4b634554d330a15a58fe61f8a8e0544b310c6de7b0c8da7528a8d".into()); + assert_eq!(h.remote_version, version); + } - fn check_ack(h: &Handshake, version: u64) { - assert_eq!(h.remote_nonce, "559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd".into()); - assert_eq!(h.remote_ephemeral, "b6d82fa3409da933dbf9cb0140c5dde89f4e64aec88d476af648880f4a10e1e49fe35ef3e69e93dd300b4797765a747c6384a6ecf5db9c2690398607a86181e4".into()); - assert_eq!(h.remote_version, version); - } + fn check_ack(h: &Handshake, version: u64) { + assert_eq!( + h.remote_nonce, + "559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd".into() + ); + assert_eq!(h.remote_ephemeral, "b6d82fa3409da933dbf9cb0140c5dde89f4e64aec88d476af648880f4a10e1e49fe35ef3e69e93dd300b4797765a747c6384a6ecf5db9c2690398607a86181e4".into()); + assert_eq!(h.remote_version, version); + } - fn create_handshake(to: Option<&Public>) -> Handshake { - 
let addr = "127.0.0.1:50556".parse().unwrap(); - let socket = TcpStream::connect(&addr).unwrap(); - let nonce = H256::new(); - Handshake::new(0, to, socket, &nonce).unwrap() - } + fn create_handshake(to: Option<&Public>) -> Handshake { + let addr = "127.0.0.1:50556".parse().unwrap(); + let socket = TcpStream::connect(&addr).unwrap(); + let nonce = H256::new(); + Handshake::new(0, to, socket, &nonce).unwrap() + } - fn test_io() -> IoContext { - IoContext::new(IoChannel::disconnected(), 0) - } + fn test_io() -> IoContext { + IoContext::new(IoChannel::disconnected(), 0) + } - #[test] - fn test_handshake_auth_plain() { - let mut h = create_handshake(None); - let secret = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291".parse().unwrap(); - let auth = - "\ + #[test] + fn test_handshake_auth_plain() { + let mut h = create_handshake(None); + let secret = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291" + .parse() + .unwrap(); + let auth = "\ 048ca79ad18e4b0659fab4853fe5bc58eb83992980f4c9cc147d2aa31532efd29a3d3dc6a3d89eaf\ 913150cfc777ce0ce4af2758bf4810235f6e6ceccfee1acc6b22c005e9e3a49d6448610a58e98744\ ba3ac0399e82692d67c1f58849050b3024e21a52c9d3b01d871ff5f210817912773e610443a9ef14\ @@ -362,19 +441,22 @@ mod test { 0f0128410fd05250273156d548a414444ae2f7dea4dfca2d43c057adb701a715bf59f6fb66b2d1d2\ 0f2c703f851cbf5ac47396d9ca65b6260bd141ac4d53e2de585a73d1750780db4c9ee4cd4d225173\ a4592ee77e2bd94d0be3691f3b406f9bba9b591fc63facc016bfa8\ - ".from_hex().unwrap(); + " + .from_hex() + .unwrap(); - h.read_auth(&test_io(), &secret, &auth).unwrap(); - assert_eq!(h.state, super::HandshakeState::StartSession); - check_auth(&h, 4); - } + h.read_auth(&test_io(), &secret, &auth).unwrap(); + assert_eq!(h.state, super::HandshakeState::StartSession); + check_auth(&h, 4); + } - #[test] - fn test_handshake_auth_eip8() { - let mut h = create_handshake(None); - let secret = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291".parse().unwrap(); - 
let auth = - "\ + #[test] + fn test_handshake_auth_eip8() { + let mut h = create_handshake(None); + let secret = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291" + .parse() + .unwrap(); + let auth = "\ 01b304ab7578555167be8154d5cc456f567d5ba302662433674222360f08d5f1534499d3678b513b\ 0fca474f3a514b18e75683032eb63fccb16c156dc6eb2c0b1593f0d84ac74f6e475f1b8d56116b84\ 9634a8c458705bf83a626ea0384d4d7341aae591fae42ce6bd5c850bfe0b999a694a49bbbaf3ef6c\ @@ -386,21 +468,26 @@ mod test { 6f917bc5e1eafd5896d46bd61ff23f1a863a8a8dcd54c7b109b771c8e61ec9c8908c733c0263440e\ 2aa067241aaa433f0bb053c7b31a838504b148f570c0ad62837129e547678c5190341e4f1693956c\ 3bf7678318e2d5b5340c9e488eefea198576344afbdf66db5f51204a6961a63ce072c8926c\ - ".from_hex().unwrap(); + " + .from_hex() + .unwrap(); - h.read_auth(&test_io(), &secret, &auth[0..super::V4_AUTH_PACKET_SIZE]).unwrap(); - assert_eq!(h.state, super::HandshakeState::ReadingAuthEip8); - h.read_auth_eip8(&test_io(), &secret, &auth[super::V4_AUTH_PACKET_SIZE..]).unwrap(); - assert_eq!(h.state, super::HandshakeState::StartSession); - check_auth(&h, 4); - } + h.read_auth(&test_io(), &secret, &auth[0..super::V4_AUTH_PACKET_SIZE]) + .unwrap(); + assert_eq!(h.state, super::HandshakeState::ReadingAuthEip8); + h.read_auth_eip8(&test_io(), &secret, &auth[super::V4_AUTH_PACKET_SIZE..]) + .unwrap(); + assert_eq!(h.state, super::HandshakeState::StartSession); + check_auth(&h, 4); + } - #[test] - fn test_handshake_auth_eip8_2() { - let mut h = create_handshake(None); - let secret = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291".parse().unwrap(); - let auth = - "\ + #[test] + fn test_handshake_auth_eip8_2() { + let mut h = create_handshake(None); + let secret = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291" + .parse() + .unwrap(); + let auth = "\ 01b8044c6c312173685d1edd268aa95e1d495474c6959bcdd10067ba4c9013df9e40ff45f5bfd6f7\ 
2471f93a91b493f8e00abc4b80f682973de715d77ba3a005a242eb859f9a211d93a347fa64b597bf\ 280a6b88e26299cf263b01b8dfdb712278464fd1c25840b995e84d367d743f66c0e54a586725b7bb\ @@ -413,45 +500,53 @@ mod test { 7ac044b55be0908ecb94d4ed172ece66fd31bfdadf2b97a8bc690163ee11f5b575a4b44e36e2bfb2\ f0fce91676fd64c7773bac6a003f481fddd0bae0a1f31aa27504e2a533af4cef3b623f4791b2cca6\ d490\ - ".from_hex().unwrap(); + " + .from_hex() + .unwrap(); - h.read_auth(&test_io(), &secret, &auth[0..super::V4_AUTH_PACKET_SIZE]).unwrap(); - assert_eq!(h.state, super::HandshakeState::ReadingAuthEip8); - h.read_auth_eip8(&test_io(), &secret, &auth[super::V4_AUTH_PACKET_SIZE..]).unwrap(); - assert_eq!(h.state, super::HandshakeState::StartSession); - check_auth(&h, 56); - let ack = h.ack_cipher.clone(); - let total = (((ack[0] as u16) << 8 | (ack[1] as u16)) as usize) + 2; - assert_eq!(ack.len(), total); - } + h.read_auth(&test_io(), &secret, &auth[0..super::V4_AUTH_PACKET_SIZE]) + .unwrap(); + assert_eq!(h.state, super::HandshakeState::ReadingAuthEip8); + h.read_auth_eip8(&test_io(), &secret, &auth[super::V4_AUTH_PACKET_SIZE..]) + .unwrap(); + assert_eq!(h.state, super::HandshakeState::StartSession); + check_auth(&h, 56); + let ack = h.ack_cipher.clone(); + let total = (((ack[0] as u16) << 8 | (ack[1] as u16)) as usize) + 2; + assert_eq!(ack.len(), total); + } - #[test] - fn test_handshake_ack_plain() { - let remote = "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into(); - let mut h = create_handshake(Some(&remote)); - let secret = "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee".parse().unwrap(); - let ack = - "\ + #[test] + fn test_handshake_ack_plain() { + let remote = "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into(); + let mut h = create_handshake(Some(&remote)); + let secret = 
"49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee" + .parse() + .unwrap(); + let ack = "\ 049f8abcfa9c0dc65b982e98af921bc0ba6e4243169348a236abe9df5f93aa69d99cadddaa387662\ b0ff2c08e9006d5a11a278b1b3331e5aaabf0a32f01281b6f4ede0e09a2d5f585b26513cb794d963\ 5a57563921c04a9090b4f14ee42be1a5461049af4ea7a7f49bf4c97a352d39c8d02ee4acc416388c\ 1c66cec761d2bc1c72da6ba143477f049c9d2dde846c252c111b904f630ac98e51609b3b1f58168d\ dca6505b7196532e5f85b259a20c45e1979491683fee108e9660edbf38f3add489ae73e3dda2c71b\ d1497113d5c755e942d1\ - ".from_hex().unwrap(); + " + .from_hex() + .unwrap(); - h.read_ack(&secret, &ack).unwrap(); - assert_eq!(h.state, super::HandshakeState::StartSession); - check_ack(&h, 4); - } + h.read_ack(&secret, &ack).unwrap(); + assert_eq!(h.state, super::HandshakeState::StartSession); + check_ack(&h, 4); + } - #[test] - fn test_handshake_ack_eip8() { - let remote = "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into(); - let mut h = create_handshake(Some(&remote)); - let secret = "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee".parse().unwrap(); - let ack = - "\ + #[test] + fn test_handshake_ack_eip8() { + let remote = "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into(); + let mut h = create_handshake(Some(&remote)); + let secret = "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee" + .parse() + .unwrap(); + let ack = "\ 01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217c9b917788989470\ b0e330cc6e4fb383c0340ed85fab836ec9fb8a49672712aeabbdfd1e837c1ff4cace34311cd7f4de\ 05d59279e3524ab26ef753a0095637ac88f2b499b9914b5f64e143eae548a1066e14cd2f4bd7f814\ @@ -465,22 +560,27 @@ mod test { 8000cdb6a912778426260c47f38919a91f25f4b5ffb455d6aaaf150f7e5529c100ce62d6d92826a7\ 
1778d809bdf60232ae21ce8a437eca8223f45ac37f6487452ce626f549b3b5fdee26afd2072e4bc7\ 5833c2464c805246155289f4\ - ".from_hex().unwrap(); + " + .from_hex() + .unwrap(); - h.read_ack(&secret, &ack[0..super::V4_ACK_PACKET_SIZE]).unwrap(); - assert_eq!(h.state, super::HandshakeState::ReadingAckEip8); - h.read_ack_eip8(&secret, &ack[super::V4_ACK_PACKET_SIZE..]).unwrap(); - assert_eq!(h.state, super::HandshakeState::StartSession); - check_ack(&h, 4); - } + h.read_ack(&secret, &ack[0..super::V4_ACK_PACKET_SIZE]) + .unwrap(); + assert_eq!(h.state, super::HandshakeState::ReadingAckEip8); + h.read_ack_eip8(&secret, &ack[super::V4_ACK_PACKET_SIZE..]) + .unwrap(); + assert_eq!(h.state, super::HandshakeState::StartSession); + check_ack(&h, 4); + } - #[test] - fn test_handshake_ack_eip8_2() { - let remote = "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into(); - let mut h = create_handshake(Some(&remote)); - let secret = "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee".parse().unwrap(); - let ack = - "\ + #[test] + fn test_handshake_ack_eip8_2() { + let remote = "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into(); + let mut h = create_handshake(Some(&remote)); + let secret = "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee" + .parse() + .unwrap(); + let ack = "\ 01f004076e58aae772bb101ab1a8e64e01ee96e64857ce82b1113817c6cdd52c09d26f7b90981cd7\ ae835aeac72e1573b8a0225dd56d157a010846d888dac7464baf53f2ad4e3d584531fa203658fab0\ 3a06c9fd5e35737e417bc28c1cbf5e5dfc666de7090f69c3b29754725f84f75382891c561040ea1d\ @@ -494,12 +594,16 @@ mod test { 3011b7348c16cf58f66b9633906ba54a2ee803187344b394f75dd2e663a57b956cb830dd7a908d4f\ 39a2336a61ef9fda549180d4ccde21514d117b6c6fd07a9102b5efe710a32af4eeacae2cb3b1dec0\ 35b9593b48b9d3ca4c13d245d5f04169b0b1\ - ".from_hex().unwrap(); + " + .from_hex() + 
.unwrap(); - h.read_ack(&secret, &ack[0..super::V4_ACK_PACKET_SIZE]).unwrap(); - assert_eq!(h.state, super::HandshakeState::ReadingAckEip8); - h.read_ack_eip8(&secret, &ack[super::V4_ACK_PACKET_SIZE..]).unwrap(); - assert_eq!(h.state, super::HandshakeState::StartSession); - check_ack(&h, 57); - } + h.read_ack(&secret, &ack[0..super::V4_ACK_PACKET_SIZE]) + .unwrap(); + assert_eq!(h.state, super::HandshakeState::ReadingAckEip8); + h.read_ack_eip8(&secret, &ack[super::V4_ACK_PACKET_SIZE..]) + .unwrap(); + assert_eq!(h.state, super::HandshakeState::StartSession); + check_ack(&h, 57); + } } diff --git a/util/network-devp2p/src/host.rs b/util/network-devp2p/src/host.rs index 338148af8..286070676 100644 --- a/util/network-devp2p/src/host.rs +++ b/util/network-devp2p/src/host.rs @@ -14,40 +14,41 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::net::{SocketAddr, SocketAddrV4, Ipv4Addr}; -use std::collections::{HashMap, HashSet}; -use std::str::FromStr; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; -use std::ops::*; -use std::cmp::{min, max}; -use std::path::{Path, PathBuf}; -use std::io::{Read, Write, self}; -use std::fs; -use std::time::Duration; -use ethkey::{KeyPair, Secret, Random, Generator}; -use hash::keccak; -use mio::*; -use mio::deprecated::{EventLoop}; -use mio::tcp::*; -use mio::udp::*; use ethereum_types::H256; -use rlp::{RlpStream, Encodable}; +use ethkey::{Generator, KeyPair, Random, Secret}; +use hash::keccak; +use mio::{deprecated::EventLoop, tcp::*, udp::*, *}; +use rlp::{Encodable, RlpStream}; +use std::{ + cmp::{max, min}, + collections::{HashMap, HashSet}, + fs, + io::{self, Read, Write}, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + ops::*, + path::{Path, PathBuf}, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, + }, + time::Duration, +}; -use session::{Session, SessionData}; +use 
connection::PAYLOAD_SOFT_LIMIT; +use discovery::{Discovery, NodeEntry, TableUpdates, MAX_DATAGRAM_SIZE}; use io::*; -use PROTOCOL_VERSION; -use node_table::*; -use network::{NetworkConfiguration, NetworkIoMessage, ProtocolId, PeerId, PacketId}; -use network::{NonReservedPeerMode, NetworkContext as NetworkContextTrait}; -use network::{SessionInfo, Error, ErrorKind, DisconnectReason, NetworkProtocolHandler}; -use discovery::{Discovery, TableUpdates, NodeEntry, MAX_DATAGRAM_SIZE}; -use network::client_version::ClientVersion; use ip_utils::{map_external_address, select_public_address}; +use network::{ + client_version::ClientVersion, ConnectionDirection, ConnectionFilter, DisconnectReason, Error, + ErrorKind, NetworkConfiguration, NetworkContext as NetworkContextTrait, NetworkIoMessage, + NetworkProtocolHandler, NonReservedPeerMode, PacketId, PeerId, ProtocolId, SessionInfo, +}; +use node_table::*; use parity_path::restrict_permissions_owner; use parking_lot::{Mutex, RwLock}; -use network::{ConnectionFilter, ConnectionDirection}; -use connection::PAYLOAD_SOFT_LIMIT; +use session::{Session, SessionData}; +use PROTOCOL_VERSION; type Slab = ::slab::Slab; @@ -84,1200 +85,1433 @@ const NODE_TABLE_TIMEOUT: Duration = Duration::from_secs(300); #[derive(Debug, PartialEq, Eq)] /// Protocol info pub struct CapabilityInfo { - /// Protocol ID - pub protocol: ProtocolId, - /// Protocol version - pub version: u8, - /// Total number of packet IDs this protocol support. - pub packet_count: u8, + /// Protocol ID + pub protocol: ProtocolId, + /// Protocol version + pub version: u8, + /// Total number of packet IDs this protocol support. + pub packet_count: u8, } impl Encodable for CapabilityInfo { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2); - s.append(&&self.protocol[..]); - s.append(&self.version); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + s.append(&&self.protocol[..]); + s.append(&self.version); + } } /// IO access point. 
This is passed to all IO handlers and provides an interface to the IO subsystem. pub struct NetworkContext<'s> { - io: &'s IoContext, - protocol: ProtocolId, - sessions: Arc>>, - session: Option, - session_id: Option, - reserved_peers: &'s HashSet, + io: &'s IoContext, + protocol: ProtocolId, + sessions: Arc>>, + session: Option, + session_id: Option, + reserved_peers: &'s HashSet, } impl<'s> NetworkContext<'s> { - /// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler. - fn new( - io: &'s IoContext, - protocol: ProtocolId, - session: Option, - sessions: Arc>>, - reserved_peers: &'s HashSet, - ) -> NetworkContext<'s> { - let id = session.as_ref().map(|s| s.lock().token()); - NetworkContext { - io, - protocol, - session_id: id, - session, - sessions, - reserved_peers: reserved_peers, - } - } + /// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler. + fn new( + io: &'s IoContext, + protocol: ProtocolId, + session: Option, + sessions: Arc>>, + reserved_peers: &'s HashSet, + ) -> NetworkContext<'s> { + let id = session.as_ref().map(|s| s.lock().token()); + NetworkContext { + io, + protocol, + session_id: id, + session, + sessions, + reserved_peers: reserved_peers, + } + } - fn resolve_session(&self, peer: PeerId) -> Option { - match self.session_id { - Some(id) if id == peer => self.session.clone(), - _ => self.sessions.read().get(peer).cloned(), - } - } + fn resolve_session(&self, peer: PeerId) -> Option { + match self.session_id { + Some(id) if id == peer => self.session.clone(), + _ => self.sessions.read().get(peer).cloned(), + } + } } impl<'s> NetworkContextTrait for NetworkContext<'s> { - fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error> { - self.send_protocol(self.protocol, peer, packet_id, data) - } + fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error> { + 
self.send_protocol(self.protocol, peer, packet_id, data) + } - fn send_protocol(&self, protocol: ProtocolId, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error> { - let session = self.resolve_session(peer); - if let Some(session) = session { - session.lock().send_packet(self.io, Some(protocol), packet_id as u8, &data)?; - } else { - trace!(target: "network", "Send: Peer no longer exist") - } - Ok(()) - } + fn send_protocol( + &self, + protocol: ProtocolId, + peer: PeerId, + packet_id: PacketId, + data: Vec, + ) -> Result<(), Error> { + let session = self.resolve_session(peer); + if let Some(session) = session { + session + .lock() + .send_packet(self.io, Some(protocol), packet_id as u8, &data)?; + } else { + trace!(target: "network", "Send: Peer no longer exist") + } + Ok(()) + } - fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), Error> { - assert!(self.session.is_some(), "Respond called without network context"); - self.session_id.map_or_else(|| Err(ErrorKind::Expired.into()), |id| self.send(id, packet_id, data)) - } + fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), Error> { + assert!( + self.session.is_some(), + "Respond called without network context" + ); + self.session_id.map_or_else( + || Err(ErrorKind::Expired.into()), + |id| self.send(id, packet_id, data), + ) + } - fn disable_peer(&self, peer: PeerId) { - self.io.message(NetworkIoMessage::DisablePeer(peer)) - .unwrap_or_else(|e| warn!("Error sending network IO message: {:?}", e)); - } + fn disable_peer(&self, peer: PeerId) { + self.io + .message(NetworkIoMessage::DisablePeer(peer)) + .unwrap_or_else(|e| warn!("Error sending network IO message: {:?}", e)); + } - fn disconnect_peer(&self, peer: PeerId) { - self.io.message(NetworkIoMessage::Disconnect(peer)) - .unwrap_or_else(|e| warn!("Error sending network IO message: {:?}", e)); - } + fn disconnect_peer(&self, peer: PeerId) { + self.io + .message(NetworkIoMessage::Disconnect(peer)) + .unwrap_or_else(|e| 
warn!("Error sending network IO message: {:?}", e)); + } - fn is_expired(&self) -> bool { - self.session.as_ref().map_or(false, |s| s.lock().expired()) - } + fn is_expired(&self) -> bool { + self.session.as_ref().map_or(false, |s| s.lock().expired()) + } - fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), Error> { - self.io.message(NetworkIoMessage::AddTimer { - token, - delay, - protocol: self.protocol, - }).unwrap_or_else(|e| warn!("Error sending network IO message: {:?}", e)); - Ok(()) - } + fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), Error> { + self.io + .message(NetworkIoMessage::AddTimer { + token, + delay, + protocol: self.protocol, + }) + .unwrap_or_else(|e| warn!("Error sending network IO message: {:?}", e)); + Ok(()) + } - fn peer_client_version(&self, peer: PeerId) -> ClientVersion { - self.resolve_session(peer).map_or(ClientVersion::from("unknown").to_owned(), |s| s.lock().info.client_version.clone()) - } + fn peer_client_version(&self, peer: PeerId) -> ClientVersion { + self.resolve_session(peer) + .map_or(ClientVersion::from("unknown").to_owned(), |s| { + s.lock().info.client_version.clone() + }) + } - fn session_info(&self, peer: PeerId) -> Option { - self.resolve_session(peer).map(|s| s.lock().info.clone()) - } + fn session_info(&self, peer: PeerId) -> Option { + self.resolve_session(peer).map(|s| s.lock().info.clone()) + } - fn protocol_version(&self, protocol: ProtocolId, peer: PeerId) -> Option { - let session = self.resolve_session(peer); - session.and_then(|s| s.lock().capability_version(protocol)) - } + fn protocol_version(&self, protocol: ProtocolId, peer: PeerId) -> Option { + let session = self.resolve_session(peer); + session.and_then(|s| s.lock().capability_version(protocol)) + } - fn subprotocol_name(&self) -> ProtocolId { self.protocol } + fn subprotocol_name(&self) -> ProtocolId { + self.protocol + } - fn is_reserved_peer(&self, peer: PeerId) -> bool { - self.session_info(peer) - 
.and_then(|info| info.id) - .map(|node| self.reserved_peers.contains(&node)) - .unwrap_or(false) - } + fn is_reserved_peer(&self, peer: PeerId) -> bool { + self.session_info(peer) + .and_then(|info| info.id) + .map(|node| self.reserved_peers.contains(&node)) + .unwrap_or(false) + } - fn payload_soft_limit(&self) -> usize { - PAYLOAD_SOFT_LIMIT - } + fn payload_soft_limit(&self) -> usize { + PAYLOAD_SOFT_LIMIT + } } /// Shared host information pub struct HostInfo { - /// Our private and public keys. - keys: KeyPair, - /// Current network configuration - config: NetworkConfiguration, - /// Connection nonce. - nonce: H256, - /// RLPx protocol version - pub protocol_version: u32, - /// Registered capabilities (handlers) - pub capabilities: Vec, - /// Local address + discovery port - pub local_endpoint: NodeEndpoint, - /// Public address + discovery port - pub public_endpoint: Option, + /// Our private and public keys. + keys: KeyPair, + /// Current network configuration + config: NetworkConfiguration, + /// Connection nonce. 
+ nonce: H256, + /// RLPx protocol version + pub protocol_version: u32, + /// Registered capabilities (handlers) + pub capabilities: Vec, + /// Local address + discovery port + pub local_endpoint: NodeEndpoint, + /// Public address + discovery port + pub public_endpoint: Option, } impl HostInfo { - fn next_nonce(&mut self) -> H256 { - self.nonce = keccak(&self.nonce); - self.nonce - } + fn next_nonce(&mut self) -> H256 { + self.nonce = keccak(&self.nonce); + self.nonce + } - pub(crate) fn client_version(&self) -> &str { - &self.config.client_version - } + pub(crate) fn client_version(&self) -> &str { + &self.config.client_version + } - pub(crate) fn secret(&self) -> &Secret { - self.keys.secret() - } + pub(crate) fn secret(&self) -> &Secret { + self.keys.secret() + } - pub(crate) fn id(&self) -> &NodeId { - self.keys.public() - } + pub(crate) fn id(&self) -> &NodeId { + self.keys.public() + } } type SharedSession = Arc>; #[derive(Copy, Clone)] struct ProtocolTimer { - pub protocol: ProtocolId, - pub token: TimerToken, // Handler level token + pub protocol: ProtocolId, + pub token: TimerToken, // Handler level token } /// Root IO handler. Manages protocol handlers, IO timers and network connections. 
/// /// NOTE: must keep the lock in order of: reserved_nodes (rwlock) -> session (mutex, from sessions) pub struct Host { - pub info: RwLock, - udp_socket: Mutex>, - tcp_listener: Mutex, - sessions: Arc>>, - discovery: Mutex>>, - nodes: RwLock, - handlers: RwLock>>, - timers: RwLock>, - timer_counter: RwLock, - reserved_nodes: RwLock>, - stopping: AtomicBool, - filter: Option>, + pub info: RwLock, + udp_socket: Mutex>, + tcp_listener: Mutex, + sessions: Arc>>, + discovery: Mutex>>, + nodes: RwLock, + handlers: RwLock>>, + timers: RwLock>, + timer_counter: RwLock, + reserved_nodes: RwLock>, + stopping: AtomicBool, + filter: Option>, } impl Host { - /// Create a new instance - pub fn new(mut config: NetworkConfiguration, filter: Option>) -> Result { - let mut listen_address = match config.listen_address { - None => SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), DEFAULT_PORT)), - Some(addr) => addr, - }; - - let keys = if let Some(ref secret) = config.use_secret { - KeyPair::from_secret(secret.clone())? 
- } else { - config.config_path.clone().and_then(|ref p| load_key(Path::new(&p))) - .map_or_else(|| { - let key = Random.generate().expect("Error generating random key pair"); - if let Some(path) = config.config_path.clone() { - save_key(Path::new(&path), key.secret()); - } - key - }, - |s| KeyPair::from_secret(s).expect("Error creating node secret key")) - }; - let path = config.net_config_path.clone(); - // Setup the server socket - let tcp_listener = TcpListener::bind(&listen_address)?; - listen_address = SocketAddr::new(listen_address.ip(), tcp_listener.local_addr()?.port()); - debug!(target: "network", "Listening at {:?}", listen_address); - let udp_port = config.udp_port.unwrap_or_else(|| listen_address.port()); - let local_endpoint = NodeEndpoint { address: listen_address, udp_port }; - - let boot_nodes = config.boot_nodes.clone(); - let reserved_nodes = config.reserved_nodes.clone(); - config.max_handshakes = min(config.max_handshakes, MAX_HANDSHAKES as u32); - - let mut host = Host { - info: RwLock::new(HostInfo { - keys, - config, - nonce: H256::random(), - protocol_version: PROTOCOL_VERSION, - capabilities: Vec::new(), - public_endpoint: None, - local_endpoint, - }), - discovery: Mutex::new(None), - udp_socket: Mutex::new(None), - tcp_listener: Mutex::new(tcp_listener), - sessions: Arc::new(RwLock::new(Slab::new_starting_at(FIRST_SESSION, MAX_SESSIONS))), - nodes: RwLock::new(NodeTable::new(path)), - handlers: RwLock::new(HashMap::new()), - timers: RwLock::new(HashMap::new()), - timer_counter: RwLock::new(USER_TIMER), - reserved_nodes: RwLock::new(HashSet::new()), - stopping: AtomicBool::new(false), - filter, - }; - - for n in boot_nodes { - host.add_node(&n); - } - - for n in reserved_nodes { - if let Err(e) = host.add_reserved_node(&n) { - debug!(target: "network", "Error parsing node id: {}: {:?}", n, e); - } - } - Ok(host) - } - - pub fn add_node(&mut self, id: &str) { - match Node::from_str(id) { - Err(e) => { debug!(target: "network", "Could not 
add node {}: {:?}", id, e); }, - Ok(n) => { - let entry = NodeEntry { endpoint: n.endpoint.clone(), id: n.id }; - - self.nodes.write().add_node(n); - if let Some(ref mut discovery) = *self.discovery.lock() { - discovery.add_node(entry); - } - } - } - } - - pub fn add_reserved_node(&self, id: &str) -> Result<(), Error> { - let n = Node::from_str(id)?; - - let entry = NodeEntry { endpoint: n.endpoint.clone(), id: n.id }; - self.reserved_nodes.write().insert(n.id); - self.nodes.write().add_node(Node::new(entry.id, entry.endpoint.clone())); - - if let Some(ref mut discovery) = *self.discovery.lock() { - discovery.add_node(entry); - } - - Ok(()) - } - - pub fn set_non_reserved_mode(&self, mode: NonReservedPeerMode, io: &IoContext) { - let mut info = self.info.write(); - - if info.config.non_reserved_mode != mode { - info.config.non_reserved_mode = mode; - drop(info); - if let NonReservedPeerMode::Deny = mode { - // disconnect all non-reserved peers here. - let reserved: HashSet = self.reserved_nodes.read().clone(); - let mut to_kill = Vec::new(); - for e in self.sessions.read().iter() { - let mut s = e.lock(); - { - let id = s.id(); - if id.map_or(false, |id| reserved.contains(id)) { - continue; - } - } - - s.disconnect(io, DisconnectReason::ClientQuit); - to_kill.push(s.token()); - } - for p in to_kill { - trace!(target: "network", "Disconnecting on reserved-only mode: {}", p); - self.kill_connection(p, io, false); - } - } - } - } - - pub fn remove_reserved_node(&self, id: &str) -> Result<(), Error> { - let n = Node::from_str(id)?; - self.reserved_nodes.write().remove(&n.id); - - Ok(()) - } - - pub fn external_url(&self) -> Option { - let info = self.info.read(); - info.public_endpoint.as_ref().map(|e| format!("{}", Node::new(*info.id(), e.clone()))) - } - - pub fn local_url(&self) -> String { - let info = self.info.read(); - format!("{}", Node::new(*info.id(), info.local_endpoint.clone())) - } - - pub fn stop(&self, io: &IoContext) { - self.stopping.store(true, 
AtomicOrdering::Release); - let mut to_kill = Vec::new(); - for e in self.sessions.read().iter() { - let mut s = e.lock(); - s.disconnect(io, DisconnectReason::ClientQuit); - to_kill.push(s.token()); - } - for p in to_kill { - trace!(target: "network", "Disconnecting on shutdown: {}", p); - self.kill_connection(p, io, true); - } - io.unregister_handler(); - } - - /// Get all connected peers. - pub fn connected_peers(&self) -> Vec { - let sessions = self.sessions.read(); - let sessions = &*sessions; - - let mut peers = Vec::with_capacity(sessions.count()); - for i in (0..MAX_SESSIONS).map(|x| x + FIRST_SESSION) { - if sessions.get(i).is_some() { - peers.push(i); - } - } - peers - } - - fn init_public_interface(&self, io: &IoContext) -> Result<(), Error> { - if self.info.read().public_endpoint.is_some() { - return Ok(()); - } - let local_endpoint = self.info.read().local_endpoint.clone(); - let public_address = self.info.read().config.public_address; - let allow_ips = self.info.read().config.ip_filter.clone(); - let public_endpoint = match public_address { - None => { - let public_address = select_public_address(local_endpoint.address.port()); - let public_endpoint = NodeEndpoint { address: public_address, udp_port: local_endpoint.udp_port }; - if self.info.read().config.nat_enabled { - match map_external_address(&local_endpoint) { - Some(endpoint) => { - info!("NAT mapped to external address {}", endpoint.address); - endpoint - }, - None => public_endpoint - } - } else { - public_endpoint - } - } - Some(addr) => NodeEndpoint { address: addr, udp_port: local_endpoint.udp_port } - }; - - self.info.write().public_endpoint = Some(public_endpoint.clone()); - - if let Some(url) = self.external_url() { - io.message(NetworkIoMessage::NetworkStarted(url)).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); - } - - // Initialize discovery. 
- let discovery = { - let info = self.info.read(); - if info.config.discovery_enabled && info.config.non_reserved_mode == NonReservedPeerMode::Accept { - Some(Discovery::new(&info.keys, public_endpoint, allow_ips)) - } else { None } - }; - - if let Some(mut discovery) = discovery { - let mut udp_addr = local_endpoint.address; - udp_addr.set_port(local_endpoint.udp_port); - let socket = UdpSocket::bind(&udp_addr).expect("Error binding UDP socket"); - *self.udp_socket.lock() = Some(socket); - - discovery.add_node_list(self.nodes.read().entries()); - *self.discovery.lock() = Some(discovery); - io.register_stream(DISCOVERY)?; - io.register_timer(FAST_DISCOVERY_REFRESH, FAST_DISCOVERY_REFRESH_TIMEOUT)?; - io.register_timer(DISCOVERY_REFRESH, DISCOVERY_REFRESH_TIMEOUT)?; - io.register_timer(DISCOVERY_ROUND, DISCOVERY_ROUND_TIMEOUT)?; - } - io.register_timer(NODE_TABLE, NODE_TABLE_TIMEOUT)?; - io.register_stream(TCP_ACCEPT)?; - Ok(()) - } - - fn maintain_network(&self, io: &IoContext) { - self.keep_alive(io); - self.connect_peers(io); - } - - fn have_session(&self, id: &NodeId) -> bool { - self.sessions.read().iter().any(|e| e.lock().info.id == Some(*id)) - } - - // returns (handshakes, egress, ingress) - fn session_count(&self) -> (usize, usize, usize) { - let mut handshakes = 0; - let mut egress = 0; - let mut ingress = 0; - for s in self.sessions.read().iter() { - match s.try_lock() { - Some(ref s) if s.is_ready() && s.info.originated => egress += 1, - Some(ref s) if s.is_ready() && !s.info.originated => ingress += 1, - _ => handshakes +=1, - } - } - (handshakes, egress, ingress) - } - - fn connecting_to(&self, id: &NodeId) -> bool { - self.sessions.read().iter().any(|e| e.lock().id() == Some(id)) - } - - fn keep_alive(&self, io: &IoContext) { - let mut to_kill = Vec::new(); - for e in self.sessions.read().iter() { - let mut s = e.lock(); - if !s.keep_alive(io) { - s.disconnect(io, DisconnectReason::PingTimeout); - to_kill.push(s.token()); - } - } - for p in to_kill { 
- trace!(target: "network", "Ping timeout: {}", p); - self.kill_connection(p, io, true); - } - } - - fn has_enough_peers(&self) -> bool { - let min_peers = { - let info = self.info.read(); - let config = &info.config; - - config.min_peers - }; - let (_, egress_count, ingress_count) = self.session_count(); - - return egress_count + ingress_count >= min_peers as usize; - } - - fn connect_peers(&self, io: &IoContext) { - let (min_peers, mut pin, max_handshakes, allow_ips, self_id) = { - let info = self.info.read(); - if info.capabilities.is_empty() { - return; - } - let config = &info.config; - - (config.min_peers, config.non_reserved_mode == NonReservedPeerMode::Deny, config.max_handshakes as usize, config.ip_filter.clone(), *info.id()) - }; - - let (handshake_count, egress_count, ingress_count) = self.session_count(); - let reserved_nodes = self.reserved_nodes.read(); - if egress_count + ingress_count >= min_peers as usize + reserved_nodes.len() { - // check if all pinned nodes are connected. - if reserved_nodes.iter().all(|n| self.have_session(n) && self.connecting_to(n)) { - return; - } - - // if not, only attempt connect to reserved peers - pin = true; - } - - // allow 16 slots for incoming connections - if handshake_count >= max_handshakes { - return; - } - - // iterate over all nodes, reserved ones coming first. - // if we are pinned to only reserved nodes, ignore all others. 
- let nodes = reserved_nodes.iter().cloned().chain(if !pin { - self.nodes.read().nodes(&allow_ips) - } else { - Vec::new() - }); - - let max_handshakes_per_round = max_handshakes / 2; - let mut started: usize = 0; - for id in nodes.filter(|id| - !self.have_session(id) && - !self.connecting_to(id) && - *id != self_id && - self.filter.as_ref().map_or(true, |f| f.connection_allowed(&self_id, &id, ConnectionDirection::Outbound)) - ).take(min(max_handshakes_per_round, max_handshakes - handshake_count)) { - self.connect_peer(&id, io); - started += 1; - } - debug!(target: "network", "Connecting peers: {} sessions, {} pending + {} started", egress_count + ingress_count, handshake_count, started); - } - - fn connect_peer(&self, id: &NodeId, io: &IoContext) { - if self.have_session(id) { - trace!(target: "network", "Aborted connect. Node already connected."); - return; - } - if self.connecting_to(id) { - trace!(target: "network", "Aborted connect. Node already connecting."); - return; - } - - let socket = { - let address = { - let mut nodes = self.nodes.write(); - if let Some(node) = nodes.get_mut(id) { - node.endpoint.address - } else { - debug!(target: "network", "Connection to expired node aborted"); - return; - } - }; - match TcpStream::connect(&address) { - Ok(socket) => { - trace!(target: "network", "{}: Connecting to {:?}", id, address); - socket - }, - Err(e) => { - debug!(target: "network", "{}: Can't connect to address {:?}: {:?}", id, address, e); - self.nodes.write().note_failure(&id); - return; - } - } - }; - - if let Err(e) = self.create_connection(socket, Some(id), io) { - debug!(target: "network", "Can't create connection: {:?}", e); - } - } - - fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext) -> Result<(), Error> { - let nonce = self.info.write().next_nonce(); - let mut sessions = self.sessions.write(); - - let token = sessions.insert_with_opt(|token| { - trace!(target: "network", "{}: Initiating session {:?}", token, id); 
- match Session::new(io, socket, token, id, &nonce, &self.info.read()) { - Ok(s) => Some(Arc::new(Mutex::new(s))), - Err(e) => { - debug!(target: "network", "Session create error: {:?}", e); - None - } - } - }); - - match token { - Some(t) => io.register_stream(t).map(|_| ()).map_err(Into::into), - None => { - debug!(target: "network", "Max sessions reached"); - Ok(()) - } - } - } - - fn accept(&self, io: &IoContext) { - trace!(target: "network", "Accepting incoming connection"); - loop { - let socket = match self.tcp_listener.lock().accept() { - Ok((sock, _addr)) => sock, - Err(e) => { - if e.kind() != io::ErrorKind::WouldBlock { - debug!(target: "network", "Error accepting connection: {:?}", e); - } - break - }, - }; - if let Err(e) = self.create_connection(socket, None, io) { - debug!(target: "network", "Can't accept connection: {:?}", e); - } - } - } - - fn session_writable(&self, token: StreamToken, io: &IoContext) { - let session = { self.sessions.read().get(token).cloned() }; - - if let Some(session) = session { - let mut s = session.lock(); - if let Err(e) = s.writable(io, &self.info.read()) { - trace!(target: "network", "Session write error: {}: {:?}", token, e); - } - if s.done() { - io.deregister_stream(token).unwrap_or_else(|e| debug!("Error deregistering stream: {:?}", e)); - } - } - } - - fn connection_closed(&self, token: StreamToken, io: &IoContext) { - trace!(target: "network", "Connection closed: {}", token); - self.kill_connection(token, io, true); - } - - fn session_readable(&self, token: StreamToken, io: &IoContext) { - let mut ready_data: Vec = Vec::new(); - let mut packet_data: Vec<(ProtocolId, PacketId, Vec)> = Vec::new(); - let mut kill = false; - let session = { self.sessions.read().get(token).cloned() }; - let mut ready_id = None; - if let Some(session) = session.clone() { - { - loop { - let session_result = session.lock().readable(io, &self.info.read()); - match session_result { - Err(e) => { - let reserved_nodes = 
self.reserved_nodes.read(); - let s = session.lock(); - trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e); - match *e.kind() { - ErrorKind::Disconnect(DisconnectReason::IncompatibleProtocol) | ErrorKind::Disconnect(DisconnectReason::UselessPeer) => { - if let Some(id) = s.id() { - if !reserved_nodes.contains(id) { - let mut nodes = self.nodes.write(); - nodes.note_failure(&id); - nodes.mark_as_useless(id); - } - } - }, - _ => {}, - } - kill = true; - break; - }, - Ok(SessionData::Ready) => { - let (_, egress_count, ingress_count) = self.session_count(); - let reserved_nodes = self.reserved_nodes.read(); - let mut s = session.lock(); - let (min_peers, mut max_peers, reserved_only, self_id) = { - let info = self.info.read(); - let mut max_peers = info.config.max_peers; - for cap in &s.info.capabilities { - if let Some(num) = info.config.reserved_protocols.get(&cap.protocol) { - max_peers += *num; - break; - } - } - (info.config.min_peers as usize, max_peers as usize, info.config.non_reserved_mode == NonReservedPeerMode::Deny, *info.id()) - }; - - max_peers = max(max_peers, min_peers); - - let id = *s.id().expect("Ready session always has id"); - - // Check for the session limit. - // Outgoing connections are allowed as long as their count is <= min_peers - // Incoming connections are allowed to take all of the max_peers reserve, or at most half of the slots. - let max_ingress = max(max_peers - min_peers, min_peers / 2); - if reserved_only || - (s.info.originated && egress_count > min_peers) || - (!s.info.originated && ingress_count > max_ingress) { - if !reserved_nodes.contains(&id) { - // only proceed if the connecting peer is reserved. 
- trace!(target: "network", "Disconnecting non-reserved peer {:?}", id); - s.disconnect(io, DisconnectReason::TooManyPeers); - kill = true; - break; - } - } - - if !self.filter.as_ref().map_or(true, |f| f.connection_allowed(&self_id, &id, ConnectionDirection::Inbound)) { - trace!(target: "network", "Inbound connection not allowed for {:?}", id); - s.disconnect(io, DisconnectReason::UnexpectedIdentity); - kill = true; - break; - } - - ready_id = Some(id); - - // Add it to the node table - if !s.info.originated { - if let Ok(address) = s.remote_addr() { - // We can't know remote listening ports, so just assume defaults and hope for the best. - let endpoint = NodeEndpoint { address: SocketAddr::new(address.ip(), DEFAULT_PORT), udp_port: DEFAULT_PORT }; - let entry = NodeEntry { id, endpoint }; - let mut nodes = self.nodes.write(); - if !nodes.contains(&entry.id) { - nodes.add_node(Node::new(entry.id, entry.endpoint.clone())); - let mut discovery = self.discovery.lock(); - if let Some(ref mut discovery) = *discovery { - discovery.add_node(entry); - } - } - } - } - - // Note connection success - self.nodes.write().note_success(&id); - - for (p, _) in self.handlers.read().iter() { - if s.have_capability(*p) { - ready_data.push(*p); - } - } - }, - Ok(SessionData::Packet { - data, - protocol, - packet_id, - }) => { - match self.handlers.read().get(&protocol) { - None => { warn!(target: "network", "No handler found for protocol: {:?}", protocol) }, - Some(_) => packet_data.push((protocol, packet_id, data)), - } - }, - Ok(SessionData::Continue) => (), - Ok(SessionData::None) => break, - } - } - } - - if kill { - self.kill_connection(token, io, true); - } - - let handlers = self.handlers.read(); - if !ready_data.is_empty() { - let duplicate = self.sessions.read().iter().any(|e| { - let session = e.lock(); - session.token() != token && session.info.id == ready_id - }); - if duplicate { - trace!(target: "network", "Rejected duplicate connection: {}", token); - 
session.lock().disconnect(io, DisconnectReason::DuplicatePeer); - drop(handlers); - self.kill_connection(token, io, false); - return; - } - for p in ready_data { - let reserved = self.reserved_nodes.read(); - if let Some(h) = handlers.get(&p) { - h.connected(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token); - // accumulate pending packets. - let mut session = session.lock(); - packet_data.extend(session.mark_connected(p)); - } - } - } - - for (p, packet_id, data) in packet_data { - let reserved = self.reserved_nodes.read(); - if let Some(h) = handlers.get(&p) { - h.read(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token, packet_id, &data); - } - } - } - } - - fn discovery_readable(&self, io: &IoContext) { - let node_changes = match (self.udp_socket.lock().as_ref(), self.discovery.lock().as_mut()) { - (Some(udp_socket), Some(discovery)) => { - let mut buf = [0u8; MAX_DATAGRAM_SIZE]; - let writable = discovery.any_sends_queued(); - let res = match udp_socket.recv_from(&mut buf) { - Ok(Some((len, address))) => discovery.on_packet(&buf[0..len], address).unwrap_or_else(|e| { - debug!(target: "network", "Error processing UDP packet: {:?}", e); - None - }), - Ok(_) => None, - Err(e) => { - debug!(target: "network", "Error reading UPD socket: {:?}", e); - None - } - }; - let new_writable = discovery.any_sends_queued(); - if writable != new_writable { - io.update_registration(DISCOVERY) + /// Create a new instance + pub fn new( + mut config: NetworkConfiguration, + filter: Option>, + ) -> Result { + let mut listen_address = match config.listen_address { + None => SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), DEFAULT_PORT)), + Some(addr) => addr, + }; + + let keys = if let Some(ref secret) = config.use_secret { + KeyPair::from_secret(secret.clone())? 
+ } else { + config + .config_path + .clone() + .and_then(|ref p| load_key(Path::new(&p))) + .map_or_else( + || { + let key = Random.generate().expect("Error generating random key pair"); + if let Some(path) = config.config_path.clone() { + save_key(Path::new(&path), key.secret()); + } + key + }, + |s| KeyPair::from_secret(s).expect("Error creating node secret key"), + ) + }; + let path = config.net_config_path.clone(); + // Setup the server socket + let tcp_listener = TcpListener::bind(&listen_address)?; + listen_address = SocketAddr::new(listen_address.ip(), tcp_listener.local_addr()?.port()); + debug!(target: "network", "Listening at {:?}", listen_address); + let udp_port = config.udp_port.unwrap_or_else(|| listen_address.port()); + let local_endpoint = NodeEndpoint { + address: listen_address, + udp_port, + }; + + let boot_nodes = config.boot_nodes.clone(); + let reserved_nodes = config.reserved_nodes.clone(); + config.max_handshakes = min(config.max_handshakes, MAX_HANDSHAKES as u32); + + let mut host = Host { + info: RwLock::new(HostInfo { + keys, + config, + nonce: H256::random(), + protocol_version: PROTOCOL_VERSION, + capabilities: Vec::new(), + public_endpoint: None, + local_endpoint, + }), + discovery: Mutex::new(None), + udp_socket: Mutex::new(None), + tcp_listener: Mutex::new(tcp_listener), + sessions: Arc::new(RwLock::new(Slab::new_starting_at( + FIRST_SESSION, + MAX_SESSIONS, + ))), + nodes: RwLock::new(NodeTable::new(path)), + handlers: RwLock::new(HashMap::new()), + timers: RwLock::new(HashMap::new()), + timer_counter: RwLock::new(USER_TIMER), + reserved_nodes: RwLock::new(HashSet::new()), + stopping: AtomicBool::new(false), + filter, + }; + + for n in boot_nodes { + host.add_node(&n); + } + + for n in reserved_nodes { + if let Err(e) = host.add_reserved_node(&n) { + debug!(target: "network", "Error parsing node id: {}: {:?}", n, e); + } + } + Ok(host) + } + + pub fn add_node(&mut self, id: &str) { + match Node::from_str(id) { + Err(e) => { + 
debug!(target: "network", "Could not add node {}: {:?}", id, e); + } + Ok(n) => { + let entry = NodeEntry { + endpoint: n.endpoint.clone(), + id: n.id, + }; + + self.nodes.write().add_node(n); + if let Some(ref mut discovery) = *self.discovery.lock() { + discovery.add_node(entry); + } + } + } + } + + pub fn add_reserved_node(&self, id: &str) -> Result<(), Error> { + let n = Node::from_str(id)?; + + let entry = NodeEntry { + endpoint: n.endpoint.clone(), + id: n.id, + }; + self.reserved_nodes.write().insert(n.id); + self.nodes + .write() + .add_node(Node::new(entry.id, entry.endpoint.clone())); + + if let Some(ref mut discovery) = *self.discovery.lock() { + discovery.add_node(entry); + } + + Ok(()) + } + + pub fn set_non_reserved_mode( + &self, + mode: NonReservedPeerMode, + io: &IoContext, + ) { + let mut info = self.info.write(); + + if info.config.non_reserved_mode != mode { + info.config.non_reserved_mode = mode; + drop(info); + if let NonReservedPeerMode::Deny = mode { + // disconnect all non-reserved peers here. 
+ let reserved: HashSet = self.reserved_nodes.read().clone(); + let mut to_kill = Vec::new(); + for e in self.sessions.read().iter() { + let mut s = e.lock(); + { + let id = s.id(); + if id.map_or(false, |id| reserved.contains(id)) { + continue; + } + } + + s.disconnect(io, DisconnectReason::ClientQuit); + to_kill.push(s.token()); + } + for p in to_kill { + trace!(target: "network", "Disconnecting on reserved-only mode: {}", p); + self.kill_connection(p, io, false); + } + } + } + } + + pub fn remove_reserved_node(&self, id: &str) -> Result<(), Error> { + let n = Node::from_str(id)?; + self.reserved_nodes.write().remove(&n.id); + + Ok(()) + } + + pub fn external_url(&self) -> Option { + let info = self.info.read(); + info.public_endpoint + .as_ref() + .map(|e| format!("{}", Node::new(*info.id(), e.clone()))) + } + + pub fn local_url(&self) -> String { + let info = self.info.read(); + format!("{}", Node::new(*info.id(), info.local_endpoint.clone())) + } + + pub fn stop(&self, io: &IoContext) { + self.stopping.store(true, AtomicOrdering::Release); + let mut to_kill = Vec::new(); + for e in self.sessions.read().iter() { + let mut s = e.lock(); + s.disconnect(io, DisconnectReason::ClientQuit); + to_kill.push(s.token()); + } + for p in to_kill { + trace!(target: "network", "Disconnecting on shutdown: {}", p); + self.kill_connection(p, io, true); + } + io.unregister_handler(); + } + + /// Get all connected peers. 
+ pub fn connected_peers(&self) -> Vec { + let sessions = self.sessions.read(); + let sessions = &*sessions; + + let mut peers = Vec::with_capacity(sessions.count()); + for i in (0..MAX_SESSIONS).map(|x| x + FIRST_SESSION) { + if sessions.get(i).is_some() { + peers.push(i); + } + } + peers + } + + fn init_public_interface(&self, io: &IoContext) -> Result<(), Error> { + if self.info.read().public_endpoint.is_some() { + return Ok(()); + } + let local_endpoint = self.info.read().local_endpoint.clone(); + let public_address = self.info.read().config.public_address; + let allow_ips = self.info.read().config.ip_filter.clone(); + let public_endpoint = match public_address { + None => { + let public_address = select_public_address(local_endpoint.address.port()); + let public_endpoint = NodeEndpoint { + address: public_address, + udp_port: local_endpoint.udp_port, + }; + if self.info.read().config.nat_enabled { + match map_external_address(&local_endpoint) { + Some(endpoint) => { + info!("NAT mapped to external address {}", endpoint.address); + endpoint + } + None => public_endpoint, + } + } else { + public_endpoint + } + } + Some(addr) => NodeEndpoint { + address: addr, + udp_port: local_endpoint.udp_port, + }, + }; + + self.info.write().public_endpoint = Some(public_endpoint.clone()); + + if let Some(url) = self.external_url() { + io.message(NetworkIoMessage::NetworkStarted(url)) + .unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); + } + + // Initialize discovery. 
+ let discovery = { + let info = self.info.read(); + if info.config.discovery_enabled + && info.config.non_reserved_mode == NonReservedPeerMode::Accept + { + Some(Discovery::new(&info.keys, public_endpoint, allow_ips)) + } else { + None + } + }; + + if let Some(mut discovery) = discovery { + let mut udp_addr = local_endpoint.address; + udp_addr.set_port(local_endpoint.udp_port); + let socket = UdpSocket::bind(&udp_addr).expect("Error binding UDP socket"); + *self.udp_socket.lock() = Some(socket); + + discovery.add_node_list(self.nodes.read().entries()); + *self.discovery.lock() = Some(discovery); + io.register_stream(DISCOVERY)?; + io.register_timer(FAST_DISCOVERY_REFRESH, FAST_DISCOVERY_REFRESH_TIMEOUT)?; + io.register_timer(DISCOVERY_REFRESH, DISCOVERY_REFRESH_TIMEOUT)?; + io.register_timer(DISCOVERY_ROUND, DISCOVERY_ROUND_TIMEOUT)?; + } + io.register_timer(NODE_TABLE, NODE_TABLE_TIMEOUT)?; + io.register_stream(TCP_ACCEPT)?; + Ok(()) + } + + fn maintain_network(&self, io: &IoContext) { + self.keep_alive(io); + self.connect_peers(io); + } + + fn have_session(&self, id: &NodeId) -> bool { + self.sessions + .read() + .iter() + .any(|e| e.lock().info.id == Some(*id)) + } + + // returns (handshakes, egress, ingress) + fn session_count(&self) -> (usize, usize, usize) { + let mut handshakes = 0; + let mut egress = 0; + let mut ingress = 0; + for s in self.sessions.read().iter() { + match s.try_lock() { + Some(ref s) if s.is_ready() && s.info.originated => egress += 1, + Some(ref s) if s.is_ready() && !s.info.originated => ingress += 1, + _ => handshakes += 1, + } + } + (handshakes, egress, ingress) + } + + fn connecting_to(&self, id: &NodeId) -> bool { + self.sessions + .read() + .iter() + .any(|e| e.lock().id() == Some(id)) + } + + fn keep_alive(&self, io: &IoContext) { + let mut to_kill = Vec::new(); + for e in self.sessions.read().iter() { + let mut s = e.lock(); + if !s.keep_alive(io) { + s.disconnect(io, DisconnectReason::PingTimeout); + to_kill.push(s.token()); + 
} + } + for p in to_kill { + trace!(target: "network", "Ping timeout: {}", p); + self.kill_connection(p, io, true); + } + } + + fn has_enough_peers(&self) -> bool { + let min_peers = { + let info = self.info.read(); + let config = &info.config; + + config.min_peers + }; + let (_, egress_count, ingress_count) = self.session_count(); + + return egress_count + ingress_count >= min_peers as usize; + } + + fn connect_peers(&self, io: &IoContext) { + let (min_peers, mut pin, max_handshakes, allow_ips, self_id) = { + let info = self.info.read(); + if info.capabilities.is_empty() { + return; + } + let config = &info.config; + + ( + config.min_peers, + config.non_reserved_mode == NonReservedPeerMode::Deny, + config.max_handshakes as usize, + config.ip_filter.clone(), + *info.id(), + ) + }; + + let (handshake_count, egress_count, ingress_count) = self.session_count(); + let reserved_nodes = self.reserved_nodes.read(); + if egress_count + ingress_count >= min_peers as usize + reserved_nodes.len() { + // check if all pinned nodes are connected. + if reserved_nodes + .iter() + .all(|n| self.have_session(n) && self.connecting_to(n)) + { + return; + } + + // if not, only attempt connect to reserved peers + pin = true; + } + + // allow 16 slots for incoming connections + if handshake_count >= max_handshakes { + return; + } + + // iterate over all nodes, reserved ones coming first. + // if we are pinned to only reserved nodes, ignore all others. 
+ let nodes = reserved_nodes.iter().cloned().chain(if !pin { + self.nodes.read().nodes(&allow_ips) + } else { + Vec::new() + }); + + let max_handshakes_per_round = max_handshakes / 2; + let mut started: usize = 0; + for id in nodes + .filter(|id| { + !self.have_session(id) + && !self.connecting_to(id) + && *id != self_id + && self.filter.as_ref().map_or(true, |f| { + f.connection_allowed(&self_id, &id, ConnectionDirection::Outbound) + }) + }) + .take(min( + max_handshakes_per_round, + max_handshakes - handshake_count, + )) + { + self.connect_peer(&id, io); + started += 1; + } + debug!(target: "network", "Connecting peers: {} sessions, {} pending + {} started", egress_count + ingress_count, handshake_count, started); + } + + fn connect_peer(&self, id: &NodeId, io: &IoContext) { + if self.have_session(id) { + trace!(target: "network", "Aborted connect. Node already connected."); + return; + } + if self.connecting_to(id) { + trace!(target: "network", "Aborted connect. Node already connecting."); + return; + } + + let socket = { + let address = { + let mut nodes = self.nodes.write(); + if let Some(node) = nodes.get_mut(id) { + node.endpoint.address + } else { + debug!(target: "network", "Connection to expired node aborted"); + return; + } + }; + match TcpStream::connect(&address) { + Ok(socket) => { + trace!(target: "network", "{}: Connecting to {:?}", id, address); + socket + } + Err(e) => { + debug!(target: "network", "{}: Can't connect to address {:?}: {:?}", id, address, e); + self.nodes.write().note_failure(&id); + return; + } + } + }; + + if let Err(e) = self.create_connection(socket, Some(id), io) { + debug!(target: "network", "Can't create connection: {:?}", e); + } + } + + fn create_connection( + &self, + socket: TcpStream, + id: Option<&NodeId>, + io: &IoContext, + ) -> Result<(), Error> { + let nonce = self.info.write().next_nonce(); + let mut sessions = self.sessions.write(); + + let token = sessions.insert_with_opt(|token| { + trace!(target: "network", 
"{}: Initiating session {:?}", token, id); + match Session::new(io, socket, token, id, &nonce, &self.info.read()) { + Ok(s) => Some(Arc::new(Mutex::new(s))), + Err(e) => { + debug!(target: "network", "Session create error: {:?}", e); + None + } + } + }); + + match token { + Some(t) => io.register_stream(t).map(|_| ()).map_err(Into::into), + None => { + debug!(target: "network", "Max sessions reached"); + Ok(()) + } + } + } + + fn accept(&self, io: &IoContext) { + trace!(target: "network", "Accepting incoming connection"); + loop { + let socket = match self.tcp_listener.lock().accept() { + Ok((sock, _addr)) => sock, + Err(e) => { + if e.kind() != io::ErrorKind::WouldBlock { + debug!(target: "network", "Error accepting connection: {:?}", e); + } + break; + } + }; + if let Err(e) = self.create_connection(socket, None, io) { + debug!(target: "network", "Can't accept connection: {:?}", e); + } + } + } + + fn session_writable(&self, token: StreamToken, io: &IoContext) { + let session = { self.sessions.read().get(token).cloned() }; + + if let Some(session) = session { + let mut s = session.lock(); + if let Err(e) = s.writable(io, &self.info.read()) { + trace!(target: "network", "Session write error: {}: {:?}", token, e); + } + if s.done() { + io.deregister_stream(token) + .unwrap_or_else(|e| debug!("Error deregistering stream: {:?}", e)); + } + } + } + + fn connection_closed(&self, token: StreamToken, io: &IoContext) { + trace!(target: "network", "Connection closed: {}", token); + self.kill_connection(token, io, true); + } + + fn session_readable(&self, token: StreamToken, io: &IoContext) { + let mut ready_data: Vec = Vec::new(); + let mut packet_data: Vec<(ProtocolId, PacketId, Vec)> = Vec::new(); + let mut kill = false; + let session = { self.sessions.read().get(token).cloned() }; + let mut ready_id = None; + if let Some(session) = session.clone() { + { + loop { + let session_result = session.lock().readable(io, &self.info.read()); + match session_result { + Err(e) => { 
+ let reserved_nodes = self.reserved_nodes.read(); + let s = session.lock(); + trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e); + match *e.kind() { + ErrorKind::Disconnect(DisconnectReason::IncompatibleProtocol) + | ErrorKind::Disconnect(DisconnectReason::UselessPeer) => { + if let Some(id) = s.id() { + if !reserved_nodes.contains(id) { + let mut nodes = self.nodes.write(); + nodes.note_failure(&id); + nodes.mark_as_useless(id); + } + } + } + _ => {} + } + kill = true; + break; + } + Ok(SessionData::Ready) => { + let (_, egress_count, ingress_count) = self.session_count(); + let reserved_nodes = self.reserved_nodes.read(); + let mut s = session.lock(); + let (min_peers, mut max_peers, reserved_only, self_id) = { + let info = self.info.read(); + let mut max_peers = info.config.max_peers; + for cap in &s.info.capabilities { + if let Some(num) = + info.config.reserved_protocols.get(&cap.protocol) + { + max_peers += *num; + break; + } + } + ( + info.config.min_peers as usize, + max_peers as usize, + info.config.non_reserved_mode == NonReservedPeerMode::Deny, + *info.id(), + ) + }; + + max_peers = max(max_peers, min_peers); + + let id = *s.id().expect("Ready session always has id"); + + // Check for the session limit. + // Outgoing connections are allowed as long as their count is <= min_peers + // Incoming connections are allowed to take all of the max_peers reserve, or at most half of the slots. + let max_ingress = max(max_peers - min_peers, min_peers / 2); + if reserved_only + || (s.info.originated && egress_count > min_peers) + || (!s.info.originated && ingress_count > max_ingress) + { + if !reserved_nodes.contains(&id) { + // only proceed if the connecting peer is reserved. 
+ trace!(target: "network", "Disconnecting non-reserved peer {:?}", id); + s.disconnect(io, DisconnectReason::TooManyPeers); + kill = true; + break; + } + } + + if !self.filter.as_ref().map_or(true, |f| { + f.connection_allowed(&self_id, &id, ConnectionDirection::Inbound) + }) { + trace!(target: "network", "Inbound connection not allowed for {:?}", id); + s.disconnect(io, DisconnectReason::UnexpectedIdentity); + kill = true; + break; + } + + ready_id = Some(id); + + // Add it to the node table + if !s.info.originated { + if let Ok(address) = s.remote_addr() { + // We can't know remote listening ports, so just assume defaults and hope for the best. + let endpoint = NodeEndpoint { + address: SocketAddr::new(address.ip(), DEFAULT_PORT), + udp_port: DEFAULT_PORT, + }; + let entry = NodeEntry { id, endpoint }; + let mut nodes = self.nodes.write(); + if !nodes.contains(&entry.id) { + nodes.add_node(Node::new(entry.id, entry.endpoint.clone())); + let mut discovery = self.discovery.lock(); + if let Some(ref mut discovery) = *discovery { + discovery.add_node(entry); + } + } + } + } + + // Note connection success + self.nodes.write().note_success(&id); + + for (p, _) in self.handlers.read().iter() { + if s.have_capability(*p) { + ready_data.push(*p); + } + } + } + Ok(SessionData::Packet { + data, + protocol, + packet_id, + }) => match self.handlers.read().get(&protocol) { + None => { + warn!(target: "network", "No handler found for protocol: {:?}", protocol) + } + Some(_) => packet_data.push((protocol, packet_id, data)), + }, + Ok(SessionData::Continue) => (), + Ok(SessionData::None) => break, + } + } + } + + if kill { + self.kill_connection(token, io, true); + } + + let handlers = self.handlers.read(); + if !ready_data.is_empty() { + let duplicate = self.sessions.read().iter().any(|e| { + let session = e.lock(); + session.token() != token && session.info.id == ready_id + }); + if duplicate { + trace!(target: "network", "Rejected duplicate connection: {}", token); + session 
+ .lock() + .disconnect(io, DisconnectReason::DuplicatePeer); + drop(handlers); + self.kill_connection(token, io, false); + return; + } + for p in ready_data { + let reserved = self.reserved_nodes.read(); + if let Some(h) = handlers.get(&p) { + h.connected( + &NetworkContext::new( + io, + p, + Some(session.clone()), + self.sessions.clone(), + &reserved, + ), + &token, + ); + // accumulate pending packets. + let mut session = session.lock(); + packet_data.extend(session.mark_connected(p)); + } + } + } + + for (p, packet_id, data) in packet_data { + let reserved = self.reserved_nodes.read(); + if let Some(h) = handlers.get(&p) { + h.read( + &NetworkContext::new( + io, + p, + Some(session.clone()), + self.sessions.clone(), + &reserved, + ), + &token, + packet_id, + &data, + ); + } + } + } + } + + fn discovery_readable(&self, io: &IoContext) { + let node_changes = match ( + self.udp_socket.lock().as_ref(), + self.discovery.lock().as_mut(), + ) { + (Some(udp_socket), Some(discovery)) => { + let mut buf = [0u8; MAX_DATAGRAM_SIZE]; + let writable = discovery.any_sends_queued(); + let res = match udp_socket.recv_from(&mut buf) { + Ok(Some((len, address))) => discovery + .on_packet(&buf[0..len], address) + .unwrap_or_else(|e| { + debug!(target: "network", "Error processing UDP packet: {:?}", e); + None + }), + Ok(_) => None, + Err(e) => { + debug!(target: "network", "Error reading UPD socket: {:?}", e); + None + } + }; + let new_writable = discovery.any_sends_queued(); + if writable != new_writable { + io.update_registration(DISCOVERY) .unwrap_or_else(|e| { debug!(target: "network" ,"Error updating discovery registration: {:?}", e) }); - } - res - }, - _ => None, - }; - if let Some(node_changes) = node_changes { - self.update_nodes(io, node_changes); - } - } + } + res + } + _ => None, + }; + if let Some(node_changes) = node_changes { + self.update_nodes(io, node_changes); + } + } - fn discovery_writable(&self, io: &IoContext) { - if let (Some(udp_socket), Some(discovery)) = 
(self.udp_socket.lock().as_ref(), self.discovery.lock().as_mut()) { - while let Some(data) = discovery.dequeue_send() { - match udp_socket.send_to(&data.payload, &data.address) { - Ok(Some(size)) if size == data.payload.len() => { - }, - Ok(Some(_)) => { - warn!(target: "network", "UDP sent incomplete datagram"); - }, - Ok(None) => { - discovery.requeue_send(data); - return; - } - Err(e) => { - debug!(target: "network", "UDP send error: {:?}, address: {:?}", e, &data.address); - return; - } - } - } - io.update_registration(DISCOVERY) - .unwrap_or_else(|e| { - debug!(target: "network", "Error updating discovery registration: {:?}", e) - }); - } - } + fn discovery_writable(&self, io: &IoContext) { + if let (Some(udp_socket), Some(discovery)) = ( + self.udp_socket.lock().as_ref(), + self.discovery.lock().as_mut(), + ) { + while let Some(data) = discovery.dequeue_send() { + match udp_socket.send_to(&data.payload, &data.address) { + Ok(Some(size)) if size == data.payload.len() => {} + Ok(Some(_)) => { + warn!(target: "network", "UDP sent incomplete datagram"); + } + Ok(None) => { + discovery.requeue_send(data); + return; + } + Err(e) => { + debug!(target: "network", "UDP send error: {:?}, address: {:?}", e, &data.address); + return; + } + } + } + io.update_registration(DISCOVERY).unwrap_or_else( + |e| debug!(target: "network", "Error updating discovery registration: {:?}", e), + ); + } + } - fn connection_timeout(&self, token: StreamToken, io: &IoContext) { - trace!(target: "network", "Connection timeout: {}", token); - self.kill_connection(token, io, true) - } + fn connection_timeout(&self, token: StreamToken, io: &IoContext) { + trace!(target: "network", "Connection timeout: {}", token); + self.kill_connection(token, io, true) + } - fn kill_connection(&self, token: StreamToken, io: &IoContext, remote: bool) { - let mut to_disconnect: Vec = Vec::new(); - let mut failure_id = None; - let mut deregister = false; - let mut expired_session = None; - if let FIRST_SESSION 
... LAST_SESSION = token { - let sessions = self.sessions.read(); - if let Some(session) = sessions.get(token).cloned() { - expired_session = Some(session.clone()); - let mut s = session.lock(); - if !s.expired() { - if s.is_ready() { - for (p, _) in self.handlers.read().iter() { - if s.have_capability(*p) { - to_disconnect.push(*p); - } - } - } - s.set_expired(); - failure_id = s.id().cloned(); - } - deregister = remote || s.done(); - } - } - if let Some(id) = failure_id { - if remote { - self.nodes.write().note_failure(&id); - } - } - for p in to_disconnect { - let reserved = self.reserved_nodes.read(); - if let Some(h) = self.handlers.read().get(&p) { - h.disconnected(&NetworkContext::new(io, p, expired_session.clone(), self.sessions.clone(), &reserved), &token); - } - } - if deregister { - io.deregister_stream(token).unwrap_or_else(|e| debug!("Error deregistering stream: {:?}", e)); - } - } + fn kill_connection(&self, token: StreamToken, io: &IoContext, remote: bool) { + let mut to_disconnect: Vec = Vec::new(); + let mut failure_id = None; + let mut deregister = false; + let mut expired_session = None; + if let FIRST_SESSION...LAST_SESSION = token { + let sessions = self.sessions.read(); + if let Some(session) = sessions.get(token).cloned() { + expired_session = Some(session.clone()); + let mut s = session.lock(); + if !s.expired() { + if s.is_ready() { + for (p, _) in self.handlers.read().iter() { + if s.have_capability(*p) { + to_disconnect.push(*p); + } + } + } + s.set_expired(); + failure_id = s.id().cloned(); + } + deregister = remote || s.done(); + } + } + if let Some(id) = failure_id { + if remote { + self.nodes.write().note_failure(&id); + } + } + for p in to_disconnect { + let reserved = self.reserved_nodes.read(); + if let Some(h) = self.handlers.read().get(&p) { + h.disconnected( + &NetworkContext::new( + io, + p, + expired_session.clone(), + self.sessions.clone(), + &reserved, + ), + &token, + ); + } + } + if deregister { + 
io.deregister_stream(token) + .unwrap_or_else(|e| debug!("Error deregistering stream: {:?}", e)); + } + } - fn update_nodes(&self, _io: &IoContext, node_changes: TableUpdates) { - let mut to_remove: Vec = Vec::new(); - { - let sessions = self.sessions.read(); - for c in sessions.iter() { - let s = c.lock(); - if let Some(id) = s.id() { - if node_changes.removed.contains(id) { - to_remove.push(s.token()); - } - } - } - } - for i in to_remove { - trace!(target: "network", "Removed from node table: {}", i); - } - let reserved_nodes = self.reserved_nodes.read(); - self.nodes.write().update(node_changes, &*reserved_nodes); - } + fn update_nodes(&self, _io: &IoContext, node_changes: TableUpdates) { + let mut to_remove: Vec = Vec::new(); + { + let sessions = self.sessions.read(); + for c in sessions.iter() { + let s = c.lock(); + if let Some(id) = s.id() { + if node_changes.removed.contains(id) { + to_remove.push(s.token()); + } + } + } + } + for i in to_remove { + trace!(target: "network", "Removed from node table: {}", i); + } + let reserved_nodes = self.reserved_nodes.read(); + self.nodes.write().update(node_changes, &*reserved_nodes); + } - pub fn with_context(&self, protocol: ProtocolId, io: &IoContext, action: F) where F: FnOnce(&NetworkContextTrait) { - let reserved = { self.reserved_nodes.read() }; + pub fn with_context(&self, protocol: ProtocolId, io: &IoContext, action: F) + where + F: FnOnce(&NetworkContextTrait), + { + let reserved = { self.reserved_nodes.read() }; - let context = NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved); - action(&context); - } + let context = NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved); + action(&context); + } - pub fn with_context_eval(&self, protocol: ProtocolId, io: &IoContext, action: F) -> T where F: FnOnce(&NetworkContextTrait) -> T { - let reserved = { self.reserved_nodes.read() }; + pub fn with_context_eval( + &self, + protocol: ProtocolId, + io: &IoContext, + action: F, 
+ ) -> T + where + F: FnOnce(&NetworkContextTrait) -> T, + { + let reserved = { self.reserved_nodes.read() }; - let context = NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved); - action(&context) - } + let context = NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved); + action(&context) + } } impl IoHandler for Host { - /// Initialize networking - fn initialize(&self, io: &IoContext) { - io.register_timer(IDLE, MAINTENANCE_TIMEOUT).expect("Error registering Network idle timer"); - io.message(NetworkIoMessage::InitPublicInterface).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); - self.maintain_network(io) - } + /// Initialize networking + fn initialize(&self, io: &IoContext) { + io.register_timer(IDLE, MAINTENANCE_TIMEOUT) + .expect("Error registering Network idle timer"); + io.message(NetworkIoMessage::InitPublicInterface) + .unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); + self.maintain_network(io) + } - fn stream_hup(&self, io: &IoContext, stream: StreamToken) { - trace!(target: "network", "Hup: {}", stream); - match stream { - FIRST_SESSION ... LAST_SESSION => self.connection_closed(stream, io), - _ => warn!(target: "network", "Unexpected hup"), - }; - } + fn stream_hup(&self, io: &IoContext, stream: StreamToken) { + trace!(target: "network", "Hup: {}", stream); + match stream { + FIRST_SESSION...LAST_SESSION => self.connection_closed(stream, io), + _ => warn!(target: "network", "Unexpected hup"), + }; + } - fn stream_readable(&self, io: &IoContext, stream: StreamToken) { - if self.stopping.load(AtomicOrdering::Acquire) { - return; - } - match stream { - FIRST_SESSION ... 
LAST_SESSION => self.session_readable(stream, io), - DISCOVERY => self.discovery_readable(io), - TCP_ACCEPT => self.accept(io), - _ => panic!("Received unknown readable token"), - } - } + fn stream_readable(&self, io: &IoContext, stream: StreamToken) { + if self.stopping.load(AtomicOrdering::Acquire) { + return; + } + match stream { + FIRST_SESSION...LAST_SESSION => self.session_readable(stream, io), + DISCOVERY => self.discovery_readable(io), + TCP_ACCEPT => self.accept(io), + _ => panic!("Received unknown readable token"), + } + } - fn stream_writable(&self, io: &IoContext, stream: StreamToken) { - if self.stopping.load(AtomicOrdering::Acquire) { - return; - } - match stream { - FIRST_SESSION ... LAST_SESSION => self.session_writable(stream, io), - DISCOVERY => self.discovery_writable(io), - _ => panic!("Received unknown writable token"), - } - } + fn stream_writable(&self, io: &IoContext, stream: StreamToken) { + if self.stopping.load(AtomicOrdering::Acquire) { + return; + } + match stream { + FIRST_SESSION...LAST_SESSION => self.session_writable(stream, io), + DISCOVERY => self.discovery_writable(io), + _ => panic!("Received unknown writable token"), + } + } - fn timeout(&self, io: &IoContext, token: TimerToken) { - if self.stopping.load(AtomicOrdering::Acquire) { - return; - } - match token { - IDLE => self.maintain_network(io), - FIRST_SESSION ... 
LAST_SESSION => self.connection_timeout(token, io), - DISCOVERY_REFRESH => { - // Run the _slow_ discovery if enough peers are connected - if !self.has_enough_peers() { - return; - } - self.discovery.lock().as_mut().map(|d| d.refresh()); - io.update_registration(DISCOVERY).unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e)); - }, - FAST_DISCOVERY_REFRESH => { - // Run the fast discovery if not enough peers are connected - if self.has_enough_peers() { - return; - } - self.discovery.lock().as_mut().map(|d| d.refresh()); - io.update_registration(DISCOVERY).unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e)); - }, - DISCOVERY_ROUND => { - self.discovery.lock().as_mut().map(|d| d.round()); - io.update_registration(DISCOVERY).unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e)); - }, - NODE_TABLE => { - trace!(target: "network", "Refreshing node table"); - let mut nodes = self.nodes.write(); - nodes.clear_useless(); - nodes.save(); - }, - _ => match self.timers.read().get(&token).cloned() { - Some(timer) => match self.handlers.read().get(&timer.protocol).cloned() { - None => { warn!(target: "network", "No handler found for protocol: {:?}", timer.protocol) }, - Some(h) => { - let reserved = self.reserved_nodes.read(); - h.timeout(&NetworkContext::new(io, timer.protocol, None, self.sessions.clone(), &reserved), timer.token); - } - }, - None => { warn!("Unknown timer token: {}", token); } // timer is not registerd through us - } - } - } + fn timeout(&self, io: &IoContext, token: TimerToken) { + if self.stopping.load(AtomicOrdering::Acquire) { + return; + } + match token { + IDLE => self.maintain_network(io), + FIRST_SESSION...LAST_SESSION => self.connection_timeout(token, io), + DISCOVERY_REFRESH => { + // Run the _slow_ discovery if enough peers are connected + if !self.has_enough_peers() { + return; + } + self.discovery.lock().as_mut().map(|d| d.refresh()); + io.update_registration(DISCOVERY) + 
.unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e)); + } + FAST_DISCOVERY_REFRESH => { + // Run the fast discovery if not enough peers are connected + if self.has_enough_peers() { + return; + } + self.discovery.lock().as_mut().map(|d| d.refresh()); + io.update_registration(DISCOVERY) + .unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e)); + } + DISCOVERY_ROUND => { + self.discovery.lock().as_mut().map(|d| d.round()); + io.update_registration(DISCOVERY) + .unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e)); + } + NODE_TABLE => { + trace!(target: "network", "Refreshing node table"); + let mut nodes = self.nodes.write(); + nodes.clear_useless(); + nodes.save(); + } + _ => match self.timers.read().get(&token).cloned() { + Some(timer) => match self.handlers.read().get(&timer.protocol).cloned() { + None => { + warn!(target: "network", "No handler found for protocol: {:?}", timer.protocol) + } + Some(h) => { + let reserved = self.reserved_nodes.read(); + h.timeout( + &NetworkContext::new( + io, + timer.protocol, + None, + self.sessions.clone(), + &reserved, + ), + timer.token, + ); + } + }, + None => { + warn!("Unknown timer token: {}", token); + } // timer is not registerd through us + }, + } + } - fn message(&self, io: &IoContext, message: &NetworkIoMessage) { - if self.stopping.load(AtomicOrdering::Acquire) { - return; - } - match *message { - NetworkIoMessage::AddHandler { - ref handler, - ref protocol, - ref versions, - } => { - let h = handler.clone(); - let reserved = self.reserved_nodes.read(); - h.initialize( - &NetworkContext::new(io, *protocol, None, self.sessions.clone(), &reserved), - ); - self.handlers.write().insert(*protocol, h); - let mut info = self.info.write(); - for &(version, packet_count) in versions { - info.capabilities.push(CapabilityInfo { - protocol: *protocol, - version, - packet_count, - }); - } - }, - NetworkIoMessage::AddTimer { - ref protocol, - ref delay, - ref 
token, - } => { - let handler_token = { - let mut timer_counter = self.timer_counter.write(); - let counter = &mut *timer_counter; - let handler_token = *counter; - *counter += 1; - handler_token - }; - self.timers.write().insert(handler_token, ProtocolTimer { protocol: *protocol, token: *token }); - io.register_timer(handler_token, *delay).unwrap_or_else(|e| debug!("Error registering timer {}: {:?}", token, e)); - }, - NetworkIoMessage::Disconnect(ref peer) => { - let session = { self.sessions.read().get(*peer).cloned() }; - if let Some(session) = session { - session.lock().disconnect(io, DisconnectReason::DisconnectRequested); - } - trace!(target: "network", "Disconnect requested {}", peer); - self.kill_connection(*peer, io, false); - }, - NetworkIoMessage::DisablePeer(ref peer) => { - let session = { self.sessions.read().get(*peer).cloned() }; - if let Some(session) = session { - session.lock().disconnect(io, DisconnectReason::DisconnectRequested); - if let Some(id) = session.lock().id() { - let mut nodes = self.nodes.write(); - nodes.note_failure(&id); - nodes.mark_as_useless(id); - } - } - trace!(target: "network", "Disabling peer {}", peer); - self.kill_connection(*peer, io, false); - }, - NetworkIoMessage::InitPublicInterface => - self.init_public_interface(io).unwrap_or_else(|e| warn!("Error initializing public interface: {:?}", e)), - _ => {} // ignore others. 
- } - } + fn message(&self, io: &IoContext, message: &NetworkIoMessage) { + if self.stopping.load(AtomicOrdering::Acquire) { + return; + } + match *message { + NetworkIoMessage::AddHandler { + ref handler, + ref protocol, + ref versions, + } => { + let h = handler.clone(); + let reserved = self.reserved_nodes.read(); + h.initialize(&NetworkContext::new( + io, + *protocol, + None, + self.sessions.clone(), + &reserved, + )); + self.handlers.write().insert(*protocol, h); + let mut info = self.info.write(); + for &(version, packet_count) in versions { + info.capabilities.push(CapabilityInfo { + protocol: *protocol, + version, + packet_count, + }); + } + } + NetworkIoMessage::AddTimer { + ref protocol, + ref delay, + ref token, + } => { + let handler_token = { + let mut timer_counter = self.timer_counter.write(); + let counter = &mut *timer_counter; + let handler_token = *counter; + *counter += 1; + handler_token + }; + self.timers.write().insert( + handler_token, + ProtocolTimer { + protocol: *protocol, + token: *token, + }, + ); + io.register_timer(handler_token, *delay) + .unwrap_or_else(|e| debug!("Error registering timer {}: {:?}", token, e)); + } + NetworkIoMessage::Disconnect(ref peer) => { + let session = { self.sessions.read().get(*peer).cloned() }; + if let Some(session) = session { + session + .lock() + .disconnect(io, DisconnectReason::DisconnectRequested); + } + trace!(target: "network", "Disconnect requested {}", peer); + self.kill_connection(*peer, io, false); + } + NetworkIoMessage::DisablePeer(ref peer) => { + let session = { self.sessions.read().get(*peer).cloned() }; + if let Some(session) = session { + session + .lock() + .disconnect(io, DisconnectReason::DisconnectRequested); + if let Some(id) = session.lock().id() { + let mut nodes = self.nodes.write(); + nodes.note_failure(&id); + nodes.mark_as_useless(id); + } + } + trace!(target: "network", "Disabling peer {}", peer); + self.kill_connection(*peer, io, false); + } + 
NetworkIoMessage::InitPublicInterface => self + .init_public_interface(io) + .unwrap_or_else(|e| warn!("Error initializing public interface: {:?}", e)), + _ => {} // ignore others. + } + } - fn register_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop>) { - match stream { - FIRST_SESSION ... LAST_SESSION => { - let session = { self.sessions.read().get(stream).cloned() }; - if let Some(session) = session { - session.lock().register_socket(reg, event_loop).expect("Error registering socket"); - } - } - DISCOVERY => match self.udp_socket.lock().as_ref() { - Some(udp_socket) => { - event_loop.register(udp_socket, reg, Ready::all(), PollOpt::edge()) - .expect("Error registering UDP socket"); - }, - _ => panic!("Error registering discovery socket"), - } - TCP_ACCEPT => event_loop.register(&*self.tcp_listener.lock(), Token(TCP_ACCEPT), Ready::all(), PollOpt::edge()).expect("Error registering stream"), - _ => warn!("Unexpected stream registration") - } - } + fn register_stream( + &self, + stream: StreamToken, + reg: Token, + event_loop: &mut EventLoop>, + ) { + match stream { + FIRST_SESSION...LAST_SESSION => { + let session = { self.sessions.read().get(stream).cloned() }; + if let Some(session) = session { + session + .lock() + .register_socket(reg, event_loop) + .expect("Error registering socket"); + } + } + DISCOVERY => match self.udp_socket.lock().as_ref() { + Some(udp_socket) => { + event_loop + .register(udp_socket, reg, Ready::all(), PollOpt::edge()) + .expect("Error registering UDP socket"); + } + _ => panic!("Error registering discovery socket"), + }, + TCP_ACCEPT => event_loop + .register( + &*self.tcp_listener.lock(), + Token(TCP_ACCEPT), + Ready::all(), + PollOpt::edge(), + ) + .expect("Error registering stream"), + _ => warn!("Unexpected stream registration"), + } + } - fn deregister_stream(&self, stream: StreamToken, event_loop: &mut EventLoop>) { - match stream { - FIRST_SESSION ... 
LAST_SESSION => { - let mut connections = self.sessions.write(); - if let Some(connection) = connections.get(stream).cloned() { - let c = connection.lock(); - if c.expired() { // make sure it is the same connection that the event was generated for - c.deregister_socket(event_loop).expect("Error deregistering socket"); - connections.remove(stream); - } - } - } - DISCOVERY => (), - _ => warn!("Unexpected stream deregistration") - } - } + fn deregister_stream( + &self, + stream: StreamToken, + event_loop: &mut EventLoop>, + ) { + match stream { + FIRST_SESSION...LAST_SESSION => { + let mut connections = self.sessions.write(); + if let Some(connection) = connections.get(stream).cloned() { + let c = connection.lock(); + if c.expired() { + // make sure it is the same connection that the event was generated for + c.deregister_socket(event_loop) + .expect("Error deregistering socket"); + connections.remove(stream); + } + } + } + DISCOVERY => (), + _ => warn!("Unexpected stream deregistration"), + } + } - fn update_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop>) { - match stream { - FIRST_SESSION ... 
LAST_SESSION => { - let connection = { self.sessions.read().get(stream).cloned() }; - if let Some(connection) = connection { - connection.lock().update_socket(reg, event_loop).expect("Error updating socket"); - } - } - DISCOVERY => match (self.udp_socket.lock().as_ref(), self.discovery.lock().as_ref()) { - (Some(udp_socket), Some(discovery)) => { - let registration = if discovery.any_sends_queued() { - Ready::readable() | Ready::writable() - } else { - Ready::readable() - }; - event_loop.reregister(udp_socket, reg, registration, PollOpt::edge()) - .expect("Error reregistering UDP socket"); - }, - _ => panic!("Error reregistering discovery socket"), - } - TCP_ACCEPT => event_loop.reregister(&*self.tcp_listener.lock(), Token(TCP_ACCEPT), Ready::all(), PollOpt::edge()).expect("Error reregistering stream"), - _ => warn!("Unexpected stream update") - } - } + fn update_stream( + &self, + stream: StreamToken, + reg: Token, + event_loop: &mut EventLoop>, + ) { + match stream { + FIRST_SESSION...LAST_SESSION => { + let connection = { self.sessions.read().get(stream).cloned() }; + if let Some(connection) = connection { + connection + .lock() + .update_socket(reg, event_loop) + .expect("Error updating socket"); + } + } + DISCOVERY => match ( + self.udp_socket.lock().as_ref(), + self.discovery.lock().as_ref(), + ) { + (Some(udp_socket), Some(discovery)) => { + let registration = if discovery.any_sends_queued() { + Ready::readable() | Ready::writable() + } else { + Ready::readable() + }; + event_loop + .reregister(udp_socket, reg, registration, PollOpt::edge()) + .expect("Error reregistering UDP socket"); + } + _ => panic!("Error reregistering discovery socket"), + }, + TCP_ACCEPT => event_loop + .reregister( + &*self.tcp_listener.lock(), + Token(TCP_ACCEPT), + Ready::all(), + PollOpt::edge(), + ) + .expect("Error reregistering stream"), + _ => warn!("Unexpected stream update"), + } + } } fn save_key(path: &Path, key: &Secret) { - let mut path_buf = PathBuf::from(path); - if 
let Err(e) = fs::create_dir_all(path_buf.as_path()) { - warn!("Error creating key directory: {:?}", e); - return; - }; - path_buf.push("key"); - let path = path_buf.as_path(); - let mut file = match fs::File::create(&path) { - Ok(file) => file, - Err(e) => { - warn!("Error creating key file: {:?}", e); - return; - } - }; - if let Err(e) = restrict_permissions_owner(path, true, false) { - warn!(target: "network", "Failed to modify permissions of the file ({})", e); - } - if let Err(e) = file.write(&key.hex().into_bytes()[2..]) { - warn!("Error writing key file: {:?}", e); - } + let mut path_buf = PathBuf::from(path); + if let Err(e) = fs::create_dir_all(path_buf.as_path()) { + warn!("Error creating key directory: {:?}", e); + return; + }; + path_buf.push("key"); + let path = path_buf.as_path(); + let mut file = match fs::File::create(&path) { + Ok(file) => file, + Err(e) => { + warn!("Error creating key file: {:?}", e); + return; + } + }; + if let Err(e) = restrict_permissions_owner(path, true, false) { + warn!(target: "network", "Failed to modify permissions of the file ({})", e); + } + if let Err(e) = file.write(&key.hex().into_bytes()[2..]) { + warn!("Error writing key file: {:?}", e); + } } fn load_key(path: &Path) -> Option { - let mut path_buf = PathBuf::from(path); - path_buf.push("key"); - let mut file = match fs::File::open(path_buf.as_path()) { - Ok(file) => file, - Err(e) => { - debug!("Error opening key file: {:?}", e); - return None; - } - }; - let mut buf = String::new(); - match file.read_to_string(&mut buf) { - Ok(_) => {}, - Err(e) => { - warn!("Error reading key file: {:?}", e); - return None; - } - } - match Secret::from_str(&buf) { - Ok(key) => Some(key), - Err(e) => { - warn!("Error parsing key file: {:?}", e); - None - } - } + let mut path_buf = PathBuf::from(path); + path_buf.push("key"); + let mut file = match fs::File::open(path_buf.as_path()) { + Ok(file) => file, + Err(e) => { + debug!("Error opening key file: {:?}", e); + return None; + } 
+ }; + let mut buf = String::new(); + match file.read_to_string(&mut buf) { + Ok(_) => {} + Err(e) => { + warn!("Error reading key file: {:?}", e); + return None; + } + } + match Secret::from_str(&buf) { + Ok(key) => Some(key), + Err(e) => { + warn!("Error parsing key file: {:?}", e); + None + } + } } #[test] fn key_save_load() { - use tempdir::TempDir; + use tempdir::TempDir; - let tempdir = TempDir::new("").unwrap(); - let key = H256::random().into(); - save_key(tempdir.path(), &key); - let r = load_key(tempdir.path()); - assert_eq!(key, r.unwrap()); + let tempdir = TempDir::new("").unwrap(); + let key = H256::random().into(); + save_key(tempdir.path(), &key); + let r = load_key(tempdir.path()); + assert_eq!(key, r.unwrap()); } #[test] fn host_client_url() { - let mut config = NetworkConfiguration::new_local(); - let key = "6f7b0d801bc7b5ce7bbd930b84fd0369b3eb25d09be58d64ba811091046f3aa2".parse().unwrap(); - config.use_secret = Some(key); - let host: Host = Host::new(config, None).unwrap(); - assert!(host.local_url().starts_with("enode://101b3ef5a4ea7a1c7928e24c4c75fd053c235d7b80c22ae5c03d145d0ac7396e2a4ffff9adee3133a7b05044a5cee08115fd65145e5165d646bde371010d803c@")); + let mut config = NetworkConfiguration::new_local(); + let key = "6f7b0d801bc7b5ce7bbd930b84fd0369b3eb25d09be58d64ba811091046f3aa2" + .parse() + .unwrap(); + config.use_secret = Some(key); + let host: Host = Host::new(config, None).unwrap(); + assert!(host.local_url().starts_with("enode://101b3ef5a4ea7a1c7928e24c4c75fd053c235d7b80c22ae5c03d145d0ac7396e2a4ffff9adee3133a7b05044a5cee08115fd65145e5165d646bde371010d803c@")); } diff --git a/util/network-devp2p/src/ip_utils.rs b/util/network-devp2p/src/ip_utils.rs index 4b8473cec..f3b78d118 100644 --- a/util/network-devp2p/src/ip_utils.rs +++ b/util/network-devp2p/src/ip_utils.rs @@ -16,517 +16,722 @@ // Based on original work by David Levy https://raw.githubusercontent.com/dlevy47/rust-interfaces -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, 
SocketAddrV4, SocketAddrV6}; -use std::io; -use igd::{PortMappingProtocol, search_gateway_from_timeout}; -use std::time::Duration; -use node_table::NodeEndpoint; +use igd::{search_gateway_from_timeout, PortMappingProtocol}; use ipnetwork::IpNetwork; +use node_table::NodeEndpoint; +use std::{ + io, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, + time::Duration, +}; /// Socket address extension for rustc beta. To be replaces with now unstable API pub trait SocketAddrExt { - /// Returns true if the address appears to be globally routable. - fn is_global_s(&self) -> bool; + /// Returns true if the address appears to be globally routable. + fn is_global_s(&self) -> bool; - // Ipv4 specific - fn is_shared_space(&self) -> bool { false } - fn is_special_purpose(&self) -> bool { false } - fn is_benchmarking(&self) -> bool { false } - fn is_future_use(&self) -> bool { false } + // Ipv4 specific + fn is_shared_space(&self) -> bool { + false + } + fn is_special_purpose(&self) -> bool { + false + } + fn is_benchmarking(&self) -> bool { + false + } + fn is_future_use(&self) -> bool { + false + } - // Ipv6 specific - fn is_unique_local_s(&self) -> bool { false } - fn is_unicast_link_local_s(&self) -> bool { false } - fn is_documentation_s(&self) -> bool { false } - fn is_global_multicast(&self) -> bool { false } - fn is_other_multicast(&self) -> bool { false } + // Ipv6 specific + fn is_unique_local_s(&self) -> bool { + false + } + fn is_unicast_link_local_s(&self) -> bool { + false + } + fn is_documentation_s(&self) -> bool { + false + } + fn is_global_multicast(&self) -> bool { + false + } + fn is_other_multicast(&self) -> bool { + false + } - fn is_reserved(&self) -> bool; - fn is_usable_public(&self) -> bool; - fn is_usable_private(&self) -> bool; + fn is_reserved(&self) -> bool; + fn is_usable_public(&self) -> bool; + fn is_usable_private(&self) -> bool; - fn is_within(&self, ipnet: &IpNetwork) -> bool; + fn is_within(&self, ipnet: &IpNetwork) 
-> bool; } impl SocketAddrExt for Ipv4Addr { - fn is_global_s(&self) -> bool { - !self.is_private() && - !self.is_loopback() && - !self.is_link_local() && - !self.is_broadcast() && - !self.is_documentation() - } + fn is_global_s(&self) -> bool { + !self.is_private() + && !self.is_loopback() + && !self.is_link_local() + && !self.is_broadcast() + && !self.is_documentation() + } - // Used for communications between a service provider and its subscribers when using a carrier-grade NAT - // see: https://en.wikipedia.org/wiki/Reserved_IP_addresses - fn is_shared_space(&self) -> bool { - *self >= Ipv4Addr::new(100, 64, 0, 0) && - *self <= Ipv4Addr::new(100, 127, 255, 255) - } + // Used for communications between a service provider and its subscribers when using a carrier-grade NAT + // see: https://en.wikipedia.org/wiki/Reserved_IP_addresses + fn is_shared_space(&self) -> bool { + *self >= Ipv4Addr::new(100, 64, 0, 0) && *self <= Ipv4Addr::new(100, 127, 255, 255) + } - // Used for the IANA IPv4 Special Purpose Address Registry - // see: https://en.wikipedia.org/wiki/Reserved_IP_addresses - fn is_special_purpose(&self) -> bool { - *self >= Ipv4Addr::new(192, 0, 0, 0) && - *self <= Ipv4Addr::new(192, 0, 0, 255) - } + // Used for the IANA IPv4 Special Purpose Address Registry + // see: https://en.wikipedia.org/wiki/Reserved_IP_addresses + fn is_special_purpose(&self) -> bool { + *self >= Ipv4Addr::new(192, 0, 0, 0) && *self <= Ipv4Addr::new(192, 0, 0, 255) + } - // Used for testing of inter-network communications between two separate subnets - // see: https://en.wikipedia.org/wiki/Reserved_IP_addresses - fn is_benchmarking(&self) -> bool { - *self >= Ipv4Addr::new(198, 18, 0, 0) && - *self <= Ipv4Addr::new(198, 19, 255, 255) - } + // Used for testing of inter-network communications between two separate subnets + // see: https://en.wikipedia.org/wiki/Reserved_IP_addresses + fn is_benchmarking(&self) -> bool { + *self >= Ipv4Addr::new(198, 18, 0, 0) && *self <= 
Ipv4Addr::new(198, 19, 255, 255) + } - // Reserved for future use - // see: https://en.wikipedia.org/wiki/Reserved_IP_addresses - fn is_future_use(&self) -> bool { - *self >= Ipv4Addr::new(240, 0, 0, 0) && - *self <= Ipv4Addr::new(255, 255, 255, 254) - } + // Reserved for future use + // see: https://en.wikipedia.org/wiki/Reserved_IP_addresses + fn is_future_use(&self) -> bool { + *self >= Ipv4Addr::new(240, 0, 0, 0) && *self <= Ipv4Addr::new(255, 255, 255, 254) + } - fn is_reserved(&self) -> bool { - self.is_unspecified() || - self.is_loopback() || - self.is_link_local() || - self.is_broadcast() || - self.is_documentation() || - self.is_multicast() || - self.is_shared_space() || - self.is_special_purpose() || - self.is_benchmarking() || - self.is_future_use() - } + fn is_reserved(&self) -> bool { + self.is_unspecified() + || self.is_loopback() + || self.is_link_local() + || self.is_broadcast() + || self.is_documentation() + || self.is_multicast() + || self.is_shared_space() + || self.is_special_purpose() + || self.is_benchmarking() + || self.is_future_use() + } - fn is_usable_public(&self) -> bool { - !self.is_reserved() && - !self.is_private() - } + fn is_usable_public(&self) -> bool { + !self.is_reserved() && !self.is_private() + } - fn is_usable_private(&self) -> bool { - self.is_private() - } + fn is_usable_private(&self) -> bool { + self.is_private() + } - fn is_within(&self, ipnet: &IpNetwork) -> bool { - match ipnet { - IpNetwork::V4(ipnet) => ipnet.contains(*self), - _ => false - } - } + fn is_within(&self, ipnet: &IpNetwork) -> bool { + match ipnet { + IpNetwork::V4(ipnet) => ipnet.contains(*self), + _ => false, + } + } } impl SocketAddrExt for Ipv6Addr { - fn is_global_s(&self) -> bool { - self.is_global_multicast() || - (!self.is_loopback() && - !self.is_unique_local_s() && - !self.is_unicast_link_local_s() && - !self.is_documentation_s() && - !self.is_other_multicast()) - } + fn is_global_s(&self) -> bool { + self.is_global_multicast() + || 
(!self.is_loopback() + && !self.is_unique_local_s() + && !self.is_unicast_link_local_s() + && !self.is_documentation_s() + && !self.is_other_multicast()) + } - // unique local address (fc00::/7). - fn is_unique_local_s(&self) -> bool { - (self.segments()[0] & 0xfe00) == 0xfc00 - } + // unique local address (fc00::/7). + fn is_unique_local_s(&self) -> bool { + (self.segments()[0] & 0xfe00) == 0xfc00 + } - // unicast and link-local (fe80::/10). - fn is_unicast_link_local_s(&self) -> bool { - (self.segments()[0] & 0xffc0) == 0xfe80 - } + // unicast and link-local (fe80::/10). + fn is_unicast_link_local_s(&self) -> bool { + (self.segments()[0] & 0xffc0) == 0xfe80 + } - // reserved for documentation (2001:db8::/32). - fn is_documentation_s(&self) -> bool { - (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) - } + // reserved for documentation (2001:db8::/32). + fn is_documentation_s(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) + } - fn is_global_multicast(&self) -> bool { - self.segments()[0] & 0x000f == 14 - } + fn is_global_multicast(&self) -> bool { + self.segments()[0] & 0x000f == 14 + } - fn is_other_multicast(&self) -> bool { - self.is_multicast() && !self.is_global_multicast() - } + fn is_other_multicast(&self) -> bool { + self.is_multicast() && !self.is_global_multicast() + } - fn is_reserved(&self) -> bool { - self.is_unspecified() || - self.is_loopback() || - self.is_unicast_link_local_s() || - self.is_documentation_s() || - self.is_other_multicast() - } + fn is_reserved(&self) -> bool { + self.is_unspecified() + || self.is_loopback() + || self.is_unicast_link_local_s() + || self.is_documentation_s() + || self.is_other_multicast() + } - fn is_usable_public(&self) -> bool { - !self.is_reserved() && - !self.is_unique_local_s() - } + fn is_usable_public(&self) -> bool { + !self.is_reserved() && !self.is_unique_local_s() + } - fn is_usable_private(&self) -> bool { - self.is_unique_local_s() - } + fn 
is_usable_private(&self) -> bool { + self.is_unique_local_s() + } - fn is_within(&self, ipnet: &IpNetwork) -> bool { - match ipnet { - IpNetwork::V6(ipnet) => ipnet.contains(*self), - _ => false - } - } + fn is_within(&self, ipnet: &IpNetwork) -> bool { + match ipnet { + IpNetwork::V6(ipnet) => ipnet.contains(*self), + _ => false, + } + } } impl SocketAddrExt for IpAddr { - fn is_global_s(&self) -> bool { - match *self { - IpAddr::V4(ref ip) => ip.is_global_s(), - IpAddr::V6(ref ip) => ip.is_global_s(), - } - } + fn is_global_s(&self) -> bool { + match *self { + IpAddr::V4(ref ip) => ip.is_global_s(), + IpAddr::V6(ref ip) => ip.is_global_s(), + } + } - fn is_reserved(&self) -> bool { - match *self { - IpAddr::V4(ref ip) => ip.is_reserved(), - IpAddr::V6(ref ip) => ip.is_reserved(), - } - } + fn is_reserved(&self) -> bool { + match *self { + IpAddr::V4(ref ip) => ip.is_reserved(), + IpAddr::V6(ref ip) => ip.is_reserved(), + } + } - fn is_usable_public(&self) -> bool { - match *self { - IpAddr::V4(ref ip) => ip.is_usable_public(), - IpAddr::V6(ref ip) => ip.is_usable_public(), - } - } + fn is_usable_public(&self) -> bool { + match *self { + IpAddr::V4(ref ip) => ip.is_usable_public(), + IpAddr::V6(ref ip) => ip.is_usable_public(), + } + } - fn is_usable_private(&self) -> bool { - match *self { - IpAddr::V4(ref ip) => ip.is_usable_private(), - IpAddr::V6(ref ip) => ip.is_usable_private(), - } - } + fn is_usable_private(&self) -> bool { + match *self { + IpAddr::V4(ref ip) => ip.is_usable_private(), + IpAddr::V6(ref ip) => ip.is_usable_private(), + } + } - fn is_within(&self, ipnet: &IpNetwork) -> bool { - match *self { - IpAddr::V4(ref ip) => ip.is_within(ipnet), - IpAddr::V6(ref ip) => ip.is_within(ipnet) - } - } + fn is_within(&self, ipnet: &IpNetwork) -> bool { + match *self { + IpAddr::V4(ref ip) => ip.is_within(ipnet), + IpAddr::V6(ref ip) => ip.is_within(ipnet), + } + } } #[cfg(not(any(windows, target_os = "android")))] mod getinterfaces { - use std::{mem, io}; 
- use libc::{AF_INET, AF_INET6}; - use libc::{getifaddrs, freeifaddrs, ifaddrs, sockaddr, sockaddr_in, sockaddr_in6}; - use std::net::{Ipv4Addr, Ipv6Addr, IpAddr}; + use libc::{ + freeifaddrs, getifaddrs, ifaddrs, sockaddr, sockaddr_in, sockaddr_in6, AF_INET, AF_INET6, + }; + use std::{ + io, mem, + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + }; - fn convert_sockaddr(sa: *mut sockaddr) -> Option { - if sa.is_null() { return None; } + fn convert_sockaddr(sa: *mut sockaddr) -> Option { + if sa.is_null() { + return None; + } - let (addr, _) = match i32::from(unsafe { *sa }.sa_family) { - AF_INET => { - let sa: *const sockaddr_in = sa as *const sockaddr_in; - let sa = unsafe { &*sa }; - let (addr, port) = (sa.sin_addr.s_addr, sa.sin_port); - // convert u32 to an `Ipv4 address`, but the u32 must be converted to `host-order` - // that's why `from_be` is used! - (IpAddr::V4(Ipv4Addr::from(::from_be(addr))), port) - }, - AF_INET6 => { - let sa: *const sockaddr_in6 = sa as *const sockaddr_in6; - let sa = & unsafe { *sa }; - let (addr, port) = (sa.sin6_addr.s6_addr, sa.sin6_port); - let ip_addr = Ipv6Addr::from(addr); - debug_assert!(addr == ip_addr.octets()); - (IpAddr::V6(ip_addr), port) - }, - _ => return None, - }; - Some(addr) - } + let (addr, _) = match i32::from(unsafe { *sa }.sa_family) { + AF_INET => { + let sa: *const sockaddr_in = sa as *const sockaddr_in; + let sa = unsafe { &*sa }; + let (addr, port) = (sa.sin_addr.s_addr, sa.sin_port); + // convert u32 to an `Ipv4 address`, but the u32 must be converted to `host-order` + // that's why `from_be` is used! 
+ (IpAddr::V4(Ipv4Addr::from(::from_be(addr))), port) + } + AF_INET6 => { + let sa: *const sockaddr_in6 = sa as *const sockaddr_in6; + let sa = &unsafe { *sa }; + let (addr, port) = (sa.sin6_addr.s6_addr, sa.sin6_port); + let ip_addr = Ipv6Addr::from(addr); + debug_assert!(addr == ip_addr.octets()); + (IpAddr::V6(ip_addr), port) + } + _ => return None, + }; + Some(addr) + } - fn convert_ifaddrs(ifa: *mut ifaddrs) -> Option { - let ifa = unsafe { &mut *ifa }; - convert_sockaddr(ifa.ifa_addr) - } + fn convert_ifaddrs(ifa: *mut ifaddrs) -> Option { + let ifa = unsafe { &mut *ifa }; + convert_sockaddr(ifa.ifa_addr) + } - pub fn get_all() -> io::Result> { - let mut ifap: *mut ifaddrs = unsafe { mem::zeroed() }; - if unsafe { getifaddrs(&mut ifap as *mut _) } != 0 { - return Err(io::Error::last_os_error()); - } + pub fn get_all() -> io::Result> { + let mut ifap: *mut ifaddrs = unsafe { mem::zeroed() }; + if unsafe { getifaddrs(&mut ifap as *mut _) } != 0 { + return Err(io::Error::last_os_error()); + } - let mut ret = Vec::new(); - let mut cur: *mut ifaddrs = ifap; - while !cur.is_null() { - if let Some(ip_addr) = convert_ifaddrs(cur) { - ret.push(ip_addr); - } + let mut ret = Vec::new(); + let mut cur: *mut ifaddrs = ifap; + while !cur.is_null() { + if let Some(ip_addr) = convert_ifaddrs(cur) { + ret.push(ip_addr); + } - //TODO: do something else maybe? - cur = unsafe { (*cur).ifa_next }; - } + //TODO: do something else maybe? 
+ cur = unsafe { (*cur).ifa_next }; + } - unsafe { freeifaddrs(ifap) }; - Ok(ret) - } + unsafe { freeifaddrs(ifap) }; + Ok(ret) + } } #[cfg(not(any(windows, target_os = "android")))] fn get_if_addrs() -> io::Result> { - getinterfaces::get_all() + getinterfaces::get_all() } #[cfg(any(windows, target_os = "android"))] fn get_if_addrs() -> io::Result> { - Ok(Vec::new()) + Ok(Vec::new()) } /// Select the best available public address pub fn select_public_address(port: u16) -> SocketAddr { - match get_if_addrs() { - Ok(list) => { - //prefer IPV4 bindings - for addr in &list { //TODO: use better criteria than just the first in the list - match addr { - IpAddr::V4(a) if !a.is_reserved() => { - return SocketAddr::V4(SocketAddrV4::new(*a, port)); - }, - _ => {}, - } - } - for addr in &list { - match addr { - IpAddr::V6(a) if !a.is_reserved() => { - return SocketAddr::V6(SocketAddrV6::new(*a, port, 0, 0)); - }, - _ => {}, - } - } - }, - Err(e) => debug!("Error listing public interfaces: {:?}", e) - } - SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port)) + match get_if_addrs() { + Ok(list) => { + //prefer IPV4 bindings + for addr in &list { + //TODO: use better criteria than just the first in the list + match addr { + IpAddr::V4(a) if !a.is_reserved() => { + return SocketAddr::V4(SocketAddrV4::new(*a, port)); + } + _ => {} + } + } + for addr in &list { + match addr { + IpAddr::V6(a) if !a.is_reserved() => { + return SocketAddr::V6(SocketAddrV6::new(*a, port, 0, 0)); + } + _ => {} + } + } + } + Err(e) => debug!("Error listing public interfaces: {:?}", e), + } + SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port)) } pub fn map_external_address(local: &NodeEndpoint) -> Option { - if let SocketAddr::V4(ref local_addr) = local.address { - let local_ip = *local_addr.ip(); - let local_port = local_addr.port(); - let local_udp_port = local.udp_port; + if let SocketAddr::V4(ref local_addr) = local.address { + let local_ip = *local_addr.ip(); + let 
local_port = local_addr.port(); + let local_udp_port = local.udp_port; - let search_gateway_child = ::std::thread::spawn(move || { - match search_gateway_from_timeout(local_ip, Duration::new(5, 0)) { - Err(ref err) => debug!("Gateway search error: {}", err), - Ok(gateway) => { - match gateway.get_external_ip() { - Err(ref err) => { - debug!("IP request error: {}", err); - }, - Ok(external_addr) => { - match gateway.add_any_port(PortMappingProtocol::TCP, SocketAddrV4::new(local_ip, local_port), 0, "Parity Node/TCP") { - Err(ref err) => { - debug!("Port mapping error: {}", err); - }, - Ok(tcp_port) => { - match gateway.add_any_port(PortMappingProtocol::UDP, SocketAddrV4::new(local_ip, local_udp_port), 0, "Parity Node/UDP") { - Err(ref err) => { - debug!("Port mapping error: {}", err); - }, - Ok(udp_port) => { - return Some(NodeEndpoint { address: SocketAddr::V4(SocketAddrV4::new(external_addr, tcp_port)), udp_port }); - }, - } - }, - } - }, - } - }, - } - None - }); - return search_gateway_child.join().ok()?; - } - None + let search_gateway_child = ::std::thread::spawn(move || { + match search_gateway_from_timeout(local_ip, Duration::new(5, 0)) { + Err(ref err) => debug!("Gateway search error: {}", err), + Ok(gateway) => match gateway.get_external_ip() { + Err(ref err) => { + debug!("IP request error: {}", err); + } + Ok(external_addr) => { + match gateway.add_any_port( + PortMappingProtocol::TCP, + SocketAddrV4::new(local_ip, local_port), + 0, + "Parity Node/TCP", + ) { + Err(ref err) => { + debug!("Port mapping error: {}", err); + } + Ok(tcp_port) => { + match gateway.add_any_port( + PortMappingProtocol::UDP, + SocketAddrV4::new(local_ip, local_udp_port), + 0, + "Parity Node/UDP", + ) { + Err(ref err) => { + debug!("Port mapping error: {}", err); + } + Ok(udp_port) => { + return Some(NodeEndpoint { + address: SocketAddr::V4(SocketAddrV4::new( + external_addr, + tcp_port, + )), + udp_port, + }); + } + } + } + } + } + }, + } + None + }); + return 
search_gateway_child.join().ok()?; + } + None } #[test] fn can_select_public_address() { - let pub_address = select_public_address(40477); - assert!(pub_address.port() == 40477); + let pub_address = select_public_address(40477); + assert!(pub_address.port() == 40477); } #[ignore] #[test] fn can_map_external_address_or_fail() { - let pub_address = select_public_address(40478); - let _ = map_external_address(&NodeEndpoint { address: pub_address, udp_port: 40478 }); + let pub_address = select_public_address(40478); + let _ = map_external_address(&NodeEndpoint { + address: pub_address, + udp_port: 40478, + }); } #[test] fn ipv4_properties() { - fn check(octets: &[u8; 4], unspec: bool, loopback: bool, - private: bool, link_local: bool, global: bool, - multicast: bool, broadcast: bool, documentation: bool) { - let ip = Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]); - assert_eq!(octets, &ip.octets()); + fn check( + octets: &[u8; 4], + unspec: bool, + loopback: bool, + private: bool, + link_local: bool, + global: bool, + multicast: bool, + broadcast: bool, + documentation: bool, + ) { + let ip = Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]); + assert_eq!(octets, &ip.octets()); - assert_eq!(ip.is_unspecified(), unspec); - assert_eq!(ip.is_loopback(), loopback); - assert_eq!(ip.is_private(), private); - assert_eq!(ip.is_link_local(), link_local); - assert_eq!(ip.is_global_s(), global); - assert_eq!(ip.is_multicast(), multicast); - assert_eq!(ip.is_broadcast(), broadcast); - assert_eq!(ip.is_documentation(), documentation); - } + assert_eq!(ip.is_unspecified(), unspec); + assert_eq!(ip.is_loopback(), loopback); + assert_eq!(ip.is_private(), private); + assert_eq!(ip.is_link_local(), link_local); + assert_eq!(ip.is_global_s(), global); + assert_eq!(ip.is_multicast(), multicast); + assert_eq!(ip.is_broadcast(), broadcast); + assert_eq!(ip.is_documentation(), documentation); + } - // address unspec loopbk privt linloc global multicast brdcast doc - 
check(&[0, 0, 0, 0], true, false, false, false, true, false, false, false); - check(&[0, 0, 0, 1], false, false, false, false, true, false, false, false); - check(&[1, 0, 0, 0], false, false, false, false, true, false, false, false); - check(&[10, 9, 8, 7], false, false, true, false, false, false, false, false); - check(&[127, 1, 2, 3], false, true, false, false, false, false, false, false); - check(&[172, 31, 254, 253], false, false, true, false, false, false, false, false); - check(&[169, 254, 253, 242], false, false, false, true, false, false, false, false); - check(&[192, 0, 2, 183], false, false, false, false, false, false, false, true); - check(&[192, 1, 2, 183], false, false, false, false, true, false, false, false); - check(&[192, 168, 254, 253], false, false, true, false, false, false, false, false); - check(&[198, 51, 100, 0], false, false, false, false, false, false, false, true); - check(&[203, 0, 113, 0], false, false, false, false, false, false, false, true); - check(&[203, 2, 113, 0], false, false, false, false, true, false, false, false); - check(&[224, 0, 0, 0], false, false, false, false, true, true, false, false); - check(&[239, 255, 255, 255], false, false, false, false, true, true, false, false); - check(&[255, 255, 255, 255], false, false, false, false, false, false, true, false); + // address unspec loopbk privt linloc global multicast brdcast doc + check( + &[0, 0, 0, 0], + true, + false, + false, + false, + true, + false, + false, + false, + ); + check( + &[0, 0, 0, 1], + false, + false, + false, + false, + true, + false, + false, + false, + ); + check( + &[1, 0, 0, 0], + false, + false, + false, + false, + true, + false, + false, + false, + ); + check( + &[10, 9, 8, 7], + false, + false, + true, + false, + false, + false, + false, + false, + ); + check( + &[127, 1, 2, 3], + false, + true, + false, + false, + false, + false, + false, + false, + ); + check( + &[172, 31, 254, 253], + false, + false, + true, + false, + false, + false, + false, 
+ false, + ); + check( + &[169, 254, 253, 242], + false, + false, + false, + true, + false, + false, + false, + false, + ); + check( + &[192, 0, 2, 183], + false, + false, + false, + false, + false, + false, + false, + true, + ); + check( + &[192, 1, 2, 183], + false, + false, + false, + false, + true, + false, + false, + false, + ); + check( + &[192, 168, 254, 253], + false, + false, + true, + false, + false, + false, + false, + false, + ); + check( + &[198, 51, 100, 0], + false, + false, + false, + false, + false, + false, + false, + true, + ); + check( + &[203, 0, 113, 0], + false, + false, + false, + false, + false, + false, + false, + true, + ); + check( + &[203, 2, 113, 0], + false, + false, + false, + false, + true, + false, + false, + false, + ); + check( + &[224, 0, 0, 0], + false, + false, + false, + false, + true, + true, + false, + false, + ); + check( + &[239, 255, 255, 255], + false, + false, + false, + false, + true, + true, + false, + false, + ); + check( + &[255, 255, 255, 255], + false, + false, + false, + false, + false, + false, + true, + false, + ); } #[test] fn ipv4_shared_space() { - assert!(!Ipv4Addr::new(100, 63, 255, 255).is_shared_space()); - assert!(Ipv4Addr::new(100, 64, 0, 0).is_shared_space()); - assert!(Ipv4Addr::new(100, 127, 255, 255).is_shared_space()); - assert!(!Ipv4Addr::new(100, 128, 0, 0).is_shared_space()); + assert!(!Ipv4Addr::new(100, 63, 255, 255).is_shared_space()); + assert!(Ipv4Addr::new(100, 64, 0, 0).is_shared_space()); + assert!(Ipv4Addr::new(100, 127, 255, 255).is_shared_space()); + assert!(!Ipv4Addr::new(100, 128, 0, 0).is_shared_space()); } #[test] fn ipv4_special_purpose() { - assert!(!Ipv4Addr::new(191, 255, 255, 255).is_special_purpose()); - assert!(Ipv4Addr::new(192, 0, 0, 0).is_special_purpose()); - assert!(Ipv4Addr::new(192, 0, 0, 255).is_special_purpose()); - assert!(!Ipv4Addr::new(192, 0, 1, 255).is_special_purpose()); + assert!(!Ipv4Addr::new(191, 255, 255, 255).is_special_purpose()); + 
assert!(Ipv4Addr::new(192, 0, 0, 0).is_special_purpose()); + assert!(Ipv4Addr::new(192, 0, 0, 255).is_special_purpose()); + assert!(!Ipv4Addr::new(192, 0, 1, 255).is_special_purpose()); } #[test] fn ipv4_benchmarking() { - assert!(!Ipv4Addr::new(198, 17, 255, 255).is_benchmarking()); - assert!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking()); - assert!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking()); - assert!(!Ipv4Addr::new(198, 20, 0, 0).is_benchmarking()); + assert!(!Ipv4Addr::new(198, 17, 255, 255).is_benchmarking()); + assert!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking()); + assert!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking()); + assert!(!Ipv4Addr::new(198, 20, 0, 0).is_benchmarking()); } #[test] fn ipv4_future_use() { - assert!(!Ipv4Addr::new(239, 255, 255, 255).is_future_use()); - assert!(Ipv4Addr::new(240, 0, 0, 0).is_future_use()); - assert!(Ipv4Addr::new(255, 255, 255, 254).is_future_use()); - assert!(!Ipv4Addr::new(255, 255, 255, 255).is_future_use()); + assert!(!Ipv4Addr::new(239, 255, 255, 255).is_future_use()); + assert!(Ipv4Addr::new(240, 0, 0, 0).is_future_use()); + assert!(Ipv4Addr::new(255, 255, 255, 254).is_future_use()); + assert!(!Ipv4Addr::new(255, 255, 255, 255).is_future_use()); } #[test] fn ipv4_usable_public() { - assert!(!Ipv4Addr::new(0,0,0,0).is_usable_public()); // unspecified - assert!(Ipv4Addr::new(0,0,0,1).is_usable_public()); + assert!(!Ipv4Addr::new(0, 0, 0, 0).is_usable_public()); // unspecified + assert!(Ipv4Addr::new(0, 0, 0, 1).is_usable_public()); - assert!(Ipv4Addr::new(9,255,255,255).is_usable_public()); - assert!(!Ipv4Addr::new(10,0,0,0).is_usable_public()); // private intra-network - assert!(!Ipv4Addr::new(10,255,255,255).is_usable_public()); // private intra-network - assert!(Ipv4Addr::new(11,0,0,0).is_usable_public()); + assert!(Ipv4Addr::new(9, 255, 255, 255).is_usable_public()); + assert!(!Ipv4Addr::new(10, 0, 0, 0).is_usable_public()); // private intra-network + assert!(!Ipv4Addr::new(10, 255, 255, 
255).is_usable_public()); // private intra-network + assert!(Ipv4Addr::new(11, 0, 0, 0).is_usable_public()); - assert!(Ipv4Addr::new(100, 63, 255, 255).is_usable_public()); - assert!(!Ipv4Addr::new(100, 64, 0, 0).is_usable_public()); // shared space - assert!(!Ipv4Addr::new(100, 127, 255, 255).is_usable_public()); // shared space - assert!(Ipv4Addr::new(100, 128, 0, 0).is_usable_public()); + assert!(Ipv4Addr::new(100, 63, 255, 255).is_usable_public()); + assert!(!Ipv4Addr::new(100, 64, 0, 0).is_usable_public()); // shared space + assert!(!Ipv4Addr::new(100, 127, 255, 255).is_usable_public()); // shared space + assert!(Ipv4Addr::new(100, 128, 0, 0).is_usable_public()); - assert!(Ipv4Addr::new(126,255,255,255).is_usable_public()); - assert!(!Ipv4Addr::new(127,0,0,0).is_usable_public()); // loopback - assert!(!Ipv4Addr::new(127,255,255,255).is_usable_public()); // loopback - assert!(Ipv4Addr::new(128,0,0,0).is_usable_public()); + assert!(Ipv4Addr::new(126, 255, 255, 255).is_usable_public()); + assert!(!Ipv4Addr::new(127, 0, 0, 0).is_usable_public()); // loopback + assert!(!Ipv4Addr::new(127, 255, 255, 255).is_usable_public()); // loopback + assert!(Ipv4Addr::new(128, 0, 0, 0).is_usable_public()); - assert!(Ipv4Addr::new(169,253,255,255).is_usable_public()); - assert!(!Ipv4Addr::new(169,254,0,0).is_usable_public()); // link-local - assert!(!Ipv4Addr::new(169,254,255,255).is_usable_public()); // link-local - assert!(Ipv4Addr::new(169,255,0,0).is_usable_public()); + assert!(Ipv4Addr::new(169, 253, 255, 255).is_usable_public()); + assert!(!Ipv4Addr::new(169, 254, 0, 0).is_usable_public()); // link-local + assert!(!Ipv4Addr::new(169, 254, 255, 255).is_usable_public()); // link-local + assert!(Ipv4Addr::new(169, 255, 0, 0).is_usable_public()); - assert!(Ipv4Addr::new(172,15,255,255).is_usable_public()); - assert!(!Ipv4Addr::new(172,16,0,0).is_usable_public()); // private intra-network - assert!(!Ipv4Addr::new(172,31,255,255).is_usable_public()); // private intra-network - 
assert!(Ipv4Addr::new(172,32,255,255).is_usable_public()); + assert!(Ipv4Addr::new(172, 15, 255, 255).is_usable_public()); + assert!(!Ipv4Addr::new(172, 16, 0, 0).is_usable_public()); // private intra-network + assert!(!Ipv4Addr::new(172, 31, 255, 255).is_usable_public()); // private intra-network + assert!(Ipv4Addr::new(172, 32, 255, 255).is_usable_public()); - assert!(Ipv4Addr::new(191,255,255,255).is_usable_public()); - assert!(!Ipv4Addr::new(192,0,0,0).is_usable_public()); // special purpose - assert!(!Ipv4Addr::new(192,0,0,255).is_usable_public()); // special purpose - assert!(Ipv4Addr::new(192,0,1,0).is_usable_public()); + assert!(Ipv4Addr::new(191, 255, 255, 255).is_usable_public()); + assert!(!Ipv4Addr::new(192, 0, 0, 0).is_usable_public()); // special purpose + assert!(!Ipv4Addr::new(192, 0, 0, 255).is_usable_public()); // special purpose + assert!(Ipv4Addr::new(192, 0, 1, 0).is_usable_public()); - assert!(Ipv4Addr::new(192,0,1,255).is_usable_public()); - assert!(!Ipv4Addr::new(192,0,2,0).is_usable_public()); // documentation - assert!(!Ipv4Addr::new(192,0,2,255).is_usable_public()); // documentation - assert!(Ipv4Addr::new(192,0,3,0).is_usable_public()); + assert!(Ipv4Addr::new(192, 0, 1, 255).is_usable_public()); + assert!(!Ipv4Addr::new(192, 0, 2, 0).is_usable_public()); // documentation + assert!(!Ipv4Addr::new(192, 0, 2, 255).is_usable_public()); // documentation + assert!(Ipv4Addr::new(192, 0, 3, 0).is_usable_public()); - assert!(Ipv4Addr::new(192,167,255,255).is_usable_public()); - assert!(!Ipv4Addr::new(192,168,0,0).is_usable_public()); // private intra-network - assert!(!Ipv4Addr::new(192,168,255,255).is_usable_public()); // private intra-network - assert!(Ipv4Addr::new(192,169,0,0).is_usable_public()); + assert!(Ipv4Addr::new(192, 167, 255, 255).is_usable_public()); + assert!(!Ipv4Addr::new(192, 168, 0, 0).is_usable_public()); // private intra-network + assert!(!Ipv4Addr::new(192, 168, 255, 255).is_usable_public()); // private intra-network + 
assert!(Ipv4Addr::new(192, 169, 0, 0).is_usable_public()); - assert!(Ipv4Addr::new(198,17,255,255).is_usable_public()); - assert!(!Ipv4Addr::new(198,18,0,0).is_usable_public()); // benchmarking - assert!(!Ipv4Addr::new(198,19,255,255).is_usable_public()); // benchmarking - assert!(Ipv4Addr::new(198,20,0,0).is_usable_public()); + assert!(Ipv4Addr::new(198, 17, 255, 255).is_usable_public()); + assert!(!Ipv4Addr::new(198, 18, 0, 0).is_usable_public()); // benchmarking + assert!(!Ipv4Addr::new(198, 19, 255, 255).is_usable_public()); // benchmarking + assert!(Ipv4Addr::new(198, 20, 0, 0).is_usable_public()); - assert!(Ipv4Addr::new(198,51,99,255).is_usable_public()); - assert!(!Ipv4Addr::new(198,51,100,0).is_usable_public()); // documentation - assert!(!Ipv4Addr::new(198,51,100,255).is_usable_public()); // documentation - assert!(Ipv4Addr::new(198,51,101,0).is_usable_public()); + assert!(Ipv4Addr::new(198, 51, 99, 255).is_usable_public()); + assert!(!Ipv4Addr::new(198, 51, 100, 0).is_usable_public()); // documentation + assert!(!Ipv4Addr::new(198, 51, 100, 255).is_usable_public()); // documentation + assert!(Ipv4Addr::new(198, 51, 101, 0).is_usable_public()); - assert!(Ipv4Addr::new(203,0,112,255).is_usable_public()); - assert!(!Ipv4Addr::new(203,0,113,0).is_usable_public()); // documentation - assert!(!Ipv4Addr::new(203,0,113,255).is_usable_public()); // documentation - assert!(Ipv4Addr::new(203,0,114,0).is_usable_public()); + assert!(Ipv4Addr::new(203, 0, 112, 255).is_usable_public()); + assert!(!Ipv4Addr::new(203, 0, 113, 0).is_usable_public()); // documentation + assert!(!Ipv4Addr::new(203, 0, 113, 255).is_usable_public()); // documentation + assert!(Ipv4Addr::new(203, 0, 114, 0).is_usable_public()); - assert!(Ipv4Addr::new(223,255,255,255).is_usable_public()); - assert!(!Ipv4Addr::new(224,0,0,0).is_usable_public()); // multicast - assert!(!Ipv4Addr::new(239, 255, 255, 255).is_usable_public()); // multicast - assert!(!Ipv4Addr::new(240, 0, 0, 0).is_usable_public()); 
// future use - assert!(!Ipv4Addr::new(255, 255, 255, 254).is_usable_public()); // future use - assert!(!Ipv4Addr::new(255, 255, 255, 255).is_usable_public()); // limited broadcast + assert!(Ipv4Addr::new(223, 255, 255, 255).is_usable_public()); + assert!(!Ipv4Addr::new(224, 0, 0, 0).is_usable_public()); // multicast + assert!(!Ipv4Addr::new(239, 255, 255, 255).is_usable_public()); // multicast + assert!(!Ipv4Addr::new(240, 0, 0, 0).is_usable_public()); // future use + assert!(!Ipv4Addr::new(255, 255, 255, 254).is_usable_public()); // future use + assert!(!Ipv4Addr::new(255, 255, 255, 255).is_usable_public()); // limited broadcast } #[test] fn ipv4_usable_private() { - assert!(!Ipv4Addr::new(9,255,255,255).is_usable_private()); - assert!(Ipv4Addr::new(10,0,0,0).is_usable_private()); // private intra-network - assert!(Ipv4Addr::new(10,255,255,255).is_usable_private()); // private intra-network - assert!(!Ipv4Addr::new(11,0,0,0).is_usable_private()); + assert!(!Ipv4Addr::new(9, 255, 255, 255).is_usable_private()); + assert!(Ipv4Addr::new(10, 0, 0, 0).is_usable_private()); // private intra-network + assert!(Ipv4Addr::new(10, 255, 255, 255).is_usable_private()); // private intra-network + assert!(!Ipv4Addr::new(11, 0, 0, 0).is_usable_private()); - assert!(!Ipv4Addr::new(172,15,255,255).is_usable_private()); - assert!(Ipv4Addr::new(172,16,0,0).is_usable_private()); // private intra-network - assert!(Ipv4Addr::new(172,31,255,255).is_usable_private()); // private intra-network - assert!(!Ipv4Addr::new(172,32,255,255).is_usable_private()); + assert!(!Ipv4Addr::new(172, 15, 255, 255).is_usable_private()); + assert!(Ipv4Addr::new(172, 16, 0, 0).is_usable_private()); // private intra-network + assert!(Ipv4Addr::new(172, 31, 255, 255).is_usable_private()); // private intra-network + assert!(!Ipv4Addr::new(172, 32, 255, 255).is_usable_private()); - assert!(!Ipv4Addr::new(192,167,255,255).is_usable_private()); - assert!(Ipv4Addr::new(192,168,0,0).is_usable_private()); // private 
intra-network - assert!(Ipv4Addr::new(192,168,255,255).is_usable_private()); // private intra-network - assert!(!Ipv4Addr::new(192,169,0,0).is_usable_private()); + assert!(!Ipv4Addr::new(192, 167, 255, 255).is_usable_private()); + assert!(Ipv4Addr::new(192, 168, 0, 0).is_usable_private()); // private intra-network + assert!(Ipv4Addr::new(192, 168, 255, 255).is_usable_private()); // private intra-network + assert!(!Ipv4Addr::new(192, 169, 0, 0).is_usable_private()); } #[test] fn ipv6_properties() { - fn check(str_addr: &str, unspec: bool, loopback: bool, global: bool) { - let ip: Ipv6Addr = str_addr.parse().unwrap(); - assert_eq!(str_addr, ip.to_string()); + fn check(str_addr: &str, unspec: bool, loopback: bool, global: bool) { + let ip: Ipv6Addr = str_addr.parse().unwrap(); + assert_eq!(str_addr, ip.to_string()); - assert_eq!(ip.is_unspecified(), unspec); - assert_eq!(ip.is_loopback(), loopback); - assert_eq!(ip.is_global_s(), global); - } + assert_eq!(ip.is_unspecified(), unspec); + assert_eq!(ip.is_loopback(), loopback); + assert_eq!(ip.is_global_s(), global); + } - // unspec loopbk global - check("::", true, false, true); - check("::1", false, true, false); + // unspec loopbk global + check("::", true, false, true); + check("::1", false, true, false); } diff --git a/util/network-devp2p/src/lib.rs b/util/network-devp2p/src/lib.rs index 6082049e8..daa9b80e2 100644 --- a/util/network-devp2p/src/lib.rs +++ b/util/network-devp2p/src/lib.rs @@ -60,31 +60,31 @@ //TODO: use Poll from mio #![allow(deprecated)] -extern crate ethcore_io as io; -extern crate parity_bytes; -extern crate parity_crypto as crypto; -extern crate ethereum_types; -extern crate parking_lot; -extern crate mio; -extern crate tiny_keccak; -extern crate crypto as rcrypto; -extern crate rand; extern crate ansi_term; //TODO: remove this -extern crate rustc_hex; -extern crate igd; -extern crate libc; -extern crate slab; -extern crate ethkey; -extern crate rlp; extern crate bytes; -extern crate 
parity_path; +extern crate crypto as rcrypto; +extern crate ethcore_io as io; extern crate ethcore_network as network; +extern crate ethereum_types; +extern crate ethkey; +extern crate igd; extern crate ipnetwork; extern crate keccak_hash as hash; +extern crate libc; +extern crate lru_cache; +extern crate mio; +extern crate parity_bytes; +extern crate parity_crypto as crypto; +extern crate parity_path; +extern crate parity_snappy as snappy; +extern crate parking_lot; +extern crate rand; +extern crate rlp; +extern crate rustc_hex; extern crate serde; extern crate serde_json; -extern crate parity_snappy as snappy; -extern crate lru_cache; +extern crate slab; +extern crate tiny_keccak; #[macro_use] extern crate error_chain; @@ -97,20 +97,21 @@ extern crate serde_derive; extern crate env_logger; #[cfg(test)] extern crate tempdir; -#[cfg(test)] #[macro_use] +#[cfg(test)] +#[macro_use] extern crate assert_matches; -mod host; mod connection; -mod handshake; -mod session; mod discovery; -mod service; -mod node_table; +mod handshake; +mod host; mod ip_utils; +mod node_table; +mod service; +mod session; -pub use service::NetworkService; pub use host::NetworkContext; +pub use service::NetworkService; pub use io::TimerToken; pub use node_table::{validate_node_url, NodeId}; diff --git a/util/network-devp2p/src/node_table.rs b/util/network-devp2p/src/node_table.rs index db001bfe7..b1a263874 100644 --- a/util/network-devp2p/src/node_table.rs +++ b/util/network-devp2p/src/node_table.rs @@ -14,21 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use discovery::{TableUpdates, NodeEntry}; +use discovery::{NodeEntry, TableUpdates}; use ethereum_types::H512; use ip_utils::*; -use network::{Error, ErrorKind, AllowIP, IpFilter}; -use rlp::{Rlp, RlpStream, DecoderError}; -use serde_json; -use std::collections::{HashMap, HashSet}; -use std::fmt::{self, Display, Formatter}; -use std::hash::{Hash, Hasher}; -use std::net::{SocketAddr, ToSocketAddrs, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr}; -use std::path::PathBuf; -use std::str::FromStr; -use std::{fs, slice}; -use std::time::{self, Duration, SystemTime}; +use network::{AllowIP, Error, ErrorKind, IpFilter}; use rand::{self, Rng}; +use rlp::{DecoderError, Rlp, RlpStream}; +use serde_json; +use std::{ + collections::{HashMap, HashSet}, + fmt::{self, Display, Formatter}, + fs, + hash::{Hash, Hasher}, + net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}, + path::PathBuf, + slice, + str::FromStr, + time::{self, Duration, SystemTime}, +}; /// Node public key pub type NodeId = H512; @@ -36,209 +39,235 @@ pub type NodeId = H512; #[derive(Debug, Clone, PartialEq)] /// Node address info pub struct NodeEndpoint { - /// IP(V4 or V6) address - pub address: SocketAddr, - /// Connection port. - pub udp_port: u16 + /// IP(V4 or V6) address + pub address: SocketAddr, + /// Connection port. 
+ pub udp_port: u16, } impl NodeEndpoint { - pub fn udp_address(&self) -> SocketAddr { - match self.address { - SocketAddr::V4(a) => SocketAddr::V4(SocketAddrV4::new(*a.ip(), self.udp_port)), - SocketAddr::V6(a) => SocketAddr::V6(SocketAddrV6::new(*a.ip(), self.udp_port, a.flowinfo(), a.scope_id())), - } - } + pub fn udp_address(&self) -> SocketAddr { + match self.address { + SocketAddr::V4(a) => SocketAddr::V4(SocketAddrV4::new(*a.ip(), self.udp_port)), + SocketAddr::V6(a) => SocketAddr::V6(SocketAddrV6::new( + *a.ip(), + self.udp_port, + a.flowinfo(), + a.scope_id(), + )), + } + } - pub fn is_allowed(&self, filter: &IpFilter) -> bool { - (self.is_allowed_by_predefined(&filter.predefined) || filter.custom_allow.iter().any(|ipnet| { - self.address.ip().is_within(ipnet) - })) - && !filter.custom_block.iter().any(|ipnet| { - self.address.ip().is_within(ipnet) - }) - } + pub fn is_allowed(&self, filter: &IpFilter) -> bool { + (self.is_allowed_by_predefined(&filter.predefined) + || filter + .custom_allow + .iter() + .any(|ipnet| self.address.ip().is_within(ipnet))) + && !filter + .custom_block + .iter() + .any(|ipnet| self.address.ip().is_within(ipnet)) + } - pub fn is_allowed_by_predefined(&self, filter: &AllowIP) -> bool { - match filter { - AllowIP::All => true, - AllowIP::Private => self.address.ip().is_usable_private(), - AllowIP::Public => self.address.ip().is_usable_public(), - AllowIP::None => false, - } - } + pub fn is_allowed_by_predefined(&self, filter: &AllowIP) -> bool { + match filter { + AllowIP::All => true, + AllowIP::Private => self.address.ip().is_usable_private(), + AllowIP::Public => self.address.ip().is_usable_public(), + AllowIP::None => false, + } + } - pub fn from_rlp(rlp: &Rlp) -> Result { - let tcp_port = rlp.val_at::(2)?; - let udp_port = rlp.val_at::(1)?; - let addr_bytes = rlp.at(0)?.data()?; - let address = match addr_bytes.len() { - 4 => Ok(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(addr_bytes[0], addr_bytes[1], addr_bytes[2], 
addr_bytes[3]), tcp_port))), - 16 => unsafe { - let o: *const u16 = addr_bytes.as_ptr() as *const u16; - let o = slice::from_raw_parts(o, 8); - Ok(SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::new(o[0], o[1], o[2], o[3], o[4], o[5], o[6], o[7]), tcp_port, 0, 0))) - }, - _ => Err(DecoderError::RlpInconsistentLengthAndData) - }?; - Ok(NodeEndpoint { address, udp_port }) - } + pub fn from_rlp(rlp: &Rlp) -> Result { + let tcp_port = rlp.val_at::(2)?; + let udp_port = rlp.val_at::(1)?; + let addr_bytes = rlp.at(0)?.data()?; + let address = match addr_bytes.len() { + 4 => Ok(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new(addr_bytes[0], addr_bytes[1], addr_bytes[2], addr_bytes[3]), + tcp_port, + ))), + 16 => unsafe { + let o: *const u16 = addr_bytes.as_ptr() as *const u16; + let o = slice::from_raw_parts(o, 8); + Ok(SocketAddr::V6(SocketAddrV6::new( + Ipv6Addr::new(o[0], o[1], o[2], o[3], o[4], o[5], o[6], o[7]), + tcp_port, + 0, + 0, + ))) + }, + _ => Err(DecoderError::RlpInconsistentLengthAndData), + }?; + Ok(NodeEndpoint { address, udp_port }) + } - pub fn to_rlp(&self, rlp: &mut RlpStream) { - match self.address { - SocketAddr::V4(a) => { - rlp.append(&(&a.ip().octets()[..])); - } - SocketAddr::V6(a) => unsafe { - let o: *const u8 = a.ip().segments().as_ptr() as *const u8; - rlp.append(&slice::from_raw_parts(o, 16)); - } - }; - rlp.append(&self.udp_port); - rlp.append(&self.address.port()); - } + pub fn to_rlp(&self, rlp: &mut RlpStream) { + match self.address { + SocketAddr::V4(a) => { + rlp.append(&(&a.ip().octets()[..])); + } + SocketAddr::V6(a) => unsafe { + let o: *const u8 = a.ip().segments().as_ptr() as *const u8; + rlp.append(&slice::from_raw_parts(o, 16)); + }, + }; + rlp.append(&self.udp_port); + rlp.append(&self.address.port()); + } - pub fn to_rlp_list(&self, rlp: &mut RlpStream) { - rlp.begin_list(3); - self.to_rlp(rlp); - } + pub fn to_rlp_list(&self, rlp: &mut RlpStream) { + rlp.begin_list(3); + self.to_rlp(rlp); + } - /// Validates that the tcp port is 
not 0 and that the node is a valid discovery node (i.e. `is_valid_discovery_node()` is true). - /// Sync happens over tcp. - pub fn is_valid_sync_node(&self) -> bool { - self.is_valid_discovery_node() && self.address.port() != 0 - } + /// Validates that the tcp port is not 0 and that the node is a valid discovery node (i.e. `is_valid_discovery_node()` is true). + /// Sync happens over tcp. + pub fn is_valid_sync_node(&self) -> bool { + self.is_valid_discovery_node() && self.address.port() != 0 + } - /// Validates that the udp port is not 0 and address IP is specified. - /// Peer discovery happens over udp. - pub fn is_valid_discovery_node(&self) -> bool { - self.udp_port != 0 && match self.address { - SocketAddr::V4(a) => !a.ip().is_unspecified(), - SocketAddr::V6(a) => !a.ip().is_unspecified() - } - } + /// Validates that the udp port is not 0 and address IP is specified. + /// Peer discovery happens over udp. + pub fn is_valid_discovery_node(&self) -> bool { + self.udp_port != 0 + && match self.address { + SocketAddr::V4(a) => !a.ip().is_unspecified(), + SocketAddr::V6(a) => !a.ip().is_unspecified(), + } + } } impl FromStr for NodeEndpoint { - type Err = Error; + type Err = Error; - /// Create endpoint from string. Performs name resolution if given a host name. - fn from_str(s: &str) -> Result { - let address = s.to_socket_addrs().map(|mut i| i.next()); - match address { - Ok(Some(a)) => Ok(NodeEndpoint { - address: a, - udp_port: a.port() - }), - Ok(None) => bail!(ErrorKind::AddressResolve(None)), - Err(_) => Err(ErrorKind::AddressParse.into()) // always an io::Error of InvalidInput kind - } - } + /// Create endpoint from string. Performs name resolution if given a host name. 
+ fn from_str(s: &str) -> Result { + let address = s.to_socket_addrs().map(|mut i| i.next()); + match address { + Ok(Some(a)) => Ok(NodeEndpoint { + address: a, + udp_port: a.port(), + }), + Ok(None) => bail!(ErrorKind::AddressResolve(None)), + Err(_) => Err(ErrorKind::AddressParse.into()), // always an io::Error of InvalidInput kind + } + } } #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum PeerType { - _Required, - Optional + _Required, + Optional, } /// A type for representing an interaction (contact) with a node at a given time /// that was either a success or a failure. #[derive(Clone, Copy, Debug)] pub enum NodeContact { - Success(SystemTime), - Failure(SystemTime), + Success(SystemTime), + Failure(SystemTime), } impl NodeContact { - fn success() -> NodeContact { - NodeContact::Success(SystemTime::now()) - } + fn success() -> NodeContact { + NodeContact::Success(SystemTime::now()) + } - fn failure() -> NodeContact { - NodeContact::Failure(SystemTime::now()) - } + fn failure() -> NodeContact { + NodeContact::Failure(SystemTime::now()) + } - fn time(&self) -> SystemTime { - match *self { - NodeContact::Success(t) | NodeContact::Failure(t) => t - } - } + fn time(&self) -> SystemTime { + match *self { + NodeContact::Success(t) | NodeContact::Failure(t) => t, + } + } - /// Filters and old contact, returning `None` if it happened longer than a - /// week ago. - fn recent(&self) -> Option<&NodeContact> { - let t = self.time(); - if let Ok(d) = t.elapsed() { - if d < Duration::from_secs(60 * 60 * 24 * 7) { - return Some(self); - } - } + /// Filters and old contact, returning `None` if it happened longer than a + /// week ago. 
+ fn recent(&self) -> Option<&NodeContact> { + let t = self.time(); + if let Ok(d) = t.elapsed() { + if d < Duration::from_secs(60 * 60 * 24 * 7) { + return Some(self); + } + } - None - } + None + } } #[derive(Debug)] pub struct Node { - pub id: NodeId, - pub endpoint: NodeEndpoint, - pub peer_type: PeerType, - pub last_contact: Option, + pub id: NodeId, + pub endpoint: NodeEndpoint, + pub peer_type: PeerType, + pub last_contact: Option, } impl Node { - pub fn new(id: NodeId, endpoint: NodeEndpoint) -> Node { - Node { - id, - endpoint, - peer_type: PeerType::Optional, - last_contact: None, - } - } + pub fn new(id: NodeId, endpoint: NodeEndpoint) -> Node { + Node { + id, + endpoint, + peer_type: PeerType::Optional, + last_contact: None, + } + } } impl Display for Node { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - if self.endpoint.udp_port != self.endpoint.address.port() { - write!(f, "enode://{:x}@{}+{}", self.id, self.endpoint.address, self.endpoint.udp_port)?; - } else { - write!(f, "enode://{:x}@{}", self.id, self.endpoint.address)?; - } - Ok(()) - } + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + if self.endpoint.udp_port != self.endpoint.address.port() { + write!( + f, + "enode://{:x}@{}+{}", + self.id, self.endpoint.address, self.endpoint.udp_port + )?; + } else { + write!(f, "enode://{:x}@{}", self.id, self.endpoint.address)?; + } + Ok(()) + } } impl FromStr for Node { - type Err = Error; - fn from_str(s: &str) -> Result { - let (id, endpoint) = if s.len() > 136 && &s[0..8] == "enode://" && &s[136..137] == "@" { - (s[8..136].parse().map_err(|_| ErrorKind::InvalidNodeId)?, NodeEndpoint::from_str(&s[137..])?) - } - else { - (NodeId::new(), NodeEndpoint::from_str(s)?) 
- }; + type Err = Error; + fn from_str(s: &str) -> Result { + let (id, endpoint) = if s.len() > 136 && &s[0..8] == "enode://" && &s[136..137] == "@" { + ( + s[8..136].parse().map_err(|_| ErrorKind::InvalidNodeId)?, + NodeEndpoint::from_str(&s[137..])?, + ) + } else { + (NodeId::new(), NodeEndpoint::from_str(s)?) + }; - Ok(Node { - id, - endpoint, - peer_type: PeerType::Optional, - last_contact: None, - }) - } + Ok(Node { + id, + endpoint, + peer_type: PeerType::Optional, + last_contact: None, + }) + } } impl PartialEq for Node { - fn eq(&self, other: &Self) -> bool { - self.id == other.id - } + fn eq(&self, other: &Self) -> bool { + self.id == other.id + } } impl Eq for Node {} impl Hash for Node { - fn hash(&self, state: &mut H) where H: Hasher { - self.id.hash(state) - } + fn hash(&self, state: &mut H) + where + H: Hasher, + { + self.id.hash(state) + } } const MAX_NODES: usize = 1024; @@ -246,487 +275,530 @@ const NODES_FILE: &str = "nodes.json"; /// Node table backed by disk file. pub struct NodeTable { - nodes: HashMap, - useless_nodes: HashSet, - path: Option, + nodes: HashMap, + useless_nodes: HashSet, + path: Option, } impl NodeTable { - pub fn new(path: Option) -> NodeTable { - NodeTable { - path: path.clone(), - nodes: NodeTable::load(path), - useless_nodes: HashSet::new(), - } - } + pub fn new(path: Option) -> NodeTable { + NodeTable { + path: path.clone(), + nodes: NodeTable::load(path), + useless_nodes: HashSet::new(), + } + } - /// Add a node to table - pub fn add_node(&mut self, mut node: Node) { - // preserve node last_contact - node.last_contact = self.nodes.get(&node.id).and_then(|n| n.last_contact); - self.nodes.insert(node.id, node); - } + /// Add a node to table + pub fn add_node(&mut self, mut node: Node) { + // preserve node last_contact + node.last_contact = self.nodes.get(&node.id).and_then(|n| n.last_contact); + self.nodes.insert(node.id, node); + } - /// Returns a list of ordered nodes according to their most recent contact - /// and 
filtering useless nodes. The algorithm for creating the sorted nodes - /// is: - /// - Contacts that aren't recent (older than 1 week) are discarded - /// - (1) Nodes with a successful contact are ordered (most recent success first) - /// - (2) Nodes with unknown contact (older than 1 week or new nodes) are randomly shuffled - /// - (3) Nodes with a failed contact are ordered (oldest failure first) - /// - The final result is the concatenation of (1), (2) and (3) - fn ordered_entries(&self) -> Vec<&Node> { - let mut success = Vec::new(); - let mut failures = Vec::new(); - let mut unknown = Vec::new(); + /// Returns a list of ordered nodes according to their most recent contact + /// and filtering useless nodes. The algorithm for creating the sorted nodes + /// is: + /// - Contacts that aren't recent (older than 1 week) are discarded + /// - (1) Nodes with a successful contact are ordered (most recent success first) + /// - (2) Nodes with unknown contact (older than 1 week or new nodes) are randomly shuffled + /// - (3) Nodes with a failed contact are ordered (oldest failure first) + /// - The final result is the concatenation of (1), (2) and (3) + fn ordered_entries(&self) -> Vec<&Node> { + let mut success = Vec::new(); + let mut failures = Vec::new(); + let mut unknown = Vec::new(); - let nodes = self.nodes.values() - .filter(|n| !self.useless_nodes.contains(&n.id)); + let nodes = self + .nodes + .values() + .filter(|n| !self.useless_nodes.contains(&n.id)); - for node in nodes { - // discard contact points older that aren't recent - match node.last_contact.as_ref().and_then(|c| c.recent()) { - Some(&NodeContact::Success(_)) => { - success.push(node); - }, - Some(&NodeContact::Failure(_)) => { - failures.push(node); - }, - None => { - unknown.push(node); - }, - } - } + for node in nodes { + // discard contact points older that aren't recent + match node.last_contact.as_ref().and_then(|c| c.recent()) { + Some(&NodeContact::Success(_)) => { + success.push(node); + } 
+ Some(&NodeContact::Failure(_)) => { + failures.push(node); + } + None => { + unknown.push(node); + } + } + } - success.sort_by(|a, b| { - let a = a.last_contact.expect("vector only contains values with defined last_contact; qed"); - let b = b.last_contact.expect("vector only contains values with defined last_contact; qed"); - // inverse ordering, most recent successes come first - b.time().cmp(&a.time()) - }); + success.sort_by(|a, b| { + let a = a + .last_contact + .expect("vector only contains values with defined last_contact; qed"); + let b = b + .last_contact + .expect("vector only contains values with defined last_contact; qed"); + // inverse ordering, most recent successes come first + b.time().cmp(&a.time()) + }); - failures.sort_by(|a, b| { - let a = a.last_contact.expect("vector only contains values with defined last_contact; qed"); - let b = b.last_contact.expect("vector only contains values with defined last_contact; qed"); - // normal ordering, most distant failures come first - a.time().cmp(&b.time()) - }); + failures.sort_by(|a, b| { + let a = a + .last_contact + .expect("vector only contains values with defined last_contact; qed"); + let b = b + .last_contact + .expect("vector only contains values with defined last_contact; qed"); + // normal ordering, most distant failures come first + a.time().cmp(&b.time()) + }); - rand::thread_rng().shuffle(&mut unknown); + rand::thread_rng().shuffle(&mut unknown); - success.append(&mut unknown); - success.append(&mut failures); - success - } + success.append(&mut unknown); + success.append(&mut failures); + success + } - /// Returns node ids sorted by failure percentage, for nodes with the same failure percentage the absolute number of - /// failures is considered. 
- pub fn nodes(&self, filter: &IpFilter) -> Vec { - self.ordered_entries().iter() - .filter(|n| n.endpoint.is_allowed(&filter)) - .map(|n| n.id) - .collect() - } + /// Returns node ids sorted by failure percentage, for nodes with the same failure percentage the absolute number of + /// failures is considered. + pub fn nodes(&self, filter: &IpFilter) -> Vec { + self.ordered_entries() + .iter() + .filter(|n| n.endpoint.is_allowed(&filter)) + .map(|n| n.id) + .collect() + } - /// Ordered list of all entries by failure percentage, for nodes with the same failure percentage the absolute - /// number of failures is considered. - pub fn entries(&self) -> Vec { - self.ordered_entries().iter().map(|n| NodeEntry { - endpoint: n.endpoint.clone(), - id: n.id, - }).collect() - } + /// Ordered list of all entries by failure percentage, for nodes with the same failure percentage the absolute + /// number of failures is considered. + pub fn entries(&self) -> Vec { + self.ordered_entries() + .iter() + .map(|n| NodeEntry { + endpoint: n.endpoint.clone(), + id: n.id, + }) + .collect() + } - /// Get particular node - pub fn get_mut(&mut self, id: &NodeId) -> Option<&mut Node> { - self.nodes.get_mut(id) - } + /// Get particular node + pub fn get_mut(&mut self, id: &NodeId) -> Option<&mut Node> { + self.nodes.get_mut(id) + } - /// Check if a node exists in the table. - pub fn contains(&self, id: &NodeId) -> bool { - self.nodes.contains_key(id) - } + /// Check if a node exists in the table. 
+ pub fn contains(&self, id: &NodeId) -> bool { + self.nodes.contains_key(id) + } - /// Apply table changes coming from discovery - pub fn update(&mut self, mut update: TableUpdates, reserved: &HashSet) { - for (_, node) in update.added.drain() { - let entry = self.nodes.entry(node.id).or_insert_with(|| Node::new(node.id, node.endpoint.clone())); - entry.endpoint = node.endpoint; - } - for r in update.removed { - if !reserved.contains(&r) { - self.nodes.remove(&r); - } - } - } + /// Apply table changes coming from discovery + pub fn update(&mut self, mut update: TableUpdates, reserved: &HashSet) { + for (_, node) in update.added.drain() { + let entry = self + .nodes + .entry(node.id) + .or_insert_with(|| Node::new(node.id, node.endpoint.clone())); + entry.endpoint = node.endpoint; + } + for r in update.removed { + if !reserved.contains(&r) { + self.nodes.remove(&r); + } + } + } - /// Set last contact as failure for a node - pub fn note_failure(&mut self, id: &NodeId) { - if let Some(node) = self.nodes.get_mut(id) { - node.last_contact = Some(NodeContact::failure()); - } - } + /// Set last contact as failure for a node + pub fn note_failure(&mut self, id: &NodeId) { + if let Some(node) = self.nodes.get_mut(id) { + node.last_contact = Some(NodeContact::failure()); + } + } - /// Set last contact as success for a node - pub fn note_success(&mut self, id: &NodeId) { - if let Some(node) = self.nodes.get_mut(id) { - node.last_contact = Some(NodeContact::success()); - } - } + /// Set last contact as success for a node + pub fn note_success(&mut self, id: &NodeId) { + if let Some(node) = self.nodes.get_mut(id) { + node.last_contact = Some(NodeContact::success()); + } + } - /// Mark as useless, no further attempts to connect until next call to `clear_useless`. - pub fn mark_as_useless(&mut self, id: &NodeId) { - self.useless_nodes.insert(id.clone()); - } + /// Mark as useless, no further attempts to connect until next call to `clear_useless`. 
+ pub fn mark_as_useless(&mut self, id: &NodeId) { + self.useless_nodes.insert(id.clone()); + } - /// Attempt to connect to useless nodes again. - pub fn clear_useless(&mut self) { - self.useless_nodes.clear(); - } + /// Attempt to connect to useless nodes again. + pub fn clear_useless(&mut self) { + self.useless_nodes.clear(); + } - /// Save the nodes.json file. - pub fn save(&self) { - let mut path = match self.path { - Some(ref path) => PathBuf::from(path), - None => return, - }; - if let Err(e) = fs::create_dir_all(&path) { - warn!(target: "network", "Error creating node table directory: {:?}", e); - return; - } - path.push(NODES_FILE); - let node_ids = self.nodes(&IpFilter::default()); - let nodes = node_ids.into_iter() - .map(|id| self.nodes.get(&id).expect("self.nodes() only returns node IDs from self.nodes")) - .take(MAX_NODES) - .map(Into::into) - .collect(); - let table = json::NodeTable { nodes }; + /// Save the nodes.json file. + pub fn save(&self) { + let mut path = match self.path { + Some(ref path) => PathBuf::from(path), + None => return, + }; + if let Err(e) = fs::create_dir_all(&path) { + warn!(target: "network", "Error creating node table directory: {:?}", e); + return; + } + path.push(NODES_FILE); + let node_ids = self.nodes(&IpFilter::default()); + let nodes = node_ids + .into_iter() + .map(|id| { + self.nodes + .get(&id) + .expect("self.nodes() only returns node IDs from self.nodes") + }) + .take(MAX_NODES) + .map(Into::into) + .collect(); + let table = json::NodeTable { nodes }; - match fs::File::create(&path) { - Ok(file) => { - if let Err(e) = serde_json::to_writer_pretty(file, &table) { - warn!(target: "network", "Error writing node table file: {:?}", e); - } - }, - Err(e) => { - warn!(target: "network", "Error creating node table file: {:?}", e); - } - } - } + match fs::File::create(&path) { + Ok(file) => { + if let Err(e) = serde_json::to_writer_pretty(file, &table) { + warn!(target: "network", "Error writing node table file: {:?}", e); 
+ } + } + Err(e) => { + warn!(target: "network", "Error creating node table file: {:?}", e); + } + } + } - fn load(path: Option) -> HashMap { - let path = match path { - Some(path) => PathBuf::from(path).join(NODES_FILE), - None => return Default::default(), - }; + fn load(path: Option) -> HashMap { + let path = match path { + Some(path) => PathBuf::from(path).join(NODES_FILE), + None => return Default::default(), + }; - let file = match fs::File::open(&path) { - Ok(file) => file, - Err(e) => { - debug!(target: "network", "Error opening node table file: {:?}", e); - return Default::default(); - }, - }; - let res: Result = serde_json::from_reader(file); - match res { - Ok(table) => { - table.nodes.into_iter() - .filter_map(|n| n.into_node()) - .map(|n| (n.id, n)) - .collect() - }, - Err(e) => { - warn!(target: "network", "Error reading node table file: {:?}", e); - Default::default() - }, - } - } + let file = match fs::File::open(&path) { + Ok(file) => file, + Err(e) => { + debug!(target: "network", "Error opening node table file: {:?}", e); + return Default::default(); + } + }; + let res: Result = serde_json::from_reader(file); + match res { + Ok(table) => table + .nodes + .into_iter() + .filter_map(|n| n.into_node()) + .map(|n| (n.id, n)) + .collect(), + Err(e) => { + warn!(target: "network", "Error reading node table file: {:?}", e); + Default::default() + } + } + } } impl Drop for NodeTable { - fn drop(&mut self) { - self.save(); - } + fn drop(&mut self) { + self.save(); + } } /// Check if node url is valid pub fn validate_node_url(url: &str) -> Option { - match Node::from_str(url) { - Ok(_) => None, - Err(e) => Some(e) - } + match Node::from_str(url) { + Ok(_) => None, + Err(e) => Some(e), + } } mod json { - use super::*; + use super::*; - #[derive(Serialize, Deserialize)] - pub struct NodeTable { - pub nodes: Vec, - } + #[derive(Serialize, Deserialize)] + pub struct NodeTable { + pub nodes: Vec, + } - #[derive(Serialize, Deserialize)] - pub enum NodeContact { 
- #[serde(rename = "success")] - Success(u64), - #[serde(rename = "failure")] - Failure(u64), - } + #[derive(Serialize, Deserialize)] + pub enum NodeContact { + #[serde(rename = "success")] + Success(u64), + #[serde(rename = "failure")] + Failure(u64), + } - impl NodeContact { - pub fn into_node_contact(self) -> super::NodeContact { - match self { - NodeContact::Success(s) => super::NodeContact::Success( - time::UNIX_EPOCH + Duration::from_secs(s) - ), - NodeContact::Failure(s) => super::NodeContact::Failure( - time::UNIX_EPOCH + Duration::from_secs(s) - ), - } - } - } + impl NodeContact { + pub fn into_node_contact(self) -> super::NodeContact { + match self { + NodeContact::Success(s) => { + super::NodeContact::Success(time::UNIX_EPOCH + Duration::from_secs(s)) + } + NodeContact::Failure(s) => { + super::NodeContact::Failure(time::UNIX_EPOCH + Duration::from_secs(s)) + } + } + } + } - #[derive(Serialize, Deserialize)] - pub struct Node { - pub url: String, - pub last_contact: Option, - } + #[derive(Serialize, Deserialize)] + pub struct Node { + pub url: String, + pub last_contact: Option, + } - impl Node { - pub fn into_node(self) -> Option { - match super::Node::from_str(&self.url) { - Ok(mut node) => { - node.last_contact = self.last_contact.map(|c| c.into_node_contact()); - Some(node) - }, - _ => None, - } - } - } + impl Node { + pub fn into_node(self) -> Option { + match super::Node::from_str(&self.url) { + Ok(mut node) => { + node.last_contact = self.last_contact.map(|c| c.into_node_contact()); + Some(node) + } + _ => None, + } + } + } - impl<'a> From<&'a super::Node> for Node { - fn from(node: &'a super::Node) -> Self { - let last_contact = node.last_contact.and_then(|c| { - match c { - super::NodeContact::Success(t) => - t.duration_since(time::UNIX_EPOCH).ok().map(|d| NodeContact::Success(d.as_secs())), - super::NodeContact::Failure(t) => - t.duration_since(time::UNIX_EPOCH).ok().map(|d| NodeContact::Failure(d.as_secs())), - } - }); + impl<'a> From<&'a 
super::Node> for Node { + fn from(node: &'a super::Node) -> Self { + let last_contact = node.last_contact.and_then(|c| match c { + super::NodeContact::Success(t) => t + .duration_since(time::UNIX_EPOCH) + .ok() + .map(|d| NodeContact::Success(d.as_secs())), + super::NodeContact::Failure(t) => t + .duration_since(time::UNIX_EPOCH) + .ok() + .map(|d| NodeContact::Failure(d.as_secs())), + }); - Node { - url: format!("{}", node), - last_contact - } - } - } + Node { + url: format!("{}", node), + last_contact, + } + } + } } #[cfg(test)] mod tests { - use super::*; - use std::net::{SocketAddr, SocketAddrV4, Ipv4Addr}; - use ethereum_types::H512; - use std::str::FromStr; - use tempdir::TempDir; - use ipnetwork::IpNetwork; + use super::*; + use ethereum_types::H512; + use ipnetwork::IpNetwork; + use std::{ + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + str::FromStr, + }; + use tempdir::TempDir; - #[test] - fn endpoint_parse() { - let endpoint = NodeEndpoint::from_str("123.99.55.44:7770"); - assert!(endpoint.is_ok()); - let v4 = match endpoint.unwrap().address { - SocketAddr::V4(v4address) => v4address, - _ => panic!("should be v4 address") - }; - assert_eq!(SocketAddrV4::new(Ipv4Addr::new(123, 99, 55, 44), 7770), v4); - } + #[test] + fn endpoint_parse() { + let endpoint = NodeEndpoint::from_str("123.99.55.44:7770"); + assert!(endpoint.is_ok()); + let v4 = match endpoint.unwrap().address { + SocketAddr::V4(v4address) => v4address, + _ => panic!("should be v4 address"), + }; + assert_eq!(SocketAddrV4::new(Ipv4Addr::new(123, 99, 55, 44), 7770), v4); + } - #[test] - fn endpoint_parse_empty_ip_string_returns_error() { - let endpoint = NodeEndpoint::from_str(""); - assert!(endpoint.is_err()); - assert_matches!(endpoint.unwrap_err().kind(), &ErrorKind::AddressParse); - } + #[test] + fn endpoint_parse_empty_ip_string_returns_error() { + let endpoint = NodeEndpoint::from_str(""); + assert!(endpoint.is_err()); + assert_matches!(endpoint.unwrap_err().kind(), &ErrorKind::AddressParse); 
+ } - #[test] - fn endpoint_parse_invalid_ip_string_returns_error() { - let endpoint = NodeEndpoint::from_str("beef"); - assert!(endpoint.is_err()); - assert_matches!(endpoint.unwrap_err().kind(), &ErrorKind::AddressParse); - } + #[test] + fn endpoint_parse_invalid_ip_string_returns_error() { + let endpoint = NodeEndpoint::from_str("beef"); + assert!(endpoint.is_err()); + assert_matches!(endpoint.unwrap_err().kind(), &ErrorKind::AddressParse); + } - #[test] - fn endpoint_parse_valid_ip_without_port_returns_error() { - let endpoint = NodeEndpoint::from_str("123.123.123.123"); - assert!(endpoint.is_err()); - assert_matches!(endpoint.unwrap_err().kind(), &ErrorKind::AddressParse); - let endpoint = NodeEndpoint::from_str("123.123.123.123:123"); - assert!(endpoint.is_ok()) - } + #[test] + fn endpoint_parse_valid_ip_without_port_returns_error() { + let endpoint = NodeEndpoint::from_str("123.123.123.123"); + assert!(endpoint.is_err()); + assert_matches!(endpoint.unwrap_err().kind(), &ErrorKind::AddressParse); + let endpoint = NodeEndpoint::from_str("123.123.123.123:123"); + assert!(endpoint.is_ok()) + } - #[test] - fn node_parse() { - assert!(validate_node_url("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").is_none()); - let node = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770"); - assert!(node.is_ok()); - let node = node.unwrap(); - let v4 = match node.endpoint.address { - SocketAddr::V4(v4address) => v4address, - _ => panic!("should ve v4 address") - }; - assert_eq!(SocketAddrV4::new(Ipv4Addr::new(22, 99, 55, 44), 7770), v4); - assert_eq!( + #[test] + fn node_parse() { + 
assert!(validate_node_url("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").is_none()); + let node = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770"); + assert!(node.is_ok()); + let node = node.unwrap(); + let v4 = match node.endpoint.address { + SocketAddr::V4(v4address) => v4address, + _ => panic!("should ve v4 address"), + }; + assert_eq!(SocketAddrV4::new(Ipv4Addr::new(22, 99, 55, 44), 7770), v4); + assert_eq!( H512::from_str("a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(), node.id); - } + } - #[test] - fn node_parse_fails_for_invalid_urls() { - let node = Node::from_str("foo"); - assert!(node.is_err()); - assert_matches!(node.unwrap_err().kind(), &ErrorKind::AddressParse); + #[test] + fn node_parse_fails_for_invalid_urls() { + let node = Node::from_str("foo"); + assert!(node.is_err()); + assert_matches!(node.unwrap_err().kind(), &ErrorKind::AddressParse); - let node = Node::from_str("enode://foo@bar"); - assert!(node.is_err()); - assert_matches!(node.unwrap_err().kind(), &ErrorKind::AddressParse); - } + let node = Node::from_str("enode://foo@bar"); + assert!(node.is_err()); + assert_matches!(node.unwrap_err().kind(), &ErrorKind::AddressParse); + } - #[test] - fn table_last_contact_order() { - let node1 = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); - let node2 = Node::from_str("enode://b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); - let node3 = 
Node::from_str("enode://c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); - let node4 = Node::from_str("enode://d979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); - let node5 = Node::from_str("enode://e979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); - let node6 = Node::from_str("enode://f979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); - let id1 = H512::from_str("a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); - let id2 = H512::from_str("b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); - let id3 = H512::from_str("c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); - let id4 = H512::from_str("d979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); - let id5 = H512::from_str("e979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); - let id6 = H512::from_str("f979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); - let mut table = NodeTable::new(None); + #[test] + fn table_last_contact_order() { + let node1 = 
Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); + let node2 = Node::from_str("enode://b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); + let node3 = Node::from_str("enode://c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); + let node4 = Node::from_str("enode://d979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); + let node5 = Node::from_str("enode://e979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); + let node6 = Node::from_str("enode://f979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); + let id1 = H512::from_str("a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); + let id2 = H512::from_str("b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); + let id3 = H512::from_str("c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); + let id4 = H512::from_str("d979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); + let id5 = 
H512::from_str("e979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); + let id6 = H512::from_str("f979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); + let mut table = NodeTable::new(None); - table.add_node(node1); - table.add_node(node2); - table.add_node(node3); - table.add_node(node4); - table.add_node(node5); - table.add_node(node6); + table.add_node(node1); + table.add_node(node2); + table.add_node(node3); + table.add_node(node4); + table.add_node(node5); + table.add_node(node6); - // failures - nodes 1 & 2 - table.note_failure(&id1); - table.note_failure(&id2); + // failures - nodes 1 & 2 + table.note_failure(&id1); + table.note_failure(&id2); - // success - nodes 3 & 4 - table.note_success(&id3); - table.note_success(&id4); + // success - nodes 3 & 4 + table.note_success(&id3); + table.note_success(&id4); - // success - node 5 (old contact) - table.get_mut(&id5).unwrap().last_contact = Some(NodeContact::Success(time::UNIX_EPOCH)); + // success - node 5 (old contact) + table.get_mut(&id5).unwrap().last_contact = Some(NodeContact::Success(time::UNIX_EPOCH)); - // unknown - node 6 + // unknown - node 6 - // nodes are also ordered according to their addition time - // - // nanosecond precision lost since mac os x high sierra update so let's not compare their order - // https://github.com/paritytech/parity-ethereum/issues/9632 - let r = table.nodes(&IpFilter::default()); + // nodes are also ordered according to their addition time + // + // nanosecond precision lost since mac os x high sierra update so let's not compare their order + // https://github.com/paritytech/parity-ethereum/issues/9632 + let r = table.nodes(&IpFilter::default()); - // most recent success - assert!( - (r[0] == id4 && r[1] == id3) || - (r[0] == id3 && r[1] == id4) - ); + // most recent success + assert!((r[0] == id4 
&& r[1] == id3) || (r[0] == id3 && r[1] == id4)); - // unknown (old contacts and new nodes), randomly shuffled - assert!( - (r[2] == id5 && r[3] == id6) || - (r[2] == id6 && r[3] == id5) - ); + // unknown (old contacts and new nodes), randomly shuffled + assert!((r[2] == id5 && r[3] == id6) || (r[2] == id6 && r[3] == id5)); - // oldest failure - assert!( - (r[4] == id1 && r[5] == id2) || - (r[4] == id2 && r[5] == id1) - ); - } + // oldest failure + assert!((r[4] == id1 && r[5] == id2) || (r[4] == id2 && r[5] == id1)); + } - #[test] - fn table_save_load() { - let tempdir = TempDir::new("").unwrap(); - let node1 = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); - let node2 = Node::from_str("enode://b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); - let node3 = Node::from_str("enode://c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); - let id1 = H512::from_str("a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); - let id2 = H512::from_str("b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); - let id3 = H512::from_str("c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); + #[test] + fn table_save_load() { + let tempdir = TempDir::new("").unwrap(); + let node1 = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); + let node2 = 
Node::from_str("enode://b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); + let node3 = Node::from_str("enode://c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap(); + let id1 = H512::from_str("a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); + let id2 = H512::from_str("b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); + let id3 = H512::from_str("c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap(); - { - let mut table = NodeTable::new(Some(tempdir.path().to_str().unwrap().to_owned())); - table.add_node(node1); - table.add_node(node2); - table.add_node(node3); + { + let mut table = NodeTable::new(Some(tempdir.path().to_str().unwrap().to_owned())); + table.add_node(node1); + table.add_node(node2); + table.add_node(node3); - table.note_success(&id2); - table.note_failure(&id3); - } + table.note_success(&id2); + table.note_failure(&id3); + } - { - let table = NodeTable::new(Some(tempdir.path().to_str().unwrap().to_owned())); - let r = table.nodes(&IpFilter::default()); - assert_eq!(r[0][..], id2[..]); // latest success - assert_eq!(r[1][..], id1[..]); // unknown - assert_eq!(r[2][..], id3[..]); // oldest failure - } - } + { + let table = NodeTable::new(Some(tempdir.path().to_str().unwrap().to_owned())); + let r = table.nodes(&IpFilter::default()); + assert_eq!(r[0][..], id2[..]); // latest success + assert_eq!(r[1][..], id1[..]); // unknown + assert_eq!(r[2][..], id3[..]); // oldest failure + } + } - #[test] - fn custom_allow() { - let filter = IpFilter { - predefined: AllowIP::None, - 
custom_allow: vec![IpNetwork::from_str(&"10.0.0.0/8").unwrap(), IpNetwork::from_str(&"1.0.0.0/8").unwrap()], - custom_block: vec![], - }; - assert!(!NodeEndpoint::from_str("123.99.55.44:7770").unwrap().is_allowed(&filter)); - assert!(NodeEndpoint::from_str("10.0.0.1:7770").unwrap().is_allowed(&filter)); - assert!(NodeEndpoint::from_str("1.0.0.55:5550").unwrap().is_allowed(&filter)); - } + #[test] + fn custom_allow() { + let filter = IpFilter { + predefined: AllowIP::None, + custom_allow: vec![ + IpNetwork::from_str(&"10.0.0.0/8").unwrap(), + IpNetwork::from_str(&"1.0.0.0/8").unwrap(), + ], + custom_block: vec![], + }; + assert!(!NodeEndpoint::from_str("123.99.55.44:7770") + .unwrap() + .is_allowed(&filter)); + assert!(NodeEndpoint::from_str("10.0.0.1:7770") + .unwrap() + .is_allowed(&filter)); + assert!(NodeEndpoint::from_str("1.0.0.55:5550") + .unwrap() + .is_allowed(&filter)); + } - #[test] - fn custom_block() { - let filter = IpFilter { - predefined: AllowIP::All, - custom_allow: vec![], - custom_block: vec![IpNetwork::from_str(&"10.0.0.0/8").unwrap(), IpNetwork::from_str(&"1.0.0.0/8").unwrap()], - }; - assert!(NodeEndpoint::from_str("123.99.55.44:7770").unwrap().is_allowed(&filter)); - assert!(!NodeEndpoint::from_str("10.0.0.1:7770").unwrap().is_allowed(&filter)); - assert!(!NodeEndpoint::from_str("1.0.0.55:5550").unwrap().is_allowed(&filter)); - } + #[test] + fn custom_block() { + let filter = IpFilter { + predefined: AllowIP::All, + custom_allow: vec![], + custom_block: vec![ + IpNetwork::from_str(&"10.0.0.0/8").unwrap(), + IpNetwork::from_str(&"1.0.0.0/8").unwrap(), + ], + }; + assert!(NodeEndpoint::from_str("123.99.55.44:7770") + .unwrap() + .is_allowed(&filter)); + assert!(!NodeEndpoint::from_str("10.0.0.1:7770") + .unwrap() + .is_allowed(&filter)); + assert!(!NodeEndpoint::from_str("1.0.0.55:5550") + .unwrap() + .is_allowed(&filter)); + } - #[test] - fn custom_allow_ipv6() { - let filter = IpFilter { - predefined: AllowIP::None, - custom_allow: 
vec![IpNetwork::from_str(&"fc00::/8").unwrap()], - custom_block: vec![], - }; - assert!(NodeEndpoint::from_str("[fc00::]:5550").unwrap().is_allowed(&filter)); - assert!(!NodeEndpoint::from_str("[fd00::]:5550").unwrap().is_allowed(&filter)); - } + #[test] + fn custom_allow_ipv6() { + let filter = IpFilter { + predefined: AllowIP::None, + custom_allow: vec![IpNetwork::from_str(&"fc00::/8").unwrap()], + custom_block: vec![], + }; + assert!(NodeEndpoint::from_str("[fc00::]:5550") + .unwrap() + .is_allowed(&filter)); + assert!(!NodeEndpoint::from_str("[fd00::]:5550") + .unwrap() + .is_allowed(&filter)); + } - #[test] - fn custom_block_ipv6() { - let filter = IpFilter { - predefined: AllowIP::All, - custom_allow: vec![], - custom_block: vec![IpNetwork::from_str(&"fc00::/8").unwrap()], - }; - assert!(!NodeEndpoint::from_str("[fc00::]:5550").unwrap().is_allowed(&filter)); - assert!(NodeEndpoint::from_str("[fd00::]:5550").unwrap().is_allowed(&filter)); - } + #[test] + fn custom_block_ipv6() { + let filter = IpFilter { + predefined: AllowIP::All, + custom_allow: vec![], + custom_block: vec![IpNetwork::from_str(&"fc00::/8").unwrap()], + }; + assert!(!NodeEndpoint::from_str("[fc00::]:5550") + .unwrap() + .is_allowed(&filter)); + assert!(NodeEndpoint::from_str("[fd00::]:5550") + .unwrap() + .is_allowed(&filter)); + } } diff --git a/util/network-devp2p/src/service.rs b/util/network-devp2p/src/service.rs index dfacec4be..19e128003 100644 --- a/util/network-devp2p/src/service.rs +++ b/util/network-devp2p/src/service.rs @@ -14,182 +14,201 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use network::{Error, NetworkConfiguration, NetworkProtocolHandler, NonReservedPeerMode}; -use network::{NetworkContext, PeerId, ProtocolId, NetworkIoMessage}; +use ansi_term::Colour; use host::Host; use io::*; +use network::{ + ConnectionFilter, Error, NetworkConfiguration, NetworkContext, NetworkIoMessage, + NetworkProtocolHandler, NonReservedPeerMode, PeerId, ProtocolId, +}; use parking_lot::RwLock; -use std::net::SocketAddr; -use std::ops::RangeInclusive; -use std::sync::Arc; -use ansi_term::Colour; -use network::ConnectionFilter; +use std::{net::SocketAddr, ops::RangeInclusive, sync::Arc}; struct HostHandler { - public_url: RwLock> + public_url: RwLock>, } impl IoHandler for HostHandler { - fn message(&self, _io: &IoContext, message: &NetworkIoMessage) { - if let NetworkIoMessage::NetworkStarted(ref public_url) = *message { - let mut url = self.public_url.write(); - if url.as_ref().map_or(true, |uref| uref != public_url) { - info!(target: "network", "Public node URL: {}", Colour::White.bold().paint(AsRef::::as_ref(public_url))); - } - *url = Some(public_url.to_owned()); - } - } + fn message(&self, _io: &IoContext, message: &NetworkIoMessage) { + if let NetworkIoMessage::NetworkStarted(ref public_url) = *message { + let mut url = self.public_url.write(); + if url.as_ref().map_or(true, |uref| uref != public_url) { + info!(target: "network", "Public node URL: {}", Colour::White.bold().paint(AsRef::::as_ref(public_url))); + } + *url = Some(public_url.to_owned()); + } + } } /// IO Service with networking /// `Message` defines a notification data type. 
pub struct NetworkService { - io_service: IoService, - host_info: String, - host: RwLock>>, - host_handler: Arc, - config: NetworkConfiguration, - filter: Option>, + io_service: IoService, + host_info: String, + host: RwLock>>, + host_handler: Arc, + config: NetworkConfiguration, + filter: Option>, } impl NetworkService { - /// Starts IO event loop - pub fn new(config: NetworkConfiguration, filter: Option>) -> Result { - let host_handler = Arc::new(HostHandler { public_url: RwLock::new(None) }); - let io_service = IoService::::start()?; + /// Starts IO event loop + pub fn new( + config: NetworkConfiguration, + filter: Option>, + ) -> Result { + let host_handler = Arc::new(HostHandler { + public_url: RwLock::new(None), + }); + let io_service = IoService::::start()?; - Ok(NetworkService { - io_service, - host_info: config.client_version.clone(), - host: RwLock::new(None), - config, - host_handler, - filter, - }) - } + Ok(NetworkService { + io_service, + host_info: config.client_version.clone(), + host: RwLock::new(None), + config, + host_handler, + filter, + }) + } - /// Register a new protocol handler with the event loop. - pub fn register_protocol( - &self, - handler: Arc, - protocol: ProtocolId, - // version id + packet count - versions: &[(u8, u8)] - ) -> Result<(), Error> { - self.io_service.send_message(NetworkIoMessage::AddHandler { - handler, - protocol, - versions: versions.to_vec(), - })?; - Ok(()) - } + /// Register a new protocol handler with the event loop. 
+ pub fn register_protocol( + &self, + handler: Arc, + protocol: ProtocolId, + // version id + packet count + versions: &[(u8, u8)], + ) -> Result<(), Error> { + self.io_service.send_message(NetworkIoMessage::AddHandler { + handler, + protocol, + versions: versions.to_vec(), + })?; + Ok(()) + } - /// Returns host identifier string as advertised to other peers - pub fn host_info(&self) -> String { - self.host_info.clone() - } + /// Returns host identifier string as advertised to other peers + pub fn host_info(&self) -> String { + self.host_info.clone() + } - /// Returns underlying io service. - pub fn io(&self) -> &IoService { - &self.io_service - } + /// Returns underlying io service. + pub fn io(&self) -> &IoService { + &self.io_service + } - /// Returns the number of peers allowed. - pub fn num_peers_range(&self) -> RangeInclusive { - self.config.min_peers..=self.config.max_peers - } + /// Returns the number of peers allowed. + pub fn num_peers_range(&self) -> RangeInclusive { + self.config.min_peers..=self.config.max_peers + } - /// Returns external url if available. - pub fn external_url(&self) -> Option { - let host = self.host.read(); - host.as_ref().and_then(|h| h.external_url()) - } + /// Returns external url if available. + pub fn external_url(&self) -> Option { + let host = self.host.read(); + host.as_ref().and_then(|h| h.external_url()) + } - /// Returns external url if available. - pub fn local_url(&self) -> Option { - let host = self.host.read(); - host.as_ref().map(|h| h.local_url()) - } + /// Returns external url if available. + pub fn local_url(&self) -> Option { + let host = self.host.read(); + host.as_ref().map(|h| h.local_url()) + } - /// Start network IO. - /// - /// In case of error, also returns the listening address for better error reporting. 
- pub fn start(&self) -> Result<(), (Error, Option)> { - let mut host = self.host.write(); - let listen_addr = self.config.listen_address; - if host.is_none() { - let h = Arc::new(Host::new(self.config.clone(), self.filter.clone()) - .map_err(|err| (err, listen_addr))?); - self.io_service.register_handler(h.clone()) - .map_err(|err| (err.into(), listen_addr))?; - *host = Some(h); - } + /// Start network IO. + /// + /// In case of error, also returns the listening address for better error reporting. + pub fn start(&self) -> Result<(), (Error, Option)> { + let mut host = self.host.write(); + let listen_addr = self.config.listen_address; + if host.is_none() { + let h = Arc::new( + Host::new(self.config.clone(), self.filter.clone()) + .map_err(|err| (err, listen_addr))?, + ); + self.io_service + .register_handler(h.clone()) + .map_err(|err| (err.into(), listen_addr))?; + *host = Some(h); + } - if self.host_handler.public_url.read().is_none() { - self.io_service.register_handler(self.host_handler.clone()) - .map_err(|err| (err.into(), listen_addr))?; - } + if self.host_handler.public_url.read().is_none() { + self.io_service + .register_handler(self.host_handler.clone()) + .map_err(|err| (err.into(), listen_addr))?; + } - Ok(()) - } + Ok(()) + } - /// Stop network IO. - pub fn stop(&self) { - let mut host = self.host.write(); - if let Some(ref host) = *host { - let io = IoContext::new(self.io_service.channel(), 0); //TODO: take token id from host - host.stop(&io); - } - *host = None; - } + /// Stop network IO. + pub fn stop(&self) { + let mut host = self.host.write(); + if let Some(ref host) = *host { + let io = IoContext::new(self.io_service.channel(), 0); //TODO: take token id from host + host.stop(&io); + } + *host = None; + } - /// Get a list of all connected peers by id. - pub fn connected_peers(&self) -> Vec { - self.host.read().as_ref().map(|h| h.connected_peers()).unwrap_or_else(Vec::new) - } + /// Get a list of all connected peers by id. 
+ pub fn connected_peers(&self) -> Vec { + self.host + .read() + .as_ref() + .map(|h| h.connected_peers()) + .unwrap_or_else(Vec::new) + } - /// Try to add a reserved peer. - pub fn add_reserved_peer(&self, peer: &str) -> Result<(), Error> { - let host = self.host.read(); - if let Some(ref host) = *host { - host.add_reserved_node(peer) - } else { - Ok(()) - } - } + /// Try to add a reserved peer. + pub fn add_reserved_peer(&self, peer: &str) -> Result<(), Error> { + let host = self.host.read(); + if let Some(ref host) = *host { + host.add_reserved_node(peer) + } else { + Ok(()) + } + } - /// Try to remove a reserved peer. - pub fn remove_reserved_peer(&self, peer: &str) -> Result<(), Error> { - let host = self.host.read(); - if let Some(ref host) = *host { - host.remove_reserved_node(peer) - } else { - Ok(()) - } - } + /// Try to remove a reserved peer. + pub fn remove_reserved_peer(&self, peer: &str) -> Result<(), Error> { + let host = self.host.read(); + if let Some(ref host) = *host { + host.remove_reserved_node(peer) + } else { + Ok(()) + } + } - /// Set the non-reserved peer mode. - pub fn set_non_reserved_mode(&self, mode: NonReservedPeerMode) { - let host = self.host.read(); - if let Some(ref host) = *host { - let io_ctxt = IoContext::new(self.io_service.channel(), 0); - host.set_non_reserved_mode(mode, &io_ctxt); - } - } + /// Set the non-reserved peer mode. 
+ pub fn set_non_reserved_mode(&self, mode: NonReservedPeerMode) { + let host = self.host.read(); + if let Some(ref host) = *host { + let io_ctxt = IoContext::new(self.io_service.channel(), 0); + host.set_non_reserved_mode(mode, &io_ctxt); + } + } - /// Executes action in the network context - pub fn with_context(&self, protocol: ProtocolId, action: F) where F: FnOnce(&NetworkContext) { - let io = IoContext::new(self.io_service.channel(), 0); - let host = self.host.read(); - if let Some(ref host) = host.as_ref() { - host.with_context(protocol, &io, action); - }; - } + /// Executes action in the network context + pub fn with_context(&self, protocol: ProtocolId, action: F) + where + F: FnOnce(&NetworkContext), + { + let io = IoContext::new(self.io_service.channel(), 0); + let host = self.host.read(); + if let Some(ref host) = host.as_ref() { + host.with_context(protocol, &io, action); + }; + } - /// Evaluates function in the network context - pub fn with_context_eval(&self, protocol: ProtocolId, action: F) -> Option where F: FnOnce(&NetworkContext) -> T { - let io = IoContext::new(self.io_service.channel(), 0); - let host = self.host.read(); - host.as_ref().map(|ref host| host.with_context_eval(protocol, &io, action)) - } + /// Evaluates function in the network context + pub fn with_context_eval(&self, protocol: ProtocolId, action: F) -> Option + where + F: FnOnce(&NetworkContext) -> T, + { + let io = IoContext::new(self.io_service.channel(), 0); + let host = self.host.read(); + host.as_ref() + .map(|ref host| host.with_context_eval(protocol, &io, action)) + } } diff --git a/util/network-devp2p/src/session.rs b/util/network-devp2p/src/session.rs index 6cecaf361..e1fb6ce75 100644 --- a/util/network-devp2p/src/session.rs +++ b/util/network-devp2p/src/session.rs @@ -14,24 +14,30 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use std::{str, io}; -use std::net::SocketAddr; -use std::collections::HashMap; -use std::time::{Duration, Instant}; +use std::{ + collections::HashMap, + io, + net::SocketAddr, + str, + time::{Duration, Instant}, +}; -use mio::*; -use mio::deprecated::{Handler, EventLoop}; -use mio::tcp::*; +use connection::{Connection, EncryptedConnection, Packet, MAX_PAYLOAD_SIZE}; use ethereum_types::H256; -use rlp::{Rlp, RlpStream, EMPTY_LIST_RLP}; -use connection::{EncryptedConnection, Packet, Connection, MAX_PAYLOAD_SIZE}; use handshake::Handshake; -use io::{IoContext, StreamToken}; -use network::{Error, ErrorKind, DisconnectReason, SessionInfo, ProtocolId, PeerCapabilityInfo}; -use network::SessionCapabilityInfo; -use network::client_version::ClientVersion; use host::*; +use io::{IoContext, StreamToken}; +use mio::{ + deprecated::{EventLoop, Handler}, + tcp::*, + *, +}; +use network::{ + client_version::ClientVersion, DisconnectReason, Error, ErrorKind, PeerCapabilityInfo, + ProtocolId, SessionCapabilityInfo, SessionInfo, +}; use node_table::NodeId; +use rlp::{Rlp, RlpStream, EMPTY_LIST_RLP}; use snappy; // Timeout must be less than (interval - 1). @@ -42,51 +48,51 @@ const MIN_COMPRESSION_PROTOCOL_VERSION: u32 = 5; #[derive(Debug, Clone)] enum ProtocolState { - // Packets pending protocol on_connect event return. - Pending(Vec<(Vec, u8)>), - // Protocol connected. - Connected, + // Packets pending protocol on_connect event return. + Pending(Vec<(Vec, u8)>), + // Protocol connected. + Connected, } /// Peer session over encrypted connection. /// When created waits for Hello packet exchange and signals ready state. /// Sends and receives protocol packets and handles basic packets such as ping/pong and disconnect. pub struct Session { - /// Shared session information - pub info: SessionInfo, - /// Session ready flag. Set after successful Hello packet exchange - had_hello: bool, - /// Session is no longer active flag. 
- expired: bool, - ping_time: Instant, - pong_time: Option, - state: State, - // Protocol states -- accumulates pending packets until signaled as ready. - protocol_states: HashMap, - compression: bool, + /// Shared session information + pub info: SessionInfo, + /// Session ready flag. Set after successful Hello packet exchange + had_hello: bool, + /// Session is no longer active flag. + expired: bool, + ping_time: Instant, + pong_time: Option, + state: State, + // Protocol states -- accumulates pending packets until signaled as ready. + protocol_states: HashMap, + compression: bool, } enum State { - Handshake(Handshake), - Session(EncryptedConnection), + Handshake(Handshake), + Session(EncryptedConnection), } /// Structure used to report various session events. pub enum SessionData { - None, - /// Session is ready to send/receive packets. - Ready, - /// A packet has been received - Packet { - /// Packet data - data: Vec, - /// Packet protocol ID - protocol: [u8; 3], - /// Zero based packet ID - packet_id: u8, - }, - /// Session has more data to be read - Continue, + None, + /// Session is ready to send/receive packets. + Ready, + /// A packet has been received + Packet { + /// Packet data + data: Vec, + /// Packet protocol ID + protocol: [u8; 3], + /// Zero based packet ID + packet_id: u8, + }, + /// Session has more data to be read + Continue, } const PACKET_HELLO: u8 = 0x80; @@ -99,418 +105,543 @@ const PACKET_USER: u8 = 0x10; const PACKET_LAST: u8 = 0x7f; impl Session { - /// Create a new session out of completed handshake. This clones the handshake connection object - /// and leaves the handshake in limbo to be de-registered from the event loop. 
- pub fn new(io: &IoContext, socket: TcpStream, token: StreamToken, id: Option<&NodeId>, - nonce: &H256, host: &HostInfo) -> Result - where Message: Send + Clone + Sync + 'static { - let originated = id.is_some(); - let mut handshake = Handshake::new(token, id, socket, nonce).expect("Can't create handshake"); - let local_addr = handshake.connection.local_addr_str(); - handshake.start(io, host, originated)?; - Ok(Session { - state: State::Handshake(handshake), - had_hello: false, - info: SessionInfo { - id: id.cloned(), - client_version: ClientVersion::from(""), - protocol_version: 0, - capabilities: Vec::new(), - peer_capabilities: Vec::new(), - ping: None, - originated, - remote_address: "Handshake".to_owned(), - local_address: local_addr, - }, - ping_time: Instant::now(), - pong_time: None, - expired: false, - protocol_states: HashMap::new(), - compression: false, - }) - } + /// Create a new session out of completed handshake. This clones the handshake connection object + /// and leaves the handshake in limbo to be de-registered from the event loop. 
+ pub fn new( + io: &IoContext, + socket: TcpStream, + token: StreamToken, + id: Option<&NodeId>, + nonce: &H256, + host: &HostInfo, + ) -> Result + where + Message: Send + Clone + Sync + 'static, + { + let originated = id.is_some(); + let mut handshake = + Handshake::new(token, id, socket, nonce).expect("Can't create handshake"); + let local_addr = handshake.connection.local_addr_str(); + handshake.start(io, host, originated)?; + Ok(Session { + state: State::Handshake(handshake), + had_hello: false, + info: SessionInfo { + id: id.cloned(), + client_version: ClientVersion::from(""), + protocol_version: 0, + capabilities: Vec::new(), + peer_capabilities: Vec::new(), + ping: None, + originated, + remote_address: "Handshake".to_owned(), + local_address: local_addr, + }, + ping_time: Instant::now(), + pong_time: None, + expired: false, + protocol_states: HashMap::new(), + compression: false, + }) + } - fn complete_handshake(&mut self, io: &IoContext, host: &HostInfo) -> Result<(), Error> where Message: Send + Sync + Clone { - let connection = if let State::Handshake(ref mut h) = self.state { - self.info.id = Some(h.id); - self.info.remote_address = h.connection.remote_addr_str(); - EncryptedConnection::new(h)? - } else { - panic!("Unexpected state"); - }; - self.state = State::Session(connection); - self.write_hello(io, host)?; - Ok(()) - } + fn complete_handshake( + &mut self, + io: &IoContext, + host: &HostInfo, + ) -> Result<(), Error> + where + Message: Send + Sync + Clone, + { + let connection = if let State::Handshake(ref mut h) = self.state { + self.info.id = Some(h.id); + self.info.remote_address = h.connection.remote_addr_str(); + EncryptedConnection::new(h)? 
+ } else { + panic!("Unexpected state"); + }; + self.state = State::Session(connection); + self.write_hello(io, host)?; + Ok(()) + } - fn connection(&self) -> &Connection { - match self.state { - State::Handshake(ref h) => &h.connection, - State::Session(ref s) => &s.connection, - } - } + fn connection(&self) -> &Connection { + match self.state { + State::Handshake(ref h) => &h.connection, + State::Session(ref s) => &s.connection, + } + } - /// Get id of the remote peer - pub fn id(&self) -> Option<&NodeId> { - self.info.id.as_ref() - } + /// Get id of the remote peer + pub fn id(&self) -> Option<&NodeId> { + self.info.id.as_ref() + } - /// Check if session is ready to send/receive data - pub fn is_ready(&self) -> bool { - self.had_hello - } + /// Check if session is ready to send/receive data + pub fn is_ready(&self) -> bool { + self.had_hello + } - /// Mark this session as inactive to be deleted lated. - pub fn set_expired(&mut self) { - self.expired = true; - } + /// Mark this session as inactive to be deleted lated. + pub fn set_expired(&mut self) { + self.expired = true; + } - /// Check if this session is expired. - pub fn expired(&self) -> bool { - self.expired - } + /// Check if this session is expired. + pub fn expired(&self) -> bool { + self.expired + } - /// Check if this session is over and there is nothing to be sent. - pub fn done(&self) -> bool { - self.expired() && !self.connection().is_sending() - } + /// Check if this session is over and there is nothing to be sent. + pub fn done(&self) -> bool { + self.expired() && !self.connection().is_sending() + } - /// Get remote peer address - pub fn remote_addr(&self) -> io::Result { - self.connection().remote_addr() - } + /// Get remote peer address + pub fn remote_addr(&self) -> io::Result { + self.connection().remote_addr() + } - /// Readable IO handler. Returns packet data if available. 
- pub fn readable(&mut self, io: &IoContext, host: &HostInfo) -> Result where Message: Send + Sync + Clone { - if self.expired() { - return Ok(SessionData::None) - } - let mut create_session = false; - let mut packet_data = None; - match self.state { - State::Handshake(ref mut h) => { - h.readable(io, host)?; - if h.done() { - create_session = true; - } - } - State::Session(ref mut c) => { - match c.readable(io)? { - data @ Some(_) => packet_data = data, - None => return Ok(SessionData::None) - } - } - } - if let Some(data) = packet_data { - return Ok(self.read_packet(io, &data, host)?); - } - if create_session { - self.complete_handshake(io, host)?; - io.update_registration(self.token()).unwrap_or_else(|e| debug!(target: "network", "Token registration error: {:?}", e)); - } - Ok(SessionData::None) - } + /// Readable IO handler. Returns packet data if available. + pub fn readable( + &mut self, + io: &IoContext, + host: &HostInfo, + ) -> Result + where + Message: Send + Sync + Clone, + { + if self.expired() { + return Ok(SessionData::None); + } + let mut create_session = false; + let mut packet_data = None; + match self.state { + State::Handshake(ref mut h) => { + h.readable(io, host)?; + if h.done() { + create_session = true; + } + } + State::Session(ref mut c) => match c.readable(io)? { + data @ Some(_) => packet_data = data, + None => return Ok(SessionData::None), + }, + } + if let Some(data) = packet_data { + return Ok(self.read_packet(io, &data, host)?); + } + if create_session { + self.complete_handshake(io, host)?; + io.update_registration(self.token()) + .unwrap_or_else(|e| debug!(target: "network", "Token registration error: {:?}", e)); + } + Ok(SessionData::None) + } - /// Writable IO handler. Sends pending packets. 
- pub fn writable(&mut self, io: &IoContext, _host: &HostInfo) -> Result<(), Error> where Message: Send + Sync + Clone { - match self.state { - State::Handshake(ref mut h) => h.writable(io), - State::Session(ref mut s) => s.writable(io), - } - } + /// Writable IO handler. Sends pending packets. + pub fn writable( + &mut self, + io: &IoContext, + _host: &HostInfo, + ) -> Result<(), Error> + where + Message: Send + Sync + Clone, + { + match self.state { + State::Handshake(ref mut h) => h.writable(io), + State::Session(ref mut s) => s.writable(io), + } + } - /// Checks if peer supports given capability - pub fn have_capability(&self, protocol: [u8; 3]) -> bool { - self.info.capabilities.iter().any(|c| c.protocol == protocol) - } + /// Checks if peer supports given capability + pub fn have_capability(&self, protocol: [u8; 3]) -> bool { + self.info + .capabilities + .iter() + .any(|c| c.protocol == protocol) + } - /// Checks if peer supports given capability - pub fn capability_version(&self, protocol: [u8; 3]) -> Option { - self.info.capabilities.iter().filter_map(|c| if c.protocol == protocol { Some(c.version) } else { None }).max() - } + /// Checks if peer supports given capability + pub fn capability_version(&self, protocol: [u8; 3]) -> Option { + self.info + .capabilities + .iter() + .filter_map(|c| { + if c.protocol == protocol { + Some(c.version) + } else { + None + } + }) + .max() + } - /// Register the session socket with the event loop - pub fn register_socket>(&self, reg: Token, event_loop: &mut EventLoop) -> Result<(), Error> { - if self.expired() { - return Ok(()); - } - self.connection().register_socket(reg, event_loop)?; - Ok(()) - } + /// Register the session socket with the event loop + pub fn register_socket>( + &self, + reg: Token, + event_loop: &mut EventLoop, + ) -> Result<(), Error> { + if self.expired() { + return Ok(()); + } + self.connection().register_socket(reg, event_loop)?; + Ok(()) + } - /// Update registration with the event loop. 
Should be called at the end of the IO handler. - pub fn update_socket(&self, reg:Token, event_loop: &mut EventLoop) -> Result<(), Error> { - self.connection().update_socket(reg, event_loop)?; - Ok(()) - } + /// Update registration with the event loop. Should be called at the end of the IO handler. + pub fn update_socket( + &self, + reg: Token, + event_loop: &mut EventLoop, + ) -> Result<(), Error> { + self.connection().update_socket(reg, event_loop)?; + Ok(()) + } - /// Delete registration - pub fn deregister_socket(&self, event_loop: &mut EventLoop) -> Result<(), Error> { - self.connection().deregister_socket(event_loop)?; - Ok(()) - } + /// Delete registration + pub fn deregister_socket( + &self, + event_loop: &mut EventLoop, + ) -> Result<(), Error> { + self.connection().deregister_socket(event_loop)?; + Ok(()) + } - /// Send a protocol packet to peer. - pub fn send_packet(&mut self, io: &IoContext, protocol: Option<[u8; 3]>, packet_id: u8, data: &[u8]) -> Result<(), Error> - where Message: Send + Sync + Clone { - if protocol.is_some() && (self.info.capabilities.is_empty() || !self.had_hello) { - debug!(target: "network", "Sending to unconfirmed session {}, protocol: {:?}, packet: {}", self.token(), protocol.as_ref().map(|p| str::from_utf8(&p[..]).unwrap_or("??")), packet_id); - bail!(ErrorKind::BadProtocol); - } - if self.expired() { - return Err(ErrorKind::Expired.into()); - } - let mut i = 0usize; - let pid = match protocol { - Some(protocol) => { - while protocol != self.info.capabilities[i].protocol { - i += 1; - if i == self.info.capabilities.len() { - debug!(target: "network", "Unknown protocol: {:?}", protocol); - return Ok(()) - } - } - self.info.capabilities[i].id_offset + packet_id - }, - None => packet_id - }; - let mut rlp = RlpStream::new(); - rlp.append(&(u32::from(pid))); - let mut compressed = Vec::new(); - let mut payload = data; // create a reference with local lifetime - if self.compression { - if payload.len() > MAX_PAYLOAD_SIZE { - 
bail!(ErrorKind::OversizedPacket); - } - let len = snappy::compress_into(&payload, &mut compressed); - trace!(target: "network", "compressed {} to {}", payload.len(), len); - payload = &compressed[0..len]; - } - rlp.append_raw(payload, 1); - self.send(io, &rlp.drain()) - } + /// Send a protocol packet to peer. + pub fn send_packet( + &mut self, + io: &IoContext, + protocol: Option<[u8; 3]>, + packet_id: u8, + data: &[u8], + ) -> Result<(), Error> + where + Message: Send + Sync + Clone, + { + if protocol.is_some() && (self.info.capabilities.is_empty() || !self.had_hello) { + debug!(target: "network", "Sending to unconfirmed session {}, protocol: {:?}, packet: {}", self.token(), protocol.as_ref().map(|p| str::from_utf8(&p[..]).unwrap_or("??")), packet_id); + bail!(ErrorKind::BadProtocol); + } + if self.expired() { + return Err(ErrorKind::Expired.into()); + } + let mut i = 0usize; + let pid = match protocol { + Some(protocol) => { + while protocol != self.info.capabilities[i].protocol { + i += 1; + if i == self.info.capabilities.len() { + debug!(target: "network", "Unknown protocol: {:?}", protocol); + return Ok(()); + } + } + self.info.capabilities[i].id_offset + packet_id + } + None => packet_id, + }; + let mut rlp = RlpStream::new(); + rlp.append(&(u32::from(pid))); + let mut compressed = Vec::new(); + let mut payload = data; // create a reference with local lifetime + if self.compression { + if payload.len() > MAX_PAYLOAD_SIZE { + bail!(ErrorKind::OversizedPacket); + } + let len = snappy::compress_into(&payload, &mut compressed); + trace!(target: "network", "compressed {} to {}", payload.len(), len); + payload = &compressed[0..len]; + } + rlp.append_raw(payload, 1); + self.send(io, &rlp.drain()) + } - /// Keep this session alive. 
Returns false if ping timeout happened - pub fn keep_alive(&mut self, io: &IoContext) -> bool where Message: Send + Sync + Clone { - if let State::Handshake(_) = self.state { - return true; - } - let timed_out = if let Some(pong) = self.pong_time { - pong.duration_since(self.ping_time) > PING_TIMEOUT - } else { - self.ping_time.elapsed() > PING_TIMEOUT - }; + /// Keep this session alive. Returns false if ping timeout happened + pub fn keep_alive(&mut self, io: &IoContext) -> bool + where + Message: Send + Sync + Clone, + { + if let State::Handshake(_) = self.state { + return true; + } + let timed_out = if let Some(pong) = self.pong_time { + pong.duration_since(self.ping_time) > PING_TIMEOUT + } else { + self.ping_time.elapsed() > PING_TIMEOUT + }; - if !timed_out && self.ping_time.elapsed() > PING_INTERVAL { - if let Err(e) = self.send_ping(io) { - debug!("Error sending ping message: {:?}", e); - } - } - !timed_out - } + if !timed_out && self.ping_time.elapsed() > PING_INTERVAL { + if let Err(e) = self.send_ping(io) { + debug!("Error sending ping message: {:?}", e); + } + } + !timed_out + } - pub fn token(&self) -> StreamToken { - self.connection().token() - } + pub fn token(&self) -> StreamToken { + self.connection().token() + } - /// Signal that a subprotocol has handled the connection successfully and - /// get all pending packets in order received. - pub fn mark_connected(&mut self, protocol: ProtocolId) -> Vec<(ProtocolId, u8, Vec)> { - match self.protocol_states.insert(protocol, ProtocolState::Connected) { - None => Vec::new(), - Some(ProtocolState::Connected) => { - debug!(target: "network", "Protocol {:?} marked as connected more than once", protocol); - Vec::new() - } - Some(ProtocolState::Pending(pending)) => - pending.into_iter().map(|(data, id)| (protocol, id, data)).collect(), - } - } + /// Signal that a subprotocol has handled the connection successfully and + /// get all pending packets in order received. 
+ pub fn mark_connected(&mut self, protocol: ProtocolId) -> Vec<(ProtocolId, u8, Vec)> { + match self + .protocol_states + .insert(protocol, ProtocolState::Connected) + { + None => Vec::new(), + Some(ProtocolState::Connected) => { + debug!(target: "network", "Protocol {:?} marked as connected more than once", protocol); + Vec::new() + } + Some(ProtocolState::Pending(pending)) => pending + .into_iter() + .map(|(data, id)| (protocol, id, data)) + .collect(), + } + } - fn read_packet(&mut self, io: &IoContext, packet: &Packet, host: &HostInfo) -> Result - where Message: Send + Sync + Clone { - if packet.data.len() < 2 { - return Err(ErrorKind::BadProtocol.into()); - } - let packet_id = packet.data[0]; - if packet_id != PACKET_HELLO && packet_id != PACKET_DISCONNECT && !self.had_hello { - return Err(ErrorKind::BadProtocol.into()); - } - let data = if self.compression { - let compressed = &packet.data[1..]; - if snappy::decompressed_len(&compressed)? > MAX_PAYLOAD_SIZE { - bail!(ErrorKind::OversizedPacket); - } - snappy::decompress(&compressed)? - } else { - packet.data[1..].to_owned() - }; - match packet_id { - PACKET_HELLO => { - let rlp = Rlp::new(&data); //TODO: validate rlp expected size - self.read_hello(io, &rlp, host)?; - Ok(SessionData::Ready) - }, - PACKET_DISCONNECT => { - let rlp = Rlp::new(&data); - let reason: u8 = rlp.val_at(0)?; - if self.had_hello { - debug!(target:"network", "Disconnected: {}: {:?}", self.token(), DisconnectReason::from_u8(reason)); - } - Err(ErrorKind::Disconnect(DisconnectReason::from_u8(reason)).into()) - } - PACKET_PING => { - self.send_pong(io)?; - Ok(SessionData::Continue) - }, - PACKET_PONG => { - let time = Instant::now(); - self.pong_time = Some(time); - self.info.ping = Some(time.duration_since(self.ping_time)); - Ok(SessionData::Continue) - }, - PACKET_GET_PEERS => Ok(SessionData::None), //TODO; - PACKET_PEERS => Ok(SessionData::None), - PACKET_USER ... 
PACKET_LAST => { - let mut i = 0usize; - while packet_id >= self.info.capabilities[i].id_offset + self.info.capabilities[i].packet_count { - i += 1; - if i == self.info.capabilities.len() { - debug!(target: "network", "Unknown packet: {:?}", packet_id); - return Ok(SessionData::Continue) - } - } + fn read_packet( + &mut self, + io: &IoContext, + packet: &Packet, + host: &HostInfo, + ) -> Result + where + Message: Send + Sync + Clone, + { + if packet.data.len() < 2 { + return Err(ErrorKind::BadProtocol.into()); + } + let packet_id = packet.data[0]; + if packet_id != PACKET_HELLO && packet_id != PACKET_DISCONNECT && !self.had_hello { + return Err(ErrorKind::BadProtocol.into()); + } + let data = if self.compression { + let compressed = &packet.data[1..]; + if snappy::decompressed_len(&compressed)? > MAX_PAYLOAD_SIZE { + bail!(ErrorKind::OversizedPacket); + } + snappy::decompress(&compressed)? + } else { + packet.data[1..].to_owned() + }; + match packet_id { + PACKET_HELLO => { + let rlp = Rlp::new(&data); //TODO: validate rlp expected size + self.read_hello(io, &rlp, host)?; + Ok(SessionData::Ready) + } + PACKET_DISCONNECT => { + let rlp = Rlp::new(&data); + let reason: u8 = rlp.val_at(0)?; + if self.had_hello { + debug!(target:"network", "Disconnected: {}: {:?}", self.token(), DisconnectReason::from_u8(reason)); + } + Err(ErrorKind::Disconnect(DisconnectReason::from_u8(reason)).into()) + } + PACKET_PING => { + self.send_pong(io)?; + Ok(SessionData::Continue) + } + PACKET_PONG => { + let time = Instant::now(); + self.pong_time = Some(time); + self.info.ping = Some(time.duration_since(self.ping_time)); + Ok(SessionData::Continue) + } + PACKET_GET_PEERS => Ok(SessionData::None), //TODO; + PACKET_PEERS => Ok(SessionData::None), + PACKET_USER...PACKET_LAST => { + let mut i = 0usize; + while packet_id + >= self.info.capabilities[i].id_offset + self.info.capabilities[i].packet_count + { + i += 1; + if i == self.info.capabilities.len() { + debug!(target: "network", "Unknown 
packet: {:?}", packet_id); + return Ok(SessionData::Continue); + } + } - // map to protocol - let protocol = self.info.capabilities[i].protocol; - let protocol_packet_id = packet_id - self.info.capabilities[i].id_offset; + // map to protocol + let protocol = self.info.capabilities[i].protocol; + let protocol_packet_id = packet_id - self.info.capabilities[i].id_offset; - match *self.protocol_states.entry(protocol).or_insert_with(|| ProtocolState::Pending(Vec::new())) { - ProtocolState::Connected => { - trace!(target: "network", "Packet {} mapped to {:?}:{}, i={}, capabilities={:?}", packet_id, protocol, protocol_packet_id, i, self.info.capabilities); - Ok(SessionData::Packet { data, protocol, packet_id: protocol_packet_id } ) - } - ProtocolState::Pending(ref mut pending) => { - trace!(target: "network", "Packet {} deferred until protocol connection event completion", packet_id); - pending.push((data, protocol_packet_id)); + match *self + .protocol_states + .entry(protocol) + .or_insert_with(|| ProtocolState::Pending(Vec::new())) + { + ProtocolState::Connected => { + trace!(target: "network", "Packet {} mapped to {:?}:{}, i={}, capabilities={:?}", packet_id, protocol, protocol_packet_id, i, self.info.capabilities); + Ok(SessionData::Packet { + data, + protocol, + packet_id: protocol_packet_id, + }) + } + ProtocolState::Pending(ref mut pending) => { + trace!(target: "network", "Packet {} deferred until protocol connection event completion", packet_id); + pending.push((data, protocol_packet_id)); - Ok(SessionData::Continue) - } - } - }, - _ => { - debug!(target: "network", "Unknown packet: {:?}", packet_id); - Ok(SessionData::Continue) - } - } - } + Ok(SessionData::Continue) + } + } + } + _ => { + debug!(target: "network", "Unknown packet: {:?}", packet_id); + Ok(SessionData::Continue) + } + } + } - fn write_hello(&mut self, io: &IoContext, host: &HostInfo) -> Result<(), Error> where Message: Send + Sync + Clone { - let mut rlp = RlpStream::new(); - 
rlp.append_raw(&[PACKET_HELLO as u8], 0); - rlp.begin_list(5) - .append(&host.protocol_version) - .append(&host.client_version()) - .append_list(&host.capabilities) - .append(&host.local_endpoint.address.port()) - .append(host.id()); - self.send(io, &rlp.drain()) - } + fn write_hello( + &mut self, + io: &IoContext, + host: &HostInfo, + ) -> Result<(), Error> + where + Message: Send + Sync + Clone, + { + let mut rlp = RlpStream::new(); + rlp.append_raw(&[PACKET_HELLO as u8], 0); + rlp.begin_list(5) + .append(&host.protocol_version) + .append(&host.client_version()) + .append_list(&host.capabilities) + .append(&host.local_endpoint.address.port()) + .append(host.id()); + self.send(io, &rlp.drain()) + } - fn read_hello(&mut self, io: &IoContext, rlp: &Rlp, host: &HostInfo) -> Result<(), Error> - where Message: Send + Sync + Clone { - let protocol = rlp.val_at::(0)?; - let client_version_string = rlp.val_at::(1)?; - let client_version = ClientVersion::from(client_version_string); - let peer_caps: Vec = rlp.list_at(2)?; - let id = rlp.val_at::(4)?; + fn read_hello( + &mut self, + io: &IoContext, + rlp: &Rlp, + host: &HostInfo, + ) -> Result<(), Error> + where + Message: Send + Sync + Clone, + { + let protocol = rlp.val_at::(0)?; + let client_version_string = rlp.val_at::(1)?; + let client_version = ClientVersion::from(client_version_string); + let peer_caps: Vec = rlp.list_at(2)?; + let id = rlp.val_at::(4)?; - // Intersect with host capabilities - // Leave only highset mutually supported capability version - let mut caps: Vec = Vec::new(); - for hc in &host.capabilities { - if peer_caps.iter().any(|c| c.protocol == hc.protocol && c.version == hc.version) { - caps.push(SessionCapabilityInfo { - protocol: hc.protocol, - version: hc.version, - id_offset: 0, - packet_count: hc.packet_count, - }); - } - } + // Intersect with host capabilities + // Leave only highset mutually supported capability version + let mut caps: Vec = Vec::new(); + for hc in &host.capabilities { + if 
peer_caps + .iter() + .any(|c| c.protocol == hc.protocol && c.version == hc.version) + { + caps.push(SessionCapabilityInfo { + protocol: hc.protocol, + version: hc.version, + id_offset: 0, + packet_count: hc.packet_count, + }); + } + } - caps.retain(|c| host.capabilities.iter().any(|hc| hc.protocol == c.protocol && hc.version == c.version)); - let mut i = 0; - while i < caps.len() { - if caps.iter().any(|c| c.protocol == caps[i].protocol && c.version > caps[i].version) { - caps.remove(i); - } else { - i += 1; - } - } + caps.retain(|c| { + host.capabilities + .iter() + .any(|hc| hc.protocol == c.protocol && hc.version == c.version) + }); + let mut i = 0; + while i < caps.len() { + if caps + .iter() + .any(|c| c.protocol == caps[i].protocol && c.version > caps[i].version) + { + caps.remove(i); + } else { + i += 1; + } + } - // Sort capabilities alphabetically. - caps.sort(); + // Sort capabilities alphabetically. + caps.sort(); - i = 0; - let mut offset: u8 = PACKET_USER; - while i < caps.len() { - caps[i].id_offset = offset; - offset += caps[i].packet_count; - i += 1; - } - debug!(target: "network", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps); - let protocol = ::std::cmp::min(protocol, host.protocol_version); - self.info.protocol_version = protocol; - self.info.client_version = client_version; - self.info.capabilities = caps; - self.info.peer_capabilities = peer_caps; - if self.info.capabilities.is_empty() { - trace!(target: "network", "No common capabilities with peer."); - return Err(self.disconnect(io, DisconnectReason::UselessPeer)); - } - if protocol < MIN_PROTOCOL_VERSION { - trace!(target: "network", "Peer protocol version mismatch: {}", protocol); - return Err(self.disconnect(io, DisconnectReason::UselessPeer)); - } - self.compression = protocol >= MIN_COMPRESSION_PROTOCOL_VERSION; - self.send_ping(io)?; - self.had_hello = true; - Ok(()) - } + i = 0; + let mut offset: u8 = PACKET_USER; + while i < caps.len() { + caps[i].id_offset = offset; + 
offset += caps[i].packet_count; + i += 1; + } + debug!(target: "network", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps); + let protocol = ::std::cmp::min(protocol, host.protocol_version); + self.info.protocol_version = protocol; + self.info.client_version = client_version; + self.info.capabilities = caps; + self.info.peer_capabilities = peer_caps; + if self.info.capabilities.is_empty() { + trace!(target: "network", "No common capabilities with peer."); + return Err(self.disconnect(io, DisconnectReason::UselessPeer)); + } + if protocol < MIN_PROTOCOL_VERSION { + trace!(target: "network", "Peer protocol version mismatch: {}", protocol); + return Err(self.disconnect(io, DisconnectReason::UselessPeer)); + } + self.compression = protocol >= MIN_COMPRESSION_PROTOCOL_VERSION; + self.send_ping(io)?; + self.had_hello = true; + Ok(()) + } - /// Send ping packet - pub fn send_ping(&mut self, io: &IoContext) -> Result<(), Error> where Message: Send + Sync + Clone { - self.send_packet(io, None, PACKET_PING, &EMPTY_LIST_RLP)?; - self.ping_time = Instant::now(); - self.pong_time = None; - Ok(()) - } + /// Send ping packet + pub fn send_ping(&mut self, io: &IoContext) -> Result<(), Error> + where + Message: Send + Sync + Clone, + { + self.send_packet(io, None, PACKET_PING, &EMPTY_LIST_RLP)?; + self.ping_time = Instant::now(); + self.pong_time = None; + Ok(()) + } - fn send_pong(&mut self, io: &IoContext) -> Result<(), Error> where Message: Send + Sync + Clone { - self.send_packet(io, None, PACKET_PONG, &EMPTY_LIST_RLP) - } + fn send_pong(&mut self, io: &IoContext) -> Result<(), Error> + where + Message: Send + Sync + Clone, + { + self.send_packet(io, None, PACKET_PONG, &EMPTY_LIST_RLP) + } - /// Disconnect this session - pub fn disconnect(&mut self, io: &IoContext, reason: DisconnectReason) -> Error where Message: Send + Sync + Clone { - if let State::Session(_) = self.state { - let mut rlp = RlpStream::new(); - rlp.begin_list(1); - rlp.append(&(reason as u32)); - 
self.send_packet(io, None, PACKET_DISCONNECT, &rlp.drain()).ok(); - } - ErrorKind::Disconnect(reason).into() - } + /// Disconnect this session + pub fn disconnect( + &mut self, + io: &IoContext, + reason: DisconnectReason, + ) -> Error + where + Message: Send + Sync + Clone, + { + if let State::Session(_) = self.state { + let mut rlp = RlpStream::new(); + rlp.begin_list(1); + rlp.append(&(reason as u32)); + self.send_packet(io, None, PACKET_DISCONNECT, &rlp.drain()) + .ok(); + } + ErrorKind::Disconnect(reason).into() + } - fn send(&mut self, io: &IoContext, data: &[u8]) -> Result<(), Error> where Message: Send + Sync + Clone { - match self.state { - State::Handshake(_) => { - warn!(target:"network", "Unexpected send request"); - }, - State::Session(ref mut s) => { - s.send_packet(io, data)? - }, - } - Ok(()) - } + fn send(&mut self, io: &IoContext, data: &[u8]) -> Result<(), Error> + where + Message: Send + Sync + Clone, + { + match self.state { + State::Handshake(_) => { + warn!(target:"network", "Unexpected send request"); + } + State::Session(ref mut s) => s.send_packet(io, data)?, + } + Ok(()) + } } diff --git a/util/network-devp2p/tests/tests.rs b/util/network-devp2p/tests/tests.rs index 00f811e46..83e5d7a92 100644 --- a/util/network-devp2p/tests/tests.rs +++ b/util/network-devp2p/tests/tests.rs @@ -22,127 +22,136 @@ extern crate ethkey; extern crate parity_bytes; extern crate parking_lot; -use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; -use std::sync::Arc; -use std::thread; -use std::time::*; -use parking_lot::Mutex; -use parity_bytes::Bytes; use ethcore_network::*; use ethcore_network_devp2p::NetworkService; -use ethkey::{Random, Generator}; +use ethkey::{Generator, Random}; use io::TimerToken; +use parity_bytes::Bytes; +use parking_lot::Mutex; +use std::{ + sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, + }, + thread, + time::*, +}; pub struct TestProtocol { - drop_session: bool, - pub packet: Mutex, - pub got_timeout: 
AtomicBool, - pub got_disconnect: AtomicBool, + drop_session: bool, + pub packet: Mutex, + pub got_timeout: AtomicBool, + pub got_disconnect: AtomicBool, } impl TestProtocol { - pub fn new(drop_session: bool) -> Self { - TestProtocol { - packet: Mutex::new(Vec::new()), - got_timeout: AtomicBool::new(false), - got_disconnect: AtomicBool::new(false), - drop_session: drop_session, - } - } - /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, drop_session: bool) -> Arc { - let handler = Arc::new(TestProtocol::new(drop_session)); - service.register_protocol(handler.clone(), *b"tst", &[(42u8, 1u8), (43u8, 1u8)]).expect("Error registering test protocol handler"); - handler - } + pub fn new(drop_session: bool) -> Self { + TestProtocol { + packet: Mutex::new(Vec::new()), + got_timeout: AtomicBool::new(false), + got_disconnect: AtomicBool::new(false), + drop_session: drop_session, + } + } + /// Creates and register protocol with the network service + pub fn register(service: &mut NetworkService, drop_session: bool) -> Arc { + let handler = Arc::new(TestProtocol::new(drop_session)); + service + .register_protocol(handler.clone(), *b"tst", &[(42u8, 1u8), (43u8, 1u8)]) + .expect("Error registering test protocol handler"); + handler + } - pub fn got_packet(&self) -> bool { - self.packet.lock()[..] == b"hello"[..] - } + pub fn got_packet(&self) -> bool { + self.packet.lock()[..] == b"hello"[..] 
+ } - pub fn got_timeout(&self) -> bool { - self.got_timeout.load(AtomicOrdering::Relaxed) - } + pub fn got_timeout(&self) -> bool { + self.got_timeout.load(AtomicOrdering::Relaxed) + } - pub fn got_disconnect(&self) -> bool { - self.got_disconnect.load(AtomicOrdering::Relaxed) - } + pub fn got_disconnect(&self) -> bool { + self.got_disconnect.load(AtomicOrdering::Relaxed) + } } impl NetworkProtocolHandler for TestProtocol { - fn initialize(&self, io: &NetworkContext) { - io.register_timer(0, Duration::from_millis(10)).unwrap(); - } + fn initialize(&self, io: &NetworkContext) { + io.register_timer(0, Duration::from_millis(10)).unwrap(); + } - fn read(&self, _io: &NetworkContext, _peer: &PeerId, packet_id: u8, data: &[u8]) { - assert_eq!(packet_id, 33); - self.packet.lock().extend(data); - } + fn read(&self, _io: &NetworkContext, _peer: &PeerId, packet_id: u8, data: &[u8]) { + assert_eq!(packet_id, 33); + self.packet.lock().extend(data); + } - fn connected(&self, io: &NetworkContext, peer: &PeerId) { - assert!(io.peer_client_version(*peer).to_string().contains("Parity")); - if self.drop_session { - io.disconnect_peer(*peer) - } else { - io.respond(33, "hello".to_owned().into_bytes()).unwrap(); - } - } + fn connected(&self, io: &NetworkContext, peer: &PeerId) { + assert!(io.peer_client_version(*peer).to_string().contains("Parity")); + if self.drop_session { + io.disconnect_peer(*peer) + } else { + io.respond(33, "hello".to_owned().into_bytes()).unwrap(); + } + } - fn disconnected(&self, _io: &NetworkContext, _peer: &PeerId) { - self.got_disconnect.store(true, AtomicOrdering::Relaxed); - } + fn disconnected(&self, _io: &NetworkContext, _peer: &PeerId) { + self.got_disconnect.store(true, AtomicOrdering::Relaxed); + } - /// Timer function called after a timeout created with `NetworkContext::timeout`. 
- fn timeout(&self, _io: &NetworkContext, timer: TimerToken) { - assert_eq!(timer, 0); - self.got_timeout.store(true, AtomicOrdering::Relaxed); - } + /// Timer function called after a timeout created with `NetworkContext::timeout`. + fn timeout(&self, _io: &NetworkContext, timer: TimerToken) { + assert_eq!(timer, 0); + self.got_timeout.store(true, AtomicOrdering::Relaxed); + } } #[test] fn net_service() { - let service = NetworkService::new(NetworkConfiguration::new_local(), None).expect("Error creating network service"); - service.start().unwrap(); - service.register_protocol(Arc::new(TestProtocol::new(false)), *b"myp", &[(1u8, 1u8)]).unwrap(); + let service = NetworkService::new(NetworkConfiguration::new_local(), None) + .expect("Error creating network service"); + service.start().unwrap(); + service + .register_protocol(Arc::new(TestProtocol::new(false)), *b"myp", &[(1u8, 1u8)]) + .unwrap(); } #[test] fn net_start_stop() { - let config = NetworkConfiguration::new_local(); - let service = NetworkService::new(config, None).unwrap(); - service.start().unwrap(); - service.stop(); - service.start().unwrap(); + let config = NetworkConfiguration::new_local(); + let service = NetworkService::new(config, None).unwrap(); + service.start().unwrap(); + service.stop(); + service.start().unwrap(); } #[test] fn net_disconnect() { - let key1 = Random.generate().unwrap(); - let mut config1 = NetworkConfiguration::new_local(); - config1.use_secret = Some(key1.secret().clone()); - config1.boot_nodes = vec![ ]; - let mut service1 = NetworkService::new(config1, None).unwrap(); - service1.start().unwrap(); - let handler1 = TestProtocol::register(&mut service1, false); - let mut config2 = NetworkConfiguration::new_local(); - config2.boot_nodes = vec![ service1.local_url().unwrap() ]; - let mut service2 = NetworkService::new(config2, None).unwrap(); - service2.start().unwrap(); - let handler2 = TestProtocol::register(&mut service2, true); - while !(handler1.got_disconnect() && 
handler2.got_disconnect()) { - thread::sleep(Duration::from_millis(50)); - } - assert!(handler1.got_disconnect()); - assert!(handler2.got_disconnect()); + let key1 = Random.generate().unwrap(); + let mut config1 = NetworkConfiguration::new_local(); + config1.use_secret = Some(key1.secret().clone()); + config1.boot_nodes = vec![]; + let mut service1 = NetworkService::new(config1, None).unwrap(); + service1.start().unwrap(); + let handler1 = TestProtocol::register(&mut service1, false); + let mut config2 = NetworkConfiguration::new_local(); + config2.boot_nodes = vec![service1.local_url().unwrap()]; + let mut service2 = NetworkService::new(config2, None).unwrap(); + service2.start().unwrap(); + let handler2 = TestProtocol::register(&mut service2, true); + while !(handler1.got_disconnect() && handler2.got_disconnect()) { + thread::sleep(Duration::from_millis(50)); + } + assert!(handler1.got_disconnect()); + assert!(handler2.got_disconnect()); } #[test] fn net_timeout() { - let config = NetworkConfiguration::new_local(); - let mut service = NetworkService::new(config, None).unwrap(); - service.start().unwrap(); - let handler = TestProtocol::register(&mut service, false); - while !handler.got_timeout() { - thread::sleep(Duration::from_millis(50)); - } + let config = NetworkConfiguration::new_local(); + let mut service = NetworkService::new(config, None).unwrap(); + service.start().unwrap(); + let handler = TestProtocol::register(&mut service, false); + while !handler.got_timeout() { + thread::sleep(Duration::from_millis(50)); + } } diff --git a/util/network/src/client_version.rs b/util/network/src/client_version.rs index 47d81fad8..afccb45b6 100644 --- a/util/network/src/client_version.rs +++ b/util/network/src/client_version.rs @@ -28,168 +28,169 @@ const PARITY_CLIENT_ID_PREFIX: &str = "Parity-Ethereum"; lazy_static! 
{ /// Parity versions starting from this will accept block bodies requests /// of 256 bodies - static ref PARITY_CLIENT_LARGE_REQUESTS_VERSION: Version = Version::parse("2.4.0").unwrap(); + static ref PARITY_CLIENT_LARGE_REQUESTS_VERSION: Version = Version::parse("2.4.0").unwrap(); } /// Description of the software version running in a peer /// according to https://github.com/ethereum/wiki/wiki/Client-Version-Strings /// This structure as it is represents the format used by Parity clients. Other /// vendors may provide additional fields. -#[derive(Clone,Debug,PartialEq,Eq,Serialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct ParityClientData { - name: String, - identity: Option, - semver: Version, - os: String, - compiler: String, + name: String, + identity: Option, + semver: Version, + os: String, + compiler: String, - // Capability flags, should be calculated in constructor - can_handle_large_requests: bool, + // Capability flags, should be calculated in constructor + can_handle_large_requests: bool, } /// Accessor methods for ParityClientData. This will probably /// need to be abstracted away into a trait. 
impl ParityClientData { - fn new( - name: String, - identity: Option, - semver: Version, - os: String, - compiler: String, - ) -> Self { - // Flags logic - let can_handle_large_requests = &semver >= &PARITY_CLIENT_LARGE_REQUESTS_VERSION; + fn new( + name: String, + identity: Option, + semver: Version, + os: String, + compiler: String, + ) -> Self { + // Flags logic + let can_handle_large_requests = &semver >= &PARITY_CLIENT_LARGE_REQUESTS_VERSION; - // Instantiate and return - ParityClientData { - name: name, - identity: identity, - semver: semver, - os: os, - compiler: compiler, + // Instantiate and return + ParityClientData { + name: name, + identity: identity, + semver: semver, + os: os, + compiler: compiler, - can_handle_large_requests: can_handle_large_requests, - } - } + can_handle_large_requests: can_handle_large_requests, + } + } - fn name(&self) -> &str { - self.name.as_str() - } + fn name(&self) -> &str { + self.name.as_str() + } - fn identity(&self) -> Option<&str> { - self.identity.as_ref().map(String::as_str) - } + fn identity(&self) -> Option<&str> { + self.identity.as_ref().map(String::as_str) + } - fn semver(&self) -> &Version { - &self.semver - } + fn semver(&self) -> &Version { + &self.semver + } - fn os(&self) -> &str { - self.os.as_str() - } + fn os(&self) -> &str { + self.os.as_str() + } - fn compiler(&self) -> &str { - self.compiler.as_str() - } + fn compiler(&self) -> &str { + self.compiler.as_str() + } - fn can_handle_large_requests(&self) -> bool { - self.can_handle_large_requests - } + fn can_handle_large_requests(&self) -> bool { + self.can_handle_large_requests + } } /// Enum describing the version of the software running on a peer. -#[derive(Clone,Debug,Eq,PartialEq,Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, Serialize)] pub enum ClientVersion { - /// The peer runs software from parity and the string format is known - ParityClient( - /// The actual information fields: name, version, os, ... 
- ParityClientData - ), - /// The string ID is recognized as Parity but the overall format - /// could not be parsed - ParityUnknownFormat(String), - /// Other software vendors than Parity - Other(String), + /// The peer runs software from parity and the string format is known + ParityClient( + /// The actual information fields: name, version, os, ... + ParityClientData, + ), + /// The string ID is recognized as Parity but the overall format + /// could not be parsed + ParityUnknownFormat(String), + /// Other software vendors than Parity + Other(String), } impl Default for ClientVersion { - fn default() -> Self { - ClientVersion::Other("".to_owned()) - } + fn default() -> Self { + ClientVersion::Other("".to_owned()) + } } /// Provide information about what a particular version of a /// peer software can do pub trait ClientCapabilities { - /// Parity versions before PARITY_CLIENT_LARGE_REQUESTS_VERSION would not - /// check the accumulated size of a packet when building a response to a - /// GET_BLOCK_BODIES request. If the packet was larger than a given limit, - /// instead of sending fewer blocks no packet would get sent at all. Query - /// if this version can handle requests for a large number of block bodies. - fn can_handle_large_requests(&self) -> bool; + /// Parity versions before PARITY_CLIENT_LARGE_REQUESTS_VERSION would not + /// check the accumulated size of a packet when building a response to a + /// GET_BLOCK_BODIES request. If the packet was larger than a given limit, + /// instead of sending fewer blocks no packet would get sent at all. Query + /// if this version can handle requests for a large number of block bodies. + fn can_handle_large_requests(&self) -> bool; - /// Service transactions are specific to parity. Query if this version - /// accepts them. - fn accepts_service_transaction(&self) -> bool; + /// Service transactions are specific to parity. Query if this version + /// accepts them. 
+ fn accepts_service_transaction(&self) -> bool; } impl ClientCapabilities for ClientVersion { - fn can_handle_large_requests(&self) -> bool { - match self { - ClientVersion::ParityClient(data) => data.can_handle_large_requests(), - ClientVersion::ParityUnknownFormat(_) => false, // Play it safe - ClientVersion::Other(_) => true // As far as we know - } - } - - fn accepts_service_transaction(&self) -> bool { - match self { - ClientVersion::ParityClient(_) => true, - ClientVersion::ParityUnknownFormat(_) => true, - ClientVersion::Other(_) => false - } - } + fn can_handle_large_requests(&self) -> bool { + match self { + ClientVersion::ParityClient(data) => data.can_handle_large_requests(), + ClientVersion::ParityUnknownFormat(_) => false, // Play it safe + ClientVersion::Other(_) => true, // As far as we know + } + } + fn accepts_service_transaction(&self) -> bool { + match self { + ClientVersion::ParityClient(_) => true, + ClientVersion::ParityUnknownFormat(_) => true, + ClientVersion::Other(_) => false, + } + } } fn is_parity(client_id: &str) -> bool { - client_id.starts_with(LEGACY_CLIENT_ID_PREFIX) || client_id.starts_with(PARITY_CLIENT_ID_PREFIX) + client_id.starts_with(LEGACY_CLIENT_ID_PREFIX) || client_id.starts_with(PARITY_CLIENT_ID_PREFIX) } /// Parse known parity formats. Recognizes either a short format with four fields /// or a long format which includes the same fields and an identity one. 
fn parse_parity_format(client_version: &str) -> Result { - const PARITY_ID_STRING_MINIMUM_TOKENS: usize = 4; + const PARITY_ID_STRING_MINIMUM_TOKENS: usize = 4; - let tokens: Vec<&str> = client_version.split("/").collect(); + let tokens: Vec<&str> = client_version.split("/").collect(); - if tokens.len() < PARITY_ID_STRING_MINIMUM_TOKENS { - return Err(()) - } + if tokens.len() < PARITY_ID_STRING_MINIMUM_TOKENS { + return Err(()); + } - let name = tokens[0]; + let name = tokens[0]; - let identity = if tokens.len() - 3 > 1 { - Some(tokens[1..(tokens.len() - 3)].join("/")) - } else { - None - }; + let identity = if tokens.len() - 3 > 1 { + Some(tokens[1..(tokens.len() - 3)].join("/")) + } else { + None + }; - let compiler = tokens[tokens.len() - 1]; - let os = tokens[tokens.len() - 2]; + let compiler = tokens[tokens.len() - 1]; + let os = tokens[tokens.len() - 2]; - // If version is in the right position and valid format return a valid - // result. Otherwise return an error. - get_number_from_version(tokens[tokens.len() - 3]) - .and_then(|v| Version::parse(v).ok()) - .map(|semver| ParityClientData::new( - name.to_owned(), - identity, - semver, - os.to_owned(), - compiler.to_owned(), - )) - .ok_or(()) + // If version is in the right position and valid format return a valid + // result. Otherwise return an error. + get_number_from_version(tokens[tokens.len() - 3]) + .and_then(|v| Version::parse(v).ok()) + .map(|semver| { + ParityClientData::new( + name.to_owned(), + identity, + semver, + os.to_owned(), + compiler.to_owned(), + ) + }) + .ok_or(()) } /// Parse a version string and return the corresponding @@ -199,317 +200,347 @@ fn parse_parity_format(client_version: &str) -> Result { /// The parsing for parity may still fail, in which case return a ParityUnknownFormat with /// the original version string. TryFrom would be a better trait to implement. 
impl From for ClientVersion -where T: AsRef { - fn from(client_version: T) -> Self { - let client_version_str: &str = client_version.as_ref(); +where + T: AsRef, +{ + fn from(client_version: T) -> Self { + let client_version_str: &str = client_version.as_ref(); - if !is_parity(client_version_str) { - return ClientVersion::Other(client_version_str.to_owned()); - } + if !is_parity(client_version_str) { + return ClientVersion::Other(client_version_str.to_owned()); + } - if let Ok(data) = parse_parity_format(client_version_str) { - ClientVersion::ParityClient(data) - } else { - ClientVersion::ParityUnknownFormat(client_version_str.to_owned()) - } - } + if let Ok(data) = parse_parity_format(client_version_str) { + ClientVersion::ParityClient(data) + } else { + ClientVersion::ParityUnknownFormat(client_version_str.to_owned()) + } + } } -fn format_parity_version_string(client_version: &ParityClientData, f: &mut fmt::Formatter) -> std::fmt::Result { - let name = client_version.name(); - let semver = client_version.semver(); - let os = client_version.os(); - let compiler = client_version.compiler(); +fn format_parity_version_string( + client_version: &ParityClientData, + f: &mut fmt::Formatter, +) -> std::fmt::Result { + let name = client_version.name(); + let semver = client_version.semver(); + let os = client_version.os(); + let compiler = client_version.compiler(); - match client_version.identity() { - None => write!(f, "{}/v{}/{}/{}", name, semver, os, compiler), - Some(identity) => write!(f, "{}/{}/v{}/{}/{}", name, identity, semver, os, compiler), - } + match client_version.identity() { + None => write!(f, "{}/v{}/{}/{}", name, semver, os, compiler), + Some(identity) => write!(f, "{}/{}/v{}/{}/{}", name, identity, semver, os, compiler), + } } impl fmt::Display for ClientVersion { - fn fmt(&self, f: &mut fmt::Formatter) -> std::fmt::Result { - match self { - ClientVersion::ParityClient(data) => format_parity_version_string(data, f), - 
ClientVersion::ParityUnknownFormat(id) => write!(f, "{}", id), - ClientVersion::Other(id) => write!(f, "{}", id) - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> std::fmt::Result { + match self { + ClientVersion::ParityClient(data) => format_parity_version_string(data, f), + ClientVersion::ParityUnknownFormat(id) => write!(f, "{}", id), + ClientVersion::Other(id) => write!(f, "{}", id), + } + } } fn get_number_from_version(version: &str) -> Option<&str> { - if version.starts_with("v") { - return version.get(1..); - } + if version.starts_with("v") { + return version.get(1..); + } - None + None } #[cfg(test)] pub mod tests { - use super::*; + use super::*; - const PARITY_CLIENT_SEMVER: &str = "2.4.0"; - const PARITY_CLIENT_OLD_SEMVER: &str = "2.2.0"; - const PARITY_CLIENT_OS: &str = "linux"; - const PARITY_CLIENT_COMPILER: &str = "rustc"; - const PARITY_CLIENT_IDENTITY: &str = "ExpanseSOLO"; - const PARITY_CLIENT_MULTITOKEN_IDENTITY: &str = "ExpanseSOLO/abc/v1.2.3"; + const PARITY_CLIENT_SEMVER: &str = "2.4.0"; + const PARITY_CLIENT_OLD_SEMVER: &str = "2.2.0"; + const PARITY_CLIENT_OS: &str = "linux"; + const PARITY_CLIENT_COMPILER: &str = "rustc"; + const PARITY_CLIENT_IDENTITY: &str = "ExpanseSOLO"; + const PARITY_CLIENT_MULTITOKEN_IDENTITY: &str = "ExpanseSOLO/abc/v1.2.3"; + fn make_default_version_string() -> String { + format!( + "{}/v{}/{}/{}", + PARITY_CLIENT_ID_PREFIX, PARITY_CLIENT_SEMVER, PARITY_CLIENT_OS, PARITY_CLIENT_COMPILER + ) + } - fn make_default_version_string() -> String { - format!( - "{}/v{}/{}/{}", - PARITY_CLIENT_ID_PREFIX, - PARITY_CLIENT_SEMVER, - PARITY_CLIENT_OS, - PARITY_CLIENT_COMPILER - ) - } + fn make_default_long_version_string() -> String { + format!( + "{}/{}/v{}/{}/{}", + PARITY_CLIENT_ID_PREFIX, + PARITY_CLIENT_IDENTITY, + PARITY_CLIENT_SEMVER, + PARITY_CLIENT_OS, + PARITY_CLIENT_COMPILER + ) + } - fn make_default_long_version_string() -> String { - format!( - "{}/{}/v{}/{}/{}", - PARITY_CLIENT_ID_PREFIX, - 
PARITY_CLIENT_IDENTITY, - PARITY_CLIENT_SEMVER, - PARITY_CLIENT_OS, - PARITY_CLIENT_COMPILER - ) - } + fn make_multitoken_identity_long_version_string() -> String { + format!( + "{}/{}/v{}/{}/{}", + PARITY_CLIENT_ID_PREFIX, + PARITY_CLIENT_MULTITOKEN_IDENTITY, + PARITY_CLIENT_SEMVER, + PARITY_CLIENT_OS, + PARITY_CLIENT_COMPILER + ) + } - fn make_multitoken_identity_long_version_string() -> String { - format!( - "{}/{}/v{}/{}/{}", - PARITY_CLIENT_ID_PREFIX, - PARITY_CLIENT_MULTITOKEN_IDENTITY, - PARITY_CLIENT_SEMVER, - PARITY_CLIENT_OS, - PARITY_CLIENT_COMPILER - ) - } + fn make_old_semver_version_string() -> String { + format!( + "{}/v{}/{}/{}", + PARITY_CLIENT_ID_PREFIX, + PARITY_CLIENT_OLD_SEMVER, + PARITY_CLIENT_OS, + PARITY_CLIENT_COMPILER + ) + } - fn make_old_semver_version_string() -> String { - format!( - "{}/v{}/{}/{}", - PARITY_CLIENT_ID_PREFIX, - PARITY_CLIENT_OLD_SEMVER, - PARITY_CLIENT_OS, - PARITY_CLIENT_COMPILER - ) - } + #[test] + pub fn client_version_when_from_empty_string_then_default() { + let default = ClientVersion::default(); - #[test] - pub fn client_version_when_from_empty_string_then_default() { - let default = ClientVersion::default(); + assert_eq!(ClientVersion::from(""), default); + } - assert_eq!(ClientVersion::from(""), default); - } + #[test] + pub fn get_number_from_version_when_valid_then_number() { + let version_string = format!("v{}", PARITY_CLIENT_SEMVER); - #[test] - pub fn get_number_from_version_when_valid_then_number() { - let version_string = format!("v{}", PARITY_CLIENT_SEMVER); + assert_eq!( + get_number_from_version(&version_string).unwrap(), + PARITY_CLIENT_SEMVER + ); + } - assert_eq!(get_number_from_version(&version_string).unwrap(), PARITY_CLIENT_SEMVER); - } + #[test] + pub fn client_version_when_str_parity_format_and_valid_then_all_fields_match() { + let client_version_string = make_default_version_string(); - #[test] - pub fn client_version_when_str_parity_format_and_valid_then_all_fields_match() { - let 
client_version_string = make_default_version_string(); + if let ClientVersion::ParityClient(client_version) = + ClientVersion::from(client_version_string.as_str()) + { + assert_eq!(client_version.name(), PARITY_CLIENT_ID_PREFIX); + assert_eq!( + *client_version.semver(), + Version::parse(PARITY_CLIENT_SEMVER).unwrap() + ); + assert_eq!(client_version.os(), PARITY_CLIENT_OS); + assert_eq!(client_version.compiler(), PARITY_CLIENT_COMPILER); + } else { + panic!("shouldn't be here"); + } + } - if let ClientVersion::ParityClient(client_version) = ClientVersion::from(client_version_string.as_str()) { - assert_eq!(client_version.name(), PARITY_CLIENT_ID_PREFIX); - assert_eq!(*client_version.semver(), Version::parse(PARITY_CLIENT_SEMVER).unwrap()); - assert_eq!(client_version.os(), PARITY_CLIENT_OS); - assert_eq!(client_version.compiler(), PARITY_CLIENT_COMPILER); - } else { - panic!("shouldn't be here"); - } - } + #[test] + pub fn client_version_when_str_parity_long_format_and_valid_then_all_fields_match() { + let client_version_string = make_default_long_version_string(); - #[test] - pub fn client_version_when_str_parity_long_format_and_valid_then_all_fields_match() { - let client_version_string = make_default_long_version_string(); + if let ClientVersion::ParityClient(client_version) = + ClientVersion::from(client_version_string.as_str()) + { + assert_eq!(client_version.name(), PARITY_CLIENT_ID_PREFIX); + assert_eq!(client_version.identity().unwrap(), PARITY_CLIENT_IDENTITY); + assert_eq!( + *client_version.semver(), + Version::parse(PARITY_CLIENT_SEMVER).unwrap() + ); + assert_eq!(client_version.os(), PARITY_CLIENT_OS); + assert_eq!(client_version.compiler(), PARITY_CLIENT_COMPILER); + } else { + panic!("shouldnt be here"); + } + } - if let ClientVersion::ParityClient(client_version) = ClientVersion::from(client_version_string.as_str()) { - assert_eq!(client_version.name(), PARITY_CLIENT_ID_PREFIX); - assert_eq!(client_version.identity().unwrap(), 
PARITY_CLIENT_IDENTITY); - assert_eq!(*client_version.semver(), Version::parse(PARITY_CLIENT_SEMVER).unwrap()); - assert_eq!(client_version.os(), PARITY_CLIENT_OS); - assert_eq!(client_version.compiler(), PARITY_CLIENT_COMPILER); - } else { - panic!("shouldnt be here"); - } - } + #[test] + pub fn client_version_when_str_parity_long_format_and_valid_and_identity_multiple_tokens_then_all_fields_match( + ) { + let client_version_string = make_multitoken_identity_long_version_string(); - #[test] - pub fn client_version_when_str_parity_long_format_and_valid_and_identity_multiple_tokens_then_all_fields_match() { - let client_version_string = make_multitoken_identity_long_version_string(); + if let ClientVersion::ParityClient(client_version) = + ClientVersion::from(client_version_string.as_str()) + { + assert_eq!(client_version.name(), PARITY_CLIENT_ID_PREFIX); + assert_eq!( + client_version.identity().unwrap(), + PARITY_CLIENT_MULTITOKEN_IDENTITY + ); + assert_eq!( + *client_version.semver(), + Version::parse(PARITY_CLIENT_SEMVER).unwrap() + ); + assert_eq!(client_version.os(), PARITY_CLIENT_OS); + assert_eq!(client_version.compiler(), PARITY_CLIENT_COMPILER); + } else { + panic!("shouldnt be here"); + } + } - if let ClientVersion::ParityClient(client_version) = ClientVersion::from(client_version_string.as_str()) { - assert_eq!(client_version.name(), PARITY_CLIENT_ID_PREFIX); - assert_eq!(client_version.identity().unwrap(), PARITY_CLIENT_MULTITOKEN_IDENTITY); - assert_eq!(*client_version.semver(), Version::parse(PARITY_CLIENT_SEMVER).unwrap()); - assert_eq!(client_version.os(), PARITY_CLIENT_OS); - assert_eq!(client_version.compiler(), PARITY_CLIENT_COMPILER); - } else { - panic!("shouldnt be here"); - } - } + #[test] + pub fn client_version_when_string_parity_format_and_valid_then_all_fields_match() { + let client_version_string: String = make_default_version_string(); - #[test] - pub fn client_version_when_string_parity_format_and_valid_then_all_fields_match() { - let 
client_version_string: String = make_default_version_string(); + if let ClientVersion::ParityClient(client_version) = + ClientVersion::from(client_version_string.as_str()) + { + assert_eq!(client_version.name(), PARITY_CLIENT_ID_PREFIX); + assert_eq!( + *client_version.semver(), + Version::parse(PARITY_CLIENT_SEMVER).unwrap() + ); + assert_eq!(client_version.os(), PARITY_CLIENT_OS); + assert_eq!(client_version.compiler(), PARITY_CLIENT_COMPILER); + } else { + panic!("shouldn't be here"); + } + } - if let ClientVersion::ParityClient(client_version) = ClientVersion::from(client_version_string.as_str()) { - assert_eq!(client_version.name(), PARITY_CLIENT_ID_PREFIX); - assert_eq!(*client_version.semver(), Version::parse(PARITY_CLIENT_SEMVER).unwrap()); - assert_eq!(client_version.os(), PARITY_CLIENT_OS); - assert_eq!(client_version.compiler(), PARITY_CLIENT_COMPILER); - } else { - panic!("shouldn't be here"); - } - } + #[test] + pub fn client_version_when_parity_format_and_invalid_then_equals_parity_unknown_client_version_string( + ) { + // This is invalid because version has no leading 'v' + let client_version_string = format!( + "{}/{}/{}/{}", + PARITY_CLIENT_ID_PREFIX, PARITY_CLIENT_SEMVER, PARITY_CLIENT_OS, PARITY_CLIENT_COMPILER + ); - #[test] - pub fn client_version_when_parity_format_and_invalid_then_equals_parity_unknown_client_version_string() { - // This is invalid because version has no leading 'v' - let client_version_string = format!( - "{}/{}/{}/{}", - PARITY_CLIENT_ID_PREFIX, - PARITY_CLIENT_SEMVER, - PARITY_CLIENT_OS, - PARITY_CLIENT_COMPILER); + let client_version = ClientVersion::from(client_version_string.as_str()); - let client_version = ClientVersion::from(client_version_string.as_str()); + let parity_unknown = ClientVersion::ParityUnknownFormat(client_version_string.to_string()); - let parity_unknown = ClientVersion::ParityUnknownFormat(client_version_string.to_string()); + assert_eq!(client_version, parity_unknown); + } - 
assert_eq!(client_version, parity_unknown); - } + #[test] + pub fn client_version_when_parity_format_without_identity_and_missing_compiler_field_then_equals_parity_unknown_client_version_string( + ) { + let client_version_string = format!( + "{}/v{}/{}", + PARITY_CLIENT_ID_PREFIX, PARITY_CLIENT_SEMVER, PARITY_CLIENT_OS, + ); - #[test] - pub fn client_version_when_parity_format_without_identity_and_missing_compiler_field_then_equals_parity_unknown_client_version_string() { - let client_version_string = format!( - "{}/v{}/{}", - PARITY_CLIENT_ID_PREFIX, - PARITY_CLIENT_SEMVER, - PARITY_CLIENT_OS, - ); + let client_version = ClientVersion::from(client_version_string.as_str()); - let client_version = ClientVersion::from(client_version_string.as_str()); + let parity_unknown = ClientVersion::ParityUnknownFormat(client_version_string.to_string()); - let parity_unknown = ClientVersion::ParityUnknownFormat(client_version_string.to_string()); + assert_eq!(client_version, parity_unknown); + } - assert_eq!(client_version, parity_unknown); - } + #[test] + pub fn client_version_when_parity_format_with_identity_and_missing_compiler_field_then_equals_parity_unknown_client_version_string( + ) { + let client_version_string = format!( + "{}/{}/v{}/{}", + PARITY_CLIENT_ID_PREFIX, PARITY_CLIENT_IDENTITY, PARITY_CLIENT_SEMVER, PARITY_CLIENT_OS, + ); - #[test] - pub fn client_version_when_parity_format_with_identity_and_missing_compiler_field_then_equals_parity_unknown_client_version_string() { - let client_version_string = format!( - "{}/{}/v{}/{}", - PARITY_CLIENT_ID_PREFIX, - PARITY_CLIENT_IDENTITY, - PARITY_CLIENT_SEMVER, - PARITY_CLIENT_OS, - ); + let client_version = ClientVersion::from(client_version_string.as_str()); - let client_version = ClientVersion::from(client_version_string.as_str()); + let parity_unknown = ClientVersion::ParityUnknownFormat(client_version_string.to_string()); - let parity_unknown = ClientVersion::ParityUnknownFormat(client_version_string.to_string()); + 
assert_eq!(client_version, parity_unknown); + } - assert_eq!(client_version, parity_unknown); - } + #[test] + pub fn client_version_when_not_parity_format_and_valid_then_other_with_client_version_string() { + let client_version_string = "Geth/main.jnode.network/v1.8.21-stable-9dc5d1a9/linux"; - #[test] - pub fn client_version_when_not_parity_format_and_valid_then_other_with_client_version_string() { - let client_version_string = "Geth/main.jnode.network/v1.8.21-stable-9dc5d1a9/linux"; + let client_version = ClientVersion::from(client_version_string); - let client_version = ClientVersion::from(client_version_string); + assert_eq!( + client_version, + ClientVersion::Other(client_version_string.to_string()) + ); + } - assert_eq!(client_version, ClientVersion::Other(client_version_string.to_string())); - } + #[test] + pub fn client_version_when_parity_format_and_valid_then_to_string_equal() { + let client_version_string: String = make_default_version_string(); - #[test] - pub fn client_version_when_parity_format_and_valid_then_to_string_equal() { - let client_version_string: String = make_default_version_string(); + let client_version = ClientVersion::from(client_version_string.as_str()); - let client_version = ClientVersion::from(client_version_string.as_str()); + assert_eq!(client_version.to_string(), client_version_string); + } - assert_eq!(client_version.to_string(), client_version_string); - } + #[test] + pub fn client_version_when_other_then_to_string_equal_input_string() { + let client_version_string: String = "Other".to_string(); - #[test] - pub fn client_version_when_other_then_to_string_equal_input_string() { - let client_version_string: String = "Other".to_string(); + let client_version = ClientVersion::from("Other"); - let client_version = ClientVersion::from("Other"); + assert_eq!(client_version.to_string(), client_version_string); + } - assert_eq!(client_version.to_string(), client_version_string); - } + #[test] + pub fn 
client_capabilities_when_parity_old_version_then_handles_large_requests_false() { + let client_version_string: String = make_old_semver_version_string(); - #[test] - pub fn client_capabilities_when_parity_old_version_then_handles_large_requests_false() { - let client_version_string: String = make_old_semver_version_string(); + let client_version = ClientVersion::from(client_version_string.as_str()); - let client_version = ClientVersion::from(client_version_string.as_str()); + assert!(!client_version.can_handle_large_requests()); + } - assert!(!client_version.can_handle_large_requests()); - } + #[test] + pub fn client_capabilities_when_parity_beta_version_then_not_handles_large_requests_true() { + let client_version_string: String = format!( + "{}/v{}/{}/{}", + "Parity-Ethereum", "2.4.0-beta", "x86_64-linux-gnu", "rustc1.31.1" + ) + .to_string(); - #[test] - pub fn client_capabilities_when_parity_beta_version_then_not_handles_large_requests_true() { - let client_version_string: String = format!( - "{}/v{}/{}/{}", - "Parity-Ethereum", - "2.4.0-beta", - "x86_64-linux-gnu", - "rustc1.31.1") - .to_string(); + let client_version = ClientVersion::from(client_version_string.as_str()); - let client_version = ClientVersion::from(client_version_string.as_str()); + assert!(!client_version.can_handle_large_requests()); + } - assert!(!client_version.can_handle_large_requests()); - } + #[test] + pub fn client_version_when_to_owned_then_both_objects_equal() { + let client_version_string: String = make_old_semver_version_string(); - #[test] - pub fn client_version_when_to_owned_then_both_objects_equal() { - let client_version_string: String = make_old_semver_version_string(); + let origin = ClientVersion::from(client_version_string.as_str()); - let origin = ClientVersion::from(client_version_string.as_str()); + let borrowed = &origin; - let borrowed = &origin; + let owned = origin.to_owned(); - let owned = origin.to_owned(); + assert_eq!(*borrowed, owned); + } - 
assert_eq!(*borrowed, owned); - } + #[test] + fn client_version_accepts_service_transaction_for_different_versions() { + assert!(!ClientVersion::from("Geth").accepts_service_transaction()); + assert!( + ClientVersion::from("Parity-Ethereum/v2.6.0/linux/rustc").accepts_service_transaction() + ); + assert!( + ClientVersion::from("Parity-Ethereum/ABCDEFGH/v2.7.3/linux/rustc") + .accepts_service_transaction() + ); + } - #[test] - fn client_version_accepts_service_transaction_for_different_versions() { - assert!(!ClientVersion::from("Geth").accepts_service_transaction()); - assert!(ClientVersion::from("Parity-Ethereum/v2.6.0/linux/rustc").accepts_service_transaction()); - assert!(ClientVersion::from("Parity-Ethereum/ABCDEFGH/v2.7.3/linux/rustc").accepts_service_transaction()); - } + #[test] + fn is_parity_when_parity_then_true() { + let client_id = format!("{}/", PARITY_CLIENT_ID_PREFIX); - #[test] - fn is_parity_when_parity_then_true() { - let client_id = format!("{}/", PARITY_CLIENT_ID_PREFIX); + assert!(is_parity(&client_id)); + } - assert!(is_parity(&client_id)); - } + #[test] + fn is_parity_when_empty_then_false() { + let client_id = ""; - #[test] - fn is_parity_when_empty_then_false() { - let client_id = ""; + assert!(!is_parity(&client_id)); + } - assert!(!is_parity(&client_id)); - } + #[test] + fn is_parity_when_other_then_false() { + let client_id = "other"; - #[test] - fn is_parity_when_other_then_false() { - let client_id = "other"; - - assert!(!is_parity(&client_id)); - } + assert!(!is_parity(&client_id)); + } } diff --git a/util/network/src/connection_filter.rs b/util/network/src/connection_filter.rs index 4efd1c503..2d6efc0b0 100644 --- a/util/network/src/connection_filter.rs +++ b/util/network/src/connection_filter.rs @@ -20,12 +20,17 @@ use super::NodeId; /// Filtered connection direction. pub enum ConnectionDirection { - Inbound, - Outbound, + Inbound, + Outbound, } /// Connection filter. Each connection is checked against `connection_allowed`. 
-pub trait ConnectionFilter : Send + Sync { - /// Filter a connection. Returns `true` if connection should be allowed. `false` if rejected. - fn connection_allowed(&self, own_id: &NodeId, connecting_id: &NodeId, direction: ConnectionDirection) -> bool; +pub trait ConnectionFilter: Send + Sync { + /// Filter a connection. Returns `true` if connection should be allowed. `false` if rejected. + fn connection_allowed( + &self, + own_id: &NodeId, + connecting_id: &NodeId, + direction: ConnectionDirection, + ) -> bool; } diff --git a/util/network/src/error.rs b/util/network/src/error.rs index bd48830c1..7d47acb46 100644 --- a/util/network/src/error.rs +++ b/util/network/src/error.rs @@ -18,226 +18,229 @@ // https://github.com/paritytech/parity-ethereum/issues/10302 #![allow(deprecated)] -use std::{io, net, fmt}; -use libc::{ENFILE, EMFILE}; +use crypto; +use ethkey; use io::IoError; -use {rlp, ethkey, crypto, snappy}; +use libc::{EMFILE, ENFILE}; +use rlp; +use snappy; +use std::{fmt, io, net}; #[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum DisconnectReason -{ - DisconnectRequested, - TCPError, - BadProtocol, - UselessPeer, - TooManyPeers, - DuplicatePeer, - IncompatibleProtocol, - NullIdentity, - ClientQuit, - UnexpectedIdentity, - LocalIdentity, - PingTimeout, - Unknown, +pub enum DisconnectReason { + DisconnectRequested, + TCPError, + BadProtocol, + UselessPeer, + TooManyPeers, + DuplicatePeer, + IncompatibleProtocol, + NullIdentity, + ClientQuit, + UnexpectedIdentity, + LocalIdentity, + PingTimeout, + Unknown, } impl DisconnectReason { - pub fn from_u8(n: u8) -> DisconnectReason { - match n { - 0 => DisconnectReason::DisconnectRequested, - 1 => DisconnectReason::TCPError, - 2 => DisconnectReason::BadProtocol, - 3 => DisconnectReason::UselessPeer, - 4 => DisconnectReason::TooManyPeers, - 5 => DisconnectReason::DuplicatePeer, - 6 => DisconnectReason::IncompatibleProtocol, - 7 => DisconnectReason::NullIdentity, - 8 => DisconnectReason::ClientQuit, - 9 => 
DisconnectReason::UnexpectedIdentity, - 10 => DisconnectReason::LocalIdentity, - 11 => DisconnectReason::PingTimeout, - _ => DisconnectReason::Unknown, - } - } + pub fn from_u8(n: u8) -> DisconnectReason { + match n { + 0 => DisconnectReason::DisconnectRequested, + 1 => DisconnectReason::TCPError, + 2 => DisconnectReason::BadProtocol, + 3 => DisconnectReason::UselessPeer, + 4 => DisconnectReason::TooManyPeers, + 5 => DisconnectReason::DuplicatePeer, + 6 => DisconnectReason::IncompatibleProtocol, + 7 => DisconnectReason::NullIdentity, + 8 => DisconnectReason::ClientQuit, + 9 => DisconnectReason::UnexpectedIdentity, + 10 => DisconnectReason::LocalIdentity, + 11 => DisconnectReason::PingTimeout, + _ => DisconnectReason::Unknown, + } + } } impl fmt::Display for DisconnectReason { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::DisconnectReason::*; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::DisconnectReason::*; - let msg = match *self { - DisconnectRequested => "disconnect requested", - TCPError => "TCP error", - BadProtocol => "bad protocol", - UselessPeer => "useless peer", - TooManyPeers => "too many peers", - DuplicatePeer => "duplicate peer", - IncompatibleProtocol => "incompatible protocol", - NullIdentity => "null identity", - ClientQuit => "client quit", - UnexpectedIdentity => "unexpected identity", - LocalIdentity => "local identity", - PingTimeout => "ping timeout", - Unknown => "unknown", - }; + let msg = match *self { + DisconnectRequested => "disconnect requested", + TCPError => "TCP error", + BadProtocol => "bad protocol", + UselessPeer => "useless peer", + TooManyPeers => "too many peers", + DuplicatePeer => "duplicate peer", + IncompatibleProtocol => "incompatible protocol", + NullIdentity => "null identity", + ClientQuit => "client quit", + UnexpectedIdentity => "unexpected identity", + LocalIdentity => "local identity", + PingTimeout => "ping timeout", + Unknown => "unknown", + }; - f.write_str(msg) - } + 
f.write_str(msg) + } } error_chain! { - foreign_links { - SocketIo(IoError) #[doc = "Socket IO error."]; - Decompression(snappy::InvalidInput) #[doc = "Decompression error."]; - Rlp(rlp::DecoderError) #[doc = "Rlp decoder error."]; - } + foreign_links { + SocketIo(IoError) #[doc = "Socket IO error."]; + Decompression(snappy::InvalidInput) #[doc = "Decompression error."]; + Rlp(rlp::DecoderError) #[doc = "Rlp decoder error."]; + } - errors { - #[doc = "Error concerning the network address parsing subsystem."] - AddressParse { - description("Failed to parse network address"), - display("Failed to parse network address"), - } + errors { + #[doc = "Error concerning the network address parsing subsystem."] + AddressParse { + description("Failed to parse network address"), + display("Failed to parse network address"), + } - #[doc = "Error concerning the network address resolution subsystem."] - AddressResolve(err: Option) { - description("Failed to resolve network address"), - display("Failed to resolve network address {}", err.as_ref().map_or("".to_string(), |e| e.to_string())), - } + #[doc = "Error concerning the network address resolution subsystem."] + AddressResolve(err: Option) { + description("Failed to resolve network address"), + display("Failed to resolve network address {}", err.as_ref().map_or("".to_string(), |e| e.to_string())), + } - #[doc = "Authentication failure"] - Auth { - description("Authentication failure"), - display("Authentication failure"), - } + #[doc = "Authentication failure"] + Auth { + description("Authentication failure"), + display("Authentication failure"), + } - #[doc = "Unrecognised protocol"] - BadProtocol { - description("Bad protocol"), - display("Bad protocol"), - } + #[doc = "Unrecognised protocol"] + BadProtocol { + description("Bad protocol"), + display("Bad protocol"), + } - #[doc = "Expired message"] - Expired { - description("Expired message"), - display("Expired message"), - } + #[doc = "Expired message"] + Expired { + 
description("Expired message"), + display("Expired message"), + } - #[doc = "Peer not found"] - PeerNotFound { - description("Peer not found"), - display("Peer not found"), - } + #[doc = "Peer not found"] + PeerNotFound { + description("Peer not found"), + display("Peer not found"), + } - #[doc = "Peer is disconnected"] - Disconnect(reason: DisconnectReason) { - description("Peer disconnected"), - display("Peer disconnected: {}", reason), - } + #[doc = "Peer is disconnected"] + Disconnect(reason: DisconnectReason) { + description("Peer disconnected"), + display("Peer disconnected: {}", reason), + } - #[doc = "Invalid node id"] - InvalidNodeId { - description("Invalid node id"), - display("Invalid node id"), - } + #[doc = "Invalid node id"] + InvalidNodeId { + description("Invalid node id"), + display("Invalid node id"), + } - #[doc = "Packet size is over the protocol limit"] - OversizedPacket { - description("Packet is too large"), - display("Packet is too large"), - } + #[doc = "Packet size is over the protocol limit"] + OversizedPacket { + description("Packet is too large"), + display("Packet is too large"), + } - #[doc = "Reached system resource limits for this process"] - ProcessTooManyFiles { - description("Too many open files in process."), - display("Too many open files in this process. Check your resource limits and restart parity"), - } + #[doc = "Reached system resource limits for this process"] + ProcessTooManyFiles { + description("Too many open files in process."), + display("Too many open files in this process. Check your resource limits and restart parity"), + } - #[doc = "Reached system wide resource limits"] - SystemTooManyFiles { - description("Too many open files on system."), - display("Too many open files on system. 
Consider closing some processes/release some file handlers or increas the system-wide resource limits and restart parity."), - } + #[doc = "Reached system wide resource limits"] + SystemTooManyFiles { + description("Too many open files on system."), + display("Too many open files on system. Consider closing some processes/release some file handlers or increas the system-wide resource limits and restart parity."), + } - #[doc = "An unknown IO error occurred."] - Io(err: io::Error) { - description("IO Error"), - display("Unexpected IO error: {}", err), - } - } + #[doc = "An unknown IO error occurred."] + Io(err: io::Error) { + description("IO Error"), + display("Unexpected IO error: {}", err), + } + } } impl From for Error { - fn from(err: io::Error) -> Self { - match err.raw_os_error() { - Some(ENFILE) => ErrorKind::ProcessTooManyFiles.into(), - Some(EMFILE) => ErrorKind::SystemTooManyFiles.into(), - _ => Error::from_kind(ErrorKind::Io(err)) - } - } + fn from(err: io::Error) -> Self { + match err.raw_os_error() { + Some(ENFILE) => ErrorKind::ProcessTooManyFiles.into(), + Some(EMFILE) => ErrorKind::SystemTooManyFiles.into(), + _ => Error::from_kind(ErrorKind::Io(err)), + } + } } impl From for Error { - fn from(_err: ethkey::Error) -> Self { - ErrorKind::Auth.into() - } + fn from(_err: ethkey::Error) -> Self { + ErrorKind::Auth.into() + } } impl From for Error { - fn from(_err: ethkey::crypto::Error) -> Self { - ErrorKind::Auth.into() - } + fn from(_err: ethkey::crypto::Error) -> Self { + ErrorKind::Auth.into() + } } impl From for Error { - fn from(_err: crypto::error::SymmError) -> Self { - ErrorKind::Auth.into() - } + fn from(_err: crypto::error::SymmError) -> Self { + ErrorKind::Auth.into() + } } impl From for Error { - fn from(_err: net::AddrParseError) -> Self { ErrorKind::AddressParse.into() } + fn from(_err: net::AddrParseError) -> Self { + ErrorKind::AddressParse.into() + } } #[test] fn test_errors() { - assert_eq!(DisconnectReason::ClientQuit, 
DisconnectReason::from_u8(8)); - let mut r = DisconnectReason::DisconnectRequested; - for i in 0 .. 20 { - r = DisconnectReason::from_u8(i); - } - assert_eq!(DisconnectReason::Unknown, r); + assert_eq!(DisconnectReason::ClientQuit, DisconnectReason::from_u8(8)); + let mut r = DisconnectReason::DisconnectRequested; + for i in 0..20 { + r = DisconnectReason::from_u8(i); + } + assert_eq!(DisconnectReason::Unknown, r); - match *>::from(rlp::DecoderError::RlpIsTooBig).kind() { - ErrorKind::Rlp(_) => {}, - _ => panic!("Unexpected error"), - } + match *>::from(rlp::DecoderError::RlpIsTooBig).kind() { + ErrorKind::Rlp(_) => {} + _ => panic!("Unexpected error"), + } - match *>::from(ethkey::crypto::Error::InvalidMessage).kind() { - ErrorKind::Auth => {}, - _ => panic!("Unexpected error"), - } + match *>::from(ethkey::crypto::Error::InvalidMessage) + .kind() + { + ErrorKind::Auth => {} + _ => panic!("Unexpected error"), + } } #[test] fn test_io_errors() { - use libc::{EMFILE, ENFILE}; + use libc::{EMFILE, ENFILE}; - assert_matches!( - >::from( - io::Error::from_raw_os_error(ENFILE) - ).kind(), - ErrorKind::ProcessTooManyFiles); + assert_matches!( + >::from(io::Error::from_raw_os_error(ENFILE)).kind(), + ErrorKind::ProcessTooManyFiles + ); - assert_matches!( - >::from( - io::Error::from_raw_os_error(EMFILE) - ).kind(), - ErrorKind::SystemTooManyFiles); + assert_matches!( + >::from(io::Error::from_raw_os_error(EMFILE)).kind(), + ErrorKind::SystemTooManyFiles + ); - assert_matches!( - >::from( - io::Error::from_raw_os_error(0) - ).kind(), - ErrorKind::Io(_)); + assert_matches!( + >::from(io::Error::from_raw_os_error(0)).kind(), + ErrorKind::Io(_) + ); } diff --git a/util/network/src/lib.rs b/util/network/src/lib.rs index 9e6f71fdd..ed5c47d6d 100644 --- a/util/network/src/lib.rs +++ b/util/network/src/lib.rs @@ -14,23 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-#![recursion_limit="128"] +#![recursion_limit = "128"] -extern crate parity_crypto as crypto; extern crate ethcore_io as io; extern crate ethereum_types; extern crate ethkey; -extern crate rlp; extern crate ipnetwork; -extern crate parity_snappy as snappy; extern crate libc; +extern crate parity_crypto as crypto; +extern crate parity_snappy as snappy; +extern crate rlp; extern crate semver; extern crate serde; #[macro_use] extern crate serde_derive; -#[cfg(test)] #[macro_use] +#[cfg(test)] +#[macro_use] extern crate assert_matches; #[macro_use] @@ -44,21 +45,23 @@ pub mod client_version; mod connection_filter; mod error; -pub use connection_filter::{ConnectionFilter, ConnectionDirection}; +pub use connection_filter::{ConnectionDirection, ConnectionFilter}; +pub use error::{DisconnectReason, Error, ErrorKind}; pub use io::TimerToken; -pub use error::{Error, ErrorKind, DisconnectReason}; use client_version::ClientVersion; -use std::cmp::Ordering; -use std::collections::HashMap; -use std::net::{SocketAddr, SocketAddrV4, Ipv4Addr}; -use std::str::{self, FromStr}; -use std::sync::Arc; -use std::time::Duration; -use ipnetwork::{IpNetwork, IpNetworkError}; -use ethkey::Secret; use ethereum_types::H512; +use ethkey::Secret; +use ipnetwork::{IpNetwork, IpNetworkError}; use rlp::{Decodable, DecoderError, Rlp}; +use std::{ + cmp::Ordering, + collections::HashMap, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + str::{self, FromStr}, + sync::Arc, + time::Duration, +}; /// Protocol handler level packet id pub type PacketId = u8; @@ -74,324 +77,351 @@ pub type PeerId = usize; /// Messages used to communitate with the event loop from other threads. #[derive(Clone)] pub enum NetworkIoMessage { - /// Register a new protocol handler. - AddHandler { - /// Handler shared instance. - handler: Arc, - /// Protocol Id. - protocol: ProtocolId, - /// Supported protocol versions and number of packet IDs reserved by the protocol (packet count). 
- versions: Vec<(u8, u8)>, - }, - /// Register a new protocol timer - AddTimer { - /// Protocol Id. - protocol: ProtocolId, - /// Timer token. - token: TimerToken, - /// Timer delay. - delay: Duration, - }, - /// Initliaze public interface. - InitPublicInterface, - /// Disconnect a peer. - Disconnect(PeerId), - /// Disconnect and temporary disable peer. - DisablePeer(PeerId), - /// Network has been started with the host as the given enode. - NetworkStarted(String), + /// Register a new protocol handler. + AddHandler { + /// Handler shared instance. + handler: Arc, + /// Protocol Id. + protocol: ProtocolId, + /// Supported protocol versions and number of packet IDs reserved by the protocol (packet count). + versions: Vec<(u8, u8)>, + }, + /// Register a new protocol timer + AddTimer { + /// Protocol Id. + protocol: ProtocolId, + /// Timer token. + token: TimerToken, + /// Timer delay. + delay: Duration, + }, + /// Initliaze public interface. + InitPublicInterface, + /// Disconnect a peer. + Disconnect(PeerId), + /// Disconnect and temporary disable peer. + DisablePeer(PeerId), + /// Network has been started with the host as the given enode. + NetworkStarted(String), } /// Shared session information #[derive(Debug, Clone)] pub struct SessionInfo { - /// Peer public key - pub id: Option, - /// Peer client ID - pub client_version: ClientVersion, - /// Peer RLPx protocol version - pub protocol_version: u32, - /// Session protocol capabilities - pub capabilities: Vec, - /// Peer protocol capabilities - pub peer_capabilities: Vec, - /// Peer ping delay - pub ping: Option, - /// True if this session was originated by us. 
- pub originated: bool, - /// Remote endpoint address of the session - pub remote_address: String, - /// Local endpoint address of the session - pub local_address: String, + /// Peer public key + pub id: Option, + /// Peer client ID + pub client_version: ClientVersion, + /// Peer RLPx protocol version + pub protocol_version: u32, + /// Session protocol capabilities + pub capabilities: Vec, + /// Peer protocol capabilities + pub peer_capabilities: Vec, + /// Peer ping delay + pub ping: Option, + /// True if this session was originated by us. + pub originated: bool, + /// Remote endpoint address of the session + pub remote_address: String, + /// Local endpoint address of the session + pub local_address: String, } #[derive(Debug, Clone, PartialEq, Eq)] pub struct PeerCapabilityInfo { - pub protocol: ProtocolId, - pub version: u8, + pub protocol: ProtocolId, + pub version: u8, } impl Decodable for PeerCapabilityInfo { - fn decode(rlp: &Rlp) -> Result { - let p: Vec = rlp.val_at(0)?; - if p.len() != 3 { - return Err(DecoderError::Custom("Invalid subprotocol string length. Should be 3")); - } - let mut p2: ProtocolId = [0u8; 3]; - p2.clone_from_slice(&p); - Ok(PeerCapabilityInfo { - protocol: p2, - version: rlp.val_at(1)? - }) - } + fn decode(rlp: &Rlp) -> Result { + let p: Vec = rlp.val_at(0)?; + if p.len() != 3 { + return Err(DecoderError::Custom( + "Invalid subprotocol string length. 
Should be 3", + )); + } + let mut p2: ProtocolId = [0u8; 3]; + p2.clone_from_slice(&p); + Ok(PeerCapabilityInfo { + protocol: p2, + version: rlp.val_at(1)?, + }) + } } impl ToString for PeerCapabilityInfo { - fn to_string(&self) -> String { - format!("{}/{}", str::from_utf8(&self.protocol[..]).unwrap_or("???"), self.version) - } + fn to_string(&self) -> String { + format!( + "{}/{}", + str::from_utf8(&self.protocol[..]).unwrap_or("???"), + self.version + ) + } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct SessionCapabilityInfo { - pub protocol: [u8; 3], - pub version: u8, - pub packet_count: u8, - pub id_offset: u8, + pub protocol: [u8; 3], + pub version: u8, + pub packet_count: u8, + pub id_offset: u8, } impl PartialOrd for SessionCapabilityInfo { - fn partial_cmp(&self, other: &SessionCapabilityInfo) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &SessionCapabilityInfo) -> Option { + Some(self.cmp(other)) + } } impl Ord for SessionCapabilityInfo { - fn cmp(&self, b: &SessionCapabilityInfo) -> Ordering { - // By protocol id first - if self.protocol != b.protocol { - return self.protocol.cmp(&b.protocol); - } - // By version - self.version.cmp(&b.version) - } + fn cmp(&self, b: &SessionCapabilityInfo) -> Ordering { + // By protocol id first + if self.protocol != b.protocol { + return self.protocol.cmp(&b.protocol); + } + // By version + self.version.cmp(&b.version) + } } /// Network service configuration #[derive(Debug, PartialEq, Clone)] pub struct NetworkConfiguration { - /// Directory path to store general network configuration. None means nothing will be saved - pub config_path: Option, - /// Directory path to store network-specific configuration. None means nothing will be saved - pub net_config_path: Option, - /// IP address to listen for incoming connections. Listen to all connections by default - pub listen_address: Option, - /// IP address to advertise. Detected automatically if none. 
- pub public_address: Option, - /// Port for UDP connections, same as TCP by default - pub udp_port: Option, - /// Enable NAT configuration - pub nat_enabled: bool, - /// Enable discovery - pub discovery_enabled: bool, - /// List of initial node addresses - pub boot_nodes: Vec, - /// Use provided node key instead of default - pub use_secret: Option, - /// Minimum number of connected peers to maintain - pub min_peers: u32, - /// Maximum allowed number of peers - pub max_peers: u32, - /// Maximum handshakes - pub max_handshakes: u32, - /// Reserved protocols. Peers with protocol get additional connection slots. - pub reserved_protocols: HashMap, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// The non-reserved peer mode. - pub non_reserved_mode: NonReservedPeerMode, - /// IP filter - pub ip_filter: IpFilter, - /// Client identifier - pub client_version: String, + /// Directory path to store general network configuration. None means nothing will be saved + pub config_path: Option, + /// Directory path to store network-specific configuration. None means nothing will be saved + pub net_config_path: Option, + /// IP address to listen for incoming connections. Listen to all connections by default + pub listen_address: Option, + /// IP address to advertise. Detected automatically if none. + pub public_address: Option, + /// Port for UDP connections, same as TCP by default + pub udp_port: Option, + /// Enable NAT configuration + pub nat_enabled: bool, + /// Enable discovery + pub discovery_enabled: bool, + /// List of initial node addresses + pub boot_nodes: Vec, + /// Use provided node key instead of default + pub use_secret: Option, + /// Minimum number of connected peers to maintain + pub min_peers: u32, + /// Maximum allowed number of peers + pub max_peers: u32, + /// Maximum handshakes + pub max_handshakes: u32, + /// Reserved protocols. Peers with protocol get additional connection slots. 
+ pub reserved_protocols: HashMap, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// The non-reserved peer mode. + pub non_reserved_mode: NonReservedPeerMode, + /// IP filter + pub ip_filter: IpFilter, + /// Client identifier + pub client_version: String, } impl Default for NetworkConfiguration { - fn default() -> Self { - NetworkConfiguration::new() - } + fn default() -> Self { + NetworkConfiguration::new() + } } impl NetworkConfiguration { - /// Create a new instance of default settings. - pub fn new() -> Self { - NetworkConfiguration { - config_path: None, - net_config_path: None, - listen_address: None, - public_address: None, - udp_port: None, - nat_enabled: true, - discovery_enabled: true, - boot_nodes: Vec::new(), - use_secret: None, - min_peers: 25, - max_peers: 50, - max_handshakes: 64, - reserved_protocols: HashMap::new(), - ip_filter: IpFilter::default(), - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Accept, - client_version: "Parity-network".into(), - } - } + /// Create a new instance of default settings. + pub fn new() -> Self { + NetworkConfiguration { + config_path: None, + net_config_path: None, + listen_address: None, + public_address: None, + udp_port: None, + nat_enabled: true, + discovery_enabled: true, + boot_nodes: Vec::new(), + use_secret: None, + min_peers: 25, + max_peers: 50, + max_handshakes: 64, + reserved_protocols: HashMap::new(), + ip_filter: IpFilter::default(), + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + client_version: "Parity-network".into(), + } + } - /// Create new default configuration with specified listen port. - pub fn new_with_port(port: u16) -> NetworkConfiguration { - let mut config = NetworkConfiguration::new(); - config.listen_address = Some(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port))); - config - } + /// Create new default configuration with specified listen port. 
+ pub fn new_with_port(port: u16) -> NetworkConfiguration { + let mut config = NetworkConfiguration::new(); + config.listen_address = Some(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new(0, 0, 0, 0), + port, + ))); + config + } - /// Create new default configuration for localhost-only connection with random port (usefull for testing) - pub fn new_local() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new(); - config.listen_address = Some(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0))); - config.nat_enabled = false; - config - } + /// Create new default configuration for localhost-only connection with random port (usefull for testing) + pub fn new_local() -> NetworkConfiguration { + let mut config = NetworkConfiguration::new(); + config.listen_address = Some(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new(127, 0, 0, 1), + 0, + ))); + config.nat_enabled = false; + config + } } /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem. pub trait NetworkContext { - /// Send a packet over the network to another peer. - fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error>; + /// Send a packet over the network to another peer. + fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error>; - /// Send a packet over the network to another peer using specified protocol. - fn send_protocol(&self, protocol: ProtocolId, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error>; + /// Send a packet over the network to another peer using specified protocol. + fn send_protocol( + &self, + protocol: ProtocolId, + peer: PeerId, + packet_id: PacketId, + data: Vec, + ) -> Result<(), Error>; - /// Respond to a current network message. Panics if no there is no packet in the context. If the session is expired returns nothing. - fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), Error>; + /// Respond to a current network message. 
Panics if no there is no packet in the context. If the session is expired returns nothing. + fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), Error>; - /// Disconnect a peer and prevent it from connecting again. - fn disable_peer(&self, peer: PeerId); + /// Disconnect a peer and prevent it from connecting again. + fn disable_peer(&self, peer: PeerId); - /// Disconnect peer. Reconnect can be attempted later. - fn disconnect_peer(&self, peer: PeerId); + /// Disconnect peer. Reconnect can be attempted later. + fn disconnect_peer(&self, peer: PeerId); - /// Check if the session is still active. - fn is_expired(&self) -> bool; + /// Check if the session is still active. + fn is_expired(&self) -> bool; - /// Register a new IO timer. 'IoHandler::timeout' will be called with the token. - fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), Error>; + /// Register a new IO timer. 'IoHandler::timeout' will be called with the token. + fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), Error>; - /// Returns peer identification string - fn peer_client_version(&self, peer: PeerId) -> ClientVersion; + /// Returns peer identification string + fn peer_client_version(&self, peer: PeerId) -> ClientVersion; - /// Returns information on p2p session - fn session_info(&self, peer: PeerId) -> Option; + /// Returns information on p2p session + fn session_info(&self, peer: PeerId) -> Option; - /// Returns max version for a given protocol. - fn protocol_version(&self, protocol: ProtocolId, peer: PeerId) -> Option; + /// Returns max version for a given protocol. + fn protocol_version(&self, protocol: ProtocolId, peer: PeerId) -> Option; - /// Returns this object's subprotocol name. - fn subprotocol_name(&self) -> ProtocolId; + /// Returns this object's subprotocol name. + fn subprotocol_name(&self) -> ProtocolId; - /// Returns whether the given peer ID is a reserved peer. 
- fn is_reserved_peer(&self, peer: PeerId) -> bool; + /// Returns whether the given peer ID is a reserved peer. + fn is_reserved_peer(&self, peer: PeerId) -> bool; - /// Returns the size the payload shouldn't exceed - fn payload_soft_limit(&self) -> usize; + /// Returns the size the payload shouldn't exceed + fn payload_soft_limit(&self) -> usize; } -impl<'a, T> NetworkContext for &'a T where T: ?Sized + NetworkContext { - fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error> { - (**self).send(peer, packet_id, data) - } +impl<'a, T> NetworkContext for &'a T +where + T: ?Sized + NetworkContext, +{ + fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error> { + (**self).send(peer, packet_id, data) + } - fn send_protocol(&self, protocol: ProtocolId, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error> { - (**self).send_protocol(protocol, peer, packet_id, data) - } + fn send_protocol( + &self, + protocol: ProtocolId, + peer: PeerId, + packet_id: PacketId, + data: Vec, + ) -> Result<(), Error> { + (**self).send_protocol(protocol, peer, packet_id, data) + } - fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), Error> { - (**self).respond(packet_id, data) - } + fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), Error> { + (**self).respond(packet_id, data) + } - fn disable_peer(&self, peer: PeerId) { - (**self).disable_peer(peer) - } + fn disable_peer(&self, peer: PeerId) { + (**self).disable_peer(peer) + } - fn disconnect_peer(&self, peer: PeerId) { - (**self).disconnect_peer(peer) - } + fn disconnect_peer(&self, peer: PeerId) { + (**self).disconnect_peer(peer) + } - fn is_expired(&self) -> bool { - (**self).is_expired() - } + fn is_expired(&self) -> bool { + (**self).is_expired() + } - fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), Error> { - (**self).register_timer(token, delay) - } + fn register_timer(&self, token: TimerToken, delay: Duration) -> 
Result<(), Error> { + (**self).register_timer(token, delay) + } - fn peer_client_version(&self, peer: PeerId) -> ClientVersion { - (**self).peer_client_version(peer) - } + fn peer_client_version(&self, peer: PeerId) -> ClientVersion { + (**self).peer_client_version(peer) + } - fn session_info(&self, peer: PeerId) -> Option { - (**self).session_info(peer) - } + fn session_info(&self, peer: PeerId) -> Option { + (**self).session_info(peer) + } - fn protocol_version(&self, protocol: ProtocolId, peer: PeerId) -> Option { - (**self).protocol_version(protocol, peer) - } + fn protocol_version(&self, protocol: ProtocolId, peer: PeerId) -> Option { + (**self).protocol_version(protocol, peer) + } - fn subprotocol_name(&self) -> ProtocolId { - (**self).subprotocol_name() - } + fn subprotocol_name(&self) -> ProtocolId { + (**self).subprotocol_name() + } - fn is_reserved_peer(&self, peer: PeerId) -> bool { - (**self).is_reserved_peer(peer) - } + fn is_reserved_peer(&self, peer: PeerId) -> bool { + (**self).is_reserved_peer(peer) + } - fn payload_soft_limit(&self) -> usize { - (**self).payload_soft_limit() - } + fn payload_soft_limit(&self) -> usize { + (**self).payload_soft_limit() + } } /// Network IO protocol handler. This needs to be implemented for each new subprotocol. /// All the handler function are called from within IO event loop. /// `Message` is the type for message data. pub trait NetworkProtocolHandler: Sync + Send { - /// Initialize the handler - fn initialize(&self, _io: &NetworkContext) {} - /// Called when new network packet received. - fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]); - /// Called when new peer is connected. Only called when peer supports the same protocol. - fn connected(&self, io: &NetworkContext, peer: &PeerId); - /// Called when a previously connected peer disconnects. 
- fn disconnected(&self, io: &NetworkContext, peer: &PeerId); - /// Timer function called after a timeout created with `NetworkContext::timeout`. - fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) {} + /// Initialize the handler + fn initialize(&self, _io: &NetworkContext) {} + /// Called when new network packet received. + fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]); + /// Called when new peer is connected. Only called when peer supports the same protocol. + fn connected(&self, io: &NetworkContext, peer: &PeerId); + /// Called when a previously connected peer disconnects. + fn disconnected(&self, io: &NetworkContext, peer: &PeerId); + /// Timer function called after a timeout created with `NetworkContext::timeout`. + fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) {} } /// Non-reserved peer modes. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum NonReservedPeerMode { - /// Accept them. This is the default. - Accept, - /// Deny them. - Deny, + /// Accept them. This is the default. + Accept, + /// Deny them. + Deny, } impl NonReservedPeerMode { - /// Attempt to parse the peer mode from a string. - pub fn parse(s: &str) -> Option { - match s { - "accept" => Some(NonReservedPeerMode::Accept), - "deny" => Some(NonReservedPeerMode::Deny), - _ => None, - } - } + /// Attempt to parse the peer mode from a string. + pub fn parse(s: &str) -> Option { + match s { + "accept" => Some(NonReservedPeerMode::Accept), + "deny" => Some(NonReservedPeerMode::Deny), + _ => None, + } + } } #[derive(Clone, Debug, PartialEq, Eq)] @@ -423,7 +453,9 @@ impl IpFilter { "none" => filter.predefined = AllowIP::None, custom => { if custom.starts_with("-") { - filter.custom_block.push(IpNetwork::from_str(&custom.to_owned().split_off(1))?) + filter + .custom_block + .push(IpNetwork::from_str(&custom.to_owned().split_off(1))?) } else { filter.custom_allow.push(IpNetwork::from_str(custom)?) 
} @@ -437,12 +469,12 @@ impl IpFilter { /// IP fiter #[derive(Clone, Debug, PartialEq, Eq)] pub enum AllowIP { - /// Connect to any address - All, - /// Connect to private network only - Private, - /// Connect to public network only - Public, + /// Connect to any address + All, + /// Connect to private network only + Private, + /// Connect to public network only + Public, /// Block all addresses None, } diff --git a/util/panic-hook/src/lib.rs b/util/panic-hook/src/lib.rs index 03501a47f..6acd4d8b9 100644 --- a/util/panic-hook/src/lib.rs +++ b/util/panic-hook/src/lib.rs @@ -18,17 +18,18 @@ extern crate backtrace; -use std::panic::{self, PanicInfo}; -use std::thread; -use std::process; use backtrace::Backtrace; +use std::{ + panic::{self, PanicInfo}, + process, thread, +}; /// Set the panic hook to write to stderr and abort the process when a panic happens. pub fn set_abort() { - set_with(|msg| { - eprintln!("{}", msg); - process::abort() - }); + set_with(|msg| { + eprintln!("{}", msg); + process::abort() + }); } /// Set the panic hook with a closure to be called. The closure receives the panic message. @@ -38,12 +39,13 @@ pub fn set_abort() { /// /// If you panic within the closure, a double panic happens and the process will stop. pub fn set_with(f: F) -where F: Fn(&str) + Send + Sync + 'static +where + F: Fn(&str) + Send + Sync + 'static, { - panic::set_hook(Box::new(move |info| { - let msg = gen_panic_msg(info); - f(&msg); - })); + panic::set_hook(Box::new(move |info| { + let msg = gen_panic_msg(info); + f(&msg); + })); } static ABOUT_PANIC: &str = " @@ -53,24 +55,25 @@ This is a bug. 
Please report it at: "; fn gen_panic_msg(info: &PanicInfo) -> String { - let location = info.location(); - let file = location.as_ref().map(|l| l.file()).unwrap_or(""); - let line = location.as_ref().map(|l| l.line()).unwrap_or(0); + let location = info.location(); + let file = location.as_ref().map(|l| l.file()).unwrap_or(""); + let line = location.as_ref().map(|l| l.line()).unwrap_or(0); - let msg = match info.payload().downcast_ref::<&'static str>() { - Some(s) => *s, - None => match info.payload().downcast_ref::() { - Some(s) => &s[..], - None => "Box", - } - }; + let msg = match info.payload().downcast_ref::<&'static str>() { + Some(s) => *s, + None => match info.payload().downcast_ref::() { + Some(s) => &s[..], + None => "Box", + }, + }; - let thread = thread::current(); - let name = thread.name().unwrap_or(""); + let thread = thread::current(); + let name = thread.name().unwrap_or(""); - let backtrace = Backtrace::new(); + let backtrace = Backtrace::new(); - format!(r#" + format!( + r#" ==================== @@ -78,5 +81,12 @@ fn gen_panic_msg(info: &PanicInfo) -> String { Thread '{name}' panicked at '{msg}', {file}:{line} {about} -"#, backtrace = backtrace, name = name, msg = msg, file = file, line = line, about = ABOUT_PANIC) +"#, + backtrace = backtrace, + name = name, + msg = msg, + file = file, + line = line, + about = ABOUT_PANIC + ) } diff --git a/util/patricia-trie-ethereum/src/lib.rs b/util/patricia-trie-ethereum/src/lib.rs index ab44f4e83..600f8506f 100644 --- a/util/patricia-trie-ethereum/src/lib.rs +++ b/util/patricia-trie-ethereum/src/lib.rs @@ -16,13 +16,13 @@ //! 
Façade crate for `patricia_trie` for Ethereum specific impls -pub extern crate trie_db as trie; // `pub` because we need to import this crate for the tests in `patricia_trie` and there were issues: https://gist.github.com/dvdplm/869251ee557a1b4bd53adc7c971979aa extern crate elastic_array; -extern crate parity_bytes; extern crate ethereum_types; extern crate hash_db; extern crate keccak_hasher; +extern crate parity_bytes; extern crate rlp; +pub extern crate trie_db as trie; // `pub` because we need to import this crate for the tests in `patricia_trie` and there were issues: https://gist.github.com/dvdplm/869251ee557a1b4bd53adc7c971979aa mod rlp_node_codec; diff --git a/util/patricia-trie-ethereum/src/rlp_node_codec.rs b/util/patricia-trie-ethereum/src/rlp_node_codec.rs index ea8aea8a7..dc2fcdd0b 100644 --- a/util/patricia-trie-ethereum/src/rlp_node_codec.rs +++ b/util/patricia-trie-ethereum/src/rlp_node_codec.rs @@ -20,115 +20,131 @@ use elastic_array::ElasticArray128; use ethereum_types::H256; use hash_db::Hasher; use keccak_hasher::KeccakHasher; -use rlp::{DecoderError, RlpStream, Rlp, Prototype}; +use rlp::{DecoderError, Prototype, Rlp, RlpStream}; use std::marker::PhantomData; -use trie::{NibbleSlice, NodeCodec, node::Node, ChildReference}; +use trie::{node::Node, ChildReference, NibbleSlice, NodeCodec}; /// Concrete implementation of a `NodeCodec` with Rlp encoding, generic over the `Hasher` #[derive(Default, Clone)] -pub struct RlpNodeCodec {mark: PhantomData} +pub struct RlpNodeCodec { + mark: PhantomData, +} -const HASHED_NULL_NODE_BYTES : [u8;32] = [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21]; -const HASHED_NULL_NODE : H256 = H256( HASHED_NULL_NODE_BYTES ); +const HASHED_NULL_NODE_BYTES: [u8; 32] = [ + 0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, + 0x5b, 0x48, 
0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21, +]; +const HASHED_NULL_NODE: H256 = H256(HASHED_NULL_NODE_BYTES); // NOTE: what we'd really like here is: // `impl NodeCodec for RlpNodeCodec where H::Out: Decodable` // but due to the current limitations of Rust const evaluation we can't // do `const HASHED_NULL_NODE: H::Out = H::Out( … … )`. Perhaps one day soon? impl NodeCodec for RlpNodeCodec { - type Error = DecoderError; - fn hashed_null_node() -> ::Out { - HASHED_NULL_NODE - } - fn decode(data: &[u8]) -> ::std::result::Result { - let r = Rlp::new(data); - match r.prototype()? { - // either leaf or extension - decode first item with NibbleSlice::??? - // and use is_leaf return to figure out which. - // if leaf, second item is a value (is_data()) - // if extension, second item is a node (either SHA3 to be looked up and - // fed back into this function or inline RLP which can be fed back into this function). - Prototype::List(2) => match NibbleSlice::from_encoded(r.at(0)?.data()?) { - (slice, true) => Ok(Node::Leaf(slice, r.at(1)?.data()?)), - (slice, false) => Ok(Node::Extension(slice, r.at(1)?.as_raw())), - }, - // branch - first 16 are nodes, 17th is a value (or empty). - Prototype::List(17) => { - let mut nodes = [None as Option<&[u8]>; 16]; - for i in 0..16 { - let v = r.at(i)?; - if v.is_empty() { - nodes[i] = None; - } else { - nodes[i] = Some(v.as_raw()); - } - } - Ok(Node::Branch(nodes, if r.at(16)?.is_empty() { None } else { Some(r.at(16)?.data()?) })) - }, - // an empty branch index. - Prototype::Data(0) => Ok(Node::Empty), - // something went wrong. 
- _ => Err(DecoderError::Custom("Rlp is not valid.")) - } - } - fn try_decode_hash(data: &[u8]) -> Option<::Out> { - let r = Rlp::new(data); - if r.is_data() && r.size() == KeccakHasher::LENGTH { - Some(r.as_val().expect("Hash is the correct size; qed")) - } else { - None - } - } - fn is_empty_node(data: &[u8]) -> bool { - Rlp::new(data).is_empty() - } - fn empty_node() -> Vec { - let mut stream = RlpStream::new(); - stream.append_empty_data(); - stream.drain() - } + type Error = DecoderError; + fn hashed_null_node() -> ::Out { + HASHED_NULL_NODE + } + fn decode(data: &[u8]) -> ::std::result::Result { + let r = Rlp::new(data); + match r.prototype()? { + // either leaf or extension - decode first item with NibbleSlice::??? + // and use is_leaf return to figure out which. + // if leaf, second item is a value (is_data()) + // if extension, second item is a node (either SHA3 to be looked up and + // fed back into this function or inline RLP which can be fed back into this function). + Prototype::List(2) => match NibbleSlice::from_encoded(r.at(0)?.data()?) { + (slice, true) => Ok(Node::Leaf(slice, r.at(1)?.data()?)), + (slice, false) => Ok(Node::Extension(slice, r.at(1)?.as_raw())), + }, + // branch - first 16 are nodes, 17th is a value (or empty). + Prototype::List(17) => { + let mut nodes = [None as Option<&[u8]>; 16]; + for i in 0..16 { + let v = r.at(i)?; + if v.is_empty() { + nodes[i] = None; + } else { + nodes[i] = Some(v.as_raw()); + } + } + Ok(Node::Branch( + nodes, + if r.at(16)?.is_empty() { + None + } else { + Some(r.at(16)?.data()?) + }, + )) + } + // an empty branch index. + Prototype::Data(0) => Ok(Node::Empty), + // something went wrong. 
+ _ => Err(DecoderError::Custom("Rlp is not valid.")), + } + } + fn try_decode_hash(data: &[u8]) -> Option<::Out> { + let r = Rlp::new(data); + if r.is_data() && r.size() == KeccakHasher::LENGTH { + Some(r.as_val().expect("Hash is the correct size; qed")) + } else { + None + } + } + fn is_empty_node(data: &[u8]) -> bool { + Rlp::new(data).is_empty() + } + fn empty_node() -> Vec { + let mut stream = RlpStream::new(); + stream.append_empty_data(); + stream.drain() + } - fn leaf_node(partial: &[u8], value: &[u8]) -> Vec { - let mut stream = RlpStream::new_list(2); - stream.append(&partial); - stream.append(&value); - stream.drain() - } + fn leaf_node(partial: &[u8], value: &[u8]) -> Vec { + let mut stream = RlpStream::new_list(2); + stream.append(&partial); + stream.append(&value); + stream.drain() + } - fn ext_node(partial: &[u8], child_ref: ChildReference<::Out>) -> Vec { - let mut stream = RlpStream::new_list(2); - stream.append(&partial); - match child_ref { - ChildReference::Hash(h) => stream.append(&h), - ChildReference::Inline(inline_data, len) => { - let bytes = &AsRef::<[u8]>::as_ref(&inline_data)[..len]; - stream.append_raw(bytes, 1) - }, - }; - stream.drain() - } + fn ext_node( + partial: &[u8], + child_ref: ChildReference<::Out>, + ) -> Vec { + let mut stream = RlpStream::new_list(2); + stream.append(&partial); + match child_ref { + ChildReference::Hash(h) => stream.append(&h), + ChildReference::Inline(inline_data, len) => { + let bytes = &AsRef::<[u8]>::as_ref(&inline_data)[..len]; + stream.append_raw(bytes, 1) + } + }; + stream.drain() + } - // fn branch_node(children: I, value: Option>) -> Vec - fn branch_node(children: I, value: Option>) -> Vec - where I: IntoIterator::Out>>> - { - let mut stream = RlpStream::new_list(17); - for child_ref in children { - match child_ref { - Some(c) => match c { - ChildReference::Hash(h) => stream.append(&h), - ChildReference::Inline(inline_data, len) => { - let bytes = &AsRef::<[u8]>::as_ref(&inline_data)[..len]; - 
stream.append_raw(bytes, 1) - }, - }, - None => stream.append_empty_data() - }; - } - if let Some(value) = value { - stream.append(&&*value); - } else { - stream.append_empty_data(); - } - stream.drain() - } + // fn branch_node(children: I, value: Option>) -> Vec + fn branch_node(children: I, value: Option>) -> Vec + where + I: IntoIterator::Out>>>, + { + let mut stream = RlpStream::new_list(17); + for child_ref in children { + match child_ref { + Some(c) => match c { + ChildReference::Hash(h) => stream.append(&h), + ChildReference::Inline(inline_data, len) => { + let bytes = &AsRef::<[u8]>::as_ref(&inline_data)[..len]; + stream.append_raw(bytes, 1) + } + }, + None => stream.append_empty_data(), + }; + } + if let Some(value) = value { + stream.append(&&*value); + } else { + stream.append_empty_data(); + } + stream.drain() + } } diff --git a/util/registrar/src/lib.rs b/util/registrar/src/lib.rs index d07e17c1b..bcbbad1da 100644 --- a/util/registrar/src/lib.rs +++ b/util/registrar/src/lib.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -extern crate futures; extern crate ethabi; +extern crate futures; extern crate keccak_hash; #[macro_use] @@ -24,4 +24,4 @@ extern crate ethabi_derive; extern crate ethabi_contract; mod registrar; -pub use registrar::{Registrar, RegistrarClient, Synchronous, Asynchronous}; +pub use registrar::{Asynchronous, Registrar, RegistrarClient, Synchronous}; diff --git a/util/registrar/src/registrar.rs b/util/registrar/src/registrar.rs index cd6d35660..7dc290e13 100644 --- a/util/registrar/src/registrar.rs +++ b/util/registrar/src/registrar.rs @@ -14,59 +14,65 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-use futures::{Future, future, IntoFuture}; use ethabi::{Address, Bytes}; -use std::sync::Arc; +use futures::{future, Future, IntoFuture}; use keccak_hash::keccak; +use std::sync::Arc; use_contract!(registrar, "res/registrar.json"); // Maps a domain name to an Ethereum address const DNS_A_RECORD: &'static str = "A"; -pub type Asynchronous = Box + Send>; +pub type Asynchronous = Box + Send>; pub type Synchronous = Result; /// Registrar is dedicated interface to access the registrar contract /// which in turn generates an address when a client requests one pub struct Registrar { - client: Arc>, + client: Arc>, } impl Registrar { - /// Registrar constructor - pub fn new(client: Arc>) -> Self { - Self { - client: client, - } - } + /// Registrar constructor + pub fn new(client: Arc>) -> Self { + Self { client: client } + } - /// Generate an address for the given key - pub fn get_address<'a>(&self, key: &'a str) -> Box + Send> { - // Address of the registrar itself - let registrar_address = match self.client.registrar_address() { - Ok(a) => a, - Err(e) => return Box::new(future::err(e)), - }; + /// Generate an address for the given key + pub fn get_address<'a>( + &self, + key: &'a str, + ) -> Box + Send> { + // Address of the registrar itself + let registrar_address = match self.client.registrar_address() { + Ok(a) => a, + Err(e) => return Box::new(future::err(e)), + }; - let hashed_key: [u8; 32] = keccak(key).into(); - let id = registrar::functions::get_address::encode_input(hashed_key, DNS_A_RECORD); + let hashed_key: [u8; 32] = keccak(key).into(); + let id = registrar::functions::get_address::encode_input(hashed_key, DNS_A_RECORD); - let future = self.client.call_contract(registrar_address, id) - .and_then(move |address| registrar::functions::get_address::decode_output(&address).map_err(|e| e.to_string())); + let future = self + .client + .call_contract(registrar_address, id) + .and_then(move |address| { + registrar::functions::get_address::decode_output(&address) + 
.map_err(|e| e.to_string()) + }); - Box::new(future) - } + Box::new(future) + } } /// Registrar contract interface /// Should execute transaction using current blockchain state. pub trait RegistrarClient: Send + Sync { - /// Specifies synchronous or asynchronous communication - type Call: IntoFuture; + /// Specifies synchronous or asynchronous communication + type Call: IntoFuture; - /// Get registrar address - fn registrar_address(&self) -> Result; - /// Call Contract - fn call_contract(&self, address: Address, data: Bytes) -> Self::Call; + /// Get registrar address + fn registrar_address(&self) -> Result; + /// Call Contract + fn call_contract(&self, address: Address, data: Bytes) -> Self::Call; } diff --git a/util/rlp-compress/src/common.rs b/util/rlp-compress/src/common.rs index 81767955b..846b66010 100644 --- a/util/rlp-compress/src/common.rs +++ b/util/rlp-compress/src/common.rs @@ -11,32 +11,191 @@ use Swapper; lazy_static! { - /// Swapper for snapshot compression. - pub static ref SNAPSHOT_SWAPPER: Swapper<'static> = Swapper::new(EMPTY_RLPS, INVALID_RLPS); + /// Swapper for snapshot compression. + pub static ref SNAPSHOT_SWAPPER: Swapper<'static> = Swapper::new(EMPTY_RLPS, INVALID_RLPS); } lazy_static! { - /// Swapper with common long RLPs, up to 127 can be added. - pub static ref BLOCKS_SWAPPER: Swapper<'static> = Swapper::new(COMMON_RLPS, INVALID_RLPS); + /// Swapper with common long RLPs, up to 127 can be added. 
+ pub static ref BLOCKS_SWAPPER: Swapper<'static> = Swapper::new(COMMON_RLPS, INVALID_RLPS); } static EMPTY_RLPS: &'static [&'static [u8]] = &[ - // RLP of KECCAK_NULL_RLP - &[160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33], - // RLP of KECCAK_EMPTY - &[160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112] + // RLP of KECCAK_NULL_RLP + &[ + 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, + 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, + ], + // RLP of KECCAK_EMPTY + &[ + 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, + 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112, + ], ]; static COMMON_RLPS: &'static [&'static [u8]] = &[ - // RLP of KECCAK_NULL_RLP - &[160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33], - // RLP of KECCAK_EMPTY - &[160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112], - // Other RLPs found in blocks DB using the test below. 
- &[160, 29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71], - &[148, 50, 190, 52, 59, 148, 248, 96, 18, 77, 196, 254, 226, 120, 253, 203, 211, 140, 16, 45, 136], - &[148, 82, 188, 68, 213, 55, 131, 9, 238, 42, 191, 21, 57, 191, 113, 222, 27, 125, 123, 227, 181], - &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + // RLP of KECCAK_NULL_RLP + &[ + 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, + 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, + ], + // RLP of KECCAK_EMPTY + &[ + 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, + 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112, + ], + // Other RLPs found in blocks DB using the test below. 
+ &[ + 160, 29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, + 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71, + ], + &[ + 148, 50, 190, 52, 59, 148, 248, 96, 18, 77, 196, 254, 226, 120, 253, 203, 211, 140, 16, 45, + 136, + ], + &[ + 148, 82, 188, 68, 213, 55, 131, 9, 238, 42, 191, 21, 57, 191, 113, 222, 27, 125, 123, 227, + 181, + ], + &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], ]; -static INVALID_RLPS: &'static [&'static [u8]] = &[&[0x81, 0x0], &[0x81, 0x1], &[0x81, 0x2], &[0x81, 0x3], &[0x81, 0x4], &[0x81, 0x5], &[0x81, 0x6], &[0x81, 0x7], &[0x81, 0x8], &[0x81, 0x9], &[0x81, 0xa], &[0x81, 0xb], &[0x81, 0xc], &[0x81, 0xd], &[0x81, 0xe], &[0x81, 0xf], &[0x81, 0x10], &[0x81, 0x11], &[0x81, 0x12], &[0x81, 0x13], &[0x81, 0x14], &[0x81, 0x15], &[0x81, 0x16], &[0x81, 0x17], &[0x81, 0x18], &[0x81, 0x19], &[0x81, 0x1a], &[0x81, 0x1b], &[0x81, 0x1c], &[0x81, 0x1d], &[0x81, 0x1e], &[0x81, 0x1f], &[0x81, 0x20], &[0x81, 0x21], &[0x81, 0x22], &[0x81, 0x23], &[0x81, 0x24], &[0x81, 0x25], &[0x81, 0x26], &[0x81, 0x27], &[0x81, 0x28], &[0x81, 0x29], &[0x81, 0x2a], &[0x81, 0x2b], &[0x81, 0x2c], &[0x81, 0x2d], &[0x81, 0x2e], &[0x81, 0x2f], &[0x81, 0x30], &[0x81, 0x31], &[0x81, 0x32], &[0x81, 
0x33], &[0x81, 0x34], &[0x81, 0x35], &[0x81, 0x36], &[0x81, 0x37], &[0x81, 0x38], &[0x81, 0x39], &[0x81, 0x3a], &[0x81, 0x3b], &[0x81, 0x3c], &[0x81, 0x3d], &[0x81, 0x3e], &[0x81, 0x3f], &[0x81, 0x40], &[0x81, 0x41], &[0x81, 0x42], &[0x81, 0x43], &[0x81, 0x44], &[0x81, 0x45], &[0x81, 0x46], &[0x81, 0x47], &[0x81, 0x48], &[0x81, 0x49], &[0x81, 0x4a], &[0x81, 0x4b], &[0x81, 0x4c], &[0x81, 0x4d], &[0x81, 0x4e], &[0x81, 0x4f], &[0x81, 0x50], &[0x81, 0x51], &[0x81, 0x52], &[0x81, 0x53], &[0x81, 0x54], &[0x81, 0x55], &[0x81, 0x56], &[0x81, 0x57], &[0x81, 0x58], &[0x81, 0x59], &[0x81, 0x5a], &[0x81, 0x5b], &[0x81, 0x5c], &[0x81, 0x5d], &[0x81, 0x5e], &[0x81, 0x5f], &[0x81, 0x60], &[0x81, 0x61], &[0x81, 0x62], &[0x81, 0x63], &[0x81, 0x64], &[0x81, 0x65], &[0x81, 0x66], &[0x81, 0x67], &[0x81, 0x68], &[0x81, 0x69], &[0x81, 0x6a], &[0x81, 0x6b], &[0x81, 0x6c], &[0x81, 0x6d], &[0x81, 0x6e], &[0x81, 0x6f], &[0x81, 0x70], &[0x81, 0x71], &[0x81, 0x72], &[0x81, 0x73], &[0x81, 0x74], &[0x81, 0x75], &[0x81, 0x76], &[0x81, 0x77], &[0x81, 0x78], &[0x81, 0x79], &[0x81, 0x7a], &[0x81, 0x7b], &[0x81, 0x7c], &[0x81, 0x7d], &[0x81, 0x7e]]; +static INVALID_RLPS: &'static [&'static [u8]] = &[ + &[0x81, 0x0], + &[0x81, 0x1], + &[0x81, 0x2], + &[0x81, 0x3], + &[0x81, 0x4], + &[0x81, 0x5], + &[0x81, 0x6], + &[0x81, 0x7], + &[0x81, 0x8], + &[0x81, 0x9], + &[0x81, 0xa], + &[0x81, 0xb], + &[0x81, 0xc], + &[0x81, 0xd], + &[0x81, 0xe], + &[0x81, 0xf], + &[0x81, 0x10], + &[0x81, 0x11], + &[0x81, 0x12], + &[0x81, 0x13], + &[0x81, 0x14], + &[0x81, 0x15], + &[0x81, 0x16], + &[0x81, 0x17], + &[0x81, 0x18], + &[0x81, 0x19], + &[0x81, 0x1a], + &[0x81, 0x1b], + &[0x81, 0x1c], + &[0x81, 0x1d], + &[0x81, 0x1e], + &[0x81, 0x1f], + &[0x81, 0x20], + &[0x81, 0x21], + &[0x81, 0x22], + &[0x81, 0x23], + &[0x81, 0x24], + &[0x81, 0x25], + &[0x81, 0x26], + &[0x81, 0x27], + &[0x81, 0x28], + &[0x81, 0x29], + &[0x81, 0x2a], + &[0x81, 0x2b], + &[0x81, 0x2c], + &[0x81, 0x2d], + &[0x81, 0x2e], + &[0x81, 0x2f], + &[0x81, 
0x30], + &[0x81, 0x31], + &[0x81, 0x32], + &[0x81, 0x33], + &[0x81, 0x34], + &[0x81, 0x35], + &[0x81, 0x36], + &[0x81, 0x37], + &[0x81, 0x38], + &[0x81, 0x39], + &[0x81, 0x3a], + &[0x81, 0x3b], + &[0x81, 0x3c], + &[0x81, 0x3d], + &[0x81, 0x3e], + &[0x81, 0x3f], + &[0x81, 0x40], + &[0x81, 0x41], + &[0x81, 0x42], + &[0x81, 0x43], + &[0x81, 0x44], + &[0x81, 0x45], + &[0x81, 0x46], + &[0x81, 0x47], + &[0x81, 0x48], + &[0x81, 0x49], + &[0x81, 0x4a], + &[0x81, 0x4b], + &[0x81, 0x4c], + &[0x81, 0x4d], + &[0x81, 0x4e], + &[0x81, 0x4f], + &[0x81, 0x50], + &[0x81, 0x51], + &[0x81, 0x52], + &[0x81, 0x53], + &[0x81, 0x54], + &[0x81, 0x55], + &[0x81, 0x56], + &[0x81, 0x57], + &[0x81, 0x58], + &[0x81, 0x59], + &[0x81, 0x5a], + &[0x81, 0x5b], + &[0x81, 0x5c], + &[0x81, 0x5d], + &[0x81, 0x5e], + &[0x81, 0x5f], + &[0x81, 0x60], + &[0x81, 0x61], + &[0x81, 0x62], + &[0x81, 0x63], + &[0x81, 0x64], + &[0x81, 0x65], + &[0x81, 0x66], + &[0x81, 0x67], + &[0x81, 0x68], + &[0x81, 0x69], + &[0x81, 0x6a], + &[0x81, 0x6b], + &[0x81, 0x6c], + &[0x81, 0x6d], + &[0x81, 0x6e], + &[0x81, 0x6f], + &[0x81, 0x70], + &[0x81, 0x71], + &[0x81, 0x72], + &[0x81, 0x73], + &[0x81, 0x74], + &[0x81, 0x75], + &[0x81, 0x76], + &[0x81, 0x77], + &[0x81, 0x78], + &[0x81, 0x79], + &[0x81, 0x7a], + &[0x81, 0x7b], + &[0x81, 0x7c], + &[0x81, 0x7d], + &[0x81, 0x7e], +]; diff --git a/util/rlp-compress/src/lib.rs b/util/rlp-compress/src/lib.rs index 48620ed23..9751373eb 100644 --- a/util/rlp-compress/src/lib.rs +++ b/util/rlp-compress/src/lib.rs @@ -13,97 +13,104 @@ extern crate rlp; mod common; -use std::cmp; -use std::collections::HashMap; +use common::{BLOCKS_SWAPPER, SNAPSHOT_SWAPPER}; use elastic_array::ElasticArray1024; use rlp::{Rlp, RlpStream}; -use common::{SNAPSHOT_SWAPPER, BLOCKS_SWAPPER}; +use std::{cmp, collections::HashMap}; pub fn snapshot_swapper() -> &'static Swapper<'static> { - &SNAPSHOT_SWAPPER as &Swapper + &SNAPSHOT_SWAPPER as &Swapper } pub fn blocks_swapper() -> &'static Swapper<'static> { - 
&BLOCKS_SWAPPER as &Swapper + &BLOCKS_SWAPPER as &Swapper } /// A trait used to compress rlp. pub trait Compressor { - /// Get compressed version of given rlp. - fn compressed(&self, rlp: &[u8]) -> Option<&[u8]>; + /// Get compressed version of given rlp. + fn compressed(&self, rlp: &[u8]) -> Option<&[u8]>; } /// A trait used to convert compressed rlp into it's original version. pub trait Decompressor { - /// Get decompressed rlp. - fn decompressed(&self, compressed: &[u8]) -> Option<&[u8]>; + /// Get decompressed rlp. + fn decompressed(&self, compressed: &[u8]) -> Option<&[u8]>; } /// Call this function to compress rlp. pub fn compress(c: &[u8], swapper: &Compressor) -> ElasticArray1024 { - let rlp = Rlp::new(c); - if rlp.is_data() { - ElasticArray1024::from_slice(swapper.compressed(rlp.as_raw()).unwrap_or_else(|| rlp.as_raw())) - } else { - map_rlp(&rlp, |r| compress(r.as_raw(), swapper)) - } + let rlp = Rlp::new(c); + if rlp.is_data() { + ElasticArray1024::from_slice( + swapper + .compressed(rlp.as_raw()) + .unwrap_or_else(|| rlp.as_raw()), + ) + } else { + map_rlp(&rlp, |r| compress(r.as_raw(), swapper)) + } } /// Call this function to decompress rlp. 
pub fn decompress(c: &[u8], swapper: &Decompressor) -> ElasticArray1024 { - let rlp = Rlp::new(c); - if rlp.is_data() { - ElasticArray1024::from_slice(swapper.decompressed(rlp.as_raw()).unwrap_or_else(|| rlp.as_raw())) - } else { - map_rlp(&rlp, |r| decompress(r.as_raw(), swapper)) - } + let rlp = Rlp::new(c); + if rlp.is_data() { + ElasticArray1024::from_slice( + swapper + .decompressed(rlp.as_raw()) + .unwrap_or_else(|| rlp.as_raw()), + ) + } else { + map_rlp(&rlp, |r| decompress(r.as_raw(), swapper)) + } } fn map_rlp ElasticArray1024>(rlp: &Rlp, f: F) -> ElasticArray1024 { - let mut stream = RlpStream::new_list(rlp.item_count().unwrap_or_default()); - for subrlp in rlp.iter() { - stream.append_raw(&f(&subrlp), 1); - } - stream.drain().as_slice().into() + let mut stream = RlpStream::new_list(rlp.item_count().unwrap_or_default()); + for subrlp in rlp.iter() { + stream.append_raw(&f(&subrlp), 1); + } + stream.drain().as_slice().into() } /// Stores RLPs used for compression pub struct Swapper<'a> { - compressed_to_rlp: HashMap<&'a [u8], &'a [u8]>, - rlp_to_compressed: HashMap<&'a [u8], &'a [u8]>, + compressed_to_rlp: HashMap<&'a [u8], &'a [u8]>, + rlp_to_compressed: HashMap<&'a [u8], &'a [u8]>, } impl<'a> Swapper<'a> { - /// Construct a swapper from a list of common RLPs - pub fn new(rlps_to_swap: &[&'a [u8]], compressed: &[&'a [u8]]) -> Self { - if rlps_to_swap.len() > 0x7e { - panic!("Invalid usage, only 127 RLPs can be swappable."); - } + /// Construct a swapper from a list of common RLPs + pub fn new(rlps_to_swap: &[&'a [u8]], compressed: &[&'a [u8]]) -> Self { + if rlps_to_swap.len() > 0x7e { + panic!("Invalid usage, only 127 RLPs can be swappable."); + } - let items = cmp::min(rlps_to_swap.len(), compressed.len()); - let mut compressed_to_rlp = HashMap::with_capacity(items); - let mut rlp_to_compressed = HashMap::with_capacity(items); + let items = cmp::min(rlps_to_swap.len(), compressed.len()); + let mut compressed_to_rlp = HashMap::with_capacity(items); + 
let mut rlp_to_compressed = HashMap::with_capacity(items); - for (&rlp, &compressed) in rlps_to_swap.iter().zip(compressed.iter()) { - compressed_to_rlp.insert(compressed, rlp); - rlp_to_compressed.insert(rlp, compressed); - } + for (&rlp, &compressed) in rlps_to_swap.iter().zip(compressed.iter()) { + compressed_to_rlp.insert(compressed, rlp); + rlp_to_compressed.insert(rlp, compressed); + } - Swapper { - compressed_to_rlp, - rlp_to_compressed, - } - } + Swapper { + compressed_to_rlp, + rlp_to_compressed, + } + } } impl<'a> Decompressor for Swapper<'a> { - fn decompressed(&self, compressed: &[u8]) -> Option<&[u8]> { - self.compressed_to_rlp.get(compressed).cloned() - } + fn decompressed(&self, compressed: &[u8]) -> Option<&[u8]> { + self.compressed_to_rlp.get(compressed).cloned() + } } impl<'a> Compressor for Swapper<'a> { - fn compressed(&self, rlp: &[u8]) -> Option<&[u8]> { - self.rlp_to_compressed.get(rlp).cloned() - } + fn compressed(&self, rlp: &[u8]) -> Option<&[u8]> { + self.rlp_to_compressed.get(rlp).cloned() + } } diff --git a/util/rlp-compress/tests/compress.rs b/util/rlp-compress/tests/compress.rs index 6c5a02374..3383d6eeb 100644 --- a/util/rlp-compress/tests/compress.rs +++ b/util/rlp-compress/tests/compress.rs @@ -16,51 +16,1314 @@ extern crate rlp_compress; -use rlp_compress::{compress, decompress, Swapper, snapshot_swapper, blocks_swapper, Compressor, Decompressor}; +use rlp_compress::{ + blocks_swapper, compress, decompress, snapshot_swapper, Compressor, Decompressor, Swapper, +}; #[test] fn invalid_rlp_swapper() { - let to_swap: &[&[u8]] = &[&[0x83, b'c', b'a', b't'], &[0x83, b'd', b'o', b'g']]; - let invalid_rlp: &[&[u8]] = &[&[0x81, 0x00], &[0x81, 0x01]]; - let swapper = Swapper::new(to_swap, invalid_rlp); - assert_eq!(Some(invalid_rlp[0]), swapper.compressed(&[0x83, b'c', b'a', b't'])); - assert_eq!(None, swapper.compressed(&[0x83, b'b', b'a', b't'])); - assert_eq!(Some(to_swap[1]), swapper.decompressed(invalid_rlp[1])); + let to_swap: &[&[u8]] 
= &[&[0x83, b'c', b'a', b't'], &[0x83, b'd', b'o', b'g']]; + let invalid_rlp: &[&[u8]] = &[&[0x81, 0x00], &[0x81, 0x01]]; + let swapper = Swapper::new(to_swap, invalid_rlp); + assert_eq!( + Some(invalid_rlp[0]), + swapper.compressed(&[0x83, b'c', b'a', b't']) + ); + assert_eq!(None, swapper.compressed(&[0x83, b'b', b'a', b't'])); + assert_eq!(Some(to_swap[1]), swapper.decompressed(invalid_rlp[1])); } #[test] fn simple_compression() { - let basic_account_rlp = vec![248, 68, 4, 2, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112]; - let compressed = compress(&basic_account_rlp, snapshot_swapper()); - assert_eq!(compressed.to_vec(), vec![198, 4, 2, 129, 0, 129, 1]); - let decompressed = decompress(&compressed, snapshot_swapper()); - assert_eq!(decompressed.to_vec(), basic_account_rlp); + let basic_account_rlp = vec![ + 248, 68, 4, 2, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, + 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, + 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, + 39, 59, 123, 250, 216, 4, 93, 133, 164, 112, + ]; + let compressed = compress(&basic_account_rlp, snapshot_swapper()); + assert_eq!(compressed.to_vec(), vec![198, 4, 2, 129, 0, 129, 1]); + let decompressed = decompress(&compressed, snapshot_swapper()); + assert_eq!(decompressed.to_vec(), basic_account_rlp); } #[test] fn nested_list_rlp() { - let nested_basic_account_rlp = vec![228, 4, 226, 2, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33]; + let nested_basic_account_rlp = vec![ + 228, 4, 226, 2, 160, 86, 232, 31, 23, 27, 204, 85, 166, 
255, 131, 69, 230, 146, 192, 248, + 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, + ]; - let compressed = compress(&nested_basic_account_rlp, blocks_swapper()); - assert_eq!(compressed, vec![197, 4, 195, 2, 129, 0]); - let decompressed = decompress(&compressed, blocks_swapper()); - assert_eq!(decompressed.into_vec(), nested_basic_account_rlp); - let compressed = compress(&nested_basic_account_rlp, snapshot_swapper()); - assert_eq!(compressed, vec![197, 4, 195, 2, 129, 0]); - let decompressed = decompress(&compressed, snapshot_swapper()); - assert_eq!(decompressed.into_vec(), nested_basic_account_rlp); + let compressed = compress(&nested_basic_account_rlp, blocks_swapper()); + assert_eq!(compressed, vec![197, 4, 195, 2, 129, 0]); + let decompressed = decompress(&compressed, blocks_swapper()); + assert_eq!(decompressed.into_vec(), nested_basic_account_rlp); + let compressed = compress(&nested_basic_account_rlp, snapshot_swapper()); + assert_eq!(compressed, vec![197, 4, 195, 2, 129, 0]); + let decompressed = decompress(&compressed, snapshot_swapper()); + assert_eq!(decompressed.into_vec(), nested_basic_account_rlp); } #[test] fn malformed_rlp() { - let malformed = vec![248, 81, 128, 128, 128, 128, 128, 160, 12, 51, 241, 93, 69, 218, 74, 138, 79, 115, 227, 44, 216, 81, 46, 132, 85, 235, 96, 45, 252, 48, 181, 29, 75, 141, 217, 215, 86, 160, 109, 130, 160, 140, 36, 93, 200, 109, 215, 100, 241, 246, 99, 135, 92, 168, 149, 170, 114, 9, 143, 4, 93, 25, 76, 54, 176, 119, 230, 170, 154, 105, 47, 121, 10, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128]; - assert_eq!(decompress(&malformed, blocks_swapper()).into_vec(), malformed); + let malformed = vec![ + 248, 81, 128, 128, 128, 128, 128, 160, 12, 51, 241, 93, 69, 218, 74, 138, 79, 115, 227, 44, + 216, 81, 46, 132, 85, 235, 96, 45, 252, 48, 181, 29, 75, 141, 217, 215, 86, 160, 109, 130, + 160, 140, 36, 93, 200, 109, 215, 100, 241, 246, 99, 135, 92, 168, 149, 170, 114, 9, 143, 4, + 93, 25, 76, 
54, 176, 119, 230, 170, 154, 105, 47, 121, 10, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, + ]; + assert_eq!( + decompress(&malformed, blocks_swapper()).into_vec(), + malformed + ); } #[test] fn large_block() { - let block = vec![249, 97, 87, 249, 2, 19, 160, 137, 152, 36, 115, 234, 67, 89, 207, 44, 42, 186, 128, 91, 242, 10, 16, 42, 193, 195, 2, 129, 60, 181, 150, 192, 178, 117, 15, 18, 100, 174, 249, 160, 29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71, 148, 223, 125, 126, 5, 57, 51, 181, 204, 36, 55, 47, 135, 140, 144, 230, 45, 173, 173, 93, 66, 160, 93, 42, 52, 28, 156, 139, 242, 60, 121, 90, 117, 99, 92, 182, 196, 25, 131, 16, 155, 186, 239, 137, 33, 118, 105, 232, 230, 239, 213, 240, 207, 6, 160, 59, 72, 35, 216, 124, 37, 62, 178, 34, 97, 180, 254, 212, 103, 179, 45, 247, 168, 205, 145, 7, 157, 75, 247, 83, 230, 233, 248, 97, 132, 232, 161, 160, 122, 167, 249, 196, 203, 2, 173, 180, 106, 203, 129, 214, 232, 181, 87, 39, 60, 99, 135, 6, 40, 34, 163, 118, 140, 149, 79, 241, 238, 230, 201, 194, 185, 1, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 134, 36, 160, 31, 187, 182, 29, 131, 58, 212, 207, 131, 71, 168, 13, 131, 45, 60, 7, 132, 
89, 53, 51, 233, 147, 69, 84, 67, 32, 101, 116, 104, 101, 114, 109, 105, 110, 101, 32, 45, 32, 69, 85, 49, 160, 204, 49, 229, 99, 26, 47, 30, 50, 223, 117, 111, 168, 102, 158, 12, 186, 140, 98, 193, 196, 214, 29, 13, 87, 44, 31, 216, 48, 250, 251, 148, 69, 136, 225, 206, 131, 96, 2, 39, 50, 46, 249, 95, 61, 248, 109, 1, 133, 4, 227, 178, 146, 0, 130, 82, 8, 148, 123, 45, 95, 28, 247, 139, 190, 196, 52, 39, 251, 53, 226, 79, 251, 1, 98, 34, 68, 240, 136, 18, 81, 1, 26, 180, 46, 168, 0, 128, 129, 157, 160, 31, 164, 62, 186, 38, 56, 118, 133, 24, 180, 239, 139, 254, 154, 196, 115, 8, 246, 45, 233, 227, 165, 192, 193, 7, 111, 1, 169, 2, 204, 2, 144, 160, 42, 60, 78, 200, 5, 113, 98, 65, 250, 105, 0, 164, 152, 81, 235, 154, 100, 204, 182, 141, 174, 39, 107, 127, 219, 120, 63, 221, 237, 87, 57, 9, 249, 94, 203, 52, 133, 2, 84, 11, 228, 0, 131, 44, 233, 255, 148, 137, 61, 196, 25, 119, 102, 53, 248, 253, 27, 31, 169, 147, 75, 245, 41, 174, 242, 86, 7, 128, 185, 94, 100, 194, 32, 154, 203, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 60, 131, 110, 76, 27, 36, 34, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 138, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 91, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 92, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 248, 249, 1, 245, 160, 89, 97, 73, 3, 84, 63, 105, 163, 169, 7, 237, 254, 220, 243, 13, 27, 0, 45, 215, 39, 235, 232, 
237, 22, 162, 83, 99, 164, 247, 240, 48, 66, 160, 29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71, 148, 137, 61, 196, 25, 119, 102, 53, 248, 253, 27, 31, 169, 147, 75, 245, 41, 174, 242, 86, 7, 160, 243, 100, 253, 32, 58, 153, 124, 189, 215, 216, 21, 229, 230, 182, 39, 222, 135, 217, 92, 155, 164, 143, 79, 14, 178, 128, 26, 245, 152, 197, 247, 33, 160, 98, 156, 249, 227, 33, 40, 127, 226, 250, 254, 222, 21, 37, 66, 5, 23, 142, 73, 121, 64, 233, 198, 110, 212, 131, 134, 126, 38, 218, 85, 149, 212, 160, 32, 179, 60, 112, 219, 114, 185, 205, 184, 204, 31, 210, 181, 8, 109, 97, 227, 206, 41, 177, 238, 122, 205, 193, 93, 163, 176, 107, 28, 181, 63, 19, 185, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 134, 36, 236, 76, 26, 58, 130, 131, 58, 210, 155, 131, 71, 183, 132, 130, 82, 8, 132, 89, 53, 19, 83, 160, 83, 109, 97, 114, 116, 80, 111, 111, 108, 45, 79, 76, 69, 66, 100, 120, 122, 50, 54, 106, 73, 50, 74, 65, 72, 109, 52, 48, 48, 48, 48, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 203, 86, 242, 75, 191, 176, 245, 37, 106, 66, 34, 44, 77, 161, 164, 143, 56, 229, 102, 192, 191, 
114, 46, 137, 15, 59, 249, 198, 20, 64, 167, 176, 167, 146, 234, 72, 147, 122, 168, 34, 174, 210, 94, 131, 105, 110, 180, 48, 248, 10, 127, 156, 194, 83, 60, 48, 89, 126, 86, 90, 218, 235, 170, 15, 206, 23, 82, 13, 250, 255, 240, 112, 168, 137, 111, 204, 205, 125, 24, 81, 118, 165, 92, 28, 127, 50, 223, 173, 231, 234, 212, 181, 9, 113, 72, 250, 97, 126, 194, 18, 102, 51, 189, 147, 246, 2, 201, 27, 206, 15, 130, 172, 1, 206, 219, 204, 49, 211, 162, 78, 101, 26, 16, 73, 106, 70, 209, 118, 140, 59, 133, 162, 114, 14, 92, 8, 26, 83, 213, 91, 136, 207, 228, 86, 13, 208, 64, 85, 211, 143, 252, 181, 128, 77, 187, 198, 50, 91, 26, 156, 109, 233, 136, 36, 216, 142, 87, 112, 45, 166, 235, 118, 128, 191, 66, 23, 48, 32, 3, 41, 37, 103, 8, 180, 236, 77, 106, 59, 18, 77, 206, 23, 154, 143, 236, 63, 235, 52, 105, 155, 38, 207, 31, 145, 193, 98, 251, 216, 224, 213, 250, 164, 22, 21, 227, 190, 223, 74, 29, 107, 0, 103, 187, 53, 129, 204, 202, 170, 156, 19, 67, 185, 100, 252, 5, 129, 164, 58, 228, 121, 214, 27, 53, 35, 16, 238, 93, 64, 94, 4, 250, 138, 102, 22, 128, 37, 229, 104, 40, 245, 216, 107, 16, 52, 202, 54, 98, 43, 162, 245, 56, 48, 150, 41, 3, 224, 167, 171, 5, 254, 106, 199, 0, 47, 238, 162, 27, 114, 168, 111, 231, 241, 102, 79, 138, 66, 168, 38, 157, 140, 76, 56, 155, 66, 109, 253, 243, 207, 79, 88, 167, 111, 251, 135, 38, 249, 85, 167, 36, 113, 7, 243, 34, 132, 113, 117, 8, 245, 93, 79, 171, 12, 223, 157, 41, 201, 221, 129, 60, 186, 164, 123, 1, 247, 202, 28, 61, 121, 156, 214, 55, 68, 36, 205, 113, 203, 45, 173, 24, 190, 32, 42, 207, 69, 189, 209, 221, 50, 18, 77, 60, 249, 233, 101, 216, 168, 47, 16, 155, 172, 248, 90, 115, 162, 134, 95, 6, 227, 202, 46, 26, 133, 135, 65, 251, 97, 109, 181, 141, 87, 156, 59, 40, 253, 87, 116, 244, 225, 242, 133, 77, 199, 120, 210, 152, 108, 90, 137, 127, 214, 137, 250, 180, 148, 113, 71, 68, 183, 216, 166, 169, 54, 174, 235, 51, 45, 172, 47, 5, 205, 154, 71, 241, 106, 228, 222, 150, 34, 95, 68, 251, 78, 244, 111, 59, 252, 138, 106, 31, 
185, 107, 123, 22, 37, 184, 249, 117, 231, 56, 224, 137, 13, 254, 50, 240, 252, 220, 74, 33, 223, 163, 97, 74, 180, 232, 189, 92, 104, 193, 59, 255, 80, 10, 146, 236, 17, 80, 90, 139, 35, 212, 153, 19, 13, 159, 7, 184, 77, 193, 43, 251, 149, 176, 147, 169, 227, 236, 74, 98, 163, 193, 180, 4, 186, 19, 29, 212, 166, 177, 114, 30, 254, 241, 141, 95, 72, 99, 104, 101, 21, 141, 181, 85, 200, 184, 190, 11, 255, 74, 39, 89, 41, 204, 187, 96, 202, 188, 191, 157, 135, 239, 166, 155, 86, 32, 228, 198, 117, 41, 31, 54, 151, 229, 214, 56, 149, 187, 177, 123, 164, 182, 232, 188, 21, 94, 2, 155, 177, 124, 94, 151, 249, 116, 78, 62, 0, 194, 76, 156, 86, 227, 11, 25, 35, 6, 175, 77, 151, 60, 44, 114, 234, 153, 124, 79, 114, 88, 36, 178, 223, 229, 61, 90, 198, 60, 237, 224, 169, 115, 22, 152, 254, 235, 17, 130, 53, 1, 235, 67, 90, 238, 52, 170, 182, 73, 182, 138, 182, 189, 130, 170, 200, 91, 14, 150, 174, 100, 63, 29, 93, 171, 150, 154, 93, 72, 34, 156, 173, 108, 74, 29, 107, 197, 130, 86, 106, 178, 138, 210, 0, 92, 11, 78, 111, 226, 120, 169, 222, 50, 18, 156, 170, 98, 206, 97, 21, 28, 112, 234, 145, 2, 216, 188, 172, 150, 183, 39, 125, 17, 146, 49, 184, 242, 2, 60, 19, 162, 180, 210, 254, 249, 182, 240, 148, 184, 118, 113, 198, 27, 115, 165, 193, 97, 232, 201, 140, 63, 200, 153, 10, 162, 170, 69, 68, 152, 199, 103, 14, 164, 228, 120, 71, 47, 196, 217, 251, 181, 26, 112, 185, 116, 184, 77, 93, 236, 160, 235, 40, 0, 162, 199, 123, 223, 37, 53, 73, 177, 69, 207, 11, 137, 67, 204, 57, 231, 50, 185, 148, 48, 30, 102, 195, 248, 40, 189, 227, 187, 95, 166, 1, 7, 30, 112, 32, 101, 38, 72, 208, 52, 67, 131, 96, 34, 192, 156, 48, 92, 231, 188, 97, 103, 97, 41, 254, 33, 72, 190, 68, 184, 85, 114, 25, 94, 65, 144, 102, 176, 150, 76, 247, 146, 9, 188, 66, 4, 144, 98, 233, 196, 166, 7, 15, 54, 148, 247, 81, 52, 253, 27, 113, 235, 255, 230, 179, 168, 140, 95, 230, 163, 86, 235, 119, 185, 152, 223, 129, 48, 169, 170, 225, 224, 154, 164, 209, 129, 232, 92, 190, 187, 38, 217, 36, 113, 250, 61, 
125, 168, 25, 70, 227, 39, 142, 12, 226, 20, 134, 82, 143, 147, 211, 173, 149, 223, 58, 27, 163, 18, 252, 148, 112, 19, 101, 144, 21, 105, 34, 200, 254, 11, 254, 8, 247, 210, 142, 194, 54, 150, 34, 171, 33, 203, 79, 79, 135, 186, 51, 68, 215, 106, 100, 21, 239, 178, 68, 230, 238, 72, 4, 31, 101, 161, 29, 204, 129, 234, 66, 195, 6, 69, 77, 48, 157, 137, 236, 96, 138, 119, 86, 147, 205, 172, 242, 28, 115, 188, 254, 216, 35, 89, 33, 27, 162, 243, 58, 102, 131, 50, 23, 169, 58, 218, 111, 6, 199, 164, 171, 104, 207, 34, 111, 229, 3, 207, 124, 96, 241, 95, 105, 113, 4, 16, 48, 163, 153, 0, 96, 174, 246, 3, 154, 124, 14, 132, 237, 184, 91, 217, 201, 53, 80, 39, 31, 113, 246, 67, 132, 85, 62, 153, 187, 240, 180, 117, 162, 17, 27, 190, 72, 178, 14, 101, 173, 200, 14, 196, 225, 212, 38, 153, 165, 147, 130, 51, 134, 40, 36, 7, 234, 179, 73, 28, 224, 4, 222, 10, 61, 65, 78, 23, 98, 246, 144, 9, 16, 180, 248, 56, 214, 162, 214, 18, 50, 67, 207, 3, 77, 7, 6, 28, 43, 131, 124, 117, 124, 213, 160, 67, 1, 44, 39, 15, 28, 149, 45, 54, 127, 157, 237, 193, 132, 174, 214, 8, 164, 106, 159, 213, 27, 173, 33, 24, 31, 110, 210, 95, 140, 236, 71, 68, 207, 225, 134, 105, 25, 226, 168, 119, 172, 1, 252, 11, 231, 182, 93, 254, 211, 58, 78, 192, 168, 67, 71, 11, 160, 76, 111, 234, 138, 119, 135, 51, 226, 113, 167, 146, 103, 97, 43, 192, 133, 252, 124, 42, 190, 217, 38, 95, 95, 222, 14, 245, 192, 116, 245, 68, 114, 253, 25, 229, 221, 0, 158, 27, 163, 45, 244, 200, 0, 205, 150, 162, 76, 224, 168, 203, 79, 114, 137, 18, 206, 12, 152, 241, 159, 245, 197, 175, 146, 16, 62, 152, 112, 233, 23, 132, 151, 100, 175, 26, 83, 0, 254, 150, 230, 94, 174, 77, 39, 57, 106, 155, 69, 85, 230, 239, 202, 205, 49, 17, 47, 72, 128, 251, 42, 157, 218, 234, 213, 127, 118, 152, 74, 139, 99, 120, 217, 116, 16, 73, 79, 40, 88, 107, 38, 132, 34, 101, 225, 109, 114, 91, 66, 69, 10, 44, 191, 6, 160, 208, 104, 123, 247, 247, 49, 151, 246, 216, 228, 157, 69, 140, 155, 98, 147, 202, 12, 79, 168, 40, 210, 164, 230, 241, 107, 
109, 231, 243, 187, 252, 100, 70, 41, 140, 35, 231, 115, 98, 49, 192, 102, 46, 12, 82, 162, 226, 104, 86, 169, 42, 34, 213, 76, 89, 197, 131, 10, 25, 120, 204, 115, 231, 103, 217, 164, 13, 166, 139, 141, 77, 75, 27, 21, 84, 212, 209, 189, 148, 198, 213, 96, 234, 190, 213, 40, 210, 152, 141, 4, 110, 167, 209, 226, 236, 247, 139, 119, 215, 175, 30, 49, 225, 174, 42, 195, 251, 228, 170, 246, 94, 117, 128, 34, 177, 10, 15, 126, 165, 52, 159, 249, 67, 35, 200, 86, 65, 254, 141, 198, 219, 55, 225, 240, 111, 248, 101, 29, 235, 192, 112, 225, 19, 255, 139, 129, 46, 42, 9, 8, 74, 58, 254, 191, 49, 173, 189, 32, 5, 174, 81, 223, 94, 145, 85, 40, 194, 145, 251, 88, 168, 166, 143, 39, 33, 111, 230, 115, 77, 113, 83, 100, 211, 244, 163, 201, 141, 71, 193, 106, 138, 3, 215, 134, 217, 191, 10, 177, 250, 233, 247, 127, 162, 112, 206, 150, 187, 205, 141, 211, 181, 58, 199, 11, 220, 91, 180, 169, 182, 178, 224, 159, 144, 105, 146, 29, 162, 221, 106, 190, 209, 151, 126, 98, 187, 91, 50, 91, 198, 210, 78, 189, 155, 193, 208, 178, 164, 0, 189, 145, 189, 7, 162, 88, 156, 33, 118, 176, 125, 130, 199, 108, 240, 99, 125, 119, 202, 188, 53, 17, 140, 102, 26, 30, 23, 30, 214, 203, 93, 82, 6, 67, 28, 31, 12, 166, 105, 63, 132, 69, 166, 208, 252, 140, 56, 175, 245, 17, 170, 3, 193, 26, 168, 92, 40, 131, 82, 17, 144, 128, 91, 48, 220, 16, 209, 53, 226, 4, 225, 6, 253, 141, 25, 254, 146, 41, 63, 90, 19, 6, 75, 96, 130, 156, 14, 91, 231, 207, 128, 244, 15, 135, 63, 45, 235, 51, 134, 10, 75, 252, 212, 63, 51, 131, 253, 110, 144, 239, 182, 194, 107, 45, 103, 124, 83, 154, 173, 181, 51, 60, 63, 75, 172, 103, 222, 53, 31, 73, 93, 76, 159, 184, 24, 240, 41, 97, 72, 131, 54, 113, 170, 233, 182, 86, 62, 88, 131, 206, 86, 180, 32, 143, 172, 114, 41, 233, 121, 221, 190, 192, 97, 48, 196, 185, 135, 67, 79, 190, 66, 181, 196, 246, 211, 94, 250, 228, 62, 117, 159, 141, 54, 186, 81, 143, 113, 108, 102, 252, 182, 146, 233, 20, 135, 30, 255, 237, 33, 224, 68, 8, 175, 33, 58, 49, 16, 226, 113, 54, 90, 209, 159, 
211, 193, 203, 187, 186, 156, 89, 81, 127, 211, 171, 113, 92, 238, 193, 159, 11, 242, 183, 97, 67, 211, 138, 203, 35, 113, 247, 64, 205, 71, 2, 127, 208, 90, 139, 132, 148, 180, 247, 7, 119, 195, 128, 96, 221, 195, 110, 44, 53, 114, 26, 196, 226, 219, 134, 195, 190, 205, 171, 29, 137, 224, 75, 167, 188, 94, 123, 199, 64, 167, 105, 191, 52, 247, 43, 224, 181, 250, 58, 109, 25, 1, 120, 7, 223, 250, 69, 199, 83, 15, 20, 136, 90, 247, 128, 109, 169, 243, 24, 220, 215, 188, 169, 46, 31, 131, 112, 79, 132, 93, 170, 127, 238, 207, 138, 110, 201, 232, 163, 214, 77, 45, 113, 103, 179, 197, 206, 83, 148, 6, 185, 231, 174, 111, 171, 162, 147, 39, 252, 36, 132, 173, 145, 77, 43, 223, 164, 46, 136, 129, 210, 219, 221, 202, 246, 182, 95, 135, 8, 55, 229, 71, 245, 57, 131, 217, 93, 90, 179, 195, 230, 102, 169, 80, 51, 197, 6, 48, 143, 112, 121, 27, 79, 174, 47, 88, 240, 47, 10, 62, 94, 177, 78, 234, 112, 136, 65, 84, 4, 245, 190, 138, 38, 98, 108, 187, 47, 236, 65, 95, 233, 245, 47, 126, 208, 199, 74, 229, 158, 20, 238, 148, 218, 241, 233, 229, 13, 14, 206, 95, 42, 22, 65, 191, 218, 73, 116, 90, 23, 4, 166, 32, 236, 51, 107, 252, 198, 10, 163, 100, 140, 221, 174, 111, 179, 171, 183, 159, 204, 229, 104, 156, 190, 99, 142, 130, 46, 142, 31, 244, 30, 114, 69, 86, 159, 173, 185, 88, 130, 238, 12, 216, 140, 134, 15, 104, 64, 131, 106, 95, 206, 11, 1, 183, 75, 167, 43, 63, 188, 39, 144, 14, 179, 140, 47, 140, 253, 202, 192, 215, 156, 108, 18, 211, 9, 75, 108, 47, 212, 81, 24, 149, 244, 189, 60, 199, 210, 251, 22, 15, 247, 145, 136, 59, 51, 228, 31, 227, 226, 12, 199, 36, 126, 167, 153, 168, 220, 216, 56, 68, 79, 99, 88, 13, 34, 125, 123, 33, 239, 167, 121, 240, 167, 65, 1, 82, 9, 126, 165, 52, 107, 207, 11, 48, 112, 3, 150, 102, 30, 177, 250, 211, 194, 239, 204, 242, 27, 31, 118, 231, 44, 200, 178, 138, 104, 164, 131, 113, 205, 95, 88, 165, 136, 177, 7, 111, 216, 83, 186, 110, 193, 241, 164, 24, 237, 204, 95, 250, 183, 26, 100, 70, 58, 91, 190, 43, 165, 31, 136, 139, 190, 27, 67, 52, 
72, 65, 120, 121, 54, 81, 27, 209, 28, 74, 51, 58, 211, 86, 46, 92, 123, 222, 10, 44, 145, 78, 90, 26, 61, 64, 143, 228, 227, 254, 73, 218, 85, 93, 218, 33, 123, 181, 195, 9, 76, 237, 41, 119, 145, 131, 106, 4, 150, 58, 7, 156, 65, 113, 108, 28, 135, 43, 21, 245, 159, 20, 163, 123, 27, 20, 58, 212, 183, 242, 82, 228, 110, 80, 53, 164, 161, 180, 244, 125, 168, 207, 81, 4, 253, 95, 101, 88, 31, 63, 25, 134, 59, 130, 0, 90, 220, 24, 200, 221, 149, 76, 225, 16, 26, 77, 179, 172, 190, 145, 199, 145, 239, 140, 106, 131, 29, 211, 187, 12, 207, 40, 205, 237, 151, 159, 18, 76, 49, 193, 137, 196, 197, 105, 181, 171, 23, 172, 208, 115, 95, 139, 136, 150, 211, 251, 132, 247, 206, 93, 70, 19, 181, 13, 205, 216, 79, 238, 229, 123, 12, 38, 24, 233, 96, 205, 223, 126, 95, 54, 15, 145, 149, 40, 41, 34, 30, 17, 1, 153, 252, 244, 214, 40, 66, 158, 71, 219, 142, 206, 216, 251, 200, 191, 75, 34, 147, 163, 27, 127, 64, 195, 187, 129, 132, 63, 224, 112, 1, 154, 28, 76, 141, 165, 195, 155, 53, 84, 189, 57, 148, 144, 98, 14, 232, 213, 179, 55, 191, 169, 203, 103, 99, 120, 159, 254, 201, 113, 51, 23, 99, 116, 246, 200, 59, 122, 13, 165, 118, 14, 230, 35, 231, 158, 48, 20, 104, 147, 121, 161, 241, 83, 109, 58, 84, 246, 251, 170, 145, 99, 179, 150, 32, 203, 93, 193, 99, 13, 86, 8, 224, 237, 61, 69, 58, 27, 79, 229, 84, 129, 145, 109, 58, 2, 230, 50, 0, 38, 189, 160, 18, 89, 158, 44, 255, 190, 108, 59, 161, 43, 49, 238, 84, 229, 223, 183, 138, 75, 130, 84, 208, 112, 183, 75, 108, 229, 254, 186, 103, 207, 244, 244, 149, 217, 71, 217, 238, 59, 98, 49, 214, 251, 83, 43, 104, 84, 0, 244, 181, 253, 235, 119, 236, 31, 36, 163, 105, 46, 52, 21, 212, 81, 170, 106, 186, 162, 63, 24, 235, 197, 184, 239, 229, 147, 105, 73, 145, 164, 214, 128, 23, 141, 50, 203, 41, 135, 32, 169, 168, 152, 137, 199, 38, 93, 226, 205, 20, 190, 208, 216, 76, 204, 133, 134, 137, 99, 89, 32, 57, 235, 138, 176, 10, 63, 159, 110, 85, 155, 164, 250, 194, 190, 58, 202, 52, 237, 95, 54, 149, 206, 144, 74, 123, 152, 231, 41, 202, 
121, 248, 243, 40, 247, 161, 225, 88, 148, 188, 79, 6, 235, 43, 132, 24, 200, 229, 86, 53, 166, 123, 116, 233, 203, 23, 212, 55, 0, 195, 238, 247, 152, 247, 216, 229, 23, 93, 244, 224, 96, 37, 6, 61, 24, 107, 51, 63, 254, 187, 179, 71, 7, 45, 170, 122, 160, 223, 44, 203, 190, 142, 134, 129, 210, 67, 41, 17, 118, 139, 196, 163, 243, 54, 81, 192, 212, 129, 52, 227, 168, 113, 238, 178, 185, 158, 213, 144, 12, 198, 236, 234, 35, 179, 157, 91, 138, 46, 220, 35, 65, 82, 11, 16, 125, 96, 248, 199, 155, 100, 48, 154, 106, 187, 141, 250, 191, 130, 222, 131, 227, 14, 138, 219, 23, 23, 190, 180, 155, 147, 58, 224, 150, 39, 182, 226, 239, 3, 45, 47, 99, 255, 32, 73, 242, 233, 180, 149, 238, 45, 103, 39, 196, 231, 138, 16, 51, 224, 221, 136, 174, 205, 149, 132, 92, 83, 186, 48, 133, 204, 210, 5, 40, 57, 22, 64, 160, 4, 73, 168, 178, 229, 222, 185, 235, 109, 196, 109, 183, 150, 158, 0, 56, 16, 41, 43, 193, 31, 204, 169, 219, 50, 167, 242, 107, 206, 105, 228, 246, 59, 53, 18, 42, 19, 217, 56, 220, 59, 46, 160, 107, 200, 164, 0, 5, 213, 178, 133, 187, 142, 174, 218, 60, 252, 237, 61, 66, 146, 207, 179, 93, 234, 83, 219, 93, 55, 107, 92, 139, 101, 151, 198, 135, 246, 223, 101, 139, 44, 166, 82, 97, 233, 217, 130, 67, 22, 88, 36, 131, 80, 179, 192, 34, 206, 119, 167, 17, 65, 217, 71, 250, 52, 253, 29, 4, 156, 63, 64, 81, 6, 67, 130, 79, 0, 95, 87, 36, 43, 4, 26, 51, 250, 24, 178, 197, 41, 20, 142, 187, 180, 239, 156, 102, 113, 15, 13, 18, 90, 70, 184, 7, 250, 193, 117, 127, 60, 0, 198, 218, 128, 116, 119, 124, 126, 160, 129, 231, 68, 189, 98, 48, 43, 120, 178, 91, 54, 242, 66, 112, 55, 188, 42, 179, 94, 60, 249, 136, 81, 149, 132, 98, 209, 181, 219, 223, 31, 40, 235, 55, 5, 69, 11, 63, 192, 51, 224, 119, 107, 193, 84, 178, 191, 58, 217, 222, 183, 118, 114, 95, 45, 48, 126, 184, 10, 207, 0, 217, 59, 167, 244, 219, 9, 238, 41, 13, 127, 191, 123, 139, 9, 74, 40, 164, 194, 205, 116, 204, 185, 230, 133, 148, 215, 50, 105, 173, 220, 113, 117, 98, 233, 80, 209, 185, 211, 232, 29, 157, 12, 
206, 228, 187, 26, 240, 193, 21, 15, 69, 115, 120, 66, 2, 92, 165, 103, 29, 177, 151, 154, 211, 107, 177, 89, 16, 135, 243, 135, 178, 237, 184, 110, 191, 169, 230, 243, 106, 16, 95, 128, 124, 73, 132, 107, 116, 123, 38, 0, 72, 24, 171, 8, 113, 227, 43, 136, 168, 25, 174, 100, 201, 252, 212, 119, 246, 131, 95, 78, 65, 222, 89, 192, 87, 66, 102, 58, 237, 94, 56, 138, 71, 121, 65, 49, 22, 65, 29, 183, 56, 59, 27, 16, 176, 232, 120, 22, 104, 212, 83, 32, 55, 90, 2, 37, 108, 112, 146, 113, 34, 64, 78, 65, 80, 198, 101, 190, 60, 69, 60, 230, 72, 216, 215, 225, 166, 155, 122, 140, 137, 245, 89, 77, 237, 209, 242, 67, 37, 88, 5, 117, 101, 165, 98, 84, 139, 18, 57, 118, 67, 78, 254, 178, 60, 181, 124, 144, 169, 13, 20, 233, 60, 51, 55, 1, 215, 225, 224, 42, 191, 189, 227, 246, 25, 53, 164, 27, 1, 66, 112, 36, 211, 67, 195, 172, 119, 52, 81, 244, 10, 39, 11, 219, 160, 35, 230, 225, 176, 171, 74, 254, 100, 229, 4, 64, 140, 85, 94, 85, 197, 63, 234, 173, 243, 129, 52, 220, 154, 175, 195, 223, 23, 85, 20, 171, 79, 240, 185, 10, 14, 93, 185, 93, 100, 11, 41, 39, 106, 137, 70, 122, 173, 221, 195, 73, 174, 145, 7, 85, 1, 232, 126, 67, 38, 213, 104, 164, 213, 213, 5, 23, 233, 233, 47, 222, 14, 3, 33, 180, 180, 49, 136, 73, 46, 237, 209, 47, 197, 173, 95, 131, 4, 157, 40, 217, 217, 156, 31, 81, 103, 24, 116, 208, 225, 123, 11, 122, 143, 55, 224, 125, 121, 42, 109, 151, 120, 245, 141, 176, 100, 12, 237, 222, 197, 100, 79, 16, 168, 247, 103, 223, 85, 92, 26, 17, 23, 91, 90, 249, 228, 162, 166, 164, 178, 215, 48, 203, 209, 138, 42, 42, 87, 132, 134, 120, 39, 13, 50, 77, 187, 15, 42, 82, 80, 242, 210, 141, 28, 213, 194, 241, 75, 86, 159, 120, 217, 98, 94, 100, 129, 1, 228, 157, 183, 171, 186, 164, 245, 72, 107, 119, 241, 227, 77, 109, 120, 58, 248, 36, 82, 38, 188, 50, 177, 65, 227, 192, 169, 124, 7, 137, 254, 80, 151, 200, 73, 55, 99, 30, 150, 192, 13, 106, 41, 236, 18, 6, 73, 73, 16, 239, 162, 0, 218, 201, 184, 78, 184, 137, 253, 12, 130, 241, 245, 158, 233, 159, 0, 130, 99, 50, 12, 
240, 186, 207, 221, 250, 179, 115, 138, 137, 154, 182, 72, 119, 204, 16, 18, 101, 107, 27, 98, 142, 16, 143, 116, 161, 238, 232, 227, 70, 247, 124, 150, 157, 67, 114, 24, 162, 246, 158, 205, 24, 77, 79, 107, 232, 141, 103, 99, 128, 101, 40, 241, 212, 127, 90, 40, 2, 229, 105, 51, 164, 70, 216, 95, 2, 240, 54, 224, 11, 241, 201, 181, 97, 113, 49, 10, 170, 203, 10, 7, 130, 42, 236, 195, 146, 175, 222, 91, 235, 145, 13, 33, 82, 81, 187, 250, 248, 59, 250, 222, 136, 69, 192, 178, 189, 103, 176, 235, 101, 89, 133, 145, 203, 35, 138, 157, 180, 144, 6, 172, 55, 58, 147, 217, 109, 95, 81, 141, 214, 209, 245, 146, 96, 23, 130, 101, 107, 180, 188, 69, 204, 224, 195, 29, 108, 199, 70, 239, 18, 199, 143, 68, 161, 121, 110, 132, 238, 186, 97, 217, 227, 106, 86, 162, 24, 170, 155, 34, 8, 92, 154, 199, 184, 210, 214, 20, 31, 200, 22, 169, 186, 176, 26, 52, 62, 5, 251, 241, 108, 67, 153, 104, 242, 20, 28, 143, 23, 92, 191, 96, 254, 141, 235, 113, 154, 10, 219, 117, 159, 56, 31, 140, 161, 225, 247, 236, 253, 126, 160, 115, 23, 15, 175, 190, 227, 94, 149, 166, 102, 173, 251, 60, 192, 100, 70, 126, 89, 88, 154, 70, 176, 82, 212, 137, 12, 243, 203, 215, 247, 252, 91, 27, 19, 104, 229, 123, 83, 164, 166, 1, 90, 189, 251, 6, 81, 168, 28, 250, 206, 104, 62, 122, 66, 100, 183, 204, 196, 12, 57, 237, 25, 151, 165, 202, 65, 176, 124, 4, 94, 99, 250, 31, 43, 245, 160, 19, 247, 137, 120, 147, 24, 227, 192, 34, 21, 113, 168, 214, 235, 104, 140, 252, 170, 127, 90, 181, 149, 35, 160, 69, 110, 233, 71, 194, 66, 156, 225, 152, 129, 163, 168, 162, 154, 99, 172, 144, 85, 253, 238, 250, 48, 69, 99, 139, 49, 133, 200, 148, 34, 238, 235, 163, 13, 229, 232, 206, 118, 9, 132, 186, 253, 112, 251, 185, 191, 253, 155, 31, 161, 100, 249, 50, 145, 99, 243, 16, 21, 182, 137, 81, 104, 165, 96, 66, 112, 137, 68, 233, 58, 217, 65, 209, 62, 125, 164, 71, 64, 3, 44, 47, 172, 14, 211, 105, 241, 194, 50, 196, 89, 82, 26, 174, 194, 101, 119, 171, 140, 162, 141, 123, 206, 115, 85, 248, 33, 85, 228, 42, 92, 97, 100, 0, 
16, 52, 27, 111, 146, 22, 223, 104, 54, 254, 70, 119, 180, 9, 126, 226, 173, 218, 140, 232, 213, 96, 232, 138, 148, 131, 144, 15, 69, 136, 43, 252, 203, 249, 20, 198, 32, 153, 17, 9, 28, 69, 183, 178, 240, 203, 31, 197, 112, 215, 34, 199, 95, 180, 116, 231, 191, 216, 12, 90, 216, 161, 227, 246, 5, 183, 26, 243, 239, 47, 193, 208, 140, 195, 151, 178, 100, 250, 38, 182, 15, 156, 211, 44, 207, 216, 110, 150, 234, 92, 160, 204, 228, 246, 38, 224, 16, 83, 166, 33, 22, 163, 174, 99, 80, 3, 23, 142, 55, 3, 253, 128, 63, 222, 111, 158, 184, 10, 60, 153, 124, 202, 53, 189, 14, 118, 45, 87, 79, 252, 20, 226, 58, 249, 197, 29, 162, 78, 211, 147, 165, 240, 7, 60, 175, 73, 90, 61, 167, 239, 68, 104, 66, 207, 208, 194, 87, 148, 158, 184, 247, 129, 7, 150, 171, 62, 157, 62, 240, 137, 36, 88, 122, 225, 83, 173, 245, 238, 245, 249, 227, 252, 152, 40, 8, 3, 150, 155, 71, 121, 187, 46, 173, 248, 107, 78, 135, 232, 20, 174, 165, 135, 5, 106, 244, 62, 79, 249, 72, 99, 252, 177, 27, 158, 223, 235, 59, 34, 141, 45, 221, 238, 31, 102, 247, 65, 170, 15, 219, 210, 232, 80, 100, 68, 171, 37, 211, 156, 83, 147, 246, 219, 89, 41, 195, 110, 252, 146, 164, 200, 63, 22, 154, 235, 157, 224, 24, 13, 102, 218, 53, 116, 235, 195, 57, 44, 157, 20, 89, 193, 102, 118, 163, 6, 24, 185, 177, 103, 106, 237, 7, 242, 206, 186, 232, 186, 164, 89, 138, 96, 207, 218, 228, 8, 77, 218, 15, 219, 168, 183, 178, 106, 155, 150, 37, 27, 139, 195, 72, 245, 64, 122, 219, 34, 125, 244, 159, 107, 23, 200, 151, 221, 249, 115, 21, 122, 60, 189, 31, 36, 127, 78, 52, 175, 187, 142, 179, 50, 53, 30, 246, 156, 89, 21, 120, 137, 129, 51, 10, 219, 176, 167, 197, 56, 128, 1, 21, 249, 225, 157, 135, 91, 248, 216, 237, 108, 143, 105, 159, 168, 97, 56, 54, 26, 181, 62, 66, 194, 192, 246, 142, 174, 157, 28, 52, 18, 33, 44, 54, 106, 28, 19, 94, 56, 49, 80, 181, 104, 237, 198, 100, 25, 119, 50, 224, 202, 71, 203, 246, 127, 108, 93, 3, 58, 61, 195, 222, 215, 50, 146, 156, 110, 216, 51, 40, 228, 9, 192, 77, 26, 58, 144, 247, 220, 122, 
136, 51, 97, 227, 255, 158, 63, 76, 105, 203, 118, 96, 163, 154, 19, 163, 65, 249, 170, 225, 220, 193, 235, 177, 57, 168, 2, 14, 44, 34, 130, 107, 145, 189, 125, 158, 223, 106, 229, 0, 175, 136, 163, 176, 151, 192, 102, 36, 251, 135, 128, 146, 36, 182, 219, 135, 150, 115, 34, 164, 3, 69, 39, 128, 31, 3, 250, 103, 109, 228, 88, 77, 91, 51, 229, 228, 39, 140, 87, 33, 182, 110, 20, 127, 154, 139, 110, 252, 14, 92, 5, 39, 245, 215, 141, 132, 66, 99, 195, 210, 248, 104, 29, 72, 126, 58, 54, 254, 137, 250, 235, 51, 200, 139, 32, 61, 174, 43, 194, 99, 179, 203, 207, 53, 245, 133, 177, 124, 73, 203, 155, 38, 11, 171, 223, 12, 171, 48, 169, 129, 190, 14, 43, 247, 147, 163, 204, 5, 102, 247, 251, 92, 79, 18, 195, 208, 25, 55, 228, 229, 55, 83, 17, 96, 4, 60, 85, 10, 218, 76, 101, 24, 172, 80, 228, 108, 45, 55, 129, 87, 97, 22, 86, 102, 70, 31, 187, 191, 1, 178, 184, 253, 120, 104, 197, 84, 242, 112, 17, 245, 82, 85, 96, 40, 21, 9, 4, 45, 209, 147, 162, 110, 90, 34, 41, 194, 135, 169, 75, 169, 114, 206, 220, 166, 81, 141, 187, 64, 120, 169, 165, 23, 66, 222, 214, 18, 184, 136, 116, 199, 67, 91, 151, 67, 123, 201, 40, 171, 30, 81, 53, 172, 11, 252, 52, 200, 191, 236, 81, 63, 28, 28, 79, 197, 11, 72, 49, 4, 124, 134, 233, 232, 35, 46, 146, 22, 245, 251, 182, 162, 76, 231, 32, 196, 244, 181, 102, 124, 215, 106, 183, 214, 60, 118, 51, 84, 203, 105, 179, 56, 73, 212, 219, 228, 251, 38, 126, 206, 115, 119, 115, 151, 21, 49, 80, 168, 206, 170, 174, 215, 59, 70, 84, 59, 126, 167, 254, 115, 232, 114, 60, 182, 40, 129, 177, 240, 200, 38, 37, 198, 209, 23, 76, 170, 118, 83, 1, 131, 247, 56, 26, 57, 127, 156, 31, 144, 27, 184, 203, 148, 35, 11, 202, 85, 112, 185, 103, 107, 238, 6, 233, 105, 127, 30, 36, 5, 87, 171, 241, 3, 56, 204, 132, 15, 217, 229, 139, 21, 217, 129, 72, 249, 87, 89, 79, 96, 37, 236, 89, 92, 155, 19, 126, 84, 218, 53, 184, 26, 224, 179, 7, 109, 243, 241, 91, 4, 121, 186, 223, 116, 222, 74, 23, 153, 101, 79, 27, 149, 143, 26, 68, 95, 238, 21, 43, 211, 107, 255, 231, 
205, 156, 7, 52, 151, 74, 253, 249, 109, 51, 153, 31, 163, 151, 172, 121, 248, 150, 198, 13, 118, 76, 248, 18, 210, 184, 219, 23, 234, 59, 115, 254, 81, 254, 67, 110, 53, 177, 188, 60, 180, 25, 87, 10, 150, 44, 208, 115, 144, 16, 3, 244, 119, 137, 173, 22, 158, 135, 89, 82, 86, 224, 43, 127, 108, 234, 223, 64, 224, 50, 214, 124, 246, 207, 122, 29, 99, 234, 199, 48, 125, 106, 221, 148, 40, 5, 16, 140, 234, 185, 222, 239, 214, 67, 77, 22, 165, 135, 224, 121, 236, 251, 70, 6, 123, 22, 230, 117, 208, 172, 131, 59, 53, 108, 100, 173, 136, 63, 22, 130, 83, 137, 15, 28, 82, 36, 34, 221, 235, 226, 114, 79, 126, 119, 218, 50, 130, 122, 143, 128, 240, 138, 49, 95, 254, 117, 105, 94, 76, 117, 93, 6, 189, 236, 192, 252, 227, 60, 208, 190, 216, 173, 157, 42, 240, 134, 97, 184, 215, 46, 30, 152, 214, 186, 205, 122, 64, 241, 9, 165, 113, 56, 244, 89, 27, 38, 41, 168, 30, 3, 50, 249, 121, 35, 55, 79, 10, 253, 33, 221, 231, 112, 53, 230, 15, 241, 247, 11, 202, 123, 27, 169, 254, 215, 208, 212, 184, 229, 58, 145, 142, 227, 155, 8, 98, 108, 50, 239, 238, 82, 161, 80, 228, 170, 128, 65, 71, 94, 4, 135, 27, 30, 28, 176, 66, 87, 245, 58, 0, 242, 162, 155, 1, 230, 130, 191, 161, 30, 167, 132, 37, 170, 137, 45, 70, 182, 29, 64, 230, 204, 218, 12, 26, 236, 146, 21, 130, 38, 184, 0, 185, 21, 57, 51, 227, 123, 26, 216, 87, 227, 198, 78, 106, 171, 224, 255, 223, 12, 92, 86, 191, 171, 109, 68, 29, 196, 31, 189, 44, 47, 198, 240, 63, 119, 237, 15, 4, 66, 204, 61, 54, 105, 188, 52, 38, 85, 29, 10, 181, 57, 21, 63, 27, 248, 254, 144, 201, 69, 96, 72, 176, 206, 105, 209, 182, 227, 107, 31, 34, 36, 199, 71, 50, 233, 205, 40, 110, 204, 197, 190, 202, 175, 155, 208, 26, 241, 28, 118, 239, 86, 87, 198, 167, 203, 148, 79, 69, 114, 108, 111, 178, 8, 29, 141, 229, 254, 121, 55, 99, 138, 88, 211, 25, 213, 124, 32, 248, 28, 178, 12, 184, 96, 118, 178, 219, 74, 186, 235, 22, 144, 68, 124, 152, 163, 132, 152, 125, 193, 123, 203, 54, 115, 216, 254, 245, 228, 88, 47, 119, 152, 56, 194, 120, 93, 63, 251, 138, 
157, 142, 83, 235, 111, 98, 208, 67, 226, 204, 115, 172, 14, 15, 217, 122, 68, 94, 214, 215, 29, 196, 49, 82, 155, 80, 204, 30, 180, 143, 142, 231, 118, 154, 68, 4, 61, 180, 33, 86, 157, 251, 119, 49, 46, 239, 175, 251, 115, 135, 182, 34, 219, 38, 134, 237, 12, 183, 159, 71, 196, 175, 119, 134, 146, 175, 239, 200, 85, 24, 217, 227, 238, 110, 252, 34, 213, 35, 175, 138, 193, 218, 182, 239, 83, 198, 26, 3, 30, 86, 54, 168, 174, 79, 129, 135, 146, 192, 218, 114, 151, 242, 108, 220, 232, 99, 46, 215, 49, 77, 5, 233, 222, 144, 69, 82, 85, 250, 180, 112, 61, 41, 7, 70, 35, 129, 219, 1, 102, 71, 40, 17, 52, 46, 102, 92, 75, 71, 121, 235, 18, 248, 78, 23, 237, 185, 36, 1, 253, 155, 195, 206, 244, 208, 255, 183, 10, 124, 207, 30, 200, 87, 15, 2, 31, 82, 248, 77, 199, 99, 110, 49, 133, 23, 175, 42, 20, 172, 13, 61, 178, 71, 197, 1, 87, 56, 214, 83, 138, 223, 121, 216, 209, 94, 188, 153, 136, 22, 101, 223, 251, 36, 70, 159, 160, 137, 55, 158, 136, 136, 94, 201, 249, 232, 129, 31, 163, 204, 148, 185, 219, 185, 188, 250, 87, 91, 233, 206, 199, 222, 41, 173, 48, 157, 166, 37, 53, 134, 142, 204, 169, 185, 136, 17, 238, 221, 67, 31, 233, 131, 78, 85, 250, 158, 115, 59, 6, 197, 0, 29, 209, 138, 21, 65, 208, 208, 100, 2, 249, 160, 42, 114, 168, 84, 100, 83, 98, 42, 113, 93, 199, 99, 3, 23, 187, 223, 35, 173, 167, 172, 12, 250, 132, 84, 191, 41, 161, 177, 90, 245, 36, 174, 96, 92, 4, 65, 80, 221, 118, 130, 137, 210, 181, 216, 234, 6, 114, 245, 247, 11, 183, 159, 78, 227, 93, 225, 188, 56, 210, 246, 105, 24, 127, 13, 3, 223, 213, 164, 180, 246, 129, 106, 153, 92, 20, 112, 229, 65, 37, 202, 16, 204, 79, 45, 14, 159, 111, 103, 125, 63, 209, 11, 212, 23, 29, 14, 106, 187, 95, 125, 231, 159, 247, 143, 149, 128, 20, 201, 139, 220, 243, 141, 124, 26, 171, 218, 135, 196, 74, 252, 154, 52, 245, 147, 236, 196, 248, 112, 70, 142, 237, 97, 191, 75, 100, 47, 234, 68, 85, 201, 28, 130, 3, 198, 86, 253, 79, 150, 148, 144, 204, 233, 54, 3, 5, 106, 19, 90, 161, 130, 16, 147, 168, 51, 234, 73, 72, 
190, 54, 204, 146, 106, 213, 51, 33, 182, 127, 68, 91, 247, 225, 121, 4, 90, 117, 154, 82, 244, 70, 88, 15, 122, 187, 153, 136, 172, 52, 21, 174, 107, 175, 69, 121, 80, 180, 17, 154, 197, 30, 205, 200, 150, 198, 136, 64, 199, 22, 175, 74, 4, 195, 49, 111, 218, 52, 213, 176, 230, 58, 138, 61, 209, 13, 2, 114, 124, 80, 26, 50, 139, 86, 36, 192, 192, 36, 71, 80, 0, 59, 30, 135, 185, 103, 130, 115, 138, 250, 126, 155, 175, 240, 84, 122, 137, 200, 171, 68, 72, 164, 79, 79, 28, 90, 84, 110, 130, 122, 133, 165, 245, 35, 124, 195, 10, 99, 78, 140, 219, 139, 219, 107, 1, 172, 39, 0, 28, 150, 91, 243, 188, 215, 147, 12, 70, 161, 15, 172, 102, 17, 146, 175, 95, 79, 20, 209, 180, 207, 154, 153, 229, 190, 251, 73, 215, 11, 113, 37, 230, 193, 192, 218, 119, 83, 16, 237, 167, 193, 157, 157, 102, 102, 203, 198, 68, 237, 252, 121, 120, 132, 126, 65, 199, 197, 59, 186, 66, 182, 38, 126, 221, 17, 96, 102, 99, 218, 185, 124, 152, 18, 240, 92, 241, 171, 178, 231, 64, 223, 228, 161, 110, 182, 217, 95, 166, 165, 113, 6, 203, 8, 254, 9, 241, 90, 157, 60, 194, 125, 210, 73, 213, 243, 16, 159, 123, 119, 162, 42, 178, 159, 91, 130, 235, 17, 142, 202, 65, 31, 213, 60, 108, 95, 53, 161, 25, 85, 114, 18, 117, 199, 73, 71, 168, 243, 188, 126, 180, 37, 10, 239, 179, 38, 227, 177, 184, 213, 193, 130, 45, 115, 2, 196, 136, 54, 99, 161, 163, 212, 189, 9, 142, 113, 251, 137, 138, 187, 11, 117, 189, 246, 30, 193, 150, 187, 60, 70, 108, 189, 10, 152, 108, 36, 102, 189, 204, 224, 8, 100, 72, 219, 66, 97, 53, 90, 23, 109, 82, 49, 94, 227, 67, 207, 146, 127, 199, 212, 174, 5, 115, 160, 81, 23, 44, 130, 49, 248, 2, 67, 94, 32, 231, 7, 238, 13, 65, 115, 85, 211, 138, 184, 26, 88, 21, 69, 115, 129, 30, 63, 79, 116, 118, 207, 139, 204, 42, 88, 114, 154, 26, 166, 116, 28, 189, 75, 201, 163, 102, 236, 156, 198, 58, 122, 99, 97, 179, 132, 66, 245, 26, 90, 49, 91, 69, 117, 71, 163, 122, 152, 220, 240, 131, 139, 80, 95, 199, 50, 34, 131, 99, 154, 8, 240, 77, 238, 85, 0, 60, 144, 102, 5, 83, 16, 174, 3, 125, 158, 
231, 212, 80, 67, 109, 64, 129, 65, 152, 126, 217, 180, 3, 123, 31, 139, 40, 177, 229, 154, 149, 0, 9, 108, 240, 174, 68, 247, 2, 95, 106, 173, 104, 173, 73, 133, 174, 64, 164, 216, 90, 210, 114, 220, 105, 214, 232, 155, 157, 25, 103, 74, 72, 105, 6, 96, 3, 14, 168, 64, 0, 14, 74, 135, 241, 67, 80, 41, 107, 78, 44, 201, 206, 166, 31, 187, 72, 108, 29, 225, 109, 224, 21, 189, 198, 90, 74, 132, 170, 45, 181, 124, 167, 55, 178, 176, 245, 3, 121, 248, 88, 55, 57, 84, 168, 38, 97, 212, 185, 211, 75, 237, 191, 148, 160, 19, 135, 106, 139, 109, 230, 29, 169, 143, 40, 88, 55, 125, 152, 99, 211, 249, 146, 19, 108, 39, 87, 216, 202, 57, 193, 173, 54, 237, 94, 199, 249, 113, 60, 220, 114, 138, 147, 252, 244, 211, 29, 16, 5, 38, 145, 100, 157, 148, 247, 85, 131, 132, 234, 56, 134, 249, 247, 149, 240, 128, 102, 26, 196, 211, 204, 188, 41, 146, 59, 238, 60, 118, 144, 97, 167, 147, 181, 37, 102, 245, 87, 136, 136, 183, 194, 224, 215, 16, 26, 176, 144, 248, 243, 216, 151, 198, 208, 45, 25, 158, 102, 99, 84, 117, 57, 69, 24, 248, 8, 171, 198, 227, 0, 97, 117, 125, 39, 56, 98, 7, 78, 237, 199, 252, 17, 128, 35, 21, 191, 162, 252, 97, 165, 185, 20, 182, 71, 25, 192, 235, 193, 9, 108, 32, 162, 82, 45, 226, 247, 149, 106, 183, 197, 123, 208, 64, 32, 231, 114, 123, 249, 204, 31, 126, 229, 191, 93, 154, 255, 251, 163, 84, 109, 176, 62, 9, 91, 54, 135, 174, 214, 219, 244, 55, 159, 138, 47, 141, 95, 225, 222, 146, 81, 136, 14, 49, 71, 142, 141, 198, 178, 143, 42, 126, 67, 81, 73, 244, 110, 174, 53, 160, 244, 221, 117, 44, 13, 86, 181, 20, 163, 145, 49, 126, 196, 202, 24, 55, 167, 72, 21, 88, 64, 190, 217, 248, 29, 46, 25, 147, 233, 239, 109, 41, 178, 123, 54, 78, 205, 11, 77, 66, 172, 204, 254, 48, 206, 14, 170, 65, 25, 4, 88, 231, 174, 21, 231, 98, 191, 171, 70, 249, 226, 12, 15, 173, 103, 0, 52, 83, 81, 236, 239, 45, 4, 3, 243, 172, 42, 73, 19, 244, 141, 114, 119, 169, 73, 185, 221, 81, 46, 251, 25, 248, 147, 227, 140, 25, 133, 123, 209, 122, 68, 120, 188, 68, 131, 220, 204, 48, 59, 131, 
50, 0, 63, 164, 97, 145, 194, 192, 196, 187, 250, 118, 225, 50, 121, 49, 17, 98, 221, 231, 79, 222, 91, 99, 124, 27, 226, 73, 141, 116, 188, 90, 55, 204, 111, 53, 54, 36, 225, 24, 179, 36, 97, 25, 224, 43, 204, 164, 245, 213, 62, 220, 246, 111, 166, 164, 10, 239, 90, 231, 156, 71, 192, 68, 16, 48, 152, 110, 158, 120, 170, 117, 188, 129, 88, 26, 66, 1, 172, 32, 102, 243, 220, 12, 165, 248, 190, 83, 143, 22, 45, 166, 182, 87, 198, 76, 59, 235, 90, 82, 147, 207, 219, 77, 15, 175, 33, 94, 132, 155, 247, 0, 233, 108, 219, 2, 115, 43, 239, 200, 102, 48, 22, 29, 127, 98, 33, 131, 111, 232, 27, 45, 119, 14, 138, 8, 165, 210, 165, 59, 92, 49, 87, 238, 157, 19, 109, 116, 34, 152, 203, 239, 125, 242, 148, 246, 83, 229, 231, 67, 221, 170, 75, 187, 111, 96, 105, 206, 20, 123, 222, 223, 9, 33, 251, 167, 255, 69, 65, 196, 201, 26, 44, 149, 169, 81, 133, 65, 107, 143, 168, 33, 169, 39, 12, 34, 37, 116, 244, 21, 198, 162, 120, 221, 110, 71, 169, 248, 173, 37, 178, 128, 28, 26, 113, 92, 144, 75, 42, 223, 212, 129, 210, 141, 135, 166, 193, 192, 97, 52, 222, 254, 23, 148, 9, 112, 114, 64, 84, 12, 0, 125, 189, 173, 232, 223, 134, 210, 13, 236, 70, 153, 231, 212, 20, 224, 146, 173, 139, 57, 190, 134, 118, 153, 147, 141, 236, 92, 50, 237, 88, 178, 184, 155, 140, 69, 133, 101, 211, 123, 101, 137, 130, 38, 50, 225, 155, 224, 53, 213, 65, 112, 198, 206, 81, 96, 203, 29, 141, 206, 29, 232, 124, 216, 35, 8, 62, 149, 236, 178, 118, 57, 28, 94, 86, 43, 220, 48, 108, 137, 108, 195, 130, 28, 212, 104, 37, 93, 83, 174, 52, 92, 137, 136, 245, 238, 54, 132, 123, 121, 136, 105, 15, 23, 185, 141, 208, 44, 117, 156, 169, 74, 234, 83, 135, 181, 51, 173, 98, 228, 101, 216, 166, 90, 42, 211, 115, 53, 198, 255, 53, 31, 204, 244, 146, 83, 100, 59, 103, 100, 81, 139, 131, 47, 109, 149, 157, 235, 100, 64, 188, 73, 77, 255, 230, 192, 121, 88, 233, 145, 46, 214, 145, 195, 177, 107, 1, 169, 44, 54, 69, 197, 68, 16, 16, 211, 16, 41, 134, 144, 71, 40, 46, 249, 160, 144, 31, 188, 218, 244, 180, 195, 35, 145, 40, 
126, 240, 86, 250, 200, 74, 203, 70, 69, 84, 247, 148, 178, 102, 232, 217, 29, 244, 63, 41, 26, 206, 12, 208, 188, 160, 66, 96, 29, 86, 185, 152, 172, 183, 67, 182, 163, 158, 61, 102, 22, 162, 176, 127, 184, 115, 86, 214, 84, 179, 215, 43, 8, 32, 27, 18, 198, 40, 69, 143, 122, 55, 55, 223, 36, 87, 74, 116, 38, 173, 95, 4, 148, 104, 217, 54, 158, 80, 118, 68, 201, 80, 167, 172, 236, 11, 8, 3, 182, 231, 105, 248, 122, 83, 91, 190, 108, 124, 232, 144, 34, 30, 140, 1, 223, 30, 13, 33, 206, 8, 1, 116, 227, 249, 185, 192, 4, 233, 99, 7, 81, 144, 170, 102, 29, 7, 5, 87, 164, 208, 142, 13, 147, 99, 116, 47, 176, 157, 71, 76, 229, 175, 151, 190, 117, 238, 161, 140, 126, 154, 114, 180, 109, 197, 60, 160, 11, 117, 113, 139, 72, 90, 145, 223, 126, 26, 201, 46, 122, 157, 144, 164, 170, 154, 74, 54, 76, 213, 67, 33, 232, 148, 219, 175, 216, 59, 68, 148, 51, 30, 162, 24, 112, 162, 40, 8, 181, 90, 210, 95, 250, 35, 209, 11, 162, 205, 2, 241, 95, 18, 61, 11, 6, 90, 79, 128, 18, 110, 247, 101, 192, 234, 27, 38, 155, 204, 170, 160, 194, 35, 212, 120, 110, 174, 173, 19, 10, 66, 171, 122, 206, 171, 1, 209, 22, 166, 136, 66, 166, 203, 93, 90, 27, 82, 13, 158, 240, 229, 146, 152, 71, 67, 152, 107, 142, 44, 196, 232, 54, 36, 188, 124, 55, 50, 233, 162, 60, 143, 197, 182, 77, 110, 109, 87, 254, 90, 79, 17, 102, 226, 93, 222, 158, 110, 239, 210, 198, 179, 5, 50, 42, 156, 176, 5, 177, 27, 249, 68, 0, 187, 12, 115, 74, 112, 137, 39, 176, 122, 69, 69, 138, 17, 215, 146, 56, 43, 68, 4, 249, 214, 116, 40, 27, 97, 79, 13, 190, 147, 73, 27, 111, 70, 178, 119, 109, 108, 188, 243, 136, 192, 39, 30, 74, 240, 247, 103, 121, 55, 51, 207, 225, 5, 98, 186, 68, 147, 229, 160, 13, 92, 57, 193, 90, 130, 235, 169, 255, 92, 162, 111, 143, 244, 67, 83, 68, 23, 140, 96, 32, 91, 24, 238, 76, 85, 235, 104, 111, 221, 13, 159, 85, 162, 98, 242, 60, 137, 64, 25, 248, 161, 233, 176, 134, 117, 35, 128, 69, 199, 149, 178, 73, 219, 5, 27, 165, 40, 31, 28, 17, 2, 217, 119, 10, 80, 102, 140, 22, 214, 243, 96, 35, 87, 61, 
225, 211, 101, 235, 249, 232, 211, 175, 52, 136, 197, 21, 125, 175, 194, 6, 252, 236, 9, 18, 139, 76, 85, 88, 173, 19, 238, 191, 220, 28, 29, 225, 9, 88, 23, 211, 214, 102, 232, 121, 143, 243, 133, 66, 152, 113, 159, 120, 114, 177, 77, 51, 44, 176, 78, 165, 41, 182, 156, 139, 101, 89, 0, 43, 60, 178, 117, 161, 19, 17, 29, 9, 71, 38, 75, 70, 176, 161, 253, 47, 35, 75, 129, 198, 163, 219, 120, 3, 49, 32, 70, 134, 81, 62, 62, 236, 194, 82, 106, 132, 113, 138, 5, 40, 178, 48, 80, 151, 204, 209, 117, 201, 208, 89, 209, 130, 50, 66, 137, 242, 219, 157, 9, 186, 112, 202, 119, 138, 43, 241, 249, 46, 200, 126, 31, 27, 3, 119, 210, 152, 232, 126, 119, 116, 239, 79, 245, 79, 189, 48, 234, 24, 193, 98, 172, 103, 103, 7, 188, 95, 183, 218, 250, 187, 230, 48, 173, 251, 206, 246, 244, 233, 56, 163, 21, 23, 155, 101, 135, 29, 94, 4, 27, 194, 130, 146, 177, 228, 245, 191, 85, 9, 243, 101, 174, 242, 230, 183, 221, 66, 169, 52, 217, 74, 73, 47, 16, 174, 199, 174, 236, 138, 59, 88, 131, 209, 190, 146, 212, 5, 126, 237, 23, 43, 112, 107, 85, 53, 245, 47, 214, 10, 165, 155, 170, 227, 242, 91, 253, 25, 151, 177, 80, 22, 69, 42, 197, 119, 193, 18, 70, 115, 36, 18, 118, 57, 93, 194, 23, 84, 154, 40, 66, 194, 238, 31, 241, 251, 215, 147, 250, 144, 65, 168, 67, 128, 213, 2, 210, 39, 193, 34, 4, 29, 111, 112, 25, 249, 169, 1, 157, 135, 246, 133, 148, 202, 202, 72, 208, 231, 115, 19, 123, 246, 9, 168, 182, 85, 181, 26, 184, 157, 236, 238, 146, 87, 141, 13, 30, 82, 23, 22, 76, 35, 123, 61, 53, 148, 248, 52, 49, 84, 72, 238, 109, 106, 69, 159, 230, 51, 229, 164, 61, 192, 192, 52, 172, 173, 121, 35, 209, 109, 154, 199, 51, 45, 255, 29, 66, 154, 251, 78, 14, 205, 6, 166, 20, 118, 225, 82, 242, 122, 89, 184, 105, 91, 232, 232, 36, 254, 8, 147, 188, 247, 201, 227, 123, 132, 66, 178, 130, 166, 246, 162, 213, 195, 204, 53, 230, 201, 219, 161, 254, 65, 134, 224, 52, 61, 92, 115, 135, 127, 130, 225, 130, 188, 60, 35, 146, 126, 246, 50, 216, 171, 206, 209, 193, 222, 29, 57, 236, 252, 61, 184, 236, 74, 
180, 23, 197, 160, 46, 25, 22, 144, 114, 224, 53, 164, 147, 179, 190, 98, 200, 197, 50, 153, 163, 153, 240, 205, 73, 94, 129, 40, 225, 2, 132, 61, 241, 227, 99, 9, 206, 10, 244, 61, 16, 2, 204, 37, 103, 242, 104, 28, 132, 115, 153, 186, 27, 102, 215, 98, 254, 149, 188, 50, 52, 184, 139, 133, 54, 210, 81, 77, 215, 134, 70, 70, 56, 83, 64, 237, 5, 175, 232, 241, 64, 34, 237, 149, 117, 112, 136, 26, 240, 126, 71, 131, 27, 248, 73, 249, 150, 227, 212, 13, 141, 158, 95, 83, 46, 225, 74, 116, 112, 90, 252, 58, 102, 95, 58, 186, 138, 60, 204, 74, 0, 235, 103, 141, 117, 40, 48, 175, 208, 192, 30, 88, 253, 35, 1, 212, 172, 63, 176, 12, 149, 222, 34, 41, 71, 192, 86, 224, 88, 217, 16, 133, 235, 4, 70, 205, 25, 154, 138, 33, 243, 32, 176, 250, 65, 82, 107, 42, 184, 230, 159, 165, 30, 54, 62, 111, 235, 222, 214, 173, 232, 15, 22, 52, 10, 66, 205, 42, 11, 144, 212, 214, 45, 177, 229, 128, 5, 189, 248, 35, 169, 215, 78, 15, 149, 188, 162, 209, 253, 193, 239, 244, 183, 37, 233, 157, 190, 203, 172, 86, 163, 49, 91, 41, 166, 98, 137, 88, 31, 76, 157, 204, 185, 162, 36, 169, 111, 212, 107, 59, 47, 16, 193, 109, 235, 128, 230, 166, 239, 157, 14, 23, 13, 176, 237, 152, 255, 147, 93, 114, 117, 39, 23, 178, 75, 67, 183, 124, 38, 140, 121, 186, 74, 115, 178, 9, 197, 118, 1, 23, 222, 221, 151, 92, 64, 145, 130, 131, 245, 205, 21, 189, 155, 201, 244, 155, 239, 101, 67, 186, 249, 9, 55, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 192, 52, 174, 242, 81, 205, 211, 61, 25, 139, 159, 177, 105, 41, 30, 42, 213, 46, 197, 53, 167, 58, 7, 239, 29, 185, 82, 211, 68, 212, 184, 107, 226, 41, 43, 224, 22, 195, 136, 195, 83, 229, 114, 175, 66, 156, 29, 234, 244, 207, 2, 130, 205, 111, 77, 101, 218, 137, 131, 12, 134, 83, 223, 104, 127, 53, 221, 160, 131, 7, 177, 14, 230, 144, 57, 252, 186, 129, 17, 232, 197, 196, 98, 209, 1, 45, 125, 38, 79, 78, 79, 205, 86, 137, 34, 148, 236, 104, 255, 148, 126, 139, 170, 152, 81, 124, 241, 99, 85, 64, 219, 190, 41, 26, 
241, 189, 46, 120, 146, 174, 125, 38, 89, 232, 191, 159, 41, 60, 59, 124, 3, 83, 90, 193, 179, 178, 218, 237, 71, 118, 84, 146, 132, 171, 138, 85, 203, 68, 215, 196, 179, 144, 14, 177, 7, 131, 178, 35, 178, 60, 195, 206, 133, 204, 114, 45, 244, 85, 215, 54, 6, 195, 13, 21, 169, 53, 170, 193, 40, 111, 41, 129, 135, 1, 7, 231, 11, 30, 182, 1, 49, 39, 153, 125, 42, 194, 181, 161, 31, 225, 12, 7, 203, 136, 56, 221, 249, 243, 10, 70, 40, 7, 185, 44, 130, 246, 110, 117, 80, 156, 16, 107, 9, 208, 52, 225, 196, 107, 78, 99, 182, 163, 75, 85, 211, 230, 155, 237, 90, 188, 158, 228, 168, 219, 214, 1, 214, 10, 208, 65, 24, 6, 106, 197, 31, 24, 182, 90, 212, 193, 164, 165, 72, 252, 129, 220, 23, 60, 110, 87, 37, 248, 212, 75, 104, 132, 28, 240, 211, 62, 28, 98, 56, 14, 84, 116, 201, 72, 203, 112, 226, 194, 235, 85, 186, 124, 193, 204, 158, 88, 88, 154, 237, 87, 112, 209, 204, 251, 167, 218, 9, 3, 92, 176, 47, 83, 94, 17, 116, 73, 210, 76, 89, 65, 53, 117, 81, 135, 24, 123, 72, 228, 209, 12, 130, 113, 64, 15, 0, 129, 145, 180, 246, 137, 98, 85, 142, 28, 11, 98, 31, 212, 141, 27, 126, 36, 194, 225, 96, 107, 28, 56, 46, 129, 86, 211, 202, 198, 8, 130, 145, 168, 251, 168, 47, 190, 124, 154, 7, 24, 236, 155, 6, 216, 2, 52, 183, 14, 81, 237, 145, 106, 163, 57, 70, 110, 201, 195, 76, 225, 169, 141, 27, 123, 4, 168, 127, 46, 127, 235, 142, 233, 238, 31, 216, 199, 11, 226, 248, 73, 60, 46, 31, 130, 42, 199, 120, 19, 114, 194, 175, 145, 98, 24, 241, 112, 251, 162, 211, 147, 73, 105, 44, 119, 124, 155, 203, 208, 30, 84, 236, 50, 175, 194, 84, 2, 84, 57, 101, 137, 115, 18, 205, 178, 85, 140, 239, 187, 40, 123, 255, 215, 212, 221, 248, 8, 108, 82, 221, 132, 14, 109, 202, 131, 101, 219, 111, 5, 104, 94, 147, 8, 157, 43, 127, 65, 229, 59, 138, 201, 160, 29, 119, 166, 145, 221, 160, 237, 117, 188, 140, 19, 233, 86, 27, 112, 176, 201, 67, 209, 58, 131, 86, 64, 101, 220, 165, 73, 191, 134, 42, 233, 90, 100, 18, 227, 209, 125, 238, 13, 77, 212, 120, 177, 12, 53, 15, 253, 142, 192, 182, 156, 74, 
171, 128, 12, 35, 10, 156, 182, 209, 168, 84, 192, 114, 145, 152, 124, 207, 225, 128, 108, 54, 189, 60, 250, 115, 88, 184, 129, 7, 54, 68, 99, 212, 251, 57, 193, 136, 36, 125, 173, 9, 110, 84, 241, 59, 165, 113, 22, 152, 51, 85, 215, 160, 211, 164, 74, 19, 92, 36, 28, 182, 244, 9, 168, 250, 79, 123, 164, 160, 207, 240, 29, 29, 91, 38, 31, 188, 16, 228, 118, 149, 198, 198, 123, 14, 133, 20, 110, 176, 129, 138, 94, 248, 218, 77, 108, 191, 3, 141, 69, 248, 163, 99, 44, 87, 208, 18, 202, 33, 184, 40, 2, 222, 223, 120, 144, 93, 173, 140, 217, 214, 57, 15, 78, 141, 161, 248, 165, 4, 189, 14, 213, 23, 46, 113, 141, 18, 31, 137, 224, 228, 19, 62, 168, 70, 248, 204, 171, 125, 122, 155, 19, 50, 210, 89, 69, 6, 23, 241, 173, 182, 133, 86, 219, 241, 9, 215, 247, 155, 68, 18, 22, 107, 167, 120, 118, 169, 9, 221, 231, 153, 100, 199, 129, 43, 239, 122, 102, 79, 161, 133, 75, 74, 184, 210, 30, 175, 5, 20, 8, 48, 208, 5, 219, 33, 224, 96, 252, 1, 42, 19, 38, 147, 44, 197, 172, 222, 84, 156, 79, 217, 103, 46, 60, 174, 144, 121, 208, 111, 207, 54, 16, 163, 104, 156, 10, 207, 160, 151, 193, 121, 38, 251, 43, 200, 28, 137, 109, 22, 140, 17, 13, 95, 69, 119, 61, 222, 131, 92, 105, 130, 56, 96, 236, 209, 218, 51, 172, 213, 244, 44, 64, 60, 88, 192, 199, 41, 83, 141, 237, 224, 62, 187, 134, 251, 89, 48, 18, 12, 121, 63, 28, 218, 86, 160, 86, 109, 99, 206, 2, 236, 50, 26, 239, 237, 63, 12, 102, 84, 252, 215, 145, 131, 231, 49, 212, 54, 43, 99, 224, 120, 178, 36, 106, 99, 92, 77, 4, 176, 3, 99, 0, 188, 98, 168, 58, 123, 240, 159, 102, 55, 108, 176, 198, 49, 8, 141, 220, 122, 216, 43, 55, 35, 106, 133, 53, 123, 69, 84, 167, 23, 9, 209, 194, 29, 186, 63, 184, 71, 241, 49, 33, 47, 88, 39, 65, 91, 85, 92, 198, 126, 212, 129, 102, 167, 100, 174, 89, 228, 134, 42, 212, 208, 194, 252, 67, 150, 198, 132, 209, 16, 209, 249, 44, 238, 138, 179, 128, 92, 6, 197, 10, 134, 97, 232, 147, 219, 143, 72, 209, 83, 35, 210, 182, 162, 183, 213, 125, 47, 156, 218, 18, 194, 137, 234, 117, 231, 112, 250, 48, 146, 
123, 141, 174, 61, 213, 177, 251, 162, 214, 211, 122, 171, 155, 211, 117, 228, 198, 235, 155, 225, 14, 85, 57, 54, 8, 168, 75, 216, 68, 135, 19, 133, 24, 26, 138, 111, 72, 0, 250, 101, 237, 246, 137, 8, 38, 83, 123, 237, 250, 104, 106, 31, 217, 235, 103, 177, 73, 174, 208, 233, 230, 69, 77, 161, 21, 10, 223, 157, 62, 232, 133, 80, 117, 214, 254, 247, 93, 53, 45, 130, 226, 118, 216, 149, 78, 143, 25, 30, 110, 134, 48, 72, 1, 107, 197, 211, 48, 235, 82, 179, 54, 95, 171, 91, 36, 188, 53, 85, 24, 216, 236, 79, 62, 90, 52, 157, 175, 171, 127, 132, 118, 187, 200, 5, 60, 22, 6, 117, 78, 242, 169, 102, 46, 74, 185, 188, 138, 221, 79, 49, 13, 247, 196, 176, 199, 53, 220, 208, 175, 132, 8, 186, 175, 76, 125, 227, 33, 44, 193, 209, 17, 52, 110, 227, 212, 139, 93, 154, 218, 220, 57, 5, 255, 119, 252, 7, 69, 145, 109, 118, 76, 45, 48, 20, 218, 123, 26, 193, 203, 254, 58, 206, 189, 91, 87, 28, 112, 27, 204, 145, 8, 8, 203, 83, 199, 179, 70, 32, 208, 8, 44, 44, 217, 69, 93, 32, 74, 13, 105, 100, 232, 134, 89, 67, 98, 52, 242, 113, 109, 36, 244, 169, 115, 146, 186, 48, 75, 128, 102, 149, 174, 100, 5, 87, 23, 88, 161, 184, 135, 198, 80, 194, 179, 150, 4, 182, 153, 124, 63, 34, 251, 63, 73, 37, 18, 188, 143, 151, 211, 2, 11, 182, 108, 6, 109, 9, 192, 85, 67, 251, 113, 129, 185, 10, 81, 246, 134, 57, 249, 181, 39, 2, 172, 177, 69, 221, 100, 77, 234, 159, 108, 192, 187, 22, 239, 204, 26, 94, 199, 59, 193, 124, 97, 107, 238, 43, 72, 36, 172, 173, 100, 3, 161, 92, 128, 18, 51, 253, 186, 99, 120, 241, 227, 110, 163, 21, 128, 76, 0, 148, 199, 82, 165, 71, 150, 145, 226, 233, 70, 52, 185, 206, 113, 110, 186, 143, 46, 101, 113, 96, 134, 203, 115, 46, 55, 192, 157, 132, 213, 103, 74, 56, 60, 226, 172, 157, 32, 97, 223, 104, 48, 125, 83, 201, 108, 78, 237, 12, 215, 156, 61, 116, 134, 118, 63, 200, 81, 7, 47, 77, 30, 241, 4, 137, 72, 8, 91, 145, 131, 254, 195, 186, 45, 197, 110, 40, 219, 97, 152, 169, 225, 55, 192, 230, 111, 60, 241, 161, 50, 177, 243, 51, 234, 106, 27, 142, 1, 189, 193, 154, 
186, 97, 14, 148, 58, 207, 191, 184, 68, 201, 243, 178, 192, 239, 119, 184, 134, 119, 241, 66, 223, 13, 66, 95, 251, 229, 94, 98, 209, 155, 193, 245, 65, 137, 119, 28, 21, 248, 157, 7, 38, 235, 152, 149, 26, 232, 137, 163, 244, 168, 119, 255, 98, 154, 244, 184, 192, 128, 41, 22, 106, 88, 67, 221, 88, 250, 13, 146, 186, 159, 222, 62, 120, 192, 209, 111, 237, 74, 128, 194, 125, 45, 48, 161, 135, 15, 178, 28, 138, 107, 200, 198, 196, 70, 193, 133, 132, 133, 166, 177, 202, 99, 13, 217, 192, 252, 46, 121, 76, 53, 138, 194, 89, 188, 116, 79, 121, 226, 110, 5, 152, 64, 100, 50, 210, 195, 168, 80, 1, 241, 155, 92, 104, 113, 111, 242, 97, 185, 175, 105, 220, 50, 208, 175, 77, 147, 100, 181, 2, 190, 96, 109, 124, 113, 30, 81, 143, 151, 17, 71, 202, 252, 10, 155, 190, 218, 183, 185, 200, 67, 199, 111, 150, 251, 13, 147, 58, 133, 186, 141, 249, 64, 223, 148, 110, 28, 236, 49, 22, 57, 17, 73, 146, 239, 146, 30, 234, 88, 167, 108, 235, 12, 88, 131, 33, 37, 202, 137, 81, 186, 89, 6, 6, 72, 210, 191, 90, 124, 235, 98, 144, 97, 245, 49, 67, 169, 106, 190, 157, 56, 227, 0, 196, 55, 177, 246, 5, 171, 178, 130, 214, 132, 191, 0, 218, 236, 129, 191, 155, 129, 241, 33, 208, 156, 254, 132, 121, 163, 189, 231, 51, 180, 21, 190, 87, 29, 5, 102, 185, 218, 136, 248, 113, 65, 209, 238, 177, 109, 248, 167, 189, 241, 181, 115, 24, 89, 95, 67, 115, 211, 244, 218, 63, 79, 148, 168, 238, 47, 110, 82, 130, 96, 55, 120, 139, 149, 120, 204, 85, 249, 232, 88, 139, 152, 111, 170, 143, 209, 129, 105, 87, 50, 12, 192, 50, 227, 191, 29, 194, 102, 243, 193, 142, 62, 49, 248, 188, 92, 11, 60, 228, 133, 100, 209, 8, 246, 85, 143, 169, 197, 81, 169, 72, 51, 102, 19, 210, 249, 185, 78, 200, 179, 42, 40, 111, 94, 84, 221, 156, 90, 2, 216, 95, 53, 92, 217, 172, 192, 97, 130, 77, 87, 163, 65, 112, 45, 250, 81, 131, 95, 4, 68, 136, 139, 240, 43, 141, 251, 195, 36, 31, 179, 197, 10, 76, 230, 247, 154, 134, 215, 76, 134, 240, 248, 151, 186, 16, 253, 99, 129, 147, 47, 55, 53, 116, 139, 65, 64, 44, 29, 168, 200, 169, 
8, 118, 38, 100, 162, 124, 48, 46, 137, 246, 255, 39, 156, 70, 228, 139, 6, 35, 67, 229, 235, 18, 114, 66, 206, 217, 236, 11, 86, 119, 106, 120, 177, 150, 118, 52, 6, 204, 14, 4, 196, 40, 149, 120, 162, 11, 143, 168, 138, 248, 243, 62, 123, 15, 78, 255, 23, 100, 206, 115, 143, 144, 114, 210, 165, 115, 161, 73, 211, 146, 104, 185, 127, 166, 117, 50, 63, 25, 242, 101, 119, 219, 108, 208, 71, 204, 116, 138, 226, 175, 178, 243, 52, 238, 124, 85, 161, 173, 70, 233, 141, 108, 161, 84, 14, 226, 235, 12, 28, 16, 249, 79, 198, 120, 192, 204, 17, 138, 231, 159, 81, 25, 130, 216, 29, 246, 62, 212, 113, 237, 67, 131, 145, 221, 189, 143, 137, 244, 17, 214, 165, 13, 205, 92, 172, 241, 8, 205, 146, 131, 246, 97, 210, 91, 107, 167, 251, 32, 155, 208, 141, 74, 184, 228, 40, 237, 171, 12, 29, 152, 31, 144, 168, 66, 216, 171, 187, 23, 118, 55, 159, 194, 137, 51, 119, 14, 95, 153, 62, 186, 38, 69, 119, 144, 155, 50, 15, 54, 37, 88, 32, 225, 3, 105, 136, 254, 28, 104, 210, 231, 16, 238, 109, 37, 117, 150, 0, 253, 121, 33, 14, 83, 178, 187, 112, 57, 96, 197, 72, 0, 164, 107, 236, 45, 138, 56, 214, 34, 5, 141, 86, 227, 107, 107, 110, 121, 191, 132, 151, 203, 91, 158, 82, 180, 65, 249, 109, 94, 169, 45, 25, 99, 86, 87, 79, 140, 44, 141, 40, 166, 128, 206, 146, 244, 31, 200, 192, 125, 35, 161, 202, 241, 225, 82, 44, 114, 189, 42, 135, 50, 99, 166, 200, 107, 92, 178, 232, 4, 88, 33, 27, 122, 192, 174, 138, 147, 154, 54, 111, 75, 46, 159, 73, 34, 154, 255, 102, 16, 123, 161, 222, 172, 154, 208, 206, 23, 69, 76, 84, 152, 153, 178, 151, 12, 41, 104, 98, 214, 142, 202, 114, 183, 30, 242, 8, 237, 182, 133, 229, 57, 209, 32, 164, 139, 247, 137, 6, 42, 127, 100, 170, 141, 113, 52, 13, 52, 137, 66, 237, 43, 61, 149, 191, 124, 73, 232, 14, 109, 168, 151, 164, 237, 141, 6, 228, 197, 168, 143, 4, 171, 113, 28, 46, 228, 101, 111, 198, 205, 32, 110, 35, 49, 127, 95, 207, 11, 155, 221, 27, 85, 188, 14, 127, 211, 19, 208, 139, 67, 182, 153, 229, 29, 214, 156, 191, 29, 10, 66, 187, 194, 168, 194, 246, 234, 
118, 170, 194, 82, 179, 242, 24, 58, 142, 24, 232, 103, 109, 114, 210, 10, 237, 8, 201, 66, 125, 245, 52, 129, 60, 10, 211, 199, 73, 152, 116, 175, 216, 111, 213, 141, 162, 172, 78, 204, 92, 182, 12, 158, 165, 129, 98, 54, 130, 236, 12, 47, 203, 63, 133, 20, 225, 128, 178, 150, 84, 94, 228, 236, 54, 44, 68, 65, 204, 84, 216, 7, 253, 22, 254, 225, 203, 108, 200, 59, 166, 91, 70, 119, 185, 166, 175, 76, 129, 175, 203, 58, 205, 39, 126, 15, 239, 104, 20, 95, 127, 161, 196, 28, 88, 51, 11, 232, 44, 119, 45, 25, 32, 240, 237, 147, 222, 74, 129, 161, 39, 7, 235, 65, 113, 108, 75, 194, 117, 54, 46, 48, 252, 23, 145, 253, 146, 26, 193, 71, 31, 93, 190, 129, 74, 30, 64, 22, 204, 73, 25, 56, 130, 29, 202, 84, 67, 84, 150, 164, 154, 229, 67, 49, 125, 31, 114, 44, 74, 83, 199, 26, 129, 218, 21, 91, 180, 169, 142, 104, 114, 150, 34, 97, 39, 153, 115, 240, 90, 71, 179, 198, 137, 15, 66, 82, 243, 142, 188, 61, 73, 39, 162, 29, 127, 183, 2, 204, 3, 194, 51, 47, 29, 7, 155, 6, 98, 225, 25, 98, 224, 30, 47, 152, 184, 23, 80, 115, 202, 48, 161, 23, 137, 32, 230, 210, 45, 42, 173, 117, 116, 181, 11, 250, 108, 100, 129, 45, 98, 77, 208, 215, 113, 103, 107, 140, 17, 213, 253, 202, 22, 158, 153, 224, 216, 53, 131, 125, 60, 138, 229, 199, 236, 168, 121, 174, 126, 204, 113, 113, 66, 147, 2, 58, 143, 50, 120, 123, 27, 55, 132, 118, 255, 152, 128, 137, 97, 145, 144, 164, 102, 71, 95, 59, 57, 109, 99, 172, 77, 159, 69, 81, 107, 38, 58, 81, 173, 30, 17, 123, 58, 208, 159, 212, 204, 195, 146, 48, 129, 223, 62, 74, 34, 161, 188, 185, 150, 124, 2, 175, 205, 241, 202, 161, 7, 50, 19, 78, 41, 248, 70, 9, 211, 53, 242, 147, 222, 158, 230, 133, 149, 90, 83, 177, 133, 78, 189, 246, 129, 242, 39, 93, 171, 33, 26, 131, 211, 140, 34, 230, 172, 138, 61, 193, 246, 253, 205, 210, 92, 248, 181, 219, 3, 47, 200, 24, 91, 124, 122, 15, 213, 192, 115, 177, 196, 142, 141, 121, 98, 28, 202, 32, 156, 46, 6, 34, 21, 164, 212, 186, 205, 85, 161, 61, 176, 237, 250, 33, 184, 250, 248, 154, 133, 16, 186, 207, 2, 199, 
145, 74, 196, 234, 201, 66, 76, 221, 85, 66, 241, 112, 97, 15, 52, 249, 211, 90, 132, 14, 74, 167, 81, 28, 184, 221, 51, 158, 96, 161, 144, 80, 7, 154, 174, 61, 234, 66, 93, 40, 229, 187, 186, 239, 121, 90, 137, 103, 229, 48, 116, 19, 212, 231, 195, 114, 57, 28, 234, 149, 211, 73, 204, 202, 52, 71, 112, 132, 143, 103, 10, 83, 205, 185, 251, 62, 174, 243, 32, 121, 28, 252, 92, 167, 231, 119, 110, 129, 14, 233, 37, 69, 120, 15, 90, 41, 124, 169, 196, 161, 137, 0, 217, 44, 15, 93, 212, 239, 58, 181, 20, 176, 159, 162, 74, 229, 41, 99, 14, 145, 65, 31, 224, 179, 30, 132, 226, 71, 18, 132, 48, 78, 33, 247, 86, 205, 28, 197, 175, 204, 126, 44, 140, 66, 13, 112, 183, 84, 3, 143, 214, 80, 133, 126, 145, 143, 34, 77, 23, 146, 177, 165, 151, 70, 240, 223, 116, 115, 25, 212, 248, 27, 113, 47, 200, 90, 132, 56, 26, 94, 0, 76, 138, 174, 112, 47, 54, 107, 193, 149, 73, 53, 142, 218, 91, 57, 238, 46, 178, 154, 71, 19, 189, 166, 196, 94, 170, 218, 133, 118, 205, 225, 108, 153, 24, 237, 27, 111, 37, 90, 14, 100, 15, 229, 18, 128, 213, 4, 155, 143, 166, 39, 142, 206, 78, 189, 91, 166, 159, 129, 30, 7, 139, 72, 227, 164, 164, 161, 100, 94, 162, 55, 49, 248, 189, 179, 206, 146, 255, 91, 179, 82, 6, 138, 24, 141, 171, 146, 217, 50, 135, 32, 203, 177, 27, 91, 24, 204, 252, 137, 121, 90, 226, 43, 35, 226, 246, 38, 72, 212, 172, 188, 173, 69, 107, 148, 163, 88, 125, 144, 127, 44, 41, 190, 71, 0, 88, 70, 175, 189, 233, 55, 206, 220, 185, 192, 197, 165, 193, 87, 146, 249, 94, 72, 59, 57, 43, 52, 203, 194, 56, 221, 21, 130, 183, 78, 75, 154, 67, 18, 74, 151, 201, 38, 202, 108, 61, 8, 29, 98, 62, 172, 77, 155, 227, 189, 10, 116, 179, 50, 165, 152, 74, 209, 96, 200, 230, 180, 188, 191, 165, 245, 108, 36, 26, 37, 17, 167, 125, 120, 111, 236, 211, 66, 183, 1, 251, 79, 34, 26, 121, 28, 136, 33, 149, 17, 174, 43, 206, 137, 77, 10, 173, 243, 111, 73, 106, 241, 252, 70, 43, 147, 120, 40, 229, 121, 201, 69, 195, 63, 43, 87, 45, 203, 174, 214, 181, 35, 46, 226, 47, 44, 56, 123, 56, 73, 166, 252, 144, 
150, 100, 73, 156, 177, 225, 105, 161, 41, 226, 162, 167, 132, 45, 62, 211, 104, 16, 89, 173, 255, 70, 194, 254, 186, 130, 50, 135, 121, 197, 147, 84, 97, 49, 107, 26, 164, 189, 151, 234, 39, 72, 117, 21, 92, 43, 119, 110, 45, 209, 172, 192, 131, 45, 2, 99, 179, 131, 248, 69, 144, 223, 23, 151, 0, 196, 64, 161, 172, 49, 56, 114, 108, 68, 134, 160, 209, 154, 30, 240, 206, 156, 140, 80, 70, 34, 218, 43, 187, 80, 209, 26, 166, 69, 4, 240, 135, 148, 68, 64, 170, 244, 134, 32, 69, 87, 50, 135, 112, 91, 71, 0, 134, 245, 80, 30, 27, 12, 234, 212, 223, 168, 27, 78, 19, 126, 77, 138, 121, 75, 51, 47, 44, 5, 253, 227, 88, 41, 226, 189, 183, 124, 184, 68, 210, 6, 171, 77, 78, 221, 154, 78, 229, 88, 147, 147, 21, 119, 13, 251, 71, 89, 237, 224, 152, 21, 88, 234, 234, 130, 74, 119, 115, 27, 8, 139, 155, 243, 195, 29, 196, 213, 5, 161, 117, 41, 246, 92, 204, 143, 186, 229, 59, 18, 76, 171, 224, 152, 224, 82, 193, 115, 208, 9, 84, 177, 130, 178, 122, 251, 216, 39, 253, 73, 199, 154, 88, 236, 215, 219, 154, 195, 11, 235, 165, 217, 212, 248, 65, 43, 0, 112, 35, 39, 18, 238, 212, 151, 187, 53, 30, 235, 202, 217, 163, 177, 80, 70, 97, 90, 98, 174, 5, 163, 246, 219, 124, 216, 253, 43, 114, 108, 69, 2, 242, 77, 85, 51, 91, 7, 226, 100, 238, 213, 183, 55, 47, 222, 65, 222, 75, 49, 102, 29, 20, 10, 103, 232, 107, 0, 27, 247, 112, 177, 121, 185, 250, 215, 18, 78, 186, 87, 112, 147, 225, 75, 236, 241, 197, 161, 123, 146, 132, 217, 240, 98, 201, 17, 53, 82, 60, 237, 232, 92, 177, 174, 1, 51, 12, 54, 226, 215, 58, 61, 186, 15, 135, 103, 195, 176, 10, 74, 110, 97, 106, 100, 112, 153, 16, 115, 248, 136, 36, 43, 245, 26, 211, 246, 143, 10, 112, 14, 176, 86, 8, 62, 93, 100, 135, 160, 125, 244, 49, 17, 192, 63, 42, 110, 36, 61, 93, 14, 16, 41, 98, 232, 84, 222, 245, 155, 24, 210, 135, 16, 84, 204, 214, 83, 67, 255, 217, 117, 85, 99, 107, 3, 247, 112, 24, 143, 221, 99, 49, 222, 12, 113, 185, 146, 231, 24, 10, 163, 80, 92, 208, 40, 35, 42, 4, 179, 164, 133, 228, 238, 37, 194, 173, 2, 132, 221, 127, 
26, 108, 192, 48, 29, 100, 252, 150, 41, 204, 9, 103, 1, 14, 158, 249, 155, 88, 236, 122, 184, 21, 190, 12, 42, 239, 63, 211, 105, 218, 77, 199, 63, 86, 126, 40, 209, 229, 23, 173, 73, 180, 54, 17, 136, 94, 9, 80, 145, 207, 18, 196, 81, 163, 232, 195, 15, 97, 189, 3, 77, 171, 13, 209, 115, 43, 111, 139, 101, 26, 181, 212, 49, 27, 98, 60, 243, 94, 127, 50, 134, 131, 80, 13, 83, 89, 166, 11, 230, 42, 243, 219, 131, 143, 64, 179, 1, 44, 206, 1, 25, 71, 47, 102, 1, 174, 91, 96, 145, 66, 205, 155, 46, 175, 197, 35, 134, 19, 142, 96, 224, 80, 249, 187, 203, 133, 189, 178, 243, 148, 190, 210, 96, 235, 209, 39, 47, 228, 34, 95, 211, 22, 212, 179, 144, 17, 7, 167, 105, 181, 197, 163, 21, 92, 227, 106, 99, 97, 69, 82, 75, 81, 252, 134, 112, 88, 12, 199, 223, 255, 127, 120, 203, 252, 45, 155, 20, 111, 208, 148, 38, 62, 26, 44, 198, 171, 101, 105, 55, 59, 34, 149, 250, 144, 241, 173, 52, 152, 11, 206, 218, 250, 31, 106, 194, 66, 21, 172, 43, 23, 96, 152, 74, 41, 111, 46, 57, 67, 46, 115, 157, 120, 32, 244, 181, 120, 138, 172, 192, 72, 145, 198, 4, 153, 118, 250, 158, 115, 197, 143, 215, 34, 193, 194, 184, 38, 232, 194, 11, 238, 179, 12, 171, 247, 147, 122, 216, 224, 15, 240, 70, 64, 139, 209, 176, 39, 37, 185, 63, 97, 103, 177, 114, 120, 230, 242, 13, 34, 185, 145, 67, 129, 107, 21, 250, 38, 222, 4, 102, 72, 61, 149, 25, 82, 132, 101, 245, 189, 223, 10, 48, 15, 181, 35, 98, 9, 193, 88, 8, 9, 178, 105, 94, 34, 146, 236, 134, 168, 46, 201, 101, 93, 30, 11, 6, 30, 76, 87, 11, 46, 168, 244, 47, 150, 194, 127, 252, 157, 124, 161, 197, 169, 21, 17, 224, 26, 55, 22, 112, 96, 0, 46, 220, 143, 129, 166, 108, 155, 239, 158, 186, 125, 240, 44, 54, 141, 156, 129, 184, 216, 180, 189, 158, 14, 197, 64, 97, 152, 154, 39, 159, 106, 74, 166, 252, 70, 203, 206, 30, 218, 69, 26, 139, 113, 26, 68, 177, 99, 237, 180, 12, 66, 108, 95, 8, 136, 80, 81, 206, 131, 94, 203, 20, 229, 54, 14, 70, 50, 176, 87, 128, 171, 187, 37, 118, 77, 147, 114, 186, 163, 3, 198, 64, 85, 226, 183, 5, 182, 14, 213, 237, 
27, 89, 154, 51, 218, 52, 253, 88, 56, 106, 3, 52, 10, 165, 125, 60, 174, 246, 189, 50, 27, 253, 116, 62, 247, 131, 198, 18, 119, 202, 10, 60, 233, 29, 6, 46, 39, 204, 152, 116, 182, 243, 99, 52, 230, 84, 207, 220, 58, 27, 222, 198, 85, 142, 186, 46, 182, 163, 65, 94, 109, 13, 50, 127, 209, 124, 50, 2, 55, 128, 139, 183, 23, 15, 168, 159, 36, 109, 138, 41, 12, 175, 132, 224, 65, 70, 52, 80, 2, 179, 46, 220, 218, 110, 187, 70, 230, 200, 212, 235, 95, 35, 160, 155, 161, 34, 168, 200, 173, 13, 3, 229, 241, 25, 186, 115, 182, 157, 24, 88, 100, 80, 207, 59, 128, 100, 106, 141, 146, 170, 121, 110, 112, 25, 5, 26, 101, 107, 30, 17, 72, 177, 162, 164, 54, 100, 210, 196, 191, 71, 5, 236, 4, 55, 242, 132, 64, 101, 37, 124, 222, 15, 39, 229, 111, 76, 210, 171, 236, 7, 93, 14, 216, 248, 45, 79, 218, 85, 211, 199, 250, 104, 33, 143, 222, 15, 88, 238, 164, 172, 135, 2, 101, 11, 255, 112, 8, 238, 139, 63, 190, 149, 130, 215, 130, 110, 68, 83, 56, 239, 253, 120, 96, 196, 38, 198, 153, 137, 110, 157, 87, 94, 69, 70, 176, 197, 220, 237, 73, 176, 217, 112, 186, 20, 16, 151, 251, 244, 204, 120, 42, 216, 194, 250, 127, 191, 45, 87, 138, 204, 145, 110, 119, 217, 185, 242, 50, 47, 102, 253, 107, 242, 121, 122, 204, 10, 232, 48, 214, 183, 190, 152, 249, 7, 228, 139, 181, 252, 84, 123, 199, 146, 175, 30, 152, 54, 4, 162, 26, 19, 103, 112, 216, 130, 148, 218, 40, 215, 53, 114, 87, 174, 194, 30, 148, 70, 183, 213, 103, 2, 24, 161, 173, 212, 50, 52, 10, 46, 252, 205, 46, 192, 187, 105, 16, 187, 193, 255, 182, 92, 222, 209, 193, 118, 129, 137, 218, 80, 242, 213, 89, 122, 224, 65, 158, 65, 104, 40, 15, 135, 86, 159, 63, 139, 29, 195, 230, 20, 180, 82, 142, 98, 48, 228, 204, 22, 199, 47, 96, 30, 125, 192, 222, 237, 243, 115, 222, 31, 182, 150, 172, 42, 17, 232, 108, 142, 241, 119, 143, 215, 117, 221, 37, 188, 136, 68, 166, 53, 120, 198, 16, 59, 74, 184, 175, 120, 42, 72, 102, 137, 171, 56, 141, 189, 173, 89, 128, 12, 114, 241, 189, 111, 136, 73, 132, 123, 93, 249, 199, 130, 191, 251, 1, 251, 
208, 58, 108, 177, 41, 72, 20, 202, 120, 187, 38, 8, 163, 75, 63, 139, 91, 159, 26, 233, 162, 204, 152, 80, 228, 52, 96, 166, 8, 250, 206, 115, 97, 28, 220, 48, 59, 19, 66, 12, 137, 203, 7, 95, 23, 107, 3, 185, 110, 57, 39, 117, 13, 68, 70, 54, 238, 106, 230, 167, 242, 216, 208, 42, 154, 110, 41, 188, 51, 156, 153, 0, 199, 221, 58, 21, 57, 68, 24, 114, 67, 93, 90, 0, 252, 188, 145, 7, 24, 55, 189, 176, 50, 106, 154, 57, 17, 95, 223, 93, 17, 125, 163, 80, 18, 22, 229, 212, 90, 138, 222, 86, 8, 125, 63, 229, 145, 190, 200, 10, 39, 176, 9, 182, 211, 236, 122, 29, 134, 1, 186, 231, 227, 215, 194, 42, 240, 7, 196, 0, 179, 211, 150, 193, 21, 137, 84, 32, 92, 218, 185, 7, 200, 34, 179, 138, 244, 114, 167, 159, 23, 51, 124, 60, 253, 124, 75, 8, 65, 123, 59, 88, 219, 250, 242, 250, 112, 27, 222, 143, 17, 107, 48, 44, 172, 13, 223, 78, 218, 107, 93, 235, 5, 208, 23, 179, 176, 38, 213, 101, 154, 64, 132, 134, 83, 63, 77, 229, 126, 65, 142, 245, 167, 180, 96, 51, 24, 148, 105, 243, 63, 186, 163, 124, 34, 50, 179, 29, 215, 142, 5, 239, 54, 7, 196, 36, 81, 188, 247, 192, 3, 244, 124, 253, 168, 19, 233, 109, 231, 192, 175, 145, 117, 76, 63, 70, 62, 40, 182, 78, 254, 130, 186, 51, 198, 159, 107, 19, 156, 134, 103, 50, 90, 103, 74, 56, 89, 219, 199, 9, 95, 82, 84, 194, 128, 42, 20, 199, 148, 228, 7, 118, 119, 197, 45, 128, 9, 170, 208, 53, 183, 193, 219, 5, 217, 212, 134, 223, 36, 133, 92, 186, 235, 96, 221, 117, 0, 193, 177, 162, 117, 175, 70, 109, 31, 153, 192, 154, 149, 154, 14, 97, 150, 111, 227, 28, 62, 28, 87, 9, 143, 179, 167, 61, 34, 56, 49, 214, 103, 226, 6, 252, 32, 41, 67, 251, 82, 211, 79, 59, 118, 84, 13, 186, 17, 210, 208, 173, 195, 188, 134, 107, 74, 31, 134, 203, 90, 191, 112, 36, 15, 24, 25, 235, 139, 143, 62, 206, 200, 184, 140, 76, 38, 33, 145, 111, 208, 150, 137, 130, 192, 60, 198, 200, 136, 65, 8, 120, 228, 137, 230, 157, 86, 120, 28, 249, 236, 246, 108, 236, 190, 181, 14, 130, 11, 117, 191, 234, 124, 3, 114, 238, 202, 204, 12, 64, 157, 68, 242, 69, 222, 71, 
38, 20, 167, 248, 156, 18, 118, 68, 41, 139, 228, 235, 148, 91, 226, 42, 148, 22, 166, 232, 244, 29, 159, 154, 36, 142, 96, 176, 49, 183, 130, 138, 26, 37, 220, 156, 202, 183, 34, 131, 245, 174, 136, 48, 154, 230, 37, 118, 121, 68, 231, 79, 63, 221, 46, 137, 160, 179, 87, 17, 218, 214, 63, 102, 102, 99, 169, 198, 113, 33, 76, 13, 61, 36, 89, 148, 49, 134, 239, 253, 12, 242, 28, 5, 21, 143, 242, 106, 146, 125, 164, 137, 207, 95, 235, 57, 211, 192, 118, 224, 242, 249, 16, 83, 56, 41, 181, 124, 108, 16, 28, 185, 71, 171, 15, 164, 118, 198, 66, 198, 194, 107, 83, 84, 163, 186, 219, 43, 221, 9, 3, 20, 103, 40, 59, 249, 24, 216, 106, 16, 99, 168, 109, 128, 169, 57, 46, 158, 81, 167, 201, 255, 62, 103, 14, 62, 238, 99, 254, 210, 106, 23, 120, 215, 2, 3, 73, 251, 237, 135, 128, 128, 206, 101, 77, 245, 27, 70, 53, 196, 81, 223, 13, 0, 204, 10, 222, 12, 164, 202, 100, 79, 25, 249, 121, 215, 148, 225, 91, 127, 75, 75, 163, 110, 205, 5, 204, 177, 254, 134, 245, 217, 178, 64, 239, 106, 55, 124, 16, 80, 124, 16, 81, 169, 61, 200, 221, 24, 221, 18, 233, 230, 164, 121, 212, 126, 79, 2, 216, 164, 249, 41, 255, 39, 98, 231, 12, 123, 13, 116, 220, 172, 166, 89, 112, 113, 250, 61, 176, 153, 84, 225, 187, 162, 79, 65, 106, 221, 173, 209, 157, 70, 33, 75, 158, 110, 152, 39, 200, 89, 91, 89, 253, 126, 84, 27, 158, 181, 101, 249, 32, 129, 198, 38, 144, 92, 64, 112, 208, 113, 60, 212, 72, 138, 49, 82, 95, 183, 174, 225, 77, 53, 121, 211, 223, 131, 161, 148, 220, 88, 131, 151, 69, 208, 240, 49, 163, 151, 121, 50, 79, 129, 137, 98, 57, 18, 33, 202, 150, 120, 255, 99, 34, 54, 85, 204, 172, 120, 77, 42, 123, 242, 129, 92, 196, 78, 101, 98, 25, 146, 73, 186, 132, 227, 246, 96, 236, 208, 63, 50, 90, 183, 134, 219, 143, 100, 54, 175, 20, 172, 107, 171, 169, 199, 50, 110, 252, 135, 93, 169, 144, 100, 236, 91, 122, 111, 40, 196, 143, 73, 177, 56, 50, 27, 21, 249, 185, 212, 182, 40, 134, 197, 187, 55, 165, 125, 48, 132, 76, 242, 184, 173, 177, 111, 150, 67, 58, 189, 122, 152, 228, 51, 241, 130, 42, 
113, 112, 62, 70, 131, 4, 23, 177, 114, 212, 125, 157, 28, 8, 238, 223, 158, 211, 172, 145, 188, 65, 125, 62, 45, 220, 124, 226, 100, 34, 36, 153, 93, 76, 157, 89, 219, 60, 42, 88, 68, 158, 155, 181, 124, 242, 72, 167, 74, 56, 176, 56, 114, 203, 54, 170, 85, 155, 235, 79, 29, 201, 71, 124, 181, 127, 180, 139, 147, 47, 167, 26, 18, 67, 141, 150, 37, 197, 6, 170, 96, 68, 89, 209, 246, 182, 183, 53, 68, 239, 126, 147, 50, 210, 34, 248, 246, 109, 34, 41, 213, 229, 134, 154, 97, 241, 132, 17, 162, 180, 74, 151, 253, 150, 69, 251, 151, 63, 192, 219, 79, 253, 212, 137, 234, 196, 133, 215, 211, 147, 128, 54, 85, 93, 202, 30, 168, 167, 128, 125, 150, 188, 250, 23, 195, 249, 52, 179, 26, 207, 129, 227, 68, 78, 56, 2, 17, 86, 220, 89, 99, 19, 20, 173, 57, 203, 140, 66, 231, 27, 69, 214, 238, 201, 134, 89, 102, 254, 85, 249, 132, 115, 51, 39, 65, 184, 199, 187, 14, 44, 6, 217, 16, 227, 116, 185, 66, 201, 188, 95, 66, 178, 243, 103, 79, 144, 147, 139, 59, 123, 238, 28, 18, 202, 166, 75, 59, 246, 190, 150, 75, 73, 182, 251, 132, 15, 248, 96, 112, 253, 41, 3, 203, 44, 141, 147, 255, 69, 85, 78, 129, 60, 145, 81, 166, 224, 131, 132, 215, 210, 169, 5, 136, 178, 23, 30, 139, 153, 106, 40, 54, 139, 244, 214, 43, 132, 229, 221, 58, 81, 81, 57, 190, 168, 255, 114, 63, 122, 173, 6, 136, 110, 159, 188, 50, 188, 91, 56, 185, 99, 107, 139, 120, 176, 228, 142, 146, 209, 96, 27, 133, 77, 69, 254, 114, 99, 194, 195, 4, 80, 36, 220, 147, 252, 207, 142, 66, 65, 64, 226, 63, 85, 218, 66, 125, 108, 197, 49, 143, 59, 43, 182, 0, 253, 169, 234, 209, 57, 90, 190, 234, 14, 25, 237, 62, 155, 188, 246, 92, 78, 241, 21, 154, 242, 201, 59, 16, 147, 73, 252, 52, 13, 175, 51, 158, 248, 118, 159, 4, 186, 101, 250, 170, 10, 16, 135, 251, 224, 114, 97, 61, 25, 102, 23, 74, 58, 118, 158, 32, 74, 115, 104, 240, 131, 42, 187, 155, 134, 209, 171, 225, 85, 13, 71, 252, 156, 81, 126, 72, 110, 45, 207, 84, 113, 187, 61, 152, 107, 111, 80, 105, 35, 104, 81, 245, 68, 28, 51, 67, 212, 172, 99, 191, 248, 217, 91, 186, 
232, 145, 201, 135, 56, 164, 52, 127, 137, 111, 186, 6, 164, 236, 112, 31, 232, 75, 224, 58, 209, 31, 205, 204, 46, 180, 127, 215, 44, 45, 13, 147, 165, 245, 241, 162, 178, 249, 226, 241, 113, 104, 80, 175, 59, 1, 43, 12, 25, 255, 146, 70, 208, 140, 140, 225, 122, 125, 181, 239, 73, 89, 5, 107, 9, 241, 8, 16, 223, 203, 176, 57, 33, 4, 115, 216, 80, 155, 249, 103, 241, 188, 44, 216, 37, 115, 171, 104, 154, 145, 134, 183, 58, 141, 141, 178, 123, 166, 222, 67, 68, 154, 167, 52, 235, 31, 112, 206, 223, 53, 6, 183, 5, 146, 93, 226, 30, 73, 237, 197, 140, 19, 34, 104, 116, 213, 152, 6, 75, 215, 218, 214, 66, 101, 19, 72, 244, 9, 81, 57, 43, 202, 165, 46, 36, 100, 31, 15, 218, 44, 181, 120, 172, 25, 99, 242, 188, 3, 83, 202, 190, 27, 251, 130, 200, 101, 175, 176, 105, 39, 227, 210, 240, 121, 137, 42, 27, 128, 5, 30, 234, 109, 232, 77, 59, 100, 180, 107, 5, 215, 28, 55, 228, 186, 224, 16, 156, 182, 238, 181, 59, 124, 105, 117, 32, 66, 83, 110, 109, 112, 68, 95, 77, 121, 139, 94, 176, 190, 6, 58, 109, 213, 214, 24, 253, 72, 61, 86, 209, 20, 88, 36, 146, 179, 79, 168, 66, 209, 168, 48, 253, 177, 239, 139, 166, 213, 72, 242, 160, 36, 254, 135, 57, 207, 55, 198, 205, 150, 198, 54, 140, 247, 242, 52, 106, 132, 71, 67, 233, 29, 181, 247, 189, 36, 48, 232, 111, 130, 149, 234, 243, 241, 165, 66, 221, 64, 173, 207, 0, 69, 201, 111, 209, 157, 16, 56, 217, 29, 157, 106, 232, 206, 148, 110, 71, 113, 29, 228, 195, 57, 249, 181, 71, 203, 211, 131, 64, 92, 141, 89, 58, 210, 70, 246, 227, 146, 137, 63, 41, 250, 187, 56, 160, 17, 174, 45, 28, 245, 204, 63, 18, 174, 162, 53, 208, 121, 187, 1, 121, 46, 50, 245, 207, 140, 5, 186, 237, 72, 158, 163, 44, 143, 197, 127, 34, 160, 41, 242, 108, 220, 123, 138, 205, 164, 23, 164, 18, 117, 123, 153, 22, 150, 73, 141, 43, 176, 88, 251, 198, 143, 93, 33, 20, 40, 143, 84, 197, 67, 255, 148, 57, 146, 198, 114, 33, 71, 30, 82, 192, 170, 52, 234, 239, 80, 64, 40, 102, 84, 166, 173, 142, 183, 193, 94, 230, 23, 141, 252, 243, 157, 35, 237, 118, 118, 42, 74, 
1, 54, 87, 226, 144, 53, 58, 204, 32, 19, 74, 57, 167, 120, 21, 185, 51, 252, 176, 109, 2, 92, 125, 240, 132, 20, 197, 113, 129, 104, 179, 18, 64, 47, 115, 96, 209, 210, 76, 191, 39, 127, 230, 135, 242, 56, 42, 214, 109, 188, 170, 192, 204, 9, 204, 190, 58, 162, 143, 115, 247, 60, 155, 37, 146, 119, 159, 64, 247, 242, 239, 189, 181, 85, 25, 145, 123, 81, 123, 205, 83, 33, 144, 45, 196, 64, 114, 13, 18, 247, 116, 14, 202, 246, 213, 177, 225, 152, 217, 119, 24, 39, 253, 158, 222, 242, 242, 175, 77, 241, 126, 29, 93, 45, 170, 185, 71, 131, 214, 114, 43, 181, 150, 252, 39, 6, 87, 37, 134, 229, 17, 8, 15, 186, 181, 205, 161, 20, 235, 0, 12, 137, 197, 184, 41, 11, 222, 237, 139, 70, 19, 213, 30, 122, 10, 226, 173, 98, 29, 245, 52, 215, 124, 12, 134, 182, 57, 22, 177, 59, 226, 249, 248, 41, 180, 183, 47, 165, 116, 170, 152, 20, 74, 194, 122, 200, 136, 75, 176, 189, 240, 211, 88, 98, 205, 34, 105, 216, 138, 137, 72, 56, 215, 175, 140, 70, 137, 91, 54, 133, 221, 38, 226, 239, 139, 212, 80, 177, 248, 110, 195, 61, 196, 255, 7, 156, 229, 136, 198, 72, 3, 234, 106, 16, 83, 14, 210, 94, 51, 205, 43, 23, 224, 60, 41, 139, 192, 59, 25, 111, 190, 82, 39, 54, 189, 75, 126, 229, 83, 220, 32, 148, 43, 104, 248, 214, 206, 224, 215, 16, 60, 193, 15, 56, 193, 32, 170, 160, 23, 190, 102, 110, 19, 17, 72, 35, 17, 208, 211, 97, 113, 200, 24, 54, 117, 191, 45, 89, 163, 74, 198, 191, 150, 95, 253, 192, 22, 129, 130, 32, 152, 172, 103, 119, 229, 128, 56, 107, 92, 200, 44, 214, 191, 125, 210, 215, 82, 117, 251, 113, 109, 109, 226, 6, 169, 109, 74, 23, 249, 245, 34, 66, 19, 101, 185, 162, 211, 82, 34, 142, 17, 229, 51, 118, 89, 199, 214, 28, 129, 55, 42, 45, 223, 195, 53, 145, 14, 230, 108, 57, 10, 38, 30, 54, 47, 103, 228, 177, 43, 124, 215, 126, 179, 126, 224, 136, 52, 213, 200, 239, 248, 56, 200, 140, 211, 100, 66, 168, 7, 73, 198, 24, 116, 195, 168, 244, 221, 29, 96, 70, 60, 223, 74, 211, 190, 250, 31, 253, 102, 252, 5, 79, 214, 195, 57, 178, 97, 227, 185, 105, 107, 5, 36, 58, 5, 55, 206, 
148, 62, 127, 7, 185, 106, 245, 3, 39, 244, 34, 213, 127, 5, 102, 234, 34, 21, 55, 212, 6, 181, 98, 25, 92, 178, 172, 22, 221, 208, 146, 54, 119, 64, 138, 248, 54, 134, 7, 72, 247, 6, 228, 125, 93, 153, 23, 141, 207, 95, 56, 117, 215, 95, 159, 159, 214, 117, 97, 43, 109, 88, 50, 163, 148, 70, 44, 108, 208, 165, 89, 202, 104, 76, 241, 196, 12, 40, 102, 154, 234, 158, 197, 135, 31, 129, 203, 135, 181, 145, 20, 117, 153, 231, 212, 228, 118, 193, 211, 167, 59, 198, 170, 28, 74, 11, 164, 211, 1, 189, 68, 202, 163, 110, 158, 216, 123, 31, 16, 30, 31, 111, 81, 108, 235, 56, 98, 114, 76, 29, 142, 234, 159, 214, 18, 38, 226, 183, 37, 36, 15, 56, 53, 141, 161, 85, 198, 76, 246, 223, 82, 137, 2, 27, 186, 111, 208, 68, 146, 181, 205, 85, 101, 70, 19, 33, 38, 254, 177, 231, 88, 176, 43, 130, 244, 94, 228, 81, 193, 13, 224, 8, 45, 35, 185, 9, 103, 112, 133, 124, 107, 172, 66, 48, 64, 33, 84, 14, 127, 69, 54, 46, 68, 14, 225, 255, 48, 107, 32, 75, 160, 60, 67, 158, 149, 12, 243, 130, 168, 250, 146, 60, 29, 121, 117, 186, 93, 171, 26, 187, 23, 130, 200, 105, 169, 14, 241, 206, 210, 146, 59, 79, 197, 56, 14, 119, 202, 166, 82, 77, 90, 96, 193, 7, 176, 83, 166, 12, 5, 99, 4, 173, 44, 162, 241, 216, 251, 218, 158, 2, 222, 159, 93, 5, 189, 82, 143, 147, 50, 220, 207, 150, 229, 93, 47, 39, 242, 159, 236, 141, 31, 209, 237, 21, 156, 8, 199, 201, 62, 55, 18, 132, 140, 65, 177, 159, 152, 137, 179, 4, 196, 88, 135, 212, 251, 156, 138, 108, 108, 84, 178, 243, 153, 151, 175, 177, 23, 163, 44, 180, 29, 98, 159, 125, 91, 169, 25, 131, 30, 65, 139, 56, 16, 177, 161, 230, 244, 121, 38, 207, 45, 64, 0, 235, 250, 149, 220, 128, 161, 118, 12, 148, 229, 107, 33, 108, 158, 197, 103, 236, 220, 167, 93, 108, 227, 34, 17, 54, 31, 131, 135, 182, 166, 130, 136, 244, 128, 130, 64, 45, 59, 203, 121, 207, 228, 46, 75, 96, 55, 66, 87, 173, 145, 207, 106, 111, 97, 224, 103, 86, 128, 36, 37, 105, 165, 77, 69, 13, 29, 220, 163, 123, 49, 209, 38, 212, 238, 25, 4, 232, 186, 232, 133, 3, 163, 79, 139, 249, 111, 69, 
0, 187, 128, 212, 60, 242, 147, 148, 223, 17, 73, 194, 244, 125, 146, 191, 38, 15, 247, 52, 4, 91, 229, 37, 22, 64, 135, 77, 181, 241, 149, 80, 196, 124, 21, 200, 117, 76, 105, 121, 30, 188, 3, 65, 161, 205, 178, 242, 67, 111, 175, 58, 9, 95, 46, 201, 105, 23, 224, 249, 98, 100, 205, 255, 23, 50, 131, 30, 216, 87, 102, 59, 91, 156, 114, 45, 237, 187, 117, 192, 231, 51, 65, 183, 127, 231, 90, 69, 131, 6, 141, 83, 217, 17, 41, 150, 182, 89, 2, 191, 56, 69, 42, 120, 196, 10, 177, 255, 154, 238, 104, 210, 58, 33, 13, 163, 136, 178, 244, 61, 163, 206, 79, 81, 212, 50, 23, 56, 28, 182, 165, 29, 84, 29, 173, 150, 194, 201, 221, 239, 145, 151, 60, 119, 233, 183, 171, 240, 201, 10, 91, 26, 12, 55, 126, 52, 113, 9, 105, 150, 111, 58, 76, 222, 35, 54, 5, 36, 51, 229, 13, 222, 167, 248, 130, 166, 6, 108, 78, 44, 228, 219, 155, 216, 122, 66, 163, 52, 100, 111, 104, 166, 127, 253, 123, 15, 179, 63, 102, 161, 23, 86, 81, 41, 241, 164, 196, 71, 122, 216, 18, 22, 196, 39, 126, 223, 86, 220, 81, 90, 103, 74, 116, 226, 16, 125, 173, 210, 46, 194, 102, 224, 246, 149, 225, 35, 1, 248, 108, 64, 20, 74, 48, 89, 109, 173, 212, 114, 175, 87, 97, 217, 241, 107, 253, 67, 82, 32, 14, 1, 219, 60, 10, 73, 19, 183, 87, 110, 95, 252, 101, 56, 34, 67, 24, 180, 179, 55, 32, 247, 187, 98, 99, 235, 73, 77, 243, 29, 29, 93, 242, 60, 26, 51, 27, 102, 109, 98, 251, 98, 76, 235, 39, 89, 1, 108, 46, 238, 115, 33, 59, 45, 46, 4, 56, 141, 65, 192, 26, 110, 192, 216, 245, 71, 57, 65, 144, 246, 236, 191, 64, 159, 245, 245, 14, 36, 66, 6, 98, 18, 148, 99, 141, 82, 64, 51, 168, 215, 201, 82, 150, 68, 84, 237, 2, 185, 49, 39, 122, 245, 239, 219, 60, 142, 16, 78, 165, 59, 145, 226, 38, 19, 228, 26, 159, 176, 90, 32, 1, 163, 43, 127, 125, 232, 29, 223, 231, 232, 176, 78, 57, 34, 219, 177, 64, 17, 186, 201, 72, 51, 195, 59, 140, 226, 123, 219, 243, 10, 190, 99, 40, 236, 187, 46, 119, 180, 164, 197, 70, 134, 167, 185, 241, 187, 218, 249, 220, 62, 226, 47, 97, 251, 132, 155, 218, 32, 253, 182, 144, 98, 22, 209, 215, 
16, 95, 246, 4, 58, 32, 214, 187, 1, 103, 224, 60, 163, 97, 233, 219, 15, 97, 6, 176, 177, 176, 174, 149, 111, 229, 17, 47, 10, 167, 193, 114, 2, 14, 131, 72, 117, 147, 68, 11, 217, 168, 7, 84, 89, 255, 130, 70, 149, 123, 55, 161, 5, 27, 3, 133, 147, 51, 133, 120, 139, 225, 134, 107, 121, 199, 129, 150, 117, 73, 6, 190, 67, 1, 127, 99, 203, 123, 26, 10, 131, 160, 255, 55, 205, 8, 109, 202, 165, 46, 13, 144, 141, 238, 4, 15, 30, 91, 78, 167, 56, 207, 181, 100, 89, 226, 166, 91, 6, 87, 42, 161, 18, 173, 46, 134, 84, 47, 192, 51, 192, 86, 143, 106, 143, 68, 251, 155, 48, 132, 148, 113, 151, 135, 133, 68, 135, 223, 141, 62, 40, 237, 173, 32, 255, 82, 138, 211, 98, 135, 17, 208, 76, 110, 10, 169, 109, 29, 94, 79, 56, 157, 83, 52, 181, 250, 138, 162, 188, 220, 51, 204, 209, 222, 189, 226, 142, 162, 134, 34, 176, 136, 149, 252, 25, 172, 22, 56, 41, 175, 96, 189, 47, 60, 197, 122, 228, 229, 174, 7, 187, 59, 62, 90, 104, 81, 171, 136, 40, 100, 33, 61, 107, 104, 93, 125, 99, 111, 143, 164, 107, 234, 102, 20, 82, 79, 151, 211, 159, 79, 36, 159, 15, 137, 116, 236, 128, 31, 195, 155, 61, 48, 44, 168, 114, 179, 176, 19, 154, 247, 138, 187, 48, 108, 202, 239, 31, 5, 253, 101, 72, 112, 65, 48, 8, 136, 4, 140, 125, 19, 45, 76, 151, 233, 69, 65, 39, 222, 33, 169, 153, 248, 69, 22, 63, 82, 47, 141, 89, 69, 102, 183, 148, 108, 230, 206, 254, 173, 111, 126, 205, 73, 85, 110, 8, 195, 112, 225, 241, 117, 201, 155, 66, 182, 43, 95, 192, 172, 85, 95, 200, 55, 150, 254, 183, 79, 51, 140, 194, 243, 218, 6, 207, 43, 112, 155, 165, 185, 81, 231, 61, 156, 78, 134, 159, 39, 52, 3, 54, 202, 111, 201, 137, 237, 153, 86, 65, 220, 47, 171, 44, 200, 196, 150, 157, 71, 245, 230, 122, 68, 43, 159, 42, 220, 239, 39, 59, 81, 215, 190, 128, 130, 45, 90, 180, 95, 118, 120, 37, 13, 88, 3, 66, 233, 214, 12, 132, 171, 162, 38, 115, 151, 111, 181, 74, 10, 105, 88, 147, 18, 179, 28, 209, 232, 145, 185, 92, 144, 230, 74, 12, 70, 230, 10, 109, 202, 47, 127, 142, 241, 226, 160, 99, 236, 6, 133, 69, 212, 154, 166, 
91, 50, 29, 34, 28, 146, 191, 43, 169, 86, 236, 44, 39, 96, 174, 26, 12, 210, 205, 182, 94, 148, 21, 71, 7, 55, 243, 169, 236, 251, 148, 255, 130, 169, 82, 153, 21, 140, 25, 11, 83, 255, 108, 240, 128, 13, 36, 35, 182, 182, 242, 102, 57, 202, 171, 124, 196, 136, 92, 214, 60, 128, 224, 239, 59, 200, 208, 243, 143, 95, 97, 220, 171, 167, 40, 18, 63, 70, 152, 3, 129, 146, 154, 72, 146, 176, 144, 56, 0, 216, 206, 217, 145, 170, 82, 110, 208, 56, 155, 158, 133, 32, 72, 127, 55, 98, 223, 55, 57, 226, 184, 221, 143, 62, 205, 153, 184, 72, 233, 147, 245, 235, 115, 24, 146, 28, 29, 191, 188, 195, 216, 0, 79, 123, 176, 54, 74, 228, 161, 151, 255, 57, 132, 208, 222, 172, 239, 205, 243, 127, 50, 141, 9, 28, 72, 123, 85, 122, 138, 248, 107, 55, 73, 183, 2, 189, 140, 161, 129, 16, 183, 182, 207, 76, 105, 61, 159, 207, 220, 140, 223, 15, 72, 164, 232, 55, 246, 173, 204, 217, 67, 72, 116, 35, 22, 247, 217, 8, 97, 20, 37, 53, 199, 137, 14, 183, 78, 33, 179, 245, 104, 161, 143, 174, 147, 54, 58, 153, 221, 189, 50, 27, 25, 252, 142, 76, 18, 135, 116, 106, 21, 78, 71, 130, 246, 120, 35, 81, 60, 230, 156, 147, 127, 104, 209, 94, 21, 168, 51, 226, 215, 97, 176, 83, 122, 225, 117, 106, 49, 117, 171, 146, 184, 154, 203, 205, 153, 155, 84, 153, 95, 190, 76, 32, 248, 17, 43, 20, 48, 114, 56, 234, 90, 101, 59, 154, 217, 35, 15, 234, 180, 156, 178, 214, 117, 220, 215, 178, 12, 37, 247, 61, 212, 186, 28, 176, 157, 38, 125, 116, 68, 128, 229, 191, 65, 183, 125, 81, 130, 183, 131, 93, 244, 175, 42, 119, 240, 179, 174, 78, 1, 125, 226, 231, 160, 105, 14, 32, 19, 86, 170, 9, 226, 40, 148, 47, 70, 27, 37, 13, 56, 247, 89, 179, 103, 45, 105, 94, 115, 97, 185, 56, 13, 24, 135, 47, 150, 57, 56, 72, 31, 70, 31, 48, 75, 206, 216, 69, 184, 201, 237, 221, 113, 183, 214, 140, 167, 68, 69, 187, 226, 254, 182, 189, 142, 132, 1, 220, 204, 174, 248, 29, 216, 137, 194, 87, 139, 131, 224, 76, 73, 194, 165, 71, 63, 157, 208, 134, 167, 116, 14, 87, 226, 46, 76, 101, 73, 42, 99, 13, 76, 238, 140, 44, 48, 118, 246, 
56, 74, 153, 113, 185, 44, 218, 130, 32, 218, 143, 177, 174, 194, 189, 188, 12, 248, 30, 201, 111, 41, 179, 117, 45, 168, 224, 254, 10, 90, 226, 81, 242, 96, 40, 61, 225, 156, 143, 248, 187, 220, 74, 237, 159, 165, 63, 246, 200, 229, 75, 103, 203, 208, 187, 92, 84, 138, 17, 36, 207, 108, 67, 138, 71, 82, 252, 122, 231, 150, 11, 109, 169, 15, 75, 46, 149, 100, 227, 59, 95, 15, 40, 246, 192, 66, 68, 36, 248, 149, 74, 54, 28, 207, 167, 237, 62, 186, 11, 49, 155, 38, 20, 117, 62, 18, 201, 9, 166, 33, 74, 232, 160, 202, 178, 77, 25, 107, 220, 210, 141, 40, 172, 79, 179, 55, 185, 100, 199, 98, 115, 207, 191, 151, 67, 174, 160, 99, 155, 76, 33, 141, 242, 154, 100, 87, 243, 208, 43, 160, 42, 126, 22, 177, 199, 150, 245, 244, 247, 186, 241, 222, 110, 37, 208, 33, 61, 184, 15, 72, 19, 103, 39, 215, 155, 210, 224, 71, 180, 11, 172, 63, 25, 19, 134, 108, 93, 35, 239, 49, 159, 42, 238, 79, 117, 86, 16, 121, 66, 93, 45, 199, 23, 187, 127, 36, 85, 208, 230, 132, 244, 81, 51, 107, 51, 72, 30, 81, 50, 238, 169, 204, 134, 134, 45, 5, 148, 235, 84, 132, 46, 226, 36, 102, 68, 51, 213, 138, 238, 72, 88, 110, 117, 31, 222, 121, 76, 130, 195, 232, 169, 252, 218, 169, 141, 41, 51, 63, 78, 122, 236, 42, 150, 182, 48, 40, 80, 44, 60, 40, 167, 42, 133, 21, 222, 175, 242, 236, 255, 53, 141, 153, 55, 223, 175, 247, 251, 102, 105, 54, 31, 47, 236, 176, 106, 191, 35, 249, 141, 170, 115, 8, 43, 117, 98, 108, 207, 43, 62, 247, 140, 37, 234, 0, 243, 31, 52, 42, 240, 226, 94, 128, 246, 249, 56, 37, 226, 92, 53, 36, 72, 144, 138, 181, 113, 54, 159, 116, 37, 26, 174, 73, 158, 179, 22, 173, 173, 76, 118, 38, 13, 255, 67, 194, 86, 90, 199, 243, 65, 3, 10, 210, 37, 198, 22, 139, 92, 210, 95, 166, 208, 227, 48, 255, 255, 78, 64, 150, 209, 86, 77, 140, 215, 139, 1, 112, 216, 14, 126, 41, 52, 81, 240, 166, 110, 246, 236, 70, 106, 95, 165, 112, 102, 214, 225, 248, 58, 57, 164, 225, 7, 167, 15, 66, 26, 243, 153, 184, 43, 67, 226, 136, 56, 251, 245, 85, 141, 129, 183, 244, 109, 227, 163, 251, 110, 135, 174, 
205, 49, 67, 131, 32, 134, 231, 225, 203, 78, 160, 182, 78, 140, 213, 115, 82, 156, 130, 102, 106, 184, 123, 84, 13, 160, 185, 162, 47, 227, 197, 165, 83, 122, 68, 128, 131, 150, 125, 112, 150, 138, 8, 148, 69, 176, 172, 114, 73, 127, 86, 70, 212, 0, 157, 63, 184, 197, 6, 82, 157, 55, 154, 33, 170, 56, 181, 70, 206, 169, 92, 122, 27, 184, 159, 242, 206, 225, 177, 242, 162, 148, 117, 158, 172, 72, 76, 186, 59, 60, 8, 161, 188, 101, 31, 233, 37, 240, 69, 226, 40, 247, 105, 19, 211, 15, 90, 90, 102, 180, 61, 128, 50, 212, 248, 144, 135, 178, 49, 175, 106, 28, 225, 242, 203, 247, 6, 165, 202, 108, 36, 8, 155, 177, 240, 70, 84, 16, 250, 91, 0, 19, 54, 91, 196, 133, 73, 197, 2, 211, 143, 201, 123, 80, 142, 191, 119, 98, 18, 245, 100, 73, 93, 243, 201, 35, 54, 231, 238, 241, 5, 155, 127, 0, 254, 121, 146, 12, 127, 122, 195, 64, 99, 255, 95, 186, 185, 45, 7, 180, 159, 154, 64, 100, 182, 150, 27, 189, 63, 7, 52, 11, 159, 184, 38, 230, 77, 198, 241, 22, 194, 222, 20, 202, 53, 236, 141, 149, 64, 148, 242, 76, 79, 56, 46, 188, 118, 201, 56, 142, 141, 123, 75, 119, 211, 5, 54, 61, 145, 48, 231, 130, 81, 12, 80, 29, 248, 136, 242, 226, 149, 57, 110, 1, 184, 147, 187, 208, 86, 103, 131, 34, 214, 35, 146, 124, 252, 177, 76, 110, 156, 99, 118, 76, 153, 73, 89, 49, 37, 178, 14, 250, 151, 170, 254, 220, 59, 171, 75, 84, 164, 65, 121, 87, 122, 212, 27, 65, 94, 57, 60, 250, 249, 156, 80, 109, 238, 235, 92, 201, 39, 192, 86, 162, 30, 106, 104, 114, 182, 9, 183, 97, 95, 231, 16, 40, 87, 196, 43, 69, 29, 136, 219, 68, 42, 215, 122, 197, 109, 134, 88, 61, 55, 177, 209, 174, 109, 110, 217, 95, 201, 16, 133, 149, 147, 244, 155, 105, 39, 28, 105, 247, 251, 139, 219, 63, 176, 161, 71, 101, 63, 98, 218, 96, 33, 127, 192, 31, 69, 166, 61, 74, 143, 142, 88, 225, 134, 65, 235, 214, 145, 245, 107, 56, 238, 119, 101, 160, 247, 38, 236, 87, 124, 117, 93, 199, 107, 151, 217, 41, 30, 69, 61, 60, 228, 120, 38, 61, 202, 4, 196, 60, 107, 93, 230, 40, 247, 224, 159, 40, 66, 236, 243, 135, 202, 146, 209, 
197, 56, 33, 126, 2, 150, 230, 154, 253, 35, 136, 132, 121, 244, 123, 201, 99, 100, 34, 233, 123, 210, 194, 205, 243, 251, 205, 118, 7, 211, 100, 55, 210, 42, 228, 153, 167, 251, 150, 182, 182, 23, 158, 81, 162, 157, 253, 5, 44, 163, 226, 150, 71, 84, 7, 94, 209, 240, 30, 86, 146, 182, 130, 210, 61, 179, 233, 42, 26, 151, 54, 91, 14, 155, 19, 142, 113, 38, 169, 23, 73, 254, 124, 16, 15, 81, 67, 86, 230, 56, 77, 86, 225, 157, 240, 176, 138, 0, 179, 194, 157, 235, 199, 57, 160, 108, 54, 67, 205, 162, 28, 140, 215, 230, 18, 47, 130, 185, 162, 65, 150, 58, 177, 194, 149, 41, 64, 20, 205, 76, 57, 68, 53, 128, 73, 52, 99, 239, 127, 206, 232, 68, 50, 48, 176, 235, 249, 65, 38, 39, 152, 147, 79, 184, 174, 106, 120, 67, 79, 81, 97, 56, 196, 246, 63, 10, 133, 104, 2, 134, 231, 66, 48, 202, 47, 125, 78, 73, 181, 93, 140, 185, 53, 223, 108, 194, 31, 97, 227, 137, 251, 150, 126, 82, 77, 115, 123, 6, 224, 250, 235, 144, 171, 159, 213, 113, 246, 46, 151, 87, 13, 119, 210, 206, 215, 189, 246, 27, 228, 25, 69, 4, 248, 63, 128, 9, 67, 54, 72, 217, 224, 134, 171, 137, 235, 100, 152, 160, 155, 85, 223, 5, 138, 178, 94, 25, 223, 12, 180, 199, 151, 120, 38, 4, 121, 192, 69, 126, 105, 16, 7, 22, 26, 187, 124, 6, 203, 17, 11, 231, 196, 67, 234, 38, 86, 246, 52, 16, 112, 156, 178, 231, 251, 108, 147, 239, 177, 185, 211, 79, 1, 136, 247, 203, 252, 140, 213, 247, 230, 147, 90, 110, 53, 207, 160, 70, 250, 109, 6, 44, 231, 3, 185, 25, 85, 200, 137, 200, 34, 92, 52, 151, 250, 44, 132, 111, 80, 243, 47, 234, 216, 229, 22, 231, 52, 195, 171, 132, 133, 33, 38, 232, 245, 226, 186, 146, 58, 40, 73, 163, 32, 82, 203, 121, 68, 128, 95, 107, 20, 11, 76, 242, 165, 111, 52, 144, 156, 147, 63, 100, 94, 226, 149, 56, 39, 32, 58, 34, 153, 138, 224, 4, 95, 129, 192, 92, 247, 139, 190, 177, 0, 50, 83, 231, 45, 59, 59, 13, 24, 117, 66, 5, 16, 145, 172, 84, 230, 74, 90, 94, 208, 92, 219, 54, 218, 134, 162, 251, 162, 99, 78, 191, 188, 71, 249, 101, 232, 123, 16, 123, 177, 248, 47, 254, 56, 11, 39, 62, 155, 143, 
137, 174, 99, 73, 186, 53, 169, 27, 241, 40, 29, 166, 22, 41, 90, 31, 62, 28, 12, 230, 143, 15, 68, 119, 28, 126, 94, 55, 161, 151, 211, 129, 162, 206, 109, 158, 173, 85, 119, 18, 54, 107, 213, 68, 29, 99, 62, 188, 115, 105, 95, 2, 135, 83, 183, 67, 210, 255, 160, 128, 83, 141, 177, 169, 129, 223, 157, 30, 0, 68, 103, 164, 42, 103, 35, 179, 193, 54, 200, 250, 197, 233, 163, 86, 231, 73, 242, 217, 81, 48, 62, 230, 40, 173, 144, 84, 7, 199, 88, 61, 166, 38, 187, 80, 60, 136, 44, 242, 62, 224, 98, 169, 210, 116, 9, 233, 199, 224, 1, 82, 201, 179, 116, 145, 37, 75, 101, 153, 31, 55, 129, 184, 193, 179, 55, 174, 186, 144, 178, 179, 118, 148, 33, 84, 250, 164, 85, 137, 168, 250, 89, 21, 237, 153, 127, 29, 86, 131, 72, 118, 175, 255, 160, 160, 78, 202, 88, 140, 237, 161, 195, 157, 85, 15, 234, 31, 168, 47, 189, 111, 47, 243, 160, 208, 218, 128, 2, 3, 82, 214, 40, 174, 190, 92, 201, 109, 235, 231, 112, 248, 229, 169, 93, 38, 100, 47, 43, 17, 55, 160, 208, 158, 64, 96, 147, 178, 156, 194, 179, 197, 166, 34, 156, 165, 124, 160, 189, 213, 20, 144, 108, 45, 99, 202, 209, 121, 163, 23, 138, 219, 56, 251, 153, 134, 129, 8, 97, 220, 30, 228, 217, 13, 225, 75, 209, 193, 32, 9, 16, 195, 85, 27, 193, 112, 192, 21, 21, 37, 46, 24, 252, 43, 212, 110, 152, 164, 90, 125, 111, 218, 35, 67, 171, 237, 146, 80, 22, 243, 160, 105, 146, 234, 181, 180, 71, 41, 146, 102, 250, 55, 46, 175, 92, 103, 43, 197, 59, 67, 118, 181, 189, 30, 185, 114, 71, 82, 16, 177, 167, 54, 106, 253, 174, 16, 51, 123, 54, 138, 200, 171, 89, 180, 183, 101, 207, 34, 112, 59, 3, 70, 172, 251, 177, 30, 232, 31, 10, 182, 220, 90, 116, 128, 201, 9, 43, 165, 0, 240, 72, 63, 137, 152, 20, 249, 146, 189, 148, 197, 160, 236, 224, 27, 222, 150, 0, 116, 38, 147, 169, 95, 0, 248, 196, 168, 222, 48, 72, 33, 67, 78, 73, 34, 186, 173, 18, 62, 181, 23, 165, 248, 62, 244, 123, 26, 241, 223, 76, 3, 186, 242, 41, 210, 210, 237, 65, 61, 55, 58, 94, 241, 147, 119, 27, 74, 112, 249, 193, 147, 157, 20, 4, 128, 142, 54, 213, 217, 60, 47, 9, 
109, 162, 197, 147, 81, 76, 167, 7, 175, 223, 128, 217, 229, 40, 157, 182, 157, 171, 75, 204, 114, 152, 7, 164, 198, 239, 88, 148, 178, 217, 122, 187, 151, 105, 66, 51, 164, 92, 78, 168, 118, 143, 117, 64, 252, 91, 214, 145, 212, 138, 131, 5, 225, 150, 184, 184, 6, 138, 153, 120, 154, 194, 161, 33, 99, 204, 177, 237, 41, 32, 157, 36, 164, 246, 212, 120, 128, 48, 82, 218, 165, 206, 206, 105, 150, 204, 206, 124, 214, 233, 139, 53, 2, 36, 13, 21, 44, 79, 163, 163, 164, 28, 181, 134, 74, 128, 3, 53, 190, 125, 115, 155, 58, 63, 198, 87, 227, 191, 172, 49, 217, 71, 178, 118, 68, 19, 105, 142, 170, 115, 32, 135, 82, 203, 62, 132, 241, 200, 132, 101, 220, 251, 229, 204, 138, 31, 106, 33, 253, 157, 217, 71, 132, 66, 165, 203, 227, 243, 231, 22, 249, 159, 131, 27, 33, 156, 16, 58, 211, 229, 167, 144, 173, 111, 0, 195, 144, 177, 116, 78, 103, 158, 13, 248, 193, 107, 210, 62, 245, 116, 118, 24, 127, 187, 225, 100, 120, 91, 10, 102, 4, 200, 220, 200, 230, 238, 51, 89, 166, 63, 147, 149, 104, 47, 62, 171, 171, 59, 23, 143, 241, 99, 30, 84, 138, 19, 223, 133, 9, 221, 142, 76, 204, 164, 189, 170, 45, 75, 123, 133, 106, 196, 83, 57, 213, 182, 237, 52, 36, 222, 231, 127, 66, 120, 60, 81, 164, 109, 202, 239, 232, 180, 138, 153, 2, 255, 32, 247, 187, 79, 240, 104, 129, 192, 138, 249, 160, 8, 121, 178, 118, 13, 166, 27, 59, 29, 190, 147, 216, 204, 246, 184, 122, 194, 184, 58, 155, 133, 170, 194, 86, 22, 250, 122, 210, 127, 128, 157, 175, 131, 19, 188, 121, 118, 158, 93, 19, 110, 23, 209, 26, 19, 197, 2, 55, 221, 229, 155, 177, 20, 89, 154, 211, 185, 72, 213, 202, 136, 238, 42, 251, 228, 224, 191, 158, 39, 162, 231, 2, 128, 188, 49, 36, 104, 158, 163, 95, 82, 92, 255, 213, 162, 147, 211, 189, 44, 190, 220, 166, 0, 102, 10, 162, 99, 13, 55, 222, 95, 172, 195, 4, 37, 44, 100, 97, 112, 244, 251, 65, 67, 114, 88, 179, 150, 13, 176, 190, 86, 221, 113, 80, 243, 68, 221, 148, 98, 24, 179, 251, 40, 158, 171, 97, 223, 158, 131, 74, 182, 28, 39, 38, 254, 13, 3, 35, 107, 161, 178, 99, 22, 87, 101, 
77, 208, 8, 58, 138, 113, 13, 60, 26, 3, 89, 77, 215, 108, 69, 221, 115, 207, 213, 216, 93, 32, 85, 210, 87, 199, 212, 79, 2, 63, 85, 70, 221, 136, 237, 79, 99, 59, 92, 57, 58, 51, 217, 141, 103, 17, 219, 153, 125, 209, 188, 57, 180, 23, 146, 104, 85, 229, 209, 155, 46, 99, 89, 244, 120, 122, 124, 218, 56, 44, 25, 61, 129, 130, 228, 55, 241, 74, 32, 187, 145, 149, 209, 10, 230, 245, 20, 16, 94, 26, 128, 15, 207, 132, 34, 33, 15, 61, 105, 98, 250, 221, 91, 113, 81, 64, 11, 131, 255, 75, 225, 190, 50, 95, 170, 69, 209, 215, 178, 89, 223, 190, 175, 55, 159, 176, 99, 12, 21, 221, 201, 211, 132, 234, 58, 251, 204, 61, 85, 153, 173, 81, 11, 8, 239, 170, 130, 41, 53, 165, 199, 70, 111, 30, 79, 181, 39, 105, 105, 104, 159, 52, 124, 253, 38, 191, 242, 73, 99, 65, 106, 128, 191, 172, 116, 182, 53, 71, 143, 85, 113, 195, 169, 227, 233, 232, 50, 99, 69, 244, 53, 148, 212, 1, 204, 147, 138, 157, 119, 246, 39, 240, 9, 65, 207, 52, 81, 43, 68, 81, 155, 65, 168, 204, 178, 108, 223, 249, 218, 180, 228, 83, 136, 79, 230, 12, 138, 229, 11, 69, 2, 97, 77, 99, 174, 37, 180, 12, 83, 141, 139, 41, 108, 40, 93, 160, 232, 30, 232, 159, 126, 223, 93, 90, 45, 88, 245, 158, 141, 88, 241, 91, 120, 252, 230, 140, 200, 195, 248, 58, 98, 152, 93, 232, 127, 89, 95, 106, 173, 142, 242, 24, 115, 66, 215, 211, 201, 240, 36, 140, 223, 207, 165, 241, 180, 34, 11, 159, 155, 250, 94, 136, 164, 214, 95, 154, 30, 106, 97, 114, 217, 176, 92, 18, 198, 157, 255, 78, 82, 208, 161, 185, 45, 209, 137, 91, 97, 227, 8, 185, 59, 203, 189, 25, 50, 230, 220, 135, 167, 13, 227, 236, 142, 141, 8, 16, 173, 250, 39, 18, 9, 75, 191, 174, 189, 226, 50, 125, 158, 188, 26, 109, 99, 57, 200, 231, 153, 197, 250, 247, 224, 57, 218, 53, 34, 172, 142, 30, 196, 98, 117, 187, 203, 134, 45, 87, 207, 6, 40, 205, 151, 192, 214, 214, 139, 206, 36, 121, 162, 25, 28, 206, 229, 18, 154, 136, 107, 204, 65, 235, 122, 185, 132, 50, 165, 227, 161, 234, 187, 27, 231, 212, 27, 113, 54, 208, 235, 204, 46, 47, 85, 217, 212, 6, 5, 222, 164, 126, 
75, 51, 39, 94, 145, 145, 159, 255, 29, 240, 196, 214, 43, 242, 169, 34, 138, 132, 111, 181, 251, 51, 100, 115, 156, 246, 20, 179, 242, 151, 174, 252, 146, 226, 109, 180, 228, 254, 82, 143, 136, 145, 239, 4, 163, 200, 121, 67, 9, 11, 192, 42, 211, 156, 37, 16, 169, 120, 181, 223, 204, 86, 98, 174, 15, 46, 85, 75, 156, 43, 98, 90, 90, 158, 8, 135, 228, 42, 231, 56, 201, 199, 80, 141, 242, 25, 34, 169, 190, 232, 245, 47, 73, 17, 195, 41, 168, 247, 75, 124, 125, 201, 38, 85, 69, 132, 255, 113, 31, 89, 209, 45, 8, 142, 77, 148, 109, 204, 246, 229, 34, 22, 213, 34, 255, 106, 3, 42, 161, 193, 77, 135, 204, 225, 218, 48, 3, 254, 213, 42, 199, 174, 123, 143, 86, 217, 94, 137, 239, 137, 99, 200, 251, 208, 39, 101, 154, 44, 7, 26, 125, 245, 76, 122, 201, 11, 190, 108, 134, 231, 225, 43, 8, 158, 154, 158, 37, 191, 27, 72, 150, 249, 39, 230, 240, 58, 76, 61, 59, 127, 14, 172, 38, 117, 115, 231, 148, 243, 194, 3, 213, 191, 219, 84, 101, 30, 210, 88, 191, 188, 113, 92, 66, 253, 166, 28, 146, 23, 51, 85, 65, 217, 233, 190, 248, 126, 238, 242, 244, 113, 189, 105, 192, 143, 189, 236, 37, 222, 28, 212, 33, 101, 138, 157, 203, 134, 87, 129, 74, 122, 251, 3, 202, 170, 239, 162, 148, 182, 203, 183, 157, 130, 115, 213, 90, 4, 96, 25, 196, 143, 234, 135, 14, 0, 103, 8, 178, 46, 205, 167, 74, 183, 57, 201, 148, 226, 211, 177, 145, 85, 144, 34, 66, 144, 101, 192, 93, 86, 205, 82, 35, 231, 189, 37, 56, 59, 121, 40, 108, 211, 252, 202, 71, 209, 174, 29, 171, 63, 75, 226, 253, 6, 106, 207, 195, 184, 134, 148, 172, 59, 167, 223, 138, 168, 181, 51, 85, 107, 239, 40, 201, 2, 236, 162, 34, 243, 200, 227, 21, 180, 114, 11, 33, 191, 87, 55, 119, 41, 126, 220, 86, 243, 165, 101, 67, 208, 29, 191, 16, 138, 156, 196, 200, 178, 9, 39, 202, 174, 182, 241, 166, 116, 90, 30, 185, 68, 199, 239, 177, 201, 163, 61, 195, 82, 180, 170, 34, 101, 201, 79, 150, 243, 34, 99, 197, 177, 224, 181, 203, 222, 205, 86, 60, 138, 175, 0, 84, 209, 163, 39, 233, 91, 106, 117, 23, 59, 6, 126, 120, 206, 73, 106, 245, 115, 17, 
5, 19, 225, 61, 149, 216, 108, 251, 192, 196, 6, 155, 69, 58, 162, 106, 211, 167, 116, 191, 222, 187, 101, 51, 250, 76, 247, 187, 195, 40, 120, 236, 32, 86, 221, 8, 65, 202, 233, 74, 242, 25, 197, 188, 240, 41, 148, 107, 49, 198, 214, 142, 122, 186, 28, 193, 27, 2, 115, 140, 146, 126, 45, 52, 216, 53, 167, 60, 225, 147, 67, 50, 115, 24, 193, 230, 170, 141, 49, 253, 13, 251, 92, 207, 24, 157, 212, 223, 21, 158, 171, 90, 173, 218, 152, 1, 4, 74, 128, 245, 51, 167, 73, 61, 253, 20, 120, 152, 240, 195, 200, 169, 207, 207, 199, 179, 74, 115, 32, 172, 199, 119, 59, 66, 53, 124, 115, 104, 159, 26, 161, 110, 77, 238, 114, 225, 75, 25, 152, 24, 80, 84, 223, 99, 224, 99, 190, 88, 73, 235, 76, 117, 179, 228, 193, 205, 227, 61, 202, 175, 239, 231, 221, 176, 198, 240, 214, 66, 131, 123, 161, 255, 16, 184, 163, 207, 149, 32, 65, 160, 120, 224, 255, 106, 36, 131, 192, 71, 15, 168, 214, 81, 48, 126, 178, 172, 35, 174, 78, 144, 7, 159, 13, 254, 74, 177, 112, 85, 14, 86, 82, 92, 14, 214, 148, 241, 177, 165, 0, 15, 51, 162, 181, 60, 117, 39, 242, 239, 82, 227, 236, 216, 139, 193, 55, 113, 19, 133, 63, 76, 83, 4, 237, 100, 118, 87, 150, 163, 127, 176, 226, 140, 181, 2, 249, 73, 81, 192, 4, 196, 165, 197, 196, 81, 214, 193, 144, 239, 115, 210, 135, 229, 193, 131, 224, 18, 157, 211, 54, 189, 124, 73, 166, 180, 210, 23, 228, 78, 169, 106, 217, 242, 68, 86, 50, 138, 249, 136, 123, 99, 129, 144, 18, 33, 67, 117, 168, 140, 231, 225, 179, 70, 4, 209, 82, 40, 231, 4, 232, 248, 137, 234, 15, 194, 115, 71, 101, 219, 154, 36, 119, 29, 150, 31, 28, 6, 33, 251, 86, 176, 240, 192, 141, 246, 134, 158, 194, 124, 33, 173, 241, 9, 102, 21, 236, 162, 98, 17, 119, 128, 86, 87, 148, 250, 148, 219, 51, 133, 29, 252, 50, 108, 1, 22, 80, 80, 60, 104, 238, 72, 11, 179, 15, 206, 48, 189, 24, 108, 101, 203, 17, 117, 32, 196, 147, 250, 234, 49, 147, 132, 250, 105, 149, 170, 104, 192, 184, 115, 46, 248, 209, 182, 38, 60, 144, 231, 158, 96, 229, 121, 234, 12, 209, 120, 186, 192, 245, 124, 237, 19, 113, 117, 110, 
163, 30, 26, 63, 49, 98, 8, 253, 24, 82, 51, 117, 64, 45, 250, 6, 74, 199, 184, 39, 122, 187, 75, 119, 101, 5, 62, 243, 82, 61, 239, 20, 65, 88, 194, 141, 218, 217, 127, 176, 235, 26, 88, 175, 75, 154, 86, 82, 111, 226, 147, 149, 229, 182, 17, 32, 95, 6, 116, 75, 7, 87, 108, 154, 94, 194, 250, 97, 106, 50, 125, 249, 29, 243, 218, 74, 218, 36, 136, 231, 41, 43, 199, 226, 225, 83, 248, 39, 115, 187, 233, 231, 28, 68, 168, 221, 37, 197, 152, 95, 128, 61, 109, 87, 87, 28, 45, 245, 191, 132, 168, 143, 181, 227, 13, 35, 22, 40, 179, 55, 230, 103, 145, 87, 61, 50, 104, 245, 173, 34, 19, 57, 22, 215, 86, 144, 0, 194, 139, 14, 235, 236, 119, 79, 186, 44, 185, 152, 190, 154, 91, 31, 132, 45, 68, 187, 97, 253, 122, 113, 4, 144, 92, 102, 7, 172, 235, 28, 36, 159, 27, 139, 245, 165, 97, 44, 80, 233, 55, 72, 221, 162, 147, 198, 151, 201, 174, 60, 186, 119, 78, 95, 141, 66, 99, 35, 32, 20, 61, 67, 71, 69, 166, 237, 165, 45, 194, 143, 44, 242, 96, 14, 54, 204, 121, 108, 115, 132, 172, 151, 47, 6, 217, 127, 202, 228, 166, 154, 94, 174, 198, 5, 62, 145, 112, 149, 165, 188, 55, 47, 13, 62, 75, 234, 208, 167, 136, 37, 209, 203, 188, 85, 72, 228, 173, 50, 51, 212, 69, 194, 112, 112, 26, 135, 27, 82, 219, 210, 93, 168, 193, 1, 167, 153, 116, 207, 12, 101, 157, 98, 8, 211, 170, 92, 200, 152, 33, 129, 81, 167, 2, 189, 245, 210, 6, 159, 83, 29, 211, 128, 148, 221, 28, 103, 45, 133, 245, 212, 39, 204, 94, 16, 92, 116, 251, 163, 18, 159, 160, 49, 38, 95, 212, 191, 235, 215, 248, 88, 202, 45, 45, 93, 244, 187, 79, 134, 75, 104, 83, 189, 31, 172, 21, 64, 217, 93, 123, 45, 161, 68, 50, 194, 147, 23, 19, 1, 37, 169, 199, 207, 140, 31, 184, 104, 81, 143, 200, 198, 14, 205, 209, 252, 228, 253, 247, 242, 132, 29, 43, 149, 212, 68, 29, 154, 221, 71, 150, 77, 169, 83, 200, 251, 152, 46, 214, 206, 245, 199, 53, 166, 12, 112, 216, 52, 135, 33, 2, 188, 75, 63, 110, 140, 42, 155, 136, 164, 175, 113, 54, 151, 16, 80, 8, 140, 220, 48, 252, 143, 70, 151, 255, 150, 253, 105, 192, 249, 80, 92, 142, 21, 76, 
55, 223, 85, 224, 171, 61, 14, 81, 66, 24, 141, 70, 171, 223, 250, 200, 140, 132, 239, 151, 235, 4, 118, 218, 235, 51, 210, 0, 61, 54, 136, 131, 52, 98, 157, 243, 68, 99, 139, 159, 61, 208, 155, 124, 90, 189, 236, 37, 222, 28, 212, 33, 101, 138, 157, 203, 134, 87, 129, 74, 122, 251, 3, 202, 170, 239, 162, 148, 182, 203, 183, 157, 130, 115, 213, 90, 4, 96, 25, 196, 143, 234, 135, 14, 0, 103, 8, 178, 46, 205, 167, 74, 183, 57, 201, 148, 226, 211, 177, 145, 85, 144, 34, 66, 144, 101, 192, 93, 86, 205, 82, 35, 231, 189, 37, 56, 59, 121, 40, 108, 211, 252, 202, 71, 209, 174, 29, 171, 63, 75, 226, 253, 6, 106, 207, 195, 184, 134, 148, 172, 59, 167, 223, 138, 168, 181, 51, 85, 107, 239, 40, 201, 2, 236, 162, 34, 243, 200, 227, 21, 180, 114, 11, 33, 191, 87, 55, 119, 41, 126, 220, 86, 243, 171, 6, 126, 106, 15, 240, 10, 62, 186, 1, 93, 195, 223, 205, 122, 188, 96, 141, 116, 119, 239, 8, 101, 111, 226, 65, 153, 103, 109, 240, 238, 201, 133, 154, 97, 7, 164, 128, 8, 239, 201, 39, 153, 121, 233, 62, 104, 57, 194, 223, 107, 234, 83, 63, 117, 127, 15, 171, 177, 140, 226, 182, 166, 233, 207, 204, 175, 222, 11, 7, 78, 222, 243, 139, 61, 73, 157, 159, 146, 86, 223, 99, 13, 51, 57, 95, 141, 178, 3, 125, 3, 177, 21, 158, 43, 16, 64, 3, 35, 54, 6, 88, 72, 81, 133, 184, 179, 125, 243, 26, 144, 145, 51, 119, 72, 121, 218, 50, 190, 106, 233, 75, 190, 137, 222, 240, 124, 228, 52, 0, 69, 73, 216, 67, 65, 204, 4, 167, 173, 64, 105, 5, 164, 30, 17, 59, 1, 65, 159, 250, 254, 99, 159, 75, 170, 11, 40, 181, 130, 112, 214, 190, 10, 140, 254, 137, 249, 211, 65, 109, 159, 135, 11, 182, 194, 192, 112, 83, 73, 19, 73, 184, 58, 192, 204, 129, 60, 250, 180, 116, 136, 60, 214, 240, 155, 212, 2, 38, 34, 4, 89, 116, 98, 128, 174, 158, 113, 229, 236, 55, 95, 168, 88, 173, 231, 151, 32, 39, 127, 88, 222, 77, 27, 211, 1, 214, 34, 55, 204, 205, 186, 252, 201, 160, 99, 52, 38, 173, 187, 57, 126, 151, 1, 18, 5, 127, 67, 254, 222, 143, 200, 1, 241, 176, 10, 223, 156, 96, 61, 230, 53, 42, 22, 105, 153, 97, 225, 
204, 98, 5, 67, 32, 247, 191, 186, 100, 32, 110, 216, 180, 108, 225, 54, 219, 68, 174, 71, 168, 205, 118, 38, 248, 144, 7, 160, 125, 112, 19, 125, 178, 165, 150, 239, 71, 163, 236, 204, 201, 40, 124, 125, 196, 17, 109, 238, 45, 177, 46, 107, 98, 48, 28, 191, 65, 86, 13, 133, 7, 60, 139, 16, 8, 235, 38, 185, 253, 187, 183, 207, 11, 233, 134, 246, 230, 241, 121, 110, 253, 126, 89, 29, 126, 177, 62, 51, 39, 95, 119, 123, 255, 188, 37, 244, 140, 187, 210, 55, 198, 175, 198, 218, 0, 8, 84, 103, 128, 73, 198, 0, 4, 235, 15, 193, 194, 239, 6, 56, 197, 251, 119, 135, 113, 166, 133, 243, 237, 192, 104, 180, 171, 13, 116, 144, 40, 192, 49, 119, 150, 69, 149, 23, 54, 234, 118, 49, 169, 242, 232, 80, 103, 30, 169, 52, 98, 49, 140, 255, 186, 65, 192, 224, 90, 86, 248, 173, 27, 183, 162, 183, 208, 145, 104, 210, 220, 136, 26, 227, 167, 161, 69, 73, 218, 119, 46, 126, 10, 224, 207, 60, 107, 47, 128, 194, 66, 127, 18, 104, 161, 209, 64, 0, 18, 45, 212, 82, 204, 204, 50, 98, 224, 32, 169, 240, 5, 180, 202, 189, 22, 201, 86, 250, 239, 212, 197, 56, 6, 45, 5, 24, 188, 98, 15, 75, 224, 227, 173, 61, 233, 116, 148, 98, 51, 165, 138, 170, 62, 94, 228, 187, 142, 57, 214, 181, 10, 6, 178, 122, 138, 242, 181, 54, 229, 80, 48, 48, 167, 54, 25, 245, 79, 63, 64, 72, 191, 44, 146, 70, 79, 109, 90, 32, 91, 141, 67, 251, 66, 37, 21, 155, 42, 3, 97, 61, 40, 106, 169, 225, 180, 5, 158, 116, 32, 148, 214, 27, 227, 40, 215, 227, 104, 91, 181, 203, 86, 76, 91, 0, 222, 98, 74, 178, 24, 1, 163, 217, 120, 207, 112, 153, 10, 231, 149, 147, 15, 90, 238, 106, 196, 130, 148, 98, 26, 208, 71, 47, 144, 72, 19, 23, 208, 43, 75, 38, 161, 225, 203, 133, 147, 249, 225, 233, 245, 225, 37, 222, 77, 227, 65, 12, 60, 155, 177, 76, 243, 93, 35, 168, 70, 180, 23, 148, 195, 239, 135, 63, 179, 193, 153, 73, 99, 230, 83, 220, 232, 190, 113, 86, 24, 130, 182, 79, 80, 250, 67, 144, 140, 128, 228, 110, 174, 255, 4, 37, 242, 168, 11, 2, 0, 184, 10, 140, 16, 71, 5, 82, 10, 253, 85, 168, 36, 76, 89, 222, 22, 115, 173, 206, 44, 
252, 127, 242, 131, 111, 129, 79, 185, 20, 102, 38, 193, 74, 120, 182, 211, 9, 108, 225, 0, 250, 103, 67, 231, 218, 20, 11, 250, 33, 77, 157, 222, 239, 116, 253, 58, 232, 191, 196, 232, 100, 206, 56, 202, 68, 121, 33, 186, 22, 255, 114, 243, 66, 197, 25, 122, 223, 62, 11, 101, 92, 75, 36, 20, 165, 177, 66, 171, 215, 102, 109, 180, 38, 140, 26, 174, 159, 227, 113, 202, 106, 221, 58, 229, 111, 8, 248, 144, 140, 178, 225, 43, 151, 226, 211, 211, 49, 123, 98, 50, 50, 121, 68, 91, 154, 31, 106, 95, 52, 200, 208, 147, 73, 167, 95, 254, 81, 178, 41, 66, 7, 116, 143, 156, 147, 193, 60, 83, 116, 21, 105, 68, 133, 25, 39, 211, 80, 39, 0, 237, 159, 165, 230, 232, 7, 198, 140, 184, 38, 12, 210, 38, 149, 8, 36, 154, 108, 28, 100, 25, 144, 85, 86, 197, 68, 54, 175, 49, 167, 183, 4, 111, 174, 169, 125, 79, 201, 234, 129, 250, 49, 95, 233, 170, 26, 88, 76, 245, 178, 253, 34, 160, 61, 140, 30, 87, 105, 114, 35, 165, 10, 178, 35, 192, 237, 220, 249, 59, 18, 237, 162, 206, 250, 211, 218, 47, 74, 159, 162, 33, 208, 234, 96, 63, 104, 121, 36, 187, 162, 155, 7, 247, 173, 223, 10, 144, 21, 191, 60, 129, 109, 191, 65, 116, 54, 228, 29, 237, 174, 65, 145, 21, 240, 44, 63, 236, 88, 184, 28, 67, 55, 123, 9, 152, 72, 108, 108, 237, 194, 6, 42, 88, 221, 80, 109, 224, 123, 209, 53, 159, 7, 0, 49, 66, 54, 99, 36, 105, 97, 3, 7, 73, 159, 72, 142, 146, 141, 164, 150, 134, 74, 89, 192, 208, 96, 8, 3, 127, 118, 154, 48, 34, 209, 255, 145, 208, 178, 246, 209, 134, 55, 193, 124, 32, 34, 151, 212, 120, 155, 94, 137, 142, 201, 99, 196, 42, 162, 94, 109, 129, 3, 69, 73, 0, 91, 252, 83, 61, 226, 164, 137, 162, 17, 2, 83, 154, 202, 241, 189, 236, 182, 84, 162, 184, 79, 179, 52, 233, 176, 53, 151, 228, 80, 153, 220, 158, 133, 134, 160, 220, 171, 149, 170, 164, 64, 181, 27, 19, 249, 32, 77, 110, 194, 11, 18, 110, 79, 57, 198, 172, 56, 54, 217, 86, 88, 122, 150, 76, 94, 117, 149, 234, 147, 237, 22, 191, 87, 107, 204, 108, 95, 178, 38, 3, 158, 84, 85, 245, 208, 78, 59, 4, 1, 159, 0, 172, 90, 118, 165, 24, 228, 
176, 56, 254, 90, 34, 4, 134, 245, 119, 34, 35, 210, 56, 140, 137, 216, 234, 218, 78, 202, 207, 205, 46, 116, 90, 200, 190, 160, 123, 213, 246, 177, 232, 114, 52, 141, 44, 223, 183, 194, 145, 57, 155, 69, 96, 68, 41, 176, 128, 233, 84, 209, 146, 5, 119, 144, 46, 204, 49, 231, 47, 249, 202, 62, 255, 213, 195, 148, 42, 216, 124, 122, 89, 131, 78, 158, 213, 218, 29, 32, 122, 56, 80, 233, 104, 42, 55, 189, 107, 194, 238, 245, 219, 88, 183, 209, 49, 211, 215, 169, 193, 248, 3, 217, 53, 165, 201, 20, 8, 208, 254, 23, 152, 88, 94, 8, 60, 214, 18, 234, 252, 251, 0, 95, 130, 207, 248, 26, 224, 84, 177, 80, 194, 83, 132, 147, 21, 166, 77, 40, 42, 220, 172, 151, 164, 190, 173, 177, 139, 255, 12, 10, 19, 62, 94, 92, 174, 4, 36, 9, 173, 249, 58, 100, 243, 221, 42, 221, 88, 19, 23, 252, 179, 226, 75, 43, 25, 8, 250, 174, 14, 39, 230, 247, 87, 97, 215, 51, 228, 46, 222, 103, 151, 200, 137, 73, 223, 202, 205, 127, 72, 132, 3, 195, 156, 97, 166, 22, 194, 236, 46, 57, 47, 49, 249, 90, 153, 15, 47, 198, 219, 110, 8, 203, 43, 140, 232, 191, 49, 44, 103, 30, 39, 168, 70, 136, 180, 23, 245, 82, 207, 240, 107, 65, 66, 74, 96, 178, 136, 191, 161, 92, 42, 225, 100, 210, 52, 229, 252, 189, 69, 110, 255, 244, 15, 21, 209, 140, 80, 203, 140, 85, 205, 8, 135, 86, 176, 166, 216, 91, 189, 196, 79, 80, 246, 106, 41, 248, 185, 195, 46, 179, 168, 109, 71, 121, 153, 143, 111, 63, 22, 69, 52, 37, 2, 155, 145, 104, 85, 43, 47, 69, 60, 65, 22, 19, 198, 104, 215, 56, 12, 184, 98, 37, 32, 199, 106, 71, 115, 176, 75, 122, 248, 39, 177, 189, 244, 72, 96, 135, 214, 77, 209, 140, 68, 180, 102, 233, 127, 212, 59, 4, 181, 231, 62, 225, 240, 62, 18, 179, 44, 59, 135, 47, 197, 182, 6, 100, 162, 56, 159, 43, 35, 209, 35, 197, 203, 143, 58, 139, 166, 13, 80, 9, 242, 49, 45, 99, 115, 238, 190, 236, 47, 250, 96, 28, 6, 119, 197, 178, 112, 171, 50, 41, 143, 171, 212, 168, 8, 51, 212, 90, 171, 75, 47, 156, 103, 80, 28, 172, 228, 49, 227, 215, 213, 62, 111, 153, 20, 112, 179, 11, 16, 60, 207, 161, 86, 116, 116, 250, 
81, 74, 232, 151, 34, 165, 85, 139, 202, 161, 208, 179, 141, 201, 224, 108, 168, 30, 54, 32, 34, 4, 243, 35, 18, 47, 174, 240, 115, 229, 17, 185, 144, 180, 30, 239, 68, 235, 182, 77, 76, 207, 102, 165, 1, 200, 145, 125, 139, 253, 239, 40, 83, 255, 171, 78, 120, 109, 92, 120, 76, 120, 205, 116, 211, 161, 17, 76, 109, 126, 28, 241, 63, 183, 39, 177, 224, 192, 198, 137, 110, 225, 202, 64, 6, 239, 237, 121, 13, 35, 19, 205, 39, 43, 230, 203, 77, 184, 99, 145, 240, 37, 114, 134, 117, 17, 54, 9, 95, 193, 88, 65, 126, 39, 146, 126, 62, 114, 55, 187, 222, 121, 145, 149, 211, 121, 6, 17, 107, 175, 89, 122, 34, 44, 116, 218, 126, 187, 71, 191, 16, 228, 98, 23, 87, 77, 81, 13, 14, 38, 157, 141, 169, 233, 29, 98, 152, 8, 234, 96, 91, 77, 120, 220, 211, 46, 121, 180, 111, 168, 40, 242, 38, 236, 168, 111, 10, 85, 94, 133, 45, 74, 250, 28, 253, 107, 199, 6, 218, 55, 207, 103, 119, 97, 160, 250, 176, 135, 252, 21, 176, 224, 188, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 89, 53, 19, 83, 80, 184, 110, 48, 13, 148, 38, 57, 0, 0, 0, 0, 89, 53, 19, 83, 80, 184, 110, 48, 13, 148, 38, 57, 0, 0, 0, 0, 89, 53, 19, 83, 53, 157, 46, 228, 44, 100, 6, 157, 0, 0, 0, 0, 89, 53, 19, 83, 49, 100, 95, 92, 3, 214, 107, 168, 0, 0, 0, 0, 89, 53, 19, 83, 105, 200, 47, 192, 42, 36, 168, 23, 0, 0, 0, 0, 89, 53, 19, 83, 103, 248, 160, 44, 8, 171, 233, 143, 0, 0, 0, 0, 89, 53, 19, 83, 43, 27, 189, 16, 28, 10, 94, 76, 0, 0, 0, 0, 89, 53, 19, 68, 242, 185, 50, 196, 1, 216, 57, 65, 0, 0, 0, 0, 89, 53, 19, 139, 37, 34, 243, 88, 5, 112, 102, 11, 0, 0, 0, 0, 89, 53, 19, 83, 123, 8, 171, 64, 33, 188, 59, 16, 0, 0, 0, 0, 89, 53, 19, 163, 245, 104, 124, 68, 9, 98, 25, 235, 0, 0, 0, 0, 89, 53, 19, 139, 65, 107, 226, 4, 8, 144, 155, 158, 0, 0, 0, 0, 89, 53, 19, 234, 192, 216, 187, 232, 5, 154, 84, 215, 0, 0, 0, 0, 89, 53, 19, 191, 0, 65, 23, 88, 9, 157, 187, 194, 0, 0, 0, 0, 89, 53, 19, 68, 167, 194, 165, 224, 10, 219, 222, 27, 0, 0, 0, 0, 89, 53, 18, 
125, 157, 130, 214, 120, 4, 43, 225, 191, 0, 0, 0, 0, 89, 53, 21, 136, 206, 70, 37, 64, 17, 207, 214, 132, 0, 0, 0, 0, 89, 53, 19, 234, 199, 35, 129, 64, 7, 207, 4, 156, 0, 0, 0, 0, 89, 53, 18, 125, 97, 236, 218, 12, 5, 48, 197, 88, 0, 0, 0, 0, 89, 53, 15, 12, 8, 154, 2, 16, 3, 86, 85, 126, 0, 0, 0, 0, 89, 53, 21, 208, 43, 160, 216, 240, 2, 140, 168, 206, 0, 0, 0, 0, 89, 53, 21, 136, 214, 251, 86, 200, 5, 246, 36, 108, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 196, 143, 72, 43, 189, 196, 235, 72, 200, 143, 170, 119, 170, 14, 207, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 177, 44, 72, 181, 35, 106, 117, 91, 162, 103, 79, 11, 78, 19, 252, 154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 184, 42, 92, 84, 222, 230, 10, 98, 4, 230, 99, 133, 197, 27, 254, 186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 162, 126, 6, 242, 61, 40, 168, 218, 169, 143, 71, 23, 196, 156, 153, 165, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 205, 239, 124, 102, 148, 144, 125, 146, 206, 3, 105, 225, 248, 47, 202, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 181, 12, 13, 236, 84, 197, 170, 108, 219, 112, 173, 238, 156, 144, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 81, 73, 188, 4, 159, 115, 0, 157, 56, 153, 159, 78, 129, 71, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 209, 66, 138, 207, 127, 187, 145, 247, 131, 145, 228, 118, 103, 112, 67, 178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 243, 190, 107, 187, 182, 254, 134, 98, 69, 119, 21, 167, 187, 240, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 193, 218, 206, 184, 36, 238, 107, 27, 165, 73, 170, 187, 47, 157, 136, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 158, 110, 146, 49, 211, 192, 184, 117, 18, 88, 103, 37, 136, 172, 79, 27, 160, 123, 142, 7, 160, 51, 68, 215, 216, 144, 248, 89, 147, 47, 63, 12, 16, 98, 29, 203, 214, 232, 245, 170, 92, 58, 10, 193, 138, 
40, 201, 159, 227, 160, 120, 50, 119, 93, 239, 86, 22, 245, 98, 16, 169, 180, 14, 136, 163, 121, 187, 213, 145, 33, 8, 153, 67, 177, 91, 117, 75, 65, 71, 46, 130, 81, 192]; - let compressed = compress(&block, blocks_swapper()); - let decompressed = decompress(&compressed, blocks_swapper()); - assert_eq!(decompressed.into_vec(), block); + let block = vec![ + 249, 97, 87, 249, 2, 19, 160, 137, 152, 36, 115, 234, 67, 89, 207, 44, 42, 186, 128, 91, + 242, 10, 16, 42, 193, 195, 2, 129, 60, 181, 150, 192, 178, 117, 15, 18, 100, 174, 249, 160, + 29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, 69, + 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71, 148, 223, 125, 126, 5, 57, 51, + 181, 204, 36, 55, 47, 135, 140, 144, 230, 45, 173, 173, 93, 66, 160, 93, 42, 52, 28, 156, + 139, 242, 60, 121, 90, 117, 99, 92, 182, 196, 25, 131, 16, 155, 186, 239, 137, 33, 118, + 105, 232, 230, 239, 213, 240, 207, 6, 160, 59, 72, 35, 216, 124, 37, 62, 178, 34, 97, 180, + 254, 212, 103, 179, 45, 247, 168, 205, 145, 7, 157, 75, 247, 83, 230, 233, 248, 97, 132, + 232, 161, 160, 122, 167, 249, 196, 203, 2, 173, 180, 106, 203, 129, 214, 232, 181, 87, 39, + 60, 99, 135, 6, 40, 34, 163, 118, 140, 149, 79, 241, 238, 230, 201, 194, 185, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, + 0, 0, 0, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 32, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 134, 36, 160, 31, 187, 182, 29, 131, 58, 212, + 207, 131, 71, 168, 13, 131, 45, 60, 7, 132, 89, 53, 51, 233, 147, 69, 84, 67, 32, 101, 116, + 104, 101, 114, 109, 105, 110, 101, 32, 45, 32, 69, 85, 49, 160, 204, 49, 229, 99, 26, 47, + 30, 50, 223, 117, 111, 168, 102, 158, 12, 186, 140, 98, 193, 196, 214, 29, 13, 87, 44, 31, + 216, 48, 250, 251, 148, 69, 136, 225, 206, 131, 96, 2, 39, 50, 46, 249, 95, 61, 248, 109, + 1, 133, 4, 227, 178, 146, 0, 130, 82, 8, 148, 123, 45, 95, 28, 247, 139, 190, 196, 52, 39, + 251, 53, 226, 79, 251, 1, 98, 34, 68, 240, 136, 18, 81, 1, 26, 180, 46, 168, 0, 128, 129, + 157, 160, 31, 164, 62, 186, 38, 56, 118, 133, 24, 180, 239, 139, 254, 154, 196, 115, 8, + 246, 45, 233, 227, 165, 192, 193, 7, 111, 1, 169, 2, 204, 2, 144, 160, 42, 60, 78, 200, 5, + 113, 98, 65, 250, 105, 0, 164, 152, 81, 235, 154, 100, 204, 182, 141, 174, 39, 107, 127, + 219, 120, 63, 221, 237, 87, 57, 9, 249, 94, 203, 52, 133, 2, 84, 11, 228, 0, 131, 44, 233, + 255, 148, 137, 61, 196, 25, 119, 102, 53, 248, 253, 27, 31, 169, 147, 75, 245, 41, 174, + 242, 86, 7, 128, 185, 94, 100, 194, 32, 154, 203, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 60, 131, 110, 76, 27, 36, 34, 34, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 138, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 32, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, + 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 91, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 92, 224, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 248, 249, 1, 245, 160, 89, 97, 73, 3, 84, 63, 105, 163, 169, 7, 237, + 254, 220, 243, 13, 27, 0, 45, 215, 39, 235, 232, 237, 22, 162, 83, 99, 164, 247, 240, 48, + 66, 160, 29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, + 18, 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71, 148, 137, 61, 196, 25, + 119, 102, 53, 248, 253, 27, 31, 169, 147, 75, 245, 41, 174, 242, 86, 7, 160, 243, 100, 253, + 32, 58, 153, 124, 189, 215, 216, 21, 229, 230, 182, 39, 222, 135, 217, 92, 155, 164, 143, + 79, 14, 178, 128, 26, 245, 152, 197, 247, 33, 160, 98, 156, 249, 227, 33, 40, 127, 226, + 250, 254, 222, 21, 37, 66, 5, 23, 142, 73, 121, 64, 233, 198, 110, 212, 131, 134, 126, 38, + 218, 85, 149, 212, 160, 32, 179, 60, 112, 219, 114, 185, 205, 184, 204, 31, 210, 181, 8, + 109, 97, 227, 206, 41, 177, 238, 122, 205, 193, 93, 163, 176, 107, 28, 181, 63, 19, 185, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 134, 36, 236, 76, 26, 58, 130, 131, 58, + 210, 155, 131, 71, 183, 132, 130, 82, 8, 132, 89, 53, 19, 83, 160, 83, 109, 97, 114, 116, + 80, 111, 111, 108, 45, 79, 76, 69, 66, 100, 120, 122, 50, 54, 106, 73, 50, 74, 65, 72, 109, + 
52, 48, 48, 48, 48, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 203, 86, 242, 75, 191, 176, 245, 37, + 106, 66, 34, 44, 77, 161, 164, 143, 56, 229, 102, 192, 191, 114, 46, 137, 15, 59, 249, 198, + 20, 64, 167, 176, 167, 146, 234, 72, 147, 122, 168, 34, 174, 210, 94, 131, 105, 110, 180, + 48, 248, 10, 127, 156, 194, 83, 60, 48, 89, 126, 86, 90, 218, 235, 170, 15, 206, 23, 82, + 13, 250, 255, 240, 112, 168, 137, 111, 204, 205, 125, 24, 81, 118, 165, 92, 28, 127, 50, + 223, 173, 231, 234, 212, 181, 9, 113, 72, 250, 97, 126, 194, 18, 102, 51, 189, 147, 246, 2, + 201, 27, 206, 15, 130, 172, 1, 206, 219, 204, 49, 211, 162, 78, 101, 26, 16, 73, 106, 70, + 209, 118, 140, 59, 133, 162, 114, 14, 92, 8, 26, 83, 213, 91, 136, 207, 228, 86, 13, 208, + 64, 85, 211, 143, 252, 181, 128, 77, 187, 198, 50, 91, 26, 156, 109, 233, 136, 36, 216, + 142, 87, 112, 45, 166, 235, 118, 128, 191, 66, 23, 48, 32, 3, 41, 37, 103, 8, 180, 236, 77, + 106, 59, 18, 77, 206, 23, 154, 143, 236, 63, 235, 52, 105, 155, 38, 207, 31, 145, 193, 98, + 251, 216, 224, 213, 250, 164, 22, 21, 227, 190, 223, 74, 29, 107, 0, 103, 187, 53, 129, + 204, 202, 170, 156, 19, 67, 185, 100, 252, 5, 129, 164, 58, 228, 121, 214, 27, 53, 35, 16, + 238, 93, 64, 94, 4, 250, 138, 102, 22, 128, 37, 229, 104, 40, 245, 216, 107, 16, 52, 202, + 54, 98, 43, 162, 245, 56, 48, 150, 41, 3, 224, 167, 171, 5, 254, 106, 199, 0, 47, 238, 162, + 27, 114, 168, 111, 231, 241, 102, 79, 138, 66, 168, 38, 157, 140, 76, 56, 155, 66, 109, + 253, 243, 207, 79, 88, 167, 111, 251, 135, 38, 249, 85, 167, 36, 113, 7, 243, 34, 132, 113, + 117, 8, 245, 93, 79, 171, 12, 223, 157, 41, 201, 221, 129, 60, 186, 164, 123, 1, 247, 202, + 28, 61, 121, 156, 214, 55, 68, 36, 205, 113, 203, 45, 173, 24, 190, 32, 42, 207, 69, 189, + 209, 221, 50, 18, 77, 60, 249, 233, 101, 216, 168, 47, 16, 155, 172, 248, 90, 115, 162, + 134, 95, 6, 227, 202, 46, 26, 133, 135, 65, 251, 97, 109, 181, 
141, 87, 156, 59, 40, 253, + 87, 116, 244, 225, 242, 133, 77, 199, 120, 210, 152, 108, 90, 137, 127, 214, 137, 250, 180, + 148, 113, 71, 68, 183, 216, 166, 169, 54, 174, 235, 51, 45, 172, 47, 5, 205, 154, 71, 241, + 106, 228, 222, 150, 34, 95, 68, 251, 78, 244, 111, 59, 252, 138, 106, 31, 185, 107, 123, + 22, 37, 184, 249, 117, 231, 56, 224, 137, 13, 254, 50, 240, 252, 220, 74, 33, 223, 163, 97, + 74, 180, 232, 189, 92, 104, 193, 59, 255, 80, 10, 146, 236, 17, 80, 90, 139, 35, 212, 153, + 19, 13, 159, 7, 184, 77, 193, 43, 251, 149, 176, 147, 169, 227, 236, 74, 98, 163, 193, 180, + 4, 186, 19, 29, 212, 166, 177, 114, 30, 254, 241, 141, 95, 72, 99, 104, 101, 21, 141, 181, + 85, 200, 184, 190, 11, 255, 74, 39, 89, 41, 204, 187, 96, 202, 188, 191, 157, 135, 239, + 166, 155, 86, 32, 228, 198, 117, 41, 31, 54, 151, 229, 214, 56, 149, 187, 177, 123, 164, + 182, 232, 188, 21, 94, 2, 155, 177, 124, 94, 151, 249, 116, 78, 62, 0, 194, 76, 156, 86, + 227, 11, 25, 35, 6, 175, 77, 151, 60, 44, 114, 234, 153, 124, 79, 114, 88, 36, 178, 223, + 229, 61, 90, 198, 60, 237, 224, 169, 115, 22, 152, 254, 235, 17, 130, 53, 1, 235, 67, 90, + 238, 52, 170, 182, 73, 182, 138, 182, 189, 130, 170, 200, 91, 14, 150, 174, 100, 63, 29, + 93, 171, 150, 154, 93, 72, 34, 156, 173, 108, 74, 29, 107, 197, 130, 86, 106, 178, 138, + 210, 0, 92, 11, 78, 111, 226, 120, 169, 222, 50, 18, 156, 170, 98, 206, 97, 21, 28, 112, + 234, 145, 2, 216, 188, 172, 150, 183, 39, 125, 17, 146, 49, 184, 242, 2, 60, 19, 162, 180, + 210, 254, 249, 182, 240, 148, 184, 118, 113, 198, 27, 115, 165, 193, 97, 232, 201, 140, 63, + 200, 153, 10, 162, 170, 69, 68, 152, 199, 103, 14, 164, 228, 120, 71, 47, 196, 217, 251, + 181, 26, 112, 185, 116, 184, 77, 93, 236, 160, 235, 40, 0, 162, 199, 123, 223, 37, 53, 73, + 177, 69, 207, 11, 137, 67, 204, 57, 231, 50, 185, 148, 48, 30, 102, 195, 248, 40, 189, 227, + 187, 95, 166, 1, 7, 30, 112, 32, 101, 38, 72, 208, 52, 67, 131, 96, 34, 192, 156, 48, 92, + 231, 188, 97, 103, 97, 41, 254, 
33, 72, 190, 68, 184, 85, 114, 25, 94, 65, 144, 102, 176, + 150, 76, 247, 146, 9, 188, 66, 4, 144, 98, 233, 196, 166, 7, 15, 54, 148, 247, 81, 52, 253, + 27, 113, 235, 255, 230, 179, 168, 140, 95, 230, 163, 86, 235, 119, 185, 152, 223, 129, 48, + 169, 170, 225, 224, 154, 164, 209, 129, 232, 92, 190, 187, 38, 217, 36, 113, 250, 61, 125, + 168, 25, 70, 227, 39, 142, 12, 226, 20, 134, 82, 143, 147, 211, 173, 149, 223, 58, 27, 163, + 18, 252, 148, 112, 19, 101, 144, 21, 105, 34, 200, 254, 11, 254, 8, 247, 210, 142, 194, 54, + 150, 34, 171, 33, 203, 79, 79, 135, 186, 51, 68, 215, 106, 100, 21, 239, 178, 68, 230, 238, + 72, 4, 31, 101, 161, 29, 204, 129, 234, 66, 195, 6, 69, 77, 48, 157, 137, 236, 96, 138, + 119, 86, 147, 205, 172, 242, 28, 115, 188, 254, 216, 35, 89, 33, 27, 162, 243, 58, 102, + 131, 50, 23, 169, 58, 218, 111, 6, 199, 164, 171, 104, 207, 34, 111, 229, 3, 207, 124, 96, + 241, 95, 105, 113, 4, 16, 48, 163, 153, 0, 96, 174, 246, 3, 154, 124, 14, 132, 237, 184, + 91, 217, 201, 53, 80, 39, 31, 113, 246, 67, 132, 85, 62, 153, 187, 240, 180, 117, 162, 17, + 27, 190, 72, 178, 14, 101, 173, 200, 14, 196, 225, 212, 38, 153, 165, 147, 130, 51, 134, + 40, 36, 7, 234, 179, 73, 28, 224, 4, 222, 10, 61, 65, 78, 23, 98, 246, 144, 9, 16, 180, + 248, 56, 214, 162, 214, 18, 50, 67, 207, 3, 77, 7, 6, 28, 43, 131, 124, 117, 124, 213, 160, + 67, 1, 44, 39, 15, 28, 149, 45, 54, 127, 157, 237, 193, 132, 174, 214, 8, 164, 106, 159, + 213, 27, 173, 33, 24, 31, 110, 210, 95, 140, 236, 71, 68, 207, 225, 134, 105, 25, 226, 168, + 119, 172, 1, 252, 11, 231, 182, 93, 254, 211, 58, 78, 192, 168, 67, 71, 11, 160, 76, 111, + 234, 138, 119, 135, 51, 226, 113, 167, 146, 103, 97, 43, 192, 133, 252, 124, 42, 190, 217, + 38, 95, 95, 222, 14, 245, 192, 116, 245, 68, 114, 253, 25, 229, 221, 0, 158, 27, 163, 45, + 244, 200, 0, 205, 150, 162, 76, 224, 168, 203, 79, 114, 137, 18, 206, 12, 152, 241, 159, + 245, 197, 175, 146, 16, 62, 152, 112, 233, 23, 132, 151, 100, 175, 26, 83, 0, 254, 150, + 
230, 94, 174, 77, 39, 57, 106, 155, 69, 85, 230, 239, 202, 205, 49, 17, 47, 72, 128, 251, + 42, 157, 218, 234, 213, 127, 118, 152, 74, 139, 99, 120, 217, 116, 16, 73, 79, 40, 88, 107, + 38, 132, 34, 101, 225, 109, 114, 91, 66, 69, 10, 44, 191, 6, 160, 208, 104, 123, 247, 247, + 49, 151, 246, 216, 228, 157, 69, 140, 155, 98, 147, 202, 12, 79, 168, 40, 210, 164, 230, + 241, 107, 109, 231, 243, 187, 252, 100, 70, 41, 140, 35, 231, 115, 98, 49, 192, 102, 46, + 12, 82, 162, 226, 104, 86, 169, 42, 34, 213, 76, 89, 197, 131, 10, 25, 120, 204, 115, 231, + 103, 217, 164, 13, 166, 139, 141, 77, 75, 27, 21, 84, 212, 209, 189, 148, 198, 213, 96, + 234, 190, 213, 40, 210, 152, 141, 4, 110, 167, 209, 226, 236, 247, 139, 119, 215, 175, 30, + 49, 225, 174, 42, 195, 251, 228, 170, 246, 94, 117, 128, 34, 177, 10, 15, 126, 165, 52, + 159, 249, 67, 35, 200, 86, 65, 254, 141, 198, 219, 55, 225, 240, 111, 248, 101, 29, 235, + 192, 112, 225, 19, 255, 139, 129, 46, 42, 9, 8, 74, 58, 254, 191, 49, 173, 189, 32, 5, 174, + 81, 223, 94, 145, 85, 40, 194, 145, 251, 88, 168, 166, 143, 39, 33, 111, 230, 115, 77, 113, + 83, 100, 211, 244, 163, 201, 141, 71, 193, 106, 138, 3, 215, 134, 217, 191, 10, 177, 250, + 233, 247, 127, 162, 112, 206, 150, 187, 205, 141, 211, 181, 58, 199, 11, 220, 91, 180, 169, + 182, 178, 224, 159, 144, 105, 146, 29, 162, 221, 106, 190, 209, 151, 126, 98, 187, 91, 50, + 91, 198, 210, 78, 189, 155, 193, 208, 178, 164, 0, 189, 145, 189, 7, 162, 88, 156, 33, 118, + 176, 125, 130, 199, 108, 240, 99, 125, 119, 202, 188, 53, 17, 140, 102, 26, 30, 23, 30, + 214, 203, 93, 82, 6, 67, 28, 31, 12, 166, 105, 63, 132, 69, 166, 208, 252, 140, 56, 175, + 245, 17, 170, 3, 193, 26, 168, 92, 40, 131, 82, 17, 144, 128, 91, 48, 220, 16, 209, 53, + 226, 4, 225, 6, 253, 141, 25, 254, 146, 41, 63, 90, 19, 6, 75, 96, 130, 156, 14, 91, 231, + 207, 128, 244, 15, 135, 63, 45, 235, 51, 134, 10, 75, 252, 212, 63, 51, 131, 253, 110, 144, + 239, 182, 194, 107, 45, 103, 124, 83, 154, 173, 181, 51, 60, 
63, 75, 172, 103, 222, 53, 31, + 73, 93, 76, 159, 184, 24, 240, 41, 97, 72, 131, 54, 113, 170, 233, 182, 86, 62, 88, 131, + 206, 86, 180, 32, 143, 172, 114, 41, 233, 121, 221, 190, 192, 97, 48, 196, 185, 135, 67, + 79, 190, 66, 181, 196, 246, 211, 94, 250, 228, 62, 117, 159, 141, 54, 186, 81, 143, 113, + 108, 102, 252, 182, 146, 233, 20, 135, 30, 255, 237, 33, 224, 68, 8, 175, 33, 58, 49, 16, + 226, 113, 54, 90, 209, 159, 211, 193, 203, 187, 186, 156, 89, 81, 127, 211, 171, 113, 92, + 238, 193, 159, 11, 242, 183, 97, 67, 211, 138, 203, 35, 113, 247, 64, 205, 71, 2, 127, 208, + 90, 139, 132, 148, 180, 247, 7, 119, 195, 128, 96, 221, 195, 110, 44, 53, 114, 26, 196, + 226, 219, 134, 195, 190, 205, 171, 29, 137, 224, 75, 167, 188, 94, 123, 199, 64, 167, 105, + 191, 52, 247, 43, 224, 181, 250, 58, 109, 25, 1, 120, 7, 223, 250, 69, 199, 83, 15, 20, + 136, 90, 247, 128, 109, 169, 243, 24, 220, 215, 188, 169, 46, 31, 131, 112, 79, 132, 93, + 170, 127, 238, 207, 138, 110, 201, 232, 163, 214, 77, 45, 113, 103, 179, 197, 206, 83, 148, + 6, 185, 231, 174, 111, 171, 162, 147, 39, 252, 36, 132, 173, 145, 77, 43, 223, 164, 46, + 136, 129, 210, 219, 221, 202, 246, 182, 95, 135, 8, 55, 229, 71, 245, 57, 131, 217, 93, 90, + 179, 195, 230, 102, 169, 80, 51, 197, 6, 48, 143, 112, 121, 27, 79, 174, 47, 88, 240, 47, + 10, 62, 94, 177, 78, 234, 112, 136, 65, 84, 4, 245, 190, 138, 38, 98, 108, 187, 47, 236, + 65, 95, 233, 245, 47, 126, 208, 199, 74, 229, 158, 20, 238, 148, 218, 241, 233, 229, 13, + 14, 206, 95, 42, 22, 65, 191, 218, 73, 116, 90, 23, 4, 166, 32, 236, 51, 107, 252, 198, 10, + 163, 100, 140, 221, 174, 111, 179, 171, 183, 159, 204, 229, 104, 156, 190, 99, 142, 130, + 46, 142, 31, 244, 30, 114, 69, 86, 159, 173, 185, 88, 130, 238, 12, 216, 140, 134, 15, 104, + 64, 131, 106, 95, 206, 11, 1, 183, 75, 167, 43, 63, 188, 39, 144, 14, 179, 140, 47, 140, + 253, 202, 192, 215, 156, 108, 18, 211, 9, 75, 108, 47, 212, 81, 24, 149, 244, 189, 60, 199, + 210, 251, 22, 15, 247, 145, 136, 
59, 51, 228, 31, 227, 226, 12, 199, 36, 126, 167, 153, + 168, 220, 216, 56, 68, 79, 99, 88, 13, 34, 125, 123, 33, 239, 167, 121, 240, 167, 65, 1, + 82, 9, 126, 165, 52, 107, 207, 11, 48, 112, 3, 150, 102, 30, 177, 250, 211, 194, 239, 204, + 242, 27, 31, 118, 231, 44, 200, 178, 138, 104, 164, 131, 113, 205, 95, 88, 165, 136, 177, + 7, 111, 216, 83, 186, 110, 193, 241, 164, 24, 237, 204, 95, 250, 183, 26, 100, 70, 58, 91, + 190, 43, 165, 31, 136, 139, 190, 27, 67, 52, 72, 65, 120, 121, 54, 81, 27, 209, 28, 74, 51, + 58, 211, 86, 46, 92, 123, 222, 10, 44, 145, 78, 90, 26, 61, 64, 143, 228, 227, 254, 73, + 218, 85, 93, 218, 33, 123, 181, 195, 9, 76, 237, 41, 119, 145, 131, 106, 4, 150, 58, 7, + 156, 65, 113, 108, 28, 135, 43, 21, 245, 159, 20, 163, 123, 27, 20, 58, 212, 183, 242, 82, + 228, 110, 80, 53, 164, 161, 180, 244, 125, 168, 207, 81, 4, 253, 95, 101, 88, 31, 63, 25, + 134, 59, 130, 0, 90, 220, 24, 200, 221, 149, 76, 225, 16, 26, 77, 179, 172, 190, 145, 199, + 145, 239, 140, 106, 131, 29, 211, 187, 12, 207, 40, 205, 237, 151, 159, 18, 76, 49, 193, + 137, 196, 197, 105, 181, 171, 23, 172, 208, 115, 95, 139, 136, 150, 211, 251, 132, 247, + 206, 93, 70, 19, 181, 13, 205, 216, 79, 238, 229, 123, 12, 38, 24, 233, 96, 205, 223, 126, + 95, 54, 15, 145, 149, 40, 41, 34, 30, 17, 1, 153, 252, 244, 214, 40, 66, 158, 71, 219, 142, + 206, 216, 251, 200, 191, 75, 34, 147, 163, 27, 127, 64, 195, 187, 129, 132, 63, 224, 112, + 1, 154, 28, 76, 141, 165, 195, 155, 53, 84, 189, 57, 148, 144, 98, 14, 232, 213, 179, 55, + 191, 169, 203, 103, 99, 120, 159, 254, 201, 113, 51, 23, 99, 116, 246, 200, 59, 122, 13, + 165, 118, 14, 230, 35, 231, 158, 48, 20, 104, 147, 121, 161, 241, 83, 109, 58, 84, 246, + 251, 170, 145, 99, 179, 150, 32, 203, 93, 193, 99, 13, 86, 8, 224, 237, 61, 69, 58, 27, 79, + 229, 84, 129, 145, 109, 58, 2, 230, 50, 0, 38, 189, 160, 18, 89, 158, 44, 255, 190, 108, + 59, 161, 43, 49, 238, 84, 229, 223, 183, 138, 75, 130, 84, 208, 112, 183, 75, 108, 229, + 254, 186, 
103, 207, 244, 244, 149, 217, 71, 217, 238, 59, 98, 49, 214, 251, 83, 43, 104, + 84, 0, 244, 181, 253, 235, 119, 236, 31, 36, 163, 105, 46, 52, 21, 212, 81, 170, 106, 186, + 162, 63, 24, 235, 197, 184, 239, 229, 147, 105, 73, 145, 164, 214, 128, 23, 141, 50, 203, + 41, 135, 32, 169, 168, 152, 137, 199, 38, 93, 226, 205, 20, 190, 208, 216, 76, 204, 133, + 134, 137, 99, 89, 32, 57, 235, 138, 176, 10, 63, 159, 110, 85, 155, 164, 250, 194, 190, 58, + 202, 52, 237, 95, 54, 149, 206, 144, 74, 123, 152, 231, 41, 202, 121, 248, 243, 40, 247, + 161, 225, 88, 148, 188, 79, 6, 235, 43, 132, 24, 200, 229, 86, 53, 166, 123, 116, 233, 203, + 23, 212, 55, 0, 195, 238, 247, 152, 247, 216, 229, 23, 93, 244, 224, 96, 37, 6, 61, 24, + 107, 51, 63, 254, 187, 179, 71, 7, 45, 170, 122, 160, 223, 44, 203, 190, 142, 134, 129, + 210, 67, 41, 17, 118, 139, 196, 163, 243, 54, 81, 192, 212, 129, 52, 227, 168, 113, 238, + 178, 185, 158, 213, 144, 12, 198, 236, 234, 35, 179, 157, 91, 138, 46, 220, 35, 65, 82, 11, + 16, 125, 96, 248, 199, 155, 100, 48, 154, 106, 187, 141, 250, 191, 130, 222, 131, 227, 14, + 138, 219, 23, 23, 190, 180, 155, 147, 58, 224, 150, 39, 182, 226, 239, 3, 45, 47, 99, 255, + 32, 73, 242, 233, 180, 149, 238, 45, 103, 39, 196, 231, 138, 16, 51, 224, 221, 136, 174, + 205, 149, 132, 92, 83, 186, 48, 133, 204, 210, 5, 40, 57, 22, 64, 160, 4, 73, 168, 178, + 229, 222, 185, 235, 109, 196, 109, 183, 150, 158, 0, 56, 16, 41, 43, 193, 31, 204, 169, + 219, 50, 167, 242, 107, 206, 105, 228, 246, 59, 53, 18, 42, 19, 217, 56, 220, 59, 46, 160, + 107, 200, 164, 0, 5, 213, 178, 133, 187, 142, 174, 218, 60, 252, 237, 61, 66, 146, 207, + 179, 93, 234, 83, 219, 93, 55, 107, 92, 139, 101, 151, 198, 135, 246, 223, 101, 139, 44, + 166, 82, 97, 233, 217, 130, 67, 22, 88, 36, 131, 80, 179, 192, 34, 206, 119, 167, 17, 65, + 217, 71, 250, 52, 253, 29, 4, 156, 63, 64, 81, 6, 67, 130, 79, 0, 95, 87, 36, 43, 4, 26, + 51, 250, 24, 178, 197, 41, 20, 142, 187, 180, 239, 156, 102, 113, 15, 13, 18, 90, 
70, 184, + 7, 250, 193, 117, 127, 60, 0, 198, 218, 128, 116, 119, 124, 126, 160, 129, 231, 68, 189, + 98, 48, 43, 120, 178, 91, 54, 242, 66, 112, 55, 188, 42, 179, 94, 60, 249, 136, 81, 149, + 132, 98, 209, 181, 219, 223, 31, 40, 235, 55, 5, 69, 11, 63, 192, 51, 224, 119, 107, 193, + 84, 178, 191, 58, 217, 222, 183, 118, 114, 95, 45, 48, 126, 184, 10, 207, 0, 217, 59, 167, + 244, 219, 9, 238, 41, 13, 127, 191, 123, 139, 9, 74, 40, 164, 194, 205, 116, 204, 185, 230, + 133, 148, 215, 50, 105, 173, 220, 113, 117, 98, 233, 80, 209, 185, 211, 232, 29, 157, 12, + 206, 228, 187, 26, 240, 193, 21, 15, 69, 115, 120, 66, 2, 92, 165, 103, 29, 177, 151, 154, + 211, 107, 177, 89, 16, 135, 243, 135, 178, 237, 184, 110, 191, 169, 230, 243, 106, 16, 95, + 128, 124, 73, 132, 107, 116, 123, 38, 0, 72, 24, 171, 8, 113, 227, 43, 136, 168, 25, 174, + 100, 201, 252, 212, 119, 246, 131, 95, 78, 65, 222, 89, 192, 87, 66, 102, 58, 237, 94, 56, + 138, 71, 121, 65, 49, 22, 65, 29, 183, 56, 59, 27, 16, 176, 232, 120, 22, 104, 212, 83, 32, + 55, 90, 2, 37, 108, 112, 146, 113, 34, 64, 78, 65, 80, 198, 101, 190, 60, 69, 60, 230, 72, + 216, 215, 225, 166, 155, 122, 140, 137, 245, 89, 77, 237, 209, 242, 67, 37, 88, 5, 117, + 101, 165, 98, 84, 139, 18, 57, 118, 67, 78, 254, 178, 60, 181, 124, 144, 169, 13, 20, 233, + 60, 51, 55, 1, 215, 225, 224, 42, 191, 189, 227, 246, 25, 53, 164, 27, 1, 66, 112, 36, 211, + 67, 195, 172, 119, 52, 81, 244, 10, 39, 11, 219, 160, 35, 230, 225, 176, 171, 74, 254, 100, + 229, 4, 64, 140, 85, 94, 85, 197, 63, 234, 173, 243, 129, 52, 220, 154, 175, 195, 223, 23, + 85, 20, 171, 79, 240, 185, 10, 14, 93, 185, 93, 100, 11, 41, 39, 106, 137, 70, 122, 173, + 221, 195, 73, 174, 145, 7, 85, 1, 232, 126, 67, 38, 213, 104, 164, 213, 213, 5, 23, 233, + 233, 47, 222, 14, 3, 33, 180, 180, 49, 136, 73, 46, 237, 209, 47, 197, 173, 95, 131, 4, + 157, 40, 217, 217, 156, 31, 81, 103, 24, 116, 208, 225, 123, 11, 122, 143, 55, 224, 125, + 121, 42, 109, 151, 120, 245, 141, 176, 100, 12, 
237, 222, 197, 100, 79, 16, 168, 247, 103, + 223, 85, 92, 26, 17, 23, 91, 90, 249, 228, 162, 166, 164, 178, 215, 48, 203, 209, 138, 42, + 42, 87, 132, 134, 120, 39, 13, 50, 77, 187, 15, 42, 82, 80, 242, 210, 141, 28, 213, 194, + 241, 75, 86, 159, 120, 217, 98, 94, 100, 129, 1, 228, 157, 183, 171, 186, 164, 245, 72, + 107, 119, 241, 227, 77, 109, 120, 58, 248, 36, 82, 38, 188, 50, 177, 65, 227, 192, 169, + 124, 7, 137, 254, 80, 151, 200, 73, 55, 99, 30, 150, 192, 13, 106, 41, 236, 18, 6, 73, 73, + 16, 239, 162, 0, 218, 201, 184, 78, 184, 137, 253, 12, 130, 241, 245, 158, 233, 159, 0, + 130, 99, 50, 12, 240, 186, 207, 221, 250, 179, 115, 138, 137, 154, 182, 72, 119, 204, 16, + 18, 101, 107, 27, 98, 142, 16, 143, 116, 161, 238, 232, 227, 70, 247, 124, 150, 157, 67, + 114, 24, 162, 246, 158, 205, 24, 77, 79, 107, 232, 141, 103, 99, 128, 101, 40, 241, 212, + 127, 90, 40, 2, 229, 105, 51, 164, 70, 216, 95, 2, 240, 54, 224, 11, 241, 201, 181, 97, + 113, 49, 10, 170, 203, 10, 7, 130, 42, 236, 195, 146, 175, 222, 91, 235, 145, 13, 33, 82, + 81, 187, 250, 248, 59, 250, 222, 136, 69, 192, 178, 189, 103, 176, 235, 101, 89, 133, 145, + 203, 35, 138, 157, 180, 144, 6, 172, 55, 58, 147, 217, 109, 95, 81, 141, 214, 209, 245, + 146, 96, 23, 130, 101, 107, 180, 188, 69, 204, 224, 195, 29, 108, 199, 70, 239, 18, 199, + 143, 68, 161, 121, 110, 132, 238, 186, 97, 217, 227, 106, 86, 162, 24, 170, 155, 34, 8, 92, + 154, 199, 184, 210, 214, 20, 31, 200, 22, 169, 186, 176, 26, 52, 62, 5, 251, 241, 108, 67, + 153, 104, 242, 20, 28, 143, 23, 92, 191, 96, 254, 141, 235, 113, 154, 10, 219, 117, 159, + 56, 31, 140, 161, 225, 247, 236, 253, 126, 160, 115, 23, 15, 175, 190, 227, 94, 149, 166, + 102, 173, 251, 60, 192, 100, 70, 126, 89, 88, 154, 70, 176, 82, 212, 137, 12, 243, 203, + 215, 247, 252, 91, 27, 19, 104, 229, 123, 83, 164, 166, 1, 90, 189, 251, 6, 81, 168, 28, + 250, 206, 104, 62, 122, 66, 100, 183, 204, 196, 12, 57, 237, 25, 151, 165, 202, 65, 176, + 124, 4, 94, 99, 250, 31, 43, 245, 
160, 19, 247, 137, 120, 147, 24, 227, 192, 34, 21, 113, + 168, 214, 235, 104, 140, 252, 170, 127, 90, 181, 149, 35, 160, 69, 110, 233, 71, 194, 66, + 156, 225, 152, 129, 163, 168, 162, 154, 99, 172, 144, 85, 253, 238, 250, 48, 69, 99, 139, + 49, 133, 200, 148, 34, 238, 235, 163, 13, 229, 232, 206, 118, 9, 132, 186, 253, 112, 251, + 185, 191, 253, 155, 31, 161, 100, 249, 50, 145, 99, 243, 16, 21, 182, 137, 81, 104, 165, + 96, 66, 112, 137, 68, 233, 58, 217, 65, 209, 62, 125, 164, 71, 64, 3, 44, 47, 172, 14, 211, + 105, 241, 194, 50, 196, 89, 82, 26, 174, 194, 101, 119, 171, 140, 162, 141, 123, 206, 115, + 85, 248, 33, 85, 228, 42, 92, 97, 100, 0, 16, 52, 27, 111, 146, 22, 223, 104, 54, 254, 70, + 119, 180, 9, 126, 226, 173, 218, 140, 232, 213, 96, 232, 138, 148, 131, 144, 15, 69, 136, + 43, 252, 203, 249, 20, 198, 32, 153, 17, 9, 28, 69, 183, 178, 240, 203, 31, 197, 112, 215, + 34, 199, 95, 180, 116, 231, 191, 216, 12, 90, 216, 161, 227, 246, 5, 183, 26, 243, 239, 47, + 193, 208, 140, 195, 151, 178, 100, 250, 38, 182, 15, 156, 211, 44, 207, 216, 110, 150, 234, + 92, 160, 204, 228, 246, 38, 224, 16, 83, 166, 33, 22, 163, 174, 99, 80, 3, 23, 142, 55, 3, + 253, 128, 63, 222, 111, 158, 184, 10, 60, 153, 124, 202, 53, 189, 14, 118, 45, 87, 79, 252, + 20, 226, 58, 249, 197, 29, 162, 78, 211, 147, 165, 240, 7, 60, 175, 73, 90, 61, 167, 239, + 68, 104, 66, 207, 208, 194, 87, 148, 158, 184, 247, 129, 7, 150, 171, 62, 157, 62, 240, + 137, 36, 88, 122, 225, 83, 173, 245, 238, 245, 249, 227, 252, 152, 40, 8, 3, 150, 155, 71, + 121, 187, 46, 173, 248, 107, 78, 135, 232, 20, 174, 165, 135, 5, 106, 244, 62, 79, 249, 72, + 99, 252, 177, 27, 158, 223, 235, 59, 34, 141, 45, 221, 238, 31, 102, 247, 65, 170, 15, 219, + 210, 232, 80, 100, 68, 171, 37, 211, 156, 83, 147, 246, 219, 89, 41, 195, 110, 252, 146, + 164, 200, 63, 22, 154, 235, 157, 224, 24, 13, 102, 218, 53, 116, 235, 195, 57, 44, 157, 20, + 89, 193, 102, 118, 163, 6, 24, 185, 177, 103, 106, 237, 7, 242, 206, 186, 232, 186, 
164, + 89, 138, 96, 207, 218, 228, 8, 77, 218, 15, 219, 168, 183, 178, 106, 155, 150, 37, 27, 139, + 195, 72, 245, 64, 122, 219, 34, 125, 244, 159, 107, 23, 200, 151, 221, 249, 115, 21, 122, + 60, 189, 31, 36, 127, 78, 52, 175, 187, 142, 179, 50, 53, 30, 246, 156, 89, 21, 120, 137, + 129, 51, 10, 219, 176, 167, 197, 56, 128, 1, 21, 249, 225, 157, 135, 91, 248, 216, 237, + 108, 143, 105, 159, 168, 97, 56, 54, 26, 181, 62, 66, 194, 192, 246, 142, 174, 157, 28, 52, + 18, 33, 44, 54, 106, 28, 19, 94, 56, 49, 80, 181, 104, 237, 198, 100, 25, 119, 50, 224, + 202, 71, 203, 246, 127, 108, 93, 3, 58, 61, 195, 222, 215, 50, 146, 156, 110, 216, 51, 40, + 228, 9, 192, 77, 26, 58, 144, 247, 220, 122, 136, 51, 97, 227, 255, 158, 63, 76, 105, 203, + 118, 96, 163, 154, 19, 163, 65, 249, 170, 225, 220, 193, 235, 177, 57, 168, 2, 14, 44, 34, + 130, 107, 145, 189, 125, 158, 223, 106, 229, 0, 175, 136, 163, 176, 151, 192, 102, 36, 251, + 135, 128, 146, 36, 182, 219, 135, 150, 115, 34, 164, 3, 69, 39, 128, 31, 3, 250, 103, 109, + 228, 88, 77, 91, 51, 229, 228, 39, 140, 87, 33, 182, 110, 20, 127, 154, 139, 110, 252, 14, + 92, 5, 39, 245, 215, 141, 132, 66, 99, 195, 210, 248, 104, 29, 72, 126, 58, 54, 254, 137, + 250, 235, 51, 200, 139, 32, 61, 174, 43, 194, 99, 179, 203, 207, 53, 245, 133, 177, 124, + 73, 203, 155, 38, 11, 171, 223, 12, 171, 48, 169, 129, 190, 14, 43, 247, 147, 163, 204, 5, + 102, 247, 251, 92, 79, 18, 195, 208, 25, 55, 228, 229, 55, 83, 17, 96, 4, 60, 85, 10, 218, + 76, 101, 24, 172, 80, 228, 108, 45, 55, 129, 87, 97, 22, 86, 102, 70, 31, 187, 191, 1, 178, + 184, 253, 120, 104, 197, 84, 242, 112, 17, 245, 82, 85, 96, 40, 21, 9, 4, 45, 209, 147, + 162, 110, 90, 34, 41, 194, 135, 169, 75, 169, 114, 206, 220, 166, 81, 141, 187, 64, 120, + 169, 165, 23, 66, 222, 214, 18, 184, 136, 116, 199, 67, 91, 151, 67, 123, 201, 40, 171, 30, + 81, 53, 172, 11, 252, 52, 200, 191, 236, 81, 63, 28, 28, 79, 197, 11, 72, 49, 4, 124, 134, + 233, 232, 35, 46, 146, 22, 245, 251, 182, 162, 76, 
231, 32, 196, 244, 181, 102, 124, 215, + 106, 183, 214, 60, 118, 51, 84, 203, 105, 179, 56, 73, 212, 219, 228, 251, 38, 126, 206, + 115, 119, 115, 151, 21, 49, 80, 168, 206, 170, 174, 215, 59, 70, 84, 59, 126, 167, 254, + 115, 232, 114, 60, 182, 40, 129, 177, 240, 200, 38, 37, 198, 209, 23, 76, 170, 118, 83, 1, + 131, 247, 56, 26, 57, 127, 156, 31, 144, 27, 184, 203, 148, 35, 11, 202, 85, 112, 185, 103, + 107, 238, 6, 233, 105, 127, 30, 36, 5, 87, 171, 241, 3, 56, 204, 132, 15, 217, 229, 139, + 21, 217, 129, 72, 249, 87, 89, 79, 96, 37, 236, 89, 92, 155, 19, 126, 84, 218, 53, 184, 26, + 224, 179, 7, 109, 243, 241, 91, 4, 121, 186, 223, 116, 222, 74, 23, 153, 101, 79, 27, 149, + 143, 26, 68, 95, 238, 21, 43, 211, 107, 255, 231, 205, 156, 7, 52, 151, 74, 253, 249, 109, + 51, 153, 31, 163, 151, 172, 121, 248, 150, 198, 13, 118, 76, 248, 18, 210, 184, 219, 23, + 234, 59, 115, 254, 81, 254, 67, 110, 53, 177, 188, 60, 180, 25, 87, 10, 150, 44, 208, 115, + 144, 16, 3, 244, 119, 137, 173, 22, 158, 135, 89, 82, 86, 224, 43, 127, 108, 234, 223, 64, + 224, 50, 214, 124, 246, 207, 122, 29, 99, 234, 199, 48, 125, 106, 221, 148, 40, 5, 16, 140, + 234, 185, 222, 239, 214, 67, 77, 22, 165, 135, 224, 121, 236, 251, 70, 6, 123, 22, 230, + 117, 208, 172, 131, 59, 53, 108, 100, 173, 136, 63, 22, 130, 83, 137, 15, 28, 82, 36, 34, + 221, 235, 226, 114, 79, 126, 119, 218, 50, 130, 122, 143, 128, 240, 138, 49, 95, 254, 117, + 105, 94, 76, 117, 93, 6, 189, 236, 192, 252, 227, 60, 208, 190, 216, 173, 157, 42, 240, + 134, 97, 184, 215, 46, 30, 152, 214, 186, 205, 122, 64, 241, 9, 165, 113, 56, 244, 89, 27, + 38, 41, 168, 30, 3, 50, 249, 121, 35, 55, 79, 10, 253, 33, 221, 231, 112, 53, 230, 15, 241, + 247, 11, 202, 123, 27, 169, 254, 215, 208, 212, 184, 229, 58, 145, 142, 227, 155, 8, 98, + 108, 50, 239, 238, 82, 161, 80, 228, 170, 128, 65, 71, 94, 4, 135, 27, 30, 28, 176, 66, 87, + 245, 58, 0, 242, 162, 155, 1, 230, 130, 191, 161, 30, 167, 132, 37, 170, 137, 45, 70, 182, + 29, 64, 230, 204, 
218, 12, 26, 236, 146, 21, 130, 38, 184, 0, 185, 21, 57, 51, 227, 123, + 26, 216, 87, 227, 198, 78, 106, 171, 224, 255, 223, 12, 92, 86, 191, 171, 109, 68, 29, 196, + 31, 189, 44, 47, 198, 240, 63, 119, 237, 15, 4, 66, 204, 61, 54, 105, 188, 52, 38, 85, 29, + 10, 181, 57, 21, 63, 27, 248, 254, 144, 201, 69, 96, 72, 176, 206, 105, 209, 182, 227, 107, + 31, 34, 36, 199, 71, 50, 233, 205, 40, 110, 204, 197, 190, 202, 175, 155, 208, 26, 241, 28, + 118, 239, 86, 87, 198, 167, 203, 148, 79, 69, 114, 108, 111, 178, 8, 29, 141, 229, 254, + 121, 55, 99, 138, 88, 211, 25, 213, 124, 32, 248, 28, 178, 12, 184, 96, 118, 178, 219, 74, + 186, 235, 22, 144, 68, 124, 152, 163, 132, 152, 125, 193, 123, 203, 54, 115, 216, 254, 245, + 228, 88, 47, 119, 152, 56, 194, 120, 93, 63, 251, 138, 157, 142, 83, 235, 111, 98, 208, 67, + 226, 204, 115, 172, 14, 15, 217, 122, 68, 94, 214, 215, 29, 196, 49, 82, 155, 80, 204, 30, + 180, 143, 142, 231, 118, 154, 68, 4, 61, 180, 33, 86, 157, 251, 119, 49, 46, 239, 175, 251, + 115, 135, 182, 34, 219, 38, 134, 237, 12, 183, 159, 71, 196, 175, 119, 134, 146, 175, 239, + 200, 85, 24, 217, 227, 238, 110, 252, 34, 213, 35, 175, 138, 193, 218, 182, 239, 83, 198, + 26, 3, 30, 86, 54, 168, 174, 79, 129, 135, 146, 192, 218, 114, 151, 242, 108, 220, 232, 99, + 46, 215, 49, 77, 5, 233, 222, 144, 69, 82, 85, 250, 180, 112, 61, 41, 7, 70, 35, 129, 219, + 1, 102, 71, 40, 17, 52, 46, 102, 92, 75, 71, 121, 235, 18, 248, 78, 23, 237, 185, 36, 1, + 253, 155, 195, 206, 244, 208, 255, 183, 10, 124, 207, 30, 200, 87, 15, 2, 31, 82, 248, 77, + 199, 99, 110, 49, 133, 23, 175, 42, 20, 172, 13, 61, 178, 71, 197, 1, 87, 56, 214, 83, 138, + 223, 121, 216, 209, 94, 188, 153, 136, 22, 101, 223, 251, 36, 70, 159, 160, 137, 55, 158, + 136, 136, 94, 201, 249, 232, 129, 31, 163, 204, 148, 185, 219, 185, 188, 250, 87, 91, 233, + 206, 199, 222, 41, 173, 48, 157, 166, 37, 53, 134, 142, 204, 169, 185, 136, 17, 238, 221, + 67, 31, 233, 131, 78, 85, 250, 158, 115, 59, 6, 197, 0, 29, 209, 
138, 21, 65, 208, 208, + 100, 2, 249, 160, 42, 114, 168, 84, 100, 83, 98, 42, 113, 93, 199, 99, 3, 23, 187, 223, 35, + 173, 167, 172, 12, 250, 132, 84, 191, 41, 161, 177, 90, 245, 36, 174, 96, 92, 4, 65, 80, + 221, 118, 130, 137, 210, 181, 216, 234, 6, 114, 245, 247, 11, 183, 159, 78, 227, 93, 225, + 188, 56, 210, 246, 105, 24, 127, 13, 3, 223, 213, 164, 180, 246, 129, 106, 153, 92, 20, + 112, 229, 65, 37, 202, 16, 204, 79, 45, 14, 159, 111, 103, 125, 63, 209, 11, 212, 23, 29, + 14, 106, 187, 95, 125, 231, 159, 247, 143, 149, 128, 20, 201, 139, 220, 243, 141, 124, 26, + 171, 218, 135, 196, 74, 252, 154, 52, 245, 147, 236, 196, 248, 112, 70, 142, 237, 97, 191, + 75, 100, 47, 234, 68, 85, 201, 28, 130, 3, 198, 86, 253, 79, 150, 148, 144, 204, 233, 54, + 3, 5, 106, 19, 90, 161, 130, 16, 147, 168, 51, 234, 73, 72, 190, 54, 204, 146, 106, 213, + 51, 33, 182, 127, 68, 91, 247, 225, 121, 4, 90, 117, 154, 82, 244, 70, 88, 15, 122, 187, + 153, 136, 172, 52, 21, 174, 107, 175, 69, 121, 80, 180, 17, 154, 197, 30, 205, 200, 150, + 198, 136, 64, 199, 22, 175, 74, 4, 195, 49, 111, 218, 52, 213, 176, 230, 58, 138, 61, 209, + 13, 2, 114, 124, 80, 26, 50, 139, 86, 36, 192, 192, 36, 71, 80, 0, 59, 30, 135, 185, 103, + 130, 115, 138, 250, 126, 155, 175, 240, 84, 122, 137, 200, 171, 68, 72, 164, 79, 79, 28, + 90, 84, 110, 130, 122, 133, 165, 245, 35, 124, 195, 10, 99, 78, 140, 219, 139, 219, 107, 1, + 172, 39, 0, 28, 150, 91, 243, 188, 215, 147, 12, 70, 161, 15, 172, 102, 17, 146, 175, 95, + 79, 20, 209, 180, 207, 154, 153, 229, 190, 251, 73, 215, 11, 113, 37, 230, 193, 192, 218, + 119, 83, 16, 237, 167, 193, 157, 157, 102, 102, 203, 198, 68, 237, 252, 121, 120, 132, 126, + 65, 199, 197, 59, 186, 66, 182, 38, 126, 221, 17, 96, 102, 99, 218, 185, 124, 152, 18, 240, + 92, 241, 171, 178, 231, 64, 223, 228, 161, 110, 182, 217, 95, 166, 165, 113, 6, 203, 8, + 254, 9, 241, 90, 157, 60, 194, 125, 210, 73, 213, 243, 16, 159, 123, 119, 162, 42, 178, + 159, 91, 130, 235, 17, 142, 202, 65, 31, 
213, 60, 108, 95, 53, 161, 25, 85, 114, 18, 117, + 199, 73, 71, 168, 243, 188, 126, 180, 37, 10, 239, 179, 38, 227, 177, 184, 213, 193, 130, + 45, 115, 2, 196, 136, 54, 99, 161, 163, 212, 189, 9, 142, 113, 251, 137, 138, 187, 11, 117, + 189, 246, 30, 193, 150, 187, 60, 70, 108, 189, 10, 152, 108, 36, 102, 189, 204, 224, 8, + 100, 72, 219, 66, 97, 53, 90, 23, 109, 82, 49, 94, 227, 67, 207, 146, 127, 199, 212, 174, + 5, 115, 160, 81, 23, 44, 130, 49, 248, 2, 67, 94, 32, 231, 7, 238, 13, 65, 115, 85, 211, + 138, 184, 26, 88, 21, 69, 115, 129, 30, 63, 79, 116, 118, 207, 139, 204, 42, 88, 114, 154, + 26, 166, 116, 28, 189, 75, 201, 163, 102, 236, 156, 198, 58, 122, 99, 97, 179, 132, 66, + 245, 26, 90, 49, 91, 69, 117, 71, 163, 122, 152, 220, 240, 131, 139, 80, 95, 199, 50, 34, + 131, 99, 154, 8, 240, 77, 238, 85, 0, 60, 144, 102, 5, 83, 16, 174, 3, 125, 158, 231, 212, + 80, 67, 109, 64, 129, 65, 152, 126, 217, 180, 3, 123, 31, 139, 40, 177, 229, 154, 149, 0, + 9, 108, 240, 174, 68, 247, 2, 95, 106, 173, 104, 173, 73, 133, 174, 64, 164, 216, 90, 210, + 114, 220, 105, 214, 232, 155, 157, 25, 103, 74, 72, 105, 6, 96, 3, 14, 168, 64, 0, 14, 74, + 135, 241, 67, 80, 41, 107, 78, 44, 201, 206, 166, 31, 187, 72, 108, 29, 225, 109, 224, 21, + 189, 198, 90, 74, 132, 170, 45, 181, 124, 167, 55, 178, 176, 245, 3, 121, 248, 88, 55, 57, + 84, 168, 38, 97, 212, 185, 211, 75, 237, 191, 148, 160, 19, 135, 106, 139, 109, 230, 29, + 169, 143, 40, 88, 55, 125, 152, 99, 211, 249, 146, 19, 108, 39, 87, 216, 202, 57, 193, 173, + 54, 237, 94, 199, 249, 113, 60, 220, 114, 138, 147, 252, 244, 211, 29, 16, 5, 38, 145, 100, + 157, 148, 247, 85, 131, 132, 234, 56, 134, 249, 247, 149, 240, 128, 102, 26, 196, 211, 204, + 188, 41, 146, 59, 238, 60, 118, 144, 97, 167, 147, 181, 37, 102, 245, 87, 136, 136, 183, + 194, 224, 215, 16, 26, 176, 144, 248, 243, 216, 151, 198, 208, 45, 25, 158, 102, 99, 84, + 117, 57, 69, 24, 248, 8, 171, 198, 227, 0, 97, 117, 125, 39, 56, 98, 7, 78, 237, 199, 252, + 17, 128, 
35, 21, 191, 162, 252, 97, 165, 185, 20, 182, 71, 25, 192, 235, 193, 9, 108, 32, + 162, 82, 45, 226, 247, 149, 106, 183, 197, 123, 208, 64, 32, 231, 114, 123, 249, 204, 31, + 126, 229, 191, 93, 154, 255, 251, 163, 84, 109, 176, 62, 9, 91, 54, 135, 174, 214, 219, + 244, 55, 159, 138, 47, 141, 95, 225, 222, 146, 81, 136, 14, 49, 71, 142, 141, 198, 178, + 143, 42, 126, 67, 81, 73, 244, 110, 174, 53, 160, 244, 221, 117, 44, 13, 86, 181, 20, 163, + 145, 49, 126, 196, 202, 24, 55, 167, 72, 21, 88, 64, 190, 217, 248, 29, 46, 25, 147, 233, + 239, 109, 41, 178, 123, 54, 78, 205, 11, 77, 66, 172, 204, 254, 48, 206, 14, 170, 65, 25, + 4, 88, 231, 174, 21, 231, 98, 191, 171, 70, 249, 226, 12, 15, 173, 103, 0, 52, 83, 81, 236, + 239, 45, 4, 3, 243, 172, 42, 73, 19, 244, 141, 114, 119, 169, 73, 185, 221, 81, 46, 251, + 25, 248, 147, 227, 140, 25, 133, 123, 209, 122, 68, 120, 188, 68, 131, 220, 204, 48, 59, + 131, 50, 0, 63, 164, 97, 145, 194, 192, 196, 187, 250, 118, 225, 50, 121, 49, 17, 98, 221, + 231, 79, 222, 91, 99, 124, 27, 226, 73, 141, 116, 188, 90, 55, 204, 111, 53, 54, 36, 225, + 24, 179, 36, 97, 25, 224, 43, 204, 164, 245, 213, 62, 220, 246, 111, 166, 164, 10, 239, 90, + 231, 156, 71, 192, 68, 16, 48, 152, 110, 158, 120, 170, 117, 188, 129, 88, 26, 66, 1, 172, + 32, 102, 243, 220, 12, 165, 248, 190, 83, 143, 22, 45, 166, 182, 87, 198, 76, 59, 235, 90, + 82, 147, 207, 219, 77, 15, 175, 33, 94, 132, 155, 247, 0, 233, 108, 219, 2, 115, 43, 239, + 200, 102, 48, 22, 29, 127, 98, 33, 131, 111, 232, 27, 45, 119, 14, 138, 8, 165, 210, 165, + 59, 92, 49, 87, 238, 157, 19, 109, 116, 34, 152, 203, 239, 125, 242, 148, 246, 83, 229, + 231, 67, 221, 170, 75, 187, 111, 96, 105, 206, 20, 123, 222, 223, 9, 33, 251, 167, 255, 69, + 65, 196, 201, 26, 44, 149, 169, 81, 133, 65, 107, 143, 168, 33, 169, 39, 12, 34, 37, 116, + 244, 21, 198, 162, 120, 221, 110, 71, 169, 248, 173, 37, 178, 128, 28, 26, 113, 92, 144, + 75, 42, 223, 212, 129, 210, 141, 135, 166, 193, 192, 97, 52, 222, 254, 23, 
148, 9, 112, + 114, 64, 84, 12, 0, 125, 189, 173, 232, 223, 134, 210, 13, 236, 70, 153, 231, 212, 20, 224, + 146, 173, 139, 57, 190, 134, 118, 153, 147, 141, 236, 92, 50, 237, 88, 178, 184, 155, 140, + 69, 133, 101, 211, 123, 101, 137, 130, 38, 50, 225, 155, 224, 53, 213, 65, 112, 198, 206, + 81, 96, 203, 29, 141, 206, 29, 232, 124, 216, 35, 8, 62, 149, 236, 178, 118, 57, 28, 94, + 86, 43, 220, 48, 108, 137, 108, 195, 130, 28, 212, 104, 37, 93, 83, 174, 52, 92, 137, 136, + 245, 238, 54, 132, 123, 121, 136, 105, 15, 23, 185, 141, 208, 44, 117, 156, 169, 74, 234, + 83, 135, 181, 51, 173, 98, 228, 101, 216, 166, 90, 42, 211, 115, 53, 198, 255, 53, 31, 204, + 244, 146, 83, 100, 59, 103, 100, 81, 139, 131, 47, 109, 149, 157, 235, 100, 64, 188, 73, + 77, 255, 230, 192, 121, 88, 233, 145, 46, 214, 145, 195, 177, 107, 1, 169, 44, 54, 69, 197, + 68, 16, 16, 211, 16, 41, 134, 144, 71, 40, 46, 249, 160, 144, 31, 188, 218, 244, 180, 195, + 35, 145, 40, 126, 240, 86, 250, 200, 74, 203, 70, 69, 84, 247, 148, 178, 102, 232, 217, 29, + 244, 63, 41, 26, 206, 12, 208, 188, 160, 66, 96, 29, 86, 185, 152, 172, 183, 67, 182, 163, + 158, 61, 102, 22, 162, 176, 127, 184, 115, 86, 214, 84, 179, 215, 43, 8, 32, 27, 18, 198, + 40, 69, 143, 122, 55, 55, 223, 36, 87, 74, 116, 38, 173, 95, 4, 148, 104, 217, 54, 158, 80, + 118, 68, 201, 80, 167, 172, 236, 11, 8, 3, 182, 231, 105, 248, 122, 83, 91, 190, 108, 124, + 232, 144, 34, 30, 140, 1, 223, 30, 13, 33, 206, 8, 1, 116, 227, 249, 185, 192, 4, 233, 99, + 7, 81, 144, 170, 102, 29, 7, 5, 87, 164, 208, 142, 13, 147, 99, 116, 47, 176, 157, 71, 76, + 229, 175, 151, 190, 117, 238, 161, 140, 126, 154, 114, 180, 109, 197, 60, 160, 11, 117, + 113, 139, 72, 90, 145, 223, 126, 26, 201, 46, 122, 157, 144, 164, 170, 154, 74, 54, 76, + 213, 67, 33, 232, 148, 219, 175, 216, 59, 68, 148, 51, 30, 162, 24, 112, 162, 40, 8, 181, + 90, 210, 95, 250, 35, 209, 11, 162, 205, 2, 241, 95, 18, 61, 11, 6, 90, 79, 128, 18, 110, + 247, 101, 192, 234, 27, 38, 155, 204, 
170, 160, 194, 35, 212, 120, 110, 174, 173, 19, 10, + 66, 171, 122, 206, 171, 1, 209, 22, 166, 136, 66, 166, 203, 93, 90, 27, 82, 13, 158, 240, + 229, 146, 152, 71, 67, 152, 107, 142, 44, 196, 232, 54, 36, 188, 124, 55, 50, 233, 162, 60, + 143, 197, 182, 77, 110, 109, 87, 254, 90, 79, 17, 102, 226, 93, 222, 158, 110, 239, 210, + 198, 179, 5, 50, 42, 156, 176, 5, 177, 27, 249, 68, 0, 187, 12, 115, 74, 112, 137, 39, 176, + 122, 69, 69, 138, 17, 215, 146, 56, 43, 68, 4, 249, 214, 116, 40, 27, 97, 79, 13, 190, 147, + 73, 27, 111, 70, 178, 119, 109, 108, 188, 243, 136, 192, 39, 30, 74, 240, 247, 103, 121, + 55, 51, 207, 225, 5, 98, 186, 68, 147, 229, 160, 13, 92, 57, 193, 90, 130, 235, 169, 255, + 92, 162, 111, 143, 244, 67, 83, 68, 23, 140, 96, 32, 91, 24, 238, 76, 85, 235, 104, 111, + 221, 13, 159, 85, 162, 98, 242, 60, 137, 64, 25, 248, 161, 233, 176, 134, 117, 35, 128, 69, + 199, 149, 178, 73, 219, 5, 27, 165, 40, 31, 28, 17, 2, 217, 119, 10, 80, 102, 140, 22, 214, + 243, 96, 35, 87, 61, 225, 211, 101, 235, 249, 232, 211, 175, 52, 136, 197, 21, 125, 175, + 194, 6, 252, 236, 9, 18, 139, 76, 85, 88, 173, 19, 238, 191, 220, 28, 29, 225, 9, 88, 23, + 211, 214, 102, 232, 121, 143, 243, 133, 66, 152, 113, 159, 120, 114, 177, 77, 51, 44, 176, + 78, 165, 41, 182, 156, 139, 101, 89, 0, 43, 60, 178, 117, 161, 19, 17, 29, 9, 71, 38, 75, + 70, 176, 161, 253, 47, 35, 75, 129, 198, 163, 219, 120, 3, 49, 32, 70, 134, 81, 62, 62, + 236, 194, 82, 106, 132, 113, 138, 5, 40, 178, 48, 80, 151, 204, 209, 117, 201, 208, 89, + 209, 130, 50, 66, 137, 242, 219, 157, 9, 186, 112, 202, 119, 138, 43, 241, 249, 46, 200, + 126, 31, 27, 3, 119, 210, 152, 232, 126, 119, 116, 239, 79, 245, 79, 189, 48, 234, 24, 193, + 98, 172, 103, 103, 7, 188, 95, 183, 218, 250, 187, 230, 48, 173, 251, 206, 246, 244, 233, + 56, 163, 21, 23, 155, 101, 135, 29, 94, 4, 27, 194, 130, 146, 177, 228, 245, 191, 85, 9, + 243, 101, 174, 242, 230, 183, 221, 66, 169, 52, 217, 74, 73, 47, 16, 174, 199, 174, 236, + 138, 59, 
88, 131, 209, 190, 146, 212, 5, 126, 237, 23, 43, 112, 107, 85, 53, 245, 47, 214, + 10, 165, 155, 170, 227, 242, 91, 253, 25, 151, 177, 80, 22, 69, 42, 197, 119, 193, 18, 70, + 115, 36, 18, 118, 57, 93, 194, 23, 84, 154, 40, 66, 194, 238, 31, 241, 251, 215, 147, 250, + 144, 65, 168, 67, 128, 213, 2, 210, 39, 193, 34, 4, 29, 111, 112, 25, 249, 169, 1, 157, + 135, 246, 133, 148, 202, 202, 72, 208, 231, 115, 19, 123, 246, 9, 168, 182, 85, 181, 26, + 184, 157, 236, 238, 146, 87, 141, 13, 30, 82, 23, 22, 76, 35, 123, 61, 53, 148, 248, 52, + 49, 84, 72, 238, 109, 106, 69, 159, 230, 51, 229, 164, 61, 192, 192, 52, 172, 173, 121, 35, + 209, 109, 154, 199, 51, 45, 255, 29, 66, 154, 251, 78, 14, 205, 6, 166, 20, 118, 225, 82, + 242, 122, 89, 184, 105, 91, 232, 232, 36, 254, 8, 147, 188, 247, 201, 227, 123, 132, 66, + 178, 130, 166, 246, 162, 213, 195, 204, 53, 230, 201, 219, 161, 254, 65, 134, 224, 52, 61, + 92, 115, 135, 127, 130, 225, 130, 188, 60, 35, 146, 126, 246, 50, 216, 171, 206, 209, 193, + 222, 29, 57, 236, 252, 61, 184, 236, 74, 180, 23, 197, 160, 46, 25, 22, 144, 114, 224, 53, + 164, 147, 179, 190, 98, 200, 197, 50, 153, 163, 153, 240, 205, 73, 94, 129, 40, 225, 2, + 132, 61, 241, 227, 99, 9, 206, 10, 244, 61, 16, 2, 204, 37, 103, 242, 104, 28, 132, 115, + 153, 186, 27, 102, 215, 98, 254, 149, 188, 50, 52, 184, 139, 133, 54, 210, 81, 77, 215, + 134, 70, 70, 56, 83, 64, 237, 5, 175, 232, 241, 64, 34, 237, 149, 117, 112, 136, 26, 240, + 126, 71, 131, 27, 248, 73, 249, 150, 227, 212, 13, 141, 158, 95, 83, 46, 225, 74, 116, 112, + 90, 252, 58, 102, 95, 58, 186, 138, 60, 204, 74, 0, 235, 103, 141, 117, 40, 48, 175, 208, + 192, 30, 88, 253, 35, 1, 212, 172, 63, 176, 12, 149, 222, 34, 41, 71, 192, 86, 224, 88, + 217, 16, 133, 235, 4, 70, 205, 25, 154, 138, 33, 243, 32, 176, 250, 65, 82, 107, 42, 184, + 230, 159, 165, 30, 54, 62, 111, 235, 222, 214, 173, 232, 15, 22, 52, 10, 66, 205, 42, 11, + 144, 212, 214, 45, 177, 229, 128, 5, 189, 248, 35, 169, 215, 78, 15, 149, 188, 
162, 209, + 253, 193, 239, 244, 183, 37, 233, 157, 190, 203, 172, 86, 163, 49, 91, 41, 166, 98, 137, + 88, 31, 76, 157, 204, 185, 162, 36, 169, 111, 212, 107, 59, 47, 16, 193, 109, 235, 128, + 230, 166, 239, 157, 14, 23, 13, 176, 237, 152, 255, 147, 93, 114, 117, 39, 23, 178, 75, 67, + 183, 124, 38, 140, 121, 186, 74, 115, 178, 9, 197, 118, 1, 23, 222, 221, 151, 92, 64, 145, + 130, 131, 245, 205, 21, 189, 155, 201, 244, 155, 239, 101, 67, 186, 249, 9, 55, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 192, 52, + 174, 242, 81, 205, 211, 61, 25, 139, 159, 177, 105, 41, 30, 42, 213, 46, 197, 53, 167, 58, + 7, 239, 29, 185, 82, 211, 68, 212, 184, 107, 226, 41, 43, 224, 22, 195, 136, 195, 83, 229, + 114, 175, 66, 156, 29, 234, 244, 207, 2, 130, 205, 111, 77, 101, 218, 137, 131, 12, 134, + 83, 223, 104, 127, 53, 221, 160, 131, 7, 177, 14, 230, 144, 57, 252, 186, 129, 17, 232, + 197, 196, 98, 209, 1, 45, 125, 38, 79, 78, 79, 205, 86, 137, 34, 148, 236, 104, 255, 148, + 126, 139, 170, 152, 81, 124, 241, 99, 85, 64, 219, 190, 41, 26, 241, 189, 46, 120, 146, + 174, 125, 38, 89, 232, 191, 159, 41, 60, 59, 124, 3, 83, 90, 193, 179, 178, 218, 237, 71, + 118, 84, 146, 132, 171, 138, 85, 203, 68, 215, 196, 179, 144, 14, 177, 7, 131, 178, 35, + 178, 60, 195, 206, 133, 204, 114, 45, 244, 85, 215, 54, 6, 195, 13, 21, 169, 53, 170, 193, + 40, 111, 41, 129, 135, 1, 7, 231, 11, 30, 182, 1, 49, 39, 153, 125, 42, 194, 181, 161, 31, + 225, 12, 7, 203, 136, 56, 221, 249, 243, 10, 70, 40, 7, 185, 44, 130, 246, 110, 117, 80, + 156, 16, 107, 9, 208, 52, 225, 196, 107, 78, 99, 182, 163, 75, 85, 211, 230, 155, 237, 90, + 188, 158, 228, 168, 219, 214, 1, 214, 10, 208, 65, 24, 6, 106, 197, 31, 24, 182, 90, 212, + 193, 164, 165, 72, 252, 129, 220, 23, 60, 110, 87, 37, 248, 212, 75, 104, 132, 28, 240, + 211, 62, 28, 98, 56, 14, 84, 116, 201, 72, 203, 112, 226, 194, 235, 85, 186, 124, 193, 204, + 158, 88, 88, 154, 237, 87, 112, 209, 204, 251, 167, 218, 
9, 3, 92, 176, 47, 83, 94, 17, + 116, 73, 210, 76, 89, 65, 53, 117, 81, 135, 24, 123, 72, 228, 209, 12, 130, 113, 64, 15, 0, + 129, 145, 180, 246, 137, 98, 85, 142, 28, 11, 98, 31, 212, 141, 27, 126, 36, 194, 225, 96, + 107, 28, 56, 46, 129, 86, 211, 202, 198, 8, 130, 145, 168, 251, 168, 47, 190, 124, 154, 7, + 24, 236, 155, 6, 216, 2, 52, 183, 14, 81, 237, 145, 106, 163, 57, 70, 110, 201, 195, 76, + 225, 169, 141, 27, 123, 4, 168, 127, 46, 127, 235, 142, 233, 238, 31, 216, 199, 11, 226, + 248, 73, 60, 46, 31, 130, 42, 199, 120, 19, 114, 194, 175, 145, 98, 24, 241, 112, 251, 162, + 211, 147, 73, 105, 44, 119, 124, 155, 203, 208, 30, 84, 236, 50, 175, 194, 84, 2, 84, 57, + 101, 137, 115, 18, 205, 178, 85, 140, 239, 187, 40, 123, 255, 215, 212, 221, 248, 8, 108, + 82, 221, 132, 14, 109, 202, 131, 101, 219, 111, 5, 104, 94, 147, 8, 157, 43, 127, 65, 229, + 59, 138, 201, 160, 29, 119, 166, 145, 221, 160, 237, 117, 188, 140, 19, 233, 86, 27, 112, + 176, 201, 67, 209, 58, 131, 86, 64, 101, 220, 165, 73, 191, 134, 42, 233, 90, 100, 18, 227, + 209, 125, 238, 13, 77, 212, 120, 177, 12, 53, 15, 253, 142, 192, 182, 156, 74, 171, 128, + 12, 35, 10, 156, 182, 209, 168, 84, 192, 114, 145, 152, 124, 207, 225, 128, 108, 54, 189, + 60, 250, 115, 88, 184, 129, 7, 54, 68, 99, 212, 251, 57, 193, 136, 36, 125, 173, 9, 110, + 84, 241, 59, 165, 113, 22, 152, 51, 85, 215, 160, 211, 164, 74, 19, 92, 36, 28, 182, 244, + 9, 168, 250, 79, 123, 164, 160, 207, 240, 29, 29, 91, 38, 31, 188, 16, 228, 118, 149, 198, + 198, 123, 14, 133, 20, 110, 176, 129, 138, 94, 248, 218, 77, 108, 191, 3, 141, 69, 248, + 163, 99, 44, 87, 208, 18, 202, 33, 184, 40, 2, 222, 223, 120, 144, 93, 173, 140, 217, 214, + 57, 15, 78, 141, 161, 248, 165, 4, 189, 14, 213, 23, 46, 113, 141, 18, 31, 137, 224, 228, + 19, 62, 168, 70, 248, 204, 171, 125, 122, 155, 19, 50, 210, 89, 69, 6, 23, 241, 173, 182, + 133, 86, 219, 241, 9, 215, 247, 155, 68, 18, 22, 107, 167, 120, 118, 169, 9, 221, 231, 153, + 100, 199, 129, 43, 239, 
122, 102, 79, 161, 133, 75, 74, 184, 210, 30, 175, 5, 20, 8, 48, + 208, 5, 219, 33, 224, 96, 252, 1, 42, 19, 38, 147, 44, 197, 172, 222, 84, 156, 79, 217, + 103, 46, 60, 174, 144, 121, 208, 111, 207, 54, 16, 163, 104, 156, 10, 207, 160, 151, 193, + 121, 38, 251, 43, 200, 28, 137, 109, 22, 140, 17, 13, 95, 69, 119, 61, 222, 131, 92, 105, + 130, 56, 96, 236, 209, 218, 51, 172, 213, 244, 44, 64, 60, 88, 192, 199, 41, 83, 141, 237, + 224, 62, 187, 134, 251, 89, 48, 18, 12, 121, 63, 28, 218, 86, 160, 86, 109, 99, 206, 2, + 236, 50, 26, 239, 237, 63, 12, 102, 84, 252, 215, 145, 131, 231, 49, 212, 54, 43, 99, 224, + 120, 178, 36, 106, 99, 92, 77, 4, 176, 3, 99, 0, 188, 98, 168, 58, 123, 240, 159, 102, 55, + 108, 176, 198, 49, 8, 141, 220, 122, 216, 43, 55, 35, 106, 133, 53, 123, 69, 84, 167, 23, + 9, 209, 194, 29, 186, 63, 184, 71, 241, 49, 33, 47, 88, 39, 65, 91, 85, 92, 198, 126, 212, + 129, 102, 167, 100, 174, 89, 228, 134, 42, 212, 208, 194, 252, 67, 150, 198, 132, 209, 16, + 209, 249, 44, 238, 138, 179, 128, 92, 6, 197, 10, 134, 97, 232, 147, 219, 143, 72, 209, 83, + 35, 210, 182, 162, 183, 213, 125, 47, 156, 218, 18, 194, 137, 234, 117, 231, 112, 250, 48, + 146, 123, 141, 174, 61, 213, 177, 251, 162, 214, 211, 122, 171, 155, 211, 117, 228, 198, + 235, 155, 225, 14, 85, 57, 54, 8, 168, 75, 216, 68, 135, 19, 133, 24, 26, 138, 111, 72, 0, + 250, 101, 237, 246, 137, 8, 38, 83, 123, 237, 250, 104, 106, 31, 217, 235, 103, 177, 73, + 174, 208, 233, 230, 69, 77, 161, 21, 10, 223, 157, 62, 232, 133, 80, 117, 214, 254, 247, + 93, 53, 45, 130, 226, 118, 216, 149, 78, 143, 25, 30, 110, 134, 48, 72, 1, 107, 197, 211, + 48, 235, 82, 179, 54, 95, 171, 91, 36, 188, 53, 85, 24, 216, 236, 79, 62, 90, 52, 157, 175, + 171, 127, 132, 118, 187, 200, 5, 60, 22, 6, 117, 78, 242, 169, 102, 46, 74, 185, 188, 138, + 221, 79, 49, 13, 247, 196, 176, 199, 53, 220, 208, 175, 132, 8, 186, 175, 76, 125, 227, 33, + 44, 193, 209, 17, 52, 110, 227, 212, 139, 93, 154, 218, 220, 57, 5, 255, 119, 252, 7, 
69, + 145, 109, 118, 76, 45, 48, 20, 218, 123, 26, 193, 203, 254, 58, 206, 189, 91, 87, 28, 112, + 27, 204, 145, 8, 8, 203, 83, 199, 179, 70, 32, 208, 8, 44, 44, 217, 69, 93, 32, 74, 13, + 105, 100, 232, 134, 89, 67, 98, 52, 242, 113, 109, 36, 244, 169, 115, 146, 186, 48, 75, + 128, 102, 149, 174, 100, 5, 87, 23, 88, 161, 184, 135, 198, 80, 194, 179, 150, 4, 182, 153, + 124, 63, 34, 251, 63, 73, 37, 18, 188, 143, 151, 211, 2, 11, 182, 108, 6, 109, 9, 192, 85, + 67, 251, 113, 129, 185, 10, 81, 246, 134, 57, 249, 181, 39, 2, 172, 177, 69, 221, 100, 77, + 234, 159, 108, 192, 187, 22, 239, 204, 26, 94, 199, 59, 193, 124, 97, 107, 238, 43, 72, 36, + 172, 173, 100, 3, 161, 92, 128, 18, 51, 253, 186, 99, 120, 241, 227, 110, 163, 21, 128, 76, + 0, 148, 199, 82, 165, 71, 150, 145, 226, 233, 70, 52, 185, 206, 113, 110, 186, 143, 46, + 101, 113, 96, 134, 203, 115, 46, 55, 192, 157, 132, 213, 103, 74, 56, 60, 226, 172, 157, + 32, 97, 223, 104, 48, 125, 83, 201, 108, 78, 237, 12, 215, 156, 61, 116, 134, 118, 63, 200, + 81, 7, 47, 77, 30, 241, 4, 137, 72, 8, 91, 145, 131, 254, 195, 186, 45, 197, 110, 40, 219, + 97, 152, 169, 225, 55, 192, 230, 111, 60, 241, 161, 50, 177, 243, 51, 234, 106, 27, 142, 1, + 189, 193, 154, 186, 97, 14, 148, 58, 207, 191, 184, 68, 201, 243, 178, 192, 239, 119, 184, + 134, 119, 241, 66, 223, 13, 66, 95, 251, 229, 94, 98, 209, 155, 193, 245, 65, 137, 119, 28, + 21, 248, 157, 7, 38, 235, 152, 149, 26, 232, 137, 163, 244, 168, 119, 255, 98, 154, 244, + 184, 192, 128, 41, 22, 106, 88, 67, 221, 88, 250, 13, 146, 186, 159, 222, 62, 120, 192, + 209, 111, 237, 74, 128, 194, 125, 45, 48, 161, 135, 15, 178, 28, 138, 107, 200, 198, 196, + 70, 193, 133, 132, 133, 166, 177, 202, 99, 13, 217, 192, 252, 46, 121, 76, 53, 138, 194, + 89, 188, 116, 79, 121, 226, 110, 5, 152, 64, 100, 50, 210, 195, 168, 80, 1, 241, 155, 92, + 104, 113, 111, 242, 97, 185, 175, 105, 220, 50, 208, 175, 77, 147, 100, 181, 2, 190, 96, + 109, 124, 113, 30, 81, 143, 151, 17, 71, 202, 252, 10, 
155, 190, 218, 183, 185, 200, 67, + 199, 111, 150, 251, 13, 147, 58, 133, 186, 141, 249, 64, 223, 148, 110, 28, 236, 49, 22, + 57, 17, 73, 146, 239, 146, 30, 234, 88, 167, 108, 235, 12, 88, 131, 33, 37, 202, 137, 81, + 186, 89, 6, 6, 72, 210, 191, 90, 124, 235, 98, 144, 97, 245, 49, 67, 169, 106, 190, 157, + 56, 227, 0, 196, 55, 177, 246, 5, 171, 178, 130, 214, 132, 191, 0, 218, 236, 129, 191, 155, + 129, 241, 33, 208, 156, 254, 132, 121, 163, 189, 231, 51, 180, 21, 190, 87, 29, 5, 102, + 185, 218, 136, 248, 113, 65, 209, 238, 177, 109, 248, 167, 189, 241, 181, 115, 24, 89, 95, + 67, 115, 211, 244, 218, 63, 79, 148, 168, 238, 47, 110, 82, 130, 96, 55, 120, 139, 149, + 120, 204, 85, 249, 232, 88, 139, 152, 111, 170, 143, 209, 129, 105, 87, 50, 12, 192, 50, + 227, 191, 29, 194, 102, 243, 193, 142, 62, 49, 248, 188, 92, 11, 60, 228, 133, 100, 209, 8, + 246, 85, 143, 169, 197, 81, 169, 72, 51, 102, 19, 210, 249, 185, 78, 200, 179, 42, 40, 111, + 94, 84, 221, 156, 90, 2, 216, 95, 53, 92, 217, 172, 192, 97, 130, 77, 87, 163, 65, 112, 45, + 250, 81, 131, 95, 4, 68, 136, 139, 240, 43, 141, 251, 195, 36, 31, 179, 197, 10, 76, 230, + 247, 154, 134, 215, 76, 134, 240, 248, 151, 186, 16, 253, 99, 129, 147, 47, 55, 53, 116, + 139, 65, 64, 44, 29, 168, 200, 169, 8, 118, 38, 100, 162, 124, 48, 46, 137, 246, 255, 39, + 156, 70, 228, 139, 6, 35, 67, 229, 235, 18, 114, 66, 206, 217, 236, 11, 86, 119, 106, 120, + 177, 150, 118, 52, 6, 204, 14, 4, 196, 40, 149, 120, 162, 11, 143, 168, 138, 248, 243, 62, + 123, 15, 78, 255, 23, 100, 206, 115, 143, 144, 114, 210, 165, 115, 161, 73, 211, 146, 104, + 185, 127, 166, 117, 50, 63, 25, 242, 101, 119, 219, 108, 208, 71, 204, 116, 138, 226, 175, + 178, 243, 52, 238, 124, 85, 161, 173, 70, 233, 141, 108, 161, 84, 14, 226, 235, 12, 28, 16, + 249, 79, 198, 120, 192, 204, 17, 138, 231, 159, 81, 25, 130, 216, 29, 246, 62, 212, 113, + 237, 67, 131, 145, 221, 189, 143, 137, 244, 17, 214, 165, 13, 205, 92, 172, 241, 8, 205, + 146, 131, 246, 97, 210, 
91, 107, 167, 251, 32, 155, 208, 141, 74, 184, 228, 40, 237, 171, + 12, 29, 152, 31, 144, 168, 66, 216, 171, 187, 23, 118, 55, 159, 194, 137, 51, 119, 14, 95, + 153, 62, 186, 38, 69, 119, 144, 155, 50, 15, 54, 37, 88, 32, 225, 3, 105, 136, 254, 28, + 104, 210, 231, 16, 238, 109, 37, 117, 150, 0, 253, 121, 33, 14, 83, 178, 187, 112, 57, 96, + 197, 72, 0, 164, 107, 236, 45, 138, 56, 214, 34, 5, 141, 86, 227, 107, 107, 110, 121, 191, + 132, 151, 203, 91, 158, 82, 180, 65, 249, 109, 94, 169, 45, 25, 99, 86, 87, 79, 140, 44, + 141, 40, 166, 128, 206, 146, 244, 31, 200, 192, 125, 35, 161, 202, 241, 225, 82, 44, 114, + 189, 42, 135, 50, 99, 166, 200, 107, 92, 178, 232, 4, 88, 33, 27, 122, 192, 174, 138, 147, + 154, 54, 111, 75, 46, 159, 73, 34, 154, 255, 102, 16, 123, 161, 222, 172, 154, 208, 206, + 23, 69, 76, 84, 152, 153, 178, 151, 12, 41, 104, 98, 214, 142, 202, 114, 183, 30, 242, 8, + 237, 182, 133, 229, 57, 209, 32, 164, 139, 247, 137, 6, 42, 127, 100, 170, 141, 113, 52, + 13, 52, 137, 66, 237, 43, 61, 149, 191, 124, 73, 232, 14, 109, 168, 151, 164, 237, 141, 6, + 228, 197, 168, 143, 4, 171, 113, 28, 46, 228, 101, 111, 198, 205, 32, 110, 35, 49, 127, 95, + 207, 11, 155, 221, 27, 85, 188, 14, 127, 211, 19, 208, 139, 67, 182, 153, 229, 29, 214, + 156, 191, 29, 10, 66, 187, 194, 168, 194, 246, 234, 118, 170, 194, 82, 179, 242, 24, 58, + 142, 24, 232, 103, 109, 114, 210, 10, 237, 8, 201, 66, 125, 245, 52, 129, 60, 10, 211, 199, + 73, 152, 116, 175, 216, 111, 213, 141, 162, 172, 78, 204, 92, 182, 12, 158, 165, 129, 98, + 54, 130, 236, 12, 47, 203, 63, 133, 20, 225, 128, 178, 150, 84, 94, 228, 236, 54, 44, 68, + 65, 204, 84, 216, 7, 253, 22, 254, 225, 203, 108, 200, 59, 166, 91, 70, 119, 185, 166, 175, + 76, 129, 175, 203, 58, 205, 39, 126, 15, 239, 104, 20, 95, 127, 161, 196, 28, 88, 51, 11, + 232, 44, 119, 45, 25, 32, 240, 237, 147, 222, 74, 129, 161, 39, 7, 235, 65, 113, 108, 75, + 194, 117, 54, 46, 48, 252, 23, 145, 253, 146, 26, 193, 71, 31, 93, 190, 129, 74, 30, 64, 
+ 22, 204, 73, 25, 56, 130, 29, 202, 84, 67, 84, 150, 164, 154, 229, 67, 49, 125, 31, 114, + 44, 74, 83, 199, 26, 129, 218, 21, 91, 180, 169, 142, 104, 114, 150, 34, 97, 39, 153, 115, + 240, 90, 71, 179, 198, 137, 15, 66, 82, 243, 142, 188, 61, 73, 39, 162, 29, 127, 183, 2, + 204, 3, 194, 51, 47, 29, 7, 155, 6, 98, 225, 25, 98, 224, 30, 47, 152, 184, 23, 80, 115, + 202, 48, 161, 23, 137, 32, 230, 210, 45, 42, 173, 117, 116, 181, 11, 250, 108, 100, 129, + 45, 98, 77, 208, 215, 113, 103, 107, 140, 17, 213, 253, 202, 22, 158, 153, 224, 216, 53, + 131, 125, 60, 138, 229, 199, 236, 168, 121, 174, 126, 204, 113, 113, 66, 147, 2, 58, 143, + 50, 120, 123, 27, 55, 132, 118, 255, 152, 128, 137, 97, 145, 144, 164, 102, 71, 95, 59, 57, + 109, 99, 172, 77, 159, 69, 81, 107, 38, 58, 81, 173, 30, 17, 123, 58, 208, 159, 212, 204, + 195, 146, 48, 129, 223, 62, 74, 34, 161, 188, 185, 150, 124, 2, 175, 205, 241, 202, 161, 7, + 50, 19, 78, 41, 248, 70, 9, 211, 53, 242, 147, 222, 158, 230, 133, 149, 90, 83, 177, 133, + 78, 189, 246, 129, 242, 39, 93, 171, 33, 26, 131, 211, 140, 34, 230, 172, 138, 61, 193, + 246, 253, 205, 210, 92, 248, 181, 219, 3, 47, 200, 24, 91, 124, 122, 15, 213, 192, 115, + 177, 196, 142, 141, 121, 98, 28, 202, 32, 156, 46, 6, 34, 21, 164, 212, 186, 205, 85, 161, + 61, 176, 237, 250, 33, 184, 250, 248, 154, 133, 16, 186, 207, 2, 199, 145, 74, 196, 234, + 201, 66, 76, 221, 85, 66, 241, 112, 97, 15, 52, 249, 211, 90, 132, 14, 74, 167, 81, 28, + 184, 221, 51, 158, 96, 161, 144, 80, 7, 154, 174, 61, 234, 66, 93, 40, 229, 187, 186, 239, + 121, 90, 137, 103, 229, 48, 116, 19, 212, 231, 195, 114, 57, 28, 234, 149, 211, 73, 204, + 202, 52, 71, 112, 132, 143, 103, 10, 83, 205, 185, 251, 62, 174, 243, 32, 121, 28, 252, 92, + 167, 231, 119, 110, 129, 14, 233, 37, 69, 120, 15, 90, 41, 124, 169, 196, 161, 137, 0, 217, + 44, 15, 93, 212, 239, 58, 181, 20, 176, 159, 162, 74, 229, 41, 99, 14, 145, 65, 31, 224, + 179, 30, 132, 226, 71, 18, 132, 48, 78, 33, 247, 86, 205, 28, 197, 
175, 204, 126, 44, 140, + 66, 13, 112, 183, 84, 3, 143, 214, 80, 133, 126, 145, 143, 34, 77, 23, 146, 177, 165, 151, + 70, 240, 223, 116, 115, 25, 212, 248, 27, 113, 47, 200, 90, 132, 56, 26, 94, 0, 76, 138, + 174, 112, 47, 54, 107, 193, 149, 73, 53, 142, 218, 91, 57, 238, 46, 178, 154, 71, 19, 189, + 166, 196, 94, 170, 218, 133, 118, 205, 225, 108, 153, 24, 237, 27, 111, 37, 90, 14, 100, + 15, 229, 18, 128, 213, 4, 155, 143, 166, 39, 142, 206, 78, 189, 91, 166, 159, 129, 30, 7, + 139, 72, 227, 164, 164, 161, 100, 94, 162, 55, 49, 248, 189, 179, 206, 146, 255, 91, 179, + 82, 6, 138, 24, 141, 171, 146, 217, 50, 135, 32, 203, 177, 27, 91, 24, 204, 252, 137, 121, + 90, 226, 43, 35, 226, 246, 38, 72, 212, 172, 188, 173, 69, 107, 148, 163, 88, 125, 144, + 127, 44, 41, 190, 71, 0, 88, 70, 175, 189, 233, 55, 206, 220, 185, 192, 197, 165, 193, 87, + 146, 249, 94, 72, 59, 57, 43, 52, 203, 194, 56, 221, 21, 130, 183, 78, 75, 154, 67, 18, 74, + 151, 201, 38, 202, 108, 61, 8, 29, 98, 62, 172, 77, 155, 227, 189, 10, 116, 179, 50, 165, + 152, 74, 209, 96, 200, 230, 180, 188, 191, 165, 245, 108, 36, 26, 37, 17, 167, 125, 120, + 111, 236, 211, 66, 183, 1, 251, 79, 34, 26, 121, 28, 136, 33, 149, 17, 174, 43, 206, 137, + 77, 10, 173, 243, 111, 73, 106, 241, 252, 70, 43, 147, 120, 40, 229, 121, 201, 69, 195, 63, + 43, 87, 45, 203, 174, 214, 181, 35, 46, 226, 47, 44, 56, 123, 56, 73, 166, 252, 144, 150, + 100, 73, 156, 177, 225, 105, 161, 41, 226, 162, 167, 132, 45, 62, 211, 104, 16, 89, 173, + 255, 70, 194, 254, 186, 130, 50, 135, 121, 197, 147, 84, 97, 49, 107, 26, 164, 189, 151, + 234, 39, 72, 117, 21, 92, 43, 119, 110, 45, 209, 172, 192, 131, 45, 2, 99, 179, 131, 248, + 69, 144, 223, 23, 151, 0, 196, 64, 161, 172, 49, 56, 114, 108, 68, 134, 160, 209, 154, 30, + 240, 206, 156, 140, 80, 70, 34, 218, 43, 187, 80, 209, 26, 166, 69, 4, 240, 135, 148, 68, + 64, 170, 244, 134, 32, 69, 87, 50, 135, 112, 91, 71, 0, 134, 245, 80, 30, 27, 12, 234, 212, + 223, 168, 27, 78, 19, 126, 77, 138, 
121, 75, 51, 47, 44, 5, 253, 227, 88, 41, 226, 189, + 183, 124, 184, 68, 210, 6, 171, 77, 78, 221, 154, 78, 229, 88, 147, 147, 21, 119, 13, 251, + 71, 89, 237, 224, 152, 21, 88, 234, 234, 130, 74, 119, 115, 27, 8, 139, 155, 243, 195, 29, + 196, 213, 5, 161, 117, 41, 246, 92, 204, 143, 186, 229, 59, 18, 76, 171, 224, 152, 224, 82, + 193, 115, 208, 9, 84, 177, 130, 178, 122, 251, 216, 39, 253, 73, 199, 154, 88, 236, 215, + 219, 154, 195, 11, 235, 165, 217, 212, 248, 65, 43, 0, 112, 35, 39, 18, 238, 212, 151, 187, + 53, 30, 235, 202, 217, 163, 177, 80, 70, 97, 90, 98, 174, 5, 163, 246, 219, 124, 216, 253, + 43, 114, 108, 69, 2, 242, 77, 85, 51, 91, 7, 226, 100, 238, 213, 183, 55, 47, 222, 65, 222, + 75, 49, 102, 29, 20, 10, 103, 232, 107, 0, 27, 247, 112, 177, 121, 185, 250, 215, 18, 78, + 186, 87, 112, 147, 225, 75, 236, 241, 197, 161, 123, 146, 132, 217, 240, 98, 201, 17, 53, + 82, 60, 237, 232, 92, 177, 174, 1, 51, 12, 54, 226, 215, 58, 61, 186, 15, 135, 103, 195, + 176, 10, 74, 110, 97, 106, 100, 112, 153, 16, 115, 248, 136, 36, 43, 245, 26, 211, 246, + 143, 10, 112, 14, 176, 86, 8, 62, 93, 100, 135, 160, 125, 244, 49, 17, 192, 63, 42, 110, + 36, 61, 93, 14, 16, 41, 98, 232, 84, 222, 245, 155, 24, 210, 135, 16, 84, 204, 214, 83, 67, + 255, 217, 117, 85, 99, 107, 3, 247, 112, 24, 143, 221, 99, 49, 222, 12, 113, 185, 146, 231, + 24, 10, 163, 80, 92, 208, 40, 35, 42, 4, 179, 164, 133, 228, 238, 37, 194, 173, 2, 132, + 221, 127, 26, 108, 192, 48, 29, 100, 252, 150, 41, 204, 9, 103, 1, 14, 158, 249, 155, 88, + 236, 122, 184, 21, 190, 12, 42, 239, 63, 211, 105, 218, 77, 199, 63, 86, 126, 40, 209, 229, + 23, 173, 73, 180, 54, 17, 136, 94, 9, 80, 145, 207, 18, 196, 81, 163, 232, 195, 15, 97, + 189, 3, 77, 171, 13, 209, 115, 43, 111, 139, 101, 26, 181, 212, 49, 27, 98, 60, 243, 94, + 127, 50, 134, 131, 80, 13, 83, 89, 166, 11, 230, 42, 243, 219, 131, 143, 64, 179, 1, 44, + 206, 1, 25, 71, 47, 102, 1, 174, 91, 96, 145, 66, 205, 155, 46, 175, 197, 35, 134, 19, 142, + 96, 
224, 80, 249, 187, 203, 133, 189, 178, 243, 148, 190, 210, 96, 235, 209, 39, 47, 228, + 34, 95, 211, 22, 212, 179, 144, 17, 7, 167, 105, 181, 197, 163, 21, 92, 227, 106, 99, 97, + 69, 82, 75, 81, 252, 134, 112, 88, 12, 199, 223, 255, 127, 120, 203, 252, 45, 155, 20, 111, + 208, 148, 38, 62, 26, 44, 198, 171, 101, 105, 55, 59, 34, 149, 250, 144, 241, 173, 52, 152, + 11, 206, 218, 250, 31, 106, 194, 66, 21, 172, 43, 23, 96, 152, 74, 41, 111, 46, 57, 67, 46, + 115, 157, 120, 32, 244, 181, 120, 138, 172, 192, 72, 145, 198, 4, 153, 118, 250, 158, 115, + 197, 143, 215, 34, 193, 194, 184, 38, 232, 194, 11, 238, 179, 12, 171, 247, 147, 122, 216, + 224, 15, 240, 70, 64, 139, 209, 176, 39, 37, 185, 63, 97, 103, 177, 114, 120, 230, 242, 13, + 34, 185, 145, 67, 129, 107, 21, 250, 38, 222, 4, 102, 72, 61, 149, 25, 82, 132, 101, 245, + 189, 223, 10, 48, 15, 181, 35, 98, 9, 193, 88, 8, 9, 178, 105, 94, 34, 146, 236, 134, 168, + 46, 201, 101, 93, 30, 11, 6, 30, 76, 87, 11, 46, 168, 244, 47, 150, 194, 127, 252, 157, + 124, 161, 197, 169, 21, 17, 224, 26, 55, 22, 112, 96, 0, 46, 220, 143, 129, 166, 108, 155, + 239, 158, 186, 125, 240, 44, 54, 141, 156, 129, 184, 216, 180, 189, 158, 14, 197, 64, 97, + 152, 154, 39, 159, 106, 74, 166, 252, 70, 203, 206, 30, 218, 69, 26, 139, 113, 26, 68, 177, + 99, 237, 180, 12, 66, 108, 95, 8, 136, 80, 81, 206, 131, 94, 203, 20, 229, 54, 14, 70, 50, + 176, 87, 128, 171, 187, 37, 118, 77, 147, 114, 186, 163, 3, 198, 64, 85, 226, 183, 5, 182, + 14, 213, 237, 27, 89, 154, 51, 218, 52, 253, 88, 56, 106, 3, 52, 10, 165, 125, 60, 174, + 246, 189, 50, 27, 253, 116, 62, 247, 131, 198, 18, 119, 202, 10, 60, 233, 29, 6, 46, 39, + 204, 152, 116, 182, 243, 99, 52, 230, 84, 207, 220, 58, 27, 222, 198, 85, 142, 186, 46, + 182, 163, 65, 94, 109, 13, 50, 127, 209, 124, 50, 2, 55, 128, 139, 183, 23, 15, 168, 159, + 36, 109, 138, 41, 12, 175, 132, 224, 65, 70, 52, 80, 2, 179, 46, 220, 218, 110, 187, 70, + 230, 200, 212, 235, 95, 35, 160, 155, 161, 34, 168, 200, 173, 
13, 3, 229, 241, 25, 186, + 115, 182, 157, 24, 88, 100, 80, 207, 59, 128, 100, 106, 141, 146, 170, 121, 110, 112, 25, + 5, 26, 101, 107, 30, 17, 72, 177, 162, 164, 54, 100, 210, 196, 191, 71, 5, 236, 4, 55, 242, + 132, 64, 101, 37, 124, 222, 15, 39, 229, 111, 76, 210, 171, 236, 7, 93, 14, 216, 248, 45, + 79, 218, 85, 211, 199, 250, 104, 33, 143, 222, 15, 88, 238, 164, 172, 135, 2, 101, 11, 255, + 112, 8, 238, 139, 63, 190, 149, 130, 215, 130, 110, 68, 83, 56, 239, 253, 120, 96, 196, 38, + 198, 153, 137, 110, 157, 87, 94, 69, 70, 176, 197, 220, 237, 73, 176, 217, 112, 186, 20, + 16, 151, 251, 244, 204, 120, 42, 216, 194, 250, 127, 191, 45, 87, 138, 204, 145, 110, 119, + 217, 185, 242, 50, 47, 102, 253, 107, 242, 121, 122, 204, 10, 232, 48, 214, 183, 190, 152, + 249, 7, 228, 139, 181, 252, 84, 123, 199, 146, 175, 30, 152, 54, 4, 162, 26, 19, 103, 112, + 216, 130, 148, 218, 40, 215, 53, 114, 87, 174, 194, 30, 148, 70, 183, 213, 103, 2, 24, 161, + 173, 212, 50, 52, 10, 46, 252, 205, 46, 192, 187, 105, 16, 187, 193, 255, 182, 92, 222, + 209, 193, 118, 129, 137, 218, 80, 242, 213, 89, 122, 224, 65, 158, 65, 104, 40, 15, 135, + 86, 159, 63, 139, 29, 195, 230, 20, 180, 82, 142, 98, 48, 228, 204, 22, 199, 47, 96, 30, + 125, 192, 222, 237, 243, 115, 222, 31, 182, 150, 172, 42, 17, 232, 108, 142, 241, 119, 143, + 215, 117, 221, 37, 188, 136, 68, 166, 53, 120, 198, 16, 59, 74, 184, 175, 120, 42, 72, 102, + 137, 171, 56, 141, 189, 173, 89, 128, 12, 114, 241, 189, 111, 136, 73, 132, 123, 93, 249, + 199, 130, 191, 251, 1, 251, 208, 58, 108, 177, 41, 72, 20, 202, 120, 187, 38, 8, 163, 75, + 63, 139, 91, 159, 26, 233, 162, 204, 152, 80, 228, 52, 96, 166, 8, 250, 206, 115, 97, 28, + 220, 48, 59, 19, 66, 12, 137, 203, 7, 95, 23, 107, 3, 185, 110, 57, 39, 117, 13, 68, 70, + 54, 238, 106, 230, 167, 242, 216, 208, 42, 154, 110, 41, 188, 51, 156, 153, 0, 199, 221, + 58, 21, 57, 68, 24, 114, 67, 93, 90, 0, 252, 188, 145, 7, 24, 55, 189, 176, 50, 106, 154, + 57, 17, 95, 223, 93, 17, 125, 
163, 80, 18, 22, 229, 212, 90, 138, 222, 86, 8, 125, 63, 229, + 145, 190, 200, 10, 39, 176, 9, 182, 211, 236, 122, 29, 134, 1, 186, 231, 227, 215, 194, 42, + 240, 7, 196, 0, 179, 211, 150, 193, 21, 137, 84, 32, 92, 218, 185, 7, 200, 34, 179, 138, + 244, 114, 167, 159, 23, 51, 124, 60, 253, 124, 75, 8, 65, 123, 59, 88, 219, 250, 242, 250, + 112, 27, 222, 143, 17, 107, 48, 44, 172, 13, 223, 78, 218, 107, 93, 235, 5, 208, 23, 179, + 176, 38, 213, 101, 154, 64, 132, 134, 83, 63, 77, 229, 126, 65, 142, 245, 167, 180, 96, 51, + 24, 148, 105, 243, 63, 186, 163, 124, 34, 50, 179, 29, 215, 142, 5, 239, 54, 7, 196, 36, + 81, 188, 247, 192, 3, 244, 124, 253, 168, 19, 233, 109, 231, 192, 175, 145, 117, 76, 63, + 70, 62, 40, 182, 78, 254, 130, 186, 51, 198, 159, 107, 19, 156, 134, 103, 50, 90, 103, 74, + 56, 89, 219, 199, 9, 95, 82, 84, 194, 128, 42, 20, 199, 148, 228, 7, 118, 119, 197, 45, + 128, 9, 170, 208, 53, 183, 193, 219, 5, 217, 212, 134, 223, 36, 133, 92, 186, 235, 96, 221, + 117, 0, 193, 177, 162, 117, 175, 70, 109, 31, 153, 192, 154, 149, 154, 14, 97, 150, 111, + 227, 28, 62, 28, 87, 9, 143, 179, 167, 61, 34, 56, 49, 214, 103, 226, 6, 252, 32, 41, 67, + 251, 82, 211, 79, 59, 118, 84, 13, 186, 17, 210, 208, 173, 195, 188, 134, 107, 74, 31, 134, + 203, 90, 191, 112, 36, 15, 24, 25, 235, 139, 143, 62, 206, 200, 184, 140, 76, 38, 33, 145, + 111, 208, 150, 137, 130, 192, 60, 198, 200, 136, 65, 8, 120, 228, 137, 230, 157, 86, 120, + 28, 249, 236, 246, 108, 236, 190, 181, 14, 130, 11, 117, 191, 234, 124, 3, 114, 238, 202, + 204, 12, 64, 157, 68, 242, 69, 222, 71, 38, 20, 167, 248, 156, 18, 118, 68, 41, 139, 228, + 235, 148, 91, 226, 42, 148, 22, 166, 232, 244, 29, 159, 154, 36, 142, 96, 176, 49, 183, + 130, 138, 26, 37, 220, 156, 202, 183, 34, 131, 245, 174, 136, 48, 154, 230, 37, 118, 121, + 68, 231, 79, 63, 221, 46, 137, 160, 179, 87, 17, 218, 214, 63, 102, 102, 99, 169, 198, 113, + 33, 76, 13, 61, 36, 89, 148, 49, 134, 239, 253, 12, 242, 28, 5, 21, 143, 242, 106, 146, + 
125, 164, 137, 207, 95, 235, 57, 211, 192, 118, 224, 242, 249, 16, 83, 56, 41, 181, 124, + 108, 16, 28, 185, 71, 171, 15, 164, 118, 198, 66, 198, 194, 107, 83, 84, 163, 186, 219, 43, + 221, 9, 3, 20, 103, 40, 59, 249, 24, 216, 106, 16, 99, 168, 109, 128, 169, 57, 46, 158, 81, + 167, 201, 255, 62, 103, 14, 62, 238, 99, 254, 210, 106, 23, 120, 215, 2, 3, 73, 251, 237, + 135, 128, 128, 206, 101, 77, 245, 27, 70, 53, 196, 81, 223, 13, 0, 204, 10, 222, 12, 164, + 202, 100, 79, 25, 249, 121, 215, 148, 225, 91, 127, 75, 75, 163, 110, 205, 5, 204, 177, + 254, 134, 245, 217, 178, 64, 239, 106, 55, 124, 16, 80, 124, 16, 81, 169, 61, 200, 221, 24, + 221, 18, 233, 230, 164, 121, 212, 126, 79, 2, 216, 164, 249, 41, 255, 39, 98, 231, 12, 123, + 13, 116, 220, 172, 166, 89, 112, 113, 250, 61, 176, 153, 84, 225, 187, 162, 79, 65, 106, + 221, 173, 209, 157, 70, 33, 75, 158, 110, 152, 39, 200, 89, 91, 89, 253, 126, 84, 27, 158, + 181, 101, 249, 32, 129, 198, 38, 144, 92, 64, 112, 208, 113, 60, 212, 72, 138, 49, 82, 95, + 183, 174, 225, 77, 53, 121, 211, 223, 131, 161, 148, 220, 88, 131, 151, 69, 208, 240, 49, + 163, 151, 121, 50, 79, 129, 137, 98, 57, 18, 33, 202, 150, 120, 255, 99, 34, 54, 85, 204, + 172, 120, 77, 42, 123, 242, 129, 92, 196, 78, 101, 98, 25, 146, 73, 186, 132, 227, 246, 96, + 236, 208, 63, 50, 90, 183, 134, 219, 143, 100, 54, 175, 20, 172, 107, 171, 169, 199, 50, + 110, 252, 135, 93, 169, 144, 100, 236, 91, 122, 111, 40, 196, 143, 73, 177, 56, 50, 27, 21, + 249, 185, 212, 182, 40, 134, 197, 187, 55, 165, 125, 48, 132, 76, 242, 184, 173, 177, 111, + 150, 67, 58, 189, 122, 152, 228, 51, 241, 130, 42, 113, 112, 62, 70, 131, 4, 23, 177, 114, + 212, 125, 157, 28, 8, 238, 223, 158, 211, 172, 145, 188, 65, 125, 62, 45, 220, 124, 226, + 100, 34, 36, 153, 93, 76, 157, 89, 219, 60, 42, 88, 68, 158, 155, 181, 124, 242, 72, 167, + 74, 56, 176, 56, 114, 203, 54, 170, 85, 155, 235, 79, 29, 201, 71, 124, 181, 127, 180, 139, + 147, 47, 167, 26, 18, 67, 141, 150, 37, 197, 6, 170, 
96, 68, 89, 209, 246, 182, 183, 53, + 68, 239, 126, 147, 50, 210, 34, 248, 246, 109, 34, 41, 213, 229, 134, 154, 97, 241, 132, + 17, 162, 180, 74, 151, 253, 150, 69, 251, 151, 63, 192, 219, 79, 253, 212, 137, 234, 196, + 133, 215, 211, 147, 128, 54, 85, 93, 202, 30, 168, 167, 128, 125, 150, 188, 250, 23, 195, + 249, 52, 179, 26, 207, 129, 227, 68, 78, 56, 2, 17, 86, 220, 89, 99, 19, 20, 173, 57, 203, + 140, 66, 231, 27, 69, 214, 238, 201, 134, 89, 102, 254, 85, 249, 132, 115, 51, 39, 65, 184, + 199, 187, 14, 44, 6, 217, 16, 227, 116, 185, 66, 201, 188, 95, 66, 178, 243, 103, 79, 144, + 147, 139, 59, 123, 238, 28, 18, 202, 166, 75, 59, 246, 190, 150, 75, 73, 182, 251, 132, 15, + 248, 96, 112, 253, 41, 3, 203, 44, 141, 147, 255, 69, 85, 78, 129, 60, 145, 81, 166, 224, + 131, 132, 215, 210, 169, 5, 136, 178, 23, 30, 139, 153, 106, 40, 54, 139, 244, 214, 43, + 132, 229, 221, 58, 81, 81, 57, 190, 168, 255, 114, 63, 122, 173, 6, 136, 110, 159, 188, 50, + 188, 91, 56, 185, 99, 107, 139, 120, 176, 228, 142, 146, 209, 96, 27, 133, 77, 69, 254, + 114, 99, 194, 195, 4, 80, 36, 220, 147, 252, 207, 142, 66, 65, 64, 226, 63, 85, 218, 66, + 125, 108, 197, 49, 143, 59, 43, 182, 0, 253, 169, 234, 209, 57, 90, 190, 234, 14, 25, 237, + 62, 155, 188, 246, 92, 78, 241, 21, 154, 242, 201, 59, 16, 147, 73, 252, 52, 13, 175, 51, + 158, 248, 118, 159, 4, 186, 101, 250, 170, 10, 16, 135, 251, 224, 114, 97, 61, 25, 102, 23, + 74, 58, 118, 158, 32, 74, 115, 104, 240, 131, 42, 187, 155, 134, 209, 171, 225, 85, 13, 71, + 252, 156, 81, 126, 72, 110, 45, 207, 84, 113, 187, 61, 152, 107, 111, 80, 105, 35, 104, 81, + 245, 68, 28, 51, 67, 212, 172, 99, 191, 248, 217, 91, 186, 232, 145, 201, 135, 56, 164, 52, + 127, 137, 111, 186, 6, 164, 236, 112, 31, 232, 75, 224, 58, 209, 31, 205, 204, 46, 180, + 127, 215, 44, 45, 13, 147, 165, 245, 241, 162, 178, 249, 226, 241, 113, 104, 80, 175, 59, + 1, 43, 12, 25, 255, 146, 70, 208, 140, 140, 225, 122, 125, 181, 239, 73, 89, 5, 107, 9, + 241, 8, 16, 223, 203, 
176, 57, 33, 4, 115, 216, 80, 155, 249, 103, 241, 188, 44, 216, 37, + 115, 171, 104, 154, 145, 134, 183, 58, 141, 141, 178, 123, 166, 222, 67, 68, 154, 167, 52, + 235, 31, 112, 206, 223, 53, 6, 183, 5, 146, 93, 226, 30, 73, 237, 197, 140, 19, 34, 104, + 116, 213, 152, 6, 75, 215, 218, 214, 66, 101, 19, 72, 244, 9, 81, 57, 43, 202, 165, 46, 36, + 100, 31, 15, 218, 44, 181, 120, 172, 25, 99, 242, 188, 3, 83, 202, 190, 27, 251, 130, 200, + 101, 175, 176, 105, 39, 227, 210, 240, 121, 137, 42, 27, 128, 5, 30, 234, 109, 232, 77, 59, + 100, 180, 107, 5, 215, 28, 55, 228, 186, 224, 16, 156, 182, 238, 181, 59, 124, 105, 117, + 32, 66, 83, 110, 109, 112, 68, 95, 77, 121, 139, 94, 176, 190, 6, 58, 109, 213, 214, 24, + 253, 72, 61, 86, 209, 20, 88, 36, 146, 179, 79, 168, 66, 209, 168, 48, 253, 177, 239, 139, + 166, 213, 72, 242, 160, 36, 254, 135, 57, 207, 55, 198, 205, 150, 198, 54, 140, 247, 242, + 52, 106, 132, 71, 67, 233, 29, 181, 247, 189, 36, 48, 232, 111, 130, 149, 234, 243, 241, + 165, 66, 221, 64, 173, 207, 0, 69, 201, 111, 209, 157, 16, 56, 217, 29, 157, 106, 232, 206, + 148, 110, 71, 113, 29, 228, 195, 57, 249, 181, 71, 203, 211, 131, 64, 92, 141, 89, 58, 210, + 70, 246, 227, 146, 137, 63, 41, 250, 187, 56, 160, 17, 174, 45, 28, 245, 204, 63, 18, 174, + 162, 53, 208, 121, 187, 1, 121, 46, 50, 245, 207, 140, 5, 186, 237, 72, 158, 163, 44, 143, + 197, 127, 34, 160, 41, 242, 108, 220, 123, 138, 205, 164, 23, 164, 18, 117, 123, 153, 22, + 150, 73, 141, 43, 176, 88, 251, 198, 143, 93, 33, 20, 40, 143, 84, 197, 67, 255, 148, 57, + 146, 198, 114, 33, 71, 30, 82, 192, 170, 52, 234, 239, 80, 64, 40, 102, 84, 166, 173, 142, + 183, 193, 94, 230, 23, 141, 252, 243, 157, 35, 237, 118, 118, 42, 74, 1, 54, 87, 226, 144, + 53, 58, 204, 32, 19, 74, 57, 167, 120, 21, 185, 51, 252, 176, 109, 2, 92, 125, 240, 132, + 20, 197, 113, 129, 104, 179, 18, 64, 47, 115, 96, 209, 210, 76, 191, 39, 127, 230, 135, + 242, 56, 42, 214, 109, 188, 170, 192, 204, 9, 204, 190, 58, 162, 143, 115, 247, 
60, 155, + 37, 146, 119, 159, 64, 247, 242, 239, 189, 181, 85, 25, 145, 123, 81, 123, 205, 83, 33, + 144, 45, 196, 64, 114, 13, 18, 247, 116, 14, 202, 246, 213, 177, 225, 152, 217, 119, 24, + 39, 253, 158, 222, 242, 242, 175, 77, 241, 126, 29, 93, 45, 170, 185, 71, 131, 214, 114, + 43, 181, 150, 252, 39, 6, 87, 37, 134, 229, 17, 8, 15, 186, 181, 205, 161, 20, 235, 0, 12, + 137, 197, 184, 41, 11, 222, 237, 139, 70, 19, 213, 30, 122, 10, 226, 173, 98, 29, 245, 52, + 215, 124, 12, 134, 182, 57, 22, 177, 59, 226, 249, 248, 41, 180, 183, 47, 165, 116, 170, + 152, 20, 74, 194, 122, 200, 136, 75, 176, 189, 240, 211, 88, 98, 205, 34, 105, 216, 138, + 137, 72, 56, 215, 175, 140, 70, 137, 91, 54, 133, 221, 38, 226, 239, 139, 212, 80, 177, + 248, 110, 195, 61, 196, 255, 7, 156, 229, 136, 198, 72, 3, 234, 106, 16, 83, 14, 210, 94, + 51, 205, 43, 23, 224, 60, 41, 139, 192, 59, 25, 111, 190, 82, 39, 54, 189, 75, 126, 229, + 83, 220, 32, 148, 43, 104, 248, 214, 206, 224, 215, 16, 60, 193, 15, 56, 193, 32, 170, 160, + 23, 190, 102, 110, 19, 17, 72, 35, 17, 208, 211, 97, 113, 200, 24, 54, 117, 191, 45, 89, + 163, 74, 198, 191, 150, 95, 253, 192, 22, 129, 130, 32, 152, 172, 103, 119, 229, 128, 56, + 107, 92, 200, 44, 214, 191, 125, 210, 215, 82, 117, 251, 113, 109, 109, 226, 6, 169, 109, + 74, 23, 249, 245, 34, 66, 19, 101, 185, 162, 211, 82, 34, 142, 17, 229, 51, 118, 89, 199, + 214, 28, 129, 55, 42, 45, 223, 195, 53, 145, 14, 230, 108, 57, 10, 38, 30, 54, 47, 103, + 228, 177, 43, 124, 215, 126, 179, 126, 224, 136, 52, 213, 200, 239, 248, 56, 200, 140, 211, + 100, 66, 168, 7, 73, 198, 24, 116, 195, 168, 244, 221, 29, 96, 70, 60, 223, 74, 211, 190, + 250, 31, 253, 102, 252, 5, 79, 214, 195, 57, 178, 97, 227, 185, 105, 107, 5, 36, 58, 5, 55, + 206, 148, 62, 127, 7, 185, 106, 245, 3, 39, 244, 34, 213, 127, 5, 102, 234, 34, 21, 55, + 212, 6, 181, 98, 25, 92, 178, 172, 22, 221, 208, 146, 54, 119, 64, 138, 248, 54, 134, 7, + 72, 247, 6, 228, 125, 93, 153, 23, 141, 207, 95, 56, 117, 215, 
95, 159, 159, 214, 117, 97, + 43, 109, 88, 50, 163, 148, 70, 44, 108, 208, 165, 89, 202, 104, 76, 241, 196, 12, 40, 102, + 154, 234, 158, 197, 135, 31, 129, 203, 135, 181, 145, 20, 117, 153, 231, 212, 228, 118, + 193, 211, 167, 59, 198, 170, 28, 74, 11, 164, 211, 1, 189, 68, 202, 163, 110, 158, 216, + 123, 31, 16, 30, 31, 111, 81, 108, 235, 56, 98, 114, 76, 29, 142, 234, 159, 214, 18, 38, + 226, 183, 37, 36, 15, 56, 53, 141, 161, 85, 198, 76, 246, 223, 82, 137, 2, 27, 186, 111, + 208, 68, 146, 181, 205, 85, 101, 70, 19, 33, 38, 254, 177, 231, 88, 176, 43, 130, 244, 94, + 228, 81, 193, 13, 224, 8, 45, 35, 185, 9, 103, 112, 133, 124, 107, 172, 66, 48, 64, 33, 84, + 14, 127, 69, 54, 46, 68, 14, 225, 255, 48, 107, 32, 75, 160, 60, 67, 158, 149, 12, 243, + 130, 168, 250, 146, 60, 29, 121, 117, 186, 93, 171, 26, 187, 23, 130, 200, 105, 169, 14, + 241, 206, 210, 146, 59, 79, 197, 56, 14, 119, 202, 166, 82, 77, 90, 96, 193, 7, 176, 83, + 166, 12, 5, 99, 4, 173, 44, 162, 241, 216, 251, 218, 158, 2, 222, 159, 93, 5, 189, 82, 143, + 147, 50, 220, 207, 150, 229, 93, 47, 39, 242, 159, 236, 141, 31, 209, 237, 21, 156, 8, 199, + 201, 62, 55, 18, 132, 140, 65, 177, 159, 152, 137, 179, 4, 196, 88, 135, 212, 251, 156, + 138, 108, 108, 84, 178, 243, 153, 151, 175, 177, 23, 163, 44, 180, 29, 98, 159, 125, 91, + 169, 25, 131, 30, 65, 139, 56, 16, 177, 161, 230, 244, 121, 38, 207, 45, 64, 0, 235, 250, + 149, 220, 128, 161, 118, 12, 148, 229, 107, 33, 108, 158, 197, 103, 236, 220, 167, 93, 108, + 227, 34, 17, 54, 31, 131, 135, 182, 166, 130, 136, 244, 128, 130, 64, 45, 59, 203, 121, + 207, 228, 46, 75, 96, 55, 66, 87, 173, 145, 207, 106, 111, 97, 224, 103, 86, 128, 36, 37, + 105, 165, 77, 69, 13, 29, 220, 163, 123, 49, 209, 38, 212, 238, 25, 4, 232, 186, 232, 133, + 3, 163, 79, 139, 249, 111, 69, 0, 187, 128, 212, 60, 242, 147, 148, 223, 17, 73, 194, 244, + 125, 146, 191, 38, 15, 247, 52, 4, 91, 229, 37, 22, 64, 135, 77, 181, 241, 149, 80, 196, + 124, 21, 200, 117, 76, 105, 121, 30, 188, 
3, 65, 161, 205, 178, 242, 67, 111, 175, 58, 9, + 95, 46, 201, 105, 23, 224, 249, 98, 100, 205, 255, 23, 50, 131, 30, 216, 87, 102, 59, 91, + 156, 114, 45, 237, 187, 117, 192, 231, 51, 65, 183, 127, 231, 90, 69, 131, 6, 141, 83, 217, + 17, 41, 150, 182, 89, 2, 191, 56, 69, 42, 120, 196, 10, 177, 255, 154, 238, 104, 210, 58, + 33, 13, 163, 136, 178, 244, 61, 163, 206, 79, 81, 212, 50, 23, 56, 28, 182, 165, 29, 84, + 29, 173, 150, 194, 201, 221, 239, 145, 151, 60, 119, 233, 183, 171, 240, 201, 10, 91, 26, + 12, 55, 126, 52, 113, 9, 105, 150, 111, 58, 76, 222, 35, 54, 5, 36, 51, 229, 13, 222, 167, + 248, 130, 166, 6, 108, 78, 44, 228, 219, 155, 216, 122, 66, 163, 52, 100, 111, 104, 166, + 127, 253, 123, 15, 179, 63, 102, 161, 23, 86, 81, 41, 241, 164, 196, 71, 122, 216, 18, 22, + 196, 39, 126, 223, 86, 220, 81, 90, 103, 74, 116, 226, 16, 125, 173, 210, 46, 194, 102, + 224, 246, 149, 225, 35, 1, 248, 108, 64, 20, 74, 48, 89, 109, 173, 212, 114, 175, 87, 97, + 217, 241, 107, 253, 67, 82, 32, 14, 1, 219, 60, 10, 73, 19, 183, 87, 110, 95, 252, 101, 56, + 34, 67, 24, 180, 179, 55, 32, 247, 187, 98, 99, 235, 73, 77, 243, 29, 29, 93, 242, 60, 26, + 51, 27, 102, 109, 98, 251, 98, 76, 235, 39, 89, 1, 108, 46, 238, 115, 33, 59, 45, 46, 4, + 56, 141, 65, 192, 26, 110, 192, 216, 245, 71, 57, 65, 144, 246, 236, 191, 64, 159, 245, + 245, 14, 36, 66, 6, 98, 18, 148, 99, 141, 82, 64, 51, 168, 215, 201, 82, 150, 68, 84, 237, + 2, 185, 49, 39, 122, 245, 239, 219, 60, 142, 16, 78, 165, 59, 145, 226, 38, 19, 228, 26, + 159, 176, 90, 32, 1, 163, 43, 127, 125, 232, 29, 223, 231, 232, 176, 78, 57, 34, 219, 177, + 64, 17, 186, 201, 72, 51, 195, 59, 140, 226, 123, 219, 243, 10, 190, 99, 40, 236, 187, 46, + 119, 180, 164, 197, 70, 134, 167, 185, 241, 187, 218, 249, 220, 62, 226, 47, 97, 251, 132, + 155, 218, 32, 253, 182, 144, 98, 22, 209, 215, 16, 95, 246, 4, 58, 32, 214, 187, 1, 103, + 224, 60, 163, 97, 233, 219, 15, 97, 6, 176, 177, 176, 174, 149, 111, 229, 17, 47, 10, 167, + 193, 114, 2, 
14, 131, 72, 117, 147, 68, 11, 217, 168, 7, 84, 89, 255, 130, 70, 149, 123, + 55, 161, 5, 27, 3, 133, 147, 51, 133, 120, 139, 225, 134, 107, 121, 199, 129, 150, 117, 73, + 6, 190, 67, 1, 127, 99, 203, 123, 26, 10, 131, 160, 255, 55, 205, 8, 109, 202, 165, 46, 13, + 144, 141, 238, 4, 15, 30, 91, 78, 167, 56, 207, 181, 100, 89, 226, 166, 91, 6, 87, 42, 161, + 18, 173, 46, 134, 84, 47, 192, 51, 192, 86, 143, 106, 143, 68, 251, 155, 48, 132, 148, 113, + 151, 135, 133, 68, 135, 223, 141, 62, 40, 237, 173, 32, 255, 82, 138, 211, 98, 135, 17, + 208, 76, 110, 10, 169, 109, 29, 94, 79, 56, 157, 83, 52, 181, 250, 138, 162, 188, 220, 51, + 204, 209, 222, 189, 226, 142, 162, 134, 34, 176, 136, 149, 252, 25, 172, 22, 56, 41, 175, + 96, 189, 47, 60, 197, 122, 228, 229, 174, 7, 187, 59, 62, 90, 104, 81, 171, 136, 40, 100, + 33, 61, 107, 104, 93, 125, 99, 111, 143, 164, 107, 234, 102, 20, 82, 79, 151, 211, 159, 79, + 36, 159, 15, 137, 116, 236, 128, 31, 195, 155, 61, 48, 44, 168, 114, 179, 176, 19, 154, + 247, 138, 187, 48, 108, 202, 239, 31, 5, 253, 101, 72, 112, 65, 48, 8, 136, 4, 140, 125, + 19, 45, 76, 151, 233, 69, 65, 39, 222, 33, 169, 153, 248, 69, 22, 63, 82, 47, 141, 89, 69, + 102, 183, 148, 108, 230, 206, 254, 173, 111, 126, 205, 73, 85, 110, 8, 195, 112, 225, 241, + 117, 201, 155, 66, 182, 43, 95, 192, 172, 85, 95, 200, 55, 150, 254, 183, 79, 51, 140, 194, + 243, 218, 6, 207, 43, 112, 155, 165, 185, 81, 231, 61, 156, 78, 134, 159, 39, 52, 3, 54, + 202, 111, 201, 137, 237, 153, 86, 65, 220, 47, 171, 44, 200, 196, 150, 157, 71, 245, 230, + 122, 68, 43, 159, 42, 220, 239, 39, 59, 81, 215, 190, 128, 130, 45, 90, 180, 95, 118, 120, + 37, 13, 88, 3, 66, 233, 214, 12, 132, 171, 162, 38, 115, 151, 111, 181, 74, 10, 105, 88, + 147, 18, 179, 28, 209, 232, 145, 185, 92, 144, 230, 74, 12, 70, 230, 10, 109, 202, 47, 127, + 142, 241, 226, 160, 99, 236, 6, 133, 69, 212, 154, 166, 91, 50, 29, 34, 28, 146, 191, 43, + 169, 86, 236, 44, 39, 96, 174, 26, 12, 210, 205, 182, 94, 148, 21, 71, 
7, 55, 243, 169, + 236, 251, 148, 255, 130, 169, 82, 153, 21, 140, 25, 11, 83, 255, 108, 240, 128, 13, 36, 35, + 182, 182, 242, 102, 57, 202, 171, 124, 196, 136, 92, 214, 60, 128, 224, 239, 59, 200, 208, + 243, 143, 95, 97, 220, 171, 167, 40, 18, 63, 70, 152, 3, 129, 146, 154, 72, 146, 176, 144, + 56, 0, 216, 206, 217, 145, 170, 82, 110, 208, 56, 155, 158, 133, 32, 72, 127, 55, 98, 223, + 55, 57, 226, 184, 221, 143, 62, 205, 153, 184, 72, 233, 147, 245, 235, 115, 24, 146, 28, + 29, 191, 188, 195, 216, 0, 79, 123, 176, 54, 74, 228, 161, 151, 255, 57, 132, 208, 222, + 172, 239, 205, 243, 127, 50, 141, 9, 28, 72, 123, 85, 122, 138, 248, 107, 55, 73, 183, 2, + 189, 140, 161, 129, 16, 183, 182, 207, 76, 105, 61, 159, 207, 220, 140, 223, 15, 72, 164, + 232, 55, 246, 173, 204, 217, 67, 72, 116, 35, 22, 247, 217, 8, 97, 20, 37, 53, 199, 137, + 14, 183, 78, 33, 179, 245, 104, 161, 143, 174, 147, 54, 58, 153, 221, 189, 50, 27, 25, 252, + 142, 76, 18, 135, 116, 106, 21, 78, 71, 130, 246, 120, 35, 81, 60, 230, 156, 147, 127, 104, + 209, 94, 21, 168, 51, 226, 215, 97, 176, 83, 122, 225, 117, 106, 49, 117, 171, 146, 184, + 154, 203, 205, 153, 155, 84, 153, 95, 190, 76, 32, 248, 17, 43, 20, 48, 114, 56, 234, 90, + 101, 59, 154, 217, 35, 15, 234, 180, 156, 178, 214, 117, 220, 215, 178, 12, 37, 247, 61, + 212, 186, 28, 176, 157, 38, 125, 116, 68, 128, 229, 191, 65, 183, 125, 81, 130, 183, 131, + 93, 244, 175, 42, 119, 240, 179, 174, 78, 1, 125, 226, 231, 160, 105, 14, 32, 19, 86, 170, + 9, 226, 40, 148, 47, 70, 27, 37, 13, 56, 247, 89, 179, 103, 45, 105, 94, 115, 97, 185, 56, + 13, 24, 135, 47, 150, 57, 56, 72, 31, 70, 31, 48, 75, 206, 216, 69, 184, 201, 237, 221, + 113, 183, 214, 140, 167, 68, 69, 187, 226, 254, 182, 189, 142, 132, 1, 220, 204, 174, 248, + 29, 216, 137, 194, 87, 139, 131, 224, 76, 73, 194, 165, 71, 63, 157, 208, 134, 167, 116, + 14, 87, 226, 46, 76, 101, 73, 42, 99, 13, 76, 238, 140, 44, 48, 118, 246, 56, 74, 153, 113, + 185, 44, 218, 130, 32, 218, 143, 177, 174, 
194, 189, 188, 12, 248, 30, 201, 111, 41, 179, + 117, 45, 168, 224, 254, 10, 90, 226, 81, 242, 96, 40, 61, 225, 156, 143, 248, 187, 220, 74, + 237, 159, 165, 63, 246, 200, 229, 75, 103, 203, 208, 187, 92, 84, 138, 17, 36, 207, 108, + 67, 138, 71, 82, 252, 122, 231, 150, 11, 109, 169, 15, 75, 46, 149, 100, 227, 59, 95, 15, + 40, 246, 192, 66, 68, 36, 248, 149, 74, 54, 28, 207, 167, 237, 62, 186, 11, 49, 155, 38, + 20, 117, 62, 18, 201, 9, 166, 33, 74, 232, 160, 202, 178, 77, 25, 107, 220, 210, 141, 40, + 172, 79, 179, 55, 185, 100, 199, 98, 115, 207, 191, 151, 67, 174, 160, 99, 155, 76, 33, + 141, 242, 154, 100, 87, 243, 208, 43, 160, 42, 126, 22, 177, 199, 150, 245, 244, 247, 186, + 241, 222, 110, 37, 208, 33, 61, 184, 15, 72, 19, 103, 39, 215, 155, 210, 224, 71, 180, 11, + 172, 63, 25, 19, 134, 108, 93, 35, 239, 49, 159, 42, 238, 79, 117, 86, 16, 121, 66, 93, 45, + 199, 23, 187, 127, 36, 85, 208, 230, 132, 244, 81, 51, 107, 51, 72, 30, 81, 50, 238, 169, + 204, 134, 134, 45, 5, 148, 235, 84, 132, 46, 226, 36, 102, 68, 51, 213, 138, 238, 72, 88, + 110, 117, 31, 222, 121, 76, 130, 195, 232, 169, 252, 218, 169, 141, 41, 51, 63, 78, 122, + 236, 42, 150, 182, 48, 40, 80, 44, 60, 40, 167, 42, 133, 21, 222, 175, 242, 236, 255, 53, + 141, 153, 55, 223, 175, 247, 251, 102, 105, 54, 31, 47, 236, 176, 106, 191, 35, 249, 141, + 170, 115, 8, 43, 117, 98, 108, 207, 43, 62, 247, 140, 37, 234, 0, 243, 31, 52, 42, 240, + 226, 94, 128, 246, 249, 56, 37, 226, 92, 53, 36, 72, 144, 138, 181, 113, 54, 159, 116, 37, + 26, 174, 73, 158, 179, 22, 173, 173, 76, 118, 38, 13, 255, 67, 194, 86, 90, 199, 243, 65, + 3, 10, 210, 37, 198, 22, 139, 92, 210, 95, 166, 208, 227, 48, 255, 255, 78, 64, 150, 209, + 86, 77, 140, 215, 139, 1, 112, 216, 14, 126, 41, 52, 81, 240, 166, 110, 246, 236, 70, 106, + 95, 165, 112, 102, 214, 225, 248, 58, 57, 164, 225, 7, 167, 15, 66, 26, 243, 153, 184, 43, + 67, 226, 136, 56, 251, 245, 85, 141, 129, 183, 244, 109, 227, 163, 251, 110, 135, 174, 205, + 49, 67, 131, 
32, 134, 231, 225, 203, 78, 160, 182, 78, 140, 213, 115, 82, 156, 130, 102, + 106, 184, 123, 84, 13, 160, 185, 162, 47, 227, 197, 165, 83, 122, 68, 128, 131, 150, 125, + 112, 150, 138, 8, 148, 69, 176, 172, 114, 73, 127, 86, 70, 212, 0, 157, 63, 184, 197, 6, + 82, 157, 55, 154, 33, 170, 56, 181, 70, 206, 169, 92, 122, 27, 184, 159, 242, 206, 225, + 177, 242, 162, 148, 117, 158, 172, 72, 76, 186, 59, 60, 8, 161, 188, 101, 31, 233, 37, 240, + 69, 226, 40, 247, 105, 19, 211, 15, 90, 90, 102, 180, 61, 128, 50, 212, 248, 144, 135, 178, + 49, 175, 106, 28, 225, 242, 203, 247, 6, 165, 202, 108, 36, 8, 155, 177, 240, 70, 84, 16, + 250, 91, 0, 19, 54, 91, 196, 133, 73, 197, 2, 211, 143, 201, 123, 80, 142, 191, 119, 98, + 18, 245, 100, 73, 93, 243, 201, 35, 54, 231, 238, 241, 5, 155, 127, 0, 254, 121, 146, 12, + 127, 122, 195, 64, 99, 255, 95, 186, 185, 45, 7, 180, 159, 154, 64, 100, 182, 150, 27, 189, + 63, 7, 52, 11, 159, 184, 38, 230, 77, 198, 241, 22, 194, 222, 20, 202, 53, 236, 141, 149, + 64, 148, 242, 76, 79, 56, 46, 188, 118, 201, 56, 142, 141, 123, 75, 119, 211, 5, 54, 61, + 145, 48, 231, 130, 81, 12, 80, 29, 248, 136, 242, 226, 149, 57, 110, 1, 184, 147, 187, 208, + 86, 103, 131, 34, 214, 35, 146, 124, 252, 177, 76, 110, 156, 99, 118, 76, 153, 73, 89, 49, + 37, 178, 14, 250, 151, 170, 254, 220, 59, 171, 75, 84, 164, 65, 121, 87, 122, 212, 27, 65, + 94, 57, 60, 250, 249, 156, 80, 109, 238, 235, 92, 201, 39, 192, 86, 162, 30, 106, 104, 114, + 182, 9, 183, 97, 95, 231, 16, 40, 87, 196, 43, 69, 29, 136, 219, 68, 42, 215, 122, 197, + 109, 134, 88, 61, 55, 177, 209, 174, 109, 110, 217, 95, 201, 16, 133, 149, 147, 244, 155, + 105, 39, 28, 105, 247, 251, 139, 219, 63, 176, 161, 71, 101, 63, 98, 218, 96, 33, 127, 192, + 31, 69, 166, 61, 74, 143, 142, 88, 225, 134, 65, 235, 214, 145, 245, 107, 56, 238, 119, + 101, 160, 247, 38, 236, 87, 124, 117, 93, 199, 107, 151, 217, 41, 30, 69, 61, 60, 228, 120, + 38, 61, 202, 4, 196, 60, 107, 93, 230, 40, 247, 224, 159, 40, 66, 236, 
243, 135, 202, 146, + 209, 197, 56, 33, 126, 2, 150, 230, 154, 253, 35, 136, 132, 121, 244, 123, 201, 99, 100, + 34, 233, 123, 210, 194, 205, 243, 251, 205, 118, 7, 211, 100, 55, 210, 42, 228, 153, 167, + 251, 150, 182, 182, 23, 158, 81, 162, 157, 253, 5, 44, 163, 226, 150, 71, 84, 7, 94, 209, + 240, 30, 86, 146, 182, 130, 210, 61, 179, 233, 42, 26, 151, 54, 91, 14, 155, 19, 142, 113, + 38, 169, 23, 73, 254, 124, 16, 15, 81, 67, 86, 230, 56, 77, 86, 225, 157, 240, 176, 138, 0, + 179, 194, 157, 235, 199, 57, 160, 108, 54, 67, 205, 162, 28, 140, 215, 230, 18, 47, 130, + 185, 162, 65, 150, 58, 177, 194, 149, 41, 64, 20, 205, 76, 57, 68, 53, 128, 73, 52, 99, + 239, 127, 206, 232, 68, 50, 48, 176, 235, 249, 65, 38, 39, 152, 147, 79, 184, 174, 106, + 120, 67, 79, 81, 97, 56, 196, 246, 63, 10, 133, 104, 2, 134, 231, 66, 48, 202, 47, 125, 78, + 73, 181, 93, 140, 185, 53, 223, 108, 194, 31, 97, 227, 137, 251, 150, 126, 82, 77, 115, + 123, 6, 224, 250, 235, 144, 171, 159, 213, 113, 246, 46, 151, 87, 13, 119, 210, 206, 215, + 189, 246, 27, 228, 25, 69, 4, 248, 63, 128, 9, 67, 54, 72, 217, 224, 134, 171, 137, 235, + 100, 152, 160, 155, 85, 223, 5, 138, 178, 94, 25, 223, 12, 180, 199, 151, 120, 38, 4, 121, + 192, 69, 126, 105, 16, 7, 22, 26, 187, 124, 6, 203, 17, 11, 231, 196, 67, 234, 38, 86, 246, + 52, 16, 112, 156, 178, 231, 251, 108, 147, 239, 177, 185, 211, 79, 1, 136, 247, 203, 252, + 140, 213, 247, 230, 147, 90, 110, 53, 207, 160, 70, 250, 109, 6, 44, 231, 3, 185, 25, 85, + 200, 137, 200, 34, 92, 52, 151, 250, 44, 132, 111, 80, 243, 47, 234, 216, 229, 22, 231, 52, + 195, 171, 132, 133, 33, 38, 232, 245, 226, 186, 146, 58, 40, 73, 163, 32, 82, 203, 121, 68, + 128, 95, 107, 20, 11, 76, 242, 165, 111, 52, 144, 156, 147, 63, 100, 94, 226, 149, 56, 39, + 32, 58, 34, 153, 138, 224, 4, 95, 129, 192, 92, 247, 139, 190, 177, 0, 50, 83, 231, 45, 59, + 59, 13, 24, 117, 66, 5, 16, 145, 172, 84, 230, 74, 90, 94, 208, 92, 219, 54, 218, 134, 162, + 251, 162, 99, 78, 191, 188, 71, 249, 
101, 232, 123, 16, 123, 177, 248, 47, 254, 56, 11, 39, + 62, 155, 143, 137, 174, 99, 73, 186, 53, 169, 27, 241, 40, 29, 166, 22, 41, 90, 31, 62, 28, + 12, 230, 143, 15, 68, 119, 28, 126, 94, 55, 161, 151, 211, 129, 162, 206, 109, 158, 173, + 85, 119, 18, 54, 107, 213, 68, 29, 99, 62, 188, 115, 105, 95, 2, 135, 83, 183, 67, 210, + 255, 160, 128, 83, 141, 177, 169, 129, 223, 157, 30, 0, 68, 103, 164, 42, 103, 35, 179, + 193, 54, 200, 250, 197, 233, 163, 86, 231, 73, 242, 217, 81, 48, 62, 230, 40, 173, 144, 84, + 7, 199, 88, 61, 166, 38, 187, 80, 60, 136, 44, 242, 62, 224, 98, 169, 210, 116, 9, 233, + 199, 224, 1, 82, 201, 179, 116, 145, 37, 75, 101, 153, 31, 55, 129, 184, 193, 179, 55, 174, + 186, 144, 178, 179, 118, 148, 33, 84, 250, 164, 85, 137, 168, 250, 89, 21, 237, 153, 127, + 29, 86, 131, 72, 118, 175, 255, 160, 160, 78, 202, 88, 140, 237, 161, 195, 157, 85, 15, + 234, 31, 168, 47, 189, 111, 47, 243, 160, 208, 218, 128, 2, 3, 82, 214, 40, 174, 190, 92, + 201, 109, 235, 231, 112, 248, 229, 169, 93, 38, 100, 47, 43, 17, 55, 160, 208, 158, 64, 96, + 147, 178, 156, 194, 179, 197, 166, 34, 156, 165, 124, 160, 189, 213, 20, 144, 108, 45, 99, + 202, 209, 121, 163, 23, 138, 219, 56, 251, 153, 134, 129, 8, 97, 220, 30, 228, 217, 13, + 225, 75, 209, 193, 32, 9, 16, 195, 85, 27, 193, 112, 192, 21, 21, 37, 46, 24, 252, 43, 212, + 110, 152, 164, 90, 125, 111, 218, 35, 67, 171, 237, 146, 80, 22, 243, 160, 105, 146, 234, + 181, 180, 71, 41, 146, 102, 250, 55, 46, 175, 92, 103, 43, 197, 59, 67, 118, 181, 189, 30, + 185, 114, 71, 82, 16, 177, 167, 54, 106, 253, 174, 16, 51, 123, 54, 138, 200, 171, 89, 180, + 183, 101, 207, 34, 112, 59, 3, 70, 172, 251, 177, 30, 232, 31, 10, 182, 220, 90, 116, 128, + 201, 9, 43, 165, 0, 240, 72, 63, 137, 152, 20, 249, 146, 189, 148, 197, 160, 236, 224, 27, + 222, 150, 0, 116, 38, 147, 169, 95, 0, 248, 196, 168, 222, 48, 72, 33, 67, 78, 73, 34, 186, + 173, 18, 62, 181, 23, 165, 248, 62, 244, 123, 26, 241, 223, 76, 3, 186, 242, 41, 210, 210, + 
237, 65, 61, 55, 58, 94, 241, 147, 119, 27, 74, 112, 249, 193, 147, 157, 20, 4, 128, 142, + 54, 213, 217, 60, 47, 9, 109, 162, 197, 147, 81, 76, 167, 7, 175, 223, 128, 217, 229, 40, + 157, 182, 157, 171, 75, 204, 114, 152, 7, 164, 198, 239, 88, 148, 178, 217, 122, 187, 151, + 105, 66, 51, 164, 92, 78, 168, 118, 143, 117, 64, 252, 91, 214, 145, 212, 138, 131, 5, 225, + 150, 184, 184, 6, 138, 153, 120, 154, 194, 161, 33, 99, 204, 177, 237, 41, 32, 157, 36, + 164, 246, 212, 120, 128, 48, 82, 218, 165, 206, 206, 105, 150, 204, 206, 124, 214, 233, + 139, 53, 2, 36, 13, 21, 44, 79, 163, 163, 164, 28, 181, 134, 74, 128, 3, 53, 190, 125, 115, + 155, 58, 63, 198, 87, 227, 191, 172, 49, 217, 71, 178, 118, 68, 19, 105, 142, 170, 115, 32, + 135, 82, 203, 62, 132, 241, 200, 132, 101, 220, 251, 229, 204, 138, 31, 106, 33, 253, 157, + 217, 71, 132, 66, 165, 203, 227, 243, 231, 22, 249, 159, 131, 27, 33, 156, 16, 58, 211, + 229, 167, 144, 173, 111, 0, 195, 144, 177, 116, 78, 103, 158, 13, 248, 193, 107, 210, 62, + 245, 116, 118, 24, 127, 187, 225, 100, 120, 91, 10, 102, 4, 200, 220, 200, 230, 238, 51, + 89, 166, 63, 147, 149, 104, 47, 62, 171, 171, 59, 23, 143, 241, 99, 30, 84, 138, 19, 223, + 133, 9, 221, 142, 76, 204, 164, 189, 170, 45, 75, 123, 133, 106, 196, 83, 57, 213, 182, + 237, 52, 36, 222, 231, 127, 66, 120, 60, 81, 164, 109, 202, 239, 232, 180, 138, 153, 2, + 255, 32, 247, 187, 79, 240, 104, 129, 192, 138, 249, 160, 8, 121, 178, 118, 13, 166, 27, + 59, 29, 190, 147, 216, 204, 246, 184, 122, 194, 184, 58, 155, 133, 170, 194, 86, 22, 250, + 122, 210, 127, 128, 157, 175, 131, 19, 188, 121, 118, 158, 93, 19, 110, 23, 209, 26, 19, + 197, 2, 55, 221, 229, 155, 177, 20, 89, 154, 211, 185, 72, 213, 202, 136, 238, 42, 251, + 228, 224, 191, 158, 39, 162, 231, 2, 128, 188, 49, 36, 104, 158, 163, 95, 82, 92, 255, 213, + 162, 147, 211, 189, 44, 190, 220, 166, 0, 102, 10, 162, 99, 13, 55, 222, 95, 172, 195, 4, + 37, 44, 100, 97, 112, 244, 251, 65, 67, 114, 88, 179, 150, 13, 176, 190, 
86, 221, 113, 80, + 243, 68, 221, 148, 98, 24, 179, 251, 40, 158, 171, 97, 223, 158, 131, 74, 182, 28, 39, 38, + 254, 13, 3, 35, 107, 161, 178, 99, 22, 87, 101, 77, 208, 8, 58, 138, 113, 13, 60, 26, 3, + 89, 77, 215, 108, 69, 221, 115, 207, 213, 216, 93, 32, 85, 210, 87, 199, 212, 79, 2, 63, + 85, 70, 221, 136, 237, 79, 99, 59, 92, 57, 58, 51, 217, 141, 103, 17, 219, 153, 125, 209, + 188, 57, 180, 23, 146, 104, 85, 229, 209, 155, 46, 99, 89, 244, 120, 122, 124, 218, 56, 44, + 25, 61, 129, 130, 228, 55, 241, 74, 32, 187, 145, 149, 209, 10, 230, 245, 20, 16, 94, 26, + 128, 15, 207, 132, 34, 33, 15, 61, 105, 98, 250, 221, 91, 113, 81, 64, 11, 131, 255, 75, + 225, 190, 50, 95, 170, 69, 209, 215, 178, 89, 223, 190, 175, 55, 159, 176, 99, 12, 21, 221, + 201, 211, 132, 234, 58, 251, 204, 61, 85, 153, 173, 81, 11, 8, 239, 170, 130, 41, 53, 165, + 199, 70, 111, 30, 79, 181, 39, 105, 105, 104, 159, 52, 124, 253, 38, 191, 242, 73, 99, 65, + 106, 128, 191, 172, 116, 182, 53, 71, 143, 85, 113, 195, 169, 227, 233, 232, 50, 99, 69, + 244, 53, 148, 212, 1, 204, 147, 138, 157, 119, 246, 39, 240, 9, 65, 207, 52, 81, 43, 68, + 81, 155, 65, 168, 204, 178, 108, 223, 249, 218, 180, 228, 83, 136, 79, 230, 12, 138, 229, + 11, 69, 2, 97, 77, 99, 174, 37, 180, 12, 83, 141, 139, 41, 108, 40, 93, 160, 232, 30, 232, + 159, 126, 223, 93, 90, 45, 88, 245, 158, 141, 88, 241, 91, 120, 252, 230, 140, 200, 195, + 248, 58, 98, 152, 93, 232, 127, 89, 95, 106, 173, 142, 242, 24, 115, 66, 215, 211, 201, + 240, 36, 140, 223, 207, 165, 241, 180, 34, 11, 159, 155, 250, 94, 136, 164, 214, 95, 154, + 30, 106, 97, 114, 217, 176, 92, 18, 198, 157, 255, 78, 82, 208, 161, 185, 45, 209, 137, 91, + 97, 227, 8, 185, 59, 203, 189, 25, 50, 230, 220, 135, 167, 13, 227, 236, 142, 141, 8, 16, + 173, 250, 39, 18, 9, 75, 191, 174, 189, 226, 50, 125, 158, 188, 26, 109, 99, 57, 200, 231, + 153, 197, 250, 247, 224, 57, 218, 53, 34, 172, 142, 30, 196, 98, 117, 187, 203, 134, 45, + 87, 207, 6, 40, 205, 151, 192, 214, 214, 139, 
206, 36, 121, 162, 25, 28, 206, 229, 18, 154, + 136, 107, 204, 65, 235, 122, 185, 132, 50, 165, 227, 161, 234, 187, 27, 231, 212, 27, 113, + 54, 208, 235, 204, 46, 47, 85, 217, 212, 6, 5, 222, 164, 126, 75, 51, 39, 94, 145, 145, + 159, 255, 29, 240, 196, 214, 43, 242, 169, 34, 138, 132, 111, 181, 251, 51, 100, 115, 156, + 246, 20, 179, 242, 151, 174, 252, 146, 226, 109, 180, 228, 254, 82, 143, 136, 145, 239, 4, + 163, 200, 121, 67, 9, 11, 192, 42, 211, 156, 37, 16, 169, 120, 181, 223, 204, 86, 98, 174, + 15, 46, 85, 75, 156, 43, 98, 90, 90, 158, 8, 135, 228, 42, 231, 56, 201, 199, 80, 141, 242, + 25, 34, 169, 190, 232, 245, 47, 73, 17, 195, 41, 168, 247, 75, 124, 125, 201, 38, 85, 69, + 132, 255, 113, 31, 89, 209, 45, 8, 142, 77, 148, 109, 204, 246, 229, 34, 22, 213, 34, 255, + 106, 3, 42, 161, 193, 77, 135, 204, 225, 218, 48, 3, 254, 213, 42, 199, 174, 123, 143, 86, + 217, 94, 137, 239, 137, 99, 200, 251, 208, 39, 101, 154, 44, 7, 26, 125, 245, 76, 122, 201, + 11, 190, 108, 134, 231, 225, 43, 8, 158, 154, 158, 37, 191, 27, 72, 150, 249, 39, 230, 240, + 58, 76, 61, 59, 127, 14, 172, 38, 117, 115, 231, 148, 243, 194, 3, 213, 191, 219, 84, 101, + 30, 210, 88, 191, 188, 113, 92, 66, 253, 166, 28, 146, 23, 51, 85, 65, 217, 233, 190, 248, + 126, 238, 242, 244, 113, 189, 105, 192, 143, 189, 236, 37, 222, 28, 212, 33, 101, 138, 157, + 203, 134, 87, 129, 74, 122, 251, 3, 202, 170, 239, 162, 148, 182, 203, 183, 157, 130, 115, + 213, 90, 4, 96, 25, 196, 143, 234, 135, 14, 0, 103, 8, 178, 46, 205, 167, 74, 183, 57, 201, + 148, 226, 211, 177, 145, 85, 144, 34, 66, 144, 101, 192, 93, 86, 205, 82, 35, 231, 189, 37, + 56, 59, 121, 40, 108, 211, 252, 202, 71, 209, 174, 29, 171, 63, 75, 226, 253, 6, 106, 207, + 195, 184, 134, 148, 172, 59, 167, 223, 138, 168, 181, 51, 85, 107, 239, 40, 201, 2, 236, + 162, 34, 243, 200, 227, 21, 180, 114, 11, 33, 191, 87, 55, 119, 41, 126, 220, 86, 243, 165, + 101, 67, 208, 29, 191, 16, 138, 156, 196, 200, 178, 9, 39, 202, 174, 182, 241, 166, 116, + 
90, 30, 185, 68, 199, 239, 177, 201, 163, 61, 195, 82, 180, 170, 34, 101, 201, 79, 150, + 243, 34, 99, 197, 177, 224, 181, 203, 222, 205, 86, 60, 138, 175, 0, 84, 209, 163, 39, 233, + 91, 106, 117, 23, 59, 6, 126, 120, 206, 73, 106, 245, 115, 17, 5, 19, 225, 61, 149, 216, + 108, 251, 192, 196, 6, 155, 69, 58, 162, 106, 211, 167, 116, 191, 222, 187, 101, 51, 250, + 76, 247, 187, 195, 40, 120, 236, 32, 86, 221, 8, 65, 202, 233, 74, 242, 25, 197, 188, 240, + 41, 148, 107, 49, 198, 214, 142, 122, 186, 28, 193, 27, 2, 115, 140, 146, 126, 45, 52, 216, + 53, 167, 60, 225, 147, 67, 50, 115, 24, 193, 230, 170, 141, 49, 253, 13, 251, 92, 207, 24, + 157, 212, 223, 21, 158, 171, 90, 173, 218, 152, 1, 4, 74, 128, 245, 51, 167, 73, 61, 253, + 20, 120, 152, 240, 195, 200, 169, 207, 207, 199, 179, 74, 115, 32, 172, 199, 119, 59, 66, + 53, 124, 115, 104, 159, 26, 161, 110, 77, 238, 114, 225, 75, 25, 152, 24, 80, 84, 223, 99, + 224, 99, 190, 88, 73, 235, 76, 117, 179, 228, 193, 205, 227, 61, 202, 175, 239, 231, 221, + 176, 198, 240, 214, 66, 131, 123, 161, 255, 16, 184, 163, 207, 149, 32, 65, 160, 120, 224, + 255, 106, 36, 131, 192, 71, 15, 168, 214, 81, 48, 126, 178, 172, 35, 174, 78, 144, 7, 159, + 13, 254, 74, 177, 112, 85, 14, 86, 82, 92, 14, 214, 148, 241, 177, 165, 0, 15, 51, 162, + 181, 60, 117, 39, 242, 239, 82, 227, 236, 216, 139, 193, 55, 113, 19, 133, 63, 76, 83, 4, + 237, 100, 118, 87, 150, 163, 127, 176, 226, 140, 181, 2, 249, 73, 81, 192, 4, 196, 165, + 197, 196, 81, 214, 193, 144, 239, 115, 210, 135, 229, 193, 131, 224, 18, 157, 211, 54, 189, + 124, 73, 166, 180, 210, 23, 228, 78, 169, 106, 217, 242, 68, 86, 50, 138, 249, 136, 123, + 99, 129, 144, 18, 33, 67, 117, 168, 140, 231, 225, 179, 70, 4, 209, 82, 40, 231, 4, 232, + 248, 137, 234, 15, 194, 115, 71, 101, 219, 154, 36, 119, 29, 150, 31, 28, 6, 33, 251, 86, + 176, 240, 192, 141, 246, 134, 158, 194, 124, 33, 173, 241, 9, 102, 21, 236, 162, 98, 17, + 119, 128, 86, 87, 148, 250, 148, 219, 51, 133, 29, 252, 50, 108, 
1, 22, 80, 80, 60, 104, + 238, 72, 11, 179, 15, 206, 48, 189, 24, 108, 101, 203, 17, 117, 32, 196, 147, 250, 234, 49, + 147, 132, 250, 105, 149, 170, 104, 192, 184, 115, 46, 248, 209, 182, 38, 60, 144, 231, 158, + 96, 229, 121, 234, 12, 209, 120, 186, 192, 245, 124, 237, 19, 113, 117, 110, 163, 30, 26, + 63, 49, 98, 8, 253, 24, 82, 51, 117, 64, 45, 250, 6, 74, 199, 184, 39, 122, 187, 75, 119, + 101, 5, 62, 243, 82, 61, 239, 20, 65, 88, 194, 141, 218, 217, 127, 176, 235, 26, 88, 175, + 75, 154, 86, 82, 111, 226, 147, 149, 229, 182, 17, 32, 95, 6, 116, 75, 7, 87, 108, 154, 94, + 194, 250, 97, 106, 50, 125, 249, 29, 243, 218, 74, 218, 36, 136, 231, 41, 43, 199, 226, + 225, 83, 248, 39, 115, 187, 233, 231, 28, 68, 168, 221, 37, 197, 152, 95, 128, 61, 109, 87, + 87, 28, 45, 245, 191, 132, 168, 143, 181, 227, 13, 35, 22, 40, 179, 55, 230, 103, 145, 87, + 61, 50, 104, 245, 173, 34, 19, 57, 22, 215, 86, 144, 0, 194, 139, 14, 235, 236, 119, 79, + 186, 44, 185, 152, 190, 154, 91, 31, 132, 45, 68, 187, 97, 253, 122, 113, 4, 144, 92, 102, + 7, 172, 235, 28, 36, 159, 27, 139, 245, 165, 97, 44, 80, 233, 55, 72, 221, 162, 147, 198, + 151, 201, 174, 60, 186, 119, 78, 95, 141, 66, 99, 35, 32, 20, 61, 67, 71, 69, 166, 237, + 165, 45, 194, 143, 44, 242, 96, 14, 54, 204, 121, 108, 115, 132, 172, 151, 47, 6, 217, 127, + 202, 228, 166, 154, 94, 174, 198, 5, 62, 145, 112, 149, 165, 188, 55, 47, 13, 62, 75, 234, + 208, 167, 136, 37, 209, 203, 188, 85, 72, 228, 173, 50, 51, 212, 69, 194, 112, 112, 26, + 135, 27, 82, 219, 210, 93, 168, 193, 1, 167, 153, 116, 207, 12, 101, 157, 98, 8, 211, 170, + 92, 200, 152, 33, 129, 81, 167, 2, 189, 245, 210, 6, 159, 83, 29, 211, 128, 148, 221, 28, + 103, 45, 133, 245, 212, 39, 204, 94, 16, 92, 116, 251, 163, 18, 159, 160, 49, 38, 95, 212, + 191, 235, 215, 248, 88, 202, 45, 45, 93, 244, 187, 79, 134, 75, 104, 83, 189, 31, 172, 21, + 64, 217, 93, 123, 45, 161, 68, 50, 194, 147, 23, 19, 1, 37, 169, 199, 207, 140, 31, 184, + 104, 81, 143, 200, 198, 14, 205, 
209, 252, 228, 253, 247, 242, 132, 29, 43, 149, 212, 68, + 29, 154, 221, 71, 150, 77, 169, 83, 200, 251, 152, 46, 214, 206, 245, 199, 53, 166, 12, + 112, 216, 52, 135, 33, 2, 188, 75, 63, 110, 140, 42, 155, 136, 164, 175, 113, 54, 151, 16, + 80, 8, 140, 220, 48, 252, 143, 70, 151, 255, 150, 253, 105, 192, 249, 80, 92, 142, 21, 76, + 55, 223, 85, 224, 171, 61, 14, 81, 66, 24, 141, 70, 171, 223, 250, 200, 140, 132, 239, 151, + 235, 4, 118, 218, 235, 51, 210, 0, 61, 54, 136, 131, 52, 98, 157, 243, 68, 99, 139, 159, + 61, 208, 155, 124, 90, 189, 236, 37, 222, 28, 212, 33, 101, 138, 157, 203, 134, 87, 129, + 74, 122, 251, 3, 202, 170, 239, 162, 148, 182, 203, 183, 157, 130, 115, 213, 90, 4, 96, 25, + 196, 143, 234, 135, 14, 0, 103, 8, 178, 46, 205, 167, 74, 183, 57, 201, 148, 226, 211, 177, + 145, 85, 144, 34, 66, 144, 101, 192, 93, 86, 205, 82, 35, 231, 189, 37, 56, 59, 121, 40, + 108, 211, 252, 202, 71, 209, 174, 29, 171, 63, 75, 226, 253, 6, 106, 207, 195, 184, 134, + 148, 172, 59, 167, 223, 138, 168, 181, 51, 85, 107, 239, 40, 201, 2, 236, 162, 34, 243, + 200, 227, 21, 180, 114, 11, 33, 191, 87, 55, 119, 41, 126, 220, 86, 243, 171, 6, 126, 106, + 15, 240, 10, 62, 186, 1, 93, 195, 223, 205, 122, 188, 96, 141, 116, 119, 239, 8, 101, 111, + 226, 65, 153, 103, 109, 240, 238, 201, 133, 154, 97, 7, 164, 128, 8, 239, 201, 39, 153, + 121, 233, 62, 104, 57, 194, 223, 107, 234, 83, 63, 117, 127, 15, 171, 177, 140, 226, 182, + 166, 233, 207, 204, 175, 222, 11, 7, 78, 222, 243, 139, 61, 73, 157, 159, 146, 86, 223, 99, + 13, 51, 57, 95, 141, 178, 3, 125, 3, 177, 21, 158, 43, 16, 64, 3, 35, 54, 6, 88, 72, 81, + 133, 184, 179, 125, 243, 26, 144, 145, 51, 119, 72, 121, 218, 50, 190, 106, 233, 75, 190, + 137, 222, 240, 124, 228, 52, 0, 69, 73, 216, 67, 65, 204, 4, 167, 173, 64, 105, 5, 164, 30, + 17, 59, 1, 65, 159, 250, 254, 99, 159, 75, 170, 11, 40, 181, 130, 112, 214, 190, 10, 140, + 254, 137, 249, 211, 65, 109, 159, 135, 11, 182, 194, 192, 112, 83, 73, 19, 73, 184, 58, + 192, 
204, 129, 60, 250, 180, 116, 136, 60, 214, 240, 155, 212, 2, 38, 34, 4, 89, 116, 98, + 128, 174, 158, 113, 229, 236, 55, 95, 168, 88, 173, 231, 151, 32, 39, 127, 88, 222, 77, 27, + 211, 1, 214, 34, 55, 204, 205, 186, 252, 201, 160, 99, 52, 38, 173, 187, 57, 126, 151, 1, + 18, 5, 127, 67, 254, 222, 143, 200, 1, 241, 176, 10, 223, 156, 96, 61, 230, 53, 42, 22, + 105, 153, 97, 225, 204, 98, 5, 67, 32, 247, 191, 186, 100, 32, 110, 216, 180, 108, 225, 54, + 219, 68, 174, 71, 168, 205, 118, 38, 248, 144, 7, 160, 125, 112, 19, 125, 178, 165, 150, + 239, 71, 163, 236, 204, 201, 40, 124, 125, 196, 17, 109, 238, 45, 177, 46, 107, 98, 48, 28, + 191, 65, 86, 13, 133, 7, 60, 139, 16, 8, 235, 38, 185, 253, 187, 183, 207, 11, 233, 134, + 246, 230, 241, 121, 110, 253, 126, 89, 29, 126, 177, 62, 51, 39, 95, 119, 123, 255, 188, + 37, 244, 140, 187, 210, 55, 198, 175, 198, 218, 0, 8, 84, 103, 128, 73, 198, 0, 4, 235, 15, + 193, 194, 239, 6, 56, 197, 251, 119, 135, 113, 166, 133, 243, 237, 192, 104, 180, 171, 13, + 116, 144, 40, 192, 49, 119, 150, 69, 149, 23, 54, 234, 118, 49, 169, 242, 232, 80, 103, 30, + 169, 52, 98, 49, 140, 255, 186, 65, 192, 224, 90, 86, 248, 173, 27, 183, 162, 183, 208, + 145, 104, 210, 220, 136, 26, 227, 167, 161, 69, 73, 218, 119, 46, 126, 10, 224, 207, 60, + 107, 47, 128, 194, 66, 127, 18, 104, 161, 209, 64, 0, 18, 45, 212, 82, 204, 204, 50, 98, + 224, 32, 169, 240, 5, 180, 202, 189, 22, 201, 86, 250, 239, 212, 197, 56, 6, 45, 5, 24, + 188, 98, 15, 75, 224, 227, 173, 61, 233, 116, 148, 98, 51, 165, 138, 170, 62, 94, 228, 187, + 142, 57, 214, 181, 10, 6, 178, 122, 138, 242, 181, 54, 229, 80, 48, 48, 167, 54, 25, 245, + 79, 63, 64, 72, 191, 44, 146, 70, 79, 109, 90, 32, 91, 141, 67, 251, 66, 37, 21, 155, 42, + 3, 97, 61, 40, 106, 169, 225, 180, 5, 158, 116, 32, 148, 214, 27, 227, 40, 215, 227, 104, + 91, 181, 203, 86, 76, 91, 0, 222, 98, 74, 178, 24, 1, 163, 217, 120, 207, 112, 153, 10, + 231, 149, 147, 15, 90, 238, 106, 196, 130, 148, 98, 26, 208, 71, 47, 
144, 72, 19, 23, 208, + 43, 75, 38, 161, 225, 203, 133, 147, 249, 225, 233, 245, 225, 37, 222, 77, 227, 65, 12, 60, + 155, 177, 76, 243, 93, 35, 168, 70, 180, 23, 148, 195, 239, 135, 63, 179, 193, 153, 73, 99, + 230, 83, 220, 232, 190, 113, 86, 24, 130, 182, 79, 80, 250, 67, 144, 140, 128, 228, 110, + 174, 255, 4, 37, 242, 168, 11, 2, 0, 184, 10, 140, 16, 71, 5, 82, 10, 253, 85, 168, 36, 76, + 89, 222, 22, 115, 173, 206, 44, 252, 127, 242, 131, 111, 129, 79, 185, 20, 102, 38, 193, + 74, 120, 182, 211, 9, 108, 225, 0, 250, 103, 67, 231, 218, 20, 11, 250, 33, 77, 157, 222, + 239, 116, 253, 58, 232, 191, 196, 232, 100, 206, 56, 202, 68, 121, 33, 186, 22, 255, 114, + 243, 66, 197, 25, 122, 223, 62, 11, 101, 92, 75, 36, 20, 165, 177, 66, 171, 215, 102, 109, + 180, 38, 140, 26, 174, 159, 227, 113, 202, 106, 221, 58, 229, 111, 8, 248, 144, 140, 178, + 225, 43, 151, 226, 211, 211, 49, 123, 98, 50, 50, 121, 68, 91, 154, 31, 106, 95, 52, 200, + 208, 147, 73, 167, 95, 254, 81, 178, 41, 66, 7, 116, 143, 156, 147, 193, 60, 83, 116, 21, + 105, 68, 133, 25, 39, 211, 80, 39, 0, 237, 159, 165, 230, 232, 7, 198, 140, 184, 38, 12, + 210, 38, 149, 8, 36, 154, 108, 28, 100, 25, 144, 85, 86, 197, 68, 54, 175, 49, 167, 183, 4, + 111, 174, 169, 125, 79, 201, 234, 129, 250, 49, 95, 233, 170, 26, 88, 76, 245, 178, 253, + 34, 160, 61, 140, 30, 87, 105, 114, 35, 165, 10, 178, 35, 192, 237, 220, 249, 59, 18, 237, + 162, 206, 250, 211, 218, 47, 74, 159, 162, 33, 208, 234, 96, 63, 104, 121, 36, 187, 162, + 155, 7, 247, 173, 223, 10, 144, 21, 191, 60, 129, 109, 191, 65, 116, 54, 228, 29, 237, 174, + 65, 145, 21, 240, 44, 63, 236, 88, 184, 28, 67, 55, 123, 9, 152, 72, 108, 108, 237, 194, 6, + 42, 88, 221, 80, 109, 224, 123, 209, 53, 159, 7, 0, 49, 66, 54, 99, 36, 105, 97, 3, 7, 73, + 159, 72, 142, 146, 141, 164, 150, 134, 74, 89, 192, 208, 96, 8, 3, 127, 118, 154, 48, 34, + 209, 255, 145, 208, 178, 246, 209, 134, 55, 193, 124, 32, 34, 151, 212, 120, 155, 94, 137, + 142, 201, 99, 196, 42, 162, 94, 
109, 129, 3, 69, 73, 0, 91, 252, 83, 61, 226, 164, 137, + 162, 17, 2, 83, 154, 202, 241, 189, 236, 182, 84, 162, 184, 79, 179, 52, 233, 176, 53, 151, + 228, 80, 153, 220, 158, 133, 134, 160, 220, 171, 149, 170, 164, 64, 181, 27, 19, 249, 32, + 77, 110, 194, 11, 18, 110, 79, 57, 198, 172, 56, 54, 217, 86, 88, 122, 150, 76, 94, 117, + 149, 234, 147, 237, 22, 191, 87, 107, 204, 108, 95, 178, 38, 3, 158, 84, 85, 245, 208, 78, + 59, 4, 1, 159, 0, 172, 90, 118, 165, 24, 228, 176, 56, 254, 90, 34, 4, 134, 245, 119, 34, + 35, 210, 56, 140, 137, 216, 234, 218, 78, 202, 207, 205, 46, 116, 90, 200, 190, 160, 123, + 213, 246, 177, 232, 114, 52, 141, 44, 223, 183, 194, 145, 57, 155, 69, 96, 68, 41, 176, + 128, 233, 84, 209, 146, 5, 119, 144, 46, 204, 49, 231, 47, 249, 202, 62, 255, 213, 195, + 148, 42, 216, 124, 122, 89, 131, 78, 158, 213, 218, 29, 32, 122, 56, 80, 233, 104, 42, 55, + 189, 107, 194, 238, 245, 219, 88, 183, 209, 49, 211, 215, 169, 193, 248, 3, 217, 53, 165, + 201, 20, 8, 208, 254, 23, 152, 88, 94, 8, 60, 214, 18, 234, 252, 251, 0, 95, 130, 207, 248, + 26, 224, 84, 177, 80, 194, 83, 132, 147, 21, 166, 77, 40, 42, 220, 172, 151, 164, 190, 173, + 177, 139, 255, 12, 10, 19, 62, 94, 92, 174, 4, 36, 9, 173, 249, 58, 100, 243, 221, 42, 221, + 88, 19, 23, 252, 179, 226, 75, 43, 25, 8, 250, 174, 14, 39, 230, 247, 87, 97, 215, 51, 228, + 46, 222, 103, 151, 200, 137, 73, 223, 202, 205, 127, 72, 132, 3, 195, 156, 97, 166, 22, + 194, 236, 46, 57, 47, 49, 249, 90, 153, 15, 47, 198, 219, 110, 8, 203, 43, 140, 232, 191, + 49, 44, 103, 30, 39, 168, 70, 136, 180, 23, 245, 82, 207, 240, 107, 65, 66, 74, 96, 178, + 136, 191, 161, 92, 42, 225, 100, 210, 52, 229, 252, 189, 69, 110, 255, 244, 15, 21, 209, + 140, 80, 203, 140, 85, 205, 8, 135, 86, 176, 166, 216, 91, 189, 196, 79, 80, 246, 106, 41, + 248, 185, 195, 46, 179, 168, 109, 71, 121, 153, 143, 111, 63, 22, 69, 52, 37, 2, 155, 145, + 104, 85, 43, 47, 69, 60, 65, 22, 19, 198, 104, 215, 56, 12, 184, 98, 37, 32, 199, 106, 71, + 
115, 176, 75, 122, 248, 39, 177, 189, 244, 72, 96, 135, 214, 77, 209, 140, 68, 180, 102, + 233, 127, 212, 59, 4, 181, 231, 62, 225, 240, 62, 18, 179, 44, 59, 135, 47, 197, 182, 6, + 100, 162, 56, 159, 43, 35, 209, 35, 197, 203, 143, 58, 139, 166, 13, 80, 9, 242, 49, 45, + 99, 115, 238, 190, 236, 47, 250, 96, 28, 6, 119, 197, 178, 112, 171, 50, 41, 143, 171, 212, + 168, 8, 51, 212, 90, 171, 75, 47, 156, 103, 80, 28, 172, 228, 49, 227, 215, 213, 62, 111, + 153, 20, 112, 179, 11, 16, 60, 207, 161, 86, 116, 116, 250, 81, 74, 232, 151, 34, 165, 85, + 139, 202, 161, 208, 179, 141, 201, 224, 108, 168, 30, 54, 32, 34, 4, 243, 35, 18, 47, 174, + 240, 115, 229, 17, 185, 144, 180, 30, 239, 68, 235, 182, 77, 76, 207, 102, 165, 1, 200, + 145, 125, 139, 253, 239, 40, 83, 255, 171, 78, 120, 109, 92, 120, 76, 120, 205, 116, 211, + 161, 17, 76, 109, 126, 28, 241, 63, 183, 39, 177, 224, 192, 198, 137, 110, 225, 202, 64, 6, + 239, 237, 121, 13, 35, 19, 205, 39, 43, 230, 203, 77, 184, 99, 145, 240, 37, 114, 134, 117, + 17, 54, 9, 95, 193, 88, 65, 126, 39, 146, 126, 62, 114, 55, 187, 222, 121, 145, 149, 211, + 121, 6, 17, 107, 175, 89, 122, 34, 44, 116, 218, 126, 187, 71, 191, 16, 228, 98, 23, 87, + 77, 81, 13, 14, 38, 157, 141, 169, 233, 29, 98, 152, 8, 234, 96, 91, 77, 120, 220, 211, 46, + 121, 180, 111, 168, 40, 242, 38, 236, 168, 111, 10, 85, 94, 133, 45, 74, 250, 28, 253, 107, + 199, 6, 218, 55, 207, 103, 119, 97, 160, 250, 176, 135, 252, 21, 176, 224, 188, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, + 0, 0, 89, 53, 19, 83, 80, 184, 110, 48, 13, 148, 38, 57, 0, 0, 0, 0, 89, 53, 19, 83, 80, + 184, 110, 48, 13, 148, 38, 57, 0, 0, 0, 0, 89, 53, 19, 83, 53, 157, 46, 228, 44, 100, 6, + 157, 0, 0, 0, 0, 89, 53, 19, 83, 49, 100, 95, 92, 3, 214, 107, 168, 0, 0, 0, 0, 89, 53, 19, + 83, 105, 200, 47, 192, 42, 36, 168, 23, 0, 0, 0, 0, 89, 53, 19, 83, 103, 248, 160, 44, 8, + 171, 233, 143, 0, 0, 0, 0, 89, 53, 19, 83, 43, 27, 189, 
16, 28, 10, 94, 76, 0, 0, 0, 0, 89, + 53, 19, 68, 242, 185, 50, 196, 1, 216, 57, 65, 0, 0, 0, 0, 89, 53, 19, 139, 37, 34, 243, + 88, 5, 112, 102, 11, 0, 0, 0, 0, 89, 53, 19, 83, 123, 8, 171, 64, 33, 188, 59, 16, 0, 0, 0, + 0, 89, 53, 19, 163, 245, 104, 124, 68, 9, 98, 25, 235, 0, 0, 0, 0, 89, 53, 19, 139, 65, + 107, 226, 4, 8, 144, 155, 158, 0, 0, 0, 0, 89, 53, 19, 234, 192, 216, 187, 232, 5, 154, 84, + 215, 0, 0, 0, 0, 89, 53, 19, 191, 0, 65, 23, 88, 9, 157, 187, 194, 0, 0, 0, 0, 89, 53, 19, + 68, 167, 194, 165, 224, 10, 219, 222, 27, 0, 0, 0, 0, 89, 53, 18, 125, 157, 130, 214, 120, + 4, 43, 225, 191, 0, 0, 0, 0, 89, 53, 21, 136, 206, 70, 37, 64, 17, 207, 214, 132, 0, 0, 0, + 0, 89, 53, 19, 234, 199, 35, 129, 64, 7, 207, 4, 156, 0, 0, 0, 0, 89, 53, 18, 125, 97, 236, + 218, 12, 5, 48, 197, 88, 0, 0, 0, 0, 89, 53, 15, 12, 8, 154, 2, 16, 3, 86, 85, 126, 0, 0, + 0, 0, 89, 53, 21, 208, 43, 160, 216, 240, 2, 140, 168, 206, 0, 0, 0, 0, 89, 53, 21, 136, + 214, 251, 86, 200, 5, 246, 36, 108, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 196, 143, 72, 43, 189, 196, 235, 72, 200, 143, 170, 119, 170, 14, 207, 160, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 177, 44, 72, 181, 35, 106, 117, 91, 162, 103, 79, 11, 78, + 19, 252, 154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 184, 42, 92, 84, 222, 230, + 10, 98, 4, 230, 99, 133, 197, 27, 254, 186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 162, 126, 6, 242, 61, 40, 168, 218, 169, 143, 71, 23, 196, 156, 153, 165, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 205, 239, 124, 102, 148, 144, 125, 146, 206, 3, 105, 225, + 248, 47, 202, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 181, 12, 13, 236, 84, + 197, 170, 108, 219, 112, 173, 238, 156, 144, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 4, 81, 73, 188, 4, 159, 115, 0, 157, 56, 153, 159, 78, 129, 71, 60, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 209, 66, 138, 207, 127, 187, 145, 247, 131, 145, 228, 118, + 103, 112, 67, 178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 243, 190, 107, 187, + 182, 254, 134, 98, 69, 119, 21, 167, 187, 240, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 193, 218, 206, 184, 36, 238, 107, 27, 165, 73, 170, 187, 47, 157, 136, 15, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 158, 110, 146, 49, 211, 192, 184, 117, 18, + 88, 103, 37, 136, 172, 79, 27, 160, 123, 142, 7, 160, 51, 68, 215, 216, 144, 248, 89, 147, + 47, 63, 12, 16, 98, 29, 203, 214, 232, 245, 170, 92, 58, 10, 193, 138, 40, 201, 159, 227, + 160, 120, 50, 119, 93, 239, 86, 22, 245, 98, 16, 169, 180, 14, 136, 163, 121, 187, 213, + 145, 33, 8, 153, 67, 177, 91, 117, 75, 65, 71, 46, 130, 81, 192, + ]; + let compressed = compress(&block, blocks_swapper()); + let decompressed = decompress(&compressed, blocks_swapper()); + assert_eq!(decompressed.into_vec(), block); } diff --git a/util/rlp-derive/src/de.rs b/util/rlp-derive/src/de.rs index 234bcbcb8..1df1d6956 100644 --- a/util/rlp-derive/src/de.rs +++ b/util/rlp-derive/src/de.rs @@ -14,137 +14,154 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use proc_macro2::{Span, TokenStream}; use syn; -use proc_macro2::{TokenStream, Span}; struct ParseQuotes { - single: TokenStream, - list: TokenStream, - takes_index: bool, + single: TokenStream, + list: TokenStream, + takes_index: bool, } fn decodable_parse_quotes() -> ParseQuotes { - ParseQuotes { - single: quote! { rlp.val_at }, - list: quote! { rlp.list_at }, - takes_index: true, - } + ParseQuotes { + single: quote! { rlp.val_at }, + list: quote! { rlp.list_at }, + takes_index: true, + } } fn decodable_wrapper_parse_quotes() -> ParseQuotes { - ParseQuotes { - single: quote! { rlp.as_val }, - list: quote! { rlp.as_list }, - takes_index: false, - } + ParseQuotes { + single: quote! { rlp.as_val }, + list: quote! 
{ rlp.as_list }, + takes_index: false, + } } pub fn impl_decodable(ast: &syn::DeriveInput) -> TokenStream { - let body = match ast.data { - syn::Data::Struct(ref s) => s, - _ => panic!("#[derive(RlpDecodable)] is only defined for structs."), - }; + let body = match ast.data { + syn::Data::Struct(ref s) => s, + _ => panic!("#[derive(RlpDecodable)] is only defined for structs."), + }; - let stmts: Vec<_> = body.fields.iter().enumerate().map(decodable_field_map).collect(); - let name = &ast.ident; + let stmts: Vec<_> = body + .fields + .iter() + .enumerate() + .map(decodable_field_map) + .collect(); + let name = &ast.ident; - let dummy_const = syn::Ident::new(&format!("_IMPL_RLP_DECODABLE_FOR_{}", name), Span::call_site()); - let impl_block = quote! { - impl rlp::Decodable for #name { - fn decode(rlp: &rlp::Rlp) -> Result { - let result = #name { - #(#stmts)* - }; + let dummy_const = syn::Ident::new( + &format!("_IMPL_RLP_DECODABLE_FOR_{}", name), + Span::call_site(), + ); + let impl_block = quote! { + impl rlp::Decodable for #name { + fn decode(rlp: &rlp::Rlp) -> Result { + let result = #name { + #(#stmts)* + }; - Ok(result) - } - } - }; + Ok(result) + } + } + }; - quote! { - #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] - const #dummy_const: () = { - extern crate rlp; - #impl_block - }; - } + quote! 
{ + #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] + const #dummy_const: () = { + extern crate rlp; + #impl_block + }; + } } pub fn impl_decodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { - let body = match ast.data { - syn::Data::Struct(ref s) => s, - _ => panic!("#[derive(RlpDecodableWrapper)] is only defined for structs."), - }; + let body = match ast.data { + syn::Data::Struct(ref s) => s, + _ => panic!("#[derive(RlpDecodableWrapper)] is only defined for structs."), + }; - let stmt = { - let fields: Vec<_> = body.fields.iter().collect(); - if fields.len() == 1 { - let field = fields.first().expect("fields.len() == 1; qed"); - decodable_field(0, field, decodable_wrapper_parse_quotes()) - } else { - panic!("#[derive(RlpEncodableWrapper)] is only defined for structs with one field.") - } - }; + let stmt = { + let fields: Vec<_> = body.fields.iter().collect(); + if fields.len() == 1 { + let field = fields.first().expect("fields.len() == 1; qed"); + decodable_field(0, field, decodable_wrapper_parse_quotes()) + } else { + panic!("#[derive(RlpEncodableWrapper)] is only defined for structs with one field.") + } + }; - let name = &ast.ident; + let name = &ast.ident; - let dummy_const = syn::Ident::new(&format!("_IMPL_RLP_DECODABLE_FOR_{}", name), Span::call_site()); - let impl_block = quote! { - impl rlp::Decodable for #name { - fn decode(rlp: &rlp::Rlp) -> Result { - let result = #name { - #stmt - }; + let dummy_const = syn::Ident::new( + &format!("_IMPL_RLP_DECODABLE_FOR_{}", name), + Span::call_site(), + ); + let impl_block = quote! { + impl rlp::Decodable for #name { + fn decode(rlp: &rlp::Rlp) -> Result { + let result = #name { + #stmt + }; - Ok(result) - } - } - }; + Ok(result) + } + } + }; - quote! { - #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] - const #dummy_const: () = { - extern crate rlp; - #impl_block - }; - } + quote! 
{ + #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] + const #dummy_const: () = { + extern crate rlp; + #impl_block + }; + } } fn decodable_field_map(tuple: (usize, &syn::Field)) -> TokenStream { - decodable_field(tuple.0, tuple.1, decodable_parse_quotes()) + decodable_field(tuple.0, tuple.1, decodable_parse_quotes()) } fn decodable_field(index: usize, field: &syn::Field, quotes: ParseQuotes) -> TokenStream { - let id = match field.ident { - Some(ref ident) => quote! { #ident }, - None => { - let index: syn::Index = index.into(); - quote! { #index } - } - }; + let id = match field.ident { + Some(ref ident) => quote! { #ident }, + None => { + let index: syn::Index = index.into(); + quote! { #index } + } + }; - let index = quote! { #index }; + let index = quote! { #index }; - let single = quotes.single; - let list = quotes.list; + let single = quotes.single; + let list = quotes.list; - match field.ty { - syn::Type::Path(ref path) => { - let ident = &path.path.segments.first().expect("there must be at least 1 segment").value().ident; - if &ident.to_string() == "Vec" { - if quotes.takes_index { - quote! { #id: #list(#index)?, } - } else { - quote! { #id: #list()?, } - } - } else { - if quotes.takes_index { - quote! { #id: #single(#index)?, } - } else { - quote! { #id: #single()?, } - } - } - }, - _ => panic!("rlp_derive not supported"), - } + match field.ty { + syn::Type::Path(ref path) => { + let ident = &path + .path + .segments + .first() + .expect("there must be at least 1 segment") + .value() + .ident; + if &ident.to_string() == "Vec" { + if quotes.takes_index { + quote! { #id: #list(#index)?, } + } else { + quote! { #id: #list()?, } + } + } else { + if quotes.takes_index { + quote! { #id: #single(#index)?, } + } else { + quote! 
{ #id: #single()?, } + } + } + } + _ => panic!("rlp_derive not supported"), + } } diff --git a/util/rlp-derive/src/en.rs b/util/rlp-derive/src/en.rs index 95e5b9142..90b9f5150 100644 --- a/util/rlp-derive/src/en.rs +++ b/util/rlp-derive/src/en.rs @@ -14,110 +14,136 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use proc_macro2::{Span, TokenStream}; use syn; -use proc_macro2::{TokenStream, Span}; pub fn impl_encodable(ast: &syn::DeriveInput) -> TokenStream { - let body = match ast.data { - syn::Data::Struct(ref s) => s, - _ => panic!("#[derive(RlpEncodable)] is only defined for structs."), - }; + let body = match ast.data { + syn::Data::Struct(ref s) => s, + _ => panic!("#[derive(RlpEncodable)] is only defined for structs."), + }; - let stmts: Vec<_> = body.fields.iter().enumerate().map(encodable_field_map).collect(); - let name = &ast.ident; + let stmts: Vec<_> = body + .fields + .iter() + .enumerate() + .map(encodable_field_map) + .collect(); + let name = &ast.ident; - let stmts_len = stmts.len(); - let stmts_len = quote! { #stmts_len }; - let dummy_const = syn::Ident::new(&format!("_IMPL_RLP_ENCODABLE_FOR_{}", name), Span::call_site()); - let impl_block = quote! { - impl rlp::Encodable for #name { - fn rlp_append(&self, stream: &mut rlp::RlpStream) { - stream.begin_list(#stmts_len); - #(#stmts)* - } - } - }; + let stmts_len = stmts.len(); + let stmts_len = quote! { #stmts_len }; + let dummy_const = syn::Ident::new( + &format!("_IMPL_RLP_ENCODABLE_FOR_{}", name), + Span::call_site(), + ); + let impl_block = quote! { + impl rlp::Encodable for #name { + fn rlp_append(&self, stream: &mut rlp::RlpStream) { + stream.begin_list(#stmts_len); + #(#stmts)* + } + } + }; - quote! { - #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] - const #dummy_const: () = { - extern crate rlp; - #impl_block - }; - } + quote! 
{ + #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] + const #dummy_const: () = { + extern crate rlp; + #impl_block + }; + } } pub fn impl_encodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { - let body = match ast.data { - syn::Data::Struct(ref s) => s, - _ => panic!("#[derive(RlpEncodableWrapper)] is only defined for structs."), - }; + let body = match ast.data { + syn::Data::Struct(ref s) => s, + _ => panic!("#[derive(RlpEncodableWrapper)] is only defined for structs."), + }; - let stmt = { - let fields: Vec<_> = body.fields.iter().collect(); - if fields.len() == 1 { - let field = fields.first().expect("fields.len() == 1; qed"); - encodable_field(0, field) - } else { - panic!("#[derive(RlpEncodableWrapper)] is only defined for structs with one field.") - } - }; + let stmt = { + let fields: Vec<_> = body.fields.iter().collect(); + if fields.len() == 1 { + let field = fields.first().expect("fields.len() == 1; qed"); + encodable_field(0, field) + } else { + panic!("#[derive(RlpEncodableWrapper)] is only defined for structs with one field.") + } + }; - let name = &ast.ident; + let name = &ast.ident; - let dummy_const = syn::Ident::new(&format!("_IMPL_RLP_ENCODABLE_FOR_{}", name), Span::call_site()); - let impl_block = quote! { - impl rlp::Encodable for #name { - fn rlp_append(&self, stream: &mut rlp::RlpStream) { - #stmt - } - } - }; + let dummy_const = syn::Ident::new( + &format!("_IMPL_RLP_ENCODABLE_FOR_{}", name), + Span::call_site(), + ); + let impl_block = quote! { + impl rlp::Encodable for #name { + fn rlp_append(&self, stream: &mut rlp::RlpStream) { + #stmt + } + } + }; - quote! { - #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] - const #dummy_const: () = { - extern crate rlp; - #impl_block - }; - } + quote! 
{ + #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] + const #dummy_const: () = { + extern crate rlp; + #impl_block + }; + } } fn encodable_field_map(tuple: (usize, &syn::Field)) -> TokenStream { - encodable_field(tuple.0, tuple.1) + encodable_field(tuple.0, tuple.1) } fn encodable_field(index: usize, field: &syn::Field) -> TokenStream { - let ident = match field.ident { - Some(ref ident) => quote! { #ident }, - None => { - let index: syn::Index = index.into(); - quote! { #index } - } - }; + let ident = match field.ident { + Some(ref ident) => quote! { #ident }, + None => { + let index: syn::Index = index.into(); + quote! { #index } + } + }; - let id = quote! { self.#ident }; + let id = quote! { self.#ident }; - match field.ty { - syn::Type::Path(ref path) => { - let top_segment = path.path.segments.first().expect("there must be at least 1 segment"); - let ident = &top_segment.value().ident; - if &ident.to_string() == "Vec" { - let inner_ident = match top_segment.value().arguments { - syn::PathArguments::AngleBracketed(ref angle) => { - let ty = angle.args.first().expect("Vec has only one angle bracketed type; qed"); - match **ty.value() { - syn::GenericArgument::Type(syn::Type::Path(ref path)) => &path.path.segments.first().expect("there must be at least 1 segment").value().ident, - _ => panic!("rlp_derive not supported"), - } - }, - _ => unreachable!("Vec has only one angle bracketed type; qed"), - }; - quote! { stream.append_list::<#inner_ident, _>(&#id); } - } else { - quote! 
{ stream.append(&#id); } - } - }, - _ => panic!("rlp_derive not supported"), - } + match field.ty { + syn::Type::Path(ref path) => { + let top_segment = path + .path + .segments + .first() + .expect("there must be at least 1 segment"); + let ident = &top_segment.value().ident; + if &ident.to_string() == "Vec" { + let inner_ident = match top_segment.value().arguments { + syn::PathArguments::AngleBracketed(ref angle) => { + let ty = angle + .args + .first() + .expect("Vec has only one angle bracketed type; qed"); + match **ty.value() { + syn::GenericArgument::Type(syn::Type::Path(ref path)) => { + &path + .path + .segments + .first() + .expect("there must be at least 1 segment") + .value() + .ident + } + _ => panic!("rlp_derive not supported"), + } + } + _ => unreachable!("Vec has only one angle bracketed type; qed"), + }; + quote! { stream.append_list::<#inner_ident, _>(&#id); } + } else { + quote! { stream.append(&#id); } + } + } + _ => panic!("rlp_derive not supported"), + } } diff --git a/util/rlp-derive/src/lib.rs b/util/rlp-derive/src/lib.rs index 0f5d442f4..385102e3b 100644 --- a/util/rlp-derive/src/lib.rs +++ b/util/rlp-derive/src/lib.rs @@ -20,37 +20,37 @@ extern crate syn; #[macro_use] extern crate quote; -mod en; mod de; +mod en; -use proc_macro::TokenStream; -use en::{impl_encodable, impl_encodable_wrapper}; use de::{impl_decodable, impl_decodable_wrapper}; +use en::{impl_encodable, impl_encodable_wrapper}; +use proc_macro::TokenStream; #[proc_macro_derive(RlpEncodable)] pub fn encodable(input: TokenStream) -> TokenStream { - let ast = syn::parse(input).unwrap(); - let gen = impl_encodable(&ast); - gen.into() + let ast = syn::parse(input).unwrap(); + let gen = impl_encodable(&ast); + gen.into() } #[proc_macro_derive(RlpEncodableWrapper)] pub fn encodable_wrapper(input: TokenStream) -> TokenStream { - let ast = syn::parse(input).unwrap(); - let gen = impl_encodable_wrapper(&ast); - gen.into() + let ast = syn::parse(input).unwrap(); + let gen = 
impl_encodable_wrapper(&ast); + gen.into() } #[proc_macro_derive(RlpDecodable)] pub fn decodable(input: TokenStream) -> TokenStream { - let ast = syn::parse(input).unwrap(); - let gen = impl_decodable(&ast); - gen.into() + let ast = syn::parse(input).unwrap(); + let gen = impl_decodable(&ast); + gen.into() } #[proc_macro_derive(RlpDecodableWrapper)] pub fn decodable_wrapper(input: TokenStream) -> TokenStream { - let ast = syn::parse(input).unwrap(); - let gen = impl_decodable_wrapper(&ast); - gen.into() + let ast = syn::parse(input).unwrap(); + let gen = impl_decodable_wrapper(&ast); + gen.into() } diff --git a/util/rlp-derive/tests/rlp.rs b/util/rlp-derive/tests/rlp.rs index a6819ba4b..5ff915519 100644 --- a/util/rlp-derive/tests/rlp.rs +++ b/util/rlp-derive/tests/rlp.rs @@ -18,42 +18,38 @@ extern crate rlp; #[macro_use] extern crate rlp_derive; -use rlp::{encode, decode}; +use rlp::{decode, encode}; #[derive(Debug, PartialEq, RlpEncodable, RlpDecodable)] struct Foo { - a: String, + a: String, } #[derive(Debug, PartialEq, RlpEncodableWrapper, RlpDecodableWrapper)] struct FooWrapper { - a: String, + a: String, } #[test] fn test_encode_foo() { - let foo = Foo { - a: "cat".into(), - }; + let foo = Foo { a: "cat".into() }; - let expected = vec![0xc4, 0x83, b'c', b'a', b't']; - let out = encode(&foo); - assert_eq!(out, expected); + let expected = vec![0xc4, 0x83, b'c', b'a', b't']; + let out = encode(&foo); + assert_eq!(out, expected); - let decoded = decode(&expected).expect("decode failure"); - assert_eq!(foo, decoded); + let decoded = decode(&expected).expect("decode failure"); + assert_eq!(foo, decoded); } #[test] fn test_encode_foo_wrapper() { - let foo = FooWrapper { - a: "cat".into(), - }; + let foo = FooWrapper { a: "cat".into() }; - let expected = vec![0x83, b'c', b'a', b't']; - let out = encode(&foo); - assert_eq!(out, expected); + let expected = vec![0x83, b'c', b'a', b't']; + let out = encode(&foo); + assert_eq!(out, expected); - let decoded = 
decode(&expected).expect("decode failure"); - assert_eq!(foo, decoded); + let decoded = decode(&expected).expect("decode failure"); + assert_eq!(foo, decoded); } diff --git a/util/runtime/src/lib.rs b/util/runtime/src/lib.rs index 0c78c7f4a..d286ebf85 100644 --- a/util/runtime/src/lib.rs +++ b/util/runtime/src/lib.rs @@ -19,256 +19,270 @@ pub extern crate futures; pub extern crate tokio; -use std::{fmt, thread}; -use std::sync::mpsc; -use std::time::{Duration, Instant}; use futures::{future, Future, IntoFuture}; -pub use tokio::timer::Delay; -pub use tokio::runtime::{Runtime as TokioRuntime, Builder as TokioRuntimeBuilder, TaskExecutor}; +use std::{ + fmt, + sync::mpsc, + thread, + time::{Duration, Instant}, +}; +pub use tokio::{ + runtime::{Builder as TokioRuntimeBuilder, Runtime as TokioRuntime, TaskExecutor}, + timer::Delay, +}; /// Runtime for futures. /// /// Runs in a separate thread. pub struct Runtime { - executor: Executor, - handle: RuntimeHandle, + executor: Executor, + handle: RuntimeHandle, } impl Runtime { - fn new(runtime_bldr: &mut TokioRuntimeBuilder) -> Self { - let mut runtime = runtime_bldr - .build() - .expect("Building a Tokio runtime will only fail when mio components \ - cannot be initialized (catastrophic)"); - let (stop, stopped) = futures::oneshot(); - let (tx, rx) = mpsc::channel(); - let handle = thread::spawn(move || { - tx.send(runtime.executor()).expect("Rx is blocking upper thread."); - runtime.block_on(futures::empty().select(stopped).map(|_| ()).map_err(|_| ())) - .expect("Tokio runtime should not have unhandled errors."); - }); - let executor = rx.recv().expect("tx is transfered to a newly spawned thread."); + fn new(runtime_bldr: &mut TokioRuntimeBuilder) -> Self { + let mut runtime = runtime_bldr.build().expect( + "Building a Tokio runtime will only fail when mio components \ + cannot be initialized (catastrophic)", + ); + let (stop, stopped) = futures::oneshot(); + let (tx, rx) = mpsc::channel(); + let handle = 
thread::spawn(move || { + tx.send(runtime.executor()) + .expect("Rx is blocking upper thread."); + runtime + .block_on(futures::empty().select(stopped).map(|_| ()).map_err(|_| ())) + .expect("Tokio runtime should not have unhandled errors."); + }); + let executor = rx + .recv() + .expect("tx is transfered to a newly spawned thread."); - Runtime { - executor: Executor { - inner: Mode::Tokio(executor), - }, - handle: RuntimeHandle { - close: Some(stop), - handle: Some(handle), - }, - } - } + Runtime { + executor: Executor { + inner: Mode::Tokio(executor), + }, + handle: RuntimeHandle { + close: Some(stop), + handle: Some(handle), + }, + } + } - /// Spawns a new tokio runtime with a default thread count on a background - /// thread and returns a `Runtime` which can be used to spawn tasks via - /// its executor. - pub fn with_default_thread_count() -> Self { - let mut runtime_bldr = TokioRuntimeBuilder::new(); - Self::new(&mut runtime_bldr) - } + /// Spawns a new tokio runtime with a default thread count on a background + /// thread and returns a `Runtime` which can be used to spawn tasks via + /// its executor. + pub fn with_default_thread_count() -> Self { + let mut runtime_bldr = TokioRuntimeBuilder::new(); + Self::new(&mut runtime_bldr) + } - /// Spawns a new tokio runtime with a the specified thread count on a - /// background thread and returns a `Runtime` which can be used to spawn - /// tasks via its executor. - pub fn with_thread_count(thread_count: usize) -> Self { - let mut runtime_bldr = TokioRuntimeBuilder::new(); - runtime_bldr.core_threads(thread_count); + /// Spawns a new tokio runtime with a the specified thread count on a + /// background thread and returns a `Runtime` which can be used to spawn + /// tasks via its executor. 
+ pub fn with_thread_count(thread_count: usize) -> Self { + let mut runtime_bldr = TokioRuntimeBuilder::new(); + runtime_bldr.core_threads(thread_count); - Self::new(&mut runtime_bldr) - } + Self::new(&mut runtime_bldr) + } - /// Returns this runtime raw executor. - /// - /// Deprecated: Exists only to connect with current JSONRPC implementation. - pub fn raw_executor(&self) -> TaskExecutor { - if let Mode::Tokio(ref executor) = self.executor.inner { - executor.clone() - } else { - panic!("Runtime is not initialized in Tokio mode.") - } - } + /// Returns this runtime raw executor. + /// + /// Deprecated: Exists only to connect with current JSONRPC implementation. + pub fn raw_executor(&self) -> TaskExecutor { + if let Mode::Tokio(ref executor) = self.executor.inner { + executor.clone() + } else { + panic!("Runtime is not initialized in Tokio mode.") + } + } - /// Returns runtime executor. - pub fn executor(&self) -> Executor { - self.executor.clone() - } + /// Returns runtime executor. + pub fn executor(&self) -> Executor { + self.executor.clone() + } } #[derive(Clone)] enum Mode { - Tokio(TaskExecutor), - Sync, - ThreadPerFuture, + Tokio(TaskExecutor), + Sync, + ThreadPerFuture, } impl fmt::Debug for Mode { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::Mode::*; + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use self::Mode::*; - match *self { - Tokio(_) => write!(fmt, "tokio"), - Sync => write!(fmt, "synchronous"), - ThreadPerFuture => write!(fmt, "thread per future"), - } - } + match *self { + Tokio(_) => write!(fmt, "tokio"), + Sync => write!(fmt, "synchronous"), + ThreadPerFuture => write!(fmt, "thread per future"), + } + } } /// Returns a future which runs `f` until `duration` has elapsed, at which /// time `on_timeout` is run and the future resolves. 
-fn timeout(f: F, duration: Duration, on_timeout: T) - -> impl Future + Send + 'static +fn timeout( + f: F, + duration: Duration, + on_timeout: T, +) -> impl Future + Send + 'static where - T: FnOnce() -> () + Send + 'static, - F: FnOnce() -> R + Send + 'static, - R: IntoFuture + Send + 'static, - R::Future: Send + 'static, + T: FnOnce() -> () + Send + 'static, + F: FnOnce() -> R + Send + 'static, + R: IntoFuture + Send + 'static, + R::Future: Send + 'static, { - let future = future::lazy(f); - let timeout = Delay::new(Instant::now() + duration) - .then(move |_| { - on_timeout(); - Ok(()) - }); - future.select(timeout).then(|_| Ok(())) + let future = future::lazy(f); + let timeout = Delay::new(Instant::now() + duration).then(move |_| { + on_timeout(); + Ok(()) + }); + future.select(timeout).then(|_| Ok(())) } #[derive(Debug, Clone)] pub struct Executor { - inner: Mode, + inner: Mode, } impl Executor { - /// Executor for existing runtime. - /// - /// Deprecated: Exists only to connect with current JSONRPC implementation. - pub fn new(executor: TaskExecutor) -> Self { - Executor { - inner: Mode::Tokio(executor), - } - } + /// Executor for existing runtime. + /// + /// Deprecated: Exists only to connect with current JSONRPC implementation. + pub fn new(executor: TaskExecutor) -> Self { + Executor { + inner: Mode::Tokio(executor), + } + } - /// Synchronous executor, used mostly for tests. - pub fn new_sync() -> Self { - Executor { - inner: Mode::Sync, - } - } + /// Synchronous executor, used mostly for tests. + pub fn new_sync() -> Self { + Executor { inner: Mode::Sync } + } - /// Spawns a new thread for each future (use only for tests). - pub fn new_thread_per_future() -> Self { - Executor { - inner: Mode::ThreadPerFuture, - } - } + /// Spawns a new thread for each future (use only for tests). 
+ pub fn new_thread_per_future() -> Self { + Executor { + inner: Mode::ThreadPerFuture, + } + } - /// Spawn a future to this runtime - pub fn spawn(&self, r: R) where - R: IntoFuture + Send + 'static, - R::Future: Send + 'static, - { - match self.inner { - Mode::Tokio(ref executor) => executor.spawn(r.into_future()), - Mode::Sync => { - let _= r.into_future().wait(); - }, - Mode::ThreadPerFuture => { - thread::spawn(move || { - let _= r.into_future().wait(); - }); - }, - } - } + /// Spawn a future to this runtime + pub fn spawn(&self, r: R) + where + R: IntoFuture + Send + 'static, + R::Future: Send + 'static, + { + match self.inner { + Mode::Tokio(ref executor) => executor.spawn(r.into_future()), + Mode::Sync => { + let _ = r.into_future().wait(); + } + Mode::ThreadPerFuture => { + thread::spawn(move || { + let _ = r.into_future().wait(); + }); + } + } + } - /// Spawn a new future returned by given closure. - pub fn spawn_fn(&self, f: F) where - F: FnOnce() -> R + Send + 'static, - R: IntoFuture + Send + 'static, - R::Future: Send + 'static, - { - match self.inner { - Mode::Tokio(ref executor) => executor.spawn(future::lazy(f)), - Mode::Sync => { - let _ = future::lazy(f).wait(); - }, - Mode::ThreadPerFuture => { - thread::spawn(move || { - let _= f().into_future().wait(); - }); - }, - } - } + /// Spawn a new future returned by given closure. + pub fn spawn_fn(&self, f: F) + where + F: FnOnce() -> R + Send + 'static, + R: IntoFuture + Send + 'static, + R::Future: Send + 'static, + { + match self.inner { + Mode::Tokio(ref executor) => executor.spawn(future::lazy(f)), + Mode::Sync => { + let _ = future::lazy(f).wait(); + } + Mode::ThreadPerFuture => { + thread::spawn(move || { + let _ = f().into_future().wait(); + }); + } + } + } - /// Spawn a new future and wait for it or for a timeout to occur. 
- pub fn spawn_with_timeout(&self, f: F, duration: Duration, on_timeout: T) where - T: FnOnce() -> () + Send + 'static, - F: FnOnce() -> R + Send + 'static, - R: IntoFuture + Send + 'static, - R::Future: Send + 'static, - { - match self.inner { - Mode::Tokio(ref executor) => { - executor.spawn(timeout(f, duration, on_timeout)) - }, - Mode::Sync => { - let _ = timeout(f, duration, on_timeout).wait(); - }, - Mode::ThreadPerFuture => { - thread::spawn(move || { - let _ = timeout(f, duration, on_timeout).wait(); - }); - }, - } - } + /// Spawn a new future and wait for it or for a timeout to occur. + pub fn spawn_with_timeout(&self, f: F, duration: Duration, on_timeout: T) + where + T: FnOnce() -> () + Send + 'static, + F: FnOnce() -> R + Send + 'static, + R: IntoFuture + Send + 'static, + R::Future: Send + 'static, + { + match self.inner { + Mode::Tokio(ref executor) => executor.spawn(timeout(f, duration, on_timeout)), + Mode::Sync => { + let _ = timeout(f, duration, on_timeout).wait(); + } + Mode::ThreadPerFuture => { + thread::spawn(move || { + let _ = timeout(f, duration, on_timeout).wait(); + }); + } + } + } } impl + Send + 'static> future::Executor for Executor { - fn execute(&self, future: F) -> Result<(), future::ExecuteError> { - match self.inner { - Mode::Tokio(ref executor) => executor.execute(future), - Mode::Sync => { - let _= future.wait(); - Ok(()) - }, - Mode::ThreadPerFuture => { - thread::spawn(move || { - let _= future.wait(); - }); - Ok(()) - }, - } - } + fn execute(&self, future: F) -> Result<(), future::ExecuteError> { + match self.inner { + Mode::Tokio(ref executor) => executor.execute(future), + Mode::Sync => { + let _ = future.wait(); + Ok(()) + } + Mode::ThreadPerFuture => { + thread::spawn(move || { + let _ = future.wait(); + }); + Ok(()) + } + } + } } /// A handle to a runtime. Dropping the handle will cause runtime to shutdown. 
pub struct RuntimeHandle { - close: Option>, - handle: Option> + close: Option>, + handle: Option>, } impl From for RuntimeHandle { - fn from(el: Runtime) -> Self { - el.handle - } + fn from(el: Runtime) -> Self { + el.handle + } } impl Drop for RuntimeHandle { - fn drop(&mut self) { - self.close.take().map(|v| v.send(())); - } + fn drop(&mut self) { + self.close.take().map(|v| v.send(())); + } } impl RuntimeHandle { - /// Blocks current thread and waits until the runtime is finished. - pub fn wait(mut self) -> thread::Result<()> { - self.handle.take() - .expect("Handle is taken only in `wait`, `wait` is consuming; qed").join() - } + /// Blocks current thread and waits until the runtime is finished. + pub fn wait(mut self) -> thread::Result<()> { + self.handle + .take() + .expect("Handle is taken only in `wait`, `wait` is consuming; qed") + .join() + } - /// Finishes this runtime. - pub fn close(mut self) { - let _ = self.close.take() - .expect("Close is taken only in `close` and `drop`. `close` is consuming; qed") - .send(()); - } + /// Finishes this runtime. + pub fn close(mut self) { + let _ = self + .close + .take() + .expect("Close is taken only in `close` and `drop`. `close` is consuming; qed") + .send(()); + } } diff --git a/util/stats/src/lib.rs b/util/stats/src/lib.rs index e1ceb4fe8..b3c547b51 100644 --- a/util/stats/src/lib.rs +++ b/util/stats/src/lib.rs @@ -16,8 +16,10 @@ //! Statistical functions and helpers. 
-use std::iter::FromIterator; -use std::ops::{Add, Sub, Deref, Div}; +use std::{ + iter::FromIterator, + ops::{Add, Deref, Div, Sub}, +}; #[macro_use] extern crate log; @@ -27,158 +29,200 @@ extern crate log; pub struct Corpus(Vec); impl From> for Corpus { - fn from(mut data: Vec) -> Self { - data.sort(); - Corpus(data) - } + fn from(mut data: Vec) -> Self { + data.sort(); + Corpus(data) + } } impl FromIterator for Corpus { - fn from_iter>(iterable: I) -> Self { - iterable.into_iter().collect::>().into() - } + fn from_iter>(iterable: I) -> Self { + iterable.into_iter().collect::>().into() + } } impl Deref for Corpus { - type Target = [T]; + type Target = [T]; - fn deref(&self) -> &[T] { &self.0[..] } + fn deref(&self) -> &[T] { + &self.0[..] + } } impl Corpus { - /// Get given percentile (approximated). - pub fn percentile(&self, val: usize) -> Option<&T> { - let len = self.0.len(); - let x = val * len / 100; - let x = ::std::cmp::min(x, len); - if x == 0 { - return None; - } + /// Get given percentile (approximated). + pub fn percentile(&self, val: usize) -> Option<&T> { + let len = self.0.len(); + let x = val * len / 100; + let x = ::std::cmp::min(x, len); + if x == 0 { + return None; + } - self.0.get(x - 1) - } + self.0.get(x - 1) + } - /// Get the median element, if it exists. - pub fn median(&self) -> Option<&T> { - self.0.get(self.0.len() / 2) - } + /// Get the median element, if it exists. + pub fn median(&self) -> Option<&T> { + self.0.get(self.0.len() / 2) + } - /// Whether the corpus is empty. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } + /// Whether the corpus is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } - /// Number of elements in the corpus. - pub fn len(&self) -> usize { - self.0.len() - } + /// Number of elements in the corpus. 
+ pub fn len(&self) -> usize { + self.0.len() + } } impl Corpus - where T: Add + Sub + Div + From +where + T: Add + Sub + Div + From, { - /// Create a histogram of this corpus if it at least spans the buckets. Bounds are left closed. - /// Excludes outliers. - pub fn histogram(&self, bucket_number: usize) -> Option> { - // TODO: get outliers properly. - let upto = self.len() - self.len() / 40; - Histogram::create(&self.0[..upto], bucket_number) - } + /// Create a histogram of this corpus if it at least spans the buckets. Bounds are left closed. + /// Excludes outliers. + pub fn histogram(&self, bucket_number: usize) -> Option> { + // TODO: get outliers properly. + let upto = self.len() - self.len() / 40; + Histogram::create(&self.0[..upto], bucket_number) + } } /// Discretised histogram. #[derive(Debug, PartialEq)] pub struct Histogram { - /// Bounds of each bucket. - pub bucket_bounds: Vec, - /// Count within each bucket. - pub counts: Vec, + /// Bounds of each bucket. + pub bucket_bounds: Vec, + /// Count within each bucket. + pub counts: Vec, } impl Histogram - where T: Add + Sub + Div + From +where + T: Add + Sub + Div + From, { - // Histogram of a sorted corpus if it at least spans the buckets. Bounds are left closed. - fn create(corpus: &[T], bucket_number: usize) -> Option> { - if corpus.len() < 1 { return None; } - let corpus_end = corpus.last().expect("there is at least 1 element; qed").clone(); - let corpus_start = corpus.first().expect("there is at least 1 element; qed").clone(); - trace!(target: "stats", "Computing histogram from {} to {} with {} buckets.", corpus_start, corpus_end, bucket_number); - // Bucket needs to be at least 1 wide. - let bucket_size = { - // Round up to get the entire corpus included. 
- let raw_bucket_size = (corpus_end - corpus_start + bucket_number.into()) / bucket_number.into(); - if raw_bucket_size == 0.into() { 1.into() } else { raw_bucket_size } - }; - let mut bucket_end = corpus_start + bucket_size; + // Histogram of a sorted corpus if it at least spans the buckets. Bounds are left closed. + fn create(corpus: &[T], bucket_number: usize) -> Option> { + if corpus.len() < 1 { + return None; + } + let corpus_end = corpus + .last() + .expect("there is at least 1 element; qed") + .clone(); + let corpus_start = corpus + .first() + .expect("there is at least 1 element; qed") + .clone(); + trace!(target: "stats", "Computing histogram from {} to {} with {} buckets.", corpus_start, corpus_end, bucket_number); + // Bucket needs to be at least 1 wide. + let bucket_size = { + // Round up to get the entire corpus included. + let raw_bucket_size = + (corpus_end - corpus_start + bucket_number.into()) / bucket_number.into(); + if raw_bucket_size == 0.into() { + 1.into() + } else { + raw_bucket_size + } + }; + let mut bucket_end = corpus_start + bucket_size; - let mut bucket_bounds = vec![corpus_start; bucket_number + 1]; - let mut counts = vec![0; bucket_number]; - let mut corpus_i = 0; - // Go through the corpus adding to buckets. - for bucket in 0..bucket_number { - while corpus.get(corpus_i).map_or(false, |v| v < &bucket_end) { - // Initialized to size bucket_number above; iterates up to bucket_number; qed - counts[bucket] += 1; - corpus_i += 1; - } - // Initialized to size bucket_number + 1 above; iterates up to bucket_number; subscript is in range; qed - bucket_bounds[bucket + 1] = bucket_end; - bucket_end = bucket_end + bucket_size; - } - Some(Histogram { bucket_bounds: bucket_bounds, counts: counts }) - } + let mut bucket_bounds = vec![corpus_start; bucket_number + 1]; + let mut counts = vec![0; bucket_number]; + let mut corpus_i = 0; + // Go through the corpus adding to buckets. 
+ for bucket in 0..bucket_number { + while corpus.get(corpus_i).map_or(false, |v| v < &bucket_end) { + // Initialized to size bucket_number above; iterates up to bucket_number; qed + counts[bucket] += 1; + corpus_i += 1; + } + // Initialized to size bucket_number + 1 above; iterates up to bucket_number; subscript is in range; qed + bucket_bounds[bucket + 1] = bucket_end; + bucket_end = bucket_end + bucket_size; + } + Some(Histogram { + bucket_bounds: bucket_bounds, + counts: counts, + }) + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn check_corpus() { - let corpus = Corpus::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); - assert_eq!(corpus.percentile(0), None); - assert_eq!(corpus.percentile(1), None); - assert_eq!(corpus.percentile(101), Some(&10)); - assert_eq!(corpus.percentile(100), Some(&10)); - assert_eq!(corpus.percentile(50), Some(&5)); - assert_eq!(corpus.percentile(60), Some(&6)); - assert_eq!(corpus.median(), Some(&6)); - } + #[test] + fn check_corpus() { + let corpus = Corpus::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + assert_eq!(corpus.percentile(0), None); + assert_eq!(corpus.percentile(1), None); + assert_eq!(corpus.percentile(101), Some(&10)); + assert_eq!(corpus.percentile(100), Some(&10)); + assert_eq!(corpus.percentile(50), Some(&5)); + assert_eq!(corpus.percentile(60), Some(&6)); + assert_eq!(corpus.median(), Some(&6)); + } - #[test] - fn check_histogram() { - let hist = Histogram::create(&[643,689,1408,2000,2296,2512,4250,4320,4842,4958,5804,6065,6098,6354,7002,7145,7845,8589,8593,8895], 5).unwrap(); - let correct_bounds: Vec = vec![643, 2294, 3945, 5596, 7247, 8898]; - assert_eq!(Histogram { bucket_bounds: correct_bounds, counts: vec![4,2,4,6,4] }, hist); - } + #[test] + fn check_histogram() { + let hist = Histogram::create( + &[ + 643, 689, 1408, 2000, 2296, 2512, 4250, 4320, 4842, 4958, 5804, 6065, 6098, 6354, + 7002, 7145, 7845, 8589, 8593, 8895, + ], + 5, + ) + .unwrap(); + let correct_bounds: Vec = vec![643, 
2294, 3945, 5596, 7247, 8898]; + assert_eq!( + Histogram { + bucket_bounds: correct_bounds, + counts: vec![4, 2, 4, 6, 4] + }, + hist + ); + } - #[test] - fn smaller_data_range_than_bucket_range() { - assert_eq!( - Histogram::create(&[1, 2, 2], 3), - Some(Histogram { bucket_bounds: vec![1, 2, 3, 4], counts: vec![1, 2, 0] }) - ); - } + #[test] + fn smaller_data_range_than_bucket_range() { + assert_eq!( + Histogram::create(&[1, 2, 2], 3), + Some(Histogram { + bucket_bounds: vec![1, 2, 3, 4], + counts: vec![1, 2, 0] + }) + ); + } - #[test] - fn data_range_is_not_multiple_of_bucket_range() { - assert_eq!( - Histogram::create(&[1, 2, 5], 2), - Some(Histogram { bucket_bounds: vec![1, 4, 7], counts: vec![2, 1] }) - ); - } + #[test] + fn data_range_is_not_multiple_of_bucket_range() { + assert_eq!( + Histogram::create(&[1, 2, 5], 2), + Some(Histogram { + bucket_bounds: vec![1, 4, 7], + counts: vec![2, 1] + }) + ); + } - #[test] - fn data_range_is_multiple_of_bucket_range() { - assert_eq!( - Histogram::create(&[1, 2, 6], 2), - Some(Histogram { bucket_bounds: vec![1, 4, 7], counts: vec![2, 1] }) - ); - } + #[test] + fn data_range_is_multiple_of_bucket_range() { + assert_eq!( + Histogram::create(&[1, 2, 6], 2), + Some(Histogram { + bucket_bounds: vec![1, 4, 7], + counts: vec![2, 1] + }) + ); + } - #[test] - fn none_when_too_few_data() { - assert!(Histogram::::create(&[], 1).is_none()); - } + #[test] + fn none_when_too_few_data() { + assert!(Histogram::::create(&[], 1).is_none()); + } } diff --git a/util/time-utils/src/lib.rs b/util/time-utils/src/lib.rs index 0bfc3bb98..927ebc3cb 100644 --- a/util/time-utils/src/lib.rs +++ b/util/time-utils/src/lib.rs @@ -18,49 +18,63 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; /// Temporary trait for `checked operations` on SystemTime until these are available in the standard library pub trait CheckedSystemTime { - /// Returns `Some` when the result less or equal to `i32::max_value` to prevent `SystemTime` to panic because - /// it 
is platform specific, possible representations are i32, i64, u64 or Duration. `None` otherwise - fn checked_add(self, _d: Duration) -> Option; - /// Returns `Some` when the result is successful and `None` when it is not - fn checked_sub(self, _d: Duration) -> Option; + /// Returns `Some` when the result less or equal to `i32::max_value` to prevent `SystemTime` to panic because + /// it is platform specific, possible representations are i32, i64, u64 or Duration. `None` otherwise + fn checked_add(self, _d: Duration) -> Option; + /// Returns `Some` when the result is successful and `None` when it is not + fn checked_sub(self, _d: Duration) -> Option; } impl CheckedSystemTime for SystemTime { - fn checked_add(self, dur: Duration) -> Option { - let this_dur = self.duration_since(UNIX_EPOCH).ok()?; - let total_time = this_dur.checked_add(dur)?; + fn checked_add(self, dur: Duration) -> Option { + let this_dur = self.duration_since(UNIX_EPOCH).ok()?; + let total_time = this_dur.checked_add(dur)?; - if total_time.as_secs() <= i32::max_value() as u64 { - Some(self + dur) - } else { - None - } - } + if total_time.as_secs() <= i32::max_value() as u64 { + Some(self + dur) + } else { + None + } + } - fn checked_sub(self, dur: Duration) -> Option { - let this_dur = self.duration_since(UNIX_EPOCH).ok()?; - let total_time = this_dur.checked_sub(dur)?; + fn checked_sub(self, dur: Duration) -> Option { + let this_dur = self.duration_since(UNIX_EPOCH).ok()?; + let total_time = this_dur.checked_sub(dur)?; - if total_time.as_secs() <= i32::max_value() as u64 { - Some(self - dur) - } else { - None - } - } + if total_time.as_secs() <= i32::max_value() as u64 { + Some(self - dur) + } else { + None + } + } } #[cfg(test)] mod tests { #[test] fn it_works() { - use super::CheckedSystemTime; - use std::time::{Duration, SystemTime, UNIX_EPOCH}; + use super::CheckedSystemTime; + use std::time::{Duration, SystemTime, UNIX_EPOCH}; - assert!(CheckedSystemTime::checked_add(UNIX_EPOCH, 
Duration::new(i32::max_value() as u64 + 1, 0)).is_none()); - assert!(CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::new(i32::max_value() as u64, 0)).is_some()); - assert!(CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::new(i32::max_value() as u64 - 1, 1_000_000_000)).is_some()); + assert!(CheckedSystemTime::checked_add( + UNIX_EPOCH, + Duration::new(i32::max_value() as u64 + 1, 0) + ) + .is_none()); + assert!(CheckedSystemTime::checked_add( + UNIX_EPOCH, + Duration::new(i32::max_value() as u64, 0) + ) + .is_some()); + assert!(CheckedSystemTime::checked_add( + UNIX_EPOCH, + Duration::new(i32::max_value() as u64 - 1, 1_000_000_000) + ) + .is_some()); - assert!(CheckedSystemTime::checked_sub(UNIX_EPOCH, Duration::from_secs(120)).is_none()); - assert!(CheckedSystemTime::checked_sub(SystemTime::now(), Duration::from_secs(1000)).is_some()); - } + assert!(CheckedSystemTime::checked_sub(UNIX_EPOCH, Duration::from_secs(120)).is_none()); + assert!( + CheckedSystemTime::checked_sub(SystemTime::now(), Duration::from_secs(1000)).is_some() + ); + } } diff --git a/util/triehash-ethereum/src/lib.rs b/util/triehash-ethereum/src/lib.rs index 696ed61ab..6bbf40e7b 100644 --- a/util/triehash-ethereum/src/lib.rs +++ b/util/triehash-ethereum/src/lib.rs @@ -54,34 +54,41 @@ where #[cfg(test)] mod tests { - use super::{trie_root, sec_trie_root, ordered_trie_root}; + use super::{ordered_trie_root, sec_trie_root, trie_root}; + use keccak_hasher::KeccakHasher; use triehash; - use keccak_hasher::KeccakHasher; - #[test] - fn simple_test() { - assert_eq!(trie_root(vec![ - (b"A", b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" as &[u8]) - ]), "d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab".into()); - } + #[test] + fn simple_test() { + assert_eq!( + trie_root(vec![( + b"A", + b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" as &[u8] + )]), + "d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab".into() + ); + } - #[test] - fn proxy_works() { - 
let input = vec![(b"A", b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" as &[u8])]; - assert_eq!( + #[test] + fn proxy_works() { + let input = vec![( + b"A", + b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" as &[u8], + )]; + assert_eq!( trie_root(input.clone()), triehash::trie_root::(input.clone()) ); - assert_eq!( + assert_eq!( sec_trie_root(input.clone()), triehash::sec_trie_root::(input.clone()) ); let data = &["cake", "pie", "candy"]; - assert_eq!( + assert_eq!( ordered_trie_root(data), triehash::ordered_trie_root::(data) ); - } + } } diff --git a/util/unexpected/src/lib.rs b/util/unexpected/src/lib.rs index 9a1a709be..dd93bd1eb 100644 --- a/util/unexpected/src/lib.rs +++ b/util/unexpected/src/lib.rs @@ -21,50 +21,54 @@ use std::fmt; #[derive(Debug, PartialEq, Eq, Clone, Copy)] /// Error indicating an expected value was not found. pub struct Mismatch { - /// Value expected. - pub expected: T, - /// Value found. - pub found: T, + /// Value expected. + pub expected: T, + /// Value found. + pub found: T, } impl fmt::Display for Mismatch { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_fmt(format_args!("Expected {}, found {}", self.expected, self.found)) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_fmt(format_args!( + "Expected {}, found {}", + self.expected, self.found + )) + } } #[derive(Debug, PartialEq, Eq, Clone, Copy)] /// Error indicating value found is outside of a valid range. pub struct OutOfBounds { - /// Minimum allowed value. - pub min: Option, - /// Maximum allowed value. - pub max: Option, - /// Value found. - pub found: T, + /// Minimum allowed value. + pub min: Option, + /// Maximum allowed value. + pub max: Option, + /// Value found. 
+ pub found: T, } impl OutOfBounds { - pub fn map(self, map: F) -> OutOfBounds - where F: Fn(T) -> U - { - OutOfBounds { - min: self.min.map(&map), - max: self.max.map(&map), - found: map(self.found), - } - } + pub fn map(self, map: F) -> OutOfBounds + where + F: Fn(T) -> U, + { + OutOfBounds { + min: self.min.map(&map), + max: self.max.map(&map), + found: map(self.found), + } + } } impl fmt::Display for OutOfBounds { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let msg = match (self.min.as_ref(), self.max.as_ref()) { - (Some(min), Some(max)) => format!("Min={}, Max={}", min, max), - (Some(min), _) => format!("Min={}", min), - (_, Some(max)) => format!("Max={}", max), - (None, None) => "".into(), - }; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let msg = match (self.min.as_ref(), self.max.as_ref()) { + (Some(min), Some(max)) => format!("Min={}, Max={}", min, max), + (Some(min), _) => format!("Min={}", min), + (_, Some(max)) => format!("Max={}", max), + (None, None) => "".into(), + }; - f.write_fmt(format_args!("Value {} out of bounds. {}", self.found, msg)) - } + f.write_fmt(format_args!("Value {} out of bounds. 
{}", self.found, msg)) + } } diff --git a/util/version/build.rs b/util/version/build.rs index 3ee37fbca..1b7d103d1 100644 --- a/util/version/build.rs +++ b/util/version/build.rs @@ -18,23 +18,25 @@ extern crate rustc_version; extern crate toml; extern crate vergen; -use std::env; -use std::fs::File; -use std::io::Write; -use std::path::Path; +use std::{env, fs::File, io::Write, path::Path}; use vergen::{vergen, OutputFns}; const ERROR_MSG: &'static str = "Failed to generate metadata files"; fn main() { - vergen(OutputFns::all()).expect(ERROR_MSG); + vergen(OutputFns::all()).expect(ERROR_MSG); - let version = rustc_version::version().expect(ERROR_MSG); + let version = rustc_version::version().expect(ERROR_MSG); - let cargo: toml::Value = toml::from_str(include_str!("./Cargo.toml")).expect(ERROR_MSG); - let track = cargo["package"]["metadata"]["track"].as_str().expect("'track' has to be a string!"); + let cargo: toml::Value = toml::from_str(include_str!("./Cargo.toml")).expect(ERROR_MSG); + let track = cargo["package"]["metadata"]["track"] + .as_str() + .expect("'track' has to be a string!"); - create_file("meta.rs", format!(" + create_file( + "meta.rs", + format!( + " /// This versions track. 
#[allow(unused)] pub const TRACK: &str = {track:?}; @@ -44,14 +46,15 @@ fn main() { \"{version}\" }} ", - track = track, - version = version, - )); + track = track, + version = version, + ), + ); } fn create_file(filename: &str, data: String) { - let out_dir = env::var("OUT_DIR").expect(ERROR_MSG); - let dest_path = Path::new(&out_dir).join(filename); - let mut f = File::create(&dest_path).expect(ERROR_MSG); - f.write_all(data.as_bytes()).expect(ERROR_MSG); + let out_dir = env::var("OUT_DIR").expect(ERROR_MSG); + let dest_path = Path::new(&out_dir).join(filename); + let mut f = File::create(&dest_path).expect(ERROR_MSG); + f.write_all(data.as_bytes()).expect(ERROR_MSG); } diff --git a/util/version/src/lib.rs b/util/version/src/lib.rs index 00f9f4550..c2dc6908c 100644 --- a/util/version/src/lib.rs +++ b/util/version/src/lib.rs @@ -16,21 +16,21 @@ //! Parity version specific information. -extern crate target_info; extern crate parity_bytes as bytes; extern crate rlp; +extern crate target_info; -use target_info::Target; use bytes::Bytes; use rlp::RlpStream; +use target_info::Target; mod vergen { - #![allow(unused)] - include!(concat!(env!("OUT_DIR"), "/version.rs")); + #![allow(unused)] + include!(concat!(env!("OUT_DIR"), "/version.rs")); } mod generated { - include!(concat!(env!("OUT_DIR"), "/meta.rs")); + include!(concat!(env!("OUT_DIR"), "/meta.rs")); } #[cfg(feature = "final")] @@ -43,35 +43,52 @@ const THIS_TRACK: &'static str = "unstable"; /// Get the platform identifier. pub fn platform() -> String { - let env = Target::env(); - let env_dash = if env.is_empty() { "" } else { "-" }; - format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env) + let env = Target::env(); + let env_dash = if env.is_empty() { "" } else { "-" }; + format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env) } /// Get the standard version string for this software. 
pub fn version() -> String { - let sha3 = vergen::short_sha(); - let sha3_dash = if sha3.is_empty() { "" } else { "-" }; - let commit_date = vergen::commit_date().replace("-", ""); - let date_dash = if commit_date.is_empty() { "" } else { "-" }; - format!("Parity-Ethereum/v{}-{}{}{}{}{}/{}/rustc{}", env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, sha3, date_dash, commit_date, platform(), generated::rustc_version()) + let sha3 = vergen::short_sha(); + let sha3_dash = if sha3.is_empty() { "" } else { "-" }; + let commit_date = vergen::commit_date().replace("-", ""); + let date_dash = if commit_date.is_empty() { "" } else { "-" }; + format!( + "Parity-Ethereum/v{}-{}{}{}{}{}/{}/rustc{}", + env!("CARGO_PKG_VERSION"), + THIS_TRACK, + sha3_dash, + sha3, + date_dash, + commit_date, + platform(), + generated::rustc_version() + ) } /// Get the standard version data for this software. pub fn version_data() -> Bytes { - let mut s = RlpStream::new_list(4); - let v = - (env!("CARGO_PKG_VERSION_MAJOR").parse::().expect("Environment variables are known to be valid; qed") << 16) + - (env!("CARGO_PKG_VERSION_MINOR").parse::().expect("Environment variables are known to be valid; qed") << 8) + - env!("CARGO_PKG_VERSION_PATCH").parse::().expect("Environment variables are known to be valid; qed"); - s.append(&v); - s.append(&"Parity-Ethereum"); - s.append(&generated::rustc_version()); - s.append(&&Target::os()[0..2]); - s.out() + let mut s = RlpStream::new_list(4); + let v = (env!("CARGO_PKG_VERSION_MAJOR") + .parse::() + .expect("Environment variables are known to be valid; qed") + << 16) + + (env!("CARGO_PKG_VERSION_MINOR") + .parse::() + .expect("Environment variables are known to be valid; qed") + << 8) + + env!("CARGO_PKG_VERSION_PATCH") + .parse::() + .expect("Environment variables are known to be valid; qed"); + s.append(&v); + s.append(&"Parity-Ethereum"); + s.append(&generated::rustc_version()); + s.append(&&Target::os()[0..2]); + s.out() } /// Provide raw information on the 
package. pub fn raw_package_info() -> (&'static str, &'static str, &'static str) { - (THIS_TRACK, env!["CARGO_PKG_VERSION"], vergen::sha()) + (THIS_TRACK, env!["CARGO_PKG_VERSION"], vergen::sha()) } diff --git a/whisper/cli/src/main.rs b/whisper/cli/src/main.rs index 41d9e80e4..b2b837ddc 100644 --- a/whisper/cli/src/main.rs +++ b/whisper/cli/src/main.rs @@ -30,10 +30,10 @@ extern crate panic_hook; extern crate parity_whisper as whisper; extern crate serde; -extern crate jsonrpc_core; -extern crate jsonrpc_pubsub; -extern crate jsonrpc_http_server; extern crate ethkey; +extern crate jsonrpc_core; +extern crate jsonrpc_http_server; +extern crate jsonrpc_pubsub; extern crate rustc_hex; #[macro_use] @@ -43,14 +43,18 @@ extern crate log as rlog; extern crate serde_derive; use docopt::Docopt; -use std::{fmt, io, process, env, sync::Arc}; -use jsonrpc_core::{Metadata, MetaIoHandler}; -use jsonrpc_pubsub::{PubSubMetadata, Session}; -use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation}; -use std::net::{SocketAddr, SocketAddrV4, Ipv4Addr}; -use std::str::FromStr; use ethkey::Secret; +use jsonrpc_core::{MetaIoHandler, Metadata}; +use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation}; +use jsonrpc_pubsub::{PubSubMetadata, Session}; use rustc_hex::FromHex; +use std::{ + env, fmt, io, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + process, + str::FromStr, + sync::Arc, +}; const POOL_UNIT: usize = 1024 * 1024; const USAGE: &'static str = r#" @@ -78,266 +82,292 @@ struct Meta; impl Metadata for Meta {} impl PubSubMetadata for Meta { - fn session(&self) -> Option> { - None - } + fn session(&self) -> Option> { + None + } } #[derive(Debug, Deserialize)] struct Args { - flag_whisper_pool_size: usize, - flag_port: String, - flag_address: String, - flag_rpc_port: String, - flag_rpc_address: String, - flag_log: String, - flag_secret: String, + flag_whisper_pool_size: usize, + flag_port: String, + flag_address: String, + flag_rpc_port: String, + 
flag_rpc_address: String, + flag_log: String, + flag_secret: String, } struct WhisperPoolHandle { - /// Pool handle. - handle: Arc>>, - /// Network manager. - net: Arc, + /// Pool handle. + handle: Arc>>, + /// Network manager. + net: Arc, } impl whisper::rpc::PoolHandle for WhisperPoolHandle { - fn relay(&self, message: whisper::message::Message) -> bool { - let mut res = false; - let mut message = Some(message); - self.with_proto_context(whisper::net::PROTOCOL_ID, &mut |ctx| { - if let Some(message) = message.take() { - res = self.handle.post_message(message, ctx); - } - }); - res - } + fn relay(&self, message: whisper::message::Message) -> bool { + let mut res = false; + let mut message = Some(message); + self.with_proto_context(whisper::net::PROTOCOL_ID, &mut |ctx| { + if let Some(message) = message.take() { + res = self.handle.post_message(message, ctx); + } + }); + res + } - fn pool_status(&self) -> whisper::net::PoolStatus { - self.handle.pool_status() - } + fn pool_status(&self) -> whisper::net::PoolStatus { + self.handle.pool_status() + } } impl WhisperPoolHandle { - fn with_proto_context(&self, proto: net::ProtocolId, f: &mut FnMut(&net::NetworkContext)) { - self.net.with_context_eval(proto, f); - } + fn with_proto_context(&self, proto: net::ProtocolId, f: &mut FnMut(&net::NetworkContext)) { + self.net.with_context_eval(proto, f); + } } struct RpcFactory { - handle: Arc>>, - manager: Arc, + handle: Arc>>, + manager: Arc, } impl RpcFactory { - fn make_handler(&self, net: Arc) -> whisper::rpc::WhisperClient { - let whisper_pool_handle = WhisperPoolHandle { handle: self.handle.clone(), net: net }; - whisper::rpc::WhisperClient::new(whisper_pool_handle, self.manager.clone()) - } + fn make_handler( + &self, + net: Arc, + ) -> whisper::rpc::WhisperClient { + let whisper_pool_handle = WhisperPoolHandle { + handle: self.handle.clone(), + net: net, + }; + whisper::rpc::WhisperClient::new(whisper_pool_handle, self.manager.clone()) + } } #[derive(Debug)] enum Error 
{ - Docopt(docopt::Error), - Io(io::Error), - JsonRpc(jsonrpc_core::Error), - Network(net::Error), - SockAddr(std::net::AddrParseError), - FromHex(rustc_hex::FromHexError), - ParseInt(std::num::ParseIntError), + Docopt(docopt::Error), + Io(io::Error), + JsonRpc(jsonrpc_core::Error), + Network(net::Error), + SockAddr(std::net::AddrParseError), + FromHex(rustc_hex::FromHexError), + ParseInt(std::num::ParseIntError), } impl From for Error { - fn from(err: std::net::AddrParseError) -> Self { - Error::SockAddr(err) - } + fn from(err: std::net::AddrParseError) -> Self { + Error::SockAddr(err) + } } impl From for Error { - fn from(err: net::Error) -> Self { - Error::Network(err) - } + fn from(err: net::Error) -> Self { + Error::Network(err) + } } impl From for Error { - fn from(err: docopt::Error) -> Self { - Error::Docopt(err) - } + fn from(err: docopt::Error) -> Self { + Error::Docopt(err) + } } impl From for Error { - fn from(err: io::Error) -> Self { - Error::Io(err) - } + fn from(err: io::Error) -> Self { + Error::Io(err) + } } impl From for Error { - fn from(err: jsonrpc_core::Error) -> Self { - Error::JsonRpc(err) - } + fn from(err: jsonrpc_core::Error) -> Self { + Error::JsonRpc(err) + } } impl From for Error { - fn from(err: rustc_hex::FromHexError) -> Self { - Error::FromHex(err) - } + fn from(err: rustc_hex::FromHexError) -> Self { + Error::FromHex(err) + } } impl From for Error { - fn from(err: std::num::ParseIntError) -> Self { - Error::ParseInt(err) - } + fn from(err: std::num::ParseIntError) -> Self { + Error::ParseInt(err) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Error::SockAddr(ref e) => write!(f, "{}", e), - Error::Docopt(ref e) => write!(f, "{}", e), - Error::Io(ref e) => write!(f, "{}", e), - Error::JsonRpc(ref e) => write!(f, "{:?}", e), - Error::Network(ref e) => write!(f, "{}", e), - Error::ParseInt(ref e) => write!(f, "Invalid port: {}", e), - Error::FromHex(ref e) => 
write!(f, "Error deciphering key: {}", e), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::SockAddr(ref e) => write!(f, "{}", e), + Error::Docopt(ref e) => write!(f, "{}", e), + Error::Io(ref e) => write!(f, "{}", e), + Error::JsonRpc(ref e) => write!(f, "{:?}", e), + Error::Network(ref e) => write!(f, "{}", e), + Error::ParseInt(ref e) => write!(f, "Invalid port: {}", e), + Error::FromHex(ref e) => write!(f, "Error deciphering key: {}", e), + } + } } fn main() { - panic_hook::set_abort(); + panic_hook::set_abort(); - match execute(env::args()) { - Ok(_) => { - println!("whisper-cli terminated"); - process::exit(1); - }, - Err(Error::Docopt(ref e)) => e.exit(), - Err(err) => { - println!("{}", err); - process::exit(1); - } - } + match execute(env::args()) { + Ok(_) => { + println!("whisper-cli terminated"); + process::exit(1); + } + Err(Error::Docopt(ref e)) => e.exit(), + Err(err) => { + println!("{}", err); + process::exit(1); + } + } } -fn execute(command: I) -> Result<(), Error> where I: IntoIterator, S: AsRef { +fn execute(command: I) -> Result<(), Error> +where + I: IntoIterator, + S: AsRef, +{ + // Parse arguments + let args: Args = Docopt::new(USAGE).and_then(|d| d.argv(command).deserialize())?; + let pool_size = args.flag_whisper_pool_size * POOL_UNIT; + let rpc_url = format!("{}:{}", args.flag_rpc_address, args.flag_rpc_port); - // Parse arguments - let args: Args = Docopt::new(USAGE).and_then(|d| d.argv(command).deserialize())?; - let pool_size = args.flag_whisper_pool_size * POOL_UNIT; - let rpc_url = format!("{}:{}", args.flag_rpc_address, args.flag_rpc_port); + initialize_logger(args.flag_log); + info!(target: "whisper-cli", "start"); - initialize_logger(args.flag_log); - info!(target: "whisper-cli", "start"); + // Filter manager that will dispatch `decryption tasks` + let manager = Arc::new(whisper::rpc::FilterManager::new()?); - // Filter manager that will dispatch `decryption tasks` - let manager 
= Arc::new(whisper::rpc::FilterManager::new()?); + // Whisper protocol network handler + let whisper_network_handler = Arc::new(whisper::net::Network::new(pool_size, manager.clone())); - // Whisper protocol network handler - let whisper_network_handler = Arc::new(whisper::net::Network::new(pool_size, manager.clone())); + let network_config = { + let mut cfg = net::NetworkConfiguration::new(); + let port = match args.flag_port.as_str() { + "random" => 0 as u16, + port => port.parse::()?, + }; + let addr = Ipv4Addr::from_str(&args.flag_address[..])?; + cfg.listen_address = Some(SocketAddr::V4(SocketAddrV4::new(addr, port))); + cfg.use_secret = match args.flag_secret.as_str() { + "" => None, + fname => { + let key_text = std::fs::read_to_string(fname)?; + let key: Vec = FromHex::from_hex(key_text.as_str())?; + Secret::from_slice(key.as_slice()) + } + }; + cfg.nat_enabled = false; + cfg + }; - let network_config = { - let mut cfg = net::NetworkConfiguration::new(); - let port = match args.flag_port.as_str() { - "random" => 0 as u16, - port => port.parse::()?, + // Create network service + let network = devp2p::NetworkService::new(network_config, None)?; - }; - let addr = Ipv4Addr::from_str(&args.flag_address[..])?; - cfg.listen_address = Some(SocketAddr::V4(SocketAddrV4::new(addr, port))); - cfg.use_secret = match args.flag_secret.as_str() { - "" => None, - fname => { - let key_text = std::fs::read_to_string(fname)?; - let key : Vec = FromHex::from_hex(key_text.as_str())?; - Secret::from_slice(key.as_slice()) - } - }; - cfg.nat_enabled = false; - cfg - }; + // Start network service + network.start().map_err(|(err, _)| err)?; - // Create network service - let network = devp2p::NetworkService::new(network_config, None)?; + // Attach whisper protocol to the network service + network.register_protocol( + whisper_network_handler.clone(), + whisper::net::PROTOCOL_ID, + whisper::net::SUPPORTED_VERSIONS, + )?; + network.register_protocol( + 
Arc::new(whisper::net::ParityExtensions), + whisper::net::PARITY_PROTOCOL_ID, + whisper::net::SUPPORTED_VERSIONS, + )?; - // Start network service - network.start().map_err(|(err, _)| err)?; + // Request handler + let mut io = MetaIoHandler::default(); - // Attach whisper protocol to the network service - network.register_protocol(whisper_network_handler.clone(), whisper::net::PROTOCOL_ID, - whisper::net::SUPPORTED_VERSIONS)?; - network.register_protocol(Arc::new(whisper::net::ParityExtensions), whisper::net::PARITY_PROTOCOL_ID, - whisper::net::SUPPORTED_VERSIONS)?; + // Shared network service + let shared_network = Arc::new(network); - // Request handler - let mut io = MetaIoHandler::default(); + // Pool handler + let whisper_factory = RpcFactory { + handle: whisper_network_handler, + manager: manager, + }; - // Shared network service - let shared_network = Arc::new(network); + io.extend_with(whisper::rpc::Whisper::to_delegate( + whisper_factory.make_handler(shared_network.clone()), + )); + io.extend_with(whisper::rpc::WhisperPubSub::to_delegate( + whisper_factory.make_handler(shared_network.clone()), + )); - // Pool handler - let whisper_factory = RpcFactory { handle: whisper_network_handler, manager: manager }; + let server = jsonrpc_http_server::ServerBuilder::new(io) + .cors(DomainsValidation::AllowOnly(vec![ + AccessControlAllowOrigin::Null, + ])) + .start_http(&rpc_url.parse()?)?; - io.extend_with(whisper::rpc::Whisper::to_delegate(whisper_factory.make_handler(shared_network.clone()))); - io.extend_with(whisper::rpc::WhisperPubSub::to_delegate(whisper_factory.make_handler(shared_network.clone()))); + server.wait(); - let server = jsonrpc_http_server::ServerBuilder::new(io) - .cors(DomainsValidation::AllowOnly(vec![AccessControlAllowOrigin::Null])) - .start_http(&rpc_url.parse()?)?; - - server.wait(); - - // This will never return if the http server runs without errors - Ok(()) + // This will never return if the http server runs without errors + Ok(()) } fn 
initialize_logger(log_level: String) { - env_logger::Builder::from_env(env_logger::Env::default()) - .parse(&log_level) - .init(); + env_logger::Builder::from_env(env_logger::Env::default()) + .parse(&log_level) + .init(); } #[cfg(test)] mod tests { - use super::execute; + use super::execute; - #[test] - fn invalid_argument() { - let command = vec!["whisper", "--foo=12"] - .into_iter() - .map(Into::into) - .collect::>(); + #[test] + fn invalid_argument() { + let command = vec!["whisper", "--foo=12"] + .into_iter() + .map(Into::into) + .collect::>(); - assert!(execute(command).is_err()); - } + assert!(execute(command).is_err()); + } - #[test] - #[ignore] - fn privileged_port() { - let command = vec!["whisper", "--port=3"] - .into_iter() - .map(Into::into) - .collect::>(); + #[test] + #[ignore] + fn privileged_port() { + let command = vec!["whisper", "--port=3"] + .into_iter() + .map(Into::into) + .collect::>(); - assert!(execute(command).is_err()); - } + assert!(execute(command).is_err()); + } - #[test] - fn invalid_ip_address() { - let command = vec!["whisper", "--address=x.x.x.x"] - .into_iter() - .map(Into::into) - .collect::>(); + #[test] + fn invalid_ip_address() { + let command = vec!["whisper", "--address=x.x.x.x"] + .into_iter() + .map(Into::into) + .collect::>(); - assert!(execute(command).is_err()); - } + assert!(execute(command).is_err()); + } - #[test] - fn invalid_whisper_pool_size() { - let command = vec!["whisper", "--whisper-pool-size=-100000000000000000000000000000000000000"] - .into_iter() - .map(Into::into) - .collect::>(); + #[test] + fn invalid_whisper_pool_size() { + let command = vec![ + "whisper", + "--whisper-pool-size=-100000000000000000000000000000000000000", + ] + .into_iter() + .map(Into::into) + .collect::>(); - assert!(execute(command).is_err()); - } + assert!(execute(command).is_err()); + } } diff --git a/whisper/src/lib.rs b/whisper/src/lib.rs index cdc88780d..b38903d42 100644 --- a/whisper/src/lib.rs +++ b/whisper/src/lib.rs @@ 
-20,13 +20,13 @@ #![cfg_attr(feature = "time_checked_add", feature(time_checked_add))] extern crate byteorder; -extern crate parity_crypto as crypto; extern crate ethcore_network as network; extern crate ethereum_types; extern crate ethkey; extern crate hex; extern crate memzero; extern crate ordered_float; +extern crate parity_crypto as crypto; extern crate parking_lot; extern crate rand; extern crate rlp; @@ -54,8 +54,10 @@ extern crate time_utils; #[cfg(test)] extern crate serde_json; -pub use self::message::Message; -pub use self::net::{Network, MessageHandler}; +pub use self::{ + message::Message, + net::{MessageHandler, Network}, +}; pub mod message; pub mod net; diff --git a/whisper/src/message.rs b/whisper/src/message.rs index c10d39700..7d1df9b34 100644 --- a/whisper/src/message.rs +++ b/whisper/src/message.rs @@ -16,11 +16,13 @@ //! Whisper message parsing, handlers, and construction. -use std::fmt; -use std::time::{self, SystemTime, Duration, Instant}; +use std::{ + fmt, + time::{self, Duration, Instant, SystemTime}, +}; use ethereum_types::{H256, H512}; -use rlp::{self, DecoderError, RlpStream, Rlp}; +use rlp::{self, DecoderError, Rlp, RlpStream}; use smallvec::SmallVec; use tiny_keccak::{keccak256, Keccak}; @@ -32,16 +34,18 @@ use time_utils::CheckedSystemTime; /// /// Panics if size or TTL is zero. 
pub fn work_factor_proved(size: u64, ttl: u64, hash: H256) -> f64 { - assert!(size != 0 && ttl != 0); + assert!(size != 0 && ttl != 0); - let leading_zeros = { - let leading_bytes = hash.iter().take_while(|&&x| x == 0).count(); - let remaining_leading_bits = hash.get(leading_bytes).map_or(0, |byte| byte.leading_zeros() as usize); - (leading_bytes * 8) + remaining_leading_bits - }; - let spacetime = size as f64 * ttl as f64; + let leading_zeros = { + let leading_bytes = hash.iter().take_while(|&&x| x == 0).count(); + let remaining_leading_bits = hash + .get(leading_bytes) + .map_or(0, |byte| byte.leading_zeros() as usize); + (leading_bytes * 8) + remaining_leading_bits + }; + let spacetime = size as f64 * ttl as f64; - 2.0_f64.powi(leading_zeros as i32) / spacetime + 2.0_f64.powi(leading_zeros as i32) / spacetime } /// A topic of a message. @@ -49,490 +53,524 @@ pub fn work_factor_proved(size: u64, ttl: u64, hash: H256) -> f64 { pub struct Topic(pub [u8; 4]); impl From<[u8; 4]> for Topic { - fn from(x: [u8; 4]) -> Self { - Topic(x) - } + fn from(x: [u8; 4]) -> Self { + Topic(x) + } } impl Topic { - /// set up to three bits in the 64-byte bloom passed. - /// - /// this takes 3 sets of 9 bits, treating each as an index in the range - /// 0..512 into the bloom and setting the corresponding bit in the bloom to 1. - pub fn bloom_into(&self, bloom: &mut H512) { + /// set up to three bits in the 64-byte bloom passed. + /// + /// this takes 3 sets of 9 bits, treating each as an index in the range + /// 0..512 into the bloom and setting the corresponding bit in the bloom to 1. 
+ pub fn bloom_into(&self, bloom: &mut H512) { + let data = &self.0; + for i in 0..3 { + let mut idx = data[i] as usize; - let data = &self.0; - for i in 0..3 { - let mut idx = data[i] as usize; + if data[3] & (1 << i) != 0 { + idx += 256; + } - if data[3] & (1 << i) != 0 { - idx += 256; - } + debug_assert!(idx <= 511); + bloom[idx / 8] |= 1 << (7 - idx % 8); + } + } - debug_assert!(idx <= 511); - bloom[idx / 8] |= 1 << (7 - idx % 8); - } - } - - /// Get bloom for single topic. - pub fn bloom(&self) -> H512 { - let mut bloom = Default::default(); - self.bloom_into(&mut bloom); - bloom - } + /// Get bloom for single topic. + pub fn bloom(&self) -> H512 { + let mut bloom = Default::default(); + self.bloom_into(&mut bloom); + bloom + } } impl rlp::Encodable for Topic { - fn rlp_append(&self, s: &mut RlpStream) { - s.encoder().encode_value(&self.0); - } + fn rlp_append(&self, s: &mut RlpStream) { + s.encoder().encode_value(&self.0); + } } impl rlp::Decodable for Topic { - fn decode(rlp: &Rlp) -> Result { - use std::cmp; + fn decode(rlp: &Rlp) -> Result { + use std::cmp; - rlp.decoder().decode_value(|bytes| match bytes.len().cmp(&4) { - cmp::Ordering::Less => Err(DecoderError::RlpIsTooShort), - cmp::Ordering::Greater => Err(DecoderError::RlpIsTooBig), - cmp::Ordering::Equal => { - let mut t = [0u8; 4]; - t.copy_from_slice(bytes); - Ok(Topic(t)) - } - }) - } + rlp.decoder() + .decode_value(|bytes| match bytes.len().cmp(&4) { + cmp::Ordering::Less => Err(DecoderError::RlpIsTooShort), + cmp::Ordering::Greater => Err(DecoderError::RlpIsTooBig), + cmp::Ordering::Equal => { + let mut t = [0u8; 4]; + t.copy_from_slice(bytes); + Ok(Topic(t)) + } + }) + } } /// Calculate union of blooms for given topics. pub fn bloom_topics(topics: &[Topic]) -> H512 { - let mut bloom = H512::default(); - for topic in topics { - topic.bloom_into(&mut bloom); - } - bloom + let mut bloom = H512::default(); + for topic in topics { + topic.bloom_into(&mut bloom); + } + bloom } /// Message errors. 
#[derive(Debug)] pub enum Error { - Decoder(DecoderError), - EmptyTopics, - LivesTooLong, - IssuedInFuture, - TimestampOverflow, - ZeroTTL, + Decoder(DecoderError), + EmptyTopics, + LivesTooLong, + IssuedInFuture, + TimestampOverflow, + ZeroTTL, } impl From for Error { - fn from(err: DecoderError) -> Self { - Error::Decoder(err) - } + fn from(err: DecoderError) -> Self { + Error::Decoder(err) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Decoder(ref err) => write!(f, "Failed to decode message: {}", err), - Error::LivesTooLong => write!(f, "Message claims to be issued before the unix epoch."), - Error::IssuedInFuture => write!(f, "Message issued in future."), - Error::ZeroTTL => write!(f, "Message live for zero time."), - Error::TimestampOverflow => write!(f, "Timestamp overflow"), - Error::EmptyTopics => write!(f, "Message has no topics."), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Decoder(ref err) => write!(f, "Failed to decode message: {}", err), + Error::LivesTooLong => write!(f, "Message claims to be issued before the unix epoch."), + Error::IssuedInFuture => write!(f, "Message issued in future."), + Error::ZeroTTL => write!(f, "Message live for zero time."), + Error::TimestampOverflow => write!(f, "Timestamp overflow"), + Error::EmptyTopics => write!(f, "Message has no topics."), + } + } } fn append_topics<'a>(s: &'a mut RlpStream, topics: &[Topic]) -> &'a mut RlpStream { - if topics.len() == 1 { - s.append(&topics[0]) - } else { - s.append_list(&topics) - } + if topics.len() == 1 { + s.append(&topics[0]) + } else { + s.append_list(&topics) + } } fn decode_topics(rlp: Rlp) -> Result, DecoderError> { - if rlp.is_list() { - rlp.iter().map(|r| r.as_val::()).collect() - } else { - rlp.as_val().map(|t| SmallVec::from_slice(&[t])) - } + if rlp.is_list() { + rlp.iter().map(|r| r.as_val::()).collect() + } else { + rlp.as_val().map(|t| 
SmallVec::from_slice(&[t])) + } } // Raw envelope struct. #[derive(Clone, Debug, PartialEq, Eq)] pub struct Envelope { - /// Expiry timestamp - pub expiry: u64, - /// Time-to-live in seconds - pub ttl: u64, - /// series of 4-byte topics. - pub topics: SmallVec<[Topic; 4]>, - /// The message contained within. - pub data: Vec, - /// Arbitrary value used to target lower PoW hash. - pub nonce: u64, + /// Expiry timestamp + pub expiry: u64, + /// Time-to-live in seconds + pub ttl: u64, + /// series of 4-byte topics. + pub topics: SmallVec<[Topic; 4]>, + /// The message contained within. + pub data: Vec, + /// Arbitrary value used to target lower PoW hash. + pub nonce: u64, } impl Envelope { - /// Whether the message is multi-topic. Only relay these to Parity peers. - pub fn is_multitopic(&self) -> bool { - self.topics.len() != 1 - } + /// Whether the message is multi-topic. Only relay these to Parity peers. + pub fn is_multitopic(&self) -> bool { + self.topics.len() != 1 + } - fn proving_hash(&self) -> H256 { - use byteorder::{BigEndian, ByteOrder}; + fn proving_hash(&self) -> H256 { + use byteorder::{BigEndian, ByteOrder}; - let mut buf = [0; 32]; + let mut buf = [0; 32]; - let mut stream = RlpStream::new_list(4); - stream.append(&self.expiry).append(&self.ttl); + let mut stream = RlpStream::new_list(4); + stream.append(&self.expiry).append(&self.ttl); - append_topics(&mut stream, &self.topics) - .append(&self.data); + append_topics(&mut stream, &self.topics).append(&self.data); - let mut digest = Keccak::new_keccak256(); - digest.update(&*stream.drain()); - digest.update(&{ - let mut nonce_bytes = [0u8; 8]; - BigEndian::write_u64(&mut nonce_bytes, self.nonce); + let mut digest = Keccak::new_keccak256(); + digest.update(&*stream.drain()); + digest.update(&{ + let mut nonce_bytes = [0u8; 8]; + BigEndian::write_u64(&mut nonce_bytes, self.nonce); - nonce_bytes - }); + nonce_bytes + }); - digest.finalize(&mut buf); - H256(buf) - } + digest.finalize(&mut buf); + H256(buf) + 
} } impl rlp::Encodable for Envelope { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(5) - .append(&self.expiry) - .append(&self.ttl); + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(5).append(&self.expiry).append(&self.ttl); - append_topics(s, &self.topics) - .append(&self.data) - .append(&self.nonce); - } + append_topics(s, &self.topics) + .append(&self.data) + .append(&self.nonce); + } } impl rlp::Decodable for Envelope { - fn decode(rlp: &Rlp) -> Result { - if rlp.item_count()? != 5 { return Err(DecoderError::RlpIncorrectListLen) } + fn decode(rlp: &Rlp) -> Result { + if rlp.item_count()? != 5 { + return Err(DecoderError::RlpIncorrectListLen); + } - Ok(Envelope { - expiry: rlp.val_at(0)?, - ttl: rlp.val_at(1)?, - topics: decode_topics(rlp.at(2)?)?, - data: rlp.val_at(3)?, - nonce: rlp.val_at(4)?, - }) - } + Ok(Envelope { + expiry: rlp.val_at(0)?, + ttl: rlp.val_at(1)?, + topics: decode_topics(rlp.at(2)?)?, + data: rlp.val_at(3)?, + nonce: rlp.val_at(4)?, + }) + } } /// Message creation parameters. /// Pass this to `Message::create` to make a message. pub struct CreateParams { - /// time-to-live in seconds. - pub ttl: u64, - /// payload data. - pub payload: Vec, - /// Topics. May not be empty. - pub topics: Vec, - /// How many milliseconds to spend proving work. - pub work: u64, + /// time-to-live in seconds. + pub ttl: u64, + /// payload data. + pub payload: Vec, + /// Topics. May not be empty. + pub topics: Vec, + /// How many milliseconds to spend proving work. + pub work: u64, } /// A whisper message. This is a checked message carrying around metadata. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Message { - envelope: Envelope, - bloom: H512, - hash: H256, - encoded_size: usize, + envelope: Envelope, + bloom: H512, + hash: H256, + encoded_size: usize, } impl Message { - /// Create a message from creation parameters. - /// Panics if TTL is 0. 
- pub fn create(params: CreateParams) -> Result { - use byteorder::{BigEndian, ByteOrder}; - use rand::{Rng, SeedableRng, XorShiftRng}; + /// Create a message from creation parameters. + /// Panics if TTL is 0. + pub fn create(params: CreateParams) -> Result { + use byteorder::{BigEndian, ByteOrder}; + use rand::{Rng, SeedableRng, XorShiftRng}; - if params.topics.is_empty() { return Err(Error::EmptyTopics) } + if params.topics.is_empty() { + return Err(Error::EmptyTopics); + } - let mut rng = { - let mut thread_rng = ::rand::thread_rng(); - XorShiftRng::from_seed(thread_rng.gen::<[u32; 4]>()) - }; + let mut rng = { + let mut thread_rng = ::rand::thread_rng(); + XorShiftRng::from_seed(thread_rng.gen::<[u32; 4]>()) + }; - assert!(params.ttl > 0); + assert!(params.ttl > 0); - let expiry = { - let since_epoch = SystemTime::now() - .checked_add(Duration::from_secs(params.ttl)) - .and_then(|t| t.checked_add(Duration::from_millis(params.work))) - .ok_or(Error::TimestampOverflow)? - .duration_since(time::UNIX_EPOCH).expect("time after now is after unix epoch; qed"); + let expiry = { + let since_epoch = SystemTime::now() + .checked_add(Duration::from_secs(params.ttl)) + .and_then(|t| t.checked_add(Duration::from_millis(params.work))) + .ok_or(Error::TimestampOverflow)? + .duration_since(time::UNIX_EPOCH) + .expect("time after now is after unix epoch; qed"); - // round up the sub-second to next whole second. - since_epoch.as_secs() + if since_epoch.subsec_nanos() == 0 { 0 } else { 1 } - }; + // round up the sub-second to next whole second. 
+ since_epoch.as_secs() + + if since_epoch.subsec_nanos() == 0 { + 0 + } else { + 1 + } + }; - let start_digest = { - let mut stream = RlpStream::new_list(4); - stream.append(&expiry).append(¶ms.ttl); - append_topics(&mut stream, ¶ms.topics).append(¶ms.payload); + let start_digest = { + let mut stream = RlpStream::new_list(4); + stream.append(&expiry).append(¶ms.ttl); + append_topics(&mut stream, ¶ms.topics).append(¶ms.payload); - let mut digest = Keccak::new_keccak256(); - digest.update(&*stream.drain()); - digest - }; + let mut digest = Keccak::new_keccak256(); + digest.update(&*stream.drain()); + digest + }; - let mut buf = [0; 32]; - let mut try_nonce = move |nonce: &[u8; 8]| { - let mut digest = start_digest.clone(); - digest.update(&nonce[..]); - digest.finalize(&mut buf[..]); + let mut buf = [0; 32]; + let mut try_nonce = move |nonce: &[u8; 8]| { + let mut digest = start_digest.clone(); + digest.update(&nonce[..]); + digest.finalize(&mut buf[..]); - buf.clone() - }; + buf.clone() + }; - let mut nonce: [u8; 8] = rng.gen(); - let mut best_found = try_nonce(&nonce); + let mut nonce: [u8; 8] = rng.gen(); + let mut best_found = try_nonce(&nonce); - let start = Instant::now(); + let start = Instant::now(); - while start.elapsed() <= Duration::from_millis(params.work) { - let temp_nonce = rng.gen(); - let hash = try_nonce(&temp_nonce); + while start.elapsed() <= Duration::from_millis(params.work) { + let temp_nonce = rng.gen(); + let hash = try_nonce(&temp_nonce); - if hash < best_found { - nonce = temp_nonce; - best_found = hash; - } - } + if hash < best_found { + nonce = temp_nonce; + best_found = hash; + } + } - let envelope = Envelope { - expiry: expiry, - ttl: params.ttl, - topics: params.topics.into_iter().collect(), - data: params.payload, - nonce: BigEndian::read_u64(&nonce[..]), - }; + let envelope = Envelope { + expiry: expiry, + ttl: params.ttl, + topics: params.topics.into_iter().collect(), + data: params.payload, + nonce: 
BigEndian::read_u64(&nonce[..]), + }; - debug_assert_eq!(H256(best_found.clone()), envelope.proving_hash()); + debug_assert_eq!(H256(best_found.clone()), envelope.proving_hash()); - let encoded = ::rlp::encode(&envelope); + let encoded = ::rlp::encode(&envelope); - Ok(Message::from_components( - envelope, - encoded.len(), - H256(keccak256(&encoded)), - SystemTime::now(), - ).expect("Message generated here known to be valid; qed")) - } + Ok(Message::from_components( + envelope, + encoded.len(), + H256(keccak256(&encoded)), + SystemTime::now(), + ) + .expect("Message generated here known to be valid; qed")) + } - /// Decode message from RLP and check for validity against system time. - pub fn decode(rlp: Rlp, now: SystemTime) -> Result { - let envelope: Envelope = rlp.as_val()?; - let encoded_size = rlp.as_raw().len(); - let hash = H256(keccak256(rlp.as_raw())); + /// Decode message from RLP and check for validity against system time. + pub fn decode(rlp: Rlp, now: SystemTime) -> Result { + let envelope: Envelope = rlp.as_val()?; + let encoded_size = rlp.as_raw().len(); + let hash = H256(keccak256(rlp.as_raw())); - Message::from_components(envelope, encoded_size, hash, now) - } + Message::from_components(envelope, encoded_size, hash, now) + } - // create message from envelope, hash, and encoded size. - // does checks for validity. - fn from_components(envelope: Envelope, size: usize, hash: H256, now: SystemTime) - -> Result - { - const LEEWAY_SECONDS: u64 = 2; + // create message from envelope, hash, and encoded size. + // does checks for validity. 
+ fn from_components( + envelope: Envelope, + size: usize, + hash: H256, + now: SystemTime, + ) -> Result { + const LEEWAY_SECONDS: u64 = 2; - if envelope.expiry <= envelope.ttl { return Err(Error::LivesTooLong) } - if envelope.ttl == 0 { return Err(Error::ZeroTTL) } + if envelope.expiry <= envelope.ttl { + return Err(Error::LivesTooLong); + } + if envelope.ttl == 0 { + return Err(Error::ZeroTTL); + } - if envelope.topics.is_empty() { return Err(Error::EmptyTopics) } + if envelope.topics.is_empty() { + return Err(Error::EmptyTopics); + } - let issue_time_adjusted = Duration::from_secs( - (envelope.expiry - envelope.ttl).saturating_sub(LEEWAY_SECONDS) - ); + let issue_time_adjusted = + Duration::from_secs((envelope.expiry - envelope.ttl).saturating_sub(LEEWAY_SECONDS)); - let issue_time_adjusted = time::UNIX_EPOCH.checked_add(issue_time_adjusted) - .ok_or(Error::TimestampOverflow)?; + let issue_time_adjusted = time::UNIX_EPOCH + .checked_add(issue_time_adjusted) + .ok_or(Error::TimestampOverflow)?; - if issue_time_adjusted > now { - return Err(Error::IssuedInFuture); - } + if issue_time_adjusted > now { + return Err(Error::IssuedInFuture); + } - // other validity checks? - let bloom = bloom_topics(&envelope.topics); + // other validity checks? + let bloom = bloom_topics(&envelope.topics); - Ok(Message { - envelope: envelope, - bloom: bloom, - hash: hash, - encoded_size: size, - }) - } + Ok(Message { + envelope: envelope, + bloom: bloom, + hash: hash, + encoded_size: size, + }) + } - /// Get a reference to the envelope. - pub fn envelope(&self) -> &Envelope { - &self.envelope - } + /// Get a reference to the envelope. + pub fn envelope(&self) -> &Envelope { + &self.envelope + } - /// Get the encoded size of the envelope. - pub fn encoded_size(&self) -> usize { - self.encoded_size - } + /// Get the encoded size of the envelope. + pub fn encoded_size(&self) -> usize { + self.encoded_size + } - /// Get a uniquely identifying hash for the message. 
- pub fn hash(&self) -> &H256 { - &self.hash - } + /// Get a uniquely identifying hash for the message. + pub fn hash(&self) -> &H256 { + &self.hash + } - /// Get the bloom filter of the topics - pub fn bloom(&self) -> &H512 { - &self.bloom - } + /// Get the bloom filter of the topics + pub fn bloom(&self) -> &H512 { + &self.bloom + } - /// Get the work proved by the hash. - pub fn work_proved(&self) -> f64 { - let proving_hash = self.envelope.proving_hash(); + /// Get the work proved by the hash. + pub fn work_proved(&self) -> f64 { + let proving_hash = self.envelope.proving_hash(); - work_factor_proved(self.encoded_size as _, self.envelope.ttl, proving_hash) - } + work_factor_proved(self.encoded_size as _, self.envelope.ttl, proving_hash) + } - /// Get the expiry time. - pub fn expiry(&self) -> Option { - time::UNIX_EPOCH.checked_add(Duration::from_secs(self.envelope.expiry)) - } + /// Get the expiry time. + pub fn expiry(&self) -> Option { + time::UNIX_EPOCH.checked_add(Duration::from_secs(self.envelope.expiry)) + } - /// Get the topics. - pub fn topics(&self) -> &[Topic] { - &self.envelope.topics - } + /// Get the topics. + pub fn topics(&self) -> &[Topic] { + &self.envelope.topics + } - /// Get the message data. - pub fn data(&self) -> &[u8] { - &self.envelope.data - } + /// Get the message data. 
+ pub fn data(&self) -> &[u8] { + &self.envelope.data + } } #[cfg(test)] mod tests { - use ethereum_types::H256; - use super::*; - use std::time::{self, Duration, SystemTime}; - use rlp::Rlp; - use smallvec::SmallVec; + use super::*; + use ethereum_types::H256; + use rlp::Rlp; + use smallvec::SmallVec; + use std::time::{self, Duration, SystemTime}; - fn unix_time(x: u64) -> SystemTime { - time::UNIX_EPOCH + Duration::from_secs(x) - } + fn unix_time(x: u64) -> SystemTime { + time::UNIX_EPOCH + Duration::from_secs(x) + } - #[test] - fn create_message() { - assert!(Message::create(CreateParams { - ttl: 100, - payload: vec![1, 2, 3, 4], - topics: vec![Topic([1, 2, 1, 2])], - work: 50, - }).is_ok()); - } + #[test] + fn create_message() { + assert!(Message::create(CreateParams { + ttl: 100, + payload: vec![1, 2, 3, 4], + topics: vec![Topic([1, 2, 1, 2])], + work: 50, + }) + .is_ok()); + } - #[test] - fn round_trip() { - let envelope = Envelope { - expiry: 100_000, - ttl: 30, - data: vec![9; 256], - topics: SmallVec::from_slice(&[Default::default()]), - nonce: 1010101, - }; + #[test] + fn round_trip() { + let envelope = Envelope { + expiry: 100_000, + ttl: 30, + data: vec![9; 256], + topics: SmallVec::from_slice(&[Default::default()]), + nonce: 1010101, + }; - let encoded = ::rlp::encode(&envelope); - let decoded = ::rlp::decode(&encoded).expect("failure decoding Envelope"); + let encoded = ::rlp::encode(&envelope); + let decoded = ::rlp::decode(&encoded).expect("failure decoding Envelope"); - assert_eq!(envelope, decoded) - } + assert_eq!(envelope, decoded) + } - #[test] - fn round_trip_multitopic() { - let envelope = Envelope { - expiry: 100_000, - ttl: 30, - data: vec![9; 256], - topics: SmallVec::from_slice(&[Default::default(), Topic([1, 2, 3, 4])]), - nonce: 1010101, - }; + #[test] + fn round_trip_multitopic() { + let envelope = Envelope { + expiry: 100_000, + ttl: 30, + data: vec![9; 256], + topics: SmallVec::from_slice(&[Default::default(), Topic([1, 2, 3, 4])]), 
+ nonce: 1010101, + }; - let encoded = ::rlp::encode(&envelope); - let decoded = ::rlp::decode(&encoded).expect("failure decoding Envelope"); + let encoded = ::rlp::encode(&envelope); + let decoded = ::rlp::decode(&encoded).expect("failure decoding Envelope"); - assert_eq!(envelope, decoded) - } + assert_eq!(envelope, decoded) + } - #[test] - fn passes_checks() { - let envelope = Envelope { - expiry: 100_000, - ttl: 30, - data: vec![9; 256], - topics: SmallVec::from_slice(&[Default::default()]), - nonce: 1010101, - }; + #[test] + fn passes_checks() { + let envelope = Envelope { + expiry: 100_000, + ttl: 30, + data: vec![9; 256], + topics: SmallVec::from_slice(&[Default::default()]), + nonce: 1010101, + }; - let encoded = ::rlp::encode(&envelope); + let encoded = ::rlp::encode(&envelope); - for i in 0..30 { - let now = unix_time(100_000 - i); - Message::decode(Rlp::new(&*encoded), now).unwrap(); - } - } + for i in 0..30 { + let now = unix_time(100_000 - i); + Message::decode(Rlp::new(&*encoded), now).unwrap(); + } + } - #[test] - #[should_panic] - fn future_message() { - let envelope = Envelope { - expiry: 100_000, - ttl: 30, - data: vec![9; 256], - topics: SmallVec::from_slice(&[Default::default()]), - nonce: 1010101, - }; + #[test] + #[should_panic] + fn future_message() { + let envelope = Envelope { + expiry: 100_000, + ttl: 30, + data: vec![9; 256], + topics: SmallVec::from_slice(&[Default::default()]), + nonce: 1010101, + }; - let encoded = ::rlp::encode(&envelope); + let encoded = ::rlp::encode(&envelope); - let now = unix_time(100_000 - 1_000); - Message::decode(Rlp::new(&*encoded), now).unwrap(); - } + let now = unix_time(100_000 - 1_000); + Message::decode(Rlp::new(&*encoded), now).unwrap(); + } - #[test] - #[should_panic] - fn pre_epoch() { - let envelope = Envelope { - expiry: 100_000, - ttl: 200_000, - data: vec![9; 256], - topics: SmallVec::from_slice(&[Default::default()]), - nonce: 1010101, - }; + #[test] + #[should_panic] + fn pre_epoch() { + let 
envelope = Envelope { + expiry: 100_000, + ttl: 200_000, + data: vec![9; 256], + topics: SmallVec::from_slice(&[Default::default()]), + nonce: 1010101, + }; - let encoded = ::rlp::encode(&envelope); + let encoded = ::rlp::encode(&envelope); - let now = unix_time(95_000); - Message::decode(Rlp::new(&*encoded), now).unwrap(); - } + let now = unix_time(95_000); + Message::decode(Rlp::new(&*encoded), now).unwrap(); + } - #[test] - fn work_factor() { - // 256 leading zeros -> 2^256 / 1 - assert_eq!(work_factor_proved(1, 1, H256::from(0)), 115792089237316200000000000000000000000000000000000000000000000000000000000000.0); - // 255 leading zeros -> 2^255 / 1 - assert_eq!(work_factor_proved(1, 1, H256::from(1)), 57896044618658100000000000000000000000000000000000000000000000000000000000000.0); - // 0 leading zeros -> 2^0 / 1 - assert_eq!(work_factor_proved(1, 1, serde_json::from_str::("\"0xff00000000000000000000000000000000000000000000000000000000000000\"").unwrap()), 1.0); - } + #[test] + fn work_factor() { + // 256 leading zeros -> 2^256 / 1 + assert_eq!( + work_factor_proved(1, 1, H256::from(0)), + 115792089237316200000000000000000000000000000000000000000000000000000000000000.0 + ); + // 255 leading zeros -> 2^255 / 1 + assert_eq!( + work_factor_proved(1, 1, H256::from(1)), + 57896044618658100000000000000000000000000000000000000000000000000000000000000.0 + ); + // 0 leading zeros -> 2^0 / 1 + assert_eq!( + work_factor_proved( + 1, + 1, + serde_json::from_str::( + "\"0xff00000000000000000000000000000000000000000000000000000000000000\"" + ) + .unwrap() + ), + 1.0 + ); + } } diff --git a/whisper/src/net/mod.rs b/whisper/src/net/mod.rs index d263f6cfb..a48a9f022 100644 --- a/whisper/src/net/mod.rs +++ b/whisper/src/net/mod.rs @@ -16,19 +16,21 @@ //! Whisper messaging system as a DevP2P subprotocol. 
-use std::collections::{HashMap, HashSet}; -use std::cmp::Ordering; -use std::fmt; -use std::time::{Duration, SystemTime}; -use std::sync::Arc; +use std::{ + cmp::Ordering, + collections::{HashMap, HashSet}, + fmt, + sync::Arc, + time::{Duration, SystemTime}, +}; use ethereum_types::{H256, H512}; use network::{self, NetworkContext, NodeId, PeerId, ProtocolId, TimerToken}; use ordered_float::OrderedFloat; use parking_lot::{Mutex, RwLock}; -use rlp::{DecoderError, RlpStream, Rlp}; +use rlp::{DecoderError, Rlp, RlpStream}; -use message::{Message, Error as MessageError}; +use message::{Error as MessageError, Message}; #[cfg(test)] mod tests; @@ -45,9 +47,7 @@ pub const PROTOCOL_VERSION: usize = 6; const PACKET_COUNT: u8 = 128; /// Supported protocol versions. -pub const SUPPORTED_VERSIONS: &'static [(u8, u8)] = &[ - (PROTOCOL_VERSION as u8, PACKET_COUNT) -]; +pub const SUPPORTED_VERSIONS: &'static [(u8, u8)] = &[(PROTOCOL_VERSION as u8, PACKET_COUNT)]; // maximum tolerated delay between messages packets. const MAX_TOLERATED_DELAY: Duration = Duration::from_millis(5000); @@ -61,657 +61,692 @@ pub const PROTOCOL_ID: ::network::ProtocolId = *b"shh"; pub const PARITY_PROTOCOL_ID: ::network::ProtocolId = *b"pwh"; mod packet { - pub const STATUS: u8 = 0; - pub const MESSAGES: u8 = 1; - pub const POW_REQUIREMENT: u8 = 2; - pub const TOPIC_FILTER: u8 = 3; + pub const STATUS: u8 = 0; + pub const MESSAGES: u8 = 1; + pub const POW_REQUIREMENT: u8 = 2; + pub const TOPIC_FILTER: u8 = 3; - // 126, 127 for mail server stuff we will never implement here. + // 126, 127 for mail server stuff we will never implement here. } /// Handles messages within a single packet. pub trait MessageHandler: Send + Sync { - /// Evaluate the message and handle it. - /// - /// The same message will not be passed twice. - /// Heavy handling should be done asynchronously. 
- /// If there is a significant overhead in this thread, then an attacker - /// can determine which kinds of messages we are listening for. - fn handle_messages(&self, message: &[Message]); + /// Evaluate the message and handle it. + /// + /// The same message will not be passed twice. + /// Heavy handling should be done asynchronously. + /// If there is a significant overhead in this thread, then an attacker + /// can determine which kinds of messages we are listening for. + fn handle_messages(&self, message: &[Message]); } // errors in importing a whisper message. #[derive(Debug)] enum Error { - Decoder(DecoderError), - Network(network::Error), - Message(MessageError), - UnknownPeer(PeerId), - UnexpectedMessage, - InvalidPowReq, + Decoder(DecoderError), + Network(network::Error), + Message(MessageError), + UnknownPeer(PeerId), + UnexpectedMessage, + InvalidPowReq, } impl From for Error { - fn from(err: DecoderError) -> Self { - Error::Decoder(err) - } + fn from(err: DecoderError) -> Self { + Error::Decoder(err) + } } impl From for Error { - fn from(err: network::Error) -> Self { - Error::Network(err) - } + fn from(err: network::Error) -> Self { + Error::Network(err) + } } impl From for Error { - fn from(err: MessageError) -> Self { - Error::Message(err) - } + fn from(err: MessageError) -> Self { + Error::Message(err) + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Decoder(ref err) => write!(f, "Failed to decode packet: {}", err), - Error::Network(ref err) => write!(f, "Network error: {}", err), - Error::Message(ref err) => write!(f, "Error decoding message: {}", err), - Error::UnknownPeer(ref id) => write!(f, "Message received from unknown peer: {}", id), - Error::UnexpectedMessage => write!(f, "Unexpected message."), - Error::InvalidPowReq => write!(f, "Peer sent invalid PoW requirement."), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Decoder(ref 
err) => write!(f, "Failed to decode packet: {}", err), + Error::Network(ref err) => write!(f, "Network error: {}", err), + Error::Message(ref err) => write!(f, "Error decoding message: {}", err), + Error::UnknownPeer(ref id) => write!(f, "Message received from unknown peer: {}", id), + Error::UnexpectedMessage => write!(f, "Unexpected message."), + Error::InvalidPowReq => write!(f, "Peer sent invalid PoW requirement."), + } + } } // sorts by work proved, descending. #[derive(PartialEq, Eq)] struct SortedEntry { - slab_id: usize, - work_proved: OrderedFloat, - expiry: SystemTime, + slab_id: usize, + work_proved: OrderedFloat, + expiry: SystemTime, } impl Ord for SortedEntry { - fn cmp(&self, other: &SortedEntry) -> Ordering { - self.work_proved.cmp(&other.work_proved) - } + fn cmp(&self, other: &SortedEntry) -> Ordering { + self.work_proved.cmp(&other.work_proved) + } } impl PartialOrd for SortedEntry { - fn partial_cmp(&self, other: &SortedEntry) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &SortedEntry) -> Option { + Some(self.cmp(other)) + } } // stores messages by two metrics: expiry and PoW rating // when full, will accept messages above the minimum stored. struct Messages { - slab: ::slab::Slab, - sorted: Vec, - known: HashSet, - removed_hashes: Vec, - cumulative_size: usize, - ideal_size: usize, + slab: ::slab::Slab, + sorted: Vec, + known: HashSet, + removed_hashes: Vec, + cumulative_size: usize, + ideal_size: usize, } impl Messages { - fn new(ideal_size: usize) -> Self { - Messages { - slab: ::slab::Slab::with_capacity(0), - sorted: Vec::new(), - known: HashSet::new(), - removed_hashes: Vec::new(), - cumulative_size: 0, - ideal_size: ideal_size, - } - } + fn new(ideal_size: usize) -> Self { + Messages { + slab: ::slab::Slab::with_capacity(0), + sorted: Vec::new(), + known: HashSet::new(), + removed_hashes: Vec::new(), + cumulative_size: 0, + ideal_size: ideal_size, + } + } - // reserve space for additional elements. 
- fn reserve(&mut self, additional: usize) { - self.slab.reserve_exact(additional); - self.sorted.reserve(additional); - self.known.reserve(additional); - } + // reserve space for additional elements. + fn reserve(&mut self, additional: usize) { + self.slab.reserve_exact(additional); + self.sorted.reserve(additional); + self.known.reserve(additional); + } - // whether a message is not known and within the bounds of PoW. - fn may_accept(&self, message: &Message) -> bool { - !self.known.contains(message.hash()) && { - self.sorted.last().map_or(true, |entry| { - let work_proved = OrderedFloat(message.work_proved()); - OrderedFloat(self.slab[entry.slab_id].work_proved()) < work_proved - }) - } - } + // whether a message is not known and within the bounds of PoW. + fn may_accept(&self, message: &Message) -> bool { + !self.known.contains(message.hash()) && { + self.sorted.last().map_or(true, |entry| { + let work_proved = OrderedFloat(message.work_proved()); + OrderedFloat(self.slab[entry.slab_id].work_proved()) < work_proved + }) + } + } - // insert a message into the store. for best performance, - // call `reserve` before inserting a bunch. - // - fn insert(&mut self, message: Message) -> bool { - if !self.known.insert(message.hash().clone()) { return false } + // insert a message into the store. for best performance, + // call `reserve` before inserting a bunch. + // + fn insert(&mut self, message: Message) -> bool { + if !self.known.insert(message.hash().clone()) { + return false; + } - let work_proved = OrderedFloat(message.work_proved()); + let work_proved = OrderedFloat(message.work_proved()); - // pop off entries by low PoW until we have enough space for the higher - // PoW message being inserted. 
- let size_upon_insertion = self.cumulative_size + message.encoded_size(); - if size_upon_insertion >= self.ideal_size { - let diff = size_upon_insertion - self.ideal_size; - let mut found_diff = 0; - for entry in self.sorted.iter().rev() { - if found_diff >= diff { break } + // pop off entries by low PoW until we have enough space for the higher + // PoW message being inserted. + let size_upon_insertion = self.cumulative_size + message.encoded_size(); + if size_upon_insertion >= self.ideal_size { + let diff = size_upon_insertion - self.ideal_size; + let mut found_diff = 0; + for entry in self.sorted.iter().rev() { + if found_diff >= diff { + break; + } - // if we encounter a message with at least the PoW we're looking - // at, don't push that message out. - if entry.work_proved >= work_proved { return false } - found_diff += self.slab[entry.slab_id].encoded_size(); - } + // if we encounter a message with at least the PoW we're looking + // at, don't push that message out. + if entry.work_proved >= work_proved { + return false; + } + found_diff += self.slab[entry.slab_id].encoded_size(); + } - // message larger than ideal size. - if found_diff < diff { return false } + // message larger than ideal size. 
+ if found_diff < diff { + return false; + } - while found_diff > 0 { - let entry = self.sorted.pop() + while found_diff > 0 { + let entry = self.sorted.pop() .expect("found_diff built by traversing entries; therefore that many entries exist; qed"); - let message = self.slab.remove(entry.slab_id) - .expect("sorted entry slab IDs always filled; qed"); + let message = self + .slab + .remove(entry.slab_id) + .expect("sorted entry slab IDs always filled; qed"); - found_diff -= message.encoded_size(); + found_diff -= message.encoded_size(); - self.cumulative_size -= message.encoded_size(); - self.known.remove(message.hash()); - self.removed_hashes.push(message.hash().clone()); - } - } + self.cumulative_size -= message.encoded_size(); + self.known.remove(message.hash()); + self.removed_hashes.push(message.hash().clone()); + } + } - let expiry = match message.expiry() { - Some(time) => time, - _ => return false, - }; + let expiry = match message.expiry() { + Some(time) => time, + _ => return false, + }; - self.cumulative_size += message.encoded_size(); + self.cumulative_size += message.encoded_size(); - if !self.slab.has_available() { self.slab.reserve_exact(1) } - let id = self.slab.insert(message).expect("just ensured enough space in slab; qed"); + if !self.slab.has_available() { + self.slab.reserve_exact(1) + } + let id = self + .slab + .insert(message) + .expect("just ensured enough space in slab; qed"); - let sorted_entry = SortedEntry { - slab_id: id, - work_proved, - expiry, - }; + let sorted_entry = SortedEntry { + slab_id: id, + work_proved, + expiry, + }; - match self.sorted.binary_search(&sorted_entry) { - Ok(idx) | Err(idx) => self.sorted.insert(idx, sorted_entry), - } + match self.sorted.binary_search(&sorted_entry) { + Ok(idx) | Err(idx) => self.sorted.insert(idx, sorted_entry), + } - true - } + true + } - // prune expired messages, and then prune low proof-of-work messages - // until below ideal size. 
- fn prune(&mut self, now: SystemTime) -> Vec { - { - let slab = &mut self.slab; - let known = &mut self.known; - let cumulative_size = &mut self.cumulative_size; - let ideal_size = &self.ideal_size; - let removed = &mut self.removed_hashes; + // prune expired messages, and then prune low proof-of-work messages + // until below ideal size. + fn prune(&mut self, now: SystemTime) -> Vec { + { + let slab = &mut self.slab; + let known = &mut self.known; + let cumulative_size = &mut self.cumulative_size; + let ideal_size = &self.ideal_size; + let removed = &mut self.removed_hashes; - // first pass, we look just at expired entries. - let all_expired = self.sorted.iter() - .filter(|entry| entry.expiry <= now) - .map(|x| (true, x)); + // first pass, we look just at expired entries. + let all_expired = self + .sorted + .iter() + .filter(|entry| entry.expiry <= now) + .map(|x| (true, x)); - // second pass, we look at entries which aren't expired but in order - // by PoW - let low_proof = self.sorted.iter().rev() - .filter(|entry| entry.expiry > now) - .map(|x| (false, x)); + // second pass, we look at entries which aren't expired but in order + // by PoW + let low_proof = self + .sorted + .iter() + .rev() + .filter(|entry| entry.expiry > now) + .map(|x| (false, x)); - for (is_expired, entry) in all_expired.chain(low_proof) { - // break once we've removed all expired entries - // or have taken enough low-work entries. - if !is_expired && *cumulative_size <= *ideal_size { - break - } + for (is_expired, entry) in all_expired.chain(low_proof) { + // break once we've removed all expired entries + // or have taken enough low-work entries. 
+ if !is_expired && *cumulative_size <= *ideal_size { + break; + } - let message = slab.remove(entry.slab_id) - .expect("references to ID kept upon creation; only destroyed upon removal; qed"); + let message = slab.remove(entry.slab_id).expect( + "references to ID kept upon creation; only destroyed upon removal; qed", + ); - known.remove(message.hash()); - removed.push(message.hash().clone()); + known.remove(message.hash()); + removed.push(message.hash().clone()); - *cumulative_size -= message.encoded_size(); - } - } + *cumulative_size -= message.encoded_size(); + } + } - // clear all the sorted entries we removed from slab. - let slab = &self.slab; - self.sorted.retain(|entry| slab.contains(entry.slab_id)); + // clear all the sorted entries we removed from slab. + let slab = &self.slab; + self.sorted.retain(|entry| slab.contains(entry.slab_id)); - ::std::mem::replace(&mut self.removed_hashes, Vec::new()) - } + ::std::mem::replace(&mut self.removed_hashes, Vec::new()) + } - fn iter(&self) -> ::slab::Iter { - self.slab.iter() - } + fn iter(&self) -> ::slab::Iter { + self.slab.iter() + } - fn is_full(&self) -> bool { - self.cumulative_size >= self.ideal_size - } + fn is_full(&self) -> bool { + self.cumulative_size >= self.ideal_size + } - fn status(&self) -> PoolStatus { - PoolStatus { - required_pow: if self.is_full() { - self.sorted.last().map(|entry| entry.work_proved.0) - } else { - None - }, - message_count: self.sorted.len(), - cumulative_size: self.cumulative_size, - target_size: self.ideal_size, - } - } + fn status(&self) -> PoolStatus { + PoolStatus { + required_pow: if self.is_full() { + self.sorted.last().map(|entry| entry.work_proved.0) + } else { + None + }, + message_count: self.sorted.len(), + cumulative_size: self.cumulative_size, + target_size: self.ideal_size, + } + } } enum State { - Unconfirmed(SystemTime), // awaiting status packet. - Confirmed, + Unconfirmed(SystemTime), // awaiting status packet. 
+ Confirmed, } #[allow(dead_code)] // for node key. this will be useful for topic routing. struct Peer { - node_key: NodeId, - state: State, - known_messages: HashSet, - topic_filter: Option, - pow_requirement: f64, - is_parity: bool, - _protocol_version: usize, + node_key: NodeId, + state: State, + known_messages: HashSet, + topic_filter: Option, + pow_requirement: f64, + is_parity: bool, + _protocol_version: usize, } impl Peer { - // note that a message has been evicted from the queue. - fn note_evicted(&mut self, messages: &[H256]) { - for message_hash in messages { - self.known_messages.remove(message_hash); - } - } + // note that a message has been evicted from the queue. + fn note_evicted(&mut self, messages: &[H256]) { + for message_hash in messages { + self.known_messages.remove(message_hash); + } + } - // whether this peer will accept the message. - fn will_accept(&self, message: &Message) -> bool { - if self.known_messages.contains(message.hash()) { return false } + // whether this peer will accept the message. + fn will_accept(&self, message: &Message) -> bool { + if self.known_messages.contains(message.hash()) { + return false; + } - // only parity peers will accept multitopic messages. - if message.envelope().is_multitopic() && !self.is_parity { return false } - if message.work_proved() < self.pow_requirement { return false } + // only parity peers will accept multitopic messages. + if message.envelope().is_multitopic() && !self.is_parity { + return false; + } + if message.work_proved() < self.pow_requirement { + return false; + } - self.topic_filter.as_ref() - .map_or(true, |filter| &(filter & message.bloom()) == message.bloom()) - } + self.topic_filter.as_ref().map_or(true, |filter| { + &(filter & message.bloom()) == message.bloom() + }) + } - // note a message as known. returns false if it was already - // known, true otherwise. 
- fn note_known(&mut self, message: &Message) -> bool { - self.known_messages.insert(message.hash().clone()) - } + // note a message as known. returns false if it was already + // known, true otherwise. + fn note_known(&mut self, message: &Message) -> bool { + self.known_messages.insert(message.hash().clone()) + } - fn set_topic_filter(&mut self, topic: H512) { - self.topic_filter = Some(topic); - } + fn set_topic_filter(&mut self, topic: H512) { + self.topic_filter = Some(topic); + } - fn set_pow_requirement(&mut self, pow_requirement: f64) { - self.pow_requirement = pow_requirement; - } + fn set_pow_requirement(&mut self, pow_requirement: f64) { + self.pow_requirement = pow_requirement; + } - fn can_send_messages(&self) -> bool { - match self.state { - State::Unconfirmed(_) => false, - State::Confirmed => true, - } - } + fn can_send_messages(&self) -> bool { + match self.state { + State::Unconfirmed(_) => false, + State::Confirmed => true, + } + } } /// Pool status. pub struct PoolStatus { - /// Required PoW to be accepted into the pool - pub required_pow: Option, - /// Number of messages in the pool. - pub message_count: usize, - /// Cumulative size of the messages in the pool - pub cumulative_size: usize, - /// Target size of the pool. - pub target_size: usize, + /// Required PoW to be accepted into the pool + pub required_pow: Option, + /// Number of messages in the pool. + pub message_count: usize, + /// Cumulative size of the messages in the pool + pub cumulative_size: usize, + /// Target size of the pool. + pub target_size: usize, } /// Generic network context. pub trait Context { - /// Disconnect a peer. - fn disconnect_peer(&self, PeerId); - /// Disable a peer. - fn disable_peer(&self, PeerId); - /// Get a peer's node key. - fn node_key(&self, PeerId) -> Option; - /// Get a peer's protocol version for given protocol. - fn protocol_version(&self, ProtocolId, PeerId) -> Option; - /// Send message to peer. 
- fn send(&self, PeerId, u8, Vec); + /// Disconnect a peer. + fn disconnect_peer(&self, PeerId); + /// Disable a peer. + fn disable_peer(&self, PeerId); + /// Get a peer's node key. + fn node_key(&self, PeerId) -> Option; + /// Get a peer's protocol version for given protocol. + fn protocol_version(&self, ProtocolId, PeerId) -> Option; + /// Send message to peer. + fn send(&self, PeerId, u8, Vec); } -impl Context for T where T: ?Sized + NetworkContext { - fn disconnect_peer(&self, peer: PeerId) { - NetworkContext::disconnect_peer(self, peer); - } - fn disable_peer(&self, peer: PeerId) { - NetworkContext::disable_peer(self, peer) - } - fn node_key(&self, peer: PeerId) -> Option { - self.session_info(peer).and_then(|info| info.id) - } - fn protocol_version(&self, proto_id: ProtocolId, peer: PeerId) -> Option { - NetworkContext::protocol_version(self, proto_id, peer) - } +impl Context for T +where + T: ?Sized + NetworkContext, +{ + fn disconnect_peer(&self, peer: PeerId) { + NetworkContext::disconnect_peer(self, peer); + } + fn disable_peer(&self, peer: PeerId) { + NetworkContext::disable_peer(self, peer) + } + fn node_key(&self, peer: PeerId) -> Option { + self.session_info(peer).and_then(|info| info.id) + } + fn protocol_version(&self, proto_id: ProtocolId, peer: PeerId) -> Option { + NetworkContext::protocol_version(self, proto_id, peer) + } - fn send(&self, peer: PeerId, packet_id: u8, message: Vec) { - if let Err(e) = NetworkContext::send(self, peer, packet_id, message) { - debug!(target: "whisper", "Failed to send packet {} to peer {}: {}", + fn send(&self, peer: PeerId, packet_id: u8, message: Vec) { + if let Err(e) = NetworkContext::send(self, peer, packet_id, message) { + debug!(target: "whisper", "Failed to send packet {} to peer {}: {}", packet_id, peer, e); - self.disconnect_peer(peer) - } - } + self.disconnect_peer(peer) + } + } } /// The whisper network protocol handler. 
pub struct Network { - messages: Arc>, - handler: T, - peers: RwLock>>, + messages: Arc>, + handler: T, + peers: RwLock>>, } // public API. impl Network { - /// Create a new network handler. - pub fn new(messages_size_bytes: usize, handler: T) -> Self { - Network { - messages: Arc::new(RwLock::new(Messages::new(messages_size_bytes))), - handler: handler, - peers: RwLock::new(HashMap::new()), - } - } + /// Create a new network handler. + pub fn new(messages_size_bytes: usize, handler: T) -> Self { + Network { + messages: Arc::new(RwLock::new(Messages::new(messages_size_bytes))), + handler: handler, + peers: RwLock::new(HashMap::new()), + } + } - /// Post a message to the whisper network to be relayed. - pub fn post_message(&self, message: Message, context: &C) -> bool - where T: MessageHandler - { - let ok = self.messages.write().insert(message); - if ok { self.rally(context) } - ok - } + /// Post a message to the whisper network to be relayed. + pub fn post_message(&self, message: Message, context: &C) -> bool + where + T: MessageHandler, + { + let ok = self.messages.write().insert(message); + if ok { + self.rally(context) + } + ok + } - /// Get number of messages and amount of memory used by them. - pub fn pool_status(&self) -> PoolStatus { - self.messages.read().status() - } + /// Get number of messages and amount of memory used by them. + pub fn pool_status(&self) -> PoolStatus { + self.messages.read().status() + } } impl Network { - fn rally(&self, io: &C) { - // cannot be greater than 16MB (protocol limitation) - const MAX_MESSAGES_PACKET_SIZE: usize = 8 * 1024 * 1024; + fn rally(&self, io: &C) { + // cannot be greater than 16MB (protocol limitation) + const MAX_MESSAGES_PACKET_SIZE: usize = 8 * 1024 * 1024; - // prune messages. - let now = SystemTime::now(); - let pruned_hashes = self.messages.write().prune(now); + // prune messages. 
+ let now = SystemTime::now(); + let pruned_hashes = self.messages.write().prune(now); - let messages = self.messages.read(); - let peers = self.peers.read(); + let messages = self.messages.read(); + let peers = self.peers.read(); - // send each peer a packet with new messages it may find relevant. - for (peer_id, peer) in peers.iter() { - let mut peer_data = peer.lock(); - peer_data.note_evicted(&pruned_hashes); + // send each peer a packet with new messages it may find relevant. + for (peer_id, peer) in peers.iter() { + let mut peer_data = peer.lock(); + peer_data.note_evicted(&pruned_hashes); - let punish_timeout = |last_activity: &SystemTime| { - if *last_activity + MAX_TOLERATED_DELAY <= now { - debug!(target: "whisper", "Disconnecting peer {} due to excessive timeout.", peer_id); - io.disconnect_peer(*peer_id); - } - }; + let punish_timeout = |last_activity: &SystemTime| { + if *last_activity + MAX_TOLERATED_DELAY <= now { + debug!(target: "whisper", "Disconnecting peer {} due to excessive timeout.", peer_id); + io.disconnect_peer(*peer_id); + } + }; - // check timeouts and skip peers who we can't send a rally to. - match peer_data.state { - State::Unconfirmed(ref time) => { - punish_timeout(time); - continue; - } - State::Confirmed => {} - } + // check timeouts and skip peers who we can't send a rally to. + match peer_data.state { + State::Unconfirmed(ref time) => { + punish_timeout(time); + continue; + } + State::Confirmed => {} + } - // construct packet, skipping messages the peer won't accept. - let mut stream = RlpStream::new(); - stream.begin_unbounded_list(); + // construct packet, skipping messages the peer won't accept. 
+ let mut stream = RlpStream::new(); + stream.begin_unbounded_list(); - for message in messages.iter() { - if !peer_data.will_accept(message) { continue } + for message in messages.iter() { + if !peer_data.will_accept(message) { + continue; + } - if stream.estimate_size(message.encoded_size()) > MAX_MESSAGES_PACKET_SIZE { - break; - } + if stream.estimate_size(message.encoded_size()) > MAX_MESSAGES_PACKET_SIZE { + break; + } - peer_data.note_known(message); - stream.append(message.envelope()); - } + peer_data.note_known(message); + stream.append(message.envelope()); + } - stream.complete_unbounded_list(); + stream.complete_unbounded_list(); - io.send(*peer_id, packet::MESSAGES, stream.out()); - } - } + io.send(*peer_id, packet::MESSAGES, stream.out()); + } + } - // handle status packet from peer. - fn on_status(&self, peer: &PeerId, _status: Rlp) - -> Result<(), Error> - { - let peers = self.peers.read(); + // handle status packet from peer. + fn on_status(&self, peer: &PeerId, _status: Rlp) -> Result<(), Error> { + let peers = self.peers.read(); - match peers.get(peer) { - Some(peer) => { - peer.lock().state = State::Confirmed; - Ok(()) - } - None => { - debug!(target: "whisper", "Received message from unknown peer."); - Err(Error::UnknownPeer(*peer)) - } - } - } + match peers.get(peer) { + Some(peer) => { + peer.lock().state = State::Confirmed; + Ok(()) + } + None => { + debug!(target: "whisper", "Received message from unknown peer."); + Err(Error::UnknownPeer(*peer)) + } + } + } - fn on_messages(&self, peer: &PeerId, message_packet: Rlp) - -> Result<(), Error> - { - let mut messages_vec = { - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "whisper", "Received message from unknown peer."); - return Err(Error::UnknownPeer(*peer)); - } - }; + fn on_messages(&self, peer: &PeerId, message_packet: Rlp) -> Result<(), Error> { + let mut messages_vec = { + let peers = self.peers.read(); + let peer = 
match peers.get(peer) { + Some(peer) => peer, + None => { + debug!(target: "whisper", "Received message from unknown peer."); + return Err(Error::UnknownPeer(*peer)); + } + }; - let mut peer = peer.lock(); + let mut peer = peer.lock(); - if !peer.can_send_messages() { - return Err(Error::UnexpectedMessage); - } + if !peer.can_send_messages() { + return Err(Error::UnexpectedMessage); + } - let now = SystemTime::now(); - let mut messages_vec = message_packet.iter().map(|rlp| Message::decode(rlp, now)) - .collect::, _>>()?; + let now = SystemTime::now(); + let mut messages_vec = message_packet + .iter() + .map(|rlp| Message::decode(rlp, now)) + .collect::, _>>()?; - if messages_vec.is_empty() { return Ok(()) } + if messages_vec.is_empty() { + return Ok(()); + } - // disallow duplicates in packet. - messages_vec.retain(|message| peer.note_known(&message)); - messages_vec - }; + // disallow duplicates in packet. + messages_vec.retain(|message| peer.note_known(&message)); + messages_vec + }; - // import for relaying. - let mut messages = self.messages.write(); + // import for relaying. 
+ let mut messages = self.messages.write(); - messages_vec.retain(|message| messages.may_accept(&message)); - messages.reserve(messages_vec.len()); + messages_vec.retain(|message| messages.may_accept(&message)); + messages.reserve(messages_vec.len()); - self.handler.handle_messages(&messages_vec); + self.handler.handle_messages(&messages_vec); - for message in messages_vec { - messages.insert(message); - } + for message in messages_vec { + messages.insert(message); + } - Ok(()) - } + Ok(()) + } - fn on_pow_requirement(&self, peer: &PeerId, requirement: Rlp) - -> Result<(), Error> - { - use byteorder::{ByteOrder, BigEndian}; + fn on_pow_requirement(&self, peer: &PeerId, requirement: Rlp) -> Result<(), Error> { + use byteorder::{BigEndian, ByteOrder}; - let peers = self.peers.read(); - match peers.get(peer) { - Some(peer) => { - let mut peer = peer.lock(); + let peers = self.peers.read(); + match peers.get(peer) { + Some(peer) => { + let mut peer = peer.lock(); - if let State::Unconfirmed(_) = peer.state { - return Err(Error::UnexpectedMessage); - } - let bytes: Vec = requirement.as_val()?; - if bytes.len() != ::std::mem::size_of::() { - return Err(Error::InvalidPowReq); - } + if let State::Unconfirmed(_) = peer.state { + return Err(Error::UnexpectedMessage); + } + let bytes: Vec = requirement.as_val()?; + if bytes.len() != ::std::mem::size_of::() { + return Err(Error::InvalidPowReq); + } - // as of byteorder 1.1.0, this is always defined. - let req = BigEndian::read_f64(&bytes[..]); + // as of byteorder 1.1.0, this is always defined. 
+ let req = BigEndian::read_f64(&bytes[..]); - if !req.is_normal() { - return Err(Error::InvalidPowReq); - } + if !req.is_normal() { + return Err(Error::InvalidPowReq); + } - peer.set_pow_requirement(req); - } - None => { - debug!(target: "whisper", "Received message from unknown peer."); - return Err(Error::UnknownPeer(*peer)); - } - } + peer.set_pow_requirement(req); + } + None => { + debug!(target: "whisper", "Received message from unknown peer."); + return Err(Error::UnknownPeer(*peer)); + } + } - Ok(()) - } + Ok(()) + } - fn on_topic_filter(&self, peer: &PeerId, filter: Rlp) - -> Result<(), Error> - { - let peers = self.peers.read(); - match peers.get(peer) { - Some(peer) => { - let mut peer = peer.lock(); + fn on_topic_filter(&self, peer: &PeerId, filter: Rlp) -> Result<(), Error> { + let peers = self.peers.read(); + match peers.get(peer) { + Some(peer) => { + let mut peer = peer.lock(); - if let State::Unconfirmed(_) = peer.state { - return Err(Error::UnexpectedMessage); - } + if let State::Unconfirmed(_) = peer.state { + return Err(Error::UnexpectedMessage); + } - peer.set_topic_filter(filter.as_val()?) - } - None => { - debug!(target: "whisper", "Received message from unknown peer."); - return Err(Error::UnknownPeer(*peer)); - } - } + peer.set_topic_filter(filter.as_val()?) 
+ } + None => { + debug!(target: "whisper", "Received message from unknown peer."); + return Err(Error::UnknownPeer(*peer)); + } + } - Ok(()) - } + Ok(()) + } - fn on_connect(&self, io: &C, peer: &PeerId) { - trace!(target: "whisper", "Connecting peer {}", peer); + fn on_connect(&self, io: &C, peer: &PeerId) { + trace!(target: "whisper", "Connecting peer {}", peer); - let node_key = match io.node_key(*peer) { - Some(node_key) => node_key, - None => { - debug!(target: "whisper", "Disconnecting peer {}, who has no node key.", peer); - io.disable_peer(*peer); - return; - } - }; + let node_key = match io.node_key(*peer) { + Some(node_key) => node_key, + None => { + debug!(target: "whisper", "Disconnecting peer {}, who has no node key.", peer); + io.disable_peer(*peer); + return; + } + }; - let version = match io.protocol_version(PROTOCOL_ID, *peer) { - Some(version) => version as usize, - None => { - io.disable_peer(*peer); - return - } - }; + let version = match io.protocol_version(PROTOCOL_ID, *peer) { + Some(version) => version as usize, + None => { + io.disable_peer(*peer); + return; + } + }; - self.peers.write().insert(*peer, Mutex::new(Peer { - node_key: node_key, - state: State::Unconfirmed(SystemTime::now()), - known_messages: HashSet::new(), - topic_filter: None, - pow_requirement: 0f64, - is_parity: io.protocol_version(PARITY_PROTOCOL_ID, *peer).is_some(), - _protocol_version: version, - })); + self.peers.write().insert( + *peer, + Mutex::new(Peer { + node_key: node_key, + state: State::Unconfirmed(SystemTime::now()), + known_messages: HashSet::new(), + topic_filter: None, + pow_requirement: 0f64, + is_parity: io.protocol_version(PARITY_PROTOCOL_ID, *peer).is_some(), + _protocol_version: version, + }), + ); - io.send(*peer, packet::STATUS, ::rlp::EMPTY_LIST_RLP.to_vec()); - } + io.send(*peer, packet::STATUS, ::rlp::EMPTY_LIST_RLP.to_vec()); + } - fn on_packet(&self, io: &C, peer: &PeerId, packet_id: u8, data: &[u8]) { - let rlp = Rlp::new(data); - let res = 
match packet_id { - packet::STATUS => self.on_status(peer, rlp), - packet::MESSAGES => self.on_messages(peer, rlp), - packet::POW_REQUIREMENT => self.on_pow_requirement(peer, rlp), - packet::TOPIC_FILTER => self.on_topic_filter(peer, rlp), - _ => Ok(()), // ignore unknown packets. - }; + fn on_packet(&self, io: &C, peer: &PeerId, packet_id: u8, data: &[u8]) { + let rlp = Rlp::new(data); + let res = match packet_id { + packet::STATUS => self.on_status(peer, rlp), + packet::MESSAGES => self.on_messages(peer, rlp), + packet::POW_REQUIREMENT => self.on_pow_requirement(peer, rlp), + packet::TOPIC_FILTER => self.on_topic_filter(peer, rlp), + _ => Ok(()), // ignore unknown packets. + }; - if let Err(e) = res { - trace!(target: "whisper", "Disabling peer due to misbehavior: {}", e); - io.disable_peer(*peer); - } - } + if let Err(e) = res { + trace!(target: "whisper", "Disabling peer due to misbehavior: {}", e); + io.disable_peer(*peer); + } + } - fn on_disconnect(&self, peer: &PeerId) { - trace!(target: "whisper", "Disconnecting peer {}", peer); - let _ = self.peers.write().remove(peer); - } + fn on_disconnect(&self, peer: &PeerId) { + trace!(target: "whisper", "Disconnecting peer {}", peer); + let _ = self.peers.write().remove(peer); + } } impl ::network::NetworkProtocolHandler for Network { - fn initialize(&self, io: &NetworkContext) { - // set up broadcast timer (< 1s) - io.register_timer(RALLY_TOKEN, RALLY_TIMEOUT) - .expect("Failed to initialize message rally timer"); - } + fn initialize(&self, io: &NetworkContext) { + // set up broadcast timer (< 1s) + io.register_timer(RALLY_TOKEN, RALLY_TIMEOUT) + .expect("Failed to initialize message rally timer"); + } - fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - self.on_packet(io, peer, packet_id, data) - } + fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + self.on_packet(io, peer, packet_id, data) + } - fn connected(&self, io: &NetworkContext, peer: 
&PeerId) { - // peer with higher ID should begin rallying. - self.on_connect(io, peer) - } + fn connected(&self, io: &NetworkContext, peer: &PeerId) { + // peer with higher ID should begin rallying. + self.on_connect(io, peer) + } - fn disconnected(&self, _io: &NetworkContext, peer: &PeerId) { - self.on_disconnect(peer) - } + fn disconnected(&self, _io: &NetworkContext, peer: &PeerId) { + self.on_disconnect(peer) + } - fn timeout(&self, io: &NetworkContext, timer: TimerToken) { - // rally with each peer and handle timeouts. - match timer { - RALLY_TOKEN => self.rally(io), - other => debug!(target: "whisper", "Timeout triggered on unknown token {}", other), - } - } + fn timeout(&self, io: &NetworkContext, timer: TimerToken) { + // rally with each peer and handle timeouts. + match timer { + RALLY_TOKEN => self.rally(io), + other => debug!(target: "whisper", "Timeout triggered on unknown token {}", other), + } + } } /// Dummy subprotocol used for parity extensions. @@ -719,13 +754,13 @@ impl ::network::NetworkProtocolHandler for Network { pub struct ParityExtensions; impl ::network::NetworkProtocolHandler for ParityExtensions { - fn initialize(&self, _io: &NetworkContext) { } + fn initialize(&self, _io: &NetworkContext) {} - fn read(&self, _io: &NetworkContext, _peer: &PeerId, _id: u8, _msg: &[u8]) { } + fn read(&self, _io: &NetworkContext, _peer: &PeerId, _id: u8, _msg: &[u8]) {} - fn connected(&self, _io: &NetworkContext, _peer: &PeerId) { } + fn connected(&self, _io: &NetworkContext, _peer: &PeerId) {} - fn disconnected(&self, _io: &NetworkContext, _peer: &PeerId) { } + fn disconnected(&self, _io: &NetworkContext, _peer: &PeerId) {} - fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) { } + fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) {} } diff --git a/whisper/src/net/tests.rs b/whisper/src/net/tests.rs index 7af0cb8d3..9f00f28a3 100644 --- a/whisper/src/net/tests.rs +++ b/whisper/src/net/tests.rs @@ -16,177 +16,183 @@ //! 
Tests for the whisper network module. -use std::collections::HashSet; -use std::sync::mpsc; +use std::{collections::HashSet, sync::mpsc}; -use parking_lot::Mutex; use network::{NodeId, PeerId}; +use parking_lot::Mutex; -use message::{CreateParams, Message}; use super::*; +use message::{CreateParams, Message}; struct TestHandler(Mutex>); impl MessageHandler for TestHandler { - fn handle_messages(&self, messages: &[Message]) { - let tx = self.0.lock(); - for message in messages { - let _ = tx.send(message.clone()); - } - } + fn handle_messages(&self, messages: &[Message]) { + let tx = self.0.lock(); + for message in messages { + let _ = tx.send(message.clone()); + } + } } struct TestPeer { - network: Network, - recv: mpsc::Receiver, - disconnected: Mutex>, + network: Network, + recv: mpsc::Receiver, + disconnected: Mutex>, } impl TestPeer { - fn create() -> Self { - let (tx, rx) = mpsc::channel(); + fn create() -> Self { + let (tx, rx) = mpsc::channel(); - TestPeer { - network: Network::new(10 * 1024 * 1024, TestHandler(Mutex::new(tx))), - recv: rx, - disconnected: Mutex::new(HashSet::new()), - } - } + TestPeer { + network: Network::new(10 * 1024 * 1024, TestHandler(Mutex::new(tx))), + recv: rx, + disconnected: Mutex::new(HashSet::new()), + } + } } struct TestNetwork { - peers: Vec, + peers: Vec, } impl TestNetwork { - fn new(n_peers: usize) -> Self { - let unconnected_peers: Vec<_> = (0..n_peers).map(|_| TestPeer::create()).collect(); - for i in 0..n_peers { - for j in (i + 1)..n_peers { - let (peer1, peer2) = (&unconnected_peers[i], &unconnected_peers[j]); - let ctx1 = TestContext::new(&unconnected_peers, i); - let ctx2 = TestContext::new(&unconnected_peers, j); + fn new(n_peers: usize) -> Self { + let unconnected_peers: Vec<_> = (0..n_peers).map(|_| TestPeer::create()).collect(); + for i in 0..n_peers { + for j in (i + 1)..n_peers { + let (peer1, peer2) = (&unconnected_peers[i], &unconnected_peers[j]); + let ctx1 = TestContext::new(&unconnected_peers, i); + let 
ctx2 = TestContext::new(&unconnected_peers, j); - peer1.network.on_connect(&ctx1, &j); - peer2.network.on_connect(&ctx2, &i); - } - } + peer1.network.on_connect(&ctx1, &j); + peer2.network.on_connect(&ctx2, &i); + } + } - TestNetwork { - peers: unconnected_peers, - } - } + TestNetwork { + peers: unconnected_peers, + } + } - fn post_message_from(&self, id: PeerId, msg: Message) { - self.peers[id].network.post_message(msg, &TestContext::new(&self.peers, id)); - } + fn post_message_from(&self, id: PeerId, msg: Message) { + self.peers[id] + .network + .post_message(msg, &TestContext::new(&self.peers, id)); + } } enum Event { - Disconnect(PeerId, PeerId), - Send(PeerId, PeerId, u8, Vec), + Disconnect(PeerId, PeerId), + Send(PeerId, PeerId, u8, Vec), } struct TestContext<'a> { - peers: &'a [TestPeer], - local_id: PeerId, - events: Mutex>, + peers: &'a [TestPeer], + local_id: PeerId, + events: Mutex>, } impl<'a> TestContext<'a> { - fn new(peers: &'a [TestPeer], local_id: PeerId) -> Self { - TestContext { - peers, - local_id, - events: Mutex::new(Vec::new()), - } - } + fn new(peers: &'a [TestPeer], local_id: PeerId) -> Self { + TestContext { + peers, + local_id, + events: Mutex::new(Vec::new()), + } + } } impl<'a> Context for TestContext<'a> { - fn disconnect_peer(&self, id: PeerId) { - self.events.lock().push(Event::Disconnect(self.local_id, id)); - } + fn disconnect_peer(&self, id: PeerId) { + self.events + .lock() + .push(Event::Disconnect(self.local_id, id)); + } - fn disable_peer(&self, id: PeerId) { - self.events.lock().push(Event::Disconnect(self.local_id, id)); - } + fn disable_peer(&self, id: PeerId) { + self.events + .lock() + .push(Event::Disconnect(self.local_id, id)); + } - fn node_key(&self, peer: PeerId) -> Option { - let mut id = NodeId::default(); - id[0] = peer as _; - Some(id) - } + fn node_key(&self, peer: PeerId) -> Option { + let mut id = NodeId::default(); + id[0] = peer as _; + Some(id) + } - fn protocol_version(&self, id: ::network::ProtocolId, 
_peer: PeerId) -> Option { - if &id == b"shh" || &id == b"pwh" { - Some(PROTOCOL_VERSION as _) - } else { - None - } - } + fn protocol_version(&self, id: ::network::ProtocolId, _peer: PeerId) -> Option { + if &id == b"shh" || &id == b"pwh" { + Some(PROTOCOL_VERSION as _) + } else { + None + } + } - fn send(&self, peer: PeerId, packet: u8, data: Vec) { - self.events.lock().push(Event::Send(self.local_id, peer, packet, data)); - } + fn send(&self, peer: PeerId, packet: u8, data: Vec) { + self.events + .lock() + .push(Event::Send(self.local_id, peer, packet, data)); + } } impl<'a> Drop for TestContext<'a> { - fn drop(&mut self) { - let events = self.events.get_mut(); - while !events.is_empty() { - let mut deferred = Vec::new(); - for event in events.drain(..) { - match event { - Event::Disconnect(from, target) => { - self.peers[from].network.on_disconnect(&target); - self.peers[target].network.on_disconnect(&from); + fn drop(&mut self) { + let events = self.events.get_mut(); + while !events.is_empty() { + let mut deferred = Vec::new(); + for event in events.drain(..) { + match event { + Event::Disconnect(from, target) => { + self.peers[from].network.on_disconnect(&target); + self.peers[target].network.on_disconnect(&from); - self.peers[from].disconnected.lock().insert(target); - self.peers[target].disconnected.lock().insert(from); - } - Event::Send(from, target, packet, data) => { - if self.peers[from].disconnected.lock().contains(&target) { - continue; - } + self.peers[from].disconnected.lock().insert(target); + self.peers[target].disconnected.lock().insert(from); + } + Event::Send(from, target, packet, data) => { + if self.peers[from].disconnected.lock().contains(&target) { + continue; + } - let mut inner_ctx = TestContext::new(self.peers, target); + let mut inner_ctx = TestContext::new(self.peers, target); - self.peers[target].network.on_packet( - &inner_ctx, - &from, - packet, - &data[..] 
- ); + self.peers[target] + .network + .on_packet(&inner_ctx, &from, packet, &data[..]); - // don't recursively apply disconnects or new messages - // from the receiver's actions yet. - let inner_events = ::std::mem::replace(inner_ctx.events.get_mut(), Vec::new()); - deferred.extend(inner_events); - } - } - } + // don't recursively apply disconnects or new messages + // from the receiver's actions yet. + let inner_events = + ::std::mem::replace(inner_ctx.events.get_mut(), Vec::new()); + deferred.extend(inner_events); + } + } + } - events.extend(deferred); - } - } + events.extend(deferred); + } + } } #[test] fn message_gets_relayed() { - let network = TestNetwork::new(5); - let message = Message::create(CreateParams { - ttl: 500, - payload: b"this is my payload, pal".to_vec(), - topics: vec![[0, 1, 2, 3].into()], - work: 25, - }).unwrap(); + let network = TestNetwork::new(5); + let message = Message::create(CreateParams { + ttl: 500, + payload: b"this is my payload, pal".to_vec(), + topics: vec![[0, 1, 2, 3].into()], + work: 25, + }) + .unwrap(); - network.post_message_from(0, message.clone()); + network.post_message_from(0, message.clone()); - assert!(network.peers[0].recv.try_recv().is_err()); + assert!(network.peers[0].recv.try_recv().is_err()); - for i in 1..5 { - assert_eq!(network.peers[i].recv.try_recv().unwrap(), message); - } + for i in 1..5 { + assert_eq!(network.peers[i].recv.try_recv().unwrap(), message); + } } diff --git a/whisper/src/rpc/crypto.rs b/whisper/src/rpc/crypto.rs index 891163453..081fb6c03 100644 --- a/whisper/src/rpc/crypto.rs +++ b/whisper/src/rpc/crypto.rs @@ -16,10 +16,9 @@ //! Encryption schemes supported by RPC layer. 
-use crypto::aes_gcm::{Encryptor, Decryptor}; -use ethkey::crypto::ecies; +use crypto::aes_gcm::{Decryptor, Encryptor}; use ethereum_types::H256; -use ethkey::{self, Public, Secret}; +use ethkey::{self, crypto::ecies, Public, Secret}; use memzero::Memzero; /// Length of AES key @@ -32,239 +31,249 @@ const BROADCAST_IV: [u8; AES_NONCE_LEN] = [0xff; AES_NONCE_LEN]; // how to encode aes key/nonce. enum AesEncode { - AppendedNonce, // receiver known, random nonce appended. - OnTopics(Vec), // receiver knows topics but not key. nonce global. + AppendedNonce, // receiver known, random nonce appended. + OnTopics(Vec), // receiver knows topics but not key. nonce global. } enum EncryptionInner { - AES(Memzero<[u8; AES_KEY_LEN]>, [u8; AES_NONCE_LEN], AesEncode), - ECIES(Public), + AES(Memzero<[u8; AES_KEY_LEN]>, [u8; AES_NONCE_LEN], AesEncode), + ECIES(Public), } /// Encryption good for single usage. pub struct EncryptionInstance(EncryptionInner); impl EncryptionInstance { - /// ECIES encryption using public key. Fails if invalid public key. - pub fn ecies(public: Public) -> Result { - if !ethkey::public_is_valid(&public) { - return Err("Invalid public key"); - } + /// ECIES encryption using public key. Fails if invalid public key. + pub fn ecies(public: Public) -> Result { + if !ethkey::public_is_valid(&public) { + return Err("Invalid public key"); + } - Ok(EncryptionInstance(EncryptionInner::ECIES(public))) - } + Ok(EncryptionInstance(EncryptionInner::ECIES(public))) + } - /// 256-bit AES GCM encryption with given nonce. - /// It is extremely insecure to reuse nonces. - /// - /// If generating nonces with a secure RNG, limit uses such that - /// the chance of collision is negligible. - pub fn aes(key: Memzero<[u8; AES_KEY_LEN]>, nonce: [u8; AES_NONCE_LEN]) -> Self { - EncryptionInstance(EncryptionInner::AES(key, nonce, AesEncode::AppendedNonce)) - } + /// 256-bit AES GCM encryption with given nonce. + /// It is extremely insecure to reuse nonces. 
+ /// + /// If generating nonces with a secure RNG, limit uses such that + /// the chance of collision is negligible. + pub fn aes(key: Memzero<[u8; AES_KEY_LEN]>, nonce: [u8; AES_NONCE_LEN]) -> Self { + EncryptionInstance(EncryptionInner::AES(key, nonce, AesEncode::AppendedNonce)) + } - /// Broadcast encryption for the message based on the given topics. - /// - /// Key reuse here is extremely dangerous. It should be randomly generated - /// with a secure RNG. - pub fn broadcast(key: Memzero<[u8; AES_KEY_LEN]>, topics: Vec) -> Self { - EncryptionInstance(EncryptionInner::AES(key, BROADCAST_IV, AesEncode::OnTopics(topics))) - } + /// Broadcast encryption for the message based on the given topics. + /// + /// Key reuse here is extremely dangerous. It should be randomly generated + /// with a secure RNG. + pub fn broadcast(key: Memzero<[u8; AES_KEY_LEN]>, topics: Vec) -> Self { + EncryptionInstance(EncryptionInner::AES( + key, + BROADCAST_IV, + AesEncode::OnTopics(topics), + )) + } - /// Encrypt the supplied plaintext - pub fn encrypt(self, plain: &[u8]) -> Option> { - match self.0 { - EncryptionInner::AES(key, nonce, encode) => { - match encode { - AesEncode::AppendedNonce => { - let mut enc = Encryptor::aes_256_gcm(&*key).ok()?; - let mut buf = enc.encrypt(&nonce, plain.to_vec()).ok()?; - buf.extend(&nonce[..]); - Some(buf) - } - AesEncode::OnTopics(topics) => { - let mut buf = Vec::new(); - for mut t in topics { - xor(&mut t.0, &key); - buf.extend(&t.0); - } - let mut enc = Encryptor::aes_256_gcm(&*key).ok()?; - enc.offset(buf.len()); - buf.extend(plain); - let ciphertext = enc.encrypt(&nonce, buf).ok()?; - Some(ciphertext) - } - } - } - EncryptionInner::ECIES(valid_public) => { - ecies::encrypt(&valid_public, &[], plain).ok() - } - } - } + /// Encrypt the supplied plaintext + pub fn encrypt(self, plain: &[u8]) -> Option> { + match self.0 { + EncryptionInner::AES(key, nonce, encode) => match encode { + AesEncode::AppendedNonce => { + let mut enc = 
Encryptor::aes_256_gcm(&*key).ok()?; + let mut buf = enc.encrypt(&nonce, plain.to_vec()).ok()?; + buf.extend(&nonce[..]); + Some(buf) + } + AesEncode::OnTopics(topics) => { + let mut buf = Vec::new(); + for mut t in topics { + xor(&mut t.0, &key); + buf.extend(&t.0); + } + let mut enc = Encryptor::aes_256_gcm(&*key).ok()?; + enc.offset(buf.len()); + buf.extend(plain); + let ciphertext = enc.encrypt(&nonce, buf).ok()?; + Some(ciphertext) + } + }, + EncryptionInner::ECIES(valid_public) => ecies::encrypt(&valid_public, &[], plain).ok(), + } + } } #[inline] fn xor(a: &mut [u8; 32], b: &[u8; 32]) { - for i in 0 .. 32 { - a[i] ^= b[i] - } + for i in 0..32 { + a[i] ^= b[i] + } } enum AesExtract { - AppendedNonce(Memzero<[u8; AES_KEY_LEN]>), // extract appended nonce. - OnTopics(usize, usize, H256), // number of topics, index we know, topic we know. + AppendedNonce(Memzero<[u8; AES_KEY_LEN]>), // extract appended nonce. + OnTopics(usize, usize, H256), // number of topics, index we know, topic we know. } enum DecryptionInner { - AES(AesExtract), - ECIES(Secret), + AES(AesExtract), + ECIES(Secret), } /// Decryption instance good for single usage. pub struct DecryptionInstance(DecryptionInner); impl DecryptionInstance { - /// ECIES decryption using secret key. Fails if invalid secret. - pub fn ecies(secret: Secret) -> Result { - secret.check_validity().map_err(|_| "Invalid secret key")?; + /// ECIES decryption using secret key. Fails if invalid secret. + pub fn ecies(secret: Secret) -> Result { + secret.check_validity().map_err(|_| "Invalid secret key")?; - Ok(DecryptionInstance(DecryptionInner::ECIES(secret))) - } + Ok(DecryptionInstance(DecryptionInner::ECIES(secret))) + } - /// 256-bit AES GCM decryption with appended nonce. - pub fn aes(key: Memzero<[u8; AES_KEY_LEN]>) -> Self { - DecryptionInstance(DecryptionInner::AES(AesExtract::AppendedNonce(key))) - } + /// 256-bit AES GCM decryption with appended nonce. 
+ pub fn aes(key: Memzero<[u8; AES_KEY_LEN]>) -> Self { + DecryptionInstance(DecryptionInner::AES(AesExtract::AppendedNonce(key))) + } - /// Decode broadcast based on number of topics and known topic. - /// Known topic index may not be larger than num topics - 1. - pub fn broadcast(num_topics: usize, topic_idx: usize, known_topic: H256) -> Result { - if topic_idx >= num_topics { return Err("topic index out of bounds") } + /// Decode broadcast based on number of topics and known topic. + /// Known topic index may not be larger than num topics - 1. + pub fn broadcast( + num_topics: usize, + topic_idx: usize, + known_topic: H256, + ) -> Result { + if topic_idx >= num_topics { + return Err("topic index out of bounds"); + } - Ok(DecryptionInstance(DecryptionInner::AES(AesExtract::OnTopics(num_topics, topic_idx, known_topic)))) - } + Ok(DecryptionInstance(DecryptionInner::AES( + AesExtract::OnTopics(num_topics, topic_idx, known_topic), + ))) + } - /// Decrypt ciphertext. Fails if it's an invalid message. - pub fn decrypt(self, ciphertext: &[u8]) -> Option> { - match self.0 { - DecryptionInner::AES(extract) => { - match extract { - AesExtract::AppendedNonce(key) => { - if ciphertext.len() < AES_NONCE_LEN { - return None - } - // nonce is the suffix of ciphertext. - let mut nonce = [0; AES_NONCE_LEN]; - let nonce_offset = ciphertext.len() - AES_NONCE_LEN; - nonce.copy_from_slice(&ciphertext[nonce_offset..]); - Decryptor::aes_256_gcm(&*key).ok()? - .decrypt(&nonce, Vec::from(&ciphertext[..nonce_offset])) - .ok() - } - AesExtract::OnTopics(num_topics, known_index, known_topic) => { - if ciphertext.len() < num_topics * 32 { - return None - } - let mut salted_topic = H256::new(); - salted_topic.copy_from_slice(&ciphertext[(known_index * 32)..][..32]); - let key = Memzero::from((salted_topic ^ known_topic).0); - let offset = num_topics * 32; - Decryptor::aes_256_gcm(&*key).ok()? 
- .decrypt(&BROADCAST_IV, Vec::from(&ciphertext[offset..])) - .ok() - } - } - } - DecryptionInner::ECIES(secret) => { - // secret is checked for validity, so only fails on invalid message. - ecies::decrypt(&secret, &[], ciphertext).ok() - } - } - } + /// Decrypt ciphertext. Fails if it's an invalid message. + pub fn decrypt(self, ciphertext: &[u8]) -> Option> { + match self.0 { + DecryptionInner::AES(extract) => { + match extract { + AesExtract::AppendedNonce(key) => { + if ciphertext.len() < AES_NONCE_LEN { + return None; + } + // nonce is the suffix of ciphertext. + let mut nonce = [0; AES_NONCE_LEN]; + let nonce_offset = ciphertext.len() - AES_NONCE_LEN; + nonce.copy_from_slice(&ciphertext[nonce_offset..]); + Decryptor::aes_256_gcm(&*key) + .ok()? + .decrypt(&nonce, Vec::from(&ciphertext[..nonce_offset])) + .ok() + } + AesExtract::OnTopics(num_topics, known_index, known_topic) => { + if ciphertext.len() < num_topics * 32 { + return None; + } + let mut salted_topic = H256::new(); + salted_topic.copy_from_slice(&ciphertext[(known_index * 32)..][..32]); + let key = Memzero::from((salted_topic ^ known_topic).0); + let offset = num_topics * 32; + Decryptor::aes_256_gcm(&*key) + .ok()? + .decrypt(&BROADCAST_IV, Vec::from(&ciphertext[offset..])) + .ok() + } + } + } + DecryptionInner::ECIES(secret) => { + // secret is checked for validity, so only fails on invalid message. 
+ ecies::decrypt(&secret, &[], ciphertext).ok() + } + } + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn encrypt_asymmetric() { - use ethkey::{Generator, Random}; + #[test] + fn encrypt_asymmetric() { + use ethkey::{Generator, Random}; - let key_pair = Random.generate().unwrap(); - let test_message = move |message: &[u8]| { - let instance = EncryptionInstance::ecies(key_pair.public().clone()).unwrap(); - let ciphertext = instance.encrypt(&message).unwrap(); + let key_pair = Random.generate().unwrap(); + let test_message = move |message: &[u8]| { + let instance = EncryptionInstance::ecies(key_pair.public().clone()).unwrap(); + let ciphertext = instance.encrypt(&message).unwrap(); - if !message.is_empty() { - assert!(&ciphertext[..message.len()] != message) - } + if !message.is_empty() { + assert!(&ciphertext[..message.len()] != message) + } - let instance = DecryptionInstance::ecies(key_pair.secret().clone()).unwrap(); - let decrypted = instance.decrypt(&ciphertext).unwrap(); + let instance = DecryptionInstance::ecies(key_pair.secret().clone()).unwrap(); + let decrypted = instance.decrypt(&ciphertext).unwrap(); - assert_eq!(message, &decrypted[..]) - }; + assert_eq!(message, &decrypted[..]) + }; - test_message(&[1, 2, 3, 4, 5]); - test_message(&[]); - test_message(&[255; 512]); - } + test_message(&[1, 2, 3, 4, 5]); + test_message(&[]); + test_message(&[255; 512]); + } - #[test] - fn encrypt_symmetric() { - use rand::{Rng, OsRng}; + #[test] + fn encrypt_symmetric() { + use rand::{OsRng, Rng}; - let mut rng = OsRng::new().unwrap(); - let mut test_message = move |message: &[u8]| { - let key = Memzero::from(rng.gen::<[u8; 32]>()); + let mut rng = OsRng::new().unwrap(); + let mut test_message = move |message: &[u8]| { + let key = Memzero::from(rng.gen::<[u8; 32]>()); - let instance = EncryptionInstance::aes(key.clone(), rng.gen()); - let ciphertext = instance.encrypt(message).unwrap(); + let instance = EncryptionInstance::aes(key.clone(), 
rng.gen()); + let ciphertext = instance.encrypt(message).unwrap(); - if !message.is_empty() { - assert!(&ciphertext[..message.len()] != message) - } + if !message.is_empty() { + assert!(&ciphertext[..message.len()] != message) + } - let instance = DecryptionInstance::aes(key); - let decrypted = instance.decrypt(&ciphertext).unwrap(); + let instance = DecryptionInstance::aes(key); + let decrypted = instance.decrypt(&ciphertext).unwrap(); - assert_eq!(message, &decrypted[..]) - }; + assert_eq!(message, &decrypted[..]) + }; - test_message(&[1, 2, 3, 4, 5]); - test_message(&[]); - test_message(&[255; 512]); - } + test_message(&[1, 2, 3, 4, 5]); + test_message(&[]); + test_message(&[255; 512]); + } - #[test] - fn encrypt_broadcast() { - use rand::{Rng, OsRng}; + #[test] + fn encrypt_broadcast() { + use rand::{OsRng, Rng}; - let mut rng = OsRng::new().unwrap(); + let mut rng = OsRng::new().unwrap(); - let mut test_message = move |message: &[u8]| { - let all_topics = (0..5).map(|_| rng.gen()).collect::>(); - let known_idx = 2; - let known_topic = all_topics[2]; - let key = Memzero::from(rng.gen::<[u8; 32]>()); + let mut test_message = move |message: &[u8]| { + let all_topics = (0..5).map(|_| rng.gen()).collect::>(); + let known_idx = 2; + let known_topic = all_topics[2]; + let key = Memzero::from(rng.gen::<[u8; 32]>()); - let instance = EncryptionInstance::broadcast(key, all_topics); - let ciphertext = instance.encrypt(message).unwrap(); + let instance = EncryptionInstance::broadcast(key, all_topics); + let ciphertext = instance.encrypt(message).unwrap(); - if !message.is_empty() { - assert!(&ciphertext[..message.len()] != message) - } + if !message.is_empty() { + assert!(&ciphertext[..message.len()] != message) + } - let instance = DecryptionInstance::broadcast(5, known_idx, known_topic).unwrap(); + let instance = DecryptionInstance::broadcast(5, known_idx, known_topic).unwrap(); - let decrypted = instance.decrypt(&ciphertext).unwrap(); + let decrypted = 
instance.decrypt(&ciphertext).unwrap(); - assert_eq!(message, &decrypted[..]) - }; + assert_eq!(message, &decrypted[..]) + }; - test_message(&[1, 2, 3, 4, 5]); - test_message(&[]); - test_message(&[255; 512]); - } + test_message(&[1, 2, 3, 4, 5]); + test_message(&[]); + test_message(&[255; 512]); + } } diff --git a/whisper/src/rpc/filter.rs b/whisper/src/rpc/filter.rs index 46cefd61e..05282782c 100644 --- a/whisper/src/rpc/filter.rs +++ b/whisper/src/rpc/filter.rs @@ -16,423 +16,471 @@ //! Abstraction over filters which works with polling and subscription. -use std::collections::HashMap; -use std::{sync::{Arc, atomic, atomic::AtomicBool, mpsc}, thread}; +use std::{ + collections::HashMap, + sync::{atomic, atomic::AtomicBool, mpsc, Arc}, + thread, +}; use ethereum_types::{H256, H512}; use ethkey::Public; -use jsonrpc_pubsub::typed::{Subscriber, Sink}; +use jsonrpc_pubsub::typed::{Sink, Subscriber}; use parking_lot::{Mutex, RwLock}; -use rand::{Rng, OsRng}; +use rand::{OsRng, Rng}; +use super::{ + key_store::KeyStore, + types::{self, FilterItem, HexEncode}, +}; use message::{Message, Topic}; -use super::{key_store::KeyStore, types::{self, FilterItem, HexEncode}}; /// Kinds of filters, #[derive(PartialEq, Eq, Clone, Copy)] pub enum Kind { - /// Polled filter only returns data upon request - Poll, - /// Subscription filter pushes data to subscriber immediately. - Subscription, + /// Polled filter only returns data upon request + Poll, + /// Subscription filter pushes data to subscriber immediately. + Subscription, } pub type ItemBuffer = Arc>>; enum FilterEntry { - Poll(Arc, ItemBuffer), - Subscription(Arc, Sink), + Poll(Arc, ItemBuffer), + Subscription(Arc, Sink), } /// Filter manager. Handles filters as well as a thread for doing decryption /// and payload decoding. 
pub struct Manager { - key_store: Arc>, - filters: RwLock>, - tx: Mutex>>, - join: Option>, - exit: Arc, + key_store: Arc>, + filters: RwLock>, + tx: Mutex>>, + join: Option>, + exit: Arc, } impl Manager { - /// Create a new filter manager that will dispatch decryption tasks onto - /// the given thread pool. - pub fn new() -> ::std::io::Result { - let (tx, rx) = mpsc::channel::>(); - let exit = Arc::new(AtomicBool::new(false)); - let e = exit.clone(); + /// Create a new filter manager that will dispatch decryption tasks onto + /// the given thread pool. + pub fn new() -> ::std::io::Result { + let (tx, rx) = mpsc::channel::>(); + let exit = Arc::new(AtomicBool::new(false)); + let e = exit.clone(); - let join_handle = thread::Builder::new() - .name("Whisper Decryption Worker".to_string()) - .spawn(move || { - trace!(target: "parity_whisper", "Start decryption worker"); - loop { - if exit.load(atomic::Ordering::Acquire) { - break; - } - if let Ok(item) = rx.try_recv() { - item(); - } - } - })?; + let join_handle = thread::Builder::new() + .name("Whisper Decryption Worker".to_string()) + .spawn(move || { + trace!(target: "parity_whisper", "Start decryption worker"); + loop { + if exit.load(atomic::Ordering::Acquire) { + break; + } + if let Ok(item) = rx.try_recv() { + item(); + } + } + })?; - Ok(Manager { - key_store: Arc::new(RwLock::new(KeyStore::new()?)), - filters: RwLock::new(HashMap::new()), - tx: Mutex::new(tx), - join: Some(join_handle), - exit: e, - }) - } + Ok(Manager { + key_store: Arc::new(RwLock::new(KeyStore::new()?)), + filters: RwLock::new(HashMap::new()), + tx: Mutex::new(tx), + join: Some(join_handle), + exit: e, + }) + } - /// Get a handle to the key store. - pub fn key_store(&self) -> Arc> { - self.key_store.clone() - } + /// Get a handle to the key store. + pub fn key_store(&self) -> Arc> { + self.key_store.clone() + } - /// Get filter kind if it's known. 
- pub fn kind(&self, id: &H256) -> Option { - self.filters.read().get(id).map(|filter| match *filter { - FilterEntry::Poll(_, _) => Kind::Poll, - FilterEntry::Subscription(_, _) => Kind::Subscription, - }) - } + /// Get filter kind if it's known. + pub fn kind(&self, id: &H256) -> Option { + self.filters.read().get(id).map(|filter| match *filter { + FilterEntry::Poll(_, _) => Kind::Poll, + FilterEntry::Subscription(_, _) => Kind::Subscription, + }) + } - /// Remove filter by ID. - pub fn remove(&self, id: &H256) { - self.filters.write().remove(id); - } + /// Remove filter by ID. + pub fn remove(&self, id: &H256) { + self.filters.write().remove(id); + } - /// Add a new polled filter. - pub fn insert_polled(&self, filter: Filter) -> Result { - let buffer = Arc::new(Mutex::new(Vec::new())); - let entry = FilterEntry::Poll(Arc::new(filter), buffer); - let id = OsRng::new() - .map_err(|_| "unable to acquire secure randomness")? - .gen(); + /// Add a new polled filter. + pub fn insert_polled(&self, filter: Filter) -> Result { + let buffer = Arc::new(Mutex::new(Vec::new())); + let entry = FilterEntry::Poll(Arc::new(filter), buffer); + let id = OsRng::new() + .map_err(|_| "unable to acquire secure randomness")? + .gen(); - self.filters.write().insert(id, entry); - Ok(id) - } + self.filters.write().insert(id, entry); + Ok(id) + } - /// Insert new subscription filter. Generates a secure ID and sends it to - /// the subscriber - pub fn insert_subscription(&self, filter: Filter, sub: Subscriber) - -> Result<(), &'static str> - { - let id: H256 = OsRng::new() - .map_err(|_| "unable to acquire secure randomness")? - .gen(); + /// Insert new subscription filter. Generates a secure ID and sends it to + /// the subscriber + pub fn insert_subscription( + &self, + filter: Filter, + sub: Subscriber, + ) -> Result<(), &'static str> { + let id: H256 = OsRng::new() + .map_err(|_| "unable to acquire secure randomness")? 
+ .gen(); - sub.assign_id(::jsonrpc_pubsub::SubscriptionId::String(format!("{:x}", id))) - .map(move |sink| { - let entry = FilterEntry::Subscription(Arc::new(filter), sink); - self.filters.write().insert(id, entry); - }) - .map_err(|_| "subscriber disconnected") - } + sub.assign_id(::jsonrpc_pubsub::SubscriptionId::String(format!( + "{:x}", + id + ))) + .map(move |sink| { + let entry = FilterEntry::Subscription(Arc::new(filter), sink); + self.filters.write().insert(id, entry); + }) + .map_err(|_| "subscriber disconnected") + } - /// Poll changes on filter identified by ID. - pub fn poll_changes(&self, id: &H256) -> Option> { - self.filters.read().get(id).and_then(|filter| match *filter { - FilterEntry::Subscription(_, _) => None, - FilterEntry::Poll(_, ref changes) - => Some(::std::mem::replace(&mut *changes.lock(), Vec::new())), - }) - } + /// Poll changes on filter identified by ID. + pub fn poll_changes(&self, id: &H256) -> Option> { + self.filters + .read() + .get(id) + .and_then(|filter| match *filter { + FilterEntry::Subscription(_, _) => None, + FilterEntry::Poll(_, ref changes) => { + Some(::std::mem::replace(&mut *changes.lock(), Vec::new())) + } + }) + } } // machinery for attaching the manager to the network instance. impl ::net::MessageHandler for Arc { - fn handle_messages(&self, messages: &[Message]) { - let filters = self.filters.read(); - let filters_iter = filters - .values() - .flat_map(|filter| messages.iter().map(move |msg| (filter, msg))) ; + fn handle_messages(&self, messages: &[Message]) { + let filters = self.filters.read(); + let filters_iter = filters + .values() + .flat_map(|filter| messages.iter().map(move |msg| (filter, msg))); - for (filter, message) in filters_iter { - // if the message matches any of the possible bloom filters, - // send to thread pool to attempt decryption and avoid - // blocking the network thread for long. 
- let failed_send = match *filter { - FilterEntry::Poll(ref filter, _) | FilterEntry::Subscription(ref filter, _) - if !filter.basic_matches(message) => None, - FilterEntry::Poll(ref filter, ref buffer) => { - let (message, key_store) = (message.clone(), self.key_store.clone()); - let (filter, buffer) = (filter.clone(), buffer.clone()); + for (filter, message) in filters_iter { + // if the message matches any of the possible bloom filters, + // send to thread pool to attempt decryption and avoid + // blocking the network thread for long. + let failed_send = match *filter { + FilterEntry::Poll(ref filter, _) | FilterEntry::Subscription(ref filter, _) + if !filter.basic_matches(message) => + { + None + } + FilterEntry::Poll(ref filter, ref buffer) => { + let (message, key_store) = (message.clone(), self.key_store.clone()); + let (filter, buffer) = (filter.clone(), buffer.clone()); - self.tx.lock().send(Box::new(move || { - filter.handle_message( - &message, - &*key_store, - |matched| buffer.lock().push(matched), - ) - })).err().map(|x| x.0) - } - FilterEntry::Subscription(ref filter, ref sink) => { - let (message, key_store) = (message.clone(), self.key_store.clone()); - let (filter, sink) = (filter.clone(), sink.clone()); + self.tx + .lock() + .send(Box::new(move || { + filter.handle_message(&message, &*key_store, |matched| { + buffer.lock().push(matched) + }) + })) + .err() + .map(|x| x.0) + } + FilterEntry::Subscription(ref filter, ref sink) => { + let (message, key_store) = (message.clone(), self.key_store.clone()); + let (filter, sink) = (filter.clone(), sink.clone()); - self.tx.lock().send(Box::new(move || { - filter.handle_message( - &message, - &*key_store, - |matched| { let _ = sink.notify(Ok(matched)); }, - ) - })).err().map(|x| x.0) - } - }; + self.tx + .lock() + .send(Box::new(move || { + filter.handle_message(&message, &*key_store, |matched| { + let _ = sink.notify(Ok(matched)); + }) + })) + .err() + .map(|x| x.0) + } + }; - // if we failed to send work, 
no option but to do it locally. - if let Some(local_work) = failed_send { - (local_work)() - } - } - } + // if we failed to send work, no option but to do it locally. + if let Some(local_work) = failed_send { + (local_work)() + } + } + } } impl Drop for Manager { - fn drop(&mut self) { - trace!(target: "parity_whisper", "waiting to drop FilterManager"); - self.exit.store(true, atomic::Ordering::Release); - if let Some(guard) = self.join.take() { - let _ = guard.join(); - } - trace!(target: "parity_whisper", "FilterManager dropped"); - } + fn drop(&mut self) { + trace!(target: "parity_whisper", "waiting to drop FilterManager"); + self.exit.store(true, atomic::Ordering::Release); + if let Some(guard) = self.join.take() { + let _ = guard.join(); + } + trace!(target: "parity_whisper", "FilterManager dropped"); + } } /// Filter incoming messages by critera. pub struct Filter { - topics: Vec<(Vec, H512, Topic)>, - from: Option, - decrypt_with: Option, + topics: Vec<(Vec, H512, Topic)>, + from: Option, + decrypt_with: Option, } impl Filter { - /// Create a new filter from filter request. - /// - /// Fails if the topics vector is empty. - pub fn new(params: types::FilterRequest) -> Result { - if params.topics.is_empty() { - return Err("no topics for filter"); - } + /// Create a new filter from filter request. + /// + /// Fails if the topics vector is empty. 
+ pub fn new(params: types::FilterRequest) -> Result { + if params.topics.is_empty() { + return Err("no topics for filter"); + } - let topics: Vec<_> = params.topics.into_iter() - .map(|x| x.into_inner()) - .map(|topic| { - let abridged = super::abridge_topic(&topic); - (topic, abridged.bloom(), abridged) - }) - .collect(); + let topics: Vec<_> = params + .topics + .into_iter() + .map(|x| x.into_inner()) + .map(|topic| { + let abridged = super::abridge_topic(&topic); + (topic, abridged.bloom(), abridged) + }) + .collect(); - Ok(Filter { - topics: topics, - from: params.from.map(|x| x.into_inner()), - decrypt_with: params.decrypt_with.map(|x| x.into_inner()), - }) - } + Ok(Filter { + topics: topics, + from: params.from.map(|x| x.into_inner()), + decrypt_with: params.decrypt_with.map(|x| x.into_inner()), + }) + } - // does basic matching: - // whether the given message matches at least one of the topics of the - // filter. - // TODO: minimum PoW heuristic. - fn basic_matches(&self, message: &Message) -> bool { - self.topics.iter().any(|&(_, ref bloom, _)| { - &(bloom & message.bloom()) == bloom - }) - } + // does basic matching: + // whether the given message matches at least one of the topics of the + // filter. + // TODO: minimum PoW heuristic. + fn basic_matches(&self, message: &Message) -> bool { + self.topics + .iter() + .any(|&(_, ref bloom, _)| &(bloom & message.bloom()) == bloom) + } - // handle a message that matches the bloom. - fn handle_message( - &self, - message: &Message, - store: &RwLock, - on_match: F, - ) { - use rpc::crypto::DecryptionInstance; - use tiny_keccak::keccak256; + // handle a message that matches the bloom. 
+ fn handle_message( + &self, + message: &Message, + store: &RwLock, + on_match: F, + ) { + use rpc::crypto::DecryptionInstance; + use tiny_keccak::keccak256; - let matched_indices: Vec<_> = self.topics.iter() - .enumerate() - .filter_map(|(i, &(_, ref bloom, ref abridged))| { - let contains_topic = &(bloom & message.bloom()) == bloom - && message.topics().contains(abridged); + let matched_indices: Vec<_> = self + .topics + .iter() + .enumerate() + .filter_map(|(i, &(_, ref bloom, ref abridged))| { + let contains_topic = + &(bloom & message.bloom()) == bloom && message.topics().contains(abridged); - if contains_topic { Some(i) } else { None } - }) - .collect(); + if contains_topic { + Some(i) + } else { + None + } + }) + .collect(); - if matched_indices.is_empty() { return } + if matched_indices.is_empty() { + return; + } - let decrypt = match self.decrypt_with { - Some(ref id) => match store.read().decryption_instance(id) { - Some(d) => d, - None => { - warn!(target: "whisper", "Filter attempted to decrypt with destroyed identity {}", + let decrypt = match self.decrypt_with { + Some(ref id) => match store.read().decryption_instance(id) { + Some(d) => d, + None => { + warn!(target: "whisper", "Filter attempted to decrypt with destroyed identity {}", id); - return - } - }, - None => { - let known_idx = matched_indices[0]; - let known_topic = H256(keccak256(&self.topics[0].0)); + return; + } + }, + None => { + let known_idx = matched_indices[0]; + let known_topic = H256(keccak256(&self.topics[0].0)); - DecryptionInstance::broadcast(message.topics().len(), known_idx, known_topic) - .expect("known idx is within the range 0..message.topics.len(); qed") - } - }; + DecryptionInstance::broadcast(message.topics().len(), known_idx, known_topic) + .expect("known idx is within the range 0..message.topics.len(); qed") + } + }; - let decrypted = match decrypt.decrypt(message.data()) { - Some(d) => d, - None => { - trace!(target: "whisper", "Failed to decrypt message with {} 
matching topics", + let decrypted = match decrypt.decrypt(message.data()) { + Some(d) => d, + None => { + trace!(target: "whisper", "Failed to decrypt message with {} matching topics", matched_indices.len()); - return - } - }; + return; + } + }; - match ::rpc::payload::decode(&decrypted) { - Ok(decoded) => { - if decoded.from != self.from { return } + match ::rpc::payload::decode(&decrypted) { + Ok(decoded) => { + if decoded.from != self.from { + return; + } - let matched_topics = matched_indices - .into_iter() - .map(|i| self.topics[i].0.clone()) - .map(HexEncode) - .collect(); + let matched_topics = matched_indices + .into_iter() + .map(|i| self.topics[i].0.clone()) + .map(HexEncode) + .collect(); - on_match(FilterItem { - from: decoded.from.map(HexEncode), - recipient: self.decrypt_with.map(HexEncode), - ttl: message.envelope().ttl, - topics: matched_topics, - timestamp: message.envelope().expiry - message.envelope().ttl, - payload: HexEncode(decoded.message.to_vec()), - padding: decoded.padding.map(|pad| HexEncode(pad.to_vec())), - }) - } - Err(reason) => - trace!(target: "whisper", "Bad payload in decrypted message with {} topics: {}", - matched_indices.len(), reason), - } - } + on_match(FilterItem { + from: decoded.from.map(HexEncode), + recipient: self.decrypt_with.map(HexEncode), + ttl: message.envelope().ttl, + topics: matched_topics, + timestamp: message.envelope().expiry - message.envelope().ttl, + payload: HexEncode(decoded.message.to_vec()), + padding: decoded.padding.map(|pad| HexEncode(pad.to_vec())), + }) + } + Err(reason) => { + trace!(target: "whisper", "Bad payload in decrypted message with {} topics: {}", + matched_indices.len(), reason) + } + } + } } #[cfg(test)] mod tests { - use message::{CreateParams, Message, Topic}; - use rpc::types::{FilterRequest, HexEncode}; - use rpc::abridge_topic; - use super::*; + use super::*; + use message::{CreateParams, Message, Topic}; + use rpc::{ + abridge_topic, + types::{FilterRequest, HexEncode}, + }; - 
#[test] - fn rejects_empty_topics() { - let req = FilterRequest { - decrypt_with: Default::default(), - from: None, - topics: Vec::new(), - }; + #[test] + fn rejects_empty_topics() { + let req = FilterRequest { + decrypt_with: Default::default(), + from: None, + topics: Vec::new(), + }; - assert!(Filter::new(req).is_err()); - } + assert!(Filter::new(req).is_err()); + } - #[test] - fn basic_match() { - let topics = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]]; - let abridged_topics: Vec<_> = topics.iter().map(|x| abridge_topic(&x)).collect(); + #[test] + fn basic_match() { + let topics = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]]; + let abridged_topics: Vec<_> = topics.iter().map(|x| abridge_topic(&x)).collect(); - let req = FilterRequest { - decrypt_with: Default::default(), - from: None, - topics: topics.into_iter().map(HexEncode).collect(), - }; + let req = FilterRequest { + decrypt_with: Default::default(), + from: None, + topics: topics.into_iter().map(HexEncode).collect(), + }; - let filter = Filter::new(req).unwrap(); - let message = Message::create(CreateParams { - ttl: 100, - payload: vec![1, 3, 5, 7, 9], - topics: abridged_topics.clone(), - work: 0, - }).unwrap(); + let filter = Filter::new(req).unwrap(); + let message = Message::create(CreateParams { + ttl: 100, + payload: vec![1, 3, 5, 7, 9], + topics: abridged_topics.clone(), + work: 0, + }) + .unwrap(); - assert!(filter.basic_matches(&message)); + assert!(filter.basic_matches(&message)); - let message = Message::create(CreateParams { - ttl: 100, - payload: vec![1, 3, 5, 7, 9], - topics: abridged_topics.clone(), - work: 0, - }).unwrap(); + let message = Message::create(CreateParams { + ttl: 100, + payload: vec![1, 3, 5, 7, 9], + topics: abridged_topics.clone(), + work: 0, + }) + .unwrap(); - assert!(filter.basic_matches(&message)); + assert!(filter.basic_matches(&message)); - let message = Message::create(CreateParams { - ttl: 100, - payload: vec![1, 3, 5, 7, 9], - topics: vec![Topic([1, 8, 3, 99])], - work: 
0, - }).unwrap(); + let message = Message::create(CreateParams { + ttl: 100, + payload: vec![1, 3, 5, 7, 9], + topics: vec![Topic([1, 8, 3, 99])], + work: 0, + }) + .unwrap(); - assert!(!filter.basic_matches(&message)); - } + assert!(!filter.basic_matches(&message)); + } - #[test] - fn decrypt_and_decode() { - use rpc::payload::{self, EncodeParams}; - use rpc::key_store::{Key, KeyStore}; + #[test] + fn decrypt_and_decode() { + use rpc::{ + key_store::{Key, KeyStore}, + payload::{self, EncodeParams}, + }; - let topics = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]]; - let abridged_topics: Vec<_> = topics.iter().map(|x| abridge_topic(&x)).collect(); + let topics = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]]; + let abridged_topics: Vec<_> = topics.iter().map(|x| abridge_topic(&x)).collect(); - let mut store = KeyStore::new().unwrap(); - let signing_pair = Key::new_asymmetric(store.rng()); - let encrypting_key = Key::new_symmetric(store.rng()); + let mut store = KeyStore::new().unwrap(); + let signing_pair = Key::new_asymmetric(store.rng()); + let encrypting_key = Key::new_symmetric(store.rng()); - let decrypt_id = store.insert(encrypting_key); - let encryption_instance = store.encryption_instance(&decrypt_id).unwrap(); + let decrypt_id = store.insert(encrypting_key); + let encryption_instance = store.encryption_instance(&decrypt_id).unwrap(); - let store = ::parking_lot::RwLock::new(store); + let store = ::parking_lot::RwLock::new(store); - let payload = payload::encode(EncodeParams { - message: &[1, 2, 3], - padding: Some(&[4, 5, 4, 5]), - sign_with: Some(signing_pair.secret().unwrap()) - }).unwrap(); + let payload = payload::encode(EncodeParams { + message: &[1, 2, 3], + padding: Some(&[4, 5, 4, 5]), + sign_with: Some(signing_pair.secret().unwrap()), + }) + .unwrap(); - let encrypted = encryption_instance.encrypt(&payload).unwrap(); + let encrypted = encryption_instance.encrypt(&payload).unwrap(); - let message = Message::create(CreateParams { - ttl: 100, - payload: 
encrypted, - topics: abridged_topics.clone(), - work: 0, - }).unwrap(); + let message = Message::create(CreateParams { + ttl: 100, + payload: encrypted, + topics: abridged_topics.clone(), + work: 0, + }) + .unwrap(); - let message2 = Message::create(CreateParams { - ttl: 100, - payload: vec![3, 5, 7, 9], - topics: abridged_topics, - work: 0, - }).unwrap(); + let message2 = Message::create(CreateParams { + ttl: 100, + payload: vec![3, 5, 7, 9], + topics: abridged_topics, + work: 0, + }) + .unwrap(); - let filter = Filter::new(FilterRequest { - decrypt_with: Some(HexEncode(decrypt_id)), - from: Some(HexEncode(signing_pair.public().unwrap().clone())), - topics: topics.into_iter().map(HexEncode).collect(), - }).unwrap(); + let filter = Filter::new(FilterRequest { + decrypt_with: Some(HexEncode(decrypt_id)), + from: Some(HexEncode(signing_pair.public().unwrap().clone())), + topics: topics.into_iter().map(HexEncode).collect(), + }) + .unwrap(); - assert!(filter.basic_matches(&message)); - assert!(filter.basic_matches(&message2)); + assert!(filter.basic_matches(&message)); + assert!(filter.basic_matches(&message2)); - let items = ::std::cell::Cell::new(0); - let on_match = |_| { items.set(items.get() + 1); }; + let items = ::std::cell::Cell::new(0); + let on_match = |_| { + items.set(items.get() + 1); + }; - filter.handle_message(&message, &store, &on_match); - filter.handle_message(&message2, &store, &on_match); + filter.handle_message(&message, &store, &on_match); + filter.handle_message(&message2, &store, &on_match); - assert_eq!(items.get(), 1); - } + assert_eq!(items.get(), 1); + } } diff --git a/whisper/src/rpc/key_store.rs b/whisper/src/rpc/key_store.rs index 081a8b374..101430693 100644 --- a/whisper/src/rpc/key_store.rs +++ b/whisper/src/rpc/key_store.rs @@ -24,172 +24,173 @@ use std::collections::HashMap; use ethereum_types::H256; use ethkey::{KeyPair, Public, Secret}; use memzero::Memzero; -use rand::{Rng, OsRng}; +use rand::{OsRng, Rng}; -use 
rpc::crypto::{AES_KEY_LEN, EncryptionInstance, DecryptionInstance}; +use rpc::crypto::{DecryptionInstance, EncryptionInstance, AES_KEY_LEN}; /// A symmetric or asymmetric key used for encryption, decryption, and signing /// of payloads. pub enum Key { - /// ECIES key pair for Secp2561k curve. Suitable for encryption, decryption, - /// and signing. - Asymmetric(KeyPair), - /// AES-256 GCM mode. Suitable for encryption, decryption, but not signing. - Symmetric(Memzero<[u8; AES_KEY_LEN]>), + /// ECIES key pair for Secp2561k curve. Suitable for encryption, decryption, + /// and signing. + Asymmetric(KeyPair), + /// AES-256 GCM mode. Suitable for encryption, decryption, but not signing. + Symmetric(Memzero<[u8; AES_KEY_LEN]>), } impl Key { - /// Generate a random asymmetric key with the given cryptographic RNG. - pub fn new_asymmetric(rng: &mut OsRng) -> Self { - match ::ethkey::Generator::generate(rng) { - Ok(pair) => Key::Asymmetric(pair), - Err(void) => match void {}, - } - } + /// Generate a random asymmetric key with the given cryptographic RNG. + pub fn new_asymmetric(rng: &mut OsRng) -> Self { + match ::ethkey::Generator::generate(rng) { + Ok(pair) => Key::Asymmetric(pair), + Err(void) => match void {}, + } + } - /// Generate a random symmetric key with the given cryptographic RNG. - pub fn new_symmetric(rng: &mut OsRng) -> Self { - Key::Symmetric(Memzero::from(rng.gen::<[u8; 32]>())) - } + /// Generate a random symmetric key with the given cryptographic RNG. + pub fn new_symmetric(rng: &mut OsRng) -> Self { + Key::Symmetric(Memzero::from(rng.gen::<[u8; 32]>())) + } - /// From secret asymmetric key. Fails if secret is invalid. - pub fn from_secret(secret: Secret) -> Option { - KeyPair::from_secret(secret).map(Key::Asymmetric).ok() - } + /// From secret asymmetric key. Fails if secret is invalid. + pub fn from_secret(secret: Secret) -> Option { + KeyPair::from_secret(secret).map(Key::Asymmetric).ok() + } - /// From raw symmetric key. 
- pub fn from_raw_symmetric(key: [u8; AES_KEY_LEN]) -> Self { - Key::Symmetric(Memzero::from(key)) - } + /// From raw symmetric key. + pub fn from_raw_symmetric(key: [u8; AES_KEY_LEN]) -> Self { + Key::Symmetric(Memzero::from(key)) + } - /// Get a handle to the public key if this is an asymmetric key. - pub fn public(&self) -> Option<&Public> { - match *self { - Key::Asymmetric(ref pair) => Some(pair.public()), - Key::Symmetric(_) => None, - } - } + /// Get a handle to the public key if this is an asymmetric key. + pub fn public(&self) -> Option<&Public> { + match *self { + Key::Asymmetric(ref pair) => Some(pair.public()), + Key::Symmetric(_) => None, + } + } - /// Get a handle to the secret key if this is an asymmetric key. - pub fn secret(&self) -> Option<&Secret> { - match *self { - Key::Asymmetric(ref pair) => Some(pair.secret()), - Key::Symmetric(_) => None, - } - } + /// Get a handle to the secret key if this is an asymmetric key. + pub fn secret(&self) -> Option<&Secret> { + match *self { + Key::Asymmetric(ref pair) => Some(pair.secret()), + Key::Symmetric(_) => None, + } + } - /// Get a handle to the symmetric key. - pub fn symmetric(&self) -> Option<&[u8; AES_KEY_LEN]> { - match *self { - Key::Asymmetric(_) => None, - Key::Symmetric(ref key) => Some(key), - } - } + /// Get a handle to the symmetric key. + pub fn symmetric(&self) -> Option<&[u8; AES_KEY_LEN]> { + match *self { + Key::Asymmetric(_) => None, + Key::Symmetric(ref key) => Some(key), + } + } } /// Key store. pub struct KeyStore { - rng: OsRng, - identities: HashMap, + rng: OsRng, + identities: HashMap, } impl KeyStore { - /// Create the key store. Returns any error in accessing the system's secure - /// RNG. - pub fn new() -> Result { - Ok(KeyStore { - rng: OsRng::new()?, - identities: HashMap::new(), - }) - } + /// Create the key store. Returns any error in accessing the system's secure + /// RNG. 
+ pub fn new() -> Result { + Ok(KeyStore { + rng: OsRng::new()?, + identities: HashMap::new(), + }) + } - /// Import a key, generating a random identity for it. - pub fn insert(&mut self, key: Key) -> H256 { - let id = self.rng().gen(); - self.identities.insert(id, key); + /// Import a key, generating a random identity for it. + pub fn insert(&mut self, key: Key) -> H256 { + let id = self.rng().gen(); + self.identities.insert(id, key); - id - } + id + } - /// Get a key by ID. - pub fn get<'a>(&'a self, id: &H256) -> Option<&'a Key> { - self.identities.get(id) - } + /// Get a key by ID. + pub fn get<'a>(&'a self, id: &H256) -> Option<&'a Key> { + self.identities.get(id) + } - /// Get asymmetric ID's public key. - pub fn public<'a>(&'a self, id: &H256) -> Option<&'a Public> { - self.get(id).and_then(Key::public) - } + /// Get asymmetric ID's public key. + pub fn public<'a>(&'a self, id: &H256) -> Option<&'a Public> { + self.get(id).and_then(Key::public) + } - /// Get asymmetric ID's secret key. - pub fn secret<'a>(&'a self, id: &H256) -> Option<&'a Secret> { - self.get(id).and_then(Key::secret) - } + /// Get asymmetric ID's secret key. + pub fn secret<'a>(&'a self, id: &H256) -> Option<&'a Secret> { + self.get(id).and_then(Key::secret) + } - /// Get symmetric ID's key. - pub fn symmetric<'a>(&'a self, id: &H256) -> Option<&'a [u8; AES_KEY_LEN]> { - self.get(id).and_then(Key::symmetric) - } + /// Get symmetric ID's key. + pub fn symmetric<'a>(&'a self, id: &H256) -> Option<&'a [u8; AES_KEY_LEN]> { + self.get(id).and_then(Key::symmetric) + } - /// Get encryption instance for identity. 
- pub fn encryption_instance(&self, id: &H256) -> Result { - self.get(id).ok_or("no such identity").and_then(|key| match *key { - Key::Asymmetric(ref pair) => EncryptionInstance::ecies(pair.public().clone()) - .map_err(|_| "could not create encryption instance for id"), - Key::Symmetric(ref key) => - OsRng::new() - .map(|mut rng| EncryptionInstance::aes(key.clone(), rng.gen())) - .map_err(|_| "unable to get secure randomness") - }) - } + /// Get encryption instance for identity. + pub fn encryption_instance(&self, id: &H256) -> Result { + self.get(id) + .ok_or("no such identity") + .and_then(|key| match *key { + Key::Asymmetric(ref pair) => EncryptionInstance::ecies(pair.public().clone()) + .map_err(|_| "could not create encryption instance for id"), + Key::Symmetric(ref key) => OsRng::new() + .map(|mut rng| EncryptionInstance::aes(key.clone(), rng.gen())) + .map_err(|_| "unable to get secure randomness"), + }) + } - /// Get decryption instance for identity. - /// If the identity is known, always succeeds. - pub fn decryption_instance(&self, id: &H256) -> Option { - self.get(id).map(|key| match *key { - Key::Asymmetric(ref pair) => DecryptionInstance::ecies(pair.secret().clone()) - .expect("all keys stored are valid; qed"), - Key::Symmetric(ref key) => DecryptionInstance::aes(key.clone()), - }) - } + /// Get decryption instance for identity. + /// If the identity is known, always succeeds. + pub fn decryption_instance(&self, id: &H256) -> Option { + self.get(id).map(|key| match *key { + Key::Asymmetric(ref pair) => DecryptionInstance::ecies(pair.secret().clone()) + .expect("all keys stored are valid; qed"), + Key::Symmetric(ref key) => DecryptionInstance::aes(key.clone()), + }) + } - /// Whether the store contains a key by this ID. - pub fn contains(&self, id: &H256) -> bool { - self.identities.contains_key(id) - } + /// Whether the store contains a key by this ID. 
+ pub fn contains(&self, id: &H256) -> bool { + self.identities.contains_key(id) + } - /// Remove a key by ID. - pub fn remove(&mut self, id: &H256) -> bool { - self.identities.remove(id).is_some() - } + /// Remove a key by ID. + pub fn remove(&mut self, id: &H256) -> bool { + self.identities.remove(id).is_some() + } - /// Get RNG. - pub fn rng(&mut self) -> &mut OsRng { - &mut self.rng - } + /// Get RNG. + pub fn rng(&mut self) -> &mut OsRng { + &mut self.rng + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn rejects_invalid_secret() { - let bad_secret = ::ethkey::Secret::from([0xff; 32]); - assert!(Key::from_secret(bad_secret).is_none()); - } + #[test] + fn rejects_invalid_secret() { + let bad_secret = ::ethkey::Secret::from([0xff; 32]); + assert!(Key::from_secret(bad_secret).is_none()); + } - #[test] - fn generated_key_should_exist() { - let mut store = KeyStore::new().unwrap(); - let key = Key::new_asymmetric(store.rng()); + #[test] + fn generated_key_should_exist() { + let mut store = KeyStore::new().unwrap(); + let key = Key::new_asymmetric(store.rng()); - assert!(key.public().is_some()); - assert!(key.secret().is_some()); + assert!(key.public().is_some()); + assert!(key.secret().is_some()); - let id = store.insert(key); + let id = store.insert(key); - assert!(store.contains(&id)); - assert!(store.get(&id).is_some()); - } + assert!(store.contains(&id)); + assert!(store.get(&id).is_some()); + } } diff --git a/whisper/src/rpc/mod.rs b/whisper/src/rpc/mod.rs index 3aa7aca1c..94ccba4c7 100644 --- a/whisper/src/rpc/mod.rs +++ b/whisper/src/rpc/mod.rs @@ -25,15 +25,17 @@ use std::sync::Arc; use jsonrpc_core::{Error, ErrorCode, Metadata}; use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{Session, PubSubMetadata, SubscriptionId, typed::Subscriber}; +use jsonrpc_pubsub::{typed::Subscriber, PubSubMetadata, Session, SubscriptionId}; use ethereum_types::H256; use memzero::Memzero; use parking_lot::RwLock; -use self::filter::Filter; -use 
self::key_store::{Key, KeyStore}; -use self::types::HexEncode; +use self::{ + filter::Filter, + key_store::{Key, KeyStore}, + types::HexEncode, +}; use message::{CreateParams, Message, Topic}; @@ -47,342 +49,376 @@ pub use self::filter::Manager as FilterManager; // create whisper RPC error. fn whisper_error>(message: T) -> Error { - const ERROR_CODE: i64 = -32085; + const ERROR_CODE: i64 = -32085; - Error { - code: ErrorCode::ServerError(ERROR_CODE), - message: message.into(), - data: None, - } + Error { + code: ErrorCode::ServerError(ERROR_CODE), + message: message.into(), + data: None, + } } fn topic_hash(topic: &[u8]) -> H256 { - H256(::tiny_keccak::keccak256(topic)) + H256(::tiny_keccak::keccak256(topic)) } // abridge topic using first four bytes of hash. fn abridge_topic(topic: &[u8]) -> Topic { - let mut abridged = [0; 4]; - let hash = topic_hash(topic).0; - abridged.copy_from_slice(&hash[..4]); - abridged.into() + let mut abridged = [0; 4]; + let hash = topic_hash(topic).0; + abridged.copy_from_slice(&hash[..4]); + abridged.into() } /// Whisper RPC interface. #[rpc(server)] pub trait Whisper { - /// Info about the node. - #[rpc(name = "shh_info")] - fn info(&self) -> Result; + /// Info about the node. + #[rpc(name = "shh_info")] + fn info(&self) -> Result; - /// Generate a new asymmetric key pair and return an identity. - #[rpc(name = "shh_newKeyPair")] - fn new_key_pair(&self) -> Result; + /// Generate a new asymmetric key pair and return an identity. + #[rpc(name = "shh_newKeyPair")] + fn new_key_pair(&self) -> Result; - /// Import the given SECP2561k private key and return an identity. - #[rpc(name = "shh_addPrivateKey")] - fn add_private_key(&self, _:types::Private) -> Result; + /// Import the given SECP2561k private key and return an identity. + #[rpc(name = "shh_addPrivateKey")] + fn add_private_key(&self, _: types::Private) -> Result; - /// Generate a new symmetric key and return an identity. 
- #[rpc(name = "shh_newSymKey")] - fn new_sym_key(&self) -> Result; + /// Generate a new symmetric key and return an identity. + #[rpc(name = "shh_newSymKey")] + fn new_sym_key(&self) -> Result; - /// Import the given symmetric key and return an identity. - #[rpc(name = "shh_addSymKey")] - fn add_sym_key(&self, _:types::Symmetric) -> Result; + /// Import the given symmetric key and return an identity. + #[rpc(name = "shh_addSymKey")] + fn add_sym_key(&self, _: types::Symmetric) -> Result; - /// Get public key. Succeeds if identity is stored and asymmetric. - #[rpc(name = "shh_getPublicKey")] - fn get_public(&self, _: types::Identity) -> Result; + /// Get public key. Succeeds if identity is stored and asymmetric. + #[rpc(name = "shh_getPublicKey")] + fn get_public(&self, _: types::Identity) -> Result; - /// Get private key. Succeeds if identity is stored and asymmetric. - #[rpc(name = "shh_getPrivateKey")] - fn get_private(&self, _: types::Identity) -> Result; + /// Get private key. Succeeds if identity is stored and asymmetric. + #[rpc(name = "shh_getPrivateKey")] + fn get_private(&self, _: types::Identity) -> Result; - #[rpc(name = "shh_getSymKey")] - fn get_symmetric(&self, _: types::Identity) -> Result; + #[rpc(name = "shh_getSymKey")] + fn get_symmetric(&self, _: types::Identity) -> Result; - /// Delete key pair denoted by given identity. - /// - /// Return true if successfully removed, false if unknown, - /// and error otherwise. - #[rpc(name = "shh_deleteKey")] - fn remove_key(&self, _: types::Identity) -> Result; + /// Delete key pair denoted by given identity. + /// + /// Return true if successfully removed, false if unknown, + /// and error otherwise. + #[rpc(name = "shh_deleteKey")] + fn remove_key(&self, _: types::Identity) -> Result; - /// Post a message to the network with given parameters. - #[rpc(name = "shh_post")] - fn post(&self, _: types::PostRequest) -> Result; + /// Post a message to the network with given parameters. 
+ #[rpc(name = "shh_post")] + fn post(&self, _: types::PostRequest) -> Result; - /// Create a new polled filter. - #[rpc(name = "shh_newMessageFilter")] - fn new_filter(&self, _: types::FilterRequest) -> Result; + /// Create a new polled filter. + #[rpc(name = "shh_newMessageFilter")] + fn new_filter(&self, _: types::FilterRequest) -> Result; - /// Poll changes on a polled filter. - #[rpc(name = "shh_getFilterMessages")] - fn poll_changes(&self, _: types::Identity) -> Result, Error>; + /// Poll changes on a polled filter. + #[rpc(name = "shh_getFilterMessages")] + fn poll_changes(&self, _: types::Identity) -> Result, Error>; - /// Delete polled filter. Return bool indicating success. - #[rpc(name = "shh_deleteMessageFilter")] - fn delete_filter(&self, _: types::Identity) -> Result; + /// Delete polled filter. Return bool indicating success. + #[rpc(name = "shh_deleteMessageFilter")] + fn delete_filter(&self, _: types::Identity) -> Result; } /// Whisper RPC pubsub. #[rpc(server)] pub trait WhisperPubSub { - // RPC Metadata - type Metadata; - /// Subscribe to messages matching the filter. - #[pubsub(subscription = "shh_subscription", subscribe, name = "shh_subscribe")] - fn subscribe(&self, _: Self::Metadata, _: Subscriber, _: types::FilterRequest); + // RPC Metadata + type Metadata; + /// Subscribe to messages matching the filter. + #[pubsub(subscription = "shh_subscription", subscribe, name = "shh_subscribe")] + fn subscribe( + &self, + _: Self::Metadata, + _: Subscriber, + _: types::FilterRequest, + ); - /// Unsubscribe from filter matching given ID. Return - /// true on success, error otherwise. - #[pubsub(subscription = "shh_subscription", unsubscribe, name = "shh_unsubscribe")] - fn unsubscribe(&self, _: Option, _: SubscriptionId) -> Result; + /// Unsubscribe from filter matching given ID. Return + /// true on success, error otherwise. 
+ #[pubsub( + subscription = "shh_subscription", + unsubscribe, + name = "shh_unsubscribe" + )] + fn unsubscribe(&self, _: Option, _: SubscriptionId) -> Result; } /// Something which can send messages to the network. pub trait PoolHandle: Send + Sync { - /// Give message to the whisper network for relay. - /// Returns false if PoW too low. - fn relay(&self, message: Message) -> bool; + /// Give message to the whisper network for relay. + /// Returns false if PoW too low. + fn relay(&self, message: Message) -> bool; - /// Number of messages and memory used by resident messages. - fn pool_status(&self) -> ::net::PoolStatus; + /// Number of messages and memory used by resident messages. + fn pool_status(&self) -> ::net::PoolStatus; } /// Default, simple metadata implementation. #[derive(Clone, Default)] pub struct Meta { - session: Option>, + session: Option>, } impl Metadata for Meta {} impl PubSubMetadata for Meta { - fn session(&self) -> Option> { - self.session.clone() - } + fn session(&self) -> Option> { + self.session.clone() + } } /// Implementation of whisper RPC. pub struct WhisperClient { - store: Arc>, - pool: P, - filter_manager: Arc, - _meta: ::std::marker::PhantomData, + store: Arc>, + pool: P, + filter_manager: Arc, + _meta: ::std::marker::PhantomData, } impl

WhisperClient

{ - /// Create a new whisper client with basic metadata. - pub fn with_simple_meta(pool: P, filter_manager: Arc) -> Self { - WhisperClient::new(pool, filter_manager) - } + /// Create a new whisper client with basic metadata. + pub fn with_simple_meta(pool: P, filter_manager: Arc) -> Self { + WhisperClient::new(pool, filter_manager) + } } impl WhisperClient { - /// Create a new whisper client. - pub fn new(pool: P, filter_manager: Arc) -> Self { - WhisperClient { - store: filter_manager.key_store(), - pool: pool, - filter_manager: filter_manager, - _meta: ::std::marker::PhantomData, - } - } + /// Create a new whisper client. + pub fn new(pool: P, filter_manager: Arc) -> Self { + WhisperClient { + store: filter_manager.key_store(), + pool: pool, + filter_manager: filter_manager, + _meta: ::std::marker::PhantomData, + } + } - fn delete_filter_kind(&self, id: H256, kind: filter::Kind) -> bool { - match self.filter_manager.kind(&id) { - Some(k) if k == kind => { - self.filter_manager.remove(&id); - true - } - None | Some(_) => false, - } - } + fn delete_filter_kind(&self, id: H256, kind: filter::Kind) -> bool { + match self.filter_manager.kind(&id) { + Some(k) if k == kind => { + self.filter_manager.remove(&id); + true + } + None | Some(_) => false, + } + } } impl Whisper for WhisperClient { - fn info(&self) -> Result { - let status = self.pool.pool_status(); + fn info(&self) -> Result { + let status = self.pool.pool_status(); - Ok(types::NodeInfo { - required_pow: status.required_pow, - messages: status.message_count, - memory: status.cumulative_size, - target_memory: status.target_size, - }) - } + Ok(types::NodeInfo { + required_pow: status.required_pow, + messages: status.message_count, + memory: status.cumulative_size, + target_memory: status.target_size, + }) + } - fn new_key_pair(&self) -> Result { - let mut store = self.store.write(); - let key_pair = Key::new_asymmetric(store.rng()); + fn new_key_pair(&self) -> Result { + let mut store = self.store.write(); + 
let key_pair = Key::new_asymmetric(store.rng()); - Ok(HexEncode(store.insert(key_pair))) - } + Ok(HexEncode(store.insert(key_pair))) + } - fn add_private_key(&self, private: types::Private) -> Result { - let key_pair = Key::from_secret(private.into_inner().into()) - .ok_or_else(|| whisper_error("Invalid private key"))?; + fn add_private_key(&self, private: types::Private) -> Result { + let key_pair = Key::from_secret(private.into_inner().into()) + .ok_or_else(|| whisper_error("Invalid private key"))?; - Ok(HexEncode(self.store.write().insert(key_pair))) - } + Ok(HexEncode(self.store.write().insert(key_pair))) + } - fn new_sym_key(&self) -> Result { - let mut store = self.store.write(); - let key = Key::new_symmetric(store.rng()); + fn new_sym_key(&self) -> Result { + let mut store = self.store.write(); + let key = Key::new_symmetric(store.rng()); - Ok(HexEncode(store.insert(key))) - } + Ok(HexEncode(store.insert(key))) + } - fn add_sym_key(&self, raw_key: types::Symmetric) -> Result { - let raw_key = raw_key.into_inner().0; - let key = Key::from_raw_symmetric(raw_key); + fn add_sym_key(&self, raw_key: types::Symmetric) -> Result { + let raw_key = raw_key.into_inner().0; + let key = Key::from_raw_symmetric(raw_key); - Ok(HexEncode(self.store.write().insert(key))) - } + Ok(HexEncode(self.store.write().insert(key))) + } - fn get_public(&self, id: types::Identity) -> Result { - self.store.read().public(&id.into_inner()) - .cloned() - .map(HexEncode) - .ok_or_else(|| whisper_error("Unknown identity")) - } + fn get_public(&self, id: types::Identity) -> Result { + self.store + .read() + .public(&id.into_inner()) + .cloned() + .map(HexEncode) + .ok_or_else(|| whisper_error("Unknown identity")) + } - fn get_private(&self, id: types::Identity) -> Result { - self.store.read().secret(&id.into_inner()) - .map(|x| (&**x).clone()) - .map(HexEncode) - .ok_or_else(|| whisper_error("Unknown identity")) - } + fn get_private(&self, id: types::Identity) -> Result { + self.store + 
.read() + .secret(&id.into_inner()) + .map(|x| (&**x).clone()) + .map(HexEncode) + .ok_or_else(|| whisper_error("Unknown identity")) + } - fn get_symmetric(&self, id: types::Identity) -> Result { - self.store.read().symmetric(&id.into_inner()) - .cloned() - .map(H256) - .map(HexEncode) - .ok_or_else(|| whisper_error("Unknown identity")) - } + fn get_symmetric(&self, id: types::Identity) -> Result { + self.store + .read() + .symmetric(&id.into_inner()) + .cloned() + .map(H256) + .map(HexEncode) + .ok_or_else(|| whisper_error("Unknown identity")) + } - fn remove_key(&self, id: types::Identity) -> Result { - Ok(self.store.write().remove(&id.into_inner())) - } + fn remove_key(&self, id: types::Identity) -> Result { + Ok(self.store.write().remove(&id.into_inner())) + } - fn post(&self, req: types::PostRequest) -> Result { - use self::crypto::EncryptionInstance; + fn post(&self, req: types::PostRequest) -> Result { + use self::crypto::EncryptionInstance; - let encryption = match req.to { - Some(types::Receiver::Public(public)) => EncryptionInstance::ecies(public.into_inner()) - .map_err(whisper_error)?, - Some(types::Receiver::Identity(id)) => self.store.read().encryption_instance(&id.into_inner()) - .map_err(whisper_error)?, - None => { - use rand::{Rng, OsRng}; + let encryption = match req.to { + Some(types::Receiver::Public(public)) => { + EncryptionInstance::ecies(public.into_inner()).map_err(whisper_error)? + } + Some(types::Receiver::Identity(id)) => self + .store + .read() + .encryption_instance(&id.into_inner()) + .map_err(whisper_error)?, + None => { + use rand::{OsRng, Rng}; - // broadcast mode: use fixed nonce and fresh key each time. + // broadcast mode: use fixed nonce and fresh key each time. 
- let mut rng = OsRng::new() - .map_err(|_| whisper_error("unable to acquire secure randomness"))?; + let mut rng = OsRng::new() + .map_err(|_| whisper_error("unable to acquire secure randomness"))?; - let key = Memzero::from(rng.gen::<[u8; 32]>()); - if req.topics.is_empty() { - return Err(whisper_error("must supply at least one topic for broadcast message")); - } + let key = Memzero::from(rng.gen::<[u8; 32]>()); + if req.topics.is_empty() { + return Err(whisper_error( + "must supply at least one topic for broadcast message", + )); + } - EncryptionInstance::broadcast( - key, - req.topics.iter().map(|x| topic_hash(&x)).collect() - ) - } - }; + EncryptionInstance::broadcast( + key, + req.topics.iter().map(|x| topic_hash(&x)).collect(), + ) + } + }; - let sign_with = match req.from { - Some(from) => { - Some( - self.store.read().secret(&from.into_inner()) - .cloned() - .ok_or_else(|| whisper_error("Unknown identity `from`"))? - ) - } - None => None, - }; + let sign_with = match req.from { + Some(from) => Some( + self.store + .read() + .secret(&from.into_inner()) + .cloned() + .ok_or_else(|| whisper_error("Unknown identity `from`"))?, + ), + None => None, + }; - let encrypted = { - let payload = payload::encode(payload::EncodeParams { - message: &req.payload.into_inner(), - padding: req.padding.map(|p| p.into_inner()).as_ref().map(|x| &x[..]), - sign_with: sign_with.as_ref(), - }).map_err(whisper_error)?; + let encrypted = { + let payload = payload::encode(payload::EncodeParams { + message: &req.payload.into_inner(), + padding: req.padding.map(|p| p.into_inner()).as_ref().map(|x| &x[..]), + sign_with: sign_with.as_ref(), + }) + .map_err(whisper_error)?; - encryption.encrypt(&payload).ok_or(whisper_error("encryption error"))? - }; + encryption + .encrypt(&payload) + .ok_or(whisper_error("encryption error"))? + }; - // mining the packet is the heaviest item of work by far. - // there may be a benefit to dispatching this onto the CPU pool - // and returning a future. 
but then things get _less_ efficient - // if the server infrastructure has more threads than the CPU pool. - let message = Message::create(CreateParams { - ttl: req.ttl, - payload: encrypted, - topics: req.topics.into_iter().map(|x| abridge_topic(&x.into_inner())).collect(), - work: req.priority, - }).map_err(|_| whisper_error("Empty topics"))?; + // mining the packet is the heaviest item of work by far. + // there may be a benefit to dispatching this onto the CPU pool + // and returning a future. but then things get _less_ efficient + // if the server infrastructure has more threads than the CPU pool. + let message = Message::create(CreateParams { + ttl: req.ttl, + payload: encrypted, + topics: req + .topics + .into_iter() + .map(|x| abridge_topic(&x.into_inner())) + .collect(), + work: req.priority, + }) + .map_err(|_| whisper_error("Empty topics"))?; - if !self.pool.relay(message) { - Err(whisper_error("PoW too low to compete with other messages")) - } else { - Ok(true) - } - } + if !self.pool.relay(message) { + Err(whisper_error("PoW too low to compete with other messages")) + } else { + Ok(true) + } + } - fn new_filter(&self, req: types::FilterRequest) -> Result { - let filter = Filter::new(req).map_err(whisper_error)?; + fn new_filter(&self, req: types::FilterRequest) -> Result { + let filter = Filter::new(req).map_err(whisper_error)?; - self.filter_manager.insert_polled(filter) - .map(HexEncode) - .map_err(whisper_error) - } + self.filter_manager + .insert_polled(filter) + .map(HexEncode) + .map_err(whisper_error) + } - fn poll_changes(&self, id: types::Identity) -> Result, Error> { - match self.filter_manager.poll_changes(&id.into_inner()) { - None => Err(whisper_error("no such message filter")), - Some(items) => Ok(items), - } - } + fn poll_changes(&self, id: types::Identity) -> Result, Error> { + match self.filter_manager.poll_changes(&id.into_inner()) { + None => Err(whisper_error("no such message filter")), + Some(items) => Ok(items), + } + } - fn 
delete_filter(&self, id: types::Identity) -> Result { - Ok(self.delete_filter_kind(id.into_inner(), filter::Kind::Poll)) - } + fn delete_filter(&self, id: types::Identity) -> Result { + Ok(self.delete_filter_kind(id.into_inner(), filter::Kind::Poll)) + } } -impl WhisperPubSub for WhisperClient { - type Metadata = M; +impl WhisperPubSub + for WhisperClient +{ + type Metadata = M; - fn subscribe( - &self, - _meta: Self::Metadata, - subscriber: Subscriber, - req: types::FilterRequest, - ) { - match Filter::new(req) { - Ok(filter) => { - if let Err(e) = self.filter_manager.insert_subscription(filter, subscriber) { - debug!(target: "whisper", "Failed to add subscription: {}", e); - } - } - Err(reason) => { let _ = subscriber.reject(whisper_error(reason)); } - } - } + fn subscribe( + &self, + _meta: Self::Metadata, + subscriber: Subscriber, + req: types::FilterRequest, + ) { + match Filter::new(req) { + Ok(filter) => { + if let Err(e) = self.filter_manager.insert_subscription(filter, subscriber) { + debug!(target: "whisper", "Failed to add subscription: {}", e); + } + } + Err(reason) => { + let _ = subscriber.reject(whisper_error(reason)); + } + } + } - fn unsubscribe(&self, _: Option, id: SubscriptionId) -> Result { - use std::str::FromStr; + fn unsubscribe(&self, _: Option, id: SubscriptionId) -> Result { + use std::str::FromStr; - let res = match id { - SubscriptionId::String(s) => H256::from_str(&s) - .map_err(|_| "unrecognized ID") - .map(|id| self.delete_filter_kind(id, filter::Kind::Subscription)), - SubscriptionId::Number(_) => Err("unrecognized ID"), - }; + let res = match id { + SubscriptionId::String(s) => H256::from_str(&s) + .map_err(|_| "unrecognized ID") + .map(|id| self.delete_filter_kind(id, filter::Kind::Subscription)), + SubscriptionId::Number(_) => Err("unrecognized ID"), + }; - res.map_err(whisper_error) - } + res.map_err(whisper_error) + } } diff --git a/whisper/src/rpc/payload.rs b/whisper/src/rpc/payload.rs index 326a6b6e2..79e33b4d0 100644 --- 
a/whisper/src/rpc/payload.rs +++ b/whisper/src/rpc/payload.rs @@ -33,8 +33,8 @@ //! //! AES-256-GCM will append 12 bytes of metadata to the front of the message. -use ethereum_types::H256; use byteorder::{BigEndian, ByteOrder, WriteBytesExt}; +use ethereum_types::H256; use ethkey::{Public, Secret}; use tiny_keccak::keccak256; @@ -43,314 +43,324 @@ const SIGNATURE_LEN: usize = 65; const STANDARD_PAYLOAD_VERSION: u8 = 1; bitflags! { - struct Flags: u8 { - const FLAG_PAD_LEN_HIGH = 0b10000000; - const FLAG_PAD_LEN_LOW = 0b01000000; - const FLAG_SIGNED = 0b00100000; - } + struct Flags: u8 { + const FLAG_PAD_LEN_HIGH = 0b10000000; + const FLAG_PAD_LEN_LOW = 0b01000000; + const FLAG_SIGNED = 0b00100000; + } } // number of bytes of padding length (in the range 0..4) fn padding_length_bytes(flags: Flags) -> usize { - match (flags & FLAG_PAD_LEN_HIGH, flags & FLAG_PAD_LEN_LOW) { - (FLAG_PAD_LEN_HIGH, FLAG_PAD_LEN_LOW) => 3, - (FLAG_PAD_LEN_HIGH, _) => 2, - (_, FLAG_PAD_LEN_LOW) => 1, - (_, _) => 0, - } + match (flags & FLAG_PAD_LEN_HIGH, flags & FLAG_PAD_LEN_LOW) { + (FLAG_PAD_LEN_HIGH, FLAG_PAD_LEN_LOW) => 3, + (FLAG_PAD_LEN_HIGH, _) => 2, + (_, FLAG_PAD_LEN_LOW) => 1, + (_, _) => 0, + } } // how many bytes are necessary to encode the given length. Range 0..4. // `None` if too large. fn num_padding_length_bytes(padding_len: usize) -> Option { - let bits = 64 - (padding_len as u64).leading_zeros(); - match bits { - 0 => Some(0), - 0 ... 8 => Some(1), - 0 ... 16 => Some(2), - 0 ... 24 => Some(3), - _ => None, - } + let bits = 64 - (padding_len as u64).leading_zeros(); + match bits { + 0 => Some(0), + 0...8 => Some(1), + 0...16 => Some(2), + 0...24 => Some(3), + _ => None, + } } /// Parameters for encoding a standard payload. pub struct EncodeParams<'a> { - /// Message to encode. - pub message: &'a [u8], - /// Padding bytes. Maximum padding allowed is 65536 bytes. - pub padding: Option<&'a [u8]>, - /// Private key to sign with. 
- pub sign_with: Option<&'a Secret>, + /// Message to encode. + pub message: &'a [u8], + /// Padding bytes. Maximum padding allowed is 65536 bytes. + pub padding: Option<&'a [u8]>, + /// Private key to sign with. + pub sign_with: Option<&'a Secret>, } impl<'a> Default for EncodeParams<'a> { - fn default() -> Self { - EncodeParams { - message: &[], - padding: None, - sign_with: None, - } - } + fn default() -> Self { + EncodeParams { + message: &[], + padding: None, + sign_with: None, + } + } } /// Parameters for decoding a standard payload. pub struct Decoded<'a> { - /// Decoded message. - pub message: &'a [u8], - /// optional padding. - pub padding: Option<&'a [u8]>, - /// Recovered signature. - pub from: Option, + /// Decoded message. + pub message: &'a [u8], + /// optional padding. + pub padding: Option<&'a [u8]>, + /// Recovered signature. + pub from: Option, } /// Encode using provided parameters. pub fn encode(params: EncodeParams) -> Result, &'static str> { - const VEC_WRITE_INFALLIBLE: &'static str = "writing to a Vec can never fail; qed"; + const VEC_WRITE_INFALLIBLE: &'static str = "writing to a Vec can never fail; qed"; - let padding_len = params.padding.map_or(0, |x| x.len()); - let padding_len_bytes = num_padding_length_bytes(padding_len) - .ok_or_else(|| "padding size too long")?; + let padding_len = params.padding.map_or(0, |x| x.len()); + let padding_len_bytes = + num_padding_length_bytes(padding_len).ok_or_else(|| "padding size too long")?; - let signature = params.sign_with.map(|secret| { - let hash = H256(keccak256(params.message)); - ::ethkey::sign(secret, &hash) - }); + let signature = params.sign_with.map(|secret| { + let hash = H256(keccak256(params.message)); + ::ethkey::sign(secret, &hash) + }); - let signature = match signature { - Some(Ok(sig)) => Some(sig), - Some(Err(_)) => return Err("invalid signing key provided"), - None => None, - }; + let signature = match signature { + Some(Ok(sig)) => Some(sig), + Some(Err(_)) => return 
Err("invalid signing key provided"), + None => None, + }; - let (flags, plaintext_size) = { - let mut flags = Flags::empty(); + let (flags, plaintext_size) = { + let mut flags = Flags::empty(); - // 1 byte each for flags and version. - let mut plaintext_size = 2 - + padding_len_bytes - + padding_len - + params.message.len(); + // 1 byte each for flags and version. + let mut plaintext_size = 2 + padding_len_bytes + padding_len + params.message.len(); - flags.bits = (padding_len_bytes << 6) as u8; - debug_assert_eq!(padding_length_bytes(flags), padding_len_bytes); + flags.bits = (padding_len_bytes << 6) as u8; + debug_assert_eq!(padding_length_bytes(flags), padding_len_bytes); - if let Some(ref sig) = signature { - plaintext_size += sig.len(); - flags |= FLAG_SIGNED; - } + if let Some(ref sig) = signature { + plaintext_size += sig.len(); + flags |= FLAG_SIGNED; + } - (flags, plaintext_size) - }; + (flags, plaintext_size) + }; - let mut plaintext = Vec::with_capacity(plaintext_size); + let mut plaintext = Vec::with_capacity(plaintext_size); - plaintext.push(STANDARD_PAYLOAD_VERSION); - plaintext.push(flags.bits); + plaintext.push(STANDARD_PAYLOAD_VERSION); + plaintext.push(flags.bits); - if let Some(padding) = params.padding { - plaintext.write_uint::(padding_len as u64, padding_len_bytes) - .expect(VEC_WRITE_INFALLIBLE); + if let Some(padding) = params.padding { + plaintext + .write_uint::(padding_len as u64, padding_len_bytes) + .expect(VEC_WRITE_INFALLIBLE); - plaintext.extend(padding) - } + plaintext.extend(padding) + } - if let Some(signature) = signature { - plaintext.extend(signature.r()); - plaintext.extend(signature.s()); - plaintext.push(signature.v()); - } + if let Some(signature) = signature { + plaintext.extend(signature.r()); + plaintext.extend(signature.s()); + plaintext.push(signature.v()); + } - plaintext.extend(params.message); + plaintext.extend(params.message); - Ok(plaintext) + Ok(plaintext) } /// Decode using provided parameters pub fn 
decode(payload: &[u8]) -> Result { - let mut offset = 0; + let mut offset = 0; - let (padding, signature) = { - // use a closure for reading slices since std::io::Read would require - // us to copy. - let mut next_slice = |len| { - let end = offset + len; - if payload.len() >= end { - let slice = &payload[offset .. end]; - offset = end; + let (padding, signature) = { + // use a closure for reading slices since std::io::Read would require + // us to copy. + let mut next_slice = |len| { + let end = offset + len; + if payload.len() >= end { + let slice = &payload[offset..end]; + offset = end; - Ok(slice) - } else { - return Err("unexpected end of payload") - } - }; + Ok(slice) + } else { + return Err("unexpected end of payload"); + } + }; - if next_slice(1)?[0] != STANDARD_PAYLOAD_VERSION { - return Err("unknown payload version."); - } + if next_slice(1)?[0] != STANDARD_PAYLOAD_VERSION { + return Err("unknown payload version."); + } - let flags = Flags::from_bits_truncate(next_slice(1)?[0]); + let flags = Flags::from_bits_truncate(next_slice(1)?[0]); - let padding_len_bytes = padding_length_bytes(flags); - let padding = if padding_len_bytes != 0 { - let padding_len = BigEndian::read_uint( - next_slice(padding_len_bytes)?, - padding_len_bytes, - ); + let padding_len_bytes = padding_length_bytes(flags); + let padding = if padding_len_bytes != 0 { + let padding_len = + BigEndian::read_uint(next_slice(padding_len_bytes)?, padding_len_bytes); - Some(next_slice(padding_len as usize)?) - } else { - None - }; + Some(next_slice(padding_len as usize)?) 
+ } else { + None + }; - let signature = if flags & FLAG_SIGNED == FLAG_SIGNED { - let slice = next_slice(SIGNATURE_LEN)?; - let mut arr = [0; SIGNATURE_LEN]; + let signature = if flags & FLAG_SIGNED == FLAG_SIGNED { + let slice = next_slice(SIGNATURE_LEN)?; + let mut arr = [0; SIGNATURE_LEN]; - arr.copy_from_slice(slice); - let signature = ::ethkey::Signature::from(arr); + arr.copy_from_slice(slice); + let signature = ::ethkey::Signature::from(arr); - let not_rsv = signature.r() != &slice[..32] - || signature.s() != &slice[32..64] - || signature.v() != slice[64]; + let not_rsv = signature.r() != &slice[..32] + || signature.s() != &slice[32..64] + || signature.v() != slice[64]; - if not_rsv { - return Err("signature not in RSV format"); - } else { - Some(signature) - } - } else { - None - }; + if not_rsv { + return Err("signature not in RSV format"); + } else { + Some(signature) + } + } else { + None + }; - (padding, signature) - }; + (padding, signature) + }; - // remaining data is the message. - let message = &payload[offset..]; + // remaining data is the message. + let message = &payload[offset..]; - let from = match signature { - None => None, - Some(sig) => { - let hash = H256(keccak256(message)); - Some(::ethkey::recover(&sig, &hash).map_err(|_| "invalid signature")?) - } - }; + let from = match signature { + None => None, + Some(sig) => { + let hash = H256(keccak256(message)); + Some(::ethkey::recover(&sig, &hash).map_err(|_| "invalid signature")?) 
+ } + }; - Ok(Decoded { - message: message, - padding: padding, - from: from, - }) + Ok(Decoded { + message: message, + padding: padding, + from: from, + }) } #[cfg(test)] mod tests { - use ethkey::{Generator, Random}; - use super::*; + use super::*; + use ethkey::{Generator, Random}; - #[test] - fn padding_len_bytes_sanity() { - const U24_MAX: usize = (1 << 24) - 1; + #[test] + fn padding_len_bytes_sanity() { + const U24_MAX: usize = (1 << 24) - 1; - assert_eq!(padding_length_bytes(FLAG_PAD_LEN_HIGH | FLAG_PAD_LEN_LOW), 3); - assert_eq!(padding_length_bytes(FLAG_PAD_LEN_HIGH), 2); - assert_eq!(padding_length_bytes(FLAG_PAD_LEN_LOW), 1); - assert_eq!(padding_length_bytes(Flags::empty()), 0); + assert_eq!( + padding_length_bytes(FLAG_PAD_LEN_HIGH | FLAG_PAD_LEN_LOW), + 3 + ); + assert_eq!(padding_length_bytes(FLAG_PAD_LEN_HIGH), 2); + assert_eq!(padding_length_bytes(FLAG_PAD_LEN_LOW), 1); + assert_eq!(padding_length_bytes(Flags::empty()), 0); - assert!(num_padding_length_bytes(u32::max_value() as _).is_none()); - assert!(num_padding_length_bytes(U24_MAX + 1).is_none()); + assert!(num_padding_length_bytes(u32::max_value() as _).is_none()); + assert!(num_padding_length_bytes(U24_MAX + 1).is_none()); - assert_eq!(num_padding_length_bytes(U24_MAX), Some(3)); + assert_eq!(num_padding_length_bytes(U24_MAX), Some(3)); - assert_eq!(num_padding_length_bytes(u16::max_value() as usize + 1), Some(3)); - assert_eq!(num_padding_length_bytes(u16::max_value() as usize), Some(2)); + assert_eq!( + num_padding_length_bytes(u16::max_value() as usize + 1), + Some(3) + ); + assert_eq!(num_padding_length_bytes(u16::max_value() as usize), Some(2)); - assert_eq!(num_padding_length_bytes(u8::max_value() as usize + 1), Some(2)); - assert_eq!(num_padding_length_bytes(u8::max_value() as usize), Some(1)); + assert_eq!( + num_padding_length_bytes(u8::max_value() as usize + 1), + Some(2) + ); + assert_eq!(num_padding_length_bytes(u8::max_value() as usize), Some(1)); - 
assert_eq!(num_padding_length_bytes(1), Some(1)); - assert_eq!(num_padding_length_bytes(0), Some(0)); - } + assert_eq!(num_padding_length_bytes(1), Some(1)); + assert_eq!(num_padding_length_bytes(0), Some(0)); + } - #[test] - fn encode_decode_roundtrip() { - let message = [1, 2, 3, 4, 5]; - let encoded = encode(EncodeParams { - message: &message, - padding: None, - sign_with: None, - }).unwrap(); + #[test] + fn encode_decode_roundtrip() { + let message = [1, 2, 3, 4, 5]; + let encoded = encode(EncodeParams { + message: &message, + padding: None, + sign_with: None, + }) + .unwrap(); - let decoded = decode(&encoded).unwrap(); + let decoded = decode(&encoded).unwrap(); - assert_eq!(message, decoded.message); - } + assert_eq!(message, decoded.message); + } - #[test] - fn encode_empty() { - let encoded = encode(EncodeParams { - message: &[], - padding: None, - sign_with: None, - }).unwrap(); + #[test] + fn encode_empty() { + let encoded = encode(EncodeParams { + message: &[], + padding: None, + sign_with: None, + }) + .unwrap(); - let decoded = decode(&encoded).unwrap(); + let decoded = decode(&encoded).unwrap(); - assert!(decoded.message.is_empty()); - } + assert!(decoded.message.is_empty()); + } - #[test] - fn encode_with_signature() { - let key_pair = Random.generate().unwrap(); - let message = [1, 3, 5, 7, 9]; + #[test] + fn encode_with_signature() { + let key_pair = Random.generate().unwrap(); + let message = [1, 3, 5, 7, 9]; - let encoded = encode(EncodeParams { - message: &message, - padding: None, - sign_with: Some(key_pair.secret()), - }).unwrap(); + let encoded = encode(EncodeParams { + message: &message, + padding: None, + sign_with: Some(key_pair.secret()), + }) + .unwrap(); - let decoded = decode(&encoded).unwrap(); + let decoded = decode(&encoded).unwrap(); - assert_eq!(decoded.message, message); - assert_eq!(decoded.from, Some(key_pair.public().clone())); - assert!(decoded.padding.is_none()); - } + assert_eq!(decoded.message, message); + 
assert_eq!(decoded.from, Some(key_pair.public().clone())); + assert!(decoded.padding.is_none()); + } - #[test] - fn encode_with_padding() { - let message = [1, 3, 5, 7, 9]; - let padding = [0xff; 1024 - 5]; + #[test] + fn encode_with_padding() { + let message = [1, 3, 5, 7, 9]; + let padding = [0xff; 1024 - 5]; - let encoded = encode(EncodeParams { - message: &message, - padding: Some(&padding), - sign_with: None, - }).unwrap(); + let encoded = encode(EncodeParams { + message: &message, + padding: Some(&padding), + sign_with: None, + }) + .unwrap(); - let decoded = decode(&encoded).unwrap(); + let decoded = decode(&encoded).unwrap(); - assert_eq!(decoded.message, message); - assert_eq!(decoded.padding, Some(&padding[..])); - assert!(decoded.from.is_none()); - } + assert_eq!(decoded.message, message); + assert_eq!(decoded.padding, Some(&padding[..])); + assert!(decoded.from.is_none()); + } - #[test] - fn encode_with_padding_and_signature() { - let key_pair = Random.generate().unwrap(); - let message = [1, 3, 5, 7, 9]; - let padding = [0xff; 1024 - 5]; + #[test] + fn encode_with_padding_and_signature() { + let key_pair = Random.generate().unwrap(); + let message = [1, 3, 5, 7, 9]; + let padding = [0xff; 1024 - 5]; - let encoded = encode(EncodeParams { - message: &message, - padding: Some(&padding), - sign_with: Some(key_pair.secret()), - }).unwrap(); + let encoded = encode(EncodeParams { + message: &message, + padding: Some(&padding), + sign_with: Some(key_pair.secret()), + }) + .unwrap(); - let decoded = decode(&encoded).unwrap(); + let decoded = decode(&encoded).unwrap(); - assert_eq!(decoded.message, message); - assert_eq!(decoded.padding, Some(&padding[..])); - assert_eq!(decoded.from, Some(key_pair.public().clone())); - } + assert_eq!(decoded.message, message); + assert_eq!(decoded.padding, Some(&padding[..])); + assert_eq!(decoded.from, Some(key_pair.public().clone())); + } } diff --git a/whisper/src/rpc/types.rs b/whisper/src/rpc/types.rs index 
40e440bf5..a4f6d9284 100644 --- a/whisper/src/rpc/types.rs +++ b/whisper/src/rpc/types.rs @@ -16,22 +16,25 @@ //! Types for Whisper RPC. -use std::fmt; -use std::ops::Deref; +use std::{fmt, ops::Deref}; -use ethereum_types::{H32, H64, H128, H256, H264, H512}; -use hex::{ToHex, FromHex}; +use ethereum_types::{H128, H256, H264, H32, H512, H64}; +use hex::{FromHex, ToHex}; -use serde::{Serialize, Serializer, Deserialize, Deserializer}; -use serde::de::{Error, Visitor}; +use serde::{ + de::{Error, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; /// Helper trait for generic hex bytes encoding. -pub trait HexEncodable: Sized + ::std::ops::Deref { - fn from_bytes(bytes: Vec) -> Option; +pub trait HexEncodable: Sized + ::std::ops::Deref { + fn from_bytes(bytes: Vec) -> Option; } impl HexEncodable for Vec { - fn from_bytes(bytes: Vec) -> Option { Some(bytes) } + fn from_bytes(bytes: Vec) -> Option { + Some(bytes) + } } macro_rules! impl_hex_for_hash { @@ -51,7 +54,7 @@ macro_rules! impl_hex_for_hash { } impl_hex_for_hash!( - H32 H64 H128 H256 H264 H512 + H32 H64 H128 H256 H264 H512 ); /// Wrapper structure around hex-encoded data. @@ -59,23 +62,29 @@ impl_hex_for_hash!( pub struct HexEncode(pub T); impl From for HexEncode { - fn from(x: T) -> Self { - HexEncode(x) - } + fn from(x: T) -> Self { + HexEncode(x) + } } impl HexEncode { - /// Create a new wrapper from the inner value. - pub fn new(x: T) -> Self { HexEncode(x) } + /// Create a new wrapper from the inner value. + pub fn new(x: T) -> Self { + HexEncode(x) + } - /// Consume the wrapper, yielding the inner value. - pub fn into_inner(self) -> T { self.0 } + /// Consume the wrapper, yielding the inner value. + pub fn into_inner(self) -> T { + self.0 + } } impl Deref for HexEncode { - type Target = T; + type Target = T; - fn deref(&self) -> &T { &self.0 } + fn deref(&self) -> &T { + &self.0 + } } /// Hex-encoded arbitrary-byte vector. 
@@ -99,47 +108,51 @@ pub type AbridgedTopic = HexEncode; pub type Symmetric = HexEncode; impl Serialize for HexEncode { - fn serialize(&self, serializer: S) -> Result { - let data = &self.0[..]; - let serialized = "0x".to_owned() + &data.to_hex(); + fn serialize(&self, serializer: S) -> Result { + let data = &self.0[..]; + let serialized = "0x".to_owned() + &data.to_hex(); - serializer.serialize_str(serialized.as_ref()) - } + serializer.serialize_str(serialized.as_ref()) + } } impl<'a, T: 'a + HexEncodable> Deserialize<'a> for HexEncode { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> - { - deserializer.deserialize_any(HexEncodeVisitor::(::std::marker::PhantomData)) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'a>, + { + deserializer.deserialize_any(HexEncodeVisitor::(::std::marker::PhantomData)) + } } // helper type for decoding anything from hex. struct HexEncodeVisitor(::std::marker::PhantomData); impl<'a, T: HexEncodable> Visitor<'a> for HexEncodeVisitor { - type Value = HexEncode; + type Value = HexEncode; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed, hex-encoded vector of bytes") - } + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a 0x-prefixed, hex-encoded vector of bytes") + } - fn visit_str(self, value: &str) -> Result { - let decoded = if value.len() >= 2 && &value[0..2] == "0x" && value.len() & 1 == 0 { - Ok(Vec::from_hex(&value[2..]).map_err(|_| Error::custom("invalid hex"))?) - } else { - Err(Error::custom("invalid format")) - }; + fn visit_str(self, value: &str) -> Result { + let decoded = if value.len() >= 2 && &value[0..2] == "0x" && value.len() & 1 == 0 { + Ok(Vec::from_hex(&value[2..]).map_err(|_| Error::custom("invalid hex"))?) 
+ } else { + Err(Error::custom("invalid format")) + }; - decoded - .and_then(|x| T::from_bytes(x).ok_or(Error::custom("invalid format"))) - .map(HexEncode) - } + decoded + .and_then(|x| T::from_bytes(x).ok_or(Error::custom("invalid format"))) + .map(HexEncode) + } - fn visit_string(self, value: String) -> Result where E: Error { - self.visit_str(value.as_ref()) - } + fn visit_string(self, value: String) -> Result + where + E: Error, + { + self.visit_str(value.as_ref()) + } } /// Receiver of a message. Either a public key, identity (presumably symmetric), @@ -147,151 +160,151 @@ impl<'a, T: HexEncodable> Visitor<'a> for HexEncodeVisitor { #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub enum Receiver { - Public(Public), - Identity(Identity), + Public(Public), + Identity(Identity), } /// A request to post a message to the whisper network. #[derive(Deserialize)] pub struct PostRequest { - /// Receiver of the message. Either a public key or - /// an identity. If the identity is symmetric, it will - /// encrypt to that identity. - /// - /// If the receiver is missing, this will be a broadcast message. - pub to: Option, + /// Receiver of the message. Either a public key or + /// an identity. If the identity is symmetric, it will + /// encrypt to that identity. + /// + /// If the receiver is missing, this will be a broadcast message. + pub to: Option, - /// Sender of the message. - /// - /// If present, the payload will be signed by this - /// identity. The call will fail if the whisper node doesn't store the - /// signing key for this identity. - #[serde(skip_serializing_if = "Option::is_none")] - pub from: Option, + /// Sender of the message. + /// + /// If present, the payload will be signed by this + /// identity. The call will fail if the whisper node doesn't store the + /// signing key for this identity. + #[serde(skip_serializing_if = "Option::is_none")] + pub from: Option, - /// Full topics to identify a message by. 
- /// At least one topic must be specified if the receiver is - /// not specified. - pub topics: Vec, + /// Full topics to identify a message by. + /// At least one topic must be specified if the receiver is + /// not specified. + pub topics: Vec, - /// Payload of the message - pub payload: Bytes, + /// Payload of the message + pub payload: Bytes, - /// Optional padding of the message. No larger than 2^24 - 1. - pub padding: Option, + /// Optional padding of the message. No larger than 2^24 - 1. + pub padding: Option, - /// Priority of the message: how many milliseconds to spend doing PoW - pub priority: u64, + /// Priority of the message: how many milliseconds to spend doing PoW + pub priority: u64, - /// Time-To-Live of the message in seconds. - pub ttl: u64, + /// Time-To-Live of the message in seconds. + pub ttl: u64, } /// Request for filter or subscription creation. #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct FilterRequest { - /// ID of key used for decryption. - /// - /// If this identity is removed, then no further messages will be returned. - /// - /// If optional, this will listen for broadcast messages. - pub decrypt_with: Option, + /// ID of key used for decryption. + /// + /// If this identity is removed, then no further messages will be returned. + /// + /// If optional, this will listen for broadcast messages. + pub decrypt_with: Option, - /// Accept only messages signed by given public key. - pub from: Option, + /// Accept only messages signed by given public key. + pub from: Option, - /// Possible topics. Cannot be empty if the identity is `None` - pub topics: Vec, + /// Possible topics. Cannot be empty if the identity is `None` + pub topics: Vec, } /// A message captured by a filter or subscription. #[derive(Serialize, Clone)] pub struct FilterItem { - /// Public key that signed this message. - #[serde(skip_serializing_if = "Option::is_none")] - pub from: Option, + /// Public key that signed this message. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub from: Option, - /// Identity of recipient. If the filter wasn't registered with a - /// recipient, this will be `None`. - #[serde(skip_serializing_if = "Option::is_none")] - pub recipient: Option, + /// Identity of recipient. If the filter wasn't registered with a + /// recipient, this will be `None`. + #[serde(skip_serializing_if = "Option::is_none")] + pub recipient: Option, - /// Time to live in seconds. - pub ttl: u64, + /// Time to live in seconds. + pub ttl: u64, - /// Topics that matched the filter. - pub topics: Vec, + /// Topics that matched the filter. + pub topics: Vec, - /// Unix timestamp of the message generation. - pub timestamp: u64, + /// Unix timestamp of the message generation. + pub timestamp: u64, - /// Decrypted/Interpreted payload. - pub payload: Bytes, + /// Decrypted/Interpreted payload. + pub payload: Bytes, - /// Optional padding data. - #[serde(skip_serializing_if = "Option::is_none")] - pub padding: Option, + /// Optional padding data. + #[serde(skip_serializing_if = "Option::is_none")] + pub padding: Option, } /// Whisper node info. #[derive(Serialize)] #[serde(rename_all = "camelCase")] pub struct NodeInfo { - /// min PoW to be accepted into the local pool. - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(rename = "minPow")] - pub required_pow: Option, + /// min PoW to be accepted into the local pool. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "minPow")] + pub required_pow: Option, - /// Number of messages in the pool. - pub messages: usize, + /// Number of messages in the pool. + pub messages: usize, - /// Memory used by messages in the pool. - pub memory: usize, + /// Memory used by messages in the pool. + pub memory: usize, - /// Target memory of the pool. - pub target_memory: usize, + /// Target memory of the pool. 
+ pub target_memory: usize, } #[cfg(test)] mod tests { - use super::*; - use serde_json; - use hex::FromHex; + use super::*; + use hex::FromHex; + use serde_json; - #[test] - fn test_bytes_serialize() { - let bytes = Bytes::new(Vec::from_hex("0123456789abcdef").unwrap()); - let serialized = serde_json::to_string(&bytes).unwrap(); - assert_eq!(serialized, r#""0x0123456789abcdef""#); - } + #[test] + fn test_bytes_serialize() { + let bytes = Bytes::new(Vec::from_hex("0123456789abcdef").unwrap()); + let serialized = serde_json::to_string(&bytes).unwrap(); + assert_eq!(serialized, r#""0x0123456789abcdef""#); + } - #[test] - fn test_bytes_deserialize() { - let bytes2: Result = serde_json::from_str(r#""0x123""#); - let bytes3: Result = serde_json::from_str(r#""0xgg""#); + #[test] + fn test_bytes_deserialize() { + let bytes2: Result = serde_json::from_str(r#""0x123""#); + let bytes3: Result = serde_json::from_str(r#""0xgg""#); - let bytes4: Bytes = serde_json::from_str(r#""0x""#).unwrap(); - let bytes5: Bytes = serde_json::from_str(r#""0x12""#).unwrap(); - let bytes6: Bytes = serde_json::from_str(r#""0x0123""#).unwrap(); + let bytes4: Bytes = serde_json::from_str(r#""0x""#).unwrap(); + let bytes5: Bytes = serde_json::from_str(r#""0x12""#).unwrap(); + let bytes6: Bytes = serde_json::from_str(r#""0x0123""#).unwrap(); - assert!(bytes2.is_err()); - assert!(bytes3.is_err()); - assert_eq!(bytes4, Bytes::new(vec![])); - assert_eq!(bytes5, Bytes::new(vec![0x12])); - assert_eq!(bytes6, Bytes::new(vec![0x1, 0x23])); - } + assert!(bytes2.is_err()); + assert!(bytes3.is_err()); + assert_eq!(bytes4, Bytes::new(vec![])); + assert_eq!(bytes5, Bytes::new(vec![0x12])); + assert_eq!(bytes6, Bytes::new(vec![0x1, 0x23])); + } - #[test] - fn deserialize_topic() { - let topic = AbridgedTopic::new([1, 2, 3, 15].into()); + #[test] + fn deserialize_topic() { + let topic = AbridgedTopic::new([1, 2, 3, 15].into()); - let topic1: Result = serde_json::from_str(r#""0x010203""#); - let topic2: Result = 
serde_json::from_str(r#""0102030F""#); - let topic3: AbridgedTopic = serde_json::from_str(r#""0x0102030F""#).unwrap(); + let topic1: Result = serde_json::from_str(r#""0x010203""#); + let topic2: Result = serde_json::from_str(r#""0102030F""#); + let topic3: AbridgedTopic = serde_json::from_str(r#""0x0102030F""#).unwrap(); - assert!(topic1.is_err()); - assert!(topic2.is_err()); - assert_eq!(topic3, topic); - } + assert!(topic1.is_err()); + assert!(topic2.is_err()); + assert_eq!(topic3, topic); + } }